diff --git a/.editorconfig b/.editorconfig index 2b40ec32fac3e..50cc9dacd7e42 100644 --- a/.editorconfig +++ b/.editorconfig @@ -9,6 +9,11 @@ trim_trailing_whitespace=true max_line_length=100 insert_final_newline=true +[*.md] +max_line_length=80 +indent_style=space +indent_size=2 + [*.yml] indent_style=space indent_size=2 diff --git a/.github/allowed-actions.js b/.github/allowed-actions.js new file mode 100644 index 0000000000000..4fb894758060d --- /dev/null +++ b/.github/allowed-actions.js @@ -0,0 +1,7 @@ +// This is a whitelist of GitHub Actions that are approved for use in this project. +// If a new or existing workflow file is updated to use an action or action version +// not listed here, CI will fail. + +module.exports = [ + 'gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236', // gaurav-nelson/github-action-markdown-link-check@v1.0.8 +] diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000..a321729dcbc81 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + labels: ["A2-insubstantial", "B0-silent", "C1-low 📌"] + schedule: + interval: "daily" diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 0000000000000..61d0fd0228d97 --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,18 @@ +# Number of days of inactivity before an issue becomes stale +daysUntilStale: 30 +# Number of days of inactivity before a stale issue is closed +daysUntilClose: 14 +# Issues with these labels will never be considered stale +exemptLabels: + - "D9-needsaudit 👮" +# Label to use when marking an issue as stale +staleLabel: "A3-stale" +# we only bother with pull requests +only: pulls +# Comment to post when marking an issue as stale. Set to `false` to disable +markComment: > + Hey, is anyone still working on this? Due to the inactivity this issue has + been automatically marked as stale. It will be closed if no further activity + occurs. Thank you for your contributions. +# Comment to post when closing a stale issue. 
Set to `false` to disable
+closeComment: false
diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml
new file mode 100644
index 0000000000000..7180e7b509662
--- /dev/null
+++ b/.github/workflows/check-labels.yml
@@ -0,0 +1,21 @@
+name: Check labels
+
+on:
+  pull_request:
+    types: [labeled, opened, synchronize, unlabeled]
+
+jobs:
+  check-labels:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          ref: ${{ github.event.pull_request.head.ref }}
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+      - name: Check labels
+        run: bash ${{ github.workspace }}/.maintain/github/check_labels.sh
+        env:
+          GITHUB_PR: ${{ github.event.pull_request.number }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
diff --git a/.github/workflows/md-link-check.yml b/.github/workflows/md-link-check.yml
new file mode 100644
index 0000000000000..868569911d471
--- /dev/null
+++ b/.github/workflows/md-link-check.yml
@@ -0,0 +1,19 @@
+name: Check Links
+
+on:
+  pull_request:
+    branches:
+      - master
+  push:
+    branches:
+      - master
+
+jobs:
+  markdown-link-check:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236
+        with:
+          use-quiet-mode: 'yes'
+          config-file: '.github/workflows/mlc_config.json'
diff --git a/.github/workflows/mlc_config.json b/.github/workflows/mlc_config.json
new file mode 100644
index 0000000000000..e7e620b39e0a9
--- /dev/null
+++ b/.github/workflows/mlc_config.json
@@ -0,0 +1,7 @@
+{
+  "ignorePatterns": [
+    {
+      "pattern": "^https://crates.io"
+    }
+  ]
+}
diff --git a/.github/workflows/monthly-tag.yml b/.github/workflows/monthly-tag.yml
new file mode 100644
index 0000000000000..8736a341cecf9
--- /dev/null
+++ b/.github/workflows/monthly-tag.yml
@@ -0,0 +1,43 @@
+name: Monthly Snapshot Tag
+
+on:
+  schedule:
+    - cron: "0 1 1 * *"
+  workflow_dispatch:
+
+jobs:
+  build:
+    name: Take Snapshot
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get the tags by date
+        id: tags
+        run: |
+          echo "::set-output name=new::$(date +'monthly-%Y-%m')"
+          echo "::set-output name=old::$(date -d'1 month ago' +'monthly-%Y-%m')"
+      - name: Checkout branch "master"
+        uses: actions/checkout@v2
+        with:
+          ref: 'master'
+          fetch-depth: 0
+      - name: Generate changelog
+        id: changelog
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          echo "# Automatic snapshot pre-release ${{ steps.tags.outputs.new }}" > Changelog.md
+          echo "" >> Changelog.md
+          echo "## Changes since last snapshot (${{ steps.tags.outputs.old }})" >> Changelog.md
+          echo "" >> Changelog.md
+          ./.maintain/gitlab/generate_changelog.sh ${{ steps.tags.outputs.old }} >> Changelog.md
+      - name: Release snapshot
+        id: release-snapshot
+        uses: actions/create-release@latest
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ steps.tags.outputs.new }}
+          release_name: ${{ steps.tags.outputs.new }}
+          draft: false
+          prerelease: true
+          body_path: Changelog.md
diff --git a/.gitignore b/.gitignore
index c8f1ea9567bc2..0486a1a716e5c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,3 +23,5 @@ rls*.log
 **/hfuzz_workspace/
 .cargo/
 .cargo-remote.toml
+*.bin
+*.iml
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 527ce7f425a96..ecafc9338a587 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -24,12 +24,13 @@ stages:
   - check
   - test
   - build
-  - post-build-test
-  - chaos-env
-  - chaos
   - publish
   - deploy
-  - flaming-fir
+
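The `workflow:` block that follows is moved up from its old position further down in this file and gates pipeline creation for the whole configuration. A minimal sketch of how GitLab evaluates it, assuming standard `workflow: rules` semantics (rules are checked top-down and the first match decides whether a pipeline is created at all); the comments are illustrative and not part of this diff:

```yaml
workflow:
  rules:
    - if: $CI_COMMIT_TAG      # tag push: create the pipeline
    - if: $CI_COMMIT_BRANCH   # branch push: create the pipeline
    # no rule matched (e.g. a merge-request event that would duplicate
    # the branch pipeline): no pipeline is created
```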
+workflow: + rules: + - if: $CI_COMMIT_TAG + - if: $CI_COMMIT_BRANCH variables: &default-vars GIT_STRATEGY: fetch @@ -37,9 +38,13 @@ variables: &default-vars CARGO_INCREMENTAL: 0 DOCKER_OS: "debian:stretch" ARCH: "x86_64" + CI_IMAGE: "paritytech/ci-linux:production" # FIXME set to release - CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.10" + CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.12" CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example pallet-example-* subkey chain-spec-builder" + VAULT_SERVER_URL: "https://vault.parity-mgmt-vault.parity.io" + VAULT_AUTH_PATH: "gitlab-parity-io-jwt" + VAULT_AUTH_ROLE: "cicd_gitlab_parity_${CI_PROJECT_NAME}" default: cache: {} @@ -52,19 +57,28 @@ default: paths: - artifacts/ -.kubernetes-build: &kubernetes-build +.kubernetes-env: &kubernetes-env + retry: + max: 2 + when: + - runner_system_failure + - unknown_failure + - api_failure + interruptible: true tags: - kubernetes-parity-build - environment: - name: parity-build - interruptible: true + +.rust-info-script: &rust-info-script + - rustup show + - cargo --version + - rustup +nightly show + - cargo +nightly --version + - sccache -s .docker-env: &docker-env - image: paritytech/ci-linux:production + image: "${CI_IMAGE}" before_script: - - rustup show - - cargo --version - - sccache -s + - *rust-info-script retry: max: 2 when: @@ -75,32 +89,149 @@ default: tags: - linux-docker -workflow: +.test-refs: &test-refs rules: - - if: $CI_COMMIT_TAG - - if: $CI_COMMIT_BRANCH + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 -.test-refs: &test-refs +.test-refs-no-trigger: &test-refs-no-trigger + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ + +.test-refs-no-trigger-prs-only: &test-refs-no-trigger-prs-only + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + +.test-refs-wasmer-sandbox: &test-refs-wasmer-sandbox rules: - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" + changes: + - client/executor/**/* + - frame/contracts/**/* + - primitives/sandbox/**/* - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + changes: + - client/executor/**/* + - frame/contracts/**/* + - primitives/sandbox/**/* - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + changes: + - client/executor/**/* + - frame/contracts/**/* + - primitives/sandbox/**/* .build-refs: &build-refs rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 +.nightly-pipeline: &nightly-pipeline + rules: + # this job runs only on nightly pipeline with the mentioned variable, against `master` branch + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" + +.merge-ref-into-master-script: &merge-ref-into-master-script + - if [ $CI_COMMIT_REF_NAME != "master" ]; then + git fetch origin +master:master; + git fetch origin +$CI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME; + git checkout master; + git config user.email "ci@gitlab.parity.io"; + git merge $CI_COMMIT_REF_NAME --verbose --no-edit; + fi + +.cargo-check-benches-script: &cargo-check-benches-script + - mkdir -p artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA + - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all + - 'cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json + | tee artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json' + - 'cargo run --release -p node-bench -- ::trie::read::small --json + | tee artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json' + - sccache -s + +#### Vault secrets +.vault-secrets: &vault-secrets + secrets: + DOCKER_HUB_USER: + vault: cicd/gitlab/parity/DOCKER_HUB_USER@kv + file: false + DOCKER_HUB_PASS: + vault: cicd/gitlab/parity/DOCKER_HUB_PASS@kv + file: false + GITHUB_PR_TOKEN: + vault: cicd/gitlab/parity/GITHUB_PR_TOKEN@kv + file: false + AWS_ACCESS_KEY_ID: + vault: cicd/gitlab/$CI_PROJECT_PATH/AWS_ACCESS_KEY_ID@kv + file: false + AWS_SECRET_ACCESS_KEY: + vault: cicd/gitlab/$CI_PROJECT_PATH/AWS_SECRET_ACCESS_KEY@kv + file: false + AWX_TOKEN: + vault: cicd/gitlab/$CI_PROJECT_PATH/AWX_TOKEN@kv + file: false + CRATES_TOKEN: + vault: cicd/gitlab/$CI_PROJECT_PATH/CRATES_TOKEN@kv + file: false + DOCKER_CHAOS_TOKEN: + vault: cicd/gitlab/$CI_PROJECT_PATH/DOCKER_CHAOS_TOKEN@kv + file: false + DOCKER_CHAOS_USER: + vault: cicd/gitlab/$CI_PROJECT_PATH/DOCKER_CHAOS_USER@kv + file: false + GITHUB_EMAIL: + vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_EMAIL@kv + file: false + GITHUB_RELEASE_TOKEN: + vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_RELEASE_TOKEN@kv + file: false + GITHUB_TOKEN: + vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_TOKEN@kv + file: false + GITHUB_USER: + vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_USER@kv + file: false + MATRIX_ACCESS_TOKEN: + vault: cicd/gitlab/$CI_PROJECT_PATH/MATRIX_ACCESS_TOKEN@kv + file: false + MATRIX_ROOM_ID: + vault: cicd/gitlab/$CI_PROJECT_PATH/MATRIX_ROOM_ID@kv + file: false + PIPELINE_TOKEN: + vault: cicd/gitlab/$CI_PROJECT_PATH/PIPELINE_TOKEN@kv + file: false + VALIDATOR_KEYS: + vault: cicd/gitlab/$CI_PROJECT_PATH/VALIDATOR_KEYS@kv + file: false + VALIDATOR_KEYS_CHAOS: + vault: cicd/gitlab/$CI_PROJECT_PATH/VALIDATOR_KEYS_CHAOS@kv + file: false + #### stage: .pre skip-if-draft: image: paritytech/tools:latest - <<: *kubernetes-build + <<: *kubernetes-env + <<: *vault-secrets stage: .pre rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs @@ -115,7 +246,8 @@ skip-if-draft: check-runtime: stage: check image: paritytech/tools:latest - <<: *kubernetes-build + <<: *kubernetes-env + <<: *vault-secrets rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs variables: @@ -129,17 +261,18 @@ check-runtime: check-signed-tag: stage: check image: paritytech/tools:latest - <<: *kubernetes-build + <<: *kubernetes-env + <<: *vault-secrets rules: - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: 
$CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+\.[0-9]+.*$/
+    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/   # i.e. v1.0, v2.1rc1
   script:
     - ./.maintain/gitlab/check_signed.sh

 check-line-width:
   stage: check
   image: paritytech/tools:latest
-  <<: *kubernetes-build
+  <<: *kubernetes-env
   rules:
     - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/   # PRs
   script:
@@ -149,37 +282,34 @@ check-line-width:
 test-dependency-rules:
   stage: check
   image: paritytech/tools:latest
-  <<: *kubernetes-build
+  <<: *kubernetes-env
+  rules:
+    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/   # PRs
   script:
     - .maintain/ensure-deps.sh

-#### stage: test
-
-cargo-audit:
-  stage: test
-  <<: *docker-env
+test-prometheus-alerting-rules:
+  stage: check
+  image: paritytech/tools:latest
+  <<: *kubernetes-env
   rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/   # PRs
+    - if: $CI_PIPELINE_SOURCE == "pipeline"
       when: never
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/   # i.e. v1.0, v2.1rc1
+    - if: $CI_COMMIT_BRANCH
+      changes:
+        - .gitlab-ci.yml
+        - .maintain/monitoring/**/*
   script:
-    - cargo audit
-  allow_failure: true
+    - promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml
+    - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml |
+        promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml
+
+#### stage: test

 cargo-deny:
   stage: test
   <<: *docker-env
-  rules:
-    - if: $CI_COMMIT_MESSAGE =~ /skip-checks/
-      when: never
-    - if: $CI_PIPELINE_SOURCE == "web"
-    - if: $CI_PIPELINE_SOURCE == "schedule"
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME == "tags"
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/   # i.e. v1.0, v2.1rc1
+  <<: *nightly-pipeline
   script:
     - cargo deny check --hide-inclusion-graph -c .maintain/deny.toml
   after_script:
@@ -191,16 +321,50 @@ cargo-deny:
     when: always
     paths:
       - deny.log
+  # FIXME: Temporarily allow to fail.
+  allow_failure: true
+
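Since almost every job in this file is composed from YAML anchors, it may help to see one job with its merge keys resolved. A sketch, under the assumption of standard YAML merge-key behaviour, of what the `cargo-deny` job above effectively expands to (retry, interruptible, after_script, and artifacts settings omitted for brevity; this block is illustrative, not part of the diff):

```yaml
cargo-deny:
  stage: test
  image: "${CI_IMAGE}"          # spliced in via <<: *docker-env
  before_script:                # via *docker-env -> *rust-info-script
    - rustup show
    - cargo --version
    - rustup +nightly show
    - cargo +nightly --version
    - sccache -s
  tags:
    - linux-docker              # via <<: *docker-env
  rules:                        # via <<: *nightly-pipeline
    - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly"
  script:
    - cargo deny check --hide-inclusion-graph -c .maintain/deny.toml
  allow_failure: true
```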
+cargo-fmt:
+  stage: test
+  <<: *docker-env
+  <<: *test-refs
+  script:
+    - cargo +nightly fmt --all -- --check

 cargo-check-benches:
   stage: test
   <<: *docker-env
   <<: *test-refs
+  <<: *collect-artifacts
+  before_script:
+    # merges in the master branch on PRs
+    - *merge-ref-into-master-script
+    - *rust-info-script
   script:
-    - BUILD_DUMMY_WASM_BINARY=1 time cargo +nightly check --benches --all
-    - cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small
-    - cargo run --release -p node-bench -- ::trie::read::small
-    - sccache -s
+    - *cargo-check-benches-script
+
+node-bench-regression-guard:
+  # it doesn't belong to `build` semantically, but DAG jobs can't depend on each other
+  # within a single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632
+  # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402
+  stage: build
+  <<: *docker-env
+  <<: *test-refs-no-trigger-prs-only
+  needs:
+    # this is a DAG
+    - job: cargo-check-benches
+      artifacts: true
+    # this is not a DAG dependency, it just polls the artifact
+    - project: $CI_PROJECT_PATH
+      job: cargo-check-benches
+      ref: master
+      artifacts: true
+  variables:
+    CI_IMAGE: "paritytech/node-bench-regression-guard:latest"
+  before_script: [""]
+  script:
+    - 'node-bench-regression-guard --reference artifacts/benches/master-*
+      --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA'

 cargo-check-subkey:
   stage: test
@@ -208,7 +372,23 @@ cargo-check-subkey:
   <<: *test-refs
   script:
     - cd ./bin/utils/subkey
-    - BUILD_DUMMY_WASM_BINARY=1 time cargo check --release
+    - SKIP_WASM_BUILD=1 time cargo check --release
+    - sccache -s
+
+cargo-check-try-runtime:
+  stage: test
+  <<: *docker-env
+  <<: *test-refs
+  script:
+    - time cargo check --features try-runtime
+    - sccache -s
+
+cargo-check-wasmer-sandbox:
+  stage: test
+  <<: *docker-env
+  <<: *test-refs
+  script:
+    - time cargo check --features wasmer-sandbox
     - sccache -s

 test-deterministic-wasm:
@@ -222,7 +402,7 @@ test-deterministic-wasm:
     # build runtime
     - cargo build --verbose --release -p node-runtime
     # make checksum
-    - sha256sum target/release/wbuild/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256
+    - sha256sum target/release/wbuild/node-runtime/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256
     # clean up – FIXME: can we reuse some of the artifacts?
     - cargo clean
     # build again
@@ -245,22 +425,24 @@ test-linux-stable: &test-linux
   script:
     # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests
     - time cargo test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml
+    - time cargo test -p frame-support-test --features=conditional-storage --manifest-path frame/support/test/Cargo.toml --test pallet
     # does not reuse cache 1 min 44 sec
     - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout
     - sccache -s

-unleash-check:
-  stage: test
-  <<: *docker-env
-  rules:
-    - if: $CI_COMMIT_MESSAGE =~ /skip-checks/
-      when: never
-    # .test-refs
-    - if: $CI_COMMIT_REF_NAME == "master"
-    - if: $CI_COMMIT_REF_NAME == "tags"
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/   # i.e.
v1.0, v2.1rc1 - script: - - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} - - cargo unleash check ${CARGO_UNLEASH_PKG_DEF} +#unleash-check: + #stage: test + #<<: *docker-env + #<<: *test-refs-no-trigger + #script: + #- cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} + #- cargo unleash de-dev-deps + # Reuse build artifacts when running checks (cuts down check time by 3x) + # TODO: Implement this optimization in cargo-unleash rather than here + #- mkdir -p target/unleash + #- export CARGO_TARGET_DIR=target/unleash + #- cargo unleash check ${CARGO_UNLEASH_PKG_DEF} + # FIXME: this job must not fail, or unleash-to-crates-io will publish broken stuff + #allow_failure: true test-frame-examples-compile-to-wasm: # into one job @@ -271,7 +453,7 @@ test-frame-examples-compile-to-wasm: <<: *default-vars # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. - RUSTFLAGS: -Cdebug-assertions=y + RUSTFLAGS: "-Cdebug-assertions=y" RUST_BACKTRACE: 1 script: - cd frame/example-offchain-worker/ @@ -282,6 +464,7 @@ test-frame-examples-compile-to-wasm: test-linux-stable-int: <<: *test-linux + stage: test script: - echo "___Logs will be partly shown at the end in case of failure.___" - echo "___Full log will be saved to the job artifacts only in case of failure.___" @@ -299,22 +482,11 @@ test-linux-stable-int: paths: - ${CI_COMMIT_SHORT_SHA}_int_failure.log -check-web-wasm: +check-tracing: stage: test <<: *docker-env <<: *test-refs script: - # WASM support is in progress. As more and more crates support WASM, we - # should add entries here. See https://github.com/paritytech/substrate/issues/2416 - - time cargo build --target=wasm32-unknown-unknown -p sp-io - - time cargo build --target=wasm32-unknown-unknown -p sp-runtime - - time cargo build --target=wasm32-unknown-unknown -p sp-std - - time cargo build --target=wasm32-unknown-unknown -p sc-consensus-aura - - time cargo build --target=wasm32-unknown-unknown -p sc-consensus-babe - - time cargo build --target=wasm32-unknown-unknown -p sp-consensus - - time cargo build --target=wasm32-unknown-unknown -p sc-telemetry - # Note: the command below is a bit weird because several Cargo issues prevent us from compiling the node in a more straight-forward way. - - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown -Z features=itarget # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features --features=with-tracing @@ -328,7 +500,7 @@ test-full-crypto-feature: <<: *default-vars # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. 
- RUSTFLAGS: -Cdebug-assertions=y + RUSTFLAGS: "-Cdebug-assertions=y" RUST_BACKTRACE: 1 script: - cd primitives/core/ @@ -337,31 +509,34 @@ test-full-crypto-feature: - time cargo +nightly build --verbose --no-default-features --features full_crypto - sccache -s +test-wasmer-sandbox: + stage: test + <<: *docker-env + <<: *test-refs-wasmer-sandbox + variables: + <<: *default-vars + script: + - time cargo test --release --features runtime-benchmarks,wasmer-sandbox + - sccache -s + cargo-check-macos: stage: test # shell runner on mac ignores the image set in *docker-env <<: *docker-env - <<: *test-refs + <<: *test-refs-no-trigger script: - - BUILD_DUMMY_WASM_BINARY=1 time cargo check --release + - SKIP_WASM_BUILD=1 time cargo check --release - sccache -s tags: - osx -test-prometheus-alerting-rules: - stage: test - image: paritytech/tools:latest - <<: *kubernetes-build - script: - - promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml - - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml | promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml - #### stage: build check-polkadot-companion-status: stage: build image: paritytech/tools:latest - <<: *kubernetes-build + <<: *kubernetes-env + <<: *vault-secrets rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs script: @@ -370,7 +545,8 @@ check-polkadot-companion-status: check-polkadot-companion-build: stage: build <<: *docker-env - <<: *test-refs + <<: *test-refs-no-trigger + <<: *vault-secrets needs: - job: test-linux-stable-int artifacts: false @@ -380,35 +556,11 @@ check-polkadot-companion-build: - cd polkadot && git rev-parse --abbrev-ref HEAD allow_failure: true -test-browser-node: - stage: build - <<: *docker-env - <<: *test-refs - needs: - - job: check-web-wasm - artifacts: false - variables: - <<: *default-vars - CHROMEDRIVER_ARGS: "--log-level=INFO --whitelisted-ips=127.0.0.1" - CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER: "wasm-bindgen-test-runner" - WASM_BINDGEN_TEST_TIMEOUT: 120 - script: - - cargo +nightly test --target wasm32-unknown-unknown -p node-browser-testing -Z features=itarget - build-linux-substrate: &build-binary stage: build <<: *collect-artifacts <<: *docker-env - rules: - # .build-refs with manual on PRs and chaos - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # i.e add [chaos:basic] in commit message to trigger - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: manual - allow_failure: true + <<: *build-refs needs: - job: test-linux-stable artifacts: false @@ -435,15 +587,7 @@ build-linux-subkey: &build-subkey stage: build <<: *collect-artifacts <<: *docker-env - rules: - # .build-refs with manual on PRs - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: manual - allow_failure: true + <<: *build-refs needs: - job: cargo-check-subkey artifacts: false @@ -451,7 +595,7 @@ build-linux-subkey: &build-subkey - mkdir -p ./artifacts/subkey script: - cd ./bin/utils/subkey - - BUILD_DUMMY_WASM_BINARY=1 time cargo build --release --verbose + - SKIP_WASM_BUILD=1 time cargo build --release --verbose - cd - - mv ./target/release/subkey ./artifacts/subkey/. - echo -n "Subkey version = " @@ -467,14 +611,13 @@ build-macos-subkey: tags: - osx -build-rust-doc: +build-rustdoc: stage: build <<: *docker-env <<: *test-refs - allow_failure: true variables: <<: *default-vars - RUSTFLAGS: -Dwarnings + SKIP_WASM_BUILD: 1 artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" when: on_success @@ -482,159 +625,71 @@ build-rust-doc: paths: - ./crate-docs/ script: - - rm -f ./crate-docs/index.html # use it as an indicator if the job succeeds - - BUILD_DUMMY_WASM_BINARY=1 RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html" + # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"` + - RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html" time cargo +nightly doc --no-deps --workspace --all-features --verbose + - rm -f ./target/doc/.lock - mv ./target/doc ./crate-docs + # FIXME: remove me after CI image gets nonroot + - chown -R nonroot:nonroot ./crate-docs - echo "" > ./crate-docs/index.html - sccache -s -#### stage: post-build-test - -trigger-contracts-ci: - stage: post-build-test - needs: - - job: build-linux-substrate - artifacts: false - - job: test-linux-stable - artifacts: false - trigger: - project: parity/srml-contracts-waterfall - branch: master - strategy: depend - rules: - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - -#### stage: chaos-env - -build-chaos-docker: - stage: chaos-env - rules: - # .build-refs with chaos - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # i.e add [chaos:basic] in commit message to trigger - needs: - - job: build-linux-substrate - image: docker:stable - tags: - - kubernetes-parity-build - variables: - <<: *default-vars - DOCKER_HOST: tcp://localhost:2375 - DOCKER_DRIVER: overlay2 - PRODUCT: substrate - DOCKERFILE: $PRODUCT.Dockerfile - CONTAINER_IMAGE: paritypr/$PRODUCT - environment: - name: parity-chaosnet - services: - - docker:dind - before_script: - - test "$DOCKER_CHAOS_USER" -a "$DOCKER_CHAOS_TOKEN" - || ( echo "no docker credentials provided"; exit 1 ) - - docker login -u "$DOCKER_CHAOS_USER" -p "$DOCKER_CHAOS_TOKEN" - - docker info - script: - - cd ./artifacts/$PRODUCT/ - - VERSION="ci-${CI_COMMIT_SHORT_SHA}" - - echo "${PRODUCT} version = ${VERSION}" - - test -z "${VERSION}" && exit 1 - - docker build - --build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --tag $CONTAINER_IMAGE:$VERSION - --file $DOCKERFILE . - - docker push $CONTAINER_IMAGE:$VERSION - after_script: - - docker logout - -#### stage: chaos - -chaos-test-singlenodeheight: - stage: chaos - rules: - # .build-refs with chaos - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - - if: $CI_COMMIT_MESSAGE =~ /\[chaos:(basic|medium|large)\]/ && $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # i.e add [chaos:basic] in commit message to trigger - image: paritypr/simnet:latest - needs: - - job: build-chaos-docker - tags: - - parity-chaos - variables: - <<: *default-vars - PRODUCT: substrate - DOCKERFILE: $PRODUCT.Dockerfile - CONTAINER_IMAGE: paritypr/$PRODUCT - KEEP_NAMESPACE: 0 - NAMESPACE: "substrate-ci-${CI_COMMIT_SHORT_SHA}-${CI_PIPELINE_ID}" - VERSION: "ci-${CI_COMMIT_SHORT_SHA}" - interruptible: true - environment: - name: parity-chaosnet - script: - - simnet spawn dev -i $CONTAINER_IMAGE:$VERSION - - simnet singlenodeheight -h 30 - after_script: - - simnet clean - #### stage: publish .build-push-docker-image: &build-push-docker-image <<: *build-refs - <<: *kubernetes-build - image: docker:stable - services: - - docker:dind + <<: *kubernetes-env + <<: *vault-secrets + image: quay.io/buildah/stable variables: &docker-build-vars <<: *default-vars - DOCKER_HOST: tcp://localhost:2375 - DOCKER_DRIVER: overlay2 GIT_STRATEGY: none DOCKERFILE: $PRODUCT.Dockerfile - CONTAINER_IMAGE: parity/$PRODUCT + IMAGE_NAME: docker.io/parity/$PRODUCT before_script: - - test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity" - || ( echo "no docker credentials provided"; exit 1 ) - - docker login -u "$Docker_Hub_User_Parity" -p "$Docker_Hub_Pass_Parity" - - docker info - script: - cd ./artifacts/$PRODUCT/ - VERSION="$(cat ./VERSION)" - echo "${PRODUCT} version = ${VERSION}" - test -z "${VERSION}" && exit 1 - - docker build - --build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --tag $CONTAINER_IMAGE:$VERSION - --tag $CONTAINER_IMAGE:latest - --file $DOCKERFILE . - - docker push $CONTAINER_IMAGE:$VERSION - - docker push $CONTAINER_IMAGE:latest + script: + - test "$DOCKER_HUB_USER" -a "$DOCKER_HUB_PASS" || + ( echo "no docker credentials provided"; exit 1 ) + - buildah bud + --format=docker + --build-arg VCS_REF="${CI_COMMIT_SHA}" + --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" + --tag "$IMAGE_NAME:$VERSION" + --tag "$IMAGE_NAME:latest" + --file "$DOCKERFILE" . 
+ - echo "$DOCKER_HUB_PASS" | + buildah login --username "$DOCKER_HUB_USER" --password-stdin docker.io + - buildah info + - buildah push --format=v2s2 "$IMAGE_NAME:$VERSION" + - buildah push --format=v2s2 "$IMAGE_NAME:latest" + after_script: + - buildah logout --all + # pass artifacts to the trigger-simnet job + - echo "SUBSTRATE_IMAGE_NAME=${IMAGE_NAME}" | tee -a ./artifacts/$PRODUCT/build.env + - IMAGE_TAG="$(cat ./artifacts/$PRODUCT/VERSION)" + - echo "SUBSTRATE_IMAGE_TAG=${IMAGE_TAG}" | tee -a ./artifacts/$PRODUCT/build.env + - cat ./artifacts/$PRODUCT/build.env publish-docker-substrate: stage: publish <<: *build-push-docker-image - # collect VERSION artifact here to pass it on to kubernetes - <<: *collect-artifacts + <<: *build-refs needs: - job: build-linux-substrate artifacts: true variables: <<: *docker-build-vars PRODUCT: substrate - after_script: - - docker logout - # only VERSION information is needed for the deployment - - find ./artifacts/ -depth -not -name VERSION -type f -delete + artifacts: + reports: + # this artifact is used in trigger-simnet job + # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance + dotenv: ./artifacts/substrate/build.env publish-docker-subkey: stage: publish @@ -645,13 +700,12 @@ publish-docker-subkey: variables: <<: *docker-build-vars PRODUCT: subkey - after_script: - - docker logout publish-s3-release: stage: publish <<: *build-refs - <<: *kubernetes-build + <<: *kubernetes-env + <<: *vault-secrets needs: - job: build-linux-substrate artifacts: true @@ -670,121 +724,128 @@ publish-s3-release: - aws s3 ls s3://${BUCKET}/${PREFIX}/latest/ --recursive --human-readable --summarize -publish-s3-doc: +publish-rustdoc: stage: publish - image: paritytech/awscli:latest - allow_failure: true + <<: *kubernetes-env + <<: *vault-secrets + image: paritytech/tools:latest + variables: + GIT_DEPTH: 100 + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME == "master" + # `needs:` can be removed after CI image gets nonroot. In this case `needs:` stops other + # artifacts from being dowloaded by this job. needs: - - job: build-rust-doc + - job: build-rustdoc artifacts: true - <<: *build-refs - <<: *kubernetes-build - variables: - GIT_STRATEGY: none - BUCKET: "releases.parity.io" - PREFIX: "substrate-rustdoc" - script: - - test -r ./crate-docs/index.html || ( - echo "./crate-docs/index.html not present, build:rust:doc:release job not complete"; - exit 1 - ) - - aws s3 sync --delete --size-only --only-show-errors - ./crate-docs/ s3://${BUCKET}/${PREFIX}/ + script: + - rm -rf /tmp/* + # Set git config + - rm -rf .git/config + - git config user.email "devops-team@parity.io" + - git config user.name "${GITHUB_USER}" + - git config remote.origin.url "https://${GITHUB_TOKEN}@github.com/paritytech/substrate.git" + - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" + - git fetch origin gh-pages + # Save README and docs + - cp -r ./crate-docs/ /tmp/doc/ + - cp README.md /tmp/doc/ + - git checkout gh-pages + # Remove everything and restore generated docs and README + - rm -rf ./* + - mv /tmp/doc/* . + # Upload files + - git add --all --force + # `git commit` has an exit code of > 0 if there is nothing to commit. + # This causes GitLab to exit immediately and marks this job failed. + # We don't want to mark the entire job failed if there's nothing to + # publish though, hence the `|| true`. 
+    - git commit -m "Updated docs for ${CI_COMMIT_REF_NAME}" ||
+        echo "___Nothing to commit___"
+    - git push origin gh-pages --force
   after_script:
-    - aws s3 ls s3://${BUCKET}/${PREFIX}/
-        --human-readable --summarize
+    - rm -rf .git/ ./*

 publish-draft-release:
   stage: publish
+  <<: *vault-secrets
   image: paritytech/tools:latest
   rules:
     - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+\.[0-9]+.*$/
+    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/   # i.e. v1.0, v2.1rc1
   script:
     - ./.maintain/gitlab/publish_draft_release.sh
   allow_failure: true

-publish-to-crates-io:
+unleash-to-crates-io:
   stage: publish
   <<: *docker-env
+  <<: *vault-secrets
   rules:
     - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+\.[0-9]+.*$/
+    # FIXME: wait until https://github.com/paritytech/cargo-unleash/issues/50 is fixed, also
+    # remove allow_failure: true on the check job
+    # - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/   # i.e. v1.0, v2.1rc1
   script:
     - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS}
     - cargo unleash em-dragons --no-check --owner github:paritytech:core-devs ${CARGO_UNLEASH_PKG_DEF}
   allow_failure: true

-deploy-kubernetes-alerting-rules:
+#### stage: deploy
+
+deploy-prometheus-alerting-rules:
   stage: deploy
-  interruptible: true
-  retry: 1
-  tags:
-    - kubernetes-parity-build
-  image: paritytech/kubetools:latest
-  environment:
-    name: parity-mgmt-polkadot-alerting
+  needs:
+    - job: test-prometheus-alerting-rules
+      artifacts: false
+  allow_failure: true
+  trigger:
+    project: parity/infrastructure/cloud-infra
   variables:
-    NAMESPACE: monitoring
-    PROMETHEUSRULE: prometheus-k8s-rules-polkadot-alerting
-    RULES: .maintain/monitoring/alerting-rules/alerting-rules.yaml
-  script:
-    - echo "deploying prometheus alerting rules"
-    - kubectl -n ${NAMESPACE} patch prometheusrule ${PROMETHEUSRULE}
-        --type=merge --patch "$(sed 's/^/ /;1s/^/spec:\n/' ${RULES})"
+    SUBSTRATE_CI_COMMIT_NAME: "${CI_COMMIT_REF_NAME}"
+    SUBSTRATE_CI_COMMIT_REF: "${CI_COMMIT_SHORT_SHA}"
+    UPSTREAM_TRIGGER_PROJECT: "${CI_PROJECT_PATH}"
   rules:
+    - if: $CI_PIPELINE_SOURCE == "pipeline"
+      when: never
     - if: $CI_COMMIT_REF_NAME == "master"
       changes:
        - .gitlab-ci.yml
        - .maintain/monitoring/**/*

-.validator-deploy: &validator-deploy
-  stage: flaming-fir
+# Runs "quick" and "long" tests on nightly schedule and on commit / merge to master
+# A "quick" test is a smoke test where basic check-expect tests run by
+# checking values from metrics exposed by the app.
+# A "long" test is a load test where we send 50K transactions into the
+# network and check that all of them completed successfully
+simnet-tests:
+  stage: deploy
+  image: docker.io/paritytech/simnet:${SIMNET_REF}
+  <<: *kubernetes-env
+  <<: *vault-secrets
   rules:
-    # .build-refs, but manual
+    - if: $CI_PIPELINE_SOURCE == "pipeline"
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master"
     - if: $CI_COMMIT_REF_NAME == "master"
-      when: manual
-    - if: $CI_PIPELINE_SOURCE == "web"
-      when: manual
-    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/   # i.e. v1.0, v2.1rc1
-      when: manual
   needs:
-    # script will fail if there is no artifacts/substrate/VERSION
    - job: publish-docker-substrate
-      artifacts: true
-  image: parity/azure-ansible:v1
-  allow_failure: true
-  interruptible: true
+  # variables:
+  #   `build.env` brings in `${SUBSTRATE_IMAGE_NAME}` and `${SUBSTRATE_IMAGE_TAG}`
+  #   (`$VERSION` here, i.e. `2643-0.8.29-5f689e0a-6b24dc54`).
+  # ${SIMNET_REF} is a gitlab variable
+  before_script:
+    - echo "Simnet Tests Config
+        docker.io/paritytech/simnet:${SIMNET_REF}
+        ${SUBSTRATE_IMAGE_NAME} ${SUBSTRATE_IMAGE_TAG}"
+  script:
+    - /home/nonroot/simnet/gurke/scripts/run-test-environment-manager.sh
+        --github-remote-dir="https://github.com/paritytech/substrate/tree/master/simnet_tests"
+        --config="simnet_tests/configs/default_local_testnet.toml"
+        --image="${SUBSTRATE_IMAGE_NAME}:${SUBSTRATE_IMAGE_TAG}"
+  retry: 2
   tags:
-    - linux-docker
-
-validator 1 4:
-  <<: *validator-deploy
-  script:
-    - ./.maintain/flamingfir-deploy.sh flamingfir-validator1
-
-validator 2 4:
-  <<: *validator-deploy
-  script:
-    - ./.maintain/flamingfir-deploy.sh flamingfir-validator2
-
-validator 3 4:
-  <<: *validator-deploy
-  script:
-    - ./.maintain/flamingfir-deploy.sh flamingfir-validator3
-
-validator 4 4:
-  <<: *validator-deploy
-  script:
-    - ./.maintain/flamingfir-deploy.sh flamingfir-validator4
-
-#### stage: .post
-
-check-labels:
-  stage: .post
-  image: paritytech/tools:latest
-  <<: *kubernetes-build
-  rules:
-    - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/   # PRs
-  script:
-    - ./.maintain/gitlab/check_labels.sh
+    - parity-simnet
diff --git a/.maintain/chaostest/.eslintignore b/.maintain/chaostest/.eslintignore
deleted file mode 100644
index 3c3629e647f5d..0000000000000
--- a/.maintain/chaostest/.eslintignore
+++ /dev/null
@@ -1 +0,0 @@
-node_modules
diff --git a/.maintain/chaostest/.eslintrc.json b/.maintain/chaostest/.eslintrc.json
deleted file mode 100644
index 43e483a80b2ea..0000000000000
--- a/.maintain/chaostest/.eslintrc.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "env": {
-    "node": true,
-    "commonjs": true,
-    "es6": true
-  },
-  "extends": [
-    "standard"
-  ],
-  "globals": {
-    "Atomics": "readonly",
-    "SharedArrayBuffer": "readonly"
-  },
-  "parserOptions": {
-    "ecmaVersion": 2018
-  },
-  "rules": {
-  }
-}
diff --git a/.maintain/chaostest/.gitignore b/.maintain/chaostest/.gitignore
deleted file mode 100644
index ef9e9d1e696e4..0000000000000
--- a/.maintain/chaostest/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-*-debug.log
-*-error.log
-/.nyc_output
-/dist
-/tmp
-/log
-.DS_Store
-.editorconfig
-yarn.lock
-node_modules
-/src/config/config.json
diff --git a/.maintain/chaostest/README.md b/.maintain/chaostest/README.md
deleted file mode 100644
index dc3d07b57905e..0000000000000
--- a/.maintain/chaostest/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-chaostest
-=========
-
-A cli for chaos testing on substrate
-
-[![oclif](https://img.shields.io/badge/cli-oclif-brightgreen.svg)](https://oclif.io)
-[![Version](https://img.shields.io/npm/v/chaostest.svg)](https://npmjs.org/package/chaostest)
-[![Downloads/week](https://img.shields.io/npm/dw/chaostest.svg)](https://npmjs.org/package/chaostest)
-
-
-* [Usage](#usage)
-* [Commands](#commands)
-
-# Usage
-
-```sh-session
-$ npm install -g chaostest // yarn add global chaostest
-$ chaostest COMMAND
-running command...
-$ chaostest (-v|--version|version)
-chaostest/0.0.0 darwin-x64 node-v8.16.0
-$ chaostest --help [COMMAND]
-USAGE
-  $ chaostest COMMAND
-...
-```
-
-# Commands
-
-* [`chaostest spawn`](#chaostest-spawn)
-* [`chaostest singlenodeheight`](#chaostest-singlenodeheight)
-* [`chaostest clean`](#chaostest-clean)
-
-## `chaostest spawn`
-
-Spawn a testnet based on your local k8s configuration. Could be either a dev node, a two node alicebob chain or a customized chain with various validators/fullnodes.
- -``` -USAGE - $ chaostest spawn [ARGUMENTS] [FLAGS] - -Arguments - dev, a single fullnode in --dev mode - alicebob, a two nodes private chain with Alice as bootnode and Bob as validator - [chainName], a customized chain deployed with -v numbers of validators and -n numbers of fullnodes - -Flags - --image, -i, the image tag of the certain substrate version you want to deploy - --port, -p, the port to expose when image is deployed in a pod - --namespace, the desired namespace to deploy on - --validator, -v, the number of substrate validators to deploy - --node, -n, the number of full nodes, if not set but exists, default to 1 - -DESCRIPTION - ... - Extra documentation goes here -``` - -_See code: [src/commands/spawn/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/spawn/index.js)_ - -## `chaostest singlenodeheight` - -Test against a fullnode on --dev mode to check if it can successfully produce blocks to a certain height. - -``` -USAGE - $ chaostest singlenodeheight [FLAGS] - -FLAGS - -h , the desired height of blocks to check if reachable, this only works with integers smaller than 2^6 - -t, the wait time out before it halts the polling -``` - -_See code: [src/commands/singlenodeheight/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/singlenodeheight/index.js)_ - -## `chaostest clean` - -Clean up the k8s deployment by namespace. - -``` -USAGE - $ chaostest clean [FLAGS] - -FLAGS - -n , the desired namespace to delete on your k8s cluster -``` - -_See code: [src/commands/clean/index.js](https://github.com/paritytech/substrate/blob/harry/chaostest-init/.maintain/chaostest/src/commands/clean/index.js)_ - diff --git a/.maintain/chaostest/bin/run b/.maintain/chaostest/bin/run deleted file mode 100755 index 30b14e177331d..0000000000000 --- a/.maintain/chaostest/bin/run +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env node - -require('@oclif/command').run() -.then(require('@oclif/command/flush')) -.catch(require('@oclif/errors/handle')) diff --git a/.maintain/chaostest/bin/run.cmd b/.maintain/chaostest/bin/run.cmd deleted file mode 100644 index 968fc30758e68..0000000000000 --- a/.maintain/chaostest/bin/run.cmd +++ /dev/null @@ -1,3 +0,0 @@ -@echo off - -node "%~dp0\run" %* diff --git a/.maintain/chaostest/package-lock.json b/.maintain/chaostest/package-lock.json deleted file mode 100644 index 09468e12fb4f9..0000000000000 --- a/.maintain/chaostest/package-lock.json +++ /dev/null @@ -1,5950 +0,0 @@ -{ - "name": "chaostest", - "version": "0.0.0", - "lockfileVersion": 1, - "requires": true, - "dependencies": { - "@babel/code-frame": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", - "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", - "dev": true, - "requires": { - "@babel/highlight": "^7.8.3" - } - }, - "@babel/generator": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.9.6.tgz", - "integrity": "sha512-+htwWKJbH2bL72HRluF8zumBxzuX0ZZUFl3JLNyoUjM/Ho8wnVpPXM6aUz8cfKDqQ/h7zHqKt4xzJteUosckqQ==", - "dev": true, - "requires": { - "@babel/types": "^7.9.6", - "jsesc": "^2.5.1", - "lodash": "^4.17.13", - "source-map": "^0.5.0" - } - }, - "@babel/helper-function-name": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.9.5.tgz", - "integrity": 
"sha512-JVcQZeXM59Cd1qanDUxv9fgJpt3NeKUaqBqUEvfmQ+BCOKq2xUgaWZW2hr0dkbyJgezYuplEoh5knmrnS68efw==", - "dev": true, - "requires": { - "@babel/helper-get-function-arity": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/types": "^7.9.5" - } - }, - "@babel/helper-get-function-arity": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.8.3.tgz", - "integrity": "sha512-FVDR+Gd9iLjUMY1fzE2SR0IuaJToR4RkCDARVfsBBPSP53GEqSFjD8gNyxg246VUyc/ALRxFaAK8rVG7UT7xRA==", - "dev": true, - "requires": { - "@babel/types": "^7.8.3" - } - }, - "@babel/helper-split-export-declaration": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.8.3.tgz", - "integrity": "sha512-3x3yOeyBhW851hroze7ElzdkeRXQYQbFIb7gLK1WQYsw2GWDay5gAJNw1sWJ0VFP6z5J1whqeXH/WCdCjZv6dA==", - "dev": true, - "requires": { - "@babel/types": "^7.8.3" - } - }, - "@babel/helper-validator-identifier": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.9.5.tgz", - "integrity": "sha512-/8arLKUFq882w4tWGj9JYzRpAlZgiWUJ+dtteNTDqrRBz9Iguck9Rn3ykuBDoUwh2TO4tSAJlrxDUOXWklJe4g==", - "dev": true - }, - "@babel/highlight": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.9.0.tgz", - "integrity": "sha512-lJZPilxX7Op3Nv/2cvFdnlepPXDxi29wxteT57Q965oc5R9v86ztx0jfxVrTcBk8C2kcPkkDa2Z4T3ZsPPVWsQ==", - "dev": true, - "requires": { - "@babel/helper-validator-identifier": "^7.9.0", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, - "@babel/parser": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.9.6.tgz", - "integrity": "sha512-AoeIEJn8vt+d/6+PXDRPaksYhnlbMIiejioBZvvMQsOjW/JYK6k/0dKnvvP3EhK5GfMBWDPtrxRtegWdAcdq9Q==", - "dev": true - }, - "@babel/runtime": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.9.6.tgz", - "integrity": "sha512-64AF1xY3OAkFHqOb9s4jpgk1Mm5vDZ4L3acHvAml+53nO1XbXLuDodsVpO4OIUsmemlUHMxNdYMNJmsvOwLrvQ==", - "requires": { - "regenerator-runtime": "^0.13.4" - } - }, - "@babel/template": { - "version": "7.8.6", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.6.tgz", - "integrity": "sha512-zbMsPMy/v0PWFZEhQJ66bqjhH+z0JgMoBWuikXybgG3Gkd/3t5oQ1Rw2WQhnSrsOmsKXnZOx15tkC4qON/+JPg==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/parser": "^7.8.6", - "@babel/types": "^7.8.6" - } - }, - "@babel/traverse": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.9.6.tgz", - "integrity": "sha512-b3rAHSjbxy6VEAvlxM8OV/0X4XrG72zoxme6q1MOoe2vd0bEc+TwayhuC1+Dfgqh1QEG+pj7atQqvUprHIccsg==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.9.6", - "@babel/helper-function-name": "^7.9.5", - "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/parser": "^7.9.6", - "@babel/types": "^7.9.6", - "debug": "^4.1.0", - "globals": "^11.1.0", - "lodash": "^4.17.13" - } - }, - "@babel/types": { - "version": "7.9.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.9.6.tgz", - "integrity": "sha512-qxXzvBO//jO9ZnoasKF1uJzHd2+M6Q2ZPIVfnFps8JJvXy0ZBbwbNOmE6SGIY5XOY6d1Bo5lb9d9RJ8nv3WSeA==", - "dev": true, - "requires": { - "@babel/helper-validator-identifier": "^7.9.5", - "lodash": "^4.17.13", - "to-fast-properties": "^2.0.0" - } - }, - 
"@kubernetes/client-node": { - "version": "0.11.2", - "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.11.2.tgz", - "integrity": "sha512-Uhwd2y2qCvugICnHRC5h2MT5vw0a1dJPVVltVwmkeMuyGTPBccsTtpTcSfSLitwOrh4yr+9wG5bRcMdgeRjYPw==", - "requires": { - "@types/js-yaml": "^3.12.1", - "@types/node": "^10.12.0", - "@types/request": "^2.47.1", - "@types/underscore": "^1.8.9", - "@types/ws": "^6.0.1", - "byline": "^5.0.0", - "execa": "1.0.0", - "isomorphic-ws": "^4.0.1", - "js-yaml": "^3.13.1", - "jsonpath-plus": "^0.19.0", - "openid-client": "2.5.0", - "request": "^2.88.0", - "rfc4648": "^1.3.0", - "shelljs": "^0.8.2", - "tslib": "^1.9.3", - "underscore": "^1.9.1", - "ws": "^6.1.0" - }, - "dependencies": { - "jsonpath-plus": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-0.19.0.tgz", - "integrity": "sha512-GSVwsrzW9LsA5lzsqe4CkuZ9wp+kxBb2GwNniaWzI2YFn5Ig42rSW8ZxVpWXaAfakXNrx5pgY5AbQq7kzX29kg==" - } - } - }, - "@nodelib/fs.scandir": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz", - "integrity": "sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "2.0.3", - "run-parallel": "^1.1.9" - } - }, - "@nodelib/fs.stat": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz", - "integrity": "sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA==", - "dev": true - }, - "@nodelib/fs.walk": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz", - "integrity": "sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ==", - "dev": true, - "requires": { - "@nodelib/fs.scandir": "2.1.3", - "fastq": "^1.6.0" - } - }, - "@oclif/command": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/@oclif/command/-/command-1.6.1.tgz", - "integrity": "sha512-pvmMmfGn+zm4e4RwVw63mg9sIaqKqmVsFbImQoUrCO/43UmWzoSHWNXKdgEGigOezWrkZfFucaeZcSbp149OWg==", - "requires": { - "@oclif/config": "^1.15.1", - "@oclif/errors": "^1.2.2", - "@oclif/parser": "^3.8.3", - "@oclif/plugin-help": "^3", - "debug": "^4.1.1", - "semver": "^5.6.0" - }, - "dependencies": { - "@oclif/plugin-help": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@oclif/plugin-help/-/plugin-help-3.0.1.tgz", - "integrity": "sha512-Q1OITeUBkkydPf6r5qX75KgE9capr1mNrfHtfD7gkVXmqoTndrbc++z4KfAYNf5nhTCY7N9l52sjbF6BrSGu9w==", - "requires": { - "@oclif/command": "^1.5.20", - "@oclif/config": "^1.15.1", - "chalk": "^2.4.1", - "indent-string": "^4.0.0", - "lodash.template": "^4.4.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0", - "widest-line": "^2.0.1", - "wrap-ansi": "^4.0.0" - } - }, - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "@oclif/config": { - "version": "1.15.1", - 
"resolved": "https://registry.npmjs.org/@oclif/config/-/config-1.15.1.tgz", - "integrity": "sha512-GdyHpEZuWlfU8GSaZoiywtfVBsPcfYn1KuSLT1JTfvZGpPG6vShcGr24YZ3HG2jXUFlIuAqDcYlTzOrqOdTPNQ==", - "requires": { - "@oclif/errors": "^1.0.0", - "@oclif/parser": "^3.8.0", - "debug": "^4.1.1", - "tslib": "^1.9.3" - } - }, - "@oclif/dev-cli": { - "version": "1.22.2", - "resolved": "https://registry.npmjs.org/@oclif/dev-cli/-/dev-cli-1.22.2.tgz", - "integrity": "sha512-c7633R37RxrQIpwqPKxjNRm6/jb1yuG8fd16hmNz9Nw+/MUhEtQtKHSCe9ScH8n5M06l6LEo4ldk9LEGtpaWwA==", - "dev": true, - "requires": { - "@oclif/command": "^1.5.13", - "@oclif/config": "^1.12.12", - "@oclif/errors": "^1.2.2", - "@oclif/plugin-help": "^2.1.6", - "cli-ux": "^5.2.1", - "debug": "^4.1.1", - "fs-extra": "^7.0.1", - "github-slugger": "^1.2.1", - "lodash": "^4.17.11", - "normalize-package-data": "^2.5.0", - "qqjs": "^0.3.10", - "tslib": "^1.9.3" - } - }, - "@oclif/errors": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@oclif/errors/-/errors-1.2.2.tgz", - "integrity": "sha512-Eq8BFuJUQcbAPVofDxwdE0bL14inIiwt5EaKRVY9ZDIG11jwdXZqiQEECJx0VfnLyUZdYfRd/znDI/MytdJoKg==", - "requires": { - "clean-stack": "^1.3.0", - "fs-extra": "^7.0.0", - "indent-string": "^3.2.0", - "strip-ansi": "^5.0.0", - "wrap-ansi": "^4.0.0" - } - }, - "@oclif/linewrap": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@oclif/linewrap/-/linewrap-1.0.0.tgz", - "integrity": "sha512-Ups2dShK52xXa8w6iBWLgcjPJWjais6KPJQq3gQ/88AY6BXoTX+MIGFPrWQO1KLMiQfoTpcLnUwloN4brrVUHw==" - }, - "@oclif/parser": { - "version": "3.8.5", - "resolved": "https://registry.npmjs.org/@oclif/parser/-/parser-3.8.5.tgz", - "integrity": "sha512-yojzeEfmSxjjkAvMRj0KzspXlMjCfBzNRPkWw8ZwOSoNWoJn+OCS/m/S+yfV6BvAM4u2lTzX9Y5rCbrFIgkJLg==", - "requires": { - "@oclif/errors": "^1.2.2", - "@oclif/linewrap": "^1.0.0", - "chalk": "^2.4.2", - "tslib": "^1.9.3" - } - }, - "@oclif/plugin-help": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/@oclif/plugin-help/-/plugin-help-2.2.3.tgz", - "integrity": "sha512-bGHUdo5e7DjPJ0vTeRBMIrfqTRDBfyR5w0MP41u0n3r7YG5p14lvMmiCXxi6WDaP2Hw5nqx3PnkAIntCKZZN7g==", - "requires": { - "@oclif/command": "^1.5.13", - "chalk": "^2.4.1", - "indent-string": "^4.0.0", - "lodash.template": "^4.4.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0", - "widest-line": "^2.0.1", - "wrap-ansi": "^4.0.0" - }, - "dependencies": { - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "@oclif/screen": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@oclif/screen/-/screen-1.0.4.tgz", - "integrity": "sha512-60CHpq+eqnTxLZQ4PGHYNwUX572hgpMHGPtTWMjdTMsAvlm69lZV/4ly6O3sAYkomo4NggGcomrDpBe34rxUqw==", - "dev": true - }, - "@oclif/test": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@oclif/test/-/test-1.2.6.tgz", - "integrity": "sha512-8BQm0VFwTf/JpDnI3x6Lbp3S4RRUvQcv8WalKm82+7FNEylWMAXFNgBuzG65cNPj11J2jhlVo0gOWGF6hbiaJQ==", - "dev": true, - "requires": { - "fancy-test": 
"^1.4.3" - } - }, - "@polkadot/api": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-0.95.2.tgz", - "integrity": "sha512-SrYiEE9T+AmCx18NyhEk5l/7yPvVqogiz7rmW8YGlOZ89OEPHe2dOTaD5tZJ5daKXEkXFsqPPtwemCv2OZ2F1g==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/api-derive": "^0.95.2", - "@polkadot/api-metadata": "^0.95.2", - "@polkadot/keyring": "^1.6.1", - "@polkadot/rpc-core": "^0.95.2", - "@polkadot/rpc-provider": "^0.95.2", - "@polkadot/types": "^0.95.2", - "@polkadot/util-crypto": "^1.6.1" - } - }, - "@polkadot/api-derive": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-0.95.2.tgz", - "integrity": "sha512-IScOMoUnrs/TCPk2zZZWUfw1EfV718HuFbIRFVg11PiG/uYQ+knNpr9cG/auRWelDMO0ef7eI+YOpf9+gV3EZw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/api": "^0.95.2", - "@polkadot/types": "^0.95.2" - } - }, - "@polkadot/api-metadata": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/api-metadata/-/api-metadata-0.95.2.tgz", - "integrity": "sha512-RyHr6o8Qdi0k1cTJj11AqZ3MFoPbqUK37RMpFH8vK6VHlZRlpqaZsCctWMEiOXQC2CtTnE5CIoQH11AKeIK+jw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/types": "^0.95.2", - "@polkadot/util": "^1.6.1", - "@polkadot/util-crypto": "^1.6.1" - } - }, - "@polkadot/jsonrpc": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/jsonrpc/-/jsonrpc-0.95.2.tgz", - "integrity": "sha512-U8cx5MuhWPRcuosSHv/Qw4OmlgSk410oTQtYvHAFDoHuPDcYXTBcCJ0e31cCZFBkaed+GTelkex9EPnHFi0x1g==", - "requires": { - "@babel/runtime": "^7.6.3" - } - }, - "@polkadot/keyring": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-1.8.1.tgz", - "integrity": "sha512-KeDbfP8biY3bXEhMv1ANp9d3kCuXj2oxseuDK0jvxRo7CehVME9UwAMGQK3Y9NCUuYWd+xTO2To0ZOqR7hdmuQ==", - "requires": { - "@babel/runtime": "^7.7.7", - "@polkadot/util": "^1.8.1", - "@polkadot/util-crypto": "^1.8.1" - } - }, - "@polkadot/rpc-core": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-0.95.2.tgz", - "integrity": "sha512-IjuzYfNSBWalzingkvpGdO9lZH6s5wFc5lWCINFDP/MSlnLfKzufzR0JeSiVCluraoohtUB/INVuBujDziZPzg==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/jsonrpc": "^0.95.2", - "@polkadot/rpc-provider": "^0.95.2", - "@polkadot/types": "^0.95.2", - "@polkadot/util": "^1.6.1", - "rxjs": "^6.5.3" - } - }, - "@polkadot/rpc-provider": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-0.95.2.tgz", - "integrity": "sha512-+vSoI9mdHPnjL7jK666+HLJ21Ymxo8GHdO72mI1A3xGO7wBmjKbUMHEYUtRwxg7DGF4mSZ/HJogoSU4i9smzpw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/api-metadata": "^0.95.2", - "@polkadot/util": "^1.6.1", - "@polkadot/util-crypto": "^1.6.1", - "@types/nock": "^11.1.0", - "eventemitter3": "^4.0.0", - "isomorphic-fetch": "^2.2.1", - "websocket": "^1.0.30" - } - }, - "@polkadot/types": { - "version": "0.95.2", - "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-0.95.2.tgz", - "integrity": "sha512-YiZbLgJ82rmgwbsYWEL8vtYqO1n1xEPxD5C8D0dmZQcwn9iSUibIqeij1xfd8y2ZyUmMW3YhdoJR6a8Ah6g3yw==", - "requires": { - "@babel/runtime": "^7.6.3", - "@polkadot/util": "^1.6.1", - "@polkadot/util-crypto": "^1.6.1", - "@types/memoizee": "^0.4.3", - "memoizee": "^0.4.14" - } - }, - "@polkadot/util": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/util/-/util-1.8.1.tgz", - "integrity": 
"sha512-sFpr+JLCG9d+epjboXsmJ1qcKa96r8ZYzXmVo8+aPzI/9jKKyez6Unox/dnfnpKppZB2nJuLcsxQm6nocp2Caw==", - "requires": { - "@babel/runtime": "^7.7.7", - "@types/bn.js": "^4.11.6", - "bn.js": "^4.11.8", - "camelcase": "^5.3.1", - "chalk": "^3.0.0", - "ip-regex": "^4.1.0", - "moment": "^2.24.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "@polkadot/util-crypto": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-1.8.1.tgz", - "integrity": "sha512-ypUs10hV1HPvYc0ZsEu+LTGSEh0rkr0as/FUh7+Z9v3Bxibn3aO+EOxJPQuDbZZ59FSMRmc9SeOSa0wn9ddrnw==", - "requires": { - "@babel/runtime": "^7.7.7", - "@polkadot/util": "^1.8.1", - "@polkadot/wasm-crypto": "^0.14.1", - "@types/bip39": "^2.4.2", - "@types/bs58": "^4.0.0", - "@types/pbkdf2": "^3.0.0", - "@types/secp256k1": "^3.5.0", - "@types/xxhashjs": "^0.2.1", - "base-x": "3.0.5", - "bip39": "^2.5.0", - "blakejs": "^1.1.0", - "bs58": "^4.0.1", - "js-sha3": "^0.8.0", - "secp256k1": "^3.8.0", - "tweetnacl": "^1.0.1", - "xxhashjs": "^0.2.2" - }, - "dependencies": { - "secp256k1": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/secp256k1/-/secp256k1-3.8.0.tgz", - "integrity": "sha512-k5ke5avRZbtl9Tqx/SA7CbY3NF6Ro+Sj9cZxezFzuBlLDmyqPiL8hJJ+EmzD8Ig4LUDByHJ3/iPOVoRixs/hmw==", - "requires": { - "bindings": "^1.5.0", - "bip66": "^1.1.5", - "bn.js": "^4.11.8", - "create-hash": "^1.2.0", - "drbg.js": "^1.0.1", - "elliptic": "^6.5.2", - "nan": "^2.14.0", - "safe-buffer": "^5.1.2" - } - }, - "tweetnacl": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-1.0.3.tgz", - "integrity": "sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==" - } - } - }, - "@polkadot/wasm-crypto": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-0.14.1.tgz", - "integrity": 
"sha512-Xng7L2Z8TNZa/5g6pot4O06Jf0ohQRZdvfl8eQL+E/L2mcqJYC1IjkMxJBSBuQEV7hisWzh9mHOy5WCcgPk29Q==" - }, - "@sindresorhus/is": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.7.0.tgz", - "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow==" - }, - "@types/bip39": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/@types/bip39/-/bip39-2.4.2.tgz", - "integrity": "sha512-Vo9lqOIRq8uoIzEVrV87ZvcIM0PN9t0K3oYZ/CS61fIYKCBdOIM7mlWzXuRvSXrDtVa1uUO2w1cdfufxTC0bzg==", - "requires": { - "@types/node": "*" - } - }, - "@types/bn.js": { - "version": "4.11.6", - "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz", - "integrity": "sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==", - "requires": { - "@types/node": "*" - } - }, - "@types/bs58": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@types/bs58/-/bs58-4.0.1.tgz", - "integrity": "sha512-yfAgiWgVLjFCmRv8zAcOIHywYATEwiTVccTLnRp6UxTNavT55M9d/uhK3T03St/+8/z/wW+CRjGKUNmEqoHHCA==", - "requires": { - "base-x": "^3.0.6" - }, - "dependencies": { - "base-x": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/base-x/-/base-x-3.0.8.tgz", - "integrity": "sha512-Rl/1AWP4J/zRrk54hhlxH4drNxPJXYUaKffODVI53/dAsV4t9fBxyxYKAVPU1XBHxYwOWP9h9H0hM2MVw4YfJA==", - "requires": { - "safe-buffer": "^5.0.1" - } - } - } - }, - "@types/caseless": { - "version": "0.12.2", - "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.2.tgz", - "integrity": "sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w==" - }, - "@types/chai": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.2.11.tgz", - "integrity": "sha512-t7uW6eFafjO+qJ3BIV2gGUyZs27egcNRkUdalkud+Qa3+kg//f129iuOFivHDXQ+vnU3fDXuwgv0cqMCbcE8sw==", - "dev": true - }, - "@types/color-name": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" - }, - "@types/events": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/events/-/events-3.0.0.tgz", - "integrity": "sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g==", - "dev": true - }, - "@types/glob": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.1.tgz", - "integrity": "sha512-1Bh06cbWJUHMC97acuD6UMG29nMt0Aqz1vF3guLfG+kHHJhy3AyohZFFxYk2f7Q1SQIrNwvncxAE0N/9s70F2w==", - "dev": true, - "requires": { - "@types/events": "*", - "@types/minimatch": "*", - "@types/node": "*" - } - }, - "@types/js-yaml": { - "version": "3.12.4", - "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.4.tgz", - "integrity": "sha512-fYMgzN+9e28R81weVN49inn/u798ruU91En1ZnGvSZzCRc5jXx9B2EDhlRaWmcO1RIxFHL8AajRXzxDuJu93+A==" - }, - "@types/lodash": { - "version": "4.14.152", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.152.tgz", - "integrity": "sha512-Vwf9YF2x1GE3WNeUMjT5bTHa2DqgUo87ocdgTScupY2JclZ5Nn7W2RLM/N0+oreexUk8uaVugR81NnTY/jNNXg==", - "dev": true - }, - "@types/memoizee": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/@types/memoizee/-/memoizee-0.4.4.tgz", - "integrity": "sha512-c9+1g6+6vEqcw5UuM0RbfQV0mssmZcoG9+hNC5ptDCsv4G+XJW1Z4pE13wV5zbc9e0+YrDydALBTiD3nWG1a3g==" - }, - 
"@types/minimatch": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz", - "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==", - "dev": true - }, - "@types/mocha": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-7.0.2.tgz", - "integrity": "sha512-ZvO2tAcjmMi8V/5Z3JsyofMe3hasRcaw88cto5etSVMwVQfeivGAlEYmaQgceUSVYFofVjT+ioHsATjdWcFt1w==", - "dev": true - }, - "@types/nock": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/@types/nock/-/nock-11.1.0.tgz", - "integrity": "sha512-jI/ewavBQ7X5178262JQR0ewicPAcJhXS/iFaNJl0VHLfyosZ/kwSrsa6VNQNSO8i9d8SqdRgOtZSOKJ/+iNMw==", - "requires": { - "nock": "*" - } - }, - "@types/node": { - "version": "10.17.24", - "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.24.tgz", - "integrity": "sha512-5SCfvCxV74kzR3uWgTYiGxrd69TbT1I6+cMx1A5kEly/IVveJBimtAMlXiEyVFn5DvUFewQWxOOiJhlxeQwxgA==" - }, - "@types/pbkdf2": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/pbkdf2/-/pbkdf2-3.0.0.tgz", - "integrity": "sha512-6J6MHaAlBJC/eVMy9jOwj9oHaprfutukfW/Dyt0NEnpQ/6HN6YQrpvLwzWdWDeWZIdenjGHlbYDzyEODO5Z+2Q==", - "requires": { - "@types/node": "*" - } - }, - "@types/request": { - "version": "2.48.5", - "resolved": "https://registry.npmjs.org/@types/request/-/request-2.48.5.tgz", - "integrity": "sha512-/LO7xRVnL3DxJ1WkPGDQrp4VTV1reX9RkC85mJ+Qzykj2Bdw+mG15aAfDahc76HtknjzE16SX/Yddn6MxVbmGQ==", - "requires": { - "@types/caseless": "*", - "@types/node": "*", - "@types/tough-cookie": "*", - "form-data": "^2.5.0" - } - }, - "@types/secp256k1": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/@types/secp256k1/-/secp256k1-3.5.3.tgz", - "integrity": "sha512-NGcsPDR0P+Q71O63e2ayshmiZGAwCOa/cLJzOIuhOiDvmbvrCIiVtEpqdCJGogG92Bnr6tw/6lqVBsRMEl15OQ==", - "requires": { - "@types/node": "*" - } - }, - "@types/sinon": { - "version": "9.0.4", - "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-9.0.4.tgz", - "integrity": "sha512-sJmb32asJZY6Z2u09bl0G2wglSxDlROlAejCjsnor+LzBMz17gu8IU7vKC/vWDnv9zEq2wqADHVXFjf4eE8Gdw==", - "dev": true, - "requires": { - "@types/sinonjs__fake-timers": "*" - } - }, - "@types/sinonjs__fake-timers": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-6.0.1.tgz", - "integrity": "sha512-yYezQwGWty8ziyYLdZjwxyMb0CZR49h8JALHGrxjQHWlqGgc8kLdHEgWrgL0uZ29DMvEVBDnHU2Wg36zKSIUtA==", - "dev": true - }, - "@types/tough-cookie": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.0.tgz", - "integrity": "sha512-I99sngh224D0M7XgW1s120zxCt3VYQ3IQsuw3P3jbq5GG4yc79+ZjyKznyOGIQrflfylLgcfekeZW/vk0yng6A==" - }, - "@types/underscore": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/@types/underscore/-/underscore-1.10.0.tgz", - "integrity": "sha512-ZAbqul7QAKpM2h1PFGa5ETN27ulmqtj0QviYHasw9LffvXZvVHuraOx/FOsIPPDNGZN0Qo1nASxxSfMYOtSoCw==" - }, - "@types/ws": { - "version": "6.0.4", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-6.0.4.tgz", - "integrity": "sha512-PpPrX7SZW9re6+Ha8ojZG4Se8AZXgf0GK6zmfqEuCsY49LFDNXO3SByp44X3dFEqtB73lkCDAdUazhAjVPiNwg==", - "requires": { - "@types/node": "*" - } - }, - "@types/xxhashjs": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/@types/xxhashjs/-/xxhashjs-0.2.2.tgz", - "integrity": 
"sha512-+hlk/W1kgnZn0vR22XNhxHk/qIRQYF54i0UTF2MwBAPd0e7xSy+jKOJwSwTdRQrNnOMRVv+vsh8ITV0uyhp2yg==", - "requires": { - "@types/node": "*" - } - }, - "acorn": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.2.0.tgz", - "integrity": "sha512-apwXVmYVpQ34m/i71vrApRrRKCWQnZZF1+npOD0WV5xZFfwWOmKGQ2RWlfdy9vWITsenisM8M0Qeq8agcFHNiQ==", - "dev": true - }, - "acorn-jsx": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz", - "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==", - "dev": true - }, - "aggregate-error": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-1.0.0.tgz", - "integrity": "sha1-iINE2tAiCnLjr1CQYRf0h3GSX6w=", - "requires": { - "clean-stack": "^1.0.0", - "indent-string": "^3.0.0" - } - }, - "ajv": { - "version": "6.12.2", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.2.tgz", - "integrity": "sha512-k+V+hzjm5q/Mr8ef/1Y9goCmlsK4I6Sm74teeyGvFk1XrOsbsKLjEdrvny42CZ+a8sXbk8KWpY/bDwS+FLL2UQ==", - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "ansi-escapes": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz", - "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==", - "dev": true, - "requires": { - "type-fest": "^0.11.0" - } - }, - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "ansicolors": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/ansicolors/-/ansicolors-0.3.2.tgz", - "integrity": "sha1-ZlWX3oap/+Oqm/vmyuXG6kJrSXk=", - "dev": true - }, - "append-transform": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/append-transform/-/append-transform-1.0.0.tgz", - "integrity": "sha512-P009oYkeHyU742iSZJzZZywj4QRJdnTWffaKuJQLablCZ1uz6/cW4yaRgcDaoQ+uwOxxnt0gRUcwfsNP2ri0gw==", - "dev": true, - "requires": { - "default-require-extensions": "^2.0.0" - } - }, - "archy": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/archy/-/archy-1.0.0.tgz", - "integrity": "sha1-+cjBN1fMHde8N5rHeyxipcKGjEA=", - "dev": true - }, - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "array-includes": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.1.tgz", - "integrity": "sha512-c2VXaCHl7zPsvpkFsw4nxvFie4fh1ur9bpcgsVkIjqn0H/Xwdg+7fv3n2r/isyS8EBj5b06M9kHyZuIr4El6WQ==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.0", - "is-string": "^1.0.5" - } - }, - "array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - 
"integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true - }, - "array.prototype.flat": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.2.3.tgz", - "integrity": "sha512-gBlRZV0VSmfPIeWfuuy56XZMvbVfbEUnOXUvt3F/eUUUSyzlgLxhEX4YAEpxNAogRGehPSnfXyPtYyKAhkzQhQ==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.0-next.1" - } - }, - "asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "requires": { - "safer-buffer": "~2.1.0" - } - }, - "assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" - }, - "assertion-error": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", - "dev": true - }, - "astral-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", - "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", - "dev": true - }, - "async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", - "requires": { - "lodash": "^4.17.14" - } - }, - "async-limiter": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", - "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" - }, - "aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" - }, - "aws4": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.1.tgz", - "integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==" - }, - "balanced-match": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" - }, - "base-x": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/base-x/-/base-x-3.0.5.tgz", - "integrity": "sha512-C3picSgzPSLE+jW3tcBzJoGwitOtazb5B+5YmAxZm2ybmTi9LNgAtDO/jjVEBZwHoXmDBZ9m/IELj3elJVRBcA==", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "base64-js": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", - "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==" - }, - "base64url": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.1.tgz", - "integrity": "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A==" - }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "requires": { - "tweetnacl": "^0.14.3" - } - }, - "bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "requires": { - "file-uri-to-path": "1.0.0" - } - }, - "bip39": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/bip39/-/bip39-2.6.0.tgz", - "integrity": "sha512-RrnQRG2EgEoqO24ea+Q/fftuPUZLmrEM3qNhhGsA3PbaXaCW791LTzPuVyx/VprXQcTbPJ3K3UeTna8ZnVl2sg==", - "requires": { - "create-hash": "^1.1.0", - "pbkdf2": "^3.0.9", - "randombytes": "^2.0.1", - "safe-buffer": "^5.0.1", - "unorm": "^1.3.3" - } - }, - "bip66": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/bip66/-/bip66-1.1.5.tgz", - "integrity": "sha1-AfqHSHhcpwlV1QESF9GzE5lpyiI=", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "bl": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.0.3.tgz", - "integrity": "sha512-fs4G6/Hu4/EE+F75J8DuN/0IpQqNjAdC7aEQv7Qt8MHGUH7Ckv2MwTEEeN9QehD0pfIDkMI1bkHYkKy7xHyKIg==", - "dev": true, - "requires": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dev": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "blakejs": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/blakejs/-/blakejs-1.1.0.tgz", - "integrity": "sha1-ad+S75U6qIylGjLfarHFShVfx6U=" - }, - "bn.js": { - "version": "4.11.9", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "requires": { - "fill-range": "^7.0.1" - } - }, - "brorand": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz", - "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8=" - }, - "browserify-aes": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz", - "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==", - "requires": { - "buffer-xor": "^1.0.3", - "cipher-base": "^1.0.0", - "create-hash": "^1.1.0", - "evp_bytestokey": "^1.0.3", - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "browserify-zlib": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz", - "integrity": 
"sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", - "requires": { - "pako": "~1.0.5" - } - }, - "bs58": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/bs58/-/bs58-4.0.1.tgz", - "integrity": "sha1-vhYedsNU9veIrkBx9j806MTwpCo=", - "requires": { - "base-x": "^3.0.2" - } - }, - "buffer": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.6.0.tgz", - "integrity": "sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw==", - "requires": { - "base64-js": "^1.0.2", - "ieee754": "^1.1.4" - } - }, - "buffer-xor": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz", - "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk=" - }, - "byline": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", - "integrity": "sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE=" - }, - "cacheable-request": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-2.1.4.tgz", - "integrity": "sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0=", - "requires": { - "clone-response": "1.0.2", - "get-stream": "3.0.0", - "http-cache-semantics": "3.8.1", - "keyv": "3.0.0", - "lowercase-keys": "1.0.0", - "normalize-url": "2.0.1", - "responselike": "1.0.2" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=" - }, - "lowercase-keys": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz", - "integrity": "sha1-TjNms55/VFfjXxMkvfb4jQv8cwY=" - } - } - }, - "caching-transform": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-3.0.2.tgz", - "integrity": "sha512-Mtgcv3lh3U0zRii/6qVgQODdPA4G3zhG+jtbCWj39RXuUFTMzH0vcdMtaJS1jPowd+It2Pqr6y3NJMQqOqCE2w==", - "dev": true, - "requires": { - "hasha": "^3.0.0", - "make-dir": "^2.0.0", - "package-hash": "^3.0.0", - "write-file-atomic": "^2.4.2" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "write-file-atomic": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-2.4.3.tgz", - "integrity": "sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.11", - "imurmurhash": "^0.1.4", - "signal-exit": "^3.0.2" - } - } - } - }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true - }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": 
"sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" - }, - "cardinal": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-2.1.1.tgz", - "integrity": "sha1-fMEFXYItISlU0HsIXeolHMe8VQU=", - "dev": true, - "requires": { - "ansicolors": "~0.3.2", - "redeyed": "~2.1.0" - } - }, - "caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" - }, - "chai": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.2.0.tgz", - "integrity": "sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw==", - "dev": true, - "requires": { - "assertion-error": "^1.1.0", - "check-error": "^1.0.2", - "deep-eql": "^3.0.1", - "get-func-name": "^2.0.0", - "pathval": "^1.1.0", - "type-detect": "^4.0.5" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "chardet": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", - "dev": true - }, - "check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", - "dev": true - }, - "chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", - "dev": true - }, - "cipher-base": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz", - "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "clean-regexp": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/clean-regexp/-/clean-regexp-1.0.0.tgz", - "integrity": "sha1-jffHquUf02h06PjQW5GAvBGj/tc=", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, - "clean-stack": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-1.3.0.tgz", - "integrity": "sha1-noIVAa6XmYbEax1m0tQy2y/UrjE=" - }, - "cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "requires": { - "restore-cursor": "^3.1.0" - } - }, - "cli-progress": { - "version": "3.8.2", - "resolved": "https://registry.npmjs.org/cli-progress/-/cli-progress-3.8.2.tgz", - "integrity": "sha512-qRwBxLldMSfxB+YGFgNRaj5vyyHe1yMpVeDL79c+7puGujdKJHQHydgqXDcrkvQgJ5U/d3lpf6vffSoVVUftVQ==", - "dev": true, - "requires": { - "colors": "^1.1.2", - "string-width": "^4.2.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": 
"sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, - "string-width": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", - "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, - "cli-ux": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/cli-ux/-/cli-ux-5.4.6.tgz", - "integrity": "sha512-EeiS2TzEndRVknCqE+8Ri8g0bsP617a1nq6n+3Trwft1JCDzyUNlX2J1fl7fwTgRPWtmBmiF6xIyueL5YGs65g==", - "dev": true, - "requires": { - "@oclif/command": "^1.6.0", - "@oclif/errors": "^1.2.1", - "@oclif/linewrap": "^1.0.0", - "@oclif/screen": "^1.0.3", - "ansi-escapes": "^4.3.0", - "ansi-styles": "^4.2.0", - "cardinal": "^2.1.1", - "chalk": "^2.4.1", - "clean-stack": "^2.0.0", - "cli-progress": "^3.4.0", - "extract-stack": "^1.0.0", - "fs-extra": "^7.0.1", - "hyperlinker": "^1.0.0", - "indent-string": "^4.0.0", - "is-wsl": "^1.1.0", - "js-yaml": "^3.13.1", - "lodash": "^4.17.11", - "natural-orderby": "^2.0.1", - "object-treeify": "^1.1.4", - "password-prompt": "^1.1.2", - "semver": "^5.6.0", - "string-width": "^3.1.0", - "strip-ansi": "^5.1.0", - "supports-color": "^5.5.0", - "supports-hyperlinks": "^1.0.1", - "tslib": "^1.9.3" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "dev": true - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "indent-string": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "cli-width": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.1.tgz", - "integrity": "sha512-GRMWDxpOB6Dgk2E5Uo+3eEBvtOOlimMmpbFiKuLFnQzYDavtLFY3K5ona41jgN/WdRZtG7utuVSVTL4HbZHGkw==", - "dev": true - }, - "cliui": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", - "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", - "dev": true, - "requires": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - }, - "dependencies": { - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - } - } - } - }, - "clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "requires": { - "mimic-response": "^1.0.0" - } - }, - "color": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/color/-/color-3.0.0.tgz", - "integrity": "sha512-jCpd5+s0s0t7p3pHQKpnJ0TpQKKdleP71LWcA0aqiljpiuAkOSUFN/dyH8ZwF0hRmFlrIuRhufds1QyEP9EB+w==", - "requires": { - "color-convert": "^1.9.1", - "color-string": "^1.5.2" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" - }, - "color-string": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", - "requires": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } - }, - "colornames": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/colornames/-/colornames-1.1.1.tgz", - "integrity": "sha1-+IiQMGhcfE/54qVZ9Qd+t2qBb5Y=" - }, - "colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": 
"sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==" - }, - "colorspace": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/colorspace/-/colorspace-1.1.2.tgz", - "integrity": "sha512-vt+OoIP2d76xLhjwbBaucYlNSpPsrJWPlBTtwCpQKIu6/CSMutyzX93O/Do0qzpH3YoHEes8YEFXyZ797rEhzQ==", - "requires": { - "color": "3.0.x", - "text-hex": "1.0.x" - } - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=", - "dev": true - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" - }, - "contains-path": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/contains-path/-/contains-path-0.1.0.tgz", - "integrity": "sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=", - "dev": true - }, - "content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", - "dev": true - }, - "convert-source-map": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", - "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", - "dev": true, - "requires": { - "safe-buffer": "~5.1.1" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - } - } - }, - "core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" - }, - "cp-file": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/cp-file/-/cp-file-6.2.0.tgz", - "integrity": "sha512-fmvV4caBnofhPe8kOcitBwSn2f39QLjnAnGq3gO9dfd75mUytzKNZB1hde6QHunW2Rt+OwuBOMc3i1tNElbszA==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "make-dir": "^2.0.0", - "nested-error-stacks": "^2.0.0", - "pify": "^4.0.1", - "safe-buffer": "^5.0.1" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - } - } - }, - "create-hash": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz", - "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==", - "requires": { - "cipher-base": "^1.0.1", - "inherits": 
"^2.0.1", - "md5.js": "^1.3.4", - "ripemd160": "^2.0.1", - "sha.js": "^2.4.0" - } - }, - "create-hmac": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz", - "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==", - "requires": { - "cipher-base": "^1.0.3", - "create-hash": "^1.1.0", - "inherits": "^2.0.1", - "ripemd160": "^2.0.0", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "cross-spawn": { - "version": "6.0.5", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", - "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", - "requires": { - "nice-try": "^1.0.4", - "path-key": "^2.0.1", - "semver": "^5.5.0", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - } - }, - "cuint": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/cuint/-/cuint-0.2.2.tgz", - "integrity": "sha1-QICG1AlVDCYxFVYZ6fp7ytw7mRs=" - }, - "d": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz", - "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==", - "requires": { - "es5-ext": "^0.10.50", - "type": "^1.0.1" - } - }, - "dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", - "requires": { - "ms": "^2.1.1" - } - }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true - }, - "decode-uri-component": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", - "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=" - }, - "decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "requires": { - "mimic-response": "^1.0.0" - } - }, - "deep-eql": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", - "dev": true, - "requires": { - "type-detect": "^4.0.0" - } - }, - "deep-is": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", - "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", - "dev": true - }, - "default-require-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/default-require-extensions/-/default-require-extensions-2.0.0.tgz", - "integrity": "sha1-9fj7sYp9bVCyH2QfZJ67Uiz+JPc=", - "dev": true, - "requires": { - "strip-bom": "^3.0.0" - }, - "dependencies": { - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - } - } - }, - "define-properties": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", - "integrity": 
"sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", - "dev": true, - "requires": { - "object-keys": "^1.0.12" - } - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" - }, - "detect-indent": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-6.0.0.tgz", - "integrity": "sha512-oSyFlqaTHCItVRGK5RmrmjB+CmaMOW7IaNA/kdxqhoa6d17j/5ce9O9eWXmV/KEdRwqpQA+Vqe8a8Bsybu4YnA==", - "dev": true - }, - "diagnostics": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/diagnostics/-/diagnostics-1.1.1.tgz", - "integrity": "sha512-8wn1PmdunLJ9Tqbx+Fx/ZEuHfJf4NKSN2ZBj7SJC/OWRWha843+WsTjqMe1B5E3p28jqBlp+mJ2fPVxPyNgYKQ==", - "requires": { - "colorspace": "1.1.x", - "enabled": "1.0.x", - "kuler": "1.0.x" - } - }, - "dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "requires": { - "path-type": "^4.0.0" - } - }, - "doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "drbg.js": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/drbg.js/-/drbg.js-1.0.1.tgz", - "integrity": "sha1-Pja2xCs3BDgjzbwzLVjzHiRFSAs=", - "requires": { - "browserify-aes": "^1.0.6", - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4" - } - }, - "duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", - "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" - }, - "ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "elliptic": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", - "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", - "requires": { - "bn.js": "^4.4.0", - "brorand": "^1.0.1", - "hash.js": "^1.0.0", - "hmac-drbg": "^1.0.0", - "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.0" - } - }, - "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" - }, - "enabled": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/enabled/-/enabled-1.0.2.tgz", - "integrity": "sha1-ll9lE9LC0cX0ZStkouM5ZGf8L5M=", - "requires": { - "env-variable": "0.0.x" - } - }, - "encoding": { - "version": "0.1.12", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.12.tgz", - "integrity": "sha1-U4tm8+5izRq1HsMjgp0flIDHS+s=", - "requires": { - "iconv-lite": "~0.4.13" - } - }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - 
"requires": { - "once": "^1.4.0" - } - }, - "env-variable": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/env-variable/-/env-variable-0.0.6.tgz", - "integrity": "sha512-bHz59NlBbtS0NhftmR8+ExBEekE7br0e01jw+kk0NDro7TtZzBYZ5ScGPs3OmwnpyfHTHOtr1Y6uedCdrIldtg==" - }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dev": true, - "requires": { - "is-arrayish": "^0.2.1" - } - }, - "es-abstract": { - "version": "1.17.5", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.5.tgz", - "integrity": "sha512-BR9auzDbySxOcfog0tLECW8l28eRGpDpU3Dm3Hp4q/N+VtLTmyj4EUN088XZWQDW/hzj6sYRDXeOFsaAODKvpg==", - "dev": true, - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.1.5", - "is-regex": "^1.0.5", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimleft": "^2.1.1", - "string.prototype.trimright": "^2.1.1" - } - }, - "es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dev": true, - "requires": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - } - }, - "es5-ext": { - "version": "0.10.53", - "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.53.tgz", - "integrity": "sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q==", - "requires": { - "es6-iterator": "~2.0.3", - "es6-symbol": "~3.1.3", - "next-tick": "~1.0.0" - }, - "dependencies": { - "next-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", - "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=" - } - } - }, - "es6-error": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz", - "integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==", - "dev": true - }, - "es6-iterator": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", - "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", - "requires": { - "d": "1", - "es5-ext": "^0.10.35", - "es6-symbol": "^3.1.1" - } - }, - "es6-promise": { - "version": "4.2.8", - "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", - "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" - }, - "es6-symbol": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz", - "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==", - "requires": { - "d": "^1.0.1", - "ext": "^1.1.2" - } - }, - "es6-weak-map": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-2.0.3.tgz", - "integrity": "sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==", - "requires": { - "d": "1", - "es5-ext": "^0.10.46", - "es6-iterator": "^2.0.3", - "es6-symbol": "^3.1.1" - } - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" - }, - "eslint": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.1.0.tgz", - "integrity": "sha512-DfS3b8iHMK5z/YLSme8K5cge168I8j8o1uiVmFCgnnjxZQbCGyraF8bMl7Ju4yfBmCuxD7shOF7eqGkcuIHfsA==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "ajv": "^6.10.0", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.0.1", - "doctrine": "^3.0.0", - "eslint-scope": "^5.0.0", - "eslint-utils": "^2.0.0", - "eslint-visitor-keys": "^1.1.0", - "espree": "^7.0.0", - "esquery": "^1.2.0", - "esutils": "^2.0.2", - "file-entry-cache": "^5.0.1", - "functional-red-black-tree": "^1.0.1", - "glob-parent": "^5.0.0", - "globals": "^12.1.0", - "ignore": "^4.0.6", - "import-fresh": "^3.0.0", - "imurmurhash": "^0.1.4", - "inquirer": "^7.0.0", - "is-glob": "^4.0.0", - "js-yaml": "^3.13.1", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash": "^4.17.14", - "minimatch": "^3.0.4", - "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "progress": "^2.0.0", - "regexpp": "^3.1.0", - "semver": "^7.2.1", - "strip-ansi": "^6.0.0", - "strip-json-comments": "^3.1.0", - "table": "^5.2.3", - "text-table": "^0.2.0", - "v8-compile-cache": "^2.0.3" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.0.0.tgz", - "integrity": "sha512-N9oWFcegS0sFr9oh1oz2d7Npos6vNoWW9HvtCg5N1KRFpUhaAhvTv5Y58g880fZaEYSNm3qDz8SU1UrGvp+n7A==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "globals": { - "version": "12.4.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz", - "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==", - "dev": true, - "requires": { - "type-fest": "^0.8.1" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "dev": true - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true - }, - "semver": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", - "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", - "dev": true - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "eslint-ast-utils": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eslint-ast-utils/-/eslint-ast-utils-1.1.0.tgz", - "integrity": "sha512-otzzTim2/1+lVrlH19EfQQJEhVJSu0zOb9ygb3iapN6UlyaDtyRq4b5U1FuW0v1lRa9Fp/GJyHkSwm6NqABgCA==", - "dev": true, - "requires": { - "lodash.get": "^4.4.2", - "lodash.zip": "^4.2.0" - } - }, - "eslint-config-oclif": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-oclif/-/eslint-config-oclif-3.1.0.tgz", - "integrity": "sha512-Tqgy43cNXsSdhTLWW4RuDYGFhV240sC4ISSv/ZiUEg/zFxExSEUpRE6J+AGnkKY9dYwIW4C9b2YSUVv8z/miMA==", - "dev": true, - "requires": { - "eslint-config-xo-space": "^0.20.0", - "eslint-plugin-mocha": "^5.2.0", - "eslint-plugin-node": "^7.0.1", - "eslint-plugin-unicorn": "^6.0.1" - }, - "dependencies": { - "eslint-plugin-es": { - "version": "1.4.1", - "resolved": 
"https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-1.4.1.tgz", - "integrity": "sha512-5fa/gR2yR3NxQf+UXkeLeP8FBBl6tSgdrAz1+cF84v1FMM4twGwQoqTnn+QxFLcPOrF4pdKEJKDB/q9GoyJrCA==", - "dev": true, - "requires": { - "eslint-utils": "^1.4.2", - "regexpp": "^2.0.1" - } - }, - "eslint-plugin-node": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-7.0.1.tgz", - "integrity": "sha512-lfVw3TEqThwq0j2Ba/Ckn2ABdwmL5dkOgAux1rvOk6CO7A6yGyPI2+zIxN6FyNkp1X1X/BSvKOceD6mBWSj4Yw==", - "dev": true, - "requires": { - "eslint-plugin-es": "^1.3.1", - "eslint-utils": "^1.3.1", - "ignore": "^4.0.2", - "minimatch": "^3.0.4", - "resolve": "^1.8.1", - "semver": "^5.5.0" - } - }, - "eslint-plugin-unicorn": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-unicorn/-/eslint-plugin-unicorn-6.0.1.tgz", - "integrity": "sha512-hjy9LhTdtL7pz8WTrzS0CGXRkWK3VAPLDjihofj8JC+uxQLfXm0WwZPPPB7xKmcjRyoH+jruPHOCrHNEINpG/Q==", - "dev": true, - "requires": { - "clean-regexp": "^1.0.0", - "eslint-ast-utils": "^1.0.0", - "import-modules": "^1.1.0", - "lodash.camelcase": "^4.1.1", - "lodash.kebabcase": "^4.0.1", - "lodash.snakecase": "^4.0.1", - "lodash.upperfirst": "^4.2.0", - "safe-regex": "^1.1.0" - } - }, - "eslint-utils": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", - "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^1.1.0" - } - }, - "ignore": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", - "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", - "dev": true - }, - "regexpp": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", - "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", - "dev": true - } - } - }, - "eslint-config-standard": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/eslint-config-standard/-/eslint-config-standard-14.1.1.tgz", - "integrity": "sha512-Z9B+VR+JIXRxz21udPTL9HpFMyoMUEeX1G251EQ6e05WD9aPVtVBn09XUmZ259wCMlCDmYDSZG62Hhm+ZTJcUg==", - "dev": true - }, - "eslint-config-xo": { - "version": "0.24.2", - "resolved": "https://registry.npmjs.org/eslint-config-xo/-/eslint-config-xo-0.24.2.tgz", - "integrity": "sha512-ivQ7qISScW6gfBp+p31nQntz1rg34UCybd3uvlngcxt5Utsf4PMMi9QoAluLFcPUM5Tvqk4JGraR9qu3msKPKQ==", - "dev": true - }, - "eslint-config-xo-space": { - "version": "0.20.0", - "resolved": "https://registry.npmjs.org/eslint-config-xo-space/-/eslint-config-xo-space-0.20.0.tgz", - "integrity": "sha512-bOsoZA8M6v1HviDUIGVq1fLVnSu3mMZzn85m2tqKb73tSzu4GKD4Jd2Py4ZKjCgvCbRRByEB5HPC3fTMnnJ1uw==", - "dev": true, - "requires": { - "eslint-config-xo": "^0.24.0" - } - }, - "eslint-import-resolver-node": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.3.tgz", - "integrity": "sha512-b8crLDo0M5RSe5YG8Pu2DYBj71tSB6OvXkfzwbJU2w7y8P4/yo0MyF8jU26IEuEuHF2K5/gcAJE3LhQGqBBbVg==", - "dev": true, - "requires": { - "debug": "^2.6.9", - "resolve": "^1.13.1" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - } - } - }, - "eslint-module-utils": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.6.0.tgz", - "integrity": "sha512-6j9xxegbqe8/kZY8cYpcp0xhbK0EgJlg3g9mib3/miLaExuuwc3n5UEfSnU6hWMbT0FAYVvDbL9RrRgpUeQIvA==", - "dev": true, - "requires": { - "debug": "^2.6.9", - "pkg-dir": "^2.0.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "requires": { - "locate-path": "^2.0.0" - } - }, - "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "requires": { - "p-try": "^1.0.0" - } - }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "requires": { - "p-limit": "^1.1.0" - } - }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "pkg-dir": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-2.0.0.tgz", - "integrity": "sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s=", - "dev": true, - "requires": { - "find-up": "^2.1.0" - } - } - } - }, - "eslint-plugin-es": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz", - "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==", - "dev": true, - "requires": { - "eslint-utils": "^2.0.0", - "regexpp": "^3.0.0" - } - }, - "eslint-plugin-import": { - "version": "2.20.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.20.2.tgz", - "integrity": "sha512-FObidqpXrR8OnCh4iNsxy+WACztJLXAHBO5hK79T1Hc77PgQZkyDGA5Ag9xAvRpglvLNxhH/zSmZ70/pZ31dHg==", - "dev": true, - "requires": { - "array-includes": "^3.0.3", - "array.prototype.flat": "^1.2.1", - "contains-path": "^0.1.0", - "debug": "^2.6.9", - "doctrine": "1.5.0", - "eslint-import-resolver-node": "^0.3.2", - 
"eslint-module-utils": "^2.4.1", - "has": "^1.0.3", - "minimatch": "^3.0.4", - "object.values": "^1.1.0", - "read-pkg-up": "^2.0.0", - "resolve": "^1.12.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "doctrine": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-1.5.0.tgz", - "integrity": "sha1-N53Ocw9hZvds76TmcHoVmwLFpvo=", - "dev": true, - "requires": { - "esutils": "^2.0.2", - "isarray": "^1.0.0" - } - }, - "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", - "dev": true, - "requires": { - "locate-path": "^2.0.0" - } - }, - "load-json-file": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", - "integrity": "sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^2.2.0", - "pify": "^2.0.0", - "strip-bom": "^3.0.0" - } - }, - "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", - "dev": true, - "requires": { - "p-locate": "^2.0.0", - "path-exists": "^3.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", - "dev": true, - "requires": { - "p-try": "^1.0.0" - } - }, - "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", - "dev": true, - "requires": { - "p-limit": "^1.1.0" - } - }, - "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=", - "dev": true - }, - "parse-json": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", - "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", - "dev": true, - "requires": { - "error-ex": "^1.2.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM=", - "dev": true, - "requires": { - "pify": "^2.0.0" - } - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true - }, - "read-pkg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-2.0.0.tgz", - "integrity": "sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg=", - "dev": true, - "requires": { - "load-json-file": "^2.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^2.0.0" - } - }, - "read-pkg-up": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-2.0.0.tgz", - "integrity": "sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4=", - "dev": true, - "requires": { - "find-up": "^2.0.0", - "read-pkg": "^2.0.0" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - } - } - }, - "eslint-plugin-mocha": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-mocha/-/eslint-plugin-mocha-5.3.0.tgz", - "integrity": "sha512-3uwlJVLijjEmBeNyH60nzqgA1gacUWLUmcKV8PIGNvj1kwP/CTgAWQHn2ayyJVwziX+KETkr9opNwT1qD/RZ5A==", - "dev": true, - "requires": { - "ramda": "^0.26.1" - } - }, - "eslint-plugin-node": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz", - "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==", - "dev": true, - "requires": { - "eslint-plugin-es": "^3.0.0", - "eslint-utils": "^2.0.0", - "ignore": "^5.1.1", - "minimatch": "^3.0.4", - "resolve": "^1.10.1", - "semver": "^6.1.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "eslint-plugin-promise": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-promise/-/eslint-plugin-promise-4.2.1.tgz", - "integrity": "sha512-VoM09vT7bfA7D+upt+FjeBO5eHIJQBUWki1aPvB+vbNiHS3+oGIJGIeyBtKQTME6UPXXy3vV07OL1tHd3ANuDw==", - "dev": true - }, - "eslint-plugin-standard": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-standard/-/eslint-plugin-standard-4.0.1.tgz", - "integrity": "sha512-v/KBnfyaOMPmZc/dmc6ozOdWqekGp7bBGq4jLAecEfPGmfKiWS4sA8sC0LqiV9w5qmXAtXVn4M3p1jSyhY85SQ==", - "dev": true - }, - "eslint-scope": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz", - "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==", - "dev": true, - "requires": { - "esrecurse": "^4.1.0", - "estraverse": "^4.1.1" - } - }, - "eslint-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.0.0.tgz", - "integrity": "sha512-0HCPuJv+7Wv1bACm8y5/ECVfYdfsAm9xmVb7saeFlxjPYALefjhbYoCkBjPdPzGH8wWyTpAez82Fh3VKYEZ8OA==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^1.1.0" - } - }, - "eslint-visitor-keys": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz", - "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==", - "dev": true - }, - "espree": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-7.0.0.tgz", - "integrity": "sha512-/r2XEx5Mw4pgKdyb7GNLQNsu++asx/dltf/CI8RFi9oGHxmQFgvLbc5Op4U6i8Oaj+kdslhJtVlEZeAqH5qOTw==", - "dev": true, - "requires": { - "acorn": "^7.1.1", - "acorn-jsx": "^5.2.0", - "eslint-visitor-keys": "^1.1.0" - } - }, - "esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" - }, - "esquery": { - "version": "1.3.1", - "resolved": 
"https://registry.npmjs.org/esquery/-/esquery-1.3.1.tgz", - "integrity": "sha512-olpvt9QG0vniUBZspVRN6lwB7hOZoTRtT+jzR+tS4ffYx2mzbw+z0XCOk44aaLYKApNX5nMm+E+P6o25ip/DHQ==", - "dev": true, - "requires": { - "estraverse": "^5.1.0" - }, - "dependencies": { - "estraverse": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.1.0.tgz", - "integrity": "sha512-FyohXK+R0vE+y1nHLoBM7ZTyqRpqAlhdZHCWIWEviFLiGB8b04H6bQs8G+XTthacvT8VuwvteiP7RJSxMs8UEw==", - "dev": true - } - } - }, - "esrecurse": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", - "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", - "dev": true, - "requires": { - "estraverse": "^4.1.0" - } - }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - }, - "esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true - }, - "event-emitter": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", - "integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", - "requires": { - "d": "1", - "es5-ext": "~0.10.14" - } - }, - "eventemitter3": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.4.tgz", - "integrity": "sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ==" - }, - "evp_bytestokey": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz", - "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==", - "requires": { - "md5.js": "^1.3.4", - "safe-buffer": "^5.1.1" - } - }, - "execa": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz", - "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==", - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^4.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "ext": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/ext/-/ext-1.4.0.tgz", - "integrity": "sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==", - "requires": { - "type": "^2.0.0" - }, - "dependencies": { - "type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/type/-/type-2.0.0.tgz", - "integrity": "sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==" - } - } - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "dev": true, - "requires": { - "chardet": "^0.7.0", - 
"iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - }, - "dependencies": { - "tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dev": true, - "requires": { - "os-tmpdir": "~1.0.2" - } - } - } - }, - "extract-stack": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/extract-stack/-/extract-stack-1.0.0.tgz", - "integrity": "sha1-uXrK+UQe6iMyUpYktzL8WhyBZfo=", - "dev": true - }, - "extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" - }, - "fancy-test": { - "version": "1.4.8", - "resolved": "https://registry.npmjs.org/fancy-test/-/fancy-test-1.4.8.tgz", - "integrity": "sha512-/uCv78YSAz4UOQ3ZptnxOq6qYhJDMtwFHQnsghzGl2g6uO2VNfJDKlyczqFpG+KueXe7phoeIS6hMU1x/qhizQ==", - "dev": true, - "requires": { - "@types/chai": "*", - "@types/lodash": "*", - "@types/mocha": "*", - "@types/node": "*", - "@types/sinon": "*", - "lodash": "^4.17.13", - "mock-stdin": "^0.3.1", - "stdout-stderr": "^0.1.9" - } - }, - "fast-deep-equal": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz", - "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==" - }, - "fast-glob": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.2.tgz", - "integrity": "sha512-UDV82o4uQyljznxwMxyVRJgZZt3O5wENYojjzbaGEGZgeOxkLFf+V4cnUD+krzb2F72E18RhamkMZ7AdeggF7A==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.0", - "merge2": "^1.3.0", - "micromatch": "^4.0.2", - "picomatch": "^2.2.1" - } - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, - "fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", - "dev": true - }, - "fast-safe-stringify": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz", - "integrity": "sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA==" - }, - "fastq": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.8.0.tgz", - "integrity": "sha512-SMIZoZdLh/fgofivvIkmknUXyPnvxRE3DhtZ5Me3Mrsk5gyPL42F0xr51TdRXskBxHfMp+07bcYzfsYEsSQA9Q==", - "dev": true, - "requires": { - "reusify": "^1.0.4" - } - }, - "fecha": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fecha/-/fecha-2.3.3.tgz", - "integrity": "sha512-lUGBnIamTAwk4znq5BcqsDaxSmZ9nDVJaij6NvRt/Tg4R69gERA+otPKbS86ROw9nxVMw2/mp1fnaiWqbs6Sdg==" - }, - "figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, - "file-entry-cache": { - "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", - "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", - "dev": true, - "requires": { - "flat-cache": "^2.0.1" - } - }, - "file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==" - }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", - "dev": true, - "requires": { - "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "dev": true, - "requires": { - "find-up": "^3.0.0" - } - } - } - }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, - "flat-cache": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", - 
"integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", - "dev": true, - "requires": { - "flatted": "^2.0.0", - "rimraf": "2.6.3", - "write": "1.0.3" - }, - "dependencies": { - "rimraf": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", - "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", - "dev": true, - "requires": { - "glob": "^7.1.3" - } - } - } - }, - "flatted": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", - "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==", - "dev": true - }, - "foreground-child": { - "version": "1.5.6", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-1.5.6.tgz", - "integrity": "sha1-T9ca0t/elnibmApcCilZN8svXOk=", - "dev": true, - "requires": { - "cross-spawn": "^4", - "signal-exit": "^3.0.0" - }, - "dependencies": { - "cross-spawn": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-4.0.2.tgz", - "integrity": "sha1-e5JHYhwjrf3ThWAEqCPL45dCTUE=", - "dev": true, - "requires": { - "lru-cache": "^4.0.1", - "which": "^1.2.9" - } - }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "dev": true, - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", - "dev": true - } - } - }, - "forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" - }, - "form-data": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", - "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - }, - "from2": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", - "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", - "requires": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.0" - } - }, - "fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", - "dev": true - }, - "fs-extra": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", - "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", - "requires": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": 
"sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true - }, - "functional-red-black-tree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", - "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", - "dev": true - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true - }, - "get-func-name": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", - "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", - "dev": true - }, - "get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "requires": { - "pump": "^3.0.0" - } - }, - "getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "requires": { - "assert-plus": "^1.0.0" - } - }, - "github-slugger": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.3.0.tgz", - "integrity": "sha512-gwJScWVNhFYSRDvURk/8yhcFBee6aFjye2a7Lhb2bUyRulpIoek9p0I9Kt7PT67d/nUlZbFu8L9RLiA0woQN8Q==", - "dev": true, - "requires": { - "emoji-regex": ">=6.0.0 <=6.1.1" - }, - "dependencies": { - "emoji-regex": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-6.1.1.tgz", - "integrity": "sha1-xs0OwbBkLio8Z6ETfvxeeW2k+I4=", - "dev": true - } - } - }, - "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-parent": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz", - "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==", - "dev": true, - "requires": { - "is-glob": "^4.0.1" - } - }, - "globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true - }, - "globby": { - "version": "10.0.2", - "resolved": "https://registry.npmjs.org/globby/-/globby-10.0.2.tgz", - "integrity": "sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg==", - "dev": true, - "requires": { - "@types/glob": "^7.1.1", - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.0.3", - "glob": "^7.1.3", - "ignore": "^5.1.1", - "merge2": "^1.2.3", - "slash": "^3.0.0" - } - }, - "got": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/got/-/got-8.3.2.tgz", - "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==", - "requires": { - "@sindresorhus/is": "^0.7.0", - "cacheable-request": "^2.1.1", - "decompress-response": "^3.3.0", - 
"duplexer3": "^0.1.4", - "get-stream": "^3.0.0", - "into-stream": "^3.1.0", - "is-retry-allowed": "^1.1.0", - "isurl": "^1.0.0-alpha5", - "lowercase-keys": "^1.0.0", - "mimic-response": "^1.0.0", - "p-cancelable": "^0.4.0", - "p-timeout": "^2.0.1", - "pify": "^3.0.0", - "safe-buffer": "^5.1.1", - "timed-out": "^4.0.1", - "url-parse-lax": "^3.0.0", - "url-to-options": "^1.0.1" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=" - } - } - }, - "graceful-fs": { - "version": "4.2.4", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", - "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" - }, - "har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" - }, - "har-validator": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz", - "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==", - "requires": { - "ajv": "^6.5.5", - "har-schema": "^2.0.0" - } - }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" - }, - "has-symbol-support-x": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz", - "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==" - }, - "has-symbols": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", - "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", - "dev": true - }, - "has-to-string-tag-x": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz", - "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==", - "requires": { - "has-symbol-support-x": "^1.4.1" - } - }, - "hash-base": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", - "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", - "requires": { - "inherits": "^2.0.4", - "readable-stream": "^3.6.0", - "safe-buffer": "^5.2.0" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "hash.js": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz", - "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==", - "requires": { - 
"inherits": "^2.0.3", - "minimalistic-assert": "^1.0.1" - } - }, - "hasha": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/hasha/-/hasha-3.0.0.tgz", - "integrity": "sha1-UqMvq4Vp1BymmmH/GiFPjrfIvTk=", - "dev": true, - "requires": { - "is-stream": "^1.0.1" - } - }, - "hmac-drbg": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", - "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=", - "requires": { - "hash.js": "^1.0.3", - "minimalistic-assert": "^1.0.0", - "minimalistic-crypto-utils": "^1.0.1" - } - }, - "hosted-git-info": { - "version": "2.8.8", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.8.tgz", - "integrity": "sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg==", - "dev": true - }, - "html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", - "dev": true - }, - "http-cache-semantics": { - "version": "3.8.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz", - "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==" - }, - "http-call": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/http-call/-/http-call-5.3.0.tgz", - "integrity": "sha512-ahwimsC23ICE4kPl9xTBjKB4inbRaeLyZeRunC/1Jy/Z6X8tv22MEAjK+KBOMSVLaqXPTTmd8638waVIKLGx2w==", - "dev": true, - "requires": { - "content-type": "^1.0.4", - "debug": "^4.1.1", - "is-retry-allowed": "^1.1.0", - "is-stream": "^2.0.0", - "parse-json": "^4.0.0", - "tunnel-agent": "^0.6.0" - }, - "dependencies": { - "is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", - "dev": true - } - } - }, - "http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - } - }, - "hyperlinker": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hyperlinker/-/hyperlinker-1.0.0.tgz", - "integrity": "sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ==", - "dev": true - }, - "iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "ieee754": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", - "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" - }, - "ignore": { - "version": "5.1.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.4.tgz", - "integrity": "sha512-MzbUSahkTW1u7JpKKjY7LCARd1fU5W2rLdxlM4kdkayuCwZImjkpluF9CM1aLewYJguPDqewLam18Y6AU69A8A==", - "dev": true - }, - "import-fresh": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz", - "integrity": 
"sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==", - "dev": true, - "requires": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - } - }, - "import-modules": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/import-modules/-/import-modules-1.1.0.tgz", - "integrity": "sha1-dI23nFzEK7lwHvq0JPiU5yYA6dw=", - "dev": true - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "dev": true - }, - "indent-string": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", - "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=" - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "inquirer": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.1.0.tgz", - "integrity": "sha512-5fJMWEmikSYu0nv/flMc475MhGbB7TSPd/2IpFV4I4rMklboCH2rQjYY5kKiYGHqUF9gvaambupcJFFG9dvReg==", - "dev": true, - "requires": { - "ansi-escapes": "^4.2.1", - "chalk": "^3.0.0", - "cli-cursor": "^3.1.0", - "cli-width": "^2.0.0", - "external-editor": "^3.0.3", - "figures": "^3.0.0", - "lodash": "^4.17.15", - "mute-stream": "0.0.8", - "run-async": "^2.4.0", - "rxjs": "^6.5.3", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0", - "through": "^2.3.6" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - 
"has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, - "string-width": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", - "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "interpret": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.2.0.tgz", - "integrity": "sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw==" - }, - "into-stream": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz", - "integrity": "sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY=", - "requires": { - "from2": "^2.1.1", - "p-is-promise": "^1.1.0" - } - }, - "ip-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.1.0.tgz", - "integrity": "sha512-pKnZpbgCTfH/1NLIlOduP/V+WRXzC2MOz3Qo8xmxk8C5GudJLgK5QyLVXOSWy3ParAH7Eemurl3xjv/WXYFvMA==" - }, - "is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": true - }, - "is-callable": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.5.tgz", - "integrity": "sha512-ESKv5sMCJB2jnHTWZ3O5itG+O128Hsus4K4Qh1h2/cgn2vbgnLSVqfV46AeJA9D5EeeLa9w81KUXMtn34zhX+Q==", - "dev": true - }, - "is-date-object": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", - "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", - "dev": true - }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" - }, - "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": 
"sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dev": true, - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true - }, - "is-object": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.1.tgz", - "integrity": "sha1-iVJojF7C/9awPsyF52ngKQMINHA=" - }, - "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=" - }, - "is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" - }, - "is-regex": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.5.tgz", - "integrity": "sha512-vlKW17SNq44owv5AQR3Cq0bQPEb8+kF3UKZ2fiZNOWtztYE5i0CzCZxFDwO58qAOWtxdBRVO/V5Qin1wjCqFYQ==", - "dev": true, - "requires": { - "has": "^1.0.3" - } - }, - "is-retry-allowed": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz", - "integrity": "sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==" - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" - }, - "is-string": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz", - "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==", - "dev": true - }, - "is-symbol": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", - "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", - "dev": true, - "requires": { - "has-symbols": "^1.0.1" - } - }, - "is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" - }, - "is-wsl": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz", - "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=", - "dev": true - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" - }, - "isomorphic-fetch": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz", - "integrity": "sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk=", - "requires": { - "node-fetch": "^1.0.1", - "whatwg-fetch": ">=0.10.0" - } - }, - "isomorphic-ws": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz", - "integrity": "sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==" - }, - "isstream": { - "version": "0.1.2", - "resolved": 
"https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" - }, - "istanbul-lib-coverage": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz", - "integrity": "sha512-8aXznuEPCJvGnMSRft4udDRDtb1V3pkQkMMI5LI+6HuQz5oQ4J2UFn1H82raA3qJtyOLkkwVqICBQkjnGtn5mA==", - "dev": true - }, - "istanbul-lib-hook": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/istanbul-lib-hook/-/istanbul-lib-hook-2.0.7.tgz", - "integrity": "sha512-vrRztU9VRRFDyC+aklfLoeXyNdTfga2EI3udDGn4cZ6fpSXpHLV9X6CHvfoMCPtggg8zvDDmC4b9xfu0z6/llA==", - "dev": true, - "requires": { - "append-transform": "^1.0.0" - } - }, - "istanbul-lib-instrument": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-3.3.0.tgz", - "integrity": "sha512-5nnIN4vo5xQZHdXno/YDXJ0G+I3dAm4XgzfSVTPLQpj/zAV2dV6Juy0yaf10/zrJOJeHoN3fraFe+XRq2bFVZA==", - "dev": true, - "requires": { - "@babel/generator": "^7.4.0", - "@babel/parser": "^7.4.3", - "@babel/template": "^7.4.0", - "@babel/traverse": "^7.4.3", - "@babel/types": "^7.4.0", - "istanbul-lib-coverage": "^2.0.5", - "semver": "^6.0.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "istanbul-lib-report": { - "version": "2.0.8", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-2.0.8.tgz", - "integrity": "sha512-fHBeG573EIihhAblwgxrSenp0Dby6tJMFR/HvlerBsrCTD5bkUuoNtn3gVh29ZCS824cGGBPn7Sg7cNk+2xUsQ==", - "dev": true, - "requires": { - "istanbul-lib-coverage": "^2.0.5", - "make-dir": "^2.1.0", - "supports-color": "^6.1.0" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "istanbul-lib-source-maps": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-3.0.6.tgz", - "integrity": "sha512-R47KzMtDJH6X4/YW9XTx+jrLnZnscW4VpNN+1PViSYTejLVPWv7oov+Duf8YQSPyVRUvueQqz1TcsC6mooZTXw==", - "dev": true, - "requires": { - "debug": "^4.1.1", - "istanbul-lib-coverage": "^2.0.5", - "make-dir": "^2.1.0", - "rimraf": "^2.6.3", - "source-map": "^0.6.1" - }, - "dependencies": { - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "pify": { - 
"version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - } - } - }, - "istanbul-reports": { - "version": "2.2.7", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-2.2.7.tgz", - "integrity": "sha512-uu1F/L1o5Y6LzPVSVZXNOoD/KXpJue9aeLRd0sM9uMXfZvzomB0WxVamWb5ue8kA2vVWEmW7EG+A5n3f1kqHKg==", - "dev": true, - "requires": { - "html-escaper": "^2.0.0" - } - }, - "isurl": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isurl/-/isurl-1.0.0.tgz", - "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==", - "requires": { - "has-to-string-tag-x": "^1.2.0", - "is-object": "^1.0.1" - } - }, - "js-sha3": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz", - "integrity": "sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==" - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, - "js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" - }, - "jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "dev": true - }, - "json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" - }, - "json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", - "dev": true - }, - "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=", - "dev": true - }, - "json-stringify-safe": { - "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" - }, - "jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "requires": { - "graceful-fs": "^4.1.6" - } - }, - "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "keyv": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz", - "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==", - "requires": { - "json-buffer": "3.0.0" - } - }, - "kuler": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/kuler/-/kuler-1.0.1.tgz", - "integrity": "sha512-J9nVUucG1p/skKul6DU3PUZrhs0LPulNaeUOox0IyXDi8S4CztTHs1gQphhuZmzXG7VOQSf6NJfKuzteQLv9gQ==", - "requires": { - "colornames": "^1.1.1" - } - }, - "levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - } - }, - "lines-and-columns": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", - "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", - "dev": true - }, - "load-json-file": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-6.2.0.tgz", - "integrity": "sha512-gUD/epcRms75Cw8RT1pUdHugZYM5ce64ucs2GEISABwkRsOQr0q2wm/MV2TKThycIe5e0ytRweW2RZxclogCdQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.15", - "parse-json": "^5.0.0", - "strip-bom": "^4.0.0", - "type-fest": "^0.6.0" - }, - "dependencies": { - "parse-json": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.0.0.tgz", - "integrity": "sha512-OOY5b7PAEFV0E2Fir1KOkxchnZNCdowAJgQ5NuxjpBKTRP3pQhwkrkxqQjeoKJ+fO7bCpmIZaogI4eZGDMEGOw==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1", - "lines-and-columns": "^1.1.6" - } - }, - "type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true - } - } - }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "requires": { - "p-locate": "^4.1.0" - } - }, - "lodash": { - "version": "4.17.20", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", - "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==" - }, - "lodash._reinterpolate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz", - "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0=" - }, - "lodash.camelcase": { - "version": "4.3.0", - "resolved": 
"https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=", - "dev": true - }, - "lodash.flattendeep": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz", - "integrity": "sha1-+wMJF/hqMTTlvJvsDWngAT3f7bI=", - "dev": true - }, - "lodash.get": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", - "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=", - "dev": true - }, - "lodash.kebabcase": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz", - "integrity": "sha1-hImxyw0p/4gZXM7KRI/21swpXDY=", - "dev": true - }, - "lodash.snakecase": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz", - "integrity": "sha1-OdcUo1NXFHg3rv1ktdy7Fr7Nj40=", - "dev": true - }, - "lodash.template": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz", - "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==", - "requires": { - "lodash._reinterpolate": "^3.0.0", - "lodash.templatesettings": "^4.0.0" - } - }, - "lodash.templatesettings": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz", - "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==", - "requires": { - "lodash._reinterpolate": "^3.0.0" - } - }, - "lodash.upperfirst": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/lodash.upperfirst/-/lodash.upperfirst-4.3.1.tgz", - "integrity": "sha1-E2Xt9DFIBIHvDRxolXpe2Z1J984=", - "dev": true - }, - "lodash.zip": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/lodash.zip/-/lodash.zip-4.2.0.tgz", - "integrity": "sha1-7GZi5IlkCO1KtsVCo5kLcswIACA=", - "dev": true - }, - "logform": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/logform/-/logform-2.1.2.tgz", - "integrity": "sha512-+lZh4OpERDBLqjiwDLpAWNQu6KMjnlXH2ByZwCuSqVPJletw0kTWJf5CgSNAUKn1KUkv3m2cUz/LK8zyEy7wzQ==", - "requires": { - "colors": "^1.2.1", - "fast-safe-stringify": "^2.0.4", - "fecha": "^2.3.3", - "ms": "^2.1.1", - "triple-beam": "^1.3.0" - } - }, - "long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==" - }, - "lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" - }, - "lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "requires": { - "yallist": "^3.0.2" - } - }, - "lru-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/lru-queue/-/lru-queue-0.1.0.tgz", - "integrity": "sha1-Jzi9nw089PhEkMVzbEhpmsYyzaM=", - "requires": { - "es5-ext": "~0.10.2" - } - }, - "make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": 
"sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dev": true, - "requires": { - "semver": "^6.0.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, - "md5.js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz", - "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1", - "safe-buffer": "^5.1.2" - } - }, - "memoizee": { - "version": "0.4.14", - "resolved": "https://registry.npmjs.org/memoizee/-/memoizee-0.4.14.tgz", - "integrity": "sha512-/SWFvWegAIYAO4NQMpcX+gcra0yEZu4OntmUdrBaWrJncxOqAziGFlHxc7yjKVK2uu3lpPW27P27wkR82wA8mg==", - "requires": { - "d": "1", - "es5-ext": "^0.10.45", - "es6-weak-map": "^2.0.2", - "event-emitter": "^0.3.5", - "is-promise": "^2.1", - "lru-queue": "0.1", - "next-tick": "1", - "timers-ext": "^0.1.5" - } - }, - "merge-source-map": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz", - "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==", - "dev": true, - "requires": { - "source-map": "^0.6.1" - }, - "dependencies": { - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - } - } - }, - "merge2": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.3.0.tgz", - "integrity": "sha512-2j4DAdlBOkiSZIsaXk4mTE3sRS02yBHAtfy127xRV3bQUFqXkjHCHLW6Scv7DwNRbIWNHH8zpnz9zMaKXIdvYw==", - "dev": true - }, - "micromatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", - "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", - "dev": true, - "requires": { - "braces": "^3.0.1", - "picomatch": "^2.0.5" - } - }, - "mime-db": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", - "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==" - }, - "mime-types": { - "version": "2.1.27", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", - "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", - "requires": { - "mime-db": "1.44.0" - } - }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true - }, - "mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==" - }, - "minimalistic-assert": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", - "integrity": 
"sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==" - }, - "minimalistic-crypto-utils": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz", - "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo=" - }, - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true - }, - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "requires": { - "minimist": "^1.2.5" - } - }, - "mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", - "dev": true - }, - "mock-stdin": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/mock-stdin/-/mock-stdin-0.3.1.tgz", - "integrity": "sha1-xlfZZC2QeGQ1xkyl6Zu9TQm9fdM=", - "dev": true - }, - "moment": { - "version": "2.26.0", - "resolved": "https://registry.npmjs.org/moment/-/moment-2.26.0.tgz", - "integrity": "sha512-oIixUO+OamkUkwjhAVE18rAMfRJNsNe/Stid/gwHSOfHrOtw9EhAY2AHvdKZ/k/MggcYELFCJz/Sn2pL8b8JMw==" - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "mute-stream": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", - "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", - "dev": true - }, - "nan": { - "version": "2.14.1", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz", - "integrity": "sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw==" - }, - "natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", - "dev": true - }, - "natural-orderby": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/natural-orderby/-/natural-orderby-2.0.3.tgz", - "integrity": "sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q==", - "dev": true - }, - "nested-error-stacks": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/nested-error-stacks/-/nested-error-stacks-2.1.0.tgz", - "integrity": "sha512-AO81vsIO1k1sM4Zrd6Hu7regmJN1NSiAja10gc4bX3F0wd+9rQmcuHQaHVQCYIEC8iFXnE+mavh23GOt7wBgug==", - "dev": true - }, - "next-tick": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz", - "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==" - }, - "nice-try": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" - }, - "nock": { - "version": "12.0.3", - "resolved": "https://registry.npmjs.org/nock/-/nock-12.0.3.tgz", - "integrity": "sha512-QNb/j8kbFnKCiyqi9C5DD0jH/FubFGj5rt9NQFONXwQm3IPB0CULECg/eS3AU1KgZb/6SwUa4/DTRKhVxkGABw==", - "requires": { - "debug": "^4.1.0", - "json-stringify-safe": "^5.0.1", - "lodash": "^4.17.13", - "propagate": "^2.0.0" - } - }, - "node-fetch": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-1.7.3.tgz", - "integrity": "sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ==", - "requires": { - "encoding": "^0.1.11", - "is-stream": "^1.0.1" - } - }, - "node-forge": { - "version": "0.8.5", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.8.5.tgz", - "integrity": "sha512-vFMQIWt+J/7FLNyKouZ9TazT74PRV3wgv9UT4cRjC8BffxFbKXkgIWR42URCPSnHm/QDz6BOlb2Q0U4+VQT67Q==" - }, - "node-jose": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/node-jose/-/node-jose-1.1.4.tgz", - "integrity": "sha512-L31IFwL3pWWcMHxxidCY51ezqrDXMkvlT/5pLTfNw5sXmmOLJuN6ug7txzF/iuZN55cRpyOmoJrotwBQIoo5Lw==", - "requires": { - "base64url": "^3.0.1", - "browserify-zlib": "^0.2.0", - "buffer": "^5.5.0", - "es6-promise": "^4.2.8", - "lodash": "^4.17.15", - "long": "^4.0.0", - "node-forge": "^0.8.5", - "process": "^0.11.10", - "react-zlib-js": "^1.0.4", - "uuid": "^3.3.3" - } - }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, - "normalize-url": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", - "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", - "requires": { - "prepend-http": "^2.0.0", - "query-string": "^5.0.1", - "sort-keys": "^2.0.0" - } - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "requires": { - "path-key": "^2.0.0" - } - }, - "nyc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/nyc/-/nyc-14.1.1.tgz", - "integrity": "sha512-OI0vm6ZGUnoGZv/tLdZ2esSVzDwUC88SNs+6JoSOMVxA+gKMB8Tk7jBwgemLx4O40lhhvZCVw1C+OYLOBOPXWw==", - "dev": true, - "requires": { - "archy": "^1.0.0", - "caching-transform": "^3.0.2", - "convert-source-map": "^1.6.0", - "cp-file": "^6.2.0", - "find-cache-dir": "^2.1.0", - "find-up": "^3.0.0", - "foreground-child": "^1.5.6", - "glob": "^7.1.3", - "istanbul-lib-coverage": "^2.0.5", - "istanbul-lib-hook": "^2.0.7", - "istanbul-lib-instrument": "^3.3.0", - "istanbul-lib-report": "^2.0.8", - "istanbul-lib-source-maps": "^3.0.6", - "istanbul-reports": "^2.2.4", - "js-yaml": "^3.13.1", - "make-dir": "^2.1.0", - "merge-source-map": "^1.1.0", - "resolve-from": "^4.0.0", - "rimraf": "^2.6.3", - "signal-exit": "^3.0.2", - "spawn-wrap": "^1.4.2", - "test-exclude": "^5.2.3", - "uuid": "^3.3.2", - "yargs": "^13.2.2", - "yargs-parser": "^13.0.0" - }, - "dependencies": 
{ - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", - "dev": true, - "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - } - } - }, - "oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" - }, - "object-hash": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-1.3.1.tgz", - "integrity": "sha512-OSuu/pU4ENM9kmREg0BdNrUDIl1heYa4mBZacJc+vVWz4GtAwu7jO8s4AIt2aGRUTqxykpWzI3Oqnsm13tTMDA==" - }, - "object-inspect": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.7.0.tgz", - "integrity": "sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw==", - "dev": true - }, - "object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true - }, - "object-treeify": { - "version": "1.1.24", - "resolved": "https://registry.npmjs.org/object-treeify/-/object-treeify-1.1.24.tgz", - "integrity": "sha512-ttlIN3MoqnhevarRtDNELvNjQ85Wguq2zSkR2N9DGFM3pFWMjsz7tSqbjX7lx16BmFwLOwBa3w0TY1jJajklFg==", - "dev": true - }, - "object.assign": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", - "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", - "dev": true, - "requires": { - "define-properties": "^1.1.2", - "function-bind": "^1.1.1", - "has-symbols": "^1.0.0", - "object-keys": "^1.0.11" - } - }, - "object.values": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/object.values/-/object.values-1.1.1.tgz", - "integrity": "sha512-WTa54g2K8iu0kmS/us18jEmdv1a4Wi//BZ/DTVYEcH0XhLM5NYdpDHja3gt57VrZLcNAO2WGA+KpWsDBaHt6eA==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.0-next.1", - "function-bind": "^1.1.1", - "has": "^1.0.3" - } - }, - "oidc-token-hash": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/oidc-token-hash/-/oidc-token-hash-3.0.2.tgz", - "integrity": "sha512-dTzp80/y/da+um+i+sOucNqiPpwRL7M/xPwj7pH1TFA2/bqQ+OK2sJahSXbemEoLtPkHcFLyhLhLWZa9yW5+RA==" - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "requires": { - "wrappy": "1" - } - }, - "one-time": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/one-time/-/one-time-0.0.4.tgz", - "integrity": "sha1-+M33eISCb+Tf+T46nMN7HkSAdC4=" - }, - "onetime": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz", - "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==", - "dev": true, - "requires": { - "mimic-fn": "^2.1.0" - } - }, - "openid-client": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-2.5.0.tgz", - "integrity": "sha512-t3hFD7xEoW1U25RyBcRFaL19fGGs6hNVTysq9pgmiltH0IVUPzH/bQV9w24pM5Q7MunnGv2/5XjIru6BQcWdxg==", - "requires": { - "base64url": "^3.0.0", - "got": "^8.3.2", - "lodash": "^4.17.11", - "lru-cache": "^5.1.1", - "node-jose": "^1.1.0", - "object-hash": "^1.3.1", - "oidc-token-hash": "^3.0.1", - "p-any": "^1.1.0" - } - }, - "optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", - "dev": true, - "requires": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" - } - }, - "os-homedir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", - "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=", - "dev": true - }, - "os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", - "dev": true - }, - "p-any": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-any/-/p-any-1.1.0.tgz", - "integrity": "sha512-Ef0tVa4CZ5pTAmKn+Cg3w8ABBXh+hHO1aV8281dKOoUHfX+3tjG2EaFcC+aZyagg9b4EYGsHEjz21DnEE8Og2g==", - "requires": { - "p-some": "^2.0.0" - } - }, - "p-cancelable": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz", - "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ==" - }, - "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=" - }, - "p-is-promise": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz", - "integrity": "sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4=" - }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": 
"sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "requires": { - "p-limit": "^2.2.0" - } - }, - "p-some": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/p-some/-/p-some-2.0.1.tgz", - "integrity": "sha1-Zdh8ixVO289SIdFnd4ttLhUPbwY=", - "requires": { - "aggregate-error": "^1.0.0" - } - }, - "p-timeout": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz", - "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==", - "requires": { - "p-finally": "^1.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true - }, - "package-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/package-hash/-/package-hash-3.0.0.tgz", - "integrity": "sha512-lOtmukMDVvtkL84rJHI7dpTYq+0rli8N2wlnqUcBuDWCfVhRUfOmnR9SsoHFMLpACvEV60dX7rd0rFaYDZI+FA==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.15", - "hasha": "^3.0.0", - "lodash.flattendeep": "^4.4.0", - "release-zalgo": "^1.0.0" - } - }, - "pako": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", - "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" - }, - "parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "requires": { - "callsites": "^3.0.0" - } - }, - "parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, - "requires": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - } - }, - "password-prompt": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/password-prompt/-/password-prompt-1.1.2.tgz", - "integrity": "sha512-bpuBhROdrhuN3E7G/koAju0WjVw9/uQOG5Co5mokNj0MiOSBVZS1JTwM4zl55hu0WFmIEFvO9cU9sJQiBIYeIA==", - "dev": true, - "requires": { - "ansi-escapes": "^3.1.0", - "cross-spawn": "^6.0.5" - }, - "dependencies": { - "ansi-escapes": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", - "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", - "dev": true - } - } - }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - 
"integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" - }, - "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" - }, - "path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true - }, - "pathval": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz", - "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=", - "dev": true - }, - "pbkdf2": { - "version": "3.0.17", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz", - "integrity": "sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==", - "requires": { - "create-hash": "^1.1.2", - "create-hmac": "^1.1.4", - "ripemd160": "^2.0.1", - "safe-buffer": "^5.0.1", - "sha.js": "^2.4.8" - } - }, - "performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" - }, - "picomatch": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", - "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", - "dev": true - }, - "pify": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=" - }, - "pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dev": true, - "requires": { - "find-up": "^4.0.0" - } - }, - "prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true - }, - "prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=" - }, - "process": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", - "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=" - }, - "process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" - }, - "progress": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true - }, - "propagate": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/propagate/-/propagate-2.0.1.tgz", - "integrity": "sha512-vGrhOavPSTz4QVNuBNdcNXePNdNMaO1xj9yBeH1ScQPjk/rhg9sSlCXPhMkFuaNNW/syTvYqsnbIJxMBfRbbag==" - }, - "pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", - "dev": true - }, - "psl": { - "version": "1.8.0", - "resolved": 
"https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" - }, - "qqjs": { - "version": "0.3.11", - "resolved": "https://registry.npmjs.org/qqjs/-/qqjs-0.3.11.tgz", - "integrity": "sha512-pB2X5AduTl78J+xRSxQiEmga1jQV0j43jOPs/MTgTLApGFEOn6NgdE2dEjp7nvDtjkIOZbvFIojAiYUx6ep3zg==", - "dev": true, - "requires": { - "chalk": "^2.4.1", - "debug": "^4.1.1", - "execa": "^0.10.0", - "fs-extra": "^6.0.1", - "get-stream": "^5.1.0", - "glob": "^7.1.2", - "globby": "^10.0.1", - "http-call": "^5.1.2", - "load-json-file": "^6.2.0", - "pkg-dir": "^4.2.0", - "tar-fs": "^2.0.0", - "tmp": "^0.1.0", - "write-json-file": "^4.1.1" - }, - "dependencies": { - "execa": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.10.0.tgz", - "integrity": "sha512-7XOMnz8Ynx1gGo/3hyV9loYNPWM94jG3+3T3Y8tsfSstFmETmENCMU/A/zj8Lyaj1lkgEepKepvd6240tBRvlw==", - "dev": true, - "requires": { - "cross-spawn": "^6.0.0", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=", - "dev": true - } - } - }, - "fs-extra": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-6.0.1.tgz", - "integrity": "sha512-GnyIkKhhzXZUWFCaJzvyDLEEgDkPfb4/TPvJCJVuS8MWZgoSsErf++QpiAlDnKFcqhRlm+tIOcencCjyJE6ZCA==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - } - }, - "get-stream": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", - "integrity": "sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", - "dev": true, - "requires": { - "pump": "^3.0.0" - } - } - } - }, - "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" - }, - "query-string": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", - "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", - "requires": { - "decode-uri-component": "^0.2.0", - "object-assign": "^4.1.0", - "strict-uri-encode": "^1.0.0" - } - }, - "ramda": { - "version": "0.26.1", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.26.1.tgz", - "integrity": "sha512-hLWjpy7EnsDBb0p+Z3B7rPi3GDeRG5ZtiI33kJhTt+ORCd38AbAIjB/9zRIUoeTbE/AVX5ZkU7m6bznsvrf8eQ==", - "dev": true - }, - "randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": 
"sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "requires": { - "safe-buffer": "^5.1.0" - } - }, - "react-zlib-js": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/react-zlib-js/-/react-zlib-js-1.0.4.tgz", - "integrity": "sha512-ynXD9DFxpE7vtGoa3ZwBtPmZrkZYw2plzHGbanUjBOSN4RtuXdektSfABykHtTiWEHMh7WdYj45LHtp228ZF1A==" - }, - "read-pkg": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", - "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", - "dev": true, - "requires": { - "load-json-file": "^4.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^3.0.0" - }, - "dependencies": { - "load-json-file": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", - "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "parse-json": "^4.0.0", - "pify": "^3.0.0", - "strip-bom": "^3.0.0" - } - }, - "path-type": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", - "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", - "dev": true, - "requires": { - "pify": "^3.0.0" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - } - } - }, - "read-pkg-up": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-4.0.0.tgz", - "integrity": "sha512-6etQSH7nJGsK0RbG/2TeDzZFa8shjQ1um+SwQQ5cwKy0dhSXdOncEhb1CPpvQG4h7FyOV6EB6YlV0yJvZQNAkA==", - "dev": true, - "requires": { - "find-up": "^3.0.0", - "read-pkg": "^3.0.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - } - } - }, - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - 
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, - "rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", - "requires": { - "resolve": "^1.1.6" - } - }, - "redeyed": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-2.1.1.tgz", - "integrity": "sha1-iYS1gV2ZyyIEacme7v/jiRPmzAs=", - "dev": true, - "requires": { - "esprima": "~4.0.0" - } - }, - "regenerator-runtime": { - "version": "0.13.5", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.5.tgz", - "integrity": "sha512-ZS5w8CpKFinUzOwW3c83oPeVXoNsrLsaCoLtJvAClH135j/R77RuymhiSErhm2lKcwSCIpmvIWSbDkIfAqKQlA==" - }, - "regexpp": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.1.0.tgz", - "integrity": "sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q==", - "dev": true - }, - "release-zalgo": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/release-zalgo/-/release-zalgo-1.0.0.tgz", - "integrity": "sha1-CXALflB0Mpc5Mw5TXFqQ+2eFFzA=", - "dev": true, - "requires": { - "es6-error": "^4.0.1" - } - }, - "request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "requires": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "dependencies": { - "form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - } - } - }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true - }, - "require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", - "dev": true - }, - "resolve": { - "version": "1.17.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.17.0.tgz", - "integrity": "sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==", - "requires": { - "path-parse": "^1.0.6" - } - }, - "resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true - }, - "responselike": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "requires": { - "lowercase-keys": "^1.0.0" - } - }, - "restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "requires": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - } - }, - "ret": { - "version": "0.1.15", - "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", - "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==", - "dev": true - }, - "reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true - }, - "rfc4648": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.3.0.tgz", - "integrity": "sha512-x36K12jOflpm1V8QjPq3I+pt7Z1xzeZIjiC8J2Oxd7bE1efTrOG241DTYVJByP/SxR9jl1t7iZqYxDX864jgBQ==" - }, - "rimraf": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", - "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", - "dev": true, - "requires": { - "glob": "^7.1.3" - } - }, - "ripemd160": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz", - "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==", - "requires": { - "hash-base": "^3.0.0", - "inherits": "^2.0.1" - } - }, - "run-async": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", - "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", - "dev": true - }, - "run-parallel": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.9.tgz", - "integrity": "sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q==", - "dev": true - }, - "rxjs": { - "version": "6.5.5", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.5.tgz", - "integrity": "sha512-WfQI+1gohdf0Dai/Bbmk5L5ItH5tYqm3ki2c5GdWhKjalzjg93N3avFjVStyZZz+A2Em+ZxKH5bNghw9UeylGQ==", - "requires": { - "tslib": "^1.9.0" - } - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "safe-regex": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", - "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", - "dev": true, - "requires": { - "ret": "~0.1.10" - } - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - }, - "set-blocking": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", - "dev": true - }, - "sha.js": { - "version": "2.4.11", - "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", - "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", - "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" - } - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" - }, - "shelljs": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz", - "integrity": "sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==", - "requires": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - } - }, - "signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" - }, - "simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=", - "requires": { - "is-arrayish": "^0.3.1" - }, - "dependencies": { - "is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" - } - } - }, - "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true - }, - "slice-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", - "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "astral-regex": "^1.0.0", - "is-fullwidth-code-point": "^2.0.0" - } - }, - "sort-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", - "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", - "requires": { - "is-plain-obj": "^1.0.0" - } - }, - "source-map": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", - "dev": true - }, - "spawn-wrap": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-1.4.3.tgz", - "integrity": "sha512-IgB8md0QW/+tWqcavuFgKYR/qIRvJkRLPJDFaoXtLLUaVcCDK0+HeFTkmQHj3eprcYhc+gOl0aEA1w7qZlYezw==", - "dev": true, - "requires": { - "foreground-child": "^1.5.6", - "mkdirp": "^0.5.0", - "os-homedir": "^1.0.1", - "rimraf": "^2.6.2", - "signal-exit": "^3.0.2", - "which": "^1.3.0" - } - }, - "spdx-correct": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", - "integrity": 
"sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==", - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz", - "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==", - "dev": true - }, - "sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" - }, - "sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, - "stack-trace": { - "version": "0.0.10", - "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.10.tgz", - "integrity": "sha1-VHxws0fo0ytOEI6hoqFZ5f3eGcA=" - }, - "stdout-stderr": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/stdout-stderr/-/stdout-stderr-0.1.13.tgz", - "integrity": "sha512-Xnt9/HHHYfjZ7NeQLvuQDyL1LnbsbddgMFKCuaQKwGCdJm8LnstZIXop+uOY36UR1UXXoHXfMbC1KlVdVd2JLA==", - "dev": true, - "requires": { - "debug": "^4.1.1", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, - "strict-uri-encode": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", - "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" - }, - "strip-ansi": 
{ - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "string.prototype.trimend": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.1.tgz", - "integrity": "sha512-LRPxFUaTtpqYsTeNKaFOw3R4bxIzWOnbQ837QfBylo8jIxtcbK/A/sMV7Q+OAV/vWo+7s25pOE10KYSjaSO06g==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - } - }, - "string.prototype.trimleft": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.2.tgz", - "integrity": "sha512-gCA0tza1JBvqr3bfAIFJGqfdRTyPae82+KTnm3coDXkZN9wnuW3HjGgN386D7hfv5CHQYCI022/rJPVlqXyHSw==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5", - "string.prototype.trimstart": "^1.0.0" - } - }, - "string.prototype.trimright": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.2.tgz", - "integrity": "sha512-ZNRQ7sY3KroTaYjRS6EbNiiHrOkjihL9aQE/8gfQ4DtAC/aEBRHFJa44OmoWxGGqXuJlfKkZW4WcXErGr+9ZFg==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5", - "string.prototype.trimend": "^1.0.0" - } - }, - "string.prototype.trimstart": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.1.tgz", - "integrity": "sha512-XxZn+QpvrBI1FOcg6dIpxUPgWCPuNXvMD72aaRaUQv1eD4e/Qy8i/hFTe0BUmD60p/QA6bh1avmuPTfNjqVWRw==", - "dev": true, - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "requires": { - "ansi-regex": "^4.1.0" - } - }, - "strip-bom": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", - "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", - "dev": true - }, - "strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=" - }, - "strip-json-comments": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.0.tgz", - "integrity": "sha512-e6/d0eBu7gHtdCqFt0xJr642LdToM5/cN4Qb9DbHjVx1CP5RyeM+zH7pbecEmDv/lBqb0QH+6Uqq75rxFPkM0w==", - "dev": true - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": 
"sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - }, - "supports-hyperlinks": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-1.0.1.tgz", - "integrity": "sha512-HHi5kVSefKaJkGYXbDuKbUGRVxqnWGn3J2e39CYcNJEfWciGq2zYtOhXLTlvrOZW1QU7VX67w7fMmWafHX9Pfw==", - "dev": true, - "requires": { - "has-flag": "^2.0.0", - "supports-color": "^5.0.0" - }, - "dependencies": { - "has-flag": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-2.0.0.tgz", - "integrity": "sha1-6CB68cx7MNRGzHC3NLXovhj4jVE=", - "dev": true - } - } - }, - "table": { - "version": "5.4.6", - "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", - "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", - "dev": true, - "requires": { - "ajv": "^6.10.2", - "lodash": "^4.17.14", - "slice-ansi": "^2.1.0", - "string-width": "^3.0.0" - }, - "dependencies": { - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "tar-fs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.0.tgz", - "integrity": "sha512-9uW5iDvrIMCVpvasdFHW0wJPez0K4JnMZtsuIeDI7HyMGJNxmDZDOCQROr7lXyS+iL/QMpj07qcjGYTSdRFXUg==", - "dev": true, - "requires": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.0.0" - } - }, - "tar-stream": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.1.2.tgz", - "integrity": "sha512-UaF6FoJ32WqALZGOIAApXx+OdxhekNMChu6axLJR85zMMjXKWFGjbIRe+J6P4UnRGg9rAwWvbTT0oI7hD/Un7Q==", - "dev": true, - "requires": { - "bl": "^4.0.1", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "dev": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "test-exclude": { - "version": "5.2.3", - "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-5.2.3.tgz", - "integrity": "sha512-M+oxtseCFO3EDtAaGH7iiej3CBkzXqFMbzqYAACdzKui4eZA+pq3tZEwChvOdNfa7xxy8BfbmgJSIr43cC/+2g==", - "dev": true, - "requires": { - "glob": "^7.1.3", - "minimatch": "^3.0.4", - "read-pkg-up": "^4.0.0", - "require-main-filename": "^2.0.0" - } - }, - "text-hex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/text-hex/-/text-hex-1.0.0.tgz", - "integrity": "sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==" - }, - "text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=", - "dev": true - }, - "through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": 
"sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", - "dev": true - }, - "timed-out": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz", - "integrity": "sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8=" - }, - "timers-ext": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.7.tgz", - "integrity": "sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==", - "requires": { - "es5-ext": "~0.10.46", - "next-tick": "1" - } - }, - "tmp": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.1.0.tgz", - "integrity": "sha512-J7Z2K08jbGcdA1kkQpJSqLF6T0tdQqpR2pnSUXsIchbPdTI9v3e85cLW0d6WDhwuAleOV71j2xWs8qMPfK7nKw==", - "dev": true, - "requires": { - "rimraf": "^2.6.3" - } - }, - "to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", - "dev": true - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "requires": { - "is-number": "^7.0.0" - } - }, - "tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "requires": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - } - }, - "triple-beam": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/triple-beam/-/triple-beam-1.3.0.tgz", - "integrity": "sha512-XrHUvV5HpdLmIj4uVMxHggLbFSZYIn7HEWsqePZcI50pco+MPqJ50wMGY794X7AOOhxOBAjbkqfAbEe/QMp2Lw==" - }, - "tslib": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.13.0.tgz", - "integrity": "sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q==" - }, - "tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" - }, - "type": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz", - "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==" - }, - "type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1" - } - }, - "type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", - "dev": true - }, - "type-fest": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", - "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==", - "dev": true - }, - "typedarray-to-buffer": { - "version": 
"3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "requires": { - "is-typedarray": "^1.0.0" - } - }, - "underscore": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.10.2.tgz", - "integrity": "sha512-N4P+Q/BuyuEKFJ43B9gYuOj4TQUHXX+j2FqguVOpjkssLUUrnJofCcBccJSCoeturDoZU6GorDTHSvUDlSQbTg==" - }, - "universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==" - }, - "unorm": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/unorm/-/unorm-1.6.0.tgz", - "integrity": "sha512-b2/KCUlYZUeA7JFUuRJZPUtr4gZvBh7tavtv4fvk4+KV9pfGiR6CQAQAWl49ZpR3ts2dk4FYkP7EIgDJoiOLDA==" - }, - "uri-js": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", - "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", - "requires": { - "punycode": "^2.1.0" - } - }, - "url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "requires": { - "prepend-http": "^2.0.0" - } - }, - "url-to-options": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/url-to-options/-/url-to-options-1.0.1.tgz", - "integrity": "sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k=" - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" - }, - "uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" - }, - "v8-compile-cache": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.0.tgz", - "integrity": "sha512-usZBT3PW+LOjM25wbqIlZwPeJV+3OSz3M1k1Ws8snlW39dZyYL9lOGC5FgPVHfk0jKmjiDV8Z0mIbVQPiwFs7g==", - "dev": true - }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "websocket": { - "version": "1.0.31", - "resolved": "https://registry.npmjs.org/websocket/-/websocket-1.0.31.tgz", - "integrity": "sha512-VAouplvGKPiKFDTeCCO65vYHsyay8DqoBSlzIO3fayrfOgU94lQN5a1uWVnFrMLceTJw/+fQXR5PGbUVRaHshQ==", - "requires": { - "debug": "^2.2.0", - "es5-ext": "^0.10.50", - "nan": "^2.14.0", - "typedarray-to-buffer": "^3.1.5", - "yaeti": "^0.0.6" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": 
"sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - } - } - }, - "whatwg-fetch": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz", - "integrity": "sha512-9GSJUgz1D4MfyKU7KRqwOjXCXTqWdFNvEr7eUBYchQiVc744mqK/MzXPNR2WsPkmkOa4ywfg8C2n8h+13Bey1Q==" - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "requires": { - "isexe": "^2.0.0" - } - }, - "which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", - "dev": true - }, - "widest-line": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz", - "integrity": "sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==", - "requires": { - "string-width": "^2.1.1" - } - }, - "winston": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/winston/-/winston-3.2.1.tgz", - "integrity": "sha512-zU6vgnS9dAWCEKg/QYigd6cgMVVNwyTzKs81XZtTFuRwJOcDdBg7AU0mXVyNbs7O5RH2zdv+BdNZUlx7mXPuOw==", - "requires": { - "async": "^2.6.1", - "diagnostics": "^1.1.1", - "is-stream": "^1.1.0", - "logform": "^2.1.1", - "one-time": "0.0.4", - "readable-stream": "^3.1.1", - "stack-trace": "0.0.x", - "triple-beam": "^1.3.0", - "winston-transport": "^4.3.0" - }, - "dependencies": { - "readable-stream": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", - "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - } - } - }, - "winston-transport": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.3.0.tgz", - "integrity": "sha512-B2wPuwUi3vhzn/51Uukcao4dIduEiPOcOt9HJ3QeaXgkJ5Z7UwpBzxS4ZGNHtrxrUvTwemsQiSys0ihOf8Mp1A==", - "requires": { - "readable-stream": "^2.3.6", - "triple-beam": "^1.2.0" - } - }, - "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true - }, - "wrap-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-4.0.0.tgz", - "integrity": "sha512-uMTsj9rDb0/7kk1PbcbCcwvHUxp60fGDB/NNXpVa0Q+ic/e7y5+BwTxKfQ33VYgDppSwi/FBzpetYzo8s6tfbg==", - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^2.1.1", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" - }, - "write": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", - "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", - "dev": true, - "requires": { - "mkdirp": "^0.5.1" - } - }, - "write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dev": true, - "requires": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "write-json-file": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/write-json-file/-/write-json-file-4.3.0.tgz", - "integrity": "sha512-PxiShnxf0IlnQuMYOPPhPkhExoCQuTUNPOa/2JWCYTmBquU9njyyDuwRKN26IZBlp4yn1nt+Agh2HOOBl+55HQ==", - "dev": true, - "requires": { - "detect-indent": "^6.0.0", - "graceful-fs": "^4.1.15", - "is-plain-obj": "^2.0.0", - "make-dir": "^3.0.0", - "sort-keys": "^4.0.0", - "write-file-atomic": "^3.0.0" - }, - "dependencies": { - "is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", - "dev": true - }, - "sort-keys": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-4.0.0.tgz", - "integrity": "sha512-hlJLzrn/VN49uyNkZ8+9b+0q9DjmmYcYOnbMQtpkLrYpPwRApDPZfmqbUfJnAA3sb/nRib+nDot7Zi/1ER1fuA==", - "dev": true, - "requires": { - "is-plain-obj": "^2.0.0" - } - } - } - }, - "ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", - "requires": { - "async-limiter": "~1.0.0" - } - }, - "xxhashjs": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/xxhashjs/-/xxhashjs-0.2.2.tgz", - "integrity": "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw==", - "requires": { - "cuint": "^0.2.2" - } - }, - "y18n": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", - "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==", - "dev": true - }, - "yaeti": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/yaeti/-/yaeti-0.0.6.tgz", - "integrity": "sha1-8m9ITXJoTPQr7ft2lwqhYI+/lXc=" - }, - "yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" - }, - "yargs": { - "version": "13.3.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", - "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", - "dev": true, - "requires": { - "cliui": "^5.0.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^13.1.2" - }, - "dependencies": { - "find-up": { - 
"version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - } - } - }, - "yargs-parser": { - "version": "13.1.2", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", - "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", - "dev": true, - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - } - } -} diff --git a/.maintain/chaostest/package.json b/.maintain/chaostest/package.json deleted file mode 100644 index b659f75181113..0000000000000 --- a/.maintain/chaostest/package.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "name": "chaostest", - "description": "A cli for chaos testing on substrate", - "version": "0.0.0", - "author": "HarryHong", - "bin": { - "chaostest": "./bin/run" - }, - "bugs": "https://github.com/paritytech/substrate/issues", - "dependencies": { - "@kubernetes/client-node": "^0.11.1", - "@oclif/command": "^1", - "@oclif/config": "^1", - "@oclif/plugin-help": "^2", - "@polkadot/api": "^0.95.0-beta.14", - "@polkadot/keyring": "^1.6.0-beta.9", - "winston": "^3.2.1" - }, - "devDependencies": { - "@oclif/dev-cli": "^1", - "@oclif/test": "^1", - "chai": "^4", - "eslint": "^7.1.0", - "eslint-config-oclif": "^3.1", - "eslint-config-standard": "^14.1.1", - "eslint-plugin-import": "^2.20.2", - "eslint-plugin-node": "^11.1.0", - "eslint-plugin-promise": "^4.2.1", - "eslint-plugin-standard": "^4.0.1", - "globby": "^10", - "nyc": "^14" - }, - "engines": { - "node": ">=8.0.0" - }, - "files": [ - "/bin", - "/npm-shrinkwrap.json", - "/oclif.manifest.json", - "/src" - ], - "homepage": "https://github.com/paritytech/substrate/tree/master/.maintain/chaostest", - "keywords": [ - "oclif" - ], - "main": "src/index.js", - "oclif": { - "commands": "./src/commands", - "bin": "chaostest", - "plugins": [ - "@oclif/plugin-help" - ] - }, - "repository": "https://github.com/paritytech/substrate/tree/master/.maintain/chaostest", - "scripts": { - "postpack": "rm -f oclif.manifest.json", - "posttest": "eslint .", - "prepack": "oclif-dev manifest && oclif-dev readme", - "version": "oclif-dev readme && git add README.md" - } -} diff --git 
a/.maintain/chaostest/src/commands/clean/index.js b/.maintain/chaostest/src/commands/clean/index.js deleted file mode 100644 index 9f8f5b95f8978..0000000000000 --- a/.maintain/chaostest/src/commands/clean/index.js +++ /dev/null @@ -1,31 +0,0 @@ -const { Command, flags } = require('@oclif/command') -const CONFIG = require('../../config')() -const logger = require('../../utils/logger') -const Hypervisor = require('../../hypervisor') - -class CleanCommand extends Command { - async run () { - const { flags } = this.parse(CleanCommand) - const namespace = flags.namespace || CONFIG.namespace - const hypervisor = new Hypervisor(CONFIG) - // Delete corresponding namespace, default to CONFIG.namespace - try { - if (namespace) { - await hypervisor.cleanup(namespace) - } else { - logger.debug('Nothing to clean up') - } - } catch (error) { - logger.error(error) - process.exit(1) - } - } -} - -CleanCommand.description = 'Clean up resources based on namespace' - -CleanCommand.flags = { - namespace: flags.string({ char: 'n', description: 'desired namespace to clean up', env: 'NAMESPACE' }) -} - -module.exports = CleanCommand diff --git a/.maintain/chaostest/src/commands/singlenodeheight/index.js b/.maintain/chaostest/src/commands/singlenodeheight/index.js deleted file mode 100644 index 05006d745b4e2..0000000000000 --- a/.maintain/chaostest/src/commands/singlenodeheight/index.js +++ /dev/null @@ -1,63 +0,0 @@ -const { Command, flags } = require('@oclif/command') -const CONFIG = require('../../config')() -const { succeedExit, errorExit } = require('../../utils/exit') -const Hypervisor = require('../../hypervisor') -const logger = require('../../utils/logger') - -class SingleNodeHeightCommand extends Command { - async run () { - const { flags } = this.parse(SingleNodeHeightCommand) - let port = flags.port - let url = flags.url - const wait = flags.wait || 600 * 1000 - const height = flags.height || 10 - const namespace = flags.namespace || CONFIG.namespace - const pod = flags.pod || (CONFIG.nodes && CONFIG.nodes[0]) ? CONFIG.nodes[0].podName : undefined - const now = Date.now() - - const hypervisor = new Hypervisor(CONFIG) - if (!!url && !!port) { - JsonRpcCallTestHeight(url, port) - } else if (!!pod && !!namespace) { - url = 'http://127.0.0.1' - port = 9933 - await hypervisor.startForwardServer(namespace, pod, port) - JsonRpcCallTestHeight(url, port) - } else { - errorExit('Not enough parameters provided. 
Either specify url and port or pod and namespace.') - } - - async function JsonRpcCallTestHeight (url, port) { - logger.debug('Polling chain height...') - if (Date.now() < now + wait) { - try { - const curHeight = await hypervisor.getChainBlockHeight(url, port) - logger.debug('Current Block Height: ' + curHeight) - if (curHeight > height) { - logger.info(`Single dev node Blockheight reached ${height}`) - succeedExit() - } else { - setTimeout(() => JsonRpcCallTestHeight(url, port), 2000) - } - } catch (error) { - errorExit('Error requesting chain block height', error) - } - } else { - errorExit('Timed out') - } - } - } -} - -SingleNodeHeightCommand.description = 'Test if targeted node is producing blocks > certain height' - -SingleNodeHeightCommand.flags = { - port: flags.integer({ char: 'p', description: 'port to deploy' }), - url: flags.string({ char: 'u', description: 'connect url' }), - timeout: flags.string({ char: 't', description: 'wait time in miliseconds to halt' }), - height: flags.string({ char: 'h', description: 'desired height to test' }), - pod: flags.string({ description: 'desired pod to test' }), - namespace: flags.string({ description: 'desired namespace to test' }) -} - -module.exports = SingleNodeHeightCommand diff --git a/.maintain/chaostest/src/commands/spawn/index.js b/.maintain/chaostest/src/commands/spawn/index.js deleted file mode 100644 index 785037b029536..0000000000000 --- a/.maintain/chaostest/src/commands/spawn/index.js +++ /dev/null @@ -1,52 +0,0 @@ -const { Command, flags } = require('@oclif/command') -const logger = require('../../utils/logger') -const Hypervisor = require('../../hypervisor') -const CONFIG = require('../../config')() - -class SpawnCommand extends Command { - async run () { - const { flags } = this.parse(SpawnCommand) - const { args } = this.parse(SpawnCommand) - const imageTag = flags.image || 'parity/substrate:latest' - const port = flags.port || 9933 - const namespace = flags.namespace || 'substrate-ci' - const validator = flags.validator || 0 - const node = flags.node || 1 - - const hypervisor = new Hypervisor(CONFIG) - try { - // Check/Create namespace - await hypervisor.readOrCreateNamespace(namespace) - const chainName = args.chainName - if (chainName) { - if (chainName === 'dev') { - logger.debug('Starting a fullnode in dev mode...') - await hypervisor.createDevNode(imageTag, port) - } else if (chainName === 'alicebob') { - await hypervisor.createAliceBobNodes(imageTag, port) - } else { - // TODO: customized chain with chainName - } - } - } catch (error) { - logger.error(error) - process.exit(1) - } - } -} - -SpawnCommand.description = 'Spawn a local testnet with options' - -SpawnCommand.flags = { - image: flags.string({ char: 'i', description: 'image to deploy' }), - port: flags.integer({ char: 'p', description: 'port to deploy on' }), - namespace: flags.string({ description: 'desired namespace to deploy to', env: 'NAMESPACE' }), - validator: flags.string({ char: 'v', description: 'number of validators' }), - node: flags.string({ char: 'n', description: 'number of full nodes, if not set but exists, default to 1' }), - key: flags.string({ char: 'k', description: 'number of full nodes, if not set but exists, default to 1' }), - chainspec: flags.string({ char: 'c', description: 'number of full nodes, if not set but exists, default to 1' }) -} - -SpawnCommand.args = [{ name: 'chainName' }] - -module.exports = SpawnCommand diff --git a/.maintain/chaostest/src/config/README.md b/.maintain/chaostest/src/config/README.md deleted file mode 
100644 index 655e6deacb376..0000000000000 --- a/.maintain/chaostest/src/config/README.md +++ /dev/null @@ -1,34 +0,0 @@ -chaostest CONFIG -========= - -Since deployment can behave differently, we want to keep a state between phases including different test subjects. - -# Content -The state could include informations such as: -``` -{ - namespace, - image, - bootnode: { - podname, - ip, - port, - peerId, - privateKey, - publicKey - }, - nodes: [{ - podname, - ip, - port, - nodeType: 'validator' | 'bootnode' | , - privateKey (validator only), - publicKey (validator only) - }] -} -``` - -# TODO -k8s configuration -chainspec -chaos-agent diff --git a/.maintain/chaostest/src/config/index.js b/.maintain/chaostest/src/config/index.js deleted file mode 100644 index 400597c2bddcc..0000000000000 --- a/.maintain/chaostest/src/config/index.js +++ /dev/null @@ -1,70 +0,0 @@ -const fs = require('fs') -const path = require('path') -const configPath = path.join(__dirname, './config.json') -const logger = require('../utils/logger') - -class Config { - constructor () { - this.load() - } - - async load () { - fs.readFile(configPath, (err, data) => { - if (err) { - if (err.code === 'ENOENT') { - this.reset() - } else { - throw err - } - } else { - try { - Object.assign(this, JSON.parse(data)) - } catch (error) { - logger.error('config file is corrupted, resetting...') - this.reset() - } - }; - }) - }; - - getConfig () { - return this - } - - async update () { - const data = JSON.stringify(this.getConfig()) - fs.writeFile(configPath, data, (err) => { - if (err) throw err - logger.debug('Configuration updated') - }) - } - - async setNamespace (namespace) { - this.namespace = namespace - this.update() - } - - async addNode (node) { - if (!this.nodes || Array.isArray(this.nodes)) { - this.nodes = [] - } - if (node.nodeType === 'bootnode') { - this.bootnode = node - } - this.nodes.push(node) - this.update() - } - - async reset () { - const data = JSON.stringify({}) - fs.writeFile(configPath, data, (err) => { - if (err) throw err - this.load() - }) - } -} - -module.exports = () => { - const config = new Config() - return config -} diff --git a/.maintain/chaostest/src/hypervisor/chainApi/api.js b/.maintain/chaostest/src/hypervisor/chainApi/api.js deleted file mode 100644 index f9265b6386ee4..0000000000000 --- a/.maintain/chaostest/src/hypervisor/chainApi/api.js +++ /dev/null @@ -1,16 +0,0 @@ -const chainApi = require('../modules/chainApi') - -exports.getApi = async function (endpoint) { - if (this._apiInstance && this._apiInstance.endpoint === endpoint) { - return this._apiInstance.instance - } else { - const instance = await chainApi.getApi(endpoint) - this._apiInstance = { endpoint, instance } - return instance - } -} - -exports.getChainBlockHeight = async function (url, port) { - const api = await this.getApi(url + ':' + port) - return chainApi.getChainBlockHeight(api) -} diff --git a/.maintain/chaostest/src/hypervisor/chainApi/index.js b/.maintain/chaostest/src/hypervisor/chainApi/index.js deleted file mode 100644 index c0802401d9182..0000000000000 --- a/.maintain/chaostest/src/hypervisor/chainApi/index.js +++ /dev/null @@ -1,4 +0,0 @@ -const api = require('./api') -module.exports = function (Hypervisor) { - Object.assign(Hypervisor.prototype, api) -} diff --git a/.maintain/chaostest/src/hypervisor/deployment/deployment.js b/.maintain/chaostest/src/hypervisor/deployment/deployment.js deleted file mode 100644 index 906734393af67..0000000000000 --- a/.maintain/chaostest/src/hypervisor/deployment/deployment.js +++ 
/dev/null @@ -1,123 +0,0 @@ -const k8s = require('../modules/k8s') -const { pollUntil } = require('../../utils/wait') -const { getBootNodeUrl } = require('../../utils') -const logger = require('../../utils/logger') - -exports.readOrCreateNamespace = async function (namespace) { - try { - logger.debug('Reading namespace') - await k8s.readNamespace(namespace) // if namespace is available, do not create here - } catch (error) { - if (error.response.statusCode !== 404) { - logger.error(error) - throw error - } - logger.debug('Namespace not present, creating...') - await k8s.createNamespace(namespace) - } - this.config.setNamespace(namespace) -} -exports.createAlice = async function (image, port) { - const substrateArgs = [ - '--chain=local', - '--node-key', - '0000000000000000000000000000000000000000000000000000000000000001', - '--validator', - '--no-telemetry', - '--rpc-cors', - 'all', - '--alice'] - const nodeSpec = { - nodeId: 'alice', - image, - port, - args: substrateArgs - } - nodeSpec.extraInfo = { - nodeType: 'bootnode', - privateKey: '', - publicKey: '', - peerId: '12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp' - } - await this.createNode(nodeSpec) -} - -exports.createBob = async function (image, port) { - const substrateArgs = [ - '--chain=local', - '--node-key', - '0000000000000000000000000000000000000000000000000000000000000002', - '--validator', - '--bob', - '--no-telemetry', - '--rpc-cors', - 'all', - '--bootnodes', - getBootNodeUrl(this.config.bootnode)] - const nodeSpec = { - nodeId: 'bob', - image, - port, - args: substrateArgs - } - nodeSpec.extraInfo = { - nodeType: 'validator', - privateKey: '', - publicKey: '' - } - await this.createNode(nodeSpec) -} - -exports.createAliceBobNodes = async function (image, port) { - await this.createAlice(image, port) - await this.createBob(image, port) -} - -exports.createDevNode = async function (image, port) { - const substrateArgs = ['--dev', '--rpc-external', '--ws-external'] - const nodeSpec = { - nodeId: 'node-1', - image, - port, - args: substrateArgs - } - await this.createNode(nodeSpec) -} - -exports.createNode = async function (nodeSpec) { - logger.info(`Creating ${nodeSpec.nodeId} as ${nodeSpec.extraInfo ? 
nodeSpec.extraInfo.nodeType : 'FullNode'} in ${this.config.namespace}`) - await k8s.createPod(nodeSpec, this.config.namespace) - logger.debug('Polling pod status') - const pod = await pollUntil( - () => k8s.getPod(nodeSpec.nodeId, this.config.namespace) - ) - const nodeInfo = { - podName: nodeSpec.nodeId, - ip: pod.status.podIP, - port: nodeSpec.port - } - if (nodeSpec.extraInfo) { - Object.assign(nodeInfo, nodeSpec.extraInfo) - } - logger.info(`${nodeSpec.nodeId} is created`) - this.config.addNode(nodeInfo) -} - -exports.cleanup = async function (namespace) { - await k8s.deleteNamespace(namespace) - if (namespace === this.config.namespace) { - this.config.reset() - } -} - -exports.getPodInfoInConfig = function (namespace, podName) { - if (this.config.namespace === namespace && Array.isArray(this.config.nodes)) { - return this.config.nodes.find((node) => node.podName === podName) - } else { - throw Error('No pod present in the namespace in config') - } -} - -exports.startForwardServer = async function (namespace, pod, port, onReady) { - await k8s.startForwardServer(namespace, pod, port, onReady) -} diff --git a/.maintain/chaostest/src/hypervisor/deployment/index.js b/.maintain/chaostest/src/hypervisor/deployment/index.js deleted file mode 100644 index a01865b6a5438..0000000000000 --- a/.maintain/chaostest/src/hypervisor/deployment/index.js +++ /dev/null @@ -1,4 +0,0 @@ -const deployment = require('./deployment') -module.exports = function (Hypervisor) { - Object.assign(Hypervisor.prototype, deployment) -} diff --git a/.maintain/chaostest/src/hypervisor/index.js b/.maintain/chaostest/src/hypervisor/index.js deleted file mode 100644 index 607f3a33d8421..0000000000000 --- a/.maintain/chaostest/src/hypervisor/index.js +++ /dev/null @@ -1,11 +0,0 @@ -const CONFIG = require('../config')() - -function Hypervisor (config) { - this.config = config || CONFIG -} - -// Mount sub modules of the Hypervisor class -require('./deployment')(Hypervisor) -require('./chainApi')(Hypervisor) - -module.exports = Hypervisor diff --git a/.maintain/chaostest/src/hypervisor/modules/chainApi.js b/.maintain/chaostest/src/hypervisor/modules/chainApi.js deleted file mode 100644 index b2ad897d06cba..0000000000000 --- a/.maintain/chaostest/src/hypervisor/modules/chainApi.js +++ /dev/null @@ -1,18 +0,0 @@ -const { ApiPromise, WsProvider } = require('@polkadot/api') -const { HttpProvider } = require('@polkadot/rpc-provider') - -const getApi = async (url) => { - const httpProvider = new HttpProvider(url) - return httpProvider - // const api = await ApiPromise.create({ provider: wsProvider }) - // return api - // TODO: tried to use websocket provider here, but the polkadot/api version is not stable yet, using http provider for now -} - -const getChainBlockHeight = async (provider) => { - const data = await provider.send('chain_getBlock', []) - const height = parseInt(data.block.header.number, 16) - return height -} - -module.exports = { getApi, getChainBlockHeight } diff --git a/.maintain/chaostest/src/hypervisor/modules/k8s.js b/.maintain/chaostest/src/hypervisor/modules/k8s.js deleted file mode 100644 index 14f22ff5e8dff..0000000000000 --- a/.maintain/chaostest/src/hypervisor/modules/k8s.js +++ /dev/null @@ -1,113 +0,0 @@ -const k8s = require('@kubernetes/client-node') -const { isFunction } = require('../../utils') -const logger = require('../../utils/logger') - -// load k8s -const kc = new k8s.KubeConfig() -kc.loadFromDefault() - -// load k8s Apis -const k8sAppApi = kc.makeApiClient(k8s.AppsV1Api) -const k8sCoreApi = 
kc.makeApiClient(k8s.CoreV1Api) - -const createNamespace = async namespace => { - const namespaceJson = { - apiVersion: 'v1', - kind: 'Namespace', - metadata: { - name: namespace - } - } - return await k8sCoreApi.createNamespace(namespaceJson) -} - -const readNamespace = async namespace => { - return await k8sCoreApi.readNamespace(namespace) -} - -const createPod = async (nodeSpec, namespace) => { - const { label, nodeId, image, args, port } = nodeSpec - const spec = { - metadata: { - labels: { - app: label - }, - name: nodeId - }, - spec: { - containers: [ - { - image: image, - imagePullPolicy: 'Always', - name: nodeId, - ports: [{ containerPort: port }], - args: args - } - ] - } - } - return await k8sCoreApi.createNamespacedPod(namespace, spec) -} - -const getDeploymentStatus = async (deploymentName, namespace) => { - const response = await k8sAppApi.readNamespacedDeploymentStatus(deploymentName, namespace) - const status = response.response.body.status - function getAvailability (item) { - return item.type === 'Available' - } - if (status && status.conditions) { - return status.conditions.find(getAvailability) - } - return undefined -} - -const deleteNamespace = async (namespace) => { - logger.debug(`Taking down Namespace ${namespace}...`) - if (process.env.KEEP_NAMESPACE && process.env.KEEP_NAMESPACE === 1) { - return - } - return k8sCoreApi.deleteNamespace(namespace) -} - -const getNamespacedPods = async (namespace) => { - const response = await k8sCoreApi.listNamespacedPod(namespace) - return response.body.items -} - -const getPod = async (podName, namespace) => { - const pods = await getNamespacedPods(namespace) - const found = pods.find( - (pod) => !!pod.metadata && pod.metadata.name === podName && !!pod.status && pod.status.podIP - ) - if (!found) { - throw Error(`GetNode(${podName}): node is not present in the cluster`) - } - return found -} - -const startForwardServer = async (namespace, pod, port, onReady) => new Promise((resolve, reject) => { - const net = require('net') - const forward = new k8s.PortForward(kc) - - // This simple server just forwards traffic from itself to a service running in kubernetes - // -> localhost:8080 -> port-forward-tunnel -> kubernetes-pod - // This is basically equivalent to 'kubectl port-forward ...' but in Javascript. 
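For reference, the tunnel this helper opens can also be created by hand — a minimal sketch, assuming `kubectl` is already configured against the same cluster, and using the spawn command's default namespace (`substrate-ci`), dev-node pod name (`node-1`), and RPC port (9933) as illustrative placeholders:

```sh
# Forward local port 9933 to port 9933 on the pod; stop with Ctrl-C.
kubectl port-forward --namespace substrate-ci pod/node-1 9933:9933

# The forwarded endpoint then answers JSON-RPC. This is the block query the
# chainApi module issues; the height is at data.block.header.number (hex).
curl -s -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"chain_getBlock","params":[]}' \
  http://127.0.0.1:9933
```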
- const server = net.createServer((socket) => { - forward.portForward(namespace, pod, [port], socket, null, socket) - }) - // TODO: add Ws proxy server to adopt the polkadot/api - server.listen(port, '127.0.0.1', (err) => { - if (err) { - logger.error('Error starting server') - reject(err) - } - logger.info('Forwarding server started, ready to connect') - resolve() - // Optional onReady hook when server started - if (onReady && isFunction(onReady)) { - onReady() - } - }) -}) - -module.exports = { createNamespace, readNamespace, createPod, deleteNamespace, getDeploymentStatus, getPod, getNamespacedPods, startForwardServer } diff --git a/.maintain/chaostest/src/index.js b/.maintain/chaostest/src/index.js deleted file mode 100644 index 176eca6d71ba6..0000000000000 --- a/.maintain/chaostest/src/index.js +++ /dev/null @@ -1 +0,0 @@ -module.exports = require('@oclif/command') diff --git a/.maintain/chaostest/src/utils/exit.js b/.maintain/chaostest/src/utils/exit.js deleted file mode 100644 index 3cf06d290440b..0000000000000 --- a/.maintain/chaostest/src/utils/exit.js +++ /dev/null @@ -1,12 +0,0 @@ -const logger = require('../utils/logger') - -const succeedExit = function () { - process.exit(0) -} - -const errorExit = function (msg, err) { - logger.error(msg, err) - process.exit(1) -} - -module.exports = { succeedExit, errorExit } diff --git a/.maintain/chaostest/src/utils/index.js b/.maintain/chaostest/src/utils/index.js deleted file mode 100644 index b50c177215a24..0000000000000 --- a/.maintain/chaostest/src/utils/index.js +++ /dev/null @@ -1,9 +0,0 @@ -const getBootNodeUrl = (bootnode) => { - return `/dns4/${bootnode.ip}/tcp/30333/p2p/${bootnode.peerId}` -} - -const isFunction = (obj) => { - return !!(obj && obj.constructor && obj.call && obj.apply) -} - -module.exports = { getBootNodeUrl, isFunction } diff --git a/.maintain/chaostest/src/utils/logger.js b/.maintain/chaostest/src/utils/logger.js deleted file mode 100644 index e1da0d8d07f49..0000000000000 --- a/.maintain/chaostest/src/utils/logger.js +++ /dev/null @@ -1,50 +0,0 @@ -const winston = require('winston') -const fs = require('fs') -const logDir = 'log' // Or read from a configuration -const { format, transports } = winston -const env = process.env.NODE_ENV || 'development' -const util = require('util') - -if (!fs.existsSync(logDir)) { - // Create the directory if it does not exist - fs.mkdirSync(logDir) -} - -const logFormat = format.printf(info => { - info.message = util.format(info.message) - if (info.metadata && Object.keys(info.metadata).length) { - info.message = util.format(info.message, info.metadata) - } - return `${info.timestamp} ${info.level}: ${info.message}` -}) - -const logger = winston.createLogger({ - level: env === 'development' ? 'debug' : 'info', - transports: [ - new transports.Console({ - format: format.combine( - format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), - // Format the metadata object - format.metadata({ fillExcept: ['message', 'level', 'timestamp', 'label'] }), - format.colorize(), - logFormat - ) - }), - new winston.transports.File({ - level: env === 'development' ? 
'debug' : 'info', - filename: logDir + '/logs.log', - format: format.combine( - format.timestamp(), - format.json() - ), - maxsize: 1024 * 1024 * 10 // 10MB - }) - ], - exceptionHandlers: [ - new winston.transports.File({ - filename: 'log/exceptions.log' - }) - ] -}) - -module.exports = logger diff --git a/.maintain/chaostest/src/utils/wait.js b/.maintain/chaostest/src/utils/wait.js deleted file mode 100644 index 72498d1acb2a6..0000000000000 --- a/.maintain/chaostest/src/utils/wait.js +++ /dev/null @@ -1,32 +0,0 @@ -const logger = require('./logger') -/** - * Wait n milliseconds - * - * @param n - In milliseconds - */ -function waitNMilliseconds (n) { - return new Promise((resolve) => { - setTimeout(resolve, n) - }) -} - -/** - * Run a function until that function correctly resolves - * - * @param fn - The function to run - */ -async function pollUntil (fn) { - try { - const result = await fn() - - return result - } catch (_error) { - logger.error('Error polling', _error) - logger.debug('awaiting...') - await waitNMilliseconds(5000) // FIXME We can add exponential delay here - - return pollUntil(fn) - } -} - -module.exports = { pollUntil, waitNMilliseconds } diff --git a/.maintain/gitlab/lib.sh b/.maintain/common/lib.sh similarity index 81% rename from .maintain/gitlab/lib.sh rename to .maintain/common/lib.sh index 33477b52f5891..ce6c566d799ab 100755 --- a/.maintain/gitlab/lib.sh +++ b/.maintain/common/lib.sh @@ -66,17 +66,23 @@ has_label(){ repo="$1" pr_id="$2" label="$3" + + # These will exist if the function is called in Gitlab. + # If the function's called in Github, we should have GITHUB_ACCESS_TOKEN set + # already. if [ -n "$GITHUB_RELEASE_TOKEN" ]; then - out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/pulls/$pr_id") - else - out=$(curl -H "Authorization: token $GITHUB_PR_TOKEN" -s "$api_base/$repo/pulls/$pr_id") + GITHUB_TOKEN="$GITHUB_RELEASE_TOKEN" + elif [ -n "$GITHUB_PR_TOKEN" ]; then + GITHUB_TOKEN="$GITHUB_PR_TOKEN" fi + + out=$(curl -H "Authorization: token $GITHUB_TOKEN" -s "$api_base/$repo/pulls/$pr_id") [ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ] } # Formats a message into a JSON string for posting to Matrix # message: 'any plaintext message' -# formatted_message: 'optional message formatted in html' +# formatted_message: 'optional message formatted in html' # Usage: structure_message $content $formatted_content (optional) structure_message() { if [ -z "$2" ]; then @@ -95,3 +101,17 @@ structure_message() { send_message() { curl -XPOST -d "$1" "https://matrix.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" } + +# Check for runtime changes between two commits. This is defined as any changes +# to bin/node/src/runtime, frame/ and primitives/sr_* trees. 
+has_runtime_changes() { + from=$1 + to=$2 + if git diff --name-only "${from}...${to}" \ + | grep -q -e '^frame/' -e '^primitives/' + then + return 0 + else + return 1 + fi +} diff --git a/.maintain/docker/subkey.Dockerfile b/.maintain/docker/subkey.Dockerfile index 9184cad5b4058..5797295806d00 100644 --- a/.maintain/docker/subkey.Dockerfile +++ b/.maintain/docker/subkey.Dockerfile @@ -1,4 +1,4 @@ -FROM debian:stretch-slim +FROM docker.io/library/ubuntu:20.04 # metadata ARG VCS_REF @@ -28,4 +28,3 @@ USER subkey RUN /usr/local/bin/subkey --version ENTRYPOINT ["/usr/local/bin/subkey"] - diff --git a/.maintain/docker/substrate.Dockerfile b/.maintain/docker/substrate.Dockerfile index 7cd4576a9e89f..e13dfb426adfd 100644 --- a/.maintain/docker/substrate.Dockerfile +++ b/.maintain/docker/substrate.Dockerfile @@ -1,4 +1,4 @@ -FROM debian:stretch-slim +FROM docker.io/library/ubuntu:20.04 # metadata ARG VCS_REF @@ -42,4 +42,3 @@ EXPOSE 30333 9933 9944 VOLUME ["/substrate"] ENTRYPOINT ["/usr/local/bin/substrate"] - diff --git a/.maintain/flamingfir-deploy.sh b/.maintain/flamingfir-deploy.sh deleted file mode 100755 index 8f0fb3a2bc016..0000000000000 --- a/.maintain/flamingfir-deploy.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -RETRY_COUNT=10 -RETRY_ATTEMPT=0 -SLEEP_TIME=15 -TARGET_HOST="$1" -COMMIT=$(cat artifacts/substrate/VERSION) -DOWNLOAD_URL="https://releases.parity.io/substrate/x86_64-debian:stretch/${COMMIT}/substrate/substrate" -POST_DATA='{"extra_vars":{"artifact_path":"'${DOWNLOAD_URL}'","target_host":"'${TARGET_HOST}'"}}' - -JOB_ID=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" --header "Content-type: application/json" --post-data "${POST_DATA}" https://ansible-awx.parity.io/api/v2/job_templates/32/launch/ | jq .job) - -echo "Launched job: $JOB_ID" - - -while [ ${RETRY_ATTEMPT} -le ${RETRY_COUNT} ] ; do - export RETRY_RESULT=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" https://ansible-awx.parity.io/api/v2/jobs/${JOB_ID}/ | jq .status) - RETRY_ATTEMPT=$(( $RETRY_ATTEMPT +1 )) - sleep $SLEEP_TIME - if [ $(echo $RETRY_RESULT | egrep -e successful -e failed) ] ; then - break - fi -done - -AWX_OUTPUT=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" https://ansible-awx.parity.io/api/v2/jobs/${JOB_ID}/stdout?format=txt_download) - -echo "AWX job log:" -echo "${AWX_OUTPUT}" - - -JOB_STATUS=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" https://ansible-awx.parity.io/api/v2/jobs/${JOB_ID}/ | jq .status ) - -echo "===================================" -echo -e "Ansible AWX Remote Job: ${JOB_ID} \x1B[31mStatus: ${JOB_STATUS}\x1B[0m" -echo "===================================" diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs new file mode 100644 index 0000000000000..045140d54dff7 --- /dev/null +++ b/.maintain/frame-weight-template.hbs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for {{pallet}} +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} +//! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: {{cmd.repeat}}, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` +//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} + +// Executed Command: +{{#each args as |arg|~}} +// {{arg}} +{{/each}} + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for {{pallet}}. +pub trait WeightInfo { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{c.name}}: u32, {{/each~}} + ) -> Weight; + {{~/each}} +} + +/// Weights for {{pallet}} using the Substrate node and recommended hardware. +pub struct SubstrateWeight<T>(PhantomData<T>); +{{~#if (eq pallet "frame_system")}} +impl<T: crate::Config> WeightInfo for SubstrateWeight<T> { +{{~else}} +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { +{{~/if}} + {{~#each benchmarks as |benchmark|}} + {{~#each benchmark.comments as |comment|}} + // {{comment}} + {{~/each}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} + {{~#each benchmark.component_writes as |cw|}} + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + {{~/each}} + } + {{~/each}} +} + +// For backwards compatibility and tests +impl WeightInfo for () { + {{~#each benchmarks as |benchmark|}} + {{~#each benchmark.comments as |comment|}} + // {{comment}} + {{~/each}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} + {{~#each benchmark.component_writes as |cw|}} + 
.saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + {{~/each}} + } + {{~/each}} +} diff --git a/.maintain/github/check_labels.sh b/.maintain/github/check_labels.sh new file mode 100755 index 0000000000000..7b0aed9fe7345 --- /dev/null +++ b/.maintain/github/check_labels.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +set -e + +#shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" + +repo="$GITHUB_REPOSITORY" +pr="$GITHUB_PR" + +ensure_labels() { + for label in "$@"; do + if has_label "$repo" "$pr" "$label"; then + return 0 + fi + done + return 1 +} + +# Must have one of the following labels +releasenotes_labels=( + 'B0-silent' + 'B3-apinoteworthy' + 'B5-clientnoteworthy' + 'B7-runtimenoteworthy' +) + +criticality_labels=( + 'C1-low 📌' + 'C3-medium 📣' + 'C7-high ❗️' + 'C9-critical ‼️' +) + +audit_labels=( + 'D1-audited 👍' + 'D2-notlive 💤' + 'D3-trivial 🧸' + 'D5-nicetohaveaudit ⚠️' + 'D9-needsaudit 👮' +) + +echo "[+] Checking release notes (B) labels" +if ensure_labels "${releasenotes_labels[@]}"; then + echo "[+] Release notes label detected. All is well." +else + echo "[!] Release notes label not detected. Please add one of: ${releasenotes_labels[*]}" + exit 1 +fi + +echo "[+] Checking release criticality (C) labels" +if ensure_labels "${criticality_labels[@]}"; then + echo "[+] Release criticality label detected. All is well." +else + echo "[!] Release criticality label not detected. Please add one of: ${criticality_labels[*]}" + exit 1 +fi + +if has_runtime_changes origin/master "${HEAD_SHA}"; then + echo "[+] Runtime changes detected. Checking audit (D) labels" + if ensure_labels "${audit_labels[@]}"; then + echo "[+] Release audit label detected. All is well." + else + echo "[!] Release audit label not detected. Please add one of: ${audit_labels[*]}" + exit 1 + fi +fi + +exit 0 diff --git a/.maintain/gitlab/check_labels.sh b/.maintain/gitlab/check_labels.sh deleted file mode 100755 index 5ab099b38291c..0000000000000 --- a/.maintain/gitlab/check_labels.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash - -#shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" - -ensure_labels() { - for label in "$@"; do - if has_label 'paritytech/substrate' "$CI_COMMIT_BRANCH" "$label"; then - return 0 - fi - done - return 1 -} - -# Must have one of the following labels -releasenotes_labels=( - 'B0-silent' - 'B3-apinoteworthy' - 'B5-clientnoteworthy' - 'B7-runtimenoteworthy' -) - -criticality_labels=( - 'C1-low' - 'C3-medium' - 'C7-high' - 'C9-critical' -) - -echo "[+] Checking release notes (B) labels for $CI_COMMIT_BRANCH" -if ensure_labels "${releasenotes_labels[@]}"; then - echo "[+] Release notes label detected. All is well." -else - echo "[!] Release notes label not detected. Please add one of: ${releasenotes_labels[*]}" - exit 1 -fi - -echo "[+] Checking release criticality (C) labels for $CI_COMMIT_BRANCH" -if ensure_labels "${criticality_labels[@]}"; then - echo "[+] Release criticality label detected. All is well." -else - echo "[!] Release criticality label not detected. Please add one of: ${criticality_labels[*]}" - exit 1 -fi - -exit 0 diff --git a/.maintain/gitlab/check_line_width.sh b/.maintain/gitlab/check_line_width.sh index 611d3ae2681e2..ebab3013e4b48 100755 --- a/.maintain/gitlab/check_line_width.sh +++ b/.maintain/gitlab/check_line_width.sh @@ -25,7 +25,7 @@ do echo "| error!" 
echo "| Lines must not be longer than ${LINE_WIDTH} characters." echo "| " - echo "| see more https://wiki.parity.io/Substrate-Style-Guide" + echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" echo "|" FAIL="true" fi @@ -41,7 +41,7 @@ do echo "| warning!" echo "| Lines should be longer than ${GOOD_LINE_WIDTH} characters only in exceptional circumstances!" echo "| " - echo "| see more https://wiki.parity.io/Substrate-Style-Guide" + echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" echo "|" fi echo "| file: ${file}" diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 219af5001b053..72bfaf7151522 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -9,6 +9,7 @@ # polkadot companion: paritytech/polkadot#567 # +set -e github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" # use github api v3 in order to access the data without authentication @@ -40,10 +41,9 @@ EOT git config --global user.name 'CI system' git config --global user.email '<>' -cargo install -f --version 0.2.0 diener - # Merge master into our branch before building Polkadot to make sure we don't miss # any commits that are required by Polkadot. +git fetch --depth 100 origin git merge origin/master # Clone the current Polkadot master branch into ./polkadot. @@ -67,8 +67,8 @@ then pr_body="$(sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p' "${pr_data_file}")" pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" if [ "${pr_companion}" ] @@ -85,10 +85,15 @@ else boldprint "this is not a pull request - building polkadot:master" fi -cd .. -$CARGO_HOME/bin/diener --substrate --branch $CI_COMMIT_REF_NAME --git https://gitlab.parity.io/parity/substrate.git --path polkadot -cd polkadot +# Patch all Substrate crates in Polkadot +diener patch --crates-to-patch ../ --substrate --path Cargo.toml + +# We need to update specifically our patched Substrate crates so that other +# crates that depend on them (e.g. Polkadot, BEEFY) use this unified version +# NOTE: There's no way to only update patched crates, so we use a heuristic +# of updating a crucial Substrate crate (`sp-core`) to minimize the impact of +# updating unrelated dependencies +cargo update -p sp-core # Test Polkadot pr or master branch with this Substrate commit. 
-cargo update -p sp-io -time cargo test --all --release --verbose +time cargo test --workspace --release --verbose --features=runtime-benchmarks diff --git a/.maintain/gitlab/check_polkadot_companion_status.sh b/.maintain/gitlab/check_polkadot_companion_status.sh index 35c2983886f46..e0412c7b7bec7 100755 --- a/.maintain/gitlab/check_polkadot_companion_status.sh +++ b/.maintain/gitlab/check_polkadot_companion_status.sh @@ -43,8 +43,8 @@ pr_body="$(curl -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_C # get companion if explicitly specified pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ + -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" if [ -z "${pr_companion}" ] @@ -56,7 +56,7 @@ fi boldprint "companion pr: #${pr_companion}" # check the status of that pull request - needs to be -# mergable and approved +# approved and mergable curl -H "${github_header}" -sS -o companion_pr.json \ ${github_api_polkadot_pull_url}/${pr_companion} @@ -64,20 +64,6 @@ curl -H "${github_header}" -sS -o companion_pr.json \ pr_head_sha=$(jq -r -e '.head.sha' < companion_pr.json) boldprint "Polkadot PR's HEAD SHA: $pr_head_sha" -if jq -e .merged < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} already merged" - exit 0 -fi - -if jq -e '.mergeable' < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} mergeable" -else - boldprint "polkadot pr #${pr_companion} not mergeable" - exit 1 -fi - curl -H "${github_header}" -sS -o companion_pr_reviews.json \ ${github_api_polkadot_pull_url}/${pr_companion}/reviews @@ -98,6 +84,19 @@ if [ -z "$(jq -r -e '.[].state | select(. == "APPROVED")' < companion_pr_reviews fi boldprint "polkadot pr #${pr_companion} state APPROVED" -exit 0 +if jq -e .merged < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} already merged" + exit 0 +fi + +if jq -e '.mergeable' < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} mergeable" +else + boldprint "polkadot pr #${pr_companion} not mergeable" + exit 1 +fi +exit 0 diff --git a/.maintain/gitlab/check_runtime.sh b/.maintain/gitlab/check_runtime.sh index 6d009c5aafc6a..71d6965ecf4fb 100755 --- a/.maintain/gitlab/check_runtime.sh +++ b/.maintain/gitlab/check_runtime.sh @@ -8,12 +8,13 @@ set -e # fail on any error - +#shellcheck source=../common/lib.sh +. 
"$(dirname "${0}")/../common/lib.sh" VERSIONS_FILE="bin/node/runtime/src/lib.rs" -boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } -boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } +boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; } +boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; } github_label () { echo @@ -23,7 +24,7 @@ github_label () { -F "ref=master" \ -F "variables[LABEL]=${1}" \ -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \ - ${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline + "${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline" } @@ -31,16 +32,14 @@ boldprint "latest 10 commits of ${CI_COMMIT_REF_NAME}" git log --graph --oneline --decorate=short -n 10 boldprint "make sure the master branch and release tag are available in shallow clones" -git fetch --depth=${GIT_DEPTH:-100} origin master -git fetch --depth=${GIT_DEPTH:-100} origin release +git fetch --depth="${GIT_DEPTH:-100}" origin master +git fetch --depth="${GIT_DEPTH:-100}" origin release git tag -f release FETCH_HEAD git log -n1 release boldprint "check if the wasm sources changed" -if ! git diff --name-only origin/master...${CI_COMMIT_SHA} \ - | grep -v -e '^primitives/sr-arithmetic/fuzzer' \ - | grep -q -e '^bin/node/src/runtime' -e '^frame/' -e '^primitives/sr-' +if ! has_runtime_changes origin/master "${CI_COMMIT_SHA}" then boldcat <<-EOT @@ -57,9 +56,9 @@ fi # consensus-critical logic that has changed. the runtime wasm blobs must be # rebuilt. -add_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ +add_spec_version="$(git diff tags/release ${CI_COMMIT_SHA} -- "${VERSIONS_FILE}" \ | sed -n -r "s/^\+[[:space:]]+spec_version: +([0-9]+),$/\1/p")" -sub_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ +sub_spec_version="$(git diff tags/release ${CI_COMMIT_SHA} -- "${VERSIONS_FILE}" \ | sed -n -r "s/^\-[[:space:]]+spec_version: +([0-9]+),$/\1/p")" @@ -67,8 +66,6 @@ sub_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ if [ "${add_spec_version}" != "${sub_spec_version}" ] then - github_label "D2-breaksapi" - boldcat <<-EOT changes to the runtime sources and changes in the spec version. @@ -82,9 +79,9 @@ else # check for impl_version updates: if only the impl versions changed, we assume # there is no consensus-critical logic that has changed. 
- add_impl_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ + add_impl_version="$(git diff tags/release ${CI_COMMIT_SHA} -- "${VERSIONS_FILE}" \ | sed -n -r 's/^\+[[:space:]]+impl_version: +([0-9]+),$/\1/p')" - sub_impl_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ + sub_impl_version="$(git diff tags/release ${CI_COMMIT_SHA} -- "${VERSIONS_FILE}" \ | sed -n -r 's/^\-[[:space:]]+impl_version: +([0-9]+),$/\1/p')" diff --git a/.maintain/gitlab/check_signed.sh b/.maintain/gitlab/check_signed.sh index 7c4cc47baba38..20d47c2304767 100755 --- a/.maintain/gitlab/check_signed.sh +++ b/.maintain/gitlab/check_signed.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +# shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" version="$CI_COMMIT_TAG" diff --git a/.maintain/gitlab/generate_changelog.sh b/.maintain/gitlab/generate_changelog.sh index c13871f50ee49..32ac1760a6117 100755 --- a/.maintain/gitlab/generate_changelog.sh +++ b/.maintain/gitlab/generate_changelog.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +# shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" version="$2" last_version="$1" @@ -32,7 +32,7 @@ $line" runtime_changes="$runtime_changes $line" fi - if has_label 'paritytech/substrate' "$pr_id" 'D1-runtime-migration'; then + if has_label 'paritytech/substrate' "$pr_id" 'E1-runtime-migration'; then migrations="$migrations $line" fi diff --git a/.maintain/gitlab/publish_draft_release.sh b/.maintain/gitlab/publish_draft_release.sh index c5813718a69f2..36ee0d63e78f9 100755 --- a/.maintain/gitlab/publish_draft_release.sh +++ b/.maintain/gitlab/publish_draft_release.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +# shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" version="$CI_COMMIT_TAG" diff --git a/.maintain/kubernetes/Chart.yaml b/.maintain/kubernetes/Chart.yaml deleted file mode 100644 index 8e000ae09f1c1..0000000000000 --- a/.maintain/kubernetes/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: substrate -version: 0.2 -appVersion: 0.9.1 -description: "Substrate: The platform for blockchain innovators" -home: https://substrate.network/ -icon: https://substrate.network/favicon.ico -sources: - - https://github.com/paritytech/substrate/ -maintainers: - - name: Paritytech Devops Team - email: devops-team@parity.io -tillerVersion: ">=2.8.0" diff --git a/.maintain/kubernetes/README.md b/.maintain/kubernetes/README.md deleted file mode 100644 index 0f3ec38990375..0000000000000 --- a/.maintain/kubernetes/README.md +++ /dev/null @@ -1,47 +0,0 @@ - - -# Substrate Kubernetes Helm Chart - -This [Helm Chart](https://helm.sh/) can be used for deploying containerized -**Substrate** to a [Kubernetes](https://kubernetes.io/) cluster. 
- - -## Prerequisites - -- Tested on Kubernetes 1.10.7-gke.6 - -## Installation - -To install the chart with the release name `my-release` into namespace -`my-namespace` from within this directory: - -```console -$ helm install --namespace my-namespace --name my-release --values values.yaml ./ -``` - -The command deploys Substrate on the Kubernetes cluster in the configuration -given in `values.yaml`. When the namespace is omitted it'll be installed in -the default one. - - -## Removal of the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete --namespace my-namespace my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - - -## Upgrading - -Once the chart is installed and a new version should be deployed helm takes -care of this by - -```console -$ helm upgrade --namespace my-namespace --values values.yaml my-release ./ -``` - - diff --git a/.maintain/kubernetes/templates/poddisruptionbudget.yaml b/.maintain/kubernetes/templates/poddisruptionbudget.yaml deleted file mode 100644 index 56958b1fbafd9..0000000000000 --- a/.maintain/kubernetes/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ .Values.GitlabEnvSlug | default .Values.app }} -spec: - selector: - matchLabels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - maxUnavailable: 1 - diff --git a/.maintain/kubernetes/templates/secrets.yaml b/.maintain/kubernetes/templates/secrets.yaml deleted file mode 100644 index 97e73ae7ff038..0000000000000 --- a/.maintain/kubernetes/templates/secrets.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.validator.keys }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Values.app }}-secrets - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} -type: Opaque -data: - secrets: {{ .Values.validator.keys | default "" }} -{{- end }} diff --git a/.maintain/kubernetes/templates/service.yaml b/.maintain/kubernetes/templates/service.yaml deleted file mode 100644 index b14bb74c10a1a..0000000000000 --- a/.maintain/kubernetes/templates/service.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# see: -# https://kubernetes.io/docs/tutorials/services/ -# https://kubernetes.io/docs/concepts/services-networking/service/ -# headless service for rpc -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.app }}-rpc -spec: - ports: - - port: 9933 - name: http-rpc - - port: 9944 - name: websocket-rpc - selector: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - sessionAffinity: None - type: ClusterIP - clusterIP: None ---- -{{- if .Values.listen_node_port }} -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.app }} -spec: - ports: - - port: 30333 - name: p2p - nodePort: 30333 - protocol: TCP - selector: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - sessionAffinity: None - type: NodePort - # don't route external traffic to non-local pods - externalTrafficPolicy: Local -{{- else if .Values.validator.keys }} -{{- $root := . -}} -{{- range until (int .Values.nodes.replicas) }} ---- -kind: Service -apiVersion: v1 -metadata: - name: {{ $root.Values.app }}-{{ . }} -spec: - selector: - statefulset.kubernetes.io/pod-name: {{ $root.Values.app }}-{{ . 
}} - ports: - - port: 30333 - targetPort: 30333 - protocol: TCP -{{- end }} -{{- end }} diff --git a/.maintain/kubernetes/templates/serviceaccount.yaml b/.maintain/kubernetes/templates/serviceaccount.yaml deleted file mode 100644 index 53d016bffedf9..0000000000000 --- a/.maintain/kubernetes/templates/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if .Values.rbac.enable }} -# service account for substrate pods themselves -# no permissions for the api are required -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - name: {{ .Values.rbac.name }} -{{- end }} diff --git a/.maintain/kubernetes/templates/statefulset.yaml b/.maintain/kubernetes/templates/statefulset.yaml deleted file mode 100644 index 0f34b3507a1d1..0000000000000 --- a/.maintain/kubernetes/templates/statefulset.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/ -# https://cloud.google.com/kubernetes-engine/docs/concepts/statefulset -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.app }} -spec: - selector: - matchLabels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - serviceName: {{ .Values.app }} - replicas: {{ .Values.nodes.replicas }} - updateStrategy: - type: RollingUpdate - podManagementPolicy: Parallel - template: - metadata: - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - spec: - {{- if .Values.rbac.enable }} - serviceAccountName: {{ .Values.rbac.name }} - {{- else }} - serviceAccountName: default - {{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node - operator: In - values: - - substrate - {{- if .Values.listen_node_port }} - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "app" - operator: In - values: - - {{ .Values.app }} - topologyKey: "kubernetes.io/hostname" - {{- end }} - terminationGracePeriodSeconds: 300 - {{- if .Values.validator.keys }} - volumes: - - name: {{ .Values.app }}-validator-secrets - secret: - secretName: {{ .Values.app }}-secrets - initContainers: - - name: prepare-secrets - image: busybox - command: [ "/bin/sh" ] - args: - - -c - - sed -n -r "s/^${POD_NAME}-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/key; - sed -n -r "s/^${POD_NAME}-node-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/node-key; - sed -n -r "s/^${POD_NAME}-name ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/name; - test -s {{ .Values.image.basepath }}/name || echo "${POD_NAME}" > {{ .Values.image.basepath }}/name - env: - # from (workaround for hostname) - # https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumeMounts: - - name: {{ .Values.app }}-validator-secrets - readOnly: true - mountPath: "/etc/validator" - - name: {{ .Values.app }}dir - mountPath: {{ .Values.image.basepath }} - {{- end }} - containers: - - name: {{ .Values.app }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- if .Values.resources }} - resources: - requests: - memory: {{ .Values.resources.memory }} - cpu: {{ .Values.resources.cpu }} - {{- end }} - ports: - - containerPort: 30333 - name: p2p - - containerPort: 9933 - name: http-rpc - - 
containerPort: 9944 - name: websocket-rpc - command: ["/bin/sh"] - args: - - -c - - exec /usr/local/bin/substrate - --base-path {{ .Values.image.basepath }} - {{- if .Values.validator.keys }} - --validator - --name $(cat {{ .Values.image.basepath }}/name) - --key $(cat {{ .Values.image.basepath }}/key) - --node-key $(cat {{ .Values.image.basepath }}/node-key) - {{- else }} - --name $(POD_NAME) - {{- end }} - {{- range .Values.nodes.args }} {{ . }} {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumeMounts: - - name: {{ .Values.app }}dir - mountPath: {{ .Values.image.basepath }} - readinessProbe: - httpGet: - path: /health - port: http-rpc - initialDelaySeconds: 10 - periodSeconds: 10 - livenessProbe: - httpGet: - path: /health - port: http-rpc - initialDelaySeconds: 10 - periodSeconds: 10 - securityContext: - runAsUser: 1000 - fsGroup: 1000 - volumeClaimTemplates: - - metadata: - name: {{ .Values.app }}dir - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: ssd - resources: - requests: - storage: 32Gi - diff --git a/.maintain/kubernetes/values.yaml b/.maintain/kubernetes/values.yaml deleted file mode 100644 index 4c3cb5c7d702d..0000000000000 --- a/.maintain/kubernetes/values.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# set tag manually --set image.tag=latest -image: - repository: parity/substrate - tag: latest - pullPolicy: Always - basepath: /substrate - - -# if set to true a service account for substrate will be created -rbac: - enable: true - name: substrate - - -# name of the statefulset -app: substrate -listen_node_port: true - -nodes: - replicas: 2 - args: - # name and data directory are set by the chart itself - # key and node-key may be provided on commandline invocation - # - # - --chain - # - krummelanke - # serve rpc within the local network - # - fenced off the world via firewall - # - used for health checks - - --rpc-external - - --ws-external - # - --log - # - sub-libp2p=trace - - -validator: {} - # providing 'keys' string via --set commandline parameter will run the nodes - # in validator mode (--validator). - # - # name, key and node-key can be given in a base64 encoded keyfile string (at - # validator.keys) which has the following format: - # - # substrate-0-name - # substrate-0-key - # substrate-0-node-key - # substrate-1-name - # substrate-1-key - # substrate-1-node-key - # - # pod names are canonical. changing these or providing different amount of - # keys than the replicas count will lead to behavior no one ever has - # experienced before. - - -# maybe adopt resource limits here to the nodes of the pool -# resources: -# memory: "5Gi" -# cpu: "1.5" - diff --git a/.maintain/sentry-node/docker-compose.yml b/.maintain/local-docker-test-network/docker-compose.yml similarity index 61% rename from .maintain/sentry-node/docker-compose.yml rename to .maintain/local-docker-test-network/docker-compose.yml index 2af9449853c77..53e2a2913f38b 100644 --- a/.maintain/sentry-node/docker-compose.yml +++ b/.maintain/local-docker-test-network/docker-compose.yml @@ -1,24 +1,26 @@ -# Docker compose file to simulate a sentry node setup. +# Docker compose file to start a multi node local test network. # +# # Nodes # -# Setup: +# - Validator node A +# - Validator node B +# - Light client C # -# Validator A is not supposed to be connected to the public internet. Instead it -# connects to a sentry node (sentry-a) which connects to the public internet. -# Validator B can reach validator A via sentry node A and vice versa. 
+# # Auxiliary nodes
+#
+# - Prometheus monitoring each node.
+# - Grafana pointed at the Prometheus node, configured with all dashboards.
 #
-# Usage:
+# # Usage
 #
 # 1. Build `target/release/substrate` binary: `cargo build --release`
-#
-# 2. Start networks and containers: `sudo docker-compose -f .maintain/sentry-node/docker-compose.yml up`
-#
-# 3. Reach:
-#    - polkadot/apps on localhost:3000
+# 2. Start networks and containers:
+#    `sudo docker-compose -f .maintain/local-docker-test-network/docker-compose.yml up`
+# 3. Connect to nodes:
 #    - validator-a: localhost:9944
 #    - validator-b: localhost:9945
-#    - sentry-a: localhost:9946
+#    - light-c: localhost:9946
+#    - via polkadot.js/apps: https://polkadot.js.org/apps/?rpc=ws%3A%2F%2Flocalhost%3A9944#/explorer
 #    - grafana: localhost:3001
 #    - prometheus: localhost:9090
@@ -34,9 +36,8 @@ services:
       - ../../target/release/substrate:/usr/local/bin/substrate
     image: parity/substrate
     networks:
-      - network-a
+      - internet
     command:
-      # Local node id: QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR
       - "--node-key"
      - "0000000000000000000000000000000000000000000000000000000000000001"
       - "--base-path"
@@ -46,48 +47,38 @@
       - "30333"
       - "--validator"
       - "--alice"
-      - "--sentry-nodes"
-      - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi"
-      - "--reserved-nodes"
-      - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi"
+      - "--bootnodes"
+      - "/dns/validator-b/tcp/30333/p2p/12D3KooWHdiAxVd8uMQR1hGWXccidmfCwLqcMpGwR6QcTP6QRMuD"
       # Not only bind to localhost.
       - "--unsafe-ws-external"
       - "--unsafe-rpc-external"
-      # - "--log"
-      # - "sub-libp2p=trace"
-      # - "--log"
-      # - "afg=trace"
       - "--log"
-      - "sub-authority-discovery=trace"
+      - "sub-libp2p=trace"
       - "--no-telemetry"
       - "--rpc-cors"
       - "all"
       - "--prometheus-external"

-  sentry-a:
+  validator-b:
     image: parity/substrate
     ports:
-      - "9946:9944"
+      - "9945:9944"
     volumes:
       - ../../target/release/substrate:/usr/local/bin/substrate
     networks:
-      - network-a
       - internet
     command:
-      # Local node id: QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi
       - "--node-key"
-      - "0000000000000000000000000000000000000000000000000000000000000003"
+      - "0000000000000000000000000000000000000000000000000000000000000002"
       - "--base-path"
-      - "/tmp/sentry"
+      - "/tmp/bob"
       - "--chain=local"
       - "--port"
       - "30333"
-      - "--sentry"
-      - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR"
-      - "--reserved-nodes"
-      - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR"
+      - "--validator"
+      - "--bob"
       - "--bootnodes"
-      - "/dns/validator-b/tcp/30333/p2p/QmSVnNf9HwVMT1Y4cK1P6aoJcEZjmoTXpjKBmAABLMnZEk"
+      - "/dns/validator-a/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp"
       - "--no-telemetry"
       - "--rpc-cors"
       - "all"
@@ -95,32 +86,30 @@
       - "--unsafe-ws-external"
       - "--unsafe-rpc-external"
       - "--log"
-      - "sub-authority-discovery=trace"
+      - "sub-libp2p=trace"
       - "--prometheus-external"

-  validator-b:
+  light-c:
     image: parity/substrate
     ports:
-      - "9945:9944"
+      - "9946:9944"
     volumes:
       - ../../target/release/substrate:/usr/local/bin/substrate
     networks:
       - internet
     command:
-      # Local node id: QmSVnNf9HwVMT1Y4cK1P6aoJcEZjmoTXpjKBmAABLMnZEk
       - "--node-key"
-      - "0000000000000000000000000000000000000000000000000000000000000002"
+      - "0000000000000000000000000000000000000000000000000000000000000003"
       - "--base-path"
-      - "/tmp/bob"
+      - "/tmp/light"
       - "--chain=local"
       - "--port"
       - "30333"
-      - "--validator"
-      - "--bob"
+      - "--light"
       - "--bootnodes"
-      - "/dns/validator-a/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR"
+      - "/dns/validator-a/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp"
       - "--bootnodes"
-      - "/dns/sentry-a/tcp/30333/p2p/QmV7EhW6J6KgmNdr558RH1mPx2xGGznW7At4BhXzntRFsi"
+      - "/dns/validator-b/tcp/30333/p2p/12D3KooWHdiAxVd8uMQR1hGWXccidmfCwLqcMpGwR6QcTP6QRMuD"
       - "--no-telemetry"
       - "--rpc-cors"
       - "all"
@@ -128,20 +117,19 @@
       - "--unsafe-ws-external"
       - "--unsafe-rpc-external"
       - "--log"
-      - "sub-authority-discovery=trace"
+      - "sub-libp2p=trace"
       - "--prometheus-external"

   prometheus:
     image: prom/prometheus
     networks:
-      - network-a
       - internet
     ports:
       - "9090:9090"
     links:
       - validator-a:validator-a
-      - sentry-a:sentry-a
       - validator-b:validator-b
+      - light-c:light-c
     volumes:
       - ./prometheus/:/etc/prometheus/
     restart: always
@@ -152,7 +140,6 @@
     depends_on:
       - prometheus
     networks:
-      - network-a
       - internet
     ports:
       - 3001:3000
diff --git a/.maintain/sentry-node/grafana/provisioning/dashboards/dashboards.yml b/.maintain/local-docker-test-network/grafana/provisioning/dashboards/dashboards.yml
similarity index 100%
rename from .maintain/sentry-node/grafana/provisioning/dashboards/dashboards.yml
rename to .maintain/local-docker-test-network/grafana/provisioning/dashboards/dashboards.yml
diff --git a/.maintain/sentry-node/grafana/provisioning/datasources/datasource.yml b/.maintain/local-docker-test-network/grafana/provisioning/datasources/datasource.yml
similarity index 100%
rename from .maintain/sentry-node/grafana/provisioning/datasources/datasource.yml
rename to .maintain/local-docker-test-network/grafana/provisioning/datasources/datasource.yml
diff --git a/.maintain/sentry-node/prometheus/prometheus.yml b/.maintain/local-docker-test-network/prometheus/prometheus.yml
similarity index 89%
rename from .maintain/sentry-node/prometheus/prometheus.yml
rename to .maintain/local-docker-test-network/prometheus/prometheus.yml
index 547d4bea57ae5..f8acb7c0b8ccd 100644
--- a/.maintain/sentry-node/prometheus/prometheus.yml
+++ b/.maintain/local-docker-test-network/prometheus/prometheus.yml
@@ -7,9 +7,9 @@ scrape_configs:
       - targets: ['validator-a:9615']
         labels:
           network: dev
-      - targets: ['sentry-a:9615']
+      - targets: ['validator-b:9615']
         labels:
           network: dev
-      - targets: ['validator-b:9615']
+      - targets: ['light-c:9615']
         labels:
           network: dev
diff --git a/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml b/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml
index 40a489bd09cf0..7ad916f022154 100644
--- a/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml
+++ b/.maintain/monitoring/alerting-rules/alerting-rule-tests.yaml
@@ -175,7 +175,7 @@ tests:
           polkadot-abcdef01234-abcdef has been monotonically increasing for more
           than 10 minutes."
       - exp_labels:
-          severity: critical
+          severity: warning
           pod: polkadot-abcdef01234-abcdef
           instance: polkadot-abcdef01234-abcdef
           job: polkadot
@@ -190,7 +190,7 @@
       # same. Thus expect an alert.
       exp_alerts:
         - exp_labels:
-            severity: critical
+            severity: warning
             pod: polkadot-abcdef01234-abcdef
             instance: polkadot-abcdef01234-abcdef
             job: polkadot
diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml
index 16a27c06d3e05..7a69cba66c3f3 100644
--- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml
+++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml
@@ -47,8 +47,8 @@ groups:
       # Under the assumption of an average block production of 6 seconds,
       # "best" and "finalized" being more than 10 blocks apart would imply
       # more than a 1 minute delay between block production and finalization.
-      expr: '(polkadot_block_height_number{status="best"} - ignoring(status)
-        polkadot_block_height_number{status="finalized"}) > 10'
+      expr: '(polkadot_block_height{status="best"} - ignoring(status)
+        polkadot_block_height{status="finalized"}) > 10'
       for: 8m
       labels:
         severity: critical
@@ -74,7 +74,7 @@
         increase(polkadot_sub_txpool_validations_finished[5m]) > 0'
       for: 30m
       labels:
-        severity: critical
+        severity: warning
       annotations:
         message: 'The transaction pool size on node {{ $labels.instance }} has
           been monotonically increasing for more than 30 minutes.'
@@ -83,7 +83,7 @@
         polkadot_sub_txpool_validations_finished > 10000'
       for: 5m
       labels:
-        severity: critical
+        severity: warning
       annotations:
         message: 'The transaction pool size on node {{ $labels.instance }} has
           been above 10_000 for more than 5 minutes.'
@@ -108,6 +108,13 @@
       annotations:
         message: 'The node {{ $labels.instance }} has less than 3 peers for more
           than 15 minutes'
+    - alert: NoIncomingConnection
+      expr: increase(polkadot_sub_libp2p_incoming_connections_total[20m]) == 0
+      labels:
+        severity: warning
+      annotations:
+        message: 'The node {{ $labels.instance }} has not received any new incoming
+          TCP connection in the past 20 minutes. Is it connected to the Internet?'

   ##############################################################################
   # System
@@ -127,8 +134,8 @@
   ##############################################################################

    - alert: ContinuousTaskEnded
-      expr: '(polkadot_tasks_spawned_total == 1) - on(instance, task_name)
-        (polkadot_tasks_ended_total == 1)'
+      expr: '(polkadot_tasks_spawned_total{task_name != "basic-authorship-proposer", task_name != "substrate-rpc-subscription"} == 1)
+        - on(instance, task_name) group_left() (polkadot_tasks_ended_total == 1)'
       for: 5m
       labels:
         severity: warning
@@ -147,3 +154,28 @@
         message: 'Authority discovery on node {{ $labels.instance }} fails to
           process more than 50 % of the values found on the DHT for more than 2
           hours.'
+
+    - alert: UnboundedChannelPersistentlyLarge
+      expr: '(
+          (polkadot_unbounded_channel_len{action = "send"} -
+          ignoring(action) polkadot_unbounded_channel_len{action = "received"})
+          or on(instance) polkadot_unbounded_channel_len{action = "send"}
+        ) >= 200'
+      for: 5m
+      labels:
+        severity: warning
+      annotations:
+        message: 'Channel {{ $labels.entity }} on node {{ $labels.instance }} contains
+          more than 200 items for more than 5 minutes. Node might be frozen.'
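To sanity-check the new rule, a promtool unit test can be written in the style of the `alerting-rule-tests.yaml` changes above. The sketch below is illustrative only and not part of this patch: the `entity` value, instance name, and sample values are hypothetical, chosen so that the computed backlog (the `send` counter minus the `received` counter) stays above the 200-item threshold long enough for the 5-minute `for:` period to elapse.

```yaml
# Hypothetical test entry for UnboundedChannelPersistentlyLarge, to sit under
# the existing `tests:` list of alerting-rule-tests.yaml.
# Run with: promtool test rules alerting-rule-tests.yaml
- interval: 1m
  input_series:
    # "send" grows by 300 items per minute while "received" stays at 0, so the
    # backlog computed by the rule's expression is always >= 200.
    - series: 'polkadot_unbounded_channel_len{action="send",
        entity="test-channel", instance="polkadot-abcdef01234-abcdef"}'
      values: '300+300x10'
    - series: 'polkadot_unbounded_channel_len{action="received",
        entity="test-channel", instance="polkadot-abcdef01234-abcdef"}'
      values: '0+0x10'
  alert_rule_test:
    # The expression is continuously above the threshold from the first sample,
    # so after the 5-minute `for:` period the alert must be firing. Note that
    # `ignoring(action)` drops the action label from the result, leaving only
    # entity and instance plus the rule's severity label.
    - eval_time: 6m
      alertname: UnboundedChannelPersistentlyLarge
      exp_alerts:
        - exp_labels:
            severity: warning
            entity: test-channel
            instance: polkadot-abcdef01234-abcdef
          exp_annotations:
            message: 'Channel test-channel on node polkadot-abcdef01234-abcdef
              contains more than 200 items for more than 5 minutes. Node might
              be frozen.'
```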
+ + - alert: UnboundedChannelVeryLarge + expr: '( + (polkadot_unbounded_channel_len{action = "send"} - + ignoring(action) polkadot_unbounded_channel_len{action = "received"}) + or on(instance) polkadot_unbounded_channel_len{action = "send"} + ) > 15000' + labels: + severity: warning + annotations: + message: 'Channel {{ $labels.entity }} on node {{ $labels.instance }} contains more than + 15000 items.' diff --git a/.maintain/monitoring/grafana-dashboards/README_dashboard.md b/.maintain/monitoring/grafana-dashboards/README_dashboard.md index 37bebc6f8eaae..e00b89449cfaf 100644 --- a/.maintain/monitoring/grafana-dashboards/README_dashboard.md +++ b/.maintain/monitoring/grafana-dashboards/README_dashboard.md @@ -5,10 +5,3 @@ Shared templated Grafana dashboards. To import the dashboards follow the [Grafana documentation](https://grafana.com/docs/grafana/latest/reference/export_import/). You can see an example setup [here](../../../.maintain/sentry-node). - -#### Required labels on Prometheus metrics - -- `instance` referring to a single scrape target (see [Prometheus docs for - details](https://prometheus.io/docs/concepts/jobs_instances/)). - -- `network` referring to the Blockchain network e.g. Kusama. diff --git a/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json b/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json deleted file mode 100644 index 629b22617b22a..0000000000000 --- a/.maintain/monitoring/grafana-dashboards/substrate-dashboard.json +++ /dev/null @@ -1,1650 +0,0 @@ -{ - "annotations": { - "list": [ - { - "$$hashKey": "object:15", - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "iteration": 1586424254170, - "links": [ - { - "icon": "external link", - "tags": [], - "targetBlank": true, - "title": "With love from ColmenaLabs", - "tooltip": "", - "type": "link", - "url": "https://colmenalabs.org" - }, - { - "icon": "external link", - "tags": [], - "targetBlank": true, - "title": "Polkastats.io", - "tooltip": "", - "type": "link", - "url": "https://polkastats.io" - } - ], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"}[10m])/rate([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"}[1m])", - "intervalFactor": 1, - "legendFormat": "rate[10m] / rate[1m]", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Relative Block Production Speed", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - 
"name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 0 - }, - "hiddenSeries": false, - "id": 15, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_peers_count{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Peers count", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 17, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.4.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar([[metric_namespace]]_block_height{status=\"best\",instance=\"[[instance]]\",network=\"[[network]]\"})-scalar([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"})", - "intervalFactor": 2, - "legendFormat": "[[hostname]]", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Diff -> ( Best Block - Finalized )", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": 
null, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 0 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate([[metric_namespace]]_block_height{status=\"finalized\",instance=\"[[instance]]\",network=\"[[network]]\"}[10m])*60", - "intervalFactor": 10, - "legendFormat": "{{instance}} Blocks / minute", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Block rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 6 - }, - "hiddenSeries": false, - "id": 10, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "increase([[metric_namespace]]_block_height{instance=\"[[instance]]\",network=\"[[network]]\",status=~\"finalized|sync_target\"}[1m])", - "intervalFactor": 5, - "legendFormat": "{{status}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Blocks Av per min", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 6 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - 
"pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_block_height{instance=\"[[instance]]\",network=\"[[network]]\",status=~\"finalized|sync_target\"}", - "legendFormat": "{{instance}} {{status}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Block Finalized", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 6 - }, - "hiddenSeries": false, - "id": 13, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_block_height{status=\"best\",instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Block height", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 6 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "data": "", - "expr": "[[metric_namespace]]_ready_transactions_number{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}}", - "refId": "A", - "target": "txcount", - "type": "timeseries" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "TXs Count", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": 
"individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 12 - }, - "hiddenSeries": false, - "id": 23, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_active{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} active", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_failed{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} failed", - "refId": "B" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_importing{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} importing", - "refId": "C" - }, - { - "expr": "[[metric_namespace]]_sync_extra_finality_proofs_pending{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} pending", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Sync Proof", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 12 - }, - "hiddenSeries": false, - "id": 22, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sync_extra_justifications_active{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} active", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sync_extra_justifications_failed{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} failed", - "refId": "B" - }, - { - "expr": 
"[[metric_namespace]]_sync_extra_justifications_importing{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} importing", - "refId": "C" - }, - { - "expr": "[[metric_namespace]]_sync_extra_justifications_pending{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} pending", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Sync justifications", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 12 - }, - "hiddenSeries": false, - "id": 24, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_connections{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}} connections", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_is_major_syncing{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}} syncing", - "refId": "B" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_kbuckets_num_nodes{instance=\"[[instance]]\",network=\"[[network]]\"}", - "hide": false, - "legendFormat": "{{instance}} num_nodes", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "sub_libp2p", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 12 - }, - "hiddenSeries": false, - "id": 26, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"FRNK\",direction=\"in\"}", - "hide": false, - "legendFormat": "{{instance}} FRNK in", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"FRNK\",direction=\"out\"}", - "hide": false, - "legendFormat": "{{instance}} FRNK out", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "libp2p_notifications", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 18 - }, - "hiddenSeries": false, - "id": 28, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_cpu_usage_percentage{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "CPU usage %", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 18 - }, - "hiddenSeries": false, - "id": 27, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_memory_usage_bytes{instance=\"[[instance]]\",network=\"[[network]]\"}", - "legendFormat": "{{instance}} Mem bytes", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": 
"graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 2, - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 18 - }, - "hiddenSeries": false, - "id": 25, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_network_per_sec_bytes", - "hide": false, - "legendFormat": "{{instance}}", - "refId": "A" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total", - "hide": true, - "legendFormat": "{{instance}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "libp2p_network_per_sec_bytes", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "dashLength": 10, - "dashes": false, - "datasource": null, - "description": "", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 18 - }, - "hiddenSeries": false, - "id": 29, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.5.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"dot1\",direction=\"in\"}", - "hide": false, - "legendFormat": "{{instance}} dot1 in", - "refId": "B" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"dot2\",direction=\"in\"}", - "hide": false, - "legendFormat": "{{instance}} dot2 in", - "refId": "C" - }, - { - "expr": "[[metric_namespace]]_sub_libp2p_notifications_total{instance=\"[[instance]]\",network=\"[[network]]\",protocol=\"dot2\",direction=\"out\"}", - "hide": false, - "legendFormat": "{{instance}} dot2 out", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - 
"title": "libp2p_notifications", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 22, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "allValue": null, - "current": { - "selected": true, - "text": "substrate", - "value": "substrate" - }, - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "metric_namespace", - "options": [ - { - "selected": true, - "text": "substrate", - "value": "substrate" - }, - { - "selected": false, - "text": "polkadot", - "value": "polkadot" - } - ], - "query": "substrate, polkadot", - "skipUrlSync": false, - "type": "custom" - }, - { - "allValue": null, - "current": { - "selected": true, - "text": "dev", - "value": "dev" - }, - "datasource": "Prometheus", - "definition": "label_values(network)", - "hide": 0, - "includeAll": false, - "index": -1, - "label": null, - "multi": false, - "name": "network", - "options": [], - "query": "label_values(network)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "selected": false, - "text": "validator-a:9615", - "value": "validator-a:9615" - }, - "datasource": "Prometheus", - "definition": "label_values(instance)", - "hide": 0, - "includeAll": false, - "index": -1, - "label": null, - "multi": false, - "name": "instance", - "options": [], - "query": "label_values(instance)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Substrate Dashboard", - "uid": "ColmenaLabs", - "variables": { - "list": [] - }, - "version": 2 -} diff --git a/.maintain/monitoring/grafana-dashboards/substrate-networking.json b/.maintain/monitoring/grafana-dashboards/substrate-networking.json index dfc143005493d..46942cf582fc6 100644 --- a/.maintain/monitoring/grafana-dashboards/substrate-networking.json +++ b/.maintain/monitoring/grafana-dashboards/substrate-networking.json @@ -1,13 +1,5 @@ { "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - }, { "name": "VAR_METRIC_NAMESPACE", "type": "constant", @@ -17,11 +9,17 @@ } ], "__requires": [ + { + "type": "panel", + "id": "dashlist", + "name": "Dashboard list", + "version": "" + }, { "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "6.7.3" + "version": "7.3.6" }, { "type": "panel", @@ -29,12 +27,6 @@ "name": "Graph", "version": "" }, - { - "type": "panel", - "id": "heatmap", - "name": "Heatmap", - "version": "" - }, { "type": "datasource", "id": "prometheus", @@ -76,10 +68,11 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 
1600780210197, + "iteration": 1621244671073, "links": [], "panels": [ { + "collapsed": false, "datasource": null, "gridPos": { "h": 1, @@ -87,8 +80,9 @@ "x": 0, "y": 0 }, - "id": 167, - "title": "Sync", + "id": 27, + "panels": [], + "title": "Transport", "type": "row" }, { @@ -97,57 +91,116 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 5, + "h": 6, "w": 24, "x": 0, "y": 1 }, "hiddenSeries": false, - "id": 101, + "id": 19, + "interval": "", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": false, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "connected", + "maxPerRow": 12, + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "$$hashKey": "object:70", + "alias": "established (in)", + "color": "#37872D" + }, + { + "$$hashKey": "object:71", + "alias": "established (out)", + "color": "#C4162A" + }, + { + "$$hashKey": "object:72", + "alias": "pending (out)", + "color": "#FF7383" + }, + { + "$$hashKey": "object:73", + "alias": "closed-recently", + "color": "#FADE2A", + "steppedLine": true + } + ], "spaceLength": 10, - "stack": false, - "steppedLine": true, + "stack": true, + "steppedLine": false, "targets": [ { - "expr": "1 - (${metric_namespace}_sub_libp2p_peerset_num_requested{instance=~\"${nodename}\"} - ${metric_namespace}_sub_libp2p_peers_count{instance=~\"${nodename}\"}) / ${metric_namespace}_sub_libp2p_peerset_num_requested{instance=~\"${nodename}\"}", + "expr": "(\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance) -\n sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance)\n)\n\n# Because `closed_total` can be null, this serves as fallback\nor on(instance) sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance)", + "format": "time_series", + "hide": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "established (in)", "refId": "A" + }, + { + "expr": "(\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance) -\n sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance)\n)\n\n# Because `closed_total` can be null, this serves as fallback\nor on(instance) sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance)", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "established (out)", + "refId": "C" + }, + { + "expr": "sum by (instance) (${metric_namespace}_sub_libp2p_pending_connections{instance=~\"${nodename}\"})", + "hide": false, + "interval": "", + "legendFormat": "pending (out)", + "refId": "B" + }, + { + "expr": "sum(rate(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__interval]))", + "hide": false, + 
"interval": "", + "legendFormat": "closed-per-sec", + "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of peer slots filled", + "title": "Average transport-level (TCP, QUIC, ...) connections", "tooltip": { "shared": true, - "sort": 1, + "sort": 0, "value_type": "individual" }, "type": "graph", @@ -160,14 +213,16 @@ }, "yaxes": [ { - "format": "percentunit", - "label": null, + "$$hashKey": "object:100", + "format": "short", + "label": "Connections", "logBase": 1, - "max": "1.0", + "max": null, "min": null, "show": true }, { + "$$hashKey": "object:101", "format": "short", "label": null, "logBase": 1, @@ -181,63 +236,65 @@ "alignLevel": null } }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 6 - }, - "id": 29, - "panels": [], - "repeat": "request_protocol", - "title": "Requests (${request_protocol})", - "type": "row" - }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 4, - "w": 12, + "h": 6, + "w": 24, "x": 0, "y": 7 }, "hiddenSeries": false, - "id": 148, + "id": 189, + "interval": "", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": false, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "maxPerRow": 12, + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", + "expr": "1 - \n\navg(\n ${metric_namespace}_sub_libp2p_distinct_peers_connections_opened_total{instance=~\"${nodename}\"} - ${metric_namespace}_sub_libp2p_distinct_peers_connections_closed_total{instance=~\"${nodename}\"}\n) by (instance)\n\n/\n\navg(\r\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{instance=~\"${nodename}\"}) by (instance) - sum(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}) by (instance)\r\n) by (instance)", + "format": "time_series", + "hide": false, "interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -247,7 +304,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Requests emitted per second", + "title": "Percentage of peers for which we have more than one connection open", "tooltip": { "shared": true, "sort": 2, @@ -263,20 +320,22 @@ }, "yaxes": [ { - "format": "reqps", - "label": null, + "$$hashKey": "object:184", + "format": "percentunit", + "label": "", "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:185", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -286,20 +345,28 @@ }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, 
"gridPos": { - "h": 4, - "w": 12, - "x": 12, - "y": 7 + "h": 6, + "w": 24, + "x": 0, + "y": 13 }, "hiddenSeries": false, - "id": 151, + "id": 39, + "interval": "", "legend": { "avg": false, "current": false, @@ -309,33 +376,48 @@ "total": false, "values": false }, - "lines": true, + "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "repeat": "nodename", + "seriesOverrides": [ + { + "$$hashKey": "object:263", + "alias": "/.*/", + "color": "#FF780A" + } + ], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", + "expr": "increase(${metric_namespace}_sub_libp2p_incoming_connections_handshake_errors_total{instance=~\"${nodename}\"}[$__rate_interval])", + "hide": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{reason}}", "refId": "A" + }, + { + "expr": "increase(${metric_namespace}_sub_libp2p_listeners_errors_total{instance=~\"${nodename}\"}[$__rate_interval])", + "interval": "", + "legendFormat": "pre-handshake", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Requests served per second", + "title": "Number of incoming connection errors", "tooltip": { "shared": true, "sort": 2, @@ -351,20 +433,22 @@ }, "yaxes": [ { - "format": "reqps", - "label": null, + "$$hashKey": "object:270", + "format": "short", + "label": "Errors", "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:271", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -378,56 +462,64 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, - "w": 12, + "h": 6, + "w": 24, "x": 0, - "y": 11 + "y": 19 }, "hiddenSeries": false, - "id": 256, + "id": 4, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", - "instant": false, + "expr": "rate(${metric_namespace}_sub_libp2p_network_bytes_total{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", - "legendFormat": "{{instance}}", - "refId": "A" + "legendFormat": "{{direction}}", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Median request answer time", + "title": "Network bandwidth", "tooltip": { "shared": true, - "sort": 2, + "sort": 0, "value_type": "individual" }, "type": "graph", @@ -440,7 +532,8 
@@ }, "yaxes": [ { - "format": "s", + "$$hashKey": "object:352", + "format": "binBps", "label": null, "logBase": 1, "max": null, @@ -448,6 +541,7 @@ "show": true }, { + "$$hashKey": "object:353", "format": "short", "label": null, "logBase": 1, @@ -463,48 +557,63 @@ }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, - "w": 12, - "x": 12, - "y": 11 + "h": 7, + "w": 24, + "x": 0, + "y": 25 }, "hiddenSeries": false, - "id": 258, + "id": 81, + "interval": "", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": true, + "hideZero": true, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, - "lines": true, + "lines": false, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", + "expr": "increase(${metric_namespace}_sub_libp2p_pending_connections_errors_total{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{reason}}", "refId": "A" } ], @@ -512,7 +621,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Median request serving time", + "title": "Dialing attempt errors", "tooltip": { "shared": true, "sort": 2, @@ -528,7 +637,8 @@ }, "yaxes": [ { - "format": "s", + "$$hashKey": "object:431", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -536,12 +646,13 @@ "show": true }, { + "$$hashKey": "object:432", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -555,45 +666,56 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 4, - "w": 12, + "h": 7, + "w": 24, "x": 0, - "y": 15 + "y": 32 }, "hiddenSeries": false, - "id": 257, + "id": 46, + "interval": "", "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "maxPerRow": 12, + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", - "instant": false, + "expr": 
"rate(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{reason}} ({{direction}})", "refId": "A" } ], @@ -601,7 +723,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99th percentile request answer time", + "title": "Disconnects/sec", "tooltip": { "shared": true, "sort": 2, @@ -617,20 +739,23 @@ }, "yaxes": [ { - "format": "s", - "label": null, + "$$hashKey": "object:514", + "decimals": null, + "format": "cps", + "label": "Disconnects", "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:515", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -639,21 +764,43 @@ } }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 0, + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 39 + }, + "id": 167, + "panels": [], + "repeat": null, + "title": "Sync", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, "gridPos": { - "h": 4, - "w": 12, - "x": 12, - "y": 15 + "h": 5, + "w": 24, + "x": 0, + "y": 40 }, "hiddenSeries": false, - "id": 259, + "id": 101, "legend": { "avg": false, "current": false, @@ -665,34 +812,43 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", + "expr": "${metric_namespace}_sub_libp2p_peerset_num_requested{instance=~\"${nodename}\"}", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "peers-requested", "refId": "A" + }, + { + "expr": "${metric_namespace}_sub_libp2p_peers_count{instance=~\"${nodename}.*\"}", + "interval": "", + "legendFormat": "peers-count", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99th percentile request serving time", + "title": "Number of peer slots filled", "tooltip": { - "shared": false, - "sort": 2, + "shared": true, + "sort": 1, "value_type": "individual" }, "type": "graph", @@ -705,20 +861,22 @@ }, "yaxes": [ { - "format": "s", + "$$hashKey": "object:679", + "format": "none", "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { + "$$hashKey": "object:680", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -726,22 +884,44 @@ "alignLevel": null } }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 29, + "panels": [], + "repeat": "request_protocol_out", + "title": "Outbound requests (${request_protocol_out})", + "type": "row" + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": 
false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 4, - "w": 12, + "w": 24, "x": 0, - "y": 19 + "y": 46 }, "hiddenSeries": false, - "id": 287, + "id": 148, "legend": { "avg": false, "current": false, @@ -753,24 +933,26 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", - "instant": false, + "expr": "rate(${metric_namespace}_sub_libp2p_requests_out_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol_out}\"}[$__rate_interval]) + on(instance) sum(rate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol_out}\"}[$__rate_interval])) by (instance)\n\nor\n\nrate(${metric_namespace}_sub_libp2p_requests_out_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol_out}\"}[$__rate_interval])", + "hide": false, "interval": "", - "legendFormat": "{{reason}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -778,7 +960,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Outgoing request failures per second", + "title": "Requests emitted per second", "tooltip": { "shared": true, "sort": 2, @@ -794,7 +976,8 @@ }, "yaxes": [ { - "format": "short", + "$$hashKey": "object:209", + "format": "reqps", "label": null, "logBase": 1, "max": null, @@ -802,6 +985,7 @@ "show": true }, { + "$$hashKey": "object:210", "format": "short", "label": null, "logBase": 1, @@ -821,16 +1005,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 4, - "w": 12, - "x": 12, - "y": 19 + "w": 24, + "x": 0, + "y": 50 }, "hiddenSeries": false, - "id": 286, + "id": 448, "legend": { "avg": false, "current": false, @@ -842,23 +1033,26 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_in_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", - "instant": false, + "expr": "sum(rate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol_out}\", reason != \"obsolete\"}[$__rate_interval])) by (instance, reason)", + "hide": false, "interval": "", + "intervalFactor": 1, "legendFormat": "{{reason}}", "refId": "A" } @@ -867,7 +1061,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Ingoing request failures per second", + "title": "Outbound requests failures (other than \"obsolete\")", "tooltip": { "shared": true, "sort": 2, @@ -883,7 +1077,8 @@ }, 
"yaxes": [ { - "format": "short", + "$$hashKey": "object:209", + "format": "reqps", "label": null, "logBase": 1, "max": null, @@ -891,6 +1086,7 @@ "show": true }, { + "$$hashKey": "object:210", "format": "short", "label": null, "logBase": 1, @@ -904,79 +1100,60 @@ "alignLevel": null } }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 40 - }, - "id": 23, - "panels": [], - "repeat": "notif_protocol", - "title": "Notifications (${notif_protocol})", - "type": "row" - }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 7, - "w": 12, + "h": 4, + "w": 24, "x": 0, - "y": 41 + "y": 54 }, "hiddenSeries": false, - "id": 31, - "interval": "1m", + "id": 256, "legend": { "avg": false, "current": false, "max": false, "min": false, - "rightSide": true, - "show": true, + "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, - "maxPerRow": 2, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "/(in)/", - "color": "#73BF69" - }, - { - "alias": "/(out)/", - "color": "#F2495C" - } - ], + "repeat": "nodename", + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg by (direction) (irate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__interval]))", + "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol_out}\"}[$__rate_interval])) by (instance, le)) > 0", + "instant": false, "interval": "", - "legendFormat": "{{direction}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -984,7 +1161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average network notifications per second", + "title": "Median request answer time", "tooltip": { "shared": true, "sort": 2, @@ -1000,20 +1177,22 @@ }, "yaxes": [ { - "format": "cps", - "label": "Notifs/sec", + "$$hashKey": "object:1069", + "format": "s", + "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:1070", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1027,62 +1206,54 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 41 + "h": 4, + "w": 24, + "x": 0, + "y": 58 }, "hiddenSeries": false, - "id": 37, - "interval": "1m", + "id": 257, "legend": { - "alignAsTable": false, "avg": false, "current": false, - "hideEmpty": false, - "hideZero": false, "max": false, "min": false, - "rightSide": true, - "show": true, + "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, - "maxPerRow": 2, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": 
null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "/(in)/", - "color": "#73BF69" - }, - { - "alias": "/(out)/", - "color": "#F2495C" - } - ], + "repeat": "nodename", + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__interval])) by (direction)", + "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol_out}\"}[$__rate_interval])) by (instance, le)) > 0", "instant": false, "interval": "", - "legendFormat": "{{direction}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -1090,7 +1261,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average bandwidth used by notifications", + "title": "99th percentile request answer time", "tooltip": { "shared": true, "sort": 2, @@ -1106,20 +1277,22 @@ }, "yaxes": [ { - "format": "Bps", - "label": "Bandwidth", + "$$hashKey": "object:988", + "format": "s", + "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:989", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1127,22 +1300,44 @@ "alignLevel": null } }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 62 + }, + "id": 504, + "panels": [], + "repeat": "request_protocol_in", + "title": "Inbound requests (${request_protocol_in})", + "type": "row" + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 1, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, "gridPos": { - "h": 6, - "w": 12, + "h": 4, + "w": 24, "x": 0, - "y": 48 + "y": 63 }, "hiddenSeries": false, - "id": 16, + "id": 151, "legend": { "avg": false, "current": false, @@ -1154,21 +1349,23 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, - "steppedLine": true, + "steppedLine": false, "targets": [ { - "expr": "max(${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"sent\"} - ignoring(action) ${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"received\"}) by (instance) > 0", + "expr": "rate(${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol_in}\"}[$__rate_interval])", "interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -1178,10 +1375,10 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Total sizes of notifications waiting to be delivered to the rest of Substrate", + "title": "Requests served per second", "tooltip": { - "shared": false, - "sort": 1, + "shared": true, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -1194,7 +1391,8 @@ }, "yaxes": [ { - "format": "bytes", + "$$hashKey": "object:907", + "format": "reqps", "label": null, "logBase": 1, "max": 
null, @@ -1202,12 +1400,13 @@ "show": true }, { + "$$hashKey": "object:908", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1221,16 +1420,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 1, - "fillGradient": 1, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 48 + "h": 4, + "w": 24, + "x": 0, + "y": 67 }, "hiddenSeries": false, - "id": 21, + "id": 449, "legend": { "avg": false, "current": false, @@ -1242,25 +1448,27 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, - "pluginVersion": "6.4.5", + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, protocol) / sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, protocol)", - "format": "time_series", + "expr": "sum(rate(${metric_namespace}_sub_libp2p_requests_in_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol_in}\"}[$__rate_interval])) by (instance, reason)", + "hide": false, "interval": "", - "legendFormat": "{{direction}}", + "intervalFactor": 1, + "legendFormat": "{{reason}}", "refId": "A" } ], @@ -1268,7 +1476,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average size of sent and received notifications in the past 5 minutes", + "title": "Inbound requests failures", "tooltip": { "shared": true, "sort": 2, @@ -1284,20 +1492,22 @@ }, "yaxes": [ { - "format": "bytes", - "label": "Max. 
notification size", - "logBase": 10, + "$$hashKey": "object:209", + "format": "reqps", + "label": null, + "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:210", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -1311,660 +1521,53 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "description": "99.9% of the time, the output queue size for this protocol is below the given value", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 6, - "w": 12, + "h": 4, + "w": 24, "x": 0, - "y": 54 + "y": 71 }, "hiddenSeries": false, - "id": 14, + "id": 258, "legend": { - "alignAsTable": false, "avg": false, - "current": true, - "hideEmpty": false, - "hideZero": true, - "max": true, + "current": false, + "max": false, "min": false, - "rightSide": true, - "show": true, + "show": false, "total": false, - "values": true + "values": false }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "max", - "fill": 1, - "linewidth": 0 - } - ], + "repeat": "nodename", + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_notifications_queues_size_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[2m])) by (le, instance))", - "hide": false, + "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol_in}\"}[$__rate_interval])) by (instance, le))", "interval": "", - "legendFormat": "{{protocol}}", - "refId": "A" - }, - { - "expr": "max(histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_notifications_queues_size_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[2m])) by (le, instance)))", - "interval": "", - "legendFormat": "max", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "99th percentile of queues sizes", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": "300", - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 54 - }, - "hiddenSeries": false, - "id": 134, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pluginVersion": "6.4.5", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": 
false, - "targets": [ - { - "expr": "histogram_quantile(1.0, sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[5m])) by (direction, le))", - "format": "time_series", - "interval": "", - "legendFormat": "{{direction}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Maximum size of sent and received notifications in the past 5 minutes", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": "Max. notification size", - "logBase": 10, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 60 - }, - "id": 27, - "panels": [], - "title": "Transport", - "type": "row" - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 61 - }, - "hiddenSeries": false, - "id": 19, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "maxPerRow": 2, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "established (in)", - "color": "#37872D" - }, - { - "alias": "established (out)", - "color": "#C4162A" - }, - { - "alias": "pending (out)", - "color": "#FF7383" - }, - { - "alias": "closed-recently", - "color": "#FADE2A", - "steppedLine": true - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "avg(sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance) - sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"in\", instance=~\"${nodename}\"}) by (instance))", - "format": "time_series", - "hide": false, - "interval": "", - "legendFormat": "established (in)", - "refId": "A" - }, - { - "expr": "avg(sum(${metric_namespace}_sub_libp2p_connections_opened_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance) - sum(${metric_namespace}_sub_libp2p_connections_closed_total{direction=\"out\", instance=~\"${nodename}\"}) by (instance))", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "established (out)", - "refId": "C" - }, - { - "expr": "avg(sum by (instance) (${metric_namespace}_sub_libp2p_pending_connections{instance=~\"${nodename}\"}))", - "hide": false, - "interval": "", - "legendFormat": "pending (out)", - "refId": "B" - }, - { - "expr": "avg(sum by(instance) (increase(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__interval])))", - "hide": false, - "interval": "", - "legendFormat": "closed-recently", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": 
[], - "timeShift": null, - "title": "Average transport-level (TCP, QUIC, ...) connections per node", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "Connections", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 67 - }, - "hiddenSeries": false, - "id": 189, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "maxPerRow": 2, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeatDirection": "v", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "1 - \n\navg(\n ${metric_namespace}_sub_libp2p_distinct_peers_connections_opened_total{instance=~\"${nodename}\"} - ${metric_namespace}_sub_libp2p_distinct_peers_connections_closed_total{instance=~\"${nodename}\"}\n) by (instance)\n\n/\n\navg(\r\n sum(${metric_namespace}_sub_libp2p_connections_opened_total{instance=~\"${nodename}\"}) by (instance) - sum(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}) by (instance)\r\n) by (instance)", - "format": "time_series", - "hide": false, - "interval": "", - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Percentage of peers for which we have more than one connection open", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 73 - }, - "hiddenSeries": false, - "id": 39, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/.*/", - "color": "#FF780A" - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": 
"avg(increase(${metric_namespace}_sub_libp2p_incoming_connections_handshake_errors_total{instance=~\"${nodename}\"}[$__interval])) by (reason)", - "interval": "", - "legendFormat": "{{reason}}", - "refId": "A" - }, - { - "expr": "avg(increase(${metric_namespace}_sub_libp2p_listeners_errors_total{instance=~\"${nodename}\"}[$__interval]))", - "interval": "", - "legendFormat": "pre-handshake", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of incoming connection errors, averaged by node", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "Errors", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cards": { - "cardPadding": null, - "cardRound": null - }, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateOranges", - "exponent": 0.5, - "max": 100, - "min": 0, - "mode": "spectrum" - }, - "dataFormat": "timeseries", - "datasource": "$data_source", - "description": "Each bucket represent a certain number of nodes using a certain bandwidth range.", - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 73 - }, - "heatmap": {}, - "hideZeroBuckets": false, - "highlightCards": true, - "id": 4, - "legend": { - "show": false - }, - "reverseYBuckets": false, - "targets": [ - { - "expr": "${metric_namespace}_network_per_sec_bytes{instance=~\"${nodename}\"}", - "format": "time_series", - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Heatmap of network bandwidth", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": "2.5m", - "yAxis": { - "decimals": null, - "format": "Bps", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 79 - }, - "hiddenSeries": false, - "id": 81, - "interval": "1m", - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "avg(increase(${metric_namespace}_sub_libp2p_pending_connections_errors_total{instance=~\"${nodename}\"}[$__interval])) by (reason)", - "interval": "", - "legendFormat": "{{reason}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -1972,7 +1575,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Dialing attempt errors, averaged per node", + 
"title": "Median request serving time", "tooltip": { "shared": true, "sort": 2, @@ -1988,7 +1591,8 @@ }, "yaxes": [ { - "format": "short", + "$$hashKey": "object:666", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -1996,12 +1600,13 @@ "show": true }, { + "$$hashKey": "object:667", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -2011,52 +1616,57 @@ }, { "aliasColors": {}, - "bars": true, + "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 1, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 79 + "h": 4, + "w": 24, + "x": 0, + "y": 75 }, "hiddenSeries": false, - "id": 46, - "interval": "1m", + "id": 259, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": true, + "show": false, "total": false, "values": false }, - "lines": false, + "lines": true, "linewidth": 1, - "maxPerRow": 2, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, - "stack": true, + "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(increase(${metric_namespace}_sub_libp2p_connections_closed_total{instance=~\"${nodename}\"}[$__interval])) by (reason)", + "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol_in}\"}[$__rate_interval])) by (instance, le))", "interval": "", - "legendFormat": "{{reason}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -2064,9 +1674,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Disconnects, averaged per node", + "title": "99th percentile request serving time", "tooltip": { - "shared": true, + "shared": false, "sort": 2, "value_type": "individual" }, @@ -2080,21 +1690,22 @@ }, "yaxes": [ { - "decimals": null, - "format": "short", - "label": "Disconnects", + "$$hashKey": "object:747", + "format": "s", + "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:748", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { @@ -2109,204 +1720,86 @@ "h": 1, "w": 24, "x": 0, - "y": 86 + "y": 79 }, - "id": 52, + "id": 23, "panels": [], - "title": "GrandPa", + "repeat": "notif_protocol", + "title": "Notifications (${notif_protocol})", "type": "row" }, { "aliasColors": {}, - "bars": true, + "bars": false, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 87 + "y": 80 }, "hiddenSeries": false, - "id": 54, - "interval": "1m", + "id": 447, "legend": { "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "rightSide": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, "show": true, - "total": true, + "total": false, "values": true }, - "lines": false, + "lines": true, "linewidth": 1, "nullPointMode": "null as zero", 
"options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", - "seriesOverrides": [ - { - "alias": "/discard/", - "color": "#FA6400", - "zindex": -2 - }, - { - "alias": "/keep/", - "color": "#73BF69", - "zindex": 2 - }, - { - "alias": "/process_and_discard/", - "color": "#5794F2" - } - ], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "avg(increase(${metric_namespace}_finality_grandpa_communication_gossip_validator_messages{instance=~\"${nodename}\"}[$__interval])) by (action, message)", - "interval": "", - "legendFormat": "{{message}} => {{action}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "GrandPa messages received from the network, and action", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "datasource": null, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 93 - }, - "id": 25, - "panels": [], - "repeat": null, - "title": "Kademlia & authority-discovery", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "description": "", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 5, - "w": 12, - "x": 0, - "y": 94 - }, - "hiddenSeries": false, - "id": 33, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": true, - "pointradius": 2, - "points": false, - "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "sum(${metric_namespace}_sub_libp2p_kbuckets_num_nodes{instance=~\"${nodename}\"}) by (instance)", - "format": "time_series", - "instant": false, + "expr": "${metric_namespace}_sub_libp2p_notifications_streams_opened_total{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"} - ${metric_namespace}_sub_libp2p_notifications_streams_closed_total{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}", "interval": "", "legendFormat": "{{instance}}", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of entries in Kademlia k-buckets", + "title": "Number of open substreams", "tooltip": { - "shared": true, + "shared": false, "sort": 1, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, - "max": 0, - "min": null, "mode": "time", "name": null, "show": true, @@ -2314,6 +1807,7 @@ }, "yaxes": [ { + "$$hashKey": "object:896", "format": "short", "label": null, "logBase": 1, @@ -2322,6 +1816,7 @@ "show": true }, { + "$$hashKey": "object:897", "format": "short", "label": null, "logBase": 1, @@ -2341,55 +1836,67 @@ "dashLength": 10, 
"dashes": false, "datasource": "$data_source", - "fill": 0, - "fillGradient": 7, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, "gridPos": { - "h": 5, - "w": 12, - "x": 12, - "y": 94 + "h": 6, + "w": 24, + "x": 0, + "y": 86 }, "hiddenSeries": false, - "id": 35, - "interval": "", + "id": 486, "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, "show": false, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", + "repeatDirection": "h", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(${metric_namespace}_sub_libp2p_kademlia_random_queries_total{instance=~\"${nodename}\"}[5m])", + "expr": "rate(${metric_namespace}_sub_libp2p_notifications_streams_closed_total{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval])", "interval": "", + "intervalFactor": 4, "legendFormat": "{{instance}}", - "refId": "A" + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Kademlia random discovery queries started per second", + "title": "Substreams closed/sec", "tooltip": { - "shared": true, + "shared": false, "sort": 1, "value_type": "individual" }, @@ -2403,14 +1910,16 @@ }, "yaxes": [ { - "format": "cps", - "label": "Queries per second", + "$$hashKey": "object:484", + "format": "short", + "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:485", "format": "short", "label": null, "logBase": 1, @@ -2430,44 +1939,68 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 4, - "w": 12, + "h": 6, + "w": 24, "x": 0, - "y": 99 + "y": 92 }, "hiddenSeries": false, - "id": 111, + "id": 31, + "interval": "", "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "maxPerRow": 12, + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "$$hashKey": "object:399", + "alias": "/(in)/", + "color": "#73BF69" + }, + { + "$$hashKey": "object:400", + "alias": "/(out)/", + "color": "#F2495C" + } + ], "spaceLength": 10, "stack": false, - "steppedLine": true, + "steppedLine": false, "targets": [ { - "expr": "${metric_namespace}_sub_libp2p_kademlia_records_count{instance=~\"${nodename}\"}", + "expr": "avg by (direction) (rate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval]))", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{direction}}", "refId": "A" } ], @@ -2475,10 +2008,10 @@ "timeFrom": null, 
"timeRegions": [], "timeShift": null, - "title": "Number of Kademlia records", + "title": "Number of network notifications", "tooltip": { "shared": true, - "sort": 1, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2491,14 +2024,16 @@ }, "yaxes": [ { + "$$hashKey": "object:413", "format": "short", - "label": null, + "label": "Notifs/sec", "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:414", "format": "short", "label": null, "logBase": 1, @@ -2518,44 +2053,72 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { - "h": 4, - "w": 12, - "x": 12, - "y": 99 + "h": 6, + "w": 24, + "x": 0, + "y": 98 }, "hiddenSeries": false, - "id": 112, + "id": 37, + "interval": "", "legend": { + "alignAsTable": false, "avg": false, "current": false, + "hideEmpty": false, + "hideZero": false, "max": false, "min": false, - "show": false, + "rightSide": false, + "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "maxPerRow": 12, + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "$$hashKey": "object:492", + "alias": "/(in)/", + "color": "#73BF69" + }, + { + "$$hashKey": "object:493", + "alias": "/(out)/", + "color": "#F2495C" + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "${metric_namespace}_sub_libp2p_kademlia_records_sizes_total{instance=~\"${nodename}\"}", + "expr": "avg(rate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval])) by (direction)", + "instant": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{direction}}", "refId": "A" } ], @@ -2563,7 +2126,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Total size of Kademlia records", + "title": "Average bandwidth used by notifications", "tooltip": { "shared": true, "sort": 2, @@ -2579,14 +2142,16 @@ }, "yaxes": [ { - "format": "bytes", - "label": null, + "$$hashKey": "object:506", + "format": "Bps", + "label": "Bandwidth", "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:507", "format": "short", "label": null, "logBase": 1, @@ -2606,19 +2171,24 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "description": "", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 5, - "w": 12, + "h": 6, + "w": 24, "x": 0, - "y": 103 + "y": 104 }, "hiddenSeries": false, - "id": 211, + "id": 16, "legend": { - "alignAsTable": false, "avg": false, "current": false, "max": false, @@ -2629,25 +2199,25 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, - "percentage": true, + "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": 
"${metric_namespace}_authority_discovery_known_authorities_count{instance=~\"${nodename}\"}", - "format": "time_series", - "instant": false, + "expr": "${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"sent\"} - ignoring(action) ${metric_namespace}_sub_libp2p_out_events_notifications_sizes{instance=~\"${nodename}\", protocol=\"${notif_protocol}\", action=\"received\"}", "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{name}}", "refId": "A" } ], @@ -2655,17 +2225,15 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of authorities discovered by authority-discovery", + "title": "Total sizes of notifications waiting to be delivered to the rest of Substrate", "tooltip": { - "shared": true, + "shared": false, "sort": 1, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, - "max": 0, - "min": null, "mode": "time", "name": null, "show": true, @@ -2673,7 +2241,8 @@ }, "yaxes": [ { - "format": "short", + "$$hashKey": "object:232", + "format": "bytes", "label": null, "logBase": 1, "max": null, @@ -2681,6 +2250,7 @@ "show": true }, { + "$$hashKey": "object:233", "format": "short", "label": null, "logBase": 1, @@ -2700,19 +2270,24 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "description": "", - "fill": 0, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 1, "gridPos": { - "h": 5, - "w": 12, - "x": 12, - "y": 103 + "h": 6, + "w": 24, + "x": 0, + "y": 110 }, "hiddenSeries": false, - "id": 233, + "id": 21, "legend": { - "alignAsTable": false, "avg": false, "current": false, "max": false, @@ -2723,25 +2298,26 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, - "percentage": true, + "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "${metric_namespace}_authority_discovery_amount_external_addresses_last_published{instance=~\"${nodename}\"}", + "expr": "sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval])) by (direction, protocol) / sum(rate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval])) by (direction, protocol)", "format": "time_series", - "instant": false, "interval": "", - "legendFormat": "{{instance}}", + "legendFormat": "{{direction}}", "refId": "A" } ], @@ -2749,17 +2325,15 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of addresses published by authority-discovery", + "title": "Average size of sent and received notifications", "tooltip": { "shared": true, - "sort": 1, + "sort": 2, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, - "max": 0, - "min": null, "mode": "time", "name": null, "show": true, @@ -2767,14 +2341,16 @@ }, "yaxes": [ { - "format": "short", - "label": null, - "logBase": 1, + "$$hashKey": "object:322", + "format": "bytes", + "label": "Max. 
notification size", + "logBase": 10, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:323", "format": "short", "label": null, "logBase": 1, @@ -2794,17 +2370,23 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 1, "gridPos": { - "h": 5, - "w": 12, + "h": 6, + "w": 24, "x": 0, - "y": 108 + "y": 116 }, "hiddenSeries": false, - "id": 68, - "interval": "1m", + "id": 134, "legend": { "avg": false, "current": false, @@ -2816,36 +2398,37 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "connected", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "v", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_found\", instance=~\"${nodename}\"}[2h]) / ignoring(name) (\n rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_found\", instance=~\"${nodename}\"}[2h]) +\n ignoring(name) rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_not_found\", instance=~\"${nodename}\"}[2h])\n)", + "expr": "histogram_quantile(0.99, sum(irate(${metric_namespace}_sub_libp2p_notifications_sizes_bucket{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval])) by (direction, le))", + "format": "time_series", "interval": "", - "legendFormat": "{{instance}}", - "refId": "B" + "legendFormat": "{{direction}}", + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Authority discovery get_value success rate in past two hours", + "title": "99th percentile of size of sent and received notifications", "tooltip": { "shared": true, - "sort": 1, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2858,14 +2441,16 @@ }, "yaxes": [ { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": "1.0", + "$$hashKey": "object:244", + "format": "bytes", + "label": "Max. 
notification size", + "logBase": 10, + "max": null, "min": null, "show": true }, { + "$$hashKey": "object:245", "format": "short", "label": null, "logBase": 1, @@ -2879,63 +2464,108 @@ "alignLevel": null } }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 122 + }, + "id": 52, + "panels": [], + "title": "GrandPa", + "type": "row" + }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 5, - "w": 12, - "x": 12, - "y": 108 + "h": 6, + "w": 24, + "x": 0, + "y": 123 }, "hiddenSeries": false, - "id": 234, + "id": 54, "interval": "1m", "legend": { + "alignAsTable": true, "avg": false, "current": false, + "hideEmpty": true, + "hideZero": true, "max": false, "min": false, - "show": false, - "total": false, - "values": false + "rightSide": true, + "show": true, + "total": true, + "values": true }, - "lines": true, + "lines": false, "linewidth": 1, - "nullPointMode": "connected", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", - "repeatDirection": "v", - "seriesOverrides": [], + "repeat": "nodename", + "repeatDirection": "h", + "seriesOverrides": [ + { + "$$hashKey": "object:366", + "alias": "/discard/", + "color": "#FA6400", + "zindex": -2 + }, + { + "$$hashKey": "object:367", + "alias": "/keep/", + "color": "#73BF69", + "zindex": 2 + }, + { + "$$hashKey": "object:368", + "alias": "/process_and_discard/", + "color": "#5794F2" + } + ], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_put\", instance=~\"${nodename}\"}[2h]) / ignoring(name) (\n rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_put\", instance=~\"${nodename}\"}[2h]) +\n ignoring(name) rate(${metric_namespace}_authority_discovery_dht_event_received{name=\"value_put_failed\", instance=~\"${nodename}\"}[2h])\n)", + "expr": "rate(${metric_namespace}_finality_grandpa_communication_gossip_validator_messages{instance=~\"${nodename}\"}[$__interval])", "interval": "", - "legendFormat": "{{instance}}", - "refId": "B" + "legendFormat": "{{message}} => {{action}}", + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Authority discovery put_value success rate in past two hours", + "title": "GrandPa messages received from the network, and action", "tooltip": { "shared": true, - "sort": 1, + "sort": 0, "value_type": "individual" }, "type": "graph", @@ -2948,30 +2578,77 @@ }, "yaxes": [ { - "format": "percentunit", + "$$hashKey": "object:409", + "format": "short", "label": null, "logBase": 1, - "max": "1.0", + "max": null, "min": null, "show": true }, { + "$$hashKey": "object:410", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { "align": false, "alignLevel": null } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 129 + }, + "id": 25, + "panels": [], + "repeat": null, + "title": "Kademlia & authority-discovery", + "type": "row" + }, + { + "datasource": null, + "description": "", + "fieldConfig": { 
+ "defaults": { + "custom": {} + }, + "overrides": [] + }, + "folderId": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 130 + }, + "headings": true, + "id": 423, + "limit": 10, + "pluginVersion": "7.2.1", + "query": "kademlia", + "recent": false, + "search": false, + "starred": false, + "tags": [], + "timeFrom": null, + "timeShift": null, + "title": "Kademlia and Authority Discovery metrics moved to \"kademlia-and-authority-discovery\" dashboard.", + "type": "dashlist" } ], - "refresh": "1m", - "schemaVersion": 22, + "refresh": false, + "schemaVersion": 26, "style": "dark", "tags": [], "templating": { @@ -2981,9 +2658,9 @@ "current": {}, "datasource": "$data_source", "definition": "${metric_namespace}_process_start_time_seconds", + "error": null, "hide": 0, - "includeAll": true, - "index": -1, + "includeAll": false, "label": "Instance name filter", "multi": true, "name": "nodename", @@ -3003,15 +2680,15 @@ "allValue": null, "current": {}, "datasource": "$data_source", - "definition": "${metric_namespace}_sub_libp2p_notifications_sizes_count", + "definition": "${metric_namespace}_sub_libp2p_notifications_streams_opened_total{instance=~\"${nodename}\"}", + "error": null, "hide": 2, "includeAll": true, - "index": -1, "label": null, "multi": false, "name": "notif_protocol", "options": [], - "query": "${metric_namespace}_sub_libp2p_notifications_sizes_count", + "query": "${metric_namespace}_sub_libp2p_notifications_streams_opened_total{instance=~\"${nodename}\"}", "refresh": 1, "regex": "/protocol=\"(.*?)\"/", "skipUrlSync": false, @@ -3026,15 +2703,15 @@ "allValue": null, "current": {}, "datasource": "$data_source", - "definition": "${metric_namespace}_sub_libp2p_requests_out_started_total", + "definition": "${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\"}", + "error": null, "hide": 2, "includeAll": true, - "index": -1, "label": null, "multi": false, - "name": "request_protocol", + "name": "request_protocol_in", "options": [], - "query": "${metric_namespace}_sub_libp2p_requests_out_started_total", + "query": "${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\"}", "refresh": 1, "regex": "/protocol=\"(.*?)\"/", "skipUrlSync": false, @@ -3048,9 +2725,10 @@ { "current": { "selected": false, - "text": "Prometheus", - "value": "Prometheus" + "text": "prometheus.parity-mgmt", + "value": "prometheus.parity-mgmt" }, + "error": null, "hide": 0, "includeAll": false, "label": "Source of data", @@ -3058,6 +2736,7 @@ "name": "data_source", "options": [], "query": "prometheus", + "queryValue": "", "refresh": 1, "regex": "", "skipUrlSync": false, @@ -3066,20 +2745,46 @@ { "current": { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false }, + "error": null, "hide": 2, "label": "Prefix of the metrics", "name": "metric_namespace", "options": [ { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false } ], "query": "${VAR_METRIC_NAMESPACE}", "skipUrlSync": false, "type": "constant" + }, + { + "allValue": null, + "current": {}, + "datasource": "$data_source", + "definition": "${metric_namespace}_sub_libp2p_requests_out_success_total_count{instance=~\"${nodename}\"}", + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": false, + "name": "request_protocol_out", + "options": [], + "query": 
"${metric_namespace}_sub_libp2p_requests_out_success_total_count{instance=~\"${nodename}\"}", + "refresh": 1, + "regex": "/protocol=\"(.*?)\"/", + "skipUrlSync": false, + "sort": 5, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, @@ -3101,11 +2806,8 @@ "1d" ] }, - "timezone": "", + "timezone": "utc", "title": "Substrate Networking", "uid": "vKVuiD9Zk", - "variables": { - "list": [] - }, - "version": 121 -} + "version": 176 +} \ No newline at end of file diff --git a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json index 539fdec086a37..2f08ac7bb34c5 100644 --- a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json +++ b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json @@ -13,7 +13,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "6.7.3" + "version": "7.3.6" }, { "type": "panel", @@ -26,11 +26,18 @@ "id": "prometheus", "name": "Prometheus", "version": "1.0.0" + }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" } ], "annotations": { "list": [ { + "$$hashKey": "object:326", "builtIn": 1, "datasource": "-- Grafana --", "enable": true, @@ -42,6 +49,7 @@ "type": "dashboard" }, { + "$$hashKey": "object:327", "datasource": "$data_source", "enable": true, "expr": "increase(${metric_namespace}_tasks_ended_total{reason=\"panic\", instance=~\"${nodename}\"}[10m])", @@ -58,6 +66,7 @@ "type": "tags" }, { + "$$hashKey": "object:621", "datasource": "$data_source", "enable": true, "expr": "changes(${metric_namespace}_process_start_time_seconds{instance=~\"${nodename}\"}[10m])", @@ -75,18 +84,45 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1599471940817, + "iteration": 1621244116095, "links": [], "panels": [ { - "collapsed": false, "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, + "id": 42, + "options": { + "content": "", + "mode": "markdown" + }, + "pluginVersion": "7.3.6", + "repeat": "nodename", + "timeFrom": null, + "timeShift": null, + "title": "$nodename", + "type": "text" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, "id": 29, "panels": [], "title": "Tasks", @@ -98,17 +134,24 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 3, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 1 + "y": 2 }, "hiddenSeries": false, "id": 11, - "interval": "1m", + "interval": "", "legend": { "alignAsTable": true, "avg": true, @@ -123,22 +166,24 @@ "values": true }, "lines": true, - "linewidth": 2, - "nullPointMode": "null", + "linewidth": 1, + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, - "stack": false, - "steppedLine": true, + "stack": true, + "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[10m])) by (task_name)", + "expr": "rate(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", 
"legendFormat": "{{task_name}}", "refId": "A" @@ -148,7 +193,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "CPU time spent on each task (average per node)", + "title": "CPU time spent on each task", "tooltip": { "shared": true, "sort": 2, @@ -164,6 +209,7 @@ }, "yaxes": [ { + "$$hashKey": "object:2721", "format": "percentunit", "label": null, "logBase": 1, @@ -172,6 +218,7 @@ "show": true }, { + "$$hashKey": "object:2722", "format": "short", "label": null, "logBase": 1, @@ -191,13 +238,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 3, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 7 + "y": 8 }, "hiddenSeries": false, "id": 30, @@ -216,22 +270,24 @@ "values": true }, "lines": true, - "linewidth": 2, - "nullPointMode": "null", + "linewidth": 1, + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, - "stack": false, - "steppedLine": true, + "stack": true, + "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])) by (task_name)", + "expr": "rate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -241,7 +297,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Task polling rate per second (average per node)", + "title": "Task polling rate per second", "tooltip": { "shared": true, "sort": 2, @@ -257,6 +313,7 @@ }, "yaxes": [ { + "$$hashKey": "object:2571", "format": "cps", "label": null, "logBase": 1, @@ -265,6 +322,7 @@ "show": true }, { + "$$hashKey": "object:2572", "format": "short", "label": null, "logBase": 1, @@ -284,21 +342,30 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 13 + "y": 14 }, "hiddenSeries": false, - "id": 31, + "id": 43, "interval": "", "legend": { "alignAsTable": true, - "avg": false, - "current": false, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": false, "max": true, "min": false, "rightSide": true, @@ -307,22 +374,24 @@ "values": true }, "lines": true, - "linewidth": 2, - "nullPointMode": "null", + "linewidth": 1, + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, - "steppedLine": true, + "steppedLine": false, "targets": [ { - "expr": "max(irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])) by (task_name)", + "expr": "increase(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[$__rate_interval]) / increase(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -332,7 +401,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Task polling rate per second (maximum 
per node)", + "title": "Average time it takes to call Future::poll()", "tooltip": { "shared": true, "sort": 2, @@ -348,14 +417,16 @@ }, "yaxes": [ { - "format": "cps", + "$$hashKey": "object:2571", + "format": "s", "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { + "$$hashKey": "object:2572", "format": "short", "label": null, "logBase": 1, @@ -371,50 +442,60 @@ }, { "aliasColors": {}, - "bars": false, + "bars": true, "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 19 + "y": 20 }, "hiddenSeries": false, "id": 15, "interval": "", "legend": { "alignAsTable": true, - "avg": true, + "avg": false, "current": false, "max": false, "min": false, "rightSide": true, "show": true, - "total": false, + "total": true, "values": true }, - "lines": true, + "lines": false, "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": true, "targets": [ { - "expr": "avg by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m]))", + "expr": "increase(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", + "intervalFactor": 1, "legendFormat": "{{task_name}}", "refId": "A" } @@ -423,7 +504,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks started per second (average per node)", + "title": "Number of tasks started", "tooltip": { "shared": true, "sort": 2, @@ -439,6 +520,7 @@ }, "yaxes": [ { + "$$hashKey": "object:771", "format": "short", "label": null, "logBase": 10, @@ -447,6 +529,7 @@ "show": true }, { + "$$hashKey": "object:772", "format": "short", "label": null, "logBase": 1, @@ -466,205 +549,30 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 25 - }, - "hiddenSeries": false, - "id": 16, - "interval": "", - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "max by(task_name) (irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m]))", - "interval": "", - "legendFormat": "{{task_name}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of tasks started per second (maximum over all nodes)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 10, - "max": null, - "min": "0", - "show": true + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] }, - { - 
"format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", - "fill": 0, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 31 - }, - "hiddenSeries": false, - "id": 2, - "interval": "", - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "avg by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", - "interval": "", - "legendFormat": "{{task_name}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of tasks running (average per node)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" + "overrides": [] }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 10, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$data_source", "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 37 + "y": 26 }, "hiddenSeries": false, - "id": 3, + "id": 2, "interval": "", "legend": { "alignAsTable": true, "avg": false, - "current": false, + "current": true, "max": true, - "min": false, + "min": true, "rightSide": true, "show": true, "total": false, @@ -672,21 +580,23 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, - "steppedLine": true, + "steppedLine": false, "targets": [ { - "expr": "max by(task_name) (${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason))", + "expr": "${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"} - sum(${metric_namespace}_tasks_ended_total{instance=~\"${nodename}\"}) without(reason)\n\n# Fallback if tasks_ended_total is null for that task\nor on(instance, task_name) ${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -696,7 +606,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Number of tasks running (maximum over all nodes)", + "title": "Number of tasks running", "tooltip": { "shared": true, "sort": 2, @@ -712,14 +622,16 @@ }, "yaxes": [ { + "$$hashKey": "object:919", 
"format": "short", "label": null, - "logBase": 10, + "logBase": 1, "max": null, "min": "0", "show": true }, { + "$$hashKey": "object:920", "format": "short", "label": null, "logBase": 1, @@ -740,13 +652,20 @@ "dashes": false, "datasource": "$data_source", "decimals": null, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 6, "w": 24, "x": 0, - "y": 43 + "y": 32 }, "hiddenSeries": false, "id": 7, @@ -768,19 +687,21 @@ "linewidth": 1, "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": true, "steppedLine": true, "targets": [ { - "expr": "avg(\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[10m])\n - ignoring(le)\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[10m])\n) by (task_name) > 0", + "expr": "irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[$__rate_interval])\n - ignoring(le)\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[$__rate_interval]) > 0", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -790,7 +711,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Calls to `Future::poll` that took more than one second (average per node)", + "title": "Number of calls to `Future::poll` that took more than one second", "tooltip": { "shared": true, "sort": 2, @@ -806,6 +727,7 @@ }, "yaxes": [ { + "$$hashKey": "object:3040", "decimals": null, "format": "cps", "label": "Calls to `Future::poll`/second", @@ -815,6 +737,7 @@ "show": true }, { + "$$hashKey": "object:3041", "format": "short", "label": null, "logBase": 1, @@ -835,11 +758,11 @@ "h": 1, "w": 24, "x": 0, - "y": 49 + "y": 38 }, "id": 27, "panels": [], - "title": "Misc", + "title": "Unbounded Channels", "type": "row" }, { @@ -848,13 +771,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, - "y": 50 + "y": 39 }, "hiddenSeries": false, "id": 32, @@ -871,21 +801,23 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"} - ignoring(action) ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"received\"}) by (entity)", + "expr": "(\n ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"} - ignoring(action) ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"received\"}\n)\n\n# Fallback if the `received` is null\nor on(instance) ${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}", "interval": "", "legendFormat": "{{entity}}", "refId": "B" @@ -895,7 +827,7 @@ "timeFrom": null, "timeRegions": [], 
"timeShift": null, - "title": "Unbounded channels size (average per node)", + "title": "Unbounded channels size", "tooltip": { "shared": true, "sort": 2, @@ -911,6 +843,7 @@ }, "yaxes": [ { + "$$hashKey": "object:626", "format": "short", "label": null, "logBase": 1, @@ -919,6 +852,7 @@ "show": true }, { + "$$hashKey": "object:627", "format": "short", "label": null, "logBase": 1, @@ -938,13 +872,20 @@ "dashLength": 10, "dashes": false, "datasource": "$data_source", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, - "y": 57 + "y": 46 }, "hiddenSeries": false, "id": 33, @@ -961,21 +902,23 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.6", "pointradius": 2, "points": false, "renderer": "flot", + "repeat": "nodename", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}[10m])) by (entity)", + "expr": "irate(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}[$__rate_interval])", "interval": "", "legendFormat": "{{entity}}", "refId": "B" @@ -985,7 +928,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Unbounded channels rate (average per node)", + "title": "Unbounded channels message sending rate (1s)", "tooltip": { "shared": true, "sort": 2, @@ -1001,6 +944,7 @@ }, "yaxes": [ { + "$$hashKey": "object:626", "format": "cps", "label": null, "logBase": 1, @@ -1009,6 +953,7 @@ "show": true }, { + "$$hashKey": "object:627", "format": "short", "label": null, "logBase": 1, @@ -1024,7 +969,7 @@ } ], "refresh": false, - "schemaVersion": 22, + "schemaVersion": 26, "style": "dark", "tags": [], "templating": { @@ -1034,9 +979,9 @@ "current": {}, "datasource": "$data_source", "definition": "${metric_namespace}_process_start_time_seconds", + "error": null, "hide": 0, - "includeAll": true, - "index": -1, + "includeAll": false, "label": "Instance filter", "multi": true, "name": "nodename", @@ -1055,15 +1000,18 @@ { "current": { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false }, + "error": null, "hide": 2, "label": "Prefix of the metrics", "name": "metric_namespace", "options": [ { "value": "${VAR_METRIC_NAMESPACE}", - "text": "${VAR_METRIC_NAMESPACE}" + "text": "${VAR_METRIC_NAMESPACE}", + "selected": false } ], "query": "${VAR_METRIC_NAMESPACE}", @@ -1076,6 +1024,7 @@ "text": "prometheus.parity-mgmt", "value": "prometheus.parity-mgmt" }, + "error": null, "hide": 0, "includeAll": false, "label": "Source of all the data", @@ -1108,11 +1057,8 @@ "1d" ] }, - "timezone": "", + "timezone": "utc", "title": "Substrate Service Tasks", "uid": "3LA6XNqZz", - "variables": { - "list": [] - }, - "version": 52 -} + "version": 69 +} \ No newline at end of file diff --git a/.maintain/node-template-release.sh b/.maintain/node-template-release.sh index 1a6c245320593..cb5e72e7fa98f 100755 --- a/.maintain/node-template-release.sh +++ b/.maintain/node-template-release.sh @@ -10,7 +10,7 @@ if [ "$#" -ne 1 ]; then exit 1 fi -PATH_TO_ARCHIVE=$(pwd)/$1 +PATH_TO_ARCHIVE=$1 cd $PROJECT_ROOT/.maintain/node-template-release -cargo run $PROJECT_ROOT/bin/node-template $PATH_TO_ARCHIVE +cargo run 
$PROJECT_ROOT/bin/node-template $PROJECT_ROOT/$PATH_TO_ARCHIVE diff --git a/.maintain/node-template-release/Cargo.toml b/.maintain/node-template-release/Cargo.toml index dd3166d58ddf4..c1d9f2da7faea 100644 --- a/.maintain/node-template-release/Cargo.toml +++ b/.maintain/node-template-release/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template-release" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "GPL-3.0" diff --git a/.maintain/node-template-release/src/main.rs b/.maintain/node-template-release/src/main.rs index a1d85bf33fe33..7dcb1f0f4d816 100644 --- a/.maintain/node-template-release/src/main.rs +++ b/.maintain/node-template-release/src/main.rs @@ -1,8 +1,11 @@ use structopt::StructOpt; use std::{ - path::{PathBuf, Path}, collections::HashMap, fs::{File, OpenOptions, self}, io::{Read, Write}, - process::Command + collections::HashMap, + fs::{self, File, OpenOptions}, + io::{Read, Write}, + path::{Path, PathBuf}, + process::Command, }; use glob; @@ -40,11 +43,9 @@ fn find_cargo_tomls(path: PathBuf) -> Vec<PathBuf> { let glob = glob::glob(&path).expect("Generates globbing pattern"); let mut result = Vec::new(); - glob.into_iter().for_each(|file| { - match file { - Ok(file) => result.push(file), - Err(e) => println!("{:?}", e), - } + glob.into_iter().for_each(|file| match file { - Ok(file) => result.push(file), - Err(e) => println!("{:?}", e), + Ok(file) => result.push(file), + Err(e) => println!("{:?}", e), }); if result.is_empty() { @@ -78,31 +79,44 @@ fn get_git_commit_id(path: &Path) -> String { /// Parse the given `Cargo.toml` into a `HashMap` fn parse_cargo_toml(file: &Path) -> CargoToml { let mut content = String::new(); - File::open(file).expect("Cargo.toml exists").read_to_string(&mut content).expect("Reads file"); + File::open(file) + .expect("Cargo.toml exists") + .read_to_string(&mut content) + .expect("Reads file"); toml::from_str(&content).expect("Cargo.toml is a valid toml file") } /// Replaces all substrate path dependencies with a git dependency.
-fn replace_path_dependencies_with_git(cargo_toml_path: &Path, commit_id: &str, cargo_toml: &mut CargoToml) { +fn replace_path_dependencies_with_git( + cargo_toml_path: &Path, + commit_id: &str, + cargo_toml: &mut CargoToml, +) { let mut cargo_toml_path = cargo_toml_path.to_path_buf(); // remove `Cargo.toml` cargo_toml_path.pop(); for &table in &["dependencies", "build-dependencies", "dev-dependencies"] { - let mut dependencies: toml::value::Table = match cargo_toml - .remove(table) - .and_then(|v| v.try_into().ok()) { - Some(deps) => deps, - None => continue, - }; + let mut dependencies: toml::value::Table = + match cargo_toml.remove(table).and_then(|v| v.try_into().ok()) { + Some(deps) => deps, + None => continue, + }; let deps_rewritten = dependencies .iter() - .filter_map(|(k, v)| v.clone().try_into::<toml::value::Table>().ok().map(move |v| (k, v))) - .filter(|t| t.1.contains_key("path")) + .filter_map(|(k, v)| { + v.clone().try_into::<toml::value::Table>().ok().map(move |v| (k, v)) + }) .filter(|t| { - // if the path does not exists, we need to add this as git dependency - t.1.get("path").unwrap().as_str().map(|path| !cargo_toml_path.join(path).exists()).unwrap_or(false) + t.1.contains_key("path") && { + // if the path does not exist, we need to add this as a git dependency + t.1.get("path") + .unwrap() + .as_str() + .map(|path| !cargo_toml_path.join(path).exists()) + .unwrap_or(false) + } }) .map(|(k, mut v)| { // remove `path` and add `git` and `rev` @@ -111,7 +125,8 @@ fn replace_path_dependencies_with_git(cargo_toml_path: &Path, commit_id: &str, c v.insert("rev".into(), commit_id.into()); (k.clone(), v.into()) - }).collect::<Vec<_>>(); + }) + .collect::<Vec<_>>(); dependencies.extend(deps_rewritten.into_iter()); @@ -136,8 +151,9 @@ fn update_top_level_cargo_toml( cargo_toml.insert("profile".into(), profile.into()); - let members = workspace_members.iter() - .map(|p| + let members = workspace_members + .iter() + .map(|p| { p.strip_prefix(node_template_path) .expect("Workspace member is a child of the node template path!") .parent() @@ -146,7 +162,7 @@ fn update_top_level_cargo_toml( .expect("The given path ends with `Cargo.toml` as file name!") .display() .to_string() - ) + }) .collect::<Vec<_>>(); let mut members_section = toml::value::Table::new(); @@ -164,24 +180,20 @@ fn write_cargo_toml(path: &Path, cargo_toml: CargoToml) { /// Build and test the generated node-template fn build_and_test(path: &Path, cargo_tomls: &[PathBuf]) { // Build node - assert!( - Command::new("cargo") - .args(&["build", "--all"]) - .current_dir(path) - .status() - .expect("Compiles node") - .success() - ); + assert!(Command::new("cargo") + .args(&["build", "--all"]) + .current_dir(path) + .status() + .expect("Compiles node") + .success()); // Test node - assert!( - Command::new("cargo") - .args(&["test", "--all"]) - .current_dir(path) - .status() - .expect("Tests node") - .success() - ); + assert!(Command::new("cargo") + .args(&["test", "--all"]) + .current_dir(path) + .status() + .expect("Tests node") + .success()); // Remove all `target` directories for toml in cargo_tomls { @@ -190,7 +202,8 @@ fn build_and_test(path: &Path, cargo_tomls: &[PathBuf]) { target_path = target_path.join("target"); if target_path.exists() { - fs::remove_dir_all(&target_path).expect(&format!("Removes `{}`", target_path.display())); + fs::remove_dir_all(&target_path) + .expect(&format!("Removes `{}`", target_path.display())); } } } @@ -220,7 +233,10 @@ fn main() { // Check if top level Cargo.toml exists.
If not, create one in the destination if !cargo_tomls.contains(&top_level_cargo_toml_path) { // create the top_level_cargo_toml - OpenOptions::new().create(true).write(true).open(top_level_cargo_toml_path.clone()) + OpenOptions::new() + .create(true) + .write(true) + .open(top_level_cargo_toml_path.clone()) .expect("Create root level `Cargo.toml` failed."); // push into our data structure @@ -234,9 +250,8 @@ fn main() { // Check if this is the top level `Cargo.toml`, as this requires some special treatments. if top_level_cargo_toml_path == *t { // All workspace member `Cargo.toml` file paths. - let workspace_members = cargo_tomls.iter() - .filter(|p| **p != top_level_cargo_toml_path) - .collect(); + let workspace_members = + cargo_tomls.iter().filter(|p| **p != top_level_cargo_toml_path).collect(); update_top_level_cargo_toml(&mut cargo_toml, workspace_members, &node_template_path); } @@ -244,10 +259,21 @@ fn main() { write_cargo_toml(&t, cargo_toml); }); + // adding root rustfmt to node template build path + let node_template_rustfmt_toml_path = node_template_path.join("rustfmt.toml"); + let root_rustfmt_toml = + &options.node_template.join("../../rustfmt.toml"); + if root_rustfmt_toml.exists() { + fs::copy(&root_rustfmt_toml, &node_template_rustfmt_toml_path) + .expect("Copying rustfmt.toml."); + } + build_and_test(&node_template_path, &cargo_tomls); - let output = GzEncoder::new(File::create(&options.output) - .expect("Creates output file"), Compression::default()); + let output = GzEncoder::new( + File::create(&options.output).expect("Creates output file"), + Compression::default(), + ); let mut tar = tar::Builder::new(output); tar.append_dir_all("substrate-node-template", node_template_path) .expect("Writes substrate-node-template archive"); diff --git a/.maintain/update-copyright.sh b/.maintain/update-copyright.sh index d48fc3cc979d6..d67cab7c1e152 100755 --- a/.maintain/update-copyright.sh +++ b/.maintain/update-copyright.sh @@ -1,15 +1,14 @@ #!/usr/bin/env bash -SINGLE_DATES=$(grep -lr "// Copyright [0-9]* Parity Technologies (UK) Ltd.") -RANGE_DATES=$(grep -lr "// Copyright [0-9]*-[0-9]* Parity Technologies (UK) Ltd.") +SINGLE_DATES=$(grep -lr "// Copyright (C) [0-9]* Parity Technologies (UK) Ltd.") YEAR=$(date +%Y) for file in $SINGLE_DATES; do - FILE_YEAR=$(cat $file | sed -n "s|// Copyright \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|\1|p") + FILE_YEAR=$(cat $file | sed -n "s|// Copyright (C) \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|\1|p") if [ $YEAR -ne $FILE_YEAR ]; then - sed -i -e "s|// Copyright \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|// Copyright \1-$YEAR Parity Technologies (UK) Ltd.|g" $file + sed -i -e "s|// Copyright (C) \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\) Parity Technologies (UK) Ltd.|// Copyright (C) \1-$YEAR Parity Technologies (UK) Ltd.|g" $file fi done -grep -lr "// Copyright [0-9]*-[0-9]* Parity Technologies (UK) Ltd." | - xargs sed -i -e "s|// Copyright \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\)-[[:digit:]][[:digit:]][[:digit:]][[:digit:]] Parity Technologies (UK) Ltd.|// Copyright \1-$YEAR Parity Technologies (UK) Ltd.|g" +grep -lr "// Copyright (C) [0-9]*-[0-9]* Parity Technologies (UK) Ltd." 
| + xargs sed -i -e "s|// Copyright (C) \([[:digit:]][[:digit:]][[:digit:]][[:digit:]]\)-[[:digit:]][[:digit:]][[:digit:]][[:digit:]] Parity Technologies (UK) Ltd.|// Copyright (C) \1-$YEAR Parity Technologies (UK) Ltd.|g" diff --git a/Cargo.lock b/Cargo.lock index f43bc9e7ec839..0f75bc2b51626 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "Inflector" version = "0.11.4" @@ -12,108 +14,83 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.13.0" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" dependencies = [ - "gimli 0.22.0", + "gimli 0.24.0", +] + +[[package]] +name = "addr2line" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" +dependencies = [ + "gimli 0.25.0", ] [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aead" -version = "0.3.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] name = "aes" -version = "0.4.0" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5" +checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ - "aes-soft", - "aesni", - "block-cipher", + "cfg-if 1.0.0", + "cipher", + "cpufeatures 0.2.1", + "opaque-debug 0.3.0", ] [[package]] name = "aes-gcm" -version = "0.6.0" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f5007801316299f922a6198d1d09a0bae95786815d066d5880d13f7c45ead1" +checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ "aead", "aes", - "block-cipher", + "cipher", + "ctr", "ghash", - "subtle 2.2.3", -] - -[[package]] -name = "aes-soft" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" -dependencies = [ - "block-cipher", - "byteorder 1.3.4", - "opaque-debug 0.2.3", -] - -[[package]] -name = "aesni" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264" -dependencies = [ - "block-cipher", - "opaque-debug 0.2.3", + "subtle 2.4.1", ] [[package]] name = "ahash" -version = "0.2.19" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29661b60bec623f0586702976ff4d0c9942dcb6723161c2df0eea78455cfedfb" +checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" dependencies = [ - "const-random", + "getrandom 0.2.3", + "once_cell", + "version_check 0.9.3", ] -[[package]] 
-name = "ahash" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" - [[package]] name = "aho-corasick" -version = "0.7.13" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] -[[package]] -name = "alga" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex", - "num-traits", -] - [[package]] name = "ansi_term" version = "0.11.0" @@ -134,30 +111,24 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.31" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" +checksum = "61604a8f862e1d5c3229fdd78f8b02c68dcf73a4c4b05fd636d12240aaa242c1" [[package]] name = "approx" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" +checksum = "072df7202e63b127ab55acfe16ce97013d5b97bf160489336d3f1840fd78e99e" dependencies = [ "num-traits", ] [[package]] name = "arbitrary" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb544f1057eaaff4b34f8c4dcf56fc3cd04debd291998405d135017a7c3c0f4" - -[[package]] -name = "arc-swap" -version = "0.4.7" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" +checksum = "577b08a4acd7b99869f863c50011b01eb73424ccc798ecd996f2e24817adfca7" [[package]] name = "arrayref" @@ -176,35 +147,29 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] -name = "asn1_der" -version = "0.6.3" +name = "arrayvec" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" -dependencies = [ - "asn1_der_derive", -] +checksum = "be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd" [[package]] -name = "asn1_der_derive" -version = "0.1.2" +name = "asn1_der" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" -dependencies = [ - "quote", - "syn", -] +checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" [[package]] name = "assert_cmd" -version = "1.0.1" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c88b9ca26f9c16ec830350d309397e74ee9abdfd8eb1f71cb6ecc71a3fc818da" +checksum = "c98233c6673d8601ab23e77eb38f999c51100d46c5703b17288c57fddf3a1ffe" dependencies = [ + "bstr", "doc-comment", "predicates", "predicates-core", @@ -214,15 +179,25 @@ dependencies = [ [[package]] name = "assert_matches" -version = "1.3.0" +version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn", +] [[package]] name = "async-channel" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" dependencies = [ "concurrent-queue", "event-listener", @@ -231,53 +206,62 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d373d78ded7d0b3fa8039375718cde0aace493f2e34fb60f51cbf567562ca801" +checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" dependencies = [ "async-task", "concurrent-queue", "fastrand", "futures-lite", - "once_cell 1.4.1", - "vec-arena", + "once_cell", + "slab", ] [[package]] name = "async-global-executor" -version = "1.3.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fefeb39da249f4c33af940b779a56723ce45809ef5c54dad84bb538d4ffb6d9e" +checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" dependencies = [ + "async-channel", "async-executor", "async-io", + "async-mutex", + "blocking", "futures-lite", "num_cpus", - "once_cell 1.4.1", + "once_cell", ] [[package]] name = "async-io" -version = "1.1.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38628c78a34f111c5a6b98fc87dfc056cd1590b61afe748b145be4623c56d194" +checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b" dependencies = [ - "cfg-if", "concurrent-queue", - "fastrand", "futures-lite", "libc", - "log", - "once_cell 1.4.1", + "log 0.4.14", + "once_cell", "parking", "polling", - "socket2", - "vec-arena", + "slab", + "socket2 0.4.2", "waker-fn", - "wepoll-sys-stjepang", "winapi 0.3.9", ] +[[package]] +name = "async-lock" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b" +dependencies = [ + "event-listener", +] + [[package]] name = "async-mutex" version = "1.4.0" @@ -287,67 +271,138 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-process" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b21b63ab5a0db0369deb913540af2892750e42d949faacc7a61495ac418a1692" +dependencies = [ + "async-io", + "blocking", + "cfg-if 1.0.0", + "event-listener", + "futures-lite", + "libc", + "once_cell", + "signal-hook", + "winapi 0.3.9", +] + [[package]] name = "async-std" -version = "1.6.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9fa76751505e8df1c7a77762f60486f60c71bbd9b8557f4da6ad47d083732ed" +checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" dependencies = [ + "async-attributes", + "async-channel", "async-global-executor", "async-io", - "async-mutex", - "blocking", - "crossbeam-utils", + "async-lock", + "async-process", + "crossbeam-utils 0.8.5", 
"futures-channel", "futures-core", "futures-io", "futures-lite", "gloo-timers", "kv-log-macro", - "log", + "log 0.4.14", "memchr", "num_cpus", - "once_cell 1.4.1", - "pin-project-lite", + "once_cell", + "pin-project-lite 0.2.7", "pin-utils", "slab", "wasm-bindgen-futures", ] [[package]] -name = "async-task" -version = "4.0.2" +name = "async-std-resolver" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab27c1aa62945039e44edaeee1dc23c74cc0c303dd5fe0fb462a184f1c3a518" +checksum = "ed4e2c3da14d8ad45acb1e3191db7a918e9505b6f155b218e70a7c9a1a48c638" +dependencies = [ + "async-std", + "async-trait", + "futures-io", + "futures-util", + "pin-utils", + "trust-dns-resolver", +] [[package]] -name = "async-tls" -version = "0.8.0" +name = "async-stream" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df097e3f506bec0e1a24f06bb3c962c228f36671de841ff579cb99f371772634" +checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" dependencies = [ - "futures 0.3.5", - "rustls", - "webpki", - "webpki-roots 0.19.0", + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] +[[package]] +name = "async-task" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" + [[package]] name = "async-trait" -version = "0.1.37" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caae68055714ff28740f310927e04f2eba76ff580b16fb18ed90073ee71646f7" +checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "asynchronous-codec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb4401f0a3622dad2e0763fa79e0eb328bc70fb7dccfdd645341f00d671247d6" +dependencies = [ + "bytes 1.1.0", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite 0.2.7", +] + +[[package]] +name = "asynchronous-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" +dependencies = [ + "bytes 1.1.0", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite 0.2.7", +] + [[package]] name = "atomic" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" +checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +dependencies = [ + "autocfg 1.0.1", +] [[package]] name = "atomic-waker" @@ -374,24 +429,44 @@ checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" [[package]] name = "autocfg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.50" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" +checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" dependencies = [ - "addr2line", - "cfg-if", + "addr2line 0.16.0", + "cc", + "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.20.0", + "object 0.26.2", "rustc-demangle", ] +[[package]] +name = "bae" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec107f431ee3d8a8e45e6dd117adab769556ef463959e77bf6a4888d5fd500cf" +dependencies = [ + "heck", + "proc-macro-error 0.4.12", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "base-x" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" + [[package]] name = "base58" version = "0.1.0" @@ -400,9 +475,22 @@ checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" [[package]] name = "base64" -version = "0.11.0" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" +dependencies = [ + "byteorder", + "safemem", +] + +[[package]] +name = "base64" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" +checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +dependencies = [ + "byteorder", +] [[package]] name = "base64" @@ -410,88 +498,88 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "beef" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736" +dependencies = [ + "serde", +] + [[package]] name = "bincode" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "byteorder 1.3.4", "serde", ] [[package]] name = "bindgen" -version = "0.54.0" +version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" +checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375" dependencies = [ "bitflags", "cexpr", - "cfg-if", "clang-sys", - "clap", - "env_logger", "lazy_static", "lazycell", - "log", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "which", -] - -[[package]] -name = "bip39" -version = "0.6.0-beta.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" -dependencies = [ - "failure", - "hashbrown 0.1.8", - "hmac", - "once_cell 0.1.8", - "pbkdf2", - "rand 0.6.5", - "sha2 0.8.2", ] [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" 
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] -name = "bitmask" -version = "0.5.0" +name = "bitvec" +version = "0.19.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da9b3d9f6f585199287a473f4f8dfab6566cf827d15c00c219f53c645687ead" +checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" +dependencies = [ + "funty", + "radium 0.5.3", + "tap", + "wyz", +] [[package]] name = "bitvec" -version = "0.17.4" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" dependencies = [ - "either", - "radium", + "funty", + "radium 0.6.2", + "tap", + "wyz", ] [[package]] name = "blake2" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ce5b6108f8e154604bd4eb76a2f726066c3464d5a552a4229262a18c9bb471" +checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ - "byte-tools", - "byteorder 1.3.4", "crypto-mac 0.8.0", "digest 0.9.0", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] @@ -506,24 +594,39 @@ dependencies = [ [[package]] name = "blake2b_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", "constant_time_eq", ] [[package]] name = "blake2s_simd" -version = "0.5.10" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "constant_time_eq", +] + +[[package]] +name = "blake3" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9e07352b829279624ceb7c64adb4f585dacdb81d35cafae81139ccd617cf44" +checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3" dependencies = [ "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", + "cc", + "cfg-if 0.1.10", "constant_time_eq", + "crypto-mac 0.8.0", + "digest 0.9.0", ] [[package]] @@ -532,10 +635,10 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding", + "block-padding 0.1.5", "byte-tools", - "byteorder 1.3.4", - "generic-array 0.12.3", + "byteorder", + "generic-array 0.12.4", ] [[package]] @@ -544,16 +647,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.3", -] - -[[package]] -name = "block-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10" -dependencies = [ - "generic-array 0.14.3", + "block-padding 0.2.1", + "generic-array 0.14.4", ] [[package]] @@ -565,6 +660,12 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "blocking" version = "1.0.2" @@ -576,20 +677,20 @@ dependencies = [ "atomic-waker", "fastrand", "futures-lite", - "once_cell 1.4.1", + "once_cell", ] [[package]] name = "bs58" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31accafdb70df7871592c058eca3985b71104e15ac32f64706022c58867da931" +checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279" dependencies = [ "lazy_static", "memchr", @@ -608,15 +709,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "byte-slice-cast" -version = "0.3.5" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" +checksum = "ca0796d76a983651b4a0ddda16203032759f2fd9103d9181f7c65c06ee8872e6" [[package]] name = "byte-tools" @@ -626,15 +727,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" - -[[package]] -name = "byteorder" -version = "1.3.4" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" @@ -642,8 +737,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" dependencies = [ - "byteorder 1.3.4", - "either", + "byteorder", "iovec", ] @@ -654,10 +748,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] -name = "c_linked_list" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" +name = "bytes" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cache-padded" @@ -665,41 +759,61 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +[[package]] +name = "camino" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52d74260d9bf6944e2208aa46841b4b8f0d7ffc0849a06837b2f510337f86b2b" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +dependencies = [ + "serde", +] + [[package]] name = "cargo_metadata" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052dbdd9db69a339d5fa9ac87bfe2e1319f709119f0345988a597af82bb1011c" +checksum = "081e3f0755c1f380c2d010481b6fa2e02973586d5f2b24eebb7a2a1d98b143d8" dependencies = [ - "semver 0.10.0", + "camino", + "cargo-platform", + "semver 0.11.0", + "semver-parser 0.10.2", "serde", - "serde_derive", "serde_json", ] [[package]] name = "cast" -version = "0.2.3" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" +checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" dependencies = [ - "rustc_version", + "rustc_version 0.4.0", ] [[package]] name = "cc" -version = "1.0.58" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" +checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" dependencies = [ "jobserver", ] [[package]] name = "cexpr" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89" dependencies = [ "nom", ] @@ -710,26 +824,34 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chacha20" -version = "0.4.3" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c0f07ac275808b7bf9a39f2fd013aae1498be83632814c8c4e0bd53f2dc58" +checksum = "fee7ad89dc1128635074c268ee661f90c3f7e83d9fd12910608c36b47d6c3412" dependencies = [ - "stream-cipher 0.4.1", + "cfg-if 1.0.0", + "cipher", + "cpufeatures 0.1.5", "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.5.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b0c90556d8e3fec7cf18d84a2f53d27b21288f2fe481b830fadcf809e48205" +checksum = "1580317203210c517b6d44794abfbe600698276db18127e37ad3e69bf5e848e5" dependencies = [ "aead", "chacha20", + "cipher", "poly1305", - "stream-cipher 0.4.1", "zeroize", ] @@ -749,38 +871,78 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.13" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ - "js-sys", + "libc", "num-integer", "num-traits", "time", - "wasm-bindgen", + "winapi 0.3.9", +] + +[[package]] +name = "cid" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8709d481fb78b9808f34a1b4b4fadd08a15a0971052c18bc2b751faefaed595e" +dependencies = [ + "multibase 0.8.0", + "multihash 0.11.4", + "unsigned-varint 0.3.3", +] + +[[package]] +name = "cid" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff0e3bc0b6446b3f9663c1a6aba6ef06c5aeaa1bc92bd18077be337198ab9768" +dependencies = [ + "multibase 0.8.0", + "multihash 0.13.2", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "cipher" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "ckb-merkle-mountain-range" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" +dependencies = [ + "cfg-if 0.1.10", ] [[package]] name = "clang-sys" -version = "0.29.3" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" +checksum = "10612c0ec0e0a1ff0e97980647cb058a6e7aedb913d01d009c406b8b7d0b26ee" dependencies = [ "glob", "libc", - "libloading", + "libloading 0.7.0", ] [[package]] name = "clap" -version = "2.33.1" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "ansi_term 0.11.0", "atty", "bitflags", - "strsim", + "strsim 0.8.0", "textwrap", "unicode-width", "vec_map", @@ -795,15 +957,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "cloudabi" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" -dependencies = [ - "bitflags", -] - [[package]] name = "concurrent-queue" version = "1.2.2" @@ -814,186 +967,240 @@ dependencies = [ ] [[package]] -name = "console_error_panic_hook" -version = "0.1.6" +name = "constant_time_eq" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -dependencies = [ - "cfg-if", - "wasm-bindgen", -] +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] -name = "console_log" -version = "0.1.2" +name = "convert_case" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7871d2947441b0fdd8e2bd1ce2a2f75304f896582c0d572162d48290683c48" -dependencies = [ - "log", - "web-sys", -] +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] -name = "const-random" -version = "0.1.11" +name = "core-foundation" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dc82c12dc2ee6e1ded861cf7d582b46f66f796d1b6c93fa28b911ead95da02" +checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" dependencies = [ - "const-random-macro", - "proc-macro-hack", + "core-foundation-sys", + "libc", ] [[package]] -name = "const-random-macro" -version = "0.1.11" +name = "core-foundation-sys" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + +[[package]] +name = "cpp_demangle" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc757bbb9544aa296c2ae00c679e81f886b37e28e59097defe0cf524306f6685" +checksum = 
"8ea47428dc9d2237f3c6bc134472edfd63ebba0af932e783506dcfd66f10d18a" dependencies = [ - "getrandom 0.2.0", - "proc-macro-hack", + "cfg-if 1.0.0", ] [[package]] -name = "constant_time_eq" +name = "cpufeatures" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" +dependencies = [ + "libc", +] [[package]] -name = "core-foundation" -version = "0.7.0" +name = "cpufeatures" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" dependencies = [ - "core-foundation-sys", "libc", ] [[package]] -name = "core-foundation-sys" -version = "0.7.0" +name = "cranelift-bforest" +version = "0.68.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +checksum = "9221545c0507dc08a62b2d8b5ffe8e17ac580b0a74d1813b496b8d70b070fbd0" +dependencies = [ + "cranelift-entity 0.68.0", +] [[package]] -name = "cpuid-bool" -version = "0.1.2" +name = "cranelift-bforest" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" +checksum = "c8ca3560686e7c9c7ed7e0fe77469f2410ba5d7781b1acaa9adc8d8deea28e3e" +dependencies = [ + "cranelift-entity 0.74.0", +] [[package]] -name = "cranelift-bforest" -version = "0.66.0" +name = "cranelift-codegen" +version = "0.68.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dcc286b052ee24a1e5a222e7c1125e6010ad35b0f248709b9b3737a8fedcfdf" +checksum = "7e9936ea608b6cd176f107037f6adbb4deac933466fc7231154f96598b2d3ab1" dependencies = [ - "cranelift-entity", + "byteorder", + "cranelift-bforest 0.68.0", + "cranelift-codegen-meta 0.68.0", + "cranelift-codegen-shared 0.68.0", + "cranelift-entity 0.68.0", + "gimli 0.22.0", + "log 0.4.14", + "regalloc", + "smallvec 1.6.1", + "target-lexicon 0.11.2", + "thiserror", ] [[package]] name = "cranelift-codegen" -version = "0.66.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d9badfe36176cb653506091693bc2bb1970c9bddfcd6ec7fac404f7eaec6f38" +checksum = "baf9bf1ffffb6ce3d2e5ebc83549bd2436426c99b31cc550d521364cbe35d276" dependencies = [ - "byteorder 1.3.4", - "cranelift-bforest", - "cranelift-codegen-meta", - "cranelift-codegen-shared", - "cranelift-entity", - "gimli 0.21.0", - "log", + "cranelift-bforest 0.74.0", + "cranelift-codegen-meta 0.74.0", + "cranelift-codegen-shared 0.74.0", + "cranelift-entity 0.74.0", + "gimli 0.24.0", + "log 0.4.14", "regalloc", "serde", - "smallvec 1.4.1", - "target-lexicon", - "thiserror", + "smallvec 1.6.1", + "target-lexicon 0.12.2", +] + +[[package]] +name = "cranelift-codegen-meta" +version = "0.68.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ef2b2768568306540f4c8db3acce9105534d34c4a1e440529c1e702d7f8c8d7" +dependencies = [ + "cranelift-codegen-shared 0.68.0", + "cranelift-entity 0.68.0", ] [[package]] name = "cranelift-codegen-meta" -version = "0.66.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3f460031861e4f4ad510be62b2ae50bba6cc886b598a36f9c0a970feab9598" +checksum = 
"4cc21936a5a6d07e23849ffe83e5c1f6f50305c074f4b2970ca50c13bf55b821" dependencies = [ - "cranelift-codegen-shared", - "cranelift-entity", + "cranelift-codegen-shared 0.74.0", + "cranelift-entity 0.74.0", ] [[package]] name = "cranelift-codegen-shared" -version = "0.66.0" +version = "0.68.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6759012d6d19c4caec95793f052613e9d4113e925e7f14154defbac0f1d4c938" + +[[package]] +name = "cranelift-codegen-shared" +version = "0.74.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca5b6ffaa87560bebe69a5446449da18090b126037920b0c1c6d5945f72faf6b" +dependencies = [ + "serde", +] + +[[package]] +name = "cranelift-entity" +version = "0.68.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ad12409e922e7697cd0bdc7dc26992f64a77c31880dfe5e3c7722f4710206d" +checksum = "86badbce14e15f52a45b666b38abe47b204969dd7f8fb7488cb55dd46b361fa6" +dependencies = [ + "serde", +] [[package]] name = "cranelift-entity" -version = "0.66.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97cdc58972ea065d107872cfb9079f4c92ade78a8af85aaff519a65b5d13f71" +checksum = "7d6b4a8bef04f82e4296782646f733c641d09497df2fabf791323fefaa44c64c" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.66.0" +version = "0.68.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b608bb7656c554d0a4cf8f50c7a10b857e80306f6ff829ad6d468a7e2323c8d8" +dependencies = [ + "cranelift-codegen 0.68.0", + "log 0.4.14", + "smallvec 1.6.1", + "target-lexicon 0.11.2", +] + +[[package]] +name = "cranelift-frontend" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ef419efb4f94ecc02e5d9fbcc910d2bb7f0040e2de570e63a454f883bc891d6" +checksum = "c31b783b351f966fce33e3c03498cb116d16d97a8f9978164a60920bd0d3a99c" dependencies = [ - "cranelift-codegen", - "log", - "smallvec 1.4.1", - "target-lexicon", + "cranelift-codegen 0.74.0", + "log 0.4.14", + "smallvec 1.6.1", + "target-lexicon 0.12.2", ] [[package]] name = "cranelift-native" -version = "0.66.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e69d44d59826eef6794066ac2c0f4ad3975f02d97030c60dbc04e3886adf36e" +checksum = "a77c88d3dd48021ff1e37e978a00098524abd3513444ae252c08d37b310b3d2a" dependencies = [ - "cranelift-codegen", - "raw-cpuid", - "target-lexicon", + "cranelift-codegen 0.74.0", + "target-lexicon 0.12.2", ] [[package]] name = "cranelift-wasm" -version = "0.66.0" +version = "0.74.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "979df666b1304624abe99738e9e0e7c7479ee5523ba4b8b237df9ff49996acbb" +checksum = "edb6d408e2da77cdbbd65466298d44c86ae71c1785d2ab0d8657753cdb4d9d89" dependencies = [ - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "log", + "cranelift-codegen 0.74.0", + "cranelift-entity 0.74.0", + "cranelift-frontend 0.74.0", + "itertools", + "log 0.4.14", "serde", + "smallvec 1.6.1", "thiserror", - "wasmparser 0.59.0", + "wasmparser 0.78.2", ] [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] name = "criterion" 
-version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70daa7ceec6cf143990669a04c7df13391d55fb27bd4079d252fca774ba244d8" +checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" dependencies = [ "atty", "cast", "clap", "criterion-plot", "csv", - "itertools 0.9.0", + "itertools", "lazy_static", "num-traits", "oorandom", @@ -1010,59 +1217,66 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" +checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" dependencies = [ "cast", - "itertools 0.9.0", + "itertools", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", ] [[package]] name = "crossbeam-deque" -version = "0.7.3" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ + "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils", - "maybe-uninit", + "crossbeam-utils 0.8.5", ] [[package]] name = "crossbeam-epoch" -version = "0.8.2" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "autocfg 1.0.0", - "cfg-if", - "crossbeam-utils", + "cfg-if 1.0.0", + "crossbeam-utils 0.8.5", "lazy_static", - "maybe-uninit", "memoffset", - "scopeguard 1.1.0", + "scopeguard", ] [[package]] -name = "crossbeam-queue" -version = "0.2.3" +name = "crossbeam-utils" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "cfg-if", - "crossbeam-utils", - "maybe-uninit", + "autocfg 1.0.1", + "cfg-if 0.1.10", + "lazy_static", ] [[package]] name = "crossbeam-utils" -version = "0.7.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "autocfg 1.0.0", - "cfg-if", + "cfg-if 1.0.0", "lazy_static", ] @@ -1078,7 +1292,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" dependencies = [ - "generic-array 0.12.3", + "generic-array 0.12.4", "subtle 1.0.0", ] @@ -1088,15 +1302,25 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.3", - "subtle 2.2.3", + "generic-array 0.14.4", + "subtle 2.4.1", +] + +[[package]] +name = "crypto-mac" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.4.1", ] [[package]] name = "csv" -version = "1.1.3" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279" +checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr", "csv-core", @@ -1116,76 +1340,168 @@ dependencies = [ [[package]] name = "ct-logs" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c8e13110a84b6315df212c045be706af261fd364791cad863285439ebba672e" +checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" dependencies = [ "sct", ] [[package]] name = "ctor" -version = "0.1.15" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39858aa5bac06462d4dd4b9164848eb81ffc4aa5c479746393598fd193afa227" +checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa" dependencies = [ "quote", "syn", ] +[[package]] +name = "ctr" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" +dependencies = [ + "cipher", +] + [[package]] name = "cuckoofilter" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f" +checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" dependencies = [ - "byteorder 0.5.3", - "rand 0.3.23", + "byteorder", + "fnv", + "rand 0.7.3", ] [[package]] name = "curve25519-dalek" -version = "2.1.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" +checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" dependencies = [ - "byteorder 1.3.4", + "byteorder", "digest 0.8.1", "rand_core 0.5.1", - "subtle 2.2.3", + "subtle 2.4.1", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.4.1", "zeroize", ] +[[package]] +name = "darling" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "data-encoding" -version = "2.2.1" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + 
+[[package]] +name = "data-encoding-macro" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" +checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +dependencies = [ + "data-encoding", + "syn", +] [[package]] name = "derive_more" -version = "0.99.9" +version = "0.99.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76" +checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version 0.3.3", "syn", ] +[[package]] +name = "diff" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" + [[package]] name = "difference" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "generic-array 0.12.3", + "generic-array 0.12.4", ] [[package]] @@ -1194,27 +1510,47 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.3", + "generic-array 0.14.4", ] [[package]] name = "directories" -version = "2.0.2" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" +checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" dependencies = [ - "cfg-if", "dirs-sys", ] [[package]] -name = "dirs-sys" -version = "0.3.5" +name = "directories-next" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" dependencies = [ - "libc", - "redox_users", + "cfg-if 1.0.0", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +dependencies = [ + "libc", + "redox_users", + "winapi 0.3.9", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", "winapi 0.3.9", ] @@ -1230,8 +1566,8 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" dependencies = [ - "byteorder 1.3.4", - "quick-error", + "byteorder", + "quick-error 1.2.3", ] [[package]] @@ -1240,6 +1576,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "downcast-rs" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" + [[package]] name = "dyn-clonable" version = "0.9.0" @@ -1263,38 +1605,76 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.2" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" + +[[package]] +name = "dynasm" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdc2d9a5e44da60059bd38db2d05cbb478619541b8c79890547861ec1e3194f0" +dependencies = [ + "bitflags", + "byteorder", + "lazy_static", + "proc-macro-error 1.0.4", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dynasmrt" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82" +checksum = "42276e3f205fe63887cca255aa9a65a63fb72764c30b9a6252a7c7e46994f689" +dependencies = [ + "byteorder", + "dynasm", + "memmap2", +] [[package]] name = "ed25519" -version = "1.0.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf038a7b6fd7ef78ad3348b63f3a17550877b0e28f8d68bcc94894d1412158bc" +checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" dependencies = [ "signature", ] [[package]] name = "ed25519-dalek" -version = "1.0.0-pre.4" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a8a37f4e8b35af971e6db5e3897e7a6344caa3f92f6544f88125a1f5f0035a" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.0", "ed25519", "rand 0.7.3", "serde", - "sha2 0.8.2", + "sha2 0.9.8", "zeroize", ] [[package]] name = "either" -version = "1.6.0" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "enum-as-inner" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56b59865bce947ac5958779cfa508f6c3b9497cc762b7e24a12d11ccde2c4f" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] [[package]] name = "enumflags2" @@ -1317,132 +1697,103 @@ dependencies = [ ] [[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "environmental" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" - -[[package]] -name = "erased-serde" -version = "0.3.12" +name = "enumset" +version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ca8b296792113e1500fd935ae487be6e00ce318952a6880555554824d6ebf38" +checksum = "7e76129da36102af021b8e5000dab2c1c30dbef85c1e482beeff8da5dde0e0b0" dependencies = [ - "serde", + "enumset_derive", ] [[package]] -name = "errno" -version = "0.2.6" +name = "enumset_derive" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eab5ee3df98a279d9b316b1af6ac95422127b1290317e6d18c1743c99418b01" +checksum = "6451128aa6655d880755345d085494cf7561a6bee7c8dc821e5d77e6d267ecd4" dependencies = [ - "errno-dragonfly", - "libc", - "winapi 0.3.9", + "darling", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "errno-dragonfly" -version = "0.1.1" +name = "env_logger" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ - "gcc", - "libc", + "atty", + "humantime 1.3.0", + "log 0.4.14", + "regex", + "termcolor", ] [[package]] -name = "ethbloom" -version = "0.9.2" +name = "env_logger" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a6567e6fd35589fea0c63b94b4cf2e55573e413901bdbe60ab15cf0e25e5df" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", + "log 0.4.14", + "regex", ] [[package]] -name = "ethereum-types" -version = "0.9.2" +name = "env_logger" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473aecff686bd8e7b9db0165cbbb53562376b39bf35b427f0c60446a9e1634b0" +checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", + "atty", + "humantime 2.1.0", + "log 0.4.14", + "regex", + "termcolor", ] [[package]] -name = "event-listener" -version = "2.5.1" +name = "environmental" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" [[package]] -name = "evm" -version = "0.17.0" +name = "erased-serde" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68224b0aa788720ef0c8a23030a4412a021ed73df069a922bee8f0db9ed617e2" +checksum = "3de9ad4541d99dc22b59134e7ff8dc3d6c988c89ecd7324bf10a8362b07a2afa" dependencies = [ - "evm-core", - "evm-gasometer", - "evm-runtime", - "primitive-types", - "rlp", "serde", - "sha3", ] [[package]] -name = "evm-core" -version = "0.17.0" +name = "errno" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a040378759577447945c89da1b07d6e33fda32a97a104afe0ec3fa1c382949d" +checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" dependencies = [ - "primitive-types", + "errno-dragonfly", + "libc", + "winapi 0.3.9", ] [[package]] -name = "evm-gasometer" -version = "0.17.0" +name = "errno-dragonfly" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb5bc051afad6bb0735c82b46656bbdfac41917861307a608b1404a546fec42" +checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" dependencies = [ 
- "evm-core", - "evm-runtime", - "primitive-types", + "gcc", + "libc", ] [[package]] -name = "evm-runtime" -version = "0.17.0" +name = "event-listener" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7410f5677a52203d3fca02b0eb8f96f9799f3a45cff82946a8ed28379e6b1b04" -dependencies = [ - "evm-core", - "primitive-types", - "sha3", -] +checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" [[package]] name = "exit-future" @@ -1450,29 +1801,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.5", -] - -[[package]] -name = "failure" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", + "futures 0.3.17", ] [[package]] @@ -1489,9 +1818,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" +checksum = "b394ed3d285a429378d3b384b9eb1285267e7df4b166df24b7a6939a04dc392e" dependencies = [ "instant", ] @@ -1507,38 +1836,51 @@ dependencies = [ [[package]] name = "file-per-thread-logger" -version = "0.1.3" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" +dependencies = [ + "env_logger 0.7.1", + "log 0.4.14", +] + +[[package]] +name = "filetime" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b3937f028664bd0e13df401ba49a4567ccda587420365823242977f06609ed1" +checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98" dependencies = [ - "env_logger", - "log", + "cfg-if 1.0.0", + "libc", + "redox_syscall 0.2.10", + "winapi 0.3.9", ] [[package]] name = "finality-grandpa" -version = "0.12.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" +checksum = "e8ac3ff5224ef91f3c97e03eb1de2db82743427e91aaa5ac635f454f0b164f5a" dependencies = [ "either", - "futures 0.3.5", - "futures-timer 2.0.2", - "log", + "futures 0.3.17", + "futures-timer 3.0.2", + "log 0.4.14", "num-traits", "parity-scale-codec", - "parking_lot 0.9.0", - "rand 0.6.5", + "parking_lot 0.11.2", + "rand 0.8.4", + "scale-info", ] [[package]] name = "fixed-hash" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ - "byteorder 1.3.4", - "rand 0.7.3", + "byteorder", + "rand 0.8.4", "rustc-hex", "static_assertions", ] @@ -1551,11 +1893,11 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = 
"1.0.16" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crc32fast", "libc", "libz-sys", @@ -1568,23 +1910,50 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "fork-tree" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", ] +[[package]] +name = "form_urlencoded" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", + "percent-encoding 2.1.0", +] + [[package]] name = "frame-benchmarking" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "hex-literal", "linregress", + "log 0.4.14", "parity-scale-codec", - "paste 0.1.18", + "paste 1.0.5", + "scale-info", "sp-api", "sp-io", "sp-runtime", @@ -1595,15 +1964,21 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "Inflector", "chrono", "frame-benchmarking", + "frame-support", + "handlebars", + "linked-hash-map", + "log 0.4.14", "parity-scale-codec", "sc-cli", "sc-client-db", "sc-executor", "sc-service", + "serde", "sp-core", "sp-externalities", "sp-keystore", @@ -1612,19 +1987,35 @@ dependencies = [ "structopt", ] +[[package]] +name = "frame-election-provider-support" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-npos-elections", + "sp-runtime", + "sp-std", +] + [[package]] name = "frame-executive" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "hex-literal", "pallet-balances", - "pallet-indices", "pallet-transaction-payment", "parity-scale-codec", - "serde", + "scale-info", "sp-core", + "sp-inherents", "sp-io", "sp-runtime", "sp-std", @@ -1634,47 +2025,51 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "12.0.0" +version = "14.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96616f82e069102b95a72c87de4c84d2f87ef7f0f20630e78ce3824436483110" dependencies = [ + "cfg-if 1.0.0", "parity-scale-codec", + "scale-info", "serde", - "sp-core", - "sp-std", ] [[package]] name = "frame-support" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "bitmask", + "assert_matches", + "bitflags", "frame-metadata", "frame-support-procedural", "frame-system", "impl-trait-for-tuples", - "log", - "once_cell 1.4.1", + "log 0.4.14", + "once_cell", "parity-scale-codec", "parity-util-mem", - "paste 0.1.18", - "pretty_assertions", + "paste 1.0.5", + "pretty_assertions 0.6.1", + "scale-info", "serde", - "smallvec 1.4.1", - "sp-api", + "smallvec 1.6.1", 
"sp-arithmetic", "sp-core", "sp-inherents", "sp-io", "sp-runtime", + "sp-staking", "sp-state-machine", "sp-std", "sp-tracing", - "substrate-test-runtime-client", ] [[package]] name = "frame-support-procedural" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "Inflector", "frame-support-procedural-tools", "proc-macro2", "quote", @@ -1683,10 +2078,10 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro2", "quote", "syn", @@ -1694,7 +2089,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" -version = "2.0.0" +version = "3.0.0" dependencies = [ "proc-macro2", "quote", @@ -1703,31 +2098,45 @@ dependencies = [ [[package]] name = "frame-support-test" -version = "2.0.0" +version = "3.0.0" dependencies = [ - "frame-metadata", "frame-support", + "frame-support-test-pallet", + "frame-system", "parity-scale-codec", - "pretty_assertions", + "pretty_assertions 0.6.1", "rustversion", + "scale-info", "serde", + "sp-arithmetic", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-state-machine", "sp-std", + "sp-version", "trybuild", ] +[[package]] +name = "frame-support-test-pallet" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", +] + [[package]] name = "frame-system" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "criterion", "frame-support", - "impl-trait-for-tuples", + "log 0.4.14", "parity-scale-codec", + "scale-info", "serde", "sp-core", "sp-externalities", @@ -1740,13 +2149,13 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -1755,21 +2164,31 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "parity-scale-codec", "sp-api", ] [[package]] -name = "fs-swap" -version = "0.2.4" +name = "frame-try-runtime" +version = "0.10.0-dev" +dependencies = [ + "frame-support", + "sp-api", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "fs-swap" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921d332c89b3b61a826de38c61ee5b6e02c56806cade1b0e5d81bd71f57a71bb" +checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" dependencies = [ "lazy_static", "libc", - "libloading", + "libloading 0.5.2", "winapi 0.3.9", ] @@ -1785,9 +2204,9 @@ dependencies = [ [[package]] name = "fs_extra" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "fuchsia-cprng" @@ -1811,17 +2230,23 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "futures" -version = "0.1.29" +version = "0.1.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" dependencies = [ "futures-channel", "futures-core", @@ -1834,66 +2259,25 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" dependencies = [ "futures-core", "futures-sink", ] -[[package]] -name = "futures-channel-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5e5f4df964fa9c1c2f8bddeb5c3611631cacd93baf810fc8bb2fb4b495c263a" -dependencies = [ - "futures-core-preview", -] - [[package]] name = "futures-core" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" - -[[package]] -name = "futures-core-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35b6263fb1ef523c3056565fa67b1d16f0a8604ff12b11b08c25f28a734c60a" - -[[package]] -name = "futures-cpupool" -version = "0.1.8" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -dependencies = [ - "futures 0.1.29", - "num_cpus", -] - -[[package]] -name = "futures-diagnose" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" -dependencies = [ - "futures 0.1.29", - "futures 0.3.5", - "lazy_static", - "log", - "parking_lot 0.9.0", - "pin-project", - "serde", - "serde_json", -] +checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" [[package]] name = "futures-executor" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" dependencies = [ "futures-core", "futures-task", @@ -1903,51 +2287,60 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" [[package]] name = "futures-lite" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381a7ad57b1bad34693f63f6f377e1abded7a9c85c9d3eb6771e11c60aaadab9" +checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" dependencies = [ "fastrand", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite", + "pin-project-lite 0.2.7", "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.5" +version = "0.3.17" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" dependencies = [ + "autocfg 1.0.1", "proc-macro-hack", "proc-macro2", "quote", "syn", ] +[[package]] +name = "futures-rustls" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b" +dependencies = [ + "futures-io", + "rustls", + "webpki", +] + [[package]] name = "futures-sink" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" -dependencies = [ - "once_cell 1.4.1", -] +checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" [[package]] name = "futures-timer" @@ -1960,18 +2353,15 @@ name = "futures-timer" version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" -dependencies = [ - "gloo-timers", - "send_wrapper 0.4.0", -] [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" dependencies = [ - "futures 0.1.29", + "autocfg 1.0.1", + "futures 0.1.31", "futures-channel", "futures-core", "futures-io", @@ -1979,7 +2369,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project-lite 0.2.7", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1987,27 +2377,12 @@ dependencies = [ ] [[package]] -name = "futures-util-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce968633c17e5f97936bd2797b6e38fb56cf16a7422319f7ec2e30d3c470e8d" -dependencies = [ - "futures-channel-preview", - "futures-core-preview", - "pin-utils", - "slab", -] - -[[package]] -name = "futures_codec" -version = "0.4.1" +name = "fxhash" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" dependencies = [ - "bytes 0.5.6", - "futures 0.3.5", - "memchr", - "pin-project", + "byteorder", ] [[package]] @@ -2018,82 +2393,73 @@ checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" [[package]] name = "generic-array" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" dependencies = [ "typenum", ] [[package]] name = "generic-array" -version = "0.14.3" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60fb4bb6bba52f78a471264d9a3b7d026cc0af47b22cd2cffbc0b787ca003e63" 
+checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", - "version_check", + "version_check 0.9.3", ] [[package]] -name = "get_if_addrs" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abddb55a898d32925f3148bd281174a68eeb68bbfd9a5938a57b18f506ee4ef7" -dependencies = [ - "c_linked_list", - "get_if_addrs-sys", - "libc", - "winapi 0.2.8", -] - -[[package]] -name = "get_if_addrs-sys" -version = "0.1.1" +name = "getrandom" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d04f9fb746cf36b191c00f3ede8bde9c8e64f9f4b05ae2694a9ccf5e3f5ab48" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "gcc", + "cfg-if 1.0.0", "libc", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.1.14" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", + "js-sys", "libc", - "wasi", + "wasi 0.10.0+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] -name = "getrandom" -version = "0.2.0" +name = "ghash" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4" +checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ - "cfg-if", - "libc", - "wasi", + "opaque-debug 0.3.0", + "polyval", ] [[package]] -name = "ghash" -version = "0.3.0" +name = "gimli" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6e27f0689a6e15944bdce7e45425efb87eaa8ab0c6e87f11d0987a9133e2531" +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" dependencies = [ - "polyval", + "fallible-iterator", + "indexmap", + "stable_deref_trait", ] [[package]] name = "gimli" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" +checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" dependencies = [ "fallible-iterator", "indexmap", @@ -2102,9 +2468,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" [[package]] name = "glob" @@ -2114,14 +2480,14 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad1da430bd7281dde2576f44c84cc3f0f7b475e7202cd503042dff01a8c8120" +checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" dependencies = [ "aho-corasick", "bstr", "fnv", - "log", + "log 0.4.14", "regex", ] @@ -2139,48 +2505,25 @@ dependencies = [ ] [[package]] -name = "h2" -version = "0.1.26" +name = "half" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" -dependencies = [ - "byteorder 
1.3.4", - "bytes 0.4.12", - "fnv", - "futures 0.1.29", - "http 0.1.21", - "indexmap", - "log", - "slab", - "string", - "tokio-io", -] +checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" [[package]] -name = "h2" -version = "0.2.6" +name = "handlebars" +version = "3.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53" +checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.1", - "indexmap", - "slab", - "tokio 0.2.22", - "tokio-util", - "tracing", + "log 0.4.14", + "pest", + "pest_derive", + "quick-error 2.0.1", + "serde", + "serde_json", ] -[[package]] -name = "half" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" - [[package]] name = "hash-db" version = "0.15.2" @@ -2197,64 +2540,49 @@ dependencies = [ ] [[package]] -name = "hashbrown" -version = "0.1.8" +name = "hash_hasher" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" -dependencies = [ - "byteorder 1.3.4", - "scopeguard 0.3.3", -] +checksum = "74721d007512d0cb3338cd20f0654ac913920061a4c4d0d8708edb3f2a698c0c" [[package]] name = "hashbrown" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" -dependencies = [ - "ahash 0.2.19", - "autocfg 0.1.7", -] - -[[package]] -name = "hashbrown" -version = "0.8.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f595585f103464d8d2f6e9864682d74c1601fed5e07d62b1c9058dba8246fb" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash 0.3.8", - "autocfg 1.0.0", + "ahash", ] [[package]] name = "heck" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] [[package]] name = "hex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" +checksum = "21e4590e13640f19f249fe3e4eca5113bc4289f2497710378190e7f4bd96f45b" [[package]] name = "hex_fmt" @@ -2272,6 +2600,26 @@ dependencies = [ "digest 0.8.1", ] +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +dependencies = [ + "crypto-mac 0.11.1", + "digest 0.9.0", +] + [[package]] name = "hmac-drbg" version = "0.2.0" @@ -2279,15 +2627,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" dependencies = [ "digest 0.8.1", - "generic-array 0.12.3", - "hmac", + "generic-array 0.12.4", + "hmac 0.7.1", +] + +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.4", + "hmac 0.8.1", ] [[package]] name = "honggfuzz" -version = "0.5.49" +version = "0.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "832bac18a82ec7d6c21887daa8616b238fe90d5d5e762d0d4b9372cdaa9e097f" +checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" dependencies = [ "arbitrary", "lazy_static", @@ -2295,54 +2654,49 @@ dependencies = [ ] [[package]] -name = "http" -version = "0.1.21" +name = "hostname" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ - "bytes 0.4.12", - "fnv", - "itoa", + "libc", + "match_cfg", + "winapi 0.3.9", ] [[package]] name = "http" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.1.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" +checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "http 0.1.21", - "tokio-buf", + "bytes 1.1.0", + "http", + "pin-project-lite 0.2.7", ] [[package]] -name = "http-body" -version = "0.3.1" +name = "httparse" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = [ - "bytes 0.5.6", - "http 0.2.1", -] +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] -name = "httparse" -version = "1.3.4" +name = "httpdate" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "humantime" @@ -2350,81 +2704,93 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" dependencies = [ - "quick-error", + "quick-error 1.2.3", ] +[[package]] +name = 
"humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" -version = "0.12.35" +version = "0.10.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" +checksum = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "futures-cpupool", - "h2 0.1.26", - "http 0.1.21", - "http-body 0.1.0", + "base64 0.9.3", "httparse", - "iovec", - "itoa", - "log", - "net2", - "rustc_version", + "language-tags", + "log 0.3.9", + "mime", + "num_cpus", "time", - "tokio 0.1.22", - "tokio-buf", - "tokio-executor 0.1.10", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "want 0.2.0", + "traitobject", + "typeable", + "unicase 1.4.2", + "url 1.7.2", ] [[package]] name = "hyper" -version = "0.13.7" +version = "0.14.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "15d1cfb9e4f68655fa04c01f59edb405b6074a0f7118ea881e5026e4a1cd8593" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "futures-channel", "futures-core", "futures-util", - "h2 0.2.6", - "http 0.2.1", - "http-body 0.3.1", + "http", + "http-body", "httparse", + "httpdate", "itoa", - "pin-project", - "socket2", - "time", - "tokio 0.2.22", + "pin-project-lite 0.2.7", + "socket2 0.4.2", + "tokio", "tower-service", "tracing", - "want 0.3.0", + "want", ] [[package]] name = "hyper-rustls" -version = "0.21.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ - "bytes 0.5.6", "ct-logs", "futures-util", - "hyper 0.13.7", - "log", + "hyper 0.14.13", + "log 0.4.14", "rustls", "rustls-native-certs", - "tokio 0.2.22", + "tokio", "tokio-rustls", "webpki", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes 1.1.0", + "hyper 0.14.13", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.1.5" @@ -2438,9 +2804,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" dependencies = [ "matches", "unicode-bidi", @@ -2448,21 +2814,49 @@ dependencies = [ ] [[package]] -name = "impl-codec" -version = "0.4.2" +name = "if-addrs" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +checksum = "c9a83ec4af652890ac713ffd8dc859e650420a5ef47f7b9be29b6664ab50fbc8" dependencies = [ - "parity-scale-codec", + "if-addrs-sys", + "libc", + "winapi 0.3.9", ] [[package]] -name = 
"impl-rlp" -version = "0.2.1" +name = "if-addrs-sys" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "if-watch" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f7a72f11830b52333f36e3b09a288333888bf54380fd0ac0790a3c31ab0f3c5" +checksum = "ae8ab7f67bad3240049cb24fb9cb0b4c2c6af4c245840917fbbdededeee91179" dependencies = [ - "rlp", + "async-io", + "futures 0.3.17", + "futures-lite", + "if-addrs", + "ipnet", + "libc", + "log 0.4.14", + "winapi 0.3.9", +] + +[[package]] +name = "impl-codec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +dependencies = [ + "parity-scale-codec", ] [[package]] @@ -2476,9 +2870,9 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.1.3" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" +checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" dependencies = [ "proc-macro2", "quote", @@ -2487,26 +2881,32 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.5.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b88cd59ee5f71fea89a62248fc8f387d44400cefe05ef548466d61ced9029a7" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ - "autocfg 1.0.0", - "hashbrown 0.8.1", + "autocfg 1.0.1", + "hashbrown", "serde", ] [[package]] name = "instant" -version = "0.1.6" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" +checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +dependencies = [ + "cfg-if 1.0.0", +] [[package]] name = "integer-sqrt" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] [[package]] name = "intervalier" @@ -2514,7 +2914,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "futures-timer 2.0.2", ] @@ -2529,83 +2929,161 @@ dependencies = [ [[package]] name = "ip_network" -version = "0.3.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ee15951c035f79eddbef745611ec962f63f4558f1dadf98ab723cc603487c6f" +checksum = "09b746553d2f4a1ca26fab939943ddfb217a091f34f53571620a8e3d30691303" [[package]] -name = "ipnet" -version = "2.3.0" +name = "ipconfig" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + "widestring", + "winapi 0.3.9", + "winreg", +] [[package]] -name = "itertools" -version = "0.8.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +name = "ipfs" +version = "0.2.1" +source = "git+https://github.com/rs-ipfs/rust-ipfs#111f116e366e8f3f9b409d1c92be6ac51f7009f4" +dependencies = [ + "anyhow", + "async-stream", + "async-trait", + "base64 0.13.0", + "byteorder", + "bytes 1.1.0", + "cid 0.5.1", + "either", + "fs2", + "futures 0.3.17", + "hash_hasher", + "ipfs-bitswap", + "ipfs-unixfs", + "libp2p", + "multibase 0.9.1", + "multihash 0.11.4", + "once_cell", + "prost", + "prost-build", + "serde", + "serde_json", + "sled", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", + "tracing-futures", + "trust-dns-resolver", + "void", +] + +[[package]] +name = "ipfs-bitswap" +version = "0.1.0" +source = "git+https://github.com/rs-ipfs/rust-ipfs#111f116e366e8f3f9b409d1c92be6ac51f7009f4" +dependencies = [ + "cid 0.5.1", + "fnv", + "futures 0.3.17", + "hash_hasher", + "libp2p-core", + "libp2p-swarm", + "multihash 0.11.4", + "prost", + "prost-build", + "thiserror", + "tokio", + "tracing", + "unsigned-varint 0.3.3", +] + +[[package]] +name = "ipfs-unixfs" +version = "0.2.0" +source = "git+https://github.com/rs-ipfs/rust-ipfs#111f116e366e8f3f9b409d1c92be6ac51f7009f4" dependencies = [ + "cid 0.5.1", "either", + "filetime", + "multihash 0.11.4", + "quick-protobuf", + "sha2 0.9.8", ] +[[package]] +name = "ipnet" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" + [[package]] name = "itertools" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" dependencies = [ "either", ] [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "jobserver" -version = "0.1.21" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" +checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.39" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] [[package]] name = "jsonrpc-client-transports" -version = "15.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f7b1cdf66312002e15682a24430728bd13036c641163c016bc53fb686a7c2d" +checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ - "failure", - "futures 0.1.29", - "hyper 0.12.35", + "derive_more", + "futures 0.3.17", + "hyper 0.14.13", + "hyper-tls", "jsonrpc-core", "jsonrpc-pubsub", - "log", + "log 0.4.14", "serde", "serde_json", + "tokio", "url 1.7.2", + "websocket", ] [[package]] name = "jsonrpc-core" -version = "15.0.0" +version = "18.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b12567a31d48588a65b6cf870081e6ba1d7b2ae353977cb9820d512e69c70" +checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.1.29", - "log", + "futures 0.3.17", + "futures-executor", + "futures-util", + "log 0.4.14", "serde", "serde_derive", "serde_json", @@ -2613,20 +3091,21 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "15.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175ca0cf77439b5495612bf216c650807d252d665b4b70ab2eebd895a88fac1" +checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ + "futures 0.3.17", "jsonrpc-client-transports", ] [[package]] name = "jsonrpc-derive" -version = "15.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2cc6ea7f785232d9ca8786a44e9fa698f92149dcdc1acc4aa1fc69c4993d79e" +checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", "quote", "syn", @@ -2634,74 +3113,137 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "15.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9996b26c0c7a59626d0ed6c5ec8bf06218e62ce1474bd2849f9b9fd38a0158c0" +checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "hyper 0.12.35", + "futures 0.3.17", + "hyper 0.14.13", "jsonrpc-core", "jsonrpc-server-utils", - "log", + "log 0.4.14", "net2", - "parking_lot 0.10.2", - "unicase", + "parking_lot 0.11.2", + "unicase 2.6.0", ] [[package]] name = "jsonrpc-ipc-server" -version = "15.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e8f2278fb2b277175b6e21b23e7ecf30e78daff5ee301d0a2a411d9a821a0a" +checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ + "futures 0.3.17", "jsonrpc-core", "jsonrpc-server-utils", - "log", + "log 0.4.14", "parity-tokio-ipc", - "parking_lot 0.10.2", - "tokio-service", + "parking_lot 0.11.2", + "tower-service", ] [[package]] name = "jsonrpc-pubsub" -version = "15.0.0" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f389c5cd1f3db258a99296892c21047e21ae73ff4c0e2d39650ea86fe994b4c7" +checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ + "futures 0.3.17", "jsonrpc-core", - "log", - "parking_lot 0.10.2", + "lazy_static", + "log 0.4.14", + "parking_lot 0.11.2", "rand 0.7.3", "serde", ] [[package]] -name = "jsonrpc-server-utils" -version = "15.0.0" +name = "jsonrpc-server-utils" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" +dependencies = [ + "bytes 1.1.0", + "futures 0.3.17", + "globset", + "jsonrpc-core", + "lazy_static", + "log 0.4.14", + "tokio", + "tokio-stream", + "tokio-util", + "unicase 2.6.0", +] + +[[package]] +name = "jsonrpc-ws-server" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f892c7d766369475ab7b0669f417906302d7c0fb521285c0a0c92e52e7c8e946" +dependencies = [ + "futures 0.3.17", + "jsonrpc-core", + "jsonrpc-server-utils", + "log 0.4.14", + "parity-ws", + "parking_lot 0.11.2", + "slab", +] + 
+[[package]] +name = "jsonrpsee-proc-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f37924e16300e249a52a22cabb5632f846dc9760b39355f5e8bc70cd23dc6300" +dependencies = [ + "Inflector", + "bae", + "proc-macro-crate 1.1.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c623e1895d0d9110cb0ea7736cfff13191ff52335ad33b21bd5c775ea98b27af" +checksum = "d67724d368c59e08b557a516cf8fcc51100e7a708850f502e1044b151fe89788" dependencies = [ - "bytes 0.4.12", - "globset", - "jsonrpc-core", - "lazy_static", - "log", - "tokio 0.1.22", - "tokio-codec", - "unicase", + "async-trait", + "beef", + "futures-channel", + "futures-util", + "hyper 0.14.13", + "log 0.4.14", + "serde", + "serde_json", + "soketto 0.6.0", + "thiserror", ] [[package]] -name = "jsonrpc-ws-server" -version = "15.0.0" +name = "jsonrpsee-ws-client" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436a92034d0137ab3e3c64a7a6350b428f31cb4d7d1a89f284bcdbcd98a7bc56" +checksum = "8e2834b6e7f57ce9a4412ed4d6dc95125d2c8612e68f86b9d9a07369164e4198" dependencies = [ - "jsonrpc-core", - "jsonrpc-server-utils", - "log", - "parity-ws", - "parking_lot 0.10.2", - "slab", + "async-trait", + "fnv", + "futures 0.3.17", + "jsonrpsee-types", + "log 0.4.14", + "pin-project 1.0.8", + "rustls", + "rustls-native-certs", + "serde", + "serde_json", + "soketto 0.6.0", + "thiserror", + "tokio", + "tokio-rustls", + "tokio-util", + "url 2.2.2", ] [[package]] @@ -2737,64 +3279,53 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ - "log", + "log 0.4.14", ] [[package]] name = "kvdb" -version = "0.7.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" +checksum = "45a3f58dc069ec0e205a27f5b45920722a46faed802a0541538241af6228f512" dependencies = [ "parity-util-mem", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] name = "kvdb-memorydb" -version = "0.7.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73de822b260a3bdfb889dbbb65bb2d473eee2253973d6fa4a5d149a2a4a7c66e" +checksum = "c3b6b85fc643f5acd0bffb2cc8a6d150209379267af0d41db72170021841f9f5" dependencies = [ "kvdb", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.2", ] [[package]] name = "kvdb-rocksdb" -version = "0.9.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44947dd392f09475af614d740fe0320b66d01cb5b977f664bbbb5e45a70ea4c1" +checksum = "9b1b6ea8f2536f504b645ad78419c8246550e19d2c3419a167080ce08edee35a" dependencies = [ "fs-swap", "kvdb", - "log", + "log 0.4.14", "num_cpus", "owning_ref", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "regex", "rocksdb", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] -name = "kvdb-web" -version = "0.7.0" +name = "language-tags" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" -dependencies = [ - "futures 0.3.5", - "js-sys", - "kvdb", - "kvdb-memorydb", - "log", - "parity-util-mem", - "send_wrapper 0.3.0", - "wasm-bindgen", - "web-sys", -] +checksum = 
"a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" [[package]] name = "lazy_static" @@ -2804,9 +3335,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "leb128" @@ -2816,9 +3347,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.79" +version = "0.2.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" +checksum = "a2a5ac8f984bfcf3a823267e5fde638acc3325f6496633a5da6bb6eb2171e103" [[package]] name = "libloading" @@ -2830,6 +3361,26 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "libloading" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883" +dependencies = [ + "cfg-if 1.0.0", + "winapi 0.3.9", +] + +[[package]] +name = "libloading" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" +dependencies = [ + "cfg-if 1.0.0", + "winapi 0.3.9", +] + [[package]] name = "libm" version = "0.2.1" @@ -2838,16 +3389,15 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.28.1" +version = "0.39.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571f5a4604c1a40d75651da141dfde29ad15329f537a779528803297d2220274" +checksum = "9004c06878ef8f3b4b4067e69a140d87ed20bf777287f82223e49713b36ee433" dependencies = [ "atomic", - "bytes 0.5.6", - "futures 0.3.5", + "bytes 1.1.0", + "futures 0.3.17", "lazy_static", "libp2p-core", - "libp2p-core-derive", "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", @@ -2860,227 +3410,222 @@ dependencies = [ "libp2p-ping", "libp2p-plaintext", "libp2p-pnet", + "libp2p-relay", "libp2p-request-response", "libp2p-swarm", + "libp2p-swarm-derive", "libp2p-tcp", "libp2p-uds", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "multihash", - "parity-multiaddr", - "parking_lot 0.10.2", - "pin-project", - "smallvec 1.4.1", + "multiaddr", + "parking_lot 0.11.2", + "pin-project 1.0.8", + "smallvec 1.6.1", "wasm-timer", ] [[package]] name = "libp2p-core" -version = "0.22.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52f13ba8c7df0768af2eb391696d562c7de88cc3a35122531aaa6a7d77754d25" +checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" dependencies = [ "asn1_der", "bs58", "ed25519-dalek", "either", "fnv", - "futures 0.3.5", + "futures 0.3.17", "futures-timer 3.0.2", "lazy_static", - "libsecp256k1", - "log", - "multihash", + "libsecp256k1 0.5.0", + "log 0.4.14", + "multiaddr", + "multihash 0.14.0", "multistream-select", - "parity-multiaddr", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.2", + "pin-project 1.0.8", "prost", "prost-build", "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.8.2", - "smallvec 1.4.1", + "sha2 0.9.8", + "smallvec 1.6.1", "thiserror", - "unsigned-varint 0.4.0", + "unsigned-varint 0.7.0", "void", "zeroize", ] -[[package]] -name = 
"libp2p-core-derive" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f753d9324cd3ec14bf04b8a8cd0d269c87f294153d6bf2a84497a63a5ad22213" -dependencies = [ - "quote", - "syn", -] - [[package]] name = "libp2p-deflate" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74029ae187f35f4b8ddf26b9779a68b340045d708528a103917cdca49a296db5" +checksum = "66097fccc0b7f8579f90a03ea76ba6196332ea049fd07fd969490a06819dcdc8" dependencies = [ "flate2", - "futures 0.3.5", + "futures 0.3.17", "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf319822e08dd65c8e060d2354e9f952895bbc433f5706c75ed010c152aee5e" +checksum = "58ff08b3196b85a17f202d80589e93b1660a574af67275706657fdc762e42c32" dependencies = [ - "futures 0.3.5", + "async-std-resolver", + "futures 0.3.17", "libp2p-core", - "log", + "log 0.4.14", + "smallvec 1.6.1", + "trust-dns-resolver", ] [[package]] name = "libp2p-floodsub" -version = "0.22.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a9acb43a3e4a4e413e0c4abe0fa49308df7c6335c88534757b647199cb8a51" +checksum = "404eca8720967179dac7a5b4275eb91f904a53859c69ca8d018560ad6beb214f" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.5", + "futures 0.3.17", "libp2p-core", "libp2p-swarm", + "log 0.4.14", "prost", "prost-build", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] name = "libp2p-gossipsub" -version = "0.22.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab20fcb60edebe3173bbb708c6ac3444afdf1e3152dc2866b10c4f5497f17467" +checksum = "b1cc48709bcbc3a3321f08a73560b4bbb4166a7d56f6fdb615bc775f4f91058e" dependencies = [ - "base64 0.11.0", - "byteorder 1.3.4", - "bytes 0.5.6", + "asynchronous-codec 0.6.0", + "base64 0.13.0", + "byteorder", + "bytes 1.1.0", "fnv", - "futures 0.3.5", - "futures_codec", + "futures 0.3.17", "hex_fmt", "libp2p-core", "libp2p-swarm", - "log", - "lru_time_cache", + "log 0.4.14", "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.2", - "smallvec 1.4.1", - "unsigned-varint 0.4.0", + "regex", + "sha2 0.9.8", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.22.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56396ee63aa9164eacf40c2c5d2bda8c4133c2f57e1b0425d51d3a4e362583b1" +checksum = "a7b61f6cf07664fb97016c318c4d4512b3dd4cc07238607f3f0163245f99008e" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "libp2p-core", "libp2p-swarm", - "log", + "log 0.4.14", "prost", "prost-build", - "smallvec 1.4.1", + "smallvec 1.6.1", "wasm-timer", ] [[package]] name = "libp2p-kad" -version = "0.23.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7fa9047f8b8f544278a35c2d9d45d3b2c1785f2d86d4e1629d6edf97be3955" +checksum = "50ed78489c87924235665a0ab345b298ee34dff0f7ad62c0ba6608b2144fb75e" dependencies = [ - "arrayvec 0.5.1", - "bytes 0.5.6", + "arrayvec 0.5.2", + "asynchronous-codec 0.6.0", + "bytes 1.1.0", "either", "fnv", - "futures 0.3.5", - "futures_codec", + "futures 0.3.17", "libp2p-core", "libp2p-swarm", - "log", - "multihash", + "log 0.4.14", "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.2", - "smallvec 1.4.1", + "sha2 0.9.8", + "smallvec 1.6.1", "uint", - 
"unsigned-varint 0.4.0", + "unsigned-varint 0.7.0", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.22.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173b5a6b2f690c29ae07798d85b9441a131ac76ddae9015ef22905b623d0c69" +checksum = "a29e6cbc2a24b8471b6567e580a0e8e7b70a6d0f0ea2be0844d1e842d7d4fa33" dependencies = [ - "async-std", + "async-io", "data-encoding", "dns-parser", - "either", - "futures 0.3.5", + "futures 0.3.17", + "if-watch", "lazy_static", "libp2p-core", "libp2p-swarm", - "log", - "net2", - "rand 0.7.3", - "smallvec 1.4.1", + "log 0.4.14", + "rand 0.8.4", + "smallvec 1.6.1", + "socket2 0.4.2", "void", - "wasm-timer", ] [[package]] name = "libp2p-mplex" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a73a799cc8410b36e40b8f4c4b6babbcb9efd3727111bf517876e4acfa612d3" +checksum = "313d9ea526c68df4425f580024e67a9d3ffd49f2c33de5154b1f5019816f7a99" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures 0.3.5", - "futures_codec", + "asynchronous-codec 0.6.0", + "bytes 1.1.0", + "futures 0.3.17", "libp2p-core", - "log", - "parking_lot 0.10.2", - "unsigned-varint 0.4.0", + "log 0.4.14", + "nohash-hasher", + "parking_lot 0.11.2", + "rand 0.7.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", ] [[package]] name = "libp2p-noise" -version = "0.24.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ef6c490042f549fb1025f2892dfe6083d97a77558f450c1feebe748ca9eb15a" +checksum = "3f1db7212f342b6ba7c981cc40e31f76e9e56cb48e65fa4c142ecaca5839523e" dependencies = [ - "bytes 0.5.6", - "curve25519-dalek", - "futures 0.3.5", + "bytes 1.1.0", + "curve25519-dalek 3.2.0", + "futures 0.3.17", "lazy_static", "libp2p-core", - "log", + "log 0.4.14", "prost", "prost-build", - "rand 0.7.3", - "sha2 0.8.2", + "rand 0.8.4", + "sha2 0.9.8", "snow", "static_assertions", "x25519-dalek", @@ -3089,14 +3634,14 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.22.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad063c21dfcea4518ac9e8bd4119d33a5b26c41e674f602f41f05617a368a5c8" +checksum = "2482cfd9eb0b7a0baaf3e7b329dc4f2785181a161b1a47b7192f8d758f54a439" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "libp2p-core", "libp2p-swarm", - "log", + "log 0.4.14", "rand 0.7.3", "void", "wasm-timer", @@ -3104,107 +3649,142 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903a12e99c72dbebefea258de887982adeacc7025baa1ceb10b7fa9928f54791" +checksum = "13b4783e5423870b9a5c199f65a7a3bc66d86ab56b2b9beebf3c338d889cf8e4" dependencies = [ - "bytes 0.5.6", - "futures 0.3.5", - "futures_codec", + "asynchronous-codec 0.6.0", + "bytes 1.1.0", + "futures 0.3.17", "libp2p-core", - "log", + "log 0.4.14", "prost", "prost-build", - "rw-stream-sink", - "unsigned-varint 0.4.0", + "unsigned-varint 0.7.0", "void", ] [[package]] name = "libp2p-pnet" -version = "0.19.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d0db10e139d22d7af0b23ed7949449ec86262798aa0fd01595abdbcb02dc87" +checksum = "07cb4dd4b917e5b40ddefe49b96b07adcd8d342e0317011d175b7b2bb1dcc974" dependencies = [ - "futures 0.3.5", - "log", - "pin-project", + "futures 0.3.17", + "log 0.4.14", + "pin-project 1.0.8", "rand 0.7.3", "salsa20", "sha3", ] 
[[package]] -name = "libp2p-request-response" +name = "libp2p-relay" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0c9e8a4cd69d97e9646c54313d007512f411aba8c5226cfcda16df6a6e84a3" +checksum = "0133f6cfd81cdc16e716de2982e012c62e6b9d4f12e41967b3ee361051c622aa" +dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.1.0", + "futures 0.3.17", + "futures-timer 3.0.2", + "libp2p-core", + "libp2p-swarm", + "log 0.4.14", + "pin-project 1.0.8", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-request-response" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06cdae44b6821466123af93cbcdec7c9e6ba9534a8af9cdc296446d39416d241" dependencies = [ "async-trait", - "bytes 0.5.6", - "futures 0.3.5", + "bytes 1.1.0", + "futures 0.3.17", "libp2p-core", "libp2p-swarm", - "log", - "lru 0.6.0", + "log 0.4.14", + "lru", "minicbor", "rand 0.7.3", - "smallvec 1.4.1", - "unsigned-varint 0.5.1", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.22.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7193e444210132237b81b755ec7fe53f1c4bd2f53cf719729b94c0c72eb6eaa1" +checksum = "7083861341e1555467863b4cd802bea1e8c4787c0f7b5110097d0f1f3248f9a9" dependencies = [ "either", - "futures 0.3.5", + "futures 0.3.17", "libp2p-core", - "log", + "log 0.4.14", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.6.1", "void", "wasm-timer", ] +[[package]] +name = "libp2p-swarm-derive" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab8cb308d4fc854869f5abb54fdab0833d2cf670d407c745849dc47e6e08d79c" +dependencies = [ + "quote", + "syn", +] + [[package]] name = "libp2p-tcp" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f42ec130d7a37a7e47bf4398026b7ad9185c08ed26972e2720f8b94112796f" +checksum = "79edd26b6b4bb5feee210dcda562dca186940dfecb0024b979c3f50824b3bf28" dependencies = [ - "async-std", - "futures 0.3.5", + "async-io", + "futures 0.3.17", "futures-timer 3.0.2", - "get_if_addrs", + "if-addrs", + "if-watch", "ipnet", + "libc", "libp2p-core", - "log", - "socket2", + "log 0.4.14", + "socket2 0.4.2", + "tokio", ] [[package]] name = "libp2p-uds" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea7acb0a034f70d7db94c300eba3f65c0f6298820105624088a9609c9974d77" +checksum = "280e793440dd4e9f273d714f4497325c72cddb0fe85a49f9a03c88f41dd20182" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.17", "libp2p-core", - "log", + "log 0.4.14", ] [[package]] name = "libp2p-wasm-ext" -version = "0.22.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34c1faac6f92c21fbe155417957863ea822fba9e9fd5eb24c0912336a100e63f" +checksum = "f553b7140fad3d7a76f50497b0ea591e26737d9607428a75509fc191e4d1b1f6" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -3214,42 +3794,40 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.23.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d650534ebd99f48f6fa292ed5db10d30df2444943afde4407ceeddab8e513fca" +checksum = 
"ddf99dcbf5063e9d59087f61b1e85c686ceab2f5abedb472d32288065c0e5e27" dependencies = [ - "async-tls", "either", - "futures 0.3.5", + "futures 0.3.17", + "futures-rustls", "libp2p-core", - "log", + "log 0.4.14", "quicksink", - "rustls", "rw-stream-sink", - "soketto", - "url 2.1.1", - "webpki", - "webpki-roots 0.18.0", + "soketto 0.4.2", + "url 2.2.2", + "webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.25.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "781d9b9f043dcdabc40640807125368596b849fd4d96cdca2dcf052fdf6f33fd" +checksum = "214cc0dd9c37cbed27f0bb1eba0c41bbafdb93a8be5e9d6ae1e6b4b42cd044bf" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "libp2p-core", - "parking_lot 0.11.0", + "parking_lot 0.11.2", "thiserror", "yamux", ] [[package]] name = "librocksdb-sys" -version = "6.11.4" +version = "6.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" dependencies = [ "bindgen", "cc", @@ -3266,30 +3844,96 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg", + "hmac-drbg 0.2.0", "rand 0.7.3", "sha2 0.8.2", - "subtle 2.2.3", + "subtle 2.4.1", + "typenum", +] + +[[package]] +name = "libsecp256k1" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" +dependencies = [ + "arrayref", + "base64 0.12.3", + "digest 0.9.0", + "hmac-drbg 0.3.0", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.7.3", + "serde", + "sha2 0.9.8", + "typenum", +] + +[[package]] +name = "libsecp256k1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" +dependencies = [ + "arrayref", + "base64 0.12.3", + "digest 0.9.0", + "hmac-drbg 0.3.0", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.7.3", + "serde", + "sha2 0.9.8", "typenum", ] +[[package]] +name = "libsecp256k1-core" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle 2.4.1", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libz-sys" -version = "1.0.25" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" +checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" dependencies = [ "cc", - "libc", "pkg-config", "vcpkg", ] [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "linked_hash_set" @@ -3302,20 +3946,19 @@ dependencies = [ [[package]] name = "linregress" -version = "0.1.7" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9290cf6f928576eeb9c096c6fad9d8d452a0a1a70a2bbffa6e36064eedc0aac9" +checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" dependencies = [ - "failure", "nalgebra", "statrs", ] [[package]] name = "lite-json" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c73e713a23ac6e12074c9e96ef2dfb770921e0cb9244c093bd38424209e0e523" +checksum = "0460d985423a026b4d9b828a7c6eed1bcf606f476322f3f9b507529686a61715" dependencies = [ "lite-parser", ] @@ -3331,72 +3974,78 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.1.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "scopeguard 0.3.3", + "scopeguard", ] [[package]] name = "lock_api" -version = "0.3.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" dependencies = [ - "scopeguard 1.1.0", + "scopeguard", ] [[package]] -name = "lock_api" -version = "0.4.1" +name = "log" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" dependencies = [ - "scopeguard 1.1.0", + "log 0.4.14", ] [[package]] name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", + "value-bag", ] [[package]] name = "lru" -version = "0.4.3" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" +checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" dependencies = [ - "hashbrown 0.6.3", + "hashbrown", ] [[package]] -name = "lru" -version = "0.5.3" +name = "lru-cache" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c456c123957de3a220cd03786e0d86aa542a88b46029973b542f426da6ef34" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" dependencies = [ - "hashbrown 0.6.3", + "linked-hash-map", ] [[package]] -name = "lru" -version = "0.6.0" +name = "lz4" +version = "1.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153" +checksum = "aac20ed6991e01bf6a2e68cc73df2b389707403662a8ba89f68511fb340f724c" dependencies = [ - "hashbrown 0.8.1", + "libc", + "lz4-sys", ] [[package]] -name = "lru_time_cache" -version = "0.10.0" +name = "lz4-sys" +version = "1.9.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb241df5c4caeb888755363fc95f8a896618dc0d435e9e775f7930cb099beab" +checksum = "dca79aa95d8b3226213ad454d328369853be3a1382d89532a854f4d69640acae" +dependencies = [ + "cc", + "libc", +] [[package]] name = "mach" @@ -3407,6 +4056,18 @@ dependencies = [ "libc", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.0.1" @@ -3418,15 +4079,15 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "matrixmultiply" -version = "0.2.3" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4f7ec66360130972f34830bfad9ef05c6610a43938a467bcc9ab9369ab3478f" +checksum = "5a8a15b776d9dfaecd44b03c5828c2199cddff5247215858aac14624f8d6b741" dependencies = [ "rawpointer", ] @@ -3439,9 +4100,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memmap" @@ -3453,23 +4114,32 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "memmap2" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "723e3ebdcdc5c023db1df315364573789f8857c11b631a2fdfad7c00f5c046b4" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" -version = "0.5.5" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" +checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", ] [[package]] name = "memory-db" -version = "0.24.1" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" +checksum = "de006e09d04fc301a5f7e817b75aa49801c4479a8af753764416b085337ddcc5" dependencies = [ "hash-db", - "hashbrown 0.8.1", + "hashbrown", "parity-util-mem", ] @@ -3481,30 +4151,39 @@ checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" [[package]] name = "merlin" -version = "2.0.0" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "mime" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" +checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" dependencies = [ - 
"byteorder 1.3.4", - "keccak", - "rand_core 0.5.1", - "zeroize", + "log 0.3.9", ] [[package]] name = "minicbor" -version = "0.5.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fc03ad6f8f548db7194a5ff5a6f96342ecae4e3ef67d2bf18bacc0e245cd041" +checksum = "51aa5bb0ca22415daca596a227b507f880ad1b2318a87fa9325312a5d285ca0d" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.4.1" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c214bf3d90099b52f3e4b328ae0fe34837fd0fab683ad1e10fceb4629106df48" +checksum = "54999f917cd092b13904737e26631aa2b2b88d625db68e4bab461dcd8006c788" dependencies = [ "proc-macro2", "quote", @@ -3513,72 +4192,63 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", + "autocfg 1.0.1", ] [[package]] name = "mio" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", "kernel32-sys", "libc", - "log", - "miow 0.2.1", + "log 0.4.14", + "miow 0.2.2", "net2", "slab", "winapi 0.2.8", ] [[package]] -name = "mio-extras" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" -dependencies = [ - "lazycell", - "log", - "mio", - "slab", -] - -[[package]] -name = "mio-named-pipes" -version = "0.1.7" +name = "mio" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" +checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" dependencies = [ - "log", - "mio", - "miow 0.3.5", + "libc", + "log 0.4.14", + "miow 0.3.7", + "ntapi", "winapi 0.3.9", ] [[package]] -name = "mio-uds" -version = "0.6.8" +name = "mio-extras" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" dependencies = [ - "iovec", - "libc", - "mio", + "lazycell", + "log 0.4.14", + "mio 0.6.23", + "slab", ] [[package]] name = "miow" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" dependencies = [ "kernel32-sys", "net2", @@ -3588,11 +4258,10 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2", "winapi 0.3.9", ] @@ -3602,104 +4271,217 @@ version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" +[[package]] +name = "multiaddr" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ee4ea82141951ac6379f964f71b20876d43712bea8faf6dd1a375e08a46499" +dependencies = [ + "arrayref", + "bs58", + "byteorder", + "data-encoding", + "multihash 0.14.0", + "percent-encoding 2.1.0", + "serde", + "static_assertions", + "unsigned-varint 0.7.0", + "url 2.2.2", +] + +[[package]] +name = "multibase" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b78c60039650ff12e140ae867ef5299a58e19dded4d334c849dc7177083667e2" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + [[package]] name = "multihash" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" +checksum = "567122ab6492f49b59def14ecc36e13e64dca4188196dd0cd41f9f3f979f3df6" dependencies = [ "blake2b_simd", "blake2s_simd", - "digest 0.8.1", - "sha-1", - "sha2 0.8.2", + "digest 0.9.0", + "sha-1 0.9.8", + "sha2 0.9.8", "sha3", - "unsigned-varint 0.3.3", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "multihash" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" +dependencies = [ + "blake2b_simd", + "blake2s_simd", + "blake3", + "digest 0.9.0", + "generic-array 0.14.4", + "multihash-derive", + "sha2 0.9.8", + "sha3", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "multihash" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.4", + "multihash-derive", + "sha2 0.9.8", + "unsigned-varint 0.7.0", +] + +[[package]] +name = "multihash-derive" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99" +dependencies = [ + "proc-macro-crate 1.1.0", + "proc-macro-error 1.0.4", + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] name = "multimap" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.8.2" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9157e87afbc2ef0d84cc0345423d715f445edde00141c93721c162de35a05e5" +checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8" dependencies = [ - "bytes 0.5.6", - "futures 0.3.5", - "log", - "pin-project", - "smallvec 1.4.1", - "unsigned-varint 0.4.0", + "bytes 1.1.0", + "futures 0.3.17", + "log 0.4.14", + "pin-project 1.0.8", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", ] [[package]] name = "nalgebra" -version = "0.18.1" +version = 
"0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" +checksum = "462fffe4002f4f2e1f6a9dcf12cc1a6fc0e15989014efc02a941d3e0f5dc2120" dependencies = [ - "alga", "approx", - "generic-array 0.12.3", "matrixmultiply", + "nalgebra-macros", "num-complex", - "num-rational", + "num-rational 0.4.0", "num-traits", - "rand 0.6.5", + "rand 0.8.4", + "rand_distr", + "simba", "typenum", ] +[[package]] +name = "nalgebra-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "names" -version = "0.11.0" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a8690bf09abf659851e58cd666c3d37ac6af07c2bd7a9e332cfba471715775" +dependencies = [ + "rand 0.8.4", +] + +[[package]] +name = "native-tls" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da" +checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" dependencies = [ - "rand 0.3.23", + "lazy_static", + "libc", + "log 0.4.14", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", ] [[package]] name = "net2" -version = "0.2.34" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] [[package]] name = "nix" -version = "0.17.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363" +checksum = "b2ccba0cfe4fdf15982d1674c69b1fd80bad427d293849982668dfe454bd61f2" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 1.0.0", "libc", - "void", ] [[package]] name = "node-bench" -version = "0.8.0" +version = "0.9.0-dev" dependencies = [ "derive_more", "fs_extra", - "futures 0.3.5", + "futures 0.3.17", "hash-db", "hex", "kvdb", "kvdb-rocksdb", "lazy_static", - "log", + "log 0.4.14", "node-primitives", "node-runtime", "node-testing", @@ -3707,9 +4489,9 @@ dependencies = [ "parity-util-mem", "rand 0.7.3", "sc-basic-authorship", - "sc-cli", "sc-client-api", "sc-transaction-pool", + "sc-transaction-pool-api", "serde", "serde_json", "sp-consensus", @@ -3719,57 +4501,32 @@ dependencies = [ "sp-state-machine", "sp-timestamp", "sp-tracing", - "sp-transaction-pool", "sp-trie", "structopt", "tempfile", ] -[[package]] -name = "node-browser-testing" -version = "2.0.0" -dependencies = [ - "futures 0.3.5", - "futures-timer 3.0.2", - "jsonrpc-core", - "libp2p", - "node-cli", - "sc-rpc-api", - "serde", - "serde_json", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test", -] - [[package]] name = "node-cli" -version = "2.0.0" +version = "3.0.0-dev" dependencies = [ "assert_cmd", + "async-std", "frame-benchmarking-cli", - "frame-support", "frame-system", - "futures 0.3.5", + "futures 0.3.17", "hex-literal", - "log", + "ipfs", + "log 0.4.14", "nix", "node-executor", "node-inspect", "node-primitives", "node-rpc", "node-runtime", - "pallet-authority-discovery", - 
"pallet-balances", - "pallet-contracts", - "pallet-grandpa", "pallet-im-online", - "pallet-indices", - "pallet-staking", - "pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", - "parking_lot 0.10.2", "platforms", "rand 0.7.3", "regex", @@ -3782,85 +4539,84 @@ dependencies = [ "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", + "sc-consensus-slots", + "sc-consensus-uncles", + "sc-executor", "sc-finality-grandpa", "sc-keystore", "sc-network", - "sc-offchain", "sc-rpc", "sc-service", "sc-service-test", + "sc-sync-state-rpc", "sc-telemetry", - "sc-tracing", "sc-transaction-pool", + "sc-transaction-pool-api", "serde", "serde_json", + "soketto 0.4.2", "sp-authority-discovery", + "sp-authorship", "sp-consensus", "sp-consensus-babe", "sp-core", "sp-finality-grandpa", "sp-inherents", - "sp-io", "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", + "sp-tracing", "sp-transaction-pool", + "sp-transaction-storage-proof", "sp-trie", "structopt", - "substrate-browser-utils", "substrate-build-script-utils", "substrate-frame-cli", "tempfile", - "tracing", - "wasm-bindgen", - "wasm-bindgen-futures", + "try-runtime-cli", ] [[package]] name = "node-executor" -version = "2.0.0" +version = "3.0.0-dev" dependencies = [ "criterion", "frame-benchmarking", "frame-support", "frame-system", + "futures 0.3.17", "node-primitives", "node-runtime", "node-testing", "pallet-balances", "pallet-contracts", - "pallet-grandpa", "pallet-im-online", - "pallet-indices", - "pallet-session", "pallet-timestamp", - "pallet-transaction-payment", "pallet-treasury", "parity-scale-codec", "sc-executor", + "scale-info", "sp-application-crypto", + "sp-consensus-babe", "sp-core", "sp-externalities", - "sp-io", "sp-keystore", "sp-runtime", "sp-state-machine", "sp-trie", - "substrate-test-client", - "trie-root", "wat", ] [[package]] name = "node-inspect" -version = "0.8.0" +version = "0.9.0-dev" dependencies = [ "derive_more", - "log", "parity-scale-codec", "sc-cli", "sc-client-api", + "sc-executor", "sc-service", "sp-blockchain", "sp-core", @@ -3874,21 +4630,20 @@ version = "2.0.0" dependencies = [ "frame-system", "parity-scale-codec", - "pretty_assertions", + "scale-info", "sp-application-crypto", "sp-core", "sp-runtime", - "sp-serializer", ] [[package]] name = "node-rpc" -version = "2.0.0" +version = "3.0.0-dev" dependencies = [ "jsonrpc-core", "node-primitives", - "node-runtime", "pallet-contracts-rpc", + "pallet-mmr-rpc", "pallet-transaction-payment-rpc", "sc-chain-spec", "sc-client-api", @@ -3897,10 +4652,10 @@ dependencies = [ "sc-consensus-epochs", "sc-finality-grandpa", "sc-finality-grandpa-rpc", - "sc-keystore", "sc-rpc", "sc-rpc-api", "sc-sync-state-rpc", + "sc-transaction-pool-api", "sp-api", "sp-block-builder", "sp-blockchain", @@ -3908,7 +4663,6 @@ dependencies = [ "sp-consensus-babe", "sp-keystore", "sp-runtime", - "sp-transaction-pool", "substrate-frame-rpc-system", ] @@ -3916,10 +4670,8 @@ dependencies = [ name = "node-rpc-client" version = "2.0.0" dependencies = [ - "futures 0.1.29", - "hyper 0.12.35", + "futures 0.3.17", "jsonrpc-core-client", - "log", "node-primitives", "sc-rpc", "sp-tracing", @@ -3927,32 +4679,40 @@ dependencies = [ [[package]] name = "node-runtime" -version = "2.0.0" +version = "3.0.0-dev" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-executive", "frame-support", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", - "integer-sqrt", + "log 0.4.14", "node-primitives", + 
"pallet-assets", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-balances", + "pallet-bounties", "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", "pallet-contracts-rpc-runtime-api", "pallet-democracy", + "pallet-election-provider-multi-phase", "pallet-elections-phragmen", + "pallet-gilt", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", + "pallet-lottery", "pallet-membership", + "pallet-mmr", "pallet-multisig", "pallet-offences", "pallet-offences-benchmarking", @@ -3967,13 +4727,16 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-sudo", "pallet-timestamp", + "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-storage", "pallet-treasury", + "pallet-uniques", "pallet-utility", "pallet-vesting", "parity-scale-codec", - "serde", + "scale-info", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -3982,6 +4745,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-keyring", + "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-session", @@ -3990,15 +4754,16 @@ dependencies = [ "sp-transaction-pool", "sp-version", "static_assertions", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] name = "node-template" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-benchmarking-cli", + "ipfs", "jsonrpc-core", "node-template-runtime", "pallet-transaction-payment-rpc", @@ -4009,10 +4774,13 @@ dependencies = [ "sc-consensus-aura", "sc-executor", "sc-finality-grandpa", + "sc-keystore", "sc-rpc", "sc-rpc-api", "sc-service", + "sc-telemetry", "sc-transaction-pool", + "sc-transaction-pool-api", "sp-api", "sp-block-builder", "sp-blockchain", @@ -4020,9 +4788,8 @@ dependencies = [ "sp-consensus-aura", "sp-core", "sp-finality-grandpa", - "sp-inherents", "sp-runtime", - "sp-transaction-pool", + "sp-timestamp", "structopt", "substrate-build-script-utils", "substrate-frame-rpc-system", @@ -4030,7 +4797,7 @@ dependencies = [ [[package]] name = "node-template-runtime" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-executive", @@ -4049,7 +4816,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", - "serde", + "scale-info", "sp-api", "sp-block-builder", "sp-consensus-aura", @@ -4061,37 +4828,26 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] name = "node-testing" -version = "2.0.0" +version = "3.0.0-dev" dependencies = [ - "criterion", - "frame-support", "frame-system", "fs_extra", - "futures 0.3.5", - "log", + "futures 0.3.17", + "log 0.4.14", "node-executor", "node-primitives", "node-runtime", - "pallet-balances", - "pallet-contracts", - "pallet-grandpa", - "pallet-indices", - "pallet-session", - "pallet-society", - "pallet-staking", - "pallet-timestamp", "pallet-transaction-payment", - "pallet-treasury", "parity-scale-codec", "sc-block-builder", - "sc-cli", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-service", "sp-api", @@ -4122,12 +4878,23 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "5.1.2" +version = "6.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = 
"e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" dependencies = [ + "bitvec 0.19.5", + "funty", "memchr", - "version_check", + "version_check 0.9.3", +] + +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", ] [[package]] @@ -4136,28 +4903,27 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-integer", "num-traits", ] [[package]] name = "num-complex" -version = "0.2.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" +checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" dependencies = [ - "autocfg 1.0.0", "num-traits", ] [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-traits", ] @@ -4167,19 +4933,30 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "num-bigint", "num-integer", "num-traits", ] +[[package]] +name = "num-rational" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +dependencies = [ + "autocfg 1.0.1", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "libm", ] @@ -4195,44 +4972,47 @@ dependencies = [ [[package]] name = "object" -version = "0.19.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cbca9424c482ee628fa549d9c812e2cd22f1180b9222c9200fdfa6eb31aecb2" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +dependencies = [ + "crc32fast", + "indexmap", +] [[package]] name = "object" -version = "0.20.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170" dependencies = [ "crc32fast", "indexmap", - "wasmparser 0.57.0", ] [[package]] -name = "once_cell" -version = "0.1.8" +name = "object" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" +checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" dependencies = [ - "parking_lot 0.7.1", + "memchr", ] [[package]] name = "once_cell" -version = "1.4.1" +version = "1.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" dependencies = [ - "parking_lot 0.11.0", + "parking_lot 0.11.2", ] [[package]] name = "oorandom" -version = "11.1.2" +version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" @@ -4246,11 +5026,38 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" +dependencies = [ + "bitflags", + "cfg-if 1.0.0", + "foreign-types", + "libc", + "once_cell", + "openssl-sys", +] + [[package]] name = "openssl-probe" -version = "0.1.2" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" + +[[package]] +name = "openssl-sys" +version = "0.9.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82" +dependencies = [ + "autocfg 1.0.1", + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] name = "output_vt100" @@ -4272,12 +5079,14 @@ dependencies = [ [[package]] name = "pallet-assets" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4286,13 +5095,13 @@ dependencies = [ [[package]] name = "pallet-atomic-swap" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4301,55 +5110,49 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", - "lazy_static", - "pallet-session", "pallet-timestamp", "parity-scale-codec", - "parking_lot 0.10.2", - "serde", + "scale-info", "sp-application-crypto", "sp-consensus-aura", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-std", - "sp-timestamp", ] [[package]] name = "pallet-authority-discovery" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "pallet-session", "parity-scale-codec", - "serde", + "scale-info", "sp-application-crypto", "sp-authority-discovery", "sp-core", "sp-io", "sp-runtime", - "sp-staking", "sp-std", ] [[package]] name = "pallet-authorship" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", "parity-scale-codec", + "scale-info", "sp-authorship", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-std", @@ -4357,11 +5160,13 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", + "log 0.4.14", 
"pallet-authorship", "pallet-balances", "pallet-offences", @@ -4370,30 +5175,29 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "serde", + "scale-info", "sp-application-crypto", "sp-consensus-babe", "sp-consensus-vrf", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-session", "sp-staking", "sp-std", - "sp-timestamp", ] [[package]] name = "pallet-balances" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log 0.4.14", "pallet-transaction-payment", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4401,16 +5205,32 @@ dependencies = [ ] [[package]] -name = "pallet-collective" -version = "2.0.0" +name = "pallet-bounties" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", "pallet-balances", + "pallet-treasury", "parity-scale-codec", - "serde", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-collective" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log 0.4.14", + "parity-scale-codec", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4419,7 +5239,7 @@ dependencies = [ [[package]] name = "pallet-contracts" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "assert_matches", "bitflags", @@ -4427,16 +5247,22 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "libsecp256k1 0.3.5", + "log 0.4.14", "pallet-balances", "pallet-contracts-primitives", + "pallet-contracts-proc-macro", "pallet-randomness-collective-flip", "pallet-timestamp", + "pallet-utility", "parity-scale-codec", - "parity-wasm 0.41.0", - "paste 1.0.0", - "pretty_assertions", + "pretty_assertions 0.7.2", "pwasm-utils", + "rand 0.7.3", + "rand_pcg 0.2.1", + "scale-info", "serde", + "smallvec 1.6.1", "sp-core", "sp-io", "sp-runtime", @@ -4448,16 +5274,29 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "bitflags", "parity-scale-codec", + "scale-info", + "serde", + "sp-core", "sp-runtime", "sp-std", ] +[[package]] +name = "pallet-contracts-proc-macro" +version = "4.0.0-dev" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pallet-contracts-rpc" -version = "0.8.0" +version = "4.0.0-dev" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4476,10 +5315,11 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0" +version = "4.0.0-dev" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", + "scale-info", "sp-api", "sp-runtime", "sp-std", @@ -4487,91 +5327,93 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", "pallet-balances", "pallet-scheduler", "parity-scale-codec", + "scale-info", "serde", "sp-core", "sp-io", "sp-runtime", "sp-std", - "sp-storage", - "substrate-test-utils", ] [[package]] -name = "pallet-elections" -version = "2.0.0" +name = "pallet-election-provider-multi-phase" +version = "4.0.0-dev" dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", - "hex-literal", + "log 0.4.14", "pallet-balances", "parity-scale-codec", - "serde", + "parking_lot 0.11.2", + "rand 0.7.3", + "scale-info", + "sp-arithmetic", "sp-core", 
"sp-io", + "sp-npos-elections", "sp-runtime", "sp-std", + "sp-tracing", + "static_assertions", + "strum 0.21.0", + "strum_macros 0.21.1", ] [[package]] -name = "pallet-elections-phragmen" -version = "2.0.0" +name = "pallet-elections" +version = "4.0.0-dev" dependencies = [ - "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", - "sp-npos-elections", "sp-runtime", "sp-std", - "substrate-test-utils", ] [[package]] -name = "pallet-evm" -version = "2.0.0" +name = "pallet-elections-phragmen" +version = "5.0.0-dev" dependencies = [ - "evm", + "frame-benchmarking", "frame-support", "frame-system", - "impl-trait-for-tuples", + "log 0.4.14", "pallet-balances", - "pallet-timestamp", "parity-scale-codec", - "primitive-types", - "ripemd160", - "rlp", - "serde", - "sha3", + "scale-info", "sp-core", "sp-io", + "sp-npos-elections", "sp-runtime", "sp-std", + "substrate-test-utils", ] [[package]] name = "pallet-example" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log 0.4.14", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4580,13 +5422,14 @@ dependencies = [ [[package]] name = "pallet-example-offchain-worker" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "lite-json", + "log 0.4.14", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-keystore", @@ -4596,11 +5439,12 @@ dependencies = [ [[package]] name = "pallet-example-parallel" -version = "2.0.0" +version = "3.0.0-dev" dependencies = [ "frame-support", "frame-system", "parity-scale-codec", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4608,14 +5452,33 @@ dependencies = [ "sp-tasks", ] +[[package]] +name = "pallet-gilt" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-grandpa" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "finality-grandpa", "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", + "log 0.4.14", "pallet-authorship", "pallet-balances", "pallet-offences", @@ -4624,7 +5487,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "serde", + "scale-info", "sp-application-crypto", "sp-core", "sp-finality-grandpa", @@ -4638,7 +5501,7 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4646,7 +5509,7 @@ dependencies = [ "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4655,15 +5518,16 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log 0.4.14", "pallet-authorship", "pallet-session", "parity-scale-codec", - "serde", + "scale-info", "sp-application-crypto", "sp-core", "sp-io", @@ -4674,14 +5538,14 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", 
"sp-io", "sp-keyring", @@ -4689,30 +5553,101 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-lottery" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-support-test", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-membership" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "log 0.4.14", "parity-scale-codec", - "serde", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-mmr" +version = "4.0.0-dev" +dependencies = [ + "ckb-merkle-mountain-range", + "env_logger 0.9.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "pallet-mmr-primitives", + "parity-scale-codec", + "scale-info", "sp-core", "sp-io", "sp-runtime", "sp-std", ] +[[package]] +name = "pallet-mmr-primitives" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "hex-literal", + "log 0.4.14", + "parity-scale-codec", + "serde", + "sp-api", + "sp-core", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-mmr-rpc" +version = "3.0.0" +dependencies = [ + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "pallet-mmr-primitives", + "parity-scale-codec", + "serde", + "serde_json", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-runtime", +] + [[package]] name = "pallet-multisig" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4721,13 +5656,13 @@ dependencies = [ [[package]] name = "pallet-nicks" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4736,12 +5671,13 @@ dependencies = [ [[package]] name = "pallet-node-authorization" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", + "log 0.4.14", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4750,12 +5686,14 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", + "log 0.4.14", "pallet-balances", "parity-scale-codec", + "scale-info", "serde", "sp-core", "sp-io", @@ -4766,9 +5704,10 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "pallet-babe", @@ -4781,7 +5720,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4791,7 +5730,7 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "2.0.1" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", @@ -4799,7 +5738,7 @@ dependencies = [ "pallet-balances", "pallet-utility", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4808,12 +5747,13 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "parity-scale-codec", "safe-mix", + "scale-info", 
"sp-core", "sp-io", "sp-runtime", @@ -4822,14 +5762,13 @@ dependencies = [ [[package]] name = "pallet-recovery" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "enumflags2", "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4838,13 +5777,14 @@ dependencies = [ [[package]] name = "pallet-scheduler" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log 0.4.14", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4854,13 +5794,13 @@ dependencies = [ [[package]] name = "pallet-scored-pool" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4869,16 +5809,15 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "lazy_static", + "log 0.4.14", "pallet-timestamp", "parity-scale-codec", - "serde", - "sp-application-crypto", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4890,9 +5829,10 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", "pallet-balances", @@ -4902,7 +5842,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "rand 0.7.3", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4912,14 +5852,15 @@ dependencies = [ [[package]] name = "pallet-society" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", + "frame-support-test", "frame-system", "pallet-balances", "parity-scale-codec", "rand_chacha 0.2.2", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -4928,74 +5869,59 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", + "frame-election-provider-support", "frame-support", "frame-system", - "hex", + "log 0.4.14", "pallet-authorship", "pallet-balances", "pallet-session", "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "parking_lot 0.10.2", "rand_chacha 0.2.2", + "scale-info", "serde", "sp-application-crypto", "sp-core", "sp-io", - "sp-npos-elections", "sp-runtime", "sp-staking", "sp-std", - "sp-storage", "sp-tracing", - "static_assertions", "substrate-test-utils", ] -[[package]] -name = "pallet-staking-fuzz" -version = "0.0.0" -dependencies = [ - "frame-support", - "frame-system", - "honggfuzz", - "pallet-balances", - "pallet-indices", - "pallet-session", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-timestamp", - "parity-scale-codec", - "sp-core", - "sp-io", - "sp-npos-elections", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro2", "quote", "sp-runtime", "syn", ] +[[package]] +name = "pallet-staking-reward-fn" +version = "4.0.0-dev" +dependencies = [ + "log 0.4.14", + "sp-arithmetic", +] + [[package]] name = "pallet-sudo" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -5004,11 +5930,13 @@ 
dependencies = [ [[package]] name = "pallet-template" -version = "2.0.0" +version = "3.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -5016,50 +5944,69 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "impl-trait-for-tuples", + "log 0.4.14", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + +[[package]] +name = "pallet-tips" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log 0.4.14", + "pallet-balances", + "pallet-treasury", "parity-scale-codec", + "scale-info", "serde", "sp-core", - "sp-inherents", "sp-io", "sp-runtime", "sp-std", - "sp-timestamp", + "sp-storage", ] [[package]] name = "pallet-transaction-payment" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", "pallet-balances", - "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", + "scale-info", "serde", - "smallvec 1.4.1", + "serde_json", + "smallvec 1.6.1", "sp-core", "sp-io", "sp-runtime", "sp-std", - "sp-storage", ] [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", - "serde", "sp-api", "sp-blockchain", "sp-core", @@ -5069,44 +6016,78 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0" +version = "4.0.0-dev" +dependencies = [ + "pallet-transaction-payment", + "parity-scale-codec", + "sp-api", + "sp-runtime", +] + +[[package]] +name = "pallet-transaction-storage" +version = "4.0.0-dev" dependencies = [ + "frame-benchmarking", "frame-support", + "frame-system", + "hex-literal", + "pallet-balances", "parity-scale-codec", + "scale-info", "serde", - "serde_json", - "sp-api", + "sp-core", + "sp-inherents", + "sp-io", "sp-runtime", "sp-std", + "sp-transaction-storage-proof", ] [[package]] name = "pallet-treasury" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "impl-trait-for-tuples", "pallet-balances", "parity-scale-codec", + "scale-info", "serde", "sp-core", "sp-io", "sp-runtime", "sp-std", - "sp-storage", +] + +[[package]] +name = "pallet-uniques" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", ] [[package]] name = "pallet-utility" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", @@ -5115,75 +6096,61 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "enumflags2", "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", + "log 0.4.14", "pallet-balances", "parity-scale-codec", - "serde", + "scale-info", "sp-core", "sp-io", "sp-runtime", "sp-std", - "sp-storage", ] [[package]] name = "parity-db" -version = "0.1.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"00d595e372d119261593297debbe4193811a4dc811d2a1ccbb8caaa6666ad7ab" +checksum = "241f9c5d25063080f2c02846221f13e1d0e5e18fa00c32c234aad585b744ee55" dependencies = [ "blake2-rfc", "crc32fast", + "fs2", + "hex", "libc", - "log", - "memmap", - "parking_lot 0.10.2", -] - -[[package]] -name = "parity-multiaddr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2165a93382a93de55868dcbfa11e4a8f99676a9164eee6a2b4a9479ad319c257" -dependencies = [ - "arrayref", - "bs58", - "byteorder 1.3.4", - "data-encoding", - "multihash", - "percent-encoding 2.1.0", - "serde", - "static_assertions", - "unsigned-varint 0.4.0", - "url 2.1.1", + "log 0.4.14", + "lz4", + "memmap2", + "parking_lot 0.11.2", + "rand 0.8.4", + "snap", ] [[package]] name = "parity-scale-codec" -version = "1.3.4" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d38aeaffc032ec69faa476b3caaca8d4dd7f3f798137ff30359e5c7869ceb6" +checksum = "e11263a97373b43da4b426edbb52ef99a7b51e2d9752ef56a7f8b356f48495a5" dependencies = [ - "arrayvec 0.5.1", - "bitvec", + "arrayvec 0.7.1", + "bitvec 0.20.4", "byte-slice-cast", + "impl-trait-for-tuples", "parity-scale-codec-derive", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "1.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd20ff7e0399b274a5f5bb37b712fccb5b3a64b9128200d1c3cc40fe709cb073" +checksum = "b157dc92b3db2bae522afb31b3843e91ae097eb01d66c72dda66a2e86bc3ca14" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro2", "quote", "syn", @@ -5197,38 +6164,31 @@ checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" [[package]] name = "parity-tokio-ipc" -version = "0.4.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" +checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", + "futures 0.3.17", "libc", - "log", - "mio-named-pipes", - "miow 0.3.5", + "log 0.4.14", "rand 0.7.3", - "tokio 0.1.22", - "tokio-named-pipes", - "tokio-uds", + "tokio", "winapi 0.3.9", ] [[package]] name = "parity-util-mem" -version = "0.7.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" +checksum = "7ad6f1acec69b95caf435bbd158d486e5a0a44fcf51531e84922c59ff09e8457" dependencies = [ - "cfg-if", - "ethereum-types", - "hashbrown 0.8.1", + "cfg-if 1.0.0", + "hashbrown", "impl-trait-for-tuples", - "lru 0.5.3", "parity-util-mem-derive", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "primitive-types", - "smallvec 1.4.1", + "smallvec 1.6.1", "winapi 0.3.9", ] @@ -5249,31 +6209,31 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac" dependencies = [ - "byteorder 1.3.4", + "byteorder", ] [[package]] name = "parity-wasm" -version = "0.41.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" [[package]] name = "parity-ws" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9e02a625dd75084c2a7024f07c575b61b782f729d18702dabb3cdbf31911dc61" +checksum = "d0ab8a461779bd022964cae2b4989fa9c99deb270bec162da2125ec03c09fcaa" dependencies = [ - "byteorder 1.3.4", + "byteorder", "bytes 0.4.12", "httparse", - "log", - "mio", + "log 0.4.14", + "mio 0.6.23", "mio-extras", "rand 0.7.3", - "sha-1", + "sha-1 0.8.2", "slab", - "url 2.1.1", + "url 2.2.2", ] [[package]] @@ -5282,16 +6242,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" -[[package]] -name = "parking_lot" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" -dependencies = [ - "lock_api 0.1.5", - "parking_lot_core 0.4.0", -] - [[package]] name = "parking_lot" version = "0.9.0" @@ -5300,7 +6250,7 @@ checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ "lock_api 0.3.4", "parking_lot_core 0.6.2", - "rustc_version", + "rustc_version 0.2.3", ] [[package]] @@ -5315,26 +6265,13 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", - "lock_api 0.4.1", - "parking_lot_core 0.8.0", -] - -[[package]] -name = "parking_lot_core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" -dependencies = [ - "libc", - "rand 0.6.5", - "rustc_version", - "smallvec 0.6.13", - "winapi 0.3.9", + "lock_api 0.4.5", + "parking_lot_core 0.8.5", ] [[package]] @@ -5343,12 +6280,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ - "cfg-if", - "cloudabi 0.0.3", + "cfg-if 0.1.10", + "cloudabi", "libc", - "redox_syscall", - "rustc_version", - "smallvec 0.6.13", + "redox_syscall 0.1.57", + "rustc_version 0.2.3", + "smallvec 0.6.14", "winapi 0.3.9", ] @@ -5358,26 +6295,25 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ - "cfg-if", - "cloudabi 0.0.3", + "cfg-if 0.1.10", + "cloudabi", "libc", - "redox_syscall", - "smallvec 1.4.1", + "redox_syscall 0.1.57", + "smallvec 1.6.1", "winapi 0.3.9", ] [[package]] name = "parking_lot_core" -version = "0.8.0" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if", - "cloudabi 0.1.0", + "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", - "smallvec 1.4.1", + "redox_syscall 0.2.10", + "smallvec 1.6.1", "winapi 0.3.9", ] @@ -5393,9 +6329,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.0" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ddc8e145de01d9180ac7b78b9676f95a9c2447f6a88b2c2a04702211bc5d71" +checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" [[package]] name = 
"paste-impl" @@ -5408,20 +6344,21 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" dependencies = [ - "byteorder 1.3.4", - "crypto-mac 0.7.0", - "rayon", + "crypto-mac 0.8.0", ] [[package]] -name = "pdqselect" -version = "0.1.0" +name = "pbkdf2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec91767ecc0a0bbe558ce8c9da33c068066c57ecc8bb8477ef8c1ad3ef77c27" +checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" +dependencies = [ + "crypto-mac 0.11.1", +] [[package]] name = "peeking_take_while" @@ -5441,6 +6378,49 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "pest" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +dependencies = [ + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" +dependencies = [ + "maplit", + "pest", + "sha-1 0.8.2", +] + [[package]] name = "petgraph" version = "0.5.1" @@ -5453,18 +6433,38 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.22" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" +dependencies = [ + "pin-project-internal 0.4.28", +] + +[[package]] +name = "pin-project" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" dependencies = [ - "pin-project-internal", + "pin-project-internal 1.0.8", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" dependencies = [ "proc-macro2", "quote", @@ -5473,9 +6473,15 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.7" +version = "0.1.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" + +[[package]] +name = "pin-project-lite" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -5485,87 +6491,108 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "platforms" -version = "0.2.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" +checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" [[package]] name = "plotters" -version = "0.2.15" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb" +checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" dependencies = [ - "js-sys", "num-traits", + "plotters-backend", + "plotters-svg", "wasm-bindgen", "web-sys", ] +[[package]] +name = "plotters-backend" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" + +[[package]] +name = "plotters-svg" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polling" -version = "1.1.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0720e0b9ea9d52451cf29d3413ba8a9303f8815d9d9653ef70e03ff73e65566" +checksum = "92341d779fa34ea8437ef4d82d440d5e1ce3f3ff7f824aa64424cd481f9a1f25" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", - "log", - "wepoll-sys-stjepang", + "log 0.4.14", + "wepoll-ffi", "winapi 0.3.9", ] [[package]] name = "poly1305" -version = "0.6.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b42192ab143ed7619bf888a7f9c6733a9a2153b218e2cd557cfdb52fbf9bb1" +checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ + "cpufeatures 0.2.1", + "opaque-debug 0.3.0", "universal-hash", ] [[package]] name = "polyval" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" +checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", + "cpufeatures 0.2.1", + "opaque-debug 0.3.0", "universal-hash", ] [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "predicates" -version = "1.0.5" 
+version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96bfead12e90dccead362d62bb2c90a5f6fc4584963645bc7f71a735e0b0735a" +checksum = "c143348f141cc87aab5b950021bac6145d0e5ae754b0591de23244cee42c9308" dependencies = [ - "difference", + "difflib", + "itertools", "predicates-core", ] [[package]] name = "predicates-core" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06075c3a3e92559ff8929e7a280684489ea27fe44805174c3ebd9328dcb37178" +checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451" [[package]] name = "predicates-tree" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e63c4859013b38a76eca2414c64911fba30def9e3202ac461a2d22831220124" +checksum = "d7dd0fd014130206c9352efbdc92be592751b2b9274dff685348341082c6ea3d" dependencies = [ "predicates-core", "treeline", @@ -5584,15 +6611,27 @@ dependencies = [ ] [[package]] -name = "primitive-types" +name = "pretty_assertions" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55c21c64d0eaa4d7ed885d959ef2d62d9e488c27c0e02d9aa5ce6c877b7d5f8" +checksum = "1cab0e7c02cf376875e9335e0ba1da535775beb5450d21e1dffca068818ed98b" +dependencies = [ + "ansi_term 0.12.1", + "ctor", + "diff", + "output_vt100", +] + +[[package]] +name = "primitive-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", "impl-serde", + "scale-info", "uint", ] @@ -5605,87 +6644,121 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" +dependencies = [ + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" -version = "1.0.3" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7" +dependencies = [ + "proc-macro-error-attr 0.4.12", + "proc-macro2", + "quote", + "syn", + "version_check 0.9.3", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ - "proc-macro-error-attr", + "proc-macro-error-attr 1.0.4", "proc-macro2", "quote", "syn", - "version_check", + "version_check 0.9.3", ] [[package]] name = "proc-macro-error-attr" -version = "1.0.3" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" +checksum = "8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de" dependencies = [ "proc-macro2", "quote", "syn", "syn-mid", - "version_check", + "version_check 0.9.3", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check 0.9.3", ] [[package]] name = "proc-macro-hack" -version = "0.5.16" +version = 
"0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.19" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12" +checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" dependencies = [ "unicode-xid", ] [[package]] name = "prometheus" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d70cf4412832bcac9cffe27906f4a66e450d323525e977168c70d1b36120ae" +checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "fnv", "lazy_static", - "parking_lot 0.11.0", + "parking_lot 0.11.2", "regex", "thiserror", ] [[package]] name = "prost" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" +checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "prost-derive", ] [[package]] name = "prost-build" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "heck", - "itertools 0.8.2", - "log", + "itertools", + "log 0.4.14", "multimap", "petgraph", "prost", @@ -5696,12 +6769,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" dependencies = [ "anyhow", - "itertools 0.8.2", + "itertools", "proc-macro2", "quote", "syn", @@ -5709,23 +6782,32 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "prost", ] +[[package]] +name = "psm" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69" +dependencies = [ + "cc", +] + [[package]] name = "pwasm-utils" -version = "0.14.0" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" +checksum = "880b3384fb00b8f6ecccd5d358b93bd2201900ae3daad213791d1864f6441f5c" dependencies = [ - "byteorder 1.3.4", - "log", - "parity-wasm 0.41.0", + "byteorder", + "log 0.4.14", + 
"parity-wasm 0.42.2", ] [[package]] @@ -5734,16 +6816,30 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + +[[package]] +name = "quick-protobuf" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ca6639207ac869e31cca06b8adbc7676278f22b321e51115766009b4f192dbb" +dependencies = [ + "byteorder", +] + [[package]] name = "quickcheck" -version = "0.9.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "env_logger", - "log", - "rand 0.7.3", - "rand_core 0.5.1", + "env_logger 0.8.4", + "log 0.4.14", + "rand 0.8.4", ] [[package]] @@ -5754,59 +6850,29 @@ checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" dependencies = [ "futures-core", "futures-sink", - "pin-project-lite", + "pin-project-lite 0.1.12", ] [[package]] name = "quote" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] [[package]] name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "rand" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" -dependencies = [ - "libc", - "rand 0.4.6", -] - -[[package]] -name = "rand" -version = "0.4.6" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi 0.3.9", -] +checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" [[package]] -name = "rand" -version = "0.5.6" +name = "radium" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -dependencies = [ - "cloudabi 0.0.3", - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "winapi 0.3.9", -] +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" [[package]] name = "rand" @@ -5833,7 +6899,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.14", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -5841,6 +6907,18 @@ dependencies = [ "rand_pcg 0.2.1", ] +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.3", + "rand_hc 0.3.1", +] + [[package]] name = 
"rand_chacha" version = "0.1.1" @@ -5861,6 +6939,16 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.3", +] + [[package]] name = "rand_core" version = "0.3.1" @@ -5882,7 +6970,26 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.14", + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom 0.2.3", +] + +[[package]] +name = "rand_distr" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "051b398806e42b9cd04ad9ec8f81e355d0a382c543ac6672c62f5a5b452ef142" +dependencies = [ + "num-traits", + "rand 0.8.4", ] [[package]] @@ -5903,6 +7010,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_hc" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +dependencies = [ + "rand_core 0.6.3", +] + [[package]] name = "rand_isaac" version = "0.1.1" @@ -5929,12 +7045,11 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" dependencies = [ - "cloudabi 0.0.3", + "cloudabi", "fuchsia-cprng", "libc", "rand_core 0.4.2", "rdrand", - "wasm-bindgen", "winapi 0.3.9", ] @@ -5966,17 +7081,6 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "raw-cpuid" -version = "7.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a349ca83373cfa5d6dbb66fd76e58b2cca08da71a5f6400de0a0a6a9bceeaf" -dependencies = [ - "bitflags", - "cc", - "rustc_version", -] - [[package]] name = "rawpointer" version = "0.2.1" @@ -5985,11 +7089,11 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.3.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080" +checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" dependencies = [ - "autocfg 1.0.0", + "autocfg 1.0.1", "crossbeam-deque", "either", "rayon-core", @@ -5997,13 +7101,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.7.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280" +checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" dependencies = [ + "crossbeam-channel", "crossbeam-deque", - "crossbeam-queue", - "crossbeam-utils", + "crossbeam-utils 0.8.5", "lazy_static", "num_cpus", ] @@ -6023,31 +7127,39 @@ version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +[[package]] +name = "redox_syscall" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" -version = "0.3.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.1.14", - "redox_syscall", - "rust-argon2", + "getrandom 0.2.3", + "redox_syscall 0.2.10", ] [[package]] name = "ref-cast" -version = "1.0.2" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "745c1787167ddae5569661d5ffb8b25ae5fedbf46717eaa92d652221cec72623" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.2" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -6056,42 +7168,41 @@ dependencies = [ [[package]] name = "regalloc" -version = "0.0.27" +version = "0.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ba8aaf5fe7cf307c6dbdaeed85478961d29e25e3bee5169e11b92fa9f027a8" +checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" dependencies = [ - "log", + "log 0.4.14", "rustc-hash", - "smallvec 1.4.1", + "serde", + "smallvec 1.6.1", ] [[package]] name = "regex" -version = "1.3.9" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - "thread_local", ] [[package]] name = "regex-automata" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "byteorder 1.3.4", "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "region" @@ -6105,6 +7216,25 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "remote-externalities" +version = "0.10.0-dev" +dependencies = [ + "env_logger 0.9.0", + "jsonrpsee-proc-macros", + "jsonrpsee-ws-client", + "log 0.4.14", + "pallet-elections-phragmen", + "parity-scale-codec", + "serde", + "serde_json", + "sp-core", + "sp-io", + "sp-runtime", + "sp-version", + "tokio", +] + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -6114,52 +7244,42 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error 1.2.3", +] + [[package]] name = "retain_mut" -version = "0.1.1" +version = "0.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e005d658ad26eacc2b6c506dfde519f4e277e328d0eb3379ca61647d70a8f531" +checksum = "e9c17925a9027d298a4603d286befe3f9dc0e8ed02523141914eb628798d6e5b" [[package]] name = "ring" -version = "0.16.15" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", - "once_cell 1.4.1", + "once_cell", "spin", "untrusted", "web-sys", "winapi 0.3.9", ] -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "rlp" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a7d3f9bed94764eac15b8f14af59fac420c236adaff743b7bcc88e265cb4345" -dependencies = [ - "rustc-hex", -] - [[package]] name = "rocksdb" -version = "0.15.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d83c02c429044d58474eaf5ae31e062d0de894e21125b47437ec0edc1397e6" +checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" dependencies = [ "libc", "librocksdb-sys", @@ -6167,31 +7287,19 @@ dependencies = [ [[package]] name = "rpassword" -version = "4.0.5" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" +checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" dependencies = [ "libc", "winapi 0.3.9", ] -[[package]] -name = "rust-argon2" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" -dependencies = [ - "base64 0.11.0", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc-hash" @@ -6214,14 +7322,32 @@ dependencies = [ "semver 0.9.0", ] +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.4", +] + [[package]] name = "rustls" -version = "0.18.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.12.3", - "log", + "base64 0.13.0", + "log 0.4.14", "ring", "sct", "webpki", @@ -6229,9 +7355,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.4.0" +version = "0.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", "rustls", @@ -6241,14 +7367,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bdc5e856e51e685846fb6c13a1f5e5432946c2c90501bdc76a1319f19e29da" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" [[package]] name = "rw-stream-sink" @@ -6256,8 +7377,8 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.5", - "pin-project", + "futures 0.3.17", + "pin-project 0.4.28", "static_assertions", ] @@ -6273,27 +7394,22 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d3d055a2582e6b00ed7a31c1524040aa391092bf636328350813f3a0605215c" dependencies = [ - "rustc_version", + "rustc_version 0.2.3", ] [[package]] -name = "salsa20" -version = "0.3.0" +name = "safemem" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2324b0e8c3bb9a586a571fdb3136f70e7e2c748de00a78043f86e0cff91f91fe" -dependencies = [ - "byteorder 1.3.4", - "salsa20-core", - "stream-cipher 0.3.2", -] +checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" [[package]] -name = "salsa20-core" -version = "0.2.3" +name = "salsa20" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fe6cc1b9f5a5867853ade63099de70f042f7679e408d1ffe52821c9248e6e69" +checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "stream-cipher 0.3.2", + "cipher", ] [[package]] @@ -6305,27 +7421,34 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "sc-allocator" +version = "4.0.0-dev" +dependencies = [ + "log 0.4.14", + "sp-core", + "sp-wasm-interface", + "thiserror", +] + [[package]] name = "sc-authority-discovery" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ - "bytes 0.5.6", + "async-trait", "derive_more", - "either", - "futures 0.3.5", + "futures 0.3.17", "futures-timer 3.0.2", + "ip_network", "libp2p", - "log", + "log 0.4.14", "parity-scale-codec", "prost", "prost-build", "quickcheck", "rand 0.7.3", "sc-client-api", - "sc-keystore", "sc-network", - "sc-peerset", - "serde_json", "sp-api", "sp-authority-discovery", "sp-blockchain", @@ -6339,73 +7462,65 @@ dependencies = [ [[package]] name = "sc-basic-authorship" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "futures-timer 3.0.2", - "log", + "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", "sc-telemetry", "sc-transaction-pool", + "sc-transaction-pool-api", "sp-api", "sp-blockchain", "sp-consensus", "sp-core", "sp-inherents", "sp-runtime", - "sp-transaction-pool", "substrate-prometheus-endpoint", "substrate-test-runtime-client", - "tokio-executor 0.2.0-alpha.6", ] [[package]] name = "sc-block-builder" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "parity-scale-codec", "sc-client-api", "sp-api", "sp-block-builder", "sp-blockchain", - "sp-consensus", 
"sp-core", "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-trie", "substrate-test-runtime-client", ] [[package]] name = "sc-chain-spec" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", "sc-chain-spec-derive", - "sc-consensus-babe", - "sc-consensus-epochs", - "sc-finality-grandpa", "sc-network", "sc-telemetry", "serde", "serde_json", - "sp-chain-spec", - "sp-consensus-babe", "sp-core", "sp-runtime", ] [[package]] name = "sc-chain-spec-derive" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro2", "quote", "syn", @@ -6413,171 +7528,142 @@ dependencies = [ [[package]] name = "sc-cli" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ - "ansi_term 0.12.1", - "atty", - "bip39", "chrono", - "derive_more", "fdlimit", - "futures 0.3.5", + "futures 0.3.17", "hex", - "lazy_static", "libp2p", - "log", + "log 0.4.14", "names", - "nix", "parity-scale-codec", - "parity-util-mem", "rand 0.7.3", "regex", "rpassword", - "sc-cli-proc-macro", "sc-client-api", - "sc-consensus-babe", - "sc-consensus-epochs", - "sc-finality-grandpa", - "sc-informant", "sc-keystore", "sc-network", "sc-service", "sc-telemetry", "sc-tracing", + "sc-utils", "serde", "serde_json", - "sp-application-crypto", "sp-blockchain", - "sp-core", - "sp-io", - "sp-keyring", - "sp-keystore", - "sp-panic-handler", - "sp-runtime", - "sp-state-machine", - "sp-utils", - "sp-version", - "structopt", - "substrate-prometheus-endpoint", - "tempfile", - "time", - "tokio 0.2.22", - "tracing", - "tracing-log", - "tracing-subscriber", -] - -[[package]] -name = "sc-cli-proc-macro" -version = "2.0.0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", + "sp-core", + "sp-keyring", + "sp-keystore", + "sp-panic-handler", + "sp-runtime", + "sp-version", + "structopt", + "tempfile", + "thiserror", + "tiny-bip39", + "tokio", ] [[package]] name = "sc-client-api" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "derive_more", "fnv", - "futures 0.3.5", + "futures 0.3.17", "hash-db", - "hex-literal", - "kvdb", - "kvdb-memorydb", - "lazy_static", - "log", + "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sc-executor", - "sc-telemetry", + "sc-transaction-pool-api", + "sc-utils", "sp-api", "sp-blockchain", "sp-consensus", "sp-core", "sp-database", "sp-externalities", - "sp-inherents", - "sp-keyring", "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-std", "sp-storage", "sp-test-primitives", - "sp-transaction-pool", "sp-trie", - "sp-utils", - "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime", + "thiserror", ] [[package]] name = "sc-client-db" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ - "blake2-rfc", "hash-db", "kvdb", "kvdb-memorydb", "kvdb-rocksdb", "linked-hash-map", - "log", + "log 0.4.14", "parity-db", "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "quickcheck", "sc-client-api", - "sc-executor", "sc-state-db", "sp-arithmetic", "sp-blockchain", - "sp-consensus", "sp-core", "sp-database", - "sp-keyring", "sp-runtime", "sp-state-machine", "sp-tracing", "sp-trie", - "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", ] [[package]] name = "sc-consensus" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ + "async-trait", + "futures 0.3.17", + "futures-timer 3.0.2", + "libp2p", + "log 0.4.14", + "parking_lot 0.11.2", "sc-client-api", + 
"sc-utils", + "serde", + "sp-api", "sp-blockchain", "sp-consensus", + "sp-core", "sp-runtime", + "sp-state-machine", + "sp-test-primitives", + "substrate-prometheus-endpoint", + "thiserror", ] [[package]] name = "sc-consensus-aura" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ + "async-trait", "derive_more", - "futures 0.3.5", - "futures-timer 3.0.2", - "log", + "futures 0.3.17", + "getrandom 0.2.3", + "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-consensus-slots", - "sc-executor", "sc-keystore", "sc-network", "sc-network-test", - "sc-service", "sc-telemetry", "sp-api", "sp-application-crypto", @@ -6585,15 +7671,14 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-aura", + "sp-consensus-slots", "sp-core", "sp-inherents", - "sp-io", "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", "sp-tracing", - "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", @@ -6601,33 +7686,30 @@ dependencies = [ [[package]] name = "sc-consensus-babe" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ + "async-trait", "derive_more", "fork-tree", - "futures 0.3.5", - "futures-timer 3.0.2", - "log", + "futures 0.3.17", + "log 0.4.14", "merlin", "num-bigint", - "num-rational", + "num-rational 0.2.4", "num-traits", "parity-scale-codec", - "parking_lot 0.10.2", - "pdqselect", + "parking_lot 0.11.2", "rand 0.7.3", "rand_chacha 0.2.2", "retain_mut", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-consensus-epochs", "sc-consensus-slots", - "sc-consensus-uncles", - "sc-executor", "sc-keystore", "sc-network", "sc-network-test", - "sc-service", "sc-telemetry", "schnorrkel", "serde", @@ -6637,16 +7719,15 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-babe", + "sp-consensus-slots", "sp-consensus-vrf", "sp-core", "sp-inherents", "sp-io", - "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", "sp-tracing", - "sp-utils", "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -6655,10 +7736,10 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.17", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6684,62 +7765,66 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "fork-tree", "parity-scale-codec", - "parking_lot 0.10.2", "sc-client-api", + "sc-consensus", "sp-blockchain", "sp-runtime", ] [[package]] name = "sc-consensus-manual-seal" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "assert_matches", + "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.17", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "log", - "parking_lot 0.10.2", + "log 0.4.14", + "parity-scale-codec", "sc-basic-authorship", "sc-client-api", + "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", "sc-transaction-pool", + "sc-transaction-pool-api", "serde", "sp-api", "sp-blockchain", "sp-consensus", "sp-consensus-babe", + "sp-consensus-slots", "sp-core", "sp-inherents", "sp-keystore", "sp-runtime", "sp-timestamp", - "sp-transaction-pool", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", - "tempfile", - "tokio 0.2.22", + "tokio", ] [[package]] name = "sc-consensus-pow" -version = "0.8.0" +version = "0.10.0-dev" 
 dependencies = [
+ "async-trait",
  "derive_more",
- "futures 0.3.5",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
- "log",
+ "log 0.4.14",
  "parity-scale-codec",
- "parking_lot 0.10.2",
+ "parking_lot 0.11.2",
  "sc-client-api",
+ "sc-consensus",
  "sp-api",
  "sp-block-builder",
  "sp-blockchain",
@@ -6748,23 +7833,23 @@ dependencies = [
  "sp-core",
  "sp-inherents",
  "sp-runtime",
- "sp-timestamp",
  "substrate-prometheus-endpoint",
 ]

 [[package]]
 name = "sc-consensus-slots"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
- "futures 0.3.5",
+ "async-trait",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
- "log",
+ "log 0.4.14",
  "parity-scale-codec",
- "parking_lot 0.10.2",
  "sc-client-api",
+ "sc-consensus",
  "sc-telemetry",
  "sp-api",
- "sp-application-crypto",
+ "sp-arithmetic",
  "sp-blockchain",
  "sp-consensus",
  "sp-consensus-slots",
@@ -6772,35 +7857,33 @@ dependencies = [
  "sp-inherents",
  "sp-runtime",
  "sp-state-machine",
+ "sp-timestamp",
  "substrate-test-runtime-client",
+ "thiserror",
 ]

 [[package]]
 name = "sc-consensus-uncles"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
- "log",
  "sc-client-api",
  "sp-authorship",
- "sp-consensus",
- "sp-core",
- "sp-inherents",
  "sp-runtime",
+ "thiserror",
 ]

 [[package]]
 name = "sc-executor"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
- "assert_matches",
- "derive_more",
  "hex-literal",
  "lazy_static",
- "libsecp256k1",
- "log",
+ "libsecp256k1 0.6.0",
+ "log 0.4.14",
  "parity-scale-codec",
- "parity-wasm 0.41.0",
- "parking_lot 0.10.2",
+ "parking_lot 0.11.2",
+ "paste 1.0.5",
+ "regex",
  "sc-executor-common",
  "sc-executor-wasmi",
  "sc-executor-wasmtime",
@@ -6810,18 +7893,16 @@ dependencies = [
  "sp-core",
  "sp-externalities",
  "sp-io",
+ "sp-maybe-compressed-blob",
  "sp-panic-handler",
  "sp-runtime",
  "sp-runtime-interface",
- "sp-serializer",
  "sp-state-machine",
  "sp-tasks",
- "sp-tracing",
  "sp-trie",
  "sp-version",
  "sp-wasm-interface",
  "substrate-test-runtime",
- "test-case",
  "tracing",
  "tracing-subscriber",
  "wasmi",
@@ -6830,28 +7911,32 @@ [[package]]
 name = "sc-executor-common"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
  "derive_more",
- "log",
+ "environmental",
  "parity-scale-codec",
- "parity-wasm 0.41.0",
- "sp-allocator",
+ "pwasm-utils",
+ "sc-allocator",
  "sp-core",
- "sp-runtime-interface",
+ "sp-maybe-compressed-blob",
  "sp-serializer",
  "sp-wasm-interface",
+ "thiserror",
+ "wasmer",
+ "wasmer-compiler-singlepass",
  "wasmi",
 ]

 [[package]]
 name = "sc-executor-wasmi"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
- "log",
+ "log 0.4.14",
  "parity-scale-codec",
+ "sc-allocator",
  "sc-executor-common",
- "sp-allocator",
+ "scoped-tls",
  "sp-core",
  "sp-runtime-interface",
  "sp-wasm-interface",
@@ -6860,37 +7945,41 @@ [[package]]
 name = "sc-executor-wasmtime"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
- "assert_matches",
- "log",
+ "cfg-if 1.0.0",
+ "libc",
+ "log 0.4.14",
  "parity-scale-codec",
- "parity-wasm 0.41.0",
- "pwasm-utils",
+ "parity-wasm 0.42.2",
+ "sc-allocator",
  "sc-executor-common",
+ "sc-runtime-test",
  "scoped-tls",
- "sp-allocator",
  "sp-core",
+ "sp-io",
  "sp-runtime-interface",
  "sp-wasm-interface",
  "wasmtime",
+ "wat",
 ]

 [[package]]
 name = "sc-finality-grandpa"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
  "assert_matches",
+ "async-trait",
  "derive_more",
+ "dyn-clone",
  "finality-grandpa",
  "fork-tree",
- "futures 0.3.5",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
- "log",
+ "log 0.4.14",
  "parity-scale-codec",
- "parking_lot 0.10.2",
- "pin-project",
"pin-project", - "rand 0.7.3", + "parking_lot 0.11.2", + "rand 0.8.4", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -6899,51 +7988,45 @@ dependencies = [ "sc-network-gossip", "sc-network-test", "sc-telemetry", + "sc-utils", "serde_json", "sp-api", "sp-application-crypto", "sp-arithmetic", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", "sp-core", "sp-finality-grandpa", - "sp-inherents", "sp-keyring", "sp-keystore", "sp-runtime", - "sp-state-machine", "sp-tracing", - "sp-utils", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.22", + "tokio", ] [[package]] name = "sc-finality-grandpa-rpc" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.5", + "futures 0.3.17", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", - "lazy_static", - "log", + "log 0.4.14", "parity-scale-codec", "sc-block-builder", "sc-client-api", "sc-finality-grandpa", - "sc-network-test", "sc-rpc", "serde", "serde_json", "sp-blockchain", - "sp-consensus", "sp-core", "sp-finality-grandpa", "sp-keyring", @@ -6953,49 +8036,42 @@ dependencies = [ [[package]] name = "sc-informant" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.5", - "log", + "futures 0.3.17", + "futures-timer 3.0.2", + "log 0.4.14", "parity-util-mem", "sc-client-api", "sc-network", + "sc-transaction-pool-api", "sp-blockchain", "sp-runtime", - "sp-transaction-pool", - "sp-utils", - "wasm-timer", ] [[package]] name = "sc-keystore" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", - "futures-util", "hex", - "merlin", - "parking_lot 0.10.2", - "rand 0.7.3", + "parking_lot 0.11.2", "serde_json", "sp-application-crypto", "sp-core", "sp-keystore", - "subtle 2.2.3", "tempfile", ] [[package]] name = "sc-light" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "hash-db", - "lazy_static", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sc-client-api", "sc-executor", "sp-api", @@ -7008,92 +8084,90 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "assert_matches", "async-std", "async-trait", + "asynchronous-codec 0.5.0", "bitflags", - "bs58", - "bytes 0.5.6", + "bytes 1.1.0", + "cid 0.6.1", "derive_more", "either", - "erased-serde", "fnv", "fork-tree", - "futures 0.3.5", + "futures 0.3.17", "futures-timer 3.0.2", - "futures_codec", "hex", "ip_network", "libp2p", "linked-hash-map", "linked_hash_set", - "log", - "lru 0.4.3", - "nohash-hasher", + "log 0.4.14", + "lru", "parity-scale-codec", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.2", + "pin-project 1.0.8", "prost", "prost-build", "quickcheck", "rand 0.7.3", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-peerset", + "sc-utils", "serde", "serde_json", - "slog", - "slog_derive", - "smallvec 0.6.13", + "smallvec 1.6.1", "sp-arithmetic", "sp-blockchain", "sp-consensus", "sp-core", - "sp-keyring", + "sp-finality-grandpa", "sp-runtime", "sp-test-primitives", "sp-tracing", - "sp-utils", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", "thiserror", - "unsigned-varint 0.4.0", + "unsigned-varint 0.6.0", "void", - "wasm-timer", "zeroize", ] [[package]] name = "sc-network-gossip" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.17", "futures-timer 3.0.2", 
"libp2p", - "log", - "lru 0.4.3", + "log 0.4.14", + "lru", "quickcheck", - "rand 0.7.3", "sc-network", "sp-runtime", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", - "wasm-timer", + "tracing", ] [[package]] name = "sc-network-test" version = "0.8.0" dependencies = [ - "futures 0.3.5", + "async-std", + "async-trait", + "futures 0.3.17", "futures-timer 3.0.2", "libp2p", - "log", - "parking_lot 0.10.2", + "log 0.4.14", + "parking_lot 0.11.2", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -7108,88 +8182,92 @@ dependencies = [ "sp-tracing", "substrate-test-runtime", "substrate-test-runtime-client", - "tempfile", ] [[package]] name = "sc-offchain" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", + "cid 0.5.1", "fnv", - "futures 0.3.5", + "futures 0.3.17", "futures-timer 3.0.2", - "hyper 0.13.7", + "hex", + "hyper 0.14.13", "hyper-rustls", + "ipfs", "lazy_static", - "log", + "log 0.4.14", "num_cpus", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "rand 0.7.3", + "sc-block-builder", "sc-client-api", "sc-client-db", - "sc-keystore", "sc-network", "sc-transaction-pool", + "sc-transaction-pool-api", + "sc-utils", "sp-api", + "sp-consensus", "sp-core", "sp-offchain", "sp-runtime", "sp-tracing", - "sp-transaction-pool", - "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 0.2.22", + "tokio", ] [[package]] name = "sc-peerset" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "libp2p", - "log", + "log 0.4.14", "rand 0.7.3", + "sc-utils", "serde_json", - "sp-utils", "wasm-timer", ] [[package]] name = "sc-proposer-metrics" -version = "0.8.0" +version = "0.9.0" dependencies = [ - "log", + "log 0.4.14", "substrate-prometheus-endpoint", ] [[package]] name = "sc-rpc" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "assert_matches", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.17", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", "lazy_static", - "log", + "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sc-block-builder", + "sc-chain-spec", "sc-client-api", - "sc-executor", - "sc-keystore", "sc-network", "sc-rpc-api", + "sc-tracing", "sc-transaction-pool", + "sc-transaction-pool-api", + "sc-utils", "serde_json", "sp-api", "sp-blockchain", - "sp-chain-spec", + "sp-consensus", "sp-core", "sp-io", "sp-keystore", @@ -7197,95 +8275,88 @@ dependencies = [ "sp-rpc", "sp-runtime", "sp-session", - "sp-state-machine", - "sp-transaction-pool", - "sp-utils", "sp-version", "substrate-test-runtime-client", - "tokio 0.1.22", ] [[package]] name = "sc-rpc-api" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ - "derive_more", - "futures 0.3.5", + "futures 0.3.17", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", - "log", + "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", + "sc-chain-spec", + "sc-transaction-pool-api", "serde", "serde_json", - "sp-chain-spec", "sp-core", "sp-rpc", "sp-runtime", - "sp-transaction-pool", + "sp-tracing", "sp-version", + "thiserror", ] [[package]] name = "sc-rpc-server" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "futures 0.1.29", + "futures 0.3.17", "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-ipc-server", "jsonrpc-pubsub", "jsonrpc-ws-server", - "log", - "serde", + "log 0.4.14", "serde_json", - "sp-runtime", "substrate-prometheus-endpoint", + "tokio", ] [[package]] name = "sc-runtime-test" version = 
"2.0.0" dependencies = [ - "sp-allocator", "sp-core", "sp-io", "sp-runtime", "sp-sandbox", "sp-std", "sp-tasks", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] name = "sc-service" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "async-std", - "derive_more", + "async-trait", "directories", "exit-future", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.17", "futures-timer 3.0.2", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", - "lazy_static", - "log", + "log 0.4.14", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.2", + "pin-project 1.0.8", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", - "sc-finality-grandpa", "sc-informant", "sc-keystore", "sc-light", @@ -7296,36 +8367,36 @@ dependencies = [ "sc-telemetry", "sc-tracing", "sc-transaction-pool", + "sc-transaction-pool-api", + "sc-utils", "serde", "serde_json", - "slog", "sp-api", "sp-application-crypto", "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", "sp-core", "sp-externalities", - "sp-finality-grandpa", "sp-inherents", - "sp-io", "sp-keystore", "sp-runtime", "sp-session", "sp-state-machine", + "sp-storage", "sp-tracing", "sp-transaction-pool", + "sp-transaction-storage-proof", "sp-trie", - "sp-utils", "sp-version", "substrate-prometheus-endpoint", + "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.22", + "thiserror", + "tokio", "tracing", "tracing-futures", - "wasm-timer", ] [[package]] @@ -7333,19 +8404,20 @@ name = "sc-service-test" version = "2.0.0" dependencies = [ "fdlimit", - "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.17", "hex-literal", - "log", + "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sc-block-builder", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-light", "sc-network", "sc-service", + "sc-transaction-pool-api", "sp-api", "sp-blockchain", "sp-consensus", @@ -7356,137 +8428,183 @@ dependencies = [ "sp-state-machine", "sp-storage", "sp-tracing", - "sp-transaction-pool", "sp-trie", "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", - "tokio 0.1.22", + "tokio", ] [[package]] name = "sc-state-db" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ - "log", + "log 0.4.14", "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sc-client-api", "sp-core", ] [[package]] name = "sc-sync-state-rpc" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", + "parity-scale-codec", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", "sc-consensus-epochs", "sc-finality-grandpa", "sc-rpc-api", + "serde", "serde_json", "sp-blockchain", "sp-runtime", + "thiserror", ] [[package]] name = "sc-telemetry" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "futures 0.3.5", - "futures-timer 3.0.2", + "chrono", + "futures 0.3.17", "libp2p", - "log", - "parking_lot 0.10.2", - "pin-project", + "log 0.4.14", + "parking_lot 0.11.2", + "pin-project 1.0.8", "rand 0.7.3", "serde", - "slog", - "slog-json", - "slog-scope", - "take_mut", - "void", + "serde_json", + "thiserror", "wasm-timer", ] [[package]] name = "sc-tracing" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "erased-serde", - "log", - "parking_lot 0.10.2", + "ansi_term 0.12.1", + "atty", + "lazy_static", + "log 0.4.14", + 
"once_cell", + "parking_lot 0.11.2", + "regex", "rustc-hash", - "sc-telemetry", + "sc-client-api", + "sc-rpc-server", + "sc-tracing-proc-macro", "serde", - "serde_json", - "slog", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-rpc", + "sp-runtime", "sp-tracing", + "thiserror", "tracing", - "tracing-core", + "tracing-log", "tracing-subscriber", ] [[package]] -name = "sc-transaction-graph" -version = "2.0.0" +name = "sc-tracing-proc-macro" +version = "4.0.0-dev" dependencies = [ - "assert_matches", - "criterion", - "derive_more", - "futures 0.3.5", - "linked-hash-map", - "log", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.10.2", - "retain_mut", - "serde", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-transaction-pool", - "sp-utils", - "substrate-test-runtime", - "wasm-timer", + "proc-macro-crate 1.1.0", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "sc-transaction-pool" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "assert_matches", - "derive_more", - "futures 0.3.5", - "futures-diagnose", + "criterion", + "futures 0.3.17", "hex", "intervalier", - "log", + "linked-hash-map", + "log 0.4.14", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", + "parking_lot 0.11.2", + "retain_mut", "sc-block-builder", "sc-client-api", - "sc-transaction-graph", + "sc-transaction-pool-api", + "sc-utils", + "serde", "sp-api", "sp-blockchain", "sp-consensus", "sp-core", - "sp-keyring", "sp-runtime", "sp-tracing", "sp-transaction-pool", - "sp-utils", "substrate-prometheus-endpoint", + "substrate-test-runtime", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", - "wasm-timer", + "thiserror", +] + +[[package]] +name = "sc-transaction-pool-api" +version = "4.0.0-dev" +dependencies = [ + "derive_more", + "futures 0.3.17", + "log 0.4.14", + "serde", + "sp-blockchain", + "sp-runtime", + "thiserror", +] + +[[package]] +name = "sc-utils" +version = "4.0.0-dev" +dependencies = [ + "futures 0.3.17", + "futures-timer 3.0.2", + "lazy_static", + "prometheus", +] + +[[package]] +name = "scale-info" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f" +dependencies = [ + "bitvec 0.20.4", + "cfg-if 1.0.0", + "derive_more", + "parity-scale-codec", + "scale-info-derive", + "serde", +] + +[[package]] +name = "scale-info-derive" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" +dependencies = [ + "proc-macro-crate 1.1.0", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -7506,14 +8624,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ "arrayref", - "arrayvec 0.5.1", - "curve25519-dalek", - "getrandom 0.1.14", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.3", + "getrandom 0.1.16", "merlin", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", - "subtle 2.2.3", + "subtle 2.4.1", "zeroize", ] @@ -7523,43 +8641,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" -[[package]] -name = "scopeguard" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" - [[package]] name = "scopeguard" version = 
"1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scroll" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb2332cb595d33f7edd5700f4cbf94892e680c7f0ae56adab58a35190b66cb1" -dependencies = [ - "scroll_derive", -] - -[[package]] -name = "scroll_derive" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "sct" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ "ring", "untrusted", @@ -7567,18 +8659,18 @@ dependencies = [ [[package]] name = "secrecy" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" dependencies = [ "zeroize", ] [[package]] name = "security-framework" -version = "1.0.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" +checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" dependencies = [ "bitflags", "core-foundation", @@ -7589,9 +8681,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "1.0.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" +checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" dependencies = [ "core-foundation-sys", "libc", @@ -7603,7 +8695,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", ] [[package]] @@ -7612,57 +8704,63 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", ] [[package]] name = "semver" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "394cec28fa623e00903caf7ba4fa6fb9a0e260280bb8cdbbba029611108a0190" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser", + "semver-parser 0.10.2", "serde", ] [[package]] -name = "semver-parser" -version = "0.7.0" +name = "semver" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" [[package]] -name = "send_wrapper" -version = "0.2.0" +name = "semver-parser" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" +checksum = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] -name = "send_wrapper" -version = "0.3.0" +name = "semver-parser" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686ef91cf020ad8d4aca9a7047641fd6add626b7b89e14546c2b6a76781cf822" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] [[package]] -name = "send_wrapper" -version = "0.4.0" +name = "serde" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +dependencies = [ + "serde_derive", +] [[package]] -name = "serde" -version = "1.0.114" +name = "serde_bytes" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" dependencies = [ - "serde_derive", + "serde", ] [[package]] name = "serde_cbor" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" dependencies = [ "half", "serde", @@ -7670,9 +8768,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.114" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -7681,9 +8779,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.58" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4" +checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" dependencies = [ "itoa", "ryu", @@ -7702,6 +8800,25 @@ dependencies = [ "opaque-debug 0.2.3", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures 0.2.1", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + [[package]] name = "sha2" version = "0.8.2" @@ -7716,188 +8833,209 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.1" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" +checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer 0.9.0", - "cfg-if", - "cpuid-bool", + "cfg-if 1.0.0", + "cpufeatures 0.2.1", "digest 0.9.0", "opaque-debug 0.3.0", ] [[package]] name = "sha3" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" +checksum = 
"f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer 0.7.3", - "byte-tools", - "digest 0.8.1", + "block-buffer 0.9.0", + "digest 0.9.0", "keccak", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] name = "sharded-slab" -version = "0.0.9" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +checksum = "740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982" dependencies = [ "lazy_static", ] [[package]] name = "shlex" -version = "0.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] -name = "signal-hook-registry" -version = "1.2.0" +name = "signal-hook" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +checksum = "9c98891d737e271a2954825ef19e46bd16bdb98e2746f2eec4f7a4ef7946efd1" dependencies = [ - "arc-swap", "libc", + "signal-hook-registry", ] [[package]] -name = "signature" -version = "1.1.0" +name = "signal-hook-registry" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65211b7b6fc3f14ff9fc7a2011a434e3e6880585bd2e9e9396315ae24cbf7852" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] [[package]] -name = "slab" -version = "0.4.2" +name = "signature" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" [[package]] -name = "slog" -version = "2.5.2" +name = "simba" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cc9c640a4adbfbcc11ffb95efe5aa7af7309e002adab54b185507dbf2377b99" +checksum = "8e82063457853d00243beda9952e910b82593e4b07ae9f721b9278a99a0d3d5c" dependencies = [ - "erased-serde", + "approx", + "num-complex", + "num-traits", + "paste 1.0.5", ] [[package]] -name = "slog-json" -version = "2.3.0" +name = "slab" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc0d2aff1f8f325ef660d9a0eb6e6dcd20b30b3f581a5897f58bf42d061c37a" -dependencies = [ - "chrono", - "erased-serde", - "serde", - "serde_json", - "slog", -] +checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" [[package]] -name = "slog-scope" -version = "4.3.0" +name = "sled" +version = "0.34.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c44c89dd8b0ae4537d1ae318353eaf7840b4869c536e31c41e963d1ea523ee6" +checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" dependencies = [ - "arc-swap", - "lazy_static", - "slog", + "crc32fast", + "crossbeam-epoch", + "crossbeam-utils 0.8.5", + "fs2", + "fxhash", + "libc", + "log 0.4.14", + "parking_lot 0.11.2", ] [[package]] -name = "slog_derive" -version = "0.2.0" +name = "slog" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" +checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" dependencies = [ - 
"proc-macro2", - "quote", - "syn", + "erased-serde", ] [[package]] name = "smallvec" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" dependencies = [ "maybe-uninit", ] [[package]] name = "smallvec" -version = "1.4.1" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" + +[[package]] +name = "snap" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" +checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451" [[package]] name = "snow" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32bf8474159a95551661246cda4976e89356999e3cbfef36f493dacc3fae1e8e" +checksum = "6142f7c25e94f6fd25a32c3348ec230df9109b463f59c8c7acc4bd34936babb7" dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "rand 0.7.3", - "rand_core 0.5.1", + "rand 0.8.4", + "rand_core 0.6.3", "ring", - "rustc_version", - "sha2 0.9.1", - "subtle 2.2.3", + "rustc_version 0.3.3", + "sha2 0.9.8", + "subtle 2.4.1", "x25519-dalek", ] [[package]] name = "socket2" -version = "0.3.12" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "socket2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" dependencies = [ - "cfg-if", "libc", - "redox_syscall", "winapi 0.3.9", ] [[package]] name = "soketto" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" +checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" dependencies = [ "base64 0.12.3", "bytes 0.5.6", "flate2", - "futures 0.3.5", + "futures 0.3.17", "httparse", - "log", + "log 0.4.14", "rand 0.7.3", - "sha-1", + "sha-1 0.9.8", ] [[package]] -name = "sp-allocator" -version = "2.0.0" +name = "soketto" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a74e48087dbeed4833785c2f3352b59140095dc192dce966a3bfc155020a439f" dependencies = [ - "derive_more", - "log", - "sp-core", - "sp-std", - "sp-wasm-interface", + "base64 0.13.0", + "bytes 1.1.0", + "futures 0.3.17", + "httparse", + "log 0.4.14", + "rand 0.8.4", + "sha-1 0.9.8", ] [[package]] name = "sp-api" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "hash-db", + "log 0.4.14", "parity-scale-codec", "sp-api-proc-macro", "sp-core", @@ -7906,14 +9044,15 @@ dependencies = [ "sp-std", "sp-test-primitives", "sp-version", + "thiserror", ] [[package]] name = "sp-api-proc-macro" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "blake2-rfc", - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro2", "quote", "syn", @@ -7921,18 +9060,20 @@ dependencies = [ [[package]] name = "sp-api-test" -version = "2.0.0" +version = "2.0.1" 
 dependencies = [
  "criterion",
+ "futures 0.3.17",
+ "log 0.4.14",
  "parity-scale-codec",
  "rustversion",
  "sc-block-builder",
  "sp-api",
- "sp-blockchain",
  "sp-consensus",
  "sp-core",
  "sp-runtime",
  "sp-state-machine",
+ "sp-tracing",
  "sp-version",
  "substrate-test-runtime-client",
  "trybuild",
@@ -7940,9 +9081,10 @@ [[package]]
 name = "sp-application-crypto"
-version = "2.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-core",
  "sp-io",
@@ -7963,7 +9105,7 @@ [[package]]
 name = "sp-arithmetic"
-version = "2.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "criterion",
  "integer-sqrt",
@@ -7971,10 +9113,11 @@ dependencies = [
  "parity-scale-codec",
  "primitive-types",
  "rand 0.7.3",
+ "scale-info",
  "serde",
- "serde_json",
  "sp-debug-derive",
  "sp-std",
+ "static_assertions",
 ]

 [[package]]
@@ -7983,16 +9126,16 @@ version = "2.0.0"
 dependencies = [
  "honggfuzz",
  "num-bigint",
- "num-traits",
  "primitive-types",
  "sp-arithmetic",
 ]

 [[package]]
 name = "sp-authority-discovery"
-version = "2.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "parity-scale-codec",
+ "scale-info",
  "sp-api",
  "sp-application-crypto",
  "sp-runtime",
@@ -8001,8 +9144,9 @@ [[package]]
 name = "sp-authorship"
-version = "2.0.0"
+version = "4.0.0-dev"
 dependencies = [
+ "async-trait",
  "parity-scale-codec",
  "sp-inherents",
  "sp-runtime",
@@ -8011,7 +9155,7 @@ [[package]]
 name = "sp-block-builder"
-version = "2.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "parity-scale-codec",
  "sp-api",
@@ -8022,61 +9166,51 @@ [[package]]
 name = "sp-blockchain"
-version = "2.0.0"
+version = "4.0.0-dev"
 dependencies = [
- "derive_more",
- "log",
- "lru 0.4.3",
+ "futures 0.3.17",
+ "log 0.4.14",
+ "lru",
  "parity-scale-codec",
- "parking_lot 0.10.2",
- "sp-block-builder",
+ "parking_lot 0.11.2",
+ "sp-api",
  "sp-consensus",
  "sp-database",
  "sp-runtime",
  "sp-state-machine",
-]
-
-[[package]]
-name = "sp-chain-spec"
-version = "2.0.0"
-dependencies = [
- "serde",
- "serde_json",
+ "thiserror",
 ]

 [[package]]
 name = "sp-consensus"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
- "derive_more",
- "futures 0.3.5",
+ "async-trait",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
- "libp2p",
- "log",
+ "log 0.4.14",
  "parity-scale-codec",
- "parking_lot 0.10.2",
- "serde",
- "sp-api",
  "sp-core",
  "sp-inherents",
  "sp-runtime",
  "sp-state-machine",
  "sp-std",
  "sp-test-primitives",
- "sp-trie",
- "sp-utils",
  "sp-version",
- "substrate-prometheus-endpoint",
- "wasm-timer",
+ "thiserror",
 ]

 [[package]]
 name = "sp-consensus-aura"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
+ "async-trait",
  "parity-scale-codec",
+ "scale-info",
  "sp-api",
  "sp-application-crypto",
+ "sp-consensus",
+ "sp-consensus-slots",
  "sp-inherents",
  "sp-runtime",
  "sp-std",
@@ -8085,10 +9219,13 @@ [[package]]
 name = "sp-consensus-babe"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
+ "async-trait",
  "merlin",
  "parity-scale-codec",
+ "scale-info",
+ "serde",
  "sp-api",
  "sp-application-crypto",
  "sp-consensus",
@@ -8104,7 +9241,7 @@ [[package]]
 name = "sp-consensus-pow"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
  "parity-scale-codec",
  "sp-api",
@@ -8115,15 +9252,17 @@ [[package]]
 name = "sp-consensus-slots"
-version = "0.8.0"
+version = "0.10.0-dev"
 dependencies = [
  "parity-scale-codec",
+ "scale-info",
+ "sp-arithmetic",
  "sp-runtime",
 ]

 [[package]]
 name = "sp-consensus-vrf"
-version = "0.8.0"
+version = "0.10.0-dev" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -8134,38 +9273,37 @@ dependencies = [ [[package]] name = "sp-core" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "base58", "blake2-rfc", - "byteorder 1.3.4", + "byteorder", "criterion", "dyn-clonable", "ed25519-dalek", - "futures 0.3.5", + "futures 0.3.17", "hash-db", "hash256-std-hasher", "hex", "hex-literal", "impl-serde", "lazy_static", - "libsecp256k1", - "log", + "libsecp256k1 0.6.0", + "log 0.4.14", "merlin", "num-traits", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.10.2", - "pretty_assertions", + "parking_lot 0.11.2", "primitive-types", "rand 0.7.3", - "rand_chacha 0.2.2", "regex", + "scale-info", "schnorrkel", "secrecy", "serde", "serde_json", - "sha2 0.8.2", + "sha2 0.9.8", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -8173,6 +9311,7 @@ dependencies = [ "sp-std", "sp-storage", "substrate-bip39", + "thiserror", "tiny-bip39", "tiny-keccak", "twox-hash", @@ -8182,15 +9321,15 @@ dependencies = [ [[package]] name = "sp-database" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "kvdb", - "parking_lot 0.10.2", + "parking_lot 0.11.2", ] [[package]] name = "sp-debug-derive" -version = "2.0.0" +version = "3.0.0" dependencies = [ "proc-macro2", "quote", @@ -8199,7 +9338,7 @@ dependencies = [ [[package]] name = "sp-externalities" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "environmental", "parity-scale-codec", @@ -8209,11 +9348,12 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "finality-grandpa", - "log", + "log 0.4.14", "parity-scale-codec", + "scale-info", "serde", "sp-api", "sp-application-crypto", @@ -8225,28 +9365,32 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "derive_more", + "async-trait", + "futures 0.3.17", + "impl-trait-for-tuples", "parity-scale-codec", - "parking_lot 0.10.2", "sp-core", + "sp-runtime", "sp-std", + "thiserror", ] [[package]] name = "sp-io" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "hash-db", - "libsecp256k1", - "log", + "libsecp256k1 0.6.0", + "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "sp-core", "sp-externalities", "sp-keystore", + "sp-maybe-compressed-blob", "sp-runtime-interface", "sp-state-machine", "sp-std", @@ -8259,55 +9403,55 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "lazy_static", "sp-core", "sp-runtime", - "strum", + "strum 0.20.0", ] [[package]] name = "sp-keystore" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.17", "merlin", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.2", "rand 0.7.3", "rand_chacha 0.2.2", "schnorrkel", + "serde", "sp-core", "sp-externalities", ] +[[package]] +name = "sp-maybe-compressed-blob" +version = "4.0.0-dev" +dependencies = [ + "zstd", +] + [[package]] name = "sp-npos-elections" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "parity-scale-codec", "rand 0.7.3", + "scale-info", "serde", "sp-arithmetic", - "sp-npos-elections-compact", + "sp-core", + "sp-npos-elections-solution-type", "sp-runtime", "sp-std", "substrate-test-utils", ] -[[package]] -name = "sp-npos-elections-compact" -version = "2.0.0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", 
- "syn", -] - [[package]] name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" @@ -8315,33 +9459,48 @@ dependencies = [ "honggfuzz", "parity-scale-codec", "rand 0.7.3", + "scale-info", "sp-npos-elections", "sp-runtime", - "sp-std", + "structopt", +] + +[[package]] +name = "sp-npos-elections-solution-type" +version = "4.0.0-dev" +dependencies = [ + "parity-scale-codec", + "proc-macro-crate 1.1.0", + "proc-macro2", + "quote", + "scale-info", + "sp-arithmetic", + "sp-npos-elections", + "syn", + "trybuild", ] [[package]] name = "sp-offchain" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "sp-api", "sp-core", "sp-runtime", - "sp-state-machine", ] [[package]] name = "sp-panic-handler" -version = "2.0.0" +version = "3.0.0" dependencies = [ "backtrace", - "log", ] [[package]] name = "sp-rpc" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "rustc-hash", "serde", "serde_json", "sp-core", @@ -8349,31 +9508,35 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", - "log", + "log 0.4.14", "parity-scale-codec", "parity-util-mem", - "paste 0.1.18", + "paste 1.0.5", "rand 0.7.3", + "scale-info", "serde", "serde_json", + "sp-api", "sp-application-crypto", "sp-arithmetic", "sp-core", - "sp-inherents", "sp-io", "sp-state-machine", "sp-std", + "sp-tracing", + "substrate-test-runtime-client", ] [[package]] name = "sp-runtime-interface" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "impl-trait-for-tuples", "parity-scale-codec", "primitive-types", "rustversion", @@ -8393,10 +9556,10 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "Inflector", - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro2", "quote", "syn", @@ -8407,7 +9570,7 @@ name = "sp-runtime-interface-test" version = "2.0.0" dependencies = [ "sc-executor", - "sp-core", + "sc-executor-common", "sp-io", "sp-runtime", "sp-runtime-interface", @@ -8426,7 +9589,7 @@ dependencies = [ "sp-io", "sp-runtime-interface", "sp-std", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -8437,14 +9600,15 @@ dependencies = [ "sp-io", "sp-runtime-interface", "sp-std", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] name = "sp-sandbox" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "assert_matches", + "log 0.4.14", "parity-scale-codec", "sp-core", "sp-io", @@ -8456,7 +9620,7 @@ dependencies = [ [[package]] name = "sp-serializer" -version = "2.0.0" +version = "3.0.0" dependencies = [ "serde", "serde_json", @@ -8464,9 +9628,10 @@ dependencies = [ [[package]] name = "sp-session" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "parity-scale-codec", + "scale-info", "sp-api", "sp-core", "sp-runtime", @@ -8476,43 +9641,46 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "parity-scale-codec", + "scale-info", "sp-runtime", "sp-std", ] [[package]] name = "sp-state-machine" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ "hash-db", "hex-literal", - "log", + "log 0.4.14", "num-traits", "parity-scale-codec", - "parking_lot 0.10.2", - "pretty_assertions", + "parking_lot 0.11.2", + "pretty_assertions 0.6.1", "rand 0.7.3", - "smallvec 1.4.1", + "smallvec 1.6.1", "sp-core", "sp-externalities", "sp-panic-handler", "sp-runtime", "sp-std", "sp-trie", + "thiserror", + "tracing", "trie-db", 
"trie-root", ] [[package]] name = "sp-std" -version = "2.0.0" +version = "4.0.0-dev" [[package]] name = "sp-storage" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8524,9 +9692,9 @@ dependencies = [ [[package]] name = "sp-tasks" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "log", + "log 0.4.14", "parity-scale-codec", "sp-core", "sp-externalities", @@ -8549,23 +9717,30 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "impl-trait-for-tuples", + "async-trait", + "futures-timer 3.0.2", + "log 0.4.14", "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", "sp-std", - "wasm-timer", + "thiserror", ] [[package]] name = "sp-tracing" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "log", + "erased-serde", + "log 0.4.14", "parity-scale-codec", + "parking_lot 0.10.2", + "serde", + "serde_json", + "slog", "sp-std", "tracing", "tracing-core", @@ -8574,27 +9749,37 @@ dependencies = [ [[package]] name = "sp-transaction-pool" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "derive_more", - "futures 0.3.5", - "log", - "parity-scale-codec", - "serde", "sp-api", - "sp-blockchain", "sp-runtime", ] +[[package]] +name = "sp-transaction-storage-proof" +version = "4.0.0-dev" +dependencies = [ + "async-trait", + "log 0.4.14", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-trie", +] + [[package]] name = "sp-trie" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "criterion", "hash-db", "hex-literal", "memory-db", "parity-scale-codec", + "scale-info", "sp-core", "sp-runtime", "sp-std", @@ -8604,31 +9789,35 @@ dependencies = [ "trie-standardmap", ] -[[package]] -name = "sp-utils" -version = "2.0.0" -dependencies = [ - "futures 0.3.5", - "futures-core", - "futures-timer 3.0.2", - "lazy_static", - "prometheus", -] - [[package]] name = "sp-version" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "impl-serde", "parity-scale-codec", + "parity-wasm 0.42.2", + "scale-info", "serde", "sp-runtime", "sp-std", + "sp-version-proc-macro", + "thiserror", +] + +[[package]] +name = "sp-version-proc-macro" +version = "4.0.0-dev" +dependencies = [ + "parity-scale-codec", + "proc-macro2", + "quote", + "sp-version", + "syn", ] [[package]] name = "sp-wasm-interface" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8656,51 +9845,34 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "statrs" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" -dependencies = [ - "rand 0.5.6", -] - -[[package]] -name = "stream-cipher" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8131256a5896cabcf5eb04f4d6dacbe1aefda854b0d9896e09cb58829ec5638c" -dependencies = [ - "generic-array 0.12.3", -] - -[[package]] -name = "stream-cipher" -version = "0.4.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88" +checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05" dependencies = [ - "generic-array 0.14.3", + "approx", + "lazy_static", + "nalgebra", + "num-traits", + "rand 0.8.4", ] [[package]] -name = "string" -version = 
"0.2.1" +name = "strsim" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -dependencies = [ - "bytes 0.4.12", -] +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "strsim" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "structopt" -version = "0.3.15" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" +checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" dependencies = [ "clap", "lazy_static", @@ -8709,12 +9881,12 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.8" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" +checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" dependencies = [ "heck", - "proc-macro-error", + "proc-macro-error 1.0.4", "proc-macro2", "quote", "syn", @@ -8722,18 +9894,36 @@ dependencies = [ [[package]] name = "strum" -version = "0.16.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6138f8f88a16d90134763314e3fc76fa3ed6a7db4725d6acf9a3ef95a3188d22" +checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" dependencies = [ - "strum_macros", + "strum_macros 0.20.1", ] +[[package]] +name = "strum" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" + [[package]] name = "strum_macros" -version = "0.16.0" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "strum_macros" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" +checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" dependencies = [ "heck", "proc-macro2", @@ -8743,66 +9933,37 @@ dependencies = [ [[package]] name = "subkey" -version = "2.0.0" +version = "2.0.1" dependencies = [ - "frame-system", - "node-primitives", - "node-runtime", "sc-cli", - "sp-core", "structopt", - "substrate-frame-cli", ] [[package]] name = "substrate-bip39" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" +checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" dependencies = [ - "hmac", - "pbkdf2", + "hmac 0.11.0", + "pbkdf2 0.8.0", "schnorrkel", - "sha2 0.8.2", + "sha2 0.9.8", "zeroize", ] -[[package]] -name = "substrate-browser-utils" -version = "0.8.0" -dependencies = [ - "chrono", - "console_error_panic_hook", - "console_log", - "futures 0.1.29", - "futures 0.3.5", - "futures-timer 3.0.2", - "js-sys", - "kvdb-web", - "libp2p-wasm-ext", - "log", - "rand 0.6.5", - "rand 0.7.3", - 
"sc-chain-spec", - "sc-informant", - "sc-network", - "sc-service", - "sp-database", - "wasm-bindgen", - "wasm-bindgen-futures", -] - [[package]] name = "substrate-build-script-utils" -version = "2.0.0" +version = "3.0.0" dependencies = [ "platforms", ] [[package]] name = "substrate-frame-cli" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ + "frame-support", "frame-system", "sc-cli", "sp-core", @@ -8812,65 +9973,63 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-support" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "futures 0.3.5", + "futures 0.3.17", "jsonrpc-client-transports", - "jsonrpc-core", "parity-scale-codec", "sc-rpc-api", + "scale-info", "serde", "sp-storage", - "tokio 0.2.22", + "tokio", ] [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.5", + "futures 0.3.17", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "log", + "log 0.4.14", "parity-scale-codec", "sc-client-api", "sc-rpc-api", "sc-transaction-pool", - "serde", + "sc-transaction-pool-api", "sp-api", "sp-block-builder", "sp-blockchain", "sp-core", "sp-runtime", "sp-tracing", - "sp-transaction-pool", "substrate-test-runtime-client", ] [[package]] name = "substrate-prometheus-endpoint" -version = "0.8.0" +version = "0.9.0" dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.7", - "log", + "hyper 0.14.13", + "log 0.4.14", "prometheus", - "tokio 0.2.22", + "tokio", ] [[package]] name = "substrate-test-client" -version = "2.0.0" +version = "2.0.1" dependencies = [ - "futures 0.1.29", - "futures 0.3.5", - "hash-db", + "async-trait", + "futures 0.3.17", "hex", "parity-scale-codec", "sc-client-api", @@ -8878,6 +10037,7 @@ dependencies = [ "sc-consensus", "sc-executor", "sc-light", + "sc-offchain", "sc-service", "serde", "serde_json", @@ -8894,12 +10054,12 @@ dependencies = [ name = "substrate-test-runtime" version = "2.0.0" dependencies = [ - "cfg-if", - "frame-executive", + "cfg-if 1.0.0", "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "log", + "futures 0.3.17", + "log 0.4.14", "memory-db", "pallet-babe", "pallet-timestamp", @@ -8908,10 +10068,12 @@ dependencies = [ "sc-block-builder", "sc-executor", "sc-service", + "scale-info", "serde", "sp-api", "sp-application-crypto", "sp-block-builder", + "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", "sp-core", @@ -8930,7 +10092,7 @@ dependencies = [ "sp-trie", "sp-version", "substrate-test-runtime-client", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", "trie-db", ] @@ -8938,13 +10100,12 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "parity-scale-codec", "sc-block-builder", "sc-client-api", "sc-consensus", "sc-light", - "sc-service", "sp-api", "sp-blockchain", "sp-consensus", @@ -8959,32 +10120,33 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.17", "parity-scale-codec", - "parking_lot 0.10.2", - "sc-transaction-graph", + "parking_lot 0.11.2", + "sc-transaction-pool", + "sc-transaction-pool-api", "sp-blockchain", "sp-runtime", - "sp-transaction-pool", "substrate-test-runtime-client", ] [[package]] name = "substrate-test-utils" -version = "2.0.0" +version = "4.0.0-dev" dependencies = [ - "futures 0.3.5", + "futures 0.3.17", "sc-service", "substrate-test-utils-derive", - 
"tokio 0.2.22", + "tokio", "trybuild", ] [[package]] name = "substrate-test-utils-derive" -version = "0.8.0" +version = "0.10.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.0", + "proc-macro2", "quote", "syn", ] @@ -8995,29 +10157,23 @@ version = "0.1.0" dependencies = [ "sc-service", "substrate-test-utils", - "tokio 0.2.22", + "tokio", ] [[package]] name = "substrate-wasm-builder" -version = "2.0.1" +version = "5.0.0-dev" dependencies = [ "ansi_term 0.12.1", - "atty", "build-helper", "cargo_metadata", - "fs2", - "itertools 0.8.2", + "sp-maybe-compressed-blob", "tempfile", "toml", "walkdir", "wasm-gc-api", ] -[[package]] -name = "substrate-wasm-builder-runner" -version = "2.0.0" - [[package]] name = "subtle" version = "1.0.0" @@ -9026,15 +10182,15 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.2.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.35" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb7f4c519df8c117855e19dd8cc851e89eb746fe7a73f0157e0d95fdec5369b0" +checksum = "c6f107db402c2c2055242dbf4d2af0e69197202e9faacbef9571bbe47f5a1b84" dependencies = [ "proc-macro2", "quote", @@ -9043,9 +10199,9 @@ dependencies = [ [[package]] name = "syn-mid" -version = "0.5.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" +checksum = "baa8e7560a164edb1621a55d18a0c59abf49d360f47aa7b821061dd7eea7fac9" dependencies = [ "proc-macro2", "quote", @@ -9054,9 +10210,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" dependencies = [ "proc-macro2", "quote", @@ -9065,51 +10221,110 @@ dependencies = [ ] [[package]] -name = "take_mut" -version = "0.2.2" +name = "tap" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.10.0" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "422045212ea98508ae3d28025bc5aaa2bd4a9cdaecd442a08da2ee620ee9ea95" + +[[package]] +name = "target-lexicon" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d" +checksum = "d9bffcddbc2458fa3e6058414599e3c838a022abae82e5c67b4f7f80298d5bff" [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.4", + "redox_syscall 0.2.10", "remove_dir_all", "winapi 0.3.9", ] [[package]] 
name = "termcolor" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ "winapi-util", ] [[package]] -name = "test-case" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a605baa797821796a751f4a959e1206079b24a4b7e1ed302b7d785d81a9276c9" +name = "test-runner" +version = "0.9.0" +dependencies = [ + "frame-system", + "futures 0.3.17", + "jsonrpc-core", + "log 0.4.14", + "num-traits", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-babe", + "sc-consensus-manual-seal", + "sc-executor", + "sc-finality-grandpa", + "sc-informant", + "sc-network", + "sc-rpc", + "sc-rpc-server", + "sc-service", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-core", + "sp-externalities", + "sp-finality-grandpa", + "sp-inherents", + "sp-keyring", + "sp-offchain", + "sp-runtime", + "sp-runtime-interface", + "sp-session", + "sp-state-machine", + "sp-transaction-pool", + "sp-wasm-interface", + "tokio", +] + +[[package]] +name = "test-runner-example" +version = "0.1.0" dependencies = [ - "lazy_static", - "proc-macro2", - "quote", - "syn", - "version_check", + "frame-benchmarking", + "frame-system", + "node-cli", + "node-primitives", + "node-runtime", + "pallet-transaction-payment", + "sc-consensus", + "sc-consensus-babe", + "sc-consensus-manual-seal", + "sc-executor", + "sc-finality-grandpa", + "sc-service", + "sp-consensus-babe", + "sp-keyring", + "sp-runtime", + "test-runner", ] [[package]] @@ -9123,18 +10338,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.20" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" +checksum = "602eca064b2d83369e2b2f34b09c70b605402801927c65c11071ac911d299b88" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.20" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" +checksum = "bad553cc2c78e8de258400763a647e80e6d1b31ee237275d756f6836d204494c" dependencies = [ "proc-macro2", "quote", @@ -9143,11 +10358,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -9161,28 +10376,31 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi 0.3.9", ] [[package]] name = "tiny-bip39" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" +checksum 
= "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" dependencies = [ - "failure", - "hmac", - "once_cell 1.4.1", - "pbkdf2", + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.8.2", + "sha2 0.9.8", + "thiserror", "unicode-normalization", + "zeroize", ] [[package]] @@ -9196,9 +10414,9 @@ dependencies = [ [[package]] name = "tinytemplate" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", @@ -9206,68 +10424,39 @@ dependencies = [ [[package]] name = "tinyvec" -version = "0.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "5241dd6f21443a3606b432718b166d3cedc962fd4b8bea54a8bc7f514ebda986" +dependencies = [ + "tinyvec_macros", +] [[package]] -name = "tokio" -version = "0.1.22" +name = "tinyvec_macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "mio", - "num_cpus", - "tokio-codec", - "tokio-current-thread", - "tokio-executor 0.1.10", - "tokio-fs", - "tokio-io", - "tokio-reactor", - "tokio-sync 0.1.8", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "tokio-udp", - "tokio-uds", -] +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "0.2.22" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd" +checksum = "b4efe6fc2395938c8155973d7be49fe8d03a843726e285e100a8a383cc0154ce" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", + "autocfg 1.0.1", + "bytes 1.1.0", "libc", "memchr", - "mio", - "mio-uds", + "mio 0.7.13", "num_cpus", - "pin-project-lite", + "once_cell", + "parking_lot 0.11.2", + "pin-project-lite 0.2.7", "signal-hook-registry", - "slab", "tokio-macros", "winapi 0.3.9", ] -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -dependencies = [ - "bytes 0.4.12", - "either", - "futures 0.1.29", -] - [[package]] name = "tokio-codec" version = "0.1.2" @@ -9275,50 +10464,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.31", "tokio-io", ] -[[package]] -name = "tokio-current-thread" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" -dependencies = [ - "futures 0.1.29", - "tokio-executor 0.1.10", -] - [[package]] name = "tokio-executor" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", -] - -[[package]] -name = "tokio-executor" -version = "0.2.0-alpha.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee9ceecf69145923834ea73f32ba40c790fd877b74a7817dd0b089f1eb9c7c8" -dependencies = [ - "futures-util-preview", - "lazy_static", - "tokio-sync 0.2.0-alpha.6", -] - -[[package]] -name = "tokio-fs" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" -dependencies = [ - "futures 0.1.29", - "tokio-io", - "tokio-threadpool", + "crossbeam-utils 0.7.2", + "futures 0.1.31", ] [[package]] @@ -9328,15 +10485,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", - "log", + "futures 0.1.31", + "log 0.4.14", ] [[package]] name = "tokio-macros" -version = "0.2.5" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" dependencies = [ "proc-macro2", "quote", @@ -9344,16 +10501,13 @@ dependencies = [ ] [[package]] -name = "tokio-named-pipes" -version = "0.1.0" +name = "tokio-native-tls" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "mio", - "mio-named-pipes", - "tokio 0.1.22", + "native-tls", + "tokio", ] [[package]] @@ -9362,38 +10516,39 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.31", "lazy_static", - "log", - "mio", + "log 0.4.14", + "mio 0.6.23", "num_cpus", "parking_lot 0.9.0", "slab", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-io", - "tokio-sync 0.1.8", + "tokio-sync", ] [[package]] name = "tokio-rustls" -version = "0.14.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "futures-core", "rustls", - "tokio 0.2.22", + "tokio", "webpki", ] [[package]] -name = "tokio-service" -version = "0.1.0" +name = "tokio-stream" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" +checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" dependencies = [ - "futures 0.1.29", + "futures-core", + "pin-project-lite 0.2.7", + "tokio", ] [[package]] @@ -9403,18 +10558,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" dependencies = [ "fnv", - "futures 0.1.29", -] - -[[package]] -name = "tokio-sync" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1aaeb685540f7407ea0e27f1c9757d258c7c6bf4e3eb19da6fc59b747239d2" -dependencies = [ - "fnv", - "futures-core-preview", - "futures-util-preview", + "futures 0.1.31", ] [[package]] @@ -9424,122 +10568,72 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.31", "iovec", - "mio", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-threadpool" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" -dependencies = [ - "crossbeam-deque", - "crossbeam-queue", - "crossbeam-utils", - "futures 0.1.29", - "lazy_static", - "log", - "num_cpus", - "slab", - "tokio-executor 0.1.10", -] - -[[package]] -name = "tokio-timer" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" -dependencies = [ - "crossbeam-utils", - "futures 0.1.29", - "slab", - "tokio-executor 0.1.10", -] - -[[package]] -name = "tokio-udp" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "log", - "mio", - "tokio-codec", + "mio 0.6.23", "tokio-io", "tokio-reactor", ] [[package]] -name = "tokio-uds" -version = "0.2.7" +name = "tokio-tls" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" +checksum = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c" dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "iovec", - "libc", - "log", - "mio", - "mio-uds", - "tokio-codec", + "futures 0.1.31", + "native-tls", "tokio-io", - "tokio-reactor", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "08d3725d3efa29485e87311c5b699de63cde14b00ed4d256b8318aa30ca452cd" dependencies = [ - "bytes 0.5.6", + "bytes 1.1.0", "futures-core", + "futures-io", "futures-sink", - "log", - "pin-project-lite", - "tokio 0.2.22", + "log 0.4.14", + "pin-project-lite 0.2.7", + "tokio", ] [[package]] name = "toml" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "c2ba9ab62b7d6497a8638dfda5e5c4fb3b2d5a7fca4118f2b96151c8ef1a437e" dependencies = [ - "cfg-if", - "log", - "pin-project-lite", + "cfg-if 1.0.0", + "log 0.4.14", + "pin-project-lite 0.2.7", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.11" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +checksum = "98863d0dd09fa59a1b79c6750ad80dbda6b75f4e71c437a6a1a8cb91a8bcbd77" dependencies = [ "proc-macro2", "quote", @@ -9548,31 +10642,33 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "46125608c26121c81b0c6d693eab5a420e416da7e43c426d2e8f7df8da8a3acf" dependencies = [ "lazy_static", ] [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project", + "futures 0.3.17", + "futures-task", + "pin-project 1.0.8", "tracing", ] [[package]] name = "tracing-log" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" dependencies = [ "lazy_static", - "log", + "log 0.4.14", "tracing-core", ] @@ -9588,9 +10684,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.13" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" +checksum = "56c42e73a9d277d4d2b6a88389a137ccf3c58599660b17e8f5fc39305e490669" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -9600,7 +10696,7 @@ dependencies = [ "serde", "serde_json", "sharded-slab", - "smallvec 1.4.1", + "smallvec 1.6.1", "thread_local", "tracing", "tracing-core", @@ -9608,6 +10704,12 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "traitobject" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" + [[package]] name = "treeline" version = "0.1.0" @@ -9616,9 +10718,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.25.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af2cc37cac8cc158119982c920cbb9b8243d8540c1d13b8aca84484bfc83a426" +checksum = "4edd9bdf0c2e08fd77c0fb2608179cac7ebed997ae18f58d47a2d96425ff51f0" dependencies = [ "criterion", "hash-db", @@ -9632,15 +10734,15 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.0" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" +checksum = "9eac131e334e81b6b3be07399482042838adcd7957aa0010231d0813e39e02fa" dependencies = [ "hash-db", - "hashbrown 0.8.1", - "log", + "hashbrown", + "log 0.4.14", "rustc-hex", - "smallvec 1.4.1", + "smallvec 1.6.1", ] [[package]] @@ -9662,17 +10764,81 @@ dependencies = [ "keccak-hasher", ] +[[package]] +name = "trust-dns-proto" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.2.3", + 
"ipnet", + "lazy_static", + "log 0.4.14", + "rand 0.8.4", + "smallvec 1.6.1", + "thiserror", + "tinyvec", + "tokio", + "url 2.2.2", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +dependencies = [ + "cfg-if 1.0.0", + "futures-util", + "ipconfig", + "lazy_static", + "log 0.4.14", + "lru-cache", + "parking_lot 0.11.2", + "resolv-conf", + "smallvec 1.6.1", + "thiserror", + "tokio", + "trust-dns-proto", +] + [[package]] name = "try-lock" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "try-runtime-cli" +version = "0.10.0-dev" +dependencies = [ + "log 0.4.14", + "parity-scale-codec", + "remote-externalities", + "sc-chain-spec", + "sc-cli", + "sc-executor", + "sc-service", + "serde", + "sp-core", + "sp-keystore", + "sp-runtime", + "sp-state-machine", + "structopt", +] + [[package]] name = "trybuild" -version = "1.0.35" +version = "1.0.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7d30fe369fd650072b352b1a9cb9587669de6b89be3b8225544012c1c45292d" +checksum = "5bdaf2a1d317f3d58b44b31c7f6436b9b9acafe7bddfeace50897c2b804d7792" dependencies = [ "dissimilar", "glob", @@ -9685,84 +10851,104 @@ dependencies = [ [[package]] name = "twox-hash" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" +checksum = "1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e" dependencies = [ - "rand 0.7.3", + "cfg-if 1.0.0", + "rand 0.8.4", + "static_assertions", ] +[[package]] +name = "typeable" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" + [[package]] name = "typenum" -version = "1.12.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" + +[[package]] +name = "ucd-trie" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" [[package]] name = "uint" -version = "0.8.3" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" +checksum = "6470ab50f482bde894a037a57064480a246dbfdd5960bd65a44824693f08da5f" dependencies = [ - "byteorder 1.3.4", + "byteorder", "crunchy", - "rustc-hex", + "hex", "static_assertions", ] +[[package]] +name = "unicase" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" +dependencies = [ + "version_check 0.1.5", +] + [[package]] name = "unicase" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check", + "version_check 0.9.3", ] [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] +checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-width" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "universal-hash" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.3", - "subtle 2.2.3", + "generic-array 0.14.4", + "subtle 2.4.1", ] [[package]] @@ -9773,22 +10959,30 @@ checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" [[package]] name = "unsigned-varint" -version = "0.4.0" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" + +[[package]] +name = "unsigned-varint" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" +checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" dependencies = [ - "bytes 0.5.6", + "asynchronous-codec 0.5.0", + "bytes 1.1.0", "futures-io", "futures-util", - "futures_codec", ] [[package]] name = "unsigned-varint" -version = "0.5.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" +checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.1.0", "futures-io", "futures-util", ] @@ -9812,26 +11006,31 @@ dependencies = [ [[package]] name = "url" -version = "2.1.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ - "idna 0.2.0", + "form_urlencoded", + "idna 0.2.3", "matches", "percent-encoding 2.1.0", ] [[package]] -name = "vcpkg" -version = "0.2.10" +name = "value-bag" +version = "1.0.0-alpha.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "dd320e1520f94261153e96f7534476ad869c14022aee1e59af7c778075d840ae" +dependencies = [ + "ctor", + "version_check 0.9.3", +] [[package]] -name = "vec-arena" -version = "1.0.0" +name = "vcpkg" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vec_map" @@ -9841,9 +11040,15 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.2" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" + +[[package]] +name = "version_check" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "void" @@ -9862,39 +11067,28 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9571542c2ce85ce642e6b58b3364da2fb53526360dfb7c211add4f5c23105ff7" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ "same-file", "winapi 0.3.9", "winapi-util", ] -[[package]] -name = "want" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -dependencies = [ - "futures 0.1.29", - "log", - "try-lock", -] - [[package]] name = "want" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log", + "log 0.4.14", "try-lock", ] @@ -9904,27 +11098,31 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" -version = "0.2.67" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0563a9a4b071746dd5aedbc3a28c6fe9be4586fb3fbadb67c400d4f53c6b16c" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ - "cfg-if", - "serde", - "serde_json", + "cfg-if 1.0.0", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.67" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc71e4c5efa60fb9e74160e89b93353bc24059999c0ae0fb03affc39770310b0" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" 
dependencies = [ "bumpalo", "lazy_static", - "log", + "log 0.4.14", "proc-macro2", "quote", "syn", @@ -9933,11 +11131,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.12" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" +checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -9945,9 +11143,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.67" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97c57cefa5fa80e2ba15641578b44d36e7a64279bc5ed43c6dbaf329457a2ed2" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9955,9 +11153,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.67" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841a6d1c35c6f596ccea1f82504a192a60378f64b3bb0261904ad8f2f5657556" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2", "quote", @@ -9968,190 +11166,394 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.67" +version = "0.2.78" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" + +[[package]] +name = "wasm-gc-api" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93b162580e34310e5931c4b792560108b10fd14d64915d7fff8ff00180e70092" +checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" +dependencies = [ + "log 0.4.14", + "parity-wasm 0.32.0", + "rustc-demangle", +] [[package]] -name = "wasm-bindgen-test" -version = "0.3.12" +name = "wasm-timer" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd8e9dad8040e378f0696b017570c6bc929aac373180e06b3d67ac5059c52da3" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "console_error_panic_hook", + "futures 0.3.17", "js-sys", - "scoped-tls", + "parking_lot 0.11.2", + "pin-utils", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-bindgen-test-macro", + "web-sys", +] + +[[package]] +name = "wasmer" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a70cfae554988d904d64ca17ab0e7cd652ee5c8a0807094819c1ea93eb9d6866" +dependencies = [ + "cfg-if 0.1.10", + "indexmap", + "more-asserts", + "target-lexicon 0.11.2", + "thiserror", + "wasmer-compiler", + "wasmer-compiler-cranelift", + "wasmer-derive", + "wasmer-engine", + "wasmer-engine-jit", + "wasmer-engine-native", + "wasmer-types", + "wasmer-vm", + "wat", + "winapi 0.3.9", +] + +[[package]] +name = "wasmer-compiler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7732a9cab472bd921d5a0c422f45b3d03f62fa2c40a89e0770cef6d47e383e" +dependencies = [ + "enumset", + "serde", + "serde_bytes", + "smallvec 1.6.1", + "target-lexicon 0.11.2", + "thiserror", + "wasmer-types", + "wasmer-vm", + "wasmparser 0.65.0", +] + +[[package]] +name = "wasmer-compiler-cranelift" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"48cb9395f094e1d81534f4c5e330ed4cdb424e8df870d29ad585620284f5fddb" +dependencies = [ + "cranelift-codegen 0.68.0", + "cranelift-frontend 0.68.0", + "gimli 0.22.0", + "more-asserts", + "rayon", + "serde", + "smallvec 1.6.1", + "tracing", + "wasmer-compiler", + "wasmer-types", + "wasmer-vm", +] + +[[package]] +name = "wasmer-compiler-singlepass" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "426ae6ef0f606ca815510f3e2ef6f520e217514bfb7a664defe180b9a9e75d07" +dependencies = [ + "byteorder", + "dynasm", + "dynasmrt", + "lazy_static", + "more-asserts", + "rayon", + "serde", + "smallvec 1.6.1", + "wasmer-compiler", + "wasmer-types", + "wasmer-vm", ] [[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.12" +name = "wasmer-derive" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c358c8d2507c1bae25efa069e62ea907aa28700b25c8c33dafb0b15ba4603627" +checksum = "d8b86dcd2c3efdb8390728a2b56f762db07789aaa5aa872a9dc776ba3a7912ed" dependencies = [ + "proc-macro-error 1.0.4", "proc-macro2", "quote", + "syn", ] [[package]] -name = "wasm-gc-api" -version = "0.1.11" +name = "wasmer-engine" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" +checksum = "efe4667d6bd888f26ae8062a63a9379fa697415b4b4e380f33832e8418fd71b5" dependencies = [ - "log", - "parity-wasm 0.32.0", + "backtrace", + "bincode", + "lazy_static", + "memmap2", + "more-asserts", "rustc-demangle", + "serde", + "serde_bytes", + "target-lexicon 0.11.2", + "thiserror", + "wasmer-compiler", + "wasmer-types", + "wasmer-vm", ] [[package]] -name = "wasm-timer" -version = "0.2.4" +name = "wasmer-engine-jit" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" +checksum = "26770be802888011b4a3072f2a282fc2faa68aa48c71b3db6252a3937a85f3da" dependencies = [ - "futures 0.3.5", - "js-sys", - "parking_lot 0.9.0", - "pin-utils", - "send_wrapper 0.2.0", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", + "bincode", + "cfg-if 0.1.10", + "region", + "serde", + "serde_bytes", + "wasmer-compiler", + "wasmer-engine", + "wasmer-types", + "wasmer-vm", + "winapi 0.3.9", +] + +[[package]] +name = "wasmer-engine-native" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bb4083a6c69f2cd4b000b82a80717f37c6cc2e536aee3a8ffe9af3edc276a8b" +dependencies = [ + "bincode", + "cfg-if 0.1.10", + "leb128", + "libloading 0.6.7", + "serde", + "tempfile", + "tracing", + "wasmer-compiler", + "wasmer-engine", + "wasmer-object", + "wasmer-types", + "wasmer-vm", + "which", +] + +[[package]] +name = "wasmer-object" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf8e0c12b82ff81ebecd30d7e118be5fec871d6de885a90eeb105df0a769a7b" +dependencies = [ + "object 0.22.0", + "thiserror", + "wasmer-compiler", + "wasmer-types", +] + +[[package]] +name = "wasmer-types" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f4ac28c2951cd792c18332f03da523ed06b170f5cf6bb5b1bdd7e36c2a8218" +dependencies = [ + "cranelift-entity 0.68.0", + "serde", + "thiserror", +] + +[[package]] +name = "wasmer-vm" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a7635ba0b6d2fd325f588d69a950ad9fa04dddbf6ad08b6b2a183146319bf6ae" +dependencies = [ + "backtrace", + "cc", + "cfg-if 0.1.10", + "indexmap", + "libc", + "memoffset", + "more-asserts", + "region", + "serde", + "thiserror", + "wasmer-types", + "winapi 0.3.9", ] [[package]] name = "wasmi" -version = "0.6.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +checksum = "d2ee05bba3d1d994652079893941a2ef9324d2b58a63c31b40678fb7eddd7a5a" dependencies = [ + "downcast-rs", "errno", "libc", + "libm", "memory_units", - "num-rational", + "num-rational 0.2.4", "num-traits", - "parity-wasm 0.41.0", + "parity-wasm 0.42.2", "wasmi-validation", ] [[package]] name = "wasmi-validation" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +checksum = "a2eb8e860796d8be48efef530b60eebf84e74a88bce107374fffb0da97d504b8" dependencies = [ - "parity-wasm 0.41.0", + "parity-wasm 0.42.2", ] [[package]] name = "wasmparser" -version = "0.57.0" +version = "0.65.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fddd575d477c6e9702484139cf9f23dcd554b06d185ed0f56c857dd3a47aa6" +checksum = "87cc2fe6350834b4e528ba0901e7aa405d78b89dc1fa3145359eb4de0e323fcf" [[package]] name = "wasmparser" -version = "0.59.0" +version = "0.78.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a950e6a618f62147fd514ff445b2a0b53120d382751960797f85f058c7eda9b9" +checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" [[package]] name = "wasmtime" -version = "0.19.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd3c4f449382779ef6e0a7c3ec6752ae614e20a42e4100000c3efdc973100e2" +checksum = "b310b9d20fcf59385761d1ade7a3ef06aecc380e3d3172035b919eaf7465d9f7" dependencies = [ "anyhow", "backtrace", - "cfg-if", + "bincode", + "cfg-if 1.0.0", + "cpp_demangle", + "indexmap", "lazy_static", "libc", - "log", + "log 0.4.14", + "paste 1.0.5", + "psm", "region", "rustc-demangle", - "smallvec 1.4.1", - "target-lexicon", - "wasmparser 0.59.0", + "serde", + "smallvec 1.6.1", + "target-lexicon 0.12.2", + "wasmparser 0.78.2", + "wasmtime-cache", "wasmtime-environ", "wasmtime-jit", "wasmtime-profiling", "wasmtime-runtime", - "wat", "winapi 0.3.9", ] +[[package]] +name = "wasmtime-cache" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d14d500d5c3dc5f5c097158feee123d64b3097f0d836a2a27dff9c761c73c843" +dependencies = [ + "anyhow", + "base64 0.13.0", + "bincode", + "directories-next", + "errno", + "file-per-thread-logger", + "libc", + "log 0.4.14", + "serde", + "sha2 0.9.8", + "toml", + "winapi 0.3.9", + "zstd", +] + +[[package]] +name = "wasmtime-cranelift" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c525b39f062eada7db3c1298287b96dcb6e472b9f6b22501300b28d9fa7582f6" +dependencies = [ + "cranelift-codegen 0.74.0", + "cranelift-entity 0.74.0", + "cranelift-frontend 0.74.0", + "cranelift-wasm", + "target-lexicon 0.12.2", + "wasmparser 0.78.2", + "wasmtime-environ", +] + [[package]] name = "wasmtime-debug" -version = "0.19.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e634af9067a3af6cf2c7d33dc3b84767ddaf5d010ba68e80eecbcea73d4a349" +checksum = 
"c5d2a763e7a6fc734218e0e463196762a4f409c483063d81e0e85f96343b2e0a" dependencies = [ "anyhow", - "gimli 0.21.0", + "gimli 0.24.0", "more-asserts", - "object 0.20.0", - "target-lexicon", + "object 0.24.0", + "target-lexicon 0.12.2", "thiserror", - "wasmparser 0.59.0", + "wasmparser 0.78.2", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.19.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f85619a94ee4034bd5bb87fc3dcf71fd2237b81c840809da1201061eec9ab3" +checksum = "f64d0c2d881c31b0d65c1f2695e022d71eb60b9fbdd336aacca28208b58eac90" dependencies = [ - "anyhow", - "base64 0.12.3", - "bincode", - "cfg-if", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", + "cfg-if 1.0.0", + "cranelift-codegen 0.74.0", + "cranelift-entity 0.74.0", "cranelift-wasm", - "directories", - "errno", - "file-per-thread-logger", + "gimli 0.24.0", "indexmap", - "libc", - "log", + "log 0.4.14", "more-asserts", - "rayon", "serde", - "sha2 0.8.2", "thiserror", - "toml", - "wasmparser 0.59.0", - "winapi 0.3.9", - "zstd", + "wasmparser 0.78.2", ] [[package]] name = "wasmtime-jit" -version = "0.19.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e914c013c7a9f15f4e429d5431f2830fb8adb56e40567661b69c5ec1d645be23" +checksum = "4d4539ea734422b7c868107e2187d7746d8affbcaa71916d72639f53757ad707" dependencies = [ + "addr2line 0.15.2", "anyhow", - "cfg-if", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", + "cfg-if 1.0.0", + "cranelift-codegen 0.74.0", + "cranelift-entity 0.74.0", + "cranelift-frontend 0.74.0", "cranelift-native", "cranelift-wasm", - "gimli 0.21.0", - "log", + "gimli 0.24.0", + "log 0.4.14", "more-asserts", - "object 0.20.0", + "object 0.24.0", + "rayon", "region", - "target-lexicon", + "serde", + "target-lexicon 0.12.2", "thiserror", - "wasmparser 0.59.0", + "wasmparser 0.78.2", + "wasmtime-cranelift", "wasmtime-debug", "wasmtime-environ", "wasmtime-obj", @@ -10162,52 +11564,52 @@ dependencies = [ [[package]] name = "wasmtime-obj" -version = "0.19.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e81d8e02e9bc9fe2da9b6d48bbc217f96e089f7df613f11a28a3958abc44641e" +checksum = "8e1a8ff85246d091828e2225af521a6208ed28c997bb5c39eb697366dc2e2f2b" dependencies = [ "anyhow", "more-asserts", - "object 0.20.0", - "target-lexicon", + "object 0.24.0", + "target-lexicon 0.12.2", "wasmtime-debug", "wasmtime-environ", ] [[package]] name = "wasmtime-profiling" -version = "0.19.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8d4d1af8dd5f7096cfcc89dd668d358e52980c38cce199643372ffd6590e27" +checksum = "e24364d522dcd67c897c8fffc42e5bdfc57207bbb6d7eeade0da9d4a7d70105b" dependencies = [ "anyhow", - "cfg-if", - "gimli 0.21.0", + "cfg-if 1.0.0", "lazy_static", "libc", - "object 0.19.0", - "scroll", "serde", - "target-lexicon", + "target-lexicon 0.12.2", "wasmtime-environ", "wasmtime-runtime", ] [[package]] name = "wasmtime-runtime" -version = "0.19.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a25f140bbbaadb07c531cba99ce1a966dba216138dc1b2a0ddecec851a01a93" +checksum = "c51e57976e8a19a18a18e002c6eb12e5769554204238e47ff155fda1809ef0f7" dependencies = [ + "anyhow", "backtrace", "cc", - "cfg-if", + "cfg-if 1.0.0", "indexmap", "lazy_static", "libc", - "log", + "log 0.4.14", + "mach", "memoffset", "more-asserts", + "rand 
0.8.4", "region", "thiserror", "wasmtime-environ", @@ -10216,27 +11618,27 @@ dependencies = [ [[package]] name = "wast" -version = "21.0.0" +version = "38.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1844f66a2bc8526d71690104c0e78a8e59ffa1597b7245769d174ebb91deb5" +checksum = "0ebc29df4629f497e0893aacd40f13a4a56b85ef6eb4ab6d603f07244f1a7bf2" dependencies = [ "leb128", ] [[package]] name = "wat" -version = "1.0.22" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce85d72b74242c340e9e3492cfb602652d7bb324c3172dd441b5577e39a2e18c" +checksum = "adcfaeb27e2578d2c6271a45609f4a055e6d7ba3a12eff35b1fd5ba147bdf046" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.39" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", @@ -10244,9 +11646,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.3" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab146130f5f790d45f82aeeb09e55a256573373ec64409fc19a6fb82fb1032ae" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" dependencies = [ "ring", "untrusted", @@ -10254,40 +11656,80 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.18.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" dependencies = [ "webpki", ] [[package]] -name = "webpki-roots" -version = "0.19.0" +name = "websocket" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" +checksum = "413b37840b9e27b340ce91b319ede10731de8c72f5bc4cb0206ec1ca4ce581d0" dependencies = [ - "webpki", + "bytes 0.4.12", + "futures 0.1.31", + "hyper 0.10.16", + "native-tls", + "rand 0.6.5", + "tokio-codec", + "tokio-io", + "tokio-reactor", + "tokio-tcp", + "tokio-tls", + "unicase 1.4.2", + "url 1.7.2", + "websocket-base", ] [[package]] -name = "wepoll-sys-stjepang" -version = "1.0.6" +name = "websocket-base" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e3810f0d00c4dccb54c30a4eee815e703232819dec7b007db115791c42aa374" +dependencies = [ + "base64 0.10.1", + "bitflags", + "byteorder", + "bytes 0.4.12", + "futures 0.1.31", + "native-tls", + "rand 0.6.5", + "sha1", + "tokio-codec", + "tokio-io", + "tokio-tcp", + "tokio-tls", +] + +[[package]] +name = "wepoll-ffi" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" dependencies = [ "cc", ] [[package]] name = "which" -version = "3.1.1" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" dependencies = [ + "either", + "lazy_static", "libc", ] +[[package]] +name = "widestring" +version = "0.4.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" + [[package]] name = "winapi" version = "0.2.8" @@ -10331,6 +11773,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -10341,45 +11792,51 @@ dependencies = [ "winapi-build", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "x25519-dalek" -version = "0.6.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" +checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.2.0", "rand_core 0.5.1", "zeroize", ] [[package]] name = "yamux" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aeb8c4043cac71c3c299dff107171c220d179492350ea198e109a414981b83c" +checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" dependencies = [ - "futures 0.3.5", - "log", + "futures 0.3.17", + "log 0.4.14", "nohash-hasher", - "parking_lot 0.11.0", - "rand 0.7.3", + "parking_lot 0.11.2", + "rand 0.8.4", "static_assertions", ] [[package]] name = "zeroize" -version = "1.1.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" +checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" dependencies = [ "proc-macro2", "quote", @@ -10389,18 +11846,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.3+zstd.1.4.5" +version = "0.6.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8" +checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.5+zstd.1.4.5" +version = "3.0.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055" +checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" dependencies = [ "libc", "zstd-sys", @@ -10408,12 +11865,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.17+zstd.1.4.5" +version = "1.4.20+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" +checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" 
dependencies = [ "cc", - "glob", - "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index b78c4da055801..bca0c816217ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,19 +1,21 @@ [workspace] +resolver = "2" + members = [ "bin/node-template/node", - "bin/node-template/runtime", "bin/node-template/pallets/template", + "bin/node-template/runtime", "bin/node/bench", - "bin/node/browser-testing", "bin/node/cli", + "bin/node/test-runner-example", "bin/node/executor", "bin/node/primitives", - "bin/node/rpc-client", "bin/node/rpc", + "bin/node/rpc-client", "bin/node/runtime", "bin/node/testing", - "bin/utils/subkey", "bin/utils/chain-spec-builder", + "bin/utils/subkey", "client/api", "client/authority-discovery", "client/basic-authorship", @@ -21,71 +23,77 @@ members = [ "client/chain-spec", "client/chain-spec/derive", "client/cli", - "client/cli/proc-macro", "client/consensus/aura", "client/consensus/babe", "client/consensus/babe/rpc", "client/consensus/common", + "client/consensus/epochs", "client/consensus/manual-seal", "client/consensus/pow", - "client/consensus/uncles", "client/consensus/slots", - "client/consensus/epochs", + "client/consensus/uncles", "client/db", "client/executor", "client/executor/common", + "client/executor/runtime-test", "client/executor/wasmi", "client/executor/wasmtime", - "client/executor/runtime-test", "client/finality-grandpa", "client/informant", - "client/light", - "client/tracing", "client/keystore", + "client/light", "client/network", - "client/network/test", "client/network-gossip", + "client/network/test", "client/offchain", "client/peerset", + "client/allocator", "client/proposer-metrics", - "client/rpc-servers", "client/rpc", "client/rpc-api", + "client/rpc-servers", "client/service", "client/service/test", "client/state-db", "client/sync-state-rpc", "client/telemetry", + "client/tracing", + "client/tracing/proc-macro", "client/transaction-pool", - "client/transaction-pool/graph", - "utils/prometheus", - "utils/wasm-builder-runner", + "client/transaction-pool/api", + "client/utils", "frame/assets", - "frame/aura", "frame/atomic-swap", + "frame/aura", "frame/authority-discovery", "frame/authorship", "frame/babe", "frame/balances", "frame/benchmarking", + "frame/bounties", "frame/collective", "frame/contracts", "frame/contracts/rpc", "frame/contracts/rpc/runtime-api", "frame/democracy", - "frame/elections-phragmen", + "frame/try-runtime", "frame/elections", - "frame/evm", + "frame/election-provider-multi-phase", + "frame/election-provider-support", "frame/example", "frame/example-offchain-worker", "frame/example-parallel", "frame/executive", + "frame/gilt", "frame/grandpa", "frame/identity", "frame/im-online", "frame/indices", + "frame/lottery", "frame/membership", - "frame/metadata", + "frame/merkle-mountain-range", + "frame/merkle-mountain-range/primitives", + "frame/merkle-mountain-range/rpc", "frame/multisig", "frame/nicks", "frame/node-authorization", @@ -100,7 +108,7 @@ members = [ "frame/society", "frame/staking", "frame/staking/reward-curve", - "frame/staking/fuzzer", + "frame/staking/reward-fn", "frame/sudo", "frame/support", "frame/support/procedural", @@ -114,12 +122,19 @@ members = [ "frame/transaction-payment", "frame/transaction-payment/rpc", "frame/transaction-payment/rpc/runtime-api", + "frame/transaction-storage", "frame/treasury", + "frame/tips", + "frame/uniques", "frame/utility", "frame/vesting", - "primitives/allocator", + "primitives/api", + "primitives/api/proc-macro", + "primitives/api/test", 
"primitives/application-crypto", "primitives/application-crypto/test", + "primitives/arithmetic", + "primitives/arithmetic/fuzzer", "primitives/authority-discovery", "primitives/authorship", "primitives/block-builder", @@ -130,61 +145,60 @@ members = [ "primitives/consensus/pow", "primitives/consensus/vrf", "primitives/core", - "primitives/chain-spec", "primitives/database", "primitives/debug-derive", - "primitives/storage", "primitives/externalities", "primitives/finality-grandpa", "primitives/inherents", + "primitives/io", "primitives/keyring", "primitives/keystore", - "primitives/offchain", - "primitives/panic-handler", + "primitives/maybe-compressed-blob", "primitives/npos-elections", + "primitives/npos-elections/solution-type", "primitives/npos-elections/fuzzer", - "primitives/npos-elections/compact", + "primitives/offchain", + "primitives/panic-handler", "primitives/rpc", + "primitives/runtime", "primitives/runtime-interface", "primitives/runtime-interface/proc-macro", + "primitives/runtime-interface/test", "primitives/runtime-interface/test-wasm", "primitives/runtime-interface/test-wasm-deprecated", - "primitives/runtime-interface/test", + "primitives/sandbox", "primitives/serializer", "primitives/session", - "primitives/api", - "primitives/api/proc-macro", - "primitives/api/test", - "primitives/arithmetic", - "primitives/arithmetic/fuzzer", - "primitives/io", - "primitives/runtime", - "primitives/sandbox", "primitives/staking", - "primitives/std", - "primitives/version", "primitives/state-machine", + "primitives/std", + "primitives/storage", "primitives/tasks", - "primitives/timestamp", "primitives/test-primitives", - "primitives/transaction-pool", + "primitives/timestamp", "primitives/tracing", + "primitives/transaction-pool", + "primitives/transaction-storage-proof", "primitives/trie", - "primitives/utils", + "primitives/version", + "primitives/version/proc-macro", "primitives/wasm-interface", "test-utils/client", "test-utils/derive", "test-utils/runtime", "test-utils/runtime/client", "test-utils/runtime/transaction-pool", + "test-utils/test-runner", "test-utils/test-crate", - "utils/browser", "utils/build-script-utils", "utils/fork-tree", "utils/frame/benchmarking-cli", + "utils/frame/remote-externalities", "utils/frame/frame-utilities-cli", + "utils/frame/try-runtime/cli", "utils/frame/rpc/support", "utils/frame/rpc/system", + "utils/prometheus", "utils/wasm-builder", ] @@ -204,27 +218,20 @@ members = [ # # This list is ordered alphabetically. [profile.dev.package] -aes-soft = { opt-level = 3 } -aesni = { opt-level = 3 } blake2 = { opt-level = 3 } blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } -blake2s_simd = { opt-level = 3 } chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } cranelift-wasm = { opt-level = 3 } crc32fast = { opt-level = 3 } crossbeam-deque = { opt-level = 3 } -crossbeam-queue = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } ed25519-dalek = { opt-level = 3 } -evm-core = { opt-level = 3 } -evm-runtime = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hashbrown = { opt-level = 3 } -h2 = { opt-level = 3 } hash-db = { opt-level = 3 } hmac = { opt-level = 3 } httparse = { opt-level = 3 } @@ -253,7 +260,6 @@ wasmi = { opt-level = 3 } x25519-dalek = { opt-level = 3 } yamux = { opt-level = 3 } zeroize = { opt-level = 3 } - [profile.release] # Substrate runtime requires unwinding. 
-panic = "unwind" +panic = "unwind" \ No newline at end of file diff --git a/HEADER b/HEADER-APACHE2 similarity index 92% rename from HEADER rename to HEADER-APACHE2 index c9b28a07b0f22..f364f4bdf845a 100644 --- a/HEADER +++ b/HEADER-APACHE2 @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test-utils/tests/ui/missing-func-parameter.rs b/HEADER-GPL3 similarity index 83% rename from test-utils/tests/ui/missing-func-parameter.rs rename to HEADER-GPL3 index bd34a76902ef9..0dd7e4f76028f 100644 --- a/test-utils/tests/ui/missing-func-parameter.rs +++ b/HEADER-GPL3 @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -15,10 +15,3 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . - -#[substrate_test_utils::test] -async fn missing_func_parameter() { - assert!(true); -} - -fn main() {} diff --git a/README.md b/README.md index c586919a1ddc3..6288540548a0d 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,15 @@ -# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.adoc) +# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](#LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.adoc) [![Matrix](https://img.shields.io/matrix/substrate-technical:matrix.org)](https://matrix.to/#/#substrate-technical:matrix.org)

- Substrate is a next-generation framework for blockchain innovation 🚀. ## Trying it out -Simply go to [substrate.dev](https://substrate.dev) and follow the -[installation](https://substrate.dev/docs/en/knowledgebase/getting-started/) instructions. You can +Simply go to [substrate.dev](https://substrate.dev) and follow the +[installation](https://substrate.dev/docs/en/knowledgebase/getting-started/) instructions. You can also try out one of the [tutorials](https://substrate.dev/en/tutorials). ## Contributions & Code of Conduct diff --git a/bin/node-template/.editorconfig b/bin/node-template/.editorconfig new file mode 100644 index 0000000000000..5adac74ca24b3 --- /dev/null +++ b/bin/node-template/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +indent_style=space +indent_size=2 +tab_width=2 +end_of_line=lf +charset=utf-8 +trim_trailing_whitespace=true +insert_final_newline = true + +[*.{rs,toml}] +indent_style=tab +indent_size=tab +tab_width=4 +max_line_length=100 diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 5623fedb5342b..cd977fac84493 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -1,97 +1,71 @@ # Substrate Node Template -A new FRAME-based Substrate node, ready for hacking :rocket: +A fresh FRAME-based [Substrate](https://www.substrate.io/) node, ready for hacking :rocket: -## Local Development +## Getting Started -Follow these steps to prepare a local Substrate development environment :hammer_and_wrench: +Follow these steps to get started with the Node Template :hammer_and_wrench: -### Simple Setup +### Rust Setup -Install all the required dependencies with a single command (be patient, this can take up to 30 -minutes). +First, complete the [basic Rust setup instructions](./doc/rust-setup.md). -```bash -curl https://getsubstrate.io -sSf | bash -s -- --fast -``` +### Run -### Manual Setup +Use Rust's native `cargo` command to build and launch the template node: -Find manual setup instructions at the -[Substrate Developer Hub](https://substrate.dev/docs/en/knowledgebase/getting-started/#manual-installation). +```sh +cargo run --release -- --dev --tmp +``` ### Build -Once the development environment is set up, build the node template. This command will build the -[Wasm](https://substrate.dev/docs/en/knowledgebase/advanced/executor#wasm-execution) and -[native](https://substrate.dev/docs/en/knowledgebase/advanced/executor#native-execution) code: +The `cargo run` command will perform an initial build. Use the following command to build the node +without launching it: -```bash +```sh cargo build --release ``` -## Run - -### Single Node Development Chain +### Embedded Docs -Purge any existing dev chain state: +Once the project has been built, the following command can be used to explore all parameters and +subcommands: -```bash -./target/release/node-template purge-chain --dev +```sh +./target/release/node-template -h ``` -Start a dev chain: +## Run -```bash -./target/release/node-template --dev -``` +The provided `cargo run` command will launch a temporary node and its state will be discarded after +you terminate the process. After the project has been built, there are other ways to launch the +node. 
+ +### Single-Node Development Chain -Or, start a dev chain with detailed logging: +This command will start the single-node development chain with persistent state: ```bash -RUST_LOG=debug RUST_BACKTRACE=1 ./target/release/node-template -lruntime=debug --dev +./target/release/node-template --dev ``` -### Multi-Node Local Testnet - -To see the multi-node consensus algorithm in action, run a local testnet with two validator nodes, -Alice and Bob, that have been [configured](/bin/node-template/node/src/chain_spec.rs) as the initial -authorities of the `local` testnet chain and endowed with testnet units. - -Note: this will require two terminal sessions (one for each node). - -Start Alice's node first. The command below uses the default TCP port (30333) and specifies -`/tmp/alice` as the chain database location. Alice's node ID will be -`12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp` (legacy representation: -`QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR`); this is determined by the `node-key`. +Purge the development chain's state: ```bash -cargo run -- \ - --base-path /tmp/alice \ - --chain=local \ - --alice \ - --node-key 0000000000000000000000000000000000000000000000000000000000000001 \ - --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ - --validator +./target/release/node-template purge-chain --dev ``` -In another terminal, use the following command to start Bob's node on a different TCP port (30334) -and with a chain database location of `/tmp/bob`. The `--bootnodes` option will connect his node to -Alice's on TCP port 30333: +Start the development chain with detailed logging: ```bash -cargo run -- \ - --base-path /tmp/bob \ - --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp \ - --chain=local \ - --bob \ - --port 30334 \ - --ws-port 9945 \ - --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ - --validator +RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev ``` -Execute `cargo run -- --help` to learn more about the template node's CLI options. +### Multi-Node Local Testnet + +If you want to see the multi-node consensus algorithm in action, refer to +[our Start a Private Network tutorial](https://substrate.dev/docs/en/tutorials/start-a-private-network/). ## Template Structure @@ -157,7 +131,7 @@ Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this the following: - This file configures several pallets to include in the runtime. Each pallet configuration is - defined by a code block that begins with `impl $PALLET_NAME::Trait for Runtime`. + defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. - The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core @@ -181,27 +155,31 @@ A FRAME pallet is composed of a number of blockchain primitives: - Events: Substrate uses [events](https://substrate.dev/docs/en/knowledgebase/runtime/events) to notify users of important changes in the runtime. - Errors: When a dispatchable fails, it returns an error. -- Trait: The `Trait` configuration interface is used to define the types and parameters upon which - a FRAME pallet depends. +- Config: The `Config` configuration interface is used to define the types and parameters upon + which a FRAME pallet depends; a minimal sketch follows below.
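+
+As a minimal sketch (assuming, as in this template, a pallet whose `Config`
+trait only declares an `Event` type), such a configuration block looks like:
+
+```rust
+// Hypothetical wiring for the template pallet; real pallets usually declare
+// more associated types and constants in their `Config` trait.
+impl pallet_template::Config for Runtime {
+	// Route the pallet's events into the runtime-wide `Event` enum.
+	type Event = Event;
+}
+```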
-## Generate a Custom Node Template +### Run in Docker -Generate a Substrate node template based on a particular commit by running the following commands: +First, install [Docker](https://docs.docker.com/get-docker/) and +[Docker Compose](https://docs.docker.com/compose/install/). + +Then run the following command to start a single node development chain. ```bash -# Clone from the main Substrate repo -git clone https://github.com/paritytech/substrate.git -cd substrate +./scripts/docker_run.sh +``` -# Switch to the branch or commit to base the template on -git checkout +This command will first compile your code and then start a local development network. You can +also replace the default command (`cargo build --release && ./target/release/node-template --dev --ws-external`) +by appending your own. A few useful ones are as follows. -# Run the helper script to generate a node template. This script compiles Substrate, so it will take -# a while to complete. It expects a single parameter: the location for the script's output expressed -# as a relative path. -.maintain/node-template-release.sh ../node-template.tar.gz -``` +```bash +# Run Substrate node without re-compiling +./scripts/docker_run.sh ./target/release/node-template --dev --ws-external + +# Purge the local dev chain +./scripts/docker_run.sh ./target/release/node-template purge-chain --dev -Custom node templates are not supported. Please use a recently tagged version of the -[Substrate Developer Node Template](https://github.com/substrate-developer-hub/substrate-node-template) -in order to receive support. +# Check whether the code is compilable +./scripts/docker_run.sh cargo check +``` diff --git a/bin/node-template/docker-compose.yml b/bin/node-template/docker-compose.yml new file mode 100644 index 0000000000000..cfc4437bbae41 --- /dev/null +++ b/bin/node-template/docker-compose.yml @@ -0,0 +1,17 @@ +version: "3.2" + +services: + dev: + container_name: node-template + image: paritytech/ci-linux:974ba3ac-20201006 + working_dir: /var/www/node-template + ports: + - "9944:9944" + environment: + - CARGO_HOME=/var/www/node-template/.cargo + volumes: + - .:/var/www/node-template + - type: bind + source: ./.local + target: /root/.local + command: bash -c "cargo build --release && ./target/release/node-template --dev --ws-external" diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md new file mode 100644 index 0000000000000..34f6e43e7f0dd --- /dev/null +++ b/bin/node-template/docs/rust-setup.md @@ -0,0 +1,81 @@ +--- +title: Installation +--- + +This page will guide you through the steps needed to prepare a computer for development with the +Substrate Node Template. Since Substrate is built with +[the Rust programming language](https://www.rust-lang.org/), the first thing you will need to do is +prepare the computer for Rust development - these steps will vary based on the computer's operating +system. Once Rust is configured, you will use its toolchains to interact with Rust projects; the +commands for Rust's toolchains will be the same for all supported, Unix-based operating systems. + +## Unix-Based Operating Systems + +Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples +in the Substrate [Tutorials](https://substrate.dev/tutorials) and [Recipes](https://substrate.dev/recipes/) +use Unix-style terminals to demonstrate how to interact with Substrate from the command line.
+ +### macOS + +Open the Terminal application and execute the following commands: + +```bash +# Install Homebrew if necessary https://brew.sh/ +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" + +# Make sure Homebrew is up-to-date, install openssl and cmake +brew update +brew install openssl cmake +``` + +### Ubuntu/Debian + +Use a terminal shell to execute the following commands: + +```bash +sudo apt update +# May prompt for location information +sudo apt install -y cmake pkg-config libssl-dev git build-essential clang libclang-dev curl +``` + +### Arch Linux + +Run these commands from a terminal: + +```bash +pacman -Syu --needed --noconfirm cmake gcc openssl-1.0 pkgconf git clang +export OPENSSL_LIB_DIR="/usr/lib/openssl-1.0" +export OPENSSL_INCLUDE_DIR="/usr/include/openssl-1.0" +``` + +### Fedora/RHEL/CentOS + +Use a terminal to run the following commands: + +```bash +# Update +sudo dnf update +# Install packages +sudo dnf install cmake pkgconfig rocksdb rocksdb-devel llvm git libcurl libcurl-devel curl-devel clang +``` + +## Rust Developer Environment + +This project uses [`rustup`](https://rustup.rs/) to help manage the Rust toolchain. First install +and configure `rustup`: + +```bash +# Install +curl https://sh.rustup.rs -sSf | sh +# Configure +source ~/.cargo/env +``` + +Finally, configure the Rust toolchain: + +```bash +rustup default stable +rustup update nightly +rustup update stable +rustup target add wasm32-unknown-unknown --toolchain nightly +``` diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 1a2991e471bb6..17bc7b32c15c1 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,13 +1,14 @@ [package] name = "node-template" -version = "2.0.0" -authors = ["Anonymous"] -description = "A new FRAME-based Substrate node, ready for hacking." +version = "3.0.0" +authors = ["Substrate DevHub "] +description = "A fresh FRAME-based Substrate node, ready for hacking." 
edition = "2018" license = "Unlicense" build = "build.rs" homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -16,44 +17,46 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -ipfs = { git = "https://github.com/rs-ipfs/rust-ipfs" } +ipfs = { git = "https://github.com/rs-ipfs/rust-ipfs"} structopt = "0.3.8" -sc-cli = { version = "0.8.0", path = "../../../client/cli", features = ["wasmtime"] } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-executor = { version = "0.8.0", path = "../../../client/executor", features = ["wasmtime"] } -sc-service = { version = "0.8.0", path = "../../../client/service", features = ["wasmtime"] } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-consensus-aura = { version = "0.8.0", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8.0", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-finality-grandpa = { version = "0.8.0", path = "../../../client/finality-grandpa" } -sp-finality-grandpa = { version = "2.0.0", path = "../../../primitives/finality-grandpa" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", features = ["wasmtime"] } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor", features = ["wasmtime"] } +sc-service = { version = "0.10.0-dev", path = "../../../client/service", features = ["wasmtime"] } +sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } +sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +sc-consensus-aura = { version = "0.10.0-dev", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.10.0-dev", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality-grandpa" } +sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } # These dependencies are used for the node template's RPCs -jsonrpc-core = "15.0.0" -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-rpc-api = { version = "0.8.0", 
path = "../../../client/rpc-api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } -substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } -pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } +jsonrpc-core = "18.0.0" +sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sc-basic-authorship = { version = "0.10.0-dev", path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } # These dependencies are used for runtime benchmarking -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } -frame-benchmarking-cli = { version = "2.0.0", path = "../../../utils/frame/benchmarking-cli" } +frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" } +frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } -node-template-runtime = { version = "2.0.0", path = "../runtime" } +node-template-runtime = { version = "3.0.0", path = "../runtime" } [build-dependencies] -substrate-build-script-utils = { version = "2.0.0", path = "../../../utils/build-script-utils" } +substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build-script-utils" } [features] default = [] diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index 41f582fb64a46..7009b3be5c279 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -1,12 +1,12 @@ -use sp_core::{Pair, Public, sr25519}; use node_template_runtime::{ - AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, - SudoConfig, SystemConfig, WASM_BINARY, Signature + AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, Signature, SudoConfig, + SystemConfig, WASM_BINARY, }; +use sc_service::ChainType; use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{sr25519, Pair, Public}; use sp_finality_grandpa::AuthorityId as GrandpaId; -use sp_runtime::traits::{Verify, IdentifyAccount}; -use sc_service::ChainType; +use sp_runtime::traits::{IdentifyAccount, Verify}; // The URL for the telemetry server. // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; @@ -24,22 +24,20 @@ pub fn get_from_seed(seed: &str) -> ::Pu type AccountPublic = ::Signer; /// Generate an account ID from seed. -pub fn get_account_id_from_seed(seed: &str) -> AccountId where - AccountPublic: From<::Public> +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, { AccountPublic::from(get_from_seed::(seed)).into_account() } /// Generate an Aura authority key. 
pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { - ( - get_from_seed::<AuraId>(s), - get_from_seed::<GrandpaId>(s), - ) + (get_from_seed::<AuraId>(s), get_from_seed::<GrandpaId>(s)) } pub fn development_config() -> Result<ChainSpec, String> { - let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( // Name @@ -47,23 +45,23 @@ pub fn development_config() -> Result<ChainSpec, String> { // ID "dev", ChainType::Development, - move || testnet_genesis( - wasm_binary, - // Initial PoA authorities - vec![ - authority_keys_from_seed("Alice"), - ], - // Sudo account - get_account_id_from_seed::<sr25519::Public>("Alice"), - // Pre-funded accounts - vec![ + move || { + testnet_genesis( + wasm_binary, + // Initial PoA authorities + vec![authority_keys_from_seed("Alice")], + // Sudo account get_account_id_from_seed::<sr25519::Public>("Alice"), - get_account_id_from_seed::<sr25519::Public>("Bob"), - get_account_id_from_seed::<sr25519::Public>("Alice//stash"), - get_account_id_from_seed::<sr25519::Public>("Bob//stash"), - ], - true, - ), + // Pre-funded accounts + vec![ + get_account_id_from_seed::<sr25519::Public>("Alice"), + get_account_id_from_seed::<sr25519::Public>("Bob"), + get_account_id_from_seed::<sr25519::Public>("Alice//stash"), + get_account_id_from_seed::<sr25519::Public>("Bob//stash"), + ], + true, + ) + }, // Bootnodes vec![], // Telemetry @@ -78,7 +76,7 @@ pub fn development_config() -> Result<ChainSpec, String> { } pub fn local_testnet_config() -> Result<ChainSpec, String> { - let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; Ok(ChainSpec::from_genesis( // Name @@ -86,32 +84,31 @@ pub fn local_testnet_config() -> Result<ChainSpec, String> { // ID "local_testnet", ChainType::Local, - move || testnet_genesis( - wasm_binary, - // Initial PoA authorities - vec![ - authority_keys_from_seed("Alice"), - authority_keys_from_seed("Bob"), - ], - // Sudo account - get_account_id_from_seed::<sr25519::Public>("Alice"), - // Pre-funded accounts - vec![ + move || { + testnet_genesis( + wasm_binary, + // Initial PoA authorities + vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], + // Sudo account get_account_id_from_seed::<sr25519::Public>("Alice"), - get_account_id_from_seed::<sr25519::Public>("Bob"), - get_account_id_from_seed::<sr25519::Public>("Charlie"), - get_account_id_from_seed::<sr25519::Public>("Dave"), - get_account_id_from_seed::<sr25519::Public>("Eve"), - get_account_id_from_seed::<sr25519::Public>("Ferdie"), - get_account_id_from_seed::<sr25519::Public>("Alice//stash"), - get_account_id_from_seed::<sr25519::Public>("Bob//stash"), - get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), - get_account_id_from_seed::<sr25519::Public>("Dave//stash"), - get_account_id_from_seed::<sr25519::Public>("Eve//stash"), - get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), - ], - true, - ), + // Pre-funded accounts + vec![ + get_account_id_from_seed::<sr25519::Public>("Alice"), + get_account_id_from_seed::<sr25519::Public>("Bob"), + get_account_id_from_seed::<sr25519::Public>("Charlie"), + get_account_id_from_seed::<sr25519::Public>("Dave"), + get_account_id_from_seed::<sr25519::Public>("Eve"), + get_account_id_from_seed::<sr25519::Public>("Ferdie"), + get_account_id_from_seed::<sr25519::Public>("Alice//stash"), + get_account_id_from_seed::<sr25519::Public>("Bob//stash"), + get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), + get_account_id_from_seed::<sr25519::Public>("Dave//stash"), + get_account_id_from_seed::<sr25519::Public>("Eve//stash"), + get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), + ], + true, + ) + }, // Bootnodes vec![], // Telemetry @@ -134,24 +131,24 @@ fn testnet_genesis( _enable_println: bool, ) -> GenesisConfig { GenesisConfig { - frame_system: Some(SystemConfig { + system: SystemConfig { // Add Wasm runtime to storage.
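// (`wasm_binary` here is the runtime blob that the runtime crate's build
// script produces at compile time; on-chain upgrades later replace it.)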
code: wasm_binary.to_vec(), changes_trie_config: Default::default(), - }), - pallet_balances: Some(BalancesConfig { + }, + balances: BalancesConfig { // Configure endowed accounts with initial balance of 1 << 60. - balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), - }), - pallet_aura: Some(AuraConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), + }, + aura: AuraConfig { authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), - }), - pallet_grandpa: Some(GrandpaConfig { + }, + grandpa: GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), - }), - pallet_sudo: Some(SudoConfig { + }, + sudo: SudoConfig { // Assign network admin rights. key: root_key, - }), + }, } } diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index f2faf17e4ddf4..8b551051c1b19 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -1,5 +1,5 @@ -use structopt::StructOpt; use sc_cli::RunCmd; +use structopt::StructOpt; #[derive(Debug, StructOpt)] pub struct Cli { @@ -12,6 +12,8 @@ pub struct Cli { #[derive(Debug, StructOpt)] pub enum Subcommand { + /// Key management cli utilities + Key(sc_cli::KeySubcommand), /// Build a chain specification. BuildSpec(sc_cli::BuildSpecCmd), diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index ac950b50483ac..e948c3f53b716 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -1,25 +1,11 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::{chain_spec, service}; -use crate::cli::{Cli, Subcommand}; -use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec}; -use sc_service::PartialComponents; +use crate::{ + chain_spec, + cli::{Cli, Subcommand}, + service, +}; use node_template_runtime::Block; +use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; +use sc_service::PartialComponents; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -50,9 +36,8 @@ impl SubstrateCli for Cli { Ok(match id { "dev" => Box::new(chain_spec::development_config()?), "" | "local" => Box::new(chain_spec::local_testnet_config()?), - path => Box::new(chain_spec::ChainSpec::from_json_file( - std::path::PathBuf::from(path), - )?), + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } @@ -66,6 +51,7 @@ pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::BuildSpec(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) @@ -73,32 +59,30 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, ..} - = service::new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, ..} - = service::new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; Ok((cmd.run(client, config.database), task_manager)) }) }, Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, ..} - = service::new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) }, Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, ..} - = service::new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, @@ -109,21 +93,21 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, backend, ..} - = service::new_partial(&config)?; + let PartialComponents { client, task_manager, backend, .. } = + service::new_partial(&config)?; Ok((cmd.run(client, backend), task_manager)) }) }, - Some(Subcommand::Benchmark(cmd)) => { + Some(Subcommand::Benchmark(cmd)) => if cfg!(feature = "runtime-benchmarks") { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { - Err("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`.".into()) - } - }, + Err("Benchmarking wasn't enabled when building the node. You can enable it with \ + `--features runtime-benchmarks`." 
+ .into()) + }, None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { @@ -131,7 +115,8 @@ pub fn run() -> sc_cli::Result<()> { Role::Light => service::new_light(config), _ => service::new_full(config), } + .map_err(sc_cli::Error::Service) }) - } + }, } } diff --git a/bin/node-template/node/src/lib.rs b/bin/node-template/node/src/lib.rs deleted file mode 100644 index 777c4f0a77147..0000000000000 --- a/bin/node-template/node/src/lib.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod chain_spec; -pub mod service; -pub mod rpc; diff --git a/bin/node-template/node/src/rpc.rs b/bin/node-template/node/src/rpc.rs index c1f0e0a8457bc..d23b23178ec2a 100644 --- a/bin/node-template/node/src/rpc.rs +++ b/bin/node-template/node/src/rpc.rs @@ -8,12 +8,11 @@ use std::sync::Arc; use node_template_runtime::{opaque::Block, AccountId, Balance, Index}; +pub use sc_rpc_api::DenyUnsafe; +use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; -use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; use sp_block_builder::BlockBuilder; -pub use sc_rpc_api::DenyUnsafe; -use sp_transaction_pool::TransactionPool; - +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; /// Full client dependencies. pub struct FullDeps { @@ -26,34 +25,25 @@ pub struct FullDeps { } /// Instantiate all full RPC extensions. -pub fn create_full( - deps: FullDeps, -) -> jsonrpc_core::IoHandler where +pub fn create_full(deps: FullDeps) -> jsonrpc_core::IoHandler +where C: ProvideRuntimeApi, - C: HeaderBackend + HeaderMetadata + 'static, + C: HeaderBackend + HeaderMetadata + 'static, C: Send + Sync + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BlockBuilder, P: TransactionPool + 'static, { - use substrate_frame_rpc_system::{FullSystem, SystemApi}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + use substrate_frame_rpc_system::{FullSystem, SystemApi}; let mut io = jsonrpc_core::IoHandler::default(); - let FullDeps { - client, - pool, - deny_unsafe, - } = deps; + let FullDeps { client, pool, deny_unsafe } = deps; - io.extend_with( - SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)) - ); + io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe))); - io.extend_with( - TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())) - ); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone()))); // Extend this RPC with a custom API by using the following syntax. // `YourRpcStruct` should have a reference to a client, which is needed diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 1ddf8e7dbf1a6..c7b9503de1d08 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -1,96 +1,187 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
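//! `new_partial` assembles the components shared by every node flavour;
//! `new_full` and `new_light` consume them to start a full or light client.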
-use std::sync::Arc; -use std::time::Duration; -use sc_client_api::{ExecutorProvider, RemoteBackend}; use node_template_runtime::{self, opaque::Block, RuntimeApi}; +use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; +pub use sc_executor::NativeElseWasmExecutor; +use sc_finality_grandpa::SharedVoterState; +use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; -use sp_inherents::InherentDataProviders; -use sc_executor::native_executor_instance; -pub use sc_executor::NativeExecutor; -use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use sc_finality_grandpa::{FinalityProofProvider as GrandpaFinalityProofProvider, SharedVoterState}; +use sc_telemetry::{Telemetry, TelemetryWorker}; +use sp_consensus::SlotData; +use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; +use std::{sync::Arc, time::Duration}; // Our native executor instance. -native_executor_instance!( - pub Executor, - node_template_runtime::api::dispatch, - node_template_runtime::native_version, - frame_benchmarking::benchmarking::HostFunctions, -); - -type FullClient = sc_service::TFullClient; +pub struct ExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + node_template_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + node_template_runtime::native_version() + } +} + +type FullClient = + sc_service::TFullClient>; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; -pub fn new_partial(config: &Configuration) -> Result, - sc_transaction_pool::FullPool, - ( - sc_consensus_aura::AuraBlockImport< - Block, - FullClient, - sc_finality_grandpa::GrandpaBlockImport, - AuraPair - >, - sc_finality_grandpa::LinkHalf - ) ->, ServiceError> { - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sc_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool, + ( + sc_finality_grandpa::GrandpaBlockImport< + FullBackend, + Block, + FullClient, + FullSelectChain, + >, + sc_finality_grandpa::LinkHalf, + Option, + ), + >, + ServiceError, +> { + if config.keystore_remote.is_some() { + return Err(ServiceError::Other(format!("Remote Keystores are not supported."))) + } + + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(&config)?; + sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; let client = Arc::new(client); + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = 
sc_transaction_pool::BasicPool::new_full( config.transaction_pool.clone(), + config.role.is_authority().into(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), ); let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), )?; - let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( - grandpa_block_import.clone(), client.clone(), - ); + let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import.clone(), - Some(Box::new(grandpa_block_import.clone())), - None, - client.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_handle(), - config.prometheus_registry(), - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - )?; + let import_queue = + sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: grandpa_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import.clone())), + client: client.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot)) + }, + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new( + client.executor().clone(), + ), + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; Ok(sc_service::PartialComponents { - client, backend, task_manager, import_queue, keystore_container, - select_chain, transaction_pool,inherent_data_providers, - other: (aura_block_import, grandpa_link), + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (grandpa_block_import, grandpa_link, telemetry), }) } +fn remote_keystore(_url: &String) -> Result, &'static str> { + // FIXME: here would the concrete keystore be built, + // must return a concrete type (NOT `LocalKeystore`) that + // implements `CryptoStore` and `SyncCryptoStore` + Err("Remote Keystore not supported.") +} + /// Builds a new service for a full client. 
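/// Consumes the `PartialComponents` from `new_partial`, wires up networking
/// (including GRANDPA warp sync), spawns the RPC and telemetry tasks, and, when
/// running as an authority, starts the Aura authoring loop and the GRANDPA voter.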
-pub fn new_full(config: Configuration) -> Result { +pub fn new_full(mut config: Configuration) -> Result { let sc_service::PartialComponents { - client, backend, mut task_manager, import_queue, keystore_container, - select_chain, transaction_pool, inherent_data_providers, - other: (block_import, grandpa_link), + client, + backend, + mut task_manager, + import_queue, + mut keystore_container, + select_chain, + transaction_pool, + other: (block_import, grandpa_link, mut telemetry), } = new_partial(&config)?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + if let Some(url) = &config.keystore_remote { + match remote_keystore(url) { + Ok(k) => keystore_container.set_remote_keystore(k), + Err(e) => + return Err(ServiceError::Other(format!( + "Error hooking up remote keystore for {}: {}", + url, e + ))), + }; + } + + config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + )); - let (network, network_status_sinks, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -99,74 +190,96 @@ pub fn new_full(config: Configuration) -> Result { import_queue, on_demand: None, block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), + warp_sync: Some(warp_sync), })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers( - &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), task_manager.ipfs_rt.clone(), ); } let role = config.role.clone(); let force_authoring = config.force_authoring; + let backoff_authoring_blocks: Option<()> = None; let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); - let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); let rpc_extensions_builder = { let client = client.clone(); let pool = transaction_pool.clone(); Box::new(move |deny_unsafe, _| { - let deps = crate::rpc::FullDeps { - client: client.clone(), - pool: pool.clone(), - deny_unsafe, - }; + let deps = + crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; - crate::rpc::create_full(deps) + Ok(crate::rpc::create_full(deps)) }) }; - sc_service::spawn_tasks(sc_service::SpawnTasksParams { + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { network: network.clone(), client: client.clone(), keystore: keystore_container.sync_keystore(), task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), - telemetry_connection_sinks: telemetry_connection_sinks.clone(), rpc_extensions_builder, on_demand: None, remote_blockchain: None, - backend, network_status_sinks, system_rpc_tx, config, + backend, + system_rpc_tx, + config, + telemetry: telemetry.as_mut(), })?; if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( + let proposer_factory = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), ); let 
can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - client.clone(), - select_chain, - block_import, - proposer, - network.clone(), - inherent_data_providers.clone(), - force_authoring, - keystore_container.sync_keystore(), - can_author_with, + let slot_duration = sc_consensus_aura::slot_duration(&*client)?; + let raw_slot_duration = slot_duration.slot_duration(); + + let aura = sc_consensus_aura::start_aura::( + StartAuraParams { + slot_duration, + client: client.clone(), + select_chain, + block_import, + proposer_factory, + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + raw_slot_duration, + ); + + Ok((timestamp, slot)) + }, + force_authoring, + backoff_authoring_blocks, + keystore: keystore_container.sync_keystore(), + can_author_with, + sync_oracle: network.clone(), + justification_sync_link: network.clone(), + block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + max_block_proposal_slot_portion: None, + telemetry: telemetry.as_ref().map(|x| x.handle()), + }, )?; // the AURA authoring task is considered essential, i.e. if it @@ -176,11 +289,8 @@ pub fn new_full(config: Configuration) -> Result { // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = if role.is_authority() { - Some(keystore_container.sync_keystore()) - } else { - None - }; + let keystore = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; let grandpa_config = sc_finality_grandpa::Config { // FIXME #1578 make this available through chainspec @@ -189,7 +299,8 @@ pub fn new_full(config: Configuration) -> Result { name: Some(name), observer_enabled: false, keystore, - is_authority: role.is_network_authority(), + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), }; if enable_grandpa { @@ -203,20 +314,18 @@ pub fn new_full(config: Configuration) -> Result { config: grandpa_config, link: grandpa_link, network, - telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), }; // the GRANDPA voter task is considered infallible, i.e. // if it fails we take down the service with it. task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", - sc_finality_grandpa::run_grandpa_voter(grandpa_config)? + sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, ); - } else { - sc_finality_grandpa::setup_disabled_grandpa(network)?; } network_starter.start_network(); @@ -224,42 +333,86 @@ pub fn new_full(config: Configuration) -> Result { } /// Builds a new service for a light client. 
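/// Mirrors `new_full`, but builds on `new_light_parts`, keeps GRANDPA keyless
/// (`keystore: None`) and runs a GRANDPA observer instead of a full voter.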
-pub fn new_light(config: Configuration) -> Result { +pub fn new_light(mut config: Configuration) -> Result { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; + sc_service::new_light_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + + let mut telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + + config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), on_demand.clone(), )); - let grandpa_block_import = sc_finality_grandpa::light_block_import( - client.clone(), backend.clone(), &(client.clone() as Arc<_>), - Arc::new(on_demand.checker().clone()) as Arc<_>, - )?; - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - grandpa_block_import, - None, - Some(Box::new(finality_proof_import)), + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( client.clone(), - InherentDataProviders::new(), - &task_manager.spawn_handle(), - config.prometheus_registry(), - sp_consensus::NeverCanAuthor, + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), )?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); - let (network, network_status_sinks, system_rpc_tx, network_starter) = + let import_queue = + sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: grandpa_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import.clone())), + client: client.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot)) + }, + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::NeverCanAuthor, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; + + let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + )); + + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: 
client.clone(), @@ -268,34 +421,54 @@ pub fn new_light(config: Configuration) -> Result { import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), + warp_sync: Some(warp_sync), })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers( - &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), task_manager.ipfs_rt.clone(), ); } + let enable_grandpa = !config.disable_grandpa; + if enable_grandpa { + let name = config.network.node_name.clone(); + + let config = sc_finality_grandpa::Config { + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore: None, + local_role: config.role.clone(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + task_manager.spawn_handle().spawn_blocking( + "grandpa-observer", + sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, + ); + } + sc_service::spawn_tasks(sc_service::SpawnTasksParams { remote_blockchain: Some(backend.remote_blockchain()), transaction_pool, task_manager: &mut task_manager, on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_, _| ()), - telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), + rpc_extensions_builder: Box::new(|_, _| Ok(())), config, client, keystore: keystore_container.sync_keystore(), backend, network, - network_status_sinks, system_rpc_tx, - })?; - - network_starter.start_network(); + telemetry: telemetry.as_mut(), + })?; - Ok(task_manager) + network_starter.start_network(); + Ok(task_manager) } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index b00dcf930043d..e977ae363b481 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -1,56 +1,44 @@ [package] -authors = ['Anonymous'] +authors = ['Substrate DevHub '] edition = '2018' name = 'pallet-template' -version = "2.0.0" +version = "3.0.0" license = "Unlicense" homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" description = "FRAME pallet template for defining custom runtime logic." 
readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } - -[dependencies.frame-support] -default-features = false -version = "2.0.0" -path = "../../../../frame/support" - -[dependencies.frame-system] -default-features = false -version = "2.0.0" -path = "../../../../frame/system" - -[dependencies.sp-core] -default-features = false -version = "2.0.0" -path = "../../../../primitives/core" - -[dependencies.sp-io] -default-features = false -version = "2.0.0" -path = "../../../../primitives/io" - -[dependencies.sp-runtime] -default-features = false -version = "2.0.0" -path = "../../../../primitives/runtime" - -[dependencies.sp-std] -default-features = false -version = "2.0.0-rc6" -path = "../../../../primitives/std" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { default-features = false, version = "4.0.0-dev", path = "../../../../frame/support" } +frame-system = { default-features = false, version = "4.0.0-dev", path = "../../../../frame/system" } +frame-benchmarking = { default-features = false, version = "4.0.0-dev", path = "../../../../frame/benchmarking", optional = true } + +[dev-dependencies] +frame-system = { default-features = false, version = "4.0.0-dev", path = "../../../../frame/system" } +sp-core = { default-features = false, version = "4.0.0-dev", path = "../../../../primitives/core" } +sp-io = { default-features = false, version = "4.0.0-dev", path = "../../../../primitives/io" } +sp-runtime = { default-features = false, version = "4.0.0-dev", path = "../../../../primitives/runtime" } [features] default = ['std'] std = [ 'codec/std', + 'scale-info/std', 'frame-support/std', 'frame-system/std', + 'frame-benchmarking/std', + 'frame-system/std', 'sp-io/std', - 'sp-std/std', ] + +runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/bin/node-template/pallets/template/src/benchmarking.rs b/bin/node-template/pallets/template/src/benchmarking.rs new file mode 100644 index 0000000000000..2117c048cfbdb --- /dev/null +++ b/bin/node-template/pallets/template/src/benchmarking.rs @@ -0,0 +1,20 @@ +//! Benchmarking setup for pallet-template + +use super::*; + +#[allow(unused)] +use crate::Pallet as Template; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_system::RawOrigin; + +benchmarks! { + do_something { + let s in 0 .. 
100;
+		let caller: T::AccountId = whitelisted_caller();
+	}: _(RawOrigin::Signed(caller), s)
+	verify {
+		assert_eq!(Something::<T>::get(), Some(s));
+	}
+}
+
+impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs
index a3dff240e4847..4532d3d09b497 100644
--- a/bin/node-template/pallets/template/src/mock.rs
+++ b/bin/node-template/pallets/template/src/mock.rs
@@ -1,30 +1,39 @@
-use crate::{Module, Trait};
+use crate as pallet_template;
+use frame_support::parameter_types;
+use frame_system as system;
 use sp_core::H256;
-use frame_support::{impl_outer_origin, parameter_types, weights::Weight};
 use sp_runtime::{
-	traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill,
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
 };
-use frame_system as system;

-impl_outer_origin! {
-	pub enum Origin for Test {}
-}
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;

 // Configure a mock runtime to test the pallet.
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		TemplateModule: pallet_template::{Pallet, Call, Storage, Event<T>},
+	}
+);

-#[derive(Clone, Eq, PartialEq)]
-pub struct Test;
 parameter_types! {
 	pub const BlockHashCount: u64 = 250;
-	pub const MaximumBlockWeight: Weight = 1024;
-	pub const MaximumBlockLength: u32 = 2 * 1024;
-	pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
+	pub const SS58Prefix: u8 = 42;
 }

-impl system::Trait for Test {
-	type BaseCallFilter = ();
+impl system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = ();
+	type BlockLength = ();
+	type DbWeight = ();
 	type Origin = Origin;
-	type Call = ();
+	type Call = Call;
 	type Index = u64;
 	type BlockNumber = u64;
 	type Hash = H256;
@@ -32,29 +41,22 @@ impl system::Trait for Test {
 	type AccountId = u64;
 	type Lookup = IdentityLookup<Self::AccountId>;
 	type Header = Header;
-	type Event = ();
+	type Event = Event;
 	type BlockHashCount = BlockHashCount;
-	type MaximumBlockWeight = MaximumBlockWeight;
-	type DbWeight = ();
-	type BlockExecutionWeight = ();
-	type ExtrinsicBaseWeight = ();
-	type MaximumExtrinsicWeight = MaximumBlockWeight;
-	type MaximumBlockLength = MaximumBlockLength;
-	type AvailableBlockRatio = AvailableBlockRatio;
 	type Version = ();
-	type PalletInfo = ();
+	type PalletInfo = PalletInfo;
 	type AccountData = ();
 	type OnNewAccount = ();
 	type OnKilledAccount = ();
 	type SystemWeightInfo = ();
+	type SS58Prefix = SS58Prefix;
+	type OnSetCode = ();
 }

-impl Trait for Test {
-	type Event = ();
+impl pallet_template::Config for Test {
+	type Event = Event;
 }

-pub type TemplateModule = Module<Test>;
-
 // Build genesis storage according to the mock runtime.
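// For illustration, a test built on this mock typically wraps assertions in
// `execute_with`, e.g. (assuming the template pallet's usual `something`
// getter; see tests.rs further down in this diff):
//
//     #[test]
//     fn something_defaults_to_none() {
//         new_test_ext().execute_with(|| {
//             assert_eq!(TemplateModule::something(), None);
//         });
//     }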
 pub fn new_test_ext() -> sp_io::TestExternalities {
 	system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
diff --git a/bin/node-template/pallets/template/src/tests.rs b/bin/node-template/pallets/template/src/tests.rs
index 3356b29ff3598..2205658601721 100644
--- a/bin/node-template/pallets/template/src/tests.rs
+++ b/bin/node-template/pallets/template/src/tests.rs
@@ -1,5 +1,5 @@
-use crate::{Error, mock::*};
-use frame_support::{assert_ok, assert_noop};
+use crate::{mock::*, Error};
+use frame_support::{assert_noop, assert_ok};

 #[test]
 fn it_works_for_default_value() {
@@ -15,9 +15,6 @@ fn it_works_for_default_value() {
 fn correct_error_for_none_value() {
 	new_test_ext().execute_with(|| {
 		// Ensure the expected error is thrown when no value is present.
-		assert_noop!(
-			TemplateModule::cause_error(Origin::signed(1)),
-			Error::<Test>::NoneValue
-		);
+		assert_noop!(TemplateModule::cause_error(Origin::signed(1)), Error::<Test>::NoneValue);
 	});
 }
diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml
index ed5a114b813f8..47e67af2b9ae1 100644
--- a/bin/node-template/runtime/Cargo.toml
+++ b/bin/node-template/runtime/Cargo.toml
@@ -1,70 +1,74 @@
 [package]
 name = "node-template-runtime"
-version = "2.0.0"
-authors = ["Anonymous"]
+version = "3.0.0"
+authors = ["Substrate DevHub <https://github.com/substrate-developer-hub>"]
 edition = "2018"
 license = "Unlicense"
 homepage = "https://substrate.dev"
-repository = "https://github.com/paritytech/substrate/"
+repository = "https://github.com/substrate-developer-hub/substrate-node-template/"
+publish = false

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }

-pallet-aura = { version = "2.0.0", default-features = false, path = "../../../frame/aura" }
-pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" }
-frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" }
-pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" }
-pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" }
-pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" }
-frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" }
-pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" }
-pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" }
-frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" }
-serde = { version = "1.0.101", optional = true, features = ["derive"] }
-sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" }
-sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0"}
-sp-consensus-aura = { version = "0.8.0", default-features = false, path = "../../../primitives/consensus/aura" }
-sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" }
-sp-inherents = { path = "../../../primitives/inherents", default-features
= false, version = "2.0.0"} -sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +pallet-aura = { version = "4.0.0-dev", default-features = false, path = "../../../frame/aura" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } +pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } +pallet-randomness-collective-flip = { version = "4.0.0-dev", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } +frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "4.0.0-dev"} +sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "4.0.0-dev"} +sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs -frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } # Used for runtime benchmarking 
-frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-system-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } hex-literal = { version = "0.3.1", optional = true } -template = { version = "2.0.0", default-features = false, path = "../pallets/template", package = "pallet-template" } +pallet-template = { version = "3.0.0", default-features = false, path = "../pallets/template" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "frame-executive/std", "frame-support/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", "pallet-aura/std", "pallet-balances/std", "pallet-grandpa/std", "pallet-randomness-collective-flip/std", "pallet-sudo/std", + "pallet-template/std", "pallet-timestamp/std", - "pallet-transaction-payment/std", "pallet-transaction-payment-rpc-runtime-api/std", - "serde", + "pallet-transaction-payment/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -76,17 +80,15 @@ std = [ "sp-std/std", "sp-transaction-pool/std", "sp-version/std", - "frame-system/std", - "frame-system-rpc-runtime-api/std", - "template/std", ] runtime-benchmarks = [ - "sp-runtime/runtime-benchmarks", "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system-benchmarking", - "hex-literal", "frame-system/runtime-benchmarks", + "hex-literal", "pallet-balances/runtime-benchmarks", + "pallet-template/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", ] diff --git a/bin/node-template/runtime/build.rs b/bin/node-template/runtime/build.rs index 9654139121f6f..9b53d2457dffd 100644 --- a/bin/node-template/runtime/build.rs +++ b/bin/node-template/runtime/build.rs @@ -1,9 +1,8 @@ -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .export_heap_base() .import_memory() .build() diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index e96de63731745..eecc93e166666 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -1,45 +1,47 @@ #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit="256"] +#![recursion_limit = "256"] // Make the WASM binary available. 
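// The `include!` below pulls in constants generated by `substrate-wasm-builder`
// from the build script shown above. In this era the generated file exposes
// roughly:
//
//     pub const WASM_BINARY: Option<&[u8]> = Some(&[...]);
//     pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(&[...]);
//
// Node-side code (e.g. the chain spec) consumes it along these lines (sketch):
//
//     let wasm = WASM_BINARY.ok_or("wasm binary not available")?;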
#[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use sp_std::prelude::*; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - ApplyExtrinsicResult, generic, create_runtime_str, impl_opaque_keys, MultiSignature, - transaction_validity::{TransactionValidity, TransactionSource}, -}; -use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, IdentityLookup, Verify, IdentifyAccount, NumberFor, Saturating, +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use pallet_grandpa::fg_primitives; -use sp_version::RuntimeVersion; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiSignature, +}; +use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; +use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use pallet_timestamp::Call as TimestampCall; -pub use pallet_balances::Call as BalancesCall; -pub use sp_runtime::{Permill, Perbill}; pub use frame_support::{ - construct_runtime, parameter_types, StorageValue, - traits::{KeyOwnerProofSystem, Randomness}, + construct_runtime, parameter_types, + traits::{KeyOwnerProofSystem, Randomness, StorageInfo}, weights::{ - Weight, IdentityFee, constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + IdentityFee, Weight, }, + StorageValue, }; +pub use pallet_balances::Call as BalancesCall; +pub use pallet_timestamp::Call as TimestampCall; +use pallet_transaction_payment::CurrencyAdapter; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; /// Import the template pallet. -pub use template; +pub use pallet_template; /// An index to a block. pub type BlockNumber = u32; @@ -51,10 +53,6 @@ pub type Signature = MultiSignature; /// to the public key of our transaction signing scheme. pub type AccountId = <::Signer as IdentifyAccount>::AccountId; -/// The type for looking up accounts. We don't expect more than 4 billion of them, but you -/// never know... -pub type AccountIndex = u32; - /// Balance of an account. pub type Balance = u128; @@ -64,9 +62,6 @@ pub type Index = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; -/// Digest item type. -pub type DigestItem = generic::DigestItem; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. 
They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -91,18 +86,34 @@ pub mod opaque { } } +// To learn more about runtime versioning and what each of the following value means: +// https://substrate.dev/docs/en/knowledgebase/runtime/upgrades#runtime-versioning +#[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), impl_name: create_runtime_str!("node-template"), authoring_version: 1, - spec_version: 1, + // The version of the runtime specification. A full node will not attempt to use its native + // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + // `spec_version`, and `authoring_version` are the same between Wasm and native. + // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use + // the compatible custom types. + spec_version: 100, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; +/// This determines the average expected block time that we are targeting. +/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. +/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +/// up by `pallet_aura` to implement `fn slot_duration()`. +/// +/// Change this to adjust the block time. pub const MILLISECS_PER_BLOCK: u64 = 6000; +// NOTE: Currently it is not possible to change the slot duration after the chain has started. +// Attempting to do so will brick block production. pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; // Time is measured by number of blocks. @@ -113,35 +124,37 @@ pub const DAYS: BlockNumber = HOURS * 24; /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + parameter_types! { + pub const Version: RuntimeVersion = VERSION; pub const BlockHashCount: BlockNumber = 2400; /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. - pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get() - .saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get(); - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; - pub const Version: RuntimeVersion = VERSION; + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength + ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { /// The basic call filter to use in dispatchable. - type BaseCallFilter = (); + type BaseCallFilter = frame_support::traits::Everything; + /// Block & extrinsics weights: base values and limits. + type BlockWeights = BlockWeights; + /// The maximum length of a block (in bytes). 
+	type BlockLength = BlockLength;
 	/// The identifier used to distinguish between accounts.
 	type AccountId = AccountId;
 	/// The aggregated dispatch type that is available for extrinsics.
 	type Call = Call;
 	/// The lookup mechanism to get account ID from whatever is passed in dispatchers.
-	type Lookup = IdentityLookup<AccountId>;
+	type Lookup = AccountIdLookup<AccountId, ()>;
 	/// The index type for storing how many extrinsics an account has signed.
 	type Index = Index;
 	/// The index type for blocks.
@@ -158,24 +171,8 @@ impl frame_system::Trait for Runtime {
 	type Origin = Origin;
 	/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
 	type BlockHashCount = BlockHashCount;
-	/// Maximum weight of each block.
-	type MaximumBlockWeight = MaximumBlockWeight;
 	/// The weight of database operations that the runtime can invoke.
 	type DbWeight = RocksDbWeight;
-	/// The weight of the overhead invoked on the block import process, independent of the
-	/// extrinsics included in that block.
-	type BlockExecutionWeight = BlockExecutionWeight;
-	/// The base weight of any extrinsic processed by the runtime, independent of the
-	/// logic of that extrinsic. (Signature verification, nonce increment, fee, etc...)
-	type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
-	/// The maximum weight that a single extrinsic of `Normal` dispatch class can have,
-	/// idependent of the logic of that extrinsics. (Roughly max block weight - average on
-	/// initialize cost).
-	type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
-	/// Maximum size of all encoded transactions (in bytes) that are allowed in one block.
-	type MaximumBlockLength = MaximumBlockLength;
-	/// Portion of the block weight that is available to all normal transactions.
-	type AvailableBlockRatio = AvailableBlockRatio;
 	/// Version of the runtime.
 	type Version = Version;
 	/// Converts a module to the index of the module in `construct_runtime!`.
@@ -190,13 +187,25 @@ impl frame_system::Trait for Runtime {
 	type AccountData = pallet_balances::AccountData<Balance>;
 	/// Weight information for the extrinsics of this pallet.
 	type SystemWeightInfo = ();
+	/// This is used as an identifier of the chain. 42 is the generic substrate prefix.
+	type SS58Prefix = SS58Prefix;
+	/// The set code logic, just the default since we're not a parachain.
+	type OnSetCode = ();
 }

-impl pallet_aura::Trait for Runtime {
+impl pallet_randomness_collective_flip::Config for Runtime {}
+
+parameter_types! {
+	pub const MaxAuthorities: u32 = 32;
+}
+
+impl pallet_aura::Config for Runtime {
 	type AuthorityId = AuraId;
+	type DisabledValidators = ();
+	type MaxAuthorities = MaxAuthorities;
 }

-impl pallet_grandpa::Trait for Runtime {
+impl pallet_grandpa::Config for Runtime {
 	type Event = Event;
 	type Call = Call;
@@ -219,7 +228,7 @@ parameter_types! {
 	pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
 }

-impl pallet_timestamp::Trait for Runtime {
+impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
 	type OnTimestampSet = Aura;
@@ -232,8 +241,10 @@ parameter_types! {
 	pub const MaxLocks: u32 = 50;
 }

-impl pallet_balances::Trait for Runtime {
+impl pallet_balances::Config for Runtime {
 	type MaxLocks = MaxLocks;
+	type MaxReserves = ();
+	type ReserveIdentifier = [u8; 8];
 	/// The type for recording an account's balance.
 	type Balance = Balance;
 	/// The ubiquitous event type.
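As a quick illustration of the block-limit machinery configured above: `with_sensible_defaults` derives per-dispatch-class limits from a target maximum weight and the normal-dispatch ratio. A minimal sketch, assuming the `frame-system` version pinned by this diff:

use frame_support::weights::constants::WEIGHT_PER_SECOND;
use frame_system::limits::BlockWeights;
use sp_runtime::Perbill;

fn main() {
	// Same 2-seconds-of-compute budget and 75% normal ratio as the runtime.
	let limits = BlockWeights::with_sensible_defaults(2 * WEIGHT_PER_SECOND, Perbill::from_percent(75));
	// The whole-block budget equals the weight handed in.
	assert_eq!(limits.max_block, 2 * WEIGHT_PER_SECOND);
}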
@@ -241,28 +252,27 @@ impl pallet_balances::Trait for Runtime {
 	type DustRemoval = ();
 	type ExistentialDeposit = ExistentialDeposit;
 	type AccountStore = System;
-	type WeightInfo = ();
+	type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
 }

 parameter_types! {
 	pub const TransactionByteFee: Balance = 1;
 }

-impl pallet_transaction_payment::Trait for Runtime {
-	type Currency = Balances;
-	type OnTransactionPayment = ();
+impl pallet_transaction_payment::Config for Runtime {
+	type OnChargeTransaction = CurrencyAdapter<Balances, ()>;
 	type TransactionByteFee = TransactionByteFee;
 	type WeightToFee = IdentityFee<Balance>;
 	type FeeMultiplierUpdate = ();
 }

-impl pallet_sudo::Trait for Runtime {
+impl pallet_sudo::Config for Runtime {
 	type Event = Event;
 	type Call = Call;
 }

-/// Configure the pallet template in pallets/template.
-impl template::Trait for Runtime {
+/// Configure the pallet-template in pallets/template.
+impl pallet_template::Config for Runtime {
 	type Event = Event;
 }

@@ -273,29 +283,25 @@ construct_runtime!(
 		NodeBlock = opaque::Block,
 		UncheckedExtrinsic = UncheckedExtrinsic
 	{
-		System: frame_system::{Module, Call, Config, Storage, Event<T>},
-		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage},
-		Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
-		Aura: pallet_aura::{Module, Config<T>, Inherent},
-		Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event},
-		Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
-		TransactionPayment: pallet_transaction_payment::{Module, Storage},
-		Sudo: pallet_sudo::{Module, Call, Config<T>, Storage, Event<T>},
-		// Include the custom logic from the template pallet in the runtime.
-		TemplateModule: template::{Module, Call, Storage, Event<T>},
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage},
+		Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
+		Aura: pallet_aura::{Pallet, Config<T>},
+		Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+		TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
+		Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
+		// Include the custom logic from the pallet-template in the runtime.
+		TemplateModule: pallet_template::{Pallet, Call, Storage, Event<T>},
 	}
 );

 /// The address format for describing accounts.
-pub type Address = AccountId;
+pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
 /// Block header type as expected by this runtime.
 pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
 /// Block type as expected by this runtime.
 pub type Block = generic::Block<Header, UncheckedExtrinsic>;
-/// A Block signed with a Justification
-pub type SignedBlock = generic::SignedBlock<Block>;
-/// BlockId type as expected by this runtime.
-pub type BlockId = generic::BlockId<Block>;
 /// The SignedExtension to the basic transaction logic.
 pub type SignedExtra = (
 	frame_system::CheckSpecVersion<Runtime>,
@@ -304,19 +310,17 @@ pub type SignedExtra = (
 	frame_system::CheckEra<Runtime>,
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>
+	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
-/// Extrinsic type that has already been checked.
-pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
 /// Executive: handles dispatch to the various modules.
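// How the `SignedExtra` tuple above is used in practice: a client constructs
// one instance per extrinsic it signs, one entry per check, in order. A sketch
// (the `CheckTxVersion`/`CheckGenesis` entries sit in context lines elided
// between the two hunks above; constructors per the FRAME of this era):
//
//     let extra: SignedExtra = (
//         frame_system::CheckSpecVersion::<Runtime>::new(),
//         frame_system::CheckTxVersion::<Runtime>::new(),
//         frame_system::CheckGenesis::<Runtime>::new(),
//         frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::Immortal),
//         frame_system::CheckNonce::<Runtime>::from(0),
//         frame_system::CheckWeight::<Runtime>::new(),
//         pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(0),
//     );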
 pub type Executive = frame_executive::Executive<
 	Runtime,
 	Block,
 	frame_system::ChainContext<Runtime>,
 	Runtime,
-	AllModules,
+	AllPallets,
 >;

 impl_runtime_apis! {
@@ -326,7 +330,7 @@
 		}

 		fn execute_block(block: Block) {
-			Executive::execute_block(block)
+			Executive::execute_block(block);
 		}

 		fn initialize_block(header: &<Block as BlockT>::Header) {
@@ -336,7 +340,7 @@
 	impl sp_api::Metadata<Block> for Runtime {
 		fn metadata() -> OpaqueMetadata {
-			Runtime::metadata().into()
+			OpaqueMetadata::new(Runtime::metadata().into())
 		}
 	}

@@ -359,18 +363,15 @@
 		) -> sp_inherents::CheckInherentsResult {
 			data.check_extrinsics(&block)
 		}
-
-		fn random_seed() -> <Block as BlockT>::Hash {
-			RandomnessCollectiveFlip::random_seed()
-		}
 	}

 	impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
 		fn validate_transaction(
 			source: TransactionSource,
 			tx: <Block as BlockT>::Extrinsic,
+			block_hash: <Block as BlockT>::Hash,
 		) -> TransactionValidity {
-			Executive::validate_transaction(source, tx)
+			Executive::validate_transaction(source, tx, block_hash)
 		}
 	}

@@ -381,12 +382,12 @@
 	}

 	impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
-		fn slot_duration() -> u64 {
-			Aura::slot_duration()
+		fn slot_duration() -> sp_consensus_aura::SlotDuration {
+			sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
 		}

 		fn authorities() -> Vec<AuraId> {
-			Aura::authorities()
+			Aura::authorities().into_inner()
 		}
 	}

@@ -407,6 +408,10 @@
 			Grandpa::grandpa_authorities()
 		}

+		fn current_set_id() -> fg_primitives::SetId {
+			Grandpa::current_set_id()
+		}
+
 		fn submit_report_equivocation_unsigned_extrinsic(
 			_equivocation_proof: fg_primitives::EquivocationProof<
 				<Block as BlockT>::Hash,
@@ -441,17 +446,43 @@
 		) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
 			TransactionPayment::query_info(uxt, len)
 		}
+		fn query_fee_details(
+			uxt: <Block as BlockT>::Extrinsic,
+			len: u32,
+		) -> pallet_transaction_payment::FeeDetails<Balance> {
+			TransactionPayment::query_fee_details(uxt, len)
+		}
 	}

 	#[cfg(feature = "runtime-benchmarks")]
 	impl frame_benchmarking::Benchmark<Block> for Runtime {
+		fn benchmark_metadata(extra: bool) -> (
+			Vec<frame_benchmarking::BenchmarkList>,
+			Vec<frame_support::traits::StorageInfo>,
+		) {
+			use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList};
+			use frame_support::traits::StorageInfoTrait;
+			use frame_system_benchmarking::Pallet as SystemBench;
+
+			let mut list = Vec::<BenchmarkList>::new();
+
+			list_benchmark!(list, extra, frame_system, SystemBench::<Runtime>);
+			list_benchmark!(list, extra, pallet_balances, Balances);
+			list_benchmark!(list, extra, pallet_timestamp, Timestamp);
+			list_benchmark!(list, extra, pallet_template, TemplateModule);
+
+			let storage_info = AllPalletsWithSystem::storage_info();
+
+			return (list, storage_info)
+		}
+
 		fn dispatch_benchmark(
 			config: frame_benchmarking::BenchmarkConfig
 		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
 			use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};

-			use frame_system_benchmarking::Module as SystemBench;
-			impl frame_system_benchmarking::Trait for Runtime {}
+			use frame_system_benchmarking::Pallet as SystemBench;
+			impl frame_system_benchmarking::Config for Runtime {}

 			let whitelist: Vec<TrackedStorageKey> = vec![
 				// Block Number
@@ -472,6 +503,7 @@ impl_runtime_apis!
{ add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_timestamp, Timestamp); + add_benchmark!(params, batches, pallet_template, TemplateModule); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) diff --git a/bin/node-template/scripts/docker_run.sh b/bin/node-template/scripts/docker_run.sh new file mode 100644 index 0000000000000..0bac44b4cfb3b --- /dev/null +++ b/bin/node-template/scripts/docker_run.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# This script is meant to be run on Unix/Linux based systems +set -e + +echo "*** Start Substrate node template ***" + +cd $(dirname ${BASH_SOURCE[0]})/.. + +docker-compose down --remove-orphans +docker-compose run --rm --service-ports dev $@ diff --git a/bin/node-template/scripts/init.sh b/bin/node-template/scripts/init.sh index 1405a41ef333e..f976f7235d700 100755 --- a/bin/node-template/scripts/init.sh +++ b/bin/node-template/scripts/init.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash - +# This script is meant to be run on Unix/Linux based systems set -e echo "*** Initializing WASM build environment" diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 88362f7e51022..b19a71966fb87 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-bench" -version = "0.8.0" +version = "0.9.0-dev" authors = ["Parity Technologies "] description = "Substrate node integration benchmarks." edition = "2018" @@ -11,33 +11,34 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] log = "0.4.8" node-primitives = { version = "2.0.0", path = "../primitives" } -node-testing = { version = "2.0.0", path = "../testing" } -node-runtime = { version = "2.0.0", path = "../runtime" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0", path = "../../../client/api/" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -serde = "1.0.101" -serde_json = "1.0.41" +node-testing = { version = "3.0.0-dev", path = "../testing" } +node-runtime = { version = "3.0.0-dev", path = "../runtime" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } +serde = "1.0.126" +serde_json = "1.0.68" structopt = "0.3" derive_more = "0.99.2" -kvdb = "0.7" -kvdb-rocksdb = "0.9.1" -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +kvdb = "0.10.0" +kvdb-rocksdb = "0.14.0" +sp-trie = { version = "4.0.0-dev", path = "../../../primitives/trie" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-consensus = { version = 
"0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-basic-authorship = { version = "0.10.0-dev", path = "../../../client/basic-authorship" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/timestamp" } +sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" hex = "0.4.0" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -parity-db = { version = "0.1.2" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } +parity-util-mem = { version = "0.10.0", default-features = false, features = [ + "primitive-types", +] } +parity-db = { version = "0.3" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } futures = { version = "0.3.4", features = ["thread-pool"] } diff --git a/bin/node/bench/src/common.rs b/bin/node/bench/src/common.rs index 2637d6e9bd04d..d04d79e9907af 100644 --- a/bin/node/bench/src/common.rs +++ b/bin/node/bench/src/common.rs @@ -1,7 +1,6 @@ - // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -45,4 +44,4 @@ impl SizeType { SizeType::Custom(val) => Some(*val), } } -} \ No newline at end of file +} diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index 5506dc426de0b..1532e02bd3ef6 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,35 +24,22 @@ //! DO NOT depend on user input). Thus transaction generation should be //! based on randomized data. 
-use std::{ - borrow::Cow, - collections::HashMap, - pin::Pin, - sync::Arc, -}; use futures::Future; +use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc}; use node_primitives::Block; -use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType}; -use sp_runtime::{ - generic::BlockId, - traits::NumberFor, - OpaqueExtrinsic, -}; -use sp_transaction_pool::{ - ImportNotificationStream, - PoolFuture, - PoolStatus, - TransactionFor, - TransactionSource, - TransactionStatusStreamFor, - TxHash, +use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; +use sc_transaction_pool_api::{ + ImportNotificationStream, PoolFuture, PoolStatus, TransactionFor, TransactionSource, + TransactionStatusStreamFor, TxHash, }; -use sp_consensus::{Environment, Proposer, RecordProof}; +use sp_consensus::{Environment, Proposer}; +use sp_inherents::InherentDataProvider; +use sp_runtime::{generic::BlockId, traits::NumberFor, OpaqueExtrinsic}; use crate::{ common::SizeType, - core::{self, Path, Mode}, + core::{self, Mode, Path}, }; pub struct ConstructionBenchmarkDescription { @@ -71,7 +58,6 @@ pub struct ConstructionBenchmark { impl core::BenchmarkDescription for ConstructionBenchmarkDescription { fn path(&self) -> Path { - let mut path = Path::new(&["node", "proposer"]); match self.profile { @@ -103,11 +89,7 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription { fn setup(self: Box) -> Box { let mut extrinsics: Vec> = Vec::new(); - let mut bench_db = BenchDb::with_key_types( - self.database_type, - 50_000, - self.key_types - ); + let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types); let client = bench_db.client(); @@ -126,11 +108,9 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription { fn name(&self) -> Cow<'static, str> { format!( "Block construction ({:?}/{}, {:?}, {:?} backend)", - self.block_type, - self.size, - self.profile, - self.database_type, - ).into() + self.block_type, self.size, self.profile, self.database_type, + ) + .into() } } @@ -138,7 +118,9 @@ impl core::Benchmark for ConstructionBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let context = self.database.create_context(self.profile); - let _ = context.client.runtime_version_at(&BlockId::Number(0)) + let _ = context + .client + .runtime_version_at(&BlockId::Number(0)) .expect("Failed to get runtime version") .spec_version; @@ -151,28 +133,31 @@ impl core::Benchmark for ConstructionBenchmark { context.client.clone(), self.transactions.clone().into(), None, + None, ); - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - inherent_data_providers - .register_provider(sp_timestamp::InherentDataProvider) - .expect("Failed to register timestamp data provider"); + let timestamp_provider = sp_timestamp::InherentDataProvider::from_system_time(); let start = std::time::Instant::now(); - let proposer = futures::executor::block_on(proposer_factory.init( - &context.client.header(&BlockId::number(0)) - .expect("Database error querying block #0") - .expect("Block #0 should exist"), - )).expect("Proposer initialization failed"); - - let _block = futures::executor::block_on( - proposer.propose( - inherent_data_providers.create_inherent_data().expect("Create inherent data failed"), - Default::default(), - std::time::Duration::from_secs(20), - RecordProof::Yes, + let proposer = futures::executor::block_on( + proposer_factory.init( + &context + .client + .header(&BlockId::number(0)) + .expect("Database 
error querying block #0") + .expect("Block #0 should exist"), ), - ).map(|r| r.block).expect("Proposing failed"); + ) + .expect("Proposer initialization failed"); + + let _block = futures::executor::block_on(proposer.propose( + timestamp_provider.create_inherent_data().expect("Create inherent data failed"), + Default::default(), + std::time::Duration::from_secs(20), + None, + )) + .map(|r| r.block) + .expect("Proposing failed"); let elapsed = start.elapsed(); @@ -192,14 +177,11 @@ pub struct PoolTransaction { impl From for PoolTransaction { fn from(e: OpaqueExtrinsic) -> Self { - PoolTransaction { - data: e, - hash: node_primitives::Hash::zero(), - } + PoolTransaction { data: e, hash: node_primitives::Hash::zero() } } } -impl sp_transaction_pool::InPoolTransaction for PoolTransaction { +impl sc_transaction_pool_api::InPoolTransaction for PoolTransaction { type Transaction = OpaqueExtrinsic; type Hash = node_primitives::Hash; @@ -211,25 +193,35 @@ impl sp_transaction_pool::InPoolTransaction for PoolTransaction { &self.hash } - fn priority(&self) -> &u64 { unimplemented!() } + fn priority(&self) -> &u64 { + unimplemented!() + } - fn longevity(&self) -> &u64 { unimplemented!() } + fn longevity(&self) -> &u64 { + unimplemented!() + } - fn requires(&self) -> &[Vec] { unimplemented!() } + fn requires(&self) -> &[Vec] { + unimplemented!() + } - fn provides(&self) -> &[Vec] { unimplemented!() } + fn provides(&self) -> &[Vec] { + unimplemented!() + } - fn is_propagable(&self) -> bool { unimplemented!() } + fn is_propagable(&self) -> bool { + unimplemented!() + } } #[derive(Clone, Debug)] pub struct Transactions(Vec>); -impl sp_transaction_pool::TransactionPool for Transactions { +impl sc_transaction_pool_api::TransactionPool for Transactions { type Block = Block; type Hash = node_primitives::Hash; type InPoolTransaction = PoolTransaction; - type Error = sp_transaction_pool::error::Error; + type Error = sc_transaction_pool_api::error::Error; /// Returns a future that imports a bunch of unverified transactions to the pool. fn submit_at( @@ -237,7 +229,7 @@ impl sp_transaction_pool::TransactionPool for Transactions { _at: &BlockId, _source: TransactionSource, _xts: Vec>, - ) -> PoolFuture>, Self::Error> { + ) -> PoolFuture>, Self::Error> { unimplemented!() } @@ -256,18 +248,25 @@ impl sp_transaction_pool::TransactionPool for Transactions { _at: &BlockId, _source: TransactionSource, _xt: TransactionFor, - ) -> PoolFuture>, Self::Error> { + ) -> PoolFuture>>, Self::Error> { unimplemented!() } - fn ready_at(&self, _at: NumberFor) - -> Pin> + Send>> + Send>> - { - let iter: Box> + Send> = Box::new(self.0.clone().into_iter()); + fn ready_at( + &self, + _at: NumberFor, + ) -> Pin< + Box< + dyn Future> + Send>> + + Send, + >, + > { + let iter: Box> + Send> = + Box::new(self.0.clone().into_iter()); Box::pin(futures::future::ready(iter)) } - fn ready(&self) -> Box> + Send> { + fn ready(&self) -> Box> + Send> { unimplemented!() } diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index 6faa7b72721f4..56c0f3526a4dc 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,8 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{fmt, borrow::{Cow, ToOwned}}; use serde::Serialize; +use std::{ + borrow::{Cow, ToOwned}, + fmt, +}; pub struct Path(Vec); @@ -33,7 +36,11 @@ impl Path { } pub fn full(&self) -> String { - self.0.iter().fold(String::new(), |mut val, next| { val.push_str("::"); val.push_str(next); val }) + self.0.iter().fold(String::new(), |mut val, next| { + val.push_str("::"); + val.push_str(next); + val + }) } pub fn has(&self, path: &str) -> bool { @@ -115,10 +122,7 @@ impl fmt::Display for BenchmarkOutput { } } -pub fn run_benchmark( - benchmark: Box, - mode: Mode, -) -> BenchmarkOutput { +pub fn run_benchmark(benchmark: Box, mode: Mode) -> BenchmarkOutput { let name = benchmark.name().to_owned(); let mut benchmark = benchmark.setup(); @@ -133,11 +137,7 @@ pub fn run_benchmark( let raw_average = (durations.iter().sum::() / (durations.len() as u128)) as u64; let average = (durations.iter().skip(10).take(30).sum::() / 30) as u64; - BenchmarkOutput { - name: name.into(), - raw_average, - average, - } + BenchmarkOutput { name: name.into(), raw_average, average } } macro_rules! matrix( diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 759a4299c7275..e3aa1192b5d1f 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -30,14 +30,15 @@ use crate::simple_trie::SimpleTrie; /// return root. pub fn generate_trie( db: Arc, - key_values: impl IntoIterator, Vec)>, + key_values: impl IntoIterator, Vec)>, ) -> Hash { let mut root = Hash::default(); let (db, overlay) = { let mut overlay = HashMap::new(); overlay.insert( - hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").expect("null key is valid"), + hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") + .expect("null key is valid"), Some(vec![0]), ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; @@ -50,7 +51,7 @@ pub fn generate_trie( trie_db.commit(); } - ( trie.db, overlay ) + (trie.db, overlay) }; let mut transaction = db.transaction(); diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index ae28a20089e10..5bbf1ddf3b73e 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -32,15 +32,15 @@ use std::borrow::Cow; -use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType}; use node_primitives::Block; +use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; use sc_client_api::backend::Backend; use sp_runtime::generic::BlockId; use sp_state_machine::InspectState; use crate::{ common::SizeType, - core::{self, Path, Mode}, + core::{self, Mode, Path}, }; pub struct ImportBenchmarkDescription { @@ -60,7 +60,6 @@ pub struct ImportBenchmark { impl core::BenchmarkDescription for ImportBenchmarkDescription { fn path(&self) -> Path { - let mut path = Path::new(&["node", "import"]); match self.profile { @@ -91,11 +90,7 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription { fn setup(self: Box) -> Box { let profile = self.profile; - let mut bench_db = BenchDb::with_key_types( - self.database_type, - 50_000, - self.key_types - ); + let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types); let block = bench_db.generate_block(self.block_type.to_content(self.size.transactions())); Box::new(ImportBenchmark { database: bench_db, @@ -108,11 +103,9 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription { fn name(&self) -> Cow<'static, str> { format!( "Block import ({:?}/{}, {:?}, {:?} backend)", - self.block_type, - self.size, - self.profile, - self.database_type, - ).into() + self.block_type, self.size, self.profile, self.database_type, + ) + .into() } } @@ -120,7 +113,9 @@ impl core::Benchmark for ImportBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let mut context = self.database.create_context(self.profile); - let _ = context.client.runtime_version_at(&BlockId::Number(0)) + let _ = context + .client + .runtime_version_at(&BlockId::Number(0)) .expect("Failed to get runtime version") .spec_version; @@ -133,7 +128,8 @@ impl core::Benchmark for ImportBenchmark { let elapsed = start.elapsed(); // Sanity checks. 
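// The sanity check below relies on simple event arithmetic: the one unsigned
// extrinsic (timestamp) deposits a single event, and each signed transfer
// deposits five, so for a transfer block the expectation reduces to:
//
//     let expected_events = (block.extrinsics.len() - 1) * 5 + 1;
//
// and to `(len - 1) * 2 + 1` for a no-op block, as asserted further down.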
- context.client + context + .client .state_at(&BlockId::number(1)) .expect("state_at failed for block#1") .inspect_state(|| { @@ -142,7 +138,8 @@ impl core::Benchmark for ImportBenchmark { // should be 5 per signed extrinsic + 1 per unsigned // we have 1 unsigned and the rest are signed in the block // those 5 events per signed are: - // - new account (RawEvent::NewAccount) as we always transfer fund to non-existant account + // - new account (RawEvent::NewAccount) as we always transfer fund to + // non-existant account // - endowed (RawEvent::Endowed) for this new account // - successful transfer (RawEvent::Transfer) for this transfer operation // - deposit event for charging transaction fee @@ -155,19 +152,17 @@ impl core::Benchmark for ImportBenchmark { BlockType::Noop => { assert_eq!( node_runtime::System::events().len(), - // should be 2 per signed extrinsic + 1 per unsigned // we have 1 unsigned and the rest are signed in the block // those 2 events per signed are: // - deposit event for charging transaction fee // - extrinsic success - (self.block.extrinsics.len() - 1) * 2 + 1, + (self.block.extrinsics.len() - 1) * 2 + 1, ); }, _ => {}, } - } - ); + }); if mode == Mode::Profile { std::thread::park_timeout(std::time::Duration::from_secs(1)); diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index 46b659dd88387..4b006b387d0ea 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,9 +18,10 @@ mod common; mod construct; -#[macro_use] mod core; -mod import; +#[macro_use] +mod core; mod generator; +mod import; mod simple_trie; mod state_sizes; mod tempdb; @@ -29,15 +30,15 @@ mod txpool; use structopt::StructOpt; -use node_testing::bench::{Profile, KeyTypes, BlockType, DatabaseType as BenchDataBaseType}; +use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes, Profile}; use crate::{ common::SizeType, + construct::ConstructionBenchmarkDescription, core::{run_benchmark, Mode as BenchmarkMode}, - tempdb::DatabaseType, import::ImportBenchmarkDescription, - trie::{TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription, DatabaseSize}, - construct::ConstructionBenchmarkDescription, + tempdb::DatabaseType, + trie::{DatabaseSize, TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription}, txpool::PoolBenchmarkDescription, }; @@ -92,14 +93,25 @@ fn main() { SizeType::Large, SizeType::Full, SizeType::Custom(opt.transactions.unwrap_or(0)), - ].iter() { + ] + .iter() + { for block_type in [ BlockType::RandomTransfersKeepAlive, BlockType::RandomTransfersReaping, BlockType::Noop, - ].iter() { - for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb].iter() { - import_benchmarks.push((profile, size.clone(), block_type.clone(), database_type)); + ] + .iter() + { + for database_type in + [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb].iter() + { + import_benchmarks.push(( + profile, + size.clone(), + block_type.clone(), + database_type, + )); } } } @@ -163,7 +175,7 @@ fn main() { println!("{}: {}", benchmark.name(), benchmark.path().full()) } } - return; + return } let mut results = Vec::new(); @@ -183,7 +195,8 @@ fn main() { } if opt.json { - let json_result: String = 
serde_json::to_string(&results).expect("Failed to construct json"); + let json_result: String = + serde_json::to_string(&results).expect("Failed to construct json"); println!("{}", json_result); } } diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index 3cfd7ddb300a9..651772c71575f 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,10 +18,10 @@ use std::{collections::HashMap, sync::Arc}; +use hash_db::{AsHashDB, HashDB, Hasher as _, Prefix}; use kvdb::KeyValueDB; use node_primitives::Hash; use sp_trie::DBValue; -use hash_db::{HashDB, AsHashDB, Prefix, Hasher as _}; pub type Hasher = sp_core::Blake2Hasher; @@ -32,7 +32,9 @@ pub struct SimpleTrie<'a> { } impl<'a> AsHashDB for SimpleTrie<'a> { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { + &*self + } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { &mut *self @@ -43,7 +45,7 @@ impl<'a> HashDB for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { - return value.clone(); + return value.clone() } self.db.get(0, &key).expect("Database backend error") } diff --git a/bin/node/bench/src/state_sizes.rs b/bin/node/bench/src/state_sizes.rs index d35989f61be34..27112ed42d455 100644 --- a/bin/node/bench/src/state_sizes.rs +++ b/bin/node/bench/src/state_sizes.rs @@ -1,21 +1,23 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. +// This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Parity is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// along with this program. If not, see . /// Kusama value size distribution -pub const KUSAMA_STATE_DISTRIBUTION: &'static[(u32, u32)] = &[ +pub const KUSAMA_STATE_DISTRIBUTION: &'static [(u32, u32)] = &[ (32, 35), (33, 20035), (34, 5369), @@ -4753,4 +4755,4 @@ pub const KUSAMA_STATE_DISTRIBUTION: &'static[(u32, u32)] = &[ (1516670, 1), (1605731, 1), (1605821, 1), -]; \ No newline at end of file +]; diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 4020fd1029368..518c0dd96127c 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,9 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{io, sync::Arc}; -use kvdb::{KeyValueDB, DBTransaction}; -use kvdb_rocksdb::{DatabaseConfig, Database}; +use kvdb::{DBTransaction, KeyValueDB}; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use std::{io, path::PathBuf, sync::Arc}; #[derive(Debug, Clone, Copy, derive_more::Display)] pub enum DatabaseType { @@ -44,13 +44,14 @@ impl KeyValueDB for ParityDbWrapper { /// Write a transaction of changes to the buffer. fn write(&self, transaction: DBTransaction) -> io::Result<()> { - self.0.commit( - transaction.ops.iter().map(|op| match op { - kvdb::DBOp::Insert { col, key, value } => (*col as u8, &key[key.len() - 32..], Some(value.to_vec())), + self.0 + .commit(transaction.ops.iter().map(|op| match op { + kvdb::DBOp::Insert { col, key, value } => + (*col as u8, &key[key.len() - 32..], Some(value.to_vec())), kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None), - kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!() - }) - ).expect("db error"); + kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(), + })) + .expect("db error"); Ok(()) } @@ -90,21 +91,18 @@ impl TempDatabase { match db_type { DatabaseType::RocksDb => { let db_cfg = DatabaseConfig::with_columns(1); - let db = Database::open(&db_cfg, &self.0.path().to_string_lossy()).expect("Database backend error"); + let db = Database::open(&db_cfg, &self.0.path()).expect("Database backend error"); Arc::new(db) }, - DatabaseType::ParityDb => { - Arc::new(ParityDbWrapper({ - let mut options = parity_db::Options::with_columns(self.0.path(), 1); - let mut column_options = &mut options.columns[0]; - column_options.ref_counted = true; - column_options.preimage = true; - column_options.uniform = true; - parity_db::Db::open(&options).expect("db open error") - })) - } + DatabaseType::ParityDb => Arc::new(ParityDbWrapper({ + let mut options = parity_db::Options::with_columns(self.0.path(), 1); + let mut column_options = &mut options.columns[0]; + column_options.ref_counted = true; + column_options.preimage = true; + column_options.uniform = true; + parity_db::Db::open_or_create(&options).expect("db open error") + })), } - } } @@ -121,15 +119,10 @@ impl Clone for TempDatabase { ); let self_db_files = std::fs::read_dir(self_dir) .expect("failed to list file in seed dir") - .map(|f_result| - f_result.expect("failed to read file in seed db") - .path() - ).collect(); - fs_extra::copy_items( - &self_db_files, - new_dir.path(), - &fs_extra::dir::CopyOptions::new(), - ).expect("Copy of seed database is ok"); + .map(|f_result| f_result.expect("failed to read file in seed db").path()) + .collect::>(); + fs_extra::copy_items(&self_db_files, new_dir.path(), &fs_extra::dir::CopyOptions::new()) + .expect("Copy of seed database is ok"); TempDatabase(new_dir) } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index eb6c574e27170..a17e386ca879b 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,13 +18,13 @@ //! Trie benchmark (integrated). -use std::{borrow::Cow, collections::HashMap, sync::Arc}; +use hash_db::Prefix; use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; -use hash_db::Prefix; use sp_state_machine::Backend as _; use sp_trie::{trie_types::TrieDBMut, TrieMut as _}; +use std::{borrow::Cow, collections::HashMap, sync::Arc}; use node_primitives::Hash; @@ -32,7 +32,7 @@ use crate::{ core::{self, Mode, Path}, generator::generate_trie, simple_trie::SimpleTrie, - tempdb::{TempDatabase, DatabaseType}, + tempdb::{DatabaseType, TempDatabase}, }; pub const SAMPLE_SIZE: usize = 100; @@ -142,10 +142,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { assert_eq!(warmup_keys.len(), SAMPLE_SIZE); assert_eq!(query_keys.len(), SAMPLE_SIZE); - let root = generate_trie( - database.open(self.database_type), - key_values, - ); + let root = generate_trie(database.open(self.database_type), key_values); Box::new(TrieReadBenchmark { database, @@ -162,7 +159,8 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { self.database_size, pretty_print(self.database_size.keys()), self.database_type, - ).into() + ) + .into() } } @@ -182,12 +180,10 @@ impl core::Benchmark for TrieReadBenchmark { let storage: Arc<dyn sp_state_machine::Storage<sp_core::Blake2Hasher>> = Arc::new(Storage(db.open(self.database_type))); - let trie_backend = sp_state_machine::TrieBackend::new( - storage, - self.root, - ); + let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root); for (warmup_key, warmup_value) in self.warmup_keys.iter() { - let value = trie_backend.storage(&warmup_key[..]) + let value = trie_backend + .storage(&warmup_key[..]) .expect("Failed to get key: db error") .expect("Warmup key should exist"); @@ -218,7 +214,6 @@ pub struct TrieWriteBenchmarkDescription { pub database_type: DatabaseType, } - impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { fn path(&self) -> Path { let mut path = Path::new(&["trie", "write"]); @@ -253,10 +248,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { assert_eq!(warmup_keys.len(), SAMPLE_SIZE); - let root = generate_trie( - database.open(self.database_type), - key_values, - ); + let root = generate_trie(database.open(self.database_type), key_values); Box::new(TrieWriteBenchmark { database, @@ -272,7 +264,8 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { self.database_size, pretty_print(self.database_size.keys()), self.database_type, - ).into() + ) + .into() } } @@ -292,15 +285,13 @@ impl core::Benchmark for TrieWriteBenchmark { let mut new_root = self.root.clone(); let mut overlay = HashMap::new(); - let mut trie = SimpleTrie { - db: kvdb.clone(), - overlay: &mut overlay, - }; - let mut trie_db_mut = TrieDBMut::from_existing(&mut trie, &mut new_root) - .expect("Failed to create TrieDBMut"); + let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay }; + let mut trie_db_mut = + TrieDBMut::from_existing(&mut trie, &mut new_root).expect("Failed to create TrieDBMut"); for (warmup_key, warmup_value) in self.warmup_keys.iter() { - let value = trie_db_mut.get(&warmup_key[..]) + let value = trie_db_mut + .get(&warmup_key[..]) .expect("Failed to get key: db error") .expect("Warmup key should exist"); @@ -367,7 +358,9 @@ impl SizePool { fn value<R: Rng>(&self, rng: &mut R) -> Vec<u8> { let sr = (rng.next_u64() % self.total as u64) as u32; - let mut range =
self.distribution.range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded)); + let mut range = self + .distribution + .range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded)); let size = *range.next().unwrap().1 as usize; random_vec(rng, size) } diff --git a/bin/node/bench/src/txpool.rs b/bin/node/bench/src/txpool.rs index 7ea13fc15ec68..b0db734534855 100644 --- a/bin/node/bench/src/txpool.rs +++ b/bin/node/bench/src/txpool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -23,13 +23,13 @@ use std::borrow::Cow; -use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType}; +use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; use sc_transaction_pool::BasicPool; +use sc_transaction_pool_api::{TransactionPool, TransactionSource}; use sp_runtime::generic::BlockId; -use sp_transaction_pool::{TransactionPool, TransactionSource}; -use crate::core::{self, Path, Mode}; +use crate::core::{self, Mode, Path}; pub struct PoolBenchmarkDescription { pub database_type: DatabaseType, @@ -46,11 +46,7 @@ impl core::BenchmarkDescription for PoolBenchmarkDescription { fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> { Box::new(PoolBenchmark { - database: BenchDb::with_key_types( - self.database_type, - 50_000, - KeyTypes::Sr25519, - ), + database: BenchDb::with_key_types(self.database_type, 50_000, KeyTypes::Sr25519), }) } @@ -63,7 +59,9 @@ impl core::Benchmark for PoolBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let context = self.database.create_context(Profile::Wasm); - let _ = context.client.runtime_version_at(&BlockId::Number(0)) + let _ = context + .client + .runtime_version_at(&BlockId::Number(0)) .expect("Failed to get runtime version") .spec_version; @@ -74,27 +72,26 @@ impl core::Benchmark for PoolBenchmark { let executor = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, executor, context.client.clone(), ); - let generated_transactions = self.database.block_content( - BlockType::RandomTransfersKeepAlive.to_content(Some(100)), - &context.client, - ).into_iter().collect::<Vec<_>>(); + let generated_transactions = self + .database + .block_content( + BlockType::RandomTransfersKeepAlive.to_content(Some(100)), + &context.client, + ) + .into_iter() + .collect::<Vec<_>>(); let start = std::time::Instant::now(); - let submissions = generated_transactions.into_iter().map(|tx| { - txpool.submit_one( - &BlockId::Number(0), - TransactionSource::External, - tx, - ) - }); - futures::executor::block_on( - futures::future::join_all(submissions) - ); + let submissions = generated_transactions + .into_iter() + .map(|tx| txpool.submit_one(&BlockId::Number(0), TransactionSource::External, tx)); + futures::executor::block_on(futures::future::join_all(submissions)); let elapsed = start.elapsed(); if mode == Mode::Profile { diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml deleted file mode 100644 index 13d6e057a1e16..0000000000000 --- a/bin/node/browser-testing/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "node-browser-testing" -version = "2.0.0" -authors = ["Parity Technologies <admin@parity.io>"] -description = "Tests for the in-browser light client."
-edition = "2018" -license = "Apache-2.0" - -[dependencies] -futures-timer = "3.0.2" -libp2p = { version = "0.28.1", default-features = false } -jsonrpc-core = "15.0.0" -serde = "1.0.106" -serde_json = "1.0.48" -wasm-bindgen = { version = "=0.2.67", features = ["serde-serialize"] } -wasm-bindgen-futures = "0.4.10" -wasm-bindgen-test = "0.3.10" -futures = "0.3.4" - -node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0"} -sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.8.0"} diff --git a/bin/node/browser-testing/src/lib.rs b/bin/node/browser-testing/src/lib.rs deleted file mode 100644 index 777e5ea9f132e..0000000000000 --- a/bin/node/browser-testing/src/lib.rs +++ /dev/null @@ -1,70 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Running -//! Running this test can be done with -//! ```text -//! wasm-pack test --firefox --release --headless bin/node/browser-testing -//! ``` -//! or (without `wasm-pack`) -//! ```text -//! CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER=wasm-bindgen-test-runner WASM_BINDGEN_TEST_TIMEOUT=60 cargo test --target wasm32-unknown-unknown -//! ``` -//! For debug infomation, such as the informant, run without the `--headless` -//! flag and open a browser to the url that `wasm-pack test` outputs. -//! For more infomation see https://rustwasm.github.io/docs/wasm-pack/. - -use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; -use wasm_bindgen_futures::JsFuture; -use wasm_bindgen::JsValue; -use jsonrpc_core::types::{MethodCall, Success, Version, Params, Id}; -use serde::de::DeserializeOwned; - -wasm_bindgen_test_configure!(run_in_browser); - -fn rpc_call(method: &str) -> String { - serde_json::to_string(&MethodCall { - jsonrpc: Some(Version::V2), - method: method.into(), - params: Params::None, - id: Id::Num(1) - }).unwrap() -} - -fn deserialize_rpc_result(js_value: JsValue) -> T { - let string = js_value.as_string().unwrap(); - let value = serde_json::from_str::(&string).unwrap().result; - // We need to convert a `Value::Object` into a proper type. - let value_string = serde_json::to_string(&value).unwrap(); - serde_json::from_str(&value_string).unwrap() -} - -#[wasm_bindgen_test] -async fn runs() { - let mut client = node_cli::start_client(None, "info".into()) - .await - .unwrap(); - - // Check that the node handles rpc calls. - // TODO: Re-add the code that checks if the node is syncing. 
- let chain_name: String = deserialize_rpc_result( - JsFuture::from(client.rpc_send(&rpc_call("system_chain"))) - .await - .unwrap() - ); - assert_eq!(chain_name, "Development"); -} diff --git a/bin/node/browser-testing/webdriver.json b/bin/node/browser-testing/webdriver.json deleted file mode 100644 index 417ac35a7bccd..0000000000000 --- a/bin/node/browser-testing/webdriver.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "goog:chromeOptions": { - "args": [ - "--whitelisted-ips=127.0.0.1" - ] - } -} diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 96cd9d4f8c533..efed171e2ef61 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-cli" -version = "2.0.0" +version = "3.0.0-dev" authors = ["Parity Technologies "] description = "Generic Substrate node implementation in Rust." build = "build.rs" @@ -34,117 +34,109 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.4" } -serde = { version = "1.0.102", features = ["derive"] } -futures = { version = "0.3.1", features = ["compat"] } +codec = { package = "parity-scale-codec", version = "2.0.0" } +serde = { version = "1.0.126", features = ["derive"] } +futures = "0.3.16" hex-literal = "0.3.1" -ipfs = { git = "https://github.com/rs-ipfs/rust-ipfs" } +ipfs = { git = "https://github.com/rs-ipfs/rust-ipfs"} log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } -tracing = "0.1.19" -parking_lot = "0.10.0" # primitives -sp-authority-discovery = { version = "2.0.0", path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } +grandpa-primitives = { version = "4.0.0-dev", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +sp-authorship = { version = "4.0.0-dev", path = "../../../primitives/authorship" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-keyring = { version = "4.0.0-dev", path = "../../../primitives/keyring" } +sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } 
+sp-transaction-pool = { version = "4.0.0-dev", path = "../../../primitives/transaction-pool" } +sp-transaction-storage-proof = { version = "4.0.0-dev", path = "../../../primitives/transaction-storage-proof" } # client dependencies -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } -sc-network = { version = "0.8.0", path = "../../../client/network" } -sc-consensus-babe = { version = "0.8.0", path = "../../../client/consensus/babe" } -grandpa = { version = "0.8.0", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "0.8.0", default-features = false, path = "../../../client/db" } -sc-offchain = { version = "2.0.0", path = "../../../client/offchain" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sc-basic-authorship = { version = "0.8.0", path = "../../../client/basic-authorship" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -sc-tracing = { version = "2.0.0", path = "../../../client/tracing" } -sc-telemetry = { version = "2.0.0", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "0.8.0", path = "../../../client/authority-discovery" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } +sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +sc-network = { version = "0.10.0-dev", path = "../../../client/network" } +sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" } +sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } +sc-consensus-uncles = { version = "0.10.0-dev", path = "../../../client/consensus/uncles" } +grandpa = { version = "0.10.0-dev", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } +sc-basic-authorship = { version = "0.10.0-dev", path = "../../../client/basic-authorship" } +sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service" } +sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sc-authority-discovery = { version = "0.10.0-dev", path = "../../../client/authority-discovery" } +sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state-rpc" } # frame dependencies -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -frame-support = { version = "2.0.0", default-features = false, path = 
"../../../frame/support" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } -pallet-authority-discovery = { version = "2.0.0", path = "../../../frame/authority-discovery" } -pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } +frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } +pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } +pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } # node-specific dependencies -node-runtime = { version = "2.0.0", path = "../runtime" } -node-rpc = { version = "2.0.0", path = "../rpc" } +node-runtime = { version = "3.0.0-dev", path = "../runtime" } +node-rpc = { version = "3.0.0-dev", path = "../rpc" } node-primitives = { version = "2.0.0", path = "../primitives" } -node-executor = { version = "2.0.0", path = "../executor" } +node-executor = { version = "3.0.0-dev", path = "../executor" } # CLI-specific dependencies -sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli" } -frame-benchmarking-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } -node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } - -# WASM-specific dependencies -wasm-bindgen = { version = "0.2.57", optional = true } -wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0"} +sc-cli = { version = "0.10.0-dev", optional = true, path = "../../../client/cli" } +frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } +try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } [target.'cfg(target_arch="x86_64")'.dependencies] -node-executor = { version = "2.0.0", path = "../executor", features = [ "wasmtime" ] } -sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } -sp-trie = { version = "2.0.0", default-features = false, path = "../../../primitives/trie", features = ["memory-tracker"] } +node-executor = { version = "3.0.0-dev", path = "../executor", features = [ + "wasmtime", +] } +sc-cli = { version = "0.10.0-dev", optional = true, path = "../../../client/cli", features = [ + "wasmtime", +] } +sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service", features = [ + "wasmtime", +] } +sp-trie = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/trie", features = [ + "memory-tracker", +] } [dev-dependencies] -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-consensus-babe = { version = "0.8.0", features = ["test-helpers"], path = "../../../client/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0", path = "../../../client/consensus/epochs" } +sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } +sc-client-db = { version = "0.10.0-dev", path = "../../../client/db" 
} +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.10.0-dev", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } -futures = "0.3.4" +sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } +futures = "0.3.16" tempfile = "3.1.0" assert_cmd = "1.0" -nix = "0.17" +nix = "0.19" serde_json = "1.0" regex = "1" -platforms = "0.2.1" +platforms = "1.1" +async-std = { version = "1.6.5", features = ["attributes"] } +soketto = "0.4.2" [build-dependencies] structopt = { version = "0.3.8", optional = true } -node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } -frame-benchmarking-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } -substrate-build-script-utils = { version = "2.0.0", optional = true, path = "../../../utils/build-script-utils" } -substrate-frame-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/frame-utilities-cli" } - -[build-dependencies.sc-cli] -version = "0.8.0" -package = "sc-cli" -path = "../../../client/cli" -optional = true +node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } +frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } +substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../../../utils/build-script-utils" } +substrate-frame-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/frame-utilities-cli" } +try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", optional = true } [features] default = [ "cli" ] -browser = [ - "browser-utils", - "wasm-bindgen", - "wasm-bindgen-futures", -] cli = [ "node-executor/wasmi-errno", "node-inspect", @@ -154,8 +146,12 @@ cli = [ "sc-service/db", "structopt", "substrate-build-script-utils", + "try-runtime-cli", ] runtime-benchmarks = [ "node-runtime/runtime-benchmarks", "frame-benchmarking-cli", ] +# Enable features that allow the runtime to be tried and debugged. Name might be subject to change +# in the near future. +try-runtime = ["node-runtime/try-runtime", "try-runtime-cli"] diff --git a/bin/node/cli/bin/main.rs b/bin/node/cli/bin/main.rs index 299b760c82e36..cf32a7cf28860 100644 --- a/bin/node/cli/bin/main.rs +++ b/bin/node/cli/bin/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/browser-demo/.gitignore b/bin/node/cli/browser-demo/.gitignore deleted file mode 100644 index 0c6117d9fb83b..0000000000000 --- a/bin/node/cli/browser-demo/.gitignore +++ /dev/null @@ -1 +0,0 @@ -pkg \ No newline at end of file diff --git a/bin/node/cli/browser-demo/README.md b/bin/node/cli/browser-demo/README.md deleted file mode 100644 index a11b250ba1f15..0000000000000 --- a/bin/node/cli/browser-demo/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to run this demo - -```sh -# If necessary, install wasm-bindgen -# The version must match that used when building the browser demo. -cargo install --version 0.2.67 wasm-bindgen-cli - -# Run the build script -./build.sh -``` diff --git a/bin/node/cli/browser-demo/build.sh b/bin/node/cli/browser-demo/build.sh deleted file mode 100755 index be52b7a523f01..0000000000000 --- a/bin/node/cli/browser-demo/build.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env sh -cargo +nightly build --release -p node-cli --target wasm32-unknown-unknown --no-default-features --features browser -Z features=itarget -wasm-bindgen ../../../../target/wasm32-unknown-unknown/release/node_cli.wasm --out-dir pkg --target web -python -m http.server 8000 diff --git a/bin/node/cli/browser-demo/favicon.png b/bin/node/cli/browser-demo/favicon.png deleted file mode 100644 index 8a4548ce34dfa..0000000000000 Binary files a/bin/node/cli/browser-demo/favicon.png and /dev/null differ diff --git a/bin/node/cli/browser-demo/index.html b/bin/node/cli/browser-demo/index.html deleted file mode 100644 index 60acfde39f559..0000000000000 --- a/bin/node/cli/browser-demo/index.html +++ /dev/null @@ -1,39 +0,0 @@ [39 deleted lines of HTML lost to extraction; only the page title "Substrate node" is recoverable] diff --git a/bin/node/cli/build.rs b/bin/node/cli/build.rs index a36f0d01a0a03..90aec2222c9ec 100644 --- a/bin/node/cli/build.rs +++ b/bin/node/cli/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,8 +25,8 @@ fn main() { mod cli { include!("src/cli.rs"); - use std::{fs, env, path::Path}; use sc_cli::structopt::clap::Shell; + use std::{env, fs, path::Path}; use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; pub fn main() { @@ -51,9 +51,12 @@ mod cli { Some(dir) => dir, }; let path = Path::new(&outdir) - .parent().unwrap() - .parent().unwrap() - .parent().unwrap() + .parent() + .unwrap() + .parent() + .unwrap() + .parent() + .unwrap() .join("completion-scripts"); fs::create_dir(&path).ok(); diff --git a/bin/node/cli/src/browser.rs b/bin/node/cli/src/browser.rs deleted file mode 100644 index 41770f5fcde6d..0000000000000 --- a/bin/node/cli/src/browser.rs +++ /dev/null @@ -1,61 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version.
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>. - -use crate::chain_spec::ChainSpec; -use log::info; -use wasm_bindgen::prelude::*; -use browser_utils::{ - Client, - browser_configuration, set_console_error_panic_hook, init_console_log, -}; -use std::str::FromStr; - -/// Starts the client. -#[wasm_bindgen] -pub async fn start_client(chain_spec: Option<String>, log_level: String) -> Result<Client, JsValue> { - start_inner(chain_spec, log_level) - .await - .map_err(|err| JsValue::from_str(&err.to_string())) -} - -async fn start_inner(chain_spec: Option<String>, log_level: String) -> Result<Client, Box<dyn std::error::Error>> { - set_console_error_panic_hook(); - init_console_log(log::Level::from_str(&log_level)?)?; - let chain_spec = match chain_spec { - Some(chain_spec) => ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec()) - .map_err(|e| format!("{:?}", e))?, - None => crate::chain_spec::development_config(), - }; - - let config = browser_configuration(chain_spec).await?; - - info!("Substrate browser node"); - info!("✌️ version {}", config.impl_version); - info!("❤️ by Parity Technologies, 2017-2020"); - info!("📋 Chain specification: {}", config.chain_spec.name()); - info!("🏷 Node name: {}", config.network.node_name); - info!("👤 Role: {:?}", config.role); - - // Create the service. This is the most heavy initialization step. - let (task_manager, rpc_handlers) = - crate::service::new_light_base(config) - .map(|(components, rpc_handlers, _, _, _)| (components, rpc_handlers)) - .map_err(|e| format!("{:?}", e))?; - - Ok(browser_utils::start_client(task_manager, rpc_handlers)) -} diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 90824a5572f12..352e007a891ba 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,25 +18,26 @@ //! Substrate chain configurations.
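The chain_spec.rs rework that follows reorders imports, adds the `light_sync_state` extension, and threads a new `initial_nominators` argument through `testnet_genesis`. For orientation, the spec constructors defined here are consumed by the CLI's `load_spec` (reworked later in this diff, in command.rs); a minimal sketch of that dispatch, where `resolve_spec` is a hypothetical helper name rather than anything in the patch:

```rust
// Sketch only: how the spec constructors below are typically resolved from a
// --chain id. Mirrors the load_spec arms in command.rs further down.
fn resolve_spec(id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
    Ok(match id {
        // An empty id is now rejected instead of defaulting to flaming-fir.
        "" => return Err("Please specify which chain you want to run".into()),
        "dev" => Box::new(chain_spec::development_config()),
        "local" => Box::new(chain_spec::local_testnet_config()),
        "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?),
        "staging" => Box::new(chain_spec::staging_testnet_config()),
        // Anything else is treated as a path to a raw chain-spec JSON file.
        path => Box::new(chain_spec::ChainSpec::from_json_file(path.into())?),
    })
}
```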
-use sc_chain_spec::ChainSpecExtension; -use sp_core::{Pair, Public, crypto::UncheckedInto, sr25519}; -use serde::{Serialize, Deserialize}; +use grandpa_primitives::AuthorityId as GrandpaId; +use hex_literal::hex; use node_runtime::{ - AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, CouncilConfig, - DemocracyConfig,GrandpaConfig, ImOnlineConfig, SessionConfig, SessionKeys, StakerStatus, - StakingConfig, ElectionsConfig, IndicesConfig, SocietyConfig, SudoConfig, SystemConfig, - TechnicalCommitteeConfig, wasm_binary_unwrap, + constants::currency::*, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, + BalancesConfig, Block, CouncilConfig, DemocracyConfig, ElectionsConfig, GrandpaConfig, + ImOnlineConfig, IndicesConfig, SessionConfig, SessionKeys, SocietyConfig, StakerStatus, + StakingConfig, SudoConfig, SystemConfig, TechnicalCommitteeConfig, MAX_NOMINATIONS, }; -use node_runtime::Block; -use node_runtime::constants::currency::*; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sc_chain_spec::ChainSpecExtension; use sc_service::ChainType; -use hex_literal::hex; use sc_telemetry::TelemetryEndpoints; -use grandpa_primitives::{AuthorityId as GrandpaId}; -use sp_consensus_babe::{AuthorityId as BabeId}; -use pallet_im_online::sr25519::{AuthorityId as ImOnlineId}; +use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_runtime::{Perbill, traits::{Verify, IdentifyAccount}}; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; +use sp_runtime::{ + traits::{IdentifyAccount, Verify}, + Perbill, +}; pub use node_primitives::{AccountId, Balance, Signature}; pub use node_runtime::GenesisConfig; @@ -56,13 +57,12 @@ pub struct Extensions { pub fork_blocks: sc_client_api::ForkBlocks, /// Known bad block hashes. pub bad_blocks: sc_client_api::BadBlocks, + /// The light sync state extension used by the sync-state rpc. + pub light_sync_state: sc_sync_state_rpc::LightSyncStateExtension, } /// Specialized `ChainSpec`. 
-pub type ChainSpec = sc_service::GenericChainSpec< - GenesisConfig, - Extensions, ->; +pub type ChainSpec = sc_service::GenericChainSpec; /// Flaming Fir testnet generator pub fn flaming_fir_config() -> Result { ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..]) @@ -78,80 +78,107 @@ fn session_keys( } fn staging_testnet_config_genesis() -> GenesisConfig { + #[rustfmt::skip] // stash, controller, session-key // generated with secret: // for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done + // // and + // // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done - let initial_authorities: Vec<(AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId)> = vec![( - // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy - hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), - // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq - hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), - // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC - hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - ),( - // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 - hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), - // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF - hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), - // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE - hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - ),( - // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp - hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), - // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 - hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), - // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d - hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - ),( - // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 - hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), - // 
5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn - hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), - // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 - hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - )]; + let initial_authorities: Vec<( + AccountId, + AccountId, + GrandpaId, + BabeId, + ImOnlineId, + AuthorityDiscoveryId, + )> = vec![ + ( + // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy + hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), + // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq + hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), + // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC + hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + ), + ( + // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 + hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), + // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF + hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), + // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE + hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + ), + ( + // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp + hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), + // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 + hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), + // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d + hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + ), + ( + // 
5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 + hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), + // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn + hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), + // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 + hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + ), + ]; // generated with secret: subkey inspect "$secret"/fir let root_key: AccountId = hex![ // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" - ].into(); + ] + .into(); let endowed_accounts: Vec<AccountId> = vec![root_key.clone()]; - testnet_genesis( - initial_authorities, - root_key, - Some(endowed_accounts), - false, - ) + testnet_genesis(initial_authorities, vec![], root_key, Some(endowed_accounts)) } /// Staging testnet config. @@ -163,8 +190,10 @@ pub fn staging_testnet_config() -> ChainSpec { ChainSpec::from_genesis( "Staging Testnet", "staging_testnet", ChainType::Live, staging_testnet_config_genesis, boot_nodes, - Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) - .expect("Staging telemetry url is valid; qed")), + Some( + TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) + .expect("Staging telemetry url is valid; qed"), + ), None, None, Default::default(), @@ -179,21 +208,17 @@ pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Pu } /// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where - AccountPublic: From<<TPublic::Pair as Pair>::Public> +pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId +where + AccountPublic: From<<TPublic::Pair as Pair>::Public>, { AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account() } /// Helper function to generate stash, controller and session key from seed -pub fn authority_keys_from_seed(seed: &str) -> ( - AccountId, - AccountId, - GrandpaId, - BabeId, - ImOnlineId, - AuthorityDiscoveryId, -) { +pub fn authority_keys_from_seed( + seed: &str, +) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId) { ( get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)), get_account_id_from_seed::<sr25519::Public>(seed), @@ -214,11 +239,11 @@ pub fn testnet_genesis( ImOnlineId, AuthorityDiscoveryId, )>, + initial_nominators: Vec<AccountId>, root_key: AccountId, endowed_accounts: Option<Vec<AccountId>>, - enable_println: bool, ) -> GenesisConfig { - let endowed_accounts: Vec<AccountId> = endowed_accounts.unwrap_or_else(|| { + let mut endowed_accounts: Vec<AccountId> = endowed_accounts.unwrap_or_else(|| { vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), @@ -234,104 +259,119 @@ pub fn testnet_genesis( get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), ] }); + // endow all authorities and nominators. + initial_authorities + .iter() + .map(|x| &x.0) + .chain(initial_nominators.iter()) + .for_each(|x| { + if !endowed_accounts.contains(&x) { + endowed_accounts.push(x.clone()) + } + }); + + // stakers: all validators and nominators.
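The nominator wiring that this hunk continues with below turns each entry of `initial_nominators` into a stash that nominates a random subset of the genesis validators. A standalone sketch of that selection, assuming the `rand` 0.7 API used by this crate and a non-empty validator set (otherwise the modulo panics):

```rust
use rand::{seq::SliceRandom, Rng};

// Pick up to `max` random validators for one nominator. `count` may come out
// as zero, so a genesis nominator can legitimately start with no nominations,
// exactly as in the hunk that follows.
fn pick_nominations<T: Clone, R: Rng>(validators: &[T], max: usize, rng: &mut R) -> Vec<T> {
    let limit = max.min(validators.len()); // assumed non-zero
    let count = rng.gen::<usize>() % limit;
    validators.choose_multiple(rng, count).cloned().collect()
}
```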
+ let mut rng = rand::thread_rng(); + let stakers = initial_authorities + .iter() + .map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator)) + .chain(initial_nominators.iter().map(|x| { + use rand::{seq::SliceRandom, Rng}; + let limit = (MAX_NOMINATIONS as usize).min(initial_authorities.len()); + let count = rng.gen::<usize>() % limit; + let nominations = initial_authorities + .as_slice() + .choose_multiple(&mut rng, count) + .into_iter() + .map(|choice| choice.0.clone()) + .collect::<Vec<_>>(); + (x.clone(), x.clone(), STASH, StakerStatus::Nominator(nominations)) + })) + .collect::<Vec<_>>(); + let num_endowed_accounts = endowed_accounts.len(); const ENDOWMENT: Balance = 10_000_000 * DOLLARS; - const STASH: Balance = 100 * DOLLARS; + const STASH: Balance = ENDOWMENT / 1000; GenesisConfig { - frame_system: Some(SystemConfig { + system: SystemConfig { code: wasm_binary_unwrap().to_vec(), changes_trie_config: Default::default(), - }), - pallet_balances: Some(BalancesConfig { - balances: endowed_accounts.iter().cloned() - .map(|k| (k, ENDOWMENT)) - .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect(), - }), - pallet_indices: Some(IndicesConfig { - indices: vec![], - }), - pallet_session: Some(SessionConfig { - keys: initial_authorities.iter().map(|x| { - (x.0.clone(), x.0.clone(), session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - )) - }).collect::<Vec<_>>(), - }), - pallet_staking: Some(StakingConfig { - validator_count: initial_authorities.len() as u32 * 2, + }, + balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(), + }, + indices: IndicesConfig { indices: vec![] }, + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), + ) + }) + .collect::<Vec<_>>(), + }, + staking: StakingConfig { + validator_count: initial_authorities.len() as u32, minimum_validator_count: initial_authorities.len() as u32, - stakers: initial_authorities.iter().map(|x| { - (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator) - }).collect(), invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), slash_reward_fraction: Perbill::from_percent(10), - ..
Default::default() - }), - pallet_democracy: Some(DemocracyConfig::default()), - pallet_elections_phragmen: Some(ElectionsConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .map(|member| (member, STASH)) - .collect(), - }), - pallet_collective_Instance1: Some(CouncilConfig::default()), - pallet_collective_Instance2: Some(TechnicalCommitteeConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect(), + stakers, + ..Default::default() + }, + democracy: DemocracyConfig::default(), + elections: ElectionsConfig { + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .map(|member| (member, STASH)) + .collect(), + }, + council: CouncilConfig::default(), + technical_committee: TechnicalCommitteeConfig { + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .collect(), phantom: Default::default(), - }), - pallet_contracts: Some(ContractsConfig { - current_schedule: pallet_contracts::Schedule { - enable_println, // this should only be enabled on development chains - ..Default::default() - }, - }), - pallet_sudo: Some(SudoConfig { - key: root_key, - }), - pallet_babe: Some(BabeConfig { + }, + sudo: SudoConfig { key: root_key }, + babe: BabeConfig { authorities: vec![], - }), - pallet_im_online: Some(ImOnlineConfig { - keys: vec![], - }), - pallet_authority_discovery: Some(AuthorityDiscoveryConfig { - keys: vec![], - }), - pallet_grandpa: Some(GrandpaConfig { - authorities: vec![], - }), - pallet_membership_Instance1: Some(Default::default()), - pallet_treasury: Some(Default::default()), - pallet_society: Some(SocietyConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect(), + epoch_config: Some(node_runtime::BABE_GENESIS_EPOCH_CONFIG), + }, + im_online: ImOnlineConfig { keys: vec![] }, + authority_discovery: AuthorityDiscoveryConfig { keys: vec![] }, + grandpa: GrandpaConfig { authorities: vec![] }, + technical_membership: Default::default(), + treasury: Default::default(), + society: SocietyConfig { + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .collect(), pot: 0, max_members: 999, - }), - pallet_vesting: Some(Default::default()), + }, + vesting: Default::default(), + gilt: Default::default(), + transaction_storage: Default::default(), } } fn development_config_genesis() -> GenesisConfig { testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - ], + vec![authority_keys_from_seed("Alice")], + vec![], get_account_id_from_seed::("Alice"), None, - true, ) } @@ -352,13 +392,10 @@ pub fn development_config() -> ChainSpec { fn local_testnet_genesis() -> GenesisConfig { testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - authority_keys_from_seed("Bob"), - ], + vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], + vec![], get_account_id_from_seed::("Alice"), None, - false, ) } @@ -386,12 +423,10 @@ pub(crate) mod tests { fn local_testnet_genesis_instant_single() -> GenesisConfig { testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - ], + vec![authority_keys_from_seed("Alice")], + vec![], get_account_id_from_seed::("Alice"), None, - false, ) } @@ -428,17 +463,29 @@ pub(crate) mod tests { #[test] #[ignore] fn test_connectivity() { + sp_tracing::try_init_simple(); + sc_service_test::connectivity( integration_test_config_with_two_authorities(), |config| { - let NewFullBase { task_manager, client, 
network, transaction_pool, .. } - = new_full_base(config,|_, _| ())?; - Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) + let NewFullBase { task_manager, client, network, transaction_pool, .. } = + new_full_base(config, |_, _| ())?; + Ok(sc_service_test::TestNetComponents::new( + task_manager, + client, + network, + transaction_pool, + )) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) - } + Ok(sc_service_test::TestNetComponents::new( + keep_alive, + client, + network, + transaction_pool, + )) + }, ); } diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 2130ff1e4b106..850581748fde3 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_cli::{RunCmd, KeySubcommand, SignCmd, VanityCmd, VerifyCmd}; +use sc_cli::{KeySubcommand, RunCmd, SignCmd, VanityCmd, VerifyCmd}; use structopt::StructOpt; /// An overarching CLI command definition. @@ -47,6 +47,14 @@ pub enum Subcommand { #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), + /// Try some command against runtime state. + #[cfg(feature = "try-runtime")] + TryRuntime(try_runtime_cli::TryRuntimeCmd), + + /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. + #[cfg(not(feature = "try-runtime"))] + TryRuntime, + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. Verify(VerifyCmd), diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index f8a0f3f9b3a34..17375094f2a1b 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,12 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{chain_spec, service, Cli, Subcommand}; -use node_executor::Executor; +use crate::{chain_spec, service, service::new_partial, Cli, Subcommand}; +use node_executor::ExecutorDispatch; use node_runtime::{Block, RuntimeApi}; -use sc_cli::{Result, SubstrateCli, RuntimeVersion, Role, ChainSpec}; +use sc_cli::{ChainSpec, Result, Role, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; -use crate::service::new_partial; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -49,15 +48,20 @@ impl SubstrateCli for Cli { } fn load_spec(&self, id: &str) -> std::result::Result, String> { - Ok(match id { + let spec = match id { + "" => + return Err( + "Please specify which chain you want to run, e.g. 
--dev or --chain=local" + .into(), + ), "dev" => Box::new(chain_spec::development_config()), "local" => Box::new(chain_spec::local_testnet_config()), - "" | "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), + "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), "staging" => Box::new(chain_spec::staging_testnet_config()), - path => Box::new(chain_spec::ChainSpec::from_json_file( - std::path::PathBuf::from(path), - )?), - }) + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }; + Ok(spec) } fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { @@ -77,24 +81,25 @@ pub fn run() -> Result<()> { Role::Light => service::new_light(config), _ => service::new_full(config), } + .map_err(sc_cli::Error::Service) }) - } + }, Some(Subcommand::Inspect(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) - } - Some(Subcommand::Benchmark(cmd)) => { + runner.sync_run(|config| cmd.run::(config)) + }, + Some(Subcommand::Benchmark(cmd)) => if cfg!(feature = "runtime-benchmarks") { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`.".into()) - } - } - Some(Subcommand::Key(cmd)) => cmd.run(), + You can enable it with `--features runtime-benchmarks`." + .into()) + }, + Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::Sign(cmd)) => cmd.run(), Some(Subcommand::Verify(cmd)) => cmd.run(), Some(Subcommand::Vanity(cmd)) => cmd.run(), @@ -105,32 +110,30 @@ pub fn run() -> Result<()> { Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. } = + new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = new_partial(&config)?; Ok((cmd.run(client, config.database), task_manager)) }) }, Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = new_partial(&config)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) }, Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. } = + new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, @@ -141,10 +144,27 @@ pub fn run() -> Result<()> { Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, backend, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, backend, .. 
} = new_partial(&config)?; Ok((cmd.run(client, backend), task_manager)) }) }, + #[cfg(feature = "try-runtime")] + Some(Subcommand::TryRuntime(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + // we don't need any of the components of new_partial, just a runtime, or a task + // manager to do `async_run`. + let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + let task_manager = + sc_service::TaskManager::new(config.tokio_handle.clone(), registry) + .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; + + Ok((cmd.run::(config), task_manager)) + }) + }, + #[cfg(not(feature = "try-runtime"))] + Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ + You can enable it with `--features try-runtime`." + .into()), } } diff --git a/bin/node/cli/src/lib.rs b/bin/node/cli/src/lib.rs index bd2298514a7a2..1a4c1b0eab8db 100644 --- a/bin/node/cli/src/lib.rs +++ b/bin/node/cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -34,15 +34,11 @@ pub mod chain_spec; #[macro_use] mod service; -#[cfg(feature = "browser")] -mod browser; #[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] mod command; -#[cfg(feature = "browser")] -pub use browser::*; #[cfg(feature = "cli")] pub use cli::*; #[cfg(feature = "cli")] diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 4d55d8dbc694d..696a1c1a8242c 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,64 +20,98 @@ //! Service implementation. Specialized wrapper over substrate service. 
-use std::sync::Arc; -use sc_consensus_babe; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; +use futures::prelude::*; +use node_executor::ExecutorDispatch; use node_primitives::Block; use node_runtime::RuntimeApi; -use sc_service::{ - config::{Role, Configuration}, error::{Error as ServiceError}, - RpcHandlers, TaskManager, -}; -use sp_inherents::InherentDataProviders; +use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_consensus_babe::{self, SlotProportion}; +use sc_executor::NativeElseWasmExecutor; use sc_network::{Event, NetworkService}; +use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; +use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_runtime::traits::Block as BlockT; -use futures::prelude::*; -use sc_client_api::{ExecutorProvider, RemoteBackend}; -use node_executor::Executor; +use std::sync::Arc; -type FullClient = sc_service::TFullClient; +type FullClient = + sc_service::TFullClient>; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; -type LightClient = sc_service::TLightClient; - -pub fn new_partial(config: &Configuration) -> Result, - sc_transaction_pool::FullPool, - ( - impl Fn( - node_rpc::DenyUnsafe, - sc_rpc::SubscriptionTaskExecutor, - ) -> node_rpc::IoHandler, - ( - sc_consensus_babe::BabeBlockImport, - grandpa::LinkHalf, - sc_consensus_babe::BabeLink, - ), +type LightClient = + sc_service::TLightClient>; + +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sc_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool, ( + impl Fn( + node_rpc::DenyUnsafe, + sc_rpc::SubscriptionTaskExecutor, + ) -> Result, + ( + sc_consensus_babe::BabeBlockImport, + grandpa::LinkHalf, + sc_consensus_babe::BabeLink, + ), grandpa::SharedVoterState, - Arc>, + Option, ), - ) ->, ServiceError> { + >, + ServiceError, +> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(&config)?; + sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; let client = Arc::new(client); + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = sc_transaction_pool::BasicPool::new_full( config.transaction_pool.clone(), + config.role.is_authority().into(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), ); let (grandpa_block_import, grandpa_link) = grandpa::block_import( - client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), )?; let justification_import = grandpa_block_import.clone(); @@ -87,19 +121,31 @@ pub fn new_partial(config: 
&Configuration) -> Result::Header>::check_inherents(); + + Ok((timestamp, slot, uncles)) + }, + &task_manager.spawn_essential_handle(), config.prometheus_registry(), sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), + telemetry.as_ref().map(|x| x.handle()), )?; let import_setup = (block_import, grandpa_link, babe_link); @@ -110,10 +156,12 @@ pub fn new_partial(config: &Configuration) -> Result Result, pub network: Arc::Hash>>, - pub network_status_sinks: sc_service::NetworkStatusSinks, pub transaction_pool: Arc>, } /// Creates a full service from the configuration. pub fn new_full_base( - config: Configuration, + mut config: Configuration, with_startup_data: impl FnOnce( &sc_consensus_babe::BabeBlockImport, &sc_consensus_babe::BabeLink, - ) + ), ) -> Result { let sc_service::PartialComponents { - client, backend, mut task_manager, import_queue, keystore_container, - select_chain, transaction_pool, inherent_data_providers, - other: (rpc_extensions_builder, import_setup, rpc_setup), + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (rpc_extensions_builder, import_setup, rpc_setup, mut telemetry), } = new_partial(&config)?; - let (shared_voter_state, finality_proof_provider) = rpc_setup; + let shared_voter_state = rpc_setup; + let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + + config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); + let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + import_setup.1.shared_authority_set().clone(), + )); - let (network, network_status_sinks, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -192,25 +255,28 @@ pub fn new_full_base( import_queue, on_demand: None, block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), + warp_sync: Some(warp_sync), })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers( - &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), task_manager.ipfs_rt.clone(), ); } let role = config.role.clone(); let force_authoring = config.force_authoring; + let backoff_authoring_blocks = + Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); - let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); - sc_service::spawn_tasks(sc_service::SpawnTasksParams { + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { config, backend: backend.clone(), client: client.clone(), @@ -221,9 +287,8 @@ pub fn new_full_base( task_manager: &mut task_manager, on_demand: None, remote_blockchain: None, - telemetry_connection_sinks: telemetry_connection_sinks.clone(), - network_status_sinks: network_status_sinks.clone(), system_rpc_tx, + telemetry: telemetry.as_mut(), })?; let (block_import, grandpa_link, babe_link) = import_setup; @@ -236,11 +301,14 @@ pub fn new_full_base( client.clone(), transaction_pool.clone(), prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), ); let can_author_with = 
sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + let client_clone = client.clone(); + let slot_duration = babe_link.config().slot_duration(); let babe_config = sc_consensus_babe::BabeParams { keystore: keystore_container.sync_keystore(), client: client.clone(), @@ -248,10 +316,39 @@ pub fn new_full_base( env: proposer, block_import, sync_oracle: network.clone(), - inherent_data_providers: inherent_data_providers.clone(), + justification_sync_link: network.clone(), + create_inherent_data_providers: move |parent, ()| { + let client_clone = client_clone.clone(); + async move { + let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( + &*client_clone, + parent, + )?; + + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + let storage_proof = + sp_transaction_storage_proof::registration::new_data_provider( + &*client_clone, + &parent, + )?; + + Ok((timestamp, slot, uncles, storage_proof)) + } + }, force_authoring, + backoff_authoring_blocks, babe_link, can_author_with, + block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, + telemetry: telemetry.as_ref().map(|x| x.handle()), }; let babe = sc_consensus_babe::start_babe(babe_config)?; @@ -259,45 +356,38 @@ pub fn new_full_base( } // Spawn authority discovery module. - if matches!(role, Role::Authority{..} | Role::Sentry {..}) { - let (sentries, authority_discovery_role) = match role { - sc_service::config::Role::Authority { ref sentry_nodes } => ( - sentry_nodes.clone(), - sc_authority_discovery::Role::Authority ( - keystore_container.keystore(), - ), - ), - sc_service::config::Role::Sentry {..} => ( - vec![], - sc_authority_discovery::Role::Sentry, - ), - _ => unreachable!("Due to outer matches! constraint; qed.") - }; - - let dht_event_stream = network.event_stream("authority-discovery") - .filter_map(|e| async move { match e { - Event::Dht(e) => Some(e), - _ => None, - }}); - let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service( - client.clone(), - network.clone(), - sentries, - Box::pin(dht_event_stream), - authority_discovery_role, - prometheus_registry.clone(), - ); + if role.is_authority() { + let authority_discovery_role = + sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()); + let dht_event_stream = + network.event_stream("authority-discovery").filter_map(|e| async move { + match e { + Event::Dht(e) => Some(e), + _ => None, + } + }); + let (authority_discovery_worker, _service) = + sc_authority_discovery::new_worker_and_service_with_config( + sc_authority_discovery::WorkerConfig { + publish_non_global_ips: auth_disc_publish_non_global_ips, + ..Default::default() + }, + client.clone(), + network.clone(), + Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); - task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker.run()); + task_manager + .spawn_handle() + .spawn("authority-discovery-worker", authority_discovery_worker.run()); } // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. 
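// Aside (sketch, not part of the diff): the keystore gating below is the plain
// `bool -> Option<T>` shape, which `bool::then` expresses in one line. The
// `keystore_for` helper and its string payload are illustrative stand-ins.
fn keystore_for(is_authority: bool) -> Option<&'static str> {
	// A real node would return `keystore_container.sync_keystore()` here.
	is_authority.then(|| "sync-keystore-handle")
}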
- let keystore = if role.is_authority() { - Some(keystore_container.sync_keystore()) - } else { - None - }; + let keystore = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; let config = grandpa::Config { // FIXME #1578 make this available through chainspec @@ -306,7 +396,8 @@ pub fn new_full_base( name: Some(name), observer_enabled: false, keystore, - is_authority: role.is_network_authority(), + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), }; if enable_grandpa { @@ -320,7 +411,7 @@ pub fn new_full_base( config, link: grandpa_link, network: network.clone(), - telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), + telemetry: telemetry.as_ref().map(|x| x.handle()), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state, @@ -328,59 +419,82 @@ pub fn new_full_base( // the GRANDPA voter task is considered infallible, i.e. // if it fails we take down the service with it. - task_manager.spawn_essential_handle().spawn_blocking( - "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? - ); - } else { - grandpa::setup_disabled_grandpa(network.clone())?; + task_manager + .spawn_essential_handle() + .spawn_blocking("grandpa-voter", grandpa::run_grandpa_voter(grandpa_config)?); } network_starter.start_network(); - Ok(NewFullBase { - task_manager, - inherent_data_providers, - client, - network, - network_status_sinks, - transaction_pool, - }) + Ok(NewFullBase { task_manager, client, network, transaction_pool }) } /// Builds a new service for a full client. -pub fn new_full(config: Configuration) --> Result { - new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. }| { - task_manager - }) +pub fn new_full(config: Configuration) -> Result { + new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. 
}| task_manager) } -pub fn new_light_base(config: Configuration) -> Result<( - TaskManager, RpcHandlers, Arc, - Arc::Hash>>, - Arc>> -), ServiceError> { +pub fn new_light_base( + mut config: Configuration, +) -> Result< + ( + TaskManager, + RpcHandlers, + Arc, + Arc::Hash>>, + Arc< + sc_transaction_pool::LightPool>, + >, + ), + ServiceError, +> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; + sc_service::new_light_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + + let mut telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + + config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( config.transaction_pool.clone(), config.prometheus_registry(), - task_manager.spawn_handle(), + task_manager.spawn_essential_handle(), client.clone(), on_demand.clone(), )); - let grandpa_block_import = grandpa::light_block_import( - client.clone(), backend.clone(), &(client.clone() as Arc<_>), - Arc::new(on_demand.checker().clone()), + let (grandpa_block_import, grandpa_link) = grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); + let justification_import = grandpa_block_import.clone(); let (babe_block_import, babe_link) = sc_consensus_babe::block_import( sc_consensus_babe::Config::get_or_compute(&*client)?, @@ -388,25 +502,39 @@ pub fn new_light_base(config: Configuration) -> Result<( client.clone(), )?; - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - + let slot_duration = babe_link.config().slot_duration(); let import_queue = sc_consensus_babe::import_queue( babe_link, babe_block_import, - None, - Some(Box::new(finality_proof_import)), + Some(Box::new(justification_import)), client.clone(), select_chain.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_handle(), + move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + let uncles = + sp_authorship::InherentDataProvider::<::Header>::check_inherents(); + + Ok((timestamp, slot, uncles)) + }, + &task_manager.spawn_essential_handle(), config.prometheus_registry(), sp_consensus::NeverCanAuthor, + telemetry.as_ref().map(|x| x.handle()), )?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + )); - let (network, 
network_status_sinks, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -415,14 +543,35 @@ pub fn new_light_base(config: Configuration) -> Result<( import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), + warp_sync: Some(warp_sync), })?; - network_starter.start_network(); + + let enable_grandpa = !config.disable_grandpa; + if enable_grandpa { + let name = config.network.node_name.clone(); + + let config = grandpa::Config { + gossip_duration: std::time::Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore: None, + local_role: config.role.clone(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + task_manager.spawn_handle().spawn_blocking( + "grandpa-observer", + grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, + ); + } if config.offchain_worker.enabled { sc_service::build_offchain_workers( - &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), task_manager.ipfs_rt.clone(), ); } @@ -436,62 +585,59 @@ pub fn new_light_base(config: Configuration) -> Result<( let rpc_extensions = node_rpc::create_light(light_deps); - let rpc_handlers = - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - on_demand: Some(on_demand), - remote_blockchain: Some(backend.remote_blockchain()), - rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), - client: client.clone(), - transaction_pool: transaction_pool.clone(), - keystore: keystore_container.sync_keystore(), - config, backend, network_status_sinks, system_rpc_tx, - network: network.clone(), - telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), - task_manager: &mut task_manager, - })?; + let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + on_demand: Some(on_demand), + remote_blockchain: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), + client: client.clone(), + transaction_pool: transaction_pool.clone(), + keystore: keystore_container.sync_keystore(), + config, + backend, + system_rpc_tx, + network: network.clone(), + task_manager: &mut task_manager, + telemetry: telemetry.as_mut(), + })?; + network_starter.start_network(); Ok((task_manager, rpc_handlers, client, network, transaction_pool)) } /// Builds a new service for a light client. 
 pub fn new_light(config: Configuration) -> Result<TaskManager, ServiceError> {
-	new_light_base(config).map(|(task_manager, _, _, _, _)| {
-		task_manager
-	})
+	new_light_base(config).map(|(task_manager, _, _, _, _)| task_manager)
 }
 
 #[cfg(test)]
 mod tests {
-	use std::{sync::Arc, borrow::Cow, any::Any, convert::TryInto};
-	use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY};
-	use sc_consensus_epochs::descendent_query;
-	use sp_consensus::{
-		Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport,
-		RecordProof,
-	};
-	use node_primitives::{Block, DigestItem, Signature};
-	use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address};
-	use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION};
+	use crate::service::{new_full_base, new_light_base, NewFullBase};
 	use codec::Encode;
-	use sp_core::{
-		crypto::Pair as CryptoPair,
-		H256,
-		Public
+	use node_primitives::{Block, DigestItem, Signature};
+	use node_runtime::{
+		constants::{currency::CENTS, time::SLOT_DURATION},
+		Address, BalancesCall, Call, UncheckedExtrinsic,
 	};
-	use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore};
+	use sc_client_api::BlockBackend;
+	use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy};
+	use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY};
+	use sc_consensus_epochs::descendent_query;
+	use sc_keystore::LocalKeystore;
+	use sc_service_test::TestNetNode;
+	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool};
+	use sp_consensus::{BlockOrigin, Environment, Proposer};
+	use sp_core::{crypto::Pair as CryptoPair, Public, H256};
+	use sp_inherents::InherentDataProvider;
+	use sp_keyring::AccountKeyring;
+	use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
 	use sp_runtime::{
-		generic::{BlockId, Era, Digest, SignedPayload},
-		traits::{Block as BlockT, Header as HeaderT},
-		traits::Verify,
+		generic::{BlockId, Digest, Era, SignedPayload},
+		key_types::BABE,
+		traits::{Block as BlockT, Header as HeaderT, IdentifyAccount, Verify},
+		RuntimeAppPublic,
 	};
 	use sp_timestamp;
-	use sp_keyring::AccountKeyring;
-	use sc_service_test::TestNetNode;
-	use crate::service::{new_full_base, new_light_base, NewFullBase};
-	use sp_runtime::{key_types::BABE, traits::IdentifyAccount, RuntimeAppPublic};
-	use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent};
-	use sc_client_api::BlockBackend;
-	use sc_keystore::LocalKeystore;
+	use std::{borrow::Cow, convert::TryInto, sync::Arc};
 
 	type AccountPublic = <Signature as Verify>::Signer;
 
@@ -500,16 +646,20 @@ mod tests {
 		// This can be run locally with `cargo test --release -p node-cli test_sync -- --ignored`.
#[ignore] fn test_sync() { + sp_tracing::try_init_simple(); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); - let alice: sp_consensus_babe::AuthorityId = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) - .expect("Creates authority pair").into(); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); + let alice: sp_consensus_babe::AuthorityId = + SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) + .expect("Creates authority pair") + .into(); let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); // For the block factory - let mut slot_num = 1u64; + let mut slot = 1u64; // For the extrinsics factory let bob = Arc::new(AccountKeyring::Bob.pair()); @@ -520,89 +670,105 @@ mod tests { chain_spec, |config| { let mut setup_handles = None; - let NewFullBase { - task_manager, inherent_data_providers, client, network, transaction_pool, .. - } = new_full_base(config, - | - block_import: &sc_consensus_babe::BabeBlockImport, - babe_link: &sc_consensus_babe::BabeLink, - | { - setup_handles = Some((block_import.clone(), babe_link.clone())); - } - )?; + let NewFullBase { task_manager, client, network, transaction_pool, .. } = + new_full_base( + config, + |block_import: &sc_consensus_babe::BabeBlockImport, + babe_link: &sc_consensus_babe::BabeLink| { + setup_handles = Some((block_import.clone(), babe_link.clone())); + }, + )?; let node = sc_service_test::TestNetComponents::new( - task_manager, client, network, transaction_pool + task_manager, + client, + network, + transaction_pool, ); - Ok((node, (inherent_data_providers, setup_handles.unwrap()))) + Ok((node, setup_handles.unwrap())) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + Ok(sc_service_test::TestNetComponents::new( + keep_alive, + client, + network, + transaction_pool, + )) }, - |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { - let mut inherent_data = inherent_data_providers - .create_inherent_data() - .expect("Creates inherent data."); - + |service, &mut (ref mut block_import, ref babe_link)| { let parent_id = BlockId::number(service.client().chain_info().best_number); let parent_header = service.client().header(&parent_id).unwrap().unwrap(); let parent_hash = parent_header.hash(); let parent_number = *parent_header.number(); - futures::executor::block_on( - service.transaction_pool().maintain( - ChainEvent::NewBestBlock { - hash: parent_header.hash(), - tree_route: None, - }, - ) - ); + futures::executor::block_on(service.transaction_pool().maintain( + ChainEvent::NewBestBlock { hash: parent_header.hash(), tree_route: None }, + )); let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( service.spawn_handle(), service.client(), service.transaction_pool(), None, + None, ); - let epoch_descriptor = babe_link.epoch_changes().lock().epoch_descriptor_for_child_of( - descendent_query(&*service.client()), - &parent_hash, - parent_number, - slot_num, - ).unwrap().unwrap(); - let mut digest = Digest::::default(); // even though there's only one authority some slots might be empty, // so we must keep trying the next slots until we can claim one. 
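// Aside (sketch, not part of the diff): the retry below relies on Rust's
// loop-with-value form, where `break EXPR` makes the whole `loop` evaluate to
// EXPR, so the claimed pre-digest and epoch descriptor fall straight out of the
// loop. Minimal illustration, with `slot % 3 == 0` standing in for a
// successful slot claim:
fn first_claimable_slot(mut slot: u64) -> u64 {
	loop {
		if slot % 3 == 0 {
			break slot // the `loop` expression evaluates to this value
		}
		slot += 1; // otherwise keep trying the next slot, as the test does
	}
}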
- let babe_pre_digest = loop { - inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); - if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( - slot_num, - &parent_header, - &*service.client(), - keystore.clone(), - &babe_link, - ) { - break babe_pre_digest; + let (babe_pre_digest, epoch_descriptor) = loop { + let epoch_descriptor = babe_link + .epoch_changes() + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*service.client()), + &parent_hash, + parent_number, + slot.into(), + ) + .unwrap() + .unwrap(); + + let epoch = babe_link + .epoch_changes() + .shared_data() + .epoch_data(&epoch_descriptor, |slot| { + sc_consensus_babe::Epoch::genesis(&babe_link.config(), slot) + }) + .unwrap(); + + if let Some(babe_pre_digest) = + sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) + .map(|(digest, _)| digest) + { + break (babe_pre_digest, epoch_descriptor) } - slot_num += 1; + slot += 1; }; + let inherent_data = ( + sp_timestamp::InherentDataProvider::new( + std::time::Duration::from_millis(SLOT_DURATION * slot).into(), + ), + sp_consensus_babe::inherents::InherentDataProvider::new(slot.into()), + ) + .create_inherent_data() + .expect("Creates inherent data"); + digest.push(::babe_pre_digest(babe_pre_digest)); let new_block = futures::executor::block_on(async move { let proposer = proposer_factory.init(&parent_header).await; - proposer.unwrap().propose( - inherent_data, - digest, - std::time::Duration::from_secs(1), - RecordProof::Yes, - ).await - }).expect("Error making test block").block; + proposer + .unwrap() + .propose(inherent_data, digest, std::time::Duration::from_secs(1), None) + .await + }) + .expect("Error making test block") + .block; let (new_header, new_body) = new_block.deconstruct(); let pre_hash = new_header.hash(); @@ -614,24 +780,24 @@ mod tests { sp_consensus_babe::AuthorityId::ID, &alice.to_public_crypto_pair(), &to_sign, - ).unwrap() - .try_into() - .unwrap(); - let item = ::babe_seal( - signature, - ); - slot_num += 1; + ) + .unwrap() + .unwrap() + .try_into() + .unwrap(); + let item = ::babe_seal(signature); + slot += 1; let mut params = BlockImportParams::new(BlockOrigin::File, new_header); params.post_digests.push(item); params.body = Some(new_body); params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - block_import.import_block(params, Default::default()) + futures::executor::block_on(block_import.import_block(params, Default::default())) .expect("error importing test block"); }, |service, _| { @@ -646,7 +812,8 @@ mod tests { }; let signer = charlie.clone(); - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); + let function = + Call::Balances(BalancesCall::transfer { dest: to.into(), value: amount }); let check_spec_version = frame_system::CheckSpecVersion::new(); let check_tx_version = frame_system::CheckTxVersion::new(); @@ -667,19 +834,13 @@ mod tests { let raw_payload = SignedPayload::from_raw( function, extra, - (spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()) + (spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()), ); - let signature = raw_payload.using_encoded(|payload| { - signer.sign(payload) - }); + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); let (function, extra, _) 
= raw_payload.deconstruct(); index += 1; - UncheckedExtrinsic::new_signed( - function, - from.into(), - signature.into(), - extra, - ).into() + UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), extra) + .into() }, ); } @@ -687,21 +848,30 @@ mod tests { #[test] #[ignore] fn test_consensus() { + sp_tracing::try_init_simple(); + sc_service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), |config| { - let NewFullBase { task_manager, client, network, transaction_pool, .. } - = new_full_base(config,|_, _| ())?; - Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) + let NewFullBase { task_manager, client, network, transaction_pool, .. } = + new_full_base(config, |_, _| ())?; + Ok(sc_service_test::TestNetComponents::new( + task_manager, + client, + network, + transaction_pool, + )) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + Ok(sc_service_test::TestNetComponents::new( + keep_alive, + client, + network, + transaction_pool, + )) }, - vec![ - "//Alice".into(), - "//Bob".into(), - ], + vec!["//Alice".into(), "//Bob".into()], ) } } diff --git a/bin/node/cli/tests/build_spec_works.rs b/bin/node/cli/tests/build_spec_works.rs index 800a4a8c51e61..6d863ea7f949d 100644 --- a/bin/node/cli/tests/build_spec_works.rs +++ b/bin/node/cli/tests/build_spec_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index 34078b08cf074..707fd217e33e8 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -28,7 +28,7 @@ pub mod common; fn check_block_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_dev_node_for_a_while(base_path.path()); + common::run_node_for_a_while(base_path.path(), &["--dev"]); let status = Command::new(cargo_bin("substrate")) .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 61a07dd1ca877..54b9c749bf1de 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,11 +18,18 @@ #![cfg(unix)] -use std::{process::{Child, ExitStatus}, thread, time::Duration, path::Path}; use assert_cmd::cargo::cargo_bin; -use std::{convert::TryInto, process::Command}; -use nix::sys::signal::{kill, Signal::SIGINT}; -use nix::unistd::Pid; +use nix::{ + sys::signal::{kill, Signal::SIGINT}, + unistd::Pid, +}; +use std::{ + convert::TryInto, + path::Path, + process::{Child, Command, ExitStatus}, + thread, + time::Duration, +}; /// Wait for the given `child` the given number of `secs`. /// @@ -47,15 +54,10 @@ pub fn wait_for(child: &mut Child, secs: usize) -> Option { } /// Run the node for a while (30 seconds) -pub fn run_dev_node_for_a_while(base_path: &Path) { +pub fn run_node_for_a_while(base_path: &Path, args: &[&str]) { let mut cmd = Command::new(cargo_bin("substrate")); - let mut cmd = cmd - .args(&["--dev"]) - .arg("-d") - .arg(base_path) - .spawn() - .unwrap(); + let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap(); // Let it produce some blocks. thread::sleep(Duration::from_secs(30)); @@ -65,3 +67,14 @@ pub fn run_dev_node_for_a_while(base_path: &Path) { kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); assert!(wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); } + +/// Run the node asserting that it fails with an error +pub fn run_node_assert_fail(base_path: &Path, args: &[&str]) { + let mut cmd = Command::new(cargo_bin("substrate")); + + let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap(); + + // Let it produce some blocks. + thread::sleep(Duration::from_secs(10)); + assert!(cmd.try_wait().unwrap().is_some(), "the process should not be running anymore"); +} diff --git a/bin/node/cli/tests/database_role_subdir_migration.rs b/bin/node/cli/tests/database_role_subdir_migration.rs new file mode 100644 index 0000000000000..516908111ae72 --- /dev/null +++ b/bin/node/cli/tests/database_role_subdir_migration.rs @@ -0,0 +1,115 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
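Before the test body, the layout this new file pins down: the client database now lives in a per-role subdirectory (`db/full` for full nodes, `db/light` for light clients), and starting with a mismatched role must fail rather than silently reuse the old directory. A std-only sketch of that assumed layout (the `role_db_path` helper and paths are illustrative, not part of the patch):

use std::path::{Path, PathBuf};

fn role_db_path(base: &Path, light: bool) -> PathBuf {
	// Mirrors the layout asserted below: <base>/chains/dev/db/<role>
	base.join("chains/dev/db").join(if light { "light" } else { "full" })
}

fn main() {
	let base = Path::new("/tmp/node");
	assert!(role_db_path(base, true).ends_with("db/light"));
	assert!(role_db_path(base, false).ends_with("db/full"));
}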
+ +use sc_client_db::{ + light::LightStorage, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode, + TransactionStorageMode, +}; +use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; +use tempfile::tempdir; + +pub mod common; + +#[test] +#[cfg(unix)] +fn database_role_subdir_migration() { + type Block = RawBlock>; + + let base_path = tempdir().expect("could not create a temp dir"); + let path = base_path.path().join("chains/dev/db"); + // create a dummy database dir + { + let _old_db = LightStorage::::new(DatabaseSettings { + state_cache_size: 0, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + source: DatabaseSource::RocksDb { path: path.to_path_buf(), cache_size: 128 }, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + }) + .unwrap(); + } + + assert!(path.join("db_version").exists()); + assert!(!path.join("light").exists()); + + // start a light client + common::run_node_for_a_while( + base_path.path(), + &[ + "--dev", + "--light", + "--port", + "30335", + "--rpc-port", + "44444", + "--ws-port", + "44445", + "--no-prometheus", + ], + ); + + // check if the database dir had been migrated + assert!(!path.join("db_version").exists()); + assert!(path.join("light/db_version").exists()); +} + +#[test] +#[cfg(unix)] +fn database_role_subdir_migration_fail_on_different_role() { + type Block = RawBlock>; + + let base_path = tempdir().expect("could not create a temp dir"); + let path = base_path.path().join("chains/dev/db"); + + // create a database with the old layout + { + let _old_db = LightStorage::::new(DatabaseSettings { + state_cache_size: 0, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + source: DatabaseSource::RocksDb { path: path.to_path_buf(), cache_size: 128 }, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + }) + .unwrap(); + } + + assert!(path.join("db_version").exists()); + assert!(!path.join("light/db_version").exists()); + + // start a client with a different role (full), it should fail and not change any files on disk + common::run_node_assert_fail( + &base_path.path(), + &[ + "--dev", + "--port", + "30334", + "--rpc-port", + "44446", + "--ws-port", + "44447", + "--no-prometheus", + ], + ); + + // check if the files are unchanged + assert!(path.join("db_version").exists()); + assert!(!path.join("light/db_version").exists()); + assert!(!path.join("full/db_version").exists()); +} diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index 557e722ddb7b5..7cbaa152699b4 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,9 +19,9 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; -use std::{process::Command, fs, path::PathBuf}; -use tempfile::{tempdir, TempDir}; use regex::Regex; +use std::{fs, path::PathBuf, process::Command}; +use tempfile::{tempdir, TempDir}; pub mod common; @@ -63,26 +63,24 @@ impl<'a> ExportImportRevertExecutor<'a> { fn new( base_path: &'a TempDir, exported_blocks_file: &'a PathBuf, - db_path: &'a PathBuf + db_path: &'a PathBuf, ) -> Self { - Self { - base_path, - exported_blocks_file, - db_path, - num_exported_blocks: None, - } + Self { base_path, exported_blocks_file, db_path, num_exported_blocks: None } } /// Helper method to run a command. Returns a string corresponding to what has been logged. - fn run_block_command(&self, + fn run_block_command( + &self, sub_command: SubCommand, format_opt: FormatOpt, - expected_to_fail: bool + expected_to_fail: bool, ) -> String { let sub_command_str = sub_command.to_string(); // Adding "--binary" if need be. let arguments: Vec<&str> = match format_opt { - FormatOpt::Binary => vec![&sub_command_str, "--dev", "--pruning", "archive", "--binary", "-d"], + FormatOpt::Binary => { + vec![&sub_command_str, "--dev", "--pruning", "archive", "--binary", "-d"] + }, FormatOpt::Json => vec![&sub_command_str, "--dev", "--pruning", "archive", "-d"], }; @@ -94,7 +92,7 @@ impl<'a> ExportImportRevertExecutor<'a> { SubCommand::ImportBlocks => { tmp = tempdir().unwrap(); tmp.path() - } + }, }; // Running the command and capturing the output. @@ -144,16 +142,13 @@ impl<'a> ExportImportRevertExecutor<'a> { if !expected_to_fail { // Using regex to find out how much block we imported, // and what's the best current block. - let re = Regex::new(r"Imported (?P\d*) blocks. Best: #(?P\d*)").unwrap(); + let re = + Regex::new(r"Imported (?P\d*) blocks. Best: #(?P\d*)").unwrap(); let caps = re.captures(&log).expect("capture should have succeeded"); let imported = caps["imported"].parse::().unwrap(); let best = caps["best"].parse::().unwrap(); - assert_eq!( - imported, - best, - "numbers of blocks imported and best number differs" - ); + assert_eq!(imported, best, "numbers of blocks imported and best number differs"); assert_eq!( best, self.num_exported_blocks.expect("number of exported blocks cannot be None; qed"), @@ -193,13 +188,9 @@ fn export_import_revert() { let exported_blocks_file = base_path.path().join("exported_blocks"); let db_path = base_path.path().join("db"); - common::run_dev_node_for_a_while(base_path.path()); + common::run_node_for_a_while(base_path.path(), &["--dev"]); - let mut executor = ExportImportRevertExecutor::new( - &base_path, - &exported_blocks_file, - &db_path, - ); + let mut executor = ExportImportRevertExecutor::new(&base_path, &exported_blocks_file, &db_path); // Binary and binary should work. executor.run(FormatOpt::Binary, FormatOpt::Binary, false); diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index aa9653acadba5..2a89801547a4b 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -28,7 +28,7 @@ pub mod common; fn inspect_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_dev_node_for_a_while(base_path.path()); + common::run_node_for_a_while(base_path.path(), &["--dev"]); let status = Command::new(cargo_bin("substrate")) .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 001bed8b136f5..0f16a51e5d0a4 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -27,7 +27,7 @@ pub mod common; fn purge_chain_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_dev_node_for_a_while(base_path.path()); + common::run_node_for_a_while(base_path.path(), &["--dev"]); let status = Command::new(cargo_bin("substrate")) .args(&["purge-chain", "--dev", "-d"]) @@ -39,5 +39,5 @@ fn purge_chain_works() { // Make sure that the `dev` chain folder exists, but the `db` is deleted. assert!(base_path.path().join("chains/dev/").exists()); - assert!(!base_path.path().join("chains/dev/db").exists()); + assert!(!base_path.path().join("chains/dev/db/full").exists()); } diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index bd79dcd77a49a..03a1826f2f080 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,18 +16,30 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
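The shutdown mechanism these CLI tests use is a programmatic Ctrl+C: `nix::sys::signal::kill` delivers SIGINT to the child's PID, and the test then asserts a graceful exit. A self-contained sketch of just that step, run against a stand-in `sleep` process instead of the substrate binary (unix only; unlike substrate, `sleep` does not trap SIGINT, so it dies from the signal and reports no exit code):

use nix::{
	sys::signal::{kill, Signal::SIGINT},
	unistd::Pid,
};
use std::{convert::TryInto, process::Command};

fn main() {
	let mut child = Command::new("sleep").arg("60").spawn().unwrap();

	// Deliver Ctrl+C programmatically, exactly as the tests do.
	kill(Pid::from_raw(child.id().try_into().unwrap()), SIGINT).unwrap();

	let status = child.wait().unwrap();
	// A process killed by a signal has no exit code; substrate instead traps
	// SIGINT and exits successfully, which is what the tests assert.
	assert!(status.code().is_none());
}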
+#![cfg(unix)] + use assert_cmd::cargo::cargo_bin; -use std::{convert::TryInto, process::Command, thread, time::Duration}; +use nix::{ + sys::signal::{ + kill, + Signal::{self, SIGINT, SIGTERM}, + }, + unistd::Pid, +}; +use sc_service::Deref; +use std::{ + convert::TryInto, + ops::DerefMut, + process::{Child, Command}, + thread, + time::Duration, +}; use tempfile::tempdir; pub mod common; #[test] -#[cfg(unix)] fn running_the_node_works_and_can_be_interrupted() { - use nix::sys::signal::{kill, Signal::{self, SIGINT, SIGTERM}}; - use nix::unistd::Pid; - fn run_command_and_kill(signal: Signal) { let base_path = tempdir().expect("could not create a temp dir"); let mut cmd = Command::new(cargo_bin("substrate")) @@ -50,3 +62,57 @@ fn running_the_node_works_and_can_be_interrupted() { run_command_and_kill(SIGINT); run_command_and_kill(SIGTERM); } + +struct KillChildOnDrop(Child); + +impl Drop for KillChildOnDrop { + fn drop(&mut self) { + let _ = self.0.kill(); + } +} + +impl Deref for KillChildOnDrop { + type Target = Child; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for KillChildOnDrop { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[test] +fn running_two_nodes_with_the_same_ws_port_should_work() { + fn start_node() -> Child { + Command::new(cargo_bin("substrate")) + .args(&["--dev", "--tmp", "--ws-port=45789"]) + .spawn() + .unwrap() + } + + let mut first_node = KillChildOnDrop(start_node()); + let mut second_node = KillChildOnDrop(start_node()); + + thread::sleep(Duration::from_secs(30)); + + assert!(first_node.try_wait().unwrap().is_none(), "The first node should still be running"); + assert!(second_node.try_wait().unwrap().is_none(), "The second node should still be running"); + + kill(Pid::from_raw(first_node.id().try_into().unwrap()), SIGINT).unwrap(); + kill(Pid::from_raw(second_node.id().try_into().unwrap()), SIGINT).unwrap(); + + assert_eq!( + common::wait_for(&mut first_node, 30).map(|x| x.success()), + Some(true), + "The first node must exit gracefully", + ); + assert_eq!( + common::wait_for(&mut second_node, 30).map(|x| x.success()), + Some(true), + "The second node must exit gracefully", + ); +} diff --git a/bin/node/cli/tests/telemetry.rs b/bin/node/cli/tests/telemetry.rs new file mode 100644 index 0000000000000..78a306284c4ac --- /dev/null +++ b/bin/node/cli/tests/telemetry.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
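The heart of the telemetry test below is a JSON check: each binary frame is deserialized with `serde_json`, and the mock server loops until a frame's `payload` object contains a string `best` field, meaning the node has reported a best block. A sketch of just that check, with a made-up sample frame:

fn main() {
	let frame = br#"{"id":1,"payload":{"best":"0xabcd","height":42}}"#;
	let json: serde_json::Value = serde_json::from_slice(frame).unwrap();
	let payload = json.as_object().unwrap().get("payload").unwrap().as_object().unwrap();
	// Same condition the test loop uses to decide the node is reporting blocks.
	assert!(matches!(payload.get("best"), Some(serde_json::Value::String(_))));
}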
+ +use assert_cmd::cargo::cargo_bin; +use nix::{ + sys::signal::{kill, Signal::SIGINT}, + unistd::Pid, +}; +use std::{convert::TryInto, process}; + +pub mod common; +pub mod websocket_server; + +#[async_std::test] +async fn telemetry_works() { + let config = websocket_server::Config { + capacity: 1, + max_frame_size: 1048 * 1024, + send_buffer_len: 32, + bind_address: "127.0.0.1:0".parse().unwrap(), + }; + let mut server = websocket_server::WsServer::new(config).await.unwrap(); + + let addr = server.local_addr().unwrap(); + + let server_task = async_std::task::spawn(async move { + loop { + use websocket_server::Event; + match server.next_event().await { + // New connection on the listener. + Event::ConnectionOpen { address } => { + println!("New connection from {:?}", address); + server.accept(); + }, + + // Received a message from a connection. + Event::BinaryFrame { message, .. } => { + let json: serde_json::Value = serde_json::from_slice(&message).unwrap(); + let object = + json.as_object().unwrap().get("payload").unwrap().as_object().unwrap(); + if matches!(object.get("best"), Some(serde_json::Value::String(_))) { + break + } + }, + + Event::TextFrame { .. } => panic!("Got a TextFrame over the socket, this is a bug"), + + // Connection has been closed. + Event::ConnectionError { .. } => {}, + } + } + }); + + let mut substrate = process::Command::new(cargo_bin("substrate")); + + let mut substrate = substrate + .args(&["--dev", "--tmp", "--telemetry-url"]) + .arg(format!("ws://{} 10", addr)) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .stdin(process::Stdio::null()) + .spawn() + .unwrap(); + + server_task.await; + + assert!(substrate.try_wait().unwrap().is_none(), "the process should still be running"); + + // Stop the process + kill(Pid::from_raw(substrate.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(common::wait_for(&mut substrate, 40).map(|x| x.success()).unwrap_or_default()); + + let output = substrate.wait_with_output().unwrap(); + + println!("{}", String::from_utf8(output.stdout).unwrap()); + eprintln!("{}", String::from_utf8(output.stderr).unwrap()); + assert!(output.status.success()); +} diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index 9351568d87955..c107740b9b0a5 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,15 +19,19 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; -use nix::sys::signal::{kill, Signal::SIGINT}; -use nix::unistd::Pid; +use nix::{ + sys::signal::{kill, Signal::SIGINT}, + unistd::Pid, +}; use regex::Regex; -use std::convert::TryInto; -use std::io::Read; -use std::path::PathBuf; -use std::process::{Command, Stdio}; -use std::thread; -use std::time::Duration; +use std::{ + convert::TryInto, + io::Read, + path::PathBuf, + process::{Command, Stdio}, + thread, + time::Duration, +}; pub mod common; @@ -44,29 +48,18 @@ fn temp_base_path_works() { // Let it produce some blocks. 
thread::sleep(Duration::from_secs(30)); - assert!( - cmd.try_wait().unwrap().is_none(), - "the process should still be running" - ); + assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut cmd, 40) - .map(|x| x.success()) - .unwrap_or_default()); + assert!(common::wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); // Ensure the database has been deleted let mut stderr = String::new(); cmd.stderr.unwrap().read_to_string(&mut stderr).unwrap(); let re = Regex::new(r"Database: .+ at (\S+)").unwrap(); - let db_path = PathBuf::from( - re.captures(stderr.as_str()) - .unwrap() - .get(1) - .unwrap() - .as_str() - .to_string(), - ); + let db_path = + PathBuf::from(re.captures(stderr.as_str()).unwrap().get(1).unwrap().as_str().to_string()); assert!(!db_path.exists()); } diff --git a/bin/node/cli/tests/version.rs b/bin/node/cli/tests/version.rs index bbc9139d4f0f8..5ed3a9a8800c8 100644 --- a/bin/node/cli/tests/version.rs +++ b/bin/node/cli/tests/version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,61 +22,45 @@ use regex::Regex; use std::process::Command; fn expected_regex() -> Regex { - Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+|unknown)-(.+?)-(.+?)(?:-(.+))?$").unwrap() + Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+|unknown)-(.+?)-(.+?)(?:-(.+))?$") + .unwrap() } #[test] fn version_is_full() { let expected = expected_regex(); - let output = Command::new(cargo_bin("substrate")) - .args(&["--version"]) - .output() - .unwrap(); + let output = Command::new(cargo_bin("substrate")).args(&["--version"]).output().unwrap(); - assert!( - output.status.success(), - "command returned with non-success exit code" - ); + assert!(output.status.success(), "command returned with non-success exit code"); let output = String::from_utf8_lossy(&output.stdout).trim().to_owned(); - let captures = expected - .captures(output.as_str()) - .expect("could not parse version in output"); + let captures = expected.captures(output.as_str()).expect("could not parse version in output"); assert_eq!(&captures[1], env!("CARGO_PKG_VERSION")); assert_eq!(&captures[3], TARGET_ARCH.as_str()); assert_eq!(&captures[4], TARGET_OS.as_str()); - assert_eq!( - captures.get(5).map(|x| x.as_str()), - TARGET_ENV.map(|x| x.as_str()) - ); + assert_eq!(captures.get(5).map(|x| x.as_str()), TARGET_ENV.map(|x| x.as_str())); } #[test] fn test_regex_matches_properly() { let expected = expected_regex(); - let captures = expected - .captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu") - .unwrap(); + let captures = expected.captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu").unwrap(); assert_eq!(&captures[1], "2.0.0"); assert_eq!(&captures[2], "da487d19d"); assert_eq!(&captures[3], "x86_64"); assert_eq!(&captures[4], "linux"); assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - let captures = expected - .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu") - .unwrap(); + let captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu").unwrap(); assert_eq!(&captures[1], "2.0.0-alpha.5"); assert_eq!(&captures[2], "da487d19d"); assert_eq!(&captures[3], 
"x86_64"); assert_eq!(&captures[4], "linux"); assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - let captures = expected - .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux") - .unwrap(); + let captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux").unwrap(); assert_eq!(&captures[1], "2.0.0-alpha.5"); assert_eq!(&captures[2], "da487d19d"); assert_eq!(&captures[3], "x86_64"); diff --git a/bin/node/cli/tests/websocket_server.rs b/bin/node/cli/tests/websocket_server.rs new file mode 100644 index 0000000000000..658b8de463454 --- /dev/null +++ b/bin/node/cli/tests/websocket_server.rs @@ -0,0 +1,274 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use async_std::net::{TcpListener, TcpStream}; +use core::pin::Pin; +use futures::prelude::*; +use soketto::handshake::{server::Response, Server}; +use std::{io, net::SocketAddr}; + +/// Configuration for a [`WsServer`]. +pub struct Config { + /// IP address to try to bind to. + pub bind_address: SocketAddr, + + /// Maximum size, in bytes, of a frame sent by the remote. + /// + /// Since the messages are entirely buffered before being returned, a maximum value is + /// necessary in order to prevent malicious clients from sending huge frames that would + /// occupy a lot of memory. + pub max_frame_size: usize, + + /// Number of pending messages to buffer up for sending before the socket is considered + /// unresponsive. + pub send_buffer_len: usize, + + /// Pre-allocated capacity for the list of connections. + pub capacity: usize, +} + +/// Identifier for a connection with regard to a [`WsServer`]. +/// +/// After a connection has been closed, its [`ConnectionId`] might be reused. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub struct ConnectionId(u64); + +/// A WebSocket message. +pub enum Message { + Text(String), + Binary(Vec), +} + +/// WebSockets listening socket and list of open connections. +pub struct WsServer { + /// Value passed through [`Config::max_frame_size`]. + max_frame_size: usize, + + /// Endpoint for incoming TCP sockets. + listener: TcpListener, + + /// Pending incoming connection to accept. Accepted by calling [`WsServer::accept`]. + pending_incoming: Option, + + /// List of TCP connections that are currently negotiating the WebSocket handshake. + /// + /// The output can be an error if the handshake fails. + negotiating: stream::FuturesUnordered< + Pin< + Box< + dyn Future, Box>> + + Send, + >, + >, + >, + + /// List of streams of incoming messages for all connections. + incoming_messages: stream::SelectAll< + Pin>> + Send>>, + >, + + /// Tasks dedicated to closing sockets that have been rejected. + rejected_sockets: stream::FuturesUnordered + Send>>>, +} + +impl WsServer { + /// Try opening a TCP listening socket. 
+ /// + /// Returns an error if the listening socket fails to open. + pub async fn new(config: Config) -> Result { + let listener = TcpListener::bind(config.bind_address).await?; + + Ok(WsServer { + max_frame_size: config.max_frame_size, + listener, + pending_incoming: None, + negotiating: stream::FuturesUnordered::new(), + incoming_messages: stream::SelectAll::new(), + rejected_sockets: stream::FuturesUnordered::new(), + }) + } + + /// Address of the local TCP listening socket, as provided by the operating system. + pub fn local_addr(&self) -> Result { + self.listener.local_addr() + } + + /// Accepts the pending connection. + /// + /// Either [`WsServer::accept`] or [`WsServer::reject`] must be called after a + /// [`Event::ConnectionOpen`] event is returned. + /// + /// # Panic + /// + /// Panics if no connection is pending. + pub fn accept(&mut self) { + let pending_incoming = self.pending_incoming.take().expect("no pending socket"); + + self.negotiating.push(Box::pin(async move { + let mut server = Server::new(pending_incoming); + + let websocket_key = match server.receive_request().await { + Ok(req) => req.into_key(), + Err(err) => return Err(Box::new(err) as Box<_>), + }; + + match server + .send_response(&{ Response::Accept { key: &websocket_key, protocol: None } }) + .await + { + Ok(()) => {}, + Err(err) => return Err(Box::new(err) as Box<_>), + }; + + Ok(server) + })); + } + + /// Reject the pending connection. + /// + /// Either [`WsServer::accept`] or [`WsServer::reject`] must be called after a + /// [`Event::ConnectionOpen`] event is returned. + /// + /// # Panic + /// + /// Panics if no connection is pending. + pub fn reject(&mut self) { + let _ = self.pending_incoming.take().expect("no pending socket"); + } + + /// Returns the next event happening on the server. + pub async fn next_event(&mut self) -> Event { + loop { + futures::select! { + // Only try to fetch a new incoming connection if none is pending. + socket = { + let listener = &self.listener; + let has_pending = self.pending_incoming.is_some(); + async move { + if !has_pending { + listener.accept().await + } else { + loop { futures::pending!() } + } + } + }.fuse() => { + let (socket, address) = match socket { + Ok(s) => s, + Err(_) => continue, + }; + debug_assert!(self.pending_incoming.is_none()); + self.pending_incoming = Some(socket); + return Event::ConnectionOpen { address }; + }, + + result = self.negotiating.select_next_some() => { + let server = match result { + Ok(s) => s, + Err(error) => return Event::ConnectionError { + error, + }, + }; + + let (mut _sender, receiver) = { + let mut builder = server.into_builder(); + builder.set_max_frame_size(self.max_frame_size); + builder.set_max_message_size(self.max_frame_size); + builder.finish() + }; + + // Spawn a task dedicated to receiving messages from the socket. + self.incoming_messages.push({ + // Turn `receiver` into a stream of received packets. 
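+                        // (Editor's note: `stream::unfold` threads a state value
+                        // through an async closure; returning `Some((item, state))`
+                        // emits an item and continues, while `None` would end the
+                        // stream. Here the state is the socket receiver plus a
+                        // reusable buffer, so one allocation is recycled across
+                        // frames via `buf.clear()`.)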
+ let socket_packets = stream::unfold((receiver, Vec::new()), move |(mut receiver, mut buf)| async { + buf.clear(); + let ret = match receiver.receive_data(&mut buf).await { + Ok(soketto::Data::Text(len)) => String::from_utf8(buf[..len].to_vec()) + .map(Message::Text) + .map_err(|err| Box::new(err) as Box<_>), + Ok(soketto::Data::Binary(len)) => Ok(buf[..len].to_vec()) + .map(Message::Binary), + Err(err) => Err(Box::new(err) as Box<_>), + }; + Some((ret, (receiver, buf))) + }); + + Box::pin(socket_packets.map(move |msg| (msg))) + }); + }, + + result = self.incoming_messages.select_next_some() => { + let message = match result { + Ok(m) => m, + Err(error) => return Event::ConnectionError { + error, + }, + }; + + match message { + Message::Text(message) => { + return Event::TextFrame { + message, + } + } + Message::Binary(message) => { + return Event::BinaryFrame { + message, + } + } + } + }, + + _ = self.rejected_sockets.select_next_some() => { + } + } + } + } +} + +/// Event that has happened on a [`WsServer`]. +#[derive(Debug)] +pub enum Event { + /// A new TCP connection has arrived on the listening socket. + /// + /// The connection *must* be accepted or rejected using [`WsServer::accept`] or + /// [`WsServer::reject`]. + /// No other [`Event::ConnectionOpen`] event will be generated until the current pending + /// connection has been either accepted or rejected. + ConnectionOpen { + /// Address of the remote, as provided by the operating system. + address: SocketAddr, + }, + + /// An error has happened on a connection. The connection is now closed and its + /// [`ConnectionId`] is now invalid. + ConnectionError { error: Box }, + + /// A text frame has been received on a connection. + TextFrame { + /// Message sent by the remote. Its content is entirely decided by the client, and + /// nothing must be assumed about the validity of this message. + message: String, + }, + + /// A text frame has been received on a connection. + BinaryFrame { + /// Message sent by the remote. Its content is entirely decided by the client, and + /// nothing must be assumed about the validity of this message. + message: Vec, + }, +} diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index f7bef798e4d02..f283a913915f3 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-executor" -version = "2.0.0" +version = "3.0.0-dev" authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." 
edition = "2018" @@ -12,45 +12,37 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +scale-info = { version = "1.0", features = ["derive"] } node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "2.0.0", path = "../runtime" } -sc-executor = { version = "0.8.0", path = "../../../client/executor" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } -trie-root = "0.16.0" -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } +node-runtime = { version = "3.0.0-dev", path = "../runtime" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } +sp-trie = { version = "4.0.0-dev", path = "../../../primitives/trie" } +frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" -frame-support = { version = "2.0.0", path = "../../../frame/support" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } -node-testing = { version = "2.0.0", path = "../testing" } -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0", path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -pallet-session = { version = "2.0.0", path = "../../../frame/session" } -pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } -substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } +frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } +frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } +node-testing = { version = "3.0.0-dev", path = "../testing" } +pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" } +pallet-contracts = { version = "4.0.0-dev", path = "../../../frame/contracts" } +pallet-im-online = { version = "4.0.0-dev", path = "../../../frame/im-online" } +pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } +pallet-treasury = { version = "4.0.0-dev", path = "../../../frame/treasury" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } +sp-consensus-babe = { version 
= "0.10.0-dev", path = "../../../primitives/consensus/babe" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-externalities = { version = "0.10.0-dev", path = "../../../primitives/externalities" } wat = "1.0" +futures = "0.3.9" [features] -wasmtime = [ - "sc-executor/wasmtime", -] -wasmi-errno = [ - "sc-executor/wasmi-errno", -] +wasmtime = ["sc-executor/wasmtime"] +wasmi-errno = ["sc-executor/wasmi-errno"] stress-test = [] [[bench]] diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 168cff0ff4568..1a39c9decb321 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,29 +16,33 @@ // limitations under the License. use codec::{Decode, Encode}; -use criterion::{BatchSize, Criterion, criterion_group, criterion_main}; -use node_executor::Executor; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; +use frame_support::Hashable; +use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_runtime::{ - Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, UncheckedExtrinsic, + constants::currency::*, Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, + UncheckedExtrinsic, }; -use node_runtime::constants::currency::*; use node_testing::keyring::*; -use sp_core::{NativeOrEncoded, NeverNativeValue}; -use sp_core::storage::well_known_keys; -use sp_core::traits::{CodeExecutor, RuntimeCode}; -use frame_support::Hashable; -use sp_state_machine::TestExternalities as CoreTestExternalities; -use sc_executor::{NativeExecutor, RuntimeInfo, WasmExecutionMethod, Externalities}; +use sc_executor::{Externalities, NativeElseWasmExecutor, RuntimeVersionOf, WasmExecutionMethod}; +use sp_core::{ + storage::well_known_keys, + traits::{CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, +}; use sp_runtime::traits::BlakeTwo256; +use sp_state_machine::TestExternalities as CoreTestExternalities; criterion_group!(benches, bench_execute_block); criterion_main!(benches); /// The wasm runtime code. pub fn compact_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY.expect("Development wasm binary is not available. \ - Testing is only supported with the flag disabled.") + node_runtime::WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only supported with the flag \ + disabled.", + ) } const GENESIS_HASH: [u8; 32] = [69u8; 32]; @@ -66,27 +70,29 @@ fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities( - executor: &NativeExecutor, + executor: &NativeElseWasmExecutor, ext: &mut E, number: BlockNumber, parent_hash: Hash, extrinsics: Vec, ) -> (Vec, Hash) { - use sp_trie::{TrieConfiguration, trie_types::Layout}; + use sp_trie::{trie_types::Layout, TrieConfiguration}; // sign extrinsics. let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. 
- let extrinsics_root = Layout::::ordered_trie_root( - extrinsics.iter().map(Encode::encode) - ).to_fixed_bytes() - .into(); + let extrinsics_root = + Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); let header = Header { parent_hash, @@ -103,34 +109,44 @@ fn construct_block( }; // execute the block to get the real header. - executor.call:: _>( - ext, - &runtime_code, - "Core_initialize_block", - &header.encode(), - true, - None, - ).0.unwrap(); - - for i in extrinsics.iter() { - executor.call:: _>( + executor + .call:: _>( ext, &runtime_code, - "BlockBuilder_apply_extrinsic", - &i.encode(), + "Core_initialize_block", + &header.encode(), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); + + for i in extrinsics.iter() { + executor + .call:: _>( + ext, + &runtime_code, + "BlockBuilder_apply_extrinsic", + &i.encode(), + true, + None, + ) + .0 + .unwrap(); } - let header = match executor.call:: _>( - ext, - &runtime_code, - "BlockBuilder_finalize_block", - &[0u8;0], - true, - None, - ).0.unwrap() { + let header = match executor + .call:: _>( + ext, + &runtime_code, + "BlockBuilder_finalize_block", + &[0u8; 0], + true, + None, + ) + .0 + .unwrap() + { NativeOrEncoded::Native(_) => unreachable!(), NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), }; @@ -139,45 +155,46 @@ fn construct_block( (Block { header, extrinsics }.encode(), hash.into()) } - -fn test_blocks(genesis_config: &GenesisConfig, executor: &NativeExecutor) - -> Vec<(Vec, Hash)> -{ +fn test_blocks( + genesis_config: &GenesisConfig, + executor: &NativeElseWasmExecutor, +) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); - let mut block1_extrinsics = vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), - }, - ]; - block1_extrinsics.extend((0..20).map(|i| { - CheckedExtrinsic { - signed: Some((alice(), signed_extra(i, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 1 * DOLLARS)), - } + let mut block1_extrinsics = vec![CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set { now: 0 }), + }]; + block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { + signed: Some((alice(), signed_extra(i, 0))), + function: Call::Balances(pallet_balances::Call::transfer { + dest: bob().into(), + value: 1 * DOLLARS, + }), })); - let block1 = construct_block( - executor, - &mut test_ext.ext(), - 1, - GENESIS_HASH.into(), - block1_extrinsics, - ); + let block1 = + construct_block(executor, &mut test_ext.ext(), 1, GENESIS_HASH.into(), block1_extrinsics); vec![block1] } fn bench_execute_block(c: &mut Criterion) { - c.bench_function_over_inputs( - "execute blocks", - |b, strategy| { + let mut group = c.benchmark_group("execute blocks"); + let execution_methods = vec![ + ExecutionMethod::Native, + ExecutionMethod::Wasm(WasmExecutionMethod::Interpreted), + #[cfg(feature = "wasmtime")] + ExecutionMethod::Wasm(WasmExecutionMethod::Compiled), + ]; + + for strategy in execution_methods { + group.bench_function(format!("{:?}", strategy), |b| { let genesis_config = node_testing::genesis::config(false, Some(compact_code_unwrap())); let (use_native, wasm_method) = match strategy { ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted), - ExecutionMethod::Wasm(wasm_method) => (false, *wasm_method), + ExecutionMethod::Wasm(wasm_method) => (false, wasm_method), }; - let executor = NativeExecutor::new(wasm_method, None, 8); + let executor = 
NativeElseWasmExecutor::new(wasm_method, None, 8); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()), hash: vec![1, 2, 3], @@ -196,24 +213,21 @@ fn bench_execute_block(c: &mut Criterion) { || new_test_ext(&genesis_config), |test_ext| { for block in blocks.iter() { - executor.call:: _>( - &mut test_ext.ext(), - &runtime_code, - "Core_execute_block", - &block.0, - use_native, - None, - ).0.unwrap(); + executor + .call:: _>( + &mut test_ext.ext(), + &runtime_code, + "Core_execute_block", + &block.0, + use_native, + None, + ) + .0 + .unwrap(); } }, BatchSize::LargeInput, ); - }, - vec![ - ExecutionMethod::Native, - ExecutionMethod::Wasm(WasmExecutionMethod::Interpreted), - #[cfg(feature = "wasmtime")] - ExecutionMethod::Wasm(WasmExecutionMethod::Compiled), - ], - ); + }); + } } diff --git a/bin/node/executor/src/lib.rs b/bin/node/executor/src/lib.rs index 4c3b82bc7d3b5..9a7a0c4d3c110 100644 --- a/bin/node/executor/src/lib.rs +++ b/bin/node/executor/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,20 @@ //! A `CodeExecutor` specialization which uses natively compiled runtime when the wasm to be //! executed is equivalent to the natively compiled code. -pub use sc_executor::NativeExecutor; -use sc_executor::native_executor_instance; - -// Declare an instance of the native executor named `Executor`. Include the wasm binary as the -// equivalent wasm code. -native_executor_instance!( - pub Executor, - node_runtime::api::dispatch, - node_runtime::native_version, - frame_benchmarking::benchmarking::HostFunctions, -); +pub use sc_executor::NativeElseWasmExecutor; + +// Declare an instance of the native executor named `ExecutorDispatch`. Include the wasm binary as +// the equivalent wasm code. +pub struct ExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + node_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + node_runtime::native_version() + } +} diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 723e3a7e4ba62..c1ab5e5a0fe13 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,32 +15,28 @@ // See the License for the specific language governing permissions and // limitations under the License. 
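With `native_executor_instance!` removed, `ExecutorDispatch` above is an ordinary struct implementing `sc_executor::NativeExecutionDispatch`, and callers construct the executor themselves. A sketch matching the call sites in these benches and tests (the arguments are: wasm execution method, default heap pages, max runtime instances):

    use node_executor::ExecutorDispatch;
    use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod};

    fn make_executor() -> NativeElseWasmExecutor<ExecutorDispatch> {
        // Interpreted wasm fallback, default heap pages, up to 8 runtime instances.
        NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8)
    }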
-use codec::{Encode, Decode, Joiner}; +use codec::{Decode, Encode, Joiner}; use frame_support::{ - StorageValue, StorageMap, traits::Currency, - weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, }; -use sp_core::{NeverNativeValue, traits::Externalities, storage::well_known_keys}; +use frame_system::{self, AccountInfo, EventRecord, Phase}; +use sp_core::{storage::well_known_keys, traits::Externalities, NeverNativeValue}; use sp_runtime::{ - ApplyExtrinsicResult, - traits::Hash as HashT, - transaction_validity::InvalidTransaction, + traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult, }; -use pallet_contracts::ContractAddressFor; -use frame_system::{self, EventRecord, Phase}; +use node_primitives::{Balance, Hash}; use node_runtime::{ - Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Call, Runtime, Balances, - System, TransactionPayment, Event, - constants::currency::*, + constants::{currency::*, time::SLOT_DURATION}, + Balances, Block, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment, + UncheckedExtrinsic, }; -use node_primitives::{Balance, Hash}; -use wat; use node_testing::keyring::*; +use wat; pub mod common; -use self::common::{*, sign}; +use self::common::{sign, *}; /// The wasm runtime binary which hasn't undergone the compacting process. /// @@ -48,11 +44,14 @@ use self::common::{*, sign}; /// have to execute provided wasm code instead of the native equivalent. This trick is used to /// test code paths that differ between native and wasm versions. pub fn bloaty_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY_BLOATY.expect("Development wasm binary is not available. \ - Testing is only supported with the flag disabled.") + node_runtime::WASM_BINARY_BLOATY.expect( + "Development wasm binary is not available. \ + Testing is only supported with the flag disabled.", + ) } -/// Default transfer fee. This will use the same logic that is implemented in transaction-payment module. +/// Default transfer fee. This will use the same logic that is implemented in transaction-payment +/// module. /// /// Note that reads the multiplier from storage directly, hence to get the fee of `extrinsic` /// at block `n`, it must be called prior to executing block `n` to do the calculation with the @@ -77,6 +76,7 @@ fn set_heap_pages(ext: &mut E, heap_pages: u64) { } fn changes_trie_block() -> (Vec, Hash) { + let time = 42 * 1000; construct_block( &mut new_test_ext(compact_code_unwrap(), true), 1, @@ -84,13 +84,17 @@ fn changes_trie_block() -> (Vec, Hash) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), + function: Call::Balances(pallet_balances::Call::transfer { + dest: bob().into(), + value: 69 * DOLLARS, + }), }, - ] + ], + (time / SLOT_DURATION).into(), ) } @@ -99,6 +103,7 @@ fn changes_trie_block() -> (Vec, Hash) { /// from block1's execution to block2 to derive the correct storage_root. 
fn blocks() -> ((Vec, Hash), (Vec, Hash)) { let mut t = new_test_ext(compact_code_unwrap(), false); + let time1 = 42 * 1000; let block1 = construct_block( &mut t, 1, @@ -106,14 +111,19 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), + function: Call::Balances(pallet_balances::Call::transfer { + dest: bob().into(), + value: 69 * DOLLARS, + }), }, - ] + ], + (time1 / SLOT_DURATION).into(), ); + let time2 = 52 * 1000; let block2 = construct_block( &mut t, 2, @@ -121,22 +131,29 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { signed: Some((bob(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(alice().into(), 5 * DOLLARS)), + function: Call::Balances(pallet_balances::Call::transfer { + dest: alice().into(), + value: 5 * DOLLARS, + }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(1, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 15 * DOLLARS)), - } - ] + function: Call::Balances(pallet_balances::Call::transfer { + dest: bob().into(), + value: 15 * DOLLARS, + }), + }, + ], + (time2 / SLOT_DURATION).into(), ); // session change => consensus authorities change => authorities change digest item appears let digest = Header::decode(&mut &block2.0[..]).unwrap().digest; - assert_eq!(digest.logs().len(), 0); + assert_eq!(digest.logs().len(), 1 /* Just babe slot */); (block1, block2) } @@ -149,13 +166,14 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(nonce, 0))), - function: Call::System(frame_system::Call::remark(vec![0; size])), - } - ] + function: Call::System(frame_system::Call::remark { remark: vec![0; size] }), + }, + ], + (time * 1000 / SLOT_DURATION).into(), ) } @@ -164,7 +182,7 @@ fn panic_execution_with_foreign_code_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (69u128, 0u32, 0u128, 0u128, 0u128).encode() + (69u128, 0u32, 0u128, 0u128, 0u128).encode(), ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -175,7 +193,8 @@ fn panic_execution_with_foreign_code_gives_error() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let v = executor_call:: _>( &mut t, @@ -183,7 +202,9 @@ fn panic_execution_with_foreign_code_gives_error() { &vec![].and(&xt()), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -193,7 +214,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 
0u32, 69u128, 0u128, 0u128, 0u128).encode(), ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -204,7 +225,8 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let v = executor_call:: _>( &mut t, @@ -212,7 +234,9 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { &vec![].and(&xt()), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -222,15 +246,23 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + AccountInfo::<::Index, _> { + data: (111 * DOLLARS, 0u128, 0u128, 0u128), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + AccountInfo::<::Index, _> { + data: (0 * DOLLARS, 0u128, 0u128, 0u128), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key().to_vec(), - (111 * DOLLARS).encode() + (111 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -240,7 +272,8 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); @@ -251,7 +284,8 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { &vec![].and(&xt()), true, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -265,15 +299,23 @@ fn successful_execution_with_foreign_code_gives_ok() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + AccountInfo::<::Index, _> { + data: (111 * DOLLARS, 0u128, 0u128, 0u128), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + AccountInfo::<::Index, _> { + data: (0 * DOLLARS, 0u128, 0u128, 0u128), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key().to_vec(), - (111 * DOLLARS).encode() + (111 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -283,7 +325,8 @@ fn successful_execution_with_foreign_code_gives_ok() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); @@ -294,7 +337,8 @@ fn successful_execution_with_foreign_code_gives_ok() { &vec![].and(&xt()), true, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -313,7 +357,9 @@ fn full_native_block_import_works() { let mut fees = t.execute_with(|| transfer_fee(&xt())); let transfer_weight = default_transfer_call().get_dispatch_info().weight; - let timestamp_weight = pallet_timestamp::Call::set::(Default::default()).get_dispatch_info().weight; + let timestamp_weight = pallet_timestamp::Call::set:: { now: Default::default() } + .get_dispatch_info() + .weight; executor_call:: _>( &mut t, @@ -321,7 +367,9 @@ fn full_native_block_import_works() { &block1.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -330,14 +378,16 @@ fn full_native_block_import_works() { let events = vec![ 
EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: timestamp_weight, + class: DispatchClass::Mandatory, + ..Default::default() + })), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_balances(pallet_balances::RawEvent::Transfer( + event: Event::Balances(pallet_balances::Event::Transfer( alice().into(), bob().into(), 69 * DOLLARS, @@ -346,14 +396,15 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + event: Event::Treasury(pallet_treasury::Event::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: transfer_weight, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: transfer_weight, + ..Default::default() + })), topics: vec![], }, ]; @@ -368,69 +419,68 @@ fn full_native_block_import_works() { &block2.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), alice_last_known_balance - 10 * DOLLARS - fees, ); - assert_eq!( - Balances::total_balance(&bob()), - 179 * DOLLARS - fees, - ); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees); let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: timestamp_weight, + class: DispatchClass::Mandatory, + ..Default::default() + })), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_balances( - pallet_balances::RawEvent::Transfer( - bob().into(), - alice().into(), - 5 * DOLLARS, - ) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + bob().into(), + alice().into(), + 5 * DOLLARS, + )), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + event: Event::Treasury(pallet_treasury::Event::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: transfer_weight, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: transfer_weight, + ..Default::default() + })), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::pallet_balances( - pallet_balances::RawEvent::Transfer( - alice().into(), - bob().into(), - 15 * DOLLARS, - ) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + alice().into(), + bob().into(), + 15 * DOLLARS, + )), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + event: Event::Treasury(pallet_treasury::Event::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: 
Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: transfer_weight, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: transfer_weight, + ..Default::default() + })), topics: vec![], }, ]; @@ -453,7 +503,9 @@ fn full_wasm_block_import_works() { &block1.0, false, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -469,17 +521,16 @@ fn full_wasm_block_import_works() { &block2.0, false, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), alice_last_known_balance - 10 * DOLLARS - fees, ); - assert_eq!( - Balances::total_balance(&bob()), - 179 * DOLLARS - 1 * fees, - ); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees); }); } @@ -581,16 +632,13 @@ const CODE_TRANSFER: &str = r#" #[test] fn deploying_wasm_contract_should_work() { let transfer_code = wat::parse_str(CODE_TRANSFER).unwrap(); - let transfer_ch = ::Hashing::hash(&transfer_code); + let transfer_ch = ::Hashing::hash(&transfer_code); - let addr = ::DetermineContractAddress::contract_address_for( - &transfer_ch, - &[], - &charlie(), - ); + let addr = pallet_contracts::Pallet::::contract_address(&charlie(), &transfer_ch, &[]); - let subsistence = pallet_contracts::Config::::subsistence_threshold_uncached(); + let subsistence = pallet_contracts::Pallet::::subsistence_threshold(); + let time = 42 * 1000; let b = construct_block( &mut new_test_ext(compact_code_unwrap(), false), 1, @@ -598,58 +646,43 @@ fn deploying_wasm_contract_should_work() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), function: Call::Contracts( - pallet_contracts::Call::put_code::(transfer_code) + pallet_contracts::Call::instantiate_with_code:: { + endowment: 1000 * DOLLARS + subsistence, + gas_limit: 500_000_000, + code: transfer_code, + data: Vec::new(), + salt: Vec::new(), + }, ), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), - function: Call::Contracts( - pallet_contracts::Call::instantiate::( - 1 * DOLLARS + subsistence, - 500_000_000, - transfer_ch, - Vec::new() - ) - ), + function: Call::Contracts(pallet_contracts::Call::call:: { + dest: sp_runtime::MultiAddress::Id(addr.clone()), + value: 10, + gas_limit: 500_000_000, + data: vec![0x00, 0x01, 0x02, 0x03], + }), }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(2, 0))), - function: Call::Contracts( - pallet_contracts::Call::call::( - pallet_indices::address::Address::Id(addr.clone()), - 10, - 500_000_000, - vec![0x00, 0x01, 0x02, 0x03] - ) - ), - }, - ] + ], + (time / SLOT_DURATION).into(), ); let mut t = new_test_ext(compact_code_unwrap(), false); - executor_call:: _>( - &mut t, - "Core_execute_block", - &b.0, - false, - None, - ).0.unwrap(); + executor_call:: _>(&mut t, "Core_execute_block", &b.0, false, None) + .0 + .unwrap(); t.execute_with(|| { - // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. 
- assert_eq!( - &pallet_contracts::ContractInfoOf::::get(addr) - .and_then(|c| c.get_alive()) - .unwrap() - .code_hash, - &transfer_ch - ); + // Verify that the contract does exist by querying some of its storage items + // It does not matter that the storage item itself does not exist. + assert!(&pallet_contracts::Pallet::::get_storage(addr, Default::default()).is_ok()); }); } @@ -665,7 +698,8 @@ fn wasm_big_block_import_fails() { &block_with_size(42, 0, 120_000).0, false, None, - ).0; + ) + .0; assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } @@ -679,22 +713,28 @@ fn native_big_block_import_succeeds() { &block_with_size(42, 0, 120_000).0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); } #[test] fn native_big_block_import_fails_on_fallback() { let mut t = new_test_ext(compact_code_unwrap(), false); - assert!( - executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - false, - None, - ).0.is_err() - ); + // We set the heap pages to 8 because we know that should give an OOM in WASM with the given + // block. + set_heap_pages(&mut t.ext(), 8); + + assert!(executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + false, + None, + ) + .0 + .is_err()); } #[test] @@ -702,7 +742,11 @@ fn panic_execution_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + AccountInfo::<::Index, _> { + data: (0 * DOLLARS, 0u128, 0u128, 0u128), + ..Default::default() + } + .encode(), ); t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -713,7 +757,8 @@ fn panic_execution_gives_error() { &vec![].and(&from_block_number(1u32)), false, None, - ).0; + ) + .0; assert!(r.is_ok()); let r = executor_call:: _>( &mut t, @@ -721,7 +766,10 @@ fn panic_execution_gives_error() { &vec![].and(&xt()), false, None, - ).0.unwrap().into_encoded(); + ) + .0 + .unwrap() + .into_encoded(); let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -731,15 +779,23 @@ fn successful_execution_gives_ok() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + AccountInfo::<::Index, _> { + data: (111 * DOLLARS, 0u128, 0u128, 0u128), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key_for(bob()), - (0u32, 0u32, 0 * DOLLARS, 0u128, 0u128, 0u128).encode() + AccountInfo::<::Index, _> { + data: (0 * DOLLARS, 0u128, 0u128, 0u128), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key().to_vec(), - (111 * DOLLARS).encode() + (111 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -749,7 +805,8 @@ fn successful_execution_gives_ok() { &vec![].and(&from_block_number(1u32)), false, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); @@ -763,7 +820,10 @@ fn successful_execution_gives_ok() { &vec![].and(&xt()), false, None, - ).0.unwrap().into_encoded(); + ) + .0 + .unwrap() + .into_encoded(); ApplyExtrinsicResult::decode(&mut &r[..]) .unwrap() .expect("Extrinsic could not be applied") @@ -788,7 +848,9 @@ fn full_native_block_import_works_with_changes_trie() { &block.encode(), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); 
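On the new `set_heap_pages(&mut t.ext(), 8)` in `native_big_block_import_fails_on_fallback` above: a wasm linear-memory page is 64 KiB, so 8 pages cap the runtime's dynamic allocations at 512 KiB, and importing the block with its 120,000-byte remark (decoded and copied several times during import) is expected to exhaust that budget. A back-of-envelope sketch (this arithmetic is an assumption of this note, not from the patch):

    // One wasm linear-memory page is 64 KiB.
    const WASM_PAGE_SIZE: u64 = 64 * 1024;

    fn heap_budget(pages: u64) -> u64 {
        pages * WASM_PAGE_SIZE
    }

    // 8 pages = 512 KiB; several copies of a ~120 kB remark plus working state
    // are expected to overflow this, producing the OOM the test asserts.
    assert_eq!(heap_budget(8), 512 * 1024);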
assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); } @@ -804,7 +866,9 @@ fn full_wasm_block_import_works_with_changes_trie() { &block1.0, false, None, - ).0.unwrap(); + ) + .0 + .unwrap(); assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); } @@ -812,8 +876,7 @@ fn full_wasm_block_import_works_with_changes_trie() { #[test] fn should_import_block_with_test_client() { use node_testing::client::{ - ClientBlockImportExt, TestClientBuilderExt, TestClientBuilder, - sp_consensus::BlockOrigin, + sp_consensus::BlockOrigin, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, }; let mut client = TestClientBuilder::new().build(); @@ -821,5 +884,5 @@ fn should_import_block_with_test_client() { let block_data = block1.0; let block = node_primitives::Block::decode(&mut &block_data[..]).unwrap(); - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index efc54ebebf199..d1c24c83c836d 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,31 +15,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Encode, Decode}; -use frame_system::offchain::AppCrypto; +use codec::{Decode, Encode}; use frame_support::Hashable; -use sp_state_machine::TestExternalities as CoreTestExternalities; +use frame_system::offchain::AppCrypto; +use sc_executor::{error::Result, NativeElseWasmExecutor, WasmExecutionMethod}; +use sp_consensus_babe::{ + digests::{PreDigest, SecondaryPlainPreDigest}, + Slot, BABE_ENGINE_ID, +}; use sp_core::{ - NeverNativeValue, NativeOrEncoded, crypto::KeyTypeId, sr25519::Signature, traits::{CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; use sp_runtime::{ - ApplyExtrinsicResult, - MultiSigner, - MultiSignature, - traits::{Header as HeaderT, BlakeTwo256}, + traits::{BlakeTwo256, Header as HeaderT}, + ApplyExtrinsicResult, Digest, DigestItem, MultiSignature, MultiSigner, }; -use sc_executor::{NativeExecutor, WasmExecutionMethod}; -use sc_executor::error::Result; +use sp_state_machine::TestExternalities as CoreTestExternalities; -use node_executor::Executor; +use node_executor::ExecutorDispatch; +use node_primitives::{BlockNumber, Hash}; use node_runtime::{ - Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Runtime, BuildStorage, - constants::currency::*, + constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime, + UncheckedExtrinsic, }; -use node_primitives::{Hash, BlockNumber}; use node_testing::keyring::*; use sp_externalities::Externalities; @@ -47,8 +48,8 @@ pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); pub mod sr25519 { mod app_sr25519 { - use sp_application_crypto::{app_crypto, sr25519}; use super::super::TEST_KEY_TYPE_ID; + use sp_application_crypto::{app_crypto, sr25519}; app_crypto!(sr25519, TEST_KEY_TYPE_ID); } @@ -66,11 +67,12 @@ impl AppCrypto for TestAuthorityId { /// /// `compact` since it is after post-processing with wasm-gc which performs tree-shaking thus /// making the binary slimmer. 
There is a convention to use compact version of the runtime -/// as canonical. This is why `native_executor_instance` also uses the compact version of the -/// runtime. +/// as canonical. pub fn compact_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY.expect("Development wasm binary is not available. \ - Testing is only supported with the flag disabled.") + node_runtime::WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only supported with the flag \ + disabled.", + ) } pub const GENESIS_HASH: [u8; 32] = [69u8; 32]; @@ -86,20 +88,21 @@ pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { } pub fn default_transfer_call() -> pallet_balances::Call { - pallet_balances::Call::transfer::(bob().into(), 69 * DOLLARS) + pallet_balances::Call::::transfer { dest: bob().into(), value: 69 * DOLLARS } } pub fn from_block_number(n: u32) -> Header { Header::new(n, Default::default(), Default::default(), [69; 32].into(), Default::default()) } -pub fn executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) +pub fn executor() -> NativeElseWasmExecutor { + NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } pub fn executor_call< - R:Decode + Encode + PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe + R: Decode + Encode + PartialEq, + NC: FnOnce() -> std::result::Result> + + std::panic::UnwindSafe, >( t: &mut TestExternalities, method: &str, @@ -117,20 +120,15 @@ pub fn executor_call< heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), }; - executor().call::( - &mut t, - &runtime_code, - method, - data, - use_native, - native_call, - ) + executor().call::(&mut t, &runtime_code, method, data, use_native, native_call) } pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { let mut ext = TestExternalities::new_with_code( code, - node_testing::genesis::config(support_changes_trie, Some(code)).build_storage().unwrap(), + node_testing::genesis::config(support_changes_trie, Some(code)) + .build_storage() + .unwrap(), ); ext.changes_trie_storage().insert(0, GENESIS_HASH.into(), Default::default()); ext @@ -145,8 +143,9 @@ pub fn construct_block( number: BlockNumber, parent_hash: Hash, extrinsics: Vec, + babe_slot: Slot, ) -> (Vec, Hash) { - use sp_trie::{TrieConfiguration, trie_types::Layout}; + use sp_trie::{trie_types::Layout, TrieConfiguration}; // sign extrinsics. let extrinsics = extrinsics.into_iter().map(sign).collect::>(); @@ -162,7 +161,16 @@ pub fn construct_block( number, extrinsics_root, state_root: Default::default(), - digest: Default::default(), + digest: Digest { + logs: vec![DigestItem::PreRuntime( + BABE_ENGINE_ID, + PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + slot: babe_slot, + authority_index: 42, + }) + .encode(), + )], + }, }; // execute the block to get the real header. @@ -172,7 +180,9 @@ pub fn construct_block( &header.encode(), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); for extrinsic in extrinsics.iter() { // Try to apply the `extrinsic`. 
It should be valid, in the sense that it passes @@ -183,8 +193,13 @@ pub fn construct_block( &extrinsic.encode(), true, None, - ).0.expect("application of an extrinsic failed").into_encoded(); - match ApplyExtrinsicResult::decode(&mut &r[..]).expect("apply result deserialization failed") { + ) + .0 + .expect("application of an extrinsic failed") + .into_encoded(); + match ApplyExtrinsicResult::decode(&mut &r[..]) + .expect("apply result deserialization failed") + { Ok(_) => {}, Err(e) => panic!("Applying extrinsic failed: {:?}", e), } @@ -193,10 +208,13 @@ pub fn construct_block( let header = match executor_call:: _>( env, "BlockBuilder_finalize_block", - &[0u8;0], + &[0u8; 0], true, None, - ).0.unwrap() { + ) + .0 + .unwrap() + { NativeOrEncoded::Native(_) => unreachable!(), NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), }; diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index d04af1d827009..379cdda5b76a3 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,22 +17,22 @@ use codec::{Encode, Joiner}; use frame_support::{ - StorageValue, StorageMap, traits::Currency, - weights::{GetDispatchInfo, constants::ExtrinsicBaseWeight, IdentityFee, WeightToFeePolynomial}, + weights::{ + constants::ExtrinsicBaseWeight, GetDispatchInfo, IdentityFee, WeightToFeePolynomial, + }, }; -use sp_core::NeverNativeValue; -use sp_runtime::{Perbill, FixedPointNumber}; +use node_primitives::Balance; use node_runtime::{ - CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, Multiplier, - TransactionByteFee, - constants::currency::*, + constants::{currency::*, time::SLOT_DURATION}, + Balances, Call, CheckedExtrinsic, Multiplier, Runtime, TransactionByteFee, TransactionPayment, }; -use node_primitives::Balance; use node_testing::keyring::*; +use sp_core::NeverNativeValue; +use sp_runtime::{traits::One, Perbill}; pub mod common; -use self::common::{*, sign}; +use self::common::{sign, *}; #[test] fn fee_multiplier_increases_and_decreases_on_big_weight() { @@ -47,6 +47,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { let mut tt = new_test_ext(compact_code_unwrap(), false); + let time1 = 42 * 1000; // big one in terms of weight. let block1 = construct_block( &mut tt, @@ -55,15 +56,19 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(60))), - } - ] + function: Call::System(frame_system::Call::fill_block { + ratio: Perbill::from_percent(60), + }), + }, + ], + (time1 / SLOT_DURATION).into(), ); + let time2 = 52 * 1000; // small one in terms of weight. 
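`construct_block` now takes a BABE slot and seeds a `SecondaryPlainPreDigest` into the header, so every test derives the slot from the block's timestamp, keeping the `Timestamp::set { now }` inherent and the pre-digest consistent. A sketch of that derivation (both values in milliseconds, mirroring the `(time / SLOT_DURATION).into()` pattern in the tests above):

    use sp_consensus_babe::Slot;

    fn babe_slot(now_ms: u64, slot_duration_ms: u64) -> Slot {
        // e.g. the first test block: 42_000 ms / SLOT_DURATION.
        Slot::from(now_ms / slot_duration_ms)
    }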
let block2 = construct_block( &mut tt, @@ -72,13 +77,14 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), - function: Call::System(frame_system::Call::remark(vec![0; 1])), - } - ] + function: Call::System(frame_system::Call::remark { remark: vec![0; 1] }), + }, + ], + (time2 / SLOT_DURATION).into(), ); println!( @@ -94,7 +100,9 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { &block1.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -111,7 +119,9 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { &block2.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -121,6 +131,17 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }); } +fn new_account_info(free_dollars: u128) -> Vec { + frame_system::AccountInfo { + nonce: 0u32, + consumers: 0, + providers: 0, + sufficients: 0, + data: (free_dollars * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS), + } + .encode() +} + #[test] fn transaction_fee_is_correct() { // This uses the exact values of substrate-node. @@ -131,17 +152,11 @@ fn transaction_fee_is_correct() { // - 1 milli-dot based on current polkadot runtime. // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) let mut t = new_test_ext(compact_code_unwrap(), false); - t.insert( - >::hashed_key_for(alice()), - (0u32, 0u32, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() - ); - t.insert( - >::hashed_key_for(bob()), - (0u32, 0u32, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() - ); + t.insert(>::hashed_key_for(alice()), new_account_info(100)); + t.insert(>::hashed_key_for(bob()), new_account_info(10)); t.insert( >::hashed_key().to_vec(), - (110 * DOLLARS).encode() + (110 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -157,7 +172,8 @@ fn transaction_fee_is_correct() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let r = executor_call:: _>( @@ -166,7 +182,8 @@ fn transaction_fee_is_correct() { &vec![].and(&xt.clone()), true, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -217,26 +234,32 @@ fn block_weight_capacity_report() { let mut time = 10; let mut nonce: Index = 0; let mut block_number = 1; - let mut previous_hash: Hash = GENESIS_HASH.into(); + let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); loop { let num_transfers = block_number * factor; - let mut xts = (0..num_transfers).map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)), - }).collect::>(); - - xts.insert(0, CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), - }); + let mut xts = (0..num_transfers) + .map(|i| CheckedExtrinsic { + signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), + function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)), + }) + .collect::>(); + + xts.insert( + 0, + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), + }, + ); // NOTE: this is super slow. 
Can probably be improved. let block = construct_block( &mut tt, block_number, previous_hash, - xts + xts, + (time * 1000 / SLOT_DURATION).into(), ); let len = block.0.len(); @@ -254,7 +277,8 @@ fn block_weight_capacity_report() { &block.0, true, None, - ).0; + ) + .0; println!(" || Result = {:?}", r); assert!(r.is_ok()); @@ -284,7 +308,7 @@ fn block_length_capacity_report() { let mut time = 10; let mut nonce: Index = 0; let mut block_number = 1; - let mut previous_hash: Hash = GENESIS_HASH.into(); + let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); loop { // NOTE: this is super slow. Can probably be improved. @@ -299,9 +323,12 @@ fn block_length_capacity_report() { }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(nonce, 0))), - function: Call::System(frame_system::Call::remark(vec![0u8; (block_number * factor) as usize])), + function: Call::System(frame_system::Call::remark { + remark: vec![0u8; (block_number * factor) as usize], + }), }, - ] + ], + (time * 1000 / SLOT_DURATION).into(), ); let len = block.0.len(); @@ -318,7 +345,8 @@ fn block_length_capacity_report() { &block.0, true, None, - ).0; + ) + .0; println!(" || Result = {:?}", r); assert!(r.is_ok()); diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index 5bac6b5e374c7..19ca8e5677c43 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,26 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
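The submit-transaction tests that follow all share one scaffold: build test externalities, register a test transaction pool and a keystore as extensions, then drive `frame_system::offchain` from inside `execute_with` and assert on the pool state afterwards. A condensed sketch (assembled from the tests below; `new_test_ext` and `compact_code_unwrap` come from the shared `common` module):

    use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt};
    use sp_keystore::{testing::KeyStore, KeystoreExt};
    use std::sync::Arc;

    #[test]
    fn offchain_scaffold() {
        let mut t = new_test_ext(compact_code_unwrap(), false);
        let (pool, state) = TestTransactionPoolExt::new();
        t.register_extension(TransactionPoolExt::new(pool));
        t.register_extension(KeystoreExt(Arc::new(KeyStore::new())));
        t.execute_with(|| {
            // Submit signed/unsigned transactions here, then assert on
            // `state.read().transactions`.
        });
    }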
-use std::sync::Arc; -use node_runtime::{ - Executive, Indices, Runtime, UncheckedExtrinsic, -}; -use sp_application_crypto::AppKey; -use sp_core::{ - offchain::{ - TransactionPoolExt, - testing::TestTransactionPoolExt, - }, -}; -use sp_keystore::{KeystoreExt, SyncCryptoStore, testing::KeyStore}; -use frame_system::{ - offchain::{ - Signer, - SubmitTransaction, - SendSignedTransaction, - } -}; use codec::Decode; +use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction}; +use node_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic}; +use sp_application_crypto::AppKey; +use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; +use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStore}; +use std::sync::Arc; pub mod common; use self::common::*; @@ -55,9 +42,11 @@ fn should_submit_unsigned_transaction() { validators_len: 0, }; - let call = pallet_im_online::Call::heartbeat(heartbeat_data, signature); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) - .unwrap(); + let call = pallet_im_online::Call::heartbeat { heartbeat: heartbeat_data, signature }; + SubmitTransaction::>::submit_unsigned_transaction( + call.into(), + ) + .unwrap(); assert_eq!(state.read().transactions.len(), 1) }); @@ -75,24 +64,30 @@ fn should_submit_signed_transaction() { SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter2", PHRASE)), + ) + .unwrap(); SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter3", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter3", PHRASE)), + ) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { - let results = Signer::::all_accounts() - .send_signed_transaction(|_| { - pallet_balances::Call::transfer(Default::default(), Default::default()) + let results = + Signer::::all_accounts().send_signed_transaction(|_| { + pallet_balances::Call::transfer { + dest: Default::default(), + value: Default::default(), + } }); let len = results.len(); @@ -112,28 +107,36 @@ fn should_submit_signed_twice_from_the_same_account() { SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter2", PHRASE)), + ) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { - let result = Signer::::any_account() - .send_signed_transaction(|_| { - pallet_balances::Call::transfer(Default::default(), Default::default()) + let result = + Signer::::any_account().send_signed_transaction(|_| { + pallet_balances::Call::transfer { + dest: Default::default(), + value: Default::default(), + } }); assert!(result.is_some()); assert_eq!(state.read().transactions.len(), 1); // submit another one from the same account. The nonce should be incremented. 
- let result = Signer::::any_account() - .send_signed_transaction(|_| { - pallet_balances::Call::transfer(Default::default(), Default::default()) + let result = + Signer::::any_account().send_signed_transaction(|_| { + pallet_balances::Call::transfer { + dest: Default::default(), + value: Default::default(), + } }); assert!(result.is_some()); @@ -147,10 +150,7 @@ fn should_submit_signed_twice_from_the_same_account() { } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); - assert!( - nonce1 != nonce2, - "Transactions should have different nonces. Got: {:?}", nonce1 - ); + assert!(nonce1 != nonce2, "Transactions should have different nonces. Got: {:?}", nonce1); }); } @@ -161,20 +161,18 @@ fn should_submit_signed_twice_from_all_accounts() { t.register_extension(TransactionPoolExt::new(pool)); let keystore = KeyStore::new(); - keystore.sr25519_generate_new( - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); - keystore.sr25519_generate_new( - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)) - ).unwrap(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter2", PHRASE))) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { let results = Signer::::all_accounts() .send_signed_transaction(|_| { - pallet_balances::Call::transfer(Default::default(), Default::default()) + pallet_balances::Call::transfer { dest: Default::default(), value: Default::default() } }); let len = results.len(); @@ -185,7 +183,7 @@ fn should_submit_signed_twice_from_all_accounts() { // submit another one from the same account. The nonce should be incremented. 
let results = Signer::::all_accounts() .send_signed_transaction(|_| { - pallet_balances::Call::transfer(Default::default(), Default::default()) + pallet_balances::Call::transfer { dest: Default::default(), value: Default::default() } }); let len = results.len(); @@ -217,9 +215,10 @@ fn should_submit_signed_twice_from_all_accounts() { #[test] fn submitted_transaction_should_be_valid() { use codec::Encode; - use frame_support::storage::StorageMap; - use sp_runtime::transaction_validity::{TransactionSource, TransactionTag}; - use sp_runtime::traits::StaticLookup; + use sp_runtime::{ + traits::StaticLookup, + transaction_validity::{TransactionSource, TransactionTag}, + }; let mut t = new_test_ext(compact_code_unwrap(), false); let (pool, state) = TestTransactionPoolExt::new(); @@ -228,14 +227,19 @@ fn submitted_transaction_should_be_valid() { let keystore = KeyStore::new(); SyncCryptoStore::sr25519_generate_new( &keystore, - sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + sr25519::AuthorityId::ID, + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { - let results = Signer::::all_accounts() - .send_signed_transaction(|_| { - pallet_balances::Call::transfer(Default::default(), Default::default()) + let results = + Signer::::all_accounts().send_signed_transaction(|_| { + pallet_balances::Call::transfer { + dest: Default::default(), + value: Default::default(), + } }); let len = results.len(); assert_eq!(len, 1); @@ -253,16 +257,21 @@ fn submitted_transaction_should_be_valid() { let author = extrinsic.signature.clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; - let account = frame_system::AccountInfo { nonce: 0, refcount: 0, data }; + let account = frame_system::AccountInfo { data, ..Default::default() }; >::insert(&address, account); // check validity - let res = Executive::validate_transaction(source, extrinsic).unwrap(); + let res = Executive::validate_transaction( + source, + extrinsic, + frame_system::BlockHash::::get(0), + ) + .unwrap(); // We ignore res.priority since this number can change based on updates to weights and such. 
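`Executive::validate_transaction` gained a third parameter, the hash of the block the transaction would be built on; the test above passes the genesis entry of `frame_system::BlockHash`. The call shape, condensed (types taken from the surrounding test):

    use node_runtime::{Executive, Runtime, UncheckedExtrinsic};
    use sp_runtime::transaction_validity::TransactionSource;

    fn validate(source: TransactionSource, xt: UncheckedExtrinsic) {
        // Hash of block 0, i.e. validating against genesis state.
        let at = frame_system::BlockHash::<Runtime>::get(0);
        let _validity = Executive::validate_transaction(source, xt, at);
    }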
assert_eq!(res.requires, Vec::::new()); assert_eq!(res.provides, vec![(address, 0).encode()]); - assert_eq!(res.longevity, 2048); + assert_eq!(res.longevity, 2047); assert_eq!(res.propagate, true); }); } diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index 3686ddf27669b..1570e5dbf8e44 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-inspect" -version = "0.8.0" +version = "0.9.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -11,13 +11,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } derive_more = "0.99" -log = "0.4.8" -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } structopt = "0.3.8" diff --git a/bin/node/inspect/src/cli.rs b/bin/node/inspect/src/cli.rs index d66644bab52fa..c054fedaf57c4 100644 --- a/bin/node/inspect/src/cli.rs +++ b/bin/node/inspect/src/cli.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,8 +18,8 @@ //! Structs to easily compose inspect sub-command for CLI. -use std::fmt::Debug; use sc_cli::{ImportParams, SharedParams}; +use std::fmt::Debug; use structopt::StructOpt; /// The `inspect` command used to print decoded chain data. diff --git a/bin/node/inspect/src/command.rs b/bin/node/inspect/src/command.rs index fae6c10c7fe78..9bf69511689c4 100644 --- a/bin/node/inspect/src/command.rs +++ b/bin/node/inspect/src/command.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,9 +18,12 @@ //! 
Command run by the CLI -use crate::cli::{InspectCmd, InspectSubCmd}; -use crate::Inspector; +use crate::{ + cli::{InspectCmd, InspectSubCmd}, + Inspector, +}; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; +use sc_executor::NativeElseWasmExecutor; use sc_service::{new_full_client, Configuration, NativeExecutionDispatch}; use sp_runtime::traits::Block; use std::str::FromStr; @@ -34,7 +37,13 @@ impl InspectCmd { RA: Send + Sync + 'static, EX: NativeExecutionDispatch + 'static, { - let client = new_full_client::<B, RA, EX>(&config)?; + let executor = NativeElseWasmExecutor::<EX>::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let client = new_full_client::<B, RA, NativeElseWasmExecutor<EX>>(&config, None, executor)?; let inspect = Inspector::<B>::new(client); match &self.command { @@ -43,13 +52,13 @@ impl InspectCmd { let res = inspect.block(input).map_err(|e| format!("{}", e))?; println!("{}", res); Ok(()) - } + }, InspectSubCmd::Extrinsic { input } => { let input = input.parse()?; let res = inspect.extrinsic(input).map_err(|e| format!("{}", e))?; println!("{}", res); Ok(()) - } + }, } } } diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 02f5614b81a78..30e7250ea2c6c 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. // -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // // This program is free software: you can redistribute it and/or modify @@ -27,33 +27,27 @@ pub mod cli; pub mod command; -use std::{ - fmt, - fmt::Debug, - marker::PhantomData, - str::FromStr, -}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sc_client_api::BlockBackend; use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ generic::BlockId, - traits::{Block, HashFor, NumberFor, Hash} + traits::{Block, Hash, HashFor, NumberFor}, }; +use std::{fmt, fmt::Debug, marker::PhantomData, str::FromStr}; /// A helper type for a generic block input. -pub type BlockAddressFor<TBlock> = BlockAddress< - <HashFor<TBlock> as Hash>::Output, - NumberFor<TBlock> ->; +pub type BlockAddressFor<TBlock> = + BlockAddress<<HashFor<TBlock> as Hash>::Output, NumberFor<TBlock>>; /// A Pretty formatter implementation. pub trait PrettyPrinter<TBlock: Block> { /// Nicely format block. fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result; /// Nicely format extrinsic. - fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result; + fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) + -> fmt::Result; } /// Default dummy debug printer. @@ -72,8 +66,12 @@ impl<TBlock: Block> PrettyPrinter<TBlock> for DebugPrinter { Ok(()) } - fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result { - writeln!(fmt, " {:?}", extrinsic)?; + fn fmt_extrinsic( + &self, + fmt: &mut fmt::Formatter, + extrinsic: &TBlock::Extrinsic, + ) -> fmt::Result { + writeln!(fmt, " {:#?}", extrinsic)?; writeln!(fmt, " Bytes: {:?}", HexDisplay::from(&extrinsic.encode()))?; Ok(()) } @@ -101,15 +99,14 @@ impl std::error::Error for Error { } /// A helper trait to access block headers and bodies.
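In the `command.rs` hunk above, `new_full_client` no longer constructs its own executor; the caller builds a `NativeElseWasmExecutor` from the configuration and passes it in. A hedged sketch of that dependency-injection shape, with toy types standing in for the real `sc-executor`/`sc-service` items (field and parameter names here are illustrative only):

```rust
// Toy executor carrying the settings the caller now controls.
struct Executor {
    default_heap_pages: Option<u64>,
    max_runtime_instances: usize,
}

struct Client {
    executor: Executor,
}

// The client constructor receives a ready-made executor instead of building
// one internally, so callers (and tests) decide how Wasm execution is set up.
fn new_full_client(executor: Executor) -> Client {
    Client { executor }
}

fn main() {
    let executor = Executor { default_heap_pages: Some(2048), max_runtime_instances: 8 };
    let client = new_full_client(executor);
    assert_eq!(client.executor.max_runtime_instances, 8);
}
```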
-pub trait ChainAccess<TBlock: Block>: - HeaderBackend<TBlock> + - BlockBackend<TBlock> -{} +pub trait ChainAccess<TBlock: Block>: HeaderBackend<TBlock> + BlockBackend<TBlock> {} -impl<T, TBlock> ChainAccess<TBlock> for T where +impl<T, TBlock> ChainAccess<TBlock> for T +where TBlock: Block, T: sp_blockchain::HeaderBackend<TBlock> + sc_client_api::BlockBackend<TBlock>, -{} +{ +} /// Blockchain inspector. pub struct Inspector<TBlock: Block, TPrinter: PrettyPrinter<TBlock> = DebugPrinter> { @@ -120,22 +117,16 @@ pub struct Inspector<TBlock: Block, TPrinter: PrettyPrinter<TBlock> = DebugPrint impl<TBlock: Block, TPrinter: PrettyPrinter<TBlock>> Inspector<TBlock, TPrinter> { /// Create new instance of the inspector with default printer. - pub fn new( - chain: impl ChainAccess<TBlock> + 'static, - ) -> Self where TPrinter: Default { + pub fn new(chain: impl ChainAccess<TBlock> + 'static) -> Self + where + TPrinter: Default, + { Self::with_printer(chain, Default::default()) } /// Customize pretty-printing of the data. - pub fn with_printer( - chain: impl ChainAccess<TBlock> + 'static, - printer: TPrinter, - ) -> Self { - Inspector { - chain: Box::new(chain) as _, - printer, - _block: Default::default(), - } + pub fn with_printer(chain: impl ChainAccess<TBlock> + 'static, printer: TPrinter) -> Self { + Inspector { chain: Box::new(chain) as _, printer, _block: Default::default() } } /// Get a pretty-printed block. @@ -153,25 +144,27 @@ impl<TBlock: Block, TPrinter: PrettyPrinter<TBlock>> Inspector<TBlock, TPrinter> fn get_block(&self, input: BlockAddressFor<TBlock>) -> Result<TBlock> { Ok(match input { - BlockAddress::Bytes(bytes) => { - TBlock::decode(&mut &*bytes)? - }, + BlockAddress::Bytes(bytes) => TBlock::decode(&mut &*bytes)?, BlockAddress::Number(number) => { let id = BlockId::number(number); let not_found = format!("Could not find block {:?}", id); - let body = self.chain.block_body(&id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - let header = self.chain.header(id)? + let body = self + .chain + .block_body(&id)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; + let header = + self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; TBlock::new(header, body) }, BlockAddress::Hash(hash) => { let id = BlockId::hash(hash); let not_found = format!("Could not find block {:?}", id); - let body = self.chain.block_body(&id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - let header = self.chain.header(id)? + let body = self + .chain + .block_body(&id)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; + let header = + self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; TBlock::new(header, body) }, }) @@ -192,16 +185,14 @@ impl<TBlock: Block, TPrinter: PrettyPrinter<TBlock>> Inspector<TBlock, TPrinter> let ext = match input { ExtrinsicAddress::Block(block, index) => { let block = self.get_block(block)?; - block.extrinsics() - .get(index) - .cloned() - .ok_or_else(|| Error::NotFound(format!( - "Could not find extrinsic {} in block {:?}", index, block - )))? + block.extrinsics().get(index).cloned().ok_or_else(|| { + Error::NotFound(format!( + "Could not find extrinsic {} in block {:?}", + index, block + )) + })? }, - ExtrinsicAddress::Bytes(bytes) => { - TBlock::Extrinsic::decode(&mut &*bytes)? - } + ExtrinsicAddress::Bytes(bytes) => TBlock::Extrinsic::decode(&mut &*bytes)?, }; Ok(format!("{}", ExtrinsicPrinter(ext, &self.printer))) @@ -234,12 +225,12 @@ impl FromStr for BlockAddress { } // then assume it's bytes (hex-encoded) - sp_core::bytes::from_hex(s) - .map(Self::Bytes) - .map_err(|e| format!( + sp_core::bytes::from_hex(s).map(Self::Bytes).map_err(|e| { + format!( "Given string does not look like hash or number. It could not be parsed as bytes either: {}", e - )) + ) + }) } } @@ -263,11 +254,13 @@ impl FromStr for ExtrinsicAddress // split by a bunch of different characters let mut it = s.split(|c| c == '.'
|| c == ':' || c == ' '); - let block = it.next() + let block = it + .next() .expect("First element of split iterator is never empty; qed") .parse()?; - let index = it.next() + let index = it + .next() .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? .parse() .map_err(|e| format!("Invalid index format: {}", e))?; @@ -290,10 +283,10 @@ mod tests { let b2 = BlockAddress::from_str("0"); let b3 = BlockAddress::from_str("0x0012345f"); - - assert_eq!(b0, Ok(BlockAddress::Hash( - "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() - ))); + assert_eq!( + b0, + Ok(BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap())) + ); assert_eq!(b1, Ok(BlockAddress::Number(1234))); assert_eq!(b2, Ok(BlockAddress::Number(0))); assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); @@ -310,20 +303,16 @@ mod tests { let b2 = ExtrinsicAddress::from_str("0 0"); let b3 = ExtrinsicAddress::from_str("0x0012345f"); - assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); - assert_eq!(b0, Ok(ExtrinsicAddress::Block( - BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), - 5 - ))); - assert_eq!(b1, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(1234), - 0 - ))); - assert_eq!(b2, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(0), - 0 - ))); + assert_eq!( + b0, + Ok(ExtrinsicAddress::Block( + BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), + 5 + )) + ); + assert_eq!(b1, Ok(ExtrinsicAddress::Block(BlockAddress::Number(1234), 0))); + assert_eq!(b2, Ok(ExtrinsicAddress::Block(BlockAddress::Number(0), 0))); assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); } } diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 305764970c149..12ec57e4d55b6 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -11,20 +11,20 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../../primitives/application-crypto" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } - -[dev-dependencies] -sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } -pretty_assertions = "0.6.1" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/application-crypto" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "frame-system/std", "sp-application-crypto/std", "sp-core/std", diff --git a/bin/node/primitives/src/lib.rs b/bin/node/primitives/src/lib.rs index 
137fb1d94c778..dade598c704d2 100644 --- a/bin/node/primitives/src/lib.rs +++ b/bin/node/primitives/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,11 +18,12 @@ //! Low-level types used throughout the Substrate code. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] use sp_runtime::{ - generic, traits::{Verify, BlakeTwo256, IdentifyAccount}, OpaqueExtrinsic, MultiSignature + generic, + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiSignature, OpaqueExtrinsic, }; /// An index to a block. diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index 9f358e901dafa..a5255769158a4 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -11,10 +11,10 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.1.29" -hyper = "0.12.35" -jsonrpc-core-client = { version = "15.0.0", default-features = false, features = ["http"] } -log = "0.4.8" +futures = "0.3.16" +jsonrpc-core-client = { version = "18.0.0", default-features = false, features = [ + "http", +] } node-primitives = { version = "2.0.0", path = "../primitives" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } +sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } +sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } diff --git a/bin/node/rpc-client/src/main.rs b/bin/node/rpc-client/src/main.rs index 31f1efa28ccd0..6d0b88799f54c 100644 --- a/bin/node/rpc-client/src/main.rs +++ b/bin/node/rpc-client/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,32 +22,21 @@ //! This module shows how you can write a Rust RPC client that connects to a running //! substrate node and use statically typed RPC wrappers. -use futures::Future; -use hyper::rt; +use futures::{Future, TryFutureExt}; +use jsonrpc_core_client::{transports::http, RpcError}; use node_primitives::Hash; -use sc_rpc::author::{ - AuthorClient, - hash::ExtrinsicOrHash, -}; -use jsonrpc_core_client::{ - transports::http, - RpcError, -}; +use sc_rpc::author::{hash::ExtrinsicOrHash, AuthorClient}; -fn main() { +fn main() -> Result<(), RpcError> { sp_tracing::try_init_simple(); - rt::run(rt::lazy(|| { + futures::executor::block_on(async { let uri = "http://localhost:9933"; http::connect(uri) - .and_then(|client: AuthorClient| { - remove_all_extrinsics(client) - }) - .map_err(|e| { - println!("Error: {:?}", e); - }) - })) + .and_then(|client: AuthorClient| remove_all_extrinsics(client)) + .await + }) } /// Remove all pending extrinsics from the node. @@ -58,14 +47,17 @@ fn main() { /// /// As the result of running the code the entire content of the transaction pool is going /// to be removed and the extrinsics are going to be temporarily banned. 
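The `rpc-client` hunks in this area migrate from futures 0.1 driven by `hyper::rt` to futures 0.3: the `rt::run(rt::lazy(..))` driver becomes `futures::executor::block_on(async { .. })`, combinators on fallible futures move to `TryFutureExt` (`and_then`, `map_ok`), and `main` returns a `Result`. A reduced, runnable sketch of the same pattern, assuming `futures = "0.3"` and a toy async fn in place of the real RPC calls:

```rust
use futures::TryFutureExt;

// Toy stand-in for the node's `pending_extrinsics` RPC call.
async fn pending_extrinsics() -> Result<Vec<u32>, String> {
    Ok(vec![1, 2, 3])
}

fn main() -> Result<(), String> {
    // futures 0.3: a plain block_on over an async block replaces rt::run.
    futures::executor::block_on(async {
        pending_extrinsics()
            // `and_then` chains another fallible step (here: counting them).
            .and_then(|pending| async move { Ok::<_, String>(pending.len()) })
            // `map_ok` replaces the old `map` on the success value.
            .map_ok(|removed| println!("Removed extrinsics: {:?}", removed))
            .await
    })
}
```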
-fn remove_all_extrinsics(client: AuthorClient) -> impl Future { - client.pending_extrinsics() +fn remove_all_extrinsics( + client: AuthorClient, +) -> impl Future> { + client + .pending_extrinsics() .and_then(move |pending| { client.remove_extrinsic( - pending.into_iter().map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())).collect() + pending.into_iter().map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())).collect(), ) }) - .map(|removed| { + .map_ok(|removed| { println!("Removed extrinsics: {:?}", removed); }) } diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index aef4a82db776a..0cb606f79f086 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc" -version = "2.0.0" +version = "3.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,28 +11,27 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpc-core = "15.0.0" +jsonrpc-core = "18.0.0" node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "2.0.0", path = "../runtime" } -pallet-contracts-rpc = { version = "0.8.0", path = "../../../frame/contracts/rpc/" } -pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-consensus-babe = { version = "0.8.0", path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { version = "0.8.0", path = "../../../client/consensus/babe/rpc" } -sc-consensus-epochs = { version = "0.8.0", path = "../../../client/consensus/epochs" } -sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } -sc-finality-grandpa = { version = "0.8.0", path = "../../../client/finality-grandpa" } -sc-finality-grandpa-rpc = { version = "0.8.0", path = "../../../client/finality-grandpa/rpc" } -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-rpc-api = { version = "0.8.0", path = "../../../client/rpc-api" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sc-sync-state-rpc = { version = "0.8.0", path = "../../../client/sync-state-rpc" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } +pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" } +pallet-mmr-rpc = { version = "3.0.0", path = "../../../frame/merkle-mountain-range/rpc/" } +pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } +sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } +sc-consensus-babe-rpc = { version = "0.10.0-dev", path = "../../../client/consensus/babe/rpc" } +sc-consensus-epochs = { version = "0.10.0-dev", 
path = "../../../client/consensus/epochs" } +sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } +sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality-grandpa" } +sc-finality-grandpa-rpc = { version = "0.10.0-dev", path = "../../../client/finality-grandpa/rpc" } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } +sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } +sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state-rpc" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +substrate-frame-rpc-system = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/system" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 1ced3d60ab362..2f7862d3d2644 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,24 +32,24 @@ use std::sync::Arc; -use sp_keystore::SyncCryptoStorePtr; -use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash}; +use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; +use sc_client_api::AuxStore; use sc_consensus_babe::{Config, Epoch}; use sc_consensus_babe_rpc::BabeRpcHandler; use sc_consensus_epochs::SharedEpochChanges; use sc_finality_grandpa::{ - SharedVoterState, SharedAuthoritySet, FinalityProofProvider, GrandpaJustificationStream + FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState, }; use sc_finality_grandpa_rpc::GrandpaRpcHandler; +use sc_rpc::SubscriptionTaskExecutor; pub use sc_rpc_api::DenyUnsafe; +use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; -use sc_rpc::SubscriptionTaskExecutor; -use sp_transaction_pool::TransactionPool; -use sc_client_api::AuxStore; +use sp_keystore::SyncCryptoStorePtr; /// Light client extra dependencies. pub struct LightDeps { @@ -111,39 +111,35 @@ pub type IoHandler = jsonrpc_core::IoHandler; /// Instantiate all Full RPC extensions. 
pub fn create_full( deps: FullDeps, -) -> jsonrpc_core::IoHandler where - C: ProvideRuntimeApi + HeaderBackend + AuxStore + - HeaderMetadata + Sync + Send + 'static, +) -> Result, Box> +where + C: ProvideRuntimeApi + + HeaderBackend + + AuxStore + + HeaderMetadata + + Sync + + Send + + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_contracts_rpc::ContractsRuntimeApi, + C::Api: pallet_contracts_rpc::ContractsRuntimeApi, + C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, P: TransactionPool + 'static, - SC: SelectChain +'static, + SC: SelectChain + 'static, B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use substrate_frame_rpc_system::{FullSystem, SystemApi}; use pallet_contracts_rpc::{Contracts, ContractsApi}; + use pallet_mmr_rpc::{Mmr, MmrApi}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + use substrate_frame_rpc_system::{FullSystem, SystemApi}; let mut io = jsonrpc_core::IoHandler::default(); - let FullDeps { - client, - pool, - select_chain, - chain_spec, - deny_unsafe, - babe, - grandpa, - } = deps; + let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa } = deps; - let BabeDeps { - keystore, - babe_config, - shared_epoch_changes, - } = babe; + let BabeDeps { keystore, babe_config, shared_epoch_changes } = babe; let GrandpaDeps { shared_voter_state, shared_authority_set, @@ -152,61 +148,45 @@ pub fn create_full( finality_provider, } = grandpa; - io.extend_with( - SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)) - ); + io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe))); // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. 
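The return type of `create_full` changes above from a bare `jsonrpc_core::IoHandler` to a `Result`, because one of the registered handlers (`sc_sync_state_rpc::SyncStateRpcHandler::new`, constructed with `?` in the lines that follow) is now fallible. A toy sketch of why one fallible registration forces the whole builder to become fallible (names here are stand-ins, not the real jsonrpc-core or sc-sync-state-rpc APIs):

```rust
use std::error::Error;

// Toy stand-in for jsonrpc-core's IoHandler.
#[derive(Default)]
struct IoHandler {
    delegates: Vec<String>,
}

impl IoHandler {
    fn extend_with(&mut self, name: &str) {
        self.delegates.push(name.to_string());
    }
}

// Toy stand-in for the now-fallible sync-state handler constructor.
fn sync_state_handler(has_chain_spec: bool) -> Result<String, Box<dyn Error + Send + Sync>> {
    if has_chain_spec {
        Ok("sync_state".to_string())
    } else {
        Err("missing chain spec".into())
    }
}

// One fallible registration means the builder itself returns Result and
// propagates with `?` instead of panicking, mirroring `create_full`.
fn create_full(has_chain_spec: bool) -> Result<IoHandler, Box<dyn Error + Send + Sync>> {
    let mut io = IoHandler::default();
    io.extend_with("system");
    io.extend_with(&sync_state_handler(has_chain_spec)?);
    Ok(io)
}

fn main() {
    assert!(create_full(false).is_err());
    assert_eq!(create_full(true).unwrap().delegates.len(), 2);
}
```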
- io.extend_with( - ContractsApi::to_delegate(Contracts::new(client.clone())) - ); - io.extend_with( - TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())) - ); - io.extend_with( - sc_consensus_babe_rpc::BabeApi::to_delegate( - BabeRpcHandler::new( - client.clone(), - shared_epoch_changes.clone(), - keystore, - babe_config, - select_chain, - deny_unsafe, - ), - ) - ); - io.extend_with( - sc_finality_grandpa_rpc::GrandpaApi::to_delegate( - GrandpaRpcHandler::new( - shared_authority_set.clone(), - shared_voter_state, - justification_stream, - subscription_executor, - finality_provider, - ) - ) - ); - - io.extend_with( - sc_sync_state_rpc::SyncStateRpcApi::to_delegate( - sc_sync_state_rpc::SyncStateRpcHandler::new( - chain_spec, - client, - shared_authority_set, - shared_epoch_changes, - deny_unsafe, - ) - ) - ); - - io + io.extend_with(ContractsApi::to_delegate(Contracts::new(client.clone()))); + io.extend_with(MmrApi::to_delegate(Mmr::new(client.clone()))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone()))); + io.extend_with(sc_consensus_babe_rpc::BabeApi::to_delegate(BabeRpcHandler::new( + client.clone(), + shared_epoch_changes.clone(), + keystore, + babe_config, + select_chain, + deny_unsafe, + ))); + io.extend_with(sc_finality_grandpa_rpc::GrandpaApi::to_delegate(GrandpaRpcHandler::new( + shared_authority_set.clone(), + shared_voter_state, + justification_stream, + subscription_executor, + finality_provider, + ))); + + io.extend_with(sc_sync_state_rpc::SyncStateRpcApi::to_delegate( + sc_sync_state_rpc::SyncStateRpcHandler::new( + chain_spec, + client, + shared_authority_set, + shared_epoch_changes, + deny_unsafe, + )?, + )); + + Ok(io) } /// Instantiate all Light RPC extensions. -pub fn create_light( - deps: LightDeps, -) -> jsonrpc_core::IoHandler where +pub fn create_light(deps: LightDeps) -> jsonrpc_core::IoHandler +where C: sp_blockchain::HeaderBackend, C: Send + Sync + 'static, F: sc_client_api::light::Fetcher + 'static, @@ -215,16 +195,14 @@ pub fn create_light( { use substrate_frame_rpc_system::{LightSystem, SystemApi}; - let LightDeps { + let LightDeps { client, pool, remote_blockchain, fetcher } = deps; + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(SystemApi::::to_delegate(LightSystem::new( client, - pool, remote_blockchain, - fetcher - } = deps; - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with( - SystemApi::::to_delegate(LightSystem::new(client, remote_blockchain, fetcher, pool)) - ); + fetcher, + pool, + ))); io } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 80c914ff57580..dafd9db8bab96 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-runtime" -version = "2.0.0" +version = "3.0.0-dev" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -14,89 +14,107 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -integer-sqrt = { version = "0.1.2" } -serde = { version = "1.0.102", optional = true } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = [ + "derive", + "max-encoded-len", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } static_assertions = "1.1.0" hex-literal = { version = "0.3.1", optional = true } +log = { version = "0.4.14", 
default-features = false } # primitives -sp-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../../primitives/consensus/babe" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0"} -sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } +sp-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/consensus/babe" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "4.0.0-dev" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents" } node-primitives = { version = "2.0.0", default-features = false, path = "../primitives" } -sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../../primitives/staking" } -sp-keyring = { version = "2.0.0", optional = true, path = "../../../primitives/keyring" } -sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } +sp-keyring = { version = "4.0.0-dev", optional = true, path = "../../../primitives/keyring" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } +sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/version" } +sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/npos-elections" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" } # frame dependencies -frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-support = { 
version = "2.0.0", default-features = false, path = "../../../frame/support" } -frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } -frame-system-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../frame/authority-discovery" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../../../frame/authorship" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../../frame/babe" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } -pallet-collective = { version = "2.0.0", default-features = false, path = "../../../frame/collective" } -pallet-contracts = { version = "2.0.0", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } -pallet-democracy = { version = "2.0.0", default-features = false, path = "../../../frame/democracy" } -pallet-elections-phragmen = { version = "2.0.0", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0", default-features = false, path = "../../../frame/indices" } -pallet-identity = { version = "2.0.0", default-features = false, path = "../../../frame/identity" } -pallet-membership = { version = "2.0.0", default-features = false, path = "../../../frame/membership" } -pallet-multisig = { version = "2.0.0", default-features = false, path = "../../../frame/multisig" } -pallet-offences = { version = "2.0.0", default-features = false, path = "../../../frame/offences" } -pallet-offences-benchmarking = { version = "2.0.0", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-proxy = { version = "2.0.0", default-features = false, path = "../../../frame/proxy" } -pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-recovery = { version = "2.0.0", default-features = false, path = "../../../frame/recovery" } -pallet-session = { version = "2.0.0", features = ["historical"], path = "../../../frame/session", default-features = false } -pallet-session-benchmarking = { version = "2.0.0", path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { version = "2.0.0", default-features = false, path = "../../../frame/staking" } -pallet-staking-reward-curve = { version = "2.0.0", default-features = false, path = "../../../frame/staking/reward-curve" } -pallet-scheduler = { version = "2.0.0", default-features = false, path = "../../../frame/scheduler" } -pallet-society = { version = "2.0.0", default-features = false, path = "../../../frame/society" } -pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } -pallet-timestamp = { 
version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } -pallet-treasury = { version = "2.0.0", default-features = false, path = "../../../frame/treasury" } -pallet-utility = { version = "2.0.0", default-features = false, path = "../../../frame/utility" } -pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-vesting = { version = "2.0.0", default-features = false, path = "../../../frame/vesting" } +frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } +frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } +frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-support" } +frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } +pallet-assets = { version = "4.0.0-dev", default-features = false, path = "../../../frame/assets" } +pallet-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/authority-discovery" } +pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../../../frame/authorship" } +pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../../frame/babe" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } +pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../../../frame/bounties" } +pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } +pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } +pallet-contracts-primitives = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } +pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } +pallet-elections-phragmen = { version = "5.0.0-dev", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-gilt = { version = "4.0.0-dev", default-features = false, path = "../../../frame/gilt" } +pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } +pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } +pallet-indices = { version = "4.0.0-dev", default-features = false, path = "../../../frame/indices" } 
+pallet-identity = { version = "4.0.0-dev", default-features = false, path = "../../../frame/identity" } +pallet-lottery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/lottery" } +pallet-membership = { version = "4.0.0-dev", default-features = false, path = "../../../frame/membership" } +pallet-mmr = { version = "4.0.0-dev", default-features = false, path = "../../../frame/merkle-mountain-range" } +pallet-multisig = { version = "4.0.0-dev", default-features = false, path = "../../../frame/multisig" } +pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../../frame/offences" } +pallet-offences-benchmarking = { version = "4.0.0-dev", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } +pallet-proxy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/proxy" } +pallet-randomness-collective-flip = { version = "4.0.0-dev", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/recovery" } +pallet-session = { version = "4.0.0-dev", features = [ + "historical", +], path = "../../../frame/session", default-features = false } +pallet-session-benchmarking = { version = "4.0.0-dev", path = "../../../frame/session/benchmarking", default-features = false, optional = true } +pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking" } +pallet-staking-reward-curve = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking/reward-curve" } +pallet-scheduler = { version = "4.0.0-dev", default-features = false, path = "../../../frame/scheduler" } +pallet-society = { version = "4.0.0-dev", default-features = false, path = "../../../frame/society" } +pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../frame/timestamp" } +pallet-tips = { version = "4.0.0-dev", default-features = false, path = "../../../frame/tips" } +pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../../../frame/treasury" } +pallet-utility = { version = "4.0.0-dev", default-features = false, path = "../../../frame/utility" } +pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-transaction-storage = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-storage" } +pallet-uniques = { version = "4.0.0-dev", default-features = false, path = "../../../frame/uniques" } +pallet-vesting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/vesting" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } - -[dev-dependencies] -sp-io = { version = "2.0.0", path = "../../../primitives/io" } +substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } [features] default = ["std"] -with-tracing = [ "frame-executive/with-tracing" ] +with-tracing = ["frame-executive/with-tracing"] std = [ "sp-authority-discovery/std", + "pallet-assets/std", "pallet-authority-discovery/std", 
"pallet-authorship/std", "sp-consensus-babe/std", "pallet-babe/std", "pallet-balances/std", + "pallet-bounties/std", "sp-block-builder/std", "codec/std", + "scale-info/std", "pallet-collective/std", "pallet-contracts/std", "pallet-contracts-primitives/std", @@ -104,11 +122,14 @@ std = [ "pallet-democracy/std", "pallet-elections-phragmen/std", "frame-executive/std", + "pallet-gilt/std", "pallet-grandpa/std", "pallet-im-online/std", "pallet-indices/std", "sp-inherents/std", + "pallet-lottery/std", "pallet-membership/std", + "pallet-mmr/std", "pallet-multisig/std", "pallet-identity/std", "pallet-scheduler/std", @@ -119,7 +140,6 @@ std = [ "sp-core/std", "pallet-randomness-collective-flip/std", "sp-std/std", - "serde", "pallet-session/std", "sp-api/std", "sp-runtime/std", @@ -132,43 +152,105 @@ std = [ "frame-benchmarking/std", "frame-system-rpc-runtime-api/std", "frame-system/std", + "pallet-election-provider-multi-phase/std", "pallet-timestamp/std", + "pallet-tips/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", + "pallet-transaction-storage/std", "pallet-treasury/std", "sp-transaction-pool/std", "pallet-utility/std", "sp-version/std", "pallet-society/std", "pallet-recovery/std", + "pallet-uniques/std", "pallet-vesting/std", + "log/std", + "frame-try-runtime/std", + "sp-npos-elections/std", + "sp-io/std" ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-election-provider-multi-phase/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-bounties/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-contracts/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", + "pallet-gilt/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", + "pallet-lottery/runtime-benchmarks", + "pallet-membership/runtime-benchmarks", + "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-tips/runtime-benchmarks", + "pallet-transaction-storage/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", + "pallet-uniques/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-offences-benchmarking", "pallet-session-benchmarking", "frame-system-benchmarking", "hex-literal", ] +try-runtime = [ + "frame-executive/try-runtime", + "frame-try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-authority-discovery/try-runtime", + "pallet-authorship/try-runtime", + "pallet-babe/try-runtime", + "pallet-balances/try-runtime", + "pallet-bounties/try-runtime", + "pallet-collective/try-runtime", + "pallet-contracts/try-runtime", + "pallet-democracy/try-runtime", + "pallet-elections-phragmen/try-runtime", + "pallet-grandpa/try-runtime", + "pallet-im-online/try-runtime", + "pallet-indices/try-runtime", + "pallet-lottery/try-runtime", + "pallet-membership/try-runtime", + "pallet-mmr/try-runtime", + "pallet-multisig/try-runtime", + "pallet-identity/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-offences/try-runtime", + 
"pallet-proxy/try-runtime", + "pallet-randomness-collective-flip/try-runtime", + "pallet-session/try-runtime", + "pallet-staking/try-runtime", + "pallet-sudo/try-runtime", + "pallet-election-provider-multi-phase/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-tips/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-treasury/try-runtime", + "pallet-utility/try-runtime", + "pallet-society/try-runtime", + "pallet-recovery/try-runtime", + "pallet-uniques/try-runtime", + "pallet-vesting/try-runtime", + "pallet-gilt/try-runtime", +] +# Make contract callable functions marked as __unstable__ available. Do not enable +# on live chains as those are subject to change. +contracts-unstable-interface = ["pallet-contracts/unstable-interface"] diff --git a/bin/node/runtime/build.rs b/bin/node/runtime/build.rs index 4f111bc993007..a1c4b2d892cfe 100644 --- a/bin/node/runtime/build.rs +++ b/bin/node/runtime/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/bin/node/runtime/src/constants.rs b/bin/node/runtime/src/constants.rs index 8e87d61c1e6b5..7533025a70b00 100644 --- a/bin/node/runtime/src/constants.rs +++ b/bin/node/runtime/src/constants.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ pub mod currency { use node_primitives::Balance; pub const MILLICENTS: Balance = 1_000_000_000; - pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. + pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. pub const DOLLARS: Balance = 100 * CENTS; pub const fn deposit(items: u32, bytes: u32) -> Balance { @@ -32,10 +32,10 @@ pub mod currency { /// Time. pub mod time { - use node_primitives::{Moment, BlockNumber}; + use node_primitives::{BlockNumber, Moment}; /// Since BABE is probabilistic this is the average expected block time that - /// we are targetting. Blocks will be produced at a minimum duration defined + /// we are targeting. Blocks will be produced at a minimum duration defined /// by `SLOT_DURATION`, but some slots will not be allocated to any /// authority and hence no block will be produced. We expect to have this /// block time on average following the defined slot duration and the value @@ -50,15 +50,19 @@ pub mod time { /// always be assigned, in which case `MILLISECS_PER_BLOCK` and /// `SLOT_DURATION` should have the same value. /// - /// + /// pub const MILLISECS_PER_BLOCK: Moment = 3000; pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; + // NOTE: Currently it is not possible to change the slot duration after the chain has started. + // Attempting to do so will brick block production. 
pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; // 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks. pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); + // NOTE: Currently it is not possible to change the epoch duration after the chain has started. + // Attempting to do so will brick block production. pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES; pub const EPOCH_DURATION_IN_SLOTS: u64 = { const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 16666997b3a55..e315a45e698ce 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ //! Some configurable implementations as associated type for the substrate runtime. -use frame_support::traits::{OnUnbalanced, Currency}; -use crate::{Balances, Authorship, NegativeImbalance}; +use crate::{Authorship, Balances, NegativeImbalance}; +use frame_support::traits::{Currency, OnUnbalanced}; pub struct Author; impl OnUnbalanced for Author { @@ -29,18 +29,25 @@ impl OnUnbalanced for Author { #[cfg(test)] mod multiplier_tests { - use sp_runtime::{assert_eq_error_rate, FixedPointNumber, traits::Convert}; use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; + use sp_runtime::{ + assert_eq_error_rate, + traits::{Convert, One, Zero}, + FixedPointNumber, + }; use crate::{ constants::{currency::*, time::*}, - TransactionPayment, MaximumBlockWeight, AvailableBlockRatio, Runtime, TargetBlockFullness, - AdjustmentVariable, System, MinimumMultiplier, + AdjustmentVariable, MinimumMultiplier, Runtime, RuntimeBlockWeights as BlockWeights, + System, TargetBlockFullness, TransactionPayment, }; - use frame_support::weights::{Weight, WeightToFeePolynomial}; + use frame_support::weights::{DispatchClass, Weight, WeightToFeePolynomial}; - fn max() -> Weight { - AvailableBlockRatio::get() * MaximumBlockWeight::get() + fn max_normal() -> Weight { + BlockWeights::get() + .get(DispatchClass::Normal) + .max_total + .unwrap_or_else(|| BlockWeights::get().max_block) } fn min_multiplier() -> Multiplier { @@ -48,7 +55,7 @@ mod multiplier_tests { } fn target() -> Weight { - TargetBlockFullness::get() * max() + TargetBlockFullness::get() * max_normal() } // update based on runtime impl. @@ -62,34 +69,39 @@ mod multiplier_tests { } // update based on reference impl. - fn truth_value_update(block_weight: Weight, previous: Multiplier) -> Multiplier { + fn truth_value_update(block_weight: Weight, previous: Multiplier) -> Multiplier { let accuracy = Multiplier::accuracy() as f64; let previous_float = previous.into_inner() as f64 / accuracy; // bump if it is zero. 
let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); // maximum tx weight - let m = max() as f64; + let m = max_normal() as f64; // block weight always truncated to max weight let block_weight = (block_weight as f64).min(m); - let v: f64 = AdjustmentVariable::get().to_fraction(); + let v: f64 = AdjustmentVariable::get().to_float(); // Ideal saturation in terms of weight let ss = target() as f64; // Current saturation in terms of weight let s = block_weight; - let t1 = v * (s/m - ss/m); - let t2 = v.powi(2) * (s/m - ss/m).powi(2) / 2.0; + let t1 = v * (s / m - ss / m); + let t2 = v.powi(2) * (s / m - ss / m).powi(2) / 2.0; let next_float = previous_float * (1.0 + t1 + t2); - Multiplier::from_fraction(next_float) + Multiplier::from_float(next_float) } - fn run_with_system_weight(w: Weight, assertions: F) where F: Fn() -> () { - let mut t: sp_io::TestExternalities = - frame_system::GenesisConfig::default().build_storage::().unwrap().into(); + fn run_with_system_weight(w: Weight, assertions: F) + where + F: Fn() -> (), + { + let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into(); t.execute_with(|| { - System::set_block_limits(w, 0); + System::set_block_consumed_resources(w, 0); assertions() }); } @@ -102,8 +114,8 @@ mod multiplier_tests { (100, fm.clone()), (1000, fm.clone()), (target(), fm.clone()), - (max() / 2, fm.clone()), - (max(), fm.clone()), + (max_normal() / 2, fm.clone()), + (max_normal(), fm.clone()), ]; test_set.into_iter().for_each(|(w, fm)| { run_with_system_weight(w, || { @@ -155,7 +167,9 @@ mod multiplier_tests { loop { let next = runtime_multiplier_update(fm); fm = next; - if fm == min_multiplier() { break; } + if fm == min_multiplier() { + break + } iterations += 1; } assert!(iterations > 533_333); @@ -164,7 +178,7 @@ mod multiplier_tests { #[test] fn min_change_per_day() { - run_with_system_weight(max(), || { + run_with_system_weight(max_normal(), || { let mut fm = Multiplier::one(); // See the example in the doc of `TargetedFeeAdjustment`. are at least 0.234, hence // `fm > 1.234`. @@ -182,7 +196,7 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. // almost full. The entire quota of normal transactions is taken. - let block_weight = AvailableBlockRatio::get() * max() - 100; + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - 100; // Default substrate weight. let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); @@ -196,11 +210,13 @@ mod multiplier_tests { loop { let next = runtime_multiplier_update(fm); // if no change, panic. This should never happen in this case. - if fm == next { panic!("The fee should ever increase"); } + if fm == next { + panic!("The fee should ever increase"); + } fm = next; iterations += 1; let fee = - ::WeightToFee::calc(&tx_weight); + ::WeightToFee::calc(&tx_weight); let adjusted_fee = fm.saturating_mul_acc_int(fee); println!( "iteration {}, new fm = {:?}. 
Fee at this point is: {} units / {} millicents, \ @@ -223,7 +239,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); assert_eq_error_rate!( next, - truth_value_update(target() / 4 , fm), + truth_value_update(target() / 4, fm), Multiplier::from_inner(100), ); @@ -235,12 +251,11 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); assert_eq_error_rate!( next, - truth_value_update(target() / 2 , fm), + truth_value_update(target() / 2, fm), Multiplier::from_inner(100), ); // Light block. Multiplier is reduced a little. assert!(next < fm); - }); run_with_system_weight(target(), || { let next = runtime_multiplier_update(fm); @@ -257,7 +272,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); assert_eq_error_rate!( next, - truth_value_update(target() * 2 , fm), + truth_value_update(target() * 2, fm), Multiplier::from_inner(100), ); @@ -305,7 +320,7 @@ mod multiplier_tests { fn weight_to_fee_should_not_overflow_on_large_weights() { let kb = 1024 as Weight; let mb = kb * kb; - let max_fm = Multiplier::saturating_from_integer(i128::max_value()); + let max_fm = Multiplier::saturating_from_integer(i128::MAX); // check that for all values it can compute, correctly. vec![ @@ -320,11 +335,13 @@ mod multiplier_tests { 10 * mb, 2147483647, 4294967295, - MaximumBlockWeight::get() / 2, - MaximumBlockWeight::get(), + BlockWeights::get().max_block / 2, + BlockWeights::get().max_block, Weight::max_value() / 2, Weight::max_value(), - ].into_iter().for_each(|i| { + ] + .into_iter() + .for_each(|i| { run_with_system_weight(i, || { let next = runtime_multiplier_update(Multiplier::one()); let truth = truth_value_update(i, Multiplier::one()); @@ -334,14 +351,12 @@ mod multiplier_tests { // Some values that are all above the target and will cause an increase. let t = target(); - vec![t + 100, t * 2, t * 4] - .into_iter() - .for_each(|i| { - run_with_system_weight(i, || { - let fm = runtime_multiplier_update(max_fm); - // won't grow. The convert saturates everything. - assert_eq!(fm, max_fm); - }) - }); + vec![t + 100, t * 2, t * 4].into_iter().for_each(|i| { + run_with_system_weight(i, || { + let fm = runtime_multiplier_update(max_fm); + // won't grow. The convert saturates everything. + assert_eq!(fm, max_fm); + }) + }); } } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index dfa7a4680abe8..7c6475bd18d6a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,68 +16,72 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! The Substrate runtime. This can be compiled with ``#[no_std]`, ready for Wasm. +//! The Substrate runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] - -use sp_std::prelude::*; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - construct_runtime, parameter_types, debug, RuntimeDebug, + construct_runtime, parameter_types, + traits::{ + Currency, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, + Nothing, OnUnbalanced, U128CurrencyToVote, + }, weights::{ - Weight, IdentityFee, constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + DispatchClass, IdentityFee, Weight, }, - traits::{ - Currency, Imbalance, KeyOwnerProofSystem, OnUnbalanced, Randomness, LockIdentifier, - U128CurrencyToVote, - }, + PalletId, RuntimeDebug, }; -use frame_system::{EnsureRoot, EnsureOneOf}; -use frame_support::traits::InstanceFilter; -use codec::{Encode, Decode}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureOneOf, EnsureRoot, +}; +pub use node_primitives::{AccountId, Signature}; +use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; +use pallet_contracts::weights::WeightInfo; +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use pallet_session::historical as pallet_session_historical; +pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; +use sp_api::impl_runtime_apis; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_core::{ crypto::KeyTypeId, u32_trait::{_1, _2, _3, _4, _5}, OpaqueMetadata, }; -pub use node_primitives::{AccountId, Signature}; -use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; -use sp_api::impl_runtime_apis; +use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - Permill, Perbill, Perquintill, Percent, ApplyExtrinsicResult, - impl_opaque_keys, generic, create_runtime_str, ModuleId, FixedPointNumber, -}; -use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; -use sp_runtime::traits::{ - self, BlakeTwo256, Block as BlockT, StaticLookup, SaturatedConversion, - ConvertInto, OpaqueKeys, NumberFor, Saturating, + create_runtime_str, + curve::PiecewiseLinear, + generic, impl_opaque_keys, + traits::{ + self, BlakeTwo256, Block as BlockT, ConvertInto, NumberFor, OpaqueKeys, + SaturatedConversion, StaticLookup, + }, + transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, FixedPointNumber, Perbill, Percent, Permill, Perquintill, }; -use sp_version::RuntimeVersion; +use sp_std::prelude::*; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use pallet_grandpa::fg_primitives; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; -pub use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; -use pallet_contracts_rpc_runtime_api::ContractExecResult; -use pallet_session::{historical as pallet_session_historical}; -use sp_inherents::{InherentData, CheckInherentsResult}; +use sp_version::RuntimeVersion; use static_assertions::const_assert; #[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; +pub use 
frame_system::Call as SystemCall; #[cfg(any(feature = "std", test))] pub use pallet_balances::Call as BalancesCall; #[cfg(any(feature = "std", test))] -pub use frame_system::Call as SystemCall; -#[cfg(any(feature = "std", test))] pub use pallet_staking::StakerStatus; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; /// Implementations of some helper traits passed into runtime modules as associated types. pub mod impls; @@ -85,25 +89,25 @@ use impls::Author; /// Constant values used within the runtime. pub mod constants; -use constants::{time::*, currency::*}; +use constants::{currency::*, time::*}; use sp_runtime::generic::Era; -/// Weights for pallets used in the runtime. -mod weights; - // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. This means the client is \ - built with `BUILD_DUMMY_WASM_BINARY` flag and it is only usable for \ - production chains. Please rebuild with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available. This means the client is built with \ + `SKIP_WASM_BUILD` flag and it is only usable for production chains. Please rebuild with \ + the flag disabled.", + ) } /// Runtime version. +#[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node"), impl_name: create_runtime_str!("substrate-node"), @@ -112,26 +116,30 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 260, - impl_version: 0, + spec_version: 267, + impl_version: 1, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, }; +/// The BABE epoch configuration at genesis. +pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { + c: PRIMARY_PROBABILITY, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots, + }; + /// Native version. #[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } type NegativeImbalance = >::NegativeImbalance; pub struct DealWithFees; impl OnUnbalanced for DealWithFees { - fn on_unbalanceds(mut fees_then_tips: impl Iterator) { + fn on_unbalanceds(mut fees_then_tips: impl Iterator) { if let Some(fees) = fees_then_tips.next() { // for fees, 80% to treasury, 20% to author let mut split = fees.ration(80, 20); @@ -145,23 +153,48 @@ impl OnUnbalanced for DealWithFees { } } -const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_percent(10); +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. 
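+// Illustrative arithmetic for the two constants below: with a maximum block weight of
+// 2 * WEIGHT_PER_SECOND and a normal dispatch ratio of 75%, `Normal` extrinsics may
+// consume roughly 1.5 seconds of compute per block, leaving roughly 0.5 seconds
+// reserved for `Operational` extrinsics.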
+const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for 2 seconds of compute with a 6 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; - /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. - pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get().saturating_sub(AVERAGE_ON_INITIALIZE_WEIGHT) - * MaximumBlockWeight::get(); - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; pub const Version: RuntimeVersion = VERSION; -} - -const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); - -impl frame_system::Trait for Runtime { - type BaseCallFilter = (); + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if the block has reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u16 = 42; +} + +const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); + +impl frame_system::Config for Runtime { + type BaseCallFilter = Everything; + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type DbWeight = RocksDbWeight; type Origin = Origin; type Call = Call; type Index = Index; @@ -173,25 +206,22 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); - type SystemWeightInfo = weights::frame_system::WeightInfo; + type SystemWeightInfo = frame_system::weights::SubstrateWeight; + type SS58Prefix = SS58Prefix; + type OnSetCode = (); } -impl pallet_utility::Trait for Runtime { +impl pallet_randomness_collective_flip::Config for Runtime {} + +impl pallet_utility::Config for Runtime { type Event = Event; type Call = Call; - type WeightInfo = weights::pallet_utility::WeightInfo; + type WeightInfo = pallet_utility::weights::SubstrateWeight; } parameter_types! { @@ -202,14 +232,14 @@ parameter_types!
{ pub const MaxSignatories: u16 = 100; } -impl pallet_multisig::Trait for Runtime { +impl pallet_multisig::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; - type WeightInfo = weights::pallet_multisig::WeightInfo; + type WeightInfo = pallet_multisig::weights::SubstrateWeight; } parameter_types! { @@ -224,14 +254,30 @@ parameter_types! { } /// The type used to represent the kinds of proxying allowed. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] pub enum ProxyType { Any, NonTransfer, Governance, Staking, } -impl Default for ProxyType { fn default() -> Self { Self::Any } } +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} impl InstanceFilter for ProxyType { fn filter(&self, c: &Call) -> bool { match self { @@ -239,17 +285,16 @@ impl InstanceFilter for ProxyType { ProxyType::NonTransfer => !matches!( c, Call::Balances(..) | - Call::Vesting(pallet_vesting::Call::vested_transfer(..)) | - Call::Indices(pallet_indices::Call::transfer(..)) + Call::Assets(..) | Call::Uniques(..) | + Call::Vesting(pallet_vesting::Call::vested_transfer { .. }) | + Call::Indices(pallet_indices::Call::transfer { .. }) ), ProxyType::Governance => matches!( c, Call::Democracy(..) | - Call::Council(..) | - Call::Society(..) | - Call::TechnicalCommittee(..) | - Call::Elections(..) | - Call::Treasury(..) + Call::Council(..) | Call::Society(..) | + Call::TechnicalCommittee(..) | + Call::Elections(..) | Call::Treasury(..) ), ProxyType::Staking => matches!(c, Call::Staking(..)), } @@ -265,7 +310,7 @@ impl InstanceFilter for ProxyType { } } -impl pallet_proxy::Trait for Runtime { +impl pallet_proxy::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -273,7 +318,7 @@ impl pallet_proxy::Trait for Runtime { type ProxyDepositBase = ProxyDepositBase; type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; - type WeightInfo = weights::pallet_proxy::WeightInfo; + type WeightInfo = pallet_proxy::weights::SubstrateWeight; type MaxPending = MaxPending; type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; @@ -281,11 +326,12 @@ impl pallet_proxy::Trait for Runtime { } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 50; } -impl pallet_scheduler::Trait for Runtime { +impl pallet_scheduler::Config for Runtime { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; @@ -293,18 +339,23 @@ impl pallet_scheduler::Trait for Runtime { type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = MaxScheduledPerBlock; - type WeightInfo = weights::pallet_scheduler::WeightInfo; + type WeightInfo = pallet_scheduler::weights::SubstrateWeight; } parameter_types! { + // NOTE: Currently it is not possible to change the epoch duration after the chain has started. + // Attempting to do so will brick block production. 
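+	// `EpochDuration` below also feeds into `ReportLongevity`: an equivocation report
+	// must stay actionable for the whole unbonding window, i.e. roughly
+	// BondingDuration * SessionsPerEra * EpochDuration slots.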
pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; + pub const ReportLongevity: u64 = + BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * EpochDuration::get(); } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; type EpochChangeTrigger = pallet_babe::ExternalTrigger; + type DisabledValidators = Session; type KeyOwnerProofSystem = Historical; @@ -319,7 +370,7 @@ impl pallet_babe::Trait for Runtime { )>>::IdentificationTuple; type HandleEquivocation = - pallet_babe::EquivocationHandler; + pallet_babe::EquivocationHandler; type WeightInfo = (); } @@ -328,12 +379,12 @@ parameter_types! { pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; type Event = Event; - type WeightInfo = weights::pallet_indices::WeightInfo; + type WeightInfo = pallet_indices::weights::SubstrateWeight; } parameter_types! { @@ -341,16 +392,19 @@ parameter_types! { // For weight estimation, we assume that the most locks on an individual account will be 50. // This number may need to be adjusted in the future if this assumption no longer holds true. pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Module; - type WeightInfo = weights::pallet_balances::WeightInfo; + type AccountStore = frame_system::Pallet; + type WeightInfo = pallet_balances::weights::SubstrateWeight; } parameter_types! { @@ -360,9 +414,8 @@ parameter_types! { pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); } -impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = DealWithFees; +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = @@ -373,18 +426,18 @@ parameter_types! { pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = Moment; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; - type WeightInfo = weights::pallet_timestamp::WeightInfo; + type WeightInfo = pallet_timestamp::weights::SubstrateWeight; } parameter_types! { pub const UncleGenerations: BlockNumber = 5; } -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -404,9 +457,9 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; - type ValidatorId = ::AccountId; + type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; @@ -414,10 +467,10 @@ impl pallet_session::Trait for Runtime { type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type WeightInfo = weights::pallet_session::WeightInfo; + type WeightInfo = pallet_session::weights::SubstrateWeight; } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -439,16 +492,17 @@ parameter_types! { pub const SlashDeferDuration: pallet_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 256; - pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4; - pub const MaxIterations: u32 = 10; - // 0.05%. The higher the value, the more strict solution acceptance becomes. - pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); - pub OffchainSolutionWeightLimit: Weight = MaximumExtrinsicWeight::get() - .saturating_sub(BlockExecutionWeight::get()) - .saturating_sub(ExtrinsicBaseWeight::get()); + pub OffchainRepeat: BlockNumber = 5; +} + +use frame_election_provider_support::onchain; +impl onchain::Config for Runtime { + type Accuracy = Perbill; + type DataProvider = Staking; } -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { + const MAX_NOMINATIONS: u32 = MAX_NOMINATIONS; type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = U128CurrencyToVote; @@ -463,21 +517,123 @@ impl pallet_staking::Trait for Runtime { type SlashCancelOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>, >; type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type MaxIterations = MaxIterations; - type MinSolutionScoreBump = MinSolutionScoreBump; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = StakingUnsignedPriority; - // The unsigned solution weight targeted by the OCW. We set it to the maximum possible value of - // a single extrinsic. - type OffchainSolutionWeightLimit = OffchainSolutionWeightLimit; - type WeightInfo = weights::pallet_staking::WeightInfo; + type ElectionProvider = ElectionProviderMultiPhase; + type GenesisElectionProvider = onchain::OnChainSequentialPhragmen; + type WeightInfo = pallet_staking::weights::SubstrateWeight; +} + +parameter_types! { + // phase durations. 1/4 of the last session for each. 
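+	// Concretely (illustrative): the signed phase opens first, then the unsigned phase,
+	// each lasting EPOCH_DURATION_IN_BLOCKS / 4 blocks, so solution submission occupies
+	// the second half of the era's last session.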
+ pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; + pub const UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; + + // signed config + pub const SignedMaxSubmissions: u32 = 10; + pub const SignedRewardBase: Balance = 1 * DOLLARS; + pub const SignedDepositBase: Balance = 1 * DOLLARS; + pub const SignedDepositByte: Balance = 1 * CENTS; + + pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(1u32, 10_000); + + // miner configs + pub const MultiPhaseUnsignedPriority: TransactionPriority = StakingUnsignedPriority::get() - 1u64; + pub MinerMaxWeight: Weight = RuntimeBlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic.expect("Normal extrinsics have a weight limit configured; qed") + .saturating_sub(BlockExecutionWeight::get()); + // Solution can occupy 90% of normal block size + pub MinerMaxLength: u32 = Perbill::from_rational(9u32, 10) * + *RuntimeBlockLength::get() + .max + .get(DispatchClass::Normal); +} + +sp_npos_elections::generate_solution_type!( + #[compact] + pub struct NposSolution16::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + >(16) +); + +pub const MAX_NOMINATIONS: u32 = ::LIMIT as u32; + +/// The numbers configured here should always be more than the maximum limits of the staking pallet +/// to ensure the election snapshot will not run out of memory. +pub struct BenchmarkConfig; +impl pallet_election_provider_multi_phase::BenchmarkingConfig for BenchmarkConfig { + const VOTERS: [u32; 2] = [5_000, 10_000]; + const TARGETS: [u32; 2] = [1_000, 2_000]; + const ACTIVE_VOTERS: [u32; 2] = [1000, 4_000]; + const DESIRED_TARGETS: [u32; 2] = [400, 800]; + const SNAPSHOT_MAXIMUM_VOTERS: u32 = 25_000; + const MINER_MAXIMUM_VOTERS: u32 = 15_000; + const MAXIMUM_TARGETS: u32 = 2000; +} + +/// Maximum number of iterations for balancing that will be executed in the embedded OCW +/// miner of the election provider multi-phase pallet. +pub const MINER_MAX_ITERATIONS: u32 = 10; + +/// A source of random balancing parameters for the NposSolver, meant to be run by the OCW election miner.
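+//
+// A sketch of the sampling performed in `get` below (illustrative values): the OCW
+// draws its random seed and reduces it modulo MINER_MAX_ITERATIONS + 1,
+//
+//   let seed = sp_io::offchain::random_seed();
+//   let iters = u32::decode(&mut TrailingZeroInput::new(&seed)).unwrap() % 11;
+//
+// which yields an iteration count in 0..=MINER_MAX_ITERATIONS.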
+pub struct OffchainRandomBalancing; +impl frame_support::pallet_prelude::Get> + for OffchainRandomBalancing +{ + fn get() -> Option<(usize, sp_npos_elections::ExtendedBalance)> { + use sp_runtime::traits::TrailingZeroInput; + let iters = match MINER_MAX_ITERATIONS { + 0 => 0, + max @ _ => { + let seed = sp_io::offchain::random_seed(); + let random = ::decode(&mut TrailingZeroInput::new(&seed)) + .expect("input is padded with zeroes; qed") % + max.saturating_add(1); + random as usize + }, + }; + + Some((iters, 0)) + } +} + +impl pallet_election_provider_multi_phase::Config for Runtime { + type Event = Event; + type Currency = Balances; + type EstimateCallFee = TransactionPayment; + type SignedPhase = SignedPhase; + type UnsignedPhase = UnsignedPhase; + type SolutionImprovementThreshold = SolutionImprovementThreshold; + type OffchainRepeat = OffchainRepeat; + type MinerMaxWeight = MinerMaxWeight; + type MinerMaxLength = MinerMaxLength; + type MinerTxPriority = MultiPhaseUnsignedPriority; + type SignedMaxSubmissions = SignedMaxSubmissions; + type SignedRewardBase = SignedRewardBase; + type SignedDepositBase = SignedDepositBase; + type SignedDepositByte = SignedDepositByte; + type SignedDepositWeight = (); + type SignedMaxWeight = MinerMaxWeight; + type SlashHandler = (); // burn slashes + type RewardHandler = (); // nothing to do upon rewards + type DataProvider = Staking; + type Solution = NposSolution16; + type Fallback = pallet_election_provider_multi_phase::NoFallback; + type Solver = frame_election_provider_support::SequentialPhragmen< + AccountId, + pallet_election_provider_multi_phase::SolutionAccuracyOf, + OffchainRandomBalancing, + >; + type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; + type ForceOrigin = EnsureRootOrHalfCouncil; + type BenchmarkingConfig = BenchmarkConfig; } parameter_types! { @@ -494,29 +650,36 @@ parameter_types! { pub const MaxProposals: u32 = 100; } -impl pallet_democracy::Trait for Runtime { +impl pallet_democracy::Config for Runtime { type Proposal = Call; type Event = Event; type Currency = Balances; type EnactmentPeriod = EnactmentPeriod; type LaunchPeriod = LaunchPeriod; type VotingPeriod = VotingPeriod; + type VoteLockingPeriod = EnactmentPeriod; // Same as EnactmentPeriod type MinimumDeposit = MinimumDeposit; /// A straight majority of the council can decide what their next motion is. - type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; + type ExternalOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; /// A super-majority can have the next scheduled referendum be a straight majority-carries vote. - type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; + type ExternalMajorityOrigin = + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; /// A unanimous council can have the next scheduled referendum be a straight default-carries /// (NTB) vote. - type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; + type ExternalDefaultOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote /// be tabled immediately and with a shorter voting/enactment period. 
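	// `EnsureProportionAtLeast<_2, _3, ..>` reads as: at least two thirds of the named
	// collective must have approved for the origin check to pass.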
- type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; - type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; + type FastTrackOrigin = + pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; + type InstantOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; type InstantAllowed = InstantAllowed; type FastTrackVotingPeriod = FastTrackVotingPeriod; // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; + type CancellationOrigin = + pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; // To cancel a proposal before it has been passed, the technical committee must be unanimous or // Root must agree. type CancelProposalOrigin = EnsureOneOf< @@ -526,7 +689,7 @@ impl pallet_democracy::Trait for Runtime { >; type BlacklistOrigin = EnsureRoot; // Any single technical committee member may veto a coming council proposal, however they can - // only do it once and it lasts only for the cooloff period. + // only do it once and it lasts only for the cool-off period. type VetoOrigin = pallet_collective::EnsureMember; type CooloffPeriod = CooloffPeriod; type PreimageByteDeposit = PreimageByteDeposit; @@ -535,7 +698,7 @@ impl pallet_democracy::Trait for Runtime { type Scheduler = Scheduler; type PalletsOrigin = OriginCaller; type MaxVotes = MaxVotes; - type WeightInfo = weights::pallet_democracy::WeightInfo; + type WeightInfo = pallet_democracy::weights::SubstrateWeight; type MaxProposals = MaxProposals; } @@ -546,7 +709,7 @@ parameter_types! { } type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -554,24 +717,27 @@ impl pallet_collective::Trait for Runtime { type MaxProposals = CouncilMaxProposals; type MaxMembers = CouncilMaxMembers; type DefaultVote = pallet_collective::PrimeDefaultVote; - type WeightInfo = weights::pallet_collective::WeightInfo; + type WeightInfo = pallet_collective::weights::SubstrateWeight; } parameter_types! { pub const CandidacyBond: Balance = 10 * DOLLARS; - pub const VotingBond: Balance = 1 * DOLLARS; + // 1 storage item created, key size is 32 bytes, value size is 16+16. + pub const VotingBondBase: Balance = deposit(1, 64); + // additional data per vote is 32 bytes (account id). + pub const VotingBondFactor: Balance = deposit(0, 32); pub const TermDuration: BlockNumber = 7 * DAYS; pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; - pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; + pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } // Make sure that there are no more than `MaxMembers` members elected via elections-phragmen. 
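// `const_assert!` turns that invariant into a compile-time check: with `DesiredMembers`
// set to 13 above, the build fails if `CouncilMaxMembers` is ever configured below it.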
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); -impl pallet_elections_phragmen::Trait for Runtime { +impl pallet_elections_phragmen::Config for Runtime { type Event = Event; - type ModuleId = ElectionsPhragmenModuleId; + type PalletId = ElectionsPhragmenPalletId; type Currency = Balances; type ChangeMembers = Council; // NOTE: this implies that council's genesis members cannot be set directly and must come from @@ -579,14 +745,14 @@ impl pallet_elections_phragmen::Trait for Runtime { type InitializeMembers = Council; type CurrencyToVote = U128CurrencyToVote; type CandidacyBond = CandidacyBond; - type VotingBond = VotingBond; + type VotingBondBase = VotingBondBase; + type VotingBondFactor = VotingBondFactor; type LoserCandidate = (); - type BadReport = (); type KickedMember = (); type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; - type WeightInfo = weights::pallet_elections_phragmen::WeightInfo; + type WeightInfo = pallet_elections_phragmen::weights::SubstrateWeight; } parameter_types! { @@ -596,7 +762,7 @@ parameter_types! { } type TechnicalCollective = pallet_collective::Instance2; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -604,15 +770,15 @@ impl pallet_collective::Trait for Runtime { type MaxProposals = TechnicalMaxProposals; type MaxMembers = TechnicalMaxMembers; type DefaultVote = pallet_collective::PrimeDefaultVote; - type WeightInfo = weights::pallet_collective::WeightInfo; + type WeightInfo = pallet_collective::weights::SubstrateWeight; } type EnsureRootOrHalfCouncil = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>, >; -impl pallet_membership::Trait for Runtime { +impl pallet_membership::Config for Runtime { type Event = Event; type AddOrigin = EnsureRootOrHalfCouncil; type RemoveOrigin = EnsureRootOrHalfCouncil; @@ -621,6 +787,8 @@ impl pallet_membership::Trait for Runtime { type PrimeOrigin = EnsureRootOrHalfCouncil; type MembershipInitialized = TechnicalCommittee; type MembershipChanged = TechnicalCommittee; + type MaxMembers = TechnicalMaxMembers; + type WeightInfo = pallet_membership::weights::SubstrateWeight; } parameter_types! { @@ -634,89 +802,118 @@ parameter_types! 
{ pub const DataDepositPerByte: Balance = 1 * CENTS; pub const BountyDepositBase: Balance = 1 * DOLLARS; pub const BountyDepositPayoutDelay: BlockNumber = 1 * DAYS; - pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const BountyUpdatePeriod: BlockNumber = 14 * DAYS; pub const MaximumReasonLength: u32 = 16384; pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); pub const BountyValueMinimum: Balance = 5 * DOLLARS; + pub const MaxApprovals: u32 = 100; } -impl pallet_treasury::Trait for Runtime { - type ModuleId = TreasuryModuleId; +impl pallet_treasury::Config for Runtime { + type PalletId = TreasuryPalletId; type Currency = Balances; type ApproveOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>, >; type RejectOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>, >; - type Tippers = Elections; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type DataDepositPerByte = DataDepositPerByte; type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BurnDestination = (); + type SpendFunds = Bounties; + type WeightInfo = pallet_treasury::weights::SubstrateWeight; + type MaxApprovals = MaxApprovals; +} + +impl pallet_bounties::Config for Runtime { + type Event = Event; type BountyDepositBase = BountyDepositBase; type BountyDepositPayoutDelay = BountyDepositPayoutDelay; type BountyUpdatePeriod = BountyUpdatePeriod; type BountyCuratorDeposit = BountyCuratorDeposit; type BountyValueMinimum = BountyValueMinimum; + type DataDepositPerByte = DataDepositPerByte; type MaximumReasonLength = MaximumReasonLength; - type BurnDestination = (); - type WeightInfo = weights::pallet_treasury::WeightInfo; + type WeightInfo = pallet_bounties::weights::SubstrateWeight; } -parameter_types! { - pub const TombstoneDeposit: Balance = 16 * MILLICENTS; - pub const RentByteFee: Balance = 4 * MILLICENTS; - pub const RentDepositOffset: Balance = 1000 * MILLICENTS; - pub const SurchargeReward: Balance = 150 * MILLICENTS; +impl pallet_tips::Config for Runtime { + type Event = Event; + type DataDepositPerByte = DataDepositPerByte; + type MaximumReasonLength = MaximumReasonLength; + type Tippers = Elections; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type WeightInfo = pallet_tips::weights::SubstrateWeight; } -impl pallet_contracts::Trait for Runtime { +parameter_types! { + pub ContractDeposit: Balance = deposit( + 1, + >::contract_info_size(), + ); + pub const MaxValueSize: u32 = 16 * 1024; + // The lazy deletion runs inside on_initialize. + pub DeletionWeightLimit: Weight = AVERAGE_ON_INITIALIZE_RATIO * + RuntimeBlockWeights::get().max_block; + // The weight needed for decoding the queue should be less than or equal to a fifth + // of the overall weight dedicated to the lazy deletion.
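+	// Illustrative derivation of the bound computed below: benchmark differencing
+	// gives the marginal weight of draining one queue item,
+	//
+	//   per_item = on_initialize_per_queue_item(1) - on_initialize_per_queue_item(0)
+	//
+	// and the depth is then capped at one fifth of the deletion weight budget:
+	//   DeletionQueueDepth = (DeletionWeightLimit / per_item) / 5.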
+ pub DeletionQueueDepth: u32 = ((DeletionWeightLimit::get() / ( + ::WeightInfo::on_initialize_per_queue_item(1) - + ::WeightInfo::on_initialize_per_queue_item(0) + )) / 5) as u32; + pub Schedule: pallet_contracts::Schedule = Default::default(); +} + +impl pallet_contracts::Config for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; type Currency = Balances; type Event = Event; - type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer; - type TrieIdGenerator = pallet_contracts::TrieIdFromParentCounter; - type RentPayment = (); - type SignedClaimHandicap = pallet_contracts::DefaultSignedClaimHandicap; - type TombstoneDeposit = TombstoneDeposit; - type StorageSizeOffset = pallet_contracts::DefaultStorageSizeOffset; - type RentByteFee = RentByteFee; - type RentDepositOffset = RentDepositOffset; - type SurchargeReward = SurchargeReward; - type MaxDepth = pallet_contracts::DefaultMaxDepth; - type MaxValueSize = pallet_contracts::DefaultMaxValueSize; - type WeightPrice = pallet_transaction_payment::Module; - type WeightInfo = weights::pallet_contracts::WeightInfo; -} - -impl pallet_sudo::Trait for Runtime { + type Call = Call; + /// The safest default is to allow no calls at all. + /// + /// Runtimes should whitelist dispatchables that are allowed to be called from contracts + /// and make sure they are stable. Dispatchables exposed to contracts are not allowed to + /// change because that would break already deployed contracts. The `Call` structure itself + /// must likewise keep the indices of existing pallets unchanged. + type CallFilter = Nothing; + type ContractDeposit = ContractDeposit; + type CallStack = [pallet_contracts::Frame; 31]; + type WeightPrice = pallet_transaction_payment::Pallet; + type WeightInfo = pallet_contracts::weights::SubstrateWeight; + type ChainExtension = (); + type DeletionQueueDepth = DeletionQueueDepth; + type DeletionWeightLimit = DeletionWeightLimit; + type Schedule = Schedule; +} + +impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } parameter_types! { - pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_SLOTS as _; pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); /// We prioritize im-online heartbeats over election solution submission. pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; + pub const MaxAuthorities: u32 = 100; } impl frame_system::offchain::CreateSignedTransaction for Runtime - where - Call: From, +where + Call: From, { fn create_transaction>( call: Call, @@ -726,10 +923,8 @@ impl frame_system::offchain::CreateSignedTransaction for R ) -> Option<(Call, ::SignaturePayload)> { let tip = 0; // take the biggest period possible.
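		// Worked example (with this runtime's values): `BlockHashCount` is 2400, whose
		// next power of two is 4096, so the mortal era period computed below comes out
		// to 4096 / 2 = 2048 blocks.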
- let period = BlockHashCount::get() - .checked_next_power_of_two() - .map(|c| c / 2) - .unwrap_or(2) as u64; + let period = + BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let current_block = System::block_number() .saturated_into::() // The `System::block_number` is initialized with `n+1`, @@ -747,13 +942,10 @@ impl frame_system::offchain::CreateSignedTransaction for R ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { - debug::warn!("Unable to create signed payload: {:?}", e); + log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; - let signature = raw_payload - .using_encoded(|payload| { - C::sign(payload, public) - })?; + let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); let (call, extra, _) = raw_payload.deconstruct(); Some((call, (address, signature.into(), extra))) @@ -765,36 +957,35 @@ impl frame_system::offchain::SigningTypes for Runtime { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime +where Call: From, { type Extrinsic = UncheckedExtrinsic; type OverarchingCall = Call; } -impl pallet_im_online::Trait for Runtime { +impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; type Event = Event; - type SessionDuration = SessionDuration; + type NextSessionRotation = Babe; + type ValidatorSet = Historical; type ReportUnresponsiveness = Offences; type UnsignedPriority = ImOnlineUnsignedPriority; - type WeightInfo = weights::pallet_im_online::WeightInfo; + type WeightInfo = pallet_im_online::weights::SubstrateWeight; } -parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); -} - -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; - type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime { + type MaxAuthorities = MaxAuthorities; +} -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -808,8 +999,11 @@ impl pallet_grandpa::Trait for Runtime { GrandpaId, )>>::IdentificationTuple; - type HandleEquivocation = - pallet_grandpa::EquivocationHandler; + type HandleEquivocation = pallet_grandpa::EquivocationHandler< + Self::KeyOwnerIdentification, + Offences, + ReportLongevity, + >; type WeightInfo = (); } @@ -823,7 +1017,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl pallet_identity::Trait for Runtime { +impl pallet_identity::Config for Runtime { type Event = Event; type Currency = Balances; type BasicDeposit = BasicDeposit; @@ -835,7 +1029,7 @@ impl pallet_identity::Trait for Runtime { type Slashed = Treasury; type ForceOrigin = EnsureRootOrHalfCouncil; type RegistrarOrigin = EnsureRootOrHalfCouncil; - type WeightInfo = weights::pallet_identity::WeightInfo; + type WeightInfo = pallet_identity::weights::SubstrateWeight; } parameter_types! { @@ -845,7 +1039,7 @@ parameter_types! 
{ pub const RecoveryDeposit: Balance = 5 * DOLLARS; } -impl pallet_recovery::Trait for Runtime { +impl pallet_recovery::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -863,12 +1057,13 @@ parameter_types! { pub const PeriodSpend: Balance = 500 * DOLLARS; pub const MaxLockDuration: BlockNumber = 36 * 30 * DAYS; pub const ChallengePeriod: BlockNumber = 7 * DAYS; - pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); + pub const MaxCandidateIntake: u32 = 10; + pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); } -impl pallet_society::Trait for Runtime { +impl pallet_society::Config for Runtime { type Event = Event; - type ModuleId = SocietyModuleId; + type PalletId = SocietyPalletId; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; type CandidateDeposit = CandidateDeposit; @@ -878,8 +1073,10 @@ impl pallet_society::Trait for Runtime { type MembershipChanged = (); type RotationPeriod = RotationPeriod; type MaxLockDuration = MaxLockDuration; - type FounderSetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type FounderSetOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; type SuspensionJudgementOrigin = pallet_society::EnsureFounder; + type MaxCandidateIntake = MaxCandidateIntake; type ChallengePeriod = ChallengePeriod; } @@ -887,12 +1084,128 @@ parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl pallet_vesting::Trait for Runtime { +impl pallet_vesting::Config for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = weights::pallet_vesting::WeightInfo; + type WeightInfo = pallet_vesting::weights::SubstrateWeight; + // `VestingInfo` encode length is 36 bytes. 28 schedules get encoded as 1009 bytes, which is the + // highest number of schedules that encodes less than 2^10. + const MAX_VESTING_SCHEDULES: u32 = 28; +} + +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = ::Hashing; + type Hash = ::Hash; + type LeafData = frame_system::Pallet; + type OnNewRoot = (); + type WeightInfo = (); +} + +parameter_types! { + pub const LotteryPalletId: PalletId = PalletId(*b"py/lotto"); + pub const MaxCalls: u32 = 10; + pub const MaxGenerateRandom: u32 = 10; +} + +impl pallet_lottery::Config for Runtime { + type PalletId = LotteryPalletId; + type Call = Call; + type Currency = Balances; + type Randomness = RandomnessCollectiveFlip; + type Event = Event; + type ManagerOrigin = EnsureRoot; + type MaxCalls = MaxCalls; + type ValidateCall = Lottery; + type MaxGenerateRandom = MaxGenerateRandom; + type WeightInfo = pallet_lottery::weights::SubstrateWeight; +} + +parameter_types!
{ + pub const AssetDeposit: Balance = 100 * DOLLARS; + pub const ApprovalDeposit: Balance = 1 * DOLLARS; + pub const StringLimit: u32 = 50; + pub const MetadataDepositBase: Balance = 10 * DOLLARS; + pub const MetadataDepositPerByte: Balance = 1 * DOLLARS; +} + +impl pallet_assets::Config for Runtime { + type Event = Event; + type Balance = u64; + type AssetId = u32; + type Currency = Balances; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = StringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = pallet_assets::weights::SubstrateWeight; +} + +parameter_types! { + pub IgnoredIssuance: Balance = Treasury::pot(); + pub const QueueCount: u32 = 300; + pub const MaxQueueLen: u32 = 1000; + pub const FifoQueueLen: u32 = 500; + pub const Period: BlockNumber = 30 * DAYS; + pub const MinFreeze: Balance = 100 * DOLLARS; + pub const IntakePeriod: BlockNumber = 10; + pub const MaxIntakeBids: u32 = 10; +} + +impl pallet_gilt::Config for Runtime { + type Event = Event; + type Currency = Balances; + type CurrencyBalance = Balance; + type AdminOrigin = frame_system::EnsureRoot; + type Deficit = (); + type Surplus = (); + type IgnoredIssuance = IgnoredIssuance; + type QueueCount = QueueCount; + type MaxQueueLen = MaxQueueLen; + type FifoQueueLen = FifoQueueLen; + type Period = Period; + type MinFreeze = MinFreeze; + type IntakePeriod = IntakePeriod; + type MaxIntakeBids = MaxIntakeBids; + type WeightInfo = pallet_gilt::weights::SubstrateWeight; +} + +parameter_types! { + pub const ClassDeposit: Balance = 100 * DOLLARS; + pub const InstanceDeposit: Balance = 1 * DOLLARS; + pub const KeyLimit: u32 = 32; + pub const ValueLimit: u32 = 256; +} + +impl pallet_uniques::Config for Runtime { + type Event = Event; + type ClassId = u32; + type InstanceId = u32; + type Currency = Balances; + type ForceOrigin = frame_system::EnsureRoot; + type ClassDeposit = ClassDeposit; + type InstanceDeposit = InstanceDeposit; + type MetadataDepositBase = MetadataDepositBase; + type AttributeDepositBase = MetadataDepositBase; + type DepositPerByte = MetadataDepositPerByte; + type StringLimit = StringLimit; + type KeyLimit = KeyLimit; + type ValueLimit = ValueLimit; + type WeightInfo = pallet_uniques::weights::SubstrateWeight; +} + +impl pallet_transaction_storage::Config for Runtime { + type Event = Event; + type Currency = Balances; + type Call = Call; + type FeeDestination = (); + type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; } construct_runtime!( @@ -901,42 +1214,51 @@ construct_runtime!( NodeBlock = node_primitives::Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Config, Storage, Event}, - Utility: pallet_utility::{Module, Call, Event}, - Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned}, - Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, - Authorship: pallet_authorship::{Module, Call, Storage, Inherent}, - Indices: pallet_indices::{Module, Call, Storage, Config, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Module, Storage}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - Democracy: pallet_democracy::{Module, Call, Storage, Config, 
Event}, - Council: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, - TechnicalCommittee: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, - Elections: pallet_elections_phragmen::{Module, Call, Storage, Event, Config}, - TechnicalMembership: pallet_membership::::{Module, Call, Storage, Event, Config}, - Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned}, - Treasury: pallet_treasury::{Module, Call, Storage, Config, Event}, - Contracts: pallet_contracts::{Module, Call, Config, Storage, Event}, - Sudo: pallet_sudo::{Module, Call, Config, Storage, Event}, - ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, - AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, - Offences: pallet_offences::{Module, Call, Storage, Event}, - Historical: pallet_session_historical::{Module}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage}, - Identity: pallet_identity::{Module, Call, Storage, Event}, - Society: pallet_society::{Module, Call, Storage, Event, Config}, - Recovery: pallet_recovery::{Module, Call, Storage, Event}, - Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, - Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, - Proxy: pallet_proxy::{Module, Call, Storage, Event}, - Multisig: pallet_multisig::{Module, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Utility: pallet_utility::{Pallet, Call, Event}, + Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, + Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + ElectionProviderMultiPhase: pallet_election_provider_multi_phase::{Pallet, Call, Storage, Event, ValidateUnsigned}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event}, + Council: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config}, + TechnicalCommittee: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config}, + Elections: pallet_elections_phragmen::{Pallet, Call, Storage, Event, Config}, + TechnicalMembership: pallet_membership::::{Pallet, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, + Contracts: pallet_contracts::{Pallet, Call, Storage, Event}, + Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, + Offences: pallet_offences::{Pallet, Storage, Event}, + Historical: pallet_session_historical::{Pallet}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, + Identity: pallet_identity::{Pallet, Call, Storage, Event}, + Society: pallet_society::{Pallet, Call, Storage, Event, Config}, + Recovery: pallet_recovery::{Pallet, Call, Storage, Event}, + Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, + Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, + 
Proxy: pallet_proxy::{Pallet, Call, Storage, Event}, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, + Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, + Tips: pallet_tips::{Pallet, Call, Storage, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Event}, + Mmr: pallet_mmr::{Pallet, Storage}, + Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, + Gilt: pallet_gilt::{Pallet, Call, Storage, Event, Config}, + Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, + TransactionStorage: pallet_transaction_storage::{Pallet, Call, Storage, Inherent, Config, Event}, } ); /// The address format for describing accounts. -pub type Address = ::Source; +pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. @@ -966,7 +1288,24 @@ pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive, Runtime, AllModules>; +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPallets, + (), +>; + +/// MMR helper types. +mod mmr { + use super::Runtime; + pub use pallet_mmr::primitives::*; + + pub type Leaf = <::LeafData as LeafDataProvider>::LeafData; + pub type Hash = ::Hash; + pub type Hashing = ::Hashing; +} impl_runtime_apis! { impl sp_api::Core for Runtime { @@ -975,7 +1314,7 @@ impl_runtime_apis! { } fn execute_block(block: Block) { - Executive::execute_block(block) + Executive::execute_block(block); } fn initialize_block(header: &::Header) { @@ -985,7 +1324,7 @@ impl_runtime_apis! { impl sp_api::Metadata for Runtime { fn metadata() -> OpaqueMetadata { - Runtime::metadata().into() + OpaqueMetadata::new(Runtime::metadata().into()) } } @@ -1005,18 +1344,15 @@ impl_runtime_apis! { fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult { data.check_extrinsics(&block) } - - fn random_seed() -> ::Hash { - RandomnessCollectiveFlip::random_seed() - } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, + block_hash: ::Hash, ) -> TransactionValidity { - Executive::validate_transaction(source, tx) + Executive::validate_transaction(source, tx, block_hash) } } @@ -1031,6 +1367,10 @@ impl_runtime_apis! { Grandpa::grandpa_authorities() } + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + fn submit_report_equivocation_unsigned_extrinsic( equivocation_proof: fg_primitives::EquivocationProof< ::Hash, @@ -1068,19 +1408,27 @@ impl_runtime_apis! 
{ sp_consensus_babe::BabeGenesisConfiguration { slot_duration: Babe::slot_duration(), epoch_length: EpochDuration::get(), - c: PRIMARY_PROBABILITY, + c: BABE_GENESIS_EPOCH_CONFIG.c, genesis_authorities: Babe::authorities(), randomness: Babe::randomness(), - allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots, + allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots, } } - fn current_epoch_start() -> sp_consensus_babe::SlotNumber { + fn current_epoch_start() -> sp_consensus_babe::Slot { Babe::current_epoch_start() } + fn current_epoch() -> sp_consensus_babe::Epoch { + Babe::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + Babe::next_epoch() + } + fn generate_key_ownership_proof( - _slot_number: sp_consensus_babe::SlotNumber, + _slot: sp_consensus_babe::Slot, authority_id: sp_consensus_babe::AuthorityId, ) -> Option { use codec::Encode; @@ -1115,7 +1463,9 @@ impl_runtime_apis! { } } - impl pallet_contracts_rpc_runtime_api::ContractsApi + impl pallet_contracts_rpc_runtime_api::ContractsApi< + Block, AccountId, Balance, BlockNumber, Hash, + > for Runtime { fn call( @@ -1124,17 +1474,20 @@ impl_runtime_apis! { value: Balance, gas_limit: u64, input_data: Vec, - ) -> ContractExecResult { - let (exec_result, gas_consumed) = - Contracts::bare_call(origin, dest.into(), value, gas_limit, input_data); - match exec_result { - Ok(v) => ContractExecResult::Success { - flags: v.flags.bits(), - data: v.data, - gas_consumed: gas_consumed, - }, - Err(_) => ContractExecResult::Error, - } + ) -> pallet_contracts_primitives::ContractExecResult { + Contracts::bare_call(origin, dest, value, gas_limit, input_data, true) + } + + fn instantiate( + origin: AccountId, + endowment: Balance, + gas_limit: u64, + code: pallet_contracts_primitives::Code, + data: Vec, + salt: Vec, + ) -> pallet_contracts_primitives::ContractInstantiateResult + { + Contracts::bare_instantiate(origin, endowment, gas_limit, code, data, salt, true) } fn get_storage( @@ -1143,12 +1496,6 @@ impl_runtime_apis! { ) -> pallet_contracts_primitives::GetStorageResult { Contracts::get_storage(address, key) } - - fn rent_projection( - address: AccountId, - ) -> pallet_contracts_primitives::RentProjectionResult { - Contracts::rent_projection(address) - } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< @@ -1158,6 +1505,40 @@ impl_runtime_apis! { fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { TransactionPayment::query_info(uxt, len) } + fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + + impl pallet_mmr::primitives::MmrApi< + Block, + mmr::Hash, + > for Runtime { + fn generate_proof(leaf_index: u64) + -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> + { + Mmr::generate_proof(leaf_index) + .map(|(leaf, proof)| (mmr::EncodableOpaqueLeaf::from_leaf(&leaf), proof)) + } + + fn verify_proof(leaf: mmr::EncodableOpaqueLeaf, proof: mmr::Proof) + -> Result<(), mmr::Error> + { + let leaf: mmr::Leaf = leaf + .into_opaque_leaf() + .try_decode() + .ok_or(mmr::Error::Verify)?; + Mmr::verify_leaf(leaf, proof) + } + + fn verify_proof_stateless( + root: mmr::Hash, + leaf: mmr::EncodableOpaqueLeaf, + proof: mmr::Proof + ) -> Result<(), mmr::Error> { + let node = mmr::DataOrHash::Data(leaf.into_opaque_leaf()); + pallet_mmr::verify_leaf_proof::(root, node, proof) + } } impl sp_session::SessionKeys for Runtime { @@ -1172,22 +1553,84 @@ impl_runtime_apis! 
{ } } + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade() -> Result<(Weight, Weight), sp_runtime::RuntimeString> { + let weight = Executive::try_runtime_upgrade()?; + Ok((weight, RuntimeBlockWeights::get().max_block)) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + + // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency + // issues. To get around that, we separated the Session benchmarks into its own crate, + // which is why we need these two lines below. + use pallet_session_benchmarking::Pallet as SessionBench; + use pallet_offences_benchmarking::Pallet as OffencesBench; + use frame_system_benchmarking::Pallet as SystemBench; + + let mut list = Vec::::new(); + + list_benchmark!(list, extra, pallet_assets, Assets); + list_benchmark!(list, extra, pallet_babe, Babe); + list_benchmark!(list, extra, pallet_balances, Balances); + list_benchmark!(list, extra, pallet_bounties, Bounties); + list_benchmark!(list, extra, pallet_collective, Council); + list_benchmark!(list, extra, pallet_contracts, Contracts); + list_benchmark!(list, extra, pallet_democracy, Democracy); + list_benchmark!(list, extra, pallet_election_provider_multi_phase, ElectionProviderMultiPhase); + list_benchmark!(list, extra, pallet_elections_phragmen, Elections); + list_benchmark!(list, extra, pallet_gilt, Gilt); + list_benchmark!(list, extra, pallet_grandpa, Grandpa); + list_benchmark!(list, extra, pallet_identity, Identity); + list_benchmark!(list, extra, pallet_im_online, ImOnline); + list_benchmark!(list, extra, pallet_indices, Indices); + list_benchmark!(list, extra, pallet_lottery, Lottery); + list_benchmark!(list, extra, pallet_membership, TechnicalMembership); + list_benchmark!(list, extra, pallet_mmr, Mmr); + list_benchmark!(list, extra, pallet_multisig, Multisig); + list_benchmark!(list, extra, pallet_offences, OffencesBench::); + list_benchmark!(list, extra, pallet_proxy, Proxy); + list_benchmark!(list, extra, pallet_scheduler, Scheduler); + list_benchmark!(list, extra, pallet_session, SessionBench::); + list_benchmark!(list, extra, pallet_staking, Staking); + list_benchmark!(list, extra, frame_system, SystemBench::); + list_benchmark!(list, extra, pallet_timestamp, Timestamp); + list_benchmark!(list, extra, pallet_tips, Tips); + list_benchmark!(list, extra, pallet_transaction_storage, TransactionStorage); + list_benchmark!(list, extra, pallet_treasury, Treasury); + list_benchmark!(list, extra, pallet_uniques, Uniques); + list_benchmark!(list, extra, pallet_utility, Utility); + list_benchmark!(list, extra, pallet_vesting, Vesting); + + let storage_info = AllPalletsWithSystem::storage_info(); + + return (list, storage_info) + } + fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; - // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. - // To get around that, we separated the Session benchmarks into its own crate, which is why - // we need these two lines below. 
		fn dispatch_benchmark(
			config: frame_benchmarking::BenchmarkConfig
		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
			use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};

-			// Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues.
-			// To get around that, we separated the Session benchmarks into its own crate, which is why
-			// we need these two lines below.
-			use pallet_session_benchmarking::Module as SessionBench;
-			use pallet_offences_benchmarking::Module as OffencesBench;
-			use frame_system_benchmarking::Module as SystemBench;
-			impl pallet_session_benchmarking::Trait for Runtime {}
-			impl pallet_offences_benchmarking::Trait for Runtime {}
-			impl frame_system_benchmarking::Trait for Runtime {}
+			// Trying to add benchmarks directly to the Session Pallet caused cyclic dependency
+			// issues. To get around that, we separated the Session benchmarks into its own crate,
+			// which is why we need these two lines below.
+			use pallet_session_benchmarking::Pallet as SessionBench;
+			use pallet_offences_benchmarking::Pallet as OffencesBench;
+			use frame_system_benchmarking::Pallet as SystemBench;
+
+			impl pallet_session_benchmarking::Config for Runtime {}
+			impl pallet_offences_benchmarking::Config for Runtime {}
+			impl frame_system_benchmarking::Config for Runtime {}

			let whitelist: Vec<TrackedStorageKey> = vec![
				// Block Number
@@ -1200,6 +1643,8 @@ impl_runtime_apis! {
				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
				// System Events
				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
+				// System BlockWeight
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96").to_vec().into(),
				// Treasury Account
				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(),
			];
@@ -1207,16 +1652,23 @@ impl_runtime_apis! {
			let mut batches = Vec::<BenchmarkBatch>::new();
			let params = (&config, &whitelist);

+			add_benchmark!(params, batches, pallet_assets, Assets);
			add_benchmark!(params, batches, pallet_babe, Babe);
			add_benchmark!(params, batches, pallet_balances, Balances);
+			add_benchmark!(params, batches, pallet_bounties, Bounties);
			add_benchmark!(params, batches, pallet_collective, Council);
			add_benchmark!(params, batches, pallet_contracts, Contracts);
			add_benchmark!(params, batches, pallet_democracy, Democracy);
+			add_benchmark!(params, batches, pallet_election_provider_multi_phase, ElectionProviderMultiPhase);
			add_benchmark!(params, batches, pallet_elections_phragmen, Elections);
+			add_benchmark!(params, batches, pallet_gilt, Gilt);
			add_benchmark!(params, batches, pallet_grandpa, Grandpa);
			add_benchmark!(params, batches, pallet_identity, Identity);
			add_benchmark!(params, batches, pallet_im_online, ImOnline);
			add_benchmark!(params, batches, pallet_indices, Indices);
+			add_benchmark!(params, batches, pallet_lottery, Lottery);
+			add_benchmark!(params, batches, pallet_membership, TechnicalMembership);
+			add_benchmark!(params, batches, pallet_mmr, Mmr);
			add_benchmark!(params, batches, pallet_multisig, Multisig);
			add_benchmark!(params, batches, pallet_offences, OffencesBench::<Runtime>);
			add_benchmark!(params, batches, pallet_proxy, Proxy);
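[Editorial aside, not part of the diff: the whitelisted keys above are not magic numbers; each is the standard plain storage key, twox_128(pallet_prefix) ++ twox_128(item_name). A minimal sketch, assuming the sp-core and hex crates, that reproduces the new "System BlockWeight" entry:]

use sp_core::hashing::twox_128;

/// Plain (non-map) storage key: twox_128(pallet) ++ twox_128(item).
fn plain_storage_key(pallet: &[u8], item: &[u8]) -> Vec<u8> {
	let mut key = twox_128(pallet).to_vec();
	key.extend_from_slice(&twox_128(item));
	key
}

fn main() {
	// Expected to print the "System BlockWeight" key whitelisted above:
	// 26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96
	println!("{}", hex::encode(plain_storage_key(b"System", b"BlockWeight")));
}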
@@ -1225,7 +1677,10 @@ impl_runtime_apis! {
			add_benchmark!(params, batches, pallet_staking, Staking);
			add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
			add_benchmark!(params, batches, pallet_timestamp, Timestamp);
+			add_benchmark!(params, batches, pallet_tips, Tips);
+			add_benchmark!(params, batches, pallet_transaction_storage, TransactionStorage);
			add_benchmark!(params, batches, pallet_treasury, Treasury);
+			add_benchmark!(params, batches, pallet_uniques, Uniques);
			add_benchmark!(params, batches, pallet_utility, Utility);
			add_benchmark!(params, batches, pallet_vesting, Vesting);

@@ -1239,13 +1694,36 @@ impl_runtime_apis! {
mod tests {
	use super::*;
	use frame_system::offchain::CreateSignedTransaction;
+	use sp_runtime::UpperOf;

	#[test]
	fn validate_transaction_submitter_bounds() {
-		fn is_submit_signed_transaction<T>() where
+		fn is_submit_signed_transaction<T>()
+		where
			T: CreateSignedTransaction<Call>,
-		{}
+		{
+		}

		is_submit_signed_transaction::<Runtime>();
	}
+
+	#[test]
+	fn perbill_as_onchain_accuracy() {
+		type OnChainAccuracy = <Runtime as onchain::Config>::Accuracy;
+		let maximum_chain_accuracy: Vec<UpperOf<OnChainAccuracy>> = (0..MAX_NOMINATIONS)
+			.map(|_| <UpperOf<OnChainAccuracy>>::from(OnChainAccuracy::one().deconstruct()))
+			.collect();
+		let _: UpperOf<OnChainAccuracy> =
+			maximum_chain_accuracy.iter().fold(0, |acc, x| acc.checked_add(*x).unwrap());
+	}
+
+	#[test]
+	fn call_size() {
+		assert!(
+			core::mem::size_of::<Call>() <= 200,
+			"size of Call is more than 200 bytes: some calls have arguments that are too large; \
+			use Box to reduce the size of Call. \
+			If the limit is too strict, consider raising it to 300.",
+		);
+	}
}
diff --git a/bin/node/runtime/src/weights/frame_system.rs b/bin/node/runtime/src/weights/frame_system.rs
deleted file mode 100644
index 6831dad0620d6..0000000000000
--- a/bin/node/runtime/src/weights/frame_system.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5
-
-#![allow(unused_parens)]
-
-use frame_support::{traits::Get, weights::Weight};
-use sp_std::marker::PhantomData;
-
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Trait> frame_system::WeightInfo for WeightInfo<T> {
-	fn remark(_b: u32) -> Weight {
-		(1305000 as Weight)
-	}
-	fn set_heap_pages() -> Weight {
-		(2023000 as Weight)
-			.saturating_add(T::DbWeight::get().writes(1 as Weight))
-	}
-	fn set_changes_trie_config() -> Weight {
-		(10026000 as Weight)
-			.saturating_add(T::DbWeight::get().reads(1 as Weight))
-			.saturating_add(T::DbWeight::get().writes(2 as Weight))
-	}
-	fn set_storage(i: u32, ) -> Weight {
-		(0 as Weight)
-			.saturating_add((656000 as Weight).saturating_mul(i as Weight))
-			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
-	}
-	fn kill_storage(i: u32, ) -> Weight {
-		(4327000 as Weight)
-			.saturating_add((478000 as Weight).saturating_mul(i as Weight))
-			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
-	}
-	fn kill_prefix(p: u32, ) -> Weight {
-		(8349000 as Weight)
-			.saturating_add((838000 as Weight).saturating_mul(p as Weight))
-			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
-	}
-	fn suicide() -> Weight {
-		(29247000 as Weight)
-	}
-}
diff --git a/bin/node/runtime/src/weights/mod.rs b/bin/node/runtime/src/weights/mod.rs
deleted file mode 100644
index c75ff83085b6e..0000000000000
--- a/bin/node/runtime/src/weights/mod.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - fn transfer() -> Weight { - (65949000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn transfer_keep_alive() -> Weight { - (46665000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_balance_creating() -> Weight { - (27086000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_balance_killing() -> Weight { - (33424000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn force_transfer() -> Weight { - (65343000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_collective.rs b/bin/node/runtime/src/weights/pallet_collective.rs deleted file mode 100644 index 5e91dc19abcb9..0000000000000 --- a/bin/node/runtime/src/weights/pallet_collective.rs +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_collective::WeightInfo for WeightInfo { - fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - (0 as Weight) - .saturating_add((21040000 as Weight).saturating_mul(m as Weight)) - .saturating_add((173000 as Weight).saturating_mul(n as Weight)) - .saturating_add((31595000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) - } - fn execute(b: u32, m: u32, ) -> Weight { - (43359000 as Weight) - .saturating_add((4000 as Weight).saturating_mul(b as Weight)) - .saturating_add((123000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - } - fn propose_execute(b: u32, m: u32, ) -> Weight { - (54134000 as Weight) - .saturating_add((4000 as Weight).saturating_mul(b as Weight)) - .saturating_add((239000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - } - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (90650000 as Weight) - .saturating_add((5000 as Weight).saturating_mul(b as Weight)) - .saturating_add((152000 as Weight).saturating_mul(m as Weight)) - .saturating_add((970000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - fn vote(m: u32, ) -> Weight { - (74460000 as Weight) - .saturating_add((290000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (86360000 as Weight) - .saturating_add((232000 as Weight).saturating_mul(m as Weight)) - .saturating_add((954000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (123653000 as Weight) - .saturating_add((1000 as Weight).saturating_mul(b as Weight)) - .saturating_add((287000 as Weight).saturating_mul(m as Weight)) - .saturating_add((920000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn close_disapproved(m: u32, p: u32, ) -> Weight { - (95395000 as Weight) - .saturating_add((236000 as Weight).saturating_mul(m as Weight)) - .saturating_add((965000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (135284000 as Weight) - .saturating_add((4000 as Weight).saturating_mul(b as Weight)) - .saturating_add((218000 as Weight).saturating_mul(m as Weight)) - .saturating_add((951000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn disapprove_proposal(p: u32, ) -> Weight { - 
(50500000 as Weight) - .saturating_add((966000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_contracts.rs b/bin/node/runtime/src/weights/pallet_contracts.rs deleted file mode 100644 index 8cd97b4a72191..0000000000000 --- a/bin/node/runtime/src/weights/pallet_contracts.rs +++ /dev/null @@ -1,294 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Weights for pallet_contracts -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-06, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_contracts::WeightInfo for WeightInfo { - fn update_schedule() -> Weight { - (33_207_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn put_code(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((144_833_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn instantiate(n: u32, ) -> Weight { - (223_974_000 as Weight) - .saturating_add((1_007_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn call() -> Weight { - (210_638_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn claim_surcharge() -> Weight { - (508_079_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn seal_caller(r: u32, ) -> Weight { - (143_336_000 as Weight) - .saturating_add((397_788_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_address(r: u32, ) -> Weight { - (147_296_000 as Weight) - .saturating_add((396_962_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_gas_left(r: u32, ) -> Weight { - (141_677_000 as Weight) - .saturating_add((393_308_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_balance(r: u32, ) -> Weight { - (157_556_000 as Weight) - .saturating_add((879_861_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn seal_value_transferred(r: u32, ) -> Weight { - (148_867_000 as Weight) - .saturating_add((391_678_000 as Weight).saturating_mul(r as 
Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_minimum_balance(r: u32, ) -> Weight { - (147_252_000 as Weight) - .saturating_add((393_977_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_tombstone_deposit(r: u32, ) -> Weight { - (144_208_000 as Weight) - .saturating_add((394_625_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_rent_allowance(r: u32, ) -> Weight { - (135_320_000 as Weight) - .saturating_add((925_541_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_block_number(r: u32, ) -> Weight { - (145_849_000 as Weight) - .saturating_add((390_065_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_now(r: u32, ) -> Weight { - (146_363_000 as Weight) - .saturating_add((391_772_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_weight_to_fee(r: u32, ) -> Weight { - (129_872_000 as Weight) - .saturating_add((670_744_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn seal_gas(r: u32, ) -> Weight { - (130_985_000 as Weight) - .saturating_add((198_427_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_input(r: u32, ) -> Weight { - (138_647_000 as Weight) - .saturating_add((8_363_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_input_per_kb(n: u32, ) -> Weight { - (149_418_000 as Weight) - .saturating_add((272_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_return(r: u32, ) -> Weight { - (129_116_000 as Weight) - .saturating_add((5_745_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_return_per_kb(n: u32, ) -> Weight { - (139_601_000 as Weight) - .saturating_add((680_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_terminate(r: u32, ) -> Weight { - (138_548_000 as Weight) - .saturating_add((355_473_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to(r: u32, ) -> Weight { - (239_880_000 as Weight) - .saturating_add((138_305_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (40_572_000 as Weight) - .saturating_add((3_748_632_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) - } - fn seal_random(r: u32, ) -> Weight { - (148_156_000 as Weight) - 
.saturating_add((1_036_452_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn seal_deposit_event(r: u32, ) -> Weight { - (176_039_000 as Weight) - .saturating_add((1_497_705_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_923_547_000 as Weight) - .saturating_add((783_354_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((240_600_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) - } - fn seal_set_rent_allowance(r: u32, ) -> Weight { - (151_095_000 as Weight) - .saturating_add((1_104_696_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn seal_set_storage(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((14_975_467_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (2_465_724_000 as Weight) - .saturating_add((203_125_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn seal_clear_storage(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((5_254_595_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage(r: u32, ) -> Weight { - (60_303_000 as Weight) - .saturating_add((1_135_486_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (931_900_000 as Weight) - .saturating_add((144_572_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - } - fn seal_transfer(r: u32, ) -> Weight { - (50_722_000 as Weight) - .saturating_add((6_701_164_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((10_589_747_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (11_223_388_000 as Weight) - 
.saturating_add((4_965_182_000 as Weight).saturating_mul(t as Weight)) - .saturating_add((50_603_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((72_972_000 as Weight).saturating_mul(o as Weight)) - .saturating_add(T::DbWeight::get().reads(105 as Weight)) - .saturating_add(T::DbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) - } - fn seal_instantiate(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((22_933_938_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((200 as Weight).saturating_mul(r as Weight))) - } - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight { - (20_986_307_000 as Weight) - .saturating_add((152_611_000 as Weight).saturating_mul(i as Weight)) - .saturating_add((73_457_000 as Weight).saturating_mul(o as Weight)) - .saturating_add(T::DbWeight::get().reads(207 as Weight)) - .saturating_add(T::DbWeight::get().writes(202 as Weight)) - } - fn seal_hash_sha2_256(r: u32, ) -> Weight { - (145_988_000 as Weight) - .saturating_add((343_540_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (719_758_000 as Weight) - .saturating_add((420_306_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256(r: u32, ) -> Weight { - (116_261_000 as Weight) - .saturating_add((360_601_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (583_726_000 as Weight) - .saturating_add((333_091_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256(r: u32, ) -> Weight { - (144_609_000 as Weight) - .saturating_add((332_388_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (612_987_000 as Weight) - .saturating_add((150_030_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128(r: u32, ) -> Weight { - (142_085_000 as Weight) - .saturating_add((329_426_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (632_517_000 as Weight) - .saturating_add((149_974_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_democracy.rs b/bin/node/runtime/src/weights/pallet_democracy.rs deleted file mode 100644 index 51eca2855a384..0000000000000 --- a/bin/node/runtime/src/weights/pallet_democracy.rs +++ /dev/null @@ -1,173 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Weights for pallet_democracy -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-24, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_democracy::WeightInfo for WeightInfo { - fn propose() -> Weight { - (96_316_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn second(s: u32, ) -> Weight { - (58_386_000 as Weight) - .saturating_add((259_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn vote_new(r: u32, ) -> Weight { - (70_374_000 as Weight) - .saturating_add((291_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn vote_existing(r: u32, ) -> Weight { - (70_097_000 as Weight) - .saturating_add((296_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn emergency_cancel() -> Weight { - (41_731_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn blacklist(p: u32, ) -> Weight { - (117_847_000 as Weight) - .saturating_add((871_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) - } - fn external_propose(v: u32, ) -> Weight { - (20_972_000 as Weight) - .saturating_add((114_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn external_propose_majority() -> Weight { - (5_030_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn external_propose_default() -> Weight { - (4_981_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn fast_track() -> Weight { - (42_801_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn veto_external(v: u32, ) -> Weight { - (44_115_000 as Weight) - .saturating_add((194_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn cancel_proposal(p: u32, ) -> Weight { - (73_937_000 as Weight) - .saturating_add((962_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn cancel_referendum() -> Weight { - (25_233_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn cancel_queued(r: u32, ) -> 
Weight { - (48_251_000 as Weight) - .saturating_add((3_590_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn on_initialize_base(r: u32, ) -> Weight { - (17_597_000 as Weight) - .saturating_add((7_248_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - } - fn delegate(r: u32, ) -> Weight { - (93_916_000 as Weight) - .saturating_add((10_794_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) - } - fn undelegate(r: u32, ) -> Weight { - (47_855_000 as Weight) - .saturating_add((10_805_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) - } - fn clear_public_proposals() -> Weight { - (4_864_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn note_preimage(b: u32, ) -> Weight { - (66_754_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn note_imminent_preimage(b: u32, ) -> Weight { - (44_664_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn reap_preimage(b: u32, ) -> Weight { - (59_968_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn unlock_remove(r: u32, ) -> Weight { - (58_573_000 as Weight) - .saturating_add((131_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn unlock_set(r: u32, ) -> Weight { - (53_831_000 as Weight) - .saturating_add((324_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn remove_vote(r: u32, ) -> Weight { - (31_846_000 as Weight) - .saturating_add((327_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn remove_other_vote(r: u32, ) -> Weight { - (31_880_000 as Weight) - .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_elections_phragmen.rs b/bin/node/runtime/src/weights/pallet_elections_phragmen.rs deleted file mode 100644 index 8da9838d5d7a1..0000000000000 --- a/bin/node/runtime/src/weights/pallet_elections_phragmen.rs +++ /dev/null @@ -1,90 +0,0 @@ -// This 
file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_elections_phragmen::WeightInfo for WeightInfo { - fn vote(v: u32, ) -> Weight { - (91_489_000 as Weight) - .saturating_add((199_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn vote_update(v: u32, ) -> Weight { - (56_511_000 as Weight) - .saturating_add((245_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn remove_voter() -> Weight { - (76_714_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_743_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_750_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_733_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_861_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn submit_candidacy(c: u32, ) -> Weight { - (74_714_000 as Weight) - .saturating_add((315_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (50_408_000 as Weight) - .saturating_add((159_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn renounce_candidacy_members() -> Weight { - (79_626_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - fn renounce_candidacy_runners_up() -> Weight { - (49_715_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn remove_member_with_replacement() -> Weight { - (76_572_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - } - fn remove_member_wrong_refund() -> Weight { - (8_777_000 as Weight) - 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_identity.rs b/bin/node/runtime/src/weights/pallet_identity.rs deleted file mode 100644 index a43b63c0fb041..0000000000000 --- a/bin/node/runtime/src/weights/pallet_identity.rs +++ /dev/null @@ -1,137 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_identity::WeightInfo for WeightInfo { - fn add_registrar(r: u32, ) -> Weight { - (39_603_000 as Weight) - .saturating_add((418_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_identity(r: u32, x: u32, ) -> Weight { - (110_679_000 as Weight) - .saturating_add((389_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_985_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_subs_new(s: u32, ) -> Weight { - (78_697_000 as Weight) - .saturating_add((15_225_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn set_subs_old(p: u32, ) -> Weight { - (71_308_000 as Weight) - .saturating_add((5_772_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) - } - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (91_553_000 as Weight) - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((5_749_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_621_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn request_judgement(r: u32, x: u32, ) -> Weight { - (110_856_000 as Weight) - .saturating_add((496_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_221_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn cancel_request(r: u32, x: 
u32, ) -> Weight { - (96_857_000 as Weight) - .saturating_add((311_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_204_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_fee(r: u32, ) -> Weight { - (16_276_000 as Weight) - .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_account_id(r: u32, ) -> Weight { - (18_530_000 as Weight) - .saturating_add((391_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_fields(r: u32, ) -> Weight { - (16_359_000 as Weight) - .saturating_add((379_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn provide_judgement(r: u32, x: u32, ) -> Weight { - (72_869_000 as Weight) - .saturating_add((423_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_187_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - (123_199_000 as Weight) - .saturating_add((71_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((5_730_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn add_sub(s: u32, ) -> Weight { - (110_070_000 as Weight) - .saturating_add((262_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn rename_sub(s: u32, ) -> Weight { - (37_130_000 as Weight) - .saturating_add((79_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn remove_sub(s: u32, ) -> Weight { - (103_295_000 as Weight) - .saturating_add((235_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn quit_sub(s: u32, ) -> Weight { - (65_716_000 as Weight) - .saturating_add((227_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_im_online.rs b/bin/node/runtime/src/weights/pallet_im_online.rs deleted file mode 100644 index a85672da51c5a..0000000000000 --- a/bin/node/runtime/src/weights/pallet_im_online.rs +++ /dev/null @@ -1,35 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_im_online::WeightInfo for WeightInfo { - fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (139830000 as Weight) - .saturating_add((211000 as Weight).saturating_mul(k as Weight)) - .saturating_add((654000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_indices.rs b/bin/node/runtime/src/weights/pallet_indices.rs deleted file mode 100644 index e8845f3352896..0000000000000 --- a/bin/node/runtime/src/weights/pallet_indices.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_indices::WeightInfo for WeightInfo { - fn claim() -> Weight { - (56_237_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn transfer() -> Weight { - (63_665_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn free() -> Weight { - (50_736_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn force_transfer() -> Weight { - (52_361_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn freeze() -> Weight { - (46_483_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_multisig.rs b/bin/node/runtime/src/weights/pallet_multisig.rs deleted file mode 100644 index 0af7c7c75e1ea..0000000000000 --- a/bin/node/runtime/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,91 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - fn as_multi_threshold_1(z: u32, ) -> Weight { - (17_161_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - } - fn as_multi_create(s: u32, z: u32, ) -> Weight { - (79_857_000 as Weight) - .saturating_add((131_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (90_218_000 as Weight) - .saturating_add((129_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (48_402_000 as Weight) - .saturating_add((132_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (88_390_000 as Weight) - .saturating_add((120_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (98_960_000 as Weight) - .saturating_add((276_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((6_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn approve_as_multi_create(s: u32, ) -> Weight { - (80_185_000 as Weight) - .saturating_add((121_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn approve_as_multi_approve(s: u32, ) -> Weight { - (48_386_000 as Weight) - .saturating_add((143_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn approve_as_multi_complete(s: u32, ) -> Weight { - (177_181_000 as Weight) - .saturating_add((273_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as 
Weight)) - } - fn cancel_as_multi(s: u32, ) -> Weight { - (126_334_000 as Weight) - .saturating_add((124_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_proxy.rs b/bin/node/runtime/src/weights/pallet_proxy.rs deleted file mode 100644 index c43b5db14ed95..0000000000000 --- a/bin/node/runtime/src/weights/pallet_proxy.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_proxy::WeightInfo for WeightInfo { - fn proxy(p: u32, ) -> Weight { - (26127000 as Weight) - .saturating_add((214000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - } - fn proxy_announced(a: u32, p: u32, ) -> Weight { - (55405000 as Weight) - .saturating_add((774000 as Weight).saturating_mul(a as Weight)) - .saturating_add((209000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn remove_announcement(a: u32, p: u32, ) -> Weight { - (35879000 as Weight) - .saturating_add((783000 as Weight).saturating_mul(a as Weight)) - .saturating_add((20000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn reject_announcement(a: u32, p: u32, ) -> Weight { - (36097000 as Weight) - .saturating_add((780000 as Weight).saturating_mul(a as Weight)) - .saturating_add((12000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn announce(a: u32, p: u32, ) -> Weight { - (53769000 as Weight) - .saturating_add((675000 as Weight).saturating_mul(a as Weight)) - .saturating_add((214000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn add_proxy(p: u32, ) -> Weight { - (36082000 as Weight) - .saturating_add((234000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn remove_proxy(p: u32, ) -> Weight { - (32885000 as Weight) - .saturating_add((267000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn remove_proxies(p: u32, ) -> Weight { - (31735000 as Weight) - .saturating_add((215000 as Weight).saturating_mul(p as Weight)) - 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn anonymous(p: u32, ) -> Weight { - (50907000 as Weight) - .saturating_add((61000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn kill_anonymous(p: u32, ) -> Weight { - (33926000 as Weight) - .saturating_add((208000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_scheduler.rs b/bin/node/runtime/src/weights/pallet_scheduler.rs deleted file mode 100644 index 895a282488313..0000000000000 --- a/bin/node/runtime/src/weights/pallet_scheduler.rs +++ /dev/null @@ -1,52 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_scheduler::WeightInfo for WeightInfo { - fn schedule(s: u32, ) -> Weight { - (37_835_000 as Weight) - .saturating_add((81_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn cancel(s: u32, ) -> Weight { - (34_707_000 as Weight) - .saturating_add((3_125_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn schedule_named(s: u32, ) -> Weight { - (48_065_000 as Weight) - .saturating_add((110_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn cancel_named(s: u32, ) -> Weight { - (38_776_000 as Weight) - .saturating_add((3_138_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_session.rs b/bin/node/runtime/src/weights/pallet_session.rs deleted file mode 100644 index 1ca5c29237b4e..0000000000000 --- a/bin/node/runtime/src/weights/pallet_session.rs +++ /dev/null @@ -1,38 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { - fn set_keys() -> Weight { - (88_411_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - } - fn purge_keys() -> Weight { - (51_843_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_staking.rs b/bin/node/runtime/src/weights/pallet_staking.rs deleted file mode 100644 index a4484a2685949..0000000000000 --- a/bin/node/runtime/src/weights/pallet_staking.rs +++ /dev/null @@ -1,171 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Default weights of pallet-staking. -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_staking::WeightInfo for WeightInfo { - fn bond() -> Weight { - (144278000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - fn bond_extra() -> Weight { - (110715000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn unbond() -> Weight { - (99840000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn withdraw_unbonded_update(s: u32, ) -> Weight { - (100728000 as Weight) - .saturating_add((63000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (168879000 as Weight) - .saturating_add((6666000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn validate() -> Weight { - (35539000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn nominate(n: u32, ) -> Weight { - (48596000 as Weight) - .saturating_add((308000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn chill() -> Weight { - (35144000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn set_payee() -> Weight { - (24255000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_controller() -> Weight { - (52294000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn set_validator_count() -> Weight { - (5185000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn force_no_eras() -> Weight { - (5907000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn force_new_era() -> Weight { - (5917000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn force_new_era_always() -> Weight { - (5952000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn set_invulnerables(v: u32, ) -> Weight { - (6324000 as Weight) - .saturating_add((9000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn force_unstake(s: u32, ) -> Weight { - (119691000 as Weight) - .saturating_add((6681000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn cancel_deferred_slash(s: u32, ) -> Weight { - (5820201000 as Weight) - .saturating_add((34672000 as Weight).saturating_mul(s as Weight)) - 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((92486000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) - } - fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((117324000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) - } - fn rebond(l: u32, ) -> Weight { - (71316000 as Weight) - .saturating_add((142000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn set_history_depth(e: u32, ) -> Weight { - (0 as Weight) - .saturating_add((51901000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) - } - fn reap_stash(s: u32, ) -> Weight { - (147166000 as Weight) - .saturating_add((6661000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn new_era(v: u32, n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1440459000 as Weight).saturating_mul(v as Weight)) - .saturating_add((182580000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) - } - fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { - (0 as Weight) - .saturating_add((964000 as Weight).saturating_mul(v as Weight)) - .saturating_add((432000 as Weight).saturating_mul(n as Weight)) - .saturating_add((204294000 as Weight).saturating_mul(a as Weight)) - .saturating_add((9546000 as Weight).saturating_mul(w as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } -} diff --git a/bin/node/runtime/src/weights/pallet_timestamp.rs b/bin/node/runtime/src/weights/pallet_timestamp.rs deleted file mode 100644 index ee0dd77c63af0..0000000000000 --- a/bin/node/runtime/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 - -#![allow(unused_parens)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - fn set() -> Weight { - (9133000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn on_finalize() -> Weight { - (5915000 as Weight) - } -} diff --git a/bin/node/runtime/src/weights/pallet_treasury.rs b/bin/node/runtime/src/weights/pallet_treasury.rs deleted file mode 100644 index d8fe9b578b275..0000000000000 --- a/bin/node/runtime/src/weights/pallet_treasury.rs +++ /dev/null @@ -1,140 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_treasury::WeightInfo for WeightInfo { - fn propose_spend() -> Weight { - (79604000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn reject_proposal() -> Weight { - (61001000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn approve_proposal() -> Weight { - (17835000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn report_awesome(r: u32, ) -> Weight { - (101602000 as Weight) - .saturating_add((2000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn retract_tip() -> Weight { - (82970000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (63995000 as Weight) - .saturating_add((2000 as Weight).saturating_mul(r as Weight)) - .saturating_add((153000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn tip(t: u32, ) -> Weight { - (46765000 as Weight) - .saturating_add((711000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn close_tip(t: u32, ) -> Weight { - (160874000 as Weight) - .saturating_add((379000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn propose_bounty(d: u32, ) -> Weight { - (86198000 as Weight) - .saturating_add((1000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - fn approve_bounty() -> Weight { - (23063000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn propose_curator() -> Weight { - (18890000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn unassign_curator() -> Weight { - (66768000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn accept_curator() -> Weight { - (69131000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn award_bounty() -> Weight { - (48184000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn claim_bounty() -> Weight { - (243104000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) - } - fn close_bounty_proposed() -> Weight { - (65917000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - 
fn close_bounty_active() -> Weight { - (157232000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - fn extend_bounty_expiry() -> Weight { - (46216000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn on_initialize_proposals(p: u32, ) -> Weight { - (119765000 as Weight) - .saturating_add((108368000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) - } - fn on_initialize_bounties(b: u32, ) -> Weight { - (112536000 as Weight) - .saturating_add((107132000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } -} diff --git a/bin/node/runtime/src/weights/pallet_vesting.rs b/bin/node/runtime/src/weights/pallet_vesting.rs deleted file mode 100644 index ac63b0177b81c..0000000000000 --- a/bin/node/runtime/src/weights/pallet_vesting.rs +++ /dev/null @@ -1,64 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -pub struct WeightInfo(PhantomData); -impl pallet_vesting::WeightInfo for WeightInfo { - fn vest_locked(l: u32, ) -> Weight { - (82109000 as Weight) - .saturating_add((332000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn vest_unlocked(l: u32, ) -> Weight { - (88419000 as Weight) - .saturating_add((3000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn vest_other_locked(l: u32, ) -> Weight { - (81277000 as Weight) - .saturating_add((321000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - } - fn vest_other_unlocked(l: u32, ) -> Weight { - (87584000 as Weight) - .saturating_add((19000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn vested_transfer(l: u32, ) -> Weight { - (185916000 as Weight) - .saturating_add((625000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn force_vested_transfer(l: u32, ) -> Weight { - (185916000 as Weight) - .saturating_add((625000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } -} diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml new file mode 100644 index 0000000000000..96c4c2047ac4f --- /dev/null +++ b/bin/node/test-runner-example/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "test-runner-example" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +publish = false + +[dependencies] +test-runner = { path = "../../../test-utils/test-runner" } + +frame-system = { path = "../../../frame/system" } +frame-benchmarking = { path = "../../../frame/benchmarking" } +pallet-transaction-payment = { path = "../../../frame/transaction-payment" } + +node-runtime = { path = "../runtime" } +node-primitives = { path = "../primitives" } +node-cli = { path = "../cli" } + +grandpa = { package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sp-consensus-babe = { path = "../../../primitives/consensus/babe" } +sc-consensus-babe = { path = "../../../client/consensus/babe" } +sc-consensus-manual-seal = { path = "../../../client/consensus/manual-seal" } +sc-service = { default-features = false, path = "../../../client/service" } +sc-executor = { path = "../../../client/executor" } +sc-consensus = { path = "../../../client/consensus/common" } + +sp-runtime = { path = "../../../primitives/runtime" } +sp-keyring = { path = "../../../primitives/keyring" } diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs new file mode 100644 index 0000000000000..0de7f5a4e2b70 --- /dev/null +++ b/bin/node/test-runner-example/src/lib.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. +#![deny(unused_extern_crates, missing_docs)] + +//! Basic example of end to end runtime tests. + +use grandpa::GrandpaBlockImport; +use sc_consensus_babe::BabeBlockImport; +use sc_consensus_manual_seal::consensus::babe::SlotTimestampProvider; +use sc_executor::NativeElseWasmExecutor; +use sc_service::{TFullBackend, TFullClient}; +use sp_runtime::generic::Era; +use test_runner::{ChainInfo, SignatureVerificationOverride}; + +type BlockImport<B, BE, C, SC> = BabeBlockImport<B, C, GrandpaBlockImport<BE, B, C, SC>>; + +/// A unit struct which implements `NativeExecutionDispatch` feeding in the +/// hard-coded runtime. +pub struct ExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + type ExtendHostFunctions = + (frame_benchmarking::benchmarking::HostFunctions, SignatureVerificationOverride); + + fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> { + node_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + node_runtime::native_version() + } +} + +/// ChainInfo implementation. +struct NodeTemplateChainInfo; + +impl ChainInfo for NodeTemplateChainInfo { + type Block = node_primitives::Block; + type ExecutorDispatch = ExecutorDispatch; + type Runtime = node_runtime::Runtime; + type RuntimeApi = node_runtime::RuntimeApi; + type SelectChain = sc_consensus::LongestChain<TFullBackend<Self::Block>, Self::Block>; + type BlockImport = BlockImport< + Self::Block, + TFullBackend<Self::Block>, + TFullClient<Self::Block, Self::RuntimeApi, NativeElseWasmExecutor<Self::ExecutorDispatch>>, + Self::SelectChain, + >; + type SignedExtras = node_runtime::SignedExtra; + type InherentDataProviders = + (SlotTimestampProvider, sp_consensus_babe::inherents::InherentDataProvider); + + fn signed_extras( + from: <Self::Runtime as frame_system::Config>::AccountId, + ) -> Self::SignedExtras { + ( + frame_system::CheckSpecVersion::<Self::Runtime>::new(), + frame_system::CheckTxVersion::<Self::Runtime>::new(), + frame_system::CheckGenesis::<Self::Runtime>::new(), + frame_system::CheckMortality::<Self::Runtime>::from(Era::Immortal), + frame_system::CheckNonce::<Self::Runtime>::from( + frame_system::Pallet::<Self::Runtime>::account_nonce(from), + ), + frame_system::CheckWeight::<Self::Runtime>::new(), + pallet_transaction_payment::ChargeTransactionPayment::<Self::Runtime>::from(0), + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use node_cli::chain_spec::development_config; + use sp_keyring::sr25519::Keyring::Alice; + use sp_runtime::{traits::IdentifyAccount, MultiSigner}; + use test_runner::{build_runtime, client_parts, ConfigOrChainSpec, Node}; + + #[test] + fn test_runner() { + let tokio_runtime = build_runtime().unwrap(); + let (rpc, task_manager, client, pool, command_sink, backend) = + client_parts::<NodeTemplateChainInfo>(ConfigOrChainSpec::ChainSpec( + Box::new(development_config()), + tokio_runtime.handle().clone(), + )) + .unwrap(); + let node = Node::<NodeTemplateChainInfo>::new( + rpc, + task_manager, + client, + pool, + command_sink, + backend, + ); + + tokio_runtime.block_on(async { + // seals blocks + node.seal_blocks(1).await; + // submit extrinsics + let alice = MultiSigner::from(Alice.public()).into_account(); + let _hash = node + .submit_extrinsic( + frame_system::Call::remark { remark: (b"hello world").to_vec() }, + Some(alice), + ) + .await + .unwrap(); + + // look ma, I can read state. + let _events = + node.with_state(|| frame_system::Pallet::<node_runtime::Runtime>::events()); + // get access to the underlying client. + let _client = node.client(); + }) + } +} diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index bc1f07645eed9..d05d815121f88 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-testing" -version = "2.0.0" +version = "3.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] description = "Test utilities for Substrate node." edition = "2018" @@ -13,43 +13,38 @@ publish = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -sc-service = { version = "0.8.0", features = ["test-helpers", "db"], path = "../../../client/service" } -sc-client-db = { version = "0.8.0", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } -sc-client-api = { version = "2.0.0", path = "../../../client/api/" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -node-executor = { version = "2.0.0", path = "../executor" } +sc-service = { version = "0.10.0-dev", features = [ + "test-helpers", + "db", +], path = "../../../client/service" } +sc-client-db = { version = "0.10.0-dev", path = "../../../client/db/", features = [ + "kvdb-rocksdb", + "parity-db", +] } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-keyring = { version = "4.0.0-dev", path = "../../../primitives/keyring" } +node-executor = { version = "3.0.0-dev", path = "../executor" } node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "2.0.0", path = "../runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -frame-support = { version = "2.0.0", path = "../../../frame/support" } -pallet-session = { version = "2.0.0", path = "../../../frame/session" } -pallet-society = { version = "2.0.0", path = "../../../frame/society" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } -sc-executor = { version = "0.8.0", path = "../../../client/executor", features = ["wasmtime"] } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } +node-runtime = { version = "3.0.0-dev", path = "../runtime" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor", features = [ + "wasmtime", +] } +sp-consensus = { version = "0.10.0-dev", path = 
"../../../primitives/consensus/common" } +frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } -pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/timestamp" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } log = "0.4.8" tempfile = "3.1.0" fs_extra = "1" futures = "0.3.1" - -[dev-dependencies] -criterion = "0.3.0" -sc-cli = { version = "0.8.0", path = "../../../client/cli" } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 153a52375c2a9..cf0a463cc3e99 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,44 +22,41 @@ //! can pregenerate seed database and `clone` it for every iteration of your benchmarks //! or tests to get consistent, smooth benchmark experience! 
-use std::{sync::Arc, path::{Path, PathBuf}, collections::BTreeMap}; - -use node_primitives::Block; -use crate::client::{Client, Backend}; -use crate::keyring::*; -use sc_client_db::PruningMode; -use sc_executor::{NativeExecutor, WasmExecutionMethod}; -use sp_consensus::{ - BlockOrigin, BlockImport, BlockImportParams, - ForkChoiceStrategy, ImportResult, ImportedAux +use std::{ + collections::BTreeMap, + path::{Path, PathBuf}, + sync::Arc, }; -use sp_runtime::{ - generic::BlockId, - OpaqueExtrinsic, - traits::{Block as BlockT, Verify, Zero, IdentifyAccount}, + +use crate::{ + client::{Backend, Client}, + keyring::*, }; use codec::{Decode, Encode}; +use futures::executor; +use node_primitives::Block; use node_runtime::{ - Call, - CheckedExtrinsic, - constants::currency::DOLLARS, - UncheckedExtrinsic, - MinimumPeriod, - SystemCall, - BalancesCall, - AccountId, - Signature, + constants::currency::DOLLARS, AccountId, BalancesCall, Call, CheckedExtrinsic, MinimumPeriod, + Signature, SystemCall, UncheckedExtrinsic, }; -use sp_core::{ExecutionContext, blake2_256, traits::SpawnNamed, Pair, Public, sr25519, ed25519}; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + execution_extensions::{ExecutionExtensions, ExecutionStrategies}, + BlockBackend, ExecutionStrategy, +}; +use sc_client_db::PruningMode; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux}; +use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; +use sp_consensus::BlockOrigin; +use sp_core::{blake2_256, ed25519, sr25519, traits::SpawnNamed, ExecutionContext, Pair, Public}; use sp_inherents::InherentData; -use sc_client_api::{ - ExecutionStrategy, BlockBackend, - execution_extensions::{ExecutionExtensions, ExecutionStrategies}, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, IdentifyAccount, Verify, Zero}, + OpaqueExtrinsic, }; -use sc_block_builder::BlockBuilderProvider; -use futures::executor; /// Keyring full of accounts for benching. /// @@ -92,19 +89,21 @@ impl BenchPair { /// /// Will panic if cache drop is impossible. pub fn drop_system_cache() { - #[cfg(target_os = "windows")] { + #[cfg(target_os = "windows")] + { log::warn!( target: "bench-logistics", "Clearing system cache on windows is not supported. 
Benchmark might totally be wrong.", ); - return; + return } std::process::Command::new("sync") .output() .expect("Failed to execute system cache clear"); - #[cfg(target_os = "linux")] { + #[cfg(target_os = "linux")] + { log::trace!(target: "bench-logistics", "Clearing system cache..."); std::process::Command::new("echo") .args(&["3", ">", "/proc/sys/vm/drop_caches", "2>", "/dev/null"]) @@ -133,7 +132,8 @@ pub fn drop_system_cache() { log::trace!(target: "bench-logistics", "Clearing system cache done!"); } - #[cfg(target_os = "macos")] { + #[cfg(target_os = "macos")] + { log::trace!(target: "bench-logistics", "Clearing system cache..."); if let Err(err) = std::process::Command::new("purge").output() { log::error!("purge error {:?}: ", err); @@ -169,15 +169,10 @@ impl Clone for BenchDb { ); let seed_db_files = std::fs::read_dir(seed_dir) .expect("failed to list file in seed dir") - .map(|f_result| - f_result.expect("failed to read file in seed db") - .path() - ).collect(); - fs_extra::copy_items( - &seed_db_files, - dir.path(), - &fs_extra::dir::CopyOptions::new(), - ).expect("Copy of seed database is ok"); + .map(|f_result| f_result.expect("failed to read file in seed db").path()) + .collect::>(); + fs_extra::copy_items(&seed_db_files, dir.path(), &fs_extra::dir::CopyOptions::new()) + .expect("Copy of seed database is ok"); // We clear system cache after db clone but before any warmups. // This populates system cache with some data unrelated to actual @@ -204,10 +199,7 @@ pub enum BlockType { impl BlockType { /// Create block content description with specified number of transactions. pub fn to_content(self, size: Option) -> BlockContent { - BlockContent { - block_type: self, - size, - } + BlockContent { block_type: self, size } } } @@ -228,15 +220,10 @@ pub enum DatabaseType { } impl DatabaseType { - fn into_settings(self, path: PathBuf) -> sc_client_db::DatabaseSettingsSrc { + fn into_settings(self, path: PathBuf) -> sc_client_db::DatabaseSource { match self { - Self::RocksDb => sc_client_db::DatabaseSettingsSrc::RocksDb { - path, - cache_size: 512, - }, - Self::ParityDb => sc_client_db::DatabaseSettingsSrc::ParityDb { - path, - } + Self::RocksDb => sc_client_db::DatabaseSource::RocksDb { path, cache_size: 512 }, + Self::ParityDb => sc_client_db::DatabaseSource::ParityDb { path }, } } } @@ -251,10 +238,7 @@ pub struct TaskExecutor { impl TaskExecutor { fn new() -> Self { - Self { - pool: executor::ThreadPool::new() - .expect("Failed to create task executor") - } + Self { pool: executor::ThreadPool::new().expect("Failed to create task executor") } } } @@ -279,21 +263,17 @@ pub struct BlockContentIterator<'a> { impl<'a> BlockContentIterator<'a> { fn new(content: BlockContent, keyring: &'a BenchKeyring, client: &Client) -> Self { - let runtime_version = client.runtime_version_at(&BlockId::number(0)) + let runtime_version = client + .runtime_version_at(&BlockId::number(0)) .expect("There should be runtime version at 0"); - let genesis_hash = client.block_hash(Zero::zero()) + let genesis_hash = client + .block_hash(Zero::zero()) .expect("Database error?") .expect("Genesis block always exists; qed") .into(); - BlockContentIterator { - iteration: 0, - content, - keyring, - runtime_version, - genesis_hash, - } + BlockContentIterator { iteration: 0, content, keyring, runtime_version, genesis_hash } } } @@ -302,41 +282,36 @@ impl<'a> Iterator for BlockContentIterator<'a> { fn next(&mut self) -> Option { if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) { - return None; + 
return None } let sender = self.keyring.at(self.iteration); - let receiver = get_account_id_from_seed::( - &format!("random-user//{}", self.iteration) - ); + let receiver = get_account_id_from_seed::(&format!( + "random-user//{}", + self.iteration + )); let signed = self.keyring.sign( CheckedExtrinsic { - signed: Some((sender, signed_extra(0, node_runtime::ExistentialDeposit::get() + 1))), + signed: Some(( + sender, + signed_extra(0, node_runtime::ExistentialDeposit::get() + 1), + )), function: match self.content.block_type { - BlockType::RandomTransfersKeepAlive => { - Call::Balances( - BalancesCall::transfer_keep_alive( - pallet_indices::address::Address::Id(receiver), - node_runtime::ExistentialDeposit::get() + 1, - ) - ) - }, + BlockType::RandomTransfersKeepAlive => + Call::Balances(BalancesCall::transfer_keep_alive { + dest: sp_runtime::MultiAddress::Id(receiver), + value: node_runtime::ExistentialDeposit::get() + 1, + }), BlockType::RandomTransfersReaping => { - Call::Balances( - BalancesCall::transfer( - pallet_indices::address::Address::Id(receiver), - // Transfer so that ending balance would be 1 less than existential deposit - // so that we kill the sender account. - 100*DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), - ) - ) - }, - BlockType::Noop => { - Call::System( - SystemCall::remark(Vec::new()) - ) + Call::Balances(BalancesCall::transfer { + dest: sp_runtime::MultiAddress::Id(receiver), + // Transfer so that ending balance would be 1 less than existential + // deposit so that we kill the sender account. + value: 100 * DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), + }) }, + BlockType::Noop => Call::System(SystemCall::remark { remark: Vec::new() }), }, }, self.runtime_version.spec_version, @@ -346,8 +321,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { let encoded = Encode::encode(&signed); - let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]) - .expect("Failed to decode opaque"); + let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]).expect("Failed to decode opaque"); self.iteration += 1; @@ -373,12 +347,8 @@ impl BenchDb { "Created seed db at {}", dir.path().to_string_lossy(), ); - let (_client, _backend, _task_executor) = Self::bench_client( - database_type, - dir.path(), - Profile::Native, - &keyring, - ); + let (_client, _backend, _task_executor) = + Self::bench_client(database_type, dir.path(), Profile::Native, &keyring); let directory_guard = Guard(dir); BenchDb { keyring, directory_guard, database_type } @@ -408,24 +378,29 @@ impl BenchDb { keyring: &BenchKeyring, ) -> (Client, std::sync::Arc, TaskExecutor) { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: 16*1024*1024, + state_cache_size: 16 * 1024 * 1024, state_cache_child_ratio: Some((0, 100)), - pruning: PruningMode::ArchiveAll, + state_pruning: PruningMode::ArchiveAll, source: database_type.into_settings(dir.into()), + keep_blocks: sc_client_db::KeepBlocks::All, + transaction_storage: sc_client_db::TransactionStorageMode::BlockBody, }; let task_executor = TaskExecutor::new(); - let (client, backend) = sc_service::new_client( - db_config, - NativeExecutor::new(WasmExecutionMethod::Compiled, None, 8), + let backend = sc_service::new_db_backend(db_config).expect("Should not fail"); + let client = sc_service::new_client( + backend.clone(), + NativeElseWasmExecutor::new(WasmExecutionMethod::Compiled, None, 8), &keyring.generate_genesis(), None, None, - ExecutionExtensions::new(profile.into_execution_strategies(), None), + 
ExecutionExtensions::new(profile.into_execution_strategies(), None, None), Box::new(task_executor.clone()), None, + None, Default::default(), - ).expect("Should not fail"); + ) + .expect("Should not fail"); (client, backend, task_executor) } @@ -441,12 +416,14 @@ impl BenchDb { .put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) .expect("Put timestamp failed"); - client.runtime_api() + client + .runtime_api() .inherent_extrinsics_with_context( &BlockId::number(0), ExecutionContext::BlockConstruction, inherent_data, - ).expect("Get inherents failed") + ) + .expect("Get inherents failed") } /// Iterate over some block content with transaction signed using this database keyring. @@ -470,9 +447,7 @@ impl BenchDb { pub fn generate_block(&mut self, content: BlockContent) -> Block { let client = self.client(); - let mut block = client - .new_block(Default::default()) - .expect("Block creation failed"); + let mut block = client.new_block(Default::default()).expect("Block creation failed"); for extrinsic in self.generate_inherents(&client) { block.push(extrinsic).expect("Push inherent failed"); @@ -482,14 +457,12 @@ impl BenchDb { for opaque in self.block_content(content, &client) { match block.push(opaque) { Err(sp_blockchain::Error::ApplyExtrinsicFailed( - sp_blockchain::ApplyExtrinsicFailed::Validity(e) - )) if e.exhausted_resources() => { - break; - }, + sp_blockchain::ApplyExtrinsicFailed::Validity(e), + )) if e.exhausted_resources() => break, Err(err) => panic!("Error pushing transaction: {:?}", err), Ok(_) => {}, } - }; + } let block = block.build().expect("Block build failed").block; @@ -510,12 +483,8 @@ impl BenchDb { /// Clone this database and create context for testing/benchmarking. pub fn create_context(&self, profile: Profile) -> BenchContext { let BenchDb { directory_guard, keyring, database_type } = self.clone(); - let (client, backend, task_executor) = Self::bench_client( - database_type, - directory_guard.path(), - profile, - &keyring - ); + let (client, backend, task_executor) = + Self::bench_client(database_type, directory_guard.path(), profile, &keyring); BenchContext { client: Arc::new(client), @@ -545,7 +514,8 @@ impl BenchKeyring { let seed = format!("//endowed-user/{}", n); let (account_id, pair) = match key_types { KeyTypes::Sr25519 => { - let pair = sr25519::Pair::from_string(&seed, None).expect("failed to generate pair"); + let pair = + sr25519::Pair::from_string(&seed, None).expect("failed to generate pair"); let account_id = AccountPublic::from(pair.public()).into_account(); (account_id, BenchPair::Sr25519(pair)) }, @@ -577,28 +547,34 @@ impl BenchKeyring { xt: CheckedExtrinsic, spec_version: u32, tx_version: u32, - genesis_hash: [u8; 32] + genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { match xt.signed { Some((signed, extra)) => { - let payload = (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + let payload = ( + xt.function, + extra.clone(), + spec_version, + tx_version, + genesis_hash, + genesis_hash, + ); let key = self.accounts.get(&signed).expect("Account id not found in keyring"); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&sp_io::hashing::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&sp_io::hashing::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), + signature: 
Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, }, + None => UncheckedExtrinsic { signature: None, function: xt.function }, } } @@ -637,7 +613,7 @@ impl Profile { block_construction: ExecutionStrategy::NativeElseWasm, offchain_worker: ExecutionStrategy::NativeElseWasm, other: ExecutionStrategy::NativeElseWasm, - } + }, } } } @@ -672,7 +648,7 @@ fn get_from_seed(seed: &str) -> ::Public fn get_account_id_from_seed(seed: &str) -> AccountId where - AccountPublic: From<::Public> + AccountPublic: From<::Public>, { AccountPublic::from(get_from_seed::(seed)).into_account() } @@ -680,25 +656,25 @@ where impl BenchContext { /// Import some block. pub fn import_block(&mut self, block: Block) { - let mut import_params = BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone()); + let mut import_params = + BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone()); import_params.body = Some(block.extrinsics().to_vec()); import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); assert_eq!(self.client.chain_info().best_number, 0); assert_eq!( - self.client.import_block(import_params, Default::default()) - .expect("Failed to import block"), - ImportResult::Imported( - ImportedAux { - header_only: false, - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - } + futures::executor::block_on( + self.client.import_block(import_params, Default::default()) ) + .expect("Failed to import block"), + ImportResult::Imported(ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + is_new_best: true, + }) ); assert_eq!(self.client.chain_info().best_number, 1); diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index f44747b26b7a6..8bd75834c5496 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,13 +18,13 @@ //! Utilities to build a `TestClient` for `node-runtime`. -use sp_runtime::BuildStorage; use sc_service::client; +use sp_runtime::BuildStorage; /// Re-export test-client utilities. pub use substrate_test_client::*; /// Call executor for `node-runtime` `TestClient`. -pub type Executor = sc_executor::NativeExecutor; +pub type ExecutorDispatch = sc_executor::NativeElseWasmExecutor; /// Default backend type. pub type Backend = sc_client_db::Backend; @@ -32,7 +32,7 @@ pub type Backend = sc_client_db::Backend; /// Test client type. 
pub type Client = client::Client< Backend, - client::LocalCallExecutor, + client::LocalCallExecutor, node_primitives::Block, node_runtime::RuntimeApi, >; @@ -61,13 +61,15 @@ pub trait TestClientBuilderExt: Sized { fn build(self) -> Client; } -impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< - node_primitives::Block, - client::LocalCallExecutor, - Backend, - GenesisParameters, -> { - fn new() -> Self{ +impl TestClientBuilderExt + for substrate_test_client::TestClientBuilder< + node_primitives::Block, + client::LocalCallExecutor, + Backend, + GenesisParameters, + > +{ + fn new() -> Self { Self::default() } @@ -75,5 +77,3 @@ impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< self.build_with_native_executor(None).0 } } - - diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 6fa178ba4bcdd..50c1e6f9d20be 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,14 +19,13 @@ //! Genesis Configuration. use crate::keyring::*; -use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use node_runtime::{ - GenesisConfig, BalancesConfig, SessionConfig, StakingConfig, SystemConfig, - GrandpaConfig, IndicesConfig, ContractsConfig, SocietyConfig, wasm_binary_unwrap, - AccountId, StakerStatus, + constants::currency::*, wasm_binary_unwrap, AccountId, BabeConfig, BalancesConfig, + GenesisConfig, GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, StakerStatus, + StakingConfig, SystemConfig, BABE_GENESIS_EPOCH_CONFIG, }; -use node_runtime::constants::currency::*; use sp_core::ChangesTrieConfiguration; +use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use sp_runtime::Perbill; /// Create genesis runtime configuration for tests. 
@@ -41,7 +40,6 @@ pub fn config_endowed( code: Option<&[u8]>, extra_endowed: Vec, ) -> GenesisConfig { - let mut endowed = vec![ (alice(), 111 * DOLLARS), (bob(), 100 * DOLLARS), @@ -51,73 +49,56 @@ pub fn config_endowed( (ferdie(), 100 * DOLLARS), ]; - endowed.extend( - extra_endowed.into_iter().map(|endowed| (endowed, 100*DOLLARS)) - ); + endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS))); GenesisConfig { - frame_system: Some(SystemConfig { - changes_trie_config: if support_changes_trie { Some(ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }) } else { None }, + system: SystemConfig { + changes_trie_config: if support_changes_trie { + Some(ChangesTrieConfiguration { digest_interval: 2, digest_levels: 2 }) + } else { + None + }, code: code.map(|x| x.to_vec()).unwrap_or_else(|| wasm_binary_unwrap().to_vec()), - }), - pallet_indices: Some(IndicesConfig { - indices: vec![], - }), - pallet_balances: Some(BalancesConfig { - balances: endowed, - }), - pallet_session: Some(SessionConfig { + }, + indices: IndicesConfig { indices: vec![] }, + balances: BalancesConfig { balances: endowed }, + session: SessionConfig { keys: vec![ - (dave(), alice(), to_session_keys( - &Ed25519Keyring::Alice, - &Sr25519Keyring::Alice, - )), - (eve(), bob(), to_session_keys( - &Ed25519Keyring::Bob, - &Sr25519Keyring::Bob, - )), - (ferdie(), charlie(), to_session_keys( - &Ed25519Keyring::Charlie, - &Sr25519Keyring::Charlie, - )), - ] - }), - pallet_staking: Some(StakingConfig { + (dave(), alice(), to_session_keys(&Ed25519Keyring::Alice, &Sr25519Keyring::Alice)), + (eve(), bob(), to_session_keys(&Ed25519Keyring::Bob, &Sr25519Keyring::Bob)), + ( + ferdie(), + charlie(), + to_session_keys(&Ed25519Keyring::Charlie, &Sr25519Keyring::Charlie), + ), + ], + }, + staking: StakingConfig { stakers: vec![ (dave(), alice(), 111 * DOLLARS, StakerStatus::Validator), (eve(), bob(), 100 * DOLLARS, StakerStatus::Validator), - (ferdie(), charlie(), 100 * DOLLARS, StakerStatus::Validator) + (ferdie(), charlie(), 100 * DOLLARS, StakerStatus::Validator), ], validator_count: 3, minimum_validator_count: 0, slash_reward_fraction: Perbill::from_percent(10), invulnerables: vec![alice(), bob(), charlie()], - .. 
Default::default() - }), - pallet_contracts: Some(ContractsConfig { - current_schedule: Default::default(), - }), - pallet_babe: Some(Default::default()), - pallet_grandpa: Some(GrandpaConfig { - authorities: vec![], - }), - pallet_im_online: Some(Default::default()), - pallet_authority_discovery: Some(Default::default()), - pallet_democracy: Some(Default::default()), - pallet_collective_Instance1: Some(Default::default()), - pallet_collective_Instance2: Some(Default::default()), - pallet_membership_Instance1: Some(Default::default()), - pallet_elections_phragmen: Some(Default::default()), - pallet_sudo: Some(Default::default()), - pallet_treasury: Some(Default::default()), - pallet_society: Some(SocietyConfig { - members: vec![alice(), bob()], - pot: 0, - max_members: 999, - }), - pallet_vesting: Some(Default::default()), + ..Default::default() + }, + babe: BabeConfig { authorities: vec![], epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG) }, + grandpa: GrandpaConfig { authorities: vec![] }, + im_online: Default::default(), + authority_discovery: Default::default(), + democracy: Default::default(), + council: Default::default(), + technical_committee: Default::default(), + technical_membership: Default::default(), + elections: Default::default(), + sudo: Default::default(), + treasury: Default::default(), + society: SocietyConfig { members: vec![alice(), bob()], pot: 0, max_members: 999 }, + vesting: Default::default(), + gilt: Default::default(), + transaction_storage: Default::default(), } } diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs index 3413748563633..4e2d88b4bba33 100644 --- a/bin/node/testing/src/keyring.rs +++ b/bin/node/testing/src/keyring.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,11 +18,11 @@ //! Test accounts. -use sp_keyring::{AccountKeyring, Sr25519Keyring, Ed25519Keyring}; +use codec::Encode; use node_primitives::{AccountId, Balance, Index}; -use node_runtime::{CheckedExtrinsic, UncheckedExtrinsic, SessionKeys, SignedExtra}; +use node_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; +use sp_keyring::{AccountKeyring, Ed25519Keyring, Sr25519Keyring}; use sp_runtime::generic::Era; -use codec::Encode; /// Alice's account id. pub fn alice() -> AccountId { @@ -81,26 +81,31 @@ pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra { } /// Sign given `CheckedExtrinsic`. 
-pub fn sign(xt: CheckedExtrinsic, spec_version: u32, tx_version: u32, genesis_hash: [u8; 32]) -> UncheckedExtrinsic { +pub fn sign( + xt: CheckedExtrinsic, + spec_version: u32, + tx_version: u32, + genesis_hash: [u8; 32], +) -> UncheckedExtrinsic { match xt.signed { Some((signed, extra)) => { - let payload = (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + let payload = + (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); let key = AccountKeyring::from_account_id(&signed).unwrap(); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&sp_io::hashing::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&sp_io::hashing::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), + signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, }, + None => UncheckedExtrinsic { signature: None, function: xt.function }, } } diff --git a/bin/node/testing/src/lib.rs b/bin/node/testing/src/lib.rs index d682347e40019..a3392bcb29d5d 100644 --- a/bin/node/testing/src/lib.rs +++ b/bin/node/testing/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,7 +20,7 @@ #![warn(missing_docs)] +pub mod bench; pub mod client; pub mod genesis; pub mod keyring; -pub mod bench; diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index a57dadd26bda8..5bdf01badc3f4 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -8,16 +8,17 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" readme = "README.md" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } -node-cli = { version = "2.0.0", path = "../../node/cli" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } +sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } +sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } +node-cli = { version = "3.0.0-dev", path = "../../node/cli" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } rand = "0.7.2" structopt = "0.3.8" diff --git a/bin/utils/chain-spec-builder/build.rs b/bin/utils/chain-spec-builder/build.rs index 8d5aac1a08742..57424f016f3e5 100644 --- a/bin/utils/chain-spec-builder/build.rs +++ b/bin/utils/chain-spec-builder/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index c2db944050eb4..bf5f1a149578e 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,19 +16,23 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{fs, path::{Path, PathBuf}, sync::Arc}; +use std::{ + fs, + path::{Path, PathBuf}, + sync::Arc, +}; use ansi_term::Style; -use rand::{Rng, distributions::Alphanumeric, rngs::OsRng}; +use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; use structopt::StructOpt; -use sc_keystore::LocalKeystore; use node_cli::chain_spec::{self, AccountId}; +use sc_keystore::LocalKeystore; use sp_core::{ - sr25519, crypto::{Public, Ss58Codec}, + sr25519, }; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// A utility to easily create a testnet chain spec definition with a given set /// of authorities and endowed accounts and/or generate random accounts. @@ -41,6 +45,10 @@ enum ChainSpecBuilder { /// Authority key seed. #[structopt(long, short, required = true)] authority_seeds: Vec, + /// Active nominators (SS58 format), each backing a random subset of the aforementioned + /// authorities. + #[structopt(long, short, default_value = "0")] + nominator_accounts: Vec, /// Endowed account address (SS58 format). #[structopt(long, short)] endowed_accounts: Vec, @@ -57,6 +65,11 @@ enum ChainSpecBuilder { /// The number of authorities. #[structopt(long, short)] authorities: usize, + /// The number of nominators backing the aforementioned authorities. + /// + /// Will nominate a random subset of `authorities`. + #[structopt(long, short, default_value = "0")] + nominators: usize, /// The number of endowed accounts. #[structopt(long, short, default_value = "0")] endowed: usize, @@ -77,16 +90,15 @@ impl ChainSpecBuilder { /// Returns the path where the chain spec should be saved. fn chain_spec_path(&self) -> &Path { match self { - ChainSpecBuilder::New { chain_spec_path, .. } => - chain_spec_path.as_path(), - ChainSpecBuilder::Generate { chain_spec_path, .. } => - chain_spec_path.as_path(), + ChainSpecBuilder::New { chain_spec_path, .. } => chain_spec_path.as_path(), + ChainSpecBuilder::Generate { chain_spec_path, .. 
} => chain_spec_path.as_path(), } } } fn genesis_constructor( authority_seeds: &[String], + nominator_accounts: &[AccountId], endowed_accounts: &[AccountId], sudo_account: &AccountId, ) -> chain_spec::GenesisConfig { @@ -96,38 +108,49 @@ fn genesis_constructor( .map(chain_spec::authority_keys_from_seed) .collect::>(); - let enable_println = true; - chain_spec::testnet_genesis( authorities, + nominator_accounts.to_vec(), sudo_account.clone(), Some(endowed_accounts.to_vec()), - enable_println, ) } fn generate_chain_spec( authority_seeds: Vec, + nominator_accounts: Vec, endowed_accounts: Vec, sudo_account: String, ) -> Result { - let parse_account = |address: &String| { - AccountId::from_string(address) + let parse_account = |address: String| { + AccountId::from_string(&address) .map_err(|err| format!("Failed to parse account address: {:?}", err)) }; + let nominator_accounts = nominator_accounts + .into_iter() + .map(parse_account) + .collect::, String>>()?; + let endowed_accounts = endowed_accounts - .iter() + .into_iter() .map(parse_account) .collect::, String>>()?; - let sudo_account = parse_account(&sudo_account)?; + let sudo_account = parse_account(sudo_account)?; let chain_spec = chain_spec::ChainSpec::from_genesis( "Custom", "custom", sc_chain_spec::ChainType::Live, - move || genesis_constructor(&authority_seeds, &endowed_accounts, &sudo_account), + move || { + genesis_constructor( + &authority_seeds, + &nominator_accounts, + &endowed_accounts, + &sudo_account, + ) + }, vec![], None, None, @@ -138,42 +161,26 @@ fn generate_chain_spec( chain_spec.as_json(false).map_err(|err| err) } -fn generate_authority_keys_and_store( - seeds: &[String], - keystore_path: &Path, -) -> Result<(), String> { +fn generate_authority_keys_and_store(seeds: &[String], keystore_path: &Path) -> Result<(), String> { for (n, seed) in seeds.into_iter().enumerate() { - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open( - keystore_path.join(format!("auth-{}", n)), - None, - ).map_err(|err| err.to_string())?); + let keystore: SyncCryptoStorePtr = Arc::new( + LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None) + .map_err(|err| err.to_string())?, + ); let (_, _, grandpa, babe, im_online, authority_discovery) = chain_spec::authority_keys_from_seed(seed); let insert_key = |key_type, public| { - SyncCryptoStore::insert_unknown( - &*keystore, - key_type, - &format!("//{}", seed), - public, - ).map_err(|_| format!("Failed to insert key: {}", grandpa)) + SyncCryptoStore::insert_unknown(&*keystore, key_type, &format!("//{}", seed), public) + .map_err(|_| format!("Failed to insert key: {}", grandpa)) }; - insert_key( - sp_core::crypto::key_types::BABE, - babe.as_slice(), - )?; + insert_key(sp_core::crypto::key_types::BABE, babe.as_slice())?; - insert_key( - sp_core::crypto::key_types::GRANDPA, - grandpa.as_slice(), - )?; + insert_key(sp_core::crypto::key_types::GRANDPA, grandpa.as_slice())?; - insert_key( - sp_core::crypto::key_types::IM_ONLINE, - im_online.as_slice(), - )?; + insert_key(sp_core::crypto::key_types::IM_ONLINE, im_online.as_slice())?; insert_key( sp_core::crypto::key_types::AUTHORITY_DISCOVERY, @@ -186,6 +193,7 @@ fn generate_authority_keys_and_store( fn print_seeds( authority_seeds: &[String], + nominator_seeds: &[String], endowed_seeds: &[String], sudo_seed: &str, ) { @@ -195,10 +203,13 @@ fn print_seeds( println!("{}", header.paint("Authority seeds")); for (n, seed) in authority_seeds.iter().enumerate() { - println!("{} //{}", - entry.paint(format!("auth-{}:", n)), - seed, - ); + 
println!("{} //{}", entry.paint(format!("auth-{}:", n)), seed); + } + + println!("{}", header.paint("Nominator seeds")); + + for (n, seed) in nominator_seeds.iter().enumerate() { + println!("{} //{}", entry.paint(format!("nom-{}:", n)), seed); } println!(); @@ -206,10 +217,7 @@ fn print_seeds( if !endowed_seeds.is_empty() { println!("{}", header.paint("Endowed seeds")); for (n, seed) in endowed_seeds.iter().enumerate() { - println!("{} //{}", - entry.paint(format!("endowed-{}:", n)), - seed, - ); + println!("{} //{}", entry.paint(format!("endowed-{}:", n)), seed); } println!(); @@ -220,62 +228,62 @@ fn print_seeds( } fn main() -> Result<(), String> { - #[cfg(build_type="debug")] + #[cfg(build_type = "debug")] println!( - "The chain spec builder builds a chain specification that includes a Substrate runtime compiled as WASM. To \ - ensure proper functioning of the included runtime compile (or run) the chain spec builder binary in \ - `--release` mode.\n", + "The chain spec builder builds a chain specification that includes a Substrate runtime \ + compiled as WASM. To ensure proper functioning of the included runtime compile (or run) \ + the chain spec builder binary in `--release` mode.\n", ); let builder = ChainSpecBuilder::from_args(); let chain_spec_path = builder.chain_spec_path().to_path_buf(); - let (authority_seeds, endowed_accounts, sudo_account) = match builder { - ChainSpecBuilder::Generate { authorities, endowed, keystore_path, .. } => { + let (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) = match builder { + ChainSpecBuilder::Generate { authorities, nominators, endowed, keystore_path, .. } => { let authorities = authorities.max(1); - let rand_str = || -> String { - OsRng.sample_iter(&Alphanumeric) - .take(32) - .collect() - }; + let rand_str = || -> String { OsRng.sample_iter(&Alphanumeric).take(32).collect() }; let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); + let nominator_seeds = (0..nominators).map(|_| rand_str()).collect::>(); let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); let sudo_seed = rand_str(); - print_seeds( - &authority_seeds, - &endowed_seeds, - &sudo_seed, - ); + print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); if let Some(keystore_path) = keystore_path { - generate_authority_keys_and_store( - &authority_seeds, - &keystore_path, - )?; + generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; } - let endowed_accounts = endowed_seeds.iter().map(|seed| { - chain_spec::get_account_id_from_seed::(seed) - .to_ss58check() - }).collect(); + let nominator_accounts = nominator_seeds + .into_iter() + .map(|seed| { + chain_spec::get_account_id_from_seed::(&seed).to_ss58check() + }) + .collect(); - let sudo_account = chain_spec::get_account_id_from_seed::(&sudo_seed) - .to_ss58check(); + let endowed_accounts = endowed_seeds + .into_iter() + .map(|seed| { + chain_spec::get_account_id_from_seed::(&seed).to_ss58check() + }) + .collect(); - (authority_seeds, endowed_accounts, sudo_account) - }, - ChainSpecBuilder::New { authority_seeds, endowed_accounts, sudo_account, .. } => { - (authority_seeds, endowed_accounts, sudo_account) + let sudo_account = + chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); + + (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) }, + ChainSpecBuilder::New { + authority_seeds, + nominator_accounts, + endowed_accounts, + sudo_account, + .. 
+ } => (authority_seeds, nominator_accounts, endowed_accounts, sudo_account), }; - let json = generate_chain_spec( - authority_seeds, - endowed_accounts, - sudo_account, - )?; + let json = + generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account)?; fs::write(chain_spec_path, json).map_err(|err| err.to_string()) } diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index fa0b345bc8406..9bd38a21a664b 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -1,28 +1,21 @@ [package] name = "subkey" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" readme = "README.md" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] [[bin]] path = "src/main.rs" name = "subkey" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - [dependencies] -node-runtime = { version = "2.0.0", path = "../../node/runtime" } -node-primitives = { version = "2.0.0", path = "../../node/primitives" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -substrate-frame-cli = { version = "2.0.0", path = "../../../utils/frame/frame-utilities-cli" } +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } structopt = "0.3.14" -frame-system = { version = "2.0.0", path = "../../../frame/system" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } - -[features] -bench = [] diff --git a/bin/utils/subkey/README.adoc b/bin/utils/subkey/README.adoc deleted file mode 100644 index 5ce0d2d324470..0000000000000 --- a/bin/utils/subkey/README.adoc +++ /dev/null @@ -1,83 +0,0 @@ -= Subkey - -Subkey is a commandline utility included with Substrate that generates or restores Substrate keys. - -`subkey` will use the http://wiki.polkadot.network/en/latest/polkadot/learn/cryptography/#keypairs-and-signing[sr25519] cryptography by default. If you need to use the older ed25519 cryptography to generate or restore your key pass the `--ed25519` flag to any of the commands. - -== Usage - -=== Generate a random account - -```bash -subkey generate -``` - -Will output a mnemonic phrase and give you the seed, public key, and address of a new account. DO NOT SHARE your mnemonic or seed with ANYONE it will give them access to your funds. If someone is making a transfer to you they will only need your **Address**. - -=== Inspecting a key - -You can inspect a given URI (mnemonic, seed, public key, or address) and recover the public key and the address. - -```bash -subkey inspect - -OUTPUT: - Public key (hex): 0x461edcf1ba99e43f50dec4bdeb3d1a2cf521ad7c3cd0eeee5cd3314e50fd424c - Address (SS58): 5DeeNqcAcaHDSed2HYnqMDK7JHcvxZ5QUE9EKmjc5snvU6wF -``` - -=== Signing - -`subkey` expects a message to come in on STDIN, one way to sign a message would look like this: - -```bash -echo -n | subkey sign --suri - -OUTPUT: -a69da4a6ccbf81dbbbfad235fa12cf8528c18012b991ae89214de8d20d29c1280576ced6eb38b7406d1b7e03231df6dd4a5257546ddad13259356e1c3adfb509 -``` - -=== Verifying a signature - -```bash -echo -n | subkey verify
- -OUTPUT: -Signature verifies correctly. -``` - -=== Using the vanity generator - -You can use the included vanity generator to find a seed that provides an address which includes the desired pattern. Be warned, depending on your hardware this may take a while. - -```bash -subkey vanity 1337 -``` - -=== Signing a transaction - -Sign a transaction from an encoded `Call`. - -```bash -subkey sign-transaction \ - --call \ - --nonce 0 \ - --suri \ - --password \ - --prior-block-hash -``` - -Will output a signed and encoded `UncheckedMortalCompactExtrinsic` as hex. - -=== Inspecting a module ID - -```bash -subkey module-id "py/trsry" --network kusama - -OUTPUT: -Public Key URI `F3opxRbN5ZbjJNU511Kj2TLuzFcDq9BGduA9TgiECafpg29` is account: - Network ID/version: kusama - Public key (hex): 0x6d6f646c70792f74727372790000000000000000000000000000000000000000 - Account ID: 0x6d6f646c70792f74727372790000000000000000000000000000000000000000 - SS58 Address: F3opxRbN5ZbjJNU511Kj2TLuzFcDq9BGduA9TgiECafpg29 -``` \ No newline at end of file diff --git a/bin/utils/subkey/README.md b/bin/utils/subkey/README.md index 3e9ac0bddbdc1..fbb486247a770 100644 --- a/bin/utils/subkey/README.md +++ b/bin/utils/subkey/README.md @@ -1 +1,208 @@ -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +# Subkey + +Subkey is a commandline utility included with Substrate. It allows generating and restoring keys for Substrate-based chains such as Polkadot, Kusama and a growing number of parachains and Substrate-based projects. + +`subkey` provides a few sub-commands to generate keys, check keys, sign messages, verify messages, etc. + +You can see the full list of commands with `subkey --help`. Most commands have additional help available with, for instance, `subkey generate --help` for the `generate` command. + +## Safety first + +`subkey` does not need an internet connection to work. Indeed, for the best security, you should be using `subkey` on a machine that is **not connected** to the internet. + +`subkey` deals with **seeds** and **private keys**. Make sure to use `subkey` in a safe environment (i.e. no one looking over your shoulder) and on a safe computer (i.e. no one able to check your command history). + +If you save any output of `subkey` into a file, make sure to apply proper permissions and/or delete the file as soon as possible. + +## Usage + +The following guide explains *some* of the `subkey` commands. For the full list and the most up-to-date documentation, make sure to check the integrated help with `subkey --help`. + +### Generate a random account + +Generating a new key is as simple as running: + + subkey generate + +The output looks similar to: + +``` +Secret phrase `hotel forest jar hover kite book view eight stuff angle legend defense` is account: + Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d + Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 + Account ID: 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 + SS58 Address: 5Hpm9fq3W3dQgwWpAwDS2ZHKAdnk86QRCu7iX4GnmDxycrte +``` + +--- +☠️ DO NOT RE-USE ANY OF THE SEEDS AND SECRETS FROM THIS PAGE ☠️. + +You can read more about security and risks in [SECURITY.md](./SECURITY.md) and in the [Polkadot Wiki](https://wiki.polkadot.network/docs/learn-account-generation). + +--- + +The output above shows a **secret phrase** (also called **mnemonic phrase**) and the **secret seed** (also called **Private Key**).
These two secrets are the pieces of information you MUST keep safe and secret. All the other information below can be derived from those secrets. + +The output above also shows the **public key** and the **Account ID**. These are independent of the network where you will use the key. + +The **SS58 address** (or **Public Address**) of a new account is a representation of the public keys of an account for a given network (for instance Kusama or Polkadot). + +You can read more about the SS58 format in the [substrate wiki](https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)) and see the list of reserved prefixes in the [Polkadot wiki](https://wiki.polkadot.network/docs/build-ss58-registry). + +For instance, considering the previous seed `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` the SS58 addresses are: +- Polkadot: `16m4J167Mptt8UXL8aGSAi7U2FnPpPxZHPrCgMG9KJzVoFqM` +- Kusama: `JLNozAv8QeLSbLFwe2UvWeKKE4yvmDbfGxTuiYkF2BUMx4M` + +### JSON output + +`subkey` can also generate the output as *JSON*. This is useful for automation. + +command: +``` +subkey generate --output-type json +``` + +output: +``` +{ + "accountId": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", + "publicKey": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", + "secretPhrase": "hotel forest jar hover kite book view eight stuff angle legend defense", + "secretSeed": "0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d", + "ss58Address": "5Hpm9fq3W3dQgwWpAwDS2ZHKAdnk86QRCu7iX4GnmDxycrte" +} +``` + +So if you only want to get the `secretSeed` for instance, you can use: + +command: +``` +subkey generate --output-type json | jq -r .secretSeed +``` + +output: +``` +0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +``` + +### Additional user-defined password + +`subkey` supports an additional user-defined secret that will be appended to the seed. Let's see the following example: + + subkey generate --password extra_secret + +output: +``` +Secret phrase `soup lyrics media market way crouch elevator put moon useful question wide` is account: + Secret seed: 0xe7cfd179d6537a676cb94bac3b5c5c9cb1550e846ac4541040d077dfbac2e7fd + Public key (hex): 0xf6a233c3e1de1a2ae0486100b460b3ce3d7231ddfe9dadabbd35ab968c70905d + Account ID: 0xf6a233c3e1de1a2ae0486100b460b3ce3d7231ddfe9dadabbd35ab968c70905d + SS58 Address: 5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC +``` + +Using the `inspect` command (see more details below), we see that knowing only the **secret seed** is no longer sufficient to recover the account: + + subkey inspect "soup lyrics media market way crouch elevator put moon useful question wide" + +which recovers the account `5Fe4sqj2K4fRuzEGvToi4KATqZfiDU7TqynjXG6PZE2dxwyh` and not `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC` as we expected. The additional user-defined **password** (`extra_secret` in our example) is now required to fully recover the account. Let's inspect the previous mnemonic, this time also passing the required `password` as shown below: + + subkey inspect --password extra_secret "soup lyrics media market way crouch elevator put moon useful question wide" + +This time, we properly recovered `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC`. + +### Inspecting a key + +If you have *some data* about a key, `subkey inspect` will help you discover more information about it.
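Before moving on, the password behaviour described above can also be checked programmatically. A hedged sketch using the `sp_core` crate (this is not part of subkey itself; the phrase and password are the throwaway examples from this page):

```rust
use sp_core::{crypto::Ss58Codec, sr25519, Pair};

fn main() {
    let phrase = "soup lyrics media market way crouch elevator put moon useful question wide";
    // The mnemonic alone derives one account...
    let (without_pw, _) = sr25519::Pair::from_phrase(phrase, None).unwrap();
    // ...while the mnemonic plus the generation-time password derives another.
    let (with_pw, _) = sr25519::Pair::from_phrase(phrase, Some("extra_secret")).unwrap();
    assert_ne!(without_pw.public(), with_pw.public());
    println!("{}", with_pw.public().to_ss58check());
}
```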
+ +If you have **secrets** that you would like to verify for instance, you can use: + + subkey inspect < mnemonic | seed > + +If you have only **public data**, you can see a subset of the information: + + subkey inspect --public < pubkey | address > + +**NOTE**: While you will be able to recover the secret seed from the mnemonic, the opposite is not possible. + +**NOTE**: For obvious reasons, the **secrets** cannot be recovered from passing **public data** such as `pubkey` or `address` as input. + +command: +``` +subkey inspect 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +``` + +output: +``` +Secret Key URI `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` is account: + Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d + Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 + Account ID: 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 + SS58 Address: 5Hpm9fq3W3dQgwWpAwDS2ZHKAdnk86QRCu7iX4GnmDxycrte +``` + +### Signing + +`subkey` allows using a **secret key** to sign a random message. The signature can then be verified by anyone using your **public key**: + + echo -n | subkey sign --suri + +example: + + MESSAGE=hello + SURI=0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d + echo -n $MESSAGE | subkey sign --suri $SURI + +output: + + 9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c + +**NOTE**: Each run of the `sign` command will yield a different output. While each signature is different, they are all valid. + +### Verifying a signature + +Given a message, a signature and an address, `subkey` can verify whether the **message** has been digitally signed by the holder (or one of the holders) of the **private key** for the given **address**: + + echo -n | subkey verify
+ +example: + + MESSAGE=hello + URI=0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 + SIGNATURE=9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c + echo -n $MESSAGE | subkey verify $SIGNATURE $URI + +output: + + Signature verifies correctly. + +A failure looks like: + + Error: SignatureInvalid + +### Using the vanity generator + +You can use the included vanity generator to find a seed that provides an address which includes the desired pattern. Be warned, depending on your hardware this may take a while. + +command: +``` +subkey vanity --network polkadot --pattern bob +``` + +output: +``` +Generating key containing pattern 'bob' +best: 190 == top: 189 +Secret Key URI `0x8c9a73097f235b84021a446bc2826a00c690ea0be3e0d81a84931cb4146d6691` is account: + Secret seed: 0x8c9a73097f235b84021a446bc2826a00c690ea0be3e0d81a84931cb4146d6691 + Public key (hex): 0x1a8b32e95c1f571118ea0b84801264c3c70f823e320d099e5de31b9b1f18f843 + Account ID: 0x1a8b32e95c1f571118ea0b84801264c3c70f823e320d099e5de31b9b1f18f843 + SS58 Address: 1bobYxBPjZWRPbVo35aSwci1u5Zmq8P6J2jpa4kkudBZMqE +``` + +`Bob` now has a nice address starting with his name: 1**bob**YxBPjZWRPbVo35aSwci1u5Zmq8P6J2jpa4kkudBZMqE. + +**Note**: While `Bob`, having a short name (3 chars), got a result rather quickly, it will take much longer for `Alice`, who has a much longer name; the chances of generating a random address that contains the string `alice` are much smaller. + +## License + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/bin/utils/subkey/SECURITY.md b/bin/utils/subkey/SECURITY.md new file mode 100644 index 0000000000000..672d2965c7eae --- /dev/null +++ b/bin/utils/subkey/SECURITY.md @@ -0,0 +1,25 @@ +# Keys and Security + +The following information is not exhaustive but meant to prevent the most common mistakes. +You can read more about security and risks in the [Polkadot Wiki](https://wiki.polkadot.network/docs/learn-account-generation). +The Polkadot network has a few **test networks**, e.g. **Westend**. Test networks are a great way to experiment and learn safely as you can lose tokens on those networks without any financial consequences. + +`subkey` generates and provides 2 pieces of **secret** information: +- **secret phrase**: a bunch of words, exactly 12 by default (can be 12, 15, 18, 21 or 24) +- **secret seed**: a big hexadecimal value + +There are 2 risks related to private keys: +- loss of keys: this can happen if you don't have a proper backup +- leak of the keys: this can unfortunately happen in many ways, including malware, phishing, key loggers, and backups on systems that are online and not properly secured + +You want to ensure that: +- you **do not lose** those secrets +- **no one but you can access** those secrets + +☠️ **DO NOT SHARE** your mnemonic phrase or secret seed with ANYONE under **ANY** circumstances. Doing so would give them access to your funds and allow them to send transactions on your behalf. + +☠️ If someone is asking for your **secret** phrase or **secret** seed, you can be **SURE** they are attempting to steal your funds. + +✅ It is however fine to share your **SS58 Address** as this is meant to be public information and is needed by anyone you want to be able to make transfers to or otherwise interact with your account. They will only ever need your **Public Address**.
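As an aside on the vanity generator documented above: conceptually it is a brute-force search. A hedged sketch of the idea (illustrative only, not subkey's actual implementation, which also handles network prefixes and progress scoring):

```rust
use sp_core::{crypto::Ss58Codec, sr25519, Pair};

/// Illustrative only: draw random keypairs until the SS58 address contains
/// `pattern`, as the `--pattern bob` example above does.
fn vanity(pattern: &str) -> sr25519::Pair {
    loop {
        let (pair, _seed) = sr25519::Pair::generate();
        if pair.public().to_ss58check().contains(pattern) {
            return pair
        }
    }
}
```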
+ +⚠️ While using the same key on multiple networks is possible, it is usually **not** recommended unless you have good motivations for doing so and understand the associated risks and drawbacks. diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs index 051628e84a193..5052d1b104c2c 100644 --- a/bin/utils/subkey/src/lib.rs +++ b/bin/utils/subkey/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,19 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use structopt::StructOpt; use sc_cli::{ - Error, VanityCmd, SignCmd, VerifyCmd, InsertCmd, - GenerateNodeKeyCmd, GenerateCmd, InspectKeyCmd, InspectNodeKeyCmd + Error, GenerateCmd, GenerateNodeKeyCmd, InspectKeyCmd, InspectNodeKeyCmd, SignCmd, VanityCmd, + VerifyCmd, }; -use substrate_frame_cli::ModuleIdCmd; -use sp_core::crypto::Ss58Codec; +use structopt::StructOpt; #[derive(Debug, StructOpt)] #[structopt( name = "subkey", author = "Parity Team ", - about = "Utility for generating and restoring with Substrate keys", + about = "Utility for generating and restoring with Substrate keys" )] pub enum Subkey { /// Generate a random node libp2p key, save it to file or print it to stdout @@ -44,12 +42,6 @@ pub enum Subkey { /// Print the peer ID corresponding to the node key in the given file InspectNodeKey(InspectNodeKeyCmd), - /// Insert a key to the keystore of a node. - Insert(InsertCmd), - - /// Inspect a module ID address - ModuleId(ModuleIdCmd), - /// Sign a message, with a given (secret) key. Sign(SignCmd), @@ -60,23 +52,15 @@ pub enum Subkey { Verify(VerifyCmd), } -/// Run the subkey command, given the apropriate runtime. -pub fn run() -> Result<(), Error> - where - R: frame_system::Trait, - R::AccountId: Ss58Codec -{ +/// Run the subkey command, given the appropriate runtime. +pub fn run() -> Result<(), Error> { match Subkey::from_args() { - Subkey::GenerateNodeKey(cmd) => cmd.run()?, - Subkey::Generate(cmd) => cmd.run()?, - Subkey::Inspect(cmd) => cmd.run()?, - Subkey::InspectNodeKey(cmd) => cmd.run()?, - Subkey::Insert(cmd) => cmd.run()?, - Subkey::ModuleId(cmd) => cmd.run::()?, - Subkey::Vanity(cmd) => cmd.run()?, - Subkey::Verify(cmd) => cmd.run()?, - Subkey::Sign(cmd) => cmd.run()?, - }; - - Ok(()) + Subkey::GenerateNodeKey(cmd) => cmd.run(), + Subkey::Generate(cmd) => cmd.run(), + Subkey::Inspect(cmd) => cmd.run(), + Subkey::InspectNodeKey(cmd) => cmd.run(), + Subkey::Vanity(cmd) => cmd.run(), + Subkey::Verify(cmd) => cmd.run(), + Subkey::Sign(cmd) => cmd.run(), + } } diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs index dd14425130b7d..2a0f0850713fa 100644 --- a/bin/utils/subkey/src/main.rs +++ b/bin/utils/subkey/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,8 +18,6 @@ //! Subkey utility, based on node_runtime. 
-use node_runtime::Runtime; - fn main() -> Result<(), sc_cli::Error> { - subkey::run::() + subkey::run() } diff --git a/client/allocator/Cargo.toml b/client/allocator/Cargo.toml new file mode 100644 index 0000000000000..5ebab6cf9d61a --- /dev/null +++ b/client/allocator/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "sc-allocator" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Collection of allocator implementations." +documentation = "https://docs.rs/sc-allocator" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-wasm-interface = { version = "4.0.0-dev", path = "../../primitives/wasm-interface" } +log = "0.4.11" +thiserror = "1.0.21" diff --git a/client/allocator/README.md b/client/allocator/README.md new file mode 100644 index 0000000000000..b89348b4c6950 --- /dev/null +++ b/client/allocator/README.md @@ -0,0 +1,6 @@ +Collection of allocator implementations. + +This crate provides the following allocator implementations: +- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](https://docs.rs/sc-allocator/latest/sc_allocator/struct.FreeingBumpHeapAllocator.html) + +License: Apache-2.0 \ No newline at end of file diff --git a/primitives/allocator/src/error.rs b/client/allocator/src/error.rs similarity index 64% rename from primitives/allocator/src/error.rs rename to client/allocator/src/error.rs index 7b634af4d5b29..9b9a55325f758 100644 --- a/primitives/allocator/src/error.rs +++ b/client/allocator/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,24 +16,18 @@ // limitations under the License. /// The error type used by the allocators. -#[derive(sp_core::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(derive_more::Display))] +#[derive(thiserror::Error, Debug)] pub enum Error { /// Someone tried to allocate more memory than the allowed maximum per allocation. - #[cfg_attr(feature = "std", display(fmt="Requested allocation size is too large"))] + #[error("Requested allocation size is too large")] RequestedAllocationTooLarge, /// Allocator run out of space. - #[cfg_attr(feature = "std", display(fmt="Allocator ran out of space"))] + #[error("Allocator ran out of space")] AllocatorOutOfSpace, + /// The client passed a memory instance which is smaller than previously observed. + #[error("Shrinking of the underlying memory is observed")] + MemoryShrinked, /// Some other error occurred. - Other(&'static str) -} - -#[cfg(feature = "std")] -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - _ => None, - } - } + #[error("Other: {0}")] + Other(&'static str), } diff --git a/primitives/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs similarity index 80% rename from primitives/allocator/src/freeing_bump.rs rename to client/allocator/src/freeing_bump.rs index 19d7866e1b53b..741f4012cdcbe 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,18 +36,45 @@ //! //! For implementing freeing we maintain a linked list for each order. The maximum supported //! allocation size is capped, therefore the number of orders and thus the linked lists is as well -//! limited. Currently, the maximum size of an allocation is 16 MiB. +//! limited. Currently, the maximum size of an allocation is 32 MiB. //! -//! When the allocator serves an allocation request it first checks the linked list for the respective -//! order. If it doesn't have any free chunks, the allocator requests memory from the bump allocator. -//! In any case the order is stored in the header of the allocation. +//! When the allocator serves an allocation request it first checks the linked list for the +//! respective order. If it doesn't have any free chunks, the allocator requests memory from the +//! bump allocator. In any case the order is stored in the header of the allocation. //! //! Upon deallocation we get the order of the allocation from its header and then add that //! allocation to the linked list for the respective order. +//! +//! # Caveats +//! +//! This is a fast allocator but it is also dumb. There are specifically two main shortcomings +//! that the user should keep in mind: +//! +//! - Once the bump allocator space is exhausted, there is no way to reclaim the memory. This means +//! that it's possible to end up in a situation where there are no live allocations yet a new +//! allocation will fail. +//! +//! Let's look at an example. Given a heap of 32 MiB, the user makes a 32 MiB allocation that we +//! call `X`. Now the heap is full. Then the user deallocates `X`. Since all the space in the bump +//! allocator was consumed by the 32 MiB allocation, allocations of all sizes except 32 MiB will +//! fail. +//! +//! - Sizes of allocations are rounded up to the nearest order. That is, an allocation of 2.00001 +//! MiB will be put into the bucket of 4 MiB. Therefore, any allocation of size `(N, 2N]` will +//! take up to `2N`, thus assuming a uniform distribution of allocation sizes, the average amount +//! in use of a `2N` space on the heap will be `(3N + ε) / 2`. So average utilization is going to +//! be around 75% (`(3N + ε) / 2 / 2N`) meaning that around 25% of the space in allocations will be +//! wasted. This is more pronounced (in terms of absolute heap amounts) with larger allocation +//! sizes. use crate::Error; -use sp_std::{mem, convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}}; +pub use sp_core::MAX_POSSIBLE_ALLOCATION; use sp_wasm_interface::{Pointer, WordSize}; +use std::{ + convert::{TryFrom, TryInto}, + mem, + ops::{Index, IndexMut, Range}, +}; /// The minimal alignment guaranteed by this allocator. /// @@ -64,29 +91,18 @@ fn error(msg: &'static str) -> Error { Error::Other(msg) } -/// A custom "trace" implementation that is only activated when `feature = std`. -/// -/// Uses `wasm-heap` as default target. -macro_rules! trace { - ( $( $args:expr ),+ ) => { - sp_std::if_std! { - log::trace!(target: "wasm-heap", $( $args ),+); - } - } -} +const LOG_TARGET: &'static str = "wasm-heap"; // The minimum possible allocation size is chosen to be 8 bytes because in that case we would have // easier time to provide the guaranteed alignment of 8. // -// The maximum possible allocation size was chosen rather arbitrary. 16 MiB should be enough for -// everybody.
+// The maximum possible allocation size is set in the primitives to 32MiB. // // N_ORDERS - represents the number of orders supported. // // This number corresponds to the number of powers between the minimum possible allocation and -// maximum possible allocation, or: 2^3...2^24 (both ends inclusive, hence 22). -const N_ORDERS: usize = 22; -const MAX_POSSIBLE_ALLOCATION: u32 = 16777216; // 2^24 bytes, 16 MiB +// maximum possible allocation, or: 2^3...2^25 (both ends inclusive, hence 23). +const N_ORDERS: usize = 23; const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes /// The exponent for the power of two sized block adjusted to the minimum size. @@ -100,6 +116,7 @@ const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes /// 64 | 3 /// ... /// 16777216 | 21 +/// 33554432 | 22 /// /// and so on. #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -124,7 +141,8 @@ impl Order { /// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION` fn from_size(size: u32) -> Result { let clamped_size = if size > MAX_POSSIBLE_ALLOCATION { - return Err(Error::RequestedAllocationTooLarge); + log::warn!(target: LOG_TARGET, "going to fail due to allocating {:?}", size); + return Err(Error::RequestedAllocationTooLarge) } else if size < MIN_POSSIBLE_ALLOCATION { MIN_POSSIBLE_ALLOCATION } else { @@ -157,7 +175,7 @@ impl Order { } /// A special magic value for a pointer in a link that denotes the end of the linked list. -const NIL_MARKER: u32 = u32::max_value(); +const NIL_MARKER: u32 = u32::MAX; /// A link between headers in the free list. #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -199,9 +217,8 @@ impl Link { /// | 0 | next element link | /// +--------------+-------------------+ /// ``` -/// +/// /// ## Occupied header -/// /// ```ignore /// 64 32 0 // +--------------+-------------------+ @@ -275,9 +292,7 @@ struct FreeLists { impl FreeLists { /// Creates the free empty lists. fn new() -> Self { - Self { - heads: [Link::Nil; N_ORDERS] - } + Self { heads: [Link::Nil; N_ORDERS] } } /// Replaces a given link for the specified order and returns the old one. @@ -309,6 +324,20 @@ pub struct FreeingBumpHeapAllocator { free_lists: FreeLists, total_size: u32, poisoned: bool, + max_total_size: u32, + max_bumper: u32, + last_observed_memory_size: u32, +} + +impl Drop for FreeingBumpHeapAllocator { + fn drop(&mut self) { + log::debug!( + target: LOG_TARGET, + "allocator being destroyed, max_total_size {}, max_bumper {}", + self.max_total_size, + self.max_bumper, + ) + } } impl FreeingBumpHeapAllocator { @@ -325,15 +354,21 @@ impl FreeingBumpHeapAllocator { free_lists: FreeLists::new(), total_size: 0, poisoned: false, + max_total_size: 0, + max_bumper: aligned_heap_base, + last_observed_memory_size: 0, } } /// Gets requested number of bytes to allocate and returns a pointer. - /// The maximum size which can be allocated at once is 16 MiB. + /// The maximum size which can be allocated at once is 32 MiB. /// There is no minimum size, but whatever size is passed into /// this function is rounded to the next power of two. If the requested /// size is below 8 bytes it will be rounded up to 8 bytes. /// + /// The identity or the type of the passed memory object does not matter. However, the size of + /// memory cannot shrink compared to the memory passed in previous invocations. + /// /// NOTE: Once the allocator has returned an error all subsequent requests will return an error. 
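A minimal usage sketch of the allocation API described here, assuming the `sc-allocator` crate name from the new Cargo.toml above (a plain byte buffer works because `[u8]` implements the `Memory` trait further below):

```rust
use sc_allocator::FreeingBumpHeapAllocator;

fn main() {
    // The heap lives inside this linear "memory".
    let mut mem = vec![0u8; 64 * 1024];
    let mut heap = FreeingBumpHeapAllocator::new(0);

    // 24 bytes are rounded up to a 32-byte bucket plus the 8-byte header.
    let ptr = heap.allocate(&mut mem[..], 24).unwrap();
    heap.deallocate(&mut mem[..], ptr).unwrap();
}
```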
/// /// # Arguments @@ -350,6 +385,8 @@ impl FreeingBumpHeapAllocator { } let bomb = PoisonBomb { poisoned: &mut self.poisoned }; + + Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?; let order = Order::from_size(size)?; let header_ptr: u32 = match self.free_lists[order] { @@ -367,22 +404,32 @@ impl FreeingBumpHeapAllocator { self.free_lists[order] = next_free; header_ptr - } + }, Link::Nil => { // Corresponding free list is empty. Allocate a new item. - Self::bump( - &mut self.bumper, - order.size() + HEADER_SIZE, - mem.size(), - )? - } + Self::bump(&mut self.bumper, order.size() + HEADER_SIZE, mem.size())? + }, }; // Write the order in the occupied header. Header::Occupied(order).write_into(mem, header_ptr)?; self.total_size += order.size() + HEADER_SIZE; - trace!("Heap size is {} bytes after allocation", self.total_size); + + log::trace!( + target: LOG_TARGET, + "after allocation, total_size = {}, bumper = {}.", + self.total_size, + self.bumper, + ); + + // update trackers if needed. + if self.total_size > self.max_total_size { + self.max_total_size = self.total_size; + } + if self.bumper > self.max_bumper { + self.max_bumper = self.bumper; + } bomb.disarm(); Ok(Pointer::new(header_ptr + HEADER_SIZE)) @@ -390,19 +437,28 @@ impl FreeingBumpHeapAllocator { /// Deallocates the space which was allocated for a pointer. /// + /// The identity or the type of the passed memory object does not matter. However, the size of + /// memory cannot shrink compared to the memory passed in previous invocations. + /// /// NOTE: Once the allocator has returned an error all subsequent requests will return an error. /// /// # Arguments /// /// - `mem` - a slice representing the linear memory on which this allocator operates. /// - `ptr` - pointer to the allocated chunk - pub fn deallocate(&mut self, mem: &mut M, ptr: Pointer) -> Result<(), Error> { + pub fn deallocate( + &mut self, + mem: &mut M, + ptr: Pointer, + ) -> Result<(), Error> { if self.poisoned { return Err(error("the allocator has been poisoned")) } let bomb = PoisonBomb { poisoned: &mut self.poisoned }; + Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?; + let header_ptr = u32::from(ptr) .checked_sub(HEADER_SIZE) .ok_or_else(|| error("Invalid pointer for deallocation"))?; @@ -420,7 +476,11 @@ impl FreeingBumpHeapAllocator { .total_size .checked_sub(order.size() + HEADER_SIZE) .ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?; - trace!("Heap size is {} bytes after deallocation", self.total_size); + log::trace!( + "after deallocation, total_size = {}, bumper = {}.", + self.total_size, + self.bumper, + ); bomb.disarm(); Ok(()) @@ -428,18 +488,34 @@ impl FreeingBumpHeapAllocator { /// Increases the `bumper` by `size`. /// - /// Returns the `bumper` from before the increase. - /// Returns an `Error::AllocatorOutOfSpace` if the operation - /// would exhaust the heap. + /// Returns the `bumper` from before the increase. Returns an `Error::AllocatorOutOfSpace` if + /// the operation would exhaust the heap. 
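The heap space consumed per request follows directly from the order rounding described in the caveats earlier; a self-contained sketch of that arithmetic (the helper name is illustrative; 8 bytes is both the minimum allocation and the header size):

```rust
/// Illustrative only: bytes consumed on the heap for a requested `size`,
/// per the rounding rules above (next power of two, minimum 8, plus the
/// 8-byte header storing the order).
fn bytes_consumed(size: u32) -> u32 {
    size.next_power_of_two().max(8) + 8
}

fn main() {
    assert_eq!(bytes_consumed(8), 16); // exact fit plus header
    assert_eq!(bytes_consumed(9), 24); // rounds up to 16; ~44% of the bucket unused
    assert_eq!(bytes_consumed(2 * 1024 * 1024 + 1), 4 * 1024 * 1024 + 8); // the "2.00001 MiB" case
}
```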
fn bump(bumper: &mut u32, size: u32, heap_end: u32) -> Result { if *bumper + size > heap_end { - return Err(Error::AllocatorOutOfSpace); + log::error!( + target: LOG_TARGET, + "running out of space with current bumper {}, mem size {}", + bumper, + heap_end + ); + return Err(Error::AllocatorOutOfSpace) } let res = *bumper; *bumper += size; Ok(res) } + + fn observe_memory_size( + last_observed_memory_size: &mut u32, + mem: &mut M, + ) -> Result<(), Error> { + if mem.size() < *last_observed_memory_size { + return Err(Error::MemoryShrinked) + } + *last_observed_memory_size = mem.size(); + Ok(()) + } } /// A trait for abstraction of accesses to a wasm linear memory. Used to read or modify the @@ -473,7 +549,7 @@ impl Memory for [u8] { let range = heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?; let bytes = val.to_le_bytes(); - &mut self[range].copy_from_slice(&bytes[..]); + self[range].copy_from_slice(&bytes[..]); Ok(()) } fn size(&self) -> u32 { @@ -813,7 +889,7 @@ mod tests { #[test] fn should_get_max_item_size_from_index() { // given - let raw_order = 21; + let raw_order = 22; // when let item_size = Order::from_raw(raw_order).unwrap().size(); @@ -868,4 +944,48 @@ mod tests { assert!(heap.poisoned); assert!(heap.deallocate(mem.as_mut(), alloc_ptr).is_err()); } + + #[test] + fn test_n_orders() { + // Test that N_ORDERS is consistent with min and max possible allocation. + assert_eq!( + MIN_POSSIBLE_ALLOCATION * 2u32.pow(N_ORDERS as u32 - 1), + MAX_POSSIBLE_ALLOCATION + ); + } + + #[test] + fn accepts_growing_memory() { + const ITEM_SIZE: u32 = 16; + const ITEM_ON_HEAP_SIZE: usize = 16 + HEADER_SIZE as usize; + + let mut mem = vec![0u8; ITEM_ON_HEAP_SIZE * 2]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); + let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); + + mem.extend_from_slice(&[0u8; ITEM_ON_HEAP_SIZE]); + + let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); + } + + #[test] + fn doesnt_accept_shrinking_memory() { + const ITEM_SIZE: u32 = 16; + const ITEM_ON_HEAP_SIZE: usize = 16 + HEADER_SIZE as usize; + + let initial_size = ITEM_ON_HEAP_SIZE * 3; + let mut mem = vec![0u8; initial_size]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + let _ = heap.allocate(&mut mem[..], ITEM_SIZE).unwrap(); + + mem.truncate(initial_size - 1); + + match heap.allocate(&mut mem[..], ITEM_SIZE).unwrap_err() { + Error::MemoryShrinked => (), + _ => panic!(), + } + } } diff --git a/primitives/allocator/src/lib.rs b/client/allocator/src/lib.rs similarity index 91% rename from primitives/allocator/src/lib.rs rename to client/allocator/src/lib.rs index b7cfce8048354..4493db3c7d146 100644 --- a/primitives/allocator/src/lib.rs +++ b/client/allocator/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,11 +20,10 @@ //! This crate provides the following allocator implementations: //! 
- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) -#![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] mod error; mod freeing_bump; -pub use freeing_bump::FreeingBumpHeapAllocator; pub use error::Error; +pub use freeing_bump::FreeingBumpHeapAllocator; diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index d0fb5fc3ee0e2..772f22e822eb2 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-api" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,38 +14,31 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -derive_more = "0.99.2" -sc-executor = { version = "0.8.0", path = "../executor" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-executor = { version = "0.10.0-dev", path = "../executor" } +sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" } fnv = "1.0.6" futures = "0.3.1" hash-db = { version = "0.15.2", default-features = false } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -hex-literal = "0.3.1" -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -kvdb = "0.7.0" +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } log = "0.4.8" -parking_lot = "0.10.0" -lazy_static = "1.4.0" -sp-database = { version = "2.0.0", path = "../../primitives/database" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", default-features = false, path = "../../primitives/keystore" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } +parking_lot = "0.11.1" +sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", default-features = false, path = "../../primitives/keystore" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sc-utils = { 
version = "4.0.0-dev", path = "../utils" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } +sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } +sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } [dev-dependencies] -kvdb-memorydb = "0.7.0" sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +thiserror = "1.0.21" diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 47fec977f5e82..8b5bd50ffa614 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,27 +18,29 @@ //! Substrate Client data backend -use std::sync::Arc; -use std::collections::{HashMap, HashSet}; -use sp_core::ChangesTrieConfigurationRange; -use sp_core::offchain::{OffchainStorage,storage::OffchainOverlayedChanges}; -use sp_runtime::{generic::BlockId, Justification, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; -use sp_state_machine::{ - ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, -}; -use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ - blockchain::{ - Backend as BlockchainBackend, well_known_cache_keys - }, + blockchain::{well_known_cache_keys, Backend as BlockchainBackend}, light::RemoteBlockchain, UsageInfo, }; +use parking_lot::RwLock; use sp_blockchain; use sp_consensus::BlockOrigin; -use parking_lot::RwLock; +use sp_core::{offchain::OffchainStorage, ChangesTrieConfigurationRange}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, NumberFor}, + Justification, Justifications, Storage, +}; +use sp_state_machine::{ + ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, + ChildStorageCollection, IndexOperation, OffchainChangesCollection, StorageCollection, +}; +use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; pub use sp_state_machine::Backend as StateBackend; use std::marker::PhantomData; @@ -89,16 +91,17 @@ pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, D, I>( insert: I, delete: D, ) -> sp_blockchain::Result<()> - where - Block: BlockT, - B: Backend, - I: IntoIterator, - D: IntoIterator, +where + Block: BlockT, + B: Backend, + I: IntoIterator, + D: IntoIterator, { operation.op.insert_aux( - insert.into_iter() + insert + .into_iter() .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - .chain(delete.into_iter().map(|k| (k.to_vec(), None))) + .chain(delete.into_iter().map(|k| (k.to_vec(), None))), ) } @@ -148,7 +151,8 @@ pub trait BlockImportOperation { &mut self, header: Block::Header, body: Option>, - 
justification: Option, + indexed_body: Option>>, + justifications: Option, state: NewBlockState, ) -> sp_blockchain::Result<()>; @@ -161,6 +165,14 @@ pub trait BlockImportOperation { update: TransactionForSB, ) -> sp_blockchain::Result<()>; + /// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written + /// to the database. + fn set_genesis_state( + &mut self, + storage: Storage, + commit: bool, + ) -> sp_blockchain::Result; + /// Inject storage data into the database replacing any existing data. fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result; @@ -174,9 +186,9 @@ pub trait BlockImportOperation { /// Write offchain storage changes to the database. fn update_offchain_storage( &mut self, - _offchain_update: OffchainOverlayedChanges, + _offchain_update: OffchainChangesCollection, ) -> sp_blockchain::Result<()> { - Ok(()) + Ok(()) } /// Inject changes trie data into the database. @@ -189,7 +201,8 @@ pub trait BlockImportOperation { /// /// Values are `None` if should be deleted. fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> - where I: IntoIterator, Option>)>; + where + I: IntoIterator, Option>)>; /// Mark a block as finalized. fn mark_finalized( @@ -197,18 +210,23 @@ pub trait BlockImportOperation { id: BlockId, justification: Option, ) -> sp_blockchain::Result<()>; + /// Mark a block as new head. If both block import and set head are specified, set head /// overrides block import's best block rule. fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; + + /// Add a transaction index operation. + fn update_transaction_index(&mut self, index: Vec) + -> sp_blockchain::Result<()>; } /// Interface for performing operations on the backend. pub trait LockImportRun> { /// Lock the import lock, and run operations inside. fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From; + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From; } /// Finalize Facilities @@ -230,7 +248,6 @@ pub trait Finalizer> { notify: bool, ) -> sp_blockchain::Result<()>; - /// Finalize a block. /// /// This will implicitly finalize all blocks up to it and @@ -250,7 +267,6 @@ pub trait Finalizer> { justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; - } /// Provides access to an auxiliary database. @@ -262,9 +278,13 @@ pub trait AuxStore { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()>; + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()>; /// Query auxiliary data from key-value store. fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>>; @@ -273,38 +293,48 @@ pub trait AuxStore { /// An `Iterator` that iterates keys in a given block under a prefix. pub struct KeyIterator<'a, State, Block> { state: State, + child_storage: Option, prefix: Option<&'a StorageKey>, current_key: Vec, _phantom: PhantomData, } -impl <'a, State, Block> KeyIterator<'a, State, Block> { +impl<'a, State, Block> KeyIterator<'a, State, Block> { /// create a KeyIterator instance pub fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec) -> Self { - Self { - state, - prefix, - current_key, - _phantom: PhantomData, - } + Self { state, child_storage: None, prefix, current_key, _phantom: PhantomData } + } + + /// Create a `KeyIterator` instance for a child storage. 
+ pub fn new_child( + state: State, + child_info: ChildInfo, + prefix: Option<&'a StorageKey>, + current_key: Vec, + ) -> Self { + Self { state, child_storage: Some(child_info), prefix, current_key, _phantom: PhantomData } } } -impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where +impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> +where Block: BlockT, State: StateBackend>, { type Item = StorageKey; fn next(&mut self) -> Option { - let next_key = self.state - .next_storage_key(&self.current_key) - .ok() - .flatten()?; + let next_key = if let Some(child_info) = self.child_storage.as_ref() { + self.state.next_child_storage_key(child_info, &self.current_key) + } else { + self.state.next_storage_key(&self.current_key) + } + .ok() + .flatten()?; // this terminates the iterator the first time it fails. if let Some(prefix) = self.prefix { if !next_key.starts_with(&prefix.0[..]) { - return None; + return None } } self.current_key = next_key.clone(); @@ -315,51 +345,78 @@ impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where /// Provides access to storage primitives pub trait StorageProvider> { /// Given a `BlockId` and a key, return the value under the key in that block. - fn storage(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; + fn storage( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result>; /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. - fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result>; + fn storage_keys( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result>; /// Given a `BlockId` and a key, return the hash of the value under the key in that block. - fn storage_hash(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; + fn storage_hash( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result>; - /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block. + /// Given a `BlockId` and a key prefix, return the matching storage keys and values in + /// that block. fn storage_pairs( &self, id: &BlockId, - key_prefix: &StorageKey + key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block. + /// Given a `BlockId` and a key prefix, return a `KeyIterator` that iterates matching storage keys in + /// that block. fn storage_keys_iter<'a>( &self, id: &BlockId, prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> + start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; - /// Given a `BlockId`, a key and a child storage key, return the value under the key in that block. + /// Given a `BlockId`, a key and a child storage key, return the value under the key in that + /// block. fn child_storage( &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys. + /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage + /// keys. fn child_storage_keys( &self, id: &BlockId, child_info: &ChildInfo, - key_prefix: &StorageKey + key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block.
+ /// Given a `BlockId`, a key `prefix`, and a child storage key, + /// return a `KeyIterator` that iterates matching storage keys in that block. + fn child_storage_keys_iter<'a>( + &self, + id: &BlockId, + child_info: ChildInfo, + prefix: Option<&'a StorageKey>, + start_key: Option<&StorageKey>, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that + /// block. fn child_storage_hash( &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result>; /// Get longest range within [first; last] that is possible to use in `key_changes` @@ -381,7 +438,7 @@ pub trait StorageProvider> { first: NumberFor, last: BlockId, storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result, u32)>>; } @@ -432,6 +489,15 @@ pub trait Backend: AuxStore + Send + Sync { justification: Option, ) -> sp_blockchain::Result<()>; + /// Append justification to the block with the given Id. + /// + /// This should only be called for blocks that are already finalized. + fn append_justification( + &self, + block: BlockId, + justification: Justification, + ) -> sp_blockchain::Result<()>; + /// Returns reference to blockchain backend. fn blockchain(&self) -> &Self::Blockchain; @@ -464,15 +530,21 @@ pub trait Backend: AuxStore + Send + Sync { revert_finalized: bool, ) -> sp_blockchain::Result<(NumberFor, HashSet)>; + /// Discard non-best, unfinalized leaf block. + fn remove_leaf_block(&self, hash: &Block::Hash) -> sp_blockchain::Result<()>; + /// Insert auxiliary data into key-value store. fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> - { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { AuxStore::insert_aux(self, insert, delete) } /// Query auxiliary data from key-value store. @@ -496,12 +568,14 @@ pub trait PrunableStateChangesTrieStorage: /// Get reference to StateChangesTrieStorage. fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; /// Get configuration at given block. - fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result< - ChangesTrieConfigurationRange, Block::Hash> - >; + fn configuration_at( + &self, + at: &BlockId, + ) -> sp_blockchain::Result, Block::Hash>>; /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. /// It is guaranteed that we have no changes tries before (and including) this block. /// It is guaranteed that all existing changes tries after this block are not yet pruned (if + /// created). fn oldest_pruned_digest_range_end(&self) -> NumberFor; } @@ -532,7 +606,8 @@ pub fn changes_tries_state_at_block<'a, Block: BlockT>( let config_range = storage.configuration_at(block)?; match config_range.config { - Some(config) => Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), + Some(config) => + Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), None => Ok(None), } } @@ -547,7 +622,8 @@ pub trait ProvideChtRoots { block: NumberFor, ) -> sp_blockchain::Result>; - /// Get changes trie CHT root for given block. Returns None if the block is not a part of any CHT. + /// Get changes trie CHT root for given block. Returns None if the block is not a part of any + /// CHT.
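For context, the `append_justification` method added above builds on the `Justifications` container from `sp-runtime`, which keeps at most one justification per consensus engine id. A minimal, self-contained sketch of that invariant follows; the engine ids are illustrative, not part of this patch:

use sp_runtime::{ConsensusEngineId, Justifications};

const ENGINE_A: ConsensusEngineId = *b"TSTA";
const ENGINE_B: ConsensusEngineId = *b"TSTB";

fn main() {
    // A block is finalized with a single justification.
    let mut justifications = Justifications::from((ENGINE_A, vec![1, 2, 3]));
    // A second engine may append its own proof later...
    assert!(justifications.append((ENGINE_B, vec![4, 5])));
    // ...but a second proof for the same engine is rejected, which the
    // in-memory backend later in this patch surfaces as `BadJustification`.
    assert!(!justifications.append((ENGINE_A, vec![6])));
}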
fn changes_trie_cht_root( &self, cht_size: NumberFor, diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index d9d43900dfc94..22af495c06542 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,20 +18,16 @@ //! A method call executor interface. -use std::{panic::UnwindSafe, result, cell::RefCell}; -use codec::{Encode, Decode}; -use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor}, -}; -use sp_state_machine::{ - OverlayedChanges, ExecutionManager, ExecutionStrategy, StorageProof, -}; -use sc_executor::{RuntimeVersion, NativeVersion}; +use codec::{Decode, Encode}; +use sc_executor::RuntimeVersion; +use sp_core::NativeOrEncoded; use sp_externalities::Extensions; -use sp_core::{NativeOrEncoded,offchain::storage::OffchainOverlayedChanges}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_state_machine::{ExecutionManager, ExecutionStrategy, OverlayedChanges, StorageProof}; +use std::{cell::RefCell, panic::UnwindSafe, result}; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; +use sp_api::{ProofRecorder, StorageTransactionCache}; /// Executor Provider pub trait ExecutorProvider { @@ -71,66 +67,43 @@ pub trait CallExecutor { /// Before executing the method, passed header is installed as the current header /// of the execution context. fn contextual_call< - 'a, - IB: Fn() -> sp_blockchain::Result<()>, EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - initialize_block_fn: IB, at: &BlockId, method: &str, call_data: &[u8], changes: &RefCell, - offchain_changes: &RefCell, - storage_transaction_cache: Option<&RefCell< - StorageTransactionCache>::State>, - >>, - initialize_block: InitializeBlock<'a, B>, + storage_transaction_cache: Option< + &RefCell< + StorageTransactionCache>::State>, + >, + >, execution_manager: ExecutionManager, native_call: Option, proof_recorder: &Option>, extensions: Option, - ) -> sp_blockchain::Result> where ExecutionManager: Clone; + ) -> sp_blockchain::Result> + where + ExecutionManager: Clone; /// Extract RuntimeVersion of given block /// /// No changes are made. fn runtime_version(&self, id: &BlockId) -> Result; - /// Execute a call to a contract on top of given state, gathering execution proof. - /// - /// No changes are made. - fn prove_at_state>>( - &self, - mut state: S, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - )?; - self.prove_at_trie_state(trie_state, overlay, method, call_data) - } - - /// Execute a call to a contract on top of given trie state, gathering execution proof. + /// Prove the execution of the given `method`. /// /// No changes are made. 
- fn prove_at_trie_state>>( + fn prove_execution( &self, - trie_state: &sp_state_machine::TrieBackend>, - overlay: &mut OverlayedChanges, + at: &BlockId, method: &str, - call_data: &[u8] + call_data: &[u8], ) -> Result<(Vec, StorageProof), sp_blockchain::Error>; - - /// Get runtime version if supported. - fn native_runtime_version(&self) -> Option<&NativeVersion>; } diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 30cfd3a1b671b..ee7854b5d8297 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,18 +22,18 @@ //! One is generated for every `SIZE` blocks, allowing us to discard those blocks in //! favor of the trie root. When the "ancient" blocks need to be accessed, we simply //! request an inclusion proof of a specific block number against the trie with the -//! root has. A correct proof implies that the claimed block is identical to the one +//! root hash. A correct proof implies that the claimed block is identical to the one //! we discarded. -use hash_db; use codec::Encode; +use hash_db; use sp_trie; -use sp_core::{H256, convert_hash}; -use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; +use sp_core::{convert_hash, H256}; +use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero}; use sp_state_machine::{ - MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend + prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend, + Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -47,19 +47,20 @@ pub fn size>() -> N { SIZE.into() } -/// Returns Some(cht_number) if CHT is need to be built when the block with given number is canonized. +/// Returns Some(cht_number) if CHT needs to be built when the block with given number is +/// canonized. pub fn is_build_required(cht_size: N, block_num: N) -> Option - where - N: Clone + AtLeast32Bit, +where + N: Clone + AtLeast32Bit, { let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?; let two = N::one() + N::one(); if block_cht_num < two { - return None; + return None } let cht_start = start_number(cht_size, block_cht_num.clone()); if cht_start != block_num { - return None; + return None } Some(block_cht_num - two) @@ -67,13 +68,13 @@ pub fn is_build_required(cht_size: N, block_num: N) -> Option /// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number.
pub fn max_cht_number(cht_size: N, max_canonical_block: N) -> Option - where - N: Clone + AtLeast32Bit, +where + N: Clone + AtLeast32Bit, { let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?; let two = N::one() + N::one(); if max_cht_number < two { - return None; + return None } Some(max_cht_number - two) } @@ -86,16 +87,16 @@ pub fn compute_root( cht_num: Header::Number, hashes: I, ) -> ClientResult - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - I: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord, + I: IntoIterator>>, { use sp_trie::TrieConfiguration; - Ok(sp_trie::trie_types::Layout::::trie_root( - build_pairs::(cht_size, cht_num, hashes)? - )) + Ok(sp_trie::trie_types::Layout::::trie_root(build_pairs::( + cht_size, cht_num, hashes, + )?)) } /// Build CHT-based header proof. @@ -103,26 +104,28 @@ pub fn build_proof( cht_size: Header::Number, cht_num: Header::Number, blocks: BlocksI, - hashes: HashesI + hashes: HashesI, ) -> ClientResult - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, - BlocksI: IntoIterator, - HashesI: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, + BlocksI: IntoIterator, + HashesI: IntoIterator>>, { let transaction = build_pairs::(cht_size, cht_num, hashes)? .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); - let trie_storage = storage.as_trie_backend() + let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let trie_storage = storage + .as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - ).map_err(ClientError::Execution) + ) + .map_err(ClientError::from_state) } /// Check CHT-based header proof. 
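The proof helpers below lean on the CHT numbering scheme defined earlier in this file. A standalone sketch of that arithmetic, with `u64` standing in for the generic `N: AtLeast32Bit` and `SIZE` assumed to be the crate's 4096-block constant:

const SIZE: u64 = 4096;

// Mirrors `block_to_cht_number` above: genesis (block 0) belongs to no CHT,
// and blocks are otherwise grouped SIZE at a time.
fn block_to_cht_number(block: u64) -> Option<u64> {
    if block == 0 { None } else { Some((block - 1) / SIZE) }
}

// Mirrors `start_number`: the first block covered by a given CHT.
fn start_number(cht_num: u64) -> u64 {
    cht_num * SIZE + 1
}

fn main() {
    // CHT #1 covers blocks [4097, 8192]. Per `is_build_required`, it only
    // becomes buildable once block 12289 (the first block of CHT #3) is
    // canonized, i.e. Some(3 - 2).
    assert_eq!(block_to_cht_number(4097), Some(1));
    assert_eq!(block_to_cht_number(8192), Some(1));
    assert_eq!(start_number(3), 12289);
}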
@@ -132,25 +135,24 @@ pub fn check_proof( remote_hash: Header::Hash, remote_proof: StorageProof, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, { do_check_proof::( local_root, local_number, remote_hash, - move |local_root, local_cht_key| + move |local_root, local_cht_key| { read_proof_check::( local_root, remote_proof, ::std::iter::once(local_cht_key), ) - .map(|mut map| map - .remove(local_cht_key) - .expect("checked proof of local_cht_key; qed")) - .map_err(|e| ClientError::from(e)), + .map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed")) + .map_err(ClientError::from_state) + }, ) } @@ -161,20 +163,19 @@ pub fn check_proof_on_proving_backend( remote_hash: Header::Hash, proving_backend: &TrieBackend, Hasher>, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, { do_check_proof::( local_root, local_number, remote_hash, - |_, local_cht_key| - read_proof_check_on_proving_backend::( - proving_backend, - local_cht_key, - ).map_err(|e| ClientError::from(e)), + |_, local_cht_key| { + read_proof_check_on_proving_backend::(proving_backend, local_cht_key) + .map_err(ClientError::from_state) + }, ) } @@ -185,22 +186,22 @@ fn do_check_proof( remote_hash: Header::Hash, checker: F, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord, + F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { let root: Hasher::Out = convert_hash(&local_root); let local_cht_key = encode_cht_key(local_number); let local_cht_value = checker(root, &local_cht_key)?; let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?; - let local_hash = decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; + let local_hash = + decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; match &local_hash[..] == remote_hash.as_ref() { true => Ok(()), false => Err(ClientError::InvalidCHTProof.into()), } - } /// Group ordered blocks by CHT number and call functor with blocks of each group. @@ -210,32 +211,31 @@ pub fn for_each_cht_group( mut functor: F, mut functor_param: P, ) -> ClientResult<()> - where - Header: HeaderT, - I: IntoIterator, - F: FnMut(P, Header::Number, Vec) -> ClientResult, +where + Header: HeaderT, + I: IntoIterator, + F: FnMut(P, Header::Number, Vec) -> ClientResult, { let mut current_cht_num = None; let mut current_cht_blocks = Vec::new(); for block in blocks { - let new_cht_num = match block_to_cht_number(cht_size, block) { - Some(new_cht_num) => new_cht_num, - None => return Err(ClientError::Backend(format!( - "Cannot compute CHT root for the block #{}", block)).into() - ), - }; + let new_cht_num = block_to_cht_number(cht_size, block).ok_or_else(|| { + ClientError::Backend(format!("Cannot compute CHT root for the block #{}", block)) + })?; let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num); if advance_to_next_cht { - let current_cht_num = current_cht_num.expect("advance_to_next_cht is true; - it is true only when current_cht_num is Some; qed"); - assert!(new_cht_num > current_cht_num, "for_each_cht_group only supports ordered iterators"); - - functor_param = functor( - functor_param, - current_cht_num, - std::mem::take(&mut current_cht_blocks), - )?; + let current_cht_num = current_cht_num.expect( + "advance_to_next_cht is true; + it is true only when current_cht_num is Some; qed", + ); + assert!( + new_cht_num > current_cht_num, + "for_each_cht_group only supports ordered iterators" + ); + + functor_param = + functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; } current_cht_blocks.push(block); @@ -243,11 +243,7 @@ pub fn for_each_cht_group( } if let Some(current_cht_num) = current_cht_num { - functor( - functor_param, - current_cht_num, - std::mem::take(&mut current_cht_blocks), - )?; + functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; } Ok(()) @@ -257,26 +253,22 @@ pub fn for_each_cht_group( fn build_pairs( cht_size: Header::Number, cht_num: Header::Number, - hashes: I + hashes: I, ) -> ClientResult, Vec)>> - where - Header: HeaderT, - I: IntoIterator>>, +where + Header: HeaderT, + I: IntoIterator>>, { let start_num = start_number(cht_size, cht_num); let mut pairs = Vec::new(); let mut hash_index = Header::Number::zero(); for hash in hashes.into_iter() { - let hash = hash?.ok_or_else(|| ClientError::from( - ClientError::MissingHashRequiredForCHT - ))?; - pairs.push(( - encode_cht_key(start_num + hash_index).to_vec(), - encode_cht_value(hash) - )); + let hash = + hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?; + pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash))); hash_index += Header::Number::one(); if hash_index == cht_size { - break; + break } } @@ -328,7 +320,6 @@ pub fn decode_cht_value(value: &[u8]) -> Option { 32 => Some(H256::from_slice(&value[0..32])), _ => None, } - } #[cfg(test)] @@ -382,8 +373,12 @@ mod tests { #[test] fn build_pairs_fails_when_no_enough_blocks() { - assert!(build_pairs::(SIZE as _, 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)).is_err()); + assert!(build_pairs::( + SIZE as _, + 0, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2) + ) + .is_err()); } #[test] @@ -394,9 +389,12 @@ mod tests { ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) .take(SIZE as usize / 2) .chain(::std::iter::once(Ok(None))) - .chain(::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) - .take(SIZE as usize / 2 - 1)) - ).is_err()); + .chain( + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) + .take(SIZE as usize / 2 - 1) + ) + ) + .is_err()); } #[test] @@ -404,9 +402,9 @@ mod tests { assert!(compute_root::( SIZE as _, 42, -
::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_ok()); + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); } #[test] @@ -416,9 +414,9 @@ mod tests { SIZE as _, 0, vec![(SIZE * 1000) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_err()); + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_err()); } #[test] @@ -427,9 +425,9 @@ mod tests { SIZE as _, 0, vec![(SIZE / 2) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_ok()); + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); } #[test] @@ -450,19 +448,27 @@ mod tests { let _ = for_each_cht_group::( cht_size, vec![ - cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5, - cht_size * 4 + 1, cht_size * 4 + 7, - cht_size * 6 + 1 - ], |_, cht_num, blocks| { + cht_size * 2 + 1, + cht_size * 2 + 2, + cht_size * 2 + 5, + cht_size * 4 + 1, + cht_size * 4 + 7, + cht_size * 6 + 1, + ], + |_, cht_num, blocks| { match cht_num { - 2 => assert_eq!(blocks, vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]), + 2 => assert_eq!( + blocks, + vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5] + ), 4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]), 6 => assert_eq!(blocks, vec![cht_size * 6 + 1]), _ => unreachable!(), } Ok(()) - }, () + }, + (), ); } } diff --git a/client/api/src/client.rs b/client/api/src/client.rs index f97daa487638f..21f8aecad0536 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -1,33 +1,35 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! A set of APIs supported by the client along with their primitives. 
-use std::{fmt, collections::HashSet, sync::Arc, convert::TryFrom}; +use sp_consensus::BlockOrigin; use sp_core::storage::StorageKey; use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, generic::{BlockId, SignedBlock}, - Justification, + traits::{Block as BlockT, NumberFor}, + Justifications, }; -use sp_consensus::BlockOrigin; +use std::{collections::HashSet, convert::TryFrom, fmt, sync::Arc}; -use crate::blockchain::Info; -use crate::notifications::StorageEventStream; -use sp_utils::mpsc::TracingUnboundedReceiver; +use crate::{blockchain::Info, notifications::StorageEventStream}; +use sc_transaction_pool_api::ChainEvent; +use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain; /// Type that implements `futures::Stream` of block import events. @@ -79,27 +81,52 @@ pub trait BlockBackend { /// Get block body by ID. Returns `None` if the body is not stored. fn block_body( &self, - id: &BlockId + id: &BlockId, ) -> sp_blockchain::Result::Extrinsic>>>; + /// Get all indexed transactions for a block, + /// including renewed transactions. + /// + /// Note that this will only fetch transactions + /// that are indexed by the runtime with `storage_index_transaction`. + fn block_indexed_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result>>>; + /// Get full block by id. fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; /// Get block status. - fn block_status(&self, id: &BlockId) -> sp_blockchain::Result; + fn block_status(&self, id: &BlockId) + -> sp_blockchain::Result; - /// Get block justification set by id. - fn justification(&self, id: &BlockId) -> sp_blockchain::Result>; + /// Get block justifications for the block with the given id. + fn justifications(&self, id: &BlockId) -> sp_blockchain::Result>; /// Get block hash by number. fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; + + /// Get single indexed transaction by content hash. + /// + /// Note that this will only fetch transactions + /// that are indexed by the runtime with `storage_index_transaction`. + fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>>; + + /// Check if transaction index exists. + fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + Ok(self.indexed_transaction(hash)?.is_some()) + } } /// Provide a list of potential uncle headers for a given block. pub trait ProvideUncles { /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. 
- fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) - -> sp_blockchain::Result>; + fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result>; } /// Client info @@ -252,25 +279,20 @@ pub struct FinalityNotification { pub header: Block::Header, } -impl TryFrom> for sp_transaction_pool::ChainEvent { +impl TryFrom> for ChainEvent { type Error = (); fn try_from(n: BlockImportNotification) -> Result { if n.is_new_best { - Ok(Self::NewBestBlock { - hash: n.hash, - tree_route: n.tree_route, - }) + Ok(Self::NewBestBlock { hash: n.hash, tree_route: n.tree_route }) } else { Err(()) } } } -impl From> for sp_transaction_pool::ChainEvent { +impl From> for ChainEvent { fn from(n: FinalityNotification) -> Self { - Self::Finalized { - hash: n.hash, - } + Self::Finalized { hash: n.hash } } } diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 4fdd897b21579..ec44294b8a96c 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Execution extensions for runtime calls. //! @@ -20,20 +22,19 @@ //! strategy for the runtime calls and provide the right `Externalities` //! extensions to support APIs for particular execution context & capabilities. -use std::sync::{Weak, Arc}; use codec::Decode; +use parking_lot::RwLock; +use sc_transaction_pool_api::OffchainSubmitTransaction; use sp_core::{ + offchain::{self, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, ExecutionContext, - offchain::{self, OffchainExt, TransactionPoolExt}, -}; -use sp_keystore::{KeystoreExt, SyncCryptoStorePtr}; -use sp_runtime::{ - generic::BlockId, - traits, }; -use sp_state_machine::{ExecutionStrategy, ExecutionManager, DefaultHandler}; use sp_externalities::Extensions; -use parking_lot::RwLock; +use sp_keystore::{KeystoreExt, SyncCryptoStorePtr}; +use sp_runtime::{generic::BlockId, traits}; +pub use sp_state_machine::ExecutionStrategy; +use sp_state_machine::{DefaultHandler, ExecutionManager}; +use std::sync::{Arc, Weak}; /// Execution strategies settings. #[derive(Debug, Clone)] @@ -74,6 +75,18 @@ impl ExtensionsFactory for () { } } +/// Create an Offchain DB accessor object. +pub trait DbExternalitiesFactory: Send + Sync { + /// Create [`offchain::DbExternalities`] instance.
+ fn create(&self) -> Box; +} + +impl DbExternalitiesFactory for T { + fn create(&self) -> Box { + Box::new(self.clone()) + } +} + /// A producer of execution extensions for offchain calls. /// /// This crate aggregates extensions available for the offchain calls @@ -82,13 +95,14 @@ impl ExtensionsFactory for () { pub struct ExecutionExtensions { strategies: ExecutionStrategies, keystore: Option, + offchain_db: Option>, // FIXME: these two are only RwLock because of https://github.com/paritytech/substrate/issues/4587 // remove when fixed. // To break retain cycle between `Client` and `TransactionPool` we require this // extension to be a `Weak` reference. // That's also the reason why it's being registered lazily instead of // during initialization. - transaction_pool: RwLock>>>, + transaction_pool: RwLock>>>, extensions_factory: RwLock>, } @@ -97,6 +111,7 @@ impl Default for ExecutionExtensions { Self { strategies: Default::default(), keystore: None, + offchain_db: None, transaction_pool: RwLock::new(None), extensions_factory: RwLock::new(Box::new(())), } @@ -108,12 +123,14 @@ impl ExecutionExtensions { pub fn new( strategies: ExecutionStrategies, keystore: Option, + offchain_db: Option>, ) -> Self { let transaction_pool = RwLock::new(None); let extensions_factory = Box::new(()); Self { strategies, keystore, + offchain_db, extensions_factory: RwLock::new(extensions_factory), transaction_pool, } @@ -131,36 +148,15 @@ impl ExecutionExtensions { /// Register transaction pool extension. pub fn register_transaction_pool(&self, pool: &Arc) - where T: sp_transaction_pool::OffchainSubmitTransaction + 'static + where + T: OffchainSubmitTransaction + 'static, { *self.transaction_pool.write() = Some(Arc::downgrade(&pool) as _); } - /// Create `ExecutionManager` and `Extensions` for given offchain call. - /// /// Based on the execution context and capabilities it produces - /// the right manager and extensions object to support desired set of APIs. - pub fn manager_and_extensions( - &self, - at: &BlockId, - context: ExecutionContext, - ) -> ( - ExecutionManager>, - Extensions, - ) { - let manager = match context { - ExecutionContext::BlockConstruction => - self.strategies.block_construction.get_manager(), - ExecutionContext::Syncing => - self.strategies.syncing.get_manager(), - ExecutionContext::Importing => - self.strategies.importing.get_manager(), - ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => - self.strategies.offchain_worker.get_manager(), - ExecutionContext::OffchainCall(_) => - self.strategies.other.get_manager(), - }; - + /// the extensions object to support desired set of APIs. 
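The refactor below splits extension construction out of `manager_and_extensions`; the context-to-strategy mapping itself is unchanged. As a rough, self-contained picture of that mapping (stand-in enums only; the real `ExecutionContext` lives in `sp-core`):

// Simplified stand-ins; `OffchainCall(true)` models a call that was
// granted all offchain capabilities.
enum Context { BlockConstruction, Syncing, Importing, OffchainCall(bool) }

#[derive(Debug, PartialEq)]
enum Strategy { BlockConstruction, Syncing, Importing, OffchainWorker, Other }

fn strategy_for(context: &Context) -> Strategy {
    match context {
        Context::BlockConstruction => Strategy::BlockConstruction,
        Context::Syncing => Strategy::Syncing,
        Context::Importing => Strategy::Importing,
        Context::OffchainCall(true) => Strategy::OffchainWorker,
        Context::OffchainCall(_) => Strategy::Other,
    }
}

fn main() {
    // Only a fully-capable offchain call gets the offchain-worker strategy.
    assert_eq!(strategy_for(&Context::OffchainCall(false)), Strategy::Other);
}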
+ pub fn extensions(&self, at: &BlockId, context: ExecutionContext) -> Extensions { let capabilities = context.capabilities(); let mut extensions = self.extensions_factory.read().extensions_for(capabilities); @@ -173,31 +169,60 @@ impl ExecutionExtensions { if capabilities.has(offchain::Capability::TransactionPool) { if let Some(pool) = self.transaction_pool.read().as_ref().and_then(|x| x.upgrade()) { - extensions.register( - TransactionPoolExt( - Box::new(TransactionPoolAdapter { - at: *at, - pool, - }) as _ - ), - ); + extensions + .register(TransactionPoolExt( + Box::new(TransactionPoolAdapter { at: *at, pool }) as _, + )); + } + } + + if capabilities.has(offchain::Capability::OffchainDbRead) || + capabilities.has(offchain::Capability::OffchainDbWrite) + { + if let Some(offchain_db) = self.offchain_db.as_ref() { + extensions.register(OffchainDbExt::new(offchain::LimitedExternalities::new( + capabilities, + offchain_db.create(), + ))); } } if let ExecutionContext::OffchainCall(Some(ext)) = context { - extensions.register( - OffchainExt::new(offchain::LimitedExternalities::new(capabilities, ext.0)), - ); + extensions.register(OffchainWorkerExt::new(offchain::LimitedExternalities::new( + capabilities, + ext.0, + ))); } - (manager, extensions) + extensions + } + + /// Create `ExecutionManager` and `Extensions` for given offchain call. + /// + /// Based on the execution context and capabilities it produces + /// the right manager and extensions object to support desired set of APIs. + pub fn manager_and_extensions( + &self, + at: &BlockId, + context: ExecutionContext, + ) -> (ExecutionManager>, Extensions) { + let manager = match context { + ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), + ExecutionContext::Syncing => self.strategies.syncing.get_manager(), + ExecutionContext::Importing => self.strategies.importing.get_manager(), + ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => + self.strategies.offchain_worker.get_manager(), + ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(), + }; + + (manager, self.extensions(at, context)) } } /// A wrapper type to pass `BlockId` to the actual transaction pool. struct TransactionPoolAdapter { at: BlockId, - pool: Arc>, + pool: Arc>, } impl offchain::TransactionPool for TransactionPoolAdapter { @@ -205,8 +230,8 @@ impl offchain::TransactionPool for TransactionPoolAdapter< let xt = match Block::Extrinsic::decode(&mut &*data) { Ok(xt) => xt, Err(e) => { - log::warn!("Unable to decode extrinsic: {:?}: {}", data, e.what()); - return Err(()); + log::warn!("Unable to decode extrinsic: {:?}: {}", data, e); + return Err(()) }, }; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index ded030fb8046f..e8fce19f8124e 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,30 +18,31 @@ //! 
In memory client backend -use std::collections::{HashMap, HashSet}; -use std::ptr; -use std::sync::Arc; use parking_lot::RwLock; +use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; use sp_core::{ - storage::well_known_keys, offchain::storage::InMemOffchainStorage as OffchainStorage, + offchain::storage::InMemOffchainStorage as OffchainStorage, storage::well_known_keys, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, Zero}, + Justification, Justifications, Storage, }; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; -use sp_runtime::{Justification, Storage}; use sp_state_machine::{ - ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, - ChildStorageCollection, + Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, + IndexOperation, StorageCollection, +}; +use std::{ + collections::{HashMap, HashSet}, + ptr, + sync::Arc, }; -use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; use crate::{ backend::{self, NewBlockState, ProvideChtRoots}, - blockchain::{ - self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId - }, - UsageInfo, - light, + blockchain::{self, well_known_cache_keys::Id as CacheKeyId, BlockStatus, HeaderBackend}, leaves::LeafSet, + light, UsageInfo, }; struct PendingBlock { @@ -51,12 +52,16 @@ struct PendingBlock { #[derive(PartialEq, Eq, Clone)] enum StoredBlock { - Header(B::Header, Option), - Full(B, Option), + Header(B::Header, Option), + Full(B, Option), } impl StoredBlock { - fn new(header: B::Header, body: Option>, just: Option) -> Self { + fn new( + header: B::Header, + body: Option>, + just: Option, + ) -> Self { match body { Some(body) => StoredBlock::Full(B::new(header, body), just), None => StoredBlock::Header(header, just), @@ -70,9 +75,9 @@ impl StoredBlock { } } - fn justification(&self) -> Option<&Justification> { + fn justifications(&self) -> Option<&Justifications> { match *self { - StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref() + StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref(), } } @@ -83,13 +88,13 @@ impl StoredBlock { } } - fn into_inner(self) -> (B::Header, Option>, Option) { + fn into_inner(self) -> (B::Header, Option>, Option) { match self { StoredBlock::Header(header, just) => (header, None, just), StoredBlock::Full(block, just) => { let (header, body) = block.deconstruct(); (header, Some(body), just) - } + }, } } } @@ -123,9 +128,7 @@ impl Default for Blockchain { impl Clone for Blockchain { fn clone(&self) -> Self { let storage = Arc::new(RwLock::new(self.storage.read().clone())); - Blockchain { - storage, - } + Blockchain { storage } } } @@ -140,23 +143,20 @@ impl Blockchain { /// Create new in-memory blockchain storage. 
pub fn new() -> Blockchain { - let storage = Arc::new(RwLock::new( - BlockchainStorage { - blocks: HashMap::new(), - hashes: HashMap::new(), - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - header_cht_roots: HashMap::new(), - changes_trie_cht_roots: HashMap::new(), - leaves: LeafSet::new(), - aux: HashMap::new(), - })); - Blockchain { - storage, - } + let storage = Arc::new(RwLock::new(BlockchainStorage { + blocks: HashMap::new(), + hashes: HashMap::new(), + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + header_cht_roots: HashMap::new(), + changes_trie_cht_roots: HashMap::new(), + leaves: LeafSet::new(), + aux: HashMap::new(), + })); + Blockchain { storage } } /// Insert a block header and associated data. @@ -164,7 +164,7 @@ impl Blockchain { &self, hash: Block::Hash, header: ::Header, - justification: Option, + justifications: Option, body: Option::Extrinsic>>, new_state: NewBlockState, ) -> sp_blockchain::Result<()> { @@ -175,8 +175,12 @@ impl Blockchain { { let mut storage = self.storage.write(); - storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone()); - storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification)); + storage + .leaves + .import(hash.clone(), number.clone(), header.parent_hash().clone()); + storage + .blocks + .insert(hash.clone(), StoredBlock::new(header, body, justifications)); if let NewBlockState::Final = new_state { storage.finalized_hash = hash; @@ -200,7 +204,7 @@ impl Blockchain { pub fn equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true; + return true } self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks } @@ -209,14 +213,14 @@ impl Blockchain { pub fn canon_equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true; + return true } let this = self.storage.read(); let other = other.storage.read(); - this.hashes == other.hashes - && this.best_hash == other.best_hash - && this.best_number == other.best_number - && this.genesis_hash == other.genesis_hash + this.hashes == other.hashes && + this.best_hash == other.best_hash && + this.best_number == other.best_number && + this.genesis_hash == other.genesis_hash } /// Insert header CHT root. @@ -226,10 +230,9 @@ impl Blockchain { /// Set an existing block as head. pub fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { - let header = match self.header(id)? { - Some(h) => h, - None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), - }; + let header = self + .header(id)? + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))?; self.apply_head(&header) } @@ -272,7 +275,11 @@ impl Blockchain { Ok(()) } - fn finalize_header(&self, id: BlockId, justification: Option) -> sp_blockchain::Result<()> { + fn finalize_header( + &self, + id: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()> { let hash = match self.header(id)? 
{ Some(h) => h.hash(), None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), @@ -282,19 +289,51 @@ impl Blockchain { storage.finalized_hash = hash; if justification.is_some() { - let block = storage.blocks.get_mut(&hash) + let block = storage + .blocks + .get_mut(&hash) .expect("hash was fetched from a block in the db; qed"); - let block_justification = match block { - StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j + let block_justifications = match block { + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, }; - *block_justification = justification; + *block_justifications = justification.map(Justifications::from); } Ok(()) } + fn append_justification( + &self, + id: BlockId, + justification: Justification, + ) -> sp_blockchain::Result<()> { + let hash = self.expect_block_hash_from_id(&id)?; + let mut storage = self.storage.write(); + + let block = storage + .blocks + .get_mut(&hash) + .expect("hash was fetched from a block in the db; qed"); + + let block_justifications = match block { + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, + }; + + if let Some(stored_justifications) = block_justifications { + if !stored_justifications.append(justification) { + return Err(sp_blockchain::Error::BadJustification( + "Duplicate consensus engine ID".into(), + )) + } + } else { + *block_justifications = Some(Justifications::from(justification)); + }; + + Ok(()) + } + fn write_aux(&self, ops: Vec<(Vec, Option>)>) { let mut storage = self.storage.write(); for (k, v) in ops { @@ -307,10 +346,13 @@ impl Blockchain { } impl HeaderBackend for Blockchain { - fn header(&self, id: BlockId) -> sp_blockchain::Result::Header>> { - Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash).map(|b| b.header().clone()) - })) + fn header( + &self, + id: BlockId, + ) -> sp_blockchain::Result::Header>> { + Ok(self + .id(id) + .and_then(|hash| self.storage.read().blocks.get(&hash).map(|b| b.header().clone()))) } fn info(&self) -> blockchain::Info { @@ -321,7 +363,12 @@ impl HeaderBackend for Blockchain { genesis_hash: storage.genesis_hash, finalized_hash: storage.finalized_hash, finalized_number: storage.finalized_number, - number_leaves: storage.leaves.count() + finalized_state: if storage.finalized_hash != Default::default() { + Some((storage.finalized_hash.clone(), storage.finalized_number)) + } else { + None + }, + number_leaves: storage.leaves.count(), } } @@ -336,7 +383,10 @@ impl HeaderBackend for Blockchain { Ok(self.storage.read().blocks.get(&hash).map(|b| *b.header().number())) } - fn hash(&self, number: <::Header as HeaderT>::Number) -> sp_blockchain::Result> { + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> sp_blockchain::Result> { Ok(self.id(BlockId::Number(number))) } } @@ -344,9 +394,15 @@ impl HeaderBackend for Blockchain { impl HeaderMetadata for Blockchain { type Error = sp_blockchain::Error; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) - .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash))) + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header(BlockId::hash(hash))? 
+ .map(|header| CachedHeaderMetadata::from(&header)) + .ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash)) + }) } fn insert_header_metadata(&self, _hash: Block::Hash, _metadata: CachedHeaderMetadata) { @@ -358,17 +414,27 @@ impl HeaderMetadata for Blockchain { } impl blockchain::Backend for Blockchain { - fn body(&self, id: BlockId) -> sp_blockchain::Result::Extrinsic>>> { + fn body( + &self, + id: BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash) + self.storage + .read() + .blocks + .get(&hash) .and_then(|b| b.extrinsics().map(|x| x.to_vec())) })) } - fn justification(&self, id: BlockId) -> sp_blockchain::Result> { - Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b| - b.justification().map(|x| x.clone())) - )) + fn justifications(&self, id: BlockId) -> sp_blockchain::Result> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.justifications().map(|x| x.clone())) + })) } fn last_finalized(&self) -> sp_blockchain::Result { @@ -386,6 +452,17 @@ impl blockchain::Backend for Blockchain { fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result> { unimplemented!() } + + fn indexed_transaction(&self, _hash: &Block::Hash) -> sp_blockchain::Result>> { + unimplemented!("Not supported by the in-mem backend.") + } + + fn block_indexed_body( + &self, + _id: BlockId, + ) -> sp_blockchain::Result>>> { + unimplemented!("Not supported by the in-mem backend.") + } } impl blockchain::ProvideCache for Blockchain { @@ -399,9 +476,13 @@ impl backend::AuxStore for Blockchain { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { let mut storage = self.storage.write(); for (k, v) in insert { storage.aux.insert(k.to_vec(), v.to_vec()); @@ -418,8 +499,8 @@ impl backend::AuxStore for Blockchain { } impl light::Storage for Blockchain - where - Block::Hash: From<[u8; 32]>, +where + Block::Hash: From<[u8; 32]>, { fn import_header( &self, @@ -462,8 +543,14 @@ impl ProvideChtRoots for Blockchain { _cht_size: NumberFor, block: NumberFor, ) -> sp_blockchain::Result> { - self.storage.read().header_cht_roots.get(&block).cloned() - .ok_or_else(|| sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block))) + self.storage + .read() + .header_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block)) + }) .map(Some) } @@ -472,8 +559,17 @@ impl ProvideChtRoots for Blockchain { _cht_size: NumberFor, block: NumberFor, ) -> sp_blockchain::Result> { - self.storage.read().changes_trie_cht_roots.get(&block).cloned() - .ok_or_else(|| sp_blockchain::Error::Backend(format!("Changes trie CHT for block {} not exists", block))) + self.storage + .read() + .changes_trie_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!( + "Changes trie CHT for block {} not exists", + block + )) + }) .map(Some) } } @@ -481,15 +577,46 @@ impl ProvideChtRoots for Blockchain { /// In-memory operation. 
pub struct BlockImportOperation { pending_block: Option>, - pending_cache: HashMap>, old_state: InMemoryBackend>, - new_state: Option<> as StateBackend>>::Transaction>, + new_state: + Option<> as StateBackend>>::Transaction>, aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, } -impl backend::BlockImportOperation for BlockImportOperation where +impl BlockImportOperation +where + Block::Hash: Ord, +{ + fn apply_storage( + &mut self, + storage: Storage, + commit: bool, + ) -> sp_blockchain::Result { + check_genesis_storage(&storage)?; + + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + ) + }); + + let (root, transaction) = self.old_state.full_storage_root( + storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + child_delta, + ); + + if commit { + self.new_state = Some(transaction); + } + Ok(root) + } +} + +impl backend::BlockImportOperation for BlockImportOperation +where Block::Hash: Ord, { type State = InMemoryBackend>; @@ -502,20 +629,17 @@ impl backend::BlockImportOperation for BlockImportOperatio &mut self, header: ::Header, body: Option::Extrinsic>>, - justification: Option, + _indexed_body: Option>>, + justifications: Option, state: NewBlockState, ) -> sp_blockchain::Result<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - self.pending_block = Some(PendingBlock { - block: StoredBlock::new(header, body, justification), - state, - }); + self.pending_block = + Some(PendingBlock { block: StoredBlock::new(header, body, justifications), state }); Ok(()) } - fn update_cache(&mut self, cache: HashMap>) { - self.pending_cache = cache; - } + fn update_cache(&mut self, _cache: HashMap>) {} fn update_db_storage( &mut self, @@ -532,28 +656,21 @@ impl backend::BlockImportOperation for BlockImportOperatio Ok(()) } - fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { - check_genesis_storage(&storage)?; - - let child_delta = storage.children_default.iter() - .map(|(_storage_key, child_content)| - ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))) - ) - ); - - let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - child_delta, - ); + fn set_genesis_state( + &mut self, + storage: Storage, + commit: bool, + ) -> sp_blockchain::Result { + self.apply_storage(storage, commit) + } - self.new_state = Some(transaction); - Ok(root) + fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { + self.apply_storage(storage, true) } fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { self.aux.append(&mut ops.into_iter().collect()); Ok(()) @@ -581,19 +698,32 @@ impl backend::BlockImportOperation for BlockImportOperatio self.set_head = Some(block); Ok(()) } + + fn update_transaction_index( + &mut self, + _index: Vec, + ) -> sp_blockchain::Result<()> { + Ok(()) + } } /// In-memory backend. Keeps all states and blocks in memory. /// /// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this /// > struct for testing purposes. Do **NOT** use in production. 
-pub struct Backend where Block::Hash: Ord { +pub struct Backend +where + Block::Hash: Ord, +{ states: RwLock>>>, blockchain: Blockchain, import_lock: RwLock<()>, } -impl Backend where Block::Hash: Ord { +impl Backend +where + Block::Hash: Ord, +{ /// Create a new instance of in-mem backend. pub fn new() -> Self { Backend { @@ -604,14 +734,21 @@ impl Backend where Block::Hash: Ord { } } -impl backend::AuxStore for Backend where Block::Hash: Ord { +impl backend::AuxStore for Backend +where + Block::Hash: Ord, +{ fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { self.blockchain.insert_aux(insert, delete) } @@ -620,7 +757,10 @@ impl backend::AuxStore for Backend where Block::Hash: Ord } } -impl backend::Backend for Backend where Block::Hash: Ord { +impl backend::Backend for Backend +where + Block::Hash: Ord, +{ type BlockImportOperation = BlockImportOperation; type Blockchain = Blockchain; type State = InMemoryBackend>; @@ -630,7 +770,6 @@ impl backend::Backend for Backend where Block::Hash let old_state = self.state_at(BlockId::Hash(Default::default()))?; Ok(BlockImportOperation { pending_block: None, - pending_cache: Default::default(), old_state, new_state: None, aux: Default::default(), @@ -648,10 +787,7 @@ impl backend::Backend for Backend where Block::Hash Ok(()) } - fn commit_operation( - &self, - operation: Self::BlockImportOperation, - ) -> sp_blockchain::Result<()> { + fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { if !operation.finalized_blocks.is_empty() { for (block, justification) in operation.finalized_blocks { self.blockchain.finalize_header(block, justification)?; @@ -693,6 +829,14 @@ impl backend::Backend for Backend where Block::Hash self.blockchain.finalize_header(block, justification) } + fn append_justification( + &self, + block: BlockId, + justification: Justification, + ) -> sp_blockchain::Result<()> { + self.blockchain.append_justification(block, justification) + } + fn blockchain(&self) -> &Self::Blockchain { &self.blockchain } @@ -711,16 +855,14 @@ impl backend::Backend for Backend where Block::Hash fn state_at(&self, block: BlockId) -> sp_blockchain::Result { match block { - BlockId::Hash(h) if h == Default::default() => { - return Ok(Self::State::default()); - }, + BlockId::Hash(h) if h == Default::default() => return Ok(Self::State::default()), _ => {}, } - match self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) { - Some(state) => Ok(state), - None => Err(sp_blockchain::Error::UnknownBlock(format!("{}", block))), - } + self.blockchain + .id(block) + .and_then(|id| self.states.read().get(&id).cloned()) + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", block))) } fn revert( @@ -731,6 +873,10 @@ impl backend::Backend for Backend where Block::Hash Ok((Zero::zero(), HashSet::new())) } + fn remove_leaf_block(&self, _hash: &Block::Hash) -> sp_blockchain::Result<()> { + Ok(()) + } + fn get_import_lock(&self) -> &RwLock<()> { &self.import_lock } @@ -738,9 +884,13 @@ impl backend::Backend for Backend where Block::Hash impl backend::LocalBackend for Backend where Block::Hash: Ord {} -impl backend::RemoteBackend for Backend where Block::Hash: Ord { +impl backend::RemoteBackend for Backend +where + Block::Hash: Ord, +{ fn is_local_state_available(&self, block: &BlockId) -> bool { - 
self.blockchain.expect_block_number_from_id(block) + self.blockchain + .expect_block_number_from_id(block) .map(|num| num.is_zero()) .unwrap_or(false) } @@ -753,13 +903,91 @@ impl backend::RemoteBackend for Backend where Block /// Check that genesis storage is valid. pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + return Err(sp_blockchain::Error::InvalidState.into()) } - if storage.children_default.keys() - .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + if storage + .children_default + .keys() + .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) + { + return Err(sp_blockchain::Error::InvalidState.into()) } Ok(()) } + +#[cfg(test)] +mod tests { + use crate::{in_mem::Blockchain, NewBlockState}; + use sp_api::{BlockId, HeaderT}; + use sp_blockchain::Backend; + use sp_runtime::{ConsensusEngineId, Justifications}; + use substrate_test_runtime::{Block, Header, H256}; + + pub const ID1: ConsensusEngineId = *b"TST1"; + pub const ID2: ConsensusEngineId = *b"TST2"; + + fn header(number: u64) -> Header { + let parent_hash = match number { + 0 => Default::default(), + _ => header(number - 1).hash(), + }; + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(0), + parent_hash, + Default::default(), + ) + } + + fn test_blockchain() -> Blockchain { + let blockchain = Blockchain::::new(); + let just0 = Some(Justifications::from((ID1, vec![0]))); + let just1 = Some(Justifications::from((ID1, vec![1]))); + let just2 = None; + let just3 = Some(Justifications::from((ID1, vec![3]))); + blockchain + .insert(header(0).hash(), header(0), just0, None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(1).hash(), header(1), just1, None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(2).hash(), header(2), just2, None, NewBlockState::Best) + .unwrap(); + blockchain + .insert(header(3).hash(), header(3), just3, None, NewBlockState::Final) + .unwrap(); + blockchain + } + + #[test] + fn append_and_retrieve_justifications() { + let blockchain = test_blockchain(); + let last_finalized = blockchain.last_finalized().unwrap(); + let block = BlockId::Hash(last_finalized); + + blockchain.append_justification(block, (ID2, vec![4])).unwrap(); + let justifications = { + let mut just = Justifications::from((ID1, vec![3])); + just.append((ID2, vec![4])); + just + }; + assert_eq!(blockchain.justifications(block).unwrap(), Some(justifications)); + } + + #[test] + fn store_duplicate_justifications_is_forbidden() { + let blockchain = test_blockchain(); + let last_finalized = blockchain.last_finalized().unwrap(); + let block = BlockId::Hash(last_finalized); + + blockchain.append_justification(block, (ID2, vec![0])).unwrap(); + assert!(matches!( + blockchain.append_justification(block, (ID2, vec![1])), + Err(sp_blockchain::Error::BadJustification(_)), + )); + } +} diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index d10fa7ac0e565..80216bc4664bd 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,14 +18,13 @@ //! Helper for managing the set of available leaves in the chain for DB implementations. -use std::collections::BTreeMap; -use std::cmp::Reverse; +use codec::{Decode, Encode}; +use sp_blockchain::{Error, Result}; use sp_database::{Database, Transaction}; use sp_runtime::traits::AtLeast32Bit; -use codec::{Encode, Decode}; -use sp_blockchain::{Error, Result}; +use std::{cmp::Reverse, collections::BTreeMap}; -type DbHash = [u8; 32]; +type DbHash = sp_core::H256; #[derive(Debug, Clone, PartialEq, Eq)] struct LeafSetItem { @@ -55,6 +54,11 @@ impl FinalizationDisplaced { // one transaction, then there will be no overlap in the keys. self.leaves.append(&mut other.leaves); } + + /// Iterate over all displaced leaves. + pub fn leaves(&self) -> impl IntoIterator { + self.leaves.values().flatten() + } } /// list of leaf hashes ordered by number (descending). @@ -67,17 +71,14 @@ pub struct LeafSet { pending_removed: Vec, } -impl LeafSet where +impl LeafSet +where H: Clone + PartialEq + Decode + Encode, N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { /// Construct a new, blank leaf set. pub fn new() -> Self { - Self { - storage: BTreeMap::new(), - pending_added: Vec::new(), - pending_removed: Vec::new(), - } + Self { storage: BTreeMap::new(), pending_added: Vec::new(), pending_removed: Vec::new() } } /// Read the leaf list from the DB, using given prefix for keys. @@ -93,14 +94,10 @@ impl LeafSet where for (number, hashes) in vals.into_iter() { storage.insert(Reverse(number), hashes); } - } + }, None => {}, } - Ok(Self { - storage, - pending_added: Vec::new(), - pending_removed: Vec::new(), - }) + Ok(Self { storage, pending_added: Vec::new(), pending_removed: Vec::new() }) } /// update the leaf list on import. returns a displaced leaf if there was one. @@ -114,10 +111,7 @@ impl LeafSet where self.pending_removed.push(parent_hash.clone()); Some(ImportDisplaced { new_hash: hash.clone(), - displaced: LeafSetItem { - hash: parent_hash, - number: new_number, - }, + displaced: LeafSetItem { hash: parent_hash, number: new_number }, }) } else { None @@ -131,7 +125,8 @@ impl LeafSet where displaced } - /// Note a block height finalized, displacing all leaves with number less than the finalized block's. + /// Note a block height finalized, displacing all leaves with number less than the finalized + /// block's. /// /// Although it would be more technically correct to also prune out leaves at the /// same number as the finalized block, but with different hashes, the current behavior @@ -139,16 +134,15 @@ impl LeafSet where /// will be pruned soon afterwards anyway. pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() }; + return FinalizationDisplaced { leaves: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.split_off(&Reverse(boundary)); - self.pending_removed.extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); - FinalizationDisplaced { - leaves: below_boundary, - } + self.pending_removed + .extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); + FinalizationDisplaced { leaves: below_boundary } } /// Undo all pending operations. 
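Before the remaining hunks, a small usage sketch of the leaf-set bookkeeping above, using plain `u32`s for both hashes and numbers as this file's own tests do (the `sc_client_api::leaves` path assumes this crate's published name):

use sc_client_api::leaves::LeafSet;

fn main() {
    let mut set: LeafSet<u32, u32> = LeafSet::new();

    // Two siblings at height 10, both children of block 0: two leaves.
    set.import(11, 10, 0);
    set.import(12, 10, 0);
    assert!(set.contains(10, 11) && set.contains(10, 12));

    // Importing a child of 11 displaces it: 11 stops being a leaf.
    assert!(set.import(21, 11, 11).is_some());
    assert!(!set.contains(10, 11) && set.contains(11, 21));

    // `finalize_height(11)` displaces every leaf below the finalized
    // height, pruning the stale sibling 12 while 21 survives.
    let _displaced = set.finalize_height(11);
    assert!(set.contains(11, 21) && !set.contains(10, 12));
}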
@@ -164,7 +158,9 @@ impl LeafSet where /// Revert to the given block height by dropping all leaves in the leaf set /// with a block number higher than the target. pub fn revert(&mut self, best_hash: H, best_number: N) { - let items = self.storage.iter() + let items = self + .storage + .iter() .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.clone()))) .collect::>(); @@ -180,7 +176,8 @@ impl LeafSet where } let best_number = Reverse(best_number); - let leaves_contains_best = self.storage + let leaves_contains_best = self + .storage .get(&best_number) .map_or(false, |hashes| hashes.contains(&best_hash)); @@ -204,16 +201,23 @@ impl LeafSet where } /// Write the leaf list to the database transaction. - pub fn prepare_transaction(&mut self, tx: &mut Transaction, column: u32, prefix: &[u8]) { + pub fn prepare_transaction( + &mut self, + tx: &mut Transaction, + column: u32, + prefix: &[u8], + ) { let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); tx.set_from_vec(column, prefix, leaves.encode()); self.pending_added.clear(); self.pending_removed.clear(); } - #[cfg(test)] - fn contains(&self, number: N, hash: H) -> bool { - self.storage.get(&Reverse(number)).map_or(false, |hashes| hashes.contains(&hash)) + /// Check if given block is a leaf. + pub fn contains(&self, number: N, hash: H) -> bool { + self.storage + .get(&Reverse(number)) + .map_or(false, |hashes| hashes.contains(&hash)) } fn insert_leaf(&mut self, number: Reverse, hash: H) { @@ -225,14 +229,18 @@ impl LeafSet where let mut empty = false; let removed = self.storage.get_mut(number).map_or(false, |leaves| { let mut found = false; - leaves.retain(|h| if h == hash { - found = true; - false - } else { - true + leaves.retain(|h| { + if h == hash { + found = true; + false + } else { + true + } }); - if leaves.is_empty() { empty = true } + if leaves.is_empty() { + empty = true + } found }); @@ -250,7 +258,8 @@ pub struct Undo<'a, H: 'a, N: 'a> { inner: &'a mut LeafSet, } -impl<'a, H: 'a, N: 'a> Undo<'a, H, N> where +impl<'a, H: 'a, N: 'a> Undo<'a, H, N> +where H: Clone + PartialEq + Decode + Encode, N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { @@ -324,7 +333,7 @@ mod tests { fn two_leaves_same_height_can_be_included() { let mut set = LeafSet::new(); - set.import(1_1u32, 10u32,0u32); + set.import(1_1u32, 10u32, 0u32); set.import(1_2, 10, 0); assert!(set.storage.contains_key(&Reverse(10))); diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 677066936330e..16935b1e846cf 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -1,46 +1,48 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Substrate client interfaces. #![warn(missing_docs)] pub mod backend; pub mod call_executor; -pub mod client; pub mod cht; +pub mod client; pub mod execution_extensions; pub mod in_mem; -pub mod light; pub mod leaves; +pub mod light; pub mod notifications; pub mod proof_provider; -pub use sp_blockchain as blockchain; pub use backend::*; -pub use notifications::*; pub use call_executor::*; pub use client::*; pub use light::*; pub use notifications::*; pub use proof_provider::*; +pub use sp_blockchain as blockchain; +pub use sp_blockchain::HeaderBackend; -pub use sp_state_machine::{StorageProof, ExecutionStrategy}; +pub use sp_state_machine::{ExecutionStrategy, StorageProof}; +pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; /// Usage Information Provider interface -/// pub trait UsageProvider { /// Get usage info about current client. fn usage_info(&self) -> ClientInfo; @@ -48,7 +50,7 @@ pub trait UsageProvider { /// Utility methods for the client. pub mod utils { - use sp_blockchain::{HeaderBackend, HeaderMetadata, Error}; + use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::Block as BlockT; use std::borrow::Borrow; @@ -62,19 +64,24 @@ pub mod utils { client: &'a T, current: Option<(Block::Hash, Block::Hash)>, ) -> impl Fn(&Block::Hash, &Block::Hash) -> Result + 'a - where T: HeaderBackend + HeaderMetadata, + where + T: HeaderBackend + HeaderMetadata, { move |base, hash| { - if base == hash { return Ok(false); } + if base == hash { + return Ok(false) + } let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); let mut hash = hash; if let Some((current_hash, current_parent_hash)) = current { - if base == current_hash { return Ok(false); } + if base == current_hash { + return Ok(false) + } if hash == current_hash { if base == current_parent_hash { - return Ok(true); + return Ok(true) } else { hash = current_parent_hash; } diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 144851dac0075..8638ddf741f30 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -1,38 +1,43 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! 
Substrate light client interfaces -use std::sync::Arc; -use std::collections::{BTreeMap, HashMap}; -use std::future::Future; +use std::{ + collections::{BTreeMap, HashMap}, + future::Future, + sync::Arc, +}; -use sp_runtime::{ - traits::{ - Block as BlockT, Header as HeaderT, NumberFor, - }, - generic::BlockId +use crate::{ + backend::{AuxStore, NewBlockState}, + ProvideChtRoots, UsageInfo, }; -use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; -use sp_state_machine::StorageProof; use sp_blockchain::{ - HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, - Error as ClientError, Result as ClientResult, + well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderBackend, + HeaderMetadata, Result as ClientResult, +}; +use sp_core::{storage::PrefixedStorageKey, ChangesTrieConfigurationRange}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, }; -use crate::{backend::{AuxStore, NewBlockState}, UsageInfo, ProvideChtRoots}; +use sp_state_machine::StorageProof; /// Remote call request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] @@ -140,48 +145,48 @@ pub struct RemoteBodyRequest { /// is correct (see FetchedDataChecker) and return already checked data. pub trait Fetcher: Send + Sync { /// Remote header future. - type RemoteHeaderResult: Future> + Unpin + Send + 'static; + type RemoteHeaderResult: Future> + + Unpin + + Send + + 'static; /// Remote storage read future. - type RemoteReadResult: Future, Option>>, - ClientError, - >> + Unpin + Send + 'static; + type RemoteReadResult: Future, Option>>, ClientError>> + + Unpin + + Send + + 'static; /// Remote call result future. - type RemoteCallResult: Future, - ClientError, - >> + Unpin + Send + 'static; + type RemoteCallResult: Future, ClientError>> + Unpin + Send + 'static; /// Remote changes result future. - type RemoteChangesResult: Future, u32)>, - ClientError, - >> + Unpin + Send + 'static; + type RemoteChangesResult: Future, u32)>, ClientError>> + + Unpin + + Send + + 'static; /// Remote block body result future. - type RemoteBodyResult: Future, - ClientError, - >> + Unpin + Send + 'static; + type RemoteBodyResult: Future, ClientError>> + + Unpin + + Send + + 'static; /// Fetch remote header. - fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult; - /// Fetch remote storage value. - fn remote_read( + fn remote_header( &self, - request: RemoteReadRequest - ) -> Self::RemoteReadResult; + request: RemoteHeaderRequest, + ) -> Self::RemoteHeaderResult; + /// Fetch remote storage value. + fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult; /// Fetch remote storage child value. fn remote_read_child( &self, - request: RemoteReadChildRequest + request: RemoteReadChildRequest, ) -> Self::RemoteReadResult; /// Fetch remote call result. fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; /// Fetch remote changes ((block number, extrinsic index)) where given key has been changed /// at a given blocks range. 
- fn remote_changes(&self, request: RemoteChangesRequest) -> Self::RemoteChangesResult; + fn remote_changes( + &self, + request: RemoteChangesRequest, + ) -> Self::RemoteChangesResult; /// Fetch remote block body fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult; } @@ -220,20 +225,22 @@ pub trait FetchChecker: Send + Sync { fn check_changes_proof( &self, request: &RemoteChangesRequest, - proof: ChangesProof + proof: ChangesProof, ) -> ClientResult, u32)>>; /// Check remote body proof. fn check_body_proof( &self, request: &RemoteBodyRequest, - body: Vec + body: Vec, ) -> ClientResult>; } - /// Light client blockchain storage. -pub trait Storage: AuxStore + HeaderBackend - + HeaderMetadata + ProvideChtRoots +pub trait Storage: + AuxStore + + HeaderBackend + + HeaderMetadata + + ProvideChtRoots { /// Store new header. Should refuse to revert any finalized blocks. /// @@ -278,10 +285,10 @@ pub enum LocalOrRemote { /// locally, or fetches required data from remote node. pub trait RemoteBlockchain: Send + Sync { /// Get block header. - fn header(&self, id: BlockId) -> ClientResult, - >>; + fn header( + &self, + id: BlockId, + ) -> ClientResult>>; } /// Returns future that resolves header either locally, or remotely. @@ -293,11 +300,8 @@ pub fn future_header>( use futures::future::{ready, Either, FutureExt}; match blockchain.header(id) { - Ok(LocalOrRemote::Remote(request)) => Either::Left( - fetcher - .remote_header(request) - .then(|header| ready(header.map(Some))) - ), + Ok(LocalOrRemote::Remote(request)) => + Either::Left(fetcher.remote_header(request).then(|header| ready(header.map(Some)))), Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))), Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))), Err(err) => Either::Right(ready(Err(err))), @@ -306,19 +310,26 @@ pub fn future_header>( #[cfg(test)] pub mod tests { + use super::*; use futures::future::Ready; use parking_lot::Mutex; use sp_blockchain::Error as ClientError; - use sp_test_primitives::{Block, Header, Extrinsic}; - use super::*; + use sp_test_primitives::{Block, Extrinsic, Header}; + + #[derive(Debug, thiserror::Error)] + #[error("Not implemented on test node")] + struct MockError; + + impl Into for MockError { + fn into(self) -> ClientError { + ClientError::Application(Box::new(self)) + } + } pub type OkCallFetcher = Mutex>; - fn not_implemented_in_tests() -> Ready> - where - E: std::convert::From<&'static str>, - { - futures::future::ready(Err("Not implemented on test node".into())) + fn not_implemented_in_tests() -> Ready> { + futures::future::ready(Err(MockError.into())) } impl Fetcher for OkCallFetcher { @@ -336,7 +347,10 @@ pub mod tests { not_implemented_in_tests() } - fn remote_read_child(&self, _request: RemoteReadChildRequest
<Header>) -> Self::RemoteReadResult { + fn remote_read_child( + &self, + _request: RemoteReadChildRequest<Header>
, + ) -> Self::RemoteReadResult { not_implemented_in_tests() } @@ -344,7 +358,10 @@ pub mod tests { futures::future::ready(Ok((*self.lock()).clone())) } - fn remote_changes(&self, _request: RemoteChangesRequest
<Header>) -> Self::RemoteChangesResult { + fn remote_changes( + &self, + _request: RemoteChangesRequest<Header>
, + ) -> Self::RemoteChangesResult { not_implemented_in_tests() } diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index ec63c372c7e59..1346afd5e54d2 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,15 +19,15 @@ //! Storage notifications use std::{ - collections::{HashSet, HashMap}, + collections::{HashMap, HashSet}, sync::Arc, }; -use fnv::{FnvHashSet, FnvHashMap}; -use sp_core::storage::{StorageKey, StorageData}; +use fnv::{FnvHashMap, FnvHashSet}; +use prometheus_endpoint::{register, CounterVec, Opts, Registry, U64}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_core::storage::{StorageData, StorageKey}; use sp_runtime::traits::Block as BlockT; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; -use prometheus_endpoint::{Registry, CounterVec, Opts, U64, register}; /// Storage change set #[derive(Debug)] @@ -40,29 +40,33 @@ pub struct StorageChangeSet { impl StorageChangeSet { /// Convert the change set into iterator over storage items. - pub fn iter<'a>(&'a self) - -> impl Iterator, &'a StorageKey, Option<&'a StorageData>)> + 'a { - let top = self.changes + pub fn iter<'a>( + &'a self, + ) -> impl Iterator, &'a StorageKey, Option<&'a StorageData>)> + 'a + { + let top = self + .changes .iter() .filter(move |&(key, _)| match self.filter { Some(ref filter) => filter.contains(key), None => true, }) - .map(move |(k,v)| (None, k, v.as_ref())); - let children = self.child_changes + .map(move |(k, v)| (None, k, v.as_ref())); + let children = self + .child_changes .iter() .filter_map(move |(sk, changes)| { - if let Some(cf) = self.child_filters.as_ref() { - if let Some(filter) = cf.get(sk) { - Some(changes + self.child_filters.as_ref().and_then(|cf| { + cf.get(sk).map(|filter| { + changes .iter() .filter(move |&(key, _)| match filter { Some(ref filter) => filter.contains(key), None => true, }) - .map(move |(k,v)| (Some(sk), k, v.as_ref()))) - } else { None } - } else { None } + .map(move |(k, v)| (Some(sk), k, v.as_ref())) + }) + }) }) .flatten(); top.chain(children) @@ -83,15 +87,18 @@ pub struct StorageNotifications { next_id: SubscriberId, wildcard_listeners: FnvHashSet, listeners: HashMap>, - child_listeners: HashMap>, - FnvHashSet - )>, - sinks: FnvHashMap, - Option>, - Option>>>, - )>, + child_listeners: HashMap< + StorageKey, + (HashMap>, FnvHashSet), + >, + sinks: FnvHashMap< + SubscriberId, + ( + TracingUnboundedSender<(Block::Hash, StorageChangeSet)>, + Option>, + Option>>>, + ), + >, } impl Default for StorageNotifications { @@ -111,16 +118,17 @@ impl StorageNotifications { /// Initialize a new StorageNotifications /// optionally pass a prometheus registry to send subscriber metrics to pub fn new(prometheus_registry: Option) -> Self { - let metrics = prometheus_registry.and_then(|r| + let metrics = prometheus_registry.and_then(|r| { CounterVec::new( Opts::new( "storage_notification_subscribers", - "Number of subscribers in storage notification sytem" + "Number of subscribers in storage notification sytem", ), - &["action"], //added | removed - ).and_then(|g| register(g, &r)) + &["action"], // added | removed + ) + 
.and_then(|g| register(g, &r)) .ok() - ); + }); StorageNotifications { metrics, @@ -138,17 +146,16 @@ impl StorageNotifications { pub fn trigger( &mut self, hash: &Block::Hash, - changeset: impl Iterator, Option>)>, + changeset: impl Iterator, Option>)>, child_changeset: impl Iterator< - Item=(Vec, impl Iterator, Option>)>) + Item = (Vec, impl Iterator, Option>)>), >, ) { - let has_wildcard = !self.wildcard_listeners.is_empty(); // early exit if no listeners if !has_wildcard && self.listeners.is_empty() && self.child_listeners.is_empty() { - return; + return } let mut subscribers = self.wildcard_listeners.clone(); @@ -194,24 +201,29 @@ impl StorageNotifications { // Don't send empty notifications if changes.is_empty() && child_changes.is_empty() { - return; + return } let changes = Arc::new(changes); let child_changes = Arc::new(child_changes); // Trigger the events - let to_remove = self.sinks + let to_remove = self + .sinks .iter() .filter_map(|(subscriber, &(ref sink, ref filter, ref child_filters))| { let should_remove = { if subscribers.contains(subscriber) { - sink.unbounded_send((hash.clone(), StorageChangeSet { - changes: changes.clone(), - child_changes: child_changes.clone(), - filter: filter.clone(), - child_filters: child_filters.clone(), - })).is_err() + sink.unbounded_send(( + hash.clone(), + StorageChangeSet { + changes: changes.clone(), + child_changes: child_changes.clone(), + filter: filter.clone(), + child_filters: child_filters.clone(), + }, + )) + .is_err() } else { sink.is_closed() } @@ -222,7 +234,8 @@ impl StorageNotifications { } else { None } - }).collect::>(); + }) + .collect::>(); for sub_id in to_remove { self.remove_subscriber(sub_id); @@ -234,13 +247,12 @@ impl StorageNotifications { filters: &Option>, listeners: &mut HashMap>, wildcards: &mut FnvHashSet, - ){ + ) { match filters { None => { wildcards.remove(subscriber); }, - Some(filters) => { - + Some(filters) => for key in filters.iter() { let remove_key = match listeners.get_mut(key) { Some(ref mut set) => { @@ -253,8 +265,7 @@ impl StorageNotifications { if remove_key { listeners.remove(key); } - } - } + }, } } @@ -268,7 +279,6 @@ impl StorageNotifications { ); if let Some(child_filters) = child_filters.as_ref() { for (c_key, filters) in child_filters { - if let Some((listeners, wildcards)) = self.child_listeners.get_mut(&c_key) { Self::remove_subscriber_from( &subscriber, @@ -294,20 +304,24 @@ impl StorageNotifications { filter_keys: &Option>, listeners: &mut HashMap>, wildcards: &mut FnvHashSet, - ) -> Option> - { + ) -> Option> { match filter_keys { None => { wildcards.insert(current_id); None }, - Some(keys) => Some(keys.as_ref().iter().map(|key| { - listeners - .entry(key.clone()) - .or_insert_with(Default::default) - .insert(current_id); - key.clone() - }).collect()) + Some(keys) => Some( + keys.as_ref() + .iter() + .map(|key| { + listeners + .entry(key.clone()) + .or_insert_with(Default::default) + .insert(current_id); + key.clone() + }) + .collect(), + ), } } @@ -328,21 +342,20 @@ impl StorageNotifications { &mut self.wildcard_listeners, ); let child_keys = filter_child_keys.map(|filter_child_keys| { - filter_child_keys.iter().map(|(c_key, o_keys)| { - let (c_listeners, c_wildcards) = self.child_listeners - .entry(c_key.clone()) - .or_insert_with(Default::default); - - (c_key.clone(), Self::listen_from( - current_id, - o_keys, - &mut *c_listeners, - &mut *c_wildcards, - )) - }).collect() + filter_child_keys + .iter() + .map(|(c_key, o_keys)| { + let (c_listeners, c_wildcards) = + 
self.child_listeners.entry(c_key.clone()).or_insert_with(Default::default); + + ( + c_key.clone(), + Self::listen_from(current_id, o_keys, &mut *c_listeners, &mut *c_wildcards), + ) + }) + .collect() }); - // insert sink let (tx, rx) = tracing_unbounded("mpsc_storage_notification_items"); self.sinks.insert(current_id, (tx, keys, child_keys)); @@ -357,8 +370,8 @@ impl StorageNotifications { #[cfg(test)] mod tests { - use sp_runtime::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper}; use super::*; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; use std::iter::{empty, Empty}; type TestChangeSet = ( @@ -370,10 +383,12 @@ mod tests { impl From for StorageChangeSet { fn from(changes: TestChangeSet) -> Self { // warning hardcoded child trie wildcard to test upon - let child_filters = Some([ - (StorageKey(vec![4]), None), - (StorageKey(vec![5]), None), - ].iter().cloned().collect()); + let child_filters = Some( + [(StorageKey(vec![4]), None), (StorageKey(vec![5]), None)] + .iter() + .cloned() + .collect(), + ); StorageChangeSet { changes: Arc::new(changes.0), child_changes: Arc::new(changes.1), @@ -397,34 +412,40 @@ mod tests { // given let mut notifications = StorageNotifications::::default(); let child_filter = [(StorageKey(vec![4]), None)]; - let mut recv = futures::executor::block_on_stream( - notifications.listen(None, Some(&child_filter[..])) - ); + let mut recv = + futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter[..]))); // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![3], None), - ]; - let c_changeset_1 = vec![ - (vec![5], Some(vec![4])), - (vec![6], None), - ]; + let changeset = vec![(vec![2], Some(vec![3])), (vec![3], None)]; + let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)]; let c_changeset = vec![(vec![4], c_changeset_1)]; notifications.trigger( &Hash::from_low_u64_be(1), changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), + c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())), ); // then - assert_eq!(recv.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - (StorageKey(vec![3]), None), - ], vec![(StorageKey(vec![4]), vec![ - (StorageKey(vec![5]), Some(StorageData(vec![4]))), - (StorageKey(vec![6]), None), - ])]).into())); + assert_eq!( + recv.next().unwrap(), + ( + Hash::from_low_u64_be(1), + ( + vec![ + (StorageKey(vec![2]), Some(StorageData(vec![3]))), + (StorageKey(vec![3]), None), + ], + vec![( + StorageKey(vec![4]), + vec![ + (StorageKey(vec![5]), Some(StorageData(vec![4]))), + (StorageKey(vec![6]), None), + ] + )] + ) + .into() + ) + ); } #[test] @@ -433,44 +454,52 @@ mod tests { let mut notifications = StorageNotifications::::default(); let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; let mut recv1 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![1])]), None) + notifications.listen(Some(&[StorageKey(vec![1])]), None), ); let mut recv2 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![2])]), None) + notifications.listen(Some(&[StorageKey(vec![2])]), None), ); let mut recv3 = futures::executor::block_on_stream( - notifications.listen(Some(&[]), Some(&child_filter)) + notifications.listen(Some(&[]), Some(&child_filter)), ); // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; - let c_changeset_1 = vec![ - (vec![5], Some(vec![4])), - (vec![6], None), - ]; + 
let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; + let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)]; let c_changeset = vec![(vec![4], c_changeset_1)]; notifications.trigger( &Hash::from_low_u64_be(1), changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), + c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())), ); // then - assert_eq!(recv1.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![1]), None), - ], vec![]).into())); - assert_eq!(recv2.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - ], vec![]).into())); - assert_eq!(recv3.next().unwrap(), (Hash::from_low_u64_be(1), (vec![], - vec![ - (StorageKey(vec![4]), vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))]), - ]).into())); - + assert_eq!( + recv1.next().unwrap(), + (Hash::from_low_u64_be(1), (vec![(StorageKey(vec![1]), None),], vec![]).into()) + ); + assert_eq!( + recv2.next().unwrap(), + ( + Hash::from_low_u64_be(1), + (vec![(StorageKey(vec![2]), Some(StorageData(vec![3]))),], vec![]).into() + ) + ); + assert_eq!( + recv3.next().unwrap(), + ( + Hash::from_low_u64_be(1), + ( + vec![], + vec![( + StorageKey(vec![4]), + vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))] + ),] + ) + .into() + ) + ); } #[test] @@ -480,27 +509,21 @@ mod tests { { let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; let _recv1 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![1])]), None) + notifications.listen(Some(&[StorageKey(vec![1])]), None), ); let _recv2 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![2])]), None) - ); - let _recv3 = futures::executor::block_on_stream( - notifications.listen(None, None) - ); - let _recv4 = futures::executor::block_on_stream( - notifications.listen(None, Some(&child_filter)) + notifications.listen(Some(&[StorageKey(vec![2])]), None), ); + let _recv3 = futures::executor::block_on_stream(notifications.listen(None, None)); + let _recv4 = + futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter))); assert_eq!(notifications.listeners.len(), 2); assert_eq!(notifications.wildcard_listeners.len(), 2); assert_eq!(notifications.child_listeners.len(), 1); } // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; + let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; let c_changeset = empty::<(_, Empty<_>)>(); notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset); diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 5749ae0576fc3..79444f0069232 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,12 +17,9 @@ // along with this program. If not, see . //! 
Proof utilities -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT}, -}; -use crate::{StorageProof, ChangesProof}; -use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; +use crate::{ChangesProof, StorageProof}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey}; /// Interface for providing block proving utilities. pub trait ProofProvider { @@ -30,7 +27,7 @@ pub trait ProofProvider { fn read_proof( &self, id: &BlockId, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result; /// Reads child storage value at a given block + storage_key + key, returning @@ -39,7 +36,7 @@ pub trait ProofProvider { &self, id: &BlockId, child_info: &ChildInfo, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result; /// Execute a call to a contract on top of state in a block of given hash @@ -53,13 +50,16 @@ pub trait ProofProvider { call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)>; /// Reads given header and generates CHT-based header proof. - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)>; + fn header_proof( + &self, + id: &BlockId, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)>; - /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. - /// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using - /// changes tries from ascendants of this block, we should provide proofs for changes tries roots - /// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants - /// of this block. + /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given + /// blocks range. `min` is the hash of the first block, which changes trie root is known to the + /// requester - when we're using changes tries from ascendants of this block, we should provide + /// proofs for changes tries roots `max` is the hash of the last block known to the requester - + /// we can't use changes tries from descendants of this block. /// Works only for runtimes that are supporting changes tries. fn key_changes_proof( &self, @@ -70,4 +70,32 @@ pub trait ProofProvider { storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result>; + + /// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively, + /// building proofs until size limit is reached. Returns combined proof and the number of + /// collected keys. + fn read_proof_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result<(StorageProof, u32)>; + + /// Given a `BlockId` iterate over all storage values starting at `start_key`. + /// Returns collected keys and values. + fn storage_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result, Vec)>>; + + /// Verify read storage proof for a set of keys. + /// Returns collected key-value pairs and a flag indicating if iteration is complete. 
+ fn verify_range_proof( + &self, + root: Block::Hash, + proof: StorageProof, + start_key: &[u8], + ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)>; } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 0e5c22380ded2..8d5ed20730f0c 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-authority-discovery" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -14,33 +14,30 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.6.1" +prost-build = "0.8" [dependencies] -bytes = "0.5.0" -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.4" } +async-trait = "0.1" +codec = { package = "parity-scale-codec", default-features = false, version = "2.0.0" } derive_more = "0.99.2" -either = "1.5.3" -futures = "0.3.4" +futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.28.1", default-features = false, features = ["kad"] } +ip_network = "0.4.0" +libp2p = { version = "0.39.1", default-features = false, features = ["kad"] } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -prost = "0.6.1" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0" } +prost = "0.8" rand = "0.7.2" -sc-client-api = { version = "2.0.0", path = "../api" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -sc-network = { version = "0.8.0", path = "../network" } -serde_json = "1.0.41" -sp-authority-discovery = { version = "2.0.0", path = "../../primitives/authority-discovery" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sp-authority-discovery = { version = "4.0.0-dev", path = "../../primitives/authority-discovery" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } [dev-dependencies] -quickcheck = "0.9.0" -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -sc-peerset = { version = "2.0.0", path = "../peerset" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client"} +quickcheck = "1.0.3" +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index 48bcdf33114b1..b271f7b9d62bb 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
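// A hypothetical driver loop for the `ProofProvider` range APIs introduced
// above in client/api/src/proof_provider.rs, assuming `provider` is a full
// client and `root` is the state root the verifier trusts; `sync_state` is an
// illustrative name, not an API from this diff. Chunks are requested,
// verified against the root, and iteration resumes from the last key until
// the verifier reports completion.
use sc_client_api::ProofProvider;
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

fn sync_state<B: BlockT, P: ProofProvider<B>>(
    provider: &P,
    id: &BlockId<B>,
    root: B::Hash,
) -> sp_blockchain::Result<Vec<(Vec<u8>, Vec<u8>)>> {
    let mut state = Vec::new();
    let mut start_key = Vec::new();
    loop {
        // Prover side: gather values into a size-bounded proof.
        let (proof, _keys) = provider.read_proof_collection(id, &start_key, 64 * 1024)?;
        // Verifier side: check the chunk against the trusted state root.
        let (mut entries, complete) = provider.verify_range_proof(root, proof, &start_key)?;
        if let Some((last_key, _)) = entries.last() {
            start_key = last_key.clone();
        }
        state.append(&mut entries);
        if complete {
            return Ok(state)
        }
    }
}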
-// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Authority discovery errors. @@ -31,13 +33,11 @@ pub enum Error { /// Failed to verify a dht payload with the given signature. VerifyingDhtPayload, /// Failed to hash the authority id to be used as a dht key. - HashingAuthorityId(libp2p::core::multiaddr::multihash::EncodeError), + HashingAuthorityId(libp2p::core::multiaddr::multihash::Error), /// Failed calling into the Substrate runtime. CallingRuntime(sp_blockchain::Error), /// Received a dht record with a key that does not match any in-flight awaited keys. ReceivingUnexpectedRecord, - /// Failed to set the authority discovery peerset priority group in the peerset module. - SettingPeersetPriorityGroup(String), /// Failed to encode a protobuf payload. EncodingProto(prost::EncodeError), /// Failed to decode a protobuf payload. diff --git a/client/authority-discovery/src/interval.rs b/client/authority-discovery/src/interval.rs new file mode 100644 index 0000000000000..f4e7c43e60d21 --- /dev/null +++ b/client/authority-discovery/src/interval.rs @@ -0,0 +1,60 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use futures::{future::FutureExt, ready, stream::Stream}; +use futures_timer::Delay; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +/// Exponentially increasing interval +/// +/// Doubles interval duration on each tick until the configured maximum is reached. +pub struct ExpIncInterval { + max: Duration, + next: Duration, + delay: Delay, +} + +impl ExpIncInterval { + /// Create a new [`ExpIncInterval`]. + pub fn new(start: Duration, max: Duration) -> Self { + let delay = Delay::new(start); + Self { max, next: start * 2, delay } + } + + /// Fast forward the exponentially increasing interval to the configured maximum. 
+ pub fn set_to_max(&mut self) { + self.next = self.max; + self.delay = Delay::new(self.next); + } +} + +impl Stream for ExpIncInterval { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.delay.poll_unpin(cx)); + self.delay = Delay::new(self.next); + self.next = std::cmp::min(self.max, self.next * 2); + + Poll::Ready(Some(())) + } +} diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 42cf120d70f8a..800f683aa0aef 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . #![warn(missing_docs)] #![recursion_limit = "1024"] @@ -24,30 +26,85 @@ //! //! See [`Worker`] and [`Service`] for more documentation. -pub use crate::{service::Service, worker::{NetworkProvider, Worker, Role}}; +pub use crate::{ + service::Service, + worker::{NetworkProvider, Role, Worker}, +}; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; -use futures::channel::{mpsc, oneshot}; -use futures::Stream; +use futures::{ + channel::{mpsc, oneshot}, + Stream, +}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{config::MultiaddrWithPeerId, DhtEvent, Multiaddr, PeerId}; +use sc_network::{DhtEvent, Multiaddr, PeerId}; +use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; use sp_runtime::traits::Block as BlockT; -use sp_api::ProvideRuntimeApi; mod error; +mod interval; mod service; +mod worker; + #[cfg(test)] mod tests; -mod worker; + +/// Configuration of [`Worker`]. +pub struct WorkerConfig { + /// The maximum interval in which the node will publish its own address on the DHT. + /// + /// By default this is set to 1 hour. + pub max_publish_interval: Duration, + /// Interval at which the keystore is queried. If the keys have changed, unconditionally + /// re-publish its addresses on the DHT. + /// + /// By default this is set to 1 minute. + pub keystore_refresh_interval: Duration, + /// The maximum interval in which the node will query the DHT for new entries. + /// + /// By default this is set to 10 minutes. + pub max_query_interval: Duration, + + /// If `false`, the node won't publish on the DHT multiaddresses that contain non-global + /// IP addresses (such as 10.0.0.1). + /// + /// Recommended: `false` for live chains, and `true` for local chains or for testing. + /// + /// Defaults to `true` to avoid the surprise factor. 
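// An in-crate style sketch of driving the `ExpIncInterval` stream above
// (`mod interval` is private, so this would live inside the crate): ticks
// arrive after 2s, 4s, 8s, ... until the configured maximum is reached, and
// `set_to_max` can cut the back-off short.
use futures::StreamExt;

async fn periodic_publish(mut interval: ExpIncInterval) {
    loop {
        interval.next().await;
        // Publish addresses here; each subsequent wait doubles, capped at
        // the maximum passed to `ExpIncInterval::new`.
    }
}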
+ pub publish_non_global_ips: bool, +} + +impl Default for WorkerConfig { + fn default() -> Self { + Self { + // Kademlia's default time-to-live for Dht records is 36h, republishing records every + // 24h through libp2p-kad. Given that a node could restart at any point in time, one can + // not depend on the republishing process, thus publishing own external addresses should + // happen on an interval < 36h. + max_publish_interval: Duration::from_secs(1 * 60 * 60), + keystore_refresh_interval: Duration::from_secs(60), + // External addresses of remote authorities can change at any given point in time. The + // interval on which to trigger new queries for the current and next authorities is a + // trade off between efficiency and performance. + // + // Querying 700 [`AuthorityId`]s takes ~8m on the Kusama DHT (16th Nov 2020) when + // comparing `authority_discovery_authority_addresses_requested_total` and + // `authority_discovery_dht_event_received`. + max_query_interval: Duration::from_secs(10 * 60), + publish_non_global_ips: true, + } + } +} /// Create a new authority discovery [`Worker`] and [`Service`]. +/// +/// See the struct documentation of each for more details. pub fn new_worker_and_service( client: Arc, network: Arc, - sentry_nodes: Vec, dht_event_rx: DhtEventStream, role: Role, prometheus_registry: Option, @@ -56,14 +113,41 @@ where Block: BlockT + Unpin + 'static, Network: NetworkProvider, Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, + >::Api: AuthorityDiscoveryApi, + DhtEventStream: Stream + Unpin, +{ + new_worker_and_service_with_config( + Default::default(), + client, + network, + dht_event_rx, + role, + prometheus_registry, + ) +} + +/// Same as [`new_worker_and_service`] but with support for providing the `config`. +/// +/// When in doubt use [`new_worker_and_service`] as it will use the default configuration. +pub fn new_worker_and_service_with_config( + config: WorkerConfig, + client: Arc, + network: Arc, + dht_event_rx: DhtEventStream, + role: Role, + prometheus_registry: Option, +) -> (Worker, Service) +where + Block: BlockT + Unpin + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { let (to_worker, from_service) = mpsc::channel(0); - let worker = Worker::new( - from_service, client, network, sentry_nodes, dht_event_rx, role, prometheus_registry, - ); + let worker = + Worker::new(from_service, client, network, dht_event_rx, role, prometheus_registry, config); let service = Service::new(to_worker); (worker, service) @@ -74,5 +158,5 @@ pub(crate) enum ServicetoWorkerMsg { /// See [`Service::get_addresses_by_authority_id`]. GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), /// See [`Service::get_authority_id_by_peer_id`]. - GetAuthorityIdByPeerId(PeerId, oneshot::Sender>) + GetAuthorityIdByPeerId(PeerId, oneshot::Sender>), } diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index ed0205d262fc6..2e5ae66e4dd4a 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -1,40 +1,50 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
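// A hedged wiring sketch for the configuration added above: override a
// single field, keep the documented defaults for the rest, and build the pair
// via `new_worker_and_service_with_config`. The `client`, `network`,
// `dht_event_rx`, `role` and `prometheus_registry` values are assumed to come
// from the surrounding node setup.
let config = sc_authority_discovery::WorkerConfig {
    // Publish only globally routable addresses on a live chain.
    publish_non_global_ips: false,
    ..Default::default()
};
let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
    config,
    client,
    network,
    dht_event_rx,
    role,
    prometheus_registry,
);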
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . + +use std::fmt::Debug; use crate::ServicetoWorkerMsg; -use futures::channel::{mpsc, oneshot}; -use futures::SinkExt; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, +}; use sc_network::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; -/// Service to interact with the [`Worker`]. +/// Service to interact with the [`crate::Worker`]. #[derive(Clone)] pub struct Service { to_worker: mpsc::Sender, } -/// A [`Service`] allows to interact with a [`Worker`], e.g. by querying the -/// [`Worker`]'s local address cache for a given [`AuthorityId`]. +impl Debug for Service { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("AuthorityDiscoveryService").finish() + } +} + +/// A [`Service`] allows to interact with a [`crate::Worker`], e.g. by querying the +/// [`crate::Worker`]'s local address cache for a given [`AuthorityId`]. impl Service { pub(crate) fn new(to_worker: mpsc::Sender) -> Self { - Self { - to_worker, - } + Self { to_worker } } /// Get the addresses for the given [`AuthorityId`] from the local address @@ -43,13 +53,16 @@ impl Service { /// Returns `None` if no entry was present or connection to the /// [`crate::Worker`] failed. /// - /// [`Multiaddr`]s returned always include a [`PeerId`] via a - /// [`libp2p::core::multiaddr:Protocol::P2p`] component. [`Multiaddr`]s - /// might differ in their [`PeerId`], e.g. when each [`Multiaddr`] - /// represents a different sentry node. This might change once support for - /// sentry nodes is removed (see - /// https://github.com/paritytech/substrate/issues/6845). - pub async fn get_addresses_by_authority_id(&mut self, authority: AuthorityId) -> Option> { + /// Note: [`Multiaddr`]s returned always include a [`PeerId`] via a + /// [`libp2p::core::multiaddr::Protocol::P2p`] component. Equality of + /// [`PeerId`]s across [`Multiaddr`]s returned by a single call is not + /// enforced today, given that there are still authorities out there + /// publishing the addresses of their sentry nodes on the DHT. In the future + /// this guarantee can be provided. + pub async fn get_addresses_by_authority_id( + &mut self, + authority: AuthorityId, + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_worker diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 88aad0af0696b..3784b4c834266 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
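// A small sketch of the `PeerId` guarantee documented above, assuming a
// `Multiaddr` as returned by `get_addresses_by_authority_id`: the trailing
// `/p2p` component can be converted back into a `PeerId`.
use libp2p::core::multiaddr::{Multiaddr, Protocol};
use sc_network::PeerId;

fn peer_id_of(address: &Multiaddr) -> Option<PeerId> {
    address.iter().find_map(|protocol| match protocol {
        Protocol::P2p(multihash) => PeerId::from_multihash(multihash).ok(),
        _ => None,
    })
}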
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,15 +16,24 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{new_worker_and_service, worker::{tests::{TestApi, TestNetwork}, Role}}; +use crate::{ + new_worker_and_service, + worker::{ + tests::{TestApi, TestNetwork}, + Role, + }, +}; -use std::sync::Arc; use futures::{channel::mpsc::channel, executor::LocalPool, task::LocalSpawn}; -use libp2p::core::{multiaddr::{Multiaddr, Protocol}, PeerId}; +use libp2p::core::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; +use std::sync::Arc; use sp_authority_discovery::AuthorityId; use sp_core::crypto::key_types; -use sp_keystore::{CryptoStore, testing::KeyStore}; +use sp_keystore::{testing::KeyStore, CryptoStore}; #[test] fn get_addresses_and_authority_id() { @@ -44,20 +53,18 @@ fn get_addresses_and_authority_id() { }); let remote_peer_id = PeerId::random(); - let remote_addr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + let remote_addr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" + .parse::() .unwrap() - .with(Protocol::P2p(remote_peer_id.clone().into())); + .with(Protocol::P2p(remote_peer_id.into())); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); + let test_api = Arc::new(TestApi { authorities: vec![] }); let (mut worker, mut service) = new_worker_and_service( test_api, network.clone(), - vec![], Box::pin(dht_event_rx), - Role::Authority(key_store.into()), + Role::PublishAndDiscover(key_store.into()), None, ); worker.inject_addresses(remote_authority_id.clone(), vec![remote_addr.clone()]); diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index ca8a1bdd6370d..a689d0bafd262 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -1,149 +1,132 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . 
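// A hedged sketch of the consumer side, assuming a `Service` handle from the
// pair returned by `new_worker_and_service` and a worker that is being driven
// elsewhere: lookups are answered from the worker's local address cache,
// which is populated from the DHT.
use sc_authority_discovery::Service;
use sp_authority_discovery::AuthorityId;

async fn print_authority_addresses(mut service: Service, authority: AuthorityId) {
    match service.get_addresses_by_authority_id(authority).await {
        Some(addresses) =>
            for address in addresses {
                println!("authority reachable at {}", address);
            },
        None => println!("no cached addresses (or the worker is gone)"),
    }
}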
-use crate::{error::{Error, Result}, ServicetoWorkerMsg}; +use crate::{ + error::{Error, Result}, + interval::ExpIncInterval, + ServicetoWorkerMsg, +}; -use std::collections::{HashMap, HashSet}; -use std::convert::TryInto; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, + marker::PhantomData, + sync::Arc, + time::Duration, +}; -use futures::channel::mpsc; -use futures::{FutureExt, Stream, StreamExt, stream::Fuse}; -use futures_timer::Delay; +use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}; use addr_cache::AddrCache; +use async_trait::async_trait; use codec::Decode; -use either::Either; -use libp2p::{core::multiaddr, multihash::Multihash}; +use ip_network::IpNetwork; +use libp2p::{ + core::multiaddr, + multihash::{Hasher, Multihash}, +}; use log::{debug, error, log_enabled}; -use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; +use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{ - config::MultiaddrWithPeerId, - DhtEvent, - ExHashT, - Multiaddr, - NetworkStateInfo, - PeerId, +use sc_network::{DhtEvent, ExHashT, Multiaddr, NetworkStateInfo, PeerId}; +use sp_api::ProvideRuntimeApi; +use sp_authority_discovery::{ + AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, }; -use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; -use sp_core::crypto::{key_types, Pair}; +use sp_core::crypto::{key_types, CryptoTypePublicPair, Pair}; use sp_keystore::CryptoStore; -use sp_runtime::{traits::Block as BlockT, generic::BlockId}; -use sp_api::ProvideRuntimeApi; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; mod addr_cache; /// Dht payload schemas generated from Protobuf definitions via Prost crate in build.rs. -mod schema { include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); } +mod schema { + include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); +} #[cfg(test)] pub mod tests; -type Interval = Box + Unpin + Send + Sync>; - const LOG_TARGET: &'static str = "sub-authority-discovery"; -/// Upper bound estimation on how long one should wait before accessing the Kademlia DHT. -const LIBP2P_KADEMLIA_BOOTSTRAP_TIME: Duration = Duration::from_secs(30); - -/// Name of the Substrate peerset priority group for authorities discovered through the authority -/// discovery module. -const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities"; - /// Maximum number of addresses cached per authority. Additional addresses are discarded. const MAX_ADDRESSES_PER_AUTHORITY: usize = 10; /// Maximum number of in-flight DHT lookups at any given point in time. const MAX_IN_FLIGHT_LOOKUPS: usize = 8; -/// Role an authority discovery module can run as. +/// Role an authority discovery [`Worker`] can run as. pub enum Role { - /// Actual authority as well as a reference to its key store. - Authority(Arc), - /// Sentry node that guards an authority. - /// - /// No reference to its key store needed, as sentry nodes don't have an identity to sign - /// addresses with in the first place. - Sentry, + /// Publish own addresses and discover addresses of others. + PublishAndDiscover(Arc), + /// Discover addresses of others. + Discover, } -/// A [`Worker`] makes a given authority discoverable and discovers other -/// authorities. 
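// A hedged sketch of picking the `Role` defined above: validators publish
// their own addresses and discover others, while nodes that only need to
// locate authorities run in pure discovery mode. `is_validator` and the
// `keystore` handle are assumed to come from the node configuration.
use sc_authority_discovery::Role;

let role = if is_validator { Role::PublishAndDiscover(keystore) } else { Role::Discover };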
+/// An authority discovery [`Worker`] can publish the local node's addresses as well as discover +/// those of other nodes via a Kademlia DHT. /// -/// The [`Worker`] implements the Future trait. By -/// polling [`Worker`] an authority: +/// When constructed with [`Role::PublishAndDiscover`] a [`Worker`] will /// -/// 1. **Makes itself discoverable** +/// 1. Retrieve its external addresses (including peer id). /// -/// 1. Retrieves its external addresses (including peer id) or the ones of -/// its sentry nodes. +/// 2. Get the list of keys owned by the local node participating in the current authority set. /// -/// 2. Signs the above. +/// 3. Sign the addresses with the keys. /// -/// 3. Puts the signature and the addresses on the libp2p Kademlia DHT. +/// 4. Put addresses and signature as a record with the authority id as a key on a Kademlia DHT. /// +/// When constructed with either [`Role::PublishAndDiscover`] or [`Role::Discover`] a [`Worker`] +/// will /// -/// 2. **Discovers other authorities** +/// 1. Retrieve the current and next set of authorities. /// -/// 1. Retrieves the current and next set of authorities. +/// 2. Start DHT queries for the ids of the authorities. /// -/// 2. Starts DHT queries for the ids of the authorities. +/// 3. Validate the signatures of the retrieved key value pairs. /// -/// 3. Validates the signatures of the retrieved key value pairs. +/// 4. Add the retrieved external addresses as priority nodes to the +/// network peerset. /// -/// 4. Adds the retrieved external addresses as priority nodes to the -/// peerset. -/// -/// When run as a sentry node, the [`Worker`] does not publish -/// any addresses to the DHT but still discovers validators and sentry nodes of -/// validators, i.e. only step 2 (Discovers other authorities) is executed. -pub struct Worker -where - Block: BlockT + 'static, - Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, -{ - /// Channel receiver for messages send by an [`Service`]. +/// 5. Allow querying of the collected addresses via the [`crate::Service`]. +pub struct Worker { + /// Channel receiver for messages send by a [`crate::Service`]. from_service: Fuse>, client: Arc, network: Arc, - /// List of sentry node public addresses. - // - // There are 3 states: - // - None: No addresses were specified. - // - Some(vec![]): Addresses were specified, but none could be parsed as proper - // Multiaddresses. - // - Some(vec![a, b, c, ...]): Valid addresses were specified. - sentry_nodes: Option>, /// Channel we receive Dht events on. dht_event_rx: DhtEventStream, /// Interval to be proactive, publishing own addresses. - publish_interval: Interval, + publish_interval: ExpIncInterval, + /// Pro-actively publish our own addresses at this interval, if the keys in the keystore + /// have changed. + publish_if_changed_interval: ExpIncInterval, + /// List of keys onto which addresses have been published at the latest publication. + /// Used to check whether they have changed. + latest_published_keys: HashSet, + /// Same value as in the configuration. + publish_non_global_ips: bool, + /// Interval at which to request addresses of authorities, refilling the pending lookups queue. - query_interval: Interval, - /// Interval on which to set the peerset priority group to a new random - /// set of addresses. - priority_group_set_interval: Interval, + query_interval: ExpIncInterval, /// Queue of throttled lookups pending to be passed to the network. 
pending_lookups: Vec, @@ -164,65 +147,43 @@ where Block: BlockT + Unpin + 'static, Network: NetworkProvider, Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: - AuthorityDiscoveryApi, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { - /// Return a new [`Worker`]. - /// - /// Note: When specifying `sentry_nodes` this module will not advertise the public addresses of - /// the node itself but only the public addresses of its sentry nodes. + /// Construct a [`Worker`]. pub(crate) fn new( from_service: mpsc::Receiver, client: Arc, network: Arc, - sentry_nodes: Vec, dht_event_rx: DhtEventStream, role: Role, prometheus_registry: Option, + config: crate::WorkerConfig, ) -> Self { - // Kademlia's default time-to-live for Dht records is 36h, republishing records every 24h. - // Given that a node could restart at any point in time, one can not depend on the - // republishing process, thus publishing own external addresses should happen on an interval - // < 36h. - let publish_interval = interval_at( - Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, - Duration::from_secs(12 * 60 * 60), - ); - - // External addresses of remote authorities can change at any given point in time. The - // interval on which to trigger new queries for the current authorities is a trade off - // between efficiency and performance. - let query_interval_start = Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME; - let query_interval_duration = Duration::from_secs(10 * 60); - let query_interval = interval_at(query_interval_start, query_interval_duration); - - // Querying 500 [`AuthorityId`]s takes ~1m on the Kusama DHT (10th of August 2020) when - // comparing `authority_discovery_authority_addresses_requested_total` and - // `authority_discovery_dht_event_received`. With that in mind set the peerset priority - // group on the same interval as the [`query_interval`] above, just delayed by 5 minutes. - let priority_group_set_interval = interval_at( - query_interval_start + Duration::from_secs(5 * 60), - query_interval_duration, - ); - - let sentry_nodes = if !sentry_nodes.is_empty() { - Some(sentry_nodes.into_iter().map(|ma| ma.concat()).collect::>()) - } else { - None - }; + // When a node starts up, publishing and querying might fail for various reasons, for + // example because the node is not yet fully bootstrapped on the DHT. Thus one should retry rather + // sooner than later. On the other hand, a long-running node is likely well connected and + // thus timely retries are not needed. For this reason, use an exponentially increasing + // interval for `publish_interval` and `query_interval` instead of a constant interval. + let publish_interval = + ExpIncInterval::new(Duration::from_secs(2), config.max_publish_interval); + let query_interval = ExpIncInterval::new(Duration::from_secs(2), config.max_query_interval); + + // An `ExpIncInterval` is overkill here because the interval is constant, but consistency + // is simpler.
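The `ExpIncInterval` stream itself lives in the new `interval` module, which this diff does not show. A minimal sketch of how such a stream can be built on top of `futures_timer::Delay`, assuming a doubling tick capped at `max` plus the `set_to_max` fast-forward used in the DHT event handler below:

```rust
use std::{cmp, pin::Pin, task::{Context, Poll}, time::Duration};

use futures::{future::FutureExt, stream::Stream};
use futures_timer::Delay;

/// Stream yielding `()` at exponentially increasing intervals: first after
/// `initial`, then doubling until the tick interval reaches `max`.
pub struct ExpIncInterval {
    max: Duration,
    next: Duration,
    delay: Delay,
}

impl ExpIncInterval {
    pub fn new(initial: Duration, max: Duration) -> Self {
        Self { max, next: initial * 2, delay: Delay::new(initial) }
    }

    /// Skip the remaining ramp-up and tick at `max` from now on.
    pub fn set_to_max(&mut self) {
        self.next = self.max;
        self.delay = Delay::new(self.next);
    }
}

impl Stream for ExpIncInterval {
    type Item = ();

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> {
        let this = &mut *self; // fine: all fields are `Unpin`
        match this.delay.poll_unpin(cx) {
            Poll::Ready(()) => {
                // Schedule the next tick and keep doubling up to the cap.
                this.delay = Delay::new(this.next);
                this.next = cmp::min(this.max, this.next * 2);
                Poll::Ready(Some(()))
            },
            Poll::Pending => Poll::Pending,
        }
    }
}
```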
+ let publish_if_changed_interval = + ExpIncInterval::new(config.keystore_refresh_interval, config.keystore_refresh_interval); let addr_cache = AddrCache::new(); let metrics = match prometheus_registry { - Some(registry) => { - match Metrics::register(®istry) { - Ok(metrics) => Some(metrics), - Err(e) => { - error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); - None - }, - } + Some(registry) => match Metrics::register(®istry) { + Ok(metrics) => Some(metrics), + Err(e) => { + error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); + None + }, }, None => None, }; @@ -231,11 +192,12 @@ where from_service: from_service.fuse(), client, network, - sentry_nodes, dht_event_rx, publish_interval, + publish_if_changed_interval, + latest_published_keys: HashSet::new(), + publish_non_global_ips: config.publish_non_global_ips, query_interval, - priority_group_set_interval, pending_lookups: Vec::new(), in_flight_lookups: HashMap::new(), addr_cache, @@ -265,18 +227,12 @@ where msg = self.from_service.select_next_some() => { self.process_message_from_service(msg); }, - // Set peerset priority group to a new random set of addresses. - _ = self.priority_group_set_interval.next().fuse() => { - if let Err(e) = self.set_priority_group() { - error!( - target: LOG_TARGET, - "Failed to set priority group: {:?}", e, - ); - } - }, // Publish own addresses. - _ = self.publish_interval.next().fuse() => { - if let Err(e) = self.publish_ext_addresses().await { + only_if_changed = future::select( + self.publish_interval.next().map(|_| false), + self.publish_if_changed_interval.next().map(|_| true) + ).map(|e| e.factor_first().0).fuse() => { + if let Err(e) = self.publish_ext_addresses(only_if_changed).await { error!( target: LOG_TARGET, "Failed to publish external addresses: {:?}", e, @@ -302,90 +258,100 @@ where let _ = sender.send( self.addr_cache.get_addresses_by_authority_id(&authority).map(Clone::clone), ); - } + }, ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, sender) => { - let _ = sender.send( - self.addr_cache.get_authority_id_by_peer_id(&peer_id).map(Clone::clone), - ); - } + let _ = sender + .send(self.addr_cache.get_authority_id_by_peer_id(&peer_id).map(Clone::clone)); + }, } } - fn addresses_to_publish(&self) -> impl ExactSizeIterator { - match &self.sentry_nodes { - Some(addrs) => Either::Left(addrs.clone().into_iter()), - None => { - let peer_id: Multihash = self.network.local_peer_id().into(); - Either::Right( - self.network.external_addresses() - .into_iter() - .map(move |a| { - if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { - a - } else { - a.with(multiaddr::Protocol::P2p(peer_id.clone())) - } - }), - ) - } - } + fn addresses_to_publish(&self) -> impl Iterator { + let peer_id: Multihash = self.network.local_peer_id().into(); + let publish_non_global_ips = self.publish_non_global_ips; + self.network + .external_addresses() + .into_iter() + .filter(move |a| { + if publish_non_global_ips { + return true + } + + a.iter().all(|p| match p { + // The `ip_network` library is used because its `is_global()` method is stable, + // while `is_global()` in the standard library currently isn't. 
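For reference, the classification the filter below relies on can be checked in isolation. A standalone sketch, assuming the crate follows the IANA special-purpose registries as its documentation states:

```rust
use ip_network::IpNetwork;
use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    // Private (RFC 1918) and loopback ranges are not global, so the filter
    // below drops them unless `publish_non_global_ips` is set.
    assert!(!IpNetwork::from(Ipv4Addr::new(192, 168, 1, 1)).is_global());
    assert!(!IpNetwork::from(Ipv4Addr::LOCALHOST).is_global());
    // A publicly routable address passes.
    assert!(IpNetwork::from(Ipv4Addr::new(1, 1, 1, 1)).is_global());
    // Same for IPv6: the documentation range 2001:db8::/32 is not global.
    assert!(!IpNetwork::from(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)).is_global());
}
```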
+ multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, + multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, + _ => true, + }) + }) + .map(move |a| { + if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { + a + } else { + a.with(multiaddr::Protocol::P2p(peer_id)) + } + }) } - /// Publish either our own or if specified the public addresses of our sentry nodes. - async fn publish_ext_addresses(&mut self) -> Result<()> { + /// Publish own public addresses. + /// + /// If `only_if_changed` is true, the function has no effect if the list of keys to publish + /// is equal to `self.latest_published_keys`. + async fn publish_ext_addresses(&mut self, only_if_changed: bool) -> Result<()> { let key_store = match &self.role { - Role::Authority(key_store) => key_store, - // Only authority nodes can put addresses (their own or the ones of their sentry nodes) - // on the Dht. Sentry nodes don't have a known identity to authenticate such addresses, - // thus `publish_ext_addresses` becomes a no-op. - Role::Sentry => return Ok(()), + Role::PublishAndDiscover(key_store) => key_store, + Role::Discover => return Ok(()), }; - let addresses = self.addresses_to_publish(); + let keys = Worker::::get_own_public_keys_within_authority_set( + key_store.clone(), + self.client.as_ref(), + ).await?.into_iter().map(Into::into).collect::>(); + + if only_if_changed && keys == self.latest_published_keys { + return Ok(()) + } + + let addresses = self.addresses_to_publish().map(|a| a.to_vec()).collect::>(); if let Some(metrics) = &self.metrics { metrics.publish.inc(); - metrics.amount_addresses_last_published.set( - addresses.len().try_into().unwrap_or(std::u64::MAX), - ); + metrics + .amount_addresses_last_published + .set(addresses.len().try_into().unwrap_or(std::u64::MAX)); } let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses: addresses.map(|a| a.to_vec()).collect() } + schema::AuthorityAddresses { addresses } .encode(&mut serialized_addresses) .map_err(Error::EncodingProto)?; - let keys = Worker::::get_own_public_keys_within_authority_set( - key_store.clone(), - self.client.as_ref(), - ).await?.into_iter().map(Into::into).collect::>(); - - let signatures = key_store.sign_with_all( - key_types::AUTHORITY_DISCOVERY, - keys.clone(), - serialized_addresses.as_slice(), - ).await.map_err(|_| Error::Signing)?; + let keys_vec = keys.iter().cloned().collect::>(); + let signatures = key_store + .sign_with_all( + key_types::AUTHORITY_DISCOVERY, + keys_vec.clone(), + serialized_addresses.as_slice(), + ) + .await + .map_err(|_| Error::Signing)?; - for (sign_result, key) in signatures.into_iter().zip(keys) { + for (sign_result, key) in signatures.into_iter().zip(keys_vec.iter()) { let mut signed_addresses = vec![]; - // sign_with_all returns Result signature - // is generated for a public key that is supported. // Verify that all signatures exist for all provided keys. 
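Taken together, the publish path below boils down to: encode the address list, sign exactly those bytes, wrap payload and signature, and store the result under the hash of the public key. A condensed sketch (the `sign` closure is a hypothetical stand-in for the per-key result of `CryptoStore::sign_with_all`):

```rust
use prost::Message;

// Hypothetical helper mirroring `publish_ext_addresses` for a single key.
fn build_record(
    addresses: Vec<Vec<u8>>,
    public_key: &[u8],
    sign: impl FnOnce(&[u8]) -> Vec<u8>,
) -> (libp2p::kad::record::Key, Vec<u8>) {
    // 1. Protobuf-encode the addresses.
    let mut serialized = Vec::new();
    schema::AuthorityAddresses { addresses }
        .encode(&mut serialized)
        .expect("Vec<u8> grows on demand; encoding cannot fail");

    // 2. Sign the exact bytes that will be stored.
    let signature = sign(&serialized);

    // 3. Wrap payload and signature into the signed envelope.
    let mut signed = Vec::new();
    schema::SignedAuthorityAddresses { addresses: serialized, signature }
        .encode(&mut signed)
        .expect("Vec<u8> grows on demand; encoding cannot fail");

    // 4. The record key is the SHA-256 multihash of the public key, so any
    //    node can derive it independently.
    (hash_authority_id(public_key), signed)
}
```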
- let signature = sign_result.map_err(|_| Error::MissingSignature(key.clone()))?; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses) + let signature = + sign_result.ok().flatten().ok_or_else(|| Error::MissingSignature(key.clone()))?; + schema::SignedAuthorityAddresses { addresses: serialized_addresses.clone(), signature } + .encode(&mut signed_addresses) .map_err(Error::EncodingProto)?; - self.network.put_value( - hash_authority_id(key.1.as_ref()), - signed_addresses, - ); + self.network.put_value(hash_authority_id(key.1.as_ref()), signed_addresses); } + self.latest_published_keys = keys; + Ok(()) } @@ -393,19 +359,19 @@ where let id = BlockId::hash(self.client.info().best_hash); let local_keys = match &self.role { - Role::Authority(key_store) => { - key_store.sr25519_public_keys( - key_types::AUTHORITY_DISCOVERY - ).await.into_iter().collect::>() - }, - Role::Sentry => HashSet::new(), + Role::PublishAndDiscover(key_store) => key_store + .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) + .await + .into_iter() + .collect::>(), + Role::Discover => HashSet::new(), }; let mut authorities = self .client .runtime_api() .authorities(&id) - .map_err(Error::CallingRuntime)? + .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .filter(|id| !local_keys.contains(id.as_ref())) .collect(); @@ -419,9 +385,9 @@ where self.in_flight_lookups.clear(); if let Some(metrics) = &self.metrics { - metrics.requests_pending.set( - self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX), - ); + metrics + .requests_pending + .set(self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX)); } Ok(()) @@ -434,15 +400,14 @@ where None => return, }; let hash = hash_authority_id(authority_id.as_ref()); - self.network - .get_value(&hash); + self.network.get_value(&hash); self.in_flight_lookups.insert(hash, authority_id); if let Some(metrics) = &self.metrics { metrics.requests.inc(); - metrics.requests_pending.set( - self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX), - ); + metrics + .requests_pending + .set(self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX)); } } } @@ -456,11 +421,8 @@ where } if log_enabled!(log::Level::Debug) { - let hashes = v.iter().map(|(hash, _value)| hash.clone()); - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' found on Dht.", hashes, - ); + let hashes: Vec<_> = v.iter().map(|(hash, _value)| hash.clone()).collect(); + debug!(target: LOG_TARGET, "Value for hash '{:?}' found on Dht.", hashes); } if let Err(e) = self.handle_dht_value_found_event(v) { @@ -468,22 +430,16 @@ where metrics.handle_value_found_event_failure.inc(); } - debug!( - target: LOG_TARGET, - "Failed to handle Dht value found event: {:?}", e, - ); + debug!(target: LOG_TARGET, "Failed to handle Dht value found event: {:?}", e); } - } + }, DhtEvent::ValueNotFound(hash) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_not_found"]).inc(); } if self.in_flight_lookups.remove(&hash).is_some() { - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' not found on Dht.", hash - ) + debug!(target: LOG_TARGET, "Value for hash '{:?}' not found on Dht.", hash) } else { debug!( target: LOG_TARGET, @@ -492,25 +448,24 @@ where } }, DhtEvent::ValuePut(hash) => { + // Fast forward the exponentially increasing interval to the configured maximum. In + // case this was the first successful address publishing there is no need for a + // timely retry. 
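One detail of the `run` loop earlier in this hunk is worth spelling out: the two publish triggers race via `future::select`, and each arm is tagged with a boolean so the handler knows whether it was the keystore-change check that fired. The pattern in isolation (stream names assumed):

```rust
use futures::{future, FutureExt, Stream, StreamExt};

/// Resolve when either interval ticks; `true` means the keystore-change
/// interval won, so publishing may be skipped if the keys are unchanged.
async fn next_publish_trigger(
    publish: &mut (impl Stream<Item = ()> + Unpin),
    publish_if_changed: &mut (impl Stream<Item = ()> + Unpin),
) -> bool {
    future::select(
        publish.next().map(|_| false),
        publish_if_changed.next().map(|_| true),
    )
    // `select` yields `Either<(bool, _), (bool, _)>`; `factor_first` pulls
    // out the common `bool` so we need not care which side completed.
    .map(|either| either.factor_first().0)
    .await
}
```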
+ self.publish_interval.set_to_max(); + if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put"]).inc(); } - debug!( - target: LOG_TARGET, - "Successfully put hash '{:?}' on Dht.", hash, - ) + debug!(target: LOG_TARGET, "Successfully put hash '{:?}' on Dht.", hash) }, DhtEvent::ValuePutFailed(hash) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); } - debug!( - target: LOG_TARGET, - "Failed to put hash '{:?}' on Dht.", hash - ) - } + debug!(target: LOG_TARGET, "Failed to put hash '{:?}' on Dht.", hash) + }, } } @@ -519,34 +474,36 @@ where values: Vec<(libp2p::kad::record::Key, Vec)>, ) -> Result<()> { // Ensure `values` is not empty and all its keys equal. - let remote_key = values.iter().fold(Ok(None), |acc, (key, _)| { - match acc { + let remote_key = values + .iter() + .fold(Ok(None), |acc, (key, _)| match acc { Ok(None) => Ok(Some(key.clone())), - Ok(Some(ref prev_key)) if prev_key != key => Err( - Error::ReceivingDhtValueFoundEventWithDifferentKeys - ), + Ok(Some(ref prev_key)) if prev_key != key => + Err(Error::ReceivingDhtValueFoundEventWithDifferentKeys), x @ Ok(_) => x, Err(e) => Err(e), - } - })?.ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; + })? + .ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; - let authority_id: AuthorityId = self.in_flight_lookups + let authority_id: AuthorityId = self + .in_flight_lookups .remove(&remote_key) .ok_or(Error::ReceivingUnexpectedRecord)?; let local_peer_id = self.network.local_peer_id(); - let remote_addresses: Vec = values.into_iter() + let remote_addresses: Vec = values + .into_iter() .map(|(_k, v)| { let schema::SignedAuthorityAddresses { signature, addresses } = schema::SignedAuthorityAddresses::decode(v.as_slice()) - .map_err(Error::DecodingProto)?; + .map_err(Error::DecodingProto)?; let signature = AuthoritySignature::decode(&mut &signature[..]) .map_err(Error::EncodingDecodingScale)?; if !AuthorityPair::verify(&signature, &addresses, &authority_id) { - return Err(Error::VerifyingDhtPayload); + return Err(Error::VerifyingDhtPayload) } let addresses = schema::AuthorityAddresses::decode(addresses.as_slice()) @@ -563,40 +520,41 @@ where .into_iter() .flatten() // Ignore [`Multiaddr`]s without [`PeerId`] and own addresses. - .filter(|addr| addr.iter().any(|protocol| { - // Parse to PeerId first as Multihashes of old and new PeerId - // representation don't equal. - // - // See https://github.com/libp2p/rust-libp2p/issues/555 for - // details. - if let multiaddr::Protocol::P2p(hash) = protocol { - let peer_id = match PeerId::from_multihash(hash) { - Ok(peer_id) => peer_id, - Err(_) => return false, // Discard address. - }; - - // Discard if equal to local peer id, keep if it differs. - return !(peer_id == local_peer_id); - } + .filter(|addr| { + addr.iter().any(|protocol| { + // Parse to PeerId first as Multihashes of old and new PeerId + // representation don't equal. + // + // See https://github.com/libp2p/rust-libp2p/issues/555 for + // details. + if let multiaddr::Protocol::P2p(hash) = protocol { + let peer_id = match PeerId::from_multihash(hash) { + Ok(peer_id) => peer_id, + Err(_) => return false, // Discard address. + }; + + // Discard if equal to local peer id, keep if it differs. + return !(peer_id == local_peer_id) + } - false // `protocol` is not a [`Protocol::P2p`], let's keep looking. - })) + false // `protocol` is not a [`Protocol::P2p`], let's keep looking. 
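The parse-then-compare step above matters because comparing raw multihashes would treat the old and new `PeerId` encodings of the same peer as different. The check in isolation:

```rust
use libp2p::{core::multiaddr::{Multiaddr, Protocol}, PeerId};

/// Returns true if `addr` carries a `/p2p/...` component equal to `local`.
/// Converting to `PeerId` first normalizes both multihash encodings; see
/// https://github.com/libp2p/rust-libp2p/issues/555.
fn is_own_address(addr: &Multiaddr, local: &PeerId) -> bool {
    addr.iter().any(|protocol| match protocol {
        Protocol::P2p(hash) => PeerId::from_multihash(hash).map_or(false, |p| p == *local),
        _ => false,
    })
}
```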
+ }) + }) .take(MAX_ADDRESSES_PER_AUTHORITY) .collect(); if !remote_addresses.is_empty() { self.addr_cache.insert(authority_id, remote_addresses); if let Some(metrics) = &self.metrics { - metrics.known_authorities_count.set( - self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX) - ); + metrics + .known_authorities_count + .set(self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX)); } } Ok(()) } /// Retrieve our public keys within the current and next authority set. - // // A node might have multiple authority discovery keys within its keystore, e.g. an old one and // one for the upcoming session. In addition it could be participating in the current and (/ or) // next authority set with two keys. The function does not return all of the local authority @@ -612,65 +570,29 @@ where .collect::>(); let id = BlockId::hash(client.info().best_hash); - let authorities = client.runtime_api() + let authorities = client + .runtime_api() .authorities(&id) - .map_err(Error::CallingRuntime)? + .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .map(std::convert::Into::into) .collect::>(); - let intersection = local_pub_keys.intersection(&authorities) + let intersection = local_pub_keys + .intersection(&authorities) .cloned() .map(std::convert::Into::into) .collect(); Ok(intersection) } - - /// Set the peer set 'authority' priority group to a new random set of - /// [`Multiaddr`]s. - fn set_priority_group(&self) -> Result<()> { - let addresses = self.addr_cache.get_random_subset(); - - if addresses.is_empty() { - debug!( - target: LOG_TARGET, - "Got no addresses in cache for peerset priority group.", - ); - return Ok(()); - } - - if let Some(metrics) = &self.metrics { - metrics.priority_group_size.set(addresses.len().try_into().unwrap_or(std::u64::MAX)); - } - - debug!( - target: LOG_TARGET, - "Applying priority group {:?} to peerset.", addresses, - ); - - self.network - .set_priority_group( - AUTHORITIES_PRIORITY_GROUP_NAME.to_string(), - addresses.into_iter().collect(), - ) - .map_err(Error::SettingPeersetPriorityGroup)?; - - Ok(()) - } } /// NetworkProvider provides [`Worker`] with all necessary hooks into the -/// underlying Substrate networking. Using this trait abstraction instead of [`NetworkService`] -/// directly is necessary to unit test [`Worker`]. +/// underlying Substrate networking. Using this trait abstraction instead of +/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`]. +#[async_trait] pub trait NetworkProvider: NetworkStateInfo { - /// Modify a peerset priority group. - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String>; - /// Start putting a value in the Dht. 
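With `set_priority_group` gone, the trait is small enough that a test double fits in a few lines. A minimal sketch (`NoopNetwork` is an assumed name; the richer `TestNetwork` in the test module below records calls as well, and `#[async_trait]` is kept for parity even though the remaining methods are synchronous):

```rust
use async_trait::async_trait;
use sc_network::{Multiaddr, NetworkStateInfo, PeerId};

/// No-op stand-in for a real network, for unit tests.
struct NoopNetwork {
    peer_id: PeerId,
}

impl NetworkStateInfo for NoopNetwork {
    fn local_peer_id(&self) -> PeerId {
        self.peer_id
    }

    fn external_addresses(&self) -> Vec<Multiaddr> {
        Vec::new()
    }
}

#[async_trait]
impl NetworkProvider for NoopNetwork {
    fn put_value(&self, _key: libp2p::kad::record::Key, _value: Vec<u8>) {}
    fn get_value(&self, _key: &libp2p::kad::record::Key) {}
}
```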
fn put_value(&self, key: libp2p::kad::record::Key, value: Vec); @@ -678,18 +600,12 @@ pub trait NetworkProvider: NetworkStateInfo { fn get_value(&self, key: &libp2p::kad::record::Key); } +#[async_trait::async_trait] impl NetworkProvider for sc_network::NetworkService where B: BlockT + 'static, H: ExHashT, { - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group(group_id, peers) - } fn put_value(&self, key: libp2p::kad::record::Key, value: Vec) { self.put_value(key, value) } @@ -702,16 +618,6 @@ fn hash_authority_id(id: &[u8]) -> libp2p::kad::record::Key { libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) } -fn interval_at(start: Instant, duration: Duration) -> Interval { - let stream = futures::stream::unfold(start, move |next| { - let time_until_next = next.saturating_duration_since(Instant::now()); - - Delay::new(time_until_next).map(move |_| Some(((), next + duration))) - }); - - Box::new(stream) -} - /// Prometheus metrics for a [`Worker`]. #[derive(Clone)] pub(crate) struct Metrics { @@ -722,7 +628,6 @@ pub(crate) struct Metrics { dht_event_received: CounterVec, handle_value_found_event_failure: Counter, known_authorities_count: Gauge, - priority_group_size: Gauge, } impl Metrics { @@ -731,7 +636,7 @@ impl Metrics { publish: register( Counter::new( "authority_discovery_times_published_total", - "Number of times authority discovery has published external addresses." + "Number of times authority discovery has published external addresses.", )?, registry, )?, @@ -739,7 +644,7 @@ impl Metrics { Gauge::new( "authority_discovery_amount_external_addresses_last_published", "Number of external addresses published when authority discovery last \ - published addresses." + published addresses.", )?, registry, )?, @@ -747,14 +652,14 @@ impl Metrics { Counter::new( "authority_discovery_authority_addresses_requested_total", "Number of times authority discovery has requested external addresses of a \ - single authority." + single authority.", )?, registry, )?, requests_pending: register( Gauge::new( "authority_discovery_authority_address_requests_pending", - "Number of pending authority address requests." + "Number of pending authority address requests.", )?, registry, )?, @@ -762,7 +667,7 @@ impl Metrics { CounterVec::new( Opts::new( "authority_discovery_dht_event_received", - "Number of dht events received by authority discovery." + "Number of dht events received by authority discovery.", ), &["name"], )?, @@ -771,21 +676,14 @@ impl Metrics { handle_value_found_event_failure: register( Counter::new( "authority_discovery_handle_value_found_event_failure", - "Number of times handling a dht value found event failed." + "Number of times handling a dht value found event failed.", )?, registry, )?, known_authorities_count: register( Gauge::new( "authority_discovery_known_authorities_count", - "Number of authorities known by authority discovery." - )?, - registry, - )?, - priority_group_size: register( - Gauge::new( - "authority_discovery_priority_group_size", - "Number of addresses passed to the peer set as a priority group." + "Number of authorities known by authority discovery.", )?, registry, )?, @@ -795,13 +693,7 @@ impl Metrics { // Helper functions for unit testing. 
#[cfg(test)] -impl Worker -where - Block: BlockT + 'static, - Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, -{ +impl Worker { pub(crate) fn inject_addresses(&mut self, authority: AuthorityId, addresses: Vec) { self.addr_cache.insert(authority, addresses); } diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index a2cd3f33e9215..e770297f6f3be 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -1,33 +1,32 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use libp2p::core::multiaddr::{Multiaddr, Protocol}; -use rand::seq::SliceRandom; use std::collections::HashMap; -use sp_authority_discovery::AuthorityId; use sc_network::PeerId; - -/// The maximum number of authority connections initialized through the authority discovery module. -/// -/// In other words the maximum size of the `authority` peerset priority group. -const MAX_NUM_AUTHORITY_CONN: usize = 10; +use sp_authority_discovery::AuthorityId; /// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. pub(super) struct AddrCache { + // The addresses found in `authority_id_to_addresses` are guaranteed to always match + // the peerids found in `peer_id_to_authority_id`. In other words, these two hashmaps + // are similar to a bi-directional map. authority_id_to_addresses: HashMap>, peer_id_to_authority_id: HashMap, } @@ -43,21 +42,50 @@ impl AddrCache { /// Inserts the given [`AuthorityId`] and [`Vec`] pair for future lookups by /// [`AuthorityId`] or [`PeerId`]. pub fn insert(&mut self, authority_id: AuthorityId, mut addresses: Vec) { - if addresses.is_empty() { - return; - } + addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); // Insert into `self.peer_id_to_authority_id`. - let peer_ids = addresses.iter() + let peer_ids = addresses + .iter() .map(|a| peer_id_from_multiaddr(a)) .filter_map(|peer_id| peer_id); - for peer_id in peer_ids { - self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()); + for peer_id in peer_ids.clone() { + let former_auth = + match self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()) { + Some(a) if a != authority_id => a, + _ => continue, + }; + + // PeerId was associated to a different authority id before. + // Remove corresponding authority from `self.authority_id_to_addresses`. 
+ let former_auth_addrs = match self.authority_id_to_addresses.get_mut(&former_auth) { + Some(a) => a, + None => { + debug_assert!(false); + continue + }, + }; + former_auth_addrs.retain(|a| peer_id_from_multiaddr(a).map_or(true, |p| p != peer_id)); } // Insert into `self.authority_id_to_addresses`. - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); - self.authority_id_to_addresses.insert(authority_id, addresses); + for former_addr in self + .authority_id_to_addresses + .insert(authority_id.clone(), addresses.clone()) + .unwrap_or_default() + { + // Must remove from `self.peer_id_to_authority_id` any PeerId formerly associated + // to that authority but that can't be found in its new addresses. + + let peer_id = match peer_id_from_multiaddr(&former_addr) { + Some(p) => p, + None => continue, + }; + + if !peer_ids.clone().any(|p| p == peer_id) { + self.peer_id_to_authority_id.remove(&peer_id); + } + } } /// Returns the number of authority IDs in the cache. @@ -66,7 +94,10 @@ impl AddrCache { } /// Returns the addresses for the given [`AuthorityId`]. - pub fn get_addresses_by_authority_id(&self, authority_id: &AuthorityId) -> Option<&Vec> { + pub fn get_addresses_by_authority_id( + &self, + authority_id: &AuthorityId, + ) -> Option<&Vec> { self.authority_id_to_addresses.get(&authority_id) } @@ -75,35 +106,13 @@ impl AddrCache { self.peer_id_to_authority_id.get(peer_id) } - /// Returns a single address for a random subset (maximum of [`MAX_NUM_AUTHORITY_CONN`]) of all - /// known authorities. - pub fn get_random_subset(&self) -> Vec { - let mut rng = rand::thread_rng(); - - let mut addresses = self - .authority_id_to_addresses - .iter() - .filter_map(|(_authority_id, addresses)| { - debug_assert!(!addresses.is_empty()); - addresses - .choose(&mut rng) - }) - .collect::>(); - - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); - addresses.dedup(); - - addresses - .choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN) - .map(|a| (**a).clone()) - .collect() - } - /// Removes all [`PeerId`]s and [`Multiaddr`]s from the cache that are not related to the given /// [`AuthorityId`]s. pub fn retain_ids(&mut self, authority_ids: &Vec) { // The below logic could be replaced by `BtreeMap::drain_filter` once it stabilized. - let authority_ids_to_remove = self.authority_id_to_addresses.iter() + let authority_ids_to_remove = self + .authority_id_to_addresses + .iter() .filter(|(id, _addresses)| !authority_ids.contains(id)) .map(|entry| entry.0) .cloned() @@ -114,7 +123,8 @@ impl AddrCache { let addresses = self.authority_id_to_addresses.remove(&authority_id_to_remove); // Remove other entries from `self.peer_id_to_authority_id`. 
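The bookkeeping above keeps the two maps acting as one bi-directional map. What that buys, as a usage sketch (`authority_a`, `authority_b`, `addr` and `peer` are hypothetical values where `addr` ends in `/p2p/<peer>`; the `keeps_consistency_between_authority_id_and_peer_id` quickcheck test below exercises the same property):

```rust
let mut cache = AddrCache::new();

cache.insert(authority_a.clone(), vec![addr.clone()]);
assert_eq!(Some(&authority_a), cache.get_authority_id_by_peer_id(&peer));

// Re-announcing the same peer under another authority moves the mapping and
// scrubs the stale address from the first authority's list, so the two maps
// never disagree.
cache.insert(authority_b.clone(), vec![addr.clone()]);
assert_eq!(Some(&authority_b), cache.get_authority_id_by_peer_id(&peer));
assert!(cache.get_addresses_by_authority_id(&authority_a).unwrap().is_empty());
```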
- let peer_ids = addresses.iter() + let peer_ids = addresses + .iter() .flatten() .map(|a| peer_id_from_multiaddr(a)) .filter_map(|peer_id| peer_id); @@ -128,10 +138,12 @@ impl AddrCache { } fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { - addr.iter().last().and_then(|protocol| if let Protocol::P2p(multihash) = protocol { - PeerId::from_multihash(multihash).ok() - } else { - None + addr.iter().last().and_then(|protocol| { + if let Protocol::P2p(multihash) = protocol { + PeerId::from_multihash(multihash).ok() + } else { + None + } }) } @@ -139,9 +151,8 @@ fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { mod tests { use super::*; - use libp2p::multihash; + use libp2p::multihash::{self, Multihash}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; - use rand::Rng; use sp_authority_discovery::{AuthorityId, AuthorityPair}; use sp_core::crypto::Pair; @@ -150,8 +161,8 @@ mod tests { struct TestAuthorityId(AuthorityId); impl Arbitrary for TestAuthorityId { - fn arbitrary(g: &mut G) -> Self { - let seed: [u8; 32] = g.gen(); + fn arbitrary(g: &mut Gen) -> Self { + let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); TestAuthorityId(AuthorityPair::from_seed_slice(&seed).unwrap().public()) } } @@ -160,12 +171,14 @@ mod tests { struct TestMultiaddr(Multiaddr); impl Arbitrary for TestMultiaddr { - fn arbitrary(g: &mut G) -> Self { - let seed: [u8; 32] = g.gen(); + fn arbitrary(g: &mut Gen) -> Self { + let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); let peer_id = PeerId::from_multihash( - multihash::wrap(multihash::Code::Sha2_256, &seed) - ).unwrap(); - let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), + ) + .unwrap(); + let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" + .parse::() .unwrap() .with(Protocol::P2p(peer_id.into())); @@ -173,6 +186,28 @@ mod tests { } } + #[derive(Clone, Debug)] + struct TestMultiaddrsSamePeerCombo(Multiaddr, Multiaddr); + + impl Arbitrary for TestMultiaddrsSamePeerCombo { + fn arbitrary(g: &mut Gen) -> Self { + let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); + let peer_id = PeerId::from_multihash( + Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), + ) + .unwrap(); + let multiaddr1 = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" + .parse::() + .unwrap() + .with(Protocol::P2p(peer_id.into())); + let multiaddr2 = "/ip6/2002:db8:0:0:0:0:0:2/tcp/30133" + .parse::() + .unwrap() + .with(Protocol::P2p(peer_id.into())); + TestMultiaddrsSamePeerCombo(multiaddr1, multiaddr2) + } + } + #[test] fn retains_only_entries_of_provided_authority_ids() { fn property( @@ -190,11 +225,6 @@ mod tests { cache.insert(second.0.clone(), vec![second.1.clone()]); cache.insert(third.0.clone(), vec![third.1.clone()]); - let subset = cache.get_random_subset(); - assert!( - subset.contains(&first.1) && subset.contains(&second.1) && subset.contains(&third.1), - "Expect initial subset to contain all authorities.", - ); assert_eq!( Some(&vec![third.1.clone()]), cache.get_addresses_by_authority_id(&third.0), @@ -208,18 +238,14 @@ mod tests { cache.retain_ids(&vec![first.0, second.0]); - let subset = cache.get_random_subset(); - assert!( - subset.contains(&first.1) || subset.contains(&second.1), - "Expected both first and second authority." 
- ); - assert!(!subset.contains(&third.1), "Did not expect address from third authority"); assert_eq!( - None, cache.get_addresses_by_authority_id(&third.0), + None, + cache.get_addresses_by_authority_id(&third.0), "Expect `get_addresses_by_authority_id` to not return `None` for third authority." ); assert_eq!( - None, cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + None, + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), "Expect `get_authority_id_by_peer_id` to return `None` for third authority." ); @@ -230,4 +256,76 @@ mod tests { .max_tests(10) .quickcheck(property as fn(_, _, _) -> TestResult) } + + #[test] + fn keeps_consistency_between_authority_id_and_peer_id() { + fn property( + authority1: TestAuthorityId, + authority2: TestAuthorityId, + multiaddr1: TestMultiaddr, + multiaddr2: TestMultiaddr, + multiaddr3: TestMultiaddrsSamePeerCombo, + ) -> TestResult { + let authority1 = authority1.0; + let authority2 = authority2.0; + let multiaddr1 = multiaddr1.0; + let multiaddr2 = multiaddr2.0; + let TestMultiaddrsSamePeerCombo(multiaddr3, multiaddr4) = multiaddr3; + + let mut cache = AddrCache::new(); + + cache.insert(authority1.clone(), vec![multiaddr1.clone()]); + cache.insert( + authority1.clone(), + vec![multiaddr2.clone(), multiaddr3.clone(), multiaddr4.clone()], + ); + + assert_eq!( + None, + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr1).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr4).unwrap()) + ); + + cache.insert(authority2.clone(), vec![multiaddr2.clone()]); + + assert_eq!( + Some(&authority2), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + ); + assert_eq!(cache.get_addresses_by_authority_id(&authority1).unwrap().len(), 2); + + cache.insert(authority2.clone(), vec![multiaddr2.clone(), multiaddr3.clone()]); + + assert_eq!( + Some(&authority2), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&authority2), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + ); + assert!(cache.get_addresses_by_authority_id(&authority1).unwrap().is_empty()); + + TestResult::passed() + } + + QuickCheck::new() + .max_tests(10) + .quickcheck(property as fn(_, _, _, _, _) -> TestResult) + } } diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index cb1f8df8a822d..f10d2751ccd35 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,84 +18,30 @@ use crate::worker::schema; -use std::{iter::FromIterator, sync::{Arc, Mutex}, task::Poll}; - -use futures::channel::mpsc::{self, channel}; -use futures::executor::{block_on, LocalPool}; -use futures::future::FutureExt; -use futures::sink::SinkExt; -use futures::task::LocalSpawn; -use libp2p::{kad, core::multiaddr, PeerId}; +use std::{ + sync::{Arc, Mutex}, + task::Poll, +}; + +use async_trait::async_trait; +use futures::{ + channel::mpsc::{self, channel}, + executor::{block_on, LocalPool}, + future::FutureExt, + sink::SinkExt, + task::LocalSpawn, +}; +use libp2p::{core::multiaddr, kad, PeerId}; use prometheus_endpoint::prometheus::default_registry; -use sp_api::{ProvideRuntimeApi, ApiRef}; +use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_core::crypto::Public; use sp_keystore::{testing::KeyStore, CryptoStore}; -use sp_runtime::traits::{Zero, Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use substrate_test_runtime_client::runtime::Block; use super::*; -#[test] -fn interval_at_with_start_now() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now(), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_secs(1), - "Expected low resolution instant interval to fire within less than a second.", - ); -} - -#[test] -fn interval_at_is_queuing_ticks() { - let start = Instant::now(); - - let interval = interval_at(start, std::time::Duration::from_millis(100)); - - // Let's wait for 200ms, thus 3 elements should be queued up (1st at 0ms, 2nd at 100ms, 3rd - // at 200ms). - std::thread::sleep(Duration::from_millis(200)); - - futures::executor::block_on(async { - interval.take(3).collect::>().await; - }); - - // Make sure we did not wait for more than 300 ms, which would imply that `at_interval` is - // not queuing ticks. - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_millis(300), - "Expect interval to /queue/ events when not polled for a while.", - ); -} - -#[test] -fn interval_at_with_initial_delay() { - let start = Instant::now(); - - let mut interval = interval_at( - std::time::Instant::now() + Duration::from_millis(100), - std::time::Duration::from_secs(10), - ); - - futures::executor::block_on(async { - interval.next().await; - }); - - assert!( - Instant::now().saturating_duration_since(start) > Duration::from_millis(100), - "Expected interval with initial delay not to fire right away.", - ); -} - #[derive(Clone)] pub(crate) struct TestApi { pub(crate) authorities: Vec, @@ -105,9 +51,7 @@ impl ProvideRuntimeApi for TestApi { type Api = RuntimeApi; fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RuntimeApi { - authorities: self.authorities.clone(), - }.into() + RuntimeApi { authorities: self.authorities.clone() }.into() } } @@ -128,6 +72,7 @@ impl HeaderBackend for TestApi { finalized_number: Zero::zero(), genesis_hash: Default::default(), number_leaves: Default::default(), + finalized_state: None, } } @@ -159,8 +104,6 @@ pub(crate) struct RuntimeApi { sp_api::mock_impl_runtime_apis! { impl AuthorityDiscoveryApi for RuntimeApi { - type Error = sp_blockchain::Error; - fn authorities(&self) -> Vec { self.authorities.clone() } @@ -171,10 +114,6 @@ sp_api::mock_impl_runtime_apis! 
{ pub enum TestNetworkEvent { GetCalled(kad::record::Key), PutCalled(kad::record::Key, Vec), - SetPriorityGroupCalled { - group_id: String, - peers: HashSet - }, } pub struct TestNetwork { @@ -184,7 +123,6 @@ pub struct TestNetwork { // vectors below. pub put_value_call: Arc)>>>, pub get_value_call: Arc>>, - pub set_priority_group_call: Arc)>>>, event_sender: mpsc::UnboundedSender, event_receiver: Option>, } @@ -200,48 +138,36 @@ impl Default for TestNetwork { let (tx, rx) = mpsc::unbounded(); TestNetwork { peer_id: PeerId::random(), - external_addresses: vec![ - "/ip6/2001:db8::/tcp/30333" - .parse().unwrap(), - ], + external_addresses: vec!["/ip6/2001:db8::/tcp/30333".parse().unwrap()], put_value_call: Default::default(), get_value_call: Default::default(), - set_priority_group_call: Default::default(), event_sender: tx, event_receiver: Some(rx), } } } +#[async_trait] impl NetworkProvider for TestNetwork { - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group_call - .lock() - .unwrap() - .push((group_id.clone(), peers.clone())); - self.event_sender.clone().unbounded_send(TestNetworkEvent::SetPriorityGroupCalled { - group_id, - peers, - }).unwrap(); - Ok(()) - } fn put_value(&self, key: kad::record::Key, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); - self.event_sender.clone().unbounded_send(TestNetworkEvent::PutCalled(key, value)).unwrap(); + self.event_sender + .clone() + .unbounded_send(TestNetworkEvent::PutCalled(key, value)) + .unwrap(); } fn get_value(&self, key: &kad::record::Key) { self.get_value_call.lock().unwrap().push(key.clone()); - self.event_sender.clone().unbounded_send(TestNetworkEvent::GetCalled(key.clone())).unwrap(); + self.event_sender + .clone() + .unbounded_send(TestNetworkEvent::GetCalled(key.clone())) + .unwrap(); } } impl NetworkStateInfo for TestNetwork { fn local_peer_id(&self) -> PeerId { - self.peer_id.clone() + self.peer_id } fn external_addresses(&self) -> Vec { @@ -255,9 +181,8 @@ async fn build_dht_event( key_store: &KeyStore, ) -> (libp2p::kad::record::Key, Vec) { let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { - addresses: addresses.into_iter().map(|a| a.to_vec()).collect() - }.encode(&mut serialized_addresses) + schema::AuthorityAddresses { addresses: addresses.into_iter().map(|a| a.to_vec()).collect() } + .encode(&mut serialized_addresses) .map_err(Error::EncodingProto) .unwrap(); @@ -268,16 +193,12 @@ async fn build_dht_event( serialized_addresses.as_slice(), ) .await - .map_err(|_| Error::Signing) + .unwrap() .unwrap(); let mut signed_addresses = vec![]; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses) - .map_err(Error::EncodingProto) + schema::SignedAuthorityAddresses { addresses: serialized_addresses.clone(), signature } + .encode(&mut signed_addresses) .unwrap(); let key = hash_authority_id(&public_key.to_raw_vec()); @@ -290,9 +211,7 @@ fn new_registers_metrics() { let (_dht_event_tx, dht_event_rx) = mpsc::channel(1000); let network: Arc = Arc::new(Default::default()); let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); + let test_api = Arc::new(TestApi { authorities: vec![] }); let registry = prometheus_endpoint::Registry::new(); @@ -301,10 +220,10 @@ fn new_registers_metrics() { from_service, test_api, network.clone(), - vec![], Box::pin(dht_event_rx), - 
Role::Authority(key_store.into()), + Role::PublishAndDiscover(key_store.into()), Some(registry.clone()), + Default::default(), ); assert!(registry.gather().len() > 0); @@ -330,10 +249,10 @@ fn triggers_dht_get_query() { from_service, test_api, network.clone(), - vec![], Box::pin(dht_event_rx), - Role::Authority(key_store.into()), + Role::PublishAndDiscover(key_store.into()), None, + Default::default(), ); futures::executor::block_on(async { @@ -354,92 +273,74 @@ fn publish_discover_cycle() { let (_dht_event_tx, dht_event_rx) = channel(1000); let network: Arc = Arc::new(Default::default()); - let node_a_multiaddr = { - let peer_id = network.local_peer_id(); - let address = network.external_addresses().pop().unwrap(); - - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }; let key_store = KeyStore::new(); - let _ = pool.spawner().spawn_local_obj(async move { - let node_a_public = key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) - .await - .unwrap(); - let test_api = Arc::new(TestApi { - authorities: vec![node_a_public.into()], - }); - - let (_to_worker, from_service) = mpsc::channel(0); - let mut worker = Worker::new( - from_service, - test_api, - network.clone(), - vec![], - Box::pin(dht_event_rx), - Role::Authority(key_store.into()), - None, - ); - - worker.publish_ext_addresses().await.unwrap(); - - // Expect authority discovery to put a new record onto the dht. - assert_eq!(network.put_value_call.lock().unwrap().len(), 1); - - let dht_event = { - let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); - sc_network::DhtEvent::ValueFound(vec![(key, value)]) - }; - - // Node B discovering node A's address. - - let (mut dht_event_tx, dht_event_rx) = channel(1000); - let test_api = Arc::new(TestApi { - // Make sure node B identifies node A as an authority. - authorities: vec![node_a_public.into()], - }); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - - let (_to_worker, from_service) = mpsc::channel(0); - let mut worker = Worker::new( - from_service, - test_api, - network.clone(), - vec![], - Box::pin(dht_event_rx), - Role::Authority(key_store.into()), - None, - ); - - dht_event_tx.try_send(dht_event.clone()).unwrap(); - - worker.refill_pending_lookups_queue().await.unwrap(); - worker.start_new_lookups(); - - // Make authority discovery handle the event. - worker.handle_dht_event(dht_event).await; - - worker.set_priority_group().unwrap(); - - // Expect authority discovery to set the priority set. - assert_eq!(network.set_priority_group_call.lock().unwrap().len(), 1); - - assert_eq!( - network.set_priority_group_call.lock().unwrap()[0], - ( - "authorities".to_string(), - HashSet::from_iter(vec![node_a_multiaddr.clone()].into_iter()) - ) - ); - }.boxed_local().into()); + let _ = pool.spawner().spawn_local_obj( + async move { + let node_a_public = key_store + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .await + .unwrap(); + let test_api = Arc::new(TestApi { authorities: vec![node_a_public.into()] }); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + Box::pin(dht_event_rx), + Role::PublishAndDiscover(key_store.into()), + None, + Default::default(), + ); + + worker.publish_ext_addresses(false).await.unwrap(); + + // Expect authority discovery to put a new record onto the dht. 
+ assert_eq!(network.put_value_call.lock().unwrap().len(), 1); + + let dht_event = { + let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); + sc_network::DhtEvent::ValueFound(vec![(key, value)]) + }; + + // Node B discovering node A's address. + + let (mut dht_event_tx, dht_event_rx) = channel(1000); + let test_api = Arc::new(TestApi { + // Make sure node B identifies node A as an authority. + authorities: vec![node_a_public.into()], + }); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + Box::pin(dht_event_rx), + Role::PublishAndDiscover(key_store.into()), + None, + Default::default(), + ); + + dht_event_tx.try_send(dht_event.clone()).unwrap(); + + worker.refill_pending_lookups_queue().await.unwrap(); + worker.start_new_lookups(); + + // Make authority discovery handle the event. + worker.handle_dht_event(dht_event).await; + } + .boxed_local() + .into(), + ); pool.run(); } + /// Don't terminate when sender side of service channel is dropped. Terminate when network event /// stream terminates. #[test] @@ -447,20 +348,19 @@ fn terminate_when_event_stream_terminates() { let (dht_event_tx, dht_event_rx) = channel(1000); let network: Arc = Arc::new(Default::default()); let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); + let test_api = Arc::new(TestApi { authorities: vec![] }); let (to_worker, from_service) = mpsc::channel(0); let worker = Worker::new( from_service, test_api, network.clone(), - vec![], Box::pin(dht_event_rx), - Role::Authority(key_store.into()), + Role::PublishAndDiscover(key_store.into()), None, - ).run(); + Default::default(), + ) + .run(); futures::pin_mut!(worker); block_on(async { @@ -469,7 +369,8 @@ fn terminate_when_event_stream_terminates() { // Drop sender side of service channel. 
drop(to_worker); assert_eq!( - Poll::Pending, futures::poll!(&mut worker), + Poll::Pending, + futures::poll!(&mut worker), "Expect the authority discovery module not to terminate once the \ sender side of the service channel is closed.", ); @@ -479,11 +380,13 @@ fn terminate_when_event_stream_terminates() { drop(dht_event_tx); assert_eq!( - Poll::Ready(()), futures::poll!(&mut worker), + Poll::Ready(()), + futures::poll!(&mut worker), "Expect the authority discovery module to terminate once the \ sending side of the dht event channel is closed.", ); - });} + }); +} #[test] fn dont_stop_polling_dht_event_stream_after_bogus_event() { @@ -491,14 +394,13 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { let peer_id = PeerId::random(); let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) + address.with(multiaddr::Protocol::P2p(peer_id.into())) }; let remote_key_store = KeyStore::new(); - let remote_public_key: AuthorityId = block_on( - remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None), - ).unwrap().into(); + let remote_public_key: AuthorityId = + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap() + .into(); let (mut dht_event_tx, dht_event_rx) = channel(1); let (network, mut network_events) = { @@ -508,9 +410,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { }; let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![remote_public_key.clone()], - }); + let test_api = Arc::new(TestApi { authorities: vec![remote_public_key.clone()] }); let mut pool = LocalPool::new(); let (mut to_worker, from_service) = mpsc::channel(1); @@ -518,40 +418,45 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { from_service, test_api, network.clone(), - vec![], Box::pin(dht_event_rx), - Role::Authority(Arc::new(key_store)), + Role::PublishAndDiscover(Arc::new(key_store)), None, + Default::default(), ); // Spawn the authority discovery to make sure it is polled independently. // // As this is a local pool, only one future at a time will have the CPU and // can make progress until the future returns `Pending`. - let _ = pool.spawner().spawn_local_obj(async move { - // Refilling `pending_lookups` only happens every X minutes. Fast - // forward by calling `refill_pending_lookups_queue` directly. - worker.refill_pending_lookups_queue().await.unwrap(); - worker.run().await - }.boxed_local().into()); + let _ = pool.spawner().spawn_local_obj( + async move { + // Refilling `pending_lookups` only happens every X minutes. Fast + // forward by calling `refill_pending_lookups_queue` directly. + worker.refill_pending_lookups_queue().await.unwrap(); + worker.run().await + } + .boxed_local() + .into(), + ); pool.run_until(async { // Assert worker to trigger a lookup for the one and only authority. - assert!(matches!( - network_events.next().await, - Some(TestNetworkEvent::GetCalled(_)) - )); + assert!(matches!(network_events.next().await, Some(TestNetworkEvent::GetCalled(_)))); // Send an event that should generate an error - dht_event_tx.send(DhtEvent::ValueFound(Default::default())).await + dht_event_tx + .send(DhtEvent::ValueFound(Default::default())) + .await .expect("Channel has capacity of 1."); // Make previously triggered lookup succeed. 
let dht_event = { let (key, value) = build_dht_event( vec![remote_multiaddr.clone()], - remote_public_key.clone(), &remote_key_store, - ).await; + remote_public_key.clone(), + &remote_key_store, + ) + .await; sc_network::DhtEvent::ValueFound(vec![(key, value)]) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -559,107 +464,30 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { // Expect authority discovery to function normally, now knowing the // address for the remote node. let (sender, addresses) = futures::channel::oneshot::channel(); - to_worker.send(ServicetoWorkerMsg::GetAddressesByAuthorityId( - remote_public_key, - sender, - )).await.expect("Channel has capacity of 1."); + to_worker + .send(ServicetoWorkerMsg::GetAddressesByAuthorityId(remote_public_key, sender)) + .await + .expect("Channel has capacity of 1."); assert_eq!(Some(vec![remote_multiaddr]), addresses.await.unwrap()); }); } -/// In the scenario of a validator publishing the address of its sentry node to -/// the DHT, said sentry node should not add its own Multiaddr to the -/// peerset "authority" priority group. -#[test] -fn never_add_own_address_to_priority_group() { - let validator_key_store = KeyStore::new(); - let validator_public = block_on(validator_key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap(); - - let sentry_network: Arc = Arc::new(Default::default()); - - let sentry_multiaddr = { - let peer_id = sentry_network.local_peer_id(); - let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); - - address.with(multiaddr::Protocol::P2p(peer_id.into())) - }; - - // Address of some other sentry node of `validator`. - let random_multiaddr = { - let peer_id = PeerId::random(); - let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }; - - let dht_event = block_on(build_dht_event( - vec![sentry_multiaddr, random_multiaddr.clone()], - validator_public.into(), - &validator_key_store, - )); - - let (_dht_event_tx, dht_event_rx) = channel(1); - let sentry_test_api = Arc::new(TestApi { - // Make sure the sentry node identifies its validator as an authority. - authorities: vec![validator_public.into()], - }); - - let (_to_worker, from_service) = mpsc::channel(0); - let mut sentry_worker = Worker::new( - from_service, - sentry_test_api, - sentry_network.clone(), - vec![], - Box::pin(dht_event_rx), - Role::Sentry, - None, - ); - - block_on(sentry_worker.refill_pending_lookups_queue()).unwrap(); - sentry_worker.start_new_lookups(); - - sentry_worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); - sentry_worker.set_priority_group().unwrap(); - - assert_eq!( - sentry_network.set_priority_group_call.lock().unwrap().len(), 1, - "Expect authority discovery to set the priority set.", - ); - - assert_eq!( - sentry_network.set_priority_group_call.lock().unwrap()[0], - ( - "authorities".to_string(), - HashSet::from_iter(vec![random_multiaddr.clone()].into_iter(),) - ), - "Expect authority discovery to only add `random_multiaddr`." 
- ); -} - #[test] fn limit_number_of_addresses_added_to_cache_per_authority() { let remote_key_store = KeyStore::new(); - let remote_public = block_on(remote_key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap(); + let remote_public = + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap(); - let addresses = (0..100).map(|_| { - let peer_id = PeerId::random(); - let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }).collect(); + let addresses = (0..100) + .map(|_| { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + address.with(multiaddr::Protocol::P2p(peer_id.into())) + }) + .collect(); - let dht_event = block_on(build_dht_event( - addresses, - remote_public.into(), - &remote_key_store, - )); + let dht_event = block_on(build_dht_event(addresses, remote_public.into(), &remote_key_store)); let (_dht_event_tx, dht_event_rx) = channel(1); @@ -668,10 +496,10 @@ fn limit_number_of_addresses_added_to_cache_per_authority() { from_service, Arc::new(TestApi { authorities: vec![remote_public.into()] }), Arc::new(TestNetwork::default()), - vec![], Box::pin(dht_event_rx), - Role::Sentry, + Role::Discover, None, + Default::default(), ); block_on(worker.refill_pending_lookups_queue()).unwrap(); @@ -680,16 +508,20 @@ fn limit_number_of_addresses_added_to_cache_per_authority() { worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); assert_eq!( MAX_ADDRESSES_PER_AUTHORITY, - worker.addr_cache.get_addresses_by_authority_id(&remote_public.into()).unwrap().len(), + worker + .addr_cache + .get_addresses_by_authority_id(&remote_public.into()) + .unwrap() + .len(), ); } #[test] fn do_not_cache_addresses_without_peer_id() { let remote_key_store = KeyStore::new(); - let remote_public = block_on(remote_key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap(); + let remote_public = + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap(); let multiaddr_with_peer_id = { let peer_id = PeerId::random(); @@ -698,22 +530,17 @@ fn do_not_cache_addresses_without_peer_id() { address.with(multiaddr::Protocol::P2p(peer_id.into())) }; - let multiaddr_without_peer_id: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + let multiaddr_without_peer_id: Multiaddr = + "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); let dht_event = block_on(build_dht_event( - vec![ - multiaddr_with_peer_id.clone(), - multiaddr_without_peer_id, - ], + vec![multiaddr_with_peer_id.clone(), multiaddr_without_peer_id], remote_public.into(), &remote_key_store, )); let (_dht_event_tx, dht_event_rx) = channel(1); - let local_test_api = Arc::new(TestApi { - // Make sure the sentry node identifies its validator as an authority. 
- authorities: vec![remote_public.into()], - }); + let local_test_api = Arc::new(TestApi { authorities: vec![remote_public.into()] }); let local_network: Arc = Arc::new(Default::default()); let local_key_store = KeyStore::new(); @@ -722,10 +549,10 @@ fn do_not_cache_addresses_without_peer_id() { from_service, local_test_api, local_network.clone(), - vec![], Box::pin(dht_event_rx), - Role::Authority(Arc::new(local_key_store)), + Role::PublishAndDiscover(Arc::new(local_key_store)), None, + Default::default(), ); block_on(local_worker.refill_pending_lookups_queue()).unwrap(); @@ -753,14 +580,12 @@ fn addresses_to_publish_adds_p2p() { let (_to_worker, from_service) = mpsc::channel(0); let worker = Worker::new( from_service, - Arc::new(TestApi { - authorities: vec![], - }), + Arc::new(TestApi { authorities: vec![] }), network.clone(), - vec![], Box::pin(dht_event_rx), - Role::Authority(Arc::new(KeyStore::new())), + Role::PublishAndDiscover(Arc::new(KeyStore::new())), Some(prometheus_endpoint::Registry::new()), + Default::default(), ); assert!( @@ -780,26 +605,26 @@ fn addresses_to_publish_respects_existing_p2p_protocol() { let network: Arc = Arc::new(TestNetwork { external_addresses: vec![ "/ip6/2001:db8::/tcp/30333/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC" - .parse().unwrap(), + .parse() + .unwrap(), ], - .. Default::default() + ..Default::default() }); let (_to_worker, from_service) = mpsc::channel(0); let worker = Worker::new( from_service, - Arc::new(TestApi { - authorities: vec![], - }), + Arc::new(TestApi { authorities: vec![] }), network.clone(), - vec![], Box::pin(dht_event_rx), - Role::Authority(Arc::new(KeyStore::new())), + Role::PublishAndDiscover(Arc::new(KeyStore::new())), Some(prometheus_endpoint::Registry::new()), + Default::default(), ); assert_eq!( - network.external_addresses, worker.addresses_to_publish().collect::>(), + network.external_addresses, + worker.addresses_to_publish().collect::>(), "Expected Multiaddr from `TestNetwork` to not be altered.", ); } @@ -810,21 +635,21 @@ fn lookup_throttling() { let peer_id = PeerId::random(); let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) + address.with(multiaddr::Protocol::P2p(peer_id.into())) }; let remote_key_store = KeyStore::new(); - let remote_public_keys: Vec = (0..20).map(|_| { - block_on(remote_key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap().into() - }).collect(); - let remote_hash_to_key = remote_public_keys.iter() + let remote_public_keys: Vec = (0..20) + .map(|_| { + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap() + .into() + }) + .collect(); + let remote_hash_to_key = remote_public_keys + .iter() .map(|k| (hash_authority_id(k.as_ref()), k.clone())) .collect::>(); - let (mut dht_event_tx, dht_event_rx) = channel(1); let (_to_worker, from_service) = mpsc::channel(0); let mut network = TestNetwork::default(); @@ -834,65 +659,70 @@ fn lookup_throttling() { from_service, Arc::new(TestApi { authorities: remote_public_keys.clone() }), network.clone(), - vec![], dht_event_rx.boxed(), - Role::Sentry, + Role::Discover, Some(default_registry().clone()), + Default::default(), ); let mut pool = LocalPool::new(); let metrics = worker.metrics.clone().unwrap(); - let _ = pool.spawner().spawn_local_obj(async move { - // Refilling `pending_lookups` only happens every X minutes. Fast - // forward by calling `refill_pending_lookups_queue` directly. 
- worker.refill_pending_lookups_queue().await.unwrap(); - worker.run().await - }.boxed_local().into()); + let _ = pool.spawner().spawn_local_obj( + async move { + // Refilling `pending_lookups` only happens every X minutes. Fast + // forward by calling `refill_pending_lookups_queue` directly. + worker.refill_pending_lookups_queue().await.unwrap(); + worker.run().await + } + .boxed_local() + .into(), + ); - pool.run_until(async { - // Assert worker to trigger MAX_IN_FLIGHT_LOOKUPS lookups. - for _ in 0..MAX_IN_FLIGHT_LOOKUPS { + pool.run_until( + async { + // Assert worker to trigger MAX_IN_FLIGHT_LOOKUPS lookups. + for _ in 0..MAX_IN_FLIGHT_LOOKUPS { + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + } + assert_eq!( + metrics.requests_pending.get(), + (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS) as u64 + ); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); + + // Make first lookup succeed. + let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); + let remote_key: AuthorityId = remote_hash_to_key.get(&remote_hash).unwrap().clone(); + let dht_event = { + let (key, value) = + build_dht_event(vec![remote_multiaddr.clone()], remote_key, &remote_key_store) + .await; + sc_network::DhtEvent::ValueFound(vec![(key, value)]) + }; + dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); + + // Assert worker to trigger another lookup. assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert_eq!( + metrics.requests_pending.get(), + (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 1) as u64 + ); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); + + // Make second one fail. + let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); + let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); + dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); + + // Assert worker to trigger another lookup. + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert_eq!( + metrics.requests_pending.get(), + (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 2) as u64 + ); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); } - assert_eq!( - metrics.requests_pending.get(), - (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS) as u64 - ); - assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); - - // Make first lookup succeed. - let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let remote_key: AuthorityId = remote_hash_to_key.get(&remote_hash).unwrap().clone(); - let dht_event = { - let (key, value) = build_dht_event( - vec![remote_multiaddr.clone()], - remote_key, - &remote_key_store - ).await; - sc_network::DhtEvent::ValueFound(vec![(key, value)]) - }; - dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); - - // Assert worker to trigger another lookup. - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); - assert_eq!( - metrics.requests_pending.get(), - (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 1) as u64 - ); - assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); - - // Make second one fail. 
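The throttling behaviour this test pins down reduces to a small invariant: at most `MAX_IN_FLIGHT_LOOKUPS` lookups are outstanding at once, and a finished lookup, whether it ends in `ValueFound` or `ValueNotFound`, frees a slot for the next one. A hedged, standalone restatement of that bookkeeping with simplified stand-in types:

```rust
// Illustrative constant; the real value lives in the worker module.
const MAX_IN_FLIGHT: usize = 8;

struct Throttle {
    pending: Vec<u32>, // lookups not yet started (stand-in for authority id hashes)
    in_flight: usize,  // lookups currently outstanding
}

impl Throttle {
    /// Start lookups until the in-flight cap is reached; returns what was started.
    fn start_new_lookups(&mut self) -> Vec<u32> {
        let mut started = Vec::new();
        while self.in_flight < MAX_IN_FLIGHT {
            match self.pending.pop() {
                Some(target) => {
                    self.in_flight += 1;
                    started.push(target);
                },
                None => break,
            }
        }
        started
    }

    /// Called for both `ValueFound` and `ValueNotFound`: frees a slot, refills.
    fn lookup_finished(&mut self) -> Vec<u32> {
        self.in_flight -= 1;
        self.start_new_lookups()
    }
}

fn main() {
    let mut throttle = Throttle { pending: (0..20).collect(), in_flight: 0 };
    assert_eq!(throttle.start_new_lookups().len(), MAX_IN_FLIGHT);
    // One result comes back; exactly one new lookup replaces it.
    assert_eq!(throttle.lookup_finished().len(), 1);
}
```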
- let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); - dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); - - // Assert worker to trigger another lookup. - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); - assert_eq!( - metrics.requests_pending.get(), - (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 2) as u64 - ); - assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); - }.boxed_local()); + .boxed_local(), + ); } diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 1b1d8921bcfb3..469df55cf0233 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-basic-authorship" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,25 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -futures = "0.3.4" +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.9" futures-timer = "3.0.1" log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -sc-proposer-metrics = { version = "0.8.0", path = "../proposer-metrics" } -tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } +sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../client/transaction-pool/api" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +sc-proposer-metrics = { version = "0.9.0", path = "../proposer-metrics" } [dev-dependencies] -sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -parking_lot = "0.10.0" +parking_lot =
"0.11.1" diff --git a/client/basic-authorship/README.md b/client/basic-authorship/README.md index 1a20593c09eaa..d29ce258e5134 100644 --- a/client/basic-authorship/README.md +++ b/client/basic-authorship/README.md @@ -20,7 +20,6 @@ let future = proposer.propose( Default::default(), Default::default(), Duration::from_secs(2), - RecordProof::Yes, ); // We wait until the proposition is performed. @@ -29,4 +28,4 @@ println!("Generated block: {:?}", block.block); ``` -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 89edfac0d4e98..144a3ab6850ff 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,30 +20,45 @@ // FIXME #1021 move this into sp-consensus -use std::{pin::Pin, time, sync::Arc}; +use codec::{Decode, Encode}; +use futures::{ + channel::oneshot, + future, + future::{Future, FutureExt}, + select, +}; +use log::{debug, error, info, trace, warn}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_client_api::backend; -use codec::Decode; -use sp_consensus::{evaluation, Proposal, RecordProof}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; +use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; +use sp_consensus::{ + evaluation, DisableProofRecording, EnableProofRecording, ProofRecording, Proposal, +}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; -use log::{error, info, debug, trace, warn}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Hash as HashT, Header as HeaderT, DigestFor, BlakeTwo256}, + traits::{BlakeTwo256, Block as BlockT, DigestFor, Hash as HashT, Header as HeaderT}, }; -use sp_transaction_pool::{TransactionPool, InPoolTransaction}; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; -use sp_api::{ProvideRuntimeApi, ApiExt}; -use futures::{future, future::{Future, FutureExt}, channel::oneshot, select}; -use sp_blockchain::{HeaderBackend, ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed}; -use std::marker::PhantomData; +use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_proposer_metrics::MetricsLink as PrometheusMetrics; -/// Proposer factory. -pub struct ProposerFactory<A, B, C> { +/// Default block size limit in bytes used by [`Proposer`]. +/// +/// Can be overwritten by [`ProposerFactory::set_default_block_size_limit`]. +/// +/// Be aware that there is also an upper packet size on what the networking code +/// will accept. If the block doesn't fit in such a package, it can not be +/// transferred to other nodes. +pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512; + +/// [`Proposer`] factory. +pub struct ProposerFactory<A, B, C, PR> { spawn_handle: Box<dyn SpawnNamed>, /// The client instance.
client: Arc<C>, @@ -51,49 +66,114 @@ pub struct ProposerFactory<A, B, C> { transaction_pool: Arc<A>, /// Prometheus Link, metrics: PrometheusMetrics, - /// phantom member to pin the `Backend` type. - _phantom: PhantomData<B>, + /// The default block size limit. + /// + /// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size + /// limit will be used. + default_block_size_limit: usize, + telemetry: Option<TelemetryHandle>, + /// When estimating the block size, should the proof be included? + include_proof_in_block_size_estimation: bool, + /// phantom member to pin the `Backend`/`ProofRecording` type. + _phantom: PhantomData<(B, PR)>, } -impl<A, B, C> ProposerFactory<A, B, C> { +impl<A, B, C> ProposerFactory<A, B, C, DisableProofRecording> { + /// Create a new proposer factory. + /// + /// Proof recording will be disabled when using proposers built by this instance to build + /// blocks. pub fn new( spawn_handle: impl SpawnNamed + 'static, client: Arc<C>, transaction_pool: Arc<A>, prometheus: Option<&PrometheusRegistry>, + telemetry: Option<TelemetryHandle>, ) -> Self { ProposerFactory { spawn_handle: Box::new(spawn_handle), + transaction_pool, + metrics: PrometheusMetrics::new(prometheus), + default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT, + telemetry, + client, + include_proof_in_block_size_estimation: false, + _phantom: PhantomData, + } + } +} + +impl<A, B, C> ProposerFactory<A, B, C, EnableProofRecording> { + /// Create a new proposer factory with proof recording enabled. + /// + /// Each proposer created by this instance will record a proof while building a block. + /// + /// This will also include the proof into the estimation of the block size. This can be disabled + /// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`]. + pub fn with_proof_recording( + spawn_handle: impl SpawnNamed + 'static, + client: Arc<C>, + transaction_pool: Arc<A>, + prometheus: Option<&PrometheusRegistry>, + telemetry: Option<TelemetryHandle>, + ) -> Self { + ProposerFactory { client, + spawn_handle: Box::new(spawn_handle), transaction_pool, metrics: PrometheusMetrics::new(prometheus), + default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT, + telemetry, + include_proof_in_block_size_estimation: true, _phantom: PhantomData, } } + + /// Disable the proof inclusion when estimating the block size. + pub fn disable_proof_in_block_size_estimation(&mut self) { + self.include_proof_in_block_size_estimation = false; + } } -impl<A, B, Block, C> ProposerFactory<A, B, C> - where - A: TransactionPool<Block = Block> + 'static, - B: backend::Backend<Block> + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider<B, Block, C> + HeaderBackend<Block> + ProvideRuntimeApi<Block> - + Send + Sync + 'static, - C::Api: ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>> - + BlockBuilderApi<Block>, +impl<A, B, C, PR> ProposerFactory<A, B, C, PR> { + /// Set the default block size limit in bytes. + /// + /// The default value for the block size limit is: + /// [`DEFAULT_BLOCK_SIZE_LIMIT`]. + /// + /// If there is no block size limit passed to [`sp_consensus::Proposer::propose`], this value + /// will be used.
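The `PR` parameter threaded through the factory above acts as a type-state marker: whether proofs are recorded is decided by the constructor used (`new` vs. `with_proof_recording`) and is then fixed in the type. A minimal sketch of the pattern, with `Disabled`/`Enabled` as illustrative stand-ins for `DisableProofRecording`/`EnableProofRecording`:

```rust
use std::marker::PhantomData;

// Stand-in marker types for the proof-recording choice.
struct Disabled;
struct Enabled;

struct Factory<PR> {
    _marker: PhantomData<PR>,
}

impl Factory<Disabled> {
    // Mirrors `ProposerFactory::new`: no proof recording.
    fn new() -> Self {
        Factory { _marker: PhantomData }
    }
}

impl Factory<Enabled> {
    // Mirrors `ProposerFactory::with_proof_recording`.
    fn with_proof_recording() -> Self {
        Factory { _marker: PhantomData }
    }
}

fn main() {
    let _plain: Factory<Disabled> = Factory::new();
    let _recording: Factory<Enabled> = Factory::with_proof_recording();
}
```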
+ pub fn set_default_block_size_limit(&mut self, limit: usize) { + self.default_block_size_limit = limit; + } +} + +impl<A, B, Block, C, PR> ProposerFactory<A, B, C, PR> +where + A: TransactionPool<Block = Block> + 'static, + B: backend::Backend<Block> + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider<B, Block, C> + + HeaderBackend<Block> + + ProvideRuntimeApi<Block> + + Send + + Sync + + 'static, + C::Api: + ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>> + BlockBuilderApi<Block>, { - pub fn init_with_now( + fn init_with_now( &mut self, parent_header: &<Block as BlockT>::Header, now: Box<dyn Fn() -> time::Instant + Send + Sync>, - ) -> Proposer<B, Block, C, A> { + ) -> Proposer<B, Block, C, A, PR> { let parent_hash = parent_header.hash(); let id = BlockId::hash(parent_hash); info!("🙌 Starting consensus session on top of parent {:?}", parent_hash); - let proposer = Proposer { + let proposer = Proposer::<_, _, _, _, PR> { spawn_handle: self.spawn_handle.clone(), client: self.client.clone(), parent_hash, @@ -102,38 +182,42 @@ impl<A, B, Block, C> ProposerFactory<A, B, C>
-pub struct Proposer { +pub struct Proposer { spawn_handle: Box, client: Arc, parent_hash: ::Hash, @@ -142,99 +226,112 @@ pub struct Proposer { transaction_pool: Arc, now: Box time::Instant + Send + Sync>, metrics: PrometheusMetrics, - _phantom: PhantomData, + default_block_size_limit: usize, + include_proof_in_block_size_estimation: bool, + telemetry: Option, + _phantom: PhantomData<(B, PR)>, } -impl sp_consensus::Proposer for - Proposer - where - A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, +impl sp_consensus::Proposer for Proposer +where + A: TransactionPool + 'static, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: + ApiExt> + BlockBuilderApi, + PR: ProofRecording, { type Transaction = backend::TransactionFor; - type Proposal = Pin, Self::Error> - > + Send>>; + type Proposal = Pin< + Box< + dyn Future, Self::Error>> + + Send, + >, + >; type Error = sp_blockchain::Error; + type ProofRecording = PR; + type Proof = PR::Proof; fn propose( self, inherent_data: InherentData, inherent_digests: DigestFor, max_duration: time::Duration, - record_proof: RecordProof, + block_size_limit: Option, ) -> Self::Proposal { let (tx, rx) = oneshot::channel(); let spawn_handle = self.spawn_handle.clone(); - spawn_handle.spawn_blocking("basic-authorship-proposer", Box::pin(async move { - // leave some time for evaluation and block finalization (33%) - let deadline = (self.now)() + max_duration - max_duration / 3; - let res = self.propose_with( - inherent_data, - inherent_digests, - deadline, - record_proof, - ).await; - if tx.send(res).is_err() { - trace!("Could not send block production result to proposer!"); - } - })); + spawn_handle.spawn_blocking( + "basic-authorship-proposer", + Box::pin(async move { + // leave some time for evaluation and block finalization (33%) + let deadline = (self.now)() + max_duration - max_duration / 3; + let res = self + .propose_with(inherent_data, inherent_digests, deadline, block_size_limit) + .await; + if tx.send(res).is_err() { + trace!("Could not send block production result to proposer!"); + } + }), + ); - async move { - match rx.await { - Ok(x) => x, - Err(err) => Err(sp_blockchain::Error::Msg(err.to_string())) - } - }.boxed() + async move { rx.await? }.boxed() } } -impl Proposer - where - A: TransactionPool, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, +impl Proposer +where + A: TransactionPool, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: + ApiExt> + BlockBuilderApi, + PR: ProofRecording, { async fn propose_with( self, inherent_data: InherentData, inherent_digests: DigestFor, deadline: time::Instant, - record_proof: RecordProof, - ) -> Result>, sp_blockchain::Error> { + block_size_limit: Option, + ) -> Result, PR::Proof>, sp_blockchain::Error> + { /// If the block is full we will attempt to push at most /// this number of transactions before quitting for real. /// It allows us to increase block utilization. 
const MAX_SKIPPED_TRANSACTIONS: usize = 8; - let mut block_builder = self.client.new_block_at( - &self.parent_id, - inherent_digests, - record_proof, - )?; + let mut block_builder = + self.client.new_block_at(&self.parent_id, inherent_digests, PR::ENABLED)?; for inherent in block_builder.create_inherents(inherent_data)? { match block_builder.push(inherent) { - Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => - warn!("⚠️ Dropping non-mandatory inherent from overweight block."), + Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { + warn!("⚠️ Dropping non-mandatory inherent from overweight block.") + }, Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => { - error!("❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."); + error!( + "❌️ Mandatory inherent extrinsic returned error. Block cannot be produced." + ); Err(ApplyExtrinsicFailed(Validity(e)))? - } + }, Err(e) => { warn!("❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e); - } - Ok(_) => {} + }, + Ok(_) => {}, } } @@ -244,7 +341,8 @@ impl Proposer let mut unqueue_invalid = Vec::new(); let mut t1 = self.transaction_pool.ready_at(self.parent_number).fuse(); - let mut t2 = futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse(); + let mut t2 = + futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse(); let pending_iterator = select! { res = t1 => res, @@ -258,26 +356,50 @@ impl Proposer }, }; + let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit); + debug!("Attempting to push transactions from the pool."); debug!("Pool status: {:?}", self.transaction_pool.status()); + let mut transaction_pushed = false; + let mut hit_block_size_limit = false; + for pending_tx in pending_iterator { if (self.now)() > deadline { debug!( "Consensus deadline reached when pushing block transactions, \ proceeding with proposing." 
); - break; + break } let pending_tx_data = pending_tx.data().clone(); let pending_tx_hash = pending_tx.hash().clone(); + + let block_size = + block_builder.estimate_block_size(self.include_proof_in_block_size_estimation); + if block_size + pending_tx_data.encoded_size() > block_size_limit { + if skipped < MAX_SKIPPED_TRANSACTIONS { + skipped += 1; + debug!( + "Transaction would overflow the block size limit, \ + but will try {} more transactions before quitting.", + MAX_SKIPPED_TRANSACTIONS - skipped, + ); + continue + } else { + debug!("Reached block size limit, proceeding with proposing."); + hit_block_size_limit = true; + break + } + } + trace!("[{:?}] Pushing to the block.", pending_tx_hash); match sc_block_builder::BlockBuilder::push(&mut block_builder, pending_tx_data) { Ok(()) => { + transaction_pushed = true; debug!("[{:?}] Pushed to the block.", pending_tx_hash); - } - Err(ApplyExtrinsicFailed(Validity(e))) - if e.exhausted_resources() => { + }, + Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( @@ -286,35 +408,41 @@ impl Proposer ); } else { debug!("Block is full, proceed with proposing."); - break; + break } - } + }, Err(e) if skipped > 0 => { trace!( "[{:?}] Ignoring invalid transaction when skipping: {}", pending_tx_hash, e ); - } + }, Err(e) => { debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); unqueue_invalid.push(pending_tx_hash); - } + }, } } + if hit_block_size_limit && !transaction_pushed { + warn!( + "Hit block size limit of `{}` without including any transaction!", + block_size_limit, + ); + } + self.transaction_pool.remove_invalid(&unqueue_invalid); let (block, storage_changes, proof) = block_builder.build()?.into_inner(); - self.metrics.report( - |metrics| { - metrics.number_of_transactions.set(block.extrinsics().len() as u64); - metrics.block_constructed.observe(block_timer.elapsed().as_secs_f64()); - } - ); + self.metrics.report(|metrics| { + metrics.number_of_transactions.set(block.extrinsics().len() as u64); + metrics.block_constructed.observe(block_timer.elapsed().as_secs_f64()); + }); - info!("🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", + info!( + "🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", block.header().number(), ::Hash::from(block.header().hash()), block.header().parent_hash(), @@ -325,7 +453,10 @@ impl Proposer .collect::>() .join(", ") ); - telemetry!(CONSENSUS_INFO; "prepared_block_for_proposing"; + telemetry!( + self.telemetry; + CONSENSUS_INFO; + "prepared_block_for_proposing"; "number" => ?block.header().number(), "hash" => ?::Hash::from(block.header().hash()), ); @@ -334,10 +465,14 @@ impl Proposer error!("Failed to verify block encoding/decoding"); } - if let Err(err) = evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) { + if let Err(err) = + evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) + { error!("Failed to evaluate authored block: {:?}", err); } + let proof = + PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; Ok(Proposal { block, proof, storage_changes }) } } @@ -346,17 +481,20 @@ impl Proposer mod tests { use super::*; + use futures::executor::block_on; use parking_lot::Mutex; - use sp_consensus::{BlockOrigin, Proposer}; - use substrate_test_runtime_client::{ - prelude::*, TestClientBuilder, runtime::{Extrinsic, Transfer}, TestClientBuilderExt, - }; - use 
sp_transaction_pool::{ChainEvent, MaintainedTransactionPool, TransactionSource}; + use sc_client_api::Backend; use sc_transaction_pool::BasicPool; + use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource}; use sp_api::Core; use sp_blockchain::HeaderBackend; + use sp_consensus::{BlockOrigin, Environment, Proposer}; use sp_runtime::traits::NumberFor; - use sc_client_api::Backend; + use substrate_test_runtime_client::{ + prelude::*, + runtime::{Extrinsic, Transfer}, + TestClientBuilder, TestClientBuilderExt, + }; const SOURCE: TransactionSource = TransactionSource::External; @@ -366,16 +504,15 @@ mod tests { nonce, from: AccountKeyring::Alice.into(), to: Default::default(), - }.into_signed_tx() + } + .into_signed_tx() } fn chain_event(header: B::Header) -> ChainEvent - where NumberFor: From + where + NumberFor: From, { - ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - } + ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None } } #[test] @@ -385,29 +522,26 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), ); - futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)]) - ).unwrap(); + block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)])) + .unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(0u64)) + client + .header(&BlockId::Number(0u64)) .expect("header get error") - .expect("there should be header") - )) + .expect("there should be header"), + )), ); - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( @@ -416,20 +550,21 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1; + return value.1 } let old = value.1; let new = old + time::Duration::from_secs(2); *value = (true, new); old - }) + }), ); // when let deadline = time::Duration::from_secs(3); - let block = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) - ).map(|r| r.block).unwrap(); + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); // then // block should have some extrinsics although we have some more in the pool. 
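The size-limit handling in the transaction loop above reduces to one rule: when a candidate would overflow the limit, keep scanning for up to `MAX_SKIPPED_TRANSACTIONS` smaller candidates before giving up. A self-contained sketch of just that rule, with plain `usize` sizes standing in for transactions and the deadline and validity handling omitted:

```rust
const MAX_SKIPPED_TRANSACTIONS: usize = 8;

/// Greedily fill a block, tolerating a bounded number of oversized candidates.
fn fill_block(pending: &[usize], block_size_limit: usize) -> Vec<usize> {
    let mut included = Vec::new();
    let mut block_size = 0;
    let mut skipped = 0;

    for &tx_size in pending {
        if block_size + tx_size > block_size_limit {
            if skipped < MAX_SKIPPED_TRANSACTIONS {
                // A later, smaller transaction may still fit: keep scanning.
                skipped += 1;
                continue
            } else {
                // Reached the block size limit, proceed with proposing.
                break
            }
        }
        block_size += tx_size;
        included.push(tx_size);
    }

    included
}

fn main() {
    // The oversized `5` is skipped; the later `1` still fits.
    assert_eq!(fill_block(&[3, 5, 1], 4), vec![3, 1]);
}
```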
@@ -443,17 +578,14 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), ); - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( @@ -462,18 +594,18 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1; + return value.1 } let new = value.1 + time::Duration::from_secs(160); *value = (true, new); new - }) + }), ); let deadline = time::Duration::from_secs(1); - futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) - ).map(|r| r.block).unwrap(); + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); } #[test] @@ -483,6 +615,7 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), @@ -491,24 +624,19 @@ mod tests { let genesis_hash = client.info().best_hash; let block_id = BlockId::Hash(genesis_hash); - futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)]), - ).unwrap(); + block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)])).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(0u64)) + client + .header(&BlockId::Number(0u64)) .expect("header get error") .expect("there should be header"), - )) + )), ); - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let proposer = proposer_factory.init_with_now( &client.header(&block_id).unwrap().unwrap(), @@ -516,9 +644,9 @@ mod tests { ); let deadline = time::Duration::from_secs(9); - let proposal = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No), - ).unwrap(); + let proposal = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .unwrap(); assert_eq!(proposal.block.extrinsics().len(), 1); @@ -526,16 +654,13 @@ mod tests { api.execute_block(&block_id, proposal.block).unwrap(); let state = backend.state_at(block_id).unwrap(); - let changes_trie_state = backend::changes_tries_state_at_block( - &block_id, - backend.changes_trie_storage(), - ).unwrap(); + let changes_trie_state = + backend::changes_tries_state_at_block(&block_id, backend.changes_trie_storage()) + .unwrap(); - let storage_changes = api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - genesis_hash, - ).unwrap(); + let storage_changes = api + .into_storage_changes(&state, changes_trie_state.as_ref(), genesis_hash) + .unwrap(); assert_eq!( proposal.storage_changes.transaction_storage_root, @@ -550,13 +675,16 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), + true.into(), None, spawner.clone(), client.clone(), ); - futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![ + block_on(txpool.submit_at( + 
&BlockId::number(0), + SOURCE, + vec![ extrinsic(0), extrinsic(1), Transfer { @@ -574,21 +702,16 @@ mod tests { }.into_resources_exhausting_tx(), extrinsic(5), extrinsic(6), - ]) - ).unwrap(); - - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - ); - let mut propose_block = | - client: &TestClient, - number, - expected_block_extrinsics, - expected_pool_transactions, - | { + ], + )) + .unwrap(); + + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); + let mut propose_block = |client: &TestClient, + number, + expected_block_extrinsics, + expected_pool_transactions| { let proposer = proposer_factory.init_with_now( &client.header(&BlockId::number(number)).unwrap().unwrap(), Box::new(move || time::Instant::now()), @@ -596,9 +719,10 @@ mod tests { // when let deadline = time::Duration::from_secs(9); - let block = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) - ).map(|r| r.block).unwrap(); + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); // then // block should have some extrinsics although we have some more in the pool. @@ -608,28 +732,117 @@ mod tests { block }; - futures::executor::block_on( + block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(0u64)) + client + .header(&BlockId::Number(0u64)) .expect("header get error") - .expect("there should be header") - )) + .expect("there should be header"), + )), ); // let's create one block and import it let block = propose_block(&client, 0, 2, 7); - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(1)) + client + .header(&BlockId::Number(1)) .expect("header get error") - .expect("there should be header") - )) + .expect("there should be header"), + )), ); // now let's make sure that we can still make some progress let block = propose_block(&client, 1, 2, 5); - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); + } + + #[test] + fn should_cease_building_block_when_block_limit_is_reached() { + let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner.clone(), + client.clone(), + ); + let genesis_header = client + .header(&BlockId::Number(0u64)) + .expect("header get error") + .expect("there should be header"); + + let extrinsics_num = 4; + let extrinsics = (0..extrinsics_num) + .map(|v| Extrinsic::IncludeData(vec![v as u8; 10])) + .collect::>(); + + let block_limit = genesis_header.encoded_size() + + extrinsics + .iter() + .take(extrinsics_num - 1) + .map(Encode::encoded_size) + .sum::() + + Vec::::new().encoded_size(); + + block_on(txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics)).unwrap(); + + block_on(txpool.maintain(chain_event(genesis_header.clone()))); + + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); + + let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); + + // Give it enough time + let deadline = time::Duration::from_secs(300); + let block = block_on(proposer.propose( + Default::default(), + 
Default::default(), + deadline, + Some(block_limit), + )) + .map(|r| r.block) + .unwrap(); + + // Based on the block limit, one transaction shouldn't be included. + assert_eq!(block.extrinsics().len(), extrinsics_num - 1); + + let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); + + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); + + // Without a block limit we should include all of them + assert_eq!(block.extrinsics().len(), extrinsics_num); + + let mut proposer_factory = ProposerFactory::with_proof_recording( + spawner.clone(), + client.clone(), + txpool.clone(), + None, + None, + ); + + let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); + + // Give it enough time + let block = block_on(proposer.propose( + Default::default(), + Default::default(), + deadline, + Some(block_limit), + )) + .map(|r| r.block) + .unwrap(); + + // The block limit didn't change, but we now include the proof in the estimation of the + // block size and thus, one less transaction should fit into the limit. + assert_eq!(block.extrinsics().len(), extrinsics_num - 2); } } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 9b0c491508231..2b2fe554efdff 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,7 +22,7 @@ //! //! ``` //! # use sc_basic_authorship::ProposerFactory; -//! # use sp_consensus::{Environment, Proposer, RecordProof}; +//! # use sp_consensus::{Environment, Proposer}; //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! # use substrate_test_runtime_client::{ @@ -34,17 +34,19 @@ //! # let spawner = sp_core::testing::TaskExecutor::new(); //! # let txpool = BasicPool::new_full( //! # Default::default(), +//! # true.into(), //! # None, //! # spawner.clone(), //! # client.clone(), //! # ); //! // The first step is to create a `ProposerFactory`. //! let mut proposer_factory = ProposerFactory::new( -//! spawner, -//! client.clone(), -//! txpool.clone(), -//! None, -//! ); +//! spawner, +//! client.clone(), +//! txpool.clone(), +//! None, +//! None, +//! ); //! //! // From this factory, we create a `Proposer`. //! let proposer = proposer_factory.init( @@ -60,15 +62,14 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes, +//! None, //! ); //! //! // We wait until the proposition is performed. //! let block = futures::executor::block_on(future).unwrap(); //! println!("Generated block: {:?}", block.block); //! ``` -//!
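The limit used by `should_cease_building_block_when_block_limit_is_reached` above is derived purely from SCALE-encoded sizes: the header, all but the last extrinsic, and the length prefix of the empty extrinsics vector. A hedged restatement with byte vectors standing in for the real header and extrinsic types; it assumes only `parity-scale-codec` imported as `codec`, as in the crates touched here:

```rust
use codec::Encode;

fn main() {
    // Stand-ins: a 32-byte "header" and four 10-byte "extrinsics".
    let header = vec![0u8; 32];
    let extrinsics: Vec<Vec<u8>> = (0..4u8).map(|v| vec![v; 10]).collect();

    // Budget for the header, all but the last extrinsic, and the empty
    // extrinsics vector's length prefix.
    let block_limit = header.encoded_size() +
        extrinsics
            .iter()
            .take(extrinsics.len() - 1)
            .map(Encode::encoded_size)
            .sum::<usize>() +
        Vec::<Vec<u8>>::new().encoded_size();

    // Encoding everything would overshoot the budget by one extrinsic.
    let full_size = header.encoded_size() +
        extrinsics.iter().map(Encode::encoded_size).sum::<usize>();
    assert!(block_limit < full_size);
}
```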
mod basic_authorship; -pub use crate::basic_authorship::{ProposerFactory, Proposer}; +pub use crate::basic_authorship::{Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT}; diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index 0c3d289bbcbc1..6fef8498134eb 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-block-builder" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-block-builder = { version = "2.0.0", path = "../../primitives/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sc-client-api = { version = "2.0.0", path = "../api" } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-builder" } +sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = [ + "derive", +] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } diff --git a/client/block-builder/README.md b/client/block-builder/README.md index c691f6692abff..b105d4203362f 100644 --- a/client/block-builder/README.md +++ b/client/block-builder/README.md @@ -1,7 +1,7 @@ Substrate block builder This crate provides the [`BlockBuilder`] utility and the corresponding runtime api -[`BlockBuilder`](sp_block_builder::BlockBuilder).Error +[`BlockBuilder`](https://docs.rs/sc-block-builder/latest/sc_block_builder/struct.BlockBuilder.html).Error The block builder utility is used in the node as an abstraction over the runtime api to initialize a block, to push extrinsics and to finalize a block. diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 904667b1afc6e..e89421edfb168 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,7 +19,7 @@ //! Substrate block builder //! //! This crate provides the [`BlockBuilder`] utility and the corresponding runtime api -//! 
[`BlockBuilder`](sp_block_builder::BlockBuilder).Error +//! [`BlockBuilder`](sp_block_builder::BlockBuilder). //! //! The block builder utility is used in the node as an abstraction over the runtime api to //! initialize a block, to push extrinsics and to finalize a block. @@ -28,22 +28,57 @@ use codec::Encode; -use sp_runtime::{ - generic::BlockId, - traits::{Header as HeaderT, Hash, Block as BlockT, HashFor, DigestFor, NumberFor, One}, +use sp_api::{ + ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; -use sp_api::{ - Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, - TransactionOutcome, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestFor, Hash, HashFor, Header as HeaderT, NumberFor, One}, }; -use sp_consensus::RecordProof; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; use sc_client_api::backend; +/// Used as parameter to [`BlockBuilderProvider`] to express if proof recording should be enabled. +/// +/// When `RecordProof::Yes` is given, all accessed trie nodes should be saved. These recorded +/// trie nodes can be used by a third party to proof this proposal without having access to the +/// full storage. +#[derive(Copy, Clone, PartialEq)] +pub enum RecordProof { + /// `Yes`, record a proof. + Yes, + /// `No`, don't record any proof. + No, +} + +impl RecordProof { + /// Returns if `Self` == `Yes`. + pub fn yes(&self) -> bool { + matches!(self, Self::Yes) + } +} + +/// Will return [`RecordProof::No`] as default value. +impl Default for RecordProof { + fn default() -> Self { + Self::No + } +} + +impl From for RecordProof { + fn from(val: bool) -> Self { + if val { + Self::Yes + } else { + Self::No + } + } +} + /// A block that was build by [`BlockBuilder`] plus some additional data. /// /// This additional data includes the `storage_changes`, these changes can be applied to the @@ -59,7 +94,9 @@ pub struct BuiltBlock, } -impl>> BuiltBlock { +impl>> + BuiltBlock +{ /// Convert into the inner values. pub fn into_inner(self) -> (Block, StorageChanges, Option) { (self.block, self.storage_changes, self.proof) @@ -68,11 +105,11 @@ impl>> BuiltBl /// Block builder provider pub trait BlockBuilderProvider - where - Block: BlockT, - B: backend::Backend, - Self: Sized, - RA: ProvideRuntimeApi, +where + Block: BlockT, + B: backend::Backend, + Self: Sized, + RA: ProvideRuntimeApi, { /// Create a new block, built on top of `parent`. /// @@ -100,14 +137,16 @@ pub struct BlockBuilder<'a, Block: BlockT, A: ProvideRuntimeApi, B> { block_id: BlockId, parent_hash: Block::Hash, backend: &'a B, + /// The estimated size of the block header. + estimated_header_size: usize, } impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> where Block: BlockT, A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + - ApiExt>, + A::Api: + BlockBuilderApi + ApiExt>, B: backend::Backend, { /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. 
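The `From<bool>` conversion on the `RecordProof` type added above keeps call sites terse; a standalone usage sketch (the enum is re-declared here so the snippet compiles on its own):

```rust
#[derive(Copy, Clone, PartialEq, Debug)]
enum RecordProof {
    Yes,
    No,
}

impl RecordProof {
    fn yes(&self) -> bool {
        matches!(self, Self::Yes)
    }
}

impl From<bool> for RecordProof {
    fn from(val: bool) -> Self {
        if val {
            Self::Yes
        } else {
            Self::No
        }
    }
}

fn main() {
    // Conversions like `true.into()` can be used wherever a `RecordProof`
    // is expected.
    let record: RecordProof = true.into();
    assert!(record.yes());
    assert_eq!(RecordProof::from(false), RecordProof::No);
}
```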
@@ -122,7 +161,7 @@ where record_proof: RecordProof, inherent_digests: DigestFor, backend: &'a B, - ) -> Result> { + ) -> Result { let header = <::Header as HeaderT>::new( parent_number + One::one(), Default::default(), @@ -131,6 +170,8 @@ where inherent_digests, ); + let estimated_header_size = header.encoded_size(); + let mut api = api.runtime_api(); if record_proof.yes() { @@ -139,9 +180,7 @@ where let block_id = BlockId::Hash(parent_hash); - api.initialize_block_with_context( - &block_id, ExecutionContext::BlockConstruction, &header, - )?; + api.initialize_block_with_context(&block_id, ExecutionContext::BlockConstruction, &header)?; Ok(Self { parent_hash, @@ -149,13 +188,14 @@ where api, block_id, backend, + estimated_header_size, }) } /// Push onto the block's list of extrinsics. /// /// This will ensure the extrinsic can be validly executed (by executing it). - pub fn push(&mut self, xt: ::Extrinsic) -> Result<(), ApiErrorFor> { + pub fn push(&mut self, xt: ::Extrinsic) -> Result<(), Error> { let block_id = &self.block_id; let extrinsics = &mut self.extrinsics; @@ -168,13 +208,11 @@ where Ok(Ok(_)) => { extrinsics.push(xt); TransactionOutcome::Commit(Ok(())) - } - Ok(Err(tx_validity)) => { - TransactionOutcome::Rollback( - Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), - ) }, - Err(e) => TransactionOutcome::Rollback(Err(e)), + Ok(Err(tx_validity)) => TransactionOutcome::Rollback(Err( + ApplyExtrinsicFailed::Validity(tx_validity).into(), + )), + Err(e) => TransactionOutcome::Rollback(Err(Error::from(e))), } }) } @@ -184,13 +222,10 @@ where /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` /// supplied by `self.api`, combined as [`BuiltBlock`]. /// The storage proof will be `Some(_)` when proof recording was enabled. - pub fn build(mut self) -> Result< - BuiltBlock>, - ApiErrorFor - > { - let header = self.api.finalize_block_with_context( - &self.block_id, ExecutionContext::BlockConstruction - )?; + pub fn build(mut self) -> Result>, Error> { + let header = self + .api + .finalize_block_with_context(&self.block_id, ExecutionContext::BlockConstruction)?; debug_assert_eq!( header.extrinsics_root().clone(), @@ -208,11 +243,10 @@ where )?; let parent_hash = self.parent_hash; - let storage_changes = self.api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - parent_hash, - )?; + let storage_changes = self + .api + .into_storage_changes(&state, changes_trie_state.as_ref(), parent_hash) + .map_err(|e| sp_blockchain::Error::StorageChanges(e))?; Ok(BuiltBlock { block: ::new(header, self.extrinsics), @@ -227,17 +261,33 @@ where pub fn create_inherents( &mut self, inherent_data: sp_inherents::InherentData, - ) -> Result, ApiErrorFor> { + ) -> Result, Error> { let block_id = self.block_id; - self.api.execute_in_transaction(move |api| { - // `create_inherents` should not change any state, to ensure this we always rollback - // the transaction. - TransactionOutcome::Rollback(api.inherent_extrinsics_with_context( - &block_id, - ExecutionContext::BlockConstruction, - inherent_data - )) - }) + self.api + .execute_in_transaction(move |api| { + // `create_inherents` should not change any state, to ensure this we always rollback + // the transaction. + TransactionOutcome::Rollback(api.inherent_extrinsics_with_context( + &block_id, + ExecutionContext::BlockConstruction, + inherent_data, + )) + }) + .map_err(|e| Error::Application(Box::new(e))) + } + + /// Estimate the size of the block in the current state. 
+ /// + /// If `include_proof` is `true`, the estimated size of the storage proof will be added + /// to the estimation. + pub fn estimate_block_size(&self, include_proof: bool) -> usize { + let size = self.estimated_header_size + self.extrinsics.encoded_size(); + + if include_proof { + size + self.api.proof_recorder().map(|pr| pr.estimate_encoded_size()).unwrap_or(0) + } else { + size + } } } @@ -262,19 +312,22 @@ mod tests { RecordProof::Yes, Default::default(), &*backend, - ).unwrap().build().unwrap(); + ) + .unwrap() + .build() + .unwrap(); let proof = block.proof.expect("Proof is build on request"); let backend = sp_state_machine::create_proof_check_backend::( block.storage_changes.transaction_storage_root, proof, - ).unwrap(); + ) + .unwrap(); - assert!( - backend.storage(&sp_core::storage::well_known_keys::CODE) - .unwrap_err() - .contains("Database missing expected key"), - ); + assert!(backend + .storage(&sp_core::storage::well_known_keys::CODE) + .unwrap_err() + .contains("Database missing expected key"),); } } diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 79f14058aad6d..8af2996e968d8 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,17 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-chain-spec-derive = { version = "2.0.0", path = "./derive" } -impl-trait-for-tuples = "0.1.3" -sc-network = { version = "0.8.0", path = "../network" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -serde = { version = "1.0.101", features = ["derive"] } -serde_json = "1.0.41" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-chain-spec = { version = "2.0.0", path = "../../primitives/chain-spec" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-consensus-babe = { version = "0.8.0-rc6", path = "../consensus/babe" } -sp-consensus-babe = { version = "0.8.0-rc6", path = "../../primitives/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0-rc6", path = "../consensus/epochs" } -sc-finality-grandpa = { version = "0.8.0-rc6", path = "../finality-grandpa" } +sc-chain-spec-derive = { version = "4.0.0-dev", path = "./derive" } +impl-trait-for-tuples = "0.2.1" +sc-network = { version = "0.10.0-dev", path = "../network" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +serde = { version = "1.0.126", features = ["derive"] } +serde_json = "1.0.68" +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } +codec = { package = "parity-scale-codec", version = "2.0.0" } diff --git a/client/chain-spec/README.md b/client/chain-spec/README.md index 59a66aa5ace79..5525affbed81c 100644 --- a/client/chain-spec/README.md +++ b/client/chain-spec/README.md @@ -4,7 +4,7 @@ This crate contains structs and utilities to declare a runtime-specific configuration file (a.k.a chain spec). Basic chain spec type containing all required parameters is -[`ChainSpec`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.ChainSpec.html). It can be extended with +[`ChainSpec`](https://docs.rs/sc-chain-spec/latest/sc_chain_spec/struct.GenericChainSpec.html). 
It can be extended with additional options that contain configuration specific to your chain. Usually the extension is going to be an amalgamate of types exposed by Substrate core modules. To allow the core modules to retrieve diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index 6826168a206a5..b210fa1320e04 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec-derive" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,9 +15,9 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "0.1.4" -proc-macro2 = "1.0.6" +proc-macro-crate = "1.0.0" +proc-macro2 = "1.0.29" quote = "1.0.3" -syn = "1.0.7" +syn = "1.0.58" [dev-dependencies] diff --git a/client/chain-spec/derive/src/impls.rs b/client/chain-spec/derive/src/impls.rs index ded961a6da815..8c56430e81d02 100644 --- a/client/chain-spec/derive/src/impls.rs +++ b/client/chain-spec/derive/src/impls.rs @@ -1,23 +1,25 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use proc_macro2::{Span, TokenStream}; +use proc_macro_crate::{crate_name, FoundCrate}; use quote::quote; -use syn::{DeriveInput, Ident, Error}; -use proc_macro_crate::crate_name; +use syn::{DeriveInput, Error, Ident}; const CRATE_NAME: &str = "sc-chain-spec"; const ATTRIBUTE_NAME: &str = "forks"; @@ -29,14 +31,18 @@ const ATTRIBUTE_NAME: &str = "forks"; pub fn extension_derive(ast: &DeriveInput) -> proc_macro::TokenStream { derive(ast, |crate_name, name, generics: &syn::Generics, field_names, field_types, fields| { let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - let forks = fields.named.iter().find_map(|f| { - if f.attrs.iter().any(|attr| attr.path.is_ident(ATTRIBUTE_NAME)) { - let typ = &f.ty; - Some(quote! { #typ }) - } else { - None - } - }).unwrap_or_else(|| quote! { #crate_name::NoExtension }); + let forks = fields + .named + .iter() + .find_map(|f| { + if f.attrs.iter().any(|attr| attr.path.is_ident(ATTRIBUTE_NAME)) { + let typ = &f.ty; + Some(quote! { #typ }) + } else { + None + } + }) + .unwrap_or_else(|| quote! { #crate_name::NoExtension }); quote! 
{ impl #impl_generics #crate_name::Extension for #name #ty_generics #where_clause { @@ -59,6 +65,15 @@ pub fn extension_derive(ast: &DeriveInput) -> proc_macro::TokenStream { _ => self, } } + + fn get_any_mut(&mut self, t: std::any::TypeId) -> &mut dyn std::any::Any { + use std::any::{Any, TypeId}; + + match t { + #( x if x == TypeId::of::<#field_types>() => &mut self.#field_names ),*, + _ => self, + } + } } } }) @@ -70,20 +85,20 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let fork_name = Ident::new(&format!("{}Fork", name), Span::call_site()); - let fork_fields = generate_fork_fields(&crate_name, &field_names, &field_types); + let fork_fields = generate_fork_fields(crate_name, &field_names, &field_types); let to_fork = generate_base_to_fork(&fork_name, &field_names); let combine_with = generate_combine_with(&field_names); let to_base = generate_fork_to_base(name, &field_names); let serde_crate_name = match proc_macro_crate::crate_name("serde") { - Ok(name) => Ident::new(&name.replace("-", "_"), Span::call_site()), + Ok(FoundCrate::Itself) => Ident::new("serde", Span::call_site()), + Ok(FoundCrate::Name(name)) => Ident::new(&name, Span::call_site()), Err(e) => { - let err = Error::new( - Span::call_site(), - &format!("Could not find `serde` crate: {}", e), - ).to_compile_error(); + let err = + Error::new(Span::call_site(), &format!("Could not find `serde` crate: {}", e)) + .to_compile_error(); - return quote!( #err ).into(); - } + return quote!( #err ) + }, }; quote! { @@ -128,14 +143,20 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { pub fn derive( ast: &DeriveInput, derive: impl Fn( - &Ident, &Ident, &syn::Generics, Vec<&Ident>, Vec<&syn::Type>, &syn::FieldsNamed, + &Ident, + &Ident, + &syn::Generics, + Vec<&Ident>, + Vec<&syn::Type>, + &syn::FieldsNamed, ) -> TokenStream, ) -> proc_macro::TokenStream { let err = || { let err = Error::new( Span::call_site(), - "ChainSpecGroup is only available for structs with named fields." - ).to_compile_error(); + "ChainSpecGroup is only available for structs with named fields.", + ) + .to_compile_error(); quote!( #err ).into() }; @@ -149,14 +170,11 @@ pub fn derive( _ => return err(), }; - const PROOF: &str = "CARGO_PKG_NAME always defined when compiling; qed"; let name = &ast.ident; let crate_name = match crate_name(CRATE_NAME) { - Ok(chain_spec_name) => chain_spec_name, - Err(e) => if std::env::var("CARGO_PKG_NAME").expect(PROOF) == CRATE_NAME { - // we return the name of the crate here instead of `crate` to support doc tests. - CRATE_NAME.replace("-", "_") - } else { + Ok(FoundCrate::Itself) => CRATE_NAME.replace("-", "_"), + Ok(FoundCrate::Name(chain_spec_name)) => chain_spec_name, + Err(e) => { let err = Error::new(Span::call_site(), &e).to_compile_error(); return quote!( #err ).into() }, @@ -168,47 +186,35 @@ pub fn derive( derive(&crate_name, name, &ast.generics, field_names, field_types, fields).into() } -fn generate_fork_fields( - crate_name: &Ident, - names: &[&Ident], - types: &[&syn::Type], -) -> TokenStream { +fn generate_fork_fields(crate_name: &Ident, names: &[&Ident], types: &[&syn::Type]) -> TokenStream { let crate_name = std::iter::repeat(crate_name); quote! 
{ #( pub #names: Option<<#types as #crate_name::Group>::Fork>, )* } } -fn generate_base_to_fork( - fork_name: &Ident, - names: &[&Ident], -) -> TokenStream { +fn generate_base_to_fork(fork_name: &Ident, names: &[&Ident]) -> TokenStream { let names2 = names.to_vec(); - quote!{ + quote! { #fork_name { #( #names: Some(self.#names2.to_fork()), )* } } } -fn generate_combine_with( - names: &[&Ident], -) -> TokenStream { +fn generate_combine_with(names: &[&Ident]) -> TokenStream { let names2 = names.to_vec(); - quote!{ + quote! { #( self.#names.combine_with(other.#names2); )* } } -fn generate_fork_to_base( - fork: &Ident, - names: &[&Ident], -) -> TokenStream { +fn generate_fork_to_base(fork: &Ident, names: &[&Ident]) -> TokenStream { let names2 = names.to_vec(); - quote!{ + quote! { Some(#fork { #( #names: self.#names2?.to_base()?, )* }) diff --git a/client/chain-spec/derive/src/lib.rs b/client/chain-spec/derive/src/lib.rs index 0dc053f7e301e..53f0c69491ecd 100644 --- a/client/chain-spec/derive/src/lib.rs +++ b/client/chain-spec/derive/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Macros to derive chain spec extension traits implementation. diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 39c47e32908df..fcdb053c47c16 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,15 +19,17 @@ //! Substrate chain configurations. 
#![warn(missing_docs)]
-use std::{borrow::Cow, fs::File, path::PathBuf, sync::Arc, collections::HashMap};
-use serde::{Serialize, Deserialize};
-use sp_core::storage::{StorageKey, StorageData, ChildInfo, Storage, StorageChild};
-use sp_runtime::BuildStorage;
-use serde_json as json;
-use crate::{RuntimeGenesis, ChainType, extension::GetExtension, Properties};
+use crate::{extension::GetExtension, ChainType, Properties, RuntimeGenesis};
use sc_network::config::MultiaddrWithPeerId;
use sc_telemetry::TelemetryEndpoints;
-use sp_runtime::traits::{Block as BlockT, NumberFor};
+use serde::{Deserialize, Serialize};
+use serde_json as json;
+use sp_core::{
+    storage::{ChildInfo, Storage, StorageChild, StorageData, StorageKey},
+    Bytes,
+};
+use sp_runtime::BuildStorage;
+use std::{borrow::Cow, collections::HashMap, fs::File, path::PathBuf, sync::Arc};

enum GenesisSource<G> {
    File(PathBuf),
@@ -56,8 +58,8 @@ impl<G: RuntimeGenesis> GenesisSource<G> {
        match self {
            Self::File(path) => {
-                let file = File::open(path)
-                    .map_err(|e| format!("Error opening spec file: {}", e))?;
+                let file =
+                    File::open(path).map_err(|e| format!("Error opening spec file: {}", e))?;
                let genesis: GenesisContainer<G> = json::from_reader(file)
                    .map_err(|e| format!("Error parsing spec file: {}", e))?;
                Ok(genesis.genesis)
@@ -69,22 +71,25 @@ impl<G: RuntimeGenesis> GenesisSource<G> {
            },
            Self::Factory(f) => Ok(Genesis::Runtime(f())),
            Self::Storage(storage) => {
-                let top = storage.top
+                let top = storage
+                    .top
                    .iter()
                    .map(|(k, v)| (StorageKey(k.clone()), StorageData(v.clone())))
                    .collect();
-                let children_default = storage.children_default
+                let children_default = storage
+                    .children_default
                    .iter()
-                    .map(|(k, child)|
-                        (
-                            StorageKey(k.clone()),
-                            child.data
+                    .map(|(k, child)| {
+                        (
+                            StorageKey(k.clone()),
+                            child
+                                .data
                                .iter()
                                .map(|(k, v)| (StorageKey(k.clone()), StorageData(v.clone())))
-                                .collect()
-                        )
-                    )
+                                .collect(),
+                        )
+                    })
                    .collect();

                Ok(Genesis::Raw(RawGenesis { top, children_default }))
@@ -99,24 +104,24 @@ impl<G: RuntimeGenesis, E> BuildStorage for ChainSpec<G, E> {
            Genesis::Runtime(gc) => gc.build_storage(),
            Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage {
                top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(),
-                children_default: children_map.into_iter().map(|(storage_key, child_content)| {
-                    let child_info = ChildInfo::new_default(storage_key.0.as_slice());
-                    (
-                        storage_key.0,
-                        StorageChild {
-                            data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(),
-                            child_info,
-                        },
-                    )
-                }).collect(),
+                children_default: children_map
+                    .into_iter()
+                    .map(|(storage_key, child_content)| {
+                        let child_info = ChildInfo::new_default(storage_key.0.as_slice());
+                        (
+                            storage_key.0,
+                            StorageChild {
+                                data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(),
+                                child_info,
+                            },
+                        )
+                    })
+                    .collect(),
            }),
        }
    }

-    fn assimilate_storage(
-        &self,
-        _: &mut Storage,
-    ) -> Result<(), String> {
+    fn assimilate_storage(&self, _: &mut Storage) -> Result<(), String> {
        Err("`assimilate_storage` not implemented for `ChainSpec`.".into())
    }
}
@@ -159,7 +164,12 @@ struct ClientSpec<E> {
    consensus_engine: (),
    #[serde(skip_serializing)]
    genesis: serde::de::IgnoredAny,
-    light_sync_state: Option<SerializableLightSyncState>,
+    /// Mapping from `block_hash` to `wasm_code`.
+    ///
+    /// The given `wasm_code` will be used to substitute the on-chain wasm code from the given
+    /// block hash onwards.
+    #[serde(default)]
+    code_substitutes: HashMap<String, Bytes>,
}

/// A type denoting empty extensions.
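The `code_substitutes` field above is deserialized straight from the chain-spec JSON file. Assuming `ClientSpec`'s serde derive applies the camelCase renaming that the other spec keys use, a spec carrying one substitute might look like the following sketch (the block hash and wasm blob are hypothetical placeholders):

    {
        "name": "Example",
        "id": "example_chain",
        "codeSubstitutes": {
            "0x6a8b...": "0x0061736d0100..."
        }
    }

Because of `#[serde(default)]`, older spec files that lack the key still parse, yielding an empty map.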
@@ -175,10 +185,7 @@ pub struct ChainSpec<G, E = NoExtension> {
impl<G, E: Clone> Clone for ChainSpec<G, E> {
    fn clone(&self) -> Self {
-        ChainSpec {
-            client_spec: self.client_spec.clone(),
-            genesis: self.genesis.clone(),
-        }
+        ChainSpec { client_spec: self.client_spec.clone(), genesis: self.genesis.clone() }
    }
}
@@ -220,11 +227,16 @@ impl<G, E> ChainSpec<G, E> {
        self.client_spec.boot_nodes.push(addr)
    }

-    /// Returns a reference to defined chain spec extensions.
+    /// Returns a reference to the defined chain spec extensions.
    pub fn extensions(&self) -> &E {
        &self.client_spec.extensions
    }

+    /// Returns a mutable reference to the defined chain spec extensions.
+    pub fn extensions_mut(&mut self) -> &mut E {
+        &mut self.client_spec.extensions
+    }
+
    /// Create hardcoded spec.
    pub fn from_genesis<F: Fn() -> G + 'static + Send + Sync>(
        name: &str,
@@ -248,24 +260,16 @@ impl<G, E> ChainSpec<G, E> {
            extensions,
            consensus_engine: (),
            genesis: Default::default(),
-            light_sync_state: None,
+            code_substitutes: HashMap::new(),
        };

-        ChainSpec {
-            client_spec,
-            genesis: GenesisSource::Factory(Arc::new(constructor)),
-        }
+        ChainSpec { client_spec, genesis: GenesisSource::Factory(Arc::new(constructor)) }
    }

    /// Type of the chain.
    fn chain_type(&self) -> ChainType {
        self.client_spec.chain_type.clone()
    }
-
-    /// Hardcode infomation to allow light clients to sync quickly into the chain spec.
-    fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) {
-        self.client_spec.light_sync_state = Some(light_sync_state);
-    }
}

impl<G, E: serde::de::DeserializeOwned> ChainSpec<G, E> {
@@ -274,22 +278,15 @@ impl<G, E: serde::de::DeserializeOwned> ChainSpec<G, E> {
        let json = json.into();
        let client_spec = json::from_slice(json.as_ref())
            .map_err(|e| format!("Error parsing spec file: {}", e))?;
-        Ok(ChainSpec {
-            client_spec,
-            genesis: GenesisSource::Binary(json),
-        })
+        Ok(ChainSpec { client_spec, genesis: GenesisSource::Binary(json) })
    }

    /// Parse json file into a `ChainSpec`
    pub fn from_json_file(path: PathBuf) -> Result<Self, String> {
-        let file = File::open(&path)
-            .map_err(|e| format!("Error opening spec file: {}", e))?;
-        let client_spec = json::from_reader(file)
-            .map_err(|e| format!("Error parsing spec file: {}", e))?;
-        Ok(ChainSpec {
-            client_spec,
-            genesis: GenesisSource::File(path),
-        })
+        let file = File::open(&path).map_err(|e| format!("Error opening spec file: {}", e))?;
+        let client_spec =
+            json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?;
+        Ok(ChainSpec { client_spec, genesis: GenesisSource::File(path) })
    }
}
@@ -305,33 +302,34 @@ impl ChainSpec {
        let genesis = match (raw, self.genesis.resolve()?) {
            (true, Genesis::Runtime(g)) => {
                let storage = g.build_storage()?;
-                let top = storage.top.into_iter()
-                    .map(|(k, v)| (StorageKey(k), StorageData(v)))
-                    .collect();
-                let children_default = storage.children_default.into_iter()
-                    .map(|(sk, child)| (
-                        StorageKey(sk),
-                        child.data.into_iter()
-                            .map(|(k, v)| (StorageKey(k), StorageData(v)))
-                            .collect(),
-                    ))
+                let top =
+                    storage.top.into_iter().map(|(k, v)| (StorageKey(k), StorageData(v))).collect();
+                let children_default = storage
+                    .children_default
+                    .into_iter()
+                    .map(|(sk, child)| {
+                        (
+                            StorageKey(sk),
+                            child
+                                .data
+                                .into_iter()
+                                .map(|(k, v)| (StorageKey(k), StorageData(v)))
+                                .collect(),
+                        )
+                    })
                    .collect();

                Genesis::Raw(RawGenesis { top, children_default })
            },
            (_, genesis) => genesis,
        };
-        Ok(JsonContainer {
-            client_spec: self.client_spec.clone(),
-            genesis,
-        })
+        Ok(JsonContainer { client_spec: self.client_spec.clone(), genesis })
    }

    /// Dump to json string.
    pub fn as_json(&self, raw: bool) -> Result<String, String> {
        let container = self.json_container(raw)?;
-        json::to_string_pretty(&container)
-            .map_err(|e| format!("Error generating spec json: {}", e))
+        json::to_string_pretty(&container).map_err(|e| format!("Error generating spec json: {}", e))
    }
}
@@ -376,6 +374,10 @@ where
        ChainSpec::extensions(self) as &dyn GetExtension
    }

+    fn extensions_mut(&mut self) -> &mut dyn GetExtension {
+        ChainSpec::extensions_mut(self) as &mut dyn GetExtension
+    }
+
    fn as_json(&self, raw: bool) -> Result<String, String> {
        ChainSpec::as_json(self, raw)
    }
@@ -392,64 +394,15 @@ where
        self.genesis = GenesisSource::Storage(storage);
    }

-    fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState) {
-        ChainSpec::set_light_sync_state(self, light_sync_state)
-    }
-}
-
-/// Hardcoded infomation that allows light clients to sync quickly.
-pub struct LightSyncState<Block: BlockT> {
-    /// The header of the best finalized block.
-    pub finalized_block_header: <Block as BlockT>::Header,
-    /// The epoch changes tree for babe.
-    pub babe_epoch_changes: sc_consensus_epochs::EpochChangesFor<Block, sc_consensus_babe::Epoch>,
-    /// The babe weight of the finalized block.
-    pub babe_finalized_block_weight: sp_consensus_babe::BabeBlockWeight,
-    /// The authority set for grandpa.
-    pub grandpa_authority_set: sc_finality_grandpa::AuthoritySet<<Block as BlockT>::Hash, NumberFor<Block>>,
-}
-
-impl<Block: BlockT> LightSyncState<Block> {
-    /// Convert into a `SerializableLightSyncState`.
-    pub fn to_serializable(&self) -> SerializableLightSyncState {
-        use codec::Encode;
-
-        SerializableLightSyncState {
-            finalized_block_header: StorageData(self.finalized_block_header.encode()),
-            babe_epoch_changes:
-                StorageData(self.babe_epoch_changes.encode()),
-            babe_finalized_block_weight:
-                self.babe_finalized_block_weight,
-            grandpa_authority_set:
-                StorageData(self.grandpa_authority_set.encode()),
-        }
-    }
-
-    /// Convert from a `SerializableLightSyncState`.
-    pub fn from_serializable(serialized: &SerializableLightSyncState) -> Result<Self, codec::Error> {
-        Ok(Self {
-            finalized_block_header: codec::Decode::decode(&mut &serialized.finalized_block_header.0[..])?,
-            babe_epoch_changes:
-                codec::Decode::decode(&mut &serialized.babe_epoch_changes.0[..])?,
-            babe_finalized_block_weight:
-                serialized.babe_finalized_block_weight,
-            grandpa_authority_set:
-                codec::Decode::decode(&mut &serialized.grandpa_authority_set.0[..])?,
-        })
+    fn code_substitutes(&self) -> std::collections::HashMap<String, Vec<u8>> {
+        self.client_spec
+            .code_substitutes
+            .iter()
+            .map(|(h, c)| (h.clone(), c.0.clone()))
+            .collect()
    }
}

-/// The serializable form of `LightSyncState`. Created using `LightSyncState::serialize`.
-#[derive(Serialize, Deserialize, Clone, Debug)]
-#[serde(rename_all = "camelCase")]
-#[serde(deny_unknown_fields)]
-pub struct SerializableLightSyncState {
-    finalized_block_header: StorageData,
-    babe_epoch_changes: StorageData,
-    babe_finalized_block_weight: sp_consensus_babe::BabeBlockWeight,
-    grandpa_authority_set: StorageData,
-}
-
#[cfg(test)]
mod tests {
    use super::*;
@@ -458,12 +411,9 @@ mod tests {
    struct Genesis(HashMap<String, String>);

    impl BuildStorage for Genesis {
-        fn assimilate_storage(
-            &self,
-            storage: &mut Storage,
-        ) -> Result<(), String> {
+        fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> {
            storage.top.extend(
-                self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes()))
+                self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())),
            );
            Ok(())
        }
@@ -474,11 +424,10 @@ mod tests {
    #[test]
    fn should_deserialize_example_chain_spec() {
        let spec1 = TestSpec::from_json_bytes(Cow::Owned(
-            include_bytes!("../res/chain_spec.json").to_vec()
-        )).unwrap();
-        let spec2 = TestSpec::from_json_file(
-            PathBuf::from("./res/chain_spec.json")
-        ).unwrap();
+            include_bytes!("../res/chain_spec.json").to_vec(),
+        ))
+        .unwrap();
+        let spec2 = TestSpec::from_json_file(PathBuf::from("./res/chain_spec.json")).unwrap();

        assert_eq!(spec1.as_json(false), spec2.as_json(false));
        assert_eq!(spec2.chain_type(), ChainType::Live)
@@ -495,8 +444,9 @@ mod tests {
    #[test]
    fn should_deserialize_chain_spec_with_extensions() {
        let spec = TestSpec2::from_json_bytes(Cow::Owned(
-            include_bytes!("../res/chain_spec2.json").to_vec()
-        )).unwrap();
+            include_bytes!("../res/chain_spec2.json").to_vec(),
+        ))
+        .unwrap();

        assert_eq!(spec.extensions().my_property, "Test Extension");
    }
diff --git a/client/chain-spec/src/extension.rs b/client/chain-spec/src/extension.rs
index c0338203eb10e..4b59232cf5770 100644
--- a/client/chain-spec/src/extension.rs
+++ b/client/chain-spec/src/extension.rs
@@ -1,34 +1,38 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Chain Spec extensions helpers.

-use std::fmt::Debug;
-use std::any::{TypeId, Any};
+use std::{
+    any::{Any, TypeId},
+    fmt::Debug,
+};
use std::collections::BTreeMap;

-use serde::{Serialize, Deserialize, de::DeserializeOwned};
+use serde::{de::DeserializeOwned, Deserialize, Serialize};

/// A `ChainSpec` extension.
///
/// This trait is implemented automatically by `ChainSpecGroup` macro.
pub trait Group: Clone + Sized {
    /// An associated type containing fork definition.
-    type Fork: Fork<Base = Self>;
+    type Fork: Fork<Base = Self>;

    /// Convert to fork type.
    fn to_fork(self) -> Self::Fork;
@@ -43,7 +47,7 @@ pub trait Group: Clone + Sized {
/// a complete set of parameters
pub trait Fork: Serialize + DeserializeOwned + Clone + Sized {
    /// A base `Group` type.
-    type Base: Group<Fork = Self>;
+    type Base: Group<Fork = Self>;

    /// Combine with another struct.
    ///
@@ -122,11 +126,14 @@ pub trait Extension: Serialize + DeserializeOwned + Clone {
    /// Get an extension of specific type.
    fn get<T: 'static>(&self) -> Option<&T>;
-    /// Get an extension of specific type as refernce to `Any`
+    /// Get an extension of specific type as reference to `Any`.
    fn get_any(&self, t: TypeId) -> &dyn Any;
+    /// Get an extension of specific type as mutable reference to `Any`.
+    fn get_any_mut(&mut self, t: TypeId) -> &mut dyn Any;

    /// Get forkable extensions of specific type.
-    fn forks<BlockNumber, T>(&self) -> Option<Forks<BlockNumber, T>> where
+    fn forks<BlockNumber, T>(&self) -> Option<Forks<BlockNumber, T>>
+    where
        BlockNumber: Ord + Clone + 'static,
        T: Group + 'static,
        <Self::Forks as IsForks>::Extension: Extension,
@@ -140,8 +147,15 @@ pub trait Extension: Serialize + DeserializeOwned + Clone {
impl Extension for crate::NoExtension {
    type Forks = Self;

-    fn get<T: 'static>(&self) -> Option<&T> { None }
-    fn get_any(&self, _t: TypeId) -> &dyn Any { self }
+    fn get<T: 'static>(&self) -> Option<&T> {
+        None
+    }
+    fn get_any(&self, _t: TypeId) -> &dyn Any {
+        self
+    }
+    fn get_any_mut(&mut self, _: TypeId) -> &mut dyn Any {
+        self
+    }
}

pub trait IsForks {
@@ -164,14 +178,12 @@ pub struct Forks<BlockNumber: Ord, T: Group> {
impl<B: Ord, T: Group + Default> Default for Forks<B, T> {
    fn default() -> Self {
-        Self {
-            base: Default::default(),
-            forks: Default::default(),
-        }
+        Self { base: Default::default(), forks: Default::default() }
    }
}

-impl<B: Ord + Clone, T: Group> Forks<B, T> where
+impl<B: Ord + Clone, T: Group> Forks<B, T>
+where
    T::Fork: Debug,
{
    /// Create new fork definition given the base and the forks.
@@ -193,7 +205,8 @@ impl<B: Ord + Clone, T: Group> Forks<B, T> where
    }
}

-impl<B, T> IsForks for Forks<B, T> where
+impl<B, T> IsForks for Forks<B, T>
+where
    B: Ord + 'static,
    T: Group + 'static,
{
@@ -201,56 +214,69 @@ impl<B, T> IsForks for Forks<B, T> where
    type Extension = T;
}

-impl<B: Ord + Clone, T: Group + 'static> Forks<B, T> where
+impl<B: Ord + Clone, T: Group + 'static> Forks<B, T>
+where
    T::Fork: Extension,
{
    /// Get forks definition for a subset of this extension.
    ///
    /// Returns the `Forks` struct, but limited to a particular type
    /// within the extension.
-    pub fn for_type<X>(&self) -> Option<Forks<B, X>> where
+    pub fn for_type<X>(&self) -> Option<Forks<B, X>>
+    where
        X: Group + 'static,
    {
        let base = self.base.get::<X>()?.clone();
-        let forks = self.forks.iter().filter_map(|(k, v)| {
-            Some((k.clone(), v.get::<Option<X::Fork>>()?.clone()?))
-        }).collect();
-
-        Some(Forks {
-            base,
-            forks,
-        })
+        let forks = self
+            .forks
+            .iter()
+            .filter_map(|(k, v)| Some((k.clone(), v.get::<Option<X::Fork>>()?.clone()?)))
+            .collect();
+
+        Some(Forks { base, forks })
    }
}

-impl<B, E> Extension for Forks<B, E> where
+impl<B, E> Extension for Forks<B, E>
+where
    B: Serialize + DeserializeOwned + Ord + Clone + 'static,
    E: Extension + Group + 'static,
{
    type Forks = Self;

    fn get<T: 'static>(&self) -> Option<&T> {
-        match TypeId::of::<T>() {
-            x if x == TypeId::of::<E>() => Any::downcast_ref(&self.base),
-            _ => self.base.get(),
+        if TypeId::of::<T>() == TypeId::of::<E>() {
+            <dyn Any>::downcast_ref(&self.base)
+        } else {
+            self.base.get()
        }
    }

    fn get_any(&self, t: TypeId) -> &dyn Any {
-        match t {
-            x if x == TypeId::of::<E>() => &self.base,
-            _ => self.base.get_any(t),
+        if t == TypeId::of::<E>() {
+            &self.base
+        } else {
+            self.base.get_any(t)
        }
    }

-    fn forks<BlockNumber, T>(&self) -> Option<Forks<BlockNumber, T>> where
+    fn get_any_mut(&mut self, t: TypeId) -> &mut dyn Any {
+        if t == TypeId::of::<E>() {
+            &mut self.base
+        } else {
+            self.base.get_any_mut(t)
+        }
+    }
+
+    fn forks<BlockNumber, T>(&self) -> Option<Forks<BlockNumber, T>>
+    where
        BlockNumber: Ord + Clone + 'static,
        T: Group + 'static,
        <Self::Forks as IsForks>::Extension: Extension,
        <<Self::Forks as IsForks>::Extension as Group>::Fork: Extension,
    {
        if TypeId::of::<BlockNumber>() == TypeId::of::<B>() {
-            Any::downcast_ref(&self.for_type::<T>()?).cloned()
+            <dyn Any>::downcast_ref(&self.for_type::<T>()?).cloned()
        } else {
            self.get::<Forks<BlockNumber, <Self::Forks as IsForks>::Extension>>()?
                .for_type()
@@ -262,24 +288,35 @@ impl<B, E> Extension for Forks<B, E> where
pub trait GetExtension {
    /// Get an extension of specific type.
    fn get_any(&self, t: TypeId) -> &dyn Any;
+
+    /// Get an extension of specific type with mutable access.
+    fn get_any_mut(&mut self, t: TypeId) -> &mut dyn Any;
}

-impl<E: Extension> GetExtension for E {
+impl<E: Extension> GetExtension for E {
    fn get_any(&self, t: TypeId) -> &dyn Any {
        Extension::get_any(self, t)
    }
+
+    fn get_any_mut(&mut self, t: TypeId) -> &mut dyn Any {
+        Extension::get_any_mut(self, t)
+    }
}

-/// Helper function that queries an extension by type from `GetExtension`
-/// trait object.
+/// Helper function that queries an extension by type from `GetExtension` trait object.
pub fn get_extension<T: 'static>(e: &dyn GetExtension) -> Option<&T> {
-    Any::downcast_ref(GetExtension::get_any(e, TypeId::of::<T>()))
+    <dyn Any>::downcast_ref(GetExtension::get_any(e, TypeId::of::<T>()))
+}
+
+/// Helper function that queries an extension by type from `GetExtension` trait object.
+pub fn get_extension_mut<T: 'static>(e: &mut dyn GetExtension) -> Option<&mut T> {
+    <dyn Any>::downcast_mut(GetExtension::get_any_mut(e, TypeId::of::<T>()))
}
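Together, `get_extension` and the new `get_extension_mut` give callers typed access through the `GetExtension` trait object. A minimal usage sketch follows; the `Seal` type and its field are hypothetical, deriving `ChainSpecExtension` exactly like the test structs below:

    use sc_chain_spec::{get_extension, get_extension_mut, ChainSpecExtension, GetExtension};

    // Hypothetical extension carrying a single tweakable value.
    #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecExtension)]
    pub struct Seal {
        pub interval_ms: u64,
    }

    fn double_interval(ext: &mut dyn GetExtension) {
        // Read-only lookup, available before this change:
        let current = get_extension::<Seal>(ext).map(|s| s.interval_ms).unwrap_or(0);
        // Mutable lookup, routed through the new `get_any_mut` plumbing:
        if let Some(seal) = get_extension_mut::<Seal>(ext) {
            seal.interval_ms = current * 2;
        }
    }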
#[cfg(test)]
mod tests {
    use super::*;
-    use sc_chain_spec_derive::{ChainSpecGroup, ChainSpecExtension};
+    use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup};

    // Make the proc macro work for tests and doc tests.
    use crate as sc_chain_spec;
@@ -295,7 +332,9 @@ mod tests {
        pub test: u8,
    }

-    #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)]
+    #[derive(
+        Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension,
+    )]
    #[serde(deny_unknown_fields)]
    pub struct Extensions {
        pub ext1: Extension1,
        pub ext2: Extension2,
@@ -313,11 +352,12 @@ mod tests {
    #[test]
    fn forks_should_work_correctly() {
-        use super::Extension as _ ;
+        use super::Extension as _;

        // We first need to deserialize into a `Value` because of the following bug:
        // https://github.com/serde-rs/json/issues/505
-        let ext_val: serde_json::Value = serde_json::from_str(r#"
+        let ext_val: serde_json::Value = serde_json::from_str(
+            r#"
{
    "test": 11,
    "forkable": {
        "ext1": {
            "test": 15,
            "forks": {
                "1": { "test": 5 }
            }
        },
        "ext2": {
            "test": 123,
            "forks": {
                "2": { "test": 5 },
                "5": { "test": 1 }
            }
        }
    }
}
-        "#).unwrap();
+        "#,
+        )
+        .unwrap();

        let ext: Ext2 = serde_json::from_value(ext_val).unwrap();

-        assert_eq!(ext.get::<Extension1>(), Some(&Extension1 {
-            test: 11
-        }));
+        assert_eq!(ext.get::<Extension1>(), Some(&Extension1 { test: 11 }));

        // get forks definition
        let forks = ext.get::<Forks<u64, Extensions>>().unwrap();
-        assert_eq!(forks.at_block(0), Extensions {
-            ext1: Extension1 { test: 15 },
-            ext2: Extension2 { test: 123 },
-        });
-        assert_eq!(forks.at_block(1), Extensions {
-            ext1: Extension1 { test: 5 },
-            ext2: Extension2 { test: 123 },
-        });
-        assert_eq!(forks.at_block(2), Extensions {
-            ext1: Extension1 { test: 5 },
-            ext2: Extension2 { test: 5 },
-        });
-        assert_eq!(forks.at_block(4), Extensions {
-            ext1: Extension1 { test: 5 },
-            ext2: Extension2 { test: 5 },
-        });
-        assert_eq!(forks.at_block(5), Extensions {
-            ext1: Extension1 { test: 5 },
-            ext2: Extension2 { test: 1 },
-        });
-        assert_eq!(forks.at_block(10), Extensions {
-            ext1: Extension1 { test: 5 },
-            ext2: Extension2 { test: 1 },
-        });
+        assert_eq!(
+            forks.at_block(0),
+            Extensions { ext1: Extension1 { test: 15 }, ext2: Extension2 { test: 123 } }
+        );
+        assert_eq!(
+            forks.at_block(1),
+            Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 123 } }
+        );
+        assert_eq!(
+            forks.at_block(2),
+            Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 5 } }
+        );
+        assert_eq!(
+            forks.at_block(4),
+            Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 5 } }
+        );
+        assert_eq!(
+            forks.at_block(5),
+            Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 1 } }
+        );
+        assert_eq!(
+            forks.at_block(10),
+            Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 1 } }
+        );

        assert!(forks.at_block(10).get::<Extension2>().is_some());

        // filter forks for `Extension2`
diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs
index 94ed93758bb2d..334d8f8b3d7ac 100644
--- a/client/chain-spec/src/lib.rs
+++ b/client/chain-spec/src/lib.rs
@@ -1,18 +1,20 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Substrate chain configurations.
//!
@@ -20,7 +22,7 @@
//! a runtime-specific configuration file (a.k.a chain spec).
//!
//! Basic chain spec type containing all required parameters is
-//! [`ChainSpec`](./struct.ChainSpec.html). It can be extended with
+//! [`GenericChainSpec`]. It can be extended with
//! additional options that contain configuration specific to your chain.
//! Usually the extension is going to be an amalgamate of types exposed
//! by Substrate core modules. To allow the core modules to retrieve
@@ -33,7 +35,7 @@
//!
//! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecExtension)]
//! pub struct MyExtension {
-//! pub known_blocks: HashMap<u64, String>,
+//! pub known_blocks: HashMap<u64, String>,
//! }
//!
//! pub type MyChainSpec<G> = GenericChainSpec<G, MyExtension>;
@@ -51,19 +53,19 @@
//!
//! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)]
//! pub struct ClientParams {
-//! max_block_size: usize,
-//! max_extrinsic_size: usize,
+//! max_block_size: usize,
+//! max_extrinsic_size: usize,
//! }
//!
//! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)]
//! pub struct PoolParams {
-//! max_transaction_size: usize,
+//! max_transaction_size: usize,
//! }
//!
//! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup, ChainSpecExtension)]
//! pub struct Extension {
-//! pub client: ClientParams,
-//! pub pool: PoolParams,
+//! pub client: ClientParams,
+//! pub pool: PoolParams,
//! }
//!
//! pub type BlockNumber = u64;
@@ -86,20 +88,20 @@
//!
//! #[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)]
//! pub struct ClientParams {
-//! max_block_size: usize,
-//! max_extrinsic_size: usize,
+//! max_block_size: usize,
+//! max_extrinsic_size: usize,
//! }
//!
//! #[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)]
//! pub struct PoolParams {
-//! max_transaction_size: usize,
+//! max_transaction_size: usize,
//! }
//!
//! #[derive(Clone, Debug, Serialize, Deserialize, ChainSpecExtension)]
//! pub struct Extension {
-//! pub client: ClientParams,
-//! #[forks]
-//! pub pool: Forks<BlockNumber, PoolParams>,
+//! pub client: ClientParams,
+//! #[forks]
+//! pub pool: Forks<BlockNumber, PoolParams>,
//! }
//!
//! pub type MyChainSpec<G> = GenericChainSpec<G, Extension>;
@@ -108,18 +110,42 @@
mod chain_spec;
mod extension;

-pub use chain_spec::{
-    ChainSpec as GenericChainSpec, NoExtension, LightSyncState, SerializableLightSyncState,
+pub use chain_spec::{ChainSpec as GenericChainSpec, NoExtension};
+pub use extension::{
+    get_extension, get_extension_mut, Extension, Fork, Forks, GetExtension, Group,
};
-pub use extension::{Group, Fork, Forks, Extension, GetExtension, get_extension};
pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup};
-pub use sp_chain_spec::{Properties, ChainType};

-use serde::{Serialize, de::DeserializeOwned};
-use sp_runtime::BuildStorage;
use sc_network::config::MultiaddrWithPeerId;
use sc_telemetry::TelemetryEndpoints;
+use serde::{de::DeserializeOwned, Serialize};
use sp_core::storage::Storage;
+use sp_runtime::BuildStorage;
+
+/// The type of a chain.
+///
+/// This can be used by tools to determine the type of a chain for displaying
+/// additional information or enabling additional features.
+#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)]
+pub enum ChainType {
+    /// A development chain that runs mainly on one node.
+    Development,
+    /// A local chain that runs locally on multiple nodes for testing purposes.
+    Local,
+    /// A live chain.
+    Live,
+    /// Some custom chain type.
+    Custom(String),
+}
+
+impl Default for ChainType {
+    fn default() -> Self {
+        Self::Live
+    }
+}
+
+/// Arbitrary properties defined in chain spec as a JSON object
+pub type Properties = serde_json::map::Map<String, serde_json::Value>;

/// A set of traits for the runtime genesis config.
pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {}
@@ -143,8 +169,10 @@ pub trait ChainSpec: BuildStorage + Send + Sync {
    ///
    /// Returns an empty JSON object if 'properties' not defined in config
    fn properties(&self) -> Properties;
-    /// Returns a reference to defined chain spec extensions.
+    /// Returns a reference to the defined chain spec extensions.
    fn extensions(&self) -> &dyn GetExtension;
+    /// Returns a mutable reference to the defined chain spec extensions.
+    fn extensions_mut(&mut self) -> &mut dyn GetExtension;
    /// Add a bootnode to the list.
    fn add_boot_node(&mut self, addr: MultiaddrWithPeerId);
    /// Return spec as JSON.
@@ -157,8 +185,8 @@ pub trait ChainSpec: BuildStorage + Send + Sync {
    ///
    /// This will be used as storage at genesis.
    fn set_storage(&mut self, storage: Storage);
-    /// Hardcode infomation to allow light clients to sync quickly into the chain spec.
-    fn set_light_sync_state(&mut self, light_sync_state: SerializableLightSyncState);
+    /// Returns code substitutes that should be used for the on chain wasm.
+    fn code_substitutes(&self) -> std::collections::HashMap<String, Vec<u8>>;
}

impl std::fmt::Debug for dyn ChainSpec {
diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml
index c19d61aecc102..e7a0330e76e0c 100644
--- a/client/cli/Cargo.toml
+++ b/client/cli/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "sc-cli"
-version = "0.8.0"
+version = "0.10.0-dev"
authors = ["Parity Technologies <admin@parity.io>"]
description = "Substrate CLI interface."
edition = "2018" @@ -13,62 +13,40 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -derive_more = "0.99.2" -log = "0.4.8" -atty = "0.2.13" -regex = "1.3.4" -time = "0.1.42" -ansi_term = "0.12.1" -lazy_static = "1.4.0" -tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } -futures = "0.3.4" +log = "0.4.11" +regex = "1.4.2" +tokio = { version = "1.10", features = [ "signal", "rt-multi-thread" ] } +futures = "0.3.9" fdlimit = "0.2.1" -libp2p = "0.28.1" -parity-scale-codec = "1.3.0" +libp2p = "0.39.1" +parity-scale-codec = "2.0.0" hex = "0.4.2" rand = "0.7.3" -bip39 = "0.6.0-beta.1" -serde_json = "1.0.41" -sc-keystore = { version = "2.0.0", path = "../keystore" } -sc-informant = { version = "0.8.0", path = "../informant" } -sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0", path = "../network" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sc-service = { version = "0.8.0", default-features = false, path = "../service" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0"} -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sc-consensus-babe = { version = "0.8.0", path = "../consensus/babe" } -sc-consensus-epochs = { version = "0.8.0", path = "../consensus/epochs" } -sc-finality-grandpa = { version = "0.8.0", path = "../finality-grandpa" } -names = "0.11.0" +tiny-bip39 = "0.8.0" +serde_json = "1.0.68" +sc-keystore = { version = "4.0.0-dev", path = "../keystore" } +sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } +sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sc-service = { version = "0.10.0-dev", default-features = false, path = "../service" } +sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } +sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } +names = { version = "0.12.0", default-features = false } structopt = "0.3.8" -sc-tracing = { version = "2.0.0", path = "../tracing" } +sc-tracing = { version = "4.0.0-dev", path = "../tracing" } chrono = "0.4.10" -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -serde = "1.0.111" -tracing = "0.1.10" -tracing-log = "0.1.1" -tracing-subscriber = "0.2.10" -sc-cli-proc-macro = { version = "2.0.0", path = "./proc-macro" } - -[target.'cfg(not(target_os = "unknown"))'.dependencies] 
-rpassword = "4.0.1"
-
-[target.'cfg(target_family = "unix")'.dependencies]
-nix = "0.17.0"
+serde = "1.0.126"
+thiserror = "1.0.21"
+rpassword = "5.0.0"

[dev-dependencies]
tempfile = "3.1.0"
-sp-io = { version = "2.0.0-rc3", path = "../../primitives/io" }
-sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" }

[features]
wasmtime = [
diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs
index 85400f2a27759..5221500f08b33 100644
--- a/client/cli/src/arg_enums.rs
+++ b/client/cli/src/arg_enums.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
@@ -20,35 +20,62 @@
use structopt::clap::arg_enum;

-arg_enum! {
-    /// How to execute Wasm runtime code
-    #[allow(missing_docs)]
-    #[derive(Debug, Clone, Copy)]
-    pub enum WasmExecutionMethod {
-        // Uses an interpreter.
-        Interpreted,
-        // Uses a compiled runtime.
-        Compiled,
+/// How to execute Wasm runtime code.
+#[derive(Debug, Clone, Copy)]
+pub enum WasmExecutionMethod {
+    /// Uses an interpreter.
+    Interpreted,
+    /// Uses a compiled runtime.
+    Compiled,
+}
+
+impl std::fmt::Display for WasmExecutionMethod {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Interpreted => write!(f, "Interpreted"),
+            Self::Compiled => write!(f, "Compiled"),
+        }
+    }
+}
+
+impl std::str::FromStr for WasmExecutionMethod {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if s.eq_ignore_ascii_case("interpreted-i-know-what-i-do") {
+            Ok(Self::Interpreted)
+        } else if s.eq_ignore_ascii_case("compiled") {
+            #[cfg(feature = "wasmtime")]
+            {
+                Ok(Self::Compiled)
+            }
+            #[cfg(not(feature = "wasmtime"))]
+            {
+                Err(format!("`Compiled` variant requires the `wasmtime` feature to be enabled"))
+            }
+        } else {
+            Err(format!("Unknown variant `{}`, known variants: {:?}", s, Self::variants()))
+        }
    }
}

impl WasmExecutionMethod {
-    /// Returns list of variants that are not disabled by feature flags.
-    pub fn enabled_variants() -> Vec<&'static str> {
-        Self::variants()
-            .iter()
-            .cloned()
-            .filter(|&name| cfg!(feature = "wasmtime") || name != "Compiled")
-            .collect()
+    /// Returns all the variants of this enum to be shown in the cli.
+    pub fn variants() -> &'static [&'static str] {
+        let variants = &["interpreted-i-know-what-i-do", "compiled"];
+        if cfg!(feature = "wasmtime") {
+            variants
+        } else {
+            &variants[..1]
+        }
    }
}

impl Into<sc_service::config::WasmExecutionMethod> for WasmExecutionMethod {
    fn into(self) -> sc_service::config::WasmExecutionMethod {
        match self {
-            WasmExecutionMethod::Interpreted => {
-                sc_service::config::WasmExecutionMethod::Interpreted
-            }
+            WasmExecutionMethod::Interpreted =>
+                sc_service::config::WasmExecutionMethod::Interpreted,
            #[cfg(feature = "wasmtime")]
            WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled,
            #[cfg(not(feature = "wasmtime"))]
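A quick illustrative sketch of how the hand-written `FromStr` above behaves, assuming the enum is in scope (exercise code, not part of the patch):

    use std::str::FromStr;

    #[test]
    fn parses_wasm_execution_method() {
        // Case-insensitive match on the deliberately verbose interpreter spelling:
        assert!(WasmExecutionMethod::from_str("Interpreted-I-Know-What-I-Do").is_ok());
        // "compiled" only parses when the crate is built with the `wasmtime` feature:
        assert_eq!(
            WasmExecutionMethod::from_str("compiled").is_ok(),
            cfg!(feature = "wasmtime"),
        );
        // Unknown strings report the variants returned by `Self::variants()`:
        assert!(WasmExecutionMethod::from_str("jit").is_err());
    }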
@@ -64,7 +91,6 @@ arg_enum! {
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    pub enum TracingReceiver {
        Log,
-        Telemetry,
    }
}
@@ -72,7 +98,6 @@ impl Into<sc_tracing::TracingReceiver> for TracingReceiver {
    fn into(self) -> sc_tracing::TracingReceiver {
        match self {
            TracingReceiver::Log => sc_tracing::TracingReceiver::Log,
-            TracingReceiver::Telemetry => sc_tracing::TracingReceiver::Telemetry,
        }
    }
}
@@ -165,18 +190,40 @@ impl Into<sc_service::config::RpcMethods> for RpcMethods {
    }
}

-arg_enum! {
-    /// Database backend
-    #[allow(missing_docs)]
-    #[derive(Debug, Clone, Copy)]
-    pub enum Database {
-        // Facebooks RocksDB
-        RocksDb,
-        // ParityDb. https://github.com/paritytech/parity-db/
-        ParityDb,
+/// Database backend
+#[derive(Debug, Clone, Copy)]
+pub enum Database {
+    /// Facebook's RocksDB.
+    RocksDb,
+    /// ParityDb.
+    ParityDb,
+    /// Detect whether there is an existing database. Use it if there is; if not, create a new
+    /// instance of ParityDb.
+    Auto,
+}
+
+impl std::str::FromStr for Database {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if s.eq_ignore_ascii_case("rocksdb") {
+            Ok(Self::RocksDb)
+        } else if s.eq_ignore_ascii_case("paritydb-experimental") {
+            Ok(Self::ParityDb)
+        } else if s.eq_ignore_ascii_case("auto") {
+            Ok(Self::Auto)
+        } else {
+            Err(format!("Unknown variant `{}`, known variants: {:?}", s, Self::variants()))
+        }
    }
}

+impl Database {
+    /// Returns all the variants of this enum to be shown in the cli.
+    pub fn variants() -> &'static [&'static str] {
+        &["rocksdb", "paritydb-experimental", "auto"]
+    }
+}

arg_enum! {
    /// Whether off-chain workers are enabled.
@@ -189,6 +236,35 @@ arg_enum! {
    }
}

+arg_enum! {
+    /// Syncing mode.
+    #[allow(missing_docs)]
+    #[derive(Debug, Clone, Copy)]
+    pub enum SyncMode {
+        // Full sync. Download and verify all blocks.
+        Full,
+        // Download blocks without executing them. Download latest state with proofs.
+        Fast,
+        // Download blocks without executing them. Download latest state without proofs.
+        FastUnsafe,
+        // Prove finality and download the latest state.
+        Warp,
+    }
+}
+
+impl Into<sc_network::config::SyncMode> for SyncMode {
+    fn into(self) -> sc_network::config::SyncMode {
+        match self {
+            SyncMode::Full => sc_network::config::SyncMode::Full,
+            SyncMode::Fast =>
+                sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false },
+            SyncMode::FastUnsafe =>
+                sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false },
+            SyncMode::Warp => sc_network::config::SyncMode::Warp,
+        }
+    }
+}
+
/// Default value for the `--execution-syncing` parameter.
pub const DEFAULT_EXECUTION_SYNCING: ExecutionStrategy = ExecutionStrategy::NativeElseWasm;
/// Default value for the `--execution-import-block` parameter.
diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs
index 616c5139f64f0..75fdf07643ee2 100644
--- a/client/cli/src/commands/build_spec_cmd.rs
+++ b/client/cli/src/commands/build_spec_cmd.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
@@ -16,18 +16,22 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
-use crate::error; -use crate::params::NodeKeyParams; -use crate::params::SharedParams; -use crate::CliConfiguration; +use crate::{ + error, + params::{NodeKeyParams, SharedParams}, + CliConfiguration, +}; use log::info; use sc_network::config::build_multiaddr; -use sc_service::{config::{MultiaddrWithPeerId, NetworkConfiguration}, ChainSpec}; -use structopt::StructOpt; +use sc_service::{ + config::{MultiaddrWithPeerId, NetworkConfiguration}, + ChainSpec, +}; use std::io::Write; +use structopt::StructOpt; /// The `build-spec` command used to build a specification. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct BuildSpecCmd { /// Force raw genesis storage output. #[structopt(long = "raw")] diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index b536d4f26bb6c..07a76319dca3f 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,7 +17,9 @@ // along with this program. If not, see . use crate::{ - CliConfiguration, error, params::{ImportParams, SharedParams, BlockNumberOrHash}, + error, + params::{BlockNumberOrHash, ImportParams, SharedParams}, + CliConfiguration, }; use sc_client_api::{BlockBackend, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -25,7 +27,7 @@ use std::{fmt::Debug, str::FromStr, sync::Arc}; use structopt::StructOpt; /// The `check-block` command used to validate blocks. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct CheckBlockCmd { /// Block hash or number #[structopt(value_name = "HASH or NUMBER")] @@ -48,11 +50,7 @@ pub struct CheckBlockCmd { impl CheckBlockCmd { /// Run the check-block command - pub async fn run( - &self, - client: Arc, - import_queue: IQ, - ) -> error::Result<()> + pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> where B: BlockT + for<'de> serde::Deserialize<'de>, C: BlockBackend + UsageProvider + Send + Sync + 'static, diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index e175d498941b8..ca3069442a1d3 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,25 +16,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::error;
-use crate::params::{GenericNumber, DatabaseParams, PruningParams, SharedParams};
-use crate::CliConfiguration;
-use log::info;
-use sc_service::{
-    config::DatabaseConfig, chain_ops::export_blocks,
+use crate::{
+    error,
+    params::{DatabaseParams, GenericNumber, PruningParams, SharedParams},
+    CliConfiguration,
};
+use log::info;
use sc_client_api::{BlockBackend, UsageProvider};
+use sc_service::{chain_ops::export_blocks, config::DatabaseSource};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
-use std::fmt::Debug;
-use std::fs;
-use std::io;
-use std::path::PathBuf;
-use std::str::FromStr;
-use std::sync::Arc;
+use std::{fmt::Debug, fs, io, path::PathBuf, str::FromStr, sync::Arc};
use structopt::StructOpt;

/// The `export-blocks` command used to export blocks.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, StructOpt, Clone)]
pub struct ExportBlocksCmd {
    /// Output file name or stdout if unspecified.
    #[structopt(parse(from_os_str))]
@@ -74,18 +69,18 @@ impl ExportBlocksCmd {
    pub async fn run<B, C>(
        &self,
        client: Arc<C>,
-        database_config: DatabaseConfig,
+        database_config: DatabaseSource,
    ) -> error::Result<()>
    where
        B: BlockT,
        C: BlockBackend<B> + UsageProvider<B> + 'static,
        <<B::Header as HeaderT>::Number as FromStr>::Err: Debug,
    {
-        if let DatabaseConfig::RocksDb { ref path, .. } = database_config {
+        if let DatabaseSource::RocksDb { ref path, .. } = database_config {
            info!("DB path: {}", path.display());
        }

-        let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1);
+        let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1u32);
        let to = self.to.as_ref().and_then(|t| t.parse().ok());

        let binary = self.binary;
@@ -95,9 +90,7 @@ impl ExportBlocksCmd {
            None => Box::new(io::stdout()),
        };

-        export_blocks(client, file, from.into(), to, binary)
-            .await
-            .map_err(Into::into)
+        export_blocks(client, file, from.into(), to, binary).await.map_err(Into::into)
    }
}
diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs
index c078db0d8aea9..36eabd2c24f5c 100644
--- a/client/cli/src/commands/export_state_cmd.rs
+++ b/client/cli/src/commands/export_state_cmd.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
@@ -17,17 +17,19 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.

use crate::{
-    CliConfiguration, error, params::{PruningParams, SharedParams, BlockNumberOrHash},
+    error,
+    params::{BlockNumberOrHash, PruningParams, SharedParams},
+    CliConfiguration,
};
use log::info;
+use sc_client_api::{StorageProvider, UsageProvider};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
-use std::{fmt::Debug, str::FromStr, io::Write, sync::Arc};
+use std::{fmt::Debug, io::Write, str::FromStr, sync::Arc};
use structopt::StructOpt;
-use sc_client_api::{StorageProvider, UsageProvider};

/// The `export-state` command used to export the state of a given block into
/// a chain spec.
-#[derive(Debug, StructOpt)]
+#[derive(Debug, StructOpt, Clone)]
pub struct ExportStateCmd {
    /// Block hash or number.
#[structopt(value_name = "HASH or NUMBER")] diff --git a/client/cli/src/commands/generate.rs b/client/cli/src/commands/generate.rs index 86b039ce6a4c5..7032ebd72e0c7 100644 --- a/client/cli/src/commands/generate.rs +++ b/client/cli/src/commands/generate.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,15 +16,15 @@ // limitations under the License. //! Implementation of the `generate` subcommand -use bip39::{MnemonicType, Mnemonic, Language}; -use structopt::StructOpt; use crate::{ - utils::print_from_uri, KeystoreParams, Error, - with_crypto_scheme, NetworkSchemeFlag, OutputTypeFlag, CryptoSchemeFlag, + utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, + NetworkSchemeFlag, OutputTypeFlag, }; +use bip39::{Language, Mnemonic, MnemonicType}; +use structopt::StructOpt; /// The `generate` command -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] #[structopt(name = "generate", about = "Generate a random account")] pub struct GenerateCmd { /// The number of words in the phrase to generate. One of 12 (default), 15, 18, 21 and 24. @@ -52,12 +52,11 @@ impl GenerateCmd { /// Run the command pub fn run(&self) -> Result<(), Error> { let words = match self.words { - Some(words) => { - MnemonicType::for_word_count(words) - .map_err(|_| { - Error::Input("Invalid number of words given for phrase: must be 12/15/18/21/24".into()) - })? - }, + Some(words) => MnemonicType::for_word_count(words).map_err(|_| { + Error::Input( + "Invalid number of words given for phrase: must be 12/15/18/21/24".into(), + ) + })?, None => MnemonicType::Words12, }; let mnemonic = Mnemonic::new(words, Language::English); diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index ad292e4712d84..74a4197f36621 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,9 @@ //! 
Implementation of the `generate-node-key` subcommand use crate::Error; -use structopt::StructOpt; -use std::{path::PathBuf, fs}; use libp2p::identity::{ed25519 as libp2p_ed25519, PublicKey}; +use std::{fs, path::PathBuf}; +use structopt::StructOpt; /// The `generate-node-key` command #[derive(Debug, StructOpt)] @@ -59,15 +59,14 @@ impl GenerateNodeKeyCmd { #[cfg(test)] mod tests { use super::*; - use tempfile::Builder; use std::io::Read; + use tempfile::Builder; #[test] fn generate_node_key() { let mut file = Builder::new().prefix("keyfile").tempfile().unwrap(); let file_path = file.path().display().to_string(); - let generate = - GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", &file_path]); + let generate = GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", &file_path]); assert!(generate.run().is_ok()); let mut buf = String::new(); assert!(file.read_to_string(&mut buf).is_ok()); diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index 00f8ec43b02fe..9b211b88d5563 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,19 +16,22 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; -use crate::params::ImportParams; -use crate::params::SharedParams; -use crate::CliConfiguration; +use crate::{ + error, + params::{ImportParams, SharedParams}, + CliConfiguration, +}; +use sc_client_api::UsageProvider; use sc_service::chain_ops::import_blocks; use sp_runtime::traits::Block as BlockT; -use std::fmt::Debug; -use std::fs; -use std::io::{self, Read, Seek}; -use std::path::PathBuf; -use std::sync::Arc; +use std::{ + fmt::Debug, + fs, + io::{self, Read, Seek}, + path::PathBuf, + sync::Arc, +}; use structopt::StructOpt; -use sc_client_api::UsageProvider; /// The `import-blocks` command used to import blocks. #[derive(Debug, StructOpt)] @@ -63,11 +66,7 @@ impl ReadPlusSeek for T {} impl ImportBlocksCmd { /// Run the import-blocks command - pub async fn run( - &self, - client: Arc, - import_queue: IQ, - ) -> error::Result<()> + pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> where C: UsageProvider + Send + Sync + 'static, B: BlockT + for<'de> serde::Deserialize<'de>, @@ -79,7 +78,7 @@ impl ImportBlocksCmd { let mut buffer = Vec::new(); io::stdin().read_to_end(&mut buffer)?; Box::new(io::Cursor::new(buffer)) - } + }, }; import_blocks(client, import_queue, file, false, self.binary) diff --git a/client/cli/src/commands/insert.rs b/client/cli/src/commands/insert.rs deleted file mode 100644 index 60cf9ff8c2423..0000000000000 --- a/client/cli/src/commands/insert.rs +++ /dev/null @@ -1,95 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Implementation of the `insert` subcommand - -use crate::{Error, KeystoreParams, CryptoSchemeFlag, SharedParams, utils, with_crypto_scheme}; -use std::sync::Arc; -use structopt::StructOpt; -use sp_core::crypto::KeyTypeId; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use std::convert::TryFrom; -use sc_service::config::KeystoreConfig; -use sc_keystore::LocalKeystore; -use sp_core::crypto::SecretString; - -/// The `insert` command -#[derive(Debug, StructOpt)] -#[structopt( - name = "insert", - about = "Insert a key to the keystore of a node." -)] -pub struct InsertCmd { - /// The secret key URI. - /// If the value is a file, the file content is used as URI. - /// If not given, you will be prompted for the URI. - #[structopt(long)] - suri: Option, - - /// Key type, examples: "gran", or "imon" - #[structopt(long)] - key_type: String, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub keystore_params: KeystoreParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub crypto_scheme: CryptoSchemeFlag, -} - -impl InsertCmd { - /// Run the command - pub fn run(&self) -> Result<(), Error> { - let suri = utils::read_uri(self.suri.as_ref())?; - let base_path = self.shared_params.base_path.as_ref() - .ok_or_else(|| Error::Other("please supply base path".into()))?; - - let (keystore, public) = match self.keystore_params.keystore_config(base_path)? { - KeystoreConfig::Path { path, password } => { - let public = with_crypto_scheme!( - self.crypto_scheme.scheme, - to_vec(&suri, password.clone()) - )?; - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(path, password) - .map_err(|e| format!("{}", e))?); - (keystore, public) - }, - _ => unreachable!("keystore_config always returns path and password; qed") - }; - - let key_type = KeyTypeId::try_from(self.key_type.as_str()) - .map_err(|_| { - Error::Other("Cannot convert argument to keytype: argument should be 4-character string".into()) - })?; - - SyncCryptoStore::insert_unknown(&*keystore, key_type, &suri, &public[..]) - .map_err(|e| Error::Other(format!("{:?}", e)))?; - - Ok(()) - } -} - -fn to_vec(uri: &str, pass: Option) -> Result, Error> { - let p = utils::pair_from_suri::
<P>
(uri, pass)?; - Ok(p.public().as_ref().to_vec()) -} diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs new file mode 100644 index 0000000000000..05055dc53c1e2 --- /dev/null +++ b/client/cli/src/commands/insert_key.rs @@ -0,0 +1,172 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the `insert` subcommand + +use crate::{ + utils, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, SharedParams, SubstrateCli, +}; +use sc_keystore::LocalKeystore; +use sc_service::config::{BasePath, KeystoreConfig}; +use sp_core::crypto::{KeyTypeId, SecretString}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use std::{convert::TryFrom, sync::Arc}; +use structopt::StructOpt; + +/// The `insert` command +#[derive(Debug, StructOpt, Clone)] +#[structopt(name = "insert", about = "Insert a key to the keystore of a node.")] +pub struct InsertKeyCmd { + /// The secret key URI. + /// If the value is a file, the file content is used as URI. + /// If not given, you will be prompted for the URI. + #[structopt(long)] + suri: Option, + + /// Key type, examples: "gran", or "imon" + #[structopt(long)] + key_type: String, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub crypto_scheme: CryptoSchemeFlag, +} + +impl InsertKeyCmd { + /// Run the command + pub fn run(&self, cli: &C) -> Result<(), Error> { + let suri = utils::read_uri(self.suri.as_ref())?; + let base_path = self + .shared_params + .base_path() + .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); + let chain_id = self.shared_params.chain_id(self.shared_params.is_dev()); + let chain_spec = cli.load_spec(&chain_id)?; + let config_dir = base_path.config_dir(chain_spec.id()); + + let (keystore, public) = match self.keystore_params.keystore_config(&config_dir)? { + (_, KeystoreConfig::Path { path, password }) => { + let public = with_crypto_scheme!( + self.crypto_scheme.scheme, + to_vec(&suri, password.clone()) + )?; + let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(path, password)?); + (keystore, public) + }, + _ => unreachable!("keystore_config always returns path and password; qed"), + }; + + let key_type = + KeyTypeId::try_from(self.key_type.as_str()).map_err(|_| Error::KeyTypeInvalid)?; + + SyncCryptoStore::insert_unknown(&*keystore, key_type, &suri, &public[..]) + .map_err(|_| Error::KeyStoreOperation)?; + + Ok(()) + } +} + +fn to_vec(uri: &str, pass: Option) -> Result, Error> { + let p = utils::pair_from_suri::
<P>
(uri, pass)?; + Ok(p.public().as_ref().to_vec()) +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_service::{ChainSpec, ChainType, GenericChainSpec, NoExtension}; + use sp_core::{sr25519::Pair, Pair as _, Public}; + use structopt::StructOpt; + use tempfile::TempDir; + + struct Cli; + + impl SubstrateCli for Cli { + fn impl_name() -> String { + "test".into() + } + + fn impl_version() -> String { + "2.0".into() + } + + fn description() -> String { + "test".into() + } + + fn support_url() -> String { + "test.test".into() + } + + fn copyright_start_year() -> i32 { + 2021 + } + + fn author() -> String { + "test".into() + } + + fn native_runtime_version(_: &Box) -> &'static sp_version::RuntimeVersion { + unimplemented!("Not required in tests") + } + + fn load_spec(&self, _: &str) -> std::result::Result, String> { + Ok(Box::new(GenericChainSpec::from_genesis( + "test", + "test_id", + ChainType::Development, + || unimplemented!("Not required in tests"), + Vec::new(), + None, + None, + None, + NoExtension::None, + ))) + } + } + + #[test] + fn insert_with_custom_base_path() { + let path = TempDir::new().unwrap(); + let path_str = format!("{}", path.path().display()); + let (key, uri, _) = Pair::generate_with_phrase(None); + + let inspect = InsertKeyCmd::from_iter(&[ + "insert-key", + "-d", + &path_str, + "--key-type", + "test", + "--suri", + &uri, + ]); + assert!(inspect.run(&Cli).is_ok()); + + let keystore = + LocalKeystore::open(path.path().join("chains").join("test_id").join("keystore"), None) + .unwrap(); + assert!(keystore.has_keys(&[(key.public().to_raw_vec(), KeyTypeId(*b"test"))])); + } +} diff --git a/client/cli/src/commands/inspect_key.rs b/client/cli/src/commands/inspect_key.rs index fb3a7ef4f3b44..277c9015f4daf 100644 --- a/client/cli/src/commands/inspect_key.rs +++ b/client/cli/src/commands/inspect_key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ //! Implementation of the `inspect` subcommand use crate::{ - utils::{self, print_from_uri, print_from_public}, KeystoreParams, - with_crypto_scheme, NetworkSchemeFlag, OutputTypeFlag, CryptoSchemeFlag, Error, + utils::{self, print_from_public, print_from_uri}, + with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, NetworkSchemeFlag, OutputTypeFlag, }; use structopt::StructOpt; /// The `inspect` command #[derive(Debug, StructOpt)] #[structopt( - name = "inspect-key", + name = "inspect", about = "Gets a public key and a SS58 address from the provided Secret URI" )] pub struct InspectKeyCmd { @@ -103,8 +103,7 @@ mod tests { "remember fiber forum demise paper uniform squirrel feel access exclude casual effort"; let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; - let inspect = - InspectKeyCmd::from_iter(&["inspect-key", words, "--password", "12345"]); + let inspect = InspectKeyCmd::from_iter(&["inspect-key", words, "--password", "12345"]); assert!(inspect.run().is_ok()); let inspect = InspectKeyCmd::from_iter(&["inspect-key", seed]); diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs index be0b88589d5e9..92a71f8975052 100644 --- a/client/cli/src/commands/inspect_node_key.rs +++ b/client/cli/src/commands/inspect_node_key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,8 @@ //! Implementation of the `inspect-node-key` subcommand use crate::{Error, NetworkSchemeFlag}; -use std::fs; -use libp2p::identity::{PublicKey, ed25519}; -use std::path::PathBuf; +use libp2p::identity::{ed25519, PublicKey}; +use std::{fs, path::PathBuf}; use structopt::StructOpt; /// The `inspect-node-key` command @@ -42,10 +41,10 @@ pub struct InspectNodeKeyCmd { impl InspectNodeKeyCmd { /// runs the command pub fn run(&self) -> Result<(), Error> { - let mut file_content = hex::decode(fs::read(&self.file)?) - .map_err(|_| "failed to decode secret as hex")?; - let secret = ed25519::SecretKey::from_bytes(&mut file_content) - .map_err(|_| "Bad node key file")?; + let mut file_content = + hex::decode(fs::read(&self.file)?).map_err(|_| "failed to decode secret as hex")?; + let secret = + ed25519::SecretKey::from_bytes(&mut file_content).map_err(|_| "Bad node key file")?; let keypair = ed25519::Keypair::from(secret); let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); @@ -58,8 +57,7 @@ impl InspectNodeKeyCmd { #[cfg(test)] mod tests { - use super::*; - use super::super::GenerateNodeKeyCmd; + use super::{super::GenerateNodeKeyCmd, *}; #[test] fn inspect_node_key() { diff --git a/client/cli/src/commands/key.rs b/client/cli/src/commands/key.rs index e5bce08145cb8..8e1103a8ca512 100644 --- a/client/cli/src/commands/key.rs +++ b/client/cli/src/commands/key.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,15 +17,12 @@ //! Key related CLI utilities -use crate::Error; +use crate::{Error, SubstrateCli}; use structopt::StructOpt; use super::{ - insert::InsertCmd, - inspect_key::InspectKeyCmd, - generate::GenerateCmd, - inspect_node_key::InspectNodeKeyCmd, - generate_node_key::GenerateNodeKeyCmd, + generate::GenerateCmd, generate_node_key::GenerateNodeKeyCmd, insert_key::InsertKeyCmd, + inspect_key::InspectKeyCmd, inspect_node_key::InspectNodeKeyCmd, }; /// Key utilities for the cli. @@ -39,23 +36,23 @@ pub enum KeySubcommand { Generate(GenerateCmd), /// Gets a public key and a SS58 address from the provided Secret URI - InspectKey(InspectKeyCmd), + Inspect(InspectKeyCmd), /// Print the peer ID corresponding to the node key in the given file InspectNodeKey(InspectNodeKeyCmd), /// Insert a key to the keystore of a node. - Insert(InsertCmd), + Insert(InsertKeyCmd), } impl KeySubcommand { /// run the key subcommands - pub fn run(&self) -> Result<(), Error> { + pub fn run(&self, cli: &C) -> Result<(), Error> { match self { KeySubcommand::GenerateNodeKey(cmd) => cmd.run(), KeySubcommand::Generate(cmd) => cmd.run(), - KeySubcommand::InspectKey(cmd) => cmd.run(), - KeySubcommand::Insert(cmd) => cmd.run(), + KeySubcommand::Inspect(cmd) => cmd.run(), + KeySubcommand::Insert(cmd) => cmd.run(cli), KeySubcommand::InspectNodeKey(cmd) => cmd.run(), } } diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 9867f61cd277f..9e7c5689b49c8 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,37 +19,26 @@ mod build_spec_cmd; mod check_block_cmd; mod export_blocks_cmd; mod export_state_cmd; +mod generate; +mod generate_node_key; mod import_blocks_cmd; +mod insert_key; +mod inspect_key; +mod inspect_node_key; +mod key; mod purge_chain_cmd; -mod sign; -mod verify; -mod vanity; mod revert_cmd; mod run_cmd; -mod generate_node_key; -mod generate; -mod insert; -mod inspect_node_key; -mod inspect_key; -mod key; +mod sign; pub mod utils; +mod vanity; +mod verify; pub use self::{ - build_spec_cmd::BuildSpecCmd, - check_block_cmd::CheckBlockCmd, - export_blocks_cmd::ExportBlocksCmd, - export_state_cmd::ExportStateCmd, - import_blocks_cmd::ImportBlocksCmd, - purge_chain_cmd::PurgeChainCmd, - sign::SignCmd, - generate::GenerateCmd, - insert::InsertCmd, - inspect_key::InspectKeyCmd, - generate_node_key::GenerateNodeKeyCmd, - inspect_node_key::InspectNodeKeyCmd, - key::KeySubcommand, - vanity::VanityCmd, - verify::VerifyCmd, - revert_cmd::RevertCmd, - run_cmd::RunCmd, + build_spec_cmd::BuildSpecCmd, check_block_cmd::CheckBlockCmd, + export_blocks_cmd::ExportBlocksCmd, export_state_cmd::ExportStateCmd, generate::GenerateCmd, + generate_node_key::GenerateNodeKeyCmd, import_blocks_cmd::ImportBlocksCmd, + insert_key::InsertKeyCmd, inspect_key::InspectKeyCmd, inspect_node_key::InspectNodeKeyCmd, + key::KeySubcommand, purge_chain_cmd::PurgeChainCmd, revert_cmd::RevertCmd, run_cmd::RunCmd, + sign::SignCmd, vanity::VanityCmd, verify::VerifyCmd, }; diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 9c9c6e91fb241..e1bdb3a03cc59 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,17 +16,21 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; -use crate::params::{DatabaseParams, SharedParams}; -use crate::CliConfiguration; -use sc_service::DatabaseConfig; -use std::fmt::Debug; -use std::fs; -use std::io::{self, Write}; +use crate::{ + error, + params::{DatabaseParams, SharedParams}, + CliConfiguration, +}; +use sc_service::DatabaseSource; +use std::{ + fmt::Debug, + fs, + io::{self, Write}, +}; use structopt::StructOpt; /// The `purge-chain` command used to remove the whole chain. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct PurgeChainCmd { /// Skip interactive prompt by answering yes automatically. 
#[structopt(short = "y")] @@ -43,11 +47,10 @@ pub struct PurgeChainCmd { impl PurgeChainCmd { /// Run the purge command - pub fn run(&self, database_config: DatabaseConfig) -> error::Result<()> { - let db_path = database_config.path() - .ok_or_else(|| - error::Error::Input("Cannot purge custom database implementation".into()) - )?; + pub fn run(&self, database_config: DatabaseSource) -> error::Result<()> { + let db_path = database_config.path().ok_or_else(|| { + error::Error::Input("Cannot purge custom database implementation".into()) + })?; if !self.yes { print!("Are you sure to remove {:?}? [y/N]: ", &db_path); @@ -61,7 +64,7 @@ impl PurgeChainCmd { Some('y') | Some('Y') => {}, _ => { println!("Aborted"); - return Ok(()); + return Ok(()) }, } } diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index b2e3c1bf8e2b6..9ad49a03aa5fd 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,16 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; -use crate::params::{GenericNumber, PruningParams, SharedParams}; -use crate::CliConfiguration; +use crate::{ + error, + params::{GenericNumber, PruningParams, SharedParams}, + CliConfiguration, +}; +use sc_client_api::{Backend, UsageProvider}; use sc_service::chain_ops::revert_chain; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::fmt::Debug; -use std::str::FromStr; -use std::sync::Arc; +use std::{fmt::Debug, str::FromStr, sync::Arc}; use structopt::StructOpt; -use sc_client_api::{Backend, UsageProvider}; /// The `revert` command used revert the chain to a previous state. #[derive(Debug, StructOpt)] @@ -45,11 +45,7 @@ pub struct RevertCmd { impl RevertCmd { /// Run the revert command - pub async fn run( - &self, - client: Arc, - backend: Arc, - ) -> error::Result<()> + pub async fn run(&self, client: Arc, backend: Arc) -> error::Result<()> where B: BlockT, BA: Backend, diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 019b760e5b4ae..98f2090c6f446 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,18 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::arg_enums::RpcMethods; -use crate::error::{Error, Result}; -use crate::params::ImportParams; -use crate::params::KeystoreParams; -use crate::params::NetworkParams; -use crate::params::OffchainWorkerParams; -use crate::params::SharedParams; -use crate::params::TransactionPoolParams; -use crate::CliConfiguration; +use crate::{ + arg_enums::RpcMethods, + error::{Error, Result}, + params::{ + ImportParams, KeystoreParams, NetworkParams, OffchainWorkerParams, SharedParams, + TransactionPoolParams, + }, + CliConfiguration, +}; use regex::Regex; use sc_service::{ - config::{BasePath, MultiaddrWithPeerId, PrometheusConfig, TransactionPoolOptions}, + config::{BasePath, PrometheusConfig, TransactionPoolOptions}, ChainSpec, Role, }; use sc_telemetry::TelemetryEndpoints; @@ -35,47 +35,30 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use structopt::StructOpt; /// The `run` command used to run a node. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct RunCmd { /// Enable validator mode. /// /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). - #[structopt( - long = "validator", - conflicts_with_all = &[ "sentry" ] - )] + #[structopt(long)] pub validator: bool, - /// Enable sentry mode. - /// - /// The node will be started with the authority role and participate in - /// consensus tasks as an "observer", it will never actively participate - /// regardless of whether it could (e.g. keys are available locally). This - /// mode is useful as a secure proxy for validators (which would run - /// detached from the network), since we want this node to participate in - /// the full consensus protocols in order to have all needed consensus data - /// available to relay to private nodes. - #[structopt( - long = "sentry", - conflicts_with_all = &[ "validator", "light" ], - parse(try_from_str) - )] - pub sentry: Vec, - - /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA observer. + /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA + /// observer. #[structopt(long)] pub no_grandpa: bool, /// Experimental: Run in light client mode. - #[structopt(long = "light", conflicts_with = "sentry")] + #[structopt(long = "light")] pub light: bool, /// Listen to all RPC interfaces. /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC + /// proxy server to filter out dangerous methods. More details: + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[structopt(long = "rpc-external")] pub rpc_external: bool, @@ -90,8 +73,8 @@ pub struct RunCmd { /// /// - `Unsafe`: Exposes every RPC method. /// - `Safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. - /// - `Auto`: Acts as `Safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is passed, - /// otherwise acts as `Unsafe`. + /// - `Auto`: Acts as `Safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is + /// passed, otherwise acts as `Unsafe`. #[structopt( long, value_name = "METHOD SET", @@ -104,8 +87,9 @@ pub struct RunCmd { /// Listen to all Websocket interfaces. /// - /// Default is local. 
Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC + /// proxy server to filter out dangerous methods. More details: + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[structopt(long = "ws-external")] pub ws_external: bool, @@ -116,7 +100,12 @@ pub struct RunCmd { #[structopt(long = "unsafe-ws-external")] pub unsafe_ws_external: bool, - /// Listen to all Prometheus data source interfaces. + /// Set the the maximum RPC payload size for both requests and responses (both http and ws), in + /// megabytes. Default is 15MiB. + #[structopt(long = "rpc-max-payload")] + pub rpc_max_payload: Option, + + /// Expose Prometheus exporter on all interfaces. /// /// Default is local. #[structopt(long = "prometheus-external")] @@ -142,16 +131,16 @@ pub struct RunCmd { /// /// A comma-separated list of origins (protocol://domain or special `null` /// value). Value of `all` will disable origin validation. Default is to - /// allow localhost and https://polkadot.js.org origins. When running in + /// allow localhost and origins. When running in /// --dev mode the default is to allow all origins. #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] pub rpc_cors: Option, - /// Specify Prometheus data source server TCP Port. + /// Specify Prometheus exporter TCP Port. #[structopt(long = "prometheus-port", value_name = "PORT")] pub prometheus_port: Option, - /// Do not expose a Prometheus metric endpoint. + /// Do not expose a Prometheus exporter endpoint. /// /// Prometheus metric endpoint is enabled by default. #[structopt(long = "no-prometheus")] @@ -206,7 +195,8 @@ pub struct RunCmd { #[structopt(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub bob: bool, - /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. + /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to + /// keystore. #[structopt(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] pub charlie: bool, @@ -244,17 +234,6 @@ pub struct RunCmd { #[structopt(long)] pub max_runtime_instances: Option, - /// Specify a list of sentry node public addresses. - /// - /// Can't be used with --public-addr as the sentry node would take precedence over the public address - /// specified there. - #[structopt( - long = "sentry-nodes", - value_name = "ADDR", - conflicts_with_all = &[ "sentry", "public-addr" ] - )] - pub sentry_nodes: Vec, - /// Run a temporary node. /// /// A temporary directory will be created to store the configuration and will be deleted @@ -325,7 +304,7 @@ impl CliConfiguration for RunCmd { Error::Input(format!( "Invalid node name '{}'. Reason: {}. 
If unsure, use none.", name, msg - )) + )) })?; Ok(name) @@ -365,13 +344,7 @@ impl CliConfiguration for RunCmd { Ok(if is_light { sc_service::Role::Light } else if is_authority { - sc_service::Role::Authority { - sentry_nodes: self.sentry_nodes.clone(), - } - } else if !self.sentry.is_empty() { - sc_service::Role::Sentry { - validators: self.sentry.clone(), - } + sc_service::Role::Authority } else { sc_service::Role::Full }) @@ -386,18 +359,13 @@ impl CliConfiguration for RunCmd { Ok(if self.no_prometheus { None } else { - let interface = if self.prometheus_external { - Ipv4Addr::UNSPECIFIED - } else { - Ipv4Addr::LOCALHOST - }; - - Some(PrometheusConfig::new_with_default_registry( - SocketAddr::new( - interface.into(), - self.prometheus_port.unwrap_or(default_listen_port), - ) - )) + let interface = + if self.prometheus_external { Ipv4Addr::UNSPECIFIED } else { Ipv4Addr::LOCALHOST }; + + Some(PrometheusConfig::new_with_default_registry(SocketAddr::new( + interface.into(), + self.prometheus_port.unwrap_or(default_listen_port), + ))) }) } @@ -435,7 +403,7 @@ impl CliConfiguration for RunCmd { self.rpc_external, self.unsafe_rpc_external, self.rpc_methods, - self.validator + self.validator, )?; Ok(Some(SocketAddr::new(interface, self.rpc_port.unwrap_or(default_listen_port)))) @@ -460,6 +428,10 @@ impl CliConfiguration for RunCmd { Ok(self.rpc_methods.into()) } + fn rpc_max_payload(&self) -> Result> { + Ok(self.rpc_max_payload) + } + fn transaction_pool(&self) -> Result { Ok(self.pool_config.transaction_pool()) } @@ -481,19 +453,19 @@ impl CliConfiguration for RunCmd { pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { let name = _name.to_string(); if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { - return Err("Node name too long"); + return Err("Node name too long") } let invalid_chars = r"[\\.@]"; let re = Regex::new(invalid_chars).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' and '@'"); + return Err("Node name should not contain invalid chars such as '.' and '@'") } let invalid_patterns = r"(https?:\\/+)?(www)+"; let re = Regex::new(invalid_patterns).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain urls"); + return Err("Node name should not contain urls") } Ok(()) @@ -507,19 +479,18 @@ fn rpc_interface( ) -> Result { if is_external && is_validator && rpc_methods != RpcMethods::Unsafe { return Err(Error::Input( - "--rpc-external and --ws-external options shouldn't be \ - used if the node is running as a validator. Use `--unsafe-rpc-external` \ - or `--rpc-methods=unsafe` if you understand the risks. See the options \ - description for more information." + "--rpc-external and --ws-external options shouldn't be used if the node is running as \ + a validator. Use `--unsafe-rpc-external` or `--rpc-methods=unsafe` if you understand \ + the risks. See the options description for more information." .to_owned(), - )); + )) } if is_external || is_unsafe_external { if rpc_methods == RpcMethods::Unsafe { log::warn!( "It isn't safe to expose RPC publicly without a proxy server that filters \ - available set of RPC methods." + available set of RPC methods." ); } @@ -552,11 +523,10 @@ fn parse_telemetry_endpoints(s: &str) -> std::result::Result<(String, u8), Telem None => Err(TelemetryParsingError::MissingVerbosity), Some(pos_) => { let url = s[..pos_].to_string(); - let verbosity = s[pos_ + 1..] 
- .parse() - .map_err(TelemetryParsingError::VerbosityParsingError)?; + let verbosity = + s[pos_ + 1..].parse().map_err(TelemetryParsingError::VerbosityParsingError)?; Ok((url, verbosity)) - } + }, } } @@ -589,17 +559,13 @@ fn parse_cors(s: &str) -> std::result::Result> match part { "all" | "*" => { is_all = true; - break; - } + break + }, other => origins.push(other.to_owned()), } } - Ok(if is_all { - Cors::All - } else { - Cors::List(origins) - }) + Ok(if is_all { Cors::All } else { Cors::List(origins) }) } #[cfg(test)] @@ -615,7 +581,8 @@ mod tests { fn tests_node_name_bad() { assert!(is_node_name_valid( "very very long names are really not very cool for the ui at all, really they're not" - ).is_err()); + ) + .is_err()); assert!(is_node_name_valid("Dots.not.Ok").is_err()); assert!(is_node_name_valid("http://visit.me").is_err()); assert!(is_node_name_valid("https://visit.me").is_err()); diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs index 605fd5b12313f..20aacd9bf0020 100644 --- a/client/cli/src/commands/sign.rs +++ b/client/cli/src/commands/sign.rs @@ -1,11 +1,11 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -18,15 +18,12 @@ //! Implementation of the `sign` subcommand use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag, KeystoreParams}; -use structopt::StructOpt; use sp_core::crypto::SecretString; +use structopt::StructOpt; /// The `sign` command -#[derive(Debug, StructOpt)] -#[structopt( - name = "sign", - about = "Sign a message, with a given (secret) key" -)] +#[derive(Debug, StructOpt, Clone)] +#[structopt(name = "sign", about = "Sign a message, with a given (secret) key")] pub struct SignCmd { /// The secret key URI. /// If the value is a file, the file content is used as URI. @@ -52,7 +49,6 @@ pub struct SignCmd { pub crypto_scheme: CryptoSchemeFlag, } - impl SignCmd { /// Run the command pub fn run(&self) -> error::Result<()> { @@ -60,17 +56,19 @@ impl SignCmd { let suri = utils::read_uri(self.suri.as_ref())?; let password = self.keystore_params.read_password()?; - let signature = with_crypto_scheme!( - self.crypto_scheme.scheme, - sign(&suri, password, message) - )?; + let signature = + with_crypto_scheme!(self.crypto_scheme.scheme, sign(&suri, password, message))?; println!("{}", signature); Ok(()) } } -fn sign(suri: &str, password: Option, message: Vec) -> error::Result { +fn sign( + suri: &str, + password: Option, + message: Vec, +) -> error::Result { let pair = utils::pair_from_suri::
<P>
(suri, password)?; Ok(format!("{}", hex::encode(pair.sign(&message)))) } @@ -91,7 +89,7 @@ mod test { "--message", &seed[2..], "--password", - "12345" + "12345", ]); assert!(sign.run().is_ok()); } diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 38263af50cfbb..864d7e920f81a 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -1,11 +1,11 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -17,15 +17,18 @@ // along with this program. If not, see . //! subcommand utilities -use std::{io::Read, path::PathBuf, convert::TryFrom}; -use sp_core::{ - Pair, hexdisplay::HexDisplay, - crypto::{Ss58Codec, Ss58AddressFormat}, +use crate::{ + error::{self, Error}, + OutputType, }; -use sp_runtime::{MultiSigner, traits::IdentifyAccount}; -use crate::{OutputType, error::{self, Error}}; use serde_json::json; -use sp_core::crypto::{SecretString, Zeroize, ExposeSecret}; +use sp_core::{ + crypto::{ExposeSecret, SecretString, Ss58AddressFormat, Ss58Codec, Zeroize}, + hexdisplay::HexDisplay, + Pair, +}; +use sp_runtime::{traits::IdentifyAccount, MultiSigner}; +use std::{convert::TryFrom, io::Read, path::PathBuf}; /// Public key type for Runtime pub type PublicFor
<P> = <P as sp_core::Pair>
::Public; @@ -37,9 +40,7 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { let uri = if let Some(uri) = uri { let file = PathBuf::from(&uri); if file.is_file() { - std::fs::read_to_string(uri)? - .trim_end() - .to_owned() + std::fs::read_to_string(uri)?.trim_end().to_owned() } else { uri.into() } @@ -54,10 +55,11 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { /// /// 1. Try to construct the `Pair` while using `uri` as input for [`sp_core::Pair::from_phrase`]. /// -/// 2. Try to construct the `Pair` while using `uri` as input for [`sp_core::Pair::from_string_with_seed`]. +/// 2. Try to construct the `Pair` while using `uri` as input for +/// [`sp_core::Pair::from_string_with_seed`]. /// /// 3. Try to construct the `Pair::Public` while using `uri` as input for -/// [`sp_core::Pair::Public::from_string_with_version`]. +/// [`sp_core::crypto::Ss58Codec::from_string_with_version`]. pub fn print_from_uri( uri: &str, password: Option, @@ -78,22 +80,28 @@ pub fn print_from_uri( "secretPhrase": uri, "secretSeed": format_seed::(seed), "publicKey": format_public_key::(public_key.clone()), + "ss58PublicKey": public_key.to_ss58check_with_version(network_override), "accountId": format_account_id::(public_key), "ss58Address": pair.public().into().into_account().to_ss58check_with_version(network_override), }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + println!( + "{}", + serde_json::to_string_pretty(&json).expect("Json pretty print failed") + ); }, OutputType::Text => { println!( - "Secret phrase `{}` is account:\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", + "Secret phrase: {}\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + Public key (SS58): {}\n \ + SS58 Address: {}", uri, format_seed::(seed), format_public_key::(public_key.clone()), - format_account_id::(public_key), + format_account_id::(public_key.clone()), + public_key.to_ss58check_with_version(network_override), pair.public().into().into_account().to_ss58check_with_version(network_override), ); }, @@ -108,22 +116,28 @@ pub fn print_from_uri( "secretKeyUri": uri, "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, "publicKey": format_public_key::(public_key.clone()), + "ss58PublicKey": public_key.to_ss58check_with_version(network_override), "accountId": format_account_id::(public_key), "ss58Address": pair.public().into().into_account().to_ss58check_with_version(network_override), }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + println!( + "{}", + serde_json::to_string_pretty(&json).expect("Json pretty print failed") + ); }, OutputType::Text => { println!( "Secret Key URI `{}` is account:\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + Public key (SS58): {}\n \ + SS58 Address: {}", uri, if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, format_public_key::(public_key.clone()), - format_account_id::(public_key), + format_account_id::(public_key.clone()), + public_key.to_ss58check_with_version(network_override), pair.public().into().into_account().to_ss58check_with_version(network_override), ); }, @@ -138,10 +152,14 @@ pub fn print_from_uri( "networkId": String::from(network_override), "publicKey": format_public_key::(public_key.clone()), "accountId": 
format_account_id::(public_key.clone()), + "ss58PublicKey": public_key.to_ss58check_with_version(network_override), "ss58Address": public_key.to_ss58check_with_version(network_override), }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + println!( + "{}", + serde_json::to_string_pretty(&json).expect("Json pretty print failed") + ); }, OutputType::Text => { println!( @@ -149,14 +167,16 @@ pub fn print_from_uri( Network ID/version: {}\n \ Public key (hex): {}\n \ Account ID: {}\n \ + Public key (SS58): {}\n \ SS58 Address: {}", uri, String::from(network_override), format_public_key::(public_key.clone()), format_account_id::(public_key.clone()), public_key.to_ss58check_with_version(network_override), + public_key.to_ss58check_with_version(network_override), ); - } + }, } } else { println!("Invalid phrase/URI given"); @@ -186,6 +206,7 @@ where "networkId": String::from(network_override), "publicKey": format_public_key::(public_key.clone()), "accountId": format_account_id::(public_key.clone()), + "ss58PublicKey": public_key.to_ss58check_with_version(network_override), "ss58Address": public_key.to_ss58check_with_version(network_override), }); @@ -196,13 +217,15 @@ where "Network ID/version: {}\n \ Public key (hex): {}\n \ Account ID: {}\n \ + Public key (SS58): {}\n \ SS58 Address: {}", String::from(network_override), format_public_key::(public_key.clone()), format_account_id::(public_key.clone()), public_key.to_ss58check_with_version(network_override), + public_key.to_ss58check_with_version(network_override), ); - } + }, } Ok(()) @@ -234,8 +257,8 @@ fn format_public_key(public_key: PublicFor
<P>
) -> String { /// formats public key as accountId as hex fn format_account_id<P: sp_core::Pair>(public_key: PublicFor
<P>
) -> String - where - PublicFor
<P>: Into<MultiSigner>, +where + PublicFor
<P>
: Into, { format!("0x{}", HexDisplay::from(&public_key.into().into_account().as_ref())) } @@ -246,8 +269,7 @@ pub fn decode_hex>(message: T) -> Result, Error> { if message[..2] == [b'0', b'x'] { message = &message[2..] } - hex::decode(message) - .map_err(|e| Error::Other(format!("Invalid hex ({})", e))) + Ok(hex::decode(message)?) } /// checks if message is Some, otherwise reads message from stdin and optionally decodes hex @@ -262,12 +284,11 @@ pub fn read_message(msg: Option<&String>, should_decode: bool) -> Result if should_decode { message = decode_hex(&message)?; } - } + }, } Ok(message) } - /// Allows for calling $method with appropriate crypto impl. #[macro_export] macro_rules! with_crypto_scheme { diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index 33b9025c13fbc..daeb81e86a1a1 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -1,11 +1,11 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -19,21 +19,17 @@ //! implementation of the `vanity` subcommand use crate::{ - error, utils, with_crypto_scheme, - CryptoSchemeFlag, NetworkSchemeFlag, OutputTypeFlag, + error, utils, with_crypto_scheme, CryptoSchemeFlag, NetworkSchemeFlag, OutputTypeFlag, }; -use sp_core::crypto::{Ss58Codec, Ss58AddressFormat}; -use structopt::StructOpt; use rand::{rngs::OsRng, RngCore}; +use sp_core::crypto::{Ss58AddressFormat, Ss58Codec}; use sp_runtime::traits::IdentifyAccount; +use structopt::StructOpt; use utils::print_from_uri; /// The `vanity` command -#[derive(Debug, StructOpt)] -#[structopt( - name = "vanity", - about = "Generate a seed that provides a vanity address" -)] +#[derive(Debug, StructOpt, Clone)] +#[structopt(name = "vanity", about = "Generate a seed that provides a vanity address")] pub struct VanityCmd { /// Desired pattern #[structopt(long, parse(try_from_str = assert_non_empty_string))] @@ -78,10 +74,10 @@ fn generate_key( desired: &str, network_override: Ss58AddressFormat, ) -> Result - where - Pair: sp_core::Pair, - Pair::Public: IdentifyAccount, - ::AccountId: Ss58Codec, +where + Pair: sp_core::Pair, + Pair::Public: IdentifyAccount, + ::AccountId: Ss58Codec, { println!("Generating key containing pattern '{}'", desired); @@ -104,7 +100,7 @@ fn generate_key( best = score; if best >= top { println!("best: {} == top: {}", best, top); - return Ok(utils::format_seed::(seed.clone())); + return Ok(utils::format_seed::(seed.clone())) } } done += 1; @@ -129,11 +125,11 @@ fn next_seed(seed: &mut [u8]) { match seed[i] { 255 => { seed[i] = 0; - } + }, _ => { seed[i] += 1; - break; - } + break + }, } } } @@ -145,7 +141,7 @@ fn calculate_score(_desired: &str, key: &str) -> usize { let snip_size = _desired.len() - truncate; let truncated = &_desired[0..snip_size]; if let Some(pos) = key.find(truncated) { - return (47 - pos) + (snip_size * 48); + return (47 - pos) + (snip_size * 48) } } 0 @@ -160,15 +156,13 @@ fn assert_non_empty_string(pattern: &str) -> Result { } } - 
#[cfg(test)] mod tests { use super::*; - use sp_core::{crypto::Ss58Codec, Pair}; - use sp_core::sr25519; + use sp_core::{crypto::Ss58Codec, sr25519, Pair}; + use structopt::StructOpt; #[cfg(feature = "bench")] use test::Bencher; - use structopt::StructOpt; #[test] fn vanity() { @@ -179,25 +173,21 @@ mod tests { #[test] fn test_generation_with_single_char() { let seed = generate_key::("ab", Default::default()).unwrap(); - assert!( - sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) - .unwrap() - .public() - .to_ss58check() - .contains("ab") - ); + assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + .unwrap() + .public() + .to_ss58check() + .contains("ab")); } #[test] fn generate_key_respects_network_override() { let seed = generate_key::("ab", Ss58AddressFormat::PolkadotAccount).unwrap(); - assert!( - sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) - .unwrap() - .public() - .to_ss58check_with_version(Ss58AddressFormat::PolkadotAccount) - .contains("ab") - ); + assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + .unwrap() + .public() + .to_ss58check_with_version(Ss58AddressFormat::PolkadotAccount) + .contains("ab")); } #[test] @@ -208,10 +198,7 @@ mod tests { #[test] fn test_score_100() { - let score = calculate_score( - "Polkadot", - "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim", - ); + let score = calculate_score("Polkadot", "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); assert_eq!(score, 430); } @@ -219,10 +206,7 @@ mod tests { fn test_score_50_2() { // 50% for the position + 50% for the size assert_eq!( - calculate_score( - "Polkadot", - "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim" - ), + calculate_score("Polkadot", "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"), 238 ); } @@ -230,10 +214,7 @@ mod tests { #[test] fn test_score_0() { assert_eq!( - calculate_score( - "Polkadot", - "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK" - ), + calculate_score("Polkadot", "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK"), 0 ); } diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs index ad16c11d5e441..760793374242e 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -1,11 +1,11 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -19,11 +19,11 @@ //! 
implementation of the `verify` subcommand use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag}; -use sp_core::{Public, crypto::Ss58Codec}; +use sp_core::{crypto::Ss58Codec, Public}; use structopt::StructOpt; /// The `verify` command -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] #[structopt( name = "verify", about = "Verify a signature for a message, provided on STDIN, with a given (public or secret) key" @@ -57,47 +57,36 @@ impl VerifyCmd { let message = utils::read_message(self.message.as_ref(), self.hex)?; let sig_data = utils::decode_hex(&self.sig)?; let uri = utils::read_uri(self.uri.as_ref())?; - let uri = if uri.starts_with("0x") { - &uri[2..] - } else { - &uri - }; - - with_crypto_scheme!( - self.crypto_scheme.scheme, - verify(sig_data, message, uri) - ) + let uri = if uri.starts_with("0x") { &uri[2..] } else { &uri }; + + with_crypto_scheme!(self.crypto_scheme.scheme, verify(sig_data, message, uri)) } } fn verify(sig_data: Vec, message: Vec, uri: &str) -> error::Result<()> - where - Pair: sp_core::Pair, - Pair::Signature: Default + AsMut<[u8]>, +where + Pair: sp_core::Pair, + Pair::Signature: Default + AsMut<[u8]>, { let mut signature = Pair::Signature::default(); if sig_data.len() != signature.as_ref().len() { - return Err(error::Error::Other(format!( - "signature has an invalid length. read {} bytes, expected {} bytes", - sig_data.len(), - signature.as_ref().len(), - ))); + return Err(error::Error::SignatureInvalidLength { + read: sig_data.len(), + expected: signature.as_ref().len(), + }) } signature.as_mut().copy_from_slice(&sig_data); let pubkey = if let Ok(pubkey_vec) = hex::decode(uri) { Pair::Public::from_slice(pubkey_vec.as_slice()) } else { - Pair::Public::from_string(uri) - .map_err(|_| { - error::Error::Other(format!("Invalid URI; expecting either a secret URI or a public URI.")) - })? + Pair::Public::from_string(uri)? }; if Pair::verify(&signature, &message, &pubkey) { println!("Signature verifies correctly."); } else { - return Err(error::Error::Other("Signature invalid.".into())) + return Err(error::Error::SignatureInvalid) } Ok(()) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 43b755100244f..59fc6bd438a1c 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,23 +18,23 @@ //! 
Configuration trait for a CLI based on substrate -use crate::arg_enums::Database; -use crate::error::Result; use crate::{ - init_logger, DatabaseParams, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, - OffchainWorkerParams, PruningParams, SharedParams, SubstrateCli, + arg_enums::Database, error::Result, DatabaseParams, ImportParams, KeystoreParams, + NetworkParams, NodeKeyParams, OffchainWorkerParams, PruningParams, SharedParams, SubstrateCli, }; use log::warn; use names::{Generator, Name}; use sc_client_api::execution_extensions::ExecutionStrategies; -use sc_service::config::{ - BasePath, Configuration, DatabaseConfig, ExtTransport, KeystoreConfig, NetworkConfiguration, - NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, - TaskExecutor, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, +use sc_service::{ + config::{ + BasePath, Configuration, DatabaseSource, KeystoreConfig, NetworkConfiguration, + NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, + TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, + }, + ChainSpec, KeepBlocks, TracingReceiver, TransactionStorageMode, }; -use sc_service::{ChainSpec, TracingReceiver}; -use std::net::SocketAddr; -use std::path::PathBuf; +use sc_tracing::logging::LoggerBuilder; +use std::{net::SocketAddr, path::PathBuf}; /// The maximum number of characters for a node name. pub(crate) const NODE_NAME_MAX_LENGTH: usize = 64; @@ -47,7 +47,7 @@ const RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT: u64 = 10_000; /// Default configuration values used by Substrate /// -/// These values will be used by [`CliConfiguritation`] to set +/// These values will be used by [`CliConfiguration`] to set /// default values for e.g. the listen port or the RPC port. pub trait DefaultConfigurationValues { /// The port Substrate should listen on for p2p connections. @@ -158,6 +158,7 @@ pub trait CliConfiguration: Sized { &self, chain_spec: &Box, is_dev: bool, + is_validator: bool, net_config_dir: PathBuf, client_id: &str, node_name: &str, @@ -168,6 +169,7 @@ pub trait CliConfiguration: Sized { network_params.network_config( chain_spec, is_dev, + is_validator, Some(net_config_dir), client_id, node_name, @@ -175,32 +177,33 @@ pub trait CliConfiguration: Sized { default_listen_port, ) } else { - NetworkConfiguration::new( - node_name, - client_id, - node_key, - Some(net_config_dir), - ) + NetworkConfiguration::new(node_name, client_id, node_key, Some(net_config_dir)) }) } /// Get the keystore configuration. /// - /// Bu default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses + /// By default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses /// `KeystoreConfig::InMemory`. - fn keystore_config(&self, base_path: &PathBuf) -> Result { + fn keystore_config(&self, config_dir: &PathBuf) -> Result<(Option, KeystoreConfig)> { self.keystore_params() - .map(|x| x.keystore_config(base_path)) - .unwrap_or(Ok(KeystoreConfig::InMemory)) + .map(|x| x.keystore_config(config_dir)) + .unwrap_or_else(|| Ok((None, KeystoreConfig::InMemory))) } /// Get the database cache size. /// /// By default this is retrieved from `DatabaseParams` if it is available. Otherwise its `None`. fn database_cache_size(&self) -> Result> { - Ok(self.database_params() - .map(|x| x.database_cache_size()) - .unwrap_or_default()) + Ok(self.database_params().map(|x| x.database_cache_size()).unwrap_or_default()) + } + + /// Get the database transaction storage scheme. 
+ fn database_transaction_storage(&self) -> Result { + Ok(self + .database_params() + .map(|x| x.transaction_storage()) + .unwrap_or(TransactionStorageMode::BlockBody)) } /// Get the database backend variant. @@ -216,15 +219,18 @@ pub trait CliConfiguration: Sized { base_path: &PathBuf, cache_size: usize, database: Database, - ) -> Result { + role: &Role, + ) -> Result { + let role_dir = match role { + Role::Light => "light", + Role::Full | Role::Authority => "full", + }; + let rocksdb_path = base_path.join("db").join(role_dir); + let paritydb_path = base_path.join("paritydb").join(role_dir); Ok(match database { - Database::RocksDb => DatabaseConfig::RocksDb { - path: base_path.join("db"), - cache_size, - }, - Database::ParityDb => DatabaseConfig::ParityDb { - path: base_path.join("paritydb"), - }, + Database::RocksDb => DatabaseSource::RocksDb { path: rocksdb_path, cache_size }, + Database::ParityDb => DatabaseSource::ParityDb { path: rocksdb_path }, + Database::Auto => DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size }, }) } @@ -232,9 +238,7 @@ pub trait CliConfiguration: Sized { /// /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. fn state_cache_size(&self) -> Result { - Ok(self.import_params() - .map(|x| x.state_cache_size()) - .unwrap_or_default()) + Ok(self.import_params().map(|x| x.state_cache_size()).unwrap_or_default()) } /// Get the state cache child ratio (if any). @@ -244,16 +248,26 @@ pub trait CliConfiguration: Sized { Ok(Default::default()) } - /// Get the pruning mode. + /// Get the state pruning mode. /// /// By default this is retrieved from `PruningMode` if it is available. Otherwise its /// `PruningMode::default()`. - fn pruning(&self, unsafe_pruning: bool, role: &Role) -> Result { + fn state_pruning(&self, unsafe_pruning: bool, role: &Role) -> Result { self.pruning_params() - .map(|x| x.pruning(unsafe_pruning, role)) + .map(|x| x.state_pruning(unsafe_pruning, role)) .unwrap_or_else(|| Ok(Default::default())) } + /// Get the block pruning mode. + /// + /// By default this is retrieved from `block_pruning` if it is available. Otherwise its + /// `KeepBlocks::All`. + fn keep_blocks(&self) -> Result { + self.pruning_params() + .map(|x| x.keep_blocks()) + .unwrap_or_else(|| Ok(KeepBlocks::All)) + } + /// Get the chain ID (string). /// /// By default this is retrieved from `SharedParams`. @@ -273,9 +287,14 @@ pub trait CliConfiguration: Sized { /// By default this is retrieved from `ImportParams` if it is available. Otherwise its /// `WasmExecutionMethod::default()`. fn wasm_method(&self) -> Result { - Ok(self.import_params() - .map(|x| x.wasm_method()) - .unwrap_or_default()) + Ok(self.import_params().map(|x| x.wasm_method()).unwrap_or_default()) + } + + /// Get the path where WASM overrides live. + /// + /// By default this is `None`. + fn wasm_runtime_overrides(&self) -> Option { + self.import_params().map(|x| x.wasm_runtime_overrides()).unwrap_or_default() } /// Get the execution strategies. @@ -336,6 +355,11 @@ pub trait CliConfiguration: Sized { Ok(Some(Vec::new())) } + /// Get maximum RPC payload. + fn rpc_max_payload(&self) -> Result> { + Ok(None) + } + /// Get the prometheus configuration (`None` if disabled) /// /// By default this is `None`. @@ -353,13 +377,6 @@ pub trait CliConfiguration: Sized { Ok(chain_spec.telemetry_endpoints().clone()) } - /// Get the telemetry external transport - /// - /// By default this is `None`. 
- fn telemetry_external_transport(&self) -> Result> { - Ok(None) - } - /// Get the default value for heap pages /// /// By default this is `None`. @@ -399,22 +416,18 @@ pub trait CliConfiguration: Sized { /// Get the tracing targets from the current object (if any) /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// By default this is retrieved from [`SharedParams`] if it is available. Otherwise its /// `None`. fn tracing_targets(&self) -> Result> { - Ok(self.import_params() - .map(|x| x.tracing_targets()) - .unwrap_or_else(|| Default::default())) + Ok(self.shared_params().tracing_targets()) } /// Get the TracingReceiver value from the current object /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// By default this is retrieved from [`SharedParams`] if it is available. Otherwise its /// `TracingReceiver::default()`. fn tracing_receiver(&self) -> Result { - Ok(self.import_params() - .map(|x| x.tracing_receiver()) - .unwrap_or_default()) + Ok(self.shared_params().tracing_receiver()) } /// Get the node key from the current object @@ -445,19 +458,15 @@ pub trait CliConfiguration: Sized { fn create_configuration( &self, cli: &C, - task_executor: TaskExecutor, + tokio_handle: tokio::runtime::Handle, ) -> Result { let is_dev = self.is_dev()?; let chain_id = self.chain_id(is_dev)?; - let chain_spec = cli.load_spec(chain_id.as_str())?; + let chain_spec = cli.load_spec(&chain_id)?; let base_path = self .base_path()? .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); - let config_dir = base_path - .path() - .to_path_buf() - .join("chains") - .join(chain_spec.id()); + let config_dir = base_path.config_dir(chain_spec.id()); let net_config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); let client_id = C::client_id(); let database_cache_size = self.database_cache_size()?.unwrap_or(128); @@ -465,33 +474,37 @@ pub trait CliConfiguration: Sized { let node_key = self.node_key(&net_config_dir)?; let role = self.role(is_dev)?; let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); - let is_validator = role.is_network_authority(); + let is_validator = role.is_authority(); + let (keystore_remote, keystore) = self.keystore_config(&config_dir)?; + let telemetry_endpoints = self.telemetry_endpoints(&chain_spec)?; - let unsafe_pruning = self - .import_params() - .map(|p| p.unsafe_pruning) - .unwrap_or(false); + let unsafe_pruning = self.import_params().map(|p| p.unsafe_pruning).unwrap_or(false); Ok(Configuration { impl_name: C::impl_name(), impl_version: C::impl_version(), - task_executor, + tokio_handle, transaction_pool: self.transaction_pool()?, network: self.network_config( &chain_spec, is_dev, + is_validator, net_config_dir, client_id.as_str(), self.node_name()?.as_str(), node_key, DCV::p2p_listen_port(), )?, - keystore: self.keystore_config(&config_dir)?, - database: self.database_config(&config_dir, database_cache_size, database)?, + keystore_remote, + keystore, + database: self.database_config(&config_dir, database_cache_size, database, &role)?, state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, - pruning: self.pruning(unsafe_pruning, &role)?, + state_pruning: self.state_pruning(unsafe_pruning, &role)?, + keep_blocks: self.keep_blocks()?, + transaction_storage: self.database_transaction_storage()?, wasm_method: self.wasm_method()?, + wasm_runtime_overrides: self.wasm_runtime_overrides(), execution_strategies: 
self.execution_strategies(is_dev, is_validator)?, rpc_http: self.rpc_http(DCV::rpc_http_listen_port())?, rpc_ws: self.rpc_ws(DCV::rpc_ws_listen_port())?, @@ -499,9 +512,9 @@ pub trait CliConfiguration: Sized { rpc_methods: self.rpc_methods()?, rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_cors: self.rpc_cors(is_dev)?, + rpc_max_payload: self.rpc_max_payload()?, prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, - telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?, - telemetry_external_transport: self.telemetry_external_transport()?, + telemetry_endpoints, default_heap_pages: self.default_heap_pages()?, offchain_worker: self.offchain_worker(&role)?, force_authoring: self.force_authoring()?, @@ -509,6 +522,7 @@ pub trait CliConfiguration: Sized { dev_key_seed: self.dev_key_seed(is_dev)?, tracing_targets: self.tracing_targets()?, tracing_receiver: self.tracing_receiver()?, + disable_log_reloading: self.is_log_filter_reloading_disabled()?, chain_spec, max_runtime_instances, announce_block: self.announce_block()?, @@ -528,6 +542,16 @@ pub trait CliConfiguration: Sized { Ok(self.shared_params().log_filters().join(",")) } + /// Is log reloading disabled (enabled by default) + fn is_log_filter_reloading_disabled(&self) -> Result { + Ok(self.shared_params().is_log_filter_reloading_disabled()) + } + + /// Should the log color output be disabled? + fn disable_log_color(&self) -> Result { + Ok(self.shared_params().disable_log_color()) + } + /// Initialize substrate. This must be done only once per process. /// /// This method: @@ -536,21 +560,27 @@ pub trait CliConfiguration: Sized { /// 2. Initializes the logger /// 3. Raises the FD limit fn init(&self) -> Result<()> { - let logger_pattern = self.log_filters()?; - let tracing_receiver = self.tracing_receiver()?; - let tracing_targets = self.tracing_targets()?; - sp_panic_handler::set(&C::support_url(), &C::impl_version()); - if let Err(e) = init_logger(&logger_pattern, tracing_receiver, tracing_targets) { - log::warn!("💬 Problem initializing global logging framework: {:}", e) + let mut logger = LoggerBuilder::new(self.log_filters()?); + logger.with_log_reloading(!self.is_log_filter_reloading_disabled()?); + + if let Some(tracing_targets) = self.tracing_targets()? { + let tracing_receiver = self.tracing_receiver()?; + logger.with_profiling(tracing_receiver, tracing_targets); } + if self.disable_log_color()? { + logger.with_colors(false); + } + + logger.init()?; + if let Some(new_limit) = fdlimit::raise_fd_limit() { if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { warn!( "Low open file descriptor limit configured for the process. \ - Current value: {:?}, recommended value: {:?}.", + Current value: {:?}, recommended value: {:?}.", new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, ); } @@ -569,7 +599,7 @@ pub fn generate_node_name() -> String { let count = node_name.chars().count(); if count < NODE_NAME_MAX_LENGTH { - return node_name; + return node_name } } } diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index 7404d31fcf7bb..c5784b2018172 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,42 +18,68 @@ //! Initialization errors. 
- +use sp_core::crypto; /// Result type alias for the CLI. pub type Result<T> = std::result::Result<T, Error>; /// Error type for the CLI. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Io error - Io(std::io::Error), - /// Cli error - Cli(structopt::clap::Error), - /// Service error - Service(sc_service::Error), - /// Client error - Client(sp_blockchain::Error), - /// scale codec error - Codec(parity_scale_codec::Error), - /// Input error - #[from(ignore)] + #[error(transparent)] + Io(#[from] std::io::Error), + + #[error(transparent)] + Cli(#[from] structopt::clap::Error), + + #[error(transparent)] + Service(#[from] sc_service::Error), + + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + #[error(transparent)] + Codec(#[from] parity_scale_codec::Error), + + #[error("Invalid input: {0}")] Input(String), - /// Invalid listen multiaddress - #[display(fmt="Invalid listen multiaddress")] - #[from(ignore)] + + #[error("Invalid listen multiaddress")] InvalidListenMultiaddress, - /// Other uncategorized error. - #[from(ignore)] - Other(String), -} -/// Must be implemented explicitly because `derive_more` won't generate this -/// case due to conflicting derive for `Other(String)`. -impl std::convert::From<String> for Error { - fn from(s: String) -> Error { - Error::Input(s) - } + #[error("Invalid URI; expecting either a secret URI or a public URI.")] + InvalidUri(crypto::PublicError), + + #[error("Signature has an invalid length. Read {read} bytes, expected {expected} bytes")] + SignatureInvalidLength { + /// Amount of signature bytes read. + read: usize, + /// Expected number of signature bytes. + expected: usize, + }, + + #[error("Unknown key type, must be a known 4-character sequence")] + KeyTypeInvalid, + + #[error("Signature verification failed")] + SignatureInvalid, + + #[error("Key store operation failed")] + KeyStoreOperation, + + #[error("Key storage issue encountered")] + KeyStorage(#[from] sc_keystore::Error), + + #[error("Invalid hexadecimal string data")] + HexDataConversion(#[from] hex::FromHexError), + + /// Application specific error chain sequence forwarder. + #[error(transparent)] + Application(#[from] Box<dyn std::error::Error + Send + Sync + 'static>), + + #[error(transparent)] + GlobalLoggerError(#[from] sc_tracing::logging::Error), } impl std::convert::From<&str> for Error { @@ -62,17 +88,14 @@ impl std::convert::From<&str> for Error { -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Io(ref err) => Some(err), - Error::Cli(ref err) => Some(err), - Error::Service(ref err) => Some(err), - Error::Client(ref err) => Some(err), - Error::Codec(ref err) => Some(err), - Error::Input(_) => None, - Error::InvalidListenMultiaddress => None, - Error::Other(_) => None, - } +impl std::convert::From<String> for Error { + fn from(s: String) -> Error { + Error::Input(s) + } +} + +impl std::convert::From<crypto::PublicError> for Error { + fn from(e: crypto::PublicError) -> Error { + Error::InvalidUri(e) + } } diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index e63e379533a62..bb1bff94145f7 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
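The switch from `derive_more` to `thiserror` above removes the hand-written `source()` implementation: `#[error(transparent)]` delegates `Display` and `source` to the wrapped error, and `#[from]` generates the conversions that previously had to be spelled out. A reduced sketch of the same pattern, with illustrative variant names:

```rust
use thiserror::Error;

#[derive(Debug, Error)]
enum ExampleError {
    // `transparent` forwards `Display` and `source` to the inner error.
    #[error(transparent)]
    Io(#[from] std::io::Error),

    // A formatted message; without `#[from]` no conversion is generated,
    // which is why the CLI keeps an explicit `From<String>` impl.
    #[error("Invalid input: {0}")]
    Input(String),
}

fn read_config(path: &str) -> Result<String, ExampleError> {
    // `?` converts via the generated `From<std::io::Error>`.
    Ok(std::fs::read_to_string(path)?)
}
```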
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,12 +20,12 @@ #![warn(missing_docs)] #![warn(unused_extern_crates)] +#![warn(unused_imports)] pub mod arg_enums; mod commands; mod config; mod error; -mod logging; mod params; mod runner; @@ -35,9 +35,9 @@ pub use config::*; pub use error::*; pub use params::*; pub use runner::*; -pub use sc_cli_proc_macro::*; +use sc_service::Configuration; pub use sc_service::{ChainSpec, Role}; -use sc_service::{Configuration, TaskExecutor}; +pub use sc_tracing::logging::LoggerBuilder; pub use sp_version::RuntimeVersion; use std::io::Write; pub use structopt; @@ -45,13 +45,6 @@ use structopt::{ clap::{self, AppSettings}, StructOpt, }; -#[doc(hidden)] -pub use tracing; -use tracing_subscriber::{ - filter::Directive, fmt::time::ChronoLocal, layer::SubscriberExt, FmtSubscriber, Layer, -}; - -pub use logging::PREFIX_LOG_SPAN; /// Substrate client CLI /// @@ -76,7 +69,8 @@ pub trait SubstrateCli: Sized { /// Extracts the file name from `std::env::current_exe()`. /// Resorts to the env var `CARGO_PKG_NAME` in case of Error. fn executable_name() -> String { - std::env::current_exe().ok() + std::env::current_exe() + .ok() .and_then(|e| e.file_name().map(|s| s.to_os_string())) .and_then(|w| w.into_string().ok()) .unwrap_or_else(|| env!("CARGO_PKG_NAME").into()) @@ -98,8 +92,9 @@ pub trait SubstrateCli: Sized { fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn ChainSpec>, String>; /// Helper function used to parse the command line arguments. This is the equivalent of - /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of - /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the + /// name of the application, author, "about" and version. It will also set + /// `AppSettings::GlobalVersion`. /// /// To allow running the node without subcommand, it also sets a few more settings: /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. @@ -114,8 +109,9 @@ } /// Helper function used to parse the command line arguments. This is the equivalent of - /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of - /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the + /// name of the application, author, "about" and version. It will also set + /// `AppSettings::GlobalVersion`. /// /// To allow running the node without subcommand, it also sets a few more settings: /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. @@ -145,6 +141,7 @@ AppSettings::GlobalVersion, AppSettings::ArgsNegateSubcommands, AppSettings::SubcommandsNegateReqs, + AppSettings::ColoredHelp, ]); let matches = match app.get_matches_from_safe(iter) { @@ -171,8 +168,9 @@ } /// Helper function used to parse the command line arguments. This is the equivalent of - /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of - /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. 
+ /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the + /// name of the application, author, "about" and version. It will also set + /// `AppSettings::GlobalVersion`. /// /// To allow running the node without subcommand, it also sets a few more settings: /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. @@ -218,9 +216,9 @@ fn create_configuration<T: CliConfiguration<DVC>, DVC: DefaultConfigurationValues>( &self, command: &T, - task_executor: TaskExecutor, + tokio_handle: tokio::runtime::Handle, ) -> error::Result<Configuration> { - command.create_configuration(self, task_executor) + command.create_configuration(self, tokio_handle) } /// Create a runner for the command provided in argument. This will create a Configuration and @@ -233,221 +231,3 @@ /// Native runtime version. fn native_runtime_version(chain_spec: &Box<dyn ChainSpec>) -> &'static RuntimeVersion; } - -/// Initialize the global logger -/// -/// This sets various global logging and tracing instances and thus may only be called once. -pub fn init_logger( - pattern: &str, - tracing_receiver: sc_tracing::TracingReceiver, - profiling_targets: Option<String>, -) -> std::result::Result<(), String> { - fn parse_directives(dirs: impl AsRef<str>) -> Vec<Directive> { - dirs.as_ref() - .split(',') - .filter_map(|s| s.parse().ok()) - .collect() - } - - if let Err(e) = tracing_log::LogTracer::init() { - return Err(format!( - "Registering Substrate logger failed: {:}!", e - )) - } - - let mut env_filter = tracing_subscriber::EnvFilter::default() - // Disable info logging by default for some modules. - .add_directive("ws=off".parse().expect("provided directive is valid")) - .add_directive("yamux=off".parse().expect("provided directive is valid")) - .add_directive("cranelift_codegen=off".parse().expect("provided directive is valid")) - // Set warn logging by default for some modules. - .add_directive("cranelift_wasm=warn".parse().expect("provided directive is valid")) - .add_directive("hyper=warn".parse().expect("provided directive is valid")) - // Enable info for others. - .add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()); - - if let Ok(lvl) = std::env::var("RUST_LOG") { - if lvl != "" { - // We're not sure if log or tracing is available at this moment, so silently ignore the - // parse error. - for directive in parse_directives(lvl) { - env_filter = env_filter.add_directive(directive); - } - } - } - - if pattern != "" { - // We're not sure if log or tracing is available at this moment, so silently ignore the - // parse error. - for directive in parse_directives(pattern) { - env_filter = env_filter.add_directive(directive); - } - } - - // If we're only logging `INFO` entries then we'll use a simplified logging format. - let simple = match Layer::<FmtSubscriber>::max_level_hint(&env_filter) { - Some(level) if level <= tracing_subscriber::filter::LevelFilter::INFO => true, - _ => false, - }; - - // Always log the special target `sc_tracing`, overrides global level. - // NOTE: this must be done after we check the `max_level_hint` otherwise - // it is always raised to `TRACE`. 
- env_filter = env_filter.add_directive( - "sc_tracing=trace" - .parse() - .expect("provided directive is valid"), - ); - - // Make sure to include profiling targets in the filter - if let Some(profiling_targets) = profiling_targets.clone() { - for directive in parse_directives(profiling_targets) { - env_filter = env_filter.add_directive(directive); - } - } - - let isatty = atty::is(atty::Stream::Stderr); - let enable_color = isatty; - let timer = ChronoLocal::with_format(if simple { - "%Y-%m-%d %H:%M:%S".to_string() - } else { - "%Y-%m-%d %H:%M:%S%.3f".to_string() - }); - - let subscriber = FmtSubscriber::builder() - .with_env_filter(env_filter) - .with_writer(std::io::stderr) - .event_format(logging::EventFormat { - timer, - ansi: enable_color, - display_target: !simple, - display_level: !simple, - display_thread_name: !simple, - }) - .finish().with(logging::NodeNameLayer); - - if let Some(profiling_targets) = profiling_targets { - let profiling = sc_tracing::ProfilingLayer::new(tracing_receiver, &profiling_targets); - - if let Err(e) = tracing::subscriber::set_global_default(subscriber.with(profiling)) { - return Err(format!( - "Registering Substrate tracing subscriber failed: {:}!", e - )) - } - } else { - if let Err(e) = tracing::subscriber::set_global_default(subscriber) { - return Err(format!( - "Registering Substrate tracing subscriber failed: {:}!", e - )) - } - } - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate as sc_cli; - use std::{env, process::Command}; - use tracing::{metadata::Kind, subscriber::Interest, Callsite, Level, Metadata}; - - #[test] - fn test_logger_filters() { - let test_pattern = "afg=debug,sync=trace,client=warn,telemetry,something-with-dash=error"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); - - tracing::dispatcher::get_default(|dispatcher| { - let test_filter = |target, level| { - struct DummyCallSite; - impl Callsite for DummyCallSite { - fn set_interest(&self, _: Interest) {} - fn metadata(&self) -> &Metadata<'_> { - unreachable!(); - } - } - - let metadata = tracing::metadata!( - name: "", - target: target, - level: level, - fields: &[], - callsite: &DummyCallSite, - kind: Kind::SPAN, - ); - - dispatcher.enabled(&metadata) - }; - - assert!(test_filter("afg", Level::INFO)); - assert!(test_filter("afg", Level::DEBUG)); - assert!(!test_filter("afg", Level::TRACE)); - - assert!(test_filter("sync", Level::TRACE)); - assert!(test_filter("client", Level::WARN)); - - assert!(test_filter("telemetry", Level::TRACE)); - assert!(test_filter("something-with-dash", Level::ERROR)); - }); - } - - const EXPECTED_LOG_MESSAGE: &'static str = "yeah logging works as expected"; - - #[test] - fn dash_in_target_name_works() { - let executable = env::current_exe().unwrap(); - let output = Command::new(executable) - .env("ENABLE_LOGGING", "1") - .args(&["--nocapture", "log_something_with_dash_target_name"]) - .output() - .unwrap(); - - let output = String::from_utf8(output.stderr).unwrap(); - assert!(output.contains(EXPECTED_LOG_MESSAGE)); - } - - /// This is no actual test, it will be used by the `dash_in_target_name_works` test. - /// The given test will call the test executable to only execute this test that - /// will only print `EXPECTED_LOG_MESSAGE` through logging while using a target - /// name that contains a dash. This ensures that targets names with dashes work. 
- #[test] - fn log_something_with_dash_target_name() { - if env::var("ENABLE_LOGGING").is_ok() { - let test_pattern = "test-target=info"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); - - log::info!(target: "test-target", "{}", EXPECTED_LOG_MESSAGE); - } - } - - const EXPECTED_NODE_NAME: &'static str = "THE_NODE"; - - #[test] - fn prefix_in_log_lines() { - let executable = env::current_exe().unwrap(); - let output = Command::new(executable) - .env("ENABLE_LOGGING", "1") - .args(&["--nocapture", "prefix_in_log_lines_entrypoint"]) - .output() - .unwrap(); - - let output = String::from_utf8(output.stderr).unwrap(); - assert!(output.contains(&format!(" [{}] ", EXPECTED_NODE_NAME))); - } - - /// This is no actual test, it will be used by the `prefix_in_log_lines` test. - /// The given test will call the test executable to only execute this test that - /// will only print a log line prefixed by the node name `EXPECTED_NODE_NAME`. - #[test] - fn prefix_in_log_lines_entrypoint() { - if env::var("ENABLE_LOGGING").is_ok() { - let test_pattern = "test-target=info"; - init_logger(&test_pattern, Default::default(), Default::default()).unwrap(); - prefix_in_log_lines_process(); - } - } - - #[crate::prefix_logs_with(EXPECTED_NODE_NAME)] - fn prefix_in_log_lines_process() { - log::info!("Hello World!"); - } -} diff --git a/client/cli/src/params/database_params.rs b/client/cli/src/params/database_params.rs index 24b23f6076a02..4d6cf5f1d3674 100644 --- a/client/cli/src/params/database_params.rs +++ b/client/cli/src/params/database_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,10 +17,11 @@ // along with this program. If not, see <https://www.gnu.org/licenses/>. use crate::arg_enums::Database; +use sc_service::TransactionStorageMode; use structopt::StructOpt; /// Parameters for the database. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct DatabaseParams { /// Select database backend to use. #[structopt( @@ -28,12 +29,22 @@ alias = "db", value_name = "DB", case_insensitive = true, + possible_values = &Database::variants(), )] pub database: Option<Database>, /// Limit the memory the database cache can use. #[structopt(long = "db-cache", value_name = "MiB")] pub database_cache_size: Option<usize>, + + /// Enable storage chain mode + /// + /// This changes the storage format for block bodies. + /// If this is enabled, each transaction is stored separately in the + /// transaction database column and is only referenced by hash + /// in the block body column. + #[structopt(long)] + pub storage_chain: bool, } impl DatabaseParams { @@ -46,4 +57,13 @@ pub fn database_cache_size(&self) -> Option<usize> { self.database_cache_size } + + /// Transaction storage scheme. + pub fn transaction_storage(&self) -> TransactionStorageMode { + if self.storage_chain { + TransactionStorageMode::StorageChain + } else { + TransactionStorageMode::BlockBody + } + } } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index e60779429b179..3c87e91c220f7 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
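A quick sketch of how the new `--storage-chain` flag surfaces through `structopt`, parsing a synthetic argument list; the binary name and values are made up, and `DatabaseParams` is assumed to be re-exported at the crate root as the `pub use params::*` in lib.rs suggests:

```rust
use structopt::StructOpt;

fn main() {
    // Illustrative argv; the first element is the binary name.
    let params = sc_cli::DatabaseParams::from_iter(&["node", "--storage-chain", "--db-cache", "512"]);
    assert!(params.storage_chain);
    assert_eq!(params.database_cache_size, Some(512));
    // The flag maps onto the service-level enum via `transaction_storage()`.
    assert!(matches!(
        params.transaction_storage(),
        sc_service::TransactionStorageMode::StorageChain
    ));
}
```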
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,18 +16,26 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::arg_enums::{ - ExecutionStrategy, TracingReceiver, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, - DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, - DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, +use crate::{ + arg_enums::{ + ExecutionStrategy, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, + DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, + }, + params::{DatabaseParams, PruningParams}, }; -use crate::params::DatabaseParams; -use crate::params::PruningParams; use sc_client_api::execution_extensions::ExecutionStrategies; +use std::path::PathBuf; use structopt::StructOpt; +#[cfg(feature = "wasmtime")] +const WASM_METHOD_DEFAULT: &str = "Compiled"; + +#[cfg(not(feature = "wasmtime"))] +const WASM_METHOD_DEFAULT: &str = "interpreted-i-know-what-i-do"; + /// Parameters for block import. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct ImportParams { #[allow(missing_docs)] #[structopt(flatten)] @@ -49,50 +57,28 @@ pub struct ImportParams { #[structopt( long = "wasm-execution", value_name = "METHOD", - possible_values = &WasmExecutionMethod::enabled_variants(), + possible_values = &WasmExecutionMethod::variants(), case_insensitive = true, - default_value = "Interpreted" + default_value = WASM_METHOD_DEFAULT )] pub wasm_method: WasmExecutionMethod, + /// Specify the path where local WASM runtimes are stored. + /// + /// These runtimes will override on-chain runtimes when the version matches. + #[structopt(long, value_name = "PATH", parse(from_os_str))] + pub wasm_runtime_overrides: Option, + #[allow(missing_docs)] #[structopt(flatten)] pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. - #[structopt( - long = "state-cache-size", - value_name = "Bytes", - default_value = "67108864" - )] + #[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")] pub state_cache_size: usize, - - /// Comma separated list of targets for tracing. - #[structopt(long = "tracing-targets", value_name = "TARGETS")] - pub tracing_targets: Option, - - /// Receiver to process tracing messages. - #[structopt( - long = "tracing-receiver", - value_name = "RECEIVER", - possible_values = &TracingReceiver::variants(), - case_insensitive = true, - default_value = "Log" - )] - pub tracing_receiver: TracingReceiver, } impl ImportParams { - /// Receiver to process tracing messages. - pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { - self.tracing_receiver.clone().into() - } - - /// Comma separated list of targets for tracing. - pub fn tracing_targets(&self) -> Option { - self.tracing_targets.clone() - } - /// Specify the state cache size. pub fn state_cache_size(&self) -> usize { self.state_cache_size @@ -103,15 +89,17 @@ impl ImportParams { self.wasm_method.into() } + /// Enable overriding on-chain WASM with locally-stored WASM + /// by specifying the path where local WASM is stored. 
+ pub fn wasm_runtime_overrides(&self) -> Option { + self.wasm_runtime_overrides.clone() + } + /// Get execution strategies for the parameters pub fn execution_strategies(&self, is_dev: bool, is_validator: bool) -> ExecutionStrategies { let exec = &self.execution_strategies; let exec_all_or = |strat: Option, default: ExecutionStrategy| { - let default = if is_dev { - ExecutionStrategy::Native - } else { - default - }; + let default = if is_dev { ExecutionStrategy::Native } else { default }; exec.execution.unwrap_or_else(|| strat.unwrap_or(default)).into() }; @@ -125,17 +113,21 @@ impl ImportParams { ExecutionStrategies { syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), importing: exec_all_or(exec.execution_import_block, default_execution_import_block), - block_construction: - exec_all_or(exec.execution_block_construction, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION), - offchain_worker: - exec_all_or(exec.execution_offchain_worker, DEFAULT_EXECUTION_OFFCHAIN_WORKER), + block_construction: exec_all_or( + exec.execution_block_construction, + DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + ), + offchain_worker: exec_all_or( + exec.execution_offchain_worker, + DEFAULT_EXECUTION_OFFCHAIN_WORKER, + ), other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), } } } /// Execution strategies parameters. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct ExecutionStrategiesParams { /// The means of execution used when calling into the runtime for importing blocks as /// part of an initial sync. @@ -175,7 +167,8 @@ pub struct ExecutionStrategiesParams { )] pub execution_offchain_worker: Option, - /// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks. + /// The means of execution used when calling into the runtime while not syncing, importing or + /// constructing blocks. #[structopt( long = "execution-other", value_name = "STRATEGY", diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index 3c04d63144595..951f61bd1bc5f 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,21 +16,25 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error::Result; +use crate::{error, error::Result}; use sc_service::config::KeystoreConfig; -use std::fs; -use std::path::PathBuf; +use sp_core::crypto::SecretString; +use std::{ + fs, + path::{Path, PathBuf}, +}; use structopt::StructOpt; -use crate::error; -use sp_core::crypto::{SecretString, Zeroize}; -use std::str::FromStr; /// default sub directory for the key store const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; /// Parameters of the keystore -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct KeystoreParams { + /// Specify custom URIs to connect to for keystore-services + #[structopt(long = "keystore-uri")] + pub keystore_uri: Option, + /// Specify custom keystore path. 
#[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] pub keystore_path: Option, @@ -42,7 +46,8 @@ pub struct KeystoreParams { )] pub password_interactive: bool, - /// Password used by the keystore. + /// Password used by the keystore. This allows appending an extra user-defined secret to the + /// seed. #[structopt( long = "password", parse(try_from_str = secret_string_from_str), @@ -62,31 +67,19 @@ pub struct KeystoreParams { /// Parse a sercret string, returning a displayable error. pub fn secret_string_from_str(s: &str) -> std::result::Result { - Ok(std::str::FromStr::from_str(s) - .map_err(|_e| "Could not get SecretString".to_string())?) + std::str::FromStr::from_str(s).map_err(|_| "Could not get SecretString".to_string()) } impl KeystoreParams { /// Get the keystore configuration for the parameters - pub fn keystore_config(&self, base_path: &PathBuf) -> Result { + /// + /// Returns a vector of remote-urls and the local Keystore configuration + pub fn keystore_config(&self, config_dir: &Path) -> Result<(Option, KeystoreConfig)> { let password = if self.password_interactive { - #[cfg(not(target_os = "unknown"))] - { - let mut password = input_keystore_password()?; - let secret = std::str::FromStr::from_str(password.as_str()) - .map_err(|()| "Error reading password")?; - password.zeroize(); - Some(secret) - } - #[cfg(target_os = "unknown")] - None + Some(SecretString::new(input_keystore_password()?)) } else if let Some(ref file) = self.password_filename { - let mut password = fs::read_to_string(file) - .map_err(|e| format!("{}", e))?; - let secret = std::str::FromStr::from_str(password.as_str()) - .map_err(|()| "Error reading password")?; - password.zeroize(); - Some(secret) + let password = fs::read_to_string(file).map_err(|e| format!("{}", e))?; + Some(SecretString::new(password)) } else { self.password.clone() }; @@ -94,9 +87,9 @@ impl KeystoreParams { let path = self .keystore_path .clone() - .unwrap_or_else(|| base_path.join(DEFAULT_KEYSTORE_CONFIG_PATH)); + .unwrap_or_else(|| config_dir.join(DEFAULT_KEYSTORE_CONFIG_PATH)); - Ok(KeystoreConfig::Path { path, password }) + Ok((self.keystore_uri.clone(), KeystoreConfig::Path { path, password })) } /// helper method to fetch password from `KeyParams` or read from stdin @@ -104,10 +97,8 @@ impl KeystoreParams { let (password_interactive, password) = (self.password_interactive, self.password.clone()); let pass = if password_interactive { - let mut password = rpassword::read_password_from_tty(Some("Key password: "))?; - let pass = Some(FromStr::from_str(&password).map_err(|()| "Error reading password")?); - password.zeroize(); - pass + let password = rpassword::read_password_from_tty(Some("Key password: "))?; + Some(SecretString::new(password)) } else { password }; @@ -116,7 +107,6 @@ impl KeystoreParams { } } -#[cfg(not(target_os = "unknown"))] fn input_keystore_password() -> Result { rpassword::read_password_from_tty(Some("Keystore password: ")) .map_err(|e| format!("{:?}", e).into()) diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 93467bc8ec637..dac832a1f897c 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,23 +25,23 @@ mod pruning_params; mod shared_params; mod transaction_pool_params; -use std::{fmt::Debug, str::FromStr, convert::TryFrom}; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, NumberFor}}; +use crate::arg_enums::{CryptoScheme, OutputType}; use sp_core::crypto::Ss58AddressFormat; -use crate::arg_enums::{OutputType, CryptoScheme}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; +use std::{convert::TryFrom, fmt::Debug, str::FromStr}; use structopt::StructOpt; -pub use crate::params::database_params::*; -pub use crate::params::import_params::*; -pub use crate::params::keystore_params::*; -pub use crate::params::network_params::*; -pub use crate::params::node_key_params::*; -pub use crate::params::offchain_worker_params::*; -pub use crate::params::pruning_params::*; -pub use crate::params::shared_params::*; -pub use crate::params::transaction_pool_params::*; - -/// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal. +pub use crate::params::{ + database_params::*, import_params::*, keystore_params::*, network_params::*, + node_key_params::*, offchain_worker_params::*, pruning_params::*, shared_params::*, + transaction_pool_params::*, +}; + +/// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a +/// decimal. #[derive(Debug, Clone)] pub struct GenericNumber(String); @@ -50,10 +50,7 @@ impl FromStr for GenericNumber { fn from_str(block_number: &str) -> Result { if let Some(pos) = block_number.chars().position(|d| !d.is_digit(10)) { - Err(format!( - "Expected block number, found illegal digit at position: {}", - pos, - )) + Err(format!("Expected block number, found illegal digit at position: {}", pos)) } else { Ok(Self(block_number.to_owned())) } @@ -66,16 +63,16 @@ impl GenericNumber { /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate /// documentation. pub fn parse(&self) -> Result - where - N: FromStr, - N::Err: std::fmt::Debug, + where + N: FromStr, + N::Err: std::fmt::Debug, { FromStr::from_str(&self.0).map_err(|e| format!("Failed to parse block number: {:?}", e)) } } /// Wrapper type that is either a `Hash` or the number of a `Block`. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct BlockNumberOrHash(String); impl FromStr for BlockNumberOrHash { @@ -109,7 +106,7 @@ impl BlockNumberOrHash { if self.0.starts_with("0x") { Ok(BlockId::Hash( FromStr::from_str(&self.0[2..]) - .map_err(|e| format!("Failed to parse block hash: {:?}", e))? 
+ .map_err(|e| format!("Failed to parse block hash: {:?}", e))?, )) } else { GenericNumber(self.0.clone()).parse().map(BlockId::Number) @@ -117,9 +114,8 @@ impl BlockNumberOrHash { } } - /// Optional flag for specifying crypto algorithm -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct CryptoSchemeFlag { /// cryptography scheme #[structopt( @@ -133,7 +129,7 @@ pub struct CryptoSchemeFlag { } /// Optional flag for specifying output type -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct OutputTypeFlag { /// output format #[structopt( @@ -147,7 +143,7 @@ pub struct OutputTypeFlag { } /// Optional flag for specifying network scheme -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct NetworkSchemeFlag { /// network address format #[structopt( diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 209742f54e9b8..6eaf068fdaecd 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,17 +16,22 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::params::node_key_params::NodeKeyParams; +use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams}; use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, TransportConfig}, + config::{ + NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig, + }, multiaddr::Protocol, }; -use sc_service::{ChainSpec, config::{Multiaddr, MultiaddrWithPeerId}}; -use std::path::PathBuf; +use sc_service::{ + config::{Multiaddr, MultiaddrWithPeerId}, + ChainSpec, ChainType, +}; +use std::{borrow::Cow, path::PathBuf}; use structopt::StructOpt; /// Parameters used to create the network configuration. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct NetworkParams { /// Specify a list of bootnodes. #[structopt(long = "bootnodes", value_name = "ADDR")] @@ -36,10 +41,14 @@ pub struct NetworkParams { #[structopt(long = "reserved-nodes", value_name = "ADDR")] pub reserved_nodes: Vec, - /// Whether to only allow connections to/from reserved nodes. + /// Whether to only synchronize the chain with reserved nodes. + /// + /// Also disables automatic peer discovery. /// - /// If you are a validator your node might still connect to other validator - /// nodes regardless of whether they are defined as reserved nodes. + /// TCP connections might still be established with non-reserved nodes. + /// In particular, if you are a validator your node might still connect to other + /// validator nodes and collator nodes regardless of whether they are defined as + /// reserved nodes. #[structopt(long = "reserved-only")] pub reserved_only: bool, @@ -49,6 +58,10 @@ pub struct NetworkParams { pub public_addr: Vec, /// Listen on this multiaddress. + /// + /// By default: + /// If `--validator` is passed: `/ip4/0.0.0.0/tcp/` and `/ip6/[::]/tcp/`. + /// Otherwise: `/ip4/0.0.0.0/tcp//ws` and `/ip6/[::]/tcp//ws`. 
#[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] pub listen_addr: Vec, @@ -56,12 +69,19 @@ pub struct NetworkParams { #[structopt(long = "port", value_name = "PORT", conflicts_with_all = &[ "listen-addr" ])] pub port: Option, - /// Forbid connecting to private IPv4 addresses (as specified in + /// Always forbid connecting to private IPv4 addresses (as specified in /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with - /// `--reserved-nodes` or `--bootnodes`. - #[structopt(long = "no-private-ipv4")] + /// `--reserved-nodes` or `--bootnodes`. Enabled by default for chains marked as "live" in + /// their chain specifications. + #[structopt(long = "no-private-ipv4", conflicts_with_all = &["allow-private-ipv4"])] pub no_private_ipv4: bool, + /// Always accept connecting to private IPv4 addresses (as specified in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Enabled by default for chains marked as + /// "local" in their chain specifications, or when `--dev` is passed. + #[structopt(long = "allow-private-ipv4", conflicts_with_all = &["no-private-ipv4"])] + pub allow_private_ipv4: bool, + /// Specify the number of outgoing connections we're trying to maintain. #[structopt(long = "out-peers", value_name = "COUNT", default_value = "25")] pub out_peers: u32, @@ -81,11 +101,7 @@ pub struct NetworkParams { /// /// This allows downloading announced blocks from multiple peers. Decrease to save /// traffic and risk increased latency. - #[structopt( - long = "max-parallel-downloads", - value_name = "COUNT", - default_value = "5" - )] + #[structopt(long = "max-parallel-downloads", value_name = "COUNT", default_value = "5")] pub max_parallel_downloads: u32, #[allow(missing_docs)] @@ -94,17 +110,32 @@ pub struct NetworkParams { /// Enable peer discovery on local networks. /// - /// By default this option is true for `--dev` and false otherwise. + /// By default this option is `true` for `--dev` or when the chain type is + /// `Local`/`Development` and false otherwise. #[structopt(long)] pub discover_local: bool, - /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in the - /// presence of potentially adversarial nodes. + /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in + /// the presence of potentially adversarial nodes. /// /// See the S/Kademlia paper for more information on the high level design as well as its /// security improvements. #[structopt(long)] pub kademlia_disjoint_query_paths: bool, + + /// Join the IPFS network and serve transactions over bitswap protocol. + #[structopt(long)] + pub ipfs_server: bool, + + /// Blockchain syncing mode. + /// + /// - `Full`: Download and validate full blockchain history. + /// + /// - `Fast`: Download blocks and the latest state only. + /// + /// - `FastUnsafe`: Same as `Fast`, but skip downloading state proofs. 
+ #[structopt(long, value_name = "SYNC_MODE", default_value = "Full")] + pub sync: SyncMode, } impl NetworkParams { @@ -113,6 +144,7 @@ impl NetworkParams { &self, chain_spec: &Box, is_dev: bool, + is_validator: bool, net_config_path: Option, client_id: &str, node_name: &str, @@ -122,14 +154,27 @@ impl NetworkParams { let port = self.port.unwrap_or(default_listen_port); let listen_addresses = if self.listen_addr.is_empty() { - vec![ - Multiaddr::empty() - .with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into())) - .with(Protocol::Tcp(port)), - Multiaddr::empty() - .with(Protocol::Ip4([0, 0, 0, 0].into())) - .with(Protocol::Tcp(port)), - ] + if is_validator { + vec![ + Multiaddr::empty() + .with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)), + Multiaddr::empty() + .with(Protocol::Ip4([0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)), + ] + } else { + vec![ + Multiaddr::empty() + .with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)) + .with(Protocol::Ws(Cow::Borrowed("/"))), + Multiaddr::empty() + .with(Protocol::Ip4([0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)) + .with(Protocol::Ws(Cow::Borrowed("/"))), + ] + } } else { self.listen_addr.clone() }; @@ -139,32 +184,52 @@ impl NetworkParams { let mut boot_nodes = chain_spec.boot_nodes().to_vec(); boot_nodes.extend(self.bootnodes.clone()); + let chain_type = chain_spec.chain_type(); + // Activate if the user explicitly requested local discovery, `--dev` is given or the + // chain type is `Local`/`Development` + let allow_non_globals_in_dht = + self.discover_local || + is_dev || matches!(chain_type, ChainType::Local | ChainType::Development); + + let allow_private_ipv4 = match (self.allow_private_ipv4, self.no_private_ipv4) { + (true, true) => unreachable!("`*_private_ipv4` flags are mutually exclusive; qed"), + (true, false) => true, + (false, true) => false, + (false, false) => + is_dev || matches!(chain_type, ChainType::Local | ChainType::Development), + }; + NetworkConfiguration { boot_nodes, net_config_path, - reserved_nodes: self.reserved_nodes.clone(), - non_reserved_mode: if self.reserved_only { - NonReservedPeerMode::Deny - } else { - NonReservedPeerMode::Accept + default_peers_set: SetConfig { + in_peers: self.in_peers, + out_peers: self.out_peers, + reserved_nodes: self.reserved_nodes.clone(), + non_reserved_mode: if self.reserved_only { + NonReservedPeerMode::Deny + } else { + NonReservedPeerMode::Accept + }, }, listen_addresses, public_addresses, - notifications_protocols: Vec::new(), + extra_sets: Vec::new(), request_response_protocols: Vec::new(), node_key, node_name: node_name.to_string(), client_version: client_id.to_string(), - in_peers: self.in_peers, - out_peers: self.out_peers, transport: TransportConfig::Normal { enable_mdns: !is_dev && !self.no_mdns, - allow_private_ipv4: !self.no_private_ipv4, - wasm_external_transport: None, + allow_private_ipv4, }, max_parallel_downloads: self.max_parallel_downloads, - allow_non_globals_in_dht: self.discover_local || is_dev, + enable_dht_random_walk: !self.reserved_only, + allow_non_globals_in_dht, kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths, + yamux_window_size: None, + ipfs_server: self.ipfs_server, + sync_mode: self.sync.into(), } } } diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 875411fbfb620..41f9033d282d1 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -1,6 +1,6 @@ // This file is part 
of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,13 +16,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_network::{config::identity::ed25519, config::NodeKeyConfig}; +use sc_network::config::{identity::ed25519, NodeKeyConfig}; use sp_core::H256; use std::{path::PathBuf, str::FromStr}; use structopt::StructOpt; -use crate::arg_enums::NodeKeyType; -use crate::error; +use crate::{arg_enums::NodeKeyType, error}; /// The file name of the node's Ed25519 secret key inside the chain-specific /// network config directory, if neither `--node-key` nor `--node-key-file` @@ -31,7 +30,7 @@ const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; /// Parameters used to create the `NodeKeyConfig`, which determines the keypair /// used for libp2p networking. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct NodeKeyParams { /// The secret key to use for libp2p networking. /// @@ -54,17 +53,16 @@ pub struct NodeKeyParams { /// /// The secret key of the node is obtained as follows: /// - /// * If the `--node-key` option is given, the value is parsed as a secret key - /// according to the type. See the documentation for `--node-key`. + /// * If the `--node-key` option is given, the value is parsed as a secret key according to + /// the type. See the documentation for `--node-key`. /// - /// * If the `--node-key-file` option is given, the secret key is read from the - /// specified file. See the documentation for `--node-key-file`. + /// * If the `--node-key-file` option is given, the secret key is read from the specified + /// file. See the documentation for `--node-key-file`. /// - /// * Otherwise, the secret key is read from a file with a predetermined, - /// type-specific name from the chain-specific network config directory - /// inside the base directory specified by `--base-dir`. If this file does - /// not exist, it is created with a newly generated secret key of the - /// chosen type. + /// * Otherwise, the secret key is read from a file with a predetermined, type-specific name + /// from the chain-specific network config directory inside the base directory specified by + /// `--base-dir`. If this file does not exist, it is created with a newly generated secret + /// key of the chosen type. /// /// The node's secret key determines the corresponding public key and hence the /// node's peer ID in the context of libp2p. @@ -103,12 +101,12 @@ impl NodeKeyParams { sc_network::config::Secret::File( self.node_key_file .clone() - .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)) + .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)), ) }; NodeKeyConfig::Ed25519(secret) - } + }, }) } } @@ -120,13 +118,11 @@ fn invalid_node_key(e: impl std::fmt::Display) -> error::Error { /// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`. 
fn parse_ed25519_secret(hex: &str) -> error::Result { - H256::from_str(&hex) - .map_err(invalid_node_key) - .and_then(|bytes| { - ed25519::SecretKey::from_bytes(bytes) - .map(sc_network::config::Secret::Input) - .map_err(invalid_node_key) - }) + H256::from_str(&hex).map_err(invalid_node_key).and_then(|bytes| { + ed25519::SecretKey::from_bytes(bytes) + .map(sc_network::config::Secret::Input) + .map_err(invalid_node_key) + }) } #[cfg(test)] @@ -151,9 +147,7 @@ mod tests { params.node_key(net_config_dir).and_then(|c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() => - { - Ok(()) - } + Ok(()), _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) @@ -171,14 +165,14 @@ mod tests { node_key_file: Some(file), }; - let node_key = params.node_key(&PathBuf::from("not-used")) + let node_key = params + .node_key(&PathBuf::from("not-used")) .expect("Creates node key config") .into_keypair() .expect("Creates node key pair"); match node_key { - Keypair::Ed25519(ref pair) - if pair.secret().as_ref() == key.as_ref() => {} + Keypair::Ed25519(ref pair) if pair.secret().as_ref() == key.as_ref() => {}, _ => panic!("Invalid key"), } } @@ -202,11 +196,7 @@ mod tests { { NodeKeyType::variants().iter().try_for_each(|t| { let node_key_type = NodeKeyType::from_str(t).unwrap(); - f(NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: None, - }) + f(NodeKeyParams { node_key_type, node_key: None, node_key_file: None }) }) } @@ -214,17 +204,12 @@ mod tests { with_def_params(|params| { let dir = PathBuf::from(net_config_dir.clone()); let typ = params.node_key_type; - params - .node_key(net_config_dir) - .and_then(move |c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if typ == NodeKeyType::Ed25519 - && f == &dir.join(NODE_KEY_ED25519_FILE) => - { - Ok(()) - } - _ => Err(error::Error::Input("Unexpected node key config".into())), - }) + params.node_key(net_config_dir).and_then(move |c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => + Ok(()), + _ => Err(error::Error::Input("Unexpected node key config".into())), + }) }) } diff --git a/client/cli/src/params/offchain_worker_params.rs b/client/cli/src/params/offchain_worker_params.rs index f8d48edc4729d..685328ef17795 100644 --- a/client/cli/src/params/offchain_worker_params.rs +++ b/client/cli/src/params/offchain_worker_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -23,16 +23,14 @@ //! targeted at handling input parameter parsing providing //! a reasonable abstraction. -use structopt::StructOpt; -use sc_service::config::OffchainWorkerConfig; use sc_network::config::Role; +use sc_service::config::OffchainWorkerConfig; +use structopt::StructOpt; -use crate::error; -use crate::OffchainWorkerEnabled; - +use crate::{error, OffchainWorkerEnabled}; /// Offchain worker related parameters. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct OffchainWorkerParams { /// Should execute offchain workers on every block. 
/// @@ -50,20 +48,13 @@ /// /// Enables a runtime to write directly to an offchain worker's /// DB during block import. - #[structopt( - long = "enable-offchain-indexing", - value_name = "ENABLE_OFFCHAIN_INDEXING" - )] + #[structopt(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING")] pub indexing_enabled: bool, } impl OffchainWorkerParams { /// Load spec to `Configuration` from `OffchainWorkerParams` and spec factory. - pub fn offchain_worker( - &self, - role: &Role, - ) -> error::Result<OffchainWorkerConfig> - { + pub fn offchain_worker(&self, role: &Role) -> error::Result<OffchainWorkerConfig> { let enabled = match (&self.enabled, role) { (OffchainWorkerEnabled::WhenValidating, Role::Authority { .. }) => true, (OffchainWorkerEnabled::Always, _) => true, @@ -71,8 +62,7 @@ (OffchainWorkerEnabled::WhenValidating, _) => false, }; - let indexing_enabled = enabled && self.indexing_enabled; - + let indexing_enabled = self.indexing_enabled; Ok(OffchainWorkerConfig { enabled, indexing_enabled }) } } diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 7db808e6d8f2f..28c7fa301cc60 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,11 +17,11 @@ // along with this program. If not, see <https://www.gnu.org/licenses/>. use crate::error; -use sc_service::{PruningMode, Role}; +use sc_service::{KeepBlocks, PruningMode, Role}; use structopt::StructOpt; /// Parameters to define the pruning mode -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct PruningParams { /// Specify the state pruning mode, a number of blocks to keep or 'archive'. /// @@ -30,32 +30,45 @@ /// 256 blocks. #[structopt(long = "pruning", value_name = "PRUNING_MODE")] pub pruning: Option<String>, + /// Specify the number of finalized blocks to keep in the database. + /// + /// Default is to keep all blocks. + #[structopt(long, value_name = "COUNT")] + pub keep_blocks: Option<u32>, } impl PruningParams { /// Get the pruning value from the parameters - pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result<PruningMode> { + pub fn state_pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result<PruningMode> { // by default we disable pruning if the node is an authority (i.e. // `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the // node is an authority and pruning is enabled explicitly, then we error // unless `unsafe_pruning` is set. Ok(match &self.pruning { Some(ref s) if s == "archive" => PruningMode::ArchiveAll, - None if role.is_network_authority() => PruningMode::ArchiveAll, + None if role.is_authority() => PruningMode::ArchiveAll, None => PruningMode::default(), Some(s) => { - if role.is_network_authority() && !unsafe_pruning { + if role.is_authority() && !unsafe_pruning { return Err(error::Error::Input( "Validators should run with state pruning disabled (i.e. archive). \ You can ignore this check with `--unsafe-pruning`." .to_string(), - )); + )) } PruningMode::keep_blocks(s.parse().map_err(|_| { error::Error::Input("Invalid pruning mode specified".to_string()) })?) 
- } + }, }) } + + /// Get the block pruning value from the parameters + pub fn keep_blocks(&self) -> error::Result<KeepBlocks> { + Ok(match self.keep_blocks { + Some(n) => KeepBlocks::Some(n), + None => KeepBlocks::All, }) } } diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index ad9ab04070563..41472387d2639 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,14 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. +use crate::arg_enums::TracingReceiver; use sc_service::config::BasePath; use std::path::PathBuf; use structopt::StructOpt; /// Shared parameters used by all `CoreParams`. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct SharedParams { - /// Specify the chain specification (one of dev, local, or staging). + /// Specify the chain specification. + /// + /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file + /// with the chainspec (such as one exported by the `build-spec` subcommand). #[structopt(long, value_name = "CHAIN_SPEC")] pub chain: Option<String>, @@ -41,6 +45,32 @@ /// By default, all targets log `info`. The global log level can be set with -l. #[structopt(short = "l", long, value_name = "LOG_PATTERN")] pub log: Vec<String>, + + /// Disable log color output. + #[structopt(long)] + pub disable_log_color: bool, + + /// Disable feature to dynamically update and reload the log filter. + /// + /// By default this feature is enabled, however it leads to a small performance decrease. + /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this + /// option set. + #[structopt(long = "disable-log-reloading")] + pub disable_log_reloading: bool, + + /// Sets a custom profiling filter. Syntax is the same as for logging: `<target>=<level>`. + #[structopt(long = "tracing-targets", value_name = "TARGETS")] + pub tracing_targets: Option<String>, + + /// Receiver to process tracing messages. + #[structopt( + long = "tracing-receiver", + value_name = "RECEIVER", + possible_values = &TracingReceiver::variants(), + case_insensitive = true, + default_value = "Log" + )] + pub tracing_receiver: TracingReceiver, } impl SharedParams { @@ -58,13 +88,12 @@ pub fn chain_id(&self, is_dev: bool) -> String { match self.chain { Some(ref chain) => chain.clone(), - None => { + None => if is_dev { "dev".into() } else { "".into() - } - } + }, } } @@ -72,4 +101,24 @@ pub fn log_filters(&self) -> &[String] { &self.log } + + /// Should the log color output be disabled? + pub fn disable_log_color(&self) -> bool { + self.disable_log_color + } + + /// Is log reloading disabled? + pub fn is_log_filter_reloading_disabled(&self) -> bool { + self.disable_log_reloading + } + + /// Receiver to process tracing messages. + pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { + self.tracing_receiver.clone().into() + } + + /// Comma separated list of targets for tracing. 
+ pub fn tracing_targets(&self) -> Option<String> { + self.tracing_targets.clone() + } } diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index 3ad278426922e..feea19c97c2d6 100644 --- a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,7 +20,7 @@ use sc_service::config::TransactionPoolOptions; use structopt::StructOpt; /// Parameters used to create the pool configuration. -#[derive(Debug, StructOpt)] +#[derive(Debug, StructOpt, Clone)] pub struct TransactionPoolParams { /// Maximum number of transactions in the transaction pool. #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "8192")] diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index e6d35282ada2b..6f03e02a12d05 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,28 +16,24 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. -use crate::CliConfiguration; -use crate::Result; -use crate::SubstrateCli; +use crate::{error::Error as CliError, CliConfiguration, Result, SubstrateCli}; use chrono::prelude::*; -use futures::pin_mut; -use futures::select; -use futures::{future, future::FutureExt, Future}; +use futures::{future, future::FutureExt, pin_mut, select, Future}; use log::info; -use sc_service::{Configuration, TaskType, TaskManager}; -use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; +use sc_service::{Configuration, Error as ServiceError, TaskManager}; +use sc_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; use std::marker::PhantomData; #[cfg(target_family = "unix")] -async fn main<F, E>(func: F) -> std::result::Result<(), Box<E>> +async fn main<F, E>(func: F) -> std::result::Result<(), E> where F: Future<Output = std::result::Result<(), E>> + future::FusedFuture, - E: 'static + std::error::Error, + E: std::error::Error + Send + Sync + 'static + From<ServiceError>, { use tokio::signal::unix::{signal, SignalKind}; - let mut stream_int = signal(SignalKind::interrupt())?; - let mut stream_term = signal(SignalKind::terminate())?; + let mut stream_int = signal(SignalKind::interrupt()).map_err(ServiceError::Io)?; + let mut stream_term = signal(SignalKind::terminate()).map_err(ServiceError::Io)?; let t1 = stream_int.recv().fuse(); let t2 = stream_term.recv().fuse(); @@ -55,10 +51,10 @@ } #[cfg(not(unix))] -async fn main<F, E>(func: F) -> std::result::Result<(), Box<E>> +async fn main<F, E>(func: F) -> std::result::Result<(), E> where F: Future<Output = std::result::Result<(), E>> + future::FusedFuture, - E: 'static + std::error::Error, + E: std::error::Error + Send + Sync + 'static + From<ServiceError>, { use tokio::signal::ctrl_c; @@ -77,8 +73,7 @@ /// Build a tokio runtime with all features pub fn build_runtime() -> std::result::Result<tokio::runtime::Runtime, std::io::Error> { - tokio::runtime::Builder::new() - .threaded_scheduler() + tokio::runtime::Builder::new_multi_thread() .on_thread_start(|| { 
TOKIO_THREADS_ALIVE.inc(); TOKIO_THREADS_TOTAL.inc(); @@ -90,19 +85,19 @@ pub fn build_runtime() -> std::result::Result<tokio::runtime::Runtime, std::io::Error> -fn run_until_exit<FUT, ERR>( - mut tokio_runtime: tokio::runtime::Runtime, - future: FUT, +fn run_until_exit<F, E>( + tokio_runtime: tokio::runtime::Runtime, + future: F, task_manager: TaskManager, -) -> Result<()> +) -> std::result::Result<(), E> where - FUT: Future<Output = Result<()>> + future::Future, - ERR: 'static + std::error::Error, + F: Future<Output = std::result::Result<(), E>> + future::Future, + E: std::error::Error + Send + Sync + 'static + From<ServiceError>, { let f = future.fuse(); pin_mut!(f); - tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; + tokio_runtime.block_on(main(f))?; tokio_runtime.block_on(task_manager.clean_shutdown()); Ok(()) @@ -121,17 +116,8 @@ impl<C: SubstrateCli> Runner<C> { let tokio_runtime = build_runtime()?; let runtime_handle = tokio_runtime.handle().clone(); - let task_executor = move |fut, task_type| { - match task_type { - TaskType::Async => runtime_handle.spawn(fut).map(drop), - TaskType::Blocking => - runtime_handle.spawn_blocking(move || futures::executor::block_on(fut)) - .map(drop), - } - }; - Ok(Runner { - config: command.create_configuration(cli, task_executor.into())?, + config: command.create_configuration(cli, runtime_handle)?, tokio_runtime, phantom: PhantomData, }) @@ -152,52 +138,49 @@ impl<C: SubstrateCli> Runner<C> { /// 2020-06-03 16:14:21 ⛓ Native runtime: node-251 (substrate-node-1.tx1.au10) /// ``` fn print_node_infos(&self) { - info!("{}", C::impl_name()); - info!("✌️ version {}", C::impl_version()); - info!( - "❤️ by {}, {}-{}", - C::author(), - C::copyright_start_year(), - Local::today().year(), - ); - info!("📋 Chain specification: {}", self.config.chain_spec.name()); - info!("🏷 Node name: {}", self.config.network.node_name); - info!("👤 Role: {}", self.config.display_role()); - info!("💾 Database: {} at {}", - self.config.database, - self.config.database.path().map_or_else(|| "<unknown>".to_owned(), |p| p.display().to_string()) - ); - info!("⛓ Native runtime: {}", C::native_runtime_version(&self.config.chain_spec)); + print_node_infos::<C>(self.config()) } /// A helper function that runs a node with tokio and stops if the process receives the signal /// `SIGTERM` or `SIGINT`. - pub fn run_node_until_exit<F: Future<Output = Result<TaskManager>>>( - mut self, + pub fn run_node_until_exit<F, E>( + self, initialize: impl FnOnce(Configuration) -> F, - ) -> Result<()> { + ) -> std::result::Result<(), E> + where + F: Future<Output = std::result::Result<TaskManager, E>>, + E: std::error::Error + Send + Sync + 'static + From<ServiceError>, + { self.print_node_infos(); let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?; let res = self.tokio_runtime.block_on(main(task_manager.future().fuse())); self.tokio_runtime.block_on(task_manager.clean_shutdown()); - res.map_err(|e| e.to_string().into()) + Ok(res?) } /// A helper function that runs a command with the configuration of this node. - pub fn sync_run(self, runner: impl FnOnce(Configuration) -> Result<()>) -> Result<()> { + pub fn sync_run<E>( + self, + runner: impl FnOnce(Configuration) -> std::result::Result<(), E>, + ) -> std::result::Result<(), E> + where + E: std::error::Error + Send + Sync + 'static + From<ServiceError>, + { runner(self.config) } /// A helper function that runs a future with tokio and stops if the process receives /// the signal `SIGTERM` or `SIGINT`. 
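With the generic error parameter above, a node binary can return its own error type from its service constructor, while the `Runner` owns the tokio runtime whose handle ends up in the configuration via `create_configuration`. A typical call looks like the following sketch, where `Cli` (with its `run` field) and `new_full` are placeholders for a concrete node, not part of this diff:

```rust
use sc_cli::SubstrateCli;

// Sketch: `Cli` is a node's structopt-derived CLI type and `new_full` its
// service builder returning `Result<TaskManager, sc_service::Error>`.
fn run(cli: Cli) -> sc_cli::Result<()> {
    // `create_runner` builds the tokio runtime internally and hands its
    // handle to `create_configuration`.
    let runner = cli.create_runner(&cli.run)?;
    runner.run_node_until_exit(|config| async move {
        new_full(config).map_err(sc_cli::Error::Service)
    })
}
```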
- pub fn async_run( - self, runner: impl FnOnce(Configuration) -> Result<(FUT, TaskManager)>, - ) -> Result<()> + pub fn async_run( + self, + runner: impl FnOnce(Configuration) -> std::result::Result<(F, TaskManager), E>, + ) -> std::result::Result<(), E> where - FUT: Future>, + F: Future>, + E: std::error::Error + Send + Sync + 'static + From + From, { let (future, task_manager) = runner(self.config)?; - run_until_exit(self.tokio_runtime, future, task_manager) + run_until_exit::<_, E>(self.tokio_runtime, future, task_manager) } /// Get an immutable reference to the node Configuration @@ -210,3 +193,22 @@ impl Runner { &mut self.config } } + +/// Log information about the node itself. +pub fn print_node_infos(config: &Configuration) { + info!("{}", C::impl_name()); + info!("✌️ version {}", C::impl_version()); + info!("❤️ by {}, {}-{}", C::author(), C::copyright_start_year(), Local::today().year()); + info!("📋 Chain specification: {}", config.chain_spec.name()); + info!("🏷 Node name: {}", config.network.node_name); + info!("👤 Role: {}", config.display_role()); + info!( + "💾 Database: {} at {}", + config.database, + config + .database + .path() + .map_or_else(|| "".to_owned(), |p| p.display().to_string()) + ); + info!("⛓ Native runtime: {}", C::native_runtime_version(&config.chain_spec)); +} diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index ccc4d515a8e11..75595779427bb 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-aura" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" @@ -13,38 +13,39 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0", path = "../../../primitives/consensus/aura" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } -sc-client-api = { version = "2.0.0", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.10.0-dev", path = "../../../primitives/consensus/aura" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } derive_more = "0.99.2" -futures = "0.3.4" -futures-timer = "3.0.1" -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +futures = "0.3.9" +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } log = "0.4.8" -parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = 
"../../../primitives/blockchain" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-version = { version = "2.0.0", path = "../../../primitives/version" } -sc-consensus-slots = { version = "0.8.0", path = "../slots" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } +sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" } +async-trait = "0.1.50" +# We enable it only for web-wasm check +# See https://docs.rs/getrandom/0.2.1/getrandom/#webassembly-support +getrandom = { version = "0.2", features = ["js"], optional = true } [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-executor = { version = "0.8.0", path = "../../executor" } -sc-keystore = { version = "2.0.0", path = "../../keystore" } -sc-network = { version = "0.8.0", path = "../../network" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +sp-keyring = { version = "4.0.0-dev", path = "../../../primitives/keyring" } +sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } +sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } +sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-service = { version = "0.8.0", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tempfile = "3.1.0" +parking_lot = "0.11.1" diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs new file mode 100644 index 0000000000000..a4dbe5012ea19 --- /dev/null +++ b/client/consensus/aura/src/import_queue.rs @@ -0,0 +1,449 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Module implementing the logic for verifying and importing AuRa blocks. + +use crate::{aura_err, authorities, find_pre_digest, slot_author, AuthorityId, Error}; +use codec::{Codec, Decode, Encode}; +use log::{debug, info, trace}; +use prometheus_endpoint::Registry; +use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_consensus::{ + block_import::{BlockImport, BlockImportParams, ForkChoiceStrategy}, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, +}; +use sc_consensus_slots::{check_equivocation, CheckedHeader, InherentDataProviderExt}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{ + well_known_cache_keys::{self, Id as CacheKeyId}, + HeaderBackend, ProvideCache, +}; +use sp_consensus::{CanAuthorWith, Error as ConsensusError}; +use sp_consensus_aura::{ + digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi, ConsensusLog, + AURA_ENGINE_ID, +}; +use sp_consensus_slots::Slot; +use sp_core::{crypto::Pair, ExecutionContext}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, DigestItemFor, Header}, +}; +use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; + +/// check a header has been signed by the right key. If the slot is too far in the future, an error +/// will be returned. If it's successful, returns the pre-header and the digest item +/// containing the seal. +/// +/// This digest item will always return `Some` when used with `as_aura_seal`. +fn check_header( + client: &C, + slot_now: Slot, + mut header: B::Header, + hash: B::Hash, + authorities: &[AuthorityId
<P>
], + check_for_equivocation: CheckForEquivocation, +) -> Result)>, Error> +where + DigestItemFor: CompatibleDigestItem, + P::Signature: Codec, + C: sc_client_api::backend::AuxStore, + P::Public: Encode + Decode + PartialEq + Clone, +{ + let seal = header.digest_mut().pop().ok_or_else(|| Error::HeaderUnsealed(hash))?; + + let sig = seal.as_aura_seal().ok_or_else(|| aura_err(Error::HeaderBadSeal(hash)))?; + + let slot = find_pre_digest::(&header)?; + + if slot > slot_now { + header.digest_mut().push(seal); + Ok(CheckedHeader::Deferred(header, slot)) + } else { + // check the signature is valid under the expected authority and + // chain state. + let expected_author = + slot_author::
<P>
(slot, &authorities).ok_or_else(|| Error::SlotAuthorNotFound)?; + + let pre_hash = header.hash(); + + if P::verify(&sig, pre_hash.as_ref(), expected_author) { + if check_for_equivocation.check_for_equivocation() { + if let Some(equivocation_proof) = + check_equivocation(client, slot_now, slot, &header, expected_author) + .map_err(Error::Client)? + { + info!( + target: "aura", + "Slot author is equivocating at slot {} with headers {:?} and {:?}", + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + } + } + + Ok(CheckedHeader::Checked(header, (slot, seal))) + } else { + Err(Error::BadSignature(hash)) + } + } +} + +/// A verifier for Aura blocks. +pub struct AuraVerifier { + client: Arc, + phantom: PhantomData
<P>
, + create_inherent_data_providers: CIDP, + can_author_with: CAW, + check_for_equivocation: CheckForEquivocation, + telemetry: Option, +} + +impl AuraVerifier { + pub(crate) fn new( + client: Arc, + create_inherent_data_providers: CIDP, + can_author_with: CAW, + check_for_equivocation: CheckForEquivocation, + telemetry: Option, + ) -> Self { + Self { + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + phantom: PhantomData, + } + } +} + +impl AuraVerifier +where + P: Send + Sync + 'static, + CAW: Send + Sync + 'static, + CIDP: Send, +{ + async fn check_inherents( + &self, + block: B, + block_id: BlockId, + inherent_data: sp_inherents::InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, + ) -> Result<(), Error> + where + C: ProvideRuntimeApi, + C::Api: BlockBuilderApi, + CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, + { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "aura", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(|e| Error::Client(e.into()))?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(Error::Inherent)?, + None => return Err(Error::UnknownInherentError(i)), + } + } + } + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Verifier for AuraVerifier +where + C: ProvideRuntimeApi + + Send + + Sync + + sc_client_api::backend::AuxStore + + ProvideCache + + BlockOf, + C::Api: BlockBuilderApi + AuraApi> + ApiExt, + DigestItemFor: CompatibleDigestItem, + P: Pair + Send + Sync + 'static, + P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, + P::Signature: Encode + Decode, + CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> Result<(BlockImportParams, Option)>>), String> { + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) + .map_err(|e| format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_blockchain::Error::Application(e)))?; + + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::Inherent)?; + + let slot_now = create_inherent_data_providers.slot(); + + // we add one to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of + // headers + let checked_header = check_header::( + &self.client, + slot_now + 1, + block.header, + hash, + &authorities[..], + self.check_for_equivocation, + ) + .map_err(|e| e.to_string())?; + match checked_header { + CheckedHeader::Checked(pre_header, (slot, seal)) => { + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. 
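+ // Concretely, the steps below: take the body out of the import params,
+ // rebuild a block against the just-verified pre-header, overwrite the
+ // slot inherent with the slot recovered from the seal, and only then ask
+ // the runtime to re-check the inherents. (The check is skipped for old
+ // runtimes that do not expose version 2 of the relevant runtime API.)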
+ if let Some(inner_body) = block.body.take() { + let new_block = B::new(pre_header.clone(), inner_body); + + inherent_data.aura_replace_inherent_data(slot); + + // skip the inherents verification if the runtime API is old. + if self + .client + .runtime_api() + .has_api_with::, _>( + &BlockId::Hash(parent_hash), + |v| v >= 2, + ) + .map_err(|e| format!("{:?}", e))? + { + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await + .map_err(|e| e.to_string())?; + } + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: "aura", "Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "aura.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + // Look for an authorities-change log. + let maybe_keys = pre_header + .digest() + .logs() + .iter() + .filter_map(|l| { + l.try_to::>>(OpaqueDigestItemId::Consensus( + &AURA_ENGINE_ID, + )) + }) + .find_map(|l| match l { + ConsensusLog::AuthoritiesChange(a) => + Some(vec![(well_known_cache_keys::AUTHORITIES, a.encode())]), + _ => None, + }); + + block.header = pre_header; + block.post_digests.push(seal); + block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + block.post_hash = Some(hash); + + Ok((block, maybe_keys)) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "aura.header_too_far_in_future"; + "hash" => ?hash, + "a" => ?a, + "b" => ?b, + ); + Err(format!("Header {:?} rejected: too far in the future", hash)) + }, + } + } +} + +/// Should we check for equivocation of a block author? +#[derive(Debug, Clone, Copy)] +pub enum CheckForEquivocation { + /// Yes, check for equivocation. + /// + /// This is the default setting for this. + Yes, + /// No, don't check for equivocation. + No, +} + +impl CheckForEquivocation { + /// Should we check for equivocation? + fn check_for_equivocation(self) -> bool { + matches!(self, Self::Yes) + } +} + +impl Default for CheckForEquivocation { + fn default() -> Self { + Self::Yes + } +} + +/// Parameters of [`import_queue`]. +pub struct ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> { + /// The block import to use. + pub block_import: I, + /// The justification import. + pub justification_import: Option>, + /// The client to interact with the chain. + pub client: Arc, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// The spawner to spawn background tasks. + pub spawner: &'a S, + /// The prometheus registry. + pub registry: Option<&'a Registry>, + /// Can we author with the current node? + pub can_author_with: CAW, + /// Should we check for equivocation? + pub check_for_equivocation: CheckForEquivocation, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, +} + +/// Start an import queue for the Aura consensus algorithm. 
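+///
+/// All inputs are bundled in a single [`ImportQueueParams`] struct instead of a positional
+/// argument list. Internally this builds an [`AuraVerifier`] via [`build_verifier`] and wires
+/// it into a [`BasicQueue`], which is returned as the [`DefaultImportQueue`].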
+pub fn import_queue<'a, P, Block, I, C, S, CAW, CIDP>( + ImportQueueParams { + block_import, + justification_import, + client, + create_inherent_data_providers, + spawner, + registry, + can_author_with, + check_for_equivocation, + telemetry, + }: ImportQueueParams<'a, Block, I, C, S, CAW, CIDP>, +) -> Result, sp_consensus::Error> +where + Block: BlockT, + C::Api: BlockBuilderApi + AuraApi> + ApiExt, + C: 'static + + ProvideRuntimeApi + + BlockOf + + ProvideCache + + Send + + Sync + + AuxStore + + UsageProvider + + HeaderBackend, + I: BlockImport> + + Send + + Sync + + 'static, + DigestItemFor: CompatibleDigestItem, + P: Pair + Send + Sync + 'static, + P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, + P::Signature: Encode + Decode, + S: sp_core::traits::SpawnEssentialNamed, + CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Sync + Send + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + let verifier = build_verifier::(BuildVerifierParams { + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + }); + + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) +} + +/// Parameters of [`build_verifier`]. +pub struct BuildVerifierParams { + /// The client to interact with the chain. + pub client: Arc, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Can we author with the current node? + pub can_author_with: CAW, + /// Should we check for equivocation? + pub check_for_equivocation: CheckForEquivocation, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, +} + +/// Build the [`AuraVerifier`] +pub fn build_verifier( + BuildVerifierParams { + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + }: BuildVerifierParams, +) -> AuraVerifier { + AuraVerifier::<_, P, _, _>::new( + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + ) +} diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 426a0e873f2e9..946e0b90c4dd4 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -31,191 +31,309 @@ //! NOTE: Aura itself is designed to be generic over the crypto used. 
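One piece of the reworked `lib.rs` below is simple enough to model standalone: AuRa's author rotation is a plain round-robin over the authority set, `authorities[slot % len]`. A toy sketch of that rule (string stand-ins for what are really public keys; it mirrors the `slot_author` helper further down rather than copying it):

```rust
// Toy model of AuRa author rotation: the author of slot `s` is
// `authorities[s % authorities.len()]`.
fn toy_slot_author<'a>(slot: u64, authorities: &'a [&'a str]) -> Option<&'a str> {
    if authorities.is_empty() {
        return None
    }
    authorities
        .get((slot % authorities.len() as u64) as usize)
        .copied()
}

fn main() {
    let authorities = ["alice", "bob", "charlie"];
    assert_eq!(toy_slot_author(0, &authorities), Some("alice"));
    assert_eq!(toy_slot_author(4, &authorities), Some("bob")); // 4 % 3 == 1
    assert_eq!(toy_slot_author(3, &authorities), Some("alice")); // wraps around
}
```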
#![forbid(missing_docs, unsafe_code)] use std::{ - sync::Arc, time::Duration, thread, marker::PhantomData, hash::Hash, fmt::Debug, pin::Pin, - collections::HashMap, convert::{TryFrom, TryInto}, + convert::{TryFrom, TryInto}, + fmt::Debug, + hash::Hash, + marker::PhantomData, + pin::Pin, + sync::Arc, }; use futures::prelude::*; -use parking_lot::Mutex; -use log::{debug, info, trace}; -use prometheus_endpoint::Registry; +use log::{debug, trace}; -use codec::{Encode, Decode, Codec}; +use codec::{Codec, Decode, Encode}; -use sp_consensus::{ - self, BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, SelectChain, SlotData, BlockCheckParams, ImportResult -}; -use sp_consensus::import_queue::{ - Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, BoxFinalityProofImport, -}; -use sc_client_api::{backend::AuxStore, BlockOf}; -use sp_blockchain::{ - self, Result as CResult, well_known_cache_keys::{self, Id as CacheKeyId}, - ProvideCache, HeaderBackend, -}; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_core::crypto::Public; -use sp_application_crypto::{AppKey, AppPublic}; -use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, - Justification, +use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; +use sc_consensus_slots::{ + BackoffAuthoringBlocksStrategy, InherentDataProviderExt, SlotInfo, StorageChanges, }; -use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero, Member}; +use sc_telemetry::TelemetryHandle; use sp_api::ProvideRuntimeApi; -use sp_core::crypto::Pair; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_inherents::{InherentDataProviders, InherentData}; -use sp_timestamp::{ - TimestampInherentData, InherentType as TimestampInherent, InherentError as TIError +use sp_application_crypto::{AppKey, AppPublic}; +use sp_blockchain::{HeaderBackend, ProvideCache, Result as CResult}; +use sp_consensus::{ + BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, }; -use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_INFO}; - -use sc_consensus_slots::{ - CheckedHeader, SlotWorker, SlotInfo, SlotCompatible, StorageChanges, check_equivocation, +use sp_consensus_slots::Slot; +use sp_core::crypto::{Pair, Public}; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestItemFor, Header, Member, NumberFor, Zero}, }; -use sp_api::ApiExt; +mod import_queue; -pub use sp_consensus_aura::{ - ConsensusLog, AuraApi, AURA_ENGINE_ID, - inherents::{ - InherentType as AuraInherent, - AuraInherentData, INHERENT_IDENTIFIER, InherentDataProvider, - }, +pub use import_queue::{ + build_verifier, import_queue, AuraVerifier, BuildVerifierParams, CheckForEquivocation, + ImportQueueParams, }; +pub use sc_consensus_slots::SlotProportion; pub use sp_consensus::SyncOracle; -pub use digests::CompatibleDigestItem; - -mod digests; +pub use sp_consensus_aura::{ + digests::CompatibleDigestItem, + inherents::{InherentDataProvider, InherentType as AuraInherent, INHERENT_IDENTIFIER}, + AuraApi, ConsensusLog, AURA_ENGINE_ID, +}; type AuthorityId
<P> = <P as Pair>
::Public; /// Slot duration type for Aura. -pub type SlotDuration = sc_consensus_slots::SlotDuration; +pub type SlotDuration = sc_consensus_slots::SlotDuration; /// Get type of `SlotDuration` for Aura. -pub fn slot_duration(client: &C) -> CResult where +pub fn slot_duration(client: &C) -> CResult +where A: Codec, B: BlockT, - C: AuxStore + ProvideRuntimeApi, - C::Api: AuraApi, + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: AuraApi, { - SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b)) + SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b).map_err(Into::into)) } /// Get slot author for given block along with authorities. -fn slot_author(slot_num: u64, authorities: &[AuthorityId
<P>]) -> Option<&AuthorityId<P>
> { - if authorities.is_empty() { return None } +fn slot_author(slot: Slot, authorities: &[AuthorityId
<P>]) -> Option<&AuthorityId<P>
> { + if authorities.is_empty() { + return None + } - let idx = slot_num % (authorities.len() as u64); + let idx = *slot % (authorities.len() as u64); assert!( - idx <= usize::max_value() as u64, + idx <= usize::MAX as u64, "It is impossible to have a vector with length beyond the address space; qed", ); - let current_author = authorities.get(idx as usize) - .expect("authorities not empty; index constrained to list length;\ - this is a valid index; qed"); + let current_author = authorities.get(idx as usize).expect( + "authorities not empty; index constrained to list length;this is a valid index; qed", + ); Some(current_author) } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -struct AuraSlotCompatible; - -impl SlotCompatible for AuraSlotCompatible { - fn extract_timestamp_and_slot( - &self, - data: &InherentData - ) -> Result<(TimestampInherent, AuraInherent, std::time::Duration), sp_consensus::Error> { - data.timestamp_inherent_data() - .and_then(|t| data.aura_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (x, y, Default::default())) - } +/// Parameters of [`start_aura`]. +pub struct StartAuraParams { + /// The duration of a slot. + pub slot_duration: SlotDuration, + /// The client to interact with the chain. + pub client: Arc, + /// A select chain implementation to select the best block. + pub select_chain: SC, + /// The block import. + pub block_import: I, + /// The proposer factory to build proposer instances. + pub proposer_factory: PF, + /// The sync oracle that can give us the current sync status. + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Should we force the authoring of blocks? + pub force_authoring: bool, + /// The backoff strategy when we miss slots. + pub backoff_authoring_blocks: Option, + /// The keystore used by the node. + pub keystore: SyncCryptoStorePtr, + /// Can we author a block with this node? + pub can_author_with: CAW, + /// The proportion of the slot dedicated to proposing. + /// + /// The block proposing will be limited to this proportion of the slot from the starting of the + /// slot. However, the proposing can still take longer when there is some lenience factor + /// applied, because there were no blocks produced for some slots. + pub block_proposal_slot_portion: SlotProportion, + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. + pub max_block_proposal_slot_portion: Option, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, } /// Start the aura worker. The returned future should be run in a futures executor. 
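+///
+/// Configuration travels in a single [`StartAuraParams`] struct. Internally this builds the
+/// worker via [`build_aura_worker`] and hands it to `sc_consensus_slots::start_slot_worker`;
+/// callers that want to drive slots themselves can call [`build_aura_worker`] directly.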
-pub fn start_aura( - slot_duration: SlotDuration, - client: Arc, - select_chain: SC, - block_import: I, - env: E, - sync_oracle: SO, - inherent_data_providers: InherentDataProviders, - force_authoring: bool, - keystore: SyncCryptoStorePtr, - can_author_with: CAW, -) -> Result, sp_consensus::Error> where - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + Send + Sync, - C::Api: AuraApi>, - SC: SelectChain, - E: Environment + Send + Sync + 'static, - E::Proposer: Proposer>, +pub fn start_aura( + StartAuraParams { + slot_duration, + client, + select_chain, + block_import, + proposer_factory, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + backoff_authoring_blocks, + keystore, + can_author_with, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + telemetry, + }: StartAuraParams, +) -> Result, sp_consensus::Error> +where P: Pair + Send + Sync, P::Public: AppPublic + Hash + Member + Encode + Decode, P::Signature: TryFrom> + Hash + Member + Encode + Decode, + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, + C::Api: AuraApi>, + SC: SelectChain, I: BlockImport> + Send + Sync + 'static, - Error: std::error::Error + Send + From + 'static, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer>, SO: SyncOracle + Send + Sync + Clone, + L: sc_consensus::JustificationSyncLink, + CIDP: CreateInherentDataProviders + Send, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, CAW: CanAuthorWith + Send, + Error: std::error::Error + Send + From + 'static, { - let worker = AuraWorker { - client, - block_import: Arc::new(Mutex::new(block_import)), - env, + let worker = build_aura_worker::(BuildAuraWorkerParams { + client: client.clone(), + block_import, + proposer_factory, keystore, sync_oracle: sync_oracle.clone(), + justification_sync_link, force_authoring, - _key_type: PhantomData::
<P>
, - }; - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.slot_duration() - )?; - Ok(sc_consensus_slots::start_slot_worker::<_, _, _, _, _, AuraSlotCompatible, _>( + backoff_authoring_blocks, + telemetry, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + }); + + Ok(sc_consensus_slots::start_slot_worker( slot_duration, select_chain, worker, sync_oracle, - inherent_data_providers, - AuraSlotCompatible, + create_inherent_data_providers, can_author_with, )) } -struct AuraWorker { +/// Parameters of [`build_aura_worker`]. +pub struct BuildAuraWorkerParams { + /// The client to interact with the chain. + pub client: Arc, + /// The block import. + pub block_import: I, + /// The proposer factory to build proposer instances. + pub proposer_factory: PF, + /// The sync oracle that can give us the current sync status. + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Should we force the authoring of blocks? + pub force_authoring: bool, + /// The backoff strategy when we miss slots. + pub backoff_authoring_blocks: Option, + /// The keystore used by the node. + pub keystore: SyncCryptoStorePtr, + /// The proportion of the slot dedicated to proposing. + /// + /// The block proposing will be limited to this proportion of the slot from the starting of the + /// slot. However, the proposing can still take longer when there is some lenience factor + /// applied, because there were no blocks produced for some slots. + pub block_proposal_slot_portion: SlotProportion, + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. + pub max_block_proposal_slot_portion: Option, + /// Telemetry instance used to report telemetry metrics. + pub telemetry: Option, +} + +/// Build the aura worker. +/// +/// The caller is responsible for running this worker, otherwise it will do nothing. +pub fn build_aura_worker( + BuildAuraWorkerParams { + client, + block_import, + proposer_factory, + sync_oracle, + justification_sync_link, + backoff_authoring_blocks, + keystore, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + telemetry, + force_authoring, + }: BuildAuraWorkerParams, +) -> impl sc_consensus_slots::SlotWorker>::Proof> +where + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, + C::Api: AuraApi>, + PF: Environment + Send + Sync + 'static, + PF::Proposer: Proposer>, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, + I: BlockImport> + Send + Sync + 'static, + Error: std::error::Error + Send + From + 'static, + SO: SyncOracle + Send + Sync + Clone, + L: sc_consensus::JustificationSyncLink, + BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, +{ + AuraWorker { + client, + block_import, + env: proposer_factory, + keystore, + sync_oracle, + justification_sync_link, + force_authoring, + backoff_authoring_blocks, + telemetry, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + _key_type: PhantomData::
<P>
, + } +} + +struct AuraWorker { client: Arc, - block_import: Arc>, + block_import: I, env: E, keystore: SyncCryptoStorePtr, sync_oracle: SO, + justification_sync_link: L, force_authoring: bool, + backoff_authoring_blocks: Option, + block_proposal_slot_portion: SlotProportion, + max_block_proposal_slot_portion: Option, + telemetry: Option, _key_type: PhantomData
<P>
, } -impl sc_consensus_slots::SimpleSlotWorker for AuraWorker where +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for AuraWorker +where B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync, + C: ProvideRuntimeApi + BlockOf + ProvideCache + HeaderBackend + Sync, C::Api: AuraApi>, - E: Environment, + E: Environment + Send + Sync, E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, P: Pair + Send + Sync, P::Public: AppPublic + Public + Member + Encode + Decode + Hash, P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, - SO: SyncOracle + Send + Clone, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, Error: std::error::Error + Send + From + 'static, { type BlockImport = I; type SyncOracle = SO; - type CreateProposer = Pin> + Send + 'static - >>; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; type Proposer = E::Proposer; type Claim = P::Public; type EpochData = Vec>; @@ -224,14 +342,14 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW "aura" } - fn block_import(&self) -> Arc> { - self.block_import.clone() + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import } fn epoch_data( &self, header: &B::Header, - _slot_number: u64, + _slot: Slot, ) -> Result { authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } @@ -240,17 +358,17 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW Some(epoch_data.len()) } - fn claim_slot( + async fn claim_slot( &self, _header: &B::Header, - slot_number: u64, + slot: Slot, epoch_data: &Self::EpochData, ) -> Option { - let expected_author = slot_author::
<P>(slot_number, epoch_data); + let expected_author = slot_author::<P>
(slot, epoch_data); expected_author.and_then(|p| { - if SyncCryptoStore::has_keys( - &*self.keystore, - &[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)], + if SyncCryptoStore::has_keys( + &*self.keystore, + &[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)], ) { Some(p.clone()) } else { @@ -261,25 +379,28 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW fn pre_digest_data( &self, - slot_number: u64, + slot: Slot, _claim: &Self::Claim, ) -> Vec> { - vec![ - as CompatibleDigestItem
<P>
>::aura_pre_digest(slot_number), - ] + vec![ as CompatibleDigestItem>::aura_pre_digest(slot)] } - fn block_import_params(&self) -> Box, - StorageChanges, B>, - Self::Claim, - Self::EpochData, - ) -> Result< - sp_consensus::BlockImportParams>, - sp_consensus::Error> + Send + 'static> - { + fn block_import_params( + &self, + ) -> Box< + dyn Fn( + B::Header, + &B::Hash, + Vec, + StorageChanges, B>, + Self::Claim, + Self::EpochData, + ) -> Result< + sc_consensus::BlockImportParams>, + sp_consensus::Error, + > + Send + + 'static, + > { let keystore = self.keystore.clone(); Box::new(move |header, header_hash, body, storage_changes, public, _epoch| { // sign the pre-sealed hash of the block and then @@ -290,21 +411,28 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW &*keystore, as AppKey>::ID, &public_type_pair, - header_hash.as_ref() - ).map_err(|e| sp_consensus::Error::CannotSign( - public.clone(), e.to_string(), - ))?; - let signature = signature.clone().try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature( - signature, public - ))?; - - let signature_digest_item = as CompatibleDigestItem
<P>
>::aura_seal(signature); + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? + .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + + let signature_digest_item = + as CompatibleDigestItem>::aura_seal(signature); let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(signature_digest_item); import_block.body = Some(body); - import_block.storage_changes = Some(storage_changes); + import_block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); Ok(import_block) @@ -315,61 +443,52 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW self.force_authoring } + fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool { + if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Ok(chain_head_slot) = find_pre_digest::(chain_head) { + return strategy.should_backoff( + *chain_head.number(), + chain_head_slot, + self.client.info().finalized_number, + slot, + self.logging_target(), + ) + } + } + false + } + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { &mut self.sync_oracle } - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin(self.env.init(block).map_err(|e| { - sp_consensus::Error::ClientImport(format!("{:?}", e)).into() - })) + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link } - fn proposing_remaining_duration( - &self, - head: &B::Header, - slot_info: &SlotInfo, - ) -> Option { - let slot_remaining = self.slot_remaining_duration(slot_info); - - let parent_slot = match find_pre_digest::(head) { - Err(_) => return Some(slot_remaining), - Ok(d) => d, - }; - - if let Some(slot_lenience) = - sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) - { - debug!(target: "aura", - "No block for {} slots. 
Applying linear lenience of {}s", - slot_info.number.saturating_sub(parent_slot + 1), - slot_lenience.as_secs(), - ); + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)).into()), + ) + } - Some(slot_remaining + slot_lenience) - } else { - Some(slot_remaining) - } + fn telemetry(&self) -> Option { + self.telemetry.clone() } -} -impl SlotWorker for AuraWorker where - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync + Send, - C::Api: AuraApi>, - E: Environment + Send + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - P: Pair + Send + Sync, - P::Public: AppPublic + Member + Encode + Decode + Hash, - P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, - SO: SyncOracle + Send + Sync + Clone, - Error: std::error::Error + Send + From + 'static, -{ - type OnSlot = Pin> + Send>>; + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> std::time::Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok(); - fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { - >::on_slot(self, chain_head, slot_info) + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &self.block_proposal_slot_portion, + self.max_block_proposal_slot_portion.as_ref(), + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) } } @@ -392,15 +511,11 @@ enum Error { SlotAuthorNotFound, #[display(fmt = "Bad signature on {:?}", _0)] BadSignature(B::Hash), - #[display(fmt = "Rejecting block too far in future")] - TooFarInFuture, Client(sp_blockchain::Error), - DataProvider(String), - Runtime(String), - #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] - SlotNumberMustIncrease(u64, u64), - #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] - ParentUnavailable(B::Hash, B::Hash), + #[display(fmt = "Unknown inherent error for identifier: {}", "String::from_utf8_lossy(_0)")] + UnknownInherentError(sp_inherents::InherentIdentifier), + #[display(fmt = "Inherent error: {}", _0)] + Inherent(sp_inherents::Error), } impl std::convert::From> for String { @@ -409,19 +524,15 @@ impl std::convert::From> for String { } } -fn find_pre_digest(header: &B::Header) -> Result> - where DigestItemFor: CompatibleDigestItem
<P>
, - P::Signature: Decode, - P::Public: Encode + Decode + PartialEq + Clone, -{ +fn find_pre_digest(header: &B::Header) -> Result> { if header.number().is_zero() { - return Ok(0); + return Ok(0.into()) } - let mut pre_digest: Option = None; + let mut pre_digest: Option = None; for log in header.digest().logs() { trace!(target: "aura", "Checking log {:?}", log); - match (log.as_aura_pre_digest(), pre_digest.is_some()) { + match (CompatibleDigestItem::::as_aura_pre_digest(log), pre_digest.is_some()) { (Some(_), true) => Err(aura_err(Error::MultipleHeaders))?, (None, _) => trace!(target: "aura", "Ignoring digest not meant for us"), (s, false) => pre_digest = s, @@ -430,475 +541,52 @@ fn find_pre_digest(header: &B::Header) -> Result( - client: &C, - slot_now: u64, - mut header: B::Header, - hash: B::Hash, - authorities: &[AuthorityId
<P>], -) -> Result<CheckedHeader<B::Header, (u64, DigestItemFor<B>)>, Error<B>> where - DigestItemFor<B>: CompatibleDigestItem<P>
, - P::Signature: Decode, - C: sc_client_api::backend::AuxStore, - P::Public: Encode + Decode + PartialEq + Clone, -{ - let seal = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(Error::HeaderUnsealed(hash)), - }; - - let sig = seal.as_aura_seal().ok_or_else(|| { - aura_err(Error::HeaderBadSeal(hash)) - })?; - - let slot_num = find_pre_digest::(&header)?; - - if slot_num > slot_now { - header.digest_mut().push(seal); - Ok(CheckedHeader::Deferred(header, slot_num)) - } else { - // check the signature is valid under the expected authority and - // chain state. - let expected_author = match slot_author::
<P>
(slot_num, &authorities) { - None => return Err(Error::SlotAuthorNotFound), - Some(author) => author, - }; - - let pre_hash = header.hash(); - - if P::verify(&sig, pre_hash.as_ref(), expected_author) { - if let Some(equivocation_proof) = check_equivocation( - client, - slot_now, - slot_num, - &header, - expected_author, - ).map_err(Error::Client)? { - info!( - "Slot author is equivocating at slot {} with headers {:?} and {:?}", - slot_num, - equivocation_proof.first_header.hash(), - equivocation_proof.second_header.hash(), - ); - } - - Ok(CheckedHeader::Checked(header, (slot_num, seal))) - } else { - Err(Error::BadSignature(hash)) - } - } -} - -/// A verifier for Aura blocks. -pub struct AuraVerifier { - client: Arc, - phantom: PhantomData
<P>
, - inherent_data_providers: sp_inherents::InherentDataProviders, - can_author_with: CAW, -} - -impl AuraVerifier where - P: Send + Sync + 'static, - CAW: Send + Sync + 'static, -{ - fn check_inherents( - &self, - block: B, - block_id: BlockId, - inherent_data: InherentData, - timestamp_now: u64, - ) -> Result<(), Error> where - C: ProvideRuntimeApi, C::Api: BlockBuilderApi, - CAW: CanAuthorWith, - { - const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; - - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "aura", - "Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(Error::Client)?; - - if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { - Some(TIError::ValidAtTimestamp(timestamp)) => { - // halt import until timestamp is valid. - // reject when too far ahead. - if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { - return Err(Error::TooFarInFuture); - } - - let diff = timestamp.saturating_sub(timestamp_now); - info!( - target: "aura", - "halting for block {} seconds in the future", - diff - ); - telemetry!(CONSENSUS_INFO; "aura.halting_for_future_block"; - "diff" => ?diff - ); - thread::sleep(Duration::from_secs(diff)); - Ok(()) - }, - Some(TIError::Other(e)) => Err(Error::Runtime(e.into())), - None => Err(Error::DataProvider( - self.inherent_data_providers.error_to_string(&i, &e) - )), - }) - } else { - Ok(()) - } - } -} - -#[forbid(deprecated)] -impl Verifier for AuraVerifier where - C: ProvideRuntimeApi + - Send + - Sync + - sc_client_api::backend::AuxStore + - ProvideCache + - BlockOf, - C::Api: BlockBuilderApi + AuraApi> + ApiExt, - DigestItemFor: CompatibleDigestItem
<P>
, - P: Pair + Send + Sync + 'static, - P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, - P::Signature: Encode + Decode, - CAW: CanAuthorWith + Send + Sync + 'static, -{ - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - let mut inherent_data = self.inherent_data_providers - .create_inherent_data() - .map_err(|e| e.into_string())?; - let (timestamp_now, slot_now, _) = AuraSlotCompatible.extract_timestamp_and_slot(&inherent_data) - .map_err(|e| format!("Could not extract timestamp and slot: {:?}", e))?; - let hash = header.hash(); - let parent_hash = *header.parent_hash(); - let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) - .map_err(|e| format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; - - // we add one to allow for some small drift. - // FIXME #1019 in the future, alter this queue to allow deferring of - // headers - let checked_header = check_header::( - &self.client, - slot_now + 1, - header, - hash, - &authorities[..], - ).map_err(|e| e.to_string())?; - match checked_header { - CheckedHeader::Checked(pre_header, (slot_num, seal)) => { - // if the body is passed through, we need to use the runtime - // to check that the internally-set timestamp in the inherents - // actually matches the slot set in the seal. - if let Some(inner_body) = body.take() { - inherent_data.aura_replace_inherent_data(slot_num); - let block = B::new(pre_header.clone(), inner_body); - - // skip the inherents verification if the runtime API is old. - if self.client - .runtime_api() - .has_api_with::, _>( - &BlockId::Hash(parent_hash), - |v| v >= 2, - ) - .map_err(|e| format!("{:?}", e))? - { - self.check_inherents( - block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - timestamp_now, - ).map_err(|e| e.to_string())?; - } - - let (_, inner_body) = block.deconstruct(); - body = Some(inner_body); - } - - trace!(target: "aura", "Checked {:?}; importing.", pre_header); - telemetry!(CONSENSUS_TRACE; "aura.checked_and_importing"; "pre_header" => ?pre_header); - - // Look for an authorities-change log. 
- let maybe_keys = pre_header.digest() - .logs() - .iter() - .filter_map(|l| l.try_to::>>( - OpaqueDigestItemId::Consensus(&AURA_ENGINE_ID) - )) - .find_map(|l| match l { - ConsensusLog::AuthoritiesChange(a) => Some( - vec![(well_known_cache_keys::AUTHORITIES, a.encode())] - ), - _ => None, - }); - - let mut import_block = BlockImportParams::new(origin, pre_header); - import_block.post_digests.push(seal); - import_block.body = body; - import_block.justification = justification; - import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - import_block.post_hash = Some(hash); - - Ok((import_block, maybe_keys)) - } - CheckedHeader::Deferred(a, b) => { - debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!(CONSENSUS_DEBUG; "aura.header_too_far_in_future"; - "hash" => ?hash, "a" => ?a, "b" => ?b - ); - Err(format!("Header {:?} rejected: too far in the future", hash)) - } - } - } -} - -fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> where - A: Codec, - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache, - C::Api: AuraApi, -{ - // no cache => no initialization - let cache = match client.cache() { - Some(cache) => cache, - None => return Ok(()), - }; - - // check if we already have initialized the cache - let map_err = |error| sp_consensus::Error::from(sp_consensus::Error::ClientImport( - format!( - "Error initializing authorities cache: {}", - error, - ))); - - let genesis_id = BlockId::Number(Zero::zero()); - let genesis_authorities: Option> = cache - .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); - if genesis_authorities.is_some() { - return Ok(()); - } - - let genesis_authorities = authorities(client, &genesis_id)?; - cache.initialize(&well_known_cache_keys::AUTHORITIES, genesis_authorities.encode()) - .map_err(map_err)?; - - Ok(()) -} - -#[allow(deprecated)] -fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> where - A: Codec, +fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> +where + A: Codec + Debug, B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache, C::Api: AuraApi, { client - .cache() - .and_then(|cache| cache - .get_at(&well_known_cache_keys::AUTHORITIES, at) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()) - ) - .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) + .runtime_api() + .authorities(at) + .ok() .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet.into()) } -/// Register the aura inherent data provider, if not registered already. -fn register_aura_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, - slot_duration: u64, -) -> Result<(), sp_consensus::Error> { - if !inherent_data_providers.has_provider(&INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(InherentDataProvider::new(slot_duration)) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } -} - -/// A block-import handler for Aura. -pub struct AuraBlockImport, P> { - inner: I, - client: Arc, - _phantom: PhantomData<(Block, P)>, -} - -impl, P> Clone for AuraBlockImport { - fn clone(&self) -> Self { - AuraBlockImport { - inner: self.inner.clone(), - client: self.client.clone(), - _phantom: PhantomData, - } - } -} - -impl, P> AuraBlockImport { - /// New aura block import. 
- pub fn new( - inner: I, - client: Arc, - ) -> Self { - Self { - inner, - client, - _phantom: PhantomData, - } - } -} - -impl BlockImport for AuraBlockImport where - I: BlockImport> + Send + Sync, - I::Error: Into, - C: HeaderBackend + ProvideRuntimeApi, - P: Pair + Send + Sync + 'static, - P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, - P::Signature: Encode + Decode, -{ - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).map_err(Into::into) - } - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let hash = block.post_hash(); - let slot_number = find_pre_digest::(&block.header) - .expect("valid Aura headers must contain a predigest; \ - header has been already verified; qed"); - - let parent_hash = *block.header.parent_hash(); - let parent_header = self.client.header(BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or_else(|| ConsensusError::ChainLookup(aura_err( - Error::::ParentUnavailable(parent_hash, hash) - ).into()))?; - - let parent_slot = find_pre_digest::(&parent_header) - .expect("valid Aura headers contain a pre-digest; \ - parent header has already been verified; qed"); - - // make sure that slot number is strictly increasing - if slot_number <= parent_slot { - return Err( - ConsensusError::ClientImport(aura_err( - Error::::SlotNumberMustIncrease(parent_slot, slot_number) - ).into()) - ); - } - - self.inner.import_block(block, new_cache).map_err(Into::into) - } -} - -/// Start an import queue for the Aura consensus algorithm. -pub fn import_queue( - slot_duration: SlotDuration, - block_import: I, - justification_import: Option>, - finality_proof_import: Option>, - client: Arc, - inherent_data_providers: InherentDataProviders, - spawner: &S, - registry: Option<&Registry>, - can_author_with: CAW, -) -> Result, sp_consensus::Error> where - B: BlockT, - C::Api: BlockBuilderApi + AuraApi> + ApiExt, - C: 'static + ProvideRuntimeApi + BlockOf + ProvideCache + Send + Sync + AuxStore + HeaderBackend, - I: BlockImport> + Send + Sync + 'static, - DigestItemFor: CompatibleDigestItem
<P>
, - P: Pair + Send + Sync + 'static, - P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, - P::Signature: Encode + Decode, - S: sp_core::traits::SpawnNamed, - CAW: CanAuthorWith + Send + Sync + 'static, -{ - register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; - initialize_authorities_cache(&*client)?; - - let verifier = AuraVerifier { - client, - inherent_data_providers, - phantom: PhantomData, - can_author_with, - }; - - Ok(BasicQueue::new( - verifier, - Box::new(block_import), - justification_import, - finality_proof_import, - spawner, - registry, - )) -} - #[cfg(test)] mod tests { use super::*; - use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor}; - use sc_network_test::{Block as TestBlock, *}; - use sp_runtime::traits::{Block as BlockT, DigestFor}; - use sc_network::config::ProtocolConfig; + use futures::executor; use parking_lot::Mutex; - use sp_keyring::sr25519::Keyring; - use sc_client_api::BlockchainEvents; - use sp_consensus_aura::sr25519::AuthorityPair; - use sc_consensus_slots::SimpleSlotWorker; - use std::task::Poll; use sc_block_builder::BlockBuilderProvider; - use sp_runtime::traits::Header as _; - use substrate_test_runtime_client::runtime::{Header, H256}; + use sc_client_api::BlockchainEvents; + use sc_consensus::BoxJustificationImport; + use sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker}; use sc_keystore::LocalKeystore; + use sc_network::config::ProtocolConfig; + use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::AURA; + use sp_consensus::{ + AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal, SlotData, + }; + use sp_consensus_aura::sr25519::AuthorityPair; + use sp_inherents::InherentData; + use sp_keyring::sr25519::Keyring; + use sp_runtime::traits::{Block as BlockT, DigestFor, Header as _}; + use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; + use std::{ + task::Poll, + time::{Duration, Instant}, + }; + use substrate_test_runtime_client::{ + runtime::{Header, H256}, + TestClient, + }; type Error = sp_blockchain::Error; - type TestClient = substrate_test_runtime_client::client::Client< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, - TestBlock, - substrate_test_runtime_client::runtime::RuntimeApi - >; - struct DummyFactory(Arc); struct DummyProposer(u64, Arc); @@ -907,33 +595,31 @@ mod tests { type CreateProposer = futures::future::Ready>; type Error = Error; - fn init(&mut self, parent_header: &::Header) - -> Self::CreateProposer - { + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { futures::future::ready(Ok(DummyProposer(parent_header.number + 1, self.0.clone()))) } } impl Proposer for DummyProposer { type Error = Error; - type Transaction = sc_client_api::TransactionFor< - substrate_test_runtime_client::Backend, - TestBlock - >; - type Proposal = future::Ready, Error>>; + type Transaction = + sc_client_api::TransactionFor; + type Proposal = future::Ready, Error>>; + type ProofRecording = DisableProofRecording; + type Proof = (); fn propose( self, _: InherentData, digests: DigestFor, _: Duration, - _: RecordProof, + _: Option, ) -> Self::Proposal { let r = self.1.new_block(digests).unwrap().build().map_err(|e| e.into()); future::ready(r.map(|b| Proposal { block: b.block, - proof: b.proof, + proof: (), storage_changes: b.storage_changes, })) } @@ -941,69 +627,94 @@ mod tests { const SLOT_DURATION: u64 = 1000; + type 
AuraVerifier = import_queue::AuraVerifier< + PeersFullClient, + AuthorityPair, + AlwaysCanAuthor, + Box< + dyn CreateInherentDataProviders< + TestBlock, + (), + InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + >, + >, + >; + type AuraPeer = Peer<(), PeersClient>; + pub struct AuraTestNet { - peers: Vec>, + peers: Vec, } impl TestNetFactory for AuraTestNet { - type Verifier = AuraVerifier; + type Verifier = AuraVerifier; type PeerData = (); + type BlockImport = PeersClient; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { - AuraTestNet { - peers: Vec::new(), - } + AuraTestNet { peers: Vec::new() } } - fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig, _peer_data: &()) - -> Self::Verifier - { + fn make_verifier( + &self, + client: PeersClient, + _cfg: &ProtocolConfig, + _peer_data: &(), + ) -> Self::Verifier { match client { PeersClient::Full(client, _) => { let slot_duration = slot_duration(&*client).expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.get() - ).expect("Registers aura inherent data provider"); - - assert_eq!(slot_duration.get(), SLOT_DURATION); - AuraVerifier { + + assert_eq!(slot_duration.slot_duration().as_millis() as u64, SLOT_DURATION); + import_queue::AuraVerifier::new( client, - inherent_data_providers, - phantom: Default::default(), - can_author_with: AlwaysCanAuthor, - } + Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }), + AlwaysCanAuthor, + CheckForEquivocation::Yes, + None, + ) }, PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"), } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) { + (client.as_block_import(), None, ()) + } + + fn peer(&mut self, i: usize) -> &mut AuraPeer { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec { &self.peers } - - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } } #[test] - #[allow(deprecated)] fn authoring_blocks() { sp_tracing::try_init_simple(); let net = AuraTestNet::new(3); - let peers = &[ - (0, Keyring::Alice), - (1, Keyring::Bob), - (2, Keyring::Charlie), - ]; + let peers = &[(0, Keyring::Alice), (1, Keyring::Bob), (2, Keyring::Charlie)]; let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); @@ -1016,9 +727,9 @@ mod tests { let client = peer.client().as_full().expect("full clients are created").clone(); let select_chain = peer.select_chain().expect("full client has a select chain"); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore.")); - + let keystore = Arc::new( + LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."), + ); SyncCryptoStore::sr25519_generate_new(&*keystore, AURA, Some(&key.to_seed())) .expect("Creates authority key"); @@ -1026,41 +737,54 @@ mod tests { let environ = DummyFactory(client.clone()); import_notifications.push( - client.import_notification_stream() - .take_while(|n| 
future::ready(!(n.origin != BlockOrigin::Own && n.header.number() < &5))) - .for_each(move |_| future::ready(())) + client + .import_notification_stream() + .take_while(|n| { + future::ready(!(n.origin != BlockOrigin::Own && n.header.number() < &5)) + }) + .for_each(move |_| future::ready(())), ); let slot_duration = slot_duration(&*client).expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, slot_duration.get() - ).expect("Registers aura inherent data provider"); - - aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>( - slot_duration, - client.clone(), - select_chain, - client, - environ, - DummyOracle, - inherent_data_providers, - false, - keystore, - sp_consensus::AlwaysCanAuthor, - ).expect("Starts aura")); + aura_futures.push( + start_aura::(StartAuraParams { + slot_duration, + block_import: client.clone(), + select_chain, + client, + proposer_factory: environ, + sync_oracle: DummyOracle, + justification_sync_link: (), + create_inherent_data_providers: |_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }, + force_authoring: false, + backoff_authoring_blocks: Some( + BackoffAuthoringOnFinalizedHeadLagging::default(), + ), + keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, + block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, + telemetry: None, + }) + .expect("Starts aura"), + ); } - futures::executor::block_on(future::select( + executor::block_on(future::select( future::poll_fn(move |cx| { net.lock().poll(cx); Poll::<()>::Pending }), - future::select( - future::join_all(aura_futures), - future::join_all(import_notifications) - ) + future::select(future::join_all(aura_futures), future::join_all(import_notifications)), )); } @@ -1069,11 +793,14 @@ mod tests { let client = substrate_test_runtime_client::new(); assert_eq!(client.chain_info().best_number, 0); - assert_eq!(authorities(&client, &BlockId::Number(0)).unwrap(), vec![ - Keyring::Alice.public().into(), - Keyring::Bob.public().into(), - Keyring::Charlie.public().into() - ]); + assert_eq!( + authorities(&client, &BlockId::Number(0)).unwrap(), + vec![ + Keyring::Alice.public().into(), + Keyring::Bob.public().into(), + Keyring::Charlie.public().into() + ] + ); } #[test] @@ -1083,12 +810,11 @@ mod tests { let mut authorities = vec![ Keyring::Alice.public().into(), Keyring::Bob.public().into(), - Keyring::Charlie.public().into() + Keyring::Charlie.public().into(), ]; let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore."); + let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."); let public = SyncCryptoStore::sr25519_generate_new(&keystore, AuthorityPair::ID, None) .expect("Key should be created"); authorities.push(public.into()); @@ -1102,12 +828,17 @@ mod tests { let worker = AuraWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(client)), + block_import: client, env: environ, keystore: keystore.into(), sync_oracle: DummyOracle.clone(), + justification_sync_link: (), force_authoring: false, + backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), + telemetry: None, _key_type: PhantomData::, + 
block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, }; let head = Header::new( @@ -1115,15 +846,67 @@ mod tests { H256::from_low_u64_be(0), H256::from_low_u64_be(0), Default::default(), - Default::default() + Default::default(), ); - assert!(worker.claim_slot(&head, 0, &authorities).is_none()); - assert!(worker.claim_slot(&head, 1, &authorities).is_none()); - assert!(worker.claim_slot(&head, 2, &authorities).is_none()); - assert!(worker.claim_slot(&head, 3, &authorities).is_some()); - assert!(worker.claim_slot(&head, 4, &authorities).is_none()); - assert!(worker.claim_slot(&head, 5, &authorities).is_none()); - assert!(worker.claim_slot(&head, 6, &authorities).is_none()); - assert!(worker.claim_slot(&head, 7, &authorities).is_some()); + assert!(executor::block_on(worker.claim_slot(&head, 0.into(), &authorities)).is_none()); + assert!(executor::block_on(worker.claim_slot(&head, 1.into(), &authorities)).is_none()); + assert!(executor::block_on(worker.claim_slot(&head, 2.into(), &authorities)).is_none()); + assert!(executor::block_on(worker.claim_slot(&head, 3.into(), &authorities)).is_some()); + assert!(executor::block_on(worker.claim_slot(&head, 4.into(), &authorities)).is_none()); + assert!(executor::block_on(worker.claim_slot(&head, 5.into(), &authorities)).is_none()); + assert!(executor::block_on(worker.claim_slot(&head, 6.into(), &authorities)).is_none()); + assert!(executor::block_on(worker.claim_slot(&head, 7.into(), &authorities)).is_some()); + } + + #[test] + fn on_slot_returns_correct_block() { + let net = AuraTestNet::new(4); + + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."); + SyncCryptoStore::sr25519_generate_new( + &keystore, + AuthorityPair::ID, + Some(&Keyring::Alice.to_seed()), + ) + .expect("Key should be created"); + + let net = Arc::new(Mutex::new(net)); + + let mut net = net.lock(); + let peer = net.peer(3); + let client = peer.client().as_full().expect("full clients are created").clone(); + let environ = DummyFactory(client.clone()); + + let mut worker = AuraWorker { + client: client.clone(), + block_import: client.clone(), + env: environ, + keystore: keystore.into(), + sync_oracle: DummyOracle.clone(), + justification_sync_link: (), + force_authoring: false, + backoff_authoring_blocks: Option::<()>::None, + telemetry: None, + _key_type: PhantomData::, + block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, + }; + + let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + let res = executor::block_on(worker.on_slot(SlotInfo { + slot: 0.into(), + timestamp: 0.into(), + ends_at: Instant::now() + Duration::from_secs(100), + inherent_data: InherentData::new(), + duration: Duration::from_millis(1000), + chain_head: head, + block_size_limit: None, + })) + .unwrap(); + + // The returned block should be imported and we should be able to get its header by now. 
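For intuition on the `claim_slot` assertions in this test module (four authorities, and only slots 3 and 7 claimable by the locally generated key at index 3): Aura hands out slots round-robin. A minimal sketch of that rule, illustrative only and not the actual `sc-consensus-aura` implementation:

```rust
/// Aura gives slot `s` to the authority at index `s % authorities.len()`,
/// so a key at index 3 of 4 claims exactly slots 3, 7, 11, ...
fn expected_author_index(slot: u64, num_authorities: usize) -> usize {
    (slot % num_authorities as u64) as usize
}

fn main() {
    let ours = 3; // the freshly generated key lands after Alice, Bob and Charlie
    let claimable: Vec<u64> =
        (0..8u64).filter(|&s| expected_author_index(s, 4) == ours).collect();
    assert_eq!(claimable, vec![3, 7]); // mirrors the `claim_slot` assertions above
}
```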
+ assert!(client.header(&BlockId::Hash(res.block.hash())).unwrap().is_some()); } } diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 2178f1cf97010..65dfc57133206 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" @@ -14,56 +14,51 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = [ + "derive", +] } +sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } +sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" -serde = { version = "1.0.104", features = ["derive"] } -sp-version = { version = "2.0.0", path = "../../../primitives/version" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -sc-keystore = { version = "2.0.0", path = "../../keystore" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sc-consensus-epochs = { version = "0.8.0", path = "../epochs" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-consensus-vrf = { version = "0.8.0", path = "../../../primitives/consensus/vrf" } -sc-consensus-uncles = { version = "0.8.0", path = "../uncles" } -sc-consensus-slots = { version = "0.8.0", path = "../slots" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../../primitives/utils" } -fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} -futures = "0.3.4" -futures-timer = "3.0.1" -parking_lot = "0.10.0" +serde = { version = "1.0.126", features = ["derive"] } +sp-version = { version = "4.0.0-dev", path = "../../../primitives/version" } +sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } +sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } +sc-client-api 
= { version = "4.0.0-dev", path = "../../api" } +sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } +sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consensus/vrf" } +sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" } +futures = "0.3.9" +parking_lot = "0.11.1" log = "0.4.8" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } rand = "0.7.2" merlin = "2.0" -pdqselect = "0.1.0" derive_more = "0.99.2" -retain_mut = "0.1.1" +retain_mut = "0.1.3" +async-trait = "0.1.50" [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-executor = { version = "0.8.0", path = "../../executor" } -sc-network = { version = "0.8.0", path = "../../network" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } +sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-service = { version = "0.8.0", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } rand_chacha = "0.2.2" tempfile = "3.1.0" - -[features] -test-helpers = [] diff --git a/client/consensus/babe/README.md b/client/consensus/babe/README.md index faba3948ed715..a404d2ea44706 100644 --- a/client/consensus/babe/README.md +++ b/client/consensus/babe/README.md @@ -43,6 +43,6 @@ primary blocks in the chain. We will pick the heaviest chain (more primary blocks) and will go with the longest one in case of a tie. 
An in-depth description and analysis of the protocol can be found here: - + License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 5b3169e600a98..8d5625705a48c 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe-rpc" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" @@ -13,28 +13,28 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-consensus-babe = { version = "0.8.0", path = "../" } -sc-rpc-api = { version = "0.8.0", path = "../../../rpc-api" } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -sp-consensus-babe = { version = "0.8.0", path = "../../../../primitives/consensus/babe" } -serde = { version = "1.0.104", features=["derive"] } -sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } -sc-consensus-epochs = { version = "0.8.0", path = "../../epochs" } -futures = { version = "0.3.4", features = ["compat"] } +sc-consensus-babe = { version = "0.10.0-dev", path = "../" } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../rpc-api" } +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +sp-consensus-babe = { version = "0.10.0-dev", path = "../../../../primitives/consensus/babe" } +serde = { version = "1.0.126", features=["derive"] } +sp-blockchain = { version = "4.0.0-dev", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.10.0-dev", path = "../../epochs" } +futures = "0.3.16" derive_more = "0.99.2" -sp-api = { version = "2.0.0", path = "../../../../primitives/api" } -sp-consensus = { version = "0.8.0", path = "../../../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../../../primitives/application-crypto" } -sp-keystore = { version = "0.8.0", path = "../../../../primitives/keystore" } +sp-api = { version = "4.0.0-dev", path = "../../../../primitives/api" } +sp-consensus = { version = "0.10.0-dev", path = "../../../../primitives/consensus/common" } +sp-core = { version = "4.0.0-dev", path = "../../../../primitives/core" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../../../primitives/application-crypto" } +sp-keystore = { version = "0.10.0-dev", path = "../../../../primitives/keystore" } [dev-dependencies] -sc-consensus = { version = "0.8.0", path = "../../../consensus/common" } -serde_json = "1.0.50" -sp-keyring = { version = "2.0.0", path = "../../../../primitives/keyring" } -sc-keystore = { version = "2.0.0", path = "../../../keystore" } +sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" } +serde_json = "1.0.68" +sp-keyring = { version = "4.0.0-dev", path = "../../../../primitives/keyring" } +sc-keystore = { version = "4.0.0-dev", path = "../../../keystore" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 
a90964cdf73f7..285cfe543cee8 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,33 +18,24 @@ //! RPC api for babe. -use sc_consensus_babe::{Epoch, authorship, Config}; -use futures::{FutureExt as _, TryFutureExt as _}; -use jsonrpc_core::{ - Error as RpcError, - futures::future as rpc_future, -}; +use futures::{FutureExt, TryFutureExt}; +use jsonrpc_core::Error as RpcError; use jsonrpc_derive::rpc; +use sc_consensus_babe::{authorship, Config, Epoch}; use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; -use sp_consensus_babe::{ - AuthorityId, - BabeApi as BabeRuntimeApi, - digests::PreDigest, -}; +use sc_rpc_api::DenyUnsafe; use serde::{Deserialize, Serialize}; -use sp_core::{ - crypto::Public, -}; +use sp_api::{BlockId, ProvideRuntimeApi}; use sp_application_crypto::AppKey; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sc_rpc_api::DenyUnsafe; -use sp_api::{ProvideRuntimeApi, BlockId}; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_consensus::{Error as ConsensusError, SelectChain}; +use sp_consensus_babe::{digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi}; +use sp_core::crypto::Public; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::traits::{Block as BlockT, Header as _}; -use sp_consensus::{SelectChain, Error as ConsensusError}; -use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as BlockChainError}; use std::{collections::HashMap, sync::Arc}; -type FutureResult = Box + Send>; +type FutureResult = jsonrpc_core::BoxFuture>; /// Provides rpc methods for interacting with Babe. 
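The `FutureResult` change above is the crux of the jsonrpc 15 → 18 migration in this file: handlers now return `jsonrpc_core::BoxFuture` (a pinned, boxed `std::future::Future`) directly, rather than boxed futures 0.1 values bridged through `.compat()`. A standalone sketch of the new early-return pattern, using only the `futures` crate and a stand-in `String` error:

```rust
use futures::{future::BoxFuture, FutureExt};

type FutureResult<T> = BoxFuture<'static, Result<T, String>>;

fn epoch_authorship(deny_unsafe: bool) -> FutureResult<u64> {
    // An early error becomes an already-ready async block instead of the old
    // `Box::new(rpc_future::err(..))` construction.
    if deny_unsafe {
        return async move { Err("unsafe RPC call denied".into()) }.boxed()
    }
    async move { Ok(42) }.boxed()
}

fn main() {
    assert!(futures::executor::block_on(epoch_authorship(true)).is_err());
    assert_eq!(futures::executor::block_on(epoch_authorship(false)), Ok(42));
}
```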
#[rpc] @@ -81,59 +72,56 @@ impl BabeRpcHandler { select_chain: SC, deny_unsafe: DenyUnsafe, ) -> Self { - Self { - client, - shared_epoch_changes, - keystore, - babe_config, - select_chain, - deny_unsafe, - } + Self { client, shared_epoch_changes, keystore, babe_config, select_chain, deny_unsafe } } } impl BabeApi for BabeRpcHandler - where - B: BlockT, - C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata + 'static, - C::Api: BabeRuntimeApi, - SC: SelectChain + Clone + 'static, +where + B: BlockT, + C: ProvideRuntimeApi + + HeaderBackend + + HeaderMetadata + + 'static, + C::Api: BabeRuntimeApi, + SC: SelectChain + Clone + 'static, { fn epoch_authorship(&self) -> FutureResult> { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Box::new(rpc_future::err(err.into())); + return async move { Err(err.into()) }.boxed() } - let ( - babe_config, - keystore, - shared_epoch, - client, - select_chain, - ) = ( + let (babe_config, keystore, shared_epoch, client, select_chain) = ( self.babe_config.clone(), self.keystore.clone(), self.shared_epoch_changes.clone(), self.client.clone(), self.select_chain.clone(), ); - let future = async move { - let header = select_chain.best_chain().map_err(Error::Consensus)?; - let epoch_start = client.runtime_api() + + async move { + let header = select_chain.best_chain().map_err(Error::Consensus).await?; + let epoch_start = client + .runtime_api() .current_epoch_start(&BlockId::Hash(header.hash())) - .map_err(|err| { - Error::StringError(format!("{:?}", err)) - })?; - let epoch = epoch_data(&shared_epoch, &client, &babe_config, epoch_start, &select_chain)?; + .map_err(|err| Error::StringError(format!("{:?}", err)))?; + let epoch = + epoch_data(&shared_epoch, &client, &babe_config, *epoch_start, &select_chain) + .await?; let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); let mut claims: HashMap = HashMap::new(); let keys = { - epoch.authorities.iter() + epoch + .authorities + .iter() .enumerate() .filter_map(|(i, a)| { - if SyncCryptoStore::has_keys(&*keystore, &[(a.0.to_raw_vec(), AuthorityId::ID)]) { + if SyncCryptoStore::has_keys( + &*keystore, + &[(a.0.to_raw_vec(), AuthorityId::ID)], + ) { Some((a.0.clone(), i)) } else { None @@ -142,32 +130,31 @@ impl BabeApi for BabeRpcHandler .collect::>() }; - for slot_number in epoch_start..epoch_end { + for slot in *epoch_start..*epoch_end { if let Some((claim, key)) = - authorship::claim_slot_using_keys(slot_number, &epoch, &keystore, &keys) + authorship::claim_slot_using_keys(slot.into(), &epoch, &keystore, &keys) { match claim { PreDigest::Primary { .. } => { - claims.entry(key).or_default().primary.push(slot_number); - } + claims.entry(key).or_default().primary.push(slot); + }, PreDigest::SecondaryPlain { .. } => { - claims.entry(key).or_default().secondary.push(slot_number); - } + claims.entry(key).or_default().secondary.push(slot); + }, PreDigest::SecondaryVRF { .. } => { - claims.entry(key).or_default().secondary_vrf.push(slot_number); + claims.entry(key).or_default().secondary_vrf.push(slot.into()); }, }; } } Ok(claims) - }.boxed(); - - Box::new(future.compat()) + } + .boxed() } } -/// Holds information about the `slot_number`'s that can be claimed by a given key. +/// Holds information about the `slot`'s that can be claimed by a given key. 
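`epoch_authorship` above walks every slot of the current epoch and buckets each successful claim by its pre-digest kind. The bucketing step in isolation, with toy stand-ins for the BABE types (the real `PreDigest` variants carry VRF data, elided here):

```rust
use std::collections::HashMap;

// Toy stand-in for `sp_consensus_babe::digests::PreDigest`.
enum PreDigest { Primary, SecondaryPlain, SecondaryVRF }

#[derive(Default, Debug)]
struct EpochAuthorship { primary: Vec<u64>, secondary: Vec<u64>, secondary_vrf: Vec<u64> }

/// Buckets `(slot, authority index, claim)` triples the same way as the
/// `match claim { .. }` in `epoch_authorship`.
fn bucket(claims: Vec<(u64, usize, PreDigest)>) -> HashMap<usize, EpochAuthorship> {
    let mut out: HashMap<usize, EpochAuthorship> = HashMap::new();
    for (slot, key, claim) in claims {
        let entry = out.entry(key).or_default();
        match claim {
            PreDigest::Primary => entry.primary.push(slot),
            PreDigest::SecondaryPlain => entry.secondary.push(slot),
            PreDigest::SecondaryVRF => entry.secondary_vrf.push(slot),
        }
    }
    out
}

fn main() {
    let out = bucket(vec![(1, 0, PreDigest::Primary), (2, 0, PreDigest::SecondaryVRF)]);
    assert_eq!(out[&0].primary, vec![1]);
    assert_eq!(out[&0].secondary_vrf, vec![2]);
}
```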
#[derive(Default, Debug, Deserialize, Serialize)] pub struct EpochAuthorship { /// the array of primary slots that can be claimed @@ -184,7 +171,7 @@ pub enum Error { /// Consensus error Consensus(ConsensusError), /// Errors that can be formatted as a String - StringError(String) + StringError(String), } impl From for jsonrpc_core::Error { @@ -197,27 +184,29 @@ impl From for jsonrpc_core::Error { } } -/// fetches the epoch data for a given slot_number. -fn epoch_data( +/// Fetches the epoch data for a given slot. +async fn epoch_data( epoch_changes: &SharedEpochChanges, client: &Arc, babe_config: &Config, - slot_number: u64, + slot: u64, select_chain: &SC, ) -> Result - where - B: BlockT, - C: HeaderBackend + HeaderMetadata + 'static, - SC: SelectChain, +where + B: BlockT, + C: HeaderBackend + HeaderMetadata + 'static, + SC: SelectChain, { - let parent = select_chain.best_chain()?; - epoch_changes.lock().epoch_data_for_child_of( - descendent_query(&**client), - &parent.hash(), - parent.number().clone(), - slot_number, - |slot| Epoch::genesis(&babe_config, slot), - ) + let parent = select_chain.best_chain().await?; + epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&**client), + &parent.hash(), + parent.number().clone(), + slot.into(), + |slot| Epoch::genesis(&babe_config, slot), + ) .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) } @@ -225,31 +214,27 @@ fn epoch_data( #[cfg(test)] mod tests { use super::*; + use sc_keystore::LocalKeystore; + use sp_application_crypto::AppPair; + use sp_core::crypto::key_types::BABE; + use sp_keyring::Sr25519Keyring; + use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use substrate_test_runtime_client::{ - runtime::Block, - Backend, - DefaultTestClientBuilderExt, - TestClient, + runtime::Block, Backend, DefaultTestClientBuilderExt, TestClient, TestClientBuilder, TestClientBuilderExt, - TestClientBuilder, }; - use sp_application_crypto::AppPair; - use sp_keyring::Sr25519Keyring; - use sp_core::{crypto::key_types::BABE}; - use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; - use sc_keystore::LocalKeystore; - use std::sync::Arc; - use sc_consensus_babe::{Config, block_import, AuthorityPair}; use jsonrpc_core::IoHandler; + use sc_consensus_babe::{block_import, AuthorityPair, Config}; + use std::sync::Arc; /// creates keystore backed by a temp file fn create_temp_keystore( authority: Sr25519Keyring, ) -> (SyncCryptoStorePtr, tempfile::TempDir) { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(&authority.to_seed())) .expect("Creates authority key"); @@ -257,17 +242,14 @@ mod tests { } fn test_babe_rpc_handler( - deny_unsafe: DenyUnsafe + deny_unsafe: DenyUnsafe, ) -> BabeRpcHandler> { let builder = TestClientBuilder::new(); let (client, longest_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let config = Config::get_or_compute(&*client).expect("config available"); - let (_, link) = block_import( - config.clone(), - client.clone(), - client.clone(), - ).expect("can initialize block-import"); + let (_, link) = block_import(config.clone(), client.clone(), client.clone()) + .expect("can initialize block-import"); 
let epoch_changes = link.epoch_changes().clone(); let keystore = create_temp_keystore::(Sr25519Keyring::Alice).0; diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 28a3692958e18..609f96c83c194 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -1,41 +1,34 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! BABE authority selection and slot claiming. +use super::Epoch; +use codec::Encode; +use schnorrkel::{keys::PublicKey, vrf::VRFInOut}; use sp_application_crypto::AppKey; use sp_consensus_babe::{ - BABE_VRF_PREFIX, - AuthorityId, BabeAuthorityWeight, - SlotNumber, - make_transcript, - make_transcript_data, -}; -use sp_consensus_babe::digests::{ - PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, + digests::{PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest}, + make_transcript, make_transcript_data, AuthorityId, BabeAuthorityWeight, Slot, BABE_VRF_PREFIX, }; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; -use sp_core::{U256, blake2_256, crypto::Public}; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use codec::Encode; -use schnorrkel::{ - keys::PublicKey, - vrf::VRFInOut, -}; -use super::Epoch; +use sp_core::{blake2_256, crypto::Public, U256}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). @@ -50,8 +43,7 @@ pub(super) fn calculate_primary_threshold( let c = c.0 as f64 / c.1 as f64; - let theta = - authorities[authority_index].1 as f64 / + let theta = authorities[authority_index].1 as f64 / authorities.iter().map(|(_, weight)| weight).sum::() as f64; assert!(theta > 0.0, "authority with weight 0."); @@ -75,14 +67,14 @@ pub(super) fn calculate_primary_threshold( "returns None when the given value is negative; \ p is defined as `1 - n` where n is defined in (0, 1]; \ p must be a value in [0, 1); \ - qed." + qed.", ); let denom = p.denom().to_biguint().expect( "returns None when the given value is negative; \ p is defined as `1 - n` where n is defined in (0, 1]; \ p must be a value in [0, 1); \ - qed." + qed.", ); ((BigUint::one() << 128) * numer / denom).to_u128().expect( @@ -104,22 +96,23 @@ pub(super) fn check_primary_threshold(inout: &VRFInOut, threshold: u128) -> bool /// authorities. 
This should always assign the slot to some authority unless the /// authorities list is empty. pub(super) fn secondary_slot_author( - slot_number: u64, + slot: Slot, authorities: &[(AuthorityId, BabeAuthorityWeight)], randomness: [u8; 32], ) -> Option<&AuthorityId> { if authorities.is_empty() { - return None; + return None } - let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); + let rand = U256::from((randomness, slot).using_encoded(blake2_256)); let authorities_len = U256::from(authorities.len()); let idx = rand % authorities_len; - let expected_author = authorities.get(idx.as_u32() as usize) - .expect("authorities not empty; index constrained to list length; \ - this is a valid index; qed"); + let expected_author = authorities.get(idx.as_u32() as usize).expect( + "authorities not empty; index constrained to list length; \ + this is a valid index; qed", + ); Some(&expected_author.0) } @@ -128,7 +121,7 @@ pub(super) fn secondary_slot_author( /// pre-digest to use when authoring the block, or `None` if it is not our turn /// to propose. fn claim_secondary_slot( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, keys: &[(AuthorityId, usize)], keystore: &SyncCryptoStorePtr, @@ -137,32 +130,24 @@ fn claim_secondary_slot( let Epoch { authorities, randomness, epoch_index, .. } = epoch; if authorities.is_empty() { - return None; + return None } - let expected_author = super::authorship::secondary_slot_author( - slot_number, - authorities, - *randomness, - )?; + let expected_author = secondary_slot_author(slot, authorities, *randomness)?; for (authority_id, authority_index) in keys { if authority_id == expected_author { let pre_digest = if author_secondary_vrf { - let transcript_data = super::authorship::make_transcript_data( - randomness, - slot_number, - *epoch_index, - ); + let transcript_data = make_transcript_data(randomness, slot, *epoch_index); let result = SyncCryptoStore::sr25519_vrf_sign( &**keystore, AuthorityId::ID, authority_id.as_ref(), transcript_data, ); - if let Ok(signature) = result { + if let Ok(Some(signature)) = result { Some(PreDigest::SecondaryVRF(SecondaryVRFPreDigest { - slot_number, + slot, vrf_output: VRFOutput(signature.output), vrf_proof: VRFProof(signature.proof), authority_index: *authority_index as u32, @@ -170,9 +155,12 @@ fn claim_secondary_slot( } else { None } - } else if SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) { + } else if SyncCryptoStore::has_keys( + &**keystore, + &[(authority_id.to_raw_vec(), AuthorityId::ID)], + ) { Some(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - slot_number, + slot, authority_index: *authority_index as u32, })) } else { @@ -180,7 +168,7 @@ fn claim_secondary_slot( }; if let Some(pre_digest) = pre_digest { - return Some((pre_digest, authority_id.clone())); + return Some((pre_digest, authority_id.clone())) } } } @@ -193,41 +181,42 @@ fn claim_secondary_slot( /// secondary slots enabled for the given epoch, we will fallback to trying to /// claim a secondary slot. 
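`secondary_slot_author` above derives the fallback author deterministically: hash the epoch randomness together with the slot and reduce modulo the authority-set size, so every node agrees on who may claim an otherwise-empty slot. A toy version of the selection, with a 64-bit `DefaultHasher` standing in for `U256::from(blake2_256(..))` over the SCALE encoding:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn secondary_author_index(randomness: [u8; 32], slot: u64, num_authorities: u64) -> Option<u64> {
    if num_authorities == 0 {
        return None
    }
    let mut hasher = DefaultHasher::new();
    (randomness, slot).hash(&mut hasher);
    Some(hasher.finish() % num_authorities)
}

fn main() {
    // Deterministic: two nodes with the same randomness and slot agree.
    assert_eq!(
        secondary_author_index([0; 32], 42, 5),
        secondary_author_index([0; 32], 42, 5),
    );
    assert!(secondary_author_index([0; 32], 42, 0).is_none());
}
```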
pub fn claim_slot( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { - let authorities = epoch.authorities.iter() + let authorities = epoch + .authorities + .iter() .enumerate() .map(|(index, a)| (a.0.clone(), index)) .collect::>(); - claim_slot_using_keys(slot_number, epoch, keystore, &authorities) + claim_slot_using_keys(slot, epoch, keystore, &authorities) } /// Like `claim_slot`, but allows passing an explicit set of key pairs. Useful if we intend /// to make repeated calls for different slots using the same key pairs. pub fn claim_slot_using_keys( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, keystore: &SyncCryptoStorePtr, keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { - claim_primary_slot(slot_number, epoch, epoch.config.c, keystore, &keys) - .or_else(|| { - if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || - epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() - { - claim_secondary_slot( - slot_number, - &epoch, - keys, - &keystore, - epoch.config.allowed_slots.is_secondary_vrf_slots_allowed(), - ) - } else { - None - } - }) + claim_primary_slot(slot, epoch, epoch.config.c, keystore, &keys).or_else(|| { + if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || + epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() + { + claim_secondary_slot( + slot, + &epoch, + keys, + &keystore, + epoch.config.allowed_slots.is_secondary_vrf_slots_allowed(), + ) + } else { + None + } + }) } /// Claim a primary slot if it is our turn. Returns `None` if it is not our turn. @@ -235,7 +224,7 @@ pub fn claim_slot_using_keys( /// the VRF. If the VRF produces a value less than `threshold`, it is our turn, /// so it returns `Some(_)`. Otherwise, it returns `None`. fn claim_primary_slot( - slot_number: SlotNumber, + slot: Slot, epoch: &Epoch, c: (u64, u64), keystore: &SyncCryptoStorePtr, @@ -244,21 +233,13 @@ fn claim_primary_slot( let Epoch { authorities, randomness, epoch_index, .. } = epoch; for (authority_id, authority_index) in keys { - let transcript = super::authorship::make_transcript( - randomness, - slot_number, - *epoch_index - ); - let transcript_data = super::authorship::make_transcript_data( - randomness, - slot_number, - *epoch_index - ); + let transcript = make_transcript(randomness, slot, *epoch_index); + let transcript_data = make_transcript_data(randomness, slot, *epoch_index); // Compute the threshold we will use. // // We already checked that authorities contains `key.public()`, so it can't // be empty. Therefore, this division in `calculate_threshold` is safe. 
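The threshold computed just below is BABE's primary-slot lottery bound, `2^128 * (1 - (1 - c)^theta)`, where `theta` is the authority's share of the total weight; `calculate_primary_threshold` evaluates it with `num-bigint`/`num-rational` to avoid floating-point error. A float-only approximation of the same formula, for intuition rather than consensus use:

```rust
/// Probability that an authority with `weight` out of `total_weight` wins the
/// primary VRF lottery for a slot, given the chain constant `c = c.0 / c.1`.
fn primary_win_probability(c: (u64, u64), weight: u64, total_weight: u64) -> f64 {
    let c = c.0 as f64 / c.1 as f64;
    let theta = weight as f64 / total_weight as f64;
    1.0 - (1.0 - c).powf(theta)
}

fn main() {
    // c = 1/2 and four equally weighted authorities: each wins a given slot's
    // primary lottery with probability 1 - (1/2)^(1/4) ~= 0.159.
    let p = primary_win_probability((1, 2), 1, 4);
    assert!((p - 0.159).abs() < 1e-3);
    // The consensus code scales `p` by 2^128 and compares it against the low
    // 128 bits of the VRF output, which is what `check_primary_threshold` does.
}
```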
- let threshold = super::authorship::calculate_primary_threshold(c, authorities, *authority_index); + let threshold = calculate_primary_threshold(c, authorities, *authority_index); let result = SyncCryptoStore::sr25519_vrf_sign( &**keystore, @@ -266,21 +247,21 @@ fn claim_primary_slot( authority_id.as_ref(), transcript_data, ); - if let Ok(signature) = result { + if let Ok(Some(signature)) = result { let public = PublicKey::from_bytes(&authority_id.to_raw_vec()).ok()?; let inout = match signature.output.attach_input_hash(&public, transcript) { Ok(inout) => inout, Err(_) => continue, }; - if super::authorship::check_primary_threshold(&inout, threshold) { + if check_primary_threshold(&inout, threshold) { let pre_digest = PreDigest::Primary(PrimaryPreDigest { - slot_number, + slot, vrf_output: VRFOutput(signature.output), vrf_proof: VRFProof(signature.proof), authority_index: *authority_index as u32, }); - return Some((pre_digest, authority_id.clone())); + return Some((pre_digest, authority_id.clone())) } } } @@ -291,10 +272,10 @@ fn claim_primary_slot( #[cfg(test)] mod tests { use super::*; - use std::sync::Arc; - use sp_core::{sr25519::Pair, crypto::Pair as _}; - use sp_consensus_babe::{AuthorityId, BabeEpochConfiguration, AllowedSlots}; use sc_keystore::LocalKeystore; + use sp_consensus_babe::{AllowedSlots, AuthorityId, BabeEpochConfiguration}; + use sp_core::{crypto::Pair as _, sr25519::Pair}; + use std::sync::Arc; #[test] fn claim_secondary_plain_slot_works() { @@ -303,7 +284,8 @@ mod tests { &*keystore, AuthorityId::ID, Some(sp_core::crypto::DEV_PHRASE), - ).unwrap(); + ) + .unwrap(); let authorities = vec![ (AuthorityId::from(Pair::generate().0.public()), 5), @@ -312,7 +294,7 @@ mod tests { let mut epoch = Epoch { epoch_index: 10, - start_slot: 0, + start_slot: 0.into(), duration: 20, authorities: authorities.clone(), randomness: Default::default(), @@ -322,9 +304,9 @@ mod tests { }, }; - assert!(claim_slot(10, &epoch, &keystore).is_none()); + assert!(claim_slot(10.into(), &epoch, &keystore).is_none()); epoch.authorities.push((valid_public_key.clone().into(), 10)); - assert_eq!(claim_slot(10, &epoch, &keystore).unwrap().1, valid_public_key.into()); + assert_eq!(claim_slot(10.into(), &epoch, &keystore).unwrap().1, valid_public_key.into()); } } diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 287121566a417..b18220c3e360a 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -1,52 +1,53 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. 
If not, see . +// along with this program. If not, see . //! Schema for BABE epoch changes in the aux-db. -use std::sync::Arc; -use parking_lot::Mutex; -use log::info; use codec::{Decode, Encode}; +use log::info; +use crate::{migration::EpochV0, Epoch}; use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use sp_runtime::traits::Block as BlockT; +use sc_consensus_epochs::{migration::EpochChangesForV0, EpochChangesFor, SharedEpochChanges}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus_babe::{BabeBlockWeight, BabeGenesisConfiguration}; -use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges, migration::EpochChangesForV0}; -use crate::{Epoch, migration::EpochV0}; +use sp_runtime::traits::Block as BlockT; const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; const BABE_EPOCH_CHANGES_KEY: &[u8] = b"babe_epoch_changes"; const BABE_EPOCH_CHANGES_CURRENT_VERSION: u32 = 2; -fn block_weight_key(block_hash: H) -> Vec { +/// The aux storage key used to store the block weight of the given block hash. +pub fn block_weight_key(block_hash: H) -> Vec { (b"block_weight", block_hash).encode() } fn load_decode(backend: &B, key: &[u8]) -> ClientResult> - where - B: AuxStore, - T: Decode, +where + B: AuxStore, + T: Decode, { let corrupt = |e: codec::Error| { - ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e.what())) + ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e)) }; match backend.get_aux(key)? { None => Ok(None), - Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt) + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), } } @@ -58,37 +59,32 @@ pub fn load_epoch_changes( let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; let maybe_epoch_changes = match version { - None => load_decode::<_, EpochChangesForV0>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?.map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), - Some(1) => load_decode::<_, EpochChangesFor>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?.map(|v1| v1.map(|_, _, epoch| epoch.migrate(config))), - Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => load_decode::<_, EpochChangesFor>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?, - Some(other) => { - return Err(ClientError::Backend( - format!("Unsupported BABE DB version: {:?}", other) - )) - }, + None => + load_decode::<_, EpochChangesForV0>(backend, BABE_EPOCH_CHANGES_KEY)? + .map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), + Some(1) => + load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)? + .map(|v1| v1.map(|_, _, epoch| epoch.migrate(config))), + Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => + load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)?, + Some(other) => + return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other))), }; - let epoch_changes = Arc::new(Mutex::new(maybe_epoch_changes.unwrap_or_else(|| { - info!(target: "babe", - "👶 Creating empty BABE epoch changes on what appears to be first startup." - ); - EpochChangesFor::::default() - }))); + let epoch_changes = + SharedEpochChanges::::new(maybe_epoch_changes.unwrap_or_else(|| { + info!( + target: "babe", + "👶 Creating empty BABE epoch changes on what appears to be first startup.", + ); + EpochChangesFor::::default() + })); // rebalance the tree after deserialization. this isn't strictly necessary // since the tree is now rebalanced on every update operation. 
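`load_epoch_changes` above dispatches on a stored schema version: a missing version means v0 data that must be migrated, known old versions are migrated in place, the current version decodes directly, and anything else is rejected. The shape of that pattern with toy types (the v1 arm collapsed away for brevity), using `parity-scale-codec` with its `derive` feature, as the in-tree `codec` alias does:

```rust
use parity_scale_codec::{Decode, Encode};

#[derive(Encode, Decode)]
struct EpochV0 { epoch_index: u64 }

#[derive(Encode, Decode, Debug, PartialEq)]
struct EpochV2 { epoch_index: u64, duration: u64 }

impl EpochV0 {
    // Hypothetical migration, analogous to `epoch.migrate(config)` above.
    fn migrate(self) -> EpochV2 {
        EpochV2 { epoch_index: self.epoch_index, duration: 0 }
    }
}

fn load(version: Option<u32>, raw: &[u8]) -> Result<EpochV2, String> {
    let corrupt = |e: parity_scale_codec::Error| format!("DB is corrupted. Decode error: {}", e);
    match version {
        None => EpochV0::decode(&mut &raw[..]).map(EpochV0::migrate).map_err(corrupt),
        Some(2) => EpochV2::decode(&mut &raw[..]).map_err(corrupt),
        Some(other) => Err(format!("Unsupported DB version: {:?}", other)),
    }
}

fn main() {
    let raw = EpochV0 { epoch_index: 7 }.encode();
    assert_eq!(load(None, &raw), Ok(EpochV2 { epoch_index: 7, duration: 0 }));
}
```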
but since the // tree wasn't rebalanced initially it's useful to temporarily leave it here // to avoid having to wait until an import for rebalancing. - epoch_changes.lock().rebalance(); + epoch_changes.shared_data().rebalance(); Ok(epoch_changes) } @@ -97,15 +93,16 @@ pub fn load_epoch_changes( pub(crate) fn write_epoch_changes( epoch_changes: &EpochChangesFor, write_aux: F, -) -> R where +) -> R +where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { BABE_EPOCH_CHANGES_CURRENT_VERSION.using_encoded(|version| { let encoded_epoch_changes = epoch_changes.encode(); - write_aux( - &[(BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), - (BABE_EPOCH_CHANGES_VERSION, version)], - ) + write_aux(&[ + (BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), + (BABE_EPOCH_CHANGES_VERSION, version), + ]) }) } @@ -114,15 +111,12 @@ pub(crate) fn write_block_weight( block_hash: H, block_weight: BabeBlockWeight, write_aux: F, -) -> R where +) -> R +where F: FnOnce(&[(Vec, &[u8])]) -> R, { let key = block_weight_key(block_hash); - block_weight.using_encoded(|s| - write_aux( - &[(key, s)], - ) - ) + block_weight.using_encoded(|s| write_aux(&[(key, s)])) } /// Load the cumulative chain-weight associated with a block. @@ -138,18 +132,18 @@ mod test { use super::*; use crate::migration::EpochV0; use fork_tree::ForkTree; - use substrate_test_runtime_client; + use sc_consensus_epochs::{EpochHeader, PersistedEpoch, PersistedEpochHeader}; + use sc_network_test::Block as TestBlock; + use sp_consensus::Error as ConsensusError; + use sp_consensus_babe::{AllowedSlots, BabeGenesisConfiguration}; use sp_core::H256; use sp_runtime::traits::NumberFor; - use sp_consensus_babe::{AllowedSlots, BabeGenesisConfiguration}; - use sc_consensus_epochs::{PersistedEpoch, PersistedEpochHeader, EpochHeader}; - use sp_consensus::Error as ConsensusError; - use sc_network_test::Block as TestBlock; + use substrate_test_runtime_client; #[test] fn load_decode_from_v0_epoch_changes() { let epoch = EpochV0 { - start_slot: 0, + start_slot: 0.into(), authorities: vec![], randomness: [0; 32], epoch_index: 1, @@ -157,26 +151,30 @@ mod test { }; let client = substrate_test_runtime_client::new(); let mut v0_tree = ForkTree::, _>::new(); - v0_tree.import::<_, ConsensusError>( - Default::default(), - Default::default(), - PersistedEpoch::Regular(epoch), - &|_, _| Ok(false), // Test is single item only so this can be set to false. - ).unwrap(); - - client.insert_aux( - &[(BABE_EPOCH_CHANGES_KEY, - &EpochChangesForV0::::from_raw(v0_tree).encode()[..])], - &[], - ).unwrap(); - - assert_eq!( - load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), - None, - ); + v0_tree + .import::<_, ConsensusError>( + Default::default(), + Default::default(), + PersistedEpoch::Regular(epoch), + &|_, _| Ok(false), // Test is single item only so this can be set to false. 
+ ) + .unwrap(); + + client + .insert_aux( + &[( + BABE_EPOCH_CHANGES_KEY, + &EpochChangesForV0::::from_raw(v0_tree).encode()[..], + )], + &[], + ) + .unwrap(); + + assert_eq!(load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), None); let epoch_changes = load_epoch_changes::( - &client, &BabeGenesisConfiguration { + &client, + &BabeGenesisConfiguration { slot_duration: 10, epoch_length: 4, c: (3, 10), @@ -184,30 +182,26 @@ mod test { randomness: Default::default(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }, - ).unwrap(); + ) + .unwrap(); assert!( - epoch_changes.lock() + epoch_changes + .shared_data() .tree() .iter() .map(|(_, _, epoch)| epoch.clone()) .collect::>() == vec![PersistedEpochHeader::Regular(EpochHeader { - start_slot: 0, - end_slot: 100, + start_slot: 0.into(), + end_slot: 100.into(), })], ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. - write_epoch_changes::( - &epoch_changes.lock(), - |values| { - client.insert_aux(values, &[]).unwrap(); - }, - ); + write_epoch_changes::(&epoch_changes.shared_data(), |values| { + client.insert_aux(values, &[]).unwrap(); + }); - assert_eq!( - load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), - Some(2), - ); + assert_eq!(load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), Some(2)); } } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 6105e9876bb5e..a0b6bde025b3f 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! # BABE (Blind Assignment for Blockchain Extension) //! @@ -59,78 +61,84 @@ //! blocks) and will go with the longest one in case of a tie. //! //! An in-depth description and analysis of the protocol can be found here: -//! +//! 
#![forbid(unsafe_code)] #![warn(missing_docs)] -pub use sp_consensus_babe::{ - BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, - BabeEpochConfiguration, BabeGenesisConfiguration, - AuthorityId, AuthorityPair, AuthoritySignature, - BabeAuthorityWeight, VRF_OUTPUT_LENGTH, - digests::{ - CompatibleDigestItem, NextEpochDescriptor, NextConfigDescriptor, PreDigest, - PrimaryPreDigest, SecondaryPlainPreDigest, + +use std::{ + borrow::Cow, collections::HashMap, convert::TryInto, pin::Pin, sync::Arc, time::Duration, u64, +}; + +use codec::{Decode, Encode}; +use futures::{ + channel::{ + mpsc::{channel, Receiver, Sender}, + oneshot, }, + prelude::*, }; -pub use sp_consensus::SyncOracle; -use std::{ - collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, - any::Any, borrow::Cow, convert::TryInto, +use log::{debug, info, log, trace, warn}; +use parking_lot::Mutex; +use prometheus_endpoint::Registry; +use retain_mut::RetainMut; +use schnorrkel::SignatureError; + +use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider}; +use sc_consensus::{ + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, + StateAction, + }, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, }; -use sp_consensus::{ImportResult, CanAuthorWith}; -use sp_consensus::import_queue::{ - BoxJustificationImport, BoxFinalityProofImport, +use sc_consensus_epochs::{ + descendent_query, Epoch as EpochT, EpochChangesFor, SharedEpochChanges, ViableEpochDescriptor, }; -use sp_core::crypto::Public; +use sc_consensus_slots::{ + check_equivocation, BackoffAuthoringBlocksStrategy, CheckedHeader, InherentDataProviderExt, + SlotInfo, StorageChanges, +}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi}; use sp_application_crypto::AppKey; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, Justification, - traits::{Block as BlockT, Header, DigestItemFor, Zero}, +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{ + Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult, }; -use sp_api::{ProvideRuntimeApi, NumberFor}; -use parking_lot::Mutex; -use sp_inherents::{InherentDataProviders, InherentData}; -use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; use sp_consensus::{ - self, BlockImport, Environment, Proposer, BlockCheckParams, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, + BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SlotData, }; use sp_consensus_babe::inherents::BabeInherentData; -use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; -use sp_consensus::import_queue::{Verifier, BasicQueue, DefaultImportQueue, CacheKeyId}; -use sc_client_api::{ - backend::AuxStore, - BlockchainEvents, ProvideUncles, +use sp_consensus_slots::Slot; +use sp_core::{crypto::Public, ExecutionContext}; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, DigestItemFor, Header, Zero}, }; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use futures::channel::mpsc::{channel, Sender, Receiver}; -use retain_mut::RetainMut; -use 
futures::prelude::*; -use log::{debug, info, log, trace, warn}; -use prometheus_endpoint::Registry; -use sc_consensus_slots::{ - SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, -}; -use sc_consensus_epochs::{ - descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, -}; -use sp_blockchain::{ - Result as ClientResult, Error as ClientError, - HeaderBackend, ProvideCache, HeaderMetadata +pub use sc_consensus_slots::SlotProportion; +pub use sp_consensus::SyncOracle; +pub use sp_consensus_babe::{ + digests::{ + CompatibleDigestItem, NextConfigDescriptor, NextEpochDescriptor, PreDigest, + PrimaryPreDigest, SecondaryPlainPreDigest, + }, + AuthorityId, AuthorityPair, AuthoritySignature, BabeApi, BabeAuthorityWeight, BabeBlockWeight, + BabeEpochConfiguration, BabeGenesisConfiguration, ConsensusLog, BABE_ENGINE_ID, + VRF_OUTPUT_LENGTH, }; -use schnorrkel::SignatureError; -use codec::{Encode, Decode}; -use sp_api::ApiExt; -mod verification; +pub use aux_schema::load_block_weight as block_weight; + mod migration; +mod verification; -pub mod aux_schema; pub mod authorship; +pub mod aux_schema; #[cfg(test)] mod tests; @@ -140,9 +148,9 @@ pub struct Epoch { /// The epoch index. pub epoch_index: u64, /// The starting slot of the epoch. - pub start_slot: SlotNumber, + pub start_slot: Slot, /// The duration of this epoch. - pub duration: SlotNumber, + pub duration: u64, /// The authorities and their weights. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch. @@ -153,11 +161,11 @@ pub struct Epoch { impl EpochT for Epoch { type NextEpochDescriptor = (NextEpochDescriptor, BabeEpochConfiguration); - type SlotNumber = SlotNumber; + type Slot = Slot; fn increment( &self, - (descriptor, config): (NextEpochDescriptor, BabeEpochConfiguration) + (descriptor, config): (NextEpochDescriptor, BabeEpochConfiguration), ) -> Epoch { Epoch { epoch_index: self.epoch_index + 1, @@ -169,25 +177,35 @@ impl EpochT for Epoch { } } - fn start_slot(&self) -> SlotNumber { + fn start_slot(&self) -> Slot { self.start_slot } - fn end_slot(&self) -> SlotNumber { + fn end_slot(&self) -> Slot { self.start_slot + self.duration } } +impl From for Epoch { + fn from(epoch: sp_consensus_babe::Epoch) -> Self { + Epoch { + epoch_index: epoch.epoch_index, + start_slot: epoch.start_slot, + duration: epoch.duration, + authorities: epoch.authorities, + randomness: epoch.randomness, + config: epoch.config, + } + } +} + impl Epoch { /// Create the genesis epoch (epoch #0). This is defined to start at the slot of /// the first block, so that has to be provided. - pub fn genesis( - genesis_config: &BabeGenesisConfiguration, - slot_number: SlotNumber - ) -> Epoch { + pub fn genesis(genesis_config: &BabeGenesisConfiguration, slot: Slot) -> Epoch { Epoch { epoch_index: 0, - start_slot: slot_number, + start_slot: slot, duration: genesis_config.epoch_length, authorities: genesis_config.genesis_authorities.clone(), randomness: genesis_config.randomness, @@ -199,58 +217,96 @@ impl Epoch { } } +/// Errors encountered by the babe authorship task. 
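The `EpochT` implementation above encodes BABE's epoch bookkeeping: an epoch covers the half-open slot range `[start_slot, start_slot + duration)`, and `increment` chains epochs back-to-back while bumping the index. A toy mirror of that arithmetic:

```rust
#[derive(Clone, Debug, PartialEq)]
struct Epoch { epoch_index: u64, start_slot: u64, duration: u64 }

impl Epoch {
    fn end_slot(&self) -> u64 { self.start_slot + self.duration }

    // Mirrors `EpochT::increment`: the next epoch starts where this one ends.
    fn increment(&self) -> Epoch {
        Epoch {
            epoch_index: self.epoch_index + 1,
            start_slot: self.end_slot(),
            duration: self.duration,
        }
    }
}

fn main() {
    let genesis = Epoch { epoch_index: 0, start_slot: 7, duration: 200 };
    let next = genesis.increment();
    assert_eq!((next.epoch_index, next.start_slot, next.end_slot()), (1, 207, 407));
}
```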
#[derive(derive_more::Display, Debug)] -enum Error { +pub enum Error { + /// Multiple BABE pre-runtime digests #[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")] MultiplePreRuntimeDigests, + /// No BABE pre-runtime digest found #[display(fmt = "No BABE pre-runtime digest found")] NoPreRuntimeDigest, + /// Multiple BABE epoch change digests #[display(fmt = "Multiple BABE epoch change digests, rejecting!")] MultipleEpochChangeDigests, + /// Multiple BABE config change digests #[display(fmt = "Multiple BABE config change digests, rejecting!")] MultipleConfigChangeDigests, + /// Could not extract timestamp and slot #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] Extraction(sp_consensus::Error), + /// Could not fetch epoch #[display(fmt = "Could not fetch epoch at {:?}", _0)] FetchEpoch(B::Hash), + /// Header rejected: too far in the future #[display(fmt = "Header {:?} rejected: too far in the future", _0)] TooFarInFuture(B::Hash), + /// Parent unavailable. Cannot import #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] ParentUnavailable(B::Hash, B::Hash), + /// Slot number must increase #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] - SlotNumberMustIncrease(u64, u64), + SlotMustIncrease(Slot, Slot), + /// Header has a bad seal #[display(fmt = "Header {:?} has a bad seal", _0)] HeaderBadSeal(B::Hash), + /// Header is unsealed #[display(fmt = "Header {:?} is unsealed", _0)] HeaderUnsealed(B::Hash), + /// Slot author not found #[display(fmt = "Slot author not found")] SlotAuthorNotFound, + /// Secondary slot assignments are disabled for the current epoch. #[display(fmt = "Secondary slot assignments are disabled for the current epoch.")] SecondarySlotAssignmentsDisabled, + /// Bad signature #[display(fmt = "Bad signature on {:?}", _0)] BadSignature(B::Hash), + /// Invalid author: Expected secondary author #[display(fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", _0, _1)] InvalidAuthor(AuthorityId, AuthorityId), + /// No secondary author expected. #[display(fmt = "No secondary author expected.")] NoSecondaryAuthorExpected, - #[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)] + /// VRF verification of block by author failed + #[display( + fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", + _0, + _1 + )] VRFVerificationOfBlockFailed(AuthorityId, u128), + /// VRF verification failed #[display(fmt = "VRF verification failed: {:?}", _0)] VRFVerificationFailed(SignatureError), + /// Could not fetch parent header #[display(fmt = "Could not fetch parent header: {:?}", _0)] FetchParentHeader(sp_blockchain::Error), + /// Expected epoch change to happen. #[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)] - ExpectedEpochChange(B::Hash, u64), + ExpectedEpochChange(B::Hash, Slot), + /// Unexpected config change. 
#[display(fmt = "Unexpected config change")] UnexpectedConfigChange, + /// Unexpected epoch change #[display(fmt = "Unexpected epoch change")] UnexpectedEpochChange, + /// Parent block has no associated weight #[display(fmt = "Parent block of {} has no associated weight", _0)] ParentBlockNoAssociatedWeight(B::Hash), + /// Check inherents error #[display(fmt = "Checking inherents failed: {}", _0)] - CheckInherents(String), + CheckInherents(sp_inherents::Error), + /// Unhandled check inherents error + #[display(fmt = "Checking inherents unhandled error: {}", "String::from_utf8_lossy(_0)")] + CheckInherentsUnhandled(sp_inherents::InherentIdentifier), + /// Create inherents error. + #[display(fmt = "Creating inherents failed: {}", _0)] + CreateInherents(sp_inherents::Error), + /// Client error Client(sp_blockchain::Error), - Runtime(sp_inherents::Error), + /// Runtime Api error. + RuntimeApi(sp_api::ApiError), + /// Fork tree error ForkTree(Box>), } @@ -284,37 +340,43 @@ pub struct Config(sc_consensus_slots::SlotDuration); impl Config { /// Either fetch the slot duration from disk or compute it from the genesis /// state. - pub fn get_or_compute(client: &C) -> ClientResult where - C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, + pub fn get_or_compute(client: &C) -> ClientResult + where + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: BabeApi, { trace!(target: "babe", "Getting slot duration"); match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| { - let has_api_v1 = a.has_api_with::, _>( - &b, |v| v == 1, - )?; - let has_api_v2 = a.has_api_with::, _>( - &b, |v| v == 2, - )?; + let has_api_v1 = a.has_api_with::, _>(&b, |v| v == 1)?; + let has_api_v2 = a.has_api_with::, _>(&b, |v| v == 2)?; if has_api_v1 { - #[allow(deprecated)] { + #[allow(deprecated)] + { Ok(a.configuration_before_version_2(b)?.into()) } } else if has_api_v2 { - a.configuration(b) + a.configuration(b).map_err(Into::into) } else { Err(sp_blockchain::Error::VersionInvalid( - "Unsupported or invalid BabeApi version".to_string() + "Unsupported or invalid BabeApi version".to_string(), )) } - }).map(Self) { + }) + .map(Self) + { Ok(s) => Ok(s), Err(s) => { warn!(target: "babe", "Failed to get slot duration"); Err(s) - } + }, } } + + /// Get the inner slot duration + pub fn slot_duration(&self) -> Duration { + self.0.slot_duration() + } } impl std::ops::Deref for Config { @@ -326,7 +388,7 @@ impl std::ops::Deref for Config { } /// Parameters for BABE. -pub struct BabeParams { +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: SyncCryptoStorePtr, @@ -347,105 +409,234 @@ pub struct BabeParams { /// A sync oracle pub sync_oracle: SO, - /// Providers for inherent data. - pub inherent_data_providers: InherentDataProviders, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, /// Force authoring of blocks even if we are offline pub force_authoring: bool, + /// Strategy and parameters for backing off block production. + pub backoff_authoring_blocks: Option, + /// The source of timestamps for relative slots pub babe_link: BabeLink, /// Checks if the current native implementation can author with a runtime at a given block. pub can_author_with: CAW, + + /// The proportion of the slot dedicated to proposing. 
+ /// + /// The block proposing will be limited to this proportion of the slot from the start of the + /// slot. However, proposing can still take longer when some lenience factor is + /// applied because no blocks were produced for some slots. + pub block_proposal_slot_portion: SlotProportion, + + /// The maximum proportion of the slot dedicated to proposing with any lenience factor applied + /// due to no blocks being produced. + pub max_block_proposal_slot_portion: Option, + + /// Handle used to report telemetry. + pub telemetry: Option, } /// Start the babe worker. -pub fn start_babe(BabeParams { - keystore, - client, - select_chain, - env, - block_import, - sync_oracle, - inherent_data_providers, - force_authoring, - babe_link, - can_author_with, -}: BabeParams) -> Result< - BabeWorker, - sp_consensus::Error, -> where +pub fn start_babe( + BabeParams { + keystore, + client, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + backoff_authoring_blocks, + babe_link, + can_author_with, + block_proposal_slot_portion, + max_block_proposal_slot_portion, + telemetry, + }: BabeParams, +) -> Result, sp_consensus::Error> +where B: BlockT, - C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents - + HeaderBackend + HeaderMetadata + Send + Sync + 'static, + C: ProvideRuntimeApi + + ProvideCache + + ProvideUncles + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, C::Api: BabeApi, SC: SelectChain + 'static, E: Environment + Send + Sync + 'static, E::Proposer: Proposer>, - I: BlockImport> + Send - + Sync + 'static, - Error: std::error::Error + Send + From + From + 'static, + I: BlockImport> + + Send + + Sync + + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, - CAW: CanAuthorWith + Send + 'static, + L: sc_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, + Error: std::error::Error + Send + From + From + 'static, { + const HANDLE_BUFFER_SIZE: usize = 1024; + let config = babe_link.config; let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); let worker = BabeSlotWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(block_import)), + block_import, env, sync_oracle: sync_oracle.clone(), + justification_sync_link, force_authoring, + backoff_authoring_blocks, keystore, epoch_changes: babe_link.epoch_changes.clone(), slot_notification_sinks: slot_notification_sinks.clone(), config: config.clone(), + block_proposal_slot_portion, + max_block_proposal_slot_portion, + telemetry, }; - register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; - sc_consensus_uncles::register_uncles_inherent_data_provider( - client, - select_chain.clone(), - &inherent_data_providers, - )?; - info!(target: "babe", "👶 Starting BABE Authorship worker"); let inner = sc_consensus_slots::start_slot_worker( - config.0, + config.0.clone(), select_chain, worker, sync_oracle, - inherent_data_providers, - babe_link.time_source, + create_inherent_data_providers, can_author_with, ); + + let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); + + let answer_requests = + answer_requests(worker_rx, config.0, client, babe_link.epoch_changes.clone()); Ok(BabeWorker { - inner:
Box::pin(future::join(inner, answer_requests).map(|_| ())), slot_notification_sinks, + handle: BabeWorkerHandle(worker_tx), }) } +async fn answer_requests( + mut request_rx: Receiver>, + genesis_config: sc_consensus_slots::SlotDuration, + client: Arc, + epoch_changes: SharedEpochChanges, +) where + C: ProvideRuntimeApi + + ProvideCache + + ProvideUncles + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, +{ + while let Some(request) = request_rx.next().await { + match request { + BabeRequest::EpochForChild(parent_hash, parent_number, slot_number, response) => { + let lookup = || { + let epoch_changes = epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*client), + &parent_hash, + parent_number, + slot_number, + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&genesis_config, slot) + }) + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + + Ok(sp_consensus_babe::Epoch { + epoch_index: viable_epoch.as_ref().epoch_index, + start_slot: viable_epoch.as_ref().start_slot, + duration: viable_epoch.as_ref().duration, + authorities: viable_epoch.as_ref().authorities.clone(), + randomness: viable_epoch.as_ref().randomness, + config: viable_epoch.as_ref().config.clone(), + }) + }; + + let _ = response.send(lookup()); + }, + } + } +} + +/// Requests to the BABE service. +#[non_exhaustive] +pub enum BabeRequest { + /// Request the epoch that a child of the given block, with the given slot number, would have. + /// + /// The parent block is identified by its hash and number. + EpochForChild( + B::Hash, + NumberFor, + Slot, + oneshot::Sender>>, + ), +} + +/// A handle to the BABE worker for issuing requests. +#[derive(Clone)] +pub struct BabeWorkerHandle(Sender>); + +impl BabeWorkerHandle { + /// Send a request to the BABE service. + pub async fn send(&mut self, request: BabeRequest) { + // Failure to send means that the service is down. + // This will manifest as the receiver of the request being dropped. + let _ = self.0.send(request).await; + } +} + /// Worker for Babe which implements `Future`. This must be polled. #[must_use] pub struct BabeWorker { - inner: Pin + Send + 'static>>, - slot_notification_sinks: Arc, Epoch>)>>>>, + inner: Pin + Send + 'static>>, + slot_notification_sinks: SlotNotificationSinks, + handle: BabeWorkerHandle, } impl BabeWorker { /// Return an event stream of notifications for when a new slot happens, and the corresponding /// epoch descriptor. pub fn slot_notification_stream( - &self - ) -> Receiver<(u64, ViableEpochDescriptor, Epoch>)> { + &self, + ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { const CHANNEL_BUFFER_SIZE: usize = 1024; let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); self.slot_notification_sinks.lock().push(sink); stream } + + /// Get a handle to the worker. + pub fn handle(&self) -> BabeWorkerHandle { + self.handle.clone() + } } impl futures::Future for BabeWorker { @@ -453,46 +644,58 @@ impl futures::Future for BabeWorker { fn poll( mut self: Pin<&mut Self>, - cx: &mut futures::task::Context + cx: &mut futures::task::Context, ) -> futures::task::Poll { self.inner.as_mut().poll(cx) } } /// Slot notification sinks.
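The request/response plumbing above is easiest to see from the caller's side. A minimal sketch, assuming a handle obtained from `BabeWorker::handle()` and the `futures` oneshot channel already used in this module (the helper name `epoch_for_child` is illustrative, not part of the crate):

```rust
// Illustrative helper, not part of this crate: ask a running BABE worker
// which epoch a child of `parent_hash` at `slot` would belong to.
async fn epoch_for_child<B: BlockT>(
	handle: &mut BabeWorkerHandle<B>,
	parent_hash: B::Hash,
	parent_number: NumberFor<B>,
	slot: Slot,
) -> Option<sp_consensus_babe::Epoch> {
	let (tx, rx) = oneshot::channel();
	handle.send(BabeRequest::EpochForChild(parent_hash, parent_number, slot, tx)).await;
	// A dropped sender means the worker is gone; an inner `Err` means the
	// epoch lookup itself failed.
	rx.await.ok().and_then(|res| res.ok())
}
```

Because `BabeRequest` is `#[non_exhaustive]`, downstream matches on it need a wildcard arm, which leaves room to grow the request surface without breaking callers.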
-type SlotNotificationSinks = Arc::Hash, NumberFor, Epoch>)>>>>; +type SlotNotificationSinks = Arc< + Mutex::Hash, NumberFor, Epoch>)>>>, +>; -struct BabeSlotWorker { +struct BabeSlotWorker { client: Arc, - block_import: Arc>, + block_import: I, env: E, sync_oracle: SO, + justification_sync_link: L, force_authoring: bool, + backoff_authoring_blocks: Option, keystore: SyncCryptoStorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, config: Config, + block_proposal_slot_portion: SlotProportion, + max_block_proposal_slot_portion: Option, + telemetry: Option, } -impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for BabeSlotWorker +where B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, C::Api: BabeApi, - E: Environment, + E: Environment + Sync, E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, - SO: SyncOracle + Send + Clone, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + BS: BackoffAuthoringBlocksStrategy> + Sync, Error: std::error::Error + Send + From + From + 'static, { type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityId); type SyncOracle = SO; - type CreateProposer = Pin> + Send + 'static - >>; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; @@ -500,49 +703,52 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot "babe" } - fn block_import(&self) -> Arc> { - self.block_import.clone() + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import } fn epoch_data( &self, parent: &B::Header, - slot_number: u64, + slot: Slot, ) -> Result { - self.epoch_changes.lock().epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent.hash(), - parent.number().clone(), - slot_number, - ) + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot, + ) .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { - self.epoch_changes.lock() + self.epoch_changes + .shared_data() .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .map(|epoch| epoch.as_ref().authorities.len()) } - fn claim_slot( + async fn claim_slot( &self, _parent_header: &B::Header, - slot_number: SlotNumber, + slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { - debug!(target: "babe", "Attempting to claim slot {}", slot_number); + debug!(target: "babe", "Attempting to claim slot {}", slot); let s = authorship::claim_slot( - slot_number, - self.epoch_changes.lock().viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - )?.as_ref(), + slot, + self.epoch_changes + .shared_data() + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))? 
+ .as_ref(), &self.keystore, ); if s.is_some() { - debug!(target: "babe", "Claimed slot {}", slot_number); + debug!(target: "babe", "Claimed slot {}", slot); } s @@ -551,153 +757,153 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlot fn notify_slot( &self, _parent_header: &B::Header, - slot_number: SlotNumber, + slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) { - self.slot_notification_sinks.lock() - .retain_mut(|sink| { - match sink.try_send((slot_number, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => { - if e.is_full() { - warn!(target: "babe", "Trying to notify a slot but the channel is full"); - true - } else { - false - } + self.slot_notification_sinks.lock().retain_mut(|sink| { + match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "babe", "Trying to notify a slot but the channel is full"); + true + } else { + false }, - } - }); + } + }); } fn pre_digest_data( &self, - _slot_number: u64, + _slot: Slot, claim: &Self::Claim, ) -> Vec> { - vec![ - as CompatibleDigestItem>::babe_pre_digest(claim.0.clone()), - ] + vec![ as CompatibleDigestItem>::babe_pre_digest(claim.0.clone())] } - fn block_import_params(&self) -> Box, - StorageChanges, - Self::Claim, - Self::EpochData, - ) -> Result< - sp_consensus::BlockImportParams, - sp_consensus::Error> + Send + 'static> - { + fn block_import_params( + &self, + ) -> Box< + dyn Fn( + B::Header, + &B::Hash, + Vec, + StorageChanges, + Self::Claim, + Self::EpochData, + ) -> Result, sp_consensus::Error> + + Send + + 'static, + > { let keystore = self.keystore.clone(); - Box::new(move |header, header_hash, body, storage_changes, (_, public), epoch_descriptor| { - // sign the pre-sealed hash of the block and then - // add it to a digest item. - let public_type_pair = public.clone().into(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*keystore, - ::ID, - &public_type_pair, - header_hash.as_ref() - ) - .map_err(|e| sp_consensus::Error::CannotSign( - public.clone(), e.to_string(), - ))?; - let signature: AuthoritySignature = signature.clone().try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature( - signature, public - ))?; - let digest_item = as CompatibleDigestItem>::babe_seal(signature.into()); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(digest_item); - import_block.body = Some(body); - import_block.storage_changes = Some(storage_changes); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, - ); + Box::new( + move |header, header_hash, body, storage_changes, (_, public), epoch_descriptor| { + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let public_type_pair = public.clone().into(); + let public = public.to_raw_vec(); + let signature = SyncCryptoStore::sign_with( + &*keystore, + ::ID, + &public_type_pair, + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? 
+ .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature: AuthoritySignature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + let digest_item = + as CompatibleDigestItem>::babe_seal(signature.into()); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.state_action = StateAction::ApplyChanges( + sc_consensus::StorageChanges::Changes(storage_changes), + ); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); - Ok(import_block) - }) + Ok(import_block) + }, + ) } fn force_authoring(&self) -> bool { self.force_authoring } + fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool { + if let Some(ref strategy) = self.backoff_authoring_blocks { + if let Ok(chain_head_slot) = + find_pre_digest::(chain_head).map(|digest| digest.slot()) + { + return strategy.should_backoff( + *chain_head.number(), + chain_head_slot, + self.client.info().finalized_number, + slot, + self.logging_target(), + ) + } + } + false + } + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { &mut self.sync_oracle } - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin(self.env.init(block).map_err(|e| { - sp_consensus::Error::ClientImport(format!("{:?}", e)) - })) + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link } - fn proposing_remaining_duration( - &self, - head: &B::Header, - slot_info: &SlotInfo, - ) -> Option { - let slot_remaining = self.slot_remaining_duration(slot_info); - - let parent_slot = match find_pre_digest::(head) { - Err(_) => return Some(slot_remaining), - Ok(d) => d.slot_number(), - }; - - if let Some(slot_lenience) = - sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) - { - debug!(target: "babe", - "No block for {} slots. 
Applying exponential lenience of {}s", - slot_info.number.saturating_sub(parent_slot + 1), - slot_lenience.as_secs(), - ); + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + ) + } - Some(slot_remaining + slot_lenience) - } else { - Some(slot_remaining) - } + fn telemetry(&self) -> Option { + self.telemetry.clone() } -} -impl SlotWorker for BabeSlotWorker where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata + Send + Sync, - C::Api: BabeApi, - E: Environment + Send + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone, - Error: std::error::Error + Send + From + From + 'static, -{ - type OnSlot = Pin> + Send>>; + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> std::time::Duration { + let parent_slot = find_pre_digest::(&slot_info.chain_head).ok().map(|d| d.slot()); - fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { - >::on_slot(self, chain_head, slot_info) + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &self.block_proposal_slot_portion, + self.max_block_proposal_slot_portion.as_ref(), + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) } } /// Extract the BABE pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. -fn find_pre_digest(header: &B::Header) -> Result> -{ +pub fn find_pre_digest(header: &B::Header) -> Result> { // genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { return Ok(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - slot_number: 0, + slot: 0.into(), authority_index: 0, - })); + })) } let mut pre_digest: Option<_> = None; @@ -713,16 +919,19 @@ fn find_pre_digest(header: &B::Header) -> Result> } /// Extract the BABE epoch change digest from the given header, if it exists. -fn find_next_epoch_digest(header: &B::Header) - -> Result, Error> - where DigestItemFor: CompatibleDigestItem, +fn find_next_epoch_digest( + header: &B::Header, +) -> Result, Error> +where + DigestItemFor: CompatibleDigestItem, { let mut epoch_digest: Option<_> = None; for log in header.digest().logs() { trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, epoch_digest.is_some()) { - (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(_)), true) => + return Err(babe_err(Error::MultipleEpochChangeDigests)), (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } @@ -732,16 +941,19 @@ fn find_next_epoch_digest(header: &B::Header) } /// Extract the BABE config change digest from the given header, if it exists. 
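The inline lenience logic removed above now lives behind `sc_consensus_slots::proposing_remaining_duration`, which combines the slot-portion limits with an exponential lenience when previous slots produced no block. A rough sketch of the exponential idea only, with an assumed cap (the real constants and arithmetic live in `sc_consensus_slots`):

```rust
use std::time::Duration;

// Rough sketch, not the crate's exact arithmetic: each skipped slot doubles
// the extra proposing time, up to an illustrative cap.
fn exponential_lenience(skipped_slots: u64, proposal_window: Duration) -> Duration {
	const MAX_DOUBLINGS: u64 = 7; // illustrative cap on growth
	if skipped_slots == 0 {
		return Duration::ZERO
	}
	let doublings = skipped_slots.min(MAX_DOUBLINGS) as u32;
	proposal_window.saturating_mul(1u32 << doublings)
}
```

`max_block_proposal_slot_portion` then acts as a hard ceiling: lenience may stretch the proposal window, but never past that fraction of the slot.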
-fn find_next_config_digest(header: &B::Header) - -> Result, Error> - where DigestItemFor: CompatibleDigestItem, +fn find_next_config_digest( + header: &B::Header, +) -> Result, Error> +where + DigestItemFor: CompatibleDigestItem, { let mut config_digest: Option<_> = None; for log in header.digest().logs() { trace!(target: "babe", "Checking log {:?}, looking for config change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, config_digest.is_some()) { - (Some(ConsensusLog::NextConfigData(_)), true) => return Err(babe_err(Error::MultipleConfigChangeDigests)), + (Some(ConsensusLog::NextConfigData(_)), true) => + return Err(babe_err(Error::MultipleConfigChangeDigests)), (Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } @@ -750,27 +962,9 @@ fn find_next_config_digest(header: &B::Header) Ok(config_digest) } -#[derive(Default, Clone)] -struct TimeSource(Arc, Vec<(Instant, u64)>)>>); - -impl SlotCompatible for TimeSource { - fn extract_timestamp_and_slot( - &self, - data: &InherentData, - ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { - trace!(target: "babe", "extract timestamp"); - data.timestamp_inherent_data() - .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) - } -} - /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] pub struct BabeLink { - time_source: TimeSource, epoch_changes: SharedEpochChanges, config: Config, } @@ -788,30 +982,32 @@ impl BabeLink { } /// A verifier for Babe blocks.
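The verifier declared next consumes a `CreateInherentDataProviders` in place of the registry that was just removed. A sketch of the closure shape callers typically supply, assuming a timestamp provider paired with a BABE slot provider and a `slot_duration` captured from `Config::slot_duration()` (the constructors come from the companion provider crates, not from this file):

```rust
// Sketch of a closure-style `CreateInherentDataProviders`: derive the slot
// from a fresh system timestamp. `slot_duration` is assumed to be captured
// from `Config::slot_duration()`.
let create_inherent_data_providers = move |_parent, ()| async move {
	let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
	let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
		*timestamp,
		slot_duration,
	);
	Ok((timestamp, slot))
};
```

`InherentDataProviderExt` then exposes `slot()` on the created providers, which is where the verifier's `slot_now` comes from.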
-pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, select_chain: SelectChain, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: CIDP, config: Config, epoch_changes: SharedEpochChanges, - time_source: TimeSource, can_author_with: CAW, + telemetry: Option, } -impl BabeVerifier +impl BabeVerifier where Block: BlockT, Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi - + BabeApi, + Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, { - fn check_inherents( + async fn check_inherents( &self, block: Block, block_id: BlockId, inherent_data: InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, ) -> Result<(), Error> { if let Err(e) = self.can_author_with.can_author_with(&block_id) { debug!( @@ -823,27 +1019,28 @@ where return Ok(()) } - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(Error::Client)?; + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(Error::RuntimeApi)?; if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| { - Err(Error::CheckInherents(self.inherent_data_providers.error_to_string(&i, &e))) - }) - } else { - Ok(()) + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| Error::CheckInherents(e))?, + None => return Err(Error::CheckInherentsUnhandled(i)), + } + } } + + Ok(()) } - fn check_and_report_equivocation( + async fn check_and_report_equivocation( &self, - slot_now: SlotNumber, - slot: SlotNumber, + slot_now: Slot, + slot: Slot, header: &Block::Header, author: &AuthorityId, origin: &BlockOrigin, @@ -851,7 +1048,7 @@ where // don't report any equivocations during initial sync // as they are most likely stale. if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()); + return Ok(()) } // check if authorship of this header is an equivocation and return a proof if so. 
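One note on the `check_inherents` rewrite above: instead of stringifying errors through a registry, every `(identifier, error)` pair is handed back to the providers via `try_handle_error`, and only errors that no provider claims surface as `CheckInherentsUnhandled`. Conceptually, in a simplified synchronous sketch (the real hook is async and lives on the provider traits in `sp_inherents`):

```rust
// Simplified, synchronous sketch of per-provider error handling: claim
// identifiers we own, pass on everything else.
fn try_handle_error(
	identifier: &sp_inherents::InherentIdentifier,
	_encoded_error: &[u8],
) -> Option<Result<(), sp_inherents::Error>> {
	if identifier == &sp_timestamp::INHERENT_IDENTIFIER {
		Some(Ok(())) // ours, and judged benign here
	} else {
		None // not ours; unclaimed identifiers become `CheckInherentsUnhandled`
	}
}
```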
@@ -875,6 +1072,7 @@ where let best_id = self .select_chain .best_chain() + .await .map(|h| BlockId::Hash(h.hash())) .map_err(|e| Error::Client(e.into()))?; @@ -890,7 +1088,7 @@ where self.client .runtime_api() .generate_key_ownership_proof(block_id, slot, equivocation_proof.offender.clone()) - .map_err(Error::Client) + .map_err(Error::RuntimeApi) }; let parent_id = BlockId::Hash(*header.parent_hash()); @@ -900,8 +1098,8 @@ where Some(proof) => proof, None => { debug!(target: "babe", "Equivocation offender is not part of the authority set."); - return Ok(()); - } + return Ok(()) + }, }, }; @@ -913,7 +1111,7 @@ where equivocation_proof, key_owner_proof, ) - .map_err(Error::Client)?; + .map_err(Error::RuntimeApi)?; info!(target: "babe", "Submitted equivocation report for author {:?}", author); @@ -921,152 +1119,173 @@ where } } -impl Verifier - for BabeVerifier +type BlockVerificationResult = + Result<(BlockImportParams, Option)>>), String>; + +#[async_trait::async_trait] +impl Verifier + for BabeVerifier where Block: BlockT, - Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi - + Send + Sync + AuxStore + ProvideCache, - Client::Api: BlockBuilderApi + BabeApi, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore + + ProvideCache, + Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith + Send + Sync, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - fn verify( + async fn verify( &mut self, - origin: BlockOrigin, - header: Block::Header, - justification: Option, - mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { + mut block: BlockImportParams, + ) -> BlockVerificationResult { trace!( target: "babe", - "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", - origin, - header, - justification, - body, + "Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", + block.origin, + block.header, + block.justifications, + block.body, ); - debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len()); - let mut inherent_data = self - .inherent_data_providers - .create_inherent_data() - .map_err(Error::::Runtime)?; + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + if block.with_state() { + // When importing whole state we don't calculate epoch descriptor, but rather + // read it from the state after import. We also skip all verifications + // because there's no parent state and we trust the sync module to verify + // that the state is correct and finalized. 
+ return Ok((block, Default::default())) + } - let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) - .map_err(Error::::Extraction)?; + debug!(target: "babe", "We have {:?} logs in this header", block.header.digest().logs().len()); - let hash = header.hash(); - let parent_hash = *header.parent_hash(); + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; - let parent_header_metadata = self.client.header_metadata(parent_hash) + let slot_now = create_inherent_data_providers.slot(); + + let parent_header_metadata = self + .client + .header_metadata(parent_hash) .map_err(Error::::FetchParentHeader)?; - let pre_digest = find_pre_digest::(&header)?; - let epoch_changes = self.epoch_changes.lock(); - let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot_number(), - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? - .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - - // We add one to the current slot to allow for some small drift. - // FIXME #1019 in the future, alter this queue to allow deferring of headers - let v_params = verification::VerificationParams { - header: header.clone(), - pre_digest: Some(pre_digest), - slot_now: slot_now + 1, - epoch: viable_epoch.as_ref(), + let pre_digest = find_pre_digest::(&block.header)?; + let (check_header, epoch_descriptor) = { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot(), + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + + // We add one to the current slot to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of headers + let v_params = verification::VerificationParams { + header: block.header.clone(), + pre_digest: Some(pre_digest), + slot_now: slot_now + 1, + epoch: viable_epoch.as_ref(), + }; + + (verification::check_header::(v_params)?, epoch_descriptor) }; - match verification::check_header::(v_params)? { + match check_header { CheckedHeader::Checked(pre_header, verified_info) => { - let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() + let babe_pre_digest = verified_info + .pre_digest + .as_babe_pre_digest() .expect("check_header always returns a pre-digest digest item; qed"); - let slot_number = babe_pre_digest.slot_number(); + let slot = babe_pre_digest.slot(); // the header is valid but let's check if there was something else already // proposed at the same slot by the given author. if there was, we will // report the equivocation to the runtime. 
- if let Err(err) = self.check_and_report_equivocation( - slot_now, - slot_number, - &header, - &verified_info.author, - &origin, - ) { + if let Err(err) = self + .check_and_report_equivocation( + slot_now, + slot, + &block.header, + &verified_info.author, + &block.origin, + ) + .await + { warn!(target: "babe", "Error checking/reporting BABE equivocation: {:?}", err); } // if the body is passed through, we need to use the runtime // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. - if let Some(inner_body) = body.take() { - inherent_data.babe_replace_inherent_data(slot_number); - let block = Block::new(pre_header.clone(), inner_body); + if let Some(inner_body) = block.body { + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.babe_replace_inherent_data(slot); + let new_block = Block::new(pre_header.clone(), inner_body); self.check_inherents( - block.clone(), + new_block.clone(), BlockId::Hash(parent_hash), inherent_data, - )?; + create_inherent_data_providers, + block.origin.into(), + ) + .await?; - let (_, inner_body) = block.deconstruct(); - body = Some(inner_body); + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); } trace!(target: "babe", "Checked {:?}; importing.", pre_header); telemetry!( + self.telemetry; CONSENSUS_TRACE; "babe.checked_and_importing"; - "pre_header" => ?pre_header); + "pre_header" => ?pre_header, + ); - let mut import_block = BlockImportParams::new(origin, pre_header); - import_block.post_digests.push(verified_info.seal); - import_block.body = body; - import_block.justification = justification; - import_block.intermediates.insert( + block.header = pre_header; + block.post_digests.push(verified_info.seal); + block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); - import_block.post_hash = Some(hash); + block.post_hash = Some(hash); - Ok((import_block, Default::default())) - } + Ok((block, Default::default())) + }, CheckedHeader::Deferred(a, b) => { debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!(CONSENSUS_DEBUG; "babe.header_too_far_in_future"; + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "babe.header_too_far_in_future"; "hash" => ?hash, "a" => ?a, "b" => ?b ); Err(Error::::TooFarInFuture(hash).into()) - } + }, } } } -/// Register the babe inherent data provider, if not registered already. -pub fn register_babe_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, - slot_duration: u64, -) -> Result<(), sp_consensus::Error> { - debug!(target: "babe", "Registering"); - if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_consensus_babe::inherents::InherentDataProvider::new(slot_duration)) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } -} - /// A block-import handler for BABE. /// /// This scans each imported block for epoch change signals. 
The signals are @@ -1100,27 +1319,95 @@ impl BabeBlockImport { block_import: I, config: Config, ) -> Self { - BabeBlockImport { - client, - inner: block_import, - epoch_changes, - config, - } + BabeBlockImport { client, inner: block_import, epoch_changes, config } } } -impl BlockImport for BabeBlockImport where +impl BabeBlockImport +where Block: BlockT, Inner: BlockImport> + Send + Sync, Inner::Error: Into, - Client: HeaderBackend + HeaderMetadata - + AuxStore + ProvideRuntimeApi + ProvideCache + Send + Sync, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + ProvideCache + + Send + + Sync, + Client::Api: BabeApi + ApiExt, +{ + /// Import whole state after warp sync. + // This function makes multiple transactions to the DB. If one of them fails we may + // end up in an inconsistent state and have to resync. + async fn import_state( + &mut self, + mut block: BlockImportParams>, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let parent_hash = *block.header.parent_hash(); + let number = *block.header.number(); + + block.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + // Reset block weight. + aux_schema::write_block_weight(hash, 0, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // First make the client import the state. + let import_result = self.inner.import_block(block, new_cache).await; + let aux = match import_result { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => + return Err(ConsensusError::ClientImport(format!( + "Unexpected import result: {:?}", + r + ))), + Err(r) => return Err(r.into()), + }; + + // Read epoch info from the imported state. + let block_id = BlockId::hash(hash); + let current_epoch = self.client.runtime_api().current_epoch(&block_id).map_err(|e| { + ConsensusError::ClientImport(babe_err::(Error::RuntimeApi(e)).into()) + })?; + let next_epoch = self.client.runtime_api().next_epoch(&block_id).map_err(|e| { + ConsensusError::ClientImport(babe_err::(Error::RuntimeApi(e)).into()) + })?; + + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + epoch_changes.reset(parent_hash, hash, number, current_epoch.into(), next_epoch.into()); + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + self.client.insert_aux(insert, []) + }) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(ImportResult::Imported(aux)) + } +} + +#[async_trait::async_trait] +impl BlockImport for BabeBlockImport +where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + ProvideCache + + Send + + Sync, Client::Api: BabeApi + ApiExt, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, @@ -1131,233 +1418,242 @@ impl BlockImport for BabeBlockImport return Ok(ImportResult::AlreadyInChain), + Ok(sp_blockchain::BlockStatus::InChain) => { + // When re-importing existing block strip away intermediates. 
+ let _ = block.take_intermediate::>(INTERMEDIATE_KEY); + block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + return self.inner.import_block(block, new_cache).await.map_err(Into::into) + }, Ok(sp_blockchain::BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } - let pre_digest = find_pre_digest::(&block.header) - .expect("valid babe headers must contain a predigest; \ - header has been already verified; qed"); - let slot_number = pre_digest.slot_number(); + if block.with_state() { + return self.import_state(block, new_cache).await + } + + let pre_digest = find_pre_digest::(&block.header).expect( + "valid babe headers must contain a predigest; header has been already verified; qed", + ); + let slot = pre_digest.slot(); let parent_hash = *block.header.parent_hash(); - let parent_header = self.client.header(BlockId::Hash(parent_hash)) + let parent_header = self + .client + .header(BlockId::Hash(parent_hash)) .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or_else(|| ConsensusError::ChainLookup(babe_err( - Error::::ParentUnavailable(parent_hash, hash) - ).into()))?; + .ok_or_else(|| { + ConsensusError::ChainLookup( + babe_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; - let parent_slot = find_pre_digest::(&parent_header) - .map(|d| d.slot_number()) - .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ - header has already been verified; qed"); + let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot()).expect( + "parent is non-genesis; valid BABE headers contain a pre-digest; header has already \ + been verified; qed", + ); // make sure that slot number is strictly increasing - if slot_number <= parent_slot { - return Err( - ConsensusError::ClientImport(babe_err( - Error::::SlotNumberMustIncrease(parent_slot, slot_number) - ).into()) - ); - } - - let mut epoch_changes = self.epoch_changes.lock(); - - // check if there's any epoch change expected to happen at this slot. - // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true - // if this is the first block in its chain for that epoch. - // - // also provides the total weight of the chain, including the imported block. - let (epoch_descriptor, first_in_epoch, parent_weight) = { - let parent_weight = if *parent_header.number() == Zero::zero() { - 0 - } else { - aux_schema::load_block_weight(&*self.client, parent_hash) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .ok_or_else(|| ConsensusError::ClientImport( - babe_err(Error::::ParentBlockNoAssociatedWeight(hash)).into() - ))? - }; - - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; - - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - (epoch_descriptor, first_in_epoch, parent_weight) - }; - - let total_weight = parent_weight + pre_digest.added_weight(); - - // search for this all the time so we can reject unexpected announcements. 
- let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - let next_config_digest = find_next_config_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { - (true, true, _) => {}, - (false, false, false) => {}, - (false, false, true) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedConfigChange).into(), - ) - ) - }, - (true, false, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::ExpectedEpochChange(hash, slot_number)).into(), - ) - ) - }, - (false, true, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedEpochChange).into(), - ) - ) - }, + if slot <= parent_slot { + return Err(ConsensusError::ClientImport( + babe_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), + )) } // if there's a pending epoch we'll save the previous epoch changes here // this way we can revert it if there's any error let mut old_epoch_changes = None; - let info = self.client.info(); - - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some(epoch_changes.clone()); - - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - ).ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; - - let epoch_config = next_config_digest.map(Into::into).unwrap_or_else( - || viable_epoch.as_ref().config.clone() - ); - - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info + // Use an extra scope to make the compiler happy, because otherwise it complains about the + // mutex guard even if we dropped it. + let mut epoch_changes = { + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + + // check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let (epoch_descriptor, first_in_epoch, parent_weight) = { + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ClientImport( + babe_err(Error::::ParentBlockNoAssociatedWeight(hash)) + .into(), + ) + })? + }; + + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) }; - log!(target: "babe", - log_level, - "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot_number, - viable_epoch.as_ref().start_slot, - ); + let total_weight = parent_weight + pre_digest.added_weight(); - let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); + // search for this all the time so we can reject unexpected announcements.
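The digest expectations enforced just below reduce to a small truth table: an epoch-change digest must appear on exactly the first block of a new epoch, and a config-change digest may only accompany an epoch change. Distilled into an illustrative predicate (not part of the crate's API):

```rust
// Illustrative predicate for the digest checks below.
fn digests_consistent(first_in_epoch: bool, has_epoch: bool, has_config: bool) -> bool {
	match (first_in_epoch, has_epoch, has_config) {
		(true, true, _) => true,       // first block of a new epoch announces it
		(false, false, false) => true, // ordinary block, nothing announced
		_ => false,                    // anything else is rejected on import
	}
}
```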
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let next_config_digest = find_next_config_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - log!(target: "babe", - log_level, - "👶 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, - ); + match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { + (true, true, _) => {}, + (false, false, false) => {}, + (false, false, true) => + return Err(ConsensusError::ClientImport( + babe_err(Error::::UnexpectedConfigChange).into(), + )), + (true, false, _) => + return Err(ConsensusError::ClientImport( + babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true, _) => + return Err(ConsensusError::ClientImport( + babe_err(Error::::UnexpectedEpochChange).into(), + )), + } - // prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. - let prune_and_import = || { - prune_finalized( - self.client.clone(), - &mut epoch_changes, - )?; - - epoch_changes.import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + let info = self.client.info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some((*epoch_changes).clone()); + + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + let epoch_config = next_config_digest + .map(Into::into) + .unwrap_or_else(|| viable_epoch.as_ref().config.clone()); + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "babe", + log_level, + "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); - Ok(()) - }; + let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); - if let Err(e) = prune_and_import() { - debug!(target: "babe", "Failed to launch next epoch: {:?}", e); - *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e); - } + log!(target: "babe", + log_level, + "👶 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); - crate::aux_schema::write_epoch_changes::( - &*epoch_changes, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) - ); - } + // prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "babe", "Failed to launch next epoch: {:?}", e); + *epoch_changes = + old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e) + } - aux_schema::write_block_weight( - hash, - total_weight, - |values| block.auxiliary.extend( - values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ), - ); + crate::aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + } - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); - let last_best_weight = if &last_best == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, last_best) - .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? - .ok_or_else( - || ConsensusError::ChainLookup("No block weight for parent header.".to_string()) - )? + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + "No block weight for parent header.".to_string(), + ) + })? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) }; - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) + // Release the mutex guard; the data itself stays logically locked and can be re-acquired below via `upgrade()` + epoch_changes.release_mutex() }; - let import_result = self.inner.import_block(block, new_cache); + let import_result = self.inner.import_block(block, new_cache).await; // revert to the original epoch changes in case there's an error // importing the block if import_result.is_err() { if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes = old_epoch_changes; + *epoch_changes.upgrade() = old_epoch_changes; } } import_result.map_err(Into::into) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } } @@ -1365,30 +1661,34 @@ impl BlockImport for BabeBlockImport( client: Arc, epoch_changes: &mut EpochChangesFor, -) -> Result<(), ConsensusError> where +) -> Result<(), ConsensusError> +where Block: BlockT, Client: HeaderBackend + HeaderMetadata, { let info = client.info(); let finalized_slot = { - let finalized_header = client.header(BlockId::Hash(info.finalized_hash)) + let finalized_header = client + .header(BlockId::Hash(info.finalized_hash)) .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))? - .expect("best finalized hash was given by client; \ - finalized headers must exist in db; qed"); + .expect( + "best finalized hash was given by client; finalized headers must exist in db; qed", + ); find_pre_digest::(&finalized_header) - .expect("finalized header must be valid; \ - valid blocks have a pre-digest; qed") - .slot_number() + .expect("finalized header must be valid; valid blocks have a pre-digest; qed") + .slot() }; - epoch_changes.prune_finalized( - descendent_query(&*client), - &info.finalized_hash, - info.finalized_number, - finalized_slot, - ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; Ok(()) } @@ -1402,30 +1702,19 @@ pub fn block_import( config: Config, wrapped_block_import: I, client: Arc, -) -> ClientResult<(BabeBlockImport, BabeLink)> where +) -> ClientResult<(BabeBlockImport, BabeLink)> +where Client: AuxStore + HeaderBackend + HeaderMetadata, { let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; - let link = BabeLink { - epoch_changes: epoch_changes.clone(), - time_source: Default::default(), - config: config.clone(), - }; + let link = BabeLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; // NOTE: this isn't entirely necessary, but since we didn't use to prune the // epoch tree it is useful as a migration, so that nodes prune long trees on // startup rather than waiting until importing the next epoch change block.
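The custom fork-choice block above is a lexicographic comparison on (weight, height). As a self-contained distillation (`BabeBlockWeight` is a `u32` alias in the real crate; the function name is illustrative):

```rust
// Distilled fork-choice rule: prefer the chain with more primary blocks
// (greater BABE weight) and break ties by block height.
fn is_new_best(total_weight: u32, best_weight: u32, number: u64, best_number: u64) -> bool {
	total_weight > best_weight || (total_weight == best_weight && number > best_number)
}
```

A lighter chain is never preferred, which is what makes primary-block count dominate raw chain length.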
- prune_finalized( - client.clone(), - &mut epoch_changes.lock(), - )?; + prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; - let import = BabeBlockImport::new( - client, - epoch_changes, - wrapped_block_import, - config, - ); + let import = BabeBlockImport::new(client, epoch_changes, wrapped_block_import, config); Ok((import, link)) } @@ -1439,82 +1728,49 @@ pub fn block_import( /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. -pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, - finality_proof_import: Option>, client: Arc, select_chain: SelectChain, - inherent_data_providers: InherentDataProviders, - spawner: &impl sp_core::traits::SpawnNamed, + create_inherent_data_providers: CIDP, + spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, can_author_with: CAW, -) -> ClientResult> where - Inner: BlockImport> - + Send + Sync + 'static, - Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, - Client: HeaderBackend + HeaderMetadata, - Client::Api: BlockBuilderApi + BabeApi + ApiExt, + telemetry: Option, +) -> ClientResult> +where + Inner: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync + + 'static, + Client: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata + + AuxStore + + Send + + Sync + + 'static, + Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; - let verifier = BabeVerifier { - client, select_chain, - inherent_data_providers, + create_inherent_data_providers, config: babe_link.config, epoch_changes: babe_link.epoch_changes, - time_source: babe_link.time_source, can_author_with, + telemetry, + client, }; - Ok(BasicQueue::new( - verifier, - Box::new(block_import), - justification_import, - finality_proof_import, - spawner, - registry, - )) -} - -/// BABE test helpers. Utility methods for manually authoring blocks. -#[cfg(feature = "test-helpers")] -pub mod test_helpers { - use super::*; - - /// Try to claim the given slot and return a `BabePreDigest` if - /// successful. 
- pub fn claim_slot( - slot_number: u64, - parent: &B::Header, - client: &C, - keystore: SyncCryptoStorePtr, - link: &BabeLink, - ) -> Option where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, - C::Api: BabeApi, - { - let epoch_changes = link.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(client), - &parent.hash(), - parent.number().clone(), - slot_number, - |slot| Epoch::genesis(&link.config, slot), - ).unwrap().unwrap(); - - authorship::claim_slot( - slot_number, - &epoch, - &keystore, - ).map(|(digest, _)| digest) - } + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } diff --git a/client/consensus/babe/src/migration.rs b/client/consensus/babe/src/migration.rs index 2a5a8749cc3c1..a248c9da24db8 100644 --- a/client/consensus/babe/src/migration.rs +++ b/client/consensus/babe/src/migration.rs @@ -1,9 +1,28 @@ -use codec::{Encode, Decode}; -use sc_consensus_epochs::Epoch as EpochT; +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use crate::{ - Epoch, SlotNumber, AuthorityId, BabeAuthorityWeight, BabeGenesisConfiguration, - BabeEpochConfiguration, VRF_OUTPUT_LENGTH, NextEpochDescriptor, + AuthorityId, BabeAuthorityWeight, BabeEpochConfiguration, BabeGenesisConfiguration, Epoch, + NextEpochDescriptor, VRF_OUTPUT_LENGTH, }; +use codec::{Decode, Encode}; +use sc_consensus_epochs::Epoch as EpochT; +use sp_consensus_slots::Slot; /// BABE epoch information, version 0. #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)] @@ -11,9 +30,9 @@ pub struct EpochV0 { /// The epoch index. pub epoch_index: u64, /// The starting slot of the epoch. - pub start_slot: SlotNumber, + pub start_slot: Slot, /// The duration of this epoch. - pub duration: SlotNumber, + pub duration: u64, /// The authorities and their weights. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch. 
@@ -22,12 +41,9 @@ pub struct EpochV0 { impl EpochT for EpochV0 { type NextEpochDescriptor = NextEpochDescriptor; - type SlotNumber = SlotNumber; + type Slot = Slot; - fn increment( - &self, - descriptor: NextEpochDescriptor - ) -> EpochV0 { + fn increment(&self, descriptor: NextEpochDescriptor) -> EpochV0 { EpochV0 { epoch_index: self.epoch_index + 1, start_slot: self.start_slot + self.duration, @@ -37,11 +53,11 @@ impl EpochT for EpochV0 { } } - fn start_slot(&self) -> SlotNumber { + fn start_slot(&self) -> Slot { self.start_slot } - fn end_slot(&self) -> SlotNumber { + fn end_slot(&self) -> Slot { self.start_slot + self.duration } } @@ -55,10 +71,7 @@ impl EpochV0 { duration: self.duration, authorities: self.authorities, randomness: self.randomness, - config: BabeEpochConfiguration { - c: config.c, - allowed_slots: config.allowed_slots, - }, + config: BabeEpochConfiguration { c: config.c, allowed_slots: config.allowed_slots }, } } } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 6b0f5870ba53d..c033f4535be0b 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! 
BABE testsuite @@ -21,37 +23,31 @@ #![allow(deprecated)] use super::*; use authorship::claim_slot; -use sp_core::crypto::Pair; -use sp_keystore::{ - SyncCryptoStore, - vrf::make_transcript as transcript_from_data, -}; -use sp_consensus_babe::{ - AuthorityPair, - SlotNumber, - AllowedSlots, - make_transcript, - make_transcript_data, -}; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, - import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, -}; -use sc_network_test::*; -use sc_network_test::{Block as TestBlock, PeersClient}; -use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; -use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; -use sc_client_api::{BlockchainEvents, backend::TransactionFor}; +use futures::executor::block_on; use log::debug; -use std::{time::Duration, cell::RefCell, task::Poll}; use rand::RngCore; -use rand_chacha::{ - rand_core::SeedableRng, - ChaChaRng, -}; +use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_client_api::{backend::TransactionFor, BlockchainEvents}; +use sc_consensus::{BoxBlockImport, BoxJustificationImport}; +use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_keystore::LocalKeystore; +use sc_network::config::ProtocolConfig; +use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; +use sp_consensus::{AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal}; +use sp_consensus_babe::{ + inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, + AuthorityPair, Slot, +}; +use sp_core::crypto::Pair; +use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; +use sp_runtime::{ + generic::DigestItem, + traits::{Block as BlockT, DigestFor}, +}; +use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; +use std::{cell::RefCell, task::Poll, time::Duration}; type Item = DigestItem; @@ -59,7 +55,7 @@ type Error = sp_blockchain::Error; type TestClient = substrate_test_runtime_client::client::Client< substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, + substrate_test_runtime_client::ExecutorDispatch, TestBlock, substrate_test_runtime_client::runtime::RuntimeApi, >; @@ -72,6 +68,9 @@ enum Stage { type Mutator = Arc; +type BabeBlockImport = + PanickingBlockImport>>; + #[derive(Clone)] struct DummyFactory { client: Arc, @@ -84,7 +83,7 @@ struct DummyProposer { factory: DummyFactory, parent_hash: Hash, parent_number: u64, - parent_slot: SlotNumber, + parent_slot: Slot, } impl Environment for DummyFactory { @@ -92,13 +91,10 @@ impl Environment for DummyFactory { type Proposer = DummyProposer; type Error = Error; - fn init(&mut self, parent_header: &::Header) - -> Self::CreateProposer - { - + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { let parent_slot = crate::find_pre_digest::(parent_header) .expect("parent header has a pre-digest") - .slot_number(); + .slot(); future::ready(Ok(DummyProposer { factory: self.clone(), @@ -110,22 +106,24 @@ impl Environment for DummyFactory { } impl DummyProposer { - fn propose_with(&mut self, pre_digests: DigestFor) - -> future::Ready< - Result< - Proposal< - TestBlock, - sc_client_api::TransactionFor - >, - Error - > - > - { - let block_builder = self.factory.client.new_block_at( - 
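The `init` implementation above reads the parent slot with `find_pre_digest`. A standalone sketch of what that helper does, with a simplified digest type whose bare slot payload stands in for the SCALE-encoded pre-digest:

const BABE_ENGINE_ID: [u8; 4] = *b"BABE";

// Simplified digest item: the real type carries an encoded payload rather
// than a bare slot number.
enum DigestItem {
    PreRuntime([u8; 4], u64),
    Other,
}

// Return the slot from the unique BABE pre-digest; more than one is invalid.
fn find_pre_digest_slot(logs: &[DigestItem]) -> Option<u64> {
    let mut found = None;
    for item in logs {
        if let DigestItem::PreRuntime(engine, slot) = item {
            if *engine == BABE_ENGINE_ID {
                if found.is_some() {
                    return None; // duplicate pre-digests: reject
                }
                found = Some(*slot);
            }
        }
    }
    found
}

fn main() {
    let logs = vec![DigestItem::Other, DigestItem::PreRuntime(BABE_ENGINE_ID, 42)];
    assert_eq!(find_pre_digest_slot(&logs), Some(42));
}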
&BlockId::Hash(self.parent_hash), - pre_digests, - false, - ).unwrap(); + fn propose_with( + &mut self, + pre_digests: DigestFor, + ) -> future::Ready< + Result< + Proposal< + TestBlock, + sc_client_api::TransactionFor, + (), + >, + Error, + >, + > { + let block_builder = self + .factory + .client + .new_block_at(&BlockId::Hash(self.parent_hash), pre_digests, false) + .unwrap(); let mut block = match block_builder.build().map_err(|e| e.into()) { Ok(b) => b.block, @@ -134,18 +132,19 @@ impl DummyProposer { let this_slot = crate::find_pre_digest::(block.header()) .expect("baked block has valid pre-digest") - .slot_number(); + .slot(); // figure out if we should add a consensus digest, since the test runtime // doesn't. - let epoch_changes = self.factory.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(&*self.factory.client), - &self.parent_hash, - self.parent_number, - this_slot, - |slot| Epoch::genesis(&self.factory.config, slot), - ) + let epoch_changes = self.factory.epoch_changes.shared_data(); + let epoch = epoch_changes + .epoch_data_for_child_of( + descendent_query(&*self.factory.client), + &self.parent_hash, + self.parent_number, + this_slot, + |slot| Epoch::genesis(&self.factory.config, slot), + ) .expect("client has data to find epoch") .expect("can compute epoch for baked block"); @@ -158,7 +157,8 @@ impl DummyProposer { let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { authorities: epoch.authorities.clone(), randomness: epoch.randomness.clone(), - }).encode(); + }) + .encode(); let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); block.header.digest_mut().push(digest) } @@ -166,21 +166,24 @@ impl DummyProposer { // mutate the block header according to the mutator. (self.factory.mutator)(&mut block.header, Stage::PreSeal); - future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) + future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) } } impl Proposer for DummyProposer { type Error = Error; - type Transaction = sc_client_api::TransactionFor; - type Proposal = future::Ready, Error>>; + type Transaction = + sc_client_api::TransactionFor; + type Proposal = future::Ready, Error>>; + type ProofRecording = DisableProofRecording; + type Proof = (); fn propose( mut self, _: InherentData, pre_digests: DigestFor, _: Duration, - _: RecordProof, + _: Option, ) -> Self::Proposal { self.propose_with(pre_digests) } @@ -191,112 +194,121 @@ thread_local! 
{ } #[derive(Clone)] -struct PanickingBlockImport(B); - -impl> BlockImport for PanickingBlockImport { +pub struct PanickingBlockImport(B); + +#[async_trait::async_trait] +impl> BlockImport for PanickingBlockImport +where + B::Transaction: Send, + B: Send, +{ type Error = B::Error; type Transaction = B::Transaction; - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, new_cache: HashMap>, ) -> Result { - Ok(self.0.import_block(block, new_cache).expect("importing block failed")) + Ok(self.0.import_block(block, new_cache).await.expect("importing block failed")) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - Ok(self.0.check_block(block).expect("checking block failed")) + Ok(self.0.check_block(block).await.expect("checking block failed")) } } +type BabePeer = Peer, BabeBlockImport>; + pub struct BabeTestNet { - peers: Vec>>, + peers: Vec, } type TestHeader = ::Header; -type TestExtrinsic = ::Extrinsic; -type TestSelectChain = substrate_test_runtime_client::LongestChain< - substrate_test_runtime_client::Backend, - TestBlock, ->; +type TestSelectChain = + substrate_test_runtime_client::LongestChain; pub struct TestVerifier { - inner: BabeVerifier, + inner: BabeVerifier< + TestBlock, + PeersFullClient, + TestSelectChain, + AlwaysCanAuthor, + Box< + dyn CreateInherentDataProviders< + TestBlock, + (), + InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + >, + >, + >, mutator: Mutator, } +#[async_trait::async_trait] impl Verifier for TestVerifier { /// Verify the given data and return the BlockImportParams and an optional /// new set of validators to import. If not, err with an Error-Message /// presented to the User in the logs. - fn verify( + async fn verify( &mut self, - origin: BlockOrigin, - mut header: TestHeader, - justification: Option, - body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { // apply post-sealing mutations (i.e. stripping seal, if desired). - (self.mutator)(&mut header, Stage::PostSeal); - self.inner.verify(origin, header, justification, body) + (self.mutator)(&mut block.header, Stage::PostSeal); + self.inner.verify(block).await } } pub struct PeerData { link: BabeLink, - inherent_data_providers: InherentDataProviders, block_import: Mutex< - Option>> + Option< + BoxBlockImport< + TestBlock, + TransactionFor, + >, + >, >, } impl TestNetFactory for BabeTestNet { type Verifier = TestVerifier; type PeerData = Option; + type BlockImport = BabeBlockImport; /// Create new test network with peers and given config. 
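`PanickingBlockImport` above shows the new async `BlockImport` shape. A minimal standalone sketch of the same delegation pattern, assuming the `async-trait` and `futures` crates and a simplified one-method trait:

use async_trait::async_trait;
use futures::executor::block_on;

// Simplified one-method stand-in for the real `BlockImport` trait.
#[async_trait]
trait Import {
    async fn import(&mut self, block: Vec<u8>) -> Result<(), String>;
}

struct Inner;

#[async_trait]
impl Import for Inner {
    async fn import(&mut self, _block: Vec<u8>) -> Result<(), String> {
        Ok(())
    }
}

struct Panicking<I>(I);

// `async_trait` boxes the returned future and requires it to be `Send`,
// which is why the diff adds `B::Transaction: Send, B: Send` bounds to the
// wrapper's impl.
#[async_trait]
impl<I: Import + Send> Import for Panicking<I> {
    async fn import(&mut self, block: Vec<u8>) -> Result<(), String> {
        // Convert inner errors into panics, as the test wrapper does.
        Ok(self.0.import(block).await.expect("importing block failed"))
    }
}

fn main() {
    let mut import = Panicking(Inner);
    block_on(import.import(Vec::new())).unwrap();
}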
fn from_config(_config: &ProtocolConfig) -> Self { debug!(target: "babe", "Creating test network from config"); - BabeTestNet { - peers: Vec::new(), - } + BabeTestNet { peers: Vec::new() } } - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Option, - ) - { + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option, + ) { let client = client.as_full().expect("only full clients are tested"); - let inherent_data_providers = InherentDataProviders::new(); let config = Config::get_or_compute(&*client).expect("config available"); - let (block_import, link) = crate::block_import( - config, - client.clone(), - client.clone(), - ).expect("can initialize block-import"); + let (block_import, link) = crate::block_import(config, client.clone(), client.clone()) + .expect("can initialize block-import"); let block_import = PanickingBlockImport(block_import); - let data_block_import = Mutex::new( - Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) - ); + let data_block_import = + Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>)); ( - BlockImportAdapter::new_full(block_import), - None, + BlockImportAdapter::new(block_import), None, - None, - Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), + Some(PeerData { link, block_import: data_block_import }), ) } @@ -305,16 +317,16 @@ impl TestNetFactory for BabeTestNet { client: PeersClient, _cfg: &ProtocolConfig, maybe_link: &Option, - ) - -> Self::Verifier - { + ) -> Self::Verifier { use substrate_test_runtime_client::DefaultTestClientBuilderExt; let client = client.as_full().expect("only full clients are used in test"); trace!(target: "babe", "Creating a verifier"); // ensure block import and verifier are linked correctly. 
- let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); + let data = maybe_link + .as_ref() + .expect("babe link always provided to verifier instantiation"); let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); @@ -322,30 +334,35 @@ impl TestNetFactory for BabeTestNet { inner: BabeVerifier { client: client.clone(), select_chain: longest_chain, - inherent_data_providers: data.inherent_data_providers.clone(), + create_inherent_data_providers: Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), - time_source: data.link.time_source.clone(), can_author_with: AlwaysCanAuthor, + telemetry: None, }, mutator: MUTATOR.with(|m| m.borrow().clone()), } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut BabePeer { trace!(target: "babe", "Retrieving a peer"); &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec { trace!(target: "babe", "Retrieving peers"); &self.peers } - fn mut_peers>)>( - &mut self, - closure: F, - ) { + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -355,28 +372,20 @@ impl TestNetFactory for BabeTestNet { fn rejects_empty_block() { sp_tracing::try_init_simple(); let mut net = BabeTestNet::new(3); - let block_builder = |builder: BlockBuilder<_, _, _>| { - builder.build().unwrap().block - }; + let block_builder = |builder: BlockBuilder<_, _, _>| builder.build().unwrap().block; net.mut_peers(|peer| { peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); }) } -fn run_one_test( - mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, -) { +fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static) { sp_tracing::try_init_simple(); let mutator = Arc::new(mutator) as Mutator; MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); let net = BabeTestNet::new(3); - let peers = &[ - (0, "//Alice"), - (1, "//Bob"), - (2, "//Charlie"), - ]; + let peers = &[(0, "//Alice"), (1, "//Bob"), (2, "//Charlie")]; let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); @@ -390,9 +399,10 @@ fn run_one_test( let select_chain = peer.select_chain().expect("Full client has select_chain"); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); - SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(seed)).expect("Generates authority key"); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); + SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(seed)) + .expect("Generates authority key"); keystore_paths.push(keystore_path); let mut got_own = false; @@ -410,36 +420,56 @@ fn run_one_test( import_notifications.push( // run each future until we get one of our own blocks with number higher than 5 // that was produced locally. - client.import_notification_stream() - .take_while(move |n| future::ready(n.header.number() < &5 || { - if n.origin == BlockOrigin::Own { - got_own = true; - } else { - got_other = true; - } - - // continue until we have at least one block of our own - // and one of another peer. 
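The closure above builds the two inherent data providers every block needs under the new API: a timestamp and the slot derived from it. A standalone sketch of the slot arithmetic, plus the proposal-time budget implied by the `SlotProportion::new(0.5)` passed to `start_babe` just below (assuming the proportion simply scales the slot duration):

use std::time::Duration;

// Current slot index = timestamp / slot_duration (both in milliseconds).
fn slot_from_timestamp(timestamp_ms: u64, slot_duration_ms: u64) -> u64 {
    timestamp_ms / slot_duration_ms
}

// Time budget the proposer gets inside a slot for a given proportion.
fn proposal_budget(slot_duration: Duration, portion: f32) -> Duration {
    slot_duration.mul_f32(portion.clamp(0.0, 1.0))
}

fn main() {
    // The tests configure 6-second slots and a 0.5 proposal portion.
    assert_eq!(slot_from_timestamp(60_000, 6_000), 10);
    assert_eq!(proposal_budget(Duration::from_secs(6), 0.5), Duration::from_secs(3));
}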
- !(got_own && got_other) - })) - .for_each(|_| future::ready(()) ) + client + .import_notification_stream() + .take_while(move |n| { + future::ready( + n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + + // continue until we have at least one block of our own + // and one of another peer. + !(got_own && got_other) + }, + ) + }) + .for_each(|_| future::ready(())), ); - - babe_futures.push(start_babe(BabeParams { - block_import: data.block_import.lock().take().expect("import set up during init"), - select_chain, - client, - env: environ, - sync_oracle: DummyOracle, - inherent_data_providers: data.inherent_data_providers.clone(), - force_authoring: false, - babe_link: data.link.clone(), - keystore, - can_author_with: sp_consensus::AlwaysCanAuthor, - }).expect("Starts babe")); + babe_futures.push( + start_babe(BabeParams { + block_import: data.block_import.lock().take().expect("import set up during init"), + select_chain, + client, + env: environ, + sync_oracle: DummyOracle, + create_inherent_data_providers: Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }), + force_authoring: false, + backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), + babe_link: data.link.clone(), + keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, + justification_sync_link: (), + block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, + telemetry: None, + }) + .expect("Starts babe"), + ); } - futures::executor::block_on(future::select( + block_on(future::select( futures::future::poll_fn(move |cx| { let mut net = net.lock(); net.poll(cx); @@ -451,7 +481,7 @@ fn run_one_test( Poll::<()>::Pending }), - future::select(future::join_all(import_notifications), future::join_all(babe_futures)) + future::select(future::join_all(import_notifications), future::join_all(babe_futures)), )); } @@ -465,7 +495,8 @@ fn authoring_blocks() { fn rejects_missing_inherent_digest() { run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::take(&mut header.digest_mut().logs); - header.digest_mut().logs = v.into_iter() + header.digest_mut().logs = v + .into_iter() .filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none()) .collect() }) @@ -476,7 +507,8 @@ fn rejects_missing_inherent_digest() { fn rejects_missing_seals() { run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::take(&mut header.digest_mut().logs); - header.digest_mut().logs = v.into_iter() + header.digest_mut().logs = v + .into_iter() .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) .collect() }) @@ -487,7 +519,8 @@ fn rejects_missing_seals() { fn rejects_missing_consensus_digests() { run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::take(&mut header.digest_mut().logs); - header.digest_mut().logs = v.into_iter() + header.digest_mut().logs = v + .into_iter() .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) .collect() }); @@ -522,14 +555,14 @@ fn sig_is_not_pre_digest() { fn can_author_block() { sp_tracing::try_init_simple(); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore: SyncCryptoStorePtr = 
+ Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) .expect("Generates authority pair"); let mut i = 0; let epoch = Epoch { - start_slot: 0, + start_slot: 0.into(), authorities: vec![(public.into(), 1)], randomness: [0; 32], epoch_index: 1, @@ -550,7 +583,7 @@ fn can_author_block() { }; // with secondary slots enabled it should never be empty - match claim_slot(i, &epoch, &keystore) { + match claim_slot(i.into(), &epoch, &keystore) { None => i += 1, Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), } @@ -559,51 +592,52 @@ fn can_author_block() { // of times. config.allowed_slots = AllowedSlots::PrimarySlots; loop { - match claim_slot(i, &epoch, &keystore) { + match claim_slot(i.into(), &epoch, &keystore) { None => i += 1, Some(s) => { debug!(target: "babe", "Authored block {:?}", s.0); - break; - } + break + }, } } } // Propose and import a new BABE block on top of the given parent. -fn propose_and_import_block( +fn propose_and_import_block( parent: &TestHeader, - slot_number: Option, + slot: Option, proposer_factory: &mut DummyFactory, block_import: &mut BoxBlockImport, ) -> sp_core::H256 { let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); - let slot_number = slot_number.unwrap_or_else(|| { + let slot = slot.unwrap_or_else(|| { let parent_pre_digest = find_pre_digest::(parent).unwrap(); - parent_pre_digest.slot_number() + 1 + parent_pre_digest.slot() + 1 }); let pre_digest = sp_runtime::generic::Digest { - logs: vec![ - Item::babe_pre_digest( - PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - authority_index: 0, - slot_number, - }), - ), - ], + logs: vec![Item::babe_pre_digest(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + authority_index: 0, + slot, + }))], }; let parent_hash = parent.hash(); let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( - descendent_query(&*proposer_factory.client), - &parent_hash, - *parent.number(), - slot_number, - ).unwrap().unwrap(); + let epoch_descriptor = proposer_factory + .epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*proposer_factory.client), + &parent_hash, + *parent.number(), + slot, + ) + .unwrap() + .unwrap(); let seal = { // sign the pre-sealed hash of the block and then @@ -626,10 +660,10 @@ fn propose_and_import_block( import.body = Some(block.extrinsics); import.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - let import_result = block_import.import_block(import, Default::default()).unwrap(); + let import_result = block_on(block_import.import_block(import, Default::default())).unwrap(); match import_result { ImportResult::Imported(_) => {}, @@ -660,21 +694,20 @@ fn importing_block_one_sets_genesis_epoch() { let block_hash = propose_and_import_block( &genesis_header, - Some(999), + Some(999.into()), &mut proposer_factory, &mut block_import, ); - let genesis_epoch = Epoch::genesis(&data.link.config, 999); + let genesis_epoch = Epoch::genesis(&data.link.config, 999.into()); - let epoch_changes = data.link.epoch_changes.lock(); - let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( - 
descendent_query(&*client), - &block_hash, - 1, - 1000, - |slot| Epoch::genesis(&data.link.config, slot), - ).unwrap().unwrap(); + let epoch_changes = data.link.epoch_changes.shared_data(); + let epoch_for_second_block = epoch_changes + .epoch_data_for_child_of(descendent_query(&*client), &block_hash, 1, 1000.into(), |slot| { + Epoch::genesis(&data.link.config, slot) + }) + .unwrap() + .unwrap(); assert_eq!(epoch_for_second_block, genesis_epoch); } @@ -741,16 +774,10 @@ fn importing_epoch_change_block_prunes_tree() { let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10); // We should be tracking a total of 9 epochs in the fork tree - assert_eq!( - epoch_changes.lock().tree().iter().count(), - 9, - ); + assert_eq!(epoch_changes.shared_data().tree().iter().count(), 9); // And only one root - assert_eq!( - epoch_changes.lock().tree().roots().count(), - 1, - ); + assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1); // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). @@ -758,32 +785,47 @@ fn importing_epoch_change_block_prunes_tree() { propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7); // at this point no hashes from the first fork must exist on the tree - assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), - ); + assert!(!epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_1.contains(h)),); // but the epoch changes from the other forks must still exist - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) - ); - - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h))); + + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); // finalizing block #25 from the canon chain should prune out the second fork client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8); // at this point no hashes from the second fork must exist on the tree - assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), - ); + assert!(!epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h)),); // while epoch changes from the last fork should still exist - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); } #[test] @@ -809,7 +851,7 @@ fn verify_slots_are_strictly_increasing() { // we should have no issue importing this block let b1 = propose_and_import_block( &genesis_header, - Some(999), + Some(999.into()), &mut proposer_factory, &mut block_import, ); @@ -818,25 +860,20 @@ fn verify_slots_are_strictly_increasing() { // we should fail to import this block since the slot number didn't increase. // we will panic due to the `PanickingBlockImport` defined above. 
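The assertions above and below pin down the pruning invariant: after finalization, an epoch-change entry survives only if it lies on the finalized chain or descends from its head. A standalone sketch of that filter, with hashes simplified to strings:

use std::collections::HashSet;

// Keep only entries on the finalized chain or descending from its head.
fn prune_entries(
    entries: Vec<&'static str>,
    live: &HashSet<&'static str>,
) -> Vec<&'static str> {
    entries.into_iter().filter(|h| live.contains(h)).collect()
}

fn main() {
    // After finalizing deep into the canon chain, fork 1's entries are gone
    // while canon and fork 3 survive, mirroring the assertions in the test.
    let live: HashSet<_> = ["canon-19", "fork3-1"].into_iter().collect();
    let kept = prune_entries(vec!["canon-19", "fork1-1", "fork3-1"], &live);
    assert_eq!(kept, vec!["canon-19", "fork3-1"]);
}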
- propose_and_import_block( - &b1, - Some(999), - &mut proposer_factory, - &mut block_import, - ); + propose_and_import_block(&b1, Some(999.into()), &mut proposer_factory, &mut block_import); } #[test] fn babe_transcript_generation_match() { sp_tracing::try_init_simple(); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) .expect("Generates authority pair"); let epoch = Epoch { - start_slot: 0, + start_slot: 0.into(), authorities: vec![(public.into(), 1)], randomness: [0; 32], epoch_index: 1, @@ -847,14 +884,12 @@ fn babe_transcript_generation_match() { }, }; - let orig_transcript = make_transcript(&epoch.randomness.clone(), 1, epoch.epoch_index); - let new_transcript = make_transcript_data(&epoch.randomness, 1, epoch.epoch_index); + let orig_transcript = make_transcript(&epoch.randomness.clone(), 1.into(), epoch.epoch_index); + let new_transcript = make_transcript_data(&epoch.randomness, 1.into(), epoch.epoch_index); let test = |t: merlin::Transcript| -> [u8; 16] { let mut b = [0u8; 16]; - t.build_rng() - .finalize(&mut ChaChaRng::from_seed([0u8;32])) - .fill_bytes(&mut b); + t.build_rng().finalize(&mut ChaChaRng::from_seed([0u8; 32])).fill_bytes(&mut b); b }; debug_assert!(test(orig_transcript) == test(transcript_from_data(new_transcript))); diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index fd3c27be4f34e..af118312dd07c 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -1,31 +1,38 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Verification for BABE headers. 
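The transcript test above checks that `make_transcript` and `make_transcript_data` agree by drawing deterministic RNG output from each. A standalone sketch of the same technique, assuming mutually compatible `merlin`, `rand` and `rand_chacha` versions; the labels are illustrative, not the exact BABE ones:

use merlin::Transcript;
use rand::RngCore;
use rand_chacha::{rand_core::SeedableRng, ChaChaRng};

fn transcript(randomness: &[u8; 32], slot: u64, epoch: u64) -> Transcript {
    let mut t = Transcript::new(b"example transcript");
    t.append_message(b"slot number", &slot.to_le_bytes());
    t.append_message(b"current epoch", &epoch.to_le_bytes());
    t.append_message(b"chain randomness", randomness);
    t
}

// Two transcripts are byte-identical iff a seeded RNG finalized over each
// yields the same stream; this mirrors the test's `ChaChaRng` trick.
fn fingerprint(t: Transcript) -> [u8; 16] {
    let mut b = [0u8; 16];
    t.build_rng().finalize(&mut ChaChaRng::from_seed([0u8; 32])).fill_bytes(&mut b);
    b
}

fn main() {
    let r = [0u8; 32];
    assert_eq!(fingerprint(transcript(&r, 1, 1)), fingerprint(transcript(&r, 1, 1)));
}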
-use sp_runtime::{traits::Header, traits::DigestItemFor}; -use sp_core::{Pair, Public}; -use sp_consensus_babe::{make_transcript, AuthoritySignature, SlotNumber, AuthorityPair, AuthorityId}; -use sp_consensus_babe::digests::{ - PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, - CompatibleDigestItem +use super::{ + authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}, + babe_err, find_pre_digest, BlockT, Epoch, Error, }; -use sc_consensus_slots::CheckedHeader; use log::{debug, trace}; -use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; -use super::authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; +use sc_consensus_slots::CheckedHeader; +use sp_consensus_babe::{ + digests::{ + CompatibleDigestItem, PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, + SecondaryVRFPreDigest, + }, + make_transcript, AuthorityId, AuthorityPair, AuthoritySignature, +}; +use sp_consensus_slots::Slot; +use sp_core::{Pair, Public}; +use sp_runtime::traits::{DigestItemFor, Header}; /// BABE verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { @@ -36,7 +43,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// work. pub(super) pre_digest: Option, /// The slot number of the current time. - pub(super) slot_now: SlotNumber, + pub(super) slot_now: Slot, /// Epoch descriptor of the epoch this block _should_ be under, if it's valid. pub(super) epoch: &'a Epoch, } @@ -54,36 +61,32 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// with each having different validation logic. pub(super) fn check_header( params: VerificationParams, -) -> Result>, Error> where +) -> Result>, Error> +where DigestItemFor: CompatibleDigestItem, { - let VerificationParams { - mut header, - pre_digest, - slot_now, - epoch, - } = params; + let VerificationParams { mut header, pre_digest, slot_now, epoch } = params; let authorities = &epoch.authorities; let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; trace!(target: "babe", "Checking header"); - let seal = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(babe_err(Error::HeaderUnsealed(header.hash()))), - }; + let seal = header + .digest_mut() + .pop() + .ok_or_else(|| babe_err(Error::HeaderUnsealed(header.hash())))?; - let sig = seal.as_babe_seal().ok_or_else(|| { - babe_err(Error::HeaderBadSeal(header.hash())) - })?; + let sig = seal + .as_babe_seal() + .ok_or_else(|| babe_err(Error::HeaderBadSeal(header.hash())))?; // the pre-hash of the header doesn't include the seal // and that's what we sign let pre_hash = header.hash(); - if pre_digest.slot_number() > slot_now { + if pre_digest.slot() > slot_now { header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot())) } let author = match authorities.get(pre_digest.authority_index() as usize) { @@ -93,37 +96,37 @@ pub(super) fn check_header( match &pre_digest { PreDigest::Primary(primary) => { - debug!(target: "babe", "Verifying Primary block"); - - check_primary_header::( - pre_hash, - primary, - sig, - &epoch, - epoch.config.c, - )?; - }, - PreDigest::SecondaryPlain(secondary) if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() => { - debug!(target: "babe", "Verifying Secondary plain block"); - check_secondary_plain_header::( - pre_hash, - secondary, - sig, - &epoch, - )?; - }, - 
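`check_header` above pops the seal before hashing, since the signature covers the pre-seal hash, and defers headers from future slots with the seal pushed back. A standalone sketch of that control flow with stand-in types:

struct Header {
    logs: Vec<Vec<u8>>,
}

enum Checked {
    // Seal verified against the remaining (pre-seal) header.
    Checked { header: Header, seal: Vec<u8> },
    // Slot is in the future; retry once the clock catches up.
    Deferred(Header, u64),
}

fn check(mut header: Header, slot: u64, slot_now: u64) -> Result<Checked, &'static str> {
    let seal = header.logs.pop().ok_or("header unsealed")?;
    if slot > slot_now {
        header.logs.push(seal); // restore: the caller gets the full header back
        return Ok(Checked::Deferred(header, slot));
    }
    // ...signature verification over the pre-seal header would happen here...
    Ok(Checked::Checked { header, seal })
}

fn main() {
    let sealed = Header { logs: vec![b"pre".to_vec(), b"seal".to_vec()] };
    match check(sealed, 10, 5) {
        Ok(Checked::Deferred(h, slot)) => {
            assert_eq!(h.logs.len(), 2); // seal was pushed back
            assert_eq!(slot, 10);
        },
        _ => unreachable!(),
    }
}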
PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { - debug!(target: "babe", "Verifying Secondary VRF block"); - check_secondary_vrf_header::( - pre_hash, - secondary, - sig, - &epoch, - )?; + debug!(target: "babe", + "Verifying primary block #{} at slot: {}", + header.number(), + primary.slot, + ); + + check_primary_header::(pre_hash, primary, sig, &epoch, epoch.config.c)?; }, - _ => { - return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)); + PreDigest::SecondaryPlain(secondary) + if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() => + { + debug!(target: "babe", + "Verifying secondary plain block #{} at slot: {}", + header.number(), + secondary.slot, + ); + + check_secondary_plain_header::(pre_hash, secondary, sig, &epoch)?; } + PreDigest::SecondaryVRF(secondary) + if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => + { + debug!(target: "babe", + "Verifying secondary VRF block #{} at slot: {}", + header.number(), + secondary.slot, + ); + + check_secondary_vrf_header::(pre_hash, secondary, sig, &epoch)?; + } + _ => return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)), } let info = VerifiedHeaderInfo { @@ -155,27 +158,20 @@ fn check_primary_header( if AuthorityPair::verify(&signature, pre_hash, &author) { let (inout, _) = { - let transcript = make_transcript( - &epoch.randomness, - pre_digest.slot_number, - epoch.epoch_index, - ); + let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) - }).map_err(|s| { - babe_err(Error::VRFVerificationFailed(s)) - })? + schnorrkel::PublicKey::from_bytes(author.as_slice()) + .and_then(|p| { + p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) + }) + .map_err(|s| babe_err(Error::VRFVerificationFailed(s)))? }; - let threshold = calculate_primary_threshold( - c, - &epoch.authorities, - pre_digest.authority_index as usize, - ); + let threshold = + calculate_primary_threshold(c, &epoch.authorities, pre_digest.authority_index as usize); if !check_primary_threshold(&inout, threshold) { - return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); + return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))) } Ok(()) @@ -196,16 +192,14 @@ fn check_secondary_plain_header( ) -> Result<(), Error> { // check the signature is valid under the expected authority and // chain state. - let expected_author = secondary_slot_author( - pre_digest.slot_number, - &epoch.authorities, - epoch.randomness, - ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + let expected_author = + secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) + .ok_or_else(|| Error::NoSecondaryAuthorExpected)?; let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) } if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { @@ -224,30 +218,22 @@ fn check_secondary_vrf_header( ) -> Result<(), Error> { // check the signature is valid under the expected authority and // chain state. 
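The primary and secondary verification paths above rest on two deterministic computations. A standalone sketch of both: the primary-slot threshold (f64 in place of the exact fixed-point arithmetic of `calculate_primary_threshold`) and the secondary-author selection (`DefaultHasher` in place of the Blake2 hash the real code uses):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Primary slots: the claim wins when a 128-bit value drawn from the VRF
// output is below 2^128 * (1 - (1 - c)^(weight / total_weight)).
fn primary_threshold(c: f64, weight: u64, total_weight: u64) -> u128 {
    let p = 1.0 - (1.0 - c).powf(weight as f64 / total_weight as f64);
    (p * u128::MAX as f64) as u128
}

fn wins_primary_slot(vrf_value: u128, threshold: u128) -> bool {
    vrf_value < threshold
}

// Secondary slots: the expected author is picked deterministically by
// hashing the epoch randomness with the slot, modulo the authority count,
// so a verifier can recompute it and compare with `authority_index`.
fn secondary_author_index(randomness: &[u8; 32], slot: u64, authorities: usize) -> Option<usize> {
    if authorities == 0 {
        return None; // no secondary author can be expected
    }
    let mut h = DefaultHasher::new();
    randomness.hash(&mut h);
    slot.hash(&mut h);
    Some((h.finish() % authorities as u64) as usize)
}

fn main() {
    // With c = 0.5 and equal weight among four authorities, each claims
    // roughly 16% of slots as primary...
    let t = primary_threshold(0.5, 1, 4);
    assert!(wins_primary_slot(t / 2, t));
    // ...while every slot has exactly one expected secondary author.
    assert!(secondary_author_index(&[0u8; 32], 42, 4).is_some());
}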
- let expected_author = secondary_slot_author( - pre_digest.slot_number, - &epoch.authorities, - epoch.randomness, - ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + let expected_author = + secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) + .ok_or_else(|| Error::NoSecondaryAuthorExpected)?; let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) } if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { - let transcript = make_transcript( - &epoch.randomness, - pre_digest.slot_number, - epoch.epoch_index, - ); - - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) - }).map_err(|s| { - babe_err(Error::VRFVerificationFailed(s)) - })?; + let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); + + schnorrkel::PublicKey::from_bytes(author.as_slice()) + .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) + .map_err(|s| babe_err(Error::VRFVerificationFailed(s)))?; Ok(()) } else { diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 0a8a4c43d7117..6829bd2c6d8b5 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,7 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +thiserror = "1.0.21" +libp2p = { version = "0.39.1", default-features = false } +log = "0.4.8" +futures = { version = "0.3.1", features = ["thread-pool"] } +futures-timer = "3.0.1" +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { path = "../../../primitives/core", version = "4.0.0-dev" } +sp-consensus = { path = "../../../primitives/consensus/common", version = "0.10.0-dev" } +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +parking_lot = "0.11.1" +serde = { version = "1.0", features = ["derive"] } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" } +async-trait = "0.1.42" + +[dev-dependencies] +sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" } diff --git a/primitives/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs similarity index 53% rename from primitives/consensus/common/src/block_import.rs rename to client/consensus/common/src/block_import.rs index 5e593da1163d7..6d411dd9afbf1 100644 --- a/primitives/consensus/common/src/block_import.rs 
+++ b/client/consensus/common/src/block_import.rs @@ -1,32 +1,31 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . //! Block import helpers. -use sp_runtime::traits::{Block as BlockT, DigestItemFor, Header as HeaderT, NumberFor, HashFor}; -use sp_runtime::Justification; -use serde::{Serialize, Deserialize}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use std::any::Any; +use serde::{Deserialize, Serialize}; +use sp_runtime::{ + traits::{Block as BlockT, DigestItemFor, HashFor, Header as HeaderT, NumberFor}, + Justification, Justifications, +}; +use std::{any::Any, borrow::Cow, collections::HashMap, sync::Arc}; -use crate::Error; -use crate::import_queue::{Verifier, CacheKeyId}; +use sp_consensus::{BlockOrigin, CacheKeyId, Error}; /// Block import result. #[derive(Debug, PartialEq, Eq)] @@ -54,8 +53,6 @@ pub struct ImportedAux { pub needs_justification: bool, /// Received a bad justification. pub bad_justification: bool, - /// Request a finality proof for the given block. - pub needs_finality_proof: bool, /// Whether the block that was imported is the new best block. pub is_new_best: bool, } @@ -63,30 +60,37 @@ pub struct ImportedAux { impl ImportResult { /// Returns default value for `ImportResult::Imported` with /// `clear_justification_requests`, `needs_justification`, - /// `bad_justification` and `needs_finality_proof` set to false. + /// `bad_justification` set to false. pub fn imported(is_new_best: bool) -> ImportResult { let mut aux = ImportedAux::default(); aux.is_new_best = is_new_best; ImportResult::Imported(aux) } -} -/// Block data origin. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum BlockOrigin { - /// Genesis block built into the client. - Genesis, - /// Block is part of the initial sync with the network. - NetworkInitialSync, - /// Block was broadcasted on the network. - NetworkBroadcast, - /// Block that was received from the network and validated in the consensus process. - ConsensusBroadcast, - /// Block that was collated by this node. - Own, - /// Block was imported from a file. 
- File, + /// Handles any necessary request for justifications (or clearing of pending requests) based on + /// the outcome of this block import. + pub fn handle_justification( + &self, + hash: &B::Hash, + number: NumberFor, + justification_sync_link: &mut dyn JustificationSyncLink, + ) where + B: BlockT, + { + match self { + ImportResult::Imported(aux) => { + if aux.clear_justification_requests { + justification_sync_link.clear_justification_requests(); + } + + if aux.needs_justification { + justification_sync_link.request_justification(hash, number); + } + }, + _ => {}, + } + } } /// Fork choice strategy. @@ -109,10 +113,47 @@ pub struct BlockCheckParams { pub parent_hash: Block::Hash, /// Allow importing the block skipping state verification if parent state is missing. pub allow_missing_state: bool, + /// Allow importing the block if parent block is missing. + pub allow_missing_parent: bool, /// Re-validate existing block. pub import_existing: bool, } +/// Precomputed storage. +pub enum StorageChanges { + /// Changes coming from block execution. + Changes(sp_state_machine::StorageChanges, NumberFor>), + /// Whole new state. + Import(ImportedState), +} + +/// Imported state data. A vector of key-value pairs that should form a trie. +#[derive(PartialEq, Eq, Clone)] +pub struct ImportedState { + /// Target block hash. + pub block: B::Hash, + /// State keys and values. + pub state: Vec<(Vec, Vec)>, +} + +impl std::fmt::Debug for ImportedState { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_struct("ImportedState").field("block", &self.block).finish() + } +} + +/// Defines how a new state is computed for a given imported block. +pub enum StateAction { + /// Apply precomputed changes coming from block execution or state sync. + ApplyChanges(StorageChanges), + /// Execute block body (required) and compute state. + Execute, + /// Execute block body if parent state is available and compute state. + ExecuteIfPossible, + /// Don't execute or import state. + Skip, +} + /// Data required to import a Block. #[non_exhaustive] pub struct BlockImportParams { @@ -130,25 +171,24 @@ pub struct BlockImportParams { /// re-executed in a runtime that checks digest equivalence -- the /// post-runtime digests are pushed back on after. pub header: Block::Header, - /// Justification provided for this block from the outside. - pub justification: Option, + /// Justification(s) provided for this block from the outside. + pub justifications: Option, /// Digest items that have been added after the runtime for external /// work, like a consensus signature. pub post_digests: Vec>, /// The body of the block. pub body: Option>, - /// The changes to the storage to create the state for the block. If this is `Some(_)`, - /// the block import will not need to re-execute the block for importing it. - pub storage_changes: Option< - sp_state_machine::StorageChanges, NumberFor> - >, + /// Indexed transaction body of the block. + pub indexed_body: Option>>, + /// Specify how the new state is computed. + pub state_action: StateAction, /// Is this block finalized already? /// `true` implies instant finality. pub finalized: bool, /// Intermediate values that are interpreted by block importers. Each block importer, /// upon handling a value, removes it from the intermediate list. The final block importer /// rejects block import if there are still intermediate values that remain unhandled. 
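The new `StateAction` enum above makes the state-derivation strategy an explicit import parameter. A standalone sketch of one plausible selection policy; the enum is mirrored from the hunk, the policy itself is illustrative rather than anything the code prescribes:

// Mirrored from the diff, minus the generic payloads.
enum StateAction {
    ApplyChanges,      // precomputed changes from execution or state sync
    Execute,           // re-execute the block body (required)
    ExecuteIfPossible, // execute only when the parent state is available
    Skip,              // header-only import: no execution, no state
}

fn choose_state_action(
    have_precomputed_changes: bool,
    execution_required: bool,
) -> StateAction {
    match (have_precomputed_changes, execution_required) {
        (true, _) => StateAction::ApplyChanges,
        (false, true) => StateAction::Execute,
        (false, false) => StateAction::ExecuteIfPossible,
    }
}

fn main() {
    // A header-only sync mode would pick `StateAction::Skip` outright.
    let _ = StateAction::Skip;
    let _ = choose_state_action(false, true);
}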
- pub intermediates: HashMap, Box>, + pub intermediates: HashMap, Box>, /// Auxiliary consensus data produced by the block. /// Contains a list of key-value pairs. If values are `None`, the keys /// will be deleted. @@ -160,8 +200,6 @@ pub struct BlockImportParams { /// to modify it. If `None` is passed all the way down to bottom block /// importer, the import fails with an `IncompletePipeline` error. pub fork_choice: Option, - /// Allow importing the block skipping state verification if parent state is missing. - pub allow_missing_state: bool, /// Re-validate existing block. pub import_existing: bool, /// Cached full header hash (with post-digests applied). @@ -170,21 +208,19 @@ pub struct BlockImportParams { impl BlockImportParams { /// Create a new block import params. - pub fn new( - origin: BlockOrigin, - header: Block::Header, - ) -> Self { + pub fn new(origin: BlockOrigin, header: Block::Header) -> Self { Self { - origin, header, - justification: None, + origin, + header, + justifications: None, post_digests: Vec::new(), body: None, - storage_changes: None, + indexed_body: None, + state_action: StateAction::Execute, finalized: false, intermediates: HashMap::new(), auxiliary: Vec::new(), fork_choice: None, - allow_missing_state: false, import_existing: false, post_hash: None, } @@ -195,35 +231,51 @@ impl BlockImportParams { if let Some(hash) = self.post_hash { hash } else { - if self.post_digests.is_empty() { - self.header.hash() - } else { - let mut hdr = self.header.clone(); - for digest_item in &self.post_digests { - hdr.digest_mut().push(digest_item.clone()); - } + self.post_header().hash() + } + } - hdr.hash() + /// Get the post header. + pub fn post_header(&self) -> Block::Header { + if self.post_digests.is_empty() { + self.header.clone() + } else { + let mut hdr = self.header.clone(); + for digest_item in &self.post_digests { + hdr.digest_mut().push(digest_item.clone()); } + + hdr } } /// Auxiliary function for "converting" the transaction type. /// - /// Actually this just sets `storage_changes` to `None` and makes rustc think that `Self` now - /// uses a different transaction type. - pub fn convert_transaction(self) -> BlockImportParams { + /// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that + /// `Self` now uses a different transaction type. + pub fn clear_storage_changes_and_mutate( + self, + ) -> BlockImportParams { + // Preserve imported state. 
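A standalone sketch of the `post_header`/`post_hash` refactor above: post-digests (such as the seal) are appended to a copy of the header before hashing, so the cached hash identifies the sealed block while `header` itself stays pre-seal. Hashing is stubbed; only the copy-and-append shape is shown:

#[derive(Clone)]
struct Header {
    logs: Vec<&'static str>,
}

fn post_header(header: &Header, post_digests: &[&'static str]) -> Header {
    if post_digests.is_empty() {
        return header.clone(); // nothing to append: pre- and post-header agree
    }
    let mut hdr = header.clone();
    hdr.logs.extend_from_slice(post_digests);
    hdr
}

fn main() {
    let pre = Header { logs: vec!["pre-runtime"] };
    let post = post_header(&pre, &["seal"]);
    assert_eq!(post.logs, vec!["pre-runtime", "seal"]);
}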
+ let state_action = match self.state_action { + StateAction::ApplyChanges(StorageChanges::Import(state)) => + StateAction::ApplyChanges(StorageChanges::Import(state)), + StateAction::ApplyChanges(StorageChanges::Changes(_)) => StateAction::Skip, + StateAction::Execute => StateAction::Execute, + StateAction::ExecuteIfPossible => StateAction::ExecuteIfPossible, + StateAction::Skip => StateAction::Skip, + }; BlockImportParams { origin: self.origin, header: self.header, - justification: self.justification, + justifications: self.justifications, post_digests: self.post_digests, body: self.body, - storage_changes: None, + indexed_body: self.indexed_body, + state_action, finalized: self.finalized, auxiliary: self.auxiliary, intermediates: self.intermediates, - allow_missing_state: self.allow_missing_state, fork_choice: self.fork_choice, import_existing: self.import_existing, post_hash: self.post_hash, @@ -234,18 +286,16 @@ impl BlockImportParams { pub fn take_intermediate(&mut self, key: &[u8]) -> Result, Error> { let (k, v) = self.intermediates.remove_entry(key).ok_or(Error::NoIntermediate)?; - match v.downcast::() { - Ok(v) => Ok(v), - Err(v) => { - self.intermediates.insert(k, v); - Err(Error::InvalidIntermediate) - }, - } + v.downcast::().or_else(|v| { + self.intermediates.insert(k, v); + Err(Error::InvalidIntermediate) + }) } /// Get a reference to a given intermediate. pub fn intermediate(&self, key: &[u8]) -> Result<&T, Error> { - self.intermediates.get(key) + self.intermediates + .get(key) .ok_or(Error::NoIntermediate)? .downcast_ref::() .ok_or(Error::InvalidIntermediate) @@ -253,22 +303,29 @@ impl BlockImportParams { /// Get a mutable reference to a given intermediate. pub fn intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { - self.intermediates.get_mut(key) + self.intermediates + .get_mut(key) .ok_or(Error::NoIntermediate)? .downcast_mut::() .ok_or(Error::InvalidIntermediate) } + + /// Check if this block contains state import action + pub fn with_state(&self) -> bool { + matches!(self.state_action, StateAction::ApplyChanges(StorageChanges::Import(_))) + } } /// Block import trait. +#[async_trait::async_trait] pub trait BlockImport { /// The error type. type Error: std::error::Error + Send + 'static; /// The transaction type used by the backend. - type Transaction; + type Transaction: Send + 'static; /// Check block preconditions. - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result; @@ -276,69 +333,78 @@ pub trait BlockImport { /// Import a block. /// /// Cached data can be accessed through the blockchain cache. - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result; } -impl BlockImport for crate::import_queue::BoxBlockImport { - type Error = crate::error::Error; +#[async_trait::async_trait] +impl BlockImport for crate::import_queue::BoxBlockImport +where + Transaction: Send + 'static, +{ + type Error = sp_consensus::error::Error; type Transaction = Transaction; /// Check block preconditions. - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (**self).check_block(block) + (**self).check_block(block).await } /// Import a block. /// /// Cached data can be accessed through the blockchain cache. 
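`take_intermediate` above relies on `Box<dyn Any>` downcasting and restores the entry when the requested type does not match. A standalone sketch of that pattern:

use std::any::Any;
use std::collections::HashMap;

// Remove a typed intermediate stored under a byte key; on a type mismatch
// the boxed value is reinserted instead of being dropped.
fn take_intermediate<T: 'static>(
    map: &mut HashMap<Vec<u8>, Box<dyn Any>>,
    key: &[u8],
) -> Option<Box<T>> {
    let (k, v) = map.remove_entry(key)?;
    match v.downcast::<T>() {
        Ok(v) => Some(v),
        Err(v) => {
            map.insert(k, v); // wrong type requested: restore the entry
            None
        },
    }
}

fn main() {
    let mut intermediates: HashMap<Vec<u8>, Box<dyn Any>> = HashMap::new();
    intermediates.insert(b"epoch".to_vec(), Box::new(7u64));
    // Asking for the wrong type leaves the entry in place...
    assert!(take_intermediate::<String>(&mut intermediates, b"epoch").is_none());
    // ...so the correctly-typed take still succeeds afterwards.
    assert_eq!(*take_intermediate::<u64>(&mut intermediates, b"epoch").unwrap(), 7);
}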
- fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result { - (**self).import_block(block, cache) + (**self).import_block(block, cache).await } } +#[async_trait::async_trait] impl BlockImport for Arc - where for<'r> &'r T: BlockImport +where + for<'r> &'r T: BlockImport, + T: Send + Sync, + Transaction: Send + 'static, { type Error = E; type Transaction = Transaction; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (&**self).check_block(block) + (&**self).check_block(block).await } - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result { - (&**self).import_block(block, cache) + (&**self).import_block(block, cache).await } } /// Justification import trait +#[async_trait::async_trait] pub trait JustificationImport { type Error: std::error::Error + Send + 'static; /// Called by the import queue when it is started. Returns a list of justifications to request /// from the network. - fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { Vec::new() } + async fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)>; /// Import a Block justification and finalize the given block. - fn import_justification( + async fn import_justification( &mut self, hash: B::Hash, number: NumberFor, @@ -346,20 +412,31 @@ pub trait JustificationImport { ) -> Result<(), Self::Error>; } -/// Finality proof import trait. -pub trait FinalityProofImport { - type Error: std::error::Error + Send + 'static; +/// Control the synchronization process of block justifications. +/// +/// When importing blocks different consensus engines might require that +/// additional finality data is provided (i.e. a justification for the block). +/// This trait abstracts the required methods to issue those requests +pub trait JustificationSyncLink: Send + Sync { + /// Request a justification for the given block. + fn request_justification(&self, hash: &B::Hash, number: NumberFor); - /// Called by the import queue when it is started. Returns a list of finality proofs to request - /// from the network. - fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { Vec::new() } + /// Clear all pending justification requests. + fn clear_justification_requests(&self); +} - /// Import a Block justification and finalize the given block. Returns finalized block or error. - fn import_finality_proof( - &mut self, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(B::Hash, NumberFor), Self::Error>; +impl JustificationSyncLink for () { + fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) {} + + fn clear_justification_requests(&self) {} +} + +impl> JustificationSyncLink for Arc { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + L::request_justification(&*self, hash, number); + } + + fn clear_justification_requests(&self) { + L::clear_justification_requests(&*self); + } } diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs new file mode 100644 index 0000000000000..3f2126ccadf66 --- /dev/null +++ b/client/consensus/common/src/import_queue.rs @@ -0,0 +1,310 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Import Queue primitive: something which can verify and import blocks. +//! +//! This serves as an intermediate and abstracted step between synchronization +//! and import. Each mode of consensus will have its own requirements for block +//! verification. Some algorithms can verify in parallel, while others only +//! sequentially. +//! +//! The `ImportQueue` trait allows such verification strategies to be +//! instantiated. The `BasicQueue` and `BasicVerifier` traits allow serial +//! queues to be instantiated simply. + +use std::{collections::HashMap, iter::FromIterator}; + +use log::{debug, trace}; +use sp_runtime::{ + traits::{Block as BlockT, Header as _, NumberFor}, + Justifications, +}; + +use crate::{ + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ImportResult, ImportedAux, ImportedState, + JustificationImport, StateAction, + }, + metrics::Metrics, +}; +pub use basic_queue::BasicQueue; +use sp_consensus::{error::Error as ConsensusError, BlockOrigin, CacheKeyId}; + +/// A commonly-used Import Queue type. +/// +/// This defines the transaction type of the `BasicQueue` to be the transaction type for a client. +pub type DefaultImportQueue = + BasicQueue>; + +mod basic_queue; +pub mod buffered_link; + +/// Shared block import struct used by the queue. +pub type BoxBlockImport = + Box + Send + Sync>; + +/// Shared justification import struct used by the queue. +pub type BoxJustificationImport = + Box + Send + Sync>; + +/// Maps to the Origin used by the network. +pub type Origin = libp2p::PeerId; + +/// Block data used by the queue. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct IncomingBlock { + /// Block header hash. + pub hash: ::Hash, + /// Block header if requested. + pub header: Option<::Header>, + /// Block body if requested. + pub body: Option::Extrinsic>>, + /// Indexed block body if requested. + pub indexed_body: Option>>, + /// Justification(s) if requested. + pub justifications: Option, + /// The peer, we received this from + pub origin: Option, + /// Allow importing the block skipping state verification if parent state is missing. + pub allow_missing_state: bool, + /// Skip block execution and state verification. + pub skip_execution: bool, + /// Re-validate existing block. + pub import_existing: bool, + /// Do not compute new state, but rather set it to the given set. + pub state: Option>, +} + +/// Verify a justification of a block +#[async_trait::async_trait] +pub trait Verifier: Send + Sync { + /// Verify the given data and return the BlockImportParams and an optional + /// new set of validators to import. If not, err with an Error-Message + /// presented to the User in the logs. + async fn verify( + &mut self, + block: BlockImportParams, + ) -> Result<(BlockImportParams, Option)>>), String>; +} + +/// Blocks import queue API. 
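A minimal sketch of the `Verifier` trait introduced above, simplified to a non-generic form (the real `verify` also returns an optional authority-cache update); the pass-through implementation shows the shape only and is not something to run against a real network:

use async_trait::async_trait;
use futures::executor::block_on;

// Simplified stand-in for `BlockImportParams`.
struct Params {
    header: Vec<u8>,
}

#[async_trait]
trait Verifier: Send + Sync {
    async fn verify(&mut self, block: Params) -> Result<Params, String>;
}

// A verifier that performs no consensus checks and accepts every block.
struct AcceptAll;

#[async_trait]
impl Verifier for AcceptAll {
    async fn verify(&mut self, block: Params) -> Result<Params, String> {
        Ok(block)
    }
}

fn main() {
    let mut verifier = AcceptAll;
    let verified = block_on(verifier.verify(Params { header: vec![1, 2, 3] })).unwrap();
    assert_eq!(verified.header, vec![1, 2, 3]);
}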
+
+/// Blocks import queue API.
+///
+/// The `import_*` methods can be called in order to send elements for the import queue to verify.
+/// Afterwards, call `poll_actions` to determine how to respond to these elements.
+pub trait ImportQueue<B: BlockT>: Send {
+	/// Import a bunch of blocks.
+	fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec<IncomingBlock<B>>);
+	/// Import block justifications.
+	fn import_justifications(
+		&mut self,
+		who: Origin,
+		hash: B::Hash,
+		number: NumberFor<B>,
+		justifications: Justifications,
+	);
+	/// Polls for actions to perform on the network.
+	///
+	/// This method should behave in a way similar to `Future::poll`. It can register the current
+	/// task and notify later when more actions are ready to be polled. To continue the comparison,
+	/// it is as if this method always returned `Poll::Pending`.
+	fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link<B>);
+}
+
+/// Hooks that the verification queue can use to influence the synchronization
+/// algorithm.
+pub trait Link<B: BlockT>: Send {
+	/// Batch of blocks imported, with or without error.
+	fn blocks_processed(
+		&mut self,
+		_imported: usize,
+		_count: usize,
+		_results: Vec<(BlockImportResult<B>, B::Hash)>,
+	) {
+	}
+
+	/// Justification import result.
+	fn justification_imported(
+		&mut self,
+		_who: Origin,
+		_hash: &B::Hash,
+		_number: NumberFor<B>,
+		_success: bool,
+	) {
+	}
+
+	/// Request a justification for the given block.
+	fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor<B>) {}
+}
+
+/// Block import successful result.
+#[derive(Debug, PartialEq)]
+pub enum BlockImportStatus<N: std::fmt::Debug + PartialEq> {
+	/// Imported known block.
+	ImportedKnown(N, Option<Origin>),
+	/// Imported unknown block.
+	ImportedUnknown(N, ImportedAux, Option<Origin>),
+}
+
+/// Block import error.
+#[derive(Debug)]
+pub enum BlockImportError {
+	/// The block is missing its header, so it can't be imported.
+	IncompleteHeader(Option<Origin>),
+	/// Block verification failed, so the block can't be imported.
+	VerificationFailed(Option<Origin>, String),
+	/// The block is known to be bad.
+	BadBlock(Option<Origin>),
+	/// Parent state is missing.
+	MissingState,
+	/// The block has an unknown parent.
+	UnknownParent,
+	/// Block import has been cancelled. This can happen if the parent block fails to be imported.
+	Cancelled,
+	/// Other error.
+	Other(ConsensusError),
+}
+
+type BlockImportResult<B> = Result<BlockImportStatus<NumberFor<B>>, BlockImportError>;
+
+/// Single block import function.
+pub async fn import_single_block<B: BlockT, V: Verifier<B>, Transaction: Send + 'static>(
+	import_handle: &mut impl BlockImport<B, Transaction = Transaction, Error = ConsensusError>,
+	block_origin: BlockOrigin,
+	block: IncomingBlock<B>,
+	verifier: &mut V,
+) -> BlockImportResult<B> {
+	import_single_block_metered(import_handle, block_origin, block, verifier, None).await
+}
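For a one-off import outside of a queue, `import_single_block` can be called directly. A hedged sketch, reusing the no-op `BlockImport`/`Verifier` implementations for `()` from the tests further below, so it only compiles where those impls are available:

use sc_consensus::{import_single_block, IncomingBlock};
use sp_consensus::BlockOrigin;
use sp_runtime::traits::Header as _;
use sp_test_primitives::{Block, Header};

async fn import_one(header: Header) {
	let incoming = IncomingBlock::<Block> {
		hash: header.hash(),
		header: Some(header),
		body: None,
		indexed_body: None,
		justifications: None,
		origin: None,
		allow_missing_state: false,
		skip_execution: false,
		import_existing: false,
		state: None,
	};

	// `()` implements both `BlockImport` and `Verifier` in the tests below;
	// a real node passes its client-backed importer and engine verifier here.
	let status = import_single_block(&mut (), BlockOrigin::Own, incoming, &mut ())
		.await
		.expect("no-op import cannot fail");
	println!("import status: {:?}", status);
}

+
+/// Single block import function with metering.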
+pub(crate) async fn import_single_block_metered< + B: BlockT, + V: Verifier, + Transaction: Send + 'static, +>( + import_handle: &mut impl BlockImport, + block_origin: BlockOrigin, + block: IncomingBlock, + verifier: &mut V, + metrics: Option, +) -> BlockImportResult { + let peer = block.origin; + + let (header, justifications) = match (block.header, block.justifications) { + (Some(header), justifications) => (header, justifications), + (None, _) => { + if let Some(ref peer) = peer { + debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); + } else { + debug!(target: "sync", "Header {} was not provided ", block.hash); + } + return Err(BlockImportError::IncompleteHeader(peer)) + }, + }; + + trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); + + let number = header.number().clone(); + let hash = block.hash; + let parent_hash = header.parent_hash().clone(); + + let import_handler = |import| match import { + Ok(ImportResult::AlreadyInChain) => { + trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); + Ok(BlockImportStatus::ImportedKnown(number, peer.clone())) + }, + Ok(ImportResult::Imported(aux)) => + Ok(BlockImportStatus::ImportedUnknown(number, aux, peer.clone())), + Ok(ImportResult::MissingState) => { + debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", + number, hash, parent_hash); + Err(BlockImportError::MissingState) + }, + Ok(ImportResult::UnknownParent) => { + debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", + number, hash, parent_hash); + Err(BlockImportError::UnknownParent) + }, + Ok(ImportResult::KnownBad) => { + debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); + Err(BlockImportError::BadBlock(peer.clone())) + }, + Err(e) => { + debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); + Err(BlockImportError::Other(e)) + }, + }; + + match import_handler( + import_handle + .check_block(BlockCheckParams { + hash, + number, + parent_hash, + allow_missing_state: block.allow_missing_state, + import_existing: block.import_existing, + allow_missing_parent: block.state.is_some(), + }) + .await, + )? { + BlockImportStatus::ImportedUnknown { .. } => (), + r => return Ok(r), // Any other successful result means that the block is already imported. 
+ } + + let started = std::time::Instant::now(); + + let mut import_block = BlockImportParams::new(block_origin, header); + import_block.body = block.body; + import_block.justifications = justifications; + import_block.post_hash = Some(hash); + import_block.import_existing = block.import_existing; + import_block.indexed_body = block.indexed_body; + + if let Some(state) = block.state { + let changes = crate::block_import::StorageChanges::Import(state); + import_block.state_action = StateAction::ApplyChanges(changes); + } else if block.skip_execution { + import_block.state_action = StateAction::Skip; + } else if block.allow_missing_state { + import_block.state_action = StateAction::ExecuteIfPossible; + } + + let (import_block, maybe_keys) = verifier.verify(import_block).await.map_err(|msg| { + if let Some(ref peer) = peer { + trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); + } else { + trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); + } + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(false, started.elapsed()); + } + BlockImportError::VerificationFailed(peer.clone(), msg) + })?; + + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(true, started.elapsed()); + } + + let cache = HashMap::from_iter(maybe_keys.unwrap_or_default()); + let import_block = import_block.clear_storage_changes_and_mutate(); + let imported = import_handle.import_block(import_block, cache).await; + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification_and_import(started.elapsed()); + } + import_handler(imported) +} diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs new file mode 100644 index 0000000000000..9042c8798be4f --- /dev/null +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -0,0 +1,633 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
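Since `BasicQueue` (below) implements `ImportQueue`, a caller drives it by repeatedly handing a `Link` to `poll_actions`. A hedged sketch (the `LogLink` type and `drive_queue` helper are ours, for illustration; in Substrate the network sync task plays this role):

use futures::future::poll_fn;
use sc_consensus::{ImportQueue, Link};
use sp_runtime::traits::{Block as BlockT, NumberFor};

/// A `Link` that merely logs justification requests; the other hooks keep
/// their default no-op implementations.
struct LogLink;

impl<B: BlockT> Link<B> for LogLink {
	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
		log::info!("import queue requested a justification for {:?} (#{:?})", hash, number);
	}
}

/// Poll the queue forever, mirroring how the sync task would embed it.
async fn drive_queue<B: BlockT>(mut queue: impl ImportQueue<B>) {
	let mut link = LogLink;
	poll_fn(|cx| {
		// Behaves like a future that never resolves; see the trait docs above.
		queue.poll_actions(cx, &mut link);
		std::task::Poll::<()>::Pending
	})
	.await
}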
+use futures::{
+	prelude::*,
+	task::{Context, Poll},
+};
+use futures_timer::Delay;
+use log::{debug, trace};
+use prometheus_endpoint::Registry;
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
+use sp_consensus::BlockOrigin;
+use sp_runtime::{
+	traits::{Block as BlockT, Header as HeaderT, NumberFor},
+	Justification, Justifications,
+};
+use std::{marker::PhantomData, pin::Pin, time::Duration};
+
+use crate::{
+	import_queue::{
+		buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender},
+		import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport,
+		BoxJustificationImport, ImportQueue, IncomingBlock, Link, Origin, Verifier,
+	},
+	metrics::Metrics,
+};
+
+/// Interface to a basic block import queue that is importing blocks sequentially in a separate
+/// task, with pluggable verification.
+pub struct BasicQueue<B: BlockT, Transaction> {
+	/// Channel to send justification import messages to the background task.
+	justification_sender: TracingUnboundedSender<worker_messages::ImportJustification<B>>,
+	/// Channel to send block import messages to the background task.
+	block_import_sender: TracingUnboundedSender<worker_messages::ImportBlocks<B>>,
+	/// Results coming from the worker task.
+	result_port: BufferedLinkReceiver<B>,
+	_phantom: PhantomData<Transaction>,
+}
+
+impl<B: BlockT, Transaction> Drop for BasicQueue<B, Transaction> {
+	fn drop(&mut self) {
+		// Flush the queue and close the receiver to terminate the future.
+		self.justification_sender.close_channel();
+		self.block_import_sender.close_channel();
+		self.result_port.close();
+	}
+}
+
+impl<B: BlockT, Transaction: Send + 'static> BasicQueue<B, Transaction> {
+	/// Instantiate a new basic queue, with given verifier.
+	///
+	/// This creates a background task, and calls `on_start` on the justification importer.
+	pub fn new<V: 'static + Verifier<B>>(
+		verifier: V,
+		block_import: BoxBlockImport<B, Transaction>,
+		justification_import: Option<BoxJustificationImport<B>>,
+		spawner: &impl sp_core::traits::SpawnEssentialNamed,
+		prometheus_registry: Option<&Registry>,
+	) -> Self {
+		let (result_sender, result_port) = buffered_link::buffered_link();
+
+		let metrics = prometheus_registry.and_then(|r| {
+			Metrics::register(r)
+				.map_err(|err| {
+					log::warn!("Failed to register Prometheus metrics: {}", err);
+				})
+				.ok()
+		});
+
+		let (future, justification_sender, block_import_sender) = BlockImportWorker::new(
+			result_sender,
+			verifier,
+			block_import,
+			justification_import,
+			metrics,
+		);
+
+		spawner.spawn_essential_blocking("basic-block-import-worker", future.boxed());
+
+		Self { justification_sender, block_import_sender, result_port, _phantom: PhantomData }
+	}
+}
+
+impl<B: BlockT, Transaction: Send> ImportQueue<B> for BasicQueue<B, Transaction> {
+	fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec<IncomingBlock<B>>) {
+		if blocks.is_empty() {
+			return
+		}
+
+		trace!(target: "sync", "Scheduling {} blocks for import", blocks.len());
+		let res = self
+			.block_import_sender
+			.unbounded_send(worker_messages::ImportBlocks(origin, blocks));
+
+		if res.is_err() {
+			log::error!(
+				target: "sync",
+				"import_blocks: Background import task is no longer alive"
+			);
+		}
+	}
+
+	fn import_justifications(
+		&mut self,
+		who: Origin,
+		hash: B::Hash,
+		number: NumberFor<B>,
+		justifications: Justifications,
+	) {
+		for justification in justifications {
+			let res = self.justification_sender.unbounded_send(
+				worker_messages::ImportJustification(who, hash, number, justification),
+			);
+
+			if res.is_err() {
+				log::error!(
+					target: "sync",
+					"import_justification: Background import task is no longer alive"
+				);
+			}
+		}
+	}
+
+	fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link<B>) {
+		if self.result_port.poll_actions(cx, link).is_err() {
+			log::error!(target: "sync", "poll_actions: Background import task is no longer alive");
+		}
+	}
+}
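Wiring it up: a hedged sketch of constructing a `BasicQueue` with the test-only no-op implementations for `()` shown at the end of this file; a real node passes its engine verifier, client-backed block import, and task spawner instead. That `sp_core::testing::TaskExecutor` implements `SpawnEssentialNamed` is an assumption of this sketch:

use sc_consensus::{BasicQueue, BoxBlockImport};
use sp_test_primitives::{Block, Extrinsic};

/// Build a queue for testing purposes only: `()` stands in for the verifier
/// and (boxed) block import, and `TaskExecutor` stands in for the node's
/// task spawner.
fn build_queue(block_import: BoxBlockImport<Block, Extrinsic>) -> BasicQueue<Block, Extrinsic> {
	let spawner = sp_core::testing::TaskExecutor::new();
	BasicQueue::new(
		(),           // verifier, e.g. the test impl below
		block_import, // e.g. `Box::new(())` with the test impl below
		None,         // no justification import
		&spawner,
		None,         // no Prometheus registry
	)
}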
+
+/// Messages destined for the background worker.
+mod worker_messages {
+	use super::*;
+
+	pub struct ImportBlocks<B: BlockT>(pub BlockOrigin, pub Vec<IncomingBlock<B>>);
+	pub struct ImportJustification<B: BlockT>(
+		pub Origin,
+		pub B::Hash,
+		pub NumberFor<B>,
+		pub Justification,
+	);
+}
+
+/// The process of importing blocks.
+///
+/// This polls the `block_import_receiver` for new blocks to import and then awaits the import
+/// of these blocks. After each block is imported, this async function yields once to give other
+/// futures a chance to run.
+///
+/// Returns when `block_import` has ended.
+async fn block_import_process<B: BlockT, Transaction: Send + 'static>(
+	mut block_import: BoxBlockImport<B, Transaction>,
+	mut verifier: impl Verifier<B>,
+	mut result_sender: BufferedLinkSender<B>,
+	mut block_import_receiver: TracingUnboundedReceiver<worker_messages::ImportBlocks<B>>,
+	metrics: Option<Metrics>,
+	delay_between_blocks: Duration,
+) {
+	loop {
+		let worker_messages::ImportBlocks(origin, blocks) = match block_import_receiver.next().await
+		{
+			Some(blocks) => blocks,
+			None => {
+				log::debug!(
+					target: "block-import",
+					"Stopping block import because the import channel was closed!",
+				);
+				return
+			},
+		};
+
+		let res = import_many_blocks(
+			&mut block_import,
+			origin,
+			blocks,
+			&mut verifier,
+			delay_between_blocks,
+			metrics.clone(),
+		)
+		.await;
+
+		result_sender.blocks_processed(res.imported, res.block_count, res.results);
+	}
+}
+
+struct BlockImportWorker<B: BlockT> {
+	result_sender: BufferedLinkSender<B>,
+	justification_import: Option<BoxJustificationImport<B>>,
+	metrics: Option<Metrics>,
+}
+
+impl<B: BlockT> BlockImportWorker<B> {
+	fn new<V: 'static + Verifier<B>, Transaction: Send + 'static>(
+		result_sender: BufferedLinkSender<B>,
+		verifier: V,
+		block_import: BoxBlockImport<B, Transaction>,
+		justification_import: Option<BoxJustificationImport<B>>,
+		metrics: Option<Metrics>,
+	) -> (
+		impl Future<Output = ()> + Send,
+		TracingUnboundedSender<worker_messages::ImportJustification<B>>,
+		TracingUnboundedSender<worker_messages::ImportBlocks<B>>,
+	) {
+		use worker_messages::*;
+
+		let (justification_sender, mut justification_port) =
+			tracing_unbounded("mpsc_import_queue_worker_justification");
+
+		let (block_import_sender, block_import_port) =
+			tracing_unbounded("mpsc_import_queue_worker_blocks");
+
+		let mut worker = BlockImportWorker { result_sender, justification_import, metrics };
+
+		let delay_between_blocks = Duration::default();
+
+		let future = async move {
+			// Let's initialize `justification_import`.
+			if let Some(justification_import) = worker.justification_import.as_mut() {
+				for (hash, number) in justification_import.on_start().await {
+					worker.result_sender.request_justification(&hash, number);
+				}
+			}
+
+			let block_import_process = block_import_process(
+				block_import,
+				verifier,
+				worker.result_sender.clone(),
+				block_import_port,
+				worker.metrics.clone(),
+				delay_between_blocks,
+			);
+			futures::pin_mut!(block_import_process);
+
+			loop {
+				// If the results sender is closed, that means that the import queue is shutting
+				// down and we should end this future.
+ if worker.result_sender.is_closed() { + log::debug!( + target: "block-import", + "Stopping block import because result channel was closed!", + ); + return + } + + // Make sure to first process all justifications + while let Poll::Ready(justification) = futures::poll!(justification_port.next()) { + match justification { + Some(ImportJustification(who, hash, number, justification)) => + worker.import_justification(who, hash, number, justification).await, + None => { + log::debug!( + target: "block-import", + "Stopping block import because justification channel was closed!", + ); + return + }, + } + } + + if let Poll::Ready(()) = futures::poll!(&mut block_import_process) { + return + } + + // All futures that we polled are now pending. + futures::pending!() + } + }; + + (future, justification_sender, block_import_sender) + } + + async fn import_justification( + &mut self, + who: Origin, + hash: B::Hash, + number: NumberFor, + justification: Justification, + ) { + let started = std::time::Instant::now(); + + let success = match self.justification_import.as_mut() { + Some(justification_import) => justification_import + .import_justification(hash, number, justification) + .await + .map_err(|e| { + debug!( + target: "sync", + "Justification import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", + e, + hash, + number, + who, + ); + e + }) + .is_ok(), + None => false, + }; + + if let Some(metrics) = self.metrics.as_ref() { + metrics.justification_import_time.observe(started.elapsed().as_secs_f64()); + } + + self.result_sender.justification_imported(who, &hash, number, success); + } +} + +/// Result of [`import_many_blocks`]. +struct ImportManyBlocksResult { + /// The number of blocks imported successfully. + imported: usize, + /// The total number of blocks processed. + block_count: usize, + /// The import results for each block. + results: Vec<(Result>, BlockImportError>, B::Hash)>, +} + +/// Import several blocks at once, returning import result for each block. +/// +/// This will yield after each imported block once, to ensure that other futures can +/// be called as well. +async fn import_many_blocks, Transaction: Send + 'static>( + import_handle: &mut BoxBlockImport, + blocks_origin: BlockOrigin, + blocks: Vec>, + verifier: &mut V, + delay_between_blocks: Duration, + metrics: Option, +) -> ImportManyBlocksResult { + let count = blocks.len(); + + let blocks_range = match ( + blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), + blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + + trace!(target: "sync", "Starting import of {} blocks {}", count, blocks_range); + + let mut imported = 0; + let mut results = vec![]; + let mut has_error = false; + let mut blocks = blocks.into_iter(); + + // Blocks in the response/drain should be in ascending order. + loop { + // Is there any block left to import? + let block = match blocks.next() { + Some(b) => b, + None => { + // No block left to import, success! + return ImportManyBlocksResult { block_count: count, imported, results } + }, + }; + + let block_number = block.header.as_ref().map(|h| h.number().clone()); + let block_hash = block.hash; + let import_result = if has_error { + Err(BlockImportError::Cancelled) + } else { + // The actual import. 
+ import_single_block_metered( + import_handle, + blocks_origin.clone(), + block, + verifier, + metrics.clone(), + ) + .await + }; + + if let Some(metrics) = metrics.as_ref() { + metrics.report_import::(&import_result); + } + + if import_result.is_ok() { + trace!( + target: "sync", + "Block imported successfully {:?} ({})", + block_number, + block_hash, + ); + imported += 1; + } else { + has_error = true; + } + + results.push((import_result, block_hash)); + + if delay_between_blocks != Duration::default() && !has_error { + Delay::new(delay_between_blocks).await; + } else { + Yield::new().await + } + } +} + +/// A future that will always `yield` on the first call of `poll` but schedules the +/// current task for re-execution. +/// +/// This is done by getting the waker and calling `wake_by_ref` followed by returning +/// `Pending`. The next time the `poll` is called, it will return `Ready`. +struct Yield(bool); + +impl Yield { + fn new() -> Self { + Self(false) + } +} + +impl Future for Yield { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { + if !self.0 { + self.0 = true; + cx.waker().wake_by_ref(); + Poll::Pending + } else { + Poll::Ready(()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, + }, + import_queue::{CacheKeyId, Verifier}, + }; + use futures::{executor::block_on, Future}; + use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header}; + use std::collections::HashMap; + + #[async_trait::async_trait] + impl Verifier for () { + async fn verify( + &mut self, + block: BlockImportParams, + ) -> Result<(BlockImportParams, Option)>>), String> { + Ok((BlockImportParams::new(block.origin, block.header), None)) + } + } + + #[async_trait::async_trait] + impl BlockImport for () { + type Error = sp_consensus::Error; + type Transaction = Extrinsic; + + async fn check_block( + &mut self, + _block: BlockCheckParams, + ) -> Result { + Ok(ImportResult::imported(false)) + } + + async fn import_block( + &mut self, + _block: BlockImportParams, + _cache: HashMap>, + ) -> Result { + Ok(ImportResult::imported(true)) + } + } + + #[async_trait::async_trait] + impl JustificationImport for () { + type Error = sp_consensus::Error; + + async fn on_start(&mut self) -> Vec<(Hash, BlockNumber)> { + Vec::new() + } + + async fn import_justification( + &mut self, + _hash: Hash, + _number: BlockNumber, + _justification: Justification, + ) -> Result<(), Self::Error> { + Ok(()) + } + } + + #[derive(Debug, PartialEq)] + enum Event { + JustificationImported(Hash), + BlockImported(Hash), + } + + #[derive(Default)] + struct TestLink { + events: Vec, + } + + impl Link for TestLink { + fn blocks_processed( + &mut self, + _imported: usize, + _count: usize, + results: Vec<(Result, BlockImportError>, Hash)>, + ) { + if let Some(hash) = results.into_iter().find_map(|(r, h)| r.ok().map(|_| h)) { + self.events.push(Event::BlockImported(hash)); + } + } + + fn justification_imported( + &mut self, + _who: Origin, + hash: &Hash, + _number: BlockNumber, + _success: bool, + ) { + self.events.push(Event::JustificationImported(hash.clone())) + } + } + + #[test] + fn prioritizes_finality_work_over_block_import() { + let (result_sender, mut result_port) = buffered_link::buffered_link(); + + let (worker, mut finality_sender, mut block_import_sender) = + BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None); + 
futures::pin_mut!(worker); + + let mut import_block = |n| { + let header = Header { + parent_hash: Hash::random(), + number: n, + extrinsics_root: Hash::random(), + state_root: Default::default(), + digest: Default::default(), + }; + + let hash = header.hash(); + + block_on(block_import_sender.send(worker_messages::ImportBlocks( + BlockOrigin::Own, + vec![IncomingBlock { + hash, + header: Some(header), + body: None, + indexed_body: None, + justifications: None, + origin: None, + allow_missing_state: false, + import_existing: false, + state: None, + skip_execution: false, + }], + ))) + .unwrap(); + + hash + }; + + let mut import_justification = || { + let hash = Hash::random(); + block_on(finality_sender.send(worker_messages::ImportJustification( + libp2p::PeerId::random(), + hash, + 1, + (*b"TEST", Vec::new()), + ))) + .unwrap(); + + hash + }; + + let mut link = TestLink::default(); + + // we send a bunch of tasks to the worker + let block1 = import_block(1); + let block2 = import_block(2); + let block3 = import_block(3); + let justification1 = import_justification(); + let justification2 = import_justification(); + let block4 = import_block(4); + let block5 = import_block(5); + let block6 = import_block(6); + let justification3 = import_justification(); + + // we poll the worker until we have processed 9 events + block_on(futures::future::poll_fn(|cx| { + while link.events.len() < 9 { + match Future::poll(Pin::new(&mut worker), cx) { + Poll::Pending => {}, + Poll::Ready(()) => panic!("import queue worker should not conclude."), + } + + result_port.poll_actions(cx, &mut link).unwrap(); + } + + Poll::Ready(()) + })); + + // all justification tasks must be done before any block import work + assert_eq!( + link.events, + vec![ + Event::JustificationImported(justification1), + Event::JustificationImported(justification2), + Event::JustificationImported(justification3), + Event::BlockImported(block1), + Event::BlockImported(block2), + Event::BlockImported(block3), + Event::BlockImported(block4), + Event::BlockImported(block5), + Event::BlockImported(block6), + ] + ); + } +} diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs similarity index 62% rename from primitives/consensus/common/src/import_queue/buffered_link.rs rename to client/consensus/common/src/import_queue/buffered_link.rs index a37d4c53c2603..87ea6dde5c473 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -1,19 +1,20 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . //! Provides the `buffered_link` utility. //! @@ -22,8 +23,8 @@ //! # Example //! //! ``` -//! use sp_consensus::import_queue::Link; -//! # use sp_consensus::import_queue::buffered_link::buffered_link; +//! use sc_consensus::import_queue::Link; +//! # use sc_consensus::import_queue::buffered_link::buffered_link; //! # use sp_test_primitives::Block; //! # struct DummyLink; impl Link for DummyLink {} //! # let mut my_link = DummyLink; @@ -36,13 +37,17 @@ //! std::task::Poll::Pending::<()> //! }); //! ``` -//! +use crate::import_queue::{Link, Origin}; use futures::prelude::*; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; -use std::{pin::Pin, task::Context, task::Poll}; -use crate::import_queue::{Origin, Link, BlockImportResult, BlockImportError}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use super::BlockImportResult; /// Wraps around an unbounded channel from the `futures` crate. The sender implements `Link` and /// can be used to buffer commands, and the receiver can be used to poll said commands and transfer @@ -70,19 +75,15 @@ impl BufferedLinkSender { impl Clone for BufferedLinkSender { fn clone(&self) -> Self { - BufferedLinkSender { - tx: self.tx.clone(), - } + BufferedLinkSender { tx: self.tx.clone() } } } /// Internal buffered message. 
enum BlockImportWorkerMsg { - BlocksProcessed(usize, usize, Vec<(Result>, BlockImportError>, B::Hash)>), + BlocksProcessed(usize, usize, Vec<(BlockImportResult, B::Hash)>), JustificationImported(Origin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), - FinalityProofImported(Origin, (B::Hash, NumberFor), Result<(B::Hash, NumberFor), ()>), - RequestFinalityProof(B::Hash, NumberFor), } impl Link for BufferedLinkSender { @@ -90,9 +91,11 @@ impl Link for BufferedLinkSender { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(BlockImportResult, B::Hash)>, ) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::BlocksProcessed(imported, count, results)); + let _ = self + .tx + .unbounded_send(BlockImportWorkerMsg::BlocksProcessed(imported, count, results)); } fn justification_imported( @@ -100,28 +103,16 @@ impl Link for BufferedLinkSender { who: Origin, hash: &B::Hash, number: NumberFor, - success: bool + success: bool, ) { let msg = BlockImportWorkerMsg::JustificationImported(who, hash.clone(), number, success); let _ = self.tx.unbounded_send(msg); } fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestJustification(hash.clone(), number)); - } - - fn finality_proof_imported( - &mut self, - who: Origin, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - let msg = BlockImportWorkerMsg::FinalityProofImported(who, request_block, finalization_result); - let _ = self.tx.unbounded_send(msg); - } - - fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestFinalityProof(hash.clone(), number)); + let _ = self + .tx + .unbounded_send(BlockImportWorkerMsg::RequestJustification(hash.clone(), number)); } } @@ -154,10 +145,6 @@ impl BufferedLinkReceiver { link.justification_imported(who, &hash, number, success), BlockImportWorkerMsg::RequestJustification(hash, number) => link.request_justification(&hash, number), - BlockImportWorkerMsg::FinalityProofImported(who, block, result) => - link.finality_proof_imported(who, block, result), - BlockImportWorkerMsg::RequestFinalityProof(hash, number) => - link.request_finality_proof(&hash, number), } } } diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 1d9b072cfe964..640bad237e882 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -1,20 +1,39 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Collection of common consensus specific implementations + +pub mod block_import; +pub mod import_queue; +pub mod metrics; + +pub use block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, + ImportedAux, ImportedState, JustificationImport, JustificationSyncLink, StateAction, + StorageChanges, +}; +pub use import_queue::{ + import_single_block, BasicQueue, BlockImportError, BlockImportStatus, BoxBlockImport, + BoxJustificationImport, DefaultImportQueue, ImportQueue, IncomingBlock, Link, Verifier, +}; + mod longest_chain; +pub mod shared_data; + pub use longest_chain::LongestChain; diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs index 981dbad0f6070..b1f7f94f9eb28 100644 --- a/client/consensus/common/src/longest_chain.rs +++ b/client/consensus/common/src/longest_chain.rs @@ -1,69 +1,69 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . + //! 
Longest chain implementation -use std::sync::Arc; -use std::marker::PhantomData; use sc_client_api::backend; -use sp_consensus::{SelectChain, Error as ConsensusError}; use sp_blockchain::{Backend, HeaderBackend}; +use sp_consensus::{Error as ConsensusError, SelectChain}; use sp_runtime::{ - traits::{NumberFor, Block as BlockT}, generic::BlockId, + traits::{Block as BlockT, NumberFor}, }; +use std::{marker::PhantomData, sync::Arc}; /// Implement Longest Chain Select implementation /// where 'longest' is defined as the highest number of blocks pub struct LongestChain { backend: Arc, - _phantom: PhantomData + _phantom: PhantomData, } impl Clone for LongestChain { fn clone(&self) -> Self { let backend = self.backend.clone(); - LongestChain { - backend, - _phantom: Default::default() - } + LongestChain { backend, _phantom: Default::default() } } } impl LongestChain - where - B: backend::Backend, - Block: BlockT, +where + B: backend::Backend, + Block: BlockT, { /// Instantiate a new LongestChain for Backend B pub fn new(backend: Arc) -> Self { - LongestChain { - backend, - _phantom: Default::default() - } + LongestChain { backend, _phantom: Default::default() } } fn best_block_header(&self) -> sp_blockchain::Result<::Header> { let info = self.backend.blockchain().info(); let import_lock = self.backend.get_import_lock(); - let best_hash = self.backend + let best_hash = self + .backend .blockchain() .best_containing(info.best_hash, None, import_lock)? .unwrap_or(info.best_hash); - Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))? + Ok(self + .backend + .blockchain() + .header(BlockId::Hash(best_hash))? .expect("given block hash was fetched from block in db; qed")) } @@ -72,30 +72,30 @@ impl LongestChain } } +#[async_trait::async_trait] impl SelectChain for LongestChain - where - B: backend::Backend, - Block: BlockT, +where + B: backend::Backend, + Block: BlockT, { - - fn leaves(&self) -> Result::Hash>, ConsensusError> { - LongestChain::leaves(self) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) + async fn leaves(&self) -> Result::Hash>, ConsensusError> { + LongestChain::leaves(self).map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } - fn best_chain(&self) -> Result<::Header, ConsensusError> - { + async fn best_chain(&self) -> Result<::Header, ConsensusError> { LongestChain::best_block_header(&self) .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } - fn finality_target( + async fn finality_target( &self, target_hash: Block::Hash, - maybe_max_number: Option> + maybe_max_number: Option>, ) -> Result, ConsensusError> { let import_lock = self.backend.get_import_lock(); - self.backend.blockchain().best_containing(target_hash, maybe_max_number, import_lock) + self.backend + .blockchain() + .best_containing(target_hash, maybe_max_number, import_lock) .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } -} \ No newline at end of file +} diff --git a/primitives/consensus/common/src/metrics.rs b/client/consensus/common/src/metrics.rs similarity index 57% rename from primitives/consensus/common/src/metrics.rs rename to client/consensus/common/src/metrics.rs index a35b7c4968f7f..ade45e3ffb687 100644 --- a/primitives/consensus/common/src/metrics.rs +++ b/client/consensus/common/src/metrics.rs @@ -1,28 +1,31 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Metering tools for consensus use prometheus_endpoint::{ - register, U64, Registry, PrometheusError, Opts, CounterVec, Histogram, HistogramVec, HistogramOpts + register, CounterVec, Histogram, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, + U64, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use crate::import_queue::{BlockImportResult, BlockImportError}; +use crate::import_queue::{BlockImportError, BlockImportStatus}; /// Generic Prometheus metrics for common consensus functionality. #[derive(Clone)] @@ -30,7 +33,6 @@ pub(crate) struct Metrics { pub import_queue_processed: CounterVec, pub block_verification_time: HistogramVec, pub block_verification_and_import_time: Histogram, - pub finality_proof_import_time: Histogram, pub justification_import_time: Histogram, } @@ -40,45 +42,29 @@ impl Metrics { import_queue_processed: register( CounterVec::new( Opts::new("import_queue_processed_total", "Blocks processed by import queue"), - &["result"] // 'success or failure + &["result"], // 'success or failure )?, registry, )?, block_verification_time: register( HistogramVec::new( - HistogramOpts::new( - "block_verification_time", - "Time taken to verify blocks", - ), + HistogramOpts::new("block_verification_time", "Time taken to verify blocks"), &["result"], )?, registry, )?, block_verification_and_import_time: register( - Histogram::with_opts( - HistogramOpts::new( - "block_verification_and_import_time", - "Time taken to verify and import blocks", - ), - )?, - registry, - )?, - finality_proof_import_time: register( - Histogram::with_opts( - HistogramOpts::new( - "finality_proof_import_time", - "Time taken to import finality proofs", - ), - )?, + Histogram::with_opts(HistogramOpts::new( + "block_verification_and_import_time", + "Time taken to verify and import blocks", + ))?, registry, )?, justification_import_time: register( - Histogram::with_opts( - HistogramOpts::new( - "justification_import_time", - "Time taken to import justifications", - ), - )?, + Histogram::with_opts(HistogramOpts::new( + "justification_import_time", + "Time taken to import justifications", + ))?, registry, )?, }) @@ -86,12 +72,12 @@ impl Metrics { pub fn report_import( &self, - result: &Result>, BlockImportError>, + result: &Result>, BlockImportError>, ) { let label = match result { Ok(_) => "success", Err(BlockImportError::IncompleteHeader(_)) => "incomplete_header", - Err(BlockImportError::VerificationFailed(_,_)) => "verification_failed", + Err(BlockImportError::VerificationFailed(_, _)) => "verification_failed", Err(BlockImportError::BadBlock(_)) => "bad_block", Err(BlockImportError::MissingState) => "missing_state", 
 			Err(BlockImportError::UnknownParent) => "unknown_parent",
@@ -99,15 +85,13 @@ impl Metrics {
 			Err(BlockImportError::Other(_)) => "failed",
 		};
 
-		self.import_queue_processed.with_label_values(
-			&[label]
-		).inc();
+		self.import_queue_processed.with_label_values(&[label]).inc();
 	}
 
 	pub fn report_verification(&self, success: bool, time: std::time::Duration) {
-		self.block_verification_time.with_label_values(
-			&[if success { "success" } else { "verification_failed" }]
-		).observe(time.as_secs_f64());
+		self.block_verification_time
+			.with_label_values(&[if success { "success" } else { "verification_failed" }])
+			.observe(time.as_secs_f64());
 	}
 
 	pub fn report_verification_and_import(&self, time: std::time::Duration) {
diff --git a/client/consensus/common/src/shared_data.rs b/client/consensus/common/src/shared_data.rs
new file mode 100644
index 0000000000000..7a25660e08aac
--- /dev/null
+++ b/client/consensus/common/src/shared_data.rs
@@ -0,0 +1,265 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Provides a generic wrapper around shared data. See [`SharedData`] for more information.
+
+use parking_lot::{Condvar, MappedMutexGuard, Mutex, MutexGuard};
+use std::sync::Arc;
+
+/// Created by [`SharedDataLocked::release_mutex`].
+///
+/// As long as the object isn't dropped, the shared data is locked. It is advised to drop this
+/// object when the shared data doesn't need to be locked anymore. To get access to the shared
+/// data, [`Self::upgrade`] is provided.
+#[must_use = "Shared data will be unlocked on drop!"]
+pub struct SharedDataLockedUpgradable<T> {
+	shared_data: SharedData<T>,
+}
+
+impl<T> SharedDataLockedUpgradable<T> {
+	/// Upgrade to a *real* mutex guard that will give access to the inner data.
+	///
+	/// Every call to this function will reacquire the mutex.
+	pub fn upgrade(&mut self) -> MappedMutexGuard<T> {
+		MutexGuard::map(self.shared_data.inner.lock(), |i| &mut i.shared_data)
+	}
+}
+
+impl<T> Drop for SharedDataLockedUpgradable<T> {
+	fn drop(&mut self) {
+		let mut inner = self.shared_data.inner.lock();
+		// It should not be locked anymore.
+		inner.locked = false;
+
+		// Notify all waiting threads.
+		self.shared_data.cond_var.notify_all();
+	}
+}
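What the upgradable handle buys us: the logical lock can be held across an `await`, which a plain `MutexGuard` would forbid since it isn't `Send`. A hedged sketch, using `futures::executor::block_on` purely as a driver:

use sc_consensus::shared_data::SharedData;

async fn update_while_waiting(shared: SharedData<Vec<u32>>) {
	// Take the mutex and immediately trade the guard for a `Send`-able
	// handle; the data stays logically locked.
	let locked = shared.shared_data_locked();
	let mut handle = locked.release_mutex();

	// Safe to await here: no mutex guard is held across this point.
	futures::future::ready(()).await;

	// Reacquire the mutex and mutate; other threads were blocked meanwhile.
	handle.upgrade().push(42);
	// Dropping `handle` releases the logical lock and wakes waiters.
}

fn main() {
	let shared = SharedData::new(Vec::new());
	futures::executor::block_on(update_while_waiting(shared.clone()));
	assert_eq!(*shared.shared_data(), vec![42]);
}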
+
+/// Created by [`SharedData::shared_data_locked`].
+///
+/// As long as this object isn't dropped, the shared data is held in a mutex guard and the shared
+/// data is tagged as locked. Access to the shared data is provided through
+/// [`Deref`](std::ops::Deref) and [`DerefMut`](std::ops::DerefMut). The trick is to use
+/// [`Self::release_mutex`] to release the mutex, but still keep the shared data locked. This means
+/// every other thread trying to access the shared data in this time will need to wait until this
+/// lock is freed.
+///
+/// If this object is dropped without calling [`Self::release_mutex`], the lock will be released
+/// immediately.
+#[must_use = "Shared data will be unlocked on drop!"]
+pub struct SharedDataLocked<'a, T> {
+	/// The current active mutex guard holding the inner data.
+	inner: MutexGuard<'a, SharedDataInner<T>>,
+	/// The [`SharedData`] instance that created this instance.
+	///
+	/// This instance is only taken on drop or when calling [`Self::release_mutex`].
+	shared_data: Option<SharedData<T>>,
+}
+
+impl<'a, T> SharedDataLocked<'a, T> {
+	/// Release the mutex, but keep the shared data locked.
+	pub fn release_mutex(mut self) -> SharedDataLockedUpgradable<T> {
+		SharedDataLockedUpgradable {
+			shared_data: self.shared_data.take().expect("`shared_data` is only taken on drop; qed"),
+		}
+	}
+}
+
+impl<'a, T> Drop for SharedDataLocked<'a, T> {
+	fn drop(&mut self) {
+		if let Some(shared_data) = self.shared_data.take() {
+			// If the `shared_data` is still set, it means [`Self::release_mutex`] wasn't
+			// called and the lock should be released.
+			self.inner.locked = false;
+
+			// Notify all waiting threads about the released lock.
+			shared_data.cond_var.notify_all();
+		}
+	}
+}
+
+impl<'a, T> std::ops::Deref for SharedDataLocked<'a, T> {
+	type Target = T;
+
+	fn deref(&self) -> &Self::Target {
+		&self.inner.shared_data
+	}
+}
+
+impl<'a, T> std::ops::DerefMut for SharedDataLocked<'a, T> {
+	fn deref_mut(&mut self) -> &mut Self::Target {
+		&mut self.inner.shared_data
+	}
+}
+
+/// Holds the shared data and whether the shared data is currently locked.
+///
+/// For more information see [`SharedData`].
+struct SharedDataInner<T> {
+	/// The actual shared data that is protected here against concurrent access.
+	shared_data: T,
+	/// Is `shared_data` currently locked and cannot be accessed?
+	locked: bool,
+}
+
+/// Some shared data that provides support for locking this shared data for some time.
+///
+/// When working with consensus engines, there is often data that needs to be shared between
+/// multiple parts of the system, like block production and block import. This struct provides
+/// an abstraction for such shared data in a generic way.
+///
+/// The pain point when sharing this data is often the use of mutex guards in an async context,
+/// which doesn't work for most mutex implementations because their guards don't implement
+/// `Send`. This abstraction provides a way to keep the shared data locked without holding the
+/// mutex itself, so the data stays locked while the lock can be held across an `await` call.
+///
+/// # Example
+///
+/// ```
+/// # use sc_consensus::shared_data::SharedData;
+///
+/// let shared_data = SharedData::new(String::from("hello world"));
+///
+/// let lock = shared_data.shared_data_locked();
+///
+/// let shared_data2 = shared_data.clone();
+/// let join_handle1 = std::thread::spawn(move || {
+/// 	// This will need to wait for the outer lock to be released before it can access the data.
+/// 	shared_data2.shared_data().push_str("1");
+/// });
+///
+/// assert_eq!(*lock, "hello world");
+///
+/// // Let us release the mutex, but we still keep it locked.
+/// // Now we could call `await` for example.
+/// let mut lock = lock.release_mutex();
+///
+/// let shared_data2 = shared_data.clone();
+/// let join_handle2 = std::thread::spawn(move || {
+/// 	shared_data2.shared_data().push_str("2");
+/// });
+///
+/// // We still have the lock and can upgrade it to access the data.
+/// assert_eq!(*lock.upgrade(), "hello world"); +/// lock.upgrade().push_str("3"); +/// +/// drop(lock); +/// join_handle1.join().unwrap(); +/// join_handle2.join().unwrap(); +/// +/// let data = shared_data.shared_data(); +/// // As we don't know the order of the threads, we need to check for both combinations +/// assert!(*data == "hello world321" || *data == "hello world312"); +/// ``` +pub struct SharedData { + inner: Arc>>, + cond_var: Arc, +} + +impl Clone for SharedData { + fn clone(&self) -> Self { + Self { inner: self.inner.clone(), cond_var: self.cond_var.clone() } + } +} + +impl SharedData { + /// Create a new instance of [`SharedData`] to share the given `shared_data`. + pub fn new(shared_data: T) -> Self { + Self { + inner: Arc::new(Mutex::new(SharedDataInner { shared_data, locked: false })), + cond_var: Default::default(), + } + } + + /// Acquire access to the shared data. + /// + /// This will give mutable access to the shared data. After the returned mutex guard is dropped, + /// the shared data is accessible by other threads. So, this function should be used when + /// reading/writing of the shared data in a local context is required. + /// + /// When requiring to lock shared data for some longer time, even with temporarily releasing the + /// lock, [`Self::shared_data_locked`] should be used. + pub fn shared_data(&self) -> MappedMutexGuard { + let mut guard = self.inner.lock(); + + while guard.locked { + self.cond_var.wait(&mut guard); + } + + debug_assert!(!guard.locked); + + MutexGuard::map(guard, |i| &mut i.shared_data) + } + + /// Acquire access to the shared data and lock it. + /// + /// This will give mutable access to the shared data. The returned [`SharedDataLocked`] + /// provides the function [`SharedDataLocked::release_mutex`] to release the mutex, but + /// keeping the data locked. This is useful in async contexts for example where the data needs + /// to be locked, but a mutex guard can not be held. + /// + /// For an example see [`SharedData`]. 
+ pub fn shared_data_locked(&self) -> SharedDataLocked { + let mut guard = self.inner.lock(); + + while guard.locked { + self.cond_var.wait(&mut guard); + } + + debug_assert!(!guard.locked); + guard.locked = true; + + SharedDataLocked { inner: guard, shared_data: Some(self.clone()) } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn shared_data_locking_works() { + const THREADS: u32 = 100; + let shared_data = SharedData::new(0u32); + + let lock = shared_data.shared_data_locked(); + + for i in 0..THREADS { + let data = shared_data.clone(); + std::thread::spawn(move || { + if i % 2 == 1 { + *data.shared_data() += 1; + } else { + let mut lock = data.shared_data_locked().release_mutex(); + // Give the other threads some time to wake up + std::thread::sleep(std::time::Duration::from_millis(10)); + *lock.upgrade() += 1; + } + }); + } + + let lock = lock.release_mutex(); + std::thread::sleep(std::time::Duration::from_millis(100)); + drop(lock); + + while *shared_data.shared_data() < THREADS { + std::thread::sleep(std::time::Duration::from_millis(100)); + } + } +} diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index d50ec29ed9c6e..78e5cc31ea07e 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-epochs" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" @@ -13,9 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -parking_lot = "0.10.0" -fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } -sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0"} -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" , version = "2.0.0"} +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" , version = "4.0.0-dev"} +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" , version = "4.0.0-dev"} +sc-consensus = { path = "../common" , version = "0.10.0-dev"} diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index acb07dd668a3c..f3cfc55bae69b 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -1,30 +1,35 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Generic utilities for epoch-based consensus engines. pub mod migration; -use std::{sync::Arc, ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; -use parking_lot::Mutex; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use fork_tree::ForkTree; use sc_client_api::utils::is_descendent_of; -use sp_blockchain::{HeaderMetadata, HeaderBackend, Error as ClientError}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; +use std::{ + borrow::{Borrow, BorrowMut}, + collections::BTreeMap, + ops::{Add, Sub}, +}; /// A builder for `is_descendent_of` functions. pub trait IsDescendentOfBuilder { @@ -40,8 +45,7 @@ pub trait IsDescendentOfBuilder { /// details aren't yet stored, but its parent is. /// /// The format of `current` when `Some` is `(current, current_parent)`. - fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) - -> Self::IsDescendentOf; + fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) -> Self::IsDescendentOf; } /// Produce a descendent query object given the client. @@ -54,16 +58,18 @@ pub fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder< pub struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); impl<'a, H, Block> IsDescendentOfBuilder - for HeaderBackendDescendentBuilder<&'a H, Block> where - H: HeaderBackend + HeaderMetadata, + for HeaderBackendDescendentBuilder<&'a H, Block> +where + H: HeaderBackend + HeaderMetadata, Block: BlockT, { type Error = ClientError; type IsDescendentOf = Box Result + 'a>; - fn build_is_descendent_of(&self, current: Option<(Block::Hash, Block::Hash)>) - -> Self::IsDescendentOf - { + fn build_is_descendent_of( + &self, + current: Option<(Block::Hash, Block::Hash)>, + ) -> Self::IsDescendentOf { Box::new(is_descendent_of(self.0, current)) } } @@ -76,23 +82,20 @@ pub trait Epoch { /// Descriptor for the next epoch. type NextEpochDescriptor; /// Type of the slot number. - type SlotNumber: Ord + Copy; + type Slot: Ord + Copy; /// The starting slot of the epoch. - fn start_slot(&self) -> Self::SlotNumber; + fn start_slot(&self) -> Self::Slot; /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. - fn end_slot(&self) -> Self::SlotNumber; + fn end_slot(&self) -> Self::Slot; /// Increment the epoch data, using the next epoch descriptor. fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; } impl<'a, E: Epoch> From<&'a E> for EpochHeader { fn from(epoch: &'a E) -> EpochHeader { - Self { - start_slot: epoch.start_slot(), - end_slot: epoch.end_slot(), - } + Self { start_slot: epoch.start_slot(), end_slot: epoch.end_slot() } } } @@ -100,18 +103,15 @@ impl<'a, E: Epoch> From<&'a E> for EpochHeader { #[derive(Eq, PartialEq, Encode, Decode, Debug)] pub struct EpochHeader { /// The starting slot of the epoch. - pub start_slot: E::SlotNumber, + pub start_slot: E::Slot, /// The end slot of the epoch. This is NOT inclusive to the epoch, /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. 
- pub end_slot: E::SlotNumber, + pub end_slot: E::Slot, } impl Clone for EpochHeader { fn clone(&self) -> Self { - Self { - start_slot: self.start_slot, - end_slot: self.end_slot, - } + Self { start_slot: self.start_slot, end_slot: self.end_slot } } } @@ -148,7 +148,8 @@ pub enum ViableEpoch { Signaled(ERef), } -impl AsRef for ViableEpoch where +impl AsRef for ViableEpoch +where ERef: Borrow, { fn as_ref(&self) -> &E { @@ -159,7 +160,8 @@ impl AsRef for ViableEpoch where } } -impl AsMut for ViableEpoch where +impl AsMut for ViableEpoch +where ERef: BorrowMut, { fn as_mut(&mut self) -> &mut E { @@ -170,7 +172,8 @@ impl AsMut for ViableEpoch where } } -impl ViableEpoch where +impl ViableEpoch +where E: Epoch + Clone, ERef: Borrow, { @@ -186,18 +189,14 @@ impl ViableEpoch where /// Get cloned value for the viable epoch. pub fn into_cloned(self) -> ViableEpoch { match self { - ViableEpoch::UnimportedGenesis(e) => - ViableEpoch::UnimportedGenesis(e), + ViableEpoch::UnimportedGenesis(e) => ViableEpoch::UnimportedGenesis(e), ViableEpoch::Signaled(e) => ViableEpoch::Signaled(e.borrow().clone()), } } /// Increment the epoch, yielding an `IncrementedEpoch` to be imported /// into the fork-tree. - pub fn increment( - &self, - next_descriptor: E::NextEpochDescriptor - ) -> IncrementedEpoch { + pub fn increment(&self, next_descriptor: E::NextEpochDescriptor) -> IncrementedEpoch { let next = self.as_ref().increment(next_descriptor); let to_persist = match *self { ViableEpoch::UnimportedGenesis(ref epoch_0) => @@ -213,14 +212,14 @@ impl ViableEpoch where #[derive(PartialEq, Eq, Clone, Debug)] pub enum ViableEpochDescriptor { /// The epoch is an unimported genesis, with given start slot number. - UnimportedGenesis(E::SlotNumber), + UnimportedGenesis(E::Slot), /// The epoch is signaled and has been imported, with given identifier and header. - Signaled(EpochIdentifier, EpochHeader) + Signaled(EpochIdentifier, EpochHeader), } impl ViableEpochDescriptor { /// Start slot of the descriptor. - pub fn start_slot(&self) -> E::SlotNumber { + pub fn start_slot(&self) -> E::Slot { match self { Self::UnimportedGenesis(start_slot) => *start_slot, Self::Signaled(_, header) => header.start_slot, @@ -229,7 +228,7 @@ impl ViableEpochDescriptor { } /// Persisted epoch stored in EpochChanges. -#[derive(Clone, Encode, Decode, Debug)] +#[derive(Clone, Encode, Decode)] pub enum PersistedEpoch { /// Genesis persisted epoch data. epoch_0, epoch_1. Genesis(E, E), @@ -242,8 +241,7 @@ impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { match epoch { PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()), - PersistedEpoch::Regular(ref epoch_n) => - PersistedEpochHeader::Regular(epoch_n.into()), + PersistedEpoch::Regular(ref epoch_n) => PersistedEpochHeader::Regular(epoch_n.into()), } } } @@ -311,7 +309,8 @@ fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { h } -impl Default for EpochChanges where +impl Default for EpochChanges +where Hash: PartialEq + Ord, Number: Ord, { @@ -320,9 +319,10 @@ impl Default for EpochChanges where } } -impl EpochChanges where +impl EpochChanges +where Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, - Number: Ord + One + Zero + Add + Copy, + Number: Ord + One + Zero + Add + Sub + Copy, { /// Create a new epoch change. pub fn new() -> Self { @@ -336,51 +336,38 @@ impl EpochChanges where } /// Map the epoch changes from one storing data to a different one. 
- pub fn map(self, mut f: F) -> EpochChanges where - B: Epoch, + pub fn map(self, mut f: F) -> EpochChanges + where + B: Epoch, F: FnMut(&Hash, &Number, E) -> B, { EpochChanges { - inner: self.inner.map(&mut |_, _, header| { - match header { - PersistedEpochHeader::Genesis(epoch_0, epoch_1) => { - PersistedEpochHeader::Genesis( - EpochHeader { - start_slot: epoch_0.start_slot, - end_slot: epoch_0.end_slot, - }, - EpochHeader { - start_slot: epoch_1.start_slot, - end_slot: epoch_1.end_slot, - }, - ) - }, - PersistedEpochHeader::Regular(epoch_n) => { - PersistedEpochHeader::Regular( - EpochHeader { - start_slot: epoch_n.start_slot, - end_slot: epoch_n.end_slot, - }, - ) - }, - } + inner: self.inner.map(&mut |_, _, header| match header { + PersistedEpochHeader::Genesis(epoch_0, epoch_1) => PersistedEpochHeader::Genesis( + EpochHeader { start_slot: epoch_0.start_slot, end_slot: epoch_0.end_slot }, + EpochHeader { start_slot: epoch_1.start_slot, end_slot: epoch_1.end_slot }, + ), + PersistedEpochHeader::Regular(epoch_n) => + PersistedEpochHeader::Regular(EpochHeader { + start_slot: epoch_n.start_slot, + end_slot: epoch_n.end_slot, + }), }), - epochs: self.epochs.into_iter().map(|((hash, number), epoch)| { - let bepoch = match epoch { - PersistedEpoch::Genesis(epoch_0, epoch_1) => { - PersistedEpoch::Genesis( + epochs: self + .epochs + .into_iter() + .map(|((hash, number), epoch)| { + let bepoch = match epoch { + PersistedEpoch::Genesis(epoch_0, epoch_1) => PersistedEpoch::Genesis( f(&hash, &number, epoch_0), f(&hash, &number, epoch_1), - ) - }, - PersistedEpoch::Regular(epoch_n) => { - PersistedEpoch::Regular( - f(&hash, &number, epoch_n) - ) - }, - }; - ((hash, number), bepoch) - }).collect(), + ), + PersistedEpoch::Regular(epoch_n) => + PersistedEpoch::Regular(f(&hash, &number, epoch_n)), + }; + ((hash, number), bepoch) + }) + .collect(), } } @@ -392,27 +379,19 @@ impl EpochChanges where descendent_of_builder: D, hash: &Hash, number: Number, - slot: E::SlotNumber, + slot: E::Slot, ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(None); + let is_descendent_of = descendent_of_builder.build_is_descendent_of(None); let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(_, ref epoch_1) => - slot >= epoch_1.end_slot, - PersistedEpochHeader::Regular(ref epoch_n) => - slot >= epoch_n.end_slot, + PersistedEpochHeader::Genesis(_, ref epoch_1) => slot >= epoch_1.end_slot, + PersistedEpochHeader::Regular(ref epoch_n) => slot >= epoch_n.end_slot, }; // prune any epochs which could not be _live_ as of the children of the // finalized block, i.e. re-root the fork tree to the oldest ancestor of // (hash, number) where epoch.end_slot() >= finalized_slot - let removed = self.inner.prune( - hash, - &number, - &is_descendent_of, - &predicate, - )?; + let removed = self.inner.prune(hash, &number, &is_descendent_of, &predicate)?; for (hash, number, _) in removed { self.epochs.remove(&(hash, number)); @@ -423,18 +402,18 @@ impl EpochChanges where /// Get a reference to an epoch with given identifier. 
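The pruning predicate above is easy to misread in condensed form. Restated standalone (a sketch reusing the `u64` slots of the `FixedEpoch` example), it answers whether an epoch node has already ended at the given slot, i.e. whether it could still be live as of the finalized block's children:

```rust
// True when `epoch` can no longer be live at `finalized_slot`, i.e. the
// finalized chain has already passed its (exclusive) end slot.
fn ended_by(epoch: &PersistedEpochHeader<FixedEpoch>, finalized_slot: u64) -> bool {
    match epoch {
        // For a genesis node only epoch_1 matters, being the later of the two.
        PersistedEpochHeader::Genesis(_, epoch_1) => finalized_slot >= epoch_1.end_slot,
        PersistedEpochHeader::Regular(epoch_n) => finalized_slot >= epoch_n.end_slot,
    }
}
```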
pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { - self.epochs.get(&(id.hash, id.number)) - .and_then(|v| { - match v { - PersistedEpoch::Genesis(ref epoch_0, _) - if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), - PersistedEpoch::Genesis(_, ref epoch_1) - if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), - PersistedEpoch::Regular(ref epoch_n) - if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), - _ => None, - } - }) + self.epochs.get(&(id.hash, id.number)).and_then(|v| match v { + PersistedEpoch::Genesis(ref epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => + Some(epoch_0), + PersistedEpoch::Genesis(_, ref epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => + Some(epoch_1), + PersistedEpoch::Regular(ref epoch_n) + if id.position == EpochIdentifierPosition::Regular => + Some(epoch_n), + _ => None, + }) } /// Get a reference to a viable epoch with given descriptor. @@ -442,33 +421,32 @@ impl EpochChanges where &self, descriptor: &ViableEpochDescriptor, make_genesis: G, - ) -> Option> where - G: FnOnce(E::SlotNumber) -> E + ) -> Option> + where + G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch(&identifier).map(ViableEpoch::Signaled) - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), + ViableEpochDescriptor::Signaled(identifier, _) => + self.epoch(&identifier).map(ViableEpoch::Signaled), } } /// Get a mutable reference to an epoch with given identifier. pub fn epoch_mut(&mut self, id: &EpochIdentifier) -> Option<&mut E> { - self.epochs.get_mut(&(id.hash, id.number)) - .and_then(|v| { - match v { - PersistedEpoch::Genesis(ref mut epoch_0, _) - if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), - PersistedEpoch::Genesis(_, ref mut epoch_1) - if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), - PersistedEpoch::Regular(ref mut epoch_n) - if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), - _ => None, - } - }) + self.epochs.get_mut(&(id.hash, id.number)).and_then(|v| match v { + PersistedEpoch::Genesis(ref mut epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => + Some(epoch_0), + PersistedEpoch::Genesis(_, ref mut epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => + Some(epoch_1), + PersistedEpoch::Regular(ref mut epoch_n) + if id.position == EpochIdentifierPosition::Regular => + Some(epoch_n), + _ => None, + }) } /// Get a mutable reference to a viable epoch with given descriptor. 
@@ -476,16 +454,15 @@ impl EpochChanges where &mut self, descriptor: &ViableEpochDescriptor, make_genesis: G, - ) -> Option> where - G: FnOnce(E::SlotNumber) -> E + ) -> Option> + where + G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch_mut(&identifier).map(ViableEpoch::Signaled) - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), + ViableEpochDescriptor::Signaled(identifier, _) => + self.epoch_mut(&identifier).map(ViableEpoch::Signaled), } } @@ -496,18 +473,15 @@ impl EpochChanges where pub fn epoch_data( &self, descriptor: &ViableEpochDescriptor, - make_genesis: G - ) -> Option where - G: FnOnce(E::SlotNumber) -> E, + make_genesis: G, + ) -> Option + where + G: FnOnce(E::Slot) -> E, E: Clone, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(make_genesis(*slot_number)) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch(&identifier).cloned() - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => Some(make_genesis(*slot)), + ViableEpochDescriptor::Signaled(identifier, _) => self.epoch(&identifier).cloned(), } } @@ -521,17 +495,18 @@ impl EpochChanges where descendent_of_builder: D, parent_hash: &Hash, parent_number: Number, - slot_number: E::SlotNumber, + slot: E::Slot, make_genesis: G, - ) -> Result, fork_tree::Error> where - G: FnOnce(E::SlotNumber) -> E, + ) -> Result, fork_tree::Error> + where + G: FnOnce(E::Slot) -> E, E: Clone, { let descriptor = self.epoch_descriptor_for_child_of( descendent_of_builder, parent_hash, parent_number, - slot_number + slot, )?; Ok(descriptor.and_then(|des| self.epoch_data(&des, make_genesis))) @@ -546,7 +521,7 @@ impl EpochChanges where descendent_of_builder: D, parent_hash: &Hash, parent_number: Number, - slot_number: E::SlotNumber, + slot: E::Slot, ) -> Result>, fork_tree::Error> { // find_node_where will give you the node in the fork-tree which is an ancestor // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, @@ -554,12 +529,12 @@ impl EpochChanges where // "descends" from our parent-hash. let fake_head_hash = fake_head_hash(parent_hash); - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((fake_head_hash, *parent_hash))); + let is_descendent_of = + descendent_of_builder.build_is_descendent_of(Some((fake_head_hash, *parent_hash))); if parent_number == Zero::zero() { // need to insert the genesis epoch. - return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot_number))) + return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot))) } // We want to find the deepest node in the tree which is an ancestor @@ -568,37 +543,42 @@ impl EpochChanges where // at epoch_1 -- all we're doing here is figuring out which node // we need. 
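The `fake_head_hash` helper this relies on appears truncated earlier in the diff. Its intent, per the comments here, is to produce a hash that has never occurred in the chain, so that the query point behaves like a free-floating child of `parent_hash`. A sketch consistent with that description (which bit gets flipped is an assumption of the sketch):

```rust
fn fake_head_hash<H: AsRef<[u8]> + AsMut<[u8]> + Clone>(parent_hash: &H) -> H {
    let mut h = parent_hash.clone();
    // Flip one bit of the parent hash: assuming a strong hash function,
    // the result has never been in the chain, so it can stand in as a
    // "fake head" whose parent is `parent_hash`.
    h.as_mut()[0] ^= 0b1000_0000;
    h
}
```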
let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(ref epoch_0, _) => - epoch_0.start_slot <= slot_number, - PersistedEpochHeader::Regular(ref epoch_n) => - epoch_n.start_slot <= slot_number, + PersistedEpochHeader::Genesis(ref epoch_0, _) => epoch_0.start_slot <= slot, + PersistedEpochHeader::Regular(ref epoch_n) => epoch_n.start_slot <= slot, }; - self.inner.find_node_where( - &fake_head_hash, - &(parent_number + One::one()), - &is_descendent_of, - &predicate, - ) + self.inner + .find_node_where( + &fake_head_hash, + &(parent_number + One::one()), + &is_descendent_of, + &predicate, + ) .map(|n| { - n.map(|node| (match node.data { - // Ok, we found our node. - // and here we figure out which of the internal epochs - // of a genesis node to use based on their start slot. - PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot <= slot_number { - (EpochIdentifierPosition::Genesis1, epoch_1.clone()) - } else { - (EpochIdentifierPosition::Genesis0, epoch_0.clone()) + n.map(|node| { + ( + match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. + PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => { + if epoch_1.start_slot <= slot { + (EpochIdentifierPosition::Genesis1, epoch_1.clone()) + } else { + (EpochIdentifierPosition::Genesis0, epoch_0.clone()) + } + }, + PersistedEpochHeader::Regular(ref epoch_n) => + (EpochIdentifierPosition::Regular, epoch_n.clone()), }, - PersistedEpochHeader::Regular(ref epoch_n) => - (EpochIdentifierPosition::Regular, epoch_n.clone()), - }, node)).map(|((position, header), node)| { - ViableEpochDescriptor::Signaled(EpochIdentifier { - position, - hash: node.hash, - number: node.number - }, header) + node, + ) + }) + .map(|((position, header), node)| { + ViableEpochDescriptor::Signaled( + EpochIdentifier { position, hash: node.hash, number: node.number }, + header, + ) }) }) } @@ -616,16 +596,11 @@ impl EpochChanges where parent_hash: Hash, epoch: IncrementedEpoch, ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((hash, parent_hash))); + let is_descendent_of = + descendent_of_builder.build_is_descendent_of(Some((hash, parent_hash))); let header = PersistedEpochHeader::::from(&epoch.0); - let res = self.inner.import( - hash, - number, - header, - &is_descendent_of, - ); + let res = self.inner.import(hash, number, header, &is_descendent_of); match res { Ok(_) | Err(fork_tree::Error::Duplicate) => { @@ -640,18 +615,39 @@ impl EpochChanges where pub fn tree(&self) -> &ForkTree> { &self.inner } + + /// Reset to a specified pair of epochs, as if they were announced at blocks `parent_hash` and + /// `hash`. 
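Putting the query and import paths together, the intended call sequence looks as follows. This is condensed from the tests at the end of this file, with `is_descendent_of` a test-style closure over `[u8; 1]` hashes and `FixedEpoch` the sketch from above:

```rust
let make_genesis = |slot| FixedEpoch { start_slot: slot, duration: 100 };
let mut epoch_changes = EpochChanges::<[u8; 1], u64, FixedEpoch>::new();

// 1. Which epoch would a child of block "0" (number 0) occupy at slot 100?
let descriptor = epoch_changes
    .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100)
    .unwrap()
    .unwrap();

// 2. Materialize the epoch, increment it with its next-epoch descriptor,
//    and import the result for block "A", a child of "0".
let incremented = epoch_changes
    .viable_epoch(&descriptor, &make_genesis)
    .unwrap()
    .increment(());
epoch_changes.import(&is_descendent_of, *b"A", 1, *b"0", incremented).unwrap();
```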
+ pub fn reset(&mut self, parent_hash: Hash, hash: Hash, number: Number, current: E, next: E) { + self.inner = ForkTree::new(); + self.epochs.clear(); + let persisted = PersistedEpoch::Regular(current); + let header = PersistedEpochHeader::from(&persisted); + let _res = self.inner.import(parent_hash, number - One::one(), header, &|_, _| { + Ok(false) as Result> + }); + self.epochs.insert((parent_hash, number - One::one()), persisted); + + let persisted = PersistedEpoch::Regular(next); + let header = PersistedEpochHeader::from(&persisted); + let _res = self.inner.import(hash, number, header, &|_, _| { + Ok(true) as Result> + }); + self.epochs.insert((hash, number), persisted); + } } /// Type alias to produce the epoch-changes tree from a block type. -pub type EpochChangesFor = EpochChanges<::Hash, NumberFor, Epoch>; +pub type EpochChangesFor = + EpochChanges<::Hash, NumberFor, Epoch>; /// A shared epoch changes tree. -pub type SharedEpochChanges = Arc>>; +pub type SharedEpochChanges = + sc_consensus::shared_data::SharedData>; #[cfg(test)] mod tests { - use super::*; - use super::Epoch as EpochT; + use super::{Epoch as EpochT, *}; #[derive(Debug, PartialEq)] pub struct TestError; @@ -664,15 +660,14 @@ mod tests { impl std::error::Error for TestError {} - impl<'a, F: 'a , H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F - where F: Fn(&H, &H) -> Result + impl<'a, F: 'a, H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F + where + F: Fn(&H, &H) -> Result, { type Error = TestError; type IsDescendentOf = Box Result + 'a>; - fn build_is_descendent_of(&self, current: Option<(H, H)>) - -> Self::IsDescendentOf - { + fn build_is_descendent_of(&self, current: Option<(H, H)>) -> Self::IsDescendentOf { let f = *self; Box::new(move |base, head| { let mut head = head; @@ -680,7 +675,7 @@ mod tests { if let Some((ref c_head, ref c_parent)) = current { if head == c_head { if base == c_parent { - return Ok(true); + return Ok(true) } else { head = c_parent; } @@ -693,30 +688,27 @@ mod tests { } type Hash = [u8; 1]; - type SlotNumber = u64; + type Slot = u64; #[derive(Debug, Clone, Eq, PartialEq)] struct Epoch { - start_slot: SlotNumber, - duration: SlotNumber, + start_slot: Slot, + duration: Slot, } impl EpochT for Epoch { type NextEpochDescriptor = (); - type SlotNumber = SlotNumber; + type Slot = Slot; fn increment(&self, _: ()) -> Self { - Epoch { - start_slot: self.start_slot + self.duration, - duration: self.duration, - } + Epoch { start_slot: self.start_slot + self.duration, duration: self.duration } } - fn end_slot(&self) -> SlotNumber { + fn end_slot(&self) -> Slot { self.start_slot + self.duration } - fn start_slot(&self) -> SlotNumber { + fn start_slot(&self) -> Slot { self.start_slot } } @@ -738,30 +730,26 @@ mod tests { }; let epoch_changes = EpochChanges::<_, _, Epoch>::new(); - let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 10101, - ).unwrap().unwrap(); + let genesis_epoch = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 10101) + .unwrap() + .unwrap(); match genesis_epoch { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - assert_eq!(slot_number, 10101u64); + ViableEpochDescriptor::UnimportedGenesis(slot) => { + assert_eq!(slot, 10101u64); }, _ => panic!("should be unimported genesis"), }; - let genesis_epoch_2 = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 10102, - ).unwrap().unwrap(); + let genesis_epoch_2 = epoch_changes + 
.epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 10102) + .unwrap() + .unwrap(); match genesis_epoch_2 { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - assert_eq!(slot_number, 10102u64); + ViableEpochDescriptor::UnimportedGenesis(slot) => { + assert_eq!(slot, 10102u64); }, _ => panic!("should be unimported genesis"), }; @@ -783,34 +771,23 @@ mod tests { } }; - let make_genesis = |slot| Epoch { - start_slot: slot, - duration: 100, - }; + let make_genesis = |slot| Epoch { start_slot: slot, duration: 100 }; let mut epoch_changes = EpochChanges::<_, _, Epoch>::new(); - let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - ).unwrap().unwrap(); + let genesis_epoch = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); - let import_epoch_1 = epoch_changes - .viable_epoch(&genesis_epoch, &make_genesis) - .unwrap() - .increment(()); + let import_epoch_1 = + epoch_changes.viable_epoch(&genesis_epoch, &make_genesis).unwrap().increment(()); let epoch_1 = import_epoch_1.as_ref().clone(); - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - import_epoch_1, - ).unwrap(); + epoch_changes + .import(&is_descendent_of, *b"A", 1, *b"0", import_epoch_1) + .unwrap(); let genesis_epoch = epoch_changes.epoch_data(&genesis_epoch, &make_genesis).unwrap(); assert!(is_descendent_of(b"0", b"A").unwrap()); @@ -820,13 +797,10 @@ mod tests { { // x is still within the genesis epoch. - let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot - 1, - &make_genesis, - ).unwrap().unwrap(); + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, end_slot - 1, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(x, genesis_epoch); } @@ -834,13 +808,10 @@ mod tests { { // x is now at the next epoch, because the block is now at the // start slot of epoch 1. - let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot, - &make_genesis, - ).unwrap().unwrap(); + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, end_slot, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(x, epoch_1); } @@ -848,13 +819,16 @@ mod tests { { // x is now at the next epoch, because the block is now after // start slot of epoch 1. 
- let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - epoch_1.end_slot() - 1, - &make_genesis, - ).unwrap().unwrap(); + let x = epoch_changes + .epoch_data_for_child_of( + &is_descendent_of, + b"A", + 1, + epoch_1.end_slot() - 1, + &make_genesis, + ) + .unwrap() + .unwrap(); assert_eq!(x, epoch_1); } @@ -877,90 +851,65 @@ mod tests { let duration = 100; - let make_genesis = |slot| Epoch { - start_slot: slot, - duration, - }; + let make_genesis = |slot| Epoch { start_slot: slot, duration }; let mut epoch_changes = EpochChanges::new(); let next_descriptor = (); // insert genesis epoch for A { - let genesis_epoch_a_descriptor = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - ).unwrap().unwrap(); + let genesis_epoch_a_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() .increment(next_descriptor.clone()); - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - incremented_epoch, - ).unwrap(); + epoch_changes + .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) + .unwrap(); } // insert genesis epoch for X { - let genesis_epoch_x_descriptor = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 1000, - ).unwrap().unwrap(); + let genesis_epoch_x_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 1000) + .unwrap() + .unwrap(); let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() .increment(next_descriptor.clone()); - epoch_changes.import( - &is_descendent_of, - *b"X", - 1, - *b"0", - incremented_epoch, - ).unwrap(); + epoch_changes + .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) + .unwrap(); } // now check that the genesis epochs for our respective block 1s // respect the chain structure. { - let epoch_for_a_child = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - 101, - &make_genesis, - ).unwrap().unwrap(); + let epoch_for_a_child = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, 101, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(epoch_for_a_child, make_genesis(100)); - let epoch_for_x_child = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"X", - 1, - 1001, - &make_genesis, - ).unwrap().unwrap(); + let epoch_for_x_child = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 1001, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(epoch_for_x_child, make_genesis(1000)); - let epoch_for_x_child_before_genesis = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"X", - 1, - 101, - &make_genesis, - ).unwrap(); + let epoch_for_x_child_before_genesis = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 101, &make_genesis) + .unwrap(); // even though there is a genesis epoch at that slot, it's not in // this chain. diff --git a/client/consensus/epochs/src/migration.rs b/client/consensus/epochs/src/migration.rs index e4717b5584e0e..49e08240df8c3 100644 --- a/client/consensus/epochs/src/migration.rs +++ b/client/consensus/epochs/src/migration.rs @@ -1,26 +1,28 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Migration types for epoch changes. -use std::collections::BTreeMap; -use codec::{Encode, Decode}; +use crate::{Epoch, EpochChanges, PersistedEpoch, PersistedEpochHeader}; +use codec::{Decode, Encode}; use fork_tree::ForkTree; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use crate::{Epoch, EpochChanges, PersistedEpoch, PersistedEpochHeader}; +use std::collections::BTreeMap; /// Legacy definition of epoch changes. #[derive(Clone, Encode, Decode)] @@ -29,9 +31,11 @@ pub struct EpochChangesV0 { } /// Type alias for legacy definition of epoch changes. -pub type EpochChangesForV0 = EpochChangesV0<::Hash, NumberFor, Epoch>; +pub type EpochChangesForV0 = + EpochChangesV0<::Hash, NumberFor, Epoch>; -impl EpochChangesV0 where +impl EpochChangesV0 +where Hash: PartialEq + Ord + Copy, Number: Ord + Copy, { diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index dba8121264f40..d9ae8521c12f6 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-manual-seal" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" @@ -14,36 +14,38 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -futures = "0.3.4" -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" +futures = "0.3.9" +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" log = "0.4.8" -parking_lot = "0.10.0" -serde = { version = "1.0", features=["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0" } +serde = { version = "1.0", features = ["derive"] } assert_matches = "1.3.0" +async-trait = "0.1.50" -sc-client-api = { path = "../../api", version = "2.0.0" } -sc-consensus-babe = { path = "../../consensus/babe", version = "0.8.0" } -sc-consensus-epochs = { path = "../../consensus/epochs", version = "0.8.0" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe", version = "0.8.0" } +sc-client-api = { path = "../../api", version = "4.0.0-dev" } +sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } +sc-consensus-babe = { path = "../../consensus/babe", version = "0.10.0-dev" } +sc-consensus-epochs = { path = "../../consensus/epochs", version = "0.10.0-dev" } +sp-consensus-babe = { path = "../../../primitives/consensus/babe", version = "0.10.0-dev" } -sc-transaction-pool = { path = "../../transaction-pool", version = "2.0.0" } -sp-blockchain = { path = "../../../primitives/blockchain", version = "2.0.0" } -sp-consensus = { package = 
"sp-consensus", path = "../../../primitives/consensus/common", version = "0.8.0" } -sp-inherents = { path = "../../../primitives/inherents", version = "2.0.0" } -sp-runtime = { path = "../../../primitives/runtime", version = "2.0.0" } -sp-core = { path = "../../../primitives/core", version = "2.0.0" } -sp-keystore = { path = "../../../primitives/keystore", version = "0.8.0" } -sp-api = { path = "../../../primitives/api", version = "2.0.0" } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", version = "2.0.0" } -sp-timestamp = { path = "../../../primitives/timestamp", version = "2.0.0" } +sc-transaction-pool = { path = "../../transaction-pool", version = "4.0.0-dev" } +sp-blockchain = { path = "../../../primitives/blockchain", version = "4.0.0-dev" } +sp-consensus = { path = "../../../primitives/consensus/common", version = "0.10.0-dev" } +sp-consensus-slots = { path = "../../../primitives/consensus/slots", version = "0.10.0-dev" } +sp-inherents = { path = "../../../primitives/inherents", version = "4.0.0-dev" } +sp-runtime = { path = "../../../primitives/runtime", version = "4.0.0-dev" } +sp-core = { path = "../../../primitives/core", version = "4.0.0-dev" } +sp-keystore = { path = "../../../primitives/keystore", version = "0.10.0-dev" } +sp-api = { path = "../../../primitives/api", version = "4.0.0-dev" } +sc-transaction-pool-api = { path = "../../../client/transaction-pool/api", version = "4.0.0-dev" } +sp-timestamp = { path = "../../../primitives/timestamp", version = "4.0.0-dev" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" } [dev-dependencies] -tokio = { version = "0.2", features = ["rt-core", "macros"] } -sc-basic-authorship = { path = "../../basic-authorship", version = "0.8.0" } +tokio = { version = "1.10.0", features = ["rt-multi-thread", "macros"] } +sc-basic-authorship = { path = "../../basic-authorship", version = "0.10.0-dev" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0" } substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0" } -tempfile = "3.1.0" diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index 7bafeb50207d4..33a4c8616f6d2 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,26 +19,30 @@ //! Extensions for manual seal to produce blocks valid for any runtime. use super::Error; -use sp_runtime::traits::{Block as BlockT, DigestFor}; +use sc_consensus::BlockImportParams; use sp_inherents::InherentData; -use sp_consensus::BlockImportParams; +use sp_runtime::traits::{Block as BlockT, DigestFor}; pub mod babe; -/// Consensus data provider, manual seal uses this trait object for authoring blocks valid +/// Consensus data provider, manual seal uses this trait object for authoring blocks valid /// for any runtime. 
pub trait ConsensusDataProvider: Send + Sync { /// Block import transaction type type Transaction; /// Attempt to create a consensus digest. - fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error>; + fn create_digest( + &self, + parent: &B::Header, + inherents: &InherentData, + ) -> Result, Error>; /// Set up the necessary import params. fn append_block_import( &self, parent: &B::Header, params: &mut BlockImportParams, - inherents: &InherentData + inherents: &InherentData, ) -> Result<(), Error>; } diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index e51eb42e49e13..1d3afe392d62f 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,31 +20,38 @@ use super::ConsensusDataProvider; use crate::Error; - +use codec::Encode; +use sc_client_api::{AuxStore, UsageProvider}; +use sc_consensus_babe::{ + authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Config, Epoch, + INTERMEDIATE_KEY, +}; +use sc_consensus_epochs::{ + descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, +}; +use sp_keystore::SyncCryptoStorePtr; use std::{ - any::Any, borrow::Cow, - sync::{Arc, atomic}, + sync::{atomic, Arc}, time::SystemTime, }; -use sc_client_api::AuxStore; -use sc_consensus_babe::{ - Config, Epoch, authorship, CompatibleDigestItem, BabeIntermediate, - register_babe_inherent_data_provider, INTERMEDIATE_KEY, -}; -use sc_consensus_epochs::{SharedEpochChanges, descendent_query}; +use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_consensus::BlockImportParams; -use sp_consensus_babe::{BabeApi, inherents::BabeInherentData}; -use sp_keystore::SyncCryptoStorePtr; -use sp_inherents::{InherentDataProviders, InherentData, ProvideInherentData, InherentIdentifier}; +use sp_consensus::CacheKeyId; +use sp_consensus_babe::{ + digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, + inherents::BabeInherentData, + AuthorityId, BabeApi, BabeAuthorityWeight, ConsensusLog, BABE_ENGINE_ID, +}; +use sp_consensus_slots::Slot; +use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; use sp_runtime::{ - traits::{DigestItemFor, DigestFor, Block as BlockT, Header as _}, - generic::Digest, + generic::{BlockId, Digest}, + traits::{Block as BlockT, DigestFor, DigestItemFor, Header, Zero}, }; -use sp_timestamp::{InherentType, InherentError, INHERENT_IDENTIFIER}; +use sp_timestamp::{InherentType, TimestampInherentData, INHERENT_IDENTIFIER}; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. @@ -60,99 +67,248 @@ pub struct BabeConsensusDataProvider { /// BABE config, obtained from the runtime. config: Config, + + /// Authorities to be used for this babe chain. + authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, +} + +/// Verifier to be used for babe chains. +pub struct BabeVerifier { + /// Shared epoch changes + epoch_changes: SharedEpochChanges, + + /// Shared reference to the client.
+ client: Arc, +} + +impl BabeVerifier { + /// Create a new verifier. + pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier { + BabeVerifier { epoch_changes, client } + } +} + +/// The BABE-aware verifier for the manual seal engine; it performs no real verification and only attaches the epoch intermediate required by the BABE block import. +#[async_trait::async_trait] +impl Verifier for BabeVerifier +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, +{ + async fn verify( + &mut self, + mut import_params: BlockImportParams, + ) -> Result<(BlockImportParams, Option)>>), String> { + import_params.finalized = false; + import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + let pre_digest = find_pre_digest::(&import_params.header)?; + + let parent_hash = import_params.header.parent_hash(); + let parent = self + .client + .header(BlockId::Hash(*parent_hash)) + .ok() + .flatten() + .ok_or_else(|| format!("header for block {} not found", parent_hash))?; + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + pre_digest.slot(), + ) + .map_err(|e| format!("failed to fetch epoch_descriptor: {}", e))? + .ok_or_else(|| format!("{:?}", sp_consensus::Error::InvalidAuthoritiesSet))?; + // drop the lock + drop(epoch_changes); + + import_params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); + + Ok((import_params, None)) + } } impl BabeConsensusDataProvider - where - B: BlockT, - C: AuxStore + ProvideRuntimeApi, - C::Api: BabeApi, +where + B: BlockT, + C: AuxStore + + HeaderBackend + + ProvideRuntimeApi + + HeaderMetadata + + UsageProvider, + C::Api: BabeApi, { pub fn new( client: Arc, keystore: SyncCryptoStorePtr, - provider: &InherentDataProviders, epoch_changes: SharedEpochChanges, + authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, ) -> Result { - let config = Config::get_or_compute(&*client)?; - let timestamp_provider = SlotTimestampProvider::new(config.slot_duration)?; + if authorities.is_empty() { + return Err(Error::StringError("Cannot supply empty authority set!".into())) + } - provider.register_provider(timestamp_provider)?; - register_babe_inherent_data_provider(provider, config.slot_duration)?; + let config = Config::get_or_compute(&*client)?; - Ok(Self { - config, - client, - keystore, - epoch_changes, - }) + Ok(Self { config, client, keystore, epoch_changes, authorities }) } -} - -impl ConsensusDataProvider for BabeConsensusDataProvider - where - B: BlockT, - C: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - C::Api: BabeApi, -{ - type Transaction = TransactionFor; - - fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error> { - let slot_number = inherents.babe_inherent_data()?; - let epoch_changes = self.epoch_changes.lock(); + fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), - slot_number, + slot, ) .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))?
.ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; let epoch = epoch_changes - .viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot), - ) + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .ok_or_else(|| { log::info!(target: "babe", "create_digest: no viable_epoch :("); sp_consensus::Error::InvalidAuthoritiesSet })?; + Ok(epoch.as_ref().clone()) + } +} + +impl ConsensusDataProvider for BabeConsensusDataProvider +where + B: BlockT, + C: AuxStore + + HeaderBackend + + HeaderMetadata + + UsageProvider + + ProvideRuntimeApi, + C::Api: BabeApi, +{ + type Transaction = TransactionFor; + + fn create_digest( + &self, + parent: &B::Header, + inherents: &InherentData, + ) -> Result, Error> { + let slot = inherents + .babe_inherent_data()? + .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; + let epoch = self.epoch(parent, slot)?; + // this is a dev node environment, we should always be able to claim a slot. - let (predigest, _) = authorship::claim_slot(slot_number, epoch.as_ref(), &self.keystore) - .ok_or_else(|| Error::StringError("failed to claim slot for authorship".into()))?; - - Ok(Digest { - logs: vec![ - as CompatibleDigestItem>::babe_pre_digest(predigest), - ], - }) + let logs = if let Some((predigest, _)) = + authorship::claim_slot(slot, &epoch, &self.keystore) + { + vec![ as CompatibleDigestItem>::babe_pre_digest(predigest)] + } else { + // well we couldn't claim a slot because this is an existing chain and we're not in the + // authorities. we need to tell BabeBlockImport that the epoch has changed, and we put + // ourselves in the authorities. + let predigest = + PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: 0_u32 }); + + let mut epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot, + ) + .map_err(|e| { + Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)) + })? + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + match epoch_descriptor { + ViableEpochDescriptor::Signaled(identifier, _epoch_header) => { + let epoch_mut = epoch_changes + .epoch_mut(&identifier) + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + + // mutate the current epoch + epoch_mut.authorities = self.authorities.clone(); + + let next_epoch = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: self.authorities.clone(), + // copy the old randomness + randomness: epoch_mut.randomness.clone(), + }); + + vec![ + DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()), + DigestItemFor::::Consensus(BABE_ENGINE_ID, next_epoch.encode()), + ] + }, + ViableEpochDescriptor::UnimportedGenesis(_) => { + // since this is the genesis, secondary predigest works for now. + vec![DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode())] + }, + } + }; + + Ok(Digest { logs }) } fn append_block_import( &self, parent: &B::Header, params: &mut BlockImportParams, - inherents: &InherentData + inherents: &InherentData, ) -> Result<(), Error> { - let slot_number = inherents.babe_inherent_data()?; - - let epoch_descriptor = self.epoch_changes.lock() + let slot = inherents + .babe_inherent_data()? 
+ .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; + let epoch_changes = self.epoch_changes.shared_data(); + let mut epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), - slot_number, + slot, ) - .map_err(|e| Error::StringError(format!("failed to fetch epoch data: {}", e)))? + .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; + // drop the lock + drop(epoch_changes); + // a quick check to see if we're in the authorities + let epoch = self.epoch(parent, slot)?; + let (authority, _) = self.authorities.first().expect("authorities is non-empty; qed"); + let has_authority = epoch.authorities.iter().any(|(id, _)| *id == *authority); + + if !has_authority { + log::info!(target: "manual-seal", "authority not found"); + let timestamp = inherents + .timestamp_inherent_data()? + .ok_or_else(|| Error::StringError("No timestamp inherent data".into()))?; + let slot = *timestamp / self.config.slot_duration; + // manually hard code epoch descriptor + epoch_descriptor = match epoch_descriptor { + ViableEpochDescriptor::Signaled(identifier, _header) => + ViableEpochDescriptor::Signaled( + identifier, + EpochHeader { + start_slot: slot.into(), + end_slot: (slot * self.config.epoch_length).into(), + }, + ), + _ => unreachable!( + "we're not in the authorities, so this isn't the genesis epoch; qed" + ), + }; + } params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); Ok(()) @@ -161,37 +317,64 @@ impl ConsensusDataProvider for BabeConsensusDataProvider /// Provide duration since unix epoch in milliseconds for timestamp inherent. /// Mocks the timestamp inherent to always produce the timestamp for the next babe slot. -struct SlotTimestampProvider { +pub struct SlotTimestampProvider { time: atomic::AtomicU64, - slot_duration: u64 + slot_duration: u64, } impl SlotTimestampProvider { - /// create a new mocked time stamp provider. - fn new(slot_duration: u64) -> Result { - let now = SystemTime::now(); - let duration = now.duration_since(SystemTime::UNIX_EPOCH) - .map_err(|err| Error::StringError(format!("{}", err)))?; - Ok(Self { - time: atomic::AtomicU64::new(duration.as_millis() as u64), - slot_duration, - }) + /// Create a new mocked timestamp provider. + pub fn new(client: Arc) -> Result + where + B: BlockT, + C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, + C::Api: BabeApi, + { + let slot_duration = Config::get_or_compute(&*client)?.slot_duration; + let info = client.info(); + + // looks like this isn't the first block, rehydrate the fake time. + // otherwise we'd be producing blocks for older slots. + let time = if info.best_number != Zero::zero() { + let header = client.header(BlockId::Hash(info.best_hash))?.unwrap(); + let slot = find_pre_digest::(&header).unwrap().slot(); + // add the slot duration so there's no collision of slots + (*slot * slot_duration) + slot_duration + } else { + // this is the first block, use the correct time. + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .map_err(|err| Error::StringError(format!("{}", err)))?
+ .as_millis() as u64 + }; + + Ok(Self { time: atomic::AtomicU64::new(time), slot_duration }) } -} -impl ProvideInherentData for SlotTimestampProvider { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER + /// Get the current slot number + pub fn slot(&self) -> u64 { + self.time.load(atomic::Ordering::SeqCst) / self.slot_duration } +} - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), sp_inherents::Error> { +#[async_trait::async_trait] +impl InherentDataProvider for SlotTimestampProvider { + fn provide_inherent_data( + &self, + inherent_data: &mut InherentData, + ) -> Result<(), sp_inherents::Error> { // we update the time here. - let duration: InherentType = self.time.fetch_add(self.slot_duration, atomic::Ordering::SeqCst); + let duration: InherentType = + self.time.fetch_add(self.slot_duration, atomic::Ordering::SeqCst).into(); inherent_data.put_data(INHERENT_IDENTIFIER, &duration)?; Ok(()) } - fn error_to_string(&self, error: &[u8]) -> Option { - InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e)) + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + None } } diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index e2628008c24c7..8585e6a70d644 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,10 +19,11 @@ //! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. //! This is suitable for a testing environment. -use sp_consensus::{Error as ConsensusError, ImportResult}; +use futures::channel::{mpsc::SendError, oneshot}; +use sc_consensus::ImportResult; use sp_blockchain::Error as BlockchainError; +use sp_consensus::Error as ConsensusError; use sp_inherents::Error as InherentsError; -use futures::channel::{oneshot, mpsc::SendError}; /// Error code for rpc mod codes { @@ -63,14 +64,14 @@ pub enum Error { #[display(fmt = "{}", _0)] #[from(ignore)] StringError(String), - ///send error + /// send error #[display(fmt = "Consensus process is terminating")] Canceled(oneshot::Canceled), - ///send error + /// send error #[display(fmt = "Consensus process is terminating")] SendError(SendError), /// Some other error. - #[display(fmt="Other error: {}", _0)] + #[display(fmt = "Other error: {}", _0)] Other(Box), } @@ -85,7 +86,7 @@ impl Error { InherentError(_) => codes::INHERENTS_ERROR, BlockchainError(_) => codes::BLOCKCHAIN_ERROR, SendError(_) | Canceled(_) => codes::SERVER_SHUTTING_DOWN, - _ => codes::UNKNOWN_ERROR + _ => codes::UNKNOWN_ERROR, } } } @@ -95,7 +96,7 @@ impl std::convert::From for jsonrpc_core::Error { jsonrpc_core::Error { code: jsonrpc_core::ErrorCode::ServerError(error.to_code()), message: format!("{}", error), - data: None + data: None, } } } diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index 5780a25f97256..a5ddf1d162f7a 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -1,30 +1,27 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. 
// This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Block finalization utilities use crate::rpc; -use sp_runtime::{ - Justification, - traits::Block as BlockT, - generic::BlockId, -}; -use std::sync::Arc; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use std::marker::PhantomData; +use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification}; +use std::{marker::PhantomData, sync::Arc}; /// params for block finalization. pub struct FinalizeBlockParams { @@ -40,30 +37,23 @@ pub struct FinalizeBlockParams { pub _phantom: PhantomData, } - /// finalizes a block in the backend with the given params. pub async fn finalize_block(params: FinalizeBlockParams) - where - B: BlockT, - F: Finalizer, - CB: ClientBackend, +where + B: BlockT, + F: Finalizer, + CB: ClientBackend, { - let FinalizeBlockParams { - hash, - mut sender, - justification, - finalizer, - .. - } = params; + let FinalizeBlockParams { hash, mut sender, justification, finalizer, .. } = params; match finalizer.finalize_block(BlockId::Hash(hash), justification, true) { Err(e) => { log::warn!("Failed to finalize block {:?}", e); rpc::send_result(&mut sender, Err(e.into())) - } + }, Ok(()) => { log::info!("✅ Successfully finalized block: {}", hash); rpc::send_result(&mut sender, Ok(())) - } + }, } } diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index d025d6aaf689f..390c23fe032f1 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,18 +20,17 @@ //! This is suitable for a testing environment. 
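In practice the engine is driven entirely through `EngineCommand`s. Sealing one block by hand looks like this, condensed from the tests at the bottom of this file (it assumes the receiving end, `commands_stream`, was handed to `run_manual_seal` via `ManualSealParams`):

```rust
let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024);
// ... pass `commands_stream` to ManualSealParams and spawn run_manual_seal ...

let (tx, rx) = futures::channel::oneshot::channel();
sink.send(EngineCommand::SealNewBlock {
    parent_hash: None,   // None means: build on the current best block
    sender: Some(tx),    // the CreatedBlock result is reported here
    create_empty: false, // refuse to seal a block with no transactions
    finalize: false,     // finalization can be requested separately
})
.await
.unwrap();
let created_block = rx.await.unwrap().unwrap();
```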
use futures::prelude::*; -use sp_consensus::{ - Environment, Proposer, SelectChain, BlockImport, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, - import_queue::{Verifier, BasicQueue, CacheKeyId, BoxBlockImport}, +use prometheus_endpoint::Registry; +use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; +use sc_consensus::{ + block_import::{BlockImport, BlockImportParams, ForkChoiceStrategy}, + import_queue::{BasicQueue, BoxBlockImport, Verifier}, }; use sp_blockchain::HeaderBackend; -use sp_inherents::InherentDataProviders; -use sp_runtime::{traits::Block as BlockT, Justification}; -use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use sc_transaction_pool::txpool; -use std::{sync::Arc, marker::PhantomData}; -use prometheus_endpoint::Registry; +use sp_consensus::{CacheKeyId, Environment, Proposer, SelectChain}; +use sp_inherents::CreateInherentDataProviders; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use std::{marker::PhantomData, sync::Arc}; mod error; mod finalize_block; @@ -41,57 +40,48 @@ pub mod consensus; pub mod rpc; pub use self::{ - error::Error, consensus::ConsensusDataProvider, + error::Error, finalize_block::{finalize_block, FinalizeBlockParams}, - seal_block::{SealBlockParams, seal_block, MAX_PROPOSAL_DURATION}, - rpc::{EngineCommand, CreatedBlock}, + rpc::{CreatedBlock, EngineCommand}, + seal_block::{seal_block, SealBlockParams, MAX_PROPOSAL_DURATION}, }; +use sc_transaction_pool_api::TransactionPool; use sp_api::{ProvideRuntimeApi, TransactionFor}; +/// The `ConsensusEngineId` of Manual Seal. +pub const MANUAL_SEAL_ENGINE_ID: ConsensusEngineId = [b'm', b'a', b'n', b'l']; + /// The verifier for the manual seal engine; it performs no verification and accepts every block. struct ManualSealVerifier; +#[async_trait::async_trait] impl Verifier for ManualSealVerifier { - fn verify( + async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let mut import_params = BlockImportParams::new(origin, header); - import_params.justification = justification; - import_params.body = body; - import_params.finalized = false; - import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - Ok((import_params, None)) + block.finalized = false; + block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + Ok((block, None)) } } /// Instantiate the import queue for the manual seal consensus engine. pub fn import_queue( block_import: BoxBlockImport, - spawner: &impl sp_core::traits::SpawnNamed, + spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, ) -> BasicQueue - where - Block: BlockT, - Transaction: Send + Sync + 'static, +where + Block: BlockT, + Transaction: Send + Sync + 'static, { - BasicQueue::new( - ManualSealVerifier, - block_import, - None, - None, - spawner, - registry, - ) + BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry) } /// Params required to start the manual sealing authorship task. -pub struct ManualSealParams, A: txpool::ChainApi, SC, CS> { +pub struct ManualSealParams, TP, SC, CS, CIDP> { /// Block import instance for importing blocks. pub block_import: BI, @@ -102,24 +92,25 @@ pub struct ManualSealParams, A: txpool pub client: Arc, /// Shared reference to the transaction pool. - pub pool: Arc>, + pub pool: Arc, - /// Stream, Basically the receiving end of a channel for sending commands to - /// the authorship task.
+ /// Stream, basically the receiving end of a channel for sending + /// commands to the authorship task. pub commands_stream: CS, /// SelectChain strategy. pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>>, + pub consensus_data_provider: + Option>>>, - /// Provider for inherents to include in blocks. - pub inherent_data_providers: InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, } /// Params required to start the instant sealing authorship task. -pub struct InstantSealParams, A: txpool::ChainApi, SC> { +pub struct InstantSealParams, TP, SC, CIDP> { /// Block import instance for importing blocks. pub block_import: BI, @@ -130,20 +121,21 @@ pub struct InstantSealParams, A: txpoo pub client: Arc, /// Shared reference to the transaction pool. - pub pool: Arc>, + pub pool: Arc, /// SelectChain strategy. pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>>, + pub consensus_data_provider: + Option>>>, - /// Provider for inherents to include in blocks. - pub inherent_data_providers: InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, } /// Creates the background authorship task for the manual seal engine. -pub async fn run_manual_seal( +pub async fn run_manual_seal( ManualSealParams { mut block_import, mut env, @@ -151,59 +143,54 @@ pub async fn run_manual_seal( pool, mut commands_stream, select_chain, - inherent_data_providers, consensus_data_provider, - .. - }: ManualSealParams -) - where - A: txpool::ChainApi + 'static, - B: BlockT + 'static, - BI: BlockImport> - + Send + Sync + 'static, - C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, - CB: ClientBackend + 'static, - E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, - CS: Stream::Hash>> + Unpin + 'static, - SC: SelectChain + 'static, + create_inherent_data_providers, + }: ManualSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + CS: Stream::Hash>> + Unpin + 'static, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, { while let Some(command) = commands_stream.next().await { match command { - EngineCommand::SealNewBlock { - create_empty, - finalize, - parent_hash, - sender, - } => { - seal_block( - SealBlockParams { - sender, - parent_hash, - finalize, - create_empty, - env: &mut env, - select_chain: &select_chain, - block_import: &mut block_import, - inherent_data_provider: &inherent_data_providers, - consensus_data_provider: consensus_data_provider.as_ref().map(|p| &**p), - pool: pool.clone(), - client: client.clone(), - } - ).await; - } + EngineCommand::SealNewBlock { create_empty, finalize, parent_hash, sender } => { + seal_block(SealBlockParams { + sender, + parent_hash, + finalize, + create_empty, + env: &mut env, + select_chain: &select_chain, + block_import: &mut block_import, + consensus_data_provider: consensus_data_provider.as_ref().map(|p| &**p), + pool: pool.clone(), + client: client.clone(), + create_inherent_data_providers: &create_inherent_data_providers, + }) + .await; + }, EngineCommand::FinalizeBlock { hash, sender, justification } => { - finalize_block(
FinalizeBlockParams { - hash, - sender, - justification, - finalizer: client.clone(), - _phantom: PhantomData, - } - ).await - } + let justification = justification.map(|j| (MANUAL_SEAL_ENGINE_ID, j)); + finalize_block(FinalizeBlockParams { + hash, + sender, + justification, + finalizer: client.clone(), + _phantom: PhantomData, + }) + .await + }, } } } @@ -211,7 +198,7 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. -pub async fn run_instant_seal( +pub async fn run_instant_seal( InstantSealParams { block_import, env, @@ -219,66 +206,58 @@ pub async fn run_instant_seal( pool, select_chain, consensus_data_provider, - inherent_data_providers, - .. - }: InstantSealParams -) - where - A: txpool::ChainApi + 'static, - B: BlockT + 'static, - BI: BlockImport> - + Send + Sync + 'static, - C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, - CB: ClientBackend + 'static, - E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, - SC: SelectChain + 'static + create_inherent_data_providers, + }: InstantSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. - let commands_stream = pool.validated_pool() - .import_notification_stream() - .map(|_| { - EngineCommand::SealNewBlock { - create_empty: false, - finalize: false, - parent_hash: None, - sender: None, - } - }); - - run_manual_seal( - ManualSealParams { - block_import, - env, - client, - pool, - commands_stream, - select_chain, - consensus_data_provider, - inherent_data_providers, - } - ).await + let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: false, + parent_hash: None, + sender: None, + }); + + run_manual_seal(ManualSealParams { + block_import, + env, + client, + pool, + commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }) + .await } #[cfg(test)] mod tests { use super::*; - use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, - TestClientBuilderExt, - AccountKeyring::*, - TestClientBuilder, - }; - use sc_transaction_pool::{BasicPool, RevalidationType, txpool::Options}; - use substrate_test_runtime_transaction_pool::{TestApi, uxt}; - use sp_transaction_pool::{TransactionPool, MaintainedTransactionPool, TransactionSource}; - use sp_runtime::generic::BlockId; - use sp_consensus::ImportedAux; - use sp_inherents::InherentDataProviders; use sc_basic_authorship::ProposerFactory; use sc_client_api::BlockBackend; + use sc_consensus::ImportedAux; + use sc_transaction_pool::{BasicPool, Options, RevalidationType}; + use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; + use sp_runtime::generic::BlockId; + use substrate_test_runtime_client::{ + AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + use substrate_test_runtime_transaction_pool::{uxt, TestApi}; fn api() -> Arc { Arc::new(TestApi::empty()) @@ -291,46 +270,45 @@ mod tests { let builder = 
TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), api(), None, RevalidationType::Full, spawner.clone(), - )); - let env = ProposerFactory::new( - spawner.clone(), - client.clone(), - pool.clone(), + Options::default(), + true.into(), + api(), None, - ); - // this test checks that blocks are created as soon as transactions are imported into the pool. + RevalidationType::Full, + spawner.clone(), + 0, + )); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); + // this test checks that blocks are created as soon as transactions are imported into the + // pool. let (sender, receiver) = futures::channel::oneshot::channel(); let mut sender = Arc::new(Some(sender)); - let commands_stream = pool.pool().validated_pool().import_notification_stream() - .map(move |_| { + let commands_stream = + pool.pool().validated_pool().import_notification_stream().map(move |_| { // we're only going to submit one tx so this fn will only be called once. - let mut_sender = Arc::get_mut(&mut sender).unwrap(); + let mut_sender = Arc::get_mut(&mut sender).unwrap(); let sender = std::mem::take(mut_sender); EngineCommand::SealNewBlock { create_empty: false, finalize: true, parent_hash: None, - sender + sender, } }); - let future = run_manual_seal( - ManualSealParams { - block_import: client.clone(), - env, - client: client.clone(), - pool: pool.pool().clone(), - commands_stream, - select_chain, - inherent_data_providers, - consensus_data_provider: None, - } - ); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + create_inherent_data_providers: |_, _| async { Ok(()) }, + consensus_data_provider: None, + }); std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task rt.block_on(future); }); @@ -349,7 +327,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } } @@ -363,33 +340,32 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), api(), None, RevalidationType::Full, spawner.clone(), - )); - let env = ProposerFactory::new( - spawner.clone(), - client.clone(), - pool.clone(), + Options::default(), + true.into(), + api(), None, - ); - // this test checks that blocks are created as soon as an engine command is sent over the stream. + RevalidationType::Full, + spawner.clone(), + 0, + )); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); + // this test checks that blocks are created as soon as an engine command is sent over the + // stream. 
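+		// The channel below stands in for the RPC layer: `ManualSeal` in `rpc.rs`
+		// holds the sending half of exactly such a channel, so pushing an
+		// `EngineCommand` into `sink` here is equivalent to a client calling the
+		// manual-seal RPC methods (`engine_createBlock` / `engine_finalizeBlock`).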
let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal( - ManualSealParams { - block_import: client.clone(), - env, - client: client.clone(), - pool: pool.pool().clone(), - commands_stream, - select_chain, - consensus_data_provider: None, - inherent_data_providers, - } - ); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: |_, _| async { Ok(()) }, + }); std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task rt.block_on(future); }); @@ -403,7 +379,9 @@ mod tests { sender: Some(tx), create_empty: false, finalize: false, - }).await.unwrap(); + }) + .await + .unwrap(); let created_block = rx.await.unwrap().unwrap(); // assert that the background task returns ok @@ -416,7 +394,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, } } @@ -427,8 +404,10 @@ mod tests { sink.send(EngineCommand::FinalizeBlock { sender: Some(tx), hash: header.hash(), - justification: None - }).await.unwrap(); + justification: None, + }) + .await + .unwrap(); // assert that the background task returns ok assert_eq!(rx.await.unwrap().unwrap(), ()); } @@ -438,34 +417,33 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); let pool_api = api(); let spawner = sp_core::testing::TaskExecutor::new(); let pool = Arc::new(BasicPool::with_revalidation_type( - Options::default(), pool_api.clone(), None, RevalidationType::Full, spawner.clone(), - )); - let env = ProposerFactory::new( - spawner.clone(), - client.clone(), - pool.clone(), + Options::default(), + true.into(), + pool_api.clone(), None, - ); - // this test checks that blocks are created as soon as an engine command is sent over the stream. + RevalidationType::Full, + spawner.clone(), + 0, + )); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); + // this test checks that blocks are created as soon as an engine command is sent over the + // stream. 
let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal( - ManualSealParams { - block_import: client.clone(), - env, - client: client.clone(), - pool: pool.pool().clone(), - commands_stream, - select_chain, - consensus_data_provider: None, - inherent_data_providers, - } - ); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: |_, _| async { Ok(()) }, + }); std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task rt.block_on(future); }); @@ -480,7 +458,9 @@ mod tests { sender: Some(tx), create_empty: false, finalize: false, - }).await.unwrap(); + }) + .await + .unwrap(); let created_block = rx.await.unwrap().unwrap(); pool_api.increment_nonce(Alice.into()); @@ -494,7 +474,6 @@ mod tests { clear_justification_requests: false, needs_justification: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true } } @@ -504,34 +483,38 @@ mod tests { assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above"); - pool.maintain(sp_transaction_pool::ChainEvent::NewBestBlock { + pool.maintain(sc_transaction_pool_api::ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None, - }).await; + }) + .await; let (tx1, rx1) = futures::channel::oneshot::channel(); - assert!(sink.send(EngineCommand::SealNewBlock { - parent_hash: Some(created_block.hash), - sender: Some(tx1), - create_empty: false, - finalize: false, - }).await.is_ok()); - assert_matches::assert_matches!( - rx1.await.expect("should be no error receiving"), - Ok(_) - ); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx1), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); + assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); let block = client.block(&BlockId::Number(2)).unwrap().unwrap().block; pool_api.add_block(block, true); pool_api.increment_nonce(Alice.into()); - assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 2)).await.is_ok()); + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); - assert!(sink.send(EngineCommand::SealNewBlock { - parent_hash: Some(created_block.hash), - sender: Some(tx2), - create_empty: false, - finalize: false, - }).await.is_ok()); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx2), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); let imported = rx2.await.unwrap().unwrap(); // assert that fork block is in the db assert!(client.header(&BlockId::Hash(imported.hash)).unwrap().is_some()) diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index 690b6c1eb9996..6755879ceedd6 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -1,36 +1,36 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! RPC interface for the `ManualSeal` Engine. -use sp_consensus::ImportedAux; -use jsonrpc_core::Error; -use jsonrpc_derive::rpc; +pub use self::gen_client::Client as ManualSealClient; use futures::{ channel::{mpsc, oneshot}, - TryFutureExt, - FutureExt, - SinkExt + FutureExt, SinkExt, TryFutureExt, }; +use jsonrpc_core::Error; +use jsonrpc_derive::rpc; +use sc_consensus::ImportedAux; use serde::{Deserialize, Serialize}; -use sp_runtime::Justification; -pub use self::gen_client::Client as ManualSealClient; +use sp_runtime::EncodedJustification; /// Future's type for jsonrpc -type FutureResult = Box + Send>; +type FutureResult = jsonrpc_core::BoxFuture>; /// sender passed to the authorship task to report errors or successes. pub type Sender = Option>>; @@ -60,8 +60,8 @@ pub enum EngineCommand { /// sender to report errors/success to the rpc. sender: Sender<()>, /// finalization justification - justification: Option, - } + justification: Option, + }, } /// RPC trait that provides methods for interacting with the manual-seal authorship task over rpc. @@ -73,7 +73,7 @@ pub trait ManualSealApi { &self, create_empty: bool, finalize: bool, - parent_hash: Option + parent_hash: Option, ) -> FutureResult>; /// Instructs the manual-seal authorship task to finalize a block @@ -81,7 +81,7 @@ pub trait ManualSealApi { fn finalize_block( &self, hash: Hash, - justification: Option + justification: Option, ) -> FutureResult; } @@ -96,7 +96,7 @@ pub struct CreatedBlock { /// hash of the created block. pub hash: Hash, /// some extra details about the import operation - pub aux: ImportedAux + pub aux: ImportedAux, } impl ManualSeal { @@ -111,10 +111,10 @@ impl ManualSealApi for ManualSeal { &self, create_empty: bool, finalize: bool, - parent_hash: Option + parent_hash: Option, ) -> FutureResult> { let mut sink = self.import_block_channel.clone(); - let future = async move { + async move { let (sender, receiver) = oneshot::channel(); let command = EngineCommand::SealNewBlock { create_empty, @@ -124,23 +124,26 @@ impl ManualSealApi for ManualSeal { }; sink.send(command).await?; receiver.await? 
- }.boxed(); - - Box::new(future.map_err(Error::from).compat()) + } + .map_err(Error::from) + .boxed() } - fn finalize_block(&self, hash: Hash, justification: Option) -> FutureResult { + fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> FutureResult { let mut sink = self.import_block_channel.clone(); - let future = async move { + async move { let (sender, receiver) = oneshot::channel(); - sink.send( - EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification } - ).await?; + sink.send(EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }) + .await?; receiver.await?.map(|_| true) - }; - - Box::new(future.boxed().map_err(Error::from).compat()) + } + .map_err(Error::from) + .boxed() } } @@ -148,7 +151,7 @@ impl ManualSealApi for ManualSeal { /// to the rpc pub fn send_result( sender: &mut Sender, - result: std::result::Result + result: std::result::Result, ) { if let Some(sender) = sender.take() { if let Err(err) = sender.send(result) { @@ -158,7 +161,7 @@ pub fn send_result( // instant seal doesn't report errors over rpc, simply log them. match result { Ok(r) => log::info!("Instant Seal success: {:?}", r), - Err(e) => log::error!("Instant Seal encountered an error: {}", e) + Err(e) => log::error!("Instant Seal encountered an error: {}", e), } } } diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 58f017f2d41ad..502705b411621 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -1,44 +1,42 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! 
Block sealing utilities -use crate::{Error, rpc, CreatedBlock, ConsensusDataProvider}; -use std::sync::Arc; +use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; +use futures::prelude::*; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction}; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_blockchain::HeaderBackend; +use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, }; -use futures::prelude::*; -use sc_transaction_pool::txpool; -use sp_consensus::{ - self, BlockImport, Environment, Proposer, ForkChoiceStrategy, - BlockImportParams, BlockOrigin, ImportResult, SelectChain, -}; -use sp_blockchain::HeaderBackend; -use std::collections::HashMap; -use std::time::Duration; -use sp_inherents::InherentDataProviders; -use sp_api::{ProvideRuntimeApi, TransactionFor}; +use std::{collections::HashMap, sync::Arc, time::Duration}; /// max duration for creating a proposal in secs pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: txpool::ChainApi> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -49,7 +47,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: /// sender to report errors/success to the rpc. pub sender: rpc::Sender::Hash>>, /// transaction pool - pub pool: Arc>, + pub pool: Arc, /// header backend pub client: Arc, /// Environment trait object for creating a proposer @@ -57,15 +55,16 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, P: /// SelectChain object pub select_chain: &'a SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>>, + pub consensus_data_provider: + Option<&'a dyn ConsensusDataProvider>>, /// block import object pub block_import: &'a mut BI, - /// inherent data provider - pub inherent_data_provider: &'a InherentDataProviders, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: &'a CIDP, } /// seals a new block with the given params -pub async fn seal_block( +pub async fn seal_block( SealBlockParams { create_empty, finalize, @@ -75,25 +74,26 @@ pub async fn seal_block( select_chain, block_import, env, - inherent_data_provider, + create_inherent_data_providers, consensus_data_provider: digest_provider, mut sender, - .. 
- }: SealBlockParams<'_, B, BI, SC, C, E, P> -) - where - B: BlockT, - BI: BlockImport> - + Send + Sync + 'static, - C: HeaderBackend + ProvideRuntimeApi, - E: Environment, - >::Error: std::fmt::Display, - >::Error: std::fmt::Display, - P: txpool::ChainApi, - SC: SelectChain, + }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP>, +) where + B: BlockT, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + ProvideRuntimeApi, + E: Environment, + E::Proposer: Proposer>, + TP: TransactionPool, + SC: SelectChain, + TransactionFor: 'static, + CIDP: CreateInherentDataProviders, { let future = async { - if pool.validated_pool().status().ready == 0 && !create_empty { + if pool.status().ready == 0 && !create_empty { return Err(Error::EmptyTransactionPool) } @@ -101,28 +101,40 @@ pub async fn seal_block( // use the parent_hash supplied via `EngineCommand` // or fetch the best_block. let parent = match parent_hash { - Some(hash) => { - match client.header(BlockId::Hash(hash))? { - Some(header) => header, - None => return Err(Error::BlockNotFound(format!("{}", hash))), - } - } - None => select_chain.best_chain()? + Some(hash) => client + .header(BlockId::Hash(hash))? + .ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?, + None => select_chain.best_chain().await?, }; - let proposer = env.init(&parent) - .map_err(|err| Error::StringError(format!("{}", err))).await?; - let id = inherent_data_provider.create_inherent_data()?; - let inherents_len = id.len(); + let inherent_data_providers = create_inherent_data_providers + .create_inherent_data_providers(parent.hash(), ()) + .await + .map_err(|e| Error::Other(e))?; + + let inherent_data = inherent_data_providers.create_inherent_data()?; + + let proposer = env + .init(&parent) + .map_err(|err| Error::StringError(format!("{:?}", err))) + .await?; + let inherents_len = inherent_data.len(); let digest = if let Some(digest_provider) = digest_provider { - digest_provider.create_digest(&parent, &id)? + digest_provider.create_digest(&parent, &inherent_data)? } else { Default::default() }; - let proposal = proposer.propose(id.clone(), digest, Duration::from_secs(MAX_PROPOSAL_DURATION), false.into()) - .map_err(|err| Error::StringError(format!("{}", err))).await?; + let proposal = proposer + .propose( + inherent_data.clone(), + digest, + Duration::from_secs(MAX_PROPOSAL_DURATION), + None, + ) + .map_err(|err| Error::StringError(format!("{:?}", err))) + .await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { return Err(Error::EmptyTransactionPool) @@ -133,15 +145,17 @@ pub async fn seal_block( params.body = Some(body); params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( + proposal.storage_changes, + )); if let Some(digest_provider) = digest_provider { - digest_provider.append_block_import(&parent, &mut params, &id)?; + digest_provider.append_block_import(&parent, &mut params, &inherent_data)?; } - match block_import.import_block(params, HashMap::new())? { - ImportResult::Imported(aux) => { - Ok(CreatedBlock { hash: ::Header::hash(&header), aux }) - }, + match block_import.import_block(params, HashMap::new()).await? 
{ + ImportResult::Imported(aux) => + Ok(CreatedBlock { hash: ::Header::hash(&header), aux }), other => Err(other.into()), } }; diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index fbb02ccc71121..c71e11aef275e 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-pow" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" edition = "2018" @@ -13,20 +13,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-consensus-pow = { version = "0.8.0", path = "../../../primitives/consensus/pow" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-consensus-pow = { version = "0.10.0-dev", path = "../../../primitives/consensus/pow" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } log = "0.4.8" -futures = { version = "0.3.1", features = ["compat"] } +futures = "0.3.16" futures-timer = "3.0.1" -parking_lot = "0.10.0" -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } +parking_lot = "0.11.1" derive_more = "0.99.2" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +async-trait = "0.1.50" diff --git a/client/consensus/pow/README.md b/client/consensus/pow/README.md index a335ec367047b..8dba30fc5a38e 100644 --- a/client/consensus/pow/README.md +++ b/client/consensus/pow/README.md @@ -3,7 +3,15 @@ Proof of work consensus for Substrate. To use this engine, you can need to have a struct that implements `PowAlgorithm`. After that, pass an instance of the struct, along with other necessary client references to `import_queue` to setup -the queue. Use the `start_mine` function for basic CPU mining. +the queue. + +This library also comes with an async mining worker, which can be +started via the `start_mining_worker` function. It returns a worker +handle together with a future. The future must be pulled. 
Through +the worker handle, you can pull the metadata needed to start the +mining process via `MiningWorker::metadata`, and then do the actual +mining on a standalone thread. Finally, when a seal is found, call +`MiningWorker::submit` to build the block. The auxiliary storage for PoW engine only stores the total difficulty. For other storage requirements for particular PoW algorithm (such as @@ -13,4 +21,4 @@ for the auxiliary storage. It is also possible to just use the runtime as the storage, but it is not recommended as it won't work well with light clients. -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index b73b9aa91f802..1f5781434ef71 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,52 +19,59 @@ //! Proof of work consensus for Substrate. //! //! To use this engine, you can need to have a struct that implements -//! `PowAlgorithm`. After that, pass an instance of the struct, along -//! with other necessary client references to `import_queue` to setup -//! the queue. Use the `start_mine` function for basic CPU mining. +//! [`PowAlgorithm`]. After that, pass an instance of the struct, along +//! with other necessary client references to [`import_queue`] to setup +//! the queue. +//! +//! This library also comes with an async mining worker, which can be +//! started via the [`start_mining_worker`] function. It returns a worker +//! handle together with a future. The future must be pulled. Through +//! the worker handle, you can pull the metadata needed to start the +//! mining process via [`MiningWorker::metadata`], and then do the actual +//! mining on a standalone thread. Finally, when a seal is found, call +//! [`MiningWorker::submit`] to build the block. //! //! The auxiliary storage for PoW engine only stores the total difficulty. //! For other storage requirements for particular PoW algorithm (such as //! the actual difficulty for each particular blocks), you can take a client -//! reference in your `PowAlgorithm` implementation, and use a separate prefix +//! reference in your [`PowAlgorithm`] implementation, and use a separate prefix //! for the auxiliary storage. It is also possible to just use the runtime //! as the storage, but it is not recommended as it won't work well with light //! clients. 
mod worker; -pub use crate::worker::{MiningWorker, MiningMetadata, MiningBuild}; +pub use crate::worker::{MiningBuild, MiningMetadata, MiningWorker}; -use std::{ - sync::Arc, any::Any, borrow::Cow, collections::HashMap, marker::PhantomData, - cmp::Ordering, time::Duration, -}; -use futures::{prelude::*, future::Either}; +use crate::worker::UntilImportedOrTimeout; +use codec::{Decode, Encode}; +use futures::{Future, StreamExt}; +use log::*; use parking_lot::Mutex; -use sc_client_api::{BlockOf, backend::AuxStore, BlockchainEvents}; -use sp_blockchain::{HeaderBackend, ProvideCache, well_known_cache_keys::Id as CacheKeyId}; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_runtime::{Justification, RuntimeString}; -use sp_runtime::generic::{BlockId, Digest, DigestItem}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use prometheus_endpoint::Registry; +use sc_client_api::{self, backend::AuxStore, BlockOf, BlockchainEvents}; +use sc_consensus::{ + BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxBlockImport, + BoxJustificationImport, ForkChoiceStrategy, ImportResult, Verifier, +}; use sp_api::ProvideRuntimeApi; -use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; -use sp_inherents::{InherentDataProviders, InherentData}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache}; use sp_consensus::{ - BlockImportParams, BlockOrigin, ForkChoiceStrategy, SyncOracle, Environment, Proposer, - SelectChain, Error as ConsensusError, CanAuthorWith, RecordProof, BlockImport, - BlockCheckParams, ImportResult, + CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; -use sp_consensus::import_queue::{ - BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, BoxFinalityProofImport, +use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; +use sp_core::ExecutionContext; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; +use sp_runtime::{ + generic::{BlockId, Digest, DigestItem}, + traits::{Block as BlockT, Header as HeaderT}, + RuntimeString, +}; +use std::{ + borrow::Cow, cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, + time::Duration, }; -use codec::{Encode, Decode}; -use prometheus_endpoint::Registry; -use sc_client_api; -use log::*; -use sp_timestamp::{InherentError as TIError, TimestampInherentData}; - -use crate::worker::UntilImportedOrTimeout; #[derive(derive_more::Display, Debug)] pub enum Error { @@ -93,7 +100,12 @@ pub enum Error { #[display(fmt = "Creating inherents failed: {}", _0)] CreateInherents(sp_inherents::Error), #[display(fmt = "Checking inherents failed: {}", _0)] - CheckInherents(String), + CheckInherents(sp_inherents::Error), + #[display( + fmt = "Checking inherents unknown error for identifier: {:?}", + "String::from_utf8_lossy(_0)" + )] + CheckInherentsUnknownError(sp_inherents::InherentIdentifier), #[display(fmt = "Multiple pre-runtime digests")] MultiplePreRuntimeDigests, Client(sp_blockchain::Error), @@ -142,7 +154,8 @@ pub struct PowAux { pub total_difficulty: Difficulty, } -impl PowAux where +impl PowAux +where Difficulty: Decode + Default, { /// Read the auxiliary from client. @@ -182,11 +195,7 @@ pub trait PowAlgorithm { /// breaking algorithms will help to protect against selfish mining. /// /// Returns if the new seal should be considered best block. 
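+	///
+	/// One possible policy, sketched purely for illustration (it assumes the
+	/// blake2 hash of the seal bytes is a fair tie-breaker):
+	///
+	/// ```ignore
+	/// fn break_tie(&self, own_seal: &Seal, new_seal: &Seal) -> bool {
+	///     // Prefer the new seal only when its hash is strictly smaller.
+	///     sp_core::blake2_256(new_seal) < sp_core::blake2_256(own_seal)
+	/// }
+	/// ```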
- fn break_tie( - &self, - _own_seal: &Seal, - _new_seal: &Seal, - ) -> bool { + fn break_tie(&self, _own_seal: &Seal, _new_seal: &Seal) -> bool { false } /// Verify that the difficulty is valid against given seal. @@ -201,18 +210,18 @@ pub trait PowAlgorithm { } /// A block importer for PoW. -pub struct PowBlockImport { +pub struct PowBlockImport { algorithm: Algorithm, inner: I, select_chain: S, client: Arc, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: Arc, check_inherents_after: <::Header as HeaderT>::Number, can_author_with: CAW, } -impl Clone - for PowBlockImport +impl Clone + for PowBlockImport { fn clone(&self) -> Self { Self { @@ -220,21 +229,23 @@ impl Clone inner: self.inner.clone(), select_chain: self.select_chain.clone(), client: self.client.clone(), - inherent_data_providers: self.inherent_data_providers.clone(), + create_inherent_data_providers: self.create_inherent_data_providers.clone(), check_inherents_after: self.check_inherents_after.clone(), can_author_with: self.can_author_with.clone(), } } } -impl PowBlockImport where +impl PowBlockImport +where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, - C::Api: BlockBuilderApi, + C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, CAW: CanAuthorWith, + CIDP: CreateInherentDataProviders, { /// Create a new block import suitable to be used in PoW pub fn new( @@ -243,7 +254,7 @@ impl PowBlockImport wher algorithm: Algorithm, check_inherents_after: <::Header as HeaderT>::Number, select_chain: S, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: CIDP, can_author_with: CAW, ) -> Self { Self { @@ -252,20 +263,18 @@ impl PowBlockImport wher algorithm, check_inherents_after, select_chain, - inherent_data_providers, + create_inherent_data_providers: Arc::new(create_inherent_data_providers), can_author_with, } } - fn check_inherents( + async fn check_inherents( &self, block: B, block_id: BlockId, - inherent_data: InherentData, - timestamp_now: u64, + inherent_data_providers: CIDP::InherentDataProviders, + execution_context: ExecutionContext, ) -> Result<(), Error> { - const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; - if *block.header().number() < self.check_inherents_after { return Ok(()) } @@ -280,61 +289,63 @@ impl PowBlockImport wher return Ok(()) } - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(Error::Client)?; + let inherent_data = inherent_data_providers + .create_inherent_data() + .map_err(|e| Error::CreateInherents(e))?; + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context(&block_id, execution_context, block, inherent_data) + .map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { - Some(TIError::ValidAtTimestamp(timestamp)) => { - if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { - return Err(Error::TooFarInFuture); - } - - Ok(()) - }, - Some(TIError::Other(e)) => Err(Error::Runtime(e)), - None => Err(Error::CheckInherents( - self.inherent_data_providers.error_to_string(&i, &e) - )), - }) - } else { - Ok(()) + for (identifier, error) in inherent_res.into_errors() { + match inherent_data_providers.try_handle_error(&identifier, &error).await { + Some(res) => res.map_err(Error::CheckInherents)?, + None => return 
Err(Error::CheckInherentsUnknownError(identifier)), + } + } } + + Ok(()) } } -impl BlockImport for PowBlockImport where +#[async_trait::async_trait] +impl BlockImport + for PowBlockImport +where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, S: SelectChain, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, - C::Api: BlockBuilderApi, - Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, - CAW: CanAuthorWith, + C::Api: BlockBuilderApi, + Algorithm: PowAlgorithm + Send + Sync, + Algorithm::Difficulty: 'static + Send, + CAW: CanAuthorWith + Send + Sync, + CIDP: CreateInherentDataProviders + Send + Sync, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, ) -> Result { - let best_header = self.select_chain.best_chain() + let best_header = self + .select_chain + .best_chain() + .await .map_err(|e| format!("Fetch best chain failed via select chain: {:?}", e))?; let best_hash = best_header.hash(); @@ -343,27 +354,25 @@ impl BlockImport for PowBlockImport(self.client.as_ref(), &parent_hash)?; if let Some(inner_body) = block.body.take() { - let inherent_data = self.inherent_data_providers - .create_inherent_data().map_err(|e| e.into_string())?; - let timestamp_now = inherent_data.timestamp_inherent_data().map_err(|e| e.into_string())?; - let check_block = B::new(block.header.clone(), inner_body); self.check_inherents( check_block.clone(), BlockId::Hash(parent_hash), - inherent_data, - timestamp_now - )?; + self.create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await?, + block.origin.into(), + ) + .await?; block.body = Some(check_block.deconstruct().1); } let inner_seal = fetch_seal::(block.post_digests.last(), block.header.hash())?; - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; let difficulty = match intermediate.difficulty { Some(difficulty) => difficulty, @@ -393,18 +402,16 @@ impl BlockImport for PowBlockImport false, Ordering::Greater => true, Ordering::Equal => { - let best_inner_seal = fetch_seal::( - best_header.digest().logs.last(), - best_hash, - )?; + let best_inner_seal = + fetch_seal::(best_header.digest().logs.last(), best_hash)?; self.algorithm.break_tie(&best_inner_seal, &inner_seal) }, - } + }, )); } - self.inner.import_block(block, new_cache).map_err(Into::into) + self.inner.import_block(block, new_cache).await.map_err(Into::into) } } @@ -415,84 +422,61 @@ pub struct PowVerifier { } impl PowVerifier { - pub fn new( - algorithm: Algorithm, - ) -> Self { + pub fn new(algorithm: Algorithm) -> Self { Self { algorithm, _marker: PhantomData } } fn check_header( &self, mut header: B::Header, - ) -> Result<(B::Header, DigestItem), Error> where + ) -> Result<(B::Header, DigestItem), Error> + where Algorithm: PowAlgorithm, { let hash = header.hash(); let (seal, inner_seal) = match header.digest_mut().pop() { - Some(DigestItem::Seal(id, seal)) => { + Some(DigestItem::Seal(id, seal)) => if id == POW_ENGINE_ID { (DigestItem::Seal(id, seal.clone()), seal) } else { return Err(Error::WrongEngine(id)) - } - }, + }, _ => return Err(Error::HeaderUnsealed(hash)), }; let pre_hash = 
header.hash(); if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { - return Err(Error::FailedPreliminaryVerify); + return Err(Error::FailedPreliminaryVerify) } Ok((header, seal)) } } -impl Verifier for PowVerifier where +#[async_trait::async_trait] +impl Verifier for PowVerifier +where Algorithm: PowAlgorithm + Send + Sync, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: 'static + Send, { - fn verify( + async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - let (checked_header, seal) = self.check_header(header)?; - - let intermediate = PowIntermediate:: { - difficulty: None, - }; - - let mut import_block = BlockImportParams::new(origin, checked_header); - import_block.post_digests.push(seal); - import_block.body = body; - import_block.justification = justification; - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box - ); - import_block.post_hash = Some(hash); - - Ok((import_block, None)) - } -} - -/// Register the PoW inherent data provider, if not registered already. -pub fn register_pow_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, -) -> Result<(), sp_consensus::Error> { - if !inherent_data_providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_timestamp::InherentDataProvider) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) + let hash = block.header.hash(); + let (checked_header, seal) = self.check_header(block.header)?; + + let intermediate = PowIntermediate:: { difficulty: None }; + block.header = checked_header; + block.post_digests.push(seal); + block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); + block.post_hash = Some(hash); + + Ok((block, None)) } } @@ -503,31 +487,19 @@ pub type PowImportQueue = BasicQueue; pub fn import_queue( block_import: BoxBlockImport, justification_import: Option>, - finality_proof_import: Option>, algorithm: Algorithm, - inherent_data_providers: InherentDataProviders, - spawner: &impl sp_core::traits::SpawnNamed, + spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, -) -> Result< - PowImportQueue, - sp_consensus::Error -> where +) -> Result, sp_consensus::Error> +where B: BlockT, Transaction: Send + Sync + 'static, Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, + Algorithm::Difficulty: Send, { - register_pow_inherent_data_provider(&inherent_data_providers)?; - let verifier = PowVerifier::new(algorithm); - Ok(BasicQueue::new( - verifier, - block_import, - justification_import, - finality_proof_import, - spawner, - registry, - )) + Ok(BasicQueue::new(verifier, block_import, justification_import, spawner, registry)) } /// Start the mining worker for PoW. This function provides the necessary helper functions that can @@ -539,117 +511,139 @@ pub fn import_queue( /// /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted /// for blocks being built. This can encode authorship information, or just be a graffiti. 
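+///
+/// A hedged wiring sketch; every lowercase name below (`pow_block_import`,
+/// `client`, `select_chain`, `algorithm`, `proposer_factory`, `sync_oracle`,
+/// `justification_sync_link`, `can_author_with`) is an assumed component of
+/// the surrounding node, not something this crate provides:
+///
+/// ```ignore
+/// let (worker, task) = start_mining_worker(
+///     Box::new(pow_block_import),
+///     client.clone(),
+///     select_chain,
+///     algorithm,
+///     proposer_factory,
+///     sync_oracle,
+///     justification_sync_link,
+///     None, // optional `pre_runtime` digest, e.g. authorship info or graffiti
+///     // Inherent data providers; the `sp_timestamp` provider is assumed here.
+///     |_parent, ()| async { Ok(sp_timestamp::InherentDataProvider::from_system_time()) },
+///     Duration::from_secs(10), // `timeout`: re-propose if nothing was imported
+///     Duration::from_secs(10), // `build_time`: proposal deadline
+///     can_author_with,
+/// );
+/// // `task` must be polled to completion (e.g. spawned as an essential task),
+/// // while mining threads drive `worker` as shown in the crate docs.
+/// ```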
-pub fn start_mining_worker( +pub fn start_mining_worker( block_import: BoxBlockImport>, client: Arc, select_chain: S, algorithm: Algorithm, mut env: E, mut sync_oracle: SO, + justification_sync_link: L, pre_runtime: Option>, - inherent_data_providers: sp_inherents::InherentDataProviders, + create_inherent_data_providers: CIDP, timeout: Duration, build_time: Duration, can_author_with: CAW, -) -> (Arc>>, impl Future) where +) -> ( + Arc>::Proof>>>, + impl Future, +) +where Block: BlockT, C: ProvideRuntimeApi + BlockchainEvents + 'static, S: SelectChain + 'static, Algorithm: PowAlgorithm + Clone, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: Send + 'static, E: Environment + Send + Sync + 'static, E::Error: std::fmt::Debug, E::Proposer: Proposer>, SO: SyncOracle + Clone + Send + Sync + 'static, + L: sc_consensus::JustificationSyncLink, + CIDP: CreateInherentDataProviders, CAW: CanAuthorWith + Clone + Send + 'static, { - if let Err(_) = register_pow_inherent_data_provider(&inherent_data_providers) { - warn!("Registering inherent data provider for timestamp failed"); - } - - let timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); - let worker = Arc::new(Mutex::new(MiningWorker:: { + let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); + let worker = Arc::new(Mutex::new(MiningWorker { build: None, algorithm: algorithm.clone(), block_import, + justification_sync_link, })); let worker_ret = worker.clone(); - let task = timer.for_each(move |()| { - let worker = worker.clone(); + let task = async move { + loop { + if timer.next().await.is_none() { + break + } - if sync_oracle.is_major_syncing() { - debug!(target: "pow", "Skipping proposal due to sync."); - worker.lock().on_major_syncing(); - return Either::Left(future::ready(())) - } + if sync_oracle.is_major_syncing() { + debug!(target: "pow", "Skipping proposal due to sync."); + worker.lock().on_major_syncing(); + continue + } + + let best_header = match select_chain.best_chain().await { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to pull new block for authoring. \ + Select best chain error: {:?}", + err + ); + continue + }, + }; + let best_hash = best_header.hash(); - let best_header = match select_chain.best_chain() { - Ok(x) => x, - Err(err) => { + if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { warn!( target: "pow", - "Unable to pull new block for authoring. \ - Select best chain error: {:?}", - err + "Skipping proposal `can_author_with` returned: {} \ + Probably a node update is required!", + err, ); - return Either::Left(future::ready(())) - }, - }; - let best_hash = best_header.hash(); + continue + } - if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { - warn!( - target: "pow", - "Skipping proposal `can_author_with` returned: {} \ - Probably a node update is required!", - err, - ); - return Either::Left(future::ready(())) - } + if worker.lock().best_hash() == Some(best_hash) { + continue + } - if worker.lock().best_hash() == Some(best_hash) { - return Either::Left(future::ready(())) - } + // The worker is locked for the duration of the whole proposing period. Within this + // period, the mining target is outdated and useless anyway. - // The worker is locked for the duration of the whole proposing period. Within this period, - // the mining target is outdated and useless anyway. 
+ let difficulty = match algorithm.difficulty(best_hash) { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Fetch difficulty failed: {:?}", + err, + ); + continue + }, + }; - let difficulty = match algorithm.difficulty(best_hash) { - Ok(x) => x, - Err(err) => { - warn!( - target: "pow", - "Unable to propose new block for authoring. \ - Fetch difficulty failed: {:?}", - err, - ); - return Either::Left(future::ready(())) - }, - }; + let inherent_data_providers = match create_inherent_data_providers + .create_inherent_data_providers(best_hash, ()) + .await + { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Creating inherent data providers failed: {:?}", + err, + ); + continue + }, + }; - let awaiting_proposer = env.init(&best_header); - let inherent_data = match inherent_data_providers.create_inherent_data() { - Ok(x) => x, - Err(err) => { - warn!( - target: "pow", - "Unable to propose new block for authoring. \ - Creating inherent data failed: {:?}", - err, - ); - return Either::Left(future::ready(())) - }, - }; - let mut inherent_digest = Digest::::default(); - if let Some(pre_runtime) = &pre_runtime { - inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); - } + let inherent_data = match inherent_data_providers.create_inherent_data() { + Ok(r) => r, + Err(e) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Creating inherent data failed: {:?}", + e, + ); + continue + }, + }; + + let mut inherent_digest = Digest::::default(); + if let Some(pre_runtime) = &pre_runtime { + inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); + } - let pre_runtime = pre_runtime.clone(); + let pre_runtime = pre_runtime.clone(); - Either::Right(async move { - let proposer = match awaiting_proposer.await { + let proposer = match env.init(&best_header).await { Ok(x) => x, Err(err) => { warn!( @@ -658,16 +652,14 @@ pub fn start_mining_worker( Creating proposer failed: {:?}", err, ); - return + continue }, }; - let proposal = match proposer.propose( - inherent_data, - inherent_digest, - build_time.clone(), - RecordProof::No, - ).await { + let proposal = match proposer + .propose(inherent_data, inherent_digest, build_time.clone(), None) + .await + { Ok(x) => x, Err(err) => { warn!( @@ -676,11 +668,11 @@ pub fn start_mining_worker( Creating proposal failed: {:?}", err, ); - return + continue }, }; - let build = MiningBuild:: { + let build = MiningBuild:: { metadata: MiningMetadata { best_hash, pre_hash: proposal.block.header().hash(), @@ -691,8 +683,8 @@ pub fn start_mining_worker( }; worker.lock().on_build(build); - }) - }); + } + }; (worker_ret, task) } @@ -703,9 +695,8 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err for log in header.digest().logs() { trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); match (log, pre_digest.is_some()) { - (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { - return Err(Error::MultiplePreRuntimeDigests) - }, + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => + return Err(Error::MultiplePreRuntimeDigests), (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { pre_digest = Some(v.clone()); }, @@ -722,13 +713,12 @@ fn fetch_seal( hash: B::Hash, ) -> Result, Error> { match digest { - Some(DigestItem::Seal(id, seal)) => { + Some(DigestItem::Seal(id, seal)) => if id == &POW_ENGINE_ID { Ok(seal.clone()) } else { return 
Err(Error::::WrongEngine(*id).into()) - } - }, + }, _ => return Err(Error::::HeaderUnsealed(hash).into()), } } diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 4ed863dcd9ed9..c0ca16ccad3aa 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,15 +16,23 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{pin::Pin, time::Duration, collections::HashMap, any::Any, borrow::Cow}; -use sc_client_api::ImportNotifications; -use sp_runtime::{DigestItem, traits::Block as BlockT, generic::BlockId}; -use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, import_queue::BoxBlockImport}; -use futures::{prelude::*, task::{Context, Poll}}; +use futures::{ + prelude::*, + task::{Context, Poll}, +}; use futures_timer::Delay; use log::*; +use sc_client_api::ImportNotifications; +use sc_consensus::{BlockImportParams, BoxBlockImport, StateAction, StorageChanges}; +use sp_consensus::{BlockOrigin, Proposal}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, + DigestItem, +}; +use std::{borrow::Cow, collections::HashMap, pin::Pin, time::Duration}; -use crate::{INTERMEDIATE_KEY, POW_ENGINE_ID, Seal, PowAlgorithm, PowIntermediate}; +use crate::{PowAlgorithm, PowIntermediate, Seal, INTERMEDIATE_KEY, POW_ENGINE_ID}; /// Mining metadata. This is the information needed to start an actual mining loop. #[derive(Clone, Eq, PartialEq)] @@ -40,25 +48,40 @@ pub struct MiningMetadata { } /// A build of mining, containing the metadata and the block proposal. -pub struct MiningBuild, C: sp_api::ProvideRuntimeApi> { +pub struct MiningBuild< + Block: BlockT, + Algorithm: PowAlgorithm, + C: sp_api::ProvideRuntimeApi, + Proof, +> { /// Mining metadata. pub metadata: MiningMetadata, /// Mining proposal. - pub proposal: Proposal>, + pub proposal: Proposal, Proof>, } /// Mining worker that exposes structs to query the current mining build and submit mined blocks. -pub struct MiningWorker, C: sp_api::ProvideRuntimeApi> { - pub(crate) build: Option>, +pub struct MiningWorker< + Block: BlockT, + Algorithm: PowAlgorithm, + C: sp_api::ProvideRuntimeApi, + L: sc_consensus::JustificationSyncLink, + Proof, +> { + pub(crate) build: Option>, pub(crate) algorithm: Algorithm, pub(crate) block_import: BoxBlockImport>, + pub(crate) justification_sync_link: L, } -impl MiningWorker where +impl MiningWorker +where Block: BlockT, C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: 'static + Send, + L: sc_consensus::JustificationSyncLink, + sp_api::TransactionFor: Send + 'static, { /// Get the current best hash. `None` if the worker has just started or the client is doing /// major syncing. @@ -70,10 +93,7 @@ impl MiningWorker where self.build = None; } - pub(crate) fn on_build( - &mut self, - build: MiningBuild, - ) { + pub(crate) fn on_build(&mut self, build: MiningBuild) { self.build = Some(build); } @@ -84,7 +104,7 @@ impl MiningWorker where /// Submit a mined seal. The seal will be validated again. Returns true if the submission is /// successful. 
- pub fn submit(&mut self, seal: Seal) -> bool { + pub async fn submit(&mut self, seal: Seal) -> bool { if let Some(build) = self.build.take() { match self.algorithm.verify( &BlockId::Hash(build.metadata.best_hash), @@ -117,19 +137,26 @@ impl MiningWorker where let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(seal); import_block.body = Some(body); - import_block.storage_changes = Some(build.proposal.storage_changes); + import_block.state_action = + StateAction::ApplyChanges(StorageChanges::Changes(build.proposal.storage_changes)); let intermediate = PowIntermediate:: { difficulty: Some(build.metadata.difficulty), }; - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box - ); + import_block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); + + let header = import_block.post_header(); + match self.block_import.import_block(import_block, HashMap::default()).await { + Ok(res) => { + res.handle_justification( + &header.hash(), + *header.number(), + &mut self.justification_sync_link, + ); - match self.block_import.import_block(import_block, HashMap::default()) { - Ok(_) => { info!( target: "pow", "✅ Successfully mined block on top of: {}", @@ -165,15 +192,8 @@ pub struct UntilImportedOrTimeout { impl UntilImportedOrTimeout { /// Create a new stream using the given import notification and timeout duration. - pub fn new( - import_notifications: ImportNotifications, - timeout: Duration, - ) -> Self { - Self { - import_notifications, - timeout, - inner_delay: None, - } + pub fn new(import_notifications: ImportNotifications, timeout: Duration) -> Self { + Self { import_notifications, timeout, inner_delay: None } } } diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 3a636360e795d..4c0142829bb5c 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-slots" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" edition = "2018" @@ -14,22 +14,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-consensus-slots = { version = "0.8.0", path = "../../../primitives/consensus/slots" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -futures = "0.3.4" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-arithmetic = { version = "4.0.0-dev", path = "../../../primitives/arithmetic" } +sp-blockchain = { version = "4.0.0-dev", path = 
"../../../primitives/blockchain" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +futures = "0.3.9" futures-timer = "3.0.1" -parking_lot = "0.10.0" -log = "0.4.8" +log = "0.4.11" +thiserror = "1.0.21" +async-trait = "0.1.50" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/build.rs b/client/consensus/slots/build.rs index 513cc234d4363..57424f016f3e5 100644 --- a/client/consensus/slots/build.rs +++ b/client/consensus/slots/build.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use std::env; diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index 1f1fe37068f82..c2fe3f6f4e6bb 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -1,25 +1,27 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Schema for slots in the aux-db. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use sp_consensus_slots::EquivocationProof; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_consensus_slots::{EquivocationProof, Slot}; use sp_runtime::traits::Header; const SLOT_HEADER_MAP_KEY: &[u8] = b"slot_header_map"; @@ -31,17 +33,17 @@ pub const MAX_SLOT_CAPACITY: u64 = 1000; pub const PRUNING_BOUND: u64 = 2 * MAX_SLOT_CAPACITY; fn load_decode(backend: &C, key: &[u8]) -> ClientResult> - where - C: AuxStore, - T: Decode, +where + C: AuxStore, + T: Decode, { match backend.get_aux(key)? { None => Ok(None), Some(t) => T::decode(&mut &t[..]) - .map_err( - |e| ClientError::Backend(format!("Slots DB is corrupted. Decode error: {}", e.what())), - ) - .map(Some) + .map_err(|e| { + ClientError::Backend(format!("Slots DB is corrupted. Decode error: {}", e)) + }) + .map(Some), } } @@ -50,19 +52,19 @@ fn load_decode(backend: &C, key: &[u8]) -> ClientResult> /// Note: it detects equivocations only when slot_now - slot <= MAX_SLOT_CAPACITY. pub fn check_equivocation( backend: &C, - slot_now: u64, - slot: u64, + slot_now: Slot, + slot: Slot, header: &H, signer: &P, ) -> ClientResult>> - where - H: Header, - C: AuxStore, - P: Clone + Encode + Decode + PartialEq, +where + H: Header, + C: AuxStore, + P: Clone + Encode + Decode + PartialEq, { // We don't check equivocations for old headers out of our capacity. - if slot_now.saturating_sub(slot) > MAX_SLOT_CAPACITY { - return Ok(None); + if slot_now.saturating_sub(*slot) > Slot::from(MAX_SLOT_CAPACITY) { + return Ok(None) } // Key for this slot. @@ -70,17 +72,16 @@ pub fn check_equivocation( slot.using_encoded(|s| curr_slot_key.extend(s)); // Get headers of this slot. - let mut headers_with_sig = load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])? - .unwrap_or_else(Vec::new); + let mut headers_with_sig = + load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])?.unwrap_or_else(Vec::new); // Get first slot saved. let slot_header_start = SLOT_HEADER_START.to_vec(); - let first_saved_slot = load_decode::<_, u64>(backend, &slot_header_start[..])? - .unwrap_or(slot); + let first_saved_slot = load_decode::<_, Slot>(backend, &slot_header_start[..])?.unwrap_or(slot); if slot_now < first_saved_slot { // The code below assumes that slots will be visited sequentially. 
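// (A `slot_now` earlier than the first saved slot would break that assumption:
// any entries for it may already have been pruned, so an equivocation can be
// neither detected nor ruled out, and the conservative answer is `None`.)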
- return Ok(None); + return Ok(None) } for (prev_header, prev_signer) in headers_with_sig.iter() { @@ -90,11 +91,11 @@ pub fn check_equivocation( // 2) with different hash if header.hash() != prev_header.hash() { return Ok(Some(EquivocationProof { - slot_number: slot, + slot, offender: signer.clone(), first_header: prev_header.clone(), second_header: header.clone(), - })); + })) } else { // We don't need to continue in case of duplicated header, // since it's already saved and a possible equivocation @@ -107,11 +108,11 @@ pub fn check_equivocation( let mut keys_to_delete = vec![]; let mut new_first_saved_slot = first_saved_slot; - if slot_now - first_saved_slot >= PRUNING_BOUND { + if *slot_now - *first_saved_slot >= PRUNING_BOUND { let prefix = SLOT_HEADER_MAP_KEY.to_vec(); new_first_saved_slot = slot_now.saturating_sub(MAX_SLOT_CAPACITY); - for s in first_saved_slot..new_first_saved_slot { + for s in u64::from(first_saved_slot)..new_first_saved_slot.into() { let mut p = prefix.clone(); s.using_encoded(|s| p.extend(s)); keys_to_delete.push(p); @@ -133,12 +134,11 @@ pub fn check_equivocation( #[cfg(test)] mod test { - use sp_core::{sr25519, Pair}; - use sp_core::hash::H256; - use sp_runtime::testing::{Header as HeaderTest, Digest as DigestTest}; + use sp_core::{hash::H256, sr25519, Pair}; + use sp_runtime::testing::{Digest as DigestTest, Header as HeaderTest}; use substrate_test_runtime_client; - use super::{MAX_SLOT_CAPACITY, PRUNING_BOUND, check_equivocation}; + use super::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; fn create_header(number: u64) -> HeaderTest { // so that different headers for the same number get different hashes @@ -149,7 +149,7 @@ mod test { number, state_root: Default::default(), extrinsics_root: Default::default(), - digest: DigestTest { logs: vec![], }, + digest: DigestTest { logs: vec![] }, }; header @@ -169,79 +169,55 @@ mod test { let header6 = create_header(3); // @ slot 4 // It's ok to sign same headers. - assert!( - check_equivocation( - &client, - 2, - 2, - &header1, - &public, - ).unwrap().is_none(), - ); - - assert!( - check_equivocation( - &client, - 3, - 2, - &header1, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation(&client, 2.into(), 2.into(), &header1, &public) + .unwrap() + .is_none(),); + + assert!(check_equivocation(&client, 3.into(), 2.into(), &header1, &public) + .unwrap() + .is_none(),); // But not two different headers at the same slot. - assert!( - check_equivocation( - &client, - 4, - 2, - &header2, - &public, - ).unwrap().is_some(), - ); + assert!(check_equivocation(&client, 4.into(), 2.into(), &header2, &public) + .unwrap() + .is_some(),); // Different slot is ok. - assert!( - check_equivocation( - &client, - 5, - 4, - &header3, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation(&client, 5.into(), 4.into(), &header3, &public) + .unwrap() + .is_none(),); // Here we trigger pruning and save header 4. - assert!( - check_equivocation( - &client, - PRUNING_BOUND + 2, - MAX_SLOT_CAPACITY + 4, - &header4, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation( + &client, + (PRUNING_BOUND + 2).into(), + (MAX_SLOT_CAPACITY + 4).into(), + &header4, + &public, + ) + .unwrap() + .is_none(),); // This fails because header 5 is an equivocation of header 4. 
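// (Header 5 carries the same signer and the same slot, `MAX_SLOT_CAPACITY + 4`,
// as header 4 but hashes differently, which is exactly the condition that
// `check_equivocation` reports as a proof.)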
- assert!( - check_equivocation( - &client, - PRUNING_BOUND + 3, - MAX_SLOT_CAPACITY + 4, - &header5, - &public, - ).unwrap().is_some(), - ); + assert!(check_equivocation( + &client, + (PRUNING_BOUND + 3).into(), + (MAX_SLOT_CAPACITY + 4).into(), + &header5, + &public, + ) + .unwrap() + .is_some(),); // This is ok because we pruned the corresponding header. Shows that we are pruning. - assert!( - check_equivocation( - &client, - PRUNING_BOUND + 4, - 4, - &header6, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation( + &client, + (PRUNING_BOUND + 4).into(), + 4.into(), + &header6, + &public, + ) + .unwrap() + .is_none(),); } } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 7d346ffe3954d..bfaa388014ef0 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Slots functionality for Substrate. //! @@ -20,27 +22,33 @@ //! time during which certain events can and/or must occur. This crate //! provides generic functionality for slots. 
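//!
//! As a rough mental model (an illustration, not an API of this crate): with the
//! timestamp-based inherent data providers used further down, the current slot is
//! simply wall-clock time divided into fixed-length windows:
//!
//! ```ignore
//! let slot = unix_time_millis / slot_duration_millis;
//! ```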
-#![forbid(unsafe_code, missing_docs)] +#![forbid(unsafe_code)] +#![warn(missing_docs)] -mod slots; mod aux_schema; +mod slots; -pub use slots::{SignedDuration, SlotInfo}; -use slots::Slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; +pub use slots::SlotInfo; +use slots::Slots; use codec::{Decode, Encode}; -use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData, RecordProof}; -use futures::{prelude::*, future::{self, Either}}; +use futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; -use sp_inherents::{InherentData, InherentDataProviders}; use log::{debug, error, info, warn}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; -use sp_api::{ProvideRuntimeApi, ApiRef}; -use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::{Instant, Duration}}; -use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; -use parking_lot::Mutex; +use sc_consensus::{BlockImport, JustificationSyncLink}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; +use sp_api::{ApiRef, ProvideRuntimeApi}; +use sp_arithmetic::traits::BaseArithmetic; +use sp_consensus::{CanAuthorWith, Proposer, SelectChain, SlotData, SyncOracle}; +use sp_consensus_slots::Slot; +use sp_inherents::CreateInherentDataProviders; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor}, +}; +use sp_timestamp::Timestamp; +use std::{fmt::Debug, ops::Deref, time::Duration}; /// The changes that need to applied to the storage to create the state for a block. /// @@ -48,159 +56,194 @@ use parking_lot::Mutex; pub type StorageChanges = sp_state_machine::StorageChanges, NumberFor>; -/// A worker that should be invoked at every new slot. -pub trait SlotWorker { - /// The type of the future that will be returned when a new slot is - /// triggered. - type OnSlot: Future>; +/// The result of [`SlotWorker::on_slot`]. +#[derive(Debug, Clone)] +pub struct SlotResult { + /// The block that was built. + pub block: Block, + /// The storage proof that was recorded while building the block. + pub storage_proof: Proof, +} +/// A worker that should be invoked at every new slot. +/// +/// The implementation should not make any assumptions of the slot being bound to the time or +/// similar. The only valid assumption is that the slot number is always increasing. +#[async_trait::async_trait] +pub trait SlotWorker { /// Called when a new slot is triggered. - fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot; + /// + /// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in + /// the slot. Otherwise `None` is returned. + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option>; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at /// its beginning and tries to produce a block if successfully claimed, timing /// out if block production takes too long. +#[async_trait::async_trait] pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. type BlockImport: BlockImport>::Transaction> - + Send + 'static; + + Send + + 'static; /// A handle to a `SyncOracle`. type SyncOracle: SyncOracle; + /// A handle to a `JustificationSyncLink`, allows hooking into the sync module to control the + /// justification sync process. 
+ type JustificationSyncLink: JustificationSyncLink; + /// The type of future resolving to the proposer. type CreateProposer: Future> - + Send + Unpin + 'static; + + Send + + Unpin + + 'static; /// The type of proposer to use to build blocks. - type Proposer: Proposer; + type Proposer: Proposer + Send; /// Data associated with a slot claim. type Claim: Send + 'static; /// Epoch data necessary for authoring. - type EpochData: Send + 'static; + type EpochData: Send + Sync + 'static; /// The logging target to use when logging messages. fn logging_target(&self) -> &'static str; /// A handle to a `BlockImport`. - fn block_import(&self) -> Arc>; + fn block_import(&mut self) -> &mut Self::BlockImport; /// Returns the epoch data necessary for authoring. For time-dependent epochs, /// use the provided slot number as a canonical source of time. - fn epoch_data(&self, header: &B::Header, slot_number: u64) -> Result; + fn epoch_data( + &self, + header: &B::Header, + slot: Slot, + ) -> Result; /// Returns the number of authorities given the epoch data. /// None indicate that the authorities information is incomplete. fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; /// Tries to claim the given slot, returning an object with claim data if successful. - fn claim_slot( + async fn claim_slot( &self, header: &B::Header, - slot_number: u64, + slot: Slot, epoch_data: &Self::EpochData, ) -> Option; /// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we /// need to author blocks or not. - fn notify_slot( - &self, - _header: &B::Header, - _slot_number: u64, - _epoch_data: &Self::EpochData, - ) { } + fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} /// Return the pre digest data to include in a block authored with the given claim. fn pre_digest_data( &self, - slot_number: u64, + slot: Slot, claim: &Self::Claim, ) -> Vec>; /// Returns a function which produces a `BlockImportParams`. - fn block_import_params(&self) -> Box< + fn block_import_params( + &self, + ) -> Box< dyn Fn( - B::Header, - &B::Hash, - Vec, - StorageChanges<>::Transaction, B>, - Self::Claim, - Self::EpochData, - ) -> Result< - sp_consensus::BlockImportParams>::Transaction>, - sp_consensus::Error - > + Send + 'static + B::Header, + &B::Hash, + Vec, + StorageChanges<>::Transaction, B>, + Self::Claim, + Self::EpochData, + ) -> Result< + sc_consensus::BlockImportParams< + B, + >::Transaction, + >, + sp_consensus::Error, + > + Send + + 'static, >; /// Whether to force authoring if offline. fn force_authoring(&self) -> bool; + /// Returns whether the block production should back off. + /// + /// By default this function always returns `false`. + /// + /// An example strategy that back offs if the finalized head is lagging too much behind the tip + /// is implemented by [`BackoffAuthoringOnFinalizedHeadLagging`]. + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + false + } + /// Returns a handle to a `SyncOracle`. fn sync_oracle(&mut self) -> &mut Self::SyncOracle; + /// Returns a handle to a `JustificationSyncLink`. + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink; + /// Returns a `Proposer` to author on top of the given block. fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer; - /// Remaining duration of the slot. 
- fn slot_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { - let now = Instant::now(); - if now < slot_info.ends_at { - slot_info.ends_at.duration_since(now) - } else { - Duration::from_millis(0) - } - } + /// Returns a [`TelemetryHandle`] if any. + fn telemetry(&self) -> Option; - /// Remaining duration for proposing. None means unlimited. - fn proposing_remaining_duration( - &self, - _head: &B::Header, - slot_info: &SlotInfo - ) -> Option { - Some(self.slot_remaining_duration(slot_info)) - } + /// Remaining duration for proposing. + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration; - /// Implements the `on_slot` functionality from `SlotWorker`. - fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) - -> Pin> + Send>> where - Self: Send + Sync, - >::Proposal: Unpin + Send + 'static, + /// Implements [`SlotWorker::on_slot`]. + async fn on_slot( + &mut self, + slot_info: SlotInfo, + ) -> Option>::Proof>> + where + Self: Sync, { - let (timestamp, slot_number, slot_duration) = - (slot_info.timestamp, slot_info.number, slot_info.duration); + let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); + let telemetry = self.telemetry(); + let logging_target = self.logging_target(); - { - let slot_now = SignedDuration::default().slot_now(slot_duration); - if slot_now > slot_number { - // if this is behind, return. - debug!(target: self.logging_target(), - "Skipping proposal slot {} since our current view is {}", - slot_number, slot_now, - ); + let proposing_remaining_duration = self.proposing_remaining_duration(&slot_info); - return Box::pin(future::ready(Ok(()))); - } - } + let proposing_remaining = if proposing_remaining_duration == Duration::default() { + debug!( + target: logging_target, + "Skipping proposal slot {} since there's no time left to propose", slot, + ); + + return None + } else { + Delay::new(proposing_remaining_duration) + }; - let epoch_data = match self.epoch_data(&chain_head, slot_number) { + let epoch_data = match self.epoch_data(&slot_info.chain_head, slot) { Ok(epoch_data) => epoch_data, Err(err) => { - warn!("Unable to fetch epoch data at block {:?}: {:?}", chain_head.hash(), err); + warn!( + target: logging_target, + "Unable to fetch epoch data at block {:?}: {:?}", + slot_info.chain_head.hash(), + err, + ); telemetry!( - CONSENSUS_WARN; "slots.unable_fetching_authorities"; - "slot" => ?chain_head.hash(), + telemetry; + CONSENSUS_WARN; + "slots.unable_fetching_authorities"; + "slot" => ?slot_info.chain_head.hash(), "err" => ?err, ); - return Box::pin(future::ready(Ok(()))); - } + return None + }, }; - self.notify_slot(&chain_head, slot_number, &epoch_data); + self.notify_slot(&slot_info.chain_head, slot, &epoch_data); let authorities_len = self.authorities_len(&epoch_data); @@ -208,221 +251,286 @@ pub trait SimpleSlotWorker { self.sync_oracle().is_offline() && authorities_len.map(|a| a > 1).unwrap_or(false) { - debug!(target: self.logging_target(), "Skipping proposal slot. Waiting for the network."); + debug!(target: logging_target, "Skipping proposal slot. 
Waiting for the network."); telemetry!( + telemetry; CONSENSUS_DEBUG; "slots.skipping_proposal_slot"; "authorities_len" => authorities_len, ); - return Box::pin(future::ready(Ok(()))); + return None } - let claim = match self.claim_slot(&chain_head, slot_number, &epoch_data) { - None => return Box::pin(future::ready(Ok(()))), - Some(claim) => claim, - }; + let claim = self.claim_slot(&slot_info.chain_head, slot, &epoch_data).await?; + + if self.should_backoff(slot, &slot_info.chain_head) { + return None + } debug!( - target: self.logging_target(), "Starting authorship at slot {}; timestamp = {}", - slot_number, - timestamp, + target: self.logging_target(), + "Starting authorship at slot {}; timestamp = {}", + slot, + *timestamp, ); - telemetry!(CONSENSUS_DEBUG; "slots.starting_authorship"; - "slot_num" => slot_number, - "timestamp" => timestamp, + telemetry!( + telemetry; + CONSENSUS_DEBUG; + "slots.starting_authorship"; + "slot_num" => *slot, + "timestamp" => *timestamp, ); - let awaiting_proposer = self.proposer(&chain_head).map_err(move |err| { - warn!("Unable to author block in slot {:?}: {:?}", slot_number, err); - - telemetry!(CONSENSUS_WARN; "slots.unable_authoring_block"; - "slot" => slot_number, "err" => ?err - ); + let proposer = match self.proposer(&slot_info.chain_head).await { + Ok(p) => p, + Err(err) => { + warn!( + target: logging_target, + "Unable to author block in slot {:?}: {:?}", slot, err, + ); - err - }); + telemetry!( + telemetry; + CONSENSUS_WARN; + "slots.unable_authoring_block"; + "slot" => *slot, + "err" => ?err + ); - let slot_remaining_duration = self.slot_remaining_duration(&slot_info); - let proposing_remaining_duration = self.proposing_remaining_duration(&chain_head, &slot_info); - let logs = self.pre_digest_data(slot_number, &claim); + return None + }, + }; - // deadline our production to approx. the end of the slot - let proposing = awaiting_proposer.and_then(move |proposer| proposer.propose( - slot_info.inherent_data, - sp_runtime::generic::Digest { - logs, + let logs = self.pre_digest_data(slot, &claim); + + // deadline our production to 98% of the total time left for proposing. As we deadline + // the proposing below to the same total time left, the 2% margin should be enough for + // the result to be returned. + let proposing = proposer + .propose( + slot_info.inherent_data, + sp_runtime::generic::Digest { logs }, + proposing_remaining_duration.mul_f32(0.98), + None, + ) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))); + + let proposal = match futures::future::select(proposing, proposing_remaining).await { + Either::Left((Ok(p), _)) => p, + Either::Left((Err(err), _)) => { + warn!(target: logging_target, "Proposing failed: {:?}", err); + + return None }, - slot_remaining_duration, - RecordProof::No, - ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)))); + Either::Right(_) => { + info!( + target: logging_target, + "⌛️ Discarding proposal for slot {}; block production took too long", slot, + ); + // If the node was compiled with debug, tell the user to use release optimizations. 
+ #[cfg(build_type = "debug")] + info!( + target: logging_target, + "👉 Recompile your node in `--release` mode to mitigate this problem.", + ); + telemetry!( + telemetry; + CONSENSUS_INFO; + "slots.discarding_proposal_took_too_long"; + "slot" => *slot, + ); - let delay: Box + Unpin + Send> = match proposing_remaining_duration { - Some(r) => Box::new(Delay::new(r)), - None => Box::new(future::pending()), + return None + }, }; - let proposal_work = - Box::new(futures::future::select(proposing, delay).map(move |v| match v { - futures::future::Either::Left((b, _)) => b.map(|b| (b, claim)), - futures::future::Either::Right(_) => { - info!("⌛️ Discarding proposal for slot {}; block production took too long", slot_number); - // If the node was compiled with debug, tell the user to use release optimizations. - #[cfg(build_type="debug")] - info!("👉 Recompile your node in `--release` mode to mitigate this problem."); - telemetry!(CONSENSUS_INFO; "slots.discarding_proposal_took_too_long"; - "slot" => slot_number, - ); - Err(sp_consensus::Error::ClientImport("Timeout in the Slots proposer".into())) - }, - })); - let block_import_params_maker = self.block_import_params(); let block_import = self.block_import(); - let logging_target = self.logging_target(); - Box::pin(proposal_work.and_then(move |(proposal, claim)| { - let (header, body) = proposal.block.deconstruct(); - let header_num = *header.number(); - let header_hash = header.hash(); - let parent_hash = *header.parent_hash(); - - let block_import_params = block_import_params_maker( - header, - &header_hash, - body, - proposal.storage_changes, - claim, - epoch_data, - ); + let (block, storage_proof) = (proposal.block, proposal.proof); + let (header, body) = block.deconstruct(); + let header_num = *header.number(); + let header_hash = header.hash(); + let parent_hash = *header.parent_hash(); + + let block_import_params = match block_import_params_maker( + header, + &header_hash, + body.clone(), + proposal.storage_changes, + claim, + epoch_data, + ) { + Ok(bi) => bi, + Err(err) => { + warn!(target: logging_target, "Failed to create block import params: {:?}", err); - let block_import_params = match block_import_params { - Ok(params) => params, - Err(e) => return future::err(e), - }; + return None + }, + }; - info!( - "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - header_num, - block_import_params.post_hash(), - header_hash, - ); + info!( + target: logging_target, + "🔖 Pre-sealed block for proposal at {}. 
Hash now {:?}, previously {:?}.", + header_num, + block_import_params.post_hash(), + header_hash, + ); - telemetry!(CONSENSUS_INFO; "slots.pre_sealed_block"; - "header_num" => ?header_num, - "hash_now" => ?block_import_params.post_hash(), - "hash_previously" => ?header_hash, - ); + telemetry!( + telemetry; + CONSENSUS_INFO; + "slots.pre_sealed_block"; + "header_num" => ?header_num, + "hash_now" => ?block_import_params.post_hash(), + "hash_previously" => ?header_hash, + ); - if let Err(err) = block_import.lock().import_block(block_import_params, Default::default()) { - warn!(target: logging_target, - "Error with block built on {:?}: {:?}", - parent_hash, - err, + let header = block_import_params.post_header(); + match block_import.import_block(block_import_params, Default::default()).await { + Ok(res) => { + res.handle_justification( + &header.hash(), + *header.number(), + self.justification_sync_link(), + ); + }, + Err(err) => { + warn!( + target: logging_target, + "Error with block built on {:?}: {:?}", parent_hash, err, ); - telemetry!(CONSENSUS_WARN; "slots.err_with_block_built_on"; - "hash" => ?parent_hash, "err" => ?err, + telemetry!( + telemetry; + CONSENSUS_WARN; + "slots.err_with_block_built_on"; + "hash" => ?parent_hash, + "err" => ?err, ); - } - future::ready(Ok(())) - })) + }, + } + + Some(SlotResult { block: B::new(header, body), storage_proof }) } } -/// Slot compatible inherent data. -pub trait SlotCompatible { - /// Extract timestamp and slot from inherent data. - fn extract_timestamp_and_slot( - &self, - inherent: &InherentData, - ) -> Result<(u64, u64, std::time::Duration), sp_consensus::Error>; +#[async_trait::async_trait] +impl + Send + Sync> + SlotWorker>::Proof> for T +{ + async fn on_slot( + &mut self, + slot_info: SlotInfo, + ) -> Option>::Proof>> { + SimpleSlotWorker::on_slot(self, slot_info).await + } +} + +/// Slot specific extension that the inherent data provider needs to implement. +pub trait InherentDataProviderExt { + /// The current timestamp that will be found in the + /// [`InherentData`](`sp_inherents::InherentData`). + fn timestamp(&self) -> Timestamp; + + /// The current slot that will be found in the [`InherentData`](`sp_inherents::InherentData`). + fn slot(&self) -> Slot; +} - /// Get the difference between chain time and local time. Defaults to - /// always returning zero. - fn time_offset() -> SignedDuration { Default::default() } +/// Small macro for implementing `InherentDataProviderExt` for inherent data provider tuple. +macro_rules! impl_inherent_data_provider_ext_tuple { + ( T, S $(, $TN:ident)* $( , )?) => { + impl InherentDataProviderExt for (T, S, $($TN),*) + where + T: Deref, + S: Deref, + { + fn timestamp(&self) -> Timestamp { + *self.0.deref() + } + + fn slot(&self) -> Slot { + *self.1.deref() + } + } + } } +impl_inherent_data_provider_ext_tuple!(T, S); +impl_inherent_data_provider_ext_tuple!(T, S, A); +impl_inherent_data_provider_ext_tuple!(T, S, A, B); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I); +impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I, J); + /// Start a new slot worker. 
/// /// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is /// polled until completion, unless we are major syncing. -pub fn start_slot_worker( +pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, mut sync_oracle: SO, - inherent_data_providers: InherentDataProviders, - timestamp_extractor: SC, + create_inherent_data_providers: CIDP, can_author_with: CAW, -) -> impl Future -where +) where B: BlockT, C: SelectChain, - W: SlotWorker, - W::OnSlot: Unpin, + W: SlotWorker, SO: SyncOracle + Send, - SC: SlotCompatible + Unpin, T: SlotData + Clone, + CIDP: CreateInherentDataProviders + Send, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, CAW: CanAuthorWith + Send, { let SlotDuration(slot_duration) = slot_duration; - // rather than use a timer interval, we schedule our waits ourselves - Slots::::new( - slot_duration.slot_duration(), - inherent_data_providers, - timestamp_extractor, - ).inspect_err(|e| debug!(target: "slots", "Faulty timer: {:?}", e)) - .try_for_each(move |slot_info| { - // only propose when we are not syncing. - if sync_oracle.is_major_syncing() { - debug!(target: "slots", "Skipping proposal slot due to sync."); - return Either::Right(future::ready(Ok(()))); - } + let mut slots = + Slots::new(slot_duration.slot_duration(), create_inherent_data_providers, client); - let slot_num = slot_info.number; - let chain_head = match client.best_chain() { - Ok(x) => x, - Err(e) => { - warn!(target: "slots", "Unable to author block in slot {}. \ - no best block header: {:?}", slot_num, e); - return Either::Right(future::ready(Ok(()))); - } - }; + loop { + let slot_info = match slots.next_slot().await { + Ok(r) => r, + Err(e) => { + warn!(target: "slots", "Error while polling for next slot: {:?}", e); + return + }, + }; - if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(chain_head.hash())) { - warn!( - target: "slots", - "Unable to author block in slot {},. `can_author_with` returned: {} \ - Probably a node update is required!", - slot_num, - err, - ); - Either::Right(future::ready(Ok(()))) - } else { - Either::Left( - worker.on_slot(chain_head, slot_info) - .map_err(|e| { - warn!(target: "slots", "Encountered consensus error: {:?}", e); - }) - .or_else(|_| future::ready(Ok(()))) - ) - } - }).then(|res| { - if let Err(err) = res { - warn!(target: "slots", "Slots stream terminated with an error: {:?}", err); - } - future::ready(()) - }) + if sync_oracle.is_major_syncing() { + debug!(target: "slots", "Skipping proposal slot due to sync."); + continue + } + + if let Err(err) = + can_author_with.can_author_with(&BlockId::Hash(slot_info.chain_head.hash())) + { + warn!( + target: "slots", + "Unable to author block in slot {},. `can_author_with` returned: {} \ + Probably a node update is required!", + slot_info.slot, + err, + ); + } else { + let _ = worker.on_slot(slot_info).await; + } + } } /// A header which has been checked pub enum CheckedHeader { /// A header which has slot in the future. this is the full header (not stripped) /// and the slot in which it should be processed. - Deferred(H, u64), + Deferred(H, Slot), /// A header which is fully checked, including signature. This is the pre-header /// accompanied by the seal components. /// @@ -430,7 +538,17 @@ pub enum CheckedHeader { Checked(H, S), } -/// A slot duration. Create with `get_or_compute`. 
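// A hedged sketch, not part of this diff: the `CreateInherentDataProviders` bound
// used by `start_slot_worker` above must yield something the tuple impls of
// `InherentDataProviderExt` accept, i.e. a tuple whose first element derefs to a
// `Timestamp` and whose second derefs to a `Slot`. Roughly (the slot provider
// constructor here is hypothetical; the concrete one comes from the consensus crate
// in use):
//
// let create_inherent_data_providers = move |_parent_hash, ()| async move {
//     let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
//     let slot = slot_provider_from_timestamp(*timestamp, slot_duration);
//     Ok((timestamp, slot))
// };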
+#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error +where + T: Debug, +{ + #[error("Slot duration is invalid: {0:?}")] + SlotDurationInvalid(SlotDuration), +} + +/// A slot duration. Create with [`get_or_compute`](Self::get_or_compute). // The internal member should stay private here to maintain invariants of // `get_or_compute`. #[derive(Clone, Copy, Debug, Encode, Decode, Hash, PartialOrd, Ord, PartialEq, Eq)] @@ -443,59 +561,55 @@ impl Deref for SlotDuration { } } -impl SlotData for SlotDuration { - /// Get the slot duration in milliseconds. - fn slot_duration(&self) -> u64 - where T: SlotData, - { +impl SlotData for SlotDuration { + fn slot_duration(&self) -> std::time::Duration { self.0.slot_duration() } const SLOT_KEY: &'static [u8] = T::SLOT_KEY; } -impl SlotDuration { +impl SlotDuration { /// Either fetch the slot duration from disk or compute it from the /// genesis state. /// /// `slot_key` is marked as `'static`, as it should really be a /// compile-time constant. - pub fn get_or_compute(client: &C, cb: CB) -> sp_blockchain::Result where - C: sc_client_api::backend::AuxStore, + pub fn get_or_compute(client: &C, cb: CB) -> sp_blockchain::Result + where + C: sc_client_api::backend::AuxStore + sc_client_api::UsageProvider, C: ProvideRuntimeApi, CB: FnOnce(ApiRef, &BlockId) -> sp_blockchain::Result, T: SlotData + Encode + Decode + Debug, { let slot_duration = match client.get_aux(T::SLOT_KEY)? { - Some(v) => ::decode(&mut &v[..]) - .map(SlotDuration) - .map_err(|_| { - sp_blockchain::Error::Backend({ - error!(target: "slots", "slot duration kept in invalid format"); - "slot duration kept in invalid format".to_string() - }) - }), + Some(v) => ::decode(&mut &v[..]).map(SlotDuration).map_err(|_| { + sp_blockchain::Error::Backend({ + error!(target: "slots", "slot duration kept in invalid format"); + "slot duration kept in invalid format".to_string() + }) + }), None => { - use sp_runtime::traits::Zero; - let genesis_slot_duration = - cb(client.runtime_api(), &BlockId::number(Zero::zero()))?; + let best_hash = client.usage_info().chain.best_hash; + let slot_duration = cb(client.runtime_api(), &BlockId::hash(best_hash))?; info!( - "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", - genesis_slot_duration.slot_duration() + "⏱ Loaded block-time = {:?} from block {:?}", + slot_duration.slot_duration(), + best_hash, ); - genesis_slot_duration + slot_duration .using_encoded(|s| client.insert_aux(&[(T::SLOT_KEY, &s[..])], &[]))?; - Ok(SlotDuration(genesis_slot_duration)) - } + Ok(SlotDuration(slot_duration)) + }, }?; - if slot_duration.slot_duration() == 0 { - return Err(sp_blockchain::Error::Msg( - "Invalid value for slot_duration: the value must be greater than 0.".into(), - )) + if slot_duration.slot_duration() == Default::default() { + return Err(sp_blockchain::Error::Application(Box::new(Error::SlotDurationInvalid( + slot_duration, + )))) } Ok(slot_duration) @@ -507,11 +621,118 @@ impl SlotDuration { } } +/// A unit type wrapper to express the proportion of a slot. +pub struct SlotProportion(f32); + +impl SlotProportion { + /// Create a new proportion. + /// + /// The given value `inner` should be in the range `[0,1]`. If the value is not in the required + /// range, it is clamped into the range. + pub fn new(inner: f32) -> Self { + Self(inner.clamp(0.0, 1.0)) + } + + /// Returns the inner that is guaranted to be in the range `[0,1]`. 
+ pub fn get(&self) -> f32 { + self.0 + } +} + +/// The strategy used to calculate the slot lenience used to increase the block proposal time when +/// slots have been skipped with no blocks authored. +pub enum SlotLenienceType { + /// Increase the lenience linearly with the number of skipped slots. + Linear, + /// Increase the lenience exponentially with the number of skipped slots. + Exponential, +} + +impl SlotLenienceType { + fn as_str(&self) -> &'static str { + match self { + SlotLenienceType::Linear => "linear", + SlotLenienceType::Exponential => "exponential", + } + } +} + +/// Calculate the remaining duration for block proposal taking into account whether any slots have +/// been skipped and applying the given lenience strategy. If `max_block_proposal_slot_portion` is +/// not `None`, this method guarantees that the returned duration is lower than or equal to +/// `slot_info.duration * max_block_proposal_slot_portion`. +pub fn proposing_remaining_duration<Block: BlockT>( + parent_slot: Option<Slot>, + slot_info: &SlotInfo<Block>, + block_proposal_slot_portion: &SlotProportion, + max_block_proposal_slot_portion: Option<&SlotProportion>, + slot_lenience_type: SlotLenienceType, + log_target: &str, +) -> Duration { + use sp_runtime::traits::Zero; + + let proposing_duration = slot_info.duration.mul_f32(block_proposal_slot_portion.get()); + + let slot_remaining = slot_info + .ends_at + .checked_duration_since(std::time::Instant::now()) + .unwrap_or_default(); + + let proposing_duration = std::cmp::min(slot_remaining, proposing_duration); + + // If parent is genesis block, we don't require any lenience factor. + if slot_info.chain_head.number().is_zero() { + return proposing_duration + } + + let parent_slot = match parent_slot { + Some(parent_slot) => parent_slot, + None => return proposing_duration, + }; + + let slot_lenience = match slot_lenience_type { + SlotLenienceType::Exponential => slot_lenience_exponential(parent_slot, slot_info), + SlotLenienceType::Linear => slot_lenience_linear(parent_slot, slot_info), + }; + + if let Some(slot_lenience) = slot_lenience { + let lenient_proposing_duration = + proposing_duration + slot_lenience.mul_f32(block_proposal_slot_portion.get()); + + // if we defined a maximum portion of the slot for proposal then we must make sure the + // lenience doesn't go over it + let lenient_proposing_duration = + if let Some(ref max_block_proposal_slot_portion) = max_block_proposal_slot_portion { + std::cmp::min( + lenient_proposing_duration, + slot_info.duration.mul_f32(max_block_proposal_slot_portion.get()), + ) + } else { + lenient_proposing_duration + }; + + debug!( + target: log_target, + "No block for {} slots. Applying {} lenience, total proposing duration: {}", + slot_info.slot.saturating_sub(parent_slot + 1), + slot_lenience_type.as_str(), + lenient_proposing_duration.as_secs(), + ); + + lenient_proposing_duration + } else { + proposing_duration + } +} + /// Calculate a slot duration lenience based on the number of missed slots from current /// to parent. If the number of skipped slots is greater than 0, this method will apply /// an exponential backoff of at most `2^7 * slot_duration`; if no slots were skipped, /// this method will return `None`. -pub fn slot_lenience_exponential(parent_slot: u64, slot_info: &SlotInfo) -> Option<Duration> { +pub fn slot_lenience_exponential<Block: BlockT>( + parent_slot: Slot, + slot_info: &SlotInfo<Block>, +) -> Option<Duration> { // never give more than 2^this times the lenience. 
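// (With `BACKOFF_CAP = 7` the extra lenience tops out at 2^7 = 128 slot
// durations; at a 6-second slot that is 768 seconds, roughly 12.8 minutes.)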
const BACKOFF_CAP: u64 = 7; @@ -524,7 +745,7 @@ pub fn slot_lenience_exponential(parent_slot: u64, slot_info: &SlotInfo) -> Opti // exponential back-off. // in normal cases we only attempt to issue blocks up to the end of the slot. // when the chain has been stalled for a few slots, we give more lenience. - let skipped_slots = slot_info.number.saturating_sub(parent_slot + 1); + let skipped_slots = *slot_info.slot.saturating_sub(parent_slot + 1); if skipped_slots == 0 { None @@ -532,7 +753,7 @@ pub fn slot_lenience_exponential(parent_slot: u64, slot_info: &SlotInfo) -> Opti let slot_lenience = skipped_slots / BACKOFF_STEP; let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP); let slot_lenience = 1 << slot_lenience; - Some(Duration::from_millis(slot_lenience * slot_info.duration)) + Some(slot_lenience * slot_info.duration) } } @@ -540,7 +761,10 @@ pub fn slot_lenience_exponential(parent_slot: u64, slot_info: &SlotInfo) -> Opti /// to parent. If the number of skipped slots is greater than 0, this method will apply /// a linear backoff of at most `20 * slot_duration`; if no slots were skipped, /// this method will return `None`. -pub fn slot_lenience_linear(parent_slot: u64, slot_info: &SlotInfo) -> Option<Duration> { +pub fn slot_lenience_linear<Block: BlockT>( + parent_slot: Slot, + slot_info: &SlotInfo<Block>, +) -> Option<Duration> { // never give more than 20 times more lenience. const BACKOFF_CAP: u64 = 20; @@ -550,76 +774,547 @@ pub fn slot_lenience_linear(parent_slot: u64, slot_info: &SlotInfo) -> Option { +pub trait BackoffAuthoringBlocksStrategy<N> { + /// Returns true if we should back off authoring new blocks. + fn should_backoff( + &self, + chain_head_number: N, + chain_head_slot: Slot, + finalized_number: N, + slot_now: Slot, + logging_target: &str, + ) -> bool; +} + +/// A simple default strategy for how to decide backing off authoring blocks if the number of +/// unfinalized blocks grows too large. +#[derive(Clone)] +pub struct BackoffAuthoringOnFinalizedHeadLagging<N> { + /// The max interval to back off when authoring blocks, regardless of delay in finality. + pub max_interval: N, + /// The number of unfinalized blocks allowed before starting to consider backing off authoring + /// blocks. Note that depending on the value for `authoring_bias`, there might still be an + /// additional wait until block authorship starts getting declined. + pub unfinalized_slack: N, + /// Scales the backoff rate. A higher value effectively means we back off more slowly, taking + /// longer to reach the maximum backoff as the unfinalized head of the chain grows. + pub authoring_bias: N, +} + +/// These parameters are supposed to be some form of sensible defaults. +impl<N: BaseArithmetic> Default for BackoffAuthoringOnFinalizedHeadLagging<N> { + fn default() -> Self { + Self { + // Never wait more than 100 slots before authoring blocks, regardless of delay in + // finality. + max_interval: 100.into(), + // Start to consider backing off block authorship once we have 50 or more unfinalized + // blocks at the head of the chain. + unfinalized_slack: 50.into(), + // A reasonable default for the authoring bias, or reciprocal interval scaling, is 2. + // Effectively meaning that we consider the unfinalized head suffix length to grow half + // as fast as it actually does. 
+ authoring_bias: 2.into(), + } + } +} + +impl BackoffAuthoringBlocksStrategy for BackoffAuthoringOnFinalizedHeadLagging +where + N: BaseArithmetic + Copy, +{ + fn should_backoff( + &self, + chain_head_number: N, + chain_head_slot: Slot, + finalized_number: N, + slot_now: Slot, + logging_target: &str, + ) -> bool { + // This should not happen, but we want to keep the previous behaviour if it does. + if slot_now <= chain_head_slot { + return false + } + + let unfinalized_block_length = chain_head_number - finalized_number; + let interval = + unfinalized_block_length.saturating_sub(self.unfinalized_slack) / self.authoring_bias; + let interval = interval.min(self.max_interval); + + // We're doing arithmetic between block and slot numbers. + let interval: u64 = interval.unique_saturated_into(); + + // If interval is nonzero we backoff if the current slot isn't far enough ahead of the chain + // head. + if *slot_now <= *chain_head_slot + interval { + info!( + target: logging_target, + "Backing off claiming new slot for block authorship: finality is lagging.", + ); + true + } else { + false + } + } +} + +impl BackoffAuthoringBlocksStrategy for () { + fn should_backoff( + &self, + _chain_head_number: N, + _chain_head_slot: Slot, + _finalized_number: N, + _slot_now: Slot, + _logging_target: &str, + ) -> bool { + false } } #[cfg(test)] mod test { + use super::*; + use sp_api::NumberFor; use std::time::{Duration, Instant}; + use substrate_test_runtime_client::runtime::{Block, Header}; const SLOT_DURATION: Duration = Duration::from_millis(6000); - fn slot(n: u64) -> super::slots::SlotInfo { + fn slot(slot: u64) -> super::slots::SlotInfo { super::slots::SlotInfo { - number: n, - last_number: n - 1, - duration: SLOT_DURATION.as_millis() as u64, + slot: slot.into(), + duration: SLOT_DURATION, timestamp: Default::default(), inherent_data: Default::default(), - ends_at: Instant::now(), + ends_at: Instant::now() + SLOT_DURATION, + chain_head: Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + block_size_limit: None, } } #[test] fn linear_slot_lenience() { // if no slots are skipped there should be no lenience - assert_eq!(super::slot_lenience_linear(1, &slot(2)), None); + assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(2)), None); // otherwise the lenience is incremented linearly with // the number of skipped slots. 
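// (e.g. a parent at slot 1 and a current slot of 5 means three skipped slots,
// so the lenience works out to 3 * SLOT_DURATION = 18 seconds here.)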
for n in 3..=22 { assert_eq!( - super::slot_lenience_linear(1, &slot(n)), + super::slot_lenience_linear(1u64.into(), &slot(n)), Some(SLOT_DURATION * (n - 2) as u32), ); } // but we cap it to a maximum of 20 slots - assert_eq!( - super::slot_lenience_linear(1, &slot(23)), - Some(SLOT_DURATION * 20), - ); + assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(23)), Some(SLOT_DURATION * 20)); } #[test] fn exponential_slot_lenience() { // if no slots are skipped there should be no lenience - assert_eq!(super::slot_lenience_exponential(1, &slot(2)), None); + assert_eq!(super::slot_lenience_exponential(1u64.into(), &slot(2)), None); // otherwise the lenience is incremented exponentially every two slots for n in 3..=17 { assert_eq!( - super::slot_lenience_exponential(1, &slot(n)), + super::slot_lenience_exponential(1u64.into(), &slot(n)), Some(SLOT_DURATION * 2u32.pow((n / 2 - 1) as u32)), ); } // but we cap it to a maximum of 14 slots assert_eq!( - super::slot_lenience_exponential(1, &slot(18)), + super::slot_lenience_exponential(1u64.into(), &slot(18)), Some(SLOT_DURATION * 2u32.pow(7)), ); assert_eq!( - super::slot_lenience_exponential(1, &slot(19)), + super::slot_lenience_exponential(1u64.into(), &slot(19)), Some(SLOT_DURATION * 2u32.pow(7)), ); } + + #[test] + fn proposing_remaining_duration_should_apply_lenience_based_on_proposal_slot_proportion() { + assert_eq!( + proposing_remaining_duration( + Some(0.into()), + &slot(2), + &SlotProportion(0.25), + None, + SlotLenienceType::Linear, + "test", + ), + SLOT_DURATION.mul_f32(0.25 * 2.0), + ); + } + + #[test] + fn proposing_remaining_duration_should_never_exceed_max_proposal_slot_proportion() { + assert_eq!( + proposing_remaining_duration( + Some(0.into()), + &slot(100), + &SlotProportion(0.25), + Some(SlotProportion(0.9)).as_ref(), + SlotLenienceType::Exponential, + "test", + ), + SLOT_DURATION.mul_f32(0.9), + ); + } + + #[derive(PartialEq, Debug)] + struct HeadState { + head_number: NumberFor, + head_slot: u64, + slot_now: NumberFor, + } + + impl HeadState { + fn author_block(&mut self) { + // Add a block to the head, and set latest slot to the current + self.head_number += 1; + self.head_slot = self.slot_now; + // Advance slot to next + self.slot_now += 1; + } + + fn dont_author_block(&mut self) { + self.slot_now += 1; + } + } + + #[test] + fn should_never_backoff_when_head_not_advancing() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let head_number = 1; + let head_slot = 1; + let finalized_number = 1; + let slot_now = 2; + + let should_backoff: Vec = (slot_now..1000) + .map(|s| { + strategy.should_backoff( + head_number, + head_slot.into(), + finalized_number, + s.into(), + "slots", + ) + }) + .collect(); + + // Should always be false, since the head isn't advancing + let expected: Vec = (slot_now..1000).map(|_| false).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_stop_authoring_if_blocks_are_still_produced_when_finality_stalled() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let mut head_number = 1; + let mut head_slot = 1; + let finalized_number = 1; + let slot_now = 2; + + let should_backoff: Vec = (slot_now..300) + .map(move |s| { + let b = strategy.should_backoff( + head_number, + head_slot.into(), + finalized_number, + s.into(), + "slots", + ); + // Chain is still advancing (by someone else) + head_number += 1; + 
head_slot = s; + b + }) + .collect(); + + // Should always be true after a short while, since the chain is advancing but finality is + // stalled + let expected: Vec = (slot_now..300).map(|s| s > 8).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_never_backoff_if_max_interval_is_reached() { + let strategy = BackoffAuthoringOnFinalizedHeadLagging::> { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + // The limit `max_interval` is used when the unfinalized chain grows to + // `max_interval * authoring_bias + unfinalized_slack`, + // which for the above parameters becomes + // 100 * 2 + 5 = 205. + // Hence we trigger this with head_number > finalized_number + 205. + let head_number = 207; + let finalized_number = 1; + + // The limit is then used once the current slot is `max_interval` ahead of slot of the head. + let head_slot = 1; + let slot_now = 2; + let max_interval = strategy.max_interval; + + let should_backoff: Vec = (slot_now..200) + .map(|s| { + strategy.should_backoff( + head_number, + head_slot.into(), + finalized_number, + s.into(), + "slots", + ) + }) + .collect(); + + // Should backoff (true) until we are `max_interval` number of slots ahead of the chain + // head slot, then we never backoff (false). + let expected: Vec = (slot_now..200).map(|s| s <= max_interval + head_slot).collect(); + assert_eq!(should_backoff, expected); + } + + #[test] + fn should_backoff_authoring_when_finality_stalled() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let finalized_number = 2; + let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: 11 }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot.into(), + finalized_number, + head_state.slot_now.into(), + "slots", + ) + }; + + let backoff: Vec = (head_state.slot_now..200) + .map(|_| { + if should_backoff(&head_state) { + head_state.dont_author_block(); + true + } else { + head_state.author_block(); + false + } + }) + .collect(); + + // Gradually start to backoff more and more frequently + let expected = [ + false, false, false, false, false, // no effect + true, false, true, false, // 1:1 + true, true, false, true, true, false, // 2:1 + true, true, true, false, true, true, true, false, // 3:1 + true, true, true, true, false, true, true, true, true, false, // 4:1 + true, true, true, true, true, false, true, true, true, true, true, false, // 5:1 + true, true, true, true, true, true, false, true, true, true, true, true, true, + false, // 6:1 + true, true, true, true, true, true, true, false, true, true, true, true, true, true, + true, false, // 7:1 + true, true, true, true, true, true, true, true, false, true, true, true, true, true, + true, true, true, false, // 8:1 + true, true, true, true, true, true, true, true, true, false, true, true, true, true, + true, true, true, true, true, false, // 9:1 + true, true, true, true, true, true, true, true, true, true, false, true, true, true, + true, true, true, true, true, true, true, false, // 10:1 + true, true, true, true, true, true, true, true, true, true, true, false, true, true, + true, true, true, true, true, true, true, true, true, false, // 11:1 + true, true, true, true, true, true, true, true, true, true, true, true, false, true, + true, true, true, true, true, true, true, true, true, true, true, false, // 12:1 + true, true, true, true, + ]; + + 
assert_eq!(backoff.as_slice(), &expected[..]); + } + + #[test] + fn should_never_wait_more_than_max_interval() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + + let finalized_number = 2; + let starting_slot = 11; + let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: starting_slot }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot.into(), + finalized_number, + head_state.slot_now.into(), + "slots", + ) + }; + + let backoff: Vec = (head_state.slot_now..40000) + .map(|_| { + if should_backoff(&head_state) { + head_state.dont_author_block(); + true + } else { + head_state.author_block(); + false + } + }) + .collect(); + + let slots_claimed: Vec = backoff + .iter() + .enumerate() + .filter(|&(_i, x)| x == &false) + .map(|(i, _x)| i + starting_slot as usize) + .collect(); + + let last_slot = backoff.len() + starting_slot as usize; + let mut last_two_claimed = slots_claimed.iter().rev().take(2); + + // Check that we claimed all the way to the end. Check two slots for when we have an uneven + // number of slots_claimed. + let expected_distance = param.max_interval as usize + 1; + assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92); + assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92 + expected_distance); + + let intervals: Vec<_> = slots_claimed.windows(2).map(|x| x[1] - x[0]).collect(); + + // The key thing is that the distance between claimed slots is capped to `max_interval + 1` + // assert_eq!(max_observed_interval, Some(&expected_distance)); + assert_eq!(intervals.iter().max(), Some(&expected_distance)); + + // But lets assert all distances, which we expect to grow linearly until `max_interval + 1` + let expected_intervals: Vec<_> = + (0..497).map(|i| (i / 2).max(1).min(expected_distance)).collect(); + + assert_eq!(intervals, expected_intervals); + } + + fn run_until_max_interval(param: BackoffAuthoringOnFinalizedHeadLagging) -> (u64, u64) { + let finalized_number = 0; + let mut head_state = HeadState { head_number: 0, head_slot: 0, slot_now: 1 }; + + let should_backoff = |head_state: &HeadState| -> bool { + >>::should_backoff( + ¶m, + head_state.head_number, + head_state.head_slot.into(), + finalized_number, + head_state.slot_now.into(), + "slots", + ) + }; + + // Number of blocks until we reach the max interval + let block_for_max_interval = + param.max_interval * param.authoring_bias + param.unfinalized_slack; + + while head_state.head_number < block_for_max_interval { + if should_backoff(&head_state) { + head_state.dont_author_block(); + } else { + head_state.author_block(); + } + } + + let slot_time = 6; + let time_to_reach_limit = slot_time * head_state.slot_now; + (block_for_max_interval, time_to_reach_limit) + } + + // Denoting + // C: unfinalized_slack + // M: authoring_bias + // X: max_interval + // then the number of slots to reach the max interval can be computed from + // (start_slot + C) + M * sum(n, 1, X) + // or + // (start_slot + C) + M * X*(X+1)/2 + fn expected_time_to_reach_max_interval( + param: &BackoffAuthoringOnFinalizedHeadLagging, + ) -> (u64, u64) { + let c = param.unfinalized_slack; + let m = param.authoring_bias; + let x = param.max_interval; + let slot_time = 6; + + let block_for_max_interval = x * m + c; + + // The 1 is because we start at slot_now = 1. 
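+ // While the interval equals some n, `authoring_bias` = M further blocks must be
+ // authored before it grows to n + 1, so summing n over 1..=X and scaling by M
+ // yields the arithmetic series M * X*(X+1)/2 used below.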
+ let expected_number_of_slots = (1 + c) + m * x * (x + 1) / 2; + let time_to_reach = expected_number_of_slots * slot_time; + + (block_for_max_interval, time_to_reach) + } + + #[test] + fn time_to_reach_upper_bound_for_smaller_slack() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 5, + authoring_bias: 2, + }; + let expected = expected_time_to_reach_max_interval(¶m); + let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param); + assert_eq!((block_for_max_interval, time_to_reach_limit), expected); + // Note: 16 hours is 57600 sec + assert_eq!((block_for_max_interval, time_to_reach_limit), (205, 60636)); + } + + #[test] + fn time_to_reach_upper_bound_for_larger_slack() { + let param = BackoffAuthoringOnFinalizedHeadLagging { + max_interval: 100, + unfinalized_slack: 50, + authoring_bias: 2, + }; + let expected = expected_time_to_reach_max_interval(¶m); + let (block_for_max_interval, time_to_reach_limit) = run_until_max_interval(param); + assert_eq!((block_for_max_interval, time_to_reach_limit), expected); + assert_eq!((block_for_max_interval, time_to_reach_limit), (250, 60906)); + } } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index 32316c56c9f53..c2ed986e1e7f8 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -1,170 +1,193 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Utility stream for yielding slots in a loop. //! //! This is used instead of `futures_timer::Interval` because it was unreliable. -use super::SlotCompatible; -use sp_consensus::Error; -use futures::{prelude::*, task::Context, task::Poll}; -use sp_inherents::{InherentData, InherentDataProviders}; +use super::{InherentDataProviderExt, Slot}; +use sp_consensus::{Error, SelectChain}; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::{pin::Pin, time::{Duration, Instant}}; use futures_timer::Delay; +use std::time::{Duration, Instant}; /// Returns current duration since unix epoch. pub fn duration_now() -> Duration { use std::time::SystemTime; let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| panic!( - "Current time {:?} is before unix epoch. Something is wrong: {:?}", - now, - e, - )) + now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| { + panic!("Current time {:?} is before unix epoch. 
+	})
 }
 
+/// Returns the duration until the next slot from now.
+pub fn time_until_next_slot(slot_duration: Duration) -> Duration {
+	let now = duration_now().as_millis();
 
-/// A `Duration` with a sign (before or after). Immutable.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
-pub struct SignedDuration {
-	offset: Duration,
-	is_positive: bool,
-}
-
-impl SignedDuration {
-	/// Construct a `SignedDuration`
-	pub fn new(offset: Duration, is_positive: bool) -> Self {
-		Self { offset, is_positive }
-	}
-
-	/// Get the slot for now. Panics if `slot_duration` is 0.
-	pub fn slot_now(&self, slot_duration: u64) -> u64 {
-		(if self.is_positive {
-			duration_now() + self.offset
-		} else {
-			duration_now() - self.offset
-		}.as_millis() as u64) / slot_duration
-	}
-}
-
-/// Returns the duration until the next slot, based on current duration since
-pub fn time_until_next(now: Duration, slot_duration: u64) -> Duration {
-	let remaining_full_millis = slot_duration - (now.as_millis() as u64 % slot_duration) - 1;
-	Duration::from_millis(remaining_full_millis)
+	let next_slot = (now + slot_duration.as_millis()) / slot_duration.as_millis();
+	let remaining_millis = next_slot * slot_duration.as_millis() - now;
+	Duration::from_millis(remaining_millis as u64)
 }
 
 /// Information about a slot.
-pub struct SlotInfo {
-	/// The slot number.
-	pub number: u64,
-	/// The last slot number produced.
-	pub last_number: u64,
-	/// Current timestamp.
-	pub timestamp: u64,
+pub struct SlotInfo<B: BlockT> {
+	/// The slot number as found in the inherent data.
+	pub slot: Slot,
+	/// Current timestamp as found in the inherent data.
+	pub timestamp: sp_timestamp::Timestamp,
 	/// The instant at which the slot ends.
 	pub ends_at: Instant,
 	/// The inherent data.
 	pub inherent_data: InherentData,
 	/// Slot duration.
-	pub duration: u64,
+	pub duration: Duration,
+	/// The chain header this slot is based on.
+	pub chain_head: B::Header,
+	/// Some potential block size limit for the block to be authored at this slot.
+	///
+	/// For more information see [`Proposer::propose`](sp_consensus::Proposer::propose).
+	pub block_size_limit: Option<usize>,
+}
+
+impl<B: BlockT> SlotInfo<B> {
+	/// Create a new [`SlotInfo`].
+	///
+	/// `ends_at` is calculated using `timestamp` and `duration`.
+	pub fn new(
+		slot: Slot,
+		timestamp: sp_timestamp::Timestamp,
+		inherent_data: InherentData,
+		duration: Duration,
+		chain_head: B::Header,
+		block_size_limit: Option<usize>,
+	) -> Self {
+		Self {
+			slot,
+			timestamp,
+			inherent_data,
+			duration,
+			chain_head,
+			block_size_limit,
+			ends_at: Instant::now() + time_until_next_slot(duration),
+		}
+	}
 }
 
 /// A stream that returns every time there is a new slot.
-pub(crate) struct Slots<SC> {
-	last_slot: u64,
-	slot_duration: u64,
+pub(crate) struct Slots<Block, C, IDP> {
+	last_slot: Slot,
+	slot_duration: Duration,
 	inner_delay: Option<Delay>,
-	inherent_data_providers: InherentDataProviders,
-	timestamp_extractor: SC,
+	create_inherent_data_providers: IDP,
+	client: C,
+	_phantom: std::marker::PhantomData<Block>,
 }
 
-impl<SC> Slots<SC> {
+impl<Block, C, IDP> Slots<Block, C, IDP> {
 	/// Create a new `Slots` stream.
-	pub fn new(
-		slot_duration: u64,
-		inherent_data_providers: InherentDataProviders,
-		timestamp_extractor: SC,
-	) -> Self {
+	pub fn new(slot_duration: Duration, create_inherent_data_providers: IDP, client: C) -> Self {
 		Slots {
-			last_slot: 0,
+			last_slot: 0.into(),
 			slot_duration,
 			inner_delay: None,
-			inherent_data_providers,
-			timestamp_extractor,
+			create_inherent_data_providers,
+			client,
+			_phantom: Default::default(),
 		}
 	}
 }
 
-impl<SC: SlotCompatible> Stream for Slots<SC> {
-	type Item = Result<SlotInfo, Error>;
-
-	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
+impl<Block, C, IDP> Slots<Block, C, IDP>
+where
+	Block: BlockT,
+	C: SelectChain<Block>,
+	IDP: CreateInherentDataProviders<Block, ()>,
+	IDP::InherentDataProviders: crate::InherentDataProviderExt,
+{
+	/// Returns a future that fires when the next slot starts.
+	pub async fn next_slot(&mut self) -> Result<SlotInfo<Block>, Error> {
 		loop {
-			let slot_duration = self.slot_duration;
 			self.inner_delay = match self.inner_delay.take() {
 				None => {
 					// schedule wait.
-					let wait_dur = time_until_next(duration_now(), slot_duration);
+					let wait_dur = time_until_next_slot(self.slot_duration);
 					Some(Delay::new(wait_dur))
-				}
+				},
 				Some(d) => Some(d),
 			};
 
-			if let Some(ref mut inner_delay) = self.inner_delay {
-				match Future::poll(Pin::new(inner_delay), cx) {
-					Poll::Pending => return Poll::Pending,
-					Poll::Ready(()) => {}
-				}
+			if let Some(inner_delay) = self.inner_delay.take() {
+				inner_delay.await;
 			}
 
-			// timeout has fired.
-
-			let inherent_data = match self.inherent_data_providers.create_inherent_data() {
-				Ok(id) => id,
-				Err(err) => return Poll::Ready(Some(Err(sp_consensus::Error::InherentData(err)))),
-			};
-			let result = self.timestamp_extractor.extract_timestamp_and_slot(&inherent_data);
-			let (timestamp, slot_num, offset) = match result {
-				Ok(v) => v,
-				Err(err) => return Poll::Ready(Some(Err(err))),
-			};
+			let ends_in = time_until_next_slot(self.slot_duration);
 
 			// reschedule delay for next slot.
-			let ends_in = offset +
-				time_until_next(Duration::from_millis(timestamp), slot_duration);
-			let ends_at = Instant::now() + ends_in;
 			self.inner_delay = Some(Delay::new(ends_in));
+			let ends_at = Instant::now() + ends_in;
+
+			let chain_head = match self.client.best_chain().await {
+				Ok(x) => x,
+				Err(e) => {
+					log::warn!(
+						target: "slots",
+						"Unable to author block in slot. No best block header: {:?}",
+						e,
+					);
+					// Let's try at the next slot..
+					self.inner_delay.take();
+					continue
+				},
+			};
+
+			let inherent_data_providers = self
+				.create_inherent_data_providers
+				.create_inherent_data_providers(chain_head.hash(), ())
+				.await?;
+
+			if Instant::now() > ends_at {
+				log::warn!(
+					target: "slots",
+					"Creating inherent data providers took more time than we had left for the slot.",
+				);
+			}
+
+			let timestamp = inherent_data_providers.timestamp();
+			let slot = inherent_data_providers.slot();
+			let inherent_data = inherent_data_providers.create_inherent_data()?;
+
 			// never yield the same slot twice.
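+			// Yielding a slot twice could lead to two blocks being authored in the
+			// same slot, which slot-based consensus such as BABE treats as
+			// equivocation.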
-			if slot_num > self.last_slot {
-				let last_slot = self.last_slot;
-				self.last_slot = slot_num;
-
-				break Poll::Ready(Some(Ok(SlotInfo {
-					number: slot_num,
-					duration: self.slot_duration,
-					last_number: last_slot,
+			if slot > self.last_slot {
+				self.last_slot = slot;
+
+				break Ok(SlotInfo::new(
+					slot,
 					timestamp,
-					ends_at,
 					inherent_data,
-				})))
+					self.slot_duration,
+					chain_head,
+					None,
+				))
 			}
 		}
 	}
 }
-
-impl<SC> Unpin for Slots<SC> {
-}
diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml
index bb23c829a6e09..7e821db197b3c 100644
--- a/client/consensus/uncles/Cargo.toml
+++ b/client/consensus/uncles/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sc-consensus-uncles"
-version = "0.8.0"
+version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Generic uncle inclusion utilities for consensus"
 edition = "2018"
@@ -13,10 +13,7 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-sc-client-api = { version = "2.0.0", path = "../../api" }
-sp-core = { version = "2.0.0", path = "../../../primitives/core" }
-sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" }
-sp-authorship = { version = "2.0.0", path = "../../../primitives/authorship" }
-sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" }
-sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" }
-log = "0.4.8"
+sc-client-api = { version = "4.0.0-dev", path = "../../api" }
+sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" }
+sp-authorship = { version = "4.0.0-dev", path = "../../../primitives/authorship" }
+thiserror = "1.0.21"
diff --git a/client/consensus/uncles/src/lib.rs b/client/consensus/uncles/src/lib.rs
index 2a129b200063b..368a994cfe520 100644
--- a/client/consensus/uncles/src/lib.rs
+++ b/client/consensus/uncles/src/lib.rs
@@ -1,65 +1,45 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.
 
-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 
-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.
 
 // You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 //! Uncles functionality for Substrate.
-#![forbid(unsafe_code, missing_docs)]
 
-use sp_consensus::SelectChain;
-use sp_inherents::{InherentDataProviders};
-use log::warn;
 use sc_client_api::ProvideUncles;
-use sp_runtime::traits::{Block as BlockT, Header};
-use std::sync::Arc;
-use sp_authorship;
+use sp_runtime::{generic::BlockId, traits::Block as BlockT};
+
+#[derive(Debug, thiserror::Error)]
+pub enum Error<B: BlockT> {
+	#[error("Could not retrieve the block hash for block id: {0:?}")]
+	NoHashForBlockId(BlockId<B>),
+}
 
 /// Maximum uncles generations we may provide to the runtime.
 const MAX_UNCLE_GENERATIONS: u32 = 8;
 
-/// Register uncles inherent data provider, if not registered already.
-pub fn register_uncles_inherent_data_provider<B, C, SC>(
-	client: Arc<C>,
-	select_chain: SC,
-	inherent_data_providers: &InherentDataProviders,
-) -> Result<(), sp_consensus::Error> where
+/// Create a new [`sp_authorship::InherentDataProvider`] at the given block.
+pub fn create_uncles_inherent_data_provider<B, C>(
+	client: &C,
+	parent: B::Hash,
+) -> Result<sp_authorship::InherentDataProvider<B::Header>, sc_client_api::blockchain::Error>
+where
 	B: BlockT,
-	C: ProvideUncles<B> + Send + Sync + 'static,
-	SC: SelectChain<B> + 'static,
+	C: ProvideUncles<B>,
 {
-	if !inherent_data_providers.has_provider(&sp_authorship::INHERENT_IDENTIFIER) {
-		inherent_data_providers
-			.register_provider(sp_authorship::InherentDataProvider::new(move || {
-				{
-					let chain_head = match select_chain.best_chain() {
-						Ok(x) => x,
-						Err(e) => {
-							warn!(target: "uncles", "Unable to get chain head: {:?}", e);
-							return Vec::new();
-						}
-					};
-					match client.uncles(chain_head.hash(), MAX_UNCLE_GENERATIONS.into()) {
-						Ok(uncles) => uncles,
-						Err(e) => {
-							warn!(target: "uncles", "Unable to get uncles: {:?}", e);
-							Vec::new()
-						}
-					}
-				}
-			}))
-			.map_err(|err| sp_consensus::Error::InherentData(err.into()))?;
-	}
-	Ok(())
-}
+	let uncles = client.uncles(parent, MAX_UNCLE_GENERATIONS.into())?;
+
+	Ok(sp_authorship::InherentDataProvider::new(uncles))
+}
diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml
index 70a0b19532593..1d3d76ee7a55a 100644
--- a/client/db/Cargo.toml
+++ b/client/db/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sc-client-db"
-version = "0.8.0"
+version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
@@ -13,37 +13,33 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-parking_lot = "0.10.0"
+parking_lot = "0.11.1"
 log = "0.4.8"
-kvdb = "0.7.0"
-kvdb-rocksdb = { version = "0.9.1", optional = true }
-kvdb-memorydb = "0.7.0"
-linked-hash-map = "0.5.2"
+kvdb = "0.10.0"
+kvdb-rocksdb = { version = "0.14.0", optional = true }
+kvdb-memorydb = "0.10.0"
+linked-hash-map = "0.5.4"
 hash-db = "0.15.2"
-parity-util-mem = { version = "0.7.0", default-features = false, features = ["std"] }
-codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] }
-blake2-rfc = "0.2.18"
+codec = { package = "parity-scale-codec", version = "2.0.0", features = [
+	"derive",
+] }
 
-sc-client-api = { version = "2.0.0", path = "../api" }
-sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" }
-sp-core = { version = "2.0.0", path = "../../primitives/core" }
-sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" }
-sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" }
-sc-executor = { version = "0.8.0", path = "../executor" }
-sc-state-db = { version = "0.8.0", path = "../state-db" }
-sp-trie = { version = "2.0.0", path = "../../primitives/trie" }
-sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" }
-sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" }
-sp-database = { version = "2.0.0", path = "../../primitives/database" }
-parity-db = { version = "0.1.2", optional = true }
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" }
+sc-client-api = { version = "4.0.0-dev", path = "../api" }
+sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" }
+sp-core = { version =
"4.0.0-dev", path = "../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } +sc-state-db = { version = "0.10.0-dev", path = "../state-db" } +sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } +parity-db = { version = "0.3.1", optional = true } [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -quickcheck = "0.9" -kvdb-rocksdb = "0.9.1" +quickcheck = "1.0.3" +kvdb-rocksdb = "0.14.0" tempfile = "3" [features] diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index f3c8f1aff9e14..d46aca8e8ff78 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,77 +18,61 @@ //! State backend that's useful for benchmarking -use std::sync::Arc; -use std::cell::{Cell, RefCell}; -use std::collections::HashMap; +use std::{ + cell::{Cell, RefCell}, + collections::HashMap, + sync::Arc, +}; -use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key}; +use crate::storage_cache::{new_shared_cache, CachingState, SharedCache}; +use hash_db::{Hasher, Prefix}; +use kvdb::{DBTransaction, KeyValueDB}; +use linked_hash_map::LinkedHashMap; use sp_core::{ + hexdisplay::HexDisplay, storage::{ChildInfo, TrackedStorageKey}, - hexdisplay::HexDisplay }; -use sp_runtime::traits::{Block as BlockT, HashFor}; -use sp_runtime::Storage; +use sp_runtime::{ + traits::{Block as BlockT, HashFor}, + Storage, +}; use sp_state_machine::{ - DBValue, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection + backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, + StorageCollection, }; -use kvdb::{KeyValueDB, DBTransaction}; -use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; +use sp_trie::{prefixed_key, MemoryDB}; -type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor ->; +type DbState = + sp_state_machine::TrieBackend>>, HashFor>; type State = CachingState, B>; struct StorageDb { db: Arc, + proof_recorder: Option>, _block: std::marker::PhantomData, } impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); - self.db.get(0, &key) - .map_err(|e| format!("Database backend error: {:?}", e)) - } -} - -/// Track whether a specific key has already been read or written to. -#[derive(Default, Clone, Copy)] -pub struct KeyTracker { - has_been_read: bool, - has_been_written: bool, -} - -/// A simple object that counts the reads and writes at the key level to the underlying state db. 
-#[derive(Default, Clone, Copy, Debug)] -pub struct ReadWriteTracker { - reads: u32, - repeat_reads: u32, - writes: u32, - repeat_writes: u32, -} - -impl ReadWriteTracker { - fn add_read(&mut self) { - self.reads += 1; - } - - fn add_repeat_read(&mut self) { - self.repeat_reads += 1; - } - - fn add_write(&mut self) { - self.writes += 1; - } - - fn add_repeat_write(&mut self) { - self.repeat_writes += 1; + let prefixed_key = prefixed_key::>(key, prefix); + if let Some(recorder) = &self.proof_recorder { + if let Some(v) = recorder.get(&key) { + return Ok(v.clone()) + } + let backend_value = self + .db + .get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e))?; + recorder.record(key.clone(), backend_value.clone()); + Ok(backend_value) + } else { + self.db + .get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e)) + } } } - /// State that manages the backend database reference. Allows runtime to control the database. pub struct BenchmarkingState { root: Cell, @@ -99,17 +83,28 @@ pub struct BenchmarkingState { record: Cell>>, shared_cache: SharedCache, // shared cache is always empty /// Key tracker for keys in the main trie. - main_key_tracker: RefCell, KeyTracker>>, + /// We track the total number of reads and writes to these keys, + /// not de-duplicated for repeats. + main_key_tracker: RefCell, TrackedStorageKey>>, /// Key tracker for keys in a child trie. /// Child trie are identified by their storage key (i.e. `ChildInfo::storage_key()`) - child_key_tracker: RefCell, HashMap, KeyTracker>>>, - read_write_tracker: RefCell, + /// We track the total number of reads and writes to these keys, + /// not de-duplicated for repeats. + child_key_tracker: RefCell, LinkedHashMap, TrackedStorageKey>>>, whitelist: RefCell>, + proof_recorder: Option>, + proof_recorder_root: Cell, + enable_tracking: bool, } impl BenchmarkingState { /// Create a new instance that creates a database in a temporary dir. 
- pub fn new(genesis: Storage, _cache_size_mb: Option) -> Result { + pub fn new( + genesis: Storage, + _cache_size_mb: Option, + record_proof: bool, + enable_tracking: bool, + ) -> Result { let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); @@ -117,28 +112,33 @@ impl BenchmarkingState { let mut state = BenchmarkingState { state: RefCell::new(None), db: Cell::new(None), - root: Cell::new(root), + root: Cell::new(root.clone()), genesis: Default::default(), genesis_root: Default::default(), record: Default::default(), shared_cache: new_shared_cache(0, (1, 10)), main_key_tracker: Default::default(), child_key_tracker: Default::default(), - read_write_tracker: Default::default(), whitelist: Default::default(), + proof_recorder: record_proof.then(Default::default), + proof_recorder_root: Cell::new(root.clone()), + enable_tracking, }; state.add_whitelist_to_tracker(); state.reopen()?; - let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( - genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - child_delta, - ); + let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| { + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + ) + }); + let (root, transaction): (B::Hash, _) = + state.state.borrow_mut().as_mut().unwrap().full_storage_root( + genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + child_delta, + ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); state.commit(root, transaction, Vec::new(), Vec::new())?; @@ -150,14 +150,22 @@ impl BenchmarkingState { *self.state.borrow_mut() = None; let db = match self.db.take() { Some(db) => db, - None => Arc::new(::kvdb_memorydb::create(1)), + None => Arc::new(kvdb_memorydb::create(1)), }; self.db.set(Some(db.clone())); - let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); + if let Some(recorder) = &self.proof_recorder { + recorder.reset(); + self.proof_recorder_root.set(self.root.get()); + } + let storage_db = Arc::new(StorageDb:: { + db, + proof_recorder: self.proof_recorder.clone(), + _block: Default::default(), + }); *self.state.borrow_mut() = Some(State::new( DbState::::new(storage_db, self.root.get()), self.shared_cache.clone(), - None + None, )); Ok(()) } @@ -168,60 +176,50 @@ impl BenchmarkingState { let whitelist = self.whitelist.borrow(); whitelist.iter().for_each(|key| { - let whitelisted = KeyTracker { - has_been_read: key.has_been_read, - has_been_written: key.has_been_written, - }; + let mut whitelisted = TrackedStorageKey::new(key.key.clone()); + whitelisted.whitelist(); main_key_tracker.insert(key.key.clone(), whitelisted); }); } fn wipe_tracker(&self) { - *self.main_key_tracker.borrow_mut() = HashMap::new(); - *self.child_key_tracker.borrow_mut() = HashMap::new(); + *self.main_key_tracker.borrow_mut() = LinkedHashMap::new(); + *self.child_key_tracker.borrow_mut() = LinkedHashMap::new(); self.add_whitelist_to_tracker(); - *self.read_write_tracker.borrow_mut() = Default::default(); } // Childtrie is identified by its storage key (i.e. 
`ChildInfo::storage_key`) fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { - let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + if !self.enable_tracking { + return + } + let mut child_key_tracker = self.child_key_tracker.borrow_mut(); let mut main_key_tracker = self.main_key_tracker.borrow_mut(); let key_tracker = if let Some(childtrie) = childtrie { - child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) - } else { + child_key_tracker + .entry(childtrie.to_vec()) + .or_insert_with(|| LinkedHashMap::new()) + } else { &mut main_key_tracker }; - let read = match key_tracker.get(key) { + let should_log = match key_tracker.get_mut(key) { None => { - let has_been_read = KeyTracker { - has_been_read: true, - has_been_written: false, - }; + let mut has_been_read = TrackedStorageKey::new(key.to_vec()); + has_been_read.add_read(); key_tracker.insert(key.to_vec(), has_been_read); - read_write_tracker.add_read(); true }, Some(tracker) => { - if !tracker.has_been_read { - let has_been_read = KeyTracker { - has_been_read: true, - has_been_written: tracker.has_been_written, - }; - key_tracker.insert(key.to_vec(), has_been_read); - read_write_tracker.add_read(); - true - } else { - read_write_tracker.add_repeat_read(); - false - } - } + let should_log = !tracker.has_been_read(); + tracker.add_read(); + should_log + }, }; - if read { + if should_log { if let Some(childtrie) = childtrie { log::trace!( target: "benchmark", @@ -235,41 +233,37 @@ impl BenchmarkingState { // Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`) fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) { - let mut read_write_tracker = self.read_write_tracker.borrow_mut(); + if !self.enable_tracking { + return + } + let mut child_key_tracker = self.child_key_tracker.borrow_mut(); let mut main_key_tracker = self.main_key_tracker.borrow_mut(); let key_tracker = if let Some(childtrie) = childtrie { - child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) - } else { + child_key_tracker + .entry(childtrie.to_vec()) + .or_insert_with(|| LinkedHashMap::new()) + } else { &mut main_key_tracker }; // If we have written to the key, we also consider that we have read from it. - let has_been_written = KeyTracker { - has_been_read: true, - has_been_written: true, - }; - - let write = match key_tracker.get(key) { + let should_log = match key_tracker.get_mut(key) { None => { + let mut has_been_written = TrackedStorageKey::new(key.to_vec()); + has_been_written.add_write(); key_tracker.insert(key.to_vec(), has_been_written); - read_write_tracker.add_write(); true }, Some(tracker) => { - if !tracker.has_been_written { - key_tracker.insert(key.to_vec(), has_been_written); - read_write_tracker.add_write(); - true - } else { - read_write_tracker.add_repeat_write(); - false - } - } + let should_log = !tracker.has_been_written(); + tracker.add_write(); + should_log + }, }; - if write { + if should_log { if let Some(childtrie) = childtrie { log::trace!( target: "benchmark", @@ -280,6 +274,23 @@ impl BenchmarkingState { } } } + + // Return all the tracked storage keys among main and child trie. 
+ fn all_trackers(&self) -> Vec { + let mut all_trackers = Vec::new(); + + self.main_key_tracker.borrow().iter().for_each(|(_, tracker)| { + all_trackers.push(tracker.clone()); + }); + + self.child_key_tracker.borrow().iter().for_each(|(_, child_tracker)| { + child_tracker.iter().for_each(|(_, tracker)| { + all_trackers.push(tracker.clone()); + }); + }); + + all_trackers + } } fn state_err() -> String { @@ -287,7 +298,7 @@ fn state_err() -> String { } impl StateBackend> for BenchmarkingState { - type Error = as StateBackend>>::Error; + type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; @@ -307,7 +318,11 @@ impl StateBackend> for BenchmarkingState { key: &[u8], ) -> Result>, Self::Error> { self.add_read_key(Some(child_info.storage_key()), key); - self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -321,7 +336,11 @@ impl StateBackend> for BenchmarkingState { key: &[u8], ) -> Result { self.add_read_key(Some(child_info.storage_key()), key); - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -335,7 +354,11 @@ impl StateBackend> for BenchmarkingState { key: &[u8], ) -> Result>, Self::Error> { self.add_read_key(Some(child_info.storage_key()), key); - self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? 
+ .next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -350,13 +373,31 @@ impl StateBackend> for BenchmarkingState { } } - fn for_keys_in_child_storage( + fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state.borrow().as_ref().ok_or_else(state_err)?.apply_to_key_values_while( + child_info, + prefix, + start_at, + f, + allow_missing, + ) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_keys_in_child_storage(child_info, f) + state.apply_to_keys_while(child_info, prefix, f) } } @@ -373,17 +414,29 @@ impl StateBackend> for BenchmarkingState { fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta)) + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.storage_root(delta)) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) + delta: impl Iterator)>, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -394,21 +447,15 @@ impl StateBackend> for BenchmarkingState { self.state.borrow().as_ref().map_or(Default::default(), |s| s.keys(prefix)) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { - None - } - - fn commit(&self, + fn commit( + &self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction, main_storage_changes: StorageCollection, @@ -429,7 +476,8 @@ impl StateBackend> for BenchmarkingState { let mut record = self.record.take(); record.extend(keys); self.record.set(record); - db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; + db.write(db_transaction) + .map_err(|_| String::from("Error committing transaction"))?; self.root.set(storage_root); self.db.set(Some(db)); @@ -459,7 +507,8 @@ impl StateBackend> for BenchmarkingState { None => db_transaction.delete(0, &key), } } - db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; + db.write(db_transaction) + .map_err(|_| String::from("Error committing transaction"))?; self.db.set(Some(db)); } @@ -470,9 +519,30 @@ impl StateBackend> for BenchmarkingState { } /// Get the key tracking information for the state db. + /// 1. `reads` - Total number of DB reads. + /// 2. `repeat_reads` - Total number of in-memory reads. + /// 3. `writes` - Total number of DB writes. + /// 4. `repeat_writes` - Total number of in-memory writes. 
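+	///
+	/// For example, a non-whitelisted key that is read 3 times and written once
+	/// contributes `reads += 1`, `repeat_reads += 2`, `writes += 1` and
+	/// `repeat_writes += 0`; whitelisted keys are not counted at all.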
fn read_write_count(&self) -> (u32, u32, u32, u32) { - let count = *self.read_write_tracker.borrow_mut(); - (count.reads, count.repeat_reads, count.writes, count.repeat_writes) + let mut reads = 0; + let mut repeat_reads = 0; + let mut writes = 0; + let mut repeat_writes = 0; + + self.all_trackers().iter().for_each(|tracker| { + if !tracker.whitelisted { + if tracker.reads > 0 { + reads += 1; + repeat_reads += tracker.reads - 1; + } + + if tracker.writes > 0 { + writes += 1; + repeat_writes += tracker.writes - 1; + } + } + }); + (reads, repeat_reads, writes, repeat_writes) } /// Reset the key tracking information for the state db. @@ -488,12 +558,69 @@ impl StateBackend> for BenchmarkingState { *self.whitelist.borrow_mut() = new; } - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + // We only track at the level of a key-prefix and not whitelisted for now for memory size. + // TODO: Refactor to enable full storage key transparency, where we can remove the + // `prefix_key_tracker`. + let mut prefix_key_tracker = LinkedHashMap::, (u32, u32, bool)>::new(); + self.all_trackers().iter().for_each(|tracker| { + if !tracker.whitelisted { + let prefix_length = tracker.key.len().min(32); + let prefix = tracker.key[0..prefix_length].to_vec(); + // each read / write of a specific key is counted at most one time, since + // additional reads / writes happen in the memory overlay. + let reads = tracker.reads.min(1); + let writes = tracker.writes.min(1); + if let Some(prefix_tracker) = prefix_key_tracker.get_mut(&prefix) { + prefix_tracker.0 += reads; + prefix_tracker.1 += writes; + } else { + prefix_key_tracker.insert(prefix, (reads, writes, tracker.whitelisted)); + } + } + }); + + prefix_key_tracker + .iter() + .map(|(key, tracker)| -> (Vec, u32, u32, bool) { + (key.to_vec(), tracker.0, tracker.1, tracker.2) + }) + .collect::>() + } + + fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { self.state.borrow_mut().as_mut().map(|s| s.register_overlay_stats(stats)); } fn usage_info(&self) -> sp_state_machine::UsageInfo { - self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) + self.state + .borrow() + .as_ref() + .map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) + } + + fn proof_size(&self) -> Option { + self.proof_recorder.as_ref().map(|recorder| { + let proof_size = recorder.estimate_encoded_size() as u32; + let proof = recorder.to_storage_proof(); + let proof_recorder_root = self.proof_recorder_root.get(); + if proof_recorder_root == Default::default() || proof_size == 1 { + // empty trie + proof_size + } else { + if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) { + size as u32 + } else { + panic!( + "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", + self.proof_recorder_root.get(), + self.root.get(), + self.genesis_root, + proof_size, + ); + } + } + }) } } @@ -510,8 +637,9 @@ mod test { #[test] fn read_to_main_and_child_tries() { - let bench_state = BenchmarkingState::::new(Default::default(), None) - .unwrap(); + let bench_state = + BenchmarkingState::::new(Default::default(), None, false, true) + .unwrap(); for _ in 0..2 { let child1 = sp_core::storage::ChildInfo::new_default(b"child1"); @@ -525,22 +653,20 @@ mod test { bench_state.child_storage(&child1, b"bar").unwrap(); bench_state.child_storage(&child2, b"bar").unwrap(); - bench_state.commit( - Default::default(), - 
Default::default(), - vec![ - ("foo".as_bytes().to_vec(), None) - ], - vec![ - ("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)]) - ] - ).unwrap(); - - let rw_tracker = bench_state.read_write_tracker.borrow(); - assert_eq!(rw_tracker.reads, 6); - assert_eq!(rw_tracker.repeat_reads, 0); - assert_eq!(rw_tracker.writes, 2); - assert_eq!(rw_tracker.repeat_writes, 0); + bench_state + .commit( + Default::default(), + Default::default(), + vec![("foo".as_bytes().to_vec(), None)], + vec![("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)])], + ) + .unwrap(); + + let rw_tracker = bench_state.read_write_count(); + assert_eq!(rw_tracker.0, 6); + assert_eq!(rw_tracker.1, 0); + assert_eq!(rw_tracker.2, 2); + assert_eq!(rw_tracker.3, 0); drop(rw_tracker); bench_state.wipe().unwrap(); } diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index 15ad339b1f2c1..795cb8f901183 100644 --- a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -41,18 +41,18 @@ //! Finalized entry E1 is pruned when block B is finalized so that: //! EntryAt(B.number - prune_depth).points_to(E1) -use std::collections::{BTreeSet, BTreeMap}; +use std::collections::{BTreeMap, BTreeSet}; use log::warn; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, Zero, Bounded, CheckedSub -}; +use sp_runtime::traits::{Block as BlockT, Bounded, CheckedSub, NumberFor, Zero}; -use crate::cache::{CacheItemT, ComplexBlockId, EntryType}; -use crate::cache::list_entry::{Entry, StorageEntry}; -use crate::cache::list_storage::{Storage, StorageTransaction, Metadata}; +use crate::cache::{ + list_entry::{Entry, StorageEntry}, + list_storage::{Metadata, Storage, StorageTransaction}, + CacheItemT, ComplexBlockId, EntryType, +}; /// Pruning strategy. #[derive(Debug, Clone, Copy)] @@ -132,8 +132,8 @@ impl> ListCache pruning_strategy: PruningStrategy>, best_finalized_block: ComplexBlockId, ) -> ClientResult { - let (best_finalized_entry, unfinalized) = storage.read_meta() - .and_then(|meta| read_forks(&storage, meta))?; + let (best_finalized_entry, unfinalized) = + storage.read_meta().and_then(|meta| read_forks(&storage, meta))?; Ok(ListCache { storage, @@ -167,7 +167,7 @@ impl> ListCache // BUT since we're not guaranteeing to provide correct values for forks // behind the finalized block, check if the block is finalized first if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? { - return Err(ClientError::NotInFinalizedChain); + return Err(ClientError::NotInFinalizedChain) } self.best_finalized_entry.as_ref() @@ -178,24 +178,27 @@ impl> ListCache } else { // there are unfinalized entries // => find the fork containing given block and read from this fork - // IF there's no matching fork, ensure that this isn't a block from a fork that has forked - // behind the best finalized block and search at finalized fork + // IF there's no matching fork, ensure that this isn't a block from a fork that has + // forked behind the best finalized block and search at finalized fork match self.find_unfinalized_fork(&at)? 
{ Some(fork) => Some(&fork.head), None => match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) if chain::is_connected_to_block( - &self.storage, - &at, - &best_finalized_entry.valid_from, - )? => Some(best_finalized_entry), + Some(best_finalized_entry) + if chain::is_connected_to_block( + &self.storage, + &at, + &best_finalized_entry.valid_from, + )? => + Some(best_finalized_entry), _ => None, }, } }; match head { - Some(head) => head.search_best_before(&self.storage, at.number) + Some(head) => head + .search_best_before(&self.storage, at.number) .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), None => Ok(None), } @@ -213,7 +216,8 @@ impl> ListCache entry_type: EntryType, operations: &mut CommitOperations, ) -> ClientResult<()> { - Ok(operations.append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) + Ok(operations + .append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) } /// When previously inserted block is finalized. @@ -242,25 +246,25 @@ impl> ListCache for op in ops.operations { match op { CommitOperation::AppendNewBlock(index, best_block) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); + CommitOperation holds valid references while cache is locked; qed", + ); fork.best_block = Some(best_block); }, CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); + CommitOperation holds valid references while cache is locked; qed", + ); fork.best_block = Some(entry.valid_from.clone()); fork.head = entry; }, CommitOperation::AddNewFork(entry) => { - self.unfinalized.push(Fork { - best_block: Some(entry.valid_from.clone()), - head: entry, - }); + self.unfinalized + .push(Fork { best_block: Some(entry.valid_from.clone()), head: entry }); }, CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { self.best_finalized_block = block; @@ -275,7 +279,9 @@ impl> ListCache for (fork_index, updated_fork) in forks.into_iter().rev() { match updated_fork { Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, - None => { self.unfinalized.remove(fork_index); }, + None => { + self.unfinalized.remove(fork_index); + }, } } }, @@ -296,34 +302,36 @@ impl> ListCache let prev_operation = operations.operations.last(); debug_assert!( entry_type != EntryType::Final || - self.best_finalized_block.hash == parent.hash || - match prev_operation { - Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) - => best_finalized_block.hash == parent.hash, - _ => false, - } + self.unfinalized.is_empty() || + self.best_finalized_block.hash == parent.hash || + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => + best_finalized_block.hash == parent.hash, + _ => false, + } ); // we do not store any values behind finalized if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { - return Ok(None); + return Ok(None) 
} - // if the block is not final, it is possibly appended to/forking from existing unfinalized fork + // if the block is not final, it is possibly appended to/forking from existing unfinalized + // fork let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis; if !is_final { let mut fork_and_action = None; // when value hasn't changed and block isn't final, there's nothing we need to do if value.is_none() { - return Ok(None); + return Ok(None) } // first: try to find fork that is known to has the best block we're appending to for (index, fork) in self.unfinalized.iter().enumerate() { if fork.try_append(&parent) { fork_and_action = Some((index, ForkAppendResult::Append)); - break; + break } } @@ -331,11 +339,14 @@ impl> ListCache // - we're appending to the fork for the first time after restart; // - we're forking existing unfinalized fork from the middle; if fork_and_action.is_none() { - let best_finalized_entry_block = self.best_finalized_entry.as_ref().map(|f| f.valid_from.number); + let best_finalized_entry_block = + self.best_finalized_entry.as_ref().map(|f| f.valid_from.number); for (index, fork) in self.unfinalized.iter().enumerate() { - if let Some(action) = fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? { + if let Some(action) = + fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? + { fork_and_action = Some((index, action)); - break; + break } } } @@ -350,9 +361,14 @@ impl> ListCache }; tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); + let operation = + CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + return Ok(Some(operation)) }, // fork from the middle of unfinalized fork Some((_, ForkAppendResult::Fork(prev_valid_from))) => { @@ -363,18 +379,24 @@ impl> ListCache }; tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); + let operation = + CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + return Ok(Some(operation)) }, None => (), } } // if we're here, then one of following is true: - // - either we're inserting final block => all ancestors are already finalized AND the only thing we can do - // is to try to update last finalized entry - // - either we're inserting non-final blocks that has no ancestors in any known unfinalized forks + // - either we're inserting final block => all ancestors are already finalized AND the only + // thing we can do is to try to update last finalized entry + // - either we're inserting non-final blocks that has no ancestors in any known unfinalized + // forks let new_storage_entry = match self.best_finalized_entry.as_ref() { Some(best_finalized_entry) => best_finalized_entry.try_update(value), @@ -389,12 +411,17 @@ impl> ListCache return Ok(match new_storage_entry { Some(new_storage_entry) => { tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - 
tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); + let operation = + CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); Some(operation) }, None => None, - }); + }) } // cleanup database from abandoned unfinalized forks and obsolete finalized entries @@ -404,7 +431,11 @@ impl> ListCache match new_storage_entry { Some(new_storage_entry) => { tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::BlockFinalized(block.clone(), Some(new_storage_entry.into_entry(block)), abandoned_forks); + let operation = CommitOperation::BlockFinalized( + block.clone(), + Some(new_storage_entry.into_entry(block)), + abandoned_forks, + ); tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); Ok(Some(operation)) }, @@ -423,16 +454,16 @@ impl> ListCache let prev_operation = operations.operations.last(); debug_assert!( self.best_finalized_block.hash == parent.hash || - match prev_operation { - Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) - => best_finalized_block.hash == parent.hash, - _ => false, - } + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => + best_finalized_block.hash == parent.hash, + _ => false, + } ); // there could be at most one entry that is finalizing - let finalizing_entry = self.storage.read_entry(&block)? - .map(|entry| entry.into_entry(block.clone())); + let finalizing_entry = + self.storage.read_entry(&block)?.map(|entry| entry.into_entry(block.clone())); // cleanup database from abandoned unfinalized forks and obsolete finalized entries let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); @@ -457,12 +488,13 @@ impl> ListCache for (index, fork) in self.unfinalized.iter().enumerate() { // we only need to truncate fork if its head is ancestor of truncated block if fork.head.valid_from.number < reverted_block.number { - continue; + continue } // we only need to truncate fork if its head is connected to truncated block - if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)? { - continue; + if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)? 
+ { + continue } let updated_fork = fork.truncate( @@ -485,7 +517,7 @@ impl> ListCache fn prune_finalized_entries>( &self, tx: &mut Tx, - block: &ComplexBlockId + block: &ComplexBlockId, ) { let prune_depth = match self.pruning_strategy { PruningStrategy::ByDepth(prune_depth) => prune_depth, @@ -515,18 +547,13 @@ impl> ListCache }; // truncate ancient entry - tx.insert_storage_entry(&ancient_block, &StorageEntry { - prev_valid_from: None, - value: current_entry.value, - }); + tx.insert_storage_entry( + &ancient_block, + &StorageEntry { prev_valid_from: None, value: current_entry.value }, + ); // destroy 'fork' ending with previous entry - destroy_fork( - first_entry_to_truncate, - &self.storage, - tx, - None, - ) + destroy_fork(first_entry_to_truncate, &self.storage, tx, None) }; if let Err(error) = do_pruning() { @@ -543,16 +570,17 @@ impl> ListCache ) -> BTreeSet { // if some block has been finalized already => take it into account let prev_abandoned_forks = match prev_operation { - Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => Some(abandoned_forks), + Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => + Some(abandoned_forks), _ => None, }; let mut destroyed = prev_abandoned_forks.cloned().unwrap_or_else(|| BTreeSet::new()); - let live_unfinalized = self.unfinalized.iter() - .enumerate() - .filter(|(idx, _)| prev_abandoned_forks + let live_unfinalized = self.unfinalized.iter().enumerate().filter(|(idx, _)| { + prev_abandoned_forks .map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx)) - .unwrap_or(true)); + .unwrap_or(true) + }); for (index, fork) in live_unfinalized { if fork.head.valid_from.number == block.number { destroyed.insert(index); @@ -574,7 +602,7 @@ impl> ListCache ) -> ClientResult>> { for unfinalized in &self.unfinalized { if unfinalized.matches(&self.storage, block)? { - return Ok(Some(&unfinalized)); + return Ok(Some(&unfinalized)) } } @@ -597,7 +625,8 @@ impl Fork { let range = self.head.search_best_range_before(storage, block.number)?; match range { None => Ok(false), - Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), + Some((begin, end)) => + chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), } } @@ -628,19 +657,19 @@ impl Fork { // check if the parent is connected to the beginning of the range if !chain::is_connected_to_block(storage, parent, &begin)? { - return Ok(None); + return Ok(None) } // the block is connected to the begin-entry. If begin is the head entry // => we need to append new block to the fork if begin == self.head.valid_from { - return Ok(Some(ForkAppendResult::Append)); + return Ok(Some(ForkAppendResult::Append)) } // the parent block belongs to this fork AND it is located after last finalized entry // => we need to make a new fork if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) { - return Ok(Some(ForkAppendResult::Fork(begin))); + return Ok(Some(ForkAppendResult::Fork(begin))) } Ok(None) @@ -653,12 +682,7 @@ impl Fork { tx: &mut Tx, best_finalized_block: Option>, ) -> ClientResult<()> { - destroy_fork( - self.head.valid_from.clone(), - storage, - tx, - best_finalized_block, - ) + destroy_fork(self.head.valid_from.clone(), storage, tx, best_finalized_block) } /// Truncate fork by deleting all entries that are descendants of given block. 
@@ -674,18 +698,15 @@ impl<Block: BlockT, T: CacheItemT> Fork<Block, T> {
 			// read pointer to previous entry
 			let entry = storage.require_entry(&current)?;
 
-			// truncation stops when we have reached the ancestor of truncated block
+			// truncation stops when we have reached the ancestor of truncated block
 			if current.number < reverting_block {
 				// if we have reached finalized block => destroy fork
 				if chain::is_finalized_block(storage, &current, best_finalized_block)? {
-					return Ok(None);
+					return Ok(None)
 				}
 
 				// else fork needs to be updated
-				return Ok(Some(Fork {
-					best_block: None,
-					head: entry.into_entry(current),
-				}));
+				return Ok(Some(Fork { best_block: None, head: entry.into_entry(current) }))
 			}
 
 			tx.remove_storage_entry(&current);
@@ -707,7 +728,9 @@ impl<Block: BlockT, T: CacheItemT> Default for CommitOperations<Block, T> {
 
 // This should never be allowed for non-test code to avoid revealing its internals.
 #[cfg(test)]
-impl<Block: BlockT, T: CacheItemT> From<Vec<CommitOperation<Block, T>>> for CommitOperations<Block, T> {
+impl<Block: BlockT, T: CacheItemT> From<Vec<CommitOperation<Block, T>>>
+	for CommitOperations<Block, T>
+{
 	fn from(operations: Vec<CommitOperation<Block, T>>) -> Self {
 		CommitOperations { operations }
 	}
@@ -725,30 +748,36 @@ impl<Block: BlockT, T: CacheItemT> CommitOperations<Block, T> {
 			Some(last_operation) => last_operation,
 			None => {
 				self.operations.push(new_operation);
-				return;
+				return
 			},
 		};
 
 		// we are able (and obliged to) to merge two consequent block finalization operations
 		match last_operation {
-			CommitOperation::BlockFinalized(old_finalized_block, old_finalized_entry, old_abandoned_forks) => {
-				match new_operation {
-					CommitOperation::BlockFinalized(new_finalized_block, new_finalized_entry, new_abandoned_forks) => {
-						self.operations.push(CommitOperation::BlockFinalized(
-							new_finalized_block,
-							new_finalized_entry,
-							new_abandoned_forks,
-						));
-					},
-					_ => {
-						self.operations.push(CommitOperation::BlockFinalized(
-							old_finalized_block,
-							old_finalized_entry,
-							old_abandoned_forks,
-						));
-						self.operations.push(new_operation);
-					},
-				}
+			CommitOperation::BlockFinalized(
+				old_finalized_block,
+				old_finalized_entry,
+				old_abandoned_forks,
+			) => match new_operation {
+				CommitOperation::BlockFinalized(
+					new_finalized_block,
+					new_finalized_entry,
+					new_abandoned_forks,
+				) => {
+					self.operations.push(CommitOperation::BlockFinalized(
+						new_finalized_block,
+						new_finalized_entry,
+						new_abandoned_forks,
+					));
+				},
+				_ => {
+					self.operations.push(CommitOperation::BlockFinalized(
+						old_finalized_block,
+						old_finalized_entry,
+						old_abandoned_forks,
+					));
+					self.operations.push(new_operation);
+				},
 			},
 			_ => {
 				self.operations.push(last_operation);
@@ -759,7 +788,12 @@ impl<Block: BlockT, T: CacheItemT> CommitOperations<Block, T> {
 }
 
 /// Destroy fork by deleting all unfinalized entries.
-pub fn destroy_fork<Block: BlockT, T: CacheItemT, S: Storage<Block, T>, Tx: StorageTransaction<Block, T>>(
+pub fn destroy_fork<
+	Block: BlockT,
+	T: CacheItemT,
+	S: Storage<Block, T>,
+	Tx: StorageTransaction<Block, T>,
+>(
 	head_valid_from: ComplexBlockId<Block>,
 	storage: &S,
 	tx: &mut Tx,
@@ -770,7 +804,7 @@ pub fn destroy_fork<Block: BlockT, T: CacheItemT, S: Storage<Block, T>, Tx: Stor
 		// optionally: deletion stops when we found entry at finalized block
 		if let Some(best_finalized_block) = best_finalized_block {
 			if chain::is_finalized_block(storage, &current, best_finalized_block)? {
-				return Ok(());
+				return Ok(())
 			}
 		}
 
@@ -788,8 +822,8 @@ pub fn destroy_fork<Block: BlockT, T: CacheItemT, S: Storage<Block, T>, Tx: Stor
 
 /// Blockchain related functions.
 mod chain {
-	use sp_runtime::traits::Header as HeaderT;
 	use super::*;
+	use sp_runtime::traits::Header as HeaderT;
 
 	/// Is the block1 connected both ends of the range.
 	pub fn is_connected_to_range<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
 		storage: &S,
 		block: &ComplexBlockId<Block>,
 		range: (&ComplexBlockId<Block>, Option<&ComplexBlockId<Block>>),
 	) -> ClientResult<bool> {
 		let (begin, end) = range;
-		Ok(is_connected_to_block(storage, block, begin)?
-			&& match end {
+		Ok(is_connected_to_block(storage, block, begin)?
&& + match end { Some(end) => is_connected_to_block(storage, block, end)?, None => true, }) @@ -812,10 +846,12 @@ mod chain { block2: &ComplexBlockId, ) -> ClientResult { let (begin, end) = if *block1 > *block2 { (block2, block1) } else { (block1, block2) }; - let mut current = storage.read_header(&end.hash)? + let mut current = storage + .read_header(&end.hash)? .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; while *current.number() > begin.number { - current = storage.read_header(current.parent_hash())? + current = storage + .read_header(current.parent_hash())? .ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?; } @@ -829,11 +865,10 @@ mod chain { best_finalized_block: NumberFor, ) -> ClientResult { if block.number > best_finalized_block { - return Ok(false); + return Ok(false) } - storage.read_id(block.number) - .map(|hash| hash.as_ref() == Some(&block.hash)) + storage.read_id(block.number).map(|hash| hash.as_ref() == Some(&block.hash)) } } @@ -843,17 +878,19 @@ fn read_forks>( meta: Metadata, ) -> ClientResult<(Option>, Vec>)> { let finalized = match meta.finalized { - Some(finalized) => Some(storage.require_entry(&finalized)? - .into_entry(finalized)), + Some(finalized) => Some(storage.require_entry(&finalized)?.into_entry(finalized)), None => None, }; - let unfinalized = meta.unfinalized.into_iter() - .map(|unfinalized| storage.require_entry(&unfinalized) - .map(|storage_entry| Fork { + let unfinalized = meta + .unfinalized + .into_iter() + .map(|unfinalized| { + storage.require_entry(&unfinalized).map(|storage_entry| Fork { best_block: None, head: storage_entry.into_entry(unfinalized), - })) + }) + }) .collect::>()?; Ok((finalized, unfinalized)) @@ -861,10 +898,10 @@ fn read_forks>( #[cfg(test)] mod tests { - use substrate_test_runtime_client::runtime::H256; - use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; - use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction}; use super::*; + use crate::cache::list_storage::tests::{DummyStorage, DummyTransaction, FaultyStorage}; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, Header}; + use substrate_test_runtime_client::runtime::H256; type Block = RawBlock>; @@ -882,7 +919,11 @@ mod tests { fn test_header(number: u64) -> Header { Header { - parent_hash: if number == 0 { Default::default() } else { test_header(number - 1).hash() }, + parent_hash: if number == 0 { + Default::default() + } else { + test_header(number - 1).hash() + }, number, state_root: Default::default(), extrinsics_root: Default::default(), @@ -909,28 +950,54 @@ mod tests { // when block is earlier than best finalized block AND it is not finalized // --- 50 --- // ----------> [100] - assert!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap().value_at_block(&test_id(50)).is_err()); + assert!(ListCache::<_, u64, _>::new( + DummyStorage::new(), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(50)) + .is_err()); // when block is earlier than best finalized block AND it is finalized AND value is some // [30] ---- 50 ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), 
test_id(100) - ).unwrap().value_at_block(&test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(50)) + .unwrap(), + Some((test_id(30), Some(test_id(100)), 30)) + ); // when block is the best finalized block AND value is some // ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(100, H256::from_low_u64_be(100)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(100)).unwrap(), Some((test_id(100), None, 100))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(100, H256::from_low_u64_be(100)) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(100)) + .unwrap(), + Some((test_id(100), None, 100)) + ); // when block is parallel to the best finalized block // ---- 100 // ---> [100] @@ -938,81 +1005,138 @@ mod tests { DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).is_err()); + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)) + .is_err()); - // when block is later than last finalized block AND there are no forks AND finalized value is Some - // ---> [100] --- 200 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(200)).unwrap(), Some((test_id(100), None, 100))); + // when block is later than last finalized block AND there are no forks AND finalized value + // is Some ---> [100] --- 200 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(200)) + .unwrap(), + Some((test_id(100), None, 100)) + ); // when block is later than last finalized block AND there are no matching forks // AND block is connected to finalized block AND finalized value is Some // --- 3 // ---> [2] 
/---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some((correct_id(2), None, 2)) + ); // when block is later than last finalized block AND there are no matching forks // AND block is not connected to finalized block // --- 2 --- 3 // 1 /---> [2] ---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 2)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 1, 3)).unwrap(), None); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } + ) + .with_header(test_header(1)) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 2)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 1, 3)) + .unwrap(), + None + ); - // when block is later than last finalized block AND it appends to unfinalized fork from the end - // AND unfinalized value is Some + // when block is later than last finalized block AND it appends to unfinalized fork from the + // end AND unfinalized value is Some // ---> [2] ---> [4] ---> 5 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), 
value: 4 } + ) + .with_header(test_header(4)) + .with_header(test_header(5)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&correct_id(5)) + .unwrap(), + Some((correct_id(4), None, 4)) + ); // when block is later than last finalized block AND it does not fit unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] // \--- 3 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } + ) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some((correct_id(2), None, 2)) + ); } #[test] @@ -1022,7 +1146,8 @@ mod tests { // when trying to insert block < finalized number let mut ops = Default::default(); - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + .unwrap() .do_on_block_insert( &mut DummyTransaction::new(), test_id(49), @@ -1030,9 +1155,12 @@ mod tests { Some(50), nfin, &mut ops, - ).unwrap().is_none()); + ) + .unwrap() + .is_none()); // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + .unwrap() .do_on_block_insert( &mut DummyTransaction::new(), test_id(99), @@ -1040,98 +1168,151 @@ mod tests { Some(100), nfin, &Default::default(), - ).unwrap().is_none()); + ) + .unwrap() + .is_none()); - // when trying to insert non-final block AND it appends to the best block of unfinalized fork - // AND new value is the same as in the fork's best block + // when trying to insert non-final block AND it appends to the best block of unfinalized + // fork AND new value is the same as in the fork's best block let mut cache = ListCache::new( DummyStorage::new() .with_meta(None, vec![test_id(4)]) .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + test_id(2), + ) + .unwrap(); cache.unfinalized[0].best_block = Some(test_id(4)); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + test_id(4), + test_id(5), + Some(4), + nfin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::AppendNewBlock(0, test_id(5))), ); assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it appends to the best block of unfinalized fork - // AND new value is the same as in the fork's best block + // when trying to insert non-final block AND it appends to the best block of unfinalized + // fork AND new value is the same as in the fork's best block let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + test_id(4), + test_id(5), + Some(5), + nfin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 })), ); assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] }) + ); - // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork - // AND new value is the same as in the fork's best block + // when trying to insert non-final block AND it is the first block that appends to the best + // block of unfinalized fork AND new value is the same as in the fork's best block let cache = ListCache::new( DummyStorage::new() .with_meta(None, vec![correct_id(4)]) .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + test_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(4), - nfin, - &Default::default(), - ).unwrap(), + cache + .do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(4), + nfin, + &Default::default(), + ) + .unwrap(), Some(CommitOperation::AppendNewBlock(0, correct_id(5))), ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork - // AND new value is the same as in the fork's best block + // when trying to insert non-final block AND it is the first block that appends to the best + // block of unfinalized fork AND new value is the same as in the fork's best block let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(5), - nfin, - &Default::default(), - ).unwrap(), + cache + .do_on_block_insert( + + &mut tx, + correct_id(4), + correct_id(5), + Some(5), + nfin, + &Default::default(), + ) + .unwrap(), Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 })), ); assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] }) + ); // when trying to insert non-final block AND it forks unfinalized fork let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(4)]) -
.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }, + ) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + correct_id(3), + fork_id(0, 3, 4), + Some(14), + nfin, + &Default::default() + ) .unwrap(), Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 })), ); assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(2)), + unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] + }) + ); // when trying to insert non-final block AND there are no unfinalized forks // AND value is the same as last finalized @@ -1139,11 +1320,21 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + nfin, + &Default::default() + ) .unwrap(), None, ); @@ -1156,23 +1347,46 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + nfin, + &Default::default() + ) .unwrap(), Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 })), ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] }) + ); // when inserting finalized entry AND there are no previous finalized entries - let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)).unwrap(); + let cache = + ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + 
correct_id(2), + correct_id(3), + Some(3), + fin, + &Default::default() + ) .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), @@ -1182,17 +1396,31 @@ mod tests { ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) + ); // when inserting finalized entry AND value is the same as in previous finalized let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + fin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), ); assert!(tx.inserted_entries().is_empty()); @@ -1201,7 +1429,16 @@ mod tests { // when inserting finalized entry AND value differs from previous finalized let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + fin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -1210,7 +1447,10 @@ mod tests { ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) + ); // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted let cache = ListCache::new( @@ -1218,12 +1458,27 @@ mod tests { .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + fin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + vec![0].into_iter().collect() + )), ); } @@ -1234,12 +1489,19 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + 
.with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), + cache + .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) + .unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), ); assert!(tx.inserted_entries().is_empty()); @@ -1253,12 +1515,19 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), - PruningStrategy::ByDepth(1024), correct_id(4) - ).unwrap(); + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, + ), + PruningStrategy::ByDepth(1024), + correct_id(4), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()).unwrap(), + cache + .do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()) + .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(5), Some(Entry { valid_from: correct_id(5), value: 5 }), @@ -1267,19 +1536,30 @@ mod tests { ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] }) + ); // finalization removes abandoned forks let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), + cache + .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + vec![0].into_iter().collect() + )), ); } @@ -1289,34 +1569,50 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, + ) + .with_entry( + correct_id(6), + StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); // when new block is appended to unfinalized fork cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); 
assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); // when new entry is appended to unfinalized fork - cache.on_transaction_commit(vec![ - CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 }), - ].into()); + cache.on_transaction_commit( + vec![CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 })] + .into(), + ); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); // when new fork is added - cache.on_transaction_commit(vec![ - CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 }), - ].into()); + cache.on_transaction_commit( + vec![CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 })] + .into(), + ); assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit(vec![CommitOperation::BlockFinalized( - correct_id(20), - Some(Entry { valid_from: correct_id(20), value: 20 }), - vec![0, 1, 2].into_iter().collect(), - )].into()); + cache.on_transaction_commit( + vec![CommitOperation::BlockFinalized( + correct_id(20), + Some(Entry { valid_from: correct_id(20), value: 20 }), + vec![0, 1, 2].into_iter().collect(), + )] + .into(), + ); assert_eq!(cache.best_finalized_block, correct_id(20)); - assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: 20 })); + assert_eq!( + cache.best_finalized_entry, + Some(Entry { valid_from: correct_id(20), value: 20 }) + ); assert!(cache.unfinalized.is_empty()); } @@ -1324,45 +1620,88 @@ mod tests { fn list_find_unfinalized_fork_works() { // ----------> [3] // --- [2] ---------> 4 ---> [5] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap().find_unfinalized_fork((&correct_id(4)).into()).unwrap().unwrap().head.valid_from, correct_id(5)); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } + ) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } + ) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)), + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&correct_id(4)).into()) + .unwrap() + .unwrap() + .head + .valid_from, + correct_id(5) + ); // --- [2] ---------------> [5] // ----------> [3] ---> 4 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - 
.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 2)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)), - PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap() - .find_unfinalized_fork((&fork_id(0, 1, 4)).into()).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } + ) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } + ) + .with_entry( + correct_id(2), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(fork_header(0, 1, 2)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 4)), + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&fork_id(0, 1, 4)).into()) + .unwrap() + .unwrap() + .head + .valid_from, + fork_id(0, 1, 3) + ); // --- [2] ---------------> [5] // ----------> [3] // -----------------> 4 assert!(ListCache::new( DummyStorage::new() .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } + ) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } + ) + .with_entry( + correct_id(2), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } + ) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)) @@ -1372,89 +1711,167 @@ mod tests { .with_header(fork_header(1, 1, 2)) .with_header(fork_header(1, 1, 3)) .with_header(fork_header(1, 1, 4)), - PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap().find_unfinalized_fork((&fork_id(1, 1, 4)).into()).unwrap().is_none()); + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&fork_id(1, 1, 4)).into()) + .unwrap() + .is_none()); } #[test] fn fork_matches_works() { // when block is not within list range let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, + ) .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .matches(&storage, (&test_id(20)).into()).unwrap(), false); + assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .matches(&storage, (&test_id(20)).into()) + .unwrap(), + false + ); // when block is not connected to the begin block let storage = DummyStorage::new() - .with_entry(correct_id(5), 
StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&fork_id(0, 2, 4)).into()).unwrap(), false); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&fork_id(0, 2, 4)).into()) + .unwrap(), + false + ); // when block is not connected to the end block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 3, 4)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&fork_id(0, 3, 4)).into()).unwrap(), false); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&fork_id(0, 3, 4)).into()) + .unwrap(), + false + ); // when block is connected to the begin block AND end is open let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) .with_header(test_header(5)) .with_header(test_header(6)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&correct_id(6)).into()).unwrap(), true); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&correct_id(6)).into()) + .unwrap(), + true + ); // when block is connected to the begin block AND to the end block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&correct_id(4)).into()).unwrap(), true); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&correct_id(4)).into()) + .unwrap(), + true + ); } #[test] fn fork_try_append_works() { // when best block is unknown - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), false); + assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .try_append(&test_id(100)), + false + ); // when best block is known but different - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(101)), false); + 
assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .try_append(&test_id(101)), + false + ); // when best block is known and the same - assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), true); + assert_eq!( + Fork::<_, u64> { + best_block: Some(test_id(100)), + head: Entry { valid_from: test_id(100), value: 0 } + } + .try_append(&test_id(100)), + true + ); } #[test] fn fork_try_append_or_fork_works() { // when there's no entry before parent let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, + ) .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append_or_fork(&storage, &test_id(30), None).unwrap(), None); + assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .try_append_or_fork(&storage, &test_id(30), None) + .unwrap(), + None + ); // when parent does not belong to the fork let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .try_append_or_fork(&storage, &fork_id(0, 2, 4), None).unwrap(), None); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .try_append_or_fork(&storage, &fork_id(0, 2, 4), None) + .unwrap(), + None + ); // when the entry before parent is the head entry let storage = DummyStorage::new() .with_entry( @@ -1463,30 +1880,57 @@ mod tests { ) .with_header(test_header(6)) .with_header(test_header(5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .try_append_or_fork(&storage, &correct_id(6), None).unwrap(), Some(ForkAppendResult::Append)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .try_append_or_fork(&storage, &correct_id(6), None) + .unwrap(), + Some(ForkAppendResult::Append) + ); // when the parent is located after last finalized entry let storage = DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(6), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(6)) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), None).unwrap(), Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3)))); + assert_eq!( + Fork::<_, u64> { best_block: None, + head: Entry { valid_from: correct_id(6), value: 100 } + } + .try_append_or_fork(&storage, &fork_id(0, 4, 5), None) + .unwrap(), + Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3))) + ); // when the parent is located before last finalized entry let storage = DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(6), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(6)) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)).unwrap(), None); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(6), value: 100 } + } + .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)) + .unwrap(), + None + ); } #[test] @@ -1495,12 +1939,16 @@ mod tests { let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); let mut tx = DummyTransaction::new(); Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); assert!(tx.removed_entries().is_empty()); // when we reach finalized entry with iterations let storage = DummyStorage::new() .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, + ) .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: 50 }) .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: 10 }) @@ -1508,120 +1956,192 @@ mod tests { .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: 0 }); let mut tx = DummyTransaction::new(); Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert_eq!(*tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash, test_id(20).hash].into_iter().collect()); + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert_eq!( + *tx.removed_entries(), + vec![test_id(100).hash, test_id(50).hash, test_id(20).hash] + .into_iter() + .collect() + ); // when we reach beginning of fork before finalized block let storage = DummyStorage::new() .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, + ) .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); let mut tx = DummyTransaction::new(); Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert_eq!(*tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash].into_iter().collect()); + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert_eq!( + *tx.removed_entries(), + vec![test_id(100).hash, test_id(50).hash].into_iter().collect()
+ ); } #[test] fn is_connected_to_block_fails() { // when storage returns error - assert!( - chain::is_connected_to_block::<_, u64, _>( - &FaultyStorage, - (&test_id(1)).into(), - &test_id(100), - ).is_err(), - ); + assert!(chain::is_connected_to_block::<_, u64, _>( + &FaultyStorage, + (&test_id(1)).into(), + &test_id(100), + ) + .is_err(),); // when there's no header in the storage - assert!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new(), - (&test_id(1)).into(), - &test_id(100), - ).is_err(), - ); + assert!(chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new(), + (&test_id(1)).into(), + &test_id(100), + ) + .is_err(),); } #[test] fn is_connected_to_block_works() { // when without iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(1)), - (&test_id(1)).into(), &correct_id(1)).unwrap(), false); + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new().with_header(test_header(1)), + (&test_id(1)).into(), + &correct_id(1) + ) + .unwrap(), + false + ); // when with ASC iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&test_id(0)).into(), &correct_id(2)).unwrap(), false); + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + (&test_id(0)).into(), + &correct_id(2) + ) + .unwrap(), + false + ); // when with DESC iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), &test_id(0)).unwrap(), false); + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + (&correct_id(2)).into(), + &test_id(0) + ) + .unwrap(), + false + ); // when without iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(1)), - (&correct_id(1)).into(), &correct_id(1)).unwrap(), true); + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new().with_header(test_header(1)), + (&correct_id(1)).into(), + &correct_id(1) + ) + .unwrap(), + true + ); // when with ASC iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(0)).into(), &correct_id(2)).unwrap(), true); + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + (&correct_id(0)).into(), + &correct_id(2) + ) + .unwrap(), + true + ); // when with DESC iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), &correct_id(0)).unwrap(), true); + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + 
.with_header(test_header(1)) + .with_header(test_header(2)), + (&correct_id(2)).into(), + &correct_id(0) + ) + .unwrap(), + true + ); } #[test] fn is_finalized_block_fails() { // when storage returns error assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err()); - } #[test] fn is_finalized_block_works() { // when number of block is larger than last finalized block - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), false); + assert_eq!( + chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), + false + ); // when there's no hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), false); + assert_eq!( + chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), + false + ); // when there's different hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new() - .with_id(1, H256::from_low_u64_be(2)), &test_id(1), 100).unwrap(), false); + assert_eq!( + chain::is_finalized_block::<_, u64, _>( + &DummyStorage::new().with_id(1, H256::from_low_u64_be(2)), + &test_id(1), + 100 + ) + .unwrap(), + false + ); // when there's the same hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new() - .with_id(1, H256::from_low_u64_be(1)), &test_id(1), 100).unwrap(), true); + assert_eq!( + chain::is_finalized_block::<_, u64, _>( + &DummyStorage::new().with_id(1, H256::from_low_u64_be(1)), + &test_id(1), + 100 + ) + .unwrap(), + true + ); } #[test] fn read_forks_fails() { // when storage returns error during finalized entry read - assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata { - finalized: Some(test_id(1)), - unfinalized: vec![], - }).is_err()); + assert!(read_forks::<Block, u64, _>( + &FaultyStorage, + Metadata { finalized: Some(test_id(1)), unfinalized: vec![] } + ) + .is_err()); // when storage returns error during unfinalized entry read - assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata { - finalized: None, - unfinalized: vec![test_id(1)], - }).is_err()); + assert!(read_forks::<Block, u64, _>( + &FaultyStorage, + Metadata { finalized: None, unfinalized: vec![test_id(1)] } + ) + .is_err()); // when finalized entry is not found - assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata { - finalized: Some(test_id(1)), - unfinalized: vec![], - }).is_err()); + assert!(read_forks::<Block, u64, _>( + &DummyStorage::new(), + Metadata { finalized: Some(test_id(1)), unfinalized: vec![] } + ) + .is_err()); // when unfinalized entry is not found - assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata { - finalized: None, - unfinalized: vec![test_id(1)], - }).is_err()); + assert!(read_forks::<Block, u64, _>( + &DummyStorage::new(), + Metadata { finalized: None, unfinalized: vec![test_id(1)] } + ) + .is_err()); } #[test] @@ -1638,23 +2158,40 @@ mod tests { ], ); - assert_eq!(expected, read_forks(&storage, Metadata { - finalized: Some(test_id(10)), - unfinalized: vec![test_id(20), test_id(30)], - }).unwrap()); + assert_eq!( + expected, + read_forks( + &storage, + Metadata { + finalized: Some(test_id(10)), + unfinalized: vec![test_id(20), test_id(30)], + } + ) + .unwrap() + ); } #[test] fn ancient_entries_are_pruned_when_pruning_enabled() { fn do_test(strategy: PruningStrategy) { - let cache = ListCache::new(DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_id(20, H256::from_low_u64_be(20)) - .with_id(30,
H256::from_low_u64_be(30)) - .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), - strategy, test_id(9)).unwrap(); + let cache = ListCache::new( + DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_id(20, H256::from_low_u64_be(20)) + .with_id(30, H256::from_low_u64_be(30)) + .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) + .with_entry( + test_id(20), + StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }, + ) + .with_entry( + test_id(30), + StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }, + ), + strategy, + test_id(9), + ) + .unwrap(); let mut tx = DummyTransaction::new(); // when finalizing entry #10: no entries pruned @@ -1669,7 +2206,8 @@ mod tests { cache.prune_finalized_entries(&mut tx, &test_id(20)); assert!(tx.removed_entries().is_empty()); assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is enabled) + // when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is + // enabled) cache.prune_finalized_entries(&mut tx, &test_id(30)); match strategy { PruningStrategy::NeverPrune => { @@ -1678,7 +2216,10 @@ mod tests { }, PruningStrategy::ByDepth(_) => { assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect()); - assert_eq!(*tx.inserted_entries(), vec![test_id(20).hash].into_iter().collect()); + assert_eq!( + *tx.inserted_entries(), + vec![test_id(20).hash].into_iter().collect() + ); }, } } @@ -1696,15 +2237,36 @@ mod tests { // -> (3') -> 4' -> 5' let mut cache = ListCache::new( DummyStorage::new() - .with_meta(Some(correct_id(1)), vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)]) + .with_meta( + Some(correct_id(1)), + vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)], + ) .with_id(1, correct_id(1).hash) .with_entry(correct_id(1), StorageEntry { prev_valid_from: None, value: 1 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 3 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 4 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(4)), value: 5 }) - .with_entry(fork_id(1, 2, 4), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 14 }) - .with_entry(fork_id(1, 2, 5), StorageEntry { prev_valid_from: Some(fork_id(1, 2, 4)), value: 15 }) - .with_entry(fork_id(2, 4, 5), StorageEntry { prev_valid_from: Some(correct_id(4)), value: 25 }) + .with_entry( + correct_id(3), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 3 }, + ) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 4 }, + ) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(4)), value: 5 }, + ) + .with_entry( + fork_id(1, 2, 4), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 14 }, + ) + .with_entry( + fork_id(1, 2, 5), + StorageEntry { prev_valid_from: Some(fork_id(1, 2, 4)), value: 15 }, + ) + .with_entry( + fork_id(2, 4, 5), + StorageEntry { prev_valid_from: Some(correct_id(4)), value: 25 }, + ) .with_header(test_header(1)) .with_header(test_header(2)) .with_header(test_header(3)) @@ -1714,29 +2276,40 @@ mod tests { .with_header(fork_header(1, 2, 4)) .with_header(fork_header(1, 2, 5)) 
.with_header(fork_header(2, 4, 5)), - PruningStrategy::ByDepth(1024), correct_id(1) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(1), + ) + .unwrap(); // when 5 is reverted: entry 5 is truncated let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![ - (0, Some(Fork { best_block: None, head: Entry { valid_from: correct_id(4), value: 4 } })), - ].into_iter().collect())); + assert_eq!( + op, + CommitOperation::BlockReverted( + vec![( + 0, + Some(Fork { + best_block: None, + head: Entry { valid_from: correct_id(4), value: 4 } + }) + ),] + .into_iter() + .collect() + ) + ); cache.on_transaction_commit(vec![op].into()); // when 3 is reverted: entries 4+5' are truncated let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![ - (0, None), - (2, None), - ].into_iter().collect())); + assert_eq!( + op, + CommitOperation::BlockReverted(vec![(0, None), (2, None),].into_iter().collect()) + ); cache.on_transaction_commit(vec![op].into()); // when 2 is reverted: entries 4'+5' are truncated let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![ - (0, None), - ].into_iter().collect())); + assert_eq!(op, CommitOperation::BlockReverted(vec![(0, None),].into_iter().collect())); cache.on_transaction_commit(vec![op].into()); } diff --git a/client/db/src/cache/list_entry.rs b/client/db/src/cache/list_entry.rs index d14fab9274ccb..7cee7a5146260 100644 --- a/client/db/src/cache/list_entry.rs +++ b/client/db/src/cache/list_entry.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,12 +18,11 @@ //! List-cache storage entries. +use codec::{Decode, Encode}; use sp_blockchain::Result as ClientResult; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use codec::{Encode, Decode}; -use crate::cache::{CacheItemT, ComplexBlockId}; -use crate::cache::list_storage::{Storage}; +use crate::cache::{list_storage::Storage, CacheItemT, ComplexBlockId}; /// Single list-based cache entry. #[derive(Debug)] @@ -52,10 +51,8 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> { match value { Some(value) => match self.value == value { true => None, - false => Some(StorageEntry { - prev_valid_from: Some(self.valid_from.clone()), - value, - }), + false => + Some(StorageEntry { prev_valid_from: Some(self.valid_from.clone()), value }), }, None => None, } @@ -67,7 +64,8 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> { storage: &S, block: NumberFor<Block>, ) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>)>> { - Ok(self.search_best_before(storage, block)? + Ok(self + .search_best_before(storage, block)?
.map(|(entry, next)| (entry.valid_from, next))) } @@ -86,14 +84,14 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> { let mut current = self.valid_from.clone(); if block >= self.valid_from.number { let value = self.value.clone(); - return Ok(Some((Entry { valid_from: current, value }, next))); + return Ok(Some((Entry { valid_from: current, value }, next))) } // else - travel back in time loop { let entry = storage.require_entry(&current)?; if block >= current.number { - return Ok(Some((Entry { valid_from: current, value: entry.value }, next))); + return Ok(Some((Entry { valid_from: current, value: entry.value }, next))) } next = Some(current); @@ -108,18 +106,15 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> { impl<Block: BlockT, T: CacheItemT> StorageEntry<Block, T> { /// Converts storage entry into an entry, valid from given block. pub fn into_entry(self, valid_from: ComplexBlockId<Block>) -> Entry<Block, T> { - Entry { - valid_from, - value: self.value, - } + Entry { valid_from, value: self.value } } } #[cfg(test)] mod tests { - use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage}; - use substrate_test_runtime_client::runtime::{H256, Block}; use super::*; + use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage}; + use substrate_test_runtime_client::runtime::{Block, H256}; fn test_id(number: u64) -> ComplexBlockId<Block> { ComplexBlockId::new(H256::from_low_u64_be(number), number) @@ -132,36 +127,61 @@ mod tests { // when trying to update with the same Some value assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(1)), None); // when trying to update with different Some value - assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)), - Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 })); + assert_eq!( + Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)), + Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 }) + ); } #[test] fn entry_search_best_before_fails() { // when storage returns error assert!(Entry::<_, u64> { valid_from: test_id(100), value: 42 } - .search_best_before(&FaultyStorage, 50).is_err()); + .search_best_before(&FaultyStorage, 50) + .is_err()); } #[test] fn entry_search_best_before_works() { // when block is better than our best block - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } - .search_best_before(&DummyStorage::new(), 150).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None))); + assert_eq!( + Entry::<_, u64> { valid_from: test_id(100), value: 100 } + .search_best_before(&DummyStorage::new(), 150) + .unwrap(), + Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None)) + ); // when block is found between two entries - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } - .search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }), - 75).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100))))); + assert_eq!( + Entry::<_, u64> { valid_from: test_id(100), value: 100 } + .search_best_before( + &DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 } + ) + .with_entry( + test_id(50), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 } + ), + 75 + ) + .unwrap(), + Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100)))) + ); // when block is not found - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } -
.search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }), - 30).unwrap(), - None); + assert_eq!( + Entry::<_, u64> { valid_from: test_id(100), value: 100 } + .search_best_before( + &DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 } + ) + .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }), + 30 + ) + .unwrap(), + None + ); } } diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs index 377d744effa60..bb47b8dab5a7f 100644 --- a/client/db/src/cache/list_storage.rs +++ b/client/db/src/cache/list_storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,17 +20,23 @@ use std::sync::Arc; +use crate::utils::{self, meta_keys}; +use codec::{Decode, Encode}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use codec::{Encode, Decode}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_database::{Database, Transaction}; -use crate::utils::{self, meta_keys}; - -use crate::cache::{CacheItemT, ComplexBlockId}; -use crate::cache::list_cache::{CommitOperation, Fork}; -use crate::cache::list_entry::{Entry, StorageEntry}; -use crate::DbHash; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, +}; + +use crate::{ + cache::{ + list_cache::{CommitOperation, Fork}, + list_entry::{Entry, StorageEntry}, + CacheItemT, ComplexBlockId, + }, + DbHash, +}; /// Single list-cache metadata. #[derive(Debug)] @@ -54,14 +60,21 @@ pub trait Storage { fn read_meta(&self) -> ClientResult>; /// Reads cache entry from the storage. - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>>; + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>>; /// Reads referenced (and thus existing) cache entry from the storage. fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> { - self.read_entry(at) - .and_then(|entry| entry - .ok_or_else(|| ClientError::from( - ClientError::Backend(format!("Referenced cache entry at {:?} is not found", at))))) + self.read_entry(at).and_then(|entry| { + entry.ok_or_else(|| { + ClientError::from(ClientError::Backend(format!( + "Referenced cache entry at {:?} is not found", + at + ))) + }) + }) } } @@ -111,10 +124,14 @@ impl DbStorage { } /// Get reference to the database. - pub fn db(&self) -> &Arc> { &self.db } + pub fn db(&self) -> &Arc> { + &self.db + } /// Get reference to the database columns. - pub fn columns(&self) -> &DbColumns { &self.columns } + pub fn columns(&self) -> &DbColumns { + &self.columns + } /// Encode block id for storing as a key in cache column. 
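// Aside: `require_entry` above turns a missing-but-referenced entry into a
// backend error. A std-only sketch of that read-then-`ok_or_else` pattern,
// with a `HashMap` standing in for the database:
use std::collections::HashMap;

fn read_entry(db: &HashMap<u64, u64>, at: u64) -> Result<Option<u64>, String> {
    Ok(db.get(&at).copied()) // absence is not an error for a plain read
}

fn require_entry(db: &HashMap<u64, u64>, at: u64) -> Result<u64, String> {
    read_entry(db, at).and_then(|entry| {
        entry.ok_or_else(|| format!("Referenced cache entry at {:?} is not found", at))
    })
}

fn main() {
    let mut db = HashMap::new();
    db.insert(1u64, 10u64);
    assert_eq!(require_entry(&db, 1), Ok(10));
    assert!(require_entry(&db, 2).is_err());
}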
/// We append prefix to the actual encoding to allow several caches @@ -128,25 +145,35 @@ impl DbStorage { impl Storage for DbStorage { fn read_id(&self, at: NumberFor) -> ClientResult> { - utils::read_header::(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Number(at)) - .map(|maybe_header| maybe_header.map(|header| header.hash())) + utils::read_header::( + &*self.db, + self.columns.key_lookup, + self.columns.header, + BlockId::Number(at), + ) + .map(|maybe_header| maybe_header.map(|header| header.hash())) } fn read_header(&self, at: &Block::Hash) -> ClientResult> { - utils::read_header::(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Hash(*at)) + utils::read_header::( + &*self.db, + self.columns.key_lookup, + self.columns.header, + BlockId::Hash(*at), + ) } fn read_meta(&self) -> ClientResult> { match self.db.get(self.columns.meta, &self.meta_key) { Some(meta) => meta::decode(&*meta), - None => Ok(Metadata { - finalized: None, - unfinalized: Vec::new(), - }) + None => Ok(Metadata { finalized: None, unfinalized: Vec::new() }), } } - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> { + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>> { match self.db.get(self.columns.cache, &self.encode_block_id(at)) { Some(entry) => StorageEntry::::decode(&mut &entry[..]) .map_err(|_| ClientError::Backend("Failed to decode cache entry".into())) @@ -171,7 +198,11 @@ impl<'a> DbStorageTransaction<'a> { impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorageTransaction<'a> { fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { - self.tx.set_from_vec(self.storage.columns.cache, &self.storage.encode_block_id(at), entry.encode()); + self.tx.set_from_vec( + self.storage.columns.cache, + &self.storage.encode_block_id(at), + entry.encode(), + ); } fn remove_storage_entry(&mut self, at: &ComplexBlockId) { @@ -187,7 +218,8 @@ impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorag self.tx.set_from_vec( self.storage.columns.meta, &self.storage.meta_key, - meta::encode(best_finalized_entry, unfinalized, operation)); + meta::encode(best_finalized_entry, unfinalized, operation), + ); } } @@ -206,10 +238,11 @@ mod meta { pub fn encode( best_finalized_entry: Option<&Entry>, unfinalized: &[Fork], - op: &CommitOperation + op: &CommitOperation, ) -> Vec { let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from); - let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::>(); + let mut unfinalized = + unfinalized.iter().map(|fork| &fork.head().valid_from).collect::>(); match op { CommitOperation::AppendNewBlock(_, _) => (), @@ -230,8 +263,11 @@ mod meta { CommitOperation::BlockReverted(ref forks) => { for (fork_index, updated_fork) in forks.iter().rev() { match updated_fork { - Some(updated_fork) => unfinalized[*fork_index] = &updated_fork.head().valid_from, - None => { unfinalized.remove(*fork_index); }, + Some(updated_fork) => + unfinalized[*fork_index] = &updated_fork.head().valid_from, + None => { + unfinalized.remove(*fork_index); + }, } } }, @@ -243,10 +279,12 @@ mod meta { /// Decode meta information. 
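// Aside: a sketch of the cache-key scheme described in the hunk above: each
// named cache gets its own prefix, and the (number, hash) block id is appended
// so several caches can share one column without colliding. The exact byte
// layout here (LE number plus raw hash bytes) is an illustrative assumption,
// not the real SCALE encoding.
fn encode_block_id(prefix: &[u8], number: u64, hash: &[u8; 32]) -> Vec<u8> {
    let mut key = prefix.to_vec();
    key.extend_from_slice(&number.to_le_bytes());
    key.extend_from_slice(hash);
    key
}

fn main() {
    let k1 = encode_block_id(b"auth", 7, &[0xab; 32]);
    let k2 = encode_block_id(b"epoch", 7, &[0xab; 32]);
    assert_ne!(k1, k2); // same block, different caches, different keys
}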
pub fn decode(encoded: &[u8]) -> ClientResult> { let input = &mut &*encoded; - let finalized: Option> = Decode::decode(input) - .map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?; - let unfinalized: Vec> = Decode::decode(input) - .map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?; + let finalized: Option> = Decode::decode(input).map_err(|_| { + ClientError::from(ClientError::Backend("Error decoding cache meta".into())) + })?; + let unfinalized: Vec> = Decode::decode(input).map_err(|_| { + ClientError::from(ClientError::Backend("Error decoding cache meta".into())) + })?; Ok(Metadata { finalized, unfinalized }) } @@ -254,8 +292,8 @@ mod meta { #[cfg(test)] pub mod tests { - use std::collections::{HashMap, HashSet}; use super::*; + use std::collections::{HashMap, HashSet}; pub struct FaultyStorage; @@ -272,7 +310,10 @@ pub mod tests { Err(ClientError::Backend("TestError".into())) } - fn read_entry(&self, _at: &ComplexBlockId) -> ClientResult>> { + fn read_entry( + &self, + _at: &ComplexBlockId, + ) -> ClientResult>> { Err(ClientError::Backend("TestError".into())) } } @@ -287,17 +328,18 @@ pub mod tests { impl DummyStorage { pub fn new() -> Self { DummyStorage { - meta: Metadata { - finalized: None, - unfinalized: Vec::new(), - }, + meta: Metadata { finalized: None, unfinalized: Vec::new() }, ids: HashMap::new(), headers: HashMap::new(), entries: HashMap::new(), } } - pub fn with_meta(mut self, finalized: Option>, unfinalized: Vec>) -> Self { + pub fn with_meta( + mut self, + finalized: Option>, + unfinalized: Vec>, + ) -> Self { self.meta.finalized = finalized; self.meta.unfinalized = unfinalized; self @@ -313,7 +355,11 @@ pub mod tests { self } - pub fn with_entry(mut self, at: ComplexBlockId, entry: StorageEntry) -> Self { + pub fn with_entry( + mut self, + at: ComplexBlockId, + entry: StorageEntry, + ) -> Self { self.entries.insert(at.hash, entry); self } @@ -332,7 +378,10 @@ pub mod tests { Ok(self.meta.clone()) } - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> { + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>> { Ok(self.entries.get(&at.hash).cloned()) } } @@ -366,7 +415,11 @@ pub mod tests { } impl StorageTransaction for DummyTransaction { - fn insert_storage_entry(&mut self, at: &ComplexBlockId, _entry: &StorageEntry) { + fn insert_storage_entry( + &mut self, + at: &ComplexBlockId, + _entry: &StorageEntry, + ) { self.inserted_entries.insert(at.hash); } @@ -380,7 +433,9 @@ pub mod tests { unfinalized: &[Fork], operation: &CommitOperation, ) { - self.updated_meta = Some(meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap()); + self.updated_meta = Some( + meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap(), + ); } } } diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index 5501f0f1864c1..5502896aced2c 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,17 +18,27 @@ //! DB-backed cache of blockchain data. 
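// Aside: `meta::encode` above applies the pending `BlockReverted` operation to
// the in-memory list of unfinalized fork heads before serializing. A std-only
// sketch of that maintenance step; `u64` heads and a `BTreeMap` stand in for
// the real fork types. Iterating fork indices in reverse keeps the `remove`
// calls from invalidating the remaining indices.
use std::collections::BTreeMap;

fn apply_block_reverted(unfinalized: &mut Vec<u64>, forks: &BTreeMap<usize, Option<u64>>) {
    for (fork_index, updated_head) in forks.iter().rev() {
        match updated_head {
            Some(new_head) => unfinalized[*fork_index] = *new_head,
            None => {
                unfinalized.remove(*fork_index);
            },
        }
    }
}

fn main() {
    let mut heads = vec![10, 20, 30];
    let mut op = BTreeMap::new();
    op.insert(0usize, Some(4)); // fork 0 truncated back to block 4
    op.insert(2usize, None);    // fork 2 reverted entirely
    apply_block_reverted(&mut heads, &op);
    assert_eq!(heads, vec![4, 20]);
}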
-use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; use parking_lot::RwLock; - -use sc_client_api::blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, Cache as BlockchainCache}; -use sp_blockchain::{Result as ClientResult, HeaderMetadataCache}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; + +use crate::{ + utils::{self, COLUMN_META}, + DbHash, +}; +use codec::{Decode, Encode}; +use sc_client_api::blockchain::{ + well_known_cache_keys::{self, Id as CacheKeyId}, + Cache as BlockchainCache, +}; +use sp_blockchain::{HeaderMetadataCache, Result as ClientResult}; use sp_database::{Database, Transaction}; -use codec::{Encode, Decode}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; -use crate::utils::{self, COLUMN_META}; -use crate::DbHash; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, +}; use self::list_cache::{ListCache, PruningStrategy}; @@ -118,7 +128,10 @@ impl DbCache { } /// Begin cache transaction. - pub fn transaction<'a>(&'a mut self, tx: &'a mut Transaction) -> DbCacheTransaction<'a, Block> { + pub fn transaction<'a>( + &'a mut self, + tx: &'a mut Transaction, + ) -> DbCacheTransaction<'a, Block> { DbCacheTransaction { cache: self, tx, @@ -164,7 +177,7 @@ impl DbCache { self.key_lookup_column, self.header_column, self.cache_column, - &self.best_finalized_block + &self.best_finalized_block, ) } } @@ -184,19 +197,16 @@ fn get_cache_helper<'a, Block: BlockT>( Entry::Occupied(entry) => Ok(entry.into_mut()), Entry::Vacant(entry) => { let cache = ListCache::new( - self::list_storage::DbStorage::new(name.to_vec(), db.clone(), - self::list_storage::DbColumns { - meta: COLUMN_META, - key_lookup, - header, - cache, - }, + self::list_storage::DbStorage::new( + name.to_vec(), + db.clone(), + self::list_storage::DbColumns { meta: COLUMN_META, key_lookup, header, cache }, ), cache_pruning_strategy(name), best_finalized_block.clone(), )?; Ok(entry.insert(cache)) - } + }, } } @@ -210,10 +220,7 @@ pub struct DbCacheTransactionOps { impl DbCacheTransactionOps { /// Empty transaction ops. 
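// Aside: `get_cache_helper` above lazily constructs a `ListCache` the first
// time a key is requested. Because construction is fallible, it matches on the
// map entry instead of using `or_insert_with`. A std-only sketch of that shape:
use std::collections::{hash_map::Entry, HashMap};

fn get_or_try_init<'a>(
    caches: &'a mut HashMap<String, Vec<u8>>,
    name: &str,
) -> Result<&'a mut Vec<u8>, String> {
    match caches.entry(name.to_owned()) {
        Entry::Occupied(entry) => Ok(entry.into_mut()),
        Entry::Vacant(entry) => {
            let cache = expensive_fallible_init(name)?; // may hit the database
            Ok(entry.insert(cache))
        },
    }
}

fn expensive_fallible_init(name: &str) -> Result<Vec<u8>, String> {
    Ok(name.as_bytes().to_vec())
}

fn main() {
    let mut caches = HashMap::new();
    assert!(get_or_try_init(&mut caches, "changes_trie_config").is_ok());
    assert_eq!(caches.len(), 1); // second lookup reuses the cached instance
}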
pub fn empty() -> DbCacheTransactionOps { - DbCacheTransactionOps { - cache_at_ops: HashMap::new(), - best_finalized_block: None, - } + DbCacheTransactionOps { cache_at_ops: HashMap::new(), best_finalized_block: None } } } @@ -244,19 +251,21 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { ) -> ClientResult { // prepare list of caches that are not update // (we might still need to do some cache maintenance in this case) - let missed_caches = self.cache.cache_at.keys() + let missed_caches = self + .cache + .cache_at + .keys() .filter(|cache| !data_at.contains_key(*cache)) .cloned() .collect::>(); - let mut insert_op = |name: CacheKeyId, value: Option>| -> Result<(), sp_blockchain::Error> { + let mut insert_op = |name: CacheKeyId, + value: Option>| + -> Result<(), sp_blockchain::Error> { let cache = self.cache.get_cache(name)?; let cache_ops = self.cache_at_ops.entry(name).or_default(); cache.on_block_insert( - &mut self::list_storage::DbStorageTransaction::new( - cache.storage(), - &mut self.tx, - ), + &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), parent.clone(), block.clone(), value, @@ -271,8 +280,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?; match entry_type { - EntryType::Final | EntryType::Genesis => - self.best_finalized_block = Some(block), + EntryType::Final | EntryType::Genesis => self.best_finalized_block = Some(block), EntryType::NonFinal => (), } @@ -288,10 +296,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { for (name, cache) in self.cache.cache_at.iter() { let cache_ops = self.cache_at_ops.entry(*name).or_default(); cache.on_block_finalize( - &mut self::list_storage::DbStorageTransaction::new( - cache.storage(), - &mut self.tx - ), + &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), parent.clone(), block.clone(), cache_ops, @@ -304,17 +309,11 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { } /// When block is reverted. 
- pub fn on_block_revert( - mut self, - reverted_block: &ComplexBlockId, - ) -> ClientResult { + pub fn on_block_revert(mut self, reverted_block: &ComplexBlockId) -> ClientResult { for (name, cache) in self.cache.cache_at.iter() { let cache_ops = self.cache_at_ops.entry(*name).or_default(); cache.on_block_revert( - &mut self::list_storage::DbStorageTransaction::new( - cache.storage(), - &mut self.tx - ), + &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), reverted_block, cache_ops, )?; @@ -352,7 +351,9 @@ impl BlockchainCache for DbCacheSync { &self, key: &CacheKeyId, at: &BlockId, - ) -> ClientResult, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>> { + ) -> ClientResult< + Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>, + > { let mut cache = self.0.write(); let header_metadata_cache = cache.header_metadata_cache.clone(); let cache = cache.get_cache(*key)?; @@ -360,36 +361,39 @@ impl BlockchainCache for DbCacheSync { let db = storage.db(); let columns = storage.columns(); let at = match *at { - BlockId::Hash(hash) => { - match header_metadata_cache.header_metadata(hash) { - Some(metadata) => ComplexBlockId::new(hash, metadata.number), - None => { - let header = utils::require_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Hash(hash.clone()))?; - ComplexBlockId::new(hash, *header.number()) - } - } + BlockId::Hash(hash) => match header_metadata_cache.header_metadata(hash) { + Some(metadata) => ComplexBlockId::new(hash, metadata.number), + None => { + let header = utils::require_header::( + &**db, + columns.key_lookup, + columns.header, + BlockId::Hash(hash.clone()), + )?; + ComplexBlockId::new(hash, *header.number()) + }, }, BlockId::Number(number) => { let hash = utils::require_header::( &**db, columns.key_lookup, columns.header, - BlockId::Number(number.clone()))?.hash(); + BlockId::Number(number.clone()), + )? + .hash(); ComplexBlockId::new(hash, number) }, }; - cache.value_at_block(&at) - .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| + cache.value_at_block(&at).map(|block_and_value| { + block_and_value.map(|(begin_block, end_block, value)| { ( (begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value, - ))) + ) + }) + }) } } diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index a2299a82337a0..3a3c5918535f9 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -1,54 +1,66 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
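// Aside: the `get_at` path above first consults the in-memory header-metadata
// cache and only falls back to a database read on a miss. A std-only sketch of
// that lookup order, with maps standing in for the cache and the db:
use std::collections::HashMap;

struct MetadataCache {
    numbers: HashMap<[u8; 32], u64>,
}

fn resolve_number(
    cache: &MetadataCache,
    db: &HashMap<[u8; 32], u64>,
    hash: &[u8; 32],
) -> Result<u64, String> {
    match cache.numbers.get(hash) {
        Some(number) => Ok(*number), // fast path: metadata cache hit
        None => db
            .get(hash)
            .copied()
            .ok_or_else(|| format!("header not found for {:02x?}", &hash[..4])),
    }
}

fn main() {
    let cache = MetadataCache { numbers: HashMap::new() };
    let mut db = HashMap::new();
    db.insert([1u8; 32], 42u64);
    assert_eq!(resolve_number(&cache, &db, &[1u8; 32]), Ok(42));
}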
// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! DB-backed changes tries storage. -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use hash_db::Prefix; +use crate::{ + cache::{ + ComplexBlockId, DbCache, DbCacheSync, DbCacheTransactionOps, EntryType as CacheEntryType, + }, + utils::{self, meta_keys, Meta}, + Database, DbHash, +}; use codec::{Decode, Encode}; +use hash_db::Prefix; use parking_lot::RwLock; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_trie::MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; -use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache, HeaderMetadataCache}; -use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; -use sp_core::storage::PrefixedStorageKey; +use sp_blockchain::{ + well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderMetadataCache, + Result as ClientResult, +}; +use sp_core::{ + convert_hash, storage::PrefixedStorageKey, ChangesTrieConfiguration, + ChangesTrieConfigurationRange, +}; use sp_database::Transaction; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub, +use sp_runtime::{ + generic::{BlockId, ChangesTrieSignal, DigestItem}, + traits::{Block as BlockT, CheckedSub, HashFor, Header as HeaderT, NumberFor, One, Zero}, }; -use sp_runtime::generic::{BlockId, DigestItem, ChangesTrieSignal}; use sp_state_machine::{ChangesTrieBuildCache, ChangesTrieCacheAction}; -use crate::{Database, DbHash}; -use crate::utils::{self, Meta, meta_keys}; -use crate::cache::{ - DbCacheSync, DbCache, DbCacheTransactionOps, - ComplexBlockId, EntryType as CacheEntryType, +use sp_trie::MemoryDB; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; /// Extract new changes trie configuration (if available) from the header. -pub fn extract_new_configuration(header: &Header) -> Option<&Option> { - header.digest() +pub fn extract_new_configuration( + header: &Header, +) -> Option<&Option> { + header + .digest() .log(DigestItem::as_changes_trie_signal) .and_then(ChangesTrieSignal::as_new_configuration) } -/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is currently -/// guaranteed because import lock is held during block import/finalization. +/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is +/// currently guaranteed because import lock is held during block import/finalization. pub struct DbChangesTrieStorageTransaction { /// Cache operations that must be performed after db transaction is committed. cache_ops: DbCacheTransactionOps, @@ -66,10 +78,7 @@ impl DbChangesTrieStorageTransaction { impl From> for DbChangesTrieStorageTransaction { fn from(cache_ops: DbCacheTransactionOps) -> Self { - DbChangesTrieStorageTransaction { - cache_ops, - new_config: None, - } + DbChangesTrieStorageTransaction { cache_ops, new_config: None } } } @@ -101,12 +110,13 @@ struct ChangesTriesMeta { /// The range is inclusive from both sides. 
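// Aside: `extract_new_configuration` above scans the header digest for the
// first changes-trie signal. A std-only sketch with simplified stand-ins for
// the digest types (the real ones live in sp-runtime, and the signal payload
// here is a `u32` instead of a full configuration):
enum DigestItem {
    ChangesTrieRoot([u8; 32]),
    ChangesTrieSignal(Option<u32>), // stand-in for NewConfiguration(Option<Config>)
}

fn extract_new_configuration(digest: &[DigestItem]) -> Option<&Option<u32>> {
    digest.iter().find_map(|item| match item {
        DigestItem::ChangesTrieSignal(new_config) => Some(new_config),
        _ => None,
    })
}

fn main() {
    let digest = vec![
        DigestItem::ChangesTrieRoot([0; 32]),
        DigestItem::ChangesTrieSignal(None), // signal: tries disabled from here
    ];
    assert_eq!(extract_new_configuration(&digest), Some(&None));
}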
/// Is None only if: /// 1) we haven't yet finalized any blocks (except genesis) - /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled - /// 3) changes tries pruning is disabled + /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are + /// disabled 3) changes tries pruning is disabled pub oldest_digest_range: Option<(NumberFor, NumberFor)>, /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. /// It is guaranteed that we have no any changes tries before (and including) this block. - /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). + /// It is guaranteed that all existing changes tries after this block are not yet pruned (if + /// created). pub oldest_pruned_digest_range_end: NumberFor, } @@ -171,21 +181,25 @@ impl DbChangesTrieStorage { let new_configuration = match new_configuration { Some(new_configuration) => new_configuration, None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), - None => return self.finalize( - tx, - parent_block.hash, - block.hash, - block.number, - Some(new_header), - cache_tx, - ), + None => + return self.finalize( + tx, + parent_block.hash, + block.hash, + block.number, + Some(new_header), + cache_tx, + ), }; // update configuration cache let mut cache_at = HashMap::new(); cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); Ok(DbChangesTrieStorageTransaction::from(match cache_tx { - Some(cache_tx) => self.cache.0.write() + Some(cache_tx) => self + .cache + .0 + .write() .transaction_with_ops(tx, cache_tx.cache_ops) .on_block_insert( parent_block, @@ -194,7 +208,10 @@ impl DbChangesTrieStorage { if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, )? .into_ops(), - None => self.cache.0.write() + None => self + .cache + .0 + .write() .transaction(tx) .on_block_insert( parent_block, @@ -203,7 +220,8 @@ impl DbChangesTrieStorage { if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, )? .into_ops(), - }).with_new_config(Some(new_configuration))) + }) + .with_new_config(Some(new_configuration))) } /// Called when block is finalized. @@ -224,7 +242,7 @@ impl DbChangesTrieStorage { if cache_tx.is_some() { if let Some(new_header) = new_header { if new_header.hash() == block_hash { - return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")); + return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")) } } } @@ -235,22 +253,21 @@ impl DbChangesTrieStorage { let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); Ok(match cache_tx { Some(cache_tx) => DbChangesTrieStorageTransaction::from( - self.cache.0.write() + self.cache + .0 + .write() .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_finalize( - parent_block, - block, - )? - .into_ops() - ).with_new_config(cache_tx.new_config), + .on_block_finalize(parent_block, block)? + .into_ops(), + ) + .with_new_config(cache_tx.new_config), None => DbChangesTrieStorageTransaction::from( - self.cache.0.write() + self.cache + .0 + .write() .transaction(tx) - .on_block_finalize( - parent_block, - block, - )? - .into_ops() + .on_block_finalize(parent_block, block)? + .into_ops(), ), }) } @@ -261,23 +278,24 @@ impl DbChangesTrieStorage { tx: &mut Transaction, block: &ComplexBlockId, ) -> ClientResult> { - Ok(self.cache.0.write().transaction(tx) - .on_block_revert(block)? 
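// Aside: a sketch of the pruning bookkeeping documented above, under the
// assumption that `oldest_digest_range` is the next candidate range to prune
// and `oldest_pruned_digest_range_end` records the highest block at or below
// which no changes tries can exist any more. Pruning a range advances both
// fields in lockstep.
struct ChangesTriesMeta {
    oldest_digest_range: Option<(u64, u64)>,
    oldest_pruned_digest_range_end: u64,
}

impl ChangesTriesMeta {
    // Prune the current oldest range; `next` is the following digest range,
    // or `None` when tries are disabled from there on.
    fn prune_oldest(&mut self, next: Option<(u64, u64)>) {
        if let Some((_, end)) = self.oldest_digest_range.take() {
            self.oldest_pruned_digest_range_end = end;
            self.oldest_digest_range = next;
        }
    }
}

fn main() {
    let mut meta = ChangesTriesMeta {
        oldest_digest_range: Some((1, 16)),
        oldest_pruned_digest_range_end: 0,
    };
    meta.prune_oldest(Some((17, 32)));
    assert_eq!(meta.oldest_pruned_digest_range_end, 16);
    assert_eq!(meta.oldest_digest_range, Some((17, 32)));
}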
- .into_ops() - .into()) + Ok(self.cache.0.write().transaction(tx).on_block_revert(block)?.into_ops().into()) } /// When transaction has been committed. pub fn post_commit(&self, tx: Option>) { if let Some(tx) = tx { - self.cache.0.write().commit(tx.cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there is tx; qed"); + self.cache.0.write().commit(tx.cache_ops).expect( + "only fails if cache with given name isn't loaded yet; cache is already loaded \ + because there is tx; qed", + ); } } /// Commit changes into changes trie build cache. - pub fn commit_build_cache(&self, cache_update: ChangesTrieCacheAction>) { + pub fn commit_build_cache( + &self, + cache_update: ChangesTrieCacheAction>, + ) { self.build_cache.write().perform(cache_update); } @@ -305,7 +323,7 @@ impl DbChangesTrieStorage { // 2) or we are (or were) in period where changes tries are disabled if let Some((begin, end)) = tries_meta.oldest_digest_range { if block_num <= end || block_num - end <= min_blocks_to_keep.into() { - break; + break } tries_meta.oldest_pruned_digest_range_end = end; @@ -331,7 +349,8 @@ impl DbChangesTrieStorage { self.key_lookup_column, self.header_column, BlockId::Number(next_digest_range_start), - )?.hash(), + )? + .hash(), }; let config_for_new_block = new_header @@ -339,22 +358,24 @@ impl DbChangesTrieStorage { .unwrap_or(false); let next_config = match cache_tx { Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { - let config = cache_tx - .new_config - .clone() - .expect("guarded by is_some(); qed"); - ChangesTrieConfigurationRange { + let config = cache_tx.new_config.clone().expect("guarded by is_some(); qed"); + Ok(ChangesTrieConfigurationRange { zero: (block_num, block_hash), end: None, config, - } - }, - _ if config_for_new_block => { - self.configuration_at(&BlockId::Hash(*new_header.expect( - "config_for_new_block is only true when new_header is passed; qed" - ).parent_hash()))? + }) }, - _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?, + _ if config_for_new_block => self.configuration_at(&BlockId::Hash( + *new_header + .expect("config_for_new_block is only true when new_header is passed; qed") + .parent_hash(), + )), + _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash)), + }; + let next_config = match next_config { + Ok(next_config) => next_config, + Err(ClientError::UnknownBlock(_)) => break, // No block means nothing to prune. + Err(e) => return Err(e), }; if let Some(config) = next_config.config { let mut oldest_digest_range = config @@ -368,11 +389,11 @@ impl DbChangesTrieStorage { } tries_meta.oldest_digest_range = Some(oldest_digest_range); - continue; + continue } tries_meta.oldest_digest_range = None; - break; + break } write_tries_meta(tx, self.meta_column, &*tries_meta); @@ -381,17 +402,23 @@ impl DbChangesTrieStorage { } impl PrunableStateChangesTrieStorage for DbChangesTrieStorage { - fn storage(&self) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { + fn storage( + &self, + ) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { self } - fn configuration_at(&self, at: &BlockId) -> ClientResult< - ChangesTrieConfigurationRange, Block::Hash> - > { + fn configuration_at( + &self, + at: &BlockId, + ) -> ClientResult, Block::Hash>> { self.cache .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
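// Aside: a sketch of the loop guard visible above (`block_num <= end ||
// block_num - end <= min_blocks_to_keep` breaks out): a digest range is only
// pruned once the finalized block is more than `min_blocks_to_keep` past the
// range end.
fn can_prune(finalized: u64, range_end: u64, min_blocks_to_keep: u64) -> bool {
    finalized > range_end && finalized - range_end > min_blocks_to_keep
}

fn main() {
    // keep 8 blocks of history: a range ending at 16 survives until block 25
    assert!(!can_prune(24, 16, 8));
    assert!(can_prune(25, 16, 8));
}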
- .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() - .map(|config| ChangesTrieConfigurationRange { zero, end, config })) + .and_then(|(zero, end, encoded)| { + Decode::decode(&mut &encoded[..]) + .ok() + .map(|config| ChangesTrieConfigurationRange { zero, end, config }) + }) .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) } @@ -407,14 +434,21 @@ impl sp_state_machine::ChangesTrieRootsStorage, Nu &self, hash: Block::Hash, ) -> Result>, String> { - utils::read_header::(&*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(hash)) - .map_err(|e| e.to_string()) - .and_then(|maybe_header| maybe_header.map(|header| - sp_state_machine::ChangesTrieAnchorBlockId { + utils::read_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Hash(hash), + ) + .map_err(|e| e.to_string()) + .and_then(|maybe_header| { + maybe_header + .map(|header| sp_state_machine::ChangesTrieAnchorBlockId { hash, number: *header.number(), - } - ).ok_or_else(|| format!("Unknown header: {}", hash))) + }) + .ok_or_else(|| format!("Unknown header: {}", hash)) + }) } fn root( @@ -424,7 +458,10 @@ impl sp_state_machine::ChangesTrieRootsStorage, Nu ) -> Result, String> { // check API requirement: we can't get NEXT block(s) based on anchor if block > anchor.number { - return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number)); + return Err(format!( + "Can't get changes trie root at {} using anchor at {}", + block, anchor.number + )) } // we need to get hash of the block to resolve changes trie root @@ -436,8 +473,12 @@ impl sp_state_machine::ChangesTrieRootsStorage, Nu let mut current_num = anchor.number; let mut current_hash: Block::Hash = convert_hash(&anchor.hash); let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, self.key_lookup_column, self.header_column, BlockId::Number(current_num) - ).map_err(|e| e.to_string())?; + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Number(current_num), + ) + .map_err(|e| e.to_string())?; if maybe_anchor_header.hash() == current_hash { // if anchor is canonicalized, then the block is also canonicalized BlockId::Number(block) @@ -447,8 +488,12 @@ impl sp_state_machine::ChangesTrieRootsStorage, Nu // back from the anchor to the block with given number while current_num != block { let current_header: Block::Header = utils::require_header::( - &*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(current_hash) - ).map_err(|e| e.to_string())?; + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Hash(current_hash), + ) + .map_err(|e| e.to_string())?; current_hash = *current_header.parent_hash(); current_num = current_num - One::one(); @@ -458,18 +503,16 @@ impl sp_state_machine::ChangesTrieRootsStorage, Nu } }; - Ok( - utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - block_id, - ) - .map_err(|e| e.to_string())? - .digest() - .log(DigestItem::as_changes_trie_root) - .cloned() + Ok(utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + block_id, ) + .map_err(|e| e.to_string())? 
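// Aside: a std-only sketch of the non-canonical path in `root()` above. When
// the anchor is not on the canonical chain, the code walks `parent_hash` links
// from the anchor down to the requested number before looking the root up by
// hash. `u64` values stand in for real hashes here.
use std::collections::HashMap;

struct Header {
    parent_hash: u64,
    number: u64,
}

fn hash_at(
    headers: &HashMap<u64, Header>, // hash -> header
    anchor_hash: u64,
    anchor_number: u64,
    block: u64,
) -> Result<u64, String> {
    if block > anchor_number {
        return Err(format!("can't get root at {} using anchor at {}", block, anchor_number));
    }
    let (mut current_hash, mut current_num) = (anchor_hash, anchor_number);
    while current_num != block {
        let header = headers.get(&current_hash).ok_or("unknown header")?;
        current_hash = header.parent_hash;
        current_num -= 1;
    }
    Ok(current_hash)
}

fn main() {
    let mut headers = HashMap::new();
    headers.insert(103, Header { parent_hash: 102, number: 3 });
    headers.insert(102, Header { parent_hash: 101, number: 2 });
    assert_eq!(hash_at(&headers, 103, 3, 1), Ok(101));
}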
+ .digest() + .log(DigestItem::as_changes_trie_root) + .cloned()) } } @@ -478,7 +521,9 @@ impl sp_state_machine::ChangesTrieStorage, NumberFor &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { + fn as_roots_storage( + &self, + ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { self } @@ -501,10 +546,9 @@ fn read_tries_meta( meta_column: u32, ) -> ClientResult> { match db.get(meta_column, meta_keys::CHANGES_TRIES_META) { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => Ok(h), - Err(err) => Err(ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))), - }, + Some(h) => Decode::decode(&mut &h[..]).map_err(|err| { + ClientError::Backend(format!("Error decoding changes tries metadata: {}", err)) + }), None => Ok(ChangesTriesMeta { oldest_digest_range: None, oldest_pruned_digest_range_end: Zero::zero(), @@ -523,18 +567,23 @@ fn write_tries_meta( #[cfg(test)] mod tests { + use super::*; + use crate::{ + tests::{insert_header, prepare_changes, Block}, + Backend, + }; use hash_db::EMPTY_PREFIX; use sc_client_api::backend::{ - Backend as ClientBackend, NewBlockState, BlockImportOperation, PrunableStateChangesTrieStorage, + Backend as ClientBackend, BlockImportOperation, NewBlockState, + PrunableStateChangesTrieStorage, }; use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; use sp_core::H256; - use sp_runtime::testing::{Digest, Header}; - use sp_runtime::traits::{Hash, BlakeTwo256}; + use sp_runtime::{ + testing::{Digest, Header}, + traits::{BlakeTwo256, Hash}, + }; use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; - use crate::Backend; - use crate::tests::{Block, insert_header, prepare_changes}; - use super::*; fn changes(number: u64) -> Option, Vec)>> { Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())]) @@ -554,7 +603,9 @@ mod tests { digest.push(DigestItem::ChangesTrieRoot(root)); changes_trie_update = update; } - digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration))); + digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( + new_configuration, + ))); let header = Header { number, @@ -572,8 +623,9 @@ mod tests { }; let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); + op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)) + .unwrap(); backend.commit_operation(op).unwrap(); header_hash @@ -584,11 +636,13 @@ mod tests { let backend = Backend::::new_test(1000, 100); backend.changes_tries_storage.meta.write().finalized_number = 1000; - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { + let check_changes = |backend: &Backend, + block: u64, + changes: Vec<(Vec, Vec)>| { let (changes_root, mut changes_trie_update) = prepare_changes(changes); let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), - number: block + number: block, }; assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); @@ -605,7 +659,13 @@ mod tests { ]; let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), 
Some(changes0.clone()), Default::default()); + let block0 = insert_header( + &backend, + 0, + Default::default(), + Some(changes0.clone()), + Default::default(), + ); let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); @@ -622,19 +682,29 @@ mod tests { let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); + let block0 = insert_header( + &backend, + 0, + Default::default(), + Some(changes0.clone()), + Default::default(), + ); let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); - let block2_1_1 = insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); + let block2_1_0 = + insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); + let block2_1_1 = + insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); - let block2_2_1 = insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); + let block2_2_0 = + insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); + let block2_2_1 = + insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); // finalize block1 backend.changes_tries_storage.meta.write().finalized_number = 1; @@ -680,7 +750,12 @@ mod tests { if number == 0 { Default::default() } else { - backend.blockchain().header(BlockId::Number(number - 1)).unwrap().unwrap().hash() + backend + .blockchain() + .header(BlockId::Number(number - 1)) + .unwrap() + .unwrap() + .hash() } }; @@ -698,12 +773,14 @@ mod tests { let trie_root = backend .blockchain() .header(BlockId::Number(number)) - .unwrap().unwrap() + .unwrap() + .unwrap() .digest() .log(DigestItem::as_changes_trie_root) .cloned(); match trie_root { - Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), + Some(trie_root) => + backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), None => true, } }; @@ -711,14 +788,10 @@ mod tests { let finalize_block = |number| { let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); let mut tx = Transaction::new(); - let cache_ops = backend.changes_tries_storage.finalize( - &mut tx, - *header.parent_hash(), - header.hash(), - number, - None, - None, - ).unwrap(); + let cache_ops = backend + .changes_tries_storage + .finalize(&mut tx, *header.parent_hash(), header.hash(), number, None, None) + .unwrap(); backend.storage.db.commit(tx).unwrap(); backend.changes_tries_storage.post_commit(Some(cache_ops)); }; @@ -737,11 +810,23 @@ mod tests { (0..6).for_each(|number| insert_regular_header(false, number)); 
insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); (7..17).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change(&backend, 17, parent_hash(17), changes(17), config_at_17); + insert_header_with_configuration_change( + &backend, + 17, + parent_hash(17), + changes(17), + config_at_17, + ); (18..21).for_each(|number| insert_regular_header(false, number)); insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); (22..32).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change(&backend, 32, parent_hash(32), changes(32), config_at_32); + insert_header_with_configuration_change( + &backend, + 32, + parent_hash(32), + changes(32), + config_at_32, + ); (33..50).for_each(|number| insert_regular_header(true, number)); // when only genesis is finalized, nothing is pruned @@ -826,29 +911,24 @@ mod tests { let backend = Backend::::new_test(1000, 100); // configurations at blocks - let config_at_1 = Some(ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - }); - let config_at_3 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); + let config_at_1 = Some(ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 }); + let config_at_3 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); let config_at_5 = None; - let config_at_7 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); + let config_at_7 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); // insert some blocks let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); + let block1 = + insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); + let block3 = + insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); let block4 = insert_header(&backend, 4, block3, None, Default::default()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); + let block5 = + insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); let block6 = insert_header(&backend, 6, block5, None, Default::default()); - let block7 = insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); + let block7 = + insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); // test configuration cache let storage = &backend.changes_tries_storage; @@ -887,17 +967,48 @@ mod tests { let mut backend = Backend::::new_test(10, 10); backend.changes_tries_storage.min_blocks_to_keep = Some(8); - let configs = (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); + let configs = + (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); // insert unfinalized headers - let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, configs[0].clone()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(1), configs[1].clone()); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(2), 
configs[2].clone()); + let block0 = insert_header_with_configuration_change( + &backend, + 0, + Default::default(), + None, + configs[0].clone(), + ); + let block1 = insert_header_with_configuration_change( + &backend, + 1, + block0, + changes(1), + configs[1].clone(), + ); + let block2 = insert_header_with_configuration_change( + &backend, + 2, + block1, + changes(2), + configs[2].clone(), + ); let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); - let block2_1 = insert_header_with_configuration_change(&backend, 2, block1, changes(8), side_config2_1.clone()); - let _ = insert_header_with_configuration_change(&backend, 3, block2_1, changes(9), side_config2_2.clone()); + let block2_1 = insert_header_with_configuration_change( + &backend, + 2, + block1, + changes(8), + side_config2_1.clone(), + ); + let _ = insert_header_with_configuration_change( + &backend, + 3, + block2_1, + changes(9), + side_config2_2.clone(), + ); // insert finalized header => 4 headers are finalized at once let header3 = Header { @@ -905,9 +1016,9 @@ mod tests { parent_hash: block2, state_root: Default::default(), digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[3].clone())), - ], + logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( + configs[3].clone(), + ))], }, extrinsics_root: Default::default(), }; @@ -916,13 +1027,31 @@ mod tests { backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); op.mark_finalized(BlockId::Hash(block1), None).unwrap(); op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - op.set_block_data(header3, None, None, NewBlockState::Final).unwrap(); + op.set_block_data(header3, None, None, None, NewBlockState::Final).unwrap(); backend.commit_operation(op).unwrap(); // insert more unfinalized headers - let block4 = insert_header_with_configuration_change(&backend, 4, block3, changes(4), configs[4].clone()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, changes(5), configs[5].clone()); - let block6 = insert_header_with_configuration_change(&backend, 6, block5, changes(6), configs[6].clone()); + let block4 = insert_header_with_configuration_change( + &backend, + 4, + block3, + changes(4), + configs[4].clone(), + ); + let block5 = insert_header_with_configuration_change( + &backend, + 5, + block4, + changes(5), + configs[5].clone(), + ); + let block6 = insert_header_with_configuration_change( + &backend, + 6, + block5, + changes(6), + configs[6].clone(), + ); // insert finalized header => 4 headers are finalized at once let header7 = Header { @@ -930,9 +1059,9 @@ mod tests { parent_hash: block6, state_root: Default::default(), digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[7].clone())), - ], + logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( + configs[7].clone(), + ))], }, extrinsics_root: Default::default(), }; @@ -941,7 +1070,7 @@ mod tests { op.mark_finalized(BlockId::Hash(block4), None).unwrap(); op.mark_finalized(BlockId::Hash(block5), None).unwrap(); op.mark_finalized(BlockId::Hash(block6), None).unwrap(); - op.set_block_data(header7, None, None, NewBlockState::Final).unwrap(); + op.set_block_data(header7, None, None, None, NewBlockState::Final).unwrap(); backend.commit_operation(op).unwrap(); } @@ -950,22 +1079,33 @@ mod tests { let backend = Backend::::new_test(10, 10); let config0 = 
Some(ChangesTrieConfiguration::new(2, 5)); - let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); + let block0 = + insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); let config1 = Some(ChangesTrieConfiguration::new(2, 6)); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); - backend.finalize_block(BlockId::Number(1), Some(vec![42])).unwrap(); + let block1 = + insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); + let just1 = Some((*b"TEST", vec![42])); + backend.finalize_block(BlockId::Number(1), just1).unwrap(); let config2 = Some(ChangesTrieConfiguration::new(2, 7)); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); + let block2 = + insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); - let _ = insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); + let _ = + insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); - let block2_2 = insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); + let block2_2 = + insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); - let _ = insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); + let _ = + insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); // before truncate there are 2 unfinalized forks - block2_1+block2_3 assert_eq!( - backend.changes_tries_storage.cache.0.write() + backend + .changes_tries_storage + .cache + .0 + .write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -978,7 +1118,11 @@ mod tests { // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 backend.revert(1, false).unwrap(); assert_eq!( - backend.changes_tries_storage.cache.0.write() + backend + .changes_tries_storage + .cache + .0 + .write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -988,11 +1132,15 @@ mod tests { vec![3, 3], ); - // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics), - // the 1st one points to the block #3 because it isn't truncated + // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl + // specifics), the 1st one points to the block #3 because it isn't truncated backend.revert(1, false).unwrap(); assert_eq!( - backend.changes_tries_storage.cache.0.write() + backend + .changes_tries_storage + .cache + .0 + .write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -1004,15 +1152,17 @@ mod tests { // after truncating block2 - there are no unfinalized forks backend.revert(1, false).unwrap(); - assert!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>() - .is_empty(), - ); + assert!(backend + .changes_tries_storage + .cache + .0 + .write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + 
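// Aside: the `let just1 = Some((*b"TEST", vec![42]))` change above reflects
// the move from a bare `Justification` to per-engine `Justifications`: each
// justification is now tagged with a four-byte consensus engine id. A minimal
// stand-in for those types:
type ConsensusEngineId = [u8; 4];
type Justification = (ConsensusEngineId, Vec<u8>);

fn main() {
    let just: Option<Justification> = Some((*b"TEST", vec![42]));
    assert_eq!(&just.as_ref().unwrap().0, b"TEST");
}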
.collect::>() + .is_empty(),); } } diff --git a/client/db/src/children.rs b/client/db/src/children.rs index bfba797cd467b..c11e4204997d1 100644 --- a/client/db/src/children.rs +++ b/client/db/src/children.rs @@ -1,32 +1,39 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Functionality for reading and storing children hashes from db. -use codec::{Encode, Decode}; +use crate::DbHash; +use codec::{Decode, Encode}; use sp_blockchain; -use std::hash::Hash; use sp_database::{Database, Transaction}; -use crate::DbHash; +use std::hash::Hash; /// Returns the hashes of the children blocks of the block with `parent_hash`. pub fn read_children< K: Eq + Hash + Clone + Encode + Decode, V: Eq + Hash + Clone + Encode + Decode, ->(db: &dyn Database, column: u32, prefix: &[u8], parent_hash: K) -> sp_blockchain::Result> { +>( + db: &dyn Database, + column: u32, + prefix: &[u8], + parent_hash: K, +) -> sp_blockchain::Result> { let mut buf = prefix.to_vec(); parent_hash.using_encoded(|s| buf.extend(s)); @@ -63,9 +70,7 @@ pub fn write_children< } /// Prepare transaction to remove the children of `parent_hash`. -pub fn remove_children< - K: Eq + Hash + Clone + Encode + Decode, ->( +pub fn remove_children( tx: &mut Transaction, column: u32, prefix: &[u8], @@ -76,7 +81,6 @@ pub fn remove_children< tx.remove(column, &key); } - #[cfg(test)] mod tests { use super::*; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 8196a750557a8..66adb64c0109e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
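// Aside: a sketch of the children-index scheme in `children.rs` above. The
// lookup key is the column prefix followed by the encoded parent hash, and the
// value is the encoded list of child hashes. A `HashMap` and LE integers stand
// in for the database column and for SCALE encoding.
use std::collections::HashMap;

fn children_key(prefix: &[u8], parent: u64) -> Vec<u8> {
    let mut buf = prefix.to_vec();
    buf.extend_from_slice(&parent.to_le_bytes());
    buf
}

fn main() {
    let mut db: HashMap<Vec<u8>, Vec<u64>> = HashMap::new();
    db.insert(children_key(b"children", 1), vec![2, 3]);
    // as in read_children: a missing key means "no children", not an error
    let children = db.get(&children_key(b"children", 1)).cloned().unwrap_or_default();
    assert_eq!(children, vec![2, 3]);
    assert!(db.get(&children_key(b"children", 9)).is_none());
}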
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -34,77 +34,98 @@ pub mod offchain; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub mod bench; -mod children; mod cache; mod changes_tries_storage; +mod children; +#[cfg(feature = "with-parity-db")] +mod parity_db; +mod stats; mod storage_cache; #[cfg(any(feature = "with-kvdb-rocksdb", test))] mod upgrade; mod utils; -mod stats; -#[cfg(feature = "with-parity-db")] -mod parity_db; -use std::sync::Arc; -use std::path::{Path, PathBuf}; -use std::io; -use std::collections::{HashMap, HashSet}; +use linked_hash_map::LinkedHashMap; +use log::{debug, trace, warn}; +use parking_lot::{Mutex, RwLock}; +use std::{ + collections::{HashMap, HashSet}, + io, + path::{Path, PathBuf}, + sync::Arc, +}; +use crate::{ + changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}, + stats::StateUsageStats, + storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}, + utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, +}; +use codec::{Decode, Encode}; +use hash_db::Prefix; use sc_client_api::{ - UsageInfo, MemoryInfo, IoInfo, MemorySize, - backend::{NewBlockState, PrunableStateChangesTrieStorage, ProvideChtRoots}, - leaves::{LeafSet, FinalizationDisplaced}, cht, + backend::{NewBlockState, ProvideChtRoots, PrunableStateChangesTrieStorage}, + cht, + leaves::{FinalizationDisplaced, LeafSet}, + utils::is_descendent_of, + IoInfo, MemoryInfo, MemorySize, UsageInfo, }; +use sc_state_db::StateDb; +use sp_arithmetic::traits::Saturating; use sp_blockchain::{ - Result as ClientResult, Error as ClientError, - well_known_cache_keys, HeaderBackend, + well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, + HeaderMetadata, HeaderMetadataCache, Result as ClientResult, +}; +use sp_core::{ + offchain::OffchainOverlayedChange, + storage::{well_known_keys, ChildInfo}, + ChangesTrieConfiguration, }; -use codec::{Decode, Encode}; -use hash_db::Prefix; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use sp_database::Transaction; -use parking_lot::RwLock; -use sp_core::ChangesTrieConfiguration; -use sp_core::offchain::storage::{OffchainOverlayedChange, OffchainOverlayedChanges}; -use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_arithmetic::traits::Saturating; -use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Storage}; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, +use sp_runtime::{ + generic::{BlockId, DigestItem}, + traits::{ + Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, + Zero, + }, + Justification, Justifications, Storage, }; use sp_state_machine::{ - DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, - StorageCollection, ChildStorageCollection, - backend::Backend as StateBackend, StateMachineStats, + backend::Backend as StateBackend, ChangesTrieCacheAction, ChangesTrieTransaction, + ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, + StorageCollection, UsageInfo as StateUsageInfo, }; -use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; -use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; -use sc_state_db::StateDb; -use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; -use 
crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; -use crate::stats::StateUsageStats; -use log::{trace, debug, warn}; +use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. -pub use sp_database::Database; pub use sc_state_db::PruningMode; +pub use sp_database::Database; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub use bench::BenchmarkingState; const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; +const CACHE_HEADERS: usize = 8; /// Default value for storage cache child ratio. const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. -pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor ->; +pub type DbState = + sp_state_machine::TrieBackend>>, HashFor>; const DB_HASH_LEN: usize = 32; /// Hash type that this backend uses for the database. -pub type DbHash = [u8; DB_HASH_LEN]; +pub type DbHash = sp_core::H256; + +/// This is used as block body when storage-chain mode is enabled. +#[derive(Debug, Encode, Decode)] +struct ExtrinsicHeader { + /// Hash of the indexed part + indexed_hash: DbHash, // Zero hash if there's no indexed data + /// The rest of the data. + data: Vec, +} /// A reference tracking state. /// @@ -118,11 +139,7 @@ pub struct RefTrackingState { impl RefTrackingState { fn new(state: DbState, storage: Arc>, parent_hash: Option) -> Self { - RefTrackingState { - state, - parent_hash, - storage, - } + RefTrackingState { state, parent_hash, storage } } } @@ -141,7 +158,7 @@ impl std::fmt::Debug for RefTrackingState { } impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; + type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; @@ -193,12 +210,25 @@ impl StateBackend> for RefTrackingState { self.state.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage( + fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state.apply_to_keys_while(child_info, prefix, f) } fn for_child_keys_with_prefix( @@ -212,16 +242,22 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { self.state.storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { self.state.child_storage_root(child_info, delta) } @@ -233,21 +269,17 @@ impl StateBackend> for RefTrackingState { self.state.keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.state.child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) - -> 
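// Aside: the new `ExtrinsicHeader` above supports the storage-chain mode: the
// BODY column keeps, per extrinsic, a small header with the hash of any
// indexed payload (zero hash if none) plus the remaining bytes, while the
// payload itself lives in the TRANSACTION column. A std-only sketch of the
// reassembly; the append-at-the-end layout is an illustrative assumption.
use std::collections::HashMap;

const ZERO_HASH: [u8; 32] = [0u8; 32];

struct ExtrinsicHeader {
    indexed_hash: [u8; 32], // zero hash if there's no indexed data
    data: Vec<u8>,
}

fn load_extrinsic(
    header: &ExtrinsicHeader,
    transactions: &HashMap<[u8; 32], Vec<u8>>,
) -> Option<Vec<u8>> {
    let mut out = header.data.clone();
    if header.indexed_hash != ZERO_HASH {
        out.extend_from_slice(transactions.get(&header.indexed_hash)?);
    }
    Some(out)
}

fn main() {
    let mut txs = HashMap::new();
    txs.insert([1u8; 32], b"indexed payload".to_vec());
    let hdr = ExtrinsicHeader { indexed_hash: [1u8; 32], data: b"prefix:".to_vec() };
    assert_eq!(load_extrinsic(&hdr, &txs).unwrap(), b"prefix:indexed payload".to_vec());
}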
Option<&sp_state_machine::TrieBackend>> - { + fn as_trie_backend( + &self, + ) -> Option<&sp_state_machine::TrieBackend>> { self.state.as_trie_backend() } - fn register_overlay_stats(&mut self, stats: &StateMachineStats) { + fn register_overlay_stats(&self, stats: &StateMachineStats) { self.state.register_overlay_stats(stats); } @@ -262,15 +294,48 @@ pub struct DatabaseSettings { pub state_cache_size: usize, /// Ratio of cache size dedicated to child tries. pub state_cache_child_ratio: Option<(usize, usize)>, - /// Pruning mode. - pub pruning: PruningMode, + /// State pruning mode. + pub state_pruning: PruningMode, /// Where to find the database. - pub source: DatabaseSettingsSrc, + pub source: DatabaseSource, + /// Block pruning mode. + pub keep_blocks: KeepBlocks, + /// Block body/Transaction storage scheme. + pub transaction_storage: TransactionStorageMode, +} + +/// Block pruning settings. +#[derive(Debug, Clone, Copy)] +pub enum KeepBlocks { + /// Keep full block history. + All, + /// Keep N recent finalized blocks. + Some(u32), +} + +/// Block body storage scheme. +#[derive(Debug, Clone, Copy)] +pub enum TransactionStorageMode { + /// Store block body as an encoded list of full transactions in the BODY column + BlockBody, + /// Store a list of hashes in the BODY column and each transaction individually + /// in the TRANSACTION column. + StorageChain, } /// Where to find the database.. #[derive(Debug, Clone)] -pub enum DatabaseSettingsSrc { +pub enum DatabaseSource { + /// Check given path, and see if there is an existing database there. If it's either `RocksDb` + /// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`. + Auto { + /// Path to the paritydb database. + paritydb_path: PathBuf, + /// Path to the rocksdb database. + rocksdb_path: PathBuf, + /// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`. + cache_size: usize, + }, /// Load a RocksDB database from a given path. Recommended for most uses. RocksDb { /// Path to the database. @@ -289,30 +354,44 @@ pub enum DatabaseSettingsSrc { Custom(Arc>), } -impl DatabaseSettingsSrc { - /// Return dabase path for databases that are on the disk. +impl DatabaseSource { + /// Return path for databases that are stored on disk. pub fn path(&self) -> Option<&Path> { match self { - DatabaseSettingsSrc::RocksDb { path, .. } => Some(path.as_path()), - DatabaseSettingsSrc::ParityDb { path, .. } => Some(path.as_path()), - DatabaseSettingsSrc::Custom(_) => None, + // as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550 + // + // IIUC this is needed for polkadot to create its own dbs, so until it can use parity db + // I would think rocksdb, but later parity-db. + DatabaseSource::Auto { paritydb_path, .. } => Some(&paritydb_path), + DatabaseSource::RocksDb { path, .. } | DatabaseSource::ParityDb { path } => Some(&path), + DatabaseSource::Custom(..) => None, } } - /// Check if database supports internal ref counting for state data. - pub fn supports_ref_counting(&self) -> bool { + + /// Set path for databases that are stored on disk. + pub fn set_path(&mut self, p: &Path) -> bool { match self { - DatabaseSettingsSrc::ParityDb { .. } => true, - _ => false, + DatabaseSource::Auto { ref mut paritydb_path, .. } => { + *paritydb_path = p.into(); + true + }, + DatabaseSource::RocksDb { ref mut path, .. } | + DatabaseSource::ParityDb { ref mut path } => { + *path = p.into(); + true + }, + DatabaseSource::Custom(..) 
=> false, } } } -impl std::fmt::Display for DatabaseSettingsSrc { +impl std::fmt::Display for DatabaseSource { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let name = match self { - DatabaseSettingsSrc::RocksDb { .. } => "RocksDb", - DatabaseSettingsSrc::ParityDb { .. } => "ParityDb", - DatabaseSettingsSrc::Custom(_) => "Custom", + DatabaseSource::Auto { .. } => "Auto", + DatabaseSource::RocksDb { .. } => "RocksDb", + DatabaseSource::ParityDb { .. } => "ParityDb", + DatabaseSource::Custom(_) => "Custom", }; write!(f, "{}", name) } @@ -326,18 +405,21 @@ pub(crate) mod columns { pub const KEY_LOOKUP: u32 = 3; pub const HEADER: u32 = 4; pub const BODY: u32 = 5; - pub const JUSTIFICATION: u32 = 6; + pub const JUSTIFICATIONS: u32 = 6; pub const CHANGES_TRIE: u32 = 7; pub const AUX: u32 = 8; /// Offchain workers local storage pub const OFFCHAIN: u32 = 9; pub const CACHE: u32 = 10; + /// Transactions + pub const TRANSACTION: u32 = 11; } struct PendingBlock { header: Block::Header, - justification: Option, + justifications: Option, body: Option>, + indexed_body: Option>>, leaf_state: NewBlockState, } @@ -352,16 +434,40 @@ impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { } } +struct MetaUpdate { + pub hash: Block::Hash, + pub number: NumberFor, + pub is_best: bool, + pub is_finalized: bool, + pub with_state: bool, +} + +fn cache_header( + cache: &mut LinkedHashMap>, + hash: Hash, + header: Option
<Header>
, +) { + cache.insert(hash, header); + while cache.len() > CACHE_HEADERS { + cache.pop_front(); + } +} + /// Block database pub struct BlockchainDb { db: Arc>, meta: Arc, Block::Hash>>>, leaves: RwLock>>, header_metadata_cache: Arc>, + header_cache: Mutex>>, + transaction_storage: TransactionStorageMode, } impl BlockchainDb { - fn new(db: Arc>) -> ClientResult { + fn new( + db: Arc>, + transaction_storage: TransactionStorageMode, + ) -> ClientResult { let meta = read_meta::(&*db, columns::HEADER)?; let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; Ok(BlockchainDb { @@ -369,16 +475,13 @@ impl BlockchainDb { leaves: RwLock::new(leaves), meta: Arc::new(RwLock::new(meta)), header_metadata_cache: Arc::new(HeaderMetadataCache::default()), + header_cache: Default::default(), + transaction_storage, }) } - fn update_meta( - &self, - hash: Block::Hash, - number: ::Number, - is_best: bool, - is_finalized: bool - ) { + fn update_meta(&self, update: MetaUpdate) { + let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update; let mut meta = self.meta.write(); if number.is_zero() { meta.genesis_hash = hash; @@ -391,6 +494,9 @@ impl BlockchainDb { } if is_finalized { + if with_state { + meta.finalized_state = Some((hash.clone(), number)); + } meta.finalized_number = number; meta.finalized_hash = hash; } @@ -398,16 +504,28 @@ impl BlockchainDb { // Get block changes trie root, if available. fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block) - .map(|header| header.and_then(|header| - header.digest().log(DigestItem::as_changes_trie_root) - .cloned())) + self.header(block).map(|header| { + header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) + }) } } impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { fn header(&self, id: BlockId) -> ClientResult> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + match &id { + BlockId::Hash(h) => { + let mut cache = self.header_cache.lock(); + if let Some(result) = cache.get_refresh(h) { + return Ok(result.clone()) + } + let header = + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; + cache_header(&mut cache, h.clone(), header.clone()); + Ok(header) + }, + BlockId::Number(_) => + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id), + } } fn info(&self) -> sc_client_api::blockchain::Info { @@ -418,18 +536,14 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha genesis_hash: meta.genesis_hash, finalized_hash: meta.finalized_hash, finalized_number: meta.finalized_number, + finalized_state: meta.finalized_state.clone(), number_leaves: self.leaves.read().count(), } } fn status(&self, id: BlockId) -> ClientResult { let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), + BlockId::Hash(_) => self.header(id)?.is_some(), BlockId::Number(n) => n <= self.meta.read().best_number, }; match exists { @@ -443,34 +557,81 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } fn hash(&self, number: NumberFor) -> ClientResult> { - self.header(BlockId::Number(number)).and_then(|maybe_header| match maybe_header { - Some(header) => Ok(Some(header.hash().clone())), - None => Ok(None), - }) + self.header(BlockId::Number(number)) + .and_then(|maybe_header| match maybe_header { + Some(header) => Ok(Some(header.hash().clone())), + None => Ok(None), + }) } } impl sc_client_api::blockchain::Backend for 
BlockchainDb { fn body(&self, id: BlockId) -> ClientResult>> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { - Some(body) => match Decode::decode(&mut &body[..]) { + let body = match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { + Some(body) => body, + None => return Ok(None), + }; + match self.transaction_storage { + TransactionStorageMode::BlockBody => match Decode::decode(&mut &body[..]) { Ok(body) => Ok(Some(body)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body: {}", err) - )), - } - None => Ok(None), + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body: {}", + err + ))), + }, + TransactionStorageMode::StorageChain => { + match Vec::::decode(&mut &body[..]) { + Ok(index) => { + let extrinsics: ClientResult> = index + .into_iter() + .map(|ExtrinsicHeader { indexed_hash, data }| { + let decode_result = if indexed_hash != Default::default() { + match self.db.get(columns::TRANSACTION, indexed_hash.as_ref()) { + Some(t) => { + let mut input = + utils::join_input(data.as_ref(), t.as_ref()); + Block::Extrinsic::decode(&mut input) + }, + None => + return Err(sp_blockchain::Error::Backend(format!( + "Missing indexed transaction {:?}", + indexed_hash + ))), + } + } else { + Block::Extrinsic::decode(&mut data.as_ref()) + }; + decode_result.map_err(|err| { + sp_blockchain::Error::Backend(format!( + "Error decoding extrinsic: {}", + err + )) + }) + }) + .collect(); + Ok(Some(extrinsics?)) + }, + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body list: {}", + err + ))), + } + }, } } - fn justification(&self, id: BlockId) -> ClientResult> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATION, id)? { - Some(justification) => match Decode::decode(&mut &justification[..]) { - Ok(justification) => Ok(Some(justification)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding justification: {}", err) - )), - } + fn justifications(&self, id: BlockId) -> ClientResult> { + match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATIONS, id)? { + Some(justifications) => match Decode::decode(&mut &justifications[..]) { + Ok(justifications) => Ok(Some(justifications)), + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding justifications: {}", + err + ))), + }, None => Ok(None), } } @@ -490,6 +651,49 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult> { children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash) } + + fn indexed_transaction(&self, hash: &Block::Hash) -> ClientResult>> { + Ok(self.db.get(columns::TRANSACTION, hash.as_ref())) + } + + fn has_indexed_transaction(&self, hash: &Block::Hash) -> ClientResult { + Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) + } + + fn block_indexed_body(&self, id: BlockId) -> ClientResult>>> { + match self.transaction_storage { + TransactionStorageMode::BlockBody => Ok(None), + TransactionStorageMode::StorageChain => { + let body = match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { + Some(body) => body, + None => return Ok(None), + }; + match Vec::::decode(&mut &body[..]) { + Ok(index) => { + let mut transactions = Vec::new(); + for ExtrinsicHeader { indexed_hash, .. 
} in index.into_iter() { + if indexed_hash != Default::default() { + match self.db.get(columns::TRANSACTION, indexed_hash.as_ref()) { + Some(t) => transactions.push(t), + None => + return Err(sp_blockchain::Error::Backend(format!( + "Missing indexed transaction {:?}", + indexed_hash + ))), + } + } + } + Ok(Some(transactions)) + }, + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body list: {}", + err + ))), + } + }, + } + } } impl sc_client_api::blockchain::ProvideCache for BlockchainDb { @@ -501,17 +705,28 @@ impl sc_client_api::blockchain::ProvideCache for Blockchai impl HeaderMetadata for BlockchainDb { type Error = sp_blockchain::Error; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).map_or_else(|| { - self.header(BlockId::hash(hash))?.map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header_metadata.hash, - header_metadata.clone(), - ); - header_metadata - }).ok_or_else(|| ClientError::UnknownBlock(format!("header not found in db: {}", hash))) - }, Ok) + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header_metadata_cache.header_metadata(hash).map_or_else( + || { + self.header(BlockId::hash(hash))? + .map(|header| { + let header_metadata = CachedHeaderMetadata::from(&header); + self.header_metadata_cache + .insert_header_metadata(header_metadata.hash, header_metadata.clone()); + header_metadata + }) + .ok_or_else(|| { + ClientError::UnknownBlock(format!( + "Header was not found in the database: {:?}", + hash + )) + }) + }, + Ok, + ) } fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { @@ -519,6 +734,7 @@ impl HeaderMetadata for BlockchainDb { } fn remove_header_metadata(&self, hash: Block::Hash) { + self.header_cache.lock().remove(&hash); self.header_metadata_cache.remove_header_metadata(hash); } } @@ -544,8 +760,11 @@ impl ProvideChtRoots for BlockchainDb { }); cht::compute_root::, _>( - cht::size(), cht_number, cht_range.map(|num| self.hash(num)) - ).map(Some) + cht::size(), + cht_number, + cht_range.map(|num| self.hash(num)), + ) + .map(Some) } fn changes_trie_cht_root( @@ -571,7 +790,8 @@ impl ProvideChtRoots for BlockchainDb { cht::size(), cht_number, cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), - ).map(Some) + ) + .map(Some) } } @@ -581,7 +801,7 @@ pub struct BlockImportOperation { db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, - offchain_storage_updates: OffchainOverlayedChanges, + offchain_storage_updates: OffchainChangesCollection, changes_trie_updates: MemoryDB>, changes_trie_build_cache_update: Option>>, changes_trie_config_update: Option>, @@ -590,21 +810,25 @@ pub struct BlockImportOperation { finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, commit_state: bool, + index_ops: Vec, } impl BlockImportOperation { fn apply_offchain(&mut self, transaction: &mut Transaction) { - for ((prefix, key), value_operation) in self.offchain_storage_updates.drain() { - let key: Vec = prefix - .into_iter() - .chain(sp_core::sp_std::iter::once(b'/')) - .chain(key.into_iter()) - .collect(); + let mut count = 0; + for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) 
{ + count += 1; + let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key); match value_operation { - OffchainOverlayedChange::SetValue(val) => transaction.set_from_vec(columns::OFFCHAIN, &key, val), + OffchainOverlayedChange::SetValue(val) => + transaction.set_from_vec(columns::OFFCHAIN, &key, val), OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key), } } + + if count > 0 { + log::debug!(target: "sc_offchain", "Applied {} offchain indexing changes.", count); + } } fn apply_aux(&mut self, transaction: &mut Transaction) { @@ -615,9 +839,45 @@ impl BlockImportOperation { } } } + + fn apply_new_state(&mut self, storage: Storage) -> ClientResult { + if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { + return Err(sp_blockchain::Error::InvalidState.into()) + } + + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), + ) + }); + + let mut changes_trie_config = None; + let (root, transaction) = self.old_state.full_storage_root( + storage.top.iter().map(|(k, v)| { + if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { + changes_trie_config = Some(Decode::decode(&mut &v[..])); + } + (&k[..], Some(&v[..])) + }), + child_delta, + ); + + let changes_trie_config = match changes_trie_config { + Some(Ok(c)) => Some(c), + Some(Err(_)) => return Err(sp_blockchain::Error::InvalidState.into()), + None => None, + }; + + self.db_updates = transaction; + self.changes_trie_config_update = Some(changes_trie_config); + Ok(root) + } } -impl sc_client_api::backend::BlockImportOperation for BlockImportOperation { +impl sc_client_api::backend::BlockImportOperation + for BlockImportOperation +{ type State = SyncingCachingState, Block>; fn state(&self) -> ClientResult> { @@ -628,19 +888,18 @@ impl sc_client_api::backend::BlockImportOperation for Bloc &mut self, header: Block::Header, body: Option>, - justification: Option, + indexed_body: Option>>, + justifications: Option, leaf_state: NewBlockState, ) -> ClientResult<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - if let Some(changes_trie_config_update) = changes_tries_storage::extract_new_configuration(&header) { + if let Some(changes_trie_config_update) = + changes_tries_storage::extract_new_configuration(&header) + { self.changes_trie_config_update = Some(changes_trie_config_update.clone()); } - self.pending_block = Some(PendingBlock { - header, - body, - justification, - leaf_state, - }); + self.pending_block = + Some(PendingBlock { header, body, indexed_body, justifications, leaf_state }); Ok(()) } @@ -653,39 +912,18 @@ impl sc_client_api::backend::BlockImportOperation for Bloc Ok(()) } - fn reset_storage( - &mut self, - storage: Storage, - ) -> ClientResult { - if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - - let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)|( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), - )); - - let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| { - if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { - changes_trie_config = Some( - Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis") - ); - } - (&k[..], Some(&v[..])) - }), - child_delta - ); - - self.db_updates = transaction; - self.changes_trie_config_update = Some(changes_trie_config); + fn reset_storage(&mut self, storage: Storage) -> ClientResult { + let root = self.apply_new_state(storage)?; self.commit_state = true; Ok(root) } + fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> ClientResult { + let root = self.apply_new_state(storage)?; + self.commit_state = commit; + Ok(root) + } + fn update_changes_trie( &mut self, update: ChangesTrieTransaction, NumberFor>, @@ -696,7 +934,8 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { self.aux_ops.append(&mut ops.into_iter().collect()); Ok(()) @@ -714,7 +953,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_offchain_storage( &mut self, - offchain_update: OffchainOverlayedChanges, + offchain_update: OffchainChangesCollection, ) -> ClientResult<()> { self.offchain_storage_updates = offchain_update; Ok(()) @@ -734,6 +973,11 @@ impl sc_client_api::backend::BlockImportOperation for Bloc self.set_head = Some(block); Ok(()) } + + fn update_transaction_index(&mut self, index_ops: Vec) -> ClientResult<()> { + self.index_ops = index_ops; + Ok(()) + } } struct StorageDb { @@ -763,18 +1007,36 @@ impl sc_state_db::NodeDb for StorageDb { } } -struct DbGenesisStorage(pub Block::Hash); +struct DbGenesisStorage { + root: Block::Hash, + storage: PrefixedMemoryDB>, +} impl DbGenesisStorage { + pub fn new(root: Block::Hash, storage: PrefixedMemoryDB>) -> Self { + DbGenesisStorage { root, storage } + } +} + +impl sp_state_machine::Storage> for DbGenesisStorage { + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + use hash_db::HashDB; + Ok(self.storage.get(key, prefix)) + } +} + +struct EmptyStorage(pub Block::Hash); + +impl EmptyStorage { pub fn new() -> Self { let mut root = Block::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); - DbGenesisStorage(root) + EmptyStorage(root) } } -impl sp_state_machine::Storage> for DbGenesisStorage { +impl sp_state_machine::Storage> for EmptyStorage { fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { Ok(None) } @@ -800,13 +1062,13 @@ pub(crate) struct FrozenForDuration { impl FrozenForDuration { fn new(duration: std::time::Duration) -> Self { - Self { - duration, - value: Frozen { at: std::time::Instant::now(), value: None }.into(), - } + Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() } } - fn take_or_else(&self, f: F) -> T where F: FnOnce() -> T { + fn take_or_else(&self, f: F) -> T + where + F: FnOnce() -> T, + { let mut lock = self.value.lock(); if lock.at.elapsed() > self.duration || lock.value.is_none() { let new_value = f(); @@ -821,8 +1083,8 @@ impl FrozenForDuration { /// Disk backend. /// -/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks. -/// Otherwise, trie nodes are kept only from some recent blocks. +/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all +/// blocks. Otherwise, trie nodes are kept only from some recent blocks. 
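// A configuration sketch tying the new settings together (paths and sizes
// below are illustrative assumptions, not values from this change):
// `DatabaseSource::Auto` probes the given paths for an existing database,
// and `TransactionStorageMode::StorageChain` opts into indexed transactions.
//
//     let settings = DatabaseSettings {
//         state_cache_size: 16 * 1024 * 1024,
//         state_cache_child_ratio: Some((50, 100)),
//         state_pruning: PruningMode::keep_blocks(256),
//         source: DatabaseSource::Auto {
//             paritydb_path: "/tmp/db/paritydb".into(),
//             rocksdb_path: "/tmp/db/rocksdb".into(),
//             cache_size: 128, // MiB; only used if RocksDb wins the probe
//         },
//         keep_blocks: KeepBlocks::Some(256),
//         transaction_storage: TransactionStorageMode::StorageChain,
//     };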
pub struct Backend { storage: Arc>, offchain_storage: offchain::LocalStorage, @@ -832,8 +1094,11 @@ pub struct Backend { shared_cache: SharedCache, import_lock: Arc>, is_archive: bool, + keep_blocks: KeepBlocks, + transaction_storage: TransactionStorageMode, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, + genesis_state: RwLock>>>, } impl Backend { @@ -848,13 +1113,29 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { + Self::new_test_with_tx_storage( + keep_blocks, + canonicalization_delay, + TransactionStorageMode::BlockBody, + ) + } + + /// Create new memory-backed client backend for tests. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test_with_tx_storage( + keep_blocks: u32, + canonicalization_delay: u64, + transaction_storage: TransactionStorageMode, + ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); let db_setting = DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - pruning: PruningMode::keep_blocks(keep_blocks), - source: DatabaseSettingsSrc::Custom(db), + state_pruning: PruningMode::keep_blocks(keep_blocks), + source: DatabaseSource::Custom(db), + keep_blocks: KeepBlocks::Some(keep_blocks), + transaction_storage, }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -865,22 +1146,18 @@ impl Backend { canonicalization_delay: u64, config: &DatabaseSettings, ) -> ClientResult { - let is_archive_pruning = config.pruning.is_archive(); - let blockchain = BlockchainDb::new(db.clone())?; + let is_archive_pruning = config.state_pruning.is_archive(); + let blockchain = BlockchainDb::new(db.clone(), config.transaction_storage.clone())?; let meta = blockchain.meta.clone(); - let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from( - format!("State database error: {:?}", e) - ); + let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e); let state_db: StateDb<_, _> = StateDb::new( - config.pruning.clone(), - !config.source.supports_ref_counting(), + config.state_pruning.clone(), + !db.supports_ref_counting(), &StateMetaDb(&*db), - ).map_err(map_e)?; - let storage_db = StorageDb { - db: db.clone(), - state_db, - prefix_keys: !config.source.supports_ref_counting(), - }; + ) + .map_err(map_e)?; + let storage_db = + StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() }; let offchain_storage = offchain::LocalStorage::new(db.clone()); let changes_tries_storage = DbChangesTrieStorage::new( db, @@ -891,14 +1168,10 @@ impl Backend { columns::HEADER, columns::CACHE, meta, - if is_archive_pruning { - None - } else { - Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) - }, + if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, )?; - Ok(Backend { + let backend = Backend { storage: Arc::new(storage_db), offchain_storage, changes_tries_storage, @@ -912,7 +1185,29 @@ impl Backend { is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), - }) + keep_blocks: config.keep_blocks.clone(), + transaction_storage: config.transaction_storage.clone(), + genesis_state: RwLock::new(None), + }; + + // Older DB versions have no last state key. Check if the state is available and set it. 
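// The "last state key" is `meta_keys::FINALIZED_STATE`. Databases created
// before this change never wrote it, so on open we backfill the in-memory
// meta (the column itself is left untouched) whenever a finalized block is
// recorded (`finalized_hash != Default::default()`) and its state is still
// available on disk.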
+ let info = backend.blockchain.info(); + if info.finalized_state.is_none() && + info.finalized_hash != Default::default() && + sc_client_api::Backend::have_state_at( + &backend, + &info.finalized_hash, + info.finalized_number, + ) { + backend.blockchain.update_meta(MetaUpdate { + hash: info.finalized_hash, + number: info.finalized_number, + is_best: info.finalized_hash == info.best_hash, + is_finalized: true, + with_state: true, + }); + } + Ok(backend) } /// Handle setting head within a transaction. `route_to` should be the last @@ -931,15 +1226,23 @@ impl Backend { let mut enacted = Vec::default(); let mut retracted = Vec::default(); + let (best_number, best_hash) = best_to; + let meta = self.blockchain.meta.read(); - // cannot find tree route with empty DB. - if meta.best_hash != Default::default() { - let tree_route = sp_blockchain::tree_route( - &self.blockchain, - meta.best_hash, - route_to, - )?; + if meta.best_number > best_number && + (meta.best_number - best_number).saturated_into::() > + self.canonicalization_delay + { + return Err(sp_blockchain::Error::SetHeadTooOld.into()) + } + + let parent_exists = + self.blockchain.status(BlockId::Hash(route_to))? == sp_blockchain::BlockStatus::InChain; + + // Cannot find tree route with empty DB or when imported a detached block. + if meta.best_hash != Default::default() && parent_exists { + let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?; // uncanonicalize: check safety violations and ensure the numbers no longer // point to these block hashes in the key mapping. @@ -950,15 +1253,11 @@ impl Backend { (&r.number, &r.hash) ); - return Err(::sp_blockchain::Error::NotInFinalizedChain.into()); + return Err(::sp_blockchain::Error::NotInFinalizedChain.into()) } retracted.push(r.hash.clone()); - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - r.number - )?; + utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?; } // canonicalize: set the number lookup to map to this block's hash. 
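// A sketch of the tree-route bookkeeping done by `set_head_with_transaction`
// above; `old_best` and `new_best` are hypothetical hashes, not part of this
// change. Retracted blocks lose their number -> hash mapping, enacted blocks
// gain one, and the walk pivots at the lowest common ancestor.

fn tree_route_sketch<Block: BlockT>(
	blockchain: &BlockchainDb<Block>,
	old_best: Block::Hash,
	new_best: Block::Hash,
) -> sp_blockchain::Result<()> {
	let route = sp_blockchain::tree_route(blockchain, old_best, new_best)?;
	for r in route.retracted() {
		// r.hash drops off the canonical chain; its height may be reused...
		let _ = (&r.number, &r.hash);
	}
	for e in route.enacted() {
		// ...by a block on the new best branch, keyed via e.hash.
		let _ = (&e.number, &e.hash);
	}
	Ok(())
}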
@@ -968,18 +1267,18 @@ impl Backend { transaction, columns::KEY_LOOKUP, e.number, - e.hash + e.hash, )?; } } - let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; + let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?; transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); utils::insert_number_to_key_mapping( transaction, columns::KEY_LOOKUP, - best_to.0, - best_to.1, + best_number, + best_hash, )?; Ok((enacted, retracted)) @@ -990,11 +1289,17 @@ impl Backend { header: &Block::Header, last_finalized: Option, ) -> ClientResult<()> { - let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); - if *header.parent_hash() != last_finalized { - return Err(::sp_blockchain::Error::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", last_finalized, header.hash()), - ).into()); + let last_finalized = + last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); + if last_finalized != self.blockchain.meta.read().genesis_hash && + *header.parent_hash() != last_finalized + { + return Err(sp_blockchain::Error::NonSequentialFinalization(format!( + "Last finalized {:?} not parent of {:?}", + last_finalized, + header.hash() + )) + .into()) } Ok(()) } @@ -1008,10 +1313,11 @@ impl Backend { justification: Option, changes_trie_cache_ops: &mut Option>, finalization_displaced: &mut Option>>, - ) -> ClientResult<(Block::Hash, ::Number, bool, bool)> { + ) -> ClientResult> { // TODO: ensure best chain contains this block. let number = *header.number(); self.ensure_sequential_finalization(header, last_finalized)?; + let with_state = sc_client_api::Backend::have_state_at(self, &hash, number); self.note_finalized( transaction, @@ -1020,16 +1326,17 @@ impl Backend { *hash, changes_trie_cache_ops, finalization_displaced, + with_state, )?; if let Some(justification) = justification { transaction.set_from_vec( - columns::JUSTIFICATION, + columns::JUSTIFICATIONS, &utils::number_and_hash_to_lookup_key(number, hash)?, - justification.encode(), + Justifications::from(justification).encode(), ); } - Ok((*hash, number, false, true)) + Ok(MetaUpdate { hash: *hash, number, is_best: false, is_finalized: true, with_state }) } // performs forced canonicalization with a delay after importing a non-finalized block. @@ -1038,9 +1345,7 @@ impl Backend { transaction: &mut Transaction, hash: Block::Hash, number: NumberFor, - ) - -> ClientResult<()> - { + ) -> ClientResult<()> { let number_u64 = number.saturated_into::(); if number_u64 > self.canonicalization_delay { let new_canonical = number_u64 - self.canonicalization_delay; @@ -1048,28 +1353,34 @@ impl Backend { if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { return Ok(()) } - let hash = if new_canonical == number_u64 { hash } else { - ::sc_client_api::blockchain::HeaderBackend::hash(&self.blockchain, new_canonical.saturated_into())? - .expect("existence of block with number `new_canonical` \ - implies existence of blocks with all numbers before it; qed") + sc_client_api::blockchain::HeaderBackend::hash( + &self.blockchain, + new_canonical.saturated_into(), + )? + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!( + "Can't canonicalize missing block number #{} when importing {:?} (#{})", + new_canonical, hash, number, + )) + })? 
}; + if !sc_client_api::Backend::have_state_at(self, &hash, new_canonical.saturated_into()) { + return Ok(()) + } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = self.storage.state_db.canonicalize_block(&hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e), + )?; apply_state_commit(transaction, commit); - }; - + } Ok(()) } - fn try_commit_operation( - &self, - mut operation: BlockImportOperation, - ) -> ClientResult<()> { + fn try_commit_operation(&self, mut operation: BlockImportOperation) -> ClientResult<()> { let mut transaction = Transaction::new(); let mut finalization_displaced_leaves = None; @@ -1078,12 +1389,13 @@ impl Backend { let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len()); let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; + let mut last_finalized_num = self.blockchain.meta.read().finalized_number; + let best_num = self.blockchain.meta.read().best_number; let mut changes_trie_cache_ops = None; for (block, justification) in operation.finalized_blocks { let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; - meta_updates.push(self.finalize_block_with_transaction( &mut transaction, &block_hash, @@ -1094,12 +1406,16 @@ impl Backend { &mut finalization_displaced_leaves, )?); last_finalized_hash = block_hash; + last_finalized_num = block_header.number().clone(); } let imported = if let Some(pending_block) = operation.pending_block { let hash = pending_block.header.hash(); + let parent_hash = *pending_block.header.parent_hash(); let number = pending_block.header.number().clone(); + let existing_header = + number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some(); // blocks are keyed by number + hash. 
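// (Assuming the existing `utils` encoding: the lookup key is the 4-byte
// big-endian block number followed by the hash, so keys sort by height
// first and all entries for one height sit next to each other.)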
let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1110,39 +1426,68 @@ impl Backend { (Default::default(), Default::default()) }; - utils::insert_hash_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; - - let header_metadata = CachedHeaderMetadata::from(&pending_block.header); - self.blockchain.insert_header_metadata( - header_metadata.hash, - header_metadata, - ); + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); - if let Some(body) = &pending_block.body { - transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); + if let Some(body) = pending_block.body { + match self.transaction_storage { + TransactionStorageMode::BlockBody => { + transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); + }, + TransactionStorageMode::StorageChain => { + let body = + apply_index_ops::(&mut transaction, body, operation.index_ops); + transaction.set_from_vec(columns::BODY, &lookup_key, body); + }, + } + } + if let Some(body) = pending_block.indexed_body { + match self.transaction_storage { + TransactionStorageMode::BlockBody => { + debug!(target: "db", "Commit: ignored indexed block body"); + }, + TransactionStorageMode::StorageChain => { + apply_indexed_body::(&mut transaction, body); + }, + } } - if let Some(justification) = pending_block.justification { - transaction.set_from_vec(columns::JUSTIFICATION, &lookup_key, justification.encode()); + if let Some(justifications) = pending_block.justifications { + transaction.set_from_vec( + columns::JUSTIFICATIONS, + &lookup_key, + justifications.encode(), + ); } if number.is_zero() { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + transaction.set_from_vec( + columns::META, + meta_keys::FINALIZED_BLOCK, + lookup_key.clone(), + ); transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); // for tests, because config is set from within the reset_storage if operation.changes_trie_config_update.is_none() { operation.changes_trie_config_update = Some(None); } + + if operation.commit_state { + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key); + } else { + // When we don't want to commit the genesis state, we still preserve it in + // memory to bootstrap consensus. It is queried for an initial list of + // authorities, etc. + *self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new( + pending_block.header.state_root().clone(), + operation.db_updates.clone(), + ))); + } } let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let mut changeset: sc_state_db::ChangeSet> = + sc_state_db::ChangeSet::default(); let mut ops: u64 = 0; let mut bytes: u64 = 0; let mut removal: u64 = 0; @@ -1150,7 +1495,7 @@ impl Backend { for (mut key, (val, rc)) in operation.db_updates.drain() { if !self.storage.prefix_keys { // Strip prefix - key.drain(0 .. key.len() - DB_HASH_LEN); + key.drain(0..key.len() - DB_HASH_LEN); }; if rc > 0 { ops += 1; @@ -1159,7 +1504,7 @@ impl Backend { changeset.inserted.push((key, val.to_vec())); } else { changeset.inserted.push((key.clone(), val.to_vec())); - for _ in 0 .. rc - 1 { + for _ in 0..rc - 1 { changeset.inserted.push((key.clone(), Default::default())); } } @@ -1169,7 +1514,7 @@ impl Backend { if rc == -1 { changeset.deleted.push(key); } else { - for _ in 0 .. 
-rc { + for _ in 0..-rc { changeset.deleted.push(key.clone()); } } @@ -1180,50 +1525,57 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; - for (key, value) in operation.storage_updates.iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { - ops += 1; - bytes += key.len() as u64; - if let Some(v) = value.as_ref() { - bytes += v.len() as u64; - } + for (key, value) in operation + .storage_updates + .iter() + .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) + { + ops += 1; + bytes += key.len() as u64; + if let Some(v) = value.as_ref() { + bytes += v.len() as u64; + } } self.state_usage.tally_writes(ops, bytes); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block( - &hash, - number_u64, - &pending_block.header.parent_hash(), - changeset, - ).map_err(|e: sc_state_db::Error| - sp_blockchain::Error::from(format!("State database error: {:?}", e)) - )?; + let commit = self + .storage + .state_db + .insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset) + .map_err(|e: sc_state_db::Error| { + sp_blockchain::Error::from_state_db(e) + })?; apply_state_commit(&mut transaction, commit); + if number <= last_finalized_num { + // Canonicalize in the db when re-importing existing blocks with state. + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e), + )?; + apply_state_commit(&mut transaction, commit); + meta_updates.push(MetaUpdate { + hash, + number, + is_best: false, + is_finalized: true, + with_state: true, + }); + } // Check if need to finalize. Genesis is always finalized instantly. let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); finalized } else { - false + number.is_zero() || pending_block.leaf_state.is_final() }; let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); let changes_trie_updates = operation.changes_trie_updates; - let changes_trie_config_update = operation.changes_trie_config_update; - changes_trie_cache_ops = Some(self.changes_tries_storage.commit( - &mut transaction, - changes_trie_updates, - cache::ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - cache::ComplexBlockId::new(hash, number), - header, - finalized, - changes_trie_config_update, - changes_trie_cache_ops, - )?); + debug!(target: "db", + "DB Commit {:?} ({}), best={}, state={}, existing={}", + hash, number, is_best, operation.commit_state, existing_header, + ); + self.state_usage.merge_sm(operation.old_state.usage_info()); // release state reference so that it can be finalized let cache = operation.old_state.into_cache_changes(); @@ -1238,58 +1590,94 @@ impl Backend { hash, &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, + operation.commit_state, )?; } else { // canonicalize blocks which are old enough, regardless of finality. self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? 
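// E.g. with `canonicalization_delay = 4096` (an illustrative value),
// importing non-finalized block #10_000 triggers canonicalization of
// #5_904, provided `have_state_at` still holds for that block; blocks
// at or below the best canonical number are left alone.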
} - debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best); - - let displaced_leaf = { - let mut leaves = self.blockchain.leaves.write(); - let displaced_leaf = leaves.import(hash, number, parent_hash); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - - displaced_leaf - }; + if !existing_header { + let changes_trie_config_update = operation.changes_trie_config_update; + changes_trie_cache_ops = Some(self.changes_tries_storage.commit( + &mut transaction, + changes_trie_updates, + cache::ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), + cache::ComplexBlockId::new(hash, number), + header, + finalized, + changes_trie_config_update, + changes_trie_cache_ops, + )?); - let mut children = children::read_children( - &*self.storage.db, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - )?; - children.push(hash); - children::write_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - children, - ); + { + let mut leaves = self.blockchain.leaves.write(); + leaves.import(hash, number, parent_hash); + leaves.prepare_transaction( + &mut transaction, + columns::META, + meta_keys::LEAF_PREFIX, + ); + } - meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); + let mut children = children::read_children( + &*self.storage.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + )?; + if !children.contains(&hash) { + children.push(hash); + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + children, + ); + } + } - Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache)) + meta_updates.push(MetaUpdate { + hash, + number, + is_best: pending_block.leaf_state.is_best(), + is_finalized: finalized, + with_state: operation.commit_state, + }); + Some((pending_block.header, number, hash, enacted, retracted, is_best, cache)) } else { None }; let cache_update = if let Some(set_head) = operation.set_head { - if let Some(header) = sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { + if let Some(header) = + sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? + { let number = header.number(); let hash = header.hash(); let (enacted, retracted) = self.set_head_with_transaction( &mut transaction, hash.clone(), - (number.clone(), hash.clone()) + (number.clone(), hash.clone()), )?; - meta_updates.push((hash, *number, true, false)); + meta_updates.push(MetaUpdate { + hash, + number: *number, + is_best: true, + is_finalized: false, + with_state: false, + }); Some((enacted, retracted)) } else { - return Err(sp_blockchain::Error::UnknownBlock(format!("Cannot set head {:?}", set_head))) + return Err(sp_blockchain::Error::UnknownBlock(format!( + "Cannot set head {:?}", + set_head + ))) } } else { None @@ -1297,15 +1685,14 @@ impl Backend { self.storage.db.commit(transaction)?; - if let Some(( - number, - hash, - enacted, - retracted, - _displaced_leaf, - is_best, - mut cache, - )) = imported { + // Apply all in-memory state changes. + // Code beyond this point can't fail. 
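// Everything above was staged into one `Transaction` and flushed atomically
// by `self.storage.db.commit(transaction)?`; from here on only in-memory
// structures change (header metadata, the header cache, the shared state
// cache, leaves and meta), so a crash past the commit point can leave the
// caches cold on restart but never ahead of what is on disk.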
+ + if let Some((header, number, hash, enacted, retracted, is_best, mut cache)) = imported { + trace!(target: "db", "DB Commit done {:?}", hash); + let header_metadata = CachedHeaderMetadata::from(&header); + self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata); + cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); cache.sync_cache( &enacted, &retracted, @@ -1323,11 +1710,11 @@ impl Backend { self.changes_tries_storage.post_commit(changes_trie_cache_ops); if let Some((enacted, retracted)) = cache_update { - self.shared_cache.lock().sync(&enacted, &retracted); + self.shared_cache.write().sync(&enacted, &retracted); } - for (hash, number, is_best, is_finalized) in meta_updates { - self.blockchain.update_meta(hash, number, is_best, is_finalized); + for m in meta_updates { + self.blockchain.update_meta(m); } Ok(()) @@ -1343,32 +1730,44 @@ impl Backend { f_header: &Block::Header, f_hash: Block::Hash, changes_trie_cache_ops: &mut Option>, - displaced: &mut Option>> + displaced: &mut Option>>, + with_state: bool, ) -> ClientResult<()> { let f_num = f_header.number().clone(); - if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) { - let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - - let commit = self.storage.state_db.canonicalize_block(&f_hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; + if with_state { + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone()); + } + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + + if sc_client_api::Backend::have_state_at(self, &f_hash, f_num) && + self.storage + .state_db + .best_canonical() + .map(|c| f_num.saturated_into::() > c) + .unwrap_or(true) + { + let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( + |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e), + )?; apply_state_commit(transaction, commit); + } - if !f_num.is_zero() { - let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( - transaction, - *f_header.parent_hash(), - f_hash, - f_num, - if is_inserted { Some(&f_header) } else { None }, - changes_trie_cache_ops.take(), - )?; - *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); - } + if !f_num.is_zero() { + let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( + transaction, + *f_header.parent_hash(), + f_hash, + f_num, + if is_inserted { Some(&f_header) } else { None }, + changes_trie_cache_ops.take(), + )?; + *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); } let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); + self.prune_blocks(transaction, f_num, &new_displaced)?; match displaced { x @ &mut None => *x = Some(new_displaced), &mut Some(ref mut displaced) => displaced.merge(new_displaced), @@ -1376,31 +1775,190 @@ impl Backend { Ok(()) } -} -fn apply_state_commit(transaction: &mut Transaction, commit: sc_state_db::CommitSet>) { - for (key, val) in commit.data.inserted.into_iter() { - transaction.set_from_vec(columns::STATE, &key[..], val); - } - for key in commit.data.deleted.into_iter() { - transaction.remove(columns::STATE, &key[..]); - } - for (key, val) in commit.meta.inserted.into_iter() { - 
transaction.set_from_vec(columns::STATE_META, &key[..], val); - } - for key in commit.meta.deleted.into_iter() { - transaction.remove(columns::STATE_META, &key[..]); - } + fn prune_blocks( + &self, + transaction: &mut Transaction, + finalized: NumberFor, + displaced: &FinalizationDisplaced>, + ) -> ClientResult<()> { + if let KeepBlocks::Some(keep_blocks) = self.keep_blocks { + // Always keep the last finalized block + let keep = std::cmp::max(keep_blocks, 1); + if finalized >= keep.into() { + let number = finalized.saturating_sub(keep.into()); + self.prune_block(transaction, BlockId::::number(number))?; + } + + // Also discard all blocks from displaced branches + for h in displaced.leaves() { + let mut number = finalized; + let mut hash = h.clone(); + // Follow displaced chains back until we reach a finalized block. + // Since leaves are discarded due to finality, they can't have parents + // that are canonical, but not yet finalized. So we stop deletig as soon as + // we reach canonical chain. + while self.blockchain.hash(number)? != Some(hash.clone()) { + let id = BlockId::::hash(hash.clone()); + match self.blockchain.header(id)? { + Some(header) => { + self.prune_block(transaction, id)?; + number = header.number().saturating_sub(One::one()); + hash = header.parent_hash().clone(); + }, + None => break, + } + } + } + } + Ok(()) + } + + fn prune_block( + &self, + transaction: &mut Transaction, + id: BlockId, + ) -> ClientResult<()> { + match read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY, id)? { + Some(body) => { + debug!(target: "db", "Removing block #{}", id); + utils::remove_from_db( + transaction, + &*self.storage.db, + columns::KEY_LOOKUP, + columns::BODY, + id, + )?; + match self.transaction_storage { + TransactionStorageMode::BlockBody => {}, + TransactionStorageMode::StorageChain => { + match Vec::::decode(&mut &body[..]) { + Ok(body) => + for ExtrinsicHeader { indexed_hash, .. 
} in body { + if indexed_hash != Default::default() { + transaction.release(columns::TRANSACTION, indexed_hash); + } + }, + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body list: {}", + err + ))), + } + }, + } + }, + None => return Ok(()), + } + Ok(()) + } + + fn empty_state(&self) -> ClientResult, Block>> { + let root = EmptyStorage::::new().0; // Empty trie + let db_state = DbState::::new(self.storage.clone(), root); + let state = RefTrackingState::new(db_state, self.storage.clone(), None); + let caching_state = CachingState::new(state, self.shared_cache.clone(), None); + Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )) + } +} + +fn apply_state_commit( + transaction: &mut Transaction, + commit: sc_state_db::CommitSet>, +) { + for (key, val) in commit.data.inserted.into_iter() { + transaction.set_from_vec(columns::STATE, &key[..], val); + } + for key in commit.data.deleted.into_iter() { + transaction.remove(columns::STATE, &key[..]); + } + for (key, val) in commit.meta.inserted.into_iter() { + transaction.set_from_vec(columns::STATE_META, &key[..], val); + } + for key in commit.meta.deleted.into_iter() { + transaction.remove(columns::STATE_META, &key[..]); + } +} + +fn apply_index_ops( + transaction: &mut Transaction, + body: Vec, + ops: Vec, +) -> Vec { + let mut extrinsic_headers: Vec = Vec::with_capacity(body.len()); + let mut index_map = HashMap::new(); + let mut renewed_map = HashMap::new(); + for op in ops { + match op { + IndexOperation::Insert { extrinsic, hash, size } => { + index_map.insert(extrinsic, (hash, size)); + }, + IndexOperation::Renew { extrinsic, hash } => { + renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref())); + }, + } + } + for (index, extrinsic) in body.into_iter().enumerate() { + let extrinsic = extrinsic.encode(); + let extrinsic_header = if let Some(hash) = renewed_map.get(&(index as u32)) { + // Bump ref counter + transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref())); + ExtrinsicHeader { indexed_hash: hash.clone(), data: extrinsic } + } else { + match index_map.get(&(index as u32)) { + Some((hash, size)) if *size as usize <= extrinsic.len() => { + let offset = extrinsic.len() - *size as usize; + transaction.store( + columns::TRANSACTION, + DbHash::from_slice(hash.as_ref()), + extrinsic[offset..].to_vec(), + ); + ExtrinsicHeader { + indexed_hash: DbHash::from_slice(hash.as_ref()), + data: extrinsic[..offset].to_vec(), + } + }, + _ => ExtrinsicHeader { indexed_hash: Default::default(), data: extrinsic }, + } + }; + extrinsic_headers.push(extrinsic_header); + } + debug!( + target: "db", + "DB transaction index: {} inserted, {} renewed", + index_map.len(), + renewed_map.len() + ); + extrinsic_headers.encode() +} + +fn apply_indexed_body(transaction: &mut Transaction, body: Vec>) { + for extrinsic in body { + let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic); + transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic); + } } -impl sc_client_api::backend::AuxStore for Backend where Block: BlockT { +impl sc_client_api::backend::AuxStore for Backend +where + Block: BlockT, +{ fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { let mut transaction = Transaction::new(); for (k, v) in insert { 
transaction.set(columns::AUX, k, v); @@ -1424,7 +1982,7 @@ impl sc_client_api::backend::Backend for Backend { type OffchainStorage = offchain::LocalStorage; fn begin_operation(&self) -> ClientResult { - let mut old_state = self.state_at(BlockId::Hash(Default::default()))?; + let mut old_state = self.empty_state()?; old_state.disable_syncing(); Ok(BlockImportOperation { @@ -1441,6 +1999,7 @@ impl sc_client_api::backend::Backend for Backend { finalized_blocks: Vec::new(), set_head: None, commit_state: false, + index_ops: Default::default(), }) } @@ -1449,17 +2008,18 @@ impl sc_client_api::backend::Backend for Backend { operation: &mut Self::BlockImportOperation, block: BlockId, ) -> ClientResult<()> { - operation.old_state = self.state_at(block)?; + if block.is_pre_genesis() { + operation.old_state = self.empty_state()?; + } else { + operation.old_state = self.state_at(block)?; + } operation.old_state.disable_syncing(); operation.commit_state = true; Ok(()) } - fn commit_operation( - &self, - operation: Self::BlockImportOperation, - ) -> ClientResult<()> { + fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { let usage = operation.old_state.usage_info(); self.state_usage.merge_sm(usage); @@ -1471,7 +2031,7 @@ impl sc_client_api::backend::Backend for Backend { e @ Err(_) => { self.storage.state_db.revert_pending(); e - } + }, } } @@ -1486,7 +2046,7 @@ impl sc_client_api::backend::Backend for Backend { let mut displaced = None; let mut changes_trie_cache_ops = None; - let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( + let m = self.finalize_block_with_transaction( &mut transaction, &hash, &header, @@ -1496,11 +2056,54 @@ impl sc_client_api::backend::Backend for Backend { &mut displaced, )?; self.storage.db.commit(transaction)?; - self.blockchain.update_meta(hash, number, is_best, is_finalized); + self.blockchain.update_meta(m); self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) } + fn append_justification( + &self, + block: BlockId, + justification: Justification, + ) -> ClientResult<()> { + let mut transaction: Transaction = Transaction::new(); + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + let header = self.blockchain.expect_header(block)?; + let number = *header.number(); + + // Check if the block is finalized first. + let is_descendent_of = is_descendent_of(&self.blockchain, None); + let last_finalized = self.blockchain.last_finalized()?; + + // We can do a quick check first, before doing a proper but more expensive check + if number > self.blockchain.info().finalized_number || + (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) + { + return Err(ClientError::NotInFinalizedChain) + } + + let justifications = if let Some(mut stored_justifications) = + self.blockchain.justifications(block)? 
+ { + if !stored_justifications.append(justification) { + return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) + } + stored_justifications + } else { + Justifications::from(justification) + }; + + transaction.set_from_vec( + columns::JUSTIFICATIONS, + &utils::number_and_hash_to_lookup_key(number, hash)?, + justifications.encode(), + ); + + self.storage.db.commit(transaction)?; + + Ok(()) + } + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { Some(&self.changes_tries_storage) } @@ -1510,25 +2113,20 @@ impl sc_client_api::backend::Backend for Backend { } fn usage_info(&self) -> Option { - let (io_stats, state_stats) = self.io_stats.take_or_else(|| + let (io_stats, state_stats) = self.io_stats.take_or_else(|| { ( // TODO: implement DB stats and cache size retrieval kvdb::IoStats::empty(), self.state_usage.take(), ) - ); + }); let database_cache = MemorySize::from_bytes(0); - let state_cache = MemorySize::from_bytes( - (*&self.shared_cache).lock().used_storage_cache_size(), - ); + let state_cache = + MemorySize::from_bytes((*&self.shared_cache).read().used_storage_cache_size()); let state_db = self.storage.state_db.memory_info(); Some(UsageInfo { - memory: MemoryInfo { - state_cache, - database_cache, - state_db, - }, + memory: MemoryInfo { state_cache, database_cache, state_db }, io: IoInfo { transactions: io_stats.transactions, bytes_read: io_stats.bytes_read, @@ -1558,29 +2156,31 @@ impl sc_client_api::backend::Backend for Backend { let finalized = self.blockchain.info().finalized_number; let revertible = best_number - finalized; - let n = if !revert_finalized && revertible < n { - revertible - } else { - n - }; + let n = if !revert_finalized && revertible < n { revertible } else { n }; let mut revert_blocks = || -> ClientResult> { - for c in 0 .. n.saturated_into::() { + for c in 0..n.saturated_into::() { if best_number.is_zero() { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); let removed_number = best_number; - let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. Block hash not found.", best_number)))?; + let removed = + self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best_number + )) + })?; let removed_hash = removed.hash(); let prev_number = best_number.saturating_sub(One::one()); - let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. Block hash not found.", best_number)) - )?; + let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. 
Block hash not found.", + best_number + )) + })?; if !self.have_state_at(&prev_hash, prev_number) { return Ok(c.saturated_into::>()) @@ -1595,30 +2195,60 @@ impl sc_client_api::backend::Backend for Backend { let update_finalized = best_number < finalized; - let key = utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; + let key = + utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; let changes_trie_cache_ops = self.changes_tries_storage.revert( &mut transaction, - &cache::ComplexBlockId::new( - removed.hash(), - removed_number, - ), + &cache::ComplexBlockId::new(removed.hash(), removed_number), )?; if update_finalized { transaction.set_from_vec( columns::META, meta_keys::FINALIZED_BLOCK, - key.clone() + key.clone(), ); + reverted_finalized.insert(removed_hash); + if let Some((hash, _)) = self.blockchain.info().finalized_state { + if hash == best_hash { + if !best_number.is_zero() && + self.have_state_at(&prev_hash, best_number - One::one()) + { + let lookup_key = utils::number_and_hash_to_lookup_key( + best_number - One::one(), + prev_hash, + )?; + transaction.set_from_vec( + columns::META, + meta_keys::FINALIZED_STATE, + lookup_key, + ); + } else { + transaction + .remove(columns::META, meta_keys::FINALIZED_STATE); + } + } + } } transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); - children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, best_hash); + children::remove_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + best_hash, + ); self.storage.db.commit(transaction)?; self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); - self.blockchain.update_meta(best_hash, best_number, true, update_finalized); - } - None => return Ok(c.saturated_into::>()) + self.blockchain.update_meta(MetaUpdate { + hash: best_hash, + number: best_number, + is_best: true, + is_finalized: update_finalized, + with_state: false, + }); + }, + None => return Ok(c.saturated_into::>()), } } @@ -1643,6 +2273,46 @@ impl sc_client_api::backend::Backend for Backend { Ok((reverted, reverted_finalized)) } + fn remove_leaf_block(&self, hash: &Block::Hash) -> ClientResult<()> { + let best_hash = self.blockchain.info().best_hash; + + if best_hash == *hash { + return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) + } + + let hdr = self.blockchain.header_metadata(hash.clone())?; + if !self.have_state_at(&hash, hdr.number) { + return Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + hash + ))) + } + + let mut leaves = self.blockchain.leaves.write(); + if !leaves.contains(hdr.number, *hash) { + return Err(sp_blockchain::Error::Backend(format!( + "Can't remove non-leaf block {:?}", + hash + ))) + } + + let mut transaction = Transaction::new(); + if let Some(commit) = self.storage.state_db.remove(hash) { + apply_state_commit(&mut transaction, commit); + } + transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); + let changes_trie_cache_ops = self + .changes_tries_storage + .revert(&mut transaction, &cache::ComplexBlockId::new(*hash, hdr.number))?; + + self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); + leaves.revert(hash.clone(), hdr.number); + leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); + self.storage.db.commit(transaction)?; + self.blockchain().remove_header_metadata(*hash); + Ok(()) + } + fn 
blockchain(&self) -> &BlockchainDb { &self.blockchain } @@ -1650,57 +2320,50 @@ impl sc_client_api::backend::Backend for Backend { fn state_at(&self, block: BlockId) -> ClientResult { use sc_client_api::blockchain::HeaderBackend as BcHeaderBackend; - // special case for genesis initialization - match block { - BlockId::Hash(h) if h == Default::default() => { - let genesis_storage = DbGenesisStorage::::new(); - let root = genesis_storage.0.clone(); - let db_state = DbState::::new(Arc::new(genesis_storage), root); + let is_genesis = match &block { + BlockId::Number(n) if n.is_zero() => true, + BlockId::Hash(h) if h == &self.blockchain.meta.read().genesis_hash => true, + _ => false, + }; + if is_genesis { + if let Some(genesis_state) = &*self.genesis_state.read() { + let root = genesis_state.root.clone(); + let db_state = DbState::::new(genesis_state.clone(), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new( - state, - self.shared_cache.clone(), - None, - ); - return Ok(SyncingCachingState::new( + let caching_state = CachingState::new(state, self.shared_cache.clone(), None); + let mut state = SyncingCachingState::new( caching_state, self.state_usage.clone(), self.blockchain.meta.clone(), self.import_lock.clone(), - )); - }, - _ => {} + ); + state.disable_syncing(); + return Ok(state) + } } let hash = match block { BlockId::Hash(h) => h, - BlockId::Number(n) => self.blockchain.hash(n)?.ok_or_else(|| + BlockId::Number(n) => self.blockchain.hash(n)?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!("Unknown block number {}", n)) - )?, + })?, }; match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { if !self.have_state_at(&hash, hdr.number) { - return Err( - sp_blockchain::Error::UnknownBlock( - format!("State already discarded for {:?}", block) - ) - ) + return Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + block + ))) } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root; let db_state = DbState::::new(self.storage.clone(), root); - let state = RefTrackingState::new( - db_state, - self.storage.clone(), - Some(hash.clone()), - ); - let caching_state = CachingState::new( - state, - self.shared_cache.clone(), - Some(hash), - ); + let state = + RefTrackingState::new(db_state, self.storage.clone(), Some(hash.clone())); + let caching_state = + CachingState::new(state, self.shared_cache.clone(), Some(hash)); Ok(SyncingCachingState::new( caching_state, self.state_usage.clone(), @@ -1708,11 +2371,10 @@ impl sc_client_api::backend::Backend for Backend { self.import_lock.clone(), )) } else { - Err( - sp_blockchain::Error::UnknownBlock( - format!("State already discarded for {:?}", block) - ) - ) + Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + block + ))) } }, Err(e) => Err(e), @@ -1722,13 +2384,13 @@ impl sc_client_api::backend::Backend for Backend { fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { if self.is_archive { match self.blockchain.header_metadata(hash.clone()) { - Ok(header) => { - sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root, - (&[], None), - ).unwrap_or(None).is_some() - }, + Ok(header) => sp_state_machine::Storage::get( + self.storage.as_ref(), + &header.state_root, + (&[], None), + ) + .unwrap_or(None) + .is_some(), _ => false, } } else { @@ -1745,17 +2407,25 @@ impl sc_client_api::backend::LocalBackend for Backend>; @@ -1763,10 +2433,8 @@ 
pub(crate) mod tests { let mut changes_root = H256::default(); let mut changes_trie_update = MemoryDB::::default(); { - let mut trie = TrieDBMut::::new( - &mut changes_trie_update, - &mut changes_root - ); + let mut trie = + TrieDBMut::::new(&mut changes_trie_update, &mut changes_root); for (key, value) in changes { trie.insert(&key, &value).unwrap(); } @@ -1781,6 +2449,18 @@ pub(crate) mod tests { parent_hash: H256, changes: Option, Vec)>>, extrinsics_root: H256, + ) -> H256 { + insert_block(backend, number, parent_hash, changes, extrinsics_root, Vec::new(), None) + } + + pub fn insert_block( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Option, Vec)>>, + extrinsics_root: H256, + body: Vec>, + transaction_index: Option>, ) -> H256 { use sp_runtime::testing::Digest; @@ -1807,8 +2487,12 @@ pub(crate) mod tests { }; let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, Some(Vec::new()), None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); + op.set_block_data(header, Some(body), None, None, NewBlockState::Best).unwrap(); + if let Some(index) = transaction_index { + op.update_transaction_index(index).unwrap(); + } + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)) + .unwrap(); backend.commit_operation(op).unwrap(); header_hash @@ -1842,12 +2526,8 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); db.commit_operation(op).unwrap(); } @@ -1856,12 +2536,18 @@ pub(crate) mod tests { db.storage.db.clone() }; - let backend = Backend::::new(DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), - pruning: PruningMode::keep_blocks(1), - source: DatabaseSettingsSrc::Custom(backing), - }, 0).unwrap(); + let backend = Backend::::new( + DatabaseSettings { + state_cache_size: 16777216, + state_cache_child_ratio: Some((50, 100)), + state_pruning: PruningMode::keep_blocks(1), + source: DatabaseSource::Custom(backing), + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + }, + 0, + ) + .unwrap(); assert_eq!(backend.blockchain().info().best_number, 9); for i in 0..10 { assert!(backend.blockchain().hash(i).unwrap().is_some()) @@ -1873,7 +2559,6 @@ pub(crate) mod tests { let db = Backend::::new_test(2, 0); let hash = { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -1882,27 +2567,22 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - let storage = vec![ - (vec![1, 3, 5], vec![2, 4, 6]), - (vec![1, 2, 3], vec![9, 9, 9]), - ]; + let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])]; - header.state_root = op.old_state.storage_root(storage - .iter() - .map(|(x, y)| (&x[..], Some(&y[..]))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..])))) + .0 + .into(); let hash = header.hash(); op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - }).unwrap(); - op.set_block_data( - header.clone(), - Some(vec![]), - None, - NewBlockState::Best, - 
).unwrap(); + }) + .unwrap(); + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); db.commit_operation(op).unwrap(); @@ -1926,25 +2606,17 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - let storage = vec![ - (vec![1, 3, 5], None), - (vec![5, 5, 5], Some(vec![4, 5, 6])), - ]; + let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - ); + let (root, overlay) = op + .old_state + .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); db.commit_operation(op).unwrap(); @@ -1964,7 +2636,9 @@ pub(crate) mod tests { let hash = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -1979,21 +2653,22 @@ pub(crate) mod tests { op.reset_storage(Storage { top: Default::default(), children_default: Default::default(), - }).unwrap(); + }) + .unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap(), &b"hello"[..]); + assert_eq!( + backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .unwrap(), + &b"hello"[..] + ); hash }; @@ -2010,27 +2685,27 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.db_updates.insert(EMPTY_PREFIX, b"hello"); op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap(), &b"hello"[..]); + assert_eq!( + backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .unwrap(), + &b"hello"[..] 
+ ); hash }; @@ -2047,27 +2722,24 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).is_some()); + assert!(backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .is_some()); hash }; @@ -2084,33 +2756,31 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).is_none()); + assert!(backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .is_none()); } backend.finalize_block(BlockId::Number(1), None).unwrap(); backend.finalize_block(BlockId::Number(2), None).unwrap(); backend.finalize_block(BlockId::Number(3), None).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).is_none()); + assert!(backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .is_none()); } #[test] @@ -2132,8 +2802,14 @@ pub(crate) mod tests { let tree_route = tree_route(blockchain, a3, b2).unwrap(); assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::>(), + vec![a3, a2, a1] + ); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::>(), + vec![b1, b2] + ); } { @@ -2141,14 +2817,20 @@ pub(crate) mod tests { assert_eq!(tree_route.common_block().hash, a1); assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::>(), + vec![a2, a3] + ); } { let tree_route = tree_route(blockchain, a3, a1).unwrap(); assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::>(), + vec![a3, a2] + ); assert!(tree_route.enacted().is_empty()); } @@ -2174,7 +2856,10 @@ pub(crate) mod tests { assert_eq!(tree_route.common_block().hash, block0); assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![block1]); + assert_eq!( + tree_route.enacted().iter().map(|r| 
r.hash).collect::>(), + vec![block1] + ); } } @@ -2272,20 +2957,25 @@ pub(crate) mod tests { #[test] fn test_leaves_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); } #[test] fn test_children_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); substrate_test_runtime_client::trait_tests::test_children_for_backend(backend); } #[test] fn test_blockchain_query_by_number_gets_canonical() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); + substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical( + backend, + ); } #[test] @@ -2303,7 +2993,10 @@ pub(crate) mod tests { let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c, block1_c]); + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![block2_a, block2_b, block2_c, block1_c] + ); backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); backend.finalize_block(BlockId::hash(block2_a), None).unwrap(); @@ -2314,7 +3007,8 @@ pub(crate) mod tests { #[test] fn test_aux() { - let backend: Backend = Backend::new_test(0, 0); + let backend: Backend = + Backend::new_test(0, 0); assert!(backend.get_aux(b"test").unwrap().is_none()); backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap(); assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]); @@ -2324,19 +3018,51 @@ pub(crate) mod tests { #[test] fn test_finalize_block_with_justification() { - use sc_client_api::blockchain::{Backend as BlockChainBackend}; + use sc_client_api::blockchain::Backend as BlockChainBackend; let backend = Backend::::new_test(10, 10); let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); let _ = insert_header(&backend, 1, block0, None, Default::default()); - let justification = Some(vec![1, 2, 3]); + let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3])); backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); assert_eq!( - backend.blockchain().justification(BlockId::Number(1)).unwrap(), - justification, + backend.blockchain().justifications(BlockId::Number(1)).unwrap(), + justification.map(Justifications::from), + ); + } + + #[test] + fn test_append_justification_to_finalized_block() { + use sc_client_api::blockchain::Backend as BlockChainBackend; + + let backend = Backend::::new_test(10, 10); + + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let _ = insert_header(&backend, 1, block0, None, Default::default()); + + let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); + backend.finalize_block(BlockId::Number(1), Some(just0.clone().into())).unwrap(); + + let just1 = (CONS1_ENGINE_ID, vec![4, 5]); + backend.append_justification(BlockId::Number(1), just1.clone()).unwrap(); + + let just2 = (CONS1_ENGINE_ID, vec![6, 7]); + assert!(matches!( + backend.append_justification(BlockId::Number(1), just2), + Err(ClientError::BadJustification(_)) + )); + + let justifications = { + let mut just = 
Justifications::from(just0); + just.append(just1); + just + }; + assert_eq!( + backend.blockchain().justifications(BlockId::Number(1)).unwrap(), + Some(justifications), ); } @@ -2365,6 +3091,97 @@ pub(crate) mod tests { } } + #[test] + fn storage_hash_is_cached_correctly() { + let backend = Backend::::new_test(10, 10); + + let hash0 = { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![(b"test".to_vec(), b"test".to_vec())]; + + header.state_root = op + .old_state + .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..])))) + .0 + .into(); + let hash = header.hash(); + + op.reset_storage(Storage { + top: storage.into_iter().collect(), + children_default: Default::default(), + }) + .unwrap(); + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + hash + }; + + let block0_hash = backend + .state_at(BlockId::Hash(hash0)) + .unwrap() + .storage_hash(&b"test"[..]) + .unwrap(); + + let hash1 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash0, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))]; + + let (root, overlay) = op + .old_state + .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))); + op.update_db_storage(overlay).unwrap(); + header.state_root = root.into(); + let hash = header.hash(); + + op.update_storage(storage, Vec::new()).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + hash + }; + + { + let header = backend.blockchain().header(BlockId::Hash(hash1)).unwrap().unwrap(); + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(hash0)).unwrap(); + op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); + backend.commit_operation(op).unwrap(); + } + + let block1_hash = backend + .state_at(BlockId::Hash(hash1)) + .unwrap() + .storage_hash(&b"test"[..]) + .unwrap(); + + assert_ne!(block0_hash, block1_hash); + } + #[test] fn test_finalize_non_sequential() { let backend = Backend::::new_test(10, 10); @@ -2387,7 +3204,8 @@ pub(crate) mod tests { let backend = Backend::::new_test(10, 10); // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_header(&backend, 0, Default::default(), None, Default::default()); + let mut prev_hash = + insert_header(&backend, 0, Default::default(), None, Default::default()); let cht_size: u64 = cht::size(); for i in 1..1 + cht_size + cht_size + 1 { prev_hash = insert_header(&backend, i, prev_hash, None, Default::default()); @@ -2395,13 +3213,262 @@ pub(crate) mod tests { let blockchain = backend.blockchain(); - let cht_root_1 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0)) - .unwrap().unwrap(); - let cht_root_2 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) - .unwrap().unwrap(); - let cht_root_3 = blockchain.header_cht_root(cht_size, 
cht::end_number(cht_size, 0)) - .unwrap().unwrap(); + let cht_root_1 = blockchain + .header_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap() + .unwrap(); + let cht_root_2 = blockchain + .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = blockchain + .header_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap() + .unwrap(); assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); } + + #[test] + fn prune_blocks_on_finalize() { + for storage in &[TransactionStorageMode::BlockBody, TransactionStorageMode::StorageChain] { + let backend = Backend::::new_test_with_tx_storage(2, 0, *storage); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ); + blocks.push(hash); + prev_hash = hash; + } + + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + for i in 1..5 { + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + } + backend.commit_operation(op).unwrap(); + } + let bc = backend.blockchain(); + assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + } + } + + #[test] + fn prune_blocks_on_finalize_with_fork() { + let backend = + Backend::::new_test_with_tx_storage(2, 10, TransactionStorageMode::StorageChain); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ); + blocks.push(hash); + prev_hash = hash; + } + + // insert a fork at block 2 + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + sp_core::H256::random(), + vec![2.into()], + None, + ); + insert_block( + &backend, + 3, + fork_hash_root, + None, + H256::random(), + vec![3.into(), 11.into()], + None, + ); + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(BlockId::Hash(blocks[4])).unwrap(); + backend.commit_operation(op).unwrap(); + + for i in 1..5 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + let bc = backend.blockchain(); + assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + } + + #[test] + fn renew_transaction_storage() { + let backend = + Backend::::new_test_with_tx_storage(2, 10, TransactionStorageMode::StorageChain); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + let x1 = ExtrinsicWrapper::from(0u64).encode(); + let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); + for i in 0..10 { + let mut index = Vec::new(); + if i == 0 { + 
index.push(IndexOperation::Insert { + extrinsic: 0, + hash: x1_hash.as_ref().to_vec(), + size: (x1.len() - 1) as u32, + }); + } else if i < 5 { + // keep renewing 1st + index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() }); + } // else stop renewing + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + Some(index), + ); + blocks.push(hash); + prev_hash = hash; + } + + for i in 1..10 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + backend.commit_operation(op).unwrap(); + let bc = backend.blockchain(); + if i < 6 { + assert!(bc.indexed_transaction(&x1_hash).unwrap().is_some()); + } else { + assert!(bc.indexed_transaction(&x1_hash).unwrap().is_none()); + } + } + } + + #[test] + fn remove_leaf_block_works() { + let backend = + Backend::::new_test_with_tx_storage(2, 10, TransactionStorageMode::StorageChain); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..2 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ); + blocks.push(hash); + prev_hash = hash; + } + + // insert a fork at block 2, which becomes best block + let best_hash = insert_block( + &backend, + 1, + blocks[0], + None, + sp_core::H256::random(), + vec![42.into()], + None, + ); + assert!(backend.remove_leaf_block(&best_hash).is_err()); + assert!(backend.have_state_at(&prev_hash, 1)); + backend.remove_leaf_block(&prev_hash).unwrap(); + assert_eq!(None, backend.blockchain().header(BlockId::hash(prev_hash.clone())).unwrap()); + assert!(!backend.have_state_at(&prev_hash, 1)); + } + + #[test] + fn test_import_existing_block_as_new_head() { + let backend: Backend = Backend::new_test(10, 3); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + let block3 = insert_header(&backend, 3, block2, None, Default::default()); + let block4 = insert_header(&backend, 4, block3, None, Default::default()); + let block5 = insert_header(&backend, 5, block4, None, Default::default()); + assert_eq!(backend.blockchain().info().best_hash, block5); + + // Insert 1 as best again. This should fail because canonicalization_delay == 3 and best == + // 5 + let header = Header { + number: 1, + parent_hash: block0, + state_root: BlakeTwo256::trie_root(Vec::new()), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + let mut op = backend.begin_operation().unwrap(); + op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); + assert!(matches!(backend.commit_operation(op), Err(sp_blockchain::Error::SetHeadTooOld))); + + // Insert 2 as best again. 
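+ // (With best == 5 and canonicalization_delay == 3, block 5 - 3 == 2 is the
+ // oldest block that can still be set as head, so this time the commit succeeds.)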
+ let header = Header { + number: 2, + parent_hash: block1, + state_root: BlakeTwo256::trie_root(Vec::new()), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + let mut op = backend.begin_operation().unwrap(); + op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); + backend.commit_operation(op).unwrap(); + assert_eq!(backend.blockchain().info().best_hash, block2); + } + + #[test] + fn test_import_existing_block_as_final() { + let backend: Backend = Backend::new_test(10, 10); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let _block2 = insert_header(&backend, 2, block1, None, Default::default()); + // Genesis is auto finalized, the rest are not. + assert_eq!(backend.blockchain().info().finalized_hash, block0); + + // Insert 1 as final again. + let header = Header { + number: 1, + parent_hash: block0, + state_root: BlakeTwo256::trie_root(Vec::new()), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let mut op = backend.begin_operation().unwrap(); + op.set_block_data(header, None, None, None, NewBlockState::Final).unwrap(); + backend.commit_operation(op).unwrap(); + + assert_eq!(backend.blockchain().info().finalized_hash, block1); + } } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index acfb6217ce9e0..bf2da5c61d058 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,31 +18,31 @@ //! RocksDB-based light client blockchain storage. 
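//!
//! A minimal read-side sketch (test-style; `Block` and `new_test` are the test
//! helpers used further below, shown here purely for illustration):
//!
//! ```ignore
//! let db = LightStorage::<Block>::new_test();
//! let info = db.info(); // best/finalized hash and number
//! let genesis_header = db.header(BlockId::Number(0))?; // Option<Header>
//! ```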
-use std::{sync::Arc, collections::HashMap}; -use std::convert::TryInto; use parking_lot::RwLock; +use std::{collections::HashMap, convert::TryInto, sync::Arc}; +use crate::{ + cache::{ComplexBlockId, DbCache, DbCacheSync, EntryType as CacheEntryType}, + utils::{self, block_id_to_lookup_key, meta_keys, read_db, read_meta, DatabaseType, Meta}, + DatabaseSettings, DbHash, FrozenForDuration, +}; +use codec::{Decode, Encode}; +use log::{debug, trace, warn}; use sc_client_api::{ - cht, backend::{AuxStore, NewBlockState, ProvideChtRoots}, UsageInfo, - blockchain::{ - BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo, - }, - Storage, + backend::{AuxStore, NewBlockState, ProvideChtRoots}, + blockchain::{BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo}, + cht, Storage, UsageInfo, }; use sp_blockchain::{ - CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache, - Error as ClientError, Result as ClientResult, - HeaderBackend as BlockchainHeaderBackend, - well_known_cache_keys, + well_known_cache_keys, CachedHeaderMetadata, Error as ClientError, + HeaderBackend as BlockchainHeaderBackend, HeaderMetadata, HeaderMetadataCache, + Result as ClientResult, }; use sp_database::{Database, Transaction}; -use codec::{Decode, Encode}; -use sp_runtime::generic::{DigestItem, BlockId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HashFor}; -use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; -use crate::utils::{self, meta_keys, DatabaseType, Meta, read_db, block_id_to_lookup_key, read_meta}; -use crate::{DatabaseSettings, FrozenForDuration, DbHash}; -use log::{trace, warn, debug}; +use sp_runtime::{ + generic::{BlockId, DigestItem}, + traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, Zero}, +}; pub(crate) mod columns { pub const META: u32 = crate::utils::COLUMN_META; @@ -65,8 +65,6 @@ pub struct LightStorage { meta: RwLock, Block::Hash>>, cache: Arc>, header_metadata_cache: Arc>, - - #[cfg(not(target_os = "unknown"))] io_stats: FrozenForDuration, } @@ -102,7 +100,6 @@ impl LightStorage { meta: RwLock::new(meta), cache: Arc::new(DbCacheSync(RwLock::new(cache))), header_metadata_cache, - #[cfg(not(target_os = "unknown"))] io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), }) } @@ -139,8 +136,8 @@ impl LightStorage { } impl BlockchainHeaderBackend for LightStorage - where - Block: BlockT, +where + Block: BlockT, { fn header(&self, id: BlockId) -> ClientResult> { utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) @@ -151,21 +148,22 @@ impl BlockchainHeaderBackend for LightStorage BlockchainInfo { best_hash: meta.best_hash, best_number: meta.best_number, - genesis_hash: meta.genesis_hash, + genesis_hash: meta.genesis_hash.clone(), finalized_hash: meta.finalized_hash, finalized_number: meta.finalized_number, + finalized_state: if meta.finalized_hash != Default::default() { + Some((meta.genesis_hash, Zero::zero())) + } else { + None + }, number_leaves: 1, } } fn status(&self, id: BlockId) -> ClientResult { let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), + BlockId::Hash(_) => + read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some(), BlockId::Number(n) => n <= self.meta.read().best_number, }; match exists { @@ -175,7 +173,9 @@ impl BlockchainHeaderBackend for LightStorage } fn number(&self, hash: Block::Hash) -> ClientResult>> { - if let Some(lookup_key) = 
block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? { + if let Some(lookup_key) = + block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? + { let number = utils::lookup_key_to_number(&lookup_key)?; Ok(Some(number)) } else { @@ -191,17 +191,25 @@ impl BlockchainHeaderBackend for LightStorage impl HeaderMetadata for LightStorage { type Error = ClientError; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).map_or_else(|| { - self.header(BlockId::hash(hash))?.map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header_metadata.hash, - header_metadata.clone(), - ); - header_metadata - }).ok_or_else(|| ClientError::UnknownBlock(format!("header not found in db: {}", hash))) - }, Ok) + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header_metadata_cache.header_metadata(hash).map_or_else( + || { + self.header(BlockId::hash(hash))? + .map(|header| { + let header_metadata = CachedHeaderMetadata::from(&header); + self.header_metadata_cache + .insert_header_metadata(header_metadata.hash, header_metadata.clone()); + header_metadata + }) + .ok_or_else(|| { + ClientError::UnknownBlock(format!("header not found in db: {}", hash)) + }) + }, + Ok, + ) } fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { @@ -216,10 +224,9 @@ impl HeaderMetadata for LightStorage { impl LightStorage { // Get block changes trie root, if available. fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block) - .map(|header| header.and_then(|header| - header.digest().log(DigestItem::as_changes_trie_root) - .cloned())) + self.header(block).map(|header| { + header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) + }) } /// Handle setting head within a transaction. `route_to` should be the last @@ -246,14 +253,16 @@ impl LightStorage { for retracted in tree_route.retracted() { if retracted.hash == meta.finalized_hash { // TODO: can we recover here? 
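// (For now this is only logged; the retracted block's number-to-key mapping is
// still removed below, the same as for any other retracted block.)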
- warn!("Safety failure: reverting finalized block {:?}", - (&retracted.number, &retracted.hash)); + warn!( + "Safety failure: reverting finalized block {:?}", + (&retracted.number, &retracted.hash) + ); } utils::remove_number_to_key_mapping( transaction, columns::KEY_LOOKUP, - retracted.number + retracted.number, )?; } @@ -262,7 +271,7 @@ impl LightStorage { transaction, columns::KEY_LOOKUP, enacted.number, - enacted.hash + enacted.hash, )?; } } @@ -287,10 +296,11 @@ impl LightStorage { ) -> ClientResult<()> { let meta = self.meta.read(); if &meta.finalized_hash != header.parent_hash() { - return Err(::sp_blockchain::Error::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", - meta.finalized_hash, hash), - ).into()) + return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( + "Last finalized {:?} not parent of {:?}", + meta.finalized_hash, hash + )) + .into()) } let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?; @@ -308,12 +318,14 @@ impl LightStorage { }); let new_header_cht_root = cht::compute_root::, _>( - cht::size(), new_cht_number, cht_range.map(|num| self.hash(num)) + cht::size(), + new_cht_number, + cht_range.map(|num| self.hash(num)), )?; transaction.set( columns::CHT, &cht_key(HEADER_CHT_PREFIX, new_cht_start)?, - new_header_cht_root.as_ref() + new_header_cht_root.as_ref(), ); // if the header includes changes trie root, let's build a changes tries roots CHT @@ -324,14 +336,16 @@ impl LightStorage { current_num = current_num + One::one(); Some(old_current_num) }); - let new_changes_trie_cht_root = cht::compute_root::, _>( - cht::size(), new_cht_number, cht_range - .map(|num| self.changes_trie_root(BlockId::Number(num))) - )?; + let new_changes_trie_cht_root = + cht::compute_root::, _>( + cht::size(), + new_cht_number, + cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), + )?; transaction.set( columns::CHT, &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start)?, - new_changes_trie_cht_root.as_ref() + new_changes_trie_cht_root.as_ref(), ); } @@ -349,7 +363,7 @@ impl LightStorage { transaction, columns::KEY_LOOKUP, prune_block, - hash + hash, )?; transaction.remove(columns::HEADER, &lookup_key); } @@ -365,7 +379,7 @@ impl LightStorage { &self, cht_type: u8, cht_size: NumberFor, - block: NumberFor + block: NumberFor, ) -> ClientResult> { let no_cht_for_block = || ClientError::Backend(format!("Missing CHT for block {}", block)); @@ -378,7 +392,8 @@ impl LightStorage { } let cht_start = cht::start_number(cht_size, cht_number); - self.db.get(columns::CHT, &cht_key(cht_type, cht_start)?) + self.db + .get(columns::CHT, &cht_key(cht_type, cht_start)?) 
.ok_or_else(no_cht_for_block) .and_then(|hash| Block::Hash::decode(&mut &*hash).map_err(|_| no_cht_for_block())) .map(Some) @@ -386,15 +401,20 @@ impl LightStorage { } impl AuxStore for LightStorage - where Block: BlockT, +where + Block: BlockT, { fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { let mut transaction = Transaction::new(); for (k, v) in insert { transaction.set(columns::AUX, k, v); @@ -413,7 +433,8 @@ impl AuxStore for LightStorage } impl Storage for LightStorage - where Block: BlockT, +where + Block: BlockT, { fn import_header( &self, @@ -442,19 +463,12 @@ impl Storage for LightStorage self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; } - utils::insert_hash_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; transaction.set_from_vec(columns::HEADER, &lookup_key, header.encode()); let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header.hash().clone(), - header_metadata, - ); + self.header_metadata_cache + .insert_header_metadata(header.hash().clone(), header_metadata); let is_genesis = number.is_zero(); if is_genesis { @@ -469,25 +483,28 @@ impl Storage for LightStorage }; if finalized { - self.note_finalized( - &mut transaction, - &header, - hash, - )?; + self.note_finalized(&mut transaction, &header, hash)?; } // update changes trie configuration cache if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { - if let Some(new_configuration) = crate::changes_tries_storage::extract_new_configuration(&header) { - cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); + if let Some(new_configuration) = + crate::changes_tries_storage::extract_new_configuration(&header) + { + cache_at + .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); } } { let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(&mut transaction) + let cache_ops = cache + .transaction(&mut transaction) .on_block_insert( - ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), ComplexBlockId::new(hash, number), cache_at, if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, @@ -497,9 +514,10 @@ impl Storage for LightStorage debug!("Light DB Commit {:?} ({})", hash, number); self.db.commit(transaction)?; - cache.commit(cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there are cache_ops; qed"); + cache.commit(cache_ops).expect( + "only fails if cache with given name isn't loaded yet; cache is already loaded \ + because there are cache_ops; qed", + ); } self.update_meta(hash, number, leaf_state.is_best(), finalized); @@ -513,7 +531,11 @@ impl Storage for LightStorage let number = header.number(); let mut transaction = Transaction::new(); - self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?; + self.set_head_with_transaction( + &mut transaction, + hash.clone(), + (number.clone(), hash.clone()), + )?; 
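// The head change and its key-lookup mappings land in a single DB transaction.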
self.db.commit(transaction)?; self.update_meta(hash, header.number().clone(), true, false); @@ -531,17 +553,22 @@ impl Storage for LightStorage self.note_finalized(&mut transaction, &header, hash.clone())?; { let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(&mut transaction) + let cache_ops = cache + .transaction(&mut transaction) .on_block_finalize( - ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), - ComplexBlockId::new(hash, number) + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), + ComplexBlockId::new(hash, number), )? .into_ops(); self.db.commit(transaction)?; - cache.commit(cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there are cache_ops; qed"); + cache.commit(cache_ops).expect( + "only fails if cache with given name isn't loaded yet; cache is already loaded \ + because there are cache_ops; qed", + ); } self.update_meta(hash, header.number().clone(), false, true); @@ -559,9 +586,8 @@ impl Storage for LightStorage Some(self.cache.clone()) } - #[cfg(not(target_os = "unknown"))] fn usage_info(&self) -> Option { - use sc_client_api::{MemoryInfo, IoInfo, MemorySize}; + use sc_client_api::{IoInfo, MemoryInfo, MemorySize}; // TODO: reimplement IO stats let database_cache = MemorySize::from_bytes(0); @@ -586,18 +612,14 @@ impl Storage for LightStorage state_reads_cache: 0, state_writes_cache: 0, state_writes_nodes: 0, - } + }, }) } - - #[cfg(target_os = "unknown")] - fn usage_info(&self) -> Option { - None - } } impl ProvideChtRoots for LightStorage - where Block: BlockT, +where + Block: BlockT, { fn header_cht_root( &self, @@ -625,12 +647,14 @@ fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { #[cfg(test)] pub(crate) mod tests { + use super::*; use sc_client_api::cht; - use sp_core::ChangesTrieConfiguration; - use sp_runtime::generic::{DigestItem, ChangesTrieSignal}; - use sp_runtime::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; use sp_blockchain::{lowest_common_ancestor, tree_route}; - use super::*; + use sp_core::ChangesTrieConfiguration; + use sp_runtime::{ + generic::{ChangesTrieSignal, DigestItem}, + testing::{Block as RawBlock, ExtrinsicWrapper, Header, H256 as Hash}, + }; type Block = RawBlock>; type AuthorityId = sp_core::ed25519::Public; @@ -647,7 +671,10 @@ pub(crate) mod tests { fn header_with_changes_trie(parent: &Hash, number: u64) -> Header { let mut header = default_header(parent, number); - header.digest.logs.push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); + header + .digest + .logs + .push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); header } @@ -693,7 +720,8 @@ pub(crate) mod tests { #[test] fn returns_known_header() { let db = LightStorage::new_test(); - let known_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let known_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap(); let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap(); assert_eq!(header_by_hash, header_by_number); @@ -709,7 +737,8 @@ pub(crate) mod tests { #[test] fn returns_info() { let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + 
insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); let info = db.info(); assert_eq!(info.best_hash, genesis_hash); assert_eq!(info.best_number, 0); @@ -724,17 +753,22 @@ pub(crate) mod tests { #[test] fn returns_block_status() { let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(db.status(BlockId::Hash(genesis_hash)).unwrap(), BlockStatus::InChain); assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), BlockStatus::Unknown); + assert_eq!( + db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), + BlockStatus::Unknown + ); assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); } #[test] fn returns_block_hash() { let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(db.hash(0).unwrap(), Some(genesis_hash)); assert_eq!(db.hash(1).unwrap(), None); } @@ -744,7 +778,8 @@ pub(crate) mod tests { let raw_db = Arc::new(sp_database::MemDb::default()); let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(raw_db.count(columns::HEADER), 1); assert_eq!(raw_db.count(columns::KEY_LOOKUP), 2); @@ -755,43 +790,41 @@ pub(crate) mod tests { #[test] fn finalized_ancient_headers_are_replaced_with_cht() { - fn insert_headers Header>(header_producer: F) -> - (Arc>, LightStorage) - { + fn insert_headers Header>( + header_producer: F, + ) -> (Arc, LightStorage) { let raw_db = Arc::new(sp_database::MemDb::default()); let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); let cht_size: u64 = cht::size(); let ucht_size: usize = cht_size as _; // insert genesis block header (never pruned) - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); + let mut prev_hash = + insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); // insert SIZE blocks && ensure that nothing is pruned for number in 0..cht::size() { - prev_hash = insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); + prev_hash = + insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); } assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size); assert_eq!(raw_db.count(columns::CHT), 0); // insert next SIZE blocks && ensure that nothing is pruned for number in 0..(cht_size as _) { - prev_hash = insert_block( - &db, - HashMap::new(), - || header_producer(&prev_hash, 1 + cht_size + number), - ); + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + cht_size + number) + }); } assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size); assert_eq!(raw_db.count(columns::CHT), 0); - // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of this CHT are pruned - // nothing is yet finalized, so nothing is pruned. 
- prev_hash = insert_block( - &db, - HashMap::new(), - || header_producer(&prev_hash, 1 + cht_size + cht_size), - ); + // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of + // this CHT are pruned nothing is yet finalized, so nothing is pruned. + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + cht_size + cht_size) + }); assert_eq!(raw_db.count(columns::HEADER), 2 + ucht_size + ucht_size); assert_eq!(raw_db.count(columns::CHT), 0); @@ -834,7 +867,10 @@ pub(crate) mod tests { #[test] fn get_cht_fails_for_non_existent_cht() { let cht_size: u64 = cht::size(); - assert!(LightStorage::::new_test().header_cht_root(cht_size, cht_size / 2).unwrap().is_none()); + assert!(LightStorage::::new_test() + .header_cht_root(cht_size, cht_size / 2) + .unwrap() + .is_none()); } #[test] @@ -842,26 +878,41 @@ pub(crate) mod tests { let db = LightStorage::new_test(); // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_with_changes_trie(&Default::default(), 0)); + let mut prev_hash = insert_final_block(&db, HashMap::new(), || { + header_with_changes_trie(&Default::default(), 0) + }); let cht_size: u64 = cht::size(); let ucht_size: usize = cht_size as _; for i in 1..1 + ucht_size + ucht_size + 1 { - prev_hash = insert_block(&db, HashMap::new(), || header_with_changes_trie(&prev_hash, i as u64)); + prev_hash = insert_block(&db, HashMap::new(), || { + header_with_changes_trie(&prev_hash, i as u64) + }); db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); } - let cht_root_1 = db.header_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2).unwrap().unwrap(); - let cht_root_3 = db.header_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); + let cht_root_1 = + db.header_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); + let cht_root_2 = db + .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = + db.header_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); - let cht_root_1 = db.changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db.changes_trie_cht_root( - cht_size, - cht::start_number(cht_size, 0) + cht_size / 2, - ).unwrap().unwrap(); - let cht_root_3 = db.changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); + let cht_root_1 = db + .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap() + .unwrap(); + let cht_root_2 = db + .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = db + .changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap() + .unwrap(); assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); } @@ -877,15 +928,23 @@ pub(crate) mod tests { let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); // fork from genesis: 2 prong. 
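// (b1 uses a distinct extrinsics root so that its hash differs from a1, which
// has the same number and parent.)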
- let b1 = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, Hash::from([1; 32]))); + let b1 = insert_block(&db, HashMap::new(), || { + header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) + }); let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); { let tree_route = tree_route(&db, a3, b2).unwrap(); assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::>(), + vec![a3, a2, a1] + ); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::>(), + vec![b1, b2] + ); } { @@ -893,14 +952,20 @@ pub(crate) mod tests { assert_eq!(tree_route.common_block().hash, a1); assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::>(), + vec![a2, a3] + ); } { let tree_route = tree_route(&db, a3, a1).unwrap(); assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::>(), + vec![a3, a2] + ); assert!(tree_route.enacted().is_empty()); } @@ -924,7 +989,9 @@ pub(crate) mod tests { let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); // fork from genesis: 2 prong. - let b1 = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, Hash::from([1; 32]))); + let b1 = insert_block(&db, HashMap::new(), || { + header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) + }); let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); { @@ -974,7 +1041,11 @@ pub(crate) mod tests { fn authorities_are_cached() { let db = LightStorage::new_test(); - fn run_checks(db: &LightStorage, max: u64, checks: &[(u64, Option>)]) { + fn run_checks( + db: &LightStorage, + max: u64, + checks: &[(u64, Option>)], + ) { for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { let actual = authorities(db.cache(), BlockId::Number(*at)); assert_eq!(*expected, actual); @@ -985,14 +1056,21 @@ pub(crate) mod tests { HashMap::new() } - fn make_authorities(authorities: Vec) -> HashMap> { + fn make_authorities( + authorities: Vec, + ) -> HashMap> { let mut map = HashMap::new(); map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); map } - fn authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).unwrap_or(None) + fn authorities( + cache: &dyn BlockchainCache, + at: BlockId, + ) -> Option> { + cache + .get_at(&well_known_cache_keys::AUTHORITIES, &at) + .unwrap_or(None) .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) } @@ -1016,17 +1094,27 @@ pub(crate) mod tests { (6, Some(vec![auth1(), auth2()])), ]; - let hash0 = insert_final_block(&db, same_authorities(), || default_header(&Default::default(), 0)); + let hash0 = insert_final_block(&db, same_authorities(), || { + default_header(&Default::default(), 0) + }); run_checks(&db, 0, &checks); let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); run_checks(&db, 1, &checks); - let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash1, 2)); + let hash2 = insert_final_block(&db, 
make_authorities(vec![auth1()]), || { + default_header(&hash1, 2) + }); run_checks(&db, 2, &checks); - let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); + let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); run_checks(&db, 3, &checks); - let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash3, 4)); + let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash3, 4) + }); run_checks(&db, 4, &checks); - let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash4, 5)); + let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash4, 5) + }); run_checks(&db, 5, &checks); let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); run_checks(&db, 6, &checks); @@ -1038,9 +1126,14 @@ pub(crate) mod tests { // some older non-best blocks are inserted // ... -> B2(1) -> B2_1(1) -> B2_2(2) // => the cache ignores all writes before best finalized block - let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); + let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_1))); - let hash2_2 = insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash2_1, 4)); + let hash2_2 = + insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash2_1, 4) + }); assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_2))); } @@ -1051,51 +1144,41 @@ pub(crate) mod tests { // \> B6_1_1(5) // \> B6_1_2(6) -> B6_1_3(7) - let hash7 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash7 = + insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - let hash8 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash8 = + insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - let hash6_1 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_1 = + insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - let hash6_1_1 = insert_non_best_block(&db, 
make_authorities(vec![auth5()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - let hash6_2 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_2 = + insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); @@ -1109,10 +1192,7 @@ pub(crate) mod tests { { // finalize block hash6_1 db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); @@ -1121,10 +1201,7 @@ pub(crate) mod tests { assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); // finalize block hash6_2 db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); @@ -1137,7 +1214,8 @@ pub(crate) mod tests { #[test] fn database_is_reopened() { let db = LightStorage::new_test(); - let hash0 = insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let hash0 = + 
insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(db.info().best_hash, hash0); assert_eq!(db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), hash0); @@ -1152,7 +1230,8 @@ pub(crate) mod tests { let db = LightStorage::::new_test(); // insert aux1 + aux2 using direct store access - db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()).unwrap(); + db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()) + .unwrap(); // check aux values assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); @@ -1160,10 +1239,13 @@ pub(crate) mod tests { assert_eq!(db.get_aux(&[3]).unwrap(), None); // delete aux1 + insert aux3 using import operation - db.import_header(default_header(&Default::default(), 0), HashMap::new(), NewBlockState::Best, vec![ - (vec![3], Some(vec![103])), - (vec![1], None), - ]).unwrap(); + db.import_header( + default_header(&Default::default(), 0), + HashMap::new(), + NewBlockState::Best, + vec![(vec![3], Some(vec![103])), (vec![1], None)], + ) + .unwrap(); // check aux values assert_eq!(db.get_aux(&[1]).unwrap(), None); @@ -1203,7 +1285,8 @@ pub(crate) mod tests { }; // restart && check that after restart value is read from the cache - let db = LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); + let db = + LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); assert_eq!( db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), Some(((0, genesis_hash.unwrap()), None, vec![42])), @@ -1219,7 +1302,9 @@ pub(crate) mod tests { // insert block#0 && block#1 (no value for cache is provided) let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!( - db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)).unwrap() + db.cache() + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)) + .unwrap() .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), None, ); @@ -1227,13 +1312,15 @@ pub(crate) mod tests { // insert configuration at block#1 (starts from block#2) insert_block(&db, HashMap::new(), || { let mut header = default_header(&hash0, 1); - header.digest_mut().push( - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_config.clone())) - ); + header.digest_mut().push(DigestItem::ChangesTrieSignal( + ChangesTrieSignal::NewConfiguration(new_config.clone()), + )); header }); assert_eq!( - db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)).unwrap() + db.cache() + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)) + .unwrap() .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), Some(new_config), ); diff --git a/client/db/src/offchain.rs b/client/db/src/offchain.rs index c4f0ce115ca54..c31273ff07c63 100644 --- a/client/db/src/offchain.rs +++ b/client/db/src/offchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,14 +18,11 @@ //! RocksDB-based offchain workers local storage. 
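The offchain-storage hunks that follow repeatedly built the database key by gluing the prefix onto the item key; the patch extracts that into a `concatenate_prefix_and_key` helper. In essence (illustrative usage, not part of the diff):

fn concatenate_prefix_and_key(prefix: &[u8], key: &[u8]) -> Vec<u8> {
    // the db key under columns::OFFCHAIN is simply `prefix ++ key`
    prefix.iter().chain(key.iter()).cloned().collect()
}

// e.g. prefix b"st" and key b"abc" yield the five-byte db key b"stabc";
// entries sharing a prefix therefore sort together within the column.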
-use std::{ - collections::HashMap, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use crate::{columns, Database, DbHash, Transaction}; -use parking_lot::Mutex; use log::error; +use parking_lot::Mutex; /// Offchain local storage #[derive(Clone)] @@ -36,14 +33,13 @@ pub struct LocalStorage { impl std::fmt::Debug for LocalStorage { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("LocalStorage") - .finish() + fmt.debug_struct("LocalStorage").finish() } } impl LocalStorage { /// Create new offchain storage for tests (backed by memorydb) - #[cfg(any(test, feature = "test-helpers"))] + #[cfg(any(feature = "test-helpers", test))] pub fn new_test() -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); @@ -52,18 +48,14 @@ impl LocalStorage { /// Create offchain local storage with given `KeyValueDB` backend. pub fn new(db: Arc>) -> Self { - Self { - db, - locks: Default::default(), - } + Self { db, locks: Default::default() } } } impl sp_core::offchain::OffchainStorage for LocalStorage { fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let key: Vec = prefix.iter().chain(key).cloned().collect(); let mut tx = Transaction::new(); - tx.set(columns::OFFCHAIN, &key, value); + tx.set(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key), value); if let Err(err) = self.db.commit(tx) { error!("Error setting on local storage: {}", err) @@ -71,9 +63,8 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { } fn remove(&mut self, prefix: &[u8], key: &[u8]) { - let key: Vec = prefix.iter().chain(key).cloned().collect(); let mut tx = Transaction::new(); - tx.remove(columns::OFFCHAIN, &key); + tx.remove(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key)); if let Err(err) = self.db.commit(tx) { error!("Error removing on local storage: {}", err) @@ -81,8 +72,7 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { } fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { - let key: Vec = prefix.iter().chain(key).cloned().collect(); - self.db.get(columns::OFFCHAIN, &key) + self.db.get(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key)) } fn compare_and_set( @@ -92,7 +82,7 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { old_value: Option<&[u8]>, new_value: &[u8], ) -> bool { - let key: Vec = prefix.iter().chain(item_key).cloned().collect(); + let key = concatenate_prefix_and_key(prefix, item_key); let key_lock = { let mut locks = self.locks.lock(); locks.entry(key.clone()).or_default().clone() @@ -122,6 +112,11 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { } } +/// Concatenate the prefix and key to create an offchain key in the db. +pub(crate) fn concatenate_prefix_and_key(prefix: &[u8], key: &[u8]) -> Vec { + prefix.iter().chain(key.into_iter()).cloned().collect() +} + #[cfg(test)] mod tests { use super::*; @@ -152,5 +147,4 @@ mod tests { assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); assert!(storage.locks.lock().is_empty(), "Locks map should be empty!"); } - } diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 313069706f33f..1b645ca9fb2b9 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -15,47 +15,74 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use crate::{ + columns, light, + utils::{DatabaseType, NUM_COLUMNS}, +}; /// A `Database` adapter for parity-db. - -use sp_database::{Database, Change, ColumnId, Transaction, error::DatabaseError}; -use crate::utils::{DatabaseType, NUM_COLUMNS}; -use crate::columns; +use sp_database::{error::DatabaseError, Change, ColumnId, Database, Transaction}; struct DbAdapter(parity_db::Db); fn handle_err(result: parity_db::Result) -> T { match result { Ok(r) => r, - Err(e) => { + Err(e) => { panic!("Critical database error: {:?}", e); - } + }, } } /// Wrap parity-db database into a trait object that implements `sp_database::Database` -pub fn open(path: &std::path::Path, db_type: DatabaseType) - -> parity_db::Result>> -{ +pub fn open>( + path: &std::path::Path, + db_type: DatabaseType, + create: bool, +) -> parity_db::Result>> { let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8); - if db_type == DatabaseType::Full { - let mut state_col = &mut config.columns[columns::STATE as usize]; - state_col.ref_counted = true; - state_col.preimage = true; - state_col.uniform = true; + + match db_type { + DatabaseType::Full => { + let indexes = [ + columns::STATE, + columns::HEADER, + columns::BODY, + columns::TRANSACTION, + columns::JUSTIFICATIONS, + ]; + + for i in indexes { + let mut column = &mut config.columns[i as usize]; + column.compression = parity_db::CompressionType::Lz4; + } + + let mut state_col = &mut config.columns[columns::STATE as usize]; + state_col.ref_counted = true; + state_col.preimage = true; + state_col.uniform = true; + }, + DatabaseType::Light => { + config.columns[light::columns::HEADER as usize].compression = + parity_db::CompressionType::Lz4; + }, } - let db = parity_db::Db::open(&config)?; + + let db = if create { + parity_db::Db::open_or_create(&config)? + } else { + parity_db::Db::open(&config)? + }; + Ok(std::sync::Arc::new(DbAdapter(db))) } -impl Database for DbAdapter { +impl> Database for DbAdapter { fn commit(&self, transaction: Transaction) -> Result<(), DatabaseError> { - handle_err(self.0.commit(transaction.0.into_iter().map(|change| - match change { - Change::Set(col, key, value) => (col as u8, key, Some(value)), - Change::Remove(col, key) => (col as u8, key, None), - _ => unimplemented!(), - })) - ); + handle_err(self.0.commit(transaction.0.into_iter().map(|change| match change { + Change::Set(col, key, value) => (col as u8, key, Some(value)), + Change::Remove(col, key) => (col as u8, key, None), + _ => unimplemented!(), + }))); Ok(()) } @@ -64,7 +91,15 @@ impl Database for DbAdapter { handle_err(self.0.get(col as u8, key)) } - fn lookup(&self, _hash: &H) -> Option> { - unimplemented!(); + fn contains(&self, col: ColumnId, key: &[u8]) -> bool { + handle_err(self.0.get_size(col as u8, key)).is_some() + } + + fn value_size(&self, col: ColumnId, key: &[u8]) -> Option { + handle_err(self.0.get_size(col as u8, key)).map(|s| s as usize) + } + + fn supports_ref_counting(&self) -> bool { + true } } diff --git a/client/db/src/stats.rs b/client/db/src/stats.rs index 8d208024b4bb2..9223142ef5aba 100644 --- a/client/db/src/stats.rs +++ b/client/db/src/stats.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -65,7 +65,10 @@ impl StateUsageStats { /// Tally one key read. pub fn tally_key_read(&self, key: &[u8], val: Option<&Vec>, cache: bool) { - self.tally_read(key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), cache); + self.tally_read( + key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), + cache, + ); } /// Tally one child key read. @@ -103,9 +106,11 @@ impl StateUsageStats { self.reads.fetch_add(info.reads.ops, AtomicOrdering::Relaxed); self.bytes_read.fetch_add(info.reads.bytes, AtomicOrdering::Relaxed); self.writes_nodes.fetch_add(info.nodes_writes.ops, AtomicOrdering::Relaxed); - self.bytes_written_nodes.fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); + self.bytes_written_nodes + .fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); self.removed_nodes.fetch_add(info.removed_nodes.ops, AtomicOrdering::Relaxed); - self.bytes_removed_nodes.fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); + self.bytes_removed_nodes + .fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); self.reads_cache.fetch_add(info.cache_reads.ops, AtomicOrdering::Relaxed); self.bytes_read_cache.fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed); } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 0b4b6d4f88ef5..a895324a2e7b9 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1,36 +1,41 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . -//! Global cache state. +//! Global state cache. Maintains recently queried/committed state values +//! Tracks changes over the span of a few recent blocks and handles forks +//! by tracking/removing cache entries for conflicting changes. 
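One behavioural fix hides in the `Cache::sync` hunk below: retracted (and reverted-enacted) keys are now evicted from the hash LRU as well as the value LRU, so a stale `storage_hash` can no longer survive a reorg (the new `reverts_storage_hash` test further down exercises exactly this). A hedged sketch of the rule, with stand-in maps instead of the real `LRUMap`:

use std::collections::HashMap;

struct CacheSketch {
    lru_storage: HashMap<Vec<u8>, Option<Vec<u8>>>, // key -> cached value
    lru_hashes: HashMap<Vec<u8>, [u8; 32]>,         // key -> cached storage hash
}

impl CacheSketch {
    // Every key touched by a retracted block must leave *both* maps;
    // previously only the value map was purged.
    fn purge_retracted(&mut self, retracted_keys: &[Vec<u8>]) {
        for key in retracted_keys {
            self.lru_storage.remove(key);
            self.lru_hashes.remove(key);
        }
    }
}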
-use std::collections::{VecDeque, HashSet, HashMap}; -use std::sync::Arc; -use std::hash::Hash as StdHash; -use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; -use linked_hash_map::{LinkedHashMap, Entry}; +use crate::{stats::StateUsageStats, utils::Meta}; use hash_db::Hasher; -use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; -use sp_core::hexdisplay::HexDisplay; -use sp_core::storage::ChildInfo; +use linked_hash_map::{Entry, LinkedHashMap}; +use log::trace; +use parking_lot::{RwLock, RwLockUpgradableReadGuard}; +use sp_core::{hexdisplay::HexDisplay, storage::ChildInfo}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor}; use sp_state_machine::{ - backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue, - StorageCollection, ChildStorageCollection, + backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey, + StorageValue, TrieBackend, +}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + hash::Hash as StdHash, + sync::Arc, }; -use log::trace; -use crate::{utils::Meta, stats::StateUsageStats}; const STATE_CACHE_BLOCKS: usize = 12; @@ -71,7 +76,7 @@ impl EstimateSize for Vec { impl EstimateSize for Option> { fn estimate_size(&self) -> usize { - self.as_ref().map(|v|v.capacity()).unwrap_or(0) + self.as_ref().map(|v| v.capacity()).unwrap_or(0) } } @@ -80,7 +85,7 @@ struct OptionHOut>(Option); impl> EstimateSize for OptionHOut { fn estimate_size(&self) -> usize { // capacity would be better - self.0.as_ref().map(|v|v.as_ref().len()).unwrap_or(0) + self.0.as_ref().map(|v| v.as_ref().len()).unwrap_or(0) } } @@ -121,20 +126,22 @@ impl LRUMap { }; while *storage_used_size > limit { - if let Some((k,v)) = lmap.pop_front() { + if let Some((k, v)) = lmap.pop_front() { *storage_used_size -= k.estimate_size(); *storage_used_size -= v.estimate_size(); } else { // can happen fairly often as we get value from multiple lru // and only remove from a single lru - break; + break } } } - fn get(&mut self, k: &Q) -> Option<&mut V> - where K: std::borrow::Borrow, - Q: StdHash + Eq { + fn get(&mut self, k: &Q) -> Option<&mut V> + where + K: std::borrow::Borrow, + Q: StdHash + Eq, + { self.0.get_refresh(k) } @@ -145,15 +152,13 @@ impl LRUMap { self.0.clear(); self.1 = 0; } - } impl Cache { /// Returns the used memory size of the storage cache in bytes. pub fn used_storage_cache_size(&self) -> usize { - self.lru_storage.used_size() - + self.lru_child_storage.used_size() - // ignore small hashes storage and self.lru_hashes.used_size() + self.lru_storage.used_size() + self.lru_child_storage.used_size() + // ignore small hashes storage and self.lru_hashes.used_size() } /// Synchronize the shared cache with the best block state. @@ -174,6 +179,7 @@ impl Cache { for a in &m.storage { trace!("Reverting enacted key {:?}", HexDisplay::from(a)); self.lru_storage.remove(a); + self.lru_hashes.remove(a); } for a in &m.child_storage { trace!("Reverting enacted child key {:?}", a); @@ -194,6 +200,7 @@ impl Cache { for a in &m.storage { trace!("Retracted key {:?}", HexDisplay::from(a)); self.lru_storage.remove(a); + self.lru_hashes.remove(a); } for a in &m.child_storage { trace!("Retracted child key {:?}", a); @@ -216,7 +223,7 @@ impl Cache { } } -pub type SharedCache = Arc>>; +pub type SharedCache = Arc>>; /// Fix lru storage size for hash (small 64ko). 
const FIX_LRU_HASH_SIZE: usize = 65_536; @@ -227,20 +234,16 @@ pub fn new_shared_cache<B: BlockT>( child_ratio: (usize, usize), ) -> SharedCache<B> { let top = child_ratio.1.saturating_sub(child_ratio.0); - Arc::new( - Mutex::new( - Cache { - lru_storage: LRUMap( - LinkedHashMap::new(), 0, shared_cache_size * top / child_ratio.1 - ), - lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE), - lru_child_storage: LRUMap( - LinkedHashMap::new(), 0, shared_cache_size * child_ratio.0 / child_ratio.1 - ), - modifications: VecDeque::new(), - } - ) - ) + Arc::new(RwLock::new(Cache { + lru_storage: LRUMap(LinkedHashMap::new(), 0, shared_cache_size * top / child_ratio.1), + lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE), + lru_child_storage: LRUMap( + LinkedHashMap::new(), + 0, + shared_cache_size * child_ratio.0 / child_ratio.1, + ), + modifications: VecDeque::new(), + })) } #[derive(Debug)] @@ -331,7 +334,7 @@ impl<B: BlockT> CacheChanges<B> { commit_number: Option<NumberFor<B>>, is_best: bool, ) { - let mut cache = self.shared_cache.lock(); + let mut cache = self.shared_cache.write(); trace!( "Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}", commit_number, @@ -341,15 +344,30 @@ impl<B: BlockT> CacheChanges<B> { ); let cache = &mut *cache; // Filter out committing block if any. - let enacted: Vec<_> = enacted + let mut enacted: Vec<_> = enacted .iter() .filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) .cloned() .collect(); - cache.sync(&enacted, retracted); + + let mut retracted = std::borrow::Cow::Borrowed(retracted); + if let Some(commit_hash) = &commit_hash { + if let Some(m) = cache.modifications.iter_mut().find(|m| &m.hash == commit_hash) { + if m.is_canon != is_best { + // Same block committed twice with different state changes. + // Treat it as reenacted/retracted.
+ if is_best { + enacted.push(commit_hash.clone()); + } else { + retracted.to_mut().push(commit_hash.clone()); + } + } + } + } + cache.sync(&enacted, &retracted); // Propagate cache only if committing on top of the latest canonical state - // blocks are ordered by number and only one block with a given number is marked as canonical - // (contributed to canonical state cache) + // blocks are ordered by number and only one block with a given number is marked as + // canonical (contributed to canonical state cache) if let Some(_) = self.parent_hash { let mut local_cache = self.local_cache.write(); if is_best { @@ -372,16 +390,15 @@ impl CacheChanges { } } - if let ( - Some(ref number), Some(ref hash), Some(ref parent)) - = (commit_number, commit_hash, self.parent_hash) + if let (Some(ref number), Some(ref hash), Some(ref parent)) = + (commit_number, commit_hash, self.parent_hash) { if cache.modifications.len() == STATE_CACHE_BLOCKS { cache.modifications.pop_back(); } let mut modifications = HashSet::new(); let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes)| + child_changes.into_iter().for_each(|(sk, changes)| { for (k, v) in changes.into_iter() { let k = (sk.clone(), k); if is_best { @@ -389,7 +406,7 @@ impl CacheChanges { } child_modifications.insert(k); } - ); + }); for (k, v) in changes.into_iter() { if is_best { cache.lru_hashes.remove(&k); @@ -407,7 +424,9 @@ impl CacheChanges { is_canon: is_best, parent: parent.clone(), }; - let insert_at = cache.modifications.iter() + let insert_at = cache + .modifications + .iter() .enumerate() .find(|(_, m)| m.number < *number) .map(|(i, _)| i); @@ -444,19 +463,22 @@ impl>, B: BlockT> CachingState { } } - /// Check if the key can be returned from cache by matching current block parent hash against canonical - /// state and filtering out entries modified in later blocks. + /// Check if the key can be returned from cache by matching current block parent hash against + /// canonical state and filtering out entries modified in later blocks. fn is_allowed( key: Option<&[u8]>, child_key: Option<&ChildStorageKey>, parent_hash: &Option, - modifications: &VecDeque> + modifications: &VecDeque>, ) -> bool { let mut parent = match *parent_hash { None => { - trace!("Cache lookup skipped for {:?}: no parent hash", key.as_ref().map(HexDisplay::from)); - return false; - } + trace!( + "Cache lookup skipped for {:?}: no parent hash", + key.as_ref().map(HexDisplay::from) + ); + return false + }, Some(ref parent) => parent, }; // Ignore all storage entries modified in later blocks. 
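The hunk that follows reformats the core of `is_allowed`. Conceptually it walks the parent links through the recent `modifications` window and only trusts a cached entry if it reaches a canonical ancestor without crossing a block that rewrote the key. A simplified sketch (child keys and block numbers omitted; stand-in hash type):

use std::collections::HashSet;

struct BlockChangesSketch {
    hash: u64,   // stand-in for a real block hash
    parent: u64,
    is_canon: bool,
    storage: HashSet<Vec<u8>>, // keys modified in this block
}

// `modifications` is ordered newest-first, as in the real deque.
fn is_allowed_sketch(key: &[u8], mut parent: u64, modifications: &[BlockChangesSketch]) -> bool {
    for m in modifications {
        if m.hash == parent {
            if m.is_canon {
                return true // reached canonical state with no conflicts
            }
            parent = m.parent;
        }
        if m.storage.contains(key) {
            return false // key modified in a later or sibling block
        }
    }
    false // parent not in the tracked window: cannot prove the entry is safe
}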
@@ -467,20 +489,23 @@ impl>, B: BlockT> CachingState { for m in modifications { if &m.hash == parent { if m.is_canon { - return true; + return true } parent = &m.parent; } if let Some(key) = key { if m.storage.contains(key) { - trace!("Cache lookup skipped for {:?}: modified in a later block", HexDisplay::from(&key)); - return false; + trace!( + "Cache lookup skipped for {:?}: modified in a later block", + HexDisplay::from(&key) + ); + return false } } if let Some(child_key) = child_key { if m.child_storage.contains(child_key) { trace!("Cache lookup skipped for {:?}: modified in a later block", child_key); - return false; + return false } } } @@ -506,17 +531,22 @@ impl>, B: BlockT> StateBackend> for Cachin return Ok(entry) } - let mut cache = self.cache.shared_cache.lock(); - if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { - if let Some(entry) = cache.lru_storage.get(key).map(|a| a.clone()) { - trace!("Found in shared cache: {:?}", HexDisplay::from(&key)); - self.usage.tally_key_read(key, entry.as_ref(), true); - return Ok(entry) + { + let cache = self.cache.shared_cache.upgradable_read(); + if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { + let mut cache = RwLockUpgradableReadGuard::upgrade(cache); + if let Some(entry) = cache.lru_storage.get(key).map(|a| a.clone()) { + trace!("Found in shared cache: {:?}", HexDisplay::from(&key)); + self.usage.tally_key_read(key, entry.as_ref(), true); + return Ok(entry) + } } } trace!("Cache miss: {:?}", HexDisplay::from(&key)); let value = self.state.storage(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache).storage.insert(key.to_vec(), value.clone()); + RwLockUpgradableReadGuard::upgrade(local_cache) + .storage + .insert(key.to_vec(), value.clone()); self.usage.tally_key_read(key, value.as_ref(), false); Ok(value) } @@ -527,16 +557,21 @@ impl>, B: BlockT> StateBackend> for Cachin trace!("Found hash in local cache: {:?}", HexDisplay::from(&key)); return Ok(entry) } - let mut cache = self.cache.shared_cache.lock(); - if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { - if let Some(entry) = cache.lru_hashes.get(key).map(|a| a.0.clone()) { - trace!("Found hash in shared cache: {:?}", HexDisplay::from(&key)); - return Ok(entry) + { + let cache = self.cache.shared_cache.upgradable_read(); + if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { + let mut cache = RwLockUpgradableReadGuard::upgrade(cache); + if let Some(entry) = cache.lru_hashes.get(key).map(|a| a.0.clone()) { + trace!("Found hash in shared cache: {:?}", HexDisplay::from(&key)); + return Ok(entry) + } } } trace!("Cache hash miss: {:?}", HexDisplay::from(&key)); let hash = self.state.storage_hash(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache).hashes.insert(key.to_vec(), hash); + RwLockUpgradableReadGuard::upgrade(local_cache) + .hashes + .insert(key.to_vec(), hash); Ok(hash) } @@ -549,26 +584,27 @@ impl>, B: BlockT> StateBackend> for Cachin let local_cache = self.cache.local_cache.upgradable_read(); if let Some(entry) = local_cache.child_storage.get(&key).cloned() { trace!("Found in local cache: {:?}", key); - return Ok( - self.usage.tally_child_key_read(&key, entry, true) - ) + return Ok(self.usage.tally_child_key_read(&key, entry, true)) } - let mut cache = self.cache.shared_cache.lock(); - if Self::is_allowed(None, Some(&key), &self.cache.parent_hash, &cache.modifications) { - if let Some(entry) = 
cache.lru_child_storage.get(&key).map(|a| a.clone()) { - trace!("Found in shared cache: {:?}", key); - return Ok( - self.usage.tally_child_key_read(&key, entry, true) - ) + { + let cache = self.cache.shared_cache.upgradable_read(); + if Self::is_allowed(None, Some(&key), &self.cache.parent_hash, &cache.modifications) { + let mut cache = RwLockUpgradableReadGuard::upgrade(cache); + if let Some(entry) = cache.lru_child_storage.get(&key).map(|a| a.clone()) { + trace!("Found in shared cache: {:?}", key); + return Ok(self.usage.tally_child_key_read(&key, entry, true)) + } } } trace!("Cache miss: {:?}", key); let value = self.state.child_storage(child_info, &key.1[..])?; // just pass it through the usage counter - let value = self.usage.tally_child_key_read(&key, value, false); + let value = self.usage.tally_child_key_read(&key, value, false); - RwLockUpgradableReadGuard::upgrade(local_cache).child_storage.insert(key, value.clone()); + RwLockUpgradableReadGuard::upgrade(local_cache) + .child_storage + .insert(key, value.clone()); Ok(value) } @@ -584,12 +620,25 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.exists_child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state.apply_to_keys_while(child_info, prefix, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -623,16 +672,22 @@ impl>, B: BlockT> StateBackend> for Cachin fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { self.state.storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { self.state.child_storage_root(child_info, delta) } @@ -644,19 +699,15 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.state.child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + fn as_trie_backend(&self) -> Option<&TrieBackend>> { self.state.as_trie_backend() } - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { self.overlay_stats.add(stats); } @@ -697,13 +748,7 @@ impl SyncingCachingState { meta: Arc, B::Hash>>>, lock: Arc>, ) -> Self { - Self { - caching_state: Some(caching_state), - state_usage, - meta, - lock, - disable_syncing: false, - } + Self { caching_state: Some(caching_state), state_usage, meta, lock, disable_syncing: false } } /// Returns the reference to the internal [`CachingState`]. 
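The `storage`/`storage_hash`/`child_storage` hunks above all switch the shared cache from a `Mutex` to a `parking_lot::RwLock` taken as an upgradable read: the guard is only upgraded to a write lock on an actual LRU hit (refreshing LRU order needs `&mut`), so cache misses never serialize readers. A condensed sketch of the pattern, with a plain map standing in for the real cache:

use parking_lot::{RwLock, RwLockUpgradableReadGuard};
use std::collections::HashMap;

fn cached_get(shared: &RwLock<HashMap<Vec<u8>, Vec<u8>>>, key: &[u8]) -> Option<Vec<u8>> {
    // coexists with plain readers; only one upgradable guard at a time
    let cache = shared.upgradable_read();
    if cache.contains_key(key) {
        // exclusive access just long enough to touch the entry
        let mut cache = RwLockUpgradableReadGuard::upgrade(cache);
        return cache.get_mut(key).map(|v| v.clone())
    }
    None // miss: fall through to the backing state without a write lock
}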
@@ -733,7 +778,9 @@ impl std::fmt::Debug for SyncingCachingState { } } -impl>, B: BlockT> StateBackend> for SyncingCachingState { +impl>, B: BlockT> StateBackend> + for SyncingCachingState +{ type Error = S::Error; type Transaction = S::Transaction; type TrieBackendStorage = S::TrieBackendStorage; @@ -766,12 +813,30 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().exists_child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.caching_state().apply_to_key_values_while( + child_info, + prefix, + start_at, + f, + allow_missing, + ) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.caching_state().for_keys_in_child_storage(child_info, f) + self.caching_state().apply_to_keys_while(child_info, prefix, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -805,16 +870,22 @@ impl>, B: BlockT> StateBackend> for Syncin fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { self.caching_state().storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { self.caching_state().child_storage_root(child_info, delta) } @@ -826,22 +897,18 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.caching_state().child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + fn as_trie_backend(&self) -> Option<&TrieBackend>> { self.caching_state - .as_mut() + .as_ref() .expect("`caching_state` is valid for the lifetime of the object; qed") .as_trie_backend() } - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { self.caching_state().register_overlay_stats(stats); } @@ -853,7 +920,7 @@ impl>, B: BlockT> StateBackend> for Syncin impl Drop for SyncingCachingState { fn drop(&mut self) { if self.disable_syncing { - return; + return } if let Some(mut caching_state) = self.caching_state.take() { @@ -872,8 +939,8 @@ impl Drop for SyncingCachingState { mod tests { use super::*; use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, }; use sp_state_machine::InMemoryBackend; @@ -881,7 +948,7 @@ mod tests { #[test] fn smoke() { - //init_log(); + // init_log(); let root_parent = H256::random(); let key = H256::random()[..].to_vec(); let h0 = H256::random(); @@ -911,18 +978,12 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - 
shared.clone(), - Some(h0), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); s.cache.sync_cache( &[], &[], @@ -933,11 +994,8 @@ mod tests { false, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); s.cache.sync_cache( &[], &[], @@ -948,11 +1006,8 @@ mod tests { false, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1a), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); s.cache.sync_cache( &[], &[], @@ -963,48 +1018,30 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); assert!(s.storage(&key).unwrap().is_none()); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); assert!(s.storage(&key).unwrap().is_none()); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1b), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); assert!(s.storage(&key).unwrap().is_none()); // reorg to 3b // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); s.cache.sync_cache( &[h1b, h2b, h3b], &[h1a, h2a, h3a], @@ -1014,11 +1051,8 @@ mod tests { Some(3), true, ); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); assert!(s.storage(&key).unwrap().is_none()); } @@ -1033,7 +1067,7 @@ mod tests { let h2b = H256::random(); let h3b = H256::random(); - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( InMemoryBackend::::default(), @@ -1050,18 +1084,12 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache( &[], &[], @@ -1072,11 +1100,8 @@ mod tests { false, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); 
s.cache.sync_cache( &[], &[], @@ -1087,11 +1112,8 @@ mod tests { false, ); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); } @@ -1105,7 +1127,7 @@ mod tests { let h3a = H256::random(); let h3b = H256::random(); - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( InMemoryBackend::::default(), @@ -1114,18 +1136,12 @@ mod tests { ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); s.cache.sync_cache( &[], &[], @@ -1136,18 +1152,12 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); s.cache.sync_cache( &[], &[], @@ -1158,22 +1168,50 @@ mod tests { false, ); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); } + #[test] + fn reverts_storage_hash() { + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h1a = H256::random(); + let h1b = H256::random(); + + let shared = new_shared_cache::(256 * 1024, (0, 1)); + let mut backend = InMemoryBackend::::default(); + backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))]))); + + let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h1a), + Some(1), + true, + ); + + let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); + s.cache.sync_cache(&[], &[h1a], vec![], vec![], Some(h1b), Some(1), true); + + let s = CachingState::new(backend.clone(), shared.clone(), Some(h1b)); + assert_eq!(s.storage_hash(&key).unwrap().unwrap(), BlakeTwo256::hash(&vec![1])); + } + #[test] fn should_track_used_size_correctly() { let root_parent = H256::random(); - let shared = new_shared_cache::(109, ((109-36), 109)); + let shared = new_shared_cache::(109, ((109 - 36), 109)); let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent.clone()), ); let key = H256::random()[..].to_vec(); @@ -1188,7 +1226,7 @@ mod tests { true, ); // 32 key, 3 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 35 /* bytes */); + assert_eq!(shared.read().used_storage_cache_size(), 35 /* bytes */); let key = 
H256::random()[..].to_vec(); s.cache.sync_cache( @@ -1201,13 +1239,13 @@ mod tests { true, ); // 35 + (2 * 32) key, 2 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 101 /* bytes */); + assert_eq!(shared.read().used_storage_cache_size(), 101 /* bytes */); } #[test] fn should_remove_lru_items_based_on_tracking_used_size() { let root_parent = H256::random(); - let shared = new_shared_cache::(36*3, (2,3)); + let shared = new_shared_cache::(36 * 3, (2, 3)); let h0 = H256::random(); let mut s = CachingState::new( @@ -1227,7 +1265,7 @@ mod tests { true, ); // 32 key, 4 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 36 /* bytes */); + assert_eq!(shared.read().used_storage_cache_size(), 36 /* bytes */); let key = H256::random()[..].to_vec(); s.cache.sync_cache( @@ -1240,7 +1278,7 @@ mod tests { true, ); // 32 key, 2 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 34 /* bytes */); + assert_eq!(shared.read().used_storage_cache_size(), 34 /* bytes */); } #[test] @@ -1269,11 +1307,8 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); s.cache.sync_cache( &[], &[], @@ -1284,16 +1319,13 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); assert_eq!(s.storage(&key).unwrap(), Some(vec![3])); // Restart (or unknown block?), clear caches. { - let mut cache = s.cache.shared_cache.lock(); + let mut cache = s.cache.shared_cache.write(); let cache = &mut *cache; cache.lru_storage.clear(); cache.lru_hashes.clear(); @@ -1307,25 +1339,73 @@ mod tests { // New value is propagated. 
s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); - let s = CachingState::new( + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + assert_eq!(s.storage(&key).unwrap(), None); + } + + #[test] + fn same_block_no_changes() { + sp_tracing::try_init_simple(); + + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h1 = H256::random(); + let h2 = H256::random(); + + let shared = new_shared_cache::(256 * 1024, (0, 1)); + + let mut s = CachingState::new( InMemoryBackend::::default(), shared.clone(), + Some(root_parent), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![1]))], + vec![], Some(h1), + Some(1), + true, + ); + assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); + + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + + // commit as non-best + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h2), + Some(2), + false, ); + + assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); + + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); + + // commit again as best with no changes + s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2), Some(2), true); assert_eq!(s.storage(&key).unwrap(), None); } } #[cfg(test)] mod qc { - use std::collections::{HashMap, hash_map::Entry}; + use std::collections::{hash_map::Entry, HashMap}; - use quickcheck::{quickcheck, TestResult, Arbitrary}; + use quickcheck::{quickcheck, Arbitrary, TestResult}; use super::*; use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, }; use sp_state_machine::InMemoryBackend; @@ -1347,28 +1427,24 @@ mod qc { fn new_next(&self, hash: H256, changes: KeySet) -> Self { let mut state = self.state.clone(); - for (k, v) in self.state.iter() { state.insert(k.clone(), v.clone()); } - for (k, v) in changes.clone().into_iter() { state.insert(k, v); } - - Self { - hash, - parent: self.hash, - changes, - state, + for (k, v) in self.state.iter() { + state.insert(k.clone(), v.clone()); + } + for (k, v) in changes.clone().into_iter() { + state.insert(k, v); } + + Self { hash, parent: self.hash, changes, state } } fn new(hash: H256, parent: H256, changes: KeySet) -> Self { let mut state = KeyMap::new(); - for (k, v) in changes.clone().into_iter() { state.insert(k, v); } - - Self { - hash, - parent, - state, - changes, + for (k, v) in changes.clone().into_iter() { + state.insert(k, v); } + + Self { hash, parent, state, changes } } fn purge(&mut self, other_changes: &KeySet) { @@ -1387,50 +1463,42 @@ mod qc { } impl Arbitrary for Action { - fn arbitrary(gen: &mut G) -> Self { - let path = gen.next_u32() as u8; - let mut buf = [0u8; 32]; + fn arbitrary(gen: &mut quickcheck::Gen) -> Self { + let path = u8::arbitrary(gen); + let buf = (0..32).map(|_| u8::arbitrary(gen)).collect::>(); match path { - 0..=175 => { - gen.fill_bytes(&mut buf[..]); - Action::Next { - hash: H256::from(&buf), - changes: { - let mut set = Vec::new(); - for _ in 0..gen.next_u32()/(64*256*256*256) { - set.push((vec![gen.next_u32() as u8], Some(vec![gen.next_u32() as u8]))); - } - set + 0..=175 => Action::Next { + hash: H256::from_slice(&buf[..]), + changes: { + let mut set = Vec::new(); + for _ in 0..::arbitrary(gen) / (64 * 256 * 256 * 256) { + set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); } - } + set + 
}, }, - 176..=220 => { - gen.fill_bytes(&mut buf[..]); - Action::Fork { - hash: H256::from(&buf), - depth: ((gen.next_u32() as u8) / 32) as usize, - changes: { - let mut set = Vec::new(); - for _ in 0..gen.next_u32()/(64*256*256*256) { - set.push((vec![gen.next_u32() as u8], Some(vec![gen.next_u32() as u8]))); - } - set + 176..=220 => Action::Fork { + hash: H256::from_slice(&buf[..]), + depth: ((u8::arbitrary(gen)) / 32) as usize, + changes: { + let mut set = Vec::new(); + for _ in 0..::arbitrary(gen) / (64 * 256 * 256 * 256) { + set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); } - } + set + }, }, 221..=240 => { - gen.fill_bytes(&mut buf[..]); Action::ReorgWithImport { - hash: H256::from(&buf), - depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 + hash: H256::from_slice(&buf[..]), + depth: ((u8::arbitrary(gen)) / 32) as usize, // 0-7 } }, _ => { - gen.fill_bytes(&mut buf[..]); Action::FinalizationReorg { - fork_depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 - depth: ((gen.next_u32() as u8) / 64) as usize, // 0-3 + fork_depth: ((u8::arbitrary(gen)) / 32) as usize, // 0-7 + depth: ((u8::arbitrary(gen)) / 64) as usize, // 0-3 } }, } @@ -1445,13 +1513,9 @@ mod qc { impl Mutator { fn new_empty() -> Self { - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); - Self { - shared, - canon: vec![], - forks: HashMap::new(), - } + Self { shared, canon: vec![], forks: HashMap::new() } } fn head_state(&self, hash: H256) -> CachingState, Block> { @@ -1470,11 +1534,12 @@ mod qc { &mut self, action: Action, ) -> CachingState, Block> { - self.mutate(action).expect("Expected to provide only valid actions to the mutate_static") + self.mutate(action) + .expect("Expected to provide only valid actions to the mutate_static") } fn canon_len(&self) -> usize { - return self.canon.len(); + return self.canon.len() } fn head_storage_ref(&self) -> &KeyMap { @@ -1492,10 +1557,10 @@ mod qc { let state = match action { Action::Fork { depth, hash, changes } => { let pos = self.canon.len() as isize - depth as isize; - if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len()-1) as isize + if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len() - 1) as isize // no fork on top also, thus len-1 { - return Err(()); + return Err(()) } let pos = pos as usize; @@ -1505,7 +1570,8 @@ mod qc { let (total_h, parent) = match self.forks.entry(fork_at) { Entry::Occupied(occupied) => { let chain = occupied.into_mut(); - let parent = chain.last().expect("No empty forks are ever created").clone(); + let parent = + chain.last().expect("No empty forks are ever created").clone(); let mut node = parent.new_next(hash, changes.clone()); for earlier in chain.iter() { @@ -1521,7 +1587,7 @@ mod qc { vacant.insert(vec![canon_parent.new_next(hash, changes.clone())]); (pos + 1, fork_at) - } + }, }; let mut state = CachingState::new( @@ -1548,9 +1614,7 @@ mod qc { let parent_hash = H256::from(&[0u8; 32]); (Node::new(hash, parent_hash, changes.clone()), parent_hash) }, - Some(parent) => { - (parent.new_next(hash, changes.clone()), parent.hash) - } + Some(parent) => (parent.new_next(hash, changes.clone()), parent.hash), }; // delete cache entries for earlier @@ -1585,22 +1649,26 @@ mod qc { }, Action::ReorgWithImport { depth, hash } => { let pos = self.canon.len() as isize - depth as isize; - if pos < 0 || pos+1 >= self.canon.len() as isize { return Err(()); } + if pos < 0 || pos + 1 >= self.canon.len() as isize { + return Err(()) + } let fork_at = 
self.canon[pos as usize].hash; let pos = pos as usize; match self.forks.get_mut(&fork_at) { Some(chain) => { - let mut new_fork = self.canon.drain(pos+1..).collect::>(); + let mut new_fork = self.canon.drain(pos + 1..).collect::>(); - let retracted: Vec = new_fork.iter().map(|node| node.hash).collect(); + let retracted: Vec = + new_fork.iter().map(|node| node.hash).collect(); let enacted: Vec = chain.iter().map(|node| node.hash).collect(); std::mem::swap(chain, &mut new_fork); - let mut node = new_fork.last().map( - |node| node.new_next(hash, vec![]) - ).expect("No empty fork ever created!"); + let mut node = new_fork + .last() + .map(|node| node.new_next(hash, vec![])) + .expect("No empty fork ever created!"); for invalidators in chain.iter().chain(new_fork.iter()) { node.purge(&invalidators.changes); @@ -1628,44 +1696,54 @@ mod qc { ); state - } + }, None => { - return Err(()); // no reorg without a fork atm! + return Err(()) // no reorg without a fork atm! }, } }, Action::FinalizationReorg { fork_depth, depth } => { let pos = self.canon.len() as isize - fork_depth as isize; - if pos < 0 || pos+1 >= self.canon.len() as isize { return Err(()); } + if pos < 0 || pos + 1 >= self.canon.len() as isize { + return Err(()) + } let fork_at = self.canon[pos as usize].hash; let pos = pos as usize; match self.forks.get_mut(&fork_at) { Some(fork_chain) => { - let sync_pos = fork_chain.len() as isize - fork_chain.len() as isize - depth as isize; - if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { return Err (()); } + let sync_pos = fork_chain.len() as isize - + fork_chain.len() as isize - depth as isize; + if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { + return Err(()) + } let sync_pos = sync_pos as usize; - let mut new_fork = self.canon.drain(pos+1..).collect::>(); + let mut new_fork = self.canon.drain(pos + 1..).collect::>(); - let retracted: Vec = new_fork.iter().map(|node| node.hash).collect(); - let enacted: Vec = fork_chain.iter().take(sync_pos+1).map(|node| node.hash).collect(); + let retracted: Vec = + new_fork.iter().map(|node| node.hash).collect(); + let enacted: Vec = fork_chain + .iter() + .take(sync_pos + 1) + .map(|node| node.hash) + .collect(); std::mem::swap(fork_chain, &mut new_fork); - self.shared.lock().sync(&retracted, &enacted); + self.shared.write().sync(&retracted, &enacted); self.head_state( - self.canon.last() - .expect("wasn't forking to emptiness so there should be one!") - .hash + self.canon + .last() + .expect("wasn't forking to emptiness so there should be one!") + .hash, ) }, None => { - return Err(()); // no reorg to nothing pls! - } + return Err(()) // no reorg to nothing pls! 
+ }, } - }, }; @@ -1685,14 +1763,27 @@ mod qc { let h3b = H256::random(); let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h0, changes: vec![(key.clone(), Some(vec![2]))] }); + mutator + .mutate_static(Action::Next { hash: h0, changes: vec![(key.clone(), Some(vec![2]))] }); mutator.mutate_static(Action::Next { hash: h1a, changes: vec![] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: vec![(key.clone(), Some(vec![3]))] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: vec![(key.clone(), Some(vec![4]))] }); - mutator.mutate_static(Action::Next { hash: h2a, changes: vec![(key.clone(), Some(vec![5]))] }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h1b, + changes: vec![(key.clone(), Some(vec![3]))], + }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h2b, + changes: vec![(key.clone(), Some(vec![4]))], + }); + mutator + .mutate_static(Action::Next { hash: h2a, changes: vec![(key.clone(), Some(vec![5]))] }); mutator.mutate_static(Action::Next { hash: h3a, changes: vec![] }); - assert_eq!(mutator.head_state(h3a).storage(&key).unwrap().expect("there should be a value"), vec![5]); + assert_eq!( + mutator.head_state(h3a).storage(&key).unwrap().expect("there should be a value"), + vec![5] + ); assert!(mutator.head_state(h1a).storage(&key).unwrap().is_none()); assert!(mutator.head_state(h2b).storage(&key).unwrap().is_none()); assert!(mutator.head_state(h1b).storage(&key).unwrap().is_none()); @@ -1706,18 +1797,17 @@ mod qc { for key in Mutator::key_permutations() { match (head_state.storage(&key).unwrap(), mutator.head_storage_ref().get(&key)) { - (Some(x), Some(y)) => { + (Some(x), Some(y)) => if Some(&x) != y.as_ref() { eprintln!("{:?} != {:?}", x, y); - return false; - } - }, + return false + }, (None, Some(_y)) => { // TODO: cache miss is not tracked atm }, (Some(x), None) => { eprintln!("{:?} != ", x); - return false; + return false }, _ => continue, } @@ -1730,18 +1820,17 @@ mod qc { let head_state = mutator.head_state(node.hash); for key in Mutator::key_permutations() { match (head_state.storage(&key).unwrap(), node.state.get(&key)) { - (Some(x), Some(y)) => { + (Some(x), Some(y)) => if Some(&x) != y.as_ref() { eprintln!("at [{}]: {:?} != {:?}", node.hash, x, y); - return false; - } - }, + return false + }, (None, Some(_y)) => { // cache miss is not tracked atm }, (Some(x), None) => { eprintln!("at [{}]: {:?} != ", node.hash, x); - return false; + return false }, _ => continue, } @@ -1762,16 +1851,27 @@ mod qc { let mut mutator = Mutator::new_empty(); mutator.mutate_static(Action::Next { hash: h0, changes: vec![] }); mutator.mutate_static(Action::Next { hash: h1, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h2, changes: vec![(key.clone(), Some(vec![2]))] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: vec![(key.clone(), Some(vec![3]))] }); + mutator + .mutate_static(Action::Next { hash: h2, changes: vec![(key.clone(), Some(vec![2]))] }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h1b, + changes: vec![(key.clone(), Some(vec![3]))], + }); mutator.mutate_static(Action::ReorgWithImport { depth: 2, hash: h2b }); assert!(is_head_match(&mutator)) } - fn key(k: u8) -> Vec { vec![k] } - fn val(v: u8) -> Option> { Some(vec![v]) } - fn keyval(k: u8, v: u8) -> KeySet { vec![(key(k), val(v))] } + fn key(k: u8) -> Vec { + vec![k] + } + fn val(v: u8) -> Option> { + Some(vec![v]) + } + fn keyval(k: u8, v: u8) -> KeySet { + vec![(key(k), 
val(v))] + } #[test] fn reorg2() { @@ -1785,7 +1885,7 @@ mod qc { let mut mutator = Mutator::new_empty(); mutator.mutate_static(Action::Next { hash: h0, changes: keyval(1, 1) }); mutator.mutate_static(Action::Next { hash: h1a, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: keyval(2, 2 ) }); + mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: keyval(2, 2) }); mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(3, 3) }); mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(4, 4) }); diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 95592d071f777..0f3578ad99a37 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -1,77 +1,165 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Database upgrade logic. -use std::fs; -use std::io::{Read, Write, ErrorKind}; -use std::path::{Path, PathBuf}; +use std::{ + fmt, fs, + io::{self, ErrorKind, Read, Write}, + path::{Path, PathBuf}, +}; +use crate::{columns, utils::DatabaseType}; +use codec::{Decode, Encode}; +use kvdb_rocksdb::{Database, DatabaseConfig}; use sp_runtime::traits::Block as BlockT; -use crate::utils::DatabaseType; /// Version file name. const VERSION_FILE_NAME: &'static str = "db_version"; /// Current db version. -const CURRENT_VERSION: u32 = 1; +const CURRENT_VERSION: u32 = 3; + +/// Number of columns in v1. +const V1_NUM_COLUMNS: u32 = 11; +const V2_NUM_COLUMNS: u32 = 12; + +/// Database upgrade errors. +#[derive(Debug)] +pub enum UpgradeError { + /// Database version cannot be read from existing db_version file. + UnknownDatabaseVersion, + /// Missing database version file. + MissingDatabaseVersionFile, + /// Database version no longer supported. + UnsupportedVersion(u32), + /// Database version comes from future version of the client. + FutureDatabaseVersion(u32), + /// Invalid justification block. + DecodingJustificationBlock, + /// Common io error. + Io(io::Error), +} -/// Upgrade database to current version. 
-pub fn upgrade_db<Block: BlockT>(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> {
-	let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none());
-	if !is_empty {
-		let db_version = current_version(db_path)?;
-		match db_version {
-			0 => Err(sp_blockchain::Error::Backend(format!("Unsupported database version: {}", db_version)))?,
-			1 => (),
-			_ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?,
+pub type UpgradeResult<T> = Result<T, UpgradeError>;
+
+impl From<io::Error> for UpgradeError {
+	fn from(err: io::Error) -> Self {
+		UpgradeError::Io(err)
+	}
+}
+
+impl fmt::Display for UpgradeError {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		match self {
+			UpgradeError::UnknownDatabaseVersion => {
+				write!(f, "Database version cannot be read from existing db_version file")
+			},
+			UpgradeError::MissingDatabaseVersionFile => write!(f, "Missing database version file"),
+			UpgradeError::UnsupportedVersion(version) => {
+				write!(f, "Database version no longer supported: {}", version)
+			},
+			UpgradeError::FutureDatabaseVersion(version) => {
+				write!(f, "Database version comes from future version of the client: {}", version)
+			},
+			UpgradeError::DecodingJustificationBlock => {
+				write!(f, "Decoding justification block failed")
+			},
+			UpgradeError::Io(err) => write!(f, "Io error: {}", err),
 		}
 	}
+}
+
+/// Upgrade database to current version.
+pub fn upgrade_db<Block: BlockT>(db_path: &Path, db_type: DatabaseType) -> UpgradeResult<()> {
+	let db_version = current_version(db_path)?;
+	match db_version {
+		0 => return Err(UpgradeError::UnsupportedVersion(db_version)),
+		1 => {
+			migrate_1_to_2::<Block>(db_path, db_type)?;
+			migrate_2_to_3::<Block>(db_path, db_type)?
+		},
+		2 => migrate_2_to_3::<Block>(db_path, db_type)?,
+		CURRENT_VERSION => (),
+		_ => return Err(UpgradeError::FutureDatabaseVersion(db_version)),
+	}
+	update_version(db_path)?;
+	Ok(())
+}

-	update_version(db_path)
+/// Migration from version 1 to version 2:
+/// 1) the number of columns has changed from 11 to 12;
+/// 2) the transactions column is added.
+fn migrate_1_to_2<Block: BlockT>(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> {
+	let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS);
+	let db = Database::open(&db_cfg, db_path)?;
+	db.add_column().map_err(Into::into)
 }

+/// Migration from version 2 to version 3:
+/// - The format of the stored Justification changed to support multiple Justifications.
+fn migrate_2_to_3<Block: BlockT>(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> {
+	let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS);
+	let db = Database::open(&db_cfg, db_path)?;
+
+	// Get all the keys we need to update
+	let keys: Vec<_> = db.iter(columns::JUSTIFICATIONS).map(|entry| entry.0).collect();
+
+	// Read and update each entry
+	let mut transaction = db.transaction();
+	for key in keys {
+		if let Some(justification) = db.get(columns::JUSTIFICATIONS, &key)? {
+			// Tag each justification with the hardcoded ID for GRANDPA to avoid the dependency on
+			// the GRANDPA crate.
+			// NOTE: when storing justifications the previous API would get a `Vec<u8>` and still
+			// call encode on it.
+			let justification = Vec::<u8>::decode(&mut &justification[..])
+				.map_err(|_| UpgradeError::DecodingJustificationBlock)?;
+			let justifications = sp_runtime::Justifications::from((*b"FRNK", justification));
+			transaction.put_vec(columns::JUSTIFICATIONS, &key, justifications.encode());
+		}
+	}
+	db.write(transaction)?;
+
+	Ok(())
+}

 /// Reads current database version from the file at given path.
 /// If the file does not exist returns 0.
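// A minimal sketch (not from the PR) of what `migrate_2_to_3` does to a single
// stored entry, assuming this file's `codec::{Decode, Encode}` and `sp_runtime`
// imports; `upgrade_one_justification` is a name invented for this illustration.
fn upgrade_one_justification(raw_v2: &[u8]) -> Result<Vec<u8>, codec::Error> {
	// v2 stored a bare SCALE-encoded `Vec<u8>` justification...
	let justification = Vec::<u8>::decode(&mut &raw_v2[..])?;
	// ...v3 wraps it in `Justifications`, tagged with the GRANDPA engine id
	// (`*b"FRNK"`) so justifications from multiple consensus engines can coexist.
	let justifications = sp_runtime::Justifications::from((*b"FRNK", justification));
	Ok(justifications.encode())
}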
-fn current_version(path: &Path) -> sp_blockchain::Result { - let unknown_version_err = || sp_blockchain::Error::Backend("Unknown database version".into()); - +fn current_version(path: &Path) -> UpgradeResult { match fs::File::open(version_file_path(path)) { - Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(0), - Err(_) => Err(unknown_version_err()), + Err(ref err) if err.kind() == ErrorKind::NotFound => + Err(UpgradeError::MissingDatabaseVersionFile), + Err(_) => Err(UpgradeError::UnknownDatabaseVersion), Ok(mut file) => { let mut s = String::new(); - file.read_to_string(&mut s).map_err(|_| unknown_version_err())?; - u32::from_str_radix(&s, 10).map_err(|_| unknown_version_err()) + file.read_to_string(&mut s).map_err(|_| UpgradeError::UnknownDatabaseVersion)?; + u32::from_str_radix(&s, 10).map_err(|_| UpgradeError::UnknownDatabaseVersion) }, } } -/// Maps database error to client error -fn db_err(err: std::io::Error) -> sp_blockchain::Error { - sp_blockchain::Error::Backend(format!("{}", err)) -} - /// Writes current database version to the file. /// Creates a new file if the version file does not exist yet. -fn update_version(path: &Path) -> sp_blockchain::Result<()> { - fs::create_dir_all(path).map_err(db_err)?; - let mut file = fs::File::create(version_file_path(path)).map_err(db_err)?; - file.write_all(format!("{}", CURRENT_VERSION).as_bytes()).map_err(db_err)?; +pub fn update_version(path: &Path) -> io::Result<()> { + fs::create_dir_all(path)?; + let mut file = fs::File::create(version_file_path(path))?; + file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?; Ok(()) } @@ -84,10 +172,11 @@ fn version_file_path(path: &Path) -> PathBuf { #[cfg(test)] mod tests { - use sc_state_db::PruningMode; - use crate::{DatabaseSettings, DatabaseSettingsSrc}; - use crate::tests::Block; use super::*; + use crate::{ + tests::Block, DatabaseSettings, DatabaseSource, KeepBlocks, TransactionStorageMode, + }; + use sc_state_db::PruningMode; fn create_db(db_path: &Path, version: Option) { if let Some(version) = version { @@ -97,27 +186,47 @@ mod tests { } } - fn open_database(db_path: &Path) -> sp_blockchain::Result<()> { - crate::utils::open_database::(&DatabaseSettings { - state_cache_size: 0, - state_cache_child_ratio: None, - pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::RocksDb { path: db_path.to_owned(), cache_size: 128 }, - }, DatabaseType::Full).map(|_| ()) + fn open_database(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { + crate::utils::open_database::( + &DatabaseSettings { + state_cache_size: 0, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + source: DatabaseSource::RocksDb { path: db_path.to_owned(), cache_size: 128 }, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + }, + db_type, + ) + .map(|_| ()) } #[test] fn downgrade_never_happens() { let db_dir = tempfile::TempDir::new().unwrap(); create_db(db_dir.path(), Some(CURRENT_VERSION + 1)); - assert!(open_database(db_dir.path()).is_err()); + assert!(open_database(db_dir.path(), DatabaseType::Full).is_err()); } #[test] fn open_empty_database_works() { + let db_type = DatabaseType::Full; let db_dir = tempfile::TempDir::new().unwrap(); - open_database(db_dir.path()).unwrap(); - open_database(db_dir.path()).unwrap(); - assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION); + let db_dir = db_dir.path().join(db_type.as_str()); + open_database(&db_dir, db_type).unwrap(); + open_database(&db_dir, db_type).unwrap(); 
+ assert_eq!(current_version(&db_dir).unwrap(), CURRENT_VERSION); + } + + #[test] + fn upgrade_to_3_works() { + let db_type = DatabaseType::Full; + for version_from_file in &[None, Some(1), Some(2)] { + let db_dir = tempfile::TempDir::new().unwrap(); + let db_path = db_dir.path().join(db_type.as_str()); + create_db(&db_path, *version_from_file); + open_database(&db_path, db_type).unwrap(); + assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION); + } } } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index e999469c18ff0..ea22c774f463e 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,25 +19,28 @@ //! Db-based backend utility structures and functions, used by both //! full and light storages. -use std::sync::Arc; -use std::convert::TryInto; +use std::{convert::TryInto, fmt, fs, io, path::Path, sync::Arc}; -use log::debug; +use log::{debug, info}; +use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; use codec::Decode; -use sp_trie::DBValue; use sp_database::Transaction; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, Zero, - UniqueSaturatedFrom, UniqueSaturatedInto, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero}, }; -use crate::{DatabaseSettings, DatabaseSettingsSrc, Database, DbHash}; +use sp_trie::DBValue; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -#[cfg(any(feature = "with-kvdb-rocksdb", feature = "with-parity-db", feature = "test-helpers", test))] -pub const NUM_COLUMNS: u32 = 11; +#[cfg(any( + feature = "with-kvdb-rocksdb", + feature = "with-parity-db", + feature = "test-helpers", + test +))] +pub const NUM_COLUMNS: u32 = 12; /// Meta column. The set of keys in the column is shared by full && light storages. pub const COLUMN_META: u32 = 0; @@ -49,6 +52,8 @@ pub mod meta_keys { pub const BEST_BLOCK: &[u8; 4] = b"best"; /// Last finalized block key. pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; + /// Last finalized state key. + pub const FINALIZED_STATE: &[u8; 6] = b"fstate"; /// Meta information prefix for list-based caches. pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; /// Meta information for changes tries key. @@ -74,6 +79,8 @@ pub struct Meta { pub finalized_number: N, /// Hash of the genesis block. pub genesis_hash: H, + /// Finalized state, if any + pub finalized_state: Option<(H, N)>, } /// A block lookup key: used for canonical lookup from block number to hash @@ -94,24 +101,17 @@ pub enum DatabaseType { /// In the current database schema, this kind of key is only used for /// lookups into an index, NOT for storing header data or others. 
pub fn number_index_key>(n: N) -> sp_blockchain::Result { - let n = n.try_into().map_err(|_| + let n = n.try_into().map_err(|_| { sp_blockchain::Error::Backend("Block number cannot be converted to u32".into()) - )?; + })?; - Ok([ - (n >> 24) as u8, - ((n >> 16) & 0xff) as u8, - ((n >> 8) & 0xff) as u8, - (n & 0xff) as u8 - ]) + Ok([(n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, (n & 0xff) as u8]) } /// Convert number and hash into long lookup key for blocks that are /// not in the canonical chain. -pub fn number_and_hash_to_lookup_key( - number: N, - hash: H, -) -> sp_blockchain::Result> where +pub fn number_and_hash_to_lookup_key(number: N, hash: H) -> sp_blockchain::Result> +where N: TryInto, H: AsRef<[u8]>, { @@ -122,16 +122,15 @@ pub fn number_and_hash_to_lookup_key( /// Convert block lookup key into block number. /// all block lookup keys start with the block number. -pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result where - N: From +pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result +where + N: From, { if key.len() < 4 { - return Err(sp_blockchain::Error::Backend("Invalid block key".into())); + return Err(sp_blockchain::Error::Backend("Invalid block key".into())) } - Ok((key[0] as u32) << 24 - | (key[1] as u32) << 16 - | (key[2] as u32) << 8 - | (key[3] as u32)).map(Into::into) + Ok((key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | (key[3] as u32)) + .map(Into::into) } /// Delete number to hash mapping in DB transaction. @@ -193,114 +192,212 @@ pub fn insert_hash_to_key_mapping, H: AsRef<[u8]> + Clone>( pub fn block_id_to_lookup_key( db: &dyn Database, key_lookup_col: u32, - id: BlockId -) -> Result>, sp_blockchain::Error> where + id: BlockId, +) -> Result>, sp_blockchain::Error> +where Block: BlockT, ::sp_runtime::traits::NumberFor: UniqueSaturatedFrom + UniqueSaturatedInto, { Ok(match id { - BlockId::Number(n) => db.get( - key_lookup_col, - number_index_key(n)?.as_ref(), - ), - BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()) + BlockId::Number(n) => db.get(key_lookup_col, number_index_key(n)?.as_ref()), + BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()), }) } +fn backend_err(feat: &'static str) -> sp_blockchain::Error { + sp_blockchain::Error::Backend(feat.to_string()) +} + /// Opens the configured database. 
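// A self-contained sketch (not from the PR) of the 4-byte big-endian lookup key
// that `number_index_key` above produces; `lookup_key_example` exists only for
// this illustration.
fn lookup_key_example() {
	let n: u32 = 0x0102_0304;
	// Most significant byte first, so keys sort lexicographically in ascending
	// block-number order.
	let key = [(n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, (n & 0xff) as u8];
	assert_eq!(key, [1, 2, 3, 4]);
	// `lookup_key_to_number` reverses the packing from the first four bytes.
	let back = (key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | key[3] as u32;
	assert_eq!(back, n);
}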
pub fn open_database( config: &DatabaseSettings, db_type: DatabaseType, ) -> sp_blockchain::Result>> { - #[allow(unused)] - fn db_open_error(feat: &'static str) -> sp_blockchain::Error { - sp_blockchain::Error::Backend( - format!("`{}` feature not enabled, database can not be opened", feat), - ) - } + // Maybe migrate (copy) the database to a type specific subdirectory to make it + // possible that light and full databases coexist + // NOTE: This function can be removed in a few releases + maybe_migrate_to_type_subdir::(&config.source, db_type).map_err(|e| { + sp_blockchain::Error::Backend(format!("Error in migration to role subdirectory: {}", e)) + })?; + + open_database_at::(&config.source, db_type) +} - let db: Arc> = match &config.source { - #[cfg(any(feature = "with-kvdb-rocksdb", test))] - DatabaseSettingsSrc::RocksDb { path, cache_size } => { - // first upgrade database to required version - crate::upgrade::upgrade_db::(&path, db_type)?; - - // and now open database assuming that it has the latest version - let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); - let path = path.to_str() - .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; - - let mut memory_budget = std::collections::HashMap::new(); - match db_type { - DatabaseType::Full => { - let state_col_budget = (*cache_size as f64 * 0.9) as usize; - let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); - - for i in 0..NUM_COLUMNS { - if i == crate::columns::STATE { - memory_budget.insert(i, state_col_budget); - } else { - memory_budget.insert(i, other_col_budget); - } - } - log::trace!( - target: "db", - "Open RocksDB database at {}, state column budget: {} MiB, others({}) column cache: {} MiB", - path, - state_col_budget, - NUM_COLUMNS, - other_col_budget, - ); - }, - DatabaseType::Light => { - let col_budget = cache_size / (NUM_COLUMNS as usize); - for i in 0..NUM_COLUMNS { - memory_budget.insert(i, col_budget); - } - log::trace!( - target: "db", - "Open RocksDB light database at {}, column cache: {} MiB", - path, - col_budget, - ); - } +fn open_database_at( + source: &DatabaseSource, + db_type: DatabaseType, +) -> sp_blockchain::Result>> { + let db: Arc> = match &source { + DatabaseSource::ParityDb { path } => open_parity_db::(&path, db_type, true)?, + DatabaseSource::RocksDb { path, cache_size } => + open_kvdb_rocksdb::(&path, db_type, true, *cache_size)?, + DatabaseSource::Custom(db) => db.clone(), + DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size } => { + // check if rocksdb exists first, if not, open paritydb + match open_kvdb_rocksdb::(&rocksdb_path, db_type, false, *cache_size) { + Ok(db) => db, + Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) => + open_parity_db::(&paritydb_path, db_type, true)?, + Err(_) => return Err(backend_err("cannot open rocksdb. corrupted database")), } - db_config.memory_budget = memory_budget; - - let db = kvdb_rocksdb::Database::open(&db_config, &path) - .map_err(|err| sp_blockchain::Error::Backend(format!("{}", err)))?; - sp_database::as_database(db) - }, - #[cfg(not(any(feature = "with-kvdb-rocksdb", test)))] - DatabaseSettingsSrc::RocksDb { .. } => { - return Err(db_open_error("with-kvdb-rocksdb")); - }, - #[cfg(feature = "with-parity-db")] - DatabaseSettingsSrc::ParityDb { path } => { - crate::parity_db::open(&path, db_type) - .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? }, - #[cfg(not(feature = "with-parity-db"))] - DatabaseSettingsSrc::ParityDb { .. 
} => { - return Err(db_open_error("with-parity-db")) - }, - DatabaseSettingsSrc::Custom(db) => db.clone(), }; check_database_type(&*db, db_type)?; + Ok(db) +} + +#[derive(Debug)] +enum OpenDbError { + // constructed only when rocksdb and paritydb are disabled + #[allow(dead_code)] + NotEnabled(&'static str), + DoesNotExist, + Internal(String), +} + +type OpenDbResult = Result>, OpenDbError>; + +impl fmt::Display for OpenDbError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + OpenDbError::Internal(e) => write!(f, "{}", e.to_string()), + OpenDbError::DoesNotExist => write!(f, "Database does not exist at given location"), + OpenDbError::NotEnabled(feat) => { + write!(f, "`{}` feature not enabled, database can not be opened", feat) + }, + } + } +} +impl From for sp_blockchain::Error { + fn from(err: OpenDbError) -> Self { + sp_blockchain::Error::Backend(err.to_string()) + } +} + +#[cfg(feature = "with-parity-db")] +impl From for OpenDbError { + fn from(err: parity_db::Error) -> Self { + if err.to_string().contains("use open_or_create") { + OpenDbError::DoesNotExist + } else { + OpenDbError::Internal(err.to_string()) + } + } +} + +impl From for OpenDbError { + fn from(err: io::Error) -> Self { + if err.to_string().contains("create_if_missing is false") { + OpenDbError::DoesNotExist + } else { + OpenDbError::Internal(err.to_string()) + } + } +} + +#[cfg(feature = "with-parity-db")] +fn open_parity_db(path: &Path, db_type: DatabaseType, create: bool) -> OpenDbResult { + let db = crate::parity_db::open(path, db_type, create)?; Ok(db) } +#[cfg(not(feature = "with-parity-db"))] +fn open_parity_db( + _path: &Path, + _db_type: DatabaseType, + _create: bool, +) -> OpenDbResult { + Err(OpenDbError::NotEnabled("with-parity-db")) +} + +#[cfg(any(feature = "with-kvdb-rocksdb", test))] +fn open_kvdb_rocksdb( + path: &Path, + db_type: DatabaseType, + create: bool, + cache_size: usize, +) -> OpenDbResult { + // first upgrade database to required version + match crate::upgrade::upgrade_db::(&path, db_type) { + // in case of missing version file, assume that database simply does not exist at given + // location + Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (), + Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err.to_string()).into()), + } + + // and now open database assuming that it has the latest version + let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); + db_config.create_if_missing = create; + + let mut memory_budget = std::collections::HashMap::new(); + match db_type { + DatabaseType::Full => { + let state_col_budget = (cache_size as f64 * 0.9) as usize; + let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); + + for i in 0..NUM_COLUMNS { + if i == crate::columns::STATE { + memory_budget.insert(i, state_col_budget); + } else { + memory_budget.insert(i, other_col_budget); + } + } + log::trace!( + target: "db", + "Open RocksDB database at {:?}, state column budget: {} MiB, others({}) column cache: {} MiB", + path, + state_col_budget, + NUM_COLUMNS, + other_col_budget, + ); + }, + DatabaseType::Light => { + let col_budget = cache_size / (NUM_COLUMNS as usize); + for i in 0..NUM_COLUMNS { + memory_budget.insert(i, col_budget); + } + log::trace!( + target: "db", + "Open RocksDB light database at {:?}, column cache: {} MiB", + path, + col_budget, + ); + }, + } + db_config.memory_budget = memory_budget; + + let db = kvdb_rocksdb::Database::open(&db_config, path)?; + // write database 
version only after the database is successfully opened
+	crate::upgrade::update_version(path)?;
+	Ok(sp_database::as_database(db))
+}
+
+#[cfg(not(any(feature = "with-kvdb-rocksdb", test)))]
+fn open_kvdb_rocksdb<Block: BlockT>(
+	_path: &Path,
+	_db_type: DatabaseType,
+	_create: bool,
+	_cache_size: usize,
+) -> OpenDbResult {
+	Err(OpenDbError::NotEnabled("with-kvdb-rocksdb"))
+}
+
 /// Check database type.
-pub fn check_database_type(db: &dyn Database<DbHash>, db_type: DatabaseType) -> sp_blockchain::Result<()> {
+pub fn check_database_type(
+	db: &dyn Database<DbHash>,
+	db_type: DatabaseType,
+) -> sp_blockchain::Result<()> {
 	match db.get(COLUMN_META, meta_keys::TYPE) {
-		Some(stored_type) => {
+		Some(stored_type) =>
 			if db_type.as_str().as_bytes() != &*stored_type {
-				return Err(sp_blockchain::Error::Backend(
-					format!("Unexpected database type. Expected: {}", db_type.as_str())).into());
-			}
-		},
+				return Err(sp_blockchain::Error::Backend(format!(
+					"Unexpected database type. Expected: {}",
+					db_type.as_str()
+				))
+				.into())
+			},
 		None => {
 			let mut transaction = Transaction::new();
 			transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes());
@@ -311,15 +408,55 @@ pub fn check_database_type(db: &dyn Database<DbHash>, db_type: DatabaseType) ->
 	Ok(())
 }

+fn maybe_migrate_to_type_subdir<Block: BlockT>(
+	source: &DatabaseSource,
+	db_type: DatabaseType,
+) -> io::Result<()> {
+	if let Some(p) = source.path() {
+		let mut basedir = p.to_path_buf();
+		basedir.pop();
+
+		// Do we have to migrate to a database-type-based subdirectory layout:
+		// See if there's a file identifying a rocksdb or paritydb folder in the parent dir and
+		// the target path ends in a role specific directory
+		if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) &&
+			(p.ends_with(DatabaseType::Full.as_str()) ||
+				p.ends_with(DatabaseType::Light.as_str()))
+		{
+			// Try to open the database to check if the current `DatabaseType` matches the type of
+			// database stored in the target directory and close the database on success.
+			let mut old_source = source.clone();
+			old_source.set_path(&basedir);
+			open_database_at::<Block>(&old_source, db_type)
+				.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+
+			info!(
+				"Migrating database to a database-type-based subdirectory: '{:?}' -> '{:?}'",
+				basedir,
+				basedir.join(db_type.as_str())
+			);
+			let mut tmp_dir = basedir.clone();
+			tmp_dir.pop();
+			tmp_dir.push("tmp");
+
+			fs::rename(&basedir, &tmp_dir)?;
+			fs::create_dir_all(&p)?;
+			fs::rename(tmp_dir, &p)?;
+		}
+	}
+
+	Ok(())
+}
+
 /// Read database column entry for the given block.
 pub fn read_db<Block>(
 	db: &dyn Database<DbHash>,
 	col_index: u32,
 	col: u32,
-	id: BlockId<Block>
+	id: BlockId<Block>,
 ) -> sp_blockchain::Result<Option<DBValue>>
-	where
-		Block: BlockT,
+where
+	Block: BlockT,
 {
 	block_id_to_lookup_key(db, col_index, id).and_then(|key| match key {
 		Some(key) => Ok(db.get(col, key.as_ref())),
@@ -327,6 +464,23 @@ pub fn read_db<Block>(
 	})
 }

+/// Remove database column entry for the given block.
+pub fn remove_from_db<Block>(
+	transaction: &mut Transaction<DbHash>,
+	db: &dyn Database<DbHash>,
+	col_index: u32,
+	col: u32,
+	id: BlockId<Block>,
+) -> sp_blockchain::Result<()>
+where
+	Block: BlockT,
+{
+	block_id_to_lookup_key(db, col_index, id).and_then(|key| match key {
+		Some(key) => Ok(transaction.remove(col, key.as_ref())),
+		None => Ok(()),
+	})
+}
+
 /// Read a header from the database.
 pub fn read_header<Block: BlockT>(
 	db: &dyn Database<DbHash>,
@@ -337,10 +491,8 @@ pub fn read_header<Block: BlockT>(
 	match read_db(db, col_index, col, id)?
{ Some(header) => match Block::Header::decode(&mut &header[..]) { Ok(header) => Ok(Some(header)), - Err(_) => return Err( - sp_blockchain::Error::Backend("Error decoding header".into()) - ), - } + Err(_) => return Err(sp_blockchain::Error::Backend("Error decoding header".into())), + }, None => Ok(None), } } @@ -352,47 +504,60 @@ pub fn require_header( col: u32, id: BlockId, ) -> sp_blockchain::Result { - read_header(db, col_index, col, id) - .and_then(|header| header.ok_or_else(|| - sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id)) - )) + read_header(db, col_index, col, id).and_then(|header| { + header.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id))) + }) } /// Read meta from the database. -pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< - Meta<<::Header as HeaderT>::Number, Block::Hash>, - sp_blockchain::Error, -> - where - Block: BlockT, +pub fn read_meta( + db: &dyn Database, + col_header: u32, +) -> Result::Header as HeaderT>::Number, Block::Hash>, sp_blockchain::Error> +where + Block: BlockT, { let genesis_hash: Block::Hash = match read_genesis_hash(db)? { Some(genesis_hash) => genesis_hash, - None => return Ok(Meta { - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - }), + None => + return Ok(Meta { + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + finalized_state: None, + }), }; let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { - if let Some(Some(header)) = match db.get(COLUMN_META, key) { - Some(id) => db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok()), - None => None, - } + if let Some(Some(header)) = db + .get(COLUMN_META, key) + .and_then(|id| db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok())) { let hash = header.hash(); - debug!("DB Opened blockchain db, fetched {} = {:?} ({})", desc, hash, header.number()); + debug!( + target: "db", + "Opened blockchain db, fetched {} = {:?} ({})", + desc, + hash, + header.number() + ); Ok((hash, *header.number())) } else { - Ok((genesis_hash.clone(), Zero::zero())) + Ok((Default::default(), Zero::zero())) } }; let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; + let (finalized_state_hash, finalized_state_number) = + load_meta_block("final_state", meta_keys::FINALIZED_STATE)?; + let finalized_state = if finalized_state_hash != Default::default() { + Some((finalized_state_hash, finalized_state_number)) + } else { + None + }; Ok(Meta { best_hash, @@ -400,17 +565,19 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< finalized_hash, finalized_number, genesis_hash, + finalized_state, }) } /// Read genesis hash from database. 
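// A small sketch (not from the PR) of how `read_meta` above derives the new
// optional `finalized_state` field: `load_meta_block` falls back to a default
// hash, and a default hash is treated as "no finalized state recorded".
// `to_finalized_state` is a name invented for this illustration.
fn to_finalized_state<H: Default + PartialEq, N>(hash: H, number: N) -> Option<(H, N)> {
	if hash != H::default() {
		Some((hash, number))
	} else {
		None
	}
}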
-pub fn read_genesis_hash(db: &dyn Database) -> sp_blockchain::Result> { +pub fn read_genesis_hash( + db: &dyn Database, +) -> sp_blockchain::Result> { match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { Some(h) => match Decode::decode(&mut &h[..]) { Ok(h) => Ok(Some(h)), - Err(err) => Err(sp_blockchain::Error::Backend( - format!("Error decoding genesis hash: {}", err) - )), + Err(err) => + Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))), }, None => Ok(None), } @@ -426,12 +593,126 @@ impl DatabaseType { } } +pub(crate) struct JoinInput<'a, 'b>(&'a [u8], &'b [u8]); + +pub(crate) fn join_input<'a, 'b>(i1: &'a [u8], i2: &'b [u8]) -> JoinInput<'a, 'b> { + JoinInput(i1, i2) +} + +impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { + fn remaining_len(&mut self) -> Result, codec::Error> { + Ok(Some(self.0.len() + self.1.len())) + } + + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + let mut read = 0; + if self.0.len() > 0 { + read = std::cmp::min(self.0.len(), into.len()); + self.0.read(&mut into[..read])?; + } + if read < into.len() { + self.1.read(&mut into[read..])?; + } + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; + use crate::{KeepBlocks, TransactionStorageMode}; + use codec::Input; + use sc_state_db::PruningMode; use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + use std::path::PathBuf; type Block = RawBlock>; + #[cfg(any(feature = "with-kvdb-rocksdb", test))] + #[test] + fn database_type_subdir_migration() { + type Block = RawBlock>; + + fn check_dir_for_db_type( + db_type: DatabaseType, + mut source: DatabaseSource, + db_check_file: &str, + ) { + let base_path = tempfile::TempDir::new().unwrap(); + let old_db_path = base_path.path().join("chains/dev/db"); + + source.set_path(&old_db_path); + let settings = db_settings(source.clone()); + + { + let db_res = open_database::(&settings, db_type); + assert!(db_res.is_ok(), "New database should be created."); + assert!(old_db_path.join(db_check_file).exists()); + assert!(!old_db_path.join(db_type.as_str()).join("db_version").exists()); + } + + source.set_path(&old_db_path.join(db_type.as_str())); + let settings = db_settings(source); + let db_res = open_database::(&settings, db_type); + assert!(db_res.is_ok(), "Reopening the db with the same role should work"); + // check if the database dir had been migrated + assert!(!old_db_path.join(db_check_file).exists()); + assert!(old_db_path.join(db_type.as_str()).join(db_check_file).exists()); + } + + check_dir_for_db_type( + DatabaseType::Light, + DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 }, + "db_version", + ); + check_dir_for_db_type( + DatabaseType::Full, + DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 }, + "db_version", + ); + + #[cfg(feature = "with-parity-db")] + check_dir_for_db_type( + DatabaseType::Light, + DatabaseSource::ParityDb { path: PathBuf::new() }, + "metadata", + ); + #[cfg(feature = "with-parity-db")] + check_dir_for_db_type( + DatabaseType::Full, + DatabaseSource::ParityDb { path: PathBuf::new() }, + "metadata", + ); + + // check failure on reopening with wrong role + { + let base_path = tempfile::TempDir::new().unwrap(); + let old_db_path = base_path.path().join("chains/dev/db"); + + let source = DatabaseSource::RocksDb { path: old_db_path.clone(), cache_size: 128 }; + let settings = db_settings(source); + { + let db_res = open_database::(&settings, DatabaseType::Full); + assert!(db_res.is_ok(), "New database should be created."); + + // check if the database 
dir had been migrated
+				assert!(old_db_path.join("db_version").exists());
+				assert!(!old_db_path.join("light/db_version").exists());
+				assert!(!old_db_path.join("full/db_version").exists());
+			}
+			let source = DatabaseSource::RocksDb {
+				path: old_db_path.join(DatabaseType::Light.as_str()),
+				cache_size: 128,
+			};
+			let settings = db_settings(source);
+			let db_res = open_database::<Block>(&settings, DatabaseType::Light);
+			assert!(db_res.is_err(), "Opening a light database in full role should fail");
+			// assert nothing was changed
+			assert!(old_db_path.join("db_version").exists());
+			assert!(!old_db_path.join("light/db_version").exists());
+			assert!(!old_db_path.join("full/db_version").exists());
+		}
+	}
+
 	#[test]
 	fn number_index_key_doesnt_panic() {
 		let id = BlockId::<Block>::Number(72340207214430721);
@@ -446,4 +727,162 @@ mod tests {
 		assert_eq!(DatabaseType::Full.as_str(), "full");
 		assert_eq!(DatabaseType::Light.as_str(), "light");
 	}
+
+	#[test]
+	fn join_input_works() {
+		let buf1 = [1, 2, 3, 4];
+		let buf2 = [5, 6, 7, 8];
+		let mut test = [0, 0, 0];
+		let mut joined = join_input(buf1.as_ref(), buf2.as_ref());
+		assert_eq!(joined.remaining_len().unwrap(), Some(8));
+
+		joined.read(&mut test).unwrap();
+		assert_eq!(test, [1, 2, 3]);
+		assert_eq!(joined.remaining_len().unwrap(), Some(5));
+
+		joined.read(&mut test).unwrap();
+		assert_eq!(test, [4, 5, 6]);
+		assert_eq!(joined.remaining_len().unwrap(), Some(2));
+
+		joined.read(&mut test[0..2]).unwrap();
+		assert_eq!(test, [7, 8, 6]);
+		assert_eq!(joined.remaining_len().unwrap(), Some(0));
+	}
+
+	fn db_settings(source: DatabaseSource) -> DatabaseSettings {
+		DatabaseSettings {
+			state_cache_size: 0,
+			state_cache_child_ratio: None,
+			state_pruning: PruningMode::ArchiveAll,
+			source,
+			keep_blocks: KeepBlocks::All,
+			transaction_storage: TransactionStorageMode::BlockBody,
+		}
+	}
+
+	#[cfg(feature = "with-parity-db")]
+	#[cfg(any(feature = "with-kvdb-rocksdb", test))]
+	#[test]
+	fn test_open_database_auto_new() {
+		let db_dir = tempfile::TempDir::new().unwrap();
+		let db_path = db_dir.path().to_owned();
+		let paritydb_path = db_path.join("paritydb");
+		let rocksdb_path = db_path.join("rocksdb_path");
+		let source = DatabaseSource::Auto {
+			paritydb_path: paritydb_path.clone(),
+			rocksdb_path: rocksdb_path.clone(),
+			cache_size: 128,
+		};
+		let mut settings = db_settings(source);
+
+		// it should create new auto (paritydb) database
+		{
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "New database should be created.");
+		}
+
+		// it should reopen the existing auto (paritydb) database
+		{
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "Existing parity database should be reopened");
+		}
+
+		// it should create a new rocksdb database when the source is switched to rocksdb
+		{
+			settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 };
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "New database should be opened.");
+		}
+
+		// it should reopen the existing paritydb database
+		{
+			settings.source = DatabaseSource::ParityDb { path: paritydb_path };
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "Existing parity database should be reopened");
+		}
+	}
+
+	#[cfg(feature = "with-parity-db")]
+	#[cfg(any(feature = "with-kvdb-rocksdb", test))]
+	#[test]
+	fn test_open_database_rocksdb_new() {
+		let db_dir = tempfile::TempDir::new().unwrap();
+		let db_path = db_dir.path().to_owned();
+		let paritydb_path = db_path.join("paritydb");
+		let rocksdb_path = db_path.join("rocksdb_path");
+
+		let source = DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 };
+		let mut settings = db_settings(source);
+
+		// it should create new rocksdb database
+		{
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "New rocksdb database should be created");
+		}
+
+		// it should reopen existing auto (rocksdb) database
+		{
+			settings.source = DatabaseSource::Auto {
+				paritydb_path: paritydb_path.clone(),
+				rocksdb_path: rocksdb_path.clone(),
+				cache_size: 128,
+			};
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
+		}
+
+		// it should create a new paritydb database when the source is switched to paritydb
+		{
+			settings.source = DatabaseSource::ParityDb { path: paritydb_path };
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "New paritydb database should be created");
+		}
+
+		// it should reopen the existing rocksdb database
+		{
+			settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 };
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
+		}
+	}
+
+	#[cfg(feature = "with-parity-db")]
+	#[cfg(any(feature = "with-kvdb-rocksdb", test))]
+	#[test]
+	fn test_open_database_paritydb_new() {
+		let db_dir = tempfile::TempDir::new().unwrap();
+		let db_path = db_dir.path().to_owned();
+		let paritydb_path = db_path.join("paritydb");
+		let rocksdb_path = db_path.join("rocksdb_path");
+
+		let source = DatabaseSource::ParityDb { path: paritydb_path.clone() };
+		let mut settings = db_settings(source);
+
+		// it should create new paritydb database
+		{
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "New database should be created.");
+		}
+
+		// it should reopen the existing paritydb database
+		{
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "Existing parity database should be reopened");
+		}
+
+		// it should create a new rocksdb database when the source is switched to rocksdb
+		{
+			settings.source =
+				DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 };
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "New rocksdb database should be created");
+		}
+
+		// it should reopen the existing paritydb database via the auto source
+		{
+			settings.source = DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 };
+			let db_res = open_database::<Block>(&settings, DatabaseType::Full);
+			assert!(db_res.is_ok(), "Existing parity database should be reopened");
+		}
+	}
 }
diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml
index b88e8926be141..b7e2595b8e169 100644
--- a/client/executor/Cargo.toml
+++ b/client/executor/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sc-executor"
-version = "0.8.0"
+version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
@@ -14,51 +14,45 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-derive_more = "0.99.2"
-codec = { package = "parity-scale-codec", version = "1.3.4" }
-sp-io = { version = "2.0.0", path = "../../primitives/io" }
-sp-core = { version = "2.0.0", path = "../../primitives/core" }
-sp-tasks = { version = "2.0.0", path = "../../primitives/tasks" }
-sp-trie = { version = "2.0.0", path =
"../../primitives/trie" } -sp-serializer = { version = "2.0.0", path = "../../primitives/serializer" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } -wasmi = "0.6.2" -parity-wasm = "0.41.0" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-tasks = { version = "4.0.0-dev", path = "../../primitives/tasks" } +sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } +sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } +sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" } +wasmi = "0.9.0" lazy_static = "1.4.0" -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } -sc-executor-common = { version = "0.8.0", path = "common" } -sc-executor-wasmi = { version = "0.8.0", path = "wasmi" } -sc-executor-wasmtime = { version = "0.8.0", path = "wasmtime", optional = true } -parking_lot = "0.10.0" +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-wasm-interface = { version = "4.0.0-dev", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "4.0.0-dev", path = "../../primitives/runtime-interface" } +sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.10.0-dev", path = "common" } +sc-executor-wasmi = { version = "0.10.0-dev", path = "wasmi" } +sc-executor-wasmtime = { version = "0.10.0-dev", path = "wasmtime", optional = true } +parking_lot = "0.11.1" log = "0.4.8" -libsecp256k1 = "0.3.4" +libsecp256k1 = "0.6" [dev-dependencies] -assert_matches = "1.3.0" wat = "1.0" hex-literal = "0.3.1" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -test-case = "0.3.3" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -sc-tracing = { version = "2.0.0", path = "../tracing" } -tracing = "0.1.19" -tracing-subscriber = "0.2.10" +sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-maybe-compressed-blob = { version = "4.0.0-dev", path = "../../primitives/maybe-compressed-blob" } +sc-tracing = { version = "4.0.0-dev", path = "../tracing" } +tracing = "0.1.25" +tracing-subscriber = "0.2.19" +paste = "1.0" +regex = "1" [features] -default = [ "std" ] +default = ["std"] # This crate does not have `no_std` support, we just require this for tests std = [] wasm-extern-trace = [] -wasmtime = [ - "sc-executor-wasmtime", -] -wasmi-errno = [ - "wasmi/errno" -] +wasmtime = ["sc-executor-wasmtime"] +wasmi-errno = ["wasmi/errno"] +wasmer-sandbox = ["sc-executor-common/wasmer-sandbox"] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 64ed23598f47c..c4fc8c27f7544 100644 --- a/client/executor/common/Cargo.toml +++ 
b/client/executor/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-common" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,16 +14,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4.8" derive_more = "0.99.2" -parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.4" } -wasmi = "0.6.2" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } +pwasm-utils = "0.18.0" +codec = { package = "parity-scale-codec", version = "2.0.0" } +wasmi = "0.9.0" +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sc-allocator = { version = "4.0.0-dev", path = "../../allocator" } +sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-interface" } +sp-maybe-compressed-blob = { version = "4.0.0-dev", path = "../../../primitives/maybe-compressed-blob" } +sp-serializer = { version = "3.0.0", path = "../../../primitives/serializer" } +thiserror = "1.0.21" +environmental = "1.1.3" + +wasmer = { version = "1.0", optional = true } +wasmer-compiler-singlepass = { version = "1.0", optional = true } [features] default = [] +wasmer-sandbox = [ + "wasmer", + "wasmer-compiler-singlepass", +] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index caed63c183e68..6ad4802e57a8b 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,92 +25,89 @@ use wasmi; pub type Result = std::result::Result; /// Error type. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Unserializable Data - InvalidData(sp_serializer::Error), - /// Trap occurred during execution - Trap(wasmi::Trap), - /// Wasmi loading/instantiating error - Wasmi(wasmi::Error), - /// Error in the API. Parameter is an error message. - #[from(ignore)] - ApiError(String), - /// Method is not found - #[display(fmt="Method not found: '{}'", _0)] - #[from(ignore)] + #[error("Unserializable data encountered")] + InvalidData(#[from] sp_serializer::Error), + + #[error(transparent)] + Trap(#[from] wasmi::Trap), + + #[error(transparent)] + Wasmi(#[from] wasmi::Error), + + #[error("Error calling api function: {0}")] + ApiError(Box), + + #[error("Method not found: '{0}'")] MethodNotFound(String), - /// Code is invalid (expected single byte) - #[display(fmt="Invalid Code: {}", _0)] - #[from(ignore)] + + #[error("Invalid Code (expected single byte): '{0}'")] InvalidCode(String), - /// Could not get runtime version. - #[display(fmt="On-chain runtime does not specify version")] + + #[error("On-chain runtime does not specify version")] VersionInvalid, - /// Externalities have failed. 
- #[display(fmt="Externalities error")] + + #[error("Externalities error")] Externalities, - /// Invalid index. - #[display(fmt="Invalid index provided")] + + #[error("Invalid index provided")] InvalidIndex, - /// Invalid return type. - #[display(fmt="Invalid type returned (should be u64)")] + + #[error("Invalid type returned (should be u64)")] InvalidReturn, - /// Runtime failed. - #[display(fmt="Runtime error")] + + #[error("Runtime error")] Runtime, - /// Runtime panicked. - #[display(fmt="Runtime panicked: {}", _0)] - #[from(ignore)] + + #[error("Runtime panicked: {0}")] RuntimePanicked(String), - /// Invalid memory reference. - #[display(fmt="Invalid memory reference")] + + #[error("Invalid memory reference")] InvalidMemoryReference, - /// The runtime must provide a global named `__heap_base` of type i32 for specifying where the - /// allocator is allowed to place its data. - #[display(fmt="The runtime doesn't provide a global named `__heap_base`")] + + #[error("The runtime doesn't provide a global named `__heap_base` of type `i32`")] HeapBaseNotFoundOrInvalid, - /// The runtime WebAssembly module is not allowed to have the `start` function. - #[display(fmt="The runtime has the `start` function")] + + #[error("The runtime must not have the `start` function defined")] RuntimeHasStartFn, - /// Some other error occurred + + #[error("Other: {0}")] Other(String), - /// Some error occurred in the allocator - #[display(fmt="Error in allocator: {}", _0)] - Allocator(sp_allocator::Error), - /// Execution of a host function failed. - #[display(fmt="Host function {} execution failed with: {}", _0, _1)] + + #[error(transparent)] + Allocator(#[from] sc_allocator::Error), + + #[error("Host function {0} execution failed with: {1}")] FunctionExecution(String, String), - /// No table is present. - /// - /// Call was requested that requires table but none was present in the instance. - #[display(fmt="No table exported by wasm blob")] + + #[error("No table exported by wasm blob")] NoTable, - /// No table entry is present. - /// - /// Call was requested that requires specific entry in the table to be present. - #[display(fmt="No table entry with index {} in wasm blob exported table", _0)] - #[from(ignore)] + + #[error("No table entry with index {0} in wasm blob exported table")] NoTableEntryWithIndex(u32), - /// Table entry is not a function. - #[display(fmt="Table element with index {} is not a function in wasm blob exported table", _0)] - #[from(ignore)] + + #[error("Table element with index {0} is not a function in wasm blob exported table")] TableElementIsNotAFunction(u32), - /// Function in table is null and thus cannot be called. 
-	#[display(fmt="Table entry with index {} in wasm blob is null", _0)]
-	#[from(ignore)]
+
+	#[error("Table entry with index {0} in wasm blob is null")]
 	FunctionRefIsNull(u32),
-}

-impl std::error::Error for Error {
-	fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-		match self {
-			Error::InvalidData(ref err) => Some(err),
-			Error::Trap(ref err) => Some(err),
-			Error::Wasmi(ref err) => Some(err),
-			_ => None,
-		}
-	}
+	#[error(transparent)]
+	RuntimeConstruction(#[from] WasmError),
+
+	#[error("Shared memory is not supported")]
+	SharedMemUnsupported,
+
+	#[error("Imported globals are not supported yet")]
+	ImportedGlobalsUnsupported,
+
+	#[error("initializer expression can have only up to 2 expressions in wasm 1.0")]
+	InitializerHasTooManyExpressions,
+
+	#[error("Invalid initializer expression provided {0}")]
+	InvalidInitializerExpression(String),
 }

 impl wasmi::HostError for Error {}
@@ -121,9 +118,9 @@ impl From<&'static str> for Error {
 	}
 }

-impl From<WasmError> for Error {
-	fn from(err: WasmError) -> Error {
-		Error::Other(err.to_string())
+impl From<String> for Error {
+	fn from(err: String) -> Error {
+		Error::Other(err)
 	}
 }

@@ -151,3 +148,5 @@ pub enum WasmError {
 	/// Other error happened.
 	Other(String),
 }
+
+impl std::error::Error for WasmError {}
diff --git a/client/executor/common/src/lib.rs b/client/executor/common/src/lib.rs
index 7f3864e6152fb..99b927e062038 100644
--- a/client/executor/common/src/lib.rs
+++ b/client/executor/common/src/lib.rs
@@ -1,24 +1,28 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.

-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.

 // You should have received a copy of the GNU General Public License
-// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
+// along with this program. If not, see <https://www.gnu.org/licenses/>.

 //! A set of common definitions that are needed for defining execution engines.

 #![warn(missing_docs)]
+#![deny(unused_crate_dependencies)]

 pub mod error;
+pub mod runtime_blob;
 pub mod sandbox;
 pub mod util;
 pub mod wasm_runtime;
diff --git a/client/executor/common/src/runtime_blob/data_segments_snapshot.rs b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs
new file mode 100644
index 0000000000000..5c3fedbdc963e
--- /dev/null
+++ b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs
@@ -0,0 +1,87 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use super::RuntimeBlob;
+use crate::error::{self, Error};
+use pwasm_utils::parity_wasm::elements::Instruction;
+use std::mem;
+
+/// This is a snapshot of data segments specialized for a particular instantiation.
+///
+/// Note that this assumes that no mutable globals are used.
+#[derive(Clone)]
+pub struct DataSegmentsSnapshot {
+	/// The list of data segments represented by (offset, contents).
+	data_segments: Vec<(u32, Vec<u8>)>,
+}
+
+impl DataSegmentsSnapshot {
+	/// Create a snapshot from the data segments from the module.
+	pub fn take(module: &RuntimeBlob) -> error::Result<Self> {
+		let data_segments = module
+			.data_segments()
+			.into_iter()
+			.map(|mut segment| {
+				// Just replace contents of the segment since the segments will be discarded later
+				// anyway.
+				let contents = mem::replace(segment.value_mut(), vec![]);
+
+				let init_expr = match segment.offset() {
+					Some(offset) => offset.code(),
+					// Return if the segment is passive
+					None => return Err(Error::SharedMemUnsupported),
+				};
+
+				// [op, End]
+				if init_expr.len() != 2 {
+					return Err(Error::InitializerHasTooManyExpressions)
+				}
+				let offset = match &init_expr[0] {
+					Instruction::I32Const(v) => *v as u32,
+					Instruction::GetGlobal(_) => {
+						// In a valid wasm file, initializer expressions can only refer to imported
+						// globals.
+						//
+						// At the moment of writing the Substrate Runtime Interface does not provide
+						// any globals. There is nothing that prevents us from supporting this
+						// if/when we gain those.
+						return Err(Error::ImportedGlobalsUnsupported)
+					},
+					insn => return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))),
+				};
+
+				Ok((offset, contents))
+			})
+			.collect::<error::Result<Vec<_>>>()?;
+
+		Ok(Self { data_segments })
+	}
+
+	/// Apply the given snapshot to a linear memory.
+	///
+	/// Linear memory interface is represented by a closure `memory_set`.
+	pub fn apply<E>(
+		&self,
+		mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>,
+	) -> Result<(), E> {
+		for (offset, contents) in &self.data_segments {
+			memory_set(*offset, contents)?;
+		}
+		Ok(())
+	}
+}
diff --git a/client/executor/common/src/runtime_blob/globals_snapshot.rs b/client/executor/common/src/runtime_blob/globals_snapshot.rs
new file mode 100644
index 0000000000000..6a29ff8bae365
--- /dev/null
+++ b/client/executor/common/src/runtime_blob/globals_snapshot.rs
@@ -0,0 +1,109 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
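// A runnable sketch (not from the PR) of the `DataSegmentsSnapshot` lifecycle
// defined in the file above: take a snapshot once per module, then apply it to
// every fresh instance. A `Vec<u8>` stands in for engine memory here, and both
// functions are names invented for this illustration.
fn apply_segments<E>(
	segments: &[(u32, Vec<u8>)],
	mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>,
) -> Result<(), E> {
	// Mirrors `DataSegmentsSnapshot::apply`: push each segment at its offset.
	for (offset, contents) in segments {
		memory_set(*offset, contents)?;
	}
	Ok(())
}

fn data_segments_demo() {
	let mut memory = vec![0u8; 64]; // toy linear memory
	let segments = vec![(8u32, vec![1, 2, 3])]; // one segment at offset 8
	apply_segments(&segments, |offset, contents| -> Result<(), ()> {
		let offset = offset as usize;
		memory[offset..offset + contents.len()].copy_from_slice(contents);
		Ok(())
	})
	.unwrap();
	assert_eq!(&memory[8..11], &[1, 2, 3]);
}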
+
+use super::RuntimeBlob;
+
+/// Saved value of particular exported global.
+struct SavedValue<Global> {
+	/// The handle of this global which can be used to refer to this global.
+	handle: Global,
+	/// The global value that was observed during the snapshot creation.
+	value: sp_wasm_interface::Value,
+}
+
+/// An adapter for a wasm module instance that is focused on getting and setting globals.
+pub trait InstanceGlobals {
+	/// A handle to a global which can be used to get or set a global variable. This is supposed to
+	/// be a lightweight handle, like an index or an Rc-like smart-pointer, which is cheap to clone.
+	type Global: Clone;
+	/// Get a handle to a global by its export name.
+	///
+	/// The requested export must exist in the export list, and it must be a mutable global.
+	fn get_global(&self, export_name: &str) -> Self::Global;
+	/// Get the current value of the global.
+	fn get_global_value(&self, global: &Self::Global) -> sp_wasm_interface::Value;
+	/// Update the current value of the global.
+	///
+	/// The global behind the handle is guaranteed to be mutable and the value to be the same type
+	/// as the global.
+	fn set_global_value(&self, global: &Self::Global, value: sp_wasm_interface::Value);
+}
+
+/// A set of exposed mutable globals.
+///
+/// This is the set of globals required to create a [`GlobalsSnapshot`] and that are collected from
+/// a runtime blob that was instrumented by
+/// [`RuntimeBlob::expose_mutable_globals`](super::RuntimeBlob::expose_mutable_globals).
+///
+/// If the code wasn't instrumented then it would be empty and the snapshot would do nothing.
+pub struct ExposedMutableGlobalsSet(Vec<String>);
+
+impl ExposedMutableGlobalsSet {
+	/// Collect the set from the given runtime blob. See the struct documentation for details.
+	pub fn collect(runtime_blob: &RuntimeBlob) -> Self {
+		let global_names =
+			runtime_blob.exported_internal_global_names().map(ToOwned::to_owned).collect();
+		Self(global_names)
+	}
+}
+
+/// A snapshot of global variable values. This snapshot can be later used for restoring the
+/// values to the preserved state.
+///
+/// Technically, a snapshot stores only values of mutable global variables. This is because
+/// immutable global variables always have the same values.
+///
+/// We take it from an instance rather than from a module because the start function could
+/// potentially change any of the mutable global values.
+pub struct GlobalsSnapshot<Global>(Vec<SavedValue<Global>>);
+
+impl<Global> GlobalsSnapshot<Global> {
+	/// Take a snapshot of global variables for a given instance.
+	///
+	/// # Panics
+	///
+	/// This function panics if the instance doesn't correspond to the module from which the
+	/// [`ExposedMutableGlobalsSet`] was collected.
+	pub fn take<Instance>(mutable_globals: &ExposedMutableGlobalsSet, instance: &Instance) -> Self
+	where
+		Instance: InstanceGlobals<Global = Global>,
+	{
+		let global_names = &mutable_globals.0;
+		let mut saved_values = Vec::with_capacity(global_names.len());
+
+		for global_name in global_names {
+			let handle = instance.get_global(global_name);
+			let value = instance.get_global_value(&handle);
+			saved_values.push(SavedValue { handle, value });
+		}
+
+		Self(saved_values)
+	}
+
+	/// Apply the snapshot to the given instance.
+	///
+	/// This instance must be the same that was used for creation of this snapshot.
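+	///
+	/// # Example
+	///
+	/// A minimal sketch of the intended take/restore round trip. The names `runtime_blob`
+	/// and `instance` are illustrative only; `instance` is any type implementing
+	/// [`InstanceGlobals`]:
+	///
+	/// ```ignore
+	/// let exposed = ExposedMutableGlobalsSet::collect(&runtime_blob);
+	/// let snapshot = GlobalsSnapshot::take(&exposed, &instance);
+	/// // ... run the instance, potentially mutating its globals ...
+	/// snapshot.apply(&instance); // roll the mutable globals back
+	/// ```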
+	pub fn apply<Instance>(&self, instance: &Instance)
+	where
+		Instance: InstanceGlobals<Global = Global>,
+	{
+		for saved_value in &self.0 {
+			instance.set_global_value(&saved_value.handle, saved_value.value);
+		}
+	}
+}
diff --git a/client/executor/common/src/runtime_blob/mod.rs b/client/executor/common/src/runtime_blob/mod.rs
new file mode 100644
index 0000000000000..1af2708d3eb47
--- /dev/null
+++ b/client/executor/common/src/runtime_blob/mod.rs
@@ -0,0 +1,58 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! This module allows for inspection and instrumentation, i.e. modifying the module to alter its
+//! structure or behavior, of a wasm module.
+//!
+//! ## Instrumentation
+//!
+//! In an ideal world, there would be no instrumentation. However, in the real world the execution
+//! engines we use are somewhat limited in their APIs or abilities.
+//!
+//! To give you some examples:
+//!
+//! - wasmi allows reaching non-exported mutable globals so that we could reset them. Wasmtime
+//!   doesn’t support that.
+//!
+//!   We need to reset the globals because when we execute the Substrate Runtime, we do not drop
+//!   and create the instance anew, instead we restore some selected parts of the state.
+//!
+//! - stack depth metering can be performed via instrumentation or deferred to the engine, e.g.
+//!   added directly in machine code. Implementing this in machine code is rather cumbersome, so
+//!   instrumentation looks like a good solution.
+//!
+//!   Stack depth metering is needed to make a wasm blob execution deterministic, which in turn
+//!   is needed by the Parachain Validation Function in Polkadot.
+//!
+//! ## Inspection
+//!
+//! Inspection of a wasm module may be needed to extract some useful information, such as
+//! extracting a data segment snapshot, which is helpful for quickly restoring the initial state
+//! of instances. Inspection can also be useful to prove that a wasm module possesses certain
+//! properties, such as being free of any floating point operations, which is a useful step
+//! towards making instances produced from such a module deterministic.
+
+mod data_segments_snapshot;
+mod globals_snapshot;
+mod runtime_blob;
+
+pub use data_segments_snapshot::DataSegmentsSnapshot;
+pub use globals_snapshot::{ExposedMutableGlobalsSet, GlobalsSnapshot, InstanceGlobals};
+pub use runtime_blob::RuntimeBlob;
diff --git a/client/executor/common/src/runtime_blob/runtime_blob.rs b/client/executor/common/src/runtime_blob/runtime_blob.rs
new file mode 100644
index 0000000000000..6fb9303e07758
--- /dev/null
+++ b/client/executor/common/src/runtime_blob/runtime_blob.rs
@@ -0,0 +1,137 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::error::WasmError;
+use pwasm_utils::{
+	export_mutable_globals,
+	parity_wasm::elements::{deserialize_buffer, serialize, DataSegment, Internal, Module},
+};
+
+/// A bunch of information collected from a WebAssembly module.
+#[derive(Clone)]
+pub struct RuntimeBlob {
+	raw_module: Module,
+}
+
+impl RuntimeBlob {
+	/// Create `RuntimeBlob` from the given wasm code. Will attempt to decompress the code before
+	/// deserializing it.
+	///
+	/// See [`sp_maybe_compressed_blob`] for details about decompression.
+	pub fn uncompress_if_needed(wasm_code: &[u8]) -> Result<Self, WasmError> {
+		use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT;
+		let wasm_code = sp_maybe_compressed_blob::decompress(wasm_code, CODE_BLOB_BOMB_LIMIT)
+			.map_err(|e| WasmError::Other(format!("Decompression error: {:?}", e)))?;
+		Self::new(&wasm_code)
+	}
+
+	/// Create `RuntimeBlob` from the given wasm code.
+	///
+	/// Returns `Err` if the wasm code cannot be deserialized.
+	pub fn new(wasm_code: &[u8]) -> Result<Self, WasmError> {
+		let raw_module: Module = deserialize_buffer(wasm_code)
+			.map_err(|e| WasmError::Other(format!("cannot deserialize module: {:?}", e)))?;
+		Ok(Self { raw_module })
+	}
+
+	/// Extract the data segments from the given wasm code.
+	pub(super) fn data_segments(&self) -> Vec<DataSegment> {
+		self.raw_module.data_section().map(|ds| ds.entries()).unwrap_or(&[]).to_vec()
+	}
+
+	/// The number of globals defined locally in this module.
+	pub fn declared_globals_count(&self) -> u32 {
+		self.raw_module
+			.global_section()
+			.map(|gs| gs.entries().len() as u32)
+			.unwrap_or(0)
+	}
+
+	/// The number of imports of globals.
+	pub fn imported_globals_count(&self) -> u32 {
+		self.raw_module.import_section().map(|is| is.globals() as u32).unwrap_or(0)
+	}
+
+	/// Perform an instrumentation that makes sure that the mutable globals are exported.
+	pub fn expose_mutable_globals(&mut self) {
+		export_mutable_globals(&mut self.raw_module, "exported_internal_global");
+	}
+
+	/// Run a pass that instruments this module so as to introduce a deterministic stack height
+	/// limit.
+	///
+	/// It will introduce a global mutable counter. The instrumentation will increase the counter
+	/// according to the "cost" of the callee. If the cost exceeds the `stack_depth_limit` constant,
+	/// the instrumentation will trap. The counter will be decreased as soon as the callee returns.
+	///
+	/// The stack cost of a function is computed based on how many locals there are and the maximum
+	/// depth of the wasm operand stack.
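+	///
+	/// A hedged usage sketch (the `512` limit below is purely illustrative; the
+	/// appropriate limit is chosen by the embedder):
+	///
+	/// ```ignore
+	/// let blob = RuntimeBlob::new(&wasm_code)?;
+	/// let instrumented_wasm = blob.inject_stack_depth_metering(512)?.serialize();
+	/// ```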
+	pub fn inject_stack_depth_metering(self, stack_depth_limit: u32) -> Result<Self, WasmError> {
+		let injected_module =
+			pwasm_utils::stack_height::inject_limiter(self.raw_module, stack_depth_limit).map_err(
+				|e| WasmError::Other(format!("cannot inject the stack limiter: {:?}", e)),
+			)?;
+
+		Ok(Self { raw_module: injected_module })
+	}
+
+	/// Returns `true` if a function with the name `entry_point` is exported from this module.
+	pub fn entry_point_exists(&self, entry_point: &str) -> bool {
+		self.raw_module
+			.export_section()
+			.map(|e| {
+				e.entries().iter().any(|e| {
+					matches!(e.internal(), Internal::Function(_)) && e.field() == entry_point
+				})
+			})
+			.unwrap_or_default()
+	}
+
+	/// Returns an iterator of all globals which were exported by [`expose_mutable_globals`].
+	pub(super) fn exported_internal_global_names<'module>(
+		&'module self,
+	) -> impl Iterator<Item = &'module str> {
+		let exports = self.raw_module.export_section().map(|es| es.entries()).unwrap_or(&[]);
+		exports.iter().filter_map(|export| match export.internal() {
+			Internal::Global(_) if export.field().starts_with("exported_internal_global") =>
+				Some(export.field()),
+			_ => None,
+		})
+	}
+
+	/// Scans the wasm blob for the first custom section with a name that matches the given one.
+	/// Returns the contents of the custom section if found or `None` otherwise.
+	pub fn custom_section_contents(&self, section_name: &str) -> Option<&[u8]> {
+		self.raw_module
+			.custom_sections()
+			.find(|cs| cs.name() == section_name)
+			.map(|cs| cs.payload())
+	}
+
+	/// Consumes this runtime blob and serializes it.
+	pub fn serialize(self) -> Vec<u8> {
+		serialize(self.raw_module).expect("serializing into a vec should succeed; qed")
+	}
+
+	/// Destructure this structure into the underlying parity-wasm Module.
+	pub fn into_inner(self) -> Module {
+		self.raw_module
+	}
+}
diff --git a/client/executor/common/src/sandbox.rs b/client/executor/common/src/sandbox.rs
index b2c35b7582718..b627294241252 100644
--- a/client/executor/common/src/sandbox.rs
+++ b/client/executor/common/src/sandbox.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -18,18 +18,26 @@
 //! This module implements sandboxing support in the runtime.
 //!
-//! Sandboxing is baked by wasmi at the moment. In future, however, we would like to add/switch to
-//! a compiled execution engine.
+//! Sandboxing is backed by wasmi and wasmer, depending on the configuration.
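+//!
+//! In either case, host functions are not linked directly into the guest instance. Every guest
+//! import is instead routed through a supervisor-provided "dispatch thunk": arguments are
+//! serialized into the supervisor's memory, the thunk is invoked with the index of the target
+//! function, and the result comes back as an `i64` that packs the pointer (upper 32 bits) and
+//! length (lower 32 bits) of the serialized return value.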
-use crate::error::{Result, Error};
-use std::{collections::HashMap, rc::Rc};
+use crate::{
+	error::{Error, Result},
+	util,
+};
 use codec::{Decode, Encode};
 use sp_core::sandbox as sandbox_primitives;
+use sp_wasm_interface::{FunctionContext, Pointer, WordSize};
+use std::{collections::HashMap, rc::Rc};
 use wasmi::{
-	Externals, ImportResolver, MemoryInstance, MemoryRef, Module, ModuleInstance,
-	ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, memory_units::Pages,
+	memory_units::Pages, Externals, ImportResolver, MemoryInstance, Module, ModuleInstance,
+	RuntimeArgs, RuntimeValue, Trap, TrapKind,
 };
-use sp_wasm_interface::{FunctionContext, Pointer, WordSize};
+
+#[cfg(feature = "wasmer-sandbox")]
+use crate::util::wasmer::MemoryWrapper as WasmerMemoryWrapper;
+use crate::util::wasmi::MemoryWrapper as WasmiMemoryWrapper;
+
+environmental::environmental!(SandboxContextStore: trait SandboxContext);

 /// Index of a function inside the supervisor.
 ///
@@ -46,34 +54,59 @@ impl From<SupervisorFuncIndex> for usize {
 /// Index of a function within guest index space.
 ///
-/// This index is supposed to be used with as index for `Externals`.
+/// This index is supposed to be used as index for `Externals`.
 #[derive(Copy, Clone, Debug, PartialEq)]
 struct GuestFuncIndex(usize);

 /// This struct holds a mapping from guest index space to supervisor.
 struct GuestToSupervisorFunctionMapping {
+	/// Positions of elements in this vector are interpreted
+	/// as indices of guest functions and are mapped to
+	/// corresponding supervisor function indices.
 	funcs: Vec<SupervisorFuncIndex>,
 }

 impl GuestToSupervisorFunctionMapping {
+	/// Create an empty function mapping
 	fn new() -> GuestToSupervisorFunctionMapping {
 		GuestToSupervisorFunctionMapping { funcs: Vec::new() }
 	}

+	/// Add a new supervisor function to the mapping.
+	/// Returns a newly assigned guest function index.
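+	/// For example, the first two supervisor functions registered here are assigned
+	/// guest indices 0 and 1 respectively, regardless of their supervisor indices.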
 	fn define(&mut self, supervisor_func: SupervisorFuncIndex) -> GuestFuncIndex {
 		let idx = self.funcs.len();
 		self.funcs.push(supervisor_func);
 		GuestFuncIndex(idx)
 	}

+	/// Find supervisor function index by its corresponding guest function index
 	fn func_by_guest_index(&self, guest_func_idx: GuestFuncIndex) -> Option<SupervisorFuncIndex> {
 		self.funcs.get(guest_func_idx.0).cloned()
 	}
 }

+/// Holds sandbox function and memory imports and performs name resolution
 struct Imports {
+	/// Maps qualified function name to its guest function index
 	func_map: HashMap<(Vec<u8>, Vec<u8>), GuestFuncIndex>,
-	memories_map: HashMap<(Vec<u8>, Vec<u8>), MemoryRef>,
+
+	/// Maps qualified field name to its memory reference
+	memories_map: HashMap<(Vec<u8>, Vec<u8>), Memory>,
+}
+
+impl Imports {
+	fn func_by_name(&self, module_name: &str, func_name: &str) -> Option<GuestFuncIndex> {
+		self.func_map
+			.get(&(module_name.as_bytes().to_owned(), func_name.as_bytes().to_owned()))
+			.cloned()
+	}
+
+	fn memory_by_name(&self, module_name: &str, memory_name: &str) -> Option<Memory> {
+		self.memories_map
+			.get(&(module_name.as_bytes().to_owned(), memory_name.as_bytes().to_owned()))
+			.cloned()
+	}
 }

 impl ImportResolver for Imports {
@@ -83,16 +116,10 @@ impl ImportResolver for Imports {
 		field_name: &str,
 		signature: &::wasmi::Signature,
 	) -> std::result::Result<wasmi::FuncRef, wasmi::Error> {
-		let key = (
-			module_name.as_bytes().to_owned(),
-			field_name.as_bytes().to_owned(),
-		);
-		let idx = *self.func_map.get(&key).ok_or_else(|| {
-			wasmi::Error::Instantiation(format!(
-				"Export {}:{} not found",
-				module_name, field_name
-			))
+		let idx = self.func_by_name(module_name, field_name).ok_or_else(|| {
+			wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name))
 		})?;
+
 		Ok(wasmi::FuncInstance::alloc_host(signature.clone(), idx.0))
 	}

@@ -101,20 +128,22 @@ impl ImportResolver for Imports {
 		module_name: &str,
 		field_name: &str,
 		_memory_type: &::wasmi::MemoryDescriptor,
-	) -> std::result::Result<MemoryRef, wasmi::Error> {
-		let key = (
-			module_name.as_bytes().to_vec(),
-			field_name.as_bytes().to_vec(),
-		);
-		let mem = self.memories_map
-			.get(&key)
-			.ok_or_else(|| {
-				wasmi::Error::Instantiation(format!(
-					"Export {}:{} not found",
-					module_name, field_name
-				))
-			})?
-			.clone();
+	) -> std::result::Result<wasmi::MemoryRef, wasmi::Error> {
+		let mem = self.memory_by_name(module_name, field_name).ok_or_else(|| {
+			wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name))
+		})?;
+
+		let wrapper = mem.as_wasmi().ok_or_else(|| {
+			wasmi::Error::Instantiation(format!(
+				"Unsupported non-wasmi export {}:{}",
+				module_name, field_name
+			))
+		})?;
+
+		// Here we use inner memory reference only to resolve
+		// the imports without accessing the memory contents.
+		let mem = unsafe { wrapper.clone_inner() };
+
 		Ok(mem)
 	}

@@ -124,10 +153,7 @@ impl ImportResolver for Imports {
 		field_name: &str,
 		_global_type: &::wasmi::GlobalDescriptor,
 	) -> std::result::Result<wasmi::GlobalRef, wasmi::Error> {
-		Err(wasmi::Error::Instantiation(format!(
-			"Export {}:{} not found",
-			module_name, field_name
-		)))
+		Err(wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)))
 	}

 	fn resolve_table(
@@ -136,28 +162,20 @@ impl ImportResolver for Imports {
 		field_name: &str,
 		_table_type: &::wasmi::TableDescriptor,
 	) -> std::result::Result<wasmi::TableRef, wasmi::Error> {
-		Err(wasmi::Error::Instantiation(format!(
-			"Export {}:{} not found",
-			module_name, field_name
-		)))
+		Err(wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)))
 	}
 }

-/// This trait encapsulates sandboxing capabilities.
-///
-/// Note that these functions are only called in the `supervisor` context.
-pub trait SandboxCapabilities: FunctionContext {
-	/// Represents a function reference into the supervisor environment.
-	type SupervisorFuncRef;
-
+/// The sandbox context used to execute sandboxed functions.
+pub trait SandboxContext {
 	/// Invoke a function in the supervisor environment.
 	///
-	/// This first invokes the dispatch_thunk function, passing in the function index of the
+	/// This first invokes the dispatch thunk function, passing in the function index of the
 	/// desired function to call and serialized arguments. The thunk calls the desired function
 	/// with the deserialized arguments, then serializes the result into memory and returns a
-	/// reference. The pointer to and length of the result in linear memory is encoded into an i64,
-	/// with the upper 32 bits representing the pointer and the lower 32 bits representing the
-	/// length.
+	/// reference. The pointer to and length of the result in linear memory is encoded into an
+	/// `i64`, with the upper 32 bits representing the pointer and the lower 32 bits representing
+	/// the length.
 	///
 	/// # Errors
 	///
@@ -165,32 +183,39 @@
 	/// execution.
 	fn invoke(
 		&mut self,
-		dispatch_thunk: &Self::SupervisorFuncRef,
 		invoke_args_ptr: Pointer<u8>,
 		invoke_args_len: WordSize,
 		state: u32,
 		func_idx: SupervisorFuncIndex,
 	) -> Result<i64>;
+
+	/// Returns the supervisor context.
+	fn supervisor_context(&mut self) -> &mut dyn FunctionContext;
 }

 /// Implementation of [`Externals`] that allows execution of guest module with
 /// [externals][`Externals`] that might refer functions defined by supervisor.
 ///
 /// [`Externals`]: ../wasmi/trait.Externals.html
-pub struct GuestExternals<'a, FE: SandboxCapabilities + 'a> {
-	supervisor_externals: &'a mut FE,
-	sandbox_instance: &'a SandboxInstance<FE::SupervisorFuncRef>,
+pub struct GuestExternals<'a> {
+	/// Instance of sandboxed module to be dispatched
+	sandbox_instance: &'a SandboxInstance,
+
+	/// External state passed to guest environment, see the `instantiate` function
 	state: u32,
 }

+/// Construct trap error from specified message
 fn trap(msg: &'static str) -> Trap {
 	TrapKind::Host(Box::new(Error::Other(msg.into()))).into()
 }

-fn deserialize_result(serialized_result: &[u8]) -> std::result::Result<Option<RuntimeValue>, Trap> {
+fn deserialize_result(
+	mut serialized_result: &[u8],
+) -> std::result::Result<Option<RuntimeValue>, Trap> {
 	use self::sandbox_primitives::HostError;
 	use sp_wasm_interface::ReturnValue;
-	let result_val = std::result::Result::<ReturnValue, HostError>::decode(&mut &serialized_result[..])
+	let result_val = std::result::Result::<ReturnValue, HostError>::decode(&mut serialized_result)
 		.map_err(|_| trap("Decoding Result failed!"))?;

 	match result_val {
@@ -202,106 +227,118 @@
-impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> {
+impl<'a> Externals for GuestExternals<'a> {
 	fn invoke_index(
 		&mut self,
 		index: usize,
 		args: RuntimeArgs,
 	) -> std::result::Result<Option<RuntimeValue>, Trap> {
-		// Make `index` typesafe again.
-		let index = GuestFuncIndex(index);
-
-		let func_idx = self.sandbox_instance
-			.guest_to_supervisor_mapping
-			.func_by_guest_index(index)
-			.expect(
-				"`invoke_index` is called with indexes registered via `FuncInstance::alloc_host`;
-					`FuncInstance::alloc_host` is called with indexes that was obtained from `guest_to_supervisor_mapping`;
+		SandboxContextStore::with(|sandbox_context| {
+			// Make `index` typesafe again.
+			let index = GuestFuncIndex(index);
+
+			// Convert function index from guest to supervisor space
+			let func_idx = self.sandbox_instance
+				.guest_to_supervisor_mapping
+				.func_by_guest_index(index)
+				.expect(
+					"`invoke_index` is called with indexes registered via `FuncInstance::alloc_host`;
+					`FuncInstance::alloc_host` is called with indexes that were obtained from `guest_to_supervisor_mapping`;
 					`func_by_guest_index` called with `index` can't return `None`; qed"
-			);
-
-		// Serialize arguments into a byte vector.
-		let invoke_args_data: Vec<u8> = args.as_ref()
-			.iter()
-			.cloned()
-			.map(sp_wasm_interface::Value::from)
-			.collect::<Vec<_>>()
-			.encode();
-
-		let state = self.state;
-
-		// Move serialized arguments inside the memory and invoke dispatch thunk and
-		// then free allocated memory.
-		let invoke_args_len = invoke_args_data.len() as WordSize;
-		let invoke_args_ptr = self
-			.supervisor_externals
-			.allocate_memory(invoke_args_len)
-			.map_err(|_| trap("Can't allocate memory in supervisor for the arguments"))?;
-
-		let deallocate = |this: &mut GuestExternals<FE>, ptr, fail_msg| {
-			this
-				.supervisor_externals
-				.deallocate_memory(ptr)
-				.map_err(|_| trap(fail_msg))
-		};
-
-		if self
-			.supervisor_externals
-			.write_memory(invoke_args_ptr, &invoke_args_data)
-			.is_err()
-		{
-			deallocate(self, invoke_args_ptr, "Failed dealloction after failed write of invoke arguments")?;
-			return Err(trap("Can't write invoke args into memory"));
-		}
-
-		let result = self.supervisor_externals.invoke(
-			&self.sandbox_instance.dispatch_thunk,
-			invoke_args_ptr,
-			invoke_args_len,
-			state,
-			func_idx,
-		);
-
-		deallocate(self, invoke_args_ptr, "Can't deallocate memory for dispatch thunk's invoke arguments")?;
-		let result = result?;
-
-		// dispatch_thunk returns pointer to serialized arguments.
-		// Unpack pointer and len of the serialized result data.
-		let (serialized_result_val_ptr, serialized_result_val_len) = {
-			// Cast to u64 to use zero-extension.
-			let v = result as u64;
-			let ptr = (v as u64 >> 32) as u32;
-			let len = (v & 0xFFFFFFFF) as u32;
-			(Pointer::new(ptr), len)
-		};
+				);
+
+			// Serialize arguments into a byte vector.
+			let invoke_args_data: Vec<u8> = args
+				.as_ref()
+				.iter()
+				.cloned()
+				.map(sp_wasm_interface::Value::from)
+				.collect::<Vec<_>>()
+				.encode();
+
+			let state = self.state;
+
+			// Move serialized arguments inside the memory, invoke dispatch thunk and
+			// then free allocated memory.
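+			// The arguments are SCALE-encoded, and the buffer is allocated via the
+			// supervisor's allocator so that the dispatch thunk can read it from its
+			// own linear memory.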
+			let invoke_args_len = invoke_args_data.len() as WordSize;
+			let invoke_args_ptr = sandbox_context
+				.supervisor_context()
+				.allocate_memory(invoke_args_len)
+				.map_err(|_| trap("Can't allocate memory in supervisor for the arguments"))?;
+
+			let deallocate = |supervisor_context: &mut dyn FunctionContext, ptr, fail_msg| {
+				supervisor_context.deallocate_memory(ptr).map_err(|_| trap(fail_msg))
+			};
+
+			if sandbox_context
+				.supervisor_context()
+				.write_memory(invoke_args_ptr, &invoke_args_data)
+				.is_err()
+			{
+				deallocate(
+					sandbox_context.supervisor_context(),
+					invoke_args_ptr,
+					"Failed deallocation after failed write of invoke arguments",
+				)?;
+				return Err(trap("Can't write invoke args into memory"))
+			}

-		let serialized_result_val = self.supervisor_externals
-			.read_memory(serialized_result_val_ptr, serialized_result_val_len)
-			.map_err(|_| trap("Can't read the serialized result from dispatch thunk"));
+			let result = sandbox_context.invoke(
+				invoke_args_ptr,
+				invoke_args_len,
+				state,
+				func_idx,
+			);

-		deallocate(self, serialized_result_val_ptr, "Can't deallocate memory for dispatch thunk's result")
+			deallocate(
+				sandbox_context.supervisor_context(),
+				invoke_args_ptr,
+				"Can't deallocate memory for dispatch thunk's invoke arguments",
+			)?;
+			let result = result?;
+
+			// dispatch_thunk returns pointer to serialized arguments.
+			// Unpack pointer and len of the serialized result data.
+			let (serialized_result_val_ptr, serialized_result_val_len) = {
+				// Cast to u64 to use zero-extension.
+				let v = result as u64;
+				let ptr = (v as u64 >> 32) as u32;
+				let len = (v & 0xFFFFFFFF) as u32;
+				(Pointer::new(ptr), len)
+			};
+
+			let serialized_result_val = sandbox_context
+				.supervisor_context()
+				.read_memory(serialized_result_val_ptr, serialized_result_val_len)
+				.map_err(|_| trap("Can't read the serialized result from dispatch thunk"));
+
+			deallocate(
+				sandbox_context.supervisor_context(),
+				serialized_result_val_ptr,
+				"Can't deallocate memory for dispatch thunk's result",
+			)
 			.and_then(|_| serialized_result_val)
 			.and_then(|serialized_result_val| deserialize_result(&serialized_result_val))
+		}).expect("SandboxContextStore is set when invoking sandboxed functions; qed")
 	}
 }

-fn with_guest_externals<FE, F, R>(
-	supervisor_externals: &mut FE,
-	sandbox_instance: &SandboxInstance<FE::SupervisorFuncRef>,
-	state: u32,
-	f: F,
-) -> R
+fn with_guest_externals<F, R>(sandbox_instance: &SandboxInstance, state: u32, f: F) -> R
 where
-	FE: SandboxCapabilities,
-	F: FnOnce(&mut GuestExternals<FE>) -> R,
+	F: FnOnce(&mut GuestExternals) -> R,
 {
-	let mut guest_externals = GuestExternals {
-		supervisor_externals,
-		sandbox_instance,
-		state,
-	};
-	f(&mut guest_externals)
+	f(&mut GuestExternals { sandbox_instance, state })
+}
+
+/// Module instance in terms of selected backend
+enum BackendInstance {
+	/// Wasmi module instance
+	Wasmi(wasmi::ModuleRef),
+
+	/// Wasmer module instance
+	#[cfg(feature = "wasmer-sandbox")]
+	Wasmer(wasmer::Instance),
 }

 /// Sandboxed instance of a wasm module.
@@ -318,13 +355,12 @@ where
 /// This is generic over a supervisor function reference type.
 ///
 /// [`invoke`]: #method.invoke
-pub struct SandboxInstance<FR> {
-	instance: ModuleRef,
-	dispatch_thunk: FR,
+pub struct SandboxInstance {
+	backend_instance: BackendInstance,
 	guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping,
 }

-impl<FR> SandboxInstance<FR> {
+impl SandboxInstance {
 	/// Invoke an exported function by a name.
 	///
 	/// `supervisor_externals` is required to execute the implementations
@@ -332,34 +368,106 @@
 	///
 	/// The `state` parameter can be used to provide custom data for
 	/// these syscall implementations.
-	pub fn invoke<FE: SandboxCapabilities<SupervisorFuncRef = FR>>(
+	pub fn invoke(
 		&self,
+
+		// function to call that is exported from the module
 		export_name: &str,
+
+		// arguments passed to the function
 		args: &[RuntimeValue],
-		supervisor_externals: &mut FE,
+
+		// arbitrary context data of the call
 		state: u32,
+
+		sandbox_context: &mut dyn SandboxContext,
 	) -> std::result::Result<Option<wasmi::RuntimeValue>, wasmi::Error> {
-		with_guest_externals(
-			supervisor_externals,
-			self,
-			state,
-			|guest_externals| {
-				self.instance
-					.invoke_export(export_name, args, guest_externals)
+		match &self.backend_instance {
+			BackendInstance::Wasmi(wasmi_instance) =>
+				with_guest_externals(self, state, |guest_externals| {
+					let wasmi_result = SandboxContextStore::using(sandbox_context, || {
+						wasmi_instance.invoke_export(export_name, args, guest_externals)
+					})?;
+
+					Ok(wasmi_result)
+				}),
+
+			#[cfg(feature = "wasmer-sandbox")]
+			BackendInstance::Wasmer(wasmer_instance) => {
+				let function = wasmer_instance
+					.exports
+					.get_function(export_name)
+					.map_err(|error| wasmi::Error::Function(error.to_string()))?;
+
+				let args: Vec<wasmer::Val> = args
+					.iter()
+					.map(|v| match *v {
+						RuntimeValue::I32(val) => wasmer::Val::I32(val),
+						RuntimeValue::I64(val) => wasmer::Val::I64(val),
+						RuntimeValue::F32(val) => wasmer::Val::F32(val.into()),
+						RuntimeValue::F64(val) => wasmer::Val::F64(val.into()),
+					})
+					.collect();
+
+				let wasmer_result = SandboxContextStore::using(sandbox_context, || {
+					function.call(&args).map_err(|error| wasmi::Error::Function(error.to_string()))
+				})?;
+
+				if wasmer_result.len() > 1 {
+					return Err(wasmi::Error::Function(
+						"multiple return types are not supported yet".into(),
+					))
+				}
+
+				wasmer_result
+					.first()
+					.map(|wasm_value| {
+						let wasmer_value = match *wasm_value {
+							wasmer::Val::I32(val) => RuntimeValue::I32(val),
+							wasmer::Val::I64(val) => RuntimeValue::I64(val),
+							wasmer::Val::F32(val) => RuntimeValue::F32(val.into()),
+							wasmer::Val::F64(val) => RuntimeValue::F64(val.into()),
+							_ =>
+								return Err(wasmi::Error::Function(format!(
+									"Unsupported return value: {:?}",
+									wasm_value,
+								))),
+						};
+
+						Ok(wasmer_value)
+					})
+					.transpose()
 			},
-		)
+		}
 	}

 	/// Get the value from a global with the given `name`.
 	///
 	/// Returns `Some(_)` if the global could be found.
 	pub fn get_global_val(&self, name: &str) -> Option<sp_wasm_interface::Value> {
-		let global = self.instance
-			.export_by_name(name)?
-			.as_global()?
-			.get();
+		match &self.backend_instance {
+			BackendInstance::Wasmi(wasmi_instance) => {
+				let wasmi_global = wasmi_instance.export_by_name(name)?.as_global()?.get();
+
+				Some(wasmi_global.into())
+			},
+
+			#[cfg(feature = "wasmer-sandbox")]
+			BackendInstance::Wasmer(wasmer_instance) => {
+				use sp_wasm_interface::Value;
+
+				let global = wasmer_instance.exports.get_global(name).ok()?;
+				let wasmer_value = match global.get() {
+					wasmer::Val::I32(val) => Value::I32(val),
+					wasmer::Val::I64(val) => Value::I64(val),
+					wasmer::Val::F32(val) => Value::F32(f32::to_bits(val)),
+					wasmer::Val::F64(val) => Value::F64(f64::to_bits(val)),
+					_ => None?,
+				};

-		Some(global.into())
+				Some(wasmer_value)
+			},
+		}
 	}
 }

@@ -379,10 +487,10 @@ pub enum InstantiationError {
 }

 fn decode_environment_definition(
-	raw_env_def: &[u8],
-	memories: &[Option<MemoryRef>],
+	mut raw_env_def: &[u8],
+	memories: &[Option<Memory>],
 ) -> std::result::Result<(Imports, GuestToSupervisorFunctionMapping), InstantiationError> {
-	let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..])
+	let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut raw_env_def)
 		.map_err(|_| InstantiationError::EnvironmentDefinitionCorrupted)?;

 	let mut func_map = HashMap::new();
@@ -398,7 +506,7 @@ fn decode_environment_definition(
 				let externals_idx =
 					guest_to_supervisor_mapping.define(SupervisorFuncIndex(func_idx as usize));
 				func_map.insert((module, field), externals_idx);
-			}
+			},
 			sandbox_primitives::ExternEntity::Memory(memory_idx) => {
 				let memory_ref = memories
 					.get(memory_idx as usize)
 					.cloned()
 					.ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)?
 					.ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)?;
 				memories_map.insert((module, field), memory_ref);
-			}
+			},
 		}
 	}

-	Ok((
-		Imports {
-			func_map,
-			memories_map,
-		},
-		guest_to_supervisor_mapping,
-	))
+	Ok((Imports { func_map, memories_map }, guest_to_supervisor_mapping))
 }

 /// An environment in which the guest module is instantiated.
 pub struct GuestEnvironment {
+	/// Function and memory imports of the guest module
 	imports: Imports,
+
+	/// Supervisor functions mapped to guest index space
 	guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping,
 }

@@ -429,16 +534,13 @@ impl GuestEnvironment {
 	/// Decodes an environment definition from the given raw bytes.
 	///
 	/// Returns `Err` if the definition cannot be decoded.
-	pub fn decode<FR>(
-		store: &Store<FR>,
+	pub fn decode<DT>(
+		store: &Store<DT>,
 		raw_env_def: &[u8],
 	) -> std::result::Result<Self, InstantiationError> {
 		let (imports, guest_to_supervisor_mapping) =
 			decode_environment_definition(raw_env_def, &store.memories)?;
-		Ok(Self {
-			imports,
-			guest_to_supervisor_mapping,
-		})
+		Ok(Self { imports, guest_to_supervisor_mapping })
 	}
 }

@@ -446,82 +548,148 @@ impl GuestEnvironment {
 ///
 /// To finish off the instantiation the user must call `register`.
 #[must_use]
-pub struct UnregisteredInstance<FR> {
-	sandbox_instance: Rc<SandboxInstance<FR>>,
+pub struct UnregisteredInstance {
+	sandbox_instance: Rc<SandboxInstance>,
 }

-impl<FR> UnregisteredInstance<FR> {
+impl UnregisteredInstance {
 	/// Finalizes instantiation of this module.
-	pub fn register(self, store: &mut Store<FR>) -> u32 {
+	pub fn register<DT>(self, store: &mut Store<DT>, dispatch_thunk: DT) -> u32 {
 		// At last, register the instance.
-		let instance_idx = store.register_sandbox_instance(self.sandbox_instance);
-		instance_idx
+		store.register_sandbox_instance(self.sandbox_instance, dispatch_thunk)
 	}
 }

-/// Instantiate a guest module and return it's index in the store.
-///
-/// The guest module's code is specified in `wasm`. Environment that will be available to
-/// guest module is specified in `raw_env_def` (serialized version of [`EnvironmentDefinition`]).
-/// `dispatch_thunk` is used as function that handle calls from guests.
-///
-/// # Errors
-///
-/// Returns `Err` if any of the following conditions happens:
-///
-/// - `raw_env_def` can't be deserialized as a [`EnvironmentDefinition`].
-/// - Module in `wasm` is invalid or couldn't be instantiated.
-///
-/// [`EnvironmentDefinition`]: ../sandbox/struct.EnvironmentDefinition.html
-pub fn instantiate<'a, FE: SandboxCapabilities>(
-	supervisor_externals: &mut FE,
-	dispatch_thunk: FE::SupervisorFuncRef,
-	wasm: &[u8],
-	host_env: GuestEnvironment,
-	state: u32,
-) -> std::result::Result<UnregisteredInstance<FE::SupervisorFuncRef>, InstantiationError> {
-	let module = Module::from_buffer(wasm).map_err(|_| InstantiationError::ModuleDecoding)?;
-	let instance = ModuleInstance::new(&module, &host_env.imports)
-		.map_err(|_| InstantiationError::Instantiation)?;
-
-	let sandbox_instance = Rc::new(SandboxInstance {
-		// In general, it's not a very good idea to use `.not_started_instance()` for anything
-		// but for extracting memory and tables. But in this particular case, we are extracting
-		// for the purpose of running `start` function which should be ok.
-		instance: instance.not_started_instance().clone(),
-		dispatch_thunk,
-		guest_to_supervisor_mapping: host_env.guest_to_supervisor_mapping,
-	});
-
-	with_guest_externals(
-		supervisor_externals,
-		&sandbox_instance,
-		state,
-		|guest_externals| {
-			instance
-				.run_start(guest_externals)
-				.map_err(|_| InstantiationError::StartTrapped)
-		},
-	)?;
-
-	Ok(UnregisteredInstance { sandbox_instance })
+/// Sandbox backend to use
+pub enum SandboxBackend {
+	/// Wasm interpreter
+	Wasmi,
+
+	/// Wasmer environment
+	#[cfg(feature = "wasmer-sandbox")]
+	Wasmer,
+
+	/// Use wasmer backend if available. Fall back to wasmi otherwise.
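+	/// (This is the case when the crate is built without the `wasmer-sandbox`
+	/// feature; see `BackendContext::new` below.)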
+	TryWasmer,
+}
+
+/// Memory reference in terms of a selected backend
+#[derive(Clone, Debug)]
+pub enum Memory {
+	/// Wasmi memory reference
+	Wasmi(WasmiMemoryWrapper),
+
+	/// Wasmer memory reference
+	#[cfg(feature = "wasmer-sandbox")]
+	Wasmer(WasmerMemoryWrapper),
+}
+
+impl Memory {
+	/// View as wasmi memory
+	pub fn as_wasmi(&self) -> Option<WasmiMemoryWrapper> {
+		match self {
+			Memory::Wasmi(memory) => Some(memory.clone()),
+
+			#[cfg(feature = "wasmer-sandbox")]
+			Memory::Wasmer(_) => None,
+		}
+	}
+
+	/// View as wasmer memory
+	#[cfg(feature = "wasmer-sandbox")]
+	pub fn as_wasmer(&self) -> Option<WasmerMemoryWrapper> {
+		match self {
+			Memory::Wasmer(memory) => Some(memory.clone()),
+			Memory::Wasmi(_) => None,
+		}
+	}
+}
+
+impl util::MemoryTransfer for Memory {
+	fn read(&self, source_addr: Pointer<u8>, size: usize) -> Result<Vec<u8>> {
+		match self {
+			Memory::Wasmi(sandboxed_memory) => sandboxed_memory.read(source_addr, size),
+
+			#[cfg(feature = "wasmer-sandbox")]
+			Memory::Wasmer(sandboxed_memory) => sandboxed_memory.read(source_addr, size),
+		}
+	}
+
+	fn read_into(&self, source_addr: Pointer<u8>, destination: &mut [u8]) -> Result<()> {
+		match self {
+			Memory::Wasmi(sandboxed_memory) => sandboxed_memory.read_into(source_addr, destination),
+
+			#[cfg(feature = "wasmer-sandbox")]
+			Memory::Wasmer(sandboxed_memory) => sandboxed_memory.read_into(source_addr, destination),
+		}
+	}
+
+	fn write_from(&self, dest_addr: Pointer<u8>, source: &[u8]) -> Result<()> {
+		match self {
+			Memory::Wasmi(sandboxed_memory) => sandboxed_memory.write_from(dest_addr, source),
+
+			#[cfg(feature = "wasmer-sandbox")]
+			Memory::Wasmer(sandboxed_memory) => sandboxed_memory.write_from(dest_addr, source),
+		}
+	}
+}
+
+/// Wasmer specific context
+#[cfg(feature = "wasmer-sandbox")]
+struct WasmerBackend {
+	store: wasmer::Store,
+}
+
+/// Information specific to a particular execution backend
+enum BackendContext {
+	/// Wasmi specific context
+	Wasmi,
+
+	/// Wasmer specific context
+	#[cfg(feature = "wasmer-sandbox")]
+	Wasmer(WasmerBackend),
+}
+
+impl BackendContext {
+	pub fn new(backend: SandboxBackend) -> BackendContext {
+		match backend {
+			SandboxBackend::Wasmi => BackendContext::Wasmi,
+
+			#[cfg(not(feature = "wasmer-sandbox"))]
+			SandboxBackend::TryWasmer => BackendContext::Wasmi,
+
+			#[cfg(feature = "wasmer-sandbox")]
+			SandboxBackend::Wasmer | SandboxBackend::TryWasmer => {
+				let compiler = wasmer_compiler_singlepass::Singlepass::default();
+
+				BackendContext::Wasmer(WasmerBackend {
+					store: wasmer::Store::new(&wasmer::JIT::new(compiler).engine()),
+				})
+			},
+		}
+	}
+}

 /// This struct keeps track of all sandboxed components.
 ///
 /// This is generic over a supervisor function reference type.
-pub struct Store<FR> {
-	// Memories and instances are `Some` until torn down.
-	instances: Vec<Option<Rc<SandboxInstance<FR>>>>,
-	memories: Vec<Option<MemoryRef>>,
+pub struct Store<DT> {
+	/// Stores the instance and the dispatch thunk associated with each instance.
+	///
+	/// Instances are `Some` until torn down.
+	instances: Vec<Option<(Rc<SandboxInstance>, DT)>>,
+	/// Memories are `Some` until torn down.
+	memories: Vec<Option<Memory>>,
+	backend_context: BackendContext,
 }

-impl<FR> Store<FR> {
+impl<DT: Clone> Store<DT> {
 	/// Create a new empty sandbox store.
-	pub fn new() -> Self {
+	pub fn new(backend: SandboxBackend) -> Self {
 		Store {
 			instances: Vec::new(),
 			memories: Vec::new(),
+			backend_context: BackendContext::new(backend),
 		}
 	}

@@ -532,19 +700,33 @@ impl<FR> Store<FR> {
 	/// Returns `Err` if the memory couldn't be created.
 	/// Typically happens if `initial` is more than `maximum`.
 	pub fn new_memory(&mut self, initial: u32, maximum: u32) -> Result<u32> {
+		let memories = &mut self.memories;
+		let backend_context = &self.backend_context;
+
 		let maximum = match maximum {
 			sandbox_primitives::MEM_UNLIMITED => None,
-			specified_limit => Some(Pages(specified_limit as usize)),
+			specified_limit => Some(specified_limit),
 		};

-		let mem =
-			MemoryInstance::alloc(
+		let memory = match &backend_context {
+			BackendContext::Wasmi => Memory::Wasmi(WasmiMemoryWrapper::new(MemoryInstance::alloc(
 				Pages(initial as usize),
-				maximum,
-			)?;
+				maximum.map(|m| Pages(m as usize)),
+			)?)),
+
+			#[cfg(feature = "wasmer-sandbox")]
+			BackendContext::Wasmer(context) => {
+				let ty = wasmer::MemoryType::new(initial, maximum, false);
+				Memory::Wasmer(WasmerMemoryWrapper::new(
+					wasmer::Memory::new(&context.store, ty)
+						.map_err(|_| Error::InvalidMemoryReference)?,
+				))
+			},
+		};
+
+		let mem_idx = memories.len();
+		memories.push(Some(memory.clone()));

-		let mem_idx = self.memories.len();
-		self.memories.push(Some(mem));
 		Ok(mem_idx as u32)
 	}

@@ -554,11 +736,28 @@ impl<FR> Store<FR> {
 	///
 	/// Returns `Err` if `instance_idx` isn't a valid index of an instance or
 	/// the instance is already torn down.
-	pub fn instance(&self, instance_idx: u32) -> Result<Rc<SandboxInstance<FR>>> {
+	pub fn instance(&self, instance_idx: u32) -> Result<Rc<SandboxInstance>> {
 		self.instances
 			.get(instance_idx as usize)
-			.cloned()
 			.ok_or_else(|| "Trying to access a non-existent instance")?
+			.as_ref()
+			.map(|v| v.0.clone())
+			.ok_or_else(|| "Trying to access a torndown instance".into())
+	}
+
+	/// Returns dispatch thunk by `instance_idx`.
+	///
+	/// # Errors
+	///
+	/// Returns `Err` if `instance_idx` isn't a valid index of an instance or
+	/// the instance is already torn down.
+	pub fn dispatch_thunk(&self, instance_idx: u32) -> Result<DT> {
+		self.instances
+			.get(instance_idx as usize)
+			.as_ref()
+			.ok_or_else(|| "Trying to access a non-existent instance")?
+			.as_ref()
+			.map(|v| v.1.clone())
 			.ok_or_else(|| "Trying to access a torndown instance".into())
 	}

@@ -568,7 +767,7 @@ impl<FR> Store<FR> {
 	///
 	/// Returns `Err` if `memory_idx` isn't a valid index of a memory or
 	/// if the memory has been torn down.
-	pub fn memory(&self, memory_idx: u32) -> Result<MemoryRef> {
+	pub fn memory(&self, memory_idx: u32) -> Result<Memory> {
 		self.memories
 			.get(memory_idx as usize)
 			.cloned()
@@ -589,7 +788,7 @@
 			Some(memory) => {
 				*memory = None;
 				Ok(())
-			}
+			},
 		}
 	}

@@ -606,13 +805,286 @@
 			Some(instance) => {
 				*instance = None;
 				Ok(())
-			}
+			},
 		}
 	}

-	fn register_sandbox_instance(&mut self, sandbox_instance: Rc<SandboxInstance<FR>>) -> u32 {
+	/// Instantiate a guest module and return its index in the store.
+	///
+	/// The guest module's code is specified in `wasm`. The environment that will be available to
+	/// the guest module is specified in `guest_env`. A dispatch thunk is used as the function that
+	/// handles calls from guests. `state` is an opaque pointer to the caller's arbitrary context,
+	/// normally created by the `sp_sandbox::Instance` primitive.
+	///
+	/// Note: Due to borrowing constraints, the dispatch thunk is now propagated using DTH
+	///
+	/// Returns an uninitialized sandboxed module instance or an instantiation error.
+	pub fn instantiate(
+		&mut self,
+		wasm: &[u8],
+		guest_env: GuestEnvironment,
+		state: u32,
+		sandbox_context: &mut dyn SandboxContext,
+	) -> std::result::Result<UnregisteredInstance, InstantiationError> {
+		let sandbox_instance = match self.backend_context {
+			BackendContext::Wasmi =>
+				Self::instantiate_wasmi(wasm, guest_env, state, sandbox_context)?,
+
+			#[cfg(feature = "wasmer-sandbox")]
+			BackendContext::Wasmer(ref context) =>
+				Self::instantiate_wasmer(&context, wasm, guest_env, state, sandbox_context)?,
+		};
+
+		Ok(UnregisteredInstance { sandbox_instance })
+	}
+}
+
+// Private routines
+impl<DT> Store<DT> {
+	fn register_sandbox_instance(
+		&mut self,
+		sandbox_instance: Rc<SandboxInstance>,
+		dispatch_thunk: DT,
+	) -> u32 {
 		let instance_idx = self.instances.len();
-		self.instances.push(Some(sandbox_instance));
+		self.instances.push(Some((sandbox_instance, dispatch_thunk)));
 		instance_idx as u32
 	}
+
+	fn instantiate_wasmi(
+		wasm: &[u8],
+		guest_env: GuestEnvironment,
+		state: u32,
+		sandbox_context: &mut dyn SandboxContext,
+	) -> std::result::Result<Rc<SandboxInstance>, InstantiationError> {
+		let wasmi_module =
+			Module::from_buffer(wasm).map_err(|_| InstantiationError::ModuleDecoding)?;
+		let wasmi_instance = ModuleInstance::new(&wasmi_module, &guest_env.imports)
+			.map_err(|_| InstantiationError::Instantiation)?;
+
+		let sandbox_instance = Rc::new(SandboxInstance {
+			// In general, it's not a very good idea to use `.not_started_instance()` for
+			// anything but for extracting memory and tables. But in this particular case, we
+			// are extracting for the purpose of running `start` function which should be ok.
+			backend_instance: BackendInstance::Wasmi(wasmi_instance.not_started_instance().clone()),
+			guest_to_supervisor_mapping: guest_env.guest_to_supervisor_mapping,
+		});
+
+		with_guest_externals(&sandbox_instance, state, |guest_externals| {
+			SandboxContextStore::using(sandbox_context, || {
+				wasmi_instance
+					.run_start(guest_externals)
+					.map_err(|_| InstantiationError::StartTrapped)
+			})
+
+			// Note: no need to run start on a wasmer instance, since it's done
+			// automatically
+		})?;
+
+		Ok(sandbox_instance)
+	}
+
+	#[cfg(feature = "wasmer-sandbox")]
+	fn instantiate_wasmer(
+		context: &WasmerBackend,
+		wasm: &[u8],
+		guest_env: GuestEnvironment,
+		state: u32,
+		sandbox_context: &mut dyn SandboxContext,
+	) -> std::result::Result<Rc<SandboxInstance>, InstantiationError> {
+		let module = wasmer::Module::new(&context.store, wasm)
+			.map_err(|_| InstantiationError::ModuleDecoding)?;
+
+		type Exports = HashMap<String, wasmer::Exports>;
+		let mut exports_map = Exports::new();
+
+		for import in module.imports().into_iter() {
+			match import.ty() {
+				// Nothing to do here
+				wasmer::ExternType::Global(_) | wasmer::ExternType::Table(_) => (),
+
+				wasmer::ExternType::Memory(_) => {
+					let exports = exports_map
+						.entry(import.module().to_string())
+						.or_insert(wasmer::Exports::new());
+
+					let memory = guest_env
+						.imports
+						.memory_by_name(import.module(), import.name())
+						.ok_or(InstantiationError::ModuleDecoding)?;
+
+					let mut wasmer_memory_ref = memory.as_wasmer().expect(
+						"memory is created by wasmer; \
+						exported by the same module and backend; \
+						thus the operation can't fail; \
+						qed",
+					);
+
+					// This is safe since we're only instantiating the module and populating
+					// the export table, so no memory access can happen at this time.
+					// All subsequent memory accesses should happen through the wrapper,
+					// that enforces the memory access protocol.
+					let wasmer_memory = unsafe { wasmer_memory_ref.clone_inner() };
+
+					exports.insert(import.name(), wasmer::Extern::Memory(wasmer_memory));
+				},
+
+				wasmer::ExternType::Function(func_ty) => {
+					let guest_func_index =
+						guest_env.imports.func_by_name(import.module(), import.name());
+
+					let guest_func_index = if let Some(index) = guest_func_index {
+						index
+					} else {
+						// Missing import (should we abort here?)
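+						// Note: the import is simply skipped here; if the module actually
+						// requires it, instantiation below should fail with a link error.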
+						continue
+					};
+
+					let supervisor_func_index = guest_env
+						.guest_to_supervisor_mapping
+						.func_by_guest_index(guest_func_index)
+						.ok_or(InstantiationError::ModuleDecoding)?;
+
+					let function = Self::wasmer_dispatch_function(
+						supervisor_func_index,
+						&context.store,
+						func_ty,
+						state,
+					);
+
+					let exports = exports_map
+						.entry(import.module().to_string())
+						.or_insert(wasmer::Exports::new());
+
+					exports.insert(import.name(), wasmer::Extern::Function(function));
+				},
+			}
+		}
+
+		let mut import_object = wasmer::ImportObject::new();
+		for (module_name, exports) in exports_map.into_iter() {
+			import_object.register(module_name, exports);
+		}
+
+		let instance = SandboxContextStore::using(sandbox_context, || {
+			wasmer::Instance::new(&module, &import_object).map_err(|error| match error {
+				wasmer::InstantiationError::Link(_) => InstantiationError::Instantiation,
+				wasmer::InstantiationError::Start(_) => InstantiationError::StartTrapped,
+				wasmer::InstantiationError::HostEnvInitialization(_) =>
+					InstantiationError::EnvironmentDefinitionCorrupted,
+			})
+		})?;
+
+		Ok(Rc::new(SandboxInstance {
+			backend_instance: BackendInstance::Wasmer(instance),
+			guest_to_supervisor_mapping: guest_env.guest_to_supervisor_mapping,
+		}))
+	}
+
+	#[cfg(feature = "wasmer-sandbox")]
+	fn wasmer_dispatch_function(
+		supervisor_func_index: SupervisorFuncIndex,
+		store: &wasmer::Store,
+		func_ty: &wasmer::FunctionType,
+		state: u32,
+	) -> wasmer::Function {
+		wasmer::Function::new(store, func_ty, move |params| {
+			SandboxContextStore::with(|sandbox_context| {
+				use sp_wasm_interface::Value;
+
+				// Serialize arguments into a byte vector.
+				let invoke_args_data = params
+					.iter()
+					.map(|val| match val {
+						wasmer::Val::I32(val) => Ok(Value::I32(*val)),
+						wasmer::Val::I64(val) => Ok(Value::I64(*val)),
+						wasmer::Val::F32(val) => Ok(Value::F32(f32::to_bits(*val))),
+						wasmer::Val::F64(val) => Ok(Value::F64(f64::to_bits(*val))),
+						_ => Err(wasmer::RuntimeError::new(format!(
+							"Unsupported function argument: {:?}",
+							val
+						))),
+					})
+					.collect::<std::result::Result<Vec<_>, _>>()?
+					.encode();
+
+				// Move serialized arguments inside the memory, invoke dispatch thunk and
+				// then free allocated memory.
+				let invoke_args_len = invoke_args_data.len() as WordSize;
+				let invoke_args_ptr = sandbox_context
+					.supervisor_context()
+					.allocate_memory(invoke_args_len)
+					.map_err(|_| {
+						wasmer::RuntimeError::new(
+							"Can't allocate memory in supervisor for the arguments",
+						)
+					})?;
+
+				let deallocate = |fe: &mut dyn FunctionContext, ptr, fail_msg| {
+					fe.deallocate_memory(ptr).map_err(|_| wasmer::RuntimeError::new(fail_msg))
+				};
+
+				if sandbox_context
+					.supervisor_context()
+					.write_memory(invoke_args_ptr, &invoke_args_data)
+					.is_err()
+				{
+					deallocate(
+						sandbox_context.supervisor_context(),
+						invoke_args_ptr,
+						"Failed deallocation after failed write of invoke arguments",
+					)?;
+
+					return Err(wasmer::RuntimeError::new("Can't write invoke args into memory"))
+				}
+
+				// Perform the actual call
+				let serialized_result = sandbox_context
+					.invoke(invoke_args_ptr, invoke_args_len, state, supervisor_func_index)
+					.map_err(|e| wasmer::RuntimeError::new(e.to_string()))?;
+
+				// dispatch_thunk returns pointer to serialized arguments.
+				// Unpack pointer and len of the serialized result data.
+				let (serialized_result_val_ptr, serialized_result_val_len) = {
+					// Cast to u64 to use zero-extension.
+					let v = serialized_result as u64;
+					let ptr = (v as u64 >> 32) as u32;
+					let len = (v & 0xFFFFFFFF) as u32;
+					(Pointer::new(ptr), len)
+				};
+
+				let serialized_result_val = sandbox_context
+					.supervisor_context()
+					.read_memory(serialized_result_val_ptr, serialized_result_val_len)
+					.map_err(|_| {
+						wasmer::RuntimeError::new(
+							"Can't read the serialized result from dispatch thunk",
+						)
+					});
+
+				let deserialized_result = deallocate(
+					sandbox_context.supervisor_context(),
+					serialized_result_val_ptr,
+					"Can't deallocate memory for dispatch thunk's result",
+				)
+				.and_then(|_| serialized_result_val)
+				.and_then(|serialized_result_val| {
+					deserialize_result(&serialized_result_val)
+						.map_err(|e| wasmer::RuntimeError::new(e.to_string()))
+				})?;
+
+				if let Some(value) = deserialized_result {
+					Ok(vec![match value {
+						RuntimeValue::I32(val) => wasmer::Val::I32(val),
+						RuntimeValue::I64(val) => wasmer::Val::I64(val),
+						RuntimeValue::F32(val) => wasmer::Val::F32(val.into()),
+						RuntimeValue::F64(val) => wasmer::Val::F64(val.into()),
+					}])
+				} else {
+					Ok(vec![])
+				}
+			})
+			.expect("SandboxContextStore is set when invoking sandboxed functions; qed")
+		})
+	}
 }
diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs
index 92a48e1401814..3ea29540f98ee 100644
--- a/client/executor/common/src/util.rs
+++ b/client/executor/common/src/util.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -16,125 +16,226 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-//! A set of utilities for resetting a wasm instance to its initial state.
+//! Utilities used by all backends

-use crate::error::{self, Error};
-use std::mem;
-use parity_wasm::elements::{deserialize_buffer, DataSegment, Instruction, Module as RawModule};
+use crate::error::{Error, Result};
+use sp_wasm_interface::Pointer;
+use std::ops::Range;

-/// A bunch of information collected from a WebAssembly module.
-pub struct WasmModuleInfo {
-	raw_module: RawModule,
+/// Construct a range from an offset to a data length after the offset.
+/// Returns None if the end of the range would exceed some maximum offset.
+pub fn checked_range(offset: usize, len: usize, max: usize) -> Option<Range<usize>> {
+	let end = offset.checked_add(len)?;
+	if end <= max {
+		Some(offset..end)
+	} else {
+		None
+	}
 }

-impl WasmModuleInfo {
-	/// Create `WasmModuleInfo` from the given wasm code.
+/// Provides safe memory access interface using an external buffer
+pub trait MemoryTransfer {
+	/// Read data from a slice of memory into a newly allocated buffer.
 	///
-	/// Returns `None` if the wasm code cannot be deserialized.
-	pub fn new(wasm_code: &[u8]) -> Option<Self> {
-		let raw_module: RawModule = deserialize_buffer(wasm_code).ok()?;
-		Some(Self { raw_module })
-	}
+	/// Returns an error if the read would go out of the memory bounds.
+	fn read(&self, source_addr: Pointer<u8>, size: usize) -> Result<Vec<u8>>;

-	/// Extract the data segments from the given wasm code.
+	/// Read data from a slice of memory into a destination buffer.
 	///
-	/// Returns `Err` if the given wasm code cannot be deserialized.
-	fn data_segments(&self) -> Vec<DataSegment> {
-		self.raw_module
-			.data_section()
-			.map(|ds| ds.entries())
-			.unwrap_or(&[])
-			.to_vec()
-	}
+	/// Returns an error if the read would go out of the memory bounds.
+	fn read_into(&self, source_addr: Pointer<u8>, destination: &mut [u8]) -> Result<()>;
+
+	/// Write data to a slice of memory.
+	///
+	/// Returns an error if the write would go out of the memory bounds.
+	fn write_from(&self, dest_addr: Pointer<u8>, source: &[u8]) -> Result<()>;
+}

-	/// The number of globals defined in locally in this module.
-	pub fn declared_globals_count(&self) -> u32 {
-		self.raw_module
-			.global_section()
-			.map(|gs| gs.entries().len() as u32)
-			.unwrap_or(0)
+/// Safe wrapper over wasmi memory reference
+pub mod wasmi {
+	use super::*;
+
+	/// Wasmi provides direct access to its memory using slices.
+	///
+	/// This wrapper limits the scope in which the slice can be taken.
+	#[derive(Debug, Clone)]
+	pub struct MemoryWrapper(::wasmi::MemoryRef);
+
+	impl MemoryWrapper {
+		/// Take ownership of the memory region and return a wrapper object
+		pub fn new(memory: ::wasmi::MemoryRef) -> Self {
+			Self(memory)
+		}
+
+		/// Clone the underlying memory object
+		///
+		/// # Safety
+		///
+		/// The sole purpose of `MemoryRef` is to protect the memory from uncontrolled
+		/// access. By returning the memory object "as is" we bypass all of the checks.
+		///
+		/// Intended to be used only during module initialization.
+		pub unsafe fn clone_inner(&self) -> ::wasmi::MemoryRef {
+			self.0.clone()
+		}
 	}

-	/// The number of imports of globals.
-	pub fn imported_globals_count(&self) -> u32 {
-		self.raw_module
-			.import_section()
-			.map(|is| is.globals() as u32)
-			.unwrap_or(0)
+	impl super::MemoryTransfer for MemoryWrapper {
+		fn read(&self, source_addr: Pointer<u8>, size: usize) -> Result<Vec<u8>> {
+			self.0.with_direct_access(|source| {
+				let range = checked_range(source_addr.into(), size, source.len())
+					.ok_or_else(|| Error::Other("memory read is out of bounds".into()))?;
+
+				Ok(Vec::from(&source[range]))
+			})
+		}
+
+		fn read_into(&self, source_addr: Pointer<u8>, destination: &mut [u8]) -> Result<()> {
+			self.0.with_direct_access(|source| {
+				let range = checked_range(source_addr.into(), destination.len(), source.len())
+					.ok_or_else(|| Error::Other("memory read is out of bounds".into()))?;
+
+				destination.copy_from_slice(&source[range]);
+				Ok(())
+			})
+		}
+
+		fn write_from(&self, dest_addr: Pointer<u8>, source: &[u8]) -> Result<()> {
+			self.0.with_direct_access_mut(|destination| {
+				let range = checked_range(dest_addr.into(), source.len(), destination.len())
+					.ok_or_else(|| Error::Other("memory write is out of bounds".into()))?;
+
+				destination[range].copy_from_slice(source);
+				Ok(())
+			})
+		}
 	}
 }

-/// This is a snapshot of data segments specialized for a particular instantiation.
-///
-/// Note that this assumes that no mutable globals are used.
-#[derive(Clone)]
-pub struct DataSegmentsSnapshot {
-	/// The list of data segments represented by (offset, contents).
-	data_segments: Vec<(u32, Vec<u8>)>,
-}
+/// Routines specific to the Wasmer runtime. Since the sandbox can be invoked from both
+/// wasmi and wasmtime runtime executors, we need to have a way to deal with sandbox
+/// backends right from the start.
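+///
+/// The wrapper below keeps the backing `wasmer::Memory` behind a `RefCell` and funnels
+/// all reads and writes through the `MemoryTransfer` methods, so raw views of the
+/// linear memory never outlive a single operation.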
+#[cfg(feature = "wasmer-sandbox")]
+pub mod wasmer {
+	use super::checked_range;
+	use crate::error::{Error, Result};
+	use sp_wasm_interface::Pointer;
+	use std::{cell::RefCell, convert::TryInto, rc::Rc};
+
+	/// In order to enforce memory access protocol to the backend memory
+	/// we wrap it with `RefCell` and encapsulate all memory operations.
+	#[derive(Debug, Clone)]
+	pub struct MemoryWrapper {
+		buffer: Rc<RefCell<wasmer::Memory>>,
+	}

-impl DataSegmentsSnapshot {
-	/// Create a snapshot from the data segments from the module.
-	pub fn take(module: &WasmModuleInfo) -> error::Result<Self> {
-		let data_segments = module
-			.data_segments()
-			.into_iter()
-			.map(|mut segment| {
-				// Just replace contents of the segment since the segments will be discarded later
-				// anyway.
-				let contents = mem::replace(segment.value_mut(), vec![]);
-
-				let init_expr = match segment.offset() {
-					Some(offset) => offset.code(),
-					// Return if the segment is passive
-					None => return Err(Error::from("Shared memory is not supported".to_string())),
-				};
-
-				// [op, End]
-				if init_expr.len() != 2 {
-					return Err(Error::from(
-						"initializer expression can have only up to 2 expressions in wasm 1.0"
-							.to_string(),
-					));
-				}
-				let offset = match &init_expr[0] {
-					Instruction::I32Const(v) => *v as u32,
-					Instruction::GetGlobal(_) => {
-						// In a valid wasm file, initializer expressions can only refer to imported
-						// globals.
-						//
-						// At the moment of writing the Substrate Runtime Interface does not provide
-						// any globals. There is nothing that prevents us from supporting this
-						// if/when we gain those.
-						return Err(Error::from(
-							"Imported globals are not supported yet".to_string(),
-						));
-					}
-					insn => {
-						return Err(Error::from(format!(
-							"{:?} is not supported as initializer expression in wasm 1.0",
-							insn
-						)))
-					}
-				};
-
-				Ok((offset, contents))
-			})
-			.collect::<error::Result<Vec<_>>>()?;
+	impl MemoryWrapper {
+		/// Take ownership of the memory region and return a wrapper object
+		pub fn new(memory: wasmer::Memory) -> Self {
+			Self { buffer: Rc::new(RefCell::new(memory)) }
+		}
+
+		/// Returns linear memory of the wasm instance as a slice.
+		///
+		/// # Safety
+		///
+		/// Wasmer doesn't provide comprehensive documentation about the exact behavior of the data
+		/// pointer. If a dynamic style heap is used, the base pointer of the heap can change when
+		/// it grows, so we cannot guarantee the lifetime of the returned slice reference.
+		unsafe fn memory_as_slice(memory: &wasmer::Memory) -> &[u8] {
+			let ptr = memory.data_ptr() as *const _;
+			let len: usize =
+				memory.data_size().try_into().expect("data size should fit into usize");
+
+			if len == 0 {
+				&[]
+			} else {
+				core::slice::from_raw_parts(ptr, len)
+			}
+		}
+
+		/// Returns linear memory of the wasm instance as a slice.
+		///
+		/// # Safety
+		///
+		/// See `[memory_as_slice]`. In addition to those requirements, since a mutable reference is
+		/// returned it must be ensured that only one mutable and no shared references to memory
+		/// exist at the same time.
+		unsafe fn memory_as_slice_mut(memory: &wasmer::Memory) -> &mut [u8] {
+			let ptr = memory.data_ptr();
+			let len: usize =
+				memory.data_size().try_into().expect("data size should fit into usize");
+
+			if len == 0 {
+				&mut []
+			} else {
+				core::slice::from_raw_parts_mut(ptr, len)
+			}
+		}

-		Ok(Self { data_segments })
+		/// Clone the underlying memory object
+		///
+		/// # Safety
+		///
+		/// The sole purpose of `MemoryRef` is to protect the memory from uncontrolled
+		/// access. By returning the memory object "as is" we bypass all of the checks.
+		/// Clone the underlying memory object
+		///
+		/// # Safety
+		///
+		/// The sole purpose of `MemoryWrapper` is to protect the memory from uncontrolled
+		/// access. By returning the memory object "as is" we bypass all of the checks.
+		///
+		/// Intended to be used only during module initialization.
+		///
+		/// # Panics
+		///
+		/// Will panic if the memory is currently borrowed.
+		pub unsafe fn clone_inner(&mut self) -> wasmer::Memory {
+			// We take an exclusive borrow to ensure that we're the only one here
+			self.buffer.borrow_mut().clone()
+		}
 	}

-	/// Apply the given snapshot to a linear memory.
-	///
-	/// Linear memory interface is represented by a closure `memory_set`.
-	pub fn apply<E>(
-		&self,
-		mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>,
-	) -> Result<(), E> {
-		for (offset, contents) in &self.data_segments {
-			memory_set(*offset, contents)?;

+	impl super::MemoryTransfer for MemoryWrapper {
+		fn read(&self, source_addr: Pointer<u8>, size: usize) -> Result<Vec<u8>> {
+			let memory = self.buffer.borrow();
+
+			let data_size = memory.data_size().try_into().expect("data size does not fit");
+
+			let range = checked_range(source_addr.into(), size, data_size)
+				.ok_or_else(|| Error::Other("memory read is out of bounds".into()))?;
+
+			let mut buffer = vec![0; range.len()];
+			self.read_into(source_addr, &mut buffer)?;
+
+			Ok(buffer)
+		}
+
+		fn read_into(&self, source_addr: Pointer<u8>, destination: &mut [u8]) -> Result<()> {
+			unsafe {
+				let memory = self.buffer.borrow();
+
+				// This should be safe since we don't grow the memory while caching this
+				// reference and we give up the reference before returning from this function.
+				let source = Self::memory_as_slice(&memory);
+
+				let range = checked_range(source_addr.into(), destination.len(), source.len())
+					.ok_or_else(|| Error::Other("memory read is out of bounds".into()))?;
+
+				destination.copy_from_slice(&source[range]);
+				Ok(())
+			}
+		}
+
+		fn write_from(&self, dest_addr: Pointer<u8>, source: &[u8]) -> Result<()> {
+			unsafe {
+				let memory = self.buffer.borrow_mut();
+
+				// This should be safe since we don't grow the memory while caching this
+				// reference and we give up the reference before returning from this function.
+				let destination = Self::memory_as_slice_mut(&memory);
+
+				let range = checked_range(dest_addr.into(), source.len(), destination.len())
+					.ok_or_else(|| Error::Other("memory write is out of bounds".into()))?;
+
+				destination[range].copy_from_slice(source);
+				Ok(())
+			}
+		}
+	}

-		Ok(())
 	}
 }
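Since both backends now implement `MemoryTransfer`, the sandbox layer can move bytes without knowing which executor is active. A hypothetical helper, not part of the patch, shows the shape such call sites take (it assumes the trait, `Pointer`, and the crate's `Result` alias defined above):

use sp_wasm_interface::Pointer;

/// Copy `len` bytes from `src` to `dst` within the same sandboxed memory,
/// relying on the backend's own bounds checks for both halves of the move.
fn copy_within_memory<M: MemoryTransfer>(
	memory: &M,
	src: Pointer<u8>,
	dst: Pointer<u8>,
	len: usize,
) -> Result<()> {
	let bytes = memory.read(src, len)?; // fails cleanly if the source range is out of bounds
	memory.write_from(dst, &bytes) // likewise for the destination range
}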
diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs
index c407d9967cbf9..eb73909d9234f 100644
--- a/client/executor/common/src/wasm_runtime.rs
+++ b/client/executor/common/src/wasm_runtime.rs
@@ -1,18 +1,20 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.

-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.

 // You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// along with this program. If not, see <https://www.gnu.org/licenses/>.

 //! Definitions for a wasm runtime.

@@ -91,4 +93,14 @@ pub trait WasmInstance: Send {
 	///
 	/// This method is only suitable for getting immutable globals.
 	fn get_global_const(&self, name: &str) -> Result<Option<Value>, Error>;
+
+	/// **Testing Only**. This function returns the base address of the linear memory.
+	///
+	/// This is meant to be the starting address of the memory mapped area for the linear memory.
+	///
+	/// This function is intended only for a specific test that measures physical memory
+	/// consumption.
+	fn linear_memory_base_ptr(&self) -> Option<*const u8> {
+		None
+	}
 }
diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml
index ba23e31febee5..a4fbc88cf5662 100644
--- a/client/executor/runtime-test/Cargo.toml
+++ b/client/executor/runtime-test/Cargo.toml
@@ -13,21 +13,19 @@ repository = "https://github.com/paritytech/substrate/"
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-sp-allocator = { version = "2.0.0", default-features = false, path = "../../../primitives/allocator" }
-sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" }
-sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" }
-sp-sandbox = { version = "0.8.0", default-features = false, path = "../../../primitives/sandbox" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" }
-sp-tasks = { version = "2.0.0", default-features = false, path = "../../../primitives/tasks" }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" }
+sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" }
+sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" }
+sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" }

 [build-dependencies]
-wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" }
+substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" }

 [features]
 default = [ "std" ]
 std = [
-	"sp-allocator/std",
 	"sp-core/std",
 	"sp-io/std",
 	"sp-runtime/std",
diff --git a/client/executor/runtime-test/build.rs b/client/executor/runtime-test/build.rs
index bc07db900c31e..9456d6bc90f4c 100644
--- a/client/executor/runtime-test/build.rs
+++ b/client/executor/runtime-test/build.rs
@@ -1,26 +1,27 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { // regular build WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build(); @@ -28,10 +29,9 @@ fn main() { // and building with tracing activated WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .set_file_name("wasm_binary_with_tracing.rs") - .append_to_rust_flags("--cfg feature=\\\"with-tracing\\\"") + .append_to_rust_flags(r#"--cfg feature="with-tracing""#) .build(); } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 404530c1c3ebf..c9f7d6b1e2970 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -4,25 +4,31 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ - supported with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only supported with the flag \ + disabled.", + ) } #[cfg(not(feature = "std"))] -use sp_std::{vec::Vec, vec}; +use sp_std::{vec, vec::Vec}; +#[cfg(not(feature = "std"))] +use sp_core::{ed25519, sr25519}; #[cfg(not(feature = "std"))] use sp_io::{ - storage, hashing::{blake2_128, blake2_256, sha2_256, twox_128, twox_256}, - crypto::{ed25519_verify, sr25519_verify}, wasm_tracing, + crypto::{ed25519_verify, sr25519_verify}, + hashing::{blake2_128, blake2_256, sha2_256, twox_128, twox_256}, + storage, wasm_tracing, }; #[cfg(not(feature = "std"))] -use sp_runtime::{print, traits::{BlakeTwo256, Hash}}; -#[cfg(not(feature = "std"))] -use sp_core::{ed25519, sr25519}; +use sp_runtime::{ + print, + traits::{BlakeTwo256, Hash}, +}; #[cfg(not(feature = "std"))] use sp_sandbox::Value; @@ -39,302 +45,356 @@ extern "C" { /// the initialized value at the start of a runtime call. static mut MUTABLE_STATIC: u64 = 32; -sp_core::wasm_export_functions! 
{ - fn test_calling_missing_external() { - unsafe { missing_external() } - } - - fn test_calling_yet_another_missing_external() { - unsafe { yet_another_missing_external() } - } - - fn test_data_in(input: Vec) -> Vec { - print("set_storage"); - storage::set(b"input", &input); - - print("storage"); - let foo = storage::get(b"foo").unwrap(); - - print("set_storage"); - storage::set(b"baz", &foo); - - print("finished!"); - b"all ok!".to_vec() - } - - fn test_clear_prefix(input: Vec) -> Vec { - storage::clear_prefix(&input); - b"all ok!".to_vec() - } - - fn test_empty_return() {} - - fn test_exhaust_heap() -> Vec { Vec::with_capacity(16777216) } - - fn test_panic() { panic!("test panic") } - - fn test_conditional_panic(input: Vec) -> Vec { - if input.len() > 0 { - panic!("test panic") - } - - input - } - - fn test_blake2_256(input: Vec) -> Vec { - blake2_256(&input).to_vec() - } - - fn test_blake2_128(input: Vec) -> Vec { - blake2_128(&input).to_vec() - } - - fn test_sha2_256(input: Vec) -> Vec { - sha2_256(&input).to_vec() - } - - fn test_twox_256(input: Vec) -> Vec { - twox_256(&input).to_vec() - } - - fn test_twox_128(input: Vec) -> Vec { - twox_128(&input).to_vec() - } - - fn test_ed25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) - } - - fn test_sr25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) - } - - fn test_ordered_trie_root() -> Vec { - BlakeTwo256::ordered_trie_root( - vec![ - b"zero"[..].into(), - b"one"[..].into(), - b"two"[..].into(), - ], - ).as_ref().to_vec() - } - - fn test_sandbox(code: Vec) -> bool { - execute_sandboxed(&code, &[]).is_ok() - } - - fn test_sandbox_args(code: Vec) -> bool { - execute_sandboxed( - &code, - &[ - Value::I32(0x12345678), - Value::I64(0x1234567887654321), - ], - ).is_ok() - } - - fn test_sandbox_return_val(code: Vec) -> bool { - let ok = match execute_sandboxed( - &code, - &[ - Value::I32(0x1336), - ] - ) { - Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true, - _ => false, - }; - - ok - } - - fn test_sandbox_instantiate(code: Vec) -> u8 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let code = match sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - Ok(_) => 0, - Err(sp_sandbox::Error::Module) => 1, - Err(sp_sandbox::Error::Execution) => 2, - Err(sp_sandbox::Error::OutOfBounds) => 3, - }; - - code - } - - - fn test_sandbox_get_global_val(code: Vec) -> i64 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - i - } else { - return 20; - }; - - match instance.get_global_val("test_global") { - Some(sp_sandbox::Value::I64(val)) => val, - None => 30, - val => 40, - } - } - - - fn test_offchain_index_set() { - sp_io::offchain_index::set(b"k", b"v"); - } - - - fn test_offchain_local_storage() -> bool { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - sp_io::offchain::local_storage_set(kind, b"test", b"asd"); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), 
Some(b"asd".to_vec())); - - let res = sp_io::offchain::local_storage_compare_and_set( - kind, - b"test", - Some(b"asd".to_vec()), - b"", - ); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); - res - } - - fn test_offchain_local_storage_with_none() { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - - let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); - assert_eq!(res, true); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); - } - - fn test_offchain_http() -> bool { - use sp_core::offchain::HttpRequestStatus; - let run = || -> Option<()> { - let id = sp_io::offchain::http_request_start( - "POST", - "http://localhost:12345", - &[], - ).ok()?; - sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; - sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; - sp_io::offchain::http_request_write_body(id, &[], None).ok()?; - let status = sp_io::offchain::http_response_wait(&[id], None); - assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); - let headers = sp_io::offchain::http_response_headers(id); - assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); - let mut buffer = vec![0; 64]; - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 3); - assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 0); - - Some(()) - }; - - run().is_some() - } - - // Just some test to make sure that `sp-allocator` compiles on `no_std`. - fn test_sp_allocator_compiles() { - sp_allocator::FreeingBumpHeapAllocator::new(0); - } - - fn test_enter_span() -> u64 { - wasm_tracing::enter_span(Default::default()) - } - - fn test_exit_span(span_id: u64) { - wasm_tracing::exit(span_id) - } - - fn returns_mutable_static() -> u64 { - unsafe { - MUTABLE_STATIC += 1; - MUTABLE_STATIC - } - } - - fn allocates_huge_stack_array(trap: bool) -> Vec { - // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). - // This will just decrease (stacks in wasm32-u-u grow downwards) the stack - // pointer. This won't trap on the current compilers. - let mut data = [0u8; 1024 * 768]; - - // Then make sure we actually write something to it. - // - // If: - // 1. the stack area is placed at the beginning of the linear memory space, and - // 2. the stack pointer points to out-of-bounds area, and - // 3. a write is performed around the current stack pointer. - // - // then a trap should happen. - // - for (i, v) in data.iter_mut().enumerate() { - *v = i as u8; // deliberate truncation - } - - if trap { - // There is a small chance of this to be pulled up in theory. In practice - // the probability of that is rather low. - panic!() - } - - data.to_vec() - } - - // Check that the heap at `heap_base + offset` don't contains the test message. - // After the check succeeds the test message is written into the heap. - // - // It is expected that the given pointer is not allocated. - fn check_and_set_in_heap(heap_base: u32, offset: u32) { - let test_message = b"Hello invalid heap memory"; - let ptr = unsafe { (heap_base + offset) as *mut u8 }; +#[cfg(not(feature = "std"))] +/// This is similar to `MUTABLE_STATIC`. 
The tests need `MUTABLE_STATIC` for testing that
+/// non-null initialization data is properly restored when an instance is reused.
+///
+/// `MUTABLE_STATIC_BSS`, on the other hand, focuses on the zeroed data. This is important since
+/// there may be differences in handling zeroed and non-zeroed data.
+static mut MUTABLE_STATIC_BSS: u64 = 0;

-	let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) };
+sp_core::wasm_export_functions! {
+	fn test_calling_missing_external() {
+		unsafe { missing_external() }
+	}

-	assert_ne!(test_message, message_slice);
-	message_slice.copy_from_slice(test_message);
-	}
+	fn test_calling_yet_another_missing_external() {
+		unsafe { yet_another_missing_external() }
+	}

-	fn test_spawn() {
-		let data = vec![1u8, 2u8];
-		let data_new = sp_tasks::spawn(tasks::incrementer, data).join();
+	fn test_data_in(input: Vec<u8>) -> Vec<u8> {
+		print("set_storage");
+		storage::set(b"input", &input);

-		assert_eq!(data_new, vec![2u8, 3u8]);
-	}
+		print("storage");
+		let foo = storage::get(b"foo").unwrap();

-	fn test_nested_spawn() {
-		let data = vec![7u8, 13u8];
-		let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join();
+		print("set_storage");
+		storage::set(b"baz", &foo);

-		assert_eq!(data_new, vec![10u8, 16u8]);
-	}
+		print("finished!");
+		b"all ok!".to_vec()
+	}

-	fn test_panic_in_spawned() {
-		sp_tasks::spawn(tasks::panicker, vec![]).join();
-	}
-	}
+	fn test_clear_prefix(input: Vec<u8>) -> Vec<u8> {
+		storage::clear_prefix(&input, None);
+		b"all ok!".to_vec()
+	}
+
+	fn test_empty_return() {}
+
+	fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) {
+		// This piece of code will dirty multiple pages of memory. The number of pages is given
+		// by `heap_pages`. Its unit is a wasm page (64KiB). The first page to be cleared is the
+		// wasm page that follows the one holding the `heap_base` address.
+		//
+		// This function dirties the **host** pages, i.e. we dirty 4KiB at a time, and it will
+		// take 16 writes to process a single wasm page.
+
+		let heap_ptr = heap_base as usize;
+
+		// Find the next wasm page boundary.
+		let heap_ptr = round_up_to(heap_ptr, 65536);
+
+		// Make it an actual pointer
+		let heap_ptr = heap_ptr as *mut u8;
+
+		// Traverse the host pages and make each one dirty
+		let host_pages = heap_pages as usize * 16;
+		for i in 0..host_pages {
+			unsafe {
+				// Technically this is UB, but there is no way Rust can find this out.
+				heap_ptr.add(i * 4096).write(0);
+			}
+		}
+
+		// Round `n` up to the nearest multiple of `divisor`.
+		fn round_up_to(n: usize, divisor: usize) -> usize {
+			(n + divisor - 1) / divisor * divisor
+		}
+	}
+
+	fn test_exhaust_heap() -> Vec<u8> { Vec::with_capacity(16777216) }
+
+	fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] {
+		let a = f32::from_le_bytes(a);
+		let b = f32::from_le_bytes(b);
+		f32::to_le_bytes(a + b)
+	}
+
+	fn test_panic() { panic!("test panic") }
+
+	fn test_conditional_panic(input: Vec<u8>) -> Vec<u8> {
+		if input.len() > 0 {
+			panic!("test panic")
+		}
+
+		input
+	}
+
+	fn test_blake2_256(input: Vec<u8>) -> Vec<u8> {
+		blake2_256(&input).to_vec()
+	}
+
+	fn test_blake2_128(input: Vec<u8>) -> Vec<u8> {
+		blake2_128(&input).to_vec()
+	}
+
+	fn test_sha2_256(input: Vec<u8>) -> Vec<u8> {
+		sha2_256(&input).to_vec()
+	}
+
+	fn test_twox_256(input: Vec<u8>) -> Vec<u8> {
+		twox_256(&input).to_vec()
+	}
+
+	fn test_twox_128(input: Vec<u8>) -> Vec<u8> {
+		twox_128(&input).to_vec()
+	}
+
+	fn test_ed25519_verify(input: Vec<u8>) -> bool {
+		let mut pubkey = [0; 32];
+		let mut sig = [0; 64];
+
+		pubkey.copy_from_slice(&input[0..32]);
+		sig.copy_from_slice(&input[32..96]);
+
+		let msg = b"all ok!";
+		ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey))
+	}
+
+	fn test_sr25519_verify(input: Vec<u8>) -> bool {
+		let mut pubkey = [0; 32];
+		let mut sig = [0; 64];
+
+		pubkey.copy_from_slice(&input[0..32]);
+		sig.copy_from_slice(&input[32..96]);
+
+		let msg = b"all ok!";
+		sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey))
+	}
+
+	fn test_ordered_trie_root() -> Vec<u8> {
+		BlakeTwo256::ordered_trie_root(
+			vec![
+				b"zero"[..].into(),
+				b"one"[..].into(),
+				b"two"[..].into(),
+			],
+		).as_ref().to_vec()
+	}
+
+	fn test_sandbox(code: Vec<u8>) -> bool {
+		execute_sandboxed(&code, &[]).is_ok()
+	}
+
+	fn test_sandbox_args(code: Vec<u8>) -> bool {
+		execute_sandboxed(
+			&code,
+			&[
+				Value::I32(0x12345678),
+				Value::I64(0x1234567887654321),
+			],
+		).is_ok()
+	}
+
+	fn test_sandbox_return_val(code: Vec<u8>) -> bool {
+		let ok = match execute_sandboxed(
+			&code,
+			&[
+				Value::I32(0x1336),
+			]
+		) {
+			Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true,
+			_ => false,
+		};
+
+		ok
+	}
+
+	fn test_sandbox_instantiate(code: Vec<u8>) -> u8 {
+		let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new();
+		let code = match sp_sandbox::Instance::new(&code, &env_builder, &mut ()) {
+			Ok(_) => 0,
+			Err(sp_sandbox::Error::Module) => 1,
+			Err(sp_sandbox::Error::Execution) => 2,
+			Err(sp_sandbox::Error::OutOfBounds) => 3,
+		};
+
+		code
+	}
+
+	fn test_sandbox_get_global_val(code: Vec<u8>) -> i64 {
+		let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new();
+		let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) {
+			i
+		} else {
+			return 20;
+		};
+
+		match instance.get_global_val("test_global") {
+			Some(sp_sandbox::Value::I64(val)) => val,
+			None => 30,
+			_ => 40,
+		}
+	}
+
+	fn test_offchain_index_set() {
+		sp_io::offchain_index::set(b"k", b"v");
+	}
+
+	fn test_offchain_local_storage() -> bool {
+		let kind = sp_core::offchain::StorageKind::PERSISTENT;
+		assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None);
+		sp_io::offchain::local_storage_set(kind, b"test", b"asd");
+		assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec()));
+
+		let res = sp_io::offchain::local_storage_compare_and_set(
+			kind,
+			b"test",
+			Some(b"asd".to_vec()),
+			b"",
+		);
+		assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec()));
+		res
+	}
+
+	fn test_offchain_local_storage_with_none() {
+		let kind = sp_core::offchain::StorageKind::PERSISTENT;
+		assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None);
+
+		let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value");
+		assert_eq!(res, true);
+		assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec()));
+	}
+
+	fn test_offchain_http() -> bool {
+		use sp_core::offchain::HttpRequestStatus;
+		let run = || -> Option<()> {
+			let id = sp_io::offchain::http_request_start(
+				"POST",
+				"http://localhost:12345",
+				&[],
+			).ok()?;
+			sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?;
+			sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?;
+			sp_io::offchain::http_request_write_body(id, &[], None).ok()?;
+			let status = sp_io::offchain::http_response_wait(&[id], None);
+			assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status.");
+			let headers = sp_io::offchain::http_response_headers(id);
+			assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]);
+			let mut buffer = vec![0; 64];
+			let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?;
+			assert_eq!(read, 3);
+			assert_eq!(&buffer[0..read as usize], &[1, 2, 3]);
+			let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?;
+			assert_eq!(read, 0);
+
+			Some(())
+		};
+
+		run().is_some()
+	}
+
+	fn test_enter_span() -> u64 {
+		wasm_tracing::enter_span(Default::default())
+	}
+
+	fn test_exit_span(span_id: u64) {
+		wasm_tracing::exit(span_id)
+	}
+
+	fn test_nested_spans() {
+		sp_io::init_tracing();
+		let span_id = wasm_tracing::enter_span(Default::default());
+		{
+			sp_io::init_tracing();
+			let span_id = wasm_tracing::enter_span(Default::default());
+			wasm_tracing::exit(span_id);
+		}
+		wasm_tracing::exit(span_id);
+	}
+
+	fn returns_mutable_static() -> u64 {
+		unsafe {
+			MUTABLE_STATIC += 1;
+			MUTABLE_STATIC
+		}
+	}
+
+	fn returns_mutable_static_bss() -> u64 {
+		unsafe {
+			MUTABLE_STATIC_BSS += 1;
+			MUTABLE_STATIC_BSS
+		}
+	}
+
+	fn allocates_huge_stack_array(trap: bool) -> Vec<u8> {
+		// Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB).
+		// This will just decrease (stacks in wasm32-u-u grow downwards) the stack
+		// pointer. This won't trap on the current compilers.
+		let mut data = [0u8; 1024 * 768];
+
+		// Then make sure we actually write something to it.
+		//
+		// If:
+		// 1. the stack area is placed at the beginning of the linear memory space, and
+		// 2. the stack pointer points to an out-of-bounds area, and
+		// 3. a write is performed around the current stack pointer,
+		//
+		// then a trap should happen.
+		//
+		for (i, v) in data.iter_mut().enumerate() {
+			*v = i as u8; // deliberate truncation
+		}
+
+		if trap {
+			// There is a small chance of this being pulled up in theory. In practice
+			// the probability of that is rather low.
+			panic!()
+		}
+
+		data.to_vec()
+	}
+
+	// Check that the heap at `heap_base + offset` doesn't contain the test message.
+	// After the check succeeds the test message is written into the heap.
+	//
+	// It is expected that the given pointer is not allocated.
+	fn check_and_set_in_heap(heap_base: u32, offset: u32) {
+		let test_message = b"Hello invalid heap memory";
+		let ptr = unsafe { (heap_base + offset) as *mut u8 };
+
+		let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) };
+
+		assert_ne!(test_message, message_slice);
+		message_slice.copy_from_slice(test_message);
+	}
+
+	fn test_spawn() {
+		let data = vec![1u8, 2u8];
+		let data_new = sp_tasks::spawn(tasks::incrementer, data).join();
+
+		assert_eq!(data_new, vec![2u8, 3u8]);
+	}
+
+	fn test_nested_spawn() {
+		let data = vec![7u8, 13u8];
+		let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join();
+
+		assert_eq!(data_new, vec![10u8, 16u8]);
+	}
+
+	fn test_panic_in_spawned() {
+		sp_tasks::spawn(tasks::panicker, vec![]).join();
+	}
+}
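Each entry in the `wasm_export_functions!` block above becomes a plain C-ABI export that receives its SCALE-encoded arguments as a pointer/length pair and returns the encoded result packed into a `u64`. A rough hand-expansion for a single-argument entry (illustrative only; the macro's literal output differs in details, and it assumes sp-core's `to_substrate_wasm_fn_return_value` packing helper):

#[no_mangle]
pub extern "C" fn test_blake2_256(arg_data: *mut u8, arg_len: usize) -> u64 {
	// Reconstruct the argument slice handed over by the host.
	let mut input: &[u8] = if arg_len == 0 {
		&[]
	} else {
		unsafe { sp_std::slice::from_raw_parts(arg_data, arg_len) }
	};

	// Decode the SCALE-encoded argument.
	let input_vec: Vec<u8> = codec::Decode::decode(&mut input).expect("bad arguments");

	// Run the body and hand the SCALE-encoded result back as a packed u64.
	let output = blake2_256(&input_vec).to_vec();
	sp_core::to_substrate_wasm_fn_return_value(&output)
}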
-	#[cfg(not(feature = "std"))]
-	mod tasks {
+#[cfg(not(feature = "std"))]
+mod tasks {
 	use sp_std::prelude::*;

 	pub fn incrementer(data: Vec<u8>) -> Vec<u8> {
-			data.into_iter().map(|v| v + 1).collect()
+		data.into_iter().map(|v| v + 1).collect()
 	}

 	pub fn panicker(_: Vec<u8>) -> Vec<u8> {
@@ -342,11 +402,11 @@
 	}

 	pub fn parallel_incrementer(data: Vec<u8>) -> Vec<u8> {
-			let first = data.into_iter().map(|v| v + 2).collect::<Vec<u8>>();
-			let second = sp_tasks::spawn(incrementer, first).join();
-			second
+		let first = data.into_iter().map(|v| v + 2).collect::<Vec<u8>>();
+		let second = sp_tasks::spawn(incrementer, first).join();
+		second
 	}
-	}
+}

 #[cfg(not(feature = "std"))]
 fn execute_sandboxed(
@@ -362,7 +422,7 @@
 		args: &[Value],
 	) -> Result<sp_sandbox::ReturnValue, sp_sandbox::HostError> {
 		if args.len() != 1 {
-			return Err(sp_sandbox::HostError);
+			return Err(sp_sandbox::HostError)
 		}
 		let condition = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?;
 		if condition != 0 {
@@ -376,7 +436,7 @@
 		args: &[Value],
 	) -> Result<sp_sandbox::ReturnValue, sp_sandbox::HostError> {
 		if args.len() != 1 {
-			return Err(sp_sandbox::HostError);
+			return Err(sp_sandbox::HostError)
 		}
 		let inc_by = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?;
 		e.counter += inc_by as u32;
@@ -391,7 +451,8 @@
 	env_builder.add_host_func("env", "inc_counter", env_inc_counter);
 	let memory = match sp_sandbox::Memory::new(1, Some(16)) {
 		Ok(m) => m,
-		Err(_) => unreachable!("
+		Err(_) => unreachable!(
+			"
 			Memory::new() can return Err only if parameters are borked; \
 			We passing params here explicitly and they're correct; \
 			Memory::new() can't return a Error qed"
diff --git a/client/executor/src/integration_tests/linux.rs b/client/executor/src/integration_tests/linux.rs
new file mode 100644
index 0000000000000..7e0696973dc77
--- /dev/null
+++ b/client/executor/src/integration_tests/linux.rs
@@ -0,0 +1,67 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Tests that are only relevant for Linux.
+
+// Constrain this only to wasmtime for the time being. Without this, rustc will complain about
+// unused imports and items. The alternative is to plop `cfg(feature = "wasmtime")` everywhere,
+// which seems bothersome.
+#![cfg(feature = "wasmtime")]
+
+use super::mk_test_runtime;
+use crate::WasmExecutionMethod;
+use codec::Encode as _;
+
+mod smaps;
+
+use self::smaps::Smaps;
+
+#[test]
+fn memory_consumption_compiled() {
+	// This aims to see if linear memory stays backed by physical memory after a runtime call.
+	//
+	// For that we make a series of runtime calls, probing the RSS for the VMA matching the
+	// linear memory. After the call we expect RSS to be equal to 0.
+
+	let runtime = mk_test_runtime(WasmExecutionMethod::Compiled, 1024);
+
+	let instance = runtime.new_instance().unwrap();
+	let heap_base = instance
+		.get_global_const("__heap_base")
+		.expect("`__heap_base` is valid")
+		.expect("`__heap_base` exists")
+		.as_i32()
+		.expect("`__heap_base` is an `i32`");
+
+	fn probe_rss(instance: &dyn sc_executor_common::wasm_runtime::WasmInstance) -> usize {
+		let base_addr = instance.linear_memory_base_ptr().unwrap() as usize;
+		Smaps::new().get_rss(base_addr).expect("failed to get rss")
+	}
+
+	instance
+		.call_export("test_dirty_plenty_memory", &(heap_base as u32, 1u32).encode())
+		.unwrap();
+	let probe_1 = probe_rss(&*instance);
+	instance
+		.call_export("test_dirty_plenty_memory", &(heap_base as u32, 1024u32).encode())
+		.unwrap();
+	let probe_2 = probe_rss(&*instance);
+
+	assert_eq!(probe_1, 0);
+	assert_eq!(probe_2, 0);
+}
diff --git a/client/executor/src/integration_tests/linux/smaps.rs b/client/executor/src/integration_tests/linux/smaps.rs
new file mode 100644
index 0000000000000..b23a188b93a26
--- /dev/null
+++ b/client/executor/src/integration_tests/linux/smaps.rs
@@ -0,0 +1,82 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! A tool for extracting information about the memory consumption of the current process from
+//! the procfs.
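For readers who have not stared at smaps output before: each mapping starts with an address-range header line and is followed by `Key: N kB` entries, which is exactly what the two regexes below pick apart. A quick self-contained check against an invented excerpt (sample addresses and sizes are made up):

#[test]
fn smaps_sample_parses() {
	let sample = "7f41a8000000-7f41ac000000 rw-p 00000000 00:00 0\nRss:                  16 kB\n";

	let regex_start = regex::RegexBuilder::new("^([0-9a-f]+)-([0-9a-f]+)")
		.multi_line(true)
		.build()
		.unwrap();
	let regex_kv = regex::RegexBuilder::new(r"^([^:]+):\s*(\d+) kB")
		.multi_line(true)
		.build()
		.unwrap();

	// The header line yields the VMA boundaries...
	let caps = regex_start.captures(sample).unwrap();
	assert_eq!(usize::from_str_radix(caps.get(1).unwrap().as_str(), 16).unwrap(), 0x7f41_a800_0000);

	// ...and the key/value lines yield the per-mapping counters in kB.
	let kv = regex_kv.captures(sample).unwrap();
	assert_eq!(kv.get(1).unwrap().as_str(), "Rss");
	assert_eq!(kv.get(2).unwrap().as_str().parse::<usize>().unwrap(), 16);
}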
+
+use std::{collections::BTreeMap, ops::Range};
+
+/// An interface to the `/proc/self/smaps` file.
+///
+/// See the docs about [procfs on kernel.org][procfs].
+///
+/// [procfs]: https://www.kernel.org/doc/html/latest/filesystems/proc.html
+pub struct Smaps(Vec<(Range<usize>, BTreeMap<String, usize>)>);
+
+impl Smaps {
+	pub fn new() -> Self {
+		let regex_start = regex::RegexBuilder::new("^([0-9a-f]+)-([0-9a-f]+)")
+			.multi_line(true)
+			.build()
+			.unwrap();
+		let regex_kv = regex::RegexBuilder::new(r#"^([^:]+):\s*(\d+) kB"#)
+			.multi_line(true)
+			.build()
+			.unwrap();
+		let smaps = std::fs::read_to_string("/proc/self/smaps").unwrap();
+		let boundaries: Vec<_> = regex_start
+			.find_iter(&smaps)
+			.map(|matched| matched.start())
+			.chain(std::iter::once(smaps.len()))
+			.collect();
+
+		let mut output = Vec::new();
+		for window in boundaries.windows(2) {
+			let chunk = &smaps[window[0]..window[1]];
+			let caps = regex_start.captures(chunk).unwrap();
+			let start = usize::from_str_radix(caps.get(1).unwrap().as_str(), 16).unwrap();
+			let end = usize::from_str_radix(caps.get(2).unwrap().as_str(), 16).unwrap();
+
+			let values = regex_kv
+				.captures_iter(chunk)
+				.map(|cap| {
+					let key = cap.get(1).unwrap().as_str().to_owned();
+					let value = cap.get(2).unwrap().as_str().parse().unwrap();
+					(key, value)
+				})
+				.collect();
+
+			output.push((start..end, values));
+		}
+
+		Self(output)
+	}
+
+	fn get_map(&self, addr: usize) -> &BTreeMap<String, usize> {
+		&self
+			.0
+			.iter()
+			.find(|(range, _)| addr >= range.start && addr < range.end)
+			.unwrap()
+			.1
+	}
+
+	pub fn get_rss(&self, addr: usize) -> Option<usize> {
+		self.get_map(addr).get("Rss").cloned()
+	}
+}
diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs
index c8b763a6b1936..dabead4799dc8 100644
--- a/client/executor/src/integration_tests/mod.rs
+++ b/client/executor/src/integration_tests/mod.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -15,21 +15,27 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
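One navigational note before the test changes: the `test_wasm_execution!` macro introduced just below replaces the former `test_case` attributes by stamping out one `#[test]` per execution method. For a test body `foo`, the generated items are essentially:

#[test]
fn foo_interpreted() {
	foo(WasmExecutionMethod::Interpreted);
}

#[test]
#[cfg(feature = "wasmtime")]
fn foo_compiled() {
	foo(WasmExecutionMethod::Compiled);
}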
+ +#[cfg(target_os = "linux")] +mod linux; mod sandbox; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use hex_literal::hex; +use sc_executor_common::{runtime_blob::RuntimeBlob, wasm_runtime::WasmModule}; +use sc_runtime_test::wasm_binary_unwrap; use sp_core::{ - blake2_128, blake2_256, ed25519, sr25519, map, Pair, - offchain::{OffchainExt, testing}, - traits::{Externalities, CallInWasm}, + blake2_128, blake2_256, ed25519, map, + offchain::{testing, OffchainDbExt, OffchainWorkerExt}, + sr25519, + traits::Externalities, + Pair, }; -use sc_runtime_test::wasm_binary_unwrap; +use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; -use test_case::test_case; -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; use sp_wasm_interface::HostFunctions as _; -use sp_runtime::traits::BlakeTwo256; +use std::sync::Arc; use tracing_subscriber::layer::SubscriberExt; use crate::WasmExecutionMethod; @@ -37,6 +43,34 @@ use crate::WasmExecutionMethod; pub type TestExternalities = CoreTestExternalities; type HostFunctions = sp_io::SubstrateHostFunctions; +/// Simple macro that runs a given method as test with the available wasm execution methods. +#[macro_export] +macro_rules! test_wasm_execution { + ($method_name:ident) => { + paste::item! { + #[test] + fn [<$method_name _interpreted>]() { + $method_name(WasmExecutionMethod::Interpreted); + } + + #[test] + #[cfg(feature = "wasmtime")] + fn [<$method_name _compiled>]() { + $method_name(WasmExecutionMethod::Compiled); + } + } + }; + + (interpreted_only $method_name:ident) => { + paste::item! { + #[test] + fn [<$method_name _interpreted>]() { + $method_name(WasmExecutionMethod::Interpreted); + } + } + }; +} + fn call_in_wasm( function: &str, call_data: &[u8], @@ -48,34 +82,27 @@ fn call_in_wasm( Some(1024), HostFunctions::host_functions(), 8, - ); - executor.call_in_wasm( - &wasm_binary_unwrap()[..], None, + ); + executor.uncached_call( + RuntimeBlob::uncompress_if_needed(&wasm_binary_unwrap()[..]).unwrap(), + ext, + true, function, call_data, - ext, - sp_core::traits::MissingHostFunctions::Allow, ) } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(returning_should_work); fn returning_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let output = call_in_wasm( - "test_empty_return", - &[], - wasm_method, - &mut ext, - ).unwrap(); + let output = call_in_wasm("test_empty_return", &[], wasm_method, &mut ext).unwrap(); assert_eq!(output, vec![0u8; 0]); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(call_not_existing_function); fn call_not_existing_function(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -102,8 +129,7 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) { } } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(call_yet_another_not_existing_function); fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -130,39 +156,22 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { } } 
-#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(panicking_should_work); fn panicking_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let output = call_in_wasm( - "test_panic", - &[], - wasm_method, - &mut ext, - ); + let output = call_in_wasm("test_panic", &[], wasm_method, &mut ext); assert!(output.is_err()); - let output = call_in_wasm( - "test_conditional_panic", - &[0], - wasm_method, - &mut ext, - ); + let output = call_in_wasm("test_conditional_panic", &[0], wasm_method, &mut ext); assert_eq!(Decode::decode(&mut &output.unwrap()[..]), Ok(Vec::::new())); - let output = call_in_wasm( - "test_conditional_panic", - &vec![2].encode(), - wasm_method, - &mut ext, - ); + let output = call_in_wasm("test_conditional_panic", &vec![2].encode(), wasm_method, &mut ext); assert!(output.is_err()); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(storage_should_work); fn storage_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); @@ -170,12 +179,9 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - let output = call_in_wasm( - "test_data_in", - &b"Hello world".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); + let output = + call_in_wasm("test_data_in", &b"Hello world".to_vec().encode(), wasm_method, &mut ext) + .unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); } @@ -191,8 +197,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(ext, expected); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(clear_prefix_should_work); fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); { @@ -204,12 +209,9 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { ext.set_storage(b"bbb".to_vec(), b"5".to_vec()); // This will clear all entries which prefix is "ab". 
- let output = call_in_wasm( - "test_clear_prefix", - &b"ab".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); + let output = + call_in_wasm("test_clear_prefix", &b"ab".to_vec().encode(), wasm_method, &mut ext) + .unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); } @@ -225,143 +227,90 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(expected, ext); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(blake2_256_should_work); fn blake2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_blake2_256", - &[0], - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_256", &[0], wasm_method, &mut ext,).unwrap(), blake2_256(&b""[..]).to_vec().encode(), ); assert_eq!( - call_in_wasm( - "test_blake2_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), blake2_256(&b"Hello world!"[..]).to_vec().encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(blake2_128_should_work); fn blake2_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_blake2_128", - &[0], - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_128", &[0], wasm_method, &mut ext,).unwrap(), blake2_128(&b""[..]).to_vec().encode(), ); assert_eq!( - call_in_wasm( - "test_blake2_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_128", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), blake2_128(&b"Hello world!"[..]).to_vec().encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sha2_256_should_work); fn sha2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_sha2_256", - &[0], - wasm_method, - &mut ext, - ) - .unwrap(), + call_in_wasm("test_sha2_256", &[0], wasm_method, &mut ext,).unwrap(), hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") .to_vec() .encode(), ); assert_eq!( - call_in_wasm( - "test_sha2_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ) - .unwrap(), + call_in_wasm("test_sha2_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), hex!("c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a") .to_vec() .encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(twox_256_should_work); fn twox_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_twox_256", - &[0], - wasm_method, - &mut ext, - ).unwrap(), - hex!( - "99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a" - ).to_vec().encode(), + call_in_wasm("test_twox_256", &[0], wasm_method, &mut ext,).unwrap(), + 
hex!("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") + .to_vec() + .encode(), ); assert_eq!( - call_in_wasm( - "test_twox_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), - hex!( - "b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74" - ).to_vec().encode(), + call_in_wasm("test_twox_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), + hex!("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") + .to_vec() + .encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(twox_128_should_work); fn twox_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_twox_128", - &[0], - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_twox_128", &[0], wasm_method, &mut ext,).unwrap(), hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), ); assert_eq!( - call_in_wasm( - "test_twox_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_twox_128", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(ed25519_verify_should_work); fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -372,12 +321,7 @@ fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(sig.as_ref()); assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_ed25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), true.encode(), ); @@ -387,18 +331,12 @@ fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(other_sig.as_ref()); assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_ed25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), false.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sr25519_verify_should_work); fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -409,12 +347,7 @@ fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(sig.as_ref()); assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sr25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), true.encode(), ); @@ -424,135 +357,115 @@ fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(other_sig.as_ref()); assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sr25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), false.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] 
+test_wasm_execution!(ordered_trie_root_should_work); fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; assert_eq!( - call_in_wasm( - "test_ordered_trie_root", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), + call_in_wasm("test_ordered_trie_root", &[0], wasm_method, &mut ext.ext(),).unwrap(), Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(offchain_index); fn offchain_index(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let (offchain, _state) = testing::TestOffchainExt::new(); - ext.register_extension(OffchainExt::new(offchain)); - call_in_wasm( - "test_offchain_index_set", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(); - - use sp_core::offchain::storage::OffchainOverlayedChange; - assert_eq!( - ext.ext() - .get_offchain_storage_changes() - .get(sp_core::offchain::STORAGE_PREFIX, b"k"), - Some(OffchainOverlayedChange::SetValue(b"v".to_vec())) - ); + ext.register_extension(OffchainWorkerExt::new(offchain)); + call_in_wasm("test_offchain_index_set", &[0], wasm_method, &mut ext.ext()).unwrap(); + + use sp_core::offchain::OffchainOverlayedChange; + let data = ext + .overlayed_changes() + .clone() + .offchain_drain_committed() + .find(|(k, _v)| k == &(sp_core::offchain::STORAGE_PREFIX.to_vec(), b"k".to_vec())); + assert_eq!(data.map(|data| data.1), Some(OffchainOverlayedChange::SetValue(b"v".to_vec()))); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(offchain_local_storage_should_work); fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { - use sp_core::offchain::OffchainStorage; - let mut ext = TestExternalities::default(); let (offchain, state) = testing::TestOffchainExt::new(); - ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); assert_eq!( - call_in_wasm( - "test_offchain_local_storage", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), + call_in_wasm("test_offchain_local_storage", &[0], wasm_method, &mut ext.ext(),).unwrap(), true.encode(), ); - assert_eq!(state.read().persistent_storage.get(b"", b"test"), Some(vec![])); + assert_eq!(state.read().persistent_storage.get(b"test"), Some(vec![])); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(offchain_http_should_work); fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let (offchain, state) = testing::TestOffchainExt::new(); - ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(OffchainWorkerExt::new(offchain)); state.write().expect_request(testing::PendingRequest { - method: "POST".into(), - uri: "http://localhost:12345".into(), - body: vec![1, 2, 3, 4], - headers: vec![("X-Auth".to_owned(), "test".to_owned())], - sent: true, - response: Some(vec![1, 2, 3]), - response_headers: vec![("X-Auth".to_owned(), "hello".to_owned())], - ..Default::default() - }, - ); + method: "POST".into(), + uri: "http://localhost:12345".into(), + body: vec![1, 2, 3, 4], + headers: 
vec![("X-Auth".to_owned(), "test".to_owned())], + sent: true, + response: Some(vec![1, 2, 3]), + response_headers: vec![("X-Auth".to_owned(), "hello".to_owned())], + ..Default::default() + }); assert_eq!( - call_in_wasm( - "test_offchain_http", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), + call_in_wasm("test_offchain_http", &[0], wasm_method, &mut ext.ext(),).unwrap(), true.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] -#[should_panic(expected = "Allocator ran out of space")] +test_wasm_execution!(should_trap_when_heap_exhausted); fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let executor = crate::WasmExecutor::new( wasm_method, - Some(17), // `17` is the initial number of pages compiled into the binary. + Some(17), // `17` is the initial number of pages compiled into the binary. HostFunctions::host_functions(), 8, - ); - executor.call_in_wasm( - &wasm_binary_unwrap()[..], None, - "test_exhaust_heap", - &[0], - &mut ext.ext(), - sp_core::traits::MissingHostFunctions::Allow, - ).unwrap(); + ); + + let err = executor + .uncached_call( + RuntimeBlob::uncompress_if_needed(&wasm_binary_unwrap()[..]).unwrap(), + &mut ext.ext(), + true, + "test_exhaust_heap", + &[0], + ) + .unwrap_err(); + + assert!(err.contains("Allocator ran out of space")); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] -fn returns_mutable_static(wasm_method: WasmExecutionMethod) { - let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( +fn mk_test_runtime(wasm_method: WasmExecutionMethod, pages: u64) -> Arc { + let blob = RuntimeBlob::uncompress_if_needed(&wasm_binary_unwrap()[..]) + .expect("failed to create a runtime blob out of test runtime"); + + crate::wasm_runtime::create_wasm_runtime_with_code( wasm_method, - 1024, - &wasm_binary_unwrap()[..], + pages, + blob, HostFunctions::host_functions(), true, - ).expect("Creates runtime"); + None, + ) + .expect("failed to instantiate wasm runtime") +} + +test_wasm_execution!(returns_mutable_static); +fn returns_mutable_static(wasm_method: WasmExecutionMethod) { + let runtime = mk_test_runtime(wasm_method, 1024); let instance = runtime.new_instance().unwrap(); let res = instance.call_export("returns_mutable_static", &[0]).unwrap(); @@ -565,12 +478,26 @@ fn returns_mutable_static(wasm_method: WasmExecutionMethod) { assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); } +test_wasm_execution!(returns_mutable_static_bss); +fn returns_mutable_static_bss(wasm_method: WasmExecutionMethod) { + let runtime = mk_test_runtime(wasm_method, 1024); + + let instance = runtime.new_instance().unwrap(); + let res = instance.call_export("returns_mutable_static_bss", &[0]).unwrap(); + assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); + + // We expect that every invocation will need to return the initial + // value plus one. If the value increases more than that then it is + // a sign that the wasm runtime preserves the memory content. + let res = instance.call_export("returns_mutable_static_bss", &[0]).unwrap(); + assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); +} + // If we didn't restore the wasm instance properly, on a trap the stack pointer would not be // returned to its initial value and thus the stack space is going to be leaked. 
// // See https://github.com/paritytech/substrate/issues/2967 for details -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(restoration_of_globals); fn restoration_of_globals(wasm_method: WasmExecutionMethod) { // Allocate 32 pages (of 65536 bytes) which gives the runtime 2048KB of heap to operate on // (plus some additional space unused from the initial pages requested by the wasm runtime @@ -580,13 +507,7 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { // to our allocator algorithm there are inefficiencies. const REQUIRED_MEMORY_PAGES: u64 = 32; - let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( - wasm_method, - REQUIRED_MEMORY_PAGES, - &wasm_binary_unwrap()[..], - HostFunctions::host_functions(), - true, - ).expect("Creates runtime"); + let runtime = mk_test_runtime(wasm_method, REQUIRED_MEMORY_PAGES); let instance = runtime.new_instance().unwrap(); // On the first invocation we allocate approx. 768KB (75%) of stack and then trap. @@ -598,18 +519,13 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { assert!(res.is_ok()); } -#[test_case(WasmExecutionMethod::Interpreted)] +test_wasm_execution!(interpreted_only heap_is_reset_between_calls); fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { - let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( - wasm_method, - 1024, - &wasm_binary_unwrap()[..], - HostFunctions::host_functions(), - true, - ).expect("Creates runtime"); + let runtime = mk_test_runtime(wasm_method, 1024); let instance = runtime.new_instance().unwrap(); - let heap_base = instance.get_global_const("__heap_base") + let heap_base = instance + .get_global_const("__heap_base") .expect("`__heap_base` is valid") .expect("`__heap_base` exists") .as_i32() @@ -622,48 +538,46 @@ fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { instance.call_export("check_and_set_in_heap", ¶ms).unwrap(); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(parallel_execution); fn parallel_execution(wasm_method: WasmExecutionMethod) { let executor = std::sync::Arc::new(crate::WasmExecutor::new( wasm_method, Some(1024), HostFunctions::host_functions(), 8, + None, )); - let code_hash = blake2_256(wasm_binary_unwrap()).to_vec(); - let threads: Vec<_> = (0..8).map(|_| - { + let threads: Vec<_> = (0..8) + .map(|_| { let executor = executor.clone(); - let code_hash = code_hash.clone(); std::thread::spawn(move || { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - executor.call_in_wasm( - &wasm_binary_unwrap()[..], - Some(code_hash.clone()), - "test_twox_128", - &[0], - &mut ext, - sp_core::traits::MissingHostFunctions::Allow, - ).unwrap(), + executor + .uncached_call( + RuntimeBlob::uncompress_if_needed(&wasm_binary_unwrap()[..]).unwrap(), + &mut ext, + true, + "test_twox_128", + &[0], + ) + .unwrap(), hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), ); }) - }).collect(); + }) + .collect(); for t in threads.into_iter() { t.join().unwrap(); } } -#[test_case(WasmExecutionMethod::Interpreted)] +test_wasm_execution!(wasm_tracing_should_work); fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { - - use std::sync::{Arc, Mutex}; - use sc_tracing::{SpanDatum, TraceEvent}; + use std::sync::Mutex; struct TestTraceHandler(Arc>>); @@ -679,36 +593,23 @@ fn 
wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { let handler = TestTraceHandler(traces.clone()); // Create subscriber with wasm_tracing disabled - let test_subscriber = tracing_subscriber::fmt().finish().with( - sc_tracing::ProfilingLayer::new_with_handler( - Box::new(handler), "default" - ) - ); + let test_subscriber = tracing_subscriber::fmt() + .finish() + .with(sc_tracing::ProfilingLayer::new_with_handler(Box::new(handler), "default")); let _guard = tracing::subscriber::set_default(test_subscriber); let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let span_id = call_in_wasm( - "test_enter_span", - Default::default(), - wasm_method, - &mut ext, - ).unwrap(); + let span_id = + call_in_wasm("test_enter_span", Default::default(), wasm_method, &mut ext).unwrap(); let span_id = u64::decode(&mut &span_id[..]).unwrap(); - assert!( - span_id > 0 - ); + assert!(span_id > 0); - call_in_wasm( - "test_exit_span", - &span_id.encode(), - wasm_method, - &mut ext, - ).unwrap(); + call_in_wasm("test_exit_span", &span_id.encode(), wasm_method, &mut ext).unwrap(); // Check there is only the single trace let len = traces.lock().unwrap().len(); @@ -719,52 +620,35 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(span_datum.target, "default"); assert_eq!(span_datum.name, ""); assert_eq!(values.bool_values.get("wasm").unwrap(), &true); + + call_in_wasm("test_nested_spans", Default::default(), wasm_method, &mut ext).unwrap(); + let len = traces.lock().unwrap().len(); + assert_eq!(len, 2); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(spawning_runtime_instance_should_work); fn spawning_runtime_instance_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - call_in_wasm( - "test_spawn", - &[], - wasm_method, - &mut ext, - ).unwrap(); + call_in_wasm("test_spawn", &[], wasm_method, &mut ext).unwrap(); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(spawning_runtime_instance_nested_should_work); fn spawning_runtime_instance_nested_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - call_in_wasm( - "test_nested_spawn", - &[], - wasm_method, - &mut ext, - ).unwrap(); + call_in_wasm("test_nested_spawn", &[], wasm_method, &mut ext).unwrap(); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(panic_in_spawned_instance_panics_on_joining_its_result); fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let error_result = call_in_wasm( - "test_panic_in_spawned", - &[], - wasm_method, - &mut ext, - ).unwrap_err(); + let error_result = + call_in_wasm("test_panic_in_spawned", &[], wasm_method, &mut ext).unwrap_err(); - dbg!(&error_result); assert!(format!("{}", error_result).contains("Spawned task")); } diff --git a/client/executor/src/integration_tests/sandbox.rs b/client/executor/src/integration_tests/sandbox.rs index 447e395c2fb08..aacd493297cc8 100644 --- a/client/executor/src/integration_tests/sandbox.rs +++ b/client/executor/src/integration_tests/sandbox.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
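The sandbox tests that follow undergo the same migration seen above: each `#[test_case(WasmExecutionMethod::Interpreted)]` / `#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))]` pair becomes a single `test_wasm_execution!(name);` invocation. The macro itself is defined elsewhere in `sc-executor` and is not part of this patch's hunks; the following is only a plausible sketch of its expansion, assuming the `paste` crate for identifier concatenation:

// Plausible sketch only; the real macro may differ in detail.
macro_rules! test_wasm_execution {
    ($method_name:ident) => {
        paste::paste! {
            #[test]
            fn [<$method_name _interpreted>]() {
                $method_name(WasmExecutionMethod::Interpreted);
            }

            #[test]
            #[cfg(feature = "wasmtime")]
            fn [<$method_name _compiled>]() {
                $method_name(WasmExecutionMethod::Compiled);
            }
        }
    };
    // The `interpreted_only` prefix, as used for `heap_is_reset_between_calls`
    // earlier in this patch, would emit only the interpreted variant.
    (interpreted_only $method_name:ident) => {
        paste::paste! {
            #[test]
            fn [<$method_name _interpreted>]() {
                $method_name(WasmExecutionMethod::Interpreted);
            }
        }
    };
}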
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,19 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use super::{TestExternalities, call_in_wasm}; -use crate::WasmExecutionMethod; +use super::{call_in_wasm, TestExternalities}; +use crate::{test_wasm_execution, WasmExecutionMethod}; use codec::Encode; -use test_case::test_case; -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sandbox_should_work); fn sandbox_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -47,26 +46,21 @@ fn sandbox_should_work(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext).unwrap(), true.encode()); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(sandbox_trap); fn sandbox_trap(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) (func (export "call") @@ -74,26 +68,20 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap(); + "#, + ) + .unwrap(); - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - vec![0], - ); + assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext).unwrap(), vec![0]); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(start_called); fn start_called(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -118,26 +106,21 @@ fn start_called(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext).unwrap(), true.encode()); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(invoke_args); fn invoke_args(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) @@ -158,26 +141,24 @@ fn 
invoke_args(wasm_method: WasmExecutionMethod) { ) ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_args", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_args", &code, wasm_method, &mut ext,).unwrap(), true.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(return_val); fn return_val(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -186,47 +167,42 @@ fn return_val(wasm_method: WasmExecutionMethod) { ) ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_return_val", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_return_val", &code, wasm_method, &mut ext,).unwrap(), true.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(unlinkable_module); fn unlinkable_module(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "non-existent" (func)) (func (export "call") ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 1u8.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(corrupted_module); fn corrupted_module(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -235,23 +211,18 @@ fn corrupted_module(wasm_method: WasmExecutionMethod) { let code = vec![0u8, 0, 0, 0, 1, 0, 0, 0].encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 1u8.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(start_fn_ok); fn start_fn_ok(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") ) @@ -261,26 +232,24 @@ fn start_fn_ok(wasm_method: WasmExecutionMethod) { (start $start) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 0u8.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(start_fn_traps); fn start_fn_traps(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") ) @@ -291,38 +260,34 
@@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { (start $start) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 2u8.encode(), ); } -#[test_case(WasmExecutionMethod::Interpreted)] -#[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] +test_wasm_execution!(get_global_val_works); fn get_global_val_works(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (global (export "test_global") i64 (i64.const 500)) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_get_global_val", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_get_global_val", &code, wasm_method, &mut ext,).unwrap(), 500i64.encode(), ); } diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index 56a81b24b4076..041db87bc82ab 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -29,31 +29,30 @@ //! wasm engine used, instance cache. #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "128"] #[macro_use] mod native_executor; -mod wasm_runtime; #[cfg(test)] mod integration_tests; +mod wasm_runtime; -pub use wasmi; -pub use native_executor::{with_externalities_safe, NativeExecutor, WasmExecutor, NativeExecutionDispatch}; -pub use sp_version::{RuntimeVersion, NativeVersion}; pub use codec::Codec; +pub use native_executor::{ + with_externalities_safe, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor, +}; #[doc(hidden)] -pub use sp_core::traits::{Externalities, CallInWasm}; +pub use sp_core::traits::Externalities; +pub use sp_version::{NativeVersion, RuntimeVersion}; #[doc(hidden)] pub use sp_wasm_interface; -pub use wasm_runtime::WasmExecutionMethod; +pub use wasm_runtime::{read_embedded_version, WasmExecutionMethod}; +pub use wasmi; pub use sc_executor_common::{error, sandbox}; -/// Provides runtime information. -pub trait RuntimeInfo { - /// Native runtime information. - fn native_version(&self) -> &NativeVersion; - +/// Extracts the runtime version of a given runtime code. +pub trait RuntimeVersionOf { /// Extract [`RuntimeVersion`](sp_version::RuntimeVersion) of the given `runtime_code`. 
fn runtime_version( &self, @@ -65,10 +64,10 @@ pub trait RuntimeInfo { #[cfg(test)] mod tests { use super::*; + use sc_executor_common::runtime_blob::RuntimeBlob; use sc_runtime_test::wasm_binary_unwrap; use sp_io::TestExternalities; use sp_wasm_interface::HostFunctions; - use sp_core::traits::CallInWasm; #[test] fn call_in_interpreted_wasm_works() { @@ -80,15 +79,17 @@ mod tests { Some(8), sp_io::SubstrateHostFunctions::host_functions(), 8, - ); - let res = executor.call_in_wasm( - &wasm_binary_unwrap()[..], None, - "test_empty_return", - &[], - &mut ext, - sp_core::traits::MissingHostFunctions::Allow, - ).unwrap(); + ); + let res = executor + .uncached_call( + RuntimeBlob::uncompress_if_needed(&wasm_binary_unwrap()[..]).unwrap(), + &mut ext, + true, + "test_empty_return", + &[], + ) + .unwrap(); assert_eq!(res, vec![0u8; 0]); } } diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 1da82313a2df9..38dba55b5f87c 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,58 +17,61 @@ // along with this program. If not, see . use crate::{ - RuntimeInfo, error::{Error, Result}, + error::{Error, Result}, wasm_runtime::{RuntimeCache, WasmExecutionMethod}, + RuntimeVersionOf, }; use std::{ collections::HashMap, - panic::{UnwindSafe, AssertUnwindSafe}, + panic::{AssertUnwindSafe, UnwindSafe}, + path::PathBuf, result, - sync::{Arc, atomic::{AtomicU64, Ordering}, mpsc}, + sync::{ + atomic::{AtomicU64, Ordering}, + mpsc, Arc, + }, }; -use sp_version::{NativeVersion, RuntimeVersion}; use codec::{Decode, Encode}; +use log::trace; +use sc_executor_common::{ + runtime_blob::RuntimeBlob, + wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, +}; use sp_core::{ + traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}, NativeOrEncoded, - traits::{ - CodeExecutor, Externalities, RuntimeCode, MissingHostFunctions, - RuntimeSpawnExt, RuntimeSpawn, - }, }; -use log::trace; -use sp_wasm_interface::{HostFunctions, Function}; -use sc_executor_common::wasm_runtime::{WasmInstance, WasmModule, InvokeMethod}; use sp_externalities::ExternalitiesExt as _; use sp_tasks::new_async_externalities; +use sp_version::{GetNativeVersion, NativeVersion, RuntimeVersion}; +use sp_wasm_interface::{Function, HostFunctions}; /// Default num of pages for the heap -const DEFAULT_HEAP_PAGES: u64 = 1024; +const DEFAULT_HEAP_PAGES: u64 = 2048; /// Set up the externalities and safe calling environment to execute runtime calls. /// /// If the inner closure panics, it will be caught and return an error. pub fn with_externalities_safe(ext: &mut dyn Externalities, f: F) -> Result - where F: UnwindSafe + FnOnce() -> U +where + F: UnwindSafe + FnOnce() -> U, { - sp_externalities::set_and_run_with_externalities( - ext, - move || { - // Substrate uses custom panic hook that terminates process on panic. Disable - // termination for the native call. 
- let _guard = sp_panic_handler::AbortGuard::force_unwind(); - std::panic::catch_unwind(f).map_err(|e| { - if let Some(err) = e.downcast_ref::<String>() { - Error::RuntimePanicked(err.clone()) - } else if let Some(err) = e.downcast_ref::<&'static str>() { - Error::RuntimePanicked(err.to_string()) - } else { - Error::RuntimePanicked("Unknown panic".into()) - } - }) - }, - ) + sp_externalities::set_and_run_with_externalities(ext, move || { + // Substrate uses a custom panic hook that terminates the process on panic. Disable + // termination for the native call. + let _guard = sp_panic_handler::AbortGuard::force_unwind(); + std::panic::catch_unwind(f).map_err(|e| { + if let Some(err) = e.downcast_ref::<String>() { + Error::RuntimePanicked(err.clone()) + } else if let Some(err) = e.downcast_ref::<&'static str>() { + Error::RuntimePanicked(err.to_string()) + } else { + Error::RuntimePanicked("Unknown panic".into()) + } + }) + }) } /// Delegate for dispatching a CodeExecutor call. @@ -80,9 +83,7 @@ pub trait NativeExecutionDispatch: Send + Sync { type ExtendHostFunctions: HostFunctions; /// Dispatch a method in the runtime. - /// - /// If the method with the specified name doesn't exist then `Err` is returned. - fn dispatch(ext: &mut dyn Externalities, method: &str, data: &[u8]) -> Result<Vec<u8>>; + fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>>; /// Provide native runtime version. fn native_version() -> NativeVersion; @@ -102,6 +103,9 @@ pub struct WasmExecutor { cache: Arc<RuntimeCache>, /// The size of the instances cache. max_runtime_instances: usize, + /// The path to a directory which the executor can leverage for a file cache, e.g. to put + /// compiled artifacts there. + cache_path: Option<PathBuf>, } impl WasmExecutor { @@ -112,19 +116,30 @@ /// `method` - Method used to execute Wasm code. /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. - /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + /// + /// `host_functions` - The set of host functions provided by this executor that are available + /// for import. + /// + /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. + /// + /// `cache_path` - A path to a directory where the executor can place its files for purposes of + /// caching. This may be important in cases when there are many different modules and the + /// compiled execution method is used. pub fn new( method: WasmExecutionMethod, default_heap_pages: Option<u64>, host_functions: Vec<&'static dyn Function>, max_runtime_instances: usize, + cache_path: Option<PathBuf>, ) -> Self { WasmExecutor { method, default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), host_functions: Arc::new(host_functions), - cache: Arc::new(RuntimeCache::new(max_runtime_instances)), + cache: Arc::new(RuntimeCache::new(max_runtime_instances, cache_path.clone())), max_runtime_instances, + cache_path, } } @@ -148,7 +163,8 @@ impl WasmExecutor { allow_missing_host_functions: bool, f: F, ) -> Result<R> - where F: FnOnce( + where + F: FnOnce( AssertUnwindSafe<&Arc<dyn WasmModule>>, AssertUnwindSafe<&dyn WasmInstance>, Option<&RuntimeVersion>, @@ -167,75 +183,135 @@ impl WasmExecutor { let instance = AssertUnwindSafe(instance); let ext = AssertUnwindSafe(ext); f(module, instance, version, ext) - } + }, )? { Ok(r) => r, Err(e) => Err(e), } } + + /// Perform a call into the given runtime. + /// + /// The runtime is passed as a [`RuntimeBlob`].
The runtime will be instantiated with the + /// parameters this `WasmExecutor` was initialized with. + /// + /// In case of problems during creation of the runtime or instantiation, an `Err` that + /// describes the problem is returned. + #[doc(hidden)] // We use this function for tests across multiple crates. + pub fn uncached_call( + &self, + runtime_blob: RuntimeBlob, + ext: &mut dyn Externalities, + allow_missing_host_functions: bool, + export_name: &str, + call_data: &[u8], + ) -> std::result::Result<Vec<u8>, String> { + let module = crate::wasm_runtime::create_wasm_runtime_with_code( + self.method, + self.default_heap_pages, + runtime_blob, + self.host_functions.to_vec(), + allow_missing_host_functions, + self.cache_path.as_deref(), + ) + .map_err(|e| format!("Failed to create module: {:?}", e))?; + + let instance = module + .new_instance() + .map_err(|e| format!("Failed to create instance: {:?}", e))?; + + let instance = AssertUnwindSafe(instance); + let mut ext = AssertUnwindSafe(ext); + let module = AssertUnwindSafe(module); + + with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); + instance.call_export(export_name, call_data) + }) + .and_then(|r| r) + .map_err(|e| e.to_string()) + } } -impl sp_core::traits::CallInWasm for WasmExecutor { - fn call_in_wasm( +impl sp_core::traits::ReadRuntimeVersion for WasmExecutor { + fn read_runtime_version( &self, wasm_code: &[u8], - code_hash: Option<Vec<u8>>, - method: &str, - call_data: &[u8], ext: &mut dyn Externalities, - missing_host_functions: MissingHostFunctions, ) -> std::result::Result<Vec<u8>, String> { - let allow_missing_host_functions = missing_host_functions.allowed(); - - if let Some(hash) = code_hash { - let code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_code.into()), - hash, - heap_pages: None, - }; - - self.with_instance(&code, ext, allow_missing_host_functions, |module, instance, _, mut ext| { - with_externalities_safe( - &mut **ext, - move || { - RuntimeInstanceSpawn::register_on_externalities(module.clone()); - instance.call_export(method, call_data) - } - ) - }).map_err(|e| e.to_string()) - } else { - let module = crate::wasm_runtime::create_wasm_runtime_with_code( - self.method, - self.default_heap_pages, - &wasm_code, - self.host_functions.to_vec(), - allow_missing_host_functions, - ) - .map_err(|e| format!("Failed to create module: {:?}", e))?; - - let instance = module.new_instance() - .map_err(|e| format!("Failed to create instance: {:?}", e))?; - - let instance = AssertUnwindSafe(instance); - let mut ext = AssertUnwindSafe(ext); - let module = AssertUnwindSafe(module); - - with_externalities_safe( - &mut **ext, - move || { - RuntimeInstanceSpawn::register_on_externalities(module.clone()); - instance.call_export(method, call_data) - } - ) - .and_then(|r| r) - .map_err(|e| e.to_string()) + let runtime_blob = RuntimeBlob::uncompress_if_needed(&wasm_code) + .map_err(|e| format!("Failed to create runtime blob: {:?}", e))?; + + if let Some(version) = crate::wasm_runtime::read_embedded_version(&runtime_blob) + .map_err(|e| format!("Failed to read the static section: {:?}", e)) + .map(|v| v.map(|v| v.encode()))? + { + return Ok(version) } + + // If the blob didn't have an embedded runtime version section, we fall back to the legacy + // way of fetching the version: i.e. instantiating the given runtime and calling + // `Core_version` on it.
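Condensing the two code paths of `read_runtime_version` into one place, the control flow amounts to the following sketch; the `executor`, `ext`, and `blob` bindings are illustrative names, not part of the patch:

// Illustrative sketch only, assuming `executor: &WasmExecutor` and `ext: &mut dyn Externalities`.
let maybe_version = crate::wasm_runtime::read_embedded_version(&blob)
    .map_err(|e| format!("Failed to read the static section: {:?}", e))?;

let version_bytes = match maybe_version {
    // Fast path: the version was embedded as a custom wasm section at build time.
    Some(version) => version.encode(),
    // Legacy path: instantiate the runtime and call `Core_version` on it.
    None => executor.uncached_call(blob, ext, true, "Core_version", &[])?,
};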
+ + self.uncached_call( + runtime_blob, + ext, + // If a runtime upgrade introduces new host functions that are not provided by + // the node, we should not fail at instantiation. Otherwise nodes that are + // updated could run this successfully and it could lead to a storage root + // mismatch when importing this block. + true, + "Core_version", + &[], + ) + } +} + +impl CodeExecutor for WasmExecutor { + type Error = Error; + + fn call< + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + UnwindSafe, + >( + &self, + ext: &mut dyn Externalities, + runtime_code: &RuntimeCode, + method: &str, + data: &[u8], + _use_native: bool, + _native_call: Option, + ) -> (Result>, bool) { + let result = self.with_instance( + runtime_code, + ext, + false, + |module, instance, _onchain_version, mut ext| { + with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); + instance.call_export(method, data).map(NativeOrEncoded::Encoded) + }) + }, + ); + (result, false) + } +} + +impl RuntimeVersionOf for WasmExecutor { + fn runtime_version( + &self, + ext: &mut dyn Externalities, + runtime_code: &RuntimeCode, + ) -> Result { + self.with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { + Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) + }) } } /// A generic `CodeExecutor` implementation that uses a delegate to determine wasm code equivalence /// and dispatch to native code when possible, falling back on `WasmExecutor` when not. -pub struct NativeExecutor { +pub struct NativeElseWasmExecutor { /// Dummy field to avoid the compiler complaining about us not using `D`. _dummy: std::marker::PhantomData, /// Native runtime version info. @@ -244,7 +320,7 @@ pub struct NativeExecutor { wasm: WasmExecutor, } -impl NativeExecutor { +impl NativeElseWasmExecutor { /// Create new instance. /// /// # Parameters @@ -258,18 +334,29 @@ impl NativeExecutor { default_heap_pages: Option, max_runtime_instances: usize, ) -> Self { - let mut host_functions = sp_io::SubstrateHostFunctions::host_functions(); + let extended = D::ExtendHostFunctions::host_functions(); + let mut host_functions = sp_io::SubstrateHostFunctions::host_functions() + .into_iter() + // filter out any host function overrides provided. + .filter(|host_fn| { + extended + .iter() + .find(|ext_host_fn| host_fn.name() == ext_host_fn.name()) + .is_none() + }) + .collect::>(); // Add the custom host functions provided by the user. 
- host_functions.extend(D::ExtendHostFunctions::host_functions()); + host_functions.extend(extended); let wasm_executor = WasmExecutor::new( fallback_method, default_heap_pages, host_functions, max_runtime_instances, + None, ); - NativeExecutor { + NativeElseWasmExecutor { _dummy: Default::default(), native_version: D::native_version(), wasm: wasm_executor, @@ -277,23 +364,22 @@ impl NativeExecutor { } } -impl RuntimeInfo for NativeExecutor { - fn native_version(&self) -> &NativeVersion { - &self.native_version - } - +impl RuntimeVersionOf for NativeElseWasmExecutor { fn runtime_version( &self, ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - self.wasm.with_instance( - runtime_code, - ext, - false, - |_module, _instance, version, _ext| - Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))), - ) + self.wasm + .with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { + Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) + }) + } +} + +impl GetNativeVersion for NativeElseWasmExecutor { + fn native_version(&self) -> &NativeVersion { + &self.native_version } } @@ -314,70 +400,67 @@ impl RuntimeSpawn for RuntimeInstanceSpawn { let module = self.module.clone(); let scheduler = self.scheduler.clone(); - self.scheduler.spawn("executor-extra-runtime-instance", Box::pin(async move { - let module = AssertUnwindSafe(module); - - let async_ext = match new_async_externalities(scheduler.clone()) { - Ok(val) => val, - Err(e) => { - log::error!( - target: "executor", - "Failed to setup externalities for async context: {}", - e, - ); - - // This will drop sender and receiver end will panic - return; - } - }; - - let mut async_ext = match async_ext.with_runtime_spawn( - Box::new(RuntimeInstanceSpawn::new(module.clone(), scheduler)) - ) { - Ok(val) => val, - Err(e) => { - log::error!( - target: "executor", - "Failed to setup runtime extension for async externalities: {}", - e, - ); - - // This will drop sender and receiver end will panic - return; - } - }; + self.scheduler.spawn( + "executor-extra-runtime-instance", + Box::pin(async move { + let module = AssertUnwindSafe(module); - let result = with_externalities_safe( - &mut async_ext, - move || { + let async_ext = match new_async_externalities(scheduler.clone()) { + Ok(val) => val, + Err(e) => { + log::error!( + target: "executor", + "Failed to setup externalities for async context: {}", + e, + ); + // This will drop sender and receiver end will panic + return + }, + }; + + let mut async_ext = match async_ext.with_runtime_spawn(Box::new( + RuntimeInstanceSpawn::new(module.clone(), scheduler), + )) { + Ok(val) => val, + Err(e) => { + log::error!( + target: "executor", + "Failed to setup runtime extension for async externalities: {}", + e, + ); + + // This will drop sender and receiver end will panic + return + }, + }; + + let result = with_externalities_safe(&mut async_ext, move || { // FIXME: Should be refactored to shared "instance factory". // Instantiating wasm here every time is suboptimal at the moment, shared // pool of instances should be used. 
// // https://github.com/paritytech/substrate/issues/7354 - let instance = module.new_instance() - .expect("Failed to create new instance from module"); + let instance = + module.new_instance().expect("Failed to create new instance from module"); - instance.call( - InvokeMethod::TableWithWrapper { dispatcher_ref, func }, - &data[..], - ).expect("Failed to invoke instance.") - } - ); - - match result { - Ok(output) => { - let _ = sender.send(output); - }, - Err(error) => { - // If execution is panicked, the `join` in the original runtime code will panic as well, - // since the sender is dropped without sending anything. - log::error!("Call error in spawned task: {:?}", error); - }, - } - })); + instance + .call(InvokeMethod::TableWithWrapper { dispatcher_ref, func }, &data[..]) + .expect("Failed to invoke instance.") + }); + match result { + Ok(output) => { + let _ = sender.send(output); + }, + Err(error) => { + // If execution is panicked, the `join` in the original runtime code will + // panic as well, since the sender is dropped without sending anything. + log::error!("Call error in spawned task: {:?}", error); + }, + } + }), + ); new_handle } @@ -394,12 +477,7 @@ impl RuntimeInstanceSpawn { module: Arc, scheduler: Box, ) -> Self { - Self { - module, - scheduler, - counter: 0.into(), - tasks: HashMap::new().into(), - } + Self { module, scheduler, counter: 0.into(), tasks: HashMap::new().into() } } fn with_externalities_and_module( @@ -409,37 +487,33 @@ impl RuntimeInstanceSpawn { ext.extension::() .map(move |task_ext| Self::new(module, task_ext.clone())) } +} - /// Register new `RuntimeSpawnExt` on current externalities. - /// - /// This extensions will spawn instances from provided `module`. - pub fn register_on_externalities(module: Arc) { - sp_externalities::with_externalities( - move |mut ext| { - if let Some(runtime_spawn) = - Self::with_externalities_and_module(module.clone(), ext) - { - if let Err(e) = ext.register_extension( - RuntimeSpawnExt(Box::new(runtime_spawn)) - ) { - trace!( - target: "executor", - "Failed to register `RuntimeSpawnExt` instance on externalities: {:?}", - e, - ) - } - } +/// Pre-registers the built-in extensions to the currently effective externalities. +/// +/// Meant to be called each time before calling into the runtime. 
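The helper defined next centralizes what was previously done ad hoc via `RuntimeInstanceSpawn::register_on_externalities`. Every executor call site in this patch now wraps its export call in the same pattern, quoted here from the `CodeExecutor` implementations:

with_externalities_safe(&mut **ext, move || {
    preregister_builtin_ext(module.clone());
    instance.call_export(method, data).map(NativeOrEncoded::Encoded)
})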
+fn preregister_builtin_ext(module: Arc) { + sp_externalities::with_externalities(move |mut ext| { + if let Some(runtime_spawn) = + RuntimeInstanceSpawn::with_externalities_and_module(module, ext) + { + if let Err(e) = ext.register_extension(RuntimeSpawnExt(Box::new(runtime_spawn))) { + trace!( + target: "executor", + "Failed to register `RuntimeSpawnExt` instance on externalities: {:?}", + e, + ) } - ); - } + } + }); } -impl CodeExecutor for NativeExecutor { +impl CodeExecutor for NativeElseWasmExecutor { type Error = Error; fn call< R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result> + UnwindSafe, >( &self, ext: &mut dyn Externalities, @@ -455,17 +529,13 @@ impl CodeExecutor for NativeExecutor { ext, false, |module, instance, onchain_version, mut ext| { - let onchain_version = onchain_version.ok_or_else( - || Error::ApiError("Unknown version".into()) - )?; + let onchain_version = + onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; - let can_call_with = onchain_version.can_call_with(&self.native_version.runtime_version); + let can_call_with = + onchain_version.can_call_with(&self.native_version.runtime_version); - match ( - use_native, - can_call_with, - native_call, - ) { + match (use_native, can_call_with, native_call) { (_, false, _) | (false, _, _) => { if !can_call_with { trace!( @@ -476,13 +546,10 @@ impl CodeExecutor for NativeExecutor { ); } - with_externalities_safe( - &mut **ext, - move || { - RuntimeInstanceSpawn::register_on_externalities(module.clone()); - instance.call_export(method, data).map(NativeOrEncoded::Encoded) - } - ) + with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); + instance.call_export(method, data).map(NativeOrEncoded::Encoded) + }) }, (true, true, Some(call)) => { trace!( @@ -495,13 +562,10 @@ impl CodeExecutor for NativeExecutor { used_native = true; let res = with_externalities_safe(&mut **ext, move || (call)()) - .and_then(|r| r - .map(NativeOrEncoded::Native) - .map_err(|s| Error::ApiError(s)) - ); + .and_then(|r| r.map(NativeOrEncoded::Native).map_err(Error::ApiError)); Ok(res) - } + }, _ => { trace!( target: "executor", @@ -511,18 +575,20 @@ impl CodeExecutor for NativeExecutor { ); used_native = true; - Ok(D::dispatch(&mut **ext, method, data).map(NativeOrEncoded::Encoded)) - } + Ok(with_externalities_safe(&mut **ext, move || D::dispatch(method, data))? + .map(NativeOrEncoded::Encoded) + .ok_or_else(|| Error::MethodNotFound(method.to_owned()))) + }, } - } + }, ); (result, used_native) } } -impl Clone for NativeExecutor { +impl Clone for NativeElseWasmExecutor { fn clone(&self) -> Self { - NativeExecutor { + NativeElseWasmExecutor { _dummy: Default::default(), native_version: D::native_version(), wasm: self.wasm.clone(), @@ -530,91 +596,13 @@ impl Clone for NativeExecutor { } } -impl sp_core::traits::CallInWasm for NativeExecutor { - fn call_in_wasm( +impl sp_core::traits::ReadRuntimeVersion for NativeElseWasmExecutor { + fn read_runtime_version( &self, - wasm_blob: &[u8], - code_hash: Option>, - method: &str, - call_data: &[u8], + wasm_code: &[u8], ext: &mut dyn Externalities, - missing_host_functions: MissingHostFunctions, ) -> std::result::Result, String> { - self.wasm.call_in_wasm(wasm_blob, code_hash, method, call_data, ext, missing_host_functions) - } -} - -/// Implements a `NativeExecutionDispatch` for provided parameters. 
-/// -/// # Example -/// -/// ``` -/// sc_executor::native_executor_instance!( -/// pub MyExecutor, -/// substrate_test_runtime::api::dispatch, -/// substrate_test_runtime::native_version, -/// ); -/// ``` -/// -/// # With custom host functions -/// -/// When you want to use custom runtime interfaces from within your runtime, you need to make the -/// executor aware of the host functions for these interfaces. -/// -/// ``` -/// # use sp_runtime_interface::runtime_interface; -/// -/// #[runtime_interface] -/// trait MyInterface { -/// fn say_hello_world(data: &str) { -/// println!("Hello world from: {}", data); -/// } -/// } -/// -/// sc_executor::native_executor_instance!( -/// pub MyExecutor, -/// substrate_test_runtime::api::dispatch, -/// substrate_test_runtime::native_version, -/// my_interface::HostFunctions, -/// ); -/// ``` -/// -/// When you have multiple interfaces, you can give the host functions as a tuple e.g.: -/// `(my_interface::HostFunctions, my_interface2::HostFunctions)` -/// -#[macro_export] -macro_rules! native_executor_instance { - ( $pub:vis $name:ident, $dispatcher:path, $version:path $(,)?) => { - /// A unit struct which implements `NativeExecutionDispatch` feeding in the - /// hard-coded runtime. - $pub struct $name; - $crate::native_executor_instance!(IMPL $name, $dispatcher, $version, ()); - }; - ( $pub:vis $name:ident, $dispatcher:path, $version:path, $custom_host_functions:ty $(,)?) => { - /// A unit struct which implements `NativeExecutionDispatch` feeding in the - /// hard-coded runtime. - $pub struct $name; - $crate::native_executor_instance!( - IMPL $name, $dispatcher, $version, $custom_host_functions - ); - }; - (IMPL $name:ident, $dispatcher:path, $version:path, $custom_host_functions:ty) => { - impl $crate::NativeExecutionDispatch for $name { - type ExtendHostFunctions = $custom_host_functions; - - fn dispatch( - ext: &mut dyn $crate::Externalities, - method: &str, - data: &[u8] - ) -> $crate::error::Result> { - $crate::with_externalities_safe(ext, move || $dispatcher(method, data))? 
- .ok_or_else(|| $crate::error::Error::MethodNotFound(method.to_owned())) - } - - fn native_version() -> $crate::NativeVersion { - $version() - } - } + self.wasm.read_runtime_version(wasm_code, ext) } } @@ -630,25 +618,29 @@ mod tests { } } - native_executor_instance!( - pub MyExecutor, - substrate_test_runtime::api::dispatch, - substrate_test_runtime::native_version, - (my_interface::HostFunctions, my_interface::HostFunctions), - ); + pub struct MyExecutorDispatch; + + impl NativeExecutionDispatch for MyExecutorDispatch { + type ExtendHostFunctions = (my_interface::HostFunctions, my_interface::HostFunctions); + + fn dispatch(method: &str, data: &[u8]) -> Option> { + substrate_test_runtime::api::dispatch(method, data) + } + + fn native_version() -> NativeVersion { + substrate_test_runtime::native_version() + } + } #[test] fn native_executor_registers_custom_interface() { - let executor = NativeExecutor::::new( + let executor = NativeElseWasmExecutor::::new( WasmExecutionMethod::Interpreted, None, 8, ); my_interface::HostFunctions::host_functions().iter().for_each(|function| { - assert_eq!( - executor.wasm.host_functions.iter().filter(|f| f == &function).count(), - 2, - ); + assert_eq!(executor.wasm.host_functions.iter().filter(|f| f == &function).count(), 2); }); my_interface::say_hello_world("hey"); diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 7288df35f31c4..b11e3958dbc81 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -1,32 +1,40 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Traits and accessor functions for calling into the Substrate Wasm runtime. //! //! The primary means of accessing the runtimes is through a cache which saves the reusable //! components of the runtime that are expensive to initialize. 
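A concrete caller of this module's main entry point appears earlier in this patch: the integration tests' `mk_test_runtime` helper builds a runtime from a possibly compressed blob like so:

let blob = RuntimeBlob::uncompress_if_needed(&wasm_binary_unwrap()[..])
    .expect("failed to create a runtime blob out of test runtime");

crate::wasm_runtime::create_wasm_runtime_with_code(
    wasm_method,
    pages,
    blob,
    HostFunctions::host_functions(),
    true, // allow_missing_func_imports
    None, // no cache path
)
.expect("failed to instantiate wasm runtime")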
-use std::sync::Arc; use crate::error::{Error, WasmError}; -use parking_lot::Mutex; use codec::Decode; -use sp_core::traits::{Externalities, RuntimeCode, FetchRuntimeCode}; +use parking_lot::Mutex; +use sc_executor_common::{ + runtime_blob::RuntimeBlob, + wasm_runtime::{WasmInstance, WasmModule}, +}; +use sp_core::traits::{Externalities, FetchRuntimeCode, RuntimeCode}; use sp_version::RuntimeVersion; -use std::panic::AssertUnwindSafe; -use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance}; +use std::{ + panic::AssertUnwindSafe, + path::{Path, PathBuf}, + sync::Arc, +}; use sp_wasm_interface::Function; @@ -64,27 +72,26 @@ struct VersionedRuntime { impl VersionedRuntime { /// Run the given closure `f` with an instance of this runtime. - fn with_instance<'c, R, F>( - &self, - ext: &mut dyn Externalities, - f: F, - ) -> Result - where F: FnOnce( + fn with_instance<'c, R, F>(&self, ext: &mut dyn Externalities, f: F) -> Result + where + F: FnOnce( &Arc, &dyn WasmInstance, Option<&RuntimeVersion>, - &mut dyn Externalities) - -> Result, + &mut dyn Externalities, + ) -> Result, { // Find a free instance - let instance = self.instances + let instance = self + .instances .iter() .enumerate() .find_map(|(index, i)| i.try_lock().map(|i| (index, i))); match instance { Some((index, mut locked)) => { - let (instance, new_inst) = locked.take() + let (instance, new_inst) = locked + .take() .map(|r| Ok((r, false))) .unwrap_or_else(|| self.module.new_instance().map(|i| (i, true)))?; @@ -125,7 +132,7 @@ impl VersionedRuntime { let instance = self.module.new_instance()?; f(&self.module, &*instance, self.version.as_ref(), ext) - } + }, } } } @@ -150,20 +157,24 @@ pub struct RuntimeCache { runtimes: Mutex<[Option>; MAX_RUNTIMES]>, /// The size of the instances cache for each runtime. max_runtime_instances: usize, + cache_path: Option, } impl RuntimeCache { /// Creates a new instance of a runtimes cache. - pub fn new(max_runtime_instances: usize) -> RuntimeCache { - RuntimeCache { - runtimes: Default::default(), - max_runtime_instances, - } + /// + /// `max_runtime_instances` specifies the number of runtime instances preserved in an in-memory + /// cache. + /// + /// `cache_path` allows to specify an optional directory where the executor can store files + /// for caching. + pub fn new(max_runtime_instances: usize, cache_path: Option) -> RuntimeCache { + RuntimeCache { runtimes: Default::default(), max_runtime_instances, cache_path } } /// Prepares a WASM module instance and executes given function for it. /// - /// This uses internal cache to find avaiable instance or create a new one. + /// This uses internal cache to find available instance or create a new one. /// # Parameters /// /// `code` - Provides external code or tells the executor to fetch it from storage. @@ -182,7 +193,7 @@ impl RuntimeCache { /// /// `f` - Function to execute. /// - /// # Returns result of `f` wrapped in an additonal result. + /// # Returns result of `f` wrapped in an additional result. /// In case of failure one of two errors can be returned: /// /// `Err::InvalidCode` is returned for runtime code issues. 
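The cache bookkeeping in the next hunk keeps at most `MAX_RUNTIMES` entries ordered most-recently-used first. A standalone sketch of the same move-to-front discipline, with illustrative names that are not part of the patch and assuming a non-empty slot array:

// `promote`, `slots`, and `fresh` are editorial names; `T` stands in for the
// cached `Arc<VersionedRuntime>` entries used by `RuntimeCache`.
fn promote<T>(slots: &mut [Option<T>], hit: Option<usize>, fresh: Option<T>) {
    match hit {
        // Already the most recently used entry: nothing to move.
        Some(0) => {},
        // Found at position `n`: bubble it up to the front.
        Some(n) =>
            for i in (1..=n).rev() {
                slots.swap(i, i - 1);
            },
        // Not cached: overwrite the least recently used slot (evicting its
        // occupant), then rotate the new entry to the front.
        None => {
            let last = slots.len() - 1;
            slots[last] = fresh;
            for i in (1..slots.len()).rev() {
                slots.swap(i, i - 1);
            }
        },
    }
}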
@@ -199,31 +210,35 @@ impl RuntimeCache { allow_missing_func_imports: bool, f: F, ) -> Result, Error> - where F: FnOnce( + where + F: FnOnce( &Arc, &dyn WasmInstance, Option<&RuntimeVersion>, - &mut dyn Externalities) - -> Result, + &mut dyn Externalities, + ) -> Result, { let code_hash = &runtime_code.hash; let heap_pages = runtime_code.heap_pages.unwrap_or(default_heap_pages); let mut runtimes = self.runtimes.lock(); // this must be released prior to calling f - let pos = runtimes.iter().position(|r| r.as_ref().map_or( - false, - |r| r.wasm_method == wasm_method && - r.code_hash == *code_hash && - r.heap_pages == heap_pages - )); + let pos = runtimes.iter().position(|r| { + r.as_ref().map_or(false, |r| { + r.wasm_method == wasm_method && + r.code_hash == *code_hash && + r.heap_pages == heap_pages + }) + }); let runtime = match pos { Some(n) => runtimes[n] .clone() .expect("`position` only returns `Some` for entries that are `Some`"), - None => { + None => { let code = runtime_code.fetch_runtime_code().ok_or(WasmError::CodeNotFound)?; + let time = std::time::Instant::now(); + let result = create_versioned_wasm_runtime( &code, code_hash.clone(), @@ -233,28 +248,40 @@ impl RuntimeCache { host_functions.into(), allow_missing_func_imports, self.max_runtime_instances, + self.cache_path.as_deref(), ); - if let Err(ref err) = result { - log::warn!(target: "wasm-runtime", "Cannot create a runtime: {:?}", err); + + match result { + Ok(ref result) => { + log::debug!( + target: "wasm-runtime", + "Prepared new runtime version {:?} in {} ms.", + result.version, + time.elapsed().as_millis(), + ); + }, + Err(ref err) => { + log::warn!(target: "wasm-runtime", "Cannot create a runtime: {:?}", err); + }, } + Arc::new(result?) - } + }, }; // Rearrange runtimes by last recently used. match pos { Some(0) => {}, - Some(n) => { - for i in (1 .. n + 1).rev() { + Some(n) => + for i in (1..n + 1).rev() { runtimes.swap(i, i - 1); - } - } + }, None => { - runtimes[MAX_RUNTIMES-1] = Some(runtime.clone()); - for i in (1 .. MAX_RUNTIMES).rev() { + runtimes[MAX_RUNTIMES - 1] = Some(runtime.clone()); + for i in (1..MAX_RUNTIMES).rev() { runtimes.swap(i, i - 1); } - } + }, } drop(runtimes); @@ -266,48 +293,107 @@ impl RuntimeCache { pub fn create_wasm_runtime_with_code( wasm_method: WasmExecutionMethod, heap_pages: u64, - code: &[u8], + blob: RuntimeBlob, host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, + cache_path: Option<&Path>, ) -> Result, WasmError> { match wasm_method { - WasmExecutionMethod::Interpreted => + WasmExecutionMethod::Interpreted => { + // Wasmi doesn't have any need in a cache directory. + // + // We drop the cache_path here to silence warnings that cache_path is not used if + // compiling without the `wasmtime` flag. 
+ drop(cache_path); + sc_executor_wasmi::create_runtime( - code, + blob, heap_pages, host_functions, - allow_missing_func_imports - ).map(|runtime| -> Arc<dyn WasmModule> { Arc::new(runtime) }), + allow_missing_func_imports, + ) + .map(|runtime| -> Arc<dyn WasmModule> { Arc::new(runtime) }) + }, #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled => - sc_executor_wasmtime::create_runtime( - code, - heap_pages, - host_functions, - allow_missing_func_imports - ).map(|runtime| -> Arc<dyn WasmModule> { Arc::new(runtime) }), + WasmExecutionMethod::Compiled => sc_executor_wasmtime::create_runtime( + blob, + sc_executor_wasmtime::Config { + heap_pages: heap_pages as u32, + max_memory_pages: None, + allow_missing_func_imports, + cache_path: cache_path.map(ToOwned::to_owned), + semantics: sc_executor_wasmtime::Semantics { + fast_instance_reuse: true, + deterministic_stack_limit: None, + canonicalize_nans: false, + }, + }, + host_functions, + ) + .map(|runtime| -> Arc<dyn WasmModule> { Arc::new(runtime) }), } } -fn decode_version(version: &[u8]) -> Result<RuntimeVersion, WasmError> { +fn decode_version(mut version: &[u8]) -> Result<RuntimeVersion, WasmError> { let v: RuntimeVersion = sp_api::OldRuntimeVersion::decode(&mut &version[..]) - .map_err(|_| - WasmError::Instantiation( - "failed to decode \"Core_version\" result using old runtime version".into(), - ) - )?.into(); + .map_err(|_| { + WasmError::Instantiation( + "failed to decode \"Core_version\" result using old runtime version".into(), + ) + })? + .into(); let core_api_id = sp_core::hashing::blake2_64(b"Core"); if v.has_api_with(&core_api_id, |v| v >= 3) { - sp_api::RuntimeVersion::decode(&mut &version[..]) - .map_err(|_| - WasmError::Instantiation("failed to decode \"Core_version\" result".into()) - ) + sp_api::RuntimeVersion::decode(&mut version).map_err(|_| { + WasmError::Instantiation("failed to decode \"Core_version\" result".into()) + }) } else { Ok(v) } } +fn decode_runtime_apis(apis: &[u8]) -> Result<Vec<([u8; 8], u32)>, WasmError> { + use sp_api::RUNTIME_API_INFO_SIZE; + use std::convert::TryFrom; + + apis.chunks(RUNTIME_API_INFO_SIZE) + .map(|chunk| { + // `chunk` can be shorter than `RUNTIME_API_INFO_SIZE` if the total length of `apis` + // isn't evenly divisible by `RUNTIME_API_INFO_SIZE`. + <[u8; RUNTIME_API_INFO_SIZE]>::try_from(chunk) + .map(sp_api::deserialize_runtime_api_info) + .map_err(|_| WasmError::Other("a clipped runtime api info declaration".to_owned())) + }) + .collect::<Result<Vec<_>, WasmError>>() +} + +/// Take the runtime blob and scan it for the custom wasm sections containing the version +/// information and construct the `RuntimeVersion` from them. +/// +/// If there are no such sections, it returns `None`. If there is an error during decoding those +/// sections, `Err` will be returned. +pub fn read_embedded_version(blob: &RuntimeBlob) -> Result<Option<RuntimeVersion>, WasmError> { + if let Some(mut version_section) = blob.custom_section_contents("runtime_version") { + // We do not use `decode_version` here because the `runtime_version` section is not supposed + // to ever contain a legacy version. Apart from that, `decode_version` relies on the presence + // of a special API in the `apis` field to treat the input as a non-legacy version. However, + // the structure found in the `runtime_version` section always contains an empty `apis` + // field. Therefore the version read would be mistakenly treated as a legacy one. + let mut decoded_version = sp_api::RuntimeVersion::decode(&mut version_section) + .map_err(|_| WasmError::Instantiation("failed to decode version section".into()))?; + + // Don't stop here: also check whether there is a special section that encodes all runtime APIs.
+ if let Some(apis_section) = blob.custom_section_contents("runtime_apis") { + decoded_version.apis = decode_runtime_apis(apis_section)?.into(); + } + + Ok(Some(decoded_version)) + } else { + Ok(None) + } +} + fn create_versioned_wasm_runtime( code: &[u8], code_hash: Vec, @@ -317,62 +403,62 @@ fn create_versioned_wasm_runtime( host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, max_instances: usize, + cache_path: Option<&Path>, ) -> Result { - #[cfg(not(target_os = "unknown"))] - let time = std::time::Instant::now(); + // The incoming code may be actually compressed. We decompress it here and then work with + // the uncompressed code from now on. + let blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(&code)?; + + // Use the runtime blob to scan if there is any metadata embedded into the wasm binary + // pertaining to runtime version. We do it before consuming the runtime blob for creating the + // runtime. + let mut version: Option<_> = read_embedded_version(&blob)?; + let runtime = create_wasm_runtime_with_code( wasm_method, heap_pages, - &code, + blob, host_functions, allow_missing_func_imports, + cache_path, )?; - // Call to determine runtime version. - let version_result = { - // `ext` is already implicitly handled as unwind safe, as we store it in a global variable. - let mut ext = AssertUnwindSafe(ext); - - // The following unwind safety assertion is OK because if the method call panics, the - // runtime will be dropped. - let runtime = AssertUnwindSafe(runtime.as_ref()); - crate::native_executor::with_externalities_safe( - &mut **ext, - move || runtime.new_instance()?.call("Core_version".into(), &[]) - ).map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? - }; - let version = match version_result { - Ok(version) => Some(decode_version(&version)?), - Err(_) => None, - }; - #[cfg(not(target_os = "unknown"))] - log::debug!( - target: "wasm-runtime", - "Prepared new runtime version {:?} in {} ms.", - version, - time.elapsed().as_millis(), - ); + // If the runtime blob doesn't embed the runtime version then use the legacy version query + // mechanism: call the runtime. + if version.is_none() { + // Call to determine runtime version. + let version_result = { + // `ext` is already implicitly handled as unwind safe, as we store it in a global + // variable. + let mut ext = AssertUnwindSafe(ext); + + // The following unwind safety assertion is OK because if the method call panics, the + // runtime will be dropped. + let runtime = AssertUnwindSafe(runtime.as_ref()); + crate::native_executor::with_externalities_safe(&mut **ext, move || { + runtime.new_instance()?.call("Core_version".into(), &[]) + }) + .map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? + }; + + if let Ok(version_buf) = version_result { + version = Some(decode_version(&version_buf)?) 
+ } + } let mut instances = Vec::with_capacity(max_instances); instances.resize_with(max_instances, || Mutex::new(None)); - Ok(VersionedRuntime { - code_hash, - module: runtime, - version, - heap_pages, - wasm_method, - instances, - }) + Ok(VersionedRuntime { code_hash, module: runtime, version, heap_pages, wasm_method, instances }) } #[cfg(test)] mod tests { use super::*; - use sp_wasm_interface::HostFunctions; + use codec::Encode; use sp_api::{Core, RuntimeApiInfo}; + use sp_wasm_interface::HostFunctions; use substrate_test_runtime::Block; - use codec::Encode; #[test] fn host_functions_are_equal() { @@ -390,7 +476,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 1)]), + apis: sp_api::create_apis_vec!([(>::ID, 1)]), }; let version = decode_version(&old_runtime_version.encode()).unwrap(); @@ -405,7 +491,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), + apis: sp_api::create_apis_vec!([(>::ID, 3)]), }; decode_version(&old_runtime_version.encode()).unwrap_err(); @@ -419,11 +505,41 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), + apis: sp_api::create_apis_vec!([(>::ID, 3)]), transaction_version: 3, }; let version = decode_version(&old_runtime_version.encode()).unwrap(); assert_eq!(3, version.transaction_version); } + + #[test] + fn embed_runtime_version_works() { + let wasm = sp_maybe_compressed_blob::decompress( + substrate_test_runtime::wasm_binary_unwrap(), + sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT, + ) + .expect("Decompressing works"); + + let runtime_version = RuntimeVersion { + spec_name: "test_replace".into(), + impl_name: "test_replace".into(), + authoring_version: 100, + spec_version: 100, + impl_version: 100, + apis: sp_api::create_apis_vec!([(>::ID, 3)]), + transaction_version: 100, + }; + + let embedded = sp_version::embed::embed_runtime_version(&wasm, runtime_version.clone()) + .expect("Embedding works"); + + let blob = RuntimeBlob::new(&embedded).expect("Embedded blob is valid"); + let read_version = read_embedded_version(&blob) + .ok() + .flatten() + .expect("Reading embedded version works"); + + assert_eq!(runtime_version, read_version); + } } diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index bf174bca2d466..324b2bdd0baeb 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmi" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,10 +15,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -wasmi = "0.6.2" -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0", path = "../common" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } +wasmi = "0.9.0" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor-common = { version = "0.10.0-dev", path = "../common" } +sc-allocator = { version = "4.0.0-dev", path = "../../allocator" } +sp-wasm-interface = { version = "4.0.0-dev", 
path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "4.0.0-dev", path = "../../../primitives/runtime-interface" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +scoped-tls = "1.0" diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 17b92e04950c9..6052662fa7ccf 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -1,63 +1,69 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! This crate provides an implementation of `WasmModule` that is baked by wasmi. -use std::{str, cell::RefCell, sync::Arc}; -use wasmi::{ - Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef, - FuncInstance, memory_units::Pages, - RuntimeValue::{I32, I64, self}, +use codec::{Decode, Encode}; +use log::{debug, error, trace}; +use sc_executor_common::{ + error::{Error, WasmError}, + runtime_blob::{DataSegmentsSnapshot, RuntimeBlob}, + sandbox, + util::MemoryTransfer, + wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, }; -use codec::{Encode, Decode}; use sp_core::sandbox as sandbox_primitives; -use log::{error, trace, debug}; +use sp_runtime_interface::unpack_ptr_and_len; use sp_wasm_interface::{ - FunctionContext, Pointer, WordSize, Sandbox, MemoryId, Result as WResult, Function, + Function, FunctionContext, MemoryId, Pointer, Result as WResult, Sandbox, WordSize, }; -use sp_runtime_interface::unpack_ptr_and_len; -use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance, InvokeMethod}; -use sc_executor_common::{ - error::{Error, WasmError}, - sandbox, +use std::{cell::RefCell, rc::Rc, str, sync::Arc}; +use wasmi::{ + memory_units::Pages, + FuncInstance, ImportsBuilder, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, + RuntimeValue::{self, I32, I64}, + TableRef, }; -use sc_executor_common::util::{DataSegmentsSnapshot, WasmModuleInfo}; -struct FunctionExecutor<'a> { - sandbox_store: sandbox::Store, - heap: sp_allocator::FreeingBumpHeapAllocator, +struct FunctionExecutor { + sandbox_store: Rc>>, + heap: RefCell, memory: MemoryRef, table: Option, - host_functions: &'a [&'static dyn Function], + host_functions: Arc>, allow_missing_func_imports: bool, - missing_functions: &'a [String], + missing_functions: Arc>, } -impl<'a> FunctionExecutor<'a> { +impl FunctionExecutor { fn new( m: MemoryRef, heap_base: u32, t: Option, - host_functions: &'a [&'static dyn Function], + host_functions: Arc>, allow_missing_func_imports: bool, - 
missing_functions: &'a [String], + missing_functions: Arc>, ) -> Result { Ok(FunctionExecutor { - sandbox_store: sandbox::Store::new(), - heap: sp_allocator::FreeingBumpHeapAllocator::new(heap_base), + sandbox_store: Rc::new(RefCell::new(sandbox::Store::new( + sandbox::SandboxBackend::Wasmi, + ))), + heap: RefCell::new(sc_allocator::FreeingBumpHeapAllocator::new(heap_base)), memory: m, table: t, host_functions, @@ -67,36 +73,43 @@ impl<'a> FunctionExecutor<'a> { } } -impl<'a> sandbox::SandboxCapabilities for FunctionExecutor<'a> { - type SupervisorFuncRef = wasmi::FuncRef; +struct SandboxContext<'a> { + executor: &'a mut FunctionExecutor, + dispatch_thunk: wasmi::FuncRef, +} +impl<'a> sandbox::SandboxContext for SandboxContext<'a> { fn invoke( &mut self, - dispatch_thunk: &Self::SupervisorFuncRef, invoke_args_ptr: Pointer, invoke_args_len: WordSize, state: u32, func_idx: sandbox::SupervisorFuncIndex, ) -> Result { let result = wasmi::FuncInstance::invoke( - dispatch_thunk, + &self.dispatch_thunk, &[ RuntimeValue::I32(u32::from(invoke_args_ptr) as i32), RuntimeValue::I32(invoke_args_len as i32), RuntimeValue::I32(state as i32), RuntimeValue::I32(usize::from(func_idx) as i32), ], - self, + self.executor, ); + match result { Ok(Some(RuntimeValue::I64(val))) => Ok(val), Ok(_) => return Err("Supervisor function returned unexpected result!".into()), Err(err) => Err(Error::Trap(err)), } } + + fn supervisor_context(&mut self) -> &mut dyn FunctionContext { + self.executor + } } -impl<'a> FunctionContext for FunctionExecutor<'a> { +impl FunctionContext for FunctionExecutor { fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> WResult<()> { self.memory.get_into(address.into(), dest).map_err(|e| e.to_string()) } @@ -106,17 +119,15 @@ impl<'a> FunctionContext for FunctionExecutor<'a> { } fn allocate_memory(&mut self, size: WordSize) -> WResult> { - let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.allocate(mem, size).map_err(|e| e.to_string()) - }) + let heap = &mut self.heap.borrow_mut(); + self.memory + .with_direct_access_mut(|mem| heap.allocate(mem, size).map_err(|e| e.to_string())) } fn deallocate_memory(&mut self, ptr: Pointer) -> WResult<()> { - let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.deallocate(mem, ptr).map_err(|e| e.to_string()) - }) + let heap = &mut self.heap.borrow_mut(); + self.memory + .with_direct_access_mut(|mem| heap.deallocate(mem, ptr).map_err(|e| e.to_string())) } fn sandbox(&mut self) -> &mut dyn Sandbox { @@ -124,7 +135,7 @@ impl<'a> FunctionContext for FunctionExecutor<'a> { } } -impl<'a> Sandbox for FunctionExecutor<'a> { +impl Sandbox for FunctionExecutor { fn memory_get( &mut self, memory_id: MemoryId, @@ -132,18 +143,21 @@ impl<'a> Sandbox for FunctionExecutor<'a> { buf_ptr: Pointer, buf_len: WordSize, ) -> WResult { - let sandboxed_memory = self.sandbox_store.memory(memory_id).map_err(|e| e.to_string())?; + let sandboxed_memory = + self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; - match MemoryInstance::transfer( - &sandboxed_memory, - offset as usize, - &self.memory, - buf_ptr.into(), - buf_len as usize, - ) { - Ok(()) => Ok(sandbox_primitives::ERR_OK), - Err(_) => Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + let len = buf_len as usize; + + let buffer = match sandboxed_memory.read(Pointer::new(offset as u32), len) { + Err(_) => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + Ok(buffer) => buffer, + }; + + if let Err(_) = self.memory.set(buf_ptr.into(), &buffer) { 
+ return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) } + + Ok(sandbox_primitives::ERR_OK) } fn memory_set( @@ -153,37 +167,42 @@ impl<'a> Sandbox for FunctionExecutor<'a> { val_ptr: Pointer, val_len: WordSize, ) -> WResult { - let sandboxed_memory = self.sandbox_store.memory(memory_id).map_err(|e| e.to_string())?; + let sandboxed_memory = + self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; - match MemoryInstance::transfer( - &self.memory, - val_ptr.into(), - &sandboxed_memory, - offset as usize, - val_len as usize, - ) { - Ok(()) => Ok(sandbox_primitives::ERR_OK), - Err(_) => Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + let len = val_len as usize; + + let buffer = match self.memory.get(val_ptr.into(), len) { + Err(_) => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + Ok(buffer) => buffer, + }; + + if let Err(_) = sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer) { + return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) } + + Ok(sandbox_primitives::ERR_OK) } fn memory_teardown(&mut self, memory_id: MemoryId) -> WResult<()> { - self.sandbox_store.memory_teardown(memory_id).map_err(|e| e.to_string()) + self.sandbox_store + .borrow_mut() + .memory_teardown(memory_id) + .map_err(|e| e.to_string()) } - fn memory_new( - &mut self, - initial: u32, - maximum: u32, - ) -> WResult { - self.sandbox_store.new_memory(initial, maximum).map_err(|e| e.to_string()) + fn memory_new(&mut self, initial: u32, maximum: u32) -> WResult { + self.sandbox_store + .borrow_mut() + .new_memory(initial, maximum) + .map_err(|e| e.to_string()) } fn invoke( &mut self, instance_id: u32, export_name: &str, - args: &[u8], + mut args: &[u8], return_val: Pointer, return_val_len: WordSize, state: u32, @@ -191,16 +210,27 @@ impl<'a> Sandbox for FunctionExecutor<'a> { trace!(target: "sp-sandbox", "invoke, instance_idx={}", instance_id); // Deserialize arguments and convert them into wasmi types. - let args = Vec::::decode(&mut &args[..]) + let args = Vec::::decode(&mut args) .map_err(|_| "Can't decode serialized arguments for the invocation")? .into_iter() .map(Into::into) .collect::>(); - let instance = self.sandbox_store.instance(instance_id).map_err(|e| e.to_string())?; - let result = instance.invoke(export_name, &args, self, state); + let instance = + self.sandbox_store.borrow().instance(instance_id).map_err(|e| e.to_string())?; - match result { + let dispatch_thunk = self + .sandbox_store + .borrow() + .dispatch_thunk(instance_id) + .map_err(|e| e.to_string())?; + + match instance.invoke( + export_name, + &args, + state, + &mut SandboxContext { dispatch_thunk, executor: self }, + ) { Ok(None) => Ok(sandbox_primitives::ERR_OK), Ok(Some(val)) => { // Serialize return value and write it back into the memory. @@ -211,13 +241,16 @@ impl<'a> Sandbox for FunctionExecutor<'a> { self.write_memory(return_val, val).map_err(|_| "Return value buffer is OOB")?; Ok(sandbox_primitives::ERR_OK) }) - } + }, Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), } } fn instance_teardown(&mut self, instance_id: u32) -> WResult<()> { - self.sandbox_store.instance_teardown(instance_id).map_err(|e| e.to_string()) + self.sandbox_store + .borrow_mut() + .instance_teardown(instance_id) + .map_err(|e| e.to_string()) } fn instance_new( @@ -229,29 +262,38 @@ impl<'a> Sandbox for FunctionExecutor<'a> { ) -> WResult { // Extract a dispatch thunk from instance's table by the specified index. 
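[Editor's note] The rewritten `invoke` path above receives its arguments as a SCALE-encoded byte buffer, decodes them, and only then converts them into wasmi runtime values; note also that the memory helpers report failures as `ERR_OUT_OF_BOUNDS` status codes rather than trapping. A self-contained sketch of that encode/decode round trip, assuming `parity-scale-codec` (imported as `codec`, as in the Cargo.toml above) with its `derive` feature — the `Value` enum here is an illustrative stand-in, not the sandbox's actual typed-value type:

```rust
use codec::{Decode, Encode};

#[derive(Encode, Decode, Debug, PartialEq)]
enum Value {
    I32(i32),
    I64(i64),
}

fn main() {
    // The supervisor side encodes the argument list into one byte buffer...
    let args = vec![Value::I32(7), Value::I64(-1)];
    let encoded = args.encode();

    // ...and the host side decodes it back, mirroring the
    // `Vec::decode(&mut args)` call in the `invoke` implementation above.
    let decoded = Vec::<Value>::decode(&mut &encoded[..]).expect("valid encoding");
    assert_eq!(args, decoded);
}
```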
let dispatch_thunk = { - let table = self.table.as_ref() + let table = self + .table + .as_ref() .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")?; - table.get(dispatch_thunk_id) + table + .get(dispatch_thunk_id) .map_err(|_| "dispatch_thunk_idx is out of the table bounds")? .ok_or_else(|| "dispatch_thunk_idx points on an empty table entry")? }; - let guest_env = match sandbox::GuestEnvironment::decode(&self.sandbox_store, raw_env_def) { - Ok(guest_env) => guest_env, - Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), - }; + let guest_env = + match sandbox::GuestEnvironment::decode(&*self.sandbox_store.borrow(), raw_env_def) { + Ok(guest_env) => guest_env, + Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), + }; + + let store = self.sandbox_store.clone(); + let result = store.borrow_mut().instantiate( + wasm, + guest_env, + state, + &mut SandboxContext { executor: self, dispatch_thunk: dispatch_thunk.clone() }, + ); let instance_idx_or_err_code = - match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) - .map(|i| i.register(&mut self.sandbox_store)) - { + match result.map(|i| i.register(&mut store.borrow_mut(), dispatch_thunk)) { Ok(instance_idx) => instance_idx, - Err(sandbox::InstantiationError::StartTrapped) => - sandbox_primitives::ERR_EXECUTION, + Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, Err(_) => sandbox_primitives::ERR_MODULE, }; - Ok(instance_idx_or_err_code as u32) + Ok(instance_idx_or_err_code) } fn get_global_val( @@ -260,6 +302,7 @@ impl<'a> Sandbox for FunctionExecutor<'a> { name: &str, ) -> WResult> { self.sandbox_store + .borrow() .instance(instance_idx) .map(|i| i.get_global_val(name)) .map_err(|e| e.to_string()) @@ -286,7 +329,7 @@ struct Resolver<'a> { impl<'a> Resolver<'a> { fn new( - host_functions: &'a[&'static dyn Function], + host_functions: &'a [&'static dyn Function], allow_missing_func_imports: bool, heap_pages: usize, ) -> Resolver<'a> { @@ -301,25 +344,23 @@ impl<'a> Resolver<'a> { } impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { - fn resolve_func(&self, name: &str, signature: &wasmi::Signature) - -> std::result::Result - { + fn resolve_func( + &self, + name: &str, + signature: &wasmi::Signature, + ) -> std::result::Result { let signature = sp_wasm_interface::Signature::from(signature); for (function_index, function) in self.host_functions.iter().enumerate() { if name == function.name() { if signature == function.signature() { - return Ok( - wasmi::FuncInstance::alloc_host(signature.into(), function_index), - ) + return Ok(wasmi::FuncInstance::alloc_host(signature.into(), function_index)) } else { - return Err(wasmi::Error::Instantiation( - format!( - "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", - function.name(), - signature, - function.signature(), - ), - )) + return Err(wasmi::Error::Instantiation(format!( + "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", + function.name(), + signature, + function.signature(), + ))) } } } @@ -331,9 +372,7 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { Ok(wasmi::FuncInstance::alloc_host(signature.into(), id)) } else { - Err(wasmi::Error::Instantiation( - format!("Export {} not found", name), - )) + Err(wasmi::Error::Instantiation(format!("Export {} not found", name))) } } @@ -344,15 +383,14 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { ) -> Result { if field_name == "memory" { match &mut *self.import_memory.borrow_mut() { - Some(_) => 
Err(wasmi::Error::Instantiation( - "Memory can not be imported twice!".into(), - )), + Some(_) => + Err(wasmi::Error::Instantiation("Memory can not be imported twice!".into())), memory_ref @ None => { if memory_type - .maximum() - .map(|m| m.saturating_sub(memory_type.initial())) - .map(|m| self.heap_pages > m as usize) - .unwrap_or(false) + .maximum() + .map(|m| m.saturating_sub(memory_type.initial())) + .map(|m| self.heap_pages > m as usize) + .unwrap_or(false) { Err(wasmi::Error::Instantiation(format!( "Heap pages ({}) is greater than imported memory maximum ({}).", @@ -370,35 +408,40 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { *memory_ref = Some(memory.clone()); Ok(memory) } - } + }, } } else { - Err(wasmi::Error::Instantiation( - format!("Unknown memory reference with name: {}", field_name), - )) + Err(wasmi::Error::Instantiation(format!( + "Unknown memory reference with name: {}", + field_name + ))) } } } -impl<'a> wasmi::Externals for FunctionExecutor<'a> { - fn invoke_index(&mut self, index: usize, args: wasmi::RuntimeArgs) - -> Result, wasmi::Trap> - { +impl wasmi::Externals for FunctionExecutor { + fn invoke_index( + &mut self, + index: usize, + args: wasmi::RuntimeArgs, + ) -> Result, wasmi::Trap> { let mut args = args.as_ref().iter().copied().map(Into::into); - if let Some(function) = self.host_functions.get(index) { - function.execute(self, &mut args) + if let Some(function) = self.host_functions.clone().get(index) { + function + .execute(self, &mut args) .map_err(|msg| Error::FunctionExecution(function.name().to_string(), msg)) .map_err(wasmi::Trap::from) .map(|v| v.map(Into::into)) - } else if self.allow_missing_func_imports - && index >= self.host_functions.len() - && index < self.host_functions.len() + self.missing_functions.len() + } else if self.allow_missing_func_imports && + index >= self.host_functions.len() && + index < self.host_functions.len() + self.missing_functions.len() { Err(Error::from(format!( "Function `{}` is only a stub. Calling a stub is not allowed.", self.missing_functions[index - self.host_functions.len()], - )).into()) + )) + .into()) } else { Err(Error::from(format!("Could not find host function with index: {}", index)).into()) } @@ -436,9 +479,9 @@ fn call_in_wasm_module( memory: &MemoryRef, method: InvokeMethod, data: &[u8], - host_functions: &[&'static dyn Function], + host_functions: Arc>, allow_missing_func_imports: bool, - missing_functions: &Vec, + missing_functions: Arc>, ) -> Result, Error> { // Initialize FunctionExecutor. let table: Option = module_instance @@ -460,25 +503,26 @@ fn call_in_wasm_module( function_executor.write_memory(offset, data)?; let result = match method { - InvokeMethod::Export(method) => { - module_instance.invoke_export( - method, - &[I32(u32::from(offset) as i32), I32(data.len() as i32)], - &mut function_executor, - ) - }, + InvokeMethod::Export(method) => module_instance.invoke_export( + method, + &[I32(u32::from(offset) as i32), I32(data.len() as i32)], + &mut function_executor, + ), InvokeMethod::Table(func_ref) => { - let func = table.ok_or(Error::NoTable)? + let func = table + .ok_or(Error::NoTable)? .get(func_ref)? .ok_or(Error::NoTableEntryWithIndex(func_ref))?; FuncInstance::invoke( &func, &[I32(u32::from(offset) as i32), I32(data.len() as i32)], &mut function_executor, - ).map_err(Into::into) + ) + .map_err(Into::into) }, InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let dispatcher = table.ok_or(Error::NoTable)? + let dispatcher = table + .ok_or(Error::NoTable)? 
.get(dispatcher_ref)? .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; @@ -486,7 +530,8 @@ fn call_in_wasm_module( &dispatcher, &[I32(func as _), I32(u32::from(offset) as i32), I32(data.len() as i32)], &mut function_executor, - ).map_err(Into::into) + ) + .map_err(Into::into) }, }; @@ -516,15 +561,12 @@ fn instantiate_module( ) -> Result<(ModuleRef, Vec, MemoryRef), Error> { let resolver = Resolver::new(host_functions, allow_missing_func_imports, heap_pages); // start module instantiation. Don't run 'start' function yet. - let intermediate_instance = ModuleInstance::new( - module, - &ImportsBuilder::new().with_resolver("env", &resolver), - )?; + let intermediate_instance = + ModuleInstance::new(module, &ImportsBuilder::new().with_resolver("env", &resolver))?; // Verify that the module has the heap base global variable. let _ = get_heap_base(intermediate_instance.not_started_instance())?; - // Get the memory reference. Runtimes should import memory, but to be backwards // compatible we also support exported memory. let memory = match resolver.import_memory.into_inner() { @@ -539,7 +581,7 @@ fn instantiate_module( memory.grow(Pages(heap_pages)).map_err(|_| Error::Runtime)?; memory - } + }, }; if intermediate_instance.has_start() { @@ -590,9 +632,7 @@ impl GlobalValsSnapshot { // the instance should be the same as used for preserving and // we iterate the same way it as we do it for preserving values that means that the // types should be the same and all the values are mutable. So no error is expected/ - global_ref - .set(*global_val) - .map_err(|_| WasmError::ApplySnapshotFailed)?; + global_ref.set(*global_val).map_err(|_| WasmError::ApplySnapshotFailed)?; } Ok(()) } @@ -622,7 +662,8 @@ impl WasmModule for WasmiRuntime { &self.module, &self.host_functions, self.allow_missing_func_imports, - ).map_err(|e| WasmError::Instantiation(e.to_string()))?; + ) + .map_err(|e| WasmError::Instantiation(e.to_string()))?; Ok(Box::new(WasmiInstance { instance, @@ -631,7 +672,7 @@ impl WasmModule for WasmiRuntime { data_segments_snapshot: self.data_segments_snapshot.clone(), host_functions: self.host_functions.clone(), allow_missing_func_imports: self.allow_missing_func_imports, - missing_functions, + missing_functions: Arc::new(missing_functions), })) } } @@ -639,18 +680,18 @@ impl WasmModule for WasmiRuntime { /// Create a new `WasmiRuntime` given the code. This function loads the module and /// stores it in the instance. pub fn create_runtime( - code: &[u8], + blob: RuntimeBlob, heap_pages: u64, host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, ) -> Result { - let module = Module::from_buffer(&code).map_err(|_| WasmError::InvalidModule)?; + let data_segments_snapshot = + DataSegmentsSnapshot::take(&blob).map_err(|e| WasmError::Other(e.to_string()))?; - // Extract the data segments from the wasm code. - // - // A return of this error actually indicates that there is a problem in logic, since - // we just loaded and validated the `module` above. 
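[Editor's note] `create_runtime` now takes the data-segments snapshot straight from the `RuntimeBlob` before the blob is converted into a wasmi `Module`, instead of re-parsing the raw code. Conceptually, such a snapshot is just the list of `(offset, bytes)` pairs that the module's data section writes into linear memory; replaying it returns a reused instance to its freshly initialized state. A simplified sketch with illustrative types (not the real `DataSegmentsSnapshot` API):

```rust
// One initializer from a wasm data section: a destination offset in linear
// memory plus the bytes to place there.
struct DataSegment {
    offset: usize,
    data: Vec<u8>,
}

// Copy every segment back into memory, rejecting out-of-bounds writes.
fn apply_segments(memory: &mut [u8], segments: &[DataSegment]) -> Result<(), String> {
    for seg in segments {
        let end = seg
            .offset
            .checked_add(seg.data.len())
            .filter(|&end| end <= memory.len())
            .ok_or_else(|| "data segment is out of bounds".to_string())?;
        memory[seg.offset..end].copy_from_slice(&seg.data);
    }
    Ok(())
}

fn main() {
    let mut memory = vec![0u8; 16];
    let segments = vec![DataSegment { offset: 4, data: vec![1, 2, 3] }];
    apply_segments(&mut memory, &segments).unwrap();
    assert_eq!(&memory[4..7], &[1, 2, 3]);
}
```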
- let (data_segments_snapshot, global_vals_snapshot) = { + let module = + Module::from_parity_wasm_module(blob.into_inner()).map_err(|_| WasmError::InvalidModule)?; + + let global_vals_snapshot = { let (instance, _, _) = instantiate_module( heap_pages as usize, &module, @@ -658,15 +699,7 @@ pub fn create_runtime( allow_missing_func_imports, ) .map_err(|e| WasmError::Instantiation(e.to_string()))?; - - let data_segments_snapshot = DataSegmentsSnapshot::take( - &WasmModuleInfo::new(code) - .ok_or_else(|| WasmError::Other("cannot deserialize module".to_string()))?, - ) - .map_err(|e| WasmError::Other(e.to_string()))?; - let global_vals_snapshot = GlobalValsSnapshot::take(&instance); - - (data_segments_snapshot, global_vals_snapshot) + GlobalValsSnapshot::take(&instance) }; Ok(WasmiRuntime { @@ -695,10 +728,11 @@ pub struct WasmiInstance { /// These stubs will error when the wasm blob trie to call them. allow_missing_func_imports: bool, /// List of missing functions detected during function resolution - missing_functions: Vec, + missing_functions: Arc>, } -// This is safe because `WasmiInstance` does not leak any references to `self.memory` and `self.instance` +// This is safe because `WasmiInstance` does not leak any references to `self.memory` and +// `self.instance` unsafe impl Send for WasmiInstance {} impl WasmInstance for WasmiInstance { @@ -727,9 +761,9 @@ impl WasmInstance for WasmiInstance { &self.memory, method, data, - self.host_functions.as_ref(), + self.host_functions.clone(), self.allow_missing_func_imports, - self.missing_functions.as_ref(), + self.missing_functions.clone(), ) } @@ -740,7 +774,7 @@ impl WasmInstance for WasmiInstance { .as_global() .ok_or_else(|| format!("`{}` is not a global", name))? .get() - .into() + .into(), )), None => Ok(None), } diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 7a8aa1ff458f3..3158cdecc3263 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmtime" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,17 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +libc = "0.2.90" +cfg-if = "1.0" log = "0.4.8" scoped-tls = "1.0" -parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor-common = { version = "0.8.0", path = "../common" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } -wasmtime = "0.19" -pwasm-utils = "0.14.0" +parity-wasm = "0.42.0" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor-common = { version = "0.10.0-dev", path = "../common" } +sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "4.0.0-dev", path = "../../../primitives/runtime-interface" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sc-allocator = { version = "4.0.0-dev", path = "../../allocator" } +wasmtime = { version = "0.27.0", default-features = false, features = [ + "cache", + "parallel-compilation", +] } [dev-dependencies] -assert_matches = "1.3.0" 
+sc-runtime-test = { version = "2.0.0", path = "../runtime-test" } +sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } +wat = "1.0" diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index eeb7cb927167f..8453ec3954354 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -1,55 +1,52 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! This module defines `HostState` and `HostContext` structs which provide logic and state //! required for execution of host. use crate::instance_wrapper::InstanceWrapper; -use crate::util; -use std::{cell::RefCell, rc::Rc}; +use codec::{Decode, Encode}; use log::trace; -use codec::{Encode, Decode}; -use sp_allocator::FreeingBumpHeapAllocator; -use sc_executor_common::error::Result; -use sc_executor_common::sandbox::{self, SandboxCapabilities, SupervisorFuncIndex}; +use sc_allocator::FreeingBumpHeapAllocator; +use sc_executor_common::{ + error::Result, + sandbox::{self, SupervisorFuncIndex}, + util::MemoryTransfer, +}; use sp_core::sandbox as sandbox_primitives; use sp_wasm_interface::{FunctionContext, MemoryId, Pointer, Sandbox, WordSize}; +use std::{cell::RefCell, rc::Rc}; use wasmtime::{Func, Val}; -/// Wrapper type for pointer to a Wasm table entry. -/// -/// The wrapper type is used to ensure that the function reference is valid as it must be unsafely -/// dereferenced from within the safe method `::invoke`. -#[derive(Clone)] -pub struct SupervisorFuncRef(Func); - /// The state required to construct a HostContext context. The context only lasts for one host /// call, whereas the state is maintained for the duration of a Wasm runtime call, which may make /// many different host calls that must share state. pub struct HostState { - // We need some interior mutability here since the host state is shared between all host - // function handlers and the wasmtime backend's `impl WasmRuntime`. - // - // Furthermore, because of recursive calls (e.g. runtime can create and call an sandboxed - // instance which in turn can call the runtime back) we have to be very careful with borrowing - // those. - // - // Basically, most of the interactions should do temporary borrow immediately releasing the - // borrow after performing necessary queries/changes. - sandbox_store: RefCell>, + /// We need some interior mutability here since the host state is shared between all host + /// function handlers and the wasmtime backend's `impl WasmRuntime`. 
+ /// + /// Furthermore, because of recursive calls (e.g. runtime can create and call an sandboxed + /// instance which in turn can call the runtime back) we have to be very careful with borrowing + /// those. + /// + /// Basically, most of the interactions should do temporary borrow immediately releasing the + /// borrow after performing necessary queries/changes. + sandbox_store: Rc>>, allocator: RefCell, instance: Rc, } @@ -58,7 +55,9 @@ impl HostState { /// Constructs a new `HostState`. pub fn new(allocator: FreeingBumpHeapAllocator, instance: Rc) -> Self { HostState { - sandbox_store: RefCell::new(sandbox::Store::new()), + sandbox_store: Rc::new(RefCell::new(sandbox::Store::new( + sandbox::SandboxBackend::TryWasmer, + ))), allocator: RefCell::new(allocator), instance, } @@ -82,61 +81,17 @@ impl<'a> std::ops::Deref for HostContext<'a> { } } -impl<'a> SandboxCapabilities for HostContext<'a> { - type SupervisorFuncRef = SupervisorFuncRef; - - fn invoke( - &mut self, - dispatch_thunk: &Self::SupervisorFuncRef, - invoke_args_ptr: Pointer, - invoke_args_len: WordSize, - state: u32, - func_idx: SupervisorFuncIndex, - ) -> Result { - let result = dispatch_thunk.0.call(&[ - Val::I32(u32::from(invoke_args_ptr) as i32), - Val::I32(invoke_args_len as i32), - Val::I32(state as i32), - Val::I32(usize::from(func_idx) as i32), - ]); - match result { - Ok(ret_vals) => { - let ret_val = if ret_vals.len() != 1 { - return Err(format!( - "Supervisor function returned {} results, expected 1", - ret_vals.len() - ) - .into()); - } else { - &ret_vals[0] - }; - - if let Some(ret_val) = ret_val.i64() { - Ok(ret_val) - } else { - return Err("Supervisor function returned unexpected result!".into()); - } - } - Err(err) => Err(err.to_string().into()), - } - } -} - impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> { fn read_memory_into( &self, address: Pointer, dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { - self.instance - .read_memory_into(address, dest) - .map_err(|e| e.to_string()) + self.instance.read_memory_into(address, dest).map_err(|e| e.to_string()) } fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { - self.instance - .write_memory_from(address, data) - .map_err(|e| e.to_string()) + self.instance.write_memory_from(address, data).map_err(|e| e.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { @@ -164,31 +119,21 @@ impl<'a> Sandbox for HostContext<'a> { buf_ptr: Pointer, buf_len: WordSize, ) -> sp_wasm_interface::Result { - let sandboxed_memory = self - .sandbox_store - .borrow() - .memory(memory_id) - .map_err(|e| e.to_string())?; - sandboxed_memory.with_direct_access(|sandboxed_memory| { - let len = buf_len as usize; - let src_range = match util::checked_range(offset as usize, len, sandboxed_memory.len()) - { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - let supervisor_mem_size = self.instance.memory_size() as usize; - let dst_range = match util::checked_range(buf_ptr.into(), len, supervisor_mem_size) { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - self.instance - .write_memory_from( - Pointer::new(dst_range.start as u32), - &sandboxed_memory[src_range], - ) - .expect("ranges are checked above; write can't fail; qed"); - Ok(sandbox_primitives::ERR_OK) - }) + let sandboxed_memory = + self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; + + let len = buf_len as usize; + + let buffer = match 
sandboxed_memory.read(Pointer::new(offset as u32), len) { + Err(_) => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + Ok(buffer) => buffer, + }; + + if let Err(_) = self.instance.write_memory_from(buf_ptr, &buffer) { + return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) + } + + Ok(sandbox_primitives::ERR_OK) } fn memory_set( @@ -198,31 +143,21 @@ impl<'a> Sandbox for HostContext<'a> { val_ptr: Pointer, val_len: WordSize, ) -> sp_wasm_interface::Result { - let sandboxed_memory = self - .sandbox_store - .borrow() - .memory(memory_id) - .map_err(|e| e.to_string())?; - sandboxed_memory.with_direct_access_mut(|sandboxed_memory| { - let len = val_len as usize; - let supervisor_mem_size = self.instance.memory_size() as usize; - let src_range = match util::checked_range(val_ptr.into(), len, supervisor_mem_size) { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - let dst_range = match util::checked_range(offset as usize, len, sandboxed_memory.len()) - { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - self.instance - .read_memory_into( - Pointer::new(src_range.start as u32), - &mut sandboxed_memory[dst_range], - ) - .expect("ranges are checked above; read can't fail; qed"); - Ok(sandbox_primitives::ERR_OK) - }) + let sandboxed_memory = + self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; + + let len = val_len as usize; + + let buffer = match self.instance.read_memory(val_ptr, len) { + Err(_) => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + Ok(buffer) => buffer, + }; + + if let Err(_) = sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer) { + return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) + } + + Ok(sandbox_primitives::ERR_OK) } fn memory_teardown(&mut self, memory_id: MemoryId) -> sp_wasm_interface::Result<()> { @@ -232,7 +167,7 @@ impl<'a> Sandbox for HostContext<'a> { .map_err(|e| e.to_string()) } - fn memory_new(&mut self, initial: u32, maximum: MemoryId) -> sp_wasm_interface::Result { + fn memory_new(&mut self, initial: u32, maximum: u32) -> sp_wasm_interface::Result { self.sandbox_store .borrow_mut() .new_memory(initial, maximum) @@ -257,12 +192,21 @@ impl<'a> Sandbox for HostContext<'a> { .map(Into::into) .collect::>(); - let instance = self + let instance = + self.sandbox_store.borrow().instance(instance_id).map_err(|e| e.to_string())?; + + let dispatch_thunk = self .sandbox_store .borrow() - .instance(instance_id) + .dispatch_thunk(instance_id) .map_err(|e| e.to_string())?; - let result = instance.invoke(export_name, &args, self, state); + + let result = instance.invoke( + export_name, + &args, + state, + &mut SandboxContext { host_context: self, dispatch_thunk }, + ); match result { Ok(None) => Ok(sandbox_primitives::ERR_OK), @@ -276,7 +220,7 @@ impl<'a> Sandbox for HostContext<'a> { .map_err(|_| "can't write return value")?; Ok(sandbox_primitives::ERR_OK) }) - } + }, Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), } } @@ -304,13 +248,12 @@ impl<'a> Sandbox for HostContext<'a> { .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")? .get(dispatch_thunk_id); - let func_ref = table_item + table_item .ok_or_else(|| "dispatch_thunk_id is out of bounds")? .funcref() .ok_or_else(|| "dispatch_thunk_idx should be a funcref")? .ok_or_else(|| "dispatch_thunk_idx should point to actual func")? 
- .clone(); - SupervisorFuncRef(func_ref) + .clone() }; let guest_env = @@ -319,14 +262,22 @@ impl<'a> Sandbox for HostContext<'a> { Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), }; - let instance_idx_or_err_code = - match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) - .map(|i| i.register(&mut *self.sandbox_store.borrow_mut())) - { - Ok(instance_idx) => instance_idx, - Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, - Err(_) => sandbox_primitives::ERR_MODULE, - }; + let store = self.sandbox_store.clone(); + let store = &mut store.borrow_mut(); + let result = store + .instantiate( + wasm, + guest_env, + state, + &mut SandboxContext { host_context: self, dispatch_thunk: dispatch_thunk.clone() }, + ) + .map(|i| i.register(store, dispatch_thunk)); + + let instance_idx_or_err_code = match result { + Ok(instance_idx) => instance_idx, + Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, + Err(_) => sandbox_primitives::ERR_MODULE, + }; Ok(instance_idx_or_err_code as u32) } @@ -343,3 +294,49 @@ impl<'a> Sandbox for HostContext<'a> { .map_err(|e| e.to_string()) } } + +struct SandboxContext<'a, 'b> { + host_context: &'a mut HostContext<'b>, + dispatch_thunk: Func, +} + +impl<'a, 'b> sandbox::SandboxContext for SandboxContext<'a, 'b> { + fn invoke( + &mut self, + invoke_args_ptr: Pointer, + invoke_args_len: WordSize, + state: u32, + func_idx: SupervisorFuncIndex, + ) -> Result { + let result = self.dispatch_thunk.call(&[ + Val::I32(u32::from(invoke_args_ptr) as i32), + Val::I32(invoke_args_len as i32), + Val::I32(state as i32), + Val::I32(usize::from(func_idx) as i32), + ]); + match result { + Ok(ret_vals) => { + let ret_val = if ret_vals.len() != 1 { + return Err(format!( + "Supervisor function returned {} results, expected 1", + ret_vals.len() + ) + .into()) + } else { + &ret_vals[0] + }; + + if let Some(ret_val) = ret_val.i64() { + Ok(ret_val) + } else { + return Err("Supervisor function returned unexpected result!".into()) + } + }, + Err(err) => Err(err.to_string().into()), + } + } + + fn supervisor_context(&mut self) -> &mut dyn FunctionContext { + self.host_context + } +} diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index add62df5cef45..b27fb944bc030 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,13 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
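[Editor's note] Both executors now funnel sandbox calls through a `SandboxContext` that owns the dispatch thunk, and the thunk returns a single `i64` from which the caller recovers a pointer/length pair via `unpack_ptr_and_len`. A standalone sketch of that packing convention as I understand it from `sp-runtime-interface` (low 32 bits carry the pointer, high 32 bits the length — verify against the crate before relying on it; these helpers are re-implemented here for illustration):

```rust
// Hypothetical re-implementations mirroring the convention; not crate source.
fn pack_ptr_and_len(ptr: u32, len: u32) -> u64 {
    (u64::from(len) << 32) | u64::from(ptr)
}

fn unpack_ptr_and_len(packed: u64) -> (u32, u32) {
    ((packed & 0xFFFF_FFFF) as u32, (packed >> 32) as u32)
}

fn main() {
    let packed = pack_ptr_and_len(0x1000, 64);
    assert_eq!(unpack_ptr_and_len(packed), (0x1000, 64));
}
```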
-use crate::state_holder; +use crate::{state_holder, util}; use sc_executor_common::error::WasmError; -use sp_wasm_interface::{Function, Value, ValueType}; +use sp_wasm_interface::{Function, ValueType}; use std::any::Any; use wasmtime::{ - Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, - Trap, Val, Store, + Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, Store, + Trap, Val, }; pub struct Imports { @@ -44,32 +44,37 @@ pub fn resolve_imports( let mut externs = vec![]; let mut memory_import_index = None; for import_ty in module.imports() { + let name = import_name(&import_ty)?; + if import_ty.module() != "env" { return Err(WasmError::Other(format!( "host doesn't provide any imports from non-env module: {}:{}", import_ty.module(), - import_ty.name() - ))); + name, + ))) } - let resolved = match import_ty.name() { + let resolved = match name { "memory" => { memory_import_index = Some(externs.len()); resolve_memory_import(store, &import_ty, heap_pages)? - } - _ => resolve_func_import( - store, - &import_ty, - host_functions, - allow_missing_func_imports, - )?, + }, + _ => + resolve_func_import(store, &import_ty, host_functions, allow_missing_func_imports)?, }; externs.push(resolved); } - Ok(Imports { - memory_import_index, - externs, - }) + Ok(Imports { memory_import_index, externs }) +} + +/// When the module linking proposal is supported the import's name can be `None`. +/// Because we are not using this proposal we could safely unwrap the name. +/// However, we opt for an error in order to avoid panics at all costs. +fn import_name<'a, 'b: 'a>(import: &'a ImportType<'b>) -> Result<&'a str, WasmError> { + let name = import.name().ok_or_else(|| { + WasmError::Other("The module linking proposal is not supported.".to_owned()) + })?; + Ok(name) } fn resolve_memory_import( @@ -79,21 +84,17 @@ fn resolve_memory_import( ) -> Result { let requested_memory_ty = match import_ty.ty() { ExternType::Memory(memory_ty) => memory_ty, - _ => { + _ => return Err(WasmError::Other(format!( "this import must be of memory type: {}:{}", import_ty.module(), - import_ty.name() - ))) - } + import_name(&import_ty)?, + ))), }; // Increment the min (a.k.a initial) number of pages by `heap_pages` and check if it exceeds the // maximum specified by the import. 
- let initial = requested_memory_ty - .limits() - .min() - .saturating_add(heap_pages); + let initial = requested_memory_ty.limits().min().saturating_add(heap_pages); if let Some(max) = requested_memory_ty.limits().max() { if initial > max { return Err(WasmError::Other(format!( @@ -101,12 +102,17 @@ fn resolve_memory_import( by the runtime wasm module {}", initial, max, - ))); + ))) } } let memory_ty = MemoryType::new(Limits::new(initial, requested_memory_ty.limits().max())); - let memory = Memory::new(store, memory_ty); + let memory = Memory::new(store, memory_ty).map_err(|e| { + WasmError::Other(format!( + "failed to create a memory during resolving of memory import: {}", + e, + )) + })?; Ok(Extern::Memory(memory)) } @@ -116,49 +122,40 @@ fn resolve_func_import( host_functions: &[&'static dyn Function], allow_missing_func_imports: bool, ) -> Result { + let name = import_name(&import_ty)?; + let func_ty = match import_ty.ty() { ExternType::Func(func_ty) => func_ty, - _ => { + _ => return Err(WasmError::Other(format!( "host doesn't provide any non function imports besides 'memory': {}:{}", import_ty.module(), - import_ty.name() - ))); - } + name, + ))), }; - let host_func = match host_functions - .iter() - .find(|host_func| host_func.name() == import_ty.name()) - { + let host_func = match host_functions.iter().find(|host_func| host_func.name() == name) { Some(host_func) => host_func, - None if allow_missing_func_imports => { - return Ok(MissingHostFuncHandler::new(import_ty).into_extern(store, &func_ty)); - } - None => { + None if allow_missing_func_imports => + return Ok(MissingHostFuncHandler::new(import_ty)?.into_extern(store, &func_ty)), + None => return Err(WasmError::Other(format!( "host doesn't provide such function: {}:{}", import_ty.module(), - import_ty.name() - ))); - } + name, + ))), }; - if !signature_matches(&func_ty, &wasmtime_func_sig(*host_func)) { + if &func_ty != &wasmtime_func_sig(*host_func) { return Err(WasmError::Other(format!( "signature mismatch for: {}:{}", import_ty.module(), - import_ty.name() - ))); + name, + ))) } Ok(HostFuncHandler::new(*host_func).into_extern(store)) } -/// Returns `true` if `lhs` and `rhs` represent the same signature. -fn signature_matches(lhs: &wasmtime::FuncType, rhs: &wasmtime::FuncType) -> bool { - lhs.params() == rhs.params() && lhs.results() == rhs.results() -} - /// This structure implements `Callable` and acts as a bridge between wasmtime and /// substrate host functions. struct HostFuncHandler { @@ -178,12 +175,12 @@ fn call_static( qed ", ); - // `into_value` panics if it encounters a value that doesn't fit into the values + // `from_wasmtime_val` panics if it encounters a value that doesn't fit into the values // available in substrate. // // This, however, cannot happen since the signature of this function is created from // a `dyn Function` signature of which cannot have a non substrate value by definition. 
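[Editor's note] The sizing rule enforced by `resolve_memory_import` above is small but easy to get wrong: the import's declared minimum is bumped by `heap_pages`, and instantiation must fail if the result exceeds the declared maximum. A sketch of just that rule as a hypothetical helper:

```rust
// Compute the effective initial page count for an imported memory, failing if
// the heap-page allowance pushes it past the import's declared maximum.
fn effective_initial_pages(min: u32, max: Option<u32>, heap_pages: u32) -> Result<u32, String> {
    let initial = min.saturating_add(heap_pages);
    if let Some(max) = max {
        if initial > max {
            return Err(format!(
                "initial number of pages ({}) exceeds the declared maximum ({})",
                initial, max,
            ))
        }
    }
    Ok(initial)
}

fn main() {
    assert_eq!(effective_initial_pages(16, Some(1024), 8), Ok(24));
    assert!(effective_initial_pages(16, Some(20), 8).is_err());
}
```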
- let mut params = wasmtime_params.iter().cloned().map(into_value); + let mut params = wasmtime_params.iter().cloned().map(util::from_wasmtime_val); std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { static_func.execute(&mut host_ctx, &mut params) @@ -202,9 +199,9 @@ fn call_static( "wasmtime function signature, therefore the number of results, should always \ correspond to the number of results returned by the host function", ); - wasmtime_results[0] = into_wasmtime_val(ret_val); + wasmtime_results[0] = util::into_wasmtime_val(ret_val); Ok(()) - } + }, Ok(None) => { debug_assert!( wasmtime_results.len() == 0, @@ -212,26 +209,22 @@ fn call_static( correspond to the number of results returned by the host function", ); Ok(()) - } + }, Err(msg) => Err(Trap::new(msg)), } } impl HostFuncHandler { fn new(host_func: &'static dyn Function) -> Self { - Self { - host_func, - } + Self { host_func } } fn into_extern(self, store: &Store) -> Extern { let host_func = self.host_func; let func_ty = wasmtime_func_sig(self.host_func); - let func = Func::new(store, func_ty, - move |_, params, result| { - call_static(host_func, params, result) - } - ); + let func = Func::new(store, func_ty, move |_, params, result| { + call_static(host_func, params, result) + }); Extern::Func(func) } } @@ -243,42 +236,28 @@ struct MissingHostFuncHandler { } impl MissingHostFuncHandler { - fn new(import_ty: &ImportType) -> Self { - Self { + fn new(import_ty: &ImportType) -> Result { + Ok(Self { module: import_ty.module().to_string(), - name: import_ty.name().to_string(), - } + name: import_name(import_ty)?.to_string(), + }) } fn into_extern(self, store: &Store, func_ty: &FuncType) -> Extern { let Self { module, name } = self; - let func = Func::new(store, func_ty.clone(), - move |_, _, _| Err(Trap::new(format!( - "call to a missing function {}:{}", - module, name - ))) - ); + let func = Func::new(store, func_ty.clone(), move |_, _, _| { + Err(Trap::new(format!("call to a missing function {}:{}", module, name))) + }); Extern::Func(func) } } fn wasmtime_func_sig(func: &dyn Function) -> wasmtime::FuncType { - let params = func - .signature() - .args - .iter() - .cloned() - .map(into_wasmtime_val_type) - .collect::>() - .into_boxed_slice(); - let results = func - .signature() - .return_value - .iter() - .cloned() - .map(into_wasmtime_val_type) - .collect::>() - .into_boxed_slice(); + let signature = func.signature(); + let params = signature.args.iter().cloned().map(into_wasmtime_val_type); + + let results = signature.return_value.iter().cloned().map(into_wasmtime_val_type); + wasmtime::FuncType::new(params, results) } @@ -291,28 +270,6 @@ fn into_wasmtime_val_type(val_ty: ValueType) -> wasmtime::ValType { } } -/// Converts a `Val` into a substrate runtime interface `Value`. -/// -/// Panics if the given value doesn't have a corresponding variant in `Value`. -pub fn into_value(val: Val) -> Value { - match val { - Val::I32(v) => Value::I32(v), - Val::I64(v) => Value::I64(v), - Val::F32(f_bits) => Value::F32(f_bits), - Val::F64(f_bits) => Value::F64(f_bits), - _ => panic!("Given value type is unsupported by substrate"), - } -} - -pub fn into_wasmtime_val(value: Value) -> wasmtime::Val { - match value { - Value::I32(v) => Val::I32(v), - Value::I64(v) => Val::I64(v), - Value::F32(f_bits) => Val::F32(f_bits), - Value::F64(f_bits) => Val::F64(f_bits), - } -} - /// Attempt to convert a opaque panic payload to a string. 
fn stringify_panic_payload(payload: Box) -> String { match payload.downcast::<&'static str>() { diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 089d8cb237b56..f66d62f673d90 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,133 +19,78 @@ //! Defines data and logic needed for interaction with an WebAssembly instance of a substrate //! runtime module. -use crate::util; -use crate::imports::Imports; +use crate::{ + imports::Imports, + util::{from_wasmtime_val, into_wasmtime_val}, +}; -use std::{slice, marker}; use sc_executor_common::{ error::{Error, Result}, - util::{WasmModuleInfo, DataSegmentsSnapshot}, + runtime_blob, + util::checked_range, wasm_runtime::InvokeMethod, }; -use sp_wasm_interface::{Pointer, WordSize, Value}; -use wasmtime::{Engine, Instance, Module, Memory, Table, Val, Func, Extern, Global, Store}; -use parity_wasm::elements; - -mod globals_snapshot; - -pub use globals_snapshot::GlobalsSnapshot; - -pub struct ModuleWrapper { - module: Module, - data_segments_snapshot: DataSegmentsSnapshot, -} - -impl ModuleWrapper { - pub fn new(engine: &Engine, code: &[u8]) -> Result { - let mut raw_module: elements::Module = elements::deserialize_buffer(code) - .map_err(|e| Error::from(format!("cannot decode module: {}", e)))?; - pwasm_utils::export_mutable_globals(&mut raw_module, "exported_internal_global"); - let instrumented_code = elements::serialize(raw_module) - .map_err(|e| Error::from(format!("cannot encode module: {}", e)))?; - - let module = Module::new(engine, &instrumented_code) - .map_err(|e| Error::from(format!("cannot create module: {}", e)))?; - - let module_info = WasmModuleInfo::new(code) - .ok_or_else(|| Error::from("cannot deserialize module".to_string()))?; - - let data_segments_snapshot = DataSegmentsSnapshot::take(&module_info) - .map_err(|e| Error::from(format!("cannot take data segments snapshot: {}", e)))?; - - Ok(Self { - module, - data_segments_snapshot, - }) - } - - pub fn module(&self) -> &Module { - &self.module - } - - pub fn data_segments_snapshot(&self) -> &DataSegmentsSnapshot { - &self.data_segments_snapshot - } -} +use sp_wasm_interface::{Pointer, Value, WordSize}; +use std::{marker, slice}; +use wasmtime::{Extern, Func, Global, Instance, Memory, Module, Store, Table, Val}; /// Invoked entrypoint format. pub enum EntryPointType { /// Direct call. /// /// Call is made by providing only payload reference and length. - Direct, + Direct { entrypoint: wasmtime::TypedFunc<(u32, u32), u64> }, /// Indirect call. /// /// Call is made by providing payload reference and length, and extra argument - /// for advanced routing (typically extra WASM function pointer). - Wrapped(u32), + /// for advanced routing. + Wrapped { + /// The extra argument passed to the runtime. It is typically a wasm function pointer. + func: u32, + dispatcher: wasmtime::TypedFunc<(u32, u32, u32), u64>, + }, } /// Wasm blob entry point. pub struct EntryPoint { call_type: EntryPointType, - func: wasmtime::Func, } impl EntryPoint { /// Call this entry point. 
pub fn call(&self, data_ptr: Pointer, data_len: WordSize) -> Result { - let data_ptr = u32::from(data_ptr) as i32; - let data_len = u32::from(data_len) as i32; - - (match self.call_type { - EntryPointType::Direct => { - self.func.call(&[ - wasmtime::Val::I32(data_ptr), - wasmtime::Val::I32(data_len), - ]) - }, - EntryPointType::Wrapped(func) => { - self.func.call(&[ - wasmtime::Val::I32(func as _), - wasmtime::Val::I32(data_ptr), - wasmtime::Val::I32(data_len), - ]) - }, - }) - .map(|results| - // the signature is checked to have i64 return type - results[0].unwrap_i64() as u64 - ) - .map_err(|err| Error::from(format!( - "Wasm execution trapped: {}", - err - ))) + let data_ptr = u32::from(data_ptr); + let data_len = u32::from(data_len); + + fn handle_trap(err: wasmtime::Trap) -> Error { + Error::from(format!("Wasm execution trapped: {}", err)) + } + + match self.call_type { + EntryPointType::Direct { ref entrypoint } => + entrypoint.call((data_ptr, data_len)).map_err(handle_trap), + EntryPointType::Wrapped { func, ref dispatcher } => + dispatcher.call((func, data_ptr, data_len)).map_err(handle_trap), + } } pub fn direct(func: wasmtime::Func) -> std::result::Result { - match (func.ty().params(), func.ty().results()) { - (&[wasmtime::ValType::I32, wasmtime::ValType::I32], &[wasmtime::ValType::I64]) => { - Ok(Self { func, call_type: EntryPointType::Direct }) - } - _ => { - Err("Invalid signature for direct entry point") - } - } + let entrypoint = func + .typed::<(u32, u32), u64>() + .map_err(|_| "Invalid signature for direct entry point")? + .clone(); + Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) } - pub fn wrapped(dispatcher: wasmtime::Func, func: u32) -> std::result::Result { - match (dispatcher.ty().params(), dispatcher.ty().results()) { - ( - &[wasmtime::ValType::I32, wasmtime::ValType::I32, wasmtime::ValType::I32], - &[wasmtime::ValType::I64], - ) => { - Ok(Self { func: dispatcher, call_type: EntryPointType::Wrapped(func) }) - }, - _ => { - Err("Invalid signature for wrapped entry point") - } - } + pub fn wrapped( + dispatcher: wasmtime::Func, + func: u32, + ) -> std::result::Result { + let dispatcher = dispatcher + .typed::<(u32, u32, u32), u64>() + .map_err(|_| "Invalid signature for wrapped entry point")? + .clone(); + Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) } } @@ -155,12 +100,16 @@ impl EntryPoint { /// routines. pub struct InstanceWrapper { instance: Instance, + // The memory instance of the `instance`. // - // It is important to make sure that we don't make any copies of this to make it easier to proof - // See `memory_as_slice` and `memory_as_slice_mut`. + // It is important to make sure that we don't make any copies of this to make it easier to + // proof See `memory_as_slice` and `memory_as_slice_mut`. memory: Memory, + + /// Indirect functions table of the module table: Option, + // Make this struct explicitly !Send & !Sync. _not_send_nor_sync: marker::PhantomData<*const ()>, } @@ -172,7 +121,6 @@ fn extern_memory(extern_: &Extern) -> Option<&Memory> { } } - fn extern_global(extern_: &Extern) -> Option<&Global> { match extern_ { Extern::Global(glob) => Some(glob), @@ -196,20 +144,18 @@ fn extern_func(extern_: &Extern) -> Option<&Func> { impl InstanceWrapper { /// Create a new instance wrapper from the given wasm module. 
- pub fn new(store: &Store, module_wrapper: &ModuleWrapper, imports: &Imports, heap_pages: u32) -> Result { - let instance = Instance::new(store, &module_wrapper.module, &imports.externs) + pub fn new(store: &Store, module: &Module, imports: &Imports, heap_pages: u32) -> Result { + let instance = Instance::new(store, module, &imports.externs) .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; let memory = match imports.memory_import_index { - Some(memory_idx) => { - extern_memory(&imports.externs[memory_idx]) - .expect("only memory can be at the `memory_idx`; qed") - .clone() - } + Some(memory_idx) => extern_memory(&imports.externs[memory_idx]) + .expect("only memory can be at the `memory_idx`; qed") + .clone(), None => { let memory = get_linear_memory(&instance)?; if !memory.grow(heap_pages).is_ok() { - return Err("failed top increase the linear memory size".into()); + return Err("failed to increase the linear memory size".into()) } memory }, @@ -231,42 +177,38 @@ impl InstanceWrapper { Ok(match method { InvokeMethod::Export(method) => { // Resolve the requested method and verify that it has a proper signature. - let export = self - .instance - .get_export(method) - .ok_or_else(|| Error::from(format!("Exported method {} is not found", method)))?; + let export = self.instance.get_export(method).ok_or_else(|| { + Error::from(format!("Exported method {} is not found", method)) + })?; let func = extern_func(&export) .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? .clone(); - EntryPoint::direct(func) - .map_err(|_| - Error::from(format!( - "Exported function '{}' has invalid signature.", - method, - )) - )? + EntryPoint::direct(func).map_err(|_| { + Error::from(format!("Exported function '{}' has invalid signature.", method)) + })? }, InvokeMethod::Table(func_ref) => { - let table = self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; - let val = table.get(func_ref) - .ok_or(Error::NoTableEntryWithIndex(func_ref))?; + let table = + self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; + let val = table.get(func_ref).ok_or(Error::NoTableEntryWithIndex(func_ref))?; let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? .ok_or(Error::FunctionRefIsNull(func_ref))? .clone(); - EntryPoint::direct(func) - .map_err(|_| - Error::from(format!( - "Function @{} in exported table has invalid signature for direct call.", - func_ref, - )) - )? - }, + EntryPoint::direct(func).map_err(|_| { + Error::from(format!( + "Function @{} in exported table has invalid signature for direct call.", + func_ref, + )) + })? + }, InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let table = self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; - let val = table.get(dispatcher_ref) + let table = + self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; + let val = table + .get(dispatcher_ref) .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; let dispatcher = val .funcref() @@ -274,13 +216,12 @@ impl InstanceWrapper { .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? .clone(); - EntryPoint::wrapped(dispatcher, func) - .map_err(|_| - Error::from(format!( - "Function @{} in exported table has invalid signature for wrapped call.", - dispatcher_ref, - )) - )? + EntryPoint::wrapped(dispatcher, func).map_err(|_| { + Error::from(format!( + "Function @{} in exported table has invalid signature for wrapped call.", + dispatcher_ref, + )) + })? 
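[Editor's note] `EntryPoint::direct` and `EntryPoint::wrapped` now go through `wasmtime::TypedFunc`, so the signature is validated once when the entry point is resolved rather than by inspecting `Val` slices on every call. A sketch of the same check against a toy module — assuming wasmtime 0.27 (as pinned in the Cargo.toml above) built with its `wat` feature, plus the `anyhow` crate for error plumbing:

```rust
use wasmtime::{Engine, Instance, Module, Store};

fn main() -> anyhow::Result<()> {
    let engine = Engine::default();
    let store = Store::new(&engine);

    // A toy runtime export with the expected (i32, i32) -> i64 signature.
    let module = Module::new(
        &engine,
        r#"(module (func (export "entry") (param i32 i32) (result i64) i64.const 42))"#,
    )?;
    let instance = Instance::new(&store, &module, &[])?;

    let func = instance.get_func("entry").expect("export exists");
    // Mirrors `EntryPoint::direct`: this fails here, once, if the export does
    // not have the expected signature.
    let entrypoint = func.typed::<(u32, u32), u64>()?;
    assert_eq!(entrypoint.call((0, 0))?, 42);
    Ok(())
}
```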
}, }) } @@ -290,11 +231,6 @@ impl InstanceWrapper { self.table.as_ref() } - /// Returns the byte size of the linear memory instance attached to this instance. - pub fn memory_size(&self) -> u32 { - self.memory.data_size() as u32 - } - /// Reads the `__heap_base: i32` global variable and returns it. /// /// Returns an error if it doesn't exist, is not a global, or is not of type i32. @@ -356,36 +292,49 @@ fn get_table(instance: &Instance) -> Option<Table>
{ .cloned() } -/// Functions realted to memory. +/// Functions related to memory. impl InstanceWrapper { - /// Read data from a slice of memory into a destination buffer. + /// Read data from a slice of memory into a newly allocated buffer. /// /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> Result<()> { + pub fn read_memory(&self, source_addr: Pointer, size: usize) -> Result> { + let range = checked_range(source_addr.into(), size, self.memory.data_size()) + .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; + + let mut buffer = vec![0; range.len()]; + self.read_memory_into(source_addr, &mut buffer)?; + + Ok(buffer) + } + + /// Read data from the instance memory into a slice. + /// + /// Returns an error if the read would go out of the memory bounds. + pub fn read_memory_into(&self, source_addr: Pointer, dest: &mut [u8]) -> Result<()> { unsafe { // This should be safe since we don't grow up memory while caching this reference and // we give up the reference before returning from this function. let memory = self.memory_as_slice(); - let range = util::checked_range(address.into(), dest.len(), memory.len()) + let range = checked_range(source_addr.into(), dest.len(), memory.len()) .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; dest.copy_from_slice(&memory[range]); Ok(()) } } - /// Write data to a slice of memory. + /// Write data to the instance memory from a slice. /// /// Returns an error if the write would go out of the memory bounds. - pub fn write_memory_from(&self, address: Pointer, data: &[u8]) -> Result<()> { + pub fn write_memory_from(&self, dest_addr: Pointer, data: &[u8]) -> Result<()> { unsafe { // This should be safe since we don't grow up memory while caching this reference and // we give up the reference before returning from this function. let memory = self.memory_as_slice_mut(); - let range = util::checked_range(address.into(), data.len(), memory.len()) + let range = checked_range(dest_addr.into(), data.len(), memory.len()) .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - &mut memory[range].copy_from_slice(data); + memory[range].copy_from_slice(data); Ok(()) } } @@ -396,7 +345,7 @@ impl InstanceWrapper { /// to get more details. pub fn allocate( &self, - allocator: &mut sp_allocator::FreeingBumpHeapAllocator, + allocator: &mut sc_allocator::FreeingBumpHeapAllocator, size: WordSize, ) -> Result> { unsafe { @@ -413,7 +362,7 @@ impl InstanceWrapper { /// Returns `Err` in case the given memory region cannot be deallocated. pub fn deallocate( &self, - allocator: &mut sp_allocator::FreeingBumpHeapAllocator, + allocator: &mut sc_allocator::FreeingBumpHeapAllocator, ptr: Pointer, ) -> Result<()> { unsafe { @@ -460,4 +409,61 @@ impl InstanceWrapper { slice::from_raw_parts_mut(ptr, len) } } + + /// Returns the pointer to the first byte of the linear memory for this instance. + pub fn base_ptr(&self) -> *const u8 { + self.memory.data_ptr() + } + + /// Removes physical backing from the allocated linear memory. This leads to returning the + /// memory back to the system. While the memory is zeroed this is considered as a side-effect + /// and is not relied upon. Thus this function acts as a hint. + pub fn decommit(&self) { + if self.memory.data_size() == 0 { + return + } + + cfg_if::cfg_if! 
{ + if #[cfg(target_os = "linux")] { + use std::sync::Once; + + unsafe { + let ptr = self.memory.data_ptr(); + let len = self.memory.data_size(); + + // Linux handles MADV_DONTNEED reliably. The result is that the given area + // is unmapped and will be zeroed on the next pagefault. + if libc::madvise(ptr as _, len, libc::MADV_DONTNEED) != 0 { + static LOGGED: Once = Once::new(); + LOGGED.call_once(|| { + log::warn!( + "madvise(MADV_DONTNEED) failed: {}", + std::io::Error::last_os_error(), + ); + }); + } + } + } + } + } +} + +impl runtime_blob::InstanceGlobals for InstanceWrapper { + type Global = wasmtime::Global; + + fn get_global(&self, export_name: &str) -> Self::Global { + self.instance + .get_global(export_name) + .expect("get_global is guaranteed to be called with an export name of a global; qed") + } + + fn get_global_value(&self, global: &Self::Global) -> Value { + from_wasmtime_val(global.get()) + } + + fn set_global_value(&self, global: &Self::Global, value: Value) { + global.set(into_wasmtime_val(value)).expect( + "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", + ); + } } diff --git a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs deleted file mode 100644 index 42935d851d95c..0000000000000 --- a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs +++ /dev/null @@ -1,84 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::InstanceWrapper; -use sc_executor_common::error::{Result, Error}; -use sp_wasm_interface::Value; -use crate::imports::{into_value, into_wasmtime_val}; - -/// Saved value of particular exported global. -struct SavedValue { - /// Index of the export. - index: usize, - /// Global value. - value: Value, -} - -/// A snapshot of a global variables values. This snapshot can be used later for restoring the -/// values to the preserved state. -/// -/// Technically, a snapshot stores only values of mutable global variables. This is because -/// immutable global variables always have the same values. -pub struct GlobalsSnapshot(Vec); - -impl GlobalsSnapshot { - /// Take a snapshot of global variables for a given instance. - pub fn take(instance_wrapper: &InstanceWrapper) -> Result { - let data = instance_wrapper.instance - .exports() - .enumerate() - .filter_map(|(index, export)| { - if export.name().starts_with("exported_internal_global") { - export.into_global().map( - |g| SavedValue { index, value: into_value(g.get()) } - ) - } else { None } - }) - .collect::>(); - - Ok(Self(data)) - } - - /// Apply the snapshot to the given instance. - /// - /// This instance must be the same that was used for creation of this snapshot. 
- pub fn apply(&self, instance_wrapper: &InstanceWrapper) -> Result<()> { - // This is a pointer over saved items; it moves forward when the loop value below takes over its current value. - // Since both pointers (`current` and `index` below) are over ordered lists, they eventually hit all - // equal referenced values. - let mut current = 0; - for (index, export) in instance_wrapper.instance.exports().enumerate() { - if current >= self.0.len() { break; } - let current_saved = &self.0[current]; - if index < current_saved.index { continue; } - else if index > current_saved.index { current += 1; continue; } - else { - export.into_global() - .ok_or_else(|| Error::Other( - "Wrong instance in GlobalsSnapshot::apply: what should be a global is not a global.".to_string() - ))? - .set(into_wasmtime_val(current_saved.value)) - .map_err(|_e| Error::Other( - "Wrong instance in GlobalsSnapshot::apply: the saved global type does not match the applied one.".to_string() - ))?; - } - } - - Ok(()) - } -} diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index 66e4e085235ac..62b0b205f6de6 100644 --- a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -1,26 +1,33 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. - -///! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. +// along with this program. If not, see <https://www.gnu.org/licenses/>. +//! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. mod host; -mod runtime; -mod state_holder; mod imports; mod instance_wrapper; +mod runtime; +mod state_holder; mod util; -pub use runtime::create_runtime; +#[cfg(test)] +mod tests; + +pub use runtime::{ + create_runtime, create_runtime_from_artifact, prepare_runtime_artifact, Config, + DeterministicStackLimit, Semantics, +}; diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 965b067535721..f6878ec5ee6e1 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -1,146 +1,538 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// along with this program. If not, see <https://www.gnu.org/licenses/>. //! Defines the compiled Wasm runtime that uses Wasmtime internally. -use crate::host::HostState; -use crate::imports::{Imports, resolve_imports}; -use crate::instance_wrapper::{ModuleWrapper, InstanceWrapper, GlobalsSnapshot, EntryPoint}; -use crate::state_holder; +use crate::{ + host::HostState, + imports::{resolve_imports, Imports}, + instance_wrapper::{EntryPoint, InstanceWrapper}, + state_holder, }; -use std::rc::Rc; -use std::sync::Arc; +use sc_allocator::FreeingBumpHeapAllocator; use sc_executor_common::{ error::{Result, WasmError}, - wasm_runtime::{WasmModule, WasmInstance, InvokeMethod}, + runtime_blob::{DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob}, + wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, }; -use sp_allocator::FreeingBumpHeapAllocator; use sp_runtime_interface::unpack_ptr_and_len; -use sp_wasm_interface::{Function, Pointer, WordSize, Value}; -use wasmtime::{Config, Engine, Store}; +use sp_wasm_interface::{Function, Pointer, Value, WordSize}; +use std::{ + path::{Path, PathBuf}, + rc::Rc, + sync::Arc, +}; +use wasmtime::{Engine, Store}; + +enum Strategy { + FastInstanceReuse { + instance_wrapper: Rc<InstanceWrapper>, + globals_snapshot: GlobalsSnapshot<wasmtime::Global>, + data_segments_snapshot: Arc<DataSegmentsSnapshot>, + heap_base: u32, + }, + RecreateInstance(InstanceCreator), +} + +struct InstanceCreator { + store: Store, + module: Arc<wasmtime::Module>, + imports: Arc<Imports>, + heap_pages: u32, +} + +impl InstanceCreator { + fn instantiate(&self) -> Result<InstanceWrapper> { + InstanceWrapper::new(&self.store, &*self.module, &*self.imports, self.heap_pages) + } +} + +/// Data required for creating instances with the fast instance reuse strategy. +struct InstanceSnapshotData { + mutable_globals: ExposedMutableGlobalsSet, + data_segments_snapshot: Arc<DataSegmentsSnapshot>, +} /// A `WasmModule` implementation using wasmtime to compile the runtime module to machine code /// and execute the compiled code. pub struct WasmtimeRuntime { - module_wrapper: Arc<ModuleWrapper>, - heap_pages: u32, - allow_missing_func_imports: bool, + module: Arc<wasmtime::Module>, + snapshot_data: Option<InstanceSnapshotData>, + config: Config, host_functions: Vec<&'static dyn Function>, engine: Engine, } +impl WasmtimeRuntime { + /// Creates the store respecting the set limits.
+ fn new_store(&self) -> Store { + match self.config.max_memory_pages { + Some(max_memory_pages) => Store::new_with_limits( + &self.engine, + wasmtime::StoreLimitsBuilder::new().memory_pages(max_memory_pages).build(), + ), + None => Store::new(&self.engine), + } + } +} + impl WasmModule for WasmtimeRuntime { fn new_instance(&self) -> Result<Box<dyn WasmInstance>> { - let store = Store::new(&self.engine); + let store = self.new_store(); // Scan all imports, find the matching host functions, and create stubs that adapt arguments // and results. + // + // NOTE: An attentive reader may notice that this could've been moved into `WasmModule` creation. + // However, I am not sure if that's a good idea since it would be pushing our luck + // further by assuming that `Store` is not only `Send` but also `Sync`. let imports = resolve_imports( &store, - self.module_wrapper.module(), + &self.module, &self.host_functions, - self.heap_pages, - self.allow_missing_func_imports, + self.config.heap_pages, + self.config.allow_missing_func_imports, )?; - let instance_wrapper = - InstanceWrapper::new(&store, &self.module_wrapper, &imports, self.heap_pages)?; - let heap_base = instance_wrapper.extract_heap_base()?; - let globals_snapshot = GlobalsSnapshot::take(&instance_wrapper)?; - - Ok(Box::new(WasmtimeInstance { - store, - instance_wrapper: Rc::new(instance_wrapper), - module_wrapper: Arc::clone(&self.module_wrapper), - imports, - globals_snapshot, - heap_pages: self.heap_pages, - heap_base, - })) + let strategy = if let Some(ref snapshot_data) = self.snapshot_data { + let instance_wrapper = + InstanceWrapper::new(&store, &self.module, &imports, self.config.heap_pages)?; + let heap_base = instance_wrapper.extract_heap_base()?; + + // This function panics if the instance was created from a runtime blob different from + // the one the mutable globals were collected from. Here, it is easy to see that there is only + // a single runtime blob and thus it's the same that was used for both creating the + // instance and collecting the mutable globals. + let globals_snapshot = + GlobalsSnapshot::take(&snapshot_data.mutable_globals, &instance_wrapper); + + Strategy::FastInstanceReuse { + instance_wrapper: Rc::new(instance_wrapper), + globals_snapshot, + data_segments_snapshot: snapshot_data.data_segments_snapshot.clone(), + heap_base, + } + } else { + Strategy::RecreateInstance(InstanceCreator { + imports: Arc::new(imports), + module: self.module.clone(), + store, + heap_pages: self.config.heap_pages, + }) + }; + + Ok(Box::new(WasmtimeInstance { strategy })) + } } /// A `WasmInstance` implementation that reuses the compiled module and spawns instances /// to execute the compiled code.
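As a side note on `new_store` above, a hedged sketch (the page count is made up, and `engine` is assumed to be in scope) of the cap it installs when `max_memory_pages` is set; growth past the cap is what the guest observes as `memory.grow` returning -1, per the `Config` docs further down:

fn capped_store(engine: &wasmtime::Engine) -> wasmtime::Store {
    // Cap the store at 1024 wasm pages (64 KiB each, i.e. 64 MiB in total).
    wasmtime::Store::new_with_limits(
        engine,
        wasmtime::StoreLimitsBuilder::new().memory_pages(1024).build(),
    )
}

An instance created in such a store can mount its initial pages plus `heap_pages`, but no sequence of `memory.grow` calls can push the total past the cap.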
pub struct WasmtimeInstance { - store: Store, - module_wrapper: Arc<ModuleWrapper>, - instance_wrapper: Rc<InstanceWrapper>, - globals_snapshot: GlobalsSnapshot, - imports: Imports, - heap_pages: u32, - heap_base: u32, + strategy: Strategy, } // This is safe because `WasmtimeInstance` does not leak a reference to `self.imports` -// and all imports don't reference any anything, other than host functions and memory +// and all imports don't reference anything other than host functions and memory unsafe impl Send for WasmtimeInstance {} impl WasmInstance for WasmtimeInstance { fn call(&self, method: InvokeMethod, data: &[u8]) -> Result<Vec<u8>> { - let entrypoint = self.instance_wrapper.resolve_entrypoint(method)?; - let allocator = FreeingBumpHeapAllocator::new(self.heap_base); - - self.module_wrapper - .data_segments_snapshot() - .apply(|offset, contents| { - self.instance_wrapper - .write_memory_from(Pointer::new(offset), contents) - })?; - - self.globals_snapshot.apply(&*self.instance_wrapper)?; - - perform_call( - data, - Rc::clone(&self.instance_wrapper), - entrypoint, - allocator, - ) + match &self.strategy { + Strategy::FastInstanceReuse { + instance_wrapper, + globals_snapshot, + data_segments_snapshot, + heap_base, + } => { + let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + + data_segments_snapshot.apply(|offset, contents| { + instance_wrapper.write_memory_from(Pointer::new(offset), contents) + })?; + globals_snapshot.apply(&**instance_wrapper); + let allocator = FreeingBumpHeapAllocator::new(*heap_base); + + let result = + perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator); + + // Signal to the OS that we are done with the linear memory and that it can be + // reclaimed. + instance_wrapper.decommit(); + + result + }, + Strategy::RecreateInstance(instance_creator) => { + let instance_wrapper = instance_creator.instantiate()?; + let heap_base = instance_wrapper.extract_heap_base()?; + let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + + let allocator = FreeingBumpHeapAllocator::new(heap_base); + perform_call(data, Rc::new(instance_wrapper), entrypoint, allocator) + }, + } } fn get_global_const(&self, name: &str) -> Result<Option<Value>> { - let instance = InstanceWrapper::new(&self.store, &self.module_wrapper, &self.imports, self.heap_pages)?; - instance.get_global_val(name) + match &self.strategy { + Strategy::FastInstanceReuse { instance_wrapper, .. } => + instance_wrapper.get_global_val(name), + Strategy::RecreateInstance(instance_creator) => + instance_creator.instantiate()?.get_global_val(name), + } + } + + fn linear_memory_base_ptr(&self) -> Option<*const u8> { + match &self.strategy { + Strategy::RecreateInstance(_) => { + // We do not keep the wasm instance around, therefore there is no linear memory + // associated with it. + None + }, + Strategy::FastInstanceReuse { instance_wrapper, .. } => + Some(instance_wrapper.base_ptr()), + } } } +/// Prepare a directory structure and a config file to enable wasmtime caching. +/// +/// In case of an error, caching will not be enabled. +fn setup_wasmtime_caching( + cache_path: &Path, + config: &mut wasmtime::Config, +) -> std::result::Result<(), String> { + use std::fs; + + let wasmtime_cache_root = cache_path.join("wasmtime"); + fs::create_dir_all(&wasmtime_cache_root) + .map_err(|err| format!("cannot create the dirs to cache: {:?}", err))?; + + // Canonicalize the path after creating the directories.
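To tie the two strategies together, a hedged end-to-end sketch (not from this patch) of driving the runtime through the `WasmModule`/`WasmInstance` traits; the entry point name is a placeholder and `InvokeMethod::Export` is assumed to be the relevant variant of `sc_executor_common`'s `InvokeMethod`:

fn run_once(runtime: &dyn WasmModule, input: &[u8]) -> Result<Vec<u8>> {
    // With `fast_instance_reuse` this takes the snapshots once and keeps
    // the wasmtime instance; otherwise it only prepares an `InstanceCreator`.
    let instance = runtime.new_instance()?;
    // Dispatches on `Strategy`: either reapplies the data segments and the
    // mutable globals to the reused instance, or instantiates from scratch.
    instance.call(InvokeMethod::Export("placeholder_entry"), input)
}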
+ let wasmtime_cache_root = wasmtime_cache_root + .canonicalize() + .map_err(|err| format!("failed to canonicalize the path: {:?}", err))?; + + // Write the cache config file + let cache_config_path = wasmtime_cache_root.join("cache-config.toml"); + let config_content = format!( + "\ +[cache] +enabled = true +directory = \"{cache_dir}\" +", + cache_dir = wasmtime_cache_root.display() + ); + fs::write(&cache_config_path, config_content) + .map_err(|err| format!("cannot write the cache config: {:?}", err))?; + + config + .cache_config_load(cache_config_path) + .map_err(|err| format!("failed to parse the config: {:?}", err))?; + + Ok(()) +} + +fn common_config(semantics: &Semantics) -> std::result::Result<wasmtime::Config, WasmError> { + let mut config = wasmtime::Config::new(); + config.cranelift_opt_level(wasmtime::OptLevel::SpeedAndSize); + config.cranelift_nan_canonicalization(semantics.canonicalize_nans); + + if let Some(DeterministicStackLimit { native_stack_max, .. }) = + semantics.deterministic_stack_limit + { + config + .max_wasm_stack(native_stack_max as usize) + .map_err(|e| WasmError::Other(format!("cannot set max wasm stack: {}", e)))?; + } + + // Be clear and specific about the extensions we support. If an update brings new features, + // they should be introduced here as well. + config.wasm_reference_types(false); + config.wasm_simd(false); + config.wasm_bulk_memory(false); + config.wasm_multi_value(false); + config.wasm_multi_memory(false); + config.wasm_module_linking(false); + config.wasm_threads(false); + + Ok(config) +} + +/// Knobs for deterministic stack height limiting. +/// +/// The WebAssembly standard defines a call/value stack but it doesn't say anything about its +/// size except that it has to be finite. The implementations are free to choose their own notion +/// of limit: some may count the number of calls or values, others would rely on the host machine +/// stack and trap on reaching a guard page. +/// +/// This obviously is a source of non-determinism during execution. This feature can be used +/// to instrument the code so that it will count the depth of execution in some deterministic +/// way (the machine stack limit should be so high that the deterministic limit always triggers +/// first). +/// +/// The deterministic stack height limiting feature allows instrumenting the code so that it +/// counts the number of items that may be on the stack. This counting will only act as a rough +/// estimate of the actual stack limit in wasmtime. This is because wasmtime measures its stack +/// usage in bytes. +/// +/// The actual number of bytes consumed by a function is not trivial to compute without going +/// through full compilation. Therefore, it's expected that `native_stack_max` is greatly +/// overestimated and thus never reached in practice. The stack overflow check introduced by the +/// instrumentation, which relies on the logical item count, should be reached first. +/// +/// See [here][stack_height] for more details of the instrumentation. +/// +/// [stack_height]: https://github.com/paritytech/wasm-utils/blob/d9432baf/src/stack_height/mod.rs#L1-L50 +pub struct DeterministicStackLimit { + /// The number of logical "values" that can be pushed on the wasm stack. A trap will be +/// triggered if this limit is exceeded. + /// + /// A logical value is a local, an argument or a value pushed on the operand stack. + pub logical_max: u32, + /// The maximum number of bytes of stack used by wasmtime JITed code.
+ /// + /// It's not specified how many bytes will be consumed by a stack frame for a given wasm + /// function after translation into machine code. Nor is it trivial to compute. + /// + /// Therefore, this number should be chosen conservatively. It must be large enough to + /// fit the [`logical_max`](Self::logical_max) logical values on the stack, according to the + /// current instrumentation algorithm. + /// + /// This value cannot be 0. + pub native_stack_max: u32, +} + +pub struct Semantics { + /// Enabling this will lead to some optimization shenanigans that make calling [`WasmInstance`] + /// extremely fast. + /// + /// Primarily this is achieved by not recreating the instance for each call and performing a + /// bare minimum clean-up: reapplying the data segments and restoring the values for global + /// variables. The vast majority of the linear memory is not restored, meaning that effects + /// of previous executions on the same [`WasmInstance`] can be observed there. + /// + /// This is not a problem for standard Substrate runtime execution because it's up to the + /// runtime itself to make sure that it doesn't involve any non-determinism. + /// + /// Since this feature depends on instrumentation, it can be set only if the runtime is + /// instantiated using the runtime blob, e.g. using [`create_runtime`]. + // I.e. if [`CodeSupplyMode::Verbatim`] is used. + pub fast_instance_reuse: bool, + + /// Specifying `Some` will enable deterministic stack height limiting. That is, all executor + /// invocations will reach stack overflow at exactly the same point across different wasmtime + /// versions and architectures. + /// + /// This is achieved by a combination of running an instrumentation pass on the input code and + /// configuring wasmtime accordingly. + /// + /// Since this feature depends on instrumentation, it can be set only if the runtime is + /// instantiated using the runtime blob, e.g. using [`create_runtime`]. + // I.e. if [`CodeSupplyMode::Verbatim`] is used. + pub deterministic_stack_limit: Option<DeterministicStackLimit>, + + /// Controls whether wasmtime should compile floating point in a way that doesn't allow for + /// non-determinism. + /// + /// By default, the wasm spec allows some local non-determinism wrt. certain floating point + /// operations. Specifically, those operations that are not defined to operate on bits (e.g. + /// fneg) can produce NaN values. The exact bit pattern for those is not specified and may + /// depend on the particular machine that executes wasmtime-generated JITed machine code. That + /// is a source of non-deterministic values. + /// + /// The classical runtime environment for Substrate allowed it and punted this to the runtime + /// developers. For PVFs, however, we want to ensure that execution is deterministic. Therefore, + /// for PVF execution this flag is meant to be turned on. + pub canonicalize_nans: bool, +} + +pub struct Config { + /// The number of wasm pages to be mounted after instantiation. + pub heap_pages: u32, + + /// The total number of wasm pages an instance can request. + /// + /// If specified, the runtime will be able to allocate only that many wasm memory pages. + /// This is the total number, and therefore the [`heap_pages`] are accounted for. + /// + /// That means that the initial number of pages of a linear memory plus the [`heap_pages`] + /// should be less than or equal to `max_memory_pages`, otherwise the instance won't be created.
+ /// + /// Moreover, `memory.grow` will fail (return -1) if the sum of the number of currently mounted + /// pages and the number of additional pages exceeds `max_memory_pages`. + /// + /// The default is `None`. + pub max_memory_pages: Option<u32>, + + /// The WebAssembly standard requires all imports of an instantiated module to be resolved, + /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is + /// overridden and imports that are requested by the module and not provided by the host + /// functions will be resolved using stubs. These stubs will trap upon a call. + pub allow_missing_func_imports: bool, + + /// A directory in which wasmtime can store its compiled artifacts cache. + pub cache_path: Option<PathBuf>, + + /// Tuning of various semantics of the wasmtime executor. + pub semantics: Semantics, +} + +enum CodeSupplyMode<'a> { + /// The runtime is instantiated using the given runtime blob. + Verbatim { + // The rationale for taking the `RuntimeBlob` here is that the client will be able to reuse + // the blob, e.g. if they did a prevalidation. If they didn't, they can pass a `RuntimeBlob` + // instance and it will be used anyway in most cases, because we are going to do at least + // some instrumentations for both anticipated paths: Substrate execution and PVF execution. + // + // Should the need arise to perform no instrumentation and the client doesn't need + // to do any checks, then we can provide `Cow`-like semantics here: if we need the blob + // and the user got `RuntimeBlob` then extract it, or otherwise create it from the given + // bytecode. + blob: RuntimeBlob, + }, + + /// The code is supplied in the form of a compiled artifact. + /// + /// This assumes that the code is already prepared for execution and the same `Config` was + /// used. + Artifact { compiled_artifact: &'a [u8] }, +} + /// Create a new `WasmtimeRuntime` given the code. This function performs translation from Wasm to /// machine code, which can be computationally heavy. pub fn create_runtime( - code: &[u8], - heap_pages: u64, + blob: RuntimeBlob, + config: Config, + host_functions: Vec<&'static dyn Function>, +) -> std::result::Result<WasmtimeRuntime, WasmError> { + // SAFETY: this is safe because it doesn't use `CodeSupplyMode::Artifact`. + unsafe { do_create_runtime(CodeSupplyMode::Verbatim { blob }, config, host_functions) } +} + +/// The same as [`create_runtime`] but takes a precompiled artifact, which makes this function +/// considerably faster than [`create_runtime`]. +/// +/// # Safety +/// +/// The caller must ensure that the compiled artifact passed here was produced by +/// [`prepare_runtime_artifact`]. Otherwise, there is a risk of arbitrary code execution with all +/// its implications. +/// +/// It is ok, though, if the `compiled_artifact` was created by code of another version or with +/// different configuration flags. In such a case the caller will receive an `Err` deterministically. +pub unsafe fn create_runtime_from_artifact( + compiled_artifact: &[u8], + config: Config, + host_functions: Vec<&'static dyn Function>, +) -> std::result::Result<WasmtimeRuntime, WasmError> { + do_create_runtime(CodeSupplyMode::Artifact { compiled_artifact }, config, host_functions) +} + +/// # Safety +/// +/// This is only unsafe if called with [`CodeSupplyMode::Artifact`]. See +/// [`create_runtime_from_artifact`] to get more details.
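For orientation, a hedged sketch (not from this patch) of filling in `Config` and using the creation path just defined. All numbers are made-up placeholders, and `RuntimeBlob::new` is assumed to be the blob constructor exposed by `sc_executor_common::runtime_blob`:

fn make_runtime(
    wasm_code: &[u8],
    host_functions: Vec<&'static dyn Function>,
) -> std::result::Result<WasmtimeRuntime, WasmError> {
    let config = Config {
        heap_pages: 2048,             // placeholder value
        max_memory_pages: Some(4096), // total cap, heap pages included
        allow_missing_func_imports: true,
        cache_path: None,
        semantics: Semantics {
            fast_instance_reuse: true,
            deterministic_stack_limit: Some(DeterministicStackLimit {
                logical_max: 65_536,                 // placeholder value
                native_stack_max: 256 * 1024 * 1024, // placeholder value
            }),
            canonicalize_nans: false,
        },
    };
    // The slow-but-safe path: compile the (instrumented) blob in-process.
    let blob = RuntimeBlob::new(wasm_code)?; // constructor assumed, see above
    create_runtime(blob, config, host_functions)
}

The fast path would instead call `prepare_runtime_artifact(blob, &config.semantics)` once, persist the resulting bytes, and later invoke `create_runtime_from_artifact` under the safety contract documented above.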
+unsafe fn do_create_runtime( + code_supply_mode: CodeSupplyMode<'_>, + config: Config, host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, ) -> std::result::Result<WasmtimeRuntime, WasmError> { // Create the engine, store and finally the module from the given code. - let mut config = Config::new(); - config.cranelift_opt_level(wasmtime::OptLevel::SpeedAndSize); + let mut wasmtime_config = common_config(&config.semantics)?; + if let Some(ref cache_path) = config.cache_path { + if let Err(reason) = setup_wasmtime_caching(cache_path, &mut wasmtime_config) { + log::warn!( + "failed to set up wasmtime cache. Performance may degrade significantly: {}.", + reason, + ); + } + } + + let engine = Engine::new(&wasmtime_config) + .map_err(|e| WasmError::Other(format!("cannot create the engine for runtime: {}", e)))?; + + let (module, snapshot_data) = match code_supply_mode { + CodeSupplyMode::Verbatim { blob } => { + let blob = instrument(blob, &config.semantics)?; + + if config.semantics.fast_instance_reuse { + let data_segments_snapshot = DataSegmentsSnapshot::take(&blob).map_err(|e| { + WasmError::Other(format!("cannot take data segments snapshot: {}", e)) + })?; + let data_segments_snapshot = Arc::new(data_segments_snapshot); + + let mutable_globals = ExposedMutableGlobalsSet::collect(&blob); + + let module = wasmtime::Module::new(&engine, &blob.serialize()) + .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + + (module, Some(InstanceSnapshotData { data_segments_snapshot, mutable_globals })) + } else { + let module = wasmtime::Module::new(&engine, &blob.serialize()) + .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + (module, None) + } + }, + CodeSupplyMode::Artifact { compiled_artifact } => { + // SAFETY: The unsafety of `deserialize` is covered by this function. The + // responsibilities to maintain the invariants are passed to the caller. + let module = wasmtime::Module::deserialize(&engine, compiled_artifact) + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {}", e)))?; + + (module, None) + }, + }; + + Ok(WasmtimeRuntime { module: Arc::new(module), snapshot_data, config, host_functions, engine }) +} + +fn instrument( + mut blob: RuntimeBlob, + semantics: &Semantics, +) -> std::result::Result<RuntimeBlob, WasmError> { + if let Some(DeterministicStackLimit { logical_max, .. }) = semantics.deterministic_stack_limit { + blob = blob.inject_stack_depth_metering(logical_max)?; + } + + // If enabled, this should happen after all other passes that may introduce global variables. + if semantics.fast_instance_reuse { + blob.expose_mutable_globals(); + } + + Ok(blob) +} - let engine = Engine::new(&config); +/// Takes a [`RuntimeBlob`] and precompiles it, returning the serialized result of compilation. The +/// artifact can then be passed to [`create_runtime_from_artifact`], avoiding long compilation times. +pub fn prepare_runtime_artifact( + blob: RuntimeBlob, + semantics: &Semantics, +) -> std::result::Result<Vec<u8>, WasmError> { + let blob = instrument(blob, semantics)?; - let module_wrapper = ModuleWrapper::new(&engine, code) - .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + let engine = Engine::new(&common_config(semantics)?)
+ .map_err(|e| WasmError::Other(format!("cannot create the engine: {}", e)))?; - Ok(WasmtimeRuntime { - module_wrapper: Arc::new(module_wrapper), - heap_pages: heap_pages as u32, - allow_missing_func_imports, - host_functions, - engine, - }) + engine + .precompile_module(&blob.serialize()) + .map_err(|e| WasmError::Other(format!("cannot precompile module: {}", e))) } fn perform_call( diff --git a/client/executor/wasmtime/src/state_holder.rs b/client/executor/wasmtime/src/state_holder.rs index 711d3bb735d7c..0e2684cd25130 100644 --- a/client/executor/wasmtime/src/state_holder.rs +++ b/client/executor/wasmtime/src/state_holder.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/executor/wasmtime/src/test-guard-page-skip.wat b/client/executor/wasmtime/src/test-guard-page-skip.wat new file mode 100644 index 0000000000000..2f7339d45c9ef --- /dev/null +++ b/client/executor/wasmtime/src/test-guard-page-skip.wat @@ -0,0 +1,2293 @@ +;; This file is a modified version of +;; https://github.com/WebAssembly/testsuite/blob/01efde81028c5b0d099eb836645a2dc5e7755449/skip-stack-guard-page.wast +;; Licensed Apache 2.0 https://github.com/WebAssembly/testsuite/blob/01efde81028c5b0d099eb836645a2dc5e7755449/LICENSE + +;; This wasm module implements a Substrate Runtime with one entrypoint: `test-many-locals`. This +;; entrypoint does not take any parameters nor return a result. Each execution should end up with +;; a stack overflow trap. +;; +;; What it does is essentially a recursive call. The function that recurses into itself declares +;; lots of local variables. It reads memory into each local at the corresponding offset, recurses into itself +;; and then writes the contents of the locals back into the memory at the same offsets. +;; +;; The original purpose of this file in the test suite is to test skipping the guard page (hence the +;; size 256 + 4096 + 4096 = 8448 bytes, which is exactly the space the 1056 i64 locals below occupy). +;; However, what's important here is just to have an infinite recursion with +;; many locals. +;; +;; NOTE: The memory accesses are put there in an attempt to prevent the elimination of the dead locals. +;; At the moment of writing, wasmtime should be dumb enough to be tricked into thinking that the code +;; does something. + (module + (import "env" "memory" (memory 1)) + (export "test-many-locals" (func $test-many-locals)) + + ;; The heap base is chosen so that the heap doesn't overlap with the data below.
+ (global (export "__heap_base") i32 (i32.const 8448)) + + (func $test-many-locals + (param i32 i32) (result i64) + (call $function-with-many-locals) + (i64.const 0) + ) + + (func $function-with-many-locals + + ;; 1056 i64 = 8448 bytes of locals + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x000-0x007 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x008-0x00f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x010-0x017 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x018-0x01f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x020-0x027 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x028-0x02f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x030-0x037 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x038-0x03f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x040-0x047 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x048-0x04f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x050-0x057 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x058-0x05f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x060-0x067 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x068-0x06f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x070-0x077 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x078-0x07f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x080-0x087 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x088-0x08f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x090-0x097 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x098-0x09f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0a0-0x0a7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0a8-0x0af + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0b0-0x0b7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0b8-0x0bf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0c0-0x0c7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0c8-0x0cf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0d0-0x0d7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0d8-0x0df + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0e0-0x0e7 + (local i64) (local i64) 
(local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0e8-0x0ef + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0f0-0x0f7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x0f8-0x0ff + + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x100-0x107 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x108-0x10f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x110-0x117 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x118-0x11f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x120-0x127 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x128-0x12f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x130-0x137 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x138-0x13f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x140-0x147 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x148-0x14f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x150-0x157 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x158-0x15f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x160-0x167 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x168-0x16f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x170-0x177 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x178-0x17f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x180-0x187 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x188-0x18f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x190-0x197 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x198-0x19f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1a0-0x1a7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1a8-0x1af + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1b0-0x1b7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1b8-0x1bf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1c0-0x1c7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1c8-0x1cf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1d0-0x1d7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1d8-0x1df + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) 
(local i64) (local i64) ;; 0x1e0-0x1e7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1e8-0x1ef + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1f0-0x1f7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x1f8-0x1ff + + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x200-0x207 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x208-0x20f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x210-0x217 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x218-0x21f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x220-0x227 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x228-0x22f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x230-0x237 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x238-0x23f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x240-0x247 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x248-0x24f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x250-0x257 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x258-0x25f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x260-0x267 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x268-0x26f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x270-0x277 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x278-0x27f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x280-0x287 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x288-0x28f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x290-0x297 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x298-0x29f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2a0-0x2a7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2a8-0x2af + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2b0-0x2b7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2b8-0x2bf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2c0-0x2c7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2c8-0x2cf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2d0-0x2d7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2d8-0x2df + (local 
i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2e0-0x2e7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2e8-0x2ef + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2f0-0x2f7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x2f8-0x2ff + + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x300-0x307 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x308-0x30f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x310-0x317 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x318-0x31f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x320-0x327 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x328-0x32f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x330-0x337 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x338-0x33f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x340-0x347 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x348-0x34f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x350-0x357 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x358-0x35f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x360-0x367 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x368-0x36f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x370-0x377 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x378-0x37f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x380-0x387 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x388-0x38f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x390-0x397 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x398-0x39f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3a0-0x3a7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3a8-0x3af + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3b0-0x3b7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3b8-0x3bf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3c0-0x3c7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3c8-0x3cf + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3d0-0x3d7 + (local i64) (local i64) (local i64) (local i64) (local 
i64) (local i64) (local i64) (local i64) ;; 0x3d8-0x3df + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3e0-0x3e7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3e8-0x3ef + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3f0-0x3f7 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x3f8-0x3ff + + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x400-0x407 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x408-0x40f + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x410-0x417 + (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) (local i64) ;; 0x418-0x41f + + ;; recurse first to try to make the callee access the stack below the space allocated for the locals before the locals themselves have been initialized. + (call $function-with-many-locals) + + ;; load from memory into the locals. + (local.set 0x000 (i64.load offset=0x000 align=1 (i32.const 0))) + (local.set 0x001 (i64.load offset=0x001 align=1 (i32.const 0))) + (local.set 0x002 (i64.load offset=0x002 align=1 (i32.const 0))) + (local.set 0x003 (i64.load offset=0x003 align=1 (i32.const 0))) + (local.set 0x004 (i64.load offset=0x004 align=1 (i32.const 0))) + (local.set 0x005 (i64.load offset=0x005 align=1 (i32.const 0))) + (local.set 0x006 (i64.load offset=0x006 align=1 (i32.const 0))) + (local.set 0x007 (i64.load offset=0x007 align=1 (i32.const 0))) + (local.set 0x008 (i64.load offset=0x008 align=1 (i32.const 0))) + (local.set 0x009 (i64.load offset=0x009 align=1 (i32.const 0))) + (local.set 0x00a (i64.load offset=0x00a align=1 (i32.const 0))) + (local.set 0x00b (i64.load offset=0x00b align=1 (i32.const 0))) + (local.set 0x00c (i64.load offset=0x00c align=1 (i32.const 0))) + (local.set 0x00d (i64.load offset=0x00d align=1 (i32.const 0))) + (local.set 0x00e (i64.load offset=0x00e align=1 (i32.const 0))) + (local.set 0x00f (i64.load offset=0x00f align=1 (i32.const 0))) + (local.set 0x010 (i64.load offset=0x010 align=1 (i32.const 0))) + (local.set 0x011 (i64.load offset=0x011 align=1 (i32.const 0))) + (local.set 0x012 (i64.load offset=0x012 align=1 (i32.const 0))) + (local.set 0x013 (i64.load offset=0x013 align=1 (i32.const 0))) + (local.set 0x014 (i64.load offset=0x014 align=1 (i32.const 0))) + (local.set 0x015 (i64.load offset=0x015 align=1 (i32.const 0))) + (local.set 0x016 (i64.load offset=0x016 align=1 (i32.const 0))) + (local.set 0x017 (i64.load offset=0x017 align=1 (i32.const 0))) + (local.set 0x018 (i64.load offset=0x018 align=1 (i32.const 0))) + (local.set 0x019 (i64.load offset=0x019 align=1 (i32.const 0))) + (local.set 0x01a (i64.load offset=0x01a align=1 (i32.const 0))) + (local.set 0x01b (i64.load offset=0x01b align=1 (i32.const 0))) + (local.set 0x01c (i64.load offset=0x01c align=1 (i32.const 0))) + (local.set 0x01d (i64.load offset=0x01d align=1 (i32.const 0))) + (local.set 0x01e (i64.load offset=0x01e align=1 (i32.const 0))) + (local.set 0x01f (i64.load offset=0x01f align=1 (i32.const 0))) + (local.set 0x020 (i64.load offset=0x020 align=1 (i32.const 0))) + (local.set 0x021 (i64.load offset=0x021 align=1 (i32.const 0))) + (local.set 0x022 (i64.load offset=0x022 align=1 (i32.const 0))) + (local.set 0x023 (i64.load offset=0x023 
align=1 (i32.const 0))) + (local.set 0x024 (i64.load offset=0x024 align=1 (i32.const 0))) + (local.set 0x025 (i64.load offset=0x025 align=1 (i32.const 0))) + (local.set 0x026 (i64.load offset=0x026 align=1 (i32.const 0))) + (local.set 0x027 (i64.load offset=0x027 align=1 (i32.const 0))) + (local.set 0x028 (i64.load offset=0x028 align=1 (i32.const 0))) + (local.set 0x029 (i64.load offset=0x029 align=1 (i32.const 0))) + (local.set 0x02a (i64.load offset=0x02a align=1 (i32.const 0))) + (local.set 0x02b (i64.load offset=0x02b align=1 (i32.const 0))) + (local.set 0x02c (i64.load offset=0x02c align=1 (i32.const 0))) + (local.set 0x02d (i64.load offset=0x02d align=1 (i32.const 0))) + (local.set 0x02e (i64.load offset=0x02e align=1 (i32.const 0))) + (local.set 0x02f (i64.load offset=0x02f align=1 (i32.const 0))) + (local.set 0x030 (i64.load offset=0x030 align=1 (i32.const 0))) + (local.set 0x031 (i64.load offset=0x031 align=1 (i32.const 0))) + (local.set 0x032 (i64.load offset=0x032 align=1 (i32.const 0))) + (local.set 0x033 (i64.load offset=0x033 align=1 (i32.const 0))) + (local.set 0x034 (i64.load offset=0x034 align=1 (i32.const 0))) + (local.set 0x035 (i64.load offset=0x035 align=1 (i32.const 0))) + (local.set 0x036 (i64.load offset=0x036 align=1 (i32.const 0))) + (local.set 0x037 (i64.load offset=0x037 align=1 (i32.const 0))) + (local.set 0x038 (i64.load offset=0x038 align=1 (i32.const 0))) + (local.set 0x039 (i64.load offset=0x039 align=1 (i32.const 0))) + (local.set 0x03a (i64.load offset=0x03a align=1 (i32.const 0))) + (local.set 0x03b (i64.load offset=0x03b align=1 (i32.const 0))) + (local.set 0x03c (i64.load offset=0x03c align=1 (i32.const 0))) + (local.set 0x03d (i64.load offset=0x03d align=1 (i32.const 0))) + (local.set 0x03e (i64.load offset=0x03e align=1 (i32.const 0))) + (local.set 0x03f (i64.load offset=0x03f align=1 (i32.const 0))) + (local.set 0x040 (i64.load offset=0x040 align=1 (i32.const 0))) + (local.set 0x041 (i64.load offset=0x041 align=1 (i32.const 0))) + (local.set 0x042 (i64.load offset=0x042 align=1 (i32.const 0))) + (local.set 0x043 (i64.load offset=0x043 align=1 (i32.const 0))) + (local.set 0x044 (i64.load offset=0x044 align=1 (i32.const 0))) + (local.set 0x045 (i64.load offset=0x045 align=1 (i32.const 0))) + (local.set 0x046 (i64.load offset=0x046 align=1 (i32.const 0))) + (local.set 0x047 (i64.load offset=0x047 align=1 (i32.const 0))) + (local.set 0x048 (i64.load offset=0x048 align=1 (i32.const 0))) + (local.set 0x049 (i64.load offset=0x049 align=1 (i32.const 0))) + (local.set 0x04a (i64.load offset=0x04a align=1 (i32.const 0))) + (local.set 0x04b (i64.load offset=0x04b align=1 (i32.const 0))) + (local.set 0x04c (i64.load offset=0x04c align=1 (i32.const 0))) + (local.set 0x04d (i64.load offset=0x04d align=1 (i32.const 0))) + (local.set 0x04e (i64.load offset=0x04e align=1 (i32.const 0))) + (local.set 0x04f (i64.load offset=0x04f align=1 (i32.const 0))) + (local.set 0x050 (i64.load offset=0x050 align=1 (i32.const 0))) + (local.set 0x051 (i64.load offset=0x051 align=1 (i32.const 0))) + (local.set 0x052 (i64.load offset=0x052 align=1 (i32.const 0))) + (local.set 0x053 (i64.load offset=0x053 align=1 (i32.const 0))) + (local.set 0x054 (i64.load offset=0x054 align=1 (i32.const 0))) + (local.set 0x055 (i64.load offset=0x055 align=1 (i32.const 0))) + (local.set 0x056 (i64.load offset=0x056 align=1 (i32.const 0))) + (local.set 0x057 (i64.load offset=0x057 align=1 (i32.const 0))) + (local.set 0x058 (i64.load offset=0x058 align=1 (i32.const 0))) + (local.set 0x059 (i64.load 
offset=0x059 align=1 (i32.const 0))) + (local.set 0x05a (i64.load offset=0x05a align=1 (i32.const 0))) + (local.set 0x05b (i64.load offset=0x05b align=1 (i32.const 0))) + (local.set 0x05c (i64.load offset=0x05c align=1 (i32.const 0))) + (local.set 0x05d (i64.load offset=0x05d align=1 (i32.const 0))) + (local.set 0x05e (i64.load offset=0x05e align=1 (i32.const 0))) + (local.set 0x05f (i64.load offset=0x05f align=1 (i32.const 0))) + (local.set 0x060 (i64.load offset=0x060 align=1 (i32.const 0))) + (local.set 0x061 (i64.load offset=0x061 align=1 (i32.const 0))) + (local.set 0x062 (i64.load offset=0x062 align=1 (i32.const 0))) + (local.set 0x063 (i64.load offset=0x063 align=1 (i32.const 0))) + (local.set 0x064 (i64.load offset=0x064 align=1 (i32.const 0))) + (local.set 0x065 (i64.load offset=0x065 align=1 (i32.const 0))) + (local.set 0x066 (i64.load offset=0x066 align=1 (i32.const 0))) + (local.set 0x067 (i64.load offset=0x067 align=1 (i32.const 0))) + (local.set 0x068 (i64.load offset=0x068 align=1 (i32.const 0))) + (local.set 0x069 (i64.load offset=0x069 align=1 (i32.const 0))) + (local.set 0x06a (i64.load offset=0x06a align=1 (i32.const 0))) + (local.set 0x06b (i64.load offset=0x06b align=1 (i32.const 0))) + (local.set 0x06c (i64.load offset=0x06c align=1 (i32.const 0))) + (local.set 0x06d (i64.load offset=0x06d align=1 (i32.const 0))) + (local.set 0x06e (i64.load offset=0x06e align=1 (i32.const 0))) + (local.set 0x06f (i64.load offset=0x06f align=1 (i32.const 0))) + (local.set 0x070 (i64.load offset=0x070 align=1 (i32.const 0))) + (local.set 0x071 (i64.load offset=0x071 align=1 (i32.const 0))) + (local.set 0x072 (i64.load offset=0x072 align=1 (i32.const 0))) + (local.set 0x073 (i64.load offset=0x073 align=1 (i32.const 0))) + (local.set 0x074 (i64.load offset=0x074 align=1 (i32.const 0))) + (local.set 0x075 (i64.load offset=0x075 align=1 (i32.const 0))) + (local.set 0x076 (i64.load offset=0x076 align=1 (i32.const 0))) + (local.set 0x077 (i64.load offset=0x077 align=1 (i32.const 0))) + (local.set 0x078 (i64.load offset=0x078 align=1 (i32.const 0))) + (local.set 0x079 (i64.load offset=0x079 align=1 (i32.const 0))) + (local.set 0x07a (i64.load offset=0x07a align=1 (i32.const 0))) + (local.set 0x07b (i64.load offset=0x07b align=1 (i32.const 0))) + (local.set 0x07c (i64.load offset=0x07c align=1 (i32.const 0))) + (local.set 0x07d (i64.load offset=0x07d align=1 (i32.const 0))) + (local.set 0x07e (i64.load offset=0x07e align=1 (i32.const 0))) + (local.set 0x07f (i64.load offset=0x07f align=1 (i32.const 0))) + (local.set 0x080 (i64.load offset=0x080 align=1 (i32.const 0))) + (local.set 0x081 (i64.load offset=0x081 align=1 (i32.const 0))) + (local.set 0x082 (i64.load offset=0x082 align=1 (i32.const 0))) + (local.set 0x083 (i64.load offset=0x083 align=1 (i32.const 0))) + (local.set 0x084 (i64.load offset=0x084 align=1 (i32.const 0))) + (local.set 0x085 (i64.load offset=0x085 align=1 (i32.const 0))) + (local.set 0x086 (i64.load offset=0x086 align=1 (i32.const 0))) + (local.set 0x087 (i64.load offset=0x087 align=1 (i32.const 0))) + (local.set 0x088 (i64.load offset=0x088 align=1 (i32.const 0))) + (local.set 0x089 (i64.load offset=0x089 align=1 (i32.const 0))) + (local.set 0x08a (i64.load offset=0x08a align=1 (i32.const 0))) + (local.set 0x08b (i64.load offset=0x08b align=1 (i32.const 0))) + (local.set 0x08c (i64.load offset=0x08c align=1 (i32.const 0))) + (local.set 0x08d (i64.load offset=0x08d align=1 (i32.const 0))) + (local.set 0x08e (i64.load offset=0x08e align=1 (i32.const 0))) + (local.set 0x08f 
(i64.load offset=0x08f align=1 (i32.const 0))) + (local.set 0x090 (i64.load offset=0x090 align=1 (i32.const 0))) + (local.set 0x091 (i64.load offset=0x091 align=1 (i32.const 0))) + (local.set 0x092 (i64.load offset=0x092 align=1 (i32.const 0))) + (local.set 0x093 (i64.load offset=0x093 align=1 (i32.const 0))) + (local.set 0x094 (i64.load offset=0x094 align=1 (i32.const 0))) + (local.set 0x095 (i64.load offset=0x095 align=1 (i32.const 0))) + (local.set 0x096 (i64.load offset=0x096 align=1 (i32.const 0))) + (local.set 0x097 (i64.load offset=0x097 align=1 (i32.const 0))) + (local.set 0x098 (i64.load offset=0x098 align=1 (i32.const 0))) + (local.set 0x099 (i64.load offset=0x099 align=1 (i32.const 0))) + (local.set 0x09a (i64.load offset=0x09a align=1 (i32.const 0))) + (local.set 0x09b (i64.load offset=0x09b align=1 (i32.const 0))) + (local.set 0x09c (i64.load offset=0x09c align=1 (i32.const 0))) + (local.set 0x09d (i64.load offset=0x09d align=1 (i32.const 0))) + (local.set 0x09e (i64.load offset=0x09e align=1 (i32.const 0))) + (local.set 0x09f (i64.load offset=0x09f align=1 (i32.const 0))) + (local.set 0x0a0 (i64.load offset=0x0a0 align=1 (i32.const 0))) + (local.set 0x0a1 (i64.load offset=0x0a1 align=1 (i32.const 0))) + (local.set 0x0a2 (i64.load offset=0x0a2 align=1 (i32.const 0))) + (local.set 0x0a3 (i64.load offset=0x0a3 align=1 (i32.const 0))) + (local.set 0x0a4 (i64.load offset=0x0a4 align=1 (i32.const 0))) + (local.set 0x0a5 (i64.load offset=0x0a5 align=1 (i32.const 0))) + (local.set 0x0a6 (i64.load offset=0x0a6 align=1 (i32.const 0))) + (local.set 0x0a7 (i64.load offset=0x0a7 align=1 (i32.const 0))) + (local.set 0x0a8 (i64.load offset=0x0a8 align=1 (i32.const 0))) + (local.set 0x0a9 (i64.load offset=0x0a9 align=1 (i32.const 0))) + (local.set 0x0aa (i64.load offset=0x0aa align=1 (i32.const 0))) + (local.set 0x0ab (i64.load offset=0x0ab align=1 (i32.const 0))) + (local.set 0x0ac (i64.load offset=0x0ac align=1 (i32.const 0))) + (local.set 0x0ad (i64.load offset=0x0ad align=1 (i32.const 0))) + (local.set 0x0ae (i64.load offset=0x0ae align=1 (i32.const 0))) + (local.set 0x0af (i64.load offset=0x0af align=1 (i32.const 0))) + (local.set 0x0b0 (i64.load offset=0x0b0 align=1 (i32.const 0))) + (local.set 0x0b1 (i64.load offset=0x0b1 align=1 (i32.const 0))) + (local.set 0x0b2 (i64.load offset=0x0b2 align=1 (i32.const 0))) + (local.set 0x0b3 (i64.load offset=0x0b3 align=1 (i32.const 0))) + (local.set 0x0b4 (i64.load offset=0x0b4 align=1 (i32.const 0))) + (local.set 0x0b5 (i64.load offset=0x0b5 align=1 (i32.const 0))) + (local.set 0x0b6 (i64.load offset=0x0b6 align=1 (i32.const 0))) + (local.set 0x0b7 (i64.load offset=0x0b7 align=1 (i32.const 0))) + (local.set 0x0b8 (i64.load offset=0x0b8 align=1 (i32.const 0))) + (local.set 0x0b9 (i64.load offset=0x0b9 align=1 (i32.const 0))) + (local.set 0x0ba (i64.load offset=0x0ba align=1 (i32.const 0))) + (local.set 0x0bb (i64.load offset=0x0bb align=1 (i32.const 0))) + (local.set 0x0bc (i64.load offset=0x0bc align=1 (i32.const 0))) + (local.set 0x0bd (i64.load offset=0x0bd align=1 (i32.const 0))) + (local.set 0x0be (i64.load offset=0x0be align=1 (i32.const 0))) + (local.set 0x0bf (i64.load offset=0x0bf align=1 (i32.const 0))) + (local.set 0x0c0 (i64.load offset=0x0c0 align=1 (i32.const 0))) + (local.set 0x0c1 (i64.load offset=0x0c1 align=1 (i32.const 0))) + (local.set 0x0c2 (i64.load offset=0x0c2 align=1 (i32.const 0))) + (local.set 0x0c3 (i64.load offset=0x0c3 align=1 (i32.const 0))) + (local.set 0x0c4 (i64.load offset=0x0c4 align=1 (i32.const 0))) + 
(local.set 0x0c5 (i64.load offset=0x0c5 align=1 (i32.const 0))) + (local.set 0x0c6 (i64.load offset=0x0c6 align=1 (i32.const 0))) + (local.set 0x0c7 (i64.load offset=0x0c7 align=1 (i32.const 0))) + (local.set 0x0c8 (i64.load offset=0x0c8 align=1 (i32.const 0))) + (local.set 0x0c9 (i64.load offset=0x0c9 align=1 (i32.const 0))) + (local.set 0x0ca (i64.load offset=0x0ca align=1 (i32.const 0))) + (local.set 0x0cb (i64.load offset=0x0cb align=1 (i32.const 0))) + (local.set 0x0cc (i64.load offset=0x0cc align=1 (i32.const 0))) + (local.set 0x0cd (i64.load offset=0x0cd align=1 (i32.const 0))) + (local.set 0x0ce (i64.load offset=0x0ce align=1 (i32.const 0))) + (local.set 0x0cf (i64.load offset=0x0cf align=1 (i32.const 0))) + (local.set 0x0d0 (i64.load offset=0x0d0 align=1 (i32.const 0))) + (local.set 0x0d1 (i64.load offset=0x0d1 align=1 (i32.const 0))) + (local.set 0x0d2 (i64.load offset=0x0d2 align=1 (i32.const 0))) + (local.set 0x0d3 (i64.load offset=0x0d3 align=1 (i32.const 0))) + (local.set 0x0d4 (i64.load offset=0x0d4 align=1 (i32.const 0))) + (local.set 0x0d5 (i64.load offset=0x0d5 align=1 (i32.const 0))) + (local.set 0x0d6 (i64.load offset=0x0d6 align=1 (i32.const 0))) + (local.set 0x0d7 (i64.load offset=0x0d7 align=1 (i32.const 0))) + (local.set 0x0d8 (i64.load offset=0x0d8 align=1 (i32.const 0))) + (local.set 0x0d9 (i64.load offset=0x0d9 align=1 (i32.const 0))) + (local.set 0x0da (i64.load offset=0x0da align=1 (i32.const 0))) + (local.set 0x0db (i64.load offset=0x0db align=1 (i32.const 0))) + (local.set 0x0dc (i64.load offset=0x0dc align=1 (i32.const 0))) + (local.set 0x0dd (i64.load offset=0x0dd align=1 (i32.const 0))) + (local.set 0x0de (i64.load offset=0x0de align=1 (i32.const 0))) + (local.set 0x0df (i64.load offset=0x0df align=1 (i32.const 0))) + (local.set 0x0e0 (i64.load offset=0x0e0 align=1 (i32.const 0))) + (local.set 0x0e1 (i64.load offset=0x0e1 align=1 (i32.const 0))) + (local.set 0x0e2 (i64.load offset=0x0e2 align=1 (i32.const 0))) + (local.set 0x0e3 (i64.load offset=0x0e3 align=1 (i32.const 0))) + (local.set 0x0e4 (i64.load offset=0x0e4 align=1 (i32.const 0))) + (local.set 0x0e5 (i64.load offset=0x0e5 align=1 (i32.const 0))) + (local.set 0x0e6 (i64.load offset=0x0e6 align=1 (i32.const 0))) + (local.set 0x0e7 (i64.load offset=0x0e7 align=1 (i32.const 0))) + (local.set 0x0e8 (i64.load offset=0x0e8 align=1 (i32.const 0))) + (local.set 0x0e9 (i64.load offset=0x0e9 align=1 (i32.const 0))) + (local.set 0x0ea (i64.load offset=0x0ea align=1 (i32.const 0))) + (local.set 0x0eb (i64.load offset=0x0eb align=1 (i32.const 0))) + (local.set 0x0ec (i64.load offset=0x0ec align=1 (i32.const 0))) + (local.set 0x0ed (i64.load offset=0x0ed align=1 (i32.const 0))) + (local.set 0x0ee (i64.load offset=0x0ee align=1 (i32.const 0))) + (local.set 0x0ef (i64.load offset=0x0ef align=1 (i32.const 0))) + (local.set 0x0f0 (i64.load offset=0x0f0 align=1 (i32.const 0))) + (local.set 0x0f1 (i64.load offset=0x0f1 align=1 (i32.const 0))) + (local.set 0x0f2 (i64.load offset=0x0f2 align=1 (i32.const 0))) + (local.set 0x0f3 (i64.load offset=0x0f3 align=1 (i32.const 0))) + (local.set 0x0f4 (i64.load offset=0x0f4 align=1 (i32.const 0))) + (local.set 0x0f5 (i64.load offset=0x0f5 align=1 (i32.const 0))) + (local.set 0x0f6 (i64.load offset=0x0f6 align=1 (i32.const 0))) + (local.set 0x0f7 (i64.load offset=0x0f7 align=1 (i32.const 0))) + (local.set 0x0f8 (i64.load offset=0x0f8 align=1 (i32.const 0))) + (local.set 0x0f9 (i64.load offset=0x0f9 align=1 (i32.const 0))) + (local.set 0x0fa (i64.load offset=0x0fa align=1 
(i32.const 0))) + (local.set 0x0fb (i64.load offset=0x0fb align=1 (i32.const 0))) + (local.set 0x0fc (i64.load offset=0x0fc align=1 (i32.const 0))) + (local.set 0x0fd (i64.load offset=0x0fd align=1 (i32.const 0))) + (local.set 0x0fe (i64.load offset=0x0fe align=1 (i32.const 0))) + (local.set 0x0ff (i64.load offset=0x0ff align=1 (i32.const 0))) + (local.set 0x100 (i64.load offset=0x100 align=1 (i32.const 0))) + (local.set 0x101 (i64.load offset=0x101 align=1 (i32.const 0))) + (local.set 0x102 (i64.load offset=0x102 align=1 (i32.const 0))) + (local.set 0x103 (i64.load offset=0x103 align=1 (i32.const 0))) + (local.set 0x104 (i64.load offset=0x104 align=1 (i32.const 0))) + (local.set 0x105 (i64.load offset=0x105 align=1 (i32.const 0))) + (local.set 0x106 (i64.load offset=0x106 align=1 (i32.const 0))) + (local.set 0x107 (i64.load offset=0x107 align=1 (i32.const 0))) + (local.set 0x108 (i64.load offset=0x108 align=1 (i32.const 0))) + (local.set 0x109 (i64.load offset=0x109 align=1 (i32.const 0))) + (local.set 0x10a (i64.load offset=0x10a align=1 (i32.const 0))) + (local.set 0x10b (i64.load offset=0x10b align=1 (i32.const 0))) + (local.set 0x10c (i64.load offset=0x10c align=1 (i32.const 0))) + (local.set 0x10d (i64.load offset=0x10d align=1 (i32.const 0))) + (local.set 0x10e (i64.load offset=0x10e align=1 (i32.const 0))) + (local.set 0x10f (i64.load offset=0x10f align=1 (i32.const 0))) + (local.set 0x110 (i64.load offset=0x110 align=1 (i32.const 0))) + (local.set 0x111 (i64.load offset=0x111 align=1 (i32.const 0))) + (local.set 0x112 (i64.load offset=0x112 align=1 (i32.const 0))) + (local.set 0x113 (i64.load offset=0x113 align=1 (i32.const 0))) + (local.set 0x114 (i64.load offset=0x114 align=1 (i32.const 0))) + (local.set 0x115 (i64.load offset=0x115 align=1 (i32.const 0))) + (local.set 0x116 (i64.load offset=0x116 align=1 (i32.const 0))) + (local.set 0x117 (i64.load offset=0x117 align=1 (i32.const 0))) + (local.set 0x118 (i64.load offset=0x118 align=1 (i32.const 0))) + (local.set 0x119 (i64.load offset=0x119 align=1 (i32.const 0))) + (local.set 0x11a (i64.load offset=0x11a align=1 (i32.const 0))) + (local.set 0x11b (i64.load offset=0x11b align=1 (i32.const 0))) + (local.set 0x11c (i64.load offset=0x11c align=1 (i32.const 0))) + (local.set 0x11d (i64.load offset=0x11d align=1 (i32.const 0))) + (local.set 0x11e (i64.load offset=0x11e align=1 (i32.const 0))) + (local.set 0x11f (i64.load offset=0x11f align=1 (i32.const 0))) + (local.set 0x120 (i64.load offset=0x120 align=1 (i32.const 0))) + (local.set 0x121 (i64.load offset=0x121 align=1 (i32.const 0))) + (local.set 0x122 (i64.load offset=0x122 align=1 (i32.const 0))) + (local.set 0x123 (i64.load offset=0x123 align=1 (i32.const 0))) + (local.set 0x124 (i64.load offset=0x124 align=1 (i32.const 0))) + (local.set 0x125 (i64.load offset=0x125 align=1 (i32.const 0))) + (local.set 0x126 (i64.load offset=0x126 align=1 (i32.const 0))) + (local.set 0x127 (i64.load offset=0x127 align=1 (i32.const 0))) + (local.set 0x128 (i64.load offset=0x128 align=1 (i32.const 0))) + (local.set 0x129 (i64.load offset=0x129 align=1 (i32.const 0))) + (local.set 0x12a (i64.load offset=0x12a align=1 (i32.const 0))) + (local.set 0x12b (i64.load offset=0x12b align=1 (i32.const 0))) + (local.set 0x12c (i64.load offset=0x12c align=1 (i32.const 0))) + (local.set 0x12d (i64.load offset=0x12d align=1 (i32.const 0))) + (local.set 0x12e (i64.load offset=0x12e align=1 (i32.const 0))) + (local.set 0x12f (i64.load offset=0x12f align=1 (i32.const 0))) + (local.set 0x130 (i64.load 
offset=0x130 align=1 (i32.const 0))) + (local.set 0x131 (i64.load offset=0x131 align=1 (i32.const 0))) + (local.set 0x132 (i64.load offset=0x132 align=1 (i32.const 0))) + (local.set 0x133 (i64.load offset=0x133 align=1 (i32.const 0))) + (local.set 0x134 (i64.load offset=0x134 align=1 (i32.const 0))) + (local.set 0x135 (i64.load offset=0x135 align=1 (i32.const 0))) + (local.set 0x136 (i64.load offset=0x136 align=1 (i32.const 0))) + (local.set 0x137 (i64.load offset=0x137 align=1 (i32.const 0))) + (local.set 0x138 (i64.load offset=0x138 align=1 (i32.const 0))) + (local.set 0x139 (i64.load offset=0x139 align=1 (i32.const 0))) + (local.set 0x13a (i64.load offset=0x13a align=1 (i32.const 0))) + (local.set 0x13b (i64.load offset=0x13b align=1 (i32.const 0))) + (local.set 0x13c (i64.load offset=0x13c align=1 (i32.const 0))) + (local.set 0x13d (i64.load offset=0x13d align=1 (i32.const 0))) + (local.set 0x13e (i64.load offset=0x13e align=1 (i32.const 0))) + (local.set 0x13f (i64.load offset=0x13f align=1 (i32.const 0))) + (local.set 0x140 (i64.load offset=0x140 align=1 (i32.const 0))) + (local.set 0x141 (i64.load offset=0x141 align=1 (i32.const 0))) + (local.set 0x142 (i64.load offset=0x142 align=1 (i32.const 0))) + (local.set 0x143 (i64.load offset=0x143 align=1 (i32.const 0))) + (local.set 0x144 (i64.load offset=0x144 align=1 (i32.const 0))) + (local.set 0x145 (i64.load offset=0x145 align=1 (i32.const 0))) + (local.set 0x146 (i64.load offset=0x146 align=1 (i32.const 0))) + (local.set 0x147 (i64.load offset=0x147 align=1 (i32.const 0))) + (local.set 0x148 (i64.load offset=0x148 align=1 (i32.const 0))) + (local.set 0x149 (i64.load offset=0x149 align=1 (i32.const 0))) + (local.set 0x14a (i64.load offset=0x14a align=1 (i32.const 0))) + (local.set 0x14b (i64.load offset=0x14b align=1 (i32.const 0))) + (local.set 0x14c (i64.load offset=0x14c align=1 (i32.const 0))) + (local.set 0x14d (i64.load offset=0x14d align=1 (i32.const 0))) + (local.set 0x14e (i64.load offset=0x14e align=1 (i32.const 0))) + (local.set 0x14f (i64.load offset=0x14f align=1 (i32.const 0))) + (local.set 0x150 (i64.load offset=0x150 align=1 (i32.const 0))) + (local.set 0x151 (i64.load offset=0x151 align=1 (i32.const 0))) + (local.set 0x152 (i64.load offset=0x152 align=1 (i32.const 0))) + (local.set 0x153 (i64.load offset=0x153 align=1 (i32.const 0))) + (local.set 0x154 (i64.load offset=0x154 align=1 (i32.const 0))) + (local.set 0x155 (i64.load offset=0x155 align=1 (i32.const 0))) + (local.set 0x156 (i64.load offset=0x156 align=1 (i32.const 0))) + (local.set 0x157 (i64.load offset=0x157 align=1 (i32.const 0))) + (local.set 0x158 (i64.load offset=0x158 align=1 (i32.const 0))) + (local.set 0x159 (i64.load offset=0x159 align=1 (i32.const 0))) + (local.set 0x15a (i64.load offset=0x15a align=1 (i32.const 0))) + (local.set 0x15b (i64.load offset=0x15b align=1 (i32.const 0))) + (local.set 0x15c (i64.load offset=0x15c align=1 (i32.const 0))) + (local.set 0x15d (i64.load offset=0x15d align=1 (i32.const 0))) + (local.set 0x15e (i64.load offset=0x15e align=1 (i32.const 0))) + (local.set 0x15f (i64.load offset=0x15f align=1 (i32.const 0))) + (local.set 0x160 (i64.load offset=0x160 align=1 (i32.const 0))) + (local.set 0x161 (i64.load offset=0x161 align=1 (i32.const 0))) + (local.set 0x162 (i64.load offset=0x162 align=1 (i32.const 0))) + (local.set 0x163 (i64.load offset=0x163 align=1 (i32.const 0))) + (local.set 0x164 (i64.load offset=0x164 align=1 (i32.const 0))) + (local.set 0x165 (i64.load offset=0x165 align=1 (i32.const 0))) + (local.set 0x166 
(i64.load offset=0x166 align=1 (i32.const 0))) + (local.set 0x167 (i64.load offset=0x167 align=1 (i32.const 0))) + (local.set 0x168 (i64.load offset=0x168 align=1 (i32.const 0))) + (local.set 0x169 (i64.load offset=0x169 align=1 (i32.const 0))) + (local.set 0x16a (i64.load offset=0x16a align=1 (i32.const 0))) + (local.set 0x16b (i64.load offset=0x16b align=1 (i32.const 0))) + (local.set 0x16c (i64.load offset=0x16c align=1 (i32.const 0))) + (local.set 0x16d (i64.load offset=0x16d align=1 (i32.const 0))) + (local.set 0x16e (i64.load offset=0x16e align=1 (i32.const 0))) + (local.set 0x16f (i64.load offset=0x16f align=1 (i32.const 0))) + (local.set 0x170 (i64.load offset=0x170 align=1 (i32.const 0))) + (local.set 0x171 (i64.load offset=0x171 align=1 (i32.const 0))) + (local.set 0x172 (i64.load offset=0x172 align=1 (i32.const 0))) + (local.set 0x173 (i64.load offset=0x173 align=1 (i32.const 0))) + (local.set 0x174 (i64.load offset=0x174 align=1 (i32.const 0))) + (local.set 0x175 (i64.load offset=0x175 align=1 (i32.const 0))) + (local.set 0x176 (i64.load offset=0x176 align=1 (i32.const 0))) + (local.set 0x177 (i64.load offset=0x177 align=1 (i32.const 0))) + (local.set 0x178 (i64.load offset=0x178 align=1 (i32.const 0))) + (local.set 0x179 (i64.load offset=0x179 align=1 (i32.const 0))) + (local.set 0x17a (i64.load offset=0x17a align=1 (i32.const 0))) + (local.set 0x17b (i64.load offset=0x17b align=1 (i32.const 0))) + (local.set 0x17c (i64.load offset=0x17c align=1 (i32.const 0))) + (local.set 0x17d (i64.load offset=0x17d align=1 (i32.const 0))) + (local.set 0x17e (i64.load offset=0x17e align=1 (i32.const 0))) + (local.set 0x17f (i64.load offset=0x17f align=1 (i32.const 0))) + (local.set 0x180 (i64.load offset=0x180 align=1 (i32.const 0))) + (local.set 0x181 (i64.load offset=0x181 align=1 (i32.const 0))) + (local.set 0x182 (i64.load offset=0x182 align=1 (i32.const 0))) + (local.set 0x183 (i64.load offset=0x183 align=1 (i32.const 0))) + (local.set 0x184 (i64.load offset=0x184 align=1 (i32.const 0))) + (local.set 0x185 (i64.load offset=0x185 align=1 (i32.const 0))) + (local.set 0x186 (i64.load offset=0x186 align=1 (i32.const 0))) + (local.set 0x187 (i64.load offset=0x187 align=1 (i32.const 0))) + (local.set 0x188 (i64.load offset=0x188 align=1 (i32.const 0))) + (local.set 0x189 (i64.load offset=0x189 align=1 (i32.const 0))) + (local.set 0x18a (i64.load offset=0x18a align=1 (i32.const 0))) + (local.set 0x18b (i64.load offset=0x18b align=1 (i32.const 0))) + (local.set 0x18c (i64.load offset=0x18c align=1 (i32.const 0))) + (local.set 0x18d (i64.load offset=0x18d align=1 (i32.const 0))) + (local.set 0x18e (i64.load offset=0x18e align=1 (i32.const 0))) + (local.set 0x18f (i64.load offset=0x18f align=1 (i32.const 0))) + (local.set 0x190 (i64.load offset=0x190 align=1 (i32.const 0))) + (local.set 0x191 (i64.load offset=0x191 align=1 (i32.const 0))) + (local.set 0x192 (i64.load offset=0x192 align=1 (i32.const 0))) + (local.set 0x193 (i64.load offset=0x193 align=1 (i32.const 0))) + (local.set 0x194 (i64.load offset=0x194 align=1 (i32.const 0))) + (local.set 0x195 (i64.load offset=0x195 align=1 (i32.const 0))) + (local.set 0x196 (i64.load offset=0x196 align=1 (i32.const 0))) + (local.set 0x197 (i64.load offset=0x197 align=1 (i32.const 0))) + (local.set 0x198 (i64.load offset=0x198 align=1 (i32.const 0))) + (local.set 0x199 (i64.load offset=0x199 align=1 (i32.const 0))) + (local.set 0x19a (i64.load offset=0x19a align=1 (i32.const 0))) + (local.set 0x19b (i64.load offset=0x19b align=1 (i32.const 0))) + 
(local.set 0x19c (i64.load offset=0x19c align=1 (i32.const 0))) + (local.set 0x19d (i64.load offset=0x19d align=1 (i32.const 0))) + (local.set 0x19e (i64.load offset=0x19e align=1 (i32.const 0))) + (local.set 0x19f (i64.load offset=0x19f align=1 (i32.const 0))) + (local.set 0x1a0 (i64.load offset=0x1a0 align=1 (i32.const 0))) + (local.set 0x1a1 (i64.load offset=0x1a1 align=1 (i32.const 0))) + (local.set 0x1a2 (i64.load offset=0x1a2 align=1 (i32.const 0))) + (local.set 0x1a3 (i64.load offset=0x1a3 align=1 (i32.const 0))) + (local.set 0x1a4 (i64.load offset=0x1a4 align=1 (i32.const 0))) + (local.set 0x1a5 (i64.load offset=0x1a5 align=1 (i32.const 0))) + (local.set 0x1a6 (i64.load offset=0x1a6 align=1 (i32.const 0))) + (local.set 0x1a7 (i64.load offset=0x1a7 align=1 (i32.const 0))) + (local.set 0x1a8 (i64.load offset=0x1a8 align=1 (i32.const 0))) + (local.set 0x1a9 (i64.load offset=0x1a9 align=1 (i32.const 0))) + (local.set 0x1aa (i64.load offset=0x1aa align=1 (i32.const 0))) + (local.set 0x1ab (i64.load offset=0x1ab align=1 (i32.const 0))) + (local.set 0x1ac (i64.load offset=0x1ac align=1 (i32.const 0))) + (local.set 0x1ad (i64.load offset=0x1ad align=1 (i32.const 0))) + (local.set 0x1ae (i64.load offset=0x1ae align=1 (i32.const 0))) + (local.set 0x1af (i64.load offset=0x1af align=1 (i32.const 0))) + (local.set 0x1b0 (i64.load offset=0x1b0 align=1 (i32.const 0))) + (local.set 0x1b1 (i64.load offset=0x1b1 align=1 (i32.const 0))) + (local.set 0x1b2 (i64.load offset=0x1b2 align=1 (i32.const 0))) + (local.set 0x1b3 (i64.load offset=0x1b3 align=1 (i32.const 0))) + (local.set 0x1b4 (i64.load offset=0x1b4 align=1 (i32.const 0))) + (local.set 0x1b5 (i64.load offset=0x1b5 align=1 (i32.const 0))) + (local.set 0x1b6 (i64.load offset=0x1b6 align=1 (i32.const 0))) + (local.set 0x1b7 (i64.load offset=0x1b7 align=1 (i32.const 0))) + (local.set 0x1b8 (i64.load offset=0x1b8 align=1 (i32.const 0))) + (local.set 0x1b9 (i64.load offset=0x1b9 align=1 (i32.const 0))) + (local.set 0x1ba (i64.load offset=0x1ba align=1 (i32.const 0))) + (local.set 0x1bb (i64.load offset=0x1bb align=1 (i32.const 0))) + (local.set 0x1bc (i64.load offset=0x1bc align=1 (i32.const 0))) + (local.set 0x1bd (i64.load offset=0x1bd align=1 (i32.const 0))) + (local.set 0x1be (i64.load offset=0x1be align=1 (i32.const 0))) + (local.set 0x1bf (i64.load offset=0x1bf align=1 (i32.const 0))) + (local.set 0x1c0 (i64.load offset=0x1c0 align=1 (i32.const 0))) + (local.set 0x1c1 (i64.load offset=0x1c1 align=1 (i32.const 0))) + (local.set 0x1c2 (i64.load offset=0x1c2 align=1 (i32.const 0))) + (local.set 0x1c3 (i64.load offset=0x1c3 align=1 (i32.const 0))) + (local.set 0x1c4 (i64.load offset=0x1c4 align=1 (i32.const 0))) + (local.set 0x1c5 (i64.load offset=0x1c5 align=1 (i32.const 0))) + (local.set 0x1c6 (i64.load offset=0x1c6 align=1 (i32.const 0))) + (local.set 0x1c7 (i64.load offset=0x1c7 align=1 (i32.const 0))) + (local.set 0x1c8 (i64.load offset=0x1c8 align=1 (i32.const 0))) + (local.set 0x1c9 (i64.load offset=0x1c9 align=1 (i32.const 0))) + (local.set 0x1ca (i64.load offset=0x1ca align=1 (i32.const 0))) + (local.set 0x1cb (i64.load offset=0x1cb align=1 (i32.const 0))) + (local.set 0x1cc (i64.load offset=0x1cc align=1 (i32.const 0))) + (local.set 0x1cd (i64.load offset=0x1cd align=1 (i32.const 0))) + (local.set 0x1ce (i64.load offset=0x1ce align=1 (i32.const 0))) + (local.set 0x1cf (i64.load offset=0x1cf align=1 (i32.const 0))) + (local.set 0x1d0 (i64.load offset=0x1d0 align=1 (i32.const 0))) + (local.set 0x1d1 (i64.load offset=0x1d1 align=1 
(i32.const 0))) + (local.set 0x1d2 (i64.load offset=0x1d2 align=1 (i32.const 0))) + (local.set 0x1d3 (i64.load offset=0x1d3 align=1 (i32.const 0))) + (local.set 0x1d4 (i64.load offset=0x1d4 align=1 (i32.const 0))) + (local.set 0x1d5 (i64.load offset=0x1d5 align=1 (i32.const 0))) + (local.set 0x1d6 (i64.load offset=0x1d6 align=1 (i32.const 0))) + (local.set 0x1d7 (i64.load offset=0x1d7 align=1 (i32.const 0))) + (local.set 0x1d8 (i64.load offset=0x1d8 align=1 (i32.const 0))) + (local.set 0x1d9 (i64.load offset=0x1d9 align=1 (i32.const 0))) + (local.set 0x1da (i64.load offset=0x1da align=1 (i32.const 0))) + (local.set 0x1db (i64.load offset=0x1db align=1 (i32.const 0))) + (local.set 0x1dc (i64.load offset=0x1dc align=1 (i32.const 0))) + (local.set 0x1dd (i64.load offset=0x1dd align=1 (i32.const 0))) + (local.set 0x1de (i64.load offset=0x1de align=1 (i32.const 0))) + (local.set 0x1df (i64.load offset=0x1df align=1 (i32.const 0))) + (local.set 0x1e0 (i64.load offset=0x1e0 align=1 (i32.const 0))) + (local.set 0x1e1 (i64.load offset=0x1e1 align=1 (i32.const 0))) + (local.set 0x1e2 (i64.load offset=0x1e2 align=1 (i32.const 0))) + (local.set 0x1e3 (i64.load offset=0x1e3 align=1 (i32.const 0))) + (local.set 0x1e4 (i64.load offset=0x1e4 align=1 (i32.const 0))) + (local.set 0x1e5 (i64.load offset=0x1e5 align=1 (i32.const 0))) + (local.set 0x1e6 (i64.load offset=0x1e6 align=1 (i32.const 0))) + (local.set 0x1e7 (i64.load offset=0x1e7 align=1 (i32.const 0))) + (local.set 0x1e8 (i64.load offset=0x1e8 align=1 (i32.const 0))) + (local.set 0x1e9 (i64.load offset=0x1e9 align=1 (i32.const 0))) + (local.set 0x1ea (i64.load offset=0x1ea align=1 (i32.const 0))) + (local.set 0x1eb (i64.load offset=0x1eb align=1 (i32.const 0))) + (local.set 0x1ec (i64.load offset=0x1ec align=1 (i32.const 0))) + (local.set 0x1ed (i64.load offset=0x1ed align=1 (i32.const 0))) + (local.set 0x1ee (i64.load offset=0x1ee align=1 (i32.const 0))) + (local.set 0x1ef (i64.load offset=0x1ef align=1 (i32.const 0))) + (local.set 0x1f0 (i64.load offset=0x1f0 align=1 (i32.const 0))) + (local.set 0x1f1 (i64.load offset=0x1f1 align=1 (i32.const 0))) + (local.set 0x1f2 (i64.load offset=0x1f2 align=1 (i32.const 0))) + (local.set 0x1f3 (i64.load offset=0x1f3 align=1 (i32.const 0))) + (local.set 0x1f4 (i64.load offset=0x1f4 align=1 (i32.const 0))) + (local.set 0x1f5 (i64.load offset=0x1f5 align=1 (i32.const 0))) + (local.set 0x1f6 (i64.load offset=0x1f6 align=1 (i32.const 0))) + (local.set 0x1f7 (i64.load offset=0x1f7 align=1 (i32.const 0))) + (local.set 0x1f8 (i64.load offset=0x1f8 align=1 (i32.const 0))) + (local.set 0x1f9 (i64.load offset=0x1f9 align=1 (i32.const 0))) + (local.set 0x1fa (i64.load offset=0x1fa align=1 (i32.const 0))) + (local.set 0x1fb (i64.load offset=0x1fb align=1 (i32.const 0))) + (local.set 0x1fc (i64.load offset=0x1fc align=1 (i32.const 0))) + (local.set 0x1fd (i64.load offset=0x1fd align=1 (i32.const 0))) + (local.set 0x1fe (i64.load offset=0x1fe align=1 (i32.const 0))) + (local.set 0x1ff (i64.load offset=0x1ff align=1 (i32.const 0))) + (local.set 0x200 (i64.load offset=0x200 align=1 (i32.const 0))) + (local.set 0x201 (i64.load offset=0x201 align=1 (i32.const 0))) + (local.set 0x202 (i64.load offset=0x202 align=1 (i32.const 0))) + (local.set 0x203 (i64.load offset=0x203 align=1 (i32.const 0))) + (local.set 0x204 (i64.load offset=0x204 align=1 (i32.const 0))) + (local.set 0x205 (i64.load offset=0x205 align=1 (i32.const 0))) + (local.set 0x206 (i64.load offset=0x206 align=1 (i32.const 0))) + (local.set 0x207 (i64.load 
offset=0x207 align=1 (i32.const 0))) + (local.set 0x208 (i64.load offset=0x208 align=1 (i32.const 0))) + (local.set 0x209 (i64.load offset=0x209 align=1 (i32.const 0))) + (local.set 0x20a (i64.load offset=0x20a align=1 (i32.const 0))) + (local.set 0x20b (i64.load offset=0x20b align=1 (i32.const 0))) + (local.set 0x20c (i64.load offset=0x20c align=1 (i32.const 0))) + (local.set 0x20d (i64.load offset=0x20d align=1 (i32.const 0))) + (local.set 0x20e (i64.load offset=0x20e align=1 (i32.const 0))) + (local.set 0x20f (i64.load offset=0x20f align=1 (i32.const 0))) + (local.set 0x210 (i64.load offset=0x210 align=1 (i32.const 0))) + (local.set 0x211 (i64.load offset=0x211 align=1 (i32.const 0))) + (local.set 0x212 (i64.load offset=0x212 align=1 (i32.const 0))) + (local.set 0x213 (i64.load offset=0x213 align=1 (i32.const 0))) + (local.set 0x214 (i64.load offset=0x214 align=1 (i32.const 0))) + (local.set 0x215 (i64.load offset=0x215 align=1 (i32.const 0))) + (local.set 0x216 (i64.load offset=0x216 align=1 (i32.const 0))) + (local.set 0x217 (i64.load offset=0x217 align=1 (i32.const 0))) + (local.set 0x218 (i64.load offset=0x218 align=1 (i32.const 0))) + (local.set 0x219 (i64.load offset=0x219 align=1 (i32.const 0))) + (local.set 0x21a (i64.load offset=0x21a align=1 (i32.const 0))) + (local.set 0x21b (i64.load offset=0x21b align=1 (i32.const 0))) + (local.set 0x21c (i64.load offset=0x21c align=1 (i32.const 0))) + (local.set 0x21d (i64.load offset=0x21d align=1 (i32.const 0))) + (local.set 0x21e (i64.load offset=0x21e align=1 (i32.const 0))) + (local.set 0x21f (i64.load offset=0x21f align=1 (i32.const 0))) + (local.set 0x220 (i64.load offset=0x220 align=1 (i32.const 0))) + (local.set 0x221 (i64.load offset=0x221 align=1 (i32.const 0))) + (local.set 0x222 (i64.load offset=0x222 align=1 (i32.const 0))) + (local.set 0x223 (i64.load offset=0x223 align=1 (i32.const 0))) + (local.set 0x224 (i64.load offset=0x224 align=1 (i32.const 0))) + (local.set 0x225 (i64.load offset=0x225 align=1 (i32.const 0))) + (local.set 0x226 (i64.load offset=0x226 align=1 (i32.const 0))) + (local.set 0x227 (i64.load offset=0x227 align=1 (i32.const 0))) + (local.set 0x228 (i64.load offset=0x228 align=1 (i32.const 0))) + (local.set 0x229 (i64.load offset=0x229 align=1 (i32.const 0))) + (local.set 0x22a (i64.load offset=0x22a align=1 (i32.const 0))) + (local.set 0x22b (i64.load offset=0x22b align=1 (i32.const 0))) + (local.set 0x22c (i64.load offset=0x22c align=1 (i32.const 0))) + (local.set 0x22d (i64.load offset=0x22d align=1 (i32.const 0))) + (local.set 0x22e (i64.load offset=0x22e align=1 (i32.const 0))) + (local.set 0x22f (i64.load offset=0x22f align=1 (i32.const 0))) + (local.set 0x230 (i64.load offset=0x230 align=1 (i32.const 0))) + (local.set 0x231 (i64.load offset=0x231 align=1 (i32.const 0))) + (local.set 0x232 (i64.load offset=0x232 align=1 (i32.const 0))) + (local.set 0x233 (i64.load offset=0x233 align=1 (i32.const 0))) + (local.set 0x234 (i64.load offset=0x234 align=1 (i32.const 0))) + (local.set 0x235 (i64.load offset=0x235 align=1 (i32.const 0))) + (local.set 0x236 (i64.load offset=0x236 align=1 (i32.const 0))) + (local.set 0x237 (i64.load offset=0x237 align=1 (i32.const 0))) + (local.set 0x238 (i64.load offset=0x238 align=1 (i32.const 0))) + (local.set 0x239 (i64.load offset=0x239 align=1 (i32.const 0))) + (local.set 0x23a (i64.load offset=0x23a align=1 (i32.const 0))) + (local.set 0x23b (i64.load offset=0x23b align=1 (i32.const 0))) + (local.set 0x23c (i64.load offset=0x23c align=1 (i32.const 0))) + (local.set 0x23d 
(i64.load offset=0x23d align=1 (i32.const 0))) + (local.set 0x23e (i64.load offset=0x23e align=1 (i32.const 0))) + (local.set 0x23f (i64.load offset=0x23f align=1 (i32.const 0))) + (local.set 0x240 (i64.load offset=0x240 align=1 (i32.const 0))) + (local.set 0x241 (i64.load offset=0x241 align=1 (i32.const 0))) + (local.set 0x242 (i64.load offset=0x242 align=1 (i32.const 0))) + (local.set 0x243 (i64.load offset=0x243 align=1 (i32.const 0))) + (local.set 0x244 (i64.load offset=0x244 align=1 (i32.const 0))) + (local.set 0x245 (i64.load offset=0x245 align=1 (i32.const 0))) + (local.set 0x246 (i64.load offset=0x246 align=1 (i32.const 0))) + (local.set 0x247 (i64.load offset=0x247 align=1 (i32.const 0))) + (local.set 0x248 (i64.load offset=0x248 align=1 (i32.const 0))) + (local.set 0x249 (i64.load offset=0x249 align=1 (i32.const 0))) + (local.set 0x24a (i64.load offset=0x24a align=1 (i32.const 0))) + (local.set 0x24b (i64.load offset=0x24b align=1 (i32.const 0))) + (local.set 0x24c (i64.load offset=0x24c align=1 (i32.const 0))) + (local.set 0x24d (i64.load offset=0x24d align=1 (i32.const 0))) + (local.set 0x24e (i64.load offset=0x24e align=1 (i32.const 0))) + (local.set 0x24f (i64.load offset=0x24f align=1 (i32.const 0))) + (local.set 0x250 (i64.load offset=0x250 align=1 (i32.const 0))) + (local.set 0x251 (i64.load offset=0x251 align=1 (i32.const 0))) + (local.set 0x252 (i64.load offset=0x252 align=1 (i32.const 0))) + (local.set 0x253 (i64.load offset=0x253 align=1 (i32.const 0))) + (local.set 0x254 (i64.load offset=0x254 align=1 (i32.const 0))) + (local.set 0x255 (i64.load offset=0x255 align=1 (i32.const 0))) + (local.set 0x256 (i64.load offset=0x256 align=1 (i32.const 0))) + (local.set 0x257 (i64.load offset=0x257 align=1 (i32.const 0))) + (local.set 0x258 (i64.load offset=0x258 align=1 (i32.const 0))) + (local.set 0x259 (i64.load offset=0x259 align=1 (i32.const 0))) + (local.set 0x25a (i64.load offset=0x25a align=1 (i32.const 0))) + (local.set 0x25b (i64.load offset=0x25b align=1 (i32.const 0))) + (local.set 0x25c (i64.load offset=0x25c align=1 (i32.const 0))) + (local.set 0x25d (i64.load offset=0x25d align=1 (i32.const 0))) + (local.set 0x25e (i64.load offset=0x25e align=1 (i32.const 0))) + (local.set 0x25f (i64.load offset=0x25f align=1 (i32.const 0))) + (local.set 0x260 (i64.load offset=0x260 align=1 (i32.const 0))) + (local.set 0x261 (i64.load offset=0x261 align=1 (i32.const 0))) + (local.set 0x262 (i64.load offset=0x262 align=1 (i32.const 0))) + (local.set 0x263 (i64.load offset=0x263 align=1 (i32.const 0))) + (local.set 0x264 (i64.load offset=0x264 align=1 (i32.const 0))) + (local.set 0x265 (i64.load offset=0x265 align=1 (i32.const 0))) + (local.set 0x266 (i64.load offset=0x266 align=1 (i32.const 0))) + (local.set 0x267 (i64.load offset=0x267 align=1 (i32.const 0))) + (local.set 0x268 (i64.load offset=0x268 align=1 (i32.const 0))) + (local.set 0x269 (i64.load offset=0x269 align=1 (i32.const 0))) + (local.set 0x26a (i64.load offset=0x26a align=1 (i32.const 0))) + (local.set 0x26b (i64.load offset=0x26b align=1 (i32.const 0))) + (local.set 0x26c (i64.load offset=0x26c align=1 (i32.const 0))) + (local.set 0x26d (i64.load offset=0x26d align=1 (i32.const 0))) + (local.set 0x26e (i64.load offset=0x26e align=1 (i32.const 0))) + (local.set 0x26f (i64.load offset=0x26f align=1 (i32.const 0))) + (local.set 0x270 (i64.load offset=0x270 align=1 (i32.const 0))) + (local.set 0x271 (i64.load offset=0x271 align=1 (i32.const 0))) + (local.set 0x272 (i64.load offset=0x272 align=1 (i32.const 0))) + 
(local.set 0x273 (i64.load offset=0x273 align=1 (i32.const 0))) + (local.set 0x274 (i64.load offset=0x274 align=1 (i32.const 0))) + (local.set 0x275 (i64.load offset=0x275 align=1 (i32.const 0))) + (local.set 0x276 (i64.load offset=0x276 align=1 (i32.const 0))) + (local.set 0x277 (i64.load offset=0x277 align=1 (i32.const 0))) + (local.set 0x278 (i64.load offset=0x278 align=1 (i32.const 0))) + (local.set 0x279 (i64.load offset=0x279 align=1 (i32.const 0))) + (local.set 0x27a (i64.load offset=0x27a align=1 (i32.const 0))) + (local.set 0x27b (i64.load offset=0x27b align=1 (i32.const 0))) + (local.set 0x27c (i64.load offset=0x27c align=1 (i32.const 0))) + (local.set 0x27d (i64.load offset=0x27d align=1 (i32.const 0))) + (local.set 0x27e (i64.load offset=0x27e align=1 (i32.const 0))) + (local.set 0x27f (i64.load offset=0x27f align=1 (i32.const 0))) + (local.set 0x280 (i64.load offset=0x280 align=1 (i32.const 0))) + (local.set 0x281 (i64.load offset=0x281 align=1 (i32.const 0))) + (local.set 0x282 (i64.load offset=0x282 align=1 (i32.const 0))) + (local.set 0x283 (i64.load offset=0x283 align=1 (i32.const 0))) + (local.set 0x284 (i64.load offset=0x284 align=1 (i32.const 0))) + (local.set 0x285 (i64.load offset=0x285 align=1 (i32.const 0))) + (local.set 0x286 (i64.load offset=0x286 align=1 (i32.const 0))) + (local.set 0x287 (i64.load offset=0x287 align=1 (i32.const 0))) + (local.set 0x288 (i64.load offset=0x288 align=1 (i32.const 0))) + (local.set 0x289 (i64.load offset=0x289 align=1 (i32.const 0))) + (local.set 0x28a (i64.load offset=0x28a align=1 (i32.const 0))) + (local.set 0x28b (i64.load offset=0x28b align=1 (i32.const 0))) + (local.set 0x28c (i64.load offset=0x28c align=1 (i32.const 0))) + (local.set 0x28d (i64.load offset=0x28d align=1 (i32.const 0))) + (local.set 0x28e (i64.load offset=0x28e align=1 (i32.const 0))) + (local.set 0x28f (i64.load offset=0x28f align=1 (i32.const 0))) + (local.set 0x290 (i64.load offset=0x290 align=1 (i32.const 0))) + (local.set 0x291 (i64.load offset=0x291 align=1 (i32.const 0))) + (local.set 0x292 (i64.load offset=0x292 align=1 (i32.const 0))) + (local.set 0x293 (i64.load offset=0x293 align=1 (i32.const 0))) + (local.set 0x294 (i64.load offset=0x294 align=1 (i32.const 0))) + (local.set 0x295 (i64.load offset=0x295 align=1 (i32.const 0))) + (local.set 0x296 (i64.load offset=0x296 align=1 (i32.const 0))) + (local.set 0x297 (i64.load offset=0x297 align=1 (i32.const 0))) + (local.set 0x298 (i64.load offset=0x298 align=1 (i32.const 0))) + (local.set 0x299 (i64.load offset=0x299 align=1 (i32.const 0))) + (local.set 0x29a (i64.load offset=0x29a align=1 (i32.const 0))) + (local.set 0x29b (i64.load offset=0x29b align=1 (i32.const 0))) + (local.set 0x29c (i64.load offset=0x29c align=1 (i32.const 0))) + (local.set 0x29d (i64.load offset=0x29d align=1 (i32.const 0))) + (local.set 0x29e (i64.load offset=0x29e align=1 (i32.const 0))) + (local.set 0x29f (i64.load offset=0x29f align=1 (i32.const 0))) + (local.set 0x2a0 (i64.load offset=0x2a0 align=1 (i32.const 0))) + (local.set 0x2a1 (i64.load offset=0x2a1 align=1 (i32.const 0))) + (local.set 0x2a2 (i64.load offset=0x2a2 align=1 (i32.const 0))) + (local.set 0x2a3 (i64.load offset=0x2a3 align=1 (i32.const 0))) + (local.set 0x2a4 (i64.load offset=0x2a4 align=1 (i32.const 0))) + (local.set 0x2a5 (i64.load offset=0x2a5 align=1 (i32.const 0))) + (local.set 0x2a6 (i64.load offset=0x2a6 align=1 (i32.const 0))) + (local.set 0x2a7 (i64.load offset=0x2a7 align=1 (i32.const 0))) + (local.set 0x2a8 (i64.load offset=0x2a8 align=1 
(i32.const 0))) + (local.set 0x2a9 (i64.load offset=0x2a9 align=1 (i32.const 0))) + (local.set 0x2aa (i64.load offset=0x2aa align=1 (i32.const 0))) + (local.set 0x2ab (i64.load offset=0x2ab align=1 (i32.const 0))) + (local.set 0x2ac (i64.load offset=0x2ac align=1 (i32.const 0))) + (local.set 0x2ad (i64.load offset=0x2ad align=1 (i32.const 0))) + (local.set 0x2ae (i64.load offset=0x2ae align=1 (i32.const 0))) + (local.set 0x2af (i64.load offset=0x2af align=1 (i32.const 0))) + (local.set 0x2b0 (i64.load offset=0x2b0 align=1 (i32.const 0))) + (local.set 0x2b1 (i64.load offset=0x2b1 align=1 (i32.const 0))) + (local.set 0x2b2 (i64.load offset=0x2b2 align=1 (i32.const 0))) + (local.set 0x2b3 (i64.load offset=0x2b3 align=1 (i32.const 0))) + (local.set 0x2b4 (i64.load offset=0x2b4 align=1 (i32.const 0))) + (local.set 0x2b5 (i64.load offset=0x2b5 align=1 (i32.const 0))) + (local.set 0x2b6 (i64.load offset=0x2b6 align=1 (i32.const 0))) + (local.set 0x2b7 (i64.load offset=0x2b7 align=1 (i32.const 0))) + (local.set 0x2b8 (i64.load offset=0x2b8 align=1 (i32.const 0))) + (local.set 0x2b9 (i64.load offset=0x2b9 align=1 (i32.const 0))) + (local.set 0x2ba (i64.load offset=0x2ba align=1 (i32.const 0))) + (local.set 0x2bb (i64.load offset=0x2bb align=1 (i32.const 0))) + (local.set 0x2bc (i64.load offset=0x2bc align=1 (i32.const 0))) + (local.set 0x2bd (i64.load offset=0x2bd align=1 (i32.const 0))) + (local.set 0x2be (i64.load offset=0x2be align=1 (i32.const 0))) + (local.set 0x2bf (i64.load offset=0x2bf align=1 (i32.const 0))) + (local.set 0x2c0 (i64.load offset=0x2c0 align=1 (i32.const 0))) + (local.set 0x2c1 (i64.load offset=0x2c1 align=1 (i32.const 0))) + (local.set 0x2c2 (i64.load offset=0x2c2 align=1 (i32.const 0))) + (local.set 0x2c3 (i64.load offset=0x2c3 align=1 (i32.const 0))) + (local.set 0x2c4 (i64.load offset=0x2c4 align=1 (i32.const 0))) + (local.set 0x2c5 (i64.load offset=0x2c5 align=1 (i32.const 0))) + (local.set 0x2c6 (i64.load offset=0x2c6 align=1 (i32.const 0))) + (local.set 0x2c7 (i64.load offset=0x2c7 align=1 (i32.const 0))) + (local.set 0x2c8 (i64.load offset=0x2c8 align=1 (i32.const 0))) + (local.set 0x2c9 (i64.load offset=0x2c9 align=1 (i32.const 0))) + (local.set 0x2ca (i64.load offset=0x2ca align=1 (i32.const 0))) + (local.set 0x2cb (i64.load offset=0x2cb align=1 (i32.const 0))) + (local.set 0x2cc (i64.load offset=0x2cc align=1 (i32.const 0))) + (local.set 0x2cd (i64.load offset=0x2cd align=1 (i32.const 0))) + (local.set 0x2ce (i64.load offset=0x2ce align=1 (i32.const 0))) + (local.set 0x2cf (i64.load offset=0x2cf align=1 (i32.const 0))) + (local.set 0x2d0 (i64.load offset=0x2d0 align=1 (i32.const 0))) + (local.set 0x2d1 (i64.load offset=0x2d1 align=1 (i32.const 0))) + (local.set 0x2d2 (i64.load offset=0x2d2 align=1 (i32.const 0))) + (local.set 0x2d3 (i64.load offset=0x2d3 align=1 (i32.const 0))) + (local.set 0x2d4 (i64.load offset=0x2d4 align=1 (i32.const 0))) + (local.set 0x2d5 (i64.load offset=0x2d5 align=1 (i32.const 0))) + (local.set 0x2d6 (i64.load offset=0x2d6 align=1 (i32.const 0))) + (local.set 0x2d7 (i64.load offset=0x2d7 align=1 (i32.const 0))) + (local.set 0x2d8 (i64.load offset=0x2d8 align=1 (i32.const 0))) + (local.set 0x2d9 (i64.load offset=0x2d9 align=1 (i32.const 0))) + (local.set 0x2da (i64.load offset=0x2da align=1 (i32.const 0))) + (local.set 0x2db (i64.load offset=0x2db align=1 (i32.const 0))) + (local.set 0x2dc (i64.load offset=0x2dc align=1 (i32.const 0))) + (local.set 0x2dd (i64.load offset=0x2dd align=1 (i32.const 0))) + (local.set 0x2de (i64.load 
offset=0x2de align=1 (i32.const 0))) + (local.set 0x2df (i64.load offset=0x2df align=1 (i32.const 0))) + (local.set 0x2e0 (i64.load offset=0x2e0 align=1 (i32.const 0))) + (local.set 0x2e1 (i64.load offset=0x2e1 align=1 (i32.const 0))) + (local.set 0x2e2 (i64.load offset=0x2e2 align=1 (i32.const 0))) + (local.set 0x2e3 (i64.load offset=0x2e3 align=1 (i32.const 0))) + (local.set 0x2e4 (i64.load offset=0x2e4 align=1 (i32.const 0))) + (local.set 0x2e5 (i64.load offset=0x2e5 align=1 (i32.const 0))) + (local.set 0x2e6 (i64.load offset=0x2e6 align=1 (i32.const 0))) + (local.set 0x2e7 (i64.load offset=0x2e7 align=1 (i32.const 0))) + (local.set 0x2e8 (i64.load offset=0x2e8 align=1 (i32.const 0))) + (local.set 0x2e9 (i64.load offset=0x2e9 align=1 (i32.const 0))) + (local.set 0x2ea (i64.load offset=0x2ea align=1 (i32.const 0))) + (local.set 0x2eb (i64.load offset=0x2eb align=1 (i32.const 0))) + (local.set 0x2ec (i64.load offset=0x2ec align=1 (i32.const 0))) + (local.set 0x2ed (i64.load offset=0x2ed align=1 (i32.const 0))) + (local.set 0x2ee (i64.load offset=0x2ee align=1 (i32.const 0))) + (local.set 0x2ef (i64.load offset=0x2ef align=1 (i32.const 0))) + (local.set 0x2f0 (i64.load offset=0x2f0 align=1 (i32.const 0))) + (local.set 0x2f1 (i64.load offset=0x2f1 align=1 (i32.const 0))) + (local.set 0x2f2 (i64.load offset=0x2f2 align=1 (i32.const 0))) + (local.set 0x2f3 (i64.load offset=0x2f3 align=1 (i32.const 0))) + (local.set 0x2f4 (i64.load offset=0x2f4 align=1 (i32.const 0))) + (local.set 0x2f5 (i64.load offset=0x2f5 align=1 (i32.const 0))) + (local.set 0x2f6 (i64.load offset=0x2f6 align=1 (i32.const 0))) + (local.set 0x2f7 (i64.load offset=0x2f7 align=1 (i32.const 0))) + (local.set 0x2f8 (i64.load offset=0x2f8 align=1 (i32.const 0))) + (local.set 0x2f9 (i64.load offset=0x2f9 align=1 (i32.const 0))) + (local.set 0x2fa (i64.load offset=0x2fa align=1 (i32.const 0))) + (local.set 0x2fb (i64.load offset=0x2fb align=1 (i32.const 0))) + (local.set 0x2fc (i64.load offset=0x2fc align=1 (i32.const 0))) + (local.set 0x2fd (i64.load offset=0x2fd align=1 (i32.const 0))) + (local.set 0x2fe (i64.load offset=0x2fe align=1 (i32.const 0))) + (local.set 0x2ff (i64.load offset=0x2ff align=1 (i32.const 0))) + (local.set 0x300 (i64.load offset=0x300 align=1 (i32.const 0))) + (local.set 0x301 (i64.load offset=0x301 align=1 (i32.const 0))) + (local.set 0x302 (i64.load offset=0x302 align=1 (i32.const 0))) + (local.set 0x303 (i64.load offset=0x303 align=1 (i32.const 0))) + (local.set 0x304 (i64.load offset=0x304 align=1 (i32.const 0))) + (local.set 0x305 (i64.load offset=0x305 align=1 (i32.const 0))) + (local.set 0x306 (i64.load offset=0x306 align=1 (i32.const 0))) + (local.set 0x307 (i64.load offset=0x307 align=1 (i32.const 0))) + (local.set 0x308 (i64.load offset=0x308 align=1 (i32.const 0))) + (local.set 0x309 (i64.load offset=0x309 align=1 (i32.const 0))) + (local.set 0x30a (i64.load offset=0x30a align=1 (i32.const 0))) + (local.set 0x30b (i64.load offset=0x30b align=1 (i32.const 0))) + (local.set 0x30c (i64.load offset=0x30c align=1 (i32.const 0))) + (local.set 0x30d (i64.load offset=0x30d align=1 (i32.const 0))) + (local.set 0x30e (i64.load offset=0x30e align=1 (i32.const 0))) + (local.set 0x30f (i64.load offset=0x30f align=1 (i32.const 0))) + (local.set 0x310 (i64.load offset=0x310 align=1 (i32.const 0))) + (local.set 0x311 (i64.load offset=0x311 align=1 (i32.const 0))) + (local.set 0x312 (i64.load offset=0x312 align=1 (i32.const 0))) + (local.set 0x313 (i64.load offset=0x313 align=1 (i32.const 0))) + (local.set 0x314 
(i64.load offset=0x314 align=1 (i32.const 0))) + (local.set 0x315 (i64.load offset=0x315 align=1 (i32.const 0))) + (local.set 0x316 (i64.load offset=0x316 align=1 (i32.const 0))) + (local.set 0x317 (i64.load offset=0x317 align=1 (i32.const 0))) + (local.set 0x318 (i64.load offset=0x318 align=1 (i32.const 0))) + (local.set 0x319 (i64.load offset=0x319 align=1 (i32.const 0))) + (local.set 0x31a (i64.load offset=0x31a align=1 (i32.const 0))) + (local.set 0x31b (i64.load offset=0x31b align=1 (i32.const 0))) + (local.set 0x31c (i64.load offset=0x31c align=1 (i32.const 0))) + (local.set 0x31d (i64.load offset=0x31d align=1 (i32.const 0))) + (local.set 0x31e (i64.load offset=0x31e align=1 (i32.const 0))) + (local.set 0x31f (i64.load offset=0x31f align=1 (i32.const 0))) + (local.set 0x320 (i64.load offset=0x320 align=1 (i32.const 0))) + (local.set 0x321 (i64.load offset=0x321 align=1 (i32.const 0))) + (local.set 0x322 (i64.load offset=0x322 align=1 (i32.const 0))) + (local.set 0x323 (i64.load offset=0x323 align=1 (i32.const 0))) + (local.set 0x324 (i64.load offset=0x324 align=1 (i32.const 0))) + (local.set 0x325 (i64.load offset=0x325 align=1 (i32.const 0))) + (local.set 0x326 (i64.load offset=0x326 align=1 (i32.const 0))) + (local.set 0x327 (i64.load offset=0x327 align=1 (i32.const 0))) + (local.set 0x328 (i64.load offset=0x328 align=1 (i32.const 0))) + (local.set 0x329 (i64.load offset=0x329 align=1 (i32.const 0))) + (local.set 0x32a (i64.load offset=0x32a align=1 (i32.const 0))) + (local.set 0x32b (i64.load offset=0x32b align=1 (i32.const 0))) + (local.set 0x32c (i64.load offset=0x32c align=1 (i32.const 0))) + (local.set 0x32d (i64.load offset=0x32d align=1 (i32.const 0))) + (local.set 0x32e (i64.load offset=0x32e align=1 (i32.const 0))) + (local.set 0x32f (i64.load offset=0x32f align=1 (i32.const 0))) + (local.set 0x330 (i64.load offset=0x330 align=1 (i32.const 0))) + (local.set 0x331 (i64.load offset=0x331 align=1 (i32.const 0))) + (local.set 0x332 (i64.load offset=0x332 align=1 (i32.const 0))) + (local.set 0x333 (i64.load offset=0x333 align=1 (i32.const 0))) + (local.set 0x334 (i64.load offset=0x334 align=1 (i32.const 0))) + (local.set 0x335 (i64.load offset=0x335 align=1 (i32.const 0))) + (local.set 0x336 (i64.load offset=0x336 align=1 (i32.const 0))) + (local.set 0x337 (i64.load offset=0x337 align=1 (i32.const 0))) + (local.set 0x338 (i64.load offset=0x338 align=1 (i32.const 0))) + (local.set 0x339 (i64.load offset=0x339 align=1 (i32.const 0))) + (local.set 0x33a (i64.load offset=0x33a align=1 (i32.const 0))) + (local.set 0x33b (i64.load offset=0x33b align=1 (i32.const 0))) + (local.set 0x33c (i64.load offset=0x33c align=1 (i32.const 0))) + (local.set 0x33d (i64.load offset=0x33d align=1 (i32.const 0))) + (local.set 0x33e (i64.load offset=0x33e align=1 (i32.const 0))) + (local.set 0x33f (i64.load offset=0x33f align=1 (i32.const 0))) + (local.set 0x340 (i64.load offset=0x340 align=1 (i32.const 0))) + (local.set 0x341 (i64.load offset=0x341 align=1 (i32.const 0))) + (local.set 0x342 (i64.load offset=0x342 align=1 (i32.const 0))) + (local.set 0x343 (i64.load offset=0x343 align=1 (i32.const 0))) + (local.set 0x344 (i64.load offset=0x344 align=1 (i32.const 0))) + (local.set 0x345 (i64.load offset=0x345 align=1 (i32.const 0))) + (local.set 0x346 (i64.load offset=0x346 align=1 (i32.const 0))) + (local.set 0x347 (i64.load offset=0x347 align=1 (i32.const 0))) + (local.set 0x348 (i64.load offset=0x348 align=1 (i32.const 0))) + (local.set 0x349 (i64.load offset=0x349 align=1 (i32.const 0))) + 
(local.set 0x34a (i64.load offset=0x34a align=1 (i32.const 0))) + (local.set 0x34b (i64.load offset=0x34b align=1 (i32.const 0))) + (local.set 0x34c (i64.load offset=0x34c align=1 (i32.const 0))) + (local.set 0x34d (i64.load offset=0x34d align=1 (i32.const 0))) + (local.set 0x34e (i64.load offset=0x34e align=1 (i32.const 0))) + (local.set 0x34f (i64.load offset=0x34f align=1 (i32.const 0))) + (local.set 0x350 (i64.load offset=0x350 align=1 (i32.const 0))) + (local.set 0x351 (i64.load offset=0x351 align=1 (i32.const 0))) + (local.set 0x352 (i64.load offset=0x352 align=1 (i32.const 0))) + (local.set 0x353 (i64.load offset=0x353 align=1 (i32.const 0))) + (local.set 0x354 (i64.load offset=0x354 align=1 (i32.const 0))) + (local.set 0x355 (i64.load offset=0x355 align=1 (i32.const 0))) + (local.set 0x356 (i64.load offset=0x356 align=1 (i32.const 0))) + (local.set 0x357 (i64.load offset=0x357 align=1 (i32.const 0))) + (local.set 0x358 (i64.load offset=0x358 align=1 (i32.const 0))) + (local.set 0x359 (i64.load offset=0x359 align=1 (i32.const 0))) + (local.set 0x35a (i64.load offset=0x35a align=1 (i32.const 0))) + (local.set 0x35b (i64.load offset=0x35b align=1 (i32.const 0))) + (local.set 0x35c (i64.load offset=0x35c align=1 (i32.const 0))) + (local.set 0x35d (i64.load offset=0x35d align=1 (i32.const 0))) + (local.set 0x35e (i64.load offset=0x35e align=1 (i32.const 0))) + (local.set 0x35f (i64.load offset=0x35f align=1 (i32.const 0))) + (local.set 0x360 (i64.load offset=0x360 align=1 (i32.const 0))) + (local.set 0x361 (i64.load offset=0x361 align=1 (i32.const 0))) + (local.set 0x362 (i64.load offset=0x362 align=1 (i32.const 0))) + (local.set 0x363 (i64.load offset=0x363 align=1 (i32.const 0))) + (local.set 0x364 (i64.load offset=0x364 align=1 (i32.const 0))) + (local.set 0x365 (i64.load offset=0x365 align=1 (i32.const 0))) + (local.set 0x366 (i64.load offset=0x366 align=1 (i32.const 0))) + (local.set 0x367 (i64.load offset=0x367 align=1 (i32.const 0))) + (local.set 0x368 (i64.load offset=0x368 align=1 (i32.const 0))) + (local.set 0x369 (i64.load offset=0x369 align=1 (i32.const 0))) + (local.set 0x36a (i64.load offset=0x36a align=1 (i32.const 0))) + (local.set 0x36b (i64.load offset=0x36b align=1 (i32.const 0))) + (local.set 0x36c (i64.load offset=0x36c align=1 (i32.const 0))) + (local.set 0x36d (i64.load offset=0x36d align=1 (i32.const 0))) + (local.set 0x36e (i64.load offset=0x36e align=1 (i32.const 0))) + (local.set 0x36f (i64.load offset=0x36f align=1 (i32.const 0))) + (local.set 0x370 (i64.load offset=0x370 align=1 (i32.const 0))) + (local.set 0x371 (i64.load offset=0x371 align=1 (i32.const 0))) + (local.set 0x372 (i64.load offset=0x372 align=1 (i32.const 0))) + (local.set 0x373 (i64.load offset=0x373 align=1 (i32.const 0))) + (local.set 0x374 (i64.load offset=0x374 align=1 (i32.const 0))) + (local.set 0x375 (i64.load offset=0x375 align=1 (i32.const 0))) + (local.set 0x376 (i64.load offset=0x376 align=1 (i32.const 0))) + (local.set 0x377 (i64.load offset=0x377 align=1 (i32.const 0))) + (local.set 0x378 (i64.load offset=0x378 align=1 (i32.const 0))) + (local.set 0x379 (i64.load offset=0x379 align=1 (i32.const 0))) + (local.set 0x37a (i64.load offset=0x37a align=1 (i32.const 0))) + (local.set 0x37b (i64.load offset=0x37b align=1 (i32.const 0))) + (local.set 0x37c (i64.load offset=0x37c align=1 (i32.const 0))) + (local.set 0x37d (i64.load offset=0x37d align=1 (i32.const 0))) + (local.set 0x37e (i64.load offset=0x37e align=1 (i32.const 0))) + (local.set 0x37f (i64.load offset=0x37f align=1 
(i32.const 0))) + (local.set 0x380 (i64.load offset=0x380 align=1 (i32.const 0))) + (local.set 0x381 (i64.load offset=0x381 align=1 (i32.const 0))) + (local.set 0x382 (i64.load offset=0x382 align=1 (i32.const 0))) + (local.set 0x383 (i64.load offset=0x383 align=1 (i32.const 0))) + (local.set 0x384 (i64.load offset=0x384 align=1 (i32.const 0))) + (local.set 0x385 (i64.load offset=0x385 align=1 (i32.const 0))) + (local.set 0x386 (i64.load offset=0x386 align=1 (i32.const 0))) + (local.set 0x387 (i64.load offset=0x387 align=1 (i32.const 0))) + (local.set 0x388 (i64.load offset=0x388 align=1 (i32.const 0))) + (local.set 0x389 (i64.load offset=0x389 align=1 (i32.const 0))) + (local.set 0x38a (i64.load offset=0x38a align=1 (i32.const 0))) + (local.set 0x38b (i64.load offset=0x38b align=1 (i32.const 0))) + (local.set 0x38c (i64.load offset=0x38c align=1 (i32.const 0))) + (local.set 0x38d (i64.load offset=0x38d align=1 (i32.const 0))) + (local.set 0x38e (i64.load offset=0x38e align=1 (i32.const 0))) + (local.set 0x38f (i64.load offset=0x38f align=1 (i32.const 0))) + (local.set 0x390 (i64.load offset=0x390 align=1 (i32.const 0))) + (local.set 0x391 (i64.load offset=0x391 align=1 (i32.const 0))) + (local.set 0x392 (i64.load offset=0x392 align=1 (i32.const 0))) + (local.set 0x393 (i64.load offset=0x393 align=1 (i32.const 0))) + (local.set 0x394 (i64.load offset=0x394 align=1 (i32.const 0))) + (local.set 0x395 (i64.load offset=0x395 align=1 (i32.const 0))) + (local.set 0x396 (i64.load offset=0x396 align=1 (i32.const 0))) + (local.set 0x397 (i64.load offset=0x397 align=1 (i32.const 0))) + (local.set 0x398 (i64.load offset=0x398 align=1 (i32.const 0))) + (local.set 0x399 (i64.load offset=0x399 align=1 (i32.const 0))) + (local.set 0x39a (i64.load offset=0x39a align=1 (i32.const 0))) + (local.set 0x39b (i64.load offset=0x39b align=1 (i32.const 0))) + (local.set 0x39c (i64.load offset=0x39c align=1 (i32.const 0))) + (local.set 0x39d (i64.load offset=0x39d align=1 (i32.const 0))) + (local.set 0x39e (i64.load offset=0x39e align=1 (i32.const 0))) + (local.set 0x39f (i64.load offset=0x39f align=1 (i32.const 0))) + (local.set 0x3a0 (i64.load offset=0x3a0 align=1 (i32.const 0))) + (local.set 0x3a1 (i64.load offset=0x3a1 align=1 (i32.const 0))) + (local.set 0x3a2 (i64.load offset=0x3a2 align=1 (i32.const 0))) + (local.set 0x3a3 (i64.load offset=0x3a3 align=1 (i32.const 0))) + (local.set 0x3a4 (i64.load offset=0x3a4 align=1 (i32.const 0))) + (local.set 0x3a5 (i64.load offset=0x3a5 align=1 (i32.const 0))) + (local.set 0x3a6 (i64.load offset=0x3a6 align=1 (i32.const 0))) + (local.set 0x3a7 (i64.load offset=0x3a7 align=1 (i32.const 0))) + (local.set 0x3a8 (i64.load offset=0x3a8 align=1 (i32.const 0))) + (local.set 0x3a9 (i64.load offset=0x3a9 align=1 (i32.const 0))) + (local.set 0x3aa (i64.load offset=0x3aa align=1 (i32.const 0))) + (local.set 0x3ab (i64.load offset=0x3ab align=1 (i32.const 0))) + (local.set 0x3ac (i64.load offset=0x3ac align=1 (i32.const 0))) + (local.set 0x3ad (i64.load offset=0x3ad align=1 (i32.const 0))) + (local.set 0x3ae (i64.load offset=0x3ae align=1 (i32.const 0))) + (local.set 0x3af (i64.load offset=0x3af align=1 (i32.const 0))) + (local.set 0x3b0 (i64.load offset=0x3b0 align=1 (i32.const 0))) + (local.set 0x3b1 (i64.load offset=0x3b1 align=1 (i32.const 0))) + (local.set 0x3b2 (i64.load offset=0x3b2 align=1 (i32.const 0))) + (local.set 0x3b3 (i64.load offset=0x3b3 align=1 (i32.const 0))) + (local.set 0x3b4 (i64.load offset=0x3b4 align=1 (i32.const 0))) + (local.set 0x3b5 (i64.load 
offset=0x3b5 align=1 (i32.const 0))) + (local.set 0x3b6 (i64.load offset=0x3b6 align=1 (i32.const 0))) + (local.set 0x3b7 (i64.load offset=0x3b7 align=1 (i32.const 0))) + (local.set 0x3b8 (i64.load offset=0x3b8 align=1 (i32.const 0))) + (local.set 0x3b9 (i64.load offset=0x3b9 align=1 (i32.const 0))) + (local.set 0x3ba (i64.load offset=0x3ba align=1 (i32.const 0))) + (local.set 0x3bb (i64.load offset=0x3bb align=1 (i32.const 0))) + (local.set 0x3bc (i64.load offset=0x3bc align=1 (i32.const 0))) + (local.set 0x3bd (i64.load offset=0x3bd align=1 (i32.const 0))) + (local.set 0x3be (i64.load offset=0x3be align=1 (i32.const 0))) + (local.set 0x3bf (i64.load offset=0x3bf align=1 (i32.const 0))) + (local.set 0x3c0 (i64.load offset=0x3c0 align=1 (i32.const 0))) + (local.set 0x3c1 (i64.load offset=0x3c1 align=1 (i32.const 0))) + (local.set 0x3c2 (i64.load offset=0x3c2 align=1 (i32.const 0))) + (local.set 0x3c3 (i64.load offset=0x3c3 align=1 (i32.const 0))) + (local.set 0x3c4 (i64.load offset=0x3c4 align=1 (i32.const 0))) + (local.set 0x3c5 (i64.load offset=0x3c5 align=1 (i32.const 0))) + (local.set 0x3c6 (i64.load offset=0x3c6 align=1 (i32.const 0))) + (local.set 0x3c7 (i64.load offset=0x3c7 align=1 (i32.const 0))) + (local.set 0x3c8 (i64.load offset=0x3c8 align=1 (i32.const 0))) + (local.set 0x3c9 (i64.load offset=0x3c9 align=1 (i32.const 0))) + (local.set 0x3ca (i64.load offset=0x3ca align=1 (i32.const 0))) + (local.set 0x3cb (i64.load offset=0x3cb align=1 (i32.const 0))) + (local.set 0x3cc (i64.load offset=0x3cc align=1 (i32.const 0))) + (local.set 0x3cd (i64.load offset=0x3cd align=1 (i32.const 0))) + (local.set 0x3ce (i64.load offset=0x3ce align=1 (i32.const 0))) + (local.set 0x3cf (i64.load offset=0x3cf align=1 (i32.const 0))) + (local.set 0x3d0 (i64.load offset=0x3d0 align=1 (i32.const 0))) + (local.set 0x3d1 (i64.load offset=0x3d1 align=1 (i32.const 0))) + (local.set 0x3d2 (i64.load offset=0x3d2 align=1 (i32.const 0))) + (local.set 0x3d3 (i64.load offset=0x3d3 align=1 (i32.const 0))) + (local.set 0x3d4 (i64.load offset=0x3d4 align=1 (i32.const 0))) + (local.set 0x3d5 (i64.load offset=0x3d5 align=1 (i32.const 0))) + (local.set 0x3d6 (i64.load offset=0x3d6 align=1 (i32.const 0))) + (local.set 0x3d7 (i64.load offset=0x3d7 align=1 (i32.const 0))) + (local.set 0x3d8 (i64.load offset=0x3d8 align=1 (i32.const 0))) + (local.set 0x3d9 (i64.load offset=0x3d9 align=1 (i32.const 0))) + (local.set 0x3da (i64.load offset=0x3da align=1 (i32.const 0))) + (local.set 0x3db (i64.load offset=0x3db align=1 (i32.const 0))) + (local.set 0x3dc (i64.load offset=0x3dc align=1 (i32.const 0))) + (local.set 0x3dd (i64.load offset=0x3dd align=1 (i32.const 0))) + (local.set 0x3de (i64.load offset=0x3de align=1 (i32.const 0))) + (local.set 0x3df (i64.load offset=0x3df align=1 (i32.const 0))) + (local.set 0x3e0 (i64.load offset=0x3e0 align=1 (i32.const 0))) + (local.set 0x3e1 (i64.load offset=0x3e1 align=1 (i32.const 0))) + (local.set 0x3e2 (i64.load offset=0x3e2 align=1 (i32.const 0))) + (local.set 0x3e3 (i64.load offset=0x3e3 align=1 (i32.const 0))) + (local.set 0x3e4 (i64.load offset=0x3e4 align=1 (i32.const 0))) + (local.set 0x3e5 (i64.load offset=0x3e5 align=1 (i32.const 0))) + (local.set 0x3e6 (i64.load offset=0x3e6 align=1 (i32.const 0))) + (local.set 0x3e7 (i64.load offset=0x3e7 align=1 (i32.const 0))) + (local.set 0x3e8 (i64.load offset=0x3e8 align=1 (i32.const 0))) + (local.set 0x3e9 (i64.load offset=0x3e9 align=1 (i32.const 0))) + (local.set 0x3ea (i64.load offset=0x3ea align=1 (i32.const 0))) + (local.set 0x3eb 
(i64.load offset=0x3eb align=1 (i32.const 0)))
+ (local.set 0x3ec (i64.load offset=0x3ec align=1 (i32.const 0)))
+ (local.set 0x3ed (i64.load offset=0x3ed align=1 (i32.const 0)))
+ (local.set 0x3ee (i64.load offset=0x3ee align=1 (i32.const 0)))
+ (local.set 0x3ef (i64.load offset=0x3ef align=1 (i32.const 0)))
+ (local.set 0x3f0 (i64.load offset=0x3f0 align=1 (i32.const 0)))
+ (local.set 0x3f1 (i64.load offset=0x3f1 align=1 (i32.const 0)))
+ (local.set 0x3f2 (i64.load offset=0x3f2 align=1 (i32.const 0)))
+ (local.set 0x3f3 (i64.load offset=0x3f3 align=1 (i32.const 0)))
+ (local.set 0x3f4 (i64.load offset=0x3f4 align=1 (i32.const 0)))
+ (local.set 0x3f5 (i64.load offset=0x3f5 align=1 (i32.const 0)))
+ (local.set 0x3f6 (i64.load offset=0x3f6 align=1 (i32.const 0)))
+ (local.set 0x3f7 (i64.load offset=0x3f7 align=1 (i32.const 0)))
+ (local.set 0x3f8 (i64.load offset=0x3f8 align=1 (i32.const 0)))
+ (local.set 0x3f9 (i64.load offset=0x3f9 align=1 (i32.const 0)))
+ (local.set 0x3fa (i64.load offset=0x3fa align=1 (i32.const 0)))
+ (local.set 0x3fb (i64.load offset=0x3fb align=1 (i32.const 0)))
+ (local.set 0x3fc (i64.load offset=0x3fc align=1 (i32.const 0)))
+ (local.set 0x3fd (i64.load offset=0x3fd align=1 (i32.const 0)))
+ (local.set 0x3fe (i64.load offset=0x3fe align=1 (i32.const 0)))
+ (local.set 0x3ff (i64.load offset=0x3ff align=1 (i32.const 0)))
+ (local.set 0x400 (i64.load offset=0x400 align=1 (i32.const 0)))
+ (local.set 0x401 (i64.load offset=0x401 align=1 (i32.const 0)))
+ (local.set 0x402 (i64.load offset=0x402 align=1 (i32.const 0)))
+ (local.set 0x403 (i64.load offset=0x403 align=1 (i32.const 0)))
+ (local.set 0x404 (i64.load offset=0x404 align=1 (i32.const 0)))
+ (local.set 0x405 (i64.load offset=0x405 align=1 (i32.const 0)))
+ (local.set 0x406 (i64.load offset=0x406 align=1 (i32.const 0)))
+ (local.set 0x407 (i64.load offset=0x407 align=1 (i32.const 0)))
+ (local.set 0x408 (i64.load offset=0x408 align=1 (i32.const 0)))
+ (local.set 0x409 (i64.load offset=0x409 align=1 (i32.const 0)))
+ (local.set 0x40a (i64.load offset=0x40a align=1 (i32.const 0)))
+ (local.set 0x40b (i64.load offset=0x40b align=1 (i32.const 0)))
+ (local.set 0x40c (i64.load offset=0x40c align=1 (i32.const 0)))
+ (local.set 0x40d (i64.load offset=0x40d align=1 (i32.const 0)))
+ (local.set 0x40e (i64.load offset=0x40e align=1 (i32.const 0)))
+ (local.set 0x40f (i64.load offset=0x40f align=1 (i32.const 0)))
+ (local.set 0x410 (i64.load offset=0x410 align=1 (i32.const 0)))
+ (local.set 0x411 (i64.load offset=0x411 align=1 (i32.const 0)))
+ (local.set 0x412 (i64.load offset=0x412 align=1 (i32.const 0)))
+ (local.set 0x413 (i64.load offset=0x413 align=1 (i32.const 0)))
+ (local.set 0x414 (i64.load offset=0x414 align=1 (i32.const 0)))
+ (local.set 0x415 (i64.load offset=0x415 align=1 (i32.const 0)))
+ (local.set 0x416 (i64.load offset=0x416 align=1 (i32.const 0)))
+ (local.set 0x417 (i64.load offset=0x417 align=1 (i32.const 0)))
+ (local.set 0x418 (i64.load offset=0x418 align=1 (i32.const 0)))
+ (local.set 0x419 (i64.load offset=0x419 align=1 (i32.const 0)))
+ (local.set 0x41a (i64.load offset=0x41a align=1 (i32.const 0)))
+ (local.set 0x41b (i64.load offset=0x41b align=1 (i32.const 0)))
+ (local.set 0x41c (i64.load offset=0x41c align=1 (i32.const 0)))
+ (local.set 0x41d (i64.load offset=0x41d align=1 (i32.const 0)))
+ (local.set 0x41e (i64.load offset=0x41e align=1 (i32.const 0)))
+ (local.set 0x41f (i64.load offset=0x41f align=1 (i32.const 0)))
+
+ ;; store the locals back to memory
+ (i64.store offset=0x000 align=1
(i32.const 0) (local.get 0x000)) + (i64.store offset=0x001 align=1 (i32.const 0) (local.get 0x001)) + (i64.store offset=0x002 align=1 (i32.const 0) (local.get 0x002)) + (i64.store offset=0x003 align=1 (i32.const 0) (local.get 0x003)) + (i64.store offset=0x004 align=1 (i32.const 0) (local.get 0x004)) + (i64.store offset=0x005 align=1 (i32.const 0) (local.get 0x005)) + (i64.store offset=0x006 align=1 (i32.const 0) (local.get 0x006)) + (i64.store offset=0x007 align=1 (i32.const 0) (local.get 0x007)) + (i64.store offset=0x008 align=1 (i32.const 0) (local.get 0x008)) + (i64.store offset=0x009 align=1 (i32.const 0) (local.get 0x009)) + (i64.store offset=0x00a align=1 (i32.const 0) (local.get 0x00a)) + (i64.store offset=0x00b align=1 (i32.const 0) (local.get 0x00b)) + (i64.store offset=0x00c align=1 (i32.const 0) (local.get 0x00c)) + (i64.store offset=0x00d align=1 (i32.const 0) (local.get 0x00d)) + (i64.store offset=0x00e align=1 (i32.const 0) (local.get 0x00e)) + (i64.store offset=0x00f align=1 (i32.const 0) (local.get 0x00f)) + (i64.store offset=0x010 align=1 (i32.const 0) (local.get 0x010)) + (i64.store offset=0x011 align=1 (i32.const 0) (local.get 0x011)) + (i64.store offset=0x012 align=1 (i32.const 0) (local.get 0x012)) + (i64.store offset=0x013 align=1 (i32.const 0) (local.get 0x013)) + (i64.store offset=0x014 align=1 (i32.const 0) (local.get 0x014)) + (i64.store offset=0x015 align=1 (i32.const 0) (local.get 0x015)) + (i64.store offset=0x016 align=1 (i32.const 0) (local.get 0x016)) + (i64.store offset=0x017 align=1 (i32.const 0) (local.get 0x017)) + (i64.store offset=0x018 align=1 (i32.const 0) (local.get 0x018)) + (i64.store offset=0x019 align=1 (i32.const 0) (local.get 0x019)) + (i64.store offset=0x01a align=1 (i32.const 0) (local.get 0x01a)) + (i64.store offset=0x01b align=1 (i32.const 0) (local.get 0x01b)) + (i64.store offset=0x01c align=1 (i32.const 0) (local.get 0x01c)) + (i64.store offset=0x01d align=1 (i32.const 0) (local.get 0x01d)) + (i64.store offset=0x01e align=1 (i32.const 0) (local.get 0x01e)) + (i64.store offset=0x01f align=1 (i32.const 0) (local.get 0x01f)) + (i64.store offset=0x020 align=1 (i32.const 0) (local.get 0x020)) + (i64.store offset=0x021 align=1 (i32.const 0) (local.get 0x021)) + (i64.store offset=0x022 align=1 (i32.const 0) (local.get 0x022)) + (i64.store offset=0x023 align=1 (i32.const 0) (local.get 0x023)) + (i64.store offset=0x024 align=1 (i32.const 0) (local.get 0x024)) + (i64.store offset=0x025 align=1 (i32.const 0) (local.get 0x025)) + (i64.store offset=0x026 align=1 (i32.const 0) (local.get 0x026)) + (i64.store offset=0x027 align=1 (i32.const 0) (local.get 0x027)) + (i64.store offset=0x028 align=1 (i32.const 0) (local.get 0x028)) + (i64.store offset=0x029 align=1 (i32.const 0) (local.get 0x029)) + (i64.store offset=0x02a align=1 (i32.const 0) (local.get 0x02a)) + (i64.store offset=0x02b align=1 (i32.const 0) (local.get 0x02b)) + (i64.store offset=0x02c align=1 (i32.const 0) (local.get 0x02c)) + (i64.store offset=0x02d align=1 (i32.const 0) (local.get 0x02d)) + (i64.store offset=0x02e align=1 (i32.const 0) (local.get 0x02e)) + (i64.store offset=0x02f align=1 (i32.const 0) (local.get 0x02f)) + (i64.store offset=0x030 align=1 (i32.const 0) (local.get 0x030)) + (i64.store offset=0x031 align=1 (i32.const 0) (local.get 0x031)) + (i64.store offset=0x032 align=1 (i32.const 0) (local.get 0x032)) + (i64.store offset=0x033 align=1 (i32.const 0) (local.get 0x033)) + (i64.store offset=0x034 align=1 (i32.const 0) (local.get 0x034)) + (i64.store offset=0x035 align=1 
(i32.const 0) (local.get 0x035)) + (i64.store offset=0x036 align=1 (i32.const 0) (local.get 0x036)) + (i64.store offset=0x037 align=1 (i32.const 0) (local.get 0x037)) + (i64.store offset=0x038 align=1 (i32.const 0) (local.get 0x038)) + (i64.store offset=0x039 align=1 (i32.const 0) (local.get 0x039)) + (i64.store offset=0x03a align=1 (i32.const 0) (local.get 0x03a)) + (i64.store offset=0x03b align=1 (i32.const 0) (local.get 0x03b)) + (i64.store offset=0x03c align=1 (i32.const 0) (local.get 0x03c)) + (i64.store offset=0x03d align=1 (i32.const 0) (local.get 0x03d)) + (i64.store offset=0x03e align=1 (i32.const 0) (local.get 0x03e)) + (i64.store offset=0x03f align=1 (i32.const 0) (local.get 0x03f)) + (i64.store offset=0x040 align=1 (i32.const 0) (local.get 0x040)) + (i64.store offset=0x041 align=1 (i32.const 0) (local.get 0x041)) + (i64.store offset=0x042 align=1 (i32.const 0) (local.get 0x042)) + (i64.store offset=0x043 align=1 (i32.const 0) (local.get 0x043)) + (i64.store offset=0x044 align=1 (i32.const 0) (local.get 0x044)) + (i64.store offset=0x045 align=1 (i32.const 0) (local.get 0x045)) + (i64.store offset=0x046 align=1 (i32.const 0) (local.get 0x046)) + (i64.store offset=0x047 align=1 (i32.const 0) (local.get 0x047)) + (i64.store offset=0x048 align=1 (i32.const 0) (local.get 0x048)) + (i64.store offset=0x049 align=1 (i32.const 0) (local.get 0x049)) + (i64.store offset=0x04a align=1 (i32.const 0) (local.get 0x04a)) + (i64.store offset=0x04b align=1 (i32.const 0) (local.get 0x04b)) + (i64.store offset=0x04c align=1 (i32.const 0) (local.get 0x04c)) + (i64.store offset=0x04d align=1 (i32.const 0) (local.get 0x04d)) + (i64.store offset=0x04e align=1 (i32.const 0) (local.get 0x04e)) + (i64.store offset=0x04f align=1 (i32.const 0) (local.get 0x04f)) + (i64.store offset=0x050 align=1 (i32.const 0) (local.get 0x050)) + (i64.store offset=0x051 align=1 (i32.const 0) (local.get 0x051)) + (i64.store offset=0x052 align=1 (i32.const 0) (local.get 0x052)) + (i64.store offset=0x053 align=1 (i32.const 0) (local.get 0x053)) + (i64.store offset=0x054 align=1 (i32.const 0) (local.get 0x054)) + (i64.store offset=0x055 align=1 (i32.const 0) (local.get 0x055)) + (i64.store offset=0x056 align=1 (i32.const 0) (local.get 0x056)) + (i64.store offset=0x057 align=1 (i32.const 0) (local.get 0x057)) + (i64.store offset=0x058 align=1 (i32.const 0) (local.get 0x058)) + (i64.store offset=0x059 align=1 (i32.const 0) (local.get 0x059)) + (i64.store offset=0x05a align=1 (i32.const 0) (local.get 0x05a)) + (i64.store offset=0x05b align=1 (i32.const 0) (local.get 0x05b)) + (i64.store offset=0x05c align=1 (i32.const 0) (local.get 0x05c)) + (i64.store offset=0x05d align=1 (i32.const 0) (local.get 0x05d)) + (i64.store offset=0x05e align=1 (i32.const 0) (local.get 0x05e)) + (i64.store offset=0x05f align=1 (i32.const 0) (local.get 0x05f)) + (i64.store offset=0x060 align=1 (i32.const 0) (local.get 0x060)) + (i64.store offset=0x061 align=1 (i32.const 0) (local.get 0x061)) + (i64.store offset=0x062 align=1 (i32.const 0) (local.get 0x062)) + (i64.store offset=0x063 align=1 (i32.const 0) (local.get 0x063)) + (i64.store offset=0x064 align=1 (i32.const 0) (local.get 0x064)) + (i64.store offset=0x065 align=1 (i32.const 0) (local.get 0x065)) + (i64.store offset=0x066 align=1 (i32.const 0) (local.get 0x066)) + (i64.store offset=0x067 align=1 (i32.const 0) (local.get 0x067)) + (i64.store offset=0x068 align=1 (i32.const 0) (local.get 0x068)) + (i64.store offset=0x069 align=1 (i32.const 0) (local.get 0x069)) + (i64.store offset=0x06a align=1 
(i32.const 0) (local.get 0x06a)) + (i64.store offset=0x06b align=1 (i32.const 0) (local.get 0x06b)) + (i64.store offset=0x06c align=1 (i32.const 0) (local.get 0x06c)) + (i64.store offset=0x06d align=1 (i32.const 0) (local.get 0x06d)) + (i64.store offset=0x06e align=1 (i32.const 0) (local.get 0x06e)) + (i64.store offset=0x06f align=1 (i32.const 0) (local.get 0x06f)) + (i64.store offset=0x070 align=1 (i32.const 0) (local.get 0x070)) + (i64.store offset=0x071 align=1 (i32.const 0) (local.get 0x071)) + (i64.store offset=0x072 align=1 (i32.const 0) (local.get 0x072)) + (i64.store offset=0x073 align=1 (i32.const 0) (local.get 0x073)) + (i64.store offset=0x074 align=1 (i32.const 0) (local.get 0x074)) + (i64.store offset=0x075 align=1 (i32.const 0) (local.get 0x075)) + (i64.store offset=0x076 align=1 (i32.const 0) (local.get 0x076)) + (i64.store offset=0x077 align=1 (i32.const 0) (local.get 0x077)) + (i64.store offset=0x078 align=1 (i32.const 0) (local.get 0x078)) + (i64.store offset=0x079 align=1 (i32.const 0) (local.get 0x079)) + (i64.store offset=0x07a align=1 (i32.const 0) (local.get 0x07a)) + (i64.store offset=0x07b align=1 (i32.const 0) (local.get 0x07b)) + (i64.store offset=0x07c align=1 (i32.const 0) (local.get 0x07c)) + (i64.store offset=0x07d align=1 (i32.const 0) (local.get 0x07d)) + (i64.store offset=0x07e align=1 (i32.const 0) (local.get 0x07e)) + (i64.store offset=0x07f align=1 (i32.const 0) (local.get 0x07f)) + (i64.store offset=0x080 align=1 (i32.const 0) (local.get 0x080)) + (i64.store offset=0x081 align=1 (i32.const 0) (local.get 0x081)) + (i64.store offset=0x082 align=1 (i32.const 0) (local.get 0x082)) + (i64.store offset=0x083 align=1 (i32.const 0) (local.get 0x083)) + (i64.store offset=0x084 align=1 (i32.const 0) (local.get 0x084)) + (i64.store offset=0x085 align=1 (i32.const 0) (local.get 0x085)) + (i64.store offset=0x086 align=1 (i32.const 0) (local.get 0x086)) + (i64.store offset=0x087 align=1 (i32.const 0) (local.get 0x087)) + (i64.store offset=0x088 align=1 (i32.const 0) (local.get 0x088)) + (i64.store offset=0x089 align=1 (i32.const 0) (local.get 0x089)) + (i64.store offset=0x08a align=1 (i32.const 0) (local.get 0x08a)) + (i64.store offset=0x08b align=1 (i32.const 0) (local.get 0x08b)) + (i64.store offset=0x08c align=1 (i32.const 0) (local.get 0x08c)) + (i64.store offset=0x08d align=1 (i32.const 0) (local.get 0x08d)) + (i64.store offset=0x08e align=1 (i32.const 0) (local.get 0x08e)) + (i64.store offset=0x08f align=1 (i32.const 0) (local.get 0x08f)) + (i64.store offset=0x090 align=1 (i32.const 0) (local.get 0x090)) + (i64.store offset=0x091 align=1 (i32.const 0) (local.get 0x091)) + (i64.store offset=0x092 align=1 (i32.const 0) (local.get 0x092)) + (i64.store offset=0x093 align=1 (i32.const 0) (local.get 0x093)) + (i64.store offset=0x094 align=1 (i32.const 0) (local.get 0x094)) + (i64.store offset=0x095 align=1 (i32.const 0) (local.get 0x095)) + (i64.store offset=0x096 align=1 (i32.const 0) (local.get 0x096)) + (i64.store offset=0x097 align=1 (i32.const 0) (local.get 0x097)) + (i64.store offset=0x098 align=1 (i32.const 0) (local.get 0x098)) + (i64.store offset=0x099 align=1 (i32.const 0) (local.get 0x099)) + (i64.store offset=0x09a align=1 (i32.const 0) (local.get 0x09a)) + (i64.store offset=0x09b align=1 (i32.const 0) (local.get 0x09b)) + (i64.store offset=0x09c align=1 (i32.const 0) (local.get 0x09c)) + (i64.store offset=0x09d align=1 (i32.const 0) (local.get 0x09d)) + (i64.store offset=0x09e align=1 (i32.const 0) (local.get 0x09e)) + (i64.store offset=0x09f align=1 
(i32.const 0) (local.get 0x09f)) + (i64.store offset=0x0a0 align=1 (i32.const 0) (local.get 0x0a0)) + (i64.store offset=0x0a1 align=1 (i32.const 0) (local.get 0x0a1)) + (i64.store offset=0x0a2 align=1 (i32.const 0) (local.get 0x0a2)) + (i64.store offset=0x0a3 align=1 (i32.const 0) (local.get 0x0a3)) + (i64.store offset=0x0a4 align=1 (i32.const 0) (local.get 0x0a4)) + (i64.store offset=0x0a5 align=1 (i32.const 0) (local.get 0x0a5)) + (i64.store offset=0x0a6 align=1 (i32.const 0) (local.get 0x0a6)) + (i64.store offset=0x0a7 align=1 (i32.const 0) (local.get 0x0a7)) + (i64.store offset=0x0a8 align=1 (i32.const 0) (local.get 0x0a8)) + (i64.store offset=0x0a9 align=1 (i32.const 0) (local.get 0x0a9)) + (i64.store offset=0x0aa align=1 (i32.const 0) (local.get 0x0aa)) + (i64.store offset=0x0ab align=1 (i32.const 0) (local.get 0x0ab)) + (i64.store offset=0x0ac align=1 (i32.const 0) (local.get 0x0ac)) + (i64.store offset=0x0ad align=1 (i32.const 0) (local.get 0x0ad)) + (i64.store offset=0x0ae align=1 (i32.const 0) (local.get 0x0ae)) + (i64.store offset=0x0af align=1 (i32.const 0) (local.get 0x0af)) + (i64.store offset=0x0b0 align=1 (i32.const 0) (local.get 0x0b0)) + (i64.store offset=0x0b1 align=1 (i32.const 0) (local.get 0x0b1)) + (i64.store offset=0x0b2 align=1 (i32.const 0) (local.get 0x0b2)) + (i64.store offset=0x0b3 align=1 (i32.const 0) (local.get 0x0b3)) + (i64.store offset=0x0b4 align=1 (i32.const 0) (local.get 0x0b4)) + (i64.store offset=0x0b5 align=1 (i32.const 0) (local.get 0x0b5)) + (i64.store offset=0x0b6 align=1 (i32.const 0) (local.get 0x0b6)) + (i64.store offset=0x0b7 align=1 (i32.const 0) (local.get 0x0b7)) + (i64.store offset=0x0b8 align=1 (i32.const 0) (local.get 0x0b8)) + (i64.store offset=0x0b9 align=1 (i32.const 0) (local.get 0x0b9)) + (i64.store offset=0x0ba align=1 (i32.const 0) (local.get 0x0ba)) + (i64.store offset=0x0bb align=1 (i32.const 0) (local.get 0x0bb)) + (i64.store offset=0x0bc align=1 (i32.const 0) (local.get 0x0bc)) + (i64.store offset=0x0bd align=1 (i32.const 0) (local.get 0x0bd)) + (i64.store offset=0x0be align=1 (i32.const 0) (local.get 0x0be)) + (i64.store offset=0x0bf align=1 (i32.const 0) (local.get 0x0bf)) + (i64.store offset=0x0c0 align=1 (i32.const 0) (local.get 0x0c0)) + (i64.store offset=0x0c1 align=1 (i32.const 0) (local.get 0x0c1)) + (i64.store offset=0x0c2 align=1 (i32.const 0) (local.get 0x0c2)) + (i64.store offset=0x0c3 align=1 (i32.const 0) (local.get 0x0c3)) + (i64.store offset=0x0c4 align=1 (i32.const 0) (local.get 0x0c4)) + (i64.store offset=0x0c5 align=1 (i32.const 0) (local.get 0x0c5)) + (i64.store offset=0x0c6 align=1 (i32.const 0) (local.get 0x0c6)) + (i64.store offset=0x0c7 align=1 (i32.const 0) (local.get 0x0c7)) + (i64.store offset=0x0c8 align=1 (i32.const 0) (local.get 0x0c8)) + (i64.store offset=0x0c9 align=1 (i32.const 0) (local.get 0x0c9)) + (i64.store offset=0x0ca align=1 (i32.const 0) (local.get 0x0ca)) + (i64.store offset=0x0cb align=1 (i32.const 0) (local.get 0x0cb)) + (i64.store offset=0x0cc align=1 (i32.const 0) (local.get 0x0cc)) + (i64.store offset=0x0cd align=1 (i32.const 0) (local.get 0x0cd)) + (i64.store offset=0x0ce align=1 (i32.const 0) (local.get 0x0ce)) + (i64.store offset=0x0cf align=1 (i32.const 0) (local.get 0x0cf)) + (i64.store offset=0x0d0 align=1 (i32.const 0) (local.get 0x0d0)) + (i64.store offset=0x0d1 align=1 (i32.const 0) (local.get 0x0d1)) + (i64.store offset=0x0d2 align=1 (i32.const 0) (local.get 0x0d2)) + (i64.store offset=0x0d3 align=1 (i32.const 0) (local.get 0x0d3)) + (i64.store offset=0x0d4 align=1 
(i32.const 0) (local.get 0x0d4)) + (i64.store offset=0x0d5 align=1 (i32.const 0) (local.get 0x0d5)) + (i64.store offset=0x0d6 align=1 (i32.const 0) (local.get 0x0d6)) + (i64.store offset=0x0d7 align=1 (i32.const 0) (local.get 0x0d7)) + (i64.store offset=0x0d8 align=1 (i32.const 0) (local.get 0x0d8)) + (i64.store offset=0x0d9 align=1 (i32.const 0) (local.get 0x0d9)) + (i64.store offset=0x0da align=1 (i32.const 0) (local.get 0x0da)) + (i64.store offset=0x0db align=1 (i32.const 0) (local.get 0x0db)) + (i64.store offset=0x0dc align=1 (i32.const 0) (local.get 0x0dc)) + (i64.store offset=0x0dd align=1 (i32.const 0) (local.get 0x0dd)) + (i64.store offset=0x0de align=1 (i32.const 0) (local.get 0x0de)) + (i64.store offset=0x0df align=1 (i32.const 0) (local.get 0x0df)) + (i64.store offset=0x0e0 align=1 (i32.const 0) (local.get 0x0e0)) + (i64.store offset=0x0e1 align=1 (i32.const 0) (local.get 0x0e1)) + (i64.store offset=0x0e2 align=1 (i32.const 0) (local.get 0x0e2)) + (i64.store offset=0x0e3 align=1 (i32.const 0) (local.get 0x0e3)) + (i64.store offset=0x0e4 align=1 (i32.const 0) (local.get 0x0e4)) + (i64.store offset=0x0e5 align=1 (i32.const 0) (local.get 0x0e5)) + (i64.store offset=0x0e6 align=1 (i32.const 0) (local.get 0x0e6)) + (i64.store offset=0x0e7 align=1 (i32.const 0) (local.get 0x0e7)) + (i64.store offset=0x0e8 align=1 (i32.const 0) (local.get 0x0e8)) + (i64.store offset=0x0e9 align=1 (i32.const 0) (local.get 0x0e9)) + (i64.store offset=0x0ea align=1 (i32.const 0) (local.get 0x0ea)) + (i64.store offset=0x0eb align=1 (i32.const 0) (local.get 0x0eb)) + (i64.store offset=0x0ec align=1 (i32.const 0) (local.get 0x0ec)) + (i64.store offset=0x0ed align=1 (i32.const 0) (local.get 0x0ed)) + (i64.store offset=0x0ee align=1 (i32.const 0) (local.get 0x0ee)) + (i64.store offset=0x0ef align=1 (i32.const 0) (local.get 0x0ef)) + (i64.store offset=0x0f0 align=1 (i32.const 0) (local.get 0x0f0)) + (i64.store offset=0x0f1 align=1 (i32.const 0) (local.get 0x0f1)) + (i64.store offset=0x0f2 align=1 (i32.const 0) (local.get 0x0f2)) + (i64.store offset=0x0f3 align=1 (i32.const 0) (local.get 0x0f3)) + (i64.store offset=0x0f4 align=1 (i32.const 0) (local.get 0x0f4)) + (i64.store offset=0x0f5 align=1 (i32.const 0) (local.get 0x0f5)) + (i64.store offset=0x0f6 align=1 (i32.const 0) (local.get 0x0f6)) + (i64.store offset=0x0f7 align=1 (i32.const 0) (local.get 0x0f7)) + (i64.store offset=0x0f8 align=1 (i32.const 0) (local.get 0x0f8)) + (i64.store offset=0x0f9 align=1 (i32.const 0) (local.get 0x0f9)) + (i64.store offset=0x0fa align=1 (i32.const 0) (local.get 0x0fa)) + (i64.store offset=0x0fb align=1 (i32.const 0) (local.get 0x0fb)) + (i64.store offset=0x0fc align=1 (i32.const 0) (local.get 0x0fc)) + (i64.store offset=0x0fd align=1 (i32.const 0) (local.get 0x0fd)) + (i64.store offset=0x0fe align=1 (i32.const 0) (local.get 0x0fe)) + (i64.store offset=0x0ff align=1 (i32.const 0) (local.get 0x0ff)) + (i64.store offset=0x100 align=1 (i32.const 0) (local.get 0x100)) + (i64.store offset=0x101 align=1 (i32.const 0) (local.get 0x101)) + (i64.store offset=0x102 align=1 (i32.const 0) (local.get 0x102)) + (i64.store offset=0x103 align=1 (i32.const 0) (local.get 0x103)) + (i64.store offset=0x104 align=1 (i32.const 0) (local.get 0x104)) + (i64.store offset=0x105 align=1 (i32.const 0) (local.get 0x105)) + (i64.store offset=0x106 align=1 (i32.const 0) (local.get 0x106)) + (i64.store offset=0x107 align=1 (i32.const 0) (local.get 0x107)) + (i64.store offset=0x108 align=1 (i32.const 0) (local.get 0x108)) + (i64.store offset=0x109 align=1 
(i32.const 0) (local.get 0x109)) + (i64.store offset=0x10a align=1 (i32.const 0) (local.get 0x10a)) + (i64.store offset=0x10b align=1 (i32.const 0) (local.get 0x10b)) + (i64.store offset=0x10c align=1 (i32.const 0) (local.get 0x10c)) + (i64.store offset=0x10d align=1 (i32.const 0) (local.get 0x10d)) + (i64.store offset=0x10e align=1 (i32.const 0) (local.get 0x10e)) + (i64.store offset=0x10f align=1 (i32.const 0) (local.get 0x10f)) + (i64.store offset=0x110 align=1 (i32.const 0) (local.get 0x110)) + (i64.store offset=0x111 align=1 (i32.const 0) (local.get 0x111)) + (i64.store offset=0x112 align=1 (i32.const 0) (local.get 0x112)) + (i64.store offset=0x113 align=1 (i32.const 0) (local.get 0x113)) + (i64.store offset=0x114 align=1 (i32.const 0) (local.get 0x114)) + (i64.store offset=0x115 align=1 (i32.const 0) (local.get 0x115)) + (i64.store offset=0x116 align=1 (i32.const 0) (local.get 0x116)) + (i64.store offset=0x117 align=1 (i32.const 0) (local.get 0x117)) + (i64.store offset=0x118 align=1 (i32.const 0) (local.get 0x118)) + (i64.store offset=0x119 align=1 (i32.const 0) (local.get 0x119)) + (i64.store offset=0x11a align=1 (i32.const 0) (local.get 0x11a)) + (i64.store offset=0x11b align=1 (i32.const 0) (local.get 0x11b)) + (i64.store offset=0x11c align=1 (i32.const 0) (local.get 0x11c)) + (i64.store offset=0x11d align=1 (i32.const 0) (local.get 0x11d)) + (i64.store offset=0x11e align=1 (i32.const 0) (local.get 0x11e)) + (i64.store offset=0x11f align=1 (i32.const 0) (local.get 0x11f)) + (i64.store offset=0x120 align=1 (i32.const 0) (local.get 0x120)) + (i64.store offset=0x121 align=1 (i32.const 0) (local.get 0x121)) + (i64.store offset=0x122 align=1 (i32.const 0) (local.get 0x122)) + (i64.store offset=0x123 align=1 (i32.const 0) (local.get 0x123)) + (i64.store offset=0x124 align=1 (i32.const 0) (local.get 0x124)) + (i64.store offset=0x125 align=1 (i32.const 0) (local.get 0x125)) + (i64.store offset=0x126 align=1 (i32.const 0) (local.get 0x126)) + (i64.store offset=0x127 align=1 (i32.const 0) (local.get 0x127)) + (i64.store offset=0x128 align=1 (i32.const 0) (local.get 0x128)) + (i64.store offset=0x129 align=1 (i32.const 0) (local.get 0x129)) + (i64.store offset=0x12a align=1 (i32.const 0) (local.get 0x12a)) + (i64.store offset=0x12b align=1 (i32.const 0) (local.get 0x12b)) + (i64.store offset=0x12c align=1 (i32.const 0) (local.get 0x12c)) + (i64.store offset=0x12d align=1 (i32.const 0) (local.get 0x12d)) + (i64.store offset=0x12e align=1 (i32.const 0) (local.get 0x12e)) + (i64.store offset=0x12f align=1 (i32.const 0) (local.get 0x12f)) + (i64.store offset=0x130 align=1 (i32.const 0) (local.get 0x130)) + (i64.store offset=0x131 align=1 (i32.const 0) (local.get 0x131)) + (i64.store offset=0x132 align=1 (i32.const 0) (local.get 0x132)) + (i64.store offset=0x133 align=1 (i32.const 0) (local.get 0x133)) + (i64.store offset=0x134 align=1 (i32.const 0) (local.get 0x134)) + (i64.store offset=0x135 align=1 (i32.const 0) (local.get 0x135)) + (i64.store offset=0x136 align=1 (i32.const 0) (local.get 0x136)) + (i64.store offset=0x137 align=1 (i32.const 0) (local.get 0x137)) + (i64.store offset=0x138 align=1 (i32.const 0) (local.get 0x138)) + (i64.store offset=0x139 align=1 (i32.const 0) (local.get 0x139)) + (i64.store offset=0x13a align=1 (i32.const 0) (local.get 0x13a)) + (i64.store offset=0x13b align=1 (i32.const 0) (local.get 0x13b)) + (i64.store offset=0x13c align=1 (i32.const 0) (local.get 0x13c)) + (i64.store offset=0x13d align=1 (i32.const 0) (local.get 0x13d)) + (i64.store offset=0x13e align=1 
(i32.const 0) (local.get 0x13e)) + (i64.store offset=0x13f align=1 (i32.const 0) (local.get 0x13f)) + (i64.store offset=0x140 align=1 (i32.const 0) (local.get 0x140)) + (i64.store offset=0x141 align=1 (i32.const 0) (local.get 0x141)) + (i64.store offset=0x142 align=1 (i32.const 0) (local.get 0x142)) + (i64.store offset=0x143 align=1 (i32.const 0) (local.get 0x143)) + (i64.store offset=0x144 align=1 (i32.const 0) (local.get 0x144)) + (i64.store offset=0x145 align=1 (i32.const 0) (local.get 0x145)) + (i64.store offset=0x146 align=1 (i32.const 0) (local.get 0x146)) + (i64.store offset=0x147 align=1 (i32.const 0) (local.get 0x147)) + (i64.store offset=0x148 align=1 (i32.const 0) (local.get 0x148)) + (i64.store offset=0x149 align=1 (i32.const 0) (local.get 0x149)) + (i64.store offset=0x14a align=1 (i32.const 0) (local.get 0x14a)) + (i64.store offset=0x14b align=1 (i32.const 0) (local.get 0x14b)) + (i64.store offset=0x14c align=1 (i32.const 0) (local.get 0x14c)) + (i64.store offset=0x14d align=1 (i32.const 0) (local.get 0x14d)) + (i64.store offset=0x14e align=1 (i32.const 0) (local.get 0x14e)) + (i64.store offset=0x14f align=1 (i32.const 0) (local.get 0x14f)) + (i64.store offset=0x150 align=1 (i32.const 0) (local.get 0x150)) + (i64.store offset=0x151 align=1 (i32.const 0) (local.get 0x151)) + (i64.store offset=0x152 align=1 (i32.const 0) (local.get 0x152)) + (i64.store offset=0x153 align=1 (i32.const 0) (local.get 0x153)) + (i64.store offset=0x154 align=1 (i32.const 0) (local.get 0x154)) + (i64.store offset=0x155 align=1 (i32.const 0) (local.get 0x155)) + (i64.store offset=0x156 align=1 (i32.const 0) (local.get 0x156)) + (i64.store offset=0x157 align=1 (i32.const 0) (local.get 0x157)) + (i64.store offset=0x158 align=1 (i32.const 0) (local.get 0x158)) + (i64.store offset=0x159 align=1 (i32.const 0) (local.get 0x159)) + (i64.store offset=0x15a align=1 (i32.const 0) (local.get 0x15a)) + (i64.store offset=0x15b align=1 (i32.const 0) (local.get 0x15b)) + (i64.store offset=0x15c align=1 (i32.const 0) (local.get 0x15c)) + (i64.store offset=0x15d align=1 (i32.const 0) (local.get 0x15d)) + (i64.store offset=0x15e align=1 (i32.const 0) (local.get 0x15e)) + (i64.store offset=0x15f align=1 (i32.const 0) (local.get 0x15f)) + (i64.store offset=0x160 align=1 (i32.const 0) (local.get 0x160)) + (i64.store offset=0x161 align=1 (i32.const 0) (local.get 0x161)) + (i64.store offset=0x162 align=1 (i32.const 0) (local.get 0x162)) + (i64.store offset=0x163 align=1 (i32.const 0) (local.get 0x163)) + (i64.store offset=0x164 align=1 (i32.const 0) (local.get 0x164)) + (i64.store offset=0x165 align=1 (i32.const 0) (local.get 0x165)) + (i64.store offset=0x166 align=1 (i32.const 0) (local.get 0x166)) + (i64.store offset=0x167 align=1 (i32.const 0) (local.get 0x167)) + (i64.store offset=0x168 align=1 (i32.const 0) (local.get 0x168)) + (i64.store offset=0x169 align=1 (i32.const 0) (local.get 0x169)) + (i64.store offset=0x16a align=1 (i32.const 0) (local.get 0x16a)) + (i64.store offset=0x16b align=1 (i32.const 0) (local.get 0x16b)) + (i64.store offset=0x16c align=1 (i32.const 0) (local.get 0x16c)) + (i64.store offset=0x16d align=1 (i32.const 0) (local.get 0x16d)) + (i64.store offset=0x16e align=1 (i32.const 0) (local.get 0x16e)) + (i64.store offset=0x16f align=1 (i32.const 0) (local.get 0x16f)) + (i64.store offset=0x170 align=1 (i32.const 0) (local.get 0x170)) + (i64.store offset=0x171 align=1 (i32.const 0) (local.get 0x171)) + (i64.store offset=0x172 align=1 (i32.const 0) (local.get 0x172)) + (i64.store offset=0x173 align=1 
(i32.const 0) (local.get 0x173)) + (i64.store offset=0x174 align=1 (i32.const 0) (local.get 0x174)) + (i64.store offset=0x175 align=1 (i32.const 0) (local.get 0x175)) + (i64.store offset=0x176 align=1 (i32.const 0) (local.get 0x176)) + (i64.store offset=0x177 align=1 (i32.const 0) (local.get 0x177)) + (i64.store offset=0x178 align=1 (i32.const 0) (local.get 0x178)) + (i64.store offset=0x179 align=1 (i32.const 0) (local.get 0x179)) + (i64.store offset=0x17a align=1 (i32.const 0) (local.get 0x17a)) + (i64.store offset=0x17b align=1 (i32.const 0) (local.get 0x17b)) + (i64.store offset=0x17c align=1 (i32.const 0) (local.get 0x17c)) + (i64.store offset=0x17d align=1 (i32.const 0) (local.get 0x17d)) + (i64.store offset=0x17e align=1 (i32.const 0) (local.get 0x17e)) + (i64.store offset=0x17f align=1 (i32.const 0) (local.get 0x17f)) + (i64.store offset=0x180 align=1 (i32.const 0) (local.get 0x180)) + (i64.store offset=0x181 align=1 (i32.const 0) (local.get 0x181)) + (i64.store offset=0x182 align=1 (i32.const 0) (local.get 0x182)) + (i64.store offset=0x183 align=1 (i32.const 0) (local.get 0x183)) + (i64.store offset=0x184 align=1 (i32.const 0) (local.get 0x184)) + (i64.store offset=0x185 align=1 (i32.const 0) (local.get 0x185)) + (i64.store offset=0x186 align=1 (i32.const 0) (local.get 0x186)) + (i64.store offset=0x187 align=1 (i32.const 0) (local.get 0x187)) + (i64.store offset=0x188 align=1 (i32.const 0) (local.get 0x188)) + (i64.store offset=0x189 align=1 (i32.const 0) (local.get 0x189)) + (i64.store offset=0x18a align=1 (i32.const 0) (local.get 0x18a)) + (i64.store offset=0x18b align=1 (i32.const 0) (local.get 0x18b)) + (i64.store offset=0x18c align=1 (i32.const 0) (local.get 0x18c)) + (i64.store offset=0x18d align=1 (i32.const 0) (local.get 0x18d)) + (i64.store offset=0x18e align=1 (i32.const 0) (local.get 0x18e)) + (i64.store offset=0x18f align=1 (i32.const 0) (local.get 0x18f)) + (i64.store offset=0x190 align=1 (i32.const 0) (local.get 0x190)) + (i64.store offset=0x191 align=1 (i32.const 0) (local.get 0x191)) + (i64.store offset=0x192 align=1 (i32.const 0) (local.get 0x192)) + (i64.store offset=0x193 align=1 (i32.const 0) (local.get 0x193)) + (i64.store offset=0x194 align=1 (i32.const 0) (local.get 0x194)) + (i64.store offset=0x195 align=1 (i32.const 0) (local.get 0x195)) + (i64.store offset=0x196 align=1 (i32.const 0) (local.get 0x196)) + (i64.store offset=0x197 align=1 (i32.const 0) (local.get 0x197)) + (i64.store offset=0x198 align=1 (i32.const 0) (local.get 0x198)) + (i64.store offset=0x199 align=1 (i32.const 0) (local.get 0x199)) + (i64.store offset=0x19a align=1 (i32.const 0) (local.get 0x19a)) + (i64.store offset=0x19b align=1 (i32.const 0) (local.get 0x19b)) + (i64.store offset=0x19c align=1 (i32.const 0) (local.get 0x19c)) + (i64.store offset=0x19d align=1 (i32.const 0) (local.get 0x19d)) + (i64.store offset=0x19e align=1 (i32.const 0) (local.get 0x19e)) + (i64.store offset=0x19f align=1 (i32.const 0) (local.get 0x19f)) + (i64.store offset=0x1a0 align=1 (i32.const 0) (local.get 0x1a0)) + (i64.store offset=0x1a1 align=1 (i32.const 0) (local.get 0x1a1)) + (i64.store offset=0x1a2 align=1 (i32.const 0) (local.get 0x1a2)) + (i64.store offset=0x1a3 align=1 (i32.const 0) (local.get 0x1a3)) + (i64.store offset=0x1a4 align=1 (i32.const 0) (local.get 0x1a4)) + (i64.store offset=0x1a5 align=1 (i32.const 0) (local.get 0x1a5)) + (i64.store offset=0x1a6 align=1 (i32.const 0) (local.get 0x1a6)) + (i64.store offset=0x1a7 align=1 (i32.const 0) (local.get 0x1a7)) + (i64.store offset=0x1a8 align=1 
(i32.const 0) (local.get 0x1a8)) + (i64.store offset=0x1a9 align=1 (i32.const 0) (local.get 0x1a9)) + (i64.store offset=0x1aa align=1 (i32.const 0) (local.get 0x1aa)) + (i64.store offset=0x1ab align=1 (i32.const 0) (local.get 0x1ab)) + (i64.store offset=0x1ac align=1 (i32.const 0) (local.get 0x1ac)) + (i64.store offset=0x1ad align=1 (i32.const 0) (local.get 0x1ad)) + (i64.store offset=0x1ae align=1 (i32.const 0) (local.get 0x1ae)) + (i64.store offset=0x1af align=1 (i32.const 0) (local.get 0x1af)) + (i64.store offset=0x1b0 align=1 (i32.const 0) (local.get 0x1b0)) + (i64.store offset=0x1b1 align=1 (i32.const 0) (local.get 0x1b1)) + (i64.store offset=0x1b2 align=1 (i32.const 0) (local.get 0x1b2)) + (i64.store offset=0x1b3 align=1 (i32.const 0) (local.get 0x1b3)) + (i64.store offset=0x1b4 align=1 (i32.const 0) (local.get 0x1b4)) + (i64.store offset=0x1b5 align=1 (i32.const 0) (local.get 0x1b5)) + (i64.store offset=0x1b6 align=1 (i32.const 0) (local.get 0x1b6)) + (i64.store offset=0x1b7 align=1 (i32.const 0) (local.get 0x1b7)) + (i64.store offset=0x1b8 align=1 (i32.const 0) (local.get 0x1b8)) + (i64.store offset=0x1b9 align=1 (i32.const 0) (local.get 0x1b9)) + (i64.store offset=0x1ba align=1 (i32.const 0) (local.get 0x1ba)) + (i64.store offset=0x1bb align=1 (i32.const 0) (local.get 0x1bb)) + (i64.store offset=0x1bc align=1 (i32.const 0) (local.get 0x1bc)) + (i64.store offset=0x1bd align=1 (i32.const 0) (local.get 0x1bd)) + (i64.store offset=0x1be align=1 (i32.const 0) (local.get 0x1be)) + (i64.store offset=0x1bf align=1 (i32.const 0) (local.get 0x1bf)) + (i64.store offset=0x1c0 align=1 (i32.const 0) (local.get 0x1c0)) + (i64.store offset=0x1c1 align=1 (i32.const 0) (local.get 0x1c1)) + (i64.store offset=0x1c2 align=1 (i32.const 0) (local.get 0x1c2)) + (i64.store offset=0x1c3 align=1 (i32.const 0) (local.get 0x1c3)) + (i64.store offset=0x1c4 align=1 (i32.const 0) (local.get 0x1c4)) + (i64.store offset=0x1c5 align=1 (i32.const 0) (local.get 0x1c5)) + (i64.store offset=0x1c6 align=1 (i32.const 0) (local.get 0x1c6)) + (i64.store offset=0x1c7 align=1 (i32.const 0) (local.get 0x1c7)) + (i64.store offset=0x1c8 align=1 (i32.const 0) (local.get 0x1c8)) + (i64.store offset=0x1c9 align=1 (i32.const 0) (local.get 0x1c9)) + (i64.store offset=0x1ca align=1 (i32.const 0) (local.get 0x1ca)) + (i64.store offset=0x1cb align=1 (i32.const 0) (local.get 0x1cb)) + (i64.store offset=0x1cc align=1 (i32.const 0) (local.get 0x1cc)) + (i64.store offset=0x1cd align=1 (i32.const 0) (local.get 0x1cd)) + (i64.store offset=0x1ce align=1 (i32.const 0) (local.get 0x1ce)) + (i64.store offset=0x1cf align=1 (i32.const 0) (local.get 0x1cf)) + (i64.store offset=0x1d0 align=1 (i32.const 0) (local.get 0x1d0)) + (i64.store offset=0x1d1 align=1 (i32.const 0) (local.get 0x1d1)) + (i64.store offset=0x1d2 align=1 (i32.const 0) (local.get 0x1d2)) + (i64.store offset=0x1d3 align=1 (i32.const 0) (local.get 0x1d3)) + (i64.store offset=0x1d4 align=1 (i32.const 0) (local.get 0x1d4)) + (i64.store offset=0x1d5 align=1 (i32.const 0) (local.get 0x1d5)) + (i64.store offset=0x1d6 align=1 (i32.const 0) (local.get 0x1d6)) + (i64.store offset=0x1d7 align=1 (i32.const 0) (local.get 0x1d7)) + (i64.store offset=0x1d8 align=1 (i32.const 0) (local.get 0x1d8)) + (i64.store offset=0x1d9 align=1 (i32.const 0) (local.get 0x1d9)) + (i64.store offset=0x1da align=1 (i32.const 0) (local.get 0x1da)) + (i64.store offset=0x1db align=1 (i32.const 0) (local.get 0x1db)) + (i64.store offset=0x1dc align=1 (i32.const 0) (local.get 0x1dc)) + (i64.store offset=0x1dd align=1 
(i32.const 0) (local.get 0x1dd)) + (i64.store offset=0x1de align=1 (i32.const 0) (local.get 0x1de)) + (i64.store offset=0x1df align=1 (i32.const 0) (local.get 0x1df)) + (i64.store offset=0x1e0 align=1 (i32.const 0) (local.get 0x1e0)) + (i64.store offset=0x1e1 align=1 (i32.const 0) (local.get 0x1e1)) + (i64.store offset=0x1e2 align=1 (i32.const 0) (local.get 0x1e2)) + (i64.store offset=0x1e3 align=1 (i32.const 0) (local.get 0x1e3)) + (i64.store offset=0x1e4 align=1 (i32.const 0) (local.get 0x1e4)) + (i64.store offset=0x1e5 align=1 (i32.const 0) (local.get 0x1e5)) + (i64.store offset=0x1e6 align=1 (i32.const 0) (local.get 0x1e6)) + (i64.store offset=0x1e7 align=1 (i32.const 0) (local.get 0x1e7)) + (i64.store offset=0x1e8 align=1 (i32.const 0) (local.get 0x1e8)) + (i64.store offset=0x1e9 align=1 (i32.const 0) (local.get 0x1e9)) + (i64.store offset=0x1ea align=1 (i32.const 0) (local.get 0x1ea)) + (i64.store offset=0x1eb align=1 (i32.const 0) (local.get 0x1eb)) + (i64.store offset=0x1ec align=1 (i32.const 0) (local.get 0x1ec)) + (i64.store offset=0x1ed align=1 (i32.const 0) (local.get 0x1ed)) + (i64.store offset=0x1ee align=1 (i32.const 0) (local.get 0x1ee)) + (i64.store offset=0x1ef align=1 (i32.const 0) (local.get 0x1ef)) + (i64.store offset=0x1f0 align=1 (i32.const 0) (local.get 0x1f0)) + (i64.store offset=0x1f1 align=1 (i32.const 0) (local.get 0x1f1)) + (i64.store offset=0x1f2 align=1 (i32.const 0) (local.get 0x1f2)) + (i64.store offset=0x1f3 align=1 (i32.const 0) (local.get 0x1f3)) + (i64.store offset=0x1f4 align=1 (i32.const 0) (local.get 0x1f4)) + (i64.store offset=0x1f5 align=1 (i32.const 0) (local.get 0x1f5)) + (i64.store offset=0x1f6 align=1 (i32.const 0) (local.get 0x1f6)) + (i64.store offset=0x1f7 align=1 (i32.const 0) (local.get 0x1f7)) + (i64.store offset=0x1f8 align=1 (i32.const 0) (local.get 0x1f8)) + (i64.store offset=0x1f9 align=1 (i32.const 0) (local.get 0x1f9)) + (i64.store offset=0x1fa align=1 (i32.const 0) (local.get 0x1fa)) + (i64.store offset=0x1fb align=1 (i32.const 0) (local.get 0x1fb)) + (i64.store offset=0x1fc align=1 (i32.const 0) (local.get 0x1fc)) + (i64.store offset=0x1fd align=1 (i32.const 0) (local.get 0x1fd)) + (i64.store offset=0x1fe align=1 (i32.const 0) (local.get 0x1fe)) + (i64.store offset=0x1ff align=1 (i32.const 0) (local.get 0x1ff)) + (i64.store offset=0x200 align=1 (i32.const 0) (local.get 0x200)) + (i64.store offset=0x201 align=1 (i32.const 0) (local.get 0x201)) + (i64.store offset=0x202 align=1 (i32.const 0) (local.get 0x202)) + (i64.store offset=0x203 align=1 (i32.const 0) (local.get 0x203)) + (i64.store offset=0x204 align=1 (i32.const 0) (local.get 0x204)) + (i64.store offset=0x205 align=1 (i32.const 0) (local.get 0x205)) + (i64.store offset=0x206 align=1 (i32.const 0) (local.get 0x206)) + (i64.store offset=0x207 align=1 (i32.const 0) (local.get 0x207)) + (i64.store offset=0x208 align=1 (i32.const 0) (local.get 0x208)) + (i64.store offset=0x209 align=1 (i32.const 0) (local.get 0x209)) + (i64.store offset=0x20a align=1 (i32.const 0) (local.get 0x20a)) + (i64.store offset=0x20b align=1 (i32.const 0) (local.get 0x20b)) + (i64.store offset=0x20c align=1 (i32.const 0) (local.get 0x20c)) + (i64.store offset=0x20d align=1 (i32.const 0) (local.get 0x20d)) + (i64.store offset=0x20e align=1 (i32.const 0) (local.get 0x20e)) + (i64.store offset=0x20f align=1 (i32.const 0) (local.get 0x20f)) + (i64.store offset=0x210 align=1 (i32.const 0) (local.get 0x210)) + (i64.store offset=0x211 align=1 (i32.const 0) (local.get 0x211)) + (i64.store offset=0x212 align=1 
(i32.const 0) (local.get 0x212)) + (i64.store offset=0x213 align=1 (i32.const 0) (local.get 0x213)) + (i64.store offset=0x214 align=1 (i32.const 0) (local.get 0x214)) + (i64.store offset=0x215 align=1 (i32.const 0) (local.get 0x215)) + (i64.store offset=0x216 align=1 (i32.const 0) (local.get 0x216)) + (i64.store offset=0x217 align=1 (i32.const 0) (local.get 0x217)) + (i64.store offset=0x218 align=1 (i32.const 0) (local.get 0x218)) + (i64.store offset=0x219 align=1 (i32.const 0) (local.get 0x219)) + (i64.store offset=0x21a align=1 (i32.const 0) (local.get 0x21a)) + (i64.store offset=0x21b align=1 (i32.const 0) (local.get 0x21b)) + (i64.store offset=0x21c align=1 (i32.const 0) (local.get 0x21c)) + (i64.store offset=0x21d align=1 (i32.const 0) (local.get 0x21d)) + (i64.store offset=0x21e align=1 (i32.const 0) (local.get 0x21e)) + (i64.store offset=0x21f align=1 (i32.const 0) (local.get 0x21f)) + (i64.store offset=0x220 align=1 (i32.const 0) (local.get 0x220)) + (i64.store offset=0x221 align=1 (i32.const 0) (local.get 0x221)) + (i64.store offset=0x222 align=1 (i32.const 0) (local.get 0x222)) + (i64.store offset=0x223 align=1 (i32.const 0) (local.get 0x223)) + (i64.store offset=0x224 align=1 (i32.const 0) (local.get 0x224)) + (i64.store offset=0x225 align=1 (i32.const 0) (local.get 0x225)) + (i64.store offset=0x226 align=1 (i32.const 0) (local.get 0x226)) + (i64.store offset=0x227 align=1 (i32.const 0) (local.get 0x227)) + (i64.store offset=0x228 align=1 (i32.const 0) (local.get 0x228)) + (i64.store offset=0x229 align=1 (i32.const 0) (local.get 0x229)) + (i64.store offset=0x22a align=1 (i32.const 0) (local.get 0x22a)) + (i64.store offset=0x22b align=1 (i32.const 0) (local.get 0x22b)) + (i64.store offset=0x22c align=1 (i32.const 0) (local.get 0x22c)) + (i64.store offset=0x22d align=1 (i32.const 0) (local.get 0x22d)) + (i64.store offset=0x22e align=1 (i32.const 0) (local.get 0x22e)) + (i64.store offset=0x22f align=1 (i32.const 0) (local.get 0x22f)) + (i64.store offset=0x230 align=1 (i32.const 0) (local.get 0x230)) + (i64.store offset=0x231 align=1 (i32.const 0) (local.get 0x231)) + (i64.store offset=0x232 align=1 (i32.const 0) (local.get 0x232)) + (i64.store offset=0x233 align=1 (i32.const 0) (local.get 0x233)) + (i64.store offset=0x234 align=1 (i32.const 0) (local.get 0x234)) + (i64.store offset=0x235 align=1 (i32.const 0) (local.get 0x235)) + (i64.store offset=0x236 align=1 (i32.const 0) (local.get 0x236)) + (i64.store offset=0x237 align=1 (i32.const 0) (local.get 0x237)) + (i64.store offset=0x238 align=1 (i32.const 0) (local.get 0x238)) + (i64.store offset=0x239 align=1 (i32.const 0) (local.get 0x239)) + (i64.store offset=0x23a align=1 (i32.const 0) (local.get 0x23a)) + (i64.store offset=0x23b align=1 (i32.const 0) (local.get 0x23b)) + (i64.store offset=0x23c align=1 (i32.const 0) (local.get 0x23c)) + (i64.store offset=0x23d align=1 (i32.const 0) (local.get 0x23d)) + (i64.store offset=0x23e align=1 (i32.const 0) (local.get 0x23e)) + (i64.store offset=0x23f align=1 (i32.const 0) (local.get 0x23f)) + (i64.store offset=0x240 align=1 (i32.const 0) (local.get 0x240)) + (i64.store offset=0x241 align=1 (i32.const 0) (local.get 0x241)) + (i64.store offset=0x242 align=1 (i32.const 0) (local.get 0x242)) + (i64.store offset=0x243 align=1 (i32.const 0) (local.get 0x243)) + (i64.store offset=0x244 align=1 (i32.const 0) (local.get 0x244)) + (i64.store offset=0x245 align=1 (i32.const 0) (local.get 0x245)) + (i64.store offset=0x246 align=1 (i32.const 0) (local.get 0x246)) + (i64.store offset=0x247 align=1 
(i32.const 0) (local.get 0x247)) + (i64.store offset=0x248 align=1 (i32.const 0) (local.get 0x248)) + (i64.store offset=0x249 align=1 (i32.const 0) (local.get 0x249)) + (i64.store offset=0x24a align=1 (i32.const 0) (local.get 0x24a)) + (i64.store offset=0x24b align=1 (i32.const 0) (local.get 0x24b)) + (i64.store offset=0x24c align=1 (i32.const 0) (local.get 0x24c)) + (i64.store offset=0x24d align=1 (i32.const 0) (local.get 0x24d)) + (i64.store offset=0x24e align=1 (i32.const 0) (local.get 0x24e)) + (i64.store offset=0x24f align=1 (i32.const 0) (local.get 0x24f)) + (i64.store offset=0x250 align=1 (i32.const 0) (local.get 0x250)) + (i64.store offset=0x251 align=1 (i32.const 0) (local.get 0x251)) + (i64.store offset=0x252 align=1 (i32.const 0) (local.get 0x252)) + (i64.store offset=0x253 align=1 (i32.const 0) (local.get 0x253)) + (i64.store offset=0x254 align=1 (i32.const 0) (local.get 0x254)) + (i64.store offset=0x255 align=1 (i32.const 0) (local.get 0x255)) + (i64.store offset=0x256 align=1 (i32.const 0) (local.get 0x256)) + (i64.store offset=0x257 align=1 (i32.const 0) (local.get 0x257)) + (i64.store offset=0x258 align=1 (i32.const 0) (local.get 0x258)) + (i64.store offset=0x259 align=1 (i32.const 0) (local.get 0x259)) + (i64.store offset=0x25a align=1 (i32.const 0) (local.get 0x25a)) + (i64.store offset=0x25b align=1 (i32.const 0) (local.get 0x25b)) + (i64.store offset=0x25c align=1 (i32.const 0) (local.get 0x25c)) + (i64.store offset=0x25d align=1 (i32.const 0) (local.get 0x25d)) + (i64.store offset=0x25e align=1 (i32.const 0) (local.get 0x25e)) + (i64.store offset=0x25f align=1 (i32.const 0) (local.get 0x25f)) + (i64.store offset=0x260 align=1 (i32.const 0) (local.get 0x260)) + (i64.store offset=0x261 align=1 (i32.const 0) (local.get 0x261)) + (i64.store offset=0x262 align=1 (i32.const 0) (local.get 0x262)) + (i64.store offset=0x263 align=1 (i32.const 0) (local.get 0x263)) + (i64.store offset=0x264 align=1 (i32.const 0) (local.get 0x264)) + (i64.store offset=0x265 align=1 (i32.const 0) (local.get 0x265)) + (i64.store offset=0x266 align=1 (i32.const 0) (local.get 0x266)) + (i64.store offset=0x267 align=1 (i32.const 0) (local.get 0x267)) + (i64.store offset=0x268 align=1 (i32.const 0) (local.get 0x268)) + (i64.store offset=0x269 align=1 (i32.const 0) (local.get 0x269)) + (i64.store offset=0x26a align=1 (i32.const 0) (local.get 0x26a)) + (i64.store offset=0x26b align=1 (i32.const 0) (local.get 0x26b)) + (i64.store offset=0x26c align=1 (i32.const 0) (local.get 0x26c)) + (i64.store offset=0x26d align=1 (i32.const 0) (local.get 0x26d)) + (i64.store offset=0x26e align=1 (i32.const 0) (local.get 0x26e)) + (i64.store offset=0x26f align=1 (i32.const 0) (local.get 0x26f)) + (i64.store offset=0x270 align=1 (i32.const 0) (local.get 0x270)) + (i64.store offset=0x271 align=1 (i32.const 0) (local.get 0x271)) + (i64.store offset=0x272 align=1 (i32.const 0) (local.get 0x272)) + (i64.store offset=0x273 align=1 (i32.const 0) (local.get 0x273)) + (i64.store offset=0x274 align=1 (i32.const 0) (local.get 0x274)) + (i64.store offset=0x275 align=1 (i32.const 0) (local.get 0x275)) + (i64.store offset=0x276 align=1 (i32.const 0) (local.get 0x276)) + (i64.store offset=0x277 align=1 (i32.const 0) (local.get 0x277)) + (i64.store offset=0x278 align=1 (i32.const 0) (local.get 0x278)) + (i64.store offset=0x279 align=1 (i32.const 0) (local.get 0x279)) + (i64.store offset=0x27a align=1 (i32.const 0) (local.get 0x27a)) + (i64.store offset=0x27b align=1 (i32.const 0) (local.get 0x27b)) + (i64.store offset=0x27c align=1 
(i32.const 0) (local.get 0x27c)) + (i64.store offset=0x27d align=1 (i32.const 0) (local.get 0x27d)) + (i64.store offset=0x27e align=1 (i32.const 0) (local.get 0x27e)) + (i64.store offset=0x27f align=1 (i32.const 0) (local.get 0x27f)) + (i64.store offset=0x280 align=1 (i32.const 0) (local.get 0x280)) + (i64.store offset=0x281 align=1 (i32.const 0) (local.get 0x281)) + (i64.store offset=0x282 align=1 (i32.const 0) (local.get 0x282)) + (i64.store offset=0x283 align=1 (i32.const 0) (local.get 0x283)) + (i64.store offset=0x284 align=1 (i32.const 0) (local.get 0x284)) + (i64.store offset=0x285 align=1 (i32.const 0) (local.get 0x285)) + (i64.store offset=0x286 align=1 (i32.const 0) (local.get 0x286)) + (i64.store offset=0x287 align=1 (i32.const 0) (local.get 0x287)) + (i64.store offset=0x288 align=1 (i32.const 0) (local.get 0x288)) + (i64.store offset=0x289 align=1 (i32.const 0) (local.get 0x289)) + (i64.store offset=0x28a align=1 (i32.const 0) (local.get 0x28a)) + (i64.store offset=0x28b align=1 (i32.const 0) (local.get 0x28b)) + (i64.store offset=0x28c align=1 (i32.const 0) (local.get 0x28c)) + (i64.store offset=0x28d align=1 (i32.const 0) (local.get 0x28d)) + (i64.store offset=0x28e align=1 (i32.const 0) (local.get 0x28e)) + (i64.store offset=0x28f align=1 (i32.const 0) (local.get 0x28f)) + (i64.store offset=0x290 align=1 (i32.const 0) (local.get 0x290)) + (i64.store offset=0x291 align=1 (i32.const 0) (local.get 0x291)) + (i64.store offset=0x292 align=1 (i32.const 0) (local.get 0x292)) + (i64.store offset=0x293 align=1 (i32.const 0) (local.get 0x293)) + (i64.store offset=0x294 align=1 (i32.const 0) (local.get 0x294)) + (i64.store offset=0x295 align=1 (i32.const 0) (local.get 0x295)) + (i64.store offset=0x296 align=1 (i32.const 0) (local.get 0x296)) + (i64.store offset=0x297 align=1 (i32.const 0) (local.get 0x297)) + (i64.store offset=0x298 align=1 (i32.const 0) (local.get 0x298)) + (i64.store offset=0x299 align=1 (i32.const 0) (local.get 0x299)) + (i64.store offset=0x29a align=1 (i32.const 0) (local.get 0x29a)) + (i64.store offset=0x29b align=1 (i32.const 0) (local.get 0x29b)) + (i64.store offset=0x29c align=1 (i32.const 0) (local.get 0x29c)) + (i64.store offset=0x29d align=1 (i32.const 0) (local.get 0x29d)) + (i64.store offset=0x29e align=1 (i32.const 0) (local.get 0x29e)) + (i64.store offset=0x29f align=1 (i32.const 0) (local.get 0x29f)) + (i64.store offset=0x2a0 align=1 (i32.const 0) (local.get 0x2a0)) + (i64.store offset=0x2a1 align=1 (i32.const 0) (local.get 0x2a1)) + (i64.store offset=0x2a2 align=1 (i32.const 0) (local.get 0x2a2)) + (i64.store offset=0x2a3 align=1 (i32.const 0) (local.get 0x2a3)) + (i64.store offset=0x2a4 align=1 (i32.const 0) (local.get 0x2a4)) + (i64.store offset=0x2a5 align=1 (i32.const 0) (local.get 0x2a5)) + (i64.store offset=0x2a6 align=1 (i32.const 0) (local.get 0x2a6)) + (i64.store offset=0x2a7 align=1 (i32.const 0) (local.get 0x2a7)) + (i64.store offset=0x2a8 align=1 (i32.const 0) (local.get 0x2a8)) + (i64.store offset=0x2a9 align=1 (i32.const 0) (local.get 0x2a9)) + (i64.store offset=0x2aa align=1 (i32.const 0) (local.get 0x2aa)) + (i64.store offset=0x2ab align=1 (i32.const 0) (local.get 0x2ab)) + (i64.store offset=0x2ac align=1 (i32.const 0) (local.get 0x2ac)) + (i64.store offset=0x2ad align=1 (i32.const 0) (local.get 0x2ad)) + (i64.store offset=0x2ae align=1 (i32.const 0) (local.get 0x2ae)) + (i64.store offset=0x2af align=1 (i32.const 0) (local.get 0x2af)) + (i64.store offset=0x2b0 align=1 (i32.const 0) (local.get 0x2b0)) + (i64.store offset=0x2b1 align=1 
(i32.const 0) (local.get 0x2b1)) + (i64.store offset=0x2b2 align=1 (i32.const 0) (local.get 0x2b2)) + (i64.store offset=0x2b3 align=1 (i32.const 0) (local.get 0x2b3)) + (i64.store offset=0x2b4 align=1 (i32.const 0) (local.get 0x2b4)) + (i64.store offset=0x2b5 align=1 (i32.const 0) (local.get 0x2b5)) + (i64.store offset=0x2b6 align=1 (i32.const 0) (local.get 0x2b6)) + (i64.store offset=0x2b7 align=1 (i32.const 0) (local.get 0x2b7)) + (i64.store offset=0x2b8 align=1 (i32.const 0) (local.get 0x2b8)) + (i64.store offset=0x2b9 align=1 (i32.const 0) (local.get 0x2b9)) + (i64.store offset=0x2ba align=1 (i32.const 0) (local.get 0x2ba)) + (i64.store offset=0x2bb align=1 (i32.const 0) (local.get 0x2bb)) + (i64.store offset=0x2bc align=1 (i32.const 0) (local.get 0x2bc)) + (i64.store offset=0x2bd align=1 (i32.const 0) (local.get 0x2bd)) + (i64.store offset=0x2be align=1 (i32.const 0) (local.get 0x2be)) + (i64.store offset=0x2bf align=1 (i32.const 0) (local.get 0x2bf)) + (i64.store offset=0x2c0 align=1 (i32.const 0) (local.get 0x2c0)) + (i64.store offset=0x2c1 align=1 (i32.const 0) (local.get 0x2c1)) + (i64.store offset=0x2c2 align=1 (i32.const 0) (local.get 0x2c2)) + (i64.store offset=0x2c3 align=1 (i32.const 0) (local.get 0x2c3)) + (i64.store offset=0x2c4 align=1 (i32.const 0) (local.get 0x2c4)) + (i64.store offset=0x2c5 align=1 (i32.const 0) (local.get 0x2c5)) + (i64.store offset=0x2c6 align=1 (i32.const 0) (local.get 0x2c6)) + (i64.store offset=0x2c7 align=1 (i32.const 0) (local.get 0x2c7)) + (i64.store offset=0x2c8 align=1 (i32.const 0) (local.get 0x2c8)) + (i64.store offset=0x2c9 align=1 (i32.const 0) (local.get 0x2c9)) + (i64.store offset=0x2ca align=1 (i32.const 0) (local.get 0x2ca)) + (i64.store offset=0x2cb align=1 (i32.const 0) (local.get 0x2cb)) + (i64.store offset=0x2cc align=1 (i32.const 0) (local.get 0x2cc)) + (i64.store offset=0x2cd align=1 (i32.const 0) (local.get 0x2cd)) + (i64.store offset=0x2ce align=1 (i32.const 0) (local.get 0x2ce)) + (i64.store offset=0x2cf align=1 (i32.const 0) (local.get 0x2cf)) + (i64.store offset=0x2d0 align=1 (i32.const 0) (local.get 0x2d0)) + (i64.store offset=0x2d1 align=1 (i32.const 0) (local.get 0x2d1)) + (i64.store offset=0x2d2 align=1 (i32.const 0) (local.get 0x2d2)) + (i64.store offset=0x2d3 align=1 (i32.const 0) (local.get 0x2d3)) + (i64.store offset=0x2d4 align=1 (i32.const 0) (local.get 0x2d4)) + (i64.store offset=0x2d5 align=1 (i32.const 0) (local.get 0x2d5)) + (i64.store offset=0x2d6 align=1 (i32.const 0) (local.get 0x2d6)) + (i64.store offset=0x2d7 align=1 (i32.const 0) (local.get 0x2d7)) + (i64.store offset=0x2d8 align=1 (i32.const 0) (local.get 0x2d8)) + (i64.store offset=0x2d9 align=1 (i32.const 0) (local.get 0x2d9)) + (i64.store offset=0x2da align=1 (i32.const 0) (local.get 0x2da)) + (i64.store offset=0x2db align=1 (i32.const 0) (local.get 0x2db)) + (i64.store offset=0x2dc align=1 (i32.const 0) (local.get 0x2dc)) + (i64.store offset=0x2dd align=1 (i32.const 0) (local.get 0x2dd)) + (i64.store offset=0x2de align=1 (i32.const 0) (local.get 0x2de)) + (i64.store offset=0x2df align=1 (i32.const 0) (local.get 0x2df)) + (i64.store offset=0x2e0 align=1 (i32.const 0) (local.get 0x2e0)) + (i64.store offset=0x2e1 align=1 (i32.const 0) (local.get 0x2e1)) + (i64.store offset=0x2e2 align=1 (i32.const 0) (local.get 0x2e2)) + (i64.store offset=0x2e3 align=1 (i32.const 0) (local.get 0x2e3)) + (i64.store offset=0x2e4 align=1 (i32.const 0) (local.get 0x2e4)) + (i64.store offset=0x2e5 align=1 (i32.const 0) (local.get 0x2e5)) + (i64.store offset=0x2e6 align=1 
(i32.const 0) (local.get 0x2e6)) + (i64.store offset=0x2e7 align=1 (i32.const 0) (local.get 0x2e7)) + (i64.store offset=0x2e8 align=1 (i32.const 0) (local.get 0x2e8)) + (i64.store offset=0x2e9 align=1 (i32.const 0) (local.get 0x2e9)) + (i64.store offset=0x2ea align=1 (i32.const 0) (local.get 0x2ea)) + (i64.store offset=0x2eb align=1 (i32.const 0) (local.get 0x2eb)) + (i64.store offset=0x2ec align=1 (i32.const 0) (local.get 0x2ec)) + (i64.store offset=0x2ed align=1 (i32.const 0) (local.get 0x2ed)) + (i64.store offset=0x2ee align=1 (i32.const 0) (local.get 0x2ee)) + (i64.store offset=0x2ef align=1 (i32.const 0) (local.get 0x2ef)) + (i64.store offset=0x2f0 align=1 (i32.const 0) (local.get 0x2f0)) + (i64.store offset=0x2f1 align=1 (i32.const 0) (local.get 0x2f1)) + (i64.store offset=0x2f2 align=1 (i32.const 0) (local.get 0x2f2)) + (i64.store offset=0x2f3 align=1 (i32.const 0) (local.get 0x2f3)) + (i64.store offset=0x2f4 align=1 (i32.const 0) (local.get 0x2f4)) + (i64.store offset=0x2f5 align=1 (i32.const 0) (local.get 0x2f5)) + (i64.store offset=0x2f6 align=1 (i32.const 0) (local.get 0x2f6)) + (i64.store offset=0x2f7 align=1 (i32.const 0) (local.get 0x2f7)) + (i64.store offset=0x2f8 align=1 (i32.const 0) (local.get 0x2f8)) + (i64.store offset=0x2f9 align=1 (i32.const 0) (local.get 0x2f9)) + (i64.store offset=0x2fa align=1 (i32.const 0) (local.get 0x2fa)) + (i64.store offset=0x2fb align=1 (i32.const 0) (local.get 0x2fb)) + (i64.store offset=0x2fc align=1 (i32.const 0) (local.get 0x2fc)) + (i64.store offset=0x2fd align=1 (i32.const 0) (local.get 0x2fd)) + (i64.store offset=0x2fe align=1 (i32.const 0) (local.get 0x2fe)) + (i64.store offset=0x2ff align=1 (i32.const 0) (local.get 0x2ff)) + (i64.store offset=0x300 align=1 (i32.const 0) (local.get 0x300)) + (i64.store offset=0x301 align=1 (i32.const 0) (local.get 0x301)) + (i64.store offset=0x302 align=1 (i32.const 0) (local.get 0x302)) + (i64.store offset=0x303 align=1 (i32.const 0) (local.get 0x303)) + (i64.store offset=0x304 align=1 (i32.const 0) (local.get 0x304)) + (i64.store offset=0x305 align=1 (i32.const 0) (local.get 0x305)) + (i64.store offset=0x306 align=1 (i32.const 0) (local.get 0x306)) + (i64.store offset=0x307 align=1 (i32.const 0) (local.get 0x307)) + (i64.store offset=0x308 align=1 (i32.const 0) (local.get 0x308)) + (i64.store offset=0x309 align=1 (i32.const 0) (local.get 0x309)) + (i64.store offset=0x30a align=1 (i32.const 0) (local.get 0x30a)) + (i64.store offset=0x30b align=1 (i32.const 0) (local.get 0x30b)) + (i64.store offset=0x30c align=1 (i32.const 0) (local.get 0x30c)) + (i64.store offset=0x30d align=1 (i32.const 0) (local.get 0x30d)) + (i64.store offset=0x30e align=1 (i32.const 0) (local.get 0x30e)) + (i64.store offset=0x30f align=1 (i32.const 0) (local.get 0x30f)) + (i64.store offset=0x310 align=1 (i32.const 0) (local.get 0x310)) + (i64.store offset=0x311 align=1 (i32.const 0) (local.get 0x311)) + (i64.store offset=0x312 align=1 (i32.const 0) (local.get 0x312)) + (i64.store offset=0x313 align=1 (i32.const 0) (local.get 0x313)) + (i64.store offset=0x314 align=1 (i32.const 0) (local.get 0x314)) + (i64.store offset=0x315 align=1 (i32.const 0) (local.get 0x315)) + (i64.store offset=0x316 align=1 (i32.const 0) (local.get 0x316)) + (i64.store offset=0x317 align=1 (i32.const 0) (local.get 0x317)) + (i64.store offset=0x318 align=1 (i32.const 0) (local.get 0x318)) + (i64.store offset=0x319 align=1 (i32.const 0) (local.get 0x319)) + (i64.store offset=0x31a align=1 (i32.const 0) (local.get 0x31a)) + (i64.store offset=0x31b align=1 
(i32.const 0) (local.get 0x31b)) + (i64.store offset=0x31c align=1 (i32.const 0) (local.get 0x31c)) + (i64.store offset=0x31d align=1 (i32.const 0) (local.get 0x31d)) + (i64.store offset=0x31e align=1 (i32.const 0) (local.get 0x31e)) + (i64.store offset=0x31f align=1 (i32.const 0) (local.get 0x31f)) + (i64.store offset=0x320 align=1 (i32.const 0) (local.get 0x320)) + (i64.store offset=0x321 align=1 (i32.const 0) (local.get 0x321)) + (i64.store offset=0x322 align=1 (i32.const 0) (local.get 0x322)) + (i64.store offset=0x323 align=1 (i32.const 0) (local.get 0x323)) + (i64.store offset=0x324 align=1 (i32.const 0) (local.get 0x324)) + (i64.store offset=0x325 align=1 (i32.const 0) (local.get 0x325)) + (i64.store offset=0x326 align=1 (i32.const 0) (local.get 0x326)) + (i64.store offset=0x327 align=1 (i32.const 0) (local.get 0x327)) + (i64.store offset=0x328 align=1 (i32.const 0) (local.get 0x328)) + (i64.store offset=0x329 align=1 (i32.const 0) (local.get 0x329)) + (i64.store offset=0x32a align=1 (i32.const 0) (local.get 0x32a)) + (i64.store offset=0x32b align=1 (i32.const 0) (local.get 0x32b)) + (i64.store offset=0x32c align=1 (i32.const 0) (local.get 0x32c)) + (i64.store offset=0x32d align=1 (i32.const 0) (local.get 0x32d)) + (i64.store offset=0x32e align=1 (i32.const 0) (local.get 0x32e)) + (i64.store offset=0x32f align=1 (i32.const 0) (local.get 0x32f)) + (i64.store offset=0x330 align=1 (i32.const 0) (local.get 0x330)) + (i64.store offset=0x331 align=1 (i32.const 0) (local.get 0x331)) + (i64.store offset=0x332 align=1 (i32.const 0) (local.get 0x332)) + (i64.store offset=0x333 align=1 (i32.const 0) (local.get 0x333)) + (i64.store offset=0x334 align=1 (i32.const 0) (local.get 0x334)) + (i64.store offset=0x335 align=1 (i32.const 0) (local.get 0x335)) + (i64.store offset=0x336 align=1 (i32.const 0) (local.get 0x336)) + (i64.store offset=0x337 align=1 (i32.const 0) (local.get 0x337)) + (i64.store offset=0x338 align=1 (i32.const 0) (local.get 0x338)) + (i64.store offset=0x339 align=1 (i32.const 0) (local.get 0x339)) + (i64.store offset=0x33a align=1 (i32.const 0) (local.get 0x33a)) + (i64.store offset=0x33b align=1 (i32.const 0) (local.get 0x33b)) + (i64.store offset=0x33c align=1 (i32.const 0) (local.get 0x33c)) + (i64.store offset=0x33d align=1 (i32.const 0) (local.get 0x33d)) + (i64.store offset=0x33e align=1 (i32.const 0) (local.get 0x33e)) + (i64.store offset=0x33f align=1 (i32.const 0) (local.get 0x33f)) + (i64.store offset=0x340 align=1 (i32.const 0) (local.get 0x340)) + (i64.store offset=0x341 align=1 (i32.const 0) (local.get 0x341)) + (i64.store offset=0x342 align=1 (i32.const 0) (local.get 0x342)) + (i64.store offset=0x343 align=1 (i32.const 0) (local.get 0x343)) + (i64.store offset=0x344 align=1 (i32.const 0) (local.get 0x344)) + (i64.store offset=0x345 align=1 (i32.const 0) (local.get 0x345)) + (i64.store offset=0x346 align=1 (i32.const 0) (local.get 0x346)) + (i64.store offset=0x347 align=1 (i32.const 0) (local.get 0x347)) + (i64.store offset=0x348 align=1 (i32.const 0) (local.get 0x348)) + (i64.store offset=0x349 align=1 (i32.const 0) (local.get 0x349)) + (i64.store offset=0x34a align=1 (i32.const 0) (local.get 0x34a)) + (i64.store offset=0x34b align=1 (i32.const 0) (local.get 0x34b)) + (i64.store offset=0x34c align=1 (i32.const 0) (local.get 0x34c)) + (i64.store offset=0x34d align=1 (i32.const 0) (local.get 0x34d)) + (i64.store offset=0x34e align=1 (i32.const 0) (local.get 0x34e)) + (i64.store offset=0x34f align=1 (i32.const 0) (local.get 0x34f)) + (i64.store offset=0x350 align=1 
(i32.const 0) (local.get 0x350)) + (i64.store offset=0x351 align=1 (i32.const 0) (local.get 0x351)) + (i64.store offset=0x352 align=1 (i32.const 0) (local.get 0x352)) + (i64.store offset=0x353 align=1 (i32.const 0) (local.get 0x353)) + (i64.store offset=0x354 align=1 (i32.const 0) (local.get 0x354)) + (i64.store offset=0x355 align=1 (i32.const 0) (local.get 0x355)) + (i64.store offset=0x356 align=1 (i32.const 0) (local.get 0x356)) + (i64.store offset=0x357 align=1 (i32.const 0) (local.get 0x357)) + (i64.store offset=0x358 align=1 (i32.const 0) (local.get 0x358)) + (i64.store offset=0x359 align=1 (i32.const 0) (local.get 0x359)) + (i64.store offset=0x35a align=1 (i32.const 0) (local.get 0x35a)) + (i64.store offset=0x35b align=1 (i32.const 0) (local.get 0x35b)) + (i64.store offset=0x35c align=1 (i32.const 0) (local.get 0x35c)) + (i64.store offset=0x35d align=1 (i32.const 0) (local.get 0x35d)) + (i64.store offset=0x35e align=1 (i32.const 0) (local.get 0x35e)) + (i64.store offset=0x35f align=1 (i32.const 0) (local.get 0x35f)) + (i64.store offset=0x360 align=1 (i32.const 0) (local.get 0x360)) + (i64.store offset=0x361 align=1 (i32.const 0) (local.get 0x361)) + (i64.store offset=0x362 align=1 (i32.const 0) (local.get 0x362)) + (i64.store offset=0x363 align=1 (i32.const 0) (local.get 0x363)) + (i64.store offset=0x364 align=1 (i32.const 0) (local.get 0x364)) + (i64.store offset=0x365 align=1 (i32.const 0) (local.get 0x365)) + (i64.store offset=0x366 align=1 (i32.const 0) (local.get 0x366)) + (i64.store offset=0x367 align=1 (i32.const 0) (local.get 0x367)) + (i64.store offset=0x368 align=1 (i32.const 0) (local.get 0x368)) + (i64.store offset=0x369 align=1 (i32.const 0) (local.get 0x369)) + (i64.store offset=0x36a align=1 (i32.const 0) (local.get 0x36a)) + (i64.store offset=0x36b align=1 (i32.const 0) (local.get 0x36b)) + (i64.store offset=0x36c align=1 (i32.const 0) (local.get 0x36c)) + (i64.store offset=0x36d align=1 (i32.const 0) (local.get 0x36d)) + (i64.store offset=0x36e align=1 (i32.const 0) (local.get 0x36e)) + (i64.store offset=0x36f align=1 (i32.const 0) (local.get 0x36f)) + (i64.store offset=0x370 align=1 (i32.const 0) (local.get 0x370)) + (i64.store offset=0x371 align=1 (i32.const 0) (local.get 0x371)) + (i64.store offset=0x372 align=1 (i32.const 0) (local.get 0x372)) + (i64.store offset=0x373 align=1 (i32.const 0) (local.get 0x373)) + (i64.store offset=0x374 align=1 (i32.const 0) (local.get 0x374)) + (i64.store offset=0x375 align=1 (i32.const 0) (local.get 0x375)) + (i64.store offset=0x376 align=1 (i32.const 0) (local.get 0x376)) + (i64.store offset=0x377 align=1 (i32.const 0) (local.get 0x377)) + (i64.store offset=0x378 align=1 (i32.const 0) (local.get 0x378)) + (i64.store offset=0x379 align=1 (i32.const 0) (local.get 0x379)) + (i64.store offset=0x37a align=1 (i32.const 0) (local.get 0x37a)) + (i64.store offset=0x37b align=1 (i32.const 0) (local.get 0x37b)) + (i64.store offset=0x37c align=1 (i32.const 0) (local.get 0x37c)) + (i64.store offset=0x37d align=1 (i32.const 0) (local.get 0x37d)) + (i64.store offset=0x37e align=1 (i32.const 0) (local.get 0x37e)) + (i64.store offset=0x37f align=1 (i32.const 0) (local.get 0x37f)) + (i64.store offset=0x380 align=1 (i32.const 0) (local.get 0x380)) + (i64.store offset=0x381 align=1 (i32.const 0) (local.get 0x381)) + (i64.store offset=0x382 align=1 (i32.const 0) (local.get 0x382)) + (i64.store offset=0x383 align=1 (i32.const 0) (local.get 0x383)) + (i64.store offset=0x384 align=1 (i32.const 0) (local.get 0x384)) + (i64.store offset=0x385 align=1 
(i32.const 0) (local.get 0x385)) + (i64.store offset=0x386 align=1 (i32.const 0) (local.get 0x386)) + (i64.store offset=0x387 align=1 (i32.const 0) (local.get 0x387)) + (i64.store offset=0x388 align=1 (i32.const 0) (local.get 0x388)) + (i64.store offset=0x389 align=1 (i32.const 0) (local.get 0x389)) + (i64.store offset=0x38a align=1 (i32.const 0) (local.get 0x38a)) + (i64.store offset=0x38b align=1 (i32.const 0) (local.get 0x38b)) + (i64.store offset=0x38c align=1 (i32.const 0) (local.get 0x38c)) + (i64.store offset=0x38d align=1 (i32.const 0) (local.get 0x38d)) + (i64.store offset=0x38e align=1 (i32.const 0) (local.get 0x38e)) + (i64.store offset=0x38f align=1 (i32.const 0) (local.get 0x38f)) + (i64.store offset=0x390 align=1 (i32.const 0) (local.get 0x390)) + (i64.store offset=0x391 align=1 (i32.const 0) (local.get 0x391)) + (i64.store offset=0x392 align=1 (i32.const 0) (local.get 0x392)) + (i64.store offset=0x393 align=1 (i32.const 0) (local.get 0x393)) + (i64.store offset=0x394 align=1 (i32.const 0) (local.get 0x394)) + (i64.store offset=0x395 align=1 (i32.const 0) (local.get 0x395)) + (i64.store offset=0x396 align=1 (i32.const 0) (local.get 0x396)) + (i64.store offset=0x397 align=1 (i32.const 0) (local.get 0x397)) + (i64.store offset=0x398 align=1 (i32.const 0) (local.get 0x398)) + (i64.store offset=0x399 align=1 (i32.const 0) (local.get 0x399)) + (i64.store offset=0x39a align=1 (i32.const 0) (local.get 0x39a)) + (i64.store offset=0x39b align=1 (i32.const 0) (local.get 0x39b)) + (i64.store offset=0x39c align=1 (i32.const 0) (local.get 0x39c)) + (i64.store offset=0x39d align=1 (i32.const 0) (local.get 0x39d)) + (i64.store offset=0x39e align=1 (i32.const 0) (local.get 0x39e)) + (i64.store offset=0x39f align=1 (i32.const 0) (local.get 0x39f)) + (i64.store offset=0x3a0 align=1 (i32.const 0) (local.get 0x3a0)) + (i64.store offset=0x3a1 align=1 (i32.const 0) (local.get 0x3a1)) + (i64.store offset=0x3a2 align=1 (i32.const 0) (local.get 0x3a2)) + (i64.store offset=0x3a3 align=1 (i32.const 0) (local.get 0x3a3)) + (i64.store offset=0x3a4 align=1 (i32.const 0) (local.get 0x3a4)) + (i64.store offset=0x3a5 align=1 (i32.const 0) (local.get 0x3a5)) + (i64.store offset=0x3a6 align=1 (i32.const 0) (local.get 0x3a6)) + (i64.store offset=0x3a7 align=1 (i32.const 0) (local.get 0x3a7)) + (i64.store offset=0x3a8 align=1 (i32.const 0) (local.get 0x3a8)) + (i64.store offset=0x3a9 align=1 (i32.const 0) (local.get 0x3a9)) + (i64.store offset=0x3aa align=1 (i32.const 0) (local.get 0x3aa)) + (i64.store offset=0x3ab align=1 (i32.const 0) (local.get 0x3ab)) + (i64.store offset=0x3ac align=1 (i32.const 0) (local.get 0x3ac)) + (i64.store offset=0x3ad align=1 (i32.const 0) (local.get 0x3ad)) + (i64.store offset=0x3ae align=1 (i32.const 0) (local.get 0x3ae)) + (i64.store offset=0x3af align=1 (i32.const 0) (local.get 0x3af)) + (i64.store offset=0x3b0 align=1 (i32.const 0) (local.get 0x3b0)) + (i64.store offset=0x3b1 align=1 (i32.const 0) (local.get 0x3b1)) + (i64.store offset=0x3b2 align=1 (i32.const 0) (local.get 0x3b2)) + (i64.store offset=0x3b3 align=1 (i32.const 0) (local.get 0x3b3)) + (i64.store offset=0x3b4 align=1 (i32.const 0) (local.get 0x3b4)) + (i64.store offset=0x3b5 align=1 (i32.const 0) (local.get 0x3b5)) + (i64.store offset=0x3b6 align=1 (i32.const 0) (local.get 0x3b6)) + (i64.store offset=0x3b7 align=1 (i32.const 0) (local.get 0x3b7)) + (i64.store offset=0x3b8 align=1 (i32.const 0) (local.get 0x3b8)) + (i64.store offset=0x3b9 align=1 (i32.const 0) (local.get 0x3b9)) + (i64.store offset=0x3ba align=1 
(i32.const 0) (local.get 0x3ba)) + (i64.store offset=0x3bb align=1 (i32.const 0) (local.get 0x3bb)) + (i64.store offset=0x3bc align=1 (i32.const 0) (local.get 0x3bc)) + (i64.store offset=0x3bd align=1 (i32.const 0) (local.get 0x3bd)) + (i64.store offset=0x3be align=1 (i32.const 0) (local.get 0x3be)) + (i64.store offset=0x3bf align=1 (i32.const 0) (local.get 0x3bf)) + (i64.store offset=0x3c0 align=1 (i32.const 0) (local.get 0x3c0)) + (i64.store offset=0x3c1 align=1 (i32.const 0) (local.get 0x3c1)) + (i64.store offset=0x3c2 align=1 (i32.const 0) (local.get 0x3c2)) + (i64.store offset=0x3c3 align=1 (i32.const 0) (local.get 0x3c3)) + (i64.store offset=0x3c4 align=1 (i32.const 0) (local.get 0x3c4)) + (i64.store offset=0x3c5 align=1 (i32.const 0) (local.get 0x3c5)) + (i64.store offset=0x3c6 align=1 (i32.const 0) (local.get 0x3c6)) + (i64.store offset=0x3c7 align=1 (i32.const 0) (local.get 0x3c7)) + (i64.store offset=0x3c8 align=1 (i32.const 0) (local.get 0x3c8)) + (i64.store offset=0x3c9 align=1 (i32.const 0) (local.get 0x3c9)) + (i64.store offset=0x3ca align=1 (i32.const 0) (local.get 0x3ca)) + (i64.store offset=0x3cb align=1 (i32.const 0) (local.get 0x3cb)) + (i64.store offset=0x3cc align=1 (i32.const 0) (local.get 0x3cc)) + (i64.store offset=0x3cd align=1 (i32.const 0) (local.get 0x3cd)) + (i64.store offset=0x3ce align=1 (i32.const 0) (local.get 0x3ce)) + (i64.store offset=0x3cf align=1 (i32.const 0) (local.get 0x3cf)) + (i64.store offset=0x3d0 align=1 (i32.const 0) (local.get 0x3d0)) + (i64.store offset=0x3d1 align=1 (i32.const 0) (local.get 0x3d1)) + (i64.store offset=0x3d2 align=1 (i32.const 0) (local.get 0x3d2)) + (i64.store offset=0x3d3 align=1 (i32.const 0) (local.get 0x3d3)) + (i64.store offset=0x3d4 align=1 (i32.const 0) (local.get 0x3d4)) + (i64.store offset=0x3d5 align=1 (i32.const 0) (local.get 0x3d5)) + (i64.store offset=0x3d6 align=1 (i32.const 0) (local.get 0x3d6)) + (i64.store offset=0x3d7 align=1 (i32.const 0) (local.get 0x3d7)) + (i64.store offset=0x3d8 align=1 (i32.const 0) (local.get 0x3d8)) + (i64.store offset=0x3d9 align=1 (i32.const 0) (local.get 0x3d9)) + (i64.store offset=0x3da align=1 (i32.const 0) (local.get 0x3da)) + (i64.store offset=0x3db align=1 (i32.const 0) (local.get 0x3db)) + (i64.store offset=0x3dc align=1 (i32.const 0) (local.get 0x3dc)) + (i64.store offset=0x3dd align=1 (i32.const 0) (local.get 0x3dd)) + (i64.store offset=0x3de align=1 (i32.const 0) (local.get 0x3de)) + (i64.store offset=0x3df align=1 (i32.const 0) (local.get 0x3df)) + (i64.store offset=0x3e0 align=1 (i32.const 0) (local.get 0x3e0)) + (i64.store offset=0x3e1 align=1 (i32.const 0) (local.get 0x3e1)) + (i64.store offset=0x3e2 align=1 (i32.const 0) (local.get 0x3e2)) + (i64.store offset=0x3e3 align=1 (i32.const 0) (local.get 0x3e3)) + (i64.store offset=0x3e4 align=1 (i32.const 0) (local.get 0x3e4)) + (i64.store offset=0x3e5 align=1 (i32.const 0) (local.get 0x3e5)) + (i64.store offset=0x3e6 align=1 (i32.const 0) (local.get 0x3e6)) + (i64.store offset=0x3e7 align=1 (i32.const 0) (local.get 0x3e7)) + (i64.store offset=0x3e8 align=1 (i32.const 0) (local.get 0x3e8)) + (i64.store offset=0x3e9 align=1 (i32.const 0) (local.get 0x3e9)) + (i64.store offset=0x3ea align=1 (i32.const 0) (local.get 0x3ea)) + (i64.store offset=0x3eb align=1 (i32.const 0) (local.get 0x3eb)) + (i64.store offset=0x3ec align=1 (i32.const 0) (local.get 0x3ec)) + (i64.store offset=0x3ed align=1 (i32.const 0) (local.get 0x3ed)) + (i64.store offset=0x3ee align=1 (i32.const 0) (local.get 0x3ee)) + (i64.store offset=0x3ef align=1 
(i32.const 0) (local.get 0x3ef)) + (i64.store offset=0x3f0 align=1 (i32.const 0) (local.get 0x3f0)) + (i64.store offset=0x3f1 align=1 (i32.const 0) (local.get 0x3f1)) + (i64.store offset=0x3f2 align=1 (i32.const 0) (local.get 0x3f2)) + (i64.store offset=0x3f3 align=1 (i32.const 0) (local.get 0x3f3)) + (i64.store offset=0x3f4 align=1 (i32.const 0) (local.get 0x3f4)) + (i64.store offset=0x3f5 align=1 (i32.const 0) (local.get 0x3f5)) + (i64.store offset=0x3f6 align=1 (i32.const 0) (local.get 0x3f6)) + (i64.store offset=0x3f7 align=1 (i32.const 0) (local.get 0x3f7)) + (i64.store offset=0x3f8 align=1 (i32.const 0) (local.get 0x3f8)) + (i64.store offset=0x3f9 align=1 (i32.const 0) (local.get 0x3f9)) + (i64.store offset=0x3fa align=1 (i32.const 0) (local.get 0x3fa)) + (i64.store offset=0x3fb align=1 (i32.const 0) (local.get 0x3fb)) + (i64.store offset=0x3fc align=1 (i32.const 0) (local.get 0x3fc)) + (i64.store offset=0x3fd align=1 (i32.const 0) (local.get 0x3fd)) + (i64.store offset=0x3fe align=1 (i32.const 0) (local.get 0x3fe)) + (i64.store offset=0x3ff align=1 (i32.const 0) (local.get 0x3ff)) + (i64.store offset=0x400 align=1 (i32.const 0) (local.get 0x400)) + (i64.store offset=0x401 align=1 (i32.const 0) (local.get 0x401)) + (i64.store offset=0x402 align=1 (i32.const 0) (local.get 0x402)) + (i64.store offset=0x403 align=1 (i32.const 0) (local.get 0x403)) + (i64.store offset=0x404 align=1 (i32.const 0) (local.get 0x404)) + (i64.store offset=0x405 align=1 (i32.const 0) (local.get 0x405)) + (i64.store offset=0x406 align=1 (i32.const 0) (local.get 0x406)) + (i64.store offset=0x407 align=1 (i32.const 0) (local.get 0x407)) + (i64.store offset=0x408 align=1 (i32.const 0) (local.get 0x408)) + (i64.store offset=0x409 align=1 (i32.const 0) (local.get 0x409)) + (i64.store offset=0x40a align=1 (i32.const 0) (local.get 0x40a)) + (i64.store offset=0x40b align=1 (i32.const 0) (local.get 0x40b)) + (i64.store offset=0x40c align=1 (i32.const 0) (local.get 0x40c)) + (i64.store offset=0x40d align=1 (i32.const 0) (local.get 0x40d)) + (i64.store offset=0x40e align=1 (i32.const 0) (local.get 0x40e)) + (i64.store offset=0x40f align=1 (i32.const 0) (local.get 0x40f)) + (i64.store offset=0x410 align=1 (i32.const 0) (local.get 0x410)) + (i64.store offset=0x411 align=1 (i32.const 0) (local.get 0x411)) + (i64.store offset=0x412 align=1 (i32.const 0) (local.get 0x412)) + (i64.store offset=0x413 align=1 (i32.const 0) (local.get 0x413)) + (i64.store offset=0x414 align=1 (i32.const 0) (local.get 0x414)) + (i64.store offset=0x415 align=1 (i32.const 0) (local.get 0x415)) + (i64.store offset=0x416 align=1 (i32.const 0) (local.get 0x416)) + (i64.store offset=0x417 align=1 (i32.const 0) (local.get 0x417)) + (i64.store offset=0x418 align=1 (i32.const 0) (local.get 0x418)) + (i64.store offset=0x419 align=1 (i32.const 0) (local.get 0x419)) + (i64.store offset=0x41a align=1 (i32.const 0) (local.get 0x41a)) + (i64.store offset=0x41b align=1 (i32.const 0) (local.get 0x41b)) + (i64.store offset=0x41c align=1 (i32.const 0) (local.get 0x41c)) + (i64.store offset=0x41d align=1 (i32.const 0) (local.get 0x41d)) + (i64.store offset=0x41e align=1 (i32.const 0) (local.get 0x41e)) + (i64.store offset=0x41f align=1 (i32.const 0) (local.get 0x41f)) + ) +) diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs new file mode 100644 index 0000000000000..366352d7f5c39 --- /dev/null +++ b/client/executor/wasmtime/src/tests.rs @@ -0,0 +1,309 @@ +// This file is part of Substrate. 
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use codec::{Decode as _, Encode as _};
+use sc_executor_common::{runtime_blob::RuntimeBlob, wasm_runtime::WasmModule};
+use sc_runtime_test::wasm_binary_unwrap;
+use std::sync::Arc;
+
+type HostFunctions = sp_io::SubstrateHostFunctions;
+
+struct RuntimeBuilder {
+	code: Option<&'static str>,
+	fast_instance_reuse: bool,
+	canonicalize_nans: bool,
+	deterministic_stack: bool,
+	heap_pages: u32,
+	max_memory_pages: Option<u32>,
+}
+
+impl RuntimeBuilder {
+	/// Returns a new builder that won't use the fast instance reuse mechanism, but instead will
+	/// create a new runtime instance each time.
+	fn new_on_demand() -> Self {
+		Self {
+			code: None,
+			fast_instance_reuse: false,
+			canonicalize_nans: false,
+			deterministic_stack: false,
+			heap_pages: 1024,
+			max_memory_pages: None,
+		}
+	}
+
+	fn use_wat(&mut self, code: &'static str) {
+		self.code = Some(code);
+	}
+
+	fn canonicalize_nans(&mut self, canonicalize_nans: bool) {
+		self.canonicalize_nans = canonicalize_nans;
+	}
+
+	fn deterministic_stack(&mut self, deterministic_stack: bool) {
+		self.deterministic_stack = deterministic_stack;
+	}
+
+	fn max_memory_pages(&mut self, max_memory_pages: Option<u32>) {
+		self.max_memory_pages = max_memory_pages;
+	}
+
+	fn build(self) -> Arc<dyn WasmModule> {
+		let blob = {
+			let wasm: Vec<u8>;
+
+			let wasm = match self.code {
+				None => wasm_binary_unwrap(),
+				Some(wat) => {
+					wasm = wat::parse_str(wat).expect("wat parsing failed");
+					&wasm
+				},
+			};
+
+			RuntimeBlob::uncompress_if_needed(&wasm)
+				.expect("failed to create a runtime blob out of test runtime")
+		};
+
+		let rt = crate::create_runtime(
+			blob,
+			crate::Config {
+				heap_pages: self.heap_pages,
+				max_memory_pages: self.max_memory_pages,
+				allow_missing_func_imports: true,
+				cache_path: None,
+				semantics: crate::Semantics {
+					fast_instance_reuse: self.fast_instance_reuse,
+					deterministic_stack_limit: match self.deterministic_stack {
+						true => Some(crate::DeterministicStackLimit {
+							logical_max: 65536,
+							native_stack_max: 256 * 1024 * 1024,
+						}),
+						false => None,
+					},
+					canonicalize_nans: self.canonicalize_nans,
+				},
+			},
+			{
+				use sp_wasm_interface::HostFunctions as _;
+				HostFunctions::host_functions()
+			},
+		)
+		.expect("cannot create runtime");
+
+		Arc::new(rt) as Arc<dyn WasmModule>
+	}
+}
+
+#[test]
+fn test_nan_canonicalization() {
+	let runtime = {
+		let mut builder = RuntimeBuilder::new_on_demand();
+		builder.canonicalize_nans(true);
+		builder.build()
+	};
+
+	let instance = runtime.new_instance().expect("failed to instantiate a runtime");
+
+	/// A NaN with canonical payload bits.
+	const CANONICAL_NAN_BITS: u32 = 0x7fc00000;
+	/// A NaN value with an arbitrary payload.
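+	/// (Bit-layout note, added for clarity: an f32 has 1 sign bit, 8 exponent bits, and
+	/// a 23-bit payload; any value with all exponent bits set and a non-zero payload is
+	/// a NaN. 0x7fc00000 sets only the most significant payload bit, which is the
+	/// canonical quiet-NaN form, while 0x7f812345 carries arbitrary payload bits.)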
+	const ARBITRARY_NAN_BITS: u32 = 0x7f812345;
+
+	// This test works like this: we essentially do
+	//
+	// a + b
+	//
+	// where
+	//
+	// * a is a nan with arbitrary bits in its payload
+	// * b is 1.
+	//
+	// According to the wasm spec, if one of the inputs to the operation is a non-canonical NaN
+	// then the result will be a NaN with non-deterministic payload bits.
+	//
+	// However, with the `canonicalize_nans` option turned on above, we expect that the output will
+	// be a canonical NaN.
+	//
+	// We extrapolate the result of this test and assume that all intermediate computations
+	// that involve floats are sanitized and cannot produce a non-deterministic NaN.
+
+	let params = (u32::to_le_bytes(ARBITRARY_NAN_BITS), u32::to_le_bytes(1)).encode();
+	let res = {
+		let raw_result = instance.call_export("test_fp_f32add", &params).unwrap();
+		u32::from_le_bytes(<[u8; 4]>::decode(&mut &raw_result[..]).unwrap())
+	};
+	assert_eq!(res, CANONICAL_NAN_BITS);
+}
+
+#[test]
+fn test_stack_depth_reaching() {
+	const TEST_GUARD_PAGE_SKIP: &str = include_str!("test-guard-page-skip.wat");
+
+	let runtime = {
+		let mut builder = RuntimeBuilder::new_on_demand();
+		builder.use_wat(TEST_GUARD_PAGE_SKIP);
+		builder.deterministic_stack(true);
+		builder.build()
+	};
+	let instance = runtime.new_instance().expect("failed to instantiate a runtime");
+
+	let err = instance.call_export("test-many-locals", &[]).unwrap_err();
+
+	assert!(
+		format!("{:?}", err).starts_with("Other(\"Wasm execution trapped: wasm trap: unreachable")
+	);
+}
+
+#[test]
+fn test_max_memory_pages() {
+	fn try_instantiate(
+		max_memory_pages: Option<u32>,
+		wat: &'static str,
+	) -> Result<(), Box<dyn std::error::Error>> {
+		let runtime = {
+			let mut builder = RuntimeBuilder::new_on_demand();
+			builder.use_wat(wat);
+			builder.max_memory_pages(max_memory_pages);
+			builder.build()
+		};
+		let instance = runtime.new_instance()?;
+		let _ = instance.call_export("main", &[])?;
+		Ok(())
+	}
+
+	// check that the old behavior is preserved. That is, if no limit is set we allow 4 GiB of memory.
+	try_instantiate(
+		None,
+		r#"
+		(module
+			;; we want to allocate the maximum number of pages supported in wasm for this test.
+			;;
+			;; However, due to a bug in wasmtime (I think wasmi is also affected) it is only possible
+			;; to allocate 65536 - 1 pages.
+			;;
+			;; Then, during creation of the Substrate Runtime instance, 1024 (heap_pages) pages are
+			;; mounted.
+			;;
+			;; Thus 65535 = 64511 + 1024
+			(import "env" "memory" (memory 64511))
+
+			(global (export "__heap_base") i32 (i32.const 0))
+			(func (export "main")
+				(param i32 i32) (result i64)
+				(i64.const 0)
+			)
+		)
+		"#,
+	)
+	.unwrap();
+
+	// max is not specified, therefore it's implied to be 65536 pages (4 GiB).
+	//
+	// max_memory_pages = 1 (initial) + 1024 (heap_pages)
+	try_instantiate(
+		Some(1 + 1024),
+		r#"
+		(module
+
+			(import "env" "memory" (memory 1)) ;; <- 1 initial, max is not specified
+
+			(global (export "__heap_base") i32 (i32.const 0))
+			(func (export "main")
+				(param i32 i32) (result i64)
+				(i64.const 0)
+			)
+		)
+		"#,
+	)
+	.unwrap();
+
+	// max is specified explicitly to 2048 pages.
+	try_instantiate(
+		Some(1 + 1024),
+		r#"
+		(module
+
+			(import "env" "memory" (memory 1 2048)) ;; <- max is 2048
+
+			(global (export "__heap_base") i32 (i32.const 0))
+			(func (export "main")
+				(param i32 i32) (result i64)
+				(i64.const 0)
+			)
+		)
+		"#,
+	)
+	.unwrap();
+
+	// memory grow should work as long as it doesn't exceed 1025 pages in total.
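+	//
+	// (Worked out for clarity: the limit passed below is 0 initial + 1024 heap pages +
+	// 25 pages of headroom, so after the instance mounts its 1024 heap pages, the
+	// module's `memory.grow` of 25 pages still fits under the configured maximum and
+	// must succeed.)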
+	try_instantiate(
+		Some(0 + 1024 + 25),
+		r#"
+		(module
+			(import "env" "memory" (memory 0)) ;; <- zero starting pages.
+
+			(global (export "__heap_base") i32 (i32.const 0))
+			(func (export "main")
+				(param i32 i32) (result i64)
+
+				;; assert(memory.grow returns != -1)
+				(if
+					(i32.eq
+						(memory.grow
+							(i32.const 25)
+						)
+						(i32.const -1)
+					)
+					(unreachable)
+				)
+
+				(i64.const 0)
+			)
+		)
+		"#,
+	)
+	.unwrap();
+
+	// We start with 1025 pages and try to grow by at least one more page.
+	try_instantiate(
+		Some(1 + 1024),
+		r#"
+		(module
+			(import "env" "memory" (memory 1)) ;; <- initial=1, meaning after heap pages mount the
+			                                   ;; total will be already 1025
+			(global (export "__heap_base") i32 (i32.const 0))
+			(func (export "main")
+				(param i32 i32) (result i64)
+
+				;; assert(memory.grow returns == -1)
+				(if
+					(i32.ne
+						(memory.grow
+							(i32.const 1)
+						)
+						(i32.const -1)
+					)
+					(unreachable)
+				)
+
+				(i64.const 0)
+			)
+		)
+		"#,
+	)
+	.unwrap();
+}
diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs
index d2de95d4cc715..2c135fe7a343b 100644
--- a/client/executor/wasmtime/src/util.rs
+++ b/client/executor/wasmtime/src/util.rs
@@ -1,28 +1,43 @@
-// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use sp_wasm_interface::Value;

-use std::ops::Range;
+/// Converts a [`wasmtime::Val`] into a substrate runtime interface [`Value`].
+///
+/// Panics if the given value doesn't have a corresponding variant in `Value`.
+pub fn from_wasmtime_val(val: wasmtime::Val) -> Value {
+	match val {
+		wasmtime::Val::I32(v) => Value::I32(v),
+		wasmtime::Val::I64(v) => Value::I64(v),
+		wasmtime::Val::F32(f_bits) => Value::F32(f_bits),
+		wasmtime::Val::F64(f_bits) => Value::F64(f_bits),
+		v => panic!("Given value type is unsupported by Substrate: {:?}", v),
+	}
+}

-/// Construct a range from an offset to a data length after the offset.
-/// Returns None if the end of the range would exceed some maximum offset.
-pub fn checked_range(offset: usize, len: usize, max: usize) -> Option<Range<usize>> {
-	let end = offset.checked_add(len)?;
-	if end <= max {
-		Some(offset..end)
-	} else {
-		None
+/// Converts a sp_wasm_interface's [`Value`] into the corresponding variant in wasmtime's
+/// [`wasmtime::Val`].
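+///
+/// (Note for clarity: this is the inverse of `from_wasmtime_val` above for the four
+/// numeric types; unlike that function it needs no panicking fallback, since every
+/// `Value` variant has a wasmtime counterpart.)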
+pub fn into_wasmtime_val(value: Value) -> wasmtime::Val {
+	match value {
+		Value::I32(v) => wasmtime::Val::I32(v),
+		Value::I64(v) => wasmtime::Val::I64(v),
+		Value::F32(f_bits) => wasmtime::Val::F32(f_bits),
+		Value::F64(f_bits) => wasmtime::Val::F64(f_bits),
+	}
}
diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml
new file mode 100644
index 0000000000000..a444125fdfa11
--- /dev/null
+++ b/client/finality-grandpa-warp-sync/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+description = "A request-response protocol for handling grandpa warp sync requests"
+name = "sc-finality-grandpa-warp-sync"
+version = "0.10.0-dev"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "2.0.0" }
+derive_more = "0.99.11"
+futures = "0.3.8"
+log = "0.4.11"
+prost = "0.8"
+sc-client-api = { version = "4.0.0-dev", path = "../api" }
+sc-finality-grandpa = { version = "0.10.0-dev", path = "../finality-grandpa" }
+sc-network = { version = "0.10.0-dev", path = "../network" }
+sc-service = { version = "0.10.0-dev", path = "../service" }
+sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
+sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" }
+sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" }
+
+[dev-dependencies]
+finality-grandpa = { version = "0.14.4" }
+rand = "0.8"
+sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
+sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" }
+sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" }
+sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" }
+substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml
index 8966f5e8f657a..7fdd91e557ab7 100644
--- a/client/finality-grandpa/Cargo.toml
+++ b/client/finality-grandpa/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "sc-finality-grandpa"
-version = "0.8.0"
+version = "0.10.0-dev"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
@@ -16,46 +16,46 @@ targets = ["x86_64-unknown-linux-gnu"]

[dependencies]
derive_more = "0.99.2"
-fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" }
-futures = "0.3.4"
+dyn-clone = "1.0"
+fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" }
+futures = "0.3.9"
futures-timer = "3.0.1"
log = "0.4.8"
-parking_lot = "0.10.0"
-rand = "0.7.2"
-parity-scale-codec = { version = "1.3.4", features = ["derive"] }
-sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" }
-sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" }
-sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" }
-sp-utils = { version = "2.0.0", path = "../../primitives/utils" }
-sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" }
-sc-consensus = { version = "0.8.0", path = "../../client/consensus/common" }
-sp-core = { version = "2.0.0", path = "../../primitives/core" }
-sp-keystore = { version = "0.8.0", path =
"../../primitives/keystore" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -serde_json = "1.0.41" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0", path = "../network" } -sc-network-gossip = { version = "0.8.0", path = "../network-gossip" } -sp-finality-grandpa = { version = "2.0.0", path = "../../primitives/finality-grandpa" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } -pin-project = "0.4.6" +parking_lot = "0.11.1" +rand = "0.8.4" +parity-scale-codec = { version = "2.0.0", features = ["derive"] } +sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } +sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } +sc-keystore = { version = "4.0.0-dev", path = "../keystore" } +serde_json = "1.0.68" +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } +sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +finality-grandpa = { version = "0.14.4", features = ["derive-codec"] } +async-trait = "0.1.50" [dev-dependencies] assert_matches = "1.3.0" -finality-grandpa = { version = "0.12.3", features = ["derive-codec", "test-helpers"] } -sc-network = { version = "0.8.0", path = "../network" } +finality-grandpa = { version = "0.14.1", features = [ + "derive-codec", + "test-helpers", +] } +sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-test = { version = "0.8.0", path = "../network/test" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -tokio = { version = "0.2", features = ["rt-core"] } +sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", 
path = "../../test-utils/runtime/client" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +tokio = "1.10" tempfile = "3.1.0" -sp-api = { version = "2.0.0", path = "../../primitives/api" } diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index c0c2ea8b27d88..d2976ee71275f 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa-rpc" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "RPC extensions for the GRANDPA finality gadget" repository = "https://github.com/paritytech/substrate/" @@ -9,31 +9,30 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" readme = "README.md" [dependencies] -sc-finality-grandpa = { version = "0.8.0", path = "../" } -sc-rpc = { version = "2.0.0", path = "../../rpc" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -finality-grandpa = { version = "0.12.3", features = ["derive-codec"] } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -jsonrpc-pubsub = "15.0.0" -futures = { version = "0.3.4", features = ["compat"] } +sc-finality-grandpa = { version = "0.10.0-dev", path = "../" } +sc-rpc = { version = "4.0.0-dev", path = "../../rpc" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +finality-grandpa = { version = "0.14.4", features = ["derive-codec"] } +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +jsonrpc-pubsub = "18.0.0" +futures = "0.3.16" serde = { version = "1.0.105", features = ["derive"] } serde_json = "1.0.50" log = "0.4.8" derive_more = "0.99.2" -parity-scale-codec = { version = "1.3.0", features = ["derive"] } -sc-client-api = { version = "2.0.0", path = "../../api" } +parity-scale-codec = { version = "2.0.0", features = ["derive"] } +sc-client-api = { version = "4.0.0-dev", path = "../../api" } [dev-dependencies] -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } -sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-rpc = { version = "2.0.0", path = "../../rpc", features = ["test-helpers"] } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0", path = "../../../primitives/finality-grandpa" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -lazy_static = "1.4" +sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } +sc-rpc = { version = "4.0.0-dev", path = "../../rpc", features = [ + "test-helpers", +] } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } +sp-keyring = { version = "4.0.0-dev", path = "../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git 
a/client/finality-grandpa/rpc/src/error.rs b/client/finality-grandpa/rpc/src/error.rs
index 6464acbe10ea0..c812b78f3fd8e 100644
--- a/client/finality-grandpa/rpc/src/error.rs
+++ b/client/finality-grandpa/rpc/src/error.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
@@ -30,7 +30,7 @@ pub enum Error {
	VoterStateReportsUnreasonablyLargeNumbers,
	/// GRANDPA prove finality failed.
	#[display(fmt = "GRANDPA prove finality rpc failed: {}", _0)]
-	ProveFinalityFailed(sp_blockchain::Error),
+	ProveFinalityFailed(sc_finality_grandpa::FinalityProofError),
}

/// The error codes returned by jsonrpc.
diff --git a/client/finality-grandpa/rpc/src/finality.rs b/client/finality-grandpa/rpc/src/finality.rs
index 1f288b86a0e46..62e3502fc7180 100644
--- a/client/finality-grandpa/rpc/src/finality.rs
+++ b/client/finality-grandpa/rpc/src/finality.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
@@ -16,24 +16,22 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

-use serde::{Serialize, Deserialize};
+use serde::{Deserialize, Serialize};

use sc_finality_grandpa::FinalityProofProvider;
use sp_runtime::traits::{Block as BlockT, NumberFor};

#[derive(Serialize, Deserialize)]
-pub struct EncodedFinalityProofs(pub sp_core::Bytes);
+pub struct EncodedFinalityProof(pub sp_core::Bytes);

/// Local trait mainly to allow mocking in tests.
pub trait RpcFinalityProofProvider<Block: BlockT> {
-	/// Return finality proofs for the given authorities set id, if it is provided, otherwise the
-	/// current one will be used.
+	/// Prove finality for the given block number by returning a Justification for the last block of
+	/// the authority set.
	fn rpc_prove_finality(
		&self,
-		begin: Block::Hash,
-		end: Block::Hash,
-		authorities_set_id: u64,
-	) -> Result<Option<EncodedFinalityProofs>, sp_blockchain::Error>;
+		block: NumberFor<Block>,
+	) -> Result<Option<EncodedFinalityProof>, sc_finality_grandpa::FinalityProofError>;
}

impl<B, Block> RpcFinalityProofProvider<Block> for FinalityProofProvider<B, Block>
@@ -44,11 +42,8 @@ where
{
	fn rpc_prove_finality(
		&self,
-		begin: Block::Hash,
-		end: Block::Hash,
-		authorities_set_id: u64,
-	) -> Result<Option<EncodedFinalityProofs>, sp_blockchain::Error> {
-		self.prove_finality(begin, end, authorities_set_id)
-			.map(|x| x.map(|y| EncodedFinalityProofs(y.into())))
+		block: NumberFor<Block>,
+	) -> Result<Option<EncodedFinalityProof>, sc_finality_grandpa::FinalityProofError> {
+		self.prove_finality(block).map(|x| x.map(|y| EncodedFinalityProof(y.into())))
	}
}
diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs
index 172473ad6518b..b8b8b2d956463 100644
--- a/client/finality-grandpa/rpc/src/lib.rs
+++ b/client/finality-grandpa/rpc/src/lib.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
@@ -19,17 +19,11 @@
//! RPC API for GRANDPA.
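//!
//! (Illustrative request shape, mirroring the tests at the bottom of this file: a
//! round-state query is sent as
//! `{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":1}` and is
//! answered with the reported round states.)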
#![warn(missing_docs)]

-use std::sync::Arc;
-use futures::{FutureExt, TryFutureExt, TryStreamExt, StreamExt};
-use log::warn;
use jsonrpc_derive::rpc;
-use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager};
-use jsonrpc_core::futures::{
-	sink::Sink as Sink01,
-	stream::Stream as Stream01,
-	future::Future as Future01,
-	future::Executor as Executor01,
-};
+use futures::{task::Spawn, FutureExt, SinkExt, StreamExt, TryFutureExt};
+use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId};
+use log::warn;
+use std::sync::Arc;

mod error;
mod finality;
@@ -37,18 +31,17 @@ mod notification;
mod report;

use sc_finality_grandpa::GrandpaJustificationStream;
-use sp_runtime::traits::Block as BlockT;
+use sp_runtime::traits::{Block as BlockT, NumberFor};

-use finality::{EncodedFinalityProofs, RpcFinalityProofProvider};
-use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates};
+use finality::{EncodedFinalityProof, RpcFinalityProofProvider};
use notification::JustificationNotification;
+use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates};

-type FutureResult<T> =
-	Box<dyn jsonrpc_core::futures::Future<Item = T, Error = jsonrpc_core::Error> + Send>;
+type FutureResult<T> = jsonrpc_core::BoxFuture<jsonrpc_core::Result<T>>;

/// Provides RPC methods for interacting with GRANDPA.
#[rpc]
-pub trait GrandpaApi<Notification, Hash> {
+pub trait GrandpaApi<Notification, Hash, Number> {
	/// RPC Metadata
	type Metadata;

@@ -67,7 +60,7 @@ pub trait GrandpaApi<Notification, Hash> {
	fn subscribe_justifications(
		&self,
		metadata: Self::Metadata,
-		subscriber: Subscriber<Notification>
+		subscriber: Subscriber<Notification>,
	);

	/// Unsubscribe from receiving notifications about recently finalized blocks.
@@ -79,18 +72,13 @@ pub trait GrandpaApi<Notification, Hash> {
	fn unsubscribe_justifications(
		&self,
		metadata: Option<Self::Metadata>,
-		id: SubscriptionId
+		id: SubscriptionId,
	) -> jsonrpc_core::Result<bool>;

-	/// Prove finality for the range (begin; end] hash. Returns None if there are no finalized blocks
-	/// unknown in the range. If no authorities set is provided, the current one will be attempted.
+	/// Prove finality for the given block number by returning the Justification for the last block
+	/// in the set and all the intermediary headers to link them together.
	#[rpc(name = "grandpa_proveFinality")]
-	fn prove_finality(
-		&self,
-		begin: Hash,
-		end: Hash,
-		authorities_set_id: Option<u64>,
-	) -> FutureResult<Option<EncodedFinalityProofs>>;
+	fn prove_finality(&self, block: Number) -> FutureResult<Option<EncodedFinalityProof>>;
}

/// Implements the GrandpaApi RPC trait for interacting with GRANDPA.
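///
/// (Constructed in `new` below from the shared authority set, the voter state, the
/// justification stream, a subscription executor, and a finality proof provider.)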
@@ -114,20 +102,15 @@ impl finality_proof_provider: Arc, ) -> Self where - E: Executor01 + Send>> + Send + Sync + 'static, + E: Spawn + Sync + Send + 'static, { let manager = SubscriptionManager::new(Arc::new(executor)); - Self { - authority_set, - voter_state, - justification_stream, - manager, - finality_proof_provider, - } + Self { authority_set, voter_state, justification_stream, manager, finality_proof_provider } } } -impl GrandpaApi +impl + GrandpaApi> for GrandpaRpcHandler where VoterState: ReportVoterState + Send + Sync + 'static, @@ -140,23 +123,22 @@ where fn round_state(&self) -> FutureResult { let round_states = ReportedRoundStates::from(&self.authority_set, &self.voter_state); let future = async move { round_states }.boxed(); - Box::new(future.map_err(jsonrpc_core::Error::from).compat()) + future.map_err(jsonrpc_core::Error::from).boxed() } fn subscribe_justifications( &self, _metadata: Self::Metadata, - subscriber: Subscriber + subscriber: Subscriber, ) { - let stream = self.justification_stream.subscribe() - .map(|x| Ok::<_,()>(JustificationNotification::from(x))) - .map_err(|e| warn!("Notification stream error: {:?}", e)) - .compat(); + let stream = self + .justification_stream + .subscribe() + .map(|x| Ok(Ok::<_, jsonrpc_core::Error>(JustificationNotification::from(x)))); self.manager.add(subscriber, |sink| { - let stream = stream.map(|res| Ok(res)); - sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all(stream) + stream + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) .map(|_| ()) }); } @@ -164,58 +146,45 @@ where fn unsubscribe_justifications( &self, _metadata: Option, - id: SubscriptionId + id: SubscriptionId, ) -> jsonrpc_core::Result { Ok(self.manager.cancel(id)) } fn prove_finality( &self, - begin: Block::Hash, - end: Block::Hash, - authorities_set_id: Option, - ) -> FutureResult> { - // If we are not provided a set_id, try with the current one. 
- let authorities_set_id = authorities_set_id - .unwrap_or_else(|| self.authority_set.get().0); - let result = self - .finality_proof_provider - .rpc_prove_finality(begin, end, authorities_set_id); + block: NumberFor, + ) -> FutureResult> { + let result = self.finality_proof_provider.rpc_prove_finality(block); let future = async move { result }.boxed(); - Box::new( - future - .map_err(|e| { - warn!("Error proving finality: {}", e); - error::Error::ProveFinalityFailed(e) - }) - .map_err(jsonrpc_core::Error::from) - .compat() - ) + future + .map_err(|e| { + warn!("Error proving finality: {}", e); + error::Error::ProveFinalityFailed(e) + }) + .map_err(jsonrpc_core::Error::from) + .boxed() } } #[cfg(test)] mod tests { use super::*; + use jsonrpc_core::{types::Params, Notification, Output}; use std::{collections::HashSet, convert::TryInto, sync::Arc}; - use jsonrpc_core::{Notification, Output, types::Params}; - use parity_scale_codec::{Encode, Decode}; - use sc_block_builder::BlockBuilder; + use parity_scale_codec::{Decode, Encode}; + use sc_block_builder::{BlockBuilder, RecordProof}; use sc_finality_grandpa::{ - report, AuthorityId, GrandpaJustificationSender, GrandpaJustification, - FinalityProofFragment, + report, AuthorityId, FinalityProof, GrandpaJustification, GrandpaJustificationSender, }; use sp_blockchain::HeaderBackend; - use sp_consensus::RecordProof; use sp_core::crypto::Public; use sp_keyring::Ed25519Keyring; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use substrate_test_runtime_client::{ runtime::{Block, Header, H256}, - DefaultTestClientBuilderExt, - TestClientBuilderExt, - TestClientBuilder, + DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; struct TestAuthoritySet; @@ -223,7 +192,7 @@ mod tests { struct EmptyVoterState; struct TestFinalityProofProvider { - finality_proofs: Vec>, + finality_proof: Option>, } fn voters() -> HashSet { @@ -262,11 +231,15 @@ mod tests { impl RpcFinalityProofProvider for TestFinalityProofProvider { fn rpc_prove_finality( &self, - _begin: Block::Hash, - _end: Block::Hash, - _authoritites_set_id: u64, - ) -> Result, sp_blockchain::Error> { - Ok(Some(EncodedFinalityProofs(self.finality_proofs.encode().into()))) + _block: NumberFor, + ) -> Result, sc_finality_grandpa::FinalityProofError> { + Ok(Some(EncodedFinalityProof( + self.finality_proof + .as_ref() + .expect("Don't call rpc_prove_finality without setting the FinalityProof") + .encode() + .into(), + ))) } } @@ -295,33 +268,28 @@ mod tests { let background_rounds = vec![(1, past_round_state)].into_iter().collect(); - Some(report::VoterState { - background_rounds, - best_round: (2, best_round_state), - }) + Some(report::VoterState { background_rounds, best_round: (2, best_round_state) }) } } - fn setup_io_handler(voter_state: VoterState) -> ( - jsonrpc_core::MetaIoHandler, - GrandpaJustificationSender, - ) where + fn setup_io_handler( + voter_state: VoterState, + ) -> (jsonrpc_core::MetaIoHandler, GrandpaJustificationSender) + where VoterState: ReportVoterState + Send + Sync + 'static, { - setup_io_handler_with_finality_proofs(voter_state, Default::default()) + setup_io_handler_with_finality_proofs(voter_state, None) } fn setup_io_handler_with_finality_proofs( voter_state: VoterState, - finality_proofs: Vec>, - ) -> ( - jsonrpc_core::MetaIoHandler, - GrandpaJustificationSender, - ) where + finality_proof: Option>, + ) -> (jsonrpc_core::MetaIoHandler, GrandpaJustificationSender) + where VoterState: ReportVoterState + Send + Sync + 'static, { let 
(justification_sender, justification_stream) = GrandpaJustificationStream::channel(); - let finality_proof_provider = Arc::new(TestFinalityProofProvider { finality_proofs }); + let finality_proof_provider = Arc::new(TestFinalityProofProvider { finality_proof }); let handler = GrandpaRpcHandler::new( TestAuthoritySet, @@ -350,7 +318,7 @@ mod tests { #[test] fn working_rpc_handler() { - let (io, _) = setup_io_handler(TestVoterState); + let (io, _) = setup_io_handler(TestVoterState); let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":1}"#; let response = "{\"jsonrpc\":\"2.0\",\"result\":{\ @@ -371,8 +339,8 @@ mod tests { assert_eq!(io.handle_request_sync(request, meta), Some(response.into())); } - fn setup_session() -> (sc_rpc::Metadata, jsonrpc_core::futures::sync::mpsc::Receiver) { - let (tx, rx) = jsonrpc_core::futures::sync::mpsc::channel(1); + fn setup_session() -> (sc_rpc::Metadata, futures::channel::mpsc::UnboundedReceiver) { + let (tx, rx) = futures::channel::mpsc::unbounded(); let meta = sc_rpc::Metadata::new(tx); (meta, rx) } @@ -383,7 +351,8 @@ mod tests { let (meta, _) = setup_session(); // Subscribe - let sub_request = r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + let sub_request = + r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; let resp = io.handle_request_sync(sub_request, meta.clone()); let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); @@ -405,7 +374,7 @@ mod tests { // Unsubscribe again and fail assert_eq!( io.handle_request_sync(&unsub_req, meta), - Some(r#"{"jsonrpc":"2.0","result":false,"id":1}"#.into()), + Some("{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32602,\"message\":\"Invalid subscription id.\"},\"id\":1}".into()), ); } @@ -415,7 +384,8 @@ mod tests { let (meta, _) = setup_session(); // Subscribe - let sub_request = r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + let sub_request = + r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; let resp = io.handle_request_sync(sub_request, meta.clone()); let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); assert!(matches!(resp, Output::Success(_))); @@ -426,7 +396,7 @@ mod tests { r#"{"jsonrpc":"2.0","method":"grandpa_unsubscribeJustifications","params":["FOO"],"id":1}"#, meta.clone() ), - Some(r#"{"jsonrpc":"2.0","result":false,"id":1}"#.into()) + Some("{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32602,\"message\":\"Invalid subscription id.\"},\"id\":1}".into()) ); } @@ -442,10 +412,13 @@ mod tests { &*client, client.info().best_hash, client.info().best_number, - RecordProof::Yes, + RecordProof::No, Default::default(), &*backend, - ).unwrap().build().unwrap(); + ) + .unwrap() + .build() + .unwrap(); let block = built_block.block; let block_hash = block.hash(); @@ -499,15 +472,14 @@ mod tests { justification_sender.notify(|| Ok(justification.clone())).unwrap(); // Inspect what we received - let recv = receiver.take(1).wait().flatten().collect::>(); + let recv = futures::executor::block_on(receiver.take(1).collect::>()); let recv: Notification = serde_json::from_str(&recv[0]).unwrap(); let mut json_map = match recv.params { Params::Map(json_map) => json_map, _ => panic!(), }; - let recv_sub_id: String = - serde_json::from_value(json_map["subscription"].take()).unwrap(); + let recv_sub_id: String = serde_json::from_value(json_map["subscription"].take()).unwrap(); let recv_justification: sp_core::Bytes = 
serde_json::from_value(json_map["result"].take()).unwrap(); let recv_justification: GrandpaJustification = @@ -520,29 +492,22 @@ mod tests { #[test] fn prove_finality_with_test_finality_proof_provider() { - let finality_proofs = vec![FinalityProofFragment { + let finality_proof = FinalityProof { block: header(42).hash(), justification: create_justification().encode(), unknown_headers: vec![header(2)], - authorities_proof: None, - }]; - let (io, _) = setup_io_handler_with_finality_proofs( - TestVoterState, - finality_proofs.clone(), - ); + }; + let (io, _) = + setup_io_handler_with_finality_proofs(TestVoterState, Some(finality_proof.clone())); - let request = "{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_proveFinality\",\"params\":[\ - \"0x0000000000000000000000000000000000000000000000000000000000000000\",\ - \"0x0000000000000000000000000000000000000000000000000000000000000001\",\ - 42\ - ],\"id\":1}"; + let request = + "{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_proveFinality\",\"params\":[42],\"id\":1}"; let meta = sc_rpc::Metadata::default(); let resp = io.handle_request_sync(request, meta); let mut resp: serde_json::Value = serde_json::from_str(&resp.unwrap()).unwrap(); let result: sp_core::Bytes = serde_json::from_value(resp["result"].take()).unwrap(); - let fragments: Vec> = - Decode::decode(&mut &result[..]).unwrap(); - assert_eq!(fragments, finality_proofs); + let finality_proof_rpc: FinalityProof
= Decode::decode(&mut &result[..]).unwrap(); + assert_eq!(finality_proof_rpc, finality_proof); } } diff --git a/client/finality-grandpa/rpc/src/notification.rs b/client/finality-grandpa/rpc/src/notification.rs index fd03a622b2196..68944e903e0fb 100644 --- a/client/finality-grandpa/rpc/src/notification.rs +++ b/client/finality-grandpa/rpc/src/notification.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,10 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use serde::{Serialize, Deserialize}; use parity_scale_codec::Encode; -use sp_runtime::traits::Block as BlockT; use sc_finality_grandpa::GrandpaJustification; +use serde::{Deserialize, Serialize}; +use sp_runtime::traits::Block as BlockT; /// An encoded justification proving that the given header has been finalized #[derive(Clone, Serialize, Deserialize)] diff --git a/client/finality-grandpa/rpc/src/report.rs b/client/finality-grandpa/rpc/src/report.rs index a635728cb938a..fef8f22659953 100644 --- a/client/finality-grandpa/rpc/src/report.rs +++ b/client/finality-grandpa/rpc/src/report.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -44,11 +44,8 @@ where H: Clone + Debug + Eq, { fn get(&self) -> (u64, HashSet) { - let current_voters: HashSet = self - .current_authorities() - .iter() - .map(|p| p.0.clone()) - .collect(); + let current_voters: HashSet = + self.current_authorities().iter().map(|p| p.0.clone()).collect(); (self.set_id(), current_voters) } @@ -152,10 +149,6 @@ impl ReportedRoundStates { .map(|(round, round_state)| RoundState::from(*round, round_state, ¤t_voters)) .collect::, Error>>()?; - Ok(Self { - set_id, - best, - background, - }) + Ok(Self { set_id, best, background }) } } diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 57c30bc3b25c9..6e5dfdd05e624 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,31 +18,59 @@ //! Utilities for dealing with authorities, authority sets, and handoffs. 
-use fork_tree::ForkTree; -use parking_lot::RwLock; +use std::{cmp::Ord, fmt::Debug, ops::Add}; + use finality_grandpa::voter_set::VoterSet; -use parity_scale_codec::{Encode, Decode}; +use fork_tree::ForkTree; use log::debug; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::MappedMutexGuard; +use sc_consensus::shared_data::{SharedData, SharedDataLocked}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sp_finality_grandpa::{AuthorityId, AuthorityList}; -use std::cmp::Ord; -use std::fmt::Debug; -use std::ops::Add; -use std::sync::Arc; +use crate::SetId; /// Error type returned on operations on the `AuthoritySet`. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - #[display("Invalid authority set, either empty or with an authority weight set to 0.")] +#[derive(Debug, derive_more::Display)] +pub enum Error { + #[display(fmt = "Invalid authority set, either empty or with an authority weight set to 0.")] InvalidAuthoritySet, + #[display(fmt = "Client error during ancestry lookup: {}", _0)] + Client(E), + #[display(fmt = "Duplicate authority set change.")] + DuplicateAuthoritySetChange, + #[display(fmt = "Multiple pending forced authority set changes are not allowed.")] + MultiplePendingForcedAuthoritySetChanges, + #[display( + fmt = "A pending forced authority set change could not be applied since it must be applied \ + after the pending standard change at #{}", + _0 + )] + ForcedAuthoritySetChangeDependencyUnsatisfied(N), #[display(fmt = "Invalid operation in the pending changes tree: {}", _0)] ForkTree(fork_tree::Error), } +impl From> for Error { + fn from(err: fork_tree::Error) -> Error { + match err { + fork_tree::Error::Client(err) => Error::Client(err), + fork_tree::Error::Duplicate => Error::DuplicateAuthoritySetChange, + err => Error::ForkTree(err), + } + } +} + +impl From for Error { + fn from(err: E) -> Error { + Error::Client(err) + } +} + /// A shared authority set. pub struct SharedAuthoritySet { - inner: Arc>>, + inner: SharedData>, } impl Clone for SharedAuthoritySet { @@ -52,30 +80,38 @@ impl Clone for SharedAuthoritySet { } impl SharedAuthoritySet { - /// Acquire a reference to the inner read-write lock. - pub(crate) fn inner(&self) -> &RwLock> { - &*self.inner + /// Returns access to the [`AuthoritySet`]. + pub(crate) fn inner(&self) -> MappedMutexGuard> { + self.inner.shared_data() + } + + /// Returns access to the [`AuthoritySet`] and locks it. + /// + /// For more information see [`SharedDataLocked`]. + pub(crate) fn inner_locked(&self) -> SharedDataLocked> { + self.inner.shared_data_locked() } } impl SharedAuthoritySet -where N: Add + Ord + Clone + Debug, - H: Clone + Debug +where + N: Add + Ord + Clone + Debug, + H: Clone + Debug, { /// Get the earliest limit-block number that's higher or equal to the given /// min number, if any. pub(crate) fn current_limit(&self, min: N) -> Option { - self.inner.read().current_limit(min) + self.inner().current_limit(min) } /// Get the current set ID. This is incremented every time the set changes. pub fn set_id(&self) -> u64 { - self.inner.read().set_id + self.inner().set_id } /// Get the current authorities and their weights (for the current set ID). 
pub fn current_authorities(&self) -> VoterSet { - VoterSet::new(self.inner.read().current_authorities.iter().cloned()).expect( + VoterSet::new(self.inner().current_authorities.iter().cloned()).expect( "current_authorities is non-empty and weights are non-zero; \ constructor and all mutating operations on `AuthoritySet` ensure this; \ qed.", @@ -84,13 +120,18 @@ where N: Add + Ord + Clone + Debug, /// Clone the inner `AuthoritySet`. pub fn clone_inner(&self) -> AuthoritySet { - self.inner.read().clone() + self.inner().clone() + } + + /// Clone the inner `AuthoritySetChanges`. + pub fn authority_set_changes(&self) -> AuthoritySetChanges { + self.inner().authority_set_changes.clone() } } impl From> for SharedAuthoritySet { fn from(set: AuthoritySet) -> Self { - SharedAuthoritySet { inner: Arc::new(RwLock::new(set)) } + SharedAuthoritySet { inner: SharedData::new(set) } } } @@ -116,14 +157,24 @@ pub struct AuthoritySet { /// a given branch pub(crate) pending_standard_changes: ForkTree>, /// Pending forced changes across different forks (at most one per fork). - /// Forced changes are enacted on block depth (not finality), for this reason - /// only one forced change should exist per fork. + /// Forced changes are enacted on block depth (not finality), for this + /// reason only one forced change should exist per fork. When trying to + /// apply forced changes we keep track of any pending standard changes that + /// they may depend on, this is done by making sure that any pending change + /// that is an ancestor of the forced changed and its effective block number + /// is lower than the last finalized block (as signaled in the forced + /// change) must be applied beforehand. pending_forced_changes: Vec>, + /// Track at which blocks the set id changed. This is useful when we need to prove finality for + /// a given block since we can figure out what set the block belongs to and when the set + /// started/ended. + authority_set_changes: AuthoritySetChanges, } impl AuthoritySet -where H: PartialEq, - N: Ord, +where + H: PartialEq, + N: Ord + Clone, { // authority sets must be non-empty and all weights must be greater than 0 fn invalid_authority_list(authorities: &AuthorityList) -> bool { @@ -133,7 +184,7 @@ where H: PartialEq, /// Get a genesis set with given authorities. pub(crate) fn genesis(initial: AuthorityList) -> Option { if Self::invalid_authority_list(&initial) { - return None; + return None } Some(AuthoritySet { @@ -141,6 +192,7 @@ where H: PartialEq, set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }) } @@ -150,9 +202,10 @@ where H: PartialEq, set_id: u64, pending_standard_changes: ForkTree>, pending_forced_changes: Vec>, + authority_set_changes: AuthoritySetChanges, ) -> Option { if Self::invalid_authority_list(&authorities) { - return None; + return None } Some(AuthoritySet { @@ -160,6 +213,7 @@ where H: PartialEq, set_id, pending_standard_changes, pending_forced_changes, + authority_set_changes, }) } @@ -185,7 +239,7 @@ where &self, best_hash: &H, is_descendent_of: &F, - ) -> Result, fork_tree::Error> + ) -> Result, Error> where F: Fn(&H, &H) -> Result, E: std::error::Error, @@ -194,7 +248,7 @@ where for change in &self.pending_forced_changes { if is_descendent_of(&change.canon_hash, best_hash)? 
{ forced = Some((change.canon_hash.clone(), change.canon_height.clone())); - break; + break } } @@ -202,16 +256,13 @@ where for (_, _, change) in self.pending_standard_changes.roots() { if is_descendent_of(&change.canon_hash, best_hash)? { standard = Some((change.canon_hash.clone(), change.canon_height.clone())); - break; + break } } let earliest = match (forced, standard) { - (Some(forced), Some(standard)) => Some(if forced.1 < standard.1 { - forced - } else { - standard - }), + (Some(forced), Some(standard)) => + Some(if forced.1 < standard.1 { forced } else { standard }), (Some(forced), None) => Some(forced), (None, Some(standard)) => Some(standard), (None, None) => None, @@ -224,26 +275,27 @@ where &mut self, pending: PendingChange, is_descendent_of: &F, - ) -> Result<(), Error> where + ) -> Result<(), Error> + where F: Fn(&H, &H) -> Result, E: std::error::Error, { let hash = pending.canon_hash.clone(); let number = pending.canon_height.clone(); - debug!(target: "afg", "Inserting potential standard set change signaled at block {:?} \ - (delayed by {:?} blocks).", - (&number, &hash), pending.delay); + debug!( + target: "afg", + "Inserting potential standard set change signaled at block {:?} (delayed by {:?} blocks).", + (&number, &hash), + pending.delay, + ); - self.pending_standard_changes.import( - hash, - number, - pending, - is_descendent_of, - )?; + self.pending_standard_changes.import(hash, number, pending, is_descendent_of)?; - debug!(target: "afg", "There are now {} alternatives for the next pending standard change (roots), \ - and a total of {} pending standard changes (across all forks).", + debug!( + target: "afg", + "There are now {} alternatives for the next pending standard change (roots), and a \ + total of {} pending standard changes (across all forks).", self.pending_standard_changes.roots().count(), self.pending_standard_changes.iter().count(), ); @@ -255,31 +307,36 @@ where &mut self, pending: PendingChange, is_descendent_of: &F, - ) -> Result<(), Error> where + ) -> Result<(), Error> + where F: Fn(&H, &H) -> Result, E: std::error::Error, { - for change in self.pending_forced_changes.iter() { - if change.canon_hash == pending.canon_hash || - is_descendent_of(&change.canon_hash, &pending.canon_hash) - .map_err(fork_tree::Error::Client)? - { - return Err(fork_tree::Error::UnfinalizedAncestor.into()); + for change in &self.pending_forced_changes { + if change.canon_hash == pending.canon_hash { + return Err(Error::DuplicateAuthoritySetChange) + } + + if is_descendent_of(&change.canon_hash, &pending.canon_hash)? { + return Err(Error::MultiplePendingForcedAuthoritySetChanges) } } // ordered first by effective number and then by signal-block number. 
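// (`binary_search_by_key` returns `Err(i)` carrying the would-be insertion index when
// the key is absent, so the `unwrap_or_else(|i| i)` below yields a position that
// keeps the vector sorted in either case.)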
let key = (pending.effective_number(), pending.canon_height.clone()); - let idx = self.pending_forced_changes - .binary_search_by_key(&key, |change| ( - change.effective_number(), - change.canon_height.clone(), - )) + let idx = self + .pending_forced_changes + .binary_search_by_key(&key, |change| { + (change.effective_number(), change.canon_height.clone()) + }) .unwrap_or_else(|i| i); - debug!(target: "afg", "Inserting potential forced set change at block {:?} \ - (delayed by {:?} blocks).", - (&pending.canon_height, &pending.canon_hash), pending.delay); + debug!( + target: "afg", + "Inserting potential forced set change at block {:?} (delayed by {:?} blocks).", + (&pending.canon_height, &pending.canon_hash), + pending.delay, + ); self.pending_forced_changes.insert(idx, pending); @@ -298,29 +355,28 @@ where &mut self, pending: PendingChange, is_descendent_of: &F, - ) -> Result<(), Error> where + ) -> Result<(), Error> + where F: Fn(&H, &H) -> Result, E: std::error::Error, { if Self::invalid_authority_list(&pending.next_authorities) { - return Err(Error::InvalidAuthoritySet); + return Err(Error::InvalidAuthoritySet) } match pending.delay_kind { - DelayKind::Best { .. } => { - self.add_forced_change(pending, is_descendent_of) - }, - DelayKind::Finalized => { - self.add_standard_change(pending, is_descendent_of) - }, + DelayKind::Best { .. } => self.add_forced_change(pending, is_descendent_of), + DelayKind::Finalized => self.add_standard_change(pending, is_descendent_of), } } /// Inspect pending changes. Standard pending changes are iterated first, /// and the changes in the tree are traversed in pre-order, afterwards all /// forced changes are iterated. - pub(crate) fn pending_changes(&self) -> impl Iterator> { - self.pending_standard_changes.iter().map(|(_, _, c)| c) + pub(crate) fn pending_changes(&self) -> impl Iterator> { + self.pending_standard_changes + .iter() + .map(|(_, _, c)| c) .chain(self.pending_forced_changes.iter()) } @@ -331,7 +387,8 @@ where /// Only standard changes are taken into account for the current /// limit, since any existing forced change should preclude the voter from voting. pub(crate) fn current_limit(&self, min: N) -> Option { - self.pending_standard_changes.roots() + self.pending_standard_changes + .roots() .filter(|&(_, _, c)| c.effective_number() >= min) .min_by_key(|&(_, _, c)| c.effective_number()) .map(|(_, _, c)| c.effective_number()) @@ -346,52 +403,94 @@ where /// /// These transitions are always forced and do not lead to justifications /// which light clients can follow. + /// + /// Forced changes can only be applied after all pending standard changes + /// that it depends on have been applied. If any pending standard change + /// exists that is an ancestor of a given forced changed and which effective + /// block number is lower than the last finalized block (as defined by the + /// forced change), then the forced change cannot be applied. An error will + /// be returned in that case which will prevent block import. 
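+	/// (Concrete example: if a forced change signals `median_last_finalized = 50` and a
+	/// pending standard change on an ancestor block becomes effective at #40, the check
+	/// below returns `ForcedAuthoritySetChangeDependencyUnsatisfied(40)` until that
+	/// standard change has been applied.)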
pub(crate) fn apply_forced_changes( &self, best_hash: H, best_number: N, is_descendent_of: &F, initial_sync: bool, - ) -> Result, E> - where F: Fn(&H, &H) -> Result, + telemetry: Option, + ) -> Result, Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, { let mut new_set = None; - for change in self.pending_forced_changes.iter() + for change in self + .pending_forced_changes + .iter() .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far .filter(|c| c.effective_number() == best_number) { // check if the given best block is in the same branch as // the block that signaled the change. if change.canon_hash == best_hash || is_descendent_of(&change.canon_hash, &best_hash)? { + let median_last_finalized = match change.delay_kind { + DelayKind::Best { ref median_last_finalized } => median_last_finalized.clone(), + _ => unreachable!( + "pending_forced_changes only contains forced changes; forced changes have delay kind Best; qed." + ), + }; + + // check if there's any pending standard change that we depend on + for (_, _, standard_change) in self.pending_standard_changes.roots() { + if standard_change.effective_number() <= median_last_finalized && + is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? + { + log::info!(target: "afg", + "Not applying authority set change forced at block #{:?}, due to pending standard change at block #{:?}", + change.canon_height, + standard_change.effective_number(), + ); + + return Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied( + standard_change.effective_number(), + )) + } + } + // apply this change: make the set canonical - afg_log!(initial_sync, + afg_log!( + initial_sync, "👴 Applying authority set change forced at block #{:?}", change.canon_height, ); - telemetry!(CONSENSUS_INFO; "afg.applying_forced_authority_set_change"; + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.applying_forced_authority_set_change"; "block" => ?change.canon_height ); - let median_last_finalized = match change.delay_kind { - DelayKind::Best { ref median_last_finalized } => median_last_finalized.clone(), - _ => unreachable!("pending_forced_changes only contains forced changes; forced changes have delay kind Best; qed."), - }; - - new_set = Some((median_last_finalized, AuthoritySet { - current_authorities: change.next_authorities.clone(), - set_id: self.set_id + 1, - pending_standard_changes: ForkTree::new(), // new set, new changes. - pending_forced_changes: Vec::new(), - })); - - break; + let mut authority_set_changes = self.authority_set_changes.clone(); + authority_set_changes.append(self.set_id, median_last_finalized.clone()); + + new_set = Some(( + median_last_finalized, + AuthoritySet { + current_authorities: change.next_authorities.clone(), + set_id: self.set_id + 1, + pending_standard_changes: ForkTree::new(), // new set, new changes. + pending_forced_changes: Vec::new(), + authority_set_changes, + }, + )); + + break } - - // we don't wipe forced changes until another change is - // applied } + // we don't wipe forced changes until another change is applied, hence + // why we return a new set instead of mutating. 
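    // A note on the `authority_set_changes` bookkeeping used above (a sketch
    // based on the `authority_set_changes_for_complete_data` test below, not
    // part of the patch): every `append(set_id, n)` records that `set_id`
    // ended at block `n`, and `get_set_id` (defined further down) maps a
    // block back to its set, e.g.:
    //
    //     let mut changes = AuthoritySetChanges::empty();
    //     changes.append(0, 41); // set 0 ended at block 41
    //     changes.append(1, 81); // set 1 ended at block 81
    //     assert_eq!(changes.get_set_id(20), AuthoritySetChangeId::Set(0, 41));
    //     assert_eq!(changes.get_set_id(42), AuthoritySetChangeId::Set(1, 81));
    //     assert_eq!(changes.get_set_id(100), AuthoritySetChangeId::Latest);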
Ok(new_set) } @@ -411,56 +510,56 @@ where finalized_number: N, is_descendent_of: &F, initial_sync: bool, - ) -> Result, Error> where + telemetry: Option<&TelemetryHandle>, + ) -> Result, Error> + where F: Fn(&H, &H) -> Result, E: std::error::Error, { - let mut status = Status { - changed: false, - new_set_block: None, - }; + let mut status = Status { changed: false, new_set_block: None }; match self.pending_standard_changes.finalize_with_descendent_if( &finalized_hash, finalized_number.clone(), is_descendent_of, - |change| change.effective_number() <= finalized_number + |change| change.effective_number() <= finalized_number, )? { fork_tree::FinalizationResult::Changed(change) => { status.changed = true; - let pending_forced_changes = std::mem::replace( - &mut self.pending_forced_changes, - Vec::new(), - ); + let pending_forced_changes = + std::mem::replace(&mut self.pending_forced_changes, Vec::new()); - // we will keep all forced change for any later blocks and that are a - // descendent of the finalized block (i.e. they are from this fork). + // we will keep all forced changes for any later blocks that are a + // descendent of the finalized block (i.e. they are part of this branch). for change in pending_forced_changes { if change.effective_number() > finalized_number && - is_descendent_of(&finalized_hash, &change.canon_hash) - .map_err(fork_tree::Error::Client)? + is_descendent_of(&finalized_hash, &change.canon_hash)? { self.pending_forced_changes.push(change) } } if let Some(change) = change { - afg_log!(initial_sync, + afg_log!( + initial_sync, "👴 Applying authority set change scheduled at block #{:?}", change.canon_height, ); - telemetry!(CONSENSUS_INFO; "afg.applying_scheduled_authority_set_change"; + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.applying_scheduled_authority_set_change"; "block" => ?change.canon_height ); + // Store the set_id together with the last block_number for the set + self.authority_set_changes.append(self.set_id, finalized_number.clone()); + self.current_authorities = change.next_authorities; self.set_id += 1; - status.new_set_block = Some(( - finalized_hash, - finalized_number, - )); + status.new_set_block = Some((finalized_hash, finalized_number)); } }, fork_tree::FinalizationResult::Unchanged => {}, @@ -484,16 +583,19 @@ where finalized_hash: H, finalized_number: N, is_descendent_of: &F, - ) -> Result, Error> where + ) -> Result, Error> + where F: Fn(&H, &H) -> Result, E: std::error::Error, { - self.pending_standard_changes.finalizes_any_with_descendent_if( - &finalized_hash, - finalized_number.clone(), - is_descendent_of, - |change| change.effective_number() == finalized_number - ).map_err(Error::ForkTree) + self.pending_standard_changes + .finalizes_any_with_descendent_if( + &finalized_hash, + finalized_number.clone(), + is_descendent_of, + |change| change.effective_number() == finalized_number, + ) + .map_err(Error::ForkTree) } } @@ -527,7 +629,9 @@ pub struct PendingChange { } impl Decode for PendingChange { - fn decode(value: &mut I) -> Result { + fn decode( + value: &mut I, + ) -> Result { let next_authorities = Decode::decode(value)?; let delay = Decode::decode(value)?; let canon_height = Decode::decode(value)?; @@ -535,36 +639,118 @@ impl Decode for PendingChange { let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized); - Ok(PendingChange { - next_authorities, - delay, - canon_height, - canon_hash, - delay_kind, - }) + Ok(PendingChange { next_authorities, delay, canon_height, canon_hash, delay_kind }) } } -impl + Clone>
PendingChange { +impl + Clone> PendingChange { /// Returns the effective number this change will be applied at. pub fn effective_number(&self) -> N { self.canon_height.clone() + self.delay.clone() } } +/// Tracks historical authority set changes. We store the block numbers for the last block +/// of each authority set, once they have been finalized. These blocks are guaranteed to +/// have a justification unless they were triggered by a forced change. +#[derive(Debug, Encode, Decode, Clone, PartialEq)] +pub struct AuthoritySetChanges(Vec<(u64, N)>); + +/// The response when querying for a set id for a specific block. Either we get a set id +/// together with a block number for the last block in the set, or the requested block is in +/// the latest set, or we don't know which set id the given block belongs to. +#[derive(Debug, PartialEq)] +pub enum AuthoritySetChangeId { + /// The requested block is in the latest set. + Latest, + /// Tuple containing the set id and the last block number of that set. + Set(SetId, N), + /// We don't know which set id the requested block belongs to (this can only happen due to + /// missing data). + Unknown, +} + +impl From> for AuthoritySetChanges { + fn from(changes: Vec<(u64, N)>) -> AuthoritySetChanges { + AuthoritySetChanges(changes) + } +} + +impl AuthoritySetChanges { + pub(crate) fn empty() -> Self { + Self(Default::default()) + } + + pub(crate) fn append(&mut self, set_id: u64, block_number: N) { + self.0.push((set_id, block_number)); + } + + pub(crate) fn get_set_id(&self, block_number: N) -> AuthoritySetChangeId { + if self + .0 + .last() + .map(|last_auth_change| last_auth_change.1 < block_number) + .unwrap_or(false) + { + return AuthoritySetChangeId::Latest + } + + let idx = self + .0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) + .unwrap_or_else(|b| b); + + if idx < self.0.len() { + let (set_id, block_number) = self.0[idx].clone(); + + // if this is the first index but not the first set id then we are missing data. + if idx == 0 && set_id != 0 { + return AuthoritySetChangeId::Unknown + } + + AuthoritySetChangeId::Set(set_id, block_number) + } else { + AuthoritySetChangeId::Unknown + } + } + + /// Returns an iterator over all historical authority set changes starting at the given block + /// number (exclusive). The iterator yields a tuple representing the set id and the block number + /// of the last block in that set. + pub fn iter_from(&self, block_number: N) -> Option> { + let idx = self + .0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) + // if there was a change at the given block number then we should start on the next + // index since we want to exclude the current block number + .map(|n| n + 1) + .unwrap_or_else(|b| b); + + if idx < self.0.len() { + let (set_id, _) = self.0[idx].clone(); + + // if this is the first index but not the first set id then we are missing data.
+ if idx == 0 && set_id != 0 { + return None + } + } + + Some(self.0[idx..].iter()) + } +} + #[cfg(test)] mod tests { use super::*; use sp_core::crypto::Public; - fn static_is_descendent_of(value: bool) - -> impl Fn(&A, &A) -> Result - { + fn static_is_descendent_of(value: bool) -> impl Fn(&A, &A) -> Result { move |_, _| Ok(value) } fn is_descendent_of(f: F) -> impl Fn(&A, &A) -> Result - where F: Fn(&A, &A) -> bool + where + F: Fn(&A, &A) -> bool, { move |base, hash| Ok(f(base, hash)) } @@ -578,16 +764,15 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; - let change = |height| { - PendingChange { - next_authorities: current_authorities.clone(), - delay: 0, - canon_height: height, - canon_hash: height.to_string(), - delay_kind: DelayKind::Finalized, - } + let change = |height| PendingChange { + next_authorities: current_authorities.clone(), + delay: 0, + canon_height: height, + canon_hash: height.to_string(), + delay_kind: DelayKind::Finalized, }; let is_descendent_of = static_is_descendent_of(false); @@ -595,25 +780,13 @@ mod tests { authorities.add_pending_change(change(1), &is_descendent_of).unwrap(); authorities.add_pending_change(change(2), &is_descendent_of).unwrap(); - assert_eq!( - authorities.current_limit(0), - Some(1), - ); + assert_eq!(authorities.current_limit(0), Some(1)); - assert_eq!( - authorities.current_limit(1), - Some(1), - ); + assert_eq!(authorities.current_limit(1), Some(1)); - assert_eq!( - authorities.current_limit(2), - Some(2), - ); + assert_eq!(authorities.current_limit(2), Some(2)); - assert_eq!( - authorities.current_limit(3), - None, - ); + assert_eq!(authorities.current_limit(3), None); } #[test] @@ -625,6 +798,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let change_a = PendingChange { @@ -651,13 +825,22 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_c.clone(), &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - })).unwrap(); + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change( + change_c.clone(), + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + ) + .unwrap(); // forced changes are iterated last let change_d = PendingChange { @@ -676,12 +859,17 @@ mod tests { delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; - authorities.add_pending_change(change_d.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_e.clone(), &static_is_descendent_of(false)).unwrap(); + authorities + .add_pending_change(change_d.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_e.clone(), &static_is_descendent_of(false)) + .unwrap(); + // ordered by subtree depth assert_eq!( authorities.pending_changes().collect::>(), - vec![&change_b, &change_a, &change_c, 
&change_e, &change_d], + vec![&change_a, &change_c, &change_b, &change_e, &change_d], ); } @@ -692,6 +880,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -714,43 +903,49 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); - - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_b, &change_a], - ); + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); - // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" - let status = authorities.apply_standard_changes( - "hash_c", - 11, - &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - }), - false, - ).unwrap(); + assert_eq!(authorities.pending_changes().collect::>(), vec![&change_a, &change_b]); + + // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out + // "hash_b" + let status = authorities + .apply_standard_changes( + "hash_c", + 11, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + false, + None, + ) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, None); - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_a], - ); + assert_eq!(authorities.pending_changes().collect::>(), vec![&change_a]); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); // finalizing "hash_d" will enact the change signaled at "hash_a" - let status = authorities.apply_standard_changes( - "hash_d", - 15, - &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_d") => true, - _ => unreachable!(), - }), - false, - ).unwrap(); + let status = authorities + .apply_standard_changes( + "hash_d", + 15, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + _ => unreachable!(), + }), + false, + None, + ) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_d", 15))); @@ -758,6 +953,7 @@ mod tests { assert_eq!(authorities.current_authorities, set_a); assert_eq!(authorities.set_id, 1); assert_eq!(authorities.pending_changes().count(), 0); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); } #[test] @@ -767,6 +963,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -789,8 +986,12 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_c.clone(), &static_is_descendent_of(true)).unwrap(); + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c.clone(), &static_is_descendent_of(true)) + .unwrap(); let is_descendent_of = 
is_descendent_of(|base, hash| match (*base, *hash) { ("hash_a", "hash_b") => true, @@ -806,36 +1007,33 @@ mod tests { // trying to finalize past `change_c` without finalizing `change_a` first assert!(matches!( - authorities.apply_standard_changes("hash_d", 40, &is_descendent_of, false), + authorities.apply_standard_changes("hash_d", 40, &is_descendent_of, false, None), Err(Error::ForkTree(fork_tree::Error::UnfinalizedAncestor)) )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); - let status = authorities.apply_standard_changes( - "hash_b", - 15, - &is_descendent_of, - false, - ).unwrap(); + let status = authorities + .apply_standard_changes("hash_b", 15, &is_descendent_of, false, None) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_b", 15))); assert_eq!(authorities.current_authorities, set_a); assert_eq!(authorities.set_id, 1); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); // after finalizing `change_a` it should be possible to finalize `change_c` - let status = authorities.apply_standard_changes( - "hash_d", - 40, - &is_descendent_of, - false, - ).unwrap(); + let status = authorities + .apply_standard_changes("hash_d", 40, &is_descendent_of, false, None) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_d", 40))); assert_eq!(authorities.current_authorities, set_c); assert_eq!(authorities.set_id, 2); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 40)])); } #[test] @@ -845,6 +1043,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -865,8 +1064,12 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { ("hash_a", "hash_d") => true, @@ -911,6 +1114,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -932,22 +1136,29 @@ mod tests { delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; - authorities.add_pending_change(change_a, &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b, &static_is_descendent_of(false)).unwrap(); + authorities + .add_pending_change(change_a, &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); + + // no duplicates are allowed + assert!(matches!( + authorities.add_pending_change(change_b, &static_is_descendent_of(false)), + Err(Error::DuplicateAuthoritySetChange) + )); // there's an effective change triggered at block 15 but not a standard one. // so this should do nothing. 
assert_eq!( - authorities.enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)).unwrap(), + authorities + .enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)) + .unwrap(), None, ); - // throw a standard change into the mix to prove that it's discarded - // for being on the same fork. - // - // NOTE: after https://github.com/paritytech/substrate/issues/1861 - // this should still be rejected based on the "span" rule -- it overlaps - // with another change on the same fork. + // there can only be one pending forced change per fork let change_c = PendingChange { next_authorities: set_b.clone(), delay: 3, @@ -956,37 +1167,42 @@ delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; - let is_descendent_of_a = is_descendent_of(|base: &&str, _| { - base.starts_with("hash_a") - }); + let is_descendent_of_a = is_descendent_of(|base: &&str, _| base.starts_with("hash_a")); - assert!(authorities.add_pending_change(change_c, &is_descendent_of_a).is_err()); + assert!(matches!( + authorities.add_pending_change(change_c, &is_descendent_of_a), + Err(Error::MultiplePendingForcedAuthoritySetChanges) + )); - // too early. - assert!( - authorities.apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false) - .unwrap() - .is_none() - ); + // let's try and apply the forced changes. + // too early and there are no forced changes to apply. + assert!(authorities + .apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false, None) + .unwrap() + .is_none()); // too late. - assert!( - authorities.apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true), false) - .unwrap() - .is_none() - ); + assert!(authorities + .apply_forced_changes("hash_a16", 16, &is_descendent_of_a, false, None) + .unwrap() + .is_none()); - // on time -- chooses the right change. + // on time -- chooses the right change for this fork.
assert_eq!( - authorities.apply_forced_changes("hash_a15", 15, &is_descendent_of_a, false) + authorities + .apply_forced_changes("hash_a15", 15, &is_descendent_of_a, false, None) .unwrap() .unwrap(), - (42, AuthoritySet { - current_authorities: set_a, - set_id: 1, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }), + ( + 42, + AuthoritySet { + current_authorities: set_a, + set_id: 1, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges(vec![(0, 42)]), + }, + ) ); } @@ -998,6 +1214,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; @@ -1008,9 +1225,7 @@ mod tests { delay: 0, canon_height: 5, canon_hash: "hash_a", - delay_kind: DelayKind::Best { - median_last_finalized: 0, - }, + delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; // and import it @@ -1019,12 +1234,135 @@ mod tests { .unwrap(); // it should be enacted at the same block that signaled it - assert!( + assert!(authorities + .apply_forced_changes("hash_a", 5, &static_is_descendent_of(false), false, None) + .unwrap() + .is_some()); + } + + #[test] + fn forced_changes_blocked_by_standard_changes() { + let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; + + let mut authorities = AuthoritySet { + current_authorities: set_a.clone(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + // effective at #15 + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 5, + canon_height: 10, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + // effective #20 + let change_b = PendingChange { + next_authorities: set_a.clone(), + delay: 0, + canon_height: 20, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + // effective at #35 + let change_c = PendingChange { + next_authorities: set_a.clone(), + delay: 5, + canon_height: 30, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + // add some pending standard changes all on the same fork + authorities + .add_pending_change(change_a, &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b, &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c, &static_is_descendent_of(true)) + .unwrap(); + + // effective at #45 + let change_d = PendingChange { + next_authorities: set_a.clone(), + delay: 5, + canon_height: 40, + canon_hash: "hash_d", + delay_kind: DelayKind::Best { median_last_finalized: 31 }, + }; + + // now add a forced change on the same fork + authorities + .add_pending_change(change_d, &static_is_descendent_of(true)) + .unwrap(); + + // the forced change cannot be applied since the pending changes it depends on + // have not been applied yet. 
+ assert!(matches!( + authorities.apply_forced_changes( + "hash_d45", + 45, + &static_is_descendent_of(true), + false, + None + ), + Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(15)) + )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); + + // we apply the first pending standard change at #15 + authorities + .apply_standard_changes("hash_a15", 15, &static_is_descendent_of(true), false, None) + .unwrap(); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); + + // but the forced change still depends on the next standard change + assert!(matches!( + authorities.apply_forced_changes( + "hash_d", + 45, + &static_is_descendent_of(true), + false, + None + ), + Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(20)) + )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); + + // we apply the pending standard change at #20 + authorities + .apply_standard_changes("hash_b", 20, &static_is_descendent_of(true), false, None) + .unwrap(); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 20)])); + + // afterwards the forced change at #45 can already be applied since it signals + // that finality stalled at #31, and the next pending standard change is effective + // at #35. subsequent forced changes on the same branch must be kept + assert_eq!( authorities - .apply_forced_changes("hash_a", 5, &static_is_descendent_of(false), false) + .apply_forced_changes("hash_d", 45, &static_is_descendent_of(true), false, None) .unwrap() - .is_some() + .unwrap(), + ( + 31, + AuthoritySet { + current_authorities: set_a.clone(), + set_id: 3, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges(vec![(0, 15), (1, 20), (2, 31)]), + } + ), ); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 20)])); } #[test] @@ -1036,6 +1374,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let new_set = current_authorities.clone(); @@ -1078,52 +1417,35 @@ mod tests { }); // add the three pending changes - authorities - .add_pending_change(change_b, &is_descendent_of) - .unwrap(); - authorities - .add_pending_change(change_a0, &is_descendent_of) - .unwrap(); - authorities - .add_pending_change(change_a1, &is_descendent_of) - .unwrap(); + authorities.add_pending_change(change_b, &is_descendent_of).unwrap(); + authorities.add_pending_change(change_a0, &is_descendent_of).unwrap(); + authorities.add_pending_change(change_a1, &is_descendent_of).unwrap(); // the earliest change at block `best_a` should be the change at A0 (#5) assert_eq!( - authorities - .next_change(&"best_a", &is_descendent_of) - .unwrap(), + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), Some(("hash_a0", 5)), ); // the earliest change at block `best_b` should be the change at B (#4) assert_eq!( - authorities - .next_change(&"best_b", &is_descendent_of) - .unwrap(), + authorities.next_change(&"best_b", &is_descendent_of).unwrap(), Some(("hash_b", 4)), ); // we apply the change at A0 which should prune it and the fork at B authorities - .apply_standard_changes("hash_a0", 5, &is_descendent_of, false) + .apply_standard_changes("hash_a0", 5, &is_descendent_of, false, None) .unwrap(); // the next change is now at A1 (#10) assert_eq!( - authorities - .next_change(&"best_a", &is_descendent_of) - 
.unwrap(), + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), Some(("hash_a1", 10)), ); // there's no longer any pending change at `best_b` fork - assert_eq!( - authorities - .next_change(&"best_b", &is_descendent_of) - .unwrap(), - None, - ); + assert_eq!(authorities.next_change(&"best_b", &is_descendent_of).unwrap(), None); // we a forced change at A10 (#8) let change_a10 = PendingChange { @@ -1131,9 +1453,7 @@ mod tests { delay: 0, canon_height: 8, canon_hash: "hash_a10", - delay_kind: DelayKind::Best { - median_last_finalized: 0, - }, + delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; authorities @@ -1142,9 +1462,7 @@ mod tests { // it should take precedence over the change at A1 (#10) assert_eq!( - authorities - .next_change(&"best_a", &is_descendent_of) - .unwrap(), + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), Some(("hash_a10", 8)), ); } @@ -1154,26 +1472,28 @@ mod tests { // empty authority lists are invalid assert_eq!(AuthoritySet::<(), ()>::genesis(vec![]), None); assert_eq!( - AuthoritySet::<(), ()>::new(vec![], 0, ForkTree::new(), Vec::new()), + AuthoritySet::<(), ()>::new( + vec![], + 0, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ), None, ); - let invalid_authorities_weight = vec![ - (AuthorityId::from_slice(&[1; 32]), 5), - (AuthorityId::from_slice(&[2; 32]), 0), - ]; + let invalid_authorities_weight = + vec![(AuthorityId::from_slice(&[1; 32]), 5), (AuthorityId::from_slice(&[2; 32]), 0)]; // authority weight of zero is invalid - assert_eq!( - AuthoritySet::<(), ()>::genesis(invalid_authorities_weight.clone()), - None - ); + assert_eq!(AuthoritySet::<(), ()>::genesis(invalid_authorities_weight.clone()), None); assert_eq!( AuthoritySet::<(), ()>::new( invalid_authorities_weight.clone(), 0, ForkTree::new(), - Vec::new() + Vec::new(), + AuthoritySetChanges::empty(), ), None, ); @@ -1203,9 +1523,7 @@ mod tests { delay: 10, canon_height: 5, canon_hash: (), - delay_kind: DelayKind::Best { - median_last_finalized: 0, - }, + delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; // pending change contains an an authority set @@ -1228,6 +1546,7 @@ mod tests { set_id: 0, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), }; let new_set = current_authorities.clone(); @@ -1262,17 +1581,13 @@ mod tests { canon_height, canon_hash, delay_kind: if forced { - DelayKind::Best { - median_last_finalized: 0, - } + DelayKind::Best { median_last_finalized: 0 } } else { DelayKind::Finalized }, }; - authorities - .add_pending_change(change, &is_descendent_of) - .unwrap(); + authorities.add_pending_change(change, &is_descendent_of).unwrap(); }; add_pending_change(5, "A", false); @@ -1283,32 +1598,17 @@ mod tests { add_pending_change(15, "C3", true); add_pending_change(20, "D", true); - println!( - "pending_changes: {:?}", - authorities - .pending_changes() - .map(|c| c.canon_hash) - .collect::>() - ); - // applying the standard change at A should not prune anything // other then the change that was applied authorities - .apply_standard_changes("A", 5, &is_descendent_of, false) + .apply_standard_changes("A", 5, &is_descendent_of, false, None) .unwrap(); - println!( - "pending_changes: {:?}", - authorities - .pending_changes() - .map(|c| c.canon_hash) - .collect::>() - ); assert_eq!(authorities.pending_changes().count(), 6); // same for B authorities - .apply_standard_changes("B", 10, &is_descendent_of, false) + .apply_standard_changes("B", 10, 
&is_descendent_of, false, None) .unwrap(); assert_eq!(authorities.pending_changes().count(), 5); @@ -1317,7 +1617,7 @@ mod tests { // finalizing C2 should clear all forced changes authorities - .apply_standard_changes("C2", 15, &is_descendent_of, false) + .apply_standard_changes("C2", 15, &is_descendent_of, false, None) .unwrap(); assert_eq!(authorities.pending_forced_changes.len(), 0); @@ -1325,17 +1625,69 @@ mod tests { // finalizing C0 should clear all forced changes but D let mut authorities = authorities2; authorities - .apply_standard_changes("C0", 15, &is_descendent_of, false) + .apply_standard_changes("C0", 15, &is_descendent_of, false, None) .unwrap(); assert_eq!(authorities.pending_forced_changes.len(), 1); + assert_eq!(authorities.pending_forced_changes.first().unwrap().canon_hash, "D"); + } + + #[test] + fn authority_set_changes_for_complete_data() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 41); + authority_set_changes.append(1, 81); + authority_set_changes.append(2, 121); + + assert_eq!(authority_set_changes.get_set_id(20), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(40), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(41), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(42), AuthoritySetChangeId::Set(1, 81)); + assert_eq!(authority_set_changes.get_set_id(141), AuthoritySetChangeId::Latest); + } + + #[test] + fn authority_set_changes_for_incomplete_data() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(2, 41); + authority_set_changes.append(3, 81); + authority_set_changes.append(4, 121); + + assert_eq!(authority_set_changes.get_set_id(20), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(40), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(41), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(42), AuthoritySetChangeId::Set(3, 81)); + assert_eq!(authority_set_changes.get_set_id(141), AuthoritySetChangeId::Latest); + } + + #[test] + fn iter_from_works() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(1, 41); + authority_set_changes.append(2, 81); + + // we are missing the data for the first set, therefore we should return `None` + assert_eq!(None, authority_set_changes.iter_from(40).map(|it| it.collect::>())); + + // after adding the data for the first set the same query should work + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 21); + authority_set_changes.append(1, 41); + authority_set_changes.append(2, 81); + authority_set_changes.append(3, 121); + assert_eq!( - authorities - .pending_forced_changes - .first() - .unwrap() - .canon_hash, - "D" + Some(vec![(1, 41), (2, 81), (3, 121)]), + authority_set_changes.iter_from(40).map(|it| it.cloned().collect::>()), + ); + + assert_eq!( + Some(vec![(2, 81), (3, 121)]), + authority_set_changes.iter_from(41).map(|it| it.cloned().collect::>()), ); + + assert_eq!(0, authority_set_changes.iter_from(121).unwrap().count()); + + assert_eq!(0, authority_set_changes.iter_from(200).unwrap().count()); } } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 4ed96d058ac6b..bad01e6dfc62f 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -1,46 +1,53 @@ 
-// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Schema for stuff in the aux-db. use std::fmt::Debug; -use std::sync::Arc; -use parity_scale_codec::{Encode, Decode}; -use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use fork_tree::ForkTree; + use finality_grandpa::round::State as RoundState; -use sp_runtime::traits::{Block as BlockT, NumberFor}; use log::{info, warn}; -use sp_finality_grandpa::{AuthorityList, SetId, RoundNumber}; +use parity_scale_codec::{Decode, Encode}; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, PendingChange, DelayKind}; -use crate::consensus_changes::{SharedConsensusChanges, ConsensusChanges}; -use crate::environment::{ - CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, +use fork_tree::ForkTree; +use sc_client_api::backend::AuxStore; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_finality_grandpa::{AuthorityList, RoundNumber, SetId}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +use crate::{ + authorities::{ + AuthoritySet, AuthoritySetChanges, DelayKind, PendingChange, SharedAuthoritySet, + }, + environment::{ + CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, + VoterSetState, + }, + GrandpaJustification, NewAuthoritySet, }; -use crate::NewAuthoritySet; const VERSION_KEY: &[u8] = b"grandpa_schema_version"; const SET_STATE_KEY: &[u8] = b"grandpa_completed_round"; const CONCLUDED_ROUNDS: &[u8] = b"grandpa_concluded_rounds"; const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; -const CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; +const BEST_JUSTIFICATION: &[u8] = b"grandpa_best_justification"; -const CURRENT_VERSION: u32 = 2; +const CURRENT_VERSION: u32 = 3; /// The voter set state. 
#[derive(Debug, Clone, Encode, Decode)] @@ -70,8 +77,9 @@ struct V0AuthoritySet { } impl Into> for V0AuthoritySet -where H: Clone + Debug + PartialEq, - N: Clone + Debug + Ord, +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, { fn into(self) -> AuthoritySet { let mut pending_standard_changes = ForkTree::new(); @@ -102,62 +110,86 @@ where H: Clone + Debug + PartialEq, self.set_id, pending_standard_changes, Vec::new(), + AuthoritySetChanges::empty(), ); authority_set.expect("current_authorities is non-empty and weights are non-zero; qed.") } } -pub(crate) fn load_decode(backend: &B, key: &[u8]) -> ClientResult> { +impl Into> for V2AuthoritySet +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, +{ + fn into(self) -> AuthoritySet { + AuthoritySet::new( + self.current_authorities, + self.set_id, + self.pending_standard_changes, + self.pending_forced_changes, + AuthoritySetChanges::empty(), + ) + .expect("current_authorities is non-empty and weights are non-zero; qed.") + } +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +struct V2AuthoritySet { + current_authorities: AuthorityList, + set_id: u64, + pending_standard_changes: ForkTree>, + pending_forced_changes: Vec>, +} + +pub(crate) fn load_decode( + backend: &B, + key: &[u8], +) -> ClientResult> { match backend.get_aux(key)? { None => Ok(None), Some(t) => T::decode(&mut &t[..]) - .map_err( - |e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e.what())), - ) - .map(Some) + .map_err(|e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e))) + .map(Some), } } /// Persistent data kept between runs. pub(crate) struct PersistentData { pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) set_state: SharedVoterSetState, } fn migrate_from_version0( backend: &B, genesis_round: &G, -) -> ClientResult>, - VoterSetState, -)>> where B: AuxStore, - G: Fn() -> RoundState>, +) -> ClientResult>, VoterSetState)>> +where + B: AuxStore, + G: Fn() -> RoundState>, { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; - if let Some(old_set) = load_decode::<_, V0AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { + if let Some(old_set) = + load_decode::<_, V0AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { let new_set: AuthoritySet> = old_set.into(); backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; - let (last_round_number, last_round_state) = match load_decode::<_, V0VoterSetState>>( - backend, - SET_STATE_KEY, - )? { + let (last_round_number, last_round_state) = match load_decode::< + _, + V0VoterSetState>, + >(backend, SET_STATE_KEY)? 
+ { Some((number, state)) => (number, state), None => (0, genesis_round()), }; let set_id = new_set.set_id; - let base = last_round_state.prevote_ghost - .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + let base = last_round_state.prevote_ghost.expect( + "state is for completed round; completed rounds must have a prevote ghost; qed.", + ); let mut current_rounds = CurrentRounds::new(); current_rounds.insert(last_round_number + 1, HasVoted::No); @@ -178,7 +210,7 @@ fn migrate_from_version0( backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((new_set, set_state))); + return Ok(Some((new_set, set_state))) } Ok(None) @@ -187,32 +219,25 @@ fn migrate_from_version0( fn migrate_from_version1( backend: &B, genesis_round: &G, -) -> ClientResult>, - VoterSetState, -)>> where B: AuxStore, - G: Fn() -> RoundState>, +) -> ClientResult>, VoterSetState)>> +where + B: AuxStore, + G: Fn() -> RoundState>, { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; - if let Some(set) = load_decode::<_, AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { + if let Some(set) = + load_decode::<_, AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { let set_id = set.set_id; - let completed_rounds = |number, state, base| CompletedRounds::new( - CompletedRound { - number, - state, - votes: Vec::new(), - base, - }, - set_id, - &set, - ); + let completed_rounds = |number, state, base| { + CompletedRounds::new( + CompletedRound { number, state, votes: Vec::new(), base }, + set_id, + &set, + ) + }; let set_state = match load_decode::<_, V1VoterSetState>>( backend, @@ -243,17 +268,46 @@ fn migrate_from_version1( let base = set_state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::live( - set_id, - &set, - base, - ) + VoterSetState::live(set_id, &set, base) }, }; backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((set, set_state))); + return Ok(Some((set, set_state))) + } + + Ok(None) +} + +fn migrate_from_version2( + backend: &B, + genesis_round: &G, +) -> ClientResult>, VoterSetState)>> +where + B: AuxStore, + G: Fn() -> RoundState>, +{ + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; + + if let Some(old_set) = + load_decode::<_, V2AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { + let new_set: AuthoritySet> = old_set.into(); + backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; + + let set_state = match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { + Some(state) => state, + None => { + let state = genesis_round(); + let base = state.prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + VoterSetState::live(new_set.set_id, &new_set, base) + }, + }; + + return Ok(Some((new_set, set_state))) } Ok(None) @@ -265,70 +319,68 @@ pub(crate) fn load_persistent( genesis_hash: Block::Hash, genesis_number: NumberFor, genesis_authorities: G, -) - -> ClientResult> - where - B: AuxStore, - G: FnOnce() -> ClientResult, +) -> ClientResult> +where + B: AuxStore, + G: FnOnce() -> ClientResult, { let version: Option = load_decode(backend, VERSION_KEY)?; - let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? 
- .unwrap_or_else(ConsensusChanges::>::empty); let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); match version { None => { - if let Some((new_set, set_state)) = migrate_from_version0::(backend, &make_genesis_round)? { + if let Some((new_set, set_state)) = + migrate_from_version0::(backend, &make_genesis_round)? + { return Ok(PersistentData { authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), - }); + }) } }, Some(1) => { - if let Some((new_set, set_state)) = migrate_from_version1::(backend, &make_genesis_round)? { + if let Some((new_set, set_state)) = + migrate_from_version1::(backend, &make_genesis_round)? + { return Ok(PersistentData { authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), set_state: set_state.into(), - }); + }) } }, Some(2) => { + if let Some((new_set, set_state)) = + migrate_from_version2::(backend, &make_genesis_round)? + { + return Ok(PersistentData { + authority_set: new_set.into(), + set_state: set_state.into(), + }) + } + }, + Some(3) => { if let Some(set) = load_decode::<_, AuthoritySet>>( backend, AUTHORITY_SET_KEY, )? { - let set_state = match load_decode::<_, VoterSetState>( - backend, - SET_STATE_KEY, - )? { - Some(state) => state, - None => { - let state = make_genesis_round(); - let base = state.prevote_ghost + let set_state = + match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { + Some(state) => state, + None => { + let state = make_genesis_round(); + let base = state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::live( - set.set_id, - &set, - base, - ) - } - }; + VoterSetState::live(set.set_id, &set, base) + }, + }; - return Ok(PersistentData { - authority_set: set.into(), - consensus_changes: Arc::new(consensus_changes.into()), - set_state: set_state.into(), - }); + return Ok(PersistentData { authority_set: set.into(), set_state: set_state.into() }) } - } - Some(other) => return Err(ClientError::Backend( - format!("Unsupported GRANDPA DB version: {:?}", other) - )), + }, + Some(other) => + return Err(ClientError::Backend(format!("Unsupported GRANDPA DB version: {:?}", other))), } // genesis. @@ -339,14 +391,11 @@ pub(crate) fn load_persistent( let genesis_set = AuthoritySet::genesis(genesis_authorities) .expect("genesis authorities is non-empty; all weights are non-zero; qed."); let state = make_genesis_round(); - let base = state.prevote_ghost + let base = state + .prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - let genesis_state = VoterSetState::live( - 0, - &genesis_set, - base, - ); + let genesis_state = VoterSetState::live(0, &genesis_set, base); backend.insert_aux( &[ @@ -356,11 +405,7 @@ pub(crate) fn load_persistent( &[], )?; - Ok(PersistentData { - authority_set: genesis_set.into(), - set_state: genesis_state.into(), - consensus_changes: Arc::new(consensus_changes.into()), - }) + Ok(PersistentData { authority_set: genesis_set.into(), set_state: genesis_state.into() }) } /// Update the authority set on disk after a change. @@ -371,8 +416,9 @@ pub(crate) fn load_persistent( pub(crate) fn update_authority_set( set: &AuthoritySet>, new_set: Option<&NewAuthoritySet>>, - write_aux: F -) -> R where + write_aux: F, +) -> R +where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { // write new authority set state to disk. 
@@ -389,24 +435,45 @@ pub(crate) fn update_authority_set( ); let encoded = set_state.encode(); - write_aux(&[ - (AUTHORITY_SET_KEY, &encoded_set[..]), - (SET_STATE_KEY, &encoded[..]), - ]) + write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..]), (SET_STATE_KEY, &encoded[..])]) } else { write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) } } +/// Update the justification for the latest finalized block on disk. +/// +/// We always keep around the justification for the best finalized block and overwrite it +/// as we finalize new blocks; this makes sure that we don't store useless justifications +/// but can always prove finality of the latest block. +pub(crate) fn update_best_justification( + justification: &GrandpaJustification, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + let encoded_justification = justification.encode(); + write_aux(&[(BEST_JUSTIFICATION, &encoded_justification[..])]) +} + +/// Fetch the justification for the latest block finalized by GRANDPA, if any. +pub fn best_justification( + backend: &B, +) -> ClientResult>> +where + B: AuxStore, + Block: BlockT, +{ + load_decode::<_, GrandpaJustification>(backend, BEST_JUSTIFICATION) +} + /// Write voter set state. pub(crate) fn write_voter_set_state( backend: &B, state: &VoterSetState, ) -> ClientResult<()> { - backend.insert_aux( - &[(SET_STATE_KEY, state.encode().as_slice())], - &[] - ) + backend.insert_aux(&[(SET_STATE_KEY, state.encode().as_slice())], &[]) } /// Write concluded round. @@ -421,31 +488,19 @@ pub(crate) fn write_concluded_round( backend.insert_aux(&[(&key[..], round_data.encode().as_slice())], &[]) } -/// Update the consensus changes. -pub(crate) fn update_consensus_changes( - set: &ConsensusChanges, - write_aux: F -) -> R where - H: Encode + Clone, - N: Encode + Clone, - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, -{ - write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) -} - #[cfg(test)] -pub(crate) fn load_authorities(backend: &B) - -> Option> { - load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY) - .expect("backend error") +pub(crate) fn load_authorities( + backend: &B, +) -> Option> { + load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY).expect("backend error") } #[cfg(test)] mod test { - use sp_finality_grandpa::AuthorityId; + use super::*; use sp_core::H256; + use sp_finality_grandpa::AuthorityId; use substrate_test_runtime_client; - use super::*; #[test] fn load_decode_from_v0_migrates_data_format() { @@ -470,19 +525,18 @@ mod test { let voter_set_state = (round_number, round_state.clone()); - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - ], - &[], - ).unwrap(); + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + ], + &[], + ) + .unwrap(); } - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - None, - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), None); // should perform the migration load_persistent::( &client, H256::random(), 0, || unreachable!(), - ).unwrap(); + ) + .unwrap(); - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(2), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3)); - let PersistentData { authority_set, set_state, ..
} = load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); + let PersistentData { authority_set, set_state, .. } = + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); assert_eq!( - *authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, ForkTree::new(), Vec::new(), - ).unwrap(), + AuthoritySetChanges::empty(), + ) + .unwrap(), ); let mut current_rounds = CurrentRounds::new(); @@ -528,7 +584,7 @@ mod test { votes: vec![], }, set_id, - &*authority_set.inner().read(), + &*authority_set.inner(), ), current_rounds, }, @@ -555,24 +611,25 @@ mod test { set_id, ForkTree::new(), Vec::new(), - ).unwrap(); + AuthoritySetChanges::empty(), + ) + .unwrap(); let voter_set_state = V1VoterSetState::Live(round_number, round_state.clone()); - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - (VERSION_KEY, 1u32.encode().as_slice()), - ], - &[], - ).unwrap(); + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 1u32.encode().as_slice()), + ], + &[], + ) + .unwrap(); } - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(1), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(1)); // should perform the migration load_persistent::( @@ -580,28 +637,30 @@ mod test { H256::random(), 0, || unreachable!(), - ).unwrap(); + ) + .unwrap(); - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(2), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3)); - let PersistentData { authority_set, set_state, .. } = load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); + let PersistentData { authority_set, set_state, .. } = + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); assert_eq!( - *authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, ForkTree::new(), Vec::new(), - ).unwrap(), + AuthoritySetChanges::empty(), + ) + .unwrap(), ); let mut current_rounds = CurrentRounds::new(); @@ -618,13 +677,81 @@ mod test { votes: vec![], }, set_id, - &*authority_set.inner().read(), + &*authority_set.inner(), ), current_rounds, }, ); } + #[test] + fn load_decode_from_v2_migrates_data_format() { + let client = substrate_test_runtime_client::new(); + + let authorities = vec![(AuthorityId::default(), 100)]; + let set_id = 3; + + { + let authority_set = V2AuthoritySet:: { + current_authorities: authorities.clone(), + set_id, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let genesis_state = (H256::random(), 32); + let voter_set_state: VoterSetState = + VoterSetState::live( + set_id, + &authority_set.clone().into(), // Note the conversion! 
+ genesis_state, + ); + + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 2u32.encode().as_slice()), + ], + &[], + ) + .unwrap(); + } + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(2)); + + // should perform the migration + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3)); + + let PersistentData { authority_set, .. } = load_persistent::< + substrate_test_runtime_client::runtime::Block, + _, + _, + >(&client, H256::random(), 0, || unreachable!()) + .unwrap(); + + assert_eq!( + *authority_set.inner(), + AuthoritySet::new( + authorities.clone(), + set_id, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .unwrap(), + ); + } + #[test] fn write_read_concluded_rounds() { let client = substrate_test_runtime_client::new(); @@ -645,7 +772,10 @@ mod test { round_number.using_encoded(|n| key.extend(n)); assert_eq!( - load_decode::<_, CompletedRound::>(&client, &key).unwrap(), + load_decode::<_, CompletedRound::>( + &client, &key + ) + .unwrap(), Some(completed_round), ); } diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 276529d555ffe..2e50a3bac01d9 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Gossip and politeness for polite-grandpa. //! @@ -82,23 +84,25 @@ //! //! 
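A quick sanity check on the fanout arithmetic cited in the new gossip constants below: with a fanout of `LUCKY_PEERS = 4`, the number of gossip iterations needed to cover n nodes grows roughly as log4(n), and log4(3000) = ln(3000) / ln(4) ≈ 5.8, which is where the "about 6 iterations" figure in the comment comes from. A one-liner to reproduce the number (sketch, not part of the patch):

    fn main() {
        // log4(3000) = ln(3000) / ln(4) ≈ 5.77, i.e. about 6 gossip iterations
        let iterations = (3000f64).ln() / (4f64).ln();
        println!("log4(3000) = {:.2}", iterations);
    }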
We only send polite messages to peers, -use sp_runtime::traits::{NumberFor, Block as BlockT, Zero}; -use sc_network_gossip::{MessageIntent, ValidatorContext}; +use parity_scale_codec::{Decode, Encode}; use sc_network::{ObservedRole, PeerId, ReputationChange}; -use parity_scale_codec::{Encode, Decode}; +use sc_network_gossip::{MessageIntent, ValidatorContext}; use sp_finality_grandpa::AuthorityId; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use sc_telemetry::{telemetry, CONSENSUS_DEBUG}; -use log::{trace, debug}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use prometheus_endpoint::{CounterVec, Opts, PrometheusError, register, Registry, U64}; +use log::{debug, trace}; +use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use super::{benefit, cost, Round, SetId}; use crate::{environment, CatchUp, CompactCommit, SignedMessage}; -use super::{cost, benefit, Round, SetId}; -use std::collections::{HashMap, VecDeque, HashSet}; -use std::time::{Duration, Instant}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + time::{Duration, Instant}, +}; const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); const CATCH_UP_REQUEST_TIMEOUT: Duration = Duration::from_secs(45); @@ -107,12 +111,23 @@ const CATCH_UP_PROCESS_TIMEOUT: Duration = Duration::from_secs(30); /// catch up request. const CATCH_UP_THRESHOLD: u64 = 2; -const PROPAGATION_ALL: u32 = 4; //in rounds; -const PROPAGATION_ALL_AUTHORITIES: u32 = 2; //in rounds; -const PROPAGATION_SOME_NON_AUTHORITIES: u32 = 3; //in rounds; -const ROUND_DURATION: u32 = 2; // measured in gossip durations +/// The total round duration measured in periods of gossip duration: +/// 2 gossip durations for prevote timer +/// 2 gossip durations for precommit timer +/// 1 gossip duration for precommits to spread +const ROUND_DURATION: u32 = 5; + +/// The period, measured in rounds, since the latest round start, after which we will start +/// propagating gossip messages to more nodes than just the lucky ones. +const PROPAGATION_SOME: f32 = 1.5; + +/// The period, measured in rounds, since the latest round start, after which we will start +/// propagating gossip messages to all the nodes we are connected to. +const PROPAGATION_ALL: f32 = 3.0; -const MIN_LUCKY: usize = 5; +/// Assuming a network of 3000 nodes, using a fanout of 4, after about 6 iterations +/// of gossip a message has very likely reached all nodes on the network (`log4(3000)`). +const LUCKY_PEERS: usize = 4; type Report = (PeerId, ReputationChange); @@ -132,18 +147,14 @@ enum Consider { /// A view of protocol state. #[derive(Debug)] struct View { - round: Round, // the current round we are at. - set_id: SetId, // the current voter set id. + round: Round, // the current round we are at. + set_id: SetId, // the current voter set id. last_commit: Option, // commit-finalized block height, if any. } impl Default for View { fn default() -> Self { - View { - round: Round(1), - set_id: SetId(0), - last_commit: None, - } + View { round: Round(1), set_id: SetId(0), last_commit: None } } } @@ -151,12 +162,20 @@ impl View { /// Consider a round and set ID combination under a current view. 
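// A quick sanity check of the `LUCKY_PEERS` comment above: with fanout `f`,
// each gossip iteration grows the informed set by roughly a factor of `f`,
// so reaching `n` nodes takes about `log_f(n)` iterations. Illustrative
// sketch only, not part of the crate:
fn gossip_iterations(network_size: f64, fanout: f64) -> u32 {
	network_size.log(fanout).ceil() as u32
}

#[test]
fn fanout_of_four_covers_a_3000_node_network_in_about_six_hops() {
	// log4(3000) = ln(3000) / ln(4) ≈ 5.78, which rounds up to 6
	assert_eq!(gossip_iterations(3000.0, 4.0), 6);
}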
fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { // only from current set - if set_id < self.set_id { return Consider::RejectPast } - if set_id > self.set_id { return Consider::RejectFuture } + if set_id < self.set_id { + return Consider::RejectPast + } + if set_id > self.set_id { + return Consider::RejectFuture + } // only r-1 ... r+1 - if round.0 > self.round.0.saturating_add(1) { return Consider::RejectFuture } - if round.0 < self.round.0.saturating_sub(1) { return Consider::RejectPast } + if round.0 > self.round.0.saturating_add(1) { + return Consider::RejectFuture + } + if round.0 < self.round.0.saturating_sub(1) { + return Consider::RejectPast + } Consider::Accept } @@ -165,18 +184,23 @@ impl View { /// because we gate on finalization of a further block than a previous commit. fn consider_global(&self, set_id: SetId, number: N) -> Consider { // only from current set - if set_id < self.set_id { return Consider::RejectPast } - if set_id > self.set_id { return Consider::RejectFuture } + if set_id < self.set_id { + return Consider::RejectPast + } + if set_id > self.set_id { + return Consider::RejectFuture + } // only commits which claim to prove a higher block number than // the one we're aware of. match self.last_commit { None => Consider::Accept, - Some(ref num) => if num < &number { - Consider::Accept - } else { - Consider::RejectPast - } + Some(ref num) => + if num < &number { + Consider::Accept + } else { + Consider::RejectPast + }, } } } @@ -194,22 +218,13 @@ struct LocalView { impl LocalView { /// Creates a new `LocalView` at the given set id and round. fn new(set_id: SetId, round: Round) -> LocalView { - LocalView { - set_id, - round, - last_commit: None, - round_start: Instant::now(), - } + LocalView { set_id, round, last_commit: None, round_start: Instant::now() } } /// Converts the local view to a `View` discarding round and set id /// information about the last commit. fn as_view(&self) -> View<&N> { - View { - round: self.round, - set_id: self.set_id, - last_commit: self.last_commit_height(), - } + View { round: self.round, set_id: self.set_id, last_commit: self.last_commit_height() } } /// Update the set ID. implies a reset to round 1. @@ -217,7 +232,7 @@ impl LocalView { if set_id != self.set_id { self.set_id = set_id; self.round = Round(1); - self.round_start = Instant::now(); + self.round_start = Instant::now(); } } @@ -245,7 +260,7 @@ const KEEP_RECENT_ROUNDS: usize = 3; struct KeepTopics { current_set: SetId, rounds: VecDeque<(Round, SetId)>, - reverse_map: HashMap, SetId)> + reverse_map: HashMap, SetId)>, } impl KeepTopics { @@ -279,10 +294,7 @@ impl KeepTopics { map.insert(super::global_topic::(self.current_set.0), (None, self.current_set)); for &(round, set) in &self.rounds { - map.insert( - super::round_topic::(round.0, set.0), - (Some(round), set) - ); + map.insert(super::round_topic::(round.0, set.0), (Some(round), set)); } self.reverse_map = map; @@ -296,10 +308,8 @@ impl KeepTopics { // topics to send to a neighbor based on their view. fn neighbor_topics(view: &View>) -> Vec { let s = view.set_id; - let mut topics = vec![ - super::global_topic::(s.0), - super::round_topic::(view.round.0, s.0), - ]; + let mut topics = + vec![super::global_topic::(s.0), super::round_topic::(view.round.0, s.0)]; if view.round.0 != 0 { let r = Round(view.round.0 - 1); @@ -368,7 +378,7 @@ pub(super) struct NeighborPacket { /// A versioned neighbor packet. 
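// Usage sketch for the acceptance window implemented above: for a local view
// at round `r` and set `s`, only votes from set `s` in rounds `r-1 ..= r+1`
// are accepted; older sets and rounds are rejected as past, newer ones as
// future. Illustrative only (uses `matches!` to avoid assuming extra derives):
#[test]
fn vote_window_spans_one_round_either_side() {
	let view: View<u64> = View { round: Round(5), set_id: SetId(1), last_commit: None };

	assert!(matches!(view.consider_vote(Round(4), SetId(1)), Consider::Accept));
	assert!(matches!(view.consider_vote(Round(6), SetId(1)), Consider::Accept));
	assert!(matches!(view.consider_vote(Round(7), SetId(1)), Consider::RejectFuture));
	assert!(matches!(view.consider_vote(Round(3), SetId(1)), Consider::RejectPast));
	// anything from an older set is past, regardless of round
	assert!(matches!(view.consider_vote(Round(5), SetId(0)), Consider::RejectPast));
}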
#[derive(Debug, Encode, Decode)] pub(super) enum VersionedNeighborPacket { - #[codec(index = "1")] + #[codec(index = 1)] V1(NeighborPacket), } @@ -409,15 +419,9 @@ pub(super) enum Misbehavior { // could not decode neighbor message. bytes-length of the packet. UndecodablePacket(i32), // Bad catch up message (invalid signatures). - BadCatchUpMessage { - signatures_checked: i32, - }, + BadCatchUpMessage { signatures_checked: i32 }, // Bad commit message - BadCommitMessage { - signatures_checked: i32, - blocks_loaded: i32, - equivocations_caught: i32, - }, + BadCommitMessage { signatures_checked: i32, blocks_loaded: i32, equivocations_caught: i32 }, // A message received that's from the future relative to our view. // always misbehavior. FutureMessage, @@ -448,7 +452,10 @@ impl Misbehavior { let benefit = equivocations_caught.saturating_mul(benefit::PER_EQUIVOCATION); - ReputationChange::new((benefit as i32).saturating_add(cost as i32), "Grandpa: Bad commit") + ReputationChange::new( + (benefit as i32).saturating_add(cost as i32), + "Grandpa: Bad commit", + ) }, FutureMessage => cost::FUTURE_MESSAGE, OutOfScopeMessage => cost::OUT_OF_SCOPE_MESSAGE, @@ -456,6 +463,7 @@ impl Misbehavior { } } +#[derive(Debug)] struct PeerInfo { view: View, roles: ObservedRole, @@ -463,26 +471,31 @@ struct PeerInfo { impl PeerInfo { fn new(roles: ObservedRole) -> Self { - PeerInfo { - view: View::default(), - roles, - } + PeerInfo { view: View::default(), roles } } } -/// The peers we're connected do in gossip. +/// The peers we're connected to in gossip. struct Peers { inner: HashMap>, - lucky_peers: HashSet, - lucky_authorities: HashSet, + /// The randomly picked set of `LUCKY_PEERS` we'll gossip to in the first stage of round + /// gossiping. + first_stage_peers: HashSet, + /// The randomly picked set of peers we'll gossip to in the second stage of gossiping if the + /// first stage didn't allow us to spread the voting data enough to conclude the round. This + /// set should have size `sqrt(connected_peers)`. + second_stage_peers: HashSet, + /// The randomly picked set of `LUCKY_PEERS` light clients we'll gossip commit messages to. + lucky_light_peers: HashSet, } impl Default for Peers { fn default() -> Self { Peers { inner: HashMap::new(), - lucky_peers: HashSet::new(), - lucky_authorities: HashSet::new(), + first_stage_peers: HashSet::new(), + second_stage_peers: HashSet::new(), + lucky_light_peers: HashSet::new(), } } } @@ -490,14 +503,18 @@ impl Default for Peers { impl Peers { fn new_peer(&mut self, who: PeerId, role: ObservedRole) { match role { - ObservedRole::Authority if self.lucky_authorities.len() < MIN_LUCKY => { - self.lucky_authorities.insert(who.clone()); + ObservedRole::Authority if self.first_stage_peers.len() < LUCKY_PEERS => { + self.first_stage_peers.insert(who.clone()); }, - ObservedRole::Full | ObservedRole::Light if self.lucky_peers.len() < MIN_LUCKY => { - self.lucky_peers.insert(who.clone()); + ObservedRole::Authority if self.second_stage_peers.len() < LUCKY_PEERS => { + self.second_stage_peers.insert(who.clone()); }, - _ => {} + ObservedRole::Light if self.lucky_light_peers.len() < LUCKY_PEERS => { + self.lucky_light_peers.insert(who.clone()); + }, + _ => {}, } + self.inner.insert(who, PeerInfo::new(role)); } @@ -505,25 +522,28 @@ impl Peers { self.inner.remove(who); // This does not happen often enough compared to round duration, // so we don't reshuffle. 
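// A condensed model of the peer bookkeeping above: on connect, peers are
// greedily slotted into a gossip bucket until each holds `LUCKY_PEERS`, and
// on disconnect they are merely removed; the buckets are only rebuilt by the
// per-round `reshuffle`, since disconnects are rare on that timescale.
// Illustrative sketch with simplified types, not the crate's `Peers`:
use std::collections::HashSet;

struct Buckets {
	first_stage: HashSet<u32>, // peer ids, simplified to u32 here
	lucky_light: HashSet<u32>,
}

impl Buckets {
	fn on_connect(&mut self, peer: u32, is_light: bool) {
		const LUCKY_PEERS: usize = 4;
		if is_light {
			if self.lucky_light.len() < LUCKY_PEERS {
				self.lucky_light.insert(peer);
			}
		} else if self.first_stage.len() < LUCKY_PEERS {
			self.first_stage.insert(peer);
		}
	}

	fn on_disconnect(&mut self, peer: u32) {
		// cheap removal; the next reshuffle repairs any imbalance
		self.first_stage.remove(&peer);
		self.lucky_light.remove(&peer);
	}
}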
- self.lucky_peers.remove(who); - self.lucky_authorities.remove(who); + self.first_stage_peers.remove(who); + self.second_stage_peers.remove(who); + self.lucky_light_peers.remove(who); } // returns a reference to the new view, if the peer is known. - fn update_peer_state(&mut self, who: &PeerId, update: NeighborPacket) - -> Result>, Misbehavior> - { + fn update_peer_state( + &mut self, + who: &PeerId, + update: NeighborPacket, + ) -> Result>, Misbehavior> { let peer = match self.inner.get_mut(who) { None => return Ok(None), Some(p) => p, }; - let invalid_change = peer.view.set_id > update.set_id - || peer.view.round > update.round && peer.view.set_id == update.set_id - || peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); + let invalid_change = peer.view.set_id > update.set_id || + peer.view.round > update.round && peer.view.set_id == update.set_id || + peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); if invalid_change { - return Err(Misbehavior::InvalidViewChange); + return Err(Misbehavior::InvalidViewChange) } peer.view = View { @@ -548,7 +568,7 @@ impl Peers { // same height, because there is still a misbehavior condition based on // sending commits that are <= the best we are aware of. if peer.view.last_commit.as_ref() > Some(&new_height) { - return Err(Misbehavior::InvalidViewChange); + return Err(Misbehavior::InvalidViewChange) } peer.view.last_commit = Some(new_height); @@ -560,55 +580,89 @@ impl Peers { self.inner.get(who) } - fn authorities(&self) -> usize { - // Note that our sentry and our validator are neither authorities nor non-authorities. - self.inner.iter().filter(|(_, info)| matches!(info.roles, ObservedRole::Authority)).count() - } - - fn non_authorities(&self) -> usize { - // Note that our sentry and our validator are neither authorities nor non-authorities. - self.inner - .iter() - .filter(|(_, info)| matches!(info.roles, ObservedRole::Full | ObservedRole::Light)) - .count() - } - fn reshuffle(&mut self) { - let mut lucky_peers: Vec<_> = self.inner - .iter() - .filter_map(|(id, info)| - if matches!(info.roles, ObservedRole::Full | ObservedRole::Light) { Some(id.clone()) } else { None }) - .collect(); - let mut lucky_authorities: Vec<_> = self.inner - .iter() - .filter_map(|(id, info)| - if matches!(info.roles, ObservedRole::Authority) { Some(id.clone()) } else { None }) - .collect(); + // we want to randomly select peers into three sets according to the following logic: + // - first set: LUCKY_PEERS random peers where at least LUCKY_PEERS/2 are authorities + // (unless + // we're not connected to that many authorities) + // - second set: max(LUCKY_PEERS, sqrt(peers)) peers where at least LUCKY_PEERS are + // authorities. 
+ // - third set: LUCKY_PEERS random light client peers + + let shuffled_peers = { + let mut peers = self + .inner + .iter() + .map(|(peer_id, info)| (*peer_id, info.clone())) + .collect::>(); + + peers.shuffle(&mut rand::thread_rng()); + peers + }; - let num_non_authorities = ((lucky_peers.len() as f32).sqrt() as usize) - .max(MIN_LUCKY) - .min(lucky_peers.len()); + let shuffled_authorities = shuffled_peers.iter().filter_map(|(peer_id, info)| { + if matches!(info.roles, ObservedRole::Authority) { + Some(peer_id) + } else { + None + } + }); - let num_authorities = ((lucky_authorities.len() as f32).sqrt() as usize) - .max(MIN_LUCKY) - .min(lucky_authorities.len()); + let mut first_stage_peers = HashSet::new(); + let mut second_stage_peers = HashSet::new(); + + // we start by allocating authorities to the first stage set and when the minimum of + // `LUCKY_PEERS / 2` is filled we start allocating to the second stage set. + let half_lucky = LUCKY_PEERS / 2; + let one_and_a_half_lucky = LUCKY_PEERS + half_lucky; + let mut n_authorities_added = 0; + for peer_id in shuffled_authorities { + if n_authorities_added < half_lucky { + first_stage_peers.insert(*peer_id); + } else if n_authorities_added < one_and_a_half_lucky { + second_stage_peers.insert(*peer_id); + } else { + break + } - lucky_peers.partial_shuffle(&mut rand::thread_rng(), num_non_authorities); - lucky_peers.truncate(num_non_authorities); + n_authorities_added += 1; + } - lucky_authorities.partial_shuffle(&mut rand::thread_rng(), num_authorities); - lucky_authorities.truncate(num_authorities); + // fill up first and second sets with remaining peers (either full or authorities) + // prioritizing filling the first set over the second. + let n_second_stage_peers = LUCKY_PEERS.max((shuffled_peers.len() as f32).sqrt() as usize); + for (peer_id, info) in &shuffled_peers { + if info.roles.is_light() { + continue + } - self.lucky_peers.clear(); - self.lucky_peers.extend(lucky_peers.into_iter()); + if first_stage_peers.len() < LUCKY_PEERS { + first_stage_peers.insert(*peer_id); + second_stage_peers.remove(peer_id); + } else if second_stage_peers.len() < n_second_stage_peers { + if !first_stage_peers.contains(peer_id) { + second_stage_peers.insert(*peer_id); + } + } else { + break + } + } - self.lucky_authorities.clear(); - self.lucky_authorities.extend(lucky_authorities.into_iter()); + // pick `LUCKY_PEERS` random light peers + let lucky_light_peers = shuffled_peers + .into_iter() + .filter_map(|(peer_id, info)| if info.roles.is_light() { Some(peer_id) } else { None }) + .take(LUCKY_PEERS) + .collect(); + + self.first_stage_peers = first_stage_peers; + self.second_stage_peers = second_stage_peers; + self.lucky_light_peers = lucky_light_peers; } } #[derive(Debug, PartialEq)] -pub(super) enum Action { +pub(super) enum Action { // repropagate under given topic, to the given peers, applying cost/benefit to originator. Keep(H, ReputationChange), // discard and process. @@ -623,15 +677,9 @@ enum PendingCatchUp { /// No pending catch up requests. None, /// Pending catch up request which has not been answered yet. - Requesting { - who: PeerId, - request: CatchUpRequestMessage, - instant: Instant, - }, + Requesting { who: PeerId, request: CatchUpRequestMessage, instant: Instant }, /// Pending catch up request that was answered and is being processed. - Processing { - instant: Instant, - }, + Processing { instant: Instant }, } /// Configuration for the round catch-up mechanism. 
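// The reshuffle selection above, reduced to its arithmetic: given `n`
// connected peers, the three audiences per reshuffle are sized as follows.
// Illustrative helper only, not the crate's code:
fn stage_sizes(n_peers: usize) -> (usize, usize, usize) {
	const LUCKY_PEERS: usize = 4;
	let first = LUCKY_PEERS; // at least LUCKY_PEERS / 2 of them authorities
	let second = LUCKY_PEERS.max((n_peers as f32).sqrt() as usize);
	let light = LUCKY_PEERS;
	(first, second, light)
}

#[test]
fn stage_sizes_for_a_medium_network() {
	// with 100 connected peers the second stage grows to sqrt(100) = 10
	assert_eq!(stage_sizes(100), (4, 10, 4));
}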
@@ -663,10 +711,10 @@ impl CatchUpConfig { match self { CatchUpConfig::Disabled => false, CatchUpConfig::Enabled { only_from_authorities, .. } => match peer.roles { - ObservedRole::Authority | ObservedRole::OurSentry | - ObservedRole::OurGuardedAuthority => true, - _ => !only_from_authorities - } + ObservedRole::Authority => true, + ObservedRole::Light => false, + ObservedRole::Full => !only_from_authorities, + }, } } } @@ -686,8 +734,12 @@ type MaybeMessage = Option<(Vec, NeighborPacket> impl Inner { fn new(config: crate::Config) -> Self { - let catch_up_config = if config.observer_enabled { - if config.is_authority { + let catch_up_config = if config.local_role.is_light() { + // if we are a light client we shouldn't be issuing any catch-up requests + // as we don't participate in the full GRANDPA protocol + CatchUpConfig::disabled() + } else if config.observer_enabled { + if config.local_role.is_authority() { // since the observer protocol is enabled, we will only issue // catch-up requests if we are an authority (and only to other // authorities). @@ -698,8 +750,8 @@ impl Inner { CatchUpConfig::disabled() } } else { - // if the observer protocol isn't enabled, then any full node should - // be able to answer catch-up requests. + // if the observer protocol isn't enabled and we're not a light client, then any full + // node should be able to answer catch-up requests. CatchUpConfig::enabled(false) }; @@ -720,11 +772,12 @@ impl Inner { { let local_view = match self.local_view { None => return None, - Some(ref mut v) => if v.round == round { - return None - } else { - v - }, + Some(ref mut v) => + if v.round == round { + return None + } else { + v + }, }; let set_id = local_view.set_id; @@ -745,27 +798,24 @@ impl Inner { fn note_set(&mut self, set_id: SetId, authorities: Vec) -> MaybeMessage { { let local_view = match self.local_view { - ref mut x @ None => x.get_or_insert(LocalView::new( - set_id, - Round(1), - )), - Some(ref mut v) => if v.set_id == set_id { - let diff_authorities = - self.authorities.iter().collect::>() != - authorities.iter().collect(); - - if diff_authorities { - debug!(target: "afg", - "Gossip validator noted set {:?} twice with different authorities. \ - Was the authority set hard forked?", - set_id, - ); - self.authorities = authorities; - } - return None; - } else { - v - }, + ref mut x @ None => x.get_or_insert(LocalView::new(set_id, Round(1))), + Some(ref mut v) => + if v.set_id == set_id { + let diff_authorities = self.authorities.iter().collect::>() != + authorities.iter().collect(); + + if diff_authorities { + debug!(target: "afg", + "Gossip validator noted set {:?} twice with different authorities. 
\ + Was the authority set hard forked?", + set_id, + ); + self.authorities = authorities; + } + return None + } else { + v + }, }; local_view.update_set(set_id); @@ -785,11 +835,12 @@ impl Inner { { match self.local_view { None => return None, - Some(ref mut v) => if v.last_commit_height() < Some(&finalized) { - v.last_commit = Some((finalized, round, set_id)); - } else { - return None - }, + Some(ref mut v) => + if v.last_commit_height() < Some(&finalized) { + v.last_commit = Some((finalized, round, set_id)); + } else { + return None + }, }; } @@ -797,30 +848,40 @@ impl Inner { } fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { - self.local_view.as_ref() + self.local_view + .as_ref() .map(LocalView::as_view) .map(|v| v.consider_vote(round, set_id)) .unwrap_or(Consider::RejectOutOfScope) } fn consider_global(&self, set_id: SetId, number: NumberFor) -> Consider { - self.local_view.as_ref() + self.local_view + .as_ref() .map(LocalView::as_view) .map(|v| v.consider_global(set_id, &number)) .unwrap_or(Consider::RejectOutOfScope) } - fn cost_past_rejection(&self, _who: &PeerId, _round: Round, _set_id: SetId) -> ReputationChange { + fn cost_past_rejection( + &self, + _who: &PeerId, + _round: Round, + _set_id: SetId, + ) -> ReputationChange { // hardcoded for now. cost::PAST_REJECTION } - fn validate_round_message(&self, who: &PeerId, full: &VoteMessage) - -> Action - { + fn validate_round_message( + &self, + who: &PeerId, + full: &VoteMessage, + ) -> Action { match self.consider_vote(full.round, full.set_id) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectOutOfScope => return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), Consider::RejectPast => return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), Consider::Accept => {}, @@ -829,8 +890,13 @@ impl Inner { // ensure authority is part of the set. 
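// `validate_round_message` boils down to a three-step pipeline: (1) scope-
// check the (round, set_id) pair against our local view, (2) check that the
// claimed voter is in the current authority set, (3) verify the vote
// signature; only then is the message kept and repropagated under its round
// topic. A minimal sketch of the same shape; the numeric costs here are
// hypothetical, the real values live in the `cost`/`benefit` modules:
enum Verdict {
	Keep(i32),    // repropagate, apply benefit to originator
	Discard(i32), // drop, apply cost to originator
}

fn validate_vote_sketch(in_scope: bool, is_known_voter: bool, signature_ok: bool) -> Verdict {
	if !in_scope {
		return Verdict::Discard(-50) // past / future / out-of-scope rejection
	}
	if !is_known_voter {
		return Verdict::Discard(-150) // cost::UNKNOWN_VOTER analogue
	}
	if !signature_ok {
		return Verdict::Discard(-100) // cost::BAD_SIGNATURE analogue
	}
	Verdict::Keep(100) // benefit::ROUND_MESSAGE analogue
}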
if !self.authorities.contains(&full.message.id) { debug!(target: "afg", "Message from unknown voter: {}", full.message.id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); - return Action::Discard(cost::UNKNOWN_VOTER); + telemetry!( + self.config.telemetry; + CONSENSUS_DEBUG; + "afg.bad_msg_signature"; + "signature" => ?full.message.id, + ); + return Action::Discard(cost::UNKNOWN_VOTER) } if !sp_finality_grandpa::check_message_signature( @@ -841,38 +907,50 @@ impl Inner { full.set_id.0, ) { debug!(target: "afg", "Bad message signature {}", full.message.id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); - return Action::Discard(cost::BAD_SIGNATURE); + telemetry!( + self.config.telemetry; + CONSENSUS_DEBUG; + "afg.bad_msg_signature"; + "signature" => ?full.message.id, + ); + return Action::Discard(cost::BAD_SIGNATURE) } let topic = super::round_topic::(full.round.0, full.set_id.0); Action::Keep(topic, benefit::ROUND_MESSAGE) } - fn validate_commit_message(&mut self, who: &PeerId, full: &FullCommitMessage) - -> Action - { - + fn validate_commit_message( + &mut self, + who: &PeerId, + full: &FullCommitMessage, + ) -> Action { if let Err(misbehavior) = self.peers.update_commit_height(who, full.message.target_number) { - return Action::Discard(misbehavior.cost()); + return Action::Discard(misbehavior.cost()) } match self.consider_global(full.set_id, full.message.target_number) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), Consider::RejectPast => return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), - Consider::RejectOutOfScope => return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), Consider::Accept => {}, } - if full.message.precommits.len() != full.message.auth_data.len() || full.message.precommits.is_empty() { + if full.message.precommits.len() != full.message.auth_data.len() || + full.message.precommits.is_empty() + { debug!(target: "afg", "Malformed compact commit"); - telemetry!(CONSENSUS_DEBUG; "afg.malformed_compact_commit"; + telemetry!( + self.config.telemetry; + CONSENSUS_DEBUG; + "afg.malformed_compact_commit"; "precommits_len" => ?full.message.precommits.len(), "auth_data_len" => ?full.message.auth_data.len(), "precommits_is_empty" => ?full.message.precommits.is_empty(), ); - return Action::Discard(cost::MALFORMED_COMMIT); + return Action::Discard(cost::MALFORMED_COMMIT) } // always discard commits initially and rebroadcast after doing full @@ -881,33 +959,33 @@ impl Inner { Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_COMMIT) } - fn validate_catch_up_message(&mut self, who: &PeerId, full: &FullCatchUpMessage) - -> Action - { + fn validate_catch_up_message( + &mut self, + who: &PeerId, + full: &FullCatchUpMessage, + ) -> Action { match &self.pending_catch_up { PendingCatchUp::Requesting { who: peer, request, instant } => { if peer != who { - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()); + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) } if request.set_id != full.set_id { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } if request.round.0 > full.message.round_number { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } if full.message.prevotes.is_empty() || 
full.message.precommits.is_empty() { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } // move request to pending processing state, we won't push out // any catch up requests until we import this one (either with a // success or failure). - self.pending_catch_up = PendingCatchUp::Processing { - instant: *instant, - }; + self.pending_catch_up = PendingCatchUp::Processing { instant: *instant }; // always discard catch up messages, they're point-to-point let topic = super::global_topic::(full.set_id.0); @@ -948,15 +1026,14 @@ impl Inner { if request.set_id.0.saturating_add(1) == local_view.set_id.0 && local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 { - return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)); + return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)) } - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) } match self.peers.peer(who) { - None => - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), Some(peer) if peer.view.round >= request.round => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), _ => {}, @@ -964,7 +1041,7 @@ impl Inner { let last_completed_round = set_state.read().last_completed_round(); if last_completed_round.number < request.round.0 { - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) } trace!(target: "afg", "Replying to catch-up request for round {} from {} with round {}", @@ -1035,10 +1112,8 @@ impl Inner { { // send catch up request if allowed let round = peer.view.round.0 - 1; // peer.view.round is > 0 - let request = CatchUpRequestMessage { - set_id: peer.view.set_id, - round: Round(round), - }; + let request = + CatchUpRequestMessage { set_id: peer.view.set_id, round: Round(round) }; let (catch_up_allowed, catch_up_report) = self.note_catch_up_request(who, &request); @@ -1058,16 +1133,17 @@ impl Inner { (catch_up, report) } - fn import_neighbor_message(&mut self, who: &PeerId, update: NeighborPacket>) - -> (Vec, Action, Option>, Option) - { + fn import_neighbor_message( + &mut self, + who: &PeerId, + update: NeighborPacket>, + ) -> (Vec, Action, Option>, Option) { let update_res = self.peers.update_peer_state(who, update); let (cost_benefit, topics) = match update_res { Ok(view) => (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))), - Err(misbehavior) => - (misbehavior.cost(), None), + Err(misbehavior) => (misbehavior.cost(), None), }; let (catch_up, report) = match update_res { @@ -1091,7 +1167,22 @@ impl Inner { commit_finalized_height: *local_view.last_commit_height().unwrap_or(&Zero::zero()), }; - let peers = self.peers.inner.keys().cloned().collect(); + let peers = self + .peers + .inner + .iter() + .filter_map(|(id, info)| { + // light clients don't participate in the full GRANDPA voter protocol + // and therefore don't need to be informed about view updates + if info.roles.is_light() { + None + } else { + Some(id) + } + }) + .cloned() + .collect(); + (peers, packet) }) } @@ -1102,19 +1193,21 @@ impl Inner { catch_up_request: &CatchUpRequestMessage, ) -> (bool, Option) { let report = match &self.pending_catch_up { - PendingCatchUp::Requesting { who: peer, instant, .. } => + PendingCatchUp::Requesting { who: peer, instant, .. 
} => { if instant.elapsed() <= CATCH_UP_REQUEST_TIMEOUT { - return (false, None); + return (false, None) } else { // report peer for timeout Some((peer.clone(), cost::CATCH_UP_REQUEST_TIMEOUT)) - }, - PendingCatchUp::Processing { instant, .. } => + } + }, + PendingCatchUp::Processing { instant, .. } => { if instant.elapsed() < CATCH_UP_PROCESS_TIMEOUT { - return (false, None); + return (false, None) } else { None - }, + } + }, _ => None, }; @@ -1130,72 +1223,40 @@ impl Inner { /// The initial logic for filtering round messages follows the given state /// transitions: /// - /// - State 0: not allowed to anyone (only if our local node is not an authority) - /// - State 1: allowed to random `sqrt(authorities)` - /// - State 2: allowed to all authorities - /// - State 3: allowed to random `sqrt(non-authorities)` - /// - State 4: allowed to all non-authorities + /// - State 1: allowed to LUCKY_PEERS random peers (where at least LUCKY_PEERS/2 are + /// authorities) + /// - State 2: allowed to max(LUCKY_PEERS, sqrt(random peers)) (where at least LUCKY_PEERS are + /// authorities) + /// - State 3: allowed to all peers /// - /// Transitions will be triggered on repropagation attempts by the - /// underlying gossip layer, which should happen every 30 seconds. - fn round_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { + /// Transitions will be triggered on repropagation attempts by the underlying gossip layer. + fn round_message_allowed(&self, who: &PeerId) -> bool { let round_duration = self.config.gossip_duration * ROUND_DURATION; let round_elapsed = match self.local_view { Some(ref local_view) => local_view.round_start.elapsed(), None => return false, }; - if !self.config.is_authority - && round_elapsed < round_duration * PROPAGATION_ALL - { - // non-authority nodes don't gossip any messages right away. we - // assume that authorities (and sentries) are strongly connected, so - // it should be unnecessary for non-authorities to gossip all - // messages right away. - return false; - } - - match peer.roles { - ObservedRole::OurGuardedAuthority | ObservedRole::OurSentry => true, - ObservedRole::Authority => { - let authorities = self.peers.authorities(); - - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - }, - ObservedRole::Full | ObservedRole::Light => { - // the node is not an authority so we apply stricter filters - if round_elapsed >= round_duration * PROPAGATION_ALL { - // if we waited for 3 (or more) rounds - // then it is allowed to be sent to all peers. - true - } else if round_elapsed >= round_duration * PROPAGATION_SOME_NON_AUTHORITIES { - // otherwise we only send it to `sqrt(non-authorities)`. 
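// The pending catch-up bookkeeping above is a three-state machine:
// None -> Requesting -> Processing -> None. A new request is only allowed
// when there is no pending one, or when the pending one has timed out
// (CATCH_UP_REQUEST_TIMEOUT = 45s for an unanswered request,
// CATCH_UP_PROCESS_TIMEOUT = 30s for a reply stuck in processing). A minimal
// sketch of that gate, outside the crate's types:
use std::time::{Duration, Instant};

enum Pending {
	None,
	Requesting { since: Instant },
	Processing { since: Instant },
}

fn catch_up_allowed(pending: &Pending) -> bool {
	const REQUEST_TIMEOUT: Duration = Duration::from_secs(45);
	const PROCESS_TIMEOUT: Duration = Duration::from_secs(30);

	match pending {
		Pending::None => true,
		// an unanswered request blocks new ones until it times out
		Pending::Requesting { since } => since.elapsed() > REQUEST_TIMEOUT,
		// a reply being imported blocks new ones until it times out
		Pending::Processing { since } => since.elapsed() >= PROCESS_TIMEOUT,
	}
}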
- self.peers.lucky_peers.contains(who) - } else { - false - } - }, + if self.config.local_role.is_light() { + return false + } + + if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { + self.peers.first_stage_peers.contains(who) + } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) + } else { + self.peers.peer(who).map(|info| !info.roles.is_light()).unwrap_or(false) } } /// The initial logic for filtering global messages follows the given state /// transitions: /// - /// - State 0: send to `sqrt(authorities)` ++ `sqrt(non-authorities)`. - /// - State 1: send to all authorities - /// - State 2: send to all non-authorities + /// - State 1: allowed to max(LUCKY_PEERS, sqrt(peers)) (where at least LUCKY_PEERS are + /// authorities) + /// - State 2: allowed to all peers /// /// We are more lenient with global messages since there should be a lot /// less global messages than round messages (just commits), and we want @@ -1204,50 +1265,23 @@ impl Inner { /// /// Transitions will be triggered on repropagation attempts by the /// underlying gossip layer, which should happen every 30 seconds. - fn global_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { + fn global_message_allowed(&self, who: &PeerId) -> bool { let round_duration = self.config.gossip_duration * ROUND_DURATION; let round_elapsed = match self.local_view { Some(ref local_view) => local_view.round_start.elapsed(), None => return false, }; - match peer.roles { - ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority => true, - ObservedRole::Authority => { - let authorities = self.peers.authorities(); - - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - }, - ObservedRole::Full | ObservedRole::Light => { - let non_authorities = self.peers.non_authorities(); - - // the target node is not an authority, on the first and second - // round duration we start by sending the message to only - // `sqrt(non_authorities)` (if we're connected to at least - // `MIN_LUCKY`). 
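// The new staged round filter, condensed: with ROUND_DURATION = 5 gossip
// durations, a round message fans out to the first-stage peers immediately,
// to first + second stage after PROPAGATION_SOME (1.5) round durations, and
// to every non-light peer after PROPAGATION_ALL (3.0). A runnable sketch of
// just the threshold arithmetic, simplified to elapsed/duration ratios:
fn round_message_audience(rounds_elapsed: f32) -> &'static str {
	const PROPAGATION_SOME: f32 = 1.5;
	const PROPAGATION_ALL: f32 = 3.0;

	if rounds_elapsed < PROPAGATION_SOME {
		"first stage only (LUCKY_PEERS)"
	} else if rounds_elapsed < PROPAGATION_ALL {
		"first + second stage (max(LUCKY_PEERS, sqrt(peers)))"
	} else {
		"all non-light peers"
	}
}

#[test]
fn audience_grows_with_round_age() {
	assert_eq!(round_message_audience(1.0), "first stage only (LUCKY_PEERS)");
	assert_eq!(round_message_audience(2.0), "first + second stage (max(LUCKY_PEERS, sqrt(peers)))");
	assert_eq!(round_message_audience(3.5), "all non-light peers");
}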
- if round_elapsed < round_duration * PROPAGATION_SOME_NON_AUTHORITIES - && non_authorities > MIN_LUCKY - { - self.peers.lucky_peers.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // non-authorities for whom it is polite to do so - true - } - } + if self.config.local_role.is_light() { + return false + } + + if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) || + self.peers.lucky_light_peers.contains(who) + } else { + true } } } @@ -1258,15 +1292,17 @@ pub(crate) struct Metrics { } impl Metrics { - pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { + pub(crate) fn register( + registry: &prometheus_endpoint::Registry, + ) -> Result { Ok(Self { messages_validated: register( CounterVec::new( Opts::new( "finality_grandpa_communication_gossip_validator_messages", - "Number of messages validated by the finality grandpa gossip validator." + "Number of messages validated by the finality grandpa gossip validator.", ), - &["message", "action"] + &["message", "action"], )?, registry, )?, @@ -1280,6 +1316,7 @@ pub(super) struct GossipValidator { set_state: environment::SharedVoterSetState, report_sender: TracingUnboundedSender, metrics: Option, + telemetry: Option, } impl GossipValidator { @@ -1290,7 +1327,8 @@ impl GossipValidator { config: crate::Config, set_state: environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, - ) -> (GossipValidator, TracingUnboundedReceiver) { + telemetry: Option, + ) -> (GossipValidator, TracingUnboundedReceiver) { let metrics = match prometheus_registry.map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), Some(Err(e)) => { @@ -1306,6 +1344,7 @@ impl GossipValidator { set_state, report_sender: tx, metrics, + telemetry, }; (val, rx) @@ -1313,7 +1352,8 @@ impl GossipValidator { /// Note a round in the current set has started. pub(super) fn note_round(&self, round: Round, send_neighbor: F) - where F: FnOnce(Vec, NeighborPacket>) + where + F: FnOnce(Vec, NeighborPacket>), { let maybe_msg = self.inner.write().note_round(round); if let Some((to, msg)) = maybe_msg { @@ -1324,7 +1364,8 @@ impl GossipValidator { /// Note that a voter set with given ID has started. Updates the current set to given /// value and initializes the round to 0. pub(super) fn note_set(&self, set_id: SetId, authorities: Vec, send_neighbor: F) - where F: FnOnce(Vec, NeighborPacket>) + where + F: FnOnce(Vec, NeighborPacket>), { let maybe_msg = self.inner.write().note_set(set_id, authorities); if let Some((to, msg)) = maybe_msg { @@ -1339,14 +1380,10 @@ impl GossipValidator { set_id: SetId, finalized: NumberFor, send_neighbor: F, - ) - where F: FnOnce(Vec, NeighborPacket>) + ) where + F: FnOnce(Vec, NeighborPacket>), { - let maybe_msg = self.inner.write().note_commit_finalized( - round, - set_id, - finalized, - ); + let maybe_msg = self.inner.write().note_commit_finalized(round, set_id, finalized); if let Some((to, msg)) = maybe_msg { send_neighbor(to, msg); @@ -1354,7 +1391,7 @@ impl GossipValidator { } /// Note that we've processed a catch up message. 
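// Commit (global) gossip is deliberately more lenient than round gossip:
// there are far fewer commits than votes, and light clients depend on them,
// so the audience starts at first + second stage plus the lucky light peers
// and opens up to everyone after PROPAGATION_ALL round durations. Condensed
// sketch of the two-state gate (illustrative only):
fn global_message_audience(rounds_elapsed: f32) -> &'static str {
	const PROPAGATION_ALL: f32 = 3.0;

	if rounds_elapsed < PROPAGATION_ALL {
		"first + second stage peers, plus lucky light peers"
	} else {
		"all peers"
	}
}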
- pub(super) fn note_catch_up_message_processed(&self) { + pub(super) fn note_catch_up_message_processed(&self) { self.inner.write().note_catch_up_message_processed(); } @@ -1362,9 +1399,11 @@ impl GossipValidator { let _ = self.report_sender.unbounded_send(PeerReport { who, cost_benefit }); } - pub(super) fn do_validate(&self, who: &PeerId, mut data: &[u8]) - -> (Action, Vec, Option>) - { + pub(super) fn do_validate( + &self, + who: &PeerId, + mut data: &[u8], + ) -> (Action, Vec, Option>) { let mut broadcast_topics = Vec::new(); let mut peer_reply = None; @@ -1383,10 +1422,10 @@ impl GossipValidator { }, Ok(GossipMessage::Neighbor(update)) => { message_name = Some("neighbor"); - let (topics, action, catch_up, report) = self.inner.write().import_neighbor_message( - who, - update.into_neighbor_packet(), - ); + let (topics, action, catch_up, report) = self + .inner + .write() + .import_neighbor_message(who, update.into_neighbor_packet()); if let Some((peer, cost_benefit)) = report { self.report(peer, cost_benefit); @@ -1395,30 +1434,32 @@ impl GossipValidator { broadcast_topics = topics; peer_reply = catch_up; action - } + }, Ok(GossipMessage::CatchUp(ref message)) => { message_name = Some("catch_up"); self.inner.write().validate_catch_up_message(who, message) }, Ok(GossipMessage::CatchUpRequest(request)) => { message_name = Some("catch_up_request"); - let (reply, action) = self.inner.write().handle_catch_up_request( - who, - request, - &self.set_state, - ); + let (reply, action) = + self.inner.write().handle_catch_up_request(who, request, &self.set_state); peer_reply = reply; action - } + }, Err(e) => { message_name = None; - debug!(target: "afg", "Error decoding message: {}", e.what()); - telemetry!(CONSENSUS_DEBUG; "afg.err_decoding_msg"; "" => ""); + debug!(target: "afg", "Error decoding message: {}", e); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.err_decoding_msg"; + "" => "", + ); - let len = std::cmp::min(i32::max_value() as usize, data.len()) as i32; + let len = std::cmp::min(i32::MAX as usize, data.len()) as i32; Action::Discard(Misbehavior::UndecodablePacket(len).cost()) - } + }, } }; @@ -1442,17 +1483,20 @@ impl GossipValidator { } impl sc_network_gossip::Validator for GossipValidator { - fn new_peer(&self, context: &mut dyn ValidatorContext, who: &PeerId, roles: ObservedRole) { + fn new_peer( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + roles: ObservedRole, + ) { let packet = { let mut inner = self.inner.write(); inner.peers.new_peer(who.clone(), roles); - inner.local_view.as_ref().map(|v| { - NeighborPacket { - round: v.round, - set_id: v.set_id, - commit_finalized_height: *v.last_commit_height().unwrap_or(&Zero::zero()), - } + inner.local_view.as_ref().map(|v| NeighborPacket { + round: v.round, + set_id: v.set_id, + commit_finalized_height: *v.last_commit_height().unwrap_or(&Zero::zero()), }) }; @@ -1466,9 +1510,12 @@ impl sc_network_gossip::Validator for GossipValidator, who: &PeerId, data: &[u8]) - -> sc_network_gossip::ValidationResult - { + fn validate( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + data: &[u8], + ) -> sc_network_gossip::ValidationResult { let (action, broadcast_topics, peer_reply) = self.do_validate(who, data); // not with lock held! 
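// The "not with lock held!" comment above captures a deliberate pattern in
// `validate`: all state mutation happens inside `do_validate` under the inner
// write lock, while peer reports and replies are sent only after the lock is
// released, so channel back-pressure cannot stall other gossip validations.
// A minimal sketch of the same shape, with hypothetical types:
use std::sync::Mutex;

struct LockedValidator {
	state: Mutex<u64>, // stand-in for the real inner state
}

impl LockedValidator {
	fn validate(&self, data: u64) {
		// 1) decide under the lock, returning what must be sent...
		let report = {
			let mut state = self.state.lock().unwrap();
			*state += data;
			Some(("peer-id", -10i32)) // hypothetical (peer, cost) report
		}; // ...lock dropped here
		// 2) ...then perform side effects without holding it
		if let Some((peer, cost)) = report {
			println!("reporting {} with {}", peer, cost);
		}
	}
}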
@@ -1485,21 +1532,21 @@ impl sc_network_gossip::Validator for GossipValidator { self.report(who.clone(), cb); sc_network_gossip::ValidationResult::ProcessAndDiscard(topic) - } + }, Action::Discard(cb) => { self.report(who.clone(), cb); sc_network_gossip::ValidationResult::Discard - } + }, } } - fn message_allowed<'a>(&'a self) - -> Box bool + 'a> - { + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { let (inner, do_rebroadcast) = { use parking_lot::RwLockWriteGuard; @@ -1518,7 +1565,7 @@ impl sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator false, Ok(GossipMessage::CatchUpRequest(_)) => false, Ok(GossipMessage::CatchUp(_)) => false, @@ -1583,7 +1631,8 @@ impl sc_network_gossip::Validator for GossipValidator return true, - Some((Some(_), _)) => return false, // round messages don't require further checking. + // round messages don't require further checking. + Some((Some(_), _)) => return false, Some((None, _)) => {}, }; @@ -1597,11 +1646,10 @@ impl sc_network_gossip::Validator for GossipValidator true, Ok(GossipMessage::Commit(full)) => match local_view.last_commit { Some((number, round, set_id)) => - // we expire any commit message that doesn't target the same block - // as our best commit or isn't from the same round and set id + // we expire any commit message that doesn't target the same block + // as our best commit or isn't from the same round and set id !(full.message.target_number == number && - full.round == round && - full.set_id == set_id), + full.round == round && full.set_id == set_id), None => true, }, Ok(_) => true, @@ -1618,8 +1666,8 @@ pub(super) struct PeerReport { #[cfg(test)] mod tests { - use super::*; - use super::environment::SharedVoterSetState; + use super::{environment::SharedVoterSetState, *}; + use sc_network::config::Role; use sc_network_gossip::Validator as GossipValidatorT; use sc_network_test::Block; use sp_core::{crypto::Public, H256}; @@ -1631,26 +1679,22 @@ mod tests { justification_period: 256, keystore: None, name: None, - is_authority: true, + local_role: Role::Authority, observer_enabled: true, + telemetry: None, } } // dummy voter set state fn voter_set_state() -> SharedVoterSetState { - use crate::authorities::AuthoritySet; - use crate::environment::VoterSetState; + use crate::{authorities::AuthoritySet, environment::VoterSetState}; let base = (H256::zero(), 0); let voters = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; let voters = AuthoritySet::genesis(voters).unwrap(); - let set_state = VoterSetState::live( - 0, - &voters, - base, - ); + let set_state = VoterSetState::live(0, &voters, base); set_state.into() } @@ -1695,11 +1739,8 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - let update = NeighborPacket { - round: Round(5), - set_id: SetId(10), - commit_finalized_height: 50, - }; + let update = + NeighborPacket { round: Round(5), set_id: SetId(10), commit_finalized_height: 50 }; let res = peers.update_peer_state(&id, update.clone()); assert!(res.unwrap().is_none()); @@ -1714,29 +1755,17 @@ mod tests { #[test] fn update_peer_state() { - let update1 = NeighborPacket { - round: Round(5), - set_id: SetId(10), - commit_finalized_height: 50u32, - }; + let update1 = + NeighborPacket { round: Round(5), set_id: SetId(10), commit_finalized_height: 50u32 }; - let update2 = NeighborPacket { - round: Round(6), - set_id: SetId(10), - commit_finalized_height: 60, - }; + let update2 = + NeighborPacket { round: Round(6), 
set_id: SetId(10), commit_finalized_height: 60 }; - let update3 = NeighborPacket { - round: Round(2), - set_id: SetId(11), - commit_finalized_height: 61, - }; + let update3 = + NeighborPacket { round: Round(2), set_id: SetId(11), commit_finalized_height: 61 }; - let update4 = NeighborPacket { - round: Round(3), - set_id: SetId(11), - commit_finalized_height: 80, - }; + let update4 = + NeighborPacket { round: Round(3), set_id: SetId(11), commit_finalized_height: 80 }; let mut peers = Peers::default(); let id = PeerId::random(); @@ -1763,11 +1792,13 @@ mod tests { let id = PeerId::random(); peers.new_peer(id.clone(), ObservedRole::Authority); - peers.update_peer_state(&id, NeighborPacket { - round: Round(10), - set_id: SetId(10), - commit_finalized_height: 10, - }).unwrap().unwrap(); + peers + .update_peer_state( + &id, + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 10 }, + ) + .unwrap() + .unwrap(); let mut check_update = move |update: NeighborPacket<_>| { let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); @@ -1796,11 +1827,7 @@ mod tests { #[test] fn messages_not_expired_immediately() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let set_id = 1; @@ -1832,11 +1859,7 @@ mod tests { fn message_from_unknown_authority_discarded() { assert!(cost::UNKNOWN_VOTER != cost::BAD_SIGNATURE); - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let set_id = 1; let auth = AuthorityId::from_slice(&[1u8; 32]); let peer = PeerId::random(); @@ -1845,31 +1868,37 @@ mod tests { val.note_round(Round(1), |_, _| {}); let inner = val.inner.read(); - let unknown_voter = inner.validate_round_message(&peer, &VoteMessage { - round: Round(1), - set_id: SetId(set_id), - message: SignedMessage:: { - message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { - target_hash: Default::default(), - target_number: 10, - }), - signature: Default::default(), - id: AuthorityId::from_slice(&[2u8; 32]), - } - }); + let unknown_voter = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), + message: SignedMessage:: { + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: Default::default(), + id: AuthorityId::from_slice(&[2u8; 32]), + }, + }, + ); - let bad_sig = inner.validate_round_message(&peer, &VoteMessage { - round: Round(1), - set_id: SetId(set_id), - message: SignedMessage:: { - message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { - target_hash: Default::default(), - target_number: 10, - }), - signature: Default::default(), - id: auth.clone(), - } - }); + let bad_sig = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), + message: SignedMessage:: { + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: Default::default(), + id: auth.clone(), + }, + }, + ); assert_eq!(unknown_voter, Action::Discard(cost::UNKNOWN_VOTER)); assert_eq!(bad_sig, Action::Discard(cost::BAD_SIGNATURE)); @@ -1877,11 +1906,7 @@ mod tests { #[test] fn unsolicited_catch_up_messages_discarded() { - let (val, _) = GossipValidator::::new( - config(), - 
voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let set_id = 1; let auth = AuthorityId::from_slice(&[1u8; 32]); @@ -1892,16 +1917,19 @@ mod tests { let validate_catch_up = || { let mut inner = val.inner.write(); - inner.validate_catch_up_message(&peer, &FullCatchUpMessage { - set_id: SetId(set_id), - message: finality_grandpa::CatchUp { - round_number: 10, - prevotes: Default::default(), - precommits: Default::default(), - base_hash: Default::default(), - base_number: Default::default(), - } - }) + inner.validate_catch_up_message( + &peer, + &FullCatchUpMessage { + set_id: SetId(set_id), + message: finality_grandpa::CatchUp { + round_number: 10, + prevotes: Default::default(), + precommits: Default::default(), + base_hash: Default::default(), + base_number: Default::default(), + }, + }, + ) }; // the catch up is discarded because we have no pending request @@ -1909,10 +1937,7 @@ mod tests { let noted = val.inner.write().note_catch_up_request( &peer, - &CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(10), - } + &CatchUpRequestMessage { set_id: SetId(set_id), round: Round(10) }, ); assert!(noted.0); @@ -1938,19 +1963,13 @@ mod tests { let mut current_rounds = environment::CurrentRounds::new(); current_rounds.insert(3, environment::HasVoted::No); - let set_state = environment::VoterSetState::::Live { - completed_rounds, - current_rounds, - }; + let set_state = + environment::VoterSetState::::Live { completed_rounds, current_rounds }; set_state.into() }; - let (val, _) = GossipValidator::::new( - config(), - set_state.clone(), - None, - ); + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None, None); let set_id = 1; let auth = AuthorityId::from_slice(&[1u8; 32]); @@ -1966,10 +1985,7 @@ mod tests { let res = inner.handle_catch_up_request( &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(10), - }, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(10) }, &set_state, ); @@ -1979,10 +1995,7 @@ mod tests { let res = inner.handle_catch_up_request( &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(2), - }, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(2) }, &set_state, ); @@ -2001,11 +2014,7 @@ mod tests { #[test] fn detects_honest_out_of_scope_catch_requests() { let set_state = voter_set_state(); - let (val, _) = GossipValidator::::new( - config(), - set_state.clone(), - None, - ); + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None, None); // the validator starts at set id 2 val.note_set(SetId(2), Vec::new(), |_, _| {}); @@ -2019,10 +2028,7 @@ mod tests { let mut inner = val.inner.write(); inner.handle_catch_up_request( &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(round), - }, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(round) }, &set_state, ) }; @@ -2042,50 +2048,28 @@ mod tests { // the validator is at set id 2 and round 0. 
requests for set id 1 // should not be answered but they should be considered an honest // mistake - assert_res( - send_request(1, 1), - true, - ); + assert_res(send_request(1, 1), true); - assert_res( - send_request(1, 10), - true, - ); + assert_res(send_request(1, 10), true); // requests for set id 0 should be considered out of scope - assert_res( - send_request(0, 1), - false, - ); + assert_res(send_request(0, 1), false); - assert_res( - send_request(0, 10), - false, - ); + assert_res(send_request(0, 10), false); // after the validator progresses further than CATCH_UP_THRESHOLD in set // id 2, any request for set id 1 should no longer be considered an // honest mistake. val.note_round(Round(3), |_, _| {}); - assert_res( - send_request(1, 1), - false, - ); + assert_res(send_request(1, 1), false); - assert_res( - send_request(1, 2), - false, - ); + assert_res(send_request(1, 2), false); } #[test] fn issues_catch_up_request_on_neighbor_packet_import() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2149,17 +2133,13 @@ mod tests { // if the observer protocol is enabled and we are not an authority, // then we don't issue any catch-up requests. - c.is_authority = false; + c.local_role = Role::Full; c.observer_enabled = true; c }; - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2174,11 +2154,7 @@ mod tests { // we should get `None`. let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, ); match catch_up_request { @@ -2189,11 +2165,7 @@ mod tests { #[test] fn doesnt_send_catch_up_requests_to_non_authorities_when_observer_enabled() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2203,17 +2175,16 @@ mod tests { let peer_authority = PeerId::random(); let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_authority.clone(), ObservedRole::Authority); + val.inner + .write() + .peers + .new_peer(peer_authority.clone(), ObservedRole::Authority); val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); let import_neighbor_message = |peer| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, ); catch_up_request @@ -2249,11 +2220,7 @@ mod tests { c }; - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); // the validator starts at set id 1. 
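// The "honest mistake" rule exercised by `detects_honest_out_of_scope_catch_requests`
// above, extracted: a catch-up request for the immediately previous set id is
// forgiven (discarded with the cheaper HONEST_OUT_OF_SCOPE cost) as long as
// we are still within CATCH_UP_THRESHOLD rounds of entering the current set.
// Sketch of just the predicate (illustrative):
fn is_honest_out_of_scope(request_set_id: u64, local_set_id: u64, local_round: u64) -> bool {
	const CATCH_UP_THRESHOLD: u64 = 2;
	request_set_id.saturating_add(1) == local_set_id &&
		local_round.saturating_sub(CATCH_UP_THRESHOLD) == 0
}

#[test]
fn honest_window_closes_after_threshold() {
	// at set 2, early rounds: requests for set 1 are an honest mistake
	assert!(is_honest_out_of_scope(1, 2, 0));
	assert!(is_honest_out_of_scope(1, 2, 2));
	// requests for older sets never are
	assert!(!is_honest_out_of_scope(0, 2, 0));
	// once we progress past CATCH_UP_THRESHOLD, the window closes
	assert!(!is_honest_out_of_scope(1, 2, 3));
}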
val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2265,11 +2232,7 @@ mod tests { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer_full, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, ); // importing a neighbor message from a peer in the same set in a later @@ -2288,11 +2251,7 @@ mod tests { #[test] fn doesnt_expire_next_round_messages() { // NOTE: this is a regression test - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2306,26 +2265,17 @@ mod tests { // we accept messages from rounds 9, 10 and 11 // therefore neither of those should be considered expired for round in &[9, 10, 11] { - assert!( - !is_expired( - crate::communication::round_topic::(*round, 1), - &[], - ) - ) + assert!(!is_expired(crate::communication::round_topic::(*round, 1), &[])) } } #[test] - fn progressively_gossips_to_more_peers() { + fn progressively_gossips_to_more_peers_as_round_duration_increases() { let mut config = config(); config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race let round_duration = config.gossip_duration * ROUND_DURATION; - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); // the validator start at set id 0 val.note_set(SetId(0), Vec::new(), |_, _| {}); @@ -2338,14 +2288,23 @@ mod tests { full_nodes.resize_with(30, || PeerId::random()); for i in 0..30 { - val.inner.write().peers.new_peer(authorities[i].clone(), ObservedRole::Authority); + val.inner + .write() + .peers + .new_peer(authorities[i].clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(full_nodes[i].clone(), ObservedRole::Full); } - let test = |num_round, peers| { + let test = |rounds_elapsed, peers| { // rewind n round durations - val.inner.write().local_view.as_mut().unwrap().round_start = - Instant::now() - round_duration * num_round; + val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now() - + Duration::from_millis( + (round_duration.as_millis() as f32 * rounds_elapsed) as u64, + ); + + val.inner.write().peers.reshuffle(); + let mut message_allowed = val.message_allowed(); move || { @@ -2378,153 +2337,127 @@ mod tests { sum / n } - // on the first attempt we will only gossip to `sqrt(authorities)`, - // which should average out to 5 peers after a couple of trials - assert_eq!(trial(test(1, &authorities)), 5); + let all_peers = authorities.iter().chain(full_nodes.iter()).cloned().collect(); - // on the second (and subsequent attempts) we should gossip to all - // authorities we're connected to. 
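// The expected audience sizes asserted below, spelled out for the test's
// topology of 30 authorities plus 30 full nodes (60 peers total).
// Illustrative arithmetic only:
fn expected_second_stage_size(total_peers: usize) -> usize {
	const LUCKY_PEERS: usize = 4;
	// second stage allows the LUCKY_PEERS first-stage peers plus sqrt(peers)
	LUCKY_PEERS + (total_peers as f64).sqrt() as usize
}

#[test]
fn second_stage_for_sixty_peers() {
	// sqrt(60) ≈ 7.74, truncated to 7, so 4 + 7 = 11 peers in total
	assert_eq!(expected_second_stage_size(60), 11);
}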
- assert_eq!(trial(test(2, &authorities)), 30); - assert_eq!(trial(test(3, &authorities)), 30); + // on the first attempt we will only gossip to 4 peers, either + // authorities or full nodes, but we'll guarantee that half of those + // are authorities + assert!(trial(test(1.0, &authorities)) >= LUCKY_PEERS / 2); + assert_eq!(trial(test(1.0, &all_peers)), LUCKY_PEERS); - // we should only gossip to non-authorities after the third attempt - assert_eq!(trial(test(1, &full_nodes)), 0); - assert_eq!(trial(test(2, &full_nodes)), 0); - - // and only to `sqrt(non-authorities)` - assert_eq!(trial(test(3, &full_nodes)), 5); + // after more than 1.5 round durations have elapsed we should gossip to + // `sqrt(peers)` of the peers we're connected to, but we guarantee that at + // least 4 of those peers are authorities (plus the `LUCKY_PEERS` from the + // previous stage) + assert!(trial(test(PROPAGATION_SOME * 1.1, &authorities)) >= LUCKY_PEERS); + assert_eq!( + trial(test(2.0, &all_peers)), + LUCKY_PEERS + (all_peers.len() as f64).sqrt() as usize, + ); - // only on the fourth attempt should we gossip to all non-authorities - assert_eq!(trial(test(4, &full_nodes)), 30); + // after 3 round durations we should gossip to all peers we are + // connected to + assert_eq!(trial(test(PROPAGATION_ALL * 1.1, &all_peers)), all_peers.len()); } #[test] - fn only_restricts_gossip_to_authorities_after_a_minimum_threshold() { - let (val, _) = GossipValidator::<Block>::new( - config(), - voter_set_state(), - None, - ); + fn never_gossips_round_messages_to_light_clients() { + let config = config(); + let round_duration = config.gossip_duration * ROUND_DURATION; + let (val, _) = GossipValidator::<Block>::new(config, voter_set_state(), None, None); - // the validator start at set id 0 + // the validator starts at set id 0 val.note_set(SetId(0), Vec::new(), |_, _| {}); - let mut authorities = Vec::new(); - for _ in 0..5 { - let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); - authorities.push(peer_id); - } + // add a new light client as peer + let light_peer = PeerId::random(); - let mut message_allowed = val.message_allowed(); + val.inner.write().peers.new_peer(light_peer.clone(), ObservedRole::Light); - // since we're only connected to 5 authorities, we should never restrict - // sending of gossip messages, and instead just allow them to all - // non-authorities on the first attempt.
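The three stages asserted above follow a fixed schedule. As a rough sketch of the policy (a sketch only; `LUCKY_PEERS`, `PROPAGATION_SOME` and `PROPAGATION_ALL` are the constants the test references, while this helper is hypothetical):

    fn target_peer_count(rounds_elapsed: f32, connected: usize) -> usize {
        if rounds_elapsed < PROPAGATION_SOME {
            // a fixed handful, at least half of them authorities
            LUCKY_PEERS
        } else if rounds_elapsed < PROPAGATION_ALL {
            // sqrt of the connected peers, plus the earlier lucky ones
            LUCKY_PEERS + (connected as f64).sqrt() as usize
        } else {
            // every peer we are connected to
            connected
        }
    }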
- for authority in &authorities { - assert!( - message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } + assert!(!val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + )); - #[test] - fn non_authorities_never_gossip_messages_on_first_round_duration() { - let mut config = config(); - config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race - config.is_authority = false; - let round_duration = config.gossip_duration * ROUND_DURATION; + // we reverse the round start time so that the elapsed time is higher + // (which should lead to more peers getting the message) + val.inner.write().local_view.as_mut().unwrap().round_start = + Instant::now() - round_duration * 10; - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); + // even after the round has been going for 10 round durations we will never + // gossip to light clients + assert!(!val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + )); - // the validator start at set id 0 - val.note_set(SetId(0), Vec::new(), |_, _| {}); + // update the peer state and local state wrt commits + val.inner + .write() + .peers + .update_peer_state( + &light_peer, + NeighborPacket { round: Round(1), set_id: SetId(0), commit_finalized_height: 1 }, + ) + .unwrap(); - let mut authorities = Vec::new(); - for _ in 0..100 { - let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); - authorities.push(peer_id); - } + val.note_commit_finalized(Round(1), SetId(0), 2, |_, _| {}); - { - let mut message_allowed = val.message_allowed(); - // since our node is not an authority we should **never** gossip any - // messages on the first attempt. 
- for authority in &authorities { - assert!( - !message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } + let commit = { + let commit = finality_grandpa::CompactCommit { + target_hash: H256::random(), + target_number: 2, + precommits: Vec::new(), + auth_data: Vec::new(), + }; - { - val.inner.write().local_view.as_mut().unwrap().round_start = - Instant::now() - round_duration * 4; - let mut message_allowed = val.message_allowed(); - // on the fourth round duration we should allow messages to authorities - // (on the second we would do `sqrt(authorities)`) - for authority in &authorities { - assert!( - message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } + crate::communication::gossip::GossipMessage::::Commit( + crate::communication::gossip::FullCommitMessage { + round: Round(2), + set_id: SetId(0), + message: commit, + }, + ) + .encode() + }; + + // global messages are gossiped to light clients though + assert!(val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &crate::communication::global_topic::(0), + &commit, + )); } #[test] fn only_gossip_commits_to_peers_on_same_set() { - let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); - // the validator start at set id 1 + // the validator starts at set id 1 val.note_set(SetId(1), Vec::new(), |_, _| {}); // add a new peer at set id 1 let peer1 = PeerId::random(); - val.inner - .write() - .peers - .new_peer(peer1.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer1.clone(), ObservedRole::Authority); val.inner .write() .peers .update_peer_state( &peer1, - NeighborPacket { - round: Round(1), - set_id: SetId(1), - commit_finalized_height: 1, - }, + NeighborPacket { round: Round(1), set_id: SetId(1), commit_finalized_height: 1 }, ) .unwrap(); // peer2 will default to set id 0 let peer2 = PeerId::random(); - val.inner - .write() - .peers - .new_peer(peer2.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer2.clone(), ObservedRole::Authority); // create a commit for round 1 of set id 1 // targeting a block at height 2 @@ -2571,7 +2504,7 @@ mod tests { #[test] fn expire_commits_from_older_rounds() { - let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let commit = |round, set_id, target_number| { let commit = finality_grandpa::CompactCommit { @@ -2602,27 +2535,20 @@ mod tests { // a commit message for round 1 that finalizes the same height as we // have observed previously should not be expired - assert!(!message_expired( - crate::communication::global_topic::(1), - &commit(1, 1, 2), - )); + assert!( + !message_expired(crate::communication::global_topic::(1), &commit(1, 1, 2),) + ); // it should be expired if it is for a lower block - assert!(message_expired( - crate::communication::global_topic::(1), - &commit(1, 1, 1), - )); + assert!(message_expired(crate::communication::global_topic::(1), &commit(1, 1, 1))); // or the same block height but from the previous round - assert!(message_expired( - crate::communication::global_topic::(1), - &commit(0, 1, 2), - )); + assert!(message_expired(crate::communication::global_topic::(1), &commit(0, 1, 2))); } #[test] fn allow_noting_different_authorities_for_same_set() { - let (val, _) = 
GossipValidator::::new(config(), voter_set_state(), None); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let a1 = vec![AuthorityId::from_slice(&[0; 32])]; val.note_set(SetId(1), a1.clone(), |_, _| {}); diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 3daffcb9f2522..c370e1d642d7d 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -29,38 +29,37 @@ //! In the future, there will be a fallback for allowing sending the same message //! under certain conditions that are used to un-stick the protocol. -use futures::{prelude::*, channel::mpsc}; +use futures::{channel::mpsc, prelude::*}; use log::{debug, trace}; use parking_lot::Mutex; use prometheus_endpoint::Registry; -use std::{pin::Pin, sync::Arc, task::{Context, Poll}}; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; -use sp_keystore::SyncCryptoStorePtr; -use finality_grandpa::Message::{Prevote, Precommit, PrimaryPropose}; -use finality_grandpa::{voter, voter_set::VoterSet}; +use finality_grandpa::{ + voter, + voter_set::VoterSet, + Message::{Precommit, Prevote, PrimaryPropose}, +}; +use parity_scale_codec::{Decode, Encode}; use sc_network::{NetworkService, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; -use parity_scale_codec::{Encode, Decode}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sp_keystore::SyncCryptoStorePtr; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; -use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; use crate::{ - CatchUp, Commit, CommunicationIn, CommunicationOutH, - CompactCommit, Error, Message, SignedMessage, + environment::HasVoted, CatchUp, Commit, CommunicationIn, CommunicationOutH, CompactCommit, + Error, Message, SignedMessage, }; -use crate::environment::HasVoted; use gossip::{ - FullCatchUpMessage, - FullCommitMessage, - GossipMessage, - GossipValidator, - PeerReport, - VoteMessage, -}; -use sp_finality_grandpa::{ - AuthorityId, AuthoritySignature, SetId as SetIdNumber, RoundNumber, + FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; -use sp_utils::mpsc::TracingUnboundedReceiver; +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_finality_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; pub mod gossip; mod periodic; @@ -68,7 +67,8 @@ mod periodic; #[cfg(test)] pub(crate) mod tests; -pub use sp_finality_grandpa::GRANDPA_ENGINE_ID; +/// Name of the notifications protocol used by Grandpa. Must be registered towards the networking +/// in order for Grandpa to properly function. pub const GRANDPA_PROTOCOL_NAME: &'static str = "/paritytech/grandpa/1"; // cost scalars for reporting peers. 
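As the new doc comment on `GRANDPA_PROTOCOL_NAME` says, the protocol must be registered with the networking layer or GRANDPA will not function. A minimal sketch of that node-side wiring, assuming the crate's `grandpa_peers_set_config` helper and a typical service configuration (neither is part of this diff):

    // during service setup, before the network is built
    config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());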
@@ -88,11 +88,13 @@ mod cost { pub(super) const INVALID_CATCH_UP: Rep = Rep::new(-5000, "Grandpa: Invalid catch-up"); pub(super) const INVALID_COMMIT: Rep = Rep::new(-5000, "Grandpa: Invalid commit"); pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Out-of-scope message"); - pub(super) const CATCH_UP_REQUEST_TIMEOUT: Rep = Rep::new(-200, "Grandpa: Catch-up request timeout"); + pub(super) const CATCH_UP_REQUEST_TIMEOUT: Rep = + Rep::new(-200, "Grandpa: Catch-up request timeout"); // cost of answering a catch up request pub(super) const CATCH_UP_REPLY: Rep = Rep::new(-200, "Grandpa: Catch-up reply"); - pub(super) const HONEST_OUT_OF_SCOPE_CATCH_UP: Rep = Rep::new(-200, "Grandpa: Out-of-scope catch-up"); + pub(super) const HONEST_OUT_OF_SCOPE_CATCH_UP: Rep = + Rep::new(-200, "Grandpa: Out-of-scope catch-up"); } // benefit scalars for reporting peers. @@ -116,7 +118,7 @@ impl LocalIdKeystore { } /// Returns a reference to the keystore. - fn keystore(&self) -> SyncCryptoStorePtr{ + fn keystore(&self) -> SyncCryptoStorePtr { (self.0).1.clone() } } @@ -143,14 +145,25 @@ pub trait Network: GossipNetwork + Clone + Send + 'static /// If the given vector of peers is empty then the underlying implementation /// should make a best effort to fetch the block from any peers it is /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ); } -impl Network for Arc> where +impl Network for Arc> +where B: BlockT, H: sc_network::ExHashT, { - fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + fn set_sync_fork_request( + &self, + peers: Vec, + hash: B::Hash, + number: NumberFor, + ) { NetworkService::set_sync_fork_request(self, peers, hash, number) } } @@ -178,19 +191,19 @@ pub(crate) struct NetworkBridge> { neighbor_sender: periodic::NeighborPacketSender, /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. - // - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, - // thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. + // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // children, thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. neighbor_packet_worker: Arc>>, /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the /// gossip engine. - // - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, - // thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given that it is - // just an `UnboundedReceiver`, one could also switch to a multi-producer-*multi*-consumer - // channel implementation. + // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // children, thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given + // that it is just an `UnboundedReceiver`, one could also switch to a + // multi-producer-*multi*-consumer channel implementation. 
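The pattern these comments describe, shown in isolation (a sketch; the names are hypothetical):

    struct CloneableHolder<R> {
        // `Arc<Mutex<R>>` is `Clone` even when the receiver `R` is not.
        rx: std::sync::Arc<parking_lot::Mutex<R>>,
    }

    impl<R> Clone for CloneableHolder<R> {
        fn clone(&self) -> Self {
            Self { rx: self.rx.clone() }
        }
    }

Whichever clone polls the receiver takes the lock, which is safe here because only the single `NetworkBridge` future ever polls these streams.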
gossip_validator_report_stream: Arc>>, + + telemetry: Option, } impl> Unpin for NetworkBridge {} @@ -205,19 +218,17 @@ impl> NetworkBridge { config: crate::Config, set_state: crate::environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, + telemetry: Option, ) -> Self { - let (validator, report_stream) = GossipValidator::new( - config, - set_state.clone(), - prometheus_registry, - ); + let (validator, report_stream) = + GossipValidator::new(config, set_state.clone(), prometheus_registry, telemetry.clone()); let validator = Arc::new(validator); let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( service.clone(), - GRANDPA_ENGINE_ID, GRANDPA_PROTOCOL_NAME, - validator.clone() + validator.clone(), + prometheus_registry, ))); { @@ -234,18 +245,13 @@ impl> NetworkBridge { validator.note_round(Round(round.number), |_, _| {}); for signed in round.votes.iter() { - let message = gossip::GossipMessage::Vote( - gossip::VoteMessage:: { - message: signed.clone(), - round: Round(round.number), - set_id: SetId(set_id), - } - ); + let message = gossip::GossipMessage::Vote(gossip::VoteMessage:: { + message: signed.clone(), + round: Round(round.number), + set_id: SetId(set_id), + }); - gossip_engine.lock().register_gossip_message( - topic, - message.encode(), - ); + gossip_engine.lock().register_gossip_message(topic, message.encode()); } trace!(target: "afg", @@ -258,7 +264,8 @@ impl> NetworkBridge { } } - let (neighbor_packet_worker, neighbor_packet_sender) = periodic::NeighborPacketWorker::new(); + let (neighbor_packet_worker, neighbor_packet_sender) = + periodic::NeighborPacketWorker::new(); NetworkBridge { service, @@ -267,16 +274,12 @@ impl> NetworkBridge { neighbor_sender: neighbor_packet_sender, neighbor_packet_worker: Arc::new(Mutex::new(neighbor_packet_worker)), gossip_validator_report_stream: Arc::new(Mutex::new(report_stream)), + telemetry, } } /// Note the beginning of a new round to the `GossipValidator`. - pub(crate) fn note_round( - &self, - round: Round, - set_id: SetId, - voters: &VoterSet, - ) { + pub(crate) fn note_round(&self, round: Round, set_id: SetId, voters: &VoterSet) { // is a no-op if currently in that set. self.validator.note_set( set_id, @@ -284,14 +287,12 @@ impl> NetworkBridge { |to, neighbor| self.neighbor_sender.send(to, neighbor), ); - self.validator.note_round( - round, - |to, neighbor| self.neighbor_sender.send(to, neighbor), - ); + self.validator + .note_round(round, |to, neighbor| self.neighbor_sender.send(to, neighbor)); } - /// Get a stream of signature-checked round messages from the network as well as a sink for round messages to the - /// network all within the current set. + /// Get a stream of signature-checked round messages from the network as well as a sink for + /// round messages to the network all within the current set. 
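A rough usage sketch of the method documented above (illustrative only: `bridge` and `voters` are assumed to exist, and the node observes the round without voting):

    let (incoming, outgoing) = bridge.round_communication(
        None,           // no local keystore, so no signing
        Round(1),
        SetId(0),
        voters.clone(), // Arc<VoterSet<AuthorityId>>
        HasVoted::No,
    );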
pub(crate) fn round_communication( &self, keystore: Option, @@ -299,15 +300,8 @@ impl> NetworkBridge { set_id: SetId, voters: Arc>, has_voted: HasVoted, - ) -> ( - impl Stream> + Unpin, - OutgoingMessages, - ) { - self.note_round( - round, - set_id, - &*voters, - ); + ) -> (impl Stream> + Unpin, OutgoingMessages) { + self.note_round(round, set_id, &*voters); let keystore = keystore.and_then(|ks| { let id = ks.local_id(); @@ -319,40 +313,50 @@ impl> NetworkBridge { }); let topic = round_topic::(round.0, set_id.0); - let incoming = self.gossip_engine.lock().messages_for(topic) - .filter_map(move |notification| { + let telemetry = self.telemetry.clone(); + let incoming = + self.gossip_engine.lock().messages_for(topic).filter_map(move |notification| { let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); match decoded { Err(ref e) => { debug!(target: "afg", "Skipping malformed message {:?}: {}", notification, e); future::ready(None) - } + }, Ok(GossipMessage::Vote(msg)) => { // check signature. if !voters.contains(&msg.message.id) { debug!(target: "afg", "Skipping message from unknown voter {}", msg.message.id); - return future::ready(None); + return future::ready(None) } if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { match &msg.message.message { PrimaryPropose(propose) => { - telemetry!(CONSENSUS_INFO; "afg.received_propose"; + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_propose"; "voter" => ?format!("{}", msg.message.id), "target_number" => ?propose.target_number, "target_hash" => ?propose.target_hash, ); }, Prevote(prevote) => { - telemetry!(CONSENSUS_INFO; "afg.received_prevote"; + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_prevote"; "voter" => ?format!("{}", msg.message.id), "target_number" => ?prevote.target_number, "target_hash" => ?prevote.target_hash, ); }, Precommit(precommit) => { - telemetry!(CONSENSUS_INFO; "afg.received_precommit"; + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_precommit"; "voter" => ?format!("{}", msg.message.id), "target_number" => ?precommit.target_number, "target_hash" => ?precommit.target_hash, @@ -362,11 +366,11 @@ impl> NetworkBridge { } future::ready(Some(msg.message)) - } + }, _ => { debug!(target: "afg", "Skipping unknown message type"); future::ready(None) - } + }, } }); @@ -378,6 +382,7 @@ impl> NetworkBridge { network: self.gossip_engine.clone(), sender: tx, has_voted, + telemetry: self.telemetry.clone(), }; // Combine incoming votes from external GRANDPA nodes with outgoing @@ -411,6 +416,7 @@ impl> NetworkBridge { voters, self.validator.clone(), self.neighbor_sender.clone(), + self.telemetry.clone(), ); let outgoing = CommitsOut::::new( @@ -419,6 +425,7 @@ impl> NetworkBridge { is_voter, self.validator.clone(), self.neighbor_sender.clone(), + self.telemetry.clone(), ); let outgoing = outgoing.with(|out| { @@ -439,7 +446,7 @@ impl> NetworkBridge { &self, peers: Vec, hash: B::Hash, - number: NumberFor + number: NumberFor, ) { Network::set_sync_fork_request(&self.service, peers, hash, number) } @@ -454,9 +461,10 @@ impl> Future for NetworkBridge { Poll::Ready(Some((to, packet))) => { self.gossip_engine.lock().send_message(to, packet.encode()); }, - Poll::Ready(None) => return Poll::Ready( - Err(Error::Network("Neighbor packet worker stream closed.".into())) - ), + Poll::Ready(None) => + return Poll::Ready(Err(Error::Network( + "Neighbor packet worker stream closed.".into(), + ))), Poll::Pending => break, } } @@ -466,17 +474,17 @@ impl> Future for NetworkBridge { Poll::Ready(Some(PeerReport 
{ who, cost_benefit })) => { self.gossip_engine.lock().report(who, cost_benefit); }, - Poll::Ready(None) => return Poll::Ready( - Err(Error::Network("Gossip validator report stream closed.".into())) - ), + Poll::Ready(None) => + return Poll::Ready(Err(Error::Network( + "Gossip validator report stream closed.".into(), + ))), Poll::Pending => break, } } match self.gossip_engine.lock().poll_unpin(cx) { - Poll::Ready(()) => return Poll::Ready( - Err(Error::Network("Gossip engine future finished.".into())) - ), + Poll::Ready(()) => + return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))), Poll::Pending => {}, } @@ -490,94 +498,93 @@ fn incoming_global( voters: Arc>, gossip_validator: Arc>, neighbor_sender: periodic::NeighborPacketSender, + telemetry: Option, ) -> impl Stream> { - let process_commit = move | - msg: FullCommitMessage, - mut notification: sc_network_gossip::TopicNotification, - gossip_engine: &Arc>>, - gossip_validator: &Arc>, - voters: &VoterSet, - | { - if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { - let precommits_signed_by: Vec = - msg.message.auth_data.iter().map(move |(_, a)| { - format!("{}", a) - }).collect(); - - telemetry!(CONSENSUS_INFO; "afg.received_commit"; - "contains_precommits_signed_by" => ?precommits_signed_by, - "target_number" => ?msg.message.target_number.clone(), - "target_hash" => ?msg.message.target_hash.clone(), - ); - } - - if let Err(cost) = check_compact_commit::( - &msg.message, - voters, - msg.round, - msg.set_id, - ) { - if let Some(who) = notification.sender { - gossip_engine.lock().report(who, cost); - } - - return None; - } - - let round = msg.round; - let set_id = msg.set_id; - let commit = msg.message; - let finalized_number = commit.target_number; - let gossip_validator = gossip_validator.clone(); - let gossip_engine = gossip_engine.clone(); - let neighbor_sender = neighbor_sender.clone(); - let cb = move |outcome| match outcome { - voter::CommitProcessingOutcome::Good(_) => { - // if it checks out, gossip it. not accounting for - // any discrepancy between the actual ghost and the claimed - // finalized number. - gossip_validator.note_commit_finalized( - round, - set_id, - finalized_number, - |to, neighbor| neighbor_sender.send(to, neighbor), + let process_commit = { + let telemetry = telemetry.clone(); + move |msg: FullCommitMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: &Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { + if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { + let precommits_signed_by: Vec = + msg.message.auth_data.iter().map(move |(_, a)| format!("{}", a)).collect(); + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_commit"; + "contains_precommits_signed_by" => ?precommits_signed_by, + "target_number" => ?msg.message.target_number.clone(), + "target_hash" => ?msg.message.target_hash.clone(), ); - - gossip_engine.lock().gossip_message(topic, notification.message.clone(), false); } - voter::CommitProcessingOutcome::Bad(_) => { - // report peer and do not gossip. 
- if let Some(who) = notification.sender.take() { - gossip_engine.lock().report(who, cost::INVALID_COMMIT); + + if let Err(cost) = check_compact_commit::( + &msg.message, + voters, + msg.round, + msg.set_id, + telemetry.as_ref(), + ) { + if let Some(who) = notification.sender { + gossip_engine.lock().report(who, cost); } + + return None } - }; - let cb = voter::Callback::Work(Box::new(cb)); + let round = msg.round; + let set_id = msg.set_id; + let commit = msg.message; + let finalized_number = commit.target_number; + let gossip_validator = gossip_validator.clone(); + let gossip_engine = gossip_engine.clone(); + let neighbor_sender = neighbor_sender.clone(); + let cb = move |outcome| match outcome { + voter::CommitProcessingOutcome::Good(_) => { + // if it checks out, gossip it. not accounting for + // any discrepancy between the actual ghost and the claimed + // finalized number. + gossip_validator.note_commit_finalized( + round, + set_id, + finalized_number, + |to, neighbor| neighbor_sender.send(to, neighbor), + ); - Some(voter::CommunicationIn::Commit(round.0, commit, cb)) + gossip_engine.lock().gossip_message(topic, notification.message.clone(), false); + }, + voter::CommitProcessingOutcome::Bad(_) => { + // report peer and do not gossip. + if let Some(who) = notification.sender.take() { + gossip_engine.lock().report(who, cost::INVALID_COMMIT); + } + }, + }; + + let cb = voter::Callback::Work(Box::new(cb)); + + Some(voter::CommunicationIn::Commit(round.0, commit, cb)) + } }; - let process_catch_up = move | - msg: FullCatchUpMessage, - mut notification: sc_network_gossip::TopicNotification, - gossip_engine: &Arc>>, - gossip_validator: &Arc>, - voters: &VoterSet, - | { + let process_catch_up = move |msg: FullCatchUpMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: &Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { let gossip_validator = gossip_validator.clone(); let gossip_engine = gossip_engine.clone(); - if let Err(cost) = check_catch_up::( - &msg.message, - voters, - msg.set_id, - ) { + if let Err(cost) = check_catch_up::(&msg.message, voters, msg.set_id, telemetry.clone()) + { if let Some(who) = notification.sender { gossip_engine.lock().report(who, cost); } - return None; + return None } let cb = move |outcome| { @@ -596,7 +603,10 @@ fn incoming_global( Some(voter::CommunicationIn::CatchUp(msg.message, cb)) }; - gossip_engine.clone().lock().messages_for(topic) + gossip_engine + .clone() + .lock() + .messages_for(topic) .filter_map(|notification| { // this could be optimized by decoding piecewise. 
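The "decoding piecewise" note refers to SCALE encoding an enum as a one-byte variant index followed by the variant's payload, so a decoder could dispatch on the first byte before paying for a full decode. A sketch of the idea (the variant indices here are illustrative, not taken from this crate):

    match notification.message.first() {
        Some(0) => { /* decode only a vote payload */ },
        Some(1) => { /* decode only a commit payload */ },
        _ => { /* other, unknown, or empty message: skip */ },
    }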
let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); @@ -614,7 +624,7 @@ fn incoming_global( _ => { debug!(target: "afg", "Skipping unknown message type"); None - } + }, }) }) } @@ -628,6 +638,7 @@ impl> Clone for NetworkBridge { neighbor_sender: self.neighbor_sender.clone(), neighbor_packet_worker: self.neighbor_packet_worker.clone(), gossip_validator_report_stream: self.gossip_validator_report_stream.clone(), + telemetry: self.telemetry.clone(), } } } @@ -654,36 +665,40 @@ pub(crate) struct OutgoingMessages { sender: mpsc::Sender>, network: Arc>>, has_voted: HasVoted, + telemetry: Option, } impl Unpin for OutgoingMessages {} -impl Sink> for OutgoingMessages -{ +impl Sink> for OutgoingMessages { type Error = Error; fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Sink::poll_ready(Pin::new(&mut self.sender), cx) - .map(|elem| { elem.map_err(|e| { + Sink::poll_ready(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { Error::Network(format!("Failed to poll_ready channel sender: {:?}", e)) - })}) + }) + }) } fn start_send(mut self: Pin<&mut Self>, mut msg: Message) -> Result<(), Self::Error> { // if we've voted on this round previously under the same key, send that vote instead match &mut msg { - finality_grandpa::Message::PrimaryPropose(ref mut vote) => + finality_grandpa::Message::PrimaryPropose(ref mut vote) => { if let Some(propose) = self.has_voted.propose() { *vote = propose.clone(); - }, - finality_grandpa::Message::Prevote(ref mut vote) => + } + }, + finality_grandpa::Message::Prevote(ref mut vote) => { if let Some(prevote) = self.has_voted.prevote() { *vote = prevote.clone(); - }, - finality_grandpa::Message::Precommit(ref mut vote) => + } + }, + finality_grandpa::Message::Precommit(ref mut vote) => { if let Some(precommit) = self.has_voted.precommit() { *vote = precommit.clone(); - }, + } + }, } // when locals exist, sign messages on import @@ -695,11 +710,13 @@ impl Sink> for OutgoingMessages keystore.local_id().clone(), self.round, self.set_id, - ).ok_or_else( - || Error::Signing(format!( - "Failed to sign GRANDPA vote for round {} targetting {:?}", self.round, target_hash + ) + .ok_or_else(|| { + Error::Signing(format!( + "Failed to sign GRANDPA vote for round {} targetting {:?}", + self.round, target_hash )) - )?; + })?; let message = GossipMessage::Vote(VoteMessage:: { message: signed.clone(), @@ -716,12 +733,14 @@ impl Sink> for OutgoingMessages ); telemetry!( - CONSENSUS_DEBUG; "afg.announcing_blocks_to_voted_peers"; + self.telemetry; + CONSENSUS_DEBUG; + "afg.announcing_blocks_to_voted_peers"; "block" => ?target_hash, "round" => ?self.round, "set_id" => ?self.set_id, ); // announce the block we voted on to our peers. - self.network.lock().announce(target_hash, Vec::new()); + self.network.lock().announce(target_hash, None); // propagate the message to peers let topic = round_topic::(self.round, self.set_id); @@ -730,7 +749,7 @@ impl Sink> for OutgoingMessages // forward the message to the inner sender. 
return self.sender.start_send(signed).map_err(|e| { Error::Network(format!("Failed to start_send on channel sender: {:?}", e)) - }); + }) }; Ok(()) @@ -741,10 +760,11 @@ impl Sink> for OutgoingMessages } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Sink::poll_close(Pin::new(&mut self.sender), cx) - .map(|elem| { elem.map_err(|e| { + Sink::poll_close(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { Error::Network(format!("Failed to poll_close channel sender: {:?}", e)) - })}) + }) + }) } } @@ -755,6 +775,7 @@ fn check_compact_commit( voters: &VoterSet, round: Round, set_id: SetId, + telemetry: Option<&TelemetryHandle>, ) -> Result<(), ReputationChange> { // 4f + 1 = equivocations from f voters. let f = voters.total_weight() - voters.threshold(); @@ -766,23 +787,22 @@ fn check_compact_commit( if let Some(weight) = voters.get(id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } } else { debug!(target: "afg", "Skipping commit containing unknown voter {}", id); - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } // check signatures on all contained precommits. let mut buf = Vec::new(); - for (i, (precommit, &(ref sig, ref id))) in msg.precommits.iter() - .zip(&msg.auth_data) - .enumerate() + for (i, (precommit, &(ref sig, ref id))) in + msg.precommits.iter().zip(&msg.auth_data).enumerate() { use crate::communication::gossip::Misbehavior; use finality_grandpa::Message as GrandpaMessage; @@ -796,14 +816,20 @@ fn check_compact_commit( &mut buf, ) { debug!(target: "afg", "Bad commit message signature {}", id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_commit_msg_signature"; "id" => ?id); + telemetry!( + telemetry; + CONSENSUS_DEBUG; + "afg.bad_commit_msg_signature"; + "id" => ?id, + ); let cost = Misbehavior::BadCommitMessage { signatures_checked: i as i32, blocks_loaded: 0, equivocations_caught: 0, - }.cost(); + } + .cost(); - return Err(cost); + return Err(cost) } } @@ -816,6 +842,7 @@ fn check_catch_up( msg: &CatchUp, voters: &VoterSet, set_id: SetId, + telemetry: Option, ) -> Result<(), ReputationChange> { // 4f + 1 = equivocations from f voters. let f = voters.total_weight() - voters.threshold(); @@ -824,7 +851,7 @@ fn check_catch_up( // check total weight is not out of range for a set of votes. 
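To make the `4f + 1` comment concrete: with 10 voters of weight 1, `total_weight` is 10 and `threshold` is 7, so `f = 10 - 7 = 3`. An equivocating voter can contribute a vote on each of two sides, so up to `f` voters may legitimately be counted twice, and a valid set of votes can weigh as much as `10 + 3 = 13 = 4f + 1`; any total above that full threshold marks the message as malformed, and any total below `threshold` cannot justify finality.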
fn check_weight<'a>( voters: &'a VoterSet, - votes: impl Iterator, + votes: impl Iterator, full_threshold: u64, ) -> Result<(), ReputationChange> { let mut total_weight = 0; @@ -833,32 +860,24 @@ fn check_catch_up( if let Some(weight) = voters.get(&id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } } else { debug!(target: "afg", "Skipping catch up message containing unknown voter {}", id); - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } Ok(()) - }; + } - check_weight( - voters, - msg.prevotes.iter().map(|vote| &vote.id), - full_threshold, - )?; + check_weight(voters, msg.prevotes.iter().map(|vote| &vote.id), full_threshold)?; - check_weight( - voters, - msg.precommits.iter().map(|vote| &vote.id), - full_threshold, - )?; + check_weight(voters, msg.precommits.iter().map(|vote| &vote.id), full_threshold)?; fn check_signatures<'a, B, I>( messages: I, @@ -866,9 +885,11 @@ fn check_catch_up( set_id: SetIdNumber, mut signatures_checked: usize, buf: &mut Vec, - ) -> Result where + telemetry: Option, + ) -> Result + where B: BlockT, - I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, + I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, { use crate::communication::gossip::Misbehavior; @@ -876,21 +897,22 @@ fn check_catch_up( signatures_checked += 1; if !sp_finality_grandpa::check_message_signature_with_buffer( - &msg, - id, - sig, - round, - set_id, - buf, + &msg, id, sig, round, set_id, buf, ) { debug!(target: "afg", "Bad catch up message signature {}", id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_catch_up_msg_signature"; "id" => ?id); + telemetry!( + telemetry; + CONSENSUS_DEBUG; + "afg.bad_catch_up_msg_signature"; + "id" => ?id, + ); let cost = Misbehavior::BadCatchUpMessage { signatures_checked: signatures_checked as i32, - }.cost(); + } + .cost(); - return Err(cost); + return Err(cost) } } @@ -908,17 +930,23 @@ fn check_catch_up( set_id.0, 0, &mut buf, + telemetry.clone(), )?; // check signatures on all contained precommits. 
let _ = check_signatures::( msg.precommits.iter().map(|vote| { - (finality_grandpa::Message::Precommit(vote.precommit.clone()), &vote.id, &vote.signature) + ( + finality_grandpa::Message::Precommit(vote.precommit.clone()), + &vote.id, + &vote.signature, + ) }), msg.round_number, set_id.0, signatures_checked, &mut buf, + telemetry, )?; Ok(()) @@ -931,6 +959,7 @@ struct CommitsOut { is_voter: bool, gossip_validator: Arc>, neighbor_sender: periodic::NeighborPacketSender, + telemetry: Option, } impl CommitsOut { @@ -941,6 +970,7 @@ impl CommitsOut { is_voter: bool, gossip_validator: Arc>, neighbor_sender: periodic::NeighborPacketSender, + telemetry: Option, ) -> Self { CommitsOut { network, @@ -948,6 +978,7 @@ impl CommitsOut { is_voter, gossip_validator, neighbor_sender, + telemetry, } } } @@ -959,18 +990,27 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { Poll::Ready(Ok(())) } - fn start_send(self: Pin<&mut Self>, input: (RoundNumber, Commit)) -> Result<(), Self::Error> { + fn start_send( + self: Pin<&mut Self>, + input: (RoundNumber, Commit), + ) -> Result<(), Self::Error> { if !self.is_voter { - return Ok(()); + return Ok(()) } let (round, commit) = input; let round = Round(round); - telemetry!(CONSENSUS_DEBUG; "afg.commit_issued"; - "target_number" => ?commit.target_number, "target_hash" => ?commit.target_hash, + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.commit_issued"; + "target_number" => ?commit.target_number, + "target_hash" => ?commit.target_hash, ); - let (precommits, auth_data) = commit.precommits.into_iter() + let (precommits, auth_data) = commit + .precommits + .into_iter() .map(|signed| (signed.precommit, (signed.signature, signed.id))) .unzip(); @@ -978,7 +1018,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { target_hash: commit.target_hash, target_number: commit.target_number, precommits, - auth_data + auth_data, }; let message = GossipMessage::Commit(FullCommitMessage:: { diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index dadd7deb57fca..77e55ad652f6c 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -1,30 +1,36 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Periodic rebroadcast of neighbor packets. 
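In terms of usage, the worker pairs a sender with a rebroadcasting stream; a rough sketch (the wiring is illustrative, not taken from this diff):

    let (worker, sender) = NeighborPacketWorker::<Block>::new();

    // broadcast to these peers now, and remember the packet...
    sender.send(peers, packet);

    // ...because polling `worker` as a `Stream` re-yields the last packet
    // every `REBROADCAST_AFTER` (two minutes) until a newer one replaces it.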
+use futures::{future::FutureExt as _, prelude::*, ready, stream::Stream}; use futures_timer::Delay; -use futures::{future::{FutureExt as _}, prelude::*, ready, stream::Stream}; use log::debug; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; - +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use super::gossip::{GossipMessage, NeighborPacket}; use sc_network::PeerId; -use sp_runtime::traits::{NumberFor, Block as BlockT}; -use super::gossip::{NeighborPacket, GossipMessage}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; // How often to rebroadcast, in cases where no new packets are created. const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); @@ -32,7 +38,7 @@ const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); /// A sender used to send neighbor packets to a background job. #[derive(Clone)] pub(super) struct NeighborPacketSender( - TracingUnboundedSender<(Vec, NeighborPacket>)> + TracingUnboundedSender<(Vec, NeighborPacket>)>, ); impl NeighborPacketSender { @@ -61,24 +67,20 @@ pub(super) struct NeighborPacketWorker { impl Unpin for NeighborPacketWorker {} impl NeighborPacketWorker { - pub(super) fn new() -> (Self, NeighborPacketSender){ - let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)> - ("mpsc_grandpa_neighbor_packet_worker"); + pub(super) fn new() -> (Self, NeighborPacketSender) { + let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( + "mpsc_grandpa_neighbor_packet_worker", + ); let delay = Delay::new(REBROADCAST_AFTER); - (NeighborPacketWorker { - last: None, - delay, - rx, - }, NeighborPacketSender(tx)) + (NeighborPacketWorker { last: None, delay, rx }, NeighborPacketSender(tx)) } } -impl Stream for NeighborPacketWorker { +impl Stream for NeighborPacketWorker { type Item = (Vec, GossipMessage); - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> - { + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let this = &mut *self; match this.rx.poll_next_unpin(cx) { Poll::Ready(None) => return Poll::Ready(None), @@ -86,8 +88,8 @@ impl Stream for NeighborPacketWorker { this.delay.reset(REBROADCAST_AFTER); this.last = Some((to.clone(), packet.clone())); - return Poll::Ready(Some((to, GossipMessage::::from(packet)))); - } + return Poll::Ready(Some((to, GossipMessage::::from(packet)))) + }, // Don't return yet, maybe the timer fired. Poll::Pending => {}, }; @@ -102,10 +104,10 @@ impl Stream for NeighborPacketWorker { // // Note: In case poll_unpin is called after the resetted delay fires again, this // will drop one tick. Deemed as very unlikely and also not critical. - while let Poll::Ready(()) = this.delay.poll_unpin(cx) {}; + while let Poll::Ready(()) = this.delay.poll_unpin(cx) {} if let Some((ref to, ref packet)) = this.last { - return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))); + return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))) } Poll::Pending diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 1a773acd6d0fb..1fac0230b2a84 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -1,35 +1,43 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Tests for the communication portion of the GRANDPA crate. -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use super::{ + gossip::{self, GossipValidator}, + Round, SetId, VoterSet, +}; +use crate::{communication::GRANDPA_PROTOCOL_NAME, environment::SharedVoterSetState}; use futures::prelude::*; -use sc_network::{Event as NetworkEvent, ObservedRole, PeerId}; -use sc_network_test::{Block, Hash}; +use parity_scale_codec::Encode; +use sc_network::{config::Role, Event as NetworkEvent, ObservedRole, PeerId}; use sc_network_gossip::Validator; -use std::sync::Arc; +use sc_network_test::{Block, Hash}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_finality_grandpa::AuthorityList; use sp_keyring::Ed25519Keyring; -use parity_scale_codec::Encode; -use sp_runtime::{ConsensusEngineId, traits::NumberFor}; -use std::{borrow::Cow, pin::Pin, task::{Context, Poll}}; -use crate::environment::SharedVoterSetState; -use sp_finality_grandpa::{AuthorityList, GRANDPA_ENGINE_ID}; -use super::gossip::{self, GossipValidator}; -use super::{VoterSet, Round, SetId}; +use sp_runtime::traits::NumberFor; +use std::{ + borrow::Cow, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; #[derive(Debug)] pub(crate) enum Event { @@ -55,15 +63,17 @@ impl sc_network_gossip::Network for TestNetwork { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); } - fn disconnect_peer(&self, _: PeerId) {} + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - fn write_notification(&self, who: PeerId, _: ConsensusEngineId, message: Vec) { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) {} + + fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec) { let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} - - fn announce(&self, block: Hash, _associated_data: Vec) { + fn announce(&self, block: Hash, _associated_data: Option>) { let _ = self.sender.unbounded_send(Event::Announce(block)); } } @@ -74,24 +84,25 @@ impl super::Network for TestNetwork { _peers: Vec, _hash: Hash, _number: NumberFor, - ) {} + ) { + } } impl sc_network_gossip::ValidatorContext for TestNetwork { - fn broadcast_topic(&mut self, _: Hash, _: bool) { } + fn broadcast_topic(&mut self, _: Hash, _: bool) {} - fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) { 
} + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { >::write_notification( self, who.clone(), - GRANDPA_ENGINE_ID, + GRANDPA_PROTOCOL_NAME.into(), data, ); } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) { } + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } pub(crate) struct Tester { @@ -102,15 +113,17 @@ pub(crate) struct Tester { impl Tester { fn filter_network_events(self, mut pred: F) -> impl Future - where F: FnMut(Event) -> bool + where + F: FnMut(Event) -> bool, { let mut s = Some(self); futures::future::poll_fn(move |cx| loop { match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) { Poll::Ready(None) => panic!("concluded early"), - Poll::Ready(Some(item)) => if pred(item) { - return Poll::Ready(s.take().unwrap()) - }, + Poll::Ready(Some(item)) => + if pred(item) { + return Poll::Ready(s.take().unwrap()) + }, Poll::Pending => return Poll::Pending, } }) @@ -132,15 +145,15 @@ fn config() -> crate::Config { justification_period: 256, keystore: None, name: None, - is_authority: true, + local_role: Role::Authority, observer_enabled: true, + telemetry: None, } } // dummy voter set state fn voter_set_state() -> SharedVoterSetState { - use crate::authorities::AuthoritySet; - use crate::environment::VoterSetState; + use crate::{authorities::AuthoritySet, environment::VoterSetState}; use finality_grandpa::round::State as RoundState; use sp_core::{crypto::Public, H256}; use sp_finality_grandpa::AuthorityId; @@ -151,20 +164,13 @@ fn voter_set_state() -> SharedVoterSetState { let voters = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; let voters = AuthoritySet::genesis(voters).unwrap(); - let set_state = VoterSetState::live( - 0, - &voters, - base, - ); + let set_state = VoterSetState::live(0, &voters, base); set_state.into() } // needs to run in a tokio runtime. 
-pub(crate) fn make_test_network() -> ( - impl Future, - TestNetwork, -) { +pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { let (tx, rx) = tracing_unbounded("test"); let net = TestNetwork { sender: tx }; @@ -179,12 +185,7 @@ pub(crate) fn make_test_network() -> ( } } - let bridge = super::NetworkBridge::new( - net.clone(), - config(), - voter_set_state(), - None, - ); + let bridge = super::NetworkBridge::new(net.clone(), config(), voter_set_state(), None, None); ( futures::future::ready(Tester { @@ -197,19 +198,16 @@ pub(crate) fn make_test_network() -> ( } fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter() - .map(|key| key.clone().public().into()) - .map(|id| (id, 1)) - .collect() + keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() } struct NoopContext; impl sc_network_gossip::ValidatorContext for NoopContext { - fn broadcast_topic(&mut self, _: Hash, _: bool) { } - fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) { } - fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) { } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) { } + fn broadcast_topic(&mut self, _: Hash, _: bool) {} + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} + fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) {} + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } #[test] @@ -225,9 +223,12 @@ fn good_commit_leads_to_relay() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = + finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; let payload = sp_finality_grandpa::localized_payload( - round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()) + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), ); let mut precommits = Vec::new(); @@ -240,24 +241,21 @@ fn good_commit_leads_to_relay() { auth_data.push((signature, public[i].0.clone())) } - finality_grandpa::CompactCommit { - target_hash, - target_number, - precommits, - auth_data, - } + finality_grandpa::CompactCommit { target_hash, target_number, precommits, auth_data } }; let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { round: Round(round), set_id: SetId(set_id), message: commit, - }).encode(); + }) + .encode(); let id = sc_network::PeerId::random(); let global_topic = super::global_topic::(set_id); - let test = make_test_network().0 + let test = make_test_network() + .0 .then(move |tester| { // register a peer. tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); @@ -265,7 +263,8 @@ fn good_commit_leads_to_relay() { }) .then(move |(tester, id)| { // start round, dispatch commit, and wait for broadcast. - let (commits_in, _) = tester.net_handle.global_communication(SetId(1), voter_set, false); + let (commits_in, _) = + tester.net_handle.global_communication(SetId(1), voter_set, false); { let (action, ..) 
= tester.gossip_validator.do_validate(&id, &encoded_commit[..]); @@ -287,68 +286,67 @@ fn good_commit_leads_to_relay() { // Add the sending peer and send the commit let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), + negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + messages: vec![( + GRANDPA_PROTOCOL_NAME.into(), + commit_to_send.clone().into(), + )], }); // Add a random peer which will be the recipient of this message let receiver_id = sc_network::PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: receiver_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), + negotiated_fallback: None, role: ObservedRole::Full, }); // Announce its local set as being on the current set id through a neighbor // packet, otherwise it won't be eligible to receive the commit let _ = { - let update = gossip::VersionedNeighborPacket::V1( - gossip::NeighborPacket { - round: Round(round), - set_id: SetId(set_id), - commit_finalized_height: 1, - } - ); + let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 1, + }); let msg = gossip::GossipMessage::<Block>::Neighbor(update); sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: receiver_id, - messages: vec![(GRANDPA_ENGINE_ID, msg.encode().into())], + messages: vec![(GRANDPA_PROTOCOL_NAME.into(), msg.encode().into())], }) }; true - } + }, _ => false, }); // when the commit comes in, we'll tell the callback it was good. - let handle_commit = commits_in.into_future() - .map(|(item, _)| { - match item.unwrap() { - finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { - callback.run(finality_grandpa::voter::CommitProcessingOutcome::good()); - }, - _ => panic!("commit expected"), - } - }); + let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::good()); + }, + _ => panic!("commit expected"), + }); // once the message is sent and commit is "handled" we should have // a repropagation event coming from the network.
- let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { - tester.filter_network_events(move |event| match event { - Event::WriteNotification(_, data) => { - data == encoded_commit - } - _ => false, + let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::WriteNotification(_, data) => data == encoded_commit, + _ => false, + }) }) - }) .map(|_| ()); // Poll both the future sending and handling the commit, as well as the underlying @@ -373,9 +371,12 @@ fn bad_commit_leads_to_report() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = + finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; let payload = sp_finality_grandpa::localized_payload( - round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()) + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), ); let mut precommits = Vec::new(); @@ -388,24 +389,21 @@ fn bad_commit_leads_to_report() { auth_data.push((signature, public[i].0.clone())) } - finality_grandpa::CompactCommit { - target_hash, - target_number, - precommits, - auth_data, - } + finality_grandpa::CompactCommit { target_hash, target_number, precommits, auth_data } }; let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { round: Round(round), set_id: SetId(set_id), message: commit, - }).encode(); + }) + .encode(); let id = sc_network::PeerId::random(); let global_topic = super::global_topic::(set_id); - let test = make_test_network().0 + let test = make_test_network() + .0 .map(move |tester| { // register a peer. tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); @@ -413,7 +411,8 @@ fn bad_commit_leads_to_report() { }) .then(move |(tester, id)| { // start round, dispatch commit, and wait for broadcast. - let (commits_in, _) = tester.net_handle.global_communication(SetId(1), voter_set, false); + let (commits_in, _) = + tester.net_handle.global_communication(SetId(1), voter_set, false); { let (action, ..) = tester.gossip_validator.do_validate(&id, &encoded_commit[..]); @@ -434,40 +433,41 @@ fn bad_commit_leads_to_report() { Event::EventStream(sender) => { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, + protocol: GRANDPA_PROTOCOL_NAME.into(), + negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + messages: vec![( + GRANDPA_PROTOCOL_NAME.into(), + commit_to_send.clone().into(), + )], }); true - } + }, _ => false, }); // when the commit comes in, we'll tell the callback it was bad. 
- let handle_commit = commits_in.into_future() - .map(|(item, _)| { - match item.unwrap() { - finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { - callback.run(finality_grandpa::voter::CommitProcessingOutcome::bad()); - }, - _ => panic!("commit expected"), - } - }); + let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::bad()); + }, + _ => panic!("commit expected"), + }); // once the message is sent and commit is "handled" we should have // a report event coming from the network. - let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { - tester.filter_network_events(move |event| match event { - Event::Report(who, cost_benefit) => { - who == id && cost_benefit == super::cost::INVALID_COMMIT - } - _ => false, + let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::Report(who, cost_benefit) => + who == id && cost_benefit == super::cost::INVALID_COMMIT, + _ => false, + }) }) - }) .map(|_| ()); // Poll both the future sending and handling the commit, as well as the underlying @@ -498,7 +498,8 @@ fn peer_with_higher_view_leads_to_catch_up_request() { set_id: SetId(0), round: Round(10), commit_finalized_height: 50, - }).encode(), + }) + .encode(), ); // neighbor packets are always discarded @@ -508,27 +509,23 @@ } // a catch up request should be sent to the peer for round - 1 - tester.filter_network_events(move |event| match event { - Event::WriteNotification(peer, message) => { - assert_eq!( - peer, - id, - ); - - assert_eq!( - message, - gossip::GossipMessage::<Block>::CatchUpRequest( - gossip::CatchUpRequestMessage { - set_id: SetId(0), - round: Round(9), - } - ).encode(), - ); + tester + .filter_network_events(move |event| match event { + Event::WriteNotification(peer, message) => { + assert_eq!(peer, id); + + assert_eq!( + message, + gossip::GossipMessage::<Block>::CatchUpRequest( + gossip::CatchUpRequestMessage { set_id: SetId(0), round: Round(9) } + ) + .encode(), + ); - true - }, - _ => false, - }) + true + }, + _ => false, + }) .map(|_| ()) }); diff --git a/client/finality-grandpa/src/consensus_changes.rs b/client/finality-grandpa/src/consensus_changes.rs deleted file mode 100644 index 1ce7b551d0d7c..0000000000000 --- a/client/finality-grandpa/src/consensus_changes.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. - -use std::sync::Arc; -use parity_scale_codec::{Encode, Decode}; - -/// Consensus-related data changes tracker.
-#[derive(Clone, Debug, Encode, Decode)] -pub(crate) struct ConsensusChanges { - pending_changes: Vec<(N, H)>, -} - -impl ConsensusChanges { - /// Create empty consensus changes. - pub(crate) fn empty() -> Self { - ConsensusChanges { pending_changes: Vec::new(), } - } -} - -impl ConsensusChanges { - - /// Returns reference to all pending changes. - pub fn pending_changes(&self) -> &[(N, H)] { - &self.pending_changes - } - - /// Note unfinalized change of consensus-related data. - pub(crate) fn note_change(&mut self, at: (N, H)) { - let idx = self.pending_changes - .binary_search_by_key(&at.0, |change| change.0) - .unwrap_or_else(|i| i); - self.pending_changes.insert(idx, at); - } - - /// Finalize all pending consensus changes that are finalized by given block. - /// Returns true if there any changes were finalized. - pub(crate) fn finalize ::sp_blockchain::Result>>( - &mut self, - block: (N, H), - canonical_at_height: F, - ) -> ::sp_blockchain::Result<(bool, bool)> { - let (split_idx, has_finalized_changes) = self.pending_changes.iter() - .enumerate() - .take_while(|(_, &(at_height, _))| at_height <= block.0) - .fold((None, Ok(false)), |(_, has_finalized_changes), (idx, ref at)| - ( - Some(idx), - has_finalized_changes - .and_then(|has_finalized_changes| if has_finalized_changes { - Ok(has_finalized_changes) - } else { - canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash) - }), - )); - - let altered_changes = split_idx.is_some(); - if let Some(split_idx) = split_idx { - self.pending_changes = self.pending_changes.split_off(split_idx + 1); - } - has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes)) - } -} - -/// Thread-safe consensus changes tracker reference. -pub(crate) type SharedConsensusChanges = Arc>>; diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 95d7adb9578c5..f27a530ed2f40 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,50 +16,52 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
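// The `ConsensusChanges` tracker removed above kept its pending (number, hash)
// entries ordered by block number so that `finalize` could split the vector at
// the finalized height in one pass. A minimal, self-contained sketch of that
// ordered-insert pattern, with plain `u64`/`[u8; 32]` standing in for `N`/`H`:
fn note_change(pending: &mut Vec<(u64, [u8; 32])>, at: (u64, [u8; 32])) {
    // `binary_search_by_key` returns `Err(i)` carrying the insertion point when
    // the key is absent, so the vector stays sorted by block number either way.
    let idx = pending
        .binary_search_by_key(&at.0, |change| change.0)
        .unwrap_or_else(|i| i);
    pending.insert(idx, at);
}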
-use std::collections::BTreeMap; -use std::iter::FromIterator; -use std::pin::Pin; -use std::sync::Arc; -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + iter::FromIterator, + marker::PhantomData, + pin::Pin, + sync::Arc, + time::Duration, +}; -use log::{debug, warn}; -use parity_scale_codec::{Decode, Encode}; +use finality_grandpa::{ + round::State as RoundState, voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError, +}; use futures::prelude::*; use futures_timer::Delay; +use log::{debug, warn}; +use parity_scale_codec::{Decode, Encode}; use parking_lot::RwLock; -use std::marker::PhantomData; +use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; -use sc_client_api::{backend::{Backend, apply_aux}, utils::is_descendent_of}; -use finality_grandpa::{ - BlockNumberOps, Error as GrandpaError, round::State as RoundState, - voter, voter_set::VoterSet, +use sc_client_api::{ + backend::{apply_aux, Backend as BackendT}, + utils::is_descendent_of, }; -use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as ClientError}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, Zero, +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sp_blockchain::HeaderMetadata; +use sp_consensus::SelectChain as SelectChainT; +use sp_finality_grandpa::{ + AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GrandpaApi, RoundNumber, + SetId, GRANDPA_ENGINE_ID, }; -use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; - -use crate::{ - CommandOrError, Commit, Config, Error, Precommit, Prevote, - PrimaryPropose, SignedMessage, NewAuthoritySet, VoterCommand, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, }; -use sp_consensus::SelectChain; - -use crate::authorities::{AuthoritySet, SharedAuthoritySet}; -use crate::communication::Network as NetworkT; -use crate::consensus_changes::SharedConsensusChanges; -use crate::notification::GrandpaJustificationSender; -use crate::justification::GrandpaJustification; -use crate::until_imported::UntilVoteTargetImported; -use crate::voting_rule::VotingRule; -use sp_finality_grandpa::{ - AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, - GrandpaApi, RoundNumber, SetId, +use crate::{ + authorities::{AuthoritySet, SharedAuthoritySet}, + communication::Network as NetworkT, + justification::GrandpaJustification, + local_authority_id, + notification::GrandpaJustificationSender, + until_imported::UntilVoteTargetImported, + voting_rule::VotingRule as VotingRuleT, + ClientForGrandpa, CommandOrError, Commit, Config, Error, NewAuthoritySet, Precommit, Prevote, + PrimaryPropose, SignedMessage, VoterCommand, }; -use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; type HistoricalVotes = finality_grandpa::HistoricalVotes< ::Hash, @@ -107,13 +109,11 @@ impl Encode for CompletedRounds { impl parity_scale_codec::EncodeLike for CompletedRounds {} impl Decode for CompletedRounds { - fn decode(value: &mut I) -> Result { + fn decode( + value: &mut I, + ) -> Result { <(Vec>, SetId, Vec)>::decode(value) - .map(|(rounds, set_id, voters)| CompletedRounds { - rounds, - set_id, - voters, - }) + .map(|(rounds, set_id, voters)| CompletedRounds { rounds, set_id, voters }) } } @@ -123,9 +123,7 @@ impl CompletedRounds { genesis: CompletedRound, set_id: SetId, voters: &AuthoritySet>, - ) - -> CompletedRounds - { + ) -> CompletedRounds { let mut 
rounds = Vec::with_capacity(NUM_LAST_COMPLETED_ROUNDS); rounds.push(genesis); @@ -139,13 +137,14 @@ impl CompletedRounds { } /// Iterate over all completed rounds. - pub fn iter(&self) -> impl Iterator> { + pub fn iter(&self) -> impl Iterator> { self.rounds.iter().rev() } /// Returns the last (latest) completed round. pub fn last(&self) -> &CompletedRound { - self.rounds.first() + self.rounds + .first() .expect("inner is never empty; always contains at least genesis; qed") } @@ -154,10 +153,11 @@ impl CompletedRounds { pub fn push(&mut self, completed_round: CompletedRound) { use std::cmp::Reverse; - match self.rounds.binary_search_by_key( - &Reverse(completed_round.number), - |completed_round| Reverse(completed_round.number), - ) { + match self + .rounds + .binary_search_by_key(&Reverse(completed_round.number), |completed_round| { + Reverse(completed_round.number) + }) { Ok(idx) => self.rounds[idx] = completed_round, Err(idx) => self.rounds.insert(idx, completed_round), }; @@ -217,37 +217,31 @@ impl VoterSetState { let mut current_rounds = CurrentRounds::new(); current_rounds.insert(1, HasVoted::No); - VoterSetState::Live { - completed_rounds, - current_rounds, - } + VoterSetState::Live { completed_rounds, current_rounds } } /// Returns the last completed rounds. pub(crate) fn completed_rounds(&self) -> CompletedRounds { match self { - VoterSetState::Live { completed_rounds, .. } => - completed_rounds.clone(), - VoterSetState::Paused { completed_rounds } => - completed_rounds.clone(), + VoterSetState::Live { completed_rounds, .. } => completed_rounds.clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.clone(), } } /// Returns the last completed round. pub(crate) fn last_completed_round(&self) -> CompletedRound { match self { - VoterSetState::Live { completed_rounds, .. } => - completed_rounds.last().clone(), - VoterSetState::Paused { completed_rounds } => - completed_rounds.last().clone(), + VoterSetState::Live { completed_rounds, .. } => completed_rounds.last().clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.last().clone(), } } /// Returns the voter set state validating that it includes the given round /// in current rounds and that the voter isn't paused. - pub fn with_current_round(&self, round: RoundNumber) - -> Result<(&CompletedRounds, &CurrentRounds), Error> - { + pub fn with_current_round( + &self, + round: RoundNumber, + ) -> Result<(&CompletedRounds, &CurrentRounds), Error> { if let VoterSetState::Live { completed_rounds, current_rounds } = self { if current_rounds.contains_key(&round) { Ok((completed_rounds, current_rounds)) @@ -286,10 +280,9 @@ impl HasVoted { /// Returns the proposal we should vote with (if any.) pub fn propose(&self) -> Option<&PrimaryPropose> { match self { - HasVoted::Yes(_, Vote::Propose(propose)) => - Some(propose), - HasVoted::Yes(_, Vote::Prevote(propose, _)) | HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => - propose.as_ref(), + HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose), + HasVoted::Yes(_, Vote::Prevote(propose, _)) | + HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(), _ => None, } } @@ -297,8 +290,8 @@ impl HasVoted { /// Returns the prevote we should vote with (if any.) 
pub fn prevote(&self) -> Option<&Prevote<Block>> { match self { - HasVoted::Yes(_, Vote::Prevote(_, prevote)) | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => - Some(prevote), + HasVoted::Yes(_, Vote::Prevote(_, prevote)) | + HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), _ => None, } } @@ -306,8 +299,7 @@ impl<Block: BlockT> HasVoted<Block> { /// Returns the precommit we should vote with (if any.) pub fn precommit(&self) -> Option<&Precommit<Block>> { match self { - HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => - Some(precommit), + HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => Some(precommit), _ => None, } } @@ -331,7 +323,11 @@ impl<Block: BlockT> HasVoted<Block> { /// A voter set state meant to be shared safely across multiple owners. #[derive(Clone)] pub struct SharedVoterSetState<Block: BlockT> { + /// The inner shared `VoterSetState`. inner: Arc<RwLock<VoterSetState<Block>>>, + /// A tracker for the rounds that we are actively participating in (i.e. voting) + /// and the authority id under which we are doing it. + voting: Arc<RwLock<HashMap<RoundNumber, AuthorityId>>>, } impl<Block: BlockT> From<VoterSetState<Block>> for SharedVoterSetState<Block> { @@ -343,7 +339,10 @@ impl<Block: BlockT> From<VoterSetState<Block>> for SharedVoterSetState<Block> { impl<Block: BlockT> SharedVoterSetState<Block> { /// Create a new shared voter set tracker with the given state. pub(crate) fn new(state: VoterSetState<Block>) -> Self { - SharedVoterSetState { inner: Arc::new(RwLock::new(state)) } + SharedVoterSetState { + inner: Arc::new(RwLock::new(state)), + voting: Arc::new(RwLock::new(HashMap::new())), + } } /// Read the inner voter set state. @@ -351,24 +350,41 @@ impl<Block: BlockT> SharedVoterSetState<Block> { self.inner.read() } + /// Get the authority id that we are using to vote on the given round, if any. + pub(crate) fn voting_on(&self, round: RoundNumber) -> Option<AuthorityId> { + self.voting.read().get(&round).cloned() + } + + /// Note that we started voting on the given round with the given authority id. + pub(crate) fn started_voting_on(&self, round: RoundNumber, local_id: AuthorityId) { + self.voting.write().insert(round, local_id); + } + + /// Note that we have finished voting on the given round. If we were voting on + /// the given round, the authority id that we were using to do it will be + /// cleared. + pub(crate) fn finished_voting_on(&self, round: RoundNumber) { + self.voting.write().remove(&round); + } + /// Return vote status information for the current round. pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted<Block> { match &*self.inner.read() { - VoterSetState::Live { current_rounds, .. } => { - current_rounds.get(&round).and_then(|has_voted| match has_voted { - HasVoted::Yes(id, vote) => - Some(HasVoted::Yes(id.clone(), vote.clone())), + VoterSetState::Live { current_rounds, .. } => current_rounds + .get(&round) + .and_then(|has_voted| match has_voted { + HasVoted::Yes(id, vote) => Some(HasVoted::Yes(id.clone(), vote.clone())), _ => None, }) - .unwrap_or(HasVoted::No) - }, + .unwrap_or(HasVoted::No), _ => HasVoted::No, } } // NOTE: not exposed outside of this module intentionally.
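// The `voting` field introduced above is a shared map from round number to the
// authority id we vote with in that round. A standalone sketch of the same
// pattern, with `String` standing in for `AuthorityId` to stay self-contained:
use parking_lot::RwLock; // the same lock type this module already uses
use std::{collections::HashMap, sync::Arc};

#[derive(Clone, Default)]
struct VotingTracker {
    voting: Arc<RwLock<HashMap<u64, String>>>,
}

impl VotingTracker {
    // the id we are voting with in `round`, if any
    fn voting_on(&self, round: u64) -> Option<String> {
        self.voting.read().get(&round).cloned()
    }
    // record the id when voting on the round begins...
    fn started_voting_on(&self, round: u64, id: String) {
        self.voting.write().insert(round, id);
    }
    // ...and clear it once the round concludes
    fn finished_voting_on(&self, round: u64) {
        self.voting.write().remove(&round);
    }
}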
fn with(&self, f: F) -> R - where F: FnOnce(&mut VoterSetState) -> R + where + F: FnOnce(&mut VoterSetState) -> R, { f(&mut *self.inner.write()) } @@ -416,13 +432,13 @@ pub(crate) struct Environment, SC, pub(crate) voters: Arc>, pub(crate) config: Config, pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, pub(crate) network: crate::communication::NetworkBridge, pub(crate) set_id: SetId, pub(crate) voter_set_state: SharedVoterSetState, pub(crate) voting_rule: VR, pub(crate) metrics: Option, pub(crate) justification_sender: Option>, + pub(crate) telemetry: Option, pub(crate) _phantom: PhantomData, } @@ -430,8 +446,9 @@ impl, SC, VR> Environment(&self, f: F) -> Result<(), Error> where - F: FnOnce(&VoterSetState) -> Result>, Error> + pub(crate) fn update_voter_set_state(&self, f: F) -> Result<(), Error> + where + F: FnOnce(&VoterSetState) -> Result>, Error>, { self.voter_set_state.with(|voter_set_state| { if let Some(set_state) = f(&voter_set_state)? { @@ -439,7 +456,9 @@ impl, SC, VR> Environment, SC, VR> Environment Environment where Block: BlockT, - BE: Backend, - C: crate::ClientForGrandpa, - C::Api: GrandpaApi, + BE: BackendT, + C: ClientForGrandpa, + C::Api: GrandpaApi, N: NetworkT, - SC: SelectChain + 'static, + SC: SelectChainT, { /// Report the given equivocation to the GRANDPA runtime module. This method /// generates a session membership proof of the offender and then submits an /// extrinsic to report the equivocation. In particular, the session membership /// proof must be generated at the block at which the given set was active which /// isn't necessarily the best block if there are pending authority set changes. - fn report_equivocation( + pub(crate) fn report_equivocation( &self, equivocation: Equivocation>, ) -> Result<(), Error> { + if let Some(local_id) = self.voter_set_state.voting_on(equivocation.round_number()) { + if *equivocation.offender() == local_id { + return Err(Error::Safety( + "Refraining from sending equivocation report for our own equivocation.".into(), + )) + } + } + let is_descendent_of = is_descendent_of(&*self.client, None); - let best_header = self.select_chain - .best_chain() - .map_err(|e| Error::Blockchain(e.to_string()))?; + let (best_block_hash, best_block_number) = { + // TODO [#9158]: Use SelectChain::best_chain() to get a potentially + // more accurate best block + let info = self.client.info(); + (info.best_hash, info.best_number) + }; - let authority_set = self.authority_set.inner().read(); + let authority_set = self.authority_set.inner(); // block hash and number of the next pending authority set change in the // given best chain. let next_change = authority_set - .next_change(&best_header.hash(), &is_descendent_of) + .next_change(&best_block_hash, &is_descendent_of) .map_err(|e| Error::Safety(e.to_string()))?; // find the hash of the latest block in the current set let current_set_latest_hash = match next_change { - Some((_, n)) if n.is_zero() => { - return Err(Error::Safety( - "Authority set change signalled at genesis.".to_string(), - )) - } + Some((_, n)) if n.is_zero() => + return Err(Error::Safety("Authority set change signalled at genesis.".to_string())), // the next set starts at `n` so the current one lasts until `n - 1`. if // `n` is later than the best block, then the current set is still live // at best block. 
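// The guard added to `report_equivocation` above reduces to comparing the
// offender against the id cached for the offending round. A reduced sketch,
// with hypothetical `&str` ids in place of `AuthorityId`:
fn check_not_self_report(local_id: Option<&str>, offender: &str) -> Result<(), String> {
    if let Some(local_id) = local_id {
        // refuse to submit a report that would punish our own key
        if offender == local_id {
            return Err("refraining from reporting our own equivocation".into())
        }
    }
    Ok(())
}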
- Some((_, n)) if n > *best_header.number() => best_header.hash(), + Some((_, n)) if n > best_block_number => best_block_hash, Some((h, _)) => { // this is the header at which the new set will start let header = self.client.header(BlockId::Hash(h))?.expect( @@ -505,160 +532,66 @@ where // its parent block is the last block in the current set *header.parent_hash() - } + }, // there is no pending change, the latest block for the current set is // the best block. - None => best_header.hash(), + None => best_block_hash, }; // generate key ownership proof at that block - let key_owner_proof = match self.client + let key_owner_proof = match self + .client .runtime_api() .generate_key_ownership_proof( &BlockId::Hash(current_set_latest_hash), authority_set.set_id, equivocation.offender().clone(), ) - .map_err(Error::Client)? + .map_err(Error::RuntimeApi)? { Some(proof) => proof, None => { debug!(target: "afg", "Equivocation offender is not part of the authority set."); - return Ok(()); - } + return Ok(()) + }, }; // submit equivocation report at **best** block - let equivocation_proof = EquivocationProof::new( - authority_set.set_id, - equivocation, - ); + let equivocation_proof = EquivocationProof::new(authority_set.set_id, equivocation); self.client .runtime_api() .submit_report_equivocation_unsigned_extrinsic( - &BlockId::Hash(best_header.hash()), + &BlockId::Hash(best_block_hash), equivocation_proof, key_owner_proof, ) - .map_err(Error::Client)?; + .map_err(Error::RuntimeApi)?; Ok(()) } } -impl - finality_grandpa::Chain> -for Environment +impl finality_grandpa::Chain> + for Environment where - Block: 'static, - BE: Backend, - C: crate::ClientForGrandpa, - N: NetworkT + 'static + Send, - SC: SelectChain + 'static, - VR: VotingRule, + Block: BlockT, + BE: BackendT, + C: ClientForGrandpa, + N: NetworkT, + SC: SelectChainT, + VR: VotingRuleT, NumberFor: BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { ancestry(&self.client, base, block) } - - fn best_chain_containing(&self, block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - // NOTE: when we finalize an authority set change through the sync protocol the voter is - // signaled asynchronously. therefore the voter could still vote in the next round - // before activating the new set. the `authority_set` is updated immediately thus we - // restrict the voter based on that. - if self.set_id != self.authority_set.set_id() { - return None; - } - - let base_header = match self.client.header(BlockId::Hash(block)).ok()? { - Some(h) => h, - None => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find base block", block); - return None; - } - }; - - // we refuse to vote beyond the current limit number where transitions are scheduled to - // occur. - // once blocks are finalized that make that transition irrelevant or activate it, - // we will proceed onwards. most of the time there will be no pending transition. - // the limit, if any, is guaranteed to be higher than or equal to the given base number. - let limit = self.authority_set.current_limit(*base_header.number()); - debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); - - match self.select_chain.finality_target(block, None) { - Ok(Some(best_hash)) => { - let best_header = self.client.header(BlockId::Hash(best_hash)).ok()? 
- .expect("Header known to exist after `finality_target` call; qed"); - - // check if our vote is currently being limited due to a pending change - let limit = limit.filter(|limit| limit < best_header.number()); - let target; - - let target_header = if let Some(target_number) = limit { - let mut target_header = best_header.clone(); - - // walk backwards until we find the target block - loop { - if *target_header.number() < target_number { - unreachable!( - "we are traversing backwards from a known block; \ - blocks are stored contiguously; \ - qed" - ); - } - - if *target_header.number() == target_number { - break; - } - - target_header = self.client.header(BlockId::Hash(*target_header.parent_hash())).ok()? - .expect("Header known to exist after `finality_target` call; qed"); - } - - target = target_header; - &target - } else { - // otherwise just use the given best as the target - &best_header - }; - - // restrict vote according to the given voting rule, if the - // voting rule doesn't restrict the vote then we keep the - // previous target. - // - // note that we pass the original `best_header`, i.e. before the - // authority set limit filter, which can be considered a - // mandatory/implicit voting rule. - // - // we also make sure that the restricted vote is higher than the - // round base (i.e. last finalized), otherwise the value - // returned by the given voting rule is ignored and the original - // target is used instead. - self.voting_rule - .restrict_vote(&*self.client, &base_header, &best_header, target_header) - .filter(|(_, restricted_number)| { - // we can only restrict votes within the interval [base, target] - restricted_number >= base_header.number() && - restricted_number < target_header.number() - }) - .or_else(|| Some((target_header.hash(), *target_header.number()))) - }, - Ok(None) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); - None - } - Err(e) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); - None - } - } - } } - pub(crate) fn ancestry( client: &Arc, base: Block::Hash, @@ -667,7 +600,9 @@ pub(crate) fn ancestry( where Client: HeaderMetadata, { - if base == block { return Err(GrandpaError::NotDescendent) } + if base == block { + return Err(GrandpaError::NotDescendent) + } let tree_route_res = sp_blockchain::tree_route(&**client, block, base); @@ -677,12 +612,12 @@ where debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {:?}", block, base, e); - return Err(GrandpaError::NotDescendent); - } + return Err(GrandpaError::NotDescendent) + }, }; if tree_route.common_block().hash != base { - return Err(GrandpaError::NotDescendent); + return Err(GrandpaError::NotDescendent) } // skip one because our ancestry is meant to start from the parent of `block`, @@ -690,33 +625,78 @@ where Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) } -impl voter::Environment> +impl voter::Environment> for Environment where - Block: 'static, - B: Backend, - C: crate::ClientForGrandpa + 'static, - C::Api: GrandpaApi, - N: NetworkT + 'static + Send + Sync, - SC: SelectChain + 'static, - VR: VotingRule, + Block: BlockT, + B: BackendT, + C: ClientForGrandpa + 'static, + C::Api: GrandpaApi, + N: NetworkT, + SC: SelectChainT + 'static, + VR: VotingRuleT + Clone + 'static, NumberFor: BlockNumberOps, { - type Timer = Pin> + Send + Sync>>; + type Timer = Pin> + Send>>; + type BestChain = Pin< + Box< + dyn Future)>, 
Self::Error>> + + Send, + >, + >; + type Id = AuthorityId; type Signature = AuthoritySignature; // regular round message streams - type In = Pin, Self::Signature, Self::Id>, Self::Error> - > + Send + Sync>>; - type Out = Pin>, - Error = Self::Error, - > + Send + Sync>>; + type In = Pin< + Box< + dyn Stream< + Item = Result< + ::finality_grandpa::SignedMessage< + Block::Hash, + NumberFor, + Self::Signature, + Self::Id, + >, + Self::Error, + >, + > + Send, + >, + >; + type Out = Pin< + Box< + dyn Sink< + ::finality_grandpa::Message>, + Error = Self::Error, + > + Send, + >, + >; type Error = CommandOrError>; + fn best_chain_containing(&self, block: Block::Hash) -> Self::BestChain { + let client = self.client.clone(); + let authority_set = self.authority_set.clone(); + let select_chain = self.select_chain.clone(); + let voting_rule = self.voting_rule.clone(); + let set_id = self.set_id; + + Box::pin(async move { + // NOTE: when we finalize an authority set change through the sync protocol the voter is + // signaled asynchronously. therefore the voter could still vote in the next round + // before activating the new set. the `authority_set` is updated immediately thus + // we restrict the voter based on that. + if set_id != authority_set.set_id() { + return Ok(None) + } + + best_chain_containing(block, client, authority_set, select_chain, voting_rule) + .await + .map_err(|e| e.into()) + }) + } + fn round_data( &self, round: RoundNumber, @@ -724,19 +704,29 @@ where let prevote_timer = Delay::new(self.config.gossip_duration * 2); let precommit_timer = Delay::new(self.config.gossip_duration * 4); - let local_id = crate::local_authority_id(&self.voters, self.config.keystore.as_ref()); + let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); let has_voted = match self.voter_set_state.has_voted(round) { - HasVoted::Yes(id, vote) => { + HasVoted::Yes(id, vote) => if local_id.as_ref().map(|k| k == &id).unwrap_or(false) { HasVoted::Yes(id, vote) } else { HasVoted::No - } - }, + }, HasVoted::No => HasVoted::No, }; + // NOTE: we cache the local authority id that we'll be using to vote on the + // given round. this is done to make sure we only check for available keys + // from the keystore in this method when beginning the round, otherwise if + // the keystore state changed during the round (e.g. a key was removed) it + // could lead to internal state inconsistencies in the voter environment + // (e.g. we wouldn't update the voter set state after prevoting since there's + // no local authority id). + if let Some(id) = local_id.as_ref() { + self.voter_set_state.started_voting_on(round, id.clone()); + } + // we can only sign when we have a local key in the authority set // and we have a reference to the keystore. let keystore = match (local_id.as_ref(), self.config.keystore.as_ref()) { @@ -754,14 +744,17 @@ where // schedule incoming messages from the network to be held until // corresponding blocks are imported. - let incoming = Box::pin(UntilVoteTargetImported::new( - self.client.import_notification_stream(), - self.network.clone(), - self.client.clone(), - incoming, - "round", - None, - ).map_err(Into::into)); + let incoming = Box::pin( + UntilVoteTargetImported::new( + self.client.import_notification_stream(), + self.network.clone(), + self.client.clone(), + incoming, + "round", + None, + ) + .map_err(Into::into), + ); // schedule network message cleanup when sink drops. 
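// `best_chain_containing` now returns the boxed `Self::BestChain` future seen
// above rather than a synchronous `Option`. A minimal sketch of that pattern,
// with hypothetical `Hash`/error types; shared state is cloned before `async move`:
use std::{future::Future, pin::Pin};

type Hash = [u8; 32];
type BestChain = Pin<Box<dyn Future<Output = Result<Option<(Hash, u64)>, String>> + Send>>;

fn best_chain_containing(block: Hash) -> BestChain {
    Box::pin(async move {
        // ...asynchronous chain selection would run here...
        Ok(Some((block, 0)))
    })
}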
let outgoing = Box::pin(outgoing.sink_err_into()); @@ -775,28 +768,32 @@ where } } - fn proposed(&self, round: RoundNumber, propose: PrimaryPropose) -> Result<(), Self::Error> { - let local_id = crate::local_authority_id(&self.voters, self.config.keystore.as_ref()); - - let local_id = match local_id { + fn proposed( + &self, + round: RoundNumber, + propose: PrimaryPropose, + ) -> Result<(), Self::Error> { + let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; self.update_voter_set_state(|voter_set_state| { let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; - let current_round = current_rounds.get(&round) + let current_round = current_rounds + .get(&round) .expect("checked in with_current_round that key exists; qed."); if !current_round.can_propose() { // we've already proposed in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) + let current_round = current_rounds + .get_mut(&round) .expect("checked previously that key exists; qed."); *current_round = HasVoted::Yes(local_id, Vote::Propose(propose)); @@ -815,15 +812,16 @@ where } fn prevoted(&self, round: RoundNumber, prevote: Prevote) -> Result<(), Self::Error> { - let local_id = crate::local_authority_id(&self.voters, self.config.keystore.as_ref()); - - let local_id = match local_id { + let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; let report_prevote_metrics = |prevote: &Prevote| { - telemetry!(CONSENSUS_DEBUG; "afg.prevote_issued"; + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.prevote_issued"; "round" => round, "target_number" => ?prevote.target_number, "target_hash" => ?prevote.target_hash, @@ -844,7 +842,7 @@ where // we've already prevoted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } // report to telemetry and prometheus @@ -853,7 +851,8 @@ where let propose = current_round.propose(); let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) + let current_round = current_rounds + .get_mut(&round) .expect("checked previously that key exists; qed."); *current_round = HasVoted::Yes(local_id, Vote::Prevote(propose.cloned(), prevote)); @@ -876,15 +875,16 @@ where round: RoundNumber, precommit: Precommit, ) -> Result<(), Self::Error> { - let local_id = crate::local_authority_id(&self.voters, self.config.keystore.as_ref()); - - let local_id = match local_id { + let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; let report_precommit_metrics = |precommit: &Precommit| { - telemetry!(CONSENSUS_DEBUG; "afg.precommit_issued"; + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.precommit_issued"; "round" => round, "target_number" => ?precommit.target_number, "target_hash" => ?precommit.target_hash, @@ -905,7 +905,7 @@ where // we've already precommitted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } // report to telemetry and prometheus @@ -916,12 +916,13 @@ where HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, _ => { let msg = "Voter precommitting before prevoting."; - return Err(Error::Safety(msg.to_string())); - } + 
return Err(Error::Safety(msg.to_string())) + }, }; let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) + let current_round = current_rounds + .get_mut(&round) .expect("checked previously that key exists; qed."); *current_round = HasVoted::Yes( @@ -967,7 +968,7 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); + return Err(Error::Safety(msg.to_string())) }; let mut completed_rounds = completed_rounds.clone(); @@ -992,16 +993,16 @@ where current_rounds.insert(round + 1, HasVoted::No); } - let set_state = VoterSetState::::Live { - completed_rounds, - current_rounds, - }; + let set_state = VoterSetState::::Live { completed_rounds, current_rounds }; crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; Ok(Some(set_state)) })?; + // clear any cached local authority id associated with this round + self.voter_set_state.finished_voting_on(round); + Ok(()) } @@ -1029,21 +1030,21 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); + return Err(Error::Safety(msg.to_string())) }; let mut completed_rounds = completed_rounds.clone(); - if let Some(already_completed) = completed_rounds.rounds - .iter_mut().find(|r| r.number == round) + if let Some(already_completed) = + completed_rounds.rounds.iter_mut().find(|r| r.number == round) { let n_existing_votes = already_completed.votes.len(); // the interface of Environment guarantees that the previous `historical_votes` // from `completable` is a prefix of what is passed to `concluded`. - already_completed.votes.extend( - historical_votes.seen().iter().skip(n_existing_votes).cloned() - ); + already_completed + .votes + .extend(historical_votes.seen().iter().skip(n_existing_votes).cloned()); already_completed.state = state; crate::aux_schema::write_concluded_round(&*self.client, &already_completed)?; } @@ -1071,21 +1072,22 @@ where finalize_block( self.client.clone(), &self.authority_set, - &self.consensus_changes, Some(self.config.justification_period.into()), hash, number, (round, commit).into(), false, self.justification_sender.as_ref(), + self.telemetry.clone(), ) } fn round_commit_timer(&self) -> Self::Timer { use rand::{thread_rng, Rng}; - //random between 0-1 seconds. - let delay: u64 = thread_rng().gen_range(0, 1000); + // random between `[0, 2 * gossip_duration]` seconds. + let delay: u64 = + thread_rng().gen_range(0..2 * self.config.gossip_duration.as_millis() as u64); Box::pin(Delay::new(Duration::from_millis(delay)).map(Ok)) } @@ -1129,6 +1131,111 @@ impl From> for JustificationOrCommit< } } +async fn best_chain_containing( + block: Block::Hash, + client: Arc, + authority_set: SharedAuthoritySet>, + select_chain: SelectChain, + voting_rule: VotingRule, +) -> Result)>, Error> +where + Backend: BackendT, + Block: BlockT, + Client: ClientForGrandpa, + SelectChain: SelectChainT + 'static, + VotingRule: VotingRuleT, +{ + let base_header = match client.header(BlockId::Hash(block))? { + Some(h) => h, + None => { + debug!(target: "afg", + "Encountered error finding best chain containing {:?}: couldn't find base block", + block, + ); + + return Ok(None) + }, + }; + + // we refuse to vote beyond the current limit number where transitions are scheduled to occur. + // once blocks are finalized that make that transition irrelevant or activate it, we will + // proceed onwards. 
most of the time there will be no pending transition. the limit, if any, is + // guaranteed to be higher than or equal to the given base number. + let limit = authority_set.current_limit(*base_header.number()); + debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); + + let result = match select_chain.finality_target(block, None).await { + Ok(Some(best_hash)) => { + let best_header = client + .header(BlockId::Hash(best_hash))? + .expect("Header known to exist after `finality_target` call; qed"); + + // check if our vote is currently being limited due to a pending change + let limit = limit.filter(|limit| limit < best_header.number()); + + let (base_header, best_header, target_header) = if let Some(target_number) = limit { + let mut target_header = best_header.clone(); + + // walk backwards until we find the target block + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ + blocks are stored contiguously; \ + qed" + ); + } + + if *target_header.number() == target_number { + break + } + + target_header = client + .header(BlockId::Hash(*target_header.parent_hash()))? + .expect("Header known to exist after `finality_target` call; qed"); + } + + (base_header, best_header, target_header) + } else { + // otherwise just use the given best as the target + (base_header, best_header.clone(), best_header) + }; + + // restrict vote according to the given voting rule, if the + // voting rule doesn't restrict the vote then we keep the + // previous target. + // + // note that we pass the original `best_header`, i.e. before the + // authority set limit filter, which can be considered a + // mandatory/implicit voting rule. + // + // we also make sure that the restricted vote is higher than the + // round base (i.e. last finalized), otherwise the value + // returned by the given voting rule is ignored and the original + // target is used instead. + voting_rule + .restrict_vote(client.clone(), &base_header, &best_header, &target_header) + .await + .filter(|(_, restricted_number)| { + // we can only restrict votes within the interval [base, target] + restricted_number >= base_header.number() && + restricted_number < target_header.number() + }) + .or_else(|| Some((target_header.hash(), *target_header.number()))) + }, + Ok(None) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); + None + }, + Err(e) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); + None + }, + }; + + Ok(result) +} + /// Finalize the given block and apply any authority set changes. If an /// authority set change is enacted then a justification is created (if not /// given) and stored with the block when finalizing it. @@ -1136,23 +1243,23 @@ impl From> for JustificationOrCommit< pub(crate) fn finalize_block( client: Arc, authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, justification_period: Option>, hash: Block::Hash, number: NumberFor, justification_or_commit: JustificationOrCommit, initial_sync: bool, justification_sender: Option<&GrandpaJustificationSender>, + telemetry: Option, ) -> Result<(), CommandOrError>> where Block: BlockT, - BE: Backend, - Client: crate::ClientForGrandpa, + BE: BackendT, + Client: ClientForGrandpa, { // NOTE: lock must be held through writing to DB to avoid race. 
this lock // also implicitly synchronizes the check for last finalized number // below. - let mut authority_set = authority_set.inner().write(); + let mut authority_set = authority_set.inner(); let status = client.info(); @@ -1166,48 +1273,22 @@ where status.finalized_number, ); - return Ok(()); + return Ok(()) } // FIXME #1483: clone only when changed let old_authority_set = authority_set.clone(); - // holds the old consensus changes in case it is changed below, needed for - // reverting in case of failure - let mut old_consensus_changes = None; - - let mut consensus_changes = consensus_changes.lock(); - let canon_at_height = |canon_number| { - // "true" because the block is finalized - canonical_at_height(&*client, (hash, number), true, canon_number) - }; let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { - let status = authority_set.apply_standard_changes( - hash, - number, - &is_descendent_of::(&*client, None), - initial_sync, - ).map_err(|e| Error::Safety(e.to_string()))?; - - // check if this is this is the first finalization of some consensus changes - let (alters_consensus_changes, finalizes_consensus_changes) = consensus_changes - .finalize((number, hash), &canon_at_height)?; - - if alters_consensus_changes { - old_consensus_changes = Some(consensus_changes.clone()); - - let write_result = crate::aux_schema::update_consensus_changes( - &*consensus_changes, - |insert| apply_aux(import_op, insert, &[]), - ); - - if let Err(e) = write_result { - warn!(target: "afg", "Failed to write updated consensus changes to disk. Bailing."); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - - return Err(e.into()); - } - } + let status = authority_set + .apply_standard_changes( + hash, + number, + &is_descendent_of::(&*client, None), + initial_sync, + None, + ) + .map_err(|e| Error::Safety(e.to_string()))?; // send a justification notification if a sender exists and in case of error log it. fn notify_justification( @@ -1227,84 +1308,81 @@ where // `N+1`. this assumption is required to make sure we store // justifications for transition blocks which will be requested by // syncing clients. 
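// The periodic-justification rule evaluated in the hunk below can be modelled
// over plain `u64` block numbers: persist a justification whenever finalization
// crosses a `justification_period` boundary. (The real code additionally forces
// one whenever the finalized block enacts a new authority set.)
fn justification_required(last_finalized: u64, number: u64, period: u64) -> bool {
    (last_finalized != 0 || number - last_finalized == period) &&
        (last_finalized / period != number / period)
}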
- let justification = match justification_or_commit { - JustificationOrCommit::Justification(justification) => { - notify_justification(justification_sender, || Ok(justification.clone())); - Some(justification.encode()) - }, + let (justification_required, justification) = match justification_or_commit { + JustificationOrCommit::Justification(justification) => (true, justification), JustificationOrCommit::Commit((round_number, commit)) => { let mut justification_required = // justification is always required when block that enacts new authorities // set is finalized - status.new_set_block.is_some() || - // justification is required when consensus changes are finalized - finalizes_consensus_changes; + status.new_set_block.is_some(); // justification is required every N blocks to be able to prove blocks // finalization to remote nodes if !justification_required { if let Some(justification_period) = justification_period { let last_finalized_number = client.info().finalized_number; - justification_required = - (!last_finalized_number.is_zero() || number - last_finalized_number == justification_period) && - (last_finalized_number / justification_period != number / justification_period); + justification_required = (!last_finalized_number.is_zero() || + number - last_finalized_number == justification_period) && + (last_finalized_number / justification_period != + number / justification_period); } } - // NOTE: the code below is a bit more verbose because we - // really want to avoid creating a justification if it isn't - // needed (e.g. if there's no subscribers), and also to avoid - // creating it twice. depending on the vote tree for the round, - // creating a justification might require multiple fetches of - // headers from the database. - let justification = || GrandpaJustification::from_commit( - &client, - round_number, - commit, - ); - - if justification_required { - let justification = justification()?; - notify_justification(justification_sender, || Ok(justification.clone())); + let justification = + GrandpaJustification::from_commit(&client, round_number, commit)?; - Some(justification.encode()) - } else { - notify_justification(justification_sender, justification); - - None - } + (justification_required, justification) }, }; - debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); + notify_justification(justification_sender, || Ok(justification.clone())); + + let persisted_justification = if justification_required { + Some((GRANDPA_ENGINE_ID, justification.encode())) + } else { + None + }; // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying. 
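// Justifications are now persisted keyed by consensus engine id, as in the
// `persisted_justification` value built above. A sketch of that shape; the
// "FRNK" id mirrors `sp_finality_grandpa::GRANDPA_ENGINE_ID`:
type ConsensusEngineId = [u8; 4];
const GRANDPA_ENGINE_ID: ConsensusEngineId = *b"FRNK";

fn persisted_justification(
    required: bool,
    encoded: Vec<u8>,
) -> Option<(ConsensusEngineId, Vec<u8>)> {
    // only store the justification when this block actually requires one
    required.then(|| (GRANDPA_ENGINE_ID, encoded))
}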
- client.apply_finality(import_op, BlockId::Hash(hash), justification, true).map_err(|e| { - warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); - e - })?; - telemetry!(CONSENSUS_INFO; "afg.finalized_blocks_up_to"; + client + .apply_finality(import_op, BlockId::Hash(hash), persisted_justification, true) + .map_err(|e| { + warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); + e + })?; + + debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.finalized_blocks_up_to"; "number" => ?number, "hash" => ?hash, ); + crate::aux_schema::update_best_justification(&justification, |insert| { + apply_aux(import_op, insert, &[]) + })?; + let new_authorities = if let Some((canon_hash, canon_number)) = status.new_set_block { // the authority set has changed. let (new_id, set_ref) = authority_set.current(); if set_ref.len() > 16 { - afg_log!(initial_sync, + afg_log!( + initial_sync, "👴 Applying GRANDPA set change to new set with {} authorities", set_ref.len(), ); } else { - afg_log!(initial_sync, - "👴 Applying GRANDPA set change to new set {:?}", - set_ref, - ); + afg_log!(initial_sync, "👴 Applying GRANDPA set change to new set {:?}", set_ref); } - telemetry!(CONSENSUS_INFO; "afg.generating_new_authority_set"; + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.generating_new_authority_set"; "number" => ?canon_number, "hash" => ?canon_hash, "authorities" => ?set_ref.to_vec(), "set_id" => ?new_id, @@ -1330,7 +1408,7 @@ where warn!(target: "afg", "Failed to write updated authority set to disk. Bailing."); warn!(target: "afg", "Node is in a potentially inconsistent state."); - return Err(e.into()); + return Err(e.into()) } } @@ -1343,57 +1421,7 @@ where Err(e) => { *authority_set = old_authority_set; - if let Some(old_consensus_changes) = old_consensus_changes { - *consensus_changes = old_consensus_changes; - } - Err(CommandOrError::Error(e)) - } - } -} - -/// Using the given base get the block at the given height on this chain. The -/// target block must be an ancestor of base, therefore `height <= base.height`. -pub(crate) fn canonical_at_height>( - provider: &C, - base: (Block::Hash, NumberFor), - base_is_canonical: bool, - height: NumberFor, -) -> Result, ClientError> { - if height > base.1 { - return Ok(None); + }, } - - if height == base.1 { - if base_is_canonical { - return Ok(Some(base.0)); - } else { - return Ok(provider.hash(height).unwrap_or(None)); - } - } else if base_is_canonical { - return Ok(provider.hash(height).unwrap_or(None)); - } - - let one = NumberFor::::one(); - - // start by getting _canonical_ block with number at parent position and then iterating - // backwards by hash. - let mut current = match provider.header(BlockId::Number(base.1 - one))? { - Some(header) => header, - _ => return Ok(None), - }; - - // we've already checked that base > height above. - let mut steps = base.1 - height - one; - - while steps > NumberFor::::zero() { - current = match provider.header(BlockId::Hash(*current.parent_hash()))? { - Some(header) => header, - _ => return Ok(None), - }; - - steps -= one; - } - - Ok(Some(current.hash())) } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 33dd69cc11d6e..1e20c2edc3a6e 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -36,1028 +36,512 @@ //! finality proof (that finalizes some block C that is ancestor of the B and descendant //! of the U) could be returned. -use std::sync::Arc; use log::{trace, warn}; +use std::sync::Arc; -use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; -use sc_client_api::{ - backend::Backend, StorageProof, - light::{FetchChecker, RemoteReadRequest}, - StorageProvider, ProofProvider, -}; -use parity_scale_codec::{Encode, Decode}; -use finality_grandpa::BlockNumberOps; +use parity_scale_codec::{Decode, Encode}; +use sc_client_api::backend::Backend; +use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; +use sp_finality_grandpa::GRANDPA_ENGINE_ID; use sp_runtime::{ - Justification, generic::BlockId, - traits::{NumberFor, Block as BlockT, Header as HeaderT, One}, + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, }; -use sp_core::storage::StorageKey; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sp_finality_grandpa::{AuthorityId, AuthorityList, VersionedAuthorityList, GRANDPA_AUTHORITIES_KEY}; - -use crate::justification::GrandpaJustification; -use crate::VoterSet; - -/// Maximum number of fragments that we want to return in a single prove_finality call. -const MAX_FRAGMENTS_IN_PROOF: usize = 8; - -/// GRANDPA authority set related methods for the finality proof provider. -pub trait AuthoritySetForFinalityProver: Send + Sync { - /// Read GRANDPA_AUTHORITIES_KEY from storage at given block. - fn authorities(&self, block: &BlockId) -> ClientResult; - /// Prove storage read of GRANDPA_AUTHORITIES_KEY at given block. - fn prove_authorities(&self, block: &BlockId) -> ClientResult; -} - -/// Trait that combines `StorageProvider` and `ProofProvider` -pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync - where - Block: BlockT, - BE: Backend + Send + Sync, -{} - -/// Blanket implementation. -impl StorageAndProofProvider for P - where - Block: BlockT, - BE: Backend + Send + Sync, - P: StorageProvider + ProofProvider + Send + Sync, -{} - -/// Implementation of AuthoritySetForFinalityProver. -impl AuthoritySetForFinalityProver for Arc> - where - BE: Backend + Send + Sync + 'static, -{ - fn authorities(&self, block: &BlockId) -> ClientResult { - let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec()); - self.storage(block, &storage_key)? - .and_then(|encoded| VersionedAuthorityList::decode(&mut encoded.0.as_slice()).ok()) - .map(|versioned| versioned.into()) - .ok_or(ClientError::InvalidAuthoritiesSet) - } - - fn prove_authorities(&self, block: &BlockId) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY)) - } -} -/// GRANDPA authority set related methods for the finality proof checker. -pub trait AuthoritySetForFinalityChecker: Send + Sync { - /// Check storage read proof of GRANDPA_AUTHORITIES_KEY at given block. - fn check_authorities_proof( - &self, - hash: Block::Hash, - header: Block::Header, - proof: StorageProof, - ) -> ClientResult; -} - -/// FetchChecker-based implementation of AuthoritySetForFinalityChecker. 
-impl AuthoritySetForFinalityChecker for Arc> { - fn check_authorities_proof( - &self, - hash: Block::Hash, - header: Block::Header, - proof: StorageProof, - ) -> ClientResult { - let storage_key = GRANDPA_AUTHORITIES_KEY.to_vec(); - let request = RemoteReadRequest { - block: hash, - header, - keys: vec![storage_key.clone()], - retry_count: None, - }; +use crate::{ + authorities::{AuthoritySetChangeId, AuthoritySetChanges}, + best_justification, + justification::GrandpaJustification, + SharedAuthoritySet, +}; - self.check_read_proof(&request, proof) - .and_then(|results| { - let maybe_encoded = results.get(&storage_key) - .expect( - "storage_key is listed in the request keys; \ - check_read_proof must return a value for each requested key; - qed" - ); - maybe_encoded - .as_ref() - .and_then(|encoded| { - VersionedAuthorityList::decode(&mut encoded.as_slice()).ok() - }) - .map(|versioned| versioned.into()) - .ok_or(ClientError::InvalidAuthoritiesSet) - }) - } -} +const MAX_UNKNOWN_HEADERS: usize = 100_000; /// Finality proof provider for serving network requests. -pub struct FinalityProofProvider { - backend: Arc, - authority_provider: Arc>, +pub struct FinalityProofProvider { + backend: Arc, + shared_authority_set: Option>>, } -impl FinalityProofProvider - where B: Backend + Send + Sync + 'static +impl FinalityProofProvider +where + Block: BlockT, + B: Backend, { /// Create new finality proof provider using: /// /// - backend for accessing blockchain data; /// - authority_provider for calling and proving runtime methods. - pub fn new
<P>(
+	/// - shared_authority_set for accessing authority set data +	pub fn new( backend: Arc<B>, -		authority_provider: P, -	) -> Self -		where P: AuthoritySetForFinalityProver<Block> + 'static, -	{ -		FinalityProofProvider { backend, authority_provider: Arc::new(authority_provider) } +		shared_authority_set: Option<SharedAuthoritySet<Block::Hash, NumberFor<Block>>>, +	) -> Self { +		FinalityProofProvider { backend, shared_authority_set } } /// Create new finality proof provider for the service using: /// /// - backend for accessing blockchain data; -	/// - storage_and_proof_provider, which is generally a client. +	/// - storage_provider, which is generally a client. +	/// - shared_authority_set for accessing authority set data pub fn new_for_service( backend: Arc<B>, -		storage_and_proof_provider: Arc<dyn StorageAndProofProvider<Block, B>>, +		shared_authority_set: Option<SharedAuthoritySet<Block::Hash, NumberFor<Block>>>, ) -> Arc<Self> { -		Arc::new(Self::new(backend, storage_and_proof_provider)) +		Arc::new(Self::new(backend, shared_authority_set)) } } impl<B, Block> FinalityProofProvider<B, Block> -	where -		Block: BlockT, -		NumberFor<Block>: BlockNumberOps, -		B: Backend<Block> + Send + Sync + 'static, +where +	Block: BlockT, +	B: Backend<Block>, { -	/// Prove finality for the range (begin; end] hash. Returns None if there are no finalized blocks -	/// unknown in the range. +	/// Prove finality for the given block number by returning a Justification for the last block of +	/// the authority set. pub fn prove_finality( &self, -		begin: Block::Hash, -		end: Block::Hash, -		authorities_set_id: u64, -	) -> Result<Option<Vec<u8>>, ClientError> { -		prove_finality::<_, _, GrandpaJustification<Block>>( -			&*self.backend.blockchain(), -			&*self.authority_provider, -			authorities_set_id, -			begin, -			end, -		) -	} -} +		block: NumberFor<Block>, +	) -> Result<Option<Vec<u8>>, FinalityProofError> { +		let authority_set_changes = if let Some(changes) = self +			.shared_authority_set +			.as_ref() +			.map(SharedAuthoritySet::authority_set_changes) +		{ +			changes +		} else { +			return Ok(None) +		}; -impl<B, Block> sc_network::config::FinalityProofProvider<Block> for FinalityProofProvider<B, Block> -	where -		Block: BlockT, -		NumberFor<Block>: BlockNumberOps, -		B: Backend<Block> + Send + Sync + 'static, -{ -	fn prove_finality( -		&self, -		for_block: Block::Hash, -		request: &[u8], -	) -> Result<Option<Vec<u8>>, ClientError> { -		let request: FinalityProofRequest<Block::Hash> = Decode::decode(&mut &request[..]) -			.map_err(|e| { -				warn!(target: "afg", "Unable to decode finality proof request: {}", e.what()); -				ClientError::Backend("Invalid finality proof request".to_string()) -			})?; -		match request { -			FinalityProofRequest::Original(request) => prove_finality::<_, _, GrandpaJustification<Block>>( -				&*self.backend.blockchain(), -				&*self.authority_provider, -				request.authorities_set_id, -				request.last_finalized, -				for_block, -			), -		} +		prove_finality(&*self.backend, authority_set_changes, block) } } -/// The effects of block finality. -#[derive(Debug, PartialEq)] -pub struct FinalityEffects<Header: HeaderT> { -	/// The (ordered) set of headers that could be imported. -	pub headers_to_import: Vec<Header>,
-	/// The hash of the block that could be finalized. -	pub block: Header::Hash, -	/// The justification for the block. -	pub justification: Vec<u8>, -	/// New authorities set id that should be applied starting from block. -	pub new_set_id: u64, -	/// New authorities set that should be applied starting from block. -	pub new_authorities: AuthorityList, -} - -/// Single fragment of proof-of-finality. -/// /// Finality for block B is proved by providing: /// 1) the justification for the descendant block F; /// 2) headers sub-chain (B; F] if B != F; -/// 3) proof of GRANDPA::authorities() if the set changes at block F. #[derive(Debug, PartialEq, Encode, Decode, Clone)] -pub struct FinalityProofFragment<Header: HeaderT> { +pub struct FinalityProof<Header: HeaderT> { /// The hash of block F for which justification is provided. pub block: Header::Hash, /// Justification of the block F. pub justification: Vec<u8>, -	/// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. +	/// The set of headers in the range (B; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec<Header>,
-	/// Optional proof of execution of GRANDPA::authorities() at the `block`. -	pub authorities_proof: Option<StorageProof>, } -/// Proof of finality is the ordered set of finality fragments, where: -/// - last fragment provides justification for the best possible block from the requested range; -/// - all other fragments provide justifications for GRANDPA authorities set changes within requested range. -type FinalityProof<Header> = Vec<FinalityProofFragment<Header>>;
- -/// Finality proof request data. -#[derive(Debug, Encode, Decode)] -enum FinalityProofRequest<H: Encode + Decode> { -	/// Original version of the request. -	Original(OriginalFinalityProofRequest<H>), +/// Errors occurring when trying to prove finality +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum FinalityProofError { +	/// The requested block has not yet been finalized. +	#[display(fmt = "Block not yet finalized")] +	BlockNotYetFinalized, +	/// The requested block is not covered by authority set changes. Likely this means the block is +	/// in the latest authority set, and the subscription API is more appropriate. +	#[display(fmt = "Block not covered by authority set changes")] +	BlockNotInAuthoritySetChanges, +	/// Errors originating from the client. +	Client(sp_blockchain::Error), } -/// Original version of finality proof request. -#[derive(Debug, Encode, Decode)] -struct OriginalFinalityProofRequest<H: Encode + Decode> { -	/// The authorities set id we are waiting proof from. -	/// -	/// The first justification in the proof must be signed by this authority set. -	pub authorities_set_id: u64, -	/// Hash of the last known finalized block. -	pub last_finalized: H, -} - -/// Prepare data blob associated with finality proof request. -pub(crate) fn make_finality_proof_request<H: Encode + Decode>(last_finalized: H, authorities_set_id: u64) -> Vec<u8> { -	FinalityProofRequest::Original(OriginalFinalityProofRequest { -		authorities_set_id, -		last_finalized, -	}).encode() -} - -/// Prepare proof-of-finality for the best possible block in the range: (begin; end]. -/// -/// It is assumed that the caller already have a proof-of-finality for the block 'begin'. -/// It is assumed that the caller already knows all blocks in the range (begin; end]. -/// -/// Returns None if there are no finalized blocks unknown to the caller. -pub(crate) fn prove_finality<Block: BlockT, B: BlockchainBackend<Block>, J>( -	blockchain: &B, -	authorities_provider: &dyn AuthoritySetForFinalityProver<Block>, -	authorities_set_id: u64, -	begin: Block::Hash, -	end: Block::Hash, -) -> ::sp_blockchain::Result<Option<Vec<u8>>> -	where -		J: ProvableJustification<Block::Header>, +fn prove_finality<Block, B>( +	backend: &B, +	authority_set_changes: AuthoritySetChanges<NumberFor<Block>>, +	block: NumberFor<Block>, +) -> Result<Option<Vec<u8>>, FinalityProofError> +where +	Block: BlockT, +	B: Backend<Block>, { -	let begin_id = BlockId::Hash(begin); -	let begin_number = blockchain.expect_block_number_from_id(&begin_id)?; - -	// early-return if we sure that there are no blocks finalized AFTER begin block -	let info = blockchain.info(); -	if info.finalized_number <= begin_number { -		trace!( -			target: "afg", -			"Requested finality proof for descendant of #{} while we only have finalized #{}. Returning empty proof.", -			begin_number, -			info.finalized_number, +	// Early-return if we are sure that there are no blocks finalized that cover the requested +	// block. +	let info = backend.blockchain().info(); +	if info.finalized_number < block { +		let err = format!( +			"Requested finality proof for descendant of #{} while we only have finalized #{}.", +			block, info.finalized_number, ); - -		return Ok(None); -	} - -	// check if blocks range is valid.
-/// Original version of finality proof request.
-#[derive(Debug, Encode, Decode)]
-struct OriginalFinalityProofRequest<H: Encode + Decode> {
-	/// The authorities set id we are waiting proof from.
-	///
-	/// The first justification in the proof must be signed by this authority set.
-	pub authorities_set_id: u64,
-	/// Hash of the last known finalized block.
-	pub last_finalized: H,
-}
-
-/// Prepare data blob associated with finality proof request.
-pub(crate) fn make_finality_proof_request<H: Encode + Decode>(last_finalized: H, authorities_set_id: u64) -> Vec<u8> {
-	FinalityProofRequest::Original(OriginalFinalityProofRequest {
-		authorities_set_id,
-		last_finalized,
-	}).encode()
-}
-
-/// Prepare proof-of-finality for the best possible block in the range: (begin; end].
-///
-/// It is assumed that the caller already have a proof-of-finality for the block 'begin'.
-/// It is assumed that the caller already knows all blocks in the range (begin; end].
-///
-/// Returns None if there are no finalized blocks unknown to the caller.
-pub(crate) fn prove_finality<Block: BlockT, B: BlockchainBackend<Block>, J>(
-	blockchain: &B,
-	authorities_provider: &dyn AuthoritySetForFinalityProver<Block>,
-	authorities_set_id: u64,
-	begin: Block::Hash,
-	end: Block::Hash,
-) -> ::sp_blockchain::Result<Option<Vec<u8>>>
-	where
-		J: ProvableJustification<Block::Header>,
+fn prove_finality<Block, B>(
+	backend: &B,
+	authority_set_changes: AuthoritySetChanges<NumberFor<Block>>,
+	block: NumberFor<Block>,
+) -> Result<Option<Vec<u8>>, FinalityProofError>
+where
+	Block: BlockT,
+	B: Backend<Block>,
 {
-	let begin_id = BlockId::Hash(begin);
-	let begin_number = blockchain.expect_block_number_from_id(&begin_id)?;
-
-	// early-return if we sure that there are no blocks finalized AFTER begin block
-	let info = blockchain.info();
-	if info.finalized_number <= begin_number {
-		trace!(
-			target: "afg",
-			"Requested finality proof for descendant of #{} while we only have finalized #{}. Returning empty proof.",
-			begin_number,
-			info.finalized_number,
+	// Early-return if we are sure that there are no blocks finalized that cover the requested
+	// block.
+	let info = backend.blockchain().info();
+	if info.finalized_number < block {
+		let err = format!(
+			"Requested finality proof for descendant of #{} while we only have finalized #{}.",
+			block, info.finalized_number,
 		);
-
-		return Ok(None);
-	}
-
-	// check if blocks range is valid. It is the caller responsibility to ensure
-	// that it only asks peers that know about whole blocks range
-	let end_number = blockchain.expect_block_number_from_id(&BlockId::Hash(end))?;
-	if begin_number + One::one() > end_number {
-		return Err(ClientError::Backend(
-			format!("Cannot generate finality proof for invalid range: {}..{}", begin_number, end_number),
-		));
-	}
-
-	// early-return if we sure that the block is NOT a part of canonical chain
-	let canonical_begin = blockchain.expect_block_hash_from_id(&BlockId::Number(begin_number))?;
-	if begin != canonical_begin {
-		return Err(ClientError::Backend(
-			format!("Cannot generate finality proof for non-canonical block: {}", begin),
-		));
+		trace!(target: "afg", "{}", &err);
+		return Err(FinalityProofError::BlockNotYetFinalized)
 	}
\ + Returning empty proof.", + ); + return Ok(None) } - } - - // we can't provide more justifications - if current_number == info.finalized_number { - // append last justification - even if we can't generate finality proof for - // the end block, we try to generate it for the latest possible block - if let Some(latest_proof_fragment) = latest_proof_fragment.take() { - finality_proof.push(latest_proof_fragment); - - fragment_index += 1; - if fragment_index == MAX_FRAGMENTS_IN_PROOF { - break; - } + }, + AuthoritySetChangeId::Set(_, last_block_for_set) => { + let last_block_for_set_id = BlockId::Number(last_block_for_set); + let justification = if let Some(grandpa_justification) = backend + .blockchain() + .justifications(last_block_for_set_id)? + .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) + { + grandpa_justification + } else { + trace!( + target: "afg", + "No justification found when making finality proof for {}. \ + Returning empty proof.", + block, + ); + return Ok(None) + }; + (justification, last_block_for_set) + }, + AuthoritySetChangeId::Unknown => { + warn!( + target: "afg", + "AuthoritySetChanges does not cover the requested block #{} due to missing data. \ + You need to resync to populate AuthoritySetChanges properly.", + block, + ); + return Err(FinalityProofError::BlockNotInAuthoritySetChanges) + }, + }; + + // Collect all headers from the requested block until the last block of the set + let unknown_headers = { + let mut headers = Vec::new(); + let mut current = block + One::one(); + loop { + if current > just_block || headers.len() >= MAX_UNKNOWN_HEADERS { + break } - break; + headers.push(backend.blockchain().expect_header(BlockId::Number(current))?); + current += One::one(); } - - // else search for the next justification - current_number += One::one(); - } - - if finality_proof.is_empty() { - trace!( - target: "afg", - "No justifications found when making finality proof for {}. Returning empty proof.", - end, - ); - - Ok(None) - } else { - trace!( - target: "afg", - "Built finality proof for {} of {} fragments. Last fragment for {}.", - end, - finality_proof.len(), - finality_proof.last().expect("checked that !finality_proof.is_empty(); qed").block, - ); - - Ok(Some(finality_proof.encode())) - } -} - -/// Check GRANDPA proof-of-finality for the given block. -/// -/// Returns the vector of headers that MUST be validated + imported -/// AND if at least one of those headers is invalid, all other MUST be considered invalid. -pub(crate) fn check_finality_proof( - blockchain: &B, - current_set_id: u64, - current_authorities: AuthorityList, - authorities_provider: &dyn AuthoritySetForFinalityChecker, - remote_proof: Vec, -) -> ClientResult> - where - NumberFor: BlockNumberOps, - B: BlockchainBackend, - J: ProvableJustification, -{ - // decode finality proof - let proof = FinalityProof::::decode(&mut &remote_proof[..]) - .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; - - // empty proof can't prove anything - if proof.is_empty() { - return Err(ClientError::BadJustification("empty proof of finality".into())); - } - - // iterate and verify proof fragments - let last_fragment_index = proof.len() - 1; - let mut authorities = AuthoritiesOrEffects::Authorities(current_set_id, current_authorities); - for (proof_fragment_index, proof_fragment) in proof.into_iter().enumerate() { - // check that proof is non-redundant. 
The proof still can be valid, but - // we do not want peer to spam us with redundant data - if proof_fragment_index != last_fragment_index { - let has_unknown_headers = !proof_fragment.unknown_headers.is_empty(); - let has_new_authorities = proof_fragment.authorities_proof.is_some(); - if has_unknown_headers || !has_new_authorities { - return Err(ClientError::BadJustification("redundant proof of finality".into())); - } + headers + }; + + Ok(Some( + FinalityProof { + block: backend.blockchain().expect_block_hash_from_id(&BlockId::Number(just_block))?, + justification, + unknown_headers, } - - authorities = check_finality_proof_fragment::<_, _, J>( - blockchain, - authorities, - authorities_provider, - proof_fragment)?; - } - - let effects = authorities.extract_effects().expect("at least one loop iteration is guaranteed - because proof is not empty;\ - check_finality_proof_fragment is called on every iteration;\ - check_finality_proof_fragment always returns FinalityEffects;\ - qed"); - - telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; - "set_id" => ?effects.new_set_id, "finalized_header_hash" => ?effects.block); - - Ok(effects) -} - -/// Check finality proof for the single block. -fn check_finality_proof_fragment( - blockchain: &B, - authority_set: AuthoritiesOrEffects, - authorities_provider: &dyn AuthoritySetForFinalityChecker, - proof_fragment: FinalityProofFragment, -) -> ClientResult> - where - NumberFor: BlockNumberOps, - B: BlockchainBackend, - J: Decode + ProvableJustification, -{ - // verify justification using previous authorities set - let (mut current_set_id, mut current_authorities) = authority_set.extract_authorities(); - let justification: J = Decode::decode(&mut &proof_fragment.justification[..]) - .map_err(|_| ClientError::JustificationDecode)?; - justification.verify(current_set_id, ¤t_authorities)?; - - // and now verify new authorities proof (if provided) - if let Some(new_authorities_proof) = proof_fragment.authorities_proof { - // the proof is either generated using known header and it is safe to query header - // here, because its non-finality proves that it can't be pruned - // or it is generated using last unknown header (because it is the one who has - // justification => we only generate proofs for headers with justifications) - let header = match proof_fragment.unknown_headers.iter().rev().next().cloned() { - Some(header) => header, - None => blockchain.expect_header(BlockId::Hash(proof_fragment.block))?, - }; - current_authorities = authorities_provider.check_authorities_proof( - proof_fragment.block, - header, - new_authorities_proof, - )?; - - current_set_id += 1; - } - - Ok(AuthoritiesOrEffects::Effects(FinalityEffects { - headers_to_import: proof_fragment.unknown_headers, - block: proof_fragment.block, - justification: proof_fragment.justification, - new_set_id: current_set_id, - new_authorities: current_authorities, - })) -} - -/// Authorities set from initial authorities set or finality effects. -enum AuthoritiesOrEffects { - Authorities(u64, AuthorityList), - Effects(FinalityEffects
), -} - -impl AuthoritiesOrEffects
-impl<Header: HeaderT> AuthoritiesOrEffects<Header> {
-	pub fn extract_authorities(self) -> (u64, AuthorityList) {
-		match self {
-			AuthoritiesOrEffects::Authorities(set_id, authorities) => (set_id, authorities),
-			AuthoritiesOrEffects::Effects(effects) => (effects.new_set_id, effects.new_authorities),
-		}
-	}
-
-	pub fn extract_effects(self) -> Option<FinalityEffects<Header>> {
-		match self {
-			AuthoritiesOrEffects::Authorities(_, _) => None,
-			AuthoritiesOrEffects::Effects(effects) => Some(effects),
-		}
-	}
-}
-
-/// Justification used to prove block finality.
-pub(crate) trait ProvableJustification<Header: HeaderT>: Encode + Decode {
-	/// Verify justification with respect to authorities set and authorities set id.
-	fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()>;
-
-	/// Decode and verify justification.
-	fn decode_and_verify(
-		justification: &Justification,
-		set_id: u64,
-		authorities: &[(AuthorityId, u64)],
-	) -> ClientResult<Self> {
-		let justification = Self::decode(&mut &**justification)
-			.map_err(|_| ClientError::JustificationDecode)?;
-		justification.verify(set_id, authorities)?;
-		Ok(justification)
-	}
-}
-
-impl<Block: BlockT> ProvableJustification<Block::Header> for GrandpaJustification<Block>
-	where
-		NumberFor<Block>: BlockNumberOps,
-{
-	fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> {
-		let authorities = VoterSet::new(authorities.iter().cloned()).ok_or(
-			ClientError::Consensus(sp_consensus::Error::InvalidAuthoritiesSet),
-		)?;
-
-		GrandpaJustification::verify(self, set_id, &authorities)
-	}
+		.encode(),
+	))
 }
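// Illustrative sketch: end-to-end use of the `prove_finality` function that
// closes just above. `fetch_proof_sketch` is a hypothetical wrapper; it only
// combines APIs visible in this file (the returned bytes are the SCALE
// encoding of `FinalityProof`, so they can be decoded back for inspection).
fn fetch_proof_sketch<Block, B>(
	backend: &B,
	changes: AuthoritySetChanges<NumberFor<Block>>,
	block: NumberFor<Block>,
) -> Result<Option<FinalityProof<Block::Header>>, FinalityProofError>
where
	Block: BlockT,
	B: Backend<Block>,
{
	// Re-decode the freshly encoded proof so callers can look at
	// `proof.block`, `proof.justification`, and `proof.unknown_headers`.
	Ok(prove_finality(backend, changes, block)?.map(|bytes| {
		FinalityProof::<Block::Header>::decode(&mut &bytes[..])
			.expect("prove_finality returned a freshly encoded proof; qed")
	}))
}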
 
 #[cfg(test)]
 pub(crate) mod tests {
-	use substrate_test_runtime_client::runtime::{Block, Header, H256};
-	use sc_client_api::NewBlockState;
-	use sc_client_api::in_mem::Blockchain as InMemoryBlockchain;
 	use super::*;
+	use crate::{authorities::AuthoritySetChanges, BlockNumberOps, ClientError, SetId};
+	use futures::executor::block_on;
+	use sc_block_builder::BlockBuilderProvider;
+	use sc_client_api::{apply_aux, LockImportRun};
+	use sp_consensus::BlockOrigin;
 	use sp_core::crypto::Public;
-
-	pub(crate) type FinalityProof = super::FinalityProof<Header>;
-
-	impl<GetAuthorities, ProveAuthorities> AuthoritySetForFinalityProver for (GetAuthorities, ProveAuthorities)
-		where
-			GetAuthorities: Send + Sync + Fn(BlockId<Block>) -> ClientResult<AuthorityList>,
-			ProveAuthorities: Send + Sync + Fn(BlockId<Block>) -> ClientResult<StorageProof>,
+	use sp_finality_grandpa::{AuthorityId, GRANDPA_ENGINE_ID as ID};
+	use sp_keyring::Ed25519Keyring;
+	use substrate_test_runtime_client::{
+		runtime::{Block, Header, H256},
+		Backend as TestBackend, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt,
+		TestClient, TestClientBuilder, TestClientBuilderExt,
+	};
+
+	/// Check GRANDPA proof-of-finality for the given block.
+	///
+	/// Returns the vector of headers that MUST be validated + imported
+	/// AND if at least one of those headers is invalid, all other MUST be considered invalid.
+	fn check_finality_proof<Block: BlockT>(
+		current_set_id: SetId,
+		current_authorities: sp_finality_grandpa::AuthorityList,
+		remote_proof: Vec<u8>,
+	) -> sp_blockchain::Result<super::FinalityProof<Block::Header>>
+	where
+		NumberFor<Block>: BlockNumberOps,
 	{
-		fn authorities(&self, block: &BlockId<Block>) -> ClientResult<AuthorityList> {
-			self.0(*block)
-		}
+		let proof = super::FinalityProof::<Block::Header>::decode(&mut &remote_proof[..])
+			.map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?;
 
-		fn prove_authorities(&self, block: &BlockId<Block>) -> ClientResult<StorageProof> {
-			self.1(*block)
-		}
-	}
-
-	pub(crate) struct ClosureAuthoritySetForFinalityChecker<Closure>(pub Closure);
+		let justification: GrandpaJustification<Block> =
+			Decode::decode(&mut &proof.justification[..])
+				.map_err(|_| ClientError::JustificationDecode)?;
+		justification.verify(current_set_id, &current_authorities)?;
 
-	impl<Closure> AuthoritySetForFinalityChecker<Block> for ClosureAuthoritySetForFinalityChecker<Closure>
-		where
-			Closure: Send + Sync + Fn(H256, Header, StorageProof) -> ClientResult<AuthorityList>,
-	{
-		fn check_authorities_proof(
-			&self,
-			hash: H256,
-			header: Header,
-			proof: StorageProof,
-		) -> ClientResult<AuthorityList> {
-			self.0(hash, header, proof)
-		}
+		Ok(proof)
 	}
 
-	#[derive(Debug, PartialEq, Encode, Decode)]
-	pub struct TestJustification(pub (u64, AuthorityList), pub Vec<u8>);
-
-	impl ProvableJustification<Header> for TestJustification {
-		fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> {
-			if (self.0).0 != set_id || (self.0).1 != authorities {
-				return Err(ClientError::BadJustification("test".into()));
-			}
-
-			Ok(())
-		}
-	}
+	pub(crate) type FinalityProof = super::FinalityProof<Header>
; fn header(number: u64) -> Header { let parent_hash = match number { 0 => Default::default(), _ => header(number - 1).hash(), }; - Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(0), parent_hash, Default::default()) - } - - fn side_header(number: u64) -> Header { Header::new( number, H256::from_low_u64_be(0), - H256::from_low_u64_be(1), - header(number - 1).hash(), - Default::default(), - ) - } - - fn second_side_header(number: u64) -> Header { - Header::new( - number, H256::from_low_u64_be(0), - H256::from_low_u64_be(1), - side_header(number - 1).hash(), + parent_hash, Default::default(), ) } - fn test_blockchain() -> InMemoryBlockchain { - let blockchain = InMemoryBlockchain::::new(); - blockchain.insert(header(0).hash(), header(0), Some(vec![0]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(1).hash(), header(1), Some(vec![1]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(2).hash(), header(2), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(3).hash(), header(3), Some(vec![3]), None, NewBlockState::Final).unwrap(); - blockchain - } + fn test_blockchain( + number_of_blocks: u64, + to_finalize: &[u64], + ) -> (Arc, Arc, Vec) { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); - #[test] - fn finality_prove_fails_with_invalid_range() { - let blockchain = test_blockchain(); - - // their last finalized is: 2 - // they request for proof-of-finality of: 2 - // => range is invalid - prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(2).hash(), - header(2).hash(), - ).unwrap_err(); - } + let mut blocks = Vec::new(); + for _ in 0..number_of_blocks { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + blocks.push(block); + } - #[test] - fn finality_proof_is_none_if_no_more_last_finalized_blocks() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - - // our last finalized is: 3 - // their last finalized is: 3 - // => we can't provide any additional justifications - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert_eq!(proof_of_4, None); + for block in to_finalize { + client.finalize_block(BlockId::Number(*block), None).unwrap(); + } + (client, backend, blocks) } - #[test] - fn finality_proof_fails_for_non_canonical_block() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(side_header(4).hash(), side_header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(second_side_header(5).hash(), second_side_header(5), None, None, NewBlockState::Best) + fn store_best_justification(client: &TestClient, just: &GrandpaJustification) { + client + .lock_import_and_run(|import_op| { + crate::aux_schema::update_best_justification(just, |insert| { + apply_aux(import_op, insert, &[]) + }) + }) .unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(vec![5]), None, 
NewBlockState::Final).unwrap(); - - // chain is 1 -> 2 -> 3 -> 4 -> 5 - // \> 4' -> 5' - // and the best finalized is 5 - // => when requesting for (4'; 5'], error is returned - prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - side_header(4).hash(), - second_side_header(5).hash(), - ).unwrap_err(); } #[test] - fn finality_proof_is_none_if_no_justification_known() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); - - // block 4 is finalized without justification - // => we can't prove finality - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("authorities didn't change => ProveAuthorities won't be called"), - ), - 0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert_eq!(proof_of_4, None); - } + fn finality_proof_fails_if_no_more_last_finalized_blocks() { + let (_, backend, _) = test_blockchain(6, &[4]); + let authority_set_changes = AuthoritySetChanges::empty(); - #[test] - fn finality_proof_works_without_authorities_change() { - let blockchain = test_blockchain(); - let authorities = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; - let just4 = TestJustification((0, authorities.clone()), vec![4]).encode(); - let just5 = TestJustification((0, authorities.clone()), vec![5]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); - - // blocks 4 && 5 are finalized with justification - // => since authorities are the same, we only need justification for 5 - let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(authorities.clone()), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(5).hash(), - ).unwrap().unwrap()[..]).unwrap(); - assert_eq!(proof_of_5, vec![FinalityProofFragment { - block: header(5).hash(), - justification: just5, - unknown_headers: Vec::new(), - authorities_proof: None, - }]); + // The last finalized block is 4, so we cannot provide further justifications. 
+ let proof_of_5 = prove_finality(&*backend, authority_set_changes, 5); + assert!(matches!(proof_of_5, Err(FinalityProofError::BlockNotYetFinalized))); } #[test] - fn finality_proof_finalized_earlier_block_if_no_justification_for_target_is_known() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), Some(vec![4]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); - - // block 4 is finalized with justification + we request for finality of 5 - // => we can't prove finality of 5, but providing finality for 4 is still useful for requester - let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(5).hash(), - ).unwrap().unwrap()[..]).unwrap(); - assert_eq!(proof_of_5, vec![FinalityProofFragment { - block: header(4).hash(), - justification: vec![4], - unknown_headers: Vec::new(), - authorities_proof: None, - }]); - } + fn finality_proof_is_none_if_no_justification_known() { + let (_, backend, _) = test_blockchain(6, &[4]); - #[test] - fn finality_proof_works_with_authorities_change() { - let blockchain = test_blockchain(); - let auth3 = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - let auth5 = vec![(AuthorityId::from_slice(&[5u8; 32]), 1u64)]; - let auth7 = vec![(AuthorityId::from_slice(&[7u8; 32]), 1u64)]; - let just4 = TestJustification((0, auth3.clone()), vec![4]).encode(); - let just5 = TestJustification((0, auth3.clone()), vec![5]).encode(); - let just7 = TestJustification((1, auth5.clone()), vec![7]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final).unwrap(); - - // when querying for finality of 6, we assume that the #3 is the last block known to the requester - // => since we only have justification for #7, we provide #7 - let proof_of_6: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |block_id| match block_id { - BlockId::Hash(h) if h == header(3).hash() => Ok(auth3.clone()), - BlockId::Number(4) => Ok(auth3.clone()), - BlockId::Number(5) => Ok(auth5.clone()), - BlockId::Number(7) => Ok(auth7.clone()), - _ => unreachable!("no other authorities should be fetched: {:?}", block_id), - }, - |block_id| match block_id { - BlockId::Number(5) => Ok(StorageProof::new(vec![vec![50]])), - BlockId::Number(7) => Ok(StorageProof::new(vec![vec![70]])), - _ => unreachable!("no other authorities should be proved: {:?}", block_id), - }, - ), - 0, - header(3).hash(), - header(6).hash(), - ).unwrap().unwrap()[..]).unwrap(); - // initial authorities set (which start acting from #0) is [3; 32] - assert_eq!(proof_of_6, vec![ - // new authorities set starts acting from #5 => we do not provide fragment for #4 - // first fragment provides justification for #5 && authorities set that starts acting from #5 - FinalityProofFragment { - block: header(5).hash(), - justification: just5, - unknown_headers: Vec::new(), - authorities_proof: 
Some(StorageProof::new(vec![vec![50]])), - }, - // last fragment provides justification for #7 && unknown#7 - FinalityProofFragment { - block: header(7).hash(), - justification: just7.clone(), - unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::new(vec![vec![70]])), - }, - ]); - - // now let's verify finality proof - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - let effects = check_finality_proof::<_, _, TestJustification>( - &blockchain, - 0, - auth3, - &ClosureAuthoritySetForFinalityChecker( - |hash, _header, proof: StorageProof| match proof.clone().iter_nodes().next().map(|x| x[0]) { - Some(50) => Ok(auth5.clone()), - Some(70) => Ok(auth7.clone()), - _ => unreachable!("no other proofs should be checked: {}", hash), - } - ), - proof_of_6.encode(), - ).unwrap(); - - assert_eq!(effects, FinalityEffects { - headers_to_import: vec![header(7)], - block: header(7).hash(), - justification: TestJustification((1, auth5.clone()), vec![7]).encode(), - new_set_id: 2, - new_authorities: auth7, - }); + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 4); + + // Block 4 is finalized without justification + // => we can't prove finality of 3 + let proof_of_3 = prove_finality(&*backend, authority_set_changes, 3).unwrap(); + assert_eq!(proof_of_3, None); } #[test] fn finality_proof_check_fails_when_proof_decode_fails() { - let blockchain = test_blockchain(); - - // when we can't decode proof from Vec - check_finality_proof::<_, _, TestJustification>( - &blockchain, + // When we can't decode proof from Vec + check_finality_proof::( 1, vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), vec![42], - ).unwrap_err(); + ) + .unwrap_err(); } #[test] fn finality_proof_check_fails_when_proof_is_empty() { - let blockchain = test_blockchain(); - - // when decoded proof has zero length - check_finality_proof::<_, _, TestJustification>( - &blockchain, + // When decoded proof has zero length + check_finality_proof::( 1, vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - Vec::::new().encode(), - ).unwrap_err(); + Vec::>::new().encode(), + ) + .unwrap_err(); } #[test] - fn finality_proof_check_fails_when_intermediate_fragment_has_unknown_headers() { - let blockchain = test_blockchain(); + fn finality_proof_check_fails_with_incomplete_justification() { + let (client, _, blocks) = test_blockchain(8, &[4, 5, 8]); + + // Create a commit without precommits + let commit = finality_grandpa::Commit { + target_hash: blocks[7].hash(), + target_number: *blocks[7].header().number(), + precommits: Vec::new(), + }; + let grandpa_just = GrandpaJustification::from_commit(&client, 8, commit).unwrap(); + + let finality_proof = FinalityProof { + block: header(2).hash(), + justification: grandpa_just.encode(), + unknown_headers: Vec::new(), + }; - // when intermediate (#0) fragment has non-empty unknown headers - let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - check_finality_proof::<_, _, TestJustification>( - &blockchain, + check_finality_proof::( 1, - authorities.clone(), 
- &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - vec![FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((0, authorities.clone()), vec![7]).encode(), - unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::new(vec![vec![42]])), - }, FinalityProofFragment { - block: header(5).hash(), - justification: TestJustification((0, authorities), vec![8]).encode(), - unknown_headers: vec![header(5)], - authorities_proof: None, - }].encode(), - ).unwrap_err(); + vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], + finality_proof.encode(), + ) + .unwrap_err(); + } + + fn create_commit( + block: Block, + round: u64, + set_id: SetId, + auth: &[Ed25519Keyring], + ) -> finality_grandpa::Commit + where + Id: From, + S: From, + { + let mut precommits = Vec::new(); + + for voter in auth { + let precommit = finality_grandpa::Precommit { + target_hash: block.hash(), + target_number: *block.header().number(), + }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_finality_grandpa::localized_payload(round, set_id, &msg); + let signature = voter.sign(&encoded[..]).into(); + + let signed_precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: voter.public().into(), + }; + precommits.push(signed_precommit); + } + + finality_grandpa::Commit { + target_hash: block.hash(), + target_number: *block.header().number(), + precommits, + } } #[test] - fn finality_proof_check_fails_when_intermediate_fragment_has_no_authorities_proof() { - let blockchain = test_blockchain(); + fn finality_proof_check_works_with_correct_justification() { + let (client, _, blocks) = test_blockchain(8, &[4, 5, 8]); + + let alice = Ed25519Keyring::Alice; + let set_id = 1; + let round = 8; + let commit = create_commit(blocks[7].clone(), round, set_id, &[alice]); + let grandpa_just = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + + let finality_proof = FinalityProof { + block: header(2).hash(), + justification: grandpa_just.encode(), + unknown_headers: Vec::new(), + }; + assert_eq!( + finality_proof, + check_finality_proof::( + set_id, + vec![(alice.public().into(), 1u64)], + finality_proof.encode(), + ) + .unwrap(), + ); + } - // when intermediate (#0) fragment has empty authorities proof - let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - vec![FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((0, authorities.clone()), vec![7]).encode(), - unknown_headers: Vec::new(), - authorities_proof: None, - }, FinalityProofFragment { - block: header(5).hash(), - justification: TestJustification((0, authorities), vec![8]).encode(), - unknown_headers: vec![header(5)], - authorities_proof: None, - }].encode(), - ).unwrap_err(); + #[test] + fn finality_proof_using_authority_set_changes_fails_with_undefined_start() { + let (_, backend, _) = test_blockchain(8, &[4, 5, 8]); + + // We have stored the correct block number for the relevant set, but as we are missing the + // block for the preceding set the start is not well-defined. 
+ let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(1, 8); + + let proof_of_6 = prove_finality(&*backend, authority_set_changes, 6); + assert!(matches!(proof_of_6, Err(FinalityProofError::BlockNotInAuthoritySetChanges))); } #[test] - fn finality_proof_check_works() { - let blockchain = test_blockchain(); + fn finality_proof_using_authority_set_changes_works() { + let (client, backend, blocks) = test_blockchain(8, &[4, 5]); + let block7 = &blocks[6]; + let block8 = &blocks[7]; - let initial_authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - let next_authorities = vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)]; - let effects = check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - initial_authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| Ok(next_authorities.clone())), - vec![FinalityProofFragment { - block: header(2).hash(), - justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![42]])), - }, FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), - unknown_headers: vec![header(4)], - authorities_proof: None, - }].encode(), - ).unwrap(); - assert_eq!(effects, FinalityEffects { - headers_to_import: vec![header(4)], - block: header(4).hash(), - justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), - new_set_id: 2, - new_authorities: vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)], - }); + let round = 8; + let commit = create_commit(block8.clone(), round, 1, &[Ed25519Keyring::Alice]); + let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + + client + .finalize_block(BlockId::Number(8), Some((ID, grandpa_just8.encode().clone()))) + .unwrap(); + + // Authority set change at block 8, so the justification stored there will be used in the + // FinalityProof for block 6 + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); + authority_set_changes.append(1, 8); + + let proof_of_6: FinalityProof = Decode::decode( + &mut &prove_finality(&*backend, authority_set_changes.clone(), 6).unwrap().unwrap()[..], + ) + .unwrap(); + assert_eq!( + proof_of_6, + FinalityProof { + block: block8.hash(), + justification: grandpa_just8.encode(), + unknown_headers: vec![block7.header().clone(), block8.header().clone()], + }, + ); + } + + #[test] + fn finality_proof_in_last_set_fails_without_latest() { + let (_, backend, _) = test_blockchain(8, &[4, 5, 8]); + + // No recent authority set change, so we are in the latest set, and we will try to pickup + // the best stored justification, for which there is none in this case. 
+ let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); + + assert!(matches!(prove_finality(&*backend, authority_set_changes, 6), Ok(None))); } #[test] - fn finality_proof_is_none_if_first_justification_is_generated_by_unknown_set() { - // this is the case for forced change: set_id has been forcibly increased on full node - // and light node missed that - // => justification verification will fail on light node anyways, so we do not return - // finality proof at all - let blockchain = test_blockchain(); - let just4 = TestJustification((0, vec![(AuthorityId::from_slice(&[42u8; 32]), 1u64)]), vec![4]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert!(proof_of_4.is_none()); + fn finality_proof_in_last_set_using_latest_justification_works() { + let (client, backend, blocks) = test_blockchain(8, &[4, 5, 8]); + let block7 = &blocks[6]; + let block8 = &blocks[7]; + + let round = 8; + let commit = create_commit(block8.clone(), round, 1, &[Ed25519Keyring::Alice]); + let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + store_best_justification(&client, &grandpa_just8); + + // No recent authority set change, so we are in the latest set, and will pickup the best + // stored justification + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); + + let proof_of_6: FinalityProof = Decode::decode( + &mut &prove_finality(&*backend, authority_set_changes, 6).unwrap().unwrap()[..], + ) + .unwrap(); + assert_eq!( + proof_of_6, + FinalityProof { + block: block8.hash(), + justification: grandpa_just8.encode(), + unknown_headers: vec![block7.header().clone(), block8.header().clone()], + } + ); } } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 04df95a3187e1..f663bfe94afdf 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,36 +16,36 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
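// Illustrative sketch, recapping the fixtures the tests above build by hand:
// how `AuthoritySetChanges` is populated and then queried by `prove_finality`.
// `authority_set_changes_sketch` is a hypothetical helper, and `u64` stands in
// for the block number type of the test runtime.
fn authority_set_changes_sketch() {
	let mut changes = AuthoritySetChanges::<u64>::empty();
	// Set 0 was concluded by the justification at block 5, set 1 at block 8.
	changes.append(0, 5);
	changes.append(1, 8);
	// `get_set_id(block)` then classifies a block as finalized by a recorded
	// set (`Set`), by the live set (`Latest`), or as not covered (`Unknown`) --
	// the three-way match exercised by the tests above.
}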
-use std::{sync::Arc, collections::HashMap}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use log::debug; -use parity_scale_codec::Encode; -use parking_lot::RwLockWriteGuard; +use parity_scale_codec::{Decode, Encode}; -use sp_blockchain::{BlockStatus, well_known_cache_keys}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; -use sp_utils::mpsc::TracingUnboundedSender; -use sp_api::{TransactionFor}; - -use sp_consensus::{ - BlockImport, Error as ConsensusError, - BlockCheckParams, BlockImportParams, BlockOrigin, ImportResult, JustificationImport, - SelectChain, +use sc_consensus::{ + shared_data::{SharedDataLocked, SharedDataLockedUpgradable}, + BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, }; -use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; -use sp_runtime::Justification; -use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; -use sp_runtime::traits::{ - Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero, +use sc_telemetry::TelemetryHandle; +use sc_utils::mpsc::TracingUnboundedSender; +use sp_api::{Core, RuntimeApiInfo, TransactionFor}; +use sp_blockchain::{well_known_cache_keys, BlockStatus}; +use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; +use sp_core::hashing::twox_128; +use sp_finality_grandpa::{ConsensusLog, GrandpaApi, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}, + Justification, }; -use crate::{Error, CommandOrError, NewAuthoritySet, VoterCommand}; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingChange}; -use crate::consensus_changes::SharedConsensusChanges; -use crate::environment::finalize_block; -use crate::justification::GrandpaJustification; -use crate::notification::GrandpaJustificationSender; -use std::marker::PhantomData; +use crate::{ + authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}, + environment::finalize_block, + justification::GrandpaJustification, + notification::GrandpaJustificationSender, + AuthoritySetChanges, ClientForGrandpa, CommandOrError, Error, NewAuthoritySet, VoterCommand, +}; /// A block-import handler for GRANDPA. 
/// @@ -61,14 +61,14 @@ pub struct GrandpaBlockImport { select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: HashMap>>, justification_sender: GrandpaJustificationSender, + telemetry: Option, _phantom: PhantomData, } -impl Clone for - GrandpaBlockImport +impl Clone + for GrandpaBlockImport { fn clone(&self) -> Self { GrandpaBlockImport { @@ -76,40 +76,47 @@ impl Clone for select_chain: self.select_chain.clone(), authority_set: self.authority_set.clone(), send_voter_commands: self.send_voter_commands.clone(), - consensus_changes: self.consensus_changes.clone(), authority_set_hard_forks: self.authority_set_hard_forks.clone(), justification_sender: self.justification_sender.clone(), + telemetry: self.telemetry.clone(), _phantom: PhantomData, } } } +#[async_trait::async_trait] impl JustificationImport - for GrandpaBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend, - Client: crate::ClientForGrandpa, - SC: SelectChain, + for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend, + Client: ClientForGrandpa, + SC: SelectChain, { type Error = ConsensusError; - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { + async fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { let mut out = Vec::new(); let chain_info = self.inner.info(); - // request justifications for all pending changes for which change blocks have already been imported - let authorities = self.authority_set.inner().read(); - for pending_change in authorities.pending_changes() { + // request justifications for all pending changes for which change blocks have already been + // imported + let pending_changes: Vec<_> = + self.authority_set.inner().pending_changes().cloned().collect(); + + for pending_change in pending_changes { if pending_change.delay_kind == DelayKind::Finalized && pending_change.effective_number() > chain_info.finalized_number && pending_change.effective_number() <= chain_info.best_number { let effective_block_hash = if !pending_change.delay.is_zero() { - self.select_chain.finality_target( - pending_change.canon_hash, - Some(pending_change.effective_number()), - ) + self.select_chain + .finality_target( + pending_change.canon_hash, + Some(pending_change.effective_number()), + ) + .await } else { Ok(Some(pending_change.canon_hash)) }; @@ -127,7 +134,7 @@ impl JustificationImport out } - fn import_justification( + async fn import_justification( &mut self, hash: Block::Hash, number: NumberFor, @@ -157,37 +164,39 @@ impl AppliedChanges { } } -struct PendingSetChanges<'a, Block: 'a + BlockT> { +struct PendingSetChanges { just_in_case: Option<( AuthoritySet>, - RwLockWriteGuard<'a, AuthoritySet>>, + SharedDataLockedUpgradable>>, )>, applied_changes: AppliedChanges>, do_pause: bool, } -impl<'a, Block: 'a + BlockT> PendingSetChanges<'a, Block> { +impl PendingSetChanges { // revert the pending set change explicitly. 
- fn revert(self) { } + fn revert(self) {} fn defuse(mut self) -> (AppliedChanges>, bool) { self.just_in_case = None; - let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); + let applied_changes = std::mem::replace(&mut self.applied_changes, AppliedChanges::None); (applied_changes, self.do_pause) } } -impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { +impl Drop for PendingSetChanges { fn drop(&mut self) { if let Some((old_set, mut authorities)) = self.just_in_case.take() { - *authorities = old_set; + *authorities.upgrade() = old_set; } } } -fn find_scheduled_change(header: &B::Header) - -> Option>> -{ +/// Checks the given header for a consensus digest signalling a **standard** scheduled change and +/// extracts it. +pub fn find_scheduled_change( + header: &B::Header, +) -> Option>> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); let filter_log = |log: ConsensusLog>| match log { @@ -200,9 +209,11 @@ fn find_scheduled_change(header: &B::Header) header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } -fn find_forced_change(header: &B::Header) - -> Option<(NumberFor, ScheduledChange>)> -{ +/// Checks the given header for a consensus digest signalling a **forced** scheduled change and +/// extracts it. +pub fn find_forced_change( + header: &B::Header, +) -> Option<(NumberFor, ScheduledChange>)> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); let filter_log = |log: ConsensusLog>| match log { @@ -215,13 +226,16 @@ fn find_forced_change(header: &B::Header) header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } -impl - GrandpaBlockImport +impl GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, DigestFor: Encode, BE: Backend, - Client: crate::ClientForGrandpa, + Client: ClientForGrandpa, + Client::Api: GrandpaApi, + for<'a> &'a Client: + BlockImport>, + TransactionFor: 'static, { // check for a new authority set change. fn check_new_change( @@ -231,7 +245,7 @@ where ) -> Option>> { // check for forced authority set hard forks if let Some(change) = self.authority_set_hard_forks.get(&hash) { - return Some(change.clone()); + return Some(change.clone()) } // check for forced change. @@ -242,7 +256,7 @@ where canon_height: *header.number(), canon_hash: hash, delay_kind: DelayKind::Best { median_last_finalized }, - }); + }) } // check normal scheduled change. @@ -265,33 +279,33 @@ where // when we update the authorities, we need to hold the lock // until the block is written to prevent a race if we need to restore // the old authority set on error or panic. - struct InnerGuard<'a, T: 'a> { - old: Option, - guard: Option>, + struct InnerGuard<'a, H, N> { + old: Option>, + guard: Option>>, } - impl<'a, T: 'a> InnerGuard<'a, T> { - fn as_mut(&mut self) -> &mut T { + impl<'a, H, N> InnerGuard<'a, H, N> { + fn as_mut(&mut self) -> &mut AuthoritySet { &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") } - fn set_old(&mut self, old: T) { + fn set_old(&mut self, old: AuthoritySet) { if self.old.is_none() { // ignore "newer" old changes. 
self.old = Some(old); } } - fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { - if let Some(old) = self.old.take() { - Some((old, self.guard.take().expect("only taken on deconstruction; qed"))) - } else { - None - } + fn consume( + mut self, + ) -> Option<(AuthoritySet, SharedDataLocked<'a, AuthoritySet>)> { + self.old + .take() + .map(|old| (old, self.guard.take().expect("only taken on deconstruction; qed"))) } } - impl<'a, T: 'a> Drop for InnerGuard<'a, T> { + impl<'a, H, N> Drop for InnerGuard<'a, H, N> { fn drop(&mut self) { if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { *guard = old; @@ -300,20 +314,14 @@ where } let number = *(block.header.number()); - let maybe_change = self.check_new_change( - &block.header, - hash, - ); + let maybe_change = self.check_new_change(&block.header, hash); // returns a function for checking whether a block is a descendent of another // consistent with querying client directly after importing the block. let parent_hash = *block.header.parent_hash(); let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); - let mut guard = InnerGuard { - guard: Some(self.authority_set.inner().write()), - old: None, - }; + let mut guard = InnerGuard { guard: Some(self.authority_set.inner_locked()), old: None }; // whether to pause the old authority set -- happens after import // of a forced change block. @@ -328,16 +336,22 @@ where do_pause = true; } - guard.as_mut().add_pending_change( - change, - &is_descendent_of, - ).map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + guard + .as_mut() + .add_pending_change(change, &is_descendent_of) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; } let applied_changes = { let forced_change_set = guard .as_mut() - .apply_forced_changes(hash, number, &is_descendent_of, initial_sync) + .apply_forced_changes( + hash, + number, + &is_descendent_of, + initial_sync, + self.telemetry.clone(), + ) .map_err(|e| ConsensusError::ClientImport(e.to_string())) .map_err(ConsensusError::from)?; @@ -354,8 +368,10 @@ where let canon_hash = self.inner.header(BlockId::Number(canon_number)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .expect("the given block number is less or equal than the current best finalized number; \ - current best finalized number must exist in chain; qed.") + .expect( + "the given block number is less or equal than the current best finalized number; \ + current best finalized number must exist in chain; qed." + ) .hash(); NewAuthoritySet { @@ -370,7 +386,9 @@ where AppliedChanges::Forced(new_authorities) } else { - let did_standard = guard.as_mut().enacts_standard_change(hash, number, &is_descendent_of) + let did_standard = guard + .as_mut() + .enacts_standard_change(hash, number, &is_descendent_of) .map_err(|e| ConsensusError::ClientImport(e.to_string())) .map_err(ConsensusError::from)?; @@ -394,29 +412,122 @@ where crate::aux_schema::update_authority_set::( authorities, authorities_change, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) + |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }, ); } + let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); + Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) } + + /// Read current set id form a given state. 
+ fn current_set_id(&self, id: &BlockId) -> Result { + let runtime_version = self.inner.runtime_api().version(id).map_err(|e| { + ConsensusError::ClientImport(format!( + "Unable to retrieve current runtime version. {}", + e + )) + })?; + if runtime_version + .api_version(&>::ID) + .map_or(false, |v| v < 3) + { + // The new API is not supported in this runtime. Try reading directly from storage. + // This code may be removed once warp sync to an old runtime is no longer needed. + for prefix in ["GrandpaFinality", "Grandpa"] { + let k = [twox_128(prefix.as_bytes()), twox_128(b"CurrentSetId")].concat(); + if let Ok(Some(id)) = + self.inner.storage(&id, &sc_client_api::StorageKey(k.to_vec())) + { + if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { + return Ok(id) + } + } + } + Err(ConsensusError::ClientImport("Unable to retrieve current set id.".into())) + } else { + self.inner + .runtime_api() + .current_set_id(&id) + .map_err(|e| ConsensusError::ClientImport(e.to_string())) + } + } + + /// Import whole new state and reset authority set. + async fn import_state( + &mut self, + mut block: BlockImportParams>, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + // Force imported state finality. + block.finalized = true; + let import_result = (&*self.inner).import_block(block, new_cache).await; + match import_result { + Ok(ImportResult::Imported(aux)) => { + // We've just imported a new state. We trust the sync module has verified + // finality proofs and that the state is correct and final. + // So we can read the authority list and set id from the state. + self.authority_set_hard_forks.clear(); + let block_id = BlockId::hash(hash); + let authorities = self + .inner + .runtime_api() + .grandpa_authorities(&block_id) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let set_id = self.current_set_id(&block_id)?; + let authority_set = AuthoritySet::new( + authorities.clone(), + set_id, + fork_tree::ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .ok_or_else(|| ConsensusError::ClientImport("Invalid authority list".into()))?; + *self.authority_set.inner_locked() = authority_set.clone(); + + crate::aux_schema::update_authority_set::( + &authority_set, + None, + |insert| self.inner.insert_aux(insert, []), + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let new_set = + NewAuthoritySet { canon_number: number, canon_hash: hash, set_id, authorities }; + let _ = self + .send_voter_commands + .unbounded_send(VoterCommand::ChangeAuthorities(new_set)); + Ok(ImportResult::Imported(aux)) + }, + Ok(r) => Ok(r), + Err(e) => Err(ConsensusError::ClientImport(e.to_string())), + } + } } -impl BlockImport - for GrandpaBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend, - Client: crate::ClientForGrandpa, - for<'a> &'a Client: - BlockImport>, +#[async_trait::async_trait] +impl BlockImport for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend, + Client: ClientForGrandpa, + Client::Api: GrandpaApi, + for<'a> &'a Client: + BlockImport>, + TransactionFor: 'static, + SC: Send, { type Error = ConsensusError; type Transaction = TransactionFor; - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, @@ -427,20 +538,27 @@ impl BlockImport // early exit if block already in chain, otherwise the check for // authority changes will error when trying to re-import a 
change block match self.inner.status(BlockId::Hash(hash)) { - Ok(BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + Ok(BlockStatus::InChain) => { + // Strip justifications when re-importing an existing block. + let _justifications = block.justifications.take(); + return (&*self.inner).import_block(block, new_cache).await + }, Ok(BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } + if block.with_state() { + return self.import_state(block, new_cache).await + } + // on initial sync we will restrict logging under info to avoid spam. let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; // we don't want to finalize on `inner.import_block` - let mut justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); - let import_result = (&*self.inner).import_block(block, new_cache); + let mut justifications = block.justifications.take(); + let import_result = (&*self.inner).import_block(block, new_cache).await; let mut imported_aux = { match import_result { @@ -452,7 +570,7 @@ impl BlockImport r, ); pending_changes.revert(); - return Ok(r); + return Ok(r) }, Err(e) => { debug!( @@ -461,7 +579,7 @@ impl BlockImport e, ); pending_changes.revert(); - return Err(ConsensusError::ClientImport(e.to_string())); + return Err(ConsensusError::ClientImport(e.to_string())) }, } }; @@ -470,9 +588,9 @@ impl BlockImport // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. if do_pause { - let _ = self.send_voter_commands.unbounded_send( - VoterCommand::Pause("Forced change scheduled after inactivity".to_string()) - ); + let _ = self.send_voter_commands.unbounded_send(VoterCommand::Pause( + "Forced change scheduled after inactivity".to_string(), + )); } let needs_justification = applied_changes.needs_justification(); @@ -490,7 +608,8 @@ impl BlockImport // they should import the block and discard the justification, and they will // then request a justification from sync if it's necessary (which they should // then be able to successfully validate). - let _ = self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); + let _ = + self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); // we must clear all pending justifications requests, presumably they won't be // finalized hence why this forced changes was triggered @@ -501,23 +620,26 @@ impl BlockImport // need to apply first, drop any justification that might have been provided with // the block to make sure we request them from `sync` which will ensure they'll be // applied in-order. 
- justification.take(); + justifications.take(); }, _ => {}, } - match justification { + let grandpa_justification = + justifications.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)); + + match grandpa_justification { Some(justification) => { let import_res = self.import_justification( hash, number, - justification, + (GRANDPA_ENGINE_ID, justification), needs_justification, initial_sync, ); import_res.unwrap_or_else(|err| { - if needs_justification || enacts_consensus_change { + if needs_justification { debug!(target: "afg", "Imported block #{} that enacts authority set change with \ invalid justification: {:?}, requesting justification from peers.", number, err); imported_aux.bad_justification = true; @@ -525,7 +647,7 @@ impl BlockImport } }); }, - None => { + None => if needs_justification { debug!( target: "afg", @@ -534,24 +656,17 @@ impl BlockImport ); imported_aux.needs_justification = true; - } - - // we have imported block with consensus data changes, but without justification - // => remember to create justification when next block will be finalized - if enacts_consensus_change { - self.consensus_changes.lock().note_change((number, hash)); - } - } + }, } Ok(ImportResult::Imported(imported_aux)) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block) + self.inner.check_block(block).await } } @@ -561,9 +676,9 @@ impl GrandpaBlockImport>, send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, authority_set_hard_forks: Vec<(SetId, PendingChange>)>, justification_sender: GrandpaJustificationSender, + telemetry: Option, ) -> GrandpaBlockImport { // check for and apply any forced authority set hard fork that applies // to the *current* authority set. @@ -571,8 +686,7 @@ impl GrandpaBlockImport GrandpaBlockImport GrandpaBlockImport GrandpaBlockImport GrandpaBlockImport where BE: Backend, - Client: crate::ClientForGrandpa, + Client: ClientForGrandpa, NumberFor: finality_grandpa::BlockNumberOps, { /// Import a block justification and finalize the block. @@ -631,8 +740,17 @@ where enacts_change: bool, initial_sync: bool, ) -> Result<(), ConsensusError> { + if justification.0 != GRANDPA_ENGINE_ID { + // TODO: the import queue needs to be refactored to be able dispatch to the correct + // `JustificationImport` instance based on `ConsensusEngineId`, or we need to build a + // justification import pipeline similar to what we do for `BlockImport`. In the + // meantime we'll just drop the justification, since this is only used for BEEFY which + // is still WIP. 
+ return Ok(()) + } + let justification = GrandpaJustification::decode_and_verify_finalizes( - &justification, + &justification.1, (hash, number), self.authority_set.set_id(), &self.authority_set.current_authorities(), @@ -646,18 +764,19 @@ where let result = finalize_block( self.inner.clone(), &self.authority_set, - &self.consensus_changes, None, hash, number, justification.into(), initial_sync, Some(&self.justification_sender), + self.telemetry.clone(), ); match result { Err(CommandOrError::VoterCommand(command)) => { - afg_log!(initial_sync, + afg_log!( + initial_sync, "👴 Imported justification for block #{} that triggers \ command {}, signaling voter.", number, @@ -667,7 +786,7 @@ where // send the command to the voter let _ = self.send_voter_commands.unbounded_send(command); }, - Err(CommandOrError::Error(e)) => { + Err(CommandOrError::Error(e)) => return Err(match e { Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), Error::Network(error) => ConsensusError::ClientImport(error), @@ -676,10 +795,13 @@ where Error::Safety(error) => ConsensusError::ClientImport(error), Error::Signing(error) => ConsensusError::ClientImport(error), Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), - }); - }, + Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), + }), Ok(_) => { - assert!(!enacts_change, "returns Ok when no authority set change should be enacted; qed;"); + assert!( + !enacts_change, + "returns Ok when no authority set change should be enacted; qed;" + ); }, } diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index d5ca92d50e937..a852c74d9d1a4 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,18 +16,21 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
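Justification import above verifies the decoded commit against the current authority set. For intuition about what that verification demands, here is a self-contained sketch of the supermajority weight a GRANDPA voter set requires, assuming the usual fault bound f = (total - 1) / 3; the real arithmetic lives in the `finality-grandpa` crate's `VoterSet`:

```rust
// A sketch of the supermajority bound a weighted GRANDPA voter set implies,
// for intuition only; not the `finality-grandpa` crate implementation.
fn supermajority_threshold(weights: &[u64]) -> u64 {
    let total: u64 = weights.iter().sum();
    // Tolerate at most f = (total - 1) / 3 faulty weight.
    let faulty = total.saturating_sub(1) / 3;
    // A commit is only valid once precommit weight reaches total - f.
    total - faulty
}

fn main() {
    // Four equally weighted voters tolerate one fault: 3-of-4 finalizes.
    assert_eq!(supermajority_threshold(&[1, 1, 1, 1]), 3);
    // Weighted sets work the same way on total weight.
    assert_eq!(supermajority_threshold(&[2, 2, 2]), 5);
}
```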
-use std::collections::{HashMap, HashSet}; -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; +use finality_grandpa::{voter_set::VoterSet, Error as GrandpaError}; +use parity_scale_codec::{Decode, Encode}; use sp_blockchain::{Error as ClientError, HeaderBackend}; -use parity_scale_codec::{Encode, Decode}; -use finality_grandpa::voter_set::VoterSet; -use finality_grandpa::{Error as GrandpaError}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT, Header as HeaderT}; use sp_finality_grandpa::AuthorityId; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, +}; -use crate::{Commit, Error}; +use crate::{AuthorityList, Commit, Error}; /// A GRANDPA justification for block finality, it includes a commit message and /// an ancestry proof including all headers routing all precommit target blocks @@ -51,7 +54,8 @@ impl GrandpaJustification { client: &Arc, round: u64, commit: Commit, - ) -> Result, Error> where + ) -> Result, Error> + where C: HeaderBackend, { let mut votes_ancestries_hashes = HashSet::new(); @@ -65,12 +69,14 @@ impl GrandpaJustification { for signed in commit.precommits.iter() { let mut current_hash = signed.precommit.target_hash; loop { - if current_hash == commit.target_hash { break; } + if current_hash == commit.target_hash { + break + } match client.header(BlockId::Hash(current_hash))? { Some(current_header) => { if *current_header.number() <= commit.target_number { - return error(); + return error() } let parent_hash = *current_header.parent_hash(); @@ -94,23 +100,40 @@ impl GrandpaJustification { finalized_target: (Block::Hash, NumberFor), set_id: u64, voters: &VoterSet, - ) -> Result, ClientError> where + ) -> Result, ClientError> + where NumberFor: finality_grandpa::BlockNumberOps, { - let justification = GrandpaJustification::::decode(&mut &*encoded) .map_err(|_| ClientError::JustificationDecode)?; - if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { + if (justification.commit.target_hash, justification.commit.target_number) != + finalized_target + { let msg = "invalid commit target in grandpa justification".to_string(); Err(ClientError::BadJustification(msg)) } else { - justification.verify(set_id, voters).map(|_| justification) + justification.verify_with_voter_set(set_id, voters).map(|_| justification) } } /// Validate the commit and the votes' ancestry proofs. - pub(crate) fn verify(&self, set_id: u64, voters: &VoterSet) -> Result<(), ClientError> + pub fn verify(&self, set_id: u64, authorities: &AuthorityList) -> Result<(), ClientError> + where + NumberFor: finality_grandpa::BlockNumberOps, + { + let voters = VoterSet::new(authorities.iter().cloned()) + .ok_or(ClientError::Consensus(sp_consensus::Error::InvalidAuthoritiesSet))?; + + self.verify_with_voter_set(set_id, &voters) + } + + /// Validate the commit and the votes' ancestry proofs. 
+ pub(crate) fn verify_with_voter_set( + &self, + set_id: u64, + voters: &VoterSet<AuthorityId>, + ) -> Result<(), ClientError> where NumberFor<Block>: finality_grandpa::BlockNumberOps, { @@ -118,16 +141,12 @@ impl<Block: BlockT> GrandpaJustification<Block> { let ancestry_chain = AncestryChain::<Block>::new(&self.votes_ancestries); - match finality_grandpa::validate_commit( - &self.commit, - voters, - &ancestry_chain, - ) { + match finality_grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { Ok(ref result) if result.ghost().is_some() => {}, _ => { let msg = "invalid commit in grandpa justification".to_string(); - return Err(ClientError::BadJustification(msg)); - } + return Err(ClientError::BadJustification(msg)) + }, } let mut buf = Vec::new(); @@ -142,40 +161,47 @@ impl<Block: BlockT> GrandpaJustification<Block> { &mut buf, ) { return Err(ClientError::BadJustification( - "invalid signature for precommit in grandpa justification".to_string())); + "invalid signature for precommit in grandpa justification".to_string(), + )) } if self.commit.target_hash == signed.precommit.target_hash { - continue; + continue } match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) { Ok(route) => { - // ancestry starts from parent hash but the precommit target hash has been visited + // ancestry starts from parent hash but the precommit target hash has been + // visited visited_hashes.insert(signed.precommit.target_hash); for hash in route { visited_hashes.insert(hash); } }, - _ => { + _ => return Err(ClientError::BadJustification( - "invalid precommit ancestry proof in grandpa justification".to_string())); - }, + "invalid precommit ancestry proof in grandpa justification".to_string(), + )), } } - let ancestry_hashes = self.votes_ancestries - .iter() - .map(|h: &Block::Header| h.hash()) - .collect(); + let ancestry_hashes = + self.votes_ancestries.iter().map(|h: &Block::Header| h.hash()).collect(); if visited_hashes != ancestry_hashes { return Err(ClientError::BadJustification( - "invalid precommit ancestries in grandpa justification with unused headers".to_string())); + "invalid precommit ancestries in grandpa justification with unused headers" + .to_string(), + )) } Ok(()) } + + /// The target block number and hash that this justification proves finality for. + pub fn target(&self) -> (NumberFor<Block>, Block::Hash) { + (self.commit.target_number, self.commit.target_hash) + } } /// A utility trait implementing `finality_grandpa::Chain` using a given set of headers.
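The ancestry checks above walk each precommit target back to the commit target through the supplied headers, then reject the justification if any supplied header went unused. A minimal sketch of that parent-hash walk, with headers reduced to a hash-to-parent map (hypothetical local types, not the crate's `AncestryChain`):

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the ancestry walk above: headers are reduced to a
// hash -> parent-hash map, and the route is the chain of intermediate
// ancestors between `block` and `base` (both excluded).
fn ancestry_route(headers: &HashMap<u64, u64>, base: u64, block: u64) -> Option<Vec<u64>> {
    let mut route = Vec::new();
    let mut current = block;
    loop {
        if current == base {
            break
        }
        // A header missing from the proof means there is no known route.
        current = *headers.get(&current)?;
        route.push(current);
    }
    // Drop the base itself, mirroring the `route.pop()` in `AncestryChain`.
    route.pop();
    Some(route)
}

fn main() {
    // Chain 1 <- 2 <- 3, stored as hash -> parent hash.
    let headers: HashMap<u64, u64> = [(3, 2), (2, 1)].into_iter().collect();
    // The route from 3 back to base 1 passes only through 2.
    assert_eq!(ancestry_route(&headers, 1, 3), Some(vec![2]));
}
```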
@@ -187,24 +213,28 @@ struct AncestryChain<Block: BlockT> { impl<Block: BlockT> AncestryChain<Block> { fn new(ancestry: &[Block::Header]) -> AncestryChain<Block> { - let ancestry: HashMap<_, _> = ancestry - .iter() - .cloned() - .map(|h: Block::Header| (h.hash(), h)) - .collect(); + let ancestry: HashMap<_, _> = + ancestry.iter().cloned().map(|h: Block::Header| (h.hash(), h)).collect(); AncestryChain { ancestry } } } -impl<Block: BlockT> finality_grandpa::Chain<Block::Hash, NumberFor<Block>> for AncestryChain<Block> where - NumberFor<Block>: finality_grandpa::BlockNumberOps +impl<Block: BlockT> finality_grandpa::Chain<Block::Hash, NumberFor<Block>> for AncestryChain<Block> +where + NumberFor<Block>: finality_grandpa::BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result<Vec<Block::Hash>, GrandpaError> { + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result<Vec<Block::Hash>, GrandpaError> { let mut route = Vec::new(); let mut current_hash = block; loop { - if current_hash == base { break; } + if current_hash == base { + break + } match self.ancestry.get(&current_hash) { Some(current_header) => { current_hash = *current_header.parent_hash(); @@ -217,8 +247,4 @@ impl<Block: BlockT> finality_grandpa::Chain<Block::Hash, NumberFor<Block>> for A Ok(route) } - - fn best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor<Block>)> { - None - } } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 6ab95d7eac970..452659ced6a70 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -56,41 +56,40 @@ #![warn(missing_docs)] -use futures::{ - prelude::*, - StreamExt, -}; +use futures::{prelude::*, StreamExt}; use log::{debug, error, info}; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::RwLock; +use prometheus_endpoint::{PrometheusError, Registry}; use sc_client_api::{ backend::{AuxStore, Backend}, - LockImportRun, BlockchainEvents, CallExecutor, - ExecutionStrategy, Finalizer, TransactionFor, ExecutorProvider, + BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun, + StorageProvider, TransactionFor, }; -use parity_scale_codec::{Decode, Encode}; -use prometheus_endpoint::{PrometheusError, Registry}; +use sc_consensus::BlockImport; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_api::ProvideRuntimeApi; -use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT, DigestFor, Zero}; -use sp_consensus::{SelectChain, BlockImport}; -use sp_core::{ - crypto::Public, -}; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sp_application_crypto::AppKey; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; -use sc_telemetry::{telemetry, CONSENSUS_INFO, CONSENSUS_DEBUG}; -use parking_lot::RwLock; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; +use sp_consensus::SelectChain; +use sp_core::crypto::Public; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestFor, NumberFor, Zero}, +}; -use finality_grandpa::Error as GrandpaError; -use finality_grandpa::{voter, voter_set::VoterSet}; pub use
finality_grandpa::BlockNumberOps; - -use std::{fmt, io}; -use std::sync::Arc; -use std::time::Duration; -use std::pin::Pin; -use std::task::{Poll, Context}; +use finality_grandpa::{voter, voter_set::VoterSet, Error as GrandpaError}; + +use std::{ + fmt, io, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; // utility logging macro that takes as first argument a conditional to // decide whether to log under debug or info level (useful to restrict @@ -112,33 +111,34 @@ macro_rules! afg_log { mod authorities; mod aux_schema; mod communication; -mod consensus_changes; mod environment; mod finality_proof; mod import; mod justification; -mod light_import; mod notification; mod observer; mod until_imported; mod voting_rule; +pub mod warp_proof; -pub use authorities::{SharedAuthoritySet, AuthoritySet}; -pub use finality_proof::{FinalityProofFragment, FinalityProofProvider, StorageAndProofProvider}; -pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream}; -pub use import::GrandpaBlockImport; +pub use authorities::{AuthoritySet, AuthoritySetChanges, SharedAuthoritySet}; +pub use aux_schema::best_justification; +pub use finality_grandpa::voter::report; +pub use finality_proof::{FinalityProof, FinalityProofError, FinalityProofProvider}; +pub use import::{find_forced_change, find_scheduled_change, GrandpaBlockImport}; pub use justification::GrandpaJustification; -pub use light_import::{light_block_import, GrandpaLightBlockImport}; +pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream}; +pub use observer::run_grandpa_observer; pub use voting_rule::{ - BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder + BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRuleResult, + VotingRulesBuilder, }; -pub use finality_grandpa::voter::report; use aux_schema::PersistentData; +use communication::{Network as NetworkT, NetworkBridge}; use environment::{Environment, VoterSetState}; -use until_imported::UntilGlobalMessageBlocksImported; -use communication::{NetworkBridge, Network as NetworkT}; use sp_finality_grandpa::{AuthorityList, AuthoritySignature, SetId}; +use until_imported::UntilGlobalMessageBlocksImported; // Re-export these two because it's just so damn convenient. pub use sp_finality_grandpa::{AuthorityId, AuthorityPair, GrandpaApi, ScheduledChange}; @@ -159,7 +159,8 @@ pub type SignedMessage = finality_grandpa::SignedMessage< >; /// A primary propose message for this chain's block type. -pub type PrimaryPropose = finality_grandpa::PrimaryPropose<::Hash, NumberFor>; +pub type PrimaryPropose = + finality_grandpa::PrimaryPropose<::Hash, NumberFor>; /// A prevote message for this chain's block type. pub type Prevote = finality_grandpa::Prevote<::Hash, NumberFor>; /// A precommit message for this chain's block type. @@ -198,22 +199,14 @@ type CommunicationIn = finality_grandpa::voter::CommunicationIn< /// Global communication input stream for commits and catch up messages, with /// the hash type not being derived from the block, useful for forcing the hash /// to some type (e.g. `H256`) when the compiler can't do the inference. 
-type CommunicationInH = finality_grandpa::voter::CommunicationIn< - H, - NumberFor, - AuthoritySignature, - AuthorityId, ->; +type CommunicationInH = + finality_grandpa::voter::CommunicationIn, AuthoritySignature, AuthorityId>; /// Global communication sink for commits with the hash type not being derived /// from the block, useful for forcing the hash to some type (e.g. `H256`) when /// the compiler can't do the inference. -type CommunicationOutH = finality_grandpa::voter::CommunicationOut< - H, - NumberFor, - AuthoritySignature, - AuthorityId, ->; +type CommunicationOutH = + finality_grandpa::voter::CommunicationOut, AuthoritySignature, AuthorityId>; /// Shared voter state for querying. pub struct SharedVoterState { @@ -223,18 +216,14 @@ pub struct SharedVoterState { impl SharedVoterState { /// Create a new empty `SharedVoterState` instance. pub fn empty() -> Self { - Self { - inner: Arc::new(RwLock::new(None)), - } + Self { inner: Arc::new(RwLock::new(None)) } } fn reset( &self, voter_state: Box + Sync + Send>, ) -> Option<()> { - let mut shared_voter_state = self - .inner - .try_write_for(Duration::from_secs(1))?; + let mut shared_voter_state = self.inner.try_write_for(Duration::from_secs(1))?; *shared_voter_state = Some(voter_state); Some(()) @@ -266,12 +255,14 @@ pub struct Config { /// protocol (we will only issue catch-up requests to authorities when the /// observer protocol is enabled). pub observer_enabled: bool, - /// Whether the node is running as an authority (i.e. running the full GRANDPA protocol). - pub is_authority: bool, + /// The role of the local node (i.e. authority, full-node or light). + pub local_role: sc_network::config::Role, /// Some local identifier of the voter. pub name: Option, /// The keystore that manages the keys of this node. pub keystore: Option, + /// TelemetryHandle instance. + pub telemetry: Option, } impl Config { @@ -297,6 +288,8 @@ pub enum Error { Safety(String), /// A timer failed to fire. Timer(io::Error), + /// A runtime api request failed. + RuntimeApi(sp_api::ApiError), } impl From for Error { @@ -319,7 +312,8 @@ pub(crate) trait BlockStatus { fn block_number(&self, hash: Block::Hash) -> Result>, Error>; } -impl BlockStatus for Arc where +impl BlockStatus for Arc +where Client: HeaderBackend, NumberFor: BlockNumberOps, { @@ -331,26 +325,40 @@ impl BlockStatus for Arc where /// A trait that includes all the client functionalities grandpa requires. /// Ideally this would be a trait alias, we're not there yet. 
-/// tracking issue https://github.com/rust-lang/rust/issues/41517 +/// tracking issue <https://github.com/rust-lang/rust/issues/41517> pub trait ClientForGrandpa<Block, BE>: - LockImportRun<Block, BE> + Finalizer<Block, BE> + AuxStore - + HeaderMetadata<Block, Error = sp_blockchain::Error> + HeaderBackend<Block> - + BlockchainEvents<Block> + ProvideRuntimeApi<Block> + ExecutorProvider<Block> + LockImportRun<Block, BE> + + Finalizer<Block, BE> + + AuxStore + + HeaderMetadata<Block, Error = sp_blockchain::Error> + + HeaderBackend<Block> + + BlockchainEvents<Block> + + ProvideRuntimeApi<Block> + + ExecutorProvider<Block> + BlockImport<Block, Transaction = TransactionFor<BE, Block>, Error = sp_consensus::Error> - where - BE: Backend<Block>, - Block: BlockT, -{} + + StorageProvider<Block, BE> +where + BE: Backend<Block>, + Block: BlockT, +{ +} impl<Block, BE, T> ClientForGrandpa<Block, BE> for T - where - BE: Backend<Block>, - Block: BlockT, - T: LockImportRun<Block, BE> + Finalizer<Block, BE> + AuxStore - + HeaderMetadata<Block, Error = sp_blockchain::Error> + HeaderBackend<Block> - + BlockchainEvents<Block> + ProvideRuntimeApi<Block> + ExecutorProvider<Block> - + BlockImport<Block, Transaction = TransactionFor<BE, Block>, Error = sp_consensus::Error>, -{} +where + BE: Backend<Block>, + Block: BlockT, + T: LockImportRun<Block, BE> + + Finalizer<Block, BE> + + AuxStore + + HeaderMetadata<Block, Error = sp_blockchain::Error> + + HeaderBackend<Block> + + BlockchainEvents<Block> + + ProvideRuntimeApi<Block> + + ExecutorProvider<Block> + + BlockImport<Block, Transaction = TransactionFor<BE, Block>, Error = sp_consensus::Error> + + StorageProvider<Block, BE>, +{ +} /// Something that one can ask to do a block sync request. pub(crate) trait BlockSyncRequester<Block: BlockT> { @@ -360,14 +368,25 @@ pub(crate) trait BlockSyncRequester<Block: BlockT> { /// If the given vector of peers is empty then the underlying implementation /// should make a best effort to fetch the block from any peers it is /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request(&self, peers: Vec<sc_network::PeerId>, hash: Block::Hash, number: NumberFor<Block>); + fn set_sync_fork_request( + &self, + peers: Vec<sc_network::PeerId>, + hash: Block::Hash, + number: NumberFor<Block>, + ); } -impl<Block, Network> BlockSyncRequester<Block> for NetworkBridge<Block, Network> where +impl<Block, Network> BlockSyncRequester<Block> for NetworkBridge<Block, Network> +where Block: BlockT, Network: NetworkT<Block>, { - fn set_sync_fork_request(&self, peers: Vec<sc_network::PeerId>, hash: Block::Hash, number: NumberFor<Block>) { + fn set_sync_fork_request( + &self, + peers: Vec<sc_network::PeerId>, + hash: Block::Hash, + number: NumberFor<Block>, + ) { NetworkBridge::set_sync_fork_request(self, peers, hash, number) } } @@ -387,7 +406,7 @@ pub(crate) enum VoterCommand<H, N> { /// Pause the voter for a given reason. Pause(String), /// New authorities.
- ChangeAuthorities(NewAuthoritySet) + ChangeAuthorities(NewAuthoritySet), } impl fmt::Display for VoterCommand { @@ -432,7 +451,7 @@ impl From> for CommandOrError { } } -impl ::std::error::Error for CommandOrError { } +impl ::std::error::Error for CommandOrError {} impl fmt::Display for CommandOrError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -451,6 +470,7 @@ pub struct LinkHalf { voter_commands_rx: TracingUnboundedReceiver>>, justification_sender: GrandpaJustificationSender, justification_stream: GrandpaJustificationStream, + telemetry: Option, } impl LinkHalf { @@ -471,8 +491,10 @@ pub trait GenesisAuthoritySetProvider { fn get(&self) -> Result; } -impl GenesisAuthoritySetProvider for Arc> - where E: CallExecutor, +impl GenesisAuthoritySetProvider + for Arc> +where + E: CallExecutor, { fn get(&self) -> Result { // This implementation uses the Grandpa runtime API instead of reading directly from the @@ -487,10 +509,12 @@ impl GenesisAuthoritySetProvider for Arc( client: Arc, genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, -) -> Result< - ( - GrandpaBlockImport, - LinkHalf, - ), - ClientError, -> + telemetry: Option, +) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> where SC: SelectChain, BE: Backend + 'static, @@ -518,6 +537,7 @@ where genesis_authorities_provider, select_chain, Default::default(), + telemetry, ) } @@ -531,13 +551,8 @@ pub fn block_import_with_authority_set_hard_forks genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, authority_set_hard_forks: Vec<(SetId, (Block::Hash, NumberFor), AuthorityList)>, -) -> Result< - ( - GrandpaBlockImport, - LinkHalf, - ), - ClientError, -> + telemetry: Option, +) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> where SC: SelectChain, BE: Backend + 'static, @@ -546,23 +561,24 @@ where let chain_info = client.info(); let genesis_hash = chain_info.genesis_hash; - let persistent_data = aux_schema::load_persistent( - &*client, - genesis_hash, - >::zero(), - || { - let authorities = genesis_authorities_provider.get()?; - telemetry!(CONSENSUS_DEBUG; "afg.loading_authorities"; - "authorities_len" => ?authorities.len() - ); - Ok(authorities) - } - )?; + let persistent_data = + aux_schema::load_persistent(&*client, genesis_hash, >::zero(), { + let telemetry = telemetry.clone(); + move || { + let authorities = genesis_authorities_provider.get()?; + telemetry!( + telemetry; + CONSENSUS_DEBUG; + "afg.loading_authorities"; + "authorities_len" => ?authorities.len() + ); + Ok(authorities) + } + })?; let (voter_commands_tx, voter_commands_rx) = tracing_unbounded("mpsc_grandpa_voter_command"); - let (justification_sender, justification_stream) = - GrandpaJustificationStream::channel(); + let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); // create pending change objects with 0 delay and enacted on finality // (i.e. standard changes) for each authority set hard fork. 
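As the comment above notes, each configured hard fork is turned into a standard pending change: zero delay, enacted on finality. A simplified sketch of that mapping, using hypothetical local types in place of the crate's own `PendingChange`:

```rust
// Simplified, hypothetical stand-ins for the hard-fork -> pending-change
// mapping described above; these are not the crate's actual types.
type SetId = u64;
type AuthorityList = Vec<(String, u64)>; // (authority, weight), simplified

struct PendingChange {
    next_authorities: AuthorityList,
    delay: u32, // 0 => standard change, applied when the block is finalized
    canon_height: u32,
    canon_hash: [u8; 32],
}

fn hard_forks_to_pending_changes(
    forks: Vec<(SetId, ([u8; 32], u32), AuthorityList)>,
) -> Vec<(SetId, PendingChange)> {
    forks
        .into_iter()
        .map(|(set_id, (hash, number), authorities)| {
            let change = PendingChange {
                next_authorities: authorities,
                delay: 0, // standard change: no delay
                canon_height: number,
                canon_hash: hash,
            };
            (set_id, change)
        })
        .collect()
}

fn main() {
    let changes =
        hard_forks_to_pending_changes(vec![(1, ([0; 32], 42), vec![("alice".into(), 1)])]);
    assert_eq!(changes[0].1.delay, 0);
}
```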
@@ -588,9 +604,9 @@ where select_chain.clone(), persistent_data.authority_set.clone(), voter_commands_tx, - persistent_data.consensus_changes.clone(), authority_set_hard_forks, justification_sender.clone(), + telemetry.clone(), ), LinkHalf { client, @@ -599,6 +615,7 @@ where voter_commands_rx, justification_sender, justification_stream, + telemetry, }, )) } @@ -612,13 +629,17 @@ fn global_communication( metrics: Option, ) -> ( impl Stream< - Item = Result, CommandOrError>>, + Item = Result< + CommunicationInH, + CommandOrError>, + >, >, impl Sink< CommunicationOutH, Error = CommandOrError>, - > + Unpin, -) where + >, +) +where BE: Backend + 'static, C: ClientForGrandpa + 'static, N: NetworkT, @@ -627,11 +648,8 @@ fn global_communication( let is_voter = local_authority_id(voters, keystore).is_some(); // verification stream - let (global_in, global_out) = network.global_communication( - communication::SetId(set_id), - voters.clone(), - is_voter, - ); + let (global_in, global_out) = + network.global_communication(communication::SetId(set_id), voters.clone(), is_voter); // block commit and catch up messages until relevant blocks are imported. let global_in = UntilGlobalMessageBlocksImported::new( @@ -656,41 +674,62 @@ pub struct GrandpaParams { /// A link to the block import worker. pub link: LinkHalf, /// The Network instance. + /// + /// It is assumed that this network will feed us Grandpa notifications. When using the + /// `sc_network` crate, it is assumed that the Grandpa notifications protocol has been passed + /// to the configuration of the networking. See [`grandpa_peers_set_config`]. pub network: N, - /// If supplied, can be used to hook on telemetry connection established events. - pub telemetry_on_connect: Option>, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, /// The prometheus metrics registry. pub prometheus_registry: Option, /// The voter state is exposed at an RPC endpoint. pub shared_voter_state: SharedVoterState, + /// TelemetryHandle instance. + pub telemetry: Option, +} + +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +pub fn grandpa_peers_set_config() -> sc_network::config::NonDefaultSetConfig { + sc_network::config::NonDefaultSetConfig { + notifications_protocol: communication::GRANDPA_PROTOCOL_NAME.into(), + fallback_names: Vec::new(), + // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. + max_notification_size: 1024 * 1024, + set_config: sc_network::config::SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, + }, + } } /// Run a GRANDPA voter as a task. Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. 
pub fn run_grandpa_voter( grandpa_params: GrandpaParams, -) -> sp_blockchain::Result + Unpin + Send + 'static> +) -> sp_blockchain::Result + Send> where Block::Hash: Ord, BE: Backend + 'static, - N: NetworkT + Send + Sync + Clone + 'static, + N: NetworkT + Sync + 'static, SC: SelectChain + 'static, VR: VotingRule + Clone + 'static, NumberFor: BlockNumberOps, DigestFor: Encode, C: ClientForGrandpa + 'static, - C::Api: GrandpaApi, + C::Api: GrandpaApi, { let GrandpaParams { mut config, link, network, - telemetry_on_connect, voting_rule, prometheus_registry, shared_voter_state, + telemetry, } = grandpa_params; // NOTE: we have recently removed `run_grandpa_observer` from the public @@ -706,6 +745,7 @@ where voter_commands_rx, justification_sender, justification_stream: _, + telemetry: _, } = link; let network = NetworkBridge::new( @@ -713,29 +753,32 @@ where config.clone(), persistent_data.set_state.clone(), prometheus_registry.as_ref(), + telemetry.clone(), ); let conf = config.clone(); - let telemetry_task = if let Some(telemetry_on_connect) = telemetry_on_connect { - let authorities = persistent_data.authority_set.clone(); - let events = telemetry_on_connect - .for_each(move |_| { + let telemetry_task = + if let Some(telemetry_on_connect) = telemetry.as_ref().map(|x| x.on_connect_stream()) { + let authorities = persistent_data.authority_set.clone(); + let telemetry = telemetry.clone(); + let events = telemetry_on_connect.for_each(move |_| { let current_authorities = authorities.current_authorities(); let set_id = authorities.set_id(); let authority_id = local_authority_id(¤t_authorities, conf.keystore.as_ref()) .unwrap_or_default(); - let authorities = current_authorities - .iter() - .map(|(id, _)| id.to_string()) - .collect::>(); + let authorities = + current_authorities.iter().map(|(id, _)| id.to_string()).collect::>(); let authorities = serde_json::to_string(&authorities).expect( "authorities is always at least an empty vector; \ elements are always of type string", ); - telemetry!(CONSENSUS_INFO; "afg.authority_set"; + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.authority_set"; "authority_id" => authority_id.to_string(), "authority_set_id" => ?set_id, "authorities" => authorities, @@ -743,10 +786,10 @@ where future::ready(()) }); - future::Either::Left(events) - } else { - future::Either::Right(future::pending()) - }; + future::Either::Left(events) + } else { + future::Either::Right(future::pending()) + }; let voter_work = VoterWork::new( client, @@ -759,6 +802,7 @@ where prometheus_registry, shared_voter_state, justification_sender, + telemetry, ); let voter_work = voter_work.map(|res| match res { @@ -769,8 +813,7 @@ where }); // Make sure that `telemetry_task` doesn't accidentally finish and kill grandpa. - let telemetry_task = telemetry_task - .then(|_| future::pending::<()>()); + let telemetry_task = telemetry_task.then(|_| future::pending::<()>()); Ok(future::select(voter_work, telemetry_task).map(drop)) } @@ -792,12 +835,14 @@ impl Metrics { /// Future that powers the voter. #[must_use] struct VoterWork, SC, VR> { - voter: Pin>>> + Send>>, + voter: Pin< + Box>>> + Send>, + >, shared_voter_state: SharedVoterState, env: Arc>, voter_commands_rx: TracingUnboundedReceiver>>, network: NetworkBridge, - + telemetry: Option, /// Prometheus metrics. 
metrics: Option, } @@ -807,7 +852,7 @@ where Block: BlockT, B: Backend + 'static, C: ClientForGrandpa + 'static, - C::Api: GrandpaApi, + C::Api: GrandpaApi, N: NetworkT + Sync, NumberFor: BlockNumberOps, SC: SelectChain + 'static, @@ -824,13 +869,14 @@ where prometheus_registry: Option, shared_voter_state: SharedVoterState, justification_sender: GrandpaJustificationSender, + telemetry: Option, ) -> Self { let metrics = match prometheus_registry.as_ref().map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), Some(Err(e)) => { debug!(target: "afg", "Failed to register metrics: {:?}", e); None - } + }, None => None, }; @@ -844,10 +890,10 @@ where network: network.clone(), set_id: persistent_data.authority_set.set_id(), authority_set: persistent_data.authority_set.clone(), - consensus_changes: persistent_data.consensus_changes.clone(), voter_set_state: persistent_data.set_state, metrics: metrics.as_ref().map(|m| m.environment.clone()), justification_sender: Some(justification_sender), + telemetry: telemetry.clone(), _phantom: PhantomData, }); @@ -859,6 +905,7 @@ where env, voter_commands_rx, network, + telemetry, metrics, }; work.rebuild_voter(); @@ -874,7 +921,10 @@ where let authority_id = local_authority_id(&self.env.voters, self.env.config.keystore.as_ref()) .unwrap_or_default(); - telemetry!(CONSENSUS_DEBUG; "afg.starting_new_voter"; + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.starting_new_voter"; "name" => ?self.env.config.name(), "set_id" => ?self.env.set_id, "authority_id" => authority_id.to_string(), @@ -882,18 +932,16 @@ where let chain_info = self.env.client.info(); - let authorities = self - .env - .voters - .iter() - .map(|(id, _)| id.to_string()) - .collect::>(); + let authorities = self.env.voters.iter().map(|(id, _)| id.to_string()).collect::>(); let authorities = serde_json::to_string(&authorities).expect( - "authorities is always at least an empty vector; elements are always of type string", + "authorities is always at least an empty vector; elements are always of type string; qed.", ); - telemetry!(CONSENSUS_INFO; "afg.authority_set"; + telemetry!( + self.telemetry; + CONSENSUS_INFO; + "afg.authority_set"; "number" => ?chain_info.finalized_number, "hash" => ?chain_info.finalized_hash, "authority_id" => authority_id.to_string(), @@ -903,10 +951,7 @@ where match &*self.env.voter_set_state.read() { VoterSetState::Live { completed_rounds, .. } => { - let last_finalized = ( - chain_info.finalized_hash, - chain_info.finalized_number, - ); + let last_finalized = (chain_info.finalized_hash, chain_info.finalized_number); let global_comms = global_communication( self.env.set_id, @@ -939,21 +984,22 @@ where self.voter = Box::pin(voter); }, - VoterSetState::Paused { .. } => - self.voter = Box::pin(future::pending()), + VoterSetState::Paused { .. } => self.voter = Box::pin(future::pending()), }; } fn handle_voter_command( &mut self, - command: VoterCommand> + command: VoterCommand>, ) -> Result<(), Error> { match command { VoterCommand::ChangeAuthorities(new) => { - let voters: Vec = new.authorities.iter().map(move |(a, _)| { - format!("{}", a) - }).collect(); - telemetry!(CONSENSUS_INFO; "afg.voter_command_change_authorities"; + let voters: Vec = + new.authorities.iter().map(move |(a, _)| format!("{}", a)).collect(); + telemetry!( + self.telemetry; + CONSENSUS_INFO; + "afg.voter_command_change_authorities"; "number" => ?new.canon_number, "hash" => ?new.canon_hash, "voters" => ?voters, @@ -965,7 +1011,7 @@ where // set changed (not where the signal happened!) 
as the base. let set_state = VoterSetState::live( new.set_id, - &*self.env.authority_set.inner().read(), + &*self.env.authority_set.inner(), (new.canon_hash, new.canon_number), ); @@ -973,13 +1019,11 @@ where Ok(Some(set_state)) })?; - let voters = Arc::new(VoterSet::new(new.authorities.into_iter()) - .expect("new authorities come from pending change; \ - pending change comes from `AuthoritySet`; \ - `AuthoritySet` validates authorities is non-empty and weights are non-zero; \ - qed." - ) - ); + let voters = Arc::new(VoterSet::new(new.authorities.into_iter()).expect( + "new authorities come from pending change; pending change comes from \ + `AuthoritySet`; `AuthoritySet` validates authorities is non-empty and \ + weights are non-zero; qed.", + )); self.env = Arc::new(Environment { voters, @@ -989,17 +1033,17 @@ where select_chain: self.env.select_chain.clone(), config: self.env.config.clone(), authority_set: self.env.authority_set.clone(), - consensus_changes: self.env.consensus_changes.clone(), network: self.env.network.clone(), voting_rule: self.env.voting_rule.clone(), metrics: self.env.metrics.clone(), justification_sender: self.env.justification_sender.clone(), + telemetry: self.telemetry.clone(), _phantom: PhantomData, }); self.rebuild_voter(); Ok(()) - } + }, VoterCommand::Pause(reason) => { info!(target: "afg", "Pausing old validator set: {}", reason); @@ -1014,7 +1058,7 @@ where self.rebuild_voter(); Ok(()) - } + }, } } } @@ -1027,71 +1071,48 @@ where NumberFor: BlockNumberOps, SC: SelectChain + 'static, C: ClientForGrandpa + 'static, - C::Api: GrandpaApi, + C::Api: GrandpaApi, VR: VotingRule + Clone + 'static, { type Output = Result<(), Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { match Future::poll(Pin::new(&mut self.voter), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(Ok(())) => { // voters don't conclude naturally - return Poll::Ready( - Err(Error::Safety("finality-grandpa inner voter has concluded.".into())) - ) - } + return Poll::Ready(Err(Error::Safety( + "finality-grandpa inner voter has concluded.".into(), + ))) + }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error return Poll::Ready(Err(e)) - } + }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. - return Poll::Ready( - Err(Error::Safety("`voter_commands_rx` was closed.".into())) - ) - } + return Poll::Ready(Err(Error::Safety("`voter_commands_rx` was closed.".into()))) + }, Poll::Ready(Some(command)) => { // some command issued externally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } Future::poll(Pin::new(&mut self.network), cx) } } -/// When GRANDPA is not initialized we still need to register the finality -/// tracker inherent provider which might be expected by the runtime for block -/// authoring. Additionally, we register a gossip message validator that -/// discards all GRANDPA messages (otherwise, we end up banning nodes that send -/// us a `Neighbor` message, since there is no registered gossip validator for -/// the engine id defined in the message.) 
-pub fn setup_disabled_grandpa(network: N) -> Result<(), sp_consensus::Error> -where - N: NetworkT + Send + Clone + 'static, -{ - // We register the GRANDPA protocol so that we don't consider it an anomaly - // to receive GRANDPA messages on the network. We don't process the - // messages. - network.register_notifications_protocol( - communication::GRANDPA_ENGINE_ID, - From::from(communication::GRANDPA_PROTOCOL_NAME), - ); - - Ok(()) -} - /// Checks if this node has any available keys in the keystore for any authority id in the given /// voter set. Returns the authority id for which keys are available, or `None` if no keys are /// available. @@ -1099,13 +1120,12 @@ fn local_authority_id( voters: &VoterSet, keystore: Option<&SyncCryptoStorePtr>, ) -> Option { - match keystore { - Some(keystore) => voters + keystore.and_then(|keystore| { + voters .iter() .find(|(p, _)| { SyncCryptoStore::has_keys(&**keystore, &[(p.to_raw_vec(), AuthorityId::ID)]) }) - .map(|(p, _)| p.clone()), - None => None, - } + .map(|(p, _)| p.clone()) + }) } diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs deleted file mode 100644 index a7c9a655467c7..0000000000000 --- a/client/finality-grandpa/src/light_import.rs +++ /dev/null @@ -1,880 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::collections::HashMap; -use std::sync::Arc; -use log::{info, trace, warn}; -use parking_lot::RwLock; -use sc_client_api::backend::{AuxStore, Backend, Finalizer, TransactionFor}; -use sp_blockchain::{HeaderBackend, Error as ClientError, well_known_cache_keys}; -use parity_scale_codec::{Encode, Decode}; -use sp_consensus::{ - import_queue::Verifier, - BlockOrigin, BlockImport, FinalityProofImport, BlockImportParams, ImportResult, ImportedAux, - BlockCheckParams, Error as ConsensusError, -}; -use sc_network::config::{BoxFinalityProofRequestBuilder, FinalityProofRequestBuilder}; -use sp_runtime::Justification; -use sp_runtime::traits::{NumberFor, Block as BlockT, Header as HeaderT, DigestFor}; -use sp_finality_grandpa::{self, AuthorityList}; -use sp_runtime::generic::BlockId; - -use crate::GenesisAuthoritySetProvider; -use crate::aux_schema::load_decode; -use crate::consensus_changes::ConsensusChanges; -use crate::environment::canonical_at_height; -use crate::finality_proof::{ - AuthoritySetForFinalityChecker, ProvableJustification, make_finality_proof_request, -}; -use crate::justification::GrandpaJustification; - -/// LightAuthoritySet is saved under this key in aux storage. -const LIGHT_AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; -/// ConsensusChanges is saver under this key in aux storage. -const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; - -/// Create light block importer. 
-pub fn light_block_import( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, -) -> Result, ClientError> - where - BE: Backend, - Client: crate::ClientForGrandpa, -{ - let info = client.info(); - let import_data = load_aux_import_data( - info.finalized_hash, - &*client, - genesis_authorities_provider, - )?; - Ok(GrandpaLightBlockImport { - client, - backend, - authority_set_provider, - data: Arc::new(RwLock::new(import_data)), - }) -} - -/// A light block-import handler for GRANDPA. -/// -/// It is responsible for: -/// - checking GRANDPA justifications; -/// - fetching finality proofs for blocks that are enacting consensus changes. -pub struct GrandpaLightBlockImport { - client: Arc, - backend: Arc, - authority_set_provider: Arc>, - data: Arc>>, -} - -impl Clone for GrandpaLightBlockImport { - fn clone(&self) -> Self { - GrandpaLightBlockImport { - client: self.client.clone(), - backend: self.backend.clone(), - authority_set_provider: self.authority_set_provider.clone(), - data: self.data.clone(), - } - } -} - -/// Mutable data of light block importer. -struct LightImportData { - last_finalized: Block::Hash, - authority_set: LightAuthoritySet, - consensus_changes: ConsensusChanges>, -} - -/// Latest authority set tracker. -#[derive(Debug, Encode, Decode)] -struct LightAuthoritySet { - set_id: u64, - authorities: AuthorityList, -} - -impl GrandpaLightBlockImport { - /// Create finality proof request builder. - pub fn create_finality_proof_request_builder(&self) -> BoxFinalityProofRequestBuilder { - Box::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _ - } -} - -impl BlockImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, -{ - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - do_import_block::<_, _, _, GrandpaJustification>( - &*self.client, &mut *self.data.write(), block, new_cache - ) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.client.check_block(block) - } -} - -impl FinalityProofImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, -{ - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - let mut out = Vec::new(); - let chain_info = (&*self.client).info(); - - let data = self.data.read(); - for (pending_number, pending_hash) in data.consensus_changes.pending_changes() { - if *pending_number > chain_info.finalized_number - && *pending_number <= chain_info.best_number - { - out.push((*pending_hash, *pending_number)); - } - } - - out - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - do_import_finality_proof::<_, _, _, GrandpaJustification>( - &*self.client, - self.backend.clone(), - &*self.authority_set_provider, - &mut *self.data.write(), - hash, - number, - finality_proof, - verifier, - ) - } -} - -impl LightAuthoritySet { - /// Get a genesis set with given authorities. 
- pub fn genesis(initial: AuthorityList) -> Self { - LightAuthoritySet { - set_id: sp_finality_grandpa::SetId::default(), - authorities: initial, - } - } - - /// Get latest set id. - pub fn set_id(&self) -> u64 { - self.set_id - } - - /// Get latest authorities set. - pub fn authorities(&self) -> AuthorityList { - self.authorities.clone() - } - - /// Set new authorities set. - pub fn update(&mut self, set_id: u64, authorities: AuthorityList) { - self.set_id = set_id; - self.authorities = authorities; - } -} - -struct GrandpaFinalityProofRequestBuilder(Arc>>); - -impl FinalityProofRequestBuilder for GrandpaFinalityProofRequestBuilder { - fn build_request_data(&mut self, _hash: &B::Hash) -> Vec { - let data = self.0.read(); - make_finality_proof_request( - data.last_finalized, - data.authority_set.set_id(), - ) - } -} - -/// Try to import new block. -fn do_import_block( - mut client: C, - data: &mut LightImportData, - mut block: BlockImportParams>, - new_cache: HashMap>, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - J: ProvableJustification, -{ - let hash = block.post_hash(); - let number = *block.header.number(); - - // we don't want to finalize on `inner.import_block` - let justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); - let import_result = client.import_block(block, new_cache); - - let mut imported_aux = match import_result { - Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => return Ok(r), - Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), - }; - - match justification { - Some(justification) => { - trace!( - target: "afg", - "Imported block {}{}. Importing justification.", - if enacts_consensus_change { " which enacts consensus changes" } else { "" }, - hash, - ); - - do_import_justification::<_, _, _, J>(client, data, hash, number, justification) - }, - None if enacts_consensus_change => { - trace!( - target: "afg", - "Imported block {} which enacts consensus changes. Requesting finality proof.", - hash, - ); - - // remember that we need finality proof for this block - imported_aux.needs_finality_proof = true; - data.consensus_changes.note_change((number, hash)); - Ok(ImportResult::Imported(imported_aux)) - }, - None => Ok(ImportResult::Imported(imported_aux)), - } -} - -/// Try to import finality proof. 
-fn do_import_finality_proof( - client: C, - backend: Arc, - authority_set_provider: &dyn AuthoritySetForFinalityChecker, - data: &mut LightImportData, - _hash: Block::Hash, - _number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, -) -> Result<(Block::Hash, NumberFor), ConsensusError> - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - DigestFor: Encode, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, -{ - let authority_set_id = data.authority_set.set_id(); - let authorities = data.authority_set.authorities(); - let finality_effects = crate::finality_proof::check_finality_proof::<_, _, J>( - backend.blockchain(), - authority_set_id, - authorities, - authority_set_provider, - finality_proof, - ).map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - // try to import all new headers - let block_origin = BlockOrigin::NetworkBroadcast; - for header_to_import in finality_effects.headers_to_import { - let (block_to_import, new_authorities) = verifier.verify( - block_origin, - header_to_import, - None, - None, - ).map_err(|e| ConsensusError::ClientImport(e))?; - assert!( - block_to_import.justification.is_none(), - "We have passed None as justification to verifier.verify", - ); - - let mut cache = HashMap::new(); - if let Some(authorities) = new_authorities { - cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - } - do_import_block::<_, _, _, J>( - client.clone(), - data, - block_to_import.convert_transaction(), - cache, - )?; - } - - // try to import latest justification - let finalized_block_hash = finality_effects.block; - let finalized_block_number = backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(finality_effects.block)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - do_finalize_block( - client.clone(), - data, - finalized_block_hash, - finalized_block_number, - finality_effects.justification.encode(), - )?; - - // apply new authorities set - data.authority_set.update( - finality_effects.new_set_id, - finality_effects.new_authorities, - ); - - // store new authorities set - require_insert_aux( - &client, - LIGHT_AUTHORITY_SET_KEY, - &data.authority_set, - "authority set", - )?; - - Ok((finalized_block_hash, finalized_block_number)) -} - -/// Try to import justification. -fn do_import_justification( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, -{ - // with justification, we have two cases - // - // optimistic: the same GRANDPA authorities set has generated intermediate justification - // => justification is verified using current authorities set + we could proceed further - // - // pessimistic scenario: the GRANDPA authorities set has changed - // => we need to fetch new authorities set (i.e. 
finality proof) from remote node - - // first, try to behave optimistically - let authority_set_id = data.authority_set.set_id(); - let justification = J::decode_and_verify( - &justification, - authority_set_id, - &data.authority_set.authorities(), - ); - - // BadJustification error means that justification has been successfully decoded, but - // it isn't valid within current authority set - let justification = match justification { - Err(ClientError::BadJustification(_)) => { - trace!( - target: "afg", - "Justification for {} is not valid within current authorities set. Requesting finality proof.", - hash, - ); - - let mut imported_aux = ImportedAux::default(); - imported_aux.needs_finality_proof = true; - return Ok(ImportResult::Imported(imported_aux)); - }, - Err(e) => { - trace!( - target: "afg", - "Justification for {} is not valid. Bailing.", - hash, - ); - - return Err(ConsensusError::ClientImport(e.to_string())); - }, - Ok(justification) => { - trace!( - target: "afg", - "Justification for {} is valid. Finalizing the block.", - hash, - ); - - justification - }, - }; - - // finalize the block - do_finalize_block(client, data, hash, number, justification.encode()) -} - -/// Finalize the block. -fn do_finalize_block( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, -) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, -{ - // finalize the block - client.finalize_block(BlockId::Hash(hash), Some(justification), true).map_err(|e| { - warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); - ConsensusError::ClientImport(e.to_string()) - })?; - - // forget obsoleted consensus changes - let consensus_finalization_res = data.consensus_changes - .finalize( - (number, hash), - |at_height| canonical_at_height(&client, (hash, number), true, at_height) - ); - match consensus_finalization_res { - Ok((true, _)) => require_insert_aux( - &client, - LIGHT_CONSENSUS_CHANGES_KEY, - &data.consensus_changes, - "consensus changes", - )?, - Ok(_) => (), - Err(error) => return Err(on_post_finalization_error(error, "consensus changes")), - } - - // update last finalized block reference - data.last_finalized = hash; - - // we just finalized this block, so if we were importing it, it is now the new best - Ok(ImportResult::imported(true)) -} - -/// Load light import aux data from the store. -fn load_aux_import_data( - last_finalized: Block::Hash, - aux_store: &B, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, -) -> Result, ClientError> - where - B: AuxStore, - Block: BlockT, -{ - let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? { - Some(authority_set) => authority_set, - None => { - info!(target: "afg", "Loading GRANDPA authorities \ - from genesis on what appears to be first startup."); - - // no authority set on disk: fetch authorities from genesis state - let genesis_authorities = genesis_authorities_provider.get()?; - - let authority_set = LightAuthoritySet::genesis(genesis_authorities); - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; - - authority_set - }, - }; - - let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? 
{ - Some(consensus_changes) => consensus_changes, - None => { - let consensus_changes = ConsensusChanges::>::empty(); - - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; - - consensus_changes - }, - }; - - Ok(LightImportData { - last_finalized, - authority_set, - consensus_changes, - }) -} - -/// Insert into aux store. If failed, return error && show inconsistency warning. -fn require_insert_aux( - store: &A, - key: &[u8], - value: &T, - value_type: &str, -) -> Result<(), ConsensusError> { - let encoded = value.encode(); - let update_res = store.insert_aux(&[(key, &encoded[..])], &[]); - if let Err(error) = update_res { - return Err(on_post_finalization_error(error, value_type)); - } - - Ok(()) -} - -/// Display inconsistency warning. -fn on_post_finalization_error(error: ClientError, value_type: &str) -> ConsensusError { - warn!(target: "afg", "Failed to write updated {} to disk. Bailing.", value_type); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - ConsensusError::ClientImport(error.to_string()) -} - -#[cfg(test)] -pub mod tests { - use super::*; - use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; - use sp_finality_grandpa::AuthorityId; - use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof, BlockBackend}; - use substrate_test_runtime_client::runtime::{Block, Header}; - use crate::tests::TestApi; - use crate::finality_proof::{ - FinalityProofFragment, - tests::{TestJustification, ClosureAuthoritySetForFinalityChecker}, - }; - - struct OkVerifier; - - impl Verifier for OkVerifier { - fn verify( - &mut self, - origin: BlockOrigin, - header: Header, - _justification: Option, - _body: Option::Extrinsic>>, - ) -> Result<(BlockImportParams, Option)>>), String> { - Ok((BlockImportParams::new(origin, header), None)) - } - } - - pub struct NoJustificationsImport( - pub GrandpaLightBlockImport - ); - - impl Clone - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - { - fn clone(&self) -> Self { - NoJustificationsImport(self.0.clone()) - } - } - - impl BlockImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for <'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - GrandpaLightBlockImport: - BlockImport, Error = ConsensusError> - { - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - block.justification.take(); - self.0.import_block(block, new_cache) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.0.check_block(block) - } - } - - impl FinalityProofImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - BE: Backend + 'static, - DigestFor: Encode, - for <'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - { - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - self.0.on_start() - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - self.0.import_finality_proof(hash, number, finality_proof, verifier) - } - } - - /// Creates light 
block import that ignores justifications that came outside of finality proofs. - pub fn light_block_import_without_justifications( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, - ) -> Result, ClientError> - where - BE: Backend + 'static, - Client: crate::ClientForGrandpa, - { - light_block_import(client, backend, genesis_authorities_provider, authority_set_provider) - .map(NoJustificationsImport) - } - - fn import_block( - new_cache: HashMap>, - justification: Option, - ) -> ( - ImportResult, - substrate_test_runtime_client::client::Client, - Arc, - ) { - let (client, backend) = substrate_test_runtime_client::new_light(); - let mut import_data = LightImportData { - last_finalized: Default::default(), - authority_set: LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[1; 32]), 1)]), - consensus_changes: ConsensusChanges::empty(), - }; - let mut block = BlockImportParams::new( - BlockOrigin::Own, - Header { - number: 1, - parent_hash: client.chain_info().best_hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }, - ); - block.justification = justification; - block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - ( - do_import_block::<_, _, _, TestJustification>( - &client, - &mut import_data, - block, - new_cache, - ).unwrap(), - client, - backend, - ) - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_no_justification_provided() { - assert_eq!(import_block(HashMap::new(), None).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_correct_justification_provided() { - let justification = TestJustification((0, vec![(AuthorityId::from_slice(&[1; 32]), 1)]), Vec::new()).encode(); - assert_eq!(import_block(HashMap::new(), Some(justification)).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_no_justification_provided() { - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!(import_block(cache, None).0, ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_incorrect_justification_provided() { - let justification = TestJustification((0, vec![]), Vec::new()).encode(); - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!( - import_block(cache, Some(justification)).0, - ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: false, - header_only: false, - }, - )); - } - - - #[test] - fn aux_data_updated_on_start() { - let aux_store = InMemoryAuxStore::::new(); - let api = 
TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store is empty initially - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_none()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_none()); - - // it is updated on importer start - load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some()); - } - - #[test] - fn aux_data_loaded_on_restart() { - let aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store is non-empty initially - let mut consensus_changes = ConsensusChanges::::empty(); - consensus_changes.note_change((42, Default::default())); - aux_store.insert_aux( - &[ - ( - LIGHT_AUTHORITY_SET_KEY, - LightAuthoritySet::genesis( - vec![(AuthorityId::from_slice(&[42; 32]), 2)] - ).encode().as_slice(), - ), - ( - LIGHT_CONSENSUS_CHANGES_KEY, - consensus_changes.encode().as_slice(), - ), - ], - &[], - ).unwrap(); - - // importer uses it on start - let data = load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert_eq!(data.authority_set.authorities(), vec![(AuthorityId::from_slice(&[42; 32]), 2)]); - assert_eq!(data.consensus_changes.pending_changes(), &[(42, Default::default())]); - } - - #[test] - fn authority_set_is_updated_on_finality_proof_import() { - let initial_set_id = 0; - let initial_set = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; - let updated_set = vec![(AuthorityId::from_slice(&[2; 32]), 2)]; - let babe_set_signal = vec![AuthorityId::from_slice(&[42; 32])].encode(); - - // import block #1 without justification - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, babe_set_signal); - let (_, client, backend) = import_block(cache, None); - - // import finality proof for block #1 - let hash = client.block_hash(1).unwrap().unwrap(); - let mut verifier = OkVerifier; - let mut import_data = LightImportData { - last_finalized: Default::default(), - authority_set: LightAuthoritySet::genesis(initial_set.clone()), - consensus_changes: ConsensusChanges::empty(), - }; - - // import finality proof - do_import_finality_proof::<_, _, _, TestJustification>( - &client, - backend, - &ClosureAuthoritySetForFinalityChecker( - |_, _, _| Ok(updated_set.clone()) - ), - &mut import_data, - Default::default(), - Default::default(), - vec![ - FinalityProofFragment::
{ - block: hash, - justification: TestJustification( - (initial_set_id, initial_set.clone()), - Vec::new(), - ).encode(), - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![])), - }, - ].encode(), - &mut verifier, - ).unwrap(); - - // verify that new authorities set has been saved to the aux storage - let data = load_aux_import_data(Default::default(), &client, &TestApi::new(initial_set)).unwrap(); - assert_eq!(data.authority_set.authorities(), updated_set); - } -} diff --git a/client/finality-grandpa/src/notification.rs b/client/finality-grandpa/src/notification.rs index 8415583051902..85d581bd5065e 100644 --- a/client/finality-grandpa/src/notification.rs +++ b/client/finality-grandpa/src/notification.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,14 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::sync::Arc; use parking_lot::Mutex; +use std::sync::Arc; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use crate::justification::GrandpaJustification; -use crate::Error; +use crate::{justification::GrandpaJustification, Error}; // Stream of justifications returned when subscribing. type JustificationStream = TracingUnboundedReceiver>; @@ -41,16 +40,14 @@ type SharedJustificationSenders = Arc { - subscribers: SharedJustificationSenders + subscribers: SharedJustificationSenders, } impl GrandpaJustificationSender { /// The `subscribers` should be shared with a corresponding /// `GrandpaJustificationStream`. fn new(subscribers: SharedJustificationSenders) -> Self { - Self { - subscribers, - } + Self { subscribers } } /// Send out a notification to all subscribers that a new justification @@ -83,7 +80,7 @@ impl GrandpaJustificationSender { /// so it can be used to add more subscriptions. #[derive(Clone)] pub struct GrandpaJustificationStream { - subscribers: SharedJustificationSenders + subscribers: SharedJustificationSenders, } impl GrandpaJustificationStream { @@ -100,9 +97,7 @@ impl GrandpaJustificationStream { /// The `subscribers` should be shared with a corresponding /// `GrandpaJustificationSender`. fn new(subscribers: SharedJustificationSenders) -> Self { - Self { - subscribers, - } + Self { subscribers } } /// Subscribe to a channel through which justifications are sent diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index fd00b35c40a73..70a94cd504726 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,33 +16,35 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
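// ---------------------------------------------------------------------------
// Editor's note: the notification.rs hunks above restructure the justification
// sender/stream pair around a shared Vec of subscriber channels. The sketch
// below is a minimal, self-contained model of that pattern, assuming plain
// std primitives (std::sync::Mutex, std::sync::mpsc) in place of
// parking_lot::Mutex and sc_utils::mpsc::tracing_unbounded; `NotificationHub`
// is an illustrative name, not a type from the crate.
use std::sync::{mpsc, Arc, Mutex};

// Shared registry of live subscriber senders.
type Subscribers<T> = Arc<Mutex<Vec<mpsc::Sender<T>>>>;

struct NotificationHub<T> {
    subscribers: Subscribers<T>,
}

impl<T: Clone> NotificationHub<T> {
    fn new() -> Self {
        Self { subscribers: Arc::new(Mutex::new(Vec::new())) }
    }

    // Hand out a receiver and remember its sender for future notifications.
    fn subscribe(&self) -> mpsc::Receiver<T> {
        let (tx, rx) = mpsc::channel();
        self.subscribers.lock().unwrap().push(tx);
        rx
    }

    // Send to every subscriber, pruning channels whose receiver was dropped —
    // the same retain-on-send cleanup the GRANDPA sender performs.
    fn notify(&self, value: T) {
        self.subscribers.lock().unwrap().retain(|tx| tx.send(value.clone()).is_ok());
    }
}
// ---------------------------------------------------------------------------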
-use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; +use std::{ + marker::{PhantomData, Unpin}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use finality_grandpa::{voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError}; use futures::prelude::*; - -use finality_grandpa::{ - BlockNumberOps, Error as GrandpaError, voter, voter_set::VoterSet -}; use log::{debug, info, warn}; -use sp_keystore::SyncCryptoStorePtr; -use sp_consensus::SelectChain; + use sc_client_api::backend::Backend; -use sp_utils::mpsc::TracingUnboundedReceiver; -use sp_runtime::traits::{NumberFor, Block as BlockT}; +use sc_telemetry::TelemetryHandle; +use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; +use sp_consensus::SelectChain; +use sp_finality_grandpa::AuthorityId; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use crate::{ - global_communication, CommandOrError, CommunicationIn, Config, environment, - LinkHalf, Error, aux_schema::PersistentData, VoterCommand, VoterSetState, + authorities::SharedAuthoritySet, + aux_schema::PersistentData, + communication::{Network as NetworkT, NetworkBridge}, + environment, global_communication, + notification::GrandpaJustificationSender, + ClientForGrandpa, CommandOrError, CommunicationIn, Config, Error, LinkHalf, VoterCommand, + VoterSetState, }; -use crate::authorities::SharedAuthoritySet; -use crate::communication::{Network as NetworkT, NetworkBridge}; -use crate::consensus_changes::SharedConsensusChanges; -use crate::notification::GrandpaJustificationSender; -use sp_finality_grandpa::AuthorityId; -use std::marker::{PhantomData, Unpin}; struct ObserverChain<'a, Block: BlockT, Client> { client: &'a Arc, @@ -50,40 +52,39 @@ struct ObserverChain<'a, Block: BlockT, Client> { } impl<'a, Block, Client> finality_grandpa::Chain> - for ObserverChain<'a, Block, Client> where - Block: BlockT, - Client: HeaderMetadata, - NumberFor: BlockNumberOps, + for ObserverChain<'a, Block, Client> +where + Block: BlockT, + Client: HeaderMetadata, + NumberFor: BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { environment::ancestry(&self.client, base, block) } - - fn best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - // only used by voter - None - } } fn grandpa_observer( client: &Arc, authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, voters: &Arc>, justification_sender: &Option>, last_finalized_number: NumberFor, commits: S, note_round: F, + telemetry: Option, ) -> impl Future>>> where NumberFor: BlockNumberOps, S: Stream, CommandOrError>>>, F: Fn(u64), BE: Backend, - Client: crate::ClientForGrandpa, + Client: ClientForGrandpa, { let authority_set = authority_set.clone(); - let consensus_changes = consensus_changes.clone(); let client = client.clone(); let voters = voters.clone(); let justification_sender = justification_sender.clone(); @@ -96,14 +97,14 @@ where }, voter::CommunicationIn::CatchUp(..) 
=> { // ignore catch up messages - return future::ok(last_finalized_number); + return future::ok(last_finalized_number) }, }; // if the commit we've received targets a block lower or equal to the last // finalized, ignore it and continue with the current state if commit.target_number <= last_finalized_number { - return future::ok(last_finalized_number); + return future::ok(last_finalized_number) } let validation_result = match finality_grandpa::validate_commit( @@ -123,13 +124,13 @@ where match environment::finalize_block( client.clone(), &authority_set, - &consensus_changes, None, finalized_hash, finalized_number, (round, commit).into(), false, justification_sender.as_ref(), + telemetry.clone(), ) { Ok(_) => {}, Err(e) => return future::err(e), @@ -162,25 +163,24 @@ where /// already been instantiated with `block_import`. /// NOTE: this is currently not part of the crate's public API since we don't consider /// it stable enough to use on a live network. -#[allow(unused)] pub fn run_grandpa_observer( config: Config, link: LinkHalf, network: N, -) -> sp_blockchain::Result + Unpin + Send + 'static> +) -> sp_blockchain::Result + Send> where BE: Backend + Unpin + 'static, - N: NetworkT + Send + Clone + 'static, - SC: SelectChain + 'static, + N: NetworkT, + SC: SelectChain, NumberFor: BlockNumberOps, - Client: crate::ClientForGrandpa + 'static, + Client: ClientForGrandpa + 'static, { let LinkHalf { client, - select_chain: _, persistent_data, voter_commands_rx, justification_sender, + telemetry, .. } = link; @@ -189,22 +189,22 @@ where config.clone(), persistent_data.set_state.clone(), None, + telemetry.clone(), ); let observer_work = ObserverWork::new( - client, + client.clone(), network, persistent_data, config.keystore, voter_commands_rx, Some(justification_sender), + telemetry.clone(), ); - let observer_work = observer_work - .map_ok(|_| ()) - .map_err(|e| { - warn!("GRANDPA Observer failed: {:?}", e); - }); + let observer_work = observer_work.map_ok(|_| ()).map_err(|e| { + warn!("GRANDPA Observer failed: {:?}", e); + }); Ok(observer_work.map(drop)) } @@ -212,13 +212,15 @@ where /// Future that powers the observer. #[must_use] struct ObserverWork> { - observer: Pin>>> + Send>>, + observer: + Pin>>> + Send>>, client: Arc, network: NetworkBridge, persistent_data: PersistentData, keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, justification_sender: Option>, + telemetry: Option, _phantom: PhantomData, } @@ -226,7 +228,7 @@ impl ObserverWork where B: BlockT, BE: Backend + 'static, - Client: crate::ClientForGrandpa + 'static, + Client: ClientForGrandpa + 'static, Network: NetworkT, NumberFor: BlockNumberOps, { @@ -237,8 +239,8 @@ where keystore: Option, voter_commands_rx: TracingUnboundedReceiver>>, justification_sender: Option>, + telemetry: Option, ) -> Self { - let mut work = ObserverWork { // `observer` is set to a temporary value and replaced below when // calling `rebuild_observer`. 
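// ---------------------------------------------------------------------------
// Editor's note: the grandpa_observer body above folds incoming commits over
// the last finalized number — catch-up messages are ignored and any commit at
// or below the current finalized height is skipped before validation and
// finalization. A toy, synchronous model of that filtering (plain iterator
// and u64 in place of Stream and NumberFor<Block>; `Commit` is illustrative):
struct Commit {
    target_number: u64,
}

fn observe(commits: impl IntoIterator<Item = Commit>, mut last_finalized: u64) -> u64 {
    for commit in commits {
        // if the commit targets a block lower or equal to the last finalized,
        // ignore it and continue with the current state
        if commit.target_number <= last_finalized {
            continue
        }
        // (commit validation and the environment::finalize_block call elided)
        last_finalized = commit.target_number;
    }
    last_finalized
}
// ---------------------------------------------------------------------------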
@@ -249,6 +251,7 @@ where keystore: keystore.clone(), voter_commands_rx, justification_sender, + telemetry, _phantom: PhantomData, }; work.rebuild_observer(); @@ -282,23 +285,25 @@ where let network = self.network.clone(); let voters = voters.clone(); - move |round| network.note_round( - crate::communication::Round(round), - crate::communication::SetId(set_id), - &*voters, - ) + move |round| { + network.note_round( + crate::communication::Round(round), + crate::communication::SetId(set_id), + &*voters, + ) + } }; // create observer for the current set let observer = grandpa_observer( &self.client, &self.persistent_data.authority_set, - &self.persistent_data.consensus_changes, &voters, &self.justification_sender, last_finalized_number, global_in, note_round, + self.telemetry.clone(), ); self.observer = Box::pin(observer); @@ -326,7 +331,7 @@ where // set changed (not where the signal happened!) as the base. let set_state = VoterSetState::live( new.set_id, - &*self.persistent_data.authority_set.inner().read(), + &*self.persistent_data.authority_set.inner(), (new.canon_hash, new.canon_number), ); @@ -334,7 +339,8 @@ where set_state }, - }.into(); + } + .into(); self.rebuild_observer(); Ok(()) @@ -345,7 +351,7 @@ impl Future for ObserverWork where B: BlockT, BE: Backend + Unpin + 'static, - C: crate::ClientForGrandpa + 'static, + C: ClientForGrandpa + 'static, N: NetworkT, NumberFor: BlockNumberOps, { @@ -353,33 +359,34 @@ where fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { match Future::poll(Pin::new(&mut self.observer), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(Ok(())) => { - // observer commit stream doesn't conclude naturally; this could reasonably be an error. + // observer commit stream doesn't conclude naturally; this could reasonably be an + // error. return Poll::Ready(Ok(())) - } + }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error return Poll::Ready(Err(e)) - } + }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. 
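// ---------------------------------------------------------------------------
// Editor's note: ObserverWork::poll above drives two sources from one Future:
// the inner observer future and the voter command stream, self-waking after a
// command so the rebuilt observer is polled again promptly. A stripped-down
// sketch of that shape (assumes the futures 0.3 crate; the field types are
// placeholders, not the real observer or command channel):
use futures::{future::BoxFuture, stream::BoxStream, Stream};
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

struct Driver {
    inner: BoxFuture<'static, ()>,
    commands: BoxStream<'static, u32>,
}

impl Future for Driver {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        // Both fields are boxed and therefore Unpin, so we can project
        // through the Pin safely.
        let this = self.get_mut();

        // Give the inner future a chance to make progress first.
        if let Poll::Ready(()) = this.inner.as_mut().poll(cx) {
            return Poll::Ready(())
        }

        match Pin::new(&mut this.commands).poll_next(cx) {
            Poll::Pending => {},
            // A concluded command stream ends the driver, mirroring the
            // `voter_commands_rx` handling above.
            Poll::Ready(None) => return Poll::Ready(()),
            Poll::Ready(Some(_command)) => {
                // handle the command, then wake ourselves so the (possibly
                // rebuilt) inner future is polled on the next turn
                cx.waker().wake_by_ref();
            },
        }

        Poll::Pending
    }
}
// ---------------------------------------------------------------------------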
return Poll::Ready(Ok(())) - } + }, Poll::Ready(Some(command)) => { // some command issued externally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } Future::poll(Pin::new(&mut self.network), cx) @@ -390,12 +397,15 @@ where mod tests { use super::*; + use crate::{ + aux_schema, + communication::tests::{make_test_network, Event}, + }; use assert_matches::assert_matches; - use sp_utils::mpsc::tracing_unbounded; - use crate::{aux_schema, communication::tests::{Event, make_test_network}}; - use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; use sc_network::PeerId; + use sc_utils::mpsc::tracing_unbounded; use sp_blockchain::HeaderBackend as _; + use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; use futures::executor; @@ -423,12 +433,9 @@ mod tests { let voters = vec![(sp_keyring::Ed25519Keyring::Alice.public().into(), 1)]; - let persistent_data = aux_schema::load_persistent( - &*backend, - client.info().genesis_hash, - 0, - || Ok(voters), - ).unwrap(); + let persistent_data = + aux_schema::load_persistent(&*backend, client.info().genesis_hash, 0, || Ok(voters)) + .unwrap(); let (_tx, voter_command_rx) = tracing_unbounded(""); @@ -439,6 +446,7 @@ mod tests { None, voter_command_rx, None, + None, ); // Trigger a reputation change through the gossip validator. diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index c9d9f717cdcec..1aef7cd1b017a 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,37 +21,40 @@ use super::*; use assert_matches::assert_matches; use environment::HasVoted; +use futures::executor::block_on; +use futures_timer::Delay; +use parking_lot::{Mutex, RwLock}; +use sc_consensus::{ + BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, + ImportedAux, +}; +use sc_network::config::{ProtocolConfig, Role}; use sc_network_test::{ - Block, BlockImportAdapter, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, - TestClient, TestNetFactory, FullPeerConfig, + Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, + PeersFullClient, TestClient, TestNetFactory, }; -use sc_network::config::{ProtocolConfig, BoxFinalityProofRequestBuilder}; -use parking_lot::{RwLock, Mutex}; -use futures_timer::Delay; -use tokio::runtime::{Runtime, Handle}; -use sp_keyring::Ed25519Keyring; -use sc_client_api::backend::TransactionFor; +use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_blockchain::Result; -use sp_api::{ApiRef, StorageProof, ProvideRuntimeApi}; -use substrate_test_runtime_client::runtime::BlockNumber; -use sp_consensus::{ - BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, - import_queue::{BoxJustificationImport, BoxFinalityProofImport}, +use sp_consensus::BlockOrigin; +use sp_core::H256; +use sp_finality_grandpa::{ + AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, GRANDPA_ENGINE_ID, +}; +use sp_keyring::Ed25519Keyring; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::{BlockId, DigestItem}, + traits::{Block as BlockT, Header as HeaderT}, + 
Justifications, }; -use std::{collections::{HashMap, HashSet}, pin::Pin}; -use parity_scale_codec::Decode; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; -use sp_runtime::generic::{BlockId, DigestItem}; -use sp_core::{H256, crypto::Public}; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof}; -use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, +}; +use substrate_test_runtime_client::runtime::BlockNumber; +use tokio::runtime::{Handle, Runtime}; use authorities::AuthoritySet; -use finality_proof::{ - FinalityProofProvider, AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker, -}; -use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; use sc_keystore::LocalKeystore; @@ -60,7 +63,13 @@ use sp_application_crypto::key_types::GRANDPA; type TestLinkHalf = LinkHalf>; type PeerData = Mutex>; -type GrandpaPeer = Peer; +type GrandpaPeer = Peer; +type GrandpaBlockImport = crate::GrandpaBlockImport< + substrate_test_runtime_client::Backend, + Block, + PeersFullClient, + LongestChain, +>; struct GrandpaTestNet { peers: Vec, @@ -68,28 +77,40 @@ struct GrandpaTestNet { } impl GrandpaTestNet { - fn new(test_config: TestApi, n_peers: usize) -> Self { - let mut net = GrandpaTestNet { - peers: Vec::with_capacity(n_peers), - test_config, - }; - for _ in 0..n_peers { + fn new(test_config: TestApi, n_authority: usize, n_full: usize) -> Self { + let mut net = + GrandpaTestNet { peers: Vec::with_capacity(n_authority + n_full), test_config }; + + for _ in 0..n_authority { + net.add_authority_peer(); + } + + for _ in 0..n_full { net.add_full_peer(); } + net } } +impl GrandpaTestNet { + fn add_authority_peer(&mut self) { + self.add_full_peer_with_config(FullPeerConfig { + notifications_protocols: vec![communication::GRANDPA_PROTOCOL_NAME.into()], + is_authority: true, + ..Default::default() + }) + } +} + impl TestNetFactory for GrandpaTestNet { type Verifier = PassThroughVerifier; type PeerData = PeerData; + type BlockImport = GrandpaBlockImport; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { - GrandpaTestNet { - peers: Vec::new(), - test_config: Default::default(), - } + GrandpaTestNet { peers: Vec::new(), test_config: Default::default() } } fn default_config() -> ProtocolConfig { @@ -99,9 +120,8 @@ impl TestNetFactory for GrandpaTestNet { fn add_full_peer(&mut self) { self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![ - (communication::GRANDPA_ENGINE_ID, communication::GRANDPA_PROTOCOL_NAME.into()) - ], + notifications_protocols: vec![communication::GRANDPA_PROTOCOL_NAME.into()], + is_authority: false, ..Default::default() }) } @@ -115,68 +135,32 @@ impl TestNetFactory for GrandpaTestNet { PassThroughVerifier::new(false) // use non-instant finality. 
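// ---------------------------------------------------------------------------
// Editor's note: GrandpaTestNet::new above now takes separate authority and
// full-node counts, and add_authority_peer/add_full_peer differ only in the
// is_authority flag while both register the GRANDPA notifications protocol.
// A minimal model of that constructor split (Peer/TestNet are stand-ins for
// the sc-network-test types):
struct Peer {
    is_authority: bool,
}

struct TestNet {
    peers: Vec<Peer>,
}

impl TestNet {
    fn new(n_authority: usize, n_full: usize) -> Self {
        let mut net = TestNet { peers: Vec::with_capacity(n_authority + n_full) };
        for _ in 0..n_authority {
            // authority peers additionally run a GRANDPA voter in the tests
            net.peers.push(Peer { is_authority: true });
        }
        for _ in 0..n_full {
            net.peers.push(Peer { is_authority: false });
        }
        net
    }
}
// ---------------------------------------------------------------------------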
} - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - PeerData, - ) - { + fn make_block_import( + &self, + client: PeersClient, + ) -> (BlockImportAdapter, Option>, PeerData) { match client { PeersClient::Full(ref client, ref backend) => { let (import, link) = block_import( client.clone(), &self.test_config, LongestChain::new(backend.clone()), - ).expect("Could not create block import for fresh peer."); + None, + ) + .expect("Could not create block import for fresh peer."); let justification_import = Box::new(import.clone()); ( - BlockImportAdapter::new_full(import), + BlockImportAdapter::new(import), Some(justification_import), - None, - None, Mutex::new(Some(link)), ) }, - PeersClient::Light(ref client, ref backend) => { - use crate::light_import::tests::light_block_import_without_justifications; - - let authorities_provider = Arc::new(self.test_config.clone()); - // forbid direct finalization using justification that came with the block - // => light clients will try to fetch finality proofs - let import = light_block_import_without_justifications( - client.clone(), - backend.clone(), - &self.test_config, - authorities_provider, - ).expect("Could not create block import for fresh peer."); - let finality_proof_req_builder = import.0.create_finality_proof_request_builder(); - let proof_import = Box::new(import.clone()); - ( - BlockImportAdapter::new_light(import), - None, - Some(proof_import), - Some(finality_proof_req_builder), - Mutex::new(None), - ) + PeersClient::Light(..) => { + panic!("Light client is not used in tests."); }, } } - fn make_finality_proof_provider( - &self, - client: PeersClient - ) -> Option>> { - match client { - PeersClient::Full(_, ref backend) => { - Some(Arc::new(FinalityProofProvider::new(backend.clone(), self.test_config.clone()))) - }, - PeersClient::Light(_, _) => None, - } - } - fn peer(&mut self, i: usize) -> &mut GrandpaPeer { &mut self.peers[i] } @@ -197,9 +181,7 @@ pub(crate) struct TestApi { impl TestApi { pub fn new(genesis_authorities: AuthorityList) -> Self { - TestApi { - genesis_authorities, - } + TestApi { genesis_authorities } } } @@ -217,12 +199,14 @@ impl ProvideRuntimeApi for TestApi { sp_api::mock_impl_runtime_apis! 
{ impl GrandpaApi for RuntimeApi { - type Error = sp_blockchain::Error; - fn grandpa_authorities(&self) -> AuthorityList { self.inner.genesis_authorities.clone() } + fn current_set_id(&self) -> SetId { + 0 + } + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: EquivocationProof, _key_owner_proof: OpaqueKeyOwnershipProof, @@ -245,43 +229,6 @@ impl GenesisAuthoritySetProvider for TestApi { } } -impl AuthoritySetForFinalityProver for TestApi { - fn authorities(&self, _block: &BlockId) -> Result { - Ok(self.genesis_authorities.clone()) - } - - fn prove_authorities(&self, block: &BlockId) -> Result { - let authorities = self.authorities(block)?; - let backend = >>::from(vec![ - (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) - ]); - let proof = prove_read(backend, vec![b"authorities"]) - .expect("failure proving read from in-memory storage backend"); - Ok(proof) - } -} - -impl AuthoritySetForFinalityChecker for TestApi { - fn check_authorities_proof( - &self, - _hash: ::Hash, - header: ::Header, - proof: StorageProof, - ) -> Result { - let results = read_proof_check::, _>( - *header.state_root(), proof, vec![b"authorities"] - ) - .expect("failure checking read proof for authorities"); - let encoded = results.get(&b"authorities"[..]) - .expect("returned map must contain all proof keys") - .as_ref() - .expect("authorities in proof is None"); - let authorities = Decode::decode(&mut &encoded[..]) - .expect("failure decoding authorities read from proof"); - Ok(authorities) - } -} - const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { @@ -290,97 +237,109 @@ fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { fn create_keystore(authority: Ed25519Keyring) -> (SyncCryptoStorePtr, tempfile::TempDir) { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); SyncCryptoStore::ed25519_generate_new(&*keystore, GRANDPA, Some(&authority.to_seed())) .expect("Creates authority key"); (keystore, keystore_path) } -fn block_until_complete(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { +fn block_until_complete( + future: impl Future + Unpin, + net: &Arc>, + runtime: &mut Runtime, +) { let drive_to_completion = futures::future::poll_fn(|cx| { - net.lock().poll(cx); Poll::<()>::Pending + net.lock().poll(cx); + Poll::<()>::Pending }); - runtime.block_on( - future::select(future, drive_to_completion) - ); + runtime.block_on(future::select(future, drive_to_completion)); } -// run the voters to completion. provide a closure to be invoked after -// the voters are spawned but before blocking on them. -fn run_to_completion_with( - runtime: &mut Runtime, - blocks: u64, - net: Arc>, +// Spawns grandpa voters. Returns a future to spawn on the runtime. 
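// ---------------------------------------------------------------------------
// Editor's note: the mock_impl_runtime_apis! block above answers
// grandpa_authorities() with the stored genesis set and current_set_id() with
// 0, regardless of the queried block. Written out without the macro, the mock
// reduces to a trait impl of roughly this shape (trait and alias are
// illustrative stand-ins, not the real sp-finality-grandpa API surface):
type MockAuthorityList = Vec<([u8; 32], u64)>;

trait MockGrandpaApi {
    fn grandpa_authorities(&self) -> MockAuthorityList;
    fn current_set_id(&self) -> u64;
}

struct StaticApi {
    genesis_authorities: MockAuthorityList,
}

impl MockGrandpaApi for StaticApi {
    fn grandpa_authorities(&self) -> MockAuthorityList {
        // always report the genesis set — the tests drive authority-set
        // transitions through scheduled-change digests, not through this call
        self.genesis_authorities.clone()
    }

    fn current_set_id(&self) -> u64 {
        0
    }
}
// ---------------------------------------------------------------------------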
+fn initialize_grandpa( + net: &mut GrandpaTestNet, peers: &[Ed25519Keyring], - with: F, -) -> u64 where - F: FnOnce(Handle) -> Option>>> -{ - let mut wait_for = Vec::new(); - - let highest_finalized = Arc::new(RwLock::new(0)); +) -> impl Future { + let voters = stream::FuturesUnordered::new(); - if let Some(f) = (with)(runtime.handle().clone()) { - wait_for.push(f); - }; - - let mut keystore_paths = Vec::new(); for (peer_id, key) in peers.iter().enumerate() { - let (keystore, keystore_path) = create_keystore(*key); - keystore_paths.push(keystore_path); + let (keystore, _) = create_keystore(*key); - let highest_finalized = highest_finalized.clone(); - let (client, net_service, link) = { - let net = net.lock(); + let (net_service, link) = { // temporary needed for some reason - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].network_service().clone(), link) }; - wait_for.push( - Box::pin( - client.finality_notification_stream() - .take_while(move |n| { - let mut highest_finalized = highest_finalized.write(); - if *n.header.number() > *highest_finalized { - *highest_finalized = *n.header.number(); - } - future::ready(n.header.number() < &blocks) - }) - .collect::>() - .map(|_| ()) - ) - ); - - fn assert_send(_: &T) { } - let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, keystore: Some(keystore), name: Some(format!("peer#{}", peer_id)), - is_authority: true, + local_role: Role::Authority, observer_enabled: true, + telemetry: None, }, - link: link, + link, network: net_service, - telemetry_on_connect: None, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), + telemetry: None, }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + let voter = + run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + fn assert_send(_: &T) {} assert_send(&voter); - runtime.spawn(voter); + voters.push(voter); + } + + voters.for_each(|_| async move {}) +} + +// run the voters to completion. provide a closure to be invoked after +// the voters are spawned but before blocking on them. +fn run_to_completion_with( + runtime: &mut Runtime, + blocks: u64, + net: Arc>, + peers: &[Ed25519Keyring], + with: F, +) -> u64 +where + F: FnOnce(Handle) -> Option>>>, +{ + let mut wait_for = Vec::new(); + + let highest_finalized = Arc::new(RwLock::new(0)); + + if let Some(f) = (with)(runtime.handle().clone()) { + wait_for.push(f); + }; + + for (peer_id, _) in peers.iter().enumerate() { + let highest_finalized = highest_finalized.clone(); + let client = net.lock().peers[peer_id].client().clone(); + + wait_for.push(Box::pin( + client + .finality_notification_stream() + .take_while(move |n| { + let mut highest_finalized = highest_finalized.write(); + if *n.header.number() > *highest_finalized { + *highest_finalized = *n.header.number(); + } + future::ready(n.header.number() < &blocks) + }) + .collect::>() + .map(|_| ()), + )); } // wait for all finalized on each. 
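// ---------------------------------------------------------------------------
// Editor's note: run_to_completion_with above turns each peer's finality
// notification stream into a future that ends once `blocks` is finalized,
// recording the highest number seen behind an Arc<RwLock<_>>. The same
// take_while/collect shape over a plain number stream (futures 0.3 and
// parking_lot assumed; no Substrate types):
use futures::{executor::block_on, future, stream, FutureExt, StreamExt};
use parking_lot::RwLock;
use std::sync::Arc;

fn wait_until(finalized: impl IntoIterator<Item = u64>, blocks: u64) -> u64 {
    let highest = Arc::new(RwLock::new(0u64));

    let tracker = highest.clone();
    let wait_for = stream::iter(finalized)
        .take_while(move |n| {
            // remember the highest finalized number observed so far
            let mut h = tracker.write();
            if *n > *h {
                *h = *n;
            }
            // keep waiting while we are below the target height
            future::ready(*n < blocks)
        })
        .collect::<Vec<_>>()
        .map(|_| ());

    block_on(wait_for);
    let result = *highest.read();
    result
}
// ---------------------------------------------------------------------------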
@@ -395,7 +354,7 @@ fn run_to_completion( runtime: &mut Runtime, blocks: u64, net: Arc>, - peers: &[Ed25519Keyring] + peers: &[Ed25519Keyring], ) -> u64 { run_to_completion_with(runtime, blocks, net, peers, |_| None) } @@ -425,13 +384,13 @@ fn finalize_3_voters_no_observers() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0); + runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(20, false); net.block_until_sync(); for i in 0..3 { - assert_eq!(net.peer(i).client().info().best_number, 20, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -439,7 +398,12 @@ fn finalize_3_voters_no_observers() { // normally there's no justification for finalized blocks assert!( - net.lock().peer(0).client().justification(&BlockId::Number(20)).unwrap().is_none(), + net.lock() + .peer(0) + .client() + .justifications(&BlockId::Number(20)) + .unwrap() + .is_none(), "Extra justification for block#1", ); } @@ -451,116 +415,135 @@ fn finalize_3_voters_1_full_observer() { let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - let net = Arc::new(Mutex::new(net)); - let mut finality_notifications = Vec::new(); - - let all_peers = peers.iter() - .cloned() - .map(Some) - .chain(std::iter::once(None)); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); + runtime.spawn(initialize_grandpa(&mut net, peers)); - let mut keystore_paths = Vec::new(); - - let mut voters = Vec::new(); - - for (peer_id, local_key) in all_peers.enumerate() { - let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &20)) - .for_each(move |_| future::ready(())) - ); - - let keystore = if let Some(local_key) = local_key { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - Some(keystore) - } else { - None - }; + runtime.spawn({ + let peer_id = 3; + let net_service = net.peers[peer_id].network_service().clone(); + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, - keystore, + keystore: None, name: Some(format!("peer#{}", peer_id)), - is_authority: true, + local_role: Role::Authority, observer_enabled: true, + telemetry: None, }, - link: link, + link, network: net_service, - telemetry_on_connect: None, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), + telemetry: None, }; - voters.push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); - } + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); - for voter in voters { - runtime.spawn(voter); + net.peer(0).push_blocks(20, false); + + let net = 
Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + for peer_id in 0..4 { + let client = net.lock().peers[peer_id].client().clone(); + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &20)) + .for_each(move |_| future::ready(())), + ); } // wait for all finalized on each. - let wait_for = futures::future::join_all(finality_notifications) - .map(|_| ()); + let wait_for = futures::future::join_all(finality_notifications).map(|_| ()); block_until_complete(wait_for, &net, &mut runtime); + + // all peers should have stored the justification for the best finalized block #20 + for peer_id in 0..4 { + let client = net.lock().peers[peer_id].client().as_full().unwrap(); + let justification = + crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); + + assert_eq!(justification.commit.target_number, 20); + } } #[test] fn transition_3_voters_twice_1_full_observer() { sp_tracing::try_init_simple(); - let peers_a = &[ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ]; + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let peers_b = &[ - Ed25519Keyring::Dave, - Ed25519Keyring::Eve, - Ed25519Keyring::Ferdie, - ]; + let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; - let peers_c = &[ - Ed25519Keyring::Alice, - Ed25519Keyring::Eve, - Ed25519Keyring::Two, - ]; + let peers_c = &[Ed25519Keyring::Alice, Ed25519Keyring::Eve, Ed25519Keyring::Two]; let observer = &[Ed25519Keyring::One]; + let all_peers = peers_a + .iter() + .chain(peers_b) + .chain(peers_c) + .chain(observer) + .cloned() + .collect::>(); // deduplicate + let genesis_voters = make_ids(peers_a); let api = TestApi::new(genesis_voters); - let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); + let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8, 1))); let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); + let mut voters = Vec::new(); + for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { + let (keystore, keystore_path) = create_keystore(local_key); + keystore_paths.push(keystore_path); + + let (net_service, link) = { + let net = net.lock(); + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].network_service().clone(), link) + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + }, + link, + network: net_service, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + voters + .push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); + } + net.lock().peer(0).push_blocks(1, false); net.lock().block_until_sync(); for (i, peer) in net.lock().peers().iter().enumerate() { let full_client = peer.client().as_full().expect("only full clients are used in test"); - assert_eq!(full_client.chain_info().best_number, 1, - "Peer #{} failed to sync", i); + assert_eq!(full_client.chain_info().best_number, 1, "Peer #{} failed to sync", i); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let set: AuthoritySet = + 
crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -572,7 +555,8 @@ fn transition_3_voters_twice_1_full_observer() { let peers_c = peers_c.clone(); // wait for blocks to be finalized before generating new ones - let block_production = client.finality_notification_stream() + let block_production = client + .finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &30)) .for_each(move |n| { match n.header.number() { @@ -584,10 +568,10 @@ fn transition_3_voters_twice_1_full_observer() { // generate transition at block 15, applied at 20. net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 4, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 4 }, + ); block }); @@ -598,10 +582,10 @@ fn transition_3_voters_twice_1_full_observer() { // add more until we have 30. net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(&peers_c), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(&peers_c), delay: 0 }, + ); block }); @@ -617,62 +601,27 @@ fn transition_3_voters_twice_1_full_observer() { } let mut finality_notifications = Vec::new(); - let all_peers = peers_a.iter() - .chain(peers_b) - .chain(peers_c) - .chain(observer) - .cloned() - .collect::>() // deduplicate - .into_iter() - .enumerate(); - let mut keystore_paths = Vec::new(); - for (peer_id, local_key) in all_peers { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - - let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; + for voter in voters { + runtime.spawn(voter); + } + for (peer_id, _) in all_peers.into_iter().enumerate() { + let client = net.lock().peers[peer_id].client().clone(); finality_notifications.push( - client.finality_notification_stream() + client + .finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &30)) .for_each(move |_| future::ready(())) .map(move |()| { let full_client = client.as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); assert_eq!(set.pending_changes().count(), 0); - }) + }), ); - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - telemetry_on_connect: None, - voting_rule: (), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - - runtime.spawn(voter); } // wait for all 
finalized on each. @@ -681,31 +630,14 @@ fn transition_3_voters_twice_1_full_observer() { block_until_complete(wait_for, &net, &mut runtime); } -#[test] -fn justification_is_emitted_when_consensus_data_changes() { - let mut runtime = Runtime::new().unwrap(); - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); - - // import block#1 WITH consensus data change - let new_authorities = vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]; - net.peer(0).push_authorities_change_block(new_authorities); - net.block_until_sync(); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - - // ... and check that there's justification for block#1 - assert!(net.lock().peer(0).client().justification(&BlockId::Number(1)).unwrap().is_some(), - "Missing justification for block#1"); -} - #[test] fn justification_is_generated_periodically() { let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0); + runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(32, false); net.block_until_sync(); @@ -715,29 +647,16 @@ fn justification_is_generated_periodically() { // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net.lock().peer(i).client().justification(&BlockId::Number(32)).unwrap().is_some()); + assert!(net + .lock() + .peer(i) + .client() + .justifications(&BlockId::Number(32)) + .unwrap() + .is_some()); } } -#[test] -fn consensus_changes_works() { - let mut changes = ConsensusChanges::::empty(); - - // pending changes are not finalized - changes.note_change((10, H256::from_low_u64_be(1))); - assert_eq!(changes.finalize((5, H256::from_low_u64_be(5)), |_| Ok(None)).unwrap(), (false, false)); - - // no change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1001)))).unwrap(), (true, false)); - - // change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1)))).unwrap(), (true, true)); -} - #[test] fn sync_justifications_on_change_blocks() { let mut runtime = Runtime::new().unwrap(); @@ -747,7 +666,8 @@ fn sync_justifications_on_change_blocks() { // 4 peers, 3 of them are authorities and participate in grandpa let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api, 4); + let mut net = GrandpaTestNet::new(api, 3, 1); + let voters = initialize_grandpa(&mut net, peers_a); // add 20 blocks net.peer(0).push_blocks(20, false); @@ -755,10 +675,10 @@ fn sync_justifications_on_change_blocks() { // at block 21 we do add a transition which is instant net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: 
make_ids(peers_b), delay: 0 }, + ); block }); @@ -767,22 +687,35 @@ fn sync_justifications_on_change_blocks() { net.block_until_sync(); for i in 0..4 { - assert_eq!(net.peer(i).client().info().best_number, 25, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 25, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); + runtime.spawn(voters); run_to_completion(&mut runtime, 25, net.clone(), peers_a); // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification for i in 0..3 { - assert!(net.lock().peer(i).client().justification(&BlockId::Number(21)).unwrap().is_some()); + assert!(net + .lock() + .peer(i) + .client() + .justifications(&BlockId::Number(21)) + .unwrap() + .is_some()); } // the last peer should get the justification by syncing from other peers futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(3).client().justification(&BlockId::Number(21)).unwrap().is_none() { + if net + .lock() + .peer(3) + .client() + .justifications(&BlockId::Number(21)) + .unwrap() + .is_none() + { net.lock().poll(cx); Poll::Pending } else { @@ -801,14 +734,21 @@ fn finalizes_multiple_pending_changes_in_order() { let peers_c = &[Ed25519Keyring::Dave, Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let all_peers = &[ - Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie, - Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie, + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + Ed25519Keyring::Dave, + Ed25519Keyring::Eve, + Ed25519Keyring::Ferdie, ]; let genesis_voters = make_ids(peers_a); // 6 peers, 3 of them are authorities and participate in grandpa from genesis + // but all of them will be part of the voter set eventually so they should be + // all added to the network as authorities let api = TestApi::new(genesis_voters); - let mut net = GrandpaTestNet::new(api, 6); + let mut net = GrandpaTestNet::new(api, 6, 0); + runtime.spawn(initialize_grandpa(&mut net, all_peers)); // add 20 blocks net.peer(0).push_blocks(20, false); @@ -816,10 +756,10 @@ fn finalizes_multiple_pending_changes_in_order() { // at block 21 we do add a transition which is instant net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); block }); @@ -829,10 +769,10 @@ fn finalizes_multiple_pending_changes_in_order() { // at block 26 we add another which is enacted at block 30 net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_c), - delay: 4, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_c), delay: 4 }, + ); block }); @@ -843,8 +783,7 @@ fn finalizes_multiple_pending_changes_in_order() { // all peers imported both change blocks for i in 0..6 { - assert_eq!(net.peer(i).client().info().best_number, 30, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 30, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -867,23 +806,25 @@ fn force_change_to_new_set() { let api = TestApi::new(make_ids(genesis_authorities)); let voters = 
make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); + let mut net = GrandpaTestNet::new(api, 3, 0); + let voters_future = initialize_grandpa(&mut net, peers_a); let net = Arc::new(Mutex::new(net)); net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; // add a forced transition at block 12. - add_forced_change(&mut block, 0, ScheduledChange { - next_authorities: voters.clone(), - delay: 10, - }); + add_forced_change( + &mut block, + 0, + ScheduledChange { next_authorities: voters.clone(), delay: 10 }, + ); // add a normal transition too to ensure that forced changes take priority. - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(genesis_authorities), - delay: 5, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(genesis_authorities), delay: 5 }, + ); block }); @@ -892,11 +833,11 @@ fn force_change_to_new_set() { net.lock().block_until_sync(); for (i, peer) in net.lock().peers().iter().enumerate() { - assert_eq!(peer.client().info().best_number, 26, - "Peer #{} failed to sync", i); + assert_eq!(peer.client().info().best_number, 26, "Peer #{} failed to sync", i); let full_client = peer.client().as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (1, voters.as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -905,6 +846,7 @@ fn force_change_to_new_set() { // it will only finalize if the forced transition happens. // we add_blocks after the voters are spawned because otherwise // the link-halves have the wrong AuthoritySet + runtime.spawn(voters_future); run_to_completion(&mut runtime, 25, net, peers_a); } @@ -914,22 +856,20 @@ fn allows_reimporting_change_blocks() { let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_a); let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 3); + let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >( - client.clone(), - ); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_full().unwrap(); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); let block = || { let block = block.clone(); @@ -941,19 +881,18 @@ fn allows_reimporting_change_blocks() { }; assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: false, - needs_finality_proof: false, is_new_best: true, header_only: false, }), ); assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); } @@ -964,28 +903,26 @@ fn test_bad_justification() { let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_a); let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 3); + let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >( - client.clone(), - ); + let (mut block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); let block = || { let block = block.clone(); let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.justification = Some(Vec::new()); + import.justifications = Some(Justifications::from((GRANDPA_ENGINE_ID, Vec::new()))); import.body = Some(block.extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); @@ -993,7 +930,7 @@ fn test_bad_justification() { }; assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, @@ -1004,19 +941,19 @@ fn test_bad_justification() { ); assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); } #[test] fn voter_persists_its_votes() { - use std::sync::atomic::{AtomicUsize, Ordering}; use futures::future; - use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; + use std::sync::atomic::{AtomicUsize, Ordering}; sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); // we have two authorities but we'll only be running the voter for 
alice // we are going to be listening for the prevotes it casts @@ -1024,153 +961,145 @@ fn voter_persists_its_votes() { let voters = make_ids(peers); // alice has a chain with 20 blocks - let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - assert_eq!(net.peer(0).client().info().best_number, 20, - "Peer #{} failed to sync", 0); - - - let peer = net.peer(0); - let client = peer.client().clone(); - let net = Arc::new(Mutex::new(net)); - - // channel between the voter and the main controller. - // sending a message on the `voter_tx` restarts the voter. - let (voter_tx, voter_rx) = tracing_unbounded::<()>(""); - - let mut keystore_paths = Vec::new(); - - // startup a grandpa voter for alice but also listen for messages on a - // channel. whenever a message is received the voter is restarted. when the - // sender is dropped the voter is stopped. - { - let (keystore, keystore_path) = create_keystore(peers[0]); - keystore_paths.push(keystore_path); - - struct ResettableVoter { - voter: Pin + Send + Unpin>>, - voter_rx: TracingUnboundedReceiver<()>, - net: Arc>, - client: PeersClient, - keystore: SyncCryptoStorePtr, - } - - impl Future for ResettableVoter { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = Pin::into_inner(self); - - if let Poll::Ready(()) = Pin::new(&mut this.voter).poll(cx) { - panic!("error in the voter"); - } - - match Pin::new(&mut this.voter_rx).poll_next(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Ready(Some(())) => { - let (_block_import, _, _, _, link) = - this.net.lock() - .make_block_import::< - TransactionFor - >(this.client.clone()); - let link = link.lock().take().unwrap(); - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(this.keystore.clone()), - name: Some(format!("peer#{}", 0)), - is_authority: true, - observer_enabled: true, - }, - link, - network: this.net.lock().peers[0].network_service().clone(), - telemetry_on_connect: None, - voting_rule: VotingRulesBuilder::default().build(), - prometheus_registry: None, - shared_voter_state: SharedVoterState::empty(), - }; - - let voter = run_grandpa_voter(grandpa_params) - .expect("all in order with client and network") - .map(move |r| { - // we need to keep the block_import alive since it owns the - // sender for the voter commands channel, if that gets dropped - // then the voter will stop - drop(_block_import); - r - }); - - this.voter = Box::pin(voter); - // notify current task in order to poll the voter - cx.waker().wake_by_ref(); - } - }; - - Poll::Pending - } - } - - // we create a "dummy" voter by setting it to `pending` and triggering the `tx`. - // this way, the `ResettableVoter` will reset its `voter` field to a value ASAP. - voter_tx.unbounded_send(()).unwrap(); - runtime.spawn(ResettableVoter { - voter: Box::pin(futures::future::pending()), - voter_rx, - net: net.clone(), - client: client.clone(), - keystore, - }); - } - - let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); + let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2, 0); // create the communication layer for bob, but don't start any // voter. 
instead we'll listen for the prevote that alice casts // and cast our own manually - { + let bob_keystore = { let (keystore, keystore_path) = create_keystore(peers[1]); keystore_paths.push(keystore_path); - + keystore + }; + let bob_network = { let config = Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, - keystore: Some(keystore.clone()), + keystore: Some(bob_keystore.clone()), name: Some(format!("peer#{}", 1)), - is_authority: true, + local_role: Role::Authority, observer_enabled: true, + telemetry: None, }; let set_state = { - let (_, _, _, _, link) = net.lock() - .make_block_import::< - TransactionFor - >(client); + let bob_client = net.peer(1).client().clone(); + let (_, _, link) = net.make_block_import(bob_client); let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); let PersistentData { set_state, .. } = persistent_data; set_state }; - let network = communication::NetworkBridge::new( - net.lock().peers[1].network_service().clone(), + communication::NetworkBridge::new( + net.peers[1].network_service().clone(), config.clone(), set_state, None, - ); + None, + ) + }; + + // spawn two voters for alice. + // half-way through the test, we stop one and start the other. + let (alice_voter1, abort) = future::abortable({ + let (keystore, _) = create_keystore(peers[0]); - let (round_rx, round_tx) = network.round_communication( - Some((peers[1].public().into(), keystore).into()), + let (net_service, link) = { + // temporary needed for some reason + let link = net.peers[0].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[0].network_service().clone(), link) + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + }, + link, + network: net_service, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); + + fn alice_voter2( + peers: &[Ed25519Keyring], + net: Arc>, + ) -> impl Future + Send { + let (keystore, _) = create_keystore(peers[0]); + let mut net = net.lock(); + + // we add a new peer to the test network and we'll use + // the network service of this new peer + net.add_authority_peer(); + let net_service = net.peers[2].network_service().clone(); + // but we'll reuse the client from the first peer (alice_voter1) + // since we want to share the same database, so that we can + // read the persisted state after aborting alice_voter1. 
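// ---------------------------------------------------------------------------
// Editor's note: the rewritten test above replaces the old restart channel
// with futures::future::abortable — alice_voter1 is aborted half-way through
// and alice_voter2 is spawned against the same database to check that votes
// were persisted. The abort mechanics in isolation (futures 0.3 assumed):
use futures::executor::block_on;
use futures::future;

fn abortable_demo() {
    // wrap a never-ending future; `handle` can cancel it from outside
    let (task, handle) = future::abortable(future::pending::<()>());
    handle.abort();
    // an aborted task resolves to Err(Aborted) instead of staying pending
    assert!(block_on(task).is_err());
}
// ---------------------------------------------------------------------------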
+ let alice_client = net.peer(0).client().clone(); + + let (_block_import, _, link) = net.make_block_import(alice_client); + let link = link.lock().take().unwrap(); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + }, + link, + network: net_service, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + run_grandpa_voter(grandpa_params) + .expect("all in order with client and network") + .map(move |r| { + // we need to keep the block_import alive since it owns the + // sender for the voter commands channel; if that gets dropped + // then the voter will stop + drop(_block_import); + r + }) + } + + runtime.spawn(alice_voter1); + + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + assert_eq!(net.peer(0).client().info().best_number, 20, "Peer #{} failed to sync", 0); + + let net = Arc::new(Mutex::new(net)); + + let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); + + { + let (round_rx, round_tx) = bob_network.round_communication( + Some((peers[1].public().into(), bob_keystore).into()), communication::Round(1), communication::SetId(0), Arc::new(VoterSet::new(voters).unwrap()), HasVoted::No, ); - runtime.spawn(network); + runtime.spawn(bob_network); let round_tx = Arc::new(Mutex::new(round_tx)); let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); @@ -1178,16 +1107,18 @@ fn voter_persists_its_votes() { let net = net.clone(); let state = Arc::new(AtomicUsize::new(0)); + let runtime_handle = runtime.handle().clone(); runtime.spawn(round_rx.for_each(move |signed| { let net2 = net.clone(); let net = net.clone(); - let voter_tx = voter_tx.clone(); + let abort = abort.clone(); let round_tx = round_tx.clone(); let state = state.clone(); let exit_tx = exit_tx.clone(); + let runtime_handle = runtime_handle.clone(); async move { - if state.compare_and_swap(0, 1, Ordering::SeqCst) == 0 { + // `unwrap_or_else(|x| x)` yields the previous value on success and failure + // alike, which is exactly what `compare_and_swap` used to return; a plain + // `unwrap()` would panic as soon as the exchange fails + if state.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst).unwrap_or_else(|x| x) == 0 { // the first message we receive should be a prevote from alice.
let prevote = match signed.message { finality_grandpa::Message::Prevote(prevote) => prevote, @@ -1196,17 +1127,18 @@ fn voter_persists_its_votes() { // its chain has 20 blocks and the voter targets 3/4 of the // unfinalized chain, so the vote should be for block 15 - assert!(prevote.target_number == 15); + assert_eq!(prevote.target_number, 15); // we push 20 more blocks to alice's chain net.lock().peer(0).push_blocks(20, false); - let interval = futures::stream::unfold(Delay::new(Duration::from_millis(200)), |delay| - Box::pin(async move { - delay.await; - Some(((), Delay::new(Duration::from_millis(200)))) - }) - ); + let interval = + futures::stream::unfold(Delay::new(Duration::from_millis(200)), |delay| { + Box::pin(async move { + delay.await; + Some(((), Delay::new(Duration::from_millis(200)))) + }) + }); interval .take_while(move |_| { @@ -1219,20 +1151,23 @@ fn voter_persists_its_votes() { net.lock().peer(0).client().as_full().unwrap().hash(30).unwrap().unwrap(); // we restart alice's voter - voter_tx.unbounded_send(()).unwrap(); + abort.abort(); + runtime_handle.spawn(alice_voter2(peers, net.clone())); // and we push our own prevote for block 30 - let prevote = finality_grandpa::Prevote { - target_number: 30, - target_hash: block_30_hash, - }; + let prevote = + finality_grandpa::Prevote { target_number: 30, target_hash: block_30_hash }; // One should either be calling `Sink::send` or `Sink::start_send` followed // by `Sink::poll_complete` to make sure items are being flushed. Given that // we send in a loop including a delay until items are received, this can be // ignored for the sake of reduced complexity. - Pin::new(&mut *round_tx.lock()).start_send(finality_grandpa::Message::Prevote(prevote)).unwrap(); + Pin::new(&mut *round_tx.lock()) + .start_send(finality_grandpa::Message::Prevote(prevote)) + .unwrap(); - } else if state.compare_and_swap(1, 2, Ordering::SeqCst) == 1 { + } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap_or_else(|x| x) == + 1 + { // the next message we receive should be our own prevote let prevote = match signed.message { finality_grandpa::Message::Prevote(prevote) => prevote, @@ -1242,11 +1177,12 @@ fn voter_persists_its_votes() { // targeting block 30 assert!(prevote.target_number == 30); - // after alice restarts it should send its previous prevote - // therefore we won't ever receive it again since it will be a - // known message on the gossip layer - - } else if state.compare_and_swap(2, 3, Ordering::SeqCst) == 2 { + // after alice restarts it should send its previous prevote + // therefore we won't ever receive it again since it will be a + // known message on the gossip layer + } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap_or_else(|x| x) == + 2 + { // we then receive a precommit from alice for block 15 // even though we cast a prevote for block 30 let precommit = match signed.message { @@ -1275,169 +1211,70 @@ fn finalize_3_voters_1_light_observer() { let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(authorities); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); + let voters = initialize_grandpa(&mut net, authorities); + let observer = observer::run_grandpa_observer( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: None, + name: Some("observer".to_string()), + local_role: Role::Full, + observer_enabled: true, + telemetry: None, + },
+ net.peers[3].data.lock().take().expect("link initialized at startup; qed"), + net.peers[3].network_service().clone(), + ) + .unwrap(); net.peer(0).push_blocks(20, false); net.block_until_sync(); for i in 0..4 { - assert_eq!(net.peer(i).client().info().best_number, 20, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); - let link = net.lock().peer(3).data.lock().take().expect("link initialized on startup; qed"); - - let finality_notifications = net.lock().peer(3).client().finality_notification_stream() - .take_while(|n| { - future::ready(n.header.number() < &20) - }) - .collect::>(); - - run_to_completion_with(&mut runtime, 20, net.clone(), authorities, |executor| { - executor.spawn( - observer::run_grandpa_observer( - Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: None, - name: Some("observer".to_string()), - is_authority: false, - observer_enabled: true, - }, - link, - net.lock().peers[3].network_service().clone(), - ).unwrap() - ); - - Some(Box::pin(finality_notifications.map(|_| ()))) - }); -} - -#[test] -fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { - sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); - - let peers = &[Ed25519Keyring::Alice]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); - net.add_light_peer(); - - // import block#1 WITH consensus data change. Light client ignores justification - // && instead fetches finality proof for block #1 - net.peer(0).push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - net.lock().block_until_sync(); - - // check that the block#1 is finalized on light client - runtime.block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(1).client().info().finalized_number == 1 { - Poll::Ready(()) - } else { - net.lock().poll(cx); - Poll::Pending - } - })); -} - -#[test] -fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different() { - // for debug: to ensure that without forced change light client will sync finality proof - const FORCE_CHANGE: bool = true; - - sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); - - // two of these guys are offline. - let genesis_authorities = if FORCE_CHANGE { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - Ed25519Keyring::One, - Ed25519Keyring::Two, - ] - } else { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ] - }; - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let api = TestApi::new(make_ids(&genesis_authorities)); - - let voters = make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); - let net = Arc::new(Mutex::new(net)); - - // best is #1 - net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - // add a forced transition at block 5. 
- let mut block = builder.build().unwrap().block; - if FORCE_CHANGE { - add_forced_change(&mut block, 0, ScheduledChange { - next_authorities: voters.clone(), - delay: 3, - }); - } - block - }); - - // ensure block#10 enacts authorities set change => justification is generated - // normally it will reach light client, but because of the forced change, it will not - net.lock().peer(0).push_blocks(8, false); // best is #9 - net.lock().peer(0).push_authorities_change_block( - vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])] - ); // #10 - net.lock().peer(0).push_blocks(1, false); // best is #11 - net.lock().block_until_sync(); - - // finalize block #11 on full clients - run_to_completion(&mut runtime, 11, net.clone(), peers_a); - // request finalization by light client - net.lock().add_light_peer(); - net.lock().block_until_sync(); - - // check block, finalized on light client - assert_eq!( - net.lock().peer(3).client().info().finalized_number, - if FORCE_CHANGE { 0 } else { 10 }, - ); + runtime.spawn(voters); + runtime.spawn(observer); + run_to_completion(&mut runtime, 20, net.clone(), authorities); } #[test] fn voter_catches_up_to_latest_round_when_behind() { sp_tracing::try_init_simple(); - let mut runtime = Runtime::new().unwrap(); + let runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(50, false); - net.block_until_sync(); + let net = GrandpaTestNet::new(TestApi::new(voters), 2, 0); let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); - let voter = |keystore, peer_id, link, net: Arc>| -> Pin + Send>> { + let voter = |keystore, + peer_id, + link, + net: Arc>| + -> Pin + Send>> { let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, keystore, name: Some(format!("peer#{}", peer_id)), - is_authority: true, + local_role: Role::Authority, observer_enabled: true, + telemetry: None, }, link, network: net.lock().peer(peer_id).network_service().clone(), - telemetry_on_connect: None, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), + telemetry: None, }; Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) @@ -1449,17 +1286,16 @@ fn voter_catches_up_to_latest_round_when_behind() { for (peer_id, key) in peers.iter().enumerate() { let (client, link) = { let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - link, - ) + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].client().clone(), link) }; finality_notifications.push( - client.finality_notification_stream() + client + .finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &50)) - .for_each(move |_| future::ready(())) + .for_each(move |_| future::ready(())), ); let (keystore, keystore_path) = create_keystore(*key); @@ -1470,6 +1306,9 @@ fn voter_catches_up_to_latest_round_when_behind() { runtime.spawn(voter); } + net.lock().peer(0).push_blocks(50, false); + net.lock().block_until_sync(); + // wait for them to finalize block 50. since they'll vote on 3/4 of the // unfinalized chain it will take at least 4 rounds to do it. 
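// One way to sanity-check the "at least 4 rounds" estimate above: iterate a
// 3/4-of-the-unfinalized-chain target (rounding up) and count the voting
// rounds needed to reach block 50; the commit that actually finalizes it can
// only land afterwards. A standalone sketch, names illustrative and not part
// of the test:

fn rounds_to_reach(best: u64) -> u64 {
    let (mut finalized, mut rounds) = (0u64, 0u64);
    while finalized < best {
        // vote for 3/4 of the unfinalized chain, rounding up
        finalized += ((best - finalized) * 3 + 3) / 4;
        rounds += 1;
    }
    rounds
}

#[test]
fn three_quarters_targets_reach_block_50_in_three_votes() {
    // targets go 0 -> 38 -> 47 -> 50, i.e. three rounds of voting, with
    // finalization of block 50 landing no earlier than the round after
    assert_eq!(rounds_to_reach(50), 3);
}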
let wait_for_finality = ::futures::future::join_all(finality_notifications); @@ -1481,18 +1320,15 @@ fn voter_catches_up_to_latest_round_when_behind() { let runtime = runtime.handle().clone(); wait_for_finality.then(move |_| { - let peer_id = 2; + net.lock().add_authority_peer(); + let link = { let net = net.lock(); - let mut link = net.peers[peer_id].data.lock(); + let mut link = net.peers[2].data.lock(); link.take().expect("link initialized at startup; qed") }; - let set_state = link.persistent_data.set_state.clone(); - - let voter = voter(None, peer_id, link, net); - - runtime.spawn(voter); + runtime.spawn(voter(None, 2, link, net.clone())); let start_time = std::time::Instant::now(); let timeout = Duration::from_secs(5 * 60); @@ -1514,11 +1350,10 @@ fn voter_catches_up_to_latest_round_when_behind() { }; let drive_to_completion = futures::future::poll_fn(|cx| { - net.lock().poll(cx); Poll::<()>::Pending + net.lock().poll(cx); + Poll::<()>::Pending }); - runtime.block_on( - future::select(test, drive_to_completion) - ); + runtime.block_on(future::select(test, drive_to_completion)); } type TestEnvironment = Environment< @@ -1540,33 +1375,24 @@ where N: NetworkT, VR: VotingRule, { - let PersistentData { - ref authority_set, - ref consensus_changes, - ref set_state, - .. - } = link.persistent_data; + let PersistentData { ref authority_set, ref set_state, .. } = link.persistent_data; let config = Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, keystore, name: None, - is_authority: true, + local_role: Role::Authority, observer_enabled: true, + telemetry: None, }; - let network = NetworkBridge::new( - network_service.clone(), - config.clone(), - set_state.clone(), - None, - ); + let network = + NetworkBridge::new(network_service.clone(), config.clone(), set_state.clone(), None, None); Environment { authority_set: authority_set.clone(), config: config.clone(), - consensus_changes: consensus_changes.clone(), client: link.client.clone(), select_chain: link.select_chain.clone(), set_id: authority_set.set_id(), @@ -1576,18 +1402,19 @@ where voting_rule, metrics: None, justification_sender: None, + telemetry: None, _phantom: PhantomData, } } #[test] fn grandpa_environment_respects_voting_rules() { - use finality_grandpa::Chain; + use finality_grandpa::voter::Environment; let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); @@ -1617,25 +1444,28 @@ fn grandpa_environment_respects_voting_rules() { // the unrestricted environment should just return the best block assert_eq!( - unrestricted_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, + block_on(unrestricted_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 21, ); // both the other environments should return block 16, which is 3/4 of the // way in the unfinalized chain assert_eq!( - three_quarters_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, + block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 16, ); assert_eq!( - default_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, + block_on(default_env.best_chain_containing(peer.client().info().finalized_hash)) + 
.unwrap() + .unwrap() + .1, 16, ); @@ -1644,18 +1474,20 @@ fn grandpa_environment_respects_voting_rules() { // the 3/4 environment should propose block 21 for voting assert_eq!( - three_quarters_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, + block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 21, ); // while the default environment will always still make sure we don't vote // on the best block (2 behind) assert_eq!( - default_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, + block_on(default_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 19, ); @@ -1666,9 +1498,10 @@ fn grandpa_environment_respects_voting_rules() { // best block, there's a hard rule that we can't cast any votes lower than // the given base (#21). assert_eq!( - default_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, + block_on(default_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 21, ); } @@ -1680,7 +1513,7 @@ fn grandpa_environment_never_overwrites_round_voter_state() { let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); - let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); @@ -1707,18 +1540,18 @@ fn grandpa_environment_never_overwrites_round_voter_state() { assert_eq!(get_current_round(2), None); // after completing round 1 we should start tracking round 2 - environment - .completed(1, round_state(), base(), &historical_votes()) - .unwrap(); + environment.completed(1, round_state(), base(), &historical_votes()).unwrap(); assert_eq!(get_current_round(2).unwrap(), HasVoted::No); + // we need to call `round_data` for the next round to pick up + // from the keystore which authority id we'll be using to vote + environment.round_data(2); + let info = peer.client().info(); - let prevote = finality_grandpa::Prevote { - target_hash: info.best_hash, - target_number: info.best_number, - }; + let prevote = + finality_grandpa::Prevote { target_hash: info.best_hash, target_number: info.best_number }; // we prevote for round 2 which should lead to us updating the voter state environment.prevoted(2, prevote.clone()).unwrap(); @@ -1730,9 +1563,7 @@ fn grandpa_environment_never_overwrites_round_voter_state() { // if we report round 1 as completed again we should not overwrite the // voter state for round 2 - environment - .completed(1, round_state(), base(), &historical_votes()) - .unwrap(); + environment.completed(1, round_state(), base(), &historical_votes()).unwrap(); assert_matches!(get_current_round(2).unwrap(), HasVoted::Yes(_, _)); } @@ -1745,15 +1576,15 @@ fn imports_justification_for_regular_blocks_on_import() { let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 1); + let mut net = GrandpaTestNet::new(api.clone(), 1, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >(client.clone()); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); let block = builder.build().unwrap().block; let block_hash = block.hash(); @@ -1784,21 +1615,17 @@ fn imports_justification_for_regular_blocks_on_import() { precommits: vec![precommit], }; - GrandpaJustification::from_commit( - &full_client, - round, - commit, - ).unwrap() + GrandpaJustification::from_commit(&full_client, round, commit).unwrap() }; // we import the block with justification attached let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.justification = Some(justification.encode()); + import.justifications = Some((GRANDPA_ENGINE_ID, justification.encode()).into()); import.body = Some(block.extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); assert_eq!( - block_import.import_block(import, HashMap::new()).unwrap(), + block_on(block_import.import_block(import, HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: false, clear_justification_requests: false, @@ -1809,7 +1636,51 @@ fn imports_justification_for_regular_blocks_on_import() { ); // the justification should be imported and available from the client - assert!( - client.justification(&BlockId::Hash(block_hash)).unwrap().is_some(), - ); + assert!(client.justifications(&BlockId::Hash(block_hash)).unwrap().is_some()); +} + +#[test] +fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { + use finality_grandpa::voter::Environment; + + let alice = Ed25519Keyring::Alice; + let voters = make_ids(&[alice]); + + let environment = { + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let link = peer.data.lock().take().unwrap(); + let (keystore, _keystore_path) = create_keystore(alice); + test_environment(&link, Some(keystore), network_service.clone(), ()) + }; + + let signed_prevote = { + let prevote = finality_grandpa::Prevote { target_hash: H256::random(), target_number: 1 }; + + let signed = alice.sign(&[]).into(); + (prevote, signed) + }; + + let mut equivocation = finality_grandpa::Equivocation { + round_number: 1, + identity: alice.public().into(), + first: signed_prevote.clone(), + second: signed_prevote.clone(), + }; + + // we need to call `round_data` to pick up from the keystore which + // authority id we'll be using to vote + environment.round_data(1); + + // reporting the equivocation should fail since the offender is a local + // authority (i.e. 
we have keys in our keystore for the given id) + let equivocation_proof = sp_finality_grandpa::Equivocation::Prevote(equivocation.clone()); + assert!(matches!(environment.report_equivocation(equivocation_proof), Err(Error::Safety(_)))); + + // if we set the equivocation offender to another id for which we don't have + // keys it should work + equivocation.identity = Default::default(); + let equivocation_proof = sp_finality_grandpa::Equivocation::Prevote(equivocation); + assert!(environment.report_equivocation(equivocation_proof).is_ok()); } diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index 3ac94f3b062f0..deb6577264347 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -23,32 +23,31 @@ //! This is used for votes and commit messages currently. use super::{ - BlockStatus as BlockStatusT, - BlockSyncRequester as BlockSyncRequesterT, - CommunicationIn, - Error, + BlockStatus as BlockStatusT, BlockSyncRequester as BlockSyncRequesterT, CommunicationIn, Error, SignedMessage, }; -use log::{debug, warn}; -use sp_utils::mpsc::TracingUnboundedReceiver; -use futures::prelude::*; -use futures::stream::{Fuse, StreamExt}; -use futures_timer::Delay; use finality_grandpa::voter; -use parking_lot::Mutex; -use prometheus_endpoint::{ - Gauge, U64, PrometheusError, register, Registry, +use futures::{ + prelude::*, + stream::{Fuse, StreamExt}, }; +use futures_timer::Delay; +use log::{debug, warn}; +use parking_lot::Mutex; +use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; use sc_client_api::{BlockImportNotification, ImportNotifications}; +use sc_utils::mpsc::TracingUnboundedReceiver; use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::collections::{HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::{Duration, Instant}; +use std::{ + collections::{HashMap, VecDeque}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::{Duration, Instant}, +}; const LOG_PENDING_INTERVAL: Duration = Duration::from_secs(15); @@ -83,7 +82,6 @@ pub(crate) enum DiscardWaitOrReady { } /// Prometheus metrics for the `UntilImported` queue. -// // At a given point in time there can be more than one `UntilImported` queue. One can not register a // metric twice, thus queues need to share the same Prometheus metrics instead of instantiating // their own ones. 
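// The sharing scheme described above boils down to: every queue clones a
// handle to the one registered gauge, tracks its own local count, and removes
// only that contribution when it is dropped. A standalone sketch of the
// pattern, with a plain atomic standing in for the registered Prometheus
// `Gauge` (names are illustrative):

use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

struct QueueMetrics {
    // shared by all queues; registered only once
    global_waiting: Arc<AtomicU64>,
    // this queue's contribution to the shared count
    local_waiting: u64,
}

impl QueueMetrics {
    fn waiting(&mut self, n: u64) {
        self.global_waiting.fetch_add(n, Ordering::Relaxed);
        self.local_waiting += n;
    }
}

impl Clone for QueueMetrics {
    fn clone(&self) -> Self {
        // a new queue keeps pointing at the same shared counter,
        // but starts with no waiting messages of its own
        QueueMetrics { global_waiting: self.global_waiting.clone(), local_waiting: 0 }
    }
}

impl Drop for QueueMetrics {
    fn drop(&mut self) {
        // subtract only what this queue contributed
        self.global_waiting.fetch_sub(self.local_waiting, Ordering::Relaxed);
    }
}

fn main() {
    let gauge = Arc::new(AtomicU64::new(0));
    let mut q1 = QueueMetrics { global_waiting: gauge.clone(), local_waiting: 0 };
    let mut q2 = q1.clone();
    q1.waiting(3);
    q2.waiting(2);
    drop(q2);
    assert_eq!(gauge.load(Ordering::Relaxed), 3);
}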
@@ -100,10 +98,13 @@ pub(crate) struct Metrics { impl Metrics { pub(crate) fn register(registry: &Registry) -> Result { Ok(Self { - global_waiting_messages: register(Gauge::new( - "finality_grandpa_until_imported_waiting_messages_number", - "Number of finality grandpa messages waiting within the until imported queue.", - )?, registry)?, + global_waiting_messages: register( + Gauge::new( + "finality_grandpa_until_imported_waiting_messages_number", + "Number of finality grandpa messages waiting within the until imported queue.", + )?, + registry, + )?, local_waiting_messages: 0, }) } @@ -119,7 +120,6 @@ impl Metrics { } } - impl Clone for Metrics { fn clone(&self) -> Self { Metrics { @@ -140,7 +140,8 @@ impl Drop for Metrics { } /// Buffering incoming messages until blocks with given hashes are imported. -pub(crate) struct UntilImported where +pub(crate) struct UntilImported +where Block: BlockT, I: Stream + Unpin, M: BlockUntilImported, @@ -151,7 +152,7 @@ pub(crate) struct UntilImported wh incoming_messages: Fuse, ready: VecDeque, /// Interval at which to check status of each awaited block. - check_pending: Pin> + Send + Sync>>, + check_pending: Pin> + Send>>, /// Mapping block hashes to their block number, the point in time it was /// first encountered (Instant) and a list of GRANDPA messages referencing /// the block hash. @@ -163,13 +164,18 @@ pub(crate) struct UntilImported wh metrics: Option, } -impl Unpin for UntilImported where +impl Unpin + for UntilImported +where Block: BlockT, I: Stream + Unpin, M: BlockUntilImported, -{} +{ +} -impl UntilImported where +impl + UntilImported +where Block: BlockT, BlockStatus: BlockStatusT, BlockSyncRequester: BlockSyncRequesterT, @@ -192,11 +198,12 @@ impl UntilImported UntilImported Stream for UntilImported where +impl Stream + for UntilImported +where Block: BlockT, BStatus: BlockStatusT, BSyncRequester: BlockSyncRequesterT, @@ -249,7 +258,7 @@ impl Stream for UntilImported break, } } @@ -261,12 +270,12 @@ impl Stream for UntilImported break, } } @@ -278,7 +287,9 @@ impl Stream for UntilImported Stream for UntilImported BlockUntilImported for SignedMessage { if let Some(number) = status_check.block_number(target_hash)? { if number != target_number { warn_authority_wrong_target(target_hash, msg.id); - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } else { - return Ok(DiscardWaitOrReady::Ready(msg)); + return Ok(DiscardWaitOrReady::Ready(msg)) } } @@ -378,13 +389,8 @@ impl BlockUntilImported for SignedMessage { /// Helper type definition for the stream which waits until vote targets for /// signed messages are imported. -pub(crate) type UntilVoteTargetImported = UntilImported< - Block, - BlockStatus, - BlockSyncRequester, - I, - SignedMessage, ->; +pub(crate) type UntilVoteTargetImported = + UntilImported>; /// This blocks a global message import, i.e. a commit or catch up messages, /// until all blocks referenced in its votes are known. @@ -437,19 +443,18 @@ impl BlockUntilImported for BlockGlobalMessage { if let Some(number) = status_check.block_number(target_hash)? { entry.insert(KnownOrUnknown::Known(number)); number - } else { entry.insert(KnownOrUnknown::Unknown(perceived_number)); perceived_number } - } + }, }; if canon_number != perceived_number { // invalid global message: messages targeting wrong number // or at least different from other vote in same global // message. 
- return Ok(false); + return Ok(false) } Ok(true) @@ -458,23 +463,24 @@ impl BlockUntilImported for BlockGlobalMessage { match input { voter::CommunicationIn::Commit(_, ref commit, ..) => { // add known hashes from all precommits. - let precommit_targets = commit.precommits - .iter() - .map(|c| (c.target_number, c.target_hash)); + let precommit_targets = + commit.precommits.iter().map(|c| (c.target_number, c.target_hash)); for (target_number, target_hash) in precommit_targets { if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } } }, voter::CommunicationIn::CatchUp(ref catch_up, ..) => { // add known hashes from all prevotes and precommits. - let prevote_targets = catch_up.prevotes + let prevote_targets = catch_up + .prevotes .iter() .map(|s| (s.prevote.target_number, s.prevote.target_hash)); - let precommit_targets = catch_up.precommits + let precommit_targets = catch_up + .precommits .iter() .map(|s| (s.precommit.target_number, s.precommit.target_hash)); @@ -482,29 +488,39 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in targets { if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } } }, }; } - let unknown_hashes = checked_hashes.into_iter().filter_map(|(hash, num)| match num { - KnownOrUnknown::Unknown(number) => Some((hash, number)), - KnownOrUnknown::Known(_) => None, - }).collect::>(); + let unknown_hashes = checked_hashes + .into_iter() + .filter_map(|(hash, num)| match num { + KnownOrUnknown::Unknown(number) => Some((hash, number)), + KnownOrUnknown::Known(_) => None, + }) + .collect::>(); if unknown_hashes.is_empty() { // none of the hashes in the global message were unknown. // we can just return the message directly. - return Ok(DiscardWaitOrReady::Ready(input)); + return Ok(DiscardWaitOrReady::Ready(input)) } let locked_global = Arc::new(Mutex::new(Some(input))); - let items_to_await = unknown_hashes.into_iter().map(|(hash, target_number)| { - (hash, target_number, BlockGlobalMessage { inner: locked_global.clone(), target_number }) - }).collect(); + let items_to_await = unknown_hashes + .into_iter() + .map(|(hash, target_number)| { + ( + hash, + target_number, + BlockGlobalMessage { inner: locked_global.clone(), target_number }, + ) + }) + .collect(); // schedule waits for all unknown messages. // when the last one of these has `wait_completed` called on it, @@ -517,7 +533,7 @@ impl BlockUntilImported for BlockGlobalMessage { // Delete the inner message so it won't ever be forwarded. Future calls to // `wait_completed` on the same `inner` will ignore it. *self.inner.lock() = None; - return None; + return None } match Arc::try_unwrap(self.inner) { @@ -534,25 +550,20 @@ impl BlockUntilImported for BlockGlobalMessage { /// A stream which gates off incoming global messages, i.e. commit and catch up /// messages, until all referenced block hashes have been imported. 
-pub(crate) type UntilGlobalMessageBlocksImported = UntilImported< - Block, - BlockStatus, - BlockSyncRequester, - I, - BlockGlobalMessage, ->; +pub(crate) type UntilGlobalMessageBlocksImported = + UntilImported>; #[cfg(test)] mod tests { use super::*; use crate::{CatchUp, CompactCommit}; - use substrate_test_runtime_client::runtime::{Block, Hash, Header}; - use sp_consensus::BlockOrigin; - use sc_client_api::BlockImportNotification; + use finality_grandpa::Precommit; use futures::future::Either; use futures_timer::Delay; - use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; - use finality_grandpa::Precommit; + use sc_client_api::BlockImportNotification; + use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; + use sp_consensus::BlockOrigin; + use substrate_test_runtime_client::runtime::{Block, Hash, Header}; #[derive(Clone)] struct TestChainState { @@ -563,10 +574,8 @@ mod tests { impl TestChainState { fn new() -> (Self, ImportNotifications) { let (tx, rx) = tracing_unbounded("test"); - let state = TestChainState { - sender: tx, - known_blocks: Arc::new(Mutex::new(HashMap::new())), - }; + let state = + TestChainState { sender: tx, known_blocks: Arc::new(Mutex::new(HashMap::new())) }; (state, rx) } @@ -580,13 +589,15 @@ mod tests { let number = header.number().clone(); self.known_blocks.lock().insert(hash, number); - self.sender.unbounded_send(BlockImportNotification { - hash, - origin: BlockOrigin::File, - header, - is_new_best: false, - tree_route: None, - }).unwrap(); + self.sender + .unbounded_send(BlockImportNotification { + hash, + origin: BlockOrigin::File, + header, + is_new_best: false, + tree_route: None, + }) + .unwrap(); } } @@ -607,14 +618,17 @@ mod tests { impl Default for TestBlockSyncRequester { fn default() -> Self { - TestBlockSyncRequester { - requests: Arc::new(Mutex::new(Vec::new())), - } + TestBlockSyncRequester { requests: Arc::new(Mutex::new(Vec::new())) } } } impl BlockSyncRequesterT for TestBlockSyncRequester { - fn set_sync_fork_request(&self, _peers: Vec, hash: Hash, number: NumberFor) { + fn set_sync_fork_request( + &self, + _peers: Vec, + hash: Hash, + number: NumberFor, + ) { self.requests.lock().push((hash, number)); } } @@ -631,7 +645,7 @@ mod tests { // unwrap the commit from `CommunicationIn` returning its fields in a tuple, // panics if the given message isn't a commit - fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit::) { + fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit) { match msg { voter::CommunicationIn::Commit(round, commit, ..) => (round, commit), _ => panic!("expected commit"), @@ -650,7 +664,8 @@ mod tests { fn message_all_dependencies_satisfied( msg: CommunicationIn, enact_dependencies: F, - ) -> CommunicationIn where + ) -> CommunicationIn + where F: FnOnce(&TestChainState), { let (chain_state, import_notifications) = TestChainState::new(); @@ -680,7 +695,8 @@ mod tests { fn blocking_message_on_dependencies( msg: CommunicationIn, enact_dependencies: F, - ) -> CommunicationIn where + ) -> CommunicationIn + where F: FnOnce(&TestChainState), { let (chain_state, import_notifications) = TestChainState::new(); @@ -702,16 +718,17 @@ mod tests { // NOTE: needs to be cloned otherwise it is moved to the stream and // dropped too early. 
let inner_chain_state = chain_state.clone(); - let work = future::select(until_imported.into_future(), Delay::new(Duration::from_millis(100))) - .then(move |res| match res { - Either::Left(_) => panic!("timeout should have fired first"), - Either::Right((_, until_imported)) => { - // timeout fired. push in the headers. - enact_dependencies(&inner_chain_state); - - until_imported - } - }); + let work = + future::select(until_imported.into_future(), Delay::new(Duration::from_millis(100))) + .then(move |res| match res { + Either::Left(_) => panic!("timeout should have fired first"), + Either::Right((_, until_imported)) => { + // timeout fired. push in the headers. + enact_dependencies(&inner_chain_state); + + until_imported + }, + }); futures::executor::block_on(work).0.unwrap().unwrap() } @@ -726,37 +743,22 @@ mod tests { target_hash: h1.hash(), target_number: 5, precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, ], auth_data: Vec::new(), // not used }; - let unknown_commit = || voter::CommunicationIn::Commit( - 0, - unknown_commit.clone(), - voter::Callback::Blank, - ); + let unknown_commit = + || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank); - let res = blocking_message_on_dependencies( - unknown_commit(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = blocking_message_on_dependencies(unknown_commit(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_commit(res), - unapply_commit(unknown_commit()), - ); + assert_eq!(unapply_commit(res), unapply_commit(unknown_commit())); } #[test] @@ -769,37 +771,22 @@ mod tests { target_hash: h1.hash(), target_number: 5, precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, ], auth_data: Vec::new(), // not used }; - let known_commit = || voter::CommunicationIn::Commit( - 0, - known_commit.clone(), - voter::Callback::Blank, - ); + let known_commit = + || voter::CommunicationIn::Commit(0, known_commit.clone(), voter::Callback::Blank); - let res = message_all_dependencies_satisfied( - known_commit(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = message_all_dependencies_satisfied(known_commit(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_commit(res), - unapply_commit(known_commit()), - ); + assert_eq!(unapply_commit(res), unapply_commit(known_commit())); } #[test] @@ -808,37 +795,27 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let signed_prevote = |header: &Header| { - finality_grandpa::SignedPrevote { - id: Default::default(), - signature: Default::default(), - prevote: finality_grandpa::Prevote { - target_hash: header.hash(), - target_number: *header.number(), - }, - } + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: Default::default(), + signature: Default::default(), + 
prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let signed_precommit = |header: &Header| { - finality_grandpa::SignedPrecommit { - id: Default::default(), - signature: Default::default(), - precommit: finality_grandpa::Precommit { - target_hash: header.hash(), - target_number: *header.number(), - }, - } + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: Default::default(), + signature: Default::default(), + precommit: finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let prevotes = vec![ - signed_prevote(&h1), - signed_prevote(&h3), - ]; + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; - let precommits = vec![ - signed_precommit(&h1), - signed_precommit(&h2), - ]; + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; let unknown_catch_up = finality_grandpa::CatchUp { round_number: 1, @@ -848,24 +825,16 @@ mod tests { base_number: *h1.number(), }; - let unknown_catch_up = || voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); - let res = blocking_message_on_dependencies( - unknown_catch_up(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = blocking_message_on_dependencies(unknown_catch_up(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_catch_up(res), - unapply_catch_up(unknown_catch_up()), - ); + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up())); } #[test] @@ -874,37 +843,27 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let signed_prevote = |header: &Header| { - finality_grandpa::SignedPrevote { - id: Default::default(), - signature: Default::default(), - prevote: finality_grandpa::Prevote { - target_hash: header.hash(), - target_number: *header.number(), - }, - } + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: Default::default(), + signature: Default::default(), + prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let signed_precommit = |header: &Header| { - finality_grandpa::SignedPrecommit { - id: Default::default(), - signature: Default::default(), - precommit: finality_grandpa::Precommit { - target_hash: header.hash(), - target_number: *header.number(), - }, - } + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: Default::default(), + signature: Default::default(), + precommit: finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let prevotes = vec![ - signed_prevote(&h1), - signed_prevote(&h3), - ]; + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; - let precommits = vec![ - signed_precommit(&h1), - signed_precommit(&h2), - ]; + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; let unknown_catch_up = finality_grandpa::CatchUp { round_number: 1, @@ -914,24 +873,16 @@ mod tests { base_number: *h1.number(), }; - let unknown_catch_up = || voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), 
voter::Callback::Blank); - let res = message_all_dependencies_satisfied( - unknown_catch_up(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = message_all_dependencies_satisfied(unknown_catch_up(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_catch_up(res), - unapply_catch_up(unknown_catch_up()), - ); + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up())); } #[test] @@ -962,23 +913,14 @@ mod tests { target_hash: h1.hash(), target_number: 5, precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, ], auth_data: Vec::new(), // not used }; - let unknown_commit = || voter::CommunicationIn::Commit( - 0, - unknown_commit.clone(), - voter::Callback::Blank, - ); + let unknown_commit = + || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank); // we send the commit message and spawn the until_imported stream global_tx.unbounded_send(unknown_commit()).unwrap(); @@ -987,26 +929,33 @@ mod tests { threads_pool.spawn_ok(until_imported.into_future().map(|_| ())); // assert that we will make sync requests - let assert = futures::future::poll_fn(|_| { + let assert = futures::future::poll_fn(|ctx| { let block_sync_requests = block_sync_requester.requests.lock(); // we request blocks targeted by the precommits that aren't imported if block_sync_requests.contains(&(h2.hash(), *h2.number())) && block_sync_requests.contains(&(h3.hash(), *h3.number())) { - return Poll::Ready(()); + return Poll::Ready(()) } + // NOTE: nothing in this function is future-aware (i.e. nothing gets registered to wake + // up this future), so we manually wake up this task to avoid having to wait until the + // timeout below triggers.
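// The NOTE above describes a self-waking future: a `poll_fn` that only checks
// plain shared state never registers its waker anywhere, so unless it wakes
// itself it would be polled once and then hang until the timeout. A minimal,
// self-contained sketch of the trick (plain `futures` crate; the thread and
// flag stand in for the block sync requests):

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::task::Poll;

fn main() {
    let done = Arc::new(AtomicBool::new(false));

    let done2 = done.clone();
    std::thread::spawn(move || {
        std::thread::sleep(std::time::Duration::from_millis(50));
        done2.store(true, Ordering::SeqCst);
    });

    let wait_for_flag = futures::future::poll_fn(move |cx| {
        if done.load(Ordering::SeqCst) {
            return Poll::Ready(())
        }

        // nothing stored `cx.waker()`, so request another poll ourselves;
        // otherwise the executor would never run this task again
        cx.waker().wake_by_ref();
        Poll::Pending
    });

    futures::executor::block_on(wait_for_flag);
}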
+ ctx.waker().wake_by_ref(); + Poll::Pending }); // the `until_imported` stream doesn't request the blocks immediately, // but it should request them after a small timeout let timeout = Delay::new(Duration::from_secs(60)); - let test = future::select(assert, timeout).map(|res| match res { - Either::Left(_) => {}, - Either::Right(_) => panic!("timed out waiting for block sync request"), - }).map(drop); + let test = future::select(assert, timeout) + .map(|res| match res { + Either::Left(_) => {}, + Either::Right(_) => panic!("timed out waiting for block sync request"), + }) + .map(drop); futures::executor::block_on(test); } @@ -1022,10 +971,8 @@ mod tests { base_number: *header.number(), }; - let catch_up = voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); + let catch_up = + voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); Arc::new(Mutex::new(Some(catch_up))) } @@ -1034,15 +981,10 @@ mod tests { fn block_global_message_wait_completed_return_when_all_awaited() { let msg_inner = test_catch_up(); - let waiting_block_1 = BlockGlobalMessage:: { - inner: msg_inner.clone(), - target_number: 1, - }; + let waiting_block_1 = + BlockGlobalMessage:: { inner: msg_inner.clone(), target_number: 1 }; - let waiting_block_2 = BlockGlobalMessage:: { - inner: msg_inner, - target_number: 2, - }; + let waiting_block_2 = BlockGlobalMessage:: { inner: msg_inner, target_number: 2 }; // waiting_block_2 is still waiting for block 2, thus this should return `None`. assert!(waiting_block_1.wait_completed(1).is_none()); @@ -1056,15 +998,10 @@ mod tests { fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { let msg_inner = test_catch_up(); - let waiting_block_1 = BlockGlobalMessage:: { - inner: msg_inner.clone(), - target_number: 1, - }; + let waiting_block_1 = + BlockGlobalMessage:: { inner: msg_inner.clone(), target_number: 1 }; - let waiting_block_2 = BlockGlobalMessage:: { - inner: msg_inner, - target_number: 2, - }; + let waiting_block_2 = BlockGlobalMessage:: { inner: msg_inner, target_number: 2 }; // Calling wait_completed with wrong block number should yield None. assert!(waiting_block_1.wait_completed(1234).is_none()); diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 60493867ce1f4..b974afe0d352e 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,14 +22,23 @@ //! restrictions that are taken into account by the GRANDPA environment when //! selecting a finality target to vote on. -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; + +use dyn_clone::DynClone; use sc_client_api::blockchain::HeaderBackend; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, Zero}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, NumberFor, One, Zero}, +}; + +/// A future returned by a `VotingRule` to restrict a given vote, if any restriction is necessary. +pub type VotingRuleResult = + Pin::Hash, NumberFor)>> + Send>>; /// A trait for custom voting rules in GRANDPA. 
-pub trait VotingRule<Block, B>: Send + Sync where +pub trait VotingRule<Block, B>: DynClone + Send + Sync +where Block: BlockT, B: HeaderBackend<Block>, { @@ -47,25 +56,26 @@ pub trait VotingRule<Block, B>: Send + Sync where /// execution of voting rules wherein `current_target <= best_target`. fn restrict_vote( &self, - backend: &B, + backend: Arc<B>, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor<Block>)>; + ) -> VotingRuleResult<Block>; } -impl<Block, B> VotingRule<Block, B> for () where +impl<Block, B> VotingRule<Block, B> for () +where Block: BlockT, B: HeaderBackend<Block>, { fn restrict_vote( &self, - _backend: &B, + _backend: Arc<B>, _base: &Block::Header, _best_target: &Block::Header, _current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor<Block>)> { - None + ) -> VotingRuleResult<Block> { + Box::pin(async { None }) } } @@ -74,21 +84,22 @@ impl<Block, B> VotingRule<Block, B> for () where /// behind the best block. #[derive(Clone)] pub struct BeforeBestBlockBy<N>(N); -impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> where +impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> +where Block: BlockT, B: HeaderBackend<Block>, { fn restrict_vote( &self, - backend: &B, + backend: Arc<B>, _base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor<Block>)> { + ) -> VotingRuleResult<Block> { use sp_arithmetic::traits::Saturating; if current_target.number().is_zero() { - return None; + return Box::pin(async { None }) } // find the target number restricted by this rule @@ -96,34 +107,34 @@ impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> wher // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return None; + return Box::pin(async { None }) } + let current_target = current_target.clone(); + // find the block at the given target height - find_target( - backend, - target_number, - current_target, - ) + Box::pin(std::future::ready(find_target(&*backend, target_number.clone(), &current_target))) } } /// A custom voting rule that limits votes towards 3/4 of the unfinalized chain, /// using the given `base` and `best_target` to figure where the 3/4 target /// should fall.
+#[derive(Clone)] pub struct ThreeQuartersOfTheUnfinalizedChain; -impl<Block, B> VotingRule<Block, B> for ThreeQuartersOfTheUnfinalizedChain where +impl<Block, B> VotingRule<Block, B> for ThreeQuartersOfTheUnfinalizedChain +where Block: BlockT, B: HeaderBackend<Block>, { fn restrict_vote( &self, - backend: &B, + backend: Arc<B>, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor<Block>)> { + ) -> VotingRuleResult<Block> { // target a vote towards 3/4 of the unfinalized chain (rounding up) let target_number = { let two = NumberFor::<Block>::one() + One::one(); @@ -138,15 +149,11 @@ impl<Block, B> VotingRule<Block, B> for ThreeQuartersOfTheUnfinalizedChain where // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return None; + return Box::pin(async { None }) } // find the block at the given target height - find_target( - backend, - target_number, - current_target, - ) + Box::pin(std::future::ready(find_target(&*backend, target_number, current_target))) } } @@ -155,7 +162,8 @@ fn find_target<Block, B>( backend: &B, target_number: NumberFor<Block>, current_header: &Block::Header, -) -> Option<(Block::Hash, NumberFor<Block>)> where +) -> Option<(Block::Hash, NumberFor<Block>)> +where Block: BlockT, B: HeaderBackend<Block>, { @@ -172,11 +180,13 @@ fn find_target<Block, B>( } if *target_header.number() == target_number { - return Some((target_hash, target_number)); + return Some((target_hash, target_number)) } target_hash = *target_header.parent_hash(); - target_header = backend.header(BlockId::Hash(target_hash)).ok()? + target_header = backend + .header(BlockId::Hash(target_hash)) + .ok()? .expect("Header known to exist due to the existence of one of its descendants; qed"); } } @@ -187,45 +197,54 @@ struct VotingRules<Block, B> { rules: Arc<Vec<Box<dyn VotingRule<Block, B>>>>, } impl<Block, B> Clone for VotingRules<Block, B> { fn clone(&self) -> Self { - VotingRules { - rules: self.rules.clone(), - } + VotingRules { rules: self.rules.clone() } } } -impl<Block, B> VotingRule<Block, B> for VotingRules<Block, B> where +impl<Block, B> VotingRule<Block, B> for VotingRules<Block, B> +where Block: BlockT, - B: HeaderBackend<Block>, + B: HeaderBackend<Block> + 'static, { fn restrict_vote( &self, - backend: &B, + backend: Arc<B>, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor<Block>)> { - let restricted_target = self.rules.iter().fold( - current_target.clone(), - |current_target, rule| { - rule.restrict_vote( - backend, - base, - best_target, - &current_target, - ) + ) -> VotingRuleResult<Block> { + let rules = self.rules.clone(); + let base = base.clone(); + let best_target = best_target.clone(); + let current_target = current_target.clone(); + + Box::pin(async move { + let mut restricted_target = current_target.clone(); + + for rule in rules.iter() { + if let Some(header) = rule + .restrict_vote(backend.clone(), &base, &best_target, &restricted_target) + .await + .filter(|(_, restricted_number)| { + // NOTE: we can only restrict votes within the interval [base, target) + restricted_number >= base.number() && + restricted_number < restricted_target.number() + }) .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) .and_then(std::convert::identity) - .unwrap_or(current_target) - }, - ); - - let restricted_hash = restricted_target.hash(); - - if restricted_hash != current_target.hash() { - Some((restricted_hash, *restricted_target.number())) - } else { - None - } + { + restricted_target = header; + } + } + + let restricted_hash = restricted_target.hash(); + + if restricted_hash != current_target.hash() { + Some((restricted_hash, *restricted_target.number())) + } else { + None + } + }) } } @@ 
-235,30 +254,31 @@ pub struct VotingRulesBuilder<Block, B> { rules: Vec<Box<dyn VotingRule<Block, B>>>, } -impl<Block, B> Default for VotingRulesBuilder<Block, B> where +impl<Block, B> Default for VotingRulesBuilder<Block, B> +where Block: BlockT, - B: HeaderBackend<Block>, + B: HeaderBackend<Block> + 'static, { fn default() -> Self { VotingRulesBuilder::new() - .add(BeforeBestBlockBy(2.into())) + .add(BeforeBestBlockBy(2u32.into())) .add(ThreeQuartersOfTheUnfinalizedChain) } } -impl<Block, B> VotingRulesBuilder<Block, B> where +impl<Block, B> VotingRulesBuilder<Block, B> +where Block: BlockT, - B: HeaderBackend<Block>, + B: HeaderBackend<Block> + 'static, { /// Return a new voting rule builder using the given backend. pub fn new() -> Self { - VotingRulesBuilder { - rules: Vec::new(), - } + VotingRulesBuilder { rules: Vec::new() } } /// Add a new voting rule to the builder. - pub fn add<R>(mut self, rule: R) -> Self where + pub fn add<R>(mut self, rule: R) -> Self + where R: VotingRule<Block, B> + 'static, { self.rules.push(Box::new(rule)); @@ -266,8 +286,9 @@ impl<Block, B> VotingRulesBuilder<Block, B> where } /// Add all given voting rules to the builder. - pub fn add_all<I>(mut self, rules: I) -> Self where - I: IntoIterator<Item = Box<dyn VotingRule<Block, B>>>, + pub fn add_all<I>(mut self, rules: I) -> Self + where + I: IntoIterator<Item = Box<dyn VotingRule<Block, B>>>, { self.rules.extend(rules); self @@ -276,23 +297,100 @@ impl<Block, B> VotingRulesBuilder<Block, B> where /// Return a new `VotingRule` that applies all of the previously added /// voting rules in-order. pub fn build(self) -> impl VotingRule<Block, B> + Clone { - VotingRules { - rules: Arc::new(self.rules), - } + VotingRules { rules: Arc::new(self.rules) } } } -impl<Block, B> VotingRule<Block, B> for Box<dyn VotingRule<Block, B>> where +impl<Block, B> VotingRule<Block, B> for Box<dyn VotingRule<Block, B>> +where Block: BlockT, B: HeaderBackend<Block>, + Self: Clone, { fn restrict_vote( &self, - backend: &B, + backend: Arc<B>, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor<Block>)> { + ) -> VotingRuleResult<Block> { (**self).restrict_vote(backend, base, best_target, current_target) } } + +#[cfg(test)] +mod tests { + use super::*; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::BlockOrigin; + use sp_runtime::traits::Header as _; + + use substrate_test_runtime_client::{ + runtime::{Block, Header}, + Backend, Client, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + /// A mock voting rule that subtracts a static number of blocks from the `current_target`.
+ #[derive(Clone)] + struct Subtract(u64); + impl VotingRule<Block, Client<Backend>> for Subtract { + fn restrict_vote( + &self, + backend: Arc<Client<Backend>>, + _base: &Header, + _best_target: &Header, + current_target: &Header, + ) -> VotingRuleResult<Block> { + let target_number = current_target.number() - self.0; + let res = backend + .hash(target_number) + .unwrap() + .map(|target_hash| (target_hash, target_number)); + + Box::pin(std::future::ready(res)) + } + } + + #[test] + fn multiple_voting_rules_cannot_restrict_past_base() { + // setup an aggregate voting rule composed of two voting rules + // where each subtracts 50 blocks from the current target + let rule = VotingRulesBuilder::new().add(Subtract(50)).add(Subtract(50)).build(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + + for _ in 0..200 { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + } + + let genesis = client.header(&BlockId::Number(0u32.into())).unwrap().unwrap(); + + let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); + + let (_, number) = + futures::executor::block_on(rule.restrict_vote(client.clone(), &genesis, &best, &best)) + .unwrap(); + + // we apply both rules which should subtract 100 blocks from best block (#200) + // which means that we should be voting for block #100 + assert_eq!(number, 100); + + let block110 = client.header(&BlockId::Number(110u32.into())).unwrap().unwrap(); + + let (_, number) = futures::executor::block_on(rule.restrict_vote( + client.clone(), + &block110, + &best, + &best, + )) + .unwrap(); + + // base block is #110 while best block is #200; applying both rules would make + // the target block (#100) be lower than the base block, therefore + // only one of the rules is applied. + assert_eq!(number, 150); + } +} diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs new file mode 100644 index 0000000000000..34eaa49cdf360 --- /dev/null +++ b/client/finality-grandpa/src/warp_proof.rs @@ -0,0 +1,430 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see <http://www.gnu.org/licenses/>. + +//! Utilities for generating and verifying GRANDPA warp sync proofs. + +use sp_runtime::codec::{self, Decode, Encode}; + +use crate::{ + best_justification, find_scheduled_change, AuthoritySetChanges, BlockNumberOps, + GrandpaJustification, SharedAuthoritySet, +}; +use sc_client_api::Backend as ClientBackend; +use sc_network::warp_request_handler::{EncodedProof, VerificationResult, WarpSyncProvider}; +use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; +use sp_finality_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, +}; + +use std::sync::Arc; + +/// Warp proof processing error.
+/// Warp proof processing error. +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum Error { + /// Decoding error. + #[display(fmt = "Failed to decode block hash: {}.", _0)] + DecodeScale(codec::Error), + /// Client backend error. + Client(sp_blockchain::Error), + /// Invalid request data. + #[from(ignore)] + InvalidRequest(String), + /// Invalid warp proof. + #[from(ignore)] + InvalidProof(String), + /// Missing header or authority set change data. + #[display(fmt = "Missing required data to be able to answer request.")] + MissingData, +} + +impl std::error::Error for Error {} + +/// The maximum size in bytes of the `WarpSyncProof`. +pub(super) const MAX_WARP_SYNC_PROOF_SIZE: usize = 8 * 1024 * 1024; + +/// A proof of an authority set change. +#[derive(Decode, Encode, Debug)] +pub struct WarpSyncFragment { + /// The last block that the given authority set finalized. This block should contain a digest + /// signaling an authority set change from which we can fetch the next authority set. + pub header: Block::Header, + /// A justification for the header above which proves its finality. In order to validate it the + /// verifier must be aware of the authorities and set id to which the justification refers. + pub justification: GrandpaJustification, +} + +/// An accumulated proof of multiple authority set changes. +#[derive(Decode, Encode)] +pub struct WarpSyncProof { + proofs: Vec>, + is_finished: bool, +} + +impl WarpSyncProof { + /// Generates a warp sync proof starting at the given block. It will generate authority set + /// change proofs for all changes that happened from `begin` until the current authority set + /// (capped by MAX_WARP_SYNC_PROOF_SIZE). + fn generate( + backend: &Backend, + begin: Block::Hash, + set_changes: &AuthoritySetChanges>, + ) -> Result, Error> + where + Backend: ClientBackend, + { + // TODO: cache best response (i.e. the one with lowest begin_number) + let blockchain = backend.blockchain(); + + let begin_number = blockchain + .block_number_from_id(&BlockId::Hash(begin))? + .ok_or_else(|| Error::InvalidRequest("Missing start block".to_string()))?; + + if begin_number > blockchain.info().finalized_number { + return Err(Error::InvalidRequest("Start block is not finalized".to_string())) + } + + let canon_hash = blockchain.hash(begin_number)?.expect( + "begin number is lower than finalized number; \ + all blocks below finalized number must have been imported; \ + qed.", + ); + + if canon_hash != begin { + return Err(Error::InvalidRequest( + "Start block is not in the finalized chain".to_string(), + )) + } + + let mut proofs = Vec::new(); + let mut proofs_encoded_len = 0; + let mut proof_limit_reached = false; + + let set_changes = set_changes.iter_from(begin_number).ok_or(Error::MissingData)?; + + for (_, last_block) in set_changes { + let header = blockchain.header(BlockId::Number(*last_block))?.expect( + "header number comes from previously applied set changes; must exist in db; qed.", + ); + + // the last block in a set is the one that triggers a change to the next set, + // therefore the block must have a digest that signals the authority set change + if find_scheduled_change::(&header).is_none() { + // if it doesn't contain a signal for a standard change then the set must have changed + // through a forced change, in which case we stop collecting proofs as the chain of + // trust in authority handoffs was broken. + break + } + + let justification = blockchain + .justifications(BlockId::Number(*last_block))?
+ .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) + .expect( + "header is last in set and contains standard change signal; \ + must have justification; \ + qed.", + ); + + let justification = GrandpaJustification::::decode(&mut &justification[..])?; + + let proof = WarpSyncFragment { header: header.clone(), justification }; + let proof_size = proof.encoded_size(); + + // Check for the limit. We remove some bytes from the maximum size, because we're only + // counting the size of the `WarpSyncFragment`s. The extra margin is here to leave + // room for the rest of the data (the size of the `Vec` and the boolean). + if proofs_encoded_len + proof_size >= MAX_WARP_SYNC_PROOF_SIZE - 50 { + proof_limit_reached = true; + break + } + + proofs_encoded_len += proof_size; + proofs.push(proof); + } + + let is_finished = if proof_limit_reached { + false + } else { + let latest_justification = best_justification(backend)?.filter(|justification| { + // the existing best justification must be for a block higher than the + // last authority set change. If we didn't prove any authority set + // change then we fall back to make sure it's higher or equal to the + // initial warp sync block. + let limit = proofs + .last() + .map(|proof| proof.justification.target().0 + One::one()) + .unwrap_or(begin_number); + + justification.target().0 >= limit + }); + + if let Some(latest_justification) = latest_justification { + let header = blockchain.header(BlockId::Hash(latest_justification.target().1))? + .expect("header hash corresponds to a justification in db; must exist in db as well; qed."); + + proofs.push(WarpSyncFragment { header, justification: latest_justification }) + } + + true + }; + + let final_outcome = WarpSyncProof { proofs, is_finished }; + debug_assert!(final_outcome.encoded_size() <= MAX_WARP_SYNC_PROOF_SIZE); + Ok(final_outcome) + } + + /// Verifies the warp sync proof starting at the given set id and with the given authorities. + /// Verification stops when either the proof is exhausted or finality for the target header can + /// be proven. If the proof is valid the new set id and authorities are returned. + fn verify( + &self, + set_id: SetId, + authorities: AuthorityList, + ) -> Result<(SetId, AuthorityList), Error> + where + NumberFor: BlockNumberOps, + { + let mut current_set_id = set_id; + let mut current_authorities = authorities; + + for (fragment_num, proof) in self.proofs.iter().enumerate() { + proof + .justification + .verify(current_set_id, &current_authorities) + .map_err(|err| Error::InvalidProof(err.to_string()))?; + + if proof.justification.target().1 != proof.header.hash() { + return Err(Error::InvalidProof( + "Mismatch between header and justification".to_owned(), + )) + } + + if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { + current_authorities = scheduled_change.next_authorities; + current_set_id += 1; + } else if fragment_num != self.proofs.len() - 1 || !self.is_finished { + // Only the last fragment of the last proof message is allowed to be missing + // the authority set change. + return Err(Error::InvalidProof( + "Header is missing authority set change digest".to_string(), + )) + } + } + Ok((current_set_id, current_authorities)) + } +} + +/// Implements network API for warp sync. +pub struct NetworkProvider> +where + NumberFor: BlockNumberOps, +{ + backend: Arc, + authority_set: SharedAuthoritySet>, +} + +impl> NetworkProvider +where + NumberFor: BlockNumberOps, +{
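The size accounting above can be read in isolation: fragments are accepted while their accumulated SCALE-encoded size stays under the cap, minus a fixed margin for the container overhead. A standalone restatement (illustrative only; `encoded_size` is the `parity-scale-codec` helper already used above):

    use codec::Encode;

    // Illustrative: accept `fragment` into the running total only if it still
    // fits under `cap` after reserving ~50 bytes for the `Vec` length prefix
    // and the trailing `is_finished` boolean.
    fn fits<T: Encode>(acc: &mut usize, fragment: &T, cap: usize) -> bool {
        let size = fragment.encoded_size();
        if *acc + size >= cap - 50 {
            return false
        }
        *acc += size;
        true
    }

+ /// Create a new instance for a given backend and authority set.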
+ pub fn new( + backend: Arc, + authority_set: SharedAuthoritySet>, + ) -> Self { + NetworkProvider { backend, authority_set } + } +} + +impl> WarpSyncProvider + for NetworkProvider +where + NumberFor: BlockNumberOps, +{ + fn generate( + &self, + start: Block::Hash, + ) -> Result> { + let proof = WarpSyncProof::::generate( + &*self.backend, + start, + &self.authority_set.authority_set_changes(), + ) + .map_err(Box::new)?; + Ok(EncodedProof(proof.encode())) + } + + fn verify( + &self, + proof: &EncodedProof, + set_id: SetId, + authorities: AuthorityList, + ) -> Result, Box> { + let EncodedProof(proof) = proof; + let proof = WarpSyncProof::::decode(&mut proof.as_slice()) + .map_err(|e| format!("Proof decoding error: {:?}", e))?; + let last_header = proof + .proofs + .last() + .map(|p| p.header.clone()) + .ok_or_else(|| "Empty proof".to_string())?; + let (next_set_id, next_authorities) = + proof.verify(set_id, authorities).map_err(Box::new)?; + if proof.is_finished { + Ok(VerificationResult::::Complete(next_set_id, next_authorities, last_header)) + } else { + Ok(VerificationResult::::Partial( + next_set_id, + next_authorities, + last_header.hash(), + )) + } + } + + fn current_authorities(&self) -> AuthorityList { + self.authority_set.inner().current_authorities.clone() + } +} + +#[cfg(test)] +mod tests { + use super::{codec::Encode, WarpSyncProof}; + use crate::{AuthoritySetChanges, GrandpaJustification}; + use rand::prelude::*; + use sc_block_builder::BlockBuilderProvider; + use sp_blockchain::HeaderBackend; + use sp_consensus::BlockOrigin; + use sp_finality_grandpa::GRANDPA_ENGINE_ID; + use sp_keyring::Ed25519Keyring; + use sp_runtime::{generic::BlockId, traits::Header as _}; + use std::sync::Arc; + use substrate_test_runtime_client::{ + ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + #[test] + fn warp_sync_proof_generate_verify() { + let mut rng = rand::rngs::StdRng::from_seed([0; 32]); + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let available_authorities = Ed25519Keyring::iter().collect::>(); + let genesis_authorities = vec![(Ed25519Keyring::Alice.public().into(), 1)]; + + let mut current_authorities = vec![Ed25519Keyring::Alice]; + let mut current_set_id = 0; + let mut authority_set_changes = Vec::new(); + + for n in 1..=100 { + let mut block = client.new_block(Default::default()).unwrap().build().unwrap().block; + + let mut new_authorities = None; + + // we will trigger an authority set change every 10 blocks + if n != 0 && n % 10 == 0 { + // pick next authorities and add digest for the set change + let n_authorities = rng.gen_range(1..available_authorities.len()); + let next_authorities = available_authorities + .choose_multiple(&mut rng, n_authorities) + .cloned() + .collect::>(); + + new_authorities = Some(next_authorities.clone()); + + let next_authorities = next_authorities + .iter() + .map(|keyring| (keyring.public().into(), 1)) + .collect::>(); + + let digest = sp_runtime::generic::DigestItem::Consensus( + sp_finality_grandpa::GRANDPA_ENGINE_ID, + sp_finality_grandpa::ConsensusLog::ScheduledChange( + sp_finality_grandpa::ScheduledChange { delay: 0u64, next_authorities }, + ) + .encode(), + ); + + block.header.digest_mut().logs.push(digest); + } + + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + if let Some(new_authorities) = new_authorities { + // generate a justification for this block, finalize it 
and note the authority set + // change + let (target_hash, target_number) = { + let info = client.info(); + (info.best_hash, info.best_number) + }; + + let mut precommits = Vec::new(); + for keyring in &current_authorities { + let precommit = finality_grandpa::Precommit { target_hash, target_number }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_finality_grandpa::localized_payload(42, current_set_id, &msg); + let signature = keyring.sign(&encoded[..]).into(); + + let precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: keyring.public().into(), + }; + + precommits.push(precommit); + } + + let commit = finality_grandpa::Commit { target_hash, target_number, precommits }; + + let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); + + client + .finalize_block( + BlockId::Hash(target_hash), + Some((GRANDPA_ENGINE_ID, justification.encode())), + ) + .unwrap(); + + authority_set_changes.push((current_set_id, n)); + + current_set_id += 1; + current_authorities = new_authorities; + } + } + + let authority_set_changes = AuthoritySetChanges::from(authority_set_changes); + + // generate a warp sync proof + let genesis_hash = client.hash(0).unwrap().unwrap(); + + let warp_sync_proof = + WarpSyncProof::generate(&*backend, genesis_hash, &authority_set_changes).unwrap(); + + // verifying the proof should yield the last set id and authorities + let (new_set_id, new_authorities) = warp_sync_proof.verify(0, genesis_authorities).unwrap(); + + let expected_authorities = current_authorities + .iter() + .map(|keyring| (keyring.public().into(), 1)) + .collect::>(); + + assert_eq!(new_set_id, current_set_id); + assert_eq!(new_authorities, expected_authorities); + } +} diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 871cc3ef426ec..88d02f81ad5b3 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-informant" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Substrate informant."
edition = "2018" @@ -14,13 +14,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -futures = "0.3.4" +futures = "0.3.9" +futures-timer = "3.0.1" log = "0.4.8" -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-network = { version = "0.8.0", path = "../network" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -wasm-timer = "0.2" +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index aa2d883b5baa0..1f23856101aa3 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::OutputFormat; use ansi_term::Colour; @@ -23,8 +25,8 @@ use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zer use std::{ convert::{TryFrom, TryInto}, fmt, + time::Instant, }; -use wasm_timer::Instant; /// State of the informant display system. /// @@ -38,7 +40,6 @@ use wasm_timer::Instant; /// /// Call `InformantDisplay::new` to initialize the state, then regularly call `display` with the /// information to display. -/// pub struct InformantDisplay { /// Head of chain block number from the last time `display` has been called. /// `None` if `display` has never been called. 
@@ -82,23 +83,43 @@ impl InformantDisplay { let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; - let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = - if elapsed > 0 { - self.last_total_bytes_inbound = total_bytes_inbound; - self.last_total_bytes_outbound = total_bytes_outbound; - (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) - } else { - (diff_bytes_inbound, diff_bytes_outbound) - }; - - let (level, status, target) = match (net_status.sync_state, net_status.best_seen_block) { - (SyncState::Idle, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading, None) => ("⚙️ ", format!("Preparing{}", speed), "".into()), - (SyncState::Downloading, Some(n)) => ( + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound, diff_bytes_outbound) + }; + + let (level, status, target) = match ( + net_status.sync_state, + net_status.best_seen_block, + net_status.state_sync, + net_status.warp_sync, + ) { + (_, _, _, Some(warp)) => ( + "⏩", + "Warping".into(), + format!( + ", {}, ({:.2}) Mib", + warp.phase, + (warp.total_bytes as f32) / (1024f32 * 1024f32) + ), + ), + (_, _, Some(state), _) => ( "⚙️ ", - format!("Syncing{}", speed), - format!(", target=#{}", n), + "Downloading state".into(), + format!( + ", {}%, ({:.2}) Mib", + state.percentage, + (state.size as f32) / (1024f32 * 1024f32) + ), ), + (SyncState::Idle, _, _, _) => ("💤", "Idle".into(), "".into()), + (SyncState::Downloading, None, _, _) => + ("⚙️ ", format!("Preparing{}", speed), "".into()), + (SyncState::Downloading, Some(n), None, _) => + ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{}", n)), }; if self.format.enable_color { @@ -140,7 +161,7 @@ impl InformantDisplay { fn speed( best_number: NumberFor, last_number: Option>, - last_update: Instant + last_update: Instant, ) -> String { // Number of milliseconds elapsed since last time. let elapsed_ms = { @@ -153,25 +174,28 @@ fn speed( // Number of blocks that have been imported since last time. let diff = match last_number { None => return String::new(), - Some(n) => best_number.saturating_sub(n) + Some(n) => best_number.saturating_sub(n), }; if let Ok(diff) = TryInto::::try_into(diff) { // If the number of blocks can be converted to a regular integer, then it's easy: just // do the math and turn it into a `f64`. - let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / 10.0; + let speed = diff + .saturating_mul(10_000) + .checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) / + 10.0; format!(" {:4.1} bps", speed) - } else { // If the number of blocks can't be converted to a regular integer, then we need a more // algebraic approach and we stay within the realm of integers. 
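As a standalone restatement of the fast path above (illustrative only, not code from this patch): scaling the block count by 10_000 before the integer division, then dividing by 10.0 only at the end, keeps one decimal digit of precision without computing the rate in floating point.

    // Illustrative helper mirroring the fast path of `speed` above.
    fn blocks_per_sec(blocks: u128, elapsed_ms: u128) -> f64 {
        // `blocks * 10_000 / elapsed_ms` is "bps scaled by 10" as an integer;
        // dividing by 10.0 at the end recovers the single decimal place.
        blocks.saturating_mul(10_000).checked_div(elapsed_ms).map_or(0.0, |s| s as f64) / 10.0
    }

    // e.g. 30 blocks over 6_000 ms -> 5.0 bps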
- let one_thousand = NumberFor::::from(1_000); - let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::max_value()) - ); + let one_thousand = NumberFor::::from(1_000u32); + let elapsed = + NumberFor::::from(>::try_from(elapsed_ms).unwrap_or(u32::MAX)); - let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) + let speed = diff + .saturating_mul(one_thousand) + .checked_div(&elapsed) .unwrap_or_else(Zero::zero); format!(" {} bps", speed) } diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index c60eda76f63f6..f421dbbb7e564 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,65 +20,58 @@ use ansi_term::Colour; use futures::prelude::*; -use log::{info, trace, warn}; +use futures_timer::Delay; +use log::{debug, info, trace}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; -use sc_network::NetworkStatus; +use sc_network::NetworkService; +use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; -use sp_transaction_pool::TransactionPool; -use sp_utils::{status_sinks, mpsc::tracing_unbounded}; -use std::{fmt::Display, sync::Arc, time::Duration, collections::VecDeque}; +use std::{collections::VecDeque, fmt::Display, sync::Arc, time::Duration}; mod display; +/// Creates a stream that returns a new value every `duration`. +fn interval(duration: Duration) -> impl Stream + Unpin { + futures::stream::unfold((), move |_| Delay::new(duration).map(|_| Some(((), ())))).map(drop) +} + /// The format to print telemetry output in. #[derive(Clone, Debug)] pub struct OutputFormat { - /// Enable color output in logs. True by default. + /// Enable color output in logs. + /// + /// Is enabled by default. pub enable_color: bool, } impl Default for OutputFormat { fn default() -> Self { - Self { - enable_color: true, - } + Self { enable_color: true } } } -/// Marker trait for a type that implements `TransactionPool` and `MallocSizeOf` on `not(target_os = "unknown")`. -#[cfg(target_os = "unknown")] -pub trait TransactionPoolAndMaybeMallogSizeOf: TransactionPool {} - -/// Marker trait for a type that implements `TransactionPool` and `MallocSizeOf` on `not(target_os = "unknown")`. -#[cfg(not(target_os = "unknown"))] -pub trait TransactionPoolAndMaybeMallogSizeOf: TransactionPool + MallocSizeOf {} - -#[cfg(target_os = "unknown")] -impl TransactionPoolAndMaybeMallogSizeOf for T {} - -#[cfg(not(target_os = "unknown"))] -impl TransactionPoolAndMaybeMallogSizeOf for T {} - /// Builds the informant and returns a `Future` that drives the informant. -pub fn build( +pub async fn build( client: Arc, - network_status_sinks: Arc>>, - pool: Arc, + network: Arc::Hash>>, + pool: Arc

, format: OutputFormat, -) -> impl futures::Future -where +) where C: UsageProvider + HeaderMetadata + BlockchainEvents, >::Error: Display, + P: TransactionPool + MallocSizeOf, { let mut display = display::InformantDisplay::new(format.clone()); let client_1 = client.clone(); - let (network_status_sink, network_status_stream) = tracing_unbounded("mpsc_network_status"); - network_status_sinks.push(Duration::from_millis(5000), network_status_sink); - let display_notifications = network_status_stream + let display_notifications = interval(Duration::from_millis(5000)) + .filter_map(|_| async { + let status = network.status().await; + status.ok() + }) .for_each(move |net_status| { let info = client_1.usage_info(); if let Some(ref usage) = info.usage { @@ -89,7 +82,6 @@ where "Usage statistics not displayed as backend does not provide it", ) } - #[cfg(not(target_os = "unknown"))] trace!( target: "usage", "Subsystems memory [txpool: {} kB]", @@ -99,10 +91,10 @@ where future::ready(()) }); - future::join( - display_notifications, - display_block_import(client), - ).map(|_| ()) + futures::select! { + () = display_notifications.fuse() => (), + () = display_block_import(client).fuse() => (), + }; } fn display_block_import(client: Arc) -> impl Future @@ -122,22 +114,22 @@ where client.import_notification_stream().for_each(move |n| { // detect and log reorganizations. if let Some((ref last_num, ref last_hash)) = last_best { - if n.header.parent_hash() != last_hash && n.is_new_best { - let maybe_ancestor = sp_blockchain::lowest_common_ancestor( - &*client, - last_hash.clone(), - n.hash, - ); + if n.header.parent_hash() != last_hash && n.is_new_best { + let maybe_ancestor = + sp_blockchain::lowest_common_ancestor(&*client, last_hash.clone(), n.hash); match maybe_ancestor { Ok(ref ancestor) if ancestor.hash != *last_hash => info!( "♻️ Reorg on #{},{} to #{},{}, common ancestor #{},{}", - Colour::Red.bold().paint(format!("{}", last_num)), last_hash, - Colour::Green.bold().paint(format!("{}", n.header.number())), n.hash, - Colour::White.bold().paint(format!("{}", ancestor.number)), ancestor.hash, + Colour::Red.bold().paint(format!("{}", last_num)), + last_hash, + Colour::Green.bold().paint(format!("{}", n.header.number())), + n.hash, + Colour::White.bold().paint(format!("{}", ancestor.number)), + ancestor.hash, ), Ok(_) => {}, - Err(e) => warn!("Error computing tree route: {}", e), + Err(e) => debug!("Error computing tree route: {}", e), } } } @@ -146,7 +138,6 @@ where last_best = Some((n.header.number().clone(), n.hash.clone())); } - // If we already printed a message for a given block recently, // we should not print it again. 
if !last_blocks.contains(&n.hash) { diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index c0c3acde25edf..17c651a91decd 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-keystore" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -15,19 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.30" +async-trait = "0.1.50" derive_more = "0.99.2" -futures = "0.3.4" -futures-util = "0.3.4" -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } hex = "0.4.0" -merlin = { version = "2.0", default-features = false } -parking_lot = "0.10.0" -rand = "0.7.2" -serde_json = "1.0.41" -subtle = "2.1.1" +parking_lot = "0.11.1" +serde_json = "1.0.68" [dev-dependencies] tempfile = "3.1.0" diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 0b6d654bc623e..5e29f691997e6 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -1,25 +1,27 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Keystore (and session key management) for ed25519 based chains like Polkadot. #![warn(missing_docs)] -use std::io; use sp_core::crypto::KeyTypeId; use sp_keystore::Error as TraitError; +use std::io; /// Local keystore implementation mod local; @@ -33,22 +35,19 @@ pub enum Error { /// JSON error. Json(serde_json::Error), /// Invalid password. 
- #[display(fmt="Invalid password")] + #[display(fmt = "Invalid password")] InvalidPassword, /// Invalid BIP39 phrase - #[display(fmt="Invalid recovery phrase (BIP39) data")] + #[display(fmt = "Invalid recovery phrase (BIP39) data")] InvalidPhrase, /// Invalid seed - #[display(fmt="Invalid seed")] + #[display(fmt = "Invalid seed")] InvalidSeed, /// Public key type is not supported - #[display(fmt="Key crypto type is not supported")] + #[display(fmt = "Key crypto type is not supported")] KeyNotSupported(KeyTypeId), - /// Pair not found for public key and KeyTypeId - #[display(fmt="Pair not found for {} public key", "_0")] - PairNotFound(String), /// Keystore unavailable - #[display(fmt="Keystore unavailable")] + #[display(fmt = "Keystore unavailable")] Unavailable, } @@ -59,10 +58,8 @@ impl From for TraitError { fn from(error: Error) -> Self { match error { Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), - Error::PairNotFound(e) => TraitError::PairNotFound(e), - Error::InvalidSeed | Error::InvalidPhrase | Error::InvalidPassword => { - TraitError::ValidationError(error.to_string()) - }, + Error::InvalidSeed | Error::InvalidPhrase | Error::InvalidPassword => + TraitError::ValidationError(error.to_string()), Error::Unavailable => TraitError::Unavailable, Error::Io(e) => TraitError::Other(e.to_string()), Error::Json(e) => TraitError::Other(e.to_string()), @@ -79,4 +76,3 @@ impl std::error::Error for Error { } } } - diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 856327d46f6ea..e5c8ff14af095 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,30 +17,27 @@ // //! Local keystore implementation -use std::{ - collections::{HashMap, HashSet}, - fs::{self, File}, - io::Write, - path::PathBuf, - sync::Arc, -}; use async_trait::async_trait; use parking_lot::RwLock; +use sp_application_crypto::{ecdsa, ed25519, sr25519, AppKey, AppPair, IsWrappedBy}; use sp_core::{ - crypto::{CryptoTypePublicPair, KeyTypeId, Pair as PairT, ExposeSecret, SecretString, Public}, - sr25519::{Public as Sr25519Public, Pair as Sr25519Pair}, + crypto::{CryptoTypePublicPair, ExposeSecret, KeyTypeId, Pair as PairT, Public, SecretString}, + sr25519::{Pair as Sr25519Pair, Public as Sr25519Public}, Encode, }; use sp_keystore::{ - CryptoStore, - SyncCryptoStorePtr, - Error as TraitError, - SyncCryptoStore, - vrf::{VRFTranscriptData, VRFSignature, make_transcript}, + vrf::{make_transcript, VRFSignature, VRFTranscriptData}, + CryptoStore, Error as TraitError, SyncCryptoStore, SyncCryptoStorePtr, +}; +use std::{ + collections::{HashMap, HashSet}, + fs::{self, File}, + io::Write, + path::PathBuf, + sync::Arc, }; -use sp_application_crypto::{ed25519, sr25519, ecdsa}; -use crate::{Result, Error}; +use crate::{Error, Result}; /// A local keystore that is either memory-based or filesystem-based. pub struct LocalKeystore(RwLock); @@ -57,11 +54,25 @@ impl LocalKeystore { let inner = KeystoreInner::new_in_memory(); Self(RwLock::new(inner)) } +
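With `PairNotFound` removed, a missing key becomes an ordinary outcome for callers rather than an error. A usage sketch (illustrative only; it assumes the `sp_application_crypto`/`sp_core` types already used by this file and the crate's `Result` alias):

    use sp_application_crypto::ed25519;
    use sp_core::Pair as _;

    // Sign only when the key is present locally; `Ok(None)` now means
    // "no such key in this keystore" instead of an `Err(_)`.
    fn sign_if_local(
        store: &LocalKeystore,
        public: &ed25519::AppPublic,
        msg: &[u8],
    ) -> Result<Option<ed25519::AppSignature>> {
        Ok(store.key_pair::<ed25519::AppPair>(public)?.map(|pair| pair.sign(msg)))
    }

+ /// Get a key pair for the given public key. + /// + /// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists and + /// `Err(_)` when something failed.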
+ pub fn key_pair( + &self, + public: &::Public, + ) -> Result> { + self.0.read().key_pair::(public) + } } #[async_trait] impl CryptoStore for LocalKeystore { - async fn keys(&self, id: KeyTypeId) -> std::result::Result, TraitError> { + async fn keys( + &self, + id: KeyTypeId, + ) -> std::result::Result, TraitError> { SyncCryptoStore::keys(self, id) } @@ -101,7 +112,12 @@ impl CryptoStore for LocalKeystore { SyncCryptoStore::ecdsa_generate_new(self, id, seed) } - async fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> std::result::Result<(), ()> { + async fn insert_unknown( + &self, + id: KeyTypeId, + suri: &str, + public: &[u8], + ) -> std::result::Result<(), ()> { SyncCryptoStore::insert_unknown(self, id, suri, public) } @@ -122,7 +138,7 @@ impl CryptoStore for LocalKeystore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> std::result::Result, TraitError> { + ) -> std::result::Result>, TraitError> { SyncCryptoStore::sign_with(self, id, key, msg) } @@ -131,37 +147,38 @@ impl CryptoStore for LocalKeystore { key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> std::result::Result { + ) -> std::result::Result, TraitError> { SyncCryptoStore::sr25519_vrf_sign(self, key_type, public, transcript_data) } + + async fn ecdsa_sign_prehashed( + &self, + id: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8; 32], + ) -> std::result::Result, TraitError> { + SyncCryptoStore::ecdsa_sign_prehashed(self, id, public, msg) + } } impl SyncCryptoStore for LocalKeystore { - fn keys( - &self, - id: KeyTypeId - ) -> std::result::Result, TraitError> { + fn keys(&self, id: KeyTypeId) -> std::result::Result, TraitError> { let raw_keys = self.0.read().raw_public_keys(id)?; - Ok(raw_keys.into_iter() - .fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k)); - v - })) + Ok(raw_keys.into_iter().fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k)); + v + })) } fn supported_keys( &self, id: KeyTypeId, - keys: Vec + keys: Vec, ) -> std::result::Result, TraitError> { - let all_keys = SyncCryptoStore::keys(self, id)? 
- .into_iter() - .collect::>(); - Ok(keys.into_iter() - .filter(|key| all_keys.contains(key)) - .collect::>()) + let all_keys = SyncCryptoStore::keys(self, id)?.into_iter().collect::>(); + Ok(keys.into_iter().filter(|key| all_keys.contains(key)).collect::>()) } fn sign_with( @@ -169,40 +186,44 @@ impl SyncCryptoStore for LocalKeystore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> std::result::Result, TraitError> { + ) -> std::result::Result>, TraitError> { match key.0 { ed25519::CRYPTO_ID => { let pub_key = ed25519::Public::from_slice(key.1.as_slice()); - let key_pair: ed25519::Pair = self.0.read() + let key_pair = self + .0 + .read() .key_pair_by_type::(&pub_key, id) .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) - } + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() + }, sr25519::CRYPTO_ID => { let pub_key = sr25519::Public::from_slice(key.1.as_slice()); - let key_pair: sr25519::Pair = self.0.read() + let key_pair = self + .0 + .read() .key_pair_by_type::(&pub_key, id) .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() }, ecdsa::CRYPTO_ID => { let pub_key = ecdsa::Public::from_slice(key.1.as_slice()); - let key_pair: ecdsa::Pair = self.0.read() + let key_pair = self + .0 + .read() .key_pair_by_type::(&pub_key, id) .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) - } - _ => Err(TraitError::KeyNotSupported(id)) + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() + }, + _ => Err(TraitError::KeyNotSupported(id)), } } fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.0.read().raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| sr25519::Public::from_slice(k.as_slice())) - .collect() - }) + self.0 + .read() + .raw_public_keys(key_type) + .map(|v| v.into_iter().map(|k| sr25519::Public::from_slice(k.as_slice())).collect()) .unwrap_or_default() } @@ -212,21 +233,21 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result { let pair = match seed { - Some(seed) => self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), None => self.0.write().generate_by_type::(id), - }.map_err(|e| -> TraitError { e.into() })?; + } + .map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.0.read().raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| ed25519::Public::from_slice(k.as_slice())) - .collect() - }) - .unwrap_or_default() + self.0 + .read() + .raw_public_keys(key_type) + .map(|v| v.into_iter().map(|k| ed25519::Public::from_slice(k.as_slice())).collect()) + .unwrap_or_default() } fn ed25519_generate_new( @@ -235,20 +256,20 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result { let pair = match seed { - Some(seed) => self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), None => self.0.write().generate_by_type::(id), - }.map_err(|e| -> TraitError { e.into() })?; + } + .map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } fn ecdsa_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.0.read().raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| ecdsa::Public::from_slice(k.as_slice())) - .collect() - }) + self.0 + .read() + .raw_public_keys(key_type) + .map(|v| 
v.into_iter().map(|k| ecdsa::Public::from_slice(k.as_slice())).collect()) .unwrap_or_default() } @@ -258,21 +279,28 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result { let pair = match seed { - Some(seed) => self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::(seed, id), None => self.0.write().generate_by_type::(id), - }.map_err(|e| -> TraitError { e.into() })?; + } + .map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } - fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) - -> std::result::Result<(), ()> - { + fn insert_unknown( + &self, + key_type: KeyTypeId, + suri: &str, + public: &[u8], + ) -> std::result::Result<(), ()> { self.0.write().insert_unknown(key_type, suri, public).map_err(|_| ()) } fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys.iter().all(|(p, t)| self.0.read().key_phrase_by_type(&p, *t).is_ok()) + public_keys + .iter() + .all(|(p, t)| self.0.read().key_phrase_by_type(&p, *t).ok().flatten().is_some()) } fn sr25519_vrf_sign( @@ -280,16 +308,27 @@ impl SyncCryptoStore for LocalKeystore { key_type: KeyTypeId, public: &Sr25519Public, transcript_data: VRFTranscriptData, - ) -> std::result::Result { + ) -> std::result::Result, TraitError> { let transcript = make_transcript(transcript_data); - let pair = self.0.read().key_pair_by_type::(public, key_type) - .map_err(|e| TraitError::PairNotFound(e.to_string()))?; + let pair = self.0.read().key_pair_by_type::(public, key_type)?; + + if let Some(pair) = pair { + let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); + Ok(Some(VRFSignature { output: inout.to_output(), proof })) + } else { + Ok(None) + } + } + + fn ecdsa_sign_prehashed( + &self, + id: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8; 32], + ) -> std::result::Result, TraitError> { + let pair = self.0.read().key_pair_by_type::(public, id)?; - let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(VRFSignature { - output: inout.to_output(), - proof, - }) + pair.map(|k| k.sign_prehashed(msg)).map(Ok).transpose() } } @@ -321,7 +360,7 @@ impl KeystoreInner { /// Open the store at the given path. /// /// Optionally takes a password that will be used to encrypt/decrypt the keys. - pub fn open>(path: T, password: Option) -> Result { + fn open>(path: T, password: Option) -> Result { let path = path.into(); fs::create_dir_all(&path)?; @@ -331,26 +370,16 @@ impl KeystoreInner { /// Get the password for this store. fn password(&self) -> Option<&str> { - self.password.as_ref() - .map(|p| p.expose_secret()) - .map(|p| p.as_str()) + self.password.as_ref().map(|p| p.expose_secret()).map(|p| p.as_str()) } /// Create a new in-memory store. - pub fn new_in_memory() -> Self { - Self { - path: None, - additional: HashMap::new(), - password: None - } + fn new_in_memory() -> Self { + Self { path: None, additional: HashMap::new(), password: None } } /// Get the key phrase for the given public key and key type from the in-memory store. - fn get_additional_pair( - &self, - public: &[u8], - key_type: KeyTypeId, - ) -> Option<&String> { + fn get_additional_pair(&self, public: &[u8], key_type: KeyTypeId) -> Option<&String> { let key = (key_type, public.to_vec()); self.additional.get(&key) } @@ -365,8 +394,8 @@ impl KeystoreInner { /// Insert a new key with anonymous crypto. /// - /// Places it into the file system store. 
- pub fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { + /// Places it into the file system store, if a path is configured. + fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { if let Some(path) = self.key_file_path(public, key_type) { let mut file = File::create(path).map_err(Error::Io)?; serde_json::to_writer(&file, &suri).map_err(Error::Json)?; @@ -377,13 +406,16 @@ impl KeystoreInner { /// Generate a new key. /// - /// Places it into the file system store. - pub fn generate_by_type(&self, key_type: KeyTypeId) -> Result { + /// Places it into the file system store, if a path is configured. Otherwise inserts + /// it into the memory cache only. + fn generate_by_type(&mut self, key_type: KeyTypeId) -> Result { let (pair, phrase, _) = Pair::generate_with_phrase(self.password()); if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { let mut file = File::create(path)?; serde_json::to_writer(&file, &phrase)?; file.flush()?; + } else { + self.insert_ephemeral_pair(&pair, &phrase, key_type); } Ok(pair) } @@ -391,7 +423,7 @@ impl KeystoreInner { /// Create a new key from seed. /// /// Does not place it into the file system store. - pub fn insert_ephemeral_from_seed_by_type( + fn insert_ephemeral_from_seed_by_type( &mut self, seed: &str, key_type: KeyTypeId, @@ -402,36 +434,50 @@ impl KeystoreInner { } /// Get the key phrase for a given public key and key type. - fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result { + fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result> { if let Some(phrase) = self.get_additional_pair(public, key_type) { - return Ok(phrase.clone()) + return Ok(Some(phrase.clone())) } - let path = self.key_file_path(public, key_type).ok_or_else(|| Error::Unavailable)?; - let file = File::open(path)?; + let path = if let Some(path) = self.key_file_path(public, key_type) { + path + } else { + return Ok(None) + }; + + if path.exists() { + let file = File::open(path)?; - serde_json::from_reader(&file).map_err(Into::into) + serde_json::from_reader(&file).map_err(Into::into).map(Some) + } else { + Ok(None) + } } /// Get a key pair for the given public key and key type. - pub fn key_pair_by_type(&self, + fn key_pair_by_type( + &self, public: &Pair::Public, key_type: KeyTypeId, - ) -> Result { - let phrase = self.key_phrase_by_type(public.as_slice(), key_type)?; - let pair = Pair::from_string( - &phrase, - self.password(), - ).map_err(|_| Error::InvalidPhrase)?; + ) -> Result> { + let phrase = if let Some(p) = self.key_phrase_by_type(public.as_slice(), key_type)? { + p + } else { + return Ok(None) + }; + + let pair = Pair::from_string(&phrase, self.password()).map_err(|_| Error::InvalidPhrase)?; if &pair.public() == public { - Ok(pair) + Ok(Some(pair)) } else { Err(Error::InvalidPassword) } }
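Both `key_file_path` (below) and `raw_public_keys` (further down) rely on the same on-disk naming scheme: the file name is the four key-type bytes hex-encoded, immediately followed by the hex-encoded public key. Restated as a standalone helper (illustrative only):

    // Illustrative: the file-name format shared by `key_file_path` and
    // `raw_public_keys` -- hex(key_type) followed by hex(public key).
    fn expected_file_name(key_type: KeyTypeId, public: &[u8]) -> String {
        format!("{}{}", hex::encode(key_type.0), hex::encode(public))
    }

- /// Returns the file path for the given public key and key type. + /// Get the file path for the given public key and key type. + /// + /// Returns `None` if the keystore only exists in-memory and there isn't any path to provide.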
fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { let mut buf = self.path.as_ref()?.clone(); let key_type = hex::encode(key_type.0); @@ -442,7 +488,9 @@ impl KeystoreInner { /// Returns a list of raw public keys filtered by `KeyTypeId` fn raw_public_keys(&self, id: KeyTypeId) -> Result>> { - let mut public_keys: Vec> = self.additional.keys() + let mut public_keys: Vec> = self + .additional + .keys() .into_iter() .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) .collect(); @@ -457,11 +505,11 @@ impl KeystoreInner { match hex::decode(name) { Ok(ref hex) if hex.len() > 4 => { if &hex[0..4] != &id.0 { - continue; + continue } let public = hex[4..].to_vec(); public_keys.push(public); - } + }, _ => continue, } } @@ -470,71 +518,86 @@ impl KeystoreInner { Ok(public_keys) } -} + /// Get a key pair for the given public key. + /// + /// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists or `Err(_)` + /// when something failed. + pub fn key_pair( + &self, + public: &::Public, + ) -> Result> { + self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID) + .map(|v| v.map(Into::into)) + } +} #[cfg(test)] mod tests { use super::*; + use sp_application_crypto::{ed25519, sr25519, AppPublic}; + use sp_core::{crypto::Ss58Codec, testing::SR25519, Pair}; + use std::{fs, str::FromStr}; use tempfile::TempDir; - use sp_core::{ - Pair, - crypto::{IsWrappedBy, Ss58Codec}, - testing::SR25519, - }; - use sp_application_crypto::{ed25519, sr25519, AppPublic, AppKey, AppPair}; - use std::{ - fs, - str::FromStr, - }; - /// Generate a new key. - /// - /// Places it into the file system store. - fn generate(store: &KeystoreInner) -> Result { - store.generate_by_type::(Pair::ID).map(Into::into) - } + const TEST_KEY_TYPE: KeyTypeId = KeyTypeId(*b"test"); - /// Create a new key from seed. - /// - /// Does not place it into the file system store. - fn insert_ephemeral_from_seed(store: &mut KeystoreInner, seed: &str) -> Result { - store.insert_ephemeral_from_seed_by_type::(seed, Pair::ID).map(Into::into) - } + impl KeystoreInner { + fn insert_ephemeral_from_seed(&mut self, seed: &str) -> Result { + self.insert_ephemeral_from_seed_by_type::(seed, Pair::ID) + .map(Into::into) + } - /// Get public keys of all stored keys that match the key type. - /// - /// This will just use the type of the public key (a list of which to be returned) in order - /// to determine the key type. Unless you use a specialized application-type public key, then - /// this only give you keys registered under generic cryptography, and will not return keys - /// registered under the application type. - fn public_keys(store: &KeystoreInner) -> Result> { - store.raw_public_keys(Public::ID) - .map(|v| { - v.into_iter() - .map(|k| Public::from_slice(k.as_slice())) - .collect() - }) - } + fn public_keys(&self) -> Result> { + self.raw_public_keys(Public::ID) + .map(|v| v.into_iter().map(|k| Public::from_slice(k.as_slice())).collect()) + } - /// Get a key pair for the given public key. 
- fn key_pair(store: &KeystoreInner, public: &::Public) -> Result { - store.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) + fn generate(&mut self) -> Result { + self.generate_by_type::(Pair::ID).map(Into::into) + } } #[test] fn basic_store() { let temp_dir = TempDir::new().unwrap(); - let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); + let mut store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - assert!(public_keys::(&store).unwrap().is_empty()); + assert!(store.public_keys::().unwrap().is_empty()); - let key: ed25519::AppPair = generate(&store).unwrap(); - let key2: ed25519::AppPair = key_pair(&store, &key.public()).unwrap(); + let key: ed25519::AppPair = store.generate().unwrap(); + let key2: ed25519::AppPair = store.key_pair(&key.public()).unwrap().unwrap(); assert_eq!(key.public(), key2.public()); - assert_eq!(public_keys::(&store).unwrap()[0], key.public()); + assert_eq!(store.public_keys::().unwrap()[0], key.public()); + } + + #[test] + fn has_keys_works() { + let temp_dir = TempDir::new().unwrap(); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + + let key: ed25519::AppPair = store.0.write().generate().unwrap(); + let key2 = ed25519::Pair::generate().0; + + assert!(!SyncCryptoStore::has_keys( + &store, + &[(key2.public().to_vec(), ed25519::AppPublic::ID)] + )); + + assert!(!SyncCryptoStore::has_keys( + &store, + &[ + (key2.public().to_vec(), ed25519::AppPublic::ID), + (key.public().to_raw_vec(), ed25519::AppPublic::ID), + ], + )); + + assert!(SyncCryptoStore::has_keys( + &store, + &[(key.public().to_raw_vec(), ed25519::AppPublic::ID)] + )); } #[test] @@ -542,10 +605,11 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let mut store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - let pair: ed25519::AppPair = insert_ephemeral_from_seed( - &mut store, - "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc" - ).unwrap(); + let pair: ed25519::AppPair = store + .insert_ephemeral_from_seed( + "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc", + ) + .unwrap(); assert_eq!( "5DKUrgFqCPV8iAXx9sjy1nyBygQCeiUYRFWurZGhnrn3HJCA", pair.public().to_ss58check() @@ -554,35 +618,37 @@ mod tests { drop(store); let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); // Keys generated from seed should not be persisted! 
- assert!(key_pair::(&store, &pair.public()).is_err()); + assert!(store.key_pair::(&pair.public()).unwrap().is_none()); } #[test] fn password_being_used() { let password = String::from("password"); let temp_dir = TempDir::new().unwrap(); - let store = KeystoreInner::open( + let mut store = KeystoreInner::open( temp_dir.path(), Some(FromStr::from_str(password.as_str()).unwrap()), - ).unwrap(); + ) + .unwrap(); - let pair: ed25519::AppPair = generate(&store).unwrap(); + let pair: ed25519::AppPair = store.generate().unwrap(); assert_eq!( pair.public(), - key_pair::(&store, &pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().unwrap().public(), ); // Without the password the key should not be retrievable let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - assert!(key_pair::(&store, &pair.public()).is_err()); + assert!(store.key_pair::(&pair.public()).is_err()); let store = KeystoreInner::open( temp_dir.path(), Some(FromStr::from_str(password.as_str()).unwrap()), - ).unwrap(); + ) + .unwrap(); assert_eq!( pair.public(), - key_pair::(&store, &pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().unwrap().public(), ); } @@ -593,18 +659,23 @@ mod tests { let mut keys = Vec::new(); for i in 0..10 { - keys.push(generate::(&store).unwrap().public()); - keys.push(insert_ephemeral_from_seed::( - &mut store, - &format!("0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", i), - ).unwrap().public()); + keys.push(store.generate::().unwrap().public()); + keys.push( + store + .insert_ephemeral_from_seed::(&format!( + "0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", + i + )) + .unwrap() + .public(), + ); } // Generate a key of a different type - generate::(&store).unwrap(); + store.generate::().unwrap(); keys.sort(); - let mut store_pubs = public_keys::(&store).unwrap(); + let mut store_pubs = store.public_keys::().unwrap(); store_pubs.sort(); assert_eq!(keys, store_pubs); @@ -618,16 +689,14 @@ mod tests { let secret_uri = "//Alice"; let key_pair = sr25519::AppPair::from_string(secret_uri, None).expect("Generates key pair"); - store.insert_unknown( - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); + store + .insert_unknown(SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); - let store_key_pair = store.key_pair_by_type::( - &key_pair.public(), - SR25519, - ).expect("Gets key pair from keystore"); + let store_key_pair = store + .key_pair_by_type::(&key_pair.public(), SR25519) + .expect("Gets key pair from keystore") + .unwrap(); assert_eq!(key_pair.public(), store_key_pair.public()); } @@ -640,8 +709,30 @@ mod tests { let file_name = temp_dir.path().join(hex::encode(&SR25519.0[..2])); fs::write(file_name, "test").expect("Invalid file is written"); - assert!( - SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty(), - ); + assert!(SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty()); + } + + #[test] + fn generate_with_seed_is_not_stored() { + let temp_dir = TempDir::new().unwrap(); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + let _alice_tmp_key = + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); + + drop(store); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 0); + } + + 
#[test] + fn generate_can_be_fetched_in_memory() { + let store = LocalKeystore::in_memory(); + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, None).unwrap(); + assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 2); } } diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index d9fecb7aa8fa2..b10f7646bf9bd 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "components for a light client" name = "sc-light" -version = "2.0.0" +version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -11,18 +11,17 @@ documentation = "https://docs.rs/sc-light" readme = "README.md" [dependencies] -parking_lot = "0.10.0" -lazy_static = "1.4.0" +parking_lot = "0.11.1" hash-db = "0.15.2" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor = { version = "0.8.0", path = "../executor" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor = { version = "0.10.0-dev", path = "../executor" } [features] default = [] diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index be7953e528bd8..3091dce625a3f 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,38 +19,44 @@ //! Light client backend. Only stores headers and justifications of blocks. //! Everything else is requested from full nodes on demand. 
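A distilled model of the state policy this backend implements (illustrative types only, not the ones defined in this file): the genesis state is kept in memory and served locally, while the state of any later block is simply not available and must be fetched from a full node.

    // Toy model of `GenesisOrUnavailableState` (defined later in this file).
    enum LocalState<S> {
        Genesis(S),  // genesis state, materialized in memory
        Unavailable, // any other block: not stored on a light client
    }

    fn local_storage<S>(state: &LocalState<S>) -> Result<&S, &'static str> {
        match state {
            LocalState::Genesis(s) => Ok(s),
            LocalState::Unavailable => Err("NotAvailableOnLightClient"),
        }
    }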
-use std::collections::{HashMap, HashSet}; -use std::sync::Arc; use parking_lot::RwLock; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use codec::{Decode, Encode}; -use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_core::offchain::storage::InMemOffchainStorage; -use sp_state_machine::{ - Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, -}; -use sp_runtime::{generic::BlockId, Justification, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HashFor}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use super::blockchain::Blockchain; +use hash_db::Hasher; use sc_client_api::{ backend::{ - AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState, - PrunableStateChangesTrieStorage, - }, - blockchain::{ - HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys, + AuxStore, Backend as ClientBackend, BlockImportOperation, NewBlockState, + PrunableStateChangesTrieStorage, RemoteBackend, }, - light::Storage as BlockchainStorage, + blockchain::{well_known_cache_keys, HeaderBackend as BlockchainHeaderBackend}, in_mem::check_genesis_storage, + light::Storage as BlockchainStorage, UsageInfo, }; -use super::blockchain::Blockchain; -use hash_db::Hasher; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_core::{ + offchain::storage::InMemOffchainStorage, + storage::{well_known_keys, ChildInfo}, + ChangesTrieConfiguration, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, + Justification, Justifications, Storage, +}; +use sp_state_machine::{ + Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, + IndexOperation, StorageCollection, TrieBackend, +}; -const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; +const IN_MEMORY_EXPECT_PROOF: &str = + "InMemory state backend has Void error type and always succeeds; qed"; /// Light client backend. pub struct Backend { @@ -84,11 +90,7 @@ pub enum GenesisOrUnavailableState { impl Backend { /// Create new light backend. pub fn new(blockchain: Arc>) -> Self { - Self { - blockchain, - genesis_state: RwLock::new(None), - import_lock: Default::default(), - } + Self { blockchain, genesis_state: RwLock::new(None), import_lock: Default::default() } } /// Get shared blockchain reference. 
@@ -102,9 +104,13 @@ impl AuxStore for Backend { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { self.blockchain.storage().insert_aux(insert, delete) } @@ -114,10 +120,10 @@ impl AuxStore for Backend { } impl ClientBackend for Backend> - where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, +where + Block: BlockT, + S: BlockchainStorage, + Block::Hash: Ord, { type BlockImportOperation = ImportOperation; type Blockchain = Blockchain; @@ -141,15 +147,12 @@ impl ClientBackend for Backend> fn begin_state_operation( &self, _operation: &mut Self::BlockImportOperation, - _block: BlockId + _block: BlockId, ) -> ClientResult<()> { Ok(()) } - fn commit_operation( - &self, - mut operation: Self::BlockImportOperation, - ) -> ClientResult<()> { + fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { if !operation.finalized_blocks.is_empty() { for block in operation.finalized_blocks { self.blockchain.storage().finalize_header(block)?; @@ -159,7 +162,9 @@ impl ClientBackend for Backend> if let Some(header) = operation.header { let is_genesis_import = header.number().is_zero(); if let Some(new_config) = operation.changes_trie_config_update { - operation.cache.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); + operation + .cache + .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); } self.blockchain.storage().import_header( header, @@ -175,11 +180,12 @@ impl ClientBackend for Backend> } else { for (key, maybe_val) in operation.aux_ops { match maybe_val { - Some(val) => self.blockchain.storage().insert_aux( - &[(&key[..], &val[..])], - std::iter::empty(), - )?, - None => self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, + Some(val) => self + .blockchain + .storage() + .insert_aux(&[(&key[..], &val[..])], std::iter::empty())?, + None => + self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, } } } @@ -199,6 +205,14 @@ impl ClientBackend for Backend> self.blockchain.storage().finalize_header(block) } + fn append_justification( + &self, + _block: BlockId, + _justification: Justification, + ) -> ClientResult<()> { + Ok(()) + } + fn blockchain(&self) -> &Blockchain { &self.blockchain } @@ -221,7 +235,7 @@ impl ClientBackend for Backend> // special case for genesis block if block_number.is_zero() { if let Some(genesis_state) = self.genesis_state.read().clone() { - return Ok(GenesisOrUnavailableState::Genesis(genesis_state)); + return Ok(GenesisOrUnavailableState::Genesis(genesis_state)) } } @@ -238,6 +252,10 @@ impl ClientBackend for Backend> Err(ClientError::NotAvailableOnLightClient) } + fn remove_leaf_block(&self, _hash: &Block::Hash) -> ClientResult<()> { + Err(ClientError::NotAvailableOnLightClient) + } + fn get_import_lock(&self) -> &RwLock<()> { &self.import_lock } @@ -250,8 +268,9 @@ where Block::Hash: Ord, { fn is_local_state_available(&self, block: &BlockId) -> bool { - self.genesis_state.read().is_some() - && self.blockchain.expect_block_number_from_id(block) + self.genesis_state.read().is_some() && + self.blockchain + .expect_block_number_from_id(block) .map(|num| num.is_zero()) .unwrap_or(false) } @@ -262,10 +281,10 @@ where } impl BlockImportOperation for ImportOperation - where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, +where + Block: BlockT, + S: BlockchainStorage, + 
Block::Hash: Ord, { type State = GenesisOrUnavailableState>; @@ -278,7 +297,8 @@ impl BlockImportOperation for ImportOperation &mut self, header: Block::Header, _body: Option>, - _justification: Option, + _indexed_body: Option>>, + _justifications: Option, state: NewBlockState, ) -> ClientResult<()> { self.leaf_state = state; @@ -306,14 +326,18 @@ impl BlockImportOperation for ImportOperation Ok(()) } - fn reset_storage(&mut self, input: Storage) -> ClientResult { + fn set_genesis_state(&mut self, input: Storage, commit: bool) -> ClientResult { check_genesis_storage(&input)?; // changes trie configuration - let changes_trie_config = input.top.iter() + let changes_trie_config = input + .top + .iter() .find(|(k, _)| &k[..] == well_known_keys::CHANGES_TRIE_CONFIG) - .map(|(_, v)| Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis")); + .map(|(_, v)| { + Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis") + }); self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck @@ -321,7 +345,8 @@ impl BlockImportOperation for ImportOperation storage.insert(None, input.top); // create a list of children keys to re-compute roots for - let child_delta = input.children_default + let child_delta = input + .children_default .iter() .map(|(_storage_key, storage_child)| (&storage_child.child_info, std::iter::empty())); @@ -332,13 +357,20 @@ impl BlockImportOperation for ImportOperation let storage_update = InMemoryBackend::from(storage); let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); - self.storage_update = Some(storage_update); + if commit { + self.storage_update = Some(storage_update); + } Ok(storage_root) } + fn reset_storage(&mut self, _input: Storage) -> ClientResult { + Err(ClientError::NotAvailableOnLightClient) + } + fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { self.aux_ops.append(&mut ops.into_iter().collect()); Ok(()) @@ -356,7 +388,7 @@ impl BlockImportOperation for ImportOperation fn mark_finalized( &mut self, block: BlockId, - _justification: Option, + _justifications: Option, ) -> ClientResult<()> { self.finalized_blocks.push(block); Ok(()) @@ -366,6 +398,14 @@ impl BlockImportOperation for ImportOperation self.set_head = Some(block); Ok(()) } + + fn update_transaction_index( + &mut self, + _index: Vec, + ) -> sp_blockchain::Result<()> { + // noop for the light client + Ok(()) + } } impl std::fmt::Debug for GenesisOrUnavailableState { @@ -378,8 +418,8 @@ impl std::fmt::Debug for GenesisOrUnavailableState { } impl StateBackend for GenesisOrUnavailableState - where - H::Out: Ord + codec::Codec, +where + H::Out: Ord + codec::Codec, { type Error = ClientError; type Transaction = as StateBackend>::Transaction; @@ -393,11 +433,7 @@ impl StateBackend for GenesisOrUnavailableState } } - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> ClientResult>> { + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> ClientResult>> { match *self { GenesisOrUnavailableState::Genesis(ref state) => Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), @@ -419,36 +455,53 @@ impl StateBackend for GenesisOrUnavailableState key: &[u8], ) -> Result>, Self::Error> { match *self { - GenesisOrUnavailableState::Genesis(ref state) => Ok( - 
state.next_child_storage_key(child_info, key) - .expect(IN_MEMORY_EXPECT_PROOF) - ), + GenesisOrUnavailableState::Genesis(ref state) => + Ok(state.next_child_storage_key(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.for_keys_with_prefix(prefix, action), + GenesisOrUnavailableState::Genesis(ref state) => + state.for_keys_with_prefix(prefix, action), GenesisOrUnavailableState::Unavailable => (), } } fn for_key_values_with_prefix(&self, prefix: &[u8], action: A) { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.for_key_values_with_prefix(prefix, action), + GenesisOrUnavailableState::Genesis(ref state) => + state.for_key_values_with_prefix(prefix, action), GenesisOrUnavailableState::Unavailable => (), } } - fn for_keys_in_child_storage( + fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + action: A, + allow_missing: bool, + ) -> ClientResult { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => Ok(state + .apply_to_key_values_while(child_info, prefix, start_at, action, allow_missing) + .expect(IN_MEMORY_EXPECT_PROOF)), + GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_in_child_storage(child_info, action), + state.apply_to_keys_while(child_info, prefix, action), GenesisOrUnavailableState::Unavailable => (), } } @@ -468,11 +521,13 @@ impl StateBackend for GenesisOrUnavailableState fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.storage_root(delta), + GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -480,15 +535,17 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { match *self { GenesisOrUnavailableState::Genesis(ref state) => { let (root, is_equal, _) = state.child_storage_root(child_info, delta); (root, is_equal, Default::default()) }, - GenesisOrUnavailableState::Unavailable => - (H::Out::default(), true, Default::default()), + GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), } } @@ -506,15 +563,15 @@ impl StateBackend for GenesisOrUnavailableState } } - fn register_overlay_stats(&mut self, _stats: &sp_state_machine::StateMachineStats) { } + fn register_overlay_stats(&self, _stats: &sp_state_machine::StateMachineStats) {} fn usage_info(&self) -> sp_state_machine::UsageInfo { sp_state_machine::UsageInfo::empty() } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&self) -> Option<&TrieBackend> { match self { - GenesisOrUnavailableState::Genesis(ref mut state) => 
state.as_trie_backend(), + GenesisOrUnavailableState::Genesis(ref state) => state.as_trie_backend(), GenesisOrUnavailableState::Unavailable => None, } } diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs index 3b5753f2849d5..e88c724193697 100644 --- a/client/light/src/blockchain.rs +++ b/client/light/src/blockchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,27 +21,25 @@ use std::sync::Arc; -use sp_runtime::{Justification, generic::BlockId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; - -use sp_blockchain::{ - HeaderMetadata, CachedHeaderMetadata, Error as ClientError, Result as ClientResult, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, + Justifications, }; + +use crate::fetcher::RemoteHeaderRequest; pub use sc_client_api::{ - backend::{ - AuxStore, NewBlockState, ProvideChtRoots, - }, + backend::{AuxStore, NewBlockState, ProvideChtRoots}, blockchain::{ - Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, + well_known_cache_keys, Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, - well_known_cache_keys, - }, - light::{ - RemoteBlockchain, LocalOrRemote, Storage }, cht, + light::{LocalOrRemote, RemoteBlockchain, Storage}, +}; +use sp_blockchain::{ + CachedHeaderMetadata, Error as ClientError, HeaderMetadata, Result as ClientResult, }; -use crate::fetcher::RemoteHeaderRequest; /// Light client blockchain. pub struct Blockchain { @@ -51,9 +49,7 @@ pub struct Blockchain { impl Blockchain { /// Create new light blockchain backed with given storage. pub fn new(storage: S) -> Self { - Self { - storage, - } + Self { storage } } /// Get storage reference. @@ -62,7 +58,11 @@ impl Blockchain { } } -impl BlockchainHeaderBackend for Blockchain where Block: BlockT, S: Storage { +impl BlockchainHeaderBackend for Blockchain +where + Block: BlockT, + S: Storage, +{ fn header(&self, id: BlockId) -> ClientResult> { match RemoteBlockchain::header(self, id)? 
{ LocalOrRemote::Local(header) => Ok(Some(header)), @@ -83,15 +83,25 @@ impl BlockchainHeaderBackend for Blockchain where Block: Blo self.storage.number(hash) } - fn hash(&self, number: <::Header as HeaderT>::Number) -> ClientResult> { + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> ClientResult> { self.storage.hash(number) } } -impl HeaderMetadata for Blockchain where Block: BlockT, S: Storage { +impl HeaderMetadata for Blockchain +where + Block: BlockT, + S: Storage, +{ type Error = ClientError; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { self.storage.header_metadata(hash) } @@ -104,12 +114,16 @@ impl HeaderMetadata for Blockchain where Block: BlockT, S: S } } -impl BlockchainBackend for Blockchain where Block: BlockT, S: Storage { +impl BlockchainBackend for Blockchain +where + Block: BlockT, + S: Storage, +{ fn body(&self, _id: BlockId) -> ClientResult>> { Err(ClientError::NotAvailableOnLightClient) } - fn justification(&self, _id: BlockId) -> ClientResult> { + fn justifications(&self, _id: BlockId) -> ClientResult> { Err(ClientError::NotAvailableOnLightClient) } @@ -128,6 +142,17 @@ impl BlockchainBackend for Blockchain where Block: BlockT, S fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { Err(ClientError::NotAvailableOnLightClient) } + + fn indexed_transaction(&self, _hash: &Block::Hash) -> ClientResult>> { + Err(ClientError::NotAvailableOnLightClient) + } + + fn block_indexed_body( + &self, + _id: BlockId, + ) -> sp_blockchain::Result>>> { + Err(ClientError::NotAvailableOnLightClient) + } } impl, Block: BlockT> ProvideCache for Blockchain { @@ -137,16 +162,16 @@ impl, Block: BlockT> ProvideCache for Blockchain { } impl RemoteBlockchain for Blockchain - where - S: Storage, +where + S: Storage, { - fn header(&self, id: BlockId) -> ClientResult, - >> { + fn header( + &self, + id: BlockId, + ) -> ClientResult>> { // first, try to read header from local storage if let Some(local_header) = self.storage.header(id)? { - return Ok(LocalOrRemote::Local(local_header)); + return Ok(LocalOrRemote::Local(local_header)) } // we need to know block number to check if it's a part of CHT @@ -159,8 +184,9 @@ impl RemoteBlockchain for Blockchain }; // if the header is genesis (never pruned), non-canonical, or from future => return - if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown { - return Ok(LocalOrRemote::Unknown); + if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown + { + return Ok(LocalOrRemote::Unknown) } Ok(LocalOrRemote::Remote(RemoteHeaderRequest { diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index fa0f02cd5aed9..a0776131e406d 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,35 +18,33 @@ //! Methods that light client could use to execute runtime calls. 
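The executor below follows one simple rule, worth stating in isolation (a sketch under simplified types, not part of the diff): execute locally only when the requested block's state is available, which on a light client means the genesis state; everything else is reported as unavailable.

```rust
use sp_blockchain::Error as ClientError;

// Hedged sketch of the dispatch rule used by `GenesisCallExecutor::call`,
// `contextual_call`, `prove_execution` and `runtime_version` below.
fn dispatch<T>(
    local_state_available: bool,
    run_locally: impl FnOnce() -> Result<T, ClientError>,
) -> Result<T, ClientError> {
    if local_state_available {
        run_locally()
    } else {
        Err(ClientError::NotAvailableOnLightClient)
    }
}
```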
-use std::{ - sync::Arc, panic::UnwindSafe, result, cell::RefCell, -}; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use hash_db::Hasher; use sp_core::{ - convert_hash, NativeOrEncoded, traits::{CodeExecutor, SpawnNamed}, - offchain::storage::OffchainOverlayedChanges, + convert_hash, + traits::{CodeExecutor, SpawnNamed}, + NativeOrEncoded, }; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{One, Block as BlockT, Header as HeaderT, HashFor}, + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, }; -use sp_externalities::Extensions; use sp_state_machine::{ - self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, + create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, + ExecutionStrategy, OverlayedChanges, StorageProof, }; -use hash_db::Hasher; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{ProofRecorder, StorageTransactionCache}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::RemoteBackend, - light::RemoteCallRequest, - call_executor::CallExecutor, + backend::RemoteBackend, call_executor::CallExecutor, light::RemoteCallRequest, }; -use sc_executor::{RuntimeVersion, NativeVersion}; +use sc_executor::RuntimeVersion; /// Call executor that is able to execute calls only on genesis state. /// @@ -65,19 +63,15 @@ impl GenesisCallExecutor { impl Clone for GenesisCallExecutor { fn clone(&self) -> Self { - GenesisCallExecutor { - backend: self.backend.clone(), - local: self.local.clone(), - } + GenesisCallExecutor { backend: self.backend.clone(), local: self.local.clone() } } } -impl CallExecutor for - GenesisCallExecutor - where - Block: BlockT, - B: RemoteBackend, - Local: CallExecutor, +impl CallExecutor for GenesisCallExecutor +where + Block: BlockT, + B: RemoteBackend, + Local: CallExecutor, { type Error = ClientError; @@ -91,202 +85,112 @@ impl CallExecutor for strategy: ExecutionStrategy, extensions: Option, ) -> ClientResult> { - match self.backend.is_local_state_available(id) { - true => self.local.call(id, method, call_data, strategy, extensions), - false => Err(ClientError::NotAvailableOnLightClient), + if self.backend.is_local_state_available(id) { + self.local.call(id, method, call_data, strategy, extensions) + } else { + Err(ClientError::NotAvailableOnLightClient) } } fn contextual_call< - 'a, - IB: Fn() -> ClientResult<()>, EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - initialize_block_fn: IB, at: &BlockId, method: &str, call_data: &[u8], changes: &RefCell, - offchain_changes: &RefCell, _: Option<&RefCell>>, - initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, recorder: &Option>, extensions: Option, - ) -> ClientResult> where ExecutionManager: Clone { + ) -> ClientResult> + where + ExecutionManager: Clone, + { // there's no actual way/need to specify native/wasm execution strategy on light node // => we can safely ignore passed values - match self.backend.is_local_state_available(at) { - true => CallExecutor::contextual_call::< - _, + if self.backend.is_local_state_available(at) { + 
CallExecutor::contextual_call::< fn( Result, Local::Error>, Result, Local::Error>, ) -> Result, Local::Error>, _, - NC + NC, >( &self.local, - initialize_block_fn, at, method, call_data, changes, - offchain_changes, None, - initialize_block, ExecutionManager::NativeWhenPossible, native_call, recorder, extensions, - ).map_err(|e| ClientError::Execution(Box::new(e.to_string()))), - false => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn runtime_version(&self, id: &BlockId) -> ClientResult { - match self.backend.is_local_state_available(id) { - true => self.local.runtime_version(id), - false => Err(ClientError::NotAvailableOnLightClient), + ) + } else { + Err(ClientError::NotAvailableOnLightClient) } } - fn prove_at_trie_state>>( + fn prove_execution( &self, - _state: &sp_state_machine::TrieBackend>, - _changes: &mut OverlayedChanges, - _method: &str, - _call_data: &[u8], + at: &BlockId, + method: &str, + call_data: &[u8], ) -> ClientResult<(Vec, StorageProof)> { - Err(ClientError::NotAvailableOnLightClient) + if self.backend.is_local_state_available(at) { + self.local.prove_execution(at, method, call_data) + } else { + Err(ClientError::NotAvailableOnLightClient) + } } - fn native_runtime_version(&self) -> Option<&NativeVersion> { - None + fn runtime_version(&self, id: &BlockId) -> ClientResult { + if self.backend.is_local_state_available(id) { + self.local.runtime_version(id) + } else { + Err(ClientError::NotAvailableOnLightClient) + } } } -/// Prove contextual execution using given block header in environment. -/// -/// Method is executed using passed header as environment' current block. -/// Proof includes both environment preparation proof and method execution proof. -pub fn prove_execution( - mut state: S, - header: Block::Header, - executor: &E, - method: &str, - call_data: &[u8], -) -> ClientResult<(Vec, StorageProof)> - where - Block: BlockT, - S: StateBackend>, - E: CallExecutor, -{ - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as - Box - )?; - - // prepare execution environment + record preparation proof - let mut changes = Default::default(); - let (_, init_proof) = executor.prove_at_trie_state( - trie_state, - &mut changes, - "Core_initialize_block", - &header.encode(), - )?; - - // execute method + record execution proof - let (result, exec_proof) = executor.prove_at_trie_state( - &trie_state, - &mut changes, - method, - call_data, - )?; - let total_proof = StorageProof::merge(vec![init_proof, exec_proof]); - - Ok((result, total_proof)) -} - /// Check remote contextual execution proof using given backend. /// -/// Method is executed using passed header as environment' current block. -/// Proof should include both environment preparation proof and method execution proof. +/// Proof should include the method execution proof. pub fn check_execution_proof( executor: &E, spawn_handle: Box, request: &RemoteCallRequest
<Header>
, remote_proof: StorageProof, ) -> ClientResult> - where - Header: HeaderT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, -{ - check_execution_proof_with_make_header::( - executor, - spawn_handle, - request, - remote_proof, - |header|
<Header as HeaderT>
::new( - *header.number() + One::one(), - Default::default(), - Default::default(), - header.hash(), - Default::default(), - ), - ) -} - -/// Check remote contextual execution proof using given backend and header factory. -/// -/// Method is executed using passed header as environment' current block. -/// Proof should include both environment preparation proof and method execution proof. -pub fn check_execution_proof_with_make_header( - executor: &E, - spawn_handle: Box, - request: &RemoteCallRequest
<Header>
, - remote_proof: StorageProof, - make_next_header: MakeNextHeader, -) -> ClientResult> - where - E: CodeExecutor + Clone + 'static, - H: Hasher, - Header: HeaderT, - H::Out: Ord + codec::Codec + 'static, - MakeNextHeader: Fn(&Header) -> Header, +where + Header: HeaderT, + E: CodeExecutor + Clone + 'static, + H: Hasher, + H::Out: Ord + codec::Codec + 'static, { let local_state_root = request.header.state_root(); let root: H::Out = convert_hash(&local_state_root); - // prepare execution environment + check preparation proof + // prepare execution environment let mut changes = OverlayedChanges::default(); let trie_backend = create_proof_check_backend(root, remote_proof)?; - let next_header = make_next_header(&request.header); // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code.runtime_code()?; - - execution_proof_check_on_trie_backend::( - &trie_backend, - &mut changes, - executor, - spawn_handle.clone(), - "Core_initialize_block", - &next_header.encode(), - &runtime_code, - )?; + let runtime_code = backend_runtime_code + .runtime_code() + .map_err(|_e| ClientError::RuntimeCodeMissing)?; // execute method execution_proof_check_on_trie_backend::( diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 33113c2fc7df0..5740e407a5e89 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,53 +18,56 @@ //! Light client data fetcher. Fetches requested data from remote full nodes. 
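One of the checks in this file is simple enough to show standalone. A sketch (not part of the diff) of what `check_body_proof` verifies: the ordered trie root of the encoded extrinsics must equal the extrinsics root committed to in the header.

```rust
use codec::Encode;
use sp_runtime::traits::{Block as BlockT, Hash, HashFor, Header as HeaderT};

// Recompute the extrinsics root from a candidate body and compare it with the
// header; `FetchChecker::check_body_proof` returns the body on a match and an
// `ExtrinsicRootInvalid` error otherwise.
fn body_matches_header<B: BlockT>(header: &B::Header, body: &[B::Extrinsic]) -> bool {
    let root = HashFor::<B>::ordered_trie_root(body.iter().map(Encode::encode).collect());
    *header.extrinsics_root() == root
}
```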
-use std::sync::Arc; -use std::collections::{BTreeMap, HashMap}; -use std::marker::PhantomData; +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + sync::Arc, +}; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; -use sp_core::{convert_hash, traits::{CodeExecutor, SpawnNamed}, storage::{ChildInfo, ChildType}}; +use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_core::{ + convert_hash, + storage::{ChildInfo, ChildType}, + traits::{CodeExecutor, SpawnNamed}, +}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, - AtLeast32Bit, CheckedConversion, + AtLeast32Bit, Block as BlockT, CheckedConversion, Hash, HashFor, Header as HeaderT, NumberFor, }; +pub use sp_state_machine::StorageProof; use sp_state_machine::{ - ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, - InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, + key_changes_proof_check_with_db, read_child_proof_check, read_proof_check, + ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage, + InMemoryChangesTrieStorage, TrieBackend, }; -pub use sp_state_machine::StorageProof; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use crate::{blockchain::Blockchain, call_executor::check_execution_proof}; pub use sc_client_api::{ + cht, light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, - RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, + ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, + RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, Storage as BlockchainStorage, }, - cht, }; -use crate::{blockchain::Blockchain, call_executor::check_execution_proof}; /// Remote data checker. -pub struct LightDataChecker> { +pub struct LightDataChecker> { blockchain: Arc>, executor: E, spawn_handle: Box, - _hasher: PhantomData<(B, H)>, + _marker: PhantomData, } -impl> LightDataChecker { +impl> LightDataChecker { /// Create new light data checker. pub fn new( blockchain: Arc>, executor: E, spawn_handle: Box, ) -> Self { - Self { - blockchain, executor, spawn_handle, _hasher: PhantomData - } + Self { blockchain, executor, spawn_handle, _marker: PhantomData } } /// Check remote changes query proof assuming that CHT-s are of given size. @@ -73,27 +76,36 @@ impl> LightDataChecker { request: &RemoteChangesRequest, remote_proof: ChangesProof, cht_size: NumberFor, - ) -> ClientResult, u32)>> - where - H: Hasher, - H::Out: Ord + codec::Codec, - { + ) -> ClientResult, u32)>> { // since we need roots of all changes tries for the range begin..max // => remote node can't use max block greater that one that we have passed - if remote_proof.max_block > request.max_block.0 || remote_proof.max_block < request.last_block.0 { + if remote_proof.max_block > request.max_block.0 || + remote_proof.max_block < request.last_block.0 + { return Err(ClientError::ChangesTrieAccessFailed(format!( "Invalid max_block used by the remote node: {}. 
Local: {}..{}..{}", - remote_proof.max_block, request.first_block.0, request.last_block.0, request.max_block.0, - )).into()); + remote_proof.max_block, + request.first_block.0, + request.last_block.0, + request.max_block.0, + )) + .into()) } // check if remote node has responded with extra changes trie roots proofs // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) - let is_extra_first_root = remote_proof.roots.keys().next() - .map(|first_root| *first_root < request.first_block.0 - || *first_root >= request.tries_roots.0) + let is_extra_first_root = remote_proof + .roots + .keys() + .next() + .map(|first_root| { + *first_root < request.first_block.0 || *first_root >= request.tries_roots.0 + }) .unwrap_or(false); - let is_extra_last_root = remote_proof.roots.keys().next_back() + let is_extra_last_root = remote_proof + .roots + .keys() + .next_back() .map(|last_root| *last_root >= request.tries_roots.0) .unwrap_or(false); if is_extra_first_root || is_extra_last_root { @@ -112,20 +124,19 @@ impl> LightDataChecker { let remote_roots_proof = remote_proof.roots_proof; let remote_proof = remote_proof.proof; if !remote_roots.is_empty() { - self.check_changes_tries_proof( - cht_size, - &remote_roots, - remote_roots_proof, - )?; + self.check_changes_tries_proof(cht_size, &remote_roots, remote_roots_proof)?; } // and now check the key changes proof + get the changes let mut result = Vec::new(); let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); for config_range in &request.changes_trie_configs { - let result_range = key_changes_proof_check_with_db::( + let result_range = key_changes_proof_check_with_db::, _>( ChangesTrieConfigurationRange { - config: config_range.config.as_ref().ok_or(ClientError::ChangesTriesNotSupported)?, + config: config_range + .config + .as_ref() + .ok_or(ClientError::ChangesTriesNotSupported)?, zero: config_range.zero.0, end: config_range.end.map(|(n, _)| n), }, @@ -141,7 +152,8 @@ impl> LightDataChecker { }, remote_max_block, request.storage_key.as_ref(), - &request.key) + &request.key, + ) .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; result.extend(result_range); } @@ -155,63 +167,65 @@ impl> LightDataChecker { cht_size: NumberFor, remote_roots: &BTreeMap, B::Hash>, remote_roots_proof: StorageProof, - ) -> ClientResult<()> - where - H: Hasher, - H::Out: Ord + codec::Codec, - { + ) -> ClientResult<()> { // all the checks are sharing the same storage let storage = remote_roots_proof.into_memory_db(); // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT let blocks = remote_roots.keys().cloned(); - cht::for_each_cht_group::(cht_size, blocks, |mut storage, _, cht_blocks| { - // get local changes trie CHT root for given CHT - // it should be there, because it is never pruned AND request has been composed - // when required header has been pruned (=> replaced with CHT) - let first_block = cht_blocks.first().cloned() - .expect("for_each_cht_group never calls callback with empty groups"); - let local_cht_root = self.blockchain.storage().changes_trie_cht_root(cht_size, first_block)? 
- .ok_or(ClientError::InvalidCHTProof)?; - - // check changes trie root for every block within CHT range - for block in cht_blocks { - // check if the proofs storage contains the root - // normally this happens in when the proving backend is created, but since - // we share the storage for multiple checks, do it here - let mut cht_root = H::Out::default(); - cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); - if !storage.contains(&cht_root, EMPTY_PREFIX) { - return Err(ClientError::InvalidCHTProof.into()); + cht::for_each_cht_group::( + cht_size, + blocks, + |mut storage, _, cht_blocks| { + // get local changes trie CHT root for given CHT + // it should be there, because it is never pruned AND request has been composed + // when required header has been pruned (=> replaced with CHT) + let first_block = cht_blocks + .first() + .cloned() + .expect("for_each_cht_group never calls callback with empty groups"); + let local_cht_root = self + .blockchain + .storage() + .changes_trie_cht_root(cht_size, first_block)? + .ok_or(ClientError::InvalidCHTProof)?; + + // check changes trie root for every block within CHT range + for block in cht_blocks { + // check if the proofs storage contains the root + // normally this happens in when the proving backend is created, but since + // we share the storage for multiple checks, do it here + if !storage.contains(&local_cht_root, EMPTY_PREFIX) { + return Err(ClientError::InvalidCHTProof.into()) + } + + // check proof for single changes trie root + let proving_backend = TrieBackend::new(storage, local_cht_root); + let remote_changes_trie_root = remote_roots[&block]; + cht::check_proof_on_proving_backend::>( + local_cht_root, + block, + remote_changes_trie_root, + &proving_backend, + )?; + + // and return the storage to use in following checks + storage = proving_backend.into_storage(); } - // check proof for single changes trie root - let proving_backend = TrieBackend::new(storage, cht_root); - let remote_changes_trie_root = remote_roots[&block]; - cht::check_proof_on_proving_backend::( - local_cht_root, - block, - remote_changes_trie_root, - &proving_backend, - )?; - - // and return the storage to use in following checks - storage = proving_backend.into_storage(); - } - - Ok(storage) - }, storage) + Ok(storage) + }, + storage, + ) } } -impl FetchChecker for LightDataChecker - where - Block: BlockT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, - S: BlockchainStorage, +impl FetchChecker for LightDataChecker +where + Block: BlockT, + E: CodeExecutor + Clone + 'static, + S: BlockchainStorage, { fn check_header_proof( &self, @@ -219,15 +233,16 @@ impl FetchChecker for LightDataChecker remote_header: Option, remote_proof: StorageProof, ) -> ClientResult { - let remote_header = remote_header.ok_or_else(|| - ClientError::from(ClientError::InvalidCHTProof))?; + let remote_header = + remote_header.ok_or_else(|| ClientError::from(ClientError::InvalidCHTProof))?; let remote_header_hash = remote_header.hash(); - cht::check_proof::( + cht::check_proof::>( request.cht_root, request.block, remote_header_hash, remote_proof, - ).map(|_| remote_header) + ) + .map(|_| remote_header) } fn check_read_proof( @@ -235,11 +250,12 @@ impl FetchChecker for LightDataChecker request: &RemoteReadRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - read_proof_check::( + read_proof_check::, _>( convert_hash(request.header.state_root()), remote_proof, request.keys.iter(), - ).map_err(Into::into) + ) + .map_err(|e| 
ClientError::from(e)) } fn check_read_child_proof( @@ -249,14 +265,15 @@ impl FetchChecker for LightDataChecker ) -> ClientResult, Option>>> { let child_info = match ChildType::from_prefixed_key(&request.storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err(ClientError::InvalidChildType), }; - read_child_proof_check::( + read_child_proof_check::, _>( convert_hash(request.header.state_root()), remote_proof, &child_info, request.keys.iter(), - ).map_err(Into::into) + ) + .map_err(|e| ClientError::from(e)) } fn check_execution_proof( @@ -264,7 +281,7 @@ impl FetchChecker for LightDataChecker request: &RemoteCallRequest, remote_proof: StorageProof, ) -> ClientResult> { - check_execution_proof::<_, _, H>( + check_execution_proof::<_, _, HashFor>( &self.executor, self.spawn_handle.clone(), request, @@ -275,7 +292,7 @@ impl FetchChecker for LightDataChecker fn check_changes_proof( &self, request: &RemoteChangesRequest, - remote_proof: ChangesProof + remote_proof: ChangesProof, ) -> ClientResult, u32)>> { self.check_changes_proof_with_cht_size(request, remote_proof, cht::size()) } @@ -283,21 +300,19 @@ impl FetchChecker for LightDataChecker fn check_body_proof( &self, request: &RemoteBodyRequest, - body: Vec + body: Vec, ) -> ClientResult> { // TODO: #2621 - let extrinsics_root = HashFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - ); + let extrinsics_root = + HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); if *request.header.extrinsics_root() == extrinsics_root { Ok(body) } else { - Err(format!("RemoteBodyRequest: invalid extrinsics root expected: {} but got {}", - *request.header.extrinsics_root(), - extrinsics_root, - ).into()) + Err(ClientError::ExtrinsicRootInvalid { + received: request.header.extrinsics_root().to_string(), + expected: extrinsics_root.to_string(), + }) } - } } @@ -308,10 +323,18 @@ struct RootsStorage<'a, Number: AtLeast32Bit, Hash: 'a> { } impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> - where - H: Hasher, - Number: std::fmt::Display + std::hash::Hash + Clone + AtLeast32Bit + Encode + Decode + Send + Sync + 'static, - Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, +where + H: Hasher, + Number: std::fmt::Display + + std::hash::Hash + + Clone + + AtLeast32Bit + + Encode + + Decode + + Send + + Sync + + 'static, + Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, { fn build_anchor( &self, @@ -329,11 +352,9 @@ impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a let root = if block < self.roots.0 { self.prev_roots.get(&Number::unique_saturated_from(block)).cloned() } else { - let index: Option = block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); - match index { - Some(index) => self.roots.1.get(index as usize).cloned(), - None => None, - } + let index: Option = + block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); + index.and_then(|index| self.roots.1.get(index as usize).cloned()) }; Ok(root.map(|root| { diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs index 899d1ae31a3dd..0c874326ef2e0 100644 --- a/client/light/src/lib.rs +++ b/client/light/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,25 +18,28 @@ //! Light client components. +use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_runtime::traits::{Block as BlockT, HashFor}; use std::sync::Arc; -use sp_core::traits::{CodeExecutor, SpawnNamed}; pub mod backend; pub mod blockchain; pub mod call_executor; pub mod fetcher; -pub use {backend::*, blockchain::*, call_executor::*, fetcher::*}; +pub use backend::*; +pub use blockchain::*; +pub use call_executor::*; +pub use fetcher::*; /// Create an instance of fetch data checker. pub fn new_fetch_checker>( blockchain: Arc>, executor: E, spawn_handle: Box, -) -> LightDataChecker, B, S> - where - E: CodeExecutor, +) -> LightDataChecker +where + E: CodeExecutor, { LightDataChecker::new(blockchain, executor, spawn_handle) } @@ -48,9 +51,9 @@ pub fn new_light_blockchain>(storage: S) -> A /// Create an instance of light client backend. pub fn new_light_backend(blockchain: Arc>) -> Arc>> - where - B: BlockT, - S: BlockchainStorage, +where + B: BlockT, + S: BlockchainStorage, { Arc::new(Backend::new(blockchain)) } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 36f877da9adf9..c078e5b892fe2 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "0.8.0" +version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -15,17 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.4" +futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.28.1", default-features = false } +libp2p = { version = "0.39.1", default-features = false } log = "0.4.8" -lru = "0.4.3" -sc-network = { version = "0.8.0", path = "../network" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -wasm-timer = "0.2" +lru = "0.6.6" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +tracing = "0.1.25" [dev-dependencies] async-std = "1.6.5" -quickcheck = "0.9.0" -rand = "0.7.2" +quickcheck = "1.0.3" substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 70c2942597aa5..70b13983d8bd3 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -1,29 +1,36 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . -use crate::{Network, Validator}; -use crate::state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}; +use crate::{ + state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}, + Network, Validator, +}; use sc_network::{Event, ReputationChange}; -use futures::prelude::*; -use futures::channel::mpsc::{channel, Sender, Receiver}; +use futures::{ + channel::mpsc::{channel, Receiver, Sender}, + prelude::*, +}; use libp2p::PeerId; use log::trace; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use prometheus_endpoint::Registry; +use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, collections::{HashMap, VecDeque}, @@ -38,7 +45,7 @@ pub struct GossipEngine { state_machine: ConsensusGossip, network: Box + Send>, periodic_maintenance_interval: futures_timer::Delay, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Incoming events from the network. network_event_stream: Pin + Send>>, @@ -57,8 +64,8 @@ enum ForwardingState { /// more messages to forward. Idle, /// The gossip engine is in the progress of forwarding messages and thus will not poll the - /// network for more messages until it has send all current messages into the subscribed message - /// sinks. + /// network for more messages until it has send all current messages into the subscribed + /// message sinks. Busy(VecDeque<(B::Hash, TopicNotification)>), } @@ -68,20 +75,21 @@ impl GossipEngine { /// Create a new instance. pub fn new + Send + Clone + 'static>( network: N, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + protocol: impl Into>, validator: Arc>, - ) -> Self where B: 'static { - // We grab the event stream before registering the notifications protocol, otherwise we - // might miss events. + metrics_registry: Option<&Registry>, + ) -> Self + where + B: 'static, + { + let protocol = protocol.into(); let network_event_stream = network.event_stream(); - network.register_notifications_protocol(engine_id, protocol_name.into()); GossipEngine { - state_machine: ConsensusGossip::new(validator, engine_id), + state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), network: Box::new(network), periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), - engine_id, + protocol, network_event_stream, message_sinks: HashMap::new(), @@ -98,11 +106,7 @@ impl GossipEngine { /// the message's topic. No validation is performed on the message, if the /// message is already expired it should be dropped on the next garbage /// collection. - pub fn register_gossip_message( - &mut self, - topic: B::Hash, - message: Vec, - ) { + pub fn register_gossip_message(&mut self, topic: B::Hash, message: Vec) { self.state_machine.register_message(topic, message); } @@ -112,9 +116,7 @@ impl GossipEngine { } /// Get data of valid, incoming messages for a topic (but might have expired meanwhile). 
- pub fn messages_for(&mut self, topic: B::Hash) - -> Receiver - { + pub fn messages_for(&mut self, topic: B::Hash) -> Receiver { let past_messages = self.state_machine.messages_for(topic).collect::>(); // The channel length is not critical for correctness. By the implementation of `channel` // each sender is guaranteed a single buffer slot, making it a non-rendezvous channel and @@ -123,7 +125,7 @@ impl GossipEngine { // contains a single message. let (mut tx, rx) = channel(usize::max(past_messages.len(), 10)); - for notification in past_messages{ + for notification in past_messages { tx.try_send(notification) .expect("receiver known to be live, and buffer size known to suffice; qed"); } @@ -134,22 +136,12 @@ impl GossipEngine { } /// Send all messages with given topic to a peer. - pub fn send_topic( - &mut self, - who: &PeerId, - topic: B::Hash, - force: bool - ) { + pub fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { self.state_machine.send_topic(&mut *self.network, who, topic, force) } /// Multicast a message to all peers. - pub fn gossip_message( - &mut self, - topic: B::Hash, - message: Vec, - force: bool, - ) { + pub fn gossip_message(&mut self, topic: B::Hash, message: Vec, force: bool) { self.state_machine.multicast(&mut *self.network, topic, message, force) } @@ -165,7 +157,7 @@ impl GossipEngine { /// /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. - pub fn announce(&self, block: B::Hash, associated_data: Vec) { + pub fn announce(&self, block: B::Hash, associated_data: Option>) { self.network.announce(block, associated_data); } } @@ -181,26 +173,35 @@ impl Future for GossipEngine { ForwardingState::Idle => { match this.network_event_stream.poll_next_unpin(cx) { Poll::Ready(Some(event)) => match event { - Event::NotificationStreamOpened { remote, engine_id, role } => { - if engine_id != this.engine_id { - continue; + Event::SyncConnected { remote } => { + this.network.add_set_reserved(remote, this.protocol.clone()); + }, + Event::SyncDisconnected { remote } => { + this.network.remove_set_reserved(remote, this.protocol.clone()); + }, + Event::NotificationStreamOpened { remote, protocol, role, .. } => { + if protocol != this.protocol { + continue } this.state_machine.new_peer(&mut *this.network, remote, role); - } - Event::NotificationStreamClosed { remote, engine_id } => { - if engine_id != this.engine_id { - continue; + }, + Event::NotificationStreamClosed { remote, protocol } => { + if protocol != this.protocol { + continue } this.state_machine.peer_disconnected(&mut *this.network, remote); }, Event::NotificationsReceived { remote, messages } => { - let messages = messages.into_iter().filter_map(|(engine, data)| { - if engine == this.engine_id { - Some(data.to_vec()) - } else { - None - } - }).collect(); + let messages = messages + .into_iter() + .filter_map(|(engine, data)| { + if engine == this.protocol { + Some(data.to_vec()) + } else { + None + } + }) + .collect(); let to_forward = this.state_machine.on_incoming( &mut *this.network, @@ -210,27 +211,25 @@ impl Future for GossipEngine { this.forwarding_state = ForwardingState::Busy(to_forward.into()); }, - Event::Dht(_) => {} - } + Event::Dht(_) => {}, + }, // The network event stream closed. Do the same for [`GossipValidator`]. 
Poll::Ready(None) => return Poll::Ready(()), Poll::Pending => break, } - } + }, ForwardingState::Busy(to_forward) => { let (topic, notification) = match to_forward.pop_front() { Some(n) => n, None => { this.forwarding_state = ForwardingState::Idle; - continue; - } + continue + }, }; let sinks = match this.message_sinks.get_mut(&topic) { Some(sinks) => sinks, - None => { - continue; - }, + None => continue, }; // Make sure all sinks for the given topic are ready. @@ -242,8 +241,8 @@ impl Future for GossipEngine { Poll::Pending => { // Push back onto queue for later. to_forward.push_front((topic, notification)); - break 'outer; - } + break 'outer + }, } } @@ -252,7 +251,7 @@ impl Future for GossipEngine { if sinks.is_empty() { this.message_sinks.remove(&topic); - continue; + continue } trace!( @@ -264,18 +263,17 @@ impl Future for GossipEngine { for sink in sinks { match sink.start_send(notification.clone()) { Ok(()) => {}, - Err(e) if e.is_full() => unreachable!( - "Previously ensured that all sinks are ready; qed.", - ), + Err(e) if e.is_full() => { + unreachable!("Previously ensured that all sinks are ready; qed.") + }, // Receiver got dropped. Will be removed in next iteration (See (1)). Err(_) => {}, } } - } + }, } } - while let Poll::Ready(()) = this.periodic_maintenance_interval.poll_unpin(cx) { this.periodic_maintenance_interval.reset(PERIODIC_MAINTENANCE_INTERVAL); this.state_machine.tick(&mut *this.network); @@ -292,17 +290,23 @@ impl Future for GossipEngine { #[cfg(test)] mod tests { - use async_std::task::spawn; + use super::*; use crate::{ValidationResult, ValidatorContext}; - use futures::{channel::mpsc::{unbounded, UnboundedSender}, executor::{block_on, block_on_stream}, future::poll_fn}; + use async_std::task::spawn; + use futures::{ + channel::mpsc::{unbounded, UnboundedSender}, + executor::{block_on, block_on_stream}, + future::poll_fn, + }; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use rand::Rng; use sc_network::ObservedRole; - use sp_runtime::{testing::H256, traits::{Block as BlockT}}; - use std::convert::TryInto; - use std::sync::{Arc, Mutex}; + use sp_runtime::{testing::H256, traits::Block as BlockT}; + use std::{ + borrow::Cow, + convert::TryInto, + sync::{Arc, Mutex}, + }; use substrate_test_runtime_client::runtime::Block; - use super::*; #[derive(Clone, Default)] struct TestNetwork { @@ -322,20 +326,21 @@ mod tests { Box::pin(rx) } - fn report_peer(&self, _: PeerId, _: ReputationChange) { - } + fn report_peer(&self, _: PeerId, _: ReputationChange) {} - fn disconnect_peer(&self, _: PeerId) { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { unimplemented!(); } - fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + + fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} - - fn announce(&self, _: B::Hash, _: Vec) { + fn announce(&self, _: B::Hash, _: Option>) { unimplemented!(); } } @@ -361,9 +366,9 @@ mod tests { let network = TestNetwork::default(); let mut gossip_engine = GossipEngine::::new( network.clone(), - [1, 2, 3, 4], - "my_protocol", - Arc::new(AllowAll{}), + "/my_protocol", + Arc::new(AllowAll {}), + None, ); // Drop network event stream sender side. 
@@ -383,42 +388,43 @@ mod tests { #[test] fn keeps_multiple_subscribers_per_topic_updated_with_both_old_and_new_messages() { let topic = H256::default(); - let engine_id = [1, 2, 3, 4]; + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); let mut gossip_engine = GossipEngine::::new( network.clone(), - engine_id.clone(), - "my_protocol", - Arc::new(AllowAll{}), + protocol.clone(), + Arc::new(AllowAll {}), + None, ); - let mut event_sender = network.inner.lock() - .unwrap() - .event_senders - .pop() - .unwrap(); + let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); // Register the remote peer. - event_sender.start_send( - Event::NotificationStreamOpened { + event_sender + .start_send(Event::NotificationStreamOpened { remote: remote_peer.clone(), - engine_id: engine_id.clone(), + protocol: protocol.clone(), + negotiated_fallback: None, role: ObservedRole::Authority, - } - ).expect("Event stream is unbounded; qed."); + }) + .expect("Event stream is unbounded; qed."); let messages = vec![vec![1], vec![2]]; - let events = messages.iter().cloned().map(|m| { - Event::NotificationsReceived { + let events = messages + .iter() + .cloned() + .map(|m| Event::NotificationsReceived { remote: remote_peer.clone(), - messages: vec![(engine_id, m.into())] - } - }).collect::>(); + messages: vec![(protocol.clone(), m.into())], + }) + .collect::>(); // Send first event before subscribing. - event_sender.start_send(events[0].clone()).expect("Event stream is unbounded; qed."); + event_sender + .start_send(events[0].clone()) + .expect("Event stream is unbounded; qed."); let mut subscribers = vec![]; for _ in 0..2 { @@ -426,13 +432,14 @@ mod tests { } // Send second event after subscribing. - event_sender.start_send(events[1].clone()).expect("Event stream is unbounded; qed."); + event_sender + .start_send(events[1].clone()) + .expect("Event stream is unbounded; qed."); spawn(gossip_engine); - let mut subscribers = subscribers.into_iter() - .map(|s| block_on_stream(s)) - .collect::>(); + let mut subscribers = + subscribers.into_iter().map(|s| block_on_stream(s)).collect::>(); // Expect each subscriber to receive both events. for message in messages { @@ -451,18 +458,20 @@ mod tests { #[test] fn forwarding_to_different_size_and_topic_channels() { #[derive(Clone, Debug)] - struct ChannelLengthAndTopic{ + struct ChannelLengthAndTopic { length: usize, topic: H256, } impl Arbitrary for ChannelLengthAndTopic { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { + let possible_length = (0..100).collect::>(); + let possible_topics = (0..10).collect::>(); Self { - length: g.gen_range(0, 100), + length: *g.choose(&possible_length).unwrap(), // Make sure channel topics and message topics overlap by choosing a small // range. - topic: H256::from_low_u64_ne(g.gen_range(0, 10)), + topic: H256::from_low_u64_ne(*g.choose(&possible_topics).unwrap()), } } } @@ -472,12 +481,13 @@ mod tests { topic: H256, } - impl Arbitrary for Message{ - fn arbitrary(g: &mut G) -> Self { + impl Arbitrary for Message { + fn arbitrary(g: &mut Gen) -> Self { + let possible_topics = (0..10).collect::>(); Self { // Make sure channel topics and message topics overlap by choosing a small // range. 
- topic: H256::from_low_u64_ne(g.gen_range(0, 10)), + topic: H256::from_low_u64_ne(*g.choose(&possible_topics).unwrap()), } } } @@ -498,17 +508,20 @@ mod tests { } fn prop(channels: Vec, notifications: Vec>) { - let engine_id = [1, 2, 3, 4]; + let protocol = Cow::Borrowed("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); - let num_channels_per_topic = channels.iter() - .fold(HashMap::new(), |mut acc, ChannelLengthAndTopic { topic, .. }| { + let num_channels_per_topic = channels.iter().fold( + HashMap::new(), + |mut acc, ChannelLengthAndTopic { topic, .. }| { acc.entry(topic).and_modify(|e| *e += 1).or_insert(1); acc - }); + }, + ); - let expected_msgs_per_topic_all_chan = notifications.iter() + let expected_msgs_per_topic_all_chan = notifications + .iter() .fold(HashMap::new(), |mut acc, messages| { for message in messages { acc.entry(message.topic).and_modify(|e| *e += 1).or_insert(1); @@ -524,48 +537,48 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), - engine_id.clone(), - "my_protocol", - Arc::new(TestValidator{}), + protocol.clone(), + Arc::new(TestValidator {}), + None, ); // Create channels. - let (txs, mut rxs) = channels.iter() - .map(|ChannelLengthAndTopic { length, topic }| { - (topic.clone(), channel(*length)) - }) + let (txs, mut rxs) = channels + .iter() + .map(|ChannelLengthAndTopic { length, topic }| (topic.clone(), channel(*length))) .fold((vec![], vec![]), |mut acc, (topic, (tx, rx))| { - acc.0.push((topic, tx)); acc.1.push((topic, rx)); + acc.0.push((topic, tx)); + acc.1.push((topic, rx)); acc }); // Insert sender sides into `gossip_engine`. for (topic, tx) in txs { match gossip_engine.message_sinks.get_mut(&topic) { - Some(entry) => entry.push(tx), - None => {gossip_engine.message_sinks.insert(topic, vec![tx]);}, + Some(entry) => entry.push(tx), + None => { + gossip_engine.message_sinks.insert(topic, vec![tx]); + }, } } - - let mut event_sender = network.inner.lock() - .unwrap() - .event_senders - .pop() - .unwrap(); + let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); // Register the remote peer. - event_sender.start_send( - Event::NotificationStreamOpened { + event_sender + .start_send(Event::NotificationStreamOpened { remote: remote_peer.clone(), - engine_id: engine_id.clone(), + protocol: protocol.clone(), + negotiated_fallback: None, role: ObservedRole::Authority, - } - ).expect("Event stream is unbounded; qed."); + }) + .expect("Event stream is unbounded; qed."); // Send messages into the network event stream. for (i_notification, messages) in notifications.iter().enumerate() { - let messages = messages.into_iter().enumerate() + let messages = messages + .into_iter() + .enumerate() .map(|(i_message, Message { topic })| { // Embed the topic in the first 256 bytes of the message to be extracted by // the [`TestValidator`] later on. 
@@ -576,13 +589,16 @@ mod tests { message.push(i_notification.try_into().unwrap()); message.push(i_message.try_into().unwrap()); - (engine_id, message.into()) - }).collect(); + (protocol.clone(), message.into()) + }) + .collect(); - event_sender.start_send(Event::NotificationsReceived { - remote: remote_peer.clone(), - messages, - }).expect("Event stream is unbounded; qed."); + event_sender + .start_send(Event::NotificationsReceived { + remote: remote_peer.clone(), + messages, + }) + .expect("Event stream is unbounded; qed."); } let mut received_msgs_per_topic_all_chan = HashMap::::new(); @@ -603,19 +619,20 @@ mod tests { match rx.poll_next_unpin(cx) { Poll::Ready(Some(_)) => { progress = true; - received_msgs_per_topic_all_chan.entry(*topic) + received_msgs_per_topic_all_chan + .entry(*topic) .and_modify(|e| *e += 1) .or_insert(1); }, - Poll::Ready(None) => unreachable!( - "Sender side of channel is never dropped", - ), + Poll::Ready(None) => { + unreachable!("Sender side of channel is never dropped") + }, Poll::Pending => {}, } } if !progress { - break; + break } } Poll::Ready(()) @@ -637,10 +654,10 @@ mod tests { } // Past regressions. - prop(vec![], vec![vec![Message{ topic: H256::default()}]]); + prop(vec![], vec![vec![Message { topic: H256::default() }]]); prop( - vec![ChannelLengthAndTopic {length: 71, topic: H256::default()}], - vec![vec![Message{ topic: H256::default()}]], + vec![ChannelLengthAndTopic { length: 71, topic: H256::default() }], + vec![vec![Message { topic: H256::default() }]], ); QuickCheck::new().quickcheck(prop as fn(_, _)) diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 1d566ed3cbba2..55c2fc820637e 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Polite gossiping. //! @@ -30,14 +32,19 @@ //! //! # Usage //! -//! - Implement the `Network` trait, representing the low-level networking primitives. It is -//! already implemented on `sc_network::NetworkService`. +//! - Implement the `Network` trait, representing the low-level networking primitives. It is already +//! implemented on `sc_network::NetworkService`. //! - Implement the `Validator` trait. See the section below. -//! - Decide on a `ConsensusEngineId`. Each gossiping protocol should have a different one. +//! - Decide on a protocol name. Each gossiping protocol should have a different one. //! - Build a `GossipEngine` using these three elements. //! 
- Use the methods of the `GossipEngine` in order to send out messages and receive incoming //! messages. //! +//! The `GossipEngine` will automatically use `Network::add_set_reserved` and +//! `Network::remove_set_reserved` to maintain a set of peers equal to the set of peers the +//! node is syncing from. See the documentation of `sc-network` for more explanations about the +//! concepts of peer sets. +//! //! # What is a validator? //! //! The primary role of a `Validator` is to process incoming messages from peers, and decide @@ -54,14 +61,16 @@ //! These status packets will typically contain light pieces of information //! used to inform peers of a current view of protocol state. -pub use self::bridge::GossipEngine; -pub use self::state_machine::TopicNotification; -pub use self::validator::{DiscardAll, MessageIntent, Validator, ValidatorContext, ValidationResult}; +pub use self::{ + bridge::GossipEngine, + state_machine::TopicNotification, + validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, +}; use futures::prelude::*; -use sc_network::{Event, ExHashT, NetworkService, PeerId, ReputationChange}; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; -use std::{borrow::Cow, pin::Pin, sync::Arc}; +use sc_network::{multiaddr, Event, ExHashT, NetworkService, PeerId, ReputationChange}; +use sp_runtime::traits::Block as BlockT; +use std::{borrow::Cow, iter, pin::Pin, sync::Arc}; mod bridge; mod state_machine; @@ -75,26 +84,23 @@ pub trait Network { /// Adjust the reputation of a node. fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); + /// Adds the peer to the set of peers to be connected to with this protocol. + fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Removes the peer from the set of peers to be connected to with this protocol. + fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); + /// Force-disconnect a peer. - fn disconnect_peer(&self, who: PeerId); + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); /// Send a notification to a peer. - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec); - - /// Registers a notifications protocol. - /// - /// See the documentation of [`NetworkService:register_notifications_protocol`] for more information. - fn register_notifications_protocol( - &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, - ); + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); /// Notify everyone we're connected to that we have the given block. /// /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. 
- fn announce(&self, block: B::Hash, associated_data: Vec); + fn announce(&self, block: B::Hash, associated_data: Option>); } impl Network for Arc> { @@ -106,23 +112,38 @@ impl Network for Arc> { NetworkService::report_peer(self, peer_id, reputation); } - fn disconnect_peer(&self, who: PeerId) { - NetworkService::disconnect_peer(self, who) + fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { + let addr = + iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); + let result = + NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); + if let Err(err) = result { + log::error!(target: "gossip", "add_set_reserved failed: {}", err); + } + } + + fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { + let addr = + iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); + let result = NetworkService::remove_peers_from_reserved_set( + self, + protocol, + iter::once(addr).collect(), + ); + if let Err(err) = result { + log::error!(target: "gossip", "remove_set_reserved failed: {}", err); + } } - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec) { - NetworkService::write_notification(self, who, engine_id, message) + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + NetworkService::disconnect_peer(self, who, protocol) } - fn register_notifications_protocol( - &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, - ) { - NetworkService::register_notifications_protocol(self, engine_id, protocol_name) + fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec) { + NetworkService::write_notification(self, who, protocol, message) } - fn announce(&self, block: B::Hash, associated_data: Vec) { + fn announce(&self, block: B::Hash, associated_data: Option>) { NetworkService::announce_block(self, block, associated_data) } } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 60c669ecb6680..920b44d8c1e5a 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,24 +16,34 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{Network, MessageIntent, Validator, ValidatorContext, ValidationResult}; +use crate::{MessageIntent, Network, ValidationResult, Validator, ValidatorContext}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use std::iter; -use std::time; -use log::{error, trace}; -use lru::LruCache; use libp2p::PeerId; -use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use sp_runtime::ConsensusEngineId; +use lru::LruCache; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::ObservedRole; -use wasm_timer::Instant; +use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + iter, + sync::Arc, + time, + time::Instant, +}; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 -const KNOWN_MESSAGES_CACHE_SIZE: usize = 4096; - -const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_secs(30); +// NOTE: The current value is adjusted based on largest production network deployment (Kusama) and +// the current main gossip user (GRANDPA). Currently there are ~800 validators on Kusama, as such, +// each GRANDPA round should generate ~1600 messages, and we currently keep track of the last 2 +// completed rounds and the current live one. That makes it so that at any point we will be holding +// ~4800 live messages. +// +// Assuming that each known message is tracked with a 32 byte hash (common for `Block::Hash`), then +// this cache should take about 256 KB of memory. +const KNOWN_MESSAGES_CACHE_SIZE: usize = 8192; + +const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_millis(750); pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); @@ -79,17 +89,13 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { /// Broadcast a message to all peers that have not received it previously. fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.gossip.multicast( - self.network, - topic, - message, - force, - ); + self.gossip.multicast(self.network, topic, message, force); } /// Send addressed message to a peer. fn send_message(&mut self, who: &PeerId, message: Vec) { - self.network.write_notification(who.clone(), self.gossip.engine_id, message); + self.network + .write_notification(who.clone(), self.gossip.protocol.clone(), message); } /// Send all messages with given topic to a peer. @@ -100,14 +106,15 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { fn propagate<'a, B: BlockT, I>( network: &mut dyn Network, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, messages: I, intent: MessageIntent, peers: &mut HashMap>, validator: &Arc>, ) - // (msg_hash, topic, message) - where I: Clone + IntoIterator)>, +// (msg_hash, topic, message) +where + I: Clone + IntoIterator)>, { let mut message_allowed = validator.message_allowed(); @@ -116,29 +123,36 @@ fn propagate<'a, B: BlockT, I>( let intent = match intent { MessageIntent::Broadcast { .. } => if peer.known_messages.contains(&message_hash) { - continue; + continue } else { MessageIntent::Broadcast }, - MessageIntent::PeriodicRebroadcast => + MessageIntent::PeriodicRebroadcast => { if peer.known_messages.contains(&message_hash) { MessageIntent::PeriodicRebroadcast } else { // peer doesn't know message, so the logic should treat it as an // initial broadcast. 
MessageIntent::Broadcast - }, + } + }, other => other, }; if !message_allowed(id, intent, &topic, &message) { - continue; + continue } peer.known_messages.insert(message_hash.clone()); - trace!(target: "gossip", "Propagating to {}: {:?}", id, message); - network.write_notification(id.clone(), engine_id, message.clone()); + tracing::trace!( + target: "gossip", + to = %id, + %protocol, + ?message, + "Propagating message", + ); + network.write_notification(id.clone(), protocol.clone(), message.clone()); } } } @@ -148,35 +162,49 @@ pub struct ConsensusGossip { peers: HashMap>, messages: Vec>, known_messages: LruCache, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, validator: Arc>, next_broadcast: Instant, + metrics: Option, } impl ConsensusGossip { /// Create a new instance using the given validator. - pub fn new(validator: Arc>, engine_id: ConsensusEngineId) -> Self { + pub fn new( + validator: Arc>, + protocol: Cow<'static, str>, + metrics_registry: Option<&Registry>, + ) -> Self { + let metrics = match metrics_registry.map(Metrics::register) { + Some(Ok(metrics)) => Some(metrics), + Some(Err(e)) => { + tracing::debug!(target: "gossip", "Failed to register metrics: {:?}", e); + None + }, + None => None, + }; + ConsensusGossip { peers: HashMap::new(), messages: Default::default(), known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), - engine_id, + protocol, validator, next_broadcast: Instant::now() + REBROADCAST_INTERVAL, + metrics, } } /// Handle new connected peer. pub fn new_peer(&mut self, network: &mut dyn Network, who: PeerId, role: ObservedRole) { - // light nodes are not valid targets for consensus gossip messages - if role.is_light() { - return; - } - - trace!(target:"gossip", "Registering {:?} {}", role, who); - self.peers.insert(who.clone(), PeerConsensus { - known_messages: HashSet::new(), - }); + tracing::trace!( + target:"gossip", + %who, + protocol = %self.protocol, + ?role, + "Registering peer", + ); + self.peers.insert(who.clone(), PeerConsensus { known_messages: HashSet::new() }); let validator = self.validator.clone(); let mut context = NetworkContext { gossip: self, network }; @@ -191,12 +219,11 @@ impl ConsensusGossip { sender: Option, ) { if self.known_messages.put(message_hash.clone(), ()).is_none() { - self.messages.push(MessageEntry { - message_hash, - topic, - message, - sender, - }); + self.messages.push(MessageEntry { message_hash, topic, message, sender }); + + if let Some(ref metrics) = self.metrics { + metrics.registered_messages.inc(); + } } } @@ -205,11 +232,7 @@ impl ConsensusGossip { /// the message's topic. No validation is performed on the message, if the /// message is already expired it should be dropped on the next garbage /// collection. - pub fn register_message( - &mut self, - topic: B::Hash, - message: Vec, - ) { + pub fn register_message(&mut self, topic: B::Hash, message: Vec) { let message_hash = HashFor::::hash(&message[..]); self.register_message_hashed(message_hash, topic, message, None); } @@ -233,21 +256,38 @@ impl ConsensusGossip { /// Rebroadcast all messages to all peers. 
fn rebroadcast(&mut self, network: &mut dyn Network) { - let messages = self.messages.iter() + let messages = self + .messages + .iter() .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); - propagate(network, self.engine_id, messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + messages, + MessageIntent::PeriodicRebroadcast, + &mut self.peers, + &self.validator, + ); } /// Broadcast all messages with given topic. pub fn broadcast_topic(&mut self, network: &mut dyn Network, topic: B::Hash, force: bool) { - let messages = self.messages.iter() - .filter_map(|entry| - if entry.topic == topic { - Some((&entry.message_hash, &entry.topic, &entry.message)) - } else { None } - ); + let messages = self.messages.iter().filter_map(|entry| { + if entry.topic == topic { + Some((&entry.message_hash, &entry.topic, &entry.message)) + } else { + None + } + }); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, messages, intent, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + messages, + intent, + &mut self.peers, + &self.validator, + ); } /// Prune old or no longer relevant consensus messages. Provide a predicate @@ -259,8 +299,17 @@ impl ConsensusGossip { let mut message_expired = self.validator.message_expired(); self.messages.retain(|entry| !message_expired(entry.topic, &entry.message)); - trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", - before - self.messages.len(), + let expired_messages = before - self.messages.len(); + + if let Some(ref metrics) = self.metrics { + metrics.expired_messages.inc_by(expired_messages as u64) + } + + tracing::trace!( + target: "gossip", + protocol = %self.protocol, + "Cleaned up {} stale messages, {} left ({} known)", + expired_messages, self.messages.len(), known_messages.len(), ); @@ -272,10 +321,13 @@ impl ConsensusGossip { /// Get valid messages received in the past for a topic (might have expired meanwhile). 
pub fn messages_for(&mut self, topic: B::Hash) -> impl Iterator + '_ { - self.messages.iter().filter(move |e| e.topic == topic).map(|entry| TopicNotification { - message: entry.message.clone(), - sender: entry.sender.clone(), - }) + self.messages + .iter() + .filter(move |e| e.topic == topic) + .map(|entry| TopicNotification { + message: entry.message.clone(), + sender: entry.sender.clone(), + }) } /// Register incoming messages and return the ones that are new and valid (according to a gossip @@ -289,16 +341,27 @@ impl ConsensusGossip { let mut to_forward = vec![]; if !messages.is_empty() { - trace!(target: "gossip", "Received {} messages from peer {}", messages.len(), who); + tracing::trace!( + target: "gossip", + messages_num = %messages.len(), + %who, + protocol = %self.protocol, + "Received messages from peer", + ); } for message in messages { let message_hash = HashFor::::hash(&message[..]); if self.known_messages.contains(&message_hash) { - trace!(target:"gossip", "Ignored already known message from {}", who); + tracing::trace!( + target: "gossip", + %who, + protocol = %self.protocol, + "Ignored already known message", + ); network.report_peer(who.clone(), rep::DUPLICATE_GOSSIP); - continue; + continue } // validate the message @@ -312,33 +375,38 @@ impl ConsensusGossip { ValidationResult::ProcessAndKeep(topic) => (topic, true), ValidationResult::ProcessAndDiscard(topic) => (topic, false), ValidationResult::Discard => { - trace!(target:"gossip", "Discard message from peer {}", who); - continue; + tracing::trace!( + target: "gossip", + %who, + protocol = %self.protocol, + "Discard message from peer", + ); + continue }, }; let peer = match self.peers.get_mut(&who) { Some(peer) => peer, None => { - error!(target:"gossip", "Got message from unregistered peer {}", who); - continue; - } + tracing::error!( + target: "gossip", + %who, + protocol = %self.protocol, + "Got message from unregistered peer", + ); + continue + }, }; network.report_peer(who.clone(), rep::GOSSIP_SUCCESS); peer.known_messages.insert(message_hash); - to_forward.push((topic, TopicNotification { - message: message.clone(), - sender: Some(who.clone()) - })); + to_forward.push(( + topic, + TopicNotification { message: message.clone(), sender: Some(who.clone()) }, + )); if keep { - self.register_message_hashed( - message_hash, - topic, - message, - Some(who.clone()), - ); + self.register_message_hashed(message_hash, topic, message, Some(who.clone())); } } @@ -351,30 +419,37 @@ impl ConsensusGossip { network: &mut dyn Network, who: &PeerId, topic: B::Hash, - force: bool + force: bool, ) { let mut message_allowed = self.validator.message_allowed(); if let Some(ref mut peer) = self.peers.get_mut(who) { for entry in self.messages.iter().filter(|m| m.topic == topic) { - let intent = if force { - MessageIntent::ForcedBroadcast - } else { - MessageIntent::Broadcast - }; + let intent = + if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; if !force && peer.known_messages.contains(&entry.message_hash) { - continue; + continue } if !message_allowed(who, intent, &entry.topic, &entry.message) { - continue; + continue } peer.known_messages.insert(entry.message_hash.clone()); - trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); - network.write_notification(who.clone(), self.engine_id, entry.message.clone()); + tracing::trace!( + target: "gossip", + to = %who, + protocol = %self.protocol, + ?entry.message, + "Sending topic message", + ); + network.write_notification( + 
who.clone(), + self.protocol.clone(), + entry.message.clone(), + ); } } } @@ -390,17 +465,19 @@ impl ConsensusGossip { let message_hash = HashFor::::hash(&message); self.register_message_hashed(message_hash, topic, message.clone(), None); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + iter::once((&message_hash, &topic, &message)), + intent, + &mut self.peers, + &self.validator, + ); } /// Send addressed message to a peer. The message is not kept or multicast /// later on. - pub fn send_message( - &mut self, - network: &mut dyn Network, - who: &PeerId, - message: Vec, - ) { + pub fn send_message(&mut self, network: &mut dyn Network, who: &PeerId, message: Vec) { let peer = match self.peers.get_mut(who) { None => return, Some(peer) => peer, @@ -408,20 +485,56 @@ impl ConsensusGossip { let message_hash = HashFor::::hash(&message); - trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); + tracing::trace!( + target: "gossip", + to = %who, + protocol = %self.protocol, + ?message, + "Sending direct message", + ); peer.known_messages.insert(message_hash); - network.write_notification(who.clone(), self.engine_id, message); + network.write_notification(who.clone(), self.protocol.clone(), message); + } +} + +struct Metrics { + registered_messages: Counter, + expired_messages: Counter, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + registered_messages: register( + Counter::new( + "network_gossip_registered_messages_total", + "Number of registered messages by the gossip service.", + )?, + registry, + )?, + expired_messages: register( + Counter::new( + "network_gossip_expired_messages_total", + "Number of expired messages by the gossip service.", + )?, + registry, + )?, + }) } } #[cfg(test)] mod tests { + use super::*; use futures::prelude::*; use sc_network::{Event, ReputationChange}; - use sp_runtime::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; - use std::{borrow::Cow, pin::Pin, sync::{Arc, Mutex}}; - use super::*; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + use std::{ + borrow::Cow, + pin::Pin, + sync::{Arc, Mutex}, + }; type Block = RawBlock>; @@ -435,7 +548,7 @@ mod tests { sender: None, }); } - } + }; } struct AllowAll; @@ -451,7 +564,7 @@ mod tests { } struct DiscardAll; - impl Validator for DiscardAll{ + impl Validator for DiscardAll { fn validate( &self, _context: &mut dyn ValidatorContext, @@ -481,17 +594,19 @@ mod tests { self.inner.lock().unwrap().peer_reports.push((peer_id, reputation_change)); } - fn disconnect_peer(&self, _: PeerId) { + fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { unimplemented!(); } - fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + + fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, str>) {} - - fn announce(&self, _: B::Hash, _: Vec) { + fn announce(&self, _: B::Hash, _: Option>) { unimplemented!(); } } @@ -520,7 +635,7 @@ mod tests { let prev_hash = H256::random(); let best_hash = H256::random(); - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 
0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let m1_hash = H256::random(); let m2_hash = H256::random(); let m1 = vec![1, 2, 3]; @@ -547,22 +662,22 @@ mod tests { #[test] fn message_stream_include_those_sent_before_asking() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); // Register message. let message = vec![4, 5, 6]; - let topic = HashFor::::hash(&[1,2,3]); + let topic = HashFor::::hash(&[1, 2, 3]); consensus.register_message(topic, message.clone()); assert_eq!( consensus.messages_for(topic).next(), - Some(TopicNotification { message: message, sender: None }), + Some(TopicNotification { message, sender: None }), ); } #[test] fn can_keep_multiple_messages_per_topic() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let topic = [1; 32].into(); let msg_a = vec![1, 2, 3]; @@ -576,32 +691,27 @@ mod tests { #[test] fn peer_is_removed_on_disconnect() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let mut network = NoOpNetwork::default(); let peer_id = PeerId::random(); - consensus.new_peer(&mut network, peer_id.clone(), ObservedRole::Full); + consensus.new_peer(&mut network, peer_id, ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id)); - consensus.peer_disconnected(&mut network, peer_id.clone()); + consensus.peer_disconnected(&mut network, peer_id); assert!(!consensus.peers.contains_key(&peer_id)); } #[test] fn on_incoming_ignores_discarded_messages() { - let to_forward = ConsensusGossip::::new( - Arc::new(DiscardAll), - [0, 0, 0, 0], - ).on_incoming( - &mut NoOpNetwork::default(), - PeerId::random(), - vec![vec![1, 2, 3]], - ); + let to_forward = ConsensusGossip::::new(Arc::new(DiscardAll), "/foo".into(), None) + .on_incoming(&mut NoOpNetwork::default(), PeerId::random(), vec![vec![1, 2, 3]]); assert!( to_forward.is_empty(), - "Expected `on_incoming` to ignore discarded message but got {:?}", to_forward, + "Expected `on_incoming` to ignore discarded message but got {:?}", + to_forward, ); } @@ -610,15 +720,13 @@ mod tests { let mut network = NoOpNetwork::default(); let remote = PeerId::random(); - let to_forward = ConsensusGossip::::new( - Arc::new(AllowAll), - [0, 0, 0, 0], - ).on_incoming( - &mut network, - // Unregistered peer. - remote.clone(), - vec![vec![1, 2, 3]], - ); + let to_forward = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None) + .on_incoming( + &mut network, + // Unregistered peer. + remote.clone(), + vec![vec![1, 2, 3]], + ); assert!( to_forward.is_empty(), diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index fd29aaddafe6d..9a2652d03f642 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -26,15 +26,14 @@ pub trait Validator: Send + Sync { } /// New connection is dropped. 
- fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) { - } + fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) {} /// Validate consensus message. fn validate( &self, context: &mut dyn ValidatorContext, sender: &PeerId, - data: &[u8] + data: &[u8], ) -> ValidationResult; /// Produce a closure for validating messages on a given topic. @@ -43,7 +42,9 @@ pub trait Validator: Send + Sync { } /// Produce a closure for filtering egress messages. - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { Box::new(move |_who, _intent, _topic, _data| true) } } @@ -99,7 +100,9 @@ impl Validator for DiscardAll { Box::new(move |_topic, _data| true) } - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { Box::new(move |_who, _intent, _topic, _data| false) } } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index af0e2a2dc10fa..873c2a847a29a 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate network protocol" name = "sc-network" -version = "0.8.0" +version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -14,68 +14,65 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.6.1" +prost-build = "0.8" [dependencies] async-trait = "0.1" async-std = "1.6.5" -bitflags = "1.2.0" -bs58 = "0.3.1" -bytes = "0.5.0" -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +bitflags = "1.3.2" +cid = "0.6.0" +bytes = "1" +codec = { package = "parity-scale-codec", version = "2.0.0", features = [ + "derive", +] } derive_more = "0.99.2" either = "1.5.3" -erased-serde = "0.3.9" fnv = "1.0.6" -fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } -futures = "0.3.4" +fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } +futures = "0.3.9" futures-timer = "3.0.2" -futures_codec = "0.4.0" +asynchronous-codec = "0.5" hex = "0.4.0" -ip_network = "0.3.4" -linked-hash-map = "0.5.2" +ip_network = "0.4.0" +linked-hash-map = "0.5.4" linked_hash_set = "0.1.3" +lru = "0.6.6" log = "0.4.8" -lru = "0.4.0" -nohash-hasher = "0.2.0" -parking_lot = "0.10.0" -pin-project = "0.4.6" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0", path = "../../utils/prometheus" } -prost = "0.6.1" +parking_lot = "0.11.1" +pin-project = "1.0.4" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } +prost = "0.8" rand = "0.7.2" -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-peerset = { version = "2.0.0", path = "../peerset" } -serde = { version = "1.0.101", features = ["derive"] } -serde_json = "1.0.41" -slog = { version = "2.5.2", features = ["nested-values"] } -slog_derive = "0.2.0" -smallvec = "0.6.10" -sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } 
+sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
+sc-client-api = { version = "4.0.0-dev", path = "../api" }
+sc-peerset = { version = "4.0.0-dev", path = "../peerset" }
+serde = { version = "1.0.126", features = ["derive"] }
+serde_json = "1.0.68"
+smallvec = "1.5.0"
+sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" }
+sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
+sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" }
+sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" }
+sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
+sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" }
+sc-utils = { version = "4.0.0-dev", path = "../utils" }
+sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" }
 thiserror = "1"
-unsigned-varint = { version = "0.4.0", features = ["futures", "futures-codec"] }
+unsigned-varint = { version = "0.6.0", features = [
+	"futures",
+	"asynchronous_codec",
+] }
 void = "1.0.2"
-wasm-timer = "0.2"
-zeroize = "1.0.0"
-
-[dependencies.libp2p]
-version = "0.28.1"
-default-features = false
-features = ["identify", "kad", "mdns-async-std", "mplex", "noise", "ping", "request-response", "tcp-async-std", "websocket", "yamux"]
+zeroize = "1.4.1"
+libp2p = "0.39.1"

 [dev-dependencies]
 assert_matches = "1.3"
-libp2p = { version = "0.28.1", default-features = false }
-quickcheck = "0.9.0"
+libp2p = { version = "0.39.1", default-features = false }
+quickcheck = "1.0.3"
 rand = "0.7.2"
-sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" }
 sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" }
-sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" }
+sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" }
 substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
 tempfile = "3.1.0"
diff --git a/client/network/README.md b/client/network/README.md
index e0bd691043bee..c361bc9249f71 100644
--- a/client/network/README.md
+++ b/client/network/README.md
@@ -120,8 +120,8 @@ bytes.
 block announces are pushed to other nodes. The handshake is empty on both sides. The message format
 is a SCALE-encoded tuple containing a block header followed with an opaque list of bytes containing
 some data associated with this block announcement, e.g. a candidate message.
-- Notifications protocols that are registered using the `register_notifications_protocol`
-method. For example: `/paritytech/grandpa/1`. See below for more information.
+- Notifications protocols that are registered using `NetworkConfiguration::notifications_protocols`.
+For example: `/paritytech/grandpa/1`. See below for more information.

 ## The legacy Substrate substream

@@ -203,6 +203,69 @@ integer representing the role of the node:

 In the future, though, these restrictions will be removed.

+# Sync
+
+The crate implements a number of syncing algorithms. The main purpose of the syncing algorithm is to
+get the chain to the latest state and keep it synced with the rest of the network by downloading and
+importing new data as soon as it becomes available. Once the node starts, it catches up with the
+network using one of the initial sync methods listed below, and once that is complete it uses a
+keep-up sync to download new blocks.
+
+## Full and light sync
+
+This is the default syncing method for the initial and keep-up sync. The algorithm starts with the
+current best block and downloads block data progressively from multiple peers if available. Once
+there's a sequence of blocks ready to be imported they are fed to the import queue. Full nodes
+download and execute full blocks, while light nodes only download and import headers. This continues
+until each peer has no more new blocks to give.
+
+For each peer the sync maintains the number of our common best block with that peer. This number is
+updated whenever the peer announces new blocks or our best block advances. This makes it possible to
+keep track of peers that have new block data and to request new information as soon as it is
+announced. In keep-up mode, we also track peers that announce blocks on all branches and not just the
+best branch. The sync algorithm tries to be greedy and download all data that is announced.
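The per-peer bookkeeping described above amounts to two update rules. Below is a minimal illustrative sketch (not part of this patch): `PeerSync` and both handler functions are hypothetical names rather than `sc-network` types, and the logic is simplified by assuming announced blocks lie on our best chain.

```rust
use std::collections::HashMap;

/// Hypothetical per-peer sync state: the peer's best block and the highest
/// block we believe we have in common with it.
struct PeerSync {
    best_number: u64,
    common_number: u64,
}

/// A peer announced a new block: remember its best, and raise the common
/// number if we already have the announced block (simplified: assumes the
/// announced block is on our best chain).
fn on_block_announce(
    peers: &mut HashMap<String, PeerSync>,
    peer: &str,
    announced: u64,
    our_best: u64,
) {
    if let Some(state) = peers.get_mut(peer) {
        state.best_number = state.best_number.max(announced);
        if announced <= our_best {
            state.common_number = state.common_number.max(announced);
        }
    }
}

/// Our own best block advanced: the common number with each peer may rise,
/// but can never exceed either side's best (same simplification as above).
fn on_our_best_advanced(peers: &mut HashMap<String, PeerSync>, our_best: u64) {
    for state in peers.values_mut() {
        state.common_number = state.common_number.max(state.best_number.min(our_best));
    }
}
```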
+## Fast sync
+
+In this mode the initial sync downloads and verifies the full header history. This makes it possible
+to validate authority set transitions and arrive at a recent header. After the header chain is
+verified and imported, the node starts downloading a state snapshot using the state request protocol.
+Each `StateRequest` contains a starting storage key, which is empty for the first request.
+`StateResponse` contains a storage proof for a sequence of keys and values in the storage, starting
+from (but not including) the key given in the request. After iterating the proof trie and checking it
+against the storage root in the target header, the node issues the next `StateRequest` with the
+starting key set to the last key of the previous response. This continues until trie iteration
+reaches the end. The state is then imported into the database and the keep-up sync starts in normal
+full/light sync mode.
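The request/response loop just described fits in a few lines. The following is a simplified sketch rather than the actual implementation: `StateRequest` and `StateResponse` stand in for the protobuf messages, and `send_state_request`, `verify_proof`, and `import_state` are hypothetical helpers.

```rust
struct StateRequest {
    /// Storage key to resume iteration from; empty for the first request.
    start: Vec<u8>,
}

struct StateResponse {
    /// Proven (key, value) pairs, in trie iteration order.
    entries: Vec<(Vec<u8>, Vec<u8>)>,
    /// True once trie iteration has reached the end.
    complete: bool,
}

/// Download the state matching `target_root`, one request at a time.
fn download_state(target_root: [u8; 32]) {
    let mut start = Vec::new(); // the first request starts from the empty key
    loop {
        let response = send_state_request(StateRequest { start: start.clone() });
        // Check the storage proof against the root from the already-verified
        // target header.
        verify_proof(&target_root, &response.entries);
        if response.complete {
            break
        }
        // Resume from (but not including) the last key of this response.
        start = response.entries.last().expect("incomplete response has entries").0.clone();
    }
    // Iteration reached the end: import the state and switch to keep-up sync.
    import_state();
}

// Hypothetical helpers, left unimplemented in this sketch.
fn send_state_request(_req: StateRequest) -> StateResponse { unimplemented!() }
fn verify_proof(_root: &[u8; 32], _entries: &[(Vec<u8>, Vec<u8>)]) {}
fn import_state() {}
```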
+## Warp sync
+
+This is similar to fast sync, but instead of downloading and verifying the full header chain, the
+algorithm only downloads finalized authority set changes.
+
+### GRANDPA warp sync.
+
+GRANDPA keeps justifications for each finalized authority set change. Each change is signed by the
+authorities from the previous set. By downloading and verifying these signed hand-offs starting from
+genesis, we arrive at a recent header faster than by downloading the full header chain. Each
+`WarpSyncRequest` contains a block hash to start collecting proofs from. `WarpSyncResponse` contains
+a sequence of block headers and justifications. The proof downloader checks the justifications and
+continues requesting proofs from the last header hash, until it arrives at some recent header.
+
+Once the finality chain is proved for a header, the state matching the header is downloaded much like
+during the fast sync. The state is verified to match the header storage root. After the state is
+imported into the database it is queried for the information that allows GRANDPA and BABE to continue
+operating from that state. This includes BABE epoch information and the GRANDPA authority set id.
+
+### Background block download.
+
+After the latest state has been imported, the node is fully operational but is still missing historic
+block data, i.e. it is unable to serve block bodies and headers other than the most recent one. To
+make sure all nodes have block history available, a background sync process is started that downloads
+all the missing blocks. It is run in parallel with the keep-up sync and does not interfere with
+downloading of the recent blocks. During this download we also import GRANDPA justifications for
+blocks with authority set changes, so that the warp-synced node has all the data needed to serve
+other nodes that might want to sync from it with any method.
+
 # Usage

 Using the `sc-network` crate is done through the [`NetworkWorker`] struct. Create this
@@ -223,4 +286,4 @@ dispatching a background task with the [`NetworkWorker`].

 More precise usage details are still being worked on and will likely change in the future.

-License: GPL-3.0-or-later WITH Classpath-exception-2.0
\ No newline at end of file
+License: GPL-3.0-or-later WITH Classpath-exception-2.0
diff --git a/client/network/build.rs b/client/network/build.rs
index 8ed460f163eb4..6e5b83d4e58ae 100644
--- a/client/network/build.rs
+++ b/client/network/build.rs
@@ -1,8 +1,5 @@
-const PROTOS: &[&str] = &[
-	"src/schema/api.v1.proto",
-	"src/schema/finality.v1.proto",
-	"src/schema/light.v1.proto"
-];
+const PROTOS: &[&str] =
+	&["src/schema/api.v1.proto", "src/schema/light.v1.proto", "src/schema/bitswap.v1.2.0.proto"];

 fn main() {
 	prost_build::compile_protos(PROTOS, &["src/schema"]).unwrap();
diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs
index 6b3cfac38ae99..08d061ee26b23 100644
--- a/client/network/src/behaviour.rs
+++ b/client/network/src/behaviour.rs
@@ -1,36 +1,48 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.

-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.

 // You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see .
use crate::{ - config::{ProtocolId, Role}, block_requests, light_client_handler, finality_requests, - peer_info, request_responses, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, - protocol::{message::{self, Roles}, CustomMessageOutcome, NotificationsSink, Protocol}, - ObservedRole, DhtEvent, ExHashT, + bitswap::Bitswap, + config::ProtocolId, + discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + light_client_requests, peer_info, + protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, + request_responses, DhtEvent, ObservedRole, }; use bytes::Bytes; -use codec::Encode as _; -use libp2p::NetworkBehaviour; -use libp2p::core::{Multiaddr, PeerId, PublicKey}; -use libp2p::identify::IdentifyInfo; -use libp2p::kad::record; -use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; +use codec::Encode; +use futures::{channel::oneshot, stream::StreamExt}; +use libp2p::{ + core::{Multiaddr, PeerId, PublicKey}, + identify::IdentifyInfo, + kad::record, + swarm::{toggle::Toggle, NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}, + NetworkBehaviour, +}; use log::debug; -use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; -use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId, Justification}; +use prost::Message; +use sc_consensus::import_queue::{IncomingBlock, Origin}; +use sp_consensus::BlockOrigin; +use sp_runtime::{ + traits::{Block as BlockT, NumberFor}, + Justifications, +}; use std::{ borrow::Cow, collections::{HashSet, VecDeque}, @@ -40,43 +52,53 @@ use std::{ }; pub use crate::request_responses::{ - ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, RequestId, SendRequestError + IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, RequestId, ResponseFailure, }; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOut", poll_method = "poll")] -pub struct Behaviour { +pub struct Behaviour { /// All the substrate-specific protocols. - substrate: Protocol, + substrate: Protocol, /// Periodically pings and identifies the nodes we are connected to, and store information in a /// cache. peer_info: peer_info::PeerInfoBehaviour, /// Discovers nodes of the network. discovery: DiscoveryBehaviour, + /// Bitswap server for blockchain data. + bitswap: Toggle>, /// Generic request-reponse protocols. request_responses: request_responses::RequestResponsesBehaviour, - /// Block request handling. - block_requests: block_requests::BlockRequests, - /// Finality proof request handling. - finality_proof_requests: finality_requests::FinalityProofRequests, - /// Light client request handling. - light_client_handler: light_client_handler::LightClientHandler, /// Queue of events to produce for the outside. #[behaviour(ignore)] events: VecDeque>, - /// Role of our local node, as originally passed from the configuration. + /// Light client request handling. + #[behaviour(ignore)] + light_client_request_sender: light_client_requests::sender::LightClientRequestSender, + + /// Protocol name used to send out block requests via + /// [`request_responses::RequestResponsesBehaviour`]. #[behaviour(ignore)] - role: Role, + block_request_protocol_name: String, + + /// Protocol name used to send out state requests via + /// [`request_responses::RequestResponsesBehaviour`]. 
+ #[behaviour(ignore)] + state_request_protocol_name: String, + + /// Protocol name used to send out warp sync requests via + /// [`request_responses::RequestResponsesBehaviour`]. + #[behaviour(ignore)] + warp_sync_protocol_name: Option, } /// Event generated by `Behaviour`. pub enum BehaviourOut { BlockImport(BlockOrigin, Vec>), - JustificationImport(Origin, B::Hash, NumberFor, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + JustificationImport(Origin, B::Hash, NumberFor, Justifications), /// Started a random iterative Kademlia discovery query. RandomKademliaStarted(ProtocolId), @@ -94,34 +116,18 @@ pub enum BehaviourOut { result: Result, }, - /// A request initiated using [`Behaviour::send_request`] has succeeded or failed. - RequestFinished { - /// Request that has succeeded. - request_id: RequestId, - /// Response sent by the remote or reason for failure. - result: Result, RequestFailure>, - }, - - /// Started a new request with the given node. - /// - /// This event is for statistics purposes only. The request and response handling are entirely - /// internal to the behaviour. - OpaqueRequestStarted { - peer: PeerId, - /// Protocol name of the request. - protocol: String, - }, - /// Finished, successfully or not, a previously-started request. + /// A request has succeeded or failed. /// - /// This event is for statistics purposes only. The request and response handling are entirely - /// internal to the behaviour. - OpaqueRequestFinished { - /// Who we were requesting. + /// This event is generated for statistics purposes. + RequestFinished { + /// Peer that we send a request to. peer: PeerId, - /// Protocol name of the request. - protocol: String, - /// How long before the response came or the request got cancelled. - request_duration: Duration, + /// Name of the protocol in question. + protocol: Cow<'static, str>, + /// Duration the request took. + duration: Duration, + /// Result of the request. + result: Result<(), RequestFailure>, }, /// Opened a substream with the given node with the given notifications protocol. @@ -131,7 +137,12 @@ pub enum BehaviourOut { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, + /// If the negotiation didn't use the main name of the protocol (the one in + /// `notifications_protocol`), then this field contains which name has actually been + /// used. + /// See also [`crate::Event::NotificationStreamOpened`]. + negotiated_fallback: Option>, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, /// Role of the remote. @@ -147,7 +158,7 @@ pub enum BehaviourOut { /// Id of the peer we are connected to. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, /// Replacement for the previous [`NotificationsSink`]. notifications_sink: NotificationsSink, }, @@ -158,7 +169,7 @@ pub enum BehaviourOut { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -166,38 +177,64 @@ pub enum BehaviourOut { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. 
- messages: Vec<(ConsensusEngineId, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, + /// Now connected to a new peer for syncing purposes. + SyncConnected(PeerId), + + /// No longer connected to a peer for syncing purposes. + SyncDisconnected(PeerId), + /// Events generated by a DHT as a response to get_value or put_value requests as well as the /// request duration. Dht(DhtEvent, Duration), } -impl Behaviour { +impl Behaviour { /// Builds a new `Behaviour`. pub fn new( - substrate: Protocol, - role: Role, + substrate: Protocol, user_agent: String, local_public_key: PublicKey, - block_requests: block_requests::BlockRequests, - finality_proof_requests: finality_requests::FinalityProofRequests, - light_client_handler: light_client_handler::LightClientHandler, + light_client_request_sender: light_client_requests::sender::LightClientRequestSender, disco_config: DiscoveryConfig, - request_response_protocols: Vec, + block_request_protocol_config: request_responses::ProtocolConfig, + state_request_protocol_config: request_responses::ProtocolConfig, + warp_sync_protocol_config: Option, + bitswap: Option>, + light_client_request_protocol_config: request_responses::ProtocolConfig, + // All remaining request protocol configs. + mut request_response_protocols: Vec, ) -> Result { - Ok(Behaviour { + // Extract protocol name and add to `request_response_protocols`. + let block_request_protocol_name = block_request_protocol_config.name.to_string(); + let state_request_protocol_name = state_request_protocol_config.name.to_string(); + let warp_sync_protocol_name = match warp_sync_protocol_config { + Some(config) => { + let name = config.name.to_string(); + request_response_protocols.push(config); + Some(name) + }, + None => None, + }; + request_response_protocols.push(block_request_protocol_config); + request_response_protocols.push(state_request_protocol_config); + request_response_protocols.push(light_client_request_protocol_config); + + Ok(Self { substrate, peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), - request_responses: - request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, - block_requests, - finality_proof_requests, - light_client_handler, + bitswap: bitswap.into(), + request_responses: request_responses::RequestResponsesBehaviour::new( + request_response_protocols.into_iter(), + )?, + light_client_request_sender, events: VecDeque::new(), - role, + block_request_protocol_name, + state_request_protocol_name, + warp_sync_protocol_name, }) } @@ -215,7 +252,9 @@ impl Behaviour { /// /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm /// of their lower bound. - pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { + pub fn num_entries_per_kbucket( + &mut self, + ) -> impl ExactSizeIterator)> { self.discovery.num_entries_per_kbucket() } @@ -225,7 +264,9 @@ impl Behaviour { } /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { self.discovery.kademlia_records_total_size() } @@ -239,78 +280,52 @@ impl Behaviour { } /// Initiates sending a request. - /// - /// An error is returned if we are not connected to the target peer of if the protocol doesn't - /// match one that has been registered. 
- pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) - -> Result - { - self.request_responses.send_request(target, protocol, request) - } - - /// Registers a new notifications protocol. - /// - /// Please call `event_stream` before registering a protocol, otherwise you may miss events - /// about the protocol that you have registered. - /// - /// You are very strongly encouraged to call this method very early on. Any connection open - /// will retain the protocols that were registered then, and not any new one. - pub fn register_notifications_protocol( + pub fn send_request( &mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + target: &PeerId, + protocol: &str, + request: Vec, + pending_response: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, ) { - // This is the message that we will send to the remote as part of the initial handshake. - // At the moment, we force this to be an encoded `Roles`. - let handshake_message = Roles::from(&self.role).encode(); - - let list = self.substrate.register_notifications_protocol(engine_id, protocol_name, handshake_message); - for (remote, roles, notifications_sink) in list { - let role = reported_roles_to_observed_role(&self.role, remote, roles); - self.events.push_back(BehaviourOut::NotificationStreamOpened { - remote: remote.clone(), - engine_id, - role, - notifications_sink: notifications_sink.clone(), - }); - } + self.request_responses + .send_request(target, protocol, request, pending_response, connect) } /// Returns a shared reference to the user protocol. - pub fn user_protocol(&self) -> &Protocol { + pub fn user_protocol(&self) -> &Protocol { &self.substrate } /// Returns a mutable reference to the user protocol. - pub fn user_protocol_mut(&mut self) -> &mut Protocol { + pub fn user_protocol_mut(&mut self) -> &mut Protocol { &mut self.substrate } - /// Start querying a record from the DHT. Will later produce either a `ValueFound` or a `ValueNotFound` event. + /// Start querying a record from the DHT. Will later produce either a `ValueFound` or a + /// `ValueNotFound` event. pub fn get_value(&mut self, key: &record::Key) { self.discovery.get_value(key); } - /// Starts putting a record into DHT. Will later produce either a `ValuePut` or a `ValuePutFailed` event. + /// Starts putting a record into DHT. Will later produce either a `ValuePut` or a + /// `ValuePutFailed` event. pub fn put_value(&mut self, key: record::Key, value: Vec) { self.discovery.put_value(key, value); } /// Issue a light client request. 
- pub fn light_client_request(&mut self, r: light_client_handler::Request) -> Result<(), light_client_handler::Error> { - self.light_client_handler.request(r) + pub fn light_client_request( + &mut self, + r: light_client_requests::sender::Request, + ) -> Result<(), light_client_requests::sender::SendRequestError> { + self.light_client_request_sender.request(r) } } -fn reported_roles_to_observed_role(local_role: &Role, remote: &PeerId, roles: Roles) -> ObservedRole { +fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { if roles.is_authority() { - match local_role { - Role::Authority { sentry_nodes } - if sentry_nodes.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurSentry, - Role::Sentry { validators } - if validators.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurGuardedAuthority, - _ => ObservedRole::Authority - } + ObservedRole::Authority } else if roles.is_full() { ObservedRole::Full } else { @@ -318,173 +333,149 @@ fn reported_roles_to_observed_role(local_role: &Role, remote: &PeerId, roles: Ro } } -impl NetworkBehaviourEventProcess for -Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: void::Void) { void::unreachable(event) } } -impl NetworkBehaviourEventProcess> for -Behaviour { +impl NetworkBehaviourEventProcess> for Behaviour { fn inject_event(&mut self, event: CustomMessageOutcome) { match event { CustomMessageOutcome::BlockImport(origin, blocks) => self.events.push_back(BehaviourOut::BlockImport(origin, blocks)), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - self.events.push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), - CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => - self.events.push_back(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), - CustomMessageOutcome::BlockRequest { target, request } => { - match self.block_requests.send_request(&target, request) { - block_requests::SendRequestOutcome::Ok => { - self.events.push_back(BehaviourOut::OpaqueRequestStarted { - peer: target, - protocol: self.block_requests.protocol_name().to_owned(), - }); - }, - block_requests::SendRequestOutcome::Replaced { request_duration, .. 
} => { - self.events.push_back(BehaviourOut::OpaqueRequestFinished { - peer: target.clone(), - protocol: self.block_requests.protocol_name().to_owned(), - request_duration, - }); - self.events.push_back(BehaviourOut::OpaqueRequestStarted { - peer: target, - protocol: self.block_requests.protocol_name().to_owned(), - }); - } - block_requests::SendRequestOutcome::NotConnected | - block_requests::SendRequestOutcome::EncodeError(_) => {}, + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => self + .events + .push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), + CustomMessageOutcome::BlockRequest { target, request, pending_response } => { + let mut buf = Vec::with_capacity(request.encoded_len()); + if let Err(err) = request.encode(&mut buf) { + log::warn!( + target: "sync", + "Failed to encode block request {:?}: {:?}", + request, err + ); + return } + + self.request_responses.send_request( + &target, + &self.block_request_protocol_name, + buf, + pending_response, + IfDisconnected::ImmediateError, + ); }, - CustomMessageOutcome::FinalityProofRequest { target, block_hash, request } => { - self.finality_proof_requests.send_request(&target, block_hash, request); - }, - CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles, notifications_sink } => { - let role = reported_roles_to_observed_role(&self.role, &remote, roles); - for engine_id in protocols { - self.events.push_back(BehaviourOut::NotificationStreamOpened { - remote: remote.clone(), - engine_id, - role: role.clone(), - notifications_sink: notifications_sink.clone(), - }); + CustomMessageOutcome::StateRequest { target, request, pending_response } => { + let mut buf = Vec::with_capacity(request.encoded_len()); + if let Err(err) = request.encode(&mut buf) { + log::warn!( + target: "sync", + "Failed to encode state request {:?}: {:?}", + request, err + ); + return } + + self.request_responses.send_request( + &target, + &self.state_request_protocol_name, + buf, + pending_response, + IfDisconnected::ImmediateError, + ); }, - CustomMessageOutcome::NotificationStreamReplaced { remote, protocols, notifications_sink } => - for engine_id in protocols { - self.events.push_back(BehaviourOut::NotificationStreamReplaced { - remote: remote.clone(), - engine_id, - notifications_sink: notifications_sink.clone(), - }); - }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocols } => - for engine_id in protocols { - self.events.push_back(BehaviourOut::NotificationStreamClosed { - remote: remote.clone(), - engine_id, - }); + CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => + match &self.warp_sync_protocol_name { + Some(name) => self.request_responses.send_request( + &target, + name, + request.encode(), + pending_response, + IfDisconnected::ImmediateError, + ), + None => { + log::warn!( + target: "sync", + "Trying to send warp sync request when no protocol is configured {:?}", + request, + ); + return + }, }, + CustomMessageOutcome::NotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + roles, + notifications_sink, + } => { + self.events.push_back(BehaviourOut::NotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + role: reported_roles_to_observed_role(roles), + notifications_sink: notifications_sink.clone(), + }); + }, + CustomMessageOutcome::NotificationStreamReplaced { + remote, + protocol, + notifications_sink, + } => self.events.push_back(BehaviourOut::NotificationStreamReplaced { + remote, + protocol, 
+ notifications_sink, + }), + CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => self + .events + .push_back(BehaviourOut::NotificationStreamClosed { remote, protocol }), CustomMessageOutcome::NotificationsReceived { remote, messages } => { self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); }, CustomMessageOutcome::PeerNewBest(peer_id, number) => { - self.light_client_handler.update_best_block(&peer_id, number); - } - CustomMessageOutcome::None => {} + self.light_client_request_sender.update_best_block(&peer_id, number); + }, + CustomMessageOutcome::SyncConnected(peer_id) => { + self.light_client_request_sender.inject_connected(peer_id); + self.events.push_back(BehaviourOut::SyncConnected(peer_id)) + }, + CustomMessageOutcome::SyncDisconnected(peer_id) => { + self.light_client_request_sender.inject_disconnected(peer_id); + self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)) + }, + CustomMessageOutcome::None => {}, } } } -impl NetworkBehaviourEventProcess for Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: request_responses::Event) { match event { request_responses::Event::InboundRequest { peer, protocol, result } => { - self.events.push_back(BehaviourOut::InboundRequest { + self.events.push_back(BehaviourOut::InboundRequest { peer, protocol, result }); + }, + request_responses::Event::RequestFinished { peer, protocol, duration, result } => { + self.events.push_back(BehaviourOut::RequestFinished { peer, protocol, - result, - }); - } - - request_responses::Event::RequestFinished { request_id, result } => { - self.events.push_back(BehaviourOut::RequestFinished { - request_id, + duration, result, }); }, + request_responses::Event::ReputationChanges { peer, changes } => + for change in changes { + self.substrate.report_peer(peer, change); + }, } } } -impl NetworkBehaviourEventProcess> for Behaviour { - fn inject_event(&mut self, event: block_requests::Event) { - match event { - block_requests::Event::AnsweredRequest { peer, total_handling_time } => { - self.events.push_back(BehaviourOut::InboundRequest { - peer, - protocol: self.block_requests.protocol_name().to_owned().into(), - result: Ok(total_handling_time), - }); - }, - block_requests::Event::Response { peer, original_request: _, response, request_duration } => { - self.events.push_back(BehaviourOut::OpaqueRequestFinished { - peer: peer.clone(), - protocol: self.block_requests.protocol_name().to_owned(), - request_duration, - }); - let ev = self.substrate.on_block_response(peer, response); - self.inject_event(ev); - } - block_requests::Event::RequestCancelled { peer, request_duration, .. } | - block_requests::Event::RequestTimeout { peer, request_duration, .. } => { - // There doesn't exist any mechanism to report cancellations or timeouts yet, so - // we process them by disconnecting the node. 
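
The new `BlockRequest` and `StateRequest` arms above share one pattern: the protobuf request is sized with `encoded_len()`, encoded with prost, and handed to the shared request-response behaviour, while the `WarpSyncRequest` payload is SCALE-encoded via `request.encode()` instead. A minimal sketch of the protobuf path, where `encode_request` is a hypothetical helper and only the `prost::Message` calls are the real API:

```rust
// Sketch of the encode-then-dispatch pattern used above; `encode_request`
// is a hypothetical helper, the `prost::Message` methods are real.
use prost::Message;

fn encode_request<R: Message>(request: &R) -> Result<Vec<u8>, prost::EncodeError> {
	// Pre-allocate the exact encoded size, as the arms above do.
	let mut buf = Vec::with_capacity(request.encoded_len());
	request.encode(&mut buf)?;
	// `buf` is then passed to `request_responses.send_request(..)` together
	// with the protocol name and `IfDisconnected::ImmediateError`.
	Ok(buf)
}
```

On an encoding failure the arms above only log a warning with target `"sync"` and drop the request rather than tearing down the behaviour.
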
- self.events.push_back(BehaviourOut::OpaqueRequestFinished { - peer: peer.clone(), - protocol: self.block_requests.protocol_name().to_owned(), - request_duration, - }); - self.substrate.on_block_request_failed(&peer); - } - } - } -} - -impl NetworkBehaviourEventProcess> for Behaviour { - fn inject_event(&mut self, event: finality_requests::Event) { - match event { - finality_requests::Event::Response { peer, block_hash, proof } => { - let response = message::FinalityProofResponse { - id: 0, - block: block_hash, - proof: if !proof.is_empty() { - Some(proof) - } else { - None - }, - }; - let ev = self.substrate.on_finality_proof_response(peer, response); - self.inject_event(ev); - } - } - } -} - -impl NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { let peer_info::PeerInfoEvent::Identified { peer_id, - info: IdentifyInfo { - protocol_version, - agent_version, - mut listen_addrs, - protocols, - .. - }, + info: IdentifyInfo { protocol_version, agent_version, mut listen_addrs, protocols, .. }, } = event; if listen_addrs.len() > 30 { @@ -499,12 +490,11 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, out: DiscoveryOut) { match out { DiscoveryOut::UnroutablePeer(_peer_id) => { @@ -512,33 +502,52 @@ impl NetworkBehaviourEventProcess // to Kademlia is handled by the `Identify` protocol, part of the // `PeerInfoBehaviour`. See the `NetworkBehaviourEventProcess` // implementation for `PeerInfoEvent`. - } + }, DiscoveryOut::Discovered(peer_id) => { - self.substrate.add_discovered_nodes(iter::once(peer_id)); - } + self.substrate.add_default_set_discovered_nodes(iter::once(peer_id)); + }, DiscoveryOut::ValueFound(results, duration) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); - } + self.events + .push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); + }, DiscoveryOut::ValueNotFound(key, duration) => { self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration)); - } + }, DiscoveryOut::ValuePut(key, duration) => { self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key), duration)); - } + }, DiscoveryOut::ValuePutFailed(key, duration) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); - } - DiscoveryOut::RandomKademliaStarted(protocols) => { + self.events + .push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); + }, + DiscoveryOut::RandomKademliaStarted(protocols) => for protocol in protocols { self.events.push_back(BehaviourOut::RandomKademliaStarted(protocol)); - } - } + }, } } } -impl Behaviour { - fn poll(&mut self, _: &mut Context, _: &mut impl PollParameters) -> Poll>> { +impl Behaviour { + fn poll( + &mut self, + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll>> { + use light_client_requests::sender::OutEvent; + while let Poll::Ready(Some(event)) = self.light_client_request_sender.poll_next_unpin(cx) { + match event { + OutEvent::SendRequest { target, request, pending_response, protocol_name } => + self.request_responses.send_request( + &target, + &protocol_name, + request, + pending_response, + IfDisconnected::ImmediateError, + ), + } + } + if let Some(event) = self.events.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) } diff --git a/client/network/src/bitswap.rs 
b/client/network/src/bitswap.rs
new file mode 100644
index 0000000000000..6b53dce626505
--- /dev/null
+++ b/client/network/src/bitswap.rs
@@ -0,0 +1,335 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+//! Bitswap server for Substrate.
+//!
+//! Allows querying transactions by hash over the standard bitswap protocol.
+//! Only supports bitswap 1.2.0.
+//! The CID is expected to reference a 256-bit Blake2b transaction hash.
+
+use crate::{
+	chain::Client,
+	schema::bitswap::{
+		message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType},
+		Message as BitswapMessage,
+	},
+};
+use cid::Version;
+use core::pin::Pin;
+use futures::{
+	io::{AsyncRead, AsyncWrite},
+	Future,
+};
+use libp2p::{
+	core::{
+		connection::ConnectionId, upgrade, InboundUpgrade, Multiaddr, OutboundUpgrade, PeerId,
+		UpgradeInfo,
+	},
+	swarm::{
+		IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler,
+		OneShotHandler, PollParameters, ProtocolsHandler,
+	},
+};
+use log::{debug, error, trace};
+use prost::Message;
+use sp_runtime::traits::Block as BlockT;
+use std::{
+	collections::VecDeque,
+	io,
+	sync::Arc,
+	task::{Context, Poll},
+};
+use unsigned_varint::encode as varint_encode;
+
+const LOG_TARGET: &str = "bitswap";
+
+// Undocumented, but according to JS the bitswap messages have a max size of 512*1024 bytes
+// https://github.com/ipfs/js-ipfs-bitswap/blob/
+// d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16
+// We set it to the same value as the max substrate protocol message.
+const MAX_PACKET_SIZE: usize = 16 * 1024 * 1024;
+
+// Max number of queued responses before denying requests.
+const MAX_RESPONSE_QUEUE: usize = 20; +// Max number of blocks per wantlist +const MAX_WANTED_BLOCKS: usize = 16; + +const PROTOCOL_NAME: &'static [u8] = b"/ipfs/bitswap/1.2.0"; + +type FutureResult = Pin> + Send>>; + +/// Bitswap protocol config +#[derive(Clone, Copy, Debug, Default)] +pub struct BitswapConfig; + +impl UpgradeInfo for BitswapConfig { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(PROTOCOL_NAME) + } +} + +impl InboundUpgrade for BitswapConfig +where + TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, +{ + type Output = BitswapMessage; + type Error = BitswapError; + type Future = FutureResult; + + fn upgrade_inbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { + Box::pin(async move { + let packet = upgrade::read_length_prefixed(&mut socket, MAX_PACKET_SIZE).await?; + let message: BitswapMessage = Message::decode(packet.as_slice())?; + Ok(message) + }) + } +} + +impl UpgradeInfo for BitswapMessage { + type Info = &'static [u8]; + type InfoIter = std::iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once(PROTOCOL_NAME) + } +} + +impl OutboundUpgrade for BitswapMessage +where + TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, +{ + type Output = (); + type Error = io::Error; + type Future = FutureResult; + + fn upgrade_outbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { + Box::pin(async move { + let mut data = Vec::with_capacity(self.encoded_len()); + self.encode(&mut data)?; + upgrade::write_length_prefixed(&mut socket, data).await + }) + } +} + +/// Internal protocol handler event. +#[derive(Debug)] +pub enum HandlerEvent { + /// We received a `BitswapMessage` from a remote. + Request(BitswapMessage), + /// We successfully sent a `BitswapMessage`. + ResponseSent, +} + +impl From for HandlerEvent { + fn from(message: BitswapMessage) -> Self { + Self::Request(message) + } +} + +impl From<()> for HandlerEvent { + fn from(_: ()) -> Self { + Self::ResponseSent + } +} + +/// Prefix represents all metadata of a CID, without the actual content. +#[derive(PartialEq, Eq, Clone, Debug)] +struct Prefix { + /// The version of CID. + pub version: Version, + /// The codec of CID. + pub codec: u64, + /// The multihash type of CID. + pub mh_type: u64, + /// The multihash length of CID. + pub mh_len: u8, +} + +impl Prefix { + /// Convert the prefix to encoded bytes. + pub fn to_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(4); + let mut buf = varint_encode::u64_buffer(); + let version = varint_encode::u64(self.version.into(), &mut buf); + res.extend_from_slice(version); + let mut buf = varint_encode::u64_buffer(); + let codec = varint_encode::u64(self.codec.into(), &mut buf); + res.extend_from_slice(codec); + let mut buf = varint_encode::u64_buffer(); + let mh_type = varint_encode::u64(self.mh_type.into(), &mut buf); + res.extend_from_slice(mh_type); + let mut buf = varint_encode::u64_buffer(); + let mh_len = varint_encode::u64(self.mh_len as u64, &mut buf); + res.extend_from_slice(mh_len); + res + } +} + +/// Network behaviour that handles sending and receiving IPFS blocks. +pub struct Bitswap { + client: Arc>, + ready_blocks: VecDeque<(PeerId, BitswapMessage)>, +} + +impl Bitswap { + /// Create a new instance of the bitswap protocol handler. 
+ pub fn new(client: Arc>) -> Self { + Self { client, ready_blocks: Default::default() } + } +} + +impl NetworkBehaviour for Bitswap { + type ProtocolsHandler = OneShotHandler; + type OutEvent = void::Void; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + Default::default() + } + + fn addresses_of_peer(&mut self, _peer: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _peer: &PeerId) {} + + fn inject_disconnected(&mut self, _peer: &PeerId) {} + + fn inject_event(&mut self, peer: PeerId, _connection: ConnectionId, message: HandlerEvent) { + let request = match message { + HandlerEvent::ResponseSent => return, + HandlerEvent::Request(msg) => msg, + }; + trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); + if self.ready_blocks.len() > MAX_RESPONSE_QUEUE { + debug!(target: LOG_TARGET, "Ignored request: queue is full"); + return + } + let mut response = BitswapMessage { + wantlist: None, + blocks: Default::default(), + payload: Default::default(), + block_presences: Default::default(), + pending_bytes: 0, + }; + let wantlist = match request.wantlist { + Some(wantlist) => wantlist, + None => { + debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer); + return + }, + }; + if wantlist.entries.len() > MAX_WANTED_BLOCKS { + trace!(target: LOG_TARGET, "Ignored request: too many entries"); + return + } + for entry in wantlist.entries { + let cid = match cid::Cid::read_bytes(entry.block.as_slice()) { + Ok(cid) => cid, + Err(e) => { + trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); + continue + }, + }; + if cid.version() != cid::Version::V1 || + cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || + cid.hash().size() != 32 + { + debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); + continue + } + let mut hash = B::Hash::default(); + hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); + let transaction = match self.client.indexed_transaction(&hash) { + Ok(ex) => ex, + Err(e) => { + error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); + None + }, + }; + match transaction { + Some(transaction) => { + trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash); + if entry.want_type == WantType::Block as i32 { + let prefix = Prefix { + version: cid.version(), + codec: cid.codec(), + mh_type: cid.hash().code(), + mh_len: cid.hash().size(), + }; + response + .payload + .push(MessageBlock { prefix: prefix.to_bytes(), data: transaction }); + } else { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::Have as i32, + cid: cid.to_bytes(), + }); + } + }, + None => { + trace!(target: LOG_TARGET, "Missing CID {:?}, hash {:?}", cid, hash); + if entry.send_dont_have { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::DontHave as i32, + cid: cid.to_bytes(), + }); + } + }, + } + } + trace!(target: LOG_TARGET, "Response: {:?}", response); + self.ready_blocks.push_back((peer, response)); + } + + fn poll(&mut self, _ctx: &mut Context, _: &mut impl PollParameters) -> Poll< + NetworkBehaviourAction< + <::Handler as ProtocolsHandler>::InEvent, + Self::OutEvent, + >, + >{ + if let Some((peer_id, message)) = self.ready_blocks.pop_front() { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::Any, + event: message, + }) + } + Poll::Pending + } +} + +/// Bitswap protocol error. 
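
The `inject_event` implementation above accepts a wantlist entry only when its CID can stand in for a transaction hash. A small sketch of that screening, using the same `cid` crate calls as the patch:

```rust
// Only CIDv1 wrapping a 32-byte Blake2b-256 multihash maps onto a
// 256-bit transaction hash; everything else is skipped with a debug log.
fn is_supported_cid(cid: &cid::Cid) -> bool {
	cid.version() == cid::Version::V1 &&
		cid.hash().code() == u64::from(cid::multihash::Code::Blake2b256) &&
		cid.hash().size() == 32
}
```

For a matching CID, the 32-byte digest is copied into `B::Hash` and looked up via `client.indexed_transaction`; a hit is answered with either the full payload or a `Have` presence, depending on the requested `WantType`.
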
+#[derive(derive_more::Display, derive_more::From)] +pub enum BitswapError { + /// Protobuf decoding error. + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + /// Protobuf encoding error. + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + /// Client backend error. + Client(sp_blockchain::Error), + /// Error parsing CID + BadCid(cid::Error), + /// Packet read error. + Read(io::Error), + /// Error sending response. + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/network/src/block_request_handler.rs b/client/network/src/block_request_handler.rs new file mode 100644 index 0000000000000..9411ca71fd009 --- /dev/null +++ b/client/network/src/block_request_handler.rs @@ -0,0 +1,396 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper for handling (i.e. answering) block requests from a remote peer via the +//! [`crate::request_responses::RequestResponsesBehaviour`]. + +use crate::{ + chain::Client, + config::ProtocolId, + protocol::message::BlockAttributes, + request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + schema::v1::{block_request::FromBlock, BlockResponse, Direction}, + PeerId, ReputationChange, +}; +use codec::{Decode, Encode}; +use futures::{ + channel::{mpsc, oneshot}, + stream::StreamExt, +}; +use log::debug; +use lru::LruCache; +use prost::Message; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, One, Zero}, +}; +use std::{ + cmp::min, + hash::{Hash, Hasher}, + sync::Arc, + time::Duration, +}; + +const LOG_TARGET: &str = "sync"; +const MAX_BLOCKS_IN_RESPONSE: usize = 128; +const MAX_BODY_BYTES: usize = 8 * 1024 * 1024; +const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; + +mod rep { + use super::ReputationChange as Rep; + + /// Reputation change when a peer sent us the same request multiple times. + pub const SAME_REQUEST: Rep = Rep::new_fatal("Same block request multiple times"); +} + +/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { + ProtocolConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + request_timeout: Duration::from_secs(20), + inbound_queue: None, + } +} + +/// Generate the block protocol name from chain specific protocol identifier. +// Visibility `pub(crate)` to allow `crate::light_client_requests::sender` to generate block request +// protocol name and send block requests. +pub(crate) fn generate_protocol_name(protocol_id: &ProtocolId) -> String { + format!("/{}/sync/2", protocol_id.as_ref()) +} + +/// The key of [`BlockRequestHandler::seen_requests`]. 
+#[derive(Eq, PartialEq, Clone)]
+struct SeenRequestsKey<B: BlockT> {
+	peer: PeerId,
+	from: BlockId<B>,
+	max_blocks: usize,
+	direction: Direction,
+	attributes: BlockAttributes,
+	support_multiple_justifications: bool,
+}
+
+impl<B: BlockT> Hash for SeenRequestsKey<B> {
+	fn hash<H: Hasher>(&self, state: &mut H) {
+		self.peer.hash(state);
+		self.max_blocks.hash(state);
+		self.direction.hash(state);
+		self.attributes.hash(state);
+
+		match self.from {
+			BlockId::Hash(h) => h.hash(state),
+			BlockId::Number(n) => n.hash(state),
+		}
+	}
+}
+
+/// The value of [`BlockRequestHandler::seen_requests`].
+enum SeenRequestsValue {
+	/// First time we have seen the request.
+	First,
+	/// We have fulfilled the request `n` times.
+	Fulfilled(usize),
+}
+
+/// Handler for incoming block requests from a remote peer.
+pub struct BlockRequestHandler<B: BlockT> {
+	client: Arc<dyn Client<B>>,
+	request_receiver: mpsc::Receiver<IncomingRequest>,
+	/// Maps from request to number of times we have seen this request.
+	///
+	/// This is used to check if a peer is spamming us with the same request.
+	seen_requests: LruCache<SeenRequestsKey<B>, SeenRequestsValue>,
+}
+
+impl<B: BlockT> BlockRequestHandler<B> {
+	/// Create a new [`BlockRequestHandler`].
+	pub fn new(
+		protocol_id: &ProtocolId,
+		client: Arc<dyn Client<B>>,
+		num_peer_hint: usize,
+	) -> (Self, ProtocolConfig) {
+		// Reserve enough request slots for one request per peer when we are at the maximum
+		// number of peers.
+		let (tx, request_receiver) = mpsc::channel(num_peer_hint);
+
+		let mut protocol_config = generate_protocol_config(protocol_id);
+		protocol_config.inbound_queue = Some(tx);
+
+		let seen_requests = LruCache::new(num_peer_hint * 2);
+
+		(Self { client, request_receiver, seen_requests }, protocol_config)
+	}
+
+	/// Run [`BlockRequestHandler`].
+	pub async fn run(mut self) {
+		while let Some(request) = self.request_receiver.next().await {
+			let IncomingRequest { peer, payload, pending_response } = request;
+
+			match self.handle_request(payload, pending_response, &peer) {
+				Ok(()) => debug!(target: LOG_TARGET, "Handled block request from {}.", peer),
+				Err(e) => debug!(
+					target: LOG_TARGET,
+					"Failed to handle block request from {}: {}", peer, e,
+				),
+			}
+		}
+	}
+
+	fn handle_request(
+		&mut self,
+		payload: Vec<u8>,
+		pending_response: oneshot::Sender<OutgoingResponse>,
+		peer: &PeerId,
+	) -> Result<(), HandleRequestError> {
+		let request = crate::schema::v1::BlockRequest::decode(&payload[..])?;
+
+		let from_block_id = match request.from_block.ok_or(HandleRequestError::MissingFromField)?
{ + FromBlock::Hash(ref h) => { + let h = Decode::decode(&mut h.as_ref())?; + BlockId::::Hash(h) + }, + FromBlock::Number(ref n) => { + let n = Decode::decode(&mut n.as_ref())?; + BlockId::::Number(n) + }, + }; + + let max_blocks = if request.max_blocks == 0 { + MAX_BLOCKS_IN_RESPONSE + } else { + min(request.max_blocks as usize, MAX_BLOCKS_IN_RESPONSE) + }; + + let direction = + Direction::from_i32(request.direction).ok_or(HandleRequestError::ParseDirection)?; + + let attributes = BlockAttributes::from_be_u32(request.fields)?; + + let support_multiple_justifications = request.support_multiple_justifications; + + let key = SeenRequestsKey { + peer: *peer, + max_blocks, + direction, + from: from_block_id.clone(), + attributes, + support_multiple_justifications, + }; + + let mut reputation_change = None; + + match self.seen_requests.get_mut(&key) { + Some(SeenRequestsValue::First) => {}, + Some(SeenRequestsValue::Fulfilled(ref mut requests)) => { + *requests = requests.saturating_add(1); + + if *requests > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER { + reputation_change = Some(rep::SAME_REQUEST); + } + }, + None => { + self.seen_requests.put(key.clone(), SeenRequestsValue::First); + }, + } + + debug!( + target: LOG_TARGET, + "Handling block request from {}: Starting at `{:?}` with maximum blocks \ + of `{}`, direction `{:?}` and attributes `{:?}`.", + peer, + from_block_id, + max_blocks, + direction, + attributes, + ); + + let result = if reputation_change.is_none() { + let block_response = self.get_block_response( + attributes, + from_block_id, + direction, + max_blocks, + support_multiple_justifications, + )?; + + // If any of the blocks contains any data, we can consider it as successful request. + if block_response + .blocks + .iter() + .any(|b| !b.header.is_empty() || !b.body.is_empty() || b.is_empty_justification) + { + if let Some(value) = self.seen_requests.get_mut(&key) { + // If this is the first time we have processed this request, we need to change + // it to `Fulfilled`. + if let SeenRequestsValue::First = value { + *value = SeenRequestsValue::Fulfilled(1); + } + } + } + + let mut data = Vec::with_capacity(block_response.encoded_len()); + block_response.encode(&mut data)?; + + Ok(data) + } else { + Err(()) + }; + + pending_response + .send(OutgoingResponse { + result, + reputation_changes: reputation_change.into_iter().collect(), + sent_feedback: None, + }) + .map_err(|_| HandleRequestError::SendResponse) + } + + fn get_block_response( + &self, + attributes: BlockAttributes, + mut block_id: BlockId, + direction: Direction, + max_blocks: usize, + support_multiple_justifications: bool, + ) -> Result { + let get_header = attributes.contains(BlockAttributes::HEADER); + let get_body = attributes.contains(BlockAttributes::BODY); + let get_indexed_body = attributes.contains(BlockAttributes::INDEXED_BODY); + let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); + + let mut blocks = Vec::new(); + + let mut total_size: usize = 0; + while let Some(header) = self.client.header(block_id).unwrap_or_default() { + let number = *header.number(); + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + let justifications = if get_justification { + self.client.justifications(&BlockId::Hash(hash))? 
+ } else { + None + }; + + let (justifications, justification, is_empty_justification) = + if support_multiple_justifications { + let justifications = match justifications { + Some(v) => v.encode(), + None => Vec::new(), + }; + (justifications, Vec::new(), false) + } else { + // For now we keep compatibility by selecting precisely the GRANDPA one, and not + // just the first one. When sending we could have just taken the first one, + // since we don't expect there to be any other kind currently, but when + // receiving we need to add the engine ID tag. + // The ID tag is hardcoded here to avoid depending on the GRANDPA crate, and + // will be removed once we remove the backwards compatibility. + // See: https://github.com/paritytech/substrate/issues/8172 + let justification = + justifications.and_then(|just| just.into_justification(*b"FRNK")); + + let is_empty_justification = + justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); + + let justification = justification.unwrap_or_default(); + + (Vec::new(), justification, is_empty_justification) + }; + + let body = if get_body { + match self.client.block_body(&BlockId::Hash(hash))? { + Some(mut extrinsics) => + extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), + None => { + log::trace!(target: LOG_TARGET, "Missing data for block request."); + break + }, + } + } else { + Vec::new() + }; + + let indexed_body = if get_indexed_body { + match self.client.block_indexed_body(&BlockId::Hash(hash))? { + Some(transactions) => transactions, + None => { + log::trace!( + target: LOG_TARGET, + "Missing indexed block data for block request." + ); + // If the indexed body is missing we still continue returning headers. + // Ideally `None` should distinguish a missing body from the empty body, + // but the current protobuf based protocol does not allow it. 
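
The branch above keeps wire compatibility while multiple justifications are phased in: new-style responses carry the whole SCALE-encoded `Justifications` set, old-style ones carry just the untagged GRANDPA justification. A sketch of the split, assuming only that `Justifications` implements `codec::Encode` as in `sp_runtime`:

```rust
use codec::Encode;
use sp_runtime::Justifications;

// Returns the (new `justifications`, legacy `justification`) response fields.
fn split_justifications(
	justifications: Option<Justifications>,
	support_multiple: bool,
) -> (Vec<u8>, Vec<u8>) {
	if support_multiple {
		// New format: the whole set, SCALE-encoded.
		(justifications.map(|j| j.encode()).unwrap_or_default(), Vec::new())
	} else {
		// Legacy format: only the GRANDPA justification, selected by the
		// hardcoded engine id mentioned in the comment above.
		let legacy = justifications
			.and_then(|j| j.into_justification(*b"FRNK"))
			.unwrap_or_default();
		(Vec::new(), legacy)
	}
}
```
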
+ Vec::new() + }, + } + } else { + Vec::new() + }; + + let block_data = crate::schema::v1::BlockData { + hash: hash.encode(), + header: if get_header { header.encode() } else { Vec::new() }, + body, + receipt: Vec::new(), + message_queue: Vec::new(), + justification, + is_empty_justification, + justifications, + indexed_body, + }; + + total_size += block_data.body.iter().map(|ex| ex.len()).sum::(); + total_size += block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); + blocks.push(block_data); + + if blocks.len() >= max_blocks as usize || total_size > MAX_BODY_BYTES { + break + } + + match direction { + Direction::Ascending => block_id = BlockId::Number(number + One::one()), + Direction::Descending => { + if number.is_zero() { + break + } + block_id = BlockId::Hash(parent_hash) + }, + } + } + + Ok(BlockResponse { blocks }) + } +} + +#[derive(derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to decode block hash: {}.", _0)] + DecodeScale(codec::Error), + #[display(fmt = "Missing `BlockRequest::from_block` field.")] + MissingFromField, + #[display(fmt = "Failed to parse BlockRequest::direction.")] + ParseDirection, + Client(sp_blockchain::Error), + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/network/src/block_requests.rs b/client/network/src/block_requests.rs deleted file mode 100644 index 7ee8f18f3a26f..0000000000000 --- a/client/network/src/block_requests.rs +++ /dev/null @@ -1,866 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. -// -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! `NetworkBehaviour` implementation which handles incoming block requests. -//! -//! Every request is coming in on a separate connection substream which gets -//! closed after we have sent the response back. Incoming requests are encoded -//! as protocol buffers (cf. `api.v1.proto`). 
- -#![allow(unused)] - -use bytes::Bytes; -use codec::{Encode, Decode}; -use crate::{ - chain::Client, - config::ProtocolId, - protocol::{message::{self, BlockAttributes}}, - schema, -}; -use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use futures_timer::Delay; -use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, OutboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{DeniedUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol - } -}; -use prost::Message; -use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; -use std::{ - cmp::min, - collections::{HashMap, VecDeque}, - io, - iter, - marker::PhantomData, - pin::Pin, - sync::Arc, - time::Duration, - task::{Context, Poll} -}; -use void::{Void, unreachable}; -use wasm_timer::Instant; - -// Type alias for convenience. -pub type Error = Box; - -/// Event generated by the block requests behaviour. -#[derive(Debug)] -pub enum Event { - /// A request came and we have successfully answered it. - AnsweredRequest { - /// Peer which has emitted the request. - peer: PeerId, - /// Time elapsed between when we received the request and when we sent back the response. - total_handling_time: Duration, - }, - - /// A response to a block request has arrived. - Response { - peer: PeerId, - /// The original request passed to `send_request`. - original_request: message::BlockRequest, - response: message::BlockResponse, - /// Time elapsed between the start of the request and the response. - request_duration: Duration, - }, - - /// A request has been cancelled because the peer has disconnected. - /// Disconnects can also happen as a result of violating the network protocol. - /// - /// > **Note**: This event is NOT emitted if a request is overridden by calling `send_request`. - /// > For that, you must check the value returned by `send_request`. - RequestCancelled { - peer: PeerId, - /// The original request passed to `send_request`. - original_request: message::BlockRequest, - /// Time elapsed between the start of the request and the cancellation. - request_duration: Duration, - }, - - /// A request has timed out. - RequestTimeout { - peer: PeerId, - /// The original request passed to `send_request`. - original_request: message::BlockRequest, - /// Time elapsed between the start of the request and the timeout. - request_duration: Duration, - } -} - -/// Configuration options for `BlockRequests`. -#[derive(Debug, Clone)] -pub struct Config { - max_block_data_response: u32, - max_block_body_bytes: usize, - max_request_len: usize, - max_response_len: usize, - inactivity_timeout: Duration, - request_timeout: Duration, - protocol: String, -} - -impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. block data in response = 128 - /// - max. request size = 1 MiB - /// - max. response size = 16 MiB - /// - inactivity timeout = 15s - /// - request timeout = 40s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_block_data_response: 128, - max_block_body_bytes: 8 * 1024 * 1024, - max_request_len: 1024 * 1024, - max_response_len: 16 * 1024 * 1024, - inactivity_timeout: Duration::from_secs(15), - request_timeout: Duration::from_secs(40), - protocol: String::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. 
number of block data in a response. - pub fn set_max_block_data_response(&mut self, v: u32) -> &mut Self { - self.max_block_data_response = v; - self - } - - /// Limit the max. length of incoming block request bytes. - pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { - self.max_request_len = v; - self - } - - /// Limit the max. size of responses to our block requests. - pub fn set_max_response_len(&mut self, v: usize) -> &mut Self { - self.max_response_len = v; - self - } - - /// Limit the max. duration the substream may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Set the maximum total bytes of block bodies that are send in the response. - /// Note that at least one block is always sent regardless of the limit. - /// This should be lower than the value specified in `set_max_response_len` - /// accounting for headers, justifications and encoding overhead. - pub fn set_max_block_body_bytes(&mut self, v: usize) -> &mut Self { - self.max_block_body_bytes = v; - self - } - - /// Set protocol to use for upgrade negotiation. - pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut s = String::new(); - s.push_str("/"); - s.push_str(id.as_ref()); - s.push_str("/sync/2"); - self.protocol = s; - self - } -} - -/// The block request handling behaviour. -pub struct BlockRequests { - /// This behaviour's configuration. - config: Config, - /// Blockchain client. - chain: Arc>, - /// List of all active connections and the requests we've sent. - peers: HashMap>>, - /// Futures sending back the block request response. Returns the `PeerId` we sent back to, and - /// the total time the handling of this request took. - outgoing: FuturesUnordered>, - /// Events to return as soon as possible from `poll`. - pending_events: VecDeque, Event>>, -} - -/// Local tracking of a libp2p connection. -#[derive(Debug)] -struct Connection { - id: ConnectionId, - ongoing_request: Option>, -} - -#[derive(Debug)] -struct OngoingRequest { - /// `Instant` when the request has been emitted. Used for diagnostic purposes. - emitted: Instant, - request: message::BlockRequest, - timeout: Delay, -} - -/// Outcome of calling `send_request`. -#[derive(Debug)] -#[must_use] -pub enum SendRequestOutcome { - /// Request has been emitted. - Ok, - /// The request has been emitted and has replaced an existing request. - Replaced { - /// The previously-emitted request. - previous: message::BlockRequest, - /// Time that had elapsed since `previous` has been emitted. - request_duration: Duration, - }, - /// Didn't start a request because we have no connection to this node. - /// If `send_request` returns that, it is as if the function had never been called. - NotConnected, - /// Error while serializing the request. - EncodeError(prost::EncodeError), -} - -impl BlockRequests -where - B: Block, -{ - pub fn new(cfg: Config, chain: Arc>) -> Self { - BlockRequests { - config: cfg, - chain, - peers: HashMap::new(), - outgoing: FuturesUnordered::new(), - pending_events: VecDeque::new(), - } - } - - /// Returns the libp2p protocol name used on the wire (e.g. `/foo/sync/2`). - pub fn protocol_name(&self) -> &str { - &self.config.protocol - } - - /// Issue a new block request. - /// - /// Cancels any existing request targeting the same `PeerId`. - /// - /// If the response doesn't arrive in time, or if the remote answers improperly, the target - /// will be disconnected. 
- pub fn send_request(&mut self, target: &PeerId, req: message::BlockRequest) -> SendRequestOutcome { - // Determine which connection to send the request to. - let connection = if let Some(peer) = self.peers.get_mut(target) { - // We don't want to have multiple requests for any given node, so in priority try to - // find a connection with an existing request, to override it. - if let Some(entry) = peer.iter_mut().find(|c| c.ongoing_request.is_some()) { - entry - } else if let Some(entry) = peer.get_mut(0) { - entry - } else { - log::error!( - target: "sync", - "State inconsistency: empty list of peer connections" - ); - return SendRequestOutcome::NotConnected; - } - } else { - return SendRequestOutcome::NotConnected; - }; - - let protobuf_rq = build_protobuf_block_request( - req.fields, - req.from.clone(), - req.to.clone(), - req.direction, - req.max, - ); - - let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); - if let Err(err) = protobuf_rq.encode(&mut buf) { - log::warn!( - target: "sync", - "Failed to encode block request {:?}: {:?}", - protobuf_rq, - err - ); - return SendRequestOutcome::EncodeError(err); - } - - let previous_request = connection.ongoing_request.take(); - connection.ongoing_request = Some(OngoingRequest { - emitted: Instant::now(), - request: req.clone(), - timeout: Delay::new(self.config.request_timeout), - }); - - log::trace!(target: "sync", "Enqueueing block request to {:?}: {:?}", target, protobuf_rq); - self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::One(connection.id), - event: OutboundProtocol { - request: buf, - original_request: req, - max_response_size: self.config.max_response_len, - protocol: self.config.protocol.as_bytes().to_vec().into(), - }, - }); - - if let Some(previous_request) = previous_request { - log::debug!( - target: "sync", - "Replacing existing block request on connection {:?}", - connection.id - ); - SendRequestOutcome::Replaced { - previous: previous_request.request, - request_duration: previous_request.emitted.elapsed(), - } - } else { - SendRequestOutcome::Ok - } - } - - /// Callback, invoked when a new block request has been received from remote. 
- fn on_block_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::BlockRequest - ) -> Result - { - log::trace!( - target: "sync", - "Block request from peer {}: from block {:?} to block {:?}, max blocks {:?}", - peer, - request.from_block, - request.to_block, - request.max_blocks); - - let from_block_id = - match request.from_block { - Some(schema::v1::block_request::FromBlock::Hash(ref h)) => { - let h = Decode::decode(&mut h.as_ref())?; - BlockId::::Hash(h) - } - Some(schema::v1::block_request::FromBlock::Number(ref n)) => { - let n = Decode::decode(&mut n.as_ref())?; - BlockId::::Number(n) - } - None => { - let msg = "missing `BlockRequest::from_block` field"; - return Err(io::Error::new(io::ErrorKind::Other, msg).into()) - } - }; - - let max_blocks = - if request.max_blocks == 0 { - self.config.max_block_data_response - } else { - min(request.max_blocks, self.config.max_block_data_response) - }; - - let direction = - if request.direction == schema::v1::Direction::Ascending as i32 { - schema::v1::Direction::Ascending - } else if request.direction == schema::v1::Direction::Descending as i32 { - schema::v1::Direction::Descending - } else { - let msg = format!("invalid `BlockRequest::direction` value: {}", request.direction); - return Err(io::Error::new(io::ErrorKind::Other, msg).into()) - }; - - let attributes = BlockAttributes::from_be_u32(request.fields)?; - let get_header = attributes.contains(BlockAttributes::HEADER); - let get_body = attributes.contains(BlockAttributes::BODY); - let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); - - let mut blocks = Vec::new(); - let mut block_id = from_block_id; - let mut total_size = 0; - while let Some(header) = self.chain.header(block_id).unwrap_or(None) { - if blocks.len() >= max_blocks as usize - || (blocks.len() >= 1 && total_size > self.config.max_block_body_bytes) - { - break - } - - let number = *header.number(); - let hash = header.hash(); - let parent_hash = *header.parent_hash(); - let justification = if get_justification { - self.chain.justification(&BlockId::Hash(hash))? - } else { - None - }; - let is_empty_justification = justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); - - let body = if get_body { - match self.chain.block_body(&BlockId::Hash(hash))? 
{ - Some(mut extrinsics) => extrinsics.iter_mut() - .map(|extrinsic| extrinsic.encode()) - .collect(), - None => { - log::trace!(target: "sync", "Missing data for block request."); - break; - } - } - } else { - Vec::new() - }; - - let block_data = schema::v1::BlockData { - hash: hash.encode(), - header: if get_header { - header.encode() - } else { - Vec::new() - }, - body, - receipt: Vec::new(), - message_queue: Vec::new(), - justification: justification.unwrap_or_default(), - is_empty_justification, - }; - - total_size += block_data.body.len(); - blocks.push(block_data); - - match direction { - schema::v1::Direction::Ascending => { - block_id = BlockId::Number(number + One::one()) - } - schema::v1::Direction::Descending => { - if number.is_zero() { - break - } - block_id = BlockId::Hash(parent_hash) - } - } - } - - Ok(schema::v1::BlockResponse { blocks }) - } -} - -impl NetworkBehaviour for BlockRequests -where - B: Block -{ - type ProtocolsHandler = OneShotHandler, OutboundProtocol, NodeEvent>; - type OutEvent = Event; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_len: self.config.max_request_len, - protocol: self.config.protocol.as_bytes().to_owned().into(), - marker: PhantomData, - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.keep_alive_timeout = self.config.inactivity_timeout; - cfg.outbound_substream_timeout = self.config.request_timeout; - OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, _peer: &PeerId) { - } - - fn inject_disconnected(&mut self, _peer: &PeerId) { - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, id: &ConnectionId, _: &ConnectedPoint) { - self.peers.entry(peer_id.clone()) - .or_default() - .push(Connection { - id: *id, - ongoing_request: None, - }); - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, id: &ConnectionId, _: &ConnectedPoint) { - let mut needs_remove = false; - if let Some(entry) = self.peers.get_mut(peer_id) { - if let Some(pos) = entry.iter().position(|i| i.id == *id) { - let ongoing_request = entry.remove(pos).ongoing_request; - if let Some(ongoing_request) = ongoing_request { - log::debug!( - target: "sync", - "Connection {:?} with {} closed with ongoing sync request: {:?}", - id, - peer_id, - ongoing_request - ); - let ev = Event::RequestCancelled { - peer: peer_id.clone(), - original_request: ongoing_request.request.clone(), - request_duration: ongoing_request.emitted.elapsed(), - }; - self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); - } - if entry.is_empty() { - needs_remove = true; - } - } else { - log::error!( - target: "sync", - "State inconsistency: connection id not found in list" - ); - } - } else { - log::error!( - target: "sync", - "State inconsistency: peer_id not found in list of connections" - ); - } - if needs_remove { - self.peers.remove(peer_id); - } - } - - fn inject_event( - &mut self, - peer: PeerId, - connection_id: ConnectionId, - node_event: NodeEvent - ) { - match node_event { - NodeEvent::Request(request, mut stream, handling_start) => { - match self.on_block_request(&peer, &request) { - Ok(res) => { - log::trace!( - target: "sync", - "Enqueueing block response for peer {} with {} blocks", - peer, res.blocks.len() - ); - let mut data = Vec::with_capacity(res.encoded_len()); - if let Err(e) = res.encode(&mut data) { - log::debug!( - target: "sync", - "Error encoding block response for 
peer {}: {}", - peer, e - ) - } else { - self.outgoing.push(async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!( - target: "sync", - "Error writing block response: {}", - e - ); - } - (peer, handling_start.elapsed()) - }.boxed()); - } - } - Err(e) => log::debug!( - target: "sync", - "Error handling block request from peer {}: {}", peer, e - ) - } - } - NodeEvent::Response(original_request, response) => { - log::trace!( - target: "sync", - "Received block response from peer {} with {} blocks", - peer, response.blocks.len() - ); - let request_duration = if let Some(connections) = self.peers.get_mut(&peer) { - if let Some(connection) = connections.iter_mut().find(|c| c.id == connection_id) { - if let Some(ongoing_request) = &mut connection.ongoing_request { - if ongoing_request.request == original_request { - let request_duration = ongoing_request.emitted.elapsed(); - connection.ongoing_request = None; - request_duration - } else { - // We're no longer interested in that request. - log::debug!( - target: "sync", - "Received response from {} to obsolete block request {:?}", - peer, - original_request - ); - return; - } - } else { - // We remove from `self.peers` requests we're no longer interested in, - // so this can legitimately happen. - log::trace!( - target: "sync", - "Response discarded because it concerns an obsolete request" - ); - return; - } - } else { - log::error!( - target: "sync", - "State inconsistency: response on non-existing connection {:?}", - connection_id - ); - return; - } - } else { - log::error!( - target: "sync", - "State inconsistency: response on non-connected peer {}", - peer - ); - return; - }; - - let blocks = response.blocks.into_iter().map(|block_data| { - Ok(message::BlockData:: { - hash: Decode::decode(&mut block_data.hash.as_ref())?, - header: if !block_data.header.is_empty() { - Some(Decode::decode(&mut block_data.header.as_ref())?) - } else { - None - }, - body: if original_request.fields.contains(message::BlockAttributes::BODY) { - Some(block_data.body.iter().map(|body| { - Decode::decode(&mut body.as_ref()) - }).collect::, _>>()?) - } else { - None - }, - receipt: if !block_data.message_queue.is_empty() { - Some(block_data.receipt) - } else { - None - }, - message_queue: if !block_data.message_queue.is_empty() { - Some(block_data.message_queue) - } else { - None - }, - justification: if !block_data.justification.is_empty() { - Some(block_data.justification) - } else if block_data.is_empty_justification { - Some(Vec::new()) - } else { - None - }, - }) - }).collect::, codec::Error>>(); - - match blocks { - Ok(blocks) => { - let id = original_request.id; - let ev = Event::Response { - peer, - original_request, - response: message::BlockResponse:: { id, blocks }, - request_duration, - }; - self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); - } - Err(err) => { - log::debug!( - target: "sync", - "Failed to decode block response from peer {}: {}", peer, err - ); - } - } - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) - -> Poll, Event>> - { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(ev); - } - - // Check the request timeouts. 
- for (peer, connections) in &mut self.peers { - for connection in connections { - let ongoing_request = match &mut connection.ongoing_request { - Some(rq) => rq, - None => continue, - }; - - if let Poll::Ready(_) = Pin::new(&mut ongoing_request.timeout).poll(cx) { - let original_request = ongoing_request.request.clone(); - let request_duration = ongoing_request.emitted.elapsed(); - connection.ongoing_request = None; - log::debug!( - target: "sync", - "Request timeout for {}: {:?}", - peer, original_request - ); - let ev = Event::RequestTimeout { - peer: peer.clone(), - original_request, - request_duration, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - } - } - - if let Poll::Ready(Some((peer, total_handling_time))) = self.outgoing.poll_next_unpin(cx) { - let ev = Event::AnsweredRequest { - peer, - total_handling_time, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - - Poll::Pending - } -} - -/// Output type of inbound and outbound substream upgrades. -#[derive(Debug)] -pub enum NodeEvent { - /// Incoming request from remote, substream to use for the response, and when we started - /// handling this request. - Request(schema::v1::BlockRequest, T, Instant), - /// Incoming response from remote. - Response(message::BlockRequest, schema::v1::BlockResponse), -} - -/// Substream upgrade protocol. -/// -/// We attempt to parse an incoming protobuf encoded request (cf. `Request`) -/// which will be handled by the `BlockRequests` behaviour, i.e. the request -/// will become visible via `inject_node_event` which then dispatches to the -/// relevant callback to process the message and prepare a response. -#[derive(Debug, Clone)] -pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_len: usize, - /// The protocol to use during upgrade negotiation. - protocol: Bytes, - /// Type of the block. - marker: PhantomData, -} - -impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl InboundUpgrade for InboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - // This `Instant` will be passed around until the processing of this request is done. - let handling_start = Instant::now(); - - let future = async move { - let len = self.max_request_len; - let vec = read_one(&mut s, len).await?; - match schema::v1::BlockRequest::decode(&vec[..]) { - Ok(r) => Ok(NodeEvent::Request(r, s, handling_start)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }; - future.boxed() - } -} - -/// Substream upgrade protocol. -/// -/// Sends a request to remote and awaits the response. -#[derive(Debug, Clone)] -pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// The original request. Passed back through the API when the response comes back. - original_request: message::BlockRequest, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. 
- protocol: Bytes, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl OutboundUpgrade for OutboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - schema::v1::BlockResponse::decode(&vec[..]) - .map(|r| NodeEvent::Response(self.original_request, r)) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }.boxed() - } -} - -/// Build protobuf block request message. -pub(crate) fn build_protobuf_block_request( - attributes: BlockAttributes, - from_block: message::FromBlock, - to_block: Option, - direction: message::Direction, - max_blocks: Option, -) -> schema::v1::BlockRequest { - schema::v1::BlockRequest { - fields: attributes.to_be_u32(), - from_block: match from_block { - message::FromBlock::Hash(h) => - Some(schema::v1::block_request::FromBlock::Hash(h.encode())), - message::FromBlock::Number(n) => - Some(schema::v1::block_request::FromBlock::Number(n.encode())), - }, - to_block: to_block.map(|h| h.encode()).unwrap_or_default(), - direction: match direction { - message::Direction::Ascending => schema::v1::Direction::Ascending as i32, - message::Direction::Descending => schema::v1::Direction::Descending as i32, - }, - max_blocks: max_blocks.unwrap_or(0), - } -} diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 20fbe0284397d..7c131dd75370f 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,29 +18,31 @@ //! Blockchain access trait -use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sc_client_api::{BlockBackend, ProofProvider}; +pub use sc_client_api::{StorageData, StorageKey}; +pub use sc_consensus::ImportedState; +use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; /// Local client abstraction for the network. -pub trait Client: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync -{} - -impl Client for T - where - T: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync -{} - -/// Finality proof provider. -pub trait FinalityProofProvider: Send + Sync { - /// Prove finality of the block. 
- fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result>, Error>; +pub trait Client: + HeaderBackend + + ProofProvider + + BlockIdTo + + BlockBackend + + HeaderMetadata + + Send + + Sync +{ } -impl FinalityProofProvider for () { - fn prove_finality(&self, _for_block: Block::Hash, _request: &[u8]) -> Result>, Error> { - Ok(None) - } +impl Client for T where + T: HeaderBackend + + ProofProvider + + BlockIdTo + + BlockBackend + + HeaderMetadata + + Send + + Sync +{ } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 86450dc6e79bf..d08e29ef8589f 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,10 +21,15 @@ //! The [`Params`] struct is the struct that must be passed in order to initialize the networking. //! See the documentation of [`Params`]. -pub use crate::chain::{Client, FinalityProofProvider}; -pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; -pub use crate::request_responses::{IncomingRequest, ProtocolConfig as RequestResponseConfig}; -pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; +pub use crate::{ + chain::Client, + on_demand_layer::{AlwaysBadChecker, OnDemand}, + request_responses::{ + IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, + }, + warp_request_handler::WarpSyncProvider, +}; +pub use libp2p::{build_multiaddr, core::PublicKey, identity}; // Note: this re-export shouldn't be part of the public API of the crate and will be removed in // the future. @@ -37,20 +42,25 @@ use core::{fmt, iter}; use futures::future; use libp2p::{ identity::{ed25519, Keypair}, - multiaddr, wasm_ext, Multiaddr, PeerId, + multiaddr, Multiaddr, PeerId, }; use prometheus_endpoint::Registry; -use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; -use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; +use sc_consensus::ImportQueue; +use sp_consensus::block_validation::BlockAnnounceValidator; +use sp_runtime::traits::Block as BlockT; use std::{ + borrow::Cow, collections::HashMap, + convert::TryFrom, error::Error, fs, + future::Future, io::{self, Write}, net::Ipv4Addr, path::{Path, PathBuf}, + pin::Pin, str, + str::FromStr, sync::Arc, }; use zeroize::Zeroize; @@ -64,23 +74,15 @@ pub struct Params { /// default. pub executor: Option + Send>>) + Send>>, + /// How to spawn the background task dedicated to the transactions handler. + pub transactions_handler_executor: Box + Send>>) + Send>, + /// Network layer configuration. pub network_config: NetworkConfiguration, /// Client that contains the blockchain. pub chain: Arc>, - /// Finality proof provider. - /// - /// This object, if `Some`, is used when a node on the network requests a proof of finality - /// from us. - pub finality_proof_provider: Option>>, - - /// How to build requests for proofs of finality. - /// - /// This object, if `Some`, is used when we need a proof of finality from another node. - pub finality_proof_request_builder: Option>, - /// The `OnDemand` object acts as a "receiver" for block data requests from the client. 
/// If `Some`, the network worker will process these requests and answer them. /// Normally used only for light clients. @@ -106,6 +108,39 @@ pub struct Params { /// Registry for recording prometheus metrics to. pub metrics_registry: Option, + + /// Request response configuration for the block request protocol. + /// + /// [`RequestResponseConfig::name`] is used to tag outgoing block requests with the correct + /// protocol name. In addition all of [`RequestResponseConfig`] is used to handle incoming + /// block requests, if enabled. + /// + /// Can be constructed either via [`crate::block_request_handler::generate_protocol_config`] + /// allowing outgoing but not incoming requests, or constructed via + /// [`crate::block_request_handler::BlockRequestHandler::new`] allowing both outgoing and + /// incoming requests. + pub block_request_protocol_config: RequestResponseConfig, + + /// Request response configuration for the light client request protocol. + /// + /// Can be constructed either via + /// [`crate::light_client_requests::generate_protocol_config`] allowing outgoing but not + /// incoming requests, or constructed via + /// [`crate::light_client_requests::handler::LightClientRequestHandler::new`] allowing + /// both outgoing and incoming requests. + pub light_client_request_protocol_config: RequestResponseConfig, + + /// Request response configuration for the state request protocol. + /// + /// Can be constructed either via + /// [`crate::block_request_handler::generate_protocol_config`] allowing outgoing but not + /// incoming requests, or constructed via + /// [`crate::state_request_handler::StateRequestHandler::new`] allowing + /// both outgoing and incoming requests. + pub state_request_protocol_config: RequestResponseConfig, + + /// Optional warp sync protocol support. Include protocol config and sync provider. + pub warp_sync: Option<(Arc>, RequestResponseConfig)>, } /// Role of the local node. @@ -115,18 +150,8 @@ pub enum Role { Full, /// Regular light node. Light, - /// Sentry node that guards an authority. Will be reported as "authority" on the wire protocol. - Sentry { - /// Address and identity of the validator nodes that we're guarding. - /// - /// The nodes will be granted some priviledged status. - validators: Vec, - }, /// Actual authority. - Authority { - /// List of public addresses and identities of our sentry nodes. - sentry_nodes: Vec, - } + Authority, } impl Role { @@ -135,43 +160,22 @@ impl Role { matches!(self, Role::Authority { .. }) } - /// True for `Role::Authority` and `Role::Sentry` since they're both - /// announced as having the authority role to the network. - pub fn is_network_authority(&self) -> bool { - matches!(self, Role::Authority { .. } | Role::Sentry { .. }) + /// True for `Role::Light` + pub fn is_light(&self) -> bool { + matches!(self, Role::Light { .. }) } } impl fmt::Display for Role { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Role::Full => write!(f, "FULL"), - Role::Light => write!(f, "LIGHT"), - Role::Sentry { .. } => write!(f, "SENTRY"), - Role::Authority { .. } => write!(f, "AUTHORITY"), + Self::Full => write!(f, "FULL"), + Self::Light => write!(f, "LIGHT"), + Self::Authority { .. } => write!(f, "AUTHORITY"), } } } -/// Finality proof request builder. -pub trait FinalityProofRequestBuilder: Send { - /// Build data blob, associated with the request. - fn build_request_data(&mut self, hash: &B::Hash) -> Vec; -} - -/// Implementation of `FinalityProofRequestBuilder` that builds a dummy empty request. 
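// [Editor's example] A hedged sketch of filling in the new request-response fields
// of `Params`, based only on the APIs named in the docs above. `protocol_id`,
// `client` and `spawn` are assumed to exist and are not part of this patch.
//
// Outgoing-only: the node can issue block requests but will not answer them.
let block_request_protocol_config =
    crate::block_request_handler::generate_protocol_config(&protocol_id);
//
// Outgoing and incoming: build the handler, keep its config, spawn its task
// (constructor arguments here are assumptions, not confirmed by this patch).
// let (handler, block_request_protocol_config) =
//     BlockRequestHandler::new(&protocol_id, client.clone(), 50);
// spawn(Box::pin(handler.run()));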
-#[derive(Debug, Default)] -pub struct DummyFinalityProofRequestBuilder; - -impl FinalityProofRequestBuilder for DummyFinalityProofRequestBuilder { - fn build_request_data(&mut self, _: &B::Hash) -> Vec { - Vec::new() - } -} - -/// Shared finality proof request builder struct used by the queue. -pub type BoxFinalityProofRequestBuilder = Box + Send + Sync>; - /// Result of the transaction import. #[derive(Clone, Copy, Debug)] pub enum TransactionImport { @@ -185,8 +189,8 @@ pub enum TransactionImport { None, } -/// Fuure resolving to transaction import result. -pub type TransactionImportFuture = Pin + Send>>; +/// Future resolving to transaction import result. +pub type TransactionImportFuture = Pin + Send>>; /// Transaction pool interface pub trait TransactionPool: Send + Sync { @@ -197,10 +201,7 @@ pub trait TransactionPool: Send + Sync { /// Import a transaction into the pool. /// /// This will return future. - fn import( - &self, - transaction: B::Extrinsic, - ) -> TransactionImportFuture; + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; /// Notify the pool about transactions broadcast. fn on_broadcasted(&self, propagations: HashMap>); /// Get transaction by hash. @@ -224,16 +225,15 @@ impl TransactionPool for EmptyTransaction Default::default() } - fn import( - &self, - _transaction: B::Extrinsic - ) -> TransactionImportFuture { + fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { Box::pin(future::ready(TransactionImport::KnownGood)) } fn on_broadcasted(&self, _: HashMap>) {} - fn transaction(&self, _h: &H) -> Option { None } + fn transaction(&self, _h: &H) -> Option { + None + } } /// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. @@ -242,7 +242,7 @@ pub struct ProtocolId(smallvec::SmallVec<[u8; 6]>); impl<'a> From<&'a str> for ProtocolId { fn from(bytes: &'a str) -> ProtocolId { - ProtocolId(bytes.as_bytes().into()) + Self(bytes.as_bytes().into()) } } @@ -272,17 +272,16 @@ impl fmt::Debug for ProtocolId { /// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); /// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); /// ``` -/// pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { let addr: Multiaddr = addr_str.parse()?; parse_addr(addr) } /// Splits a Multiaddress into a Multiaddress and PeerId. -pub fn parse_addr(mut addr: Multiaddr)-> Result<(PeerId, Multiaddr), ParseErr> { +pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| ParseErr::InvalidPeerId)?, + Some(multiaddr::Protocol::P2p(key)) => + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, _ => return Err(ParseErr::PeerIdMissing), }; @@ -314,7 +313,7 @@ pub struct MultiaddrWithPeerId { impl MultiaddrWithPeerId { /// Concatenates the multiaddress and peer ID into one multiaddress containing both. 
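// [Editor's example] Round-tripping an address through `MultiaddrWithPeerId`,
// using only the `FromStr` impl and the `concat` method defined in this file.
let with_peer: MultiaddrWithPeerId =
    "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"
        .parse()
        .expect("address ends with a /p2p/ peer id component");
// `concat` re-appends the peer id, restoring the original multiaddress.
let _full: Multiaddr = with_peer.concat();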
pub fn concat(&self) -> Multiaddr { - let proto = multiaddr::Protocol::P2p(From::from(self.peer_id.clone())); + let proto = multiaddr::Protocol::P2p(From::from(self.peer_id)); self.multiaddr.clone().with(proto) } } @@ -330,10 +329,7 @@ impl FromStr for MultiaddrWithPeerId { fn from_str(s: &str) -> Result { let (peer_id, multiaddr) = parse_str_addr(s)?; - Ok(MultiaddrWithPeerId { - peer_id, - multiaddr, - }) + Ok(MultiaddrWithPeerId { peer_id, multiaddr }) } } @@ -364,9 +360,9 @@ pub enum ParseErr { impl fmt::Display for ParseErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - ParseErr::MultiaddrParse(err) => write!(f, "{}", err), - ParseErr::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), - ParseErr::PeerIdMissing => write!(f, "Peer id is missing from the address"), + Self::MultiaddrParse(err) => write!(f, "{}", err), + Self::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), + Self::PeerIdMissing => write!(f, "Peer id is missing from the address"), } } } @@ -374,16 +370,38 @@ impl fmt::Display for ParseErr { impl std::error::Error for ParseErr { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { - ParseErr::MultiaddrParse(err) => Some(err), - ParseErr::InvalidPeerId => None, - ParseErr::PeerIdMissing => None, + Self::MultiaddrParse(err) => Some(err), + Self::InvalidPeerId => None, + Self::PeerIdMissing => None, } } } impl From for ParseErr { fn from(err: multiaddr::Error) -> ParseErr { - ParseErr::MultiaddrParse(err) + Self::MultiaddrParse(err) + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +/// Sync operation mode. +pub enum SyncMode { + /// Full block download and verification. + Full, + /// Download blocks and the latest state. + Fast { + /// Skip state proof download and verification. + skip_proofs: bool, + /// Download indexed transactions for recent blocks. + storage_chain_mode: bool, + }, + /// Warp sync - verify authority set transitions and the latest state. + Warp, +} + +impl Default for SyncMode { + fn default() -> Self { + Self::Full } } @@ -400,19 +418,12 @@ pub struct NetworkConfiguration { pub boot_nodes: Vec, /// The node key configuration, which determines the node's network identity keypair. pub node_key: NodeKeyConfig, - /// List of notifications protocols that the node supports. Must also include a - /// `ConsensusEngineId` for backwards-compatibility. - pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, /// List of request-response protocols that the node supports. pub request_response_protocols: Vec, - /// Maximum allowed number of incoming connections. - pub in_peers: u32, - /// Number of outgoing connections we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// The non-reserved peer mode. - pub non_reserved_mode: NonReservedPeerMode, + /// Configuration for the default set of nodes used for block syncing and transactions. + pub default_peers_set: SetConfig, + /// Configuration for extra sets of nodes. + pub extra_sets: Vec, /// Client identifier. Sent over the wire for debugging purposes. pub client_version: String, /// Name of the node. Sent over the wire for debugging purposes. @@ -421,11 +432,43 @@ pub struct NetworkConfiguration { pub transport: TransportConfig, /// Maximum number of peers to ask the same blocks in parallel. pub max_parallel_downloads: u32, + /// Initial syncing mode. 
+ pub sync_mode: SyncMode, + + /// True if Kademlia random discovery should be enabled. + /// + /// If true, the node will automatically randomly walk the DHT in order to find new peers. + pub enable_dht_random_walk: bool, + /// Should we insert non-global addresses into the DHT? pub allow_non_globals_in_dht: bool, - /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in the - /// presence of potentially adversarial nodes. + + /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in + /// the presence of potentially adversarial nodes. pub kademlia_disjoint_query_paths: bool, + /// Enable serving block data over IPFS bitswap. + pub ipfs_server: bool, + + /// Size of Yamux receive window of all substreams. `None` for the default (256kiB). + /// Any value less than 256kiB is invalid. + /// + /// # Context + /// + /// By design, notifications substreams on top of Yamux connections only allow up to `N` bytes + /// to be transferred at a time, where `N` is the Yamux receive window size configurable here. + /// This means, in practice, that every `N` bytes must be acknowledged by the receiver before + /// the sender can send more data. The maximum bandwidth of each notifications substream is + /// therefore `N / round_trip_time`. + /// + /// It is recommended to leave this to `None`, and use a request-response protocol instead if + /// a large amount of data must be transferred. The reason why the value is configurable is + /// that some Substrate users mis-use notification protocols to send large amounts of data. + /// As such, this option isn't designed to stay and will likely get removed in the future. + /// + /// Note that configuring a value here isn't a modification of the Yamux protocol, but rather + /// a modification of the way the implementation works. Different nodes with different + /// configured values remain compatible with each other. 
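// [Editor's example] Opting into the new sync modes; a sketch using only items
// introduced in this patch (node name, client version and limits are placeholders).
let mut config =
    NetworkConfiguration::new("example-node", "example-client/1.0", Default::default(), None);
// Fast sync: fetch blocks plus the latest state, still verifying state proofs.
config.sync_mode = SyncMode::Fast { skip_proofs: false, storage_chain_mode: false };
// Leave the Yamux receive window at its default, as recommended above.
config.yamux_window_size = None;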
+ pub yamux_window_size: Option, } impl NetworkConfiguration { @@ -436,70 +479,138 @@ impl NetworkConfiguration { node_key: NodeKeyConfig, net_config_path: Option, ) -> Self { - NetworkConfiguration { + Self { net_config_path, listen_addresses: Vec::new(), public_addresses: Vec::new(), boot_nodes: Vec::new(), node_key, - notifications_protocols: Vec::new(), request_response_protocols: Vec::new(), - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, + default_peers_set: Default::default(), + extra_sets: Vec::new(), client_version: client_version.into(), node_name: node_name.into(), - transport: TransportConfig::Normal { - enable_mdns: false, - allow_private_ipv4: true, - wasm_external_transport: None, - }, + transport: TransportConfig::Normal { enable_mdns: false, allow_private_ipv4: true }, max_parallel_downloads: 5, + sync_mode: SyncMode::Full, + enable_dht_random_walk: true, allow_non_globals_in_dht: false, kademlia_disjoint_query_paths: false, + yamux_window_size: None, + ipfs_server: false, } } - /// Create new default configuration for localhost-only connection with random port (useful for testing) + /// Create new default configuration for localhost-only connection with random port (useful for + /// testing) pub fn new_local() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - - config.listen_addresses = vec![ - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect() - ]; + .collect()]; config.allow_non_globals_in_dht = true; config } - /// Create new default configuration for localhost-only connection with random port (useful for testing) + /// Create new default configuration for localhost-only connection with random port (useful for + /// testing) pub fn new_memory() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - - config.listen_addresses = vec![ - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect() - ]; + .collect()]; config.allow_non_globals_in_dht = true; config } } +/// Configuration for a set of nodes. +#[derive(Clone, Debug)] +pub struct SetConfig { + /// Maximum allowed number of incoming substreams related to this set. + pub in_peers: u32, + /// Number of outgoing substreams related to this set that we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically + /// refused. + pub non_reserved_mode: NonReservedPeerMode, +} + +impl Default for SetConfig { + fn default() -> Self { + Self { + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } + } +} + +/// Extension to [`SetConfig`] for sets that aren't the default set. 
+/// +/// > **Note**: As new fields might be added in the future, please consider using the `new` method +/// > and modifiers instead of creating this struct manually. +#[derive(Clone, Debug)] +pub struct NonDefaultSetConfig { + /// Name of the notifications protocols of this set. A substream on this set will be + /// considered established once this protocol is open. + /// + /// > **Note**: This field isn't present for the default set, as this is handled internally + /// > by the networking code. + pub notifications_protocol: Cow<'static, str>, + /// If the remote reports that it doesn't support the protocol indicated in the + /// `notifications_protocol` field, then each of these fallback names will be tried one by + /// one. + /// + /// If a fallback is used, it will be reported in + /// [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. + pub fallback_names: Vec>, + /// Maximum allowed size of single notifications. + pub max_notification_size: u64, + /// Base configuration. + pub set_config: SetConfig, +} + +impl NonDefaultSetConfig { + /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. + pub fn new(notifications_protocol: Cow<'static, str>, max_notification_size: u64) -> Self { + Self { + notifications_protocol, + max_notification_size, + fallback_names: Vec::new(), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } + + /// Modifies the configuration to allow non-reserved nodes. + pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { + self.set_config.in_peers = in_peers; + self.set_config.out_peers = out_peers; + self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; + } + + /// Add a node to the list of reserved nodes. + pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { + self.set_config.reserved_nodes.push(peer); + } +} + /// Configuration for the transport layer. #[derive(Clone, Debug)] pub enum TransportConfig { @@ -511,17 +622,8 @@ pub enum TransportConfig { /// If true, allow connecting to private IPv4 addresses (as defined in /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have - /// been passed in [`NetworkConfiguration::reserved_nodes`] or - /// [`NetworkConfiguration::boot_nodes`]. + /// been passed in [`NetworkConfiguration::boot_nodes`]. allow_private_ipv4: bool, - - /// Optional external implementation of a libp2p transport. Used in WASM contexts where we - /// need some binding between the networking provided by the operating system or environment - /// and libp2p. - /// - /// This parameter exists whatever the target platform is, but it is expected to be set to - /// `Some` only when compiling for WASM. - wasm_external_transport: Option, }, /// Only allow connections within the same process. @@ -542,8 +644,8 @@ impl NonReservedPeerMode { /// Attempt to parse the peer mode from a string. pub fn parse(s: &str) -> Option { match s { - "accept" => Some(NonReservedPeerMode::Accept), - "deny" => Some(NonReservedPeerMode::Deny), + "accept" => Some(Self::Accept), + "deny" => Some(Self::Deny), _ => None, } } @@ -555,12 +657,12 @@ impl NonReservedPeerMode { #[derive(Clone, Debug)] pub enum NodeKeyConfig { /// A Ed25519 secret key configuration. 
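// [Editor's example] Building an extra notification set with the `new` constructor
// and modifiers, as the note on `NonDefaultSetConfig` above recommends over manual
// construction. Protocol name, notification size and peer counts are placeholders.
let mut gossip_set = NonDefaultSetConfig::new("/example/gossip/1".into(), 1024 * 1024);
gossip_set.allow_non_reserved(25, 25);
config.extra_sets.push(gossip_set);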
- Ed25519(Secret) + Ed25519(Secret), } impl Default for NodeKeyConfig { fn default() -> NodeKeyConfig { - NodeKeyConfig::Ed25519(Secret::New) + Self::Ed25519(Secret::New) } } @@ -579,15 +681,15 @@ pub enum Secret { /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. File(PathBuf), /// Always generate a new secret key `K`. - New + New, } impl fmt::Debug for Secret { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Secret::Input(_) => f.debug_tuple("Secret::Input").finish(), - Secret::File(path) => f.debug_tuple("Secret::File").field(path).finish(), - Secret::New => f.debug_tuple("Secret::New").finish(), + Self::Input(_) => f.debug_tuple("Secret::Input").finish(), + Self::File(path) => f.debug_tuple("Secret::File").field(path).finish(), + Self::New => f.debug_tuple("Secret::New").finish(), } } } @@ -597,44 +699,36 @@ impl NodeKeyConfig { /// /// * If the secret is configured as input, the corresponding keypair is returned. /// - /// * If the secret is configured as a file, it is read from that file, if it exists. - /// Otherwise a new secret is generated and stored. In either case, the - /// keypair obtained from the secret is returned. + /// * If the secret is configured as a file, it is read from that file, if it exists. Otherwise + /// a new secret is generated and stored. In either case, the keypair obtained from the + /// secret is returned. /// - /// * If the secret is configured to be new, it is generated and the corresponding - /// keypair is returned. + /// * If the secret is configured to be new, it is generated and the corresponding keypair is + /// returned. pub fn into_keypair(self) -> io::Result { use NodeKeyConfig::*; match self { - Ed25519(Secret::New) => - Ok(Keypair::generate_ed25519()), - - Ed25519(Secret::Input(k)) => - Ok(Keypair::Ed25519(k.into())), - - Ed25519(Secret::File(f)) => - get_secret( - f, - |mut b| { - match String::from_utf8(b.to_vec()) - .ok() - .and_then(|s|{ - if s.len() == 64 { - hex::decode(&s).ok() - } else { - None - }} - ) - { - Some(s) => ed25519::SecretKey::from_bytes(s), - _ => ed25519::SecretKey::from_bytes(&mut b), - } - }, - ed25519::SecretKey::generate, - |b| b.as_ref().to_vec() - ) - .map(ed25519::Keypair::from) - .map(Keypair::Ed25519), + Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), + + Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())), + + Ed25519(Secret::File(f)) => get_secret( + f, + |mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| { + if s.len() == 64 { + hex::decode(&s).ok() + } else { + None + } + }) { + Some(s) => ed25519::SecretKey::from_bytes(s), + _ => ed25519::SecretKey::from_bytes(&mut b), + }, + ed25519::SecretKey::generate, + |b| b.as_ref().to_vec(), + ) + .map(ed25519::Keypair::from) + .map(Keypair::Ed25519), } } } @@ -651,9 +745,9 @@ where W: Fn(&K) -> Vec, { std::fs::read(&file) - .and_then(|mut sk_bytes| - parse(&mut sk_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))) + .and_then(|mut sk_bytes| { + parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + }) .or_else(|e| { if e.kind() == io::ErrorKind::NotFound { file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; @@ -671,7 +765,7 @@ where /// Write secret bytes to a file. fn write_secret_file
<P>
(path: P, sk_bytes: &[u8]) -> io::Result<()> where - P: AsRef + P: AsRef, { let mut file = open_secret_file(&path)?; file.write_all(sk_bytes) @@ -681,26 +775,19 @@ where #[cfg(unix)] fn open_secret_file
<P>
(path: P) -> io::Result where - P: AsRef + P: AsRef, { use std::os::unix::fs::OpenOptionsExt; - fs::OpenOptions::new() - .write(true) - .create_new(true) - .mode(0o600) - .open(path) + fs::OpenOptions::new().write(true).create_new(true).mode(0o600).open(path) } /// Opens a file containing a secret key in write mode. #[cfg(not(unix))] fn open_secret_file
<P>
(path: P) -> Result where - P: AsRef + P: AsRef, { - fs::OpenOptions::new() - .write(true) - .create_new(true) - .open(path) + fs::OpenOptions::new().write(true).create_new(true).open(path) } #[cfg(test)] @@ -716,7 +803,7 @@ mod tests { match kp { Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), - _ => panic!("Unexpected keypair.") + _ => panic!("Unexpected keypair."), } } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index ab9ee2d4dba0b..71e46f73234c7 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Discovery mechanisms of Substrate. //! @@ -43,30 +45,42 @@ //! **Important**: In order for the discovery mechanism to work properly, there needs to be an //! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn //! of a node's address, you must call `add_self_reported_address`. -//! 
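// [Editor's example] The module docs above require feeding learned addresses back
// into discovery; in practice the identify protocol drives this. A sketch assuming
// `behaviour`, `peer_id`, `protocol_id` and `addr` are in scope (compare the
// `discovery_working` test at the bottom of this file):
behaviour.add_self_reported_address(
    &peer_id,
    [protocol_name_from_protocol_id(&protocol_id)].iter(),
    addr,
);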
-use crate::config::ProtocolId; -use crate::utils::LruHashSet; +use crate::{config::ProtocolId, utils::LruHashSet}; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; -use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use libp2p::swarm::protocols_handler::multi::MultiHandler; -use libp2p::kad::{Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, QueryResult, Quorum, Record}; -use libp2p::kad::GetClosestPeersError; -use libp2p::kad::handler::KademliaHandler; -use libp2p::kad::QueryId; -use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; -#[cfg(not(target_os = "unknown"))] -use libp2p::swarm::toggle::Toggle; -#[cfg(not(target_os = "unknown"))] -use libp2p::mdns::{Mdns, MdnsEvent}; -use libp2p::multiaddr::Protocol; -use log::{debug, info, trace, warn}; -use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, num::NonZeroUsize, time::Duration}; -use std::task::{Context, Poll}; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, Multiaddr, PeerId, PublicKey, + }, + kad::{ + handler::KademliaHandlerProto, + record::{ + self, + store::{MemoryStore, RecordStore}, + }, + GetClosestPeersError, Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, + QueryId, QueryResult, Quorum, Record, + }, + mdns::{Mdns, MdnsConfig, MdnsEvent}, + multiaddr::Protocol, + swarm::{ + protocols_handler::multi::IntoMultiHandler, IntoProtocolsHandler, NetworkBehaviour, + NetworkBehaviourAction, PollParameters, ProtocolsHandler, + }, +}; +use log::{debug, error, info, trace, warn}; use sp_core::hexdisplay::HexDisplay; +use std::{ + cmp, + collections::{HashMap, HashSet, VecDeque}, + io, + num::NonZeroUsize, + task::{Context, Poll}, + time::Duration, +}; /// Maximum number of known external addresses that we will cache. /// This only affects whether we will log whenever we (re-)discover @@ -79,7 +93,8 @@ const MAX_KNOWN_EXTERNAL_ADDRESSES: usize = 32; /// one protocol via [`DiscoveryConfig::add_protocol`]. pub struct DiscoveryConfig { local_peer_id: PeerId, - user_defined: Vec<(PeerId, Multiaddr)>, + permanent_addresses: Vec<(PeerId, Multiaddr)>, + dht_random_walk: bool, allow_private_ipv4: bool, allow_non_globals_in_dht: bool, discovery_only_if_under_num: u64, @@ -91,15 +106,16 @@ pub struct DiscoveryConfig { impl DiscoveryConfig { /// Create a default configuration with the given public key. pub fn new(local_public_key: PublicKey) -> Self { - DiscoveryConfig { + Self { local_peer_id: local_public_key.into_peer_id(), - user_defined: Vec::new(), + permanent_addresses: Vec::new(), + dht_random_walk: true, allow_private_ipv4: true, allow_non_globals_in_dht: false, discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - protocol_ids: HashSet::new() + protocol_ids: HashSet::new(), } } @@ -110,11 +126,18 @@ impl DiscoveryConfig { } /// Set custom nodes which never expire, e.g. bootstrap or reserved nodes. - pub fn with_user_defined(&mut self, user_defined: I) -> &mut Self + pub fn with_permanent_addresses(&mut self, permanent_addresses: I) -> &mut Self where - I: IntoIterator + I: IntoIterator, { - self.user_defined.extend(user_defined); + self.permanent_addresses.extend(permanent_addresses); + self + } + + /// Whether the discovery behaviour should periodically perform a random + /// walk on the DHT to discover peers. 
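// [Editor's example] The renamed and new builder methods together; a sketch using
// only the `DiscoveryConfig` API from this patch. `local_public_key` and
// `bootnode_addresses` are assumed to exist.
let mut config = DiscoveryConfig::new(local_public_key);
config
    .with_permanent_addresses(bootnode_addresses)
    .with_dht_random_walk(true) // pass `false` to disable the periodic random walk
    .allow_private_ipv4(true)
    .discovery_limit(25)
    .add_protocol(ProtocolId::from("dot"));
let behaviour = config.finish();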
+ pub fn with_dht_random_walk(&mut self, value: bool) -> &mut Self { + self.dht_random_walk = value; self } @@ -132,9 +155,6 @@ impl DiscoveryConfig { /// Should MDNS discovery be supported? pub fn with_mdns(&mut self, value: bool) -> &mut Self { - if value && cfg!(target_os = "unknown") { - log::warn!(target: "sub-libp2p", "mDNS is not available on this platform") - } self.enable_mdns = value; self } @@ -143,7 +163,7 @@ impl DiscoveryConfig { pub fn add_protocol(&mut self, id: ProtocolId) -> &mut Self { if self.protocol_ids.contains(&id) { warn!(target: "sub-libp2p", "Discovery already registered for protocol {:?}", id); - return self; + return self } self.protocol_ids.insert(id); @@ -160,9 +180,10 @@ impl DiscoveryConfig { /// Create a `DiscoveryBehaviour` from this config. pub fn finish(self) -> DiscoveryBehaviour { - let DiscoveryConfig { + let Self { local_peer_id, - user_defined, + permanent_addresses, + dht_random_walk, allow_private_ipv4, allow_non_globals_in_dht, discovery_only_if_under_num, @@ -171,7 +192,8 @@ impl DiscoveryConfig { protocol_ids, } = self; - let kademlias = protocol_ids.into_iter() + let kademlias = protocol_ids + .into_iter() .map(|protocol_id| { let proto_name = protocol_name_from_protocol_id(&protocol_id); @@ -183,10 +205,10 @@ impl DiscoveryConfig { config.set_kbucket_inserts(KademliaBucketInserts::Manual); config.disjoint_query_paths(kademlia_disjoint_query_paths); - let store = MemoryStore::new(local_peer_id.clone()); - let mut kad = Kademlia::with_config(local_peer_id.clone(), store, config); + let store = MemoryStore::new(local_peer_id); + let mut kad = Kademlia::with_config(local_peer_id, store, config); - for (peer_id, addr) in &user_defined { + for (peer_id, addr) in &permanent_addresses { kad.add_address(peer_id, addr.clone()); } @@ -195,31 +217,29 @@ impl DiscoveryConfig { .collect(); DiscoveryBehaviour { - user_defined, + permanent_addresses, + ephemeral_addresses: HashMap::new(), kademlias, - next_kad_random_query: Delay::new(Duration::new(0, 0)), + next_kad_random_query: if dht_random_walk { + Some(Delay::new(Duration::new(0, 0))) + } else { + None + }, duration_to_next_kad: Duration::from_secs(1), pending_events: VecDeque::new(), local_peer_id, num_connections: 0, allow_private_ipv4, discovery_only_if_under_num, - #[cfg(not(target_os = "unknown"))] mdns: if enable_mdns { - match Mdns::new() { - Ok(mdns) => Some(mdns).into(), - Err(err) => { - warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - None.into() - } - } + MdnsWrapper::Instantiating(Mdns::new(MdnsConfig::default()).boxed()) } else { - None.into() + MdnsWrapper::Disabled }, allow_non_globals_in_dht, known_external_addresses: LruHashSet::new( NonZeroUsize::new(MAX_KNOWN_EXTERNAL_ADDRESSES) - .expect("value is a constant; constant is non-zero; qed.") + .expect("value is a constant; constant is non-zero; qed."), ), } } @@ -229,14 +249,17 @@ impl DiscoveryConfig { pub struct DiscoveryBehaviour { /// User-defined list of nodes and their addresses. Typically includes bootstrap nodes and /// reserved nodes. - user_defined: Vec<(PeerId, Multiaddr)>, + permanent_addresses: Vec<(PeerId, Multiaddr)>, + /// Same as `permanent_addresses`, except that addresses that fail to reach a peer are + /// removed. + ephemeral_addresses: HashMap>, /// Kademlia requests and answers. kademlias: HashMap>, /// Discovers nodes on the local network. - #[cfg(not(target_os = "unknown"))] - mdns: Toggle, - /// Stream that fires when we need to perform the next random Kademlia query. 
- next_kad_random_query: Delay, + mdns: MdnsWrapper, + /// Stream that fires when we need to perform the next random Kademlia query. `None` if + /// random walking is disabled. + next_kad_random_query: Option, /// After `next_kad_random_query` triggers, the next one triggers after this duration. duration_to_next_kad: Duration, /// Events to return in priority when polled. @@ -246,7 +269,7 @@ pub struct DiscoveryBehaviour { /// Number of nodes we're currently connected to. num_connections: u64, /// If false, `addresses_of_peer` won't return any private IPv4 address, except for the ones - /// stored in `user_defined`. + /// stored in `permanent_addresses` or `ephemeral_addresses`. allow_private_ipv4: bool, /// Number of active connections over which we interrupt the discovery process. discovery_only_if_under_num: u64, @@ -278,12 +301,14 @@ impl DiscoveryBehaviour { /// /// If we didn't know this address before, also generates a `Discovered` event. pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - if self.user_defined.iter().all(|(p, a)| *p != peer_id && *a != addr) { + let addrs_list = self.ephemeral_addresses.entry(peer_id).or_default(); + if !addrs_list.iter().any(|a| *a == addr) { for k in self.kademlias.values_mut() { k.add_address(&peer_id, addr.clone()); } + self.pending_events.push_back(DiscoveryOut::Discovered(peer_id.clone())); - self.user_defined.push((peer_id, addr)); + addrs_list.push(addr); } } @@ -296,10 +321,10 @@ impl DiscoveryBehaviour { &mut self, peer_id: &PeerId, supported_protocols: impl Iterator>, - addr: Multiaddr + addr: Multiaddr, ) { if !self.allow_non_globals_in_dht && !self.can_add_to_dht(&addr) { - log::trace!(target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id); + trace!(target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id); return } @@ -307,7 +332,7 @@ impl DiscoveryBehaviour { for protocol in supported_protocols { for kademlia in self.kademlias.values_mut() { if protocol.as_ref() == kademlia.protocol_name() { - log::trace!( + trace!( target: "sub-libp2p", "Adding self-reported address {} from {} to Kademlia DHT {}.", addr, peer_id, String::from_utf8_lossy(kademlia.protocol_name()), @@ -319,7 +344,7 @@ impl DiscoveryBehaviour { } if !added { - log::trace!( + trace!( target: "sub-libp2p", "Ignoring self-reported address {} from {} as remote node is not part of any \ Kademlia DHTs supported by the local node.", addr, peer_id, @@ -344,7 +369,8 @@ impl DiscoveryBehaviour { for k in self.kademlias.values_mut() { if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) { warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); - self.pending_events.push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0))); + self.pending_events + .push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0))); } } } @@ -353,14 +379,16 @@ impl DiscoveryBehaviour { /// /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm /// of their lower bound. 
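// [Editor's example] Consuming the per-kbucket counts described above, e.g. for
// metrics; `behaviour` is assumed. Each entry pairs the base-2 logarithm of a
// kbucket's lower distance bound with the number of peers stored in that bucket.
for (protocol_id, buckets) in behaviour.num_entries_per_kbucket() {
    for (lower_ilog2, num_entries) in buckets {
        log::debug!("{:?}: kbucket >= 2^{}: {} peers", protocol_id, lower_ilog2, num_entries);
    }
}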
- pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { - self.kademlias.iter_mut() - .map(|(id, kad)| { - let buckets = kad.kbuckets() - .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) - .collect(); - (id, buckets) - }) + pub fn num_entries_per_kbucket( + &mut self, + ) -> impl ExactSizeIterator)> { + self.kademlias.iter_mut().map(|(id, kad)| { + let buckets = kad + .kbuckets() + .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) + .collect(); + (id, buckets) + }) } /// Returns the number of records in the Kademlia record stores. @@ -373,7 +401,9 @@ impl DiscoveryBehaviour { } /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { // Note that this code is ok only because we use a `MemoryStore`. If the records were // for example stored on disk, this would load every single one of them every single time. self.kademlias.iter_mut().map(|(id, kad)| { @@ -385,7 +415,6 @@ impl DiscoveryBehaviour { /// Can the given `Multiaddr` be put into the DHT? /// /// This test is successful only for global IP addresses and DNS names. - // // NB: Currently all DNS names are allowed and no check for TLD suffixes is done // because the set of valid domains is highly dynamic and would require frequent // updates, for example by utilising publicsuffix.org or IANA. @@ -393,9 +422,9 @@ impl DiscoveryBehaviour { let ip = match addr.iter().next() { Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), - Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) - => return true, - _ => return false + Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => + return true, + _ => return false, }; ip.is_global() } @@ -440,44 +469,53 @@ pub enum DiscoveryOut { ValuePutFailed(record::Key, Duration), /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. + /// + /// Only happens if [`DiscoveryConfig::with_dht_random_walk`] has been configured to `true`. 
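// [Editor's example] Reacting to the event, e.g. to trace how often random walks
// run per protocol; `event` is a `DiscoveryOut` assumed to be in scope.
if let DiscoveryOut::RandomKademliaStarted(protocol_ids) = event {
    for protocol_id in &protocol_ids {
        log::trace!("random Kademlia walk started for {:?}", protocol_id);
    }
}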
RandomKademliaStarted(Vec), } impl NetworkBehaviour for DiscoveryBehaviour { - type ProtocolsHandler = MultiHandler>; + type ProtocolsHandler = IntoMultiHandler>; type OutEvent = DiscoveryOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { - let iter = self.kademlias.iter_mut() + let iter = self + .kademlias + .iter_mut() .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); - MultiHandler::try_from_iter(iter) - .expect("There can be at most one handler per `ProtocolId` and \ - protocol names contain the `ProtocolId` so no two protocol \ - names in `self.kademlias` can be equal which is the only error \ - `try_from_iter` can return, therefore this call is guaranteed \ - to succeed; qed") + IntoMultiHandler::try_from_iter(iter).expect( + "There can be at most one handler per `ProtocolId` and protocol names contain the \ + `ProtocolId` so no two protocol names in `self.kademlias` can be equal which is the \ + only error `try_from_iter` can return, therefore this call is guaranteed to succeed; \ + qed", + ) } fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - let mut list = self.user_defined.iter() + let mut list = self + .permanent_addresses + .iter() .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) .collect::>(); + if let Some(ephemeral_addresses) = self.ephemeral_addresses.get(peer_id) { + list.extend(ephemeral_addresses.clone()); + } + { let mut list_to_filter = Vec::new(); for k in self.kademlias.values_mut() { list_to_filter.extend(k.addresses_of_peer(peer_id)) } - #[cfg(not(target_os = "unknown"))] list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); if !self.allow_private_ipv4 { list_to_filter.retain(|addr| { if let Some(Protocol::Ip4(addr)) = addr.iter().next() { if addr.is_private() { - return false; + return false } } @@ -493,7 +531,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { list } - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.num_connections += 1; for k in self.kademlias.values_mut() { NetworkBehaviour::inject_connection_established(k, peer_id, conn, endpoint) @@ -506,7 +549,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.num_connections -= 1; for k in self.kademlias.values_mut() { NetworkBehaviour::inject_connection_closed(k, peer_id, conn, endpoint) @@ -523,8 +571,14 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, - error: &dyn std::error::Error + error: &dyn std::error::Error, ) { + if let Some(peer_id) = peer_id { + if let Some(list) = self.ephemeral_addresses.get_mut(peer_id) { + list.retain(|a| a != addr); + } + } + for k in self.kademlias.values_mut() { NetworkBehaviour::inject_addr_reach_failure(k, peer_id, addr, error) } @@ -534,24 +588,26 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, peer_id: PeerId, connection: ConnectionId, - (pid, event): ::OutEvent, + (pid, event): <::Handler as ProtocolsHandler>::OutEvent, ) { if let Some(kad) = self.kademlias.get_mut(&pid) { return kad.inject_event(peer_id, connection, event) } - log::error!(target: "sub-libp2p", + error!( + target: "sub-libp2p", 
"inject_node_event: no kademlia instance registered for protocol {:?}", - pid) + pid, + ) } fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - let new_addr = addr.clone() - .with(Protocol::P2p(self.local_peer_id.clone().into())); + let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id.into())); // NOTE: we might re-discover the same address multiple times // in which case we just want to refrain from logging. if self.known_external_addresses.insert(new_addr.clone()) { - info!(target: "sub-libp2p", + info!( + target: "sub-libp2p", "🔍 Discovered new external address for our node: {}", new_addr, ); @@ -562,9 +618,18 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + // We intentionally don't remove the element from `known_external_addresses` in order + // to not print the log line again. + for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_expired_listen_addr(k, addr) + NetworkBehaviour::inject_expired_external_addr(k, addr) + } + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_expired_listen_addr(k, id, addr) } } @@ -574,9 +639,15 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_new_listener(&mut self, id: ListenerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_listener(k, id) + } + } + + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_new_listen_addr(k, addr) + NetworkBehaviour::inject_new_listen_addr(k, id, addr) } } @@ -598,44 +669,50 @@ impl NetworkBehaviour for DiscoveryBehaviour { params: &mut impl PollParameters, ) -> Poll< NetworkBehaviourAction< - ::InEvent, + <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent, >, - > { + >{ // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } // Poll the stream that fires when we need to start a random Kademlia query. - while let Poll::Ready(_) = self.next_kad_random_query.poll_unpin(cx) { - let actually_started = if self.num_connections < self.discovery_only_if_under_num { - let random_peer_id = PeerId::random(); - debug!(target: "sub-libp2p", - "Libp2p <= Starting random Kademlia request for {:?}", - random_peer_id); - for k in self.kademlias.values_mut() { - k.get_closest_peers(random_peer_id.clone()); + if let Some(next_kad_random_query) = self.next_kad_random_query.as_mut() { + while let Poll::Ready(_) = next_kad_random_query.poll_unpin(cx) { + let actually_started = if self.num_connections < self.discovery_only_if_under_num { + let random_peer_id = PeerId::random(); + debug!( + target: "sub-libp2p", + "Libp2p <= Starting random Kademlia request for {:?}", + random_peer_id, + ); + for k in self.kademlias.values_mut() { + k.get_closest_peers(random_peer_id); + } + true + } else { + debug!( + target: "sub-libp2p", + "Kademlia paused due to high number of connections ({})", + self.num_connections + ); + false + }; + + // Schedule the next random query with exponentially increasing delay, + // capped at 60 seconds. 
+ *next_kad_random_query = Delay::new(self.duration_to_next_kad); + self.duration_to_next_kad = + cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); + + if actually_started { + let ev = DiscoveryOut::RandomKademliaStarted( + self.kademlias.keys().cloned().collect(), + ); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } - true - } else { - debug!( - target: "sub-libp2p", - "Kademlia paused due to high number of connections ({})", - self.num_connections - ); - false - }; - - // Schedule the next random query with exponentially increasing delay, - // capped at 60 seconds. - self.next_kad_random_query = Delay::new(self.duration_to_next_kad); - self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, - Duration::from_secs(60)); - - if actually_started { - let ev = DiscoveryOut::RandomKademliaStarted(self.kademlias.keys().cloned().collect()); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); } } @@ -646,86 +723,131 @@ impl NetworkBehaviour for DiscoveryBehaviour { NetworkBehaviourAction::GenerateEvent(ev) => match ev { KademliaEvent::RoutingUpdated { peer, .. } => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, KademliaEvent::UnroutablePeer { peer, .. } => { let ev = DiscoveryOut::UnroutablePeer(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, KademliaEvent::RoutablePeer { peer, .. } => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, KademliaEvent::PendingRoutablePeer { .. } => { // We are not interested in this event at the moment. - } - KademliaEvent::QueryResult { result: QueryResult::GetClosestPeers(res), .. } => { - match res { - Err(GetClosestPeersError::Timeout { key, peers }) => { - debug!(target: "sub-libp2p", - "Libp2p => Query for {:?} timed out with {} results", - HexDisplay::from(&key), peers.len()); - }, - Ok(ok) => { - trace!(target: "sub-libp2p", - "Libp2p => Query for {:?} yielded {:?} results", - HexDisplay::from(&ok.key), ok.peers.len()); - if ok.peers.is_empty() && self.num_connections != 0 { - debug!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ - results"); - } + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::GetClosestPeers(res), + .. + } => match res { + Err(GetClosestPeersError::Timeout { key, peers }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Query for {:?} timed out with {} results", + HexDisplay::from(&key), peers.len(), + ); + }, + Ok(ok) => { + trace!( + target: "sub-libp2p", + "Libp2p => Query for {:?} yielded {:?} results", + HexDisplay::from(&ok.key), ok.peers.len(), + ); + if ok.peers.is_empty() && self.num_connections != 0 { + debug!( + target: "sub-libp2p", + "Libp2p => Random Kademlia query has yielded empty results", + ); } - } - } - KademliaEvent::QueryResult { result: QueryResult::GetRecord(res), stats, .. } => { + }, + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::GetRecord(res), + stats, + .. 
+ } => { let ev = match res { Ok(ok) => { - let results = ok.records + let results = ok + .records .into_iter() .map(|r| (r.record.key, r.record.value)) .collect(); - DiscoveryOut::ValueFound(results, stats.duration().unwrap_or_else(Default::default)) - } + DiscoveryOut::ValueFound( + results, + stats.duration().unwrap_or_else(Default::default), + ) + }, Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { - trace!(target: "sub-libp2p", - "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) - } + trace!( + target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", + e, + ); + DiscoveryOut::ValueNotFound( + e.into_key(), + stats.duration().unwrap_or_else(Default::default), + ) + }, Err(e) => { - warn!(target: "sub-libp2p", - "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) - } + debug!( + target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", + e, + ); + DiscoveryOut::ValueNotFound( + e.into_key(), + stats.duration().unwrap_or_else(Default::default), + ) + }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::QueryResult { result: QueryResult::PutRecord(res), stats, .. } => { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::PutRecord(res), + stats, + .. + } => { let ev = match res { - Ok(ok) => DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_else(Default::default)), + Ok(ok) => DiscoveryOut::ValuePut( + ok.key, + stats.duration().unwrap_or_else(Default::default), + ), Err(e) => { - warn!(target: "sub-libp2p", - "Libp2p => Failed to put record: {:?}", e); - DiscoveryOut::ValuePutFailed(e.into_key(), stats.duration().unwrap_or_else(Default::default)) - } + debug!( + target: "sub-libp2p", + "Libp2p => Failed to put record: {:?}", + e, + ); + DiscoveryOut::ValuePutFailed( + e.into_key(), + stats.duration().unwrap_or_else(Default::default), + ) + }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::QueryResult { result: QueryResult::RepublishRecord(res), .. } => { - match res { - Ok(ok) => debug!(target: "sub-libp2p", - "Libp2p => Record republished: {:?}", - ok.key), - Err(e) => warn!(target: "sub-libp2p", - "Libp2p => Republishing of record {:?} failed with: {:?}", - e.key(), e) - } - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::RepublishRecord(res), + .. + } => match res { + Ok(ok) => debug!( + target: "sub-libp2p", + "Libp2p => Record republished: {:?}", + ok.key, + ), + Err(e) => debug!( + target: "sub-libp2p", + "Libp2p => Republishing of record {:?} failed with: {:?}", + e.key(), e, + ), + }, // We never start any other type of query. 
e => { - warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) - } - } + debug!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) + }, + }, NetworkBehaviourAction::DialAddress { address } => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), NetworkBehaviourAction::DialPeer { peer_id, condition } => @@ -734,41 +856,54 @@ impl NetworkBehaviour for DiscoveryBehaviour { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, - event: (pid.clone(), event) + event: (pid.clone(), event), + }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, }), - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), } } } // Poll mDNS. - #[cfg(not(target_os = "unknown"))] while let Poll::Ready(ev) = self.mdns.poll(cx, params) { match ev { - NetworkBehaviourAction::GenerateEvent(event) => { - match event { - MdnsEvent::Discovered(list) => { - if self.num_connections >= self.discovery_only_if_under_num { - continue; - } + NetworkBehaviourAction::GenerateEvent(event) => match event { + MdnsEvent::Discovered(list) => { + if self.num_connections >= self.discovery_only_if_under_num { + continue + } - self.pending_events.extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - }, - MdnsEvent::Expired(_) => {} - } + self.pending_events + .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + } + }, + MdnsEvent::Expired(_) => {}, }, NetworkBehaviourAction::DialAddress { address } => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), NetworkBehaviourAction::DialPeer { peer_id, condition } => return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), - NetworkBehaviourAction::NotifyHandler { event, .. } => - match event {}, // `event` is an enum with no variant - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, /* `event` is an enum with no variant */ + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, + }), } } @@ -785,19 +920,61 @@ fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { v } +/// [`Mdns::new`] returns a future. Instead of forcing [`DiscoveryConfig::finish`] and all its +/// callers to be async, lazily instantiate [`Mdns`]. 
+enum MdnsWrapper { + Instantiating(futures::future::BoxFuture<'static, std::io::Result>), + Ready(Mdns), + Disabled, +} + +impl MdnsWrapper { + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + match self { + Self::Instantiating(_) => Vec::new(), + Self::Ready(mdns) => mdns.addresses_of_peer(peer_id), + Self::Disabled => Vec::new(), + } + } + + fn poll( + &mut self, + cx: &mut Context<'_>, + params: &mut impl PollParameters, + ) -> Poll> { + loop { + match self { + Self::Instantiating(fut) => + *self = match futures::ready!(fut.as_mut().poll(cx)) { + Ok(mdns) => Self::Ready(mdns), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + Self::Disabled + }, + }, + Self::Ready(mdns) => return mdns.poll(cx, params), + Self::Disabled => return Poll::Pending, + } + } + } +} + #[cfg(test)] mod tests { + use super::{protocol_name_from_protocol_id, DiscoveryConfig, DiscoveryOut}; use crate::config::ProtocolId; use futures::prelude::*; - use libp2p::identity::Keypair; - use libp2p::{Multiaddr, PeerId}; - use libp2p::core::upgrade; - use libp2p::core::transport::{Transport, MemoryTransport}; - use libp2p::noise; - use libp2p::swarm::Swarm; - use libp2p::yamux; + use libp2p::{ + core::{ + transport::{MemoryTransport, Transport}, + upgrade, + }, + identity::Keypair, + noise, + swarm::{Swarm, SwarmEvent}, + yamux, Multiaddr, PeerId, + }; use std::{collections::HashSet, task::Poll}; - use super::{DiscoveryConfig, DiscoveryOut, protocol_name_from_protocol_id}; #[test] fn discovery_working() { @@ -805,50 +982,57 @@ mod tests { let protocol_id = ProtocolId::from("dot"); // Build swarms whose behaviour is `DiscoveryBehaviour`, each aware of - // the first swarm via `with_user_defined`. - let mut swarms = (0..25).map(|i| { - let keypair = Keypair::generate_ed25519(); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); - - let transport = MemoryTransport - .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(yamux::Config::default()); - - let behaviour = { - let mut config = DiscoveryConfig::new(keypair.public()); - config.with_user_defined(first_swarm_peer_id_and_addr.clone()) - .allow_private_ipv4(true) - .allow_non_globals_in_dht(true) - .discovery_limit(50) - .add_protocol(protocol_id.clone()); - - config.finish() - }; - - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); - let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - - if i == 0 { - first_swarm_peer_id_and_addr = Some((keypair.public().into_peer_id(), listen_addr.clone())) - } + // the first swarm via `with_permanent_addresses`. 
+ let mut swarms = (0..25) + .map(|i| { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = + noise::Keypair::::new().into_authentic(&keypair).unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(yamux::YamuxConfig::default()) + .boxed(); + + let behaviour = { + let mut config = DiscoveryConfig::new(keypair.public()); + config + .with_permanent_addresses(first_swarm_peer_id_and_addr.clone()) + .allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(protocol_id.clone()); + + config.finish() + }; + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = + format!("/memory/{}", rand::random::()).parse().unwrap(); + + if i == 0 { + first_swarm_peer_id_and_addr = + Some((keypair.public().into_peer_id(), listen_addr.clone())) + } - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); - (swarm, listen_addr) - }).collect::>(); + swarm.listen_on(listen_addr.clone()).unwrap(); + (swarm, listen_addr) + }) + .collect::>(); // Build a `Vec>` with the list of nodes remaining to be discovered. - let mut to_discover = (0..swarms.len()).map(|n| { - (0..swarms.len()) - // Skip the first swarm as all other swarms already know it. - .skip(1) - .filter(|p| *p != n) - .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) - .collect::>() - }).collect::>(); + let mut to_discover = (0..swarms.len()) + .map(|n| { + (0..swarms.len()) + // Skip the first swarm as all other swarms already know it. + .skip(1) + .filter(|p| *p != n) + .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) + .collect::>() + }) + .collect::>(); let fut = futures::future::poll_fn(move |cx| { 'polling: loop { @@ -856,29 +1040,46 @@ mod tests { match swarms[swarm_n].0.poll_next_unpin(cx) { Poll::Ready(Some(e)) => { match e { - DiscoveryOut::UnroutablePeer(other) | DiscoveryOut::Discovered(other) => { - // Call `add_self_reported_address` to simulate identify happening. - let addr = swarms.iter().find_map(|(s, a)| - if s.local_peer_id == other { - Some(a.clone()) - } else { - None - }) - .unwrap(); - swarms[swarm_n].0.add_self_reported_address( - &other, - [protocol_name_from_protocol_id(&protocol_id)].iter(), - addr, - ); - - to_discover[swarm_n].remove(&other); + SwarmEvent::Behaviour(behavior) => { + match behavior { + DiscoveryOut::UnroutablePeer(other) | + DiscoveryOut::Discovered(other) => { + // Call `add_self_reported_address` to simulate identify + // happening. 
+ let addr = swarms + .iter() + .find_map(|(s, a)| { + if s.behaviour().local_peer_id == other { + Some(a.clone()) + } else { + None + } + }) + .unwrap(); + swarms[swarm_n] + .0 + .behaviour_mut() + .add_self_reported_address( + &other, + [protocol_name_from_protocol_id(&protocol_id)] + .iter(), + addr, + ); + + to_discover[swarm_n].remove(&other); + }, + DiscoveryOut::RandomKademliaStarted(_) => {}, + e => { + panic!("Unexpected event: {:?}", e) + }, + } }, - DiscoveryOut::RandomKademliaStarted(_) => {}, - e => {panic!("Unexpected event: {:?}", e)}, + // ignore non Behaviour events + _ => {}, } continue 'polling - } - _ => {} + }, + _ => {}, } } break @@ -902,7 +1103,8 @@ mod tests { let mut discovery = { let keypair = Keypair::generate_ed25519(); let mut config = DiscoveryConfig::new(keypair.public()); - config.allow_private_ipv4(true) + config + .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) .add_protocol(supported_protocol_id.clone()); @@ -921,7 +1123,8 @@ mod tests { for kademlia in discovery.kademlias.values_mut() { assert!( - kademlia.kbucket(remote_peer_id.clone()) + kademlia + .kbucket(remote_peer_id) .expect("Remote peer id not to be equal to local peer id.") .is_empty(), "Expect peer with unsupported protocol not to be added." @@ -938,7 +1141,8 @@ mod tests { for kademlia in discovery.kademlias.values_mut() { assert_eq!( 1, - kademlia.kbucket(remote_peer_id.clone()) + kademlia + .kbucket(remote_peer_id) .expect("Remote peer id not to be equal to local peer id.") .num_entries(), "Expect peer with supported protocol to be added." @@ -954,7 +1158,8 @@ mod tests { let mut discovery = { let keypair = Keypair::generate_ed25519(); let mut config = DiscoveryConfig::new(keypair.public()); - config.allow_private_ipv4(true) + config + .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) .add_protocol(protocol_a.clone()) @@ -974,19 +1179,22 @@ mod tests { assert_eq!( 1, - discovery.kademlias.get_mut(&protocol_a) + discovery + .kademlias + .get_mut(&protocol_a) .expect("Kademlia instance to exist.") - .kbucket(remote_peer_id.clone()) + .kbucket(remote_peer_id) .expect("Remote peer id not to be equal to local peer id.") .num_entries(), "Expected remote peer to be added to `protocol_a` Kademlia instance.", - ); assert!( - discovery.kademlias.get_mut(&protocol_b) + discovery + .kademlias + .get_mut(&protocol_b) .expect("Kademlia instance to exist.") - .kbucket(remote_peer_id.clone()) + .kbucket(remote_peer_id) .expect("Remote peer id not to be equal to local peer id.") .is_empty(), "Expected remote peer not to be added to `protocol_b` Kademlia instance.", diff --git a/client/network/src/error.rs b/client/network/src/error.rs index 7d7603ce92aab..b8a31def7dc61 100644 --- a/client/network/src/error.rs +++ b/client/network/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,7 +19,7 @@ //! Substrate network possible errors. 
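The `source()` hunk just below groups all source-less variants into one `|` pattern and delegates the wrapped errors. A reduced sketch of the same shape using only the standard library (the variant set is trimmed to two; the real enum has more):

```rust
use std::fmt;

#[derive(Debug)]
enum Error {
	Io(std::io::Error),
	DuplicateBootnode { address: String },
}

impl fmt::Display for Error {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		match self {
			Self::Io(err) => write!(f, "I/O error: {}", err),
			Self::DuplicateBootnode { address } =>
				write!(f, "duplicate bootnode: {}", address),
		}
	}
}

impl std::error::Error for Error {
	fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
		match self {
			// Variants wrapping another error expose it as their source...
			Self::Io(err) => Some(err),
			// ...while purely descriptive variants are grouped and return `None`.
			Self::DuplicateBootnode { .. } => None,
		}
	}
}

fn main() {
	let err = Error::DuplicateBootnode { address: "/memory/1".into() };
	assert!(std::error::Error::source(&err).is_none());
	println!("{}", err);
}
```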
 use crate::config::TransportConfig;
-use libp2p::{PeerId, Multiaddr};
+use libp2p::{Multiaddr, PeerId};
 use std::{borrow::Cow, fmt};

@@ -38,7 +38,7 @@ pub enum Error {
 		fmt = "The same bootnode (`{}`) is registered with two different peer ids: `{}` and `{}`",
 		address,
 		first_id,
-		second_id,
+		second_id
 	)]
 	DuplicateBootnode {
 		/// The address of the bootnode.
@@ -53,7 +53,7 @@ pub enum Error {
 	/// The network addresses are invalid because they don't match the transport.
 	#[display(
 		fmt = "The following addresses are invalid because they don't match the transport: {:?}",
-		addresses,
+		addresses
 	)]
 	AddressesForAnotherTransport {
 		/// Transport used.
@@ -79,12 +79,12 @@ impl fmt::Debug for Error {
 impl std::error::Error for Error {
 	fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
 		match self {
-			Error::Io(ref err) => Some(err),
-			Error::Client(ref err) => Some(err),
-			Error::DuplicateBootnode { .. } => None,
-			Error::Prometheus(ref err) => Some(err),
-			Error::AddressesForAnotherTransport { .. } => None,
-			Error::DuplicateRequestResponseProtocol { .. } => None,
+			Self::Io(ref err) => Some(err),
+			Self::Client(ref err) => Some(err),
+			Self::Prometheus(ref err) => Some(err),
+			Self::DuplicateBootnode { .. } |
+			Self::AddressesForAnotherTransport { .. } |
+			Self::DuplicateRequestResponseProtocol { .. } => None,
 		}
 	}
 }
diff --git a/client/network/src/finality_requests.rs b/client/network/src/finality_requests.rs
deleted file mode 100644
index 55f56b9a0cc25..0000000000000
--- a/client/network/src/finality_requests.rs
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2020 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-//
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
-
-//! `NetworkBehaviour` implementation which handles incoming finality proof requests.
-//!
-//! Every request is coming in on a separate connection substream which gets
-//! closed after we have sent the response back. Incoming requests are encoded
-//! as protocol buffers (cf. `finality.v1.proto`).
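As the module docs above state, the deleted behaviour framed each request and response as a protocol buffer. A small sketch of that encode/decode round trip with `prost`; the hand-written `FinalityProofRequest` merely stands in for the `schema::v1::finality` types generated from `finality.v1.proto`:

```rust
use prost::Message;

// Stand-in for a generated type such as
// `schema::v1::finality::FinalityProofRequest`.
#[derive(Clone, PartialEq, Message)]
struct FinalityProofRequest {
	#[prost(bytes, tag = "1")]
	block_hash: Vec<u8>,
	#[prost(bytes, tag = "2")]
	request: Vec<u8>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
	let rq = FinalityProofRequest { block_hash: vec![1; 32], request: Vec::new() };

	// Encode the way the deleted code does: reserve `encoded_len()` bytes
	// up front, then append the protobuf framing.
	let mut buf = Vec::with_capacity(rq.encoded_len());
	rq.encode(&mut buf)?;

	// Decoding accepts any `Buf`, here a plain byte slice.
	let back = FinalityProofRequest::decode(&buf[..])?;
	assert_eq!(rq, back);
	Ok(())
}
```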
- -#![allow(unused)] - -use bytes::Bytes; -use codec::{Encode, Decode}; -use crate::{ - chain::FinalityProofProvider, - config::ProtocolId, - protocol::message, - schema, -}; -use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, OutboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{DeniedUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol - } -}; -use prost::Message; -use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; -use std::{ - cmp::min, - collections::VecDeque, - io, - iter, - marker::PhantomData, - sync::Arc, - time::Duration, - task::{Context, Poll} -}; -use void::{Void, unreachable}; - -// Type alias for convenience. -pub type Error = Box; - -/// Event generated by the finality proof requests behaviour. -#[derive(Debug)] -pub enum Event { - /// A response to a finality proof request has arrived. - Response { - peer: PeerId, - /// Block hash originally passed to `send_request`. - block_hash: B::Hash, - /// Finality proof returned by the remote. - proof: Vec, - }, -} - -/// Configuration options for `FinalityProofRequests`. -#[derive(Debug, Clone)] -pub struct Config { - max_request_len: usize, - max_response_len: usize, - inactivity_timeout: Duration, - protocol: Bytes, -} - -impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. request size = 1 MiB - /// - max. response size = 1 MiB - /// - inactivity timeout = 15s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_request_len: 1024 * 1024, - max_response_len: 1024 * 1024, - inactivity_timeout: Duration::from_secs(15), - protocol: Bytes::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. length of incoming finality proof request bytes. - pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { - self.max_request_len = v; - self - } - - /// Limit the max. length of incoming finality proof response bytes. - pub fn set_max_response_len(&mut self, v: usize) -> &mut Self { - self.max_response_len = v; - self - } - - /// Limit the max. duration the substream may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Set protocol to use for upgrade negotiation. - pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut v = Vec::new(); - v.extend_from_slice(b"/"); - v.extend_from_slice(id.as_ref().as_bytes()); - v.extend_from_slice(b"/finality-proof/1"); - self.protocol = v.into(); - self - } -} - -/// The finality proof request handling behaviour. -pub struct FinalityProofRequests { - /// This behaviour's configuration. - config: Config, - /// How to construct finality proofs. - finality_proof_provider: Option>>, - /// Futures sending back the finality proof request responses. - outgoing: FuturesUnordered>, - /// Events to return as soon as possible from `poll`. - pending_events: VecDeque, Event>>, -} - -impl FinalityProofRequests -where - B: Block, -{ - /// Initializes the behaviour. - /// - /// If the proof provider is `None`, then the behaviour will not support the finality proof - /// requests protocol. 
- pub fn new(cfg: Config, finality_proof_provider: Option>>) -> Self { - FinalityProofRequests { - config: cfg, - finality_proof_provider, - outgoing: FuturesUnordered::new(), - pending_events: VecDeque::new(), - } - } - - /// Issue a new finality proof request. - /// - /// If the response doesn't arrive in time, or if the remote answers improperly, the target - /// will be disconnected. - pub fn send_request(&mut self, target: &PeerId, block_hash: B::Hash, request: Vec) { - let protobuf_rq = schema::v1::finality::FinalityProofRequest { - block_hash: block_hash.encode(), - request, - }; - - let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); - if let Err(err) = protobuf_rq.encode(&mut buf) { - log::warn!("failed to encode finality proof request {:?}: {:?}", protobuf_rq, err); - return; - } - - log::trace!("enqueueing finality proof request to {:?}: {:?}", target, protobuf_rq); - self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::Any, - event: OutboundProtocol { - request: buf, - block_hash, - max_response_size: self.config.max_response_len, - protocol: self.config.protocol.clone(), - }, - }); - } - - /// Callback, invoked when a new finality request has been received from remote. - fn on_finality_request(&mut self, peer: &PeerId, request: &schema::v1::finality::FinalityProofRequest) - -> Result - { - let block_hash = Decode::decode(&mut request.block_hash.as_ref())?; - - log::trace!(target: "sync", "Finality proof request from {} for {}", peer, block_hash); - - // Note that an empty Vec is sent if no proof is available. - let finality_proof = if let Some(provider) = &self.finality_proof_provider { - provider - .prove_finality(block_hash, &request.request)? - .unwrap_or_default() - } else { - log::error!("Answering a finality proof request while finality provider is empty"); - return Err(From::from("Empty finality proof provider".to_string())) - }; - - Ok(schema::v1::finality::FinalityProofResponse { proof: finality_proof }) - } -} - -impl NetworkBehaviour for FinalityProofRequests -where - B: Block -{ - type ProtocolsHandler = OneShotHandler, OutboundProtocol, NodeEvent>; - type OutEvent = Event; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_len: self.config.max_request_len, - protocol: if self.finality_proof_provider.is_some() { - Some(self.config.protocol.clone()) - } else { - None - }, - marker: PhantomData, - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.keep_alive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, _peer: &PeerId) { - } - - fn inject_disconnected(&mut self, _peer: &PeerId) { - } - - fn inject_event( - &mut self, - peer: PeerId, - connection: ConnectionId, - event: NodeEvent - ) { - match event { - NodeEvent::Request(request, mut stream) => { - match self.on_finality_request(&peer, &request) { - Ok(res) => { - log::trace!("enqueueing finality response for peer {}", peer); - let mut data = Vec::with_capacity(res.encoded_len()); - if let Err(e) = res.encode(&mut data) { - log::debug!("error encoding finality response for peer {}: {}", peer, e) - } else { - let future = async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!("error writing finality response: {}", e) - } - }; - self.outgoing.push(future.boxed()) - } - } - Err(e) => 
log::debug!("error handling finality request from peer {}: {}", peer, e) - } - } - NodeEvent::Response(response, block_hash) => { - let ev = Event::Response { - peer, - block_hash, - proof: response.proof, - }; - self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) - -> Poll, Event>> - { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(ev); - } - - while let Poll::Ready(Some(_)) = self.outgoing.poll_next_unpin(cx) {} - Poll::Pending - } -} - -/// Output type of inbound and outbound substream upgrades. -#[derive(Debug)] -pub enum NodeEvent { - /// Incoming request from remote and substream to use for the response. - Request(schema::v1::finality::FinalityProofRequest, T), - /// Incoming response from remote. - Response(schema::v1::finality::FinalityProofResponse, B::Hash), -} - -/// Substream upgrade protocol. -/// -/// We attempt to parse an incoming protobuf encoded request (cf. `Request`) -/// which will be handled by the `FinalityProofRequests` behaviour, i.e. the request -/// will become visible via `inject_node_event` which then dispatches to the -/// relevant callback to process the message and prepare a response. -#[derive(Debug, Clone)] -pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_len: usize, - /// The protocol to use during upgrade negotiation. If `None`, then the incoming protocol - /// is simply disabled. - protocol: Option, - /// Marker to pin the block type. - marker: PhantomData, -} - -impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - // This iterator will return either 0 elements if `self.protocol` is `None`, or 1 element if - // it is `Some`. - type InfoIter = std::option::IntoIter; - - fn protocol_info(&self) -> Self::InfoIter { - self.protocol.clone().into_iter() - } -} - -impl InboundUpgrade for InboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - let len = self.max_request_len; - let vec = read_one(&mut s, len).await?; - match schema::v1::finality::FinalityProofRequest::decode(&vec[..]) { - Ok(r) => Ok(NodeEvent::Request(r, s)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }.boxed() - } -} - -/// Substream upgrade protocol. -/// -/// Sends a request to remote and awaits the response. -#[derive(Debug, Clone)] -pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// Block hash that has been requested. - block_hash: B::Hash, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. 
- protocol: Bytes, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl OutboundUpgrade for OutboundProtocol -where - B: Block, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = NodeEvent; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - schema::v1::finality::FinalityProofResponse::decode(&vec[..]) - .map(|r| NodeEvent::Response(r, self.block_hash)) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }.boxed() - } -} diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs index 9d20229288a42..0bc46b2164bcb 100644 --- a/client/network/src/gossip.rs +++ b/client/network/src/gossip.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -41,7 +41,7 @@ //! In normal situations, messages sent through a [`QueuedSender`] will arrive in the same //! order as they have been sent. //! It is possible, in the situation of disconnects and reconnects, that messages arrive in a -//! different order. See also https://github.com/paritytech/substrate/issues/6756. +//! different order. See also . //! However, if multiple instances of [`QueuedSender`] exist for the same peer and protocol, or //! if some other code uses the [`NetworkService`] to send notifications to this combination or //! peer and protocol, then the notifications will be interleaved in an unpredictable way. @@ -53,8 +53,9 @@ use async_std::sync::{Mutex, MutexGuard}; use futures::prelude::*; use futures::channel::mpsc::{channel, Receiver, Sender}; use libp2p::PeerId; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use sp_runtime::traits::Block as BlockT; use std::{ + borrow::Cow, collections::VecDeque, fmt, sync::Arc, @@ -82,7 +83,7 @@ impl QueuedSender { pub fn new( service: Arc>, peer_id: PeerId, - protocol: ConsensusEngineId, + protocol: Cow<'static, str>, queue_size_limit: usize, messages_encode: F ) -> (Self, impl Future + Send + 'static) @@ -107,7 +108,7 @@ impl QueuedSender { messages_encode ); - let sender = QueuedSender { + let sender = Self { shared_message_queue, notify_background_future, queue_size_limit, @@ -193,7 +194,7 @@ async fn create_background_future Vec> mut wait_for_sender: Receiver<()>, service: Arc>, peer_id: PeerId, - protocol: ConsensusEngineId, + protocol: Cow<'static, str>, shared_message_queue: SharedMessageQueue, messages_encode: F, ) { @@ -212,7 +213,7 @@ async fn create_background_future Vec> // Starting from below, we try to send the message. If an error happens when sending, // the only sane option we have is to silently discard the message. 
- let sender = match service.notification_sender(peer_id.clone(), protocol) { + let sender = match service.notification_sender(peer_id.clone(), protocol.clone()) { Ok(s) => s, Err(_) => continue, }; diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index 0f01ed81bffcb..88c4160bc5066 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,11 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, gossip::QueuedSender, Event, NetworkService, NetworkWorker}; +use crate::block_request_handler::BlockRequestHandler; +use crate::state_request_handler::StateRequestHandler; +use crate::light_client_requests::handler::LightClientRequestHandler; +use crate::gossip::QueuedSender; +use crate::{config, Event, NetworkService, NetworkWorker}; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{sync::Arc, time::Duration}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< @@ -33,7 +37,7 @@ type TestNetworkService = NetworkService< /// /// > **Note**: We return the events stream in order to not possibly lose events between the /// > construction of the service and the moment the events stream is grabbed. -fn build_test_full_node(config: config::NetworkConfiguration) +fn build_test_full_node(network_config: config::NetworkConfiguration) -> (Arc, impl Stream) { let client = Arc::new( @@ -44,21 +48,20 @@ fn build_test_full_node(config: config::NetworkConfiguration) #[derive(Clone)] struct PassThroughVerifier(bool); - impl sp_consensus::import_queue::Verifier for PassThroughVerifier { - fn verify( + + #[async_trait::async_trait] + impl sc_consensus::Verifier for PassThroughVerifier { + async fn verify( &mut self, - origin: sp_consensus::BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, + mut block: sp_consensus::BlockImportParams, ) -> Result< ( - sp_consensus::BlockImportParams, + sc_consensus::BlockImportParams, Option)>>, ), String, > { - let maybe_keys = header + let maybe_keys = block.header .digest() .log(|l| { l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) @@ -73,39 +76,69 @@ fn build_test_full_node(config: config::NetworkConfiguration) )] }); - let mut import = sp_consensus::BlockImportParams::new(origin, header); - import.body = body; - import.finalized = self.0; - import.justification = justification; - import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); - Ok((import, maybe_keys)) + block.finalized = self.0; + block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + Ok((block, maybe_keys)) } } - let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + let import_queue = Box::new(sc_consensus::BasicQueue::new( PassThroughVerifier(false), Box::new(client.clone()), None, - None, &sp_core::testing::TaskExecutor::new(), None, )); + let protocol_id = config::ProtocolId::from("/test-protocol-name"); + + let block_request_protocol_config = { + let (handler, 
protocol_config) = BlockRequestHandler::new( + &protocol_id, + client.clone(), + 50, + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let state_request_protocol_config = { + let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + client.clone(), + 50, + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = LightClientRequestHandler::new( + &protocol_id, + client.clone(), + ); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, - network_config: config, + transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), + network_config, chain: client.clone(), - finality_proof_provider: None, - finality_proof_request_builder: None, on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), - protocol_id: config::ProtocolId::from("/test-protocol-name"), + protocol_id, import_queue, block_announce_validator: Box::new( sp_consensus::block_validation::DefaultBlockAnnounceValidator, ), metrics_registry: None, + block_request_protocol_config, + state_request_protocol_config, + light_client_request_protocol_config, + warp_sync: None, }) .unwrap(); @@ -120,29 +153,45 @@ fn build_test_full_node(config: config::NetworkConfiguration) (service, event_stream) } -const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; +const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. -/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. fn build_nodes_one_proto() -> (Arc, impl Stream, Arc, impl Stream) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: Default::default() + } + ], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![], - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), - }], + extra_sets: vec![ + config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + .. Default::default() + }, + } + ], transport: config::TransportConfig::MemoryOnly, .. config::NetworkConfiguration::new_local() }); @@ -165,7 +214,7 @@ fn basic_works() { Event::NotificationStreamClosed { .. } => panic!(), Event::NotificationsReceived { messages, .. 
 				} => {
 					for message in messages {
-						assert_eq!(message.0, ENGINE_ID);
+						assert_eq!(message.0, PROTOCOL_NAME);
 						assert_eq!(message.1, &b"message"[..]);
 						received_notifications += 1;
 					}
@@ -181,7 +230,7 @@ fn basic_works() {

 	async_std::task::block_on(async move {
 		let (mut sender, bg_future) =
-			QueuedSender::new(node1, node2_id, ENGINE_ID, NUM_NOTIFS, |msg| msg);
+			QueuedSender::new(node1, node2_id, PROTOCOL_NAME, NUM_NOTIFS, |msg| msg);
 		async_std::task::spawn(bg_future);

 		// Wait for the `NotificationStreamOpened`.
diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs
index 3fd01c33dcf5f..51bc370265ef0 100644
--- a/client/network/src/lib.rs
+++ b/client/network/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -121,10 +121,10 @@
 //!
 //! - **`/substrate/<protocol-id>/<version>`** (where `<protocol-id>` must be replaced with the
 //!   protocol ID of the targeted chain, and `<version>` is a number between 2 and 6). For each
-//!   connection we optionally keep an additional substream for all Substrate-based communications alive.
-//!   This protocol is considered legacy, and is progressively being replaced with alternatives.
-//!   This is designated as "The legacy Substrate substream" in this documentation. See below for
-//!   more details.
+//!   connection we optionally keep an additional substream for all Substrate-based communications
+//!   alive. This protocol is considered legacy, and is progressively being replaced with
+//!   alternatives. This is designated as "The legacy Substrate substream" in this documentation. See
+//!   below for more details.
 //! - **`/<protocol-id>/sync/2`** is a request-response protocol (see below) that lets one perform
 //!   requests for information about blocks. Each request is the encoding of a `BlockRequest` and
 //!   each response is the encoding of a `BlockResponse`, as defined in the `api.v1.proto` file in
@@ -141,8 +141,9 @@
 //!   block announces are pushed to other nodes. The handshake is empty on both sides. The message
 //!   format is a SCALE-encoded tuple containing a block header followed with an opaque list of
 //!   bytes containing some data associated with this block announcement, e.g. a candidate message.
-//! - Notifications protocols that are registered using the `register_notifications_protocol`
-//!   method. For example: `/paritytech/grandpa/1`. See below for more information.
+//! - Notifications protocols that are registered using
+//!   `NetworkConfiguration::notifications_protocols`. For example: `/paritytech/grandpa/1`. See
+//!   below for more information.
 //!
 //! ## The legacy Substrate substream
 //!
@@ -242,16 +243,12 @@
 //! - Calling `trigger_repropagate` when a transaction is added to the pool.
 //!
 //! More precise usage details are still being worked on and will likely change in the future.
-//!
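As the rewritten docs above say, notification protocols are now declared up front in the network configuration rather than registered at runtime. A sketch of such a declaration, mirroring the `NonDefaultSetConfig` literal used in the `gossip/tests.rs` hunks earlier in this diff (the values are illustrative; the field set follows the `sc-network` API as of this change):

```rust
use sc_network::config::{NetworkConfiguration, NonDefaultSetConfig, TransportConfig};
use std::borrow::Cow;

// Protocol name and size limit are illustrative values taken from the test hunks.
const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo");

fn network_config() -> NetworkConfiguration {
	NetworkConfiguration {
		// One `NonDefaultSetConfig` per notifications protocol the node speaks.
		extra_sets: vec![NonDefaultSetConfig {
			notifications_protocol: PROTOCOL_NAME,
			fallback_names: Vec::new(),
			max_notification_size: 1024 * 1024,
			set_config: Default::default(),
		}],
		transport: TransportConfig::MemoryOnly,
		..NetworkConfiguration::new_local()
	}
}
```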
 mod behaviour;
-mod block_requests;
 mod chain;
-mod peer_info;
 mod discovery;
-mod finality_requests;
-mod light_client_handler;
 mod on_demand_layer;
+mod peer_info;
 mod protocol;
 mod request_responses;
 mod schema;
@@ -259,17 +256,26 @@
 mod service;
 mod transport;
 mod utils;

+pub mod bitswap;
+pub mod block_request_handler;
 pub mod config;
 pub mod error;
-pub mod gossip;
+pub mod light_client_requests;
 pub mod network_state;
+pub mod state_request_handler;
+pub mod transactions;
+pub mod warp_request_handler;

 #[doc(inline)]
 pub use libp2p::{multiaddr, Multiaddr, PeerId};
-pub use protocol::{event::{DhtEvent, Event, ObservedRole}, sync::SyncState, PeerInfo};
+pub use protocol::{
+	event::{DhtEvent, Event, ObservedRole},
+	sync::{StateDownloadProgress, SyncState, WarpSyncPhase, WarpSyncProgress},
+	PeerInfo,
+};
 pub use service::{
-	NetworkService, NetworkWorker, RequestFailure, OutboundFailure, NotificationSender,
-	NotificationSenderReady,
+	IfDisconnected, NetworkService, NetworkWorker, NotificationSender, NotificationSenderReady,
+	OutboundFailure, RequestFailure,
 };
 pub use sc_peerset::ReputationChange;

@@ -284,6 +290,9 @@ use sp_runtime::traits::{Block as BlockT, NumberFor};
 /// two peers, the per-peer connection limit is not set to 1 but 2.
 const MAX_CONNECTIONS_PER_PEER: usize = 2;

+/// The maximum number of concurrent established connections that were incoming.
+const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000;
+
 /// Minimum Requirements for a Hash within Networking
 pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {}

@@ -316,4 +325,8 @@ pub struct NetworkStatus<B: BlockT> {
 	pub total_bytes_inbound: u64,
 	/// The total number of bytes sent.
 	pub total_bytes_outbound: u64,
+	/// State sync in progress.
+	pub state_sync: Option<StateDownloadProgress>,
+	/// Warp sync in progress.
+	pub warp_sync: Option<WarpSyncProgress<B>>,
 }
diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs
deleted file mode 100644
index c1ff14fc82a22..0000000000000
--- a/client/network/src/light_client_handler.rs
+++ /dev/null
@@ -1,2058 +0,0 @@
-// Copyright 2020 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-//
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
-
-//! [`NetworkBehaviour`] implementation which handles light client requests.
-//!
-//! Every request is coming in on a separate connection substream which gets
-//! closed after we have sent the response back. Requests and responses are
-//! encoded as protocol buffers (cf. `api.v1.proto`).
-//!
-//! For every outgoing request we likewise open a separate substream.
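The deleted handler below derives its wire protocol names from the chain's `ProtocolId`: `/<protocol-id>/light/2` for light requests and `/<protocol-id>/sync/2` for block requests (see its `Config::set_protocol`, which assembles these byte-by-byte). The same naming scheme as a standalone sketch, with `ProtocolId` reduced to a `&str`:

```rust
// Mirrors the string layout built in the deleted `Config::set_protocol`.
fn light_protocol_name(protocol_id: &str) -> String {
	format!("/{}/light/2", protocol_id)
}

fn block_protocol_name(protocol_id: &str) -> String {
	format!("/{}/sync/2", protocol_id)
}

fn main() {
	assert_eq!(light_protocol_name("dot"), "/dot/light/2");
	assert_eq!(block_protocol_name("dot"), "/dot/sync/2");
}
```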
- -#![allow(unused)] - -use bytes::Bytes; -use codec::{self, Encode, Decode}; -use crate::{ - block_requests::build_protobuf_block_request, - chain::Client, - config::ProtocolId, - protocol::message::{BlockAttributes, Direction, FromBlock}, - schema, -}; -use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{OutboundUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol, - } -}; -use nohash_hasher::IntMap; -use prost::Message; -use sc_client_api::{ - StorageProof, - light::{ - self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, - RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, - } -}; -use sc_peerset::ReputationChange; -use sp_core::{ - storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, - hexdisplay::HexDisplay, -}; -use smallvec::SmallVec; -use sp_blockchain::{Error as ClientError}; -use sp_runtime::{ - traits::{Block, Header, NumberFor, Zero}, - generic::BlockId, -}; -use std::{ - collections::{BTreeMap, VecDeque, HashMap}, - iter, - io, - sync::Arc, - time::Duration, - task::{Context, Poll} -}; -use void::Void; -use wasm_timer::Instant; - -/// Reputation change for a peer when a request timed out. -pub(crate) const TIMEOUT_REPUTATION_CHANGE: i32 = -(1 << 8); - -/// Configuration options for `LightClientHandler` behaviour. -#[derive(Debug, Clone)] -pub struct Config { - max_request_size: usize, - max_response_size: usize, - max_pending_requests: usize, - inactivity_timeout: Duration, - request_timeout: Duration, - light_protocol: Bytes, - block_protocol: Bytes, -} - -impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. request size = 1 MiB - /// - max. response size = 16 MiB - /// - max. pending requests = 128 - /// - inactivity timeout = 15s - /// - request timeout = 15s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_request_size: 1 * 1024 * 1024, - max_response_size: 16 * 1024 * 1024, - max_pending_requests: 128, - inactivity_timeout: Duration::from_secs(15), - request_timeout: Duration::from_secs(15), - light_protocol: Bytes::new(), - block_protocol: Bytes::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. length in bytes of a request. - pub fn set_max_request_size(&mut self, v: usize) -> &mut Self { - self.max_request_size = v; - self - } - - /// Limit the max. length in bytes of a response. - pub fn set_max_response_size(&mut self, v: usize) -> &mut Self { - self.max_response_size = v; - self - } - - /// Limit the max. number of pending requests. - pub fn set_max_pending_requests(&mut self, v: usize) -> &mut Self { - self.max_pending_requests = v; - self - } - - /// Limit the max. duration the connection may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Limit the max. request duration. - pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { - self.request_timeout = v; - self - } - - /// Set protocol to use for upgrade negotiation. 
- pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut vl = Vec::new(); - vl.extend_from_slice(b"/"); - vl.extend_from_slice(id.as_ref().as_bytes()); - vl.extend_from_slice(b"/light/2"); - self.light_protocol = vl.into(); - - let mut vb = Vec::new(); - vb.extend_from_slice(b"/"); - vb.extend_from_slice(id.as_ref().as_bytes()); - vb.extend_from_slice(b"/sync/2"); - self.block_protocol = vb.into(); - - self - } -} - -/// Possible errors while handling light clients. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// There are currently too many pending request. - #[error("too many pending requests")] - TooManyRequests, - /// The response type does not correspond to the issued request. - #[error("unexpected response")] - UnexpectedResponse, - /// A bad request has been received. - #[error("bad request: {0}")] - BadRequest(&'static str), - /// The chain client errored. - #[error("client error: {0}")] - Client(#[from] ClientError), - /// Encoding or decoding of some data failed. - #[error("codec error: {0}")] - Codec(#[from] codec::Error), -} - -/// The possible light client requests we support. -/// -/// The associated `oneshot::Sender` will be used to convey the result of -/// their request back to them (cf. `Reply`). -// -// This is modeled after light_dispatch.rs's `RequestData` which is not -// used because we currently only support a subset of those. -#[derive(Debug)] -pub enum Request { - Body { - request: RemoteBodyRequest, - sender: oneshot::Sender, ClientError>> - }, - Header { - request: light::RemoteHeaderRequest, - sender: oneshot::Sender> - }, - Read { - request: light::RemoteReadRequest, - sender: oneshot::Sender, Option>>, ClientError>> - }, - ReadChild { - request: light::RemoteReadChildRequest, - sender: oneshot::Sender, Option>>, ClientError>> - }, - Call { - request: light::RemoteCallRequest, - sender: oneshot::Sender, ClientError>> - }, - Changes { - request: light::RemoteChangesRequest, - sender: oneshot::Sender, u32)>, ClientError>> - } -} - -/// The data to send back to the light client over the oneshot channel. -// -// It is unified here in order to be able to return it as a function -// result instead of delivering it to the client as a side effect of -// response processing. -#[derive(Debug)] -enum Reply { - VecU8(Vec), - VecNumberU32(Vec<(::Number, u32)>), - MapVecU8OptVecU8(HashMap, Option>>), - Header(B::Header), - Extrinsics(Vec), -} - -/// Augments a light client request with metadata. -#[derive(Debug)] -struct RequestWrapper { - /// Time when this value was created. - timestamp: Instant, - /// Remaining retries. - retries: usize, - /// The actual request. - request: Request, - /// The peer to send the request to, e.g. `PeerId`. - peer: P, - /// The connection to use for sending the request. - connection: Option, -} - -/// Information we have about some peer. -#[derive(Debug)] -struct PeerInfo { - connections: SmallVec<[(ConnectionId, Multiaddr); crate::MAX_CONNECTIONS_PER_PEER]>, - best_block: Option>, - status: PeerStatus, -} - -impl Default for PeerInfo { - fn default() -> Self { - PeerInfo { - connections: SmallVec::new(), - best_block: None, - status: PeerStatus::Idle, - } - } -} - -type RequestId = u64; - -/// A peer is either idle or busy processing a request from us. -#[derive(Debug, Clone, PartialEq, Eq)] -enum PeerStatus { - /// The peer is available. - Idle, - /// We wait for the peer to return us a response for the given request ID. - BusyWith(RequestId), -} - -/// The light client handler behaviour. 
-pub struct LightClientHandler { - /// This behaviour's configuration. - config: Config, - /// Blockchain client. - chain: Arc>, - /// Verifies that received responses are correct. - checker: Arc>, - /// Peer information (addresses, their best block, etc.) - peers: HashMap>, - /// Futures sending back response to remote clients. - responses: FuturesUnordered>, - /// Pending (local) requests. - pending_requests: VecDeque>, - /// Requests on their way to remote peers. - outstanding: IntMap>, - /// (Local) Request ID counter - next_request_id: RequestId, - /// Handle to use for reporting misbehaviour of peers. - peerset: sc_peerset::PeersetHandle, -} - -impl LightClientHandler -where - B: Block, -{ - /// Construct a new light client handler. - pub fn new( - cfg: Config, - chain: Arc>, - checker: Arc>, - peerset: sc_peerset::PeersetHandle, - ) -> Self { - LightClientHandler { - config: cfg, - chain, - checker, - peers: HashMap::new(), - responses: FuturesUnordered::new(), - pending_requests: VecDeque::new(), - outstanding: IntMap::default(), - next_request_id: 1, - peerset, - } - } - - /// We rely on external information about peers best blocks as we lack the - /// means to determine it ourselves. - pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { - if let Some(info) = self.peers.get_mut(peer) { - log::trace!("new best block for {:?}: {:?}", peer, num); - info.best_block = Some(num) - } - } - - /// Issue a new light client request. - pub fn request(&mut self, req: Request) -> Result<(), Error> { - if self.pending_requests.len() >= self.config.max_pending_requests { - return Err(Error::TooManyRequests) - } - let rw = RequestWrapper { - timestamp: Instant::now(), - retries: retries(&req), - request: req, - peer: (), // we do not know the peer yet - connection: None, - }; - self.pending_requests.push_back(rw); - Ok(()) - } - - fn next_request_id(&mut self) -> RequestId { - let id = self.next_request_id; - self.next_request_id += 1; - id - } - - /// Remove the given peer. - /// - /// If we have a request to this peer in flight, we move it back to - /// the pending requests queue. - fn remove_peer(&mut self, peer: &PeerId) { - if let Some(id) = self.outstanding.iter().find(|(_, rw)| &rw.peer == peer).map(|(k, _)| *k) { - let rw = self.outstanding.remove(&id).expect("key belongs to entry in this map"); - let rw = RequestWrapper { - timestamp: rw.timestamp, - retries: rw.retries, - request: rw.request, - peer: (), // need to find another peer - connection: None, - }; - self.pending_requests.push_back(rw); - } - self.peers.remove(peer); - } - - /// Prepares a request by selecting a suitable peer and connection to send it to. - /// - /// If there is currently no suitable peer for the request, the given request - /// is returned as `Err`. 
- fn prepare_request(&self, req: RequestWrapper) - -> Result<(PeerId, RequestWrapper), RequestWrapper> - { - let number = required_block(&req.request); - - let mut peer = None; - for (peer_id, peer_info) in self.peers.iter() { - if peer_info.status == PeerStatus::Idle { - match peer_info.best_block { - Some(n) => if n >= number { - peer = Some((peer_id, peer_info)); - break - }, - None => peer = Some((peer_id, peer_info)) - } - } - } - - if let Some((peer_id, peer_info)) = peer { - let connection = peer_info.connections.iter().next().map(|(id, _)| *id); - let rw = RequestWrapper { - timestamp: req.timestamp, - retries: req.retries, - request: req.request, - peer: peer_id.clone(), - connection, - }; - Ok((peer_id.clone(), rw)) - } else { - Err(req) - } - } - - /// Process a local request's response from remote. - /// - /// If successful, this will give us the actual, checked data we should be - /// sending back to the client, otherwise an error. - fn on_response - ( &mut self - , peer: &PeerId - , request: &Request - , response: Response - ) -> Result, Error> - { - log::trace!("response from {}", peer); - match response { - Response::Light(r) => self.on_response_light(peer, request, r), - Response::Block(r) => self.on_response_block(peer, request, r), - } - } - - fn on_response_light - ( &mut self - , peer: &PeerId - , request: &Request - , response: schema::v1::light::Response - ) -> Result, Error> - { - use schema::v1::light::response::Response; - match response.response { - Some(Response::RemoteCallResponse(response)) => - if let Request::Call { request , .. } = request { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_execution_proof(request, proof)?; - Ok(Reply::VecU8(reply)) - } else { - Err(Error::UnexpectedResponse) - } - Some(Response::RemoteReadResponse(response)) => - match request { - Request::Read { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - Request::ReadChild { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - _ => Err(Error::UnexpectedResponse) - } - Some(Response::RemoteChangesResponse(response)) => - if let Request::Changes { request, .. } = request { - let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; - let roots = { - let mut r = BTreeMap::new(); - for pair in response.roots { - let k = Decode::decode(&mut pair.fst.as_ref())?; - let v = Decode::decode(&mut pair.snd.as_ref())?; - r.insert(k, v); - } - r - }; - let reply = self.checker.check_changes_proof(&request, light::ChangesProof { - max_block, - proof: response.proof, - roots, - roots_proof, - })?; - Ok(Reply::VecNumberU32(reply)) - } else { - Err(Error::UnexpectedResponse) - } - Some(Response::RemoteHeaderResponse(response)) => - if let Request::Header { request, .. } = request { - let header = - if response.header.is_empty() { - None - } else { - Some(Decode::decode(&mut response.header.as_ref())?) 
- }; - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_header_proof(&request, header, proof)?; - Ok(Reply::Header(reply)) - } else { - Err(Error::UnexpectedResponse) - } - None => Err(Error::UnexpectedResponse) - } - } - - fn on_response_block - ( &mut self - , peer: &PeerId - , request: &Request - , response: schema::v1::BlockResponse - ) -> Result, Error> - { - let request = if let Request::Body { request , .. } = &request { - request - } else { - return Err(Error::UnexpectedResponse); - }; - - let body: Vec<_> = match response.blocks.into_iter().next() { - Some(b) => b.body, - None => return Err(Error::UnexpectedResponse), - }; - - let body = body.into_iter() - .map(|mut extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) - .collect::>()?; - - let body = self.checker.check_body_proof(&request, body)?; - Ok(Reply::Extrinsics(body)) - } - - fn on_remote_call_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteCallRequest - ) -> Result - { - log::trace!("remote call request from {} ({} at {:?})", - peer, - request.method, - request.block, - ); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let proof = match self.chain.execution_proof(&BlockId::Hash(block), &request.method, &request.data) { - Ok((_, proof)) => proof, - Err(e) => { - log::trace!("remote call request from {} ({} at {:?}) failed with: {}", - peer, - request.method, - request.block, - e, - ); - StorageProof::empty() - } - }; - - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteCallResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_read_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteReadRequest - ) -> Result - { - if request.keys.is_empty() { - log::debug!("invalid remote read request sent by {}", peer); - return Err(Error::BadRequest("remote read request without keys")) - } - - log::trace!("remote read request from {} ({} at {:?})", - peer, - fmt_keys(request.keys.first(), request.keys.last()), - request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let proof = match self.chain.read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read request from {} ({} at {:?}) failed with: {}", - peer, - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - }; - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_read_child_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteReadChildRequest - ) -> Result - { - if request.keys.is_empty() { - log::debug!("invalid remote child read request sent by {}", peer); - return Err(Error::BadRequest("remove read child request without keys")) - } - - log::trace!("remote read child request from {} ({} {} at {:?})", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match ChildType::from_prefixed_key(prefixed_key) { - 
Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), - None => Err("Invalid child storage key".into()), - }; - let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( - &BlockId::Hash(block), - &child_info, - &mut request.keys.iter().map(AsRef::as_ref) - )) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - }; - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_header_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteHeaderRequest - ) -> Result - { - log::trace!("remote header proof request from {} ({:?})", peer, request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - let (header, proof) = match self.chain.header_proof(&BlockId::Number(block)) { - Ok((header, proof)) => (header.encode(), proof), - Err(error) => { - log::trace!("remote header proof request from {} ({:?}) failed with: {}", - peer, - request.block, - error); - (Default::default(), StorageProof::empty()) - } - }; - - let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; - schema::v1::light::response::Response::RemoteHeaderResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_changes_request - ( &mut self - , peer: &PeerId - , request: &schema::v1::light::RemoteChangesRequest - ) -> Result - { - log::trace!("remote changes proof request from {} for key {} ({:?}..{:?})", - peer, - if !request.storage_key.is_empty() { - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&request.key)) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last); - - let first = Decode::decode(&mut request.first.as_ref())?; - let last = Decode::decode(&mut request.last.as_ref())?; - let min = Decode::decode(&mut request.min.as_ref())?; - let max = Decode::decode(&mut request.max.as_ref())?; - let key = StorageKey(request.key.clone()); - let storage_key = if request.storage_key.is_empty() { - None - } else { - Some(PrefixedStorageKey::new_ref(&request.storage_key)) - }; - - let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", - peer, - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), - request.first, - request.last, - error); - - light::ChangesProof:: { - max_block: Zero::zero(), - proof: Vec::new(), - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - } - }; - - let response = { - let r = schema::v1::light::RemoteChangesResponse { - max: proof.max_block.encode(), - proof: proof.proof, - roots: proof.roots.into_iter() - .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) - .collect(), - roots_proof: proof.roots_proof.encode(), - }; - schema::v1::light::response::Response::RemoteChangesResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } -} - -impl NetworkBehaviour for 
LightClientHandler -where - B: Block -{ - type ProtocolsHandler = OneShotHandler>; - type OutEvent = Void; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_size: self.config.max_request_size, - protocol: self.config.light_protocol.clone(), - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.keep_alive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) - } - - fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { - self.peers.get(peer) - .map(|info| info.connections.iter().map(|(_, a)| a.clone()).collect()) - .unwrap_or_default() - } - - fn inject_connected(&mut self, peer: &PeerId) { - } - - fn inject_connection_established(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { - let peer_address = match info { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr.clone(), - ConnectedPoint::Dialer { address } => address.clone() - }; - - log::trace!("peer {} connected with address {}", peer, peer_address); - - let entry = self.peers.entry(peer.clone()).or_default(); - entry.connections.push((*conn, peer_address)); - } - - fn inject_disconnected(&mut self, peer: &PeerId) { - log::trace!("peer {} disconnected", peer); - self.remove_peer(peer) - } - - fn inject_connection_closed(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { - let peer_address = match info { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - ConnectedPoint::Dialer { address } => address - }; - - log::trace!("connection to peer {} closed: {}", peer, peer_address); - - if let Some(info) = self.peers.get_mut(peer) { - info.connections.retain(|(c, _)| c != conn) - } - - // Add any outstanding requests on the closed connection back to the - // pending requests. - if let Some(id) = self.outstanding.iter() - .find(|(_, rw)| &rw.peer == peer && rw.connection == Some(*conn)) // (*) - .map(|(id, _)| *id) - { - let rw = self.outstanding.remove(&id).expect("by (*)"); - let rw = RequestWrapper { - timestamp: rw.timestamp, - retries: rw.retries, - request: rw.request, - peer: (), // need to find another peer - connection: None, - }; - self.pending_requests.push_back(rw); - } - } - - fn inject_event(&mut self, peer: PeerId, conn: ConnectionId, event: Event) { - match event { - // An incoming request from remote has been received. 
- Event::Request(request, mut stream) => { - log::trace!("incoming request from {}", peer); - let result = match &request.request { - Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => - self.on_remote_call_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => - self.on_remote_read_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) => - self.on_remote_header_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => - self.on_remote_read_child_request(&peer, r), - Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => - self.on_remote_changes_request(&peer, r), - None => { - log::debug!("ignoring request without request data from peer {}", peer); - return - } - }; - match result { - Ok(response) => { - log::trace!("enqueueing response for peer {}", peer); - let mut data = Vec::new(); - if let Err(e) = response.encode(&mut data) { - log::debug!("error encoding response for peer {}: {}", peer, e) - } else { - let future = async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!("error writing response: {}", e) - } - }; - self.responses.push(future.boxed()) - } - } - Err(Error::BadRequest(_)) => { - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new(-(1 << 12), "bad request")) - } - Err(e) => log::debug!("error handling request from peer {}: {}", peer, e) - } - } - // A response to one of our own requests has been received. - Event::Response(id, response) => { - if let Some(request) = self.outstanding.remove(&id) { - // We first just check if the response originates from the expected peer - // and connection. - if request.peer != peer { - log::debug!("Expected response from {} instead of {}.", request.peer, peer); - self.outstanding.insert(id, request); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); - return - } - - if let Some(info) = self.peers.get_mut(&peer) { - if info.status != PeerStatus::BusyWith(id) { - // If we get here, something is wrong with our internal handling of peer - // status information. At any time, a single peer processes at most one - // request from us and its status should contain the request ID we are - // expecting a response for. If a peer would send us a response with a - // random ID, we should not have an entry for it with this peer ID in - // our `outstanding` map, so a malicious peer should not be able to get - // us here. It is our own fault and must be fixed! - panic!("unexpected peer status {:?} for {}", info.status, peer); - } - - info.status = PeerStatus::Idle; // Make peer available again. 
- - match self.on_response(&peer, &request.request, response) { - Ok(reply) => send_reply(Ok(reply), request.request), - Err(Error::UnexpectedResponse) => { - log::debug!("unexpected response {} from peer {}", id, peer); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("unexpected response from peer")); - let rw = RequestWrapper { - timestamp: request.timestamp, - retries: request.retries, - request: request.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw); - } - Err(other) => { - log::debug!("error handling response {} from peer {}: {}", id, peer, other); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("invalid response from peer")); - if request.retries > 0 { - let rw = RequestWrapper { - timestamp: request.timestamp, - retries: request.retries - 1, - request: request.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw) - } else { - send_reply(Err(ClientError::RemoteFetchFailed), request.request) - } - } - } - } else { - // If we get here, something is wrong with our internal handling of peers. - // We apparently have an entry in our `outstanding` map and the peer is the one we - // expected. So, if we can not find an entry for it in our peer information table, - // then these two collections are out of sync which must not happen and is a clear - // programmer error that must be fixed! - panic!("missing peer information for {}; response {}", peer, id); - } - } else { - log::debug!("unexpected response {} from peer {}", id, peer); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); - } - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) -> Poll> { - // Process response sending futures. - while let Poll::Ready(Some(_)) = self.responses.poll_next_unpin(cx) {} - - // If we have a pending request to send, try to find an available peer and send it. - let now = Instant::now(); - while let Some(mut request) = self.pending_requests.pop_front() { - if now > request.timestamp + self.config.request_timeout { - if request.retries == 0 { - send_reply(Err(ClientError::RemoteFetchFailed), request.request); - continue - } - request.timestamp = Instant::now(); - request.retries -= 1 - } - - - match self.prepare_request(request) { - Err(request) => { - self.pending_requests.push_front(request); - log::debug!("no peer available to send request to"); - break - } - Ok((peer, request)) => { - let request_bytes = match serialize_request(&request.request) { - Ok(bytes) => bytes, - Err(error) => { - log::debug!("failed to serialize request: {}", error); - send_reply(Err(ClientError::RemoteFetchFailed), request.request); - continue - } - }; - - let (expected, protocol) = match request.request { - Request::Body { .. 
} => - (ExpectedResponseTy::Block, self.config.block_protocol.clone()), - _ => - (ExpectedResponseTy::Light, self.config.light_protocol.clone()), - }; - - let peer_id = peer.clone(); - let handler = request.connection.map_or(NotifyHandler::Any, NotifyHandler::One); - - let request_id = self.next_request_id(); - if let Some(p) = self.peers.get_mut(&peer) { - p.status = PeerStatus::BusyWith(request_id); - } - self.outstanding.insert(request_id, request); - - let event = OutboundProtocol { - request_id, - request: request_bytes, - expected, - max_response_size: self.config.max_response_size, - protocol, - }; - - log::trace!("sending request {} to peer {}", request_id, peer_id); - - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event, - }) - } - } - } - - // Look for ongoing requests that have timed out. - let mut expired = Vec::new(); - for (id, rw) in &self.outstanding { - if now > rw.timestamp + self.config.request_timeout { - log::debug!("request {} timed out", id); - expired.push(*id) - } - } - for id in expired { - if let Some(rw) = self.outstanding.remove(&id) { - self.remove_peer(&rw.peer); - self.peerset.report_peer(rw.peer.clone(), - ReputationChange::new(TIMEOUT_REPUTATION_CHANGE, "light request timeout")); - if rw.retries == 0 { - send_reply(Err(ClientError::RemoteFetchFailed), rw.request); - continue - } - let rw = RequestWrapper { - timestamp: Instant::now(), - retries: rw.retries - 1, - request: rw.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw) - } - } - - Poll::Pending - } -} - -fn required_block(request: &Request) -> NumberFor { - match request { - Request::Body { request, .. } => *request.header.number(), - Request::Header { request, .. } => request.block, - Request::Read { request, .. } => *request.header.number(), - Request::ReadChild { request, .. } => *request.header.number(), - Request::Call { request, .. } => *request.header.number(), - Request::Changes { request, .. } => request.max_block.0, - } -} - -fn retries(request: &Request) -> usize { - let rc = match request { - Request::Body { request, .. } => request.retry_count, - Request::Header { request, .. } => request.retry_count, - Request::Read { request, .. } => request.retry_count, - Request::ReadChild { request, .. } => request.retry_count, - Request::Call { request, .. } => request.retry_count, - Request::Changes { request, .. } => request.retry_count, - }; - rc.unwrap_or(0) -} - -fn serialize_request(request: &Request) -> Result, prost::EncodeError> { - let request = match request { - Request::Body { request, .. } => { - let rq = build_protobuf_block_request::<_, NumberFor>( - BlockAttributes::BODY, - FromBlock::Hash(request.header.hash()), - None, - Direction::Ascending, - Some(1), - ); - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - return Ok(buf); - } - Request::Header { request, .. } => { - let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; - schema::v1::light::request::Request::RemoteHeaderRequest(r) - } - Request::Read { request, .. } => { - let r = schema::v1::light::RemoteReadRequest { - block: request.block.encode(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadRequest(r) - } - Request::ReadChild { request, .. 
} => { - let r = schema::v1::light::RemoteReadChildRequest { - block: request.block.encode(), - storage_key: request.storage_key.clone().into_inner(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadChildRequest(r) - } - Request::Call { request, .. } => { - let r = schema::v1::light::RemoteCallRequest { - block: request.block.encode(), - method: request.method.clone(), - data: request.call_data.clone(), - }; - schema::v1::light::request::Request::RemoteCallRequest(r) - } - Request::Changes { request, .. } => { - let r = schema::v1::light::RemoteChangesRequest { - first: request.first_block.1.encode(), - last: request.last_block.1.encode(), - min: request.tries_roots.1.encode(), - max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().map(|s| s.into_inner()) - .unwrap_or_default(), - key: request.key.clone(), - }; - schema::v1::light::request::Request::RemoteChangesRequest(r) - } - }; - - let rq = schema::v1::light::Request { request: Some(request) }; - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - Ok(buf) -} - -fn send_reply(result: Result, ClientError>, request: Request) { - fn send(item: T, sender: oneshot::Sender) { - let _ = sender.send(item); // It is okay if the other end already hung up. - } - match request { - Request::Body { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), - } - Request::Header { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Header(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for header request: {:?}, {:?}", reply, request), - } - Request::Read { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), - } - Request::ReadChild { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), - } - Request::Call { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), - } - Request::Changes { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for changes request: {:?}, {:?}", reply, request), - } - } -} - -/// Output type of inbound and outbound substream upgrades. -#[derive(Debug)] -pub enum Event { - /// Incoming request from remote and substream to use for the response. - Request(schema::v1::light::Request, T), - /// Incoming response from remote. - Response(RequestId, Response), -} - -/// Incoming response from remote. -#[derive(Debug, Clone)] -pub enum Response { - /// Incoming light response from remote. - Light(schema::v1::light::Response), - /// Incoming block response from remote. - Block(schema::v1::BlockResponse), -} - -/// Substream upgrade protocol. -/// -/// Reads incoming requests from remote. -#[derive(Debug, Clone)] -pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_size: usize, - /// The protocol to use for upgrade negotiation. 
- protocol: Bytes, -} - -impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl InboundUpgrade for InboundProtocol -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - let future = async move { - let vec = read_one(&mut s, self.max_request_size).await?; - match schema::v1::light::Request::decode(&vec[..]) { - Ok(r) => Ok(Event::Request(r, s)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }; - future.boxed() - } -} - -/// Substream upgrade protocol. -/// -/// Sends a request to remote and awaits the response. -#[derive(Debug, Clone)] -pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// Local identifier for the request. Used to associate it with a response. - request_id: RequestId, - /// Kind of response expected for this request. - expected: ExpectedResponseTy, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. - protocol: Bytes, -} - -/// Type of response expected from the remote for this request. -#[derive(Debug, Clone)] -enum ExpectedResponseTy { - Light, - Block, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } -} - -impl OutboundUpgrade for OutboundProtocol -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static -{ - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - let future = async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - match self.expected { - ExpectedResponseTy::Light => { - schema::v1::light::Response::decode(&vec[..]) - .map(|r| Event::Response(self.request_id, Response::Light(r))) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }, - ExpectedResponseTy::Block => { - schema::v1::BlockResponse::decode(&vec[..]) - .map(|r| Event::Response(self.request_id, Response::Block(r))) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - } - } - }; - future.boxed() - } -} - -fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { - if let (Some(first), Some(last)) = (first, last) { - if first == last { - HexDisplay::from(first).to_string() - } else { - format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) - } - } else { - String::from("n/a") - } -} - -#[cfg(test)] -mod tests { - use super::*; - use async_std::task; - use assert_matches::assert_matches; - use codec::Encode; - use crate::{ - chain::Client, - config::ProtocolId, - schema, - }; - use futures::{channel::oneshot, prelude::*}; - use libp2p::{ - PeerId, - Multiaddr, - core::{ - ConnectedPoint, - connection::ConnectionId, - identity, - muxing::{StreamMuxerBox, SubstreamRef}, - transport::{Transport, boxed::Boxed, memory::MemoryTransport}, - upgrade - }, - noise::{self, Keypair, X25519, NoiseConfig}, - swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, - yamux - }; - use sc_client_api::{StorageProof, RemoteReadChildRequest, FetchChecker}; - use 
sp_blockchain::{Error as ClientError}; - use sp_core::storage::ChildInfo; - use std::{ - collections::{HashMap, HashSet}, - io, - iter::{self, FromIterator}, - pin::Pin, - sync::Arc, - task::{Context, Poll} - }; - use sp_runtime::{generic::Header, traits::{BlakeTwo256, Block as BlockT, NumberFor}}; - use super::{Event, LightClientHandler, Request, Response, OutboundProtocol, PeerStatus}; - use void::Void; - - type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - type Handler = LightClientHandler; - type Swarm = libp2p::swarm::Swarm; - - fn empty_proof() -> Vec { - StorageProof::empty().encode() - } - - fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { - let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); - let id_key = identity::Keypair::generate_ed25519(); - let dh_key = Keypair::::new().into_authentic(&id_key).unwrap(); - let local_peer = id_key.public().into_peer_id(); - let transport = MemoryTransport::default() - .upgrade(upgrade::Version::V1) - .authenticate(NoiseConfig::xx(dh_key).into_authenticated()) - .multiplex(yamux::Config::default()) - .map(|(peer, muxer), _| (peer, StreamMuxerBox::new(muxer))) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - .boxed(); - Swarm::new(transport, LightClientHandler::new(cf, client, checker, ps), local_peer) - } - - struct DummyFetchChecker { - ok: bool, - _mark: std::marker::PhantomData - } - - impl light::FetchChecker for DummyFetchChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - header: Option, - _remote_proof: StorageProof, - ) -> Result { - match self.ok { - true if header.is_some() => Ok(header.unwrap()), - _ => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_execution_proof( - &self, - _: &RemoteCallRequest, - _: StorageProof, - ) -> Result, ClientError> { - match self.ok { - true => Ok(vec![42]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_changes_proof( - &self, - _: &RemoteChangesRequest, - _: ChangesProof - ) -> Result, u32)>, ClientError> { - match self.ok { - true => Ok(vec![(100.into(), 2)]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_body_proof( - &self, - _: &RemoteBodyRequest, - body: Vec - ) -> Result, ClientError> { - match self.ok { - true => Ok(body), - false => Err(ClientError::Backend("Test error".into())), - } - } - } - - fn make_config() -> super::Config { - super::Config::new(&ProtocolId::from("foo")) - } - - fn dummy_header() -> sp_test_primitives::Header { - sp_test_primitives::Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - struct EmptyPollParams(PeerId); - - impl PollParameters for EmptyPollParams { - 
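- // A stub: the handler's `poll` never inspects these parameters, so
- // empty iterators and a fixed local peer id are sufficient for tests.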
type SupportedProtocolsIter = iter::Empty>; - type ListenedAddressesIter = iter::Empty; - type ExternalAddressesIter = iter::Empty; - - fn supported_protocols(&self) -> Self::SupportedProtocolsIter { - iter::empty() - } - - fn listened_addresses(&self) -> Self::ListenedAddressesIter { - iter::empty() - } - - fn external_addresses(&self) -> Self::ExternalAddressesIter { - iter::empty() - } - - fn local_peer_id(&self) -> &PeerId { - &self.0 - } - } - - fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { - let cfg = sc_peerset::PeersetConfig { - in_peers: 128, - out_peers: 128, - bootnodes: Vec::new(), - reserved_only: false, - priority_groups: Vec::new(), - }; - sc_peerset::Peerset::from_config(cfg) - } - - fn make_behaviour - ( ok: bool - , ps: sc_peerset::PeersetHandle - , cf: super::Config - ) -> LightClientHandler - { - let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); - LightClientHandler::new(cf, client, checker, ps) - } - - fn empty_dialer() -> ConnectedPoint { - ConnectedPoint::Dialer { address: Multiaddr::empty() } - } - - fn poll(mut b: &mut LightClientHandler) -> Poll> { - let mut p = EmptyPollParams(PeerId::random()); - match future::poll_fn(|cx| Pin::new(&mut b).poll(cx, &mut p)).now_or_never() { - Some(a) => Poll::Ready(a), - None => Poll::Pending - } - } - - #[test] - fn disconnects_from_peer_if_told() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - behaviour.inject_connection_established(&peer, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - behaviour.inject_connection_closed(&peer, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_disconnected(&peer); - assert_eq!(0, behaviour.peers.len()) - } - - #[test] - fn disconnects_from_peer_if_request_times_out() { - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - behaviour.inject_connection_established(&peer0, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_connected(&peer0); - behaviour.inject_connection_established(&peer1, &ConnectionId::new(2), &empty_dialer()); - behaviour.inject_connected(&peer1); - - // We now know about two peers. - assert_eq!(HashSet::from_iter(&[peer0.clone(), peer1.clone()]), behaviour.peers.keys().collect::>()); - - // No requests have been made yet. - assert!(behaviour.pending_requests.is_empty()); - assert!(behaviour.outstanding.is_empty()); - - // Issue our first request! - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - assert_eq!(1, behaviour.pending_requests.len()); - - // The behaviour should now attempt to send the request. - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, .. }) => { - assert!(peer_id == peer0 || peer_id == peer1) - }); - - // And we should have one busy peer. 
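- // (Exactly one of the two connected peers should have been chosen for
- // the request, while the other one remains idle.)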
- assert!({ - let (idle, busy): (Vec<_>, Vec<_>) = - behaviour.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); - - idle.len() == 1 && busy.len() == 1 - && (idle[0].0 == &peer0 || busy[0].0 == &peer0) - && (idle[0].0 == &peer1 || busy[0].0 == &peer1) - }); - - // No more pending requests, but one should be outstanding. - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - // We now set back the timestamp of the outstanding request to make it expire. - let request = behaviour.outstanding.values_mut().next().unwrap(); - request.timestamp -= make_config().request_timeout; - - // Make progress, but do not expect some action. - assert_matches!(poll(&mut behaviour), Poll::Pending); - - // The request should have timed out by now and the corresponding peer be removed. - assert_eq!(1, behaviour.peers.len()); - // Since we asked for one retry, the request should be back in the pending queue. - assert_eq!(1, behaviour.pending_requests.len()); - // No other request should be ongoing. - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_incorrect_response() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(false, pset.1, make_config()); - // ^--- Making sure the response data check fails. - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - poll(&mut behaviour); // Make progress - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - let request_id = *behaviour.outstanding.keys().next().unwrap(); - - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); - assert!(behaviour.peers.is_empty()); - - poll(&mut behaviour); // More progress - - // The request should be back in the pending queue - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_unexpected_response() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - - // Some unsolicited response - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(2347895932, Response::Light(response))); - - assert!(behaviour.peers.is_empty()); - poll(&mut 
behaviour); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_wrong_response_type() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - poll(&mut behaviour); // Make progress - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - let request_id = *behaviour.outstanding.keys().next().unwrap(); - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); - assert!(behaviour.peers.is_empty()); - - poll(&mut behaviour); // More progress - - // The request should be back in the pending queue - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn receives_remote_failure_after_retry_count_failures() { - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - let peer3 = PeerId::random(); - let peer4 = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(false, pset.1, make_config()); - // ^--- Making sure the response data check fails. - - let conn1 = ConnectionId::new(1); - behaviour.inject_connection_established(&peer1, &conn1, &empty_dialer()); - behaviour.inject_connected(&peer1); - let conn2 = ConnectionId::new(2); - behaviour.inject_connection_established(&peer2, &conn2, &empty_dialer()); - behaviour.inject_connected(&peer2); - let conn3 = ConnectionId::new(3); - behaviour.inject_connection_established(&peer3, &conn3, &empty_dialer()); - behaviour.inject_connected(&peer3); - let conn4 = ConnectionId::new(3); - behaviour.inject_connection_established(&peer4, &conn4, &empty_dialer()); - behaviour.inject_connected(&peer4); - assert_eq!(4, behaviour.peers.len()); - - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(3), // Attempt up to three retries. - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - for i in 1 ..= 3 { - // Construct an invalid response - let request_id = *behaviour.outstanding.keys().next().unwrap(); - let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)) - } - }; - let conn = ConnectionId::new(i); - behaviour.inject_event(responding_peer, conn, Event::Response(request_id, Response::Light(response.clone()))); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })); - assert_matches!(chan.1.try_recv(), Ok(None)) - } - // Final invalid response - let request_id = *behaviour.outstanding.keys().next().unwrap(); - let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - behaviour.inject_event(responding_peer, conn4, Event::Response(request_id, Response::Light(response))); - assert_matches!(poll(&mut behaviour), Poll::Pending); - assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) - } - - fn issue_request(request: Request) { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let response = match request { - Request::Body { .. } => unimplemented!(), - Request::Header{..} => { - let r = schema::v1::light::RemoteHeaderResponse { - header: dummy_header().encode(), - proof: empty_proof() - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)), - } - } - Request::Read{..} => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - } - Request::ReadChild{..} => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - } - Request::Call{..} => { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - } - Request::Changes{..} => { - let r = schema::v1::light::RemoteChangesResponse { - max: iter::repeat(1).take(32).collect(), - proof: Vec::new(), - roots: Vec::new(), - roots_proof: empty_proof() - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), - } - } - }; - - behaviour.request(request).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - assert_eq!(1, *behaviour.outstanding.keys().next().unwrap()); - - behaviour.inject_event(peer.clone(), conn, Event::Response(1, Response::Light(response))); - - poll(&mut behaviour); - - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()) - } - - #[test] - fn receives_remote_call_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - issue_request(Request::Call { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::Read { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_child_response() { - let mut chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::ReadChild { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_header_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - issue_request(Request::Header { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_changes_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - issue_request(Request::Changes { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - fn send_receive(request: Request) { - // We start a swarm on the listening side which awaits incoming requests and answers them: - let local_pset = peerset(); - let local_listen_addr: libp2p::Multiaddr = libp2p::multiaddr::Protocol::Memory(rand::random()).into(); - let mut local_swarm = make_swarm(true, local_pset.1, make_config()); - Swarm::listen_on(&mut local_swarm, local_listen_addr.clone()).unwrap(); - - // We also start a swarm that makes requests and awaits responses: - let remote_pset = peerset(); - let mut remote_swarm = make_swarm(true, remote_pset.1, make_config()); - - // We now schedule a request, dial the remote and let the two swarm work it out: - remote_swarm.request(request).unwrap(); - Swarm::dial_addr(&mut remote_swarm, local_listen_addr).unwrap(); - - let future = { - let a = local_swarm.for_each(|_| future::ready(())); - let b = remote_swarm.for_each(|_| future::ready(())); - 
future::join(a, b).map(|_| ()) - }; - - task::spawn(future); - } - - #[test] - fn send_receive_call() { - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - send_receive(Request::Call { request, sender: chan.0 }); - assert_eq!(vec![42], task::block_on(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_execution_proof` - } - - #[test] - fn send_receive_read() { - let chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None - }; - send_receive(Request::Read { request, sender: chan.0 }); - assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_proof` - } - - #[test] - fn send_receive_read_child() { - let chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - send_receive(Request::ReadChild { request, sender: chan.0 }); - assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_child_proof` - } - - #[test] - fn send_receive_header() { - sp_tracing::try_init_simple(); - let chan = oneshot::channel(); - let request = light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - send_receive(Request::Header { request, sender: chan.0 }); - // The remote does not know block 1: - assert_matches!(task::block_on(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); - } - - #[test] - fn send_receive_changes() { - let chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - send_receive(Request::Changes { request, sender: chan.0 }); - assert_eq!(vec![(100, 2)], task::block_on(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_changes_proof` - } - - #[test] - fn body_request_fields_encoded_properly() { - let (sender, _) = oneshot::channel(); - let serialized_request = serialize_request::(&Request::Body { - request: RemoteBodyRequest { - header: dummy_header(), - retry_count: None, - }, - sender, - }).unwrap(); - let deserialized_request = schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); - assert!( - BlockAttributes::from_be_u32(deserialized_request.fields) - .unwrap() - .contains(BlockAttributes::BODY) - ); - } -} diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs new file mode 100644 index 0000000000000..e18b783f219be --- /dev/null +++ b/client/network/src/light_client_requests.rs @@ -0,0 +1,315 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helpers for outgoing and incoming light client requests. + +/// For incoming light client requests. +pub mod handler; +/// For outgoing light client requests. +pub mod sender; + +use crate::{config::ProtocolId, request_responses::ProtocolConfig}; + +use std::time::Duration; + +/// Generate the light client protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: &ProtocolId) -> String { + let mut s = String::new(); + s.push_str("/"); + s.push_str(protocol_id.as_ref()); + s.push_str("/light/2"); + s +} + +/// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming +/// requests. +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { + ProtocolConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 1 * 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + request_timeout: Duration::from_secs(15), + inbound_queue: None, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{config::ProtocolId, request_responses::IncomingRequest}; + + use assert_matches::assert_matches; + use futures::{ + channel::oneshot, + executor::{block_on, LocalPool}, + prelude::*, + task::Spawn, + }; + use libp2p::PeerId; + use sc_client_api::{ + light::{ + self, ChangesProof, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, + RemoteHeaderRequest, RemoteReadRequest, + }, + FetchChecker, RemoteReadChildRequest, StorageProof, + }; + use sp_blockchain::Error as ClientError; + use sp_core::storage::ChildInfo; + use sp_runtime::{ + generic::Header, + traits::{BlakeTwo256, Block as BlockT, NumberFor}, + }; + use std::{collections::HashMap, sync::Arc}; + + pub struct DummyFetchChecker { + pub ok: bool, + pub _mark: std::marker::PhantomData, + } + + impl FetchChecker for DummyFetchChecker { + fn check_header_proof( + &self, + _request: &RemoteHeaderRequest, + header: Option, + _remote_proof: StorageProof, + ) -> Result { + match self.ok { + true if header.is_some() => Ok(header.unwrap()), + _ => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_proof( + &self, + request: &RemoteReadRequest, + _: StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_child_proof( + &self, + request: &RemoteReadChildRequest, + _: StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_execution_proof( + &self, + _: &RemoteCallRequest, + _: StorageProof, + ) -> Result, ClientError> { + match self.ok { + true => Ok(vec![42]), + false => 
Err(ClientError::Backend("Test error".into())), + } + } + + fn check_changes_proof( + &self, + _: &RemoteChangesRequest, + _: ChangesProof, + ) -> Result, u32)>, ClientError> { + match self.ok { + true => Ok(vec![(100u32.into(), 2)]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_body_proof( + &self, + _: &RemoteBodyRequest, + body: Vec, + ) -> Result, ClientError> { + match self.ok { + true => Ok(body), + false => Err(ClientError::Backend("Test error".into())), + } + } + } + + pub fn protocol_id() -> ProtocolId { + ProtocolId::from("test") + } + + pub fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { + let cfg = sc_peerset::SetConfig { + in_peers: 128, + out_peers: 128, + bootnodes: Default::default(), + reserved_only: false, + reserved_nodes: Default::default(), + }; + sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets: vec![cfg] }) + } + + pub fn dummy_header() -> sp_test_primitives::Header { + sp_test_primitives::Header { + parent_hash: Default::default(), + number: 0, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + + type Block = + sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; + + fn send_receive(request: sender::Request, pool: &LocalPool) { + let client = Arc::new(substrate_test_runtime_client::new()); + let (handler, protocol_config) = + handler::LightClientRequestHandler::new(&protocol_id(), client); + pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = sender::LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + sender.inject_connected(PeerId::random()); + + sender.request(request).unwrap(); + let sender::OutEvent::SendRequest { pending_response, request, .. 
} = + block_on(sender.next()).unwrap(); + let (tx, rx) = oneshot::channel(); + block_on(protocol_config.inbound_queue.unwrap().send(IncomingRequest { + peer: PeerId::random(), + payload: request, + pending_response: tx, + })) + .unwrap(); + pool.spawner() + .spawn_obj( + async move { + pending_response.send(Ok(rx.await.unwrap().result.unwrap())).unwrap(); + } + .boxed() + .into(), + ) + .unwrap(); + + pool.spawner() + .spawn_obj(sender.for_each(|_| future::ready(())).boxed().into()) + .unwrap(); + } + + #[test] + fn send_receive_call() { + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + + let mut pool = LocalPool::new(); + send_receive(sender::Request::Call { request, sender: chan.0 }, &pool); + assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_execution_proof` + } + + #[test] + fn send_receive_read() { + let chan = oneshot::channel(); + let request = light::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::Read { request, sender: chan.0 }, &pool); + assert_eq!( + Some(vec![42]), + pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() + ); + // ^--- from `DummyFetchChecker::check_read_proof` + } + + #[test] + fn send_receive_read_child() { + let chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); + let request = light::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: child_info.prefixed_storage_key(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool); + assert_eq!( + Some(vec![42]), + pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() + ); + // ^--- from `DummyFetchChecker::check_read_child_proof` + } + + #[test] + fn send_receive_header() { + sp_tracing::try_init_simple(); + let chan = oneshot::channel(); + let request = light::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::Header { request, sender: chan.0 }, &pool); + // The remote does not know block 1: + assert_matches!(pool.run_until(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); + } + + #[test] + fn send_receive_changes() { + let chan = oneshot::channel(); + let request = light::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + let mut pool = LocalPool::new(); + send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool); + assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_changes_proof` + } +} diff --git a/client/network/src/light_client_requests/handler.rs b/client/network/src/light_client_requests/handler.rs new file mode 100644 index 
0000000000000..43504edddd73a --- /dev/null +++ b/client/network/src/light_client_requests/handler.rs @@ -0,0 +1,416 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper for incoming light client requests. +//! +//! Handle (i.e. answer) incoming light client requests from a remote peer received via +//! [`crate::request_responses::RequestResponsesBehaviour`] with +//! [`LightClientRequestHandler`](handler::LightClientRequestHandler). + +use crate::{ + chain::Client, + config::ProtocolId, + request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + schema, PeerId, +}; +use codec::{self, Decode, Encode}; +use futures::{channel::mpsc, prelude::*}; +use log::{debug, trace}; +use prost::Message; +use sc_client_api::{light, StorageProof}; +use sc_peerset::ReputationChange; +use sp_core::{ + hexdisplay::HexDisplay, + storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageKey}, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block, Zero}, +}; +use std::{collections::BTreeMap, sync::Arc}; + +const LOG_TARGET: &str = "light-client-request-handler"; + +/// Handler for incoming light client requests from a remote peer. +pub struct LightClientRequestHandler { + request_receiver: mpsc::Receiver, + /// Blockchain client. + client: Arc>, +} + +impl LightClientRequestHandler { + /// Create a new [`LightClientRequestHandler`]. + pub fn new(protocol_id: &ProtocolId, client: Arc>) -> (Self, ProtocolConfig) { + // For now, due to lack of data on light client request handling in production systems, this + // value is chosen to match the block request limit. + let (tx, request_receiver) = mpsc::channel(20); + + let mut protocol_config = super::generate_protocol_config(protocol_id); + protocol_config.inbound_queue = Some(tx); + + (Self { client, request_receiver }, protocol_config) + } + + /// Run [`LightClientRequestHandler`]. 
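+ ///
+ /// The returned future resolves once the inbound request channel is closed.
+ /// A minimal sketch of how the handler might be wired up (the executor and
+ /// the surrounding variables are illustrative assumptions, not part of this
+ /// API):
+ ///
+ /// ```ignore
+ /// let (handler, protocol_config) =
+ ///     LightClientRequestHandler::new(&protocol_id, client.clone());
+ /// // Register `protocol_config` with the request-response behaviour, then
+ /// // drive the handler as a background task:
+ /// async_std::task::spawn(handler.run());
+ /// ```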
+ pub async fn run(mut self) { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_request(peer, payload) { + Ok(response_data) => { + let response = OutgoingResponse { + result: Ok(response_data), + reputation_changes: Vec::new(), + sent_feedback: None, + }; + + match pending_response.send(response) { + Ok(()) => trace!( + target: LOG_TARGET, + "Handled light client request from {}.", + peer, + ), + Err(_) => debug!( + target: LOG_TARGET, + "Failed to handle light client request from {}: {}", + peer, + HandleRequestError::SendResponse, + ), + }; + }, + Err(e) => { + debug!( + target: LOG_TARGET, + "Failed to handle light client request from {}: {}", peer, e, + ); + + let reputation_changes = match e { + HandleRequestError::BadRequest(_) => { + vec![ReputationChange::new(-(1 << 12), "bad request")] + }, + _ => Vec::new(), + }; + + let response = OutgoingResponse { + result: Err(()), + reputation_changes, + sent_feedback: None, + }; + + if pending_response.send(response).is_err() { + debug!( + target: LOG_TARGET, + "Failed to handle light client request from {}: {}", + peer, + HandleRequestError::SendResponse, + ); + }; + }, + } + } + } + + fn handle_request( + &mut self, + peer: PeerId, + payload: Vec, + ) -> Result, HandleRequestError> { + let request = schema::v1::light::Request::decode(&payload[..])?; + + let response = match &request.request { + Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => + self.on_remote_call_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => + self.on_remote_read_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) => + self.on_remote_header_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => + self.on_remote_read_child_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => + self.on_remote_changes_request(&peer, r)?, + None => + return Err(HandleRequestError::BadRequest("Remote request without request data.")), + }; + + let mut data = Vec::new(); + response.encode(&mut data)?; + + Ok(data) + } + + fn on_remote_call_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteCallRequest, + ) -> Result { + trace!("Remote call request from {} ({} at {:?}).", peer, request.method, request.block,); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = + match self + .client + .execution_proof(&BlockId::Hash(block), &request.method, &request.data) + { + Ok((_, proof)) => proof, + Err(e) => { + trace!( + "remote call request from {} ({} at {:?}) failed with: {}", + peer, + request.method, + request.block, + e, + ); + StorageProof::empty() + }, + }; + + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteCallResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_read_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteReadRequest, + ) -> Result { + if request.keys.is_empty() { + debug!("Invalid remote read request sent by {}.", peer); + return Err(HandleRequestError::BadRequest("Remote read request without keys.")) + } + + trace!( + "Remote read request from {} ({} at {:?}).", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + ); + + let block = Decode::decode(&mut 
request.block.as_ref())?; + + let proof = match self + .client + .read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) + { + Ok(proof) => proof, + Err(error) => { + trace!( + "remote read request from {} ({} at {:?}) failed with: {}", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error, + ); + StorageProof::empty() + }, + }; + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_read_child_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteReadChildRequest, + ) -> Result { + if request.keys.is_empty() { + debug!("Invalid remote child read request sent by {}.", peer); + return Err(HandleRequestError::BadRequest("Remote read child request without keys.")) + } + + trace!( + "Remote read child request from {} ({} {} at {:?}).", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match ChildType::from_prefixed_key(prefixed_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err(sp_blockchain::Error::InvalidChildStorageKey), + }; + let proof = match child_info.and_then(|child_info| { + self.client.read_child_proof( + &BlockId::Hash(block), + &child_info, + &mut request.keys.iter().map(AsRef::as_ref), + ) + }) { + Ok(proof) => proof, + Err(error) => { + trace!( + "remote read child request from {} ({} {} at {:?}) failed with: {}", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error, + ); + StorageProof::empty() + }, + }; + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_header_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteHeaderRequest, + ) -> Result { + trace!("Remote header proof request from {} ({:?}).", peer, request.block); + + let block = Decode::decode(&mut request.block.as_ref())?; + let (header, proof) = match self.client.header_proof(&BlockId::Number(block)) { + Ok((header, proof)) => (header.encode(), proof), + Err(error) => { + trace!( + "Remote header proof request from {} ({:?}) failed with: {}.", + peer, + request.block, + error + ); + (Default::default(), StorageProof::empty()) + }, + }; + + let response = { + let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; + schema::v1::light::response::Response::RemoteHeaderResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_changes_request( + &mut self, + peer: &PeerId, + request: &schema::v1::light::RemoteChangesRequest, + ) -> Result { + trace!( + "Remote changes proof request from {} for key {} ({:?}..{:?}).", + peer, + if !request.storage_key.is_empty() { + format!( + "{} : {}", + HexDisplay::from(&request.storage_key), + HexDisplay::from(&request.key) + ) + } else { + HexDisplay::from(&request.key).to_string() + }, + request.first, + request.last, + ); + + let first = Decode::decode(&mut 
request.first.as_ref())?; + let last = Decode::decode(&mut request.last.as_ref())?; + let min = Decode::decode(&mut request.min.as_ref())?; + let max = Decode::decode(&mut request.max.as_ref())?; + let key = StorageKey(request.key.clone()); + let storage_key = if request.storage_key.is_empty() { + None + } else { + Some(PrefixedStorageKey::new_ref(&request.storage_key)) + }; + + let proof = + match self.client.key_changes_proof(first, last, min, max, storage_key, &key) { + Ok(proof) => proof, + Err(error) => { + trace!( + "Remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}.", + peer, + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), + request.first, + request.last, + error, + ); + + light::ChangesProof:: { + max_block: Zero::zero(), + proof: Vec::new(), + roots: BTreeMap::new(), + roots_proof: StorageProof::empty(), + } + }, + }; + + let response = { + let r = schema::v1::light::RemoteChangesResponse { + max: proof.max_block.encode(), + proof: proof.proof, + roots: proof + .roots + .into_iter() + .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) + .collect(), + roots_proof: proof.roots_proof.encode(), + }; + schema::v1::light::response::Response::RemoteChangesResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } +} + +#[derive(derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to send response.")] + SendResponse, + /// A bad request has been received. + #[display(fmt = "bad request: {}", _0)] + BadRequest(&'static str), + /// Encoding or decoding of some data failed. + #[display(fmt = "codec error: {}", _0)] + Codec(codec::Error), +} + +fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { + if let (Some(first), Some(last)) = (first, last) { + if first == last { + HexDisplay::from(first).to_string() + } else { + format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) + } + } else { + String::from("n/a") + } +} diff --git a/client/network/src/light_client_requests/sender.rs b/client/network/src/light_client_requests/sender.rs new file mode 100644 index 0000000000000..284db827594b4 --- /dev/null +++ b/client/network/src/light_client_requests/sender.rs @@ -0,0 +1,1294 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper for outgoing light client requests. +//! +//! Call [`LightClientRequestSender::request`](sender::LightClientRequestSender::request) +//! to send out light client requests. It will: +//! +//! 1. Build the request. +//! +//! 2. 
+//! 2. Forward the request to [`crate::request_responses::RequestResponsesBehaviour`] via
+//! [`OutEvent::SendRequest`](sender::OutEvent::SendRequest).
+//!
+//! 3. Wait for the response and forward the response via the [`futures::channel::oneshot::Sender`]
+//! provided earlier with [`LightClientRequestSender::request`](sender::LightClientRequestSender::
+//! request).
+
+use crate::{
+	config::ProtocolId,
+	protocol::message::BlockAttributes,
+	request_responses::{OutboundFailure, RequestFailure},
+	schema, PeerId,
+};
+use codec::{self, Decode, Encode};
+use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered};
+use prost::Message;
+use sc_client_api::light::{self, RemoteBodyRequest};
+use sc_peerset::ReputationChange;
+use sp_blockchain::Error as ClientError;
+use sp_runtime::traits::{Block, Header, NumberFor};
+use std::{
+	collections::{BTreeMap, HashMap, VecDeque},
+	pin::Pin,
+	sync::Arc,
+	task::{Context, Poll},
+};
+
+mod rep {
+	use super::*;
+
+	/// Reputation change for a peer when a request timed out.
+	pub const TIMEOUT: ReputationChange =
+		ReputationChange::new(-(1 << 8), "light client request timeout");
+	/// Reputation change for a peer when a request is refused.
+	pub const REFUSED: ReputationChange =
+		ReputationChange::new(-(1 << 8), "light client request refused");
+}
+
+/// Configuration options for [`LightClientRequestSender`].
+#[derive(Debug, Clone)]
+struct Config {
+	max_pending_requests: usize,
+	light_protocol: String,
+	block_protocol: String,
+}
+
+impl Config {
+	/// Create a new [`LightClientRequestSender`] configuration.
+	pub fn new(id: &ProtocolId) -> Self {
+		Self {
+			max_pending_requests: 128,
+			light_protocol: super::generate_protocol_name(id),
+			block_protocol: crate::block_request_handler::generate_protocol_name(id),
+		}
+	}
+}
+
+/// State machine helping to send out light client requests.
+pub struct LightClientRequestSender<B: Block> {
+	/// This behaviour's configuration.
+	config: Config,
+	/// Verifies that received responses are correct.
+	checker: Arc<dyn light::FetchChecker<B>>,
+	/// Peer information (addresses, their best block, etc.)
+	peers: HashMap<PeerId, PeerInfo<B>>,
+	/// Pending (local) requests.
+	pending_requests: VecDeque<PendingRequest<B>>,
+	/// Requests on their way to remote peers.
+	sent_requests: FuturesUnordered<
+		BoxFuture<
+			'static,
+			(SentRequest<B>, Result<Result<Vec<u8>, RequestFailure>, oneshot::Canceled>),
+		>,
+	>,
+	/// Handle to use for reporting misbehaviour of peers.
+	peerset: sc_peerset::PeersetHandle,
+}
+
+/// Augments a pending light client request with metadata.
+#[derive(Debug)]
+struct PendingRequest<B: Block> {
+	/// Remaining attempts.
+	attempts_left: usize,
+	/// The actual request.
+	request: Request<B>,
+}
+
+impl<B: Block> PendingRequest<B> {
+	fn new(req: Request<B>) -> Self {
+		Self {
+			// Number of retries + one for the initial attempt.
+			attempts_left: req.retries() + 1,
+			request: req,
+		}
+	}
+
+	fn into_sent(self, peer_id: PeerId) -> SentRequest<B> {
+		SentRequest { attempts_left: self.attempts_left, request: self.request, peer: peer_id }
+	}
+}
+
+/// Augments a light client request with metadata that is currently being sent to a remote.
+#[derive(Debug)]
+struct SentRequest<B: Block> {
+	/// Remaining attempts.
+	attempts_left: usize,
+	/// The actual request.
+	request: Request<B>,
+	/// The peer that the request is sent to.
+	peer: PeerId,
+}
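[Editor's note: taken together, the pieces above form a pull-based state machine: callers queue requests, and the network layer drives the `Stream` implementation to obtain `OutEvent::SendRequest` items. A minimal usage sketch, modelled on this file's own unit tests; the `checker`, `peerset_handle`, and `request` values are assumed to exist and the imports come from this module:]

    // Illustrative only; not part of the patch.
    async fn drive<B: Block>(
        mut sender: LightClientRequestSender<B>,
        peer: PeerId,
        request: light::RemoteCallRequest<B::Header>,
    ) {
        sender.inject_connected(peer);
        let (tx, _rx) = oneshot::channel();
        sender.request(Request::Call { request, sender: tx }).unwrap();

        // Polling the stream yields the serialized request, ready to hand to
        // the request-response behaviour.
        while let Some(OutEvent::SendRequest { target, request, pending_response, protocol_name }) =
            sender.next().await
        {
            // Dispatch via `crate::request_responses`; the response (or a
            // failure) flows back through `pending_response`'s counterpart.
            let _ = (target, request, pending_response, protocol_name);
        }
    }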
+
+impl<B: Block> SentRequest<B> {
+	fn into_pending(self) -> PendingRequest<B> {
+		PendingRequest { attempts_left: self.attempts_left, request: self.request }
+	}
+}
+
+impl<B: Block> Unpin for LightClientRequestSender<B> {}
+
+impl<B> LightClientRequestSender<B>
+where
+	B: Block,
+{
+	/// Construct a new light client handler.
+	pub fn new(
+		id: &ProtocolId,
+		checker: Arc<dyn light::FetchChecker<B>>,
+		peerset: sc_peerset::PeersetHandle,
+	) -> Self {
+		Self {
+			config: Config::new(id),
+			checker,
+			peers: Default::default(),
+			pending_requests: Default::default(),
+			sent_requests: Default::default(),
+			peerset,
+		}
+	}
+
+	/// We rely on external information about peers' best blocks as we lack the
+	/// means to determine it ourselves.
+	pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor<B>) {
+		if let Some(info) = self.peers.get_mut(peer) {
+			log::trace!("new best block for {:?}: {:?}", peer, num);
+			info.best_block = Some(num)
+		}
+	}
+
+	/// Issue a new light client request.
+	pub fn request(&mut self, req: Request<B>) -> Result<(), SendRequestError> {
+		if self.pending_requests.len() >= self.config.max_pending_requests {
+			return Err(SendRequestError::TooManyRequests)
+		}
+		self.pending_requests.push_back(PendingRequest::new(req));
+		Ok(())
+	}
+
+	/// Remove the given peer.
+	///
+	/// In-flight requests to the given peer might fail and be retried. See
+	/// [`<LightClientRequestSender as Stream>::poll_next`].
+	fn remove_peer(&mut self, peer: PeerId) {
+		self.peers.remove(&peer);
+	}
+
+	/// Process a local request's response from remote.
+	///
+	/// If successful, this will give us the actual, checked data we should be
+	/// sending back to the client, otherwise an error.
+	fn on_response(
+		&mut self,
+		peer: PeerId,
+		request: &Request<B>,
+		response: Response,
+	) -> Result<Reply<B>, Error> {
+		log::trace!("response from {}", peer);
+		match response {
+			Response::Light(r) => self.on_response_light(request, r),
+			Response::Block(r) => self.on_response_block(request, r),
+		}
+	}
+
+	fn on_response_light(
+		&mut self,
+		request: &Request<B>,
+		response: schema::v1::light::Response,
+	) -> Result<Reply<B>, Error> {
+		use schema::v1::light::response::Response;
+		match response.response {
+			Some(Response::RemoteCallResponse(response)) => {
+				if let Request::Call { request, .. } = request {
+					let proof = Decode::decode(&mut response.proof.as_ref())?;
+					let reply = self.checker.check_execution_proof(request, proof)?;
+					Ok(Reply::VecU8(reply))
+				} else {
+					Err(Error::UnexpectedResponse)
+				}
+			},
+			Some(Response::RemoteReadResponse(response)) => match request {
+				Request::Read { request, .. } => {
+					let proof = Decode::decode(&mut response.proof.as_ref())?;
+					let reply = self.checker.check_read_proof(&request, proof)?;
+					Ok(Reply::MapVecU8OptVecU8(reply))
+				},
+				Request::ReadChild { request, .. } => {
+					let proof = Decode::decode(&mut response.proof.as_ref())?;
+					let reply = self.checker.check_read_child_proof(&request, proof)?;
+					Ok(Reply::MapVecU8OptVecU8(reply))
+				},
+				_ => Err(Error::UnexpectedResponse),
+			},
+			Some(Response::RemoteChangesResponse(response)) => {
+				if let Request::Changes { request, ..
} = request { + let max_block = Decode::decode(&mut response.max.as_ref())?; + let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; + let roots = { + let mut r = BTreeMap::new(); + for pair in response.roots { + let k = Decode::decode(&mut pair.fst.as_ref())?; + let v = Decode::decode(&mut pair.snd.as_ref())?; + r.insert(k, v); + } + r + }; + let reply = self.checker.check_changes_proof( + &request, + light::ChangesProof { + max_block, + proof: response.proof, + roots, + roots_proof, + }, + )?; + Ok(Reply::VecNumberU32(reply)) + } else { + Err(Error::UnexpectedResponse) + } + }, + Some(Response::RemoteHeaderResponse(response)) => { + if let Request::Header { request, .. } = request { + let header = if response.header.is_empty() { + None + } else { + Some(Decode::decode(&mut response.header.as_ref())?) + }; + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_header_proof(&request, header, proof)?; + Ok(Reply::Header(reply)) + } else { + Err(Error::UnexpectedResponse) + } + }, + None => Err(Error::UnexpectedResponse), + } + } + + fn on_response_block( + &mut self, + request: &Request, + response: schema::v1::BlockResponse, + ) -> Result, Error> { + let request = if let Request::Body { request, .. } = &request { + request + } else { + return Err(Error::UnexpectedResponse) + }; + + let body: Vec<_> = match response.blocks.into_iter().next() { + Some(b) => b.body, + None => return Err(Error::UnexpectedResponse), + }; + + let body = body + .into_iter() + .map(|extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) + .collect::>()?; + + let body = self.checker.check_body_proof(&request, body)?; + Ok(Reply::Extrinsics(body)) + } + + /// Signal that the node is connected to the given peer. + pub fn inject_connected(&mut self, peer: PeerId) { + let prev_entry = self.peers.insert(peer, Default::default()); + debug_assert!( + prev_entry.is_none(), + "Expect `inject_connected` to be called for disconnected peer.", + ); + } + + /// Signal that the node disconnected from the given peer. + pub fn inject_disconnected(&mut self, peer: PeerId) { + self.remove_peer(peer) + } +} + +impl Stream for LightClientRequestSender { + type Item = OutEvent; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // If we have received responses to previously sent requests, check them and pass them on. + while let Poll::Ready(Some((sent_request, request_result))) = + self.sent_requests.poll_next_unpin(cx) + { + if let Some(info) = self.peers.get_mut(&sent_request.peer) { + if info.status != PeerStatus::Busy { + // If we get here, something is wrong with our internal handling of peer status + // information. At any time, a single peer processes at most one request from + // us. A malicious peer should not be able to get us here. It is our own fault + // and must be fixed! + panic!("unexpected peer status {:?} for {}", info.status, sent_request.peer); + } + + info.status = PeerStatus::Idle; // Make peer available again. 
+ } + + let request_result = match request_result { + Ok(r) => r, + Err(oneshot::Canceled) => { + log::debug!("Oneshot for request to peer {} was canceled.", sent_request.peer); + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("no response from peer"), + ); + self.pending_requests.push_back(sent_request.into_pending()); + continue + }, + }; + + let decoded_request_result = request_result.map(|response| { + if sent_request.request.is_block_request() { + schema::v1::BlockResponse::decode(&response[..]).map(|r| Response::Block(r)) + } else { + schema::v1::light::Response::decode(&response[..]).map(|r| Response::Light(r)) + } + }); + + let response = match decoded_request_result { + Ok(Ok(response)) => response, + Ok(Err(e)) => { + log::debug!( + "Failed to decode response from peer {}: {:?}.", + sent_request.peer, + e + ); + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("invalid response from peer"), + ); + self.pending_requests.push_back(sent_request.into_pending()); + continue + }, + Err(e) => { + log::debug!("Request to peer {} failed with {:?}.", sent_request.peer, e); + + match e { + RequestFailure::NotConnected => { + self.remove_peer(sent_request.peer); + self.pending_requests.push_back(sent_request.into_pending()); + }, + RequestFailure::UnknownProtocol => { + debug_assert!( + false, + "Light client and block request protocol should be known when \ + sending requests.", + ); + }, + RequestFailure::Refused => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer(sent_request.peer, rep::REFUSED); + self.pending_requests.push_back(sent_request.into_pending()); + }, + RequestFailure::Obsolete => { + debug_assert!( + false, + "Can not receive `RequestFailure::Obsolete` after dropping the \ + response receiver.", + ); + self.pending_requests.push_back(sent_request.into_pending()); + }, + RequestFailure::Network(OutboundFailure::Timeout) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer(sent_request.peer, rep::TIMEOUT); + self.pending_requests.push_back(sent_request.into_pending()); + }, + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal( + "peer does not support light client or block request protocol", + ), + ); + self.pending_requests.push_back(sent_request.into_pending()); + }, + RequestFailure::Network(OutboundFailure::DialFailure) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("failed to dial peer"), + ); + self.pending_requests.push_back(sent_request.into_pending()); + }, + RequestFailure::Network(OutboundFailure::ConnectionClosed) => { + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("connection to peer closed"), + ); + self.pending_requests.push_back(sent_request.into_pending()); + }, + } + + continue + }, + }; + + match self.on_response(sent_request.peer, &sent_request.request, response) { + Ok(reply) => sent_request.request.return_reply(Ok(reply)), + Err(Error::UnexpectedResponse) => { + log::debug!("Unexpected response from peer {}.", sent_request.peer); + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("unexpected response from peer"), + ); + 
self.pending_requests.push_back(sent_request.into_pending()); + }, + Err(other) => { + log::debug!( + "error handling response from peer {}: {}", + sent_request.peer, + other + ); + self.remove_peer(sent_request.peer); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("invalid response from peer"), + ); + self.pending_requests.push_back(sent_request.into_pending()) + }, + } + } + + // If we have a pending request to send, try to find an available peer and send it. + while let Some(mut pending_request) = self.pending_requests.pop_front() { + if pending_request.attempts_left == 0 { + pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); + continue + } + + let protocol = if pending_request.request.is_block_request() { + self.config.block_protocol.clone() + } else { + self.config.light_protocol.clone() + }; + + // Out of all idle peers, find one who's best block is high enough, choose any idle peer + // if none exists. + let mut peer = None; + for (peer_id, peer_info) in self.peers.iter_mut() { + if peer_info.status == PeerStatus::Idle { + match peer_info.best_block { + Some(n) if n >= pending_request.request.required_block() => { + peer = Some((*peer_id, peer_info)); + break + }, + _ => peer = Some((*peer_id, peer_info)), + } + } + } + + // Break in case there is no idle peer. + let (peer_id, peer_info) = match peer { + Some((peer_id, peer_info)) => (peer_id, peer_info), + None => { + self.pending_requests.push_front(pending_request); + log::debug!("No peer available to send request to."); + + break + }, + }; + + let request_bytes = match pending_request.request.serialize_request() { + Ok(bytes) => bytes, + Err(error) => { + log::debug!("failed to serialize request: {}", error); + pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); + continue + }, + }; + + let (tx, rx) = oneshot::channel(); + + peer_info.status = PeerStatus::Busy; + + pending_request.attempts_left -= 1; + + self.sent_requests + .push(async move { (pending_request.into_sent(peer_id), rx.await) }.boxed()); + + return Poll::Ready(Some(OutEvent::SendRequest { + target: peer_id, + request: request_bytes, + pending_response: tx, + protocol_name: protocol, + })) + } + + Poll::Pending + } +} + +/// Events returned by [`LightClientRequestSender`]. +#[derive(Debug)] +pub enum OutEvent { + /// Emit a request to be send out on the network e.g. via [`crate::request_responses`]. + SendRequest { + /// The remote peer to send the request to. + target: PeerId, + /// The encoded request. + request: Vec, + /// The [`oneshot::Sender`] channel to pass the response to. + pending_response: oneshot::Sender, RequestFailure>>, + /// The name of the protocol to use to send the request. + protocol_name: String, + }, +} + +/// Incoming response from remote. +#[derive(Debug, Clone)] +pub enum Response { + /// Incoming light response from remote. + Light(schema::v1::light::Response), + /// Incoming block response from remote. + Block(schema::v1::BlockResponse), +} + +/// Error returned by [`LightClientRequestSender::request`]. +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum SendRequestError { + /// There are currently too many pending request. + #[display(fmt = "too many pending requests")] + TooManyRequests, +} + +/// Error type to propagate errors internally. +#[derive(Debug, derive_more::Display, derive_more::From)] +enum Error { + /// The response type does not correspond to the issued request. 
+ #[display(fmt = "unexpected response")] + UnexpectedResponse, + /// Encoding or decoding of some data failed. + #[display(fmt = "codec error: {}", _0)] + Codec(codec::Error), + /// The chain client errored. + #[display(fmt = "client error: {}", _0)] + Client(ClientError), +} + +/// The data to send back to the light client over the oneshot channel. +// It is unified here in order to be able to return it as a function +// result instead of delivering it to the client as a side effect of +// response processing. +#[derive(Debug)] +enum Reply { + VecU8(Vec), + VecNumberU32(Vec<(::Number, u32)>), + MapVecU8OptVecU8(HashMap, Option>>), + Header(B::Header), + Extrinsics(Vec), +} + +/// Information we have about some peer. +#[derive(Debug)] +struct PeerInfo { + best_block: Option>, + status: PeerStatus, +} + +impl Default for PeerInfo { + fn default() -> Self { + PeerInfo { best_block: None, status: PeerStatus::Idle } + } +} + +/// A peer is either idle or busy processing a request from us. +#[derive(Debug, Clone, PartialEq, Eq)] +enum PeerStatus { + /// The peer is available. + Idle, + /// We wait for the peer to return us a response for the given request ID. + Busy, +} + +/// The possible light client requests we support. +/// +/// The associated `oneshot::Sender` will be used to convey the result of +/// their request back to them (cf. `Reply`). +// This is modeled after light_dispatch.rs's `RequestData` which is not +// used because we currently only support a subset of those. +#[derive(Debug)] +pub enum Request { + /// Remote body request. + Body { + /// Request. + request: RemoteBodyRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, ClientError>>, + }, + /// Remote header request. + Header { + /// Request. + request: light::RemoteHeaderRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender>, + }, + /// Remote read request. + Read { + /// Request. + request: light::RemoteReadRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, Option>>, ClientError>>, + }, + /// Remote read child request. + ReadChild { + /// Request. + request: light::RemoteReadChildRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, Option>>, ClientError>>, + }, + /// Remote call request. + Call { + /// Request. + request: light::RemoteCallRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, ClientError>>, + }, + /// Remote changes request. + Changes { + /// Request. + request: light::RemoteChangesRequest, + /// [`oneshot::Sender`] to return response. + sender: oneshot::Sender, u32)>, ClientError>>, + }, +} + +impl Request { + fn is_block_request(&self) -> bool { + matches!(self, Request::Body { .. }) + } + + fn required_block(&self) -> NumberFor { + match self { + Request::Body { request, .. } => *request.header.number(), + Request::Header { request, .. } => request.block, + Request::Read { request, .. } => *request.header.number(), + Request::ReadChild { request, .. } => *request.header.number(), + Request::Call { request, .. } => *request.header.number(), + Request::Changes { request, .. } => request.max_block.0, + } + } + + fn retries(&self) -> usize { + let rc = match self { + Request::Body { request, .. } => request.retry_count, + Request::Header { request, .. } => request.retry_count, + Request::Read { request, .. } => request.retry_count, + Request::ReadChild { request, .. } => request.retry_count, + Request::Call { request, .. 
} => request.retry_count, + Request::Changes { request, .. } => request.retry_count, + }; + rc.unwrap_or(0) + } + + fn serialize_request(&self) -> Result, prost::EncodeError> { + let request = match self { + Request::Body { request, .. } => { + let rq = schema::v1::BlockRequest { + fields: BlockAttributes::BODY.to_be_u32(), + from_block: Some(schema::v1::block_request::FromBlock::Hash( + request.header.hash().encode(), + )), + to_block: Default::default(), + direction: schema::v1::Direction::Ascending as i32, + max_blocks: 1, + support_multiple_justifications: true, + }; + + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + return Ok(buf) + }, + Request::Header { request, .. } => { + let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; + schema::v1::light::request::Request::RemoteHeaderRequest(r) + }, + Request::Read { request, .. } => { + let r = schema::v1::light::RemoteReadRequest { + block: request.block.encode(), + keys: request.keys.clone(), + }; + schema::v1::light::request::Request::RemoteReadRequest(r) + }, + Request::ReadChild { request, .. } => { + let r = schema::v1::light::RemoteReadChildRequest { + block: request.block.encode(), + storage_key: request.storage_key.clone().into_inner(), + keys: request.keys.clone(), + }; + schema::v1::light::request::Request::RemoteReadChildRequest(r) + }, + Request::Call { request, .. } => { + let r = schema::v1::light::RemoteCallRequest { + block: request.block.encode(), + method: request.method.clone(), + data: request.call_data.clone(), + }; + schema::v1::light::request::Request::RemoteCallRequest(r) + }, + Request::Changes { request, .. } => { + let r = schema::v1::light::RemoteChangesRequest { + first: request.first_block.1.encode(), + last: request.last_block.1.encode(), + min: request.tries_roots.1.encode(), + max: request.max_block.1.encode(), + storage_key: request + .storage_key + .clone() + .map(|s| s.into_inner()) + .unwrap_or_default(), + key: request.key.clone(), + }; + schema::v1::light::request::Request::RemoteChangesRequest(r) + }, + }; + + let rq = schema::v1::light::Request { request: Some(request) }; + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + Ok(buf) + } + + fn return_reply(self, result: Result, ClientError>) { + fn send(item: T, sender: oneshot::Sender) { + let _ = sender.send(item); // It is okay if the other end already hung up. 
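[Editor's note: the `send` helper above leans on a property of `futures::channel::oneshot` worth spelling out: sending on a channel whose receiver was dropped hands the value back as an `Err` rather than panicking, which is why discarding the result with `let _ =` is safe. A minimal sketch of that behaviour, illustrative only:]

    use futures::channel::oneshot;

    fn hung_up_receiver_is_fine() {
        let (tx, rx) = oneshot::channel::<u32>();
        drop(rx); // The requester has gone away.
        // `send` returns the value back as `Err` instead of panicking.
        assert!(tx.send(42).is_err());
    }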
+ } + match self { + Request::Body { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), + }, + Request::Header { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Header(x)) => send(Ok(x), sender), + reply => { + log::error!("invalid reply for header request: {:?}, {:?}", reply, request) + }, + }, + Request::Read { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), + }, + Request::ReadChild { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => { + log::error!("invalid reply for read child request: {:?}, {:?}", reply, request) + }, + }, + Request::Call { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::VecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), + }, + Request::Changes { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), + reply => { + log::error!("invalid reply for changes request: {:?}, {:?}", reply, request) + }, + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + light_client_requests::tests::{dummy_header, peerset, protocol_id, DummyFetchChecker}, + request_responses::OutboundFailure, + }; + + use assert_matches::assert_matches; + use futures::{channel::oneshot, executor::block_on, poll}; + use sc_client_api::StorageProof; + use sp_core::storage::ChildInfo; + use sp_runtime::{generic::Header, traits::BlakeTwo256}; + use std::{collections::HashSet, iter::FromIterator}; + + fn empty_proof() -> Vec { + StorageProof::empty().encode() + } + + #[test] + fn removes_peer_if_told() { + let peer = PeerId::random(); + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(DummyFetchChecker { ok: true, _mark: std::marker::PhantomData }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len()); + + sender.inject_disconnected(peer); + assert_eq!(0, sender.peers.len()); + } + + type Block = + sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; + + #[test] + fn body_request_fields_encoded_properly() { + let (sender, _receiver) = oneshot::channel(); + let request = Request::::Body { + request: RemoteBodyRequest { header: dummy_header(), retry_count: None }, + sender, + }; + let serialized_request = request.serialize_request().unwrap(); + let deserialized_request = + schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); + assert!(BlockAttributes::from_be_u32(deserialized_request.fields) + .unwrap() + .contains(BlockAttributes::BODY)); + } + + #[test] + fn disconnects_from_peer_if_request_times_out() { + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer0); + sender.inject_connected(peer1); + + assert_eq!( + 
HashSet::from_iter(&[peer0.clone(), peer1.clone()]), + sender.peers.keys().collect::>(), + "Expect knowledge of two peers." + ); + + assert!(sender.pending_requests.is_empty(), "Expect no pending request."); + assert!(sender.sent_requests.is_empty(), "Expect no sent request."); + + // Issue a request! + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); + assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); + + let OutEvent::SendRequest { target, pending_response, .. } = + block_on(sender.next()).unwrap(); + assert!(target == peer0 || target == peer1, "Expect request to originate from known peer."); + + // And we should have one busy peer. + assert!({ + let (idle, busy): (Vec<_>, Vec<_>) = + sender.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); + idle.len() == 1 && + busy.len() == 1 && (idle[0].0 == &peer0 || busy[0].0 == &peer0) && + (idle[0].0 == &peer1 || busy[0].0 == &peer1) + }); + + assert_eq!(0, sender.pending_requests.len(), "Expect no pending request."); + assert_eq!(1, sender.sent_requests.len(), "Expect one request to be sent."); + + // Report first attempt as timed out. + pending_response + .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) + .unwrap(); + + // Expect a new request to be issued. + let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); + + assert_eq!(1, sender.peers.len(), "Expect peer to be removed."); + assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); + assert_eq!(1, sender.sent_requests.len(), "Expect new request to be issued."); + + // Report second attempt as timed out. + pending_response + .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) + .unwrap(); + assert_matches!( + block_on(async { poll!(sender.next()) }), + Poll::Pending, + "Expect sender to not issue another attempt.", + ); + assert_matches!( + block_on(chan.1).unwrap(), + Err(ClientError::RemoteFetchFailed), + "Expect request failure to be reported.", + ); + assert_eq!(0, sender.peers.len(), "Expect no peer to be left"); + assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); + assert_eq!(0, sender.sent_requests.len(), "Expect no other request to be in progress."); + } + + #[test] + fn disconnects_from_peer_on_incorrect_response() { + let peer = PeerId::random(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: false, + // ^--- Making sure the response data check fails. + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len(), "Expect one peer."); + + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); + + assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); + assert_eq!(0, sender.sent_requests.len(), "Expect zero sent requests."); + + let OutEvent::SendRequest { pending_response, .. 
} = block_on(sender.next()).unwrap(); + assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); + assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); + + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + let response = schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + }; + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + + pending_response.send(Ok(response)).unwrap(); + + assert_matches!( + block_on(async { poll!(sender.next()) }), + Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + + assert!(sender.peers.is_empty(), "Expect no peers to be left."); + assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); + assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); + } + + #[test] + fn disconnects_from_peer_on_wrong_response_type() { + let peer = PeerId::random(); + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len(), "Expect one peer."); + + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); + + assert_eq!(1, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()); + let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); + assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); + assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! + let response = schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + }; + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + + pending_response.send(Ok(response)).unwrap(); + assert_matches!( + block_on(async { poll!(sender.next()) }), + Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + + assert!(sender.peers.is_empty(), "Expect no peers to be left."); + assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); + assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); + } + + #[test] + fn receives_remote_failure_after_retry_count_failures() { + let peers = (0..4).map(|_| PeerId::random()).collect::>(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: false, + // ^--- Making sure the response data check fails. 
+ _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + for peer in &peers { + sender.inject_connected(*peer); + } + assert_eq!(4, sender.peers.len(), "Expect four peers."); + + let mut chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(3), // Attempt up to three retries. + }; + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); + + assert_eq!(1, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()); + let mut pending_response = match block_on(sender.next()).unwrap() { + OutEvent::SendRequest { pending_response, .. } => Some(pending_response), + }; + assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); + assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); + + for (i, _peer) in peers.iter().enumerate() { + // Construct an invalid response + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + let response = schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + }; + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + pending_response.take().unwrap().send(Ok(response)).unwrap(); + + if i < 3 { + pending_response = match block_on(sender.next()).unwrap() { + OutEvent::SendRequest { pending_response, .. } => Some(pending_response), + }; + assert_matches!(chan.1.try_recv(), Ok(None)) + } else { + // Last peer and last attempt. + assert_matches!( + block_on(async { poll!(sender.next()) }), + Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) + } + } + } + + fn issue_request(request: Request) { + let peer = PeerId::random(); + + let (_peer_set, peer_set_handle) = peerset(); + let mut sender = LightClientRequestSender::::new( + &protocol_id(), + Arc::new(crate::light_client_requests::tests::DummyFetchChecker { + ok: true, + _mark: std::marker::PhantomData, + }), + peer_set_handle, + ); + + sender.inject_connected(peer); + assert_eq!(1, sender.peers.len(), "Expect one peer."); + + let response = match request { + Request::Body { .. } => unimplemented!(), + Request::Header { .. } => { + let r = schema::v1::light::RemoteHeaderResponse { + header: dummy_header().encode(), + proof: empty_proof(), + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)), + } + }, + Request::Read { .. } => { + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + } + }, + Request::ReadChild { .. } => { + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + } + }, + Request::Call { .. } => { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + } + }, + Request::Changes { .. 
} => { + let r = schema::v1::light::RemoteChangesResponse { + max: std::iter::repeat(1).take(32).collect(), + proof: Vec::new(), + roots: Vec::new(), + roots_proof: empty_proof(), + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), + } + }, + }; + + let response = { + let mut data = Vec::new(); + response.encode(&mut data).unwrap(); + data + }; + + sender.request(request).unwrap(); + + assert_eq!(1, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()); + let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); + assert_eq!(0, sender.pending_requests.len()); + assert_eq!(1, sender.sent_requests.len()); + + pending_response.send(Ok(response)).unwrap(); + assert_matches!( + block_on(async { poll!(sender.next()) }), + Poll::Pending, + "Expect sender to not issue another attempt, given that there is no peer left.", + ); + + assert_eq!(0, sender.pending_requests.len()); + assert_eq!(0, sender.sent_requests.len()) + } + + #[test] + fn receives_remote_call_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + issue_request(Request::Call { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + issue_request(Request::Read { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_child_response() { + let mut chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); + let request = light::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: child_info.prefixed_storage_key(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + issue_request(Request::ReadChild { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_header_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + issue_request(Request::Header { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_changes_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + issue_request(Request::Changes { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } +} diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs index db2b6429304bb..6f5f031bf35df 100644 --- a/client/network/src/network_state.rs +++ b/client/network/src/network_state.rs @@ -1,6 +1,6 @@ // This file is part of 
Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,15 +22,17 @@ use libp2p::{core::ConnectedPoint, Multiaddr}; use serde::{Deserialize, Serialize}; -use slog_derive::SerdeValue; -use std::{collections::{HashMap, HashSet}, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + time::Duration, +}; /// Returns general information about the networking. /// /// Meant for general diagnostic purposes. /// /// **Warning**: This API is not stable. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, SerdeValue)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct NetworkState { /// PeerId of the local node. @@ -57,12 +59,6 @@ pub struct Peer { pub version_string: Option, /// Latest ping duration with this node. pub latest_ping_time: Option, - /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols - /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. - pub enabled: bool, - /// If true, the peer is "open", which means that we have a Substrate-related protocol - /// with this peer. - pub open: bool, /// List of addresses known for this node. pub known_addresses: HashSet, } @@ -97,13 +93,9 @@ pub enum PeerEndpoint { impl From for PeerEndpoint { fn from(endpoint: ConnectedPoint) -> Self { match endpoint { - ConnectedPoint::Dialer { address } => - PeerEndpoint::Dialing(address), + ConnectedPoint::Dialer { address } => Self::Dialing(address), ConnectedPoint::Listener { local_addr, send_back_addr } => - PeerEndpoint::Listening { - local_addr, - send_back_addr - } + Self::Listening { local_addr, send_back_addr }, } } } diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 084172ee57c4f..eaeb0bee98f2c 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,18 +18,24 @@ //! On-demand requests service. -use crate::light_client_handler; +use crate::light_client_requests; use futures::{channel::oneshot, prelude::*}; use parking_lot::Mutex; use sc_client_api::{ - FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, - RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, StorageProof, ChangesProof, + ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, + RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, + StorageProof, }; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; +use std::{ + collections::HashMap, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; /// Implements the `Fetcher` trait of the client. 
Makes it possible for the light client to perform /// network requests for some state. @@ -45,10 +51,21 @@ pub struct OnDemand { /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in /// asynchronous Rust at the moment - requests_queue: Mutex>>>, + requests_queue: + Mutex>>>, /// Sending side of `requests_queue`. - requests_send: TracingUnboundedSender>, + requests_send: TracingUnboundedSender>, +} + +#[derive(Debug, thiserror::Error)] +#[error("AlwaysBadChecker")] +struct ErrorAlwaysBadChecker; + +impl Into for ErrorAlwaysBadChecker { + fn into(self) -> ClientError { + ClientError::Application(Box::new(self)) + } } /// Dummy implementation of `FetchChecker` that always assumes that responses are bad. @@ -65,15 +82,15 @@ impl FetchChecker for AlwaysBadChecker { _remote_header: Option, _remote_proof: StorageProof, ) -> Result { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_read_proof( &self, _request: &RemoteReadRequest, _remote_proof: StorageProof, - ) -> Result,Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + ) -> Result, Option>>, ClientError> { + Err(ErrorAlwaysBadChecker.into()) } fn check_read_child_proof( @@ -81,7 +98,7 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteReadChildRequest, _remote_proof: StorageProof, ) -> Result, Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_execution_proof( @@ -89,23 +106,23 @@ impl FetchChecker for AlwaysBadChecker { _request: &RemoteCallRequest, _remote_proof: StorageProof, ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_changes_proof( &self, _request: &RemoteChangesRequest, - _remote_proof: ChangesProof + _remote_proof: ChangesProof, ) -> Result, u32)>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } fn check_body_proof( &self, _request: &RemoteBodyRequest, - _body: Vec + _body: Vec, ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) + Err(ErrorAlwaysBadChecker.into()) } } @@ -118,11 +135,7 @@ where let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); let requests_queue = Mutex::new(Some(requests_queue)); - OnDemand { - checker, - requests_queue, - requests_send, - } + Self { checker, requests_queue, requests_send } } /// Get checker reference. @@ -137,9 +150,9 @@ where /// /// If this function returns `None`, that means that the receiver has already been extracted in /// the past, and therefore that something already handles the requests. 
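[Editor's note: the doc comment above describes a "hand out the receiver exactly once" contract. A minimal sketch of that pattern in isolation; the type and field names here are illustrative, not from the patch:]

    use parking_lot::Mutex;

    struct RequestsQueue<R> {
        receiver: Mutex<Option<R>>,
    }

    impl<R> RequestsQueue<R> {
        /// The first caller gets `Some(receiver)`; every later caller gets
        /// `None`, signalling that something already handles the requests.
        fn extract_receiver(&self) -> Option<R> {
            self.receiver.lock().take()
        }
    }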
- pub(crate) fn extract_receiver(&self) - -> Option>> - { + pub(crate) fn extract_receiver( + &self, + ) -> Option>> { self.requests_queue.lock().take() } } @@ -159,7 +172,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Header { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Header { request, sender }); RemoteResponse { receiver } } @@ -167,7 +180,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Read { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Read { request, sender }); RemoteResponse { receiver } } @@ -178,7 +191,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::ReadChild { request, sender }); + .unbounded_send(light_client_requests::sender::Request::ReadChild { request, sender }); RemoteResponse { receiver } } @@ -186,7 +199,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Call { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Call { request, sender }); RemoteResponse { receiver } } @@ -197,7 +210,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Changes { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Changes { request, sender }); RemoteResponse { receiver } } @@ -205,7 +218,7 @@ where let (sender, receiver) = oneshot::channel(); let _ = self .requests_send - .unbounded_send(light_client_handler::Request::Body { request, sender }); + .unbounded_send(light_client_requests::sender::Request::Body { request, sender }); RemoteResponse { receiver } } } diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index e69ad2b17e59c..141cc59247d1a 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -1,37 +1,47 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . 
+use crate::utils::interval; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::Multiaddr; -use libp2p::core::connection::{ConnectionId, ListenerId}; -use libp2p::core::{ConnectedPoint, either::EitherOutput, PeerId, PublicKey}; -use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::identify::{Identify, IdentifyEvent, IdentifyInfo}; -use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; -use log::{debug, trace, error}; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + either::EitherOutput, + ConnectedPoint, PeerId, PublicKey, + }, + identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}, + ping::{Ping, PingConfig, PingEvent, PingSuccess}, + swarm::{ + IntoProtocolsHandler, IntoProtocolsHandlerSelect, NetworkBehaviour, NetworkBehaviourAction, + PollParameters, ProtocolsHandler, + }, + Multiaddr, +}; +use log::{debug, error, trace}; use smallvec::SmallVec; -use std::{error, io}; -use std::collections::hash_map::Entry; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use wasm_timer::Instant; -use crate::utils::interval; +use std::{ + collections::hash_map::Entry, + error, io, + pin::Pin, + task::{Context, Poll}, + time::{Duration, Instant}, +}; /// Time after we disconnect from a node before we purge its information from the cache. const CACHE_EXPIRE: Duration = Duration::from_secs(10 * 60); @@ -68,27 +78,20 @@ impl NodeInfo { fn new(endpoint: ConnectedPoint) -> Self { let mut endpoints = SmallVec::new(); endpoints.push(endpoint); - NodeInfo { - info_expire: None, - endpoints, - client_version: None, - latest_ping: None, - } + Self { info_expire: None, endpoints, client_version: None, latest_ping: None } } } impl PeerInfoBehaviour { /// Builds a new `PeerInfoBehaviour`. - pub fn new( - user_agent: String, - local_public_key: PublicKey, - ) -> Self { + pub fn new(user_agent: String, local_public_key: PublicKey) -> Self { let identify = { - let proto_version = "/substrate/1.0".to_string(); - Identify::new(proto_version, user_agent, local_public_key) + let cfg = IdentifyConfig::new("/substrate/1.0".to_string(), local_public_key) + .with_agent_version(user_agent); + Identify::new(cfg) }; - PeerInfoBehaviour { + Self { ping: Ping::new(PingConfig::new()), identify, nodes_info: FnvHashMap::default(), @@ -135,13 +138,15 @@ pub struct Node<'a>(&'a NodeInfo); impl<'a> Node<'a> { /// Returns the endpoint of an established connection to the peer. - pub fn endpoint(&self) -> &'a ConnectedPoint { - &self.0.endpoints[0] // `endpoints` are non-empty by definition + /// + /// Returns `None` if we are disconnected from the node. + pub fn endpoint(&self) -> Option<&'a ConnectedPoint> { + self.0.endpoints.get(0) } /// Returns the latest version information we know of. pub fn client_version(&self) -> Option<&'a str> { - self.0.client_version.as_ref().map(|s| &s[..]) + self.0.client_version.as_deref() } /// Returns the latest ping time we know of for this node. 
`None` if we never successfully @@ -167,7 +172,7 @@ pub enum PeerInfoEvent { impl NetworkBehaviour for PeerInfoBehaviour { type ProtocolsHandler = IntoProtocolsHandlerSelect< ::ProtocolsHandler, - ::ProtocolsHandler + ::ProtocolsHandler, >; type OutEvent = PeerInfoEvent; @@ -186,13 +191,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.inject_connected(peer_id); } - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.ping.inject_connection_established(peer_id, conn, endpoint); self.identify.inject_connection_established(peer_id, conn, endpoint); - match self.nodes_info.entry(peer_id.clone()) { + match self.nodes_info.entry(*peer_id) { Entry::Vacant(e) => { e.insert(NodeInfo::new(endpoint.clone())); - } + }, Entry::Occupied(e) => { let e = e.into_mut(); if e.info_expire.as_ref().map(|exp| *exp < Instant::now()).unwrap_or(false) { @@ -201,11 +211,16 @@ impl NetworkBehaviour for PeerInfoBehaviour { } e.info_expire = None; e.endpoints.push(endpoint.clone()); - } + }, } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.ping.inject_connection_closed(peer_id, conn, endpoint); self.identify.inject_connection_closed(peer_id, conn, endpoint); @@ -233,7 +248,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { &mut self, peer_id: PeerId, connection: ConnectionId, - event: <::Handler as ProtocolsHandler>::OutEvent + event: <::Handler as ProtocolsHandler>::OutEvent, ) { match event { EitherOutput::First(event) => self.ping.inject_event(peer_id, connection, event), @@ -241,7 +256,12 @@ impl NetworkBehaviour for PeerInfoBehaviour { } } - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error, + ) { self.ping.inject_addr_reach_failure(peer_id, addr, error); self.identify.inject_addr_reach_failure(peer_id, addr, error); } @@ -251,14 +271,19 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.inject_dial_failure(peer_id); } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.ping.inject_new_listen_addr(addr); - self.identify.inject_new_listen_addr(addr); + fn inject_new_listener(&mut self, id: ListenerId) { + self.ping.inject_new_listener(id); + self.identify.inject_new_listener(id); + } + + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.ping.inject_new_listen_addr(id, addr); + self.identify.inject_new_listen_addr(id, addr); } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.ping.inject_expired_listen_addr(addr); - self.identify.inject_expired_listen_addr(addr); + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.ping.inject_expired_listen_addr(id, addr); + self.identify.inject_expired_listen_addr(id, addr); } fn inject_new_external_addr(&mut self, addr: &Multiaddr) { @@ -266,6 +291,11 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.inject_new_external_addr(addr); } + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_expired_external_addr(addr); + 
self.identify.inject_expired_external_addr(addr); + } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { self.ping.inject_listener_error(id, err); self.identify.inject_listener_error(id, err); @@ -285,7 +315,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent > - > { + >{ loop { match self.ping.poll(cx, params) { Poll::Pending => break, @@ -302,27 +332,35 @@ impl NetworkBehaviour for PeerInfoBehaviour { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, - event: EitherOutput::First(event) + event: EitherOutput::First(event), + }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), } } loop { match self.identify.poll(cx, params) { Poll::Pending => break, - Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { - match event { - IdentifyEvent::Received { peer_id, info, .. } => { - self.handle_identify_report(&peer_id, &info); - let event = PeerInfoEvent::Identified { peer_id, info }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); - } - IdentifyEvent::Error { peer_id, error } => - debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error), - IdentifyEvent::Sent { .. } => {} - } + Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) => match event { + IdentifyEvent::Received { peer_id, info, .. } => { + self.handle_identify_report(&peer_id, &info); + let event = PeerInfoEvent::Identified { peer_id, info }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) + }, + IdentifyEvent::Error { peer_id, error } => { + debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error) + }, + IdentifyEvent::Pushed { .. } => {}, + IdentifyEvent::Sent { .. } => {}, }, Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), @@ -332,10 +370,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, - event: EitherOutput::Second(event) + event: EitherOutput::Second(event), + }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ac74af0f5ca94..e22d96f32aeb8 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,74 +17,84 @@ // along with this program. If not, see . use crate::{ - ExHashT, chain::Client, - config::{BoxFinalityProofRequestBuilder, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, + config::{self, ProtocolId, WarpSyncProvider}, error, + request_responses::RequestFailure, + schema::v1::StateResponse, utils::{interval, LruHashSet}, + warp_request_handler::EncodedProof, }; -use bytes::{Bytes, BytesMut}; -use futures::{prelude::*, stream::FuturesUnordered}; -use generic_proto::{GenericProto, GenericProtoOut}; -use libp2p::{Multiaddr, PeerId}; -use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; -use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_consensus::{ - BlockOrigin, - block_validation::BlockAnnounceValidator, - import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} -}; +use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; -use sp_runtime::{generic::BlockId, ConsensusEngineId, Justification}; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub +use futures::{channel::oneshot, prelude::*}; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, + }, + request_response::OutboundFailure, + swarm::{ + IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, + ProtocolsHandler, + }, + Multiaddr, PeerId, +}; +use log::{debug, error, info, log, trace, warn, Level}; +use message::{ + generic::{Message as GenericMessage, Roles}, + BlockAnnounce, Message, }; +use notifications::{Notifications, NotificationsOut}; +use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; +use prost::Message as _; +use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, Message}; -use message::generic::{Message as GenericMessage, Roles}; -use prometheus_endpoint::{ - Registry, Gauge, Counter, GaugeVec, - PrometheusError, Opts, register, U64 +use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, + Justifications, }; -use sync::{ChainSync, SyncState}; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet, VecDeque, hash_map::Entry}; -use std::sync::Arc; -use std::fmt::Write; -use std::{io, iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; -use log::{log, Level, trace, debug, warn, error}; -use wasm_timer::Instant; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet, VecDeque}, + convert::TryFrom as _, + io, iter, + num::NonZeroUsize, + pin::Pin, + sync::Arc, + task::Poll, + time, +}; +use sync::{ChainSync, Status as SyncStatus}; -mod generic_proto; +mod notifications; -pub mod message; pub mod event; +pub mod message; pub mod sync; -pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError, LegacyConnectionKillError}; +pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; -const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); -/// Interval at which we propagate 
transactions; -const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); -/// Maximim number of known block hashes to keep for a peer. +/// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead -/// Maximim number of known transaction hashes to keep for a peer. -/// -/// This should be approx. 2 blocks full of transactions for the network to function properly. -const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. +/// Maximum allowed size for a block announce. +const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; -/// Maximim number of transaction validation request we keep at any moment. -const MAX_PENDING_TRANSACTIONS: usize = 8192; +/// Maximum size used for notifications in the block announce and transaction protocols. +// Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. +pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * 1024; -/// Current protocol version. -pub(crate) const CURRENT_VERSION: u32 = 6; -/// Lowest version we support -pub(crate) const MIN_VERSION: u32 = 3; +/// Identifier of the peerset for the block announces protocol. +const HARDCODED_PEERSETS_SYNC: sc_peerset::SetId = sc_peerset::SetId::from(0); +/// Number of hardcoded peersets (the constants right above). Any set whose identifier is equal or +/// superior to this value corresponds to a user-defined protocol. +const NUM_HARDCODED_PEERSETS: usize = 1; /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful @@ -95,54 +105,32 @@ mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer doesn't respond in time to our messages. pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); + /// Reputation change when a peer refuses a request. + pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); /// Reputation change when we are a light client and a peer is behind us. pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - /// Reputation change when a peer sends us any transaction. - /// - /// This forces node to verify it, thus the negative value here. Once transaction is verified, - /// reputation change should be refunded with `ANY_TRANSACTION_REFUND` - pub const ANY_TRANSACTION: Rep = Rep::new(-(1 << 4), "Any transaction"); - /// Reputation change when a peer sends us any transaction that is not invalid. - pub const ANY_TRANSACTION_REFUND: Rep = Rep::new(1 << 4, "Any transaction (refund)"); - /// Reputation change when a peer sends us an transaction that we didn't know about. - pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); - /// Reputation change when a peer sends us a bad transaction. - pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); - /// We received an unexpected response. - pub const UNEXPECTED_RESPONSE: Rep = Rep::new_fatal("Unexpected response packet"); - /// We received an unexpected transaction packet. - pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); /// Peer has different genesis. pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer is on unsupported protocol version. 
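The `rep` constants above encode severity in powers of two, which keeps relative penalties easy to audit: four request timeouts cost as much as one undecodable message, while `Rep::new_fatal` is reserved for offences that should get the peer banned outright. A minimal illustration with plain integers (the actual bookkeeping lives in `sc-peerset` and is not reproduced here):

// Reputation deltas mirroring the magnitudes used above; illustrative only.
const TIMEOUT: i32 = -(1 << 10); // moderate: the peer was merely slow
const BAD_MESSAGE: i32 = -(1 << 12); // severe: the peer sent undecodable data

fn main() {
    // Severity is encoded in the exponent: 2^12 = 4 * 2^10.
    assert_eq!(BAD_MESSAGE, 4 * TIMEOUT);
}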
pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); /// Peer role does not match (e.g. light peer connecting to another light peer). pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); - /// Peer response data does not have requested bits. - pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); /// Peer send us a block announcement that failed at validation. pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); } struct Metrics { - obsolete_requests: Gauge, peers: Gauge, queued_blocks: Gauge, fork_targets: Gauge, - finality_proofs: GaugeVec, justifications: GaugeVec, - propagated_transactions: Counter, } impl Metrics { fn register(r: &Registry) -> Result { - Ok(Metrics { - obsolete_requests: { - let g = Gauge::new("sync_obsolete_requests", "Number of obsolete requests")?; - register(g, r)? - }, + Ok(Self { peers: { let g = Gauge::new("sync_peers", "Number of peers we sync with")?; register(g, r)? @@ -159,113 +147,67 @@ impl Metrics { let g = GaugeVec::new( Opts::new( "sync_extra_justifications", - "Number of extra justifications requests" + "Number of extra justifications requests", ), &["status"], )?; register(g, r)? }, - finality_proofs: { - let g = GaugeVec::new( - Opts::new( - "sync_extra_finality_proofs", - "Number of extra finality proof requests", - ), - &["status"], - )?; - register(g, r)? - }, - propagated_transactions: register(Counter::new( - "sync_propagated_transactions", - "Number of transactions propagated to at least one peer", - )?, r)?, }) } } -#[pin_project::pin_project] -struct PendingTransaction { - #[pin] - validation: TransactionImportFuture, - tx_hash: H, -} - -impl Future for PendingTransaction { - type Output = (H, TransactionImport); - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - let mut this = self.project(); - - if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.tx_hash.clone(), import_result)); - } - - Poll::Pending - } -} - // Lock must always be taken in order declared here. -pub struct Protocol { +pub struct Protocol { /// Interval at which we call `tick`. tick_timeout: Pin + Send>>, - /// Interval at which we call `propagate_transactions`. - propagate_timeout: Pin + Send>>, /// Pending list of messages to return from `poll` as a priority. pending_messages: VecDeque>, - /// Pending transactions verification tasks. - pending_transactions: FuturesUnordered>, - /// As multiple peers can send us the same transaction, we group - /// these peers using the transaction hash while the transaction is - /// imported. This prevents that we import the same transaction - /// multiple times concurrently. - pending_transactions_peers: HashMap>, config: ProtocolConfig, genesis_hash: B::Hash, sync: ChainSync, - context_data: ContextData, + // All connected peers + peers: HashMap>, + chain: Arc>, /// List of nodes for which we perform additional logging because they are important for the /// user. important_peers: HashSet, /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, - transaction_pool: Arc>, /// Handles opening the unique substream and sending and receiving raw messages. - behaviour: GenericProto, - /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: HashMap>, - /// For each protocol name, the legacy equivalent. - legacy_equiv_by_name: HashMap, Fallback>, - /// Name of the protocol used for transactions. 
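The `PendingTransaction` future deleted above is a small but useful pattern: it wraps a validation future together with the hash it belongs to, so that a `FuturesUnordered` of them yields `(hash, result)` pairs that can be routed back to the peers recorded in `pending_transactions_peers`. A self-contained sketch of the same wrapper (the `Tagged` name is invented for this example):

use futures::future::BoxFuture;
use std::{future::Future, pin::Pin, task::{Context, Poll}};

/// Resolves to `(tag, output)` once the inner future completes.
struct Tagged<T> {
    inner: BoxFuture<'static, T>,
    tag: u64,
}

impl<T> Future for Tagged<T> {
    type Output = (u64, T);

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `BoxFuture` is `Unpin`, so the wrapper can be polled through a plain
        // mutable reference instead of needing `pin_project`.
        let this = self.get_mut();
        match this.inner.as_mut().poll(cx) {
            Poll::Ready(out) => Poll::Ready((this.tag, out)),
            Poll::Pending => Poll::Pending,
        }
    }
}

fn main() {
    let fut = Tagged { inner: Box::pin(async { 2 + 2 }), tag: 7 };
    assert_eq!(futures::executor::block_on(fut), (7, 4));
}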
- transactions_protocol: Cow<'static, str>, - /// Name of the protocol used for block announces. - block_announces_protocol: Cow<'static, str>, + behaviour: Notifications, + /// List of notifications protocols that have been registered. + notification_protocols: Vec>, + /// If we receive a new "substream open" event that contains an invalid handshake, we ask the + /// inner layer to force-close the substream. Force-closing the substream will generate a + /// "substream closed" event. This is a problem: since we can't propagate the "substream open" + /// event to the outer layers, we also shouldn't propagate this "substream closed" event. To + /// solve this, an entry is added to this map whenever an invalid handshake is received. + /// Entries are removed when the corresponding "substream closed" is later received. + bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, /// Prometheus metrics. metrics: Option, /// The `PeerId`'s of all boot nodes. - boot_node_ids: Arc>, + boot_node_ids: HashSet, + /// A cache for the data that was associated to a block announcement. + block_announce_data_cache: lru::LruCache>, } -#[derive(Default)] -struct PacketStats { - bytes_in: u64, - bytes_out: u64, - count_in: u64, - count_out: u64, +#[derive(Debug)] +enum PeerRequest { + Block(message::BlockRequest), + State, + WarpProof, } + /// Peer information -#[derive(Debug, Clone)] -struct Peer { +#[derive(Debug)] +struct Peer { info: PeerInfo, - /// Current block request, if any. - block_request: Option<(Instant, message::BlockRequest)>, - /// Requests we are no longer interested in. - obsolete_requests: HashMap, - /// Holds a set of transactions known to this peer. - known_transactions: LruHashSet, + /// Current request, if any. Started by emitting [`CustomMessageOutcome::BlockRequest`]. + request: Option<(PeerRequest, oneshot::Receiver, RequestFailure>>)>, /// Holds a set of blocks known to this peer. known_blocks: LruHashSet, - /// Request counter, - next_request_id: message::RequestId, } /// Info about a peer's known state. @@ -279,14 +221,6 @@ pub struct PeerInfo { pub best_number: ::Number, } -/// Data necessary to create a context. -struct ContextData { - // All connected peers - peers: HashMap>, - stats: HashMap<&'static str, PacketStats>, - pub chain: Arc>, -} - /// Configuration for the Substrate-specific part of the networking layer. #[derive(Clone)] pub struct ProtocolConfig { @@ -294,14 +228,28 @@ pub struct ProtocolConfig { pub roles: Roles, /// Maximum number of peers to ask the same blocks in parallel. pub max_parallel_downloads: u32, + /// Enable state sync. 
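The long comment on `bad_handshake_substreams` above describes a classic event-suppression invariant: an "open" we swallow on the way in must have its matching "close" swallowed on the way out, or the outer layers see a close for a stream they never saw open. Distilled into a standalone sketch (ids and types simplified; `invalid_handshake` is a placeholder for the real validation):

use std::collections::HashSet;

enum StreamEvent { Opened(u64), Closed(u64) }

struct Tracker { bad: HashSet<u64> }

impl Tracker {
    /// Returns `None` when the event must not reach the outer layers.
    fn filter(&mut self, ev: StreamEvent) -> Option<StreamEvent> {
        match ev {
            StreamEvent::Opened(id) if invalid_handshake(id) => {
                // Swallow the open and remember it, so that the close generated
                // by our own force-close is swallowed as well.
                self.bad.insert(id);
                None
            },
            StreamEvent::Closed(id) if self.bad.remove(&id) => None,
            other => Some(other),
        }
    }
}

fn invalid_handshake(_id: u64) -> bool { false } // placeholder check

fn main() {
    let mut t = Tracker { bad: HashSet::new() };
    t.bad.insert(5); // as if stream 5 had failed its handshake earlier
    assert!(t.filter(StreamEvent::Closed(5)).is_none()); // suppressed
    assert!(t.filter(StreamEvent::Closed(6)).is_some()); // propagated
}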
+ pub sync_mode: config::SyncMode, +} + +impl ProtocolConfig { + fn sync_mode(&self) -> sync::SyncMode { + if self.roles.is_light() { + sync::SyncMode::Light + } else { + match self.sync_mode { + config::SyncMode::Full => sync::SyncMode::Full, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, + config::SyncMode::Warp => sync::SyncMode::Warp, + } + } + } } impl Default for ProtocolConfig { fn default() -> ProtocolConfig { - ProtocolConfig { - roles: Roles::FULL, - max_parallel_downloads: 5, - } + Self { roles: Roles::FULL, max_parallel_downloads: 5, sync_mode: config::SyncMode::Full } } } @@ -319,89 +267,112 @@ struct BlockAnnouncesHandshake<B: BlockT> { } impl<B: BlockT> BlockAnnouncesHandshake<B> { - fn build(protocol_config: &ProtocolConfig, chain: &Arc<dyn Client<B>>) -> Self { - let info = chain.info(); - BlockAnnouncesHandshake { - genesis_hash: info.genesis_hash, - roles: protocol_config.roles, - best_number: info.best_number, - best_hash: info.best_hash, - } + fn build( + protocol_config: &ProtocolConfig, + best_number: NumberFor<B>, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> Self { + Self { genesis_hash, roles: protocol_config.roles, best_number, best_hash } } } -/// Builds a SCALE-encoded "Status" message to send as handshake for the legacy protocol. -fn build_status_message<B: BlockT>(protocol_config: &ProtocolConfig, chain: &Arc<dyn Client<B>>) -> Vec<u8> { - let info = chain.info(); - let status = message::generic::Status { - version: CURRENT_VERSION, - min_supported_version: MIN_VERSION, - genesis_hash: info.genesis_hash, - roles: protocol_config.roles.into(), - best_number: info.best_number, - best_hash: info.best_hash, - chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible - }; - - Message::<B>::Status(status).encode() -} - -/// Fallback mechanism to use to send a notification if no substream is open. -#[derive(Debug, Clone, PartialEq, Eq)] -enum Fallback { - /// Use a `Message::Consensus` with the given engine ID. - Consensus(ConsensusEngineId), - /// The message is the bytes encoding of a `Transactions` (which is itself defined as a `Vec`). - Transactions, - /// The message is the bytes encoding of a `BlockAnnounce`. - BlockAnnounce, -} - -impl<B: BlockT, H: ExHashT> Protocol<B, H> { +impl<B: BlockT> Protocol<B> { /// Create a new instance.
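`ProtocolConfig::sync_mode` above folds two inputs, the node's role and the user-facing `config::SyncMode`, into the internal `sync::SyncMode`, with light clients forced to light sync no matter what was configured. A standalone mirror of that decision table (the enum names here are stand-ins, not the real types):

#[derive(Clone, Copy)]
enum UserMode { Full, Fast { skip_proofs: bool }, Warp }

#[derive(Debug, PartialEq)]
enum InternalMode { Full, Light, LightState { skip_proofs: bool }, Warp }

fn resolve(is_light: bool, mode: UserMode) -> InternalMode {
    if is_light {
        // A light client can only light-sync, whatever was configured.
        InternalMode::Light
    } else {
        match mode {
            UserMode::Full => InternalMode::Full,
            UserMode::Fast { skip_proofs } => InternalMode::LightState { skip_proofs },
            UserMode::Warp => InternalMode::Warp,
        }
    }
}

fn main() {
    assert_eq!(resolve(true, UserMode::Warp), InternalMode::Light);
    let fast = resolve(false, UserMode::Fast { skip_proofs: true });
    assert_eq!(fast, InternalMode::LightState { skip_proofs: true });
}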
pub fn new( config: ProtocolConfig, - local_peer_id: PeerId, chain: Arc>, - transaction_pool: Arc>, - finality_proof_request_builder: Option>, protocol_id: ProtocolId, - peerset_config: sc_peerset::PeersetConfig, + network_config: &config::NetworkConfiguration, + notifications_protocols_handshakes: Vec>, block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, - boot_node_ids: Arc>, - ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { + warp_sync_provider: Option>>, + ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); let sync = ChainSync::new( - config.roles, + config.sync_mode(), chain.clone(), - &info, - finality_proof_request_builder, block_announce_validator, config.max_parallel_downloads, - ); + warp_sync_provider, + ) + .map_err(Box::new)?; + + let boot_node_ids = { + let mut list = HashSet::new(); + for node in &network_config.boot_nodes { + list.insert(node.peer_id); + } + list.shrink_to_fit(); + list + }; let important_peers = { let mut imp_p = HashSet::new(); - for reserved in peerset_config.priority_groups.iter().flat_map(|(_, l)| l.iter()) { - imp_p.insert(reserved.clone()); + for reserved in &network_config.default_peers_set.reserved_nodes { + imp_p.insert(reserved.peer_id); + } + for reserved in network_config + .extra_sets + .iter() + .flat_map(|s| s.set_config.reserved_nodes.iter()) + { + imp_p.insert(reserved.peer_id); } imp_p.shrink_to_fit(); imp_p }; - let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); + let mut known_addresses = Vec::new(); - let mut legacy_equiv_by_name = HashMap::new(); + let (peerset, peerset_handle) = { + let mut sets = + Vec::with_capacity(NUM_HARDCODED_PEERSETS + network_config.extra_sets.len()); - let transactions_protocol: Cow<'static, str> = Cow::from({ - let mut proto = String::new(); - proto.push_str("/"); - proto.push_str(protocol_id.as_ref()); - proto.push_str("/transactions/1"); - proto - }); - legacy_equiv_by_name.insert(transactions_protocol.clone(), Fallback::Transactions); + let mut default_sets_reserved = HashSet::new(); + for reserved in network_config.default_peers_set.reserved_nodes.iter() { + default_sets_reserved.insert(reserved.peer_id); + known_addresses.push((reserved.peer_id, reserved.multiaddr.clone())); + } + + let mut bootnodes = Vec::with_capacity(network_config.boot_nodes.len()); + for bootnode in network_config.boot_nodes.iter() { + bootnodes.push(bootnode.peer_id); + known_addresses.push((bootnode.peer_id, bootnode.multiaddr.clone())); + } + + // Set number 0 is used for block announces. 
+ sets.push(sc_peerset::SetConfig { + in_peers: network_config.default_peers_set.in_peers, + out_peers: network_config.default_peers_set.out_peers, + bootnodes, + reserved_nodes: default_sets_reserved.clone(), + reserved_only: network_config.default_peers_set.non_reserved_mode == + config::NonReservedPeerMode::Deny, + }); + + for set_cfg in &network_config.extra_sets { + let mut reserved_nodes = HashSet::new(); + for reserved in set_cfg.set_config.reserved_nodes.iter() { + reserved_nodes.insert(reserved.peer_id); + known_addresses.push((reserved.peer_id, reserved.multiaddr.clone())); + } + + let reserved_only = + set_cfg.set_config.non_reserved_mode == config::NonReservedPeerMode::Deny; + + sets.push(sc_peerset::SetConfig { + in_peers: set_cfg.set_config.in_peers, + out_peers: set_cfg.set_config.out_peers, + bootnodes: Vec::new(), + reserved_nodes, + reserved_only, + }); + } + + sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) + }; let block_announces_protocol: Cow<'static, str> = Cow::from({ let mut proto = String::new(); @@ -410,55 +381,70 @@ impl Protocol { proto.push_str("/block-announces/1"); proto }); - legacy_equiv_by_name.insert(block_announces_protocol.clone(), Fallback::BlockAnnounce); let behaviour = { - let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let block_announces_handshake = BlockAnnouncesHandshake::build(&config, &chain).encode(); - GenericProto::new( - local_peer_id, - protocol_id.clone(), - versions, - build_status_message(&config, &chain), + let best_number = info.best_number; + let best_hash = info.best_hash; + let genesis_hash = info.genesis_hash; + + let block_announces_handshake = + BlockAnnouncesHandshake::::build(&config, best_number, best_hash, genesis_hash) + .encode(); + + let sync_protocol_config = notifications::ProtocolConfig { + name: block_announces_protocol, + fallback_names: Vec::new(), + handshake: block_announces_handshake, + max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, + }; + + Notifications::new( peerset, - // As documented in `GenericProto`, the first protocol in the list is always the - // one carrying the handshake reported in the `CustomProtocolOpen` event. 
- iter::once((block_announces_protocol.clone(), block_announces_handshake)) - .chain(iter::once((transactions_protocol.clone(), vec![]))), + iter::once(sync_protocol_config).chain( + network_config.extra_sets.iter().zip(notifications_protocols_handshakes).map( + |(s, hs)| notifications::ProtocolConfig { + name: s.notifications_protocol.clone(), + fallback_names: s.fallback_names.clone(), + handshake: hs, + max_notification_size: s.max_notification_size, + }, + ), + ), ) }; - let protocol = Protocol { + let block_announce_data_cache = lru::LruCache::new( + network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize, + ); + + let protocol = Self { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), - propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_messages: VecDeque::new(), - pending_transactions: FuturesUnordered::new(), - pending_transactions_peers: HashMap::new(), config, - context_data: ContextData { - peers: HashMap::new(), - stats: HashMap::new(), - chain, - }, + peers: HashMap::new(), + chain, genesis_hash: info.genesis_hash, sync, important_peers, - transaction_pool, peerset_handle: peerset_handle.clone(), behaviour, - protocol_name_by_engine: HashMap::new(), - legacy_equiv_by_name, - transactions_protocol, - block_announces_protocol, + notification_protocols: network_config + .extra_sets + .iter() + .map(|s| s.notifications_protocol.clone()) + .collect(), + bad_handshake_substreams: Default::default(), metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) } else { None }, boot_node_ids, + block_announce_data_cache, }; - Ok((protocol, peerset_handle)) + Ok((protocol, peerset_handle, known_addresses)) } /// Returns the list of all the peers we have an open channel to. @@ -466,14 +452,10 @@ impl Protocol { self.behaviour.open_peers() } - /// Returns true if we have a channel open with this node. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - self.behaviour.is_open(peer_id) - } - - /// Returns the list of all the peers that the peerset currently requests us to be connected to. + /// Returns the list of all the peers that the peerset currently requests us to be connected + /// to on the default set. pub fn requested_peers(&self) -> impl Iterator { - self.behaviour.requested_peers() + self.behaviour.requested_peers(HARDCODED_PEERSETS_SYNC) } /// Returns the number of discovered nodes that we keep in memory. @@ -482,13 +464,16 @@ impl Protocol { } /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId) { - self.behaviour.disconnect_peer(peer_id) - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - self.behaviour.is_enabled(peer_id) + pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: &str) { + if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) + { + self.behaviour.disconnect_peer( + peer_id, + sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS), + ); + } else { + warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") + } } /// Returns the state of the peerset manager, for debugging purposes. @@ -498,21 +483,17 @@ impl Protocol { /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.context_data.peers.values().count() + self.peers.values().count() } /// Returns the number of peers we're connected to and that are being queried. 
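`disconnect_peer` above shows the indexing convention used throughout this file: peerset set 0 is hardcoded for block announces, so a user-registered protocol's `SetId` is its position in `notification_protocols` plus `NUM_HARDCODED_PEERSETS`. The lookup in isolation (the protocol names are made up for the example):

const NUM_HARDCODED_PEERSETS: usize = 1;

fn set_id(protocols: &[&str], name: &str) -> Option<usize> {
    protocols.iter().position(|p| *p == name).map(|pos| pos + NUM_HARDCODED_PEERSETS)
}

fn main() {
    let protocols = ["/chain/grandpa/1", "/chain/transactions/1"];
    assert_eq!(set_id(&protocols, "/chain/grandpa/1"), Some(1)); // set 0 is the sync set
    assert_eq!(set_id(&protocols, "/unknown/1"), None); // triggers the warn! branch above
}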
pub fn num_active_peers(&self) -> usize { - self.context_data - .peers - .values() - .filter(|p| p.block_request.is_some()) - .count() + self.peers.values().filter(|p| p.request.is_some()).count() } /// Current global sync state. - pub fn sync_state(&self) -> SyncState { - self.sync.status().state + pub fn sync_state(&self) -> SyncStatus { + self.sync.status() } /// Target sync block number. @@ -540,27 +521,22 @@ impl Protocol { self.sync.num_sync_requests() } - /// Sync local state with the blockchain state. - pub fn update_chain(&mut self) { - let info = self.context_data.chain.info(); - self.sync.update_chain_info(&info.best_hash, info.best_number); - self.behaviour.set_legacy_handshake_message( - build_status_message(&self.config, &self.context_data.chain), - ); - self.behaviour.set_notif_protocol_handshake( - &self.block_announces_protocol, - BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() - ); - } + /// Inform sync about new best imported block. + pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); - /// Inform sync about an own imported block. - pub fn own_block_imported(&mut self, hash: B::Hash, number: NumberFor) { self.sync.update_chain_info(&hash, number); + + self.behaviour.set_notif_protocol_handshake( + HARDCODED_PEERSETS_SYNC, + BlockAnnouncesHandshake::::build(&self.config, number, hash, self.genesis_hash) + .encode(), + ); } fn update_peer_info(&mut self, who: &PeerId) { if let Some(info) = self.sync.peer_info(who) { - if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { + if let Some(ref mut peer) = self.peers.get_mut(who) { peer.info.best_hash = info.best_hash; peer.info.best_number = info.best_number; } @@ -569,126 +545,37 @@ impl Protocol { /// Returns information about all the peers we are connected to after the handshake message. 
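`new_best_block_imported` above re-encodes the handshake so that peers connecting from now on immediately learn our new best block; the handshake is simply the SCALE encoding of `BlockAnnouncesHandshake`. A sketch of the round-trip involved, assuming `parity-scale-codec` with its `derive` feature and a simplified stand-in struct (the real one is generic over the block type):

use codec::{Decode, Encode};

#[derive(Encode, Decode, Debug, PartialEq)]
struct Handshake {
    roles: u8,
    best_number: u32,
    best_hash: [u8; 32],
    genesis_hash: [u8; 32],
}

fn main() {
    let hs = Handshake { roles: 1, best_number: 42, best_hash: [7; 32], genesis_hash: [0; 32] };
    // `set_notif_protocol_handshake` receives exactly these bytes.
    let bytes = hs.encode();
    let back = Handshake::decode(&mut bytes.as_slice()).expect("round-trips");
    assert_eq!(hs, back);
}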
pub fn peers_info(&self) -> impl Iterator)> { - self.context_data.peers.iter().map(|(id, peer)| (id, &peer.info)) + self.peers.iter().map(|(id, peer)| (id, &peer.info)) } - fn on_custom_message( + fn prepare_block_request( &mut self, who: PeerId, - data: BytesMut, + request: message::BlockRequest, ) -> CustomMessageOutcome { - let message = match as Decode>::decode(&mut &data[..]) { - Ok(message) => message, - Err(err) => { - debug!( - target: "sync", - "Couldn't decode packet sent by {}: {:?}: {}", - who, - data, - err.what(), - ); - self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); - return CustomMessageOutcome::None; - } - }; - - let mut stats = self.context_data.stats.entry(message.id()).or_default(); - stats.bytes_in += data.len() as u64; - stats.count_in += 1; - - match message { - GenericMessage::Status(_) => - debug!(target: "sub-libp2p", "Received unexpected Status"), - GenericMessage::BlockAnnounce(announce) => - self.push_block_announce_validation(who.clone(), announce), - GenericMessage::Transactions(m) => - self.on_transactions(who, m), - GenericMessage::BlockResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected BlockResponse"), - GenericMessage::RemoteCallResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse"), - GenericMessage::RemoteReadResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteReadResponse"), - GenericMessage::RemoteHeaderResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), - GenericMessage::RemoteChangesResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), - GenericMessage::FinalityProofResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected FinalityProofResponse"), - GenericMessage::BlockRequest(_) | - GenericMessage::FinalityProofRequest(_) | - GenericMessage::RemoteReadChildRequest(_) | - GenericMessage::RemoteCallRequest(_) | - GenericMessage::RemoteReadRequest(_) | - GenericMessage::RemoteHeaderRequest(_) | - GenericMessage::RemoteChangesRequest(_) => { - debug!( - target: "sub-libp2p", - "Received no longer supported legacy request from {:?}", - who - ); - self.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::BAD_PROTOCOL); - }, - GenericMessage::Consensus(msg) => - return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { - CustomMessageOutcome::NotificationsReceived { - remote: who, - messages: vec![(msg.engine_id, From::from(msg.data))], - } - } else { - debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); - CustomMessageOutcome::None - }, - GenericMessage::ConsensusBatch(messages) => { - let messages = messages - .into_iter() - .filter_map(|msg| { - if self.protocol_name_by_engine.contains_key(&msg.engine_id) { - Some((msg.engine_id, From::from(msg.data))) - } else { - debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); - None - } - }) - .collect::>(); - - return if !messages.is_empty() { - CustomMessageOutcome::NotificationsReceived { - remote: who, - messages, - } - } else { - CustomMessageOutcome::None - }; - }, - } - - CustomMessageOutcome::None - } - - fn update_peer_request(&mut self, who: &PeerId, request: &mut message::BlockRequest) { - update_peer_request::(&mut self.context_data.peers, who, request) + prepare_block_request::(&mut self.peers, who, request) } - /// Called by peer when it is disconnecting - pub fn on_peer_disconnected(&mut self, peer: PeerId) -> CustomMessageOutcome { + 
/// Called by peer when it is disconnecting. + /// + /// Returns a result if the handshake of this peer was indeed accepted. + pub fn on_sync_peer_disconnected(&mut self, peer: PeerId) -> Result<(), ()> { if self.important_peers.contains(&peer) { warn!(target: "sync", "Reserved peer {} disconnected", peer); } else { - trace!(target: "sync", "{} disconnected", peer); + debug!(target: "sync", "{} disconnected", peer); } - if let Some(_peer_data) = self.context_data.peers.remove(&peer) { - self.sync.peer_disconnected(&peer); - - // Notify all the notification protocols as closed. - CustomMessageOutcome::NotificationStreamClosed { - remote: peer, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), + if let Some(_peer_data) = self.peers.remove(&peer) { + if let Some(sync::OnBlockData::Import(origin, blocks)) = + self.sync.peer_disconnected(&peer) + { + self.pending_messages + .push_back(CustomMessageOutcome::BlockImport(origin, blocks)); } + Ok(()) } else { - CustomMessageOutcome::None + Err(()) } } @@ -701,163 +588,192 @@ impl Protocol { /// Must contain the same `PeerId` and request that have been emitted. pub fn on_block_response( &mut self, - peer: PeerId, - response: message::BlockResponse, + peer_id: PeerId, + request: message::BlockRequest, + response: crate::schema::v1::BlockResponse, ) -> CustomMessageOutcome { - let request = if let Some(ref mut p) = self.context_data.peers.get_mut(&peer) { - if p.obsolete_requests.remove(&response.id).is_some() { - trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", peer, response.id); - return CustomMessageOutcome::None; - } - // Clear the request. If the response is invalid peer will be disconnected anyway. - match p.block_request.take() { - Some((_, request)) if request.id == response.id => request, - Some(_) => { - trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", peer, response.id); - return CustomMessageOutcome::None; - } - None => { - trace!(target: "sync", "Unexpected response packet from unknown peer {}", peer); - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::UNEXPECTED_RESPONSE); - return CustomMessageOutcome::None; - } - } - } else { - trace!(target: "sync", "Unexpected response packet from unknown peer {}", peer); - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::UNEXPECTED_RESPONSE); - return CustomMessageOutcome::None; + let blocks = response + .blocks + .into_iter() + .map(|block_data| { + Ok(message::BlockData:: { + hash: Decode::decode(&mut block_data.hash.as_ref())?, + header: if !block_data.header.is_empty() { + Some(Decode::decode(&mut block_data.header.as_ref())?) 
+ } else { + None + }, + body: if request.fields.contains(message::BlockAttributes::BODY) { + Some( + block_data + .body + .iter() + .map(|body| Decode::decode(&mut body.as_ref())) + .collect::, _>>()?, + ) + } else { + None + }, + indexed_body: if request.fields.contains(message::BlockAttributes::INDEXED_BODY) + { + Some(block_data.indexed_body) + } else { + None + }, + receipt: if !block_data.receipt.is_empty() { + Some(block_data.receipt) + } else { + None + }, + message_queue: if !block_data.message_queue.is_empty() { + Some(block_data.message_queue) + } else { + None + }, + justification: if !block_data.justification.is_empty() { + Some(block_data.justification) + } else if block_data.is_empty_justification { + Some(Vec::new()) + } else { + None + }, + justifications: if !block_data.justifications.is_empty() { + Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?) + } else { + None + }, + }) + }) + .collect::, codec::Error>>(); + + let blocks = match blocks { + Ok(blocks) => blocks, + Err(err) => { + debug!(target: "sync", "Failed to decode block response from {}: {}", peer_id, err); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + return CustomMessageOutcome::None + }, }; + let block_response = message::BlockResponse:: { id: request.id, blocks }; + let blocks_range = || match ( - response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), - response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), ) { (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), (Some(first), Some(_)) => format!(" ({})", first), _ => Default::default(), }; trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", - response.id, - peer, - response.blocks.len(), + block_response.id, + peer_id, + block_response.blocks.len(), blocks_range(), ); if request.fields == message::BlockAttributes::JUSTIFICATION { - match self.sync.on_block_justification(peer, response) { + match self.sync.on_block_justification(peer_id, block_response) { Ok(sync::OnBlockJustification::Nothing) => CustomMessageOutcome::None, - Ok(sync::OnBlockJustification::Import { peer, hash, number, justification }) => - CustomMessageOutcome::JustificationImport(peer, hash, number, justification), + Ok(sync::OnBlockJustification::Import { peer, hash, number, justifications }) => + CustomMessageOutcome::JustificationImport(peer, hash, number, justifications), Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None - } + }, } } else { - // Validate fields against the request. 
- if request.fields.contains(message::BlockAttributes::HEADER) && response.blocks.iter().any(|b| b.header.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing header for a block"); - return CustomMessageOutcome::None - } - if request.fields.contains(message::BlockAttributes::BODY) && response.blocks.iter().any(|b| b.body.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing body for a block"); - return CustomMessageOutcome::None - } - - match self.sync.on_block_data(&peer, Some(request), response) { + match self.sync.on_block_data(&peer_id, Some(request), block_response) { Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), - Ok(sync::OnBlockData::Request(peer, mut req)) => { - self.update_peer_request(&peer, &mut req); - CustomMessageOutcome::BlockRequest { - target: peer, - request: req, - } - } + Ok(sync::OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None - } + }, } } } - /// Must be called in response to a [`CustomMessageOutcome::BlockRequest`] if it has failed. - pub fn on_block_request_failed( + /// Must be called in response to a [`CustomMessageOutcome::StateRequest`] being emitted. + /// Must contain the same `PeerId` and request that have been emitted. + pub fn on_state_response( &mut self, - peer: &PeerId, - ) { - self.peerset_handle.report_peer(peer.clone(), rep::TIMEOUT); - self.behaviour.disconnect_peer(peer); + peer_id: PeerId, + response: StateResponse, + ) -> CustomMessageOutcome { + match self.sync.on_state_data(&peer_id, response) { + Ok(sync::OnStateData::Import(origin, block)) => + CustomMessageOutcome::BlockImport(origin, vec![block]), + Ok(sync::OnStateData::Request(peer, req)) => + prepare_state_request::(&mut self.peers, peer, req), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + }, + } + } + + /// Must be called in response to a [`CustomMessageOutcome::WarpSyncRequest`] being emitted. + /// Must contain the same `PeerId` and request that have been emitted. + pub fn on_warp_sync_response( + &mut self, + peer_id: PeerId, + response: crate::warp_request_handler::EncodedProof, + ) -> CustomMessageOutcome { + match self.sync.on_warp_sync_data(&peer_id, response) { + Ok(sync::OnWarpSyncData::WarpProofRequest(peer, req)) => + prepare_warp_sync_request::(&mut self.peers, peer, req), + Ok(sync::OnWarpSyncData::StateRequest(peer, req)) => + prepare_state_request::(&mut self.peers, peer, req), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + }, + } } /// Perform time based maintenance. /// /// > **Note**: This method normally doesn't have to be called except for testing purposes. 
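The decoding in `on_block_response` above leans on a schema convention: optional fields arrive as empty byte strings, so an `is_empty()` check plus a fallible `Decode` stands in for `Option`. Isolated, the pattern looks like this (`maybe_decode` is not a helper in this crate, just a distillation):

use codec::{Decode, Encode};

/// Treats an empty buffer as `None`, otherwise SCALE-decodes a `T`.
fn maybe_decode<T: Decode>(bytes: &[u8]) -> Result<Option<T>, codec::Error> {
    if bytes.is_empty() {
        Ok(None)
    } else {
        Ok(Some(T::decode(&mut &bytes[..])?))
    }
}

fn main() -> Result<(), codec::Error> {
    assert_eq!(maybe_decode::<u32>(&[])?, None);
    assert_eq!(maybe_decode::<u32>(&42u32.encode())?, Some(42));
    Ok(())
}

The separate `is_empty_justification` flag exists precisely because this convention is ambiguous for byte vectors: it lets the sender distinguish "no justification" from a justification that happens to be empty.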
pub fn tick(&mut self) { - self.maintain_peers(); self.report_metrics() } - fn maintain_peers(&mut self) { - let tick = Instant::now(); - let mut aborting = Vec::new(); - { - for (who, peer) in self.context_data.peers.iter() { - if peer.block_request.as_ref().map_or(false, |(t, _)| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - log!( - target: "sync", - if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, - "Request timeout {}", who - ); - aborting.push(who.clone()); - } else if peer.obsolete_requests.values().any(|t| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - log!( - target: "sync", - if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, - "Obsolete timeout {}", who - ); - aborting.push(who.clone()); - } - } - } - - for p in aborting { - self.behaviour.disconnect_peer(&p); - self.peerset_handle.report_peer(p, rep::TIMEOUT); - } - } - - /// Called on the first connection between two peers, after their exchange of handshake. - fn on_peer_connected( + /// Called on the first connection between two peers on the default set, after their exchange + /// of handshake. + /// + /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync + /// from. + fn on_sync_peer_connected( &mut self, who: PeerId, status: BlockAnnouncesHandshake, - notifications_sink: NotificationsSink, - ) -> CustomMessageOutcome { + ) -> Result<(), ()> { trace!(target: "sync", "New peer {} {:?}", who, status); - if self.context_data.peers.contains_key(&who) { - debug!(target: "sync", "Ignoring duplicate status packet from {}", who); - return CustomMessageOutcome::None; + if self.peers.contains_key(&who) { + error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); + debug_assert!(false); + return Err(()) } + if status.genesis_hash != self.genesis_hash { log!( target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, + if self.important_peers.contains(&who) { Level::Warn } else { Level::Debug }, "Peer is on different chain (our genesis: {} theirs: {})", self.genesis_hash, status.genesis_hash ); - self.peerset_handle.report_peer(who.clone(), rep::GENESIS_MISMATCH); - self.behaviour.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::GENESIS_MISMATCH); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); if self.boot_node_ids.contains(&who) { error!( @@ -869,33 +785,29 @@ impl Protocol { ); } - return CustomMessageOutcome::None; + return Err(()) } if self.config.roles.is_light() { // we're not interested in light peers if status.roles.is_light() { debug!(target: "sync", "Peer {} is unable to serve light requests", who); - self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; + self.peerset_handle.report_peer(who, rep::BAD_ROLE); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + return Err(()) } // we don't interested in peers that are far behind us - let self_best_block = self - .context_data - .chain - .info() - .best_number; + let self_best_block = self.chain.info().best_number; let blocks_difference = self_best_block .checked_sub(&status.best_number) .unwrap_or_else(Zero::zero) .saturated_into::(); if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); - self.peerset_handle.report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); - 
self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; + self.peerset_handle.report_peer(who, rep::PEER_BEHIND_US_LIGHT); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + return Err(()) } } @@ -903,246 +815,74 @@ impl Protocol { info: PeerInfo { roles: status.roles, best_hash: status.best_hash, - best_number: status.best_number + best_number: status.best_number, }, - block_request: None, - known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) - .expect("Constant is nonzero")), - known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) - .expect("Constant is nonzero")), - next_request_id: 0, - obsolete_requests: HashMap::new(), + request: None, + known_blocks: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), + ), }; - self.context_data.peers.insert(who.clone(), peer); - debug!(target: "sync", "Connected {}", who); - - let info = self.context_data.peers.get(&who).expect("We just inserted above; QED").info.clone(); - self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); - if info.roles.is_full() { - match self.sync.new_peer(who.clone(), info.best_hash, info.best_number) { - Ok(None) => (), - Ok(Some(mut req)) => { - self.update_peer_request(&who, &mut req); - self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { - target: who.clone(), - request: req, - }); - }, + let req = if peer.info.roles.is_full() { + match self.sync.new_peer(who, peer.info.best_hash, peer.info.best_number) { + Ok(req) => req, Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu) - } + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); + self.peerset_handle.report_peer(id, repu); + return Err(()) + }, } - } - - // Notify all the notification protocols as open. - CustomMessageOutcome::NotificationStreamOpened { - remote: who, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), - roles: info.roles, - notifications_sink, - } - } - - /// Registers a new notifications protocol. - /// - /// While registering a protocol while we already have open connections is discouraged, we - /// nonetheless handle it by notifying that we opened channels with everyone. This function - /// returns a list of substreams to open as a result. 
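The light-client guard above is careful about direction: `checked_sub` makes a peer that is ahead of us yield `None`, saturated to zero, so only peers genuinely behind us by more than `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` are rejected. The same guard in miniature (the constant's value is defined outside this hunk; 8192 here is only a placeholder):

const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; // placeholder value

/// `true` when a full peer is too far behind us to serve light requests.
fn too_far_behind(our_best: u64, their_best: u64) -> bool {
    // A peer ahead of us yields None here, treated as zero difference.
    our_best.checked_sub(their_best).unwrap_or(0) > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE
}

fn main() {
    assert!(!too_far_behind(100, 5_000)); // peer ahead of us: fine
    assert!(too_far_behind(1_000_000, 100)); // peer far behind: reject
}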
- pub fn register_notifications_protocol<'a>( - &'a mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, - handshake_message: Vec, - ) -> impl Iterator + 'a { - let protocol_name = protocol_name.into(); - if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { - error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); } else { - self.behaviour.register_notif_protocol(protocol_name.clone(), handshake_message); - self.legacy_equiv_by_name.insert(protocol_name, Fallback::Consensus(engine_id)); - } - - let behaviour = &self.behaviour; - self.context_data.peers.iter().filter_map(move |(peer_id, peer)| { - if let Some(notifications_sink) = behaviour.notifications_sink(peer_id) { - Some((peer_id, peer.info.roles, notifications_sink)) - } else { - log::error!("State mismatch: no notifications sink for opened peer {:?}", peer_id); - None - } - }) - } - - /// Called when peer sends us new transactions - fn on_transactions( - &mut self, - who: PeerId, - transactions: message::Transactions, - ) { - // sending transaction to light node is considered a bad behavior - if !self.config.roles.is_full() { - trace!(target: "sync", "Peer {} is trying to send transactions to the light node", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); - return; - } - - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - trace!(target: "sync", "{} Ignoring transactions while syncing", who); - return; - } - - trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - for t in transactions { - if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { - debug!( - target: "sync", - "Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit", - MAX_PENDING_TRANSACTIONS, - ); - break; - } - - let hash = self.transaction_pool.hash_of(&t); - peer.known_transactions.insert(hash.clone()); - - self.peerset_handle.report_peer(who.clone(), rep::ANY_TRANSACTION); - - match self.pending_transactions_peers.entry(hash.clone()) { - Entry::Vacant(entry) => { - self.pending_transactions.push(PendingTransaction { - validation: self.transaction_pool.import(t), - tx_hash: hash, - }); - entry.insert(vec![who.clone()]); - }, - Entry::Occupied(mut entry) => { - entry.get_mut().push(who.clone()); - } - } - } - } - } - - fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { - match import { - TransactionImport::KnownGood => self.peerset_handle.report_peer(who, rep::ANY_TRANSACTION_REFUND), - TransactionImport::NewGood => self.peerset_handle.report_peer(who, rep::GOOD_TRANSACTION), - TransactionImport::Bad => self.peerset_handle.report_peer(who, rep::BAD_TRANSACTION), - TransactionImport::None => {}, - } - } + None + }; - /// Propagate one transaction. 
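In the removed `on_transactions` above, `pending_transactions_peers` is what prevents the same transaction from being imported concurrently: the first sender triggers a validation future, and later senders are merely appended under the same hash so they can share the verdict and its reputation consequences. The bookkeeping reduced to std types:

use std::collections::{hash_map::Entry, HashMap};

/// Records who sent a transaction; returns `true` when the caller should
/// start validating it, i.e. only on the first sighting.
fn note_sender(pending: &mut HashMap<[u8; 32], Vec<u64>>, hash: [u8; 32], peer: u64) -> bool {
    match pending.entry(hash) {
        Entry::Vacant(e) => {
            e.insert(vec![peer]);
            true // first sighting: spawn exactly one validation
        },
        Entry::Occupied(mut e) => {
            e.get_mut().push(peer); // duplicate: just remember the extra sender
            false
        },
    }
}

fn main() {
    let mut pending = HashMap::new();
    assert!(note_sender(&mut pending, [1; 32], 7));
    assert!(!note_sender(&mut pending, [1; 32], 8));
    assert_eq!(pending[&[1; 32]], vec![7, 8]);
}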
- pub fn propagate_transaction( - &mut self, - hash: &H, - ) { - debug!(target: "sync", "Propagating transaction [{:?}]", hash); - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - return; - } - if let Some(transaction) = self.transaction_pool.transaction(hash) { - let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); - self.transaction_pool.on_broadcasted(propagated_to); - } - } + debug!(target: "sync", "Connected {}", who); - fn do_propagate_transactions( - &mut self, - transactions: &[(H, B::Extrinsic)], - ) -> HashMap> { - let mut propagated_to = HashMap::<_, Vec<_>>::new(); - let mut propagated_transactions = 0; - - for (who, peer) in self.context_data.peers.iter_mut() { - // never send transactions to the light node - if !peer.info.roles.is_full() { - continue; - } + self.peers.insert(who, peer); + self.pending_messages + .push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number)); - let (hashes, to_send): (Vec<_>, Vec<_>) = transactions - .iter() - .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) - .cloned() - .unzip(); - - propagated_transactions += hashes.len(); - - if !to_send.is_empty() { - for hash in hashes { - propagated_to - .entry(hash) - .or_default() - .push(who.to_base58()); - } - trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.behaviour.write_notification( - who, - self.transactions_protocol.clone(), - to_send.encode() - ); - } - } - - if let Some(ref metrics) = self.metrics { - metrics.propagated_transactions.inc_by(propagated_transactions as _) + if let Some(req) = req { + let event = self.prepare_block_request(who, req); + self.pending_messages.push_back(event); } - propagated_to - } - - /// Call when we must propagate ready transactions to peers. - pub fn propagate_transactions(&mut self) { - debug!(target: "sync", "Propagating transactions"); - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - return; - } - let transactions = self.transaction_pool.transactions(); - let propagated_to = self.do_propagate_transactions(&transactions); - self.transaction_pool.on_broadcasted(propagated_to); + Ok(()) } /// Make sure an important block is propagated to peers. /// /// In chain-based consensus, we often need to make sure non-best forks are /// at least temporarily synced. 
- pub fn announce_block(&mut self, hash: B::Hash, data: Vec) { - let header = match self.context_data.chain.header(BlockId::Hash(hash)) { + pub fn announce_block(&mut self, hash: B::Hash, data: Option>) { + let header = match self.chain.header(BlockId::Hash(hash)) { Ok(Some(header)) => header, Ok(None) => { warn!("Trying to announce unknown block: {}", hash); - return; - } + return + }, Err(e) => { warn!("Error reading block header {}: {:?}", hash, e); - return; - } + return + }, }; // don't announce genesis block since it will be ignored if header.number().is_zero() { - return; + return } - let is_best = self.context_data.chain.info().best_hash == hash; + let is_best = self.chain.info().best_hash == hash; debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); - self.send_announcement(&header, data, is_best, true) - } - fn send_announcement(&mut self, header: &B::Header, data: Vec, is_best: bool, force: bool) { - let hash = header.hash(); + let data = data + .or_else(|| self.block_announce_data_cache.get(&hash).cloned()) + .unwrap_or_default(); - for (who, ref mut peer) in self.context_data.peers.iter_mut() { - trace!(target: "sync", "Announcing block {:?} to {}", hash, who); + for (who, ref mut peer) in self.peers.iter_mut() { let inserted = peer.known_blocks.insert(hash); - if inserted || force { + if inserted { + trace!(target: "sync", "Announcing block {:?} to {}", hash, who); let message = message::BlockAnnounce { header: header.clone(), state: if is_best { @@ -1153,11 +893,8 @@ impl Protocol { data: Some(data.clone()), }; - self.behaviour.write_notification( - who, - self.block_announces_protocol.clone(), - message.encode() - ); + self.behaviour + .write_notification(who, HARDCODED_PEERSETS_SYNC, message.encode()); } } } @@ -1175,23 +912,28 @@ impl Protocol { /// in the task before being polled once. So, it is required to call /// [`ChainSync::poll_block_announce_validation`] to ensure that the future is /// registered properly and will wake up the task when being ready. - fn push_block_announce_validation( - &mut self, - who: PeerId, - announce: BlockAnnounce, - ) { + fn push_block_announce_validation(&mut self, who: PeerId, announce: BlockAnnounce) { let hash = announce.header.hash(); - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - peer.known_blocks.insert(hash.clone()); - } + let peer = match self.peers.get_mut(&who) { + Some(p) => p, + None => { + log::error!(target: "sync", "Received block announce from disconnected peer {}", who); + debug_assert!(false); + return + }, + }; + + peer.known_blocks.insert(hash.clone()); let is_best = match announce.state.unwrap_or(message::BlockState::Best) { message::BlockState::Best => true, message::BlockState::Normal => false, }; - self.sync.push_block_announce_validation(who, hash, announce, is_best); + if peer.info.roles.is_full() { + self.sync.push_block_announce_validation(who, hash, announce, is_best); + } } /// Process the result of the block announce validation. 
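The reworked `announce_block` above treats the caller's `data` as optional and falls back to `block_announce_data_cache`, so a re-announcement carries the same associated data as the original one. The `or_else`/`unwrap_or_default` chain in isolation, assuming the `lru` crate as used in this file (whose `new` takes a plain `usize` capacity):

use lru::LruCache;

fn announce_data(cache: &mut LruCache<u32, Vec<u8>>, hash: u32, data: Option<Vec<u8>>) -> Vec<u8> {
    data
        // No explicit data: reuse whatever was cached for this hash.
        .or_else(|| cache.get(&hash).cloned())
        // Never seen either: announce with empty data rather than not at all.
        .unwrap_or_default()
}

fn main() {
    let mut cache = LruCache::new(16);
    cache.put(1, vec![0xaa]);
    assert_eq!(announce_data(&mut cache, 1, None), vec![0xaa]);
    assert_eq!(announce_data(&mut cache, 2, None), Vec::<u8>::new());
    assert_eq!(announce_data(&mut cache, 1, Some(vec![0xbb])), vec![0xbb]);
}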
@@ -1200,9 +942,16 @@ impl Protocol { validation_result: sync::PollBlockAnnounceValidation, ) -> CustomMessageOutcome { let (header, is_best, who) = match validation_result { - sync::PollBlockAnnounceValidation::Nothing { is_best, who, header } => { + sync::PollBlockAnnounceValidation::Skip => return CustomMessageOutcome::None, + sync::PollBlockAnnounceValidation::Nothing { is_best, who, announce } => { self.update_peer_info(&who); + if let Some(data) = announce.data { + if !data.is_empty() { + self.block_announce_data_cache.put(announce.header.hash(), data); + } + } + // `on_block_announce` returns `OnBlockAnnounce::ImportHeader` // when we have all data required to import the block // in the BlockAnnounce message. This is only when: @@ -1210,19 +959,30 @@ impl Protocol { // AND // 2) parent block is already imported and not pruned. if is_best { - return CustomMessageOutcome::PeerNewBest(who, *header.number()) + return CustomMessageOutcome::PeerNewBest(who, *announce.header.number()) } else { return CustomMessageOutcome::None } - } - sync::PollBlockAnnounceValidation::ImportHeader { header, is_best, who } => { + }, + sync::PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { self.update_peer_info(&who); - (header, is_best, who) - } - sync::PollBlockAnnounceValidation::Failure { who } => { + + if let Some(data) = announce.data { + if !data.is_empty() { + self.block_announce_data_cache.put(announce.header.hash(), data); + } + } + + (announce.header, is_best, who) + }, + sync::PollBlockAnnounceValidation::Failure { who, disconnect } => { + if disconnect { + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + } + self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); return CustomMessageOutcome::None - } + }, }; let number = *header.number(); @@ -1234,41 +994,32 @@ impl Protocol { None, message::generic::BlockResponse { id: 0, - blocks: vec![ - message::generic::BlockData { - hash: header.hash(), - header: Some(header), - body: None, - receipt: None, - message_queue: None, - justification: None, - }, - ], + blocks: vec![message::generic::BlockData { + hash: header.hash(), + header: Some(header), + body: None, + indexed_body: None, + receipt: None, + message_queue: None, + justification: None, + justifications: None, + }], }, ); if is_best { - self.pending_messages.push_back( - CustomMessageOutcome::PeerNewBest(who, number), - ); + self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); } match blocks_to_import { - Ok(sync::OnBlockData::Import(origin, blocks)) => { - CustomMessageOutcome::BlockImport(origin, blocks) - }, - Ok(sync::OnBlockData::Request(peer, mut req)) => { - self.update_peer_request(&peer, &mut req); - CustomMessageOutcome::BlockRequest { - target: peer, - request: req, - } - } + Ok(sync::OnBlockData::Import(origin, blocks)) => + CustomMessageOutcome::BlockImport(origin, blocks), + Ok(sync::OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None - } + }, } } @@ -1286,10 +1037,20 @@ impl Protocol { self.sync.request_justification(&hash, number) } + /// Clear all pending justification requests. + pub fn clear_justification_requests(&mut self) { + self.sync.clear_justification_requests(); + } + /// Request syncing for the given block from given set of peers. 
/// Uses `protocol` to queue a new block download request and tries to dispatch all pending /// requests. - pub fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor) { + pub fn set_sync_fork_request( + &mut self, + peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { self.sync.set_sync_fork_request(peers, hash, number) } @@ -1300,118 +1061,141 @@ impl Protocol { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { - let new_best = results.iter().rev().find_map(|r| match r { - (Ok(BlockImportResult::ImportedUnknown(n, aux, _)), hash) if aux.is_new_best => Some((*n, hash.clone())), - _ => None, - }); - if let Some((best_num, best_hash)) = new_best { - self.sync.update_chain_info(&best_hash, best_num); - self.behaviour.set_legacy_handshake_message(build_status_message(&self.config, &self.context_data.chain)); - self.behaviour.set_notif_protocol_handshake( - &self.block_announces_protocol, - BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() - ); - } - let results = self.sync.on_blocks_processed( - imported, - count, - results, - ); + let results = self.sync.on_blocks_processed(imported, count, results); for result in results { match result { - Ok((id, mut req)) => { - update_peer_request(&mut self.context_data.peers, &id, &mut req); - self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { - target: id, - request: req, - }); - } + Ok((id, req)) => { + self.pending_messages.push_back(prepare_block_request( + &mut self.peers, + id, + req, + )); + }, Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu) - } + }, } } } /// Call this when a justification has been processed by the import queue, with or without /// errors. - pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - self.sync.on_justification_import(hash, number, success) + pub fn justification_import_result( + &mut self, + who: PeerId, + hash: B::Hash, + number: NumberFor, + success: bool, + ) { + self.sync.on_justification_import(hash, number, success); + if !success { + info!("💔 Invalid justification provided by {} for #{}", who, hash); + self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); + self.peerset_handle + .report_peer(who, sc_peerset::ReputationChange::new_fatal("Invalid justification")); + } } - /// Request a finality proof for the given block. - /// - /// Queues a new finality proof request and tries to dispatch all pending requests. - pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - self.sync.request_finality_proof(&hash, number) + /// Set whether the syncing peers set is in reserved-only mode. + pub fn set_reserved_only(&self, reserved_only: bool) { + self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_SYNC, reserved_only); } - /// Notify the protocol that we have learned about the existence of nodes. - /// - /// Can be called multiple times with the same `PeerId`s. - pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - self.behaviour.add_discovered_nodes(peer_ids) + /// Removes a `PeerId` from the list of reserved peers for syncing purposes. 
+ pub fn remove_reserved_peer(&self, peer: PeerId) { + self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); } - pub fn finality_proof_import_result( - &mut self, - request_block: (B::Hash, NumberFor<B>), - finalization_result: Result<(B::Hash, NumberFor<B>), ()>, - ) { - self.sync.on_finality_proof_import(request_block, finalization_result) + /// Returns the list of reserved peers. + pub fn reserved_peers(&self) -> impl Iterator<Item = &PeerId> { + self.behaviour.reserved_peers(HARDCODED_PEERSETS_SYNC) } - /// Must be called after a [`CustomMessageOutcome::FinalityProofRequest`] has been emitted, - /// to notify of the response having arrived. - pub fn on_finality_proof_response( - &mut self, - who: PeerId, - response: message::FinalityProofResponse<B::Hash>, - ) -> CustomMessageOutcome<B> { - trace!(target: "sync", "Finality proof response from {} for {}", who, response.block); - match self.sync.on_block_finality_proof(who, response) { - Ok(sync::OnBlockFinalityProof::Nothing) => CustomMessageOutcome::None, - Ok(sync::OnBlockFinalityProof::Import { peer, hash, number, proof }) => - CustomMessageOutcome::FinalityProofImport(peer, hash, number, proof), - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - } + /// Adds a `PeerId` to the list of reserved peers for syncing purposes. + pub fn add_reserved_peer(&self, peer: PeerId) { + self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); + } + + /// Sets the list of reserved peers for syncing purposes. + pub fn set_reserved_peers(&self, peers: HashSet<PeerId>) { + self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_SYNC, peers.clone()); + } + + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle.remove_reserved_peer( + sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), + peer, + ); + } else { + error!( + target: "sub-libp2p", + "remove_set_reserved_peer with unknown protocol: {}", + protocol + ); } } - fn format_stats(&self) -> String { - let mut out = String::new(); - for (id, stats) in &self.context_data.stats { - let _ = writeln!( - &mut out, - "{}: In: {} bytes ({}), Out: {} bytes ({})", - id, - stats.bytes_in, - stats.count_in, - stats.bytes_out, - stats.count_out, + /// Adds a `PeerId` to the list of reserved peers. + pub fn add_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle + .add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + } else { + error!( + target: "sub-libp2p", + "add_set_reserved_peer with unknown protocol: {}", + protocol ); } - out } - fn report_metrics(&self) { - use std::convert::TryInto; + /// Notify the protocol that we have learned about the existence of nodes on the default set. + /// + /// Can be called multiple times with the same `PeerId`s.
+ pub fn add_default_set_discovered_nodes(&mut self, peer_ids: impl Iterator<Item = PeerId>) { + for peer_id in peer_ids { + self.peerset_handle.add_to_peers_set(HARDCODED_PEERSETS_SYNC, peer_id); + } + } - if let Some(metrics) = &self.metrics { - let mut obsolete_requests: u64 = 0; - for peer in self.context_data.peers.values() { - let n = peer.obsolete_requests.len().try_into().unwrap_or(std::u64::MAX); - obsolete_requests = obsolete_requests.saturating_add(n); - } - metrics.obsolete_requests.set(obsolete_requests); + /// Add a peer to a peers set. + pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle + .add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + } else { + error!( + target: "sub-libp2p", + "add_to_peers_set with unknown protocol: {}", + protocol + ); + } + } + + /// Remove a peer from a peers set. + pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle.remove_from_peers_set( + sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), + peer, + ); + } else { + error!( + target: "sub-libp2p", + "remove_from_peers_set with unknown protocol: {}", + protocol + ); + } + } - let n = self.context_data.peers.len().try_into().unwrap_or(std::u64::MAX); + fn report_metrics(&self) { + if let Some(metrics) = &self.metrics { + let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX); metrics.peers.set(n); let m = self.sync.metrics(); @@ -1419,87 +1203,140 @@ impl<B: BlockT, H: ExHashT> Protocol<B, H> { metrics.fork_targets.set(m.fork_targets.into()); metrics.queued_blocks.set(m.queued_blocks.into()); - metrics.justifications.with_label_values(&["pending"]) + metrics + .justifications + .with_label_values(&["pending"]) .set(m.justifications.pending_requests.into()); - metrics.justifications.with_label_values(&["active"]) + metrics + .justifications + .with_label_values(&["active"]) .set(m.justifications.active_requests.into()); - metrics.justifications.with_label_values(&["failed"]) + metrics + .justifications + .with_label_values(&["failed"]) .set(m.justifications.failed_requests.into()); - metrics.justifications.with_label_values(&["importing"]) + metrics + .justifications + .with_label_values(&["importing"]) .set(m.justifications.importing_requests.into()); - - metrics.finality_proofs.with_label_values(&["pending"]) - .set(m.finality_proofs.pending_requests.into()); - metrics.finality_proofs.with_label_values(&["active"]) - .set(m.finality_proofs.active_requests.into()); - metrics.finality_proofs.with_label_values(&["failed"]) - .set(m.finality_proofs.failed_requests.into()); - metrics.finality_proofs.with_label_values(&["importing"]) - .set(m.finality_proofs.importing_requests.into()); } } } +fn prepare_block_request<B: BlockT, H: ExHashT>( + peers: &mut HashMap<PeerId, Peer<B, H>>, + who: PeerId, + request: message::BlockRequest<B>, +) -> CustomMessageOutcome<B> { + let (tx, rx) = oneshot::channel(); + + if let Some(ref mut peer) = peers.get_mut(&who) { + peer.request = Some((PeerRequest::Block(request.clone()), rx)); + } + + let request = crate::schema::v1::BlockRequest { + fields: request.fields.to_be_u32(), + from_block: match request.from { + message::FromBlock::Hash(h) => + Some(crate::schema::v1::block_request::FromBlock::Hash(h.encode())), + message::FromBlock::Number(n) => + Some(crate::schema::v1::block_request::FromBlock::Number(n.encode())), + }, + to_block:
request.to.map(|h| h.encode()).unwrap_or_default(), + direction: request.direction as i32, + max_blocks: request.max.unwrap_or(0), + support_multiple_justifications: true, + }; + + CustomMessageOutcome::BlockRequest { target: who, request, pending_response: tx } +} + +fn prepare_state_request<B: BlockT, H: ExHashT>( + peers: &mut HashMap<PeerId, Peer<B, H>>, + who: PeerId, + request: crate::schema::v1::StateRequest, +) -> CustomMessageOutcome<B> { + let (tx, rx) = oneshot::channel(); + + if let Some(ref mut peer) = peers.get_mut(&who) { + peer.request = Some((PeerRequest::State, rx)); + } + CustomMessageOutcome::StateRequest { target: who, request, pending_response: tx } +} + +fn prepare_warp_sync_request<B: BlockT, H: ExHashT>( + peers: &mut HashMap<PeerId, Peer<B, H>>, + who: PeerId, + request: crate::warp_request_handler::Request, +) -> CustomMessageOutcome<B> { + let (tx, rx) = oneshot::channel(); + + if let Some(ref mut peer) = peers.get_mut(&who) { + peer.request = Some((PeerRequest::WarpProof, rx)); + } + CustomMessageOutcome::WarpSyncRequest { target: who, request, pending_response: tx } +} + /// Outcome of an incoming custom message. #[derive(Debug)] #[must_use] pub enum CustomMessageOutcome<B: BlockT> { BlockImport(BlockOrigin, Vec<IncomingBlock<B>>), - JustificationImport(Origin, B::Hash, NumberFor<B>, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor<B>, Vec<u8>), + JustificationImport(Origin, B::Hash, NumberFor<B>, Justifications), /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, - protocols: Vec<ConsensusEngineId>, + protocol: Cow<'static, str>, + /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. + negotiated_fallback: Option<Cow<'static, str>>, roles: Roles, - notifications_sink: NotificationsSink + notifications_sink: NotificationsSink, }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { remote: PeerId, - protocols: Vec<ConsensusEngineId>, + protocol: Cow<'static, str>, notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. - NotificationStreamClosed { remote: PeerId, protocols: Vec<ConsensusEngineId> }, + NotificationStreamClosed { + remote: PeerId, + protocol: Cow<'static, str>, + }, /// Messages have been received on one or more notifications protocols. - NotificationsReceived { remote: PeerId, messages: Vec<(ConsensusEngineId, Bytes)> }, + NotificationsReceived { + remote: PeerId, + messages: Vec<(Cow<'static, str>, Bytes)>, + }, /// A new block request must be emitted. - /// You must later call either [`Protocol::on_block_response`] or - /// [`Protocol::on_block_request_failed`]. - /// Each peer can only have one active request. If a request already exists for this peer, it - /// must be silently discarded. - /// It is the responsibility of the handler to ensure that a timeout exists. - BlockRequest { target: PeerId, request: message::BlockRequest<B> }, - /// A new finality proof request must be emitted. - /// Once you have the response, you must call `Protocol::on_finality_proof_response`. - /// It is the responsibility of the handler to ensure that a timeout exists. - /// If the request times out, or the peer responds in an invalid way, the peer has to be - /// disconnect. This will inform the state machine that the request it has emitted is stale. - FinalityProofRequest { target: PeerId, block_hash: B::Hash, request: Vec<u8> }, + BlockRequest { + target: PeerId, + request: crate::schema::v1::BlockRequest, + pending_response: oneshot::Sender<Result<Vec<u8>, RequestFailure>>, + }, + /// A new storage request must be emitted.
+ StateRequest { + target: PeerId, + request: crate::schema::v1::StateRequest, + pending_response: oneshot::Sender<Result<Vec<u8>, RequestFailure>>, + }, + /// A new warp sync request must be emitted. + WarpSyncRequest { + target: PeerId, + request: crate::warp_request_handler::Request, + pending_response: oneshot::Sender<Result<Vec<u8>, RequestFailure>>, + }, /// Peer has reported a new head of chain. PeerNewBest(PeerId, NumberFor<B>), + /// Now connected to a new peer for syncing purposes. + SyncConnected(PeerId), + /// No longer connected to a peer for syncing purposes. + SyncDisconnected(PeerId), None, } -fn update_peer_request<B: BlockT, H: ExHashT>( - peers: &mut HashMap<PeerId, Peer<B, H>>, - who: &PeerId, - request: &mut message::BlockRequest<B>, -) { - if let Some(ref mut peer) = peers.get_mut(who) { - request.id = peer.next_request_id; - peer.next_request_id += 1; - if let Some((timestamp, request)) = peer.block_request.take() { - trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); - peer.obsolete_requests.insert(request.id, timestamp); - } - peer.block_request = Some((Instant::now(), request.clone())); - } -} - -impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { - type ProtocolsHandler = <GenericProto as NetworkBehaviour>::ProtocolsHandler; +impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { + type ProtocolsHandler = <Notifications as NetworkBehaviour>::ProtocolsHandler; type OutEvent = CustomMessageOutcome<B>; fn new_handler(&mut self) -> Self::ProtocolsHandler { @@ -1510,11 +1347,21 @@ impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { self.behaviour.addresses_of_peer(peer_id) } - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.behaviour.inject_connection_established(peer_id, conn, endpoint) } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.behaviour.inject_connection_closed(peer_id, conn, endpoint) } @@ -1544,49 +1391,150 @@ impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::InEvent, Self::OutEvent > - > { + >{ if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } - while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { - self.tick(); + // Check for finished outgoing requests. + let mut finished_block_requests = Vec::new(); + let mut finished_state_requests = Vec::new(); + let mut finished_warp_sync_requests = Vec::new(); + for (id, peer) in self.peers.iter_mut() { + if let Peer { request: Some((_, pending_response)), ..
} = peer { + match pending_response.poll_unpin(cx) { + Poll::Ready(Ok(Ok(resp))) => { + let (req, _) = peer.request.take().unwrap(); + match req { + PeerRequest::Block(req) => { + let protobuf_response = + match crate::schema::v1::BlockResponse::decode(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: "sync", + "Failed to decode block response from peer {:?}: {:?}.", + id, + e + ); + self.peerset_handle.report_peer(*id, rep::BAD_MESSAGE); + self.behaviour + .disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + continue + }, + }; + + finished_block_requests.push((id.clone(), req, protobuf_response)); + }, + PeerRequest::State => { + let protobuf_response = + match crate::schema::v1::StateResponse::decode(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: "sync", + "Failed to decode state response from peer {:?}: {:?}.", + id, + e + ); + self.peerset_handle.report_peer(*id, rep::BAD_MESSAGE); + self.behaviour + .disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + continue + }, + }; + + finished_state_requests.push((*id, protobuf_response)); + }, + PeerRequest::WarpProof => { + finished_warp_sync_requests.push((*id, resp)); + }, + } + }, + Poll::Ready(Ok(Err(e))) => { + peer.request.take(); + debug!(target: "sync", "Request to peer {:?} failed: {:?}.", id, e); + + match e { + RequestFailure::Network(OutboundFailure::Timeout) => { + self.peerset_handle.report_peer(*id, rep::TIMEOUT); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { + self.peerset_handle.report_peer(*id, rep::BAD_PROTOCOL); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + RequestFailure::Network(OutboundFailure::DialFailure) => { + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + RequestFailure::Refused => { + self.peerset_handle.report_peer(*id, rep::REFUSED); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + RequestFailure::Network(OutboundFailure::ConnectionClosed) | + RequestFailure::NotConnected => { + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + RequestFailure::UnknownProtocol => { + debug_assert!( + false, + "Block request protocol should always be known." 
+ ); + }, + RequestFailure::Obsolete => { + debug_assert!( + false, + "Can not receive `RequestFailure::Obsolete` after dropping the \ + response receiver.", + ); + }, + } + }, + Poll::Ready(Err(oneshot::Canceled)) => { + peer.request.take(); + trace!( + target: "sync", + "Request to peer {:?} failed due to oneshot being canceled.", + id, + ); + self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + }, + Poll::Pending => {}, + } + } + } + for (id, req, protobuf_response) in finished_block_requests { + let ev = self.on_block_response(id, req, protobuf_response); + self.pending_messages.push_back(ev); + } + for (id, protobuf_response) in finished_state_requests { + let ev = self.on_state_response(id, protobuf_response); + self.pending_messages.push_back(ev); + } + for (id, response) in finished_warp_sync_requests { + let ev = self.on_warp_sync_response(id, EncodedProof(response)); + self.pending_messages.push_back(ev); } - while let Poll::Ready(Some(())) = self.propagate_timeout.poll_next_unpin(cx) { - self.propagate_transactions(); + while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { + self.tick(); } - for (id, mut r) in self.sync.block_requests() { - update_peer_request(&mut self.context_data.peers, &id, &mut r); - let event = CustomMessageOutcome::BlockRequest { - target: id.clone(), - request: r, - }; + for (id, request) in self.sync.block_requests() { + let event = prepare_block_request(&mut self.peers, id.clone(), request); self.pending_messages.push_back(event); } - for (id, mut r) in self.sync.justification_requests() { - update_peer_request(&mut self.context_data.peers, &id, &mut r); - let event = CustomMessageOutcome::BlockRequest { - target: id, - request: r, - }; + if let Some((id, request)) = self.sync.state_request() { + let event = prepare_state_request(&mut self.peers, id, request); self.pending_messages.push_back(event); } - for (id, r) in self.sync.finality_proof_requests() { - let event = CustomMessageOutcome::FinalityProofRequest { - target: id, - block_hash: r.block, - request: r.request, - }; + for (id, request) in self.sync.justification_requests() { + let event = prepare_block_request(&mut self.peers, id, request); self.pending_messages.push_back(event); } - if let Poll::Ready(Some((tx_hash, result))) = self.pending_transactions.poll_next_unpin(cx) { - if let Some(peers) = self.pending_transactions_peers.remove(&tx_hash) { - peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result)); - } else { - warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); - } + if let Some((id, request)) = self.sync.warp_sync_request() { + let event = prepare_warp_sync_request(&mut self.peers, id, request); + self.pending_messages.push_back(event); } // Check if there is any block announcement validation finished. 
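[Editor's note: the three sketches below are illustrative and are not part of the patch.] The hunks above replace the old `update_peer_request` bookkeeping with per-peer oneshot channels: `prepare_block_request`, `prepare_state_request` and `prepare_warp_sync_request` stash the receiving half in the peer record and ship the sending half out with the emitted `CustomMessageOutcome`, and `poll` later drains any finished receivers, handling failures and cancellation. A minimal, self-contained model of that pattern, assuming only the `futures` crate (the `PendingPeer` type and all literals are hypothetical):

```rust
use futures::{channel::oneshot, future::FutureExt, task::noop_waker_ref};
use std::task::{Context, Poll};

/// Hypothetical stand-in for the patch's `Peer` record: at most one
/// in-flight request, tracked by the receiving half of a oneshot channel.
struct PendingPeer {
    request: Option<oneshot::Receiver<Result<Vec<u8>, &'static str>>>,
}

fn main() {
    let mut peer = PendingPeer { request: None };

    // `prepare_block_request`-style bookkeeping: keep the receiver next to
    // the peer, hand the sender out with the emitted request event.
    let (tx, rx) = oneshot::channel();
    peer.request = Some(rx);

    // The transport layer eventually answers through the sender.
    tx.send(Ok(b"encoded response".to_vec())).unwrap();

    // `poll`-style draining: check the stored receiver without blocking.
    let mut cx = Context::from_waker(noop_waker_ref());
    if let Some(mut rx) = peer.request.take() {
        match rx.poll_unpin(&mut cx) {
            // Still in flight: put the receiver back and try again later.
            Poll::Pending => peer.request = Some(rx),
            Poll::Ready(Ok(Ok(bytes))) => println!("response: {} bytes", bytes.len()),
            Poll::Ready(Ok(Err(e))) => println!("request failed: {}", e),
            // The sender was dropped, e.g. the request became obsolete.
            Poll::Ready(Err(oneshot::Canceled)) => println!("request canceled"),
        }
    }
}
```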
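Earlier hunks in this file cache any non-empty `BlockAnnounce::data` payload under the announced header's hash (`block_announce_data_cache.put(...)`). Assuming the cache is an LRU map (the `put`/`get` calls match the `lru` crate's older API, in which `LruCache::new` takes a plain capacity; recent releases take a `NonZeroUsize`), the behaviour is as in this sketch, with made-up keys and capacity:

```rust
use lru::LruCache;

fn main() {
    // Hypothetical capacity; the real node derives it from its configuration.
    let mut cache: LruCache<u64, Vec<u8>> = LruCache::new(2);

    // Only non-empty announce data is cached (mirrors the `is_empty` check).
    let (hash, data) = (0xabcd_u64, vec![1, 2, 3]);
    if !data.is_empty() {
        cache.put(hash, data);
    }

    cache.put(0x1111, vec![4]);
    cache.put(0x2222, vec![5]); // capacity 2: evicts 0xabcd, the LRU entry

    assert!(cache.get(&0xabcd).is_none());
    assert_eq!(cache.get(&0x2222), Some(&vec![5]));
}
```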
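The reserved-peer and peers-set helpers above all translate a protocol name into a peerset `SetId` the same way: hardcoded sets (such as the sync set) occupy the low indices, and user-registered notification protocols are addressed by their position in `notification_protocols`, offset past the hardcoded sets. A toy model of that mapping (the constant's value and the protocol names are assumptions for illustration):

```rust
// One hardcoded set (sync) in this toy model.
const NUM_HARDCODED_PEERSETS: usize = 1;

/// Maps a protocol name to its peerset set index, mirroring the
/// `position(..) + NUM_HARDCODED_PEERSETS` pattern used in the patch.
fn set_id_for(notification_protocols: &[&str], protocol: &str) -> Option<usize> {
    notification_protocols
        .iter()
        .position(|p| *p == protocol)
        .map(|index| index + NUM_HARDCODED_PEERSETS)
}

fn main() {
    let protocols = ["/chain/grandpa/1", "/chain/transactions/1"];
    assert_eq!(set_id_for(&protocols, "/chain/transactions/1"), Some(2));
    // Unknown protocols yield `None`; the real helpers log an error instead.
    assert_eq!(set_id_for(&protocols, "/unknown/1"), None);
}
```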
@@ -1598,7 +1546,7 @@ impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { } if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } let event = match self.behaviour.poll(cx, params) { @@ -1609,121 +1557,222 @@ impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), + Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }) => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }), }; let outcome = match event { - GenericProtoOut::CustomProtocolOpen { peer_id, received_handshake, notifications_sink, .. } => { - // `received_handshake` can be either a `Status` message if received from the - // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block - // announces substream. - match <Message<B> as DecodeAll>::decode_all(&mut &received_handshake[..]) { - Ok(GenericMessage::Status(handshake)) => { - let handshake = BlockAnnouncesHandshake { - roles: handshake.roles, - best_number: handshake.best_number, - best_hash: handshake.best_hash, - genesis_hash: handshake.genesis_hash, - }; - - self.on_peer_connected(peer_id, handshake, notifications_sink) - }, - Ok(msg) => { - debug!( - target: "sync", - "Expected Status message from {}, but got {:?}", - peer_id, - msg, - ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None - } - Err(err) => { - match <BlockAnnouncesHandshake<B> as DecodeAll>::decode_all(&mut &received_handshake[..]) { - Ok(handshake) => { - self.on_peer_connected(peer_id, handshake, notifications_sink) - } - Err(err2) => { - debug!( - target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {} & {}", - peer_id, - received_handshake, - err.what(), - err2, - ); - self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + NotificationsOut::CustomProtocolOpen { + peer_id, + set_id, + received_handshake, + notifications_sink, + negotiated_fallback, + } => { + // Set number 0 is hardcoded as the default set of peers we sync from. + if set_id == HARDCODED_PEERSETS_SYNC { + debug_assert!(negotiated_fallback.is_none()); + + // `received_handshake` can be either a `Status` message if received from the + // legacy substream, or a `BlockAnnouncesHandshake` if received from the block + // announces substream.
+ match <Message<B> as DecodeAll>::decode_all(&mut &received_handshake[..]) { + Ok(GenericMessage::Status(handshake)) => { + let handshake = BlockAnnouncesHandshake { + roles: handshake.roles, + best_number: handshake.best_number, + best_hash: handshake.best_hash, + genesis_hash: handshake.genesis_hash, + }; + + if self.on_sync_peer_connected(peer_id, handshake).is_ok() { + CustomMessageOutcome::SyncConnected(peer_id) + } else { CustomMessageOutcome::None } - } + }, + Ok(msg) => { + debug!( + target: "sync", + "Expected Status message from {}, but got {:?}", + peer_id, + msg, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + }, + Err(err) => { + match <BlockAnnouncesHandshake<B> as DecodeAll>::decode_all( + &mut &received_handshake[..], + ) { + Ok(handshake) => { + if self.on_sync_peer_connected(peer_id, handshake).is_ok() { + CustomMessageOutcome::SyncConnected(peer_id) + } else { + CustomMessageOutcome::None + } + }, + Err(err2) => { + debug!( + target: "sync", + "Couldn't decode handshake sent by {}: {:?}: {} & {}", + peer_id, + received_handshake, + err, + err2, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + }, + } + }, + } + } else { + match ( + message::Roles::decode_all(&received_handshake[..]), + self.peers.get(&peer_id), + ) { + (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), + negotiated_fallback, + roles, + notifications_sink, + }, + (Err(_), Some(peer)) if received_handshake.is_empty() => { + // As a convenience, we allow opening substreams for "external" + // notification protocols with an empty handshake. In that case the + // roles are taken from what is already known locally about the peer. + // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), + negotiated_fallback, + roles: peer.info.roles, + notifications_sink, + } + }, + (Err(err), _) => { + debug!(target: "sync", "Failed to parse remote handshake: {}", err); + self.bad_handshake_substreams.insert((peer_id, set_id)); + self.behaviour.disconnect_peer(&peer_id, set_id); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + }, } } - } - GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, .. } => { - CustomMessageOutcome::NotificationStreamReplaced { - remote: peer_id, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), - notifications_sink, - } - }, - GenericProtoOut::CustomProtocolClosed { peer_id, ..
} => { - self.on_peer_disconnected(peer_id) }, - GenericProtoOut::LegacyMessage { peer_id, message } => - self.on_custom_message(peer_id, message), - GenericProtoOut::Notification { peer_id, protocol_name, message } => - match self.legacy_equiv_by_name.get(&protocol_name) { - Some(Fallback::Consensus(engine_id)) => { - CustomMessageOutcome::NotificationsReceived { - remote: peer_id, - messages: vec![(*engine_id, message.freeze())], - } + NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => + if set_id == HARDCODED_PEERSETS_SYNC { + CustomMessageOutcome::None + } else if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { + CustomMessageOutcome::None + } else { + CustomMessageOutcome::NotificationStreamReplaced { + remote: peer_id, + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), + notifications_sink, } - Some(Fallback::Transactions) => { - if let Ok(m) = message::Transactions::decode(&mut message.as_ref()) { - self.on_transactions(peer_id, m); - } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); - } + }, + NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { + // Set number 0 is hardcoded as the default set of peers we sync from. + if set_id == HARDCODED_PEERSETS_SYNC { + if self.on_sync_peer_disconnected(peer_id).is_ok() { + CustomMessageOutcome::SyncDisconnected(peer_id) + } else { + log::trace!( + target: "sync", + "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", + peer_id + ); CustomMessageOutcome::None } - Some(Fallback::BlockAnnounce) => { - if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { - self.push_block_announce_validation(peer_id, announce); - - // Make sure that the newly added block announce validation future was - // polled once to be registered in the task. - if let Poll::Ready(res) = self.sync.poll_block_announce_validation(cx) { - self.process_block_announce_validation_result(res) - } else { - CustomMessageOutcome::None - } + } else if self.bad_handshake_substreams.remove(&(peer_id, set_id)) { + // The substream that has just been closed had been opened with a bad + // handshake. The outer layers have never received an opening event about this + // substream, and consequently shouldn't receive a closing event either. + CustomMessageOutcome::None + } else { + CustomMessageOutcome::NotificationStreamClosed { + remote: peer_id, + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), + } + } + }, + NotificationsOut::Notification { peer_id, set_id, message } => match set_id { + HARDCODED_PEERSETS_SYNC if self.peers.contains_key(&peer_id) => { + if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { + self.push_block_announce_validation(peer_id, announce); + + // Make sure that the newly added block announce validation future was + // polled once to be registered in the task.
+ if let Poll::Ready(res) = self.sync.poll_block_announce_validation(cx) { + self.process_block_announce_validation_result(res) } else { - warn!(target: "sub-libp2p", "Failed to decode block announce"); CustomMessageOutcome::None } - } - None => { - debug!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); + } else { + warn!(target: "sub-libp2p", "Failed to decode block announce"); CustomMessageOutcome::None } - } + }, + HARDCODED_PEERSETS_SYNC => { + trace!( + target: "sync", + "Received sync message for a peer earlier refused by the sync layer: {}", + peer_id + ); + CustomMessageOutcome::None + }, + _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => + CustomMessageOutcome::None, + _ => { + let protocol_name = self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(); + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + messages: vec![(protocol_name, message.freeze())], + } + }, + }, }; - if let CustomMessageOutcome::None = outcome { - Poll::Pending - } else { - Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) + if !matches!(outcome, CustomMessageOutcome::<B>::None) { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) } + + if let Some(message) = self.pending_messages.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) + } + + // This block can only be reached if an event was pulled from the behaviour and that + // resulted in `CustomMessageOutcome::None`. Since there might be another pending + // message from the behaviour, the task is scheduled again. + cx.waker().wake_by_ref(); + Poll::Pending } fn inject_addr_reach_failure( &mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, - error: &dyn std::error::Error + error: &dyn std::error::Error, ) { self.behaviour.inject_addr_reach_failure(peer_id, addr, error) } @@ -1732,18 +1781,26 @@ impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { self.behaviour.inject_dial_failure(peer_id) } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.behaviour.inject_new_listen_addr(addr) + fn inject_new_listener(&mut self, id: ListenerId) { + self.behaviour.inject_new_listener(id) } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.behaviour.inject_expired_listen_addr(addr) + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.behaviour.inject_new_listen_addr(id, addr) + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.behaviour.inject_expired_listen_addr(id, addr) } fn inject_new_external_addr(&mut self, addr: &Multiaddr) { self.behaviour.inject_new_external_addr(addr) } + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + self.behaviour.inject_expired_external_addr(addr) + } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { self.behaviour.inject_listener_error(id, err); } @@ -1752,9 +1809,3 @@ impl<B: BlockT, H: ExHashT> NetworkBehaviour for Protocol<B, H> { self.behaviour.inject_listener_closed(id, reason); } } - -impl<B: BlockT, H: ExHashT> Drop for Protocol<B, H> { - fn drop(&mut self) { - debug!(target: "sync", "Network stats:\n{}", self.format_stats()); - } -} diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs index 637bf805b5024..e0b35647c7531 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -1,26 +1,27 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate.
-// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// along with this program. If not, see <https://www.gnu.org/licenses/>. //! Network event types. These are not part of the protocol, but rather //! events that happen on the network like DHT get/put results received. use bytes::Bytes; -use libp2p::core::PeerId; -use libp2p::kad::record::Key; -use sp_runtime::ConsensusEngineId; +use libp2p::{core::PeerId, kad::record::Key}; +use std::borrow::Cow; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -46,6 +47,18 @@ pub enum Event { /// Event generated by a DHT. Dht(DhtEvent), + /// Now connected to a new peer for syncing purposes. + SyncConnected { + /// Node we are now syncing from. + remote: PeerId, + }, + + /// Now disconnected from a peer for syncing purposes. + SyncDisconnected { + /// Node we are no longer syncing from. + remote: PeerId, + }, + /// Opened a substream with the given node with the given notifications protocol. /// /// The protocol is always one of the notification protocols that have been registered. @@ -53,7 +66,16 @@ pub enum Event { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + /// This is always equal to the value of + /// [`crate::config::NonDefaultSetConfig::notifications_protocol`] of one of the + /// configured sets. + protocol: Cow<'static, str>, + /// If the negotiation didn't use the main name of the protocol (the one in + /// `notifications_protocol`), then this field contains which name has actually been + /// used. + /// Always contains a value equal to the value in + /// [`crate::config::NonDefaultSetConfig::fallback_names`]. + negotiated_fallback: Option<Cow<'static, str>>, /// Role of the remote. role: ObservedRole, }, @@ -64,7 +86,7 @@ pub enum Event { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, }, /// Received one or more messages from the given node using the given protocol. @@ -72,22 +94,22 @@ pub enum Event { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(ConsensusEngineId, Bytes)>, + messages: Vec<(Cow<'static, str>, Bytes)>, }, } /// Role that the peer sent to us during the handshake, with the addition of what our local node /// knows about that peer. +/// +/// > **Note**: This enum is different from the `Role` enum.
The `Role` enum indicates what a +/// > node says about itself, while `ObservedRole` is a `Role` merged with the +/// > information known locally about that node. #[derive(Debug, Clone)] pub enum ObservedRole { /// Full node. Full, /// Light node. Light, - /// When we are a validator node, this is a sentry that protects us. - OurSentry, - /// When we are a sentry node, this is the authority we are protecting. - OurGuardedAuthority, /// Third-party authority. Authority, } @@ -95,6 +117,6 @@ pub enum ObservedRole { impl ObservedRole { /// Returns `true` for `ObservedRole::Light`. pub fn is_light(&self) -> bool { - matches!(self, ObservedRole::Light) + matches!(self, Self::Light) } } diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs deleted file mode 100644 index 3133471b0d249..0000000000000 --- a/client/network/src/protocol/generic_proto.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Implementation of libp2p's `NetworkBehaviour` trait that opens a single substream with the -//! remote and then allows any communication with them. -//! -//! The `Protocol` struct uses `GenericProto` in order to open substreams with the rest of the -//! network, then performs the Substrate protocol handling on top. - -pub use self::behaviour::{GenericProto, GenericProtoOut}; -pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready, LegacyConnectionKillError}; - -mod behaviour; -mod handler; -mod upgrade; -mod tests; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs deleted file mode 100644 index 7b62b154016c3..0000000000000 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ /dev/null @@ -1,1434 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
- -use crate::config::ProtocolId; -use crate::protocol::generic_proto::{ - handler::{NotificationsSink, NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}, - upgrade::RegisteredProtocol -}; - -use bytes::BytesMut; -use fnv::FnvHashMap; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId}; -use libp2p::swarm::{ - DialPeerCondition, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - PollParameters -}; -use log::{debug, error, trace, warn}; -use parking_lot::RwLock; -use rand::distributions::{Distribution as _, Uniform}; -use smallvec::SmallVec; -use std::task::{Context, Poll}; -use std::{borrow::Cow, cmp, collections::{hash_map::Entry, VecDeque}}; -use std::{error, mem, pin::Pin, str, sync::Arc, time::Duration}; -use wasm_timer::Instant; - -/// Network behaviour that handles opening substreams for custom protocols with other peers. -/// -/// ## Legacy vs new protocol -/// -/// The `GenericProto` behaves as following: -/// -/// - Whenever a connection is established, we open a single substream (called "legacy protocol" in -/// the source code) on that connection. This substream name depends on the `protocol_id` and -/// `versions` passed at initialization. If the remote refuses this substream, we close the -/// connection. -/// -/// - For each registered protocol, we also open an additional substream for this protocol. If the -/// remote refuses this substream, then it's fine. -/// -/// - Whenever we want to send a message, we can call either `send_packet` to force the legacy -/// substream, or `write_notification` to indicate a registered protocol. If the registered -/// protocol was refused or isn't supported by the remote, we always use the legacy instead. -/// -/// ## How it works -/// -/// The role of the `GenericProto` is to synchronize the following components: -/// -/// - The libp2p swarm that opens new connections and reports disconnects. -/// - The connection handler (see `handler.rs`) that handles individual connections. -/// - The peerset manager (PSM) that requests links to peers to be established or broken. -/// - The external API, that requires knowledge of the links that have been established. -/// -/// Each connection handler can be in four different states: Enabled+Open, Enabled+Closed, -/// Disabled+Open, or Disabled+Closed. The Enabled/Disabled component must be in sync with the -/// peerset manager. For example, if the peerset manager requires a disconnection, we disable the -/// connection handlers of that peer. The Open/Closed component must be in sync with the external -/// API. -/// -/// However, a connection handler for a peer only exists if we are actually connected to that peer. -/// What this means is that there are six possible states for each peer: Disconnected, Dialing -/// (trying to connect), Enabled+Open, Enabled+Closed, Disabled+Open, Disabled+Closed. -/// Most notably, the Dialing state must correspond to a "link established" state in the peerset -/// manager. In other words, the peerset manager doesn't differentiate whether we are dialing a -/// peer or connected to it. -/// -/// There may be multiple connections to a peer. However, the status of a peer on -/// the API of this behaviour and towards the peerset manager is aggregated in -/// the following way: -/// -/// 1. The enabled/disabled status is the same across all connections, as -/// decided by the peerset manager. -/// 2. 
`send_packet` and `write_notification` always send all data over -/// the same connection to preserve the ordering provided by the transport, -/// as long as that connection is open. If it closes, a second open -/// connection may take over, if one exists, but that case should be no -/// different than a single connection failing and being re-established -/// in terms of potential reordering and dropped messages. Messages can -/// be received on any connection. -/// 3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the -/// first connection reports `NotifsHandlerOut::Open`. -/// 4. The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the -/// last connection reports `NotifsHandlerOut::Closed`. -/// -/// In this way, the number of actual established connections to the peer is -/// an implementation detail of this behaviour. Note that, in practice and at -/// the time of this writing, there may be at most two connections to a peer -/// and only as a result of simultaneous dialing. However, the implementation -/// accommodates for any number of connections. -/// -/// Additionally, there also exists a "banning" system. If we fail to dial a peer, we "ban" it for -/// a few seconds. If the PSM requests connecting to a peer that is currently "banned", the next -/// dialing attempt is delayed until after the ban expires. However, the PSM will still consider -/// the peer to be connected. This "ban" is thus not a ban in a strict sense: If a "banned" peer -/// tries to connect, the connection is accepted. A ban only delays dialing attempts. -/// -pub struct GenericProto { - /// `PeerId` of the local node. - local_peer_id: PeerId, - - /// Legacy protocol to open with peers. Never modified. - legacy_protocol: RegisteredProtocol, - - /// Notification protocols. Entries are only ever added and not removed. - /// Contains, for each protocol, the protocol name and the message to send as part of the - /// initial handshake. - notif_protocols: Vec<(Cow<'static, str>, Arc>>)>, - - /// Receiver for instructions about who to connect to or disconnect from. - peerset: sc_peerset::Peerset, - - /// List of peers in our state. - peers: FnvHashMap, - - /// The elements in `peers` occasionally contain `Delay` objects that we would normally have - /// to be polled one by one. In order to avoid doing so, as an optimization, every `Delay` is - /// instead put inside of `delays` and reference by a [`DelayId`]. This stream - /// yields `PeerId`s whose `DelayId` is potentially ready. - /// - /// By design, we never remove elements from this list. Elements are removed only when the - /// `Delay` triggers. As such, this stream may produce obsolete elements. - delays: stream::FuturesUnordered + Send>>>, - - /// [`DelayId`] to assign to the next delay. - next_delay_id: DelayId, - - /// List of incoming messages we have sent to the peer set manager and that are waiting for an - /// answer. - incoming: SmallVec<[IncomingPeer; 6]>, - - /// We generate indices to identify incoming connections. This is the next value for the index - /// to use when a connection is incoming. - next_incoming_index: sc_peerset::IncomingIndex, - - /// Events to produce from `poll()`. - events: VecDeque>, -} - -/// Identifier for a delay firing. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -struct DelayId(u64); - -/// State of a peer we're connected to. -#[derive(Debug)] -enum PeerState { - /// State is poisoned. This is a temporary state for a peer and we should always switch back - /// to it later. 
If it is found in the wild, that means there was either a panic or a bug in - /// the state machine code. - Poisoned, - - /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial - /// delay to the connection. - Banned { - /// Until when the peer is banned. - until: Instant, - }, - - /// The peerset requested that we connect to this peer. We are currently not connected. - PendingRequest { - /// When to actually start dialing. References an entry in `delays`. - timer: DelayId, - /// When the `timer` will trigger. - timer_deadline: Instant, - }, - - /// The peerset requested that we connect to this peer. We are currently dialing this peer. - Requested, - - /// We are connected to this peer but the peerset refused it. - /// - /// We may still have ongoing traffic with that peer, but it should cease shortly. - Disabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, - /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. - banned_until: Option, - }, - - /// We are connected to this peer but we are not opening any Substrate substream. The handler - /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, - /// but should get disconnected in a few seconds. - DisabledPendingEnable { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, - /// When to enable this remote. References an entry in `delays`. - timer: DelayId, - /// When the `timer` will trigger. - timer_deadline: Instant, - }, - - /// We are connected to this peer and the peerset has accepted it. The handler is in the - /// enabled state. - Enabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, - }, - - /// We received an incoming connection from this peer and forwarded that - /// connection request to the peerset. The connection handlers are waiting - /// for initialisation, i.e. to be enabled or disabled based on whether - /// the peerset accepts or rejects the peer. - Incoming, -} - -impl PeerState { - /// True if there exists an established connection to the peer - /// that is open for custom protocol traffic. - fn is_open(&self) -> bool { - self.get_open().is_some() - } - - /// Returns the [`NotificationsSink`] of the first established connection - /// that is open for custom protocol traffic. - fn get_open(&self) -> Option<&NotificationsSink> { - match self { - PeerState::Disabled { open, .. } | - PeerState::DisabledPendingEnable { open, .. } | - PeerState::Enabled { open, .. } => - if !open.is_empty() { - Some(&open[0].1) - } else { - None - } - PeerState::Poisoned => None, - PeerState::Banned { .. } => None, - PeerState::PendingRequest { .. } => None, - PeerState::Requested => None, - PeerState::Incoming { .. } => None, - } - } - - /// True if that node has been requested by the PSM. - fn is_requested(&self) -> bool { - match self { - PeerState::Poisoned => false, - PeerState::Banned { .. } => false, - PeerState::PendingRequest { .. } => true, - PeerState::Requested => true, - PeerState::Disabled { .. } => false, - PeerState::DisabledPendingEnable { .. } => true, - PeerState::Enabled { .. } => true, - PeerState::Incoming { .. 
} => false, - } - } -} - -/// State of an "incoming" message sent to the peer set manager. -#[derive(Debug)] -struct IncomingPeer { - /// Id of the remote peer of the incoming connection. - peer_id: PeerId, - /// If true, this "incoming" still corresponds to an actual connection. If false, then the - /// connection corresponding to it has been closed or replaced already. - alive: bool, - /// Id that the we sent to the peerset. - incoming_id: sc_peerset::IncomingIndex, -} - -/// Event that can be emitted by the `GenericProto`. -#[derive(Debug)] -pub enum GenericProtoOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Id of the peer we are connected to. - peer_id: PeerId, - /// Handshake that was sent to us. - /// This is normally a "Status" message, but this is out of the concern of this code. - received_handshake: Vec, - /// Object that permits sending notifications to the peer. - notifications_sink: NotificationsSink, - }, - - /// The [`NotificationsSink`] object used to send notifications with the given peer must be - /// replaced with a new one. - /// - /// This event is typically emitted when a transport-level connection is closed and we fall - /// back to a secondary connection. - CustomProtocolReplaced { - /// Id of the peer we are connected to. - peer_id: PeerId, - /// Replacement for the previous [`NotificationsSink`]. - notifications_sink: NotificationsSink, - }, - - /// Closed a custom protocol with the remote. The existing [`NotificationsSink`] should - /// be dropped. - CustomProtocolClosed { - /// Id of the peer we were connected to. - peer_id: PeerId, - /// Reason why the substream closed, for debugging purposes. - reason: Cow<'static, str>, - }, - - /// Receives a message on the legacy substream. - LegacyMessage { - /// Id of the peer the message came from. - peer_id: PeerId, - /// Message that has been received. - message: BytesMut, - }, - - /// Receives a message on a custom protocol substream. - /// - /// Also concerns received notifications for the notifications API. - Notification { - /// Id of the peer the message came from. - peer_id: PeerId, - /// Engine corresponding to the message. - protocol_name: Cow<'static, str>, - /// Message that has been received. - message: BytesMut, - }, -} - -impl GenericProto { - /// Creates a `CustomProtos`. - pub fn new( - local_peer_id: PeerId, - protocol: impl Into, - versions: &[u8], - handshake_message: Vec, - peerset: sc_peerset::Peerset, - notif_protocols: impl Iterator, Vec)>, - ) -> Self { - let notif_protocols = notif_protocols - .map(|(n, hs)| (n, Arc::new(RwLock::new(hs)))) - .collect::>(); - - assert!(!notif_protocols.is_empty()); - - let legacy_handshake_message = Arc::new(RwLock::new(handshake_message)); - let legacy_protocol = RegisteredProtocol::new(protocol, versions, legacy_handshake_message); - - GenericProto { - local_peer_id, - legacy_protocol, - notif_protocols, - peerset, - peers: FnvHashMap::default(), - delays: Default::default(), - next_delay_id: DelayId(0), - incoming: SmallVec::new(), - next_incoming_index: sc_peerset::IncomingIndex(0), - events: VecDeque::new(), - } - } - - /// Registers a new notifications protocol. - /// - /// You are very strongly encouraged to call this method very early on. Any open connection - /// will retain the protocols that were registered then, and not any new one. 
- pub fn register_notif_protocol( - &mut self, - protocol_name: impl Into>, - handshake_msg: impl Into> - ) { - self.notif_protocols.push((protocol_name.into(), Arc::new(RwLock::new(handshake_msg.into())))); - } - - /// Modifies the handshake of the given notifications protocol. - /// - /// Has no effect if the protocol is unknown. - pub fn set_notif_protocol_handshake( - &mut self, - protocol_name: &str, - handshake_message: impl Into> - ) { - if let Some(protocol) = self.notif_protocols.iter_mut().find(|(name, _)| name == protocol_name) { - *protocol.1.write() = handshake_message.into(); - } - } - - /// Modifies the handshake of the legacy protocol. - pub fn set_legacy_handshake_message( - &mut self, - handshake_message: impl Into> - ) { - *self.legacy_protocol.handshake_message().write() = handshake_message.into(); - } - - /// Returns the number of discovered nodes that we keep in memory. - pub fn num_discovered_peers(&self) -> usize { - self.peerset.num_discovered_peers() - } - - /// Returns the list of all the peers we have an open channel to. - pub fn open_peers<'a>(&'a self) -> impl Iterator + 'a { - self.peers.iter().filter(|(_, state)| state.is_open()).map(|(id, _)| id) - } - - /// Returns true if we have an open connection to the given peer. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - self.peers.get(peer_id).map(|p| p.is_open()).unwrap_or(false) - } - - /// Returns the [`NotificationsSink`] that sends notifications to the given peer, or `None` - /// if the custom protocols aren't opened with this peer. - /// - /// If [`GenericProto::is_open`] returns `true` for this `PeerId`, then this method is - /// guaranteed to return `Some`. - pub fn notifications_sink(&self, peer_id: &PeerId) -> Option<&NotificationsSink> { - self.peers.get(peer_id).and_then(|p| p.get_open()) - } - - /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId) { - debug!(target: "sub-libp2p", "External API => Disconnect {:?}", peer_id); - self.disconnect_peer_inner(peer_id, None); - } - - /// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the peer - /// for the specific duration. - fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option) { - let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { - entry - } else { - return - }; - - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // We're not connected anyway. - st @ PeerState::Disabled { .. } => *entry.into_mut() = st, - st @ PeerState::Requested => *entry.into_mut() = st, - st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, - st @ PeerState::Banned { .. } => *entry.into_mut() = st, - - // DisabledPendingEnable => Disabled. - PeerState::DisabledPendingEnable { - open, - timer_deadline, - timer: _ - } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()); - let banned_until = Some(if let Some(ban) = ban { - cmp::max(timer_deadline, Instant::now() + ban) - } else { - timer_deadline - }); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - } - }, - - // Enabled => Disabled. 
- PeerState::Enabled { open } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - } - }, - - // Incoming => Disabled. - PeerState::Incoming => { - let inc = if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == *entry.key() && i.alive) { - inc - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ - incoming for incoming peer"); - return - }; - - inc.alive = false; - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { - open: SmallVec::new(), - banned_until - } - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } - } - - /// Returns the list of all the peers that the peerset currently requests us to be connected to. - pub fn requested_peers<'a>(&'a self) -> impl Iterator + 'a { - self.peers.iter().filter(|(_, state)| state.is_requested()).map(|(id, _)| id) - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - match self.peers.get(peer_id) { - None => false, - Some(PeerState::Disabled { .. }) => false, - Some(PeerState::DisabledPendingEnable { .. }) => false, - Some(PeerState::Enabled { .. }) => true, - Some(PeerState::Incoming { .. }) => false, - Some(PeerState::Requested) => false, - Some(PeerState::PendingRequest { .. }) => false, - Some(PeerState::Banned { .. }) => false, - Some(PeerState::Poisoned) => false, - } - } - - /// Notify the behaviour that we have learned about the existence of nodes. - /// - /// Can be called multiple times with the same `PeerId`s. - pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - let local_peer_id = &self.local_peer_id; - self.peerset.discovered(peer_ids.filter_map(|peer_id| { - if peer_id == *local_peer_id { - error!( - target: "sub-libp2p", - "Discovered our own identity. This is a minor inconsequential bug." - ); - return None; - } - - debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); - Some(peer_id) - })); - } - - /// Sends a notification to a peer. - /// - /// Has no effect if the custom protocol is not open with the given peer. - /// - /// Also note that even if we have a valid open substream, it may in fact be already closed - /// without us knowing, in which case the packet will not be received. - /// - /// The `fallback` parameter is used for backwards-compatibility reason if the remote doesn't - /// support our protocol. One needs to pass the equivalent of what would have been passed - /// with `send_packet`. 
- pub fn write_notification( - &mut self, - target: &PeerId, - protocol_name: Cow<'static, str>, - message: impl Into<Vec<u8>>, - ) { - let notifs_sink = match self.peers.get(target).and_then(|p| p.get_open()) { - None => { - debug!(target: "sub-libp2p", - "Tried to send notification to {:?} without an open channel.", - target); - return - }, - Some(sink) => sink - }; - - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?})", - target, - protocol_name, - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - notifs_sink.send_sync_notification( - protocol_name, - message - ); - } - - /// Returns the state of the peerset manager, for debugging purposes. - pub fn peerset_debug_info(&mut self) -> serde_json::Value { - self.peerset.debug_info() - } - - /// Function that is called when the peerset wants us to connect to a peer. - fn peerset_report_connect(&mut self, peer_id: PeerId) { - let mut occ_entry = match self.peers.entry(peer_id) { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => { - // If there's no entry in `self.peers`, start dialing. - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key()); - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key()); - self.events.push_back(NetworkBehaviourAction::DialPeer { - peer_id: entry.key().clone(), - condition: DialPeerCondition::Disconnected - }); - entry.insert(PeerState::Requested); - return; - } - }; - - let now = Instant::now(); - - match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { - PeerState::Banned { ref until } if *until > now => { - let peer_id = occ_entry.key().clone(); - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ {:?}", peer_id, until); - - let delay_id = self.next_delay_id; - self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*until - now); - self.delays.push(async move { - delay.await; - (delay_id, peer_id) - }.boxed()); - - *occ_entry.into_mut() = PeerState::PendingRequest { - timer: delay_id, - timer_deadline: *until, - }; - }, - - PeerState::Banned { ..
} => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::DialPeer { - peer_id: occ_entry.key().clone(), - condition: DialPeerCondition::Disconnected - }); - *occ_entry.into_mut() = PeerState::Requested; - }, - - PeerState::Disabled { - open, - banned_until: Some(ref banned) - } if *banned > now => { - let peer_id = occ_entry.key().clone(); - debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", - peer_id, banned); - - let delay_id = self.next_delay_id; - self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(*banned - now); - self.delays.push(async move { - delay.await; - (delay_id, peer_id) - }.boxed()); - - *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - open, - timer: delay_id, - timer_deadline: *banned, - }; - }, - - PeerState::Disabled { open, banned_until: _ } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", - occ_entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open }; - }, - - PeerState::Incoming => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", - occ_entry.key()); - if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == *occ_entry.key() && i.alive) { - inc.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ - incoming for incoming peer") - } - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() }; - }, - - st @ PeerState::Enabled { .. } => { - warn!(target: "sub-libp2p", - "PSM => Connect({:?}): Already connected.", - occ_entry.key()); - *occ_entry.into_mut() = st; - }, - st @ PeerState::DisabledPendingEnable { .. } => { - warn!(target: "sub-libp2p", - "PSM => Connect({:?}): Already pending enabling.", - occ_entry.key()); - *occ_entry.into_mut() = st; - }, - st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { - warn!(target: "sub-libp2p", - "PSM => Connect({:?}): Duplicate request.", - occ_entry.key()); - *occ_entry.into_mut() = st; - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()), - } - } - - /// Function that is called when the peerset wants us to disconnect from a peer. - fn peerset_report_disconnect(&mut self, peer_id: PeerId) { - let mut entry = match self.peers.entry(peer_id) { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); - return - } - }; - - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. 
} => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); - *entry.into_mut() = st; - }, - - PeerState::DisabledPendingEnable { - open, - timer_deadline, - timer: _ - } => { - debug!(target: "sub-libp2p", - "PSM => Drop({:?}): Interrupting pending enabling.", - entry.key()); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: Some(timer_deadline), - }; - }, - - PeerState::Enabled { open } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None - } - }, - st @ PeerState::Incoming => { - error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", - entry.key()); - *entry.into_mut() = st; - }, - PeerState::Requested => { - // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other - // sub-systems (such as the discovery mechanism) may require dialing this peer as - // well at the same time. - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); - entry.remove(); - }, - PeerState::PendingRequest { timer_deadline, .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); - *entry.into_mut() = PeerState::Banned { until: timer_deadline } - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()), - } - } - - /// Function that is called when the peerset wants us to accept a connection - /// request from a peer. - fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); - return - }; - - if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, \ sending back dropped", index, incoming.peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); - self.peerset.dropped(incoming.peer_id); - return - } - - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", - index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *state = PeerState::Enabled { open: SmallVec::new() }; - } - peer => error!(target: "sub-libp2p", - "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer) - } - } - - /// Function that is called when the peerset wants us to reject an incoming peer.
- fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); - return - }; - - if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Obsolete incoming, \ ignoring", index, incoming.peer_id); - return - } - - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", - index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *state = PeerState::Disabled { - open: SmallVec::new(), - banned_until: None - }; - } - peer => error!(target: "sub-libp2p", - "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer) - } - } -} - -impl NetworkBehaviour for GenericProto { - type ProtocolsHandler = NotifsHandlerProto; - type OutEvent = GenericProtoOut; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - NotifsHandlerProto::new( - self.legacy_protocol.clone(), - self.notif_protocols.clone(), - ) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec<Multiaddr> { - Vec::new() - } - - fn inject_connected(&mut self, _: &PeerId) { - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", - conn, endpoint, peer_id); - match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) { - (st @ &mut PeerState::Requested, endpoint) | - (st @ &mut PeerState::PendingRequest { .. }, endpoint) => { - debug!(target: "sub-libp2p", - "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", - peer_id, endpoint - ); - *st = PeerState::Enabled { open: SmallVec::new() }; - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable - }); - } - - // Note: it may seem weird that "Banned" peers get treated as if they were absent. - // This is because the word "Banned" means "temporarily prevent outgoing connections to - // this peer", and not "banned" in the sense that we would refuse the peer altogether. - (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | - (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { - let incoming_id = self.next_incoming_index; - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; - debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", - peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", - peer_id, incoming_id); - self.peerset.incoming(peer_id.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: peer_id.clone(), - alive: true, - incoming_id, - }); - *st = PeerState::Incoming { }; - } - - (st @ &mut PeerState::Poisoned, endpoint) | - (st @ &mut PeerState::Banned { ..
}, endpoint) => { - let banned_until = if let PeerState::Banned { until } = st { - Some(*until) - } else { - None - }; - debug!(target: "sub-libp2p", - "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", - peer_id, endpoint); - *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); - } - - (PeerState::Incoming { .. }, _) => { - debug!(target: "sub-libp2p", - "Secondary connection {:?} to {} waiting for PSM decision.", - conn, peer_id); - }, - - (PeerState::Enabled { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", - peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable - }); - } - - (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", - peer_id, conn); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); - } - } - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", - conn, endpoint, peer_id); - match self.peers.get_mut(peer_id) { - Some(PeerState::Disabled { open, .. }) | - Some(PeerState::DisabledPendingEnable { open, .. }) | - Some(PeerState::Enabled { open, .. }) => { - // Check if the "link" to the peer is already considered closed, - // i.e. there is no connection that is open for custom protocols, - // in which case `CustomProtocolClosed` was already emitted. - let closed = open.is_empty(); - let sink_closed = open.get(0).map_or(false, |(c, _)| c == conn); - open.retain(|(c, _)| c != conn); - if !closed { - if let Some((_, sink)) = open.get(0) { - if sink_closed { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: peer_id.clone(), - notifications_sink: sink.clone(), - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - } else { - debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), - }; - - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - } - } - _ => {} - } - } - - fn inject_disconnected(&mut self, peer_id: &PeerId) { - match self.peers.remove(peer_id) { - None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | - Some(PeerState::Banned { .. }) => - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", - "`inject_disconnected` called for unknown peer {}", - peer_id), - - Some(PeerState::Disabled { open, banned_until, .. }) => { - if !open.is_empty() { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); - } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); - if let Some(until) = banned_until { - self.peers.insert(peer_id.clone(), PeerState::Banned { until }); - } - } - - Some(PeerState::DisabledPendingEnable { open, timer_deadline, .. 
}) => { - if !open.is_empty() { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); - } - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was disabled but pending enable.", - peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); - } - - Some(PeerState::Enabled { open, .. }) => { - if !open.is_empty() { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch: disconnected from {} with non-empty list of connections", - peer_id - ); - } - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); - self.peers.insert(peer_id.clone(), PeerState::Banned { - until: Instant::now() + Duration::from_secs(ban_dur) - }); - } - - // In the incoming state, we don't report "Dropped". Instead we will just ignore the - // corresponding Accept/Reject. - Some(PeerState::Incoming { }) => { - if let Some(state) = self.incoming.iter_mut() - .find(|i| i.alive && i.peer_id == *peer_id) - { - debug!(target: "sub-libp2p", - "Libp2p => Disconnected({}): Was in incoming mode with id {:?}.", - peer_id, state.incoming_id); - state.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ - corresponding to an incoming state in peers") - } - } - - Some(PeerState::Poisoned) => - error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id), - } - } - - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) { - trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // The peer is not in our list. - st @ PeerState::Banned { .. } => { - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, - - // "Basic" situation: we failed to reach a peer that the peerset requested. - PeerState::Requested | PeerState::PendingRequest { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = PeerState::Banned { - until: Instant::now() + Duration::from_secs(5) - }; - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()) - }, - - // We can still get dial failures even if we are already connected to the peer, - // as an extra diagnostic for an earlier attempt. - st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } | - st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } - - } else { - // The peer is not in our list. 
- trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - } - } - - fn inject_event( - &mut self, - source: PeerId, - connection: ConnectionId, - event: NotifsHandlerOut, - ) { - match event { - NotifsHandlerOut::Closed { endpoint, reason } => { - debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} closed for custom protocols: {}", - source, endpoint, reason); - - let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { - entry - } else { - error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); - return - }; - - let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { mut open } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); - } else { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source - ); - } - - // TODO: We switch the entire peer state to "disabled" because of possible - // race conditions involving the legacy substream. - // Once https://github.com/paritytech/substrate/issues/5670 is done, this - // should be changed to stay in the `Enabled` state. - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); - self.peerset.dropped(source.clone()); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: source.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) - } else { - None - }); - - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None - }; - - (last, new_notifications_sink) - }, - PeerState::Disabled { mut open, banned_until } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); - } else { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source - ); - } - - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) - } else { - None - }); - - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - }; - - (last, new_notifications_sink) - }, - PeerState::DisabledPendingEnable { - mut open, - timer, - timer_deadline - } => { - let pos = open.iter().position(|(c, _)| c == &connection); - let sink_closed = pos == Some(0); - if let Some(pos) = pos { - open.remove(pos); - } else { - debug_assert!(false); - error!( - target: "sub-libp2p", - "State mismatch with {}: unknown closed connection", - source - ); - } - - let last = open.is_empty(); - let new_notifications_sink = open.iter().next().and_then(|(_, sink)| - if sink_closed { - Some(sink.clone()) - } else { - None - }); - - *entry.into_mut() = PeerState::DisabledPendingEnable { - open, - timer, - timer_deadline - }; - - (last, new_notifications_sink) - }, - state => { - error!(target: "sub-libp2p", - "Unexpected state in the custom protos handler: {:?}", - state); - return - } - }; - - if last { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = GenericProtoOut::CustomProtocolClosed { - reason, - peer_id: source, - }; - 
self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - - } else { - if let Some(new_notifications_sink) = new_notifications_sink { - let event = GenericProtoOut::CustomProtocolReplaced { - peer_id: source, - notifications_sink: new_notifications_sink, - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); - } - } - - NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } => { - debug!(target: "sub-libp2p", - "Handler({:?}) => Endpoint {:?} open for custom protocols.", - source, endpoint); - - let first = match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, .. }) | - Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | - Some(PeerState::Disabled { ref mut open, .. }) => { - let first = open.is_empty(); - if !open.iter().any(|(c, _)| *c == connection) { - open.push((connection, notifications_sink.clone())); - } else { - error!( - target: "sub-libp2p", - "State mismatch: connection with {} opened a second time", - source - ); - } - first - } - state => { - error!(target: "sub-libp2p", - "Open: Unexpected state in the custom protos handler: {:?}", - state); - return - } - }; - - if first { - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { - peer_id: source, - received_handshake, - notifications_sink - }; - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - - } else { - debug!( - target: "sub-libp2p", - "Handler({:?}) => Secondary connection opened custom protocol", - source - ); - } - } - - NotifsHandlerOut::CustomMessage { message } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); - trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = GenericProtoOut::LegacyMessage { - peer_id: source, - message, - }; - - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - - NotifsHandlerOut::Notification { protocol_name, message } => { - debug_assert!(self.is_open(&source)); - trace!( - target: "sub-libp2p", - "Handler({:?}) => Notification({:?})", - source, - protocol_name, - ); - trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); - let event = GenericProtoOut::Notification { - peer_id: source, - protocol_name, - message, - }; - - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - } - } - - fn poll( - &mut self, - cx: &mut Context, - _params: &mut impl PollParameters, - ) -> Poll< - NetworkBehaviourAction< - NotifsHandlerIn, - Self::OutEvent, - >, - > { - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); - } - - // Poll for instructions from the peerset. - // Note that the peerset is a *best effort* crate, and we have to use defensive programming. 
- loop { - match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) { - Poll::Ready(Some(sc_peerset::Message::Accept(index))) => { - self.peerset_report_accept(index); - } - Poll::Ready(Some(sc_peerset::Message::Reject(index))) => { - self.peerset_report_reject(index); - } - Poll::Ready(Some(sc_peerset::Message::Connect(id))) => { - self.peerset_report_connect(id); - } - Poll::Ready(Some(sc_peerset::Message::Drop(id))) => { - self.peerset_report_disconnect(id); - } - Poll::Ready(None) => { - error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); - break; - } - Poll::Pending => break, - } - } - - while let Poll::Ready(Some((delay_id, peer_id))) = - Pin::new(&mut self.delays).poll_next(cx) { - let peer_state = match self.peers.get_mut(&peer_id) { - Some(s) => s, - // We intentionally never remove elements from `delays`, and it may - // thus contain peers which are now gone. This is a normal situation. - None => continue, - }; - - match peer_state { - PeerState::PendingRequest { timer, .. } if *timer == delay_id => { - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); - self.events.push_back(NetworkBehaviourAction::DialPeer { - peer_id, - condition: DialPeerCondition::Disconnected - }); - *peer_state = PeerState::Requested; - } - - PeerState::DisabledPendingEnable { timer, open, .. } if *timer == delay_id => { - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *peer_state = PeerState::Enabled { open: mem::replace(open, Default::default()) }; - } - - // We intentionally never remove elements from `delays`, and it may - // thus contain obsolete entries. This is a normal situation. - _ => {}, - } - } - - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); - } - - Poll::Pending - } -} diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs deleted file mode 100644 index fbfdb1cb6ab0e..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ /dev/null @@ -1,737 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming -//! and outgoing substreams for all gossiping protocols together. -//! -//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the -//! protocols that are Substrate-related and outside of the scope of libp2p. -//! -//! # Usage -//! -//! The handler can be in one of the following states: `Initial`, `Enabled`, `Disabled`. -//! -//! The `Initial` state is the state that the handler initially is in. It is a temporary state -//!
during which the user must either enable or disable the handler. After that, the handler stays -//! either enabled or disabled. -//! -//! On the wire, we try to open the following substreams: -//! -//! - One substream for each notification protocol passed as parameter to the -//! `NotifsHandlerProto::new` function. -//! - One "legacy" substream used for anything non-related to gossiping, and used as a fallback -//! in case the notification protocol can't be opened. -//! -//! When the handler is in the `Enabled` state, we immediately open and try to maintain all the -//! aforementioned substreams. When the handler is in the `Disabled` state, we immediately close -//! (or abort opening) all these substreams. It is intended that in the future we allow states in -//! which some protocols are open and not others. Symmetrically, we allow incoming -//! Substrate-related substreams if and only if we are in the `Enabled` state. -//! -//! The user has the choice between sending a message with `SendNotification`, to send a -//! notification, and `SendLegacy`, to send any other kind of message. -//! - -use crate::protocol::generic_proto::{ - handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, - handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, - handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, - upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, -}; - -use bytes::BytesMut; -use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use futures::{ - channel::mpsc, - lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, - prelude::* -}; -use log::{debug, error}; -use parking_lot::{Mutex, RwLock}; -use std::{borrow::Cow, str, sync::Arc, task::{Context, Poll}}; - -/// Number of pending notifications in asynchronous contexts. -/// See [`NotificationsSink::reserve_notification`] for context. -const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; -/// Number of pending notifications in synchronous contexts. -const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsHandler`]. -/// -/// See the documentation at the module level for more information. -pub struct NotifsHandlerProto { - /// Prototypes for handlers for inbound substreams, and the message we respond with in the - /// handshake. - in_handlers: Vec<(NotifsInHandlerProto, Arc<RwLock<Vec<u8>>>)>, - - /// Prototypes for handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandlerProto, Arc<RwLock<Vec<u8>>>)>, - - /// Prototype for handler for backwards-compatibility. - legacy: LegacyProtoHandlerProto, -} - -/// The actual handler once the connection has been established. -/// -/// See the documentation at the module level for more information.
-pub struct NotifsHandler { - /// Handlers for inbound substreams, and the message we respond with in the handshake. - in_handlers: Vec<(NotifsInHandler, Arc<RwLock<Vec<u8>>>)>, - - /// Handlers for outbound substreams, and the initial handshake message we send. - out_handlers: Vec<(NotifsOutHandler, Arc<RwLock<Vec<u8>>>)>, - - /// Whether we are the connection dialer or listener. - endpoint: ConnectedPoint, - - /// Handler for backwards-compatibility. - legacy: LegacyProtoHandler, - - /// In the situation where either the legacy substream has been opened or the handshake-bearing - /// notifications protocol is open, but we haven't sent out any [`NotifsHandlerOut::Open`] - /// event yet, this contains the received handshake waiting to be reported through the - /// external API. - pending_handshake: Option<Vec<u8>>, - - /// State of this handler. - enabled: EnabledState, - - /// If we receive inbound substream requests while in initialization mode, - /// we push the corresponding index here and process them when the handler - /// gets enabled/disabled. - pending_in: Vec<usize>, - - /// If `Some`, contains the two `Receiver`s connected to the [`NotificationsSink`] that has - /// been sent out. The notifications to send out can be pulled from these receivers. - /// We use two different channels in order to have two different channel sizes, but from the - /// receiving point of view, the two channels are the same. - /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. - /// - /// Contains `Some` if and only if it has been reported to the user that the substreams are - /// open. - notifications_sink_rx: Option< - stream::Select< - stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>, - stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>> - > - >, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum EnabledState { - Initial, - Enabled, - Disabled, -} - -impl IntoProtocolsHandler for NotifsHandlerProto { - type Handler = NotifsHandler; - - fn inbound_protocol(&self) -> SelectUpgrade<UpgradeCollec<NotificationsIn>, RegisteredProtocol> { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.inbound_protocol()) - .collect::<UpgradeCollec<_>>(); - - SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) - } - - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - NotifsHandler { - in_handlers: self.in_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), - out_handlers: self.out_handlers - .into_iter() - .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) - .collect(), - endpoint: connected_point.clone(), - legacy: self.legacy.into_handler(remote_peer_id, connected_point), - pending_handshake: None, - enabled: EnabledState::Initial, - pending_in: Vec::new(), - notifications_sink_rx: None, - } - } -} - -/// Event that can be received by a `NotifsHandler`. -#[derive(Debug, Clone)] -pub enum NotifsHandlerIn { - /// The node should start using custom protocols. - Enable, - - /// The node should stop using custom protocols. - Disable, -} - -/// Event that can be emitted by a `NotifsHandler`. -#[derive(Debug)] -pub enum NotifsHandlerOut { - /// The connection is open for custom protocols. - Open { - /// The endpoint of the connection that is open for custom protocols. - endpoint: ConnectedPoint, - /// Handshake that was sent to us. - /// This is normally a "Status" message, but that is outside the concern of this code. - received_handshake: Vec<u8>, - /// How notifications can be sent to this node.
- notifications_sink: NotificationsSink, - }, - - /// The connection is closed for custom protocols. - Closed { - /// The reason for closing, for diagnostic purposes. - reason: Cow<'static, str>, - /// The endpoint of the connection that closed for custom protocols. - endpoint: ConnectedPoint, - }, - - /// Received a non-gossiping message on the legacy substream. - CustomMessage { - /// Message that has been received. - /// - /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a - /// notification. - message: BytesMut, - }, - - /// Received a message on a custom protocol substream. - Notification { - /// Name of the protocol of the message. - protocol_name: Cow<'static, str>, - - /// Message that has been received. - message: BytesMut, - }, -} - -/// Sink connected directly to the node background task. Allows sending notifications to the peer. -/// -/// Can be cloned in order to obtain multiple references to the same peer. -#[derive(Debug, Clone)] -pub struct NotificationsSink { - inner: Arc<NotificationsSinkInner>, -} - -#[derive(Debug)] -struct NotificationsSinkInner { - /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. - async_channel: FuturesMutex<mpsc::Sender<NotificationsSinkMessage>>, - /// Sender to use in synchronous contexts. Uses a synchronous mutex. - /// This channel has a large capacity and is meant to be used in contexts where - /// back-pressure cannot be properly exerted. - /// It will be removed in a future version. - sync_channel: Mutex<mpsc::Sender<NotificationsSinkMessage>>, -} - -/// Message emitted through the [`NotificationsSink`] and processed by the background task -/// dedicated to the peer. -#[derive(Debug)] -enum NotificationsSinkMessage { - /// Message emitted by [`NotificationsSink::reserve_notification`] and - /// [`NotificationsSink::write_notification_now`]. - Notification { - protocol_name: Cow<'static, str>, - message: Vec<u8>, - }, - - /// Must close the connection. - ForceClose, -} - -impl NotificationsSink { - /// Sends a notification to the peer. - /// - /// If too many messages are already buffered, the notification is silently discarded and the - /// connection to the peer will be closed shortly after. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol. - /// - /// This method will be removed in a future version. - pub fn send_sync_notification<'a>( - &'a self, - protocol_name: Cow<'static, str>, - message: impl Into<Vec<u8>> - ) { - let mut lock = self.inner.sync_channel.lock(); - let result = lock.try_send(NotificationsSinkMessage::Notification { - protocol_name, - message: message.into() - }); - - if result.is_err() { - // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the - // buffer, and therefore that `try_send` will succeed. - let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); - debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); - } - } - - /// Wait until the remote is ready to accept a notification. - /// - /// Returns an error in the case where the connection is closed. - /// - /// The protocol name is expected to be checked ahead of calling this method. It is a logic - /// error to send a notification using an unknown protocol.
- pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result<Ready<'a>, ()> { - let mut lock = self.inner.async_channel.lock().await; - - let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; - if poll_ready.is_ok() { - Ok(Ready { protocol_name, lock }) - } else { - Err(()) - } - } -} - -/// Notification slot is reserved and the notification can actually be sent. -#[must_use] -#[derive(Debug)] -pub struct Ready<'a> { - /// Guarded channel. The channel inside is guaranteed to not be full. - lock: FuturesMutexGuard<'a, mpsc::Sender<NotificationsSinkMessage>>, - /// Name of the protocol. Should match one of the protocols passed at initialization. - protocol_name: Cow<'static, str>, -} - -impl<'a> Ready<'a> { - /// Consumes this slot's reservation and actually queues the notification. - /// - /// Returns an error if the substream has been closed. - pub fn send( - mut self, - notification: impl Into<Vec<u8>> - ) -> Result<(), ()> { - self.lock.start_send(NotificationsSinkMessage::Notification { - protocol_name: self.protocol_name, - message: notification.into(), - }).map_err(|_| ()) - } -} - -/// Error specific to the collection of protocols. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum NotifsHandlerError { - /// Channel of synchronous notifications is full. - SyncNotificationsClogged, - /// Error in legacy protocol. - Legacy(<LegacyProtoHandler as ProtocolsHandler>::Error), -}
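Taken together, `send_sync_notification` and `reserve_notification` are the two halves of the sink API documented above: the former never waits but may silently drop the message and force-close the connection when the 2048-entry synchronous buffer is full, while the latter waits for one of the 8 asynchronous slots and therefore exerts back-pressure on the caller. A minimal sketch of both paths, assuming a `NotificationsSink` already obtained from the behaviour; the function and the protocol name are placeholders:

    // Hedged sketch, not part of the diff; `sink` and the protocol name
    // are assumptions made for illustration.
    async fn send_both_ways(sink: &NotificationsSink) {
        let protocol: Cow<'static, str> = Cow::Borrowed("/example/protocol/1");
        // Sync path: returns immediately; on a full buffer the message is
        // discarded and the connection is scheduled to close.
        sink.send_sync_notification(protocol.clone(), b"fire-and-forget".to_vec());
        // Async path: waits for a free slot; `send` fails only if the
        // substream has been closed in the meantime.
        if let Ok(ready) = sink.reserve_notification(protocol).await {
            let _ = ready.send(b"back-pressured".to_vec());
        }
    }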
-impl NotifsHandlerProto { - /// Builds a new handler. - /// - /// `list` is a list of notification protocol names, and the message to send as part of the - /// handshake. At the moment, the message is always the same whether we open a substream - /// ourselves or respond to a handshake from the remote. - /// - /// The first protocol in `list` is special-cased as the protocol that contains the handshake - /// to report through the [`NotifsHandlerOut::Open`] event. - /// - /// # Panic - /// - /// - Panics if `list` is empty. - /// - pub fn new( - legacy: RegisteredProtocol, - list: impl Into<Vec<(Cow<'static, str>, Arc<RwLock<Vec<u8>>>)>>, - ) -> Self { - let list = list.into(); - assert!(!list.is_empty()); - - let out_handlers = list - .clone() - .into_iter() - .map(|(proto_name, initial_message)| { - (NotifsOutHandlerProto::new(proto_name), initial_message) - }).collect(); - - let in_handlers = list.clone() - .into_iter() - .map(|(proto_name, msg)| (NotifsInHandlerProto::new(proto_name), msg)) - .collect(); - - NotifsHandlerProto { - in_handlers, - out_handlers, - legacy: LegacyProtoHandlerProto::new(legacy), - } - } -}
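Since `new` is the only constructor, the shape of the `list` argument deserves a concrete illustration: one `(protocol name, shared handshake)` pair per notification protocol, with the first entry special-cased as described above. A hedged sketch, assuming a pre-built `RegisteredProtocol`; the protocol name and handshake bytes are placeholders:

    // Hedged sketch, not part of the diff; the name and handshake bytes
    // are illustrative assumptions.
    fn build_handler_proto(legacy: RegisteredProtocol) -> NotifsHandlerProto {
        let handshake = Arc::new(RwLock::new(b"example-handshake".to_vec()));
        NotifsHandlerProto::new(
            legacy,
            vec![(Cow::Borrowed("/example/protocol/1"), handshake)],
        )
        // Passing an empty list would panic, per the contract documented above.
    }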
-impl ProtocolsHandler for NotifsHandler { - type InEvent = NotifsHandlerIn; - type OutEvent = NotifsHandlerOut; - type Error = NotifsHandlerError; - type InboundProtocol = SelectUpgrade<UpgradeCollec<NotificationsIn>, RegisteredProtocol>; - type OutboundProtocol = NotificationsOut; - // Index within the `out_handlers` - type OutboundOpenInfo = usize; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> { - let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.listen_protocol().into_upgrade().1) - .collect::<UpgradeCollec<_>>(); - - let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); - SubstreamProtocol::new(proto, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output, - (): () - ) { - match out { - EitherOutput::First((out, num)) => - self.in_handlers[num].0.inject_fully_negotiated_inbound(out, ()), - EitherOutput::Second(out) => - self.legacy.inject_fully_negotiated_inbound(out, ()), - } - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output, - num: Self::OutboundOpenInfo - ) { - self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ()) - } - - fn inject_event(&mut self, message: NotifsHandlerIn) { - match message { - NotifsHandlerIn::Enable => { - if let EnabledState::Enabled = self.enabled { - debug!("enabling already-enabled handler"); - } - self.enabled = EnabledState::Enabled; - self.legacy.inject_event(LegacyProtoHandlerIn::Enable); - for (handler, initial_message) in &mut self.out_handlers { - // We create `initial_message` on a separate line to be sure that the lock - // is released as soon as possible. - let initial_message = initial_message.read().clone(); - handler.inject_event(NotifsOutHandlerIn::Enable { - initial_message, - }); - } - for num in self.pending_in.drain(..) { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. - let handshake_message = self.in_handlers[num].1.read().clone(); - self.in_handlers[num].0 - .inject_event(NotifsInHandlerIn::Accept(handshake_message)); - } - }, - NotifsHandlerIn::Disable => { - if let EnabledState::Disabled = self.enabled { - debug!("disabling already-disabled handler"); - } - self.legacy.inject_event(LegacyProtoHandlerIn::Disable); - // The notifications protocols start in the disabled state. If we were in the - // "Initial" state, then we shouldn't disable the notifications protocols again. - if self.enabled != EnabledState::Initial { - for (handler, _) in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Disable); - } - } - self.enabled = EnabledState::Disabled; - for num in self.pending_in.drain(..) { - self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); - } - }, - } - } - - fn inject_dial_upgrade_error( - &mut self, - num: usize, - err: ProtocolsHandlerUpgrErr<NotificationsHandshakeError> - ) { - match err { - ProtocolsHandlerUpgrErr::Timeout => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timeout - ), - ProtocolsHandlerUpgrErr::Timer => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timer - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) - ), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) => - self.out_handlers[num].0.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) - ), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - // Iterate over each handler and return the maximum value. - - let mut ret = self.legacy.connection_keep_alive(); - if ret.is_yes() { - return KeepAlive::Yes; - } - - for (handler, _) in &self.in_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - for (handler, _) in &self.out_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - ret - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error> - > { - if let Some(notifications_sink_rx) = &mut self.notifications_sink_rx { - 'poll_notifs_sink: loop { - // Before we poll the notifications sink receiver, check that all the notification - // channels are ready to send a message. - // TODO: it is planned that in the future we switch to one `NotificationsSink` per - // protocol, in which case each sink should wait only for its corresponding handler - // to be ready, and not all handlers - // see https://github.com/paritytech/substrate/issues/5670 - for (out_handler, _) in &mut self.out_handlers { - match out_handler.poll_ready(cx) { - Poll::Ready(_) => {}, - Poll::Pending => break 'poll_notifs_sink, - } - } - - let message = match notifications_sink_rx.poll_next_unpin(cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) | Poll::Pending => break, - }; - - match message { - NotificationsSinkMessage::Notification { - protocol_name, - message - } => { - let mut found_any_with_name = false; - - for (handler, _) in &mut self.out_handlers { - if *handler.protocol_name() == protocol_name { - found_any_with_name = true; - if handler.is_open() { - handler.send_or_discard(message); - continue 'poll_notifs_sink; - } - } - } - - // This code can be reached via the following scenarios: - // - // - User tried to send a notification on a non-existing protocol. This - // most likely relates to https://github.com/paritytech/substrate/issues/6827 - // - User tried to send a notification to a peer we're not or no longer - // connected to. This happens in a normal scenario due to the racy nature - // of connections and disconnections, and is benign. - // - // We print a warning in the former condition.
- if !found_any_with_name { - log::warn!( - target: "sub-libp2p", - "Tried to send a notification on non-registered protocol: {:?}", - protocol_name - ); - } - } - NotificationsSinkMessage::ForceClose => { - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)); - } - } - } - } - - // If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing - // substream (either the legacy substream or the one special-cased as providing the - // handshake) is open but the user isn't aware yet of the substreams being open. - // When that is the case, neither the legacy substream nor the incoming notifications - // substreams should be polled, otherwise there is a risk of receiving messages from them. - if self.pending_handshake.is_none() { - while let Poll::Ready(ev) = self.legacy.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, .. } => - match *protocol.info() {}, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { - received_handshake, - .. - }) => { - if self.notifications_sink_rx.is_none() { - debug_assert!(self.pending_handshake.is_none()); - self.pending_handshake = Some(received_handshake); - } - cx.waker().wake_by_ref(); - return Poll::Pending; - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => { - // We consciously drop the receivers despite notifications being potentially - // still buffered up. - self.notifications_sink_rx = None; - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason } - )) - }, - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )) - }, - ProtocolsHandlerEvent::Close(err) => - return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), - } - } - } - - for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { - loop { - let poll = if self.notifications_sink_rx.is_some() { - handler.poll(cx) - } else { - handler.poll_process(cx) - }; - - let ev = match poll { - Poll::Ready(e) => e, - Poll::Pending => break, - }; - - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => - error!("Incoming substream handler tried to open a substream"), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => - match self.enabled { - EnabledState::Initial => self.pending_in.push(handler_num), - EnabledState::Enabled => { - // We create `handshake_message` on a separate line to be sure - // that the lock is released as soon as possible. 
- let handshake_message = handshake_message.read().clone(); - handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) - }, - EnabledState::Disabled => - handler.inject_event(NotifsInHandlerIn::Refuse), - }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - debug_assert!(self.pending_handshake.is_none()); - if self.notifications_sink_rx.is_some() { - let msg = NotifsHandlerOut::Notification { - message, - protocol_name: handler.protocol_name().clone(), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); - } - }, - } - } - } - - for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { - while let Poll::Ready(ev) = handler.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol - .map_info(|()| handler_num), - }), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - - // Opened substream on the handshake-bearing notification protocol. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }) - if handler_num == 0 => - { - if self.notifications_sink_rx.is_none() && self.pending_handshake.is_none() { - self.pending_handshake = Some(handshake); - } - }, - - // Nothing to do in response to other notification substreams being opened - // or closed. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, - } - } - } - - if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { - if let Some(handshake) = self.pending_handshake.take() { - let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); - let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); - let notifications_sink = NotificationsSink { - inner: Arc::new(NotificationsSinkInner { - async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), - }), - }; - - debug_assert!(self.notifications_sink_rx.is_none()); - self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); - - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { - endpoint: self.endpoint.clone(), - received_handshake: handshake, - notifications_sink - } - )) - } - } - - Poll::Pending - } -} diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs deleted file mode 100644 index 404093553785c..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
- -use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; -use bytes::BytesMut; -use futures::prelude::*; -use futures_timer::Delay; -use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; -use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{debug, error}; -use smallvec::{smallvec, SmallVec}; -use std::{borrow::Cow, collections::VecDeque, convert::Infallible, error, fmt, io, mem}; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific -/// to Substrate on that single connection. -/// -/// Note that there can be multiple instances of this struct simultaneously for the same peer, -/// if there are multiple established connections to the peer. -/// -/// ## State of the handler -/// -/// There are six possible states for the handler: -/// -/// - Enabled and open, which is a normal operation. -/// - Enabled and closed, in which case it will try to open substreams. -/// - Disabled and open, in which case it will try to close substreams. -/// - Disabled and closed, in which case the handler is idle. The connection will be -/// garbage-collected after a few seconds if nothing more happens. -/// - Initializing and open. -/// - Initializing and closed, which is the state the handler starts in. -/// -/// The Init/Enabled/Disabled state is entirely controlled by the user by sending `Enable` or -/// `Disable` messages to the handler. The handler itself never transitions automatically between -/// these states. For example, if the handler reports a network misbehaviour, it will close the -/// substreams but it is the role of the user to send a `Disabled` event if it wants the connection -/// to close. Otherwise, the handler will try to reopen substreams. -/// -/// The handler starts in the "Initializing" state and must be transitioned to Enabled or Disabled -/// as soon as possible. -/// -/// The Open/Closed state is decided by the handler and is reported with the `CustomProtocolOpen` -/// and `CustomProtocolClosed` events. The `CustomMessage` event can only be generated if the -/// handler is open. -/// -/// ## How it works -/// -/// When the handler is created, it is initially in the `Init` state and waits for either a -/// `Disable` or an `Enable` message from the outer layer. At any time, the outer layer is free to -/// toggle the handler between the disabled and enabled states. -/// -/// When the handler switches to "enabled", it opens a substream and negotiates the protocol named -/// `/substrate/xxx`, where `xxx` is chosen by the user and depends on the chain. -/// -/// For backwards compatibility reasons, when we switch to "enabled" for the first time (while we -/// are still in "init" mode) and we are the connection listener, we don't open a substream. -/// -/// In order to handle the situation where both the remote and us get enabled at the same time, -/// we tolerate multiple substreams open at the same time. Messages are transmitted on an arbitrary -/// substream. The endpoints don't try to agree on a single substream. -/// -/// We consider that we are now "closed" if the remote closes all the existing substreams. -/// Re-opening it can then be performed by closing all active substreams and re-opening one. -/// -pub struct LegacyProtoHandlerProto { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, -} - -impl LegacyProtoHandlerProto { - /// Builds a new `LegacyProtoHandlerProto`. - pub fn new(protocol: RegisteredProtocol) -> Self { - LegacyProtoHandlerProto { - protocol, - } - } -}
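The six states listed in the documentation above are the cross-product of the Init/Enabled/Disabled axis (driven by the user) and the open/closed axis (decided by the handler). A self-contained sketch, not taken from the file, that restates that table in code form; the names are illustrative and do not appear in legacy.rs itself:

    // Hedged sketch restating the documented state table.
    enum Mode { Init, Enabled, Disabled }

    fn describe(mode: &Mode, open: bool) -> &'static str {
        match (mode, open) {
            (Mode::Enabled, true) => "normal operation",
            (Mode::Enabled, false) => "trying to open substreams",
            (Mode::Disabled, true) => "trying to close substreams",
            (Mode::Disabled, false) => "idle; connection garbage-collected after a while",
            (Mode::Init, _) => "waiting for an Enable/Disable instruction",
        }
    }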
The endpoints don't try to agree on a single substream. -/// -/// We consider that we are now "closed" if the remote closes all the existing substreams. -/// Re-opening can then be performed by closing all active substreams and re-opening one. -/// -pub struct LegacyProtoHandlerProto { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, -} - -impl LegacyProtoHandlerProto { - /// Builds a new `LegacyProtoHandlerProto`. - pub fn new(protocol: RegisteredProtocol) -> Self { - LegacyProtoHandlerProto { - protocol, - } - } -} - -impl IntoProtocolsHandler for LegacyProtoHandlerProto { - type Handler = LegacyProtoHandler; - - fn inbound_protocol(&self) -> RegisteredProtocol { - self.protocol.clone() - } - - fn into_handler(self, remote_peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { - LegacyProtoHandler { - protocol: self.protocol, - remote_peer_id: remote_peer_id.clone(), - state: ProtocolState::Init { - substreams: SmallVec::new(), - init_deadline: Delay::new(Duration::from_secs(20)) - }, - events_queue: VecDeque::new(), - } - } -} - -/// The actual handler once the connection has been established. -pub struct LegacyProtoHandler { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, - - /// State of the communications with the remote. - state: ProtocolState, - - /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have - /// any influence on the behaviour. - remote_peer_id: PeerId, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque< - ProtocolsHandlerEvent<RegisteredProtocol, Infallible, LegacyProtoHandlerOut, ConnectionKillError> - >, -} - -/// State of the handler. -enum ProtocolState { - /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. - Init { - /// List of substreams opened by the remote but that haven't been processed yet. - /// For each substream, also includes the handshake message that we have received. - substreams: SmallVec<[(RegisteredProtocolSubstream<NegotiatedSubstream>, Vec<u8>); 6]>, - /// Deadline after which the initialization is abnormally long. - init_deadline: Delay, - }, - - /// Handler is ready to accept incoming substreams. - /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. - Opening, - - /// Normal operating mode. Contains the substreams that are open. - /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. - Normal { - /// The substreams where bidirectional communications happen. - substreams: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 4]>, - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 4]>, - }, - - /// We are disabled. Contains substreams that are being closed. - /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the - /// outside or we have never sent any `CustomProtocolOpen` in the first place. - Disabled { - /// List of substreams to shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream<NegotiatedSubstream>; 6]>, - - /// If true, we should reactivate the handler after all the substreams in `shutdown` have - /// been closed. - /// - /// Since we don't want to mix old and new substreams, we wait for all old substreams to - /// be closed before opening any new one. - reenable: bool, - }, - - /// In this state, we don't care about anything anymore and need to kill the connection as soon - /// as possible.
- KillAsap, - - /// We sometimes temporarily switch to this state during processing. If we are in this state - /// at the beginning of a method, that means something bad happened in the source code. - Poisoned, -} - -/// Event that can be received by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerIn { - /// The node should start using custom protocols. - Enable, - - /// The node should stop using custom protocols. - Disable, -} - -/// Event that can be emitted by a `LegacyProtoHandler`. -#[derive(Debug)] -pub enum LegacyProtoHandlerOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - /// Handshake message that has been sent to us. - /// This is normally a "Status" message, but this is outside the concern of this code. - received_handshake: Vec<u8>, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Reason why the substream closed, for diagnostic purposes. - reason: Cow<'static, str>, - }, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Message that has been received. - message: BytesMut, - }, -} - -impl LegacyProtoHandler { - /// Enables the handler. - fn enable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: mut incoming, .. } => { - if incoming.is_empty() { - ProtocolState::Opening - } else { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].0.protocol_version(), - received_handshake: mem::replace(&mut incoming[0].1, Vec::new()), - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: incoming.into_iter().map(|(s, _)| s).collect(), - shutdown: SmallVec::new() - } - } - } - - st @ ProtocolState::KillAsap => st, - st @ ProtocolState::Opening { .. } => st, - st @ ProtocolState::Normal { .. } => st, - ProtocolState::Disabled { shutdown, .. } => { - ProtocolState::Disabled { shutdown, reenable: true } - } - } - } - - /// Disables the handler. - fn disable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: shutdown, .. } => { - let mut shutdown = shutdown.into_iter().map(|(s, _)| s).collect::<SmallVec<_>>(); - for s in &mut shutdown { - s.shutdown(); - } - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::Opening { .. } | ProtocolState::Normal { .. } => - // At the moment, if we get disabled while things were working, we kill the entire - // connection in order to force a reset of the state. - // This is obviously an extremely shameful way to do things, but at the time of - // the writing of this comment, the networking works very poorly and a solution - // needs to be found. - ProtocolState::KillAsap, - - ProtocolState::Disabled { shutdown, .. } => - ProtocolState::Disabled { shutdown, reenable: false }, - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - }
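Both `enable` and `disable` above rely on the same take-and-replace idiom: `mem::replace` moves the current state out and leaves the `Poisoned` sentinel behind, so the match can consume the old state by value and every arm must produce the next state. A reduced, self-contained sketch of the idiom (the `Machine` type is hypothetical, not the Substrate handler):

```rust
use std::mem;

enum State {
    Idle,
    Running { count: u32 },
    // Sentinel left in place while a transition is in progress. Observing it
    // at the start of a method means a previous transition went wrong.
    Poisoned,
}

struct Machine {
    state: State,
}

impl Machine {
    fn enable(&mut self) {
        // Move the state out by value so the arms can consume owned fields.
        self.state = match mem::replace(&mut self.state, State::Poisoned) {
            State::Idle => State::Running { count: 0 },
            st @ State::Running { .. } => st, // already enabled; keep as-is
            State::Poisoned => {
                eprintln!("machine is in a poisoned state");
                State::Poisoned
            }
        };
    }
}

fn main() {
    let mut m = Machine { state: State::Idle };
    m.enable();
    assert!(matches!(m.state, State::Running { count: 0 }));
}
```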
- - /// Polls the state for events. Optionally returns an event to produce. - #[must_use] - fn poll_state(&mut self, cx: &mut Context) - -> Option<ProtocolsHandlerEvent<RegisteredProtocol, Infallible, LegacyProtoHandlerOut, ConnectionKillError>> { - match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - self.state = ProtocolState::Poisoned; - None - } - - ProtocolState::Init { substreams, mut init_deadline } => { - match Pin::new(&mut init_deadline).poll(cx) { - Poll::Ready(()) => { - error!(target: "sub-libp2p", "Handler initialization process is too long \ - with {:?}", self.remote_peer_id); - self.state = ProtocolState::KillAsap; - }, - Poll::Pending => { - self.state = ProtocolState::Init { substreams, init_deadline }; - } - } - - None - } - - ProtocolState::Opening => { - self.state = ProtocolState::Opening; - None - } - - ProtocolState::Normal { mut substreams, mut shutdown } => { - for n in (0..substreams.len()).rev() { - let mut substream = substreams.swap_remove(n); - match Pin::new(&mut substream).poll_next(cx) { - Poll::Pending => substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - let event = LegacyProtoHandlerOut::CustomMessage { - message - }; - substreams.push(substream); - self.state = ProtocolState::Normal { substreams, shutdown }; - return Some(ProtocolsHandlerEvent::Custom(event)); - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "Legacy substream clogged".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(None) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "All substreams have been closed by the remote".into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(Some(Err(err))) => { - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: format!("Error on the last substream: {:?}", err).into(), - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } else { - debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); - } - } - } - } - - // This point is reached if and only if none of the substreams are in a ready state. - self.state = ProtocolState::Normal { substreams, shutdown }; - None - } - - ProtocolState::Disabled { mut shutdown, reenable } => { - shutdown_list(&mut shutdown, cx); - // If `reenable` is `true`, that means we should open the substreams system again - // after all the substreams are closed.
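Stepping back from the diff for a moment: the `Normal` arm of `poll_state` above drains its substreams with a swap-and-push loop, taking each substream out with `swap_remove`, polling it once, and pushing it back unless it terminated. Iterating indices in reverse is what keeps `swap_remove` from skipping elements. A standalone sketch of that loop, assuming only the `futures` crate (`poll_all` is an illustrative helper, not the Substrate code):

```rust
use futures::{stream::StreamExt, Stream};
use std::task::{Context, Poll};

/// Polls every stream once; returns the first ready item, drops finished
/// streams, and keeps pending ones around for the next call.
fn poll_all<S>(streams: &mut Vec<S>, cx: &mut Context<'_>) -> Option<S::Item>
where
    S: Stream + Unpin,
{
    // Reverse iteration: swap_remove moves the last element into slot n,
    // and that element has already been visited.
    for n in (0..streams.len()).rev() {
        let mut stream = streams.swap_remove(n);
        match stream.poll_next_unpin(cx) {
            Poll::Pending => streams.push(stream),
            Poll::Ready(Some(item)) => {
                streams.push(stream);
                return Some(item);
            }
            Poll::Ready(None) => {} // terminated stream is dropped
        }
    }
    None
}
```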
- if reenable && shutdown.is_empty() { - self.state = ProtocolState::Opening; - } else { - self.state = ProtocolState::Disabled { shutdown, reenable }; - } - None - } - - ProtocolState::KillAsap => None, - } - } -} - -impl ProtocolsHandler for LegacyProtoHandler { - type InEvent = LegacyProtoHandlerIn; - type OutEvent = LegacyProtoHandlerOut; - type Error = ConnectionKillError; - type InboundProtocol = RegisteredProtocol; - type OutboundProtocol = RegisteredProtocol; - type OutboundOpenInfo = Infallible; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (mut substream, received_handshake): >::Output, - (): () - ) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", - self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { mut substreams, init_deadline } => { - if substream.endpoint() == Endpoint::Dialer { - error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ - initialization", self.remote_peer_id); - } - substreams.push((substream, received_handshake)); - ProtocolState::Init { substreams, init_deadline } - } - - ProtocolState::Opening { .. } => { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version(), - received_handshake, - }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: smallvec![substream], - shutdown: SmallVec::new() - } - } - - ProtocolState::Normal { substreams: mut existing, shutdown } => { - existing.push(substream); - ProtocolState::Normal { substreams: existing, shutdown } - } - - ProtocolState::Disabled { mut shutdown, .. } => { - substream.shutdown(); - shutdown.push(substream); - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - - fn inject_fully_negotiated_outbound( - &mut self, - _: >::Output, - unreachable: Self::OutboundOpenInfo - ) { - match unreachable {} - } - - fn inject_event(&mut self, message: LegacyProtoHandlerIn) { - match message { - LegacyProtoHandlerIn::Disable => self.disable(), - LegacyProtoHandlerIn::Enable => self.enable(), - } - } - - fn inject_dial_upgrade_error( - &mut self, - unreachable: Self::OutboundOpenInfo, - _: ProtocolsHandlerUpgrErr - ) { - match unreachable {} - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - ProtocolState::Init { .. } | ProtocolState::Normal { .. } => KeepAlive::Yes, - ProtocolState::Opening { .. } | ProtocolState::Disabled { .. } | - ProtocolState::Poisoned | ProtocolState::KillAsap => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - // Kill the connection if needed. - if let ProtocolState::KillAsap = self.state { - return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError)); - } - - // Process all the substreams. 
- if let Some(event) = self.poll_state(cx) { - return Poll::Ready(event) - } - - Poll::Pending - } -} - -impl fmt::Debug for LegacyProtoHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("LegacyProtoHandler") - .finish() - } -} - -/// Given a list of substreams, tries to shut them down. The substreams that have been successfully -/// shut down are removed from the list. -fn shutdown_list - (list: &mut SmallVec>>, - cx: &mut Context) -{ - 'outer: for n in (0..list.len()).rev() { - let mut substream = list.swap_remove(n); - loop { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_))) => {} - Poll::Pending => break, - Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, - } - } - list.push(substream); - } -} - -/// Error returned when switching from normal to disabled. -#[derive(Debug)] -pub struct ConnectionKillError; - -impl error::Error for ConnectionKillError { -} - -impl fmt::Display for ConnectionKillError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Connection kill when switching from normal to disabled") - } -} diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs deleted file mode 100644 index d3b505e0de3e2..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ /dev/null @@ -1,293 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for ingoing -//! substreams for a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; -use bytes::BytesMut; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{error, warn}; -use std::{borrow::Cow, collections::VecDeque, fmt, pin::Pin, task::{Context, Poll}}; - -/// Implements the `IntoProtocolsHandler` trait of libp2p. -/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsInHandler`]. -pub struct NotifsInHandlerProto { - /// Configuration for the protocol upgrade to negotiate. 
- in_protocol: NotificationsIn, -} - -/// The actual handler once the connection has been established. -pub struct NotifsInHandler { - /// Configuration for the protocol upgrade to negotiate for inbound substreams. - in_protocol: NotificationsIn, - - /// Substream that is open with the remote. - substream: Option>, - - /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and - /// `Closed` messages in a row without the handler having time to respond with `Accept` or - /// `Refuse`. - /// - /// In order to keep the state consistent, we increment this variable every time an - /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. - pending_accept_refuses: usize, - - /// Queue of events to send to the outside. - /// - /// This queue is only ever modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Event that can be received by a `NotifsInHandler`. -#[derive(Debug, Clone)] -pub enum NotifsInHandlerIn { - /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send - /// to the remote. - /// - /// After sending this to the handler, the substream is now considered open and `Notif` events - /// can be received. - Accept(Vec), - - /// Can be sent back as a response to an `OpenRequest`. - Refuse, -} - -/// Event that can be emitted by a `NotifsInHandler`. -#[derive(Debug)] -pub enum NotifsInHandlerOut { - /// The remote wants to open a substream. Contains the initial message sent by the remote - /// when the substream has been opened. - /// - /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent - /// back even if a `Closed` is received. - OpenRequest(Vec), - - /// The notifications substream has been closed by the remote. In order to avoid race - /// conditions, this does **not** cancel any previously-sent `OpenRequest`. - Closed, - - /// Received a message on the notifications substream. - /// - /// Can only happen after an `Accept` and before a `Closed`. - Notif(BytesMut), -} - -impl NotifsInHandlerProto { - /// Builds a new `NotifsInHandlerProto`. - pub fn new( - protocol_name: impl Into> - ) -> Self { - NotifsInHandlerProto { - in_protocol: NotificationsIn::new(protocol_name), - } - } -} - -impl IntoProtocolsHandler for NotifsInHandlerProto { - type Handler = NotifsInHandler; - - fn inbound_protocol(&self) -> NotificationsIn { - self.in_protocol.clone() - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsInHandler { - in_protocol: self.in_protocol, - substream: None, - pending_accept_refuses: 0, - events_queue: VecDeque::new(), - } - } -} - -impl NotifsInHandler { - /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &Cow<'static, str> { - self.in_protocol.protocol_name() - } - - /// Equivalent to the `poll` method of `ProtocolsHandler`, except that it is guaranteed to - /// never generate [`NotifsInHandlerOut::Notif`]. - /// - /// Use this method in situations where it is not desirable to receive events but still - /// necessary to drive any potential incoming handshake or request. 
- pub fn poll_process( - &mut self, - cx: &mut Context - ) -> Poll< - ProtocolsHandlerEvent - > { - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Ok(v))) => match v {}, - Some(Poll::Ready(Err(_))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl ProtocolsHandler for NotifsInHandler { - type InEvent = NotifsInHandlerIn; - type OutEvent = NotifsInHandlerOut; - type Error = void::Void; - type InboundProtocol = NotificationsIn; - type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.in_protocol.clone(), ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (msg, proto): >::Output, - (): () - ) { - // If a substream already exists, we drop it and replace it with the new incoming one. - if self.substream.is_some() { - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - } - - // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" - // to the remote and force-close the substream. It might seem like an unclean way to get - // rid of a substream. However, keep in mind that it is invalid for the remote to open - // multiple such substreams, and therefore sending a "RST" is not an incorrect thing to do. - self.substream = Some(proto); - - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); - self.pending_accept_refuses = self.pending_accept_refuses - .checked_add(1) - .unwrap_or_else(|| { - error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); - usize::max_value() - }); - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - _: Self::OutboundOpenInfo - ) { - // We never emit any outgoing substream. - void::unreachable(out) - } - - fn inject_event(&mut self, message: NotifsInHandlerIn) { - self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { - Some(v) => v, - None => { - error!( - target: "sub-libp2p", - "Inconsistent state: received Accept/Refuse when no pending request exists" - ); - return; - } - }; - - // If we send multiple `OpenRequest`s in a row, we will receive back multiple - // `Accept`/`Refuse` messages. All of them are obsolete except the last one. - if self.pending_accept_refuses != 0 { - return; - } - - match (message, self.substream.as_mut()) { - (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), - (NotifsInHandlerIn::Accept(_), None) => {}, - (NotifsInHandlerIn::Refuse, _) => self.substream = None, - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); - } - - fn connection_keep_alive(&self) -> KeepAlive { - if self.substream.is_some() { - KeepAlive::Yes - } else { - KeepAlive::No - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. 
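As the code just below shows, `poll` always starts by draining `events_queue`; every handler in this file follows the same discipline of pushing at the back and popping at the front, so events are delivered in FIFO order across `poll` calls. A reduced sketch of the pattern (this `Handler` type is hypothetical, not the libp2p trait):

```rust
use std::collections::VecDeque;
use std::task::Poll;

struct Handler<E> {
    events_queue: VecDeque<E>,
}

impl<E> Handler<E> {
    fn push_event(&mut self, event: E) {
        // Producers only ever append at the back...
        self.events_queue.push_back(event);
    }

    fn poll(&mut self) -> Poll<E> {
        // ...and `poll` only ever removes from the front.
        if let Some(event) = self.events_queue.pop_front() {
            return Poll::Ready(event);
        }
        Poll::Pending
    }
}

fn main() {
    let mut h = Handler { events_queue: VecDeque::new() };
    h.push_event("open");
    h.push_event("notif");
    assert_eq!(h.poll(), Poll::Ready("open"));
    assert_eq!(h.poll(), Poll::Ready("notif"));
    assert_eq!(h.poll(), Poll::Pending);
}
```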
- if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Some(Ok(msg)))) => { - if self.pending_accept_refuses != 0 { - warn!( - target: "sub-libp2p", - "Bad state in inbound-only handler: notif before accepting substream" - ); - } - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))) - }, - Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsInHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsInHandler") - .field("substream_open", &self.substream.is_some()) - .finish() - } -} diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs deleted file mode 100644 index 414e62c0d135f..0000000000000 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for outgoing -//! substreams of a single gossiping protocol. -//! -//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple -//! > protocols, you need to create multiple instances and group them. -//! - -use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError}; -use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, -}; -use log::{debug, warn, error}; -use std::{ - borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll, Waker}, - time::Duration -}; -use wasm_timer::Instant; - -/// Maximum duration to open a substream and receive the handshake message. After that, we -/// consider that we failed to open the substream. -const OPEN_TIMEOUT: Duration = Duration::from_secs(10); -/// After successfully establishing a connection with the remote, we keep the connection open for -/// at least this amount of time in order to give the rest of the code the chance to notify us to -/// open substreams. -const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); - -/// Implements the `IntoProtocolsHandler` trait of libp2p. 
-/// -/// Every time a connection with a remote starts, an instance of this struct is created and -/// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a [`NotifsOutHandler`]. -/// -/// See the documentation of [`NotifsOutHandler`] for more information. -pub struct NotifsOutHandlerProto { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, str>, -} - -impl NotifsOutHandlerProto { - /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the - /// notifications substream. - pub fn new(protocol_name: impl Into>) -> Self { - NotifsOutHandlerProto { - protocol_name: protocol_name.into(), - } - } -} - -impl IntoProtocolsHandler for NotifsOutHandlerProto { - type Handler = NotifsOutHandler; - - fn inbound_protocol(&self) -> DeniedUpgrade { - DeniedUpgrade - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsOutHandler { - protocol_name: self.protocol_name, - when_connection_open: Instant::now(), - state: State::Disabled, - events_queue: VecDeque::new(), - } - } -} - -/// Handler for an outbound notification substream. -/// -/// When a connection is established, this handler starts in the "disabled" state, meaning that -/// no substream will be open. -/// -/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the -/// handler. Once done, the handler will try to establish then maintain an outbound substream with -/// the remote for the purpose of sending notifications to it. -pub struct NotifsOutHandler { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, str>, - - /// Relationship with the node we're connected to. - state: State, - - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: VecDeque>, -} - -/// Our relationship with the node we're connected to. -enum State { - /// The handler is disabled and idle. No substream is open. - Disabled, - - /// The handler is disabled. A substream is still open and needs to be closed. - /// - /// > **Important**: Having this state means that `poll_close` has been called at least once, - /// > but the `Sink` API is unclear about whether or not the stream can then - /// > be recovered. Because of that, we must never switch from the - /// > `DisabledOpen` state to the `Open` state while keeping the same substream. - DisabledOpen(NotificationsOutSubstream), - - /// The handler is disabled but we are still trying to open a substream with the remote. - /// - /// If the handler gets enabled again, we can immediately switch to `Opening`. - DisabledOpening, - - /// The handler is enabled and we are trying to open a substream with the remote. - Opening { - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// The handler is enabled. We have tried opening a substream in the past but the remote - /// refused it. - Refused, - - /// The handler is enabled and substream is open. - Open { - /// Substream that is currently open. - substream: NotificationsOutSubstream, - /// Waker for the last task that got `Poll::Pending` from `poll_ready`, to notify - /// when the open substream closes due to being disabled or encountering an - /// error, i.e. 
to notify the task as soon as the substream becomes unavailable, - /// without waiting for an underlying I/O task wakeup. - close_waker: Option, - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// Poisoned state. Shouldn't be found in the wild. - Poisoned, -} - -/// Event that can be received by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerIn { - /// Enables the notifications substream for this node. The handler will try to maintain a - /// substream with the remote. - Enable { - /// Initial message to send to remote nodes when we open substreams. - initial_message: Vec, - }, - - /// Disables the notifications substream for this node. This is the default state. - Disable, -} - -/// Event that can be emitted by a `NotifsOutHandler`. -#[derive(Debug)] -pub enum NotifsOutHandlerOut { - /// The notifications substream has been accepted by the remote. - Open { - /// Handshake message sent by the remote after we opened the substream. - handshake: Vec, - }, - - /// The notifications substream has been closed by the remote. - Closed, - - /// We tried to open a notifications substream, but the remote refused it. - /// - /// Can only happen if we're in a closed state. - Refused, -} - -impl NotifsOutHandler { - /// Returns true if the substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => true, - State::Opening { .. } => false, - State::Refused => false, - State::Open { .. } => true, - State::Poisoned => false, - } - } - - /// Returns `true` if there has been an attempt to open the substream, but the remote refused - /// the substream. - /// - /// Always returns `false` if the handler is in a disabled state. - pub fn is_refused(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => false, - State::Opening { .. } => false, - State::Refused => true, - State::Open { .. } => false, - State::Poisoned => false, - } - } - - /// Returns the name of the protocol that we negotiate. - pub fn protocol_name(&self) -> &Cow<'static, str> { - &self.protocol_name - } - - /// Polls whether the outbound substream is ready to send a notification. - /// - /// - Returns `Poll::Pending` if the substream is open but not ready to send a notification. - /// - Returns `Poll::Ready(true)` if the substream is ready to send a notification. - /// - Returns `Poll::Ready(false)` if the substream is closed. - /// - pub fn poll_ready(&mut self, cx: &mut Context) -> Poll { - if let State::Open { substream, close_waker, .. } = &mut self.state { - match substream.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => Poll::Ready(true), - Poll::Ready(Err(_)) => Poll::Ready(false), - Poll::Pending => { - *close_waker = Some(cx.waker().clone()); - Poll::Pending - } - } - } else { - Poll::Ready(false) - } - } - - /// Sends out a notification. - /// - /// If the substream is closed, or not ready to send out a notification yet, then the - /// notification is silently discarded. - /// - /// You are encouraged to call [`NotifsOutHandler::poll_ready`] beforehand to determine - /// whether this will succeed. If `Poll::Ready(true)` is returned, then this method will send - /// out a notification. - pub fn send_or_discard(&mut self, notification: Vec) { - if let State::Open { substream, .. 
} = &mut self.state { - let _ = substream.start_send_unpin(notification); - } - } -} - -impl ProtocolsHandler for NotifsOutHandler { - type InEvent = NotifsOutHandlerIn; - type OutEvent = NotifsOutHandlerOut; - type Error = void::Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = NotificationsOut; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - proto: >::Output, - (): () - ) { - // We should never reach here. `proto` is a `Void`. - void::unreachable(proto) - } - - fn inject_fully_negotiated_outbound( - &mut self, - (handshake_msg, substream): >::Output, - _: () - ) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Opening { initial_message } => { - let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - self.state = State::Open { substream, initial_message, close_waker: None }; - }, - // If the handler was disabled while we were negotiating the protocol, immediately - // close it. - State::DisabledOpening => self.state = State::DisabledOpen(substream), - - // Any other situation should never happen. - State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) => - error!("☎️ State mismatch in notifications handler: substream already open"), - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn inject_event(&mut self, message: NotifsOutHandlerIn) { - match message { - NotifsOutHandlerIn::Enable { initial_message } => { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => { - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - State::DisabledOpening => self.state = State::Opening { initial_message }, - State::DisabledOpen(mut sub) => { - // As documented above, in this state we have already called `poll_close` - // once on the substream, and it is unclear whether the substream can then - // be recovered. When in doubt, let's drop the existing substream and - // open a new one. - if sub.close().now_or_never().is_none() { - warn!( - target: "sub-libp2p", - "📞 Improperly closed outbound notifications substream" - ); - } - - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - self.state = State::Opening { initial_message }; - }, - st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. } => { - debug!(target: "sub-libp2p", - "Tried to enable notifications handler that was already enabled"); - self.state = st; - } - State::Poisoned => error!("Notifications handler in a poisoned state"), - } - } - - NotifsOutHandlerIn::Disable => { - match mem::replace(&mut self.state, State::Poisoned) { - st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => { - debug!(target: "sub-libp2p", - "Tried to disable notifications handler that was already disabled"); - self.state = st; - } - State::Opening { .. 
} => self.state = State::DisabledOpening, - State::Refused => self.state = State::Disabled, - State::Open { substream, close_waker, .. } => { - if let Some(close_waker) = close_waker { - close_waker.wake(); - } - self.state = State::DisabledOpen(substream) - }, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => {}, - State::DisabledOpen(_) | State::Refused | State::Open { .. } => - error!("☎️ State mismatch in NotificationsOut"), - State::Opening { .. } => { - self.state = State::Refused; - let ev = NotifsOutHandlerOut::Refused; - self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); - }, - State::DisabledOpening => self.state = State::Disabled, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the - // connection open no matter what, in order to avoid closing and reopening - // connections all the time. - State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => - KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), - State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, - State::Refused | State::Poisoned => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll> { - // Flush the events queue if necessary. - if let Some(event) = self.events_queue.pop_front() { - return Poll::Ready(event) - } - - match &mut self.state { - State::Open { substream, initial_message, close_waker } => - match Sink::poll_flush(Pin::new(substream), cx) { - Poll::Pending | Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(_)) => { - if let Some(close_waker) = close_waker.take() { - close_waker.wake(); - } - - // We try to re-open a substream. - let initial_message = mem::replace(initial_message, Vec::new()); - self.state = State::Opening { initial_message: initial_message.clone() }; - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); - self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), - }); - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - } - }, - - State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { - Poll::Pending => {}, - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { - self.state = State::Disabled; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - }, - }, - - _ => {} - } - - Poll::Pending - } -} - -impl fmt::Debug for NotifsOutHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsOutHandler") - .field("open", &self.is_open()) - .finish() - } -} diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs deleted file mode 100644 index d604645d4ac87..0000000000000 --- a/client/network/src/protocol/generic_proto/tests.rs +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -#![cfg(test)] - -use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; - -use futures::prelude::*; -use libp2p::{PeerId, Multiaddr, Transport}; -use libp2p::core::{ - connection::{ConnectionId, ListenerId}, - ConnectedPoint, - muxing, - transport::MemoryTransport, - upgrade -}; -use libp2p::{identity, noise, yamux}; -use libp2p::swarm::{ - Swarm, ProtocolsHandler, IntoProtocolsHandler, PollParameters, - NetworkBehaviour, NetworkBehaviourAction -}; -use std::{error, io, iter, task::{Context, Poll}, time::Duration}; - -/// Builds two nodes that have each other as bootstrap nodes. -/// This is to be used only for testing, and a panic will happen if something goes wrong. -fn build_nodes() -> (Swarm, Swarm) { - let mut out = Vec::with_capacity(2); - - let keypairs: Vec<_> = (0..2).map(|_| identity::Keypair::generate_ed25519()).collect(); - let addrs: Vec = (0..2) - .map(|_| format!("/memory/{}", rand::random::()).parse().unwrap()) - .collect(); - - for index in 0 .. 2 { - let keypair = keypairs[index].clone(); - let local_peer_id = keypair.public().into_peer_id(); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); - - let transport = MemoryTransport - .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(yamux::Config::default()) - .map(|(peer, muxer), _| (peer, muxing::StreamMuxerBox::new(muxer))) - .timeout(Duration::from_secs(20)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .boxed(); - - let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: if index == 0 { - keypairs - .iter() - .skip(1) - .map(|keypair| keypair.public().into_peer_id()) - .collect() - } else { - vec![] - }, - reserved_only: false, - priority_groups: Vec::new(), - }); - - let behaviour = CustomProtoWithAddr { - inner: GenericProto::new( - local_peer_id, "test", &[1], vec![], peerset, - iter::once(("/foo".into(), Vec::new())) - ), - addrs: addrs - .iter() - .enumerate() - .filter_map(|(n, a)| if n != index { - Some((keypairs[n].public().into_peer_id(), a.clone())) - } else { - None - }) - .collect(), - }; - - let mut swarm = Swarm::new( - transport, - behaviour, - keypairs[index].public().into_peer_id() - ); - Swarm::listen_on(&mut swarm, addrs[index].clone()).unwrap(); - out.push(swarm); - } - - // Final output - let mut out_iter = out.into_iter(); - let first = out_iter.next().unwrap(); - let second = out_iter.next().unwrap(); - (first, second) -} - -/// Wraps around the `CustomBehaviour` network behaviour, and adds hardcoded node addresses to it. 
-struct CustomProtoWithAddr { - inner: GenericProto, - addrs: Vec<(PeerId, Multiaddr)>, -} - -impl std::ops::Deref for CustomProtoWithAddr { - type Target = GenericProto; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl std::ops::DerefMut for CustomProtoWithAddr { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - -impl NetworkBehaviour for CustomProtoWithAddr { - type ProtocolsHandler = ::ProtocolsHandler; - type OutEvent = ::OutEvent; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - self.inner.new_handler() - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - let mut list = self.inner.addresses_of_peer(peer_id); - for (p, a) in self.addrs.iter() { - if p == peer_id { - list.push(a.clone()); - } - } - list - } - - fn inject_connected(&mut self, peer_id: &PeerId) { - self.inner.inject_connected(peer_id) - } - - fn inject_disconnected(&mut self, peer_id: &PeerId) { - self.inner.inject_disconnected(peer_id) - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.inner.inject_connection_established(peer_id, conn, endpoint) - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.inner.inject_connection_closed(peer_id, conn, endpoint) - } - - fn inject_event( - &mut self, - peer_id: PeerId, - connection: ConnectionId, - event: <::Handler as ProtocolsHandler>::OutEvent - ) { - self.inner.inject_event(peer_id, connection, event) - } - - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters - ) -> Poll< - NetworkBehaviourAction< - <::Handler as ProtocolsHandler>::InEvent, - Self::OutEvent - > - > { - self.inner.poll(cx, params) - } - - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { - self.inner.inject_addr_reach_failure(peer_id, addr, error) - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - self.inner.inject_dial_failure(peer_id) - } - - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_new_listen_addr(addr) - } - - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_expired_listen_addr(addr) - } - - fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_new_external_addr(addr) - } - - fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { - self.inner.inject_listener_error(id, err); - } - - fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { - self.inner.inject_listener_closed(id, reason); - } -} - -#[test] -fn reconnect_after_disconnect() { - // We connect two nodes together, then force a disconnect (through the API of the `Service`), - // check that the disconnect worked, and finally check whether they successfully reconnect. - - let (mut service1, mut service2) = build_nodes(); - - // For this test, the services can be in the following states. - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - enum ServiceState { NotConnected, FirstConnec, Disconnected, ConnectedAgain } - let mut service1_state = ServiceState::NotConnected; - let mut service2_state = ServiceState::NotConnected; - - futures::executor::block_on(async move { - loop { - // Grab next event from services. 
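The test loop below multiplexes the two swarms by racing their `next()` futures with `future::select` and tagging the winner with `Either`. A hedged sketch of that racing step as a helper, assuming only the `futures` crate (`next_of_two` is an illustrative name, not part of the test):

```rust
use futures::{future, prelude::*};

/// Awaits the next item from whichever of two streams yields first,
/// tagging the result with the stream it came from.
async fn next_of_two<A, B>(
    mut a: A,
    mut b: B,
) -> future::Either<Option<A::Item>, Option<B::Item>>
where
    A: Stream + Unpin,
    B: Stream + Unpin,
{
    let fa = a.next();
    let fb = b.next();
    futures::pin_mut!(fa, fb);
    match future::select(fa, fb).await {
        future::Either::Left((ev, _)) => future::Either::Left(ev),
        future::Either::Right((ev, _)) => future::Either::Right(ev),
    }
}
```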
- let event = { - let s1 = service1.next(); - let s2 = service2.next(); - futures::pin_mut!(s1, s2); - match future::select(s1, s2).await { - future::Either::Left((ev, _)) => future::Either::Left(ev), - future::Either::Right((ev, _)) => future::Either::Right(ev), - } - }; - - match event { - future::Either::Left(GenericProtoOut::CustomProtocolOpen { .. }) => { - match service1_state { - ServiceState::NotConnected => { - service1_state = ServiceState::FirstConnec; - if service2_state == ServiceState::FirstConnec { - service1.disconnect_peer(Swarm::local_peer_id(&service2)); - } - }, - ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - } - }, - future::Either::Left(GenericProtoOut::CustomProtocolClosed { .. }) => { - match service1_state { - ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain| ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - } - }, - future::Either::Right(GenericProtoOut::CustomProtocolOpen { .. }) => { - match service2_state { - ServiceState::NotConnected => { - service2_state = ServiceState::FirstConnec; - if service1_state == ServiceState::FirstConnec { - service1.disconnect_peer(Swarm::local_peer_id(&service2)); - } - }, - ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - } - }, - future::Either::Right(GenericProtoOut::CustomProtocolClosed { .. }) => { - match service2_state { - ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain| ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - } - }, - _ => {} - } - - if service1_state == ServiceState::ConnectedAgain && service2_state == ServiceState::ConnectedAgain { - break; - } - } - - // Now that the two services have disconnected and reconnected, wait for 3 seconds and - // check whether they're still connected. - let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); - - loop { - // Grab next event from services. - let event = { - let s1 = service1.next(); - let s2 = service2.next(); - futures::pin_mut!(s1, s2); - match future::select(future::select(s1, s2), &mut delay).await { - future::Either::Right(_) => break, // success - future::Either::Left((future::Either::Left((ev, _)), _)) => ev, - future::Either::Left((future::Either::Right((ev, _)), _)) => ev, - } - }; - - match event { - GenericProtoOut::CustomProtocolOpen { .. } | - GenericProtoOut::CustomProtocolClosed { .. } => panic!(), - _ => {} - } - } - }); -} diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs deleted file mode 100644 index 1b2b97253d1ae..0000000000000 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ /dev/null @@ -1,313 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use crate::config::ProtocolId; -use bytes::BytesMut; -use futures::prelude::*; -use futures_codec::Framed; -use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; -use parking_lot::RwLock; -use std::{collections::VecDeque, io, pin::Pin, sync::Arc, vec::IntoIter as VecIntoIter}; -use std::task::{Context, Poll}; -use unsigned_varint::codec::UviBytes; - -/// Connection upgrade for a single protocol. -/// -/// Note that "a single protocol" here refers to `par` for example. However -/// each protocol can have multiple different versions for networking purposes. -pub struct RegisteredProtocol { - /// Id of the protocol for API purposes. - id: ProtocolId, - /// Base name of the protocol as advertised on the network. - /// Ends with `/` so that we can append a version number behind. - base_name: Vec, - /// List of protocol versions that we support. - /// Ordered in descending order so that the best comes first. - supported_versions: Vec, - /// Handshake to send after the substream is open. - handshake_message: Arc>>, -} - -impl RegisteredProtocol { - /// Creates a new `RegisteredProtocol`. - pub fn new(protocol: impl Into, versions: &[u8], handshake_message: Arc>>) - -> Self { - let protocol = protocol.into(); - let mut base_name = b"/substrate/".to_vec(); - base_name.extend_from_slice(protocol.as_ref().as_bytes()); - base_name.extend_from_slice(b"/"); - - RegisteredProtocol { - base_name, - id: protocol, - supported_versions: { - let mut tmp = versions.to_vec(); - tmp.sort_by(|a, b| b.cmp(&a)); - tmp - }, - handshake_message, - } - } - - /// Returns the `Arc` to the handshake message that was passed at initialization. - pub fn handshake_message(&self) -> &Arc>> { - &self.handshake_message - } -} - -impl Clone for RegisteredProtocol { - fn clone(&self) -> Self { - RegisteredProtocol { - id: self.id.clone(), - base_name: self.base_name.clone(), - supported_versions: self.supported_versions.clone(), - handshake_message: self.handshake_message.clone(), - } - } -} - -/// Output of a `RegisteredProtocol` upgrade. -pub struct RegisteredProtocolSubstream { - /// If true, we are in the process of closing the sink. - is_closing: bool, - /// Whether the local node opened this substream (dialer), or we received this substream from - /// the remote (listener). - endpoint: Endpoint, - /// Buffer of packets to send. - send_queue: VecDeque, - /// If true, we should call `poll_complete` on the inner sink. - requires_poll_flush: bool, - /// The underlying substream. - inner: stream::Fuse>>, - /// Version of the protocol that was negotiated. - protocol_version: u8, - /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one - /// unless the buffer empties then fills itself again. - clogged_fuse: bool, -} - -impl RegisteredProtocolSubstream { - /// Returns the version of the protocol that was negotiated. - pub fn protocol_version(&self) -> u8 { - self.protocol_version - } - - /// Returns whether the local node opened this substream (dialer), or we received this - /// substream from the remote (listener). 
- pub fn endpoint(&self) -> Endpoint { - self.endpoint - } - - /// Starts a graceful shutdown process on this substream. - /// - /// Note that "graceful" means that we sent a closing message. We don't wait for any - /// confirmation from the remote. - /// - /// After calling this, the stream is guaranteed to finish soon-ish. - pub fn shutdown(&mut self) { - self.is_closing = true; - self.send_queue.clear(); - } -} - -/// Event produced by the `RegisteredProtocolSubstream`. -#[derive(Debug, Clone)] -pub enum RegisteredProtocolEvent { - /// Received a message from the remote. - Message(BytesMut), - - /// Diagnostic event indicating that the connection is clogged and we should avoid sending too - /// many messages to it. - Clogged, -} - -impl Stream for RegisteredProtocolSubstream -where TSubstream: AsyncRead + AsyncWrite + Unpin { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - // Flushing the local queue. - while !self.send_queue.is_empty() { - match Pin::new(&mut self.inner).poll_ready(cx) { - Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err))), - Poll::Pending => break, - } - - if let Some(packet) = self.send_queue.pop_front() { - Pin::new(&mut self.inner).start_send(packet)?; - self.requires_poll_flush = true; - } - } - - // If we are closing, close as soon as the Sink is closed. - if self.is_closing { - return match Pin::new(&mut self.inner).poll_close(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(Ok(_)) => Poll::Ready(None), - Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), - } - } - - // Indicating that the remote is clogged if that's the case. - if self.send_queue.len() >= 1536 { - if !self.clogged_fuse { - // Note: this fuse is important not just for preventing us from flooding the logs; - // if you remove the fuse, then we will always return early from this function and - // thus never read any message from the network. - self.clogged_fuse = true; - return Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) - } - } else { - self.clogged_fuse = false; - } - - // Flushing if necessary. - if self.requires_poll_flush { - if let Poll::Ready(()) = Pin::new(&mut self.inner).poll_flush(cx)? { - self.requires_poll_flush = false; - } - } - - // Receiving incoming packets. - // Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever. - match Pin::new(&mut self.inner).poll_next(cx)? { - Poll::Ready(Some(data)) => { - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(data)))) - } - Poll::Ready(None) => - if !self.requires_poll_flush && self.send_queue.is_empty() { - Poll::Ready(None) - } else { - Poll::Pending - } - Poll::Pending => Poll::Pending, - } - } -} - -impl UpgradeInfo for RegisteredProtocol { - type Info = RegisteredProtocolName; - type InfoIter = VecIntoIter; - - #[inline] - fn protocol_info(&self) -> Self::InfoIter { - // Report each version as an individual protocol. - self.supported_versions.iter().map(|&version| { - let num = version.to_string(); - - let mut name = self.base_name.clone(); - name.extend_from_slice(num.as_bytes()); - RegisteredProtocolName { - name, - version, - } - }).collect::>().into_iter() - } -} - -/// Implementation of `ProtocolName` for a custom protocol. -#[derive(Debug, Clone)] -pub struct RegisteredProtocolName { - /// Protocol name, as advertised on the wire. - name: Vec, - /// Version number. Stored in string form in `name`, but duplicated here for easier retrieval. 
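The `protocol_info` implementation above advertises one wire-level name per supported version: the base name already ends with `/`, and the version number is appended in decimal text form, so version 6 of protocol `par` is advertised as `/substrate/par/6`. A small sketch of the naming scheme (`protocol_names` is an illustrative helper, not the Substrate API):

```rust
/// Builds one advertised name per supported version, best version first.
fn protocol_names(protocol: &str, versions: &[u8]) -> Vec<Vec<u8>> {
    versions
        .iter()
        .map(|v| {
            // Base name ends with `/`; the version is appended as text.
            let mut name = format!("/substrate/{}/", protocol).into_bytes();
            name.extend_from_slice(v.to_string().as_bytes());
            name
        })
        .collect()
}

fn main() {
    for name in protocol_names("par", &[6, 5]) {
        println!("{}", String::from_utf8(name).unwrap()); // /substrate/par/6, /substrate/par/5
    }
}
```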
-	version: u8,
-}
-
-impl ProtocolName for RegisteredProtocolName {
-	fn protocol_name(&self) -> &[u8] {
-		&self.name
-	}
-}
-
-impl<TSubstream> InboundUpgrade<TSubstream> for RegisteredProtocol
-where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static,
-{
-	type Output = (RegisteredProtocolSubstream<TSubstream>, Vec<u8>);
-	type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;
-	type Error = io::Error;
-
-	fn upgrade_inbound(
-		self,
-		socket: TSubstream,
-		info: Self::Info,
-	) -> Self::Future {
-		Box::pin(async move {
-			let mut framed = {
-				let mut codec = UviBytes::default();
-				codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets.
-				Framed::new(socket, codec)
-			};
-
-			let handshake = BytesMut::from(&self.handshake_message.read()[..]);
-			framed.send(handshake).await?;
-			let received_handshake = framed.next().await
-				.ok_or_else(|| io::ErrorKind::UnexpectedEof)??;
-
-			Ok((RegisteredProtocolSubstream {
-				is_closing: false,
-				endpoint: Endpoint::Listener,
-				send_queue: VecDeque::new(),
-				requires_poll_flush: false,
-				inner: framed.fuse(),
-				protocol_version: info.version,
-				clogged_fuse: false,
-			}, received_handshake.to_vec()))
-		})
-	}
-}
-
-impl<TSubstream> OutboundUpgrade<TSubstream> for RegisteredProtocol
-where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static,
-{
-	type Output = <Self as InboundUpgrade<TSubstream>>::Output;
-	type Future = <Self as InboundUpgrade<TSubstream>>::Future;
-	type Error = <Self as InboundUpgrade<TSubstream>>::Error;
-
-	fn upgrade_outbound(
-		self,
-		socket: TSubstream,
-		info: Self::Info,
-	) -> Self::Future {
-		Box::pin(async move {
-			let mut framed = {
-				let mut codec = UviBytes::default();
-				codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets.
-				Framed::new(socket, codec)
-			};
-
-			let handshake = BytesMut::from(&self.handshake_message.read()[..]);
-			framed.send(handshake).await?;
-			let received_handshake = framed.next().await
-				.ok_or_else(|| {
-					io::Error::new(io::ErrorKind::UnexpectedEof, "Failed to receive handshake")
-				})??;
-
-			Ok((RegisteredProtocolSubstream {
-				is_closing: false,
-				endpoint: Endpoint::Dialer,
-				send_queue: VecDeque::new(),
-				requires_poll_flush: false,
-				inner: framed.fuse(),
-				protocol_version: info.version,
-				clogged_fuse: false,
-			}, received_handshake.to_vec()))
-		})
-	}
-}
diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs
index 1cd78c0ed1dda..8938c27aeddd8 100644
--- a/client/network/src/protocol/message.rs
+++ b/client/network/src/protocol/message.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
 
 // This program is free software: you can redistribute it and/or modify
@@ -16,19 +16,20 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-//! Network packet message types. These get serialized and put into the lower level protocol payload.
+//! Network packet message types. These get serialized and put into the lower level protocol
+//! payload.
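For context on the naming scheme that the removed legacy upgrade implements: each supported version is advertised as its own wire-level protocol name, built from the `/substrate/<id>/` base with the version number appended, highest version first. A minimal std-only sketch of that derivation (the function name and the example id "dot" are illustrative, not taken from the patch):

fn versioned_protocol_names(id: &str, versions: &[u8]) -> Vec<String> {
	// Sort in descending order so that the highest (preferred) version comes first,
	// mirroring the sort performed in `RegisteredProtocol::new`.
	let mut versions = versions.to_vec();
	versions.sort_by(|a, b| b.cmp(a));
	versions.into_iter().map(|v| format!("/substrate/{}/{}", id, v)).collect()
}

// versioned_protocol_names("dot", &[5, 6]) yields ["/substrate/dot/6", "/substrate/dot/5"],
// one entry per protocol name reported by `protocol_info()`.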
-use bitflags::bitflags;
-use sp_runtime::{ConsensusEngineId, traits::{Block as BlockT, Header as HeaderT}};
-use codec::{Encode, Decode, Input, Output, Error};
 pub use self::generic::{
-	BlockAnnounce, RemoteCallRequest, RemoteReadRequest,
-	RemoteHeaderRequest, RemoteHeaderResponse,
-	RemoteChangesRequest, RemoteChangesResponse,
-	FinalityProofRequest, FinalityProofResponse,
-	FromBlock, RemoteReadChildRequest, Roles,
+	BlockAnnounce, FromBlock, RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse,
+	RemoteHeaderRequest, RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, Roles,
 };
+use bitflags::bitflags;
+use codec::{Decode, Encode, Error, Input, Output};
 use sc_client_api::StorageProof;
+use sp_runtime::{
+	traits::{Block as BlockT, Header as HeaderT},
+	ConsensusEngineId,
+};
 
 /// A unique ID of a request.
 pub type RequestId = u64;
@@ -42,24 +43,16 @@ pub type Message<B> = generic::Message<
 >;
 
 /// Type alias for using the block request type using block type parameters.
-pub type BlockRequest<B> = generic::BlockRequest<
-	<B as BlockT>::Hash,
-	<<B as BlockT>::Header as HeaderT>::Number,
->;
+pub type BlockRequest<B> =
+	generic::BlockRequest<<B as BlockT>::Hash, <<B as BlockT>::Header as HeaderT>::Number>;
 
 /// Type alias for using the BlockData type using block type parameters.
-pub type BlockData<B> = generic::BlockData<
-	<B as BlockT>::Header,
-	<B as BlockT>::Hash,
-	<B as BlockT>::Extrinsic,
->;
+pub type BlockData<B> =
+	generic::BlockData<<B as BlockT>::Header, <B as BlockT>::Hash, <B as BlockT>::Extrinsic>;
 
 /// Type alias for using the BlockResponse type using block type parameters.
-pub type BlockResponse<B> = generic::BlockResponse<
-	<B as BlockT>::Header,
-	<B as BlockT>::Hash,
-	<B as BlockT>::Extrinsic,
->;
+pub type BlockResponse<B> =
+	generic::BlockResponse<<B as BlockT>::Header, <B as BlockT>::Hash, <B as BlockT>::Extrinsic>;
 
 /// A set of transactions.
 pub type Transactions<E> = Vec<E>;
@@ -78,6 +71,8 @@ bitflags! {
 		const MESSAGE_QUEUE = 0b00001000;
 		/// Include a justification for the block.
 		const JUSTIFICATION = 0b00010000;
+		/// Include indexed transactions for a block.
+		const INDEXED_BODY = 0b00100000;
 	}
 }
 
@@ -90,13 +85,13 @@ impl BlockAttributes {
 	/// Decodes attributes, encoded with the `encode_to_be_u32()` call.
 	pub fn from_be_u32(encoded: u32) -> Result<Self, Error> {
-		BlockAttributes::from_bits(encoded.to_be_bytes()[0])
+		Self::from_bits(encoded.to_be_bytes()[0])
 			.ok_or_else(|| Error::from("Invalid BlockAttributes"))
 	}
 }
 
 impl Encode for BlockAttributes {
-	fn encode_to<T: Output>(&self, dest: &mut T) {
+	fn encode_to<T: Output + ?Sized>(&self, dest: &mut T) {
 		dest.push_byte(self.bits())
 	}
 }
@@ -145,16 +140,35 @@ pub struct RemoteReadResponse {
 	pub proof: StorageProof,
 }
 
+/// Announcement summary used for debug logging.
+#[derive(Debug)]
+pub struct AnnouncementSummary<H: HeaderT> {
+	block_hash: H::Hash,
+	number: H::Number,
+	parent_hash: H::Hash,
+	state: Option<BlockState>,
+}
+
+impl<H: HeaderT> generic::BlockAnnounce<H> {
+	pub fn summary(&self) -> AnnouncementSummary<H> {
+		AnnouncementSummary {
+			block_hash: self.header.hash(),
+			number: *self.header.number(),
+			parent_hash: self.header.parent_hash().clone(),
+			state: self.state,
+		}
+	}
+}
+
 /// Generic types.
 pub mod generic {
-	use bitflags::bitflags;
-	use codec::{Encode, Decode, Input, Output};
-	use sp_runtime::Justification;
 	use super::{
-		RemoteReadResponse, Transactions, Direction,
-		RequestId, BlockAttributes, RemoteCallResponse, ConsensusEngineId,
-		BlockState, StorageProof,
+		BlockAttributes, BlockState, ConsensusEngineId, Direction, RemoteCallResponse,
+		RemoteReadResponse, RequestId, StorageProof, Transactions,
 	};
+	use bitflags::bitflags;
+	use codec::{Decode, Encode, Input, Output};
+	use sp_runtime::{EncodedJustification, Justifications};
 
 	bitflags! {
 		/// Bitmask of the roles that a node fulfills.
@@ -173,12 +187,12 @@ pub mod generic {
 	impl Roles {
 		/// Does this role represent a client that holds full chain data locally?
 		pub fn is_full(&self) -> bool {
-			self.intersects(Roles::FULL | Roles::AUTHORITY)
+			self.intersects(Self::FULL | Self::AUTHORITY)
 		}
 
 		/// Does this role represent a client that participates in the consensus?
 		pub fn is_authority(&self) -> bool {
-			*self == Roles::AUTHORITY
+			*self == Self::AUTHORITY
 		}
 
 		/// Does this role represent a client that does not hold full chain data locally?
@@ -190,16 +204,15 @@ pub mod generic {
 	impl<'a> From<&'a crate::config::Role> for Roles {
 		fn from(roles: &'a crate::config::Role) -> Self {
 			match roles {
-				crate::config::Role::Full => Roles::FULL,
-				crate::config::Role::Light => Roles::LIGHT,
-				crate::config::Role::Sentry { .. } => Roles::AUTHORITY,
-				crate::config::Role::Authority { .. } => Roles::AUTHORITY,
+				crate::config::Role::Full => Self::FULL,
+				crate::config::Role::Light => Self::LIGHT,
+				crate::config::Role::Authority { .. } => Self::AUTHORITY,
 			}
 		}
 	}
 
 	impl codec::Encode for Roles {
-		fn encode_to<T: Output>(&self, dest: &mut T) {
+		fn encode_to<T: Output + ?Sized>(&self, dest: &mut T) {
 			dest.push_byte(self.bits())
 		}
 	}
@@ -216,7 +229,7 @@ pub mod generic {
 	#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
 	pub struct ConsensusMessage {
 		/// Identifies consensus engine.
-		pub engine_id: ConsensusEngineId,
+		pub protocol: ConsensusEngineId,
 		/// Message payload.
 		pub data: Vec<u8>,
 	}
@@ -230,12 +243,16 @@ pub mod generic {
 		pub header: Option<Header>,
 		/// Block body if requested.
 		pub body: Option<Vec<Extrinsic>>,
+		/// Block body indexed transactions if requested.
+		pub indexed_body: Option<Vec<Vec<u8>>>,
 		/// Block receipt if requested.
 		pub receipt: Option<Vec<u8>>,
 		/// Block message queue if requested.
 		pub message_queue: Option<Vec<u8>>,
 		/// Justification if requested.
-		pub justification: Option<Justification>,
+		pub justification: Option<EncodedJustification>,
+		/// Justifications if requested.
+		pub justifications: Option<Justifications>,
 	}
 
 	/// Identifies starting point of a block sequence.
@@ -280,40 +297,13 @@ pub mod generic {
 		RemoteChangesResponse(RemoteChangesResponse<Number, Hash>),
 		/// Remote child storage read request.
 		RemoteReadChildRequest(RemoteReadChildRequest<Hash>),
-		/// Finality proof request.
-		FinalityProofRequest(FinalityProofRequest<Hash>),
-		/// Finality proof response.
-		FinalityProofResponse(FinalityProofResponse<Hash>),
 		/// Batch of consensus protocol messages.
+		// NOTE: index is incremented by 2 due to finality proof related
+		// messages that were removed.
+		#[codec(index = 17)]
 		ConsensusBatch(Vec<ConsensusMessage>),
 	}
 
-	impl<Header, Hash, Number, Extrinsic> Message<Header, Hash, Number, Extrinsic> {
-		/// Message id useful for logging.
-		pub fn id(&self) -> &'static str {
-			match self {
-				Message::Status(_) => "Status",
-				Message::BlockRequest(_) => "BlockRequest",
-				Message::BlockResponse(_) => "BlockResponse",
-				Message::BlockAnnounce(_) => "BlockAnnounce",
-				Message::Transactions(_) => "Transactions",
-				Message::Consensus(_) => "Consensus",
-				Message::RemoteCallRequest(_) => "RemoteCallRequest",
-				Message::RemoteCallResponse(_) => "RemoteCallResponse",
-				Message::RemoteReadRequest(_) => "RemoteReadRequest",
-				Message::RemoteReadResponse(_) => "RemoteReadResponse",
-				Message::RemoteHeaderRequest(_) => "RemoteHeaderRequest",
-				Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse",
-				Message::RemoteChangesRequest(_) => "RemoteChangesRequest",
-				Message::RemoteChangesResponse(_) => "RemoteChangesResponse",
-				Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest",
-				Message::FinalityProofRequest(_) => "FinalityProofRequest",
-				Message::FinalityProofResponse(_) => "FinalityProofResponse",
-				Message::ConsensusBatch(_) => "ConsensusBatch",
-			}
-		}
-	}
-
 	/// Status sent on connection.
 	// TODO https://github.com/paritytech/substrate/issues/4674: replace the `Status`
 	// struct with this one, after waiting a few releases beyond `NetworkSpecialization`'s
@@ -361,11 +351,12 @@ pub mod generic {
 			let compact = CompactStatus::decode(value)?;
 			let chain_status = match <Vec<Vec<u8>>>::decode(value) {
 				Ok(v) => v,
-				Err(e) => if compact.version <= LAST_CHAIN_STATUS_VERSION {
-					return Err(e)
-				} else {
-					Vec::new()
-				}
+				Err(e) =>
+					if compact.version <= LAST_CHAIN_STATUS_VERSION {
+						return Err(e)
+					} else {
+						Vec::new()
+					},
 			};
 
 			let CompactStatus {
@@ -377,7 +368,7 @@ pub mod generic {
 				genesis_hash,
 			} = compact;
 
-			Ok(Status {
+			Ok(Self {
 				version,
 				min_supported_version,
 				roles,
@@ -402,7 +393,8 @@ pub mod generic {
 		pub to: Option<Hash>,
 		/// Sequence direction.
 		pub direction: Direction,
-		/// Maximum number of blocks to return. An implementation defined maximum is used when unspecified.
+		/// Maximum number of blocks to return. An implementation defined maximum is used when
+		/// unspecified.
 		pub max: Option<u32>,
 	}
 
@@ -430,7 +422,7 @@ pub mod generic {
 	// This assumes that the packet contains nothing but the announcement message.
 	// TODO: Get rid of it once protocol v4 is common.
 	impl<H: HeaderT> Encode for BlockAnnounce<H> {
-		fn encode_to<T: Output>(&self, dest: &mut T) {
+		fn encode_to<T: Output + ?Sized>(&self, dest: &mut T) {
 			self.header.encode_to(dest);
 			if let Some(state) = &self.state {
 				state.encode_to(dest);
@@ -446,11 +438,7 @@ pub mod generic {
 			let header = H::decode(input)?;
 			let state = BlockState::decode(input).ok();
 			let data = Vec::decode(input).ok();
-			Ok(BlockAnnounce {
-				header,
-				state,
-				data,
-			})
+			Ok(Self { header, state, data })
 		}
 	}
 
@@ -546,26 +534,4 @@ pub mod generic {
 		/// Missing changes tries roots proof.
 		pub roots_proof: StorageProof,
 	}
-
-	#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-	/// Finality proof request.
-	pub struct FinalityProofRequest<H> {
-		/// Unique request id.
-		pub id: RequestId,
-		/// Hash of the block to request proof for.
-		pub block: H,
-		/// Additional data blob (that both requester and provider understood) required for proving finality.
-		pub request: Vec<u8>,
-	}
-
-	#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
-	/// Finality proof response.
-	pub struct FinalityProofResponse<H> {
-		/// Id of a request this response was made for.
-		pub id: RequestId,
-		/// Hash of the block (the same as in the FinalityProofRequest).
-		pub block: H,
-		/// Finality proof (if available).
-		pub proof: Option<Vec<u8>>,
-	}
 }
diff --git a/client/network/src/protocol/generic_proto/upgrade.rs b/client/network/src/protocol/notifications.rs
similarity index 63%
rename from client/network/src/protocol/generic_proto/upgrade.rs
rename to client/network/src/protocol/notifications.rs
index 6322a10b572a9..e489970e987c6 100644
--- a/client/network/src/protocol/generic_proto/upgrade.rs
+++ b/client/network/src/protocol/notifications.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
 
 // This program is free software: you can redistribute it and/or modify
@@ -15,22 +15,16 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-pub use self::collec::UpgradeCollec;
-pub use self::legacy::{
-	RegisteredProtocol,
-	RegisteredProtocolEvent,
-	RegisteredProtocolName,
-	RegisteredProtocolSubstream
-};
-pub use self::notifications::{
-	NotificationsIn,
-	NotificationsInSubstream,
-	NotificationsOut,
-	NotificationsOutSubstream,
-	NotificationsHandshakeError,
-	NotificationsOutError,
+
+//! Implementation of libp2p's `NetworkBehaviour` trait that establishes communications and opens
+//! notifications substreams.
+
+pub use self::{
+	behaviour::{Notifications, NotificationsOut, ProtocolConfig},
+	handler::{NotificationsSink, NotifsHandlerError, Ready},
 };
 
-mod collec;
-mod legacy;
-mod notifications;
+mod behaviour;
+mod handler;
+mod tests;
+mod upgrade;
diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs
new file mode 100644
index 0000000000000..da2967d6f26eb
--- /dev/null
+++ b/client/network/src/protocol/notifications/behaviour.rs
@@ -0,0 +1,2102 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::protocol::notifications::handler::{
+	self, NotificationsSink, NotifsHandlerIn, NotifsHandlerOut, NotifsHandlerProto,
+};
+
+use bytes::BytesMut;
+use fnv::FnvHashMap;
+use futures::prelude::*;
+use libp2p::{
+	core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId},
+	swarm::{
+		DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters,
+	},
+};
+use log::{error, trace, warn};
+use parking_lot::RwLock;
+use rand::distributions::{Distribution as _, Uniform};
+use sc_peerset::DropReason;
+use smallvec::SmallVec;
+use std::{
+	borrow::Cow,
+	cmp,
+	collections::{hash_map::Entry, VecDeque},
+	error, mem,
+	pin::Pin,
+	str,
+	sync::Arc,
+	task::{Context, Poll},
+	time::{Duration, Instant},
+};
+
+/// Network behaviour that handles opening substreams for custom protocols with other peers.
+///
+/// # How it works
+///
+/// The role of the `Notifications` is to synchronize the following components:
+///
+/// - The libp2p swarm that opens new connections and reports disconnects.
+/// - The connection handler (see `handler.rs`) that handles individual connections.
+/// - The peerset manager (PSM) that requests links to peers to be established or broken.
+/// - The external API, that requires knowledge of the links that have been established.
+///
+/// In the state machine below, each `PeerId` is attributed one of these states:
+///
+/// - [`PeerState::Requested`]: No open connection, but requested by the peerset. Currently dialing.
+/// - [`PeerState::Disabled`]: Has open TCP connection(s) unbeknownst to the peerset. No substream
+///   is open.
+/// - [`PeerState::Enabled`]: Has open TCP connection(s), acknowledged by the peerset.
+///   - Notifications substreams are open on at least one connection, and external API has been
+///     notified.
+///   - Notifications substreams aren't open.
+/// - [`PeerState::Incoming`]: Has open TCP connection(s) and remote would like to open substreams.
+///   Peerset has been asked to attribute an inbound slot.
+///
+/// In addition to these states, there also exists a "banning" system. If we fail to dial a peer,
+/// we back-off for a few seconds. If the PSM requests connecting to a peer that is currently
+/// backed-off, the next dialing attempt is delayed until after the ban expires. However, the PSM
+/// will still consider the peer to be connected. This "ban" is thus not a ban in a strict sense:
+/// if a backed-off peer tries to connect, the connection is accepted. A ban only delays dialing
+/// attempts.
+///
+/// There may be multiple connections to a peer. The status of a peer on
+/// the API of this behaviour and towards the peerset manager is aggregated in
+/// the following way:
+///
+/// 1. The enabled/disabled status is the same across all connections, as
+///    decided by the peerset manager.
+/// 2. `send_packet` and `write_notification` always send all data over
+///    the same connection to preserve the ordering provided by the transport,
+///    as long as that connection is open. If it closes, a second open
+///    connection may take over, if one exists, but that case should be no
+///    different than a single connection failing and being re-established
+///    in terms of potential reordering and dropped messages. Messages can
+///    be received on any connection.
+/// 3. The behaviour reports `NotificationsOut::CustomProtocolOpen` when the
+///    first connection reports `NotifsHandlerOut::OpenResultOk`.
+/// 4. The behaviour reports `NotificationsOut::CustomProtocolClosed` when the
+///    last connection reports `NotifsHandlerOut::CloseResult`.
+///
+/// In this way, the number of actual established connections to the peer is
+/// an implementation detail of this behaviour. Note that, in practice and at
+/// the time of this writing, there may be at most two connections to a peer
+/// and only as a result of simultaneous dialing. However, the implementation
+/// accommodates any number of connections.
+pub struct Notifications {
+	/// Notification protocols. Entries never change after initialization.
+	notif_protocols: Vec<handler::ProtocolConfig>,
+
+	/// Receiver for instructions about who to connect to or disconnect from.
+	peerset: sc_peerset::Peerset,
+
+	/// List of peers in our state.
+	peers: FnvHashMap<(PeerId, sc_peerset::SetId), PeerState>,
+
+	/// The elements in `peers` occasionally contain `Delay` objects that we would normally have
+	/// to be polled one by one. In order to avoid doing so, as an optimization, every `Delay` is
+	/// instead put inside of `delays` and referenced by a [`DelayId`]. This stream
+	/// yields `PeerId`s whose `DelayId` is potentially ready.
+	///
+	/// By design, we never remove elements from this list. Elements are removed only when the
+	/// `Delay` triggers. As such, this stream may produce obsolete elements.
+	delays: stream::FuturesUnordered<
+		Pin<Box<dyn Future<Output = (DelayId, PeerId, sc_peerset::SetId)> + Send>>,
+	>,
+
+	/// [`DelayId`] to assign to the next delay.
+	next_delay_id: DelayId,
+
+	/// List of incoming messages we have sent to the peer set manager and that are waiting for an
+	/// answer.
+	incoming: SmallVec<[IncomingPeer; 6]>,
+
+	/// We generate indices to identify incoming connections. This is the next value for the index
+	/// to use when a connection is incoming.
+	next_incoming_index: sc_peerset::IncomingIndex,
+
+	/// Events to produce from `poll()`.
+	events: VecDeque<NetworkBehaviourAction<NotifsHandlerIn, NotificationsOut>>,
+}
+
+/// Configuration for a notifications protocol.
+#[derive(Debug, Clone)]
+pub struct ProtocolConfig {
+	/// Name of the protocol.
+	pub name: Cow<'static, str>,
+	/// Names of the protocol to use if the main one isn't available.
+	pub fallback_names: Vec<Cow<'static, str>>,
+	/// Handshake of the protocol.
+	pub handshake: Vec<u8>,
+	/// Maximum allowed size for a notification.
+	pub max_notification_size: u64,
+}
+
+/// Identifier for a delay firing.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+struct DelayId(u64);
+
+/// State of a peer we're connected to.
+///
+/// The variants correspond to the state of the peer w.r.t. the peerset.
+#[derive(Debug)]
+enum PeerState {
+	/// State is poisoned. This is a temporary state for a peer and we should always switch back
+	/// to it later. If it is found in the wild, that means there was either a panic or a bug in
+	/// the state machine code.
+	Poisoned,
+
+	/// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial
+	/// delay to the connection.
+	Backoff {
+		/// When the ban expires. For clean-up purposes. References an entry in `delays`.
+		timer: DelayId,
+		/// Until when the peer is backed-off.
+		timer_deadline: Instant,
+	},
+
+	/// The peerset requested that we connect to this peer. We are currently not connected.
+	PendingRequest {
+		/// When to actually start dialing. References an entry in `delays`.
+		timer: DelayId,
+		/// When the `timer` will trigger.
+		timer_deadline: Instant,
+	},
+
+	/// The peerset requested that we connect to this peer. We are currently dialing this peer.
+	Requested,
+
+	/// We are connected to this peer but the peerset hasn't requested it or has denied it.
+	///
+	/// The handler is either in the closed state, or a `Close` message has been sent to it and
+	/// hasn't been answered yet.
+	Disabled {
+		/// If `Some`, any connection request from the peerset to this peer is delayed until the
+		/// given `Instant`.
+		backoff_until: Option<Instant>,
+
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
+	},
+
+	/// We are connected to this peer. The peerset has requested a connection to this peer, but
+	/// it is currently in a "backed-off" phase. The state will switch to `Enabled` once the timer
+	/// expires.
+	///
+	/// The handler is either in the closed state, or a `Close` message has been sent to it and
+	/// hasn't been answered yet.
+	///
+	/// The handler will be opened when `timer` fires.
+	DisabledPendingEnable {
+		/// When to enable this remote. References an entry in `delays`.
+		timer: DelayId,
+		/// When the `timer` will trigger.
+		timer_deadline: Instant,
+
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
+	},
+
+	/// We are connected to this peer and the peerset has accepted it.
+	Enabled {
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
+	},
+
+	/// We are connected to this peer. We have received an `OpenDesiredByRemote` from one of the
+	/// handlers and forwarded that request to the peerset. The connection handlers are waiting for
+	/// a response, i.e. to be opened or closed based on whether the peerset accepts or rejects
+	/// the peer.
+	Incoming {
+		/// If `Some`, any dial attempts to this peer are delayed until the given `Instant`.
+		backoff_until: Option<Instant>,
+
+		/// List of connections with this peer, and their state.
+		connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>,
+	},
+}
+
+impl PeerState {
+	/// True if there exists an established connection to the peer
+	/// that is open for custom protocol traffic.
+	fn is_open(&self) -> bool {
+		self.get_open().is_some()
+	}
+
+	/// Returns the [`NotificationsSink`] of the first established connection
+	/// that is open for custom protocol traffic.
+	fn get_open(&self) -> Option<&NotificationsSink> {
+		match self {
+			Self::Enabled { connections, .. } => connections.iter().find_map(|(_, s)| match s {
+				ConnectionState::Open(s) => Some(s),
+				_ => None,
+			}),
+			_ => None,
+		}
+	}
+
+	/// True if that node has been requested by the PSM.
+	fn is_requested(&self) -> bool {
+		matches!(
+			self,
+			Self::PendingRequest { .. } |
+				Self::Requested | Self::DisabledPendingEnable { .. } |
+				Self::Enabled { .. }
+		)
+	}
+}
+
+/// State of the handler of a single connection visible from this state machine.
+#[derive(Debug)]
+enum ConnectionState {
+	/// Connection is in the `Closed` state, meaning that the remote hasn't requested anything.
+	Closed,
+
+	/// Connection is either in the `Open` or the `Closed` state, but a
+	/// [`NotifsHandlerIn::Close`] message has been sent. Waiting for this message to be
+	/// acknowledged through a [`NotifsHandlerOut::CloseResult`].
+	Closing,
+
+	/// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message has been sent.
+	/// An `OpenResultOk`/`OpenResultErr` message is expected.
+	Opening,
+
+	/// Connection is in the `Closed` state but a [`NotifsHandlerIn::Open`] message then a
+	/// [`NotifsHandlerIn::Close`] message has been sent. An `OpenResultOk`/`OpenResultErr` message
+	/// followed with a `CloseResult` message are expected.
+	OpeningThenClosing,
+
+	/// Connection is in the `Closed` state, but a [`NotifsHandlerOut::OpenDesiredByRemote`]
+	/// message has been received, meaning that the remote wants to open a substream.
+	OpenDesiredByRemote,
+
+	/// Connection is in the `Open` state.
+	///
+	/// The external API is notified of a channel with this peer if any of its connections is in
+	/// this state.
+	Open(NotificationsSink),
+}
+
+/// State of an "incoming" message sent to the peer set manager.
+#[derive(Debug)]
+struct IncomingPeer {
+	/// Id of the remote peer of the incoming substream.
+	peer_id: PeerId,
+	/// Id of the set the incoming substream would belong to.
+	set_id: sc_peerset::SetId,
+	/// If true, this "incoming" still corresponds to an actual connection. If false, then the
+	/// connection corresponding to it has been closed or replaced already.
+	alive: bool,
+	/// Id that we sent to the peerset.
+	incoming_id: sc_peerset::IncomingIndex,
+}
+
+/// Event that can be emitted by the `Notifications`.
+#[derive(Debug)]
+pub enum NotificationsOut {
+	/// Opened a custom protocol with the remote.
+	CustomProtocolOpen {
+		/// Id of the peer we are connected to.
+		peer_id: PeerId,
+		/// Peerset set ID the substream is tied to.
+		set_id: sc_peerset::SetId,
+		/// If `Some`, a fallback protocol name has been used rather than the main protocol name.
+		/// Always matches one of the fallback names passed at initialization.
+		negotiated_fallback: Option<Cow<'static, str>>,
+		/// Handshake that was sent to us.
+		/// This is normally a "Status" message, but this is out of the concern of this code.
+		received_handshake: Vec<u8>,
+		/// Object that permits sending notifications to the peer.
+		notifications_sink: NotificationsSink,
+	},
+
+	/// The [`NotificationsSink`] object used to send notifications with the given peer must be
+	/// replaced with a new one.
+	///
+	/// This event is typically emitted when a transport-level connection is closed and we fall
+	/// back to a secondary connection.
+	CustomProtocolReplaced {
+		/// Id of the peer we are connected to.
+		peer_id: PeerId,
+		/// Peerset set ID the substream is tied to.
+		set_id: sc_peerset::SetId,
+		/// Replacement for the previous [`NotificationsSink`].
+		notifications_sink: NotificationsSink,
+	},
+
+	/// Closed a custom protocol with the remote. The existing [`NotificationsSink`] should
+	/// be dropped.
+	CustomProtocolClosed {
+		/// Id of the peer we were connected to.
+		peer_id: PeerId,
+		/// Peerset set ID the substream was tied to.
+		set_id: sc_peerset::SetId,
+	},
+
+	/// Receives a message on a custom protocol substream.
+	///
+	/// Also concerns received notifications for the notifications API.
+	Notification {
+		/// Id of the peer the message came from.
+		peer_id: PeerId,
+		/// Peerset set ID the substream is tied to.
+		set_id: sc_peerset::SetId,
+		/// Message that has been received.
+		message: BytesMut,
+	},
+}
+
+impl Notifications {
+	/// Creates a `Notifications`.
+	pub fn new(
+		peerset: sc_peerset::Peerset,
+		notif_protocols: impl Iterator<Item = ProtocolConfig>,
+	) -> Self {
+		let notif_protocols = notif_protocols
+			.map(|cfg| handler::ProtocolConfig {
+				name: cfg.name,
+				fallback_names: cfg.fallback_names,
+				handshake: Arc::new(RwLock::new(cfg.handshake)),
+				max_notification_size: cfg.max_notification_size,
+			})
+			.collect::<Vec<_>>();
+
+		assert!(!notif_protocols.is_empty());
+
+		Self {
+			notif_protocols,
+			peerset,
+			peers: FnvHashMap::default(),
+			delays: Default::default(),
+			next_delay_id: DelayId(0),
+			incoming: SmallVec::new(),
+			next_incoming_index: sc_peerset::IncomingIndex(0),
+			events: VecDeque::new(),
+		}
+	}
+
+	/// Modifies the handshake of the given notifications protocol.
+	pub fn set_notif_protocol_handshake(
+		&mut self,
+		set_id: sc_peerset::SetId,
+		handshake_message: impl Into<Vec<u8>>,
+	) {
+		if let Some(p) = self.notif_protocols.get_mut(usize::from(set_id)) {
+			*p.handshake.write() = handshake_message.into();
+		} else {
+			log::error!(target: "sub-libp2p", "Unknown handshake change set: {:?}", set_id);
+			debug_assert!(false);
+		}
+	}
+
+	/// Returns the number of discovered nodes that we keep in memory.
+	pub fn num_discovered_peers(&self) -> usize {
+		self.peerset.num_discovered_peers()
+	}
+
+	/// Returns the list of all the peers we have an open channel to.
+	pub fn open_peers<'a>(&'a self) -> impl Iterator<Item = &'a PeerId> + 'a {
+		self.peers.iter().filter(|(_, state)| state.is_open()).map(|((id, _), _)| id)
+	}
+
+	/// Returns true if we have an open substream to the given peer.
+	pub fn is_open(&self, peer_id: &PeerId, set_id: sc_peerset::SetId) -> bool {
+		self.peers.get(&(peer_id.clone(), set_id)).map(|p| p.is_open()).unwrap_or(false)
+	}
+
+	/// Disconnects the given peer if we are connected to it.
+	pub fn disconnect_peer(&mut self, peer_id: &PeerId, set_id: sc_peerset::SetId) {
+		trace!(target: "sub-libp2p", "External API => Disconnect({}, {:?})", peer_id, set_id);
+		self.disconnect_peer_inner(peer_id, set_id, None);
+	}
+
+	/// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the peer
+	/// for the specified duration.
+	fn disconnect_peer_inner(
+		&mut self,
+		peer_id: &PeerId,
+		set_id: sc_peerset::SetId,
+		ban: Option<Duration>,
+	) {
+		let mut entry = if let Entry::Occupied(entry) = self.peers.entry((*peer_id, set_id)) {
+			entry
+		} else {
+			return
+		};
+
+		match mem::replace(entry.get_mut(), PeerState::Poisoned) {
+			// We're not connected anyway.
+			st @ PeerState::Disabled { .. } => *entry.into_mut() = st,
+			st @ PeerState::Requested => *entry.into_mut() = st,
+			st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st,
+			st @ PeerState::Backoff { .. } => *entry.into_mut() = st,
+
+			// DisabledPendingEnable => Disabled.
+			PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => {
+				trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
+				self.peerset.dropped(set_id, *peer_id, DropReason::Unknown);
+				let backoff_until = Some(if let Some(ban) = ban {
+					cmp::max(timer_deadline, Instant::now() + ban)
+				} else {
+					timer_deadline
+				});
+				*entry.into_mut() = PeerState::Disabled { connections, backoff_until }
+			},
+
+			// Enabled => Disabled.
+			// All open or opening connections are sent a `Close` message.
+			// If relevant, the external API is instantly notified.
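+			// Connections still in `Opening` are switched to `OpeningThenClosing` below,
+			// since the handler's pending open result must still be acknowledged before
+			// the `Close` takes effect.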
+ PeerState::Enabled { mut connections } => { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { + trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id); + let event = + NotificationsOut::CustomProtocolClosed { peer_id: *peer_id, set_id }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: *peer_id, + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Closing; + } + + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: *peer_id, + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::OpeningThenClosing; + } + + debug_assert!(!connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::Open(_)))); + debug_assert!(!connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::Opening))); + + let backoff_until = ban.map(|dur| Instant::now() + dur); + *entry.into_mut() = PeerState::Disabled { connections, backoff_until } + }, + + // Incoming => Disabled. + // Ongoing opening requests from the remote are rejected. + PeerState::Incoming { mut connections, backoff_until } => { + let inc = if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == entry.key().0 && i.set_id == set_id && i.alive) + { + inc + } else { + error!( + target: "sub-libp2p", + "State mismatch in libp2p: no entry in incoming for incoming peer" + ); + return + }; + + inc.alive = false; + + for (connec_id, connec_state) in connections + .iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: *peer_id, + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Closing; + } + + let backoff_until = match (backoff_until, ban) { + (Some(a), Some(b)) => Some(cmp::max(a, Instant::now() + b)), + (Some(a), None) => Some(a), + (None, Some(b)) => Some(Instant::now() + b), + (None, None) => None, + }; + + debug_assert!(!connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + *entry.into_mut() = PeerState::Disabled { connections, backoff_until } + }, + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id) + }, + } + } + + /// Returns the list of all the peers that the peerset currently requests us to be connected to. 
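+	/// Concretely, these are the peers whose state is `Requested`, `PendingRequest`,
+	/// `DisabledPendingEnable` or `Enabled` for that set (see `PeerState::is_requested`).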
+	pub fn requested_peers<'a>(
+		&'a self,
+		set_id: sc_peerset::SetId,
+	) -> impl Iterator<Item = &'a PeerId> + 'a {
+		self.peers
+			.iter()
+			.filter(move |((_, set), state)| *set == set_id && state.is_requested())
+			.map(|((id, _), _)| id)
+	}
+
+	/// Returns the list of reserved peers.
+	pub fn reserved_peers<'a>(
+		&'a self,
+		set_id: sc_peerset::SetId,
+	) -> impl Iterator<Item = &'a PeerId> + 'a {
+		self.peerset.reserved_peers(set_id)
+	}
+
+	/// Sends a notification to a peer.
+	///
+	/// Has no effect if the custom protocol is not open with the given peer.
+	///
+	/// Also note that even if we have a valid open substream, it may in fact be already closed
+	/// without us knowing, in which case the packet will not be received.
+	///
+	/// The `fallback` parameter is used for backwards-compatibility reasons if the remote doesn't
+	/// support our protocol. One needs to pass the equivalent of what would have been passed
+	/// with `send_packet`.
+	pub fn write_notification(
+		&mut self,
+		target: &PeerId,
+		set_id: sc_peerset::SetId,
+		message: impl Into<Vec<u8>>,
+	) {
+		let notifs_sink = match self.peers.get(&(*target, set_id)).and_then(|p| p.get_open()) {
+			None => {
+				trace!(
+					target: "sub-libp2p",
+					"Tried to send notification to {:?} without an open channel.",
+					target,
+				);
+				return
+			},
+			Some(sink) => sink,
+		};
+
+		let message = message.into();
+
+		trace!(
+			target: "sub-libp2p",
+			"External API => Notification({:?}, {:?}, {} bytes)",
+			target,
+			set_id,
+			message.len(),
+		);
+		trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target);
+
+		notifs_sink.send_sync_notification(message);
+	}
+
+	/// Returns the state of the peerset manager, for debugging purposes.
+	pub fn peerset_debug_info(&mut self) -> serde_json::Value {
+		self.peerset.debug_info()
+	}
+
+	/// Function that is called when the peerset wants us to connect to a peer.
+	fn peerset_report_connect(&mut self, peer_id: PeerId, set_id: sc_peerset::SetId) {
+		// If `PeerId` is unknown to us, insert an entry, start dialing, and return early.
+		let mut occ_entry = match self.peers.entry((peer_id, set_id)) {
+			Entry::Occupied(entry) => entry,
+			Entry::Vacant(entry) => {
+				// If there's no entry in `self.peers`, start dialing.
+				trace!(
+					target: "sub-libp2p",
+					"PSM => Connect({}, {:?}): Starting to connect",
+					entry.key().0,
+					set_id,
+				);
+				trace!(target: "sub-libp2p", "Libp2p <= Dial {}", entry.key().0);
+				// The `DialPeerCondition` ensures that dial attempts are de-duplicated
+				self.events.push_back(NetworkBehaviourAction::DialPeer {
+					peer_id: entry.key().0.clone(),
+					condition: DialPeerCondition::Disconnected,
+				});
+				entry.insert(PeerState::Requested);
+				return
+			},
+		};
+
+		let now = Instant::now();
+
+		match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) {
+			// Backoff (not expired) => PendingRequest
+			PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => {
+				let peer_id = occ_entry.key().0.clone();
+				trace!(
+					target: "sub-libp2p",
+					"PSM => Connect({}, {:?}): Will start to connect at {:?}",
+					peer_id,
+					set_id,
+					timer_deadline,
+				);
+				*occ_entry.into_mut() =
+					PeerState::PendingRequest { timer: *timer, timer_deadline: *timer_deadline };
+			},
+
+			// Backoff (expired) => Requested
+			PeerState::Backoff { ..
} => { + trace!( + target: "sub-libp2p", + "PSM => Connect({}, {:?}): Starting to connect", + occ_entry.key().0, + set_id, + ); + trace!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); + // The `DialPeerCondition` ensures that dial attempts are de-duplicated + self.events.push_back(NetworkBehaviourAction::DialPeer { + peer_id: occ_entry.key().0.clone(), + condition: DialPeerCondition::Disconnected, + }); + *occ_entry.into_mut() = PeerState::Requested; + }, + + // Disabled (with non-expired ban) => DisabledPendingEnable + PeerState::Disabled { connections, backoff_until: Some(ref backoff) } + if *backoff > now => + { + let peer_id = occ_entry.key().0.clone(); + trace!( + target: "sub-libp2p", + "PSM => Connect({}, {:?}): But peer is backed-off until {:?}", + peer_id, + set_id, + backoff, + ); + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(*backoff - now); + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); + + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer: delay_id, + timer_deadline: *backoff, + }; + } + + // Disabled => Enabled + PeerState::Disabled { mut connections, backoff_until } => { + debug_assert!(!connections + .iter() + .any(|(_, s)| { matches!(s, ConnectionState::Open(_)) })); + + // The first element of `closed` is chosen to open the notifications substream. + if let Some((connec_id, connec_state)) = + connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed)) + { + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", + occ_entry.key().0, set_id); + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + *occ_entry.into_mut() = PeerState::Enabled { connections }; + } else { + // If no connection is available, switch to `DisabledPendingEnable` in order + // to try again later. + debug_assert!(connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing) + })); + trace!( + target: "sub-libp2p", + "PSM => Connect({}, {:?}): No connection in proper state. Delaying.", + occ_entry.key().0, set_id + ); + + let timer_deadline = { + let base = now + Duration::from_secs(5); + if let Some(backoff_until) = backoff_until { + cmp::max(base, backoff_until) + } else { + base + } + }; + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + debug_assert!(timer_deadline > now); + let delay = futures_timer::Delay::new(timer_deadline - now); + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); + + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer: delay_id, + timer_deadline, + }; + } + }, + + // Incoming => Enabled + PeerState::Incoming { mut connections, .. 
} => { + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", + occ_entry.key().0, set_id); + if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == occ_entry.key().0 && i.set_id == set_id && i.alive) + { + inc.alive = false; + } else { + error!( + target: "sub-libp2p", + "State mismatch in libp2p: no entry in incoming for incoming peer", + ) + } + + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections + .iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + occ_entry.key(), *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().0.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + } + + *occ_entry.into_mut() = PeerState::Enabled { connections }; + }, + + // Other states are kept as-is. + st @ PeerState::Enabled { .. } => { + warn!(target: "sub-libp2p", + "PSM => Connect({}, {:?}): Already connected.", + occ_entry.key().0, set_id); + *occ_entry.into_mut() = st; + debug_assert!(false); + }, + st @ PeerState::DisabledPendingEnable { .. } => { + warn!(target: "sub-libp2p", + "PSM => Connect({}, {:?}): Already pending enabling.", + occ_entry.key().0, set_id); + *occ_entry.into_mut() = st; + debug_assert!(false); + }, + st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { + warn!(target: "sub-libp2p", + "PSM => Connect({}, {:?}): Duplicate request.", + occ_entry.key().0, set_id); + *occ_entry.into_mut() = st; + debug_assert!(false); + }, + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()); + debug_assert!(false); + }, + } + } + + /// Function that is called when the peerset wants us to disconnect from a peer. + fn peerset_report_disconnect(&mut self, peer_id: PeerId, set_id: sc_peerset::SetId) { + let mut entry = match self.peers.entry((peer_id, set_id)) { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => { + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", + entry.key().0, set_id); + return + }, + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. 
} => { + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", + entry.key().0, set_id); + *entry.into_mut() = st; + }, + + // DisabledPendingEnable => Disabled + PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { + debug_assert!(!connections.is_empty()); + trace!(target: "sub-libp2p", + "PSM => Drop({}, {:?}): Interrupting pending enabling.", + entry.key().0, set_id); + *entry.into_mut() = + PeerState::Disabled { connections, backoff_until: Some(timer_deadline) }; + }, + + // Enabled => Disabled + PeerState::Enabled { mut connections } => { + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Disabling connections.", + entry.key().0, set_id); + + debug_assert!(connections.iter().any(|(_, s)| matches!( + s, + ConnectionState::Opening | ConnectionState::Open(_) + ))); + + if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { + trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id); + let event = NotificationsOut::CustomProtocolClosed { + peer_id: entry.key().0.clone(), + set_id, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening)) + { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + entry.key(), *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().0.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::OpeningThenClosing; + } + + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + entry.key(), *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().0.clone(), + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Closing; + } + + *entry.into_mut() = PeerState::Disabled { connections, backoff_until: None } + }, + + // Requested => Ø + PeerState::Requested => { + // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other + // sub-systems (such as the discovery mechanism) may require dialing this peer as + // well at the same time. + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected.", + entry.key().0, set_id); + entry.remove(); + }, + + // PendingRequest => Backoff + PeerState::PendingRequest { timer, timer_deadline } => { + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected", + entry.key().0, set_id); + *entry.into_mut() = PeerState::Backoff { timer, timer_deadline } + }, + + // Invalid state transitions. + st @ PeerState::Incoming { .. } => { + error!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not enabled (Incoming).", + entry.key().0, set_id); + *entry.into_mut() = st; + debug_assert!(false); + }, + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); + debug_assert!(false); + }, + } + } + + /// Function that is called when the peerset wants us to accept a connection + /// request from a peer. 
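+	///
+	/// The index refers to a previously forwarded incoming request. If it is obsolete (the
+	/// underlying connection closed in the meantime), the acceptance is ignored and, unless the
+	/// peer has been enabled through another path, a `Dropped` is reported back to the peerset.
+	/// Otherwise, every connection in the `OpenDesiredByRemote` state is sent an `Open` message
+	/// and the peer switches to `Enabled`.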
+ fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) + { + self.incoming.remove(pos) + } else { + error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); + return + }; + + if !incoming.alive { + trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", + index, incoming.peer_id, incoming.set_id); + match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { + Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. }) => { + }, + _ => { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", + incoming.peer_id, incoming.set_id); + self.peerset.dropped(incoming.set_id, incoming.peer_id, DropReason::Unknown); + }, + } + return + } + + let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { + Some(s) => s, + None => { + debug_assert!(false); + return + }, + }; + + match mem::replace(state, PeerState::Poisoned) { + // Incoming => Enabled + PeerState::Incoming { mut connections, .. } => { + trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", + index, incoming.peer_id, incoming.set_id); + + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections + .iter_mut() + .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) + { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + incoming.peer_id, *connec_id, incoming.set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id, + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: incoming.set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + } + + *state = PeerState::Enabled { connections }; + }, + + // Any state other than `Incoming` is invalid. + peer => { + error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. Got {:?}.", + peer); + debug_assert!(false); + }, + } + } + + /// Function that is called when the peerset wants us to reject an incoming peer. 
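+	///
+	/// Mirrors `peerset_report_accept`: an obsolete index is ignored; otherwise every connection
+	/// in the `OpenDesiredByRemote` state is sent a `Close` message and the peer switches to
+	/// `Disabled`.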
+	fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) {
+		let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index)
+		{
+			self.incoming.remove(pos)
+		} else {
+			error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index);
+			return
+		};
+
+		if !incoming.alive {
+			trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \
+				ignoring", index, incoming.peer_id, incoming.set_id);
+			return
+		}
+
+		let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) {
+			Some(s) => s,
+			None => {
+				debug_assert!(false);
+				return
+			},
+		};
+
+		match mem::replace(state, PeerState::Poisoned) {
+			// Incoming => Disabled
+			PeerState::Incoming { mut connections, backoff_until } => {
+				trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.",
+					index, incoming.peer_id, incoming.set_id);
+
+				debug_assert!(connections
+					.iter()
+					.any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
+				for (connec_id, connec_state) in connections
+					.iter_mut()
+					.filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))
+				{
+					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})",
+						incoming.peer_id, connec_id, incoming.set_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: incoming.peer_id,
+						handler: NotifyHandler::One(*connec_id),
+						event: NotifsHandlerIn::Close { protocol_index: incoming.set_id.into() },
+					});
+					*connec_state = ConnectionState::Closing;
+				}
+
+				*state = PeerState::Disabled { connections, backoff_until };
+			},
+			peer => error!(target: "sub-libp2p",
+				"State mismatch in libp2p: Expected alive incoming. Got {:?}.",
+				peer),
+		}
+	}
+}
+
+impl NetworkBehaviour for Notifications {
+	type ProtocolsHandler = NotifsHandlerProto;
+	type OutEvent = NotificationsOut;
+
+	fn new_handler(&mut self) -> Self::ProtocolsHandler {
+		NotifsHandlerProto::new(self.notif_protocols.clone())
+	}
+
+	fn addresses_of_peer(&mut self, _: &PeerId) -> Vec<Multiaddr> {
+		Vec::new()
+	}
+
+	fn inject_connected(&mut self, _: &PeerId) {}
+
+	fn inject_connection_established(
+		&mut self,
+		peer_id: &PeerId,
+		conn: &ConnectionId,
+		endpoint: &ConnectedPoint,
+	) {
+		for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) {
+			match self.peers.entry((*peer_id, set_id)).or_insert(PeerState::Poisoned) {
+				// Requested | PendingRequest => Enabled
+				st @ &mut PeerState::Requested | st @ &mut PeerState::PendingRequest { .. } => {
+					trace!(target: "sub-libp2p",
+						"Libp2p => Connected({}, {:?}, {:?}): Connection was requested by PSM.",
+						peer_id, set_id, endpoint
+					);
+					trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *conn, set_id);
+					self.events.push_back(NetworkBehaviourAction::NotifyHandler {
+						peer_id: *peer_id,
+						handler: NotifyHandler::One(*conn),
+						event: NotifsHandlerIn::Open { protocol_index: set_id.into() },
+					});
+
+					let mut connections = SmallVec::new();
+					connections.push((*conn, ConnectionState::Opening));
+					*st = PeerState::Enabled { connections };
+				},
+
+				// Poisoned gets inserted above if the entry was missing.
+				// Ø | Backoff => Disabled
+				st @ &mut PeerState::Poisoned | st @ &mut PeerState::Backoff { .. } => {
+					let backoff_until = if let PeerState::Backoff { timer_deadline, ..
} = st { + Some(*timer_deadline) + } else { + None + }; + trace!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}, {:?}, {:?}): Not requested by PSM, disabling.", + peer_id, set_id, endpoint, *conn); + + let mut connections = SmallVec::new(); + connections.push((*conn, ConnectionState::Closed)); + *st = PeerState::Disabled { connections, backoff_until }; + }, + + // In all other states, add this new connection to the list of closed inactive + // connections. + PeerState::Incoming { connections, .. } | + PeerState::Disabled { connections, .. } | + PeerState::DisabledPendingEnable { connections, .. } | + PeerState::Enabled { connections, .. } => { + trace!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. Leaving closed.", + peer_id, set_id, endpoint, *conn); + connections.push((*conn, ConnectionState::Closed)); + }, + } + } + } + + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + _endpoint: &ConnectedPoint, + ) { + for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((*peer_id, set_id)) { + entry + } else { + error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Disabled => Disabled | Backoff | Ø + PeerState::Disabled { mut connections, backoff_until } => { + trace!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.", + peer_id, set_id, *conn); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { + debug_assert!(false); + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + } + + if connections.is_empty() { + if let Some(until) = backoff_until { + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = *peer_id; + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); + + *entry.get_mut() = + PeerState::Backoff { timer: delay_id, timer_deadline: until }; + } else { + entry.remove(); + } + } else { + entry.remove(); + } + } else { + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; + } + }, + + // DisabledPendingEnable => DisabledPendingEnable | Backoff + PeerState::DisabledPendingEnable { mut connections, timer_deadline, timer } => { + trace!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}, {:?}): Disabled but pending enable.", + peer_id, set_id, *conn + ); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + if connections.is_empty() { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + *entry.get_mut() = PeerState::Backoff { timer, timer_deadline }; + } else { + *entry.get_mut() = + PeerState::DisabledPendingEnable { connections, timer_deadline, timer }; + } + }, + + // Incoming => Incoming | Disabled | Backoff | Ø + PeerState::Incoming { mut connections, backoff_until } => { + trace!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}, {:?}): 
OpenDesiredByRemote.", + peer_id, set_id, *conn + ); + + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + connections.remove(pos); + } else { + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + let no_desired_left = !connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)); + + // If no connection is `OpenDesiredByRemote` anymore, clean up the peerset + // incoming request. + if no_desired_left { + // In the incoming state, we don't report "Dropped". Instead we will just + // ignore the corresponding Accept/Reject. + if let Some(state) = self + .incoming + .iter_mut() + .find(|i| i.alive && i.set_id == set_id && i.peer_id == *peer_id) + { + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + incoming corresponding to an incoming state in peers"); + debug_assert!(false); + } + } + + if connections.is_empty() { + if let Some(until) = backoff_until { + let now = Instant::now(); + if until > now { + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(until - now); + let peer_id = *peer_id; + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); + + *entry.get_mut() = + PeerState::Backoff { timer: delay_id, timer_deadline: until }; + } else { + entry.remove(); + } + } else { + entry.remove(); + } + } else if no_desired_left { + // If no connection is `OpenDesiredByRemote` anymore, switch to `Disabled`. + *entry.get_mut() = PeerState::Disabled { connections, backoff_until }; + } else { + *entry.get_mut() = PeerState::Incoming { connections, backoff_until }; + } + }, + + // Enabled => Enabled | Backoff + // Peers are always backed-off when disconnecting while Enabled. 
+ PeerState::Enabled { mut connections } => { + trace!( + target: "sub-libp2p", + "Libp2p => Disconnected({}, {:?}, {:?}): Enabled.", + peer_id, set_id, *conn + ); + + debug_assert!(connections.iter().any(|(_, s)| matches!( + s, + ConnectionState::Opening | ConnectionState::Open(_) + ))); + + if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { + let (_, state) = connections.remove(pos); + if let ConnectionState::Open(_) = state { + if let Some((replacement_pos, replacement_sink)) = + connections.iter().enumerate().find_map(|(num, (_, s))| match s { + ConnectionState::Open(s) => Some((num, s.clone())), + _ => None, + }) { + if pos <= replacement_pos { + trace!( + target: "sub-libp2p", + "External API <= Sink replaced({}, {:?})", + peer_id, set_id + ); + let event = NotificationsOut::CustomProtocolReplaced { + peer_id: *peer_id, + set_id, + notifications_sink: replacement_sink, + }; + self.events + .push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } else { + trace!( + target: "sub-libp2p", "External API <= Closed({}, {:?})", + peer_id, set_id + ); + let event = NotificationsOut::CustomProtocolClosed { + peer_id: *peer_id, + set_id, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } + } else { + error!(target: "sub-libp2p", + "inject_connection_closed: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + if connections.is_empty() { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(Duration::from_secs(ban_dur)); + let peer_id = *peer_id; + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); + + *entry.get_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: Instant::now() + Duration::from_secs(ban_dur), + }; + } else if !connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)) + }) { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + + *entry.get_mut() = PeerState::Disabled { connections, backoff_until: None }; + } else { + *entry.get_mut() = PeerState::Enabled { connections }; + } + }, + + PeerState::Requested | + PeerState::PendingRequest { .. } | + PeerState::Backoff { .. } => { + // This is a serious bug either in this state machine or in libp2p. 
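+					// `Requested`, `PendingRequest` and `Backoff` all mean that no connection
+					// with the peer is supposed to exist, so there is no established connection
+					// whose closure could legitimately be reported here.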
+ error!(target: "sub-libp2p", + "`inject_connection_closed` called for unknown peer {}", + peer_id); + debug_assert!(false); + }, + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id); + debug_assert!(false); + }, + } + } + } + + fn inject_disconnected(&mut self, _peer_id: &PeerId) {} + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn error::Error, + ) { + trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + + for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + if let Entry::Occupied(mut entry) = self.peers.entry((peer_id.clone(), set_id)) { + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // The peer is not in our list. + st @ PeerState::Backoff { .. } => { + *entry.into_mut() = st; + }, + + // "Basic" situation: we failed to reach a peer that the peerset requested. + st @ PeerState::Requested | st @ PeerState::PendingRequest { .. } => { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + + let now = Instant::now(); + let ban_duration = match st { + PeerState::PendingRequest { timer_deadline, .. } + if timer_deadline > now => + cmp::max(timer_deadline - now, Duration::from_secs(5)), + _ => Duration::from_secs(5), + }; + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(ban_duration); + let peer_id = *peer_id; + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); + + *entry.into_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: now + ban_duration, + }; + }, + + // We can still get dial failures even if we are already connected to the peer, + // as an extra diagnostic for an earlier attempt. + st @ PeerState::Disabled { .. } | + st @ PeerState::Enabled { .. } | + st @ PeerState::DisabledPendingEnable { .. } | + st @ PeerState::Incoming { .. 
} => { + *entry.into_mut() = st; + }, + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id); + debug_assert!(false); + }, + } + } + } + } + + fn inject_event(&mut self, source: PeerId, connection: ConnectionId, event: NotifsHandlerOut) { + match event { + NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); + + trace!(target: "sub-libp2p", + "Handler({:?}, {:?}]) => OpenDesiredByRemote({:?})", + source, connection, set_id); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source, set_id)) { + entry + } else { + error!( + target: "sub-libp2p", + "OpenDesiredByRemote: State mismatch in the custom protos handler" + ); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Incoming => Incoming + PeerState::Incoming { mut connections, backoff_until } => { + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + if let Some((_, connec_state)) = + connections.iter_mut().find(|(c, _)| *c == connection) + { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesiredByRemote; + } else { + // Connections in `OpeningThenClosing` and `Closing` state can be + // in a Closed phase, and as such can emit `OpenDesiredByRemote` + // messages. + // Since an `Open` and/or a `Close` message have already been sent, + // there is nothing much that can be done about this anyway. + debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing | ConnectionState::Closing + )); + } + } else { + error!( + target: "sub-libp2p", + "OpenDesiredByRemote: State mismatch in the custom protos handler" + ); + debug_assert!(false); + } + + *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; + }, + + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| matches!( + s, + ConnectionState::Opening | ConnectionState::Open(_) + ))); + + if let Some((_, connec_state)) = + connections.iter_mut().find(|(c, _)| *c == connection) + { + if let ConnectionState::Closed = *connec_state { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + source, connection, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + } else { + // Connections in `OpeningThenClosing`, `Opening`, and `Closing` + // state can be in a Closed phase, and as such can emit + // `OpenDesiredByRemote` messages. + // Since an `Open` message haS already been sent, there is nothing + // more to do. 
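+								// The pending `Open` will eventually be answered with an
+								// `OpenResultOk`/`OpenResultErr`, at which point the state of
+								// this connection is reconciled.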
+ debug_assert!(matches!( + connec_state, + ConnectionState::OpenDesiredByRemote | + ConnectionState::Closing | ConnectionState::Opening + )); + } + } else { + error!( + target: "sub-libp2p", + "OpenDesiredByRemote: State mismatch in the custom protos handler" + ); + debug_assert!(false); + } + + *entry.into_mut() = PeerState::Enabled { connections }; + }, + + // Disabled => Disabled | Incoming + PeerState::Disabled { mut connections, backoff_until } => { + if let Some((_, connec_state)) = + connections.iter_mut().find(|(c, _)| *c == connection) + { + if let ConnectionState::Closed = *connec_state { + *connec_state = ConnectionState::OpenDesiredByRemote; + + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 += 1; + + trace!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + source, incoming_id); + self.peerset.incoming(set_id, source, incoming_id); + self.incoming.push(IncomingPeer { + peer_id: source, + set_id, + alive: true, + incoming_id, + }); + + *entry.into_mut() = + PeerState::Incoming { connections, backoff_until }; + } else { + // Connections in `OpeningThenClosing` and `Closing` state can be + // in a Closed phase, and as such can emit `OpenDesiredByRemote` + // messages. + // We ignore them. + debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing | ConnectionState::Closing + )); + *entry.into_mut() = + PeerState::Disabled { connections, backoff_until }; + } + } else { + error!( + target: "sub-libp2p", + "OpenDesiredByRemote: State mismatch in the custom protos handler" + ); + debug_assert!(false); + } + }, + + // DisabledPendingEnable => Enabled | DisabledPendingEnable + PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { + if let Some((_, connec_state)) = + connections.iter_mut().find(|(c, _)| *c == connection) + { + if let ConnectionState::Closed = *connec_state { + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + source, connection, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + + *entry.into_mut() = PeerState::Enabled { connections }; + } else { + // Connections in `OpeningThenClosing` and `Closing` state can be + // in a Closed phase, and as such can emit `OpenDesiredByRemote` + // messages. + // We ignore them. 
+ debug_assert!(matches!( + connec_state, + ConnectionState::OpeningThenClosing | ConnectionState::Closing + )); + *entry.into_mut() = PeerState::DisabledPendingEnable { + connections, + timer, + timer_deadline, + }; + } + } else { + error!( + target: "sub-libp2p", + "OpenDesiredByRemote: State mismatch in the custom protos handler" + ); + debug_assert!(false); + } + }, + + state => { + error!(target: "sub-libp2p", + "OpenDesiredByRemote: Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + return + }, + }; + }, + + NotifsHandlerOut::CloseDesired { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); + + trace!(target: "sub-libp2p", + "Handler({}, {:?}) => CloseDesired({:?})", + source, connection, set_id); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source, set_id)) { + entry + } else { + error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // Enabled => Enabled | Disabled + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| matches!( + s, + ConnectionState::Opening | ConnectionState::Open(_) + ))); + + let pos = if let Some(pos) = + connections.iter().position(|(c, _)| *c == connection) + { + pos + } else { + error!(target: "sub-libp2p", + "CloseDesired: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + if matches!(connections[pos].1, ConnectionState::Closing) { + *entry.into_mut() = PeerState::Enabled { connections }; + return + } + + debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_))); + connections[pos].1 = ConnectionState::Closing; + + trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Close({:?})", source, connection, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source, + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Close { protocol_index: set_id.into() }, + }); + + if let Some((replacement_pos, replacement_sink)) = + connections.iter().enumerate().find_map(|(num, (_, s))| match s { + ConnectionState::Open(s) => Some((num, s.clone())), + _ => None, + }) { + if pos <= replacement_pos { + trace!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source); + let event = NotificationsOut::CustomProtocolReplaced { + peer_id: source, + set_id, + notifications_sink: replacement_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + *entry.into_mut() = PeerState::Enabled { connections }; + } else { + // List of open connections wasn't empty before but now it is. + if !connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::Opening)) + { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", source, set_id); + self.peerset.dropped(set_id, source, DropReason::Refused); + *entry.into_mut() = + PeerState::Disabled { connections, backoff_until: None }; + } else { + *entry.into_mut() = PeerState::Enabled { connections }; + } + + trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", source, set_id); + let event = + NotificationsOut::CustomProtocolClosed { peer_id: source, set_id }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + }, + + // All connections in `Disabled` and `DisabledPendingEnable` have been sent a + // `Close` message already, and as such ignore any `CloseDesired` message. 
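+					// Their connections will be moved from `Closing` to `Closed` once the
+					// corresponding `CloseResult` arrives.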
+ state @ PeerState::Disabled { .. } | + state @ PeerState::DisabledPendingEnable { .. } => { + *entry.into_mut() = state; + return + }, + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom protos handler: {:?}", + state); + return + }, + } + }, + + NotifsHandlerOut::CloseResult { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); + + trace!(target: "sub-libp2p", + "Handler({}, {:?}) => CloseResult({:?})", + source, connection, set_id); + + match self.peers.get_mut(&(source.clone(), set_id)) { + // Move the connection from `Closing` to `Closed`. + Some(PeerState::Incoming { connections, .. }) | + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. }) | + Some(PeerState::Enabled { connections, .. }) => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { + *c == connection && matches!(s, ConnectionState::Closing) + }) { + *connec_state = ConnectionState::Closed; + } else { + error!(target: "sub-libp2p", + "CloseResult: State mismatch in the custom protos handler"); + debug_assert!(false); + } + }, + + state => { + error!(target: "sub-libp2p", + "CloseResult: Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + }, + } + }, + + NotifsHandlerOut::OpenResultOk { + protocol_index, + negotiated_fallback, + received_handshake, + notifications_sink, + .. + } => { + let set_id = sc_peerset::SetId::from(protocol_index); + trace!(target: "sub-libp2p", + "Handler({}, {:?}) => OpenResultOk({:?})", + source, connection, set_id); + + match self.peers.get_mut(&(source, set_id)) { + Some(PeerState::Enabled { connections, .. }) => { + debug_assert!(connections.iter().any(|(_, s)| matches!( + s, + ConnectionState::Opening | ConnectionState::Open(_) + ))); + let any_open = + connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { + *c == connection && matches!(s, ConnectionState::Opening) + }) { + if !any_open { + trace!(target: "sub-libp2p", "External API <= Open({}, {:?})", source, set_id); + let event = NotificationsOut::CustomProtocolOpen { + peer_id: source, + set_id, + negotiated_fallback, + received_handshake, + notifications_sink: notifications_sink.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + *connec_state = ConnectionState::Open(notifications_sink); + } else if let Some((_, connec_state)) = + connections.iter_mut().find(|(c, s)| { + *c == connection && matches!(s, ConnectionState::OpeningThenClosing) + }) { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); + debug_assert!(false); + } + }, + + Some(PeerState::Incoming { connections, .. }) | + Some(PeerState::DisabledPendingEnable { connections, .. }) | + Some(PeerState::Disabled { connections, .. 
}) => { + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { + *c == connection && matches!(s, ConnectionState::OpeningThenClosing) + }) { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultOk State mismatch in the custom protos handler"); + debug_assert!(false); + } + }, + + state => { + error!(target: "sub-libp2p", + "OpenResultOk: Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + return + }, + } + }, + + NotifsHandlerOut::OpenResultErr { protocol_index } => { + let set_id = sc_peerset::SetId::from(protocol_index); + trace!(target: "sub-libp2p", + "Handler({:?}, {:?}) => OpenResultErr({:?})", + source, connection, set_id); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source, set_id)) { + entry + } else { + error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { mut connections } => { + debug_assert!(connections.iter().any(|(_, s)| matches!( + s, + ConnectionState::Opening | ConnectionState::Open(_) + ))); + + if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| { + *c == connection && matches!(s, ConnectionState::Opening) + }) { + *connec_state = ConnectionState::Closed; + } else if let Some((_, connec_state)) = + connections.iter_mut().find(|(c, s)| { + *c == connection && matches!(s, ConnectionState::OpeningThenClosing) + }) { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + + if !connections.iter().any(|(_, s)| { + matches!(s, ConnectionState::Opening | ConnectionState::Open(_)) + }) { + trace!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(set_id, source, DropReason::Refused); + + let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + *entry.into_mut() = PeerState::Disabled { + connections, + backoff_until: Some(Instant::now() + Duration::from_secs(ban_dur)), + }; + } else { + *entry.into_mut() = PeerState::Enabled { connections }; + } + }, + mut state @ PeerState::Incoming { .. } | + mut state @ PeerState::DisabledPendingEnable { .. } | + mut state @ PeerState::Disabled { .. } => { + match &mut state { + PeerState::Incoming { connections, .. } | + PeerState::Disabled { connections, .. } | + PeerState::DisabledPendingEnable { connections, .. 
} => { + if let Some((_, connec_state)) = + connections.iter_mut().find(|(c, s)| { + *c == connection && + matches!(s, ConnectionState::OpeningThenClosing) + }) { + *connec_state = ConnectionState::Closing; + } else { + error!(target: "sub-libp2p", + "OpenResultErr: State mismatch in the custom protos handler"); + debug_assert!(false); + } + }, + _ => unreachable!( + "Match branches are the same as the one on which we + enter this block; qed" + ), + }; + + *entry.into_mut() = state; + }, + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom protos handler: {:?}", + state); + debug_assert!(false); + }, + }; + }, + + NotifsHandlerOut::Notification { protocol_index, message } => { + let set_id = sc_peerset::SetId::from(protocol_index); + if self.is_open(&source, set_id) { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Notification({}, {:?}, {} bytes)", + connection, + source, + set_id, + message.len() + ); + trace!( + target: "sub-libp2p", + "External API <= Message({}, {:?})", + source, + set_id, + ); + let event = NotificationsOut::Notification { peer_id: source, set_id, message }; + + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } else { + trace!( + target: "sub-libp2p", + "Handler({:?}) => Post-close notification({}, {:?}, {} bytes)", + connection, + source, + set_id, + message.len() + ); + } + }, + } + } + + fn poll( + &mut self, + cx: &mut Context, + _params: &mut impl PollParameters, + ) -> Poll> { + if let Some(event) = self.events.pop_front() { + return Poll::Ready(event) + } + + // Poll for instructions from the peerset. + // Note that the peerset is a *best effort* crate, and we have to use defensive programming. + loop { + match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) { + Poll::Ready(Some(sc_peerset::Message::Accept(index))) => { + self.peerset_report_accept(index); + }, + Poll::Ready(Some(sc_peerset::Message::Reject(index))) => { + self.peerset_report_reject(index); + }, + Poll::Ready(Some(sc_peerset::Message::Connect { peer_id, set_id, .. })) => { + self.peerset_report_connect(peer_id, set_id); + }, + Poll::Ready(Some(sc_peerset::Message::Drop { peer_id, set_id, .. })) => { + self.peerset_report_disconnect(peer_id, set_id); + }, + Poll::Ready(None) => { + error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); + break + }, + Poll::Pending => break, + } + } + + while let Poll::Ready(Some((delay_id, peer_id, set_id))) = + Pin::new(&mut self.delays).poll_next(cx) + { + let peer_state = match self.peers.get_mut(&(peer_id, set_id)) { + Some(s) => s, + // We intentionally never remove elements from `delays`, and it may + // thus contain peers which are now gone. This is a normal situation. + None => continue, + }; + + match peer_state { + PeerState::Backoff { timer, .. } if *timer == delay_id => { + trace!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); + self.peers.remove(&(peer_id, set_id)); + }, + + PeerState::PendingRequest { timer, .. 
} if *timer == delay_id => { + trace!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); + // The `DialPeerCondition` ensures that dial attempts are de-duplicated + self.events.push_back(NetworkBehaviourAction::DialPeer { + peer_id, + condition: DialPeerCondition::Disconnected, + }); + *peer_state = PeerState::Requested; + }, + + PeerState::DisabledPendingEnable { connections, timer, timer_deadline } + if *timer == delay_id => + { + // The first element of `closed` is chosen to open the notifications substream. + if let Some((connec_id, connec_state)) = + connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed)) + { + trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Open({:?}) (ban expired)", + peer_id, *connec_id, set_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::One(*connec_id), + event: NotifsHandlerIn::Open { protocol_index: set_id.into() }, + }); + *connec_state = ConnectionState::Opening; + *peer_state = PeerState::Enabled { + connections: mem::replace(connections, Default::default()), + }; + } else { + *timer_deadline = Instant::now() + Duration::from_secs(5); + let delay = futures_timer::Delay::new(Duration::from_secs(5)); + let timer = *timer; + self.delays.push( + async move { + delay.await; + (timer, peer_id, set_id) + } + .boxed(), + ); + } + } + + // We intentionally never remove elements from `delays`, and it may + // thus contain obsolete entries. This is a normal situation. + _ => {}, + } + } + + if let Some(event) = self.events.pop_front() { + return Poll::Ready(event) + } + + Poll::Pending + } +} diff --git a/client/network/src/protocol/notifications/handler.rs b/client/network/src/protocol/notifications/handler.rs new file mode 100644 index 0000000000000..a0c49fa592b21 --- /dev/null +++ b/client/network/src/protocol/notifications/handler.rs @@ -0,0 +1,845 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming +//! and outgoing substreams for all gossiping protocols. +//! +//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the +//! gossiping protocols that are Substrate-related and outside of the scope of libp2p. +//! +//! # Usage +//! +//! From an API perspective, for each of its protocols, the [`NotifsHandler`] is always in one of +//! the following state (see [`State`]): +//! +//! - Closed substream. This is the initial state. +//! - Closed substream, but remote desires them to be open. +//! - Open substream. +//! - Open substream, but remote desires them to be closed. +//! +//! Each protocol in the [`NotifsHandler`] can spontaneously switch between these states: +//! +//! 
- "Closed substream" to "Closed substream but open desired". When that happens, a +//! [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted. +//! - "Closed substream but open desired" to "Closed substream" (i.e. the remote has cancelled +//! their request). When that happens, a [`NotifsHandlerOut::CloseDesired`] is emitted. +//! - "Open substream" to "Open substream but close desired". When that happens, a +//! [`NotifsHandlerOut::CloseDesired`] is emitted. +//! +//! The user can instruct a protocol in the `NotifsHandler` to switch from "closed" to "open" or +//! vice-versa by sending either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`]. The +//! `NotifsHandler` must answer with [`NotifsHandlerOut::OpenResultOk`] or +//! [`NotifsHandlerOut::OpenResultErr`], or with [`NotifsHandlerOut::CloseResult`]. +//! +//! When a [`NotifsHandlerOut::OpenResultOk`] is emitted, the substream is now in the open state. +//! When a [`NotifsHandlerOut::OpenResultErr`] or [`NotifsHandlerOut::CloseResult`] is emitted, +//! the `NotifsHandler` is now (or remains) in the closed state. +//! +//! When a [`NotifsHandlerOut::OpenDesiredByRemote`] is emitted, the user should always send back +//! either a [`NotifsHandlerIn::Open`] or a [`NotifsHandlerIn::Close`].If this isn't done, the +//! remote will be left in a pending state. +//! +//! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted +//! [`NotifsHandlerIn::Open`] has gotten an answer. + +use crate::protocol::notifications::upgrade::{ + NotificationsHandshakeError, NotificationsIn, NotificationsInSubstream, NotificationsOut, + NotificationsOutSubstream, UpgradeCollec, +}; + +use bytes::BytesMut; +use futures::{ + channel::mpsc, + lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, + prelude::*, +}; +use libp2p::{ + core::{ + upgrade::{InboundUpgrade, OutboundUpgrade}, + ConnectedPoint, PeerId, + }, + swarm::{ + IntoProtocolsHandler, KeepAlive, NegotiatedSubstream, ProtocolsHandler, + ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, + }, +}; +use log::error; +use parking_lot::{Mutex, RwLock}; +use std::{ + borrow::Cow, + collections::VecDeque, + mem, + pin::Pin, + str, + sync::Arc, + task::{Context, Poll}, + time::{Duration, Instant}, +}; + +/// Number of pending notifications in asynchronous contexts. +/// See [`NotificationsSink::reserve_notification`] for context. +const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; + +/// Number of pending notifications in synchronous contexts. +const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; + +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. +const OPEN_TIMEOUT: Duration = Duration::from_secs(10); + +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsHandler`]. +/// +/// See the documentation at the module level for more information. 
+pub struct NotifsHandlerProto { + /// Name of protocols, prototypes for upgrades for inbound substreams, and the message we + /// send or respond with in the handshake. + protocols: Vec, +} + +/// The actual handler once the connection has been established. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandler { + /// List of notification protocols, specified by the user at initialization. + protocols: Vec, + + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, + + /// Whether we are the connection dialer or listener. + endpoint: ConnectedPoint, + + /// Remote we are connected to. + peer_id: PeerId, + + /// Events to return in priority from `poll`. + events_queue: VecDeque< + ProtocolsHandlerEvent, + >, +} + +/// Configuration for a notifications protocol. +#[derive(Debug, Clone)] +pub struct ProtocolConfig { + /// Name of the protocol. + pub name: Cow<'static, str>, + /// Names of the protocol to use if the main one isn't available. + pub fallback_names: Vec>, + /// Handshake of the protocol. The `RwLock` is locked every time a new substream is opened. + pub handshake: Arc>>, + /// Maximum allowed size for a notification. + pub max_notification_size: u64, +} + +/// Fields specific for each individual protocol. +struct Protocol { + /// Other fields. + config: ProtocolConfig, + + /// Prototype for the inbound upgrade. + in_upgrade: NotificationsIn, + + /// Current state of the substreams for this protocol. + state: State, +} + +/// See the module-level documentation to learn about the meaning of these variants. +enum State { + /// Protocol is in the "Closed" state. + Closed { + /// True if an outgoing substream is still in the process of being opened. + pending_opening: bool, + }, + + /// Protocol is in the "Closed" state. A [`NotifsHandlerOut::OpenDesiredByRemote`] has been + /// emitted. + OpenDesiredByRemote { + /// Substream opened by the remote and that hasn't been accepted/rejected yet. + in_substream: NotificationsInSubstream, + + /// See [`State::Closed::pending_opening`]. + pending_opening: bool, + }, + + /// Protocol is in the "Closed" state, but has received a [`NotifsHandlerIn::Open`] and is + /// consequently trying to open the various notifications substreams. + /// + /// A [`NotifsHandlerOut::OpenResultOk`] or a [`NotifsHandlerOut::OpenResultErr`] event must + /// be emitted when transitionning to respectively [`State::Open`] or [`State::Closed`]. + Opening { + /// Substream opened by the remote. If `Some`, has been accepted. + in_substream: Option>, + }, + + /// Protocol is in the "Open" state. + Open { + /// Contains the two `Receiver`s connected to the [`NotificationsSink`] that has been + /// sent out. The notifications to send out can be pulled from this receivers. + /// We use two different channels in order to have two different channel sizes, but from + /// the receiving point of view, the two channels are the same. + /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + notifications_sink_rx: stream::Peekable< + stream::Select< + stream::Fuse>, + stream::Fuse>, + >, + >, + + /// Outbound substream that has been accepted by the remote. + /// + /// Always `Some` on transition to [`State::Open`]. Switched to `None` only if the remote + /// closed the substream. If `None`, a [`NotifsHandlerOut::CloseDesired`] event has been + /// emitted. + out_substream: Option>, + + /// Substream opened by the remote. 
+ /// + /// Contrary to the `out_substream` field, operations continue as normal even if the + /// substream has been closed by the remote. A `None` is treated the same way as if there + /// was an idle substream. + in_substream: Option>, + }, +} + +impl IntoProtocolsHandler for NotifsHandlerProto { + type Handler = NotifsHandler; + + fn inbound_protocol(&self) -> UpgradeCollec { + self.protocols + .iter() + .map(|cfg| { + NotificationsIn::new( + cfg.name.clone(), + cfg.fallback_names.clone(), + cfg.max_notification_size, + ) + }) + .collect::>() + } + + fn into_handler(self, peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + NotifsHandler { + protocols: self + .protocols + .into_iter() + .map(|config| { + let in_upgrade = NotificationsIn::new( + config.name.clone(), + config.fallback_names.clone(), + config.max_notification_size, + ); + + Protocol { config, in_upgrade, state: State::Closed { pending_opening: false } } + }) + .collect(), + peer_id: *peer_id, + endpoint: connected_point.clone(), + when_connection_open: Instant::now(), + events_queue: VecDeque::with_capacity(16), + } + } +} + +/// Event that can be received by a `NotifsHandler`. +#[derive(Debug, Clone)] +pub enum NotifsHandlerIn { + /// Instruct the handler to open the notification substreams. + /// + /// Must always be answered by a [`NotifsHandlerOut::OpenResultOk`] or a + /// [`NotifsHandlerOut::OpenResultErr`] event. + /// + /// Importantly, it is forbidden to send a [`NotifsHandlerIn::Open`] while a previous one is + /// already in the fly. It is however possible if a `Close` is still in the fly. + Open { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// Instruct the handler to close the notification substreams, or reject any pending incoming + /// substream request. + /// + /// Must always be answered by a [`NotifsHandlerOut::CloseResult`] event. + Close { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, +} + +/// Event that can be emitted by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerOut { + /// Acknowledges a [`NotifsHandlerIn::Open`]. + OpenResultOk { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + /// Name of the protocol that was actually negotiated, if the default one wasn't available. + negotiated_fallback: Option>, + /// The endpoint of the connection that is open for custom protocols. + endpoint: ConnectedPoint, + /// Handshake that was sent to us. + /// This is normally a "Status" message, but this out of the concern of this code. + received_handshake: Vec, + /// How notifications can be sent to this node. + notifications_sink: NotificationsSink, + }, + + /// Acknowledges a [`NotifsHandlerIn::Open`]. The remote has refused the attempt to open + /// notification substreams. + OpenResultErr { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// Acknowledges a [`NotifsHandlerIn::Close`]. + CloseResult { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, + + /// The remote would like the substreams to be open. Send a [`NotifsHandlerIn::Open`] or a + /// [`NotifsHandlerIn::Close`] in order to either accept or deny this request. 
If a
+	/// [`NotifsHandlerIn::Open`] or [`NotifsHandlerIn::Close`] has been sent before and has not
+	/// yet been acknowledged by a matching [`NotifsHandlerOut`], then you don't need to send
+	/// another [`NotifsHandlerIn`].
+	OpenDesiredByRemote {
+		/// Index of the protocol in the list of protocols passed at initialization.
+		protocol_index: usize,
+	},
+
+	/// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in
+	/// order to close them. If a [`NotifsHandlerIn::Close`] has been sent before and has not yet
+	/// been acknowledged by a [`NotifsHandlerOut::CloseResult`], then you don't need to send
+	/// another one.
+	CloseDesired {
+		/// Index of the protocol in the list of protocols passed at initialization.
+		protocol_index: usize,
+	},
+
+	/// Received a message on a custom protocol substream.
+	///
+	/// Can only happen when the handler is in the open state.
+	Notification {
+		/// Index of the protocol in the list of protocols passed at initialization.
+		protocol_index: usize,
+		/// Message that has been received.
+		message: BytesMut,
+	},
+}
+
+/// Sink connected directly to the node background task. Allows sending notifications to the peer.
+///
+/// Can be cloned in order to obtain multiple references to the substream of the same peer.
+#[derive(Debug, Clone)]
+pub struct NotificationsSink {
+	inner: Arc<NotificationsSinkInner>,
+}
+
+#[derive(Debug)]
+struct NotificationsSinkInner {
+	/// Target of the sink.
+	peer_id: PeerId,
+	/// Sender to use in asynchronous contexts. Uses an asynchronous mutex.
+	async_channel: FuturesMutex<mpsc::Sender<NotificationsSinkMessage>>,
+	/// Sender to use in synchronous contexts. Uses a synchronous mutex.
+	/// This channel has a large capacity and is meant to be used in contexts where
+	/// back-pressure cannot be properly exerted.
+	/// It will be removed in a future version.
+	sync_channel: Mutex<mpsc::Sender<NotificationsSinkMessage>>,
+}
+
+/// Message emitted through the [`NotificationsSink`] and processed by the background task
+/// dedicated to the peer.
+#[derive(Debug)]
+enum NotificationsSinkMessage {
+	/// Message emitted by [`NotificationsSink::reserve_notification`] and
+	/// [`NotificationsSink::write_notification_now`].
+	Notification { message: Vec<u8> },
+
+	/// Must close the connection.
+	ForceClose,
+}
+
+impl NotificationsSink {
+	/// Returns the [`PeerId`] the sink is connected to.
+	pub fn peer_id(&self) -> &PeerId {
+		&self.inner.peer_id
+	}
+
+	/// Sends a notification to the peer.
+	///
+	/// If too many messages are already buffered, the notification is silently discarded and the
+	/// connection to the peer will be closed shortly after.
+	///
+	/// The protocol name is expected to be checked ahead of calling this method. It is a logic
+	/// error to send a notification using an unknown protocol.
+	///
+	/// This method will be removed in a future version.
+	pub fn send_sync_notification<'a>(&'a self, message: impl Into<Vec<u8>>) {
+		let mut lock = self.inner.sync_channel.lock();
+		let result =
+			lock.try_send(NotificationsSinkMessage::Notification { message: message.into() });
+
+		if result.is_err() {
+			// Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the
+			// buffer, and therefore `try_send` will succeed.
+			let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose);
+			debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected()));
+		}
+	}
+
+	/// Wait until the remote is ready to accept a notification.
+	///
+	/// Returns an error in the case where the connection is closed.
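+	///
+	/// A usage sketch (illustrative only):
+	///
+	/// ```ignore
+	/// if let Ok(ready) = sink.reserve_notification().await {
+	///     // A slot has been reserved, so `send` only fails if the substream
+	///     // was closed in the meantime.
+	///     let _ = ready.send(b"notification".to_vec());
+	/// }
+	/// ```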
+ /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + pub async fn reserve_notification<'a>(&'a self) -> Result, ()> { + let mut lock = self.inner.async_channel.lock().await; + + let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; + if poll_ready.is_ok() { + Ok(Ready { lock }) + } else { + Err(()) + } + } +} + +/// Notification slot is reserved and the notification can actually be sent. +#[must_use] +#[derive(Debug)] +pub struct Ready<'a> { + /// Guarded channel. The channel inside is guaranteed to not be full. + lock: FuturesMutexGuard<'a, mpsc::Sender>, +} + +impl<'a> Ready<'a> { + /// Consumes this slots reservation and actually queues the notification. + /// + /// Returns an error if the substream has been closed. + pub fn send(mut self, notification: impl Into>) -> Result<(), ()> { + self.lock + .start_send(NotificationsSinkMessage::Notification { message: notification.into() }) + .map_err(|_| ()) + } +} + +/// Error specific to the collection of protocols. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotifsHandlerError { + /// Channel of synchronous notifications is full. + SyncNotificationsClogged, +} + +impl NotifsHandlerProto { + /// Builds a new handler. + /// + /// `list` is a list of notification protocols names, the message to send as part of the + /// handshake, and the maximum allowed size of a notification. At the moment, the message + /// is always the same whether we open a substream ourselves or respond to handshake from + /// the remote. + pub fn new(list: impl Into>) -> Self { + Self { protocols: list.into() } + } +} + +impl ProtocolsHandler for NotifsHandler { + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Error = NotifsHandlerError; + type InboundProtocol = UpgradeCollec; + type OutboundProtocol = NotificationsOut; + // Index within the `out_protocols`. + type OutboundOpenInfo = usize; + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + let protocols = self + .protocols + .iter() + .map(|p| p.in_upgrade.clone()) + .collect::>(); + + SubstreamProtocol::new(protocols, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + (mut in_substream_open, protocol_index): >::Output, + (): (), + ) { + let mut protocol_info = &mut self.protocols[protocol_index]; + match protocol_info.state { + State::Closed { pending_opening } => { + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenDesiredByRemote { protocol_index }, + )); + + protocol_info.state = State::OpenDesiredByRemote { + in_substream: in_substream_open.substream, + pending_opening, + }; + }, + State::OpenDesiredByRemote { .. } => { + // If a substream already exists, silently drop the new one. + // Note that we drop the substream, which will send an equivalent to a + // TCP "RST" to the remote and force-close the substream. It might + // seem like an unclean way to get rid of a substream. However, keep + // in mind that it is invalid for the remote to open multiple such + // substreams, and therefore sending a "RST" is the most correct thing + // to do. + return + }, + State::Opening { ref mut in_substream, .. } | + State::Open { ref mut in_substream, .. } => { + if in_substream.is_some() { + // Same remark as above. + return + } + + // Create `handshake_message` on a separate line to be sure that the + // lock is released as soon as possible. 
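+				// Sending our handshake back is what actually accepts the inbound
+				// substream; until it is sent, the remote's open request stays pending.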
+ let handshake_message = protocol_info.config.handshake.read().clone(); + in_substream_open.substream.send_handshake(handshake_message); + *in_substream = Some(in_substream_open.substream); + }, + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + new_open: >::Output, + protocol_index: Self::OutboundOpenInfo, + ) { + match self.protocols[protocol_index].state { + State::Closed { ref mut pending_opening } | + State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + debug_assert!(*pending_opening); + *pending_opening = false; + }, + State::Open { .. } => { + error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler"); + debug_assert!(false); + }, + State::Opening { ref mut in_substream } => { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + peer_id: self.peer_id, + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + self.protocols[protocol_index].state = State::Open { + notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()) + .peekable(), + out_substream: Some(new_open.substream), + in_substream: in_substream.take(), + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultOk { + protocol_index, + negotiated_fallback: new_open.negotiated_fallback, + endpoint: self.endpoint.clone(), + received_handshake: new_open.handshake, + notifications_sink, + }, + )); + }, + } + } + + fn inject_event(&mut self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Open { protocol_index } => { + let protocol_info = &mut self.protocols[protocol_index]; + match &mut protocol_info.state { + State::Closed { pending_opening } => { + if !*pending_opening { + let proto = NotificationsOut::new( + protocol_info.config.name.clone(), + protocol_info.config.fallback_names.clone(), + protocol_info.config.handshake.read().clone(), + protocol_info.config.max_notification_size, + ); + + self.events_queue.push_back( + ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, protocol_index) + .with_timeout(OPEN_TIMEOUT), + }, + ); + } + + protocol_info.state = State::Opening { in_substream: None }; + }, + State::OpenDesiredByRemote { pending_opening, in_substream } => { + let handshake_message = protocol_info.config.handshake.read().clone(); + + if !*pending_opening { + let proto = NotificationsOut::new( + protocol_info.config.name.clone(), + protocol_info.config.fallback_names.clone(), + handshake_message.clone(), + protocol_info.config.max_notification_size, + ); + + self.events_queue.push_back( + ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, protocol_index) + .with_timeout(OPEN_TIMEOUT), + }, + ); + } + + in_substream.send_handshake(handshake_message); + + // The state change is done in two steps because of borrowing issues. + let in_substream = match mem::replace( + &mut protocol_info.state, + State::Opening { in_substream: None }, + ) { + State::OpenDesiredByRemote { in_substream, .. } => in_substream, + _ => unreachable!(), + }; + protocol_info.state = State::Opening { in_substream: Some(in_substream) }; + }, + State::Opening { .. } | State::Open { .. } => { + // As documented, it is forbidden to send an `Open` while there is already + // one in the fly. 
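+						// The `Notifications` behaviour upholds this invariant, so reaching
+						// this branch indicates a bug in the caller rather than misbehaviour
+						// of the remote, which is why it is only surfaced as a `debug_assert!`.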
+ error!(target: "sub-libp2p", "opening already-opened handler"); + debug_assert!(false); + }, + } + }, + + NotifsHandlerIn::Close { protocol_index } => { + match self.protocols[protocol_index].state { + State::Open { .. } => { + self.protocols[protocol_index].state = + State::Closed { pending_opening: false }; + }, + State::Opening { .. } => { + self.protocols[protocol_index].state = + State::Closed { pending_opening: true }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr { protocol_index }, + )); + }, + State::OpenDesiredByRemote { pending_opening, .. } => { + self.protocols[protocol_index].state = State::Closed { pending_opening }; + }, + State::Closed { .. } => {}, + } + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseResult { protocol_index }, + )); + }, + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: usize, + _: ProtocolsHandlerUpgrErr, + ) { + match self.protocols[num].state { + State::Closed { ref mut pending_opening } | + State::OpenDesiredByRemote { ref mut pending_opening, .. } => { + debug_assert!(*pending_opening); + *pending_opening = false; + }, + + State::Opening { .. } => { + self.protocols[num].state = State::Closed { pending_opening: false }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::OpenResultErr { protocol_index: num }, + )); + }, + + // No substream is being open when already `Open`. + State::Open { .. } => debug_assert!(false), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + // `Yes` if any protocol has some activity. + if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. })) { + return KeepAlive::Yes + } + + // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote + // to express desire to open substreams. + KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + if let Some(ev) = self.events_queue.pop_front() { + return Poll::Ready(ev) + } + + // For each open substream, try send messages from `notifications_sink_rx` to the + // substream. + for protocol_index in 0..self.protocols.len() { + if let State::Open { + notifications_sink_rx, out_substream: Some(out_substream), .. + } = &mut self.protocols[protocol_index].state + { + loop { + // Only proceed with `out_substream.poll_ready_unpin` if there is an element + // available in `notifications_sink_rx`. This avoids waking up the task when + // a substream is ready to send if there isn't actually something to send. + match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { + Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => + return Poll::Ready(ProtocolsHandlerEvent::Close( + NotifsHandlerError::SyncNotificationsClogged, + )), + Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {}, + Poll::Ready(None) | Poll::Pending => break, + } + + // Before we extract the element from `notifications_sink_rx`, check that the + // substream is ready to accept a message. + match out_substream.poll_ready_unpin(cx) { + Poll::Ready(_) => {}, + Poll::Pending => break, + } + + // Now that the substream is ready for a message, grab what to send. 
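+					// Per the `Sink` contract, `poll_ready_unpin` succeeding above means that
+					// the `start_send_unpin` below should accept the message without dropping it.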
+ let message = match notifications_sink_rx.poll_next_unpin(cx) { + Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => + message, + Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) | + Poll::Ready(None) | + Poll::Pending => { + // Should never be reached, as per `poll_peek` above. + debug_assert!(false); + break + }, + }; + + let _ = out_substream.start_send_unpin(message); + // Note that flushing is performed later down this function. + } + } + } + + // Flush all outbound substreams. + // When `poll` returns `Poll::Ready`, the libp2p `Swarm` may decide to no longer call + // `poll` again before it is ready to accept more events. + // In order to make sure that substreams are flushed as soon as possible, the flush is + // performed before the code paths that can produce `Ready` (with some rare exceptions). + // Importantly, however, the flush is performed *after* notifications are queued with + // `Sink::start_send`. + for protocol_index in 0..self.protocols.len() { + match &mut self.protocols[protocol_index].state { + State::Open { out_substream: out_substream @ Some(_), .. } => { + match Sink::poll_flush(Pin::new(out_substream.as_mut().unwrap()), cx) { + Poll::Pending | Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(_)) => { + *out_substream = None; + let event = NotifsHandlerOut::CloseDesired { protocol_index }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) + }, + }; + }, + + State::Closed { .. } | + State::Opening { .. } | + State::Open { out_substream: None, .. } | + State::OpenDesiredByRemote { .. } => {}, + } + } + + // Poll inbound substreams. + for protocol_index in 0..self.protocols.len() { + // Inbound substreams being closed is always tolerated, except for the + // `OpenDesiredByRemote` state which might need to be switched back to `Closed`. + match &mut self.protocols[protocol_index].state { + State::Closed { .. } | + State::Open { in_substream: None, .. } | + State::Opening { in_substream: None } => {}, + + State::Open { in_substream: in_substream @ Some(_), .. } => + match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) { + Poll::Pending => {}, + Poll::Ready(Some(Ok(message))) => { + let event = NotifsHandlerOut::Notification { protocol_index, message }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) + }, + Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None, + }, + + State::OpenDesiredByRemote { in_substream, pending_opening } => + match NotificationsInSubstream::poll_process(Pin::new(in_substream), cx) { + Poll::Pending => {}, + Poll::Ready(Ok(void)) => match void {}, + Poll::Ready(Err(_)) => { + self.protocols[protocol_index].state = + State::Closed { pending_opening: *pending_opening }; + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CloseDesired { protocol_index }, + )) + }, + }, + + State::Opening { in_substream: in_substream @ Some(_), .. } => + match NotificationsInSubstream::poll_process( + Pin::new(in_substream.as_mut().unwrap()), + cx, + ) { + Poll::Pending => {}, + Poll::Ready(Ok(void)) => match void {}, + Poll::Ready(Err(_)) => *in_substream = None, + }, + } + } + + // This is the only place in this method that can return `Pending`. + // By putting it at the very bottom, we are guaranteed that everything has been properly + // polled. 
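+		// Every poll performed above has registered the current task for wakeup,
+		// so returning `Pending` here cannot result in a lost event.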
+ Poll::Pending + } +} diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs new file mode 100644 index 0000000000000..0b3ffc01a4b8d --- /dev/null +++ b/client/network/src/protocol/notifications/tests.rs @@ -0,0 +1,355 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#![cfg(test)] + +use crate::protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig}; + +use futures::prelude::*; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + transport::MemoryTransport, + upgrade, ConnectedPoint, + }, + identity, noise, + swarm::{ + IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, + ProtocolsHandler, Swarm, SwarmEvent, + }, + yamux, Multiaddr, PeerId, Transport, +}; +use std::{ + error, io, iter, + task::{Context, Poll}, + time::Duration, +}; + +/// Builds two nodes that have each other as bootstrap nodes. +/// This is to be used only for testing, and a panic will happen if something goes wrong. +fn build_nodes() -> (Swarm, Swarm) { + let mut out = Vec::with_capacity(2); + + let keypairs: Vec<_> = (0..2).map(|_| identity::Keypair::generate_ed25519()).collect(); + let addrs: Vec = (0..2) + .map(|_| format!("/memory/{}", rand::random::()).parse().unwrap()) + .collect(); + + for index in 0..2 { + let keypair = keypairs[index].clone(); + + let noise_keys = + noise::Keypair::::new().into_authentic(&keypair).unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(yamux::YamuxConfig::default()) + .timeout(Duration::from_secs(20)) + .boxed(); + + let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { + sets: vec![sc_peerset::SetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: if index == 0 { + keypairs.iter().skip(1).map(|keypair| keypair.public().into_peer_id()).collect() + } else { + vec![] + }, + reserved_nodes: Default::default(), + reserved_only: false, + }], + }); + + let behaviour = CustomProtoWithAddr { + inner: Notifications::new( + peerset, + iter::once(ProtocolConfig { + name: "/foo".into(), + fallback_names: Vec::new(), + handshake: Vec::new(), + max_notification_size: 1024 * 1024, + }), + ), + addrs: addrs + .iter() + .enumerate() + .filter_map(|(n, a)| { + if n != index { + Some((keypairs[n].public().into_peer_id(), a.clone())) + } else { + None + } + }) + .collect(), + }; + + let mut swarm = Swarm::new(transport, behaviour, keypairs[index].public().into_peer_id()); + swarm.listen_on(addrs[index].clone()).unwrap(); + out.push(swarm); + } + + // Final output + let mut out_iter = out.into_iter(); + let first = out_iter.next().unwrap(); + let second = out_iter.next().unwrap(); + (first, second) +} 
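+
+// The two swarms returned by `build_nodes` talk over libp2p's memory transport:
+// node 0 lists node 1 as a bootnode in its peerset, while `CustomProtoWithAddr`
+// below injects the concrete `/memory/...` addresses, so the peerset-driven
+// dial from node 0 to node 1 can succeed without any real networking.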
+ +/// Wraps around the `Notifications` network behaviour, and adds hardcoded node addresses to it. +struct CustomProtoWithAddr { + inner: Notifications, + addrs: Vec<(PeerId, Multiaddr)>, +} + +impl std::ops::Deref for CustomProtoWithAddr { + type Target = Notifications; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl std::ops::DerefMut for CustomProtoWithAddr { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl NetworkBehaviour for CustomProtoWithAddr { + type ProtocolsHandler = <Notifications as NetworkBehaviour>::ProtocolsHandler; + type OutEvent = <Notifications as NetworkBehaviour>::OutEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + self.inner.new_handler() + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> { + let mut list = self.inner.addresses_of_peer(peer_id); + for (p, a) in self.addrs.iter() { + if p == peer_id { + list.push(a.clone()); + } + } + list + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + self.inner.inject_connected(peer_id) + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.inner.inject_disconnected(peer_id) + } + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.inner.inject_connection_established(peer_id, conn, endpoint) + } + + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.inner.inject_connection_closed(peer_id, conn, endpoint) + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::OutEvent, + ) { + self.inner.inject_event(peer_id, connection, event) + } + + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters + ) -> Poll< + NetworkBehaviourAction< + <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::InEvent, + Self::OutEvent + > + >{ + self.inner.poll(cx, params) + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error, + ) { + self.inner.inject_addr_reach_failure(peer_id, addr, error) + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + self.inner.inject_dial_failure(peer_id) + } + + fn inject_new_listener(&mut self, id: ListenerId) { + self.inner.inject_new_listener(id) + } + + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.inner.inject_new_listen_addr(id, addr) + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.inner.inject_expired_listen_addr(id, addr) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_new_external_addr(addr) + } + + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_expired_external_addr(addr) + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { + self.inner.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.inner.inject_listener_closed(id, reason); + } +} + +#[test] +fn reconnect_after_disconnect() { + // We connect two nodes together, then force a disconnect (through the API of the `Service`), + // check that the disconnect worked, and finally check whether they successfully reconnect. + + let (mut service1, mut service2) = build_nodes(); + + // For this test, the services can be in the following states.
+ #[derive(Debug, Copy, Clone, PartialEq, Eq)] + enum ServiceState { + NotConnected, + FirstConnec, + Disconnected, + ConnectedAgain, + } + let mut service1_state = ServiceState::NotConnected; + let mut service2_state = ServiceState::NotConnected; + + futures::executor::block_on(async move { + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(s1, s2).await { + future::Either::Left((ev, _)) => future::Either::Left(ev), + future::Either::Right((ev, _)) => future::Either::Right(ev), + } + }; + + match event { + future::Either::Left(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolOpen { .. }, + )) => match service1_state { + ServiceState::NotConnected => { + service1_state = ServiceState::FirstConnec; + if service2_state == ServiceState::FirstConnec { + service1.behaviour_mut().disconnect_peer( + Swarm::local_peer_id(&service2), + sc_peerset::SetId::from(0), + ); + } + }, + ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + }, + future::Either::Left(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. }, + )) => match service1_state { + ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + }, + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolOpen { .. }, + )) => match service2_state { + ServiceState::NotConnected => { + service2_state = ServiceState::FirstConnec; + if service1_state == ServiceState::FirstConnec { + service1.behaviour_mut().disconnect_peer( + Swarm::local_peer_id(&service2), + sc_peerset::SetId::from(0), + ); + } + }, + ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + }, + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. }, + )) => match service2_state { + ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + }, + _ => {}, + } + + if service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::ConnectedAgain + { + break + } + } + + // Now that the two services have disconnected and reconnected, wait for 3 seconds and + // check whether they're still connected. + let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); + + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(future::select(s1, s2), &mut delay).await { + future::Either::Right(_) => break, // success + future::Either::Left((future::Either::Left((ev, _)), _)) => ev, + future::Either::Left((future::Either::Right((ev, _)), _)) => ev, + } + }; + + match event { + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. 
}) => panic!(), + _ => {}, + } + } + }); +} diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/notifications/upgrade.rs similarity index 69% rename from client/network/src/protocol/generic_proto/handler.rs rename to client/network/src/protocol/notifications/upgrade.rs index 5845130a7db87..196b4f44f81f7 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/notifications/upgrade.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,12 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -pub use self::group::{ - NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut +pub use self::{ + collec::UpgradeCollec, + notifications::{ + NotificationsHandshakeError, NotificationsIn, NotificationsInOpen, + NotificationsInSubstream, NotificationsOut, NotificationsOutError, NotificationsOutOpen, + NotificationsOutSubstream, + }, }; -pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; -mod group; -mod legacy; -mod notif_in; -mod notif_out; +mod collec; +mod notifications; diff --git a/client/network/src/protocol/generic_proto/upgrade/collec.rs b/client/network/src/protocol/notifications/upgrade/collec.rs similarity index 59% rename from client/network/src/protocol/generic_proto/upgrade/collec.rs rename to client/network/src/protocol/notifications/upgrade/collec.rs index f8d199974940f..2462d2becf4b1 100644 --- a/client/network/src/protocol/generic_proto/upgrade/collec.rs +++ b/client/network/src/protocol/notifications/upgrade/collec.rs @@ -1,26 +1,29 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. +// This file is part of Substrate. + +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . use futures::prelude::*; use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; -use std::{iter::FromIterator, pin::Pin, task::{Context, Poll}, vec}; +use std::{ + iter::FromIterator, + pin::Pin, + task::{Context, Poll}, + vec, +}; // TODO: move this to libp2p => https://github.com/libp2p/rust-libp2p/issues/1445 @@ -31,13 +34,13 @@ pub struct UpgradeCollec(pub Vec); impl From> for UpgradeCollec { fn from(list: Vec) -> Self { - UpgradeCollec(list) + Self(list) } } impl FromIterator for UpgradeCollec { fn from_iter>(iter: I) -> Self { - UpgradeCollec(iter.into_iter().collect()) + Self(iter.into_iter().collect()) } } @@ -46,9 +49,10 @@ impl UpgradeInfo for UpgradeCollec { type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - self.0.iter().enumerate() - .flat_map(|(n, p)| - p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n))) + self.0 + .iter() + .enumerate() + .flat_map(|(n, p)| p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n))) .collect::>() .into_iter() } diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/notifications/upgrade/notifications.rs similarity index 62% rename from client/network/src/protocol/generic_proto/upgrade/notifications.rs rename to client/network/src/protocol/notifications/upgrade/notifications.rs index 64b4b980da002..997a1ccf1dec7 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -1,45 +1,52 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . +use asynchronous_codec::Framed; /// Notifications protocol. /// /// The Substrate notifications protocol consists in the following: /// -/// - Node A opens a substream to node B and sends a message which contains some protocol-specific -/// higher-level logic. This message is prefixed with a variable-length integer message length. -/// This message can be empty, in which case `0` is sent. +/// - Node A opens a substream to node B and sends a message which contains some +/// protocol-specific higher-level logic. 
This message is prefixed with a variable-length +/// integer message length. This message can be empty, in which case `0` is sent. /// - If node B accepts the substream, it sends back a message with the same properties. /// - If instead B refuses the connection (which typically happens because no empty slot is /// available), then it immediately closes the substream without sending back anything. -/// - Node A can then send notifications to B, prefixed with a variable-length integer indicating -/// the length of the message. -/// - Either node A or node B can signal that it doesn't want this notifications substream anymore -/// by closing its writing side. The other party should respond by also closing their own -/// writing side soon after. +/// - Node A can then send notifications to B, prefixed with a variable-length integer +/// indicating the length of the message. +/// - Either node A or node B can signal that it doesn't want this notifications substream +/// anymore by closing its writing side. The other party should respond by also closing their +/// own writing side soon after. /// /// Notification substreams are unidirectional. If A opens a substream with B, then B is /// encouraged but not required to open a substream to A as well. -/// - use bytes::BytesMut; use futures::prelude::*; -use futures_codec::Framed; -use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; -use log::error; -use std::{borrow::Cow, convert::Infallible, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use log::{error, warn}; +use std::{ + borrow::Cow, + convert::{Infallible, TryFrom as _}, + io, mem, + pin::Pin, + task::{Context, Poll}, + vec, +}; use unsigned_varint::codec::UviBytes; /// Maximum allowed size of the two handshake messages, in bytes. @@ -50,7 +57,10 @@ const MAX_HANDSHAKE_SIZE: usize = 1024; #[derive(Debug, Clone)] pub struct NotificationsIn { /// Protocol name to use when negotiating the substream. - protocol_name: Cow<'static, str>, + /// The first one is the main name, while the other ones are fallbacks. + protocol_names: Vec<Cow<'static, str>>, + /// Maximum allowed size for a single notification. + max_notification_size: u64, } /// Upgrade that opens a substream, waits for the remote to accept by sending back a status @@ -58,9 +68,12 @@ pub struct NotificationsIn { #[derive(Debug, Clone)] pub struct NotificationsOut { /// Protocol name to use when negotiating the substream. - protocol_name: Cow<'static, str>, + /// The first one is the main name, while the other ones are fallbacks. + protocol_names: Vec<Cow<'static, str>>, /// Message to send when we start the handshake. initial_message: Vec<u8>, + /// Maximum allowed size for a single notification. + max_notification_size: u64, } /// A substream for incoming notification messages. @@ -100,75 +113,96 @@ pub struct NotificationsOutSubstream<TSubstream> { impl NotificationsIn { /// Builds a new potential upgrade. - pub fn new(protocol_name: impl Into<Cow<'static, str>>) -> Self { - NotificationsIn { - protocol_name: protocol_name.into(), - } - } - - /// Returns the name of the protocol that we accept.
- pub fn protocol_name(&self) -> &Cow<'static, str> { - &self.protocol_name + pub fn new( + main_protocol_name: impl Into<Cow<'static, str>>, + fallback_names: Vec<Cow<'static, str>>, + max_notification_size: u64, + ) -> Self { + let mut protocol_names = fallback_names; + protocol_names.insert(0, main_protocol_name.into()); + + Self { protocol_names, max_notification_size } } } impl UpgradeInfo for NotificationsIn { - type Info = Cow<'static, [u8]>; - type InfoIter = iter::Once<Self::Info>; + type Info = StringProtocolName; + type InfoIter = vec::IntoIter<Self::Info>; fn protocol_info(&self) -> Self::InfoIter { - let bytes: Cow<'static, [u8]> = match &self.protocol_name { - Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()), - Cow::Owned(s) => Cow::Owned(s.as_bytes().to_vec()) - }; - iter::once(bytes) + self.protocol_names + .iter() + .cloned() + .map(StringProtocolName) + .collect::<Vec<_>>() + .into_iter() } } impl<TSubstream> InboundUpgrade<TSubstream> for NotificationsIn -where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +where + TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = (Vec<u8>, NotificationsInSubstream<TSubstream>); + type Output = NotificationsInOpen<TSubstream>; type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>; type Error = NotificationsHandshakeError; - fn upgrade_inbound( - self, - mut socket: TSubstream, - _: Self::Info, - ) -> Self::Future { + fn upgrade_inbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { - let initial_message_len = unsigned_varint::aio::read_usize(&mut socket).await?; - if initial_message_len > MAX_HANDSHAKE_SIZE { + let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; + if handshake_len > MAX_HANDSHAKE_SIZE { return Err(NotificationsHandshakeError::TooLarge { - requested: initial_message_len, + requested: handshake_len, max: MAX_HANDSHAKE_SIZE, - }); + }) } - let mut initial_message = vec![0u8; initial_message_len]; - if !initial_message.is_empty() { - socket.read_exact(&mut initial_message).await?; + let mut handshake = vec![0u8; handshake_len]; + if !handshake.is_empty() { + socket.read_exact(&mut handshake).await?; } + let mut codec = UviBytes::default(); + codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::MAX)); + let substream = NotificationsInSubstream { - socket: Framed::new(socket, UviBytes::default()), + socket: Framed::new(socket, codec), handshake: NotificationsInSubstreamHandshake::NotSent, }; - Ok((initial_message, substream)) + Ok(NotificationsInOpen { + handshake, + negotiated_fallback: if negotiated_name.0 == self.protocol_names[0] { + None + } else { + Some(negotiated_name.0) + }, + substream, + }) }) } } +/// Yielded by the [`NotificationsIn`] after a successful upgrade. +pub struct NotificationsInOpen<TSubstream> { + /// Handshake sent by the remote. + pub handshake: Vec<u8>, + /// If the negotiated name is not the "main" protocol name but a fallback, contains the + /// name of the negotiated fallback. + pub negotiated_fallback: Option<Cow<'static, str>>, + /// Implementation of `Stream` that allows receiving messages from the substream. + pub substream: NotificationsInSubstream<TSubstream>, +} + impl<TSubstream> NotificationsInSubstream<TSubstream> -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { /// Sends the handshake in order to inform the remote that we accept the substream.
pub fn send_handshake(&mut self, message: impl Into>) { if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { error!(target: "sub-libp2p", "Tried to send handshake twice"); - return; + return } self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); @@ -176,12 +210,15 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, /// Equivalent to `Stream::poll_next`, except that it only drives the handshake and is /// guaranteed to not generate any notification. - pub fn poll_process(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + pub fn poll_process( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll> { let mut this = self.project(); loop { match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { - NotificationsInSubstreamHandshake::PendingSend(msg) => + NotificationsInSubstreamHandshake::PendingSend(msg) => { match Sink::poll_ready(this.socket.as_mut(), cx) { Poll::Ready(_) => { *this.handshake = NotificationsInSubstreamHandshake::Flush; @@ -193,32 +230,35 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); return Poll::Pending - } - }, - NotificationsInSubstreamHandshake::Flush => + }, + } + }, + NotificationsInSubstreamHandshake::Flush => { match Sink::poll_flush(this.socket.as_mut(), cx)? { Poll::Ready(()) => *this.handshake = NotificationsInSubstreamHandshake::Sent, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; return Poll::Pending - } - }, + }, + } + }, st @ NotificationsInSubstreamHandshake::NotSent | st @ NotificationsInSubstreamHandshake::Sent | st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote | st @ NotificationsInSubstreamHandshake::BothSidesClosed => { *this.handshake = st; - return Poll::Pending; - } + return Poll::Pending + }, } } } } impl Stream for NotificationsInSubstream -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { type Item = Result; @@ -232,7 +272,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, *this.handshake = NotificationsInSubstreamHandshake::NotSent; return Poll::Pending }, - NotificationsInSubstreamHandshake::PendingSend(msg) => + NotificationsInSubstreamHandshake::PendingSend(msg) => { match Sink::poll_ready(this.socket.as_mut(), cx) { Poll::Ready(_) => { *this.handshake = NotificationsInSubstreamHandshake::Flush; @@ -244,22 +284,25 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); return Poll::Pending - } - }, - NotificationsInSubstreamHandshake::Flush => + }, + } + }, + NotificationsInSubstreamHandshake::Flush => { match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ Poll::Ready(()) => *this.handshake = NotificationsInSubstreamHandshake::Sent, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; return Poll::Pending - } - }, + }, + } + }, NotificationsInSubstreamHandshake::Sent => { match Stream::poll_next(this.socket.as_mut(), cx) { - Poll::Ready(None) => *this.handshake = - NotificationsInSubstreamHandshake::ClosingInResponseToRemote, + Poll::Ready(None) => + *this.handshake = + NotificationsInSubstreamHandshake::ClosingInResponseToRemote, Poll::Ready(Some(msg)) => { *this.handshake = NotificationsInSubstreamHandshake::Sent; return Poll::Ready(Some(msg)) @@ -276,13 +319,13 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Ready(()) => *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed, Poll::Pending => { - *this.handshake = NotificationsInSubstreamHandshake::ClosingInResponseToRemote; + *this.handshake = + NotificationsInSubstreamHandshake::ClosingInResponseToRemote; return Poll::Pending - } + }, }, - NotificationsInSubstreamHandshake::BothSidesClosed => - return Poll::Ready(None), + NotificationsInSubstreamHandshake::BothSidesClosed => return Poll::Ready(None), } } } @@ -290,46 +333,59 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, impl NotificationsOut { /// Builds a new potential upgrade. - pub fn new(protocol_name: impl Into>, initial_message: impl Into>) -> Self { + pub fn new( + main_protocol_name: impl Into>, + fallback_names: Vec>, + initial_message: impl Into>, + max_notification_size: u64, + ) -> Self { let initial_message = initial_message.into(); if initial_message.len() > MAX_HANDSHAKE_SIZE { error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit"); } - NotificationsOut { - protocol_name: protocol_name.into(), - initial_message, - } + let mut protocol_names = fallback_names; + protocol_names.insert(0, main_protocol_name.into()); + + Self { protocol_names, initial_message, max_notification_size } + } +} + +/// Implementation of the `ProtocolName` trait, where the protocol name is a string. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StringProtocolName(Cow<'static, str>); + +impl upgrade::ProtocolName for StringProtocolName { + fn protocol_name(&self) -> &[u8] { + self.0.as_bytes() } } impl UpgradeInfo for NotificationsOut { - type Info = Cow<'static, [u8]>; - type InfoIter = iter::Once; + type Info = StringProtocolName; + type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - let bytes: Cow<'static, [u8]> = match &self.protocol_name { - Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()), - Cow::Owned(s) => Cow::Owned(s.as_bytes().to_vec()) - }; - iter::once(bytes) + self.protocol_names + .iter() + .cloned() + .map(StringProtocolName) + .collect::>() + .into_iter() } } impl OutboundUpgrade for NotificationsOut -where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +where + TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = (Vec, NotificationsOutSubstream); + type Output = NotificationsOutOpen; type Future = Pin> + Send>>; type Error = NotificationsHandshakeError; - fn upgrade_outbound( - self, - mut socket: TSubstream, - _: Self::Info, - ) -> Self::Future { + fn upgrade_outbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { - upgrade::write_with_len_prefix(&mut socket, &self.initial_message).await?; + upgrade::write_length_prefixed(&mut socket, &self.initial_message).await?; // Reading handshake. 
let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; @@ -337,7 +393,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, return Err(NotificationsHandshakeError::TooLarge { requested: handshake_len, max: MAX_HANDSHAKE_SIZE, - }); + }) } let mut handshake = vec![0u8; handshake_len]; @@ -345,22 +401,42 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, socket.read_exact(&mut handshake).await?; } - Ok((handshake, NotificationsOutSubstream { - socket: Framed::new(socket, UviBytes::default()), - })) + let mut codec = UviBytes::default(); + codec.set_max_len(usize::try_from(self.max_notification_size).unwrap_or(usize::MAX)); + + Ok(NotificationsOutOpen { + handshake, + negotiated_fallback: if negotiated_name.0 == self.protocol_names[0] { + None + } else { + Some(negotiated_name.0) + }, + substream: NotificationsOutSubstream { socket: Framed::new(socket, codec) }, + }) }) } } +/// Yielded by the [`NotificationsOut`] after a successful upgrade. +pub struct NotificationsOutOpen<TSubstream> { + /// Handshake returned by the remote. + pub handshake: Vec<u8>, + /// If the negotiated name is not the "main" protocol name but a fallback, contains the + /// name of the negotiated fallback. + pub negotiated_fallback: Option<Cow<'static, str>>, + /// Implementation of `Sink` that allows sending messages on the substream. + pub substream: NotificationsOutSubstream<TSubstream>, +} + impl<TSubstream> Sink<Vec<u8>> for NotificationsOutSubstream<TSubstream> - where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { type Error = NotificationsOutError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> { let mut this = self.project(); - Sink::poll_ready(this.socket.as_mut(), cx) - .map_err(NotificationsOutError::Io) + Sink::poll_ready(this.socket.as_mut(), cx).map_err(NotificationsOutError::Io) } fn start_send(self: Pin<&mut Self>, item: Vec<u8>) -> Result<(), Self::Error> { @@ -371,14 +447,12 @@ impl<TSubstream> Sink<Vec<u8>> for NotificationsOutSubstream<TSubstream> fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> { let mut this = self.project(); - Sink::poll_flush(this.socket.as_mut(), cx) - .map_err(NotificationsOutError::Io) + Sink::poll_flush(this.socket.as_mut(), cx).map_err(NotificationsOutError::Io) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> { let mut this = self.project(); - Sink::poll_close(this.socket.as_mut(), cx) - .map_err(NotificationsOutError::Io) + Sink::poll_close(this.socket.as_mut(), cx).map_err(NotificationsOutError::Io) } } @@ -404,12 +478,12 @@ pub enum NotificationsHandshakeError { impl From<unsigned_varint::io::ReadError> for NotificationsHandshakeError { fn from(err: unsigned_varint::io::ReadError) -> Self { match err { - unsigned_varint::io::ReadError::Io(err) => NotificationsHandshakeError::Io(err), - unsigned_varint::io::ReadError::Decode(err) => NotificationsHandshakeError::VarintDecode(err), + unsigned_varint::io::ReadError::Io(err) => Self::Io(err), + unsigned_varint::io::ReadError::Decode(err) => Self::VarintDecode(err), _ => { - log::warn!("Unrecognized varint decoding error"); - NotificationsHandshakeError::Io(From::from(io::ErrorKind::InvalidData)) - } + warn!("Unrecognized varint decoding error"); + Self::Io(From::from(io::ErrorKind::InvalidData)) + }, } } } @@ -423,10 +497,10 @@ pub enum NotificationsOutError { #[cfg(test)] mod tests { - use super::{NotificationsIn, NotificationsOut}; + use super::{NotificationsIn, NotificationsInOpen, NotificationsOut, NotificationsOutOpen}; use async_std::net::{TcpListener, TcpStream}; - use futures::{prelude::*, channel::oneshot};
+ use futures::{channel::oneshot, prelude::*}; use libp2p::core::upgrade; use std::borrow::Cow; @@ -437,11 +511,13 @@ mod tests { let client = async_std::task::spawn(async move { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let (handshake, mut substream) = upgrade::apply_outbound( + let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), - upgrade::Version::V1 - ).await.unwrap(); + NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), + upgrade::Version::V1, + ) + .await + .unwrap(); assert_eq!(handshake, b"hello world"); substream.send(b"test message".to_vec()).await.unwrap(); @@ -452,12 +528,14 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( + let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); - assert_eq!(initial_message, b"initial message"); + assert_eq!(handshake, b"initial message"); substream.send_handshake(&b"hello world"[..]); let msg = substream.next().await.unwrap().unwrap(); @@ -476,11 +554,13 @@ mod tests { let client = async_std::task::spawn(async move { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let (handshake, mut substream) = upgrade::apply_outbound( + let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, vec![]), - upgrade::Version::V1 - ).await.unwrap(); + NotificationsOut::new(PROTO_NAME, Vec::new(), vec![], 1024 * 1024), + upgrade::Version::V1, + ) + .await + .unwrap(); assert!(handshake.is_empty()); substream.send(Default::default()).await.unwrap(); @@ -491,12 +571,14 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( + let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); - assert!(initial_message.is_empty()); + assert!(handshake.is_empty()); substream.send_handshake(vec![]); let msg = substream.next().await.unwrap().unwrap(); @@ -515,9 +597,10 @@ mod tests { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); let outcome = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, &b"hello"[..]), - upgrade::Version::V1 - ).await; + NotificationsOut::new(PROTO_NAME, Vec::new(), &b"hello"[..], 1024 * 1024), + upgrade::Version::V1, + ) + .await; // Despite the protocol negotiation being successfully conducted on the listener // side, we have to receive an error here because the listener didn't send the @@ -530,12 +613,14 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let (initial_msg, substream) = upgrade::apply_inbound( + let NotificationsInOpen { handshake, substream, .. 
} = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); - assert_eq!(initial_msg, b"hello"); + assert_eq!(handshake, b"hello"); // We successfully upgrade to the protocol, but then close the substream. drop(substream); @@ -554,9 +639,15 @@ mod tests { let ret = upgrade::apply_outbound( socket, // We check that an initial message that is too large gets refused. - NotificationsOut::new(PROTO_NAME, (0..32768).map(|_| 0).collect::>()), - upgrade::Version::V1 - ).await; + NotificationsOut::new( + PROTO_NAME, + Vec::new(), + (0..32768).map(|_| 0).collect::>(), + 1024 * 1024, + ), + upgrade::Version::V1, + ) + .await; assert!(ret.is_err()); }); @@ -567,8 +658,9 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let ret = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) - ).await; + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await; assert!(ret.is_err()); }); @@ -584,9 +676,10 @@ mod tests { let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); let ret = upgrade::apply_outbound( socket, - NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), - upgrade::Version::V1 - ).await; + NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), + upgrade::Version::V1, + ) + .await; assert!(ret.is_err()); }); @@ -595,11 +688,13 @@ mod tests { listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( + let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); - assert_eq!(initial_message, b"initial message"); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); + assert_eq!(handshake, b"initial message"); // We check that a handshake that is too large gets refused. substream.send_handshake((0..32768).map(|_| 0).collect::>()); diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 03714b05ace0d..07f5f76fce7f2 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -1,18 +1,20 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// -// Substrate is free software: you can redistribute it and/or modify + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, + +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -// + // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Contains the state of the chain synchronization process //! @@ -25,41 +27,50 @@ //! 
The `ChainSync` struct maintains the state of the block requests. Whenever something happens on //! the network, or whenever a block has been successfully verified, call the appropriate method in //! order to update it. -//! -use codec::Encode; -use blocks::BlockCollection; -use sp_blockchain::{Error as ClientError, Info as BlockchainInfo, HeaderMetadata}; -use sp_consensus::{BlockOrigin, BlockStatus, - block_validation::{BlockAnnounceValidator, Validation}, - import_queue::{IncomingBlock, BlockImportResult, BlockImportError} -}; use crate::{ - config::BoxFinalityProofRequestBuilder, - protocol::message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, - FinalityProofResponse, Roles}, + protocol::message::{self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse}, + schema::v1::{StateRequest, StateResponse}, }; +use blocks::BlockCollection; +use codec::Encode; use either::Either; use extra_requests::ExtraRequests; +use futures::{stream::FuturesUnordered, task::Poll, Future, FutureExt, StreamExt}; use libp2p::PeerId; -use log::{debug, trace, warn, info, error}; +use log::{debug, error, info, trace, warn}; +use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; +use sp_arithmetic::traits::Saturating; +use sp_blockchain::{Error as ClientError, HeaderMetadata}; +use sp_consensus::{ + block_validation::{BlockAnnounceValidator, Validation}, + BlockOrigin, BlockStatus, +}; use sp_runtime::{ - Justification, generic::BlockId, - traits::{Block as BlockT, Header, NumberFor, Zero, One, CheckedSub, SaturatedConversion, Hash, HashFor} + traits::{ + Block as BlockT, CheckedSub, Hash, HashFor, Header as HeaderT, NumberFor, One, + SaturatedConversion, Zero, + }, + EncodedJustification, Justifications, }; -use sp_arithmetic::traits::Saturating; +use state::StateSync; use std::{ - fmt, ops::Range, collections::{HashMap, hash_map::Entry, HashSet, VecDeque}, - sync::Arc, pin::Pin, + collections::{hash_map::Entry, HashMap, HashSet}, + fmt, + ops::Range, + pin::Pin, + sync::Arc, }; -use futures::{task::Poll, Future, stream::FuturesUnordered, FutureExt, StreamExt}; +use warp::{WarpProofRequest, WarpSync, WarpSyncProvider}; mod blocks; mod extra_requests; +mod state; +mod warp; /// Maximum blocks to request in a single packet. -const MAX_BLOCKS_TO_REQUEST: usize = 128; +const MAX_BLOCKS_TO_REQUEST: usize = 64; /// Maximum blocks to store in the import queue. const MAX_IMPORTING_BLOCKS: usize = 2048; @@ -67,6 +78,10 @@ const MAX_IMPORTING_BLOCKS: usize = 2048; /// Maximum blocks to download ahead of any gap. const MAX_DOWNLOAD_AHEAD: u32 = 2048; +/// Maximum blocks to look backwards. The gap is the difference between the highest block and the +/// common block of a node. +const MAX_BLOCKS_TO_LOOK_BACKWARDS: u32 = MAX_DOWNLOAD_AHEAD / 2; + /// Maximum number of concurrent block announce validations. /// /// If the queue reaches the maximum, we drop any new block @@ -78,6 +93,9 @@ const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS: usize = 256; /// See [`MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS`] for more information. const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER: usize = 4; +/// Pick the state to sync as the latest finalized number minus this. 
+const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; + /// We use a heuristic that with a high likelihood, by the time /// `MAJOR_SYNC_BLOCKS` have been imported we'll be on the same /// chain as (or at least closer to) the peer so we want to delay @@ -85,8 +103,8 @@ const MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS_PER_PEER: usize = 4; /// so far behind. const MAJOR_SYNC_BLOCKS: u8 = 5; -/// Number of recently announced blocks to track for each peer. -const ANNOUNCE_HISTORY_SIZE: usize = 64; +/// Number of peers that need to be connected before warp sync is started. +const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; mod rep { use sc_peerset::ReputationChange as Rep; @@ -96,7 +114,7 @@ mod rep { /// Reputation change when a peer sent us a status message with a different /// genesis than us. - pub const GENESIS_MISMATCH: Rep = Rep::new(i32::min_value(), "Genesis mismatch"); + pub const GENESIS_MISMATCH: Rep = Rep::new(i32::MIN, "Genesis mismatch"); /// Reputation change for peers which send us a block with an incomplete header. pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); @@ -110,17 +128,17 @@ mod rep { /// Peer did not provide us with advertised block data. pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data"); - /// Reputation change for peers which send us a known block. - pub const KNOWN_BLOCK: Rep = Rep::new(-(1 << 29), "Duplicate block"); + /// Reputation change for peers which send us non-requested block data. + pub const NOT_REQUESTED: Rep = Rep::new(-(1 << 29), "Not requested block data"); /// Reputation change for peers which send us a block with bad justifications. pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - /// Reputation change for peers which send us a block with bad finality proof. - pub const BAD_FINALITY_PROOF: Rep = Rep::new(-(1 << 16), "Bad finality proof"); - /// Reputation change when a peer sent us invlid ancestry result. - pub const UNKNOWN_ANCESTOR:Rep = Rep::new(-(1 << 16), "DB Error"); + pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); + + /// Peer response data does not have requested bits. + pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); } enum PendingRequests { @@ -130,40 +148,37 @@ enum PendingRequests { impl PendingRequests { fn add(&mut self, id: &PeerId) { - match self { - PendingRequests::Some(set) => { - set.insert(id.clone()); - } - PendingRequests::All => {}, + if let Self::Some(ref mut set) = self { + set.insert(*id); } } - fn take(&mut self) -> PendingRequests { + fn take(&mut self) -> Self { std::mem::take(self) } fn set_all(&mut self) { - *self = PendingRequests::All; + *self = Self::All; } fn contains(&self, id: &PeerId) -> bool { match self { - PendingRequests::Some(set) => set.contains(id), - PendingRequests::All => true, + Self::Some(set) => set.contains(id), + Self::All => true, } } fn is_empty(&self) -> bool { match self { - PendingRequests::Some(set) => set.is_empty(), - PendingRequests::All => false, + Self::Some(set) => set.is_empty(), + Self::All => false, } } } impl Default for PendingRequests { fn default() -> Self { - PendingRequests::Some(HashSet::default()) + Self::Some(HashSet::default()) } } @@ -180,23 +195,13 @@ pub struct ChainSync { best_queued_number: NumberFor, /// The best block hash in our queue of blocks to import best_queued_hash: B::Hash, - /// The role of this node, e.g. 
light or full - role: Roles, - /// What block attributes we require for this node, usually derived from - /// what role we are, but could be customized - required_block_attributes: message::BlockAttributes, - /// Any extra finality proof requests. - extra_finality_proofs: ExtraRequests<B>, + /// Current mode (full/light) + mode: SyncMode, /// Any extra justification requests. extra_justifications: ExtraRequests<B>, /// A set of hashes of blocks that are being downloaded or have been /// downloaded and are queued for import. queue_blocks: HashSet<B::Hash>, - /// The best block number that was successfully imported into the chain. - /// This can not decrease. - best_imported_number: NumberFor<B>, - /// Finality proof handler. - request_builder: Option<BoxFinalityProofRequestBuilder<B>>, /// Fork sync targets. fork_targets: HashMap<B::Hash, ForkTarget<B>>, /// A set of peers for which there might be potential block requests @@ -208,16 +213,26 @@ pub struct ChainSync<B: BlockT> { /// Total number of downloaded blocks. downloaded_blocks: usize, /// All block announcements that are currently being validated. - block_announce_validation: FuturesUnordered< - Pin<Box<dyn Future<Output = PreValidateBlockAnnounce<B::Header>> + Send>> - >, + block_announce_validation: + FuturesUnordered<Pin<Box<dyn Future<Output = PreValidateBlockAnnounce<B::Header>> + Send>>>, /// Stats per peer about the number of concurrent block announce validations. block_announce_validation_per_peer_stats: HashMap<PeerId, usize>, + /// State sync in progress, if any. + state_sync: Option<StateSync<B>>, + /// Warp sync in progress, if any. + warp_sync: Option<WarpSync<B>>, + /// Warp sync provider. + warp_sync_provider: Option<Arc<dyn WarpSyncProvider<B>>>, + /// Enable importing existing blocks. This is used after the state download to + /// catch up to the latest state while re-importing blocks. + import_existing: bool, } /// All the data we have about a Peer that we are trying to sync with #[derive(Debug, Clone)] pub struct PeerSync<B: BlockT> { + /// Peer id of this peer. + pub peer_id: PeerId, /// The common number is the block number that is a common point of /// ancestry for both our chains (as far as we know). pub common_number: NumberFor<B>, @@ -228,9 +243,22 @@ pub struct PeerSync<B: BlockT> { /// The state of syncing this peer is in for us, generally categorized /// into `Available` or "busy" with something as defined by `PeerSyncState`. pub state: PeerSyncState<B>, - /// A queue of blocks that this peer has announced to us, should only - /// contain `ANNOUNCE_HISTORY_SIZE` entries. - pub recently_announced: VecDeque<B::Hash> +} + +impl<B: BlockT> PeerSync<B> { + /// Update the `common_number` iff `new_common > common_number`. + fn update_common_number(&mut self, new_common: NumberFor<B>) { + if self.common_number < new_common { + trace!( + target: "sync", + "Updating peer {} common number from={} => to={}.", + self.peer_id, + self.common_number, + new_common, + ); + self.common_number = new_common; + } + } } /// The sync status of a peer we are trying to sync with @@ -239,7 +267,7 @@ pub struct PeerInfo<B: BlockT> { /// Their best block hash. pub best_hash: B::Hash, /// Their best block number. - pub best_number: NumberFor<B> + pub best_number: NumberFor<B>, } struct ForkTarget<B: BlockT> { @@ -257,11 +285,7 @@ pub enum PeerSyncState<B: BlockT> { /// Available for sync requests. Available, /// Searching for ancestors the Peer has in common with us. - AncestorSearch { - start: NumberFor<B>, - current: NumberFor<B>, - state: AncestorSearchState<B>, - }, + AncestorSearch { start: NumberFor<B>, current: NumberFor<B>, state: AncestorSearchState<B> }, /// Actively downloading new blocks, starting from the given Number. DownloadingNew(NumberFor<B>), /// Downloading a stale block with given Hash.
Stale means that it is a @@ -270,17 +294,15 @@ pub enum PeerSyncState<B: BlockT> { DownloadingStale(B::Hash), /// Downloading justification for given block hash. DownloadingJustification(B::Hash), - /// Downloading finality proof for given block hash. - DownloadingFinalityProof(B::Hash) + /// Downloading state. + DownloadingState, + /// Downloading warp proof. + DownloadingWarpProof, } impl<B: BlockT> PeerSyncState<B> { pub fn is_available(&self) -> bool { - if let PeerSyncState::Available = self { - true - } else { - false - } + matches!(self, Self::Available) } } @@ -290,7 +312,49 @@ pub enum SyncState { /// Initial sync is complete, keep-up sync is active. Idle, /// Actively catching up with the chain. - Downloading + Downloading, +} + +/// Reported state download progress. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct StateDownloadProgress { + /// Estimated download percentage. + pub percentage: u32, + /// Total state size in bytes downloaded so far. + pub size: u64, +} + +/// Reported warp sync phase. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum WarpSyncPhase { + /// Waiting for peers to connect. + AwaitingPeers, + /// Downloading and verifying grandpa warp proofs. + DownloadingWarpProofs, + /// Downloading state data. + DownloadingState, + /// Importing state. + ImportingState, +} + +impl fmt::Display for WarpSyncPhase { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::AwaitingPeers => write!(f, "Waiting for peers"), + Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), + Self::DownloadingState => write!(f, "Downloading state"), + Self::ImportingState => write!(f, "Importing state"), + } + } +} + +/// Reported warp sync progress. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct WarpSyncProgress { + /// Current warp sync phase. + pub phase: WarpSyncPhase, + /// Total bytes downloaded so far. + pub total_bytes: u64, } /// Syncing status and statistics. @@ -304,6 +368,10 @@ pub struct Status<B: BlockT> { pub num_peers: u32, /// Number of blocks queued for import pub queued_blocks: u32, + /// State sync status in progress, if any. + pub state_sync: Option<StateDownloadProgress>, + /// Warp sync in progress, if any. + pub warp_sync: Option<WarpSyncProgress>, } /// A peer did not behave as expected and should be reported. @@ -324,7 +392,37 @@ pub enum OnBlockData<B: BlockT> { /// The block should be imported. Import(BlockOrigin, Vec<IncomingBlock<B>>), /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest<B>) + Request(PeerId, BlockRequest<B>), +} + +impl<B: BlockT> OnBlockData<B> { + /// Returns `self` as request. + #[cfg(test)] + fn into_request(self) -> Option<(PeerId, BlockRequest<B>)> { + if let Self::Request(peer, req) = self { + Some((peer, req)) + } else { + None + } + } +} + +/// Result of [`ChainSync::on_state_data`]. +#[derive(Debug)] +pub enum OnStateData<B: BlockT> { + /// The block and state that should be imported. + Import(BlockOrigin, IncomingBlock<B>), + /// A new state request needs to be made to the given peer. + Request(PeerId, StateRequest), +} + +/// Result of [`ChainSync::on_warp_sync_data`]. +#[derive(Debug)] +pub enum OnWarpSyncData<B: BlockT> { + /// Warp proof request is issued. + WarpProofRequest(PeerId, warp::WarpProofRequest<B>), + /// A new state request needs to be made to the given peer. + StateRequest(PeerId, StateRequest), } /// Result of [`ChainSync::poll_block_announce_validation`]. @@ -336,6 +434,8 @@ pub enum PollBlockAnnounceValidation<H> { Failure { /// Who sent the processed block announcement? who: PeerId, + /// Should the peer be disconnected?
+ disconnect: bool, }, /// The announcement does not require further handling. Nothing { @@ -343,8 +443,8 @@ pub enum PollBlockAnnounceValidation<H> { who: PeerId, /// Was this their new best block? is_best: bool, - /// The header of the announcement. - header: H, + /// The announcement. + announce: BlockAnnounce<H>, }, /// The announcement header should be imported. ImportHeader { @@ -352,9 +452,11 @@ pub enum PollBlockAnnounceValidation<H> { who: PeerId, /// Was this their new best block? is_best: bool, - /// The header of the announcement. - header: H, + /// The announcement. + announce: BlockAnnounce<H>, }, + /// The block announcement should be skipped. + Skip, } /// Result of [`ChainSync::block_announce_validation`]. @@ -366,15 +468,8 @@ enum PreValidateBlockAnnounce<H> { Failure { /// Who sent the processed block announcement? who: PeerId, - }, - /// The announcement does not require further handling. - Nothing { - /// Who sent the processed block announcement? - who: PeerId, - /// Was this their new best block? - is_best: bool, - /// The announcement. - announce: BlockAnnounce<H>, + /// Should the peer be disconnected? + disconnect: bool, }, /// The pre-validation was successful and the announcement should be /// further processed. @@ -386,6 +481,17 @@ enum PreValidateBlockAnnounce<H> { /// The announcement. announce: BlockAnnounce<H>, }, + /// The announcement validation returned an error. + /// + /// An error means that *this* node failed to validate it because some internal error happened. + /// If the block announcement was invalid, [`Self::Failure`] is the correct variant to express + /// this. + Error { who: PeerId }, + /// The block announcement should be skipped. + /// + /// This should *only* be returned when there wasn't a slot registered + /// for this block announcement validation. + Skip, } /// Result of [`ChainSync::on_block_justification`]. @@ -394,26 +500,20 @@ pub enum OnBlockJustification<B: BlockT> { /// The justification needs no further handling. Nothing, /// The justification should be imported. - Import { - peer: PeerId, - hash: B::Hash, - number: NumberFor<B>, - justification: Justification - } + Import { peer: PeerId, hash: B::Hash, number: NumberFor<B>, justifications: Justifications }, } -/// Result of [`ChainSync::on_block_finality_proof`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum OnBlockFinalityProof<B: BlockT> { - /// The proof needs no further handling. - Nothing, - /// The proof should be imported. - Import { - peer: PeerId, - hash: B::Hash, - number: NumberFor<B>, - proof: Vec<u8> - } +/// Operation mode. +#[derive(Debug, PartialEq, Eq)] +pub enum SyncMode { + // Sync headers only + Light, + // Sync headers and block bodies + Full, + // Sync headers and the last finalized state + LightState { storage_chain_mode: bool, skip_proofs: bool }, + // Warp sync mode. + Warp, } /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. @@ -429,32 +529,21 @@ enum HasSlotForBlockAnnounceValidation { impl<B: BlockT> ChainSync<B> { /// Create a new instance.
pub fn new( - role: Roles, + mode: SyncMode, client: Arc>, - info: &BlockchainInfo, - request_builder: Option>, block_announce_validator: Box + Send>, max_parallel_downloads: u32, - ) -> Self { - let mut required_block_attributes = BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION; - - if role.is_full() { - required_block_attributes |= BlockAttributes::BODY - } - - ChainSync { + warp_sync_provider: Option>>, + ) -> Result { + let mut sync = Self { client, peers: HashMap::new(), blocks: BlockCollection::new(), - best_queued_hash: info.best_hash, - best_queued_number: info.best_number, - best_imported_number: info.best_number, - extra_finality_proofs: ExtraRequests::new("finality proof"), + best_queued_hash: Default::default(), + best_queued_number: Zero::zero(), extra_justifications: ExtraRequests::new("justification"), - role, - required_block_attributes, + mode, queue_blocks: Default::default(), - request_builder, fork_targets: Default::default(), pending_requests: Default::default(), block_announce_validator, @@ -462,6 +551,35 @@ impl ChainSync { downloaded_blocks: 0, block_announce_validation: Default::default(), block_announce_validation_per_peer_stats: Default::default(), + state_sync: None, + warp_sync: None, + warp_sync_provider, + import_existing: false, + }; + sync.reset_sync_start_point()?; + Ok(sync) + } + + fn required_block_attributes(&self) -> BlockAttributes { + match self.mode { + SyncMode::Full => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, + SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::LightState { storage_chain_mode: true, .. } => + BlockAttributes::HEADER | + BlockAttributes::JUSTIFICATION | + BlockAttributes::INDEXED_BODY, + } + } + + fn skip_execution(&self) -> bool { + match self.mode { + SyncMode::Full => false, + SyncMode::Light => true, + SyncMode::LightState { .. } => true, + SyncMode::Warp => true, } } @@ -469,36 +587,51 @@ impl ChainSync { /// /// Returns `None` if the peer is unknown. pub fn peer_info(&self, who: &PeerId) -> Option> { - self.peers.get(who).map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) + self.peers + .get(who) + .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) } /// Returns the current sync status. pub fn status(&self) -> Status { let best_seen = self.peers.values().map(|p| p.best_number).max(); - let sync_state = - if let Some(n) = best_seen { - // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best queued block. - if n > self.best_queued_number && n - self.best_queued_number > MAJOR_SYNC_BLOCKS.into() { - SyncState::Downloading - } else { - SyncState::Idle - } + let sync_state = if let Some(n) = best_seen { + // A chain is classified as downloading if the provided best block is + // more than `MAJOR_SYNC_BLOCKS` behind the best queued block. 
+ if n > self.best_queued_number && n - self.best_queued_number > MAJOR_SYNC_BLOCKS.into() + { + SyncState::Downloading } else { SyncState::Idle - }; + } + } else { + SyncState::Idle + }; + + let warp_sync_progress = match (&self.warp_sync, &self.mode) { + (None, SyncMode::Warp) => + Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }), + (Some(sync), _) => Some(sync.progress()), + _ => None, + }; Status { state: sync_state, best_seen_block: best_seen, num_peers: self.peers.len() as u32, queued_blocks: self.queue_blocks.len() as u32, + state_sync: self.state_sync.as_ref().map(|s| s.progress()), + warp_sync: warp_sync_progress, } } - /// Number of active sync requests. + /// Number of active fork requests. This includes + /// requests that are pending or could be issued right away. pub fn num_sync_requests(&self) -> usize { - self.fork_targets.len() + self.fork_targets + .values() + .filter(|f| f.number <= self.best_queued_number) + .count() } /// Number of downloaded blocks. @@ -509,23 +642,26 @@ impl<B: BlockT> ChainSync<B> { /// Handle a new connected peer. /// /// Call this method whenever we connect to a new peer. - pub fn new_peer(&mut self, who: PeerId, best_hash: B::Hash, best_number: NumberFor<B>) - -> Result<Option<BlockRequest<B>>, BadPeer> - { + pub fn new_peer( + &mut self, + who: PeerId, + best_hash: B::Hash, + best_number: NumberFor<B>, + ) -> Result<Option<BlockRequest<B>>, BadPeer> { // There is nothing sync can get from the node that has no blockchain data. match self.block_status(&best_hash) { Err(e) => { debug!(target:"sync", "Error reading blockchain: {:?}", e); Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) - } + }, Ok(BlockStatus::KnownBad) => { info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); Err(BadPeer(who, rep::BAD_BLOCK)) - } + }, Ok(BlockStatus::Unknown) => { if best_number.is_zero() { info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)); + return Err(BadPeer(who, rep::GENESIS_MISMATCH)) } // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have // enough to do in the import queue that it's not worth kicking off @@ -537,65 +673,96 @@ impl<B: BlockT> ChainSync<B> { self.best_queued_hash, self.best_queued_number ); - self.peers.insert(who, PeerSync { - common_number: self.best_queued_number, - best_hash, - best_number, - state: PeerSyncState::Available, - recently_announced: Default::default() - }); + self.peers.insert( + who.clone(), + PeerSync { + peer_id: who, + common_number: self.best_queued_number, + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); return Ok(None) } + if let SyncMode::Warp = &self.mode { + if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() + { + log::debug!(target: "sync", "Starting warp state sync."); + if let Some(provider) = &self.warp_sync_provider { + self.warp_sync = + Some(WarpSync::new(self.client.clone(), provider.clone())); + } + } + } + // If we are at genesis, just start downloading.
- if self.best_queued_number.is_zero() { - debug!(target:"sync", "New peer with best hash {} ({}).", best_hash, best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: Zero::zero(), + let (state, req) = if self.best_queued_number.is_zero() { + debug!( + target:"sync", + "New peer with best hash {} ({}).", best_hash, best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); - self.pending_requests.add(&who); - return Ok(None) - } + ); - let common_best = std::cmp::min(self.best_queued_number, best_number); + (PeerSyncState::Available, None) + } else { + let common_best = std::cmp::min(self.best_queued_number, best_number); - debug!(target:"sync", - "New peer with unknown best hash {} ({}), searching for common ancestor.", - best_hash, - best_number - ); + debug!( + target:"sync", + "New peer with unknown best hash {} ({}), searching for common ancestor.", + best_hash, + best_number + ); + + ( + PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }, + Some(ancestry_request::(common_best)), + ) + }; self.pending_requests.add(&who); - self.peers.insert(who, PeerSync { - common_number: Zero::zero(), - best_hash, - best_number, - state: PeerSyncState::AncestorSearch { - current: common_best, - start: self.best_queued_number, - state: AncestorSearchState::ExponentialBackoff(One::one()), + self.peers.insert( + who, + PeerSync { + peer_id: who, + common_number: Zero::zero(), + best_hash, + best_number, + state, }, - recently_announced: Default::default() - }); + ); - Ok(Some(ancestry_request::(common_best))) - } - Ok(BlockStatus::Queued) | Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) => { - debug!(target:"sync", "New peer with known best hash {} ({}).", best_hash, best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: best_number, + Ok(req) + }, + Ok(BlockStatus::Queued) | + Ok(BlockStatus::InChainWithState) | + Ok(BlockStatus::InChainPruned) => { + debug!( + target: "sync", + "New peer with known best hash {} ({}).", best_hash, best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); + ); + self.peers.insert( + who, + PeerSync { + peer_id: who, + common_number: std::cmp::min(self.best_queued_number, best_number), + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); self.pending_requests.add(&who); Ok(None) - } + }, } } @@ -608,17 +775,13 @@ impl ChainSync { /// Schedule a justification request for the given block. pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { let client = &self.client; - self.extra_justifications.schedule((*hash, number), |base, block| { - is_descendent_of(&**client, base, block) - }) + self.extra_justifications + .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) } - /// Schedule a finality proof request for the given block. - pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_finality_proofs.schedule((*hash, number), |base, block| { - is_descendent_of(&**client, base, block) - }) + /// Clear all pending justification requests. + pub fn clear_justification_requests(&mut self) { + self.extra_justifications.reset(); } /// Request syncing for the given block from given set of peers. 
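+	///
+	/// If the given set of peers is empty, every connected peer that is at or above the
+	/// requested block number is asked instead (see the filter right below).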
@@ -630,7 +793,9 @@ impl ChainSync { number: NumberFor, ) { if peers.is_empty() { - peers = self.peers.iter() + peers = self + .peers + .iter() // Only request blocks from peers who are ahead or on a par. .filter(|(_, peer)| peer.best_number >= number) .map(|(id, _)| id.clone()) @@ -648,14 +813,14 @@ impl ChainSync { if self.is_known(&hash) { debug!(target: "sync", "Refusing to sync known hash {:?}", hash); - return; + return } trace!(target: "sync", "Downloading requested old fork {:?}", hash); for peer_id in &peers { if let Some(peer) = self.peers.get_mut(peer_id) { - if let PeerSyncState::AncestorSearch {..} = peer.state { - continue; + if let PeerSyncState::AncestorSearch { .. } = peer.state { + continue } if number > peer.best_number { @@ -667,23 +832,25 @@ impl ChainSync { } self.fork_targets - .entry(hash.clone()) - .or_insert_with(|| ForkTarget { - number, - peers: Default::default(), - parent_hash: None, - }) - .peers.extend(peers); + .entry(*hash) + .or_insert_with(|| ForkTarget { number, peers: Default::default(), parent_hash: None }) + .peers + .extend(peers); } /// Get an iterator over all scheduled justification requests. - pub fn justification_requests(&mut self) -> impl Iterator)> + '_ { + pub fn justification_requests( + &mut self, + ) -> impl Iterator)> + '_ { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); std::iter::from_fn(move || { if let Some((peer, request)) = matcher.next(&peers) { - peers.get_mut(&peer) - .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") + peers + .get_mut(&peer) + .expect( + "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", + ) .state = PeerSyncState::DownloadingJustification(request.0); let req = message::generic::BlockRequest { id: 0, @@ -691,31 +858,7 @@ impl ChainSync { from: message::FromBlock::Hash(request.0), to: None, direction: message::Direction::Ascending, - max: Some(1) - }; - Some((peer, req)) - } else { - None - } - }) - } - - /// Get an iterator over all scheduled finality proof requests. - pub fn finality_proof_requests(&mut self) -> impl Iterator)> + '_ { - let peers = &mut self.peers; - let request_builder = &mut self.request_builder; - let mut matcher = self.extra_finality_proofs.matcher(); - std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(&peers) { - peers.get_mut(&peer) - .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") - .state = PeerSyncState::DownloadingFinalityProof(request.0); - let req = message::generic::FinalityProofRequest { - id: 0, - block: request.0, - request: request_builder.as_mut() - .map(|builder| builder.build_request_data(&request.0)) - .unwrap_or_default() + max: Some(1), }; Some((peer, req)) } else { @@ -726,7 +869,8 @@ impl ChainSync { /// Get an iterator over all block requests of all peers. 
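+	///
+	/// A minimal usage sketch; the surrounding protocol code is expected to drive this
+	/// and dispatch the requests (the `network` handle and its `send_request` method
+	/// here are hypothetical, for illustration only):
+	///
+	/// ```ignore
+	/// for (peer_id, request) in sync.block_requests() {
+	///     network.send_request(peer_id, request);
+	/// }
+	/// ```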
	pub fn block_requests(&mut self) -> impl Iterator)> + '_ {
-		if self.pending_requests.is_empty() {
+		if self.pending_requests.is_empty() || self.state_sync.is_some() || self.warp_sync.is_some()
+		{
			return Either::Left(std::iter::empty())
		}
		if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS {
@@ -734,10 +878,11 @@
			return Either::Left(std::iter::empty())
		}
		let major_sync = self.status().state == SyncState::Downloading;
+		let attrs = self.required_block_attributes();
		let blocks = &mut self.blocks;
-		let attrs = &self.required_block_attributes;
		let fork_targets = &mut self.fork_targets;
-		let last_finalized = self.client.info().finalized_number;
+		let last_finalized =
+			std::cmp::min(self.best_queued_number, self.client.info().finalized_number);
		let best_queued = self.best_queued_number;
		let client = &self.client;
		let queue = &self.queue_blocks;
@@ -748,7 +893,31 @@
				return None
			}

-			if let Some((range, req)) = peer_block_request(
+			// If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from the
+			// common number, the peer best number is higher than our best queued and the common
+			// number is smaller than the last finalized block number, we should do an ancestor
+			// search to find a better common block. If the queue is full we wait till all blocks
+			// are imported though.
+			if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() &&
+				best_queued < peer.best_number &&
+				peer.common_number < last_finalized &&
+				queue.len() <= MAJOR_SYNC_BLOCKS.into()
+			{
+				trace!(
+					target: "sync",
+					"Peer {:?} common block {} too far behind our best {}. Starting ancestry search.",
+					id,
+					peer.common_number,
+					best_queued,
+				);
+				let current = std::cmp::min(peer.best_number, best_queued);
+				peer.state = PeerSyncState::AncestorSearch {
+					current,
+					start: best_queued,
+					state: AncestorSearchState::ExponentialBackoff(One::one()),
+				};
+				Some((id, ancestry_request::(current)))
+			} else if let Some((range, req)) = peer_block_request(
				id,
				peer,
				blocks,
@@ -767,18 +936,14 @@
					req,
				);
				Some((id, req))
-			} else if let Some((hash, req)) = fork_sync_request(
-				id,
-				fork_targets,
-				best_queued,
-				last_finalized,
-				attrs,
-				|hash| if queue.contains(hash) {
-					BlockStatus::Queued
-				} else {
-					client.block_status(&BlockId::Hash(*hash)).unwrap_or(BlockStatus::Unknown)
-				},
-			) {
+			} else if let Some((hash, req)) =
+				fork_sync_request(id, fork_targets, best_queued, last_finalized, attrs, |hash| {
+					if queue.contains(hash) {
+						BlockStatus::Queued
+					} else {
+						client.block_status(&BlockId::Hash(*hash)).unwrap_or(BlockStatus::Unknown)
+					}
+				}) {
				trace!(target: "sync", "Downloading fork {:?} from {}", hash, id);
				peer.state = PeerSyncState::DownloadingStale(hash);
				Some((id, req))
@@ -789,6 +954,77 @@
		Either::Right(iter)
	}

+	/// Get a state request, if any.
+	pub fn state_request(&mut self) -> Option<(PeerId, StateRequest)> {
+		if self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) {
+			// Only one pending state request is allowed.
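+			// (State responses are large, and until the previous response is imported a
+			// second in-flight request would presumably just duplicate the same data.)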
+			return None
+		}
+		if let Some(sync) = &self.state_sync {
+			if sync.is_complete() {
+				return None
+			}
+			for (id, peer) in self.peers.iter_mut() {
+				if peer.state.is_available() && peer.common_number >= sync.target_block_num() {
+					trace!(target: "sync", "New StateRequest for {}", id);
+					peer.state = PeerSyncState::DownloadingState;
+					let request = sync.next_request();
+					return Some((*id, request))
+				}
+			}
+		}
+		if let Some(sync) = &self.warp_sync {
+			if sync.is_complete() {
+				return None
+			}
+			if let (Some(request), Some(target)) =
+				(sync.next_state_request(), sync.target_block_number())
+			{
+				for (id, peer) in self.peers.iter_mut() {
+					if peer.state.is_available() && peer.best_number >= target {
+						trace!(target: "sync", "New StateRequest for {}", id);
+						peer.state = PeerSyncState::DownloadingState;
+						return Some((*id, request))
+					}
+				}
+			}
+		}
+		None
+	}
+
+	/// Get a warp sync request, if any.
+	pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> {
+		if self
+			.peers
+			.iter()
+			.any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof)
+		{
+			// Only one pending warp proof request is allowed.
+			return None
+		}
+		if let Some(sync) = &self.warp_sync {
+			if sync.is_complete() {
+				return None
+			}
+			if let Some(request) = sync.next_warp_poof_request() {
+				let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect();
+				if !targets.is_empty() {
+					targets.sort();
+					let median = targets[targets.len() / 2];
+					// Find a random peer that is synced as much as the peer majority.
+					for (id, peer) in self.peers.iter_mut() {
+						if peer.state.is_available() && peer.best_number >= median {
+							trace!(target: "sync", "New WarpProofRequest for {}", id);
+							peer.state = PeerSyncState::DownloadingWarpProof;
+							return Some((*id, request))
+						}
+					}
+				}
+			}
+		}
+		None
+	}
+
	/// Handle a response from the remote to a block request that we made.
	///
	/// `request` must be the original request that triggered `response`.
@@ -800,185 +1036,311 @@
		&mut self,
		who: &PeerId,
		request: Option>,
-		response: BlockResponse
+		response: BlockResponse,
	) -> Result, BadPeer> {
		self.downloaded_blocks += response.blocks.len();
-		let mut new_blocks: Vec> =
-			if let Some(peer) = self.peers.get_mut(who) {
-				let mut blocks = response.blocks;
-				if request.as_ref().map_or(false, |r| r.direction == message::Direction::Descending) {
-					trace!(target: "sync", "Reversing incoming block list");
-					blocks.reverse()
-				}
-				self.pending_requests.add(who);
-				if request.is_some() {
-					match &mut peer.state {
-						PeerSyncState::DownloadingNew(start_block) => {
-							self.blocks.clear_peer_download(who);
-							let start_block = *start_block;
-							peer.state = PeerSyncState::Available;
-							validate_blocks::(&blocks, who)?;
+		let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) {
+			let mut blocks = response.blocks;
+			if request
+				.as_ref()
+				.map_or(false, |r| r.direction == message::Direction::Descending)
+			{
+				trace!(target: "sync", "Reversing incoming block list");
+				blocks.reverse()
+			}
+			self.pending_requests.add(who);
+			if let Some(request) = request {
+				match &mut peer.state {
+					PeerSyncState::DownloadingNew(_) => {
+						self.blocks.clear_peer_download(who);
+						peer.state = PeerSyncState::Available;
+						if let Some(start_block) =
+							validate_blocks::(&blocks, who, Some(request))?
+ { self.blocks.insert(start_block, blocks, who.clone()); - self.blocks - .drain(self.best_queued_number + One::one()) - .into_iter() - .map(|block_data| { - IncomingBlock { - hash: block_data.block.hash, - header: block_data.block.header, - body: block_data.block.body, - justification: block_data.block.justification, - origin: block_data.origin, - allow_missing_state: true, - import_existing: false, - } - }).collect() } - PeerSyncState::DownloadingStale(_) => { - peer.state = PeerSyncState::Available; - if blocks.is_empty() { - debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(who.clone(), rep::NO_BLOCK)); - } - validate_blocks::(&blocks, who)?; - blocks.into_iter().map(|b| { + self.drain_blocks() + }, + PeerSyncState::DownloadingStale(_) => { + peer.state = PeerSyncState::Available; + if blocks.is_empty() { + debug!(target: "sync", "Empty block response from {}", who); + return Err(BadPeer(*who, rep::NO_BLOCK)) + } + validate_blocks::(&blocks, who, Some(request))?; + blocks + .into_iter() + .map(|b| { + let justifications = b + .justifications + .or(legacy_justification_mapping(b.justification)); IncomingBlock { hash: b.hash, header: b.header, body: b.body, - justification: b.justification, - origin: Some(who.clone()), + indexed_body: None, + justifications, + origin: Some(*who), allow_missing_state: true, - import_existing: false, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, } - }).collect() + }) + .collect() + }, + PeerSyncState::AncestorSearch { current, start, state } => { + let matching_hash = match (blocks.get(0), self.client.hash(*current)) { + (Some(block), Ok(maybe_our_block_hash)) => { + trace!( + target: "sync", + "Got ancestry block #{} ({}) from peer {}", + current, + block.hash, + who, + ); + maybe_our_block_hash.filter(|x| x == &block.hash) + }, + (None, _) => { + debug!( + target: "sync", + "Invalid response when searching for ancestor from {}", + who, + ); + return Err(BadPeer(*who, rep::UNKNOWN_ANCESTOR)) + }, + (_, Err(e)) => { + info!( + target: "sync", + "❌ Error answering legitimate blockchain query: {:?}", + e, + ); + return Err(BadPeer(*who, rep::BLOCKCHAIN_READ_ERROR)) + }, + }; + if matching_hash.is_some() { + if *start < self.best_queued_number && + self.best_queued_number <= peer.best_number + { + // We've made progress on this chain since the search was started. + // Opportunistically set common number to updated number + // instead of the one that started the search. 
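+							// E.g. (hypothetical numbers): if the search started when we had
+							// queued #100 but we have since queued up to #150 and the peer's
+							// best is #200, the common number can safely jump to #150 instead
+							// of staying at the stale #100.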
+ peer.common_number = self.best_queued_number; + } else if peer.common_number < *current { + peer.common_number = *current; + } } - PeerSyncState::AncestorSearch { current, start, state } => { - let matching_hash = match (blocks.get(0), self.client.hash(*current)) { - (Some(block), Ok(maybe_our_block_hash)) => { - trace!(target: "sync", "Got ancestry block #{} ({}) from peer {}", current, block.hash, who); - maybe_our_block_hash.filter(|x| x == &block.hash) - }, - (None, _) => { - debug!(target: "sync", "Invalid response when searching for ancestor from {}", who); - return Err(BadPeer(who.clone(), rep::UNKNOWN_ANCESTOR)) - }, - (_, Err(e)) => { - info!("❌ Error answering legitimate blockchain query: {:?}", e); - return Err(BadPeer(who.clone(), rep::BLOCKCHAIN_READ_ERROR)) - } + if matching_hash.is_none() && current.is_zero() { + trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); + return Err(BadPeer(*who, rep::GENESIS_MISMATCH)) + } + if let Some((next_state, next_num)) = + handle_ancestor_search_state(state, *current, matching_hash.is_some()) + { + peer.state = PeerSyncState::AncestorSearch { + current: next_num, + start: *start, + state: next_state, }; - if matching_hash.is_some() { - if *start < self.best_queued_number && self.best_queued_number <= peer.best_number { - // We've made progress on this chain since the search was started. - // Opportunistically set common number to updated number - // instead of the one that started the search. - peer.common_number = self.best_queued_number; - } - else if peer.common_number < *current { - peer.common_number = *current; - } - } - if matching_hash.is_none() && current.is_zero() { - trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); - return Err(BadPeer(who.clone(), rep::GENESIS_MISMATCH)) - } - if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) { - peer.state = PeerSyncState::AncestorSearch { - current: next_num, - start: *start, - state: next_state, - }; - return Ok(OnBlockData::Request(who.clone(), ancestry_request::(next_num))) - } else { - // Ancestry search is complete. Check if peer is on a stale fork unknown to us and - // add it to sync targets if necessary. - trace!(target: "sync", "Ancestry search complete. Ours={} ({}), Theirs={} ({}), Common={:?} ({})", - self.best_queued_hash, - self.best_queued_number, + return Ok(OnBlockData::Request(*who, ancestry_request::(next_num))) + } else { + // Ancestry search is complete. Check if peer is on a stale fork unknown + // to us and add it to sync targets if necessary. + trace!( + target: "sync", + "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", + self.best_queued_hash, + self.best_queued_number, + peer.best_hash, + peer.best_number, + matching_hash, + peer.common_number, + ); + if peer.common_number < peer.best_number && + peer.best_number < self.best_queued_number + { + trace!( + target: "sync", + "Added fork target {} for {}", peer.best_hash, - peer.best_number, - matching_hash, - peer.common_number, + who, ); - if peer.common_number < peer.best_number - && peer.best_number < self.best_queued_number - { - trace!(target: "sync", "Added fork target {} for {}" , peer.best_hash, who); - self.fork_targets - .entry(peer.best_hash.clone()) - .or_insert_with(|| ForkTarget { - number: peer.best_number, - parent_hash: None, - peers: Default::default(), - }) - .peers.insert(who.clone()); - } - peer.state = PeerSyncState::Available; - Vec::new() + self.fork_targets + .entry(peer.best_hash) + .or_insert_with(|| ForkTarget { + number: peer.best_number, + parent_hash: None, + peers: Default::default(), + }) + .peers + .insert(*who); } + peer.state = PeerSyncState::Available; + Vec::new() } - - | PeerSyncState::Available - | PeerSyncState::DownloadingJustification(..) - | PeerSyncState::DownloadingFinalityProof(..) => Vec::new() - } - } else { - // When request.is_none() this is a block announcement. Just accept blocks. - validate_blocks::(&blocks, who)?; - blocks.into_iter().map(|b| { + }, + PeerSyncState::Available | + PeerSyncState::DownloadingJustification(..) | + PeerSyncState::DownloadingState | + PeerSyncState::DownloadingWarpProof => Vec::new(), + } + } else { + // When request.is_none() this is a block announcement. Just accept blocks. + validate_blocks::(&blocks, who, None)?; + blocks + .into_iter() + .map(|b| { + let justifications = + b.justifications.or(legacy_justification_mapping(b.justification)); IncomingBlock { hash: b.hash, header: b.header, body: b.body, - justification: b.justification, - origin: Some(who.clone()), + indexed_body: None, + justifications, + origin: Some(*who), allow_missing_state: true, import_existing: false, + skip_execution: true, + state: None, } - }).collect() - } - } else { - Vec::new() - }; - - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. - let is_recent = new_blocks.first() - .map(|block| { - self.peers.iter().any(|(_, peer)| peer.recently_announced.contains(&block.hash)) - }) - .unwrap_or(false); + }) + .collect() + } + } else { + // We don't know of this peer, so we also did not request anything from it. + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + }; - if !is_recent && new_blocks.last().map_or(false, |b| self.is_known(&b.hash)) { - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. - debug!(target: "sync", "Ignoring known blocks from {}", who); - return Err(BadPeer(who.clone(), rep::KNOWN_BLOCK)); - } - let orig_len = new_blocks.len(); - new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); - if new_blocks.len() != orig_len { - debug!(target: "sync", "Ignoring {} blocks that are already queued", orig_len - new_blocks.len()); - } + Ok(self.validate_and_queue_blocks(new_blocks)) + } - let origin = - if is_recent { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; + /// Handle a response from the remote to a state request that we made. + /// + /// Returns next request if any. 
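+	///
+	/// The response is fed to whichever machine is currently active: the regular state
+	/// sync, or the state phase of warp sync. A state response that arrives while neither
+	/// is running is treated as unrequested and the peer is reported.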
+ pub fn on_state_data( + &mut self, + who: &PeerId, + response: StateResponse, + ) -> Result, BadPeer> { + let import_result = if let Some(sync) = &mut self.state_sync { + debug!( + target: "sync", + "Importing state data from {} with {} keys, {} proof nodes.", + who, + response.entries.len(), + response.proof.len(), + ); + sync.import(response) + } else if let Some(sync) = &mut self.warp_sync { + debug!( + target: "sync", + "Importing state data from {} with {} keys, {} proof nodes.", + who, + response.entries.len(), + response.proof.len(), + ); + sync.import_state(response) + } else { + debug!(target: "sync", "Ignored obsolete state response from {}", who); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + }; - if let Some((h, n)) = new_blocks.last().and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) { - trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), h, origin); - self.on_block_queued(h, n) + match import_result { + state::ImportResult::Import(hash, header, state) => { + let origin = BlockOrigin::NetworkInitialSync; + let block = IncomingBlock { + hash, + header: Some(header), + body: None, + indexed_body: None, + justifications: None, + origin: None, + allow_missing_state: true, + import_existing: true, + skip_execution: self.skip_execution(), + state: Some(state), + }; + debug!(target: "sync", "State sync is complete. Import is queued"); + Ok(OnStateData::Import(origin, block)) + }, + state::ImportResult::Continue(request) => + Ok(OnStateData::Request(who.clone(), request)), + state::ImportResult::BadResponse => { + debug!(target: "sync", "Bad state data received from {}", who); + Err(BadPeer(*who, rep::BAD_BLOCK)) + }, } + } - self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + /// Handle a response from the remote to a warp proof request that we made. + /// + /// Returns next request. 
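+	///
+	/// Depending on how far the proof import has progressed, this yields either a
+	/// follow-up warp proof request or, once the target header is reached, the first
+	/// request of the state download phase.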
+ pub fn on_warp_sync_data( + &mut self, + who: &PeerId, + response: warp::EncodedProof, + ) -> Result, BadPeer> { + let import_result = if let Some(sync) = &mut self.warp_sync { + debug!( + target: "sync", + "Importing warp proof data from {}, {} bytes.", + who, + response.0.len(), + ); + sync.import_warp_proof(response) + } else { + debug!(target: "sync", "Ignored obsolete warp sync response from {}", who); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + }; + + match import_result { + warp::WarpProofImportResult::StateRequest(request) => + Ok(OnWarpSyncData::StateRequest(*who, request)), + warp::WarpProofImportResult::WarpProofRequest(request) => + Ok(OnWarpSyncData::WarpProofRequest(*who, request)), + warp::WarpProofImportResult::BadResponse => { + debug!(target: "sync", "Bad proof data received from {}", who); + Err(BadPeer(*who, rep::BAD_BLOCK)) + }, + } + } - Ok(OnBlockData::Import(origin, new_blocks)) + fn validate_and_queue_blocks( + &mut self, + mut new_blocks: Vec>, + ) -> OnBlockData { + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!( + target: "sync", + "Ignoring {} blocks that are already queued", + orig_len - new_blocks.len(), + ); + } + + let origin = if self.status().state != SyncState::Downloading { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((h, n)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) + { + trace!( + target:"sync", + "Accepted {} blocks ({:?}) with origin {:?}", + new_blocks.len(), + h, + origin, + ); + self.on_block_queued(h, n) + } + self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + OnBlockData::Import(origin, new_blocks) } /// Handle a response from the remote to a justification request that we made. @@ -987,16 +1349,17 @@ impl ChainSync { /// /// Returns `Some` if this produces a justification that must be imported /// into the import queue. - pub fn on_block_justification - (&mut self, who: PeerId, response: BlockResponse) -> Result, BadPeer> - { - let peer = - if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); - return Ok(OnBlockJustification::Nothing) - }; + pub fn on_block_justification( + &mut self, + who: PeerId, + response: BlockResponse, + ) -> Result, BadPeer> { + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); + return Ok(OnBlockJustification::Nothing) + }; self.pending_requests.add(&who); if let PeerSyncState::DownloadingJustification(hash) = peer.state { @@ -1005,18 +1368,22 @@ impl ChainSync { // We only request one justification at a time let justification = if let Some(block) = response.blocks.into_iter().next() { if hash != block.hash { - info!( + warn!( target: "sync", - "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash + "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", + who, + hash, + block.hash, ); - return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); + return Err(BadPeer(who, rep::BAD_JUSTIFICATION)) } - block.justification + block.justifications.or(legacy_justification_mapping(block.justification)) } else { // we might have asked the peer for a justification on a block that we assumed it // had but didn't (regardless of whether it had a justification for it or not). 
- trace!(target: "sync", + trace!( + target: "sync", "Peer {:?} provided empty response for justification request {:?}", who, hash, @@ -1025,49 +1392,16 @@ impl ChainSync { None }; - if let Some((peer, hash, number, j)) = self.extra_justifications.on_response(who, justification) { - return Ok(OnBlockJustification::Import { peer, hash, number, justification: j }) + if let Some((peer, hash, number, j)) = + self.extra_justifications.on_response(who, justification) + { + return Ok(OnBlockJustification::Import { peer, hash, number, justifications: j }) } } Ok(OnBlockJustification::Nothing) } - /// Handle new finality proof data. - pub fn on_block_finality_proof - (&mut self, who: PeerId, resp: FinalityProofResponse) -> Result, BadPeer> - { - let peer = - if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_finality_proof_data with a bad peer ID"); - return Ok(OnBlockFinalityProof::Nothing) - }; - - self.pending_requests.add(&who); - if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one finality proof at a time. - if hash != resp.block { - info!( - target: "sync", - "💔 Invalid block finality proof provided: requested: {:?} got: {:?}", - hash, - resp.block - ); - return Err(BadPeer(who, rep::BAD_FINALITY_PROOF)); - } - - if let Some((peer, hash, number, p)) = self.extra_finality_proofs.on_response(who, resp.proof) { - return Ok(OnBlockFinalityProof::Import { peer, hash, number, proof: p }) - } - } - - Ok(OnBlockFinalityProof::Nothing) - } - /// A batch of blocks have been processed, with or without errors. /// /// Call this when a batch of blocks have been processed by the import @@ -1078,7 +1412,7 @@ impl ChainSync { &'a mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) -> impl Iterator), BadPeer>> + 'a { trace!(target: "sync", "Imported {} of {}", imported, count); @@ -1090,7 +1424,7 @@ impl ChainSync { } for (result, hash) in results { if has_error { - continue; + continue } if result.is_err() { @@ -1098,71 +1432,113 @@ impl ChainSync { } match result { - Ok(BlockImportResult::ImportedKnown(_number)) => {} - Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { + Ok(BlockImportStatus::ImportedKnown(number, who)) => { + if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { + peer.update_common_number(number); + } + }, + Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( target: "sync", "Block imported clears all pending justification requests {}: {:?}", number, - hash + hash, ); - self.extra_justifications.reset() + self.clear_justification_requests(); } if aux.needs_justification { - trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); + trace!( + target: "sync", + "Block imported but requires justification {}: {:?}", + number, + hash, + ); self.request_justification(&hash, number); } if aux.bad_justification { - if let Some(peer) = who { - info!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(peer, rep::BAD_JUSTIFICATION))); + if let Some(ref peer) = who { + warn!("💔 Sent block with bad justification to import"); + output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); } } - if aux.needs_finality_proof { - trace!(target: "sync", "Block imported but requires finality proof {}: {:?}", number, hash); - 
self.request_finality_proof(&hash, number); + if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { + peer.update_common_number(number); } - - if number > self.best_imported_number { - self.best_imported_number = number; + let state_sync_complete = + self.state_sync.as_ref().map_or(false, |s| s.target() == hash); + if state_sync_complete { + info!( + target: "sync", + "State sync is complete ({} MiB), restarting block sync.", + self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)), + ); + self.state_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); + } + let warp_sync_complete = self + .warp_sync + .as_ref() + .map_or(false, |s| s.target_block_hash() == Some(hash)); + if warp_sync_complete { + info!( + target: "sync", + "Warp sync is complete ({} MiB), restarting block sync.", + self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), + ); + self.warp_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); } }, - Err(BlockImportError::IncompleteHeader(who)) => { + Err(BlockImportError::IncompleteHeader(who)) => if let Some(peer) = who { - warn!("💔 Peer sent block with incomplete header to import"); + warn!( + target: "sync", + "💔 Peer sent block with incomplete header to import", + ); output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); output.extend(self.restart()); - } - }, - Err(BlockImportError::VerificationFailed(who, e)) => { + }, + Err(BlockImportError::VerificationFailed(who, e)) => if let Some(peer) = who { - warn!("💔 Verification failed for block {:?} received from peer: {}, {:?}", hash, peer, e); + warn!( + target: "sync", + "💔 Verification failed for block {:?} received from peer: {}, {:?}", + hash, + peer, + e, + ); output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); output.extend(self.restart()); - } - }, - Err(BlockImportError::BadBlock(who)) => { + }, + Err(BlockImportError::BadBlock(who)) => if let Some(peer) = who { - info!("💔 Block {:?} received from peer {} has been blacklisted", hash, peer); + warn!( + target: "sync", + "💔 Block {:?} received from peer {} has been blacklisted", + hash, + peer, + ); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - } - }, + }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded // in the meantime because other chain has been finalized. // Don't mark it as bad as it still may be synced if explicitly requested. trace!(target: "sync", "Obsolete block {:?}", hash); }, - e @ Err(BlockImportError::UnknownParent) | - e @ Err(BlockImportError::Other(_)) => { + e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); + self.state_sync = None; + self.warp_sync = None; output.extend(self.restart()); }, - Err(BlockImportError::Cancelled) => {} + Err(BlockImportError::Cancelled) => {}, }; } @@ -1174,33 +1550,46 @@ impl ChainSync { /// with or without errors. 
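+	///
+	/// The outcome is forwarded to the pending justification tracker, and all peers are
+	/// re-examined for requests that can now be dispatched.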
pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; - self.extra_justifications.try_finalize_root((hash, number), finalization_result, true); - self.pending_requests.set_all(); - } - - pub fn on_finality_proof_import(&mut self, req: (B::Hash, NumberFor), res: Result<(B::Hash, NumberFor), ()>) { - self.extra_finality_proofs.try_finalize_root(req, res, true); + self.extra_justifications + .try_finalize_root((hash, number), finalization_result, true); self.pending_requests.set_all(); } /// Notify about finalization of the given block. pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { let client = &self.client; - let r = self.extra_finality_proofs.on_block_finalized(hash, number, |base, block| { + let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { is_descendent_of(&**client, base, block) }); - if let Err(err) = r { - warn!(target: "sync", "💔 Error cleaning up pending extra finality proof data requests: {:?}", err) + if let SyncMode::LightState { skip_proofs, .. } = &self.mode { + if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { + // Finalized a recent block. + let mut heads: Vec<_> = + self.peers.iter().map(|(_, peer)| peer.best_number).collect(); + heads.sort(); + let median = heads[heads.len() / 2]; + if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { + if let Ok(Some(header)) = self.client.header(BlockId::hash(hash.clone())) { + log::debug!( + target: "sync", + "Starting state sync for #{} ({})", + number, + hash, + ); + self.state_sync = + Some(StateSync::new(self.client.clone(), header, *skip_proofs)); + } + } + } } - let client = &self.client; - let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - if let Err(err) = r { - warn!(target: "sync", "💔 Error cleaning up pending extra justification data requests: {:?}", err); + warn!( + target: "sync", + "💔 Error cleaning up pending extra justification data requests: {:?}", + err, + ); } } @@ -1217,15 +1606,12 @@ impl ChainSync { self.best_queued_hash = *hash; // Update common blocks for (n, peer) in self.peers.iter_mut() { - if let PeerSyncState::AncestorSearch {..} = peer.state { + if let PeerSyncState::AncestorSearch { .. } = peer.state { // Wait for ancestry search to complete first. - continue; + continue } - let new_common_number = if peer.best_number >= number { - number - } else { - peer.best_number - }; + let new_common_number = + if peer.best_number >= number { number } else { peer.best_number }; trace!( target: "sync", "Updating peer {} info, ours={}, common={}->{}, their best={}", @@ -1247,7 +1633,15 @@ impl ChainSync { /// is capped. /// /// Returns [`HasSlotForBlockAnnounceValidation`] to inform about the result. - fn has_slot_for_block_announce_validation(&mut self, peer: &PeerId) -> HasSlotForBlockAnnounceValidation { + /// + /// # Note + /// + /// It is *required* to call [`Self::peer_block_announce_validation_finished`] when the + /// validation is finished to clear the slot. 
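+	///
+	/// Two independent limits apply: a global cap on concurrent validations across all
+	/// peers and a per-peer cap; each exhausted limit is reported through its own
+	/// variant of the returned enum.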
+ fn has_slot_for_block_announce_validation( + &mut self, + peer: &PeerId, + ) -> HasSlotForBlockAnnounceValidation { if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached } @@ -1290,15 +1684,18 @@ impl ChainSync { ); if number.is_zero() { - self.block_announce_validation.push(async move { - warn!( - target: "sync", - "💔 Ignored genesis block (#0) announcement from {}: {}", - who, - hash, - ); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } - }.boxed()); + self.block_announce_validation.push( + async move { + warn!( + target: "sync", + "💔 Ignored genesis block (#0) announcement from {}: {}", + who, + hash, + ); + PreValidateBlockAnnounce::Skip + } + .boxed(), + ); return } @@ -1306,18 +1703,21 @@ impl ChainSync { match self.has_slot_for_block_announce_validation(&who) { HasSlotForBlockAnnounceValidation::Yes => {}, HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached => { - self.block_announce_validation.push(async move { - warn!( - target: "sync", - "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", - number, - hash, - who, - ); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } - }.boxed()); + self.block_announce_validation.push( + async move { + warn!( + target: "sync", + "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", + number, + hash, + who, + ); + PreValidateBlockAnnounce::Skip + } + .boxed(), + ); return - } + }, HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { self.block_announce_validation.push(async move { warn!( @@ -1327,10 +1727,10 @@ impl ChainSync { hash, who, ); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } + PreValidateBlockAnnounce::Skip }.boxed()); return - } + }, } // Let external validator check the block announcement. @@ -1338,28 +1738,36 @@ impl ChainSync { let future = self.block_announce_validator.validate(&header, assoc_data); let hash = hash.clone(); - self.block_announce_validation.push(async move { - match future.await { - Ok(Validation::Success { is_new_best }) => PreValidateBlockAnnounce::Process { - is_new_best: is_new_best || is_best, - announce, - who, - }, - Ok(Validation::Failure) => { - debug!( - target: "sync", - "Block announcement validation of block {} from {} failed", - hash, + self.block_announce_validation.push( + async move { + match future.await { + Ok(Validation::Success { is_new_best }) => PreValidateBlockAnnounce::Process { + is_new_best: is_new_best || is_best, + announce, who, - ); - PreValidateBlockAnnounce::Failure { who } - } - Err(e) => { - error!(target: "sync", "💔 Block announcement validation errored: {}", e); - PreValidateBlockAnnounce::Nothing { is_best, who, announce } + }, + Ok(Validation::Failure { disconnect }) => { + debug!( + target: "sync", + "Block announcement validation of block {:?} from {} failed", + hash, + who, + ); + PreValidateBlockAnnounce::Failure { who, disconnect } + }, + Err(e) => { + debug!( + target: "sync", + "💔 Block announcement validation of block {:?} errored: {}", + hash, + e, + ); + PreValidateBlockAnnounce::Error { who } + }, } } - }.boxed()); + .boxed(), + ); } /// Poll block announce validation. @@ -1369,21 +1777,34 @@ impl ChainSync { /// /// This should be polled until it returns [`Poll::Pending`]. 
	///
-	/// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to import passed
-	/// header (call `on_block_data`). The network request isn't sent in this case.
+	/// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to
+	/// import the passed header (call `on_block_data`). The network request isn't sent in this case.
	pub fn poll_block_announce_validation(
		&mut self,
		cx: &mut std::task::Context,
	) -> Poll> {
		match self.block_announce_validation.poll_next_unpin(cx) {
-			Poll::Ready(Some(res)) => Poll::Ready(self.finish_block_announce_validation(res)),
+			Poll::Ready(Some(res)) => {
+				self.peer_block_announce_validation_finished(&res);
+				Poll::Ready(self.finish_block_announce_validation(res))
+			},
			_ => Poll::Pending,
		}
	}

-	/// Should be called when a block announce validation was finished, to update the stats
-	/// of the given peer.
-	fn peer_block_announce_validation_finished(&mut self, peer: &PeerId) {
+	/// Should be called when a block announce validation is finished, to update the slots
+	/// of the peer that sent the block announce.
+	fn peer_block_announce_validation_finished(
+		&mut self,
+		res: &PreValidateBlockAnnounce,
+	) {
+		let peer = match res {
+			PreValidateBlockAnnounce::Failure { who, .. } |
+			PreValidateBlockAnnounce::Process { who, .. } |
+			PreValidateBlockAnnounce::Error { who } => who,
+			PreValidateBlockAnnounce::Skip => return,
+		};
+
		match self.block_announce_validation_per_peer_stats.entry(peer.clone()) {
			Entry::Vacant(_) => {
				error!(
@@ -1393,10 +1814,11 @@
				);
			},
			Entry::Occupied(mut entry) => {
-				if entry.get_mut().saturating_sub(1) == 0 {
+				*entry.get_mut() = entry.get().saturating_sub(1);
+				if *entry.get() == 0 {
					entry.remove();
				}
-			}
+			},
		}
	}

@@ -1406,24 +1828,38 @@
		pre_validation_result: PreValidateBlockAnnounce,
	) -> PollBlockAnnounceValidation {
		let (announce, is_best, who) = match pre_validation_result {
-			PreValidateBlockAnnounce::Nothing { is_best, who, announce } => {
-				self.peer_block_announce_validation_finished(&who);
-				return PollBlockAnnounceValidation::Nothing { is_best, who, header: announce.header }
-			},
-			PreValidateBlockAnnounce::Failure { who } => {
-				self.peer_block_announce_validation_finished(&who);
-				return PollBlockAnnounceValidation::Failure { who }
+			PreValidateBlockAnnounce::Failure { who, disconnect } => {
+				debug!(
+					target: "sync",
+					"Failed announce validation: {:?}, disconnect: {}",
+					who,
+					disconnect,
+				);
+				return PollBlockAnnounceValidation::Failure { who, disconnect }
			},
-			PreValidateBlockAnnounce::Process { announce, is_new_best, who } => {
-				self.peer_block_announce_validation_finished(&who);
-				(announce, is_new_best, who)
+			PreValidateBlockAnnounce::Process { announce, is_new_best, who } =>
+				(announce, is_new_best, who),
+			PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => {
+				debug!(
+					target: "sync",
+					"Ignored announce validation",
+				);
+				return PollBlockAnnounceValidation::Skip
			},
		};

-		let header = announce.header;
-		let number = *header.number();
-		let hash = header.hash();
-		let parent_status = self.block_status(header.parent_hash()).unwrap_or(BlockStatus::Unknown);
+		trace!(
+			target: "sync",
+			"Finished block announce validation: from {:?}: {:?}. 
local_best={}", + who, + announce.summary(), + is_best, + ); + + let number = *announce.header.number(); + let hash = announce.header.hash(); + let parent_status = + self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); let known_parent = parent_status != BlockStatus::Unknown; let ancient_parent = parent_status == BlockStatus::InChainPruned; @@ -1432,33 +1868,29 @@ impl ChainSync { peer } else { error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } }; - while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { - peer.recently_announced.pop_front(); - } - peer.recently_announced.push_back(hash.clone()); - if is_best { // update their best block peer.best_number = number; peer.best_hash = hash; } - if let PeerSyncState::AncestorSearch {..} = peer.state { - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + if let PeerSyncState::AncestorSearch { .. } = peer.state { + trace!(target: "sync", "Peer state is ancestor search."); + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } // If the announced block is the best they have and is not ahead of us, our common number // is either one further ahead or it's the one they just announced, if we know about it. if is_best { if known && self.best_queued_number >= number { - peer.common_number = number - } else if header.parent_hash() == &self.best_queued_hash - || known_parent && self.best_queued_number >= number + peer.update_common_number(number); + } else if announce.header.parent_hash() == &self.best_queued_hash || + known_parent && self.best_queued_number >= number { - peer.common_number = number - One::one(); + peer.update_common_number(number - One::one()); } } self.pending_requests.add(&who); @@ -1469,72 +1901,99 @@ impl ChainSync { if let Some(target) = self.fork_targets.get_mut(&hash) { target.peers.insert(who.clone()); } - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } if ancient_parent { - trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); - return PollBlockAnnounceValidation::Nothing { is_best, who, header } + trace!( + target: "sync", + "Ignored ancient block announced from {}: {} {:?}", + who, + hash, + announce.header, + ); + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } - let requires_additional_data = !self.role.is_light() || !known_parent; + let requires_additional_data = self.mode != SyncMode::Light || !known_parent; if !requires_additional_data { - trace!(target: "sync", "Importing new header announced from {}: {} {:?}", who, hash, header); - return PollBlockAnnounceValidation::ImportHeader { is_best, header, who } + trace!( + target: "sync", + "Importing new header announced from {}: {} {:?}", + who, + hash, + announce.header, + ); + return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who } } - if number <= self.best_queued_number { + if self.status().state == SyncState::Idle { trace!( target: "sync", - "Added sync target for block announced from {}: {} {:?}", who, hash, header + "Added sync target for block announced from {}: {} {:?}", + who, + hash, + announce.summary(), ); self.fork_targets - .entry(hash.clone()) + .entry(hash) .or_insert_with(|| ForkTarget { number, - parent_hash: Some(*header.parent_hash()), + 
parent_hash: Some(*announce.header.parent_hash()),
					peers: Default::default(),
				})
-				.peers.insert(who.clone());
+				.peers
+				.insert(who);
		}

-		PollBlockAnnounceValidation::Nothing { is_best, who, header }
+		PollBlockAnnounceValidation::Nothing { is_best, who, announce }
	}

	/// Call when a peer has disconnected.
-	pub fn peer_disconnected(&mut self, who: &PeerId) {
+	/// Canceled obsolete block requests may result in some blocks being ready for
+	/// import, so this function checks for such blocks and returns them.
+	pub fn peer_disconnected(&mut self, who: &PeerId) -> Option> {
		self.blocks.clear_peer_download(who);
		self.peers.remove(who);
		self.extra_justifications.peer_disconnected(who);
-		self.extra_finality_proofs.peer_disconnected(who);
		self.pending_requests.set_all();
+		self.fork_targets.retain(|_, target| {
+			target.peers.remove(who);
+			!target.peers.is_empty()
+		});
+		let blocks = self.drain_blocks();
+		if !blocks.is_empty() {
+			Some(self.validate_and_queue_blocks(blocks))
+		} else {
+			None
+		}
	}

	/// Restart the sync process. This will reset all pending block requests and return an iterator
	/// of new block requests to make to peers. Peers that were downloading finality data (i.e.
-	/// their state was `DownloadingJustification` or `DownloadingFinalityProof`) are unaffected and
-	/// will stay in the same state.
+	/// their state was `DownloadingJustification`) are unaffected and will stay in the same state.
	fn restart<'a>(
		&'a mut self,
	) -> impl Iterator), BadPeer>> + 'a {
		self.blocks.clear();
-		let info = self.client.info();
-		self.best_queued_hash = info.best_hash;
-		self.best_queued_number = std::cmp::max(info.best_number, self.best_imported_number);
+		if let Err(e) = self.reset_sync_start_point() {
+			warn!(target: "sync", "💔 Unable to restart sync: {:?}", e);
+		}
		self.pending_requests.set_all();
		debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash);
		let old_peers = std::mem::take(&mut self.peers);

-		old_peers.into_iter().filter_map(move |(id, p)| {
-			// peers that were downloading justifications or finality proofs
+		old_peers.into_iter().filter_map(move |(id, mut p)| {
+			// peers that were downloading justifications
			// should be kept in that state.
			match p.state {
-				PeerSyncState::DownloadingJustification(_)
-				| PeerSyncState::DownloadingFinalityProof(_) => {
+				PeerSyncState::DownloadingJustification(_) => {
+					// We make sure our common number is at least something we have.
+					p.common_number = self.best_queued_number;
					self.peers.insert(id, p);
-					return None;
-				}
-				_ => {}
+					return None
+				},
+				_ => {},
			}

			// handle peers that were in other states.
@@ -1546,6 +2005,48 @@
		})
	}

+	/// Find a block to start sync from. If we sync with state, that's the latest block we have
+	/// state for.
+	fn reset_sync_start_point(&mut self) -> Result<(), ClientError> {
+		let info = self.client.info();
+		if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() {
+			warn!(
+				target: "sync",
+				"Can't use fast sync mode with a partially synced database. Reverting to full sync mode."
+			);
+			self.mode = SyncMode::Full;
+		}
+		if matches!(self.mode, SyncMode::Warp) && info.finalized_state.is_some() {
+			warn!(
+				target: "sync",
+				"Can't use warp sync mode with a partially synced database. Reverting to full sync mode."
+ ); + self.mode = SyncMode::Full; + } + self.import_existing = false; + self.best_queued_hash = info.best_hash; + self.best_queued_number = info.best_number; + if self.mode == SyncMode::Full { + if self.client.block_status(&BlockId::hash(info.best_hash))? != + BlockStatus::InChainWithState + { + self.import_existing = true; + // Latest state is missing, start with the last finalized state or genesis instead. + if let Some((hash, number)) = info.finalized_state { + debug!(target: "sync", "Starting from finalized state #{}", number); + self.best_queued_hash = hash; + self.best_queued_number = number; + } else { + debug!(target: "sync", "Restarting from genesis"); + self.best_queued_hash = Default::default(); + self.best_queued_number = Zero::zero(); + } + } + } + trace!(target: "sync", "Restarted sync at #{} ({:?})", self.best_queued_number, self.best_queued_hash); + Ok(()) + } + /// What is the status of the block corresponding to the given hash? fn block_status(&self, hash: &B::Hash) -> Result { if self.queue_blocks.contains(hash) { @@ -1561,7 +2062,9 @@ impl ChainSync { /// Is any peer downloading the given hash? fn is_already_downloading(&self, hash: &B::Hash) -> bool { - self.peers.iter().any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + self.peers + .iter() + .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } /// Return some key metrics. @@ -1570,20 +2073,54 @@ impl ChainSync { Metrics { queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - finality_proofs: self.extra_finality_proofs.metrics(), justifications: self.extra_justifications.metrics(), - _priv: () + _priv: (), } } + + /// Drain the downloaded block set up to the first gap. + fn drain_blocks(&mut self) -> Vec> { + self.blocks + .drain(self.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + let justifications = block_data + .block + .justifications + .or(legacy_justification_mapping(block_data.block.justification)); + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + indexed_body: block_data.block.indexed_body, + justifications, + origin: block_data.origin, + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: self.skip_execution(), + state: None, + } + }) + .collect() + } +} + +// This is purely during a backwards compatible transitionary period and should be removed +// once we can assume all nodes can send and receive multiple Justifications +// The ID tag is hardcoded here to avoid depending on the GRANDPA crate. +// See: https://github.com/paritytech/substrate/issues/8172 +fn legacy_justification_mapping( + justification: Option, +) -> Option { + justification.map(|just| (*b"FRNK", just).into()) } #[derive(Debug)] pub(crate) struct Metrics { pub(crate) queued_blocks: u32, pub(crate) fork_targets: u32, - pub(crate) finality_proofs: extra_requests::Metrics, pub(crate) justifications: extra_requests::Metrics, - _priv: () + _priv: (), } /// Request the ancestry for a block. 
Sends a request for header and justification for the given @@ -1595,12 +2132,12 @@ fn ancestry_request(block: NumberFor) -> BlockRequest { from: message::FromBlock::Number(block), to: None, direction: message::Direction::Ascending, - max: Some(1) + max: Some(1), } } -/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using to -/// try to find an ancestor block +/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using +/// to try to find an ancestor block #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum AncestorSearchState { /// Use exponential backoff to find an ancestor, then switch to binary search. @@ -1621,15 +2158,16 @@ pub enum AncestorSearchState { fn handle_ancestor_search_state( state: &AncestorSearchState, curr_block_num: NumberFor, - block_hash_match: bool + block_hash_match: bool, ) -> Option<(AncestorSearchState, NumberFor)> { let two = >::one() + >::one(); match state { AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { let next_distance_to_tip = *next_distance_to_tip; if block_hash_match && next_distance_to_tip == One::one() { - // We found the ancestor in the first step so there is no need to execute binary search. - return None; + // We found the ancestor in the first step so there is no need to execute binary + // search. + return None } if block_hash_match { let left = curr_block_num; @@ -1637,15 +2175,18 @@ fn handle_ancestor_search_state( let middle = left + (right - left) / two; Some((AncestorSearchState::BinarySearch(left, right), middle)) } else { - let next_block_num = curr_block_num.checked_sub(&next_distance_to_tip) - .unwrap_or_else(Zero::zero); + let next_block_num = + curr_block_num.checked_sub(&next_distance_to_tip).unwrap_or_else(Zero::zero); let next_distance_to_tip = next_distance_to_tip * two; - Some((AncestorSearchState::ExponentialBackoff(next_distance_to_tip), next_block_num)) + Some(( + AncestorSearchState::ExponentialBackoff(next_distance_to_tip), + next_block_num, + )) } - } + }, AncestorSearchState::BinarySearch(mut left, mut right) => { if left >= curr_block_num { - return None; + return None } if block_hash_match { left = curr_block_num; @@ -1655,7 +2196,7 @@ fn handle_ancestor_search_state( assert!(right >= left); let middle = left + (right - left) / two; Some((AncestorSearchState::BinarySearch(left, right), middle)) - } + }, } } @@ -1664,52 +2205,49 @@ fn peer_block_request( id: &PeerId, peer: &PeerSync, blocks: &mut BlockCollection, - attrs: &message::BlockAttributes, + attrs: message::BlockAttributes, max_parallel_downloads: u32, finalized: NumberFor, best_num: NumberFor, ) -> Option<(Range>, BlockRequest)> { if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. - return None; - } - if peer.common_number < finalized { + return None + } else if peer.common_number < finalized { trace!( target: "sync", "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", - id, finalized, peer.common_number, peer.best_number, best_num, + id, peer.common_number, finalized, peer.best_number, best_num, ); } - if let Some(range) = blocks.needed_blocks( - id.clone(), + let range = blocks.needed_blocks( + *id, MAX_BLOCKS_TO_REQUEST, peer.best_number, peer.common_number, max_parallel_downloads, MAX_DOWNLOAD_AHEAD, - ) { - // The end is not part of the range. 
- let last = range.end.saturating_sub(One::one()); + )?; - let from = if peer.best_number == last { - message::FromBlock::Hash(peer.best_hash) - } else { - message::FromBlock::Number(last) - }; + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); - let request = message::generic::BlockRequest { - id: 0, - fields: attrs.clone(), - from, - to: None, - direction: message::Direction::Descending, - max: Some((range.end - range.start).saturated_into::()) - }; - - Some((range, request)) + let from = if peer.best_number == last { + message::FromBlock::Hash(peer.best_hash) } else { - None - } + message::FromBlock::Number(last) + }; + + let request = message::generic::BlockRequest { + id: 0, + fields: attrs.clone(), + from, + to: None, + direction: message::Direction::Descending, + max: Some((range.end - range.start).saturated_into::()), + }; + + Some((range, request)) } /// Get pending fork sync targets for a peer. @@ -1718,18 +2256,17 @@ fn fork_sync_request( targets: &mut HashMap>, best_num: NumberFor, finalized: NumberFor, - attributes: &message::BlockAttributes, + attributes: message::BlockAttributes, check_block: impl Fn(&B::Hash) -> BlockStatus, -) -> Option<(B::Hash, BlockRequest)> -{ +) -> Option<(B::Hash, BlockRequest)> { targets.retain(|hash, r| { if r.number <= finalized { trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); - return false; + return false } if check_block(hash) != BlockStatus::Unknown { trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); - return false; + return false } true }); @@ -1746,27 +2283,34 @@ fn fork_sync_request( 1 }; trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); - return Some((hash.clone(), message::generic::BlockRequest { - id: 0, - fields: attributes.clone(), - from: message::FromBlock::Hash(hash.clone()), - to: None, - direction: message::Direction::Descending, - max: Some(count), - })) + return Some(( + hash.clone(), + message::generic::BlockRequest { + id: 0, + fields: attributes.clone(), + from: message::FromBlock::Hash(hash.clone()), + to: None, + direction: message::Direction::Descending, + max: Some(count), + }, + )) } } None } /// Returns `true` if the given `block` is a descendent of `base`. -fn is_descendent_of(client: &T, base: &Block::Hash, block: &Block::Hash) -> sp_blockchain::Result - where - Block: BlockT, - T: HeaderMetadata + ?Sized, +fn is_descendent_of( + client: &T, + base: &Block::Hash, + block: &Block::Hash, +) -> sp_blockchain::Result +where + Block: BlockT, + T: HeaderMetadata + ?Sized, { if base == block { - return Ok(false); + return Ok(false) } let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; @@ -1774,7 +2318,76 @@ fn is_descendent_of(client: &T, base: &Block::Hash, block: &Block::Has Ok(ancestor.hash == *base) } -fn validate_blocks(blocks: &Vec>, who: &PeerId) -> Result<(), BadPeer> { +/// Validate that the given `blocks` are correct. +/// Returns the number of the first block in the sequence. +/// +/// It is expected that `blocks` are in ascending order. +fn validate_blocks( + blocks: &Vec>, + who: &PeerId, + request: Option>, +) -> Result>, BadPeer> { + if let Some(request) = request { + if Some(blocks.len() as _) > request.max { + debug!( + target: "sync", + "Received more blocks than requested from {}. 
Expected at most {:?}, got {}.", + who, + request.max, + blocks.len(), + ); + + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + } + + let block_header = if request.direction == message::Direction::Descending { + blocks.last() + } else { + blocks.first() + } + .and_then(|b| b.header.as_ref()); + + let expected_block = block_header.as_ref().map_or(false, |h| match request.from { + message::FromBlock::Hash(hash) => h.hash() == hash, + message::FromBlock::Number(n) => h.number() == &n, + }); + + if !expected_block { + debug!( + target: "sync", + "Received block that was not requested. Requested {:?}, got {:?}.", + request.from, + block_header, + ); + + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + } + + if request.fields.contains(message::BlockAttributes::HEADER) && + blocks.iter().any(|b| b.header.is_none()) + { + trace!( + target: "sync", + "Missing requested header for a block in response from {}.", + who, + ); + + return Err(BadPeer(*who, rep::BAD_RESPONSE)) + } + + if request.fields.contains(message::BlockAttributes::BODY) && + blocks.iter().any(|b| b.body.is_none()) + { + trace!( + target: "sync", + "Missing requested body for a block in response from {}.", + who, + ); + + return Err(BadPeer(*who, rep::BAD_RESPONSE)) + } + } + for b in blocks { if let Some(header) = &b.header { let hash = header.hash(); @@ -1786,12 +2399,13 @@ fn validate_blocks(blocks: &Vec>, who: b.hash, hash, ); - return Err(BadPeer(who.clone(), rep::BAD_BLOCK)) + return Err(BadPeer(*who, rep::BAD_BLOCK)) } } if let (Some(header), Some(body)) = (&b.header, &b.body) { let expected = *header.extrinsics_root(); - let got = HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); + let got = + HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); if expected != got { debug!( target:"sync", @@ -1801,23 +2415,28 @@ fn validate_blocks(blocks: &Vec>, who: expected, got, ); - return Err(BadPeer(who.clone(), rep::BAD_BLOCK)) + return Err(BadPeer(*who, rep::BAD_BLOCK)) } } } - Ok(()) + + Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) } #[cfg(test)] mod test { - use super::message::FromBlock; - use super::*; + use super::{ + message::{BlockData, BlockState, FromBlock}, + *, + }; + use futures::{executor::block_on, future::poll_fn}; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::HeaderBackend; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use substrate_test_runtime_client::{ - runtime::{Block, Hash}, - ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + runtime::{Block, Hash, Header}, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, + TestClientBuilder, TestClientBuilderExt, }; #[test] @@ -1827,18 +2446,12 @@ mod test { // internally we should process the response as the justification not being available. 
let client = Arc::new(TestClientBuilder::new().build()); - let info = client.info(); let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); - let mut sync = ChainSync::new( - Roles::AUTHORITY, - client.clone(), - &info, - None, - block_announce_validator, - 1, - ); + let mut sync = + ChainSync::new(SyncMode::Full, client.clone(), block_announce_validator, 1, None) + .unwrap(); let (a1_hash, a1_number) = { let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -1846,81 +2459,61 @@ mod test { }; // add a new peer with the same best block - sync.new_peer(peer_id.clone(), a1_hash, a1_number).unwrap(); + sync.new_peer(peer_id, a1_hash, a1_number).unwrap(); // and request a justification for the block sync.request_justification(&a1_hash, a1_number); // the justification request should be scheduled to that peer - assert!( - sync.justification_requests().any(|(who, request)| { - who == peer_id && request.from == FromBlock::Hash(a1_hash) - }) - ); + assert!(sync + .justification_requests() + .any(|(who, request)| { who == peer_id && request.from == FromBlock::Hash(a1_hash) })); // there are no extra pending requests - assert_eq!( - sync.extra_justifications.pending_requests().count(), - 0, - ); + assert_eq!(sync.extra_justifications.pending_requests().count(), 0); // there's one in-flight extra request to the expected peer - assert!( - sync.extra_justifications.active_requests().any(|(who, (hash, number))| { - *who == peer_id && *hash == a1_hash && *number == a1_number - }) - ); + assert!(sync.extra_justifications.active_requests().any(|(who, (hash, number))| { + *who == peer_id && *hash == a1_hash && *number == a1_number + })); // if the peer replies with an empty response (i.e. it doesn't know the block), // the active request should be cleared. 
assert_eq!( - sync.on_block_justification( - peer_id.clone(), - BlockResponse:: { - id: 0, - blocks: vec![], - } - ), + sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }), Ok(OnBlockJustification::Nothing), ); // there should be no in-flight requests - assert_eq!( - sync.extra_justifications.active_requests().count(), - 0, - ); + assert_eq!(sync.extra_justifications.active_requests().count(), 0); // and the request should now be pending again, waiting for reschedule - assert!( - sync.extra_justifications.pending_requests().any(|(hash, number)| { - *hash == a1_hash && *number == a1_number - }) - ); + assert!(sync + .extra_justifications + .pending_requests() + .any(|(hash, number)| { *hash == a1_hash && *number == a1_number })); } #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); - let info = client.info(); - let mut sync = ChainSync::new( - Roles::AUTHORITY, + SyncMode::Full, client.clone(), - &info, - None, Box::new(DefaultBlockAnnounceValidator), 1, - ); + None, + ) + .unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); let peer_id3 = PeerId::random(); - let peer_id4 = PeerId::random(); let mut new_blocks = |n| { for _ in 0..n { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); } let info = client.info(); @@ -1928,7 +2521,6 @@ mod test { }; let (b1_hash, b1_number) = new_blocks(50); - let (b2_hash, b2_number) = new_blocks(10); // add 2 peers at blocks that we don't have locally sync.new_peer(peer_id1.clone(), Hash::random(), 42).unwrap(); @@ -1947,10 +2539,10 @@ mod test { // the justification request should be scheduled to the // new peer which is at the given block assert!(sync.justification_requests().any(|(p, r)| { - p == peer_id3 - && r.fields == BlockAttributes::JUSTIFICATION - && r.from == message::FromBlock::Hash(b1_hash) - && r.to == None + p == peer_id3 && + r.fields == BlockAttributes::JUSTIFICATION && + r.from == message::FromBlock::Hash(b1_hash) && + r.to == None })); assert_eq!( @@ -1958,38 +2550,486 @@ mod test { PeerSyncState::DownloadingJustification(b1_hash), ); - // add another peer at a known later block - sync.new_peer(peer_id4.clone(), b2_hash, b2_number).unwrap(); - - // we request a finality proof for a block we have locally - sync.request_finality_proof(&b2_hash, b2_number); - - // the finality proof request should be scheduled to peer 4 - // which is at that block - assert!( - sync.finality_proof_requests().any(|(p, r)| { p == peer_id4 && r.block == b2_hash }) - ); - - assert_eq!( - sync.peers.get(&peer_id4).unwrap().state, - PeerSyncState::DownloadingFinalityProof(b2_hash), - ); - // we restart the sync state let block_requests = sync.restart(); // which should make us send out block requests to the first two peers - assert!(block_requests.map(|r| r.unwrap()).all(|(p, _)| { p == peer_id1 || p == peer_id2 })); + assert!(block_requests + .map(|r| r.unwrap()) + .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); - // peer 3 and 4 should be unaffected as they were downloading finality data + // peer 3 should be unaffected it was downloading finality data assert_eq!( sync.peers.get(&peer_id3).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), ); - assert_eq!( - sync.peers.get(&peer_id4).unwrap().state, - PeerSyncState::DownloadingFinalityProof(b2_hash), + // Set 
common block to something that we don't have (e.g. failed import) + sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; + let _ = sync.restart().count(); + assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); + } + + /// Send a block announcement for the given `header`. + fn send_block_announce(header: Header, peer_id: &PeerId, sync: &mut ChainSync) { + let block_announce = BlockAnnounce { + header: header.clone(), + state: Some(BlockState::Best), + data: Some(Vec::new()), + }; + + sync.push_block_announce_validation(peer_id.clone(), header.hash(), block_announce, true); + + // Poll until we have processed the block announcement + block_on(poll_fn(|cx| loop { + if sync.poll_block_announce_validation(cx).is_pending() { + break Poll::Ready(()) + } + })) + } + + /// Create a block response from the given `blocks`. + fn create_block_response(blocks: Vec) -> BlockResponse { + BlockResponse:: { + id: 0, + blocks: blocks + .into_iter() + .map(|b| BlockData:: { + hash: b.hash(), + header: Some(b.header().clone()), + body: Some(b.deconstruct().1), + indexed_body: None, + receipt: None, + message_queue: None, + justification: None, + justifications: None, + }) + .collect(), + } + } + + /// Get a block request from `sync` and check that it matches the expected request. + fn get_block_request( + sync: &mut ChainSync, + from: FromBlock, + max: u32, + peer: &PeerId, + ) -> BlockRequest { + let requests = sync.block_requests().collect::>(); + + log::trace!(target: "sync", "Requests: {:?}", requests); + + assert_eq!(1, requests.len()); + assert_eq!(peer, requests[0].0); + + let request = requests[0].1.clone(); + + assert_eq!(from, request.from); + assert_eq!(Some(max), request.max); + request + } + + /// Build and import a new best block. + fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { + let at = at.unwrap_or_else(|| client.info().best_hash); + + let mut block_builder = + client.new_block_at(&BlockId::Hash(at), Default::default(), false).unwrap(); + + if fork { + block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); + } + + let block = block_builder.build().unwrap().block; + + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + block + } + + /// This is a regression test for behavior observed on a real network. + /// + /// The node is connected to multiple peers. Both of these peers have a best block (1) + /// that is below our best block (3). Now peer 2 announces a fork of block 3 that we will + /// request from peer 2. After importing the fork, peer 2 and then peer 1 will announce block 4. + /// But as peer 1 in our view is still at block 1, we will request block 2 (which we already + /// have) from it. In the meantime peer 2 sends us blocks 4 and 3 and we send another request + /// for block 2 to peer 2. Peer 1 answers with block 2 and then peer 2 does as well. Both need + /// to succeed, as we have requested block 2 from both peers. 
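+ /// + /// In other words: a response that matches a request we actually sent must be accepted + /// without reporting the peer, even when the blocks it carries are already known or queued.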
+ #[test] + fn do_not_report_peer_on_block_response_for_block_request() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + 5, + None, + ) + .unwrap(); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + + let mut client2 = client.clone(); + let mut build_block_at = |at, import| { + let mut block_builder = + client2.new_block_at(&BlockId::Hash(at), Default::default(), false).unwrap(); + // Make sure we generate a different block as fork + block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); + + let block = block_builder.build().unwrap().block; + + if import { + block_on(client2.import(BlockOrigin::Own, block.clone())).unwrap(); + } + + block + }; + + let block1 = build_block(&mut client, None, false); + let block2 = build_block(&mut client, None, false); + let block3 = build_block(&mut client, None, false); + let block3_fork = build_block_at(block2.hash(), false); + + // Add two peers which are on block 1. + sync.new_peer(peer_id1.clone(), block1.hash(), 1).unwrap(); + sync.new_peer(peer_id2.clone(), block1.hash(), 1).unwrap(); + + // Tell sync that our best block is 3. + sync.update_chain_info(&block3.hash(), 3); + + // There should be no requests. + assert!(sync.block_requests().collect::>().is_empty()); + + // Let peer2 announce a fork of block 3 + send_block_announce(block3_fork.header().clone(), &peer_id2, &mut sync); + + // Import and tell sync that we now have the fork. + block_on(client.import(BlockOrigin::Own, block3_fork.clone())).unwrap(); + sync.update_chain_info(&block3_fork.hash(), 3); + + let block4 = build_block_at(block3_fork.hash(), false); + + // Let peer2 announce block 4 and check that sync wants to get the block. + send_block_announce(block4.header().clone(), &peer_id2, &mut sync); + + let request = get_block_request(&mut sync, FromBlock::Hash(block4.hash()), 2, &peer_id2); + + // Peer1 announces the same block, but as the common block is still `1`, sync will request + // block 2 again. + send_block_announce(block4.header().clone(), &peer_id1, &mut sync); + + let request2 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id1); + + let response = create_block_response(vec![block4.clone(), block3_fork.clone()]); + let res = sync.on_block_data(&peer_id2, Some(request), response).unwrap(); + + // We should not yet import the blocks, because there is still an open request for fetching + // block `2` which blocks the import. + assert!(matches!(res, OnBlockData::Import(_, blocks) if blocks.is_empty())); + + let request3 = get_block_request(&mut sync, FromBlock::Number(2), 1, &peer_id2); + + let response = create_block_response(vec![block2.clone()]); + let res = sync.on_block_data(&peer_id1, Some(request2), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) + if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number())) + )); + + let response = create_block_response(vec![block2.clone()]); + let res = sync.on_block_data(&peer_id2, Some(request3), response).unwrap(); + // Nothing to import + assert!(matches!(res, OnBlockData::Import(_, blocks) if blocks.is_empty())); + } + + fn unwrap_from_block_number(from: FromBlock) -> u64 { + if let FromBlock::Number(from) = from { + from + } else { + panic!("Expected a number!"); + } + } + + /// A regression test for a behavior we have seen on a live network. 
+ /// + /// The scenario is that the node is doing a full resync and is connected to some node that is + /// doing a major sync as well. That other node will finish its sync before our node does and + /// send a block announcement message, but we haven't seen any block announcement from it + /// during its sync process, meaning our common number didn't change. It is now expected that + /// we start an ancestor search to find the common number. + #[test] + fn do_ancestor_search_when_common_block_to_best_queued_gap_is_too_big() { + sp_tracing::try_init_simple(); + + let blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + (0..MAX_DOWNLOAD_AHEAD * 2) + .map(|_| build_block(&mut client, None, false)) + .collect::>() + }; + + let mut client = Arc::new(TestClientBuilder::new().build()); + let info = client.info(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + 5, + None, + ) + .unwrap(); + + let peer_id1 = PeerId::random(); + let peer_id2 = PeerId::random(); + + let best_block = blocks.last().unwrap().clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) + .unwrap(); + sync.new_peer(peer_id2.clone(), info.best_hash, 0).unwrap(); + + let mut best_block_num = 0; + while best_block_num < MAX_DOWNLOAD_AHEAD { + let request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ),); + + best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap()); + } + + // "Wait" for the queue to clear + sync.queue_blocks.clear(); + + // Let peer2 announce that it finished syncing + send_block_announce(best_block.header().clone(), &peer_id2, &mut sync); + + let (peer1_req, peer2_req) = sync.block_requests().fold((None, None), |res, req| { + if req.0 == &peer_id1 { + (Some(req.1), res.1) + } else if req.0 == &peer_id2 { + (res.0, Some(req.1)) + } else { + panic!("Unexpected req: {:?}", req) + } + }); + + // We should now do an ancestor search to find the correct common block. + let peer2_req = peer2_req.unwrap(); + assert_eq!(Some(1), peer2_req.max); + assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from); + + let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); + let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.is_empty() + ),); + + let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); + + // As we are on the same chain, we should directly continue with requesting blocks from + // peer 2 as well. + get_block_request( + &mut sync, + FromBlock::Number(peer1_from + MAX_BLOCKS_TO_REQUEST as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id2, ); } + + /// A test that ensures that we can sync a huge fork. 
+ /// + /// The following scenario: + /// A peer connects to us and we both have the common block 512. The last finalized block is + /// 2048. Our best block is 4096. The peer sends us a block announcement with 4097 from a fork. + /// + /// We will first do an ancestor search to find the common block. After that we start to sync + /// the fork and finish it ;) + #[test] + fn can_sync_huge_fork() { + sp_tracing::try_init_simple(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) + .map(|_| build_block(&mut client, None, false)) + .collect::>(); + + let fork_blocks = { + let mut client = Arc::new(TestClientBuilder::new().build()); + let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] + .into_iter() + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) + .cloned() + .collect::>(); + + fork_blocks + .into_iter() + .chain( + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) + .map(|_| build_block(&mut client, None, true)), + ) + .collect::>() + }; + + let info = client.info(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + 5, + None, + ) + .unwrap(); + + let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); + let just = (*b"TEST", Vec::new()); + client + .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) + .unwrap(); + sync.update_chain_info(&info.best_hash, info.best_number); + + let peer_id1 = PeerId::random(); + + let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + .unwrap(); + + send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); + + let mut request = + get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + + // Do the ancestor search + loop { + let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; + let response = create_block_response(vec![block.clone()]); + + let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + request = match on_block_data.into_request() { + Some(req) => req.1, + // We found the ancestor + None => break, + }; + + log::trace!(target: "sync", "Request: {:?}", request); + } + + // Now request and import the fork. 
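+ // The fork is fetched in descending batches of `MAX_BLOCKS_TO_REQUEST`; each response + // below is reversed into ascending order before being reported as processed and imported.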
+ let mut best_block_num = finalized_block.header().number().clone() as u32; + while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { + let request = get_block_request( + &mut sync, + FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), + MAX_BLOCKS_TO_REQUEST as u32, + &peer_id1, + ); + + let from = unwrap_from_block_number(request.from.clone()); + + let mut resp_blocks = fork_blocks[best_block_num as usize..from as usize].to_vec(); + resp_blocks.reverse(); + + let response = create_block_response(resp_blocks.clone()); + + let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ),); + + best_block_num += MAX_BLOCKS_TO_REQUEST as u32; + + let _ = sync.on_blocks_processed( + MAX_BLOCKS_TO_REQUEST as usize, + MAX_BLOCKS_TO_REQUEST as usize, + resp_blocks + .iter() + .rev() + .map(|b| { + ( + Ok(BlockImportStatus::ImportedUnknown( + b.header().number().clone(), + Default::default(), + Some(peer_id1.clone()), + )), + b.hash(), + ) + }) + .collect(), + ); + + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); + } + + // Request the tip + get_block_request( + &mut sync, + FromBlock::Hash(fork_blocks.last().unwrap().hash()), + 1, + &peer_id1, + ); + } + + #[test] + fn removes_target_fork_on_disconnect() { + sp_tracing::try_init_simple(); + let mut client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); + + let mut sync = ChainSync::new( + SyncMode::Full, + client.clone(), + Box::new(DefaultBlockAnnounceValidator), + 1, + None, + ) + .unwrap(); + + let peer_id1 = PeerId::random(); + let common_block = blocks[1].clone(); + // Connect the node we will sync from + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + .unwrap(); + + // Create a "new" header and announce it + let mut header = blocks[0].header().clone(); + header.number = 4; + send_block_announce(header, &peer_id1, &mut sync); + assert!(sync.fork_targets.len() == 1); + + sync.peer_disconnected(&peer_id1); + assert!(sync.fork_targets.len() == 0); + } } diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index b64c9e053e97b..30ba7ffafeffc 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,13 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::cmp; -use std::ops::Range; -use std::collections::{HashMap, BTreeMap}; -use log::trace; +use crate::protocol::message; use libp2p::PeerId; +use log::trace; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; -use crate::protocol::message; +use std::{ + cmp, + collections::{BTreeMap, HashMap}, + ops::Range, +}; /// Block data with origin. 
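+/// `origin` is the peer the block was downloaded from, if any.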
#[derive(Debug, Clone, PartialEq, Eq)] @@ -35,18 +37,15 @@ pub struct BlockData { #[derive(Debug)] enum BlockRangeState { - Downloading { - len: NumberFor, - downloading: u32, - }, + Downloading { len: NumberFor, downloading: u32 }, Complete(Vec>), } impl BlockRangeState { pub fn len(&self) -> NumberFor { match *self { - BlockRangeState::Downloading { len, .. } => len, - BlockRangeState::Complete(ref blocks) => (blocks.len() as u32).into(), + Self::Downloading { len, .. } => len, + Self::Complete(ref blocks) => (blocks.len() as u32).into(), } } } @@ -62,10 +61,7 @@ pub struct BlockCollection { impl BlockCollection { /// Create a new instance. pub fn new() -> Self { - BlockCollection { - blocks: BTreeMap::new(), - peer_requests: HashMap::new(), - } + Self { blocks: BTreeMap::new(), peer_requests: HashMap::new() } } /// Clear everything. @@ -77,7 +73,7 @@ impl BlockCollection { /// Insert a set of blocks into collection. pub fn insert(&mut self, start: NumberFor, blocks: Vec>, who: PeerId) { if blocks.is_empty() { - return; + return } match self.blocks.get(&start) { @@ -86,16 +82,21 @@ impl BlockCollection { }, Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { trace!(target: "sync", "Ignored block data already downloaded: {}", start); - return; + return }, _ => (), } - self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter() - .map(|b| BlockData { origin: Some(who.clone()), block: b }).collect())); + self.blocks.insert( + start, + BlockRangeState::Complete( + blocks.into_iter().map(|b| BlockData { origin: Some(who), block: b }).collect(), + ), + ); } - /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. + /// Returns a set of block hashes that require a header download. The returned set is marked as + /// being downloaded. pub fn needed_blocks( &mut self, who: PeerId, @@ -107,7 +108,7 @@ impl BlockCollection { ) -> Option>> { if peer_best <= common { // Bail out early - return None; + return None } // First block number that we need to download let first_different = common + >::one(); @@ -120,15 +121,13 @@ impl BlockCollection { break match (prev, next) { (Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) if downloading < max_parallel => - (*start .. *start + *len, downloading), + (*start..*start + *len, downloading), (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => - (*start + r.len() .. cmp::min(*next_start, *start + r.len() + count), 0), // gap - (Some((start, r)), None) => - (*start + r.len() .. *start + r.len() + count, 0), // last range - (None, None) => - (first_different .. first_different + count, 0), // empty + (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0), // gap + (Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), /* last range */ + (None, None) => (first_different..first_different + count, 0), /* empty */ (None, Some((start, _))) if *start > first_different => - (first_different .. 
cmp::min(first_different + count, *start), 0), // gap at the start + (first_different..cmp::min(first_different + count, *start), 0), /* gap at the start */ _ => { prev = next; continue @@ -139,28 +138,39 @@ impl BlockCollection { // crop to peers best if range.start > peer_best { trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best); - return None; + return None } range.end = cmp::min(peer_best + One::one(), range.end); - if self.blocks.iter().next().map_or(false, |(n, _)| range.start > *n + max_ahead.into()) { + if self + .blocks + .iter() + .next() + .map_or(false, |(n, _)| range.start > *n + max_ahead.into()) + { trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start); - return None; + return None } self.peer_requests.insert(who, range.start); - self.blocks.insert(range.start, BlockRangeState::Downloading { - len: range.end - range.start, - downloading: downloading + 1 - }); + self.blocks.insert( + range.start, + BlockRangeState::Downloading { + len: range.end - range.start, + downloading: downloading + 1, + }, + ); if range.end <= range.start { - panic!("Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", - range, count, peer_best, common, self.blocks); + panic!( + "Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", + range, count, peer_best, common, self.blocks + ); } Some(range) } - /// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain. + /// Get a valid chain of blocks ordered in descending order and ready for importing into + /// blockchain. pub fn drain(&mut self, from: NumberFor) -> Vec> { let mut drained = Vec::new(); let mut ranges = Vec::new(); @@ -181,23 +191,21 @@ impl BlockCollection { for r in ranges { self.blocks.remove(&r); } - trace!(target: "sync", "Drained {} blocks", drained.len()); + trace!(target: "sync", "Drained {} blocks from {:?}", drained.len(), from); drained } pub fn clear_peer_download(&mut self, who: &PeerId) { if let Some(start) = self.peer_requests.remove(who) { let remove = match self.blocks.get_mut(&start) { - Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) if *downloading > 1 => { + Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) + if *downloading > 1 => + { *downloading -= 1; false - }, - Some(&mut BlockRangeState::Downloading { .. }) => { - true - }, - _ => { - false } + Some(&mut BlockRangeState::Downloading { .. }) => true, + _ => false, }; if remove { self.blocks.remove(&start); @@ -210,25 +218,28 @@ impl BlockCollection { mod test { use super::{BlockCollection, BlockData, BlockRangeState}; use crate::{protocol::message, PeerId}; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; use sp_core::H256; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; type Block = RawBlock>; fn is_empty(bc: &BlockCollection) -> bool { - bc.blocks.is_empty() && - bc.peer_requests.is_empty() + bc.blocks.is_empty() && bc.peer_requests.is_empty() } fn generate_blocks(n: usize) -> Vec> { - (0 .. 
n).map(|_| message::generic::BlockData { - hash: H256::random(), - header: None, - body: None, - message_queue: None, - receipt: None, - justification: None, - }).collect() + (0..n) + .map(|_| message::generic::BlockData { + hash: H256::random(), + header: None, + body: None, + indexed_body: None, + message_queue: None, + receipt: None, + justification: None, + justifications: None, + }) + .collect() } #[test] @@ -250,32 +261,47 @@ mod test { let peer2 = PeerId::random(); let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1 .. 41)); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41 .. 81)); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81 .. 121)); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1..41)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41..81)); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81..121)); bc.clear_peer_download(&peer1); bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); assert_eq!(bc.drain(1), vec![]); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121 .. 151)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151)); bc.clear_peer_download(&peer0); bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11 .. 41)); - assert_eq!(bc.drain(1), blocks[1..11].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41)); + assert_eq!( + bc.drain(1), + blocks[1..11] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .collect::>() + ); bc.clear_peer_download(&peer0); bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); let drained = bc.drain(12); - assert_eq!(drained[..30], blocks[11..41].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()[..]); - assert_eq!(drained[30..], blocks[41..81].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); + assert_eq!( + drained[..30], + blocks[11..41] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .collect::>()[..] + ); + assert_eq!( + drained[30..], + blocks[41..81] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .collect::>()[..] + ); bc.clear_peer_download(&peer2); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81 .. 121)); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81..121)); bc.clear_peer_download(&peer2); bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); bc.clear_peer_download(&peer1); @@ -283,25 +309,38 @@ mod test { assert_eq!(bc.drain(80), vec![]); let drained = bc.drain(81); - assert_eq!(drained[..40], blocks[81..121].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }).collect::>()[..]); - assert_eq!(drained[40..], blocks[121..150].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); + assert_eq!( + drained[..40], + blocks[81..121] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }) + .collect::>()[..] + ); + assert_eq!( + drained[40..], + blocks[121..150] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .collect::>()[..] 
+ ); } #[test] fn large_gap() { let mut bc: BlockCollection = BlockCollection::new(); - bc.blocks.insert(100, BlockRangeState::Downloading { - len: 128, - downloading: 1, - }); - let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: None }).collect(); + bc.blocks.insert(100, BlockRangeState::Downloading { len: 128, downloading: 1 }); + let blocks = generate_blocks(10) + .into_iter() + .map(|b| BlockData { block: b, origin: None }) + .collect(); bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1 .. 100)); + assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1..100)); assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), Some(100 + 128 .. 100 + 128 + 128)); + assert_eq!( + bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), + Some(100 + 128..100 + 128 + 128) + ); } } diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index df336c25339fd..226762b9658d2 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,15 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sp_blockchain::Error as ClientError; use crate::protocol::sync::{PeerSync, PeerSyncState}; use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, trace, warn}; +use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::time::Duration; -use wasm_timer::Instant; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + time::{Duration, Instant}, +}; // Time to wait before trying to get the same extra data from the same peer. const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10); @@ -61,12 +62,12 @@ pub(crate) struct Metrics { pub(crate) active_requests: u32, pub(crate) importing_requests: u32, pub(crate) failed_requests: u32, - _priv: () + _priv: (), } impl ExtraRequests { pub(crate) fn new(request_type_name: &'static str) -> Self { - ExtraRequests { + Self { tree: ForkTree::new(), best_seen_finalized_number: Zero::zero(), pending_requests: VecDeque::new(), @@ -93,13 +94,14 @@ impl ExtraRequests { /// Queue an extra data request to be considered by the `Matcher`. pub(crate) fn schedule(&mut self, request: ExtraRequest, is_descendent_of: F) - where F: Fn(&B::Hash, &B::Hash) -> Result + where + F: Fn(&B::Hash, &B::Hash) -> Result, { match self.tree.import(request.0, request.1, (), &is_descendent_of) { Ok(true) => { // this is a new root so we add it to the current `pending_requests` self.pending_requests.push_back((request.0, request.1)); - } + }, Err(fork_tree::Error::Revert) => { // we have finalized further than the given request, presumably // by some other part of the system (not sync). 
we can safely @@ -107,8 +109,8 @@ impl ExtraRequests { }, Err(err) => { debug!(target: "sync", "Failed to insert request {:?} into tree: {:?}", request, err); - } - _ => () + }, + _ => (), } } @@ -120,33 +122,35 @@ impl ExtraRequests { } /// Processes the response for the request previously sent to the given peer. - pub(crate) fn on_response(&mut self, who: PeerId, resp: Option) -> Option<(PeerId, B::Hash, NumberFor, R)> { + pub(crate) fn on_response( + &mut self, + who: PeerId, + resp: Option, + ) -> Option<(PeerId, B::Hash, NumberFor, R)> { // we assume that the request maps to the given response, this is // currently enforced by the outer network protocol before passing on // messages to chain sync. if let Some(request) = self.active_requests.remove(&who) { if let Some(r) = resp { - trace!(target: "sync", "Queuing import of {} from {:?} for {:?}", - self.request_type_name, - who, - request, + trace!(target: "sync", + "Queuing import of {} from {:?} for {:?}", + self.request_type_name, who, request, ); self.importing_requests.insert(request); return Some((who, request.0, request.1, r)) } else { - trace!(target: "sync", "Empty {} response from {:?} for {:?}", - self.request_type_name, - who, - request, + trace!(target: "sync", + "Empty {} response from {:?} for {:?}", + self.request_type_name, who, request, ); } self.failed_requests.entry(request).or_default().push((who, Instant::now())); self.pending_requests.push_front(request); } else { - trace!(target: "sync", "No active {} request to {:?}", - self.request_type_name, - who, + trace!(target: "sync", + "No active {} request to {:?}", + self.request_type_name, who, ); } None @@ -157,9 +161,10 @@ impl ExtraRequests { &mut self, best_finalized_hash: &B::Hash, best_finalized_number: NumberFor, - is_descendent_of: F + is_descendent_of: F, ) -> Result<(), fork_tree::Error> - where F: Fn(&B::Hash, &B::Hash) -> Result + where + F: Fn(&B::Hash, &B::Hash) -> Result, { let request = (*best_finalized_hash, best_finalized_number); @@ -168,8 +173,8 @@ impl ExtraRequests { } if best_finalized_number > self.best_seen_finalized_number { - // normally we'll receive finality notifications for every block => finalize would be enough - // but if many blocks are finalized at once, some notifications may be omitted + // normally we'll receive finality notifications for every block => finalize would be + // enough but if many blocks are finalized at once, some notifications may be omitted // => let's use finalize_with_ancestors here match self.tree.finalize_with_ancestors( best_finalized_hash, @@ -203,9 +208,8 @@ impl ExtraRequests { &mut self, request: ExtraRequest, result: Result, E>, - reschedule_on_failure: bool - ) -> bool - { + reschedule_on_failure: bool, + ) -> bool { if !self.importing_requests.remove(&request) { return false } @@ -217,14 +221,13 @@ impl ExtraRequests { self.pending_requests.push_front(request); } return true - } + }, }; if self.tree.finalize_root(&finalized_hash).is_none() { - warn!(target: "sync", "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", - finalized_hash, - finalized_number, - self.tree.roots().collect::>() + warn!(target: "sync", + "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", + finalized_hash, finalized_number, self.tree.roots().collect::>() ); return true } @@ -258,7 +261,7 @@ impl ExtraRequests { active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX), importing_requests: 
self.importing_requests.len().try_into().unwrap_or(std::u32::MAX), - _priv: () + _priv: (), } } } @@ -269,15 +272,12 @@ pub(crate) struct Matcher<'a, B: BlockT> { /// Length of pending requests collection. /// Used to ensure we do not loop more than once over all pending requests. remaining: usize, - extras: &'a mut ExtraRequests + extras: &'a mut ExtraRequests, } impl<'a, B: BlockT> Matcher<'a, B> { fn new(extras: &'a mut ExtraRequests) -> Self { - Matcher { - remaining: extras.pending_requests.len(), - extras - } + Self { remaining: extras.pending_requests.len(), extras } } /// Finds a peer to which a pending request can be sent. @@ -294,7 +294,10 @@ impl<'a, B: BlockT> Matcher<'a, B> { /// /// The returned `PeerId` (if any) is guaranteed to come from the given `peers` /// argument. - pub(crate) fn next(&mut self, peers: &HashMap>) -> Option<(PeerId, ExtraRequest)> { + pub(crate) fn next( + &mut self, + peers: &HashMap>, + ) -> Option<(PeerId, ExtraRequest)> { if self.remaining == 0 { return None } @@ -305,8 +308,11 @@ impl<'a, B: BlockT> Matcher<'a, B> { } while let Some(request) = self.extras.pending_requests.pop_front() { - for (peer, sync) in peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available) { - // only ask peers that have synced at least up to the block number that we're asking the extra for + for (peer, sync) in + peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available) + { + // only ask peers that have synced at least up to the block number that we're asking + // the extra for if sync.best_number < request.1 { continue } @@ -315,18 +321,23 @@ impl<'a, B: BlockT> Matcher<'a, B> { continue } // only ask if the same request has not failed for this peer before - if self.extras.failed_requests.get(&request).map(|rr| rr.iter().any(|i| &i.0 == peer)).unwrap_or(false) { + if self + .extras + .failed_requests + .get(&request) + .map(|rr| rr.iter().any(|i| &i.0 == peer)) + .unwrap_or(false) + { continue } self.extras.active_requests.insert(peer.clone(), request); - trace!(target: "sync", "Sending {} request to {:?} for {:?}", - self.extras.request_type_name, - peer, - request, + trace!(target: "sync", + "Sending {} request to {:?} for {:?}", + self.extras.request_type_name, peer, request, ); - return Some((peer.clone(), request)) + return Some((*peer, request)) } self.extras.pending_requests.push_back(request); @@ -343,23 +354,22 @@ impl<'a, B: BlockT> Matcher<'a, B> { #[cfg(test)] mod tests { + use super::*; use crate::protocol::sync::PeerSync; + use quickcheck::{Arbitrary, Gen, QuickCheck}; use sp_blockchain::Error as ClientError; - use quickcheck::{Arbitrary, Gen, QuickCheck, StdThreadGen}; - use rand::Rng; - use std::collections::{HashMap, HashSet}; - use super::*; use sp_test_primitives::{Block, BlockNumber, Hash}; + use std::collections::{HashMap, HashSet}; #[test] fn requests_are_processed_in_order() { fn property(mut peers: ArbitraryPeers) { let mut requests = ExtraRequests::::new("test"); - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); + let num_peers_available = + peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); - for i in 0 .. 
num_peers_available { + for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } @@ -369,12 +379,12 @@ mod tests { for p in &pending { let (peer, r) = m.next(&peers.0).unwrap(); assert_eq!(p, &r); - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); } } - QuickCheck::with_gen(StdThreadGen::new(19)) - .quickcheck(property as fn(ArbitraryPeers)) + QuickCheck::new().quickcheck(property as fn(ArbitraryPeers)) } #[test] @@ -399,22 +409,24 @@ mod tests { fn property(mut peers: ArbitraryPeers) -> bool { let mut requests = ExtraRequests::::new("test"); - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); + let num_peers_available = + peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); - for i in 0 .. num_peers_available { + for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } let mut m = requests.matcher(); while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); } assert!(requests.pending_requests.is_empty()); let active_peers = requests.active_requests.keys().cloned().collect::>(); - let previously_active = requests.active_requests.values().cloned().collect::>(); + let previously_active = + requests.active_requests.values().cloned().collect::>(); for peer in &active_peers { requests.peer_disconnected(peer) @@ -425,8 +437,7 @@ mod tests { previously_active == requests.pending_requests.iter().cloned().collect::>() } - QuickCheck::with_gen(StdThreadGen::new(19)) - .quickcheck(property as fn(ArbitraryPeers) -> bool) + QuickCheck::new().quickcheck(property as fn(ArbitraryPeers) -> bool) } #[test] @@ -434,31 +445,44 @@ mod tests { fn property(mut peers: ArbitraryPeers) { let mut requests = ExtraRequests::::new("test"); - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); + let num_peers_available = + peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); - for i in 0 .. 
num_peers_available { + for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } let mut m = requests.matcher(); while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); } - let active = requests.active_requests.iter().map(|(p, &r)| (p.clone(), r)).collect::>(); + let active = requests + .active_requests + .iter() + .map(|(p, &r)| (p.clone(), r)) + .collect::>(); for (peer, req) in &active { assert!(requests.failed_requests.get(req).is_none()); assert!(!requests.pending_requests.contains(req)); assert!(requests.on_response::<()>(peer.clone(), None).is_none()); assert!(requests.pending_requests.contains(req)); - assert_eq!(1, requests.failed_requests.get(req).unwrap().iter().filter(|(p, _)| p == peer).count()) + assert_eq!( + 1, + requests + .failed_requests + .get(req) + .unwrap() + .iter() + .filter(|(p, _)| p == peer) + .count() + ) } } - QuickCheck::with_gen(StdThreadGen::new(19)) - .quickcheck(property as fn(ArbitraryPeers)) + QuickCheck::new().quickcheck(property as fn(ArbitraryPeers)) } #[test] @@ -498,7 +522,10 @@ mod tests { finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash7, 7)), true); // ensure that there's no request for #6 - assert_eq!(finality_proofs.pending_requests.iter().collect::>(), Vec::<&(Hash, u64)>::new()); + assert_eq!( + finality_proofs.pending_requests.iter().collect::>(), + Vec::<&(Hash, u64)>::new() + ); } #[test] @@ -527,14 +554,13 @@ mod tests { struct ArbitraryPeerSyncState(PeerSyncState); impl Arbitrary for ArbitraryPeerSyncState { - fn arbitrary(g: &mut G) -> Self { - let s = match g.gen::() % 5 { + fn arbitrary(g: &mut Gen) -> Self { + let s = match u8::arbitrary(g) % 4 { 0 => PeerSyncState::Available, // TODO: 1 => PeerSyncState::AncestorSearch(g.gen(), AncestorSearchState), - 1 => PeerSyncState::DownloadingNew(g.gen::()), + 1 => PeerSyncState::DownloadingNew(BlockNumber::arbitrary(g)), 2 => PeerSyncState::DownloadingStale(Hash::random()), - 3 => PeerSyncState::DownloadingJustification(Hash::random()), - _ => PeerSyncState::DownloadingFinalityProof(Hash::random()) + _ => PeerSyncState::DownloadingJustification(Hash::random()), }; ArbitraryPeerSyncState(s) } @@ -544,13 +570,13 @@ mod tests { struct ArbitraryPeerSync(PeerSync); impl Arbitrary for ArbitraryPeerSync { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let ps = PeerSync { - common_number: g.gen(), + peer_id: PeerId::random(), + common_number: u64::arbitrary(g), best_hash: Hash::random(), - best_number: g.gen(), + best_number: u64::arbitrary(g), state: ArbitraryPeerSyncState::arbitrary(g).0, - recently_announced: Default::default() }; ArbitraryPeerSync(ps) } @@ -560,13 +586,13 @@ mod tests { struct ArbitraryPeers(HashMap>); impl Arbitrary for ArbitraryPeers { - fn arbitrary(g: &mut G) -> Self { + fn arbitrary(g: &mut Gen) -> Self { let mut peers = HashMap::with_capacity(g.size()); - for _ in 0 .. 
g.size() { - peers.insert(PeerId::random(), ArbitraryPeerSync::arbitrary(g).0); + for _ in 0..g.size() { + let ps = ArbitraryPeerSync::arbitrary(g).0; + peers.insert(ps.peer_id, ps); } ArbitraryPeers(peers) } } - } diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs new file mode 100644 index 0000000000000..d2e4463f98912 --- /dev/null +++ b/client/network/src/protocol/sync/state.rs @@ -0,0 +1,170 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::StateDownloadProgress; +use crate::{ + chain::{Client, ImportedState}, + schema::v1::{StateEntry, StateRequest, StateResponse}, +}; +use codec::{Decode, Encode}; +use log::debug; +use sc_client_api::StorageProof; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use std::sync::Arc; + +/// State sync support. +/// +/// State sync state machine. Accumulates partial state data until it +/// is ready to be imported. +pub struct StateSync { + target_block: B::Hash, + target_header: B::Header, + target_root: B::Hash, + last_key: Vec, + state: Vec<(Vec, Vec)>, + complete: bool, + client: Arc>, + imported_bytes: u64, + skip_proof: bool, +} + +/// Import state chunk result. +pub enum ImportResult { + /// State is complete and ready for import. + Import(B::Hash, B::Header, ImportedState), + /// Continue downloading. + Continue(StateRequest), + /// Bad state chunk. + BadResponse, +} + +impl StateSync { + /// Create a new instance. + pub fn new(client: Arc>, target: B::Header, skip_proof: bool) -> Self { + Self { + client, + target_block: target.hash(), + target_root: target.state_root().clone(), + target_header: target, + last_key: Vec::default(), + state: Vec::default(), + complete: false, + imported_bytes: 0, + skip_proof, + } + } + + /// Validate and import a state response. 
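+ /// + /// A rough sketch of the driving loop, for orientation only (`fetch_state_response`, + /// `import_state` and `report_bad_peer` are illustrative stand-ins for the network and + /// import layers, not real APIs): + /// + /// ```ignore + /// let mut sync = StateSync::new(client, target_header, false); + /// let mut request = sync.next_request(); + /// loop { + ///     match sync.import(fetch_state_response(request)?) { + ///         ImportResult::Import(hash, header, state) => break import_state(hash, header, state), + ///         ImportResult::Continue(next) => request = next, + ///         ImportResult::BadResponse => break report_bad_peer(), + ///     } + /// } + /// ```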
+ pub fn import(&mut self, response: StateResponse) -> ImportResult { + if response.entries.is_empty() && response.proof.is_empty() && !response.complete { + debug!(target: "sync", "Bad state response"); + return ImportResult::BadResponse + } + if !self.skip_proof && response.proof.is_empty() { + debug!(target: "sync", "Missing proof"); + return ImportResult::BadResponse + } + let complete = if !self.skip_proof { + debug!(target: "sync", "Importing state from {} trie nodes", response.proof.len()); + let proof_size = response.proof.len() as u64; + let proof = match StorageProof::decode(&mut response.proof.as_ref()) { + Ok(proof) => proof, + Err(e) => { + debug!(target: "sync", "Error decoding proof: {:?}", e); + return ImportResult::BadResponse + }, + }; + let (values, complete) = + match self.client.verify_range_proof(self.target_root, proof, &self.last_key) { + Err(e) => { + debug!(target: "sync", "StateResponse failed proof verification: {:?}", e); + return ImportResult::BadResponse + }, + Ok(values) => values, + }; + debug!(target: "sync", "Imported with {} keys", values.len()); + + if let Some(last) = values.last().map(|(k, _)| k) { + self.last_key = last.clone(); + } + + for (key, value) in values { + self.imported_bytes += key.len() as u64; + self.state.push((key, value)) + } + self.imported_bytes += proof_size; + complete + } else { + debug!( + target: "sync", + "Importing state from {:?} to {:?}", + response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + + if let Some(e) = response.entries.last() { + self.last_key = e.key.clone(); + } + for StateEntry { key, value } in response.entries { + self.imported_bytes += (key.len() + value.len()) as u64; + self.state.push((key, value)) + } + response.complete + }; + if complete { + self.complete = true; + ImportResult::Import( + self.target_block, + self.target_header.clone(), + ImportedState { block: self.target_block, state: std::mem::take(&mut self.state) }, + ) + } else { + ImportResult::Continue(self.next_request()) + } + } + + /// Produce next state request. + pub fn next_request(&self) -> StateRequest { + StateRequest { + block: self.target_block.encode(), + start: self.last_key.clone(), + no_proof: self.skip_proof, + } + } + + /// Check if the state is complete. + pub fn is_complete(&self) -> bool { + self.complete + } + + /// Returns target block number. + pub fn target_block_num(&self) -> NumberFor { + *self.target_header.number() + } + + /// Returns target block hash. + pub fn target(&self) -> B::Hash { + self.target_block + } + + /// Returns state sync estimated progress. + pub fn progress(&self) -> StateDownloadProgress { + let percent_done = (*self.last_key.get(0).unwrap_or(&0u8) as u32) * 100 / 256; + StateDownloadProgress { percentage: percent_done, size: self.imported_bytes } + } +} diff --git a/client/network/src/protocol/sync/warp.rs b/client/network/src/protocol/sync/warp.rs new file mode 100644 index 0000000000000..32bd5cb9ed798 --- /dev/null +++ b/client/network/src/protocol/sync/warp.rs @@ -0,0 +1,180 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Warp sync support. +pub use super::state::ImportResult; +use super::state::StateSync; +pub use crate::warp_request_handler::{ + EncodedProof, Request as WarpProofRequest, VerificationResult, WarpSyncProvider, +}; +use crate::{ + chain::Client, + schema::v1::{StateRequest, StateResponse}, + WarpSyncPhase, WarpSyncProgress, +}; +use sp_finality_grandpa::{AuthorityList, SetId}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use std::sync::Arc; + +enum Phase { + WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash }, + State(StateSync), +} + +/// Import warp proof result. +pub enum WarpProofImportResult { + /// Start downloading state data. + StateRequest(StateRequest), + /// Continue downloading warp sync proofs. + WarpProofRequest(WarpProofRequest), + /// Bad proof. + BadResponse, +} + +/// Warp sync state machine. Accumulates warp proofs and state. +pub struct WarpSync { + phase: Phase, + client: Arc>, + warp_sync_provider: Arc>, + total_proof_bytes: u64, +} + +impl WarpSync { + /// Create a new instance. + pub fn new( + client: Arc>, + warp_sync_provider: Arc>, + ) -> Self { + let last_hash = client.hash(Zero::zero()).unwrap().expect("Genesis header always exists"); + let phase = Phase::WarpProof { + set_id: 0, + authorities: warp_sync_provider.current_authorities(), + last_hash, + }; + Self { client, warp_sync_provider, phase, total_proof_bytes: 0 } + } + + /// Validate and import a state response. + pub fn import_state(&mut self, response: StateResponse) -> ImportResult { + match &mut self.phase { + Phase::WarpProof { .. } => { + log::debug!(target: "sync", "Unexpected state response"); + return ImportResult::BadResponse + }, + Phase::State(sync) => sync.import(response), + } + } + + /// Validate and import a warp proof response. 
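+ /// + /// Rough usage sketch, for orientation only (`fetch_warp_proof`, `drive_state_download` and + /// `report_bad_peer` are illustrative stand-ins, not real APIs): + /// + /// ```ignore + /// let mut warp = WarpSync::new(client, provider); + /// while let Some(request) = warp.next_warp_poof_request() { + ///     match warp.import_warp_proof(fetch_warp_proof(request)?) { + ///         // More proofs needed; the next request continues from the updated `last_hash`. + ///         WarpProofImportResult::WarpProofRequest(_) => continue, + ///         // All proofs verified; switch to driving the state download. + ///         WarpProofImportResult::StateRequest(req) => { drive_state_download(req); break }, + ///         WarpProofImportResult::BadResponse => report_bad_peer(), + ///     } + /// } + /// ```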
+	/// Validate and import a warp proof response.
+	pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult<B> {
+		match &mut self.phase {
+			Phase::State(_) => {
+				log::debug!(target: "sync", "Unexpected warp proof response");
+				WarpProofImportResult::BadResponse
+			},
+			Phase::WarpProof { set_id, authorities, last_hash } => {
+				match self.warp_sync_provider.verify(
+					&response,
+					*set_id,
+					std::mem::take(authorities),
+				) {
+					Err(e) => {
+						log::debug!(target: "sync", "Bad warp proof response: {:?}", e);
+						return WarpProofImportResult::BadResponse
+					},
+					Ok(VerificationResult::Partial(new_set_id, new_authorities, new_last_hash)) => {
+						log::debug!(target: "sync", "Verified partial proof, set_id={:?}", new_set_id);
+						*set_id = new_set_id;
+						*authorities = new_authorities;
+						*last_hash = new_last_hash.clone();
+						self.total_proof_bytes += response.0.len() as u64;
+						WarpProofImportResult::WarpProofRequest(WarpProofRequest {
+							begin: new_last_hash,
+						})
+					},
+					Ok(VerificationResult::Complete(new_set_id, _, header)) => {
+						log::debug!(target: "sync", "Verified complete proof, set_id={:?}", new_set_id);
+						self.total_proof_bytes += response.0.len() as u64;
+						let state_sync = StateSync::new(self.client.clone(), header, false);
+						let request = state_sync.next_request();
+						self.phase = Phase::State(state_sync);
+						WarpProofImportResult::StateRequest(request)
+					},
+				}
+			},
+		}
+	}
+
+	/// Produce the next state request.
+	pub fn next_state_request(&self) -> Option<StateRequest> {
+		match &self.phase {
+			Phase::WarpProof { .. } => None,
+			Phase::State(sync) => Some(sync.next_request()),
+		}
+	}
+
+	/// Produce the next warp proof request.
+	pub fn next_warp_poof_request(&self) -> Option<WarpProofRequest<B>> {
+		match &self.phase {
+			Phase::State(_) => None,
+			Phase::WarpProof { last_hash, .. } => Some(WarpProofRequest { begin: *last_hash }),
+		}
+	}
+
+	/// Return the target block hash if it is known.
+	pub fn target_block_hash(&self) -> Option<B::Hash> {
+		match &self.phase {
+			Phase::State(s) => Some(s.target()),
+			Phase::WarpProof { .. } => None,
+		}
+	}
+
+	/// Return the target block number if it is known.
+	pub fn target_block_number(&self) -> Option<NumberFor<B>> {
+		match &self.phase {
+			Phase::State(s) => Some(s.target_block_num()),
+			Phase::WarpProof { .. } => None,
+		}
+	}
+
+	/// Check if the state is complete.
+	pub fn is_complete(&self) -> bool {
+		match &self.phase {
+			Phase::WarpProof { .. } => false,
+			Phase::State(sync) => sync.is_complete(),
+		}
+	}
+
+	/// Returns the estimated warp sync progress (current phase and downloaded bytes).
+	pub fn progress(&self) -> WarpSyncProgress {
+		match &self.phase {
+			Phase::WarpProof { .. } => WarpSyncProgress {
+				phase: WarpSyncPhase::DownloadingWarpProofs,
+				total_bytes: self.total_proof_bytes,
+			},
+			Phase::State(sync) => WarpSyncProgress {
+				phase: if self.is_complete() {
+					WarpSyncPhase::ImportingState
+				} else {
+					WarpSyncPhase::DownloadingState
+				},
+				total_bytes: self.total_proof_bytes + sync.progress().size,
+			},
+		}
+	}
+}
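For orientation, the two-phase flow implemented by `WarpSync` above can be summarized with a small self-contained mock: a warp proof response either extends the proof chain (staying in the proof phase) or completes it (switching to state download), after which state responses are consumed until the state sync reports completion. All names below are local stand-ins for illustration, not the Substrate types:

enum MockVerification {
    Partial(u64), // carries the next authority set id
    Complete,
}

enum MockPhase {
    WarpProof { set_id: u64 },
    State { chunks_left: u32 },
}

struct MockWarpSync {
    phase: MockPhase,
}

impl MockWarpSync {
    /// Feed one response into the state machine; `true` means fully synced.
    fn on_response(&mut self, verification: MockVerification) -> bool {
        match (&mut self.phase, verification) {
            (MockPhase::WarpProof { set_id }, MockVerification::Partial(next)) => {
                // Stay in the proof phase and request the next proof fragment.
                *set_id = next;
                false
            },
            (MockPhase::WarpProof { .. }, MockVerification::Complete) => {
                // Proof chain verified up to the target header; download state next.
                self.phase = MockPhase::State { chunks_left: 3 };
                false
            },
            (MockPhase::State { chunks_left }, _) => {
                *chunks_left -= 1;
                *chunks_left == 0
            },
        }
    }
}

fn main() {
    let mut sync = MockWarpSync { phase: MockPhase::WarpProof { set_id: 0 } };
    assert!(!sync.on_response(MockVerification::Partial(1)));
    assert!(!sync.on_response(MockVerification::Complete)); // switch to state phase
    assert!(!sync.on_response(MockVerification::Complete)); // state chunk 1
    assert!(!sync.on_response(MockVerification::Complete)); // state chunk 2
    assert!(sync.on_response(MockVerification::Complete)); // state chunk 3: done
}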
diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs
index 5141e6db70141..6ebc7416c2a35 100644
--- a/client/network/src/request_responses.rs
+++ b/client/network/src/request_responses.rs
@@ -1,18 +1,20 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.
 
-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 
-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.
 
 // You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 //! Collection of request-response protocols.
 //!
@@ -31,17 +33,20 @@
 //!
 //! - If provided, a ["requests processing"](ProtocolConfig::inbound_queue) channel
 //!   is used to handle incoming requests.
-//!
 
-use futures::{channel::{mpsc, oneshot}, prelude::*};
+use crate::ReputationChange;
+use futures::{
+	channel::{mpsc, oneshot},
+	prelude::*,
+};
 use libp2p::{
 	core::{
 		connection::{ConnectionId, ListenerId},
 		ConnectedPoint, Multiaddr, PeerId,
 	},
 	request_response::{
-		RequestResponse, RequestResponseCodec, RequestResponseConfig, RequestResponseEvent,
-		RequestResponseMessage, ResponseChannel, ProtocolSupport
+		ProtocolSupport, RequestResponse, RequestResponseCodec, RequestResponseConfig,
+		RequestResponseEvent, RequestResponseMessage, ResponseChannel,
 	},
 	swarm::{
 		protocols_handler::multi::MultiHandler, NetworkBehaviour, NetworkBehaviourAction,
@@ -49,8 +54,13 @@ use libp2p::{
 	},
 };
 use std::{
-	borrow::Cow, collections::{hash_map::Entry, HashMap}, convert::TryFrom as _, io, iter,
-	pin::Pin, task::{Context, Poll}, time::Duration,
+	borrow::Cow,
+	collections::{hash_map::Entry, HashMap},
+	convert::TryFrom as _,
+	io, iter,
+	pin::Pin,
+	task::{Context, Poll},
+	time::{Duration, Instant},
 };
 
 pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId};
@@ -58,47 +68,47 @@ pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId};
 
 /// Configuration for a single request-response protocol.
 #[derive(Debug, Clone)]
 pub struct ProtocolConfig {
-	/// Name of the protocol on the wire. Should be something like `/foo/bar`.
-	pub name: Cow<'static, str>,
-
-	/// Maximum allowed size, in bytes, of a request.
-	///
-	/// Any request larger than this value will be declined as a way to avoid allocating too
-	/// much memory for it.
-	pub max_request_size: u64,
-
-	/// Maximum allowed size, in bytes, of a response.
-	///
-	/// Any response larger than this value will be declined as a way to avoid allocating too
-	/// much memory for it.
-	pub max_response_size: u64,
-
-	/// Duration after which emitted requests are considered timed out.
-	///
-	/// If you expect the response to come back quickly, you should set this to a smaller duration.
-	pub request_timeout: Duration,
-
-	/// Channel on which the networking service will send incoming requests.
-	///
-	/// Every time a peer sends a request to the local node using this protocol, the networking
-	/// service will push an element on this channel. The receiving side of this channel then has
-	/// to pull this element, process the request, and send back the response to send back to the
-	/// peer.
-	///
-	/// The size of the channel has to be carefully chosen. If the channel is full, the networking
-	/// service will discard the incoming request send back an error to the peer. Consequently,
-	/// the channel being full is an indicator that the node is overloaded.
-	///
-	/// You can typically set the size of the channel to `T / d`, where `T` is the
-	/// `request_timeout` and `d` is the expected average duration of CPU and I/O it takes to
-	/// build a response.
-	///
-	/// Can be `None` if the local node does not support answering incoming requests.
-	/// If this is `None`, then the local node will not advertise support for this protocol towards
-	/// other peers. If this is `Some` but the channel is closed, then the local node will
-	/// advertise support for this protocol, but any incoming request will lead to an error being
-	/// sent back.
-	pub inbound_queue: Option<mpsc::Sender<IncomingRequest>>,
+	/// Name of the protocol on the wire. Should be something like `/foo/bar`.
+	pub name: Cow<'static, str>,
+
+	/// Maximum allowed size, in bytes, of a request.
+	///
+	/// Any request larger than this value will be declined as a way to avoid allocating too
+	/// much memory for it.
+	pub max_request_size: u64,
+
+	/// Maximum allowed size, in bytes, of a response.
+	///
+	/// Any response larger than this value will be declined as a way to avoid allocating too
+	/// much memory for it.
+	pub max_response_size: u64,
+
+	/// Duration after which emitted requests are considered timed out.
+	///
+	/// If you expect the response to come back quickly, you should set this to a smaller duration.
+	pub request_timeout: Duration,
+
+	/// Channel on which the networking service will send incoming requests.
+	///
+	/// Every time a peer sends a request to the local node using this protocol, the networking
+	/// service will push an element on this channel. The receiving side of this channel then has
+	/// to pull this element, process the request, and send back the response to the peer.
+	///
+	/// The size of the channel has to be carefully chosen. If the channel is full, the networking
+	/// service will discard the incoming request and send back an error to the peer. Consequently,
+	/// the channel being full is an indicator that the node is overloaded.
+	///
+	/// You can typically set the size of the channel to `T / d`, where `T` is the
+	/// `request_timeout` and `d` is the expected average duration of CPU and I/O it takes to
+	/// build a response.
+	///
+	/// Can be `None` if the local node does not support answering incoming requests.
+	/// If this is `None`, then the local node will not advertise support for this protocol towards
+	/// other peers. If this is `Some` but the channel is closed, then the local node will
+	/// advertise support for this protocol, but any incoming request will lead to an error being
+	/// sent back.
+	pub inbound_queue: Option<mpsc::Sender<IncomingRequest>>,
 }
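To put a concrete number on the `T / d` rule above: with a hypothetical 30 second `request_timeout` and an assumed average of 100 ms of CPU and I/O per response, the inbound queue should hold about 300 requests; anything queued beyond that will already have timed out on the requester's side by the time it can be answered. A sketch of the arithmetic (the figures are illustrative assumptions, not recommendations from this patch):

use std::time::Duration;

fn main() {
    // T: how long a requester will wait before giving up.
    let request_timeout = Duration::from_secs(30);
    // d: assumed average time to build one response (illustrative figure).
    let avg_response_build_time = Duration::from_millis(100);

    // T / d: with more requests than this queued, the oldest entries will
    // have timed out on the remote side before we get to answer them.
    let queue_size = request_timeout.as_millis() / avg_response_build_time.as_millis();
    assert_eq!(queue_size, 300);
    println!("inbound queue capacity = {}", queue_size);
}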
 
 /// A single request received by a peer on a request-response protocol.
@@ -111,8 +121,38 @@ pub struct IncomingRequest {
 	/// [`ProtocolConfig::max_request_size`].
 	pub payload: Vec<u8>,
 
-	/// Channel to send back the response to.
-	pub pending_response: oneshot::Sender<Result<Vec<u8>, ()>>,
+	/// Channel to send back the response.
+	///
+	/// There are two ways to indicate that handling the request failed:
+	///
+	/// 1. Drop `pending_response`, thus not changing the reputation of the peer.
+	///
+	/// 2. Send an `Err(())` via `pending_response`, optionally including reputation changes for
+	///    the given peer.
+	pub pending_response: oneshot::Sender<OutgoingResponse>,
+}
+
+/// Response for an incoming request to be sent by a request protocol handler.
+#[derive(Debug)]
+pub struct OutgoingResponse {
+	/// The payload of the response.
+	///
+	/// `Err(())` if none is available, e.g. due to an error while handling the request.
+	pub result: Result<Vec<u8>, ()>,
+
+	/// Reputation changes accrued while handling the request. To be applied to the reputation of
+	/// the peer sending the request.
+	pub reputation_changes: Vec<ReputationChange>,
+
+	/// If provided, the `oneshot::Sender` will be notified when the request has been sent to the
+	/// peer.
+	///
+	/// > **Note**: Operating systems typically maintain a buffer of a few dozen kilobytes of
+	/// >           outgoing data for each TCP socket, and it is not possible for a user
+	/// >           application to inspect this buffer. This channel here is not actually notified
+	/// >           when the response has been fully sent out, but rather when it has fully been
+	/// >           written to the buffer managed by the operating system.
+	pub sent_feedback: Option<oneshot::Sender<()>>,
 }
 
 /// Event generated by the [`RequestResponsesBehaviour`].
@@ -126,19 +166,68 @@ pub enum Event {
 		peer: PeerId,
 		/// Name of the protocol in question.
 		protocol: Cow<'static, str>,
-		/// If `Ok`, contains the time elapsed between when we received the request and when we
-		/// sent back the response. If `Err`, the error that happened.
+		/// Whether handling the request was successful or unsuccessful.
+		///
+		/// When successful, contains the time elapsed between when we received the request and
+		/// when we sent back the response. When unsuccessful, contains the failure reason.
 		result: Result<Duration, ResponseFailure>,
 	},
 
 	/// A request initiated using [`RequestResponsesBehaviour::send_request`] has succeeded or
 	/// failed.
+	///
+	/// This event is generated for statistics purposes.
 	RequestFinished {
-		/// Request that has succeeded.
-		request_id: RequestId,
-		/// Response sent by the remote or reason for failure.
-		result: Result<Vec<u8>, RequestFailure>,
+		/// Peer that we sent a request to.
+		peer: PeerId,
+		/// Name of the protocol in question.
+		protocol: Cow<'static, str>,
+		/// Duration the request took.
+		duration: Duration,
+		/// Result of the request.
+		result: Result<(), RequestFailure>,
 	},
+
+	/// A request protocol handler issued reputation changes for the given peer.
+	ReputationChanges { peer: PeerId, changes: Vec<ReputationChange> },
+}
+
+/// Combination of a protocol name and a request id.
+///
+/// Uniquely identifies an inbound or outbound request among all handled protocols. Note however
+/// that uniqueness is only guaranteed between two inbound and likewise between two outbound
+/// requests. There is no uniqueness guarantee in a set of both inbound and outbound
+/// [`ProtocolRequestId`]s.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+struct ProtocolRequestId {
+	protocol: Cow<'static, str>,
+	request_id: RequestId,
+}
+
+impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId {
+	fn from((protocol, request_id): (Cow<'static, str>, RequestId)) -> Self {
+		Self { protocol, request_id }
+	}
+}
+
+/// When sending a request, what to do on a disconnected recipient.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub enum IfDisconnected {
+	/// Try to connect to the peer.
+	TryConnect,
+	/// Just fail if the destination is not yet connected.
+	ImmediateError,
+}
+
+/// Convenience functions for `IfDisconnected`.
+impl IfDisconnected {
+	/// Shall we connect to a disconnected peer?
+ pub fn should_connect(self) -> bool { + match self { + Self::TryConnect => true, + Self::ImmediateError => false, + } + } } /// Implementation of `NetworkBehaviour` that provides support for request-response protocols. @@ -148,27 +237,34 @@ pub struct RequestResponsesBehaviour { /// "response builder" used to build responses for incoming requests. protocols: HashMap< Cow<'static, str>, - (RequestResponse, Option>) + (RequestResponse, Option>), >, + /// Pending requests, passed down to a [`RequestResponse`] behaviour, awaiting a reply. + pending_requests: + HashMap, RequestFailure>>)>, + /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the - /// response to send back to the remote. + /// start time and the response to send back to the remote. pending_responses: stream::FuturesUnordered< - Pin + Send>> + Pin> + Send>>, >, + + /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. + pending_responses_arrival_time: HashMap, + + /// Whenever a response is received on `pending_responses`, insert a channel to be notified + /// when the request has been sent out. + send_feedback: HashMap>, } /// Generated by the response builder and waiting to be processed. -enum RequestProcessingOutcome { - Response { - protocol: Cow<'static, str>, - inner_channel: ResponseChannel, ()>>, - response: Vec, - }, - Busy { - peer: PeerId, - protocol: Cow<'static, str>, - }, +struct RequestProcessingOutcome { + peer: PeerId, + request_id: RequestId, + protocol: Cow<'static, str>, + inner_channel: ResponseChannel, ()>>, + response: OutgoingResponse, } impl RequestResponsesBehaviour { @@ -187,57 +283,90 @@ impl RequestResponsesBehaviour { ProtocolSupport::Outbound }; - let rq_rp = RequestResponse::new(GenericCodec { - max_request_size: protocol.max_request_size, - max_response_size: protocol.max_response_size, - }, iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), cfg); + let rq_rp = RequestResponse::new( + GenericCodec { + max_request_size: protocol.max_request_size, + max_response_size: protocol.max_response_size, + }, + iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), + cfg, + ); match protocols.entry(protocol.name) { Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)), - Entry::Occupied(e) => - return Err(RegisterError::DuplicateProtocol(e.key().clone())), + Entry::Occupied(e) => return Err(RegisterError::DuplicateProtocol(e.key().clone())), }; } Ok(Self { protocols, - pending_responses: stream::FuturesUnordered::new(), + pending_requests: Default::default(), + pending_responses: Default::default(), + pending_responses_arrival_time: Default::default(), + send_feedback: Default::default(), }) } /// Initiates sending a request. /// - /// An error is returned if we are not connected to the target peer or if the protocol doesn't - /// match one that has been registered. - pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) - -> Result - { - if let Some((protocol, _)) = self.protocols.get_mut(protocol) { - if protocol.is_connected(target) { - Ok(protocol.send_request(target, request)) + /// If there is no established connection to the target peer, the behavior is determined by the + /// choice of `connect`. + /// + /// An error is returned if the protocol doesn't match one that has been registered. 
+ pub fn send_request( + &mut self, + target: &PeerId, + protocol_name: &str, + request: Vec, + pending_response: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ) { + if let Some((protocol, _)) = self.protocols.get_mut(protocol_name) { + if protocol.is_connected(target) || connect.should_connect() { + let request_id = protocol.send_request(target, request); + let prev_req_id = self.pending_requests.insert( + (protocol_name.to_string().into(), request_id).into(), + (Instant::now(), pending_response), + ); + debug_assert!(prev_req_id.is_none(), "Expect request id to be unique."); } else { - Err(SendRequestError::NotConnected) + if pending_response.send(Err(RequestFailure::NotConnected)).is_err() { + log::debug!( + target: "sub-libp2p", + "Not connected to peer {:?}. At the same time local \ + node is no longer interested in the result.", + target, + ); + }; } } else { - Err(SendRequestError::UnknownProtocol) + if pending_response.send(Err(RequestFailure::UnknownProtocol)).is_err() { + log::debug!( + target: "sub-libp2p", + "Unknown protocol {:?}. At the same time local \ + node is no longer interested in the result.", + protocol_name, + ); + }; } } } impl NetworkBehaviour for RequestResponsesBehaviour { - type ProtocolsHandler = MultiHandler< - String, - as NetworkBehaviour>::ProtocolsHandler, - >; + type ProtocolsHandler = + MultiHandler as NetworkBehaviour>::ProtocolsHandler>; type OutEvent = Event; fn new_handler(&mut self) -> Self::ProtocolsHandler { - let iter = self.protocols.iter_mut() + let iter = self + .protocols + .iter_mut() .map(|(p, (r, _))| (p.to_string(), NetworkBehaviour::new_handler(r))); - MultiHandler::try_from_iter(iter) - .expect("Protocols are in a HashMap and there can be at most one handler per \ - protocol name, which is the only possible error; qed") + MultiHandler::try_from_iter(iter).expect( + "Protocols are in a HashMap and there can be at most one handler per protocol name, \ + which is the only possible error; qed", + ) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { @@ -261,7 +390,12 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::inject_connection_closed(p, peer_id, conn, endpoint) } @@ -277,7 +411,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { &mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, - error: &dyn std::error::Error + error: &dyn std::error::Error, ) { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::inject_addr_reach_failure(p, peer_id, addr, error) @@ -305,9 +439,15 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_expired_external_addr(p, addr) + } + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_expired_listen_addr(p, addr) + NetworkBehaviour::inject_expired_listen_addr(p, id, addr) } } @@ -317,9 +457,15 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_new_listener(&mut self, id: ListenerId) { + 
for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_new_listener(p, id) + } + } + + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_new_listen_addr(p, addr) + NetworkBehaviour::inject_new_listen_addr(p, id, addr) } } @@ -347,23 +493,45 @@ impl NetworkBehaviour for RequestResponsesBehaviour { > { 'poll_all: loop { // Poll to see if any response is ready to be sent back. - while let Poll::Ready(Some(result)) = self.pending_responses.poll_next_unpin(cx) { - match result { - RequestProcessingOutcome::Response { - protocol, inner_channel, response - } => { - if let Some((protocol, _)) = self.protocols.get_mut(&*protocol) { - protocol.send_response(inner_channel, Ok(response)); + while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { + let RequestProcessingOutcome { + peer, + request_id, + protocol: protocol_name, + inner_channel, + response: OutgoingResponse { result, reputation_changes, sent_feedback }, + } = match outcome { + Some(outcome) => outcome, + // The response builder was too busy or handling the request failed. This is + // later on reported as a `InboundFailure::Omission`. + None => continue, + }; + + if let Ok(payload) = result { + if let Some((protocol, _)) = self.protocols.get_mut(&*protocol_name) { + if let Err(_) = protocol.send_response(inner_channel, Ok(payload)) { + // Note: Failure is handled further below when receiving + // `InboundFailure` event from `RequestResponse` behaviour. + log::debug!( + target: "sub-libp2p", + "Failed to send response for {:?} on protocol {:?} due to a \ + timeout or due to the connection to the peer being closed. \ + Dropping response", + request_id, protocol_name, + ); + } else { + if let Some(sent_feedback) = sent_feedback { + self.send_feedback + .insert((protocol_name, request_id).into(), sent_feedback); + } } } - RequestProcessingOutcome::Busy { peer, protocol } => { - let out = Event::InboundRequest { - peer, - protocol, - result: Err(ResponseFailure::Busy), - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } + } + + if !reputation_changes.is_empty() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent( + Event::ReputationChanges { peer, changes: reputation_changes }, + )) } } @@ -377,115 +545,215 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Other events generated by the underlying behaviour are transparently // passed through. 
NetworkBehaviourAction::DialAddress { address } => { - log::error!("The request-response isn't supposed to start dialing peers"); + log::error!( + "The request-response isn't supposed to start dialing peers" + ); return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) - } - NetworkBehaviourAction::DialPeer { peer_id, condition } => { - log::error!("The request-response isn't supposed to start dialing peers"); + }, + NetworkBehaviourAction::DialPeer { peer_id, condition } => return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, - }) - } - NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event, - } => { + }), + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: ((*protocol).to_string(), event), - }) - } - NetworkBehaviourAction::ReportObservedAddr { address } => { + }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, - }) - } + score, + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, + }), }; match ev { // Received a request from a remote. RequestResponseEvent::Message { peer, - message: RequestResponseMessage::Request { request, channel, .. }, + message: + RequestResponseMessage::Request { request_id, request, channel, .. }, } => { + self.pending_responses_arrival_time.insert( + (protocol.clone(), request_id.clone()).into(), + Instant::now(), + ); + let (tx, rx) = oneshot::channel(); // Submit the request to the "response builder" passed by the user at // initialization. if let Some(resp_builder) = resp_builder { - // If the response builder is too busy, silently drop `tx`. - // This will be reported as a `Busy` error. + // If the response builder is too busy, silently drop `tx`. This + // will be reported by the corresponding `RequestResponse` through + // an `InboundFailure::Omission` event. let _ = resp_builder.try_send(IncomingRequest { peer: peer.clone(), payload: request, pending_response: tx, }); + } else { + debug_assert!(false, "Received message on outbound-only protocol."); } let protocol = protocol.clone(); self.pending_responses.push(Box::pin(async move { // The `tx` created above can be dropped if we are not capable of - // processing this request, which is reflected as a "Busy" error. + // processing this request, which is reflected as a + // `InboundFailure::Omission` event. if let Ok(response) = rx.await { - RequestProcessingOutcome::Response { - protocol, inner_channel: channel, response - } + Some(RequestProcessingOutcome { + peer, + request_id, + protocol, + inner_channel: channel, + response, + }) } else { - RequestProcessingOutcome::Busy { peer, protocol } + None } })); // This `continue` makes sure that `pending_responses` gets polled // after we have added the new element. - continue 'poll_all; - } + continue 'poll_all + }, // Received a response from a remote to one of our requests. RequestResponseEvent::Message { - message: - RequestResponseMessage::Response { - request_id, - response, - }, + peer, + message: RequestResponseMessage::Response { request_id, response }, .. 
} => { + let (started, delivered) = match self + .pending_requests + .remove(&(protocol.clone(), request_id).into()) + { + Some((started, pending_response)) => { + let delivered = pending_response + .send(response.map_err(|()| RequestFailure::Refused)) + .map_err(|_| RequestFailure::Obsolete); + (started, delivered) + }, + None => { + log::warn!( + target: "sub-libp2p", + "Received `RequestResponseEvent::Message` with unexpected request id {:?}", + request_id, + ); + debug_assert!(false); + continue + }, + }; + let out = Event::RequestFinished { - request_id, - result: response.map_err(|()| RequestFailure::Refused), + peer, + protocol: protocol.clone(), + duration: started.elapsed(), + result: delivered, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } + + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, // One of our requests has failed. RequestResponseEvent::OutboundFailure { - request_id, - error, - .. + peer, request_id, error, .. } => { + let started = match self + .pending_requests + .remove(&(protocol.clone(), request_id).into()) + { + Some((started, pending_response)) => { + if pending_response + .send(Err(RequestFailure::Network(error.clone()))) + .is_err() + { + log::debug!( + target: "sub-libp2p", + "Request with id {:?} failed. At the same time local \ + node is no longer interested in the result.", + request_id, + ); + } + started + }, + None => { + log::warn!( + target: "sub-libp2p", + "Received `RequestResponseEvent::Message` with unexpected request id {:?}", + request_id, + ); + debug_assert!(false); + continue + }, + }; + let out = Event::RequestFinished { - request_id, + peer, + protocol: protocol.clone(), + duration: started.elapsed(), result: Err(RequestFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } - // Remote has tried to send a request but failed. - RequestResponseEvent::InboundFailure { peer, error, .. } => { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, + + // An inbound request failed, either while reading the request or due to + // failing to send a response. + RequestResponseEvent::InboundFailure { + request_id, peer, error, .. + } => { + self.pending_responses_arrival_time + .remove(&(protocol.clone(), request_id).into()); + self.send_feedback.remove(&(protocol.clone(), request_id).into()); let out = Event::InboundRequest { peer, protocol: protocol.clone(), result: Err(ResponseFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, + + // A response to an inbound request has been sent. + RequestResponseEvent::ResponseSent { request_id, peer } => { + let arrival_time = self + .pending_responses_arrival_time + .remove(&(protocol.clone(), request_id).into()) + .map(|t| t.elapsed()) + .expect( + "Time is added for each inbound request on arrival and only \ + removed on success (`ResponseSent`) or failure \ + (`InboundFailure`). 
One can not receive a success event for a \ + request that either never arrived, or that has previously \ + failed; qed.", + ); + + if let Some(send_feedback) = + self.send_feedback.remove(&(protocol.clone(), request_id).into()) + { + let _ = send_feedback.send(()); + } + + let out = Event::InboundRequest { + peer, + protocol: protocol.clone(), + result: Ok(arrival_time), + }; + + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, }; } } - break Poll::Pending; + break Poll::Pending } } } @@ -497,40 +765,35 @@ pub enum RegisterError { DuplicateProtocol(#[error(ignore)] Cow<'static, str>), } -/// Error when sending a request. +/// Error in a request. #[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum SendRequestError { +pub enum RequestFailure { /// We are not currently connected to the requested peer. NotConnected, /// Given protocol hasn't been registered. UnknownProtocol, -} - -/// Error in a request. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum RequestFailure { /// Remote has closed the substream before answering, thereby signaling that it considers the /// request as valid, but refused to answer it. Refused, + /// The remote replied, but the local node is no longer interested in the response. + Obsolete, /// Problem on the network. - #[display(fmt = "Problem on the network")] - Network(#[error(ignore)] OutboundFailure), + #[display(fmt = "Problem on the network: {}", _0)] + Network(OutboundFailure), } /// Error when processing a request sent by a remote. #[derive(Debug, derive_more::Display, derive_more::Error)] pub enum ResponseFailure { - /// Internal response builder is too busy to process this request. - Busy, /// Problem on the network. - #[display(fmt = "Problem on the network")] - Network(#[error(ignore)] InboundFailure), + #[display(fmt = "Problem on the network: {}", _0)] + Network(InboundFailure), } /// Implements the libp2p [`RequestResponseCodec`] trait. Defines how streams of bytes are turned /// into requests and responses and vice-versa. #[derive(Debug, Clone)] -#[doc(hidden)] // Needs to be public in order to satisfy the Rust compiler. +#[doc(hidden)] // Needs to be public in order to satisfy the Rust compiler. pub struct GenericCodec { max_request_size: u64, max_response_size: u64, @@ -551,13 +814,14 @@ impl RequestResponseCodec for GenericCodec { T: AsyncRead + Unpin + Send, { // Read the length. - let length = unsigned_varint::aio::read_usize(&mut io).await + let length = unsigned_varint::aio::read_usize(&mut io) + .await .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; - if length > usize::try_from(self.max_request_size).unwrap_or(usize::max_value()) { + if length > usize::try_from(self.max_request_size).unwrap_or(usize::MAX) { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("Request size exceeds limit: {} > {}", length, self.max_request_size) - )); + format!("Request size exceeds limit: {} > {}", length, self.max_request_size), + )) } // Read the payload. 
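For readers unfamiliar with the wire format handled by `GenericCodec`: each request and response is an unsigned-varint length prefix followed by exactly that many payload bytes, and the length is validated against the configured maximum before anything is allocated. Below is a self-contained sketch of that framing; the LEB128 coding is hand-rolled purely for illustration (the actual codec delegates to the `unsigned-varint` crate):

/// Encode `len` as an unsigned LEB128 varint, as used by the length prefix.
fn encode_varint(mut len: u64, out: &mut Vec<u8>) {
    loop {
        let byte = (len & 0x7f) as u8;
        len >>= 7;
        if len == 0 {
            out.push(byte);
            break
        }
        out.push(byte | 0x80);
    }
}

/// Decode the varint prefix; returns (length, bytes consumed), or `None` if
/// the prefix is malformed. A u64 needs at most 10 varint bytes.
fn decode_varint(buf: &[u8]) -> Option<(u64, usize)> {
    let mut len = 0u64;
    for (i, byte) in buf.iter().enumerate().take(10) {
        len |= u64::from(byte & 0x7f) << (7 * i);
        if byte & 0x80 == 0 {
            return Some((len, i + 1))
        }
    }
    None
}

fn main() {
    let payload = b"this is a request";
    let mut frame = Vec::new();
    encode_varint(payload.len() as u64, &mut frame);
    frame.extend_from_slice(payload);

    let (len, consumed) = decode_varint(&frame).unwrap();
    // The size check happens *before* the payload is read or allocated.
    let max_request_size = 1024u64;
    assert!(len <= max_request_size, "request size exceeds limit");
    assert_eq!(&frame[consumed..][..len as usize], payload);
}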
@@ -584,17 +848,15 @@ impl RequestResponseCodec for GenericCodec { Ok(l) => l, Err(unsigned_varint::io::ReadError::Io(err)) if matches!(err.kind(), io::ErrorKind::UnexpectedEof) => - { - return Ok(Err(())); - } + return Ok(Err(())), Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), }; - if length > usize::try_from(self.max_response_size).unwrap_or(usize::max_value()) { + if length > usize::try_from(self.max_response_size).unwrap_or(usize::MAX) { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("Response size exceeds limit: {} > {}", length, self.max_response_size) - )); + format!("Response size exceeds limit: {} > {}", length, self.max_response_size), + )) } // Read the payload. @@ -655,59 +917,86 @@ impl RequestResponseCodec for GenericCodec { #[cfg(test)] mod tests { - use futures::{channel::mpsc, prelude::*}; - use libp2p::identity::Keypair; - use libp2p::Multiaddr; - use libp2p::core::upgrade; - use libp2p::core::transport::{Transport, MemoryTransport}; - use libp2p::noise; - use libp2p::swarm::{Swarm, SwarmEvent}; + use super::*; + + use futures::{ + channel::{mpsc, oneshot}, + executor::LocalPool, + task::Spawn, + }; + use libp2p::{ + core::{ + transport::{MemoryTransport, Transport}, + upgrade, + }, + identity::Keypair, + noise, + swarm::{Swarm, SwarmEvent}, + Multiaddr, + }; use std::{iter, time::Duration}; + fn build_swarm( + list: impl Iterator, + ) -> (Swarm, Multiaddr) { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = + noise::Keypair::::new().into_authentic(&keypair).unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(libp2p::yamux::YamuxConfig::default()) + .boxed(); + + let behaviour = RequestResponsesBehaviour::new(list).unwrap(); + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + swarm.listen_on(listen_addr.clone()).unwrap(); + (swarm, listen_addr) + } + #[test] fn basic_request_response_works() { - let protocol_name = "/test/req-rep/1"; + let protocol_name = "/test/req-resp/1"; + let mut pool = LocalPool::new(); // Build swarms whose behaviour is `RequestResponsesBehaviour`. 
let mut swarms = (0..2) .map(|_| { - let keypair = Keypair::generate_ed25519(); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); - - let transport = MemoryTransport - .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(libp2p::yamux::Config::default()); - - let behaviour = { - let (tx, mut rx) = mpsc::channel(64); - - let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { - name: From::from(protocol_name), - max_request_size: 1024, - max_response_size: 1024 * 1024, - request_timeout: Duration::from_secs(30), - inbound_queue: Some(tx), - })).unwrap(); - - async_std::task::spawn(async move { - while let Some(rq) = rx.next().await { - assert_eq!(rq.payload, b"this is a request"); - let _ = rq.pending_response.send(b"this is a response".to_vec()); + let (tx, mut rx) = mpsc::channel::(64); + + pool.spawner() + .spawn_obj( + async move { + while let Some(rq) = rx.next().await { + let (fb_tx, fb_rx) = oneshot::channel(); + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(super::OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: Some(fb_tx), + }); + fb_rx.await.unwrap(); + } } - }); + .boxed() + .into(), + ) + .unwrap(); - b + let protocol_config = ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), }; - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); - let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); - (swarm, listen_addr) + build_swarm(iter::once(protocol_config)) }) .collect::>(); @@ -718,98 +1007,92 @@ mod tests { Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); } - // Running `swarm[0]` in the background until a `InboundRequest` event happens, - // which is a hint about the test having ended. - async_std::task::spawn({ - let (mut swarm, _) = swarms.remove(0); - async move { - loop { - match swarm.next_event().await { - SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { - assert!(result.is_ok()); - break - }, - _ => {} + // Running `swarm[0]` in the background. + pool.spawner() + .spawn_obj({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.select_next_some().await { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + result.unwrap(); + }, + _ => {}, + } } } - } - }); + .boxed() + .into() + }) + .unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); - async_std::task::block_on(async move { - let mut sent_request_id = None; + pool.run_until(async move { + let mut response_receiver = None; loop { - match swarm.next_event().await { + match swarm.select_next_some().await { SwarmEvent::ConnectionEstablished { peer_id, .. 
} => { - let id = swarm.send_request( + let (sender, receiver) = oneshot::channel(); + swarm.behaviour_mut().send_request( &peer_id, protocol_name, - b"this is a request".to_vec() - ).unwrap(); - assert!(sent_request_id.is_none()); - sent_request_id = Some(id); - } - SwarmEvent::Behaviour(super::Event::RequestFinished { - request_id, - result, - }) => { - assert_eq!(Some(request_id), sent_request_id); - let result = result.unwrap(); - assert_eq!(result, b"this is a response"); - break; - } - _ => {} + b"this is a request".to_vec(), + sender, + IfDisconnected::ImmediateError, + ); + assert!(response_receiver.is_none()); + response_receiver = Some(receiver); + }, + SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => { + result.unwrap(); + break + }, + _ => {}, } } + + assert_eq!(response_receiver.unwrap().await.unwrap().unwrap(), b"this is a response"); }); } #[test] fn max_response_size_exceeded() { - let protocol_name = "/test/req-rep/1"; + let protocol_name = "/test/req-resp/1"; + let mut pool = LocalPool::new(); // Build swarms whose behaviour is `RequestResponsesBehaviour`. let mut swarms = (0..2) .map(|_| { - let keypair = Keypair::generate_ed25519(); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); - - let transport = MemoryTransport - .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(libp2p::yamux::Config::default()); - - let behaviour = { - let (tx, mut rx) = mpsc::channel(64); - - let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { - name: From::from(protocol_name), - max_request_size: 1024, - max_response_size: 8, // <-- important for the test - request_timeout: Duration::from_secs(30), - inbound_queue: Some(tx), - })).unwrap(); - - async_std::task::spawn(async move { - while let Some(rq) = rx.next().await { - assert_eq!(rq.payload, b"this is a request"); - let _ = rq.pending_response.send(b"this response exceeds the limit".to_vec()); + let (tx, mut rx) = mpsc::channel::(64); + + pool.spawner() + .spawn_obj( + async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(super::OutgoingResponse { + result: Ok(b"this response exceeds the limit".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: None, + }); + } } - }); + .boxed() + .into(), + ) + .unwrap(); - b + let protocol_config = ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 8, // <-- important for the test + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), }; - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); - let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); - (swarm, listen_addr) + build_swarm(iter::once(protocol_config)) }) .collect::>(); @@ -822,51 +1105,218 @@ mod tests { // Running `swarm[0]` in the background until a `InboundRequest` event happens, // which is a hint about the test having ended. - async_std::task::spawn({ - let (mut swarm, _) = swarms.remove(0); - async move { - loop { - match swarm.next_event().await { - SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. 
}) => { - assert!(result.is_ok()); - break - }, - _ => {} + pool.spawner() + .spawn_obj({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.select_next_some().await { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + assert!(result.is_ok()); + break + }, + _ => {}, + } } } - } - }); + .boxed() + .into() + }) + .unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); - async_std::task::block_on(async move { - let mut sent_request_id = None; + pool.run_until(async move { + let mut response_receiver = None; loop { - match swarm.next_event().await { + match swarm.select_next_some().await { SwarmEvent::ConnectionEstablished { peer_id, .. } => { - let id = swarm.send_request( + let (sender, receiver) = oneshot::channel(); + swarm.behaviour_mut().send_request( &peer_id, protocol_name, - b"this is a request".to_vec() - ).unwrap(); - assert!(sent_request_id.is_none()); - sent_request_id = Some(id); - } - SwarmEvent::Behaviour(super::Event::RequestFinished { - request_id, - result, - }) => { - assert_eq!(Some(request_id), sent_request_id); - match result { - Err(super::RequestFailure::Network(super::OutboundFailure::ConnectionClosed)) => {}, - _ => panic!() + b"this is a request".to_vec(), + sender, + IfDisconnected::ImmediateError, + ); + assert!(response_receiver.is_none()); + response_receiver = Some(receiver); + }, + SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => { + assert!(result.is_err()); + break + }, + _ => {}, + } + } + + match response_receiver.unwrap().await.unwrap().unwrap_err() { + RequestFailure::Network(OutboundFailure::ConnectionClosed) => {}, + _ => panic!(), + } + }); + } + + /// A [`RequestId`] is a unique identifier among either all inbound or all outbound requests for + /// a single [`RequestResponse`] behaviour. It is not guaranteed to be unique across multiple + /// [`RequestResponse`] behaviours. Thus when handling [`RequestId`] in the context of multiple + /// [`RequestResponse`] behaviours, one needs to couple the protocol name with the [`RequestId`] + /// to get a unique request identifier. + /// + /// This test ensures that two requests on different protocols can be handled concurrently + /// without a [`RequestId`] collision. + /// + /// See [`ProtocolRequestId`] for additional information. 
+ #[test] + fn request_id_collision() { + let protocol_name_1 = "/test/req-resp-1/1"; + let protocol_name_2 = "/test/req-resp-2/1"; + let mut pool = LocalPool::new(); + + let mut swarm_1 = { + let protocol_configs = vec![ + ProtocolConfig { + name: From::from(protocol_name_1), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: None, + }, + ProtocolConfig { + name: From::from(protocol_name_2), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: None, + }, + ]; + + build_swarm(protocol_configs.into_iter()).0 + }; + + let (mut swarm_2, mut swarm_2_handler_1, mut swarm_2_handler_2, listen_add_2) = { + let (tx_1, rx_1) = mpsc::channel(64); + let (tx_2, rx_2) = mpsc::channel(64); + + let protocol_configs = vec![ + ProtocolConfig { + name: From::from(protocol_name_1), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx_1), + }, + ProtocolConfig { + name: From::from(protocol_name_2), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx_2), + }, + ]; + + let (swarm, listen_addr) = build_swarm(protocol_configs.into_iter()); + + (swarm, rx_1, rx_2, listen_addr) + }; + + // Ask swarm 1 to dial swarm 2. There isn't any discovery mechanism in place in this test, + // so they wouldn't connect to each other. + swarm_1.dial_addr(listen_add_2).unwrap(); + + // Run swarm 2 in the background, receiving two requests. + pool.spawner() + .spawn_obj( + async move { + loop { + match swarm_2.select_next_some().await { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + result.unwrap(); + }, + _ => {}, } - break; } - _ => {} + } + .boxed() + .into(), + ) + .unwrap(); + + // Handle both requests sent by swarm 1 to swarm 2 in the background. + // + // Make sure both requests overlap, by answering the first only after receiving the + // second. + pool.spawner() + .spawn_obj( + async move { + let protocol_1_request = swarm_2_handler_1.next().await; + let protocol_2_request = swarm_2_handler_2.next().await; + + protocol_1_request + .unwrap() + .pending_response + .send(OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .unwrap(); + protocol_2_request + .unwrap() + .pending_response + .send(OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .unwrap(); + } + .boxed() + .into(), + ) + .unwrap(); + + // Have swarm 1 send two requests to swarm 2 and await responses. + pool.run_until(async move { + let mut response_receivers = None; + let mut num_responses = 0; + + loop { + match swarm_1.select_next_some().await { + SwarmEvent::ConnectionEstablished { peer_id, .. } => { + let (sender_1, receiver_1) = oneshot::channel(); + let (sender_2, receiver_2) = oneshot::channel(); + swarm_1.behaviour_mut().send_request( + &peer_id, + protocol_name_1, + b"this is a request".to_vec(), + sender_1, + IfDisconnected::ImmediateError, + ); + swarm_1.behaviour_mut().send_request( + &peer_id, + protocol_name_2, + b"this is a request".to_vec(), + sender_2, + IfDisconnected::ImmediateError, + ); + assert!(response_receivers.is_none()); + response_receivers = Some((receiver_1, receiver_2)); + }, + SwarmEvent::Behaviour(Event::RequestFinished { result, .. 
}) => {
+						num_responses += 1;
+						result.unwrap();
+						if num_responses == 2 {
+							break
+						}
+					},
+					_ => {},
 				}
 			}
+
+			let (response_receiver_1, response_receiver_2) = response_receivers.unwrap();
+			assert_eq!(response_receiver_1.await.unwrap().unwrap(), b"this is a response");
+			assert_eq!(response_receiver_2.await.unwrap().unwrap(), b"this is a response");
 		});
 	}
 }
diff --git a/client/network/src/schema.rs b/client/network/src/schema.rs
index 44fbbffd25406..d4572fca7594c 100644
--- a/client/network/src/schema.rs
+++ b/client/network/src/schema.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
 
 // This program is free software: you can redistribute it and/or modify
@@ -20,10 +20,11 @@
 pub mod v1 {
 	include!(concat!(env!("OUT_DIR"), "/api.v1.rs"));
 
-	pub mod finality {
-		include!(concat!(env!("OUT_DIR"), "/api.v1.finality.rs"));
-	}
 	pub mod light {
 		include!(concat!(env!("OUT_DIR"), "/api.v1.light.rs"));
 	}
 }
+
+pub mod bitswap {
+	include!(concat!(env!("OUT_DIR"), "/bitswap.message.rs"));
+}
diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto
index a933c5811c109..c5333c7dcdbf1 100644
--- a/client/network/src/schema/api.v1.proto
+++ b/client/network/src/schema/api.v1.proto
@@ -29,6 +29,10 @@ message BlockRequest {
 	Direction direction = 5;
 	// Maximum number of blocks to return. An implementation defined maximum is used when unspecified.
 	uint32 max_blocks = 6; // optional
+	// Indicate to the receiver that we support multiple justifications. If the responder also
+	// supports this it will populate the multiple justifications field in `BlockData` instead of
+	// the single justification field.
+	bool support_multiple_justifications = 7; // optional
 }
 
 // Response to `BlockRequest`
@@ -56,5 +60,38 @@ message BlockData {
 	// doesn't make it possible to differentiate between a lack of justification and an empty
 	// justification.
 	bool is_empty_justification = 7; // optional, false if absent
+	// Justifications if requested.
+	// Unlike the field for a single justification, this field does not require an associated
+	// boolean to differentiate between the lack of justifications and empty justification(s). This
+	// is because empty justifications, like all justifications, are paired with a non-empty
+	// consensus engine ID.
+	bytes justifications = 8; // optional
+	// Indexed block body if requested.
+	repeated bytes indexed_body = 9; // optional
+}
+
+// Request storage data from a peer.
+message StateRequest {
+	// Block header hash.
+	bytes block = 1;
+	// Start from this key. Equivalent to an empty key if omitted.
+	bytes start = 2; // optional
+	// If 'true', indicates that the response should contain raw key-values, rather than a proof.
+	bool no_proof = 3;
+}
+
+message StateResponse {
+	// A collection of key-value pairs. Only populated if `no_proof` is `true`.
+	repeated StateEntry entries = 1;
+	// If `no_proof` was false in the request, this contains the proof nodes.
+	bytes proof = 2;
+	// Set to true when there are no more keys to return.
+	bool complete = 3;
+}
+
+// A key-value pair.
+message StateEntry {
+	bytes key = 1;
+	bytes value = 2;
+}
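The `StateRequest`/`StateResponse` pair added above forms a simple cursor protocol: the first request carries an empty `start`, each follow-up request sets `start` to the last key received, and the exchange ends when the responder sets `complete`. A sketch of that paging loop, using plain Rust structs as stand-ins for the prost-generated message types (the fake responder and its two-keys-per-page behaviour are illustrative assumptions):

// Hand-written mirrors of the generated message types, for illustration only.
#[derive(Default)]
struct StateRequest {
    block: Vec<u8>,
    start: Vec<u8>,
    no_proof: bool,
}

struct StateEntry {
    key: Vec<u8>,
    value: Vec<u8>,
}

struct StateResponse {
    entries: Vec<StateEntry>,
    complete: bool,
}

/// A fake responder handing out at most two keys per response.
fn respond(req: &StateRequest, all: &[(u8, u8)]) -> StateResponse {
    let remaining: Vec<_> = all.iter().filter(|(k, _)| vec![*k] > req.start).take(2).collect();
    StateResponse {
        complete: remaining.len() < 2,
        entries: remaining
            .into_iter()
            .map(|&(k, v)| StateEntry { key: vec![k], value: vec![v] })
            .collect(),
    }
}

fn main() {
    let state = [(1u8, 10u8), (2, 20), (3, 30), (4, 40), (5, 50)];
    let mut req = StateRequest { block: vec![0xab], no_proof: true, ..Default::default() };
    let mut downloaded = Vec::new();

    loop {
        let resp = respond(&req, &state);
        if let Some(last) = resp.entries.last() {
            req.start = last.key.clone(); // move the cursor to the last key received
        }
        downloaded.extend(resp.entries);
        if resp.complete {
            break
        }
    }
    assert_eq!(downloaded.len(), 5);
}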
diff --git a/client/network/src/schema/bitswap.v1.2.0.proto b/client/network/src/schema/bitswap.v1.2.0.proto
new file mode 100644
index 0000000000000..a4138b516d63d
--- /dev/null
+++ b/client/network/src/schema/bitswap.v1.2.0.proto
@@ -0,0 +1,43 @@
+syntax = "proto3";
+
+package bitswap.message;
+
+message Message {
+	message Wantlist {
+		enum WantType {
+			Block = 0;
+			Have = 1;
+		}
+
+		message Entry {
+			bytes block = 1;       // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0)
+			int32 priority = 2;    // the priority (normalized). default to 1
+			bool cancel = 3;       // whether this revokes an entry
+			WantType wantType = 4; // Note: defaults to enum 0, ie Block
+			bool sendDontHave = 5; // Note: defaults to false
+		}
+
+		repeated Entry entries = 1; // a list of wantlist entries
+		bool full = 2;              // whether this is the full wantlist. default to false
+	}
+
+	message Block {
+		bytes prefix = 1; // CID prefix (cid version, multicodec, and multihash prefix (type + length))
+		bytes data = 2;
+	}
+
+	enum BlockPresenceType {
+		Have = 0;
+		DontHave = 1;
+	}
+	message BlockPresence {
+		bytes cid = 1;
+		BlockPresenceType type = 2;
+	}
+
+	Wantlist wantlist = 1;
+	repeated bytes blocks = 2;  // used to send Blocks in bitswap 1.0.0
+	repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0
+	repeated BlockPresence blockPresences = 4;
+	int32 pendingBytes = 5;
+}
diff --git a/client/network/src/schema/finality.v1.proto b/client/network/src/schema/finality.v1.proto
deleted file mode 100644
index 843bc4eca0990..0000000000000
--- a/client/network/src/schema/finality.v1.proto
+++ /dev/null
@@ -1,19 +0,0 @@
-// Schema definition for finality proof request/responses.
-
-syntax = "proto3";
-
-package api.v1.finality;
-
-// Request a finality proof from a peer.
-message FinalityProofRequest {
-	// SCALE-encoded hash of the block to request.
-	bytes block_hash = 1;
-	// Opaque chain-specific additional request data.
-	bytes request = 2;
-}
-
-// Response to a finality proof request.
-message FinalityProofResponse {
-	// Opaque chain-specific finality proof. Empty if no such proof exists.
-	bytes proof = 1; // optional
-}
diff --git a/client/network/src/service.rs b/client/network/src/service.rs
index f0cf79182bfc0..525470145b78c 100644
--- a/client/network/src/service.rs
+++ b/client/network/src/service.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
 
 // This program is free software: you can redistribute it and/or modify
@@ -28,42 +28,58 @@
 //! which is then processed by [`NetworkWorker::poll`].
use crate::{ - ExHashT, NetworkStateInfo, NetworkStatus, behaviour::{self, Behaviour, BehaviourOut}, - config::{parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, - DhtEvent, + bitswap::Bitswap, + config::{parse_str_addr, Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, + light_client_requests, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, on_demand_layer::AlwaysBadChecker, - light_client_handler, block_requests, finality_requests, - protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, - transport, ReputationChange, + protocol::{ + self, + event::Event, + message::generic::Roles, + sync::{Status as SyncStatus, SyncState}, + NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready, + }, + transactions, transport, DhtEvent, ExHashT, NetworkStateInfo, NetworkStatus, ReputationChange, }; + +use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; -use libp2p::{PeerId, multiaddr, Multiaddr}; -use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; -use libp2p::kad::record; -use libp2p::ping::handler::PingFailure; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handler::NodeHandlerWrapperError}; -use log::{error, info, trace, warn}; -use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; +use libp2p::{ + core::{ + connection::{ConnectionError, ConnectionLimits, PendingConnectionError}, + either::EitherError, + upgrade, ConnectedPoint, Executor, + }, + kad::record, + multiaddr, + ping::handler::PingFailure, + swarm::{ + protocols_handler::NodeHandlerWrapperError, AddressScore, NetworkBehaviour, SwarmBuilder, + SwarmEvent, + }, + Multiaddr, PeerId, +}; +use log::{debug, error, info, trace, warn}; +use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; +use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_peerset::PeersetHandle; -use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - ConsensusEngineId, -}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ borrow::Cow, + cmp, collections::{HashMap, HashSet}, - fs, + convert::TryFrom as _, + fs, iter, marker::PhantomData, - num:: NonZeroUsize, + num::NonZeroUsize, pin::Pin, str, sync::{ @@ -72,9 +88,10 @@ use std::{ }, task::Poll, }; -use wasm_timer::Instant; -pub use behaviour::{ResponseFailure, InboundFailure, RequestFailure, OutboundFailure}; +pub use behaviour::{ + IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, ResponseFailure, +}; mod metrics; mod out_events; @@ -100,9 +117,7 @@ pub struct NetworkService { to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. - peers_notifications_sinks: Arc>>, - /// For each legacy gossiping engine ID, the corresponding new protocol name. 
- protocol_name_by_engine: Mutex>>, + peers_notifications_sinks: Arc), NotificationsSink>>>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notifications_sizes_metric: Option, @@ -117,7 +132,7 @@ impl NetworkWorker { /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(params: Params) -> Result, Error> { + pub fn new(mut params: Params) -> Result { // Ensure the listen addresses are consistent with the transport. ensure_addresses_consistent_with_transport( params.network_config.listen_addresses.iter(), @@ -128,9 +143,20 @@ impl NetworkWorker { ¶ms.network_config.transport, )?; ensure_addresses_consistent_with_transport( - params.network_config.reserved_nodes.iter().map(|x| &x.multiaddr), + params + .network_config + .default_peers_set + .reserved_nodes + .iter() + .map(|x| &x.multiaddr), ¶ms.network_config.transport, )?; + for extra_set in ¶ms.network_config.extra_sets { + ensure_addresses_consistent_with_transport( + extra_set.set_config.reserved_nodes.iter().map(|x| &x.multiaddr), + ¶ms.network_config.transport, + )?; + } ensure_addresses_consistent_with_transport( params.network_config.public_addresses.iter(), ¶ms.network_config.transport, @@ -138,96 +164,16 @@ impl NetworkWorker { let (to_worker, from_service) = tracing_unbounded("mpsc_network_worker"); - if let Some(path) = params.network_config.net_config_path { - fs::create_dir_all(&path)?; + if let Some(path) = ¶ms.network_config.net_config_path { + fs::create_dir_all(path)?; } - // List of multiaddresses that we know in the network. - let mut known_addresses = Vec::new(); - let mut bootnodes = Vec::new(); - let mut boot_node_ids = HashSet::new(); - - // Process the bootnodes. - for bootnode in params.network_config.boot_nodes.iter() { - bootnodes.push(bootnode.peer_id.clone()); - boot_node_ids.insert(bootnode.peer_id.clone()); - known_addresses.push((bootnode.peer_id.clone(), bootnode.multiaddr.clone())); - } - - let boot_node_ids = Arc::new(boot_node_ids); - - // Check for duplicate bootnodes. - known_addresses.iter() - .try_for_each(|(peer_id, addr)| - if let Some(other) = known_addresses - .iter() - .find(|o| o.1 == *addr && o.0 != *peer_id) - { - Err(Error::DuplicateBootnode { - address: addr.clone(), - first_id: peer_id.clone(), - second_id: other.0.clone(), - }) - } else { - Ok(()) - } - )?; - - // Initialize the peers we should always be connected to. - let priority_groups = { - let mut reserved_nodes = HashSet::new(); - for reserved in params.network_config.reserved_nodes.iter() { - reserved_nodes.insert(reserved.peer_id.clone()); - known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); - } - - let print_deprecated_message = match ¶ms.role { - Role::Sentry { .. } => true, - Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true, - _ => false, - }; - if print_deprecated_message { - log::warn!( - "🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \ - CLI options will eventually be removed in a future version. The Substrate \ - and Polkadot networking protocol require validators to be \ - publicly-accessible. Please do not block access to your validator nodes. \ - For details, see https://github.com/paritytech/substrate/issues/6845." 
- ); - } - - let mut sentries_and_validators = HashSet::new(); - match ¶ms.role { - Role::Sentry { validators } => { - for validator in validators { - sentries_and_validators.insert(validator.peer_id.clone()); - reserved_nodes.insert(validator.peer_id.clone()); - known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone())); - } - } - Role::Authority { sentry_nodes } => { - for sentry_node in sentry_nodes { - sentries_and_validators.insert(sentry_node.peer_id.clone()); - reserved_nodes.insert(sentry_node.peer_id.clone()); - known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); - } - } - _ => {} - } - - vec![ - ("reserved".to_owned(), reserved_nodes), - ("sentries_and_validators".to_owned(), sentries_and_validators), - ] - }; - - let peerset_config = sc_peerset::PeersetConfig { - in_peers: params.network_config.in_peers, - out_peers: params.network_config.out_peers, - bootnodes, - reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny, - priority_groups, - }; + let transactions_handler_proto = + transactions::TransactionsHandlerPrototype::new(params.protocol_id.clone()); + params + .network_config + .extra_sets + .insert(0, transactions_handler_proto.set_config()); // Private and public keys configuration. let local_identity = params.network_config.node_key.clone().into_keypair()?; @@ -239,48 +185,79 @@ impl NetworkWorker { local_peer_id.to_base58(), ); - let checker = params.on_demand.as_ref() - .map(|od| od.checker().clone()) - .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); + let default_notif_handshake_message = Roles::from(¶ms.role).encode(); - let num_connected = Arc::new(AtomicUsize::new(0)); - let is_major_syncing = Arc::new(AtomicBool::new(false)); - let (protocol, peerset_handle) = Protocol::new( + let (warp_sync_provider, warp_sync_protocol_config) = match params.warp_sync { + Some((p, c)) => (Some(p), Some(c)), + None => (None, None), + }; + + let (protocol, peerset_handle, mut known_addresses) = Protocol::new( protocol::ProtocolConfig { roles: From::from(¶ms.role), max_parallel_downloads: params.network_config.max_parallel_downloads, + sync_mode: params.network_config.sync_mode.clone(), }, - local_peer_id.clone(), params.chain.clone(), - params.transaction_pool, - params.finality_proof_request_builder, params.protocol_id.clone(), - peerset_config, + ¶ms.network_config, + iter::once(Vec::new()) + .chain( + (0..params.network_config.extra_sets.len() - 1) + .map(|_| default_notif_handshake_message.clone()), + ) + .collect(), params.block_announce_validator, params.metrics_registry.as_ref(), - boot_node_ids.clone(), + warp_sync_provider, )?; + // List of multiaddresses that we know in the network. + let mut bootnodes = Vec::new(); + let mut boot_node_ids = HashSet::new(); + + // Process the bootnodes. + for bootnode in params.network_config.boot_nodes.iter() { + bootnodes.push(bootnode.peer_id); + boot_node_ids.insert(bootnode.peer_id); + known_addresses.push((bootnode.peer_id, bootnode.multiaddr.clone())); + } + + let boot_node_ids = Arc::new(boot_node_ids); + + // Check for duplicate bootnodes. 
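// The scan below is quadratic: for every (peer id, address) pair it looks
// for a *different* peer id advertising the same address, and construction
// fails with `Error::DuplicateBootnode` on the first match. Bootnode lists
// are short, so the O(n²) cost is irrelevant here.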
+ known_addresses.iter().try_for_each(|(peer_id, addr)| { + if let Some(other) = known_addresses.iter().find(|o| o.1 == *addr && o.0 != *peer_id) { + Err(Error::DuplicateBootnode { + address: addr.clone(), + first_id: *peer_id, + second_id: other.0.clone(), + }) + } else { + Ok(()) + } + })?; + + let checker = params + .on_demand + .as_ref() + .map(|od| od.checker().clone()) + .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); + + let num_connected = Arc::new(AtomicUsize::new(0)); + let is_major_syncing = Arc::new(AtomicBool::new(false)); + // Build the swarm. - let (mut swarm, bandwidth): (Swarm, _) = { + let client = params.chain.clone(); + let (mut swarm, bandwidth): (Swarm, _) = { let user_agent = format!( "{} ({})", - params.network_config.client_version, - params.network_config.node_name + params.network_config.client_version, params.network_config.node_name ); - let block_requests = { - let config = block_requests::Config::new(¶ms.protocol_id); - block_requests::BlockRequests::new(config, params.chain.clone()) - }; - let finality_proof_requests = { - let config = finality_requests::Config::new(¶ms.protocol_id); - finality_requests::FinalityProofRequests::new(config, params.finality_proof_provider.clone()) - }; - let light_client_handler = { - let config = light_client_handler::Config::new(¶ms.protocol_id); - light_client_handler::LightClientHandler::new( - config, - params.chain, + + let light_client_request_sender = { + light_client_requests::sender::LightClientRequestSender::new( + ¶ms.protocol_id, checker, peerset_handle.clone(), ) @@ -288,62 +265,114 @@ impl NetworkWorker { let discovery_config = { let mut config = DiscoveryConfig::new(local_public.clone()); - config.with_user_defined(known_addresses); - config.discovery_limit(u64::from(params.network_config.out_peers) + 15); + config.with_permanent_addresses(known_addresses); + config.discovery_limit( + u64::from(params.network_config.default_peers_set.out_peers) + 15, + ); config.add_protocol(params.protocol_id.clone()); + config.with_dht_random_walk(params.network_config.enable_dht_random_walk); config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); - config.use_kademlia_disjoint_query_paths(params.network_config.kademlia_disjoint_query_paths); + config.use_kademlia_disjoint_query_paths( + params.network_config.kademlia_disjoint_query_paths, + ); match params.network_config.transport { TransportConfig::MemoryOnly => { config.with_mdns(false); config.allow_private_ipv4(false); - } + }, TransportConfig::Normal { enable_mdns, allow_private_ipv4, .. } => { config.with_mdns(enable_mdns); config.allow_private_ipv4(allow_private_ipv4); - } + }, } config }; - let mut behaviour = { + let (transport, bandwidth) = { + let config_mem = match params.network_config.transport { + TransportConfig::MemoryOnly => true, + TransportConfig::Normal { .. } => false, + }; + + // The yamux buffer size limit is configured to be equal to the maximum frame size + // of all protocols. 10 bytes are added to each limit for the length prefix that + // is not included in the upper layer protocols limit but is still present in the + // yamux buffer. These 10 bytes correspond to the maximum size required to encode + // a variable-length-encoding 64bits number. In other words, we make the + // assumption that no notification larger than 2^64 will ever be sent. 
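A quick standalone check of the "10 bytes" figure from the comment above, assuming the usual unsigned-varint (LEB128) encoding for the length prefix: 7 payload bits per byte means a full 64-bit value needs ceil(64 / 7) = 10 bytes.

    fn varint_len(mut v: u64) -> usize {
        // Unsigned LEB128: 7 payload bits per byte, high bit marks continuation.
        let mut n = 1;
        while v >= 0x80 {
            v >>= 7;
            n += 1;
        }
        n
    }

    fn main() {
        // The worst case the buffer-size computation below budgets for.
        assert_eq!(varint_len(u64::MAX), 10);
        assert_eq!(varint_len(0), 1);
    }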
+ let yamux_maximum_buffer_size = { + let requests_max = params + .network_config + .request_response_protocols + .iter() + .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX)); + let responses_max = + params.network_config.request_response_protocols.iter().map(|cfg| { + usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX) + }); + let notifs_max = params.network_config.extra_sets.iter().map(|cfg| { + usize::try_from(cfg.max_notification_size).unwrap_or(usize::MAX) + }); + + // A "default" max is added to cover all the other protocols: ping, identify, + // kademlia, block announces, and transactions. + let default_max = cmp::max( + 1024 * 1024, + usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE) + .unwrap_or(usize::MAX), + ); + + iter::once(default_max) + .chain(requests_max) + .chain(responses_max) + .chain(notifs_max) + .max() + .expect("iterator known to always yield at least one element; qed") + .saturating_add(10) + }; + + transport::build_transport( + local_identity, + config_mem, + params.network_config.yamux_window_size, + yamux_maximum_buffer_size, + ) + }; + + let behaviour = { + let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(client)); let result = Behaviour::new( protocol, - params.role, user_agent, local_public, - block_requests, - finality_proof_requests, - light_client_handler, + light_client_request_sender, discovery_config, + params.block_request_protocol_config, + params.state_request_protocol_config, + warp_sync_protocol_config, + bitswap, + params.light_client_request_protocol_config, params.network_config.request_response_protocols, ); match result { Ok(b) => b, - Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => { - return Err(Error::DuplicateRequestResponseProtocol { - protocol: proto, - }) - }, + Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => + return Err(Error::DuplicateRequestResponseProtocol { protocol: proto }), } }; - for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { - behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); - } - let (transport, bandwidth) = { - let (config_mem, config_wasm) = match params.network_config.transport { - TransportConfig::MemoryOnly => (true, None), - TransportConfig::Normal { wasm_external_transport, .. } => - (false, wasm_external_transport) - }; - transport::build_transport(local_identity, config_mem, config_wasm) - }; - let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) - .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER) + let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id) + .connection_limits( + ConnectionLimits::default() + .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) + .with_max_established_incoming(Some( + crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING, + )), + ) + .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) .connection_event_buffer_size(1024); if let Some(spawner) = params.executor { @@ -360,33 +389,31 @@ impl NetworkWorker { // Initialize the metrics. let metrics = match ¶ms.metrics_registry { - Some(registry) => { - Some(metrics::register(registry, MetricSources { + Some(registry) => Some(metrics::register( + registry, + MetricSources { bandwidth: bandwidth.clone(), major_syncing: is_major_syncing.clone(), connected_peers: num_connected.clone(), - })?) 
- } - None => None + }, + )?), + None => None, }; // Listen on multiaddresses. for addr in ¶ms.network_config.listen_addresses { - if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { + if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. for addr in ¶ms.network_config.public_addresses { - Swarm::::add_external_address(&mut swarm, addr.clone()); + Swarm::::add_external_address(&mut swarm, addr.clone(), AddressScore::Infinite); } let external_addresses = Arc::new(Mutex::new(Vec::new())); let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); - let protocol_name_by_engine = Mutex::new({ - params.network_config.notifications_protocols.iter().cloned().collect() - }); let service = Arc::new(NetworkService { bandwidth, @@ -397,12 +424,20 @@ impl NetworkWorker { local_peer_id, to_worker, peers_notifications_sinks: peers_notifications_sinks.clone(), - protocol_name_by_engine, - notifications_sizes_metric: - metrics.as_ref().map(|metrics| metrics.notifications_sizes.clone()), + notifications_sizes_metric: metrics + .as_ref() + .map(|metrics| metrics.notifications_sizes.clone()), _marker: PhantomData, }); + let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( + service.clone(), + params.role, + params.transaction_pool, + params.metrics_registry.as_ref(), + )?; + (params.transactions_handler_executor)(tx_handler.run().boxed()); + Ok(NetworkWorker { external_addresses, num_connected, @@ -414,22 +449,25 @@ impl NetworkWorker { light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, + tx_handler_controller, metrics, boot_node_ids, - pending_requests: HashMap::with_capacity(128), }) } /// High-level network status information. pub fn status(&self) -> NetworkStatus { + let status = self.sync_state(); NetworkStatus { - sync_state: self.sync_state(), + sync_state: status.state, best_seen_block: self.best_seen_block(), num_sync_peers: self.num_sync_peers(), num_connected_peers: self.num_connected_peers(), num_active_peers: self.num_active_peers(), total_bytes_inbound: self.total_bytes_inbound(), total_bytes_outbound: self.total_bytes_outbound(), + state_sync: status.state_sync, + warp_sync: status.warp_sync, } } @@ -445,47 +483,47 @@ impl NetworkWorker { /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.network_service.user_protocol().num_connected_peers() + self.network_service.behaviour().user_protocol().num_connected_peers() } /// Returns the number of peers we're connected to and that are being queried. pub fn num_active_peers(&self) -> usize { - self.network_service.user_protocol().num_active_peers() + self.network_service.behaviour().user_protocol().num_active_peers() } /// Current global sync state. - pub fn sync_state(&self) -> SyncState { - self.network_service.user_protocol().sync_state() + pub fn sync_state(&self) -> SyncStatus { + self.network_service.behaviour().user_protocol().sync_state() } /// Target sync block number. pub fn best_seen_block(&self) -> Option> { - self.network_service.user_protocol().best_seen_block() + self.network_service.behaviour().user_protocol().best_seen_block() } /// Number of peers participating in syncing. 
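`status()` above now derives its sync-related fields from a single `sync_state()` call instead of several separate queries. A compact sketch of that fold; the struct shapes here are illustrative stand-ins (the progress type in particular is an assumption, not taken from the real `sc-network` definitions):

    // Hypothetical stand-ins for the real `SyncStatus`/`NetworkStatus` types.
    struct SyncStatus {
        state: &'static str,
        state_sync: Option<f32>, // progress of an ongoing state sync, if any
        warp_sync: Option<f32>,  // progress of an ongoing warp sync, if any
    }

    struct NetworkStatus {
        sync_state: &'static str,
        state_sync: Option<f32>,
        warp_sync: Option<f32>,
    }

    fn status(sync: SyncStatus) -> NetworkStatus {
        // One call supplies all three sync-related fields of the report.
        NetworkStatus {
            sync_state: sync.state,
            state_sync: sync.state_sync,
            warp_sync: sync.warp_sync,
        }
    }

    fn main() {
        let s = status(SyncStatus { state: "Downloading", state_sync: None, warp_sync: None });
        assert_eq!(s.sync_state, "Downloading");
    }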
pub fn num_sync_peers(&self) -> u32 { - self.network_service.user_protocol().num_sync_peers() + self.network_service.behaviour().user_protocol().num_sync_peers() } /// Number of blocks in the import queue. pub fn num_queued_blocks(&self) -> u32 { - self.network_service.user_protocol().num_queued_blocks() + self.network_service.behaviour().user_protocol().num_queued_blocks() } /// Returns the number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - self.network_service.user_protocol().num_downloaded_blocks() + self.network_service.behaviour().user_protocol().num_downloaded_blocks() } /// Number of active sync requests. pub fn num_sync_requests(&self) -> usize { - self.network_service.user_protocol().num_sync_requests() + self.network_service.behaviour().user_protocol().num_sync_requests() } /// Adds an address for a node. pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - self.network_service.add_known_address(peer_id, addr); + self.network_service.behaviour_mut().add_known_address(peer_id, addr); } /// Return a `NetworkService` that can be shared through the code base and can be used to @@ -496,26 +534,30 @@ impl NetworkWorker { /// You must call this when a new block is finalized by the client. pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) { - self.network_service.user_protocol_mut().on_block_finalized(hash, &header); + self.network_service + .behaviour_mut() + .user_protocol_mut() + .on_block_finalized(hash, &header); } - /// This should be called when blocks are added to the - /// chain by something other than the import queue. - /// Currently this is only useful for tests. - pub fn update_chain(&mut self) { - self.network_service.user_protocol_mut().update_chain(); + /// Inform the network service about new best imported block. + pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + self.network_service + .behaviour_mut() + .user_protocol_mut() + .new_best_block_imported(hash, number); } /// Returns the local `PeerId`. pub fn local_peer_id(&self) -> &PeerId { - Swarm::::local_peer_id(&self.network_service) + Swarm::::local_peer_id(&self.network_service) } /// Returns the list of addresses we are listening on. /// /// Does **NOT** include a trailing `/p2p/` with our `PeerId`. pub fn listen_addresses(&self) -> impl Iterator { - Swarm::::listeners(&self.network_service) + Swarm::::listeners(&self.network_service) } /// Get network state. @@ -524,63 +566,96 @@ impl NetworkWorker { /// everywhere about this. Please don't use this function to retrieve actual information. 
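The mechanical change running through all of these accessors is that the `Swarm` wrapper no longer exposes the behaviour's methods directly, so every call now chains through `behaviour()`/`behaviour_mut()`. A toy stand-in for the shape of that change (all types here are illustrative, not the real `sc-network` ones):

    struct UserProtocol {
        connected: usize,
    }

    impl UserProtocol {
        fn num_connected_peers(&self) -> usize {
            self.connected
        }
    }

    struct Behaviour {
        user: UserProtocol,
    }

    impl Behaviour {
        fn user_protocol(&self) -> &UserProtocol {
            &self.user
        }
    }

    struct Swarm {
        behaviour: Behaviour,
    }

    impl Swarm {
        // The accessor every call site now has to go through.
        fn behaviour(&self) -> &Behaviour {
            &self.behaviour
        }
    }

    fn main() {
        let swarm = Swarm { behaviour: Behaviour { user: UserProtocol { connected: 3 } } };
        // Before: swarm.user_protocol().num_connected_peers()
        // After: an explicit chain through the behaviour accessor.
        assert_eq!(swarm.behaviour().user_protocol().num_connected_peers(), 3);
    }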
pub fn network_state(&mut self) -> NetworkState { let swarm = &mut self.network_service; - let open = swarm.user_protocol().open_peers().cloned().collect::>(); + let open = swarm.behaviour_mut().user_protocol().open_peers().cloned().collect::>(); let connected_peers = { let swarm = &mut *swarm; - open.iter().filter_map(move |peer_id| { - let known_addresses = NetworkBehaviour::addresses_of_peer(&mut **swarm, peer_id) - .into_iter().collect(); - - let endpoint = if let Some(e) = swarm.node(peer_id).map(|i| i.endpoint()) { - e.clone().into() - } else { - error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ + open.iter() + .filter_map(move |peer_id| { + let known_addresses = + NetworkBehaviour::addresses_of_peer(swarm.behaviour_mut(), peer_id) + .into_iter() + .collect(); + + let endpoint = if let Some(e) = + swarm.behaviour_mut().node(peer_id).map(|i| i.endpoint()).flatten() + { + e.clone().into() + } else { + error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ and debug information about {:?}", peer_id); - return None - }; - - Some((peer_id.to_base58(), NetworkStatePeer { - endpoint, - version_string: swarm.node(peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())), - latest_ping_time: swarm.node(peer_id).and_then(|i| i.latest_ping()), - enabled: swarm.user_protocol().is_enabled(&peer_id), - open: swarm.user_protocol().is_open(&peer_id), - known_addresses, - })) - }).collect() + return None + }; + + Some(( + peer_id.to_base58(), + NetworkStatePeer { + endpoint, + version_string: swarm + .behaviour_mut() + .node(peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())), + latest_ping_time: swarm + .behaviour_mut() + .node(peer_id) + .and_then(|i| i.latest_ping()), + known_addresses, + }, + )) + }) + .collect() }; let not_connected_peers = { let swarm = &mut *swarm; - swarm.known_peers().into_iter() + swarm + .behaviour_mut() + .known_peers() + .into_iter() .filter(|p| open.iter().all(|n| n != p)) .map(move |peer_id| { - (peer_id.to_base58(), NetworkStateNotConnectedPeer { - version_string: swarm.node(&peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())), - latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), - known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) - .into_iter().collect(), - }) + ( + peer_id.to_base58(), + NetworkStateNotConnectedPeer { + version_string: swarm + .behaviour_mut() + .node(&peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())), + latest_ping_time: swarm + .behaviour_mut() + .node(&peer_id) + .and_then(|i| i.latest_ping()), + known_addresses: NetworkBehaviour::addresses_of_peer( + swarm.behaviour_mut(), + &peer_id, + ) + .into_iter() + .collect(), + }, + ) }) .collect() }; + let peer_id = Swarm::::local_peer_id(&swarm).to_base58(); + let listened_addresses = swarm.listeners().cloned().collect(); + let external_addresses = swarm.external_addresses().map(|r| &r.addr).cloned().collect(); + NetworkState { - peer_id: Swarm::::local_peer_id(&swarm).to_base58(), - listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), - external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), + peer_id, + listened_addresses, + external_addresses, connected_peers, not_connected_peers, - peerset: swarm.user_protocol_mut().peerset_debug_info(), + peerset: swarm.behaviour_mut().user_protocol_mut().peerset_debug_info(), } } /// Get currently connected peers. 
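`network_state` above sorts every known peer into one of two maps depending on whether it appears in the set of open peers. A minimal sketch of that partition, with strings standing in for `PeerId`:

    use std::collections::HashSet;

    // Split known peers into (connected, not_connected) by the open set.
    fn partition_peers(
        known: Vec<String>,
        open: &HashSet<String>,
    ) -> (Vec<String>, Vec<String>) {
        known.into_iter().partition(|peer| open.contains(peer))
    }

    fn main() {
        let open: HashSet<_> = ["alice".to_string()].into_iter().collect();
        let known = vec!["alice".to_string(), "bob".to_string()];
        let (connected, not_connected) = partition_peers(known, &open);
        assert_eq!(connected, vec!["alice".to_string()]);
        assert_eq!(not_connected, vec!["bob".to_string()]);
    }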
pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo)> { - self.network_service.user_protocol_mut() + self.network_service + .behaviour_mut() + .user_protocol_mut() .peers_info() .map(|(id, info)| (id.clone(), info.clone())) .collect() @@ -596,6 +671,11 @@ impl NetworkWorker { pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { self.service.add_reserved_peer(peer) } + + /// Returns the list of reserved peers. + pub fn reserved_peers(&self) -> impl Iterator { + self.network_service.behaviour().user_protocol().reserved_peers() + } } impl NetworkService { @@ -609,7 +689,7 @@ impl NetworkService { /// Need a better solution to manage authorized peers, but now just use reserved peers for /// prototyping. pub fn set_authorized_peers(&self, peers: HashSet) { - self.peerset.set_reserved_peers(peers) + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); } /// Set authorized_only flag. @@ -617,7 +697,16 @@ impl NetworkService { /// Need a better solution to decide authorized_only, but now just use reserved_only flag for /// prototyping. pub fn set_authorized_only(&self, reserved_only: bool) { - self.peerset.set_reserved_only(reserved_only) + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); + } + + /// Adds an address known to a node. + pub fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); } /// Appends a notification to the buffer of pending outgoing notifications with the given peer. @@ -637,44 +726,53 @@ impl NetworkService { /// > between the remote voluntarily closing a substream or a network error /// > preventing the message from being delivered. /// - /// The protocol must have been registered with `register_notifications_protocol` or - /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). - /// - pub fn write_notification(&self, target: PeerId, engine_id: ConsensusEngineId, message: Vec) { + /// The protocol must have been registered with + /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration:: + /// notifications_protocols). + pub fn write_notification( + &self, + target: PeerId, + protocol: Cow<'static, str>, + message: Vec, + ) { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. let sink = { let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + if let Some(sink) = peers_notifications_sinks.get(&(target.clone(), protocol.clone())) { sink.clone() } else { // Notification silently discarded, as documented. - return; + debug!( + target: "sub-libp2p", + "Attempted to send notification on missing or closed substream: {}, {:?}", + target, protocol, + ); + return } }; - // Used later for the metrics report. - let message_len = message.len(); - - // Determine the wire protocol name corresponding to this `engine_id`. 
- let protocol_name = self.protocol_name_by_engine.lock().get(&engine_id).cloned(); - if let Some(protocol_name) = protocol_name { - sink.send_sync_notification(protocol_name, message); - } else { - return; - } - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { notifications_sizes_metric - .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - .observe(message_len as f64); + .with_label_values(&["out", &protocol]) + .observe(message.len() as f64); } + + // Sending is communicated to the `NotificationsSink`. + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?}, {} bytes)", + target, protocol, message.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); + sink.send_sync_notification(message); } /// Obtains a [`NotificationSender`] for a connected peer, if it exists. /// /// A `NotificationSender` is scoped to a particular connection to the peer that holds - /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two steps: + /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two + /// steps: /// /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready /// for another notification, yielding a [`NotificationSenderReady`] token. @@ -693,8 +791,9 @@ impl NetworkService { /// return an error. It is however possible for the entire connection to be abruptly closed, /// in which case enqueued notifications will be lost. /// - /// The protocol must have been registered with `register_notifications_protocol` or - /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). + /// The protocol must have been registered with + /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration:: + /// notifications_protocols). /// /// # Usage /// @@ -708,9 +807,9 @@ impl NetworkService { /// // Do NOT do this /// for peer in peers { /// if let Ok(n) = network.notification_sender(peer, ...) { - /// if let Ok(s) = n.ready().await { - /// let _ = s.send(...); - /// } + /// if let Ok(s) = n.ready().await { + /// let _ = s.send(...); + /// } /// } /// } /// ``` @@ -737,36 +836,28 @@ impl NetworkService { /// /// See also the [`gossip`](crate::gossip) module for a higher-level way to send /// notifications. - /// pub fn notification_sender( &self, target: PeerId, - engine_id: ConsensusEngineId, + protocol: Cow<'static, str>, ) -> Result { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. let sink = { let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { sink.clone() } else { - return Err(NotificationSenderError::Closed); + return Err(NotificationSenderError::Closed) } }; - // Determine the wire protocol name corresponding to this `engine_id`. 
- let protocol_name = match self.protocol_name_by_engine.lock().get(&engine_id).cloned() { - Some(p) => p, - None => return Err(NotificationSenderError::BadProtocol), - }; + let notification_size_metric = self + .notifications_sizes_metric + .as_ref() + .map(|histogram| histogram.with_label_values(&["out", &protocol])); - Ok(NotificationSender { - sink, - protocol_name, - notification_size_metric: self.notifications_sizes_metric.as_ref().map(|histogram| { - histogram.with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - }), - }) + Ok(NotificationSender { sink, protocol_name: protocol, notification_size_metric }) } /// Returns a stream containing the events that happen on the network. @@ -791,9 +882,10 @@ impl NetworkService { /// notifications should remain the default ways of communicating information. For example, a /// peer can announce something through a notification, after which the recipient can obtain /// more information by performing a request. - /// As such, this function is meant to be called only with peers we are already connected to. - /// Calling this method with a `target` we are not connected to will *not* attempt to connect - /// to said peer. + /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way + /// you will get an error immediately for disconnected peers, instead of waiting for a + /// potentially very long connection attempt, which would suggest that something is wrong + /// anyway, as you are supposed to be connected because of the notification protocol. /// /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. /// Such restrictions, if desired, need to be enforced at the call site(s). @@ -805,15 +897,12 @@ impl NetworkService { &self, target: PeerId, protocol: impl Into>, - request: Vec + request: Vec, + connect: IfDisconnected, ) -> Result, RequestFailure> { let (tx, rx) = oneshot::channel(); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { - target, - protocol: protocol.into(), - request, - pending_response: tx - }); + + self.start_request(target, protocol, request, tx, connect); match rx.await { Ok(v) => v, @@ -824,33 +913,71 @@ impl NetworkService { } } - /// Registers a new notifications protocol. - /// - /// After a protocol has been registered, you can call `write_notifications`. + /// Variation of `request` which starts a request whose response is delivered on a provided + /// channel. /// - /// **Important**: This method is a work-around, and you are instead strongly encouraged to - /// pass the protocol in the `NetworkConfiguration::notifications_protocols` list instead. - /// If you have no other choice but to use this method, you are very strongly encouraged to - /// call it very early on. Any connection open will retain the protocols that were registered - /// then, and not any new one. + /// Instead of blocking and waiting for a reply, this function returns immediately, sending + /// responses via the passed in sender. This alternative API exists to make it easier to + /// integrate with message passing APIs. /// - /// Please call `event_stream` before registering a protocol, otherwise you may miss events - /// about the protocol that you have registered. - // TODO: remove this method after https://github.com/paritytech/substrate/issues/4587 - pub fn register_notifications_protocol( + /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a + /// closing connection. This is expected behaviour. 
With `request` you would get a + /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. + pub fn start_request( &self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, + target: PeerId, + protocol: impl Into>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, ) { - let protocol_name = protocol_name.into(); - self.protocol_name_by_engine.lock().insert(engine_id, protocol_name.clone()); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { - engine_id, - protocol_name, + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { + target, + protocol: protocol.into(), + request, + pending_response: tx, + connect, }); } - /// You may call this when new transactons are imported by the transaction pool. + /// High-level network status information. + /// + /// Returns an error if the `NetworkWorker` is no longer running. + pub async fn status(&self) -> Result, ()> { + let (tx, rx) = oneshot::channel(); + + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); + + match rx.await { + Ok(v) => v.map_err(|_| ()), + // The channel can only be closed if the network worker no longer exists. + Err(_) => Err(()), + } + } + + /// Get network state. + /// + /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally + /// everywhere about this. Please don't use this function to retrieve actual information. + /// + /// Returns an error if the `NetworkWorker` is no longer running. + pub async fn network_state(&self) -> Result { + let (tx, rx) = oneshot::channel(); + + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkState { pending_response: tx }); + + match rx.await { + Ok(v) => v.map_err(|_| ()), + // The channel can only be closed if the network worker no longer exists. + Err(_) => Err(()), + } + } + + /// You may call this when new transactions are imported by the transaction pool. /// /// All transactions will be fetched from the `TransactionPool` that was passed at /// initialization as part of the configuration and propagated to peers. @@ -870,7 +997,7 @@ impl NetworkService { /// /// In chain-based consensus, we often need to make sure non-best forks are /// at least temporarily synced. This function forces such an announcement. - pub fn announce_block(&self, hash: B::Hash, data: Vec) { + pub fn announce_block(&self, hash: B::Hash, data: Option>) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); } @@ -883,8 +1010,14 @@ impl NetworkService { /// Disconnect from a node as soon as possible. /// /// This triggers the same effects as if the connection had closed itself spontaneously. - pub fn disconnect_peer(&self, who: PeerId) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who)); + /// + /// See also [`NetworkService::remove_from_peers_set`], which has the same effect but also + /// prevents the local node from re-establishing an outgoing substream to this peer until it + /// is added again. + pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into>) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); } /// Request a justification for the given block from the network. @@ -897,6 +1030,11 @@ impl NetworkService { .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); } + /// Clear all pending justification requests. 
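The `request`/`start_request` pair above is a pattern worth seeing in isolation: the fire-and-forget variant takes the caller's oneshot sender, and the awaitable variant merely creates the channel, kicks off the request, and maps a closed channel to an error. A self-contained sketch with toy types (no real networking; only the `futures` crate):

    use futures::channel::oneshot;

    type Response = Result<Vec<u8>, String>;

    // Fire-and-forget: hand over a sender and return immediately.
    // (Here the "worker" answers inline; in reality it runs elsewhere.)
    fn start_request(tx: oneshot::Sender<Response>) {
        let _ = tx.send(Ok(b"response".to_vec()));
    }

    // Awaitable wrapper built on top of the fire-and-forget variant.
    async fn request() -> Response {
        let (tx, rx) = oneshot::channel();
        start_request(tx);
        match rx.await {
            Ok(v) => v,
            // The sender was dropped: the worker no longer exists.
            Err(_canceled) => Err("worker is gone".to_string()),
        }
    }

    fn main() {
        let out = futures::executor::block_on(request());
        assert_eq!(out.unwrap(), b"response".to_vec());
    }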
+ pub fn clear_justification_requests(&self) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); + } + /// Are we in the process of downloading the chain? pub fn is_major_syncing(&self) -> bool { self.is_major_syncing.load(Ordering::Relaxed) @@ -907,9 +1045,7 @@ impl NetworkService { /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an /// item on the [`NetworkWorker`] stream. pub fn get_value(&self, key: &record::Key) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); } /// Start putting a value in the DHT. @@ -917,24 +1053,18 @@ impl NetworkService { /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an /// item on the [`NetworkWorker`] stream. pub fn put_value(&self, key: record::Key, value: Vec) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); } - /// Connect to unreserved peers and allow unreserved peers to connect. + /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. pub fn accept_unreserved_peers(&self) { - self.peerset.set_reserved_only(false); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); } - /// Disconnect from unreserved peers and deny new unreserved peers to connect. + /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing + /// purposes. pub fn deny_unreserved_peers(&self) { - self.peerset.set_reserved_only(true); - } - - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_reserved_peer(&self, peer: PeerId) { - self.peerset.remove_reserved_peer(peer); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } /// Adds a `PeerId` and its address as reserved. The string should encode the address @@ -948,84 +1078,139 @@ impl NetworkService { if peer_id == self.local_peer_id { return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } - self.peerset.add_reserved_peer(peer_id.clone()); + let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id)); Ok(()) } - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_reserved_peer(&self, peer_id: PeerId) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } - /// Modify a peerset priority group. + /// Add peers to a peer set. /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. 
It can also + /// consist of only `/p2p/`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - pub fn set_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { + pub fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; - let peer_ids = peers.iter().map(|(peer_id, _addr)| peer_id.clone()).collect(); - - self.peerset.set_priority_group(group_id, peer_ids); - for (peer_id, addr) in peers.into_iter() { + // Make sure the local peer ID is never added to the PSM. + if peer_id == self.local_peer_id { + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + } + + if !addr.is_empty() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + .unbounded_send(ServiceToWorkerMsg::AddSetReserved(protocol.clone(), peer_id)); } Ok(()) } - /// Add peers to a peerset priority group. + /// Remove peers from a peer set. /// /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - // - // NOTE: even though this function is currently sync, it's marked as async for - // future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451. - pub async fn add_to_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { + // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for + // convenience. + pub fn remove_peers_from_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + let peers = self.split_multiaddr_and_peer_id(peers)?; + for (peer_id, _) in peers.into_iter() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RemoveSetReserved(protocol.clone(), peer_id)); + } + Ok(()) + } + + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request. + pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } + + /// Add a peer to a set of peers. + /// + /// If the set has slots available, it will try to open a substream with this peer. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + pub fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, addr) in peers.into_iter() { - self.peerset.add_to_priority_group(group_id.clone(), peer_id.clone()); + // Make sure the local peer ID is never added to the PSM. 
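// ("PSM" is the peer-set manager, i.e. the `sc-peerset` logic that decides
// which peers to connect to; inserting our own `PeerId` there could make the
// node attempt to dial itself.)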
+ if peer_id == self.local_peer_id { + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + } + if !addr.is_empty() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + .unbounded_send(ServiceToWorkerMsg::AddToPeersSet(protocol.clone(), peer_id)); } Ok(()) } - /// Remove peers from a peerset priority group. + /// Remove peers from a peer set. + /// + /// If we currently have an open substream with this peer, it will soon be closed. /// /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - // - // NOTE: even though this function is currently sync, it's marked as async for - // future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451. - // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for convenience. - pub async fn remove_from_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { + // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for + // convenience. + pub fn remove_from_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, _) in peers.into_iter() { - self.peerset.remove_from_priority_group(group_id.clone(), peer_id); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RemoveFromPeersSet(protocol.clone(), peer_id)); } Ok(()) } @@ -1035,29 +1220,23 @@ impl NetworkService { self.num_connected.load(Ordering::Relaxed) } - /// This function should be called when blocks are added to the chain by something other - /// than the import queue. - /// - /// > **Important**: This function is a hack and can be removed at any time. Do **not** use it. - pub fn update_chain(&self) { + /// Inform the network service about new best imported block. + pub fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::UpdateChain); + .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); } - /// Inform the network service about an own imported block. - pub fn own_block_imported(&self, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::OwnBlockImported(hash, number)); - } - - /// Utility function to extract `PeerId` from each `Multiaddr` for priority group updates. + /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - fn split_multiaddr_and_peer_id(&self, peers: HashSet) -> Result, String> { - peers.into_iter() + fn split_multiaddr_and_peer_id( + &self, + peers: HashSet, + ) -> Result, String> { + peers + .into_iter() .map(|mut addr| { let peer = match addr.pop() { Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) @@ -1068,7 +1247,7 @@ impl NetworkService { // Make sure the local peer ID is never added to the PSM // or added as a "known address", even if given. 
if peer == self.local_peer_id { - Err("Local peer ID in priority group.".to_string()) + Err("Local peer ID in peer set.".to_string()) } else { Ok((peer, addr)) } @@ -1077,11 +1256,9 @@ impl NetworkService { } } -impl sp_consensus::SyncOracle - for NetworkService -{ +impl sp_consensus::SyncOracle for NetworkService { fn is_major_syncing(&mut self) -> bool { - NetworkService::is_major_syncing(self) + Self::is_major_syncing(self) } fn is_offline(&mut self) -> bool { @@ -1089,9 +1266,7 @@ impl sp_consensus::SyncOracle } } -impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle - for &'a NetworkService -{ +impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a NetworkService { fn is_major_syncing(&mut self) -> bool { NetworkService::is_major_syncing(self) } @@ -1101,10 +1276,20 @@ impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle } } +impl sc_consensus::JustificationSyncLink for NetworkService { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + Self::request_justification(self, hash, number); + } + + fn clear_justification_requests(&self) { + Self::clear_justification_requests(self); + } +} + impl NetworkStateInfo for NetworkService - where - B: sp_runtime::traits::Block, - H: ExHashT, +where + B: sp_runtime::traits::Block, + H: ExHashT, { /// Returns the local external addresses. fn external_addresses(&self) -> Vec { @@ -1113,7 +1298,7 @@ impl NetworkStateInfo for NetworkService /// Returns the local Peer ID. fn local_peer_id(&self) -> PeerId { - self.local_peer_id.clone() + self.local_peer_id } } @@ -1131,13 +1316,18 @@ pub struct NotificationSender { } impl NotificationSender { - /// Returns a future that resolves when the `NotificationSender` is ready to send a notification. - pub async fn ready<'a>(&'a self) -> Result, NotificationSenderError> { + /// Returns a future that resolves when the `NotificationSender` is ready to send a + /// notification. + pub async fn ready<'a>( + &'a self, + ) -> Result, NotificationSenderError> { Ok(NotificationSenderReady { - ready: match self.sink.reserve_notification(self.protocol_name.clone()).await { + ready: match self.sink.reserve_notification().await { Ok(r) => r, Err(()) => return Err(NotificationSenderError::Closed), }, + peer_id: self.sink.peer_id(), + protocol_name: &self.protocol_name, notification_size_metric: self.notification_size_metric.clone(), }) } @@ -1148,6 +1338,12 @@ impl NotificationSender { pub struct NotificationSenderReady<'a> { ready: Ready<'a>, + /// Target of the notification. + peer_id: &'a PeerId, + + /// Name of the protocol on the wire. + protocol_name: &'a Cow<'static, str>, + /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. notification_size_metric: Option, @@ -1162,16 +1358,22 @@ impl<'a> NotificationSenderReady<'a> { notification_size_metric.observe(notification.len() as f64); } - self.ready - .send(notification) - .map_err(|()| NotificationSenderError::Closed) + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {}, {} bytes)", + self.peer_id, self.protocol_name, notification.len(), + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id); + + self.ready.send(notification).map_err(|()| NotificationSenderError::Closed) } } /// Error returned by [`NetworkService::send_notification`]. 
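The `NotificationSender`/`NotificationSenderReady` split above enforces a two-phase send: the async `ready()` call reserves a slot and yields a short-lived token, and only that token can enqueue bytes, so backpressure is awaited before the message is built. A toy sketch of that shape (illustrative types, no real queueing):

    struct Sender {
        slot: Option<Vec<u8>>,
    }

    // Short-lived token proving that a slot has been reserved.
    struct Ready<'a> {
        slot: &'a mut Option<Vec<u8>>,
    }

    impl Sender {
        // Phase 1: wait for capacity. (A real implementation would await
        // queue space here instead of succeeding immediately.)
        async fn ready(&mut self) -> Result<Ready<'_>, ()> {
            Ok(Ready { slot: &mut self.slot })
        }
    }

    impl<'a> Ready<'a> {
        // Phase 2: consume the token to actually enqueue the bytes.
        fn send(self, notification: Vec<u8>) -> Result<(), ()> {
            *self.slot = Some(notification);
            Ok(())
        }
    }

    fn main() {
        futures::executor::block_on(async {
            let mut sender = Sender { slot: None };
            let ready = sender.ready().await.unwrap();
            ready.send(b"hello".to_vec()).unwrap();
            assert_eq!(sender.slot.as_deref(), Some(&b"hello"[..]));
        });
    }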
#[derive(Debug, derive_more::Display, derive_more::Error)] pub enum NotificationSenderError { - /// The notification receiver has been closed, usually because the underlying connection closed. + /// The notification receiver has been closed, usually because the underlying connection + /// closed. /// /// Some of the notifications most recently sent may not have been received. However, /// the peer may still be connected and a new `NotificationSender` for the same @@ -1188,10 +1390,19 @@ enum ServiceToWorkerMsg { PropagateTransaction(H), PropagateTransactions, RequestJustification(B::Hash, NumberFor), - AnnounceBlock(B::Hash, Vec), + ClearJustificationRequests, + AnnounceBlock(B::Hash, Option>), GetValue(record::Key), PutValue(record::Key, Vec), AddKnownAddress(PeerId, Multiaddr), + SetReservedOnly(bool), + AddReserved(PeerId), + RemoveReserved(PeerId), + SetReserved(HashSet), + AddSetReserved(Cow<'static, str>, PeerId), + RemoveSetReserved(Cow<'static, str>, PeerId), + AddToPeersSet(Cow<'static, str>, PeerId), + RemoveFromPeersSet(Cow<'static, str>, PeerId), SyncFork(Vec, B::Hash, NumberFor), EventStream(out_events::Sender), Request { @@ -1199,14 +1410,16 @@ enum ServiceToWorkerMsg { protocol: Cow<'static, str>, request: Vec, pending_response: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, }, - RegisterNotifProtocol { - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, str>, + NetworkStatus { + pending_response: oneshot::Sender, RequestFailure>>, }, - DisconnectPeer(PeerId), - UpdateChain, - OwnBlockImported(B::Hash, NumberFor), + NetworkState { + pending_response: oneshot::Sender>, + }, + DisconnectPeer(PeerId, Cow<'static, str>), + NewBestBlockImported(B::Hash, NumberFor), } /// Main network worker. Must be polled in order for the network to advance. @@ -1223,29 +1436,24 @@ pub struct NetworkWorker { /// The network service that can be extracted and shared through the codebase. service: Arc>, /// The *actual* network. - network_service: Swarm, + network_service: Swarm, /// The import queue that was passed at initialization. import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. from_service: TracingUnboundedReceiver>, /// Receiver for queries from the light client that must be processed. - light_client_rqs: Option>>, + light_client_rqs: Option>>, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. metrics: Option, /// The `PeerId`'s of all boot nodes. boot_node_ids: Arc>, - /// Requests started using [`NetworkService::request`]. Includes the channel to send back the - /// response, when the request has started, and the name of the protocol for diagnostic - /// purposes. - pending_requests: HashMap< - behaviour::RequestId, - (oneshot::Sender, RequestFailure>>, Instant, String) - >, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. - peers_notifications_sinks: Arc>>, + peers_notifications_sinks: Arc), NotificationsSink>>>, + /// Controller for the handler of incoming and outgoing transactions. + tx_handler_controller: transactions::TransactionsHandlerController, } impl Future for NetworkWorker { @@ -1255,17 +1463,20 @@ impl Future for NetworkWorker { let this = &mut *self; // Poll the import queue for actions to perform. 
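The `ServiceToWorkerMsg` enum above is the entire surface between the cloneable `NetworkService` handle and the single `NetworkWorker` event loop polled below: the service side only enqueues messages and deliberately ignores send errors, since a dead worker makes them moot. A toy sketch of that split, using `std::sync::mpsc` in place of the tracing-instrumented unbounded channel:

    use std::sync::mpsc;

    enum ToWorker {
        AnnounceBlock(u64),
        DisconnectPeer(String),
    }

    // Cheap, cloneable handle: it only pushes messages onto the channel.
    #[derive(Clone)]
    struct Service {
        to_worker: mpsc::Sender<ToWorker>,
    }

    impl Service {
        fn announce_block(&self, number: u64) {
            // If the worker is gone the message is moot, so errors are dropped.
            let _ = self.to_worker.send(ToWorker::AnnounceBlock(number));
        }

        fn disconnect_peer(&self, who: String) {
            let _ = self.to_worker.send(ToWorker::DisconnectPeer(who));
        }
    }

    fn main() {
        let (tx, rx) = mpsc::channel();
        let service = Service { to_worker: tx };
        service.announce_block(42);
        service.disconnect_peer("bob".into());
        drop(service); // close the channel so the drain below terminates

        // The worker side: drain and dispatch, as the poll loop does.
        for msg in rx {
            match msg {
                ToWorker::AnnounceBlock(n) => println!("announce block #{}", n),
                ToWorker::DisconnectPeer(p) => println!("disconnect {}", p),
            }
        }
    }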
- this.import_queue.poll_actions(cx, &mut NetworkLink { - protocol: &mut this.network_service, - }); + this.import_queue + .poll_actions(cx, &mut NetworkLink { protocol: &mut this.network_service }); // Check for new incoming light client requests. if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { - // This can error if there are too many queued requests already. - if this.network_service.light_client_request(rq).is_err() { - log::warn!("Couldn't start light client request: too many pending requests"); + let result = this.network_service.behaviour_mut().light_client_request(rq); + match result { + Ok(()) => {}, + Err(light_client_requests::sender::SendRequestError::TooManyRequests) => { + warn!("Couldn't start light client request: too many pending requests"); + }, } + if let Some(metrics) = this.metrics.as_ref() { metrics.issued_light_requests.inc(); } @@ -1285,7 +1496,7 @@ impl Future for NetworkWorker { num_iterations += 1; if num_iterations >= 100 { cx.waker().wake_by_ref(); - break; + break } // Process the next message coming from the `NetworkService`. @@ -1296,59 +1507,108 @@ impl Future for NetworkWorker { }; match msg { - ServiceToWorkerMsg::AnnounceBlock(hash, data) => - this.network_service.user_protocol_mut().announce_block(hash, data), - ServiceToWorkerMsg::RequestJustification(hash, number) => - this.network_service.user_protocol_mut().request_justification(&hash, number), + ServiceToWorkerMsg::AnnounceBlock(hash, data) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .announce_block(hash, data), + ServiceToWorkerMsg::RequestJustification(hash, number) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .request_justification(&hash, number), + ServiceToWorkerMsg::ClearJustificationRequests => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .clear_justification_requests(), ServiceToWorkerMsg::PropagateTransaction(hash) => - this.network_service.user_protocol_mut().propagate_transaction(&hash), + this.tx_handler_controller.propagate_transaction(hash), ServiceToWorkerMsg::PropagateTransactions => - this.network_service.user_protocol_mut().propagate_transactions(), + this.tx_handler_controller.propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => - this.network_service.get_value(&key), + this.network_service.behaviour_mut().get_value(&key), ServiceToWorkerMsg::PutValue(key, value) => - this.network_service.put_value(key, value), + this.network_service.behaviour_mut().put_value(key, value), + ServiceToWorkerMsg::SetReservedOnly(reserved_only) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_reserved_only(reserved_only), + ServiceToWorkerMsg::SetReserved(peers) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_reserved_peers(peers), + ServiceToWorkerMsg::AddReserved(peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .add_reserved_peer(peer_id), + ServiceToWorkerMsg::RemoveReserved(peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .remove_reserved_peer(peer_id), + ServiceToWorkerMsg::AddSetReserved(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .add_set_reserved_peer(protocol, peer_id), + ServiceToWorkerMsg::RemoveSetReserved(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .remove_set_reserved_peer(protocol, peer_id), 
ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => - this.network_service.add_known_address(peer_id, addr), - ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => - this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), - ServiceToWorkerMsg::EventStream(sender) => - this.event_streams.push(sender), - ServiceToWorkerMsg::Request { target, protocol, request, pending_response } => { - // Calling `send_request` can fail immediately in some circumstances. - // This is handled by sending back an error on the channel. - match this.network_service.send_request(&target, &protocol, request) { - Ok(request_id) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_out_started_total - .with_label_values(&[&protocol]) - .inc(); - } - this.pending_requests.insert( - request_id, - (pending_response, Instant::now(), protocol.to_string()) - ); - }, - Err(behaviour::SendRequestError::NotConnected) => { - let err = RequestFailure::Network(OutboundFailure::ConnectionClosed); - let _ = pending_response.send(Err(err)); - }, - Err(behaviour::SendRequestError::UnknownProtocol) => { - let err = RequestFailure::Network(OutboundFailure::UnsupportedProtocols); - let _ = pending_response.send(Err(err)); - }, - } + this.network_service.behaviour_mut().add_known_address(peer_id, addr), + ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .add_to_peers_set(protocol, peer_id), + ServiceToWorkerMsg::RemoveFromPeersSet(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .remove_from_peers_set(protocol, peer_id), + ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_sync_fork_request(peer_ids, &hash, number), + ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), + ServiceToWorkerMsg::Request { + target, + protocol, + request, + pending_response, + connect, + } => { + this.network_service.behaviour_mut().send_request( + &target, + &protocol, + request, + pending_response, + connect, + ); + }, + ServiceToWorkerMsg::NetworkStatus { pending_response } => { + let _ = pending_response.send(Ok(this.status())); }, - ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { - this.network_service - .register_notifications_protocol(engine_id, protocol_name); + ServiceToWorkerMsg::NetworkState { pending_response } => { + let _ = pending_response.send(Ok(this.network_state())); }, - ServiceToWorkerMsg::DisconnectPeer(who) => - this.network_service.user_protocol_mut().disconnect_peer(&who), - ServiceToWorkerMsg::UpdateChain => - this.network_service.user_protocol_mut().update_chain(), - ServiceToWorkerMsg::OwnBlockImported(hash, number) => - this.network_service.user_protocol_mut().own_block_imported(hash, number), + ServiceToWorkerMsg::DisconnectPeer(who, protocol_name) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .disconnect_peer(&who, &protocol_name), + ServiceToWorkerMsg::NewBestBlockImported(hash, number) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .new_best_block_imported(hash, number), } } @@ -1359,11 +1619,11 @@ impl Future for NetworkWorker { num_iterations += 1; if num_iterations >= 1000 { cx.waker().wake_by_ref(); - break; + break } // Process the next action coming from the network. 
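Both dispatch loops above cap their per-poll iterations (at 100 service messages and 1000 network events respectively) and call `wake_by_ref` before breaking, so a busy queue hands control back to the executor instead of starving other tasks. A standalone sketch of that cooperative-yield pattern (toy code; the `futures` crate is used only for its no-op waker):

    use std::task::{Context, Poll};

    // Drain up to `budget` items; if the budget is hit, self-wake and yield.
    fn drain_with_budget<T>(
        cx: &mut Context<'_>,
        mut next: impl FnMut() -> Option<T>,
        mut handle: impl FnMut(T),
        budget: usize,
    ) -> Poll<()> {
        for _ in 0..budget {
            match next() {
                Some(item) => handle(item),
                // Queue drained: nothing to do until new items arrive.
                None => return Poll::Pending,
            }
        }
        // Budget exhausted: ask to be polled again, then yield.
        cx.waker().wake_by_ref();
        Poll::Pending
    }

    fn main() {
        let waker = futures::task::noop_waker();
        let mut cx = Context::from_waker(&waker);
        let mut items = (0..250u32).collect::<Vec<_>>().into_iter();
        let mut handled = 0;
        let _ = drain_with_budget(&mut cx, || items.next(), |_| handled += 1, 100);
        // Only one budget's worth of items was processed in this poll.
        assert_eq!(handled, 100);
    }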
- let next_event = this.network_service.next_event(); + let next_event = this.network_service.select_next_some(); futures::pin_mut!(next_event); let poll_value = next_event.poll_unpin(cx); @@ -1375,121 +1635,141 @@ impl Future for NetworkWorker { } this.import_queue.import_blocks(origin, blocks); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport( + origin, + hash, + nb, + justifications, + ))) => { if let Some(metrics) = this.metrics.as_ref() { metrics.import_queue_justifications_submitted.inc(); } - this.import_queue.import_justification(origin, hash, nb, justification); + this.import_queue.import_justifications(origin, hash, nb, justifications); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_finality_proofs_submitted.inc(); - } - this.import_queue.import_finality_proof(origin, hash, nb, proof); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { + protocol, + result, + .. + })) => { if let Some(metrics) = this.metrics.as_ref() { match result { Ok(serve_time) => { - metrics.requests_in_success_total + metrics + .requests_in_success_total .with_label_values(&[&protocol]) .observe(serve_time.as_secs_f64()); - } + }, Err(err) => { let reason = match err { - ResponseFailure::Busy => "busy", ResponseFailure::Network(InboundFailure::Timeout) => "timeout", - ResponseFailure::Network(InboundFailure::UnsupportedProtocols) => - "unsupported", + ResponseFailure::Network( + InboundFailure::UnsupportedProtocols, + ) => + // `UnsupportedProtocols` is reported for every single + // inbound request whenever a request with an unsupported + // protocol is received. This is not reported in order to + // avoid confusions. + continue, + ResponseFailure::Network(InboundFailure::ResponseOmission) => + "busy-omitted", ResponseFailure::Network(InboundFailure::ConnectionClosed) => "connection-closed", }; - metrics.requests_in_failure_total + metrics + .requests_in_failure_total .with_label_values(&[&protocol, reason]) .inc(); - } + }, } } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { request_id, result })) => { - if let Some((send_back, started, protocol)) = this.pending_requests.remove(&request_id) { - if let Some(metrics) = this.metrics.as_ref() { - match &result { - Ok(_) => { - metrics.requests_out_success_total - .with_label_values(&[&protocol]) - .observe(started.elapsed().as_secs_f64()); - } - Err(err) => { - let reason = match err { - RequestFailure::Refused => "refused", - RequestFailure::Network(OutboundFailure::DialFailure) => - "dial-failure", - RequestFailure::Network(OutboundFailure::Timeout) => - "timeout", - RequestFailure::Network(OutboundFailure::ConnectionClosed) => - "connection-closed", - RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => - "unsupported", - }; - - metrics.requests_out_failure_total - .with_label_values(&[&protocol, reason]) - .inc(); - } - } + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { + protocol, + duration, + result, + .. 
+ })) => + if let Some(metrics) = this.metrics.as_ref() { + match result { + Ok(_) => { + metrics + .requests_out_success_total + .with_label_values(&[&protocol]) + .observe(duration.as_secs_f64()); + }, + Err(err) => { + let reason = match err { + RequestFailure::NotConnected => "not-connected", + RequestFailure::UnknownProtocol => "unknown-protocol", + RequestFailure::Refused => "refused", + RequestFailure::Obsolete => "obsolete", + RequestFailure::Network(OutboundFailure::DialFailure) => + "dial-failure", + RequestFailure::Network(OutboundFailure::Timeout) => "timeout", + RequestFailure::Network(OutboundFailure::ConnectionClosed) => + "connection-closed", + RequestFailure::Network( + OutboundFailure::UnsupportedProtocols, + ) => "unsupported", + }; + + metrics + .requests_out_failure_total + .with_label_values(&[&protocol, reason]) + .inc(); + }, } - let _ = send_back.send(result); - } else { - error!("Request not in pending_requests"); - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestStarted { protocol, .. })) => { + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted( + protocol, + ))) => if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_out_started_total - .with_label_values(&[&protocol]) + metrics + .kademlia_random_queries_total + .with_label_values(&[&protocol.as_ref()]) .inc(); - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestFinished { protocol, request_duration, .. })) => { + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { + remote, + protocol, + negotiated_fallback, + notifications_sink, + role, + })) => { if let Some(metrics) = this.metrics.as_ref() { - metrics.requests_out_success_total + metrics + .notifications_streams_opened_total .with_label_values(&[&protocol]) - .observe(request_duration.as_secs_f64()); - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted(protocol))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.kademlia_random_queries_total - .with_label_values(&[&protocol.as_ref()]) .inc(); } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, engine_id, notifications_sink, role })) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.notifications_streams_opened_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id)]).inc(); - } { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - peers_notifications_sinks.insert((remote.clone(), engine_id), notifications_sink); + let _previous_value = peers_notifications_sinks + .insert((remote.clone(), protocol.clone()), notifications_sink); + debug_assert!(_previous_value.is_none()); } this.event_streams.send(Event::NotificationStreamOpened { remote, - engine_id, + protocol, + negotiated_fallback, role, }); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, engine_id, notifications_sink })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { + remote, + protocol, + notifications_sink, + })) => { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - if let Some(s) = peers_notifications_sinks.get_mut(&(remote, engine_id)) { + if let Some(s) = peers_notifications_sinks.get_mut(&(remote, protocol)) { *s = notifications_sink; } else { - log::error!( + error!( target: "sub-libp2p", "NotificationStreamReplaced for non-existing substream" ); + debug_assert!(false); } // TODO: Notifications might 
have been lost as a result of the previous @@ -1503,42 +1783,56 @@ impl Future for NetworkWorker { // acceptable, this bug is at the moment intentionally left there and is // intended to be fixed at the same time as // https://github.com/paritytech/substrate/issues/6403. - /*this.event_streams.send(Event::NotificationStreamClosed { - remote, - engine_id, - }); - this.event_streams.send(Event::NotificationStreamOpened { - remote, - engine_id, - role, - });*/ + // this.event_streams.send(Event::NotificationStreamClosed { + // remote, + // protocol, + // }); + // this.event_streams.send(Event::NotificationStreamOpened { + // remote, + // protocol, + // role, + // }); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, engine_id })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { + remote, + protocol, + })) => { if let Some(metrics) = this.metrics.as_ref() { - metrics.notifications_streams_closed_total - .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id[..])]).inc(); + metrics + .notifications_streams_closed_total + .with_label_values(&[&protocol[..]]) + .inc(); } this.event_streams.send(Event::NotificationStreamClosed { remote: remote.clone(), - engine_id, + protocol: protocol.clone(), }); { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - peers_notifications_sinks.remove(&(remote.clone(), engine_id)); + let _previous_value = + peers_notifications_sinks.remove(&(remote.clone(), protocol)); + debug_assert!(_previous_value.is_some()); } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { + remote, + messages, + })) => { if let Some(metrics) = this.metrics.as_ref() { - for (engine_id, message) in &messages { - metrics.notifications_sizes - .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) + for (protocol, message) in &messages { + metrics + .notifications_sizes + .with_label_values(&["in", protocol]) .observe(message.len() as f64); } } - this.event_streams.send(Event::NotificationsReceived { - remote, - messages, - }); + this.event_streams.send(Event::NotificationsReceived { remote, messages }); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncConnected(remote))) => { + this.event_streams.send(Event::SyncConnected { remote }); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncDisconnected(remote))) => { + this.event_streams.send(Event::SyncDisconnected { remote }); }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration))) => { if let Some(metrics) = this.metrics.as_ref() { @@ -1548,14 +1842,20 @@ impl Future for NetworkWorker { DhtEvent::ValuePut(_) => "value-put", DhtEvent::ValuePutFailed(_) => "value-put-failed", }; - metrics.kademlia_query_duration.with_label_values(&[query_type]) + metrics + .kademlia_query_duration + .with_label_values(&[query_type]) .observe(duration.as_secs_f64()); } this.event_streams.send(Event::Dht(event)); }, - Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, num_established }) => { - trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + Poll::Ready(SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + }) => { + debug!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); if let Some(metrics) = this.metrics.as_ref() { let direction = match endpoint { @@ -1569,8 +1869,13 @@ impl Future for NetworkWorker { } } }, - 
Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, num_established }) => { - trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); + Poll::Ready(SwarmEvent::ConnectionClosed { + peer_id, + cause, + endpoint, + num_established, + }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); if let Some(metrics) = this.metrics.as_ref() { let direction = match endpoint { ConnectedPoint::Dialer { .. } => "out", @@ -1578,20 +1883,27 @@ impl Future for NetworkWorker { }; let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler( EitherError::A(EitherError::A(EitherError::A(EitherError::B( - EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))))) => "force-closed", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(PingFailure::Timeout), + )))), + ))) => "ping-timeout", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler( EitherError::A(EitherError::A(EitherError::A(EitherError::A( - NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => "protocol-error", - Some(ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout)) => "keep-alive-timeout", + NotifsHandlerError::SyncNotificationsClogged, + )))), + ))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => + "protocol-error", + Some(ConnectionError::Handler( + NodeHandlerWrapperError::KeepAliveTimeout, + )) => "keep-alive-timeout", None => "actively-closed", }; - metrics.connections_closed_total.with_label_values(&[direction, reason]).inc(); + metrics + .connections_closed_total + .with_label_values(&[direction, reason]) + .inc(); // `num_established` represents the number of *remaining* connections. if num_established == 0 { @@ -1599,49 +1911,55 @@ impl Future for NetworkWorker { } } }, - Poll::Ready(SwarmEvent::NewListenAddr(addr)) => { - trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr); + Poll::Ready(SwarmEvent::NewListenAddr { address, .. }) => { + trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", address); if let Some(metrics) = this.metrics.as_ref() { metrics.listeners_local_addresses.inc(); } }, - Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => { - info!(target: "sub-libp2p", "📪 No longer listening on {}", addr); + Poll::Ready(SwarmEvent::ExpiredListenAddr { address, .. }) => { + info!(target: "sub-libp2p", "📪 No longer listening on {}", address); if let Some(metrics) = this.metrics.as_ref() { metrics.listeners_local_addresses.dec(); } }, Poll::Ready(SwarmEvent::UnreachableAddr { peer_id, address, error, .. 
}) => { trace!( - target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}", - peer_id, - address, - error, + target: "sub-libp2p", + "Libp2p => Failed to reach {:?} through {:?}: {}", + peer_id, address, error, ); if this.boot_node_ids.contains(&peer_id) { if let PendingConnectionError::InvalidPeerId = error { error!( "💔 The bootnode you want to connect to at `{}` provided a different peer ID than the one you expect: `{}`.", - address, - peer_id, + address, peer_id, ); } } if let Some(metrics) = this.metrics.as_ref() { match error { - PendingConnectionError::ConnectionLimit(_) => - metrics.pending_connections_errors_total.with_label_values(&["limit-reached"]).inc(), - PendingConnectionError::InvalidPeerId => - metrics.pending_connections_errors_total.with_label_values(&["invalid-peer-id"]).inc(), - PendingConnectionError::Transport(_) | PendingConnectionError::IO(_) => - metrics.pending_connections_errors_total.with_label_values(&["transport-error"]).inc(), + PendingConnectionError::ConnectionLimit(_) => metrics + .pending_connections_errors_total + .with_label_values(&["limit-reached"]) + .inc(), + PendingConnectionError::InvalidPeerId => metrics + .pending_connections_errors_total + .with_label_values(&["invalid-peer-id"]) + .inc(), + PendingConnectionError::Transport(_) | + PendingConnectionError::IO(_) => metrics + .pending_connections_errors_total + .with_label_values(&["transport-error"]) + .inc(), } } - } - Poll::Ready(SwarmEvent::Dialing(peer_id)) => - trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id), + }, + Poll::Ready(SwarmEvent::Dialing(peer_id)) => { + trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id) + }, Poll::Ready(SwarmEvent::IncomingConnection { local_addr, send_back_addr }) => { trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))", local_addr, send_back_addr); @@ -1649,9 +1967,16 @@ impl Future for NetworkWorker { metrics.incoming_connections_total.inc(); } }, - Poll::Ready(SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error }) => { - trace!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", - local_addr, send_back_addr, error); + Poll::Ready(SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + }) => { + debug!( + target: "sub-libp2p", + "Libp2p => IncomingConnectionError({},{}): {}", + local_addr, send_back_addr, error, + ); if let Some(metrics) = this.metrics.as_ref() { let reason = match error { PendingConnectionError::ConnectionLimit(_) => "limit-reached", @@ -1660,25 +1985,33 @@ impl Future for NetworkWorker { PendingConnectionError::IO(_) => "transport-error", }; - metrics.incoming_connections_errors_total.with_label_values(&[reason]).inc(); + metrics + .incoming_connections_errors_total + .with_label_values(&[reason]) + .inc(); } }, Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { - trace!(target: "sub-libp2p", "Libp2p => BannedPeer({}). Connected via {:?}.", - peer_id, endpoint); + debug!( + target: "sub-libp2p", + "Libp2p => BannedPeer({}). 
Connected via {:?}.",
+						peer_id, endpoint,
+					);
					if let Some(metrics) = this.metrics.as_ref() {
-						metrics.incoming_connections_errors_total.with_label_values(&["banned"]).inc();
+						metrics
+							.incoming_connections_errors_total
+							.with_label_values(&["banned"])
+							.inc();
					}
				},
				Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) =>
-					trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}",
-						address, error),
-				Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses }) => {
+					trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", address, error),
+				Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses, .. }) => {
					if let Some(metrics) = this.metrics.as_ref() {
						metrics.listeners_local_addresses.sub(addresses.len() as u64);
					}
-					let addrs = addresses.into_iter().map(|a| a.to_string())
-						.collect::<Vec<_>>().join(", ");
+					let addrs =
+						addresses.into_iter().map(|a| a.to_string()).collect::<Vec<_>>().join(", ");
					match reason {
						Ok(()) => error!(
							target: "sub-libp2p",
@@ -1692,8 +2025,8 @@ impl Future for NetworkWorker {
						),
					}
				},
-				Poll::Ready(SwarmEvent::ListenerError { error }) => {
-					trace!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error);
+				Poll::Ready(SwarmEvent::ListenerError { error, .. }) => {
+					debug!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error);
					if let Some(metrics) = this.metrics.as_ref() {
						metrics.listeners_errors_total.inc();
					}
@@ -1701,103 +2034,115 @@ impl Future for NetworkWorker {
			};
		}

-		let num_connected_peers = this.network_service.user_protocol_mut().num_connected_peers();
+		let num_connected_peers =
+			this.network_service.behaviour_mut().user_protocol_mut().num_connected_peers();

		// Update the variables shared with the `NetworkService`.
		this.num_connected.store(num_connected_peers, Ordering::Relaxed);
		{
-			let external_addresses = Swarm::<B, H>::external_addresses(&this.network_service).cloned().collect();
+			let external_addresses = Swarm::<B>::external_addresses(&this.network_service)
+				.map(|r| &r.addr)
+				.cloned()
+				.collect();
			*this.external_addresses.lock() = external_addresses;
		}

-		let is_major_syncing = match this.network_service.user_protocol_mut().sync_state() {
-			SyncState::Idle => false,
-			SyncState::Downloading => true,
-		};
+		let is_major_syncing =
+			match this.network_service.behaviour_mut().user_protocol_mut().sync_state().state {
+				SyncState::Idle => false,
+				SyncState::Downloading => true,
+			};
+
+		this.tx_handler_controller.set_gossip_enabled(!is_major_syncing);

		this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed);

		if let Some(metrics) = this.metrics.as_ref() {
-			for (proto, buckets) in this.network_service.num_entries_per_kbucket() {
+			for (proto, buckets) in this.network_service.behaviour_mut().num_entries_per_kbucket() {
				for (lower_ilog2_bucket_bound, num_entries) in buckets {
-					metrics.kbuckets_num_nodes
-						.with_label_values(&[&proto.as_ref(), &lower_ilog2_bucket_bound.to_string()])
+					metrics
+						.kbuckets_num_nodes
+						.with_label_values(&[
+							&proto.as_ref(),
+							&lower_ilog2_bucket_bound.to_string(),
+						])
						.set(num_entries as u64);
				}
			}
-			for (proto, num_entries) in this.network_service.num_kademlia_records() {
-				metrics.kademlia_records_count.with_label_values(&[&proto.as_ref()]).set(num_entries as u64);
+			for (proto, num_entries) in this.network_service.behaviour_mut().num_kademlia_records()
+			{
+				metrics
+					.kademlia_records_count
+					.with_label_values(&[&proto.as_ref()])
+					.set(num_entries as u64);
			}
-			for (proto, num_entries) in this.network_service.kademlia_records_total_size() {
-
metrics.kademlia_records_sizes_total.with_label_values(&[&proto.as_ref()]).set(num_entries as u64);
+			for (proto, num_entries) in
+				this.network_service.behaviour_mut().kademlia_records_total_size()
+			{
+				metrics
+					.kademlia_records_sizes_total
+					.with_label_values(&[&proto.as_ref()])
+					.set(num_entries as u64);
			}
-			metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64);
-			metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64);
-			metrics.pending_connections.set(Swarm::network_info(&this.network_service).num_connections_pending as u64);
+			metrics
+				.peerset_num_discovered
+				.set(this.network_service.behaviour_mut().user_protocol().num_discovered_peers()
+					as u64);
+			metrics.peerset_num_requested.set(
+				this.network_service.behaviour_mut().user_protocol().requested_peers().count()
+					as u64,
+			);
+			metrics.pending_connections.set(
+				Swarm::network_info(&this.network_service).connection_counters().num_pending()
+					as u64,
+			);
		}

		Poll::Pending
	}
}

-impl<B: BlockT, H: ExHashT> Unpin for NetworkWorker<B, H> {
-}
-
-/// Turns bytes that are potentially UTF-8 into a reasonable representable string.
-///
-/// Meant to be used only for debugging or metrics-reporting purposes.
-pub(crate) fn maybe_utf8_bytes_to_string(id: &[u8]) -> Cow<str> {
-	if let Ok(s) = std::str::from_utf8(&id[..]) {
-		Cow::Borrowed(s)
-	} else {
-		Cow::Owned(format!("{:?}", id))
-	}
-}
+impl<B: BlockT, H: ExHashT> Unpin for NetworkWorker<B, H> {}

/// The libp2p swarm, customized for our needs.
-type Swarm<B, H> = libp2p::swarm::Swarm<Behaviour<B, H>>;
+type Swarm<B> = libp2p::swarm::Swarm<Behaviour<B>>;

// Implementation of `import_queue::Link` trait using the available local variables.
-struct NetworkLink<'a, B: BlockT, H: ExHashT> {
-	protocol: &'a mut Swarm<B, H>,
+struct NetworkLink<'a, B: BlockT> {
+	protocol: &'a mut Swarm<B>,
}

-impl<'a, B: BlockT, H: ExHashT> Link<B> for NetworkLink<'a, B, H> {
+impl<'a, B: BlockT> Link<B> for NetworkLink<'a, B> {
	fn blocks_processed(
		&mut self,
		imported: usize,
		count: usize,
-		results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>
+		results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
	) {
-		self.protocol.user_protocol_mut().on_blocks_processed(imported, count, results)
-	}
-	fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor<B>, success: bool) {
-		self.protocol.user_protocol_mut().justification_import_result(hash.clone(), number, success);
-		if !success {
-			info!("💔 Invalid justification provided by {} for #{}", who, hash);
-			self.protocol.user_protocol_mut().disconnect_peer(&who);
-			self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid justification"));
-		}
-	}
-	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
-		self.protocol.user_protocol_mut().request_justification(hash, number)
+		self.protocol
+			.behaviour_mut()
+			.user_protocol_mut()
+			.on_blocks_processed(imported, count, results)
	}
-	fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor<B>) {
-		self.protocol.user_protocol_mut().request_finality_proof(hash, number)
-	}
-	fn finality_proof_imported(
+	fn justification_imported(
		&mut self,
		who: PeerId,
-		request_block: (B::Hash, NumberFor<B>),
-		finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
+		hash: &B::Hash,
+		number: NumberFor<B>,
+		success: bool,
	) {
-		let success = finalization_result.is_ok();
-		self.protocol.user_protocol_mut().finality_proof_import_result(request_block, finalization_result);
-		if !success {
-			info!("💔 Invalid finality proof provided by {} for #{}", who, request_block.0);
-
self.protocol.user_protocol_mut().disconnect_peer(&who); - self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid finality proof")); - } + self.protocol.behaviour_mut().user_protocol_mut().justification_import_result( + who, + hash.clone(), + number, + success, + ); + } + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + self.protocol + .behaviour_mut() + .user_protocol_mut() + .request_justification(hash, number) } } @@ -1807,9 +2152,9 @@ fn ensure_addresses_consistent_with_transport<'a>( ) -> Result<(), Error> { if matches!(transport, TransportConfig::MemoryOnly) { let addresses: Vec<_> = addresses - .filter(|x| x.iter() - .any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) - ) + .filter(|x| { + x.iter().any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) + }) .cloned() .collect(); @@ -1817,13 +2162,11 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }); + }) } } else { let addresses: Vec<_> = addresses - .filter(|x| x.iter() - .any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) - ) + .filter(|x| x.iter().any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_)))) .cloned() .collect(); @@ -1831,7 +2174,7 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }); + }) } } diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index a63ce7a18a519..e33cd4b194d69 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,10 +18,8 @@ use crate::transport::BandwidthSinks; use prometheus_endpoint::{ - self as prometheus, - Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, - PrometheusError, Registry, U64, Opts, - SourcedCounter, SourcedGauge, MetricSource, + self as prometheus, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, MetricSource, Opts, + PrometheusError, Registry, SourcedCounter, SourcedGauge, U64, }; use std::{ str, @@ -56,7 +54,6 @@ pub struct Metrics { pub distinct_peers_connections_closed_total: Counter, pub distinct_peers_connections_opened_total: Counter, pub import_queue_blocks_submitted: Counter, - pub import_queue_finality_proofs_submitted: Counter, pub import_queue_justifications_submitted: Counter, pub incoming_connections_errors_total: CounterVec, pub incoming_connections_total: Counter, @@ -79,7 +76,6 @@ pub struct Metrics { pub requests_in_success_total: HistogramVec, pub requests_out_failure_total: CounterVec, pub requests_out_success_total: HistogramVec, - pub requests_out_started_total: CounterVec, } impl Metrics { @@ -112,10 +108,6 @@ impl Metrics { "import_queue_blocks_submitted", "Number of blocks submitted to the import queue.", )?, registry)?, - import_queue_finality_proofs_submitted: prometheus::register(Counter::new( - "import_queue_finality_proofs_submitted", - "Number of finality proofs submitted to the import queue.", - )?, registry)?, import_queue_justifications_submitted: prometheus::register(Counter::new( "import_queue_justifications_submitted", "Number of justifications submitted to the import queue.", @@ -235,7 +227,8 @@ impl Metrics { HistogramOpts { common_opts: Opts::new( "sub_libp2p_requests_in_success_total", - "Total number of requests received and answered" + "For successful incoming requests, time between receiving the request and \ + starting to send the response" ), buckets: prometheus::exponential_buckets(0.001, 2.0, 16) .expect("parameters are always valid values; qed"), @@ -253,20 +246,13 @@ impl Metrics { HistogramOpts { common_opts: Opts::new( "sub_libp2p_requests_out_success_total", - "For successful requests, time between a request's start and finish" + "For successful outgoing requests, time between a request's start and finish" ), buckets: prometheus::exponential_buckets(0.001, 2.0, 16) .expect("parameters are always valid values; qed"), }, &["protocol"] )?, registry)?, - requests_out_started_total: prometheus::register(CounterVec::new( - Opts::new( - "sub_libp2p_requests_out_started_total", - "Total number of requests emitted" - ), - &["protocol"] - )?, registry)?, }) } } @@ -279,13 +265,14 @@ impl BandwidthCounters { /// Registers the `BandwidthCounters` metric whose values are /// obtained from the given sinks. fn register(registry: &Registry, sinks: Arc) -> Result<(), PrometheusError> { - prometheus::register(SourcedCounter::new( - &Opts::new( - "sub_libp2p_network_bytes_total", - "Total bandwidth usage" - ).variable_label("direction"), - BandwidthCounters(sinks), - )?, registry)?; + prometheus::register( + SourcedCounter::new( + &Opts::new("sub_libp2p_network_bytes_total", "Total bandwidth usage") + .variable_label("direction"), + BandwidthCounters(sinks), + )?, + registry, + )?; Ok(()) } @@ -308,13 +295,16 @@ impl MajorSyncingGauge { /// Registers the `MajorSyncGauge` metric whose value is /// obtained from the given `AtomicBool`. 
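The `SourcedCounter`/`SourcedGauge` registrations in this file all follow one pattern: the network worker owns and updates a shared atomic, and the metric merely reads that value when Prometheus scrapes. A minimal sketch of the underlying sharing, using plain `std` atomics and hypothetical names rather than the `prometheus_endpoint` types:

```rust
use std::sync::{
	atomic::{AtomicUsize, Ordering},
	Arc,
};

// The worker side owns the counter and keeps it current...
struct Worker {
	num_connected: Arc<AtomicUsize>,
}

impl Worker {
	fn on_peer_connected(&self) {
		self.num_connected.fetch_add(1, Ordering::Relaxed);
	}
}

// ...while the metric side holds a clone of the `Arc` and only reads the
// value at scrape time, so no locking is needed on the hot path.
struct PeersGauge(Arc<AtomicUsize>);

impl PeersGauge {
	fn collect(&self) -> u64 {
		self.0.load(Ordering::Relaxed) as u64
	}
}
```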
fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { - prometheus::register(SourcedGauge::new( - &Opts::new( - "sub_libp2p_is_major_syncing", - "Whether the node is performing a major sync or not.", - ), - MajorSyncingGauge(value), - )?, registry)?; + prometheus::register( + SourcedGauge::new( + &Opts::new( + "sub_libp2p_is_major_syncing", + "Whether the node is performing a major sync or not.", + ), + MajorSyncingGauge(value), + )?, + registry, + )?; Ok(()) } @@ -336,13 +326,13 @@ impl NumConnectedGauge { /// Registers the `MajorSyncingGauge` metric whose value is /// obtained from the given `AtomicUsize`. fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { - prometheus::register(SourcedGauge::new( - &Opts::new( - "sub_libp2p_peers_count", - "Number of connected peers", - ), - NumConnectedGauge(value), - )?, registry)?; + prometheus::register( + SourcedGauge::new( + &Opts::new("sub_libp2p_peers_count", "Number of connected peers"), + NumConnectedGauge(value), + )?, + registry, + )?; Ok(()) } diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 1b86a5fa4317d..2d6241278005b 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -30,18 +30,18 @@ //! [`OutChannels::push`] to put the sender within a [`OutChannels`]. //! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the //! collection. -//! use crate::Event; -use super::maybe_utf8_bytes_to_string; -use futures::{prelude::*, channel::mpsc, ready, stream::FusedStream}; +use futures::{channel::mpsc, prelude::*, ready, stream::FusedStream}; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; use std::{ convert::TryFrom as _, - fmt, pin::Pin, sync::Arc, - task::{Context, Poll} + fmt, + pin::Pin, + sync::Arc, + task::{Context, Poll}, }; /// Creates a new channel that can be associated to a [`OutChannels`]. @@ -101,8 +101,10 @@ impl Stream for Receiver { let metrics = self.metrics.lock().clone(); match metrics.as_ref().map(|m| m.as_ref()) { Some(Some(metrics)) => metrics.event_out(&ev, self.name), - Some(None) => (), // no registry - None => log::warn!("Inconsistency in out_events: event happened before sender associated"), + Some(None) => (), // no registry + None => log::warn!( + "Inconsistency in out_events: event happened before sender associated" + ), } Poll::Ready(Some(ev)) } else { @@ -137,16 +139,10 @@ pub struct OutChannels { impl OutChannels { /// Creates a new empty collection of senders. pub fn new(registry: Option<&Registry>) -> Result { - let metrics = if let Some(registry) = registry { - Some(Metrics::register(registry)?) - } else { - None - }; + let metrics = + if let Some(registry) = registry { Some(Metrics::register(registry)?) } else { None }; - Ok(OutChannels { - event_streams: Vec::new(), - metrics: Arc::new(metrics), - }) + Ok(Self { event_streams: Vec::new(), metrics: Arc::new(metrics) }) } /// Adds a new [`Sender`] to the collection. @@ -165,9 +161,8 @@ impl OutChannels { /// Sends an event. 
pub fn send(&mut self, event: Event) { - self.event_streams.retain(|sender| { - sender.inner.unbounded_send(event.clone()).is_ok() - }); + self.event_streams + .retain(|sender| sender.inner.unbounded_send(event.clone()).is_ok()); if let Some(metrics) = &*self.metrics { for ev in &self.event_streams { @@ -224,60 +219,72 @@ impl Metrics { fn event_in(&self, event: &Event, num: u64, name: &str) { match event { Event::Dht(_) => { + self.events_total.with_label_values(&["dht", "sent", name]).inc_by(num); + }, + Event::SyncConnected { .. } => { self.events_total - .with_label_values(&["dht", "sent", name]) + .with_label_values(&["sync-connected", "sent", name]) .inc_by(num); - } - Event::NotificationStreamOpened { engine_id, .. } => { + }, + Event::SyncDisconnected { .. } => { self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "sent", name]) + .with_label_values(&["sync-disconnected", "sent", name]) .inc_by(num); }, - Event::NotificationStreamClosed { engine_id, .. } => { + Event::NotificationStreamOpened { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "sent", name]) + .with_label_values(&[&format!("notif-open-{:?}", protocol), "sent", name]) .inc_by(num); }, - Event::NotificationsReceived { messages, .. } => { - for (engine_id, message) in messages { + Event::NotificationStreamClosed { protocol, .. } => { + self.events_total + .with_label_values(&[&format!("notif-closed-{:?}", protocol), "sent", name]) + .inc_by(num); + }, + Event::NotificationsReceived { messages, .. } => + for (protocol, message) in messages { self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "sent", name]) + .with_label_values(&[&format!("notif-{:?}", protocol), "sent", name]) .inc_by(num); - self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "sent", name]) - .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::max_value()))); - } - }, + self.notifications_sizes.with_label_values(&[protocol, "sent", name]).inc_by( + num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::MAX)), + ); + }, } } fn event_out(&self, event: &Event, name: &str) { match event { Event::Dht(_) => { + self.events_total.with_label_values(&["dht", "received", name]).inc(); + }, + Event::SyncConnected { .. } => { + self.events_total.with_label_values(&["sync-connected", "received", name]).inc(); + }, + Event::SyncDisconnected { .. } => { self.events_total - .with_label_values(&["dht", "received", name]) + .with_label_values(&["sync-disconnected", "received", name]) .inc(); - } - Event::NotificationStreamOpened { engine_id, .. } => { + }, + Event::NotificationStreamOpened { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-open-{:?}", protocol), "received", name]) .inc(); }, - Event::NotificationStreamClosed { engine_id, .. } => { + Event::NotificationStreamClosed { protocol, .. } => { self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-closed-{:?}", protocol), "received", name]) .inc(); }, - Event::NotificationsReceived { messages, .. } => { - for (engine_id, message) in messages { + Event::NotificationsReceived { messages, .. 
} => + for (protocol, message) in messages { self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "received", name]) + .with_label_values(&[&format!("notif-{:?}", protocol), "received", name]) .inc(); self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "received", name]) - .inc_by(u64::try_from(message.len()).unwrap_or(u64::max_value())); - } - }, + .with_label_values(&[&protocol, "received", name]) + .inc_by(u64::try_from(message.len()).unwrap_or(u64::MAX)); + }, } } } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 4b6f9dd156482..69b172d07edfe 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,12 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, Event, NetworkService, NetworkWorker}; +use crate::{ + block_request_handler::BlockRequestHandler, config, + light_client_requests::handler::LightClientRequestHandler, + state_request_handler::StateRequestHandler, Event, NetworkService, NetworkWorker, +}; -use libp2p::PeerId; use futures::prelude::*; +use libp2p::PeerId; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{sync::Arc, time::Duration}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< @@ -34,79 +38,96 @@ type TestNetworkService = NetworkService< /// /// > **Note**: We return the events stream in order to not possibly lose events between the /// > construction of the service and the moment the events stream is grabbed. 
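The `OutChannels::send` path above is a small broadcast primitive: the event is cloned once per subscriber, and senders whose receiver has been dropped are discarded in the same pass. A stripped-down sketch of that retain-based broadcast (generic over the event type, unlike the real `Event`-specific code):

```rust
use futures::channel::mpsc;

struct OutChannels<E: Clone> {
	event_streams: Vec<mpsc::UnboundedSender<E>>,
}

impl<E: Clone> OutChannels<E> {
	fn push(&mut self, sender: mpsc::UnboundedSender<E>) {
		self.event_streams.push(sender);
	}

	/// Clones the event for every live subscriber; a failed `unbounded_send`
	/// means the receiver hung up, so that sender is dropped from the list.
	fn send(&mut self, event: E) {
		self.event_streams
			.retain(|sender| sender.unbounded_send(event.clone()).is_ok());
	}
}
```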
-fn build_test_full_node(config: config::NetworkConfiguration)
-	-> (Arc<TestNetworkService>, impl Stream<Item = Event>)
-{
-	let client = Arc::new(
-		TestClientBuilder::with_default_backend()
-			.build_with_longest_chain()
-			.0,
-	);
+fn build_test_full_node(
+	config: config::NetworkConfiguration,
+) -> (Arc<TestNetworkService>, impl Stream<Item = Event>) {
+	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);

	#[derive(Clone)]
	struct PassThroughVerifier(bool);
-	impl<B: BlockT> sp_consensus::import_queue::Verifier<B> for PassThroughVerifier {
-		fn verify(
+
+	#[async_trait::async_trait]
+	impl<B: BlockT> sc_consensus::Verifier<B> for PassThroughVerifier {
+		async fn verify(
			&mut self,
-			origin: sp_consensus::BlockOrigin,
-			header: B::Header,
-			justification: Option<Justification>,
-			body: Option<Vec<B::Extrinsic>>,
+			mut block: sc_consensus::BlockImportParams<B, ()>,
		) -> Result<
			(
-				sp_consensus::BlockImportParams<B, ()>,
+				sc_consensus::BlockImportParams<B, ()>,
				Option<Vec<(sp_blockchain::well_known_cache_keys::Id, Vec<u8>)>>,
			),
			String,
		> {
-			let maybe_keys = header
+			let maybe_keys = block
+				.header
				.digest()
				.log(|l| {
					l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura"))
						.or_else(|| {
-							l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe"))
+							l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(
+								b"babe",
+							))
						})
				})
				.map(|blob| {
-					vec![(
-						sp_blockchain::well_known_cache_keys::AUTHORITIES,
-						blob.to_vec(),
-					)]
+					vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())]
				});

-			let mut import = sp_consensus::BlockImportParams::new(origin, header);
-			import.body = body;
-			import.finalized = self.0;
-			import.justification = justification;
-			import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain);
-			Ok((import, maybe_keys))
+			block.finalized = self.0;
+			block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain);
+			Ok((block, maybe_keys))
		}
	}

-	let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new(
+	let import_queue = Box::new(sc_consensus::BasicQueue::new(
		PassThroughVerifier(false),
		Box::new(client.clone()),
		None,
-		None,
		&sp_core::testing::TaskExecutor::new(),
		None,
	));

+	let protocol_id = config::ProtocolId::from("/test-protocol-name");
+
+	let block_request_protocol_config = {
+		let (handler, protocol_config) = BlockRequestHandler::new(&protocol_id, client.clone(), 50);
+		async_std::task::spawn(handler.run().boxed());
+		protocol_config
+	};
+
+	let state_request_protocol_config = {
+		let (handler, protocol_config) = StateRequestHandler::new(&protocol_id, client.clone(), 50);
+		async_std::task::spawn(handler.run().boxed());
+		protocol_config
+	};
+
+	let light_client_request_protocol_config = {
+		let (handler, protocol_config) =
+			LightClientRequestHandler::new(&protocol_id, client.clone());
+		async_std::task::spawn(handler.run().boxed());
+		protocol_config
+	};
+
	let worker = NetworkWorker::new(config::Params {
		role: config::Role::Full,
		executor: None,
+		transactions_handler_executor: Box::new(|task| {
+			async_std::task::spawn(task);
+		}),
		network_config: config,
		chain: client.clone(),
-		finality_proof_provider: None,
-		finality_proof_request_builder: None,
		on_demand: None,
		transaction_pool: Arc::new(crate::config::EmptyTransactionPool),
-		protocol_id: config::ProtocolId::from("/test-protocol-name"),
+		protocol_id,
		import_queue,
		block_announce_validator: Box::new(
			sp_consensus::block_validation::DefaultBlockAnnounceValidator,
		),
		metrics_registry: None,
+		block_request_protocol_config,
+		state_request_protocol_config,
+		light_client_request_protocol_config,
+		warp_sync: None,
	})
	.unwrap();
@@ -121,31 +142,46 @@ fn
build_test_full_node(config: config::NetworkConfiguration) (service, event_stream) } -const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; +const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. -/// The nodes are connected together and have the `ENGINE_ID` protocol registered. -fn build_nodes_one_proto() - -> (Arc, impl Stream, Arc, impl Stream) -{ +/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. +fn build_nodes_one_proto() -> ( + Arc, + impl Stream, + Arc, + impl Stream, +) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: Default::default(), + }], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], - listen_addresses: vec![], - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + ..Default::default() + }, }], + listen_addresses: vec![], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); (node1, events_stream1, node2, events_stream2) @@ -161,10 +197,18 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. for _ in 0..(rand::random::() % 5) { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node1.write_notification( + node2.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } for _ in 0..(rand::random::() % 5) { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node2.write_notification( + node1.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } async_std::task::block_on(async move { @@ -181,24 +225,32 @@ fn notifications_state_consistent() { iterations += 1; if iterations >= 1_000 { assert!(something_happened); - break; + break } // Start by sending a notification from node1 to node2 and vice-versa. Part of the // test consists in ensuring that notifications get ignored if the stream isn't open. 
if rand::random::() % 5 >= 3 { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node1.write_notification( + node2.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } if rand::random::() % 5 >= 3 { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + node2.write_notification( + node1.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } // Also randomly disconnect the two nodes from time to time. if rand::random::() % 20 == 0 { - node1.disconnect_peer(node2.local_peer_id().clone()); + node1.disconnect_peer(node2.local_peer_id().clone(), PROTOCOL_NAME); } if rand::random::() % 20 == 0 { - node2.disconnect_peer(node1.local_peer_id().clone()); + node2.disconnect_peer(node1.local_peer_id().clone(), PROTOCOL_NAME); } // Grab next event from either `events_stream1` or `events_stream2`. @@ -219,58 +271,70 @@ fn notifications_state_consistent() { }; match next_event { - future::Either::Left(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + future::Either::Left(Event::NotificationStreamOpened { + remote, protocol, .. + }) => { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } - future::Either::Right(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + assert_eq!(protocol, PROTOCOL_NAME); + }, + future::Either::Right(Event::NotificationStreamOpened { + remote, protocol, .. + }) => { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } - future::Either::Left(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + assert_eq!(protocol, PROTOCOL_NAME); + }, + future::Either::Left(Event::NotificationStreamClosed { + remote, protocol, .. + }) => { assert!(node1_to_node2_open); node1_to_node2_open = false; assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } - future::Either::Right(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + assert_eq!(protocol, PROTOCOL_NAME); + }, + future::Either::Right(Event::NotificationStreamClosed { + remote, protocol, .. + }) => { assert!(node2_to_node1_open); node2_to_node1_open = false; assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } + assert_eq!(protocol, PROTOCOL_NAME); + }, future::Either::Left(Event::NotificationsReceived { remote, .. }) => { assert!(node1_to_node2_open); assert_eq!(remote, *node2.local_peer_id()); if rand::random::() % 5 >= 4 { node1.write_notification( node2.local_peer_id().clone(), - ENGINE_ID, - b"hello world".to_vec() + PROTOCOL_NAME, + b"hello world".to_vec(), ); } - } + }, future::Either::Right(Event::NotificationsReceived { remote, .. }) => { assert!(node2_to_node1_open); assert_eq!(remote, *node1.local_peer_id()); if rand::random::() % 5 >= 4 { node2.write_notification( node1.local_peer_id().clone(), - ENGINE_ID, - b"hello world".to_vec() + PROTOCOL_NAME, + b"hello world".to_vec(), ); } - } + }, // Add new events here. - future::Either::Left(Event::Dht(_)) => {} - future::Either::Right(Event::Dht(_)) => {} + future::Either::Left(Event::SyncConnected { .. }) => {}, + future::Either::Right(Event::SyncConnected { .. }) => {}, + future::Either::Left(Event::SyncDisconnected { .. }) => {}, + future::Either::Right(Event::SyncDisconnected { .. 
}) => {}, + future::Either::Left(Event::Dht(_)) => {}, + future::Either::Right(Event::Dht(_)) => {}, }; } }); @@ -281,31 +345,40 @@ fn lots_of_incoming_peers_works() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (main_node, _) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![listen_addr.clone()], - in_peers: u32::max_value(), + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { in_peers: u32::MAX, ..Default::default() }, + }], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); - let main_node_peer_id = main_node.local_peer_id().clone(); + let main_node_peer_id = *main_node.local_peer_id(); // We spawn background tasks and push them in this `Vec`. They will all be waited upon before // this test ends. let mut background_tasks_to_wait = Vec::new(); for _ in 0..32 { - let main_node_peer_id = main_node_peer_id.clone(); - let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], listen_addresses: vec![], - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr.clone(), - peer_id: main_node_peer_id.clone(), + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr.clone(), + peer_id: main_node_peer_id, + }], + ..Default::default() + }, }], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); background_tasks_to_wait.push(async_std::task::spawn(async move { @@ -341,9 +414,7 @@ fn lots_of_incoming_peers_works() { })); } - futures::executor::block_on(async move { - future::join_all(background_tasks_to_wait).await - }); + futures::executor::block_on(async move { future::join_all(background_tasks_to_wait).await }); } #[test] @@ -362,14 +433,13 @@ fn notifications_back_pressure() { while received_notifications < TOTAL_NOTIFS { match events_stream2.next().await.unwrap() { Event::NotificationStreamClosed { .. } => panic!(), - Event::NotificationsReceived { messages, .. } => { + Event::NotificationsReceived { messages, .. } => for message in messages { - assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.0, PROTOCOL_NAME); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; - } - } - _ => {} + }, + _ => {}, }; if rand::random::() < 2 { @@ -383,13 +453,13 @@ fn notifications_back_pressure() { loop { match events_stream1.next().await.unwrap() { Event::NotificationStreamOpened { .. } => break, - _ => {} + _ => {}, }; } // Sending! for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id.clone(), ENGINE_ID).unwrap(); + let notif = node1.notification_sender(node2_id.clone(), PROTOCOL_NAME).unwrap(); notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); } @@ -397,6 +467,78 @@ fn notifications_back_pressure() { }); } +#[test] +fn fallback_name_working() { + // Node 1 supports the protocols "new" and "old". Node 2 only supports "old". 
Checks whether + // they can connect. + + const NEW_PROTOCOL_NAME: Cow<'static, str> = + Cow::Borrowed("/new-shiny-protocol-that-isnt-PROTOCOL_NAME"); + + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: NEW_PROTOCOL_NAME.clone(), + fallback_names: vec![PROTOCOL_NAME], + max_notification_size: 1024 * 1024, + set_config: Default::default(), + }], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + ..Default::default() + }, + }], + listen_addresses: vec![], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + let receiver = async_std::task::spawn(async move { + // Wait for the `NotificationStreamOpened`. + loop { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => { + assert_eq!(protocol, PROTOCOL_NAME); + assert_eq!(negotiated_fallback, None); + break + }, + _ => {}, + }; + } + }); + + async_std::task::block_on(async move { + // Wait for the `NotificationStreamOpened`. + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } + if protocol == NEW_PROTOCOL_NAME => + { + assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); + break + } + _ => {}, + }; + } + + receiver.await; + }); +} + #[test] #[should_panic(expected = "don't match the transport")] fn ensure_listen_addresses_consistent_with_transport_memory() { @@ -405,7 +547,7 @@ fn ensure_listen_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -416,7 +558,7 @@ fn ensure_listen_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -433,7 +575,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, boot_nodes: vec![boot_node], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -449,7 +591,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], boot_nodes: vec![boot_node], - .. 
config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -465,8 +607,11 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, - reserved_nodes: vec![reserved_node], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + default_peers_set: config::SetConfig { + reserved_nodes: vec![reserved_node], + ..Default::default() + }, + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -481,8 +626,11 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - reserved_nodes: vec![reserved_node], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + default_peers_set: config::SetConfig { + reserved_nodes: vec![reserved_node], + ..Default::default() + }, + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -496,7 +644,7 @@ fn ensure_public_addresses_consistent_with_transport_memory() { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, public_addresses: vec![public_address], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -509,6 +657,6 @@ fn ensure_public_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], public_addresses: vec![public_address], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs new file mode 100644 index 0000000000000..b4e5320ebfda8 --- /dev/null +++ b/client/network/src/state_request_handler.rs @@ -0,0 +1,245 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper for handling (i.e. answering) state requests from a remote peer via the +//! [`crate::request_responses::RequestResponsesBehaviour`]. 
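The handler added below answers state requests on a protocol named `/<protocol-id>/state/1` and throttles peers that keep repeating an identical request, tracking repeats in an `LruCache` keyed by peer, block, and start key. A simplified sketch of that throttling check, using plain byte keys in place of the real `SeenRequestsKey`:

```rust
use lru::LruCache;

const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;

enum Seen {
	First,
	Fulfilled(usize),
}

/// Returns `true` once a peer has repeated the exact same request more than
/// the allowed number of times, i.e. when it should be reported.
fn note_request(
	seen: &mut LruCache<(Vec<u8>, Vec<u8>), Seen>,
	peer: Vec<u8>,
	request: Vec<u8>,
) -> bool {
	match seen.get_mut(&(peer.clone(), request.clone())) {
		// Seen before but not yet answered; nothing to do.
		Some(Seen::First) => false,
		// Answered at least once already: count the repeat.
		Some(Seen::Fulfilled(n)) => {
			*n = n.saturating_add(1);
			*n > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER
		},
		// First sighting: remember it (the LRU evicts old entries on its own).
		None => {
			seen.put((peer, request), Seen::First);
			false
		},
	}
}
```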
+ +use crate::{ + chain::Client, + config::ProtocolId, + request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + schema::v1::{StateEntry, StateRequest, StateResponse}, + PeerId, ReputationChange, +}; +use codec::{Decode, Encode}; +use futures::{ + channel::{mpsc, oneshot}, + stream::StreamExt, +}; +use log::{debug, trace}; +use lru::LruCache; +use prost::Message; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use std::{ + hash::{Hash, Hasher}, + sync::Arc, + time::Duration, +}; + +const LOG_TARGET: &str = "sync"; +const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual response may be bigger. +const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; + +mod rep { + use super::ReputationChange as Rep; + + /// Reputation change when a peer sent us the same request multiple times. + pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times"); +} + +/// Generates a [`ProtocolConfig`] for the state request protocol, refusing incoming requests. +pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { + ProtocolConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + request_timeout: Duration::from_secs(40), + inbound_queue: None, + } +} + +/// Generate the state protocol name from the chain-specific protocol identifier. +fn generate_protocol_name(protocol_id: &ProtocolId) -> String { + let mut s = String::new(); + s.push_str("/"); + s.push_str(protocol_id.as_ref()); + s.push_str("/state/1"); + s +} + +/// The key of [`StateRequestHandler::seen_requests`]. +#[derive(Eq, PartialEq, Clone)] +struct SeenRequestsKey<B: BlockT> { + peer: PeerId, + block: B::Hash, + start: Vec<u8>, +} + +impl<B: BlockT> Hash for SeenRequestsKey<B> { + fn hash<H: Hasher>(&self, state: &mut H) { + self.peer.hash(state); + self.block.hash(state); + self.start.hash(state); + } +} + +/// The value of [`StateRequestHandler::seen_requests`]. +enum SeenRequestsValue { + /// First time we have seen the request. + First, + /// We have fulfilled the request `n` times. + Fulfilled(usize), +} + +/// Handler for incoming state requests from a remote peer. +pub struct StateRequestHandler<B: BlockT> { + client: Arc<dyn Client<B>>, + request_receiver: mpsc::Receiver<IncomingRequest>, + /// Maps from request to number of times we have seen this request. + /// + /// This is used to check if a peer is spamming us with the same request. + seen_requests: LruCache<SeenRequestsKey<B>, SeenRequestsValue>, +} + +impl<B: BlockT> StateRequestHandler<B> { + /// Create a new [`StateRequestHandler`]. + pub fn new( + protocol_id: &ProtocolId, + client: Arc<dyn Client<B>>, + num_peer_hint: usize, + ) -> (Self, ProtocolConfig) { + // Reserve enough request slots for one request per peer when we are at the maximum + // number of peers. + let (tx, request_receiver) = mpsc::channel(num_peer_hint); + + let mut protocol_config = generate_protocol_config(protocol_id); + protocol_config.inbound_queue = Some(tx); + + let seen_requests = LruCache::new(num_peer_hint * 2); + + (Self { client, request_receiver, seen_requests }, protocol_config) + } + + /// Run [`StateRequestHandler`].
+ pub async fn run(mut self) { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_request(payload, pending_response, &peer) { + Ok(()) => debug!(target: LOG_TARGET, "Handled state request from {}.", peer), + Err(e) => debug!( + target: LOG_TARGET, + "Failed to handle state request from {}: {}", peer, e, + ), + } + } + } + + fn handle_request( + &mut self, + payload: Vec<u8>, + pending_response: oneshot::Sender<OutgoingResponse>, + peer: &PeerId, + ) -> Result<(), HandleRequestError> { + let request = StateRequest::decode(&payload[..])?; + let block: B::Hash = Decode::decode(&mut request.block.as_ref())?; + + let key = + SeenRequestsKey { peer: *peer, block: block.clone(), start: request.start.clone() }; + + let mut reputation_changes = Vec::new(); + + match self.seen_requests.get_mut(&key) { + Some(SeenRequestsValue::First) => {}, + Some(SeenRequestsValue::Fulfilled(ref mut requests)) => { + *requests = requests.saturating_add(1); + + if *requests > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER { + reputation_changes.push(rep::SAME_REQUEST); + } + }, + None => { + self.seen_requests.put(key.clone(), SeenRequestsValue::First); + }, + } + + trace!( + target: LOG_TARGET, + "Handling state request from {}: Block {:?}, Starting at {:?}, no_proof={}", + peer, + request.block, + sp_core::hexdisplay::HexDisplay::from(&request.start), + request.no_proof, + ); + + let result = if reputation_changes.is_empty() { + let mut response = StateResponse::default(); + + if !request.no_proof { + let (proof, count) = self.client.read_proof_collection( + &BlockId::hash(block), + &request.start, + MAX_RESPONSE_BYTES, + )?; + response.proof = proof.encode(); + if count == 0 { + response.complete = true; + } + } else { + let entries = self.client.storage_collection( + &BlockId::hash(block), + &request.start, + MAX_RESPONSE_BYTES, + )?; + response.entries = + entries.into_iter().map(|(key, value)| StateEntry { key, value }).collect(); + if response.entries.is_empty() { + response.complete = true; + } + } + + trace!( + target: LOG_TARGET, + "StateResponse contains {} keys, {} proof nodes, complete={}, from {:?} to {:?}", + response.entries.len(), + response.proof.len(), + response.complete, + response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + if let Some(value) = self.seen_requests.get_mut(&key) { + // If this is the first time we have processed this request, we need to change + // it to `Fulfilled`.
+ if let SeenRequestsValue::First = value { + *value = SeenRequestsValue::Fulfilled(1); + } + } + + let mut data = Vec::with_capacity(response.encoded_len()); + response.encode(&mut data)?; + Ok(data) + } else { + Err(()) + }; + + pending_response + .send(OutgoingResponse { result, reputation_changes, sent_feedback: None }) + .map_err(|_| HandleRequestError::SendResponse) + } +} + +#[derive(derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to decode block hash: {}.", _0)] + InvalidHash(codec::Error), + Client(sp_blockchain::Error), + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs new file mode 100644 index 0000000000000..82e7e8fe1714c --- /dev/null +++ b/client/network/src/transactions.rs @@ -0,0 +1,497 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! Transactions handling to plug on top of the network service. +//! +//! Usage: +//! +//! - Use [`TransactionsHandlerPrototype::new`] to create a prototype. +//! - Pass the return value of [`TransactionsHandlerPrototype::set_config`] to the network +//! configuration as an extra peers set. +//! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a +//! `Future` that processes transactions. + +use crate::{ + config::{self, ProtocolId, TransactionImport, TransactionImportFuture, TransactionPool}, + error, + protocol::message, + service::NetworkService, + utils::{interval, LruHashSet}, + Event, ExHashT, ObservedRole, +}; + +use codec::{Decode, Encode}; +use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; +use libp2p::{multiaddr, PeerId}; +use log::{debug, trace, warn}; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; +use sp_runtime::traits::Block as BlockT; +use std::{ + borrow::Cow, + collections::{hash_map::Entry, HashMap}, + iter, + num::NonZeroUsize, + pin::Pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + task::Poll, + time, +}; + +/// Interval at which we propagate transactions. +const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); + +/// Maximum number of known transaction hashes to keep for a peer. +/// +/// This should be approx. 2 blocks full of transactions for the network to function properly. +const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. + +/// Maximum allowed size for a transactions notification.
+const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; + +/// Maximum number of transaction validation requests we keep at any moment. +const MAX_PENDING_TRANSACTIONS: usize = 8192; + +mod rep { + use sc_peerset::ReputationChange as Rep; + /// Reputation change when a peer sends us any transaction. + /// + /// This forces the node to verify it, thus the negative value here. Once the transaction is + /// verified, the reputation change should be refunded with `ANY_TRANSACTION_REFUND`. + pub const ANY_TRANSACTION: Rep = Rep::new(-(1 << 4), "Any transaction"); + /// Reputation change when a peer sends us any transaction that is not invalid. + pub const ANY_TRANSACTION_REFUND: Rep = Rep::new(1 << 4, "Any transaction (refund)"); + /// Reputation change when a peer sends us a transaction that we didn't know about. + pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); + /// Reputation change when a peer sends us a bad transaction. + pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); + /// We received an unexpected transaction packet. + pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); +} + +struct Metrics { + propagated_transactions: Counter<U64>, +} + +impl Metrics { + fn register(r: &Registry) -> Result<Self, PrometheusError> { + Ok(Metrics { + propagated_transactions: register( + Counter::new( + "sync_propagated_transactions", + "Number of transactions propagated to at least one peer", + )?, + r, + )?, + }) + } +} + +#[pin_project::pin_project] +struct PendingTransaction<H: ExHashT> { + #[pin] + validation: TransactionImportFuture, + tx_hash: H, +} + +impl<H: ExHashT> Future for PendingTransaction<H> { + type Output = (H, TransactionImport); + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> { + let mut this = self.project(); + + if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { + return Poll::Ready((this.tx_hash.clone(), import_result)) + } + + Poll::Pending + } +} + +/// Prototype for a [`TransactionsHandler`]. +pub struct TransactionsHandlerPrototype { + protocol_name: Cow<'static, str>, +} + +impl TransactionsHandlerPrototype { + /// Create a new instance. + pub fn new(protocol_id: ProtocolId) -> Self { + TransactionsHandlerPrototype { + protocol_name: Cow::from({ + let mut proto = String::new(); + proto.push_str("/"); + proto.push_str(protocol_id.as_ref()); + proto.push_str("/transactions/1"); + proto + }), + } + } + + /// Returns the configuration of the set to put in the network configuration. + pub fn set_config(&self) -> config::NonDefaultSetConfig { + config::NonDefaultSetConfig { + notifications_protocol: self.protocol_name.clone(), + fallback_names: Vec::new(), + max_notification_size: MAX_TRANSACTIONS_SIZE, + set_config: config::SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: config::NonReservedPeerMode::Deny, + }, + } + } + + /// Turns the prototype into the actual handler. Returns a controller that allows controlling + /// the behaviour of the handler while it's running. + /// + /// Important: the transactions handler is initially disabled and doesn't gossip transactions. + /// You must call [`TransactionsHandlerController::set_gossip_enabled`] to enable it.
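// An illustrative call sequence for the prototype above (a sketch, not part of
// this patch); `protocol_id`, `network_config`, `service`, and `pool` are assumed
// to exist in the caller's context:
//
// let prototype = TransactionsHandlerPrototype::new(protocol_id.clone());
// network_config.extra_sets.push(prototype.set_config());
// // ...after the `NetworkService` has been built:
// let (handler, mut controller) = prototype.build(service, config::Role::Full, pool, None)?;
// async_std::task::spawn(handler.run());
// // The handler starts disabled; nothing is gossiped until this call:
// controller.set_gossip_enabled(true);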
+ pub fn build<B: BlockT + 'static, H: ExHashT>( + self, + service: Arc<NetworkService<B, H>>, + local_role: config::Role, + transaction_pool: Arc<dyn TransactionPool<H, B>>, + metrics_registry: Option<&Registry>, + ) -> error::Result<(TransactionsHandler<B, H>, TransactionsHandlerController<H>)> { + let event_stream = service.event_stream("transactions-handler").boxed(); + let (to_handler, from_controller) = mpsc::unbounded(); + let gossip_enabled = Arc::new(AtomicBool::new(false)); + + let handler = TransactionsHandler { + protocol_name: self.protocol_name, + propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), + pending_transactions: FuturesUnordered::new(), + pending_transactions_peers: HashMap::new(), + gossip_enabled: gossip_enabled.clone(), + service, + event_stream, + peers: HashMap::new(), + transaction_pool, + local_role, + from_controller, + metrics: if let Some(r) = metrics_registry { + Some(Metrics::register(r)?) + } else { + None + }, + }; + + let controller = TransactionsHandlerController { to_handler, gossip_enabled }; + + Ok((handler, controller)) + } +} + +/// Controls the behaviour of the [`TransactionsHandler`] it is connected to. +pub struct TransactionsHandlerController<H: ExHashT> { + to_handler: mpsc::UnboundedSender<ToHandler<H>>, + gossip_enabled: Arc<AtomicBool>, +} + +impl<H: ExHashT> TransactionsHandlerController<H> { + /// Controls whether transactions are being gossiped on the network. + pub fn set_gossip_enabled(&mut self, enabled: bool) { + self.gossip_enabled.store(enabled, Ordering::Relaxed); + } + + /// You may call this when new transactions are imported by the transaction pool. + /// + /// All transactions will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn propagate_transactions(&self) { + let _ = self.to_handler.unbounded_send(ToHandler::PropagateTransactions); + } + + /// You must call this when a new transaction is imported by the transaction pool. + /// + /// This transaction will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn propagate_transaction(&self, hash: H) { + let _ = self.to_handler.unbounded_send(ToHandler::PropagateTransaction(hash)); + } +} + +enum ToHandler<H: ExHashT> { + PropagateTransactions, + PropagateTransaction(H), +} + +/// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing. +pub struct TransactionsHandler<B: BlockT + 'static, H: ExHashT> { + protocol_name: Cow<'static, str>, + /// Interval at which we call `propagate_transactions`. + propagate_timeout: Pin<Box<dyn Stream<Item = ()> + Send>>, + /// Pending transactions verification tasks. + pending_transactions: FuturesUnordered<PendingTransaction<H>>, + /// As multiple peers can send us the same transaction, we group + /// these peers using the transaction hash while the transaction is + /// imported. This prevents us from importing the same transaction + /// multiple times concurrently. + pending_transactions_peers: HashMap<H, Vec<PeerId>>, + /// Network service to use to send messages and manage peers. + service: Arc<NetworkService<B, H>>, + /// Stream of networking events. + event_stream: Pin<Box<dyn Stream<Item = Event> + Send>>, + // All connected peers + peers: HashMap<PeerId, Peer<H>>, + transaction_pool: Arc<dyn TransactionPool<H, B>>, + gossip_enabled: Arc<AtomicBool>, + local_role: config::Role, + from_controller: mpsc::UnboundedReceiver<ToHandler<H>>, + /// Prometheus metrics. + metrics: Option<Metrics>, +} + +/// Peer information +#[derive(Debug)] +struct Peer<H: ExHashT> { + /// Holds a set of transactions known to this peer. + known_transactions: LruHashSet<H>, + role: ObservedRole, +} + +impl<B: BlockT + 'static, H: ExHashT> TransactionsHandler<B, H> { + /// Turns the [`TransactionsHandler`] into a future that should run forever and not be + /// interrupted.
+ pub async fn run(mut self) { + loop { + futures::select! { + _ = self.propagate_timeout.next().fuse() => { + self.propagate_transactions(); + }, + (tx_hash, result) = self.pending_transactions.select_next_some() => { + if let Some(peers) = self.pending_transactions_peers.remove(&tx_hash) { + peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result)); + } else { + warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); + } + }, + network_event = self.event_stream.next().fuse() => { + if let Some(network_event) = network_event { + self.handle_network_event(network_event).await; + } else { + // Networking has seemingly closed. Closing as well. + return; + } + }, + message = self.from_controller.select_next_some().fuse() => { + match message { + ToHandler::PropagateTransaction(hash) => self.propagate_transaction(&hash), + ToHandler::PropagateTransactions => self.propagate_transactions(), + } + }, + } + } + } + + async fn handle_network_event(&mut self, event: Event) { + match event { + Event::Dht(_) => {}, + Event::SyncConnected { remote } => { + let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) + .collect::<multiaddr::Multiaddr>(); + let result = self.service.add_peers_to_reserved_set( + self.protocol_name.clone(), + iter::once(addr).collect(), + ); + if let Err(err) = result { + log::error!(target: "sync", "Add reserved peer failed: {}", err); + } + }, + Event::SyncDisconnected { remote } => { + let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) + .collect::<multiaddr::Multiaddr>(); + let result = self.service.remove_peers_from_reserved_set( + self.protocol_name.clone(), + iter::once(addr).collect(), + ); + if let Err(err) = result { + log::error!(target: "sync", "Removing reserved peer failed: {}", err); + } + }, + + Event::NotificationStreamOpened { remote, protocol, role, .. } + if protocol == self.protocol_name => + { + let _was_in = self.peers.insert( + remote, + Peer { + known_transactions: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"), + ), + role, + }, + ); + debug_assert!(_was_in.is_none()); + } + Event::NotificationStreamClosed { remote, protocol } + if protocol == self.protocol_name => + { + let _peer = self.peers.remove(&remote); + debug_assert!(_peer.is_some()); + } + + Event::NotificationsReceived { remote, messages } => { + for (protocol, message) in messages { + if protocol != self.protocol_name { + continue + } + + if let Ok(m) = <message::Transactions<B::Extrinsic> as Decode>::decode( + &mut message.as_ref(), + ) { + self.on_transactions(remote, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + } + }, + + // Not our concern. + Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { ..
} => {}, + } + } + + /// Called when a peer sends us new transactions. + fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions<B::Extrinsic>) { + // Sending transactions to a light node is considered bad behavior. + if matches!(self.local_role, config::Role::Light) { + debug!(target: "sync", "Peer {} is trying to send transactions to the light node", who); + self.service.disconnect_peer(who, self.protocol_name.clone()); + self.service.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); + return + } + + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { + trace!(target: "sync", "{} Ignoring transactions while disabled", who); + return + } + + trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); + if let Some(ref mut peer) = self.peers.get_mut(&who) { + for t in transactions { + if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { + debug!( + target: "sync", + "Ignoring any further transactions that exceed the `MAX_PENDING_TRANSACTIONS` ({}) limit", + MAX_PENDING_TRANSACTIONS, + ); + break + } + + let hash = self.transaction_pool.hash_of(&t); + peer.known_transactions.insert(hash.clone()); + + self.service.report_peer(who.clone(), rep::ANY_TRANSACTION); + + match self.pending_transactions_peers.entry(hash.clone()) { + Entry::Vacant(entry) => { + self.pending_transactions.push(PendingTransaction { + validation: self.transaction_pool.import(t), + tx_hash: hash, + }); + entry.insert(vec![who.clone()]); + }, + Entry::Occupied(mut entry) => { + entry.get_mut().push(who.clone()); + }, + } + } + } + } + + fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { + match import { + TransactionImport::KnownGood => + self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND), + TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION), + TransactionImport::Bad => self.service.report_peer(who, rep::BAD_TRANSACTION), + TransactionImport::None => {}, + } + } + + /// Propagate one transaction.
+ pub fn propagate_transaction(&mut self, hash: &H) { + debug!(target: "sync", "Propagating transaction [{:?}]", hash); + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { + return + } + if let Some(transaction) = self.transaction_pool.transaction(hash) { + let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); + self.transaction_pool.on_broadcasted(propagated_to); + } + } + + fn do_propagate_transactions( + &mut self, + transactions: &[(H, B::Extrinsic)], + ) -> HashMap> { + let mut propagated_to = HashMap::<_, Vec<_>>::new(); + let mut propagated_transactions = 0; + + for (who, peer) in self.peers.iter_mut() { + // never send transactions to the light node + if matches!(peer.role, ObservedRole::Light) { + continue + } + + let (hashes, to_send): (Vec<_>, Vec<_>) = transactions + .iter() + .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) + .cloned() + .unzip(); + + propagated_transactions += hashes.len(); + + if !to_send.is_empty() { + for hash in hashes { + propagated_to.entry(hash).or_default().push(who.to_base58()); + } + trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); + self.service.write_notification( + who.clone(), + self.protocol_name.clone(), + to_send.encode(), + ); + } + } + + if let Some(ref metrics) = self.metrics { + metrics.propagated_transactions.inc_by(propagated_transactions as _) + } + + propagated_to + } + + /// Call when we must propagate ready transactions to peers. + fn propagate_transactions(&mut self) { + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { + return + } + debug!(target: "sync", "Propagating transactions"); + let transactions = self.transaction_pool.transactions(); + let propagated_to = self.do_propagate_transactions(&transactions); + self.transaction_pool.on_broadcasted(propagated_to); + } +} diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 10b374a4f256c..3f977a21b1165 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,16 +17,17 @@ // along with this program. If not, see . use libp2p::{ - InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, + bandwidth, core::{ - self, either::EitherOutput, muxing::StreamMuxerBox, - transport::{boxed::Boxed, OptionalTransport}, upgrade + self, + either::EitherTransport, + muxing::StreamMuxerBox, + transport::{Boxed, OptionalTransport}, + upgrade, }, - mplex, identity, bandwidth, wasm_ext, noise + dns, identity, mplex, noise, tcp, websocket, PeerId, Transport, }; -#[cfg(not(target_os = "unknown"))] -use libp2p::{tcp, dns, websocket}; -use std::{io, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; pub use self::bandwidth::BandwidthSinks; @@ -35,93 +36,83 @@ pub use self::bandwidth::BandwidthSinks; /// If `memory_only` is true, then only communication within the same process are allowed. Only /// addresses with the format `/memory/...` are allowed. /// +/// `yamux_window_size` is the maximum size of the Yamux receive windows. `None` to leave the +/// default (256kiB). +/// +/// `yamux_maximum_buffer_size` is the maximum allowed size of the Yamux buffer. 
This should be +/// set either to the maximum of all the maximum allowed sizes of messages frames of all +/// high-level protocols combined, or to some generously high value if you are sure that a maximum +/// size is enforced on all high-level protocols. +/// /// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all /// the connections spawned with this transport. pub fn build_transport( keypair: identity::Keypair, memory_only: bool, - wasm_external_transport: Option, -) -> (Boxed<(PeerId, StreamMuxerBox), io::Error>, Arc) { + yamux_window_size: Option, + yamux_maximum_buffer_size: usize, +) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) { // Build the base layer of the transport. - let transport = if let Some(t) = wasm_external_transport { - OptionalTransport::some(t) - } else { - OptionalTransport::none() - }; - #[cfg(not(target_os = "unknown"))] - let transport = transport.or_transport(if !memory_only { - let desktop_trans = tcp::TcpConfig::new(); - let desktop_trans = websocket::WsConfig::new(desktop_trans.clone()) - .or_transport(desktop_trans); - OptionalTransport::some(if let Ok(dns) = dns::DnsConfig::new(desktop_trans.clone()) { - dns.boxed() + let transport = if !memory_only { + let desktop_trans = tcp::TcpConfig::new().nodelay(true); + let desktop_trans = + websocket::WsConfig::new(desktop_trans.clone()).or_transport(desktop_trans); + let dns_init = futures::executor::block_on(dns::DnsConfig::system(desktop_trans.clone())); + EitherTransport::Left(if let Ok(dns) = dns_init { + EitherTransport::Left(dns) } else { - desktop_trans.map_err(dns::DnsErr::Underlying).boxed() + EitherTransport::Right(desktop_trans.map_err(dns::DnsErr::Transport)) }) } else { - OptionalTransport::none() - }); - - let transport = transport.or_transport(if memory_only { - OptionalTransport::some(libp2p::core::transport::MemoryTransport::default()) - } else { - OptionalTransport::none() - }); + EitherTransport::Right(OptionalTransport::some( + libp2p::core::transport::MemoryTransport::default(), + )) + }; let (transport, bandwidth) = bandwidth::BandwidthLogging::new(transport); - let authentication_config = { - // For more information about these two panics, see in "On the Importance of - // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, - // and Richard J. Lipton. - let noise_keypair_legacy = noise::Keypair::::new().into_authentic(&keypair) + let authentication_config = + { + // For more information about these two panics, see in "On the Importance of + // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, + // and Richard J. Lipton. + let noise_keypair = noise::Keypair::::new().into_authentic(&keypair) .expect("can only fail in case of a hardware bug; since this signing is performed only \ once and at initialization, we're taking the bet that the inconvenience of a very \ rare panic here is basically zero"); - let noise_keypair_spec = noise::Keypair::::new().into_authentic(&keypair) - .expect("can only fail in case of a hardware bug; since this signing is performed only \ - once and at initialization, we're taking the bet that the inconvenience of a very \ - rare panic here is basically zero"); - - // Legacy noise configurations for backward compatibility. 
- let mut noise_legacy = noise::LegacyConfig::default(); - noise_legacy.recv_legacy_handshake = true; - let mut xx_config = noise::NoiseConfig::xx(noise_keypair_spec); - xx_config.set_legacy_config(noise_legacy.clone()); - let mut ix_config = noise::NoiseConfig::ix(noise_keypair_legacy); - ix_config.set_legacy_config(noise_legacy); + // Legacy noise configurations for backward compatibility. + let mut noise_legacy = noise::LegacyConfig::default(); + noise_legacy.recv_legacy_handshake = true; - let extract_peer_id = |result| match result { - EitherOutput::First((peer_id, o)) => (peer_id, EitherOutput::First(o)), - EitherOutput::Second((peer_id, o)) => (peer_id, EitherOutput::Second(o)), + let mut xx_config = noise::NoiseConfig::xx(noise_keypair); + xx_config.set_legacy_config(noise_legacy.clone()); + xx_config.into_authenticated() }; - core::upgrade::SelectUpgrade::new(xx_config.into_authenticated(), ix_config.into_authenticated()) - .map_inbound(extract_peer_id) - .map_outbound(extract_peer_id) - }; - let multiplexing_config = { let mut mplex_config = mplex::MplexConfig::new(); - mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); - mplex_config.max_buffer_len(usize::MAX); + mplex_config.set_max_buffer_behaviour(mplex::MaxBufferBehaviour::Block); + mplex_config.set_max_buffer_size(usize::MAX); - let mut yamux_config = libp2p::yamux::Config::default(); + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); // Enable proper flow-control: window updates are only sent when // buffered data has been consumed. - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + yamux_config.set_max_buffer_size(yamux_maximum_buffer_size); + + if let Some(yamux_window_size) = yamux_window_size { + yamux_config.set_receive_window_size(yamux_window_size); + } core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) - .map_inbound(move |muxer| core::muxing::StreamMuxerBox::new(muxer)) - .map_outbound(move |muxer| core::muxing::StreamMuxerBox::new(muxer)) }; - let transport = transport.upgrade(upgrade::Version::V1) + let transport = transport + .upgrade(upgrade::Version::V1Lazy) .authenticate(authentication_config) .multiplex(multiplexing_config) .timeout(Duration::from_secs(20)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) .boxed(); (transport, bandwidth) diff --git a/client/network/src/utils.rs b/client/network/src/utils.rs index 490e2ced38266..b23b7e0c101e0 100644 --- a/client/network/src/utils.rs +++ b/client/network/src/utils.rs @@ -1,24 +1,25 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// along with this program. If not, see <https://www.gnu.org/licenses/>. use futures::{stream::unfold, FutureExt, Stream, StreamExt}; use futures_timer::Delay; use linked_hash_set::LinkedHashSet; -use std::time::Duration; -use std::{hash::Hash, num::NonZeroUsize}; +use std::{hash::Hash, num::NonZeroUsize, time::Duration}; /// Creates a stream that returns a new value every `duration`. pub fn interval(duration: Duration) -> impl Stream<Item = ()> + Unpin { @@ -37,10 +38,7 @@ pub struct LruHashSet<T: Hash + Eq> { impl<T: Hash + Eq> LruHashSet<T> { /// Create a new `LruHashSet` with the given (exclusive) limit. pub fn new(limit: NonZeroUsize) -> Self { - Self { - set: LinkedHashSet::new(), - limit, - } + Self { set: LinkedHashSet::new(), limit } } /// Insert element into the set. @@ -53,7 +51,7 @@ impl<T: Hash + Eq> LruHashSet<T> { if self.set.len() == usize::from(self.limit) { self.set.pop_front(); // remove oldest entry } - return true; + return true } false } diff --git a/client/network/src/warp_request_handler.rs b/client/network/src/warp_request_handler.rs new file mode 100644 index 0000000000000..2ab95bb3853ba --- /dev/null +++ b/client/network/src/warp_request_handler.rs @@ -0,0 +1,165 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see <http://www.gnu.org/licenses/>. + +//! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer. + +use crate::config::{IncomingRequest, OutgoingResponse, ProtocolId, RequestResponseConfig}; +use codec::{Decode, Encode}; +use futures::{ + channel::{mpsc, oneshot}, + stream::StreamExt, +}; +use log::debug; +use sp_finality_grandpa::{AuthorityList, SetId}; +use sp_runtime::traits::Block as BlockT; +use std::{sync::Arc, time::Duration}; + +/// Scale-encoded warp sync proof response. +pub struct EncodedProof(pub Vec<u8>); + +/// Warp sync request. +#[derive(Encode, Decode, Debug)] +pub struct Request<B: BlockT> { + /// Start collecting proofs from this block. + pub begin: B::Hash, +} + +const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; + +/// Proof verification result. +pub enum VerificationResult<Block: BlockT> { + /// Proof is valid, but the target was not reached. + Partial(SetId, AuthorityList, Block::Hash), + /// Target finality is proved. + Complete(SetId, AuthorityList, Block::Header), +} + +/// Warp sync backend. Handles retrieving and verifying warp sync proofs. +pub trait WarpSyncProvider<B: BlockT>: Send + Sync { + /// Generate a proof starting at the given block hash. The proof is accumulated until the + /// maximum proof size is reached. + fn generate( + &self, + start: B::Hash, + ) -> Result<EncodedProof, Box<dyn std::error::Error + Send + Sync>>; + /// Verify a warp proof against the current set of authorities. + fn verify( + &self, + proof: &EncodedProof, + set_id: SetId, + authorities: AuthorityList, + ) -> Result<VerificationResult<B>, Box<dyn std::error::Error + Send + Sync>>; + /// Get the current list of authorities. This is supposed to be the genesis authorities when + /// starting sync.
+ fn current_authorities(&self) -> AuthorityList; +} + +/// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing +/// incoming requests. +pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig { + RequestResponseConfig { + name: generate_protocol_name(protocol_id).into(), + max_request_size: 32, + max_response_size: MAX_RESPONSE_SIZE, + request_timeout: Duration::from_secs(10), + inbound_queue: None, + } +} + +/// Generate the grandpa warp sync protocol name from chain specific protocol identifier. +fn generate_protocol_name(protocol_id: ProtocolId) -> String { + let mut s = String::new(); + s.push_str("/"); + s.push_str(protocol_id.as_ref()); + s.push_str("/sync/warp"); + s +} + +/// Handler for incoming grandpa warp sync requests from a remote peer. +pub struct RequestHandler { + backend: Arc>, + request_receiver: mpsc::Receiver, +} + +impl RequestHandler { + /// Create a new [`RequestHandler`]. + pub fn new( + protocol_id: ProtocolId, + backend: Arc>, + ) -> (Self, RequestResponseConfig) { + let (tx, request_receiver) = mpsc::channel(20); + + let mut request_response_config = generate_request_response_config(protocol_id); + request_response_config.inbound_queue = Some(tx); + + (Self { backend, request_receiver }, request_response_config) + } + + fn handle_request( + &self, + payload: Vec, + pending_response: oneshot::Sender, + ) -> Result<(), HandleRequestError> { + let request = Request::::decode(&mut &payload[..])?; + + let EncodedProof(proof) = self + .backend + .generate(request.begin) + .map_err(HandleRequestError::InvalidRequest)?; + + pending_response + .send(OutgoingResponse { + result: Ok(proof), + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .map_err(|_| HandleRequestError::SendResponse) + } + + /// Run [`RequestHandler`]. 
+ pub async fn run(mut self) { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_request(payload, pending_response) { + Ok(()) => { + debug!(target: "sync", "Handled grandpa warp sync request from {}.", peer) + }, + Err(e) => debug!( + target: "sync", + "Failed to handle grandpa warp sync request from {}: {}", + peer, e, + ), + } + } + } +} + +#[derive(Debug, derive_more::Display, derive_more::From)] +enum HandleRequestError { + #[display(fmt = "Failed to decode request: {}.", _0)] + DecodeProto(prost::DecodeError), + #[display(fmt = "Failed to encode response: {}.", _0)] + EncodeProto(prost::EncodeError), + #[display(fmt = "Failed to decode block hash: {}.", _0)] + DecodeScale(codec::Error), + Client(sp_blockchain::Error), + #[from(ignore)] + #[display(fmt = "Invalid request {}.", _0)] + InvalidRequest(Box), + #[display(fmt = "Failed to send response.")] + SendResponse, +} diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 26e1631d9f1aa..88399ca54a436 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -13,23 +13,24 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-network = { version = "0.8.0", path = "../" } +async-std = "1.6.5" +sc-network = { version = "0.10.0-dev", path = "../" } log = "0.4.8" -parking_lot = "0.10.0" -futures = "0.3.4" +parking_lot = "0.11.1" +futures = "0.3.9" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.28.1", default-features = false } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } -sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } +libp2p = { version = "0.39.1", default-features = false } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } +sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } -tempfile = "3.1.0" -sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } -sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../service" } +sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } +sc-service = { version = "0.10.0-dev", default-features = false, features = ["test-helpers"], path = "../../service" } +async-trait = "0.1.50" 
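// A hedged wiring sketch for the warp sync request handler introduced above; the
// caller is assumed to supply the chain's `ProtocolId` and a concrete
// `WarpSyncProvider` implementation. This is illustrative, not part of the patch.
fn spawn_warp_sync_request_handler<B: sp_runtime::traits::Block>(
    protocol_id: ProtocolId,
    provider: std::sync::Arc<dyn WarpSyncProvider<B>>,
) -> RequestResponseConfig {
    // `new` plugs an inbound queue into the returned request-response config.
    let (handler, request_response_config) = RequestHandler::new(protocol_id, provider);
    // Register the config with the network configuration, then drive the handler
    // on any executor so incoming warp proof requests get answered.
    async_std::task::spawn(handler.run());
    request_response_config
}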
diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 1d2cd3d687de9..7a4c4f6c83081 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,34 +18,48 @@ //! Testing block import logic. -use sp_consensus::ImportedAux; -use sp_consensus::import_queue::{ - import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, +use super::*; +use futures::executor::block_on; +use sc_block_builder::BlockBuilderProvider; +use sc_consensus::{ + import_single_block, BasicQueue, BlockImportError, BlockImportStatus, ImportedAux, + IncomingBlock, }; -use substrate_test_runtime_client::{self, prelude::*}; -use substrate_test_runtime_client::runtime::{Block, Hash}; +use sp_consensus::BlockOrigin; use sp_runtime::generic::BlockId; -use sc_block_builder::BlockBuilderProvider; -use super::*; +use substrate_test_runtime_client::{ + self, + prelude::*, + runtime::{Block, Hash}, +}; fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { let mut client = substrate_test_runtime_client::new(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::File, block).unwrap(); + block_on(client.import(BlockOrigin::File, block)).unwrap(); let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); let header = client.header(&BlockId::Number(1)).unwrap(); - let justification = client.justification(&BlockId::Number(1)).unwrap(); + let justifications = client.justifications(&BlockId::Number(1)).unwrap(); let peer_id = PeerId::random(); - (client, hash, number, peer_id.clone(), IncomingBlock { + ( + client, hash, - header, - body: Some(Vec::new()), - justification, - origin: Some(peer_id.clone()), - allow_missing_state: false, - import_existing: false, - }) + number, + peer_id, + IncomingBlock { + hash, + header, + body: Some(Vec::new()), + indexed_body: None, + justifications, + origin: Some(peer_id), + allow_missing_state: false, + import_existing: false, + state: None, + skip_execution: false, + }, + ) } #[test] @@ -55,29 +69,29 @@ fn import_single_good_block_works() { let mut expected_aux = ImportedAux::default(); expected_aux.is_new_best = true; - match import_single_block( + match block_on(import_single_block( &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, - &mut PassThroughVerifier::new(true) - ) { - Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) - if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} - r @ _ => panic!("{:?}", r) + &mut PassThroughVerifier::new(true), + )) { + Ok(BlockImportStatus::ImportedUnknown(ref num, ref aux, ref org)) + if *num == number && *aux == expected_aux && *org == Some(peer_id) => {}, + r @ _ => panic!("{:?}", r), } } #[test] fn import_single_good_known_block_is_ignored() { let (mut client, _hash, number, _, block) = prepare_good_block(); - match import_single_block( + match block_on(import_single_block( &mut client, BlockOrigin::File, block, - &mut PassThroughVerifier::new(true) - ) { - Ok(BlockImportResult::ImportedKnown(ref n)) if *n == number => {} - _ => panic!() + &mut PassThroughVerifier::new(true), + )) { + 
Ok(BlockImportStatus::ImportedKnown(ref n, _)) if *n == number => {}, + _ => panic!(), } } @@ -85,14 +99,14 @@ fn import_single_good_known_block_is_ignored() { fn import_single_good_block_without_header_fails() { let (_, _, _, peer_id, mut block) = prepare_good_block(); block.header = None; - match import_single_block( + match block_on(import_single_block( &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, - &mut PassThroughVerifier::new(true) - ) { - Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} - _ => panic!() + &mut PassThroughVerifier::new(true), + )) { + Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {}, + _ => panic!(), } } @@ -107,7 +121,6 @@ fn async_import_queue_drops() { verifier, Box::new(substrate_test_runtime_client::new()), None, - None, &executor, None, ); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 587feebe55c14..bb49cef8c642c 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -23,47 +23,58 @@ mod block_import; mod sync; use std::{ - borrow::Cow, collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, - task::{Poll, Context as FutureContext} + borrow::Cow, + collections::HashMap, + pin::Pin, + sync::Arc, + task::{Context as FutureContext, Poll}, }; -use libp2p::build_multiaddr; +use futures::{future::BoxFuture, prelude::*}; +use libp2p::{build_multiaddr, PeerId}; use log::trace; -use sc_network::config::FinalityProofProvider; +use parking_lot::Mutex; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_client_api::{ + backend::{AuxStore, Backend, Finalizer, TransactionFor}, + BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, + FinalityNotifications, ImportNotifications, +}; +use sc_consensus::{ + BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, + ForkChoiceStrategy, ImportResult, JustificationImport, LongestChain, Verifier, +}; +pub use sc_network::config::EmptyTransactionPool; +use sc_network::{ + block_request_handler::{self, BlockRequestHandler}, + config::{ + MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, + ProtocolConfig, ProtocolId, Role, SyncMode, TransportConfig, + }, + light_client_requests::{self, handler::LightClientRequestHandler}, + state_request_handler::{self, StateRequestHandler}, + Multiaddr, NetworkService, NetworkWorker, +}; +use sc_service::client::Client; use sp_blockchain::{ - HeaderBackend, Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, - Info as BlockchainInfo, + HeaderBackend, Info as BlockchainInfo, Result as ClientResult, }; -use sc_client_api::{ - BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, - backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend, -}; -use sc_consensus::LongestChain; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sc_network::config::Role; -use sp_consensus::block_validation::{DefaultBlockAnnounceValidator, BlockAnnounceValidator}; -use sp_consensus::import_queue::{ - BasicQueue, BoxJustificationImport, Verifier, 
BoxFinalityProofImport, +use sp_consensus::{ + block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, + BlockOrigin, Error as ConsensusError, }; -use sp_consensus::block_import::{BlockImport, ImportResult}; -use sp_consensus::Error as ConsensusError; -use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; -use futures::prelude::*; -use sc_network::{NetworkWorker, NetworkService, config::ProtocolId}; -use sc_network::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder}; -use libp2p::PeerId; -use parking_lot::Mutex; use sp_core::H256; -use sc_network::config::ProtocolConfig; -use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::{ConsensusEngineId, Justification}; -use substrate_test_runtime_client::{self, AccountKeyring}; -use sc_service::client::Client; -pub use sc_network::config::EmptyTransactionPool; -pub use substrate_test_runtime_client::runtime::{Block, Extrinsic, Hash, Transfer}; -pub use substrate_test_runtime_client::{TestClient, TestClientBuilder, TestClientBuilderExt}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + Justification, Justifications, +}; +use substrate_test_runtime_client::AccountKeyring; +pub use substrate_test_runtime_client::{ + runtime::{Block, Extrinsic, Hash, Transfer}, + TestClient, TestClientBuilder, TestClientBuilderExt, +}; type AuthorityId = sp_consensus_babe::AuthorityId; @@ -80,10 +91,7 @@ impl PassThroughVerifier { /// /// Every verified block will use `finalized` for the `BlockImportParams`. pub fn new(finalized: bool) -> Self { - Self { - finalized, - fork_choice: ForkChoiceStrategy::LongestChain, - } + Self { finalized, fork_choice: ForkChoiceStrategy::LongestChain } } /// Create a new instance. @@ -91,48 +99,42 @@ impl PassThroughVerifier { /// Every verified block will use `finalized` for the `BlockImportParams` and /// the given [`ForkChoiceStrategy`]. pub fn new_with_fork_choice(finalized: bool, fork_choice: ForkChoiceStrategy) -> Self { - Self { - finalized, - fork_choice, - } + Self { finalized, fork_choice } } } /// This `Verifier` accepts all data as valid. 
+#[async_trait::async_trait] impl Verifier for PassThroughVerifier { - fn verify( + async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option> + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let maybe_keys = header.digest() - .log(|l| l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) - ) + let maybe_keys = block + .header + .digest() + .log(|l| { + l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) + }) .map(|blob| vec![(well_known_cache_keys::AUTHORITIES, blob.to_vec())]); - let mut import = BlockImportParams::new(origin, header); - import.body = body; - import.finalized = self.finalized; - import.justification = justification; - import.fork_choice = Some(self.fork_choice.clone()); - - Ok((import, maybe_keys)) + block.finalized = self.finalized; + block.fork_choice = Some(self.fork_choice.clone()); + Ok((block, maybe_keys)) } } pub type PeersFullClient = Client< substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, + substrate_test_runtime_client::ExecutorDispatch, Block, - substrate_test_runtime_client::runtime::RuntimeApi + substrate_test_runtime_client::runtime::RuntimeApi, >; pub type PeersLightClient = Client< substrate_test_runtime_client::LightBackend, substrate_test_runtime_client::LightExecutor, Block, - substrate_test_runtime_client::runtime::RuntimeApi + substrate_test_runtime_client::runtime::RuntimeApi, >; #[derive(Clone)] @@ -144,59 +146,70 @@ pub enum PeersClient { impl PeersClient { pub fn as_full(&self) -> Option> { match *self { - PeersClient::Full(ref client, ref _backend) => Some(client.clone()), + PeersClient::Full(ref client, _) => Some(client.clone()), _ => None, } } - pub fn as_block_import(&self) -> BlockImportAdapter { - match *self { - PeersClient::Full(ref client, ref _backend) => - BlockImportAdapter::new_full(client.clone()), - PeersClient::Light(ref client, ref _backend) => - BlockImportAdapter::Light(Arc::new(Mutex::new(client.clone())), PhantomData), - } + pub fn as_block_import(&self) -> BlockImportAdapter { + BlockImportAdapter::new(self.clone()) } pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { match *self { - PeersClient::Full(ref client, ref _backend) => client.get_aux(key), - PeersClient::Light(ref client, ref _backend) => client.get_aux(key), + PeersClient::Full(ref client, _) => client.get_aux(key), + PeersClient::Light(ref client, _) => client.get_aux(key), } } pub fn info(&self) -> BlockchainInfo { match *self { - PeersClient::Full(ref client, ref _backend) => client.chain_info(), - PeersClient::Light(ref client, ref _backend) => client.chain_info(), + PeersClient::Full(ref client, _) => client.chain_info(), + PeersClient::Light(ref client, _) => client.chain_info(), } } - pub fn header(&self, block: &BlockId) -> ClientResult::Header>> { + pub fn header( + &self, + block: &BlockId, + ) -> ClientResult::Header>> { match *self { - PeersClient::Full(ref client, ref _backend) => client.header(block), - PeersClient::Light(ref client, ref _backend) => client.header(block), + PeersClient::Full(ref client, _) => client.header(block), + PeersClient::Light(ref client, _) => client.header(block), + } + } + + pub fn has_state_at(&self, block: &BlockId) -> bool { + let header = match self.header(block).unwrap() { + Some(header) => header, + None => return false, + }; + match self { + 
PeersClient::Full(_client, backend) => + backend.have_state_at(&header.hash(), *header.number()), + PeersClient::Light(_client, backend) => + backend.have_state_at(&header.hash(), *header.number()), } } - pub fn justification(&self, block: &BlockId) -> ClientResult> { + pub fn justifications(&self, block: &BlockId) -> ClientResult> { match *self { - PeersClient::Full(ref client, ref _backend) => client.justification(block), - PeersClient::Light(ref client, ref _backend) => client.justification(block), + PeersClient::Full(ref client, _) => client.justifications(block), + PeersClient::Light(ref client, _) => client.justifications(block), } } pub fn finality_notification_stream(&self) -> FinalityNotifications { match *self { - PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(), + PeersClient::Full(ref client, _) => client.finality_notification_stream(), + PeersClient::Light(ref client, _) => client.finality_notification_stream(), } } - pub fn import_notification_stream(&self) -> ImportNotifications{ + pub fn import_notification_stream(&self) -> ImportNotifications { match *self { - PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), + PeersClient::Full(ref client, _) => client.import_notification_stream(), + PeersClient::Light(ref client, _) => client.import_notification_stream(), } } @@ -204,16 +217,47 @@ impl PeersClient { &self, id: BlockId, justification: Option, - notify: bool + notify: bool, ) -> ClientResult<()> { match *self { - PeersClient::Full(ref client, ref _backend) => client.finalize_block(id, justification, notify), - PeersClient::Light(ref client, ref _backend) => client.finalize_block(id, justification, notify), + PeersClient::Full(ref client, ref _backend) => + client.finalize_block(id, justification, notify), + PeersClient::Light(ref client, ref _backend) => + client.finalize_block(id, justification, notify), + } + } +} + +#[async_trait::async_trait] +impl BlockImport for PeersClient { + type Error = ConsensusError; + type Transaction = (); + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + match self { + PeersClient::Full(client, _) => client.check_block(block).await, + PeersClient::Light(client, _) => client.check_block(block).await, + } + } + + async fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + match self { + PeersClient::Full(client, _) => + client.import_block(block.clear_storage_changes_and_mutate(), cache).await, + PeersClient::Light(client, _) => + client.import_block(block.clear_storage_changes_and_mutate(), cache).await, } } } -pub struct Peer { +pub struct Peer { pub data: D, client: PeersClient, /// We keep a copy of the verifier so that we can invoke it for locally-generated blocks, @@ -221,15 +265,20 @@ pub struct Peer { verifier: VerifierAdapter, /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, /// instead of going through the import queue. 
- block_import: BlockImportAdapter<()>, + block_import: BlockImportAdapter, select_chain: Option>, backend: Option>, network: NetworkWorker::Hash>, imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, + listen_addr: Multiaddr, } -impl Peer { +impl Peer +where + B: BlockImport + Send + Sync, + B::Transaction: Send, +{ /// Get this peer ID. pub fn id(&self) -> PeerId { self.network.service().local_peer_id().clone() @@ -241,7 +290,9 @@ impl Peer { } // Returns a clone of the local SelectChain, only available on full nodes - pub fn select_chain(&self) -> Option> { + pub fn select_chain( + &self, + ) -> Option> { self.select_chain.clone() } @@ -266,21 +317,37 @@ impl Peer { } /// Announces an important block on the network. - pub fn announce_block(&self, hash: ::Hash, data: Vec) { + pub fn announce_block(&self, hash: ::Hash, data: Option>) { self.network.service().announce_block(hash, data); } /// Request explicit fork sync. - pub fn set_sync_fork_request(&self, peers: Vec, hash: ::Hash, number: NumberFor) { + pub fn set_sync_fork_request( + &self, + peers: Vec, + hash: ::Hash, + number: NumberFor, + ) { self.network.service().set_sync_fork_request(peers, hash, number); } /// Add blocks to the peer -- edit the block before adding pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 - where F: FnMut(BlockBuilder) -> Block + where + F: FnMut( + BlockBuilder, + ) -> Block, { let best_hash = self.client.info().best_hash; - self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false) + self.generate_blocks_at( + BlockId::Hash(best_hash), + count, + origin, + edit_block, + false, + true, + true, + ) } /// Add blocks to the peer -- edit the block before adding. The chain will @@ -292,16 +359,20 @@ impl Peer { origin: BlockOrigin, mut edit_block: F, headers_only: bool, - ) -> H256 where F: FnMut(BlockBuilder) -> Block { - let full_client = self.client.as_full() - .expect("blocks could only be generated by full clients"); + inform_sync_about_new_best_block: bool, + announce_block: bool, + ) -> H256 + where + F: FnMut( + BlockBuilder, + ) -> Block, + { + let full_client = + self.client.as_full().expect("blocks could only be generated by full clients"); let mut at = full_client.header(&at).unwrap().unwrap().hash(); - for _ in 0..count { - let builder = full_client.new_block_at( - &BlockId::Hash(at), - Default::default(), - false, - ).unwrap(); + for _ in 0..count { + let builder = + full_client.new_block_at(&BlockId::Hash(at), Default::default(), false).unwrap(); let block = edit_block(builder); let hash = block.header.hash(); trace!( @@ -312,25 +383,30 @@ impl Peer { block.header.parent_hash, ); let header = block.header.clone(); - let (import_block, cache) = self.verifier.verify( - origin, - header.clone(), - None, - if headers_only { None } else { Some(block.extrinsics) }, - ).unwrap(); + let mut import_block = BlockImportParams::new(origin, header.clone()); + import_block.body = if headers_only { None } else { Some(block.extrinsics) }; + let (import_block, cache) = + futures::executor::block_on(self.verifier.verify(import_block)).unwrap(); let cache = if let Some(cache) = cache { cache.into_iter().collect() } else { Default::default() }; - self.block_import.import_block(import_block, cache).expect("block_import failed"); - self.network.service().announce_block(hash, Vec::new()); + futures::executor::block_on(self.block_import.import_block(import_block, cache)) + .expect("block_import failed"); + if 
announce_block { + self.network.service().announce_block(hash, None); + } at = hash; } - self.network.update_chain(); - self.network.service().announce_block(at.clone(), Vec::new()); + if inform_sync_about_new_best_block { + self.network.new_best_block_imported( + at, + full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number().clone(), + ); + } at } @@ -343,24 +419,55 @@ impl Peer { /// Push blocks to the peer (simplified: with or without a TX) pub fn push_headers(&mut self, count: usize) -> H256 { let best_hash = self.client.info().best_hash; - self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true) + self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true, true, true) } /// Push blocks to the peer (simplified: with or without a TX) starting from /// given hash. pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { - self.generate_tx_blocks_at(at, count, with_tx, false) + self.generate_tx_blocks_at(at, count, with_tx, false, true, true) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash without informing the sync protocol about the new best block. + pub fn push_blocks_at_without_informing_sync( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + ) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false, false, true) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash without announcing the block. + pub fn push_blocks_at_without_announcing( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + ) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false, true, false) } /// Push blocks/headers to the peer (simplified: with or without a TX) starting from /// given hash. - fn generate_tx_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool, headers_only:bool) -> H256 { + fn generate_tx_blocks_at( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + headers_only: bool, + inform_sync_about_new_best_block: bool, + announce_block: bool, + ) -> H256 { let mut nonce = 0; if with_tx { self.generate_blocks_at( at, count, - BlockOrigin::File, |mut builder| { + BlockOrigin::File, + |mut builder| { let transfer = Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Alice.into(), @@ -371,7 +478,9 @@ impl Peer { nonce = nonce + 1; builder.build().unwrap().block }, - headers_only + headers_only, + inform_sync_about_new_best_block, + announce_block, ) } else { self.generate_blocks_at( @@ -380,6 +489,8 @@ impl Peer { BlockOrigin::File, |builder| builder.build().unwrap().block, headers_only, + inform_sync_about_new_best_block, + announce_block, ) } } @@ -401,6 +512,11 @@ impl Peer { &self.network.service() } + /// Get a reference to the network worker. + pub fn network(&self) -> &NetworkWorker::Hash> { + &self.network + } + /// Test helper to compare the blockchain state of multiple (networked) /// clients. pub fn blockchain_canon_equals(&self, other: &Self) -> bool { @@ -413,9 +529,10 @@ impl Peer { /// Count the total number of imported blocks. 
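// Aside: how the flag-taking helpers above combine in practice; this usage
// mirrors the sync tests later in this diff (the `TestNet` harness is
// assumed):
//
//     let mut net = TestNet::new(2);
//     // Build blocks but keep the sync protocol unaware of the new best:
//     let hash = net
//         .peer(0)
//         .push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true);
//     // Other peers will not sync until the sync protocol learns about it:
//     net.peer(0).network_service().new_best_block_imported(hash, 3);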
pub fn blocks_count(&self) -> u64 { - self.backend.as_ref().map( - |backend| backend.blockchain().info().best_number - ).unwrap_or(0) + self.backend + .as_ref() + .map(|backend| backend.blockchain().info().best_number) + .unwrap_or(0) } /// Return a collection of block hashes that failed verification @@ -424,126 +541,110 @@ impl Peer { } pub fn has_block(&self, hash: &H256) -> bool { - self.backend.as_ref().map( - |backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some() - ).unwrap_or(false) + self.backend + .as_ref() + .map(|backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some()) + .unwrap_or(false) } } +pub trait BlockImportAdapterFull: + BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + Send + + Sync + + Clone +{ +} + +impl BlockImportAdapterFull for T where + T: BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + Send + + Sync + + Clone +{ +} + /// Implements `BlockImport` for any `Transaction`. Internally the transaction is /// "converted", aka the field is set to `None`. /// /// This is required as the `TestNetFactory` trait does not distinguish between /// full and light nodes. -pub enum BlockImportAdapter { - Full( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), - Light( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), +#[derive(Clone)] +pub struct BlockImportAdapter { + inner: I, } -impl BlockImportAdapter { +impl BlockImportAdapter { /// Create a new instance of `Self::Full`. - pub fn new_full( - full: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Full(Arc::new(Mutex::new(full)), PhantomData) - } - - /// Create a new instance of `Self::Light`. - pub fn new_light( - light: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Light(Arc::new(Mutex::new(light)), PhantomData) - } -} - -impl Clone for BlockImportAdapter { - fn clone(&self) -> Self { - match self { - Self::Full(full, _) => Self::Full(full.clone(), PhantomData), - Self::Light(light, _) => Self::Light(light.clone(), PhantomData), - } + pub fn new(inner: I) -> Self { + Self { inner } } } -impl BlockImport for BlockImportAdapter { +#[async_trait::async_trait] +impl BlockImport for BlockImportAdapter +where + I: BlockImport + Send + Sync, + I::Transaction: Send, +{ type Error = ConsensusError; - type Transaction = Transaction; + type Transaction = (); - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - match self { - Self::Full(full, _) => full.lock().check_block(block), - Self::Light(light, _) => light.lock().check_block(block), - } + self.inner.check_block(block).await } - fn import_block( + async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, cache: HashMap>, ) -> Result { - match self { - Self::Full(full, _) => full.lock().import_block(block.convert_transaction(), cache), - Self::Light(light, _) => light.lock().import_block(block.convert_transaction(), cache), - } + self.inner.import_block(block.clear_storage_changes_and_mutate(), cache).await } } -/// Implements `Verifier` on an `Arc>`. Used internally. -#[derive(Clone)] +/// Implements `Verifier` and keeps track of failed verifications. 
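// Aside on `BlockImportAdapter` above: it unifies full and light importers
// by erasing the inner `Transaction` type to `()`, clearing the storage
// changes before forwarding. A self-contained sketch of that shape (the
// `ImportParams` type here is invented for illustration):
struct ImportParams<T> {
    body: Vec<u8>,
    // In Substrate this holds the storage transaction; the adapter drops it.
    transaction: Option<T>,
}

impl<T> ImportParams<T> {
    // Analogue of `clear_storage_changes_and_mutate`: keep everything else,
    // drop the transaction, and re-type the parameter as `()`.
    fn clear_transaction(self) -> ImportParams<()> {
        ImportParams { body: self.body, transaction: None }
    }
}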
struct VerifierAdapter { - verifier: Arc>>>, + verifier: Arc>>>, failed_verifications: Arc>>, } +#[async_trait::async_trait] impl Verifier for VerifierAdapter { - fn verify( + async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option> + block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - self.verifier.lock().verify(origin, header, justification, body).map_err(|e| { + let hash = block.header.hash(); + self.verifier.lock().await.verify(block).await.map_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); e }) } } +impl Clone for VerifierAdapter { + fn clone(&self) -> Self { + Self { + verifier: self.verifier.clone(), + failed_verifications: self.failed_verifications.clone(), + } + } +} + impl VerifierAdapter { - fn new(verifier: Arc>>>) -> VerifierAdapter { + fn new(verifier: impl Verifier + 'static) -> Self { VerifierAdapter { - verifier, + verifier: Arc::new(futures::lock::Mutex::new(Box::new(verifier))), failed_verifications: Default::default(), } } @@ -557,11 +658,25 @@ pub struct FullPeerConfig { /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. - pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, + pub notifications_protocols: Vec>, + /// The indices of the peers the peer should be connected to. + /// + /// If `None`, it will be connected to all other peers. + pub connect_to_peers: Option>, + /// Whether the full peer should have the authority role. + pub is_authority: bool, + /// Syncing mode + pub sync_mode: SyncMode, + /// Enable transaction indexing. + pub storage_chain: bool, } -pub trait TestNetFactory: Sized { +pub trait TestNetFactory: Sized +where + >::Transaction: Send, +{ type Verifier: 'static + Verifier; + type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default; /// These two need to be implemented! @@ -574,33 +689,22 @@ pub trait TestNetFactory: Sized { ) -> Self::Verifier; /// Get reference to peer. - fn peer(&mut self, i: usize) -> &mut Peer; - fn peers(&self) -> &Vec>; - fn mut_peers>)>( + fn peer(&mut self, i: usize) -> &mut Peer; + fn peers(&self) -> &Vec>; + fn mut_peers>)>( &mut self, closure: F, ); /// Get custom block import handle for fresh client, along with peer data. - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Self::PeerData, - ) - { - (client.as_block_import(), None, None, None, Default::default()) - } - - /// Get finality proof provider (if supported). - fn make_finality_proof_provider( + fn make_block_import( &self, - _client: PeersClient, - ) -> Option>> { - None - } + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ); fn default_config() -> ProtocolConfig { ProtocolConfig::default() @@ -625,80 +729,131 @@ pub trait TestNetFactory: Sized { /// Add a full peer. 
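// Aside: the new `FullPeerConfig` fields in action, as used by tests later
// in this diff (harness assumed):
//
//     net.add_full_peer_with_config(FullPeerConfig {
//         // Connect only to peer 1 instead of to every peer:
//         connect_to_peers: Some(vec![1]),
//         // Opt this peer into fast sync:
//         sync_mode: SyncMode::Fast { skip_proofs: false, storage_chain_mode: false },
//         ..Default::default()
//     });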
fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { - let test_client_builder = match config.keep_blocks { - Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), - None => TestClientBuilder::with_default_backend(), + let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { + (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks), + (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), + (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), + (None, false) => TestClientBuilder::with_default_backend(), }; + if matches!(config.sync_mode, SyncMode::Fast { .. }) { + test_client_builder = test_client_builder.set_no_genesis(); + } let backend = test_client_builder.backend(); let (c, longest_chain) = test_client_builder.build_with_longest_chain(); let client = Arc::new(c); - let ( - block_import, - justification_import, - finality_proof_import, - finality_proof_request_builder, - data, - ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); + let (block_import, justification_import, data) = + self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); let verifier = self.make_verifier( PeersClient::Full(client.clone(), backend.clone()), &Default::default(), &data, ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( verifier.clone(), Box::new(block_import.clone()), justification_import, - finality_proof_import, &sp_core::testing::TaskExecutor::new(), None, )); let listen_addr = build_multiaddr![Memory(rand::random::())]; - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); + let mut network_config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + network_config.sync_mode = config.sync_mode; network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; - network_config.notifications_protocols = config.notifications_protocols; + network_config.extra_sets = config + .notifications_protocols + .into_iter() + .map(|p| NonDefaultSetConfig { + notifications_protocol: p, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: Default::default(), + }) + .collect(); + if let Some(connect_to) = config.connect_to_peers { + let addrs = connect_to + .iter() + .map(|v| { + let peer_id = self.peer(*v).network_service().local_peer_id().clone(); + let multiaddr = self.peer(*v).listen_addr.clone(); + MultiaddrWithPeerId { peer_id, multiaddr } + }) + .collect(); + network_config.default_peers_set.reserved_nodes = addrs; + network_config.default_peers_set.non_reserved_mode = NonReservedPeerMode::Deny; + } + + let protocol_id = ProtocolId::from("test-protocol-name"); + + let block_request_protocol_config = { + let (handler, protocol_config) = + BlockRequestHandler::new(&protocol_id, client.clone(), 50); + self.spawn_task(handler.run().boxed()); + protocol_config + }; + + let state_request_protocol_config = { + let (handler, protocol_config) = + StateRequestHandler::new(&protocol_id, client.clone(), 50); + self.spawn_task(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, 
client.clone()); + self.spawn_task(handler.run().boxed()); + protocol_config + }; let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Full, + role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, + transactions_handler_executor: Box::new(|task| { + async_std::task::spawn(task); + }), network_config, chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Full(client.clone(), backend.clone()), - ), - finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from("test-protocol-name"), + protocol_id, import_queue, - block_announce_validator: config.block_announce_validator + block_announce_validator: config + .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)), metrics_registry: None, - }).unwrap(); + block_request_protocol_config, + state_request_protocol_config, + light_client_request_protocol_config, + warp_sync: None, + }) + .unwrap(); - self.mut_peers(|peers| { + trace!(target: "test_network", "Peer identifier: {}", network.service().local_peer_id()); + + self.mut_peers(move |peers| { for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); + peer.network.add_known_address( + network.service().local_peer_id().clone(), + listen_addr.clone(), + ); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); + let finality_notification_stream = + Box::pin(client.finality_notification_stream().fuse()); peers.push(Peer { data, - client: PeersClient::Full(client, backend.clone()), + client: PeersClient::Full(client.clone(), backend.clone()), select_chain: Some(longest_chain), backend: Some(backend), imported_blocks_stream, @@ -706,6 +861,7 @@ pub trait TestNetFactory: Sized { block_import, verifier, network, + listen_addr, }); }); } @@ -714,66 +870,74 @@ pub trait TestNetFactory: Sized { fn add_light_peer(&mut self) { let (c, backend) = substrate_test_runtime_client::new_light(); let client = Arc::new(c); - let ( - block_import, - justification_import, - finality_proof_import, - finality_proof_request_builder, - data, - ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); + let (block_import, justification_import, data) = + self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); let verifier = self.make_verifier( PeersClient::Light(client.clone(), backend.clone()), &Default::default(), &data, ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( verifier.clone(), Box::new(block_import.clone()), justification_import, - finality_proof_import, &sp_core::testing::TaskExecutor::new(), None, )); let listen_addr = build_multiaddr![Memory(rand::random::())]; - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); + let mut network_config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; + let protocol_id = ProtocolId::from("test-protocol-name"); + + let 
block_request_protocol_config = + block_request_handler::generate_protocol_config(&protocol_id); + let state_request_protocol_config = + state_request_handler::generate_protocol_config(&protocol_id); + + let light_client_request_protocol_config = + light_client_requests::generate_protocol_config(&protocol_id); + let network = NetworkWorker::new(sc_network::config::Params { role: Role::Light, executor: None, + transactions_handler_executor: Box::new(|task| { + async_std::task::spawn(task); + }), network_config, chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Light(client.clone(), backend.clone()) - ), - finality_proof_request_builder, on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from("test-protocol-name"), + protocol_id, import_queue, block_announce_validator: Box::new(DefaultBlockAnnounceValidator), metrics_registry: None, - }).unwrap(); + block_request_protocol_config, + state_request_protocol_config, + light_client_request_protocol_config, + warp_sync: None, + }) + .unwrap(); self.mut_peers(|peers| { for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); + peer.network.add_known_address( + network.service().local_peer_id().clone(), + listen_addr.clone(), + ); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); + let finality_notification_stream = + Box::pin(client.finality_notification_stream().fuse()); peers.push(Peer { data, @@ -785,10 +949,16 @@ pub trait TestNetFactory: Sized { imported_blocks_stream, finality_notification_stream, network, + listen_addr, }); }); } + /// Used to spawn background tasks, e.g. the block request protocol handler. + fn spawn_task(&self, f: BoxFuture<'static, ()>) { + async_std::task::spawn(f); + } + /// Polls the testnet until all nodes are in sync. /// /// Must be executed in a task context. @@ -807,7 +977,7 @@ pub trait TestNetFactory: Sized { match (highest, peer.client.info().best_hash) { (None, b) => highest = Some(b), (Some(ref a), ref b) if a == b => {}, - (Some(_), _) => return Poll::Pending + (Some(_), _) => return Poll::Pending, } } Poll::Ready(()) @@ -848,43 +1018,51 @@ pub trait TestNetFactory: Sized { /// /// Calls `poll_until_sync` repeatedly. fn block_until_sync(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx))); + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_sync(cx) + })); } /// Blocks the current thread until there are no pending packets. /// /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. fn block_until_idle(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx))); + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_idle(cx) + })); } /// Blocks the current thread until all peers are connected to each other. /// /// Calls `poll_until_connected` repeatedly with the runtime passed as parameter. fn block_until_connected(&mut self) { - futures::executor::block_on( - futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)), - ); + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_connected(cx) + })); } /// Polls the testnet. Processes all the pending actions. 
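// Aside: the `block_on(poll_fn(..))` idiom used by the `block_until_*`
// helpers above, reduced to a self-contained sketch. The closure is polled
// until it reports `Ready`; in the real helpers the testnet is polled
// inside the closure, which registers the wakeups.
use futures::executor::block_on;
use std::task::Poll;

fn block_until(mut done: impl FnMut() -> bool) {
    block_on(futures::future::poll_fn::<(), _>(|cx| {
        if done() {
            Poll::Ready(())
        } else {
            // Self-wake so the executor polls again; a stand-in for the
            // wakeups that polling the network worker would register.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }));
}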
fn poll(&mut self, cx: &mut FutureContext) { self.mut_peers(|peers| { - for peer in peers { - trace!(target: "sync", "-- Polling {}", peer.id()); + for (i, peer) in peers.into_iter().enumerate() { + trace!(target: "sync", "-- Polling {}: {}", i, peer.id()); if let Poll::Ready(()) = peer.network.poll_unpin(cx) { panic!("NetworkWorker has terminated unexpectedly.") } - trace!(target: "sync", "-- Polling complete {}", peer.id()); + trace!(target: "sync", "-- Polling complete {}: {}", i, peer.id()); // We poll `imported_blocks_stream`. - while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { - peer.network.service().announce_block(notification.hash, Vec::new()); + while let Poll::Ready(Some(notification)) = + peer.imported_blocks_stream.as_mut().poll_next(cx) + { + peer.network.service().announce_block(notification.hash, None); } // We poll `finality_notification_stream`, but we only take the last event. let mut last = None; - while let Poll::Ready(Some(item)) = peer.finality_notification_stream.as_mut().poll_next(cx) { + while let Poll::Ready(Some(item)) = + peer.finality_notification_stream.as_mut().poll_next(cx) + { last = Some(item); } if let Some(notification) = last { @@ -896,63 +1074,78 @@ pub trait TestNetFactory: Sized { } pub struct TestNet { - peers: Vec>, + peers: Vec>, fork_choice: ForkChoiceStrategy, } impl TestNet { /// Create a `TestNet` that used the given fork choice rule. pub fn with_fork_choice(fork_choice: ForkChoiceStrategy) -> Self { - Self { - peers: Vec::new(), - fork_choice, - } + Self { peers: Vec::new(), fork_choice } } } impl TestNetFactory for TestNet { type Verifier = PassThroughVerifier; type PeerData = (); + type BlockImport = PeersClient; /// Create new test network with peers and given config. 
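// Aside: the "drain the stream, keep only the newest item" pattern from
// `poll` above (used for finality notifications), as a standalone sketch:
use futures::Stream;
use std::{
    pin::Pin,
    task::{Context, Poll},
};

fn last_ready<T>(
    stream: &mut Pin<Box<dyn Stream<Item = T> + Send>>,
    cx: &mut Context<'_>,
) -> Option<T> {
    let mut last = None;
    // Take everything that is ready right now; only the most recent item
    // will be acted upon.
    while let Poll::Ready(Some(item)) = stream.as_mut().poll_next(cx) {
        last = Some(item);
    }
    last
}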
fn from_config(_config: &ProtocolConfig) -> Self { - TestNet { - peers: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - } + TestNet { peers: Vec::new(), fork_choice: ForkChoiceStrategy::LongestChain } } - fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig, _peer_data: &()) - -> Self::Verifier - { + fn make_verifier( + &self, + _client: PeersClient, + _config: &ProtocolConfig, + _peer_data: &(), + ) -> Self::Verifier { PassThroughVerifier::new_with_fork_choice(false, self.fork_choice.clone()) } - fn peer(&mut self, i: usize) -> &mut Peer<()> { + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) { + (client.as_block_import(), None, ()) + } + + fn peer(&mut self, i: usize) -> &mut Peer<(), Self::BlockImport> { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { &self.peers } - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers>)>(&mut self, closure: F) { closure(&mut self.peers); } } pub struct ForceFinalized(PeersClient); +#[async_trait::async_trait] impl JustificationImport for ForceFinalized { type Error = ConsensusError; - fn import_justification( + async fn on_start(&mut self) -> Vec<(H256, NumberFor)> { + Vec::new() + } + + async fn import_justification( &mut self, hash: H256, _number: NumberFor, justification: Justification, ) -> Result<(), Self::Error> { - self.0.finalize_block(BlockId::Hash(hash), Some(justification), true) + self.0 + .finalize_block(BlockId::Hash(hash), Some(justification), true) .map_err(|_| ConsensusError::InvalidJustification.into()) } } @@ -962,44 +1155,44 @@ pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { type Verifier = PassThroughVerifier; type PeerData = (); + type BlockImport = PeersClient; fn from_config(config: &ProtocolConfig) -> Self { JustificationTestNet(TestNet::from_config(config)) } - fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) -> Self::Verifier { + fn make_verifier( + &self, + client: PeersClient, + config: &ProtocolConfig, + peer_data: &(), + ) -> Self::Verifier { self.0.make_verifier(client, config, peer_data) } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut Peer { self.0.peer(i) } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { self.0.peers() } - fn mut_peers>, - )>(&mut self, closure: F) { + fn mut_peers>)>( + &mut self, + closure: F, + ) { self.0.mut_peers(closure) } - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Self::PeerData, - ) - { - ( - client.as_block_import(), - Some(Box::new(ForceFinalized(client))), - None, - None, - Default::default(), - ) + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) { + (client.as_block_import(), Some(Box::new(ForceFinalized(client))), Default::default()) } } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 64985871d85e0..c86ccfeac3ed1 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,11 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sp_consensus::BlockOrigin; -use std::time::Duration; -use futures::{Future, executor::block_on}; use super::*; -use sp_consensus::block_validation::Validation; +use futures::{executor::block_on, Future}; +use sp_consensus::{block_validation::Validation, BlockOrigin}; +use sp_runtime::Justifications; +use std::time::Duration; use substrate_test_runtime::Header; fn test_ancestor_search_when_common_is(n: usize) { @@ -248,13 +248,23 @@ fn sync_justifications() { net.block_until_sync(); // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), None); - assert_eq!(net.peer(1).client().justification(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); // we finalize block #10, #15 and #20 for peer 0 with a justification - net.peer(0).client().finalize_block(BlockId::Number(10), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(15), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(20), Some(Vec::new()), true).unwrap(); + let just = (*b"FRNK", Vec::new()); + net.peer(0) + .client() + .finalize_block(BlockId::Number(10), Some(just.clone()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(15), Some(just.clone()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(20), Some(just.clone()), true) + .unwrap(); let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); @@ -269,11 +279,15 @@ fn sync_justifications() { net.poll(cx); for height in (10..21).step_by(5) { - if net.peer(0).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { - return Poll::Pending; + if net.peer(0).client().justifications(&BlockId::Number(height)).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) + { + return Poll::Pending } - if net.peer(1).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { - return Poll::Pending; + if net.peer(1).client().justifications(&BlockId::Number(height)).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) + { + return Poll::Pending } } @@ -295,7 +309,11 @@ fn sync_justifications_across_forks() { // for both and finalize the small fork instead. 
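// Aside: throughout these tests a justification is now the pair
// `(ConsensusEngineId, Vec<u8>)`, e.g. `(*b"FRNK", Vec::new())` for
// GRANDPA, and clients return engine-keyed `Justifications`. The pattern,
// condensed (types from sp_runtime as imported above):
//
//     let just = (*b"FRNK", Vec::new());
//     client.finalize_block(BlockId::Number(10), Some(just.clone()), true).unwrap();
//     assert_eq!(
//         client.justifications(&BlockId::Number(10)).unwrap(),
//         Some(Justifications::from(just)),
//     );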
net.block_until_sync(); - net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true).unwrap(); + let just = (*b"FRNK", Vec::new()); + net.peer(0) + .client() + .finalize_block(BlockId::Hash(f1_best), Some(just), true) + .unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); @@ -303,8 +321,10 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) && - net.peer(1).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) + if net.peer(0).client().justifications(&BlockId::Number(10)).unwrap() == + Some(Justifications::from((*b"FRNK", Vec::new()))) && + net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() == + Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) } else { @@ -359,7 +379,8 @@ fn own_blocks_are_announced() { sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.block_until_sync(); // connect'em - net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block); + net.peer(0) + .generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block); net.block_until_sync(); @@ -436,7 +457,7 @@ fn can_sync_small_non_best_forks() { assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - net.peer(0).announce_block(small_hash, Vec::new()); + net.peer(0).announce_block(small_hash, None); // after announcing, peer 1 downloads the block. @@ -452,7 +473,7 @@ fn can_sync_small_non_best_forks() { net.block_until_sync(); let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); - net.peer(0).announce_block(another_fork, Vec::new()); + net.peer(0).announce_block(another_fork, None); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { @@ -500,7 +521,7 @@ fn light_peer_imports_header_from_announce() { sp_tracing::try_init_simple(); fn import_with_announce(net: &mut TestNet, hash: H256) { - net.peer(0).announce_block(hash, Vec::new()); + net.peer(0).announce_block(hash, None); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -552,7 +573,7 @@ fn can_sync_explicit_forks() { // poll until the two nodes connect, otherwise announcing the block will not work block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { Poll::Pending } else { Poll::Ready(()) @@ -610,7 +631,7 @@ fn does_not_sync_announced_old_best_block() { net.peer(0).push_blocks(18, true); net.peer(1).push_blocks(20, true); - net.peer(0).announce_block(old_hash, Vec::new()); + net.peer(0).announce_block(old_hash, None); block_on(futures::future::poll_fn::<(), _>(|cx| { // poll once to import announcement net.poll(cx); @@ -618,7 +639,7 @@ fn does_not_sync_announced_old_best_block() { })); assert!(!net.peer(1).is_major_syncing()); - net.peer(0).announce_block(old_hash_with_parent, Vec::new()); + net.peer(0).announce_block(old_hash_with_parent, None); block_on(futures::future::poll_fn::<(), _>(|cx| { // poll once to import announcement net.poll(cx); @@ -637,7 +658,7 @@ fn full_sync_requires_block_body() { // Wait for nodes to connect 
block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { Poll::Pending } else { Poll::Ready(()) @@ -653,8 +674,8 @@ fn imports_stale_once() { fn import_with_announce(net: &mut TestNet, hash: H256) { // Announce twice - net.peer(0).announce_block(hash, Vec::new()); - net.peer(0).announce_block(hash, Vec::new()); + net.peer(0).announce_block(hash, None); + net.peer(0).announce_block(hash, None); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -696,13 +717,20 @@ fn can_sync_to_peers_with_wrong_common_block() { net.block_until_connected(); // both peers re-org to the same fork without notifying each other - net.peer(0).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); - net.peer(1).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); + let just = Some((*b"FRNK", Vec::new())); + net.peer(0) + .client() + .finalize_block(BlockId::Hash(fork_hash), just.clone(), true) + .unwrap(); + net.peer(1) + .client() + .finalize_block(BlockId::Hash(fork_hash), just, true) + .unwrap(); let final_hash = net.peer(0).push_blocks(1, false); net.block_until_sync(); - assert!(net.peer(1).client().header(&BlockId::Hash(final_hash)).unwrap().is_some()); + assert!(net.peer(1).has_block(&final_hash)); } /// Returns `is_new_best = true` for each validated announcement. @@ -713,15 +741,38 @@ impl BlockAnnounceValidator for NewBestBlockAnnounceValidator { &mut self, _: &Header, _: &[u8], - ) -> Pin>> + Send>> { + ) -> Pin>> + Send>> + { async { Ok(Validation::Success { is_new_best: true }) }.boxed() } } +/// Returns `Validation::Failure` for the specified block number. +struct FailingBlockAnnounceValidator(u64); + +impl BlockAnnounceValidator for FailingBlockAnnounceValidator { + fn validate( + &mut self, + header: &Header, + _: &[u8], + ) -> Pin>> + Send>> + { + let number = *header.number(); + let target_number = self.0; + async move { + Ok(if number == target_number { + Validation::Failure { disconnect: false } + } else { + Validation::Success { is_new_best: true } + }) + } + .boxed() + } +} + #[test] fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { sp_tracing::try_init_simple(); - log::trace!(target: "sync", "Test"); let mut net = TestNet::with_fork_choice(ForkChoiceStrategy::Custom(false)); net.add_full_peer_with_config(Default::default()); net.add_full_peer_with_config(Default::default()); @@ -752,18 +803,19 @@ impl BlockAnnounceValidator for DeferredBlockAnnounceValidator { &mut self, _: &Header, _: &[u8], - ) -> Pin>> + Send>> { + ) -> Pin>> + Send>> + { async { futures_timer::Delay::new(std::time::Duration::from_millis(500)).await; Ok(Validation::Success { is_new_best: false }) - }.boxed() + } + .boxed() } } #[test] fn wait_until_deferred_block_announce_validation_is_ready() { sp_tracing::try_init_simple(); - log::trace!(target: "sync", "Test"); let mut net = TestNet::with_fork_choice(ForkChoiceStrategy::Custom(false)); net.add_full_peer_with_config(Default::default()); net.add_full_peer_with_config(FullPeerConfig { @@ -779,3 +831,394 @@ fn wait_until_deferred_block_announce_validation_is_ready() { net.block_until_idle(); } } + +/// When we don't inform the sync protocol about the best block, a node will not sync from us as the +/// handshake does not contain our best block.
+#[test] +fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(1); + + // Produce some blocks + let block_hash = net.peer(0).push_blocks_at_without_informing_sync(BlockId::Number(0), 3, true); + + // Add a node and wait until they are connected + net.add_full_peer_with_config(Default::default()); + net.block_until_connected(); + net.block_until_idle(); + + // The peer should not have synced the block. + assert!(!net.peer(1).has_block(&block_hash)); + + // Make the sync protocol aware of the best block + net.peer(0).network_service().new_best_block_imported(block_hash, 3); + net.block_until_idle(); + + // Connect another node that should now sync to the tip + net.add_full_peer_with_config(Default::default()); + net.block_until_connected(); + + while !net.peer(2).has_block(&block_hash) { + net.block_until_idle(); + } + + // However, peer 1 should still not have the block. + assert!(!net.peer(1).has_block(&block_hash)); +} + +/// Ensures that we, as a syncing node, sync to the tip even while we are connected to another +/// peer that is currently also doing a major sync. +#[test] +fn sync_to_tip_when_we_sync_together_with_multiple_peers() { + sp_tracing::try_init_simple(); + + let mut net = TestNet::new(3); + + let block_hash = + net.peer(0) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false); + + net.peer(1) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 5_000, false); + + net.block_until_connected(); + net.block_until_idle(); + + assert!(!net.peer(2).has_block(&block_hash)); + + net.peer(0).network_service().new_best_block_imported(block_hash, 10_000); + while !net.peer(2).has_block(&block_hash) && !net.peer(1).has_block(&block_hash) { + net.block_until_idle(); + } +} + +/// Ensures that when we receive a block announcement with some data attached, we propagate +/// this data when reannouncing the block. +#[test] +fn block_announce_data_is_propagated() { + struct TestBlockAnnounceValidator; + + impl BlockAnnounceValidator for TestBlockAnnounceValidator { + fn validate( + &mut self, + _: &Header, + data: &[u8], + ) -> Pin< + Box>> + Send>, + > { + let correct = data.get(0) == Some(&137); + async move { + if correct { + Ok(Validation::Success { is_new_best: true }) + } else { + Ok(Validation::Failure { disconnect: false }) + } + } + .boxed() + } + } + + sp_tracing::try_init_simple(); + let mut net = TestNet::new(1); + + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)), + ..Default::default() + }); + + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)), + connect_to_peers: Some(vec![1]), + ..Default::default() + }); + + // Wait until peer 1 is connected to both nodes.
+ block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).num_peers() == 2 { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + + let block_hash = net.peer(0).push_blocks_at_without_announcing(BlockId::Number(0), 1, true); + net.peer(0).announce_block(block_hash, Some(vec![137])); + + while !net.peer(1).has_block(&block_hash) || !net.peer(2).has_block(&block_hash) { + net.block_until_idle(); + } +} + +#[test] +fn continue_to_sync_after_some_block_announcement_verifications_failed() { + struct TestBlockAnnounceValidator; + + impl BlockAnnounceValidator for TestBlockAnnounceValidator { + fn validate( + &mut self, + header: &Header, + _: &[u8], + ) -> Pin< + Box>> + Send>, + > { + let number = *header.number(); + async move { + if number < 100 { + Err(Box::::from(String::from("error")) + as Box<_>) + } else { + Ok(Validation::Success { is_new_best: false }) + } + } + .boxed() + } + } + + sp_tracing::try_init_simple(); + let mut net = TestNet::new(1); + + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(TestBlockAnnounceValidator)), + ..Default::default() + }); + + net.block_until_connected(); + net.block_until_idle(); + + let block_hash = net.peer(0).push_blocks(500, true); + + net.block_until_sync(); + assert!(net.peer(1).has_block(&block_hash)); +} + +/// When being spammed with the same request by a peer, we ban this peer. However, we should only +/// ban this peer if the request was successful. In the case of a justification request, for +/// example, we ask our peers multiple times until we get the requested justification. This test +/// ensures that asking for the same justification multiple times doesn't get a peer banned. +#[test] +fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { + sp_tracing::try_init_simple(); + let mut net = JustificationTestNet::new(2); + net.peer(0).push_blocks(10, false); + net.block_until_sync(); + + // there's currently no justification for block #10 + assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); + + let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); + + // Let's assume block 10 was finalized, but we still need the justification from the network. + net.peer(1).request_justification(&h1.hash().into(), 10); + + // Let's build some more blocks and always wait for the network to sync them + for _ in 0..5 { + // We need to sleep 10 seconds as this is the time we wait between sending a new + // justification request. + std::thread::sleep(std::time::Duration::from_secs(10)); + net.peer(0).push_blocks(1, false); + net.block_until_sync(); + assert_eq!(1, net.peer(0).num_peers()); + } + + // Finalize the block and make the justification available.
+ net.peer(0) + .client() + .finalize_block(BlockId::Number(10), Some((*b"FRNK", Vec::new())), true) + .unwrap(); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + if net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) + { + return Poll::Pending + } + + Poll::Ready(()) + })); +} + +#[test] +fn syncs_all_forks_from_single_peer() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + net.peer(0).push_blocks(10, false); + net.peer(1).push_blocks(10, false); + + // poll until the two nodes connect, otherwise announcing the block will not work + net.block_until_connected(); + + // Peer 0 produces new blocks and announces. + let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true); + + // Wait till peer 1 starts downloading + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).network().best_seen_block() != Some(12) { + return Poll::Pending + } + Poll::Ready(()) + })); + + // Peer 0 produces and announces another fork + let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false); + + net.block_until_sync(); + + // Peer 1 should have both branches, + assert!(net.peer(1).client().header(&BlockId::Hash(branch1)).unwrap().is_some()); + assert!(net.peer(1).client().header(&BlockId::Hash(branch2)).unwrap().is_some()); +} + +#[test] +fn syncs_after_missing_announcement() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(0); + net.add_full_peer_with_config(Default::default()); + // Set peer 1 to ignore announcement + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(FailingBlockAnnounceValidator(11))), + ..Default::default() + }); + net.peer(0).push_blocks(10, false); + net.peer(1).push_blocks(10, false); + + net.block_until_connected(); + + // Peer 0 produces a new block and announces. Peer 1 ignores announcement. + net.peer(0).push_blocks_at(BlockId::Number(10), 1, false); + // Peer 0 produces another block and announces. + let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false); + net.peer(1).push_blocks_at(BlockId::Number(10), 1, true); + net.block_until_sync(); + assert!(net.peer(1).client().header(&BlockId::Hash(final_block)).unwrap().is_some()); +} + +#[test] +fn syncs_state() { + sp_tracing::try_init_simple(); + for skip_proofs in &[false, true] { + let mut net = TestNet::new(0); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(FullPeerConfig { + sync_mode: SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }, + ..Default::default() + }); + net.peer(0).push_blocks(64, false); + // Wait for peer 1 to sync header chain. + net.block_until_sync(); + assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); + + let just = (*b"FRNK", Vec::new()); + net.peer(1) + .client() + .finalize_block(BlockId::Number(60), Some(just), true) + .unwrap(); + // Wait for state sync. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).client.info().finalized_state.is_some() { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); + // Wait for the rest of the states to be imported. 
+ block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).client().has_state_at(&BlockId::Number(64)) { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + } +} + +#[test] +fn syncs_indexed_blocks() { + use sp_runtime::traits::Hash; + sp_tracing::try_init_simple(); + let mut net = TestNet::new(0); + let mut n: u64 = 0; + net.add_full_peer_with_config(FullPeerConfig { storage_chain: true, ..Default::default() }); + net.add_full_peer_with_config(FullPeerConfig { + storage_chain: true, + sync_mode: SyncMode::Fast { skip_proofs: false, storage_chain_mode: true }, + ..Default::default() + }); + net.peer(0).generate_blocks_at( + BlockId::number(0), + 64, + BlockOrigin::Own, + |mut builder| { + let ex = Extrinsic::Store(n.to_le_bytes().to_vec()); + n += 1; + builder.push(ex).unwrap(); + builder.build().unwrap().block + }, + false, + true, + true, + ); + let indexed_key = sp_runtime::traits::BlakeTwo256::hash(&42u64.to_le_bytes()); + assert!(net + .peer(0) + .client() + .as_full() + .unwrap() + .indexed_transaction(&indexed_key) + .unwrap() + .is_some()); + assert!(net + .peer(1) + .client() + .as_full() + .unwrap() + .indexed_transaction(&indexed_key) + .unwrap() + .is_none()); + + net.block_until_sync(); + assert!(net + .peer(1) + .client() + .as_full() + .unwrap() + .indexed_transaction(&indexed_key) + .unwrap() + .is_some()); +} + +#[test] +fn syncs_huge_blocks() { + use sp_core::storage::well_known_keys::HEAP_PAGES; + use sp_runtime::codec::Encode; + use substrate_test_runtime_client::BlockBuilderExt; + + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + + // Increase heap space for bigger blocks. + net.peer(0).generate_blocks(1, BlockOrigin::Own, |mut builder| { + builder.push_storage_change(HEAP_PAGES.to_vec(), Some(256u64.encode())).unwrap(); + builder.build().unwrap().block + }); + + net.peer(0).generate_blocks(32, BlockOrigin::Own, |mut builder| { + // Add 32 extrinsics 32k each = 1MiB total + for _ in 0..32 { + let ex = Extrinsic::IncludeData([42u8; 32 * 1024].to_vec()); + builder.push(ex).unwrap(); + } + builder.build().unwrap().block + }); + + net.block_until_sync(); + assert_eq!(net.peer(0).client.info().best_number, 33); + assert_eq!(net.peer(1).client.info().best_number, 33); +} diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index efd4574af6a94..d3cd3ab6a1fc1 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers" name = "sc-offchain" -version = "2.0.0" +version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,39 +13,39 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -bytes = "0.5" cid = "0.5" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } +bytes = "1.0" +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +hex = "0.4" fnv = "1.0.6" -futures = "0.3.4" +futures = "0.3.9" futures-timer = "3.0.1" -ipfs = { git = "https://github.com/rs-ipfs/rust-ipfs" } +ipfs = { git = "https://github.com/rs-ipfs/rust-ipfs"} log = "0.4.8" -threadpool = "1.7" num_cpus = "1.10" -sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../primitives/core" } +parking_lot = 
"0.11.1" rand = "0.7.2" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sc-network = { version = "0.8.0", path = "../network" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -tokio = { version = "0.2", default-features = false } - -[target.'cfg(not(target_os = "unknown"))'.dependencies] -hyper = "0.13.2" -hyper-rustls = "0.21.0" +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } +threadpool = "1.7" +hyper = "0.14.11" +hyper-rustls = "0.22.1" +tokio = "1.10" [dev-dependencies] -sc-client-db = { version = "0.8.0", default-features = true, path = "../db/" } -sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } +sc-client-db = { version = "0.10.0-dev", default-features = true, path = "../db" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -tokio = "0.2" +tokio = "1.10" lazy_static = "1.4.0" [features] diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 07816d956173b..5cddb249a62d2 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -1,69 +1,45 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::{ - str::FromStr, - sync::Arc, - convert::TryFrom, - thread::sleep, - collections::HashSet, -}; +// along with this program. If not, see . 
+ +use std::{collections::HashSet, convert::TryFrom, str::FromStr, sync::Arc, thread::sleep}; use crate::NetworkProvider; -use log::error; -use sc_network::{PeerId, Multiaddr}; -use codec::{Encode, Decode}; -use sp_core::OpaquePeerId; -use sp_core::offchain::{ - Externalities as OffchainExt, HttpRequestId, Timestamp, HttpRequestStatus, HttpError, IpfsRequest, - IpfsRequestId, IpfsRequestStatus, OffchainStorage, OpaqueNetworkState, OpaqueMultiaddr, StorageKind, +use codec::{Decode, Encode}; +pub use http::SharedClient; +use sc_network::{Multiaddr, PeerId}; +use sp_core::{ + offchain::{ + self, HttpError, HttpRequestId, HttpRequestStatus, OffchainStorage, OpaqueMultiaddr, + IpfsRequest, IpfsRequestId, IpfsRequestStatus, OpaqueNetworkState, StorageKind, Timestamp, + }, + OpaquePeerId, }; pub use sp_offchain::STORAGE_PREFIX; -pub use http::SharedClient; -#[cfg(not(target_os = "unknown"))] mod http; mod ipfs; -#[cfg(target_os = "unknown")] -use http_dummy as http; -#[cfg(target_os = "unknown")] -mod http_dummy; - mod timestamp; -/// Asynchronous offchain API. -/// -/// NOTE this is done to prevent recursive calls into the runtime (which are not supported currently). -pub(crate) struct Api { - /// Offchain Workers database. - db: Storage, - /// A provider for substrate networking. - network_provider: Arc, - /// Is this node a potential validator? - is_validator: bool, - /// Everything HTTP-related is handled by a different struct. - http: http::HttpApi, - /// Everything IPFS-related is handled by a different struct. - ipfs: ipfs::IpfsApi, -} - fn unavailable_yet(name: &str) -> R { - error!( + log::error!( + target: "sc_offchain", "The {:?} API is not available for offchain workers yet. Follow \ https://github.com/paritytech/substrate/issues/1458 for details", name ); @@ -72,43 +48,51 @@ fn unavailable_yet(name: &str) -> R { const LOCAL_DB: &str = "LOCAL (fork-aware) DB"; -impl OffchainExt for Api { - fn is_validator(&self) -> bool { - self.is_validator - } - - fn network_state(&self) -> Result { - let external_addresses = self.network_provider.external_addresses(); - - let state = NetworkState::new( - self.network_provider.local_peer_id(), - external_addresses, - ); - Ok(OpaqueNetworkState::from(state)) - } - - fn timestamp(&mut self) -> Timestamp { - timestamp::now() - } +/// Offchain DB reference. +#[derive(Debug, Clone)] +pub struct Db { + /// Persistent storage database. + persistent: Storage, +} - fn sleep_until(&mut self, deadline: Timestamp) { - sleep(timestamp::timestamp_from_now(deadline)); +impl Db { + /// Create new instance of Offchain DB. + pub fn new(persistent: Storage) -> Self { + Self { persistent } } - fn random_seed(&mut self) -> [u8; 32] { - rand::random() + /// Create new instance of Offchain DB, backed by given backend. 
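// Aside: constructing and using the new `Db` handle, as the tests at the
// bottom of this file do (`LocalStorage` is the test-backed persistent
// store):
//
//     let mut db = Db::new(LocalStorage::new_test());
//     db.local_storage_set(StorageKind::PERSISTENT, b"test", b"value");
//     assert_eq!(
//         db.local_storage_get(StorageKind::PERSISTENT, b"test"),
//         Some(b"value".to_vec()),
//     );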
+ pub fn factory_from_backend( + backend: &Backend, + ) -> Option> + where + Backend: sc_client_api::Backend, + Block: sp_runtime::traits::Block, + Storage: 'static, + { + sc_client_api::Backend::offchain_storage(backend).map(|db| Box::new(Self::new(db)) as _) } +} +impl offchain::DbExternalities for Db { fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + log::debug!( + target: "sc_offchain", + "{:?}: Write: {:?} <= {:?}", kind, hex::encode(key), hex::encode(value) + ); match kind { - StorageKind::PERSISTENT => self.db.set(STORAGE_PREFIX, key, value), + StorageKind::PERSISTENT => self.persistent.set(STORAGE_PREFIX, key, value), StorageKind::LOCAL => unavailable_yet(LOCAL_DB), } } fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { + log::debug!( + target: "sc_offchain", + "{:?}: Clear: {:?}", kind, hex::encode(key) + ); match kind { - StorageKind::PERSISTENT => self.db.remove(STORAGE_PREFIX, key), + StorageKind::PERSISTENT => self.persistent.remove(STORAGE_PREFIX, key), StorageKind::LOCAL => unavailable_yet(LOCAL_DB), } } @@ -120,26 +104,81 @@ impl OffchainExt for Api { old_value: Option<&[u8]>, new_value: &[u8], ) -> bool { + log::debug!( + target: "sc_offchain", + "{:?}: CAS: {:?} <= {:?} vs {:?}", + kind, + hex::encode(key), + hex::encode(new_value), + old_value.as_ref().map(hex::encode), + ); match kind { - StorageKind::PERSISTENT => { - self.db.compare_and_set(STORAGE_PREFIX, key, old_value, new_value) - }, + StorageKind::PERSISTENT => + self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value), StorageKind::LOCAL => unavailable_yet(LOCAL_DB), } } fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - match kind { - StorageKind::PERSISTENT => self.db.get(STORAGE_PREFIX, key), + let result = match kind { + StorageKind::PERSISTENT => self.persistent.get(STORAGE_PREFIX, key), StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - } + }; + log::debug!( + target: "sc_offchain", + "{:?}: Read: {:?} => {:?}", + kind, + hex::encode(key), + result.as_ref().map(hex::encode) + ); + result + } +} + +/// Asynchronous offchain API. +/// +/// NOTE this is done to prevent recursive calls into the runtime +/// (which are not supported currently). +pub(crate) struct Api { + /// A provider for substrate networking. + network_provider: Arc, + /// Is this node a potential validator? + is_validator: bool, + /// Everything HTTP-related is handled by a different struct. + http: http::HttpApi, + /// Everything IPFS-related is handled by a different struct. 
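// Aside: the semantics of `local_storage_compare_and_set` above, sketched
// over a plain in-memory map (the real implementation forwards to the
// persistent store):
use std::collections::HashMap;

fn compare_and_set(
    map: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: &[u8],
    old_value: Option<&[u8]>,
    new_value: &[u8],
) -> bool {
    // Write only when the current value matches the caller's expectation.
    let current = map.get(key).map(|v| v.as_slice());
    if current == old_value {
        map.insert(key.to_vec(), new_value.to_vec());
        true
    } else {
        false
    }
}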
+ ipfs: ipfs::IpfsApi, +} + +impl offchain::Externalities for Api { + fn is_validator(&self) -> bool { + self.is_validator + } + + fn network_state(&self) -> Result { + let external_addresses = self.network_provider.external_addresses(); + + let state = NetworkState::new(self.network_provider.local_peer_id(), external_addresses); + Ok(OpaqueNetworkState::from(state)) + } + + fn timestamp(&mut self) -> Timestamp { + timestamp::now() + } + + fn sleep_until(&mut self, deadline: Timestamp) { + sleep(timestamp::timestamp_from_now(deadline)); + } + + fn random_seed(&mut self) -> [u8; 32] { + rand::random() } fn http_request_start( &mut self, method: &str, uri: &str, - _meta: &[u8] + _meta: &[u8], ) -> Result { self.http.request_start(method, uri) } @@ -148,7 +187,7 @@ impl OffchainExt for Api { &mut self, request_id: HttpRequestId, name: &str, - value: &str + value: &str, ) -> Result<(), ()> { self.http.request_add_header(request_id, name, value) } @@ -157,7 +196,7 @@ impl OffchainExt for Api { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { self.http.request_write_body(request_id, chunk, deadline) } @@ -165,15 +204,12 @@ impl OffchainExt for Api { fn http_response_wait( &mut self, ids: &[HttpRequestId], - deadline: Option + deadline: Option, ) -> Vec { self.http.response_wait(ids, deadline) } - fn http_response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)> { + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { self.http.response_headers(request_id) } @@ -181,7 +217,7 @@ impl OffchainExt for Api { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { self.http.response_read_body(request_id, buffer, deadline) } @@ -199,9 +235,8 @@ impl OffchainExt for Api { } fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { - let peer_ids: HashSet = nodes.into_iter() - .filter_map(|node| PeerId::from_bytes(node.0).ok()) - .collect(); + let peer_ids: HashSet = + nodes.into_iter().filter_map(|node| PeerId::from_bytes(&node.0).ok()).collect(); self.network_provider.set_authorized_peers(peer_ids); self.network_provider.set_authorized_only(authorized_only); @@ -217,16 +252,13 @@ pub struct NetworkState { impl NetworkState { fn new(peer_id: PeerId, external_addresses: Vec) -> Self { - NetworkState { - peer_id, - external_addresses, - } + NetworkState { peer_id, external_addresses } } } impl From for OpaqueNetworkState { fn from(state: NetworkState) -> OpaqueNetworkState { - let enc = Encode::encode(&state.peer_id.into_bytes()); + let enc = Encode::encode(&state.peer_id.to_bytes()); let peer_id = OpaquePeerId::new(enc); let external_addresses: Vec = state @@ -238,10 +270,7 @@ impl From for OpaqueNetworkState { }) .collect(); - OpaqueNetworkState { - peer_id, - external_addresses, - } + OpaqueNetworkState { peer_id, external_addresses } } } @@ -252,9 +281,10 @@ impl TryFrom for NetworkState { let inner_vec = state.peer_id.0; let bytes: Vec = Decode::decode(&mut &inner_vec[..]).map_err(|_| ())?; - let peer_id = PeerId::from_bytes(bytes).map_err(|_| ())?; + let peer_id = PeerId::from_bytes(&bytes).map_err(|_| ())?; - let external_addresses: Result, Self::Error> = state.external_addresses + let external_addresses: Result, Self::Error> = state + .external_addresses .iter() .map(|enc_multiaddr| -> Result { let inner_vec = &enc_multiaddr.0; @@ -266,10 +296,7 @@ impl TryFrom for NetworkState { .collect(); let 
external_addresses = external_addresses?; - Ok(NetworkState { - peer_id, - external_addresses, - }) + Ok(NetworkState { peer_id, external_addresses }) } } @@ -285,18 +312,16 @@ pub(crate) struct AsyncApi { impl AsyncApi { /// Creates new Offchain extensions API implementation an the asynchronous processing part. - pub fn new( - db: S, + pub fn new( network_provider: Arc, ipfs_node: ::ipfs::Ipfs, is_validator: bool, shared_client: SharedClient, - ) -> (Api, Self) { + ) -> (Api, Self) { let (http_api, http_worker) = http::http(shared_client); let (ipfs_api, ipfs_worker) = ipfs::ipfs(ipfs_node); let api = Api { - db, network_provider, is_validator, http: http_api, @@ -315,7 +340,6 @@ impl AsyncApi { pub async fn process(mut self) { let http = self.http.take().expect("Take invoked only once."); let ipfs = self.ipfs.take().expect("Take invoked only once."); - futures::join!(http, ipfs); } } @@ -323,9 +347,13 @@ impl AsyncApi { #[cfg(test)] mod tests { use super::*; - use std::{convert::{TryFrom, TryInto}, time::SystemTime}; use sc_client_db::offchain::LocalStorage; use sc_network::{NetworkStateInfo, PeerId}; + use sp_core::offchain::{DbExternalities, Externalities}; + use std::{ + convert::{TryFrom, TryInto}, + time::SystemTime, + }; struct TestNetwork(); @@ -349,9 +377,8 @@ mod tests { } } - fn offchain_api() -> (Api, AsyncApi) { + fn offchain_api() -> (Api, AsyncApi) { sp_tracing::try_init_simple(); - let db = LocalStorage::new_test(); let mock = Arc::new(TestNetwork()); let shared_client = SharedClient::new(); @@ -364,7 +391,6 @@ mod tests { }); AsyncApi::new( - db, mock, ipfs_node, false, @@ -372,13 +398,22 @@ mod tests { ) } + fn offchain_db() -> Db { + Db::new(LocalStorage::new_test()) + } + #[test] fn should_get_timestamp() { let mut api = offchain_api().0; // Get timestamp from std. let now = SystemTime::now(); - let d: u64 = now.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis().try_into().unwrap(); + let d: u64 = now + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_millis() + .try_into() + .unwrap(); // Get timestamp from offchain api. let timestamp = api.timestamp(); @@ -410,7 +445,7 @@ mod tests { fn should_set_and_get_local_storage() { // given let kind = StorageKind::PERSISTENT; - let mut api = offchain_api().0; + let mut api = offchain_db(); let key = b"test"; // when @@ -425,7 +460,7 @@ mod tests { fn should_compare_and_set_local_storage() { // given let kind = StorageKind::PERSISTENT; - let mut api = offchain_api().0; + let mut api = offchain_db(); let key = b"test"; api.local_storage_set(kind, key, b"value"); @@ -442,7 +477,7 @@ mod tests { fn should_compare_and_set_local_storage_with_none() { // given let kind = StorageKind::PERSISTENT; - let mut api = offchain_api().0; + let mut api = offchain_db(); let key = b"test"; // when diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 1f542b7c11e19..ce9fb298d1b0c 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! This module is composed of two structs: [`HttpApi`] and [`HttpWorker`]. Calling the [`http`] //! function returns a pair of [`HttpApi`] and [`HttpWorker`] that share some state. @@ -26,16 +28,22 @@ //! actively calling any function. use crate::api::timestamp; -use bytes::buf::ext::{Reader, BufExt}; +use bytes::buf::{Buf, Reader}; use fnv::FnvHashMap; -use futures::{prelude::*, future, channel::mpsc}; -use log::error; -use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; -use std::{convert::TryFrom, fmt, io::Read as _, pin::Pin, task::{Context, Poll}}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; -use std::sync::Arc; -use hyper::{Client as HyperClient, Body, client}; +use futures::{channel::mpsc, future, prelude::*}; +use hyper::{client, Body, Client as HyperClient}; use hyper_rustls::HttpsConnector; +use log::error; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; +use std::{ + convert::TryFrom, + fmt, + io::Read as _, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; /// Wrapper struct used for keeping the hyper_rustls client running. #[derive(Clone)] @@ -43,7 +51,7 @@ pub struct SharedClient(Arc, B impl SharedClient { pub fn new() -> Self { - Self(Arc::new(HyperClient::builder().build(HttpsConnector::new()))) + Self(Arc::new(HyperClient::builder().build(HttpsConnector::with_native_roots()))) } } @@ -61,12 +69,8 @@ pub fn http(shared_client: SharedClient) -> (HttpApi, HttpWorker) { requests: FnvHashMap::default(), }; - let engine = HttpWorker { - to_api, - from_api, - http_client: shared_client.0, - requests: Vec::new(), - }; + let engine = + HttpWorker { to_api, from_api, http_client: shared_client.0, requests: Vec::new() }; (api, engine) } @@ -125,11 +129,7 @@ struct HttpApiRequestRp { impl HttpApi { /// Mimics the corresponding method in the offchain API. - pub fn request_start( - &mut self, - method: &str, - uri: &str - ) -> Result { + pub fn request_start(&mut self, method: &str, uri: &str) -> Result { // Start by building the prototype of the request. // We do this first so that we don't touch anything in `self` if building the prototype // fails. 
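
The pairing described in the module docs above — a synchronous `HttpApi` handed to the offchain worker and an asynchronous `HttpWorker` future driving the shared hyper client — is a split-ownership pattern worth seeing in isolation. A rough, self-contained sketch using plain `futures` channels; `ToWorker`, `ApiHalf`, and `api_pair` are invented names for this illustration (the real messages are `ApiToWorker`/`WorkerToApi`):

```rust
use futures::{channel::mpsc, StreamExt};

/// Message from the synchronous API half to the background worker (illustrative).
enum ToWorker {
    Dispatch { id: u16, uri: String },
}

/// The half the offchain worker calls into synchronously.
struct ApiHalf {
    to_worker: mpsc::UnboundedSender<ToWorker>,
    next_id: u16,
}

impl ApiHalf {
    fn request_start(&mut self, uri: &str) -> u16 {
        let id = self.next_id;
        self.next_id += 1;
        // Fire-and-forget: the worker picks this up whenever it is polled.
        let _ = self.to_worker.unbounded_send(ToWorker::Dispatch { id, uri: uri.to_owned() });
        id
    }
}

/// Build the pair; the caller must poll the returned future for requests to progress.
fn api_pair() -> (ApiHalf, impl std::future::Future<Output = ()>) {
    let (tx, mut rx) = mpsc::unbounded();
    let worker = async move {
        while let Some(ToWorker::Dispatch { id, uri }) = rx.next().await {
            // The real worker would hand this to the shared hyper client here.
            println!("dispatching request {} to {}", id, uri);
        }
    };
    (ApiHalf { to_worker: tx, next_id: 0 }, worker)
}

fn main() {
    let (mut api, worker) = api_pair();
    let id = api.request_start("http://localhost:0/");
    drop(api); // closing the API side lets the worker future finish
    futures::executor::block_on(worker);
    assert_eq!(id, 0);
}
```
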
@@ -144,10 +144,11 @@ impl HttpApi { Some(new_id) => self.next_id.0 = new_id, None => { error!("Overflow in offchain worker HTTP request ID assignment"); - return Err(()); - } + return Err(()) + }, }; - self.requests.insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); + self.requests + .insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); Ok(new_id) } @@ -157,11 +158,11 @@ impl HttpApi { &mut self, request_id: HttpRequestId, name: &str, - value: &str + value: &str, ) -> Result<(), ()> { let request = match self.requests.get_mut(&request_id) { Some(&mut HttpApiRequest::NotDispatched(ref mut rq, _)) => rq, - _ => return Err(()) + _ => return Err(()), }; let name = hyper::header::HeaderName::try_from(name).map_err(drop)?; @@ -177,14 +178,11 @@ impl HttpApi { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { // Extract the request from the list. // Don't forget to add it back if necessary when returning. - let mut request = match self.requests.remove(&request_id) { - None => return Err(HttpError::Invalid), - Some(r) => r, - }; + let mut request = self.requests.remove(&request_id).ok_or_else(|| HttpError::Invalid)?; let mut deadline = timestamp::deadline_to_future(deadline); // Closure that writes data to a sender, taking the deadline into account. Can return `Ok` @@ -194,44 +192,42 @@ impl HttpApi { let mut when_ready = future::maybe_done(future::poll_fn(|cx| sender.poll_ready(cx))); futures::executor::block_on(future::select(&mut when_ready, &mut deadline)); match when_ready { - future::MaybeDone::Done(Ok(())) => {} + future::MaybeDone::Done(Ok(())) => {}, future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), - future::MaybeDone::Future(_) | - future::MaybeDone::Gone => { + future::MaybeDone::Future(_) | future::MaybeDone::Gone => { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); return Err(HttpError::DeadlineReached) - } + }, }; - futures::executor::block_on(sender.send_data(hyper::body::Bytes::from(chunk.to_owned()))) - .map_err(|_| { - error!("HTTP sender refused data despite being ready"); - HttpError::IoError - }) + futures::executor::block_on( + sender.send_data(hyper::body::Bytes::from(chunk.to_owned())), + ) + .map_err(|_| { + error!("HTTP sender refused data despite being ready"); + HttpError::IoError + }) }; loop { request = match request { HttpApiRequest::NotDispatched(request, sender) => { // If the request is not dispatched yet, dispatch it and loop again. - let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { - id: request_id, - request - }); + let _ = self + .to_worker + .unbounded_send(ApiToWorker::Dispatch { id: request_id, request }); HttpApiRequest::Dispatched(Some(sender)) - } + }, - HttpApiRequest::Dispatched(Some(mut sender)) => + HttpApiRequest::Dispatched(Some(mut sender)) => { if !chunk.is_empty() { match poll_sender(&mut sender) { Err(HttpError::IoError) => return Err(HttpError::IoError), other => { - self.requests.insert( - request_id, - HttpApiRequest::Dispatched(Some(sender)) - ); + self.requests + .insert(request_id, HttpApiRequest::Dispatched(Some(sender))); return other - } + }, } } else { // Writing an empty body is a hint that we should stop writing. Dropping @@ -239,31 +235,42 @@ impl HttpApi { self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); return Ok(()) } + }, - HttpApiRequest::Response(mut response @ HttpApiRequestRp { sending_body: Some(_), .. 
}) => + HttpApiRequest::Response( + mut response @ HttpApiRequestRp { sending_body: Some(_), .. }, + ) => { if !chunk.is_empty() { - match poll_sender(response.sending_body.as_mut() - .expect("Can only enter this match branch if Some; qed")) { + match poll_sender( + response + .sending_body + .as_mut() + .expect("Can only enter this match branch if Some; qed"), + ) { Err(HttpError::IoError) => return Err(HttpError::IoError), other => { - self.requests.insert(request_id, HttpApiRequest::Response(response)); + self.requests + .insert(request_id, HttpApiRequest::Response(response)); return other - } + }, } - } else { // Writing an empty body is a hint that we should stop writing. Dropping // the sender. - self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { - sending_body: None, - ..response - })); + self.requests.insert( + request_id, + HttpApiRequest::Response(HttpApiRequestRp { + sending_body: None, + ..response + }), + ); return Ok(()) } + }, HttpApiRequest::Fail(_) => - // If the request has already failed, return without putting back the request - // in the list. + // If the request has already failed, return without putting back the request + // in the list. return Err(HttpError::IoError), v @ HttpApiRequest::Dispatched(None) | @@ -271,7 +278,7 @@ impl HttpApi { // We have already finished sending this body. self.requests.insert(request_id, v); return Err(HttpError::Invalid) - } + }, } } } @@ -280,30 +287,27 @@ impl HttpApi { pub fn response_wait( &mut self, ids: &[HttpRequestId], - deadline: Option + deadline: Option, ) -> Vec { // First of all, dispatch all the non-dispatched requests and drop all senders so that the // user can't write anymore data. for id in ids { match self.requests.get_mut(id) { - Some(HttpApiRequest::NotDispatched(_, _)) => {} + Some(HttpApiRequest::NotDispatched(_, _)) => {}, Some(HttpApiRequest::Dispatched(sending_body)) | Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { let _ = sending_body.take(); continue - } - _ => continue + }, + _ => continue, }; let (request, _sender) = match self.requests.remove(id) { Some(HttpApiRequest::NotDispatched(rq, s)) => (rq, s), - _ => unreachable!("we checked for NotDispatched above; qed") + _ => unreachable!("we checked for NotDispatched above; qed"), }; - let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { - id: *id, - request - }); + let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { id: *id, request }); // We also destroy the sender in order to forbid writing more data. self.requests.insert(*id, HttpApiRequest::Dispatched(None)); @@ -320,25 +324,24 @@ impl HttpApi { for id in ids { output.push(match self.requests.get(id) { None => HttpRequestStatus::Invalid, - Some(HttpApiRequest::NotDispatched(_, _)) => - unreachable!("we replaced all the NotDispatched with Dispatched earlier; qed"), + Some(HttpApiRequest::NotDispatched(_, _)) => unreachable!( + "we replaced all the NotDispatched with Dispatched earlier; qed" + ), Some(HttpApiRequest::Dispatched(_)) => { must_wait_more = true; HttpRequestStatus::DeadlineReached }, Some(HttpApiRequest::Fail(_)) => HttpRequestStatus::IoError, - Some(HttpApiRequest::Response(HttpApiRequestRp { status_code, .. })) => - HttpRequestStatus::Finished(status_code.as_u16()), + Some(HttpApiRequest::Response(HttpApiRequestRp { + status_code, .. + })) => HttpRequestStatus::Finished(status_code.as_u16()), }); } debug_assert_eq!(output.len(), ids.len()); // Are we ready to call `return`? 
- let is_done = if let future::MaybeDone::Done(_) = deadline { - true - } else { - !must_wait_more - }; + let is_done = + if let future::MaybeDone::Done(_) = deadline { true } else { !must_wait_more }; if is_done { // Requests in "fail" mode are purged before returning. @@ -367,50 +370,49 @@ impl HttpApi { // Update internal state based on received message. match next_message { - Some(WorkerToApi::Response { id, status_code, headers, body }) => + Some(WorkerToApi::Response { id, status_code, headers, body }) => { match self.requests.remove(&id) { Some(HttpApiRequest::Dispatched(sending_body)) => { - self.requests.insert(id, HttpApiRequest::Response(HttpApiRequestRp { - sending_body, - status_code, - headers, - body: body.fuse(), - current_read_chunk: None, - })); - } - None => {} // can happen if we detected an IO error when sending the body + self.requests.insert( + id, + HttpApiRequest::Response(HttpApiRequestRp { + sending_body, + status_code, + headers, + body: body.fuse(), + current_read_chunk: None, + }), + ); + }, + None => {}, // can happen if we detected an IO error when sending the body _ => error!("State mismatch between the API and worker"), } + }, - Some(WorkerToApi::Fail { id, error }) => - match self.requests.remove(&id) { - Some(HttpApiRequest::Dispatched(_)) => { - self.requests.insert(id, HttpApiRequest::Fail(error)); - } - None => {} // can happen if we detected an IO error when sending the body - _ => error!("State mismatch between the API and worker"), - } + Some(WorkerToApi::Fail { id, error }) => match self.requests.remove(&id) { + Some(HttpApiRequest::Dispatched(_)) => { + self.requests.insert(id, HttpApiRequest::Fail(error)); + }, + None => {}, // can happen if we detected an IO error when sending the body + _ => error!("State mismatch between the API and worker"), + }, None => { error!("Worker has crashed"); return ids.iter().map(|_| HttpRequestStatus::IoError).collect() - } + }, } - } } /// Mimics the corresponding method in the offchain API. - pub fn response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)> { + pub fn response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { // Do an implicit non-blocking wait on the request. let _ = self.response_wait(&[request_id], Some(timestamp::now())); let headers = match self.requests.get(&request_id) { Some(HttpApiRequest::Response(HttpApiRequestRp { headers, .. })) => headers, - _ => return Vec::new() + _ => return Vec::new(), }; headers @@ -424,7 +426,7 @@ impl HttpApi { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { // Do an implicit wait on the request. let _ = self.response_wait(&[request_id], deadline); @@ -440,14 +442,13 @@ impl HttpApi { return Err(HttpError::DeadlineReached) }, // The request has failed. - Some(HttpApiRequest::Fail { .. }) => - return Err(HttpError::IoError), + Some(HttpApiRequest::Fail { .. }) => return Err(HttpError::IoError), // Request hasn't been dispatched yet; reading the body is invalid. Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { self.requests.insert(request_id, rq); return Err(HttpError::Invalid) - } - None => return Err(HttpError::Invalid) + }, + None => return Err(HttpError::Invalid), }; // Convert the deadline into a `Future` that resolves when the deadline is reached. @@ -457,19 +458,22 @@ impl HttpApi { // First read from `current_read_chunk`. 
if let Some(mut current_read_chunk) = response.current_read_chunk.take() { match current_read_chunk.read(buffer) { - Ok(0) => {} + Ok(0) => {}, Ok(n) => { - self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { - current_read_chunk: Some(current_read_chunk), - .. response - })); + self.requests.insert( + request_id, + HttpApiRequest::Response(HttpApiRequestRp { + current_read_chunk: Some(current_read_chunk), + ..response + }), + ); return Ok(n) }, Err(err) => { // This code should never be reached unless there's a logic error somewhere. error!("Failed to read from current read chunk: {:?}", err); return Err(HttpError::IoError) - } + }, } } @@ -483,7 +487,7 @@ impl HttpApi { match next_body { Some(Ok(chunk)) => response.current_read_chunk = Some(chunk.reader()), Some(Err(_)) => return Err(HttpError::IoError), - None => return Ok(0), // eof + None => return Ok(0), // eof } } @@ -497,9 +501,7 @@ impl HttpApi { impl fmt::Debug for HttpApi { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list() - .entries(self.requests.iter()) - .finish() + f.debug_list().entries(self.requests.iter()).finish() } } @@ -508,12 +510,13 @@ impl fmt::Debug for HttpApiRequest { match self { HttpApiRequest::NotDispatched(_, _) => f.debug_tuple("HttpApiRequest::NotDispatched").finish(), - HttpApiRequest::Dispatched(_) => - f.debug_tuple("HttpApiRequest::Dispatched").finish(), - HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => - f.debug_tuple("HttpApiRequest::Response").field(status_code).field(headers).finish(), - HttpApiRequest::Fail(err) => - f.debug_tuple("HttpApiRequest::Fail").field(err).finish(), + HttpApiRequest::Dispatched(_) => f.debug_tuple("HttpApiRequest::Dispatched").finish(), + HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => f + .debug_tuple("HttpApiRequest::Response") + .field(status_code) + .field(headers) + .finish(), + HttpApiRequest::Fail(err) => f.debug_tuple("HttpApiRequest::Fail").field(err).finish(), } } } @@ -526,7 +529,7 @@ enum ApiToWorker { id: HttpRequestId, /// Request to start executing. request: hyper::Request, - } + }, } /// Message send from the API to the worker. @@ -606,8 +609,8 @@ impl Future for HttpWorker { Poll::Ready(Ok(response)) => response, Poll::Ready(Err(error)) => { let _ = me.to_api.unbounded_send(WorkerToApi::Fail { id, error }); - continue; // don't insert the request back - } + continue // don't insert the request back + }, }; // We received a response! Decompose it into its parts. @@ -623,20 +626,20 @@ impl Future for HttpWorker { }); me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); - cx.waker().wake_by_ref(); // reschedule in order to poll the new future + cx.waker().wake_by_ref(); // reschedule in order to poll the new future continue - } + }, HttpWorkerRequest::ReadBody { mut body, mut tx } => { // Before reading from the HTTP response, check that `tx` is ready to accept // a new chunk. match tx.poll_ready(cx) { - Poll::Ready(Ok(())) => {} - Poll::Ready(Err(_)) => continue, // don't insert the request back + Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(_)) => continue, // don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); continue - } + }, } // `tx` is ready. Read a chunk from the socket and send it to the channel. 
@@ -644,31 +647,31 @@ impl Future for HttpWorker { Poll::Ready(Some(Ok(chunk))) => { let _ = tx.start_send(Ok(chunk)); me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - cx.waker().wake_by_ref(); // reschedule in order to continue reading - } + cx.waker().wake_by_ref(); // reschedule in order to continue reading + }, Poll::Ready(Some(Err(err))) => { let _ = tx.start_send(Err(err)); // don't insert the request back }, - Poll::Ready(None) => {} // EOF; don't insert the request back + Poll::Ready(None) => {}, // EOF; don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); }, } - } + }, } } // Check for messages coming from the [`HttpApi`]. match Stream::poll_next(Pin::new(&mut me.from_api), cx) { Poll::Pending => {}, - Poll::Ready(None) => return Poll::Ready(()), // stops the worker + Poll::Ready(None) => return Poll::Ready(()), // stops the worker Poll::Ready(Some(ApiToWorker::Dispatch { id, request })) => { let future = me.http_client.request(request); debug_assert!(me.requests.iter().all(|(i, _)| *i != id)); me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - cx.waker().wake_by_ref(); // reschedule the task to poll the request - } + cx.waker().wake_by_ref(); // reschedule the task to poll the request + }, } Poll::Pending @@ -677,9 +680,7 @@ impl Future for HttpWorker { impl fmt::Debug for HttpWorker { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list() - .entries(self.requests.iter()) - .finish() + f.debug_list().entries(self.requests.iter()).finish() } } @@ -696,13 +697,13 @@ impl fmt::Debug for HttpWorkerRequest { #[cfg(test)] mod tests { - use core::convert::Infallible; - use crate::api::timestamp; use super::{http, SharedClient}; - use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Duration}; + use crate::api::timestamp; + use core::convert::Infallible; use futures::future; use lazy_static::lazy_static; - + use sp_core::offchain::{Duration, HttpError, HttpRequestId, HttpRequestStatus}; + // Using lazy_static to avoid spawning lots of different SharedClients, // as spawning a SharedClient is CPU-intensive and opens lots of fds. lazy_static! 
{ @@ -718,17 +719,20 @@ mod tests { let (addr_tx, addr_rx) = std::sync::mpsc::channel(); std::thread::spawn(move || { - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); let worker = rt.spawn(worker); let server = rt.spawn(async move { - let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()) - .serve(hyper::service::make_service_fn(|_| { async move { - Ok::<_, Infallible>(hyper::service::service_fn(move |_req| async move { - Ok::<_, Infallible>( - hyper::Response::new(hyper::Body::from("Hello World!")) - ) - })) - }})); + let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve( + hyper::service::make_service_fn(|_| async move { + Ok::<_, Infallible>(hyper::service::service_fn( + move |_req| async move { + Ok::<_, Infallible>(hyper::Response::new(hyper::Body::from( + "Hello World!", + ))) + }, + )) + }), + ); let _ = addr_tx.send(server.local_addr()); server.await.map_err(drop) }); @@ -751,7 +755,7 @@ mod tests { match api.response_wait(&[id], Some(deadline))[0] { HttpRequestStatus::Finished(200) => {}, - v => panic!("Connecting to localhost failed: {:?}", v) + v => panic!("Connecting to localhost failed: {:?}", v), } let headers = api.response_headers(id); @@ -767,13 +771,13 @@ mod tests { let (mut api, addr) = build_api_server!(); match api.request_start("\0", &format!("http://{}", addr)) { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; match api.request_start("GET", "http://\0localhost") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; } @@ -782,42 +786,42 @@ mod tests { let (mut api, addr) = build_api_server!(); match api.request_add_header(HttpRequestId(0xdead), "Foo", "bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); match api.request_add_header(id, "\0", "bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); match api.request_add_header(id, "Foo", "\0") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.request_add_header(id, "Foo", "Bar").unwrap(); api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); api.response_headers(id); match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); api.response_read_body(id, &mut [], None).unwrap(); match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; } @@ -826,13 +830,13 @@ mod tests { let (mut api, addr) = build_api_server!(); match api.request_write_body(HttpRequestId(0xdead), &[1, 2, 3], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; match api.request_write_body(HttpRequestId(0xdead), &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); @@ -840,8 +844,8 @@ mod tests { 
api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.request_write_body(id, &[], None).unwrap(); match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); @@ -849,52 +853,52 @@ mod tests { api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.request_write_body(id, &[], None).unwrap(); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.response_wait(&[id], None); match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.response_wait(&[id], None); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.response_headers(id); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); api.response_headers(id); match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.response_read_body(id, &mut [], None).unwrap(); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.response_read_body(id, &mut [], None).unwrap(); match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; } @@ -949,15 +953,15 @@ mod tests { let mut buf = [0; 512]; match api.response_read_body(HttpRequestId(0xdead), &mut buf, None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), } let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} match api.response_read_body(id, &mut buf, None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), } } @@ -974,16 +978,26 @@ mod tests { for _ in 0..250 { match rand::random::() % 6 { - 0 => { let _ = api.request_add_header(id, "Foo", "Bar"); } - 1 => { let _ = api.request_write_body(id, &[1, 2, 3, 4], None); } - 2 => { let _ = api.request_write_body(id, &[], None); } - 3 => { let _ = api.response_wait(&[id], None); } - 4 => { let _ = api.response_headers(id); } + 0 => { + let _ = api.request_add_header(id, "Foo", "Bar"); + }, + 1 => { + let _ = api.request_write_body(id, &[1, 2, 3, 4], None); + }, + 2 => { + let _ = api.request_write_body(id, &[], None); + }, + 3 => { + let _ = api.response_wait(&[id], None); + }, + 4 => { + let _ = api.response_headers(id); + }, 5 => { let mut buf = [0; 512]; let _ = 
api.response_read_body(id, &mut buf, None); - } - 6 ..= 255 => unreachable!() + }, + 6..=255 => unreachable!(), } } } diff --git a/client/offchain/src/api/http_dummy.rs b/client/offchain/src/api/http_dummy.rs deleted file mode 100644 index 1c83325c93b20..0000000000000 --- a/client/offchain/src/api/http_dummy.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Contains the same API as the `http` module, except that everything returns an error. - -use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; -use std::{future::Future, pin::Pin, task::Context, task::Poll}; - -/// Wrapper struct (wrapping nothing in case of http_dummy) used for keeping the hyper_rustls client running. -#[derive(Clone)] -pub struct SharedClient; - -impl SharedClient { - pub fn new() -> Self { - Self - } -} - -/// Creates a pair of [`HttpApi`] and [`HttpWorker`]. -pub fn http(_: SharedClient) -> (HttpApi, HttpWorker) { - (HttpApi, HttpWorker) -} - -/// Dummy implementation of HTTP capabilities. -#[derive(Debug)] -pub struct HttpApi; - -/// Dummy implementation of HTTP capabilities. -#[derive(Debug)] -pub struct HttpWorker; - -impl HttpApi { - /// Mimics the corresponding method in the offchain API. - pub fn request_start( - &mut self, - _: &str, - _: &str - ) -> Result { - /// Because this always returns an error, none of the other methods should ever be called. - Err(()) - } - - /// Mimics the corresponding method in the offchain API. - pub fn request_add_header( - &mut self, - _: HttpRequestId, - _: &str, - _: &str - ) -> Result<(), ()> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } - - /// Mimics the corresponding method in the offchain API. - pub fn request_write_body( - &mut self, - _: HttpRequestId, - _: &[u8], - _: Option - ) -> Result<(), HttpError> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } - - /// Mimics the corresponding method in the offchain API. - pub fn response_wait( - &mut self, - requests: &[HttpRequestId], - _: Option - ) -> Vec { - if requests.is_empty() { - Vec::new() - } else { - unreachable!("Creating a request always fails, thus the list of requests should \ - always be empty; qed") - } - } - - /// Mimics the corresponding method in the offchain API. - pub fn response_headers( - &mut self, - _: HttpRequestId - ) -> Vec<(Vec, Vec)> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } - - /// Mimics the corresponding method in the offchain API. 
- pub fn response_read_body( - &mut self, - _: HttpRequestId, - _: &mut [u8], - _: Option - ) -> Result { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } -} - -impl Future for HttpWorker { - type Output = (); - - fn poll(self: Pin<&mut Self>, _: &mut Context) -> Poll { - Poll::Ready(()) - } -} diff --git a/client/offchain/src/api/ipfs.rs b/client/offchain/src/api/ipfs.rs index b68cbc4dd611e..c8b4f8b8d1adb 100644 --- a/client/offchain/src/api/ipfs.rs +++ b/client/offchain/src/api/ipfs.rs @@ -19,10 +19,10 @@ use ipfs::{ use log::error; use sp_core::offchain::{IpfsRequest, IpfsRequestId, IpfsRequestStatus, IpfsResponse, OpaqueMultiaddr, Timestamp}; use std::{collections::BTreeMap, convert::TryInto, fmt, mem, pin::Pin, str, task::{Context, Poll}}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; // wasm-friendly implementations of Ipfs::{add, get} -async fn ipfs_add(ipfs: &Ipfs, data: Vec) -> Result { +async fn ipfs_add(ipfs: &Ipfs, data: Vec) -> Result { let dag = ipfs.dag(); let links: Vec = vec![]; @@ -297,7 +297,7 @@ impl From for IpfsResponse { let mut ret = Vec::with_capacity(resp.len()); for (peer_id, addrs) in resp { - let peer = peer_id.as_ref().to_vec(); + let peer = peer_id.to_bytes(); let mut converted_addrs = Vec::with_capacity(addrs.len()); for addr in addrs { @@ -329,7 +329,7 @@ impl From for IpfsResponse { data_received, dup_blks_received, dup_data_received, - peers: peers.into_iter().map(|peer_id| peer_id.as_ref().to_vec()).collect(), + peers: peers.into_iter().map(|peer_id| peer_id.to_bytes()).collect(), wantlist: wantlist.into_iter().map(|(cid, prio)| (cid.to_bytes(), prio)).collect(), } } @@ -355,7 +355,7 @@ impl From for IpfsResponse { IpfsResponse::FindPeer(addrs) }, IpfsNativeResponse::Identity(pk, addrs) => { - let pk = pk.into_peer_id().as_ref().to_vec(); + let pk = pk.into_peer_id().to_bytes(); let addrs = addrs.into_iter().map(|addr| OpaqueMultiaddr(addr.to_string().into_bytes()) ).collect(); @@ -550,7 +550,7 @@ mod tests { let mut rt = tokio::runtime::Runtime::new().unwrap(); let ipfs_node = rt.block_on(async move { let (ipfs, fut): (Ipfs, _) = - ipfs::UninitializedIpfs::new(options, None).await.start().await.unwrap(); + ipfs::UninitializedIpfs::new(options).await.start().await.unwrap(); tokio::task::spawn(fut); ipfs }); diff --git a/client/offchain/src/api/timestamp.rs b/client/offchain/src/api/timestamp.rs index 222d3273cb355..f1c8c004a0198 100644 --- a/client/offchain/src/api/timestamp.rs +++ b/client/offchain/src/api/timestamp.rs @@ -1,24 +1,28 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Helper methods dedicated to timestamps. use sp_core::offchain::Timestamp; -use std::convert::TryInto; -use std::time::{SystemTime, Duration}; +use std::{ + convert::TryInto, + time::{Duration, SystemTime}, +}; /// Returns the current time as a `Timestamp`. pub fn now() -> Timestamp { @@ -32,9 +36,12 @@ pub fn now() -> Timestamp { Ok(d) => { let duration = d.as_millis(); // Assuming overflow won't happen for a few hundred years. - Timestamp::from_unix_millis(duration.try_into() - .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed")) - } + Timestamp::from_unix_millis( + duration + .try_into() + .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed"), + ) + }, } } @@ -50,7 +57,7 @@ pub fn timestamp_from_now(timestamp: Timestamp) -> Duration { /// If `None`, returns a never-ending `Future`. pub fn deadline_to_future( deadline: Option, -) -> futures::future::MaybeDone { +) -> futures::future::MaybeDone> { use futures::future::{self, Either}; future::maybe_done(match deadline.map(timestamp_from_now) { @@ -58,7 +65,6 @@ pub fn deadline_to_future( // Only apply delay if we need to wait a non-zero duration Some(duration) if duration <= Duration::from_secs(0) => Either::Right(Either::Left(future::ready(()))), - Some(duration) => - Either::Right(Either::Right(futures_timer::Delay::new(duration))), + Some(duration) => Either::Right(Either::Right(futures_timer::Delay::new(duration))), }) } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 233a4aa6a6fc0..832a6795b244b 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Substrate offchain workers. //! 
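
The `deadline_to_future` helper in `timestamp.rs` above converts an optional offchain `Timestamp` deadline into something pollable: no deadline becomes a never-resolving future, an already-expired deadline resolves immediately, and everything else becomes a timer. A sketch of the same shape, with a plain `Duration` standing in for `timestamp_from_now` and the hypothetical name `deadline_future`; it assumes only the `futures` and `futures-timer` crates, both already used by this code:

```rust
use futures::future::{self, Either};
use std::time::Duration;

/// `None` = no deadline (never resolves); zero remaining time resolves
/// immediately; otherwise wait out the remaining duration.
fn deadline_future(remaining: Option<Duration>) -> impl std::future::Future<Output = ()> {
    future::maybe_done(match remaining {
        None => Either::Left(future::pending()),
        Some(d) if d <= Duration::from_secs(0) => Either::Right(Either::Left(future::ready(()))),
        Some(d) => Either::Right(Either::Right(futures_timer::Delay::new(d))),
    })
}

fn main() {
    // Resolves on the first poll: the deadline already passed.
    futures::executor::block_on(deadline_future(Some(Duration::from_secs(0))));
    // Resolves after roughly ten milliseconds.
    futures::executor::block_on(deadline_future(Some(Duration::from_millis(10))));
}
```

Wrapping the result in `maybe_done` is what lets callers poll the deadline alongside other futures and then inspect whether it has fired.
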
@@ -33,24 +35,26 @@ #![warn(missing_docs)] -use std::{ - fmt, marker::PhantomData, sync::Arc, - collections::HashSet, -}; +use std::{collections::HashSet, fmt, marker::PhantomData, sync::Arc}; -use parking_lot::Mutex; -use threadpool::ThreadPool; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use futures::future::Future; +use futures::{ + future::{ready, Future}, + prelude::*, +}; use log::{debug, warn}; +use parking_lot::Mutex; use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; -use sp_core::{offchain::{self, OffchainStorage}, ExecutionContext, traits::SpawnNamed}; -use sp_runtime::{generic::BlockId, traits::{self, Header}}; -use futures::{prelude::*, future::ready}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_core::{offchain, traits::SpawnNamed, ExecutionContext}; +use sp_runtime::{ + generic::BlockId, + traits::{self, Header}, +}; +use threadpool::ThreadPool; mod api; -use api::SharedClient; +pub use api::Db as OffchainDb; pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; /// NetworkProvider provides [`OffchainWorkers`] with all necessary hooks into the @@ -58,7 +62,7 @@ pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; pub trait NetworkProvider: NetworkStateInfo { /// Set the authorized peers. fn set_authorized_peers(&self, peers: HashSet); - + /// Set the authorized only flag. fn set_authorized_only(&self, reserved_only: bool); } @@ -78,67 +82,60 @@ where } /// An offchain workers manager. -pub struct OffchainWorkers { +pub struct OffchainWorkers { client: Arc, - db: Storage, ipfs_node: ipfs::Ipfs, _block: PhantomData, thread_pool: Mutex, - shared_client: SharedClient, + shared_client: api::SharedClient, } -impl OffchainWorkers { +impl OffchainWorkers { /// Creates new `OffchainWorkers`. - pub fn new(client: Arc, db: Storage, ipfs_rt: Arc>) -> Self { - let shared_client = SharedClient::new(); + pub fn new(client: Arc, ipfs_rt: Arc>) -> Self { + let shared_client = api::SharedClient::new(); let (ipfs_node, node_info) = std::thread::spawn(move || { - let mut ipfs_rt = ipfs_rt.lock(); - let options = ipfs::IpfsOptions::inmemory_with_generated_keys(); - ipfs_rt.block_on(async move { - // Start daemon and initialize repo - let (ipfs, fut) = ipfs::UninitializedIpfs::new(options).start().await.unwrap(); - tokio::task::spawn(fut); - let node_info = ipfs.identity().await.unwrap(); - (ipfs, node_info) - }) + let ipfs_rt = ipfs_rt.lock(); + let options = ipfs::IpfsOptions::inmemory_with_generated_keys(); + ipfs_rt.block_on(async move { + // Start daemon and initialize repo + let (ipfs, fut) = ipfs::UninitializedIpfs::new(options).start().await.unwrap(); + tokio::task::spawn(fut); + let node_info = ipfs.identity().await.unwrap(); + (ipfs, node_info) + }) }).join().expect("couldn't start the IPFS async runtime"); log::info!( - "IPFS: node started with PeerId {} and addresses {:?}", - node_info.0.into_peer_id(), node_info.1 + "IPFS: node started with PeerId {} and addresses {:?}", + node_info.0.into_peer_id(), node_info.1 ); Self { client, - db, ipfs_node, _block: PhantomData, - thread_pool: Mutex::new(ThreadPool::new(num_cpus::get())), + thread_pool: Mutex::new(ThreadPool::with_name( + "offchain-worker".into(), + num_cpus::get(), + )), shared_client, } } } -impl fmt::Debug for OffchainWorkers< - Client, - Storage, - Block, -> { +impl fmt::Debug for OffchainWorkers { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("OffchainWorkers").finish() } } -impl OffchainWorkers< - Client, - Storage, - Block, -> where +impl OffchainWorkers +where Block: 
traits::Block, Client: ProvideRuntimeApi + Send + Sync + 'static, Client::Api: OffchainWorkerApi, - Storage: OffchainStorage + 'static, { /// Start the offchain workers after given block. #[must_use] @@ -150,29 +147,25 @@ impl OffchainWorkers< ) -> impl Future { let runtime = self.client.runtime_api(); let at = BlockId::hash(header.hash()); - let has_api_v1 = runtime.has_api_with::, _>( - &at, |v| v == 1 - ); - let has_api_v2 = runtime.has_api_with::, _>( - &at, |v| v == 2 - ); + let has_api_v1 = runtime.has_api_with::, _>(&at, |v| v == 1); + let has_api_v2 = runtime.has_api_with::, _>(&at, |v| v == 2); let version = match (has_api_v1, has_api_v2) { (_, Ok(true)) => 2, (Ok(true), _) => 1, err => { - let help = "Consider turning off offchain workers if they are not part of your runtime."; + let help = + "Consider turning off offchain workers if they are not part of your runtime."; log::error!("Unsupported Offchain Worker API version: {:?}. {}.", err, help); 0 - } + }, }; debug!("Checking offchain workers at {:?}: version:{}", at, version); if version > 0 { let (api, runner) = api::AsyncApi::new( - self.db.clone(), network_provider, self.ipfs_node.clone(), is_validator, - self.shared_client.clone(), + self.shared_client.clone() ); debug!("Spawning offchain workers at {:?}", at); let header = header.clone(); @@ -181,18 +174,19 @@ impl OffchainWorkers< let runtime = client.runtime_api(); let api = Box::new(api); debug!("Running offchain workers at {:?}", at); - let context = ExecutionContext::OffchainCall(Some( - (api, offchain::Capabilities::all()) - )); + let context = + ExecutionContext::OffchainCall(Some((api, offchain::Capabilities::all()))); let run = if version == 2 { runtime.offchain_worker_with_context(&at, context, &header) } else { #[allow(deprecated)] runtime.offchain_worker_before_version_2_with_context( - &at, context, *header.number() + &at, + context, + *header.number(), ) }; - if let Err(e) = run { + if let Err(e) = run { log::error!("Error running offchain workers at {:?}: {:?}", at, e); } }); @@ -216,50 +210,57 @@ impl OffchainWorkers< } /// Inform the offchain worker about new imported blocks -pub async fn notification_future( +pub async fn notification_future( is_validator: bool, client: Arc, - offchain: Arc>, + offchain: Arc>, spawner: Spawner, network_provider: Arc, -) - where - Block: traits::Block, - Client: ProvideRuntimeApi + sc_client_api::BlockchainEvents + Send + Sync + 'static, - Client::Api: OffchainWorkerApi, - Storage: OffchainStorage + 'static, - Spawner: SpawnNamed, +) where + Block: traits::Block, + Client: + ProvideRuntimeApi + sc_client_api::BlockchainEvents + Send + Sync + 'static, + Client::Api: OffchainWorkerApi, + Spawner: SpawnNamed, { - client.import_notification_stream().for_each(move |n| { - if n.is_new_best { - spawner.spawn( - "offchain-on-block", - offchain.on_block_imported( - &n.header, - network_provider.clone(), - is_validator, - ).boxed(), - ); - } else { - log::debug!( - target: "sc_offchain", - "Skipping offchain workers for non-canon block: {:?}", - n.header, - ) - } + client + .import_notification_stream() + .for_each(move |n| { + if n.is_new_best { + spawner.spawn( + "offchain-on-block", + offchain + .on_block_imported(&n.header, network_provider.clone(), is_validator) + .boxed(), + ); + } else { + log::debug!( + target: "sc_offchain", + "Skipping offchain workers for non-canon block: {:?}", + n.header, + ) + } - ready(()) - }).await; + ready(()) + }) + .await; } #[cfg(test)] mod tests { use super::*; - use std::sync::Arc; + use 
futures::executor::block_on; + use sc_block_builder::BlockBuilderProvider as _; + use sc_client_api::Backend as _; use sc_network::{Multiaddr, PeerId}; - use substrate_test_runtime_client::{TestClient, runtime::Block}; use sc_transaction_pool::{BasicPool, FullChainApi}; - use sp_transaction_pool::{TransactionPool, InPoolTransaction}; + use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; + use sp_consensus::BlockOrigin; + use std::sync::Arc; + use substrate_test_runtime_client::{ + runtime::Block, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, + TestClientBuilderExt, + }; struct TestNetwork(); @@ -283,17 +284,15 @@ mod tests { } } - struct TestPool( - Arc, Block>> - ); + struct TestPool(Arc, Block>>); - impl sp_transaction_pool::OffchainSubmitTransaction for TestPool { + impl sc_transaction_pool_api::OffchainSubmitTransaction for TestPool { fn submit_at( &self, at: &BlockId, extrinsic: ::Extrinsic, ) -> Result<(), ()> { - let source = sp_transaction_pool::TransactionSource::Local; + let source = sc_transaction_pool_api::TransactionSource::Local; futures::executor::block_on(self.0.submit_one(&at, source, extrinsic)) .map(|_| ()) .map_err(|_| ()) @@ -308,23 +307,61 @@ mod tests { let spawner = sp_core::testing::TaskExecutor::new(); let pool = TestPool(BasicPool::new_full( Default::default(), + true.into(), None, spawner, client.clone(), )); - let db = sc_client_db::offchain::LocalStorage::new_test(); let network = Arc::new(TestNetwork()); let header = client.header(&BlockId::number(0)).unwrap().unwrap(); let mut ipfs_rt = tokio::runtime::Runtime::new().unwrap(); // when - let offchain: OffchainWorkers<_, _, _> = OffchainWorkers::new(client, db, &mut ipfs_rt); - futures::executor::block_on( - offchain.on_block_imported(&header, network, false) - ); + let offchain = OffchainWorkers::new(client, &mut ipfs_rt); + futures::executor::block_on(offchain.on_block_imported(&header, network, false)); // then assert_eq!(pool.0.status().ready, 1); assert_eq!(pool.0.ready().next().unwrap().is_propagable(), false); } + + #[test] + fn offchain_index_set_and_clear_works() { + use sp_core::offchain::OffchainStorage; + + sp_tracing::try_init_simple(); + + let (client, backend) = substrate_test_runtime_client::TestClientBuilder::new() + .enable_offchain_indexing_api() + .build_with_backend(); + let mut client = Arc::new(client); + let offchain_db = backend.offchain_storage().unwrap(); + + let key = &b"hello"[..]; + let value = &b"world"[..]; + let mut block_builder = client.new_block(Default::default()).unwrap(); + block_builder + .push(substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexSet( + key.to_vec(), + value.to_vec(), + )) + .unwrap(); + + let block = block_builder.build().unwrap().block; + block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).unwrap()); + + let mut block_builder = client.new_block(Default::default()).unwrap(); + block_builder + .push(substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexClear( + key.to_vec(), + )) + .unwrap(); + + let block = block_builder.build().unwrap().block; + block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + assert!(offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).is_none()); + } } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 40062db8f9b91..5962620d6e06e 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -3,7 +3,7 @@ description = "Connectivity manager based on 
reputation" homepage = "http://parity.io" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" name = "sc-peerset" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" repository = "https://github.com/paritytech/substrate/" @@ -15,11 +15,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.3.4" -libp2p = { version = "0.28.1", default-features = false } -sp-utils = { version = "2.0.0", path = "../../primitives/utils"} +futures = "0.3.9" +libp2p = { version = "0.39.1", default-features = false } +sc-utils = { version = "4.0.0-dev", path = "../utils"} log = "0.4.8" -serde_json = "1.0.41" +serde_json = "1.0.68" wasm-timer = "0.2" [dev-dependencies] diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 575743afa079c..9c6c5617c34b1 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,39 +18,80 @@ //! Peer Set Manager (PSM). Contains the strategy for choosing which nodes the network should be //! connected to. +//! +//! The PSM handles *sets* of nodes. A set of nodes is defined as the nodes that are believed to +//! support a certain capability, such as handling blocks and transactions of a specific chain, +//! or collating a certain parachain. +//! +//! For each node in each set, the peerset holds a flag specifying whether the node is +//! connected to us or not. +//! +//! This connected/disconnected status is specific to the node and set combination, and it is for +//! example possible for a node to be connected through a specific set but not another. +//! +//! In addition, for each, set, the peerset also holds a list of reserved nodes towards which it +//! will at all time try to maintain a connection with. mod peersstate; -use std::{collections::{HashSet, HashMap}, collections::VecDeque}; use futures::prelude::*; use log::{debug, error, trace}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use serde_json::json; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; -use wasm_timer::Instant; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + pin::Pin, + task::{Context, Poll}, + time::{Duration, Instant}, +}; +use wasm_timer::Delay; pub use libp2p::PeerId; /// We don't accept nodes whose reputation is under this value. -const BANNED_THRESHOLD: i32 = 82 * (i32::min_value() / 100); +const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100); /// Reputation change for a node when we get disconnected from it. const DISCONNECT_REPUTATION_CHANGE: i32 = -256; -/// Reserved peers group ID -const RESERVED_NODES: &'static str = "reserved"; /// Amount of time between the moment we disconnect from a node and the moment we remove it from /// the list. 
const FORGET_AFTER: Duration = Duration::from_secs(3600); #[derive(Debug)] enum Action { - AddReservedPeer(PeerId), - RemoveReservedPeer(PeerId), - SetReservedPeers(HashSet), - SetReservedOnly(bool), + AddReservedPeer(SetId, PeerId), + RemoveReservedPeer(SetId, PeerId), + SetReservedPeers(SetId, HashSet), + SetReservedOnly(SetId, bool), ReportPeer(PeerId, ReputationChange), - SetPriorityGroup(String, HashSet), - AddToPriorityGroup(String, PeerId), - RemoveFromPriorityGroup(String, PeerId), + AddToPeersSet(SetId, PeerId), + RemoveFromPeersSet(SetId, PeerId), +} + +/// Identifier of a set in the peerset. +/// +/// Can be constructed using the `From` trait implementation based on the index of the set +/// within [`PeersetConfig::sets`]. For example, the first element of [`PeersetConfig::sets`] is +/// later referred to with `SetId::from(0)`. It is intended that the code responsible for building +/// the [`PeersetConfig`] is also responsible for constructing the [`SetId`]s. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct SetId(usize); + +impl SetId { + pub const fn from(id: usize) -> Self { + SetId(id) + } +} + +impl From for SetId { + fn from(id: usize) -> Self { + SetId(id) + } +} + +impl From for usize { + fn from(id: SetId) -> Self { + id.0 + } } /// Description of a reputation adjustment for a node. @@ -70,7 +111,7 @@ impl ReputationChange { /// New reputation change that forces minimum possible reputation. pub const fn new_fatal(reason: &'static str) -> ReputationChange { - ReputationChange { value: i32::min_value(), reason } + ReputationChange { value: i32::MIN, reason } } } @@ -87,26 +128,27 @@ impl PeersetHandle { /// Has no effect if the node was already a reserved peer. /// /// > **Note**: Keep in mind that the networking has to know an address for this node, - /// > otherwise it will not be able to connect to it. - pub fn add_reserved_peer(&self, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddReservedPeer(peer_id)); + /// > otherwise it will not be able to connect to it. + pub fn add_reserved_peer(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::AddReservedPeer(set_id, peer_id)); } /// Remove a previously-added reserved peer. /// /// Has no effect if the node was not a reserved peer. - pub fn remove_reserved_peer(&self, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveReservedPeer(peer_id)); + pub fn remove_reserved_peer(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::RemoveReservedPeer(set_id, peer_id)); } - /// Sets whether or not the peerset only has connections . - pub fn set_reserved_only(&self, reserved: bool) { - let _ = self.tx.unbounded_send(Action::SetReservedOnly(reserved)); + /// Sets whether or not the peerset only has connections with nodes marked as reserved for + /// the given set. + pub fn set_reserved_only(&self, set_id: SetId, reserved: bool) { + let _ = self.tx.unbounded_send(Action::SetReservedOnly(set_id, reserved)); } - + /// Set reserved peers to the new set. - pub fn set_reserved_peers(&self, peer_ids: HashSet) { - let _ = self.tx.unbounded_send(Action::SetReservedPeers(peer_ids)); + pub fn set_reserved_peers(&self, set_id: SetId, peer_ids: HashSet) { + let _ = self.tx.unbounded_send(Action::SetReservedPeers(set_id, peer_ids)); } /// Reports an adjustment to the reputation of the given peer. 
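
`report_peer`, like the other handle methods, only enqueues the change; the ban itself happens when the peerset applies the new reputation against `BANNED_THRESHOLD` (82% of `i32::MIN`, defined above). A tiny self-contained illustration of the threshold and of why the arithmetic needs to saturate rather than wrap — `is_banned` is an invented helper, and the saturating behaviour is an assumption about the bookkeeping code in `peersstate`, which this hunk does not show:

```rust
/// Mirrors the constant above: connections are refused below 82% of `i32::MIN`.
const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100);

fn is_banned(reputation: i32) -> bool {
    reputation < BANNED_THRESHOLD
}

fn main() {
    // Applying a huge negative report must saturate; a wrapping add would flip a
    // badly-behaved peer all the way around to maximum reputation.
    let reputation = (-2_000_000_000i32).saturating_add(-500_000_000);
    assert_eq!(reputation, i32::MIN);
    assert!(is_banned(reputation));

    // A node just above the threshold is still acceptable.
    assert!(!is_banned(BANNED_THRESHOLD + 1));
    println!("ban threshold: {}", BANNED_THRESHOLD);
}
```
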
@@ -114,19 +156,14 @@ impl PeersetHandle { let _ = self.tx.unbounded_send(Action::ReportPeer(peer_id, score_diff)); } - /// Modify a priority group. - pub fn set_priority_group(&self, group_id: String, peers: HashSet) { - let _ = self.tx.unbounded_send(Action::SetPriorityGroup(group_id, peers)); - } - - /// Add a peer to a priority group. - pub fn add_to_priority_group(&self, group_id: String, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddToPriorityGroup(group_id, peer_id)); + /// Add a peer to a set. + pub fn add_to_peers_set(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::AddToPeersSet(set_id, peer_id)); } - /// Remove a peer from a priority group. - pub fn remove_from_priority_group(&self, group_id: String, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveFromPriorityGroup(group_id, peer_id)); + /// Remove a peer from a set. + pub fn remove_from_peers_set(&self, set_id: SetId, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::RemoveFromPeersSet(set_id, peer_id)); } } @@ -135,10 +172,18 @@ impl PeersetHandle { pub enum Message { /// Request to open a connection to the given peer. From the point of view of the PSM, we are /// immediately connected. - Connect(PeerId), + Connect { + set_id: SetId, + /// Peer to connect to. + peer_id: PeerId, + }, /// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`. - Drop(PeerId), + Drop { + set_id: SetId, + /// Peer to disconnect from. + peer_id: PeerId, + }, /// Equivalent to `Connect` for the peer corresponding to this incoming index. Accept(IncomingIndex), @@ -160,26 +205,33 @@ impl From for IncomingIndex { /// Configuration to pass when creating the peer set manager. #[derive(Debug)] pub struct PeersetConfig { + /// List of sets of nodes the peerset manages. + pub sets: Vec, +} + +/// Configuration for a single set of nodes. +#[derive(Debug)] +pub struct SetConfig { /// Maximum number of ingoing links to peers. pub in_peers: u32, /// Maximum number of outgoing links to peers. pub out_peers: u32, - /// List of bootstrap nodes to initialize the peer with. + /// List of bootstrap nodes to initialize the set with. /// /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. + /// > otherwise it will not be able to connect to them. pub bootnodes: Vec, - /// If true, we only accept nodes in [`PeersetConfig::priority_groups`]. - pub reserved_only: bool, - /// Lists of nodes we should always be connected to. /// /// > **Note**: Keep in mind that the networking has to know an address for these nodes, /// > otherwise it will not be able to connect to them. - pub priority_groups: Vec<(String, HashSet)>, + pub reserved_nodes: HashSet, + + /// If true, we only accept nodes in [`SetConfig::reserved_nodes`]. + pub reserved_only: bool, } /// Side of the peer set manager owned by the network. In other words, the "receiving" side. @@ -190,11 +242,10 @@ pub struct PeersetConfig { pub struct Peerset { /// Underlying data structure for the nodes's states. data: peersstate::PeersState, - /// If true, we only accept reserved nodes. - reserved_only: bool, - /// Lists of nodes that don't occupy slots and that we should try to always be connected to. - /// Is kept in sync with the list of reserved nodes in [`Peerset::data`]. 
- priority_groups: HashMap>, + /// For each set, lists of nodes that don't occupy slots and that we should try to always be + /// connected to, and whether only reserved nodes are accepted. Is kept in sync with the list + /// of non-slot-occupying nodes in [`Peerset::data`]. + reserved_nodes: Vec<(HashSet, bool)>, /// Receiver for messages from the `PeersetHandle` and from `tx`. rx: TracingUnboundedReceiver, /// Sending side of `rx`. @@ -205,6 +256,9 @@ pub struct Peerset { created: Instant, /// Last time when we updated the reputations of connected nodes. latest_time_update: Instant, + /// Next time to do a periodic call to `alloc_slots` with all sets. This is done once per + /// second, to match the period of the reputation updates. + next_periodic_alloc_slots: Delay, } impl Peerset { @@ -212,129 +266,159 @@ impl Peerset { pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) { let (tx, rx) = tracing_unbounded("mpsc_peerset_messages"); - let handle = PeersetHandle { - tx: tx.clone(), + let handle = PeersetHandle { tx: tx.clone() }; + + let mut peerset = { + let now = Instant::now(); + + Peerset { + data: peersstate::PeersState::new(config.sets.iter().map(|set| { + peersstate::SetConfig { in_peers: set.in_peers, out_peers: set.out_peers } + })), + tx, + rx, + reserved_nodes: config + .sets + .iter() + .map(|set| (set.reserved_nodes.clone(), set.reserved_only)) + .collect(), + message_queue: VecDeque::new(), + created: now, + latest_time_update: now, + next_periodic_alloc_slots: Delay::new(Duration::new(0, 0)), + } }; - let now = Instant::now(); - - let mut peerset = Peerset { - data: peersstate::PeersState::new(config.in_peers, config.out_peers), - tx, - rx, - reserved_only: config.reserved_only, - priority_groups: config.priority_groups.clone().into_iter().collect(), - message_queue: VecDeque::new(), - created: now, - latest_time_update: now, - }; + for (set, set_config) in config.sets.into_iter().enumerate() { + for node in set_config.reserved_nodes { + peerset.data.add_no_slot_node(set, node); + } - for node in config.priority_groups.into_iter().flat_map(|(_, l)| l) { - peerset.data.add_no_slot_node(node); + for peer_id in set_config.bootnodes { + if let peersstate::Peer::Unknown(entry) = peerset.data.peer(set, &peer_id) { + entry.discover(); + } else { + debug!(target: "peerset", "Duplicate bootnode in config: {:?}", peer_id); + } + } } - for peer_id in config.bootnodes { - if let peersstate::Peer::Unknown(entry) = peerset.data.peer(&peer_id) { - entry.discover(); - } else { - debug!(target: "peerset", "Duplicate bootnode in config: {:?}", peer_id); - } + for set_index in 0..peerset.data.num_sets() { + peerset.alloc_slots(SetId(set_index)); } - peerset.alloc_slots(); (peerset, handle) } - fn on_add_reserved_peer(&mut self, peer_id: PeerId) { - self.on_add_to_priority_group(RESERVED_NODES, peer_id); - } + fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { + let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id.clone()); + if !newly_inserted { + return + } - fn on_remove_reserved_peer(&mut self, peer_id: PeerId) { - self.on_remove_from_priority_group(RESERVED_NODES, peer_id); - } - - fn on_set_reserved_peers(&mut self, peer_ids: HashSet) { - self.on_set_priority_group(RESERVED_NODES, peer_ids); + self.data.add_no_slot_node(set_id.0, peer_id); + self.alloc_slots(set_id); } - fn on_set_reserved_only(&mut self, reserved_only: bool) { - self.reserved_only = reserved_only; + fn on_remove_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) 
{ + if !self.reserved_nodes[set_id.0].0.remove(&peer_id) { + return + } - if self.reserved_only { - // Disconnect all the nodes that aren't reserved. - for peer_id in self.data.connected_peers().cloned().collect::>().into_iter() { - if self.priority_groups.get(RESERVED_NODES).map_or(false, |g| g.contains(&peer_id)) { - continue; - } + self.data.remove_no_slot_node(set_id.0, &peer_id); - let peer = self.data.peer(&peer_id).into_connected() - .expect("We are enumerating connected peers, therefore the peer is connected; qed"); - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } + // Nothing more to do if not in reserved-only mode. + if !self.reserved_nodes[set_id.0].1 { + return + } - } else { - self.alloc_slots(); + // If, however, the peerset is in reserved-only mode, then the removed node needs to be + // disconnected. + if let peersstate::Peer::Connected(peer) = self.data.peer(set_id.0, &peer_id) { + peer.disconnect(); + self.message_queue.push_back(Message::Drop { set_id, peer_id }); } } - fn on_set_priority_group(&mut self, group_id: &str, peers: HashSet) { + fn on_set_reserved_peers(&mut self, set_id: SetId, peer_ids: HashSet) { // Determine the difference between the current group and the new list. let (to_insert, to_remove) = { - let current_group = self.priority_groups.entry(group_id.to_owned()).or_default(); - let to_insert = peers.difference(current_group) - .cloned().collect::>(); - let to_remove = current_group.difference(&peers) - .cloned().collect::>(); + let to_insert = peer_ids + .difference(&self.reserved_nodes[set_id.0].0) + .cloned() + .collect::>(); + let to_remove = self.reserved_nodes[set_id.0] + .0 + .difference(&peer_ids) + .cloned() + .collect::>(); (to_insert, to_remove) }; - // Enumerate elements in `peers` not in `current_group`. - for peer_id in &to_insert { - // We don't call `on_add_to_priority_group` here in order to avoid calling - // `alloc_slots` all the time. - self.priority_groups.entry(group_id.to_owned()).or_default().insert(peer_id.clone()); - self.data.add_no_slot_node(peer_id.clone()); + for node in to_insert { + self.on_add_reserved_peer(set_id, node); } - // Enumerate elements in `current_group` not in `peers`. - for peer in to_remove { - self.on_remove_from_priority_group(group_id, peer); + for node in to_remove { + self.on_remove_reserved_peer(set_id, node); } + } - if !to_insert.is_empty() { - self.alloc_slots(); + fn on_set_reserved_only(&mut self, set_id: SetId, reserved_only: bool) { + self.reserved_nodes[set_id.0].1 = reserved_only; + + if reserved_only { + // Disconnect all the nodes that aren't reserved. + for peer_id in + self.data.connected_peers(set_id.0).cloned().collect::>().into_iter() + { + if self.reserved_nodes[set_id.0].0.contains(&peer_id) { + continue + } + + let peer = self.data.peer(set_id.0, &peer_id).into_connected().expect( + "We are enumerating connected peers, therefore the peer is connected; qed", + ); + peer.disconnect(); + self.message_queue.push_back(Message::Drop { set_id, peer_id }); + } + } else { + self.alloc_slots(set_id); } } - fn on_add_to_priority_group(&mut self, group_id: &str, peer_id: PeerId) { - self.priority_groups.entry(group_id.to_owned()).or_default().insert(peer_id.clone()); - self.data.add_no_slot_node(peer_id); - self.alloc_slots(); + /// Returns the list of reserved peers. 
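Because the insert/remove difference is computed internally, callers can replace the reserved set wholesale. A sketch, with a hypothetical helper name:

```rust
use std::collections::HashSet;

use libp2p::PeerId;
use sc_peerset::{PeersetHandle, SetId};

// The peerset diffs `new_set` against the previous reserved set itself,
// adding the new entries and removing (and, in reserved-only mode,
// disconnecting) the stale ones.
fn rotate_reserved(handle: &PeersetHandle, new_set: HashSet<PeerId>) {
    handle.set_reserved_peers(SetId::from(0), new_set);
}
```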
+ pub fn reserved_peers(&self, set_id: SetId) -> impl Iterator { + self.reserved_nodes[set_id.0].0.iter() } - fn on_remove_from_priority_group(&mut self, group_id: &str, peer_id: PeerId) { - if let Some(priority_group) = self.priority_groups.get_mut(group_id) { - if !priority_group.remove(&peer_id) { - // `PeerId` wasn't in the group in the first place. - return; - } - } else { - // Group doesn't exist, so the `PeerId` can't be in it. - return; + /// Adds a node to the given set. The peerset will, if possible and not already the case, + /// try to connect to it. + /// + /// > **Note**: This has the same effect as [`PeersetHandle::add_to_peers_set`]. + pub fn add_to_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { + if let peersstate::Peer::Unknown(entry) = self.data.peer(set_id.0, &peer_id) { + entry.discover(); + self.alloc_slots(set_id); } + } - // If that `PeerId` isn't in any other group, then it is no longer no-slot-occupying. - if !self.priority_groups.values().any(|l| l.contains(&peer_id)) { - self.data.remove_no_slot_node(&peer_id); + fn on_remove_from_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { + // Don't do anything if node is reserved. + if self.reserved_nodes[set_id.0].0.contains(&peer_id) { + return } - // Disconnect the peer if necessary. - if group_id != RESERVED_NODES && self.reserved_only { - if let peersstate::Peer::Connected(peer) = self.data.peer(&peer_id) { - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } + match self.data.peer(set_id.0, &peer_id) { + peersstate::Peer::Connected(peer) => { + self.message_queue + .push_back(Message::Drop { set_id, peer_id: peer.peer_id().clone() }); + peer.disconnect().forget_peer(); + }, + peersstate::Peer::NotConnected(peer) => { + peer.forget_peer(); + }, + peersstate::Peer::Unknown(_) => {}, } } @@ -342,23 +426,31 @@ impl Peerset { // We want reputations to be up-to-date before adjusting them. self.update_time(); - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(mut peer) => { - peer.add_reputation(change.value); - if peer.reputation() < BANNED_THRESHOLD { - debug!(target: "peerset", "Report {}: {:+} to {}. Reason: {}, Disconnecting", - peer_id, change.value, peer.reputation(), change.reason - ); - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } else { - trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", - peer_id, change.value, peer.reputation(), change.reason - ); - } - }, - peersstate::Peer::NotConnected(mut peer) => peer.add_reputation(change.value), - peersstate::Peer::Unknown(peer) => peer.discover().add_reputation(change.value), + let mut reputation = self.data.peer_reputation(peer_id); + reputation.add_reputation(change.value); + if reputation.reputation() >= BANNED_THRESHOLD { + trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", + peer_id, change.value, reputation.reputation(), change.reason + ); + return + } + + debug!(target: "peerset", "Report {}: {:+} to {}. 
Reason: {}, Disconnecting", + peer_id, change.value, reputation.reputation(), change.reason + ); + + drop(reputation); + + for set_index in 0..self.data.num_sets() { + if let peersstate::Peer::Connected(peer) = self.data.peer(set_index, &peer_id) { + let peer = peer.disconnect(); + self.message_queue.push_back(Message::Drop { + set_id: SetId(set_index), + peer_id: peer.into_peer_id(), + }); + + self.alloc_slots(SetId(set_index)); + } } } @@ -393,125 +485,107 @@ impl Peerset { } reput.saturating_sub(diff) } - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(mut peer) => { - let before = peer.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer.set_reputation(after) - } - peersstate::Peer::NotConnected(mut peer) => { - if peer.reputation() == 0 && - peer.last_connected_or_discovered() + FORGET_AFTER < now - { - peer.forget_peer(); - } else { - let before = peer.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer.set_reputation(after) - } + + let mut peer_reputation = self.data.peer_reputation(peer_id); + + let before = peer_reputation.reputation(); + let after = reput_tick(before); + trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); + peer_reputation.set_reputation(after); + + if after != 0 { + continue + } + + drop(peer_reputation); + + // If the peer reaches a reputation of 0, and there is no connection to it, + // forget it. + for set_index in 0..self.data.num_sets() { + match self.data.peer(set_index, &peer_id) { + peersstate::Peer::Connected(_) => {}, + peersstate::Peer::NotConnected(peer) => { + if peer.last_connected_or_discovered() + FORGET_AFTER < now { + peer.forget_peer(); + } + }, + peersstate::Peer::Unknown(_) => { + // Happens if this peer does not belong to this set. + }, } - peersstate::Peer::Unknown(_) => unreachable!("We iterate over known peers; qed") - }; + } } } } - /// Try to fill available out slots with nodes. - fn alloc_slots(&mut self) { + /// Try to fill available out slots with nodes for the given set. + fn alloc_slots(&mut self, set_id: SetId) { self.update_time(); // Try to connect to all the reserved nodes that we are not connected to. - loop { - let next = { - let data = &mut self.data; - self.priority_groups - .get(RESERVED_NODES) - .into_iter() - .flatten() - .filter(move |n| { - data.peer(n).into_connected().is_none() - }) - .next() - .cloned() - }; - - let next = match next { - Some(n) => n, - None => break, - }; - - let next = match self.data.peer(&next) { + for reserved_node in &self.reserved_nodes[set_id.0].0 { + let entry = match self.data.peer(set_id.0, reserved_node) { peersstate::Peer::Unknown(n) => n.discover(), peersstate::Peer::NotConnected(n) => n, - peersstate::Peer::Connected(_) => { - debug_assert!(false, "State inconsistency: not connected state"); - break; - } + peersstate::Peer::Connected(_) => continue, }; - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. + // Don't connect to nodes with an abysmal reputation, even if they're reserved. + // This is a rather opinionated behaviour, and it wouldn't be fundamentally wrong to + // remove that check. If necessary, the peerset should be refactored to give more + // control over what happens in that situation. 
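Reputation being global per peer, a single bad report can sever a peer from every set at once. A sketch of the calling side, with a hypothetical reason string:

```rust
use libp2p::PeerId;
use sc_peerset::{PeersetHandle, ReputationChange};

// A fatal report forces the reputation to i32::MIN, which is below
// BANNED_THRESHOLD, so the peerset emits `Message::Drop` for every set the
// peer is currently connected in.
fn ban_peer(handle: &PeersetHandle, peer: PeerId) {
    handle.report_peer(peer, ReputationChange::new_fatal("useless-peer"));
}
```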
+ if entry.reputation() < BANNED_THRESHOLD { + break + } + + match entry.try_outgoing() { + Ok(conn) => self + .message_queue + .push_back(Message::Connect { set_id, peer_id: conn.into_peer_id() }), + Err(_) => { + // An error is returned only if no slot is available. Reserved nodes are + // marked in the state machine with a flag saying "doesn't occupy a slot", + // and as such this should never happen. + debug_assert!(false); + log::error!( + target: "peerset", + "Not enough slots to connect to reserved node" + ); + }, } } + // Now, we try to connect to other nodes. + // Nothing more to do if we're in reserved mode. - if self.reserved_only { - return; + if self.reserved_nodes[set_id.0].1 { + return } - // Try to connect to all the nodes in priority groups and that we are not connected to. - loop { - let next = { - let data = &mut self.data; - self.priority_groups - .values() - .flatten() - .filter(move |n| { - data.peer(n).into_connected().is_none() - }) - .next() - .cloned() - }; - - let next = match next { + // Try to grab the next node to attempt to connect to. + // Since `highest_not_connected_peer` is rather expensive to call, check beforehand + // whether we have an available slot. + while self.data.has_free_outgoing_slot(set_id.0) { + let next = match self.data.highest_not_connected_peer(set_id.0) { Some(n) => n, None => break, }; - let next = match self.data.peer(&next) { - peersstate::Peer::Unknown(n) => n.discover(), - peersstate::Peer::NotConnected(n) => n, - peersstate::Peer::Connected(_) => { - debug_assert!(false, "State inconsistency: not connected state"); - break; - } - }; - - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. - } - } - - // Now, we try to connect to non-priority nodes. - loop { - // Try to grab the next node to attempt to connect to. - let next = match self.data.highest_not_connected_peer() { - Some(p) => p, - None => break, // No known node to add. - }; - // Don't connect to nodes with an abysmal reputation. if next.reputation() < BANNED_THRESHOLD { - break; + break } match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. + Ok(conn) => self + .message_queue + .push_back(Message::Connect { set_id, peer_id: conn.into_peer_id() }), + Err(_) => { + // This branch can only be entered if there is no free slot, which is + // checked above. + debug_assert!(false); + break + }, } } } @@ -522,22 +596,22 @@ impl Peerset { /// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming /// connection implicitly means `Connect`, but incoming connections aren't cancelled by /// `dropped`. - /// // Implementation note: because of concurrency issues, it is possible that we push a `Connect` // message to the output channel with a `PeerId`, and that `incoming` gets called with the same // `PeerId` before that message has been read by the user. In this situation we must not answer. 
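None of the queued messages take effect until the owner polls the `Peerset`, which implements `Stream`; polling also triggers the periodic once-per-second `alloc_slots` pass. A sketch of a consumer, with placeholder handler bodies:

```rust
use futures::prelude::*;
use sc_peerset::{Message, Peerset};

async fn drive(mut peerset: Peerset) {
    // `Peerset` implements `Stream<Item = Message>`.
    while let Some(message) = peerset.next().await {
        match message {
            Message::Connect { set_id, peer_id } => {
                // Open the substream for the protocol mapped to `set_id`.
                let _ = (set_id, peer_id);
            },
            Message::Drop { set_id, peer_id } => {
                // Close it again.
                let _ = (set_id, peer_id);
            },
            Message::Accept(index) => {
                // Confirm the incoming connection identified by `index`.
                let _ = index;
            },
            Message::Reject(index) => {
                // Refuse the incoming connection identified by `index`.
                let _ = index;
            },
        }
    }
}
```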
- pub fn incoming(&mut self, peer_id: PeerId, index: IncomingIndex) { + pub fn incoming(&mut self, set_id: SetId, peer_id: PeerId, index: IncomingIndex) { trace!(target: "peerset", "Incoming {:?}", peer_id); + self.update_time(); - if self.reserved_only { - if !self.priority_groups.get(RESERVED_NODES).map_or(false, |n| n.contains(&peer_id)) { + if self.reserved_nodes[set_id.0].1 { + if !self.reserved_nodes[set_id.0].0.contains(&peer_id) { self.message_queue.push_back(Message::Reject(index)); - return; + return } } - let not_connected = match self.data.peer(&peer_id) { + let not_connected = match self.data.peer(set_id.0, &peer_id) { // If we're already connected, don't answer, as the docs mention. peersstate::Peer::Connected(_) => return, peersstate::Peer::NotConnected(mut entry) => { @@ -562,42 +636,28 @@ impl Peerset { /// /// Must only be called after the PSM has either generated a `Connect` message with this /// `PeerId`, or accepted an incoming connection with this `PeerId`. - pub fn dropped(&mut self, peer_id: PeerId) { - trace!(target: "peerset", "Dropping {:?}", peer_id); - + pub fn dropped(&mut self, set_id: SetId, peer_id: PeerId, reason: DropReason) { // We want reputations to be up-to-date before adjusting them. self.update_time(); - match self.data.peer(&peer_id) { + match self.data.peer(set_id.0, &peer_id) { peersstate::Peer::Connected(mut entry) => { // Decrease the node's reputation so that we don't try it again and again and again. entry.add_reputation(DISCONNECT_REPUTATION_CHANGE); + trace!(target: "peerset", "Dropping {}: {:+} to {}", + peer_id, DISCONNECT_REPUTATION_CHANGE, entry.reputation()); entry.disconnect(); - } - peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => - error!(target: "peerset", "Received dropped() for non-connected node"), + }, + peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => { + error!(target: "peerset", "Received dropped() for non-connected node") + }, } - self.alloc_slots(); - } - - /// Adds discovered peer ids to the PSM. - /// - /// > **Note**: There is no equivalent "expired" message, meaning that it is the responsibility - /// > of the PSM to remove `PeerId`s that fail to dial too often. - pub fn discovered>(&mut self, peer_ids: I) { - let mut discovered_any = false; - - for peer_id in peer_ids { - if let peersstate::Peer::Unknown(entry) = self.data.peer(&peer_id) { - entry.discover(); - discovered_any = true; - } + if let DropReason::Refused = reason { + self.on_remove_from_peers_set(set_id, peer_id); } - if discovered_any { - self.alloc_slots(); - } + self.alloc_slots(set_id); } /// Reports an adjustment to the reputation of the given peer. 
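A sketch of how the network side would call the two direct entry points above; the index value is arbitrary:

```rust
use libp2p::PeerId;
use sc_peerset::{DropReason, IncomingIndex, Peerset, SetId};

fn on_inbound(peerset: &mut Peerset, peer: PeerId) {
    // The peerset answers asynchronously with `Message::Accept(IncomingIndex(7))`
    // or `Message::Reject(IncomingIndex(7))` on its stream.
    peerset.incoming(SetId::from(0), peer, IncomingIndex(7));
}

fn on_closed(peerset: &mut Peerset, peer: PeerId) {
    // `DropReason::Refused` would additionally evict the peer from the set.
    peerset.dropped(SetId::from(0), peer, DropReason::Unknown);
}
```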
@@ -613,23 +673,29 @@ impl Peerset { self.update_time(); json!({ - "nodes": self.data.peers().cloned().collect::>().into_iter().map(|peer_id| { - let state = match self.data.peer(&peer_id) { - peersstate::Peer::Connected(entry) => json!({ - "connected": true, - "reputation": entry.reputation() - }), - peersstate::Peer::NotConnected(entry) => json!({ - "connected": false, - "reputation": entry.reputation() - }), - peersstate::Peer::Unknown(_) => - unreachable!("We iterate over the known peers; QED") - }; - - (peer_id.to_base58(), state) - }).collect::>(), - "reserved_only": self.reserved_only, + "sets": (0..self.data.num_sets()).map(|set_index| { + json!({ + "nodes": self.data.peers().cloned().collect::>().into_iter().filter_map(|peer_id| { + let state = match self.data.peer(set_index, &peer_id) { + peersstate::Peer::Connected(entry) => json!({ + "connected": true, + "reputation": entry.reputation() + }), + peersstate::Peer::NotConnected(entry) => json!({ + "connected": false, + "reputation": entry.reputation() + }), + peersstate::Peer::Unknown(_) => return None, + }; + + Some((peer_id.to_base58(), state)) + }).collect::>(), + "reserved_nodes": self.reserved_nodes[set_index].0.iter().map(|peer_id| { + peer_id.to_base58() + }).collect::>(), + "reserved_only": self.reserved_nodes[set_index].1, + }) + }).collect::>(), "message_queue": self.message_queue.len(), }) } @@ -638,11 +704,6 @@ impl Peerset { pub fn num_discovered_peers(&self) -> usize { self.data.peers().len() } - - /// Returns the content of a priority group. - pub fn priority_group(&self, group_id: &str) -> Option> { - self.priority_groups.get(group_id).map(|l| l.iter()) - } } impl Stream for Peerset { @@ -651,7 +712,16 @@ impl Stream for Peerset { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { loop { if let Some(message) = self.message_queue.pop_front() { - return Poll::Ready(Some(message)); + return Poll::Ready(Some(message)) + } + + if let Poll::Ready(_) = Future::poll(Pin::new(&mut self.next_periodic_alloc_slots), cx) + { + self.next_periodic_alloc_slots = Delay::new(Duration::new(1, 0)); + + for set_index in 0..self.data.num_sets() { + self.alloc_slots(SetId(set_index)); + } } let action = match Stream::poll_next(Pin::new(&mut self.rx), cx) { @@ -661,32 +731,43 @@ impl Stream for Peerset { }; match action { - Action::AddReservedPeer(peer_id) => - self.on_add_reserved_peer(peer_id), - Action::RemoveReservedPeer(peer_id) => - self.on_remove_reserved_peer(peer_id), - Action::SetReservedPeers(peer_ids) => - self.on_set_reserved_peers(peer_ids), - Action::SetReservedOnly(reserved) => - self.on_set_reserved_only(reserved), - Action::ReportPeer(peer_id, score_diff) => - self.on_report_peer(peer_id, score_diff), - Action::SetPriorityGroup(group_id, peers) => - self.on_set_priority_group(&group_id, peers), - Action::AddToPriorityGroup(group_id, peer_id) => - self.on_add_to_priority_group(&group_id, peer_id), - Action::RemoveFromPriorityGroup(group_id, peer_id) => - self.on_remove_from_priority_group(&group_id, peer_id), + Action::AddReservedPeer(set_id, peer_id) => + self.on_add_reserved_peer(set_id, peer_id), + Action::RemoveReservedPeer(set_id, peer_id) => + self.on_remove_reserved_peer(set_id, peer_id), + Action::SetReservedPeers(set_id, peer_ids) => + self.on_set_reserved_peers(set_id, peer_ids), + Action::SetReservedOnly(set_id, reserved) => + self.on_set_reserved_only(set_id, reserved), + Action::ReportPeer(peer_id, score_diff) => self.on_report_peer(peer_id, score_diff), + Action::AddToPeersSet(sets_name, 
peer_id) => + self.add_to_peers_set(sets_name, peer_id), + Action::RemoveFromPeersSet(sets_name, peer_id) => + self.on_remove_from_peers_set(sets_name, peer_id), } } } } +/// Reason for calling [`Peerset::dropped`]. +pub enum DropReason { + /// Substream or connection has been closed for an unknown reason. + Unknown, + /// Substream or connection has been explicitly refused by the target. In other words, the + /// peer doesn't actually belong to this set. + /// + /// This has the side effect of calling [`PeersetHandle::remove_from_peers_set`]. + Refused, +} + #[cfg(test)] mod tests { - use libp2p::PeerId; + use super::{ + IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId, + BANNED_THRESHOLD, + }; use futures::prelude::*; - use super::{PeersetConfig, Peerset, Message, IncomingIndex, ReputationChange, BANNED_THRESHOLD}; + use libp2p::PeerId; use std::{pin::Pin, task::Poll, thread, time::Duration}; fn assert_messages(mut peerset: Peerset, messages: Vec) -> Peerset { @@ -710,21 +791,26 @@ mod tests { let reserved_peer = PeerId::random(); let reserved_peer2 = PeerId::random(); let config = PeersetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode], - reserved_only: true, - priority_groups: Vec::new(), + sets: vec![SetConfig { + in_peers: 0, + out_peers: 2, + bootnodes: vec![bootnode], + reserved_nodes: Default::default(), + reserved_only: true, + }], }; let (peerset, handle) = Peerset::from_config(config); - handle.add_reserved_peer(reserved_peer.clone()); - handle.add_reserved_peer(reserved_peer2.clone()); + handle.add_reserved_peer(SetId::from(0), reserved_peer.clone()); + handle.add_reserved_peer(SetId::from(0), reserved_peer2.clone()); - assert_messages(peerset, vec![ - Message::Connect(reserved_peer), - Message::Connect(reserved_peer2) - ]); + assert_messages( + peerset, + vec![ + Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer }, + Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer2 }, + ], + ); } #[test] @@ -738,25 +824,30 @@ mod tests { let ii3 = IncomingIndex(3); let ii4 = IncomingIndex(3); let config = PeersetConfig { - in_peers: 2, - out_peers: 1, - bootnodes: vec![bootnode.clone()], - reserved_only: false, - priority_groups: Vec::new(), + sets: vec![SetConfig { + in_peers: 2, + out_peers: 1, + bootnodes: vec![bootnode.clone()], + reserved_nodes: Default::default(), + reserved_only: false, + }], }; let (mut peerset, _handle) = Peerset::from_config(config); - peerset.incoming(incoming.clone(), ii); - peerset.incoming(incoming.clone(), ii4); - peerset.incoming(incoming2.clone(), ii2); - peerset.incoming(incoming3.clone(), ii3); - - assert_messages(peerset, vec![ - Message::Connect(bootnode.clone()), - Message::Accept(ii), - Message::Accept(ii2), - Message::Reject(ii3), - ]); + peerset.incoming(SetId::from(0), incoming.clone(), ii); + peerset.incoming(SetId::from(0), incoming.clone(), ii4); + peerset.incoming(SetId::from(0), incoming2.clone(), ii2); + peerset.incoming(SetId::from(0), incoming3.clone(), ii3); + + assert_messages( + peerset, + vec![ + Message::Connect { set_id: SetId::from(0), peer_id: bootnode.clone() }, + Message::Accept(ii), + Message::Accept(ii2), + Message::Reject(ii3), + ], + ); } #[test] @@ -764,19 +855,19 @@ mod tests { let incoming = PeerId::random(); let ii = IncomingIndex(1); let config = PeersetConfig { - in_peers: 50, - out_peers: 50, - bootnodes: vec![], - reserved_only: true, - priority_groups: vec![], + sets: vec![SetConfig { + in_peers: 50, + out_peers: 50, + bootnodes: 
vec![], + reserved_nodes: Default::default(), + reserved_only: true, + }], }; let (mut peerset, _) = Peerset::from_config(config); - peerset.incoming(incoming.clone(), ii); + peerset.incoming(SetId::from(0), incoming.clone(), ii); - assert_messages(peerset, vec![ - Message::Reject(ii), - ]); + assert_messages(peerset, vec![Message::Reject(ii)]); } #[test] @@ -785,32 +876,39 @@ mod tests { let discovered = PeerId::random(); let discovered2 = PeerId::random(); let config = PeersetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode.clone()], - reserved_only: false, - priority_groups: vec![], + sets: vec![SetConfig { + in_peers: 0, + out_peers: 2, + bootnodes: vec![bootnode.clone()], + reserved_nodes: Default::default(), + reserved_only: false, + }], }; let (mut peerset, _handle) = Peerset::from_config(config); - peerset.discovered(Some(discovered.clone())); - peerset.discovered(Some(discovered.clone())); - peerset.discovered(Some(discovered2)); + peerset.add_to_peers_set(SetId::from(0), discovered.clone()); + peerset.add_to_peers_set(SetId::from(0), discovered.clone()); + peerset.add_to_peers_set(SetId::from(0), discovered2); - assert_messages(peerset, vec![ - Message::Connect(bootnode), - Message::Connect(discovered), - ]); + assert_messages( + peerset, + vec![ + Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, + Message::Connect { set_id: SetId::from(0), peer_id: discovered }, + ], + ); } #[test] fn test_peerset_banned() { let (mut peerset, handle) = Peerset::from_config(PeersetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: vec![], - reserved_only: false, - priority_groups: vec![], + sets: vec![SetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: vec![], + reserved_nodes: Default::default(), + reserved_only: false, + }], }); // We ban a node by setting its reputation under the threshold. @@ -822,7 +920,7 @@ mod tests { assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); // Check that an incoming connection from that node gets refused. - peerset.incoming(peer_id.clone(), IncomingIndex(1)); + peerset.incoming(SetId::from(0), peer_id, IncomingIndex(1)); if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); } else { @@ -833,7 +931,7 @@ mod tests { thread::sleep(Duration::from_millis(1500)); // Try again. This time the node should be accepted. - peerset.incoming(peer_id.clone(), IncomingIndex(2)); + peerset.incoming(SetId::from(0), peer_id, IncomingIndex(2)); while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { assert_eq!(msg.unwrap(), Message::Accept(IncomingIndex(2))); } @@ -843,4 +941,45 @@ mod tests { futures::executor::block_on(fut); } + + #[test] + fn test_relloc_after_banned() { + let (mut peerset, handle) = Peerset::from_config(PeersetConfig { + sets: vec![SetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: vec![], + reserved_nodes: Default::default(), + reserved_only: false, + }], + }); + + // We ban a node by setting its reputation under the threshold. + let peer_id = PeerId::random(); + handle.report_peer(peer_id.clone(), ReputationChange::new(BANNED_THRESHOLD - 1, "")); + + let fut = futures::future::poll_fn(move |cx| { + // We need one polling for the message to be processed. + assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); + + // Check that an incoming connection from that node gets refused. 
+ // This is already tested in other tests, but it is done again here because it doesn't + // hurt. + peerset.incoming(SetId::from(0), peer_id, IncomingIndex(1)); + if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { + assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); + } else { + panic!() + } + + // Wait for the peerset to change its mind and actually connect to it. + while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { + assert_eq!(msg.unwrap(), Message::Connect { set_id: SetId::from(0), peer_id }); + } + + Poll::Ready(()) + }); + + futures::executor::block_on(fut); + } } diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 59879f629e31e..7717620eae3a7 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -1,25 +1,28 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Reputation and slots allocation system behind the peerset. //! //! The [`PeersState`] state machine is responsible for managing the reputation and allocating -//! slots. It holds a list of nodes, each associated with a reputation value and whether we are -//! connected or not to this node. Thanks to this list, it knows how many slots are occupied. It -//! also holds a list of nodes which don't occupy slots. +//! slots. It holds a list of nodes, each associated with a reputation value, a list of sets the +//! node belongs to, and for each set whether we are connected or not to this node. Thanks to this +//! list, it knows how many slots are occupied. It also holds a list of nodes which don't occupy +//! slots. //! //! > Note: This module is purely dedicated to managing slots and reputations. Features such as //! > for example connecting to some nodes in priority should be added outside of this @@ -27,8 +30,14 @@ use libp2p::PeerId; use log::error; -use std::{borrow::Cow, collections::{HashSet, HashMap}}; -use wasm_timer::Instant; +use std::{ + borrow::Cow, + collections::{ + hash_map::{Entry, OccupiedEntry}, + HashMap, HashSet, + }, + time::Instant, +}; /// State storage behind the peerset. /// @@ -36,26 +45,42 @@ use wasm_timer::Instant; /// /// This struct is nothing more but a data structure containing a list of nodes, where each node /// has a reputation and is either connected to us or not. -/// #[derive(Debug, Clone)] pub struct PeersState { /// List of nodes that we know about. 
/// /// > **Note**: This list should really be ordered by decreasing reputation, so that we can - /// easily select the best node to connect to. As a first draft, however, we don't - /// sort, to make the logic easier. + /// easily select the best node to connect to. As a first draft, however, we don't + /// sort, to make the logic easier. nodes: HashMap, - /// Number of slot-occupying nodes for which the `ConnectionState` is `In`. + /// Configuration of each set. The size of this `Vec` is never modified. + sets: Vec, +} + +/// Configuration of a single set. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct SetConfig { + /// Maximum allowed number of slot-occupying nodes for ingoing connections. + pub in_peers: u32, + + /// Maximum allowed number of slot-occupying nodes for outgoing connections. + pub out_peers: u32, +} + +/// State of a single set. +#[derive(Debug, Clone, PartialEq, Eq)] +struct SetInfo { + /// Number of slot-occupying nodes for which the `MembershipState` is `In`. num_in: u32, - /// Number of slot-occupying nodes for which the `ConnectionState` is `In`. + /// Number of slot-occupying nodes for which the `MembershipState` is `In`. num_out: u32, - /// Maximum allowed number of slot-occupying nodes for which the `ConnectionState` is `In`. + /// Maximum allowed number of slot-occupying nodes for which the `MembershipState` is `In`. max_in: u32, - /// Maximum allowed number of slot-occupying nodes for which the `ConnectionState` is `Out`. + /// Maximum allowed number of slot-occupying nodes for which the `MembershipState` is `Out`. max_out: u32, /// List of node identities (discovered or not) that don't occupy slots. @@ -67,35 +92,34 @@ pub struct PeersState { } /// State of a single node that we know about. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] struct Node { - /// Whether we are connected to this node. - connection_state: ConnectionState, + /// List of sets the node belongs to. + /// Always has a fixed size equal to the one of [`PeersState::set`]. The various possible sets + /// are indices into this `Vec`. + sets: Vec, - /// Reputation value of the node, between `i32::min_value` (we hate that node) and - /// `i32::max_value` (we love that node). + /// Reputation value of the node, between `i32::MIN` (we hate that node) and + /// `i32::MAX` (we love that node). reputation: i32, } -impl Default for Node { - fn default() -> Node { - Node { - connection_state: ConnectionState::NotConnected { - last_connected: Instant::now(), - }, - reputation: 0, - } +impl Node { + fn new(num_sets: usize) -> Node { + Node { sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0 } } } -/// Whether we are connected to a node. +/// Whether we are connected to a node in the context of a specific set. #[derive(Debug, Copy, Clone, PartialEq, Eq)] -enum ConnectionState { +enum MembershipState { + /// Node isn't part of that set. + NotMember, /// We are connected through an ingoing connection. In, /// We are connected through an outgoing connection. Out, - /// We are not connected to this node. + /// Node is part of that set, but we are not connected to it. NotConnected { /// When we were last connected to the node, or if we were never connected when we /// discovered it. @@ -103,50 +127,78 @@ enum ConnectionState { }, } -impl ConnectionState { +impl MembershipState { /// Returns `true` for `In` and `Out`. 
fn is_connected(self) -> bool { match self { - ConnectionState::In => true, - ConnectionState::Out => true, - ConnectionState::NotConnected { .. } => false, + MembershipState::NotMember => false, + MembershipState::In => true, + MembershipState::Out => true, + MembershipState::NotConnected { .. } => false, } } } impl PeersState { /// Builds a new empty `PeersState`. - pub fn new(in_peers: u32, out_peers: u32) -> Self { + pub fn new(sets: impl IntoIterator) -> Self { PeersState { nodes: HashMap::new(), - num_in: 0, - num_out: 0, - max_in: in_peers, - max_out: out_peers, - no_slot_nodes: HashSet::new(), + sets: sets + .into_iter() + .map(|config| SetInfo { + num_in: 0, + num_out: 0, + max_in: config.in_peers, + max_out: config.out_peers, + no_slot_nodes: HashSet::new(), + }) + .collect(), } } - /// Returns an object that grants access to the state of a peer. - pub fn peer<'a>(&'a mut self, peer_id: &'a PeerId) -> Peer<'a> { - match self.nodes.get_mut(peer_id) { - None => return Peer::Unknown(UnknownPeer { - parent: self, + /// Returns the number of sets. + /// + /// Corresponds to the number of elements passed to [`PeersState::new`]. + pub fn num_sets(&self) -> usize { + self.sets.len() + } + + /// Returns an object that grants access to the reputation value of a peer. + pub fn peer_reputation(&mut self, peer_id: PeerId) -> Reputation { + if !self.nodes.contains_key(&peer_id) { + self.nodes.insert(peer_id, Node::new(self.sets.len())); + } + + let entry = match self.nodes.entry(peer_id) { + Entry::Vacant(_) => unreachable!("guaranteed to be inserted above; qed"), + Entry::Occupied(e) => e, + }; + + Reputation { node: Some(entry) } + } + + /// Returns an object that grants access to the state of a peer in the context of a specific + /// set. + /// + /// # Panic + /// + /// `set` must be within range of the sets passed to [`PeersState::new`]. + pub fn peer<'a>(&'a mut self, set: usize, peer_id: &'a PeerId) -> Peer<'a> { + // The code below will panic anyway if this happens to be false, but this earlier assert + // makes it explicit what is wrong. + assert!(set < self.sets.len()); + + match self.nodes.get_mut(peer_id).map(|p| &p.sets[set]) { + None | Some(MembershipState::NotMember) => + Peer::Unknown(UnknownPeer { parent: self, set, peer_id: Cow::Borrowed(peer_id) }), + Some(MembershipState::In) | Some(MembershipState::Out) => + Peer::Connected(ConnectedPeer { state: self, set, peer_id: Cow::Borrowed(peer_id) }), + Some(MembershipState::NotConnected { .. }) => Peer::NotConnected(NotConnectedPeer { + state: self, + set, peer_id: Cow::Borrowed(peer_id), }), - Some(peer) => { - if peer.connection_state.is_connected() { - Peer::Connected(ConnectedPeer { - state: self, - peer_id: Cow::Borrowed(peer_id), - }) - } else { - Peer::NotConnected(NotConnectedPeer { - state: self, - peer_id: Cow::Borrowed(peer_id), - }) - } - } } } @@ -157,86 +209,111 @@ impl PeersState { self.nodes.keys() } - /// Returns the list of peers we are connected to. + /// Returns the list of peers we are connected to in the context of a specific set. + /// + /// # Panic + /// + /// `set` must be within range of the sets passed to [`PeersState::new`]. // Note: this method could theoretically return a `ConnectedPeer`, but implementing that // isn't simple. 
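Since `peersstate` is an internal module, the following sketch is crate-internal and mirrors the unit tests at the bottom of the file:

```rust
use std::iter;

use libp2p::PeerId;

use crate::peersstate::{Peer, PeersState, SetConfig};

fn sketch() {
    // One set with a single ingoing and a single outgoing slot.
    let mut state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 }));
    let id = PeerId::random();

    // A peer must be discovered before a slot can be allocated to it.
    if let Peer::Unknown(entry) = state.peer(0, &id) {
        let not_connected = entry.discover();
        assert!(not_connected.try_accept_incoming().is_ok());
    }
}
```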
- pub fn connected_peers(&self) -> impl Iterator { - self.nodes.iter() - .filter(|(_, p)| p.connection_state.is_connected()) + pub fn connected_peers(&self, set: usize) -> impl Iterator { + // The code below will panic anyway if this happens to be false, but this earlier assert + // makes it explicit what is wrong. + assert!(set < self.sets.len()); + + self.nodes + .iter() + .filter(move |(_, p)| p.sets[set].is_connected()) .map(|(p, _)| p) } /// Returns the peer with the highest reputation and that we are not connected to. /// /// If multiple nodes have the same reputation, which one is returned is unspecified. - pub fn highest_not_connected_peer(&mut self) -> Option { - let outcome = self.nodes + /// + /// # Panic + /// + /// `set` must be within range of the sets passed to [`PeersState::new`]. + pub fn highest_not_connected_peer(&mut self, set: usize) -> Option { + // The code below will panic anyway if this happens to be false, but this earlier assert + // makes it explicit what is wrong. + assert!(set < self.sets.len()); + + let outcome = self + .nodes .iter_mut() - .filter(|(_, Node { connection_state, .. })| !connection_state.is_connected()) + .filter(|(_, Node { sets, .. })| match sets[set] { + MembershipState::NotMember => false, + MembershipState::In => false, + MembershipState::Out => false, + MembershipState::NotConnected { .. } => true, + }) .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { if let Some(cur_node) = cur_node.take() { if cur_node.1.reputation >= to_try.1.reputation { - return Some(cur_node); + return Some(cur_node) } } Some(to_try) }) - .map(|(peer_id, _)| peer_id.clone()); + .map(|(peer_id, _)| *peer_id); - if let Some(peer_id) = outcome { - Some(NotConnectedPeer { - state: self, - peer_id: Cow::Owned(peer_id), - }) - } else { - None - } + outcome.map(move |peer_id| NotConnectedPeer { + state: self, + set, + peer_id: Cow::Owned(peer_id), + }) + } + + /// Returns `true` if there is a free outgoing slot available related to this set. + pub fn has_free_outgoing_slot(&self, set: usize) -> bool { + self.sets[set].num_out < self.sets[set].max_out } /// Add a node to the list of nodes that don't occupy slots. /// - /// Has no effect if the peer was already in the group. - pub fn add_no_slot_node(&mut self, peer_id: PeerId) { + /// Has no effect if the node was already in the group. + pub fn add_no_slot_node(&mut self, set: usize, peer_id: PeerId) { // Reminder: `HashSet::insert` returns false if the node was already in the set - if !self.no_slot_nodes.insert(peer_id.clone()) { - return; + if !self.sets[set].no_slot_nodes.insert(peer_id) { + return } if let Some(peer) = self.nodes.get_mut(&peer_id) { - match peer.connection_state { - ConnectionState::In => self.num_in -= 1, - ConnectionState::Out => self.num_out -= 1, - ConnectionState::NotConnected { .. } => {}, + match peer.sets[set] { + MembershipState::In => self.sets[set].num_in -= 1, + MembershipState::Out => self.sets[set].num_out -= 1, + MembershipState::NotConnected { .. } | MembershipState::NotMember => {}, } } } /// Removes a node from the list of nodes that don't occupy slots. /// - /// Has no effect if the peer was not in the group. - pub fn remove_no_slot_node(&mut self, peer_id: &PeerId) { + /// Has no effect if the node was not in the group. 
+ pub fn remove_no_slot_node(&mut self, set: usize, peer_id: &PeerId) { // Reminder: `HashSet::remove` returns false if the node was already not in the set - if !self.no_slot_nodes.remove(peer_id) { - return; + if !self.sets[set].no_slot_nodes.remove(peer_id) { + return } if let Some(peer) = self.nodes.get_mut(peer_id) { - match peer.connection_state { - ConnectionState::In => self.num_in += 1, - ConnectionState::Out => self.num_out += 1, - ConnectionState::NotConnected { .. } => {}, + match peer.sets[set] { + MembershipState::In => self.sets[set].num_in += 1, + MembershipState::Out => self.sets[set].num_out += 1, + MembershipState::NotConnected { .. } | MembershipState::NotMember => {}, } } } } -/// Grants access to the state of a peer in the `PeersState`. +/// Grants access to the state of a peer in the [`PeersState`] in the context of a specific set. pub enum Peer<'a> { /// We are connected to this node. Connected(ConnectedPeer<'a>), /// We are not connected to this node. NotConnected(NotConnectedPeer<'a>), - /// We have never heard of this node. + /// We have never heard of this node, or it is not part of the set. Unknown(UnknownPeer<'a>), } @@ -253,7 +330,7 @@ impl<'a> Peer<'a> { /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` /// otherwise. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_not_connected(self) -> Option> { match self { Peer::Connected(_) => None, @@ -264,7 +341,7 @@ impl<'a> Peer<'a> { /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` /// otherwise. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_unknown(self) -> Option> { match self { Peer::Connected(_) => None, @@ -277,10 +354,16 @@ impl<'a> Peer<'a> { /// A peer that is connected to us. pub struct ConnectedPeer<'a> { state: &'a mut PeersState, + set: usize, peer_id: Cow<'a, PeerId>, } impl<'a> ConnectedPeer<'a> { + /// Get the `PeerId` associated to this `ConnectedPeer`. + pub fn peer_id(&self) -> &PeerId { + &self.peer_id + } + /// Destroys this `ConnectedPeer` and returns the `PeerId` inside of it. pub fn into_peer_id(self) -> PeerId { self.peer_id.into_owned() @@ -288,46 +371,33 @@ impl<'a> ConnectedPeer<'a> { /// Switches the peer to "not connected". pub fn disconnect(self) -> NotConnectedPeer<'a> { - let is_no_slot_occupy = self.state.no_slot_nodes.contains(&*self.peer_id); - if let Some(mut node) = self.state.nodes.get_mut(&*self.peer_id) { + let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); + if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { if !is_no_slot_occupy { - match node.connection_state { - ConnectionState::In => self.state.num_in -= 1, - ConnectionState::Out => self.state.num_out -= 1, - ConnectionState::NotConnected { .. } => - debug_assert!(false, "State inconsistency: disconnecting a disconnected node") + match node.sets[self.set] { + MembershipState::In => self.state.sets[self.set].num_in -= 1, + MembershipState::Out => self.state.sets[self.set].num_out -= 1, + MembershipState::NotMember | MembershipState::NotConnected { .. 
} => { + debug_assert!( + false, + "State inconsistency: disconnecting a disconnected node" + ) + }, } } - node.connection_state = ConnectionState::NotConnected { - last_connected: Instant::now(), - }; + node.sets[self.set] = MembershipState::NotConnected { last_connected: Instant::now() }; } else { debug_assert!(false, "State inconsistency: disconnecting a disconnected node"); } - NotConnectedPeer { - state: self.state, - peer_id: self.peer_id, - } - } - - /// Returns the reputation value of the node. - pub fn reputation(&self) -> i32 { - self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) - } - - /// Sets the reputation of the peer. - pub fn set_reputation(&mut self, value: i32) { - if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { - node.reputation = value; - } else { - debug_assert!(false, "State inconsistency: set_reputation on an unknown node"); - } + NotConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id } } /// Performs an arithmetic addition on the reputation score of that peer. /// /// In case of overflow, the value will be capped. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. pub fn add_reputation(&mut self, modifier: i32) { if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { node.reputation = node.reputation.saturating_add(modifier); @@ -335,18 +405,25 @@ impl<'a> ConnectedPeer<'a> { debug_assert!(false, "State inconsistency: add_reputation on an unknown node"); } } + + /// Returns the reputation value of the node. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. + pub fn reputation(&self) -> i32 { + self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) + } } /// A peer that is not connected to us. #[derive(Debug)] pub struct NotConnectedPeer<'a> { state: &'a mut PeersState, + set: usize, peer_id: Cow<'a, PeerId>, } impl<'a> NotConnectedPeer<'a> { /// Destroys this `NotConnectedPeer` and returns the `PeerId` inside of it. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_peer_id(self) -> PeerId { self.peer_id.into_owned() } @@ -359,7 +436,7 @@ impl<'a> NotConnectedPeer<'a> { None => return, }; - if let ConnectionState::NotConnected { last_connected } = &mut state.connection_state { + if let MembershipState::NotConnected { last_connected } = &mut state.sets[self.set] { *last_connected = Instant::now(); } } @@ -377,16 +454,16 @@ impl<'a> NotConnectedPeer<'a> { "State inconsistency with {}; not connected after borrow", self.peer_id ); - return Instant::now(); - } + return Instant::now() + }, }; - match state.connection_state { - ConnectionState::NotConnected { last_connected } => last_connected, + match state.sets[self.set] { + MembershipState::NotConnected { last_connected } => last_connected, _ => { error!(target: "peerset", "State inconsistency with {}", self.peer_id); Instant::now() - } + }, } } @@ -397,27 +474,24 @@ impl<'a> NotConnectedPeer<'a> { /// /// Non-slot-occupying nodes don't count towards the number of slots. pub fn try_outgoing(self) -> Result, NotConnectedPeer<'a>> { - let is_no_slot_occupy = self.state.no_slot_nodes.contains(&*self.peer_id); + let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_out to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. 
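The no-slot list is what lets reserved nodes bypass these limits entirely. A crate-internal sketch in the style of the `no_slot_node_doesnt_use_slot` test below:

```rust
use std::iter;

use libp2p::PeerId;

use crate::peersstate::{PeersState, SetConfig};

fn sketch_no_slot() {
    // Zero slots configured on purpose.
    let mut state = PeersState::new(iter::once(SetConfig { in_peers: 0, out_peers: 0 }));
    let id = PeerId::random();

    // Mark the node as non-slot-occupying, then connect it anyway.
    state.add_no_slot_node(0, id.clone());
    let peer = state.peer(0, &id).into_unknown().unwrap().discover();
    assert!(peer.try_outgoing().is_ok());
}
```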
- if self.state.num_out >= self.state.max_out && !is_no_slot_occupy { - return Err(self); + if !self.state.has_free_outgoing_slot(self.set) && !is_no_slot_occupy { + return Err(self) } - if let Some(mut peer) = self.state.nodes.get_mut(&*self.peer_id) { - peer.connection_state = ConnectionState::Out; + if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { + peer.sets[self.set] = MembershipState::Out; if !is_no_slot_occupy { - self.state.num_out += 1; + self.state.sets[self.set].num_out += 1; } } else { debug_assert!(false, "State inconsistency: try_outgoing on an unknown node"); } - Ok(ConnectedPeer { - state: self.state, - peer_id: self.peer_id, - }) + Ok(ConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id }) } /// Tries to accept the peer as an incoming connection. @@ -427,35 +501,39 @@ impl<'a> NotConnectedPeer<'a> { /// /// Non-slot-occupying nodes don't count towards the number of slots. pub fn try_accept_incoming(self) -> Result, NotConnectedPeer<'a>> { - let is_no_slot_occupy = self.state.no_slot_nodes.contains(&*self.peer_id); + let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_in to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. - if self.state.num_in >= self.state.max_in && !is_no_slot_occupy { - return Err(self); + if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in && + !is_no_slot_occupy + { + return Err(self) } - if let Some(mut peer) = self.state.nodes.get_mut(&*self.peer_id) { - peer.connection_state = ConnectionState::In; + if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { + peer.sets[self.set] = MembershipState::In; if !is_no_slot_occupy { - self.state.num_in += 1; + self.state.sets[self.set].num_in += 1; } } else { debug_assert!(false, "State inconsistency: try_accept_incoming on an unknown node"); } - Ok(ConnectedPeer { - state: self.state, - peer_id: self.peer_id, - }) + Ok(ConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id }) } /// Returns the reputation value of the node. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. pub fn reputation(&self) -> i32 { self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) } /// Sets the reputation of the peer. + /// + /// > **Note**: Reputation values aren't specific to a set but are global per peer. + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn set_reputation(&mut self, value: i32) { if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { node.reputation = value; @@ -464,37 +542,35 @@ impl<'a> NotConnectedPeer<'a> { } } - /// Performs an arithmetic addition on the reputation score of that peer. - /// - /// In case of overflow, the value will be capped. - pub fn add_reputation(&mut self, modifier: i32) { - if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { - node.reputation = node.reputation.saturating_add(modifier); - } else { - debug_assert!(false, "State inconsistency: add_reputation on an unknown node"); - } - } - - /// Un-discovers the peer. Removes it from the list. + /// Removes the peer from the list of members of the set. 
pub fn forget_peer(self) -> UnknownPeer<'a> { - if self.state.nodes.remove(&*self.peer_id).is_none() { + if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { + debug_assert!(!matches!(peer.sets[self.set], MembershipState::NotMember)); + peer.sets[self.set] = MembershipState::NotMember; + + // Remove the peer from `self.state.nodes` entirely if it isn't a member of any set. + if peer.reputation == 0 && + peer.sets.iter().all(|set| matches!(set, MembershipState::NotMember)) + { + self.state.nodes.remove(&*self.peer_id); + } + } else { + debug_assert!(false, "State inconsistency: forget_peer on an unknown node"); error!( target: "peerset", "State inconsistency with {} when forgetting peer", self.peer_id ); - } + }; - UnknownPeer { - parent: self.state, - peer_id: self.peer_id, - } + UnknownPeer { parent: self.state, set: self.set, peer_id: self.peer_id } } } -/// A peer that we have never heard of. +/// A peer that we have never heard of or that isn't part of the set. pub struct UnknownPeer<'a> { parent: &'a mut PeersState, + set: usize, peer_id: Cow<'a, PeerId>, } @@ -504,96 +580,180 @@ impl<'a> UnknownPeer<'a> { /// The node starts with a reputation of 0. You can adjust these default /// values using the `NotConnectedPeer` that this method returns. pub fn discover(self) -> NotConnectedPeer<'a> { - self.parent.nodes.insert(self.peer_id.clone().into_owned(), Node { - connection_state: ConnectionState::NotConnected { - last_connected: Instant::now(), - }, - reputation: 0, - }); + let num_sets = self.parent.sets.len(); + + self.parent + .nodes + .entry(self.peer_id.clone().into_owned()) + .or_insert_with(|| Node::new(num_sets)) + .sets[self.set] = MembershipState::NotConnected { last_connected: Instant::now() }; + + NotConnectedPeer { state: self.parent, set: self.set, peer_id: self.peer_id } + } +} + +/// Access to the reputation of a peer. +pub struct Reputation<'a> { + /// Node entry in [`PeersState::nodes`]. Always `Some` except right before dropping. + node: Option>, +} + +impl<'a> Reputation<'a> { + /// Returns the reputation value of the node. + pub fn reputation(&self) -> i32 { + self.node.as_ref().unwrap().get().reputation + } + + /// Sets the reputation of the peer. + pub fn set_reputation(&mut self, value: i32) { + self.node.as_mut().unwrap().get_mut().reputation = value; + } + + /// Performs an arithmetic addition on the reputation score of that peer. + /// + /// In case of overflow, the value will be capped. 
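`Reputation` acts as a small RAII guard: it keeps the map entry open while the value is adjusted, and its `Drop` implementation below garbage-collects nodes that end at reputation 0 without belonging to any set. A crate-internal sketch:

```rust
use libp2p::PeerId;

use crate::peersstate::PeersState;

fn sketch_reputation(state: &mut PeersState) {
    let id = PeerId::random();

    {
        let mut rep = state.peer_reputation(id.clone());
        rep.add_reputation(25);
        assert_eq!(rep.reputation(), 25);
    } // Guard dropped; the entry survives because its reputation is non-zero.

    {
        let mut rep = state.peer_reputation(id);
        rep.add_reputation(-25);
    } // Back to 0 and member of no set: the node is removed entirely on drop.
}
```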
+ pub fn add_reputation(&mut self, modifier: i32) { + let reputation = &mut self.node.as_mut().unwrap().get_mut().reputation; + *reputation = reputation.saturating_add(modifier); + } +} - let state = self.parent; - NotConnectedPeer { - state, - peer_id: self.peer_id, +impl<'a> Drop for Reputation<'a> { + fn drop(&mut self) { + if let Some(node) = self.node.take() { + if node.get().reputation == 0 && + node.get().sets.iter().all(|set| matches!(set, MembershipState::NotMember)) + { + node.remove(); + } } } } #[cfg(test)] mod tests { - use super::{PeersState, Peer}; + use super::{Peer, PeersState, SetConfig}; use libp2p::PeerId; + use std::iter; #[test] fn full_slots_in() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id1 = PeerId::random(); let id2 = PeerId::random(); - if let Peer::Unknown(e) = peers_state.peer(&id1) { + if let Peer::Unknown(e) = peers_state.peer(0, &id1) { assert!(e.discover().try_accept_incoming().is_ok()); } - if let Peer::Unknown(e) = peers_state.peer(&id2) { + if let Peer::Unknown(e) = peers_state.peer(0, &id2) { assert!(e.discover().try_accept_incoming().is_err()); } } #[test] fn no_slot_node_doesnt_use_slot() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id1 = PeerId::random(); let id2 = PeerId::random(); - peers_state.add_no_slot_node(id1.clone()); - if let Peer::Unknown(p) = peers_state.peer(&id1) { + peers_state.add_no_slot_node(0, id1.clone()); + if let Peer::Unknown(p) = peers_state.peer(0, &id1) { assert!(p.discover().try_accept_incoming().is_ok()); - } else { panic!() } + } else { + panic!() + } - if let Peer::Unknown(e) = peers_state.peer(&id2) { + if let Peer::Unknown(e) = peers_state.peer(0, &id2) { assert!(e.discover().try_accept_incoming().is_ok()); - } else { panic!() } + } else { + panic!() + } } #[test] fn disconnecting_frees_slot() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id1 = PeerId::random(); let id2 = PeerId::random(); - assert!(peers_state.peer(&id1).into_unknown().unwrap().discover().try_accept_incoming().is_ok()); - assert!(peers_state.peer(&id2).into_unknown().unwrap().discover().try_accept_incoming().is_err()); - peers_state.peer(&id1).into_connected().unwrap().disconnect(); - assert!(peers_state.peer(&id2).into_not_connected().unwrap().try_accept_incoming().is_ok()); + assert!(peers_state + .peer(0, &id1) + .into_unknown() + .unwrap() + .discover() + .try_accept_incoming() + .is_ok()); + assert!(peers_state + .peer(0, &id2) + .into_unknown() + .unwrap() + .discover() + .try_accept_incoming() + .is_err()); + peers_state.peer(0, &id1).into_connected().unwrap().disconnect(); + assert!(peers_state + .peer(0, &id2) + .into_not_connected() + .unwrap() + .try_accept_incoming() + .is_ok()); } #[test] fn highest_not_connected_peer() { - let mut peers_state = PeersState::new(25, 25); + let mut peers_state = + PeersState::new(iter::once(SetConfig { in_peers: 25, out_peers: 25 })); let id1 = PeerId::random(); let id2 = PeerId::random(); - assert!(peers_state.highest_not_connected_peer().is_none()); - peers_state.peer(&id1).into_unknown().unwrap().discover().set_reputation(50); - peers_state.peer(&id2).into_unknown().unwrap().discover().set_reputation(25); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), 
Some(id1.clone())); - peers_state.peer(&id2).into_not_connected().unwrap().set_reputation(75); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2.clone())); - peers_state.peer(&id2).into_not_connected().unwrap().try_accept_incoming().unwrap(); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - peers_state.peer(&id1).into_not_connected().unwrap().set_reputation(100); - peers_state.peer(&id2).into_connected().unwrap().disconnect(); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - peers_state.peer(&id1).into_not_connected().unwrap().set_reputation(-100); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2.clone())); + assert!(peers_state.highest_not_connected_peer(0).is_none()); + peers_state.peer(0, &id1).into_unknown().unwrap().discover().set_reputation(50); + peers_state.peer(0, &id2).into_unknown().unwrap().discover().set_reputation(25); + assert_eq!( + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state.peer(0, &id2).into_not_connected().unwrap().set_reputation(75); + assert_eq!( + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), + Some(id2.clone()) + ); + peers_state + .peer(0, &id2) + .into_not_connected() + .unwrap() + .try_accept_incoming() + .unwrap(); + assert_eq!( + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state.peer(0, &id1).into_not_connected().unwrap().set_reputation(100); + peers_state.peer(0, &id2).into_connected().unwrap().disconnect(); + assert_eq!( + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state.peer(0, &id1).into_not_connected().unwrap().set_reputation(-100); + assert_eq!( + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), + Some(id2.clone()) + ); } #[test] fn disconnect_no_slot_doesnt_panic() { - let mut peers_state = PeersState::new(1, 1); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id = PeerId::random(); - peers_state.add_no_slot_node(id.clone()); - let peer = peers_state.peer(&id).into_unknown().unwrap().discover().try_outgoing().unwrap(); + peers_state.add_no_slot_node(0, id.clone()); + let peer = peers_state + .peer(0, &id) + .into_unknown() + .unwrap() + .discover() + .try_outgoing() + .unwrap(); peer.disconnect(); } } diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index 6fa29e3d834cf..3a9ba686ee95c 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,10 +18,18 @@ use futures::prelude::*; use libp2p::PeerId; -use rand::distributions::{Distribution, Uniform, WeightedIndex}; -use rand::seq::IteratorRandom; -use std::{collections::HashMap, collections::HashSet, iter, pin::Pin, task::Poll}; -use sc_peerset::{IncomingIndex, Message, PeersetConfig, Peerset, ReputationChange}; +use rand::{ + distributions::{Distribution, Uniform, WeightedIndex}, + seq::IteratorRandom, +}; +use sc_peerset::{ + DropReason, IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId, +}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, + task::Poll, +}; #[test] fn run() { @@ -40,23 +48,28 @@ fn test_once() { let mut reserved_nodes = HashSet::::new(); let (mut peerset, peerset_handle) = Peerset::from_config(PeersetConfig { - bootnodes: (0 .. Uniform::new_inclusive(0, 4).sample(&mut rng)).map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - id - }).collect(), - priority_groups: { - let nodes = (0 .. Uniform::new_inclusive(0, 2).sample(&mut rng)).map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - reserved_nodes.insert(id.clone()); - id - }).collect(); - vec![("foo".to_string(), nodes)] - }, - reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, - in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + sets: vec![SetConfig { + bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + id + }) + .collect(), + reserved_nodes: { + (0..Uniform::new_inclusive(0, 2).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + reserved_nodes.insert(id.clone()); + id + }) + .collect() + }, + in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, + }], }); futures::executor::block_on(futures::future::poll_fn(move |cx| { @@ -71,70 +84,90 @@ fn test_once() { // Perform a certain number of actions while checking that the state is consistent. If we // reach the end of the loop, the run has succeeded. - for _ in 0 .. 2500 { + for _ in 0..2500 { // Each of these weights corresponds to an action that we may perform. let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4]; match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) { // If we generate 0, poll the peerset. 0 => match Stream::poll_next(Pin::new(&mut peerset), cx) { - Poll::Ready(Some(Message::Connect(id))) => { - if let Some(id) = incoming_nodes.iter().find(|(_, v)| **v == id).map(|(&id, _)| id) { + Poll::Ready(Some(Message::Connect { peer_id, .. })) => { + if let Some(id) = + incoming_nodes.iter().find(|(_, v)| **v == peer_id).map(|(&id, _)| id) + { incoming_nodes.remove(&id); } - assert!(connected_nodes.insert(id)); - } - Poll::Ready(Some(Message::Drop(id))) => { connected_nodes.remove(&id); } - Poll::Ready(Some(Message::Accept(n))) => - assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())), - Poll::Ready(Some(Message::Reject(n))) => - assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())), + assert!(connected_nodes.insert(peer_id)); + }, + Poll::Ready(Some(Message::Drop { peer_id, .. 
})) => { + connected_nodes.remove(&peer_id); + }, + Poll::Ready(Some(Message::Accept(n))) => { + assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())) + }, + Poll::Ready(Some(Message::Reject(n))) => { + assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())) + }, Poll::Ready(None) => panic!(), - Poll::Pending => {} - } + Poll::Pending => {}, + }, // If we generate 1, discover a new node. 1 => { let new_id = PeerId::random(); known_nodes.insert(new_id.clone()); - peerset.discovered(iter::once(new_id)); - } + peerset.add_to_peers_set(SetId::from(0), new_id); + }, // If we generate 2, adjust a random reputation. - 2 => if let Some(id) = known_nodes.iter().choose(&mut rng) { - let val = Uniform::new_inclusive(i32::min_value(), i32::max_value()).sample(&mut rng); - peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); - } + 2 => + if let Some(id) = known_nodes.iter().choose(&mut rng) { + let val = Uniform::new_inclusive(i32::MIN, i32::MAX).sample(&mut rng); + peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); + }, // If we generate 3, disconnect from a random node. - 3 => if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { - connected_nodes.remove(&id); - peerset.dropped(id); - } + 3 => + if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { + connected_nodes.remove(&id); + peerset.dropped(SetId::from(0), id, DropReason::Unknown); + }, // If we generate 4, connect to a random node. - 4 => if let Some(id) = known_nodes.iter() - .filter(|n| incoming_nodes.values().all(|m| m != *n) && !connected_nodes.contains(*n)) - .choose(&mut rng) { - peerset.incoming(id.clone(), next_incoming_id.clone()); - incoming_nodes.insert(next_incoming_id.clone(), id.clone()); - next_incoming_id.0 += 1; - } + 4 => { + if let Some(id) = known_nodes + .iter() + .filter(|n| { + incoming_nodes.values().all(|m| m != *n) && + !connected_nodes.contains(*n) + }) + .choose(&mut rng) + { + peerset.incoming(SetId::from(0), id.clone(), next_incoming_id.clone()); + incoming_nodes.insert(next_incoming_id.clone(), id.clone()); + next_incoming_id.0 += 1; + } + }, // 5 and 6 are the reserved-only mode. - 5 => peerset_handle.set_reserved_only(true), - 6 => peerset_handle.set_reserved_only(false), + 5 => peerset_handle.set_reserved_only(SetId::from(0), true), + 6 => peerset_handle.set_reserved_only(SetId::from(0), false), // 7 and 8 are about switching a random node in or out of reserved mode. 
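The action selection in this loop uses `rand`'s `WeightedIndex`. A self-contained sketch of that sampling pattern, with the weights copied from `action_weights` above:

```rust
// Index 0 (polling the peerset) is drawn ~150/400 of the time, while the
// rare actions 5 and 6 (toggling reserved-only mode) have weight 1 each.
use rand::distributions::{Distribution, WeightedIndex};

fn main() {
    let mut rng = rand::thread_rng();
    let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4];
    let dist = WeightedIndex::new(&action_weights).unwrap();

    let mut counts = [0u32; 9];
    for _ in 0..10_000 {
        counts[dist.sample(&mut rng)] += 1;
    }
    // The poll action dominates, which keeps the state machine moving.
    assert!(counts[0] > counts[5]);
}
```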
- 7 => if let Some(id) = known_nodes.iter().filter(|n| !reserved_nodes.contains(*n)).choose(&mut rng) { - peerset_handle.add_reserved_peer(id.clone()); - reserved_nodes.insert(id.clone()); - } - 8 => if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { - reserved_nodes.remove(&id); - peerset_handle.remove_reserved_peer(id); - } - - _ => unreachable!() + 7 => { + if let Some(id) = + known_nodes.iter().filter(|n| !reserved_nodes.contains(*n)).choose(&mut rng) + { + peerset_handle.add_reserved_peer(SetId::from(0), id.clone()); + reserved_nodes.insert(id.clone()); + } + }, + 8 => + if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { + reserved_nodes.remove(&id); + peerset_handle.remove_reserved_peer(SetId::from(0), id); + }, + + _ => unreachable!(), } } diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml index 085f50f5be551..ffe5045461f77 100644 --- a/client/proposer-metrics/Cargo.toml +++ b/client/proposer-metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-proposer-metrics" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,4 +14,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} diff --git a/client/proposer-metrics/src/lib.rs b/client/proposer-metrics/src/lib.rs index 50498d40b62d5..da29fb2951995 100644 --- a/client/proposer-metrics/src/lib.rs +++ b/client/proposer-metrics/src/lib.rs @@ -1,22 +1,26 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Prometheus basic proposer metrics. -use prometheus_endpoint::{register, PrometheusError, Registry, Histogram, HistogramOpts, Gauge, U64}; +use prometheus_endpoint::{ + register, Gauge, Histogram, HistogramOpts, PrometheusError, Registry, U64, +}; /// Optional shareable link to basic authorship metrics. 
#[derive(Clone, Default)] @@ -24,13 +28,13 @@ pub struct MetricsLink(Option); impl MetricsLink { pub fn new(registry: Option<&Registry>) -> Self { - Self( - registry.and_then(|registry| - Metrics::register(registry) - .map_err(|err| log::warn!("Failed to register proposer prometheus metrics: {}", err)) - .ok() - ) - ) + Self(registry.and_then(|registry| { + Metrics::register(registry) + .map_err(|err| { + log::warn!("Failed to register proposer prometheus metrics: {}", err) + }) + .ok() + })) } pub fn report(&self, do_this: impl FnOnce(&Metrics) -> O) -> Option { diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 55eb51d261cdb..6342abb1a3c41 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-api" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,20 +13,22 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -derive_more = "0.99.2" -futures = { version = "0.3.1", features = ["compat"] } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -jsonrpc-pubsub = "15.0.0" +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.16" +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +jsonrpc-pubsub = "18.0.0" log = "0.4.8" -parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0"} -sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0"} -serde = { version = "1.0.101", features = ["derive"] } -serde_json = "1.0.41" -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } +parking_lot = "0.11.1" +thiserror = "1.0" + +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } +sp-runtime = { path = "../../primitives/runtime", version = "4.0.0-dev" } +sc-chain-spec = { path = "../chain-spec", version = "4.0.0-dev" } +serde = { version = "1.0.126", features = ["derive"] } +serde_json = "1.0.68" +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } +sp-rpc = { version = "4.0.0-dev", path = "../../primitives/rpc" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } diff --git a/client/rpc-api/src/author/error.rs b/client/rpc-api/src/author/error.rs index 69c036be95fe0..c7e3ccffabbb7 100644 --- a/client/rpc-api/src/author/error.rs +++ b/client/rpc-api/src/author/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -26,54 +26,41 @@ use sp_runtime::transaction_validity::InvalidTransaction; pub type Result = std::result::Result; /// Author RPC future Result type. -pub type FutureResult = Box + Send>; +pub type FutureResult = jsonrpc_core::BoxFuture>; /// Author RPC errors. 
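The `MetricsLink` rewrite above is an instance of a broader pattern: register metrics once, warn on failure, and let every later report degrade to a no-op. A stripped-down sketch of that pattern, where `Registry` and `Metrics` are stand-ins for the real Prometheus types:

```rust
struct Registry;
struct Metrics;

impl Metrics {
    fn register(_r: &Registry) -> Result<Self, &'static str> {
        Ok(Metrics) // the real call can fail, e.g. on duplicate registration
    }
}

struct MetricsLink(Option<Metrics>);

impl MetricsLink {
    fn new(registry: Option<&Registry>) -> Self {
        Self(registry.and_then(|r| {
            Metrics::register(r)
                .map_err(|e| eprintln!("Failed to register metrics: {}", e))
                .ok()
        }))
    }

    /// Runs `f` only if metrics were registered successfully.
    fn report<O>(&self, f: impl FnOnce(&Metrics) -> O) -> Option<O> {
        self.0.as_ref().map(f)
    }
}

fn main() {
    let metrics = MetricsLink::new(None); // no registry configured
    assert!(metrics.report(|_| ()).is_none()); // silently a no-op
}
```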
-#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] pub enum Error { /// Client error. - #[display(fmt="Client error: {}", _0)] - #[from(ignore)] + #[error("Client error: {}", .0)] Client(Box), /// Transaction pool error, - #[display(fmt="Transaction pool error: {}", _0)] - Pool(sp_transaction_pool::error::Error), + #[error("Transaction pool error: {}", .0)] + Pool(#[from] sc_transaction_pool_api::error::Error), /// Verification error - #[display(fmt="Extrinsic verification error: {}", _0)] - #[from(ignore)] + #[error("Extrinsic verification error: {}", .0)] Verification(Box), /// Incorrect extrinsic format. - #[display(fmt="Invalid extrinsic format: {}", _0)] - BadFormat(codec::Error), + #[error("Invalid extrinsic format: {}", .0)] + BadFormat(#[from] codec::Error), /// Incorrect seed phrase. - #[display(fmt="Invalid seed phrase/SURI")] + #[error("Invalid seed phrase/SURI")] BadSeedPhrase, /// Key type ID has an unknown format. - #[display(fmt="Invalid key type ID format (should be of length four)")] + #[error("Invalid key type ID format (should be of length four)")] BadKeyType, /// Key type ID has some unsupported crypto. - #[display(fmt="The crypto of key type ID is unknown")] + #[error("The crypto of key type ID is unknown")] UnsupportedKeyType, /// Some random issue with the key store. Shouldn't happen. - #[display(fmt="The key store is unavailable")] + #[error("The key store is unavailable")] KeyStoreUnavailable, /// Invalid session keys encoding. - #[display(fmt="Session keys are not encoded correctly")] + #[error("Session keys are not encoded correctly")] InvalidSessionKeys, /// Call to an unsafe RPC was denied. - UnsafeRpcCalled(crate::policy::UnsafeRpcError), -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(&**err), - Error::Pool(ref err) => Some(err), - Error::Verification(ref err) => Some(&**err), - Error::UnsafeRpcCalled(ref err) => Some(err), - _ => None, - } - } + #[error(transparent)] + UnsafeRpcCalled(#[from] crate::policy::UnsafeRpcError), } /// Base code for all authorship errors. @@ -99,10 +86,13 @@ const POOL_CYCLE_DETECTED: i64 = POOL_INVALID_TX + 5; const POOL_IMMEDIATELY_DROPPED: i64 = POOL_INVALID_TX + 6; /// The key type crypto is not known. const UNSUPPORTED_KEY_TYPE: i64 = POOL_INVALID_TX + 7; +/// The transaction was not included to the pool since it is unactionable, +/// it is not propagable and the local node does not author blocks. 
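For readers unfamiliar with the crate, the effect of the `derive_more` → `thiserror` migration above is easy to show in isolation. A minimal sketch with stand-in error types (not the real ones from this file):

```rust
// `thiserror` derives both `Display` and `std::error::Error::source` from
// attributes, replacing `derive_more` plus the hand-written `source()` match.
use thiserror::Error;

#[derive(Debug, Error)]
#[error("pool is full")]
struct PoolError;

#[derive(Debug, Error)]
enum Error {
    // `#[from]` also marks the field as the `source()` of this error.
    #[error("Transaction pool error: {0}")]
    Pool(#[from] PoolError),
    // `transparent` forwards both the message and the source unchanged.
    #[error(transparent)]
    Io(#[from] std::io::Error),
}

fn main() {
    use std::error::Error as _;
    let err: Error = PoolError.into(); // From impl generated by `#[from]`
    assert_eq!(err.to_string(), "Transaction pool error: pool is full");
    assert!(err.source().is_some());
}
```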
+const POOL_UNACTIONABLE: i64 = POOL_INVALID_TX + 8; impl From for rpc::Error { fn from(e: Error) -> Self { - use sp_transaction_pool::error::{Error as PoolError}; + use sc_transaction_pool_api::error::Error as PoolError; match e { Error::BadFormat(e) => rpc::Error { @@ -158,6 +148,14 @@ impl From for rpc::Error { message: "Immediately Dropped".into(), data: Some("The transaction couldn't enter the pool because of the limit".into()), }, + Error::Pool(PoolError::Unactionable) => rpc::Error { + code: rpc::ErrorCode::ServerError(POOL_UNACTIONABLE), + message: "Unactionable".into(), + data: Some( + "The transaction is unactionable since it is not propagable and \ + the local node does not author blocks".into(), + ), + }, Error::UnsupportedKeyType => rpc::Error { code: rpc::ErrorCode::ServerError(UNSUPPORTED_KEY_TYPE), message: "Unknown key type crypto" .into(), diff --git a/client/rpc-api/src/author/hash.rs b/client/rpc-api/src/author/hash.rs index 4287af8ede596..c4acfb819ddbb 100644 --- a/client/rpc-api/src/author/hash.rs +++ b/client/rpc-api/src/author/hash.rs @@ -1,23 +1,25 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Extrinsic helpers for author RPC module. +use serde::{Deserialize, Serialize}; use sp_core::Bytes; -use serde::{Serialize, Deserialize}; /// RPC Extrinsic or hash /// diff --git a/client/rpc-api/src/author/mod.rs b/client/rpc-api/src/author/mod.rs index 29f5b1d26e84c..720598e0b32a8 100644 --- a/client/rpc-api/src/author/mod.rs +++ b/client/rpc-api/src/author/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,11 +21,11 @@ pub mod error; pub mod hash; +use self::error::{FutureResult, Result}; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use sc_transaction_pool_api::TransactionStatus; use sp_core::Bytes; -use sp_transaction_pool::TransactionStatus; -use self::error::{FutureResult, Result}; pub use self::gen_client::Client as AuthorClient; @@ -41,12 +41,7 @@ pub trait AuthorApi { /// Insert a key into the keystore. 
#[rpc(name = "author_insertKey")] - fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) -> Result<()>; + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()>; /// Generate new session keys and returns the corresponding public keys. #[rpc(name = "author_rotateKeys")] @@ -72,23 +67,25 @@ pub trait AuthorApi { /// Remove given extrinsic from the pool and temporarily ban it to prevent reimporting. #[rpc(name = "author_removeExtrinsic")] - fn remove_extrinsic(&self, - bytes_or_hash: Vec> + fn remove_extrinsic( + &self, + bytes_or_hash: Vec>, ) -> Result>; /// Submit an extrinsic to watch. /// - /// See [`TransactionStatus`](sp_transaction_pool::TransactionStatus) for details on transaction - /// life cycle. + /// See [`TransactionStatus`](sc_transaction_pool_api::TransactionStatus) for details on + /// transaction life cycle. #[pubsub( subscription = "author_extrinsicUpdate", subscribe, name = "author_submitAndWatchExtrinsic" )] - fn watch_extrinsic(&self, + fn watch_extrinsic( + &self, metadata: Self::Metadata, subscriber: Subscriber>, - bytes: Bytes + bytes: Bytes, ); /// Unsubscribe from extrinsic watching. @@ -97,8 +94,9 @@ pub trait AuthorApi { unsubscribe, name = "author_unwatchExtrinsic" )] - fn unwatch_extrinsic(&self, + fn unwatch_extrinsic( + &self, metadata: Option, - id: SubscriptionId + id: SubscriptionId, ) -> Result; } diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index fd7bd0a43d778..c7f14b2dfc168 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,27 +25,19 @@ use jsonrpc_core as rpc; pub type Result = std::result::Result; /// Chain RPC future Result type. -pub type FutureResult = Box + Send>; +pub type FutureResult = jsonrpc_core::BoxFuture>; /// Chain RPC errors. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] pub enum Error { /// Client error. - #[display(fmt="Client error: {}", _0)] - Client(Box), + #[error("Client error: {}", .0)] + Client(#[from] Box), /// Other error type. + #[error("{0}")] Other(String), } -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(&**err), - _ => None, - } - } -} - /// Base error code for all chain errors. const BASE_ERROR: i64 = 3000; diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index 9bb75216c0186..79ae80d0c4d1d 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,11 +20,11 @@ pub mod error; +use self::error::{FutureResult, Result}; use jsonrpc_core::Result as RpcResult; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use sp_rpc::{number::NumberOrHex, list::ListOrValue}; -use self::error::{FutureResult, Result}; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; pub use self::gen_client::Client as ChainClient; diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index d956a7554f8ee..de94790d09907 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,28 +18,43 @@ //! Substrate state API. -use jsonrpc_derive::rpc; -use sp_core::storage::{StorageKey, PrefixedStorageKey, StorageData}; use crate::state::error::FutureResult; +use jsonrpc_derive::rpc; +use sp_core::storage::{PrefixedStorageKey, StorageData, StorageKey}; pub use self::gen_client::Client as ChildStateClient; +use crate::state::ReadProof; /// Substrate child state API /// -/// Note that all `PrefixedStorageKey` are desierialized -/// from json and not guaranted valid. +/// Note that all `PrefixedStorageKey` are deserialized +/// from json and not guaranteed valid. #[rpc] pub trait ChildStateApi { /// RPC Metadata type Metadata; + /// DEPRECATED: Please use `childstate_getKeysPaged` with proper paging support. /// Returns the keys with prefix from a child storage, leave empty to get all the keys #[rpc(name = "childstate_getKeys")] fn storage_keys( &self, child_storage_key: PrefixedStorageKey, prefix: StorageKey, - hash: Option + hash: Option, + ) -> FutureResult>; + + /// Returns the keys with prefix from a child storage with pagination support. + /// Up to `count` keys will be returned. + /// If `start_key` is passed, return next keys in storage in lexicographic order. + #[rpc(name = "childstate_getKeysPaged", alias("childstate_getKeysPagedAt"))] + fn storage_keys_paged( + &self, + child_storage_key: PrefixedStorageKey, + prefix: Option, + count: u32, + start_key: Option, + hash: Option, ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. @@ -48,16 +63,25 @@ pub trait ChildStateApi { &self, child_storage_key: PrefixedStorageKey, key: StorageKey, - hash: Option + hash: Option, ) -> FutureResult>; + /// Returns child storage entries for multiple keys at a specific block's state. + #[rpc(name = "childstate_getStorageEntries")] + fn storage_entries( + &self, + child_storage_key: PrefixedStorageKey, + keys: Vec, + hash: Option, + ) -> FutureResult>>; + /// Returns the hash of a child storage entry at a block's state. #[rpc(name = "childstate_getStorageHash")] fn storage_hash( &self, child_storage_key: PrefixedStorageKey, key: StorageKey, - hash: Option + hash: Option, ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
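A sketch of how a caller might drive the new paged child-key query introduced above. `fetch_page` is a hypothetical stand-in for the `childstate_getKeysPaged` call (child storage key, prefix and block hash omitted); the point is the loop shape: feed the last returned key back in as `start_key` until a short page comes back.

```rust
type Key = Vec<u8>;

fn all_child_keys(
    mut fetch_page: impl FnMut(Option<&Key>) -> Vec<Key>,
    page_size: usize,
) -> Vec<Key> {
    let mut out: Vec<Key> = Vec::new();
    loop {
        // `out.last()` is the `start_key` parameter of the RPC.
        let keys = fetch_page(out.last());
        let done = keys.len() < page_size; // short page == no more keys
        out.extend(keys);
        if done {
            return out;
        }
    }
}

fn main() {
    // Fake "storage" of 5 keys, served in lexicographic order, pages of 2.
    let store: Vec<Key> = (0u8..5).map(|i| vec![i]).collect();
    let fetch = |start: Option<&Key>| -> Vec<Key> {
        let skip = start.map_or(0, |s| store.iter().position(|k| k == s).unwrap() + 1);
        store.iter().skip(skip).take(2).cloned().collect()
    };
    assert_eq!(all_child_keys(fetch, 2), store);
}
```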
@@ -66,6 +90,15 @@ pub trait ChildStateApi { &self, child_storage_key: PrefixedStorageKey, key: StorageKey, - hash: Option + hash: Option, ) -> FutureResult>; + + /// Returns proof of storage for child key entries at a specific block's state. + #[rpc(name = "state_getChildReadProof")] + fn read_child_proof( + &self, + child_storage_key: PrefixedStorageKey, + keys: Vec, + hash: Option, + ) -> FutureResult>; } diff --git a/client/rpc-api/src/errors.rs b/client/rpc-api/src/errors.rs index 4e1a5b10fc512..8e4883a4cc20c 100644 --- a/client/rpc-api/src/errors.rs +++ b/client/rpc-api/src/errors.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/helpers.rs b/client/rpc-api/src/helpers.rs index 025fef1102c49..a26adbf2e9032 100644 --- a/client/rpc-api/src/helpers.rs +++ b/client/rpc-api/src/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,18 +16,26 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use jsonrpc_core::futures::prelude::*; -use futures::{channel::oneshot, compat::Compat}; +use futures::{channel::oneshot, Future}; +use std::pin::Pin; /// Wraps around `oneshot::Receiver` and adjusts the error type to produce an internal error if the /// sender gets dropped. -pub struct Receiver(pub Compat>); +pub struct Receiver(pub oneshot::Receiver); impl Future for Receiver { - type Item = T; - type Error = jsonrpc_core::Error; + type Output = Result; - fn poll(&mut self) -> Poll { - self.0.poll().map_err(|_| jsonrpc_core::Error::internal_error()) + fn poll( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + Future::poll(Pin::new(&mut self.0), cx).map_err(|_| jsonrpc_core::Error::internal_error()) + } +} + +impl jsonrpc_core::WrapFuture for Receiver { + fn into_future(self) -> jsonrpc_core::BoxFuture> { + Box::pin(async { self.await }) } } diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index 7bae75181056f..92de1e7fcb344 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Substrate RPC interfaces. //! @@ -28,11 +30,11 @@ mod policy; pub use helpers::Receiver; pub use jsonrpc_core::IoHandlerExtension as RpcExtension; pub use metadata::Metadata; -pub use policy::DenyUnsafe; +pub use policy::{DenyUnsafe, UnsafeRpcError}; pub mod author; pub mod chain; +pub mod child_state; pub mod offchain; pub mod state; -pub mod child_state; pub mod system; diff --git a/client/rpc-api/src/metadata.rs b/client/rpc-api/src/metadata.rs index cffcbf61f5440..d493b92c11ac5 100644 --- a/client/rpc-api/src/metadata.rs +++ b/client/rpc-api/src/metadata.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,8 +19,8 @@ //! RPC Metadata use std::sync::Arc; -use jsonrpc_core::futures::sync::mpsc; -use jsonrpc_pubsub::{Session, PubSubMetadata}; +use futures::channel::mpsc; +use jsonrpc_pubsub::{PubSubMetadata, Session}; /// RPC Metadata. /// @@ -41,22 +41,20 @@ impl PubSubMetadata for Metadata { impl Metadata { /// Create new `Metadata` with session (Pub/Sub) support. - pub fn new(transport: mpsc::Sender) -> Self { - Metadata { - session: Some(Arc::new(Session::new(transport))), - } + pub fn new(transport: mpsc::UnboundedSender) -> Self { + Metadata { session: Some(Arc::new(Session::new(transport))) } } /// Create new `Metadata` for tests. #[cfg(test)] - pub fn new_test() -> (mpsc::Receiver, Self) { - let (tx, rx) = mpsc::channel(1); + pub fn new_test() -> (mpsc::UnboundedReceiver, Self) { + let (tx, rx) = mpsc::unbounded(); (rx, Self::new(tx)) } } -impl From> for Metadata { - fn from(sender: mpsc::Sender) -> Self { +impl From> for Metadata { + fn from(sender: mpsc::UnboundedSender) -> Self { Self::new(sender) } } diff --git a/client/rpc-api/src/offchain/error.rs b/client/rpc-api/src/offchain/error.rs index ea5223f1ce7f9..6b8e2bfe189b1 100644 --- a/client/rpc-api/src/offchain/error.rs +++ b/client/rpc-api/src/offchain/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,22 +24,14 @@ use jsonrpc_core as rpc; pub type Result = std::result::Result; /// Offchain RPC errors. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] pub enum Error { /// Unavailable storage kind error. - #[display(fmt="This storage kind is not available yet.")] + #[error("This storage kind is not available yet.")] UnavailableStorageKind, /// Call to an unsafe RPC was denied. - UnsafeRpcCalled(crate::policy::UnsafeRpcError), -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::UnsafeRpcCalled(err) => Some(err), - _ => None, - } - } + #[error(transparent)] + UnsafeRpcCalled(#[from] crate::policy::UnsafeRpcError), } /// Base error code for all offchain errors. 
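The `Receiver` rewrite above is a standard oneshot-to-RPC adaptation. A self-contained sketch of the failure path it guards, assuming the `futures` and `jsonrpc-core` crates already used in this workspace:

```rust
// A dropped sender (e.g. the task that should answer the RPC died) surfaces
// as a JSON-RPC internal error rather than a hang or a panic.
use futures::channel::oneshot;

async fn answer() -> Result<u32, jsonrpc_core::Error> {
    let (tx, rx) = oneshot::channel::<u32>();
    drop(tx); // sender goes away before responding
    rx.await.map_err(|_cancelled| jsonrpc_core::Error::internal_error())
}

fn main() {
    assert!(futures::executor::block_on(answer()).is_err());
}
```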
@@ -50,7 +42,7 @@ impl From for rpc::Error { match e { Error::UnavailableStorageKind => rpc::Error { code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: "This storage kind is not available yet" .into(), + message: "This storage kind is not available yet".into(), data: None, }, Error::UnsafeRpcCalled(e) => e.into(), diff --git a/client/rpc-api/src/offchain/mod.rs b/client/rpc-api/src/offchain/mod.rs index 427b6a1cc017b..333892fc19c4c 100644 --- a/client/rpc-api/src/offchain/mod.rs +++ b/client/rpc-api/src/offchain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,9 +20,9 @@ pub mod error; -use jsonrpc_derive::rpc; use self::error::Result; -use sp_core::{Bytes, offchain::StorageKind}; +use jsonrpc_derive::rpc; +use sp_core::{offchain::StorageKind, Bytes}; pub use self::gen_client::Client as OffchainClient; diff --git a/client/rpc-api/src/policy.rs b/client/rpc-api/src/policy.rs index 141dcfbc415f8..5d56c62bfece3 100644 --- a/client/rpc-api/src/policy.rs +++ b/client/rpc-api/src/policy.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index 1c22788062c7b..d700863476329 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,16 +25,16 @@ use jsonrpc_core as rpc; pub type Result = std::result::Result; /// State RPC future Result type. -pub type FutureResult = Box + Send>; +pub type FutureResult = jsonrpc_core::BoxFuture>; /// State RPC errors. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] pub enum Error { /// Client error. - #[display(fmt="Client error: {}", _0)] - Client(Box), + #[error("Client error: {}", .0)] + Client(#[from] Box), /// Provided block range couldn't be resolved to a list of blocks. - #[display(fmt = "Cannot resolve a block range ['{:?}' ... '{:?}]. {}", from, to, details)] + #[error("Cannot resolve a block range ['{:?}' ... '{:?}]. {}", .from, .to, .details)] InvalidBlockRange { /// Beginning of the block range. from: String, @@ -44,7 +44,7 @@ pub enum Error { details: String, }, /// Provided count exceeds maximum value. - #[display(fmt = "count exceeds maximum value. value: {}, max: {}", value, max)] + #[error("count exceeds maximum value. value: {}, max: {}", .value, .max)] InvalidCount { /// Provided value value: u32, @@ -52,16 +52,8 @@ pub enum Error { max: u32, }, /// Call to an unsafe RPC was denied. 
- UnsafeRpcCalled(crate::policy::UnsafeRpcError), -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(&**err), - _ => None, - } - } + #[error(transparent)] + UnsafeRpcCalled(#[from] crate::policy::UnsafeRpcError), } /// Base code for all state errors. diff --git a/client/rpc-api/src/state/helpers.rs b/client/rpc-api/src/state/helpers.rs index 0d176ea67f35b..718ad69ac232c 100644 --- a/client/rpc-api/src/state/helpers.rs +++ b/client/rpc-api/src/state/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,8 +18,8 @@ //! Substrate state API helpers. +use serde::{Deserialize, Serialize}; use sp_core::Bytes; -use serde::{Serialize, Deserialize}; /// ReadProof struct returned by the RPC #[derive(Debug, PartialEq, Serialize, Deserialize)] diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 874fc862a39d2..620a000c500f4 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,16 +21,17 @@ pub mod error; pub mod helpers; +use self::error::FutureResult; use jsonrpc_core::Result as RpcResult; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use sp_core::Bytes; -use sp_core::storage::{StorageKey, StorageData, StorageChangeSet}; +use sp_core::{ + storage::{StorageChangeSet, StorageData, StorageKey}, + Bytes, +}; use sp_version::RuntimeVersion; -use self::error::FutureResult; -pub use self::gen_client::Client as StateClient; -pub use self::helpers::ReadProof; +pub use self::{gen_client::Client as StateClient, helpers::ReadProof}; /// Substrate state API #[rpc] @@ -45,11 +46,16 @@ pub trait StateApi { /// DEPRECATED: Please use `state_getKeysPaged` with proper paging support. /// Returns the keys with prefix, leave empty to get all the keys. #[rpc(name = "state_getKeys")] - fn storage_keys(&self, prefix: StorageKey, hash: Option) -> FutureResult>; + fn storage_keys(&self, prefix: StorageKey, hash: Option) + -> FutureResult>; /// Returns the keys with prefix, leave empty to get all the keys #[rpc(name = "state_getPairs")] - fn storage_pairs(&self, prefix: StorageKey, hash: Option) -> FutureResult>; + fn storage_pairs( + &self, + prefix: StorageKey, + hash: Option, + ) -> FutureResult>; /// Returns the keys with prefix with pagination support. /// Up to `count` keys will be returned. @@ -83,7 +89,8 @@ pub trait StateApi { #[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))] fn runtime_version(&self, hash: Option) -> FutureResult; - /// Query historical storage entries (by key) starting from a block given as the second parameter. + /// Query historical storage entries (by key) starting from a block given as the second + /// parameter. /// /// NOTE This first returned result contains the initial state of storage for all keys. /// Subsequent values in the vector represent changes to the previous state (diffs). 
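The initial-state-plus-diffs shape documented above means a `state_queryStorage` caller has to fold the change sets to recover the state at the end of the range. A sketch with simplified stand-in types (the real response uses `StorageChangeSet`, which also carries the block hash):

```rust
use std::collections::HashMap;

type Key = Vec<u8>;
type Value = Option<Vec<u8>>; // `None` means the key was deleted

fn state_at_end(change_sets: &[Vec<(Key, Value)>]) -> HashMap<Key, Value> {
    let mut state = HashMap::new();
    for changes in change_sets {
        // The first set is the full initial state; later sets are diffs,
        // so later entries simply overwrite earlier values.
        for (k, v) in changes {
            state.insert(k.clone(), v.clone());
        }
    }
    state
}

fn main() {
    let initial = vec![(b"a".to_vec(), Some(b"1".to_vec()))];
    let diff = vec![(b"a".to_vec(), Some(b"2".to_vec()))];
    let state = state_at_end(&[initial, diff]);
    assert_eq!(state[&b"a".to_vec()], Some(b"2".to_vec()));
}
```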
@@ -92,7 +99,7 @@ pub trait StateApi { &self, keys: Vec, block: Hash, - hash: Option + hash: Option, ) -> FutureResult>>; /// Query storage entries (by key) starting at block hash given as the second parameter. @@ -105,7 +112,11 @@ pub trait StateApi { /// Returns proof of storage entries at a specific block's state. #[rpc(name = "state_getReadProof")] - fn read_proof(&self, keys: Vec, hash: Option) -> FutureResult>; + fn read_proof( + &self, + keys: Vec, + hash: Option, + ) -> FutureResult>; /// New runtime version subscription #[pubsub( @@ -114,7 +125,11 @@ pub trait StateApi { name = "state_subscribeRuntimeVersion", alias("chain_subscribeRuntimeVersion") )] - fn subscribe_runtime_version(&self, metadata: Self::Metadata, subscriber: Subscriber); + fn subscribe_runtime_version( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ); /// Unsubscribe from runtime version subscription #[pubsub( @@ -123,17 +138,187 @@ pub trait StateApi { name = "state_unsubscribeRuntimeVersion", alias("chain_unsubscribeRuntimeVersion") )] - fn unsubscribe_runtime_version(&self, metadata: Option, id: SubscriptionId) -> RpcResult; + fn unsubscribe_runtime_version( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; /// New storage subscription #[pubsub(subscription = "state_storage", subscribe, name = "state_subscribeStorage")] fn subscribe_storage( - &self, metadata: Self::Metadata, subscriber: Subscriber>, keys: Option> + &self, + metadata: Self::Metadata, + subscriber: Subscriber>, + keys: Option>, ); /// Unsubscribe from storage subscription #[pubsub(subscription = "state_storage", unsubscribe, name = "state_unsubscribeStorage")] fn unsubscribe_storage( - &self, metadata: Option, id: SubscriptionId + &self, + metadata: Option, + id: SubscriptionId, ) -> RpcResult; + + /// The `state_traceBlock` RPC provides a way to trace the re-execution of a single + /// block, collecting Spans and Events from both the client and the relevant WASM runtime. + /// The Spans and Events are conceptually equivalent to those from the [Tracing][1] crate. + /// + /// The structure of the traces follows that of the block execution pipeline, so meaningful + /// interpretation of the traces requires an understanding of the Substrate chain's block + /// execution. + /// + /// [Link to conceptual map of trace structure for Polkadot and Kusama block execution.][2] + /// + /// [1]: https://crates.io/crates/tracing + /// [2]: https://docs.google.com/drawings/d/1vZoJo9jaXlz0LmrdTOgHck9_1LsfuQPRmTr-5g1tOis/edit?usp=sharing + /// + /// ## Node requirements + /// + /// - Fully synced archive node (i.e. a node that is not actively doing a "major" sync). + /// - [Tracing enabled WASM runtimes](#creating-tracing-enabled-wasm-runtimes) for all runtime + /// versions + /// for which tracing is desired. + /// + /// ## Node recommendations + /// + /// - Use fast SSD disk storage. + /// - Run node flags to increase DB read speed (i.e. `--state-cache-size`, `--db-cache`). + /// + /// ## Creating tracing enabled WASM runtimes + /// + /// - Checkout commit of chain version to compile with WASM traces + /// - [diener][1] can help to peg commit of substrate to what the chain expects. + /// - Navigate to the `runtime` folder/package of the chain + /// - Add feature `with-tracing = ["frame-executive/with-tracing", "sp-io/with-tracing"]` + /// under `[features]` to the `runtime` packages' `Cargo.toml`. 
+ /// - Compile the runtime with `cargo build --release --features with-tracing` + /// - Tracing-enabled WASM runtime should be found in + /// `./target/release/wbuild/{{chain}}-runtime` + /// and be called something like `{{your_chain}}_runtime.compact.wasm`. This can be + /// renamed/modified however you like, as long as it retains the `.wasm` extension. + /// - Run the node with the wasm blob overrides by placing them in a folder with all your + /// runtimes, + /// and passing the path of this folder to your chain, e.g.: + /// - `./target/release/polkadot --wasm-runtime-overrides /home/user/my-custom-wasm-runtimes` + /// + /// You can also find some pre-built tracing enabled wasm runtimes in [substrate-archive][2] + /// + /// [Source.][3] + /// + /// [1]: https://crates.io/crates/diener + /// [2]: https://github.com/paritytech/substrate-archive/tree/master/wasm-tracing + /// [3]: https://github.com/paritytech/substrate-archive/wiki + /// + /// ## RPC Usage + /// + /// The RPC allows for two filtering mechanisms: tracing targets and storage key prefixes. + /// The filtering of spans and events takes place after they are all collected; so while filters + /// do not reduce time for actual block re-execution, they reduce the response payload size. + /// + /// Note: storage events primarily come from _primitives/state-machine/src/ext.rs_. + /// The default filters can be overridden, see the [params section](#params) for details. + /// + /// ### `curl` example + /// + /// - Get tracing spans and events + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "state_traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "pallet,frame,state", "", ""]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with all `storage_keys` + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "state_traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "", ""]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with `storage_keys` ('f0c365c3cf59d671eb72da0e7a4113c4') + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "state_traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "f0c365c3cf59d671eb72da0e7a4113c4", ""]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with `storage_keys` ('f0c365c3cf59d671eb72da0e7a4113c4') and method + /// ('Put') + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "state_traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "f0c365c3cf59d671eb72da0e7a4113c4", "Put"]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// - Get tracing events with all `storage_keys` and method ('Put') + /// ```text + /// curl \ + /// -H "Content-Type: application/json" \ + /// -d '{"id":1, "jsonrpc":"2.0", "method": "state_traceBlock", \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264", "state", "", "Put"]}' \ + /// http://localhost:9933/ + /// ``` + /// + /// ### Params + /// + /// - `block` (param index 0): Hash of the block to trace. + /// - `targets` (param index 1): String of comma separated (no spaces) targets. 
Specified
+ /// targets match with trace targets by prefix (i.e. if a target is in the beginning
+ /// of a trace target it is considered a match). If an empty string is specified no
+ /// targets will be filtered out. The majority of targets correspond to Rust module names,
+ /// and the ones that do not are typically "hardcoded" into span or event location
+ /// somewhere in the Substrate source code. ("Non-hardcoded" targets typically come from
+ /// frame support macros.)
+ /// - `storage_keys` (param index 2): String of comma separated (no spaces) hex encoded
+ /// (no `0x` prefix) storage keys. If an empty string is specified no events will
+ /// be filtered out. If anything other than an empty string is specified, events
+ /// will be filtered by storage key (so non-storage events will **not** show up).
+ /// You can specify any length of a storage key prefix (i.e. if a specified storage
+ /// key is in the beginning of an event's storage key it is considered a match).
+ /// Example: for balance tracking on Polkadot & Kusama you would likely want
+ /// to track changes to account balances with the `frame_system::Account` storage item,
+ /// which is a map from `AccountId` to `AccountInfo`. The key filter for this would be
+ /// the storage prefix for the map:
+ /// `26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9`.
+ /// Additionally you would want to track the extrinsic index, which is under the
+ /// `:extrinsic_index` key. The key for this would be the aforementioned string as bytes
+ /// in hex: `3a65787472696e7369635f696e646578`.
+ /// - `methods` (param index 3): String of comma separated (no spaces) tracing event methods.
+ /// If an empty string is specified no events will be filtered out. If anything other than
+ /// an empty string is specified, events will be filtered by method (so non-method events
+ /// will **not** show up).
+ ///
+ /// The following are some resources to learn more about storage keys in Substrate:
+ /// [substrate storage][1], [transparent keys in substrate][2],
+ /// [querying substrate storage via rpc][3].
+ ///
+ /// [1]: https://substrate.dev/docs/en/knowledgebase/advanced/storage#storage-map-key
+ /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/
+ /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/
+ ///
+ /// ### Maximum payload size
+ ///
+ /// The maximum payload size allowed is 15 MB. Payloads over this size will return an
+ /// object with a simple error message. If you run into issues with payload size you can
+ /// narrow down the traces using a smaller set of targets and/or storage keys.
+ ///
+ /// If you are having issues with maximum payload size you can use the flag
+ /// `-lstate_tracing=trace` to get some logging during tracing.
+ #[rpc(name = "state_traceBlock")]
+ fn trace_block(
+     &self,
+     block: Hash,
+     targets: Option<String>,
+     storage_keys: Option<String>,
+     methods: Option<String>,
+ ) -> FutureResult<sp_rpc::tracing::TraceBlockResponse>;
 }
diff --git a/client/rpc-api/src/system/error.rs b/client/rpc-api/src/system/error.rs
index 4897aa485cbe4..4ba5125d82bc1 100644
--- a/client/rpc-api/src/system/error.rs
+++ b/client/rpc-api/src/system/error.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
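Back to the `state_traceBlock` parameters for a moment: the `26aa394e…` filter quoted there is `twox_128("System") ++ twox_128("Account")`. A sketch that recomputes it, assuming the workspace's `sp-core` crate plus the third-party `hex` crate:

```rust
use sp_core::hashing::twox_128;

// Map prefixes in FRAME are `twox_128(pallet) ++ twox_128(item)`.
fn map_prefix(pallet: &str, item: &str) -> String {
    let mut key = Vec::with_capacity(32);
    key.extend_from_slice(&twox_128(pallet.as_bytes()));
    key.extend_from_slice(&twox_128(item.as_bytes()));
    hex::encode(key)
}

fn main() {
    // The `frame_system::Account` prefix used in the balance-tracking example.
    assert_eq!(
        map_prefix("System", "Account"),
        "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9"
    );
}
```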
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -25,17 +25,16 @@ use jsonrpc_core as rpc; pub type Result = std::result::Result; /// System RPC errors. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] pub enum Error { /// Provided block range couldn't be resolved to a list of blocks. - #[display(fmt = "Node is not fully functional: {}", _0)] + #[error("Node is not fully functional: {}", .0)] NotHealthy(Health), /// Peer argument is malformatted. + #[error("{0}")] MalformattedPeerArg(String), } -impl std::error::Error for Error {} - /// Base code for all system errors. const BASE_ERROR: i64 = 2000; @@ -48,10 +47,10 @@ impl From for rpc::Error { data: serde_json::to_value(h).ok(), }, Error::MalformattedPeerArg(ref e) => rpc::Error { - code :rpc::ErrorCode::ServerError(BASE_ERROR + 2), + code: rpc::ErrorCode::ServerError(BASE_ERROR + 2), message: e.clone(), data: None, - } + }, } } } diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index c5dddedef9564..96e8aeb1ae3d7 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,9 +18,9 @@ //! Substrate system API helpers. +use sc_chain_spec::{ChainType, Properties}; +use serde::{Deserialize, Serialize}; use std::fmt; -use serde::{Serialize, Deserialize}; -use sp_chain_spec::{Properties, ChainType}; /// Running node's static details. #[derive(Clone, Debug)] @@ -53,9 +53,7 @@ pub struct Health { impl fmt::Display for Health { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{} peers ({})", self.peers, if self.is_syncing { - "syncing" - } else { "idle" }) + write!(fmt, "{} peers ({})", self.peers, if self.is_syncing { "syncing" } else { "idle" }) } } @@ -82,8 +80,6 @@ pub enum NodeRole { LightClient, /// The node is an authority Authority, - /// The node is a sentry - Sentry, } /// The state of the syncing of the node. @@ -109,7 +105,8 @@ mod tests { peers: 1, is_syncing: false, should_have_peers: true, - }).unwrap(), + }) + .unwrap(), r#"{"peers":1,"isSyncing":false,"shouldHavePeers":true}"#, ); } @@ -122,7 +119,8 @@ mod tests { roles: "a".into(), best_hash: 5u32, best_number: 6u32, - }).unwrap(), + }) + .unwrap(), r#"{"peerId":"2","roles":"a","bestHash":5,"bestNumber":6}"#, ); } @@ -134,7 +132,8 @@ mod tests { starting_block: 12u32, current_block: 50u32, highest_block: Some(128u32), - }).unwrap(), + }) + .unwrap(), r#"{"startingBlock":12,"currentBlock":50,"highestBlock":128}"#, ); @@ -143,7 +142,8 @@ mod tests { starting_block: 12u32, current_block: 50u32, highest_block: None, - }).unwrap(), + }) + .unwrap(), r#"{"startingBlock":12,"currentBlock":50}"#, ); } diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index fbeec23ea5085..3ffc5f4346508 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,13 +22,15 @@ pub mod error; pub mod helpers; use crate::helpers::Receiver; +use jsonrpc_core::BoxFuture; use jsonrpc_derive::rpc; -use futures::{future::BoxFuture, compat::Compat}; use self::error::Result as SystemResult; -pub use self::helpers::{SystemInfo, Health, PeerInfo, NodeRole, SyncState}; -pub use self::gen_client::Client as SystemClient; +pub use self::{ + gen_client::Client as SystemClient, + helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}, +}; /// Substrate system RPC API #[rpc] @@ -47,11 +49,11 @@ pub trait SystemApi { /// Get the chain's type. #[rpc(name = "system_chainType")] - fn system_type(&self) -> SystemResult; + fn system_type(&self) -> SystemResult; /// Get a custom set of properties as a JSON object, defined in the chain spec. #[rpc(name = "system_properties")] - fn system_properties(&self) -> SystemResult; + fn system_properties(&self) -> SystemResult; /// Return health status of the node. /// @@ -74,16 +76,16 @@ pub trait SystemApi { /// Returns currently connected peers #[rpc(name = "system_peers", returns = "Vec>")] - fn system_peers(&self) - -> Compat>>>>; + fn system_peers(&self) -> BoxFuture>>>; /// Returns current state of the network. /// - /// **Warning**: This API is not stable. - // TODO: make this stable and move structs https://github.com/paritytech/substrate/issues/1890 - #[rpc(name = "system_networkState", returns = "jsonrpc_core::Value")] - fn system_network_state(&self) - -> Compat>>; + /// **Warning**: This API is not stable. Please do not programmatically interpret its output, + /// as its format might change at any time. + // TODO: the future of this call is uncertain: https://github.com/paritytech/substrate/issues/1890 + // https://github.com/paritytech/substrate/issues/5541 + #[rpc(name = "system_unstable_networkState", returns = "jsonrpc_core::Value")] + fn system_network_state(&self) -> BoxFuture>; /// Adds a reserved peer. Returns the empty string or an error. The string /// parameter should encode a `p2p` multiaddr. @@ -91,14 +93,19 @@ pub trait SystemApi { /// `/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV` /// is an example of a valid, passing multiaddr with PeerId attached. #[rpc(name = "system_addReservedPeer", returns = "()")] - fn system_add_reserved_peer(&self, peer: String) - -> Compat>>; + fn system_add_reserved_peer(&self, peer: String) -> BoxFuture>; /// Remove a reserved peer. Returns the empty string or an error. The string /// should encode only the PeerId e.g. `QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV`. #[rpc(name = "system_removeReservedPeer", returns = "()")] - fn system_remove_reserved_peer(&self, peer_id: String) - -> Compat>>; + fn system_remove_reserved_peer( + &self, + peer_id: String, + ) -> BoxFuture>; + + /// Returns the list of reserved peers + #[rpc(name = "system_reservedPeers", returns = "Vec")] + fn system_reserved_peers(&self) -> Receiver>; /// Returns the roles the node is running as. #[rpc(name = "system_nodeRoles", returns = "Vec")] @@ -108,4 +115,16 @@ pub trait SystemApi { /// known block. 
#[rpc(name = "system_syncState", returns = "SyncState")] fn system_sync_state(&self) -> Receiver>; + + /// Adds the supplied directives to the current log filter + /// + /// The syntax is identical to the CLI `=`: + /// + /// `sync=debug,state=trace` + #[rpc(name = "system_addLogFilter", returns = "()")] + fn system_add_log_filter(&self, directives: String) -> Result<(), jsonrpc_core::Error>; + + /// Resets the log filter to Substrate defaults + #[rpc(name = "system_resetLogFilter", returns = "()")] + fn system_reset_log_filter(&self) -> Result<(), jsonrpc_core::Error>; } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 4fdf0298a530b..26a05a8263dc4 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-server" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,16 +13,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = "0.1.6" -jsonrpc-core = "15.0.0" -pubsub = { package = "jsonrpc-pubsub", version = "15.0.0" } +futures = "0.3.16" +jsonrpc-core = "18.0.0" +pubsub = { package = "jsonrpc-pubsub", version = "18.0.0" } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -serde = "1.0.101" -serde_json = "1.0.41" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } - -[target.'cfg(not(target_os = "unknown"))'.dependencies] -http = { package = "jsonrpc-http-server", version = "15.0.0" } -ipc = { package = "jsonrpc-ipc-server", version = "15.0.0" } -ws = { package = "jsonrpc-ws-server", version = "15.0.0" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +serde_json = "1.0.68" +tokio = "1.10" +http = { package = "jsonrpc-http-server", version = "18.0.0" } +ipc = { package = "jsonrpc-ipc-server", version = "18.0.0" } +ws = { package = "jsonrpc-ws-server", version = "18.0.0" } diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 1f99e8bb0d242..65ed6a914b19a 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,13 +22,16 @@ mod middleware; -use std::io; use jsonrpc_core::{IoHandlerExtension, MetaIoHandler}; use log::error; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use pubsub::PubSubMetadata; +use std::io; + +const MEGABYTE: usize = 1024 * 1024; /// Maximal payload accepted by RPC servers. -const MAX_PAYLOAD: usize = 15 * 1024 * 1024; +pub const RPC_MAX_PAYLOAD_DEFAULT: usize = 15 * MEGABYTE; /// Default maximum number of connections for WS RPC servers. const WS_MAX_CONNECTIONS: usize = 100; @@ -36,8 +39,7 @@ const WS_MAX_CONNECTIONS: usize = 100; /// The RPC IoHandler containing all requested APIs. 
pub type RpcHandler = pubsub::PubSubHandler; -pub use self::inner::*; -pub use middleware::{RpcMiddleware, RpcMetrics}; +pub use middleware::{method_names, RpcMetrics, RpcMiddleware}; /// Construct rpc `IoHandler` pub fn rpc_handler( @@ -55,107 +57,159 @@ pub fn rpc_handler( let methods = serde_json::to_value(&methods) .expect("Serialization of Vec is infallible; qed"); - move |_| Ok(serde_json::json!({ - "version": 1, - "methods": methods.clone(), - })) + move |_| { + let methods = methods.clone(); + async move { + Ok(serde_json::json!({ + "version": 1, + "methods": methods, + })) + } + } }); io } -#[cfg(not(target_os = "unknown"))] -mod inner { - use super::*; - - /// Type alias for ipc server - pub type IpcServer = ipc::Server; - /// Type alias for http server - pub type HttpServer = http::Server; - /// Type alias for ws server - pub type WsServer = ws::Server; - - /// Start HTTP server listening on given address. - /// - /// **Note**: Only available if `not(target_os = "unknown")`. - pub fn start_http( - addr: &std::net::SocketAddr, - cors: Option<&Vec>, - io: RpcHandler, - ) -> io::Result { - http::ServerBuilder::new(io) - .threads(4) - .health_api(("/health", "system_health")) - .allowed_hosts(hosts_filtering(cors.is_some())) - .rest_api(if cors.is_some() { - http::RestApi::Secure - } else { - http::RestApi::Unsecure +/// RPC server-specific prometheus metrics. +#[derive(Debug, Clone, Default)] +pub struct ServerMetrics { + /// Number of sessions opened. + session_opened: Option>, + /// Number of sessions closed. + session_closed: Option>, +} + +impl ServerMetrics { + /// Create new WebSocket RPC server metrics. + pub fn new(registry: Option<&Registry>) -> Result { + registry + .map(|r| { + Ok(Self { + session_opened: register( + Counter::new( + "rpc_sessions_opened", + "Number of persistent RPC sessions opened", + )?, + r, + )? + .into(), + session_closed: register( + Counter::new( + "rpc_sessions_closed", + "Number of persistent RPC sessions closed", + )?, + r, + )? + .into(), + }) }) - .cors(map_cors::(cors)) - .max_request_body_size(MAX_PAYLOAD) - .start_http(addr) + .unwrap_or_else(|| Ok(Default::default())) } +} - /// Start IPC server listening on given path. - /// - /// **Note**: Only available if `not(target_os = "unknown")`. - pub fn start_ipc( - addr: &str, - io: RpcHandler, - ) -> io::Result { - let builder = ipc::ServerBuilder::new(io); - #[cfg(target_os = "unix")] - builder.set_security_attributes({ - let security_attributes = ipc::SecurityAttributes::empty(); - security_attributes.set_mode(0o600)?; - security_attributes - }); - builder.start(addr) - } +/// Type alias for ipc server +pub type IpcServer = ipc::Server; +/// Type alias for http server +pub type HttpServer = http::Server; +/// Type alias for ws server +pub type WsServer = ws::Server; - /// Start WS server listening on given address. - /// - /// **Note**: Only available if `not(target_os = "unknown")`. 
- pub fn start_ws>> ( - addr: &std::net::SocketAddr, - max_connections: Option, - cors: Option<&Vec>, - io: RpcHandler, - ) -> io::Result { - ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| context.sender().into()) - .max_payload(MAX_PAYLOAD) - .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) - .allowed_origins(map_cors(cors)) - .allowed_hosts(hosts_filtering(cors.is_some())) - .start(addr) - .map_err(|err| match err { - ws::Error::Io(io) => io, - ws::Error::ConnectionClosed => io::ErrorKind::BrokenPipe.into(), - e => { - error!("{}", e); - io::ErrorKind::Other.into() - } - }) +impl ws::SessionStats for ServerMetrics { + fn open_session(&self, _id: ws::SessionId) { + self.session_opened.as_ref().map(|m| m.inc()); } - fn map_cors From<&'a str>>( - cors: Option<&Vec> - ) -> http::DomainsValidation { - cors.map(|x| x.iter().map(AsRef::as_ref).map(Into::into).collect::>()).into() + fn close_session(&self, _id: ws::SessionId) { + self.session_closed.as_ref().map(|m| m.inc()); } +} - fn hosts_filtering(enable: bool) -> http::DomainsValidation { - if enable { - // NOTE The listening address is whitelisted by default. - // Setting an empty vector here enables the validation - // and allows only the listening address. - http::DomainsValidation::AllowOnly(vec![]) - } else { - http::DomainsValidation::Disabled - } - } +/// Start HTTP server listening on given address. +pub fn start_http( + addr: &std::net::SocketAddr, + cors: Option<&Vec>, + io: RpcHandler, + maybe_max_payload_mb: Option, + tokio_handle: tokio::runtime::Handle, +) -> io::Result { + let max_request_body_size = maybe_max_payload_mb + .map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); + + http::ServerBuilder::new(io) + .threads(1) + .event_loop_executor(tokio_handle) + .health_api(("/health", "system_health")) + .allowed_hosts(hosts_filtering(cors.is_some())) + .rest_api(if cors.is_some() { http::RestApi::Secure } else { http::RestApi::Unsecure }) + .cors(map_cors::(cors)) + .max_request_body_size(max_request_body_size) + .start_http(addr) +} + +/// Start IPC server listening on given path. +pub fn start_ipc( + addr: &str, + io: RpcHandler, + server_metrics: ServerMetrics, +) -> io::Result { + let builder = ipc::ServerBuilder::new(io); + #[cfg(target_os = "unix")] + builder.set_security_attributes({ + let security_attributes = ipc::SecurityAttributes::empty(); + security_attributes.set_mode(0o600)?; + security_attributes + }); + builder.session_stats(server_metrics).start(addr) } -#[cfg(target_os = "unknown")] -mod inner { +/// Start WS server listening on given address. 
+pub fn start_ws<
+	M: pubsub::PubSubMetadata + From<futures::channel::mpsc::UnboundedSender<String>>,
+>(
+	addr: &std::net::SocketAddr,
+	max_connections: Option<usize>,
+	cors: Option<&Vec<String>>,
+	io: RpcHandler<M>,
+	maybe_max_payload_mb: Option<usize>,
+	server_metrics: ServerMetrics,
+	tokio_handle: tokio::runtime::Handle,
+) -> io::Result<WsServer> {
+	let rpc_max_payload = maybe_max_payload_mb
+		.map(|mb| mb.saturating_mul(MEGABYTE))
+		.unwrap_or(RPC_MAX_PAYLOAD_DEFAULT);
+	ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| {
+		context.sender().into()
+	})
+	.event_loop_executor(tokio_handle)
+	.max_payload(rpc_max_payload)
+	.max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS))
+	.allowed_origins(map_cors(cors))
+	.allowed_hosts(hosts_filtering(cors.is_some()))
+	.session_stats(server_metrics)
+	.start(addr)
+	.map_err(|err| match err {
+		ws::Error::Io(io) => io,
+		ws::Error::ConnectionClosed => io::ErrorKind::BrokenPipe.into(),
+		e => {
+			error!("{}", e);
+			io::ErrorKind::Other.into()
+		},
+	})
+}
+
+fn map_cors<T: for<'a> From<&'a str>>(cors: Option<&Vec<String>>) -> http::DomainsValidation<T> {
+	cors.map(|x| x.iter().map(AsRef::as_ref).map(Into::into).collect::<Vec<T>>())
+		.into()
+}
+
+fn hosts_filtering(enable: bool) -> http::DomainsValidation<http::Host> {
+	if enable {
+		// NOTE The listening address is whitelisted by default.
+		// Setting an empty vector here enables the validation
+		// and allows only the listening address.
+		http::DomainsValidation::AllowOnly(vec![])
+	} else {
+		http::DomainsValidation::Disabled
+	}
 }
diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs
index 74139714c8cb7..00532b0e8d661 100644
--- a/client/rpc-servers/src/middleware.rs
+++ b/client/rpc-servers/src/middleware.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
@@ -18,54 +18,118 @@

//! Middleware for RPC requests.
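Before the middleware internals below, a rough, self-contained sketch of how the pieces are meant to be wired together. It assumes jsonrpc-core 18's `MetaIoHandler::with_middleware` plus the `RpcMetrics`/`RpcMiddleware` types from this file, and passes no Prometheus registry, so the counters stay disabled:

use jsonrpc_core::{MetaIoHandler, Value};
use std::collections::HashSet;

fn build_handler() -> MetaIoHandler<(), RpcMiddleware> {
	// `RpcMetrics::new(None)` returns Ok(None): no registry, no metrics.
	let metrics = RpcMetrics::new(None).expect("no registry to fail on");
	// With an empty known-method set every call is reported as "invalid
	// method", so real callers pass the names collected via `method_names`.
	let middleware = RpcMiddleware::new(metrics, HashSet::new(), "http");
	let mut io = MetaIoHandler::with_middleware(middleware);
	io.add_sync_method("say_hello", |_| Ok(Value::String("hello".into())));
	io
}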
-use jsonrpc_core::{ - Middleware as RequestMiddleware, Metadata, - Request, Response, FutureResponse, FutureOutput -}; +use std::collections::HashSet; + +use jsonrpc_core::{FutureOutput, FutureResponse, Metadata, Middleware as RequestMiddleware}; use prometheus_endpoint::{ - Registry, CounterVec, PrometheusError, - Opts, register, U64 + register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; -use futures::{future::Either, Future}; +use futures::{future::Either, Future, FutureExt}; +use pubsub::PubSubMetadata; + +use crate::RpcHandler; /// Metrics for RPC middleware #[derive(Debug, Clone)] pub struct RpcMetrics { - rpc_calls: CounterVec, + requests_started: CounterVec, + requests_finished: CounterVec, + calls_time: HistogramVec, + calls_started: CounterVec, + calls_finished: CounterVec, } impl RpcMetrics { /// Create an instance of metrics - pub fn new(metrics_registry: Option<&Registry>) -> Result { - metrics_registry.and_then(|r| { - Some(RpcMetrics { - rpc_calls: register(CounterVec::new( - Opts::new( - "rpc_calls_total", - "Number of rpc calls received", - ), - &["protocol"] - ).ok()?, r).ok()?, - }) - }).ok_or(PrometheusError::Msg("Cannot register metric".to_string())) + pub fn new(metrics_registry: Option<&Registry>) -> Result, PrometheusError> { + if let Some(r) = metrics_registry { + Ok(Some(Self { + requests_started: register( + CounterVec::new( + Opts::new( + "rpc_requests_started", + "Number of RPC requests (not calls) received by the server.", + ), + &["protocol"], + )?, + r, + )?, + requests_finished: register( + CounterVec::new( + Opts::new( + "rpc_requests_finished", + "Number of RPC requests (not calls) processed by the server.", + ), + &["protocol"], + )?, + r, + )?, + calls_time: register( + HistogramVec::new( + HistogramOpts::new( + "rpc_calls_time", + "Total time [μs] of processed RPC calls", + ), + &["protocol", "method"], + )?, + r, + )?, + calls_started: register( + CounterVec::new( + Opts::new( + "rpc_calls_started", + "Number of received RPC calls (unique un-batched requests)", + ), + &["protocol", "method"], + )?, + r, + )?, + calls_finished: register( + CounterVec::new( + Opts::new( + "rpc_calls_finished", + "Number of processed RPC calls (unique un-batched requests)", + ), + &["protocol", "method", "is_error"], + )?, + r, + )?, + })) + } else { + Ok(None) + } } } +/// Instantiates a dummy `IoHandler` given a builder function to extract supported method names. +pub fn method_names(gen_handler: F) -> Result, E> +where + F: FnOnce(RpcMiddleware) -> Result, E>, + M: PubSubMetadata, +{ + let io = gen_handler(RpcMiddleware::new(None, HashSet::new(), "dummy"))?; + Ok(io.iter().map(|x| x.0.clone()).collect()) +} + /// Middleware for RPC calls pub struct RpcMiddleware { metrics: Option, + known_rpc_method_names: HashSet, transport_label: String, } impl RpcMiddleware { - /// Create an instance of middleware with provided metrics - /// transport_label is used as a label for Prometheus collector - pub fn new(metrics: Option, transport_label: &str) -> Self { - RpcMiddleware { - metrics, - transport_label: String::from(transport_label), - } + /// Create an instance of middleware. + /// + /// - `metrics`: Will be used to report statistics. + /// - `transport_label`: The label that is used when reporting the statistics. 
+ pub fn new( + metrics: Option, + known_rpc_method_names: HashSet, + transport_label: &str, + ) -> Self { + RpcMiddleware { metrics, known_rpc_method_names, transport_label: transport_label.into() } } } @@ -73,15 +137,108 @@ impl RequestMiddleware for RpcMiddleware { type Future = FutureResponse; type CallFuture = FutureOutput; - fn on_request(&self, request: Request, meta: M, next: F) -> Either + fn on_request( + &self, + request: jsonrpc_core::Request, + meta: M, + next: F, + ) -> Either where - F: Fn(Request, M) -> X + Send + Sync, - X: Future, Error = ()> + Send + 'static, + F: Fn(jsonrpc_core::Request, M) -> X + Send + Sync, + X: Future> + Send + 'static, { - if let Some(ref metrics) = self.metrics { - metrics.rpc_calls.with_label_values(&[self.transport_label.as_str()]).inc(); + let metrics = self.metrics.clone(); + let transport_label = self.transport_label.clone(); + if let Some(ref metrics) = metrics { + metrics.requests_started.with_label_values(&[transport_label.as_str()]).inc(); } + let r = next(request, meta); + Either::Left( + async move { + let r = r.await; + if let Some(ref metrics) = metrics { + metrics.requests_finished.with_label_values(&[transport_label.as_str()]).inc(); + } + r + } + .boxed(), + ) + } + + fn on_call( + &self, + call: jsonrpc_core::Call, + meta: M, + next: F, + ) -> Either + where + F: Fn(jsonrpc_core::Call, M) -> X + Send + Sync, + X: Future> + Send + 'static, + { + let start = std::time::Instant::now(); + let name = call_name(&call, &self.known_rpc_method_names).to_owned(); + let metrics = self.metrics.clone(); + let transport_label = self.transport_label.clone(); + log::trace!(target: "rpc_metrics", "[{}] {} call: {:?}", transport_label, name, &call); + if let Some(ref metrics) = metrics { + metrics + .calls_started + .with_label_values(&[transport_label.as_str(), name.as_str()]) + .inc(); + } + let r = next(call, meta); + Either::Left( + async move { + let r = r.await; + let micros = start.elapsed().as_micros(); + if let Some(ref metrics) = metrics { + metrics + .calls_time + .with_label_values(&[transport_label.as_str(), name.as_str()]) + .observe(micros as _); + metrics + .calls_finished + .with_label_values(&[ + transport_label.as_str(), + name.as_str(), + if is_success(&r) { "true" } else { "false" }, + ]) + .inc(); + } + log::debug!( + target: "rpc_metrics", + "[{}] {} call took {} μs", + transport_label, + name, + micros, + ); + r + } + .boxed(), + ) + } +} + +fn call_name<'a>(call: &'a jsonrpc_core::Call, known_methods: &HashSet) -> &'a str { + // To prevent bloating metric with all invalid method names we filter them out here. + let only_known = |method: &'a String| { + if known_methods.contains(method) { + method.as_str() + } else { + "invalid method" + } + }; + + match call { + jsonrpc_core::Call::Invalid { .. 
} => "invalid call", + jsonrpc_core::Call::MethodCall(ref call) => only_known(&call.method), + jsonrpc_core::Call::Notification(ref notification) => only_known(¬ification.method), + } +} - Either::B(next(request, meta)) +fn is_success(output: &Option) -> bool { + match output { + Some(jsonrpc_core::Output::Success(..)) => true, + _ => false, } } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 021c795fe5b94..427800f74ddf2 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,43 +13,40 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-rpc-api = { version = "0.8.0", path = "../rpc-api" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -futures = { version = "0.3.1", features = ["compat"] } -jsonrpc-pubsub = "15.0.0" +sc-rpc-api = { version = "0.10.0-dev", path = "../rpc-api" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.16" +jsonrpc-pubsub = "18.0.0" log = "0.4.8" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -rpc = { package = "jsonrpc-core", version = "15.0.0" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -serde_json = "1.0.41" -sp-session = { version = "2.0.0", path = "../../primitives/session" } -sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-chain-spec = { version = "2.0.0", path = "../../primitives/chain-spec" } -sc-executor = { version = "0.8.0", path = "../executor" } -sc-block-builder = { version = "0.8.0", path = "../../client/block-builder" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +rpc = { package = "jsonrpc-core", version = "18.0.0" } +sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } +serde_json = "1.0.68" +sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } +sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } +sp-rpc = { version = "4.0.0-dev", path = "../../primitives/rpc" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sc-tracing = { version = "4.0.0-dev", path = "../tracing" } hash-db = { version = "0.15.2", 
default-features = false } -parking_lot = "0.10.0" +parking_lot = "0.11.1" lazy_static = { version = "1.4.0", optional = true } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } [dev-dependencies] assert_matches = "1.3.0" -futures01 = { package = "futures", version = "0.1.29" } lazy_static = "1.4.0" -sc-network = { version = "0.8.0", path = "../network" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -tokio = "0.1.22" -sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } [features] test-helpers = ["lazy_static"] diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 1db90e209d0d6..40b477a662a60 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,33 +21,31 @@ #[cfg(test)] mod tests; -use std::{sync::Arc, convert::TryInto}; use log::warn; +use std::{convert::TryInto, sync::Arc}; -use sp_blockchain::{Error as ClientError, HeaderBackend}; +use sp_blockchain::HeaderBackend; -use rpc::futures::{ - Sink, Future, - future::result, +use codec::{Decode, Encode}; +use futures::{ + future::{FutureExt, TryFutureExt}, + SinkExt, StreamExt as _, }; -use futures::{StreamExt as _, compat::Compat}; -use futures::future::{ready, FutureExt, TryFutureExt}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use sc_rpc_api::DenyUnsafe; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use codec::{Encode, Decode}; -use sp_core::Bytes; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_api::ProvideRuntimeApi; -use sp_runtime::generic; -use sp_transaction_pool::{ - TransactionPool, InPoolTransaction, TransactionStatus, TransactionSource, - BlockHash, TxHash, TransactionFor, error::IntoPoolError, +use sc_transaction_pool_api::{ + error::IntoPoolError, BlockHash, InPoolTransaction, TransactionFor, TransactionPool, + TransactionSource, TransactionStatus, TxHash, }; +use sp_api::ProvideRuntimeApi; +use sp_core::Bytes; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{generic, traits::Block as BlockT}; use sp_session::SessionKeys; +use self::error::{Error, FutureResult, Result}; /// Re-export the API for backward compatibility. pub use sc_rpc_api::author::*; -use self::error::{Error, FutureResult, Result}; /// Authoring API pub struct Author { @@ -72,13 +70,7 @@ impl Author { keystore: SyncCryptoStorePtr, deny_unsafe: DenyUnsafe, ) -> Self { - Author { - client, - pool, - subscriptions, - keystore, - deny_unsafe, - } + Author { client, pool, subscriptions, keystore, deny_unsafe } } } @@ -90,19 +82,16 @@ impl Author { const TX_SOURCE: TransactionSource = TransactionSource::External; impl AuthorApi, BlockHash

> for Author<P, Client>
-	where
-		P: TransactionPool + Sync + Send + 'static,
-		Client: HeaderBackend<P::Block> + ProvideRuntimeApi<P::Block> + Send + Sync + 'static,
-		Client::Api: SessionKeys<P::Block>,
+where
+	P: TransactionPool + Sync + Send + 'static,
+	Client: HeaderBackend<P::Block> + ProvideRuntimeApi<P::Block> + Send + Sync + 'static,
+	Client::Api: SessionKeys<P::Block>,
+	P::Hash: Unpin,
+	<P::Block as BlockT>::Hash: Unpin,
{
	type Metadata = crate::Metadata;

-	fn insert_key(
-		&self,
-		key_type: String,
-		suri: String,
-		public: Bytes,
-	) -> Result<()> {
+	fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()> {
		self.deny_unsafe.check_if_safe()?;

		let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?;
@@ -115,20 +104,22 @@ impl<P, Client> AuthorApi<TxHash<P>, BlockHash<P>> for Author<P, Client>

		self.deny_unsafe.check_if_safe()?;

		let best_block_hash = self.client.info().best_hash;
-		self.client.runtime_api().generate_session_keys(
-			&generic::BlockId::Hash(best_block_hash),
-			None,
-		).map(Into::into).map_err(|e| Error::Client(Box::new(e)))
+		self.client
+			.runtime_api()
+			.generate_session_keys(&generic::BlockId::Hash(best_block_hash), None)
+			.map(Into::into)
+			.map_err(|e| Error::Client(Box::new(e)))
	}

	fn has_session_keys(&self, session_keys: Bytes) -> Result<bool> {
		self.deny_unsafe.check_if_safe()?;

		let best_block_hash = self.client.info().best_hash;
-		let keys = self.client.runtime_api().decode_session_keys(
-			&generic::BlockId::Hash(best_block_hash),
-			session_keys.to_vec(),
-		).map_err(|e| Error::Client(Box::new(e)))?
+		let keys = self
+			.client
+			.runtime_api()
+			.decode_session_keys(&generic::BlockId::Hash(best_block_hash), session_keys.to_vec())
+			.map_err(|e| Error::Client(Box::new(e)))?
			.ok_or_else(|| Error::InvalidSessionKeys)?;

		Ok(SyncCryptoStore::has_keys(&*self.keystore, &keys))
@@ -144,16 +135,18 @@ impl<P, Client> AuthorApi<TxHash<P>, BlockHash<P>> for Author<P, Client>

	fn submit_extrinsic(&self, ext: Bytes) -> FutureResult<TxHash<P>> {
		let xt = match Decode::decode(&mut &ext[..]) {
			Ok(xt) => xt,
-			Err(err) => return Box::new(result(Err(err.into()))),
+			Err(err) => return async move { Err(err.into()) }.boxed(),
		};
		let best_block_hash = self.client.info().best_hash;
-		Box::new(self.pool
+
+		self.pool
			.submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt)
-			.compat()
-			.map_err(|e| e.into_pool_error()
-				.map(Into::into)
-				.unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()))
-		)
+			.map_err(|e| {
+				e.into_pool_error()
+					.map(Into::into)
+					.unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into())
+			})
+			.boxed()
	}

	fn pending_extrinsics(&self) -> Result<Vec<Bytes>> {
@@ -166,7 +159,8 @@ impl<P, Client> AuthorApi<TxHash<P>, BlockHash<P>> for Author<P, Client>

	) -> Result<Vec<TxHash<P>>> {
		self.deny_unsafe.check_if_safe()?;

-		let hashes = bytes_or_hash.into_iter()
+		let hashes = bytes_or_hash
+			.into_iter()
			.map(|x| match x {
				hash::ExtrinsicOrHash::Hash(h) => Ok(h),
				hash::ExtrinsicOrHash::Extrinsic(bytes) => {
@@ -176,65 +170,74 @@ impl<P, Client> AuthorApi<TxHash<P>, BlockHash<P>> for Author<P, Client>

			})
			.collect::<Result<Vec<_>>>()?;

-		Ok(
-			self.pool
-				.remove_invalid(&hashes)
-				.into_iter()
-				.map(|tx| tx.hash().clone())
-				.collect()
-		)
+		Ok(self
+			.pool
+			.remove_invalid(&hashes)
+			.into_iter()
+			.map(|tx| tx.hash().clone())
+			.collect())
	}

-	fn watch_extrinsic(&self,
+	fn watch_extrinsic(
+		&self,
		_metadata: Self::Metadata,
		subscriber: Subscriber<TransactionStatus<TxHash<P>, BlockHash<P>>>,

		xt: Bytes,
	) {
-		let submit = || -> Result<_> {
-			let best_block_hash = self.client.info().best_hash;
-			let dxt = TransactionFor::<P>::decode(&mut &xt[..])

-				.map_err(error::Error::from)?;
-			Ok(
-				self.pool
-					.submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt)
-					.map_err(|e| e.into_pool_error()
-						.map(error::Error::from)
-						.unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into())
-					)
-			)
+		let best_block_hash = self.client.info().best_hash;
+		let dxt = match TransactionFor::<P>

::decode(&mut &xt[..]).map_err(error::Error::from) { + Ok(tx) => tx, + Err(err) => { + warn!("Failed to submit extrinsic: {}", err); + // reject the subscriber (ignore errors - we don't care if subscriber is no longer + // there). + let _ = subscriber.reject(err.into()); + return + }, }; + let submit = self + .pool + .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) + .map_err(|e| { + e.into_pool_error() + .map(error::Error::from) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()) + }); + let subscriptions = self.subscriptions.clone(); - let future = ready(submit()) - .and_then(|res| res) - // convert the watcher into a `Stream` - .map(|res| res.map(|stream| stream.map(|v| Ok::<_, ()>(Ok(v))))) - // now handle the import result, - // start a new subscrition - .map(move |result| match result { - Ok(watcher) => { - subscriptions.add(subscriber, move |sink| { - sink - .sink_map_err(|_| unimplemented!()) - .send_all(Compat::new(watcher)) - .map(|_| ()) - }); - }, + + let future = async move { + let tx_stream = match submit.await { + Ok(s) => s, Err(err) => { warn!("Failed to submit extrinsic: {}", err); - // reject the subscriber (ignore errors - we don't care if subscriber is no longer there). + // reject the subscriber (ignore errors - we don't care if subscriber is no + // longer there). let _ = subscriber.reject(err.into()); + return }, + }; + + subscriptions.add(subscriber, move |sink| { + tx_stream + .map(|v| Ok(Ok(v))) + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) + .map(drop) }); + }; - let res = self.subscriptions.executor() - .execute(Box::new(Compat::new(future.map(|_| Ok(()))))); + let res = self.subscriptions.executor().spawn_obj(future.boxed().into()); if res.is_err() { warn!("Error spawning subscription RPC task."); } } - fn unwatch_extrinsic(&self, _metadata: Option, id: SubscriptionId) -> Result { + fn unwatch_extrinsic( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> Result { Ok(self.subscriptions.cancel(id)) } } diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index dc553e60dbfbe..2349e08fee506 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,37 +18,34 @@ use super::*; -use std::{mem, sync::Arc}; use assert_matches::assert_matches; use codec::Encode; +use futures::executor; +use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_core::{ - ed25519, sr25519, - H256, blake2_256, hexdisplay::HexDisplay, testing::{ED25519, SR25519}, + blake2_256, crypto::{CryptoTypePublicPair, Pair, Public}, + ed25519, + hexdisplay::HexDisplay, + sr25519, + testing::{ED25519, SR25519}, + H256, }; use sp_keystore::testing::KeyStore; -use rpc::futures::Stream as _; +use std::{mem, sync::Arc}; use substrate_test_runtime_client::{ - self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, Block}, - DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client, + self, + runtime::{Block, Extrinsic, SessionKeys, Transfer}, + AccountKeyring, Backend, Client, DefaultTestClientBuilderExt, TestClientBuilderExt, }; -use sc_transaction_pool::{BasicPool, FullChainApi}; -use futures::{executor, compat::Future01CompatExt}; fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { - let tx = Transfer { - amount: Default::default(), - nonce, - from: sender.into(), - to: Default::default(), - }; + let tx = + Transfer { amount: Default::default(), nonce, from: sender.into(), to: Default::default() }; tx.into_signed_tx() } -type FullTransactionPool = BasicPool< - FullChainApi, Block>, - Block, ->; +type FullTransactionPool = BasicPool, Block>, Block>; struct TestSetup { pub client: Arc>, @@ -63,17 +60,9 @@ impl Default for TestSetup { let client = Arc::new(client_builder.set_keystore(keystore.clone()).build()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - None, - spawner, - client.clone(), - ); - TestSetup { - client, - keystore, - pool, - } + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + TestSetup { client, keystore, pool } } } @@ -96,12 +85,10 @@ fn submit_transaction_should_not_cause_error() { let h: H256 = blake2_256(&xt).into(); assert_matches!( - AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), + executor::block_on(AuthorApi::submit_extrinsic(&p, xt.clone().into())), Ok(h2) if h == h2 ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err() - ); + assert!(executor::block_on(AuthorApi::submit_extrinsic(&p, xt.into())).is_err()); } #[test] @@ -111,17 +98,15 @@ fn submit_rich_transaction_should_not_cause_error() { let h: H256 = blake2_256(&xt).into(); assert_matches!( - AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), + executor::block_on(AuthorApi::submit_extrinsic(&p, xt.clone().into())), Ok(h2) if h == h2 ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err() - ); + assert!(executor::block_on(AuthorApi::submit_extrinsic(&p, xt.into())).is_err()); } #[test] fn should_watch_extrinsic() { - //given + // given let setup = TestSetup::default(); let p = setup.author(); @@ -134,7 +119,7 @@ fn should_watch_extrinsic() { uxt(AccountKeyring::Alice, 0).encode().into(), ); - let id = executor::block_on(id_rx.compat()).unwrap().unwrap(); + let id = executor::block_on(id_rx).unwrap().unwrap(); assert_matches!(id, SubscriptionId::String(_)); let id = match id { @@ -152,8 +137,8 @@ fn should_watch_extrinsic() { }; tx.into_signed_tx() }; - AuthorApi::submit_extrinsic(&p, replacement.encode().into()).wait().unwrap(); - let (res, data) = 
executor::block_on(data.into_future().compat()).unwrap(); + executor::block_on(AuthorApi::submit_extrinsic(&p, replacement.encode().into())).unwrap(); + let (res, data) = executor::block_on(data.into_future()); let expected = Some(format!( r#"{{"jsonrpc":"2.0","method":"test","params":{{"result":"ready","subscription":"{}"}}}}"#, @@ -168,23 +153,27 @@ fn should_watch_extrinsic() { id, )); - let res = executor::block_on(data.into_future().compat()).unwrap().0; + let res = executor::block_on(data.into_future()).0; assert_eq!(res, expected); } #[test] fn should_return_watch_validation_error() { - //given + // given let setup = TestSetup::default(); let p = setup.author(); let (subscriber, id_rx, _data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); // when - p.watch_extrinsic(Default::default(), subscriber, uxt(AccountKeyring::Alice, 179).encode().into()); + p.watch_extrinsic( + Default::default(), + subscriber, + uxt(AccountKeyring::Alice, 179).encode().into(), + ); // then - let res = executor::block_on(id_rx.compat()).unwrap(); + let res = executor::block_on(id_rx).unwrap(); assert!(res.is_err(), "Expected the transaction to be rejected as invalid."); } @@ -193,7 +182,7 @@ fn should_return_pending_extrinsics() { let p = TestSetup::default().author(); let ex = uxt(AccountKeyring::Alice, 0); - AuthorApi::submit_extrinsic(&p, ex.encode().into()).wait().unwrap(); + executor::block_on(AuthorApi::submit_extrinsic(&p, ex.encode().into())).unwrap(); assert_matches!( p.pending_extrinsics(), Ok(ref expected) if *expected == vec![Bytes(ex.encode())] @@ -206,19 +195,21 @@ fn should_remove_extrinsics() { let p = setup.author(); let ex1 = uxt(AccountKeyring::Alice, 0); - p.submit_extrinsic(ex1.encode().into()).wait().unwrap(); + executor::block_on(p.submit_extrinsic(ex1.encode().into())).unwrap(); let ex2 = uxt(AccountKeyring::Alice, 1); - p.submit_extrinsic(ex2.encode().into()).wait().unwrap(); + executor::block_on(p.submit_extrinsic(ex2.encode().into())).unwrap(); let ex3 = uxt(AccountKeyring::Bob, 0); - let hash3 = p.submit_extrinsic(ex3.encode().into()).wait().unwrap(); + let hash3 = executor::block_on(p.submit_extrinsic(ex3.encode().into())).unwrap(); assert_eq!(setup.pool.status().ready, 3); // now remove all 3 - let removed = p.remove_extrinsic(vec![ - hash::ExtrinsicOrHash::Hash(hash3), - // Removing this one will also remove ex2 - hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), - ]).unwrap(); + let removed = p + .remove_extrinsic(vec![ + hash::ExtrinsicOrHash::Hash(hash3), + // Removing this one will also remove ex2 + hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), + ]) + .unwrap(); assert_eq!(removed.len(), 3); } @@ -234,11 +225,13 @@ fn should_insert_key() { String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), suri.to_string(), key_pair.public().0.to_vec().into(), - ).expect("Insert key"); + ) + .expect("Insert key"); let public_keys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); - assert!(public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, key_pair.public().to_raw_vec()))); + assert!(public_keys + .contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, key_pair.public().to_raw_vec()))); } #[test] @@ -248,14 +241,16 @@ fn should_rotate_keys() { let new_public_keys = p.rotate_keys().expect("Rotates the keys"); - let session_keys = SessionKeys::decode(&mut &new_public_keys[..]) - .expect("SessionKeys decode successfully"); + let session_keys = + SessionKeys::decode(&mut &new_public_keys[..]).expect("SessionKeys decode 
successfully"); let ed25519_public_keys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); let sr25519_public_keys = SyncCryptoStore::keys(&*setup.keystore, SR25519).unwrap(); - assert!(ed25519_public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); - assert!(sr25519_public_keys.contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); + assert!(ed25519_public_keys + .contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); + assert!(sr25519_public_keys + .contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); } #[test] @@ -263,10 +258,8 @@ fn test_has_session_keys() { let setup = TestSetup::default(); let p = setup.author(); - let non_existent_public_keys = TestSetup::default() - .author() - .rotate_keys() - .expect("Rotates the keys"); + let non_existent_public_keys = + TestSetup::default().author().rotate_keys().expect("Rotates the keys"); let public_keys = p.rotate_keys().expect("Rotates the keys"); let test_vectors = vec![ @@ -294,7 +287,8 @@ fn test_has_key() { String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), suri.to_string(), alice_key_pair.public().0.to_vec().into(), - ).expect("Insert key"); + ) + .expect("Insert key"); let bob_key_pair = ed25519::Pair::from_string("//Bob", None).expect("Generates keypair"); let test_vectors = vec![ @@ -309,7 +303,8 @@ fn test_has_key() { p.has_key( key, String::from_utf8(key_type.0.to_vec()).expect("Keytype is a valid string"), - ).map_err(|e| mem::discriminant(&e)), + ) + .map_err(|e| mem::discriminant(&e)), ); } } diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index 816dbba866417..96d5b86f42498 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -1,31 +1,33 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Blockchain API backend for full nodes. 
-use std::sync::Arc; -use rpc::futures::future::result; +use super::{client_err, error::FutureResult, ChainBackend}; +use futures::FutureExt; use jsonrpc_pubsub::manager::SubscriptionManager; - -use sc_client_api::{BlockchainEvents, BlockBackend}; -use sp_runtime::{generic::{BlockId, SignedBlock}, traits::{Block as BlockT}}; - -use super::{ChainBackend, client_err, error::FutureResult}; -use std::marker::PhantomData; +use sc_client_api::{BlockBackend, BlockchainEvents}; use sp_blockchain::HeaderBackend; +use sp_runtime::{ + generic::{BlockId, SignedBlock}, + traits::Block as BlockT, +}; +use std::{marker::PhantomData, sync::Arc}; /// Blockchain API backend for full nodes. Reads all the data from local database. pub struct FullChain { @@ -40,16 +42,14 @@ pub struct FullChain { impl FullChain { /// Create new Chain API RPC handler. pub fn new(client: Arc, subscriptions: SubscriptionManager) -> Self { - Self { - client, - subscriptions, - _phantom: PhantomData, - } + Self { client, subscriptions, _phantom: PhantomData } } } -impl ChainBackend for FullChain where +impl ChainBackend for FullChain +where Block: BlockT + 'static, + Block::Header: Unpin, Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, { fn client(&self) -> &Arc { @@ -61,18 +61,12 @@ impl ChainBackend for FullChain whe } fn header(&self, hash: Option) -> FutureResult> { - Box::new(result(self.client - .header(BlockId::Hash(self.unwrap_or_best(hash))) - .map_err(client_err) - )) + let res = self.client.header(BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err); + async move { res }.boxed() } - fn block(&self, hash: Option) - -> FutureResult>> - { - Box::new(result(self.client - .block(&BlockId::Hash(self.unwrap_or_best(hash))) - .map_err(client_err) - )) + fn block(&self, hash: Option) -> FutureResult>> { + let res = self.client.block(&BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err); + async move { res }.boxed() } } diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs index 8a4afbed71c16..2d15c819e1dab 100644 --- a/client/rpc/src/chain/chain_light.rs +++ b/client/rpc/src/chain/chain_light.rs @@ -1,35 +1,36 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Blockchain API backend for light nodes. 
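The light backend's `block` below replaces the old `Either::A/B` combinator plumbing with a single async block chained via `TryFutureExt::and_then`. Schematically, with the fetcher and every type here a placeholder rather than the real `Fetcher` API:

use futures::future::{BoxFuture, FutureExt, TryFutureExt};

type FutureResult<T> = BoxFuture<'static, Result<T, String>>;

// Placeholder for a remote-body fetch keyed by a header.
async fn remote_body(header: u64) -> Result<Vec<u8>, String> {
	Ok(vec![header as u8])
}

fn block(header: FutureResult<Option<u64>>) -> FutureResult<Option<(u64, Vec<u8>)>> {
	header
		.and_then(|maybe_header| async move {
			match maybe_header {
				// Header found locally: fetch the body remotely and pair them.
				Some(h) => remote_body(h).await.map(|body| Some((h, body))),
				// Unknown block: propagate `None` without a remote round-trip.
				None => Ok(None),
			}
		})
		.boxed()
}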
-use std::sync::Arc; use futures::{future::ready, FutureExt, TryFutureExt}; -use rpc::futures::future::{result, Future, Either}; use jsonrpc_pubsub::manager::SubscriptionManager; +use std::sync::Arc; -use sc_client_api::light::{Fetcher, RemoteBodyRequest, RemoteBlockchain}; +use sc_client_api::light::{Fetcher, RemoteBlockchain, RemoteBodyRequest}; use sp_runtime::{ generic::{BlockId, SignedBlock}, - traits::{Block as BlockT}, + traits::Block as BlockT, }; -use super::{ChainBackend, client_err, error::FutureResult}; -use sp_blockchain::HeaderBackend; +use super::{client_err, error::FutureResult, ChainBackend}; use sc_client_api::BlockchainEvents; +use sp_blockchain::HeaderBackend; /// Blockchain API backend for light nodes. Reads all the data from local /// database, if available, or fetches it from remote node otherwise. @@ -52,17 +53,14 @@ impl> LightChain { remote_blockchain: Arc>, fetcher: Arc, ) -> Self { - Self { - client, - subscriptions, - remote_blockchain, - fetcher, - } + Self { client, subscriptions, remote_blockchain, fetcher } } } -impl ChainBackend for LightChain where +impl ChainBackend for LightChain +where Block: BlockT + 'static, + Block::Header: Unpin, Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, F: Fetcher + Send + Sync + 'static, { @@ -84,33 +82,33 @@ impl ChainBackend for LightChain) - -> FutureResult>> - { + fn block(&self, hash: Option) -> FutureResult>> { let fetcher = self.fetcher.clone(); - let block = self.header(hash) - .and_then(move |header| match header { - Some(header) => Either::A(fetcher - .remote_body(RemoteBodyRequest { - header: header.clone(), - retry_count: Default::default(), - }) - .boxed() - .compat() - .map(move |body| Some(SignedBlock { - block: Block::new(header, body), - justification: None, - })) - .map_err(client_err) - ), - None => Either::B(result(Ok(None))), - }); - - Box::new(block) + self.header(hash) + .and_then(move |header| async move { + match header { + Some(header) => { + let body = fetcher + .remote_body(RemoteBodyRequest { + header: header.clone(), + retry_count: Default::default(), + }) + .await; + + body.map(|body| { + Some(SignedBlock { + block: Block::new(header, body), + justifications: None, + }) + }) + .map_err(client_err) + }, + None => Ok(None), + } + }) + .boxed() } } diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index cb67d9ba23166..a06c3a094b40f 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,33 +24,37 @@ mod chain_light; #[cfg(test)] mod tests; -use std::sync::Arc; use futures::{future, StreamExt, TryStreamExt}; use log::warn; use rpc::{ + futures::{stream, FutureExt, SinkExt, Stream}, Result as RpcResult, - futures::{stream, Future, Sink, Stream}, }; +use std::sync::Arc; -use sc_client_api::{BlockchainEvents, light::{Fetcher, RemoteBlockchain}}; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use sp_rpc::{number::NumberOrHex, list::ListOrValue}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use sc_client_api::{ + light::{Fetcher, RemoteBlockchain}, + BlockchainEvents, +}; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ generic::{BlockId, SignedBlock}, traits::{Block as BlockT, Header, NumberFor}, }; -use self::error::{Result, Error, FutureResult}; +use self::error::{Error, FutureResult, Result}; +use sc_client_api::BlockBackend; pub use sc_rpc_api::chain::*; use sp_blockchain::HeaderBackend; -use sc_client_api::BlockBackend; /// Blockchain backend API trait ChainBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: HeaderBackend + BlockchainEvents + 'static, +where + Block: BlockT + 'static, + Block::Header: Unpin, + Client: HeaderBackend + BlockchainEvents + 'static, { /// Get client reference. fn client(&self) -> &Arc; @@ -83,8 +87,8 @@ trait ChainBackend: Send + Sync + 'static // FIXME <2329>: Database seems to limit the block number to u32 for no reason let block_num: u32 = num_or_hex.try_into().map_err(|_| { - Error::from(format!( - "`{:?}` > u32::max_value(), the max block number is u32.", + Error::Other(format!( + "`{:?}` > u32::MAX, the max block number is u32.", num_or_hex )) })?; @@ -94,7 +98,7 @@ trait ChainBackend: Send + Sync + 'static .header(BlockId::number(block_num)) .map_err(client_err)? 
.map(|h| h.hash())) - } + }, } } @@ -114,9 +118,11 @@ trait ChainBackend: Send + Sync + 'static self.subscriptions(), subscriber, || self.client().info().best_hash, - || self.client().import_notification_stream() - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), + || { + self.client() + .import_notification_stream() + .map(|notification| Ok::<_, rpc::Error>(notification.header)) + }, ) } @@ -140,10 +146,12 @@ trait ChainBackend: Send + Sync + 'static self.subscriptions(), subscriber, || self.client().info().best_hash, - || self.client().import_notification_stream() - .filter(|notification| future::ready(notification.is_new_best)) - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), + || { + self.client() + .import_notification_stream() + .filter(|notification| future::ready(notification.is_new_best)) + .map(|notification| Ok::<_, rpc::Error>(notification.header)) + }, ) } @@ -167,9 +175,11 @@ trait ChainBackend: Send + Sync + 'static self.subscriptions(), subscriber, || self.client().info().finalized_hash, - || self.client().finality_notification_stream() - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), + || { + self.client() + .finality_notification_stream() + .map(|notification| Ok::<_, rpc::Error>(notification.header)) + }, ) } @@ -188,13 +198,12 @@ pub fn new_full( client: Arc, subscriptions: SubscriptionManager, ) -> Chain - where - Block: BlockT + 'static, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, +where + Block: BlockT + 'static, + Block::Header: Unpin, + Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, { - Chain { - backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)), - } + Chain { backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)) } } /// Create new state API that works on light node. @@ -204,10 +213,11 @@ pub fn new_light>( remote_blockchain: Arc>, fetcher: Arc, ) -> Chain - where - Block: BlockT + 'static, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, - F: Send + Sync + 'static, +where + Block: BlockT + 'static, + Block::Header: Unpin, + Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, + F: Send + Sync + 'static, { Chain { backend: Box::new(self::chain_light::LightChain::new( @@ -224,11 +234,12 @@ pub struct Chain { backend: Box>, } -impl ChainApi, Block::Hash, Block::Header, SignedBlock> for - Chain - where - Block: BlockT + 'static, - Client: HeaderBackend + BlockchainEvents + 'static, +impl ChainApi, Block::Hash, Block::Header, SignedBlock> + for Chain +where + Block: BlockT + 'static, + Block::Header: Unpin, + Client: HeaderBackend + BlockchainEvents + 'static, { type Metadata = crate::Metadata; @@ -236,8 +247,7 @@ impl ChainApi, Block::Hash, Block::Header, Signe self.backend.header(hash) } - fn block(&self, hash: Option) -> FutureResult>> - { + fn block(&self, hash: Option) -> FutureResult>> { self.backend.block(hash) } @@ -247,12 +257,13 @@ impl ChainApi, Block::Hash, Block::Header, Signe ) -> Result>> { match number { None => self.backend.block_hash(None).map(ListOrValue::Value), - Some(ListOrValue::Value(number)) => self.backend.block_hash(Some(number)).map(ListOrValue::Value), - Some(ListOrValue::List(list)) => Ok(ListOrValue::List(list - .into_iter() - .map(|number| self.backend.block_hash(Some(number))) - .collect::>()? 
- )) + Some(ListOrValue::Value(number)) => + self.backend.block_hash(Some(number)).map(ListOrValue::Value), + Some(ListOrValue::List(list)) => Ok(ListOrValue::List( + list.into_iter() + .map(|number| self.backend.block_hash(Some(number))) + .collect::>()?, + )), } } @@ -264,7 +275,11 @@ impl ChainApi, Block::Hash, Block::Header, Signe self.backend.subscribe_all_heads(metadata, subscriber) } - fn unsubscribe_all_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_all_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_all_heads(metadata, id) } @@ -272,21 +287,33 @@ impl ChainApi, Block::Hash, Block::Header, Signe self.backend.subscribe_new_heads(metadata, subscriber) } - fn unsubscribe_new_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_new_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_new_heads(metadata, id) } - fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { + fn subscribe_finalized_heads( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ) { self.backend.subscribe_finalized_heads(metadata, subscriber) } - fn unsubscribe_finalized_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_finalized_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_finalized_heads(metadata, id) } } /// Subscribe to new headers. -fn subscribe_headers( +fn subscribe_headers( client: &Arc, subscriptions: &SubscriptionManager, subscriber: Subscriber, @@ -294,32 +321,30 @@ fn subscribe_headers( stream: F, ) where Block: BlockT + 'static, + Block::Header: Unpin, Client: HeaderBackend + 'static, F: FnOnce() -> S, G: FnOnce() -> Block::Hash, - ERR: ::std::fmt::Debug, - S: Stream + Send + 'static, + S: Stream> + Send + 'static, { subscriptions.add(subscriber, |sink| { // send current head right at the start. - let header = client.header(BlockId::Hash(best_block_hash())) + let header = client + .header(BlockId::Hash(best_block_hash())) .map_err(client_err) .and_then(|header| { - header.ok_or_else(|| "Best header missing.".to_owned().into()) + header.ok_or_else(|| Error::Other("Best header missing.".to_string())) }) .map_err(Into::into); // send further subscriptions let stream = stream() - .map(|res| Ok(res)) - .map_err(|e| warn!("Block notification stream error: {:?}", e)); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all( - stream::iter_result(vec![Ok(header)]) - .chain(stream) - ) + .inspect_err(|e| warn!("Block notification stream error: {:?}", e)) + .map(|res| Ok(res)); + + stream::iter(vec![Ok(header)]) + .chain(stream) + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) }); diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index b36fc4eab1d86..caa9f33138b86 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,16 +17,16 @@ // along with this program. If not, see . 
use super::*; +use crate::testing::TaskExecutor; use assert_matches::assert_matches; +use futures::executor; +use sc_block_builder::BlockBuilderProvider; +use sp_consensus::BlockOrigin; +use sp_rpc::list::ListOrValue; use substrate_test_runtime_client::{ prelude::*, - sp_consensus::BlockOrigin, - runtime::{H256, Block, Header}, + runtime::{Block, Header, H256}, }; -use sp_rpc::list::ListOrValue; -use sc_block_builder::BlockBuilderProvider; -use futures::{executor, compat::{Future01CompatExt, Stream01CompatExt}}; -use crate::testing::TaskExecutor; #[test] fn should_return_header() { @@ -34,7 +34,7 @@ fn should_return_header() { let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); assert_matches!( - api.header(Some(client.genesis_hash()).into()).wait(), + executor::block_on(api.header(Some(client.genesis_hash()).into())), Ok(Some(ref x)) if x == &Header { parent_hash: H256::from_low_u64_be(0), number: 0, @@ -46,7 +46,7 @@ fn should_return_header() { ); assert_matches!( - api.header(None.into()).wait(), + executor::block_on(api.header(None.into())), Ok(Some(ref x)) if x == &Header { parent_hash: H256::from_low_u64_be(0), number: 0, @@ -57,7 +57,10 @@ fn should_return_header() { } ); - assert_matches!(api.header(Some(H256::from_low_u64_be(5)).into()).wait(), Ok(None)); + assert_matches!( + executor::block_on(api.header(Some(H256::from_low_u64_be(5)).into())), + Ok(None) + ); } #[test] @@ -67,16 +70,16 @@ fn should_return_a_block() { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; let block_hash = block.hash(); - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Genesis block is not justified assert_matches!( - api.block(Some(client.genesis_hash()).into()).wait(), - Ok(Some(SignedBlock { justification: None, .. })) + executor::block_on(api.block(Some(client.genesis_hash()).into())), + Ok(Some(SignedBlock { justifications: None, .. 
 #[test]
@@ -67,16 +70,16 @@ fn should_return_a_block() {
     let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
     let block_hash = block.hash();

-    client.import(BlockOrigin::Own, block).unwrap();
+    executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();

     // Genesis block is not justified
     assert_matches!(
-        api.block(Some(client.genesis_hash()).into()).wait(),
-        Ok(Some(SignedBlock { justification: None, .. }))
+        executor::block_on(api.block(Some(client.genesis_hash()).into())),
+        Ok(Some(SignedBlock { justifications: None, .. }))
     );

     assert_matches!(
-        api.block(Some(block_hash).into()).wait(),
+        executor::block_on(api.block(Some(block_hash).into())),
         Ok(Some(ref x)) if x.block == Block {
             header: Header {
                 parent_hash: client.genesis_hash(),
@@ -91,7 +94,7 @@ fn should_return_a_block() {
     );

     assert_matches!(
-        api.block(None.into()).wait(),
+        executor::block_on(api.block(None.into())),
         Ok(Some(ref x)) if x.block == Block {
             header: Header {
                 parent_hash: client.genesis_hash(),
@@ -105,10 +108,7 @@ fn should_return_a_block() {
     }
     );

-    assert_matches!(
-        api.block(Some(H256::from_low_u64_be(5)).into()).wait(),
-        Ok(None)
-    );
+    assert_matches!(executor::block_on(api.block(Some(H256::from_low_u64_be(5)).into())), Ok(None));
 }

 #[test]
@@ -121,7 +121,6 @@ fn should_return_block_hash() {
         Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash()
     );

-
     assert_matches!(
         api.block_hash(Some(ListOrValue::Value(0u64.into())).into()),
         Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash()
     );
@@ -133,7 +132,7 @@ fn should_return_block_hash() {
     );

     let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
-    client.import(BlockOrigin::Own, block.clone()).unwrap();
+    executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap();

     assert_matches!(
         api.block_hash(Some(ListOrValue::Value(0u64.into())).into()),
@@ -154,7 +153,6 @@ fn should_return_block_hash() {
     );
 }
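`should_return_block_hash` above feeds `chain_getBlockHash` either one block number or a list of them through `sp_rpc::list::ListOrValue`. A sketch of how such an untagged enum accepts both JSON shapes; the derive set and the `serde_json` round-trip are assumptions made for this example, only the `List`/`Value` shape is taken from the test:

use serde::Deserialize;

#[derive(Deserialize, Debug, PartialEq)]
#[serde(untagged)]
enum ListOrValue<T> {
    // A JSON array deserializes as `List`, anything else as a single `Value`.
    List(Vec<T>),
    Value(T),
}

fn main() {
    let one: ListOrValue<u64> = serde_json::from_str("0").unwrap();
    assert_eq!(one, ListOrValue::Value(0));

    let many: ListOrValue<u64> = serde_json::from_str("[0, 1, 2]").unwrap();
    assert_eq!(many, ListOrValue::List(vec![0, 1, 2]));
}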
-
 #[test]
 fn should_return_finalized_hash() {
     let mut client = Arc::new(substrate_test_runtime_client::new());
@@ -167,7 +165,7 @@ fn should_return_finalized_hash() {

     // import new block
     let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
-    client.import(BlockOrigin::Own, block).unwrap();
+    executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();

     // no finalization yet
     assert_matches!(
         api.finalized_head(),
@@ -184,7 +182,7 @@

 #[test]
 fn should_notify_about_latest_block() {
-    let (subscriber, id, transport) = Subscriber::new_test("test");
+    let (subscriber, id, mut transport) = Subscriber::new_test("test");

     {
         let mut client = Arc::new(substrate_test_runtime_client::new());
@@ -193,28 +191,20 @@ fn should_notify_about_latest_block() {
         api.subscribe_all_heads(Default::default(), subscriber);

         // assert id assigned
-        assert!(matches!(
-            executor::block_on(id.compat()),
-            Ok(Ok(SubscriptionId::String(_)))
-        ));
+        assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_)))));

         let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
-        client.import(BlockOrigin::Own, block).unwrap();
+        executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();
     }

-    // assert initial head sent.
-    let (notification, next) = executor::block_on(transport.into_future().compat()).unwrap();
-    assert!(notification.is_some());
-    // assert notification sent to transport
-    let (notification, next) = executor::block_on(next.into_future().compat()).unwrap();
-    assert!(notification.is_some());
-    // no more notifications on this channel
-    assert_eq!(executor::block_on(next.into_future().compat()).unwrap().0, None);
+    // Check for the correct number of notifications
+    executor::block_on((&mut transport).take(2).collect::<Vec<_>>());
+    assert!(executor::block_on(transport.next()).is_none());
 }

 #[test]
 fn should_notify_about_best_block() {
-    let (subscriber, id, transport) = Subscriber::new_test("test");
+    let (subscriber, id, mut transport) = Subscriber::new_test("test");

     {
         let mut client = Arc::new(substrate_test_runtime_client::new());
@@ -223,28 +213,20 @@ fn should_notify_about_best_block() {
         api.subscribe_new_heads(Default::default(), subscriber);

         // assert id assigned
-        assert!(matches!(
-            executor::block_on(id.compat()),
-            Ok(Ok(SubscriptionId::String(_)))
-        ));
+        assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_)))));

         let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
-        client.import(BlockOrigin::Own, block).unwrap();
+        executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();
     }

-    // assert initial head sent.
-    let (notification, next) = executor::block_on(transport.into_future().compat()).unwrap();
-    assert!(notification.is_some());
-    // assert notification sent to transport
-    let (notification, next) = executor::block_on(next.into_future().compat()).unwrap();
-    assert!(notification.is_some());
-    // no more notifications on this channel
-    assert_eq!(executor::block_on(Stream01CompatExt::compat(next).into_future()).0, None);
+    // Assert that the correct number of notifications have been sent.
+    executor::block_on((&mut transport).take(2).collect::<Vec<_>>());
+    assert!(executor::block_on(transport.next()).is_none());
 }

 #[test]
 fn should_notify_about_finalized_block() {
-    let (subscriber, id, transport) = Subscriber::new_test("test");
+    let (subscriber, id, mut transport) = Subscriber::new_test("test");

     {
         let mut client = Arc::new(substrate_test_runtime_client::new());
@@ -253,22 +235,14 @@ fn should_notify_about_finalized_block() {
         api.subscribe_finalized_heads(Default::default(), subscriber);

         // assert id assigned
-        assert!(matches!(
-            executor::block_on(id.compat()),
-            Ok(Ok(SubscriptionId::String(_)))
-        ));
+        assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_)))));

         let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
-        client.import(BlockOrigin::Own, block).unwrap();
+        executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();
         client.finalize_block(BlockId::number(1), None).unwrap();
     }

-    // assert initial head sent.
-    let (notification, next) = executor::block_on(transport.into_future().compat()).unwrap();
-    assert!(notification.is_some());
-    // assert notification sent to transport
-    let (notification, next) = executor::block_on(next.into_future().compat()).unwrap();
-    assert!(notification.is_some());
-    // no more notifications on this channel
-    assert_eq!(executor::block_on(next.into_future().compat()).unwrap().0, None);
+    // Assert that the correct number of notifications have been sent.
+    executor::block_on((&mut transport).take(2).collect::<Vec<_>>());
+    assert!(executor::block_on(transport.next()).is_none());
 }
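The three notification tests above replace the chained futures-0.1 `into_future().compat()` dance with a single idiom: drain the expected number of items with `take(n)`, then prove the stream is exhausted. A self-contained sketch of that idiom over a plain channel (the payload strings are placeholders):

use futures::{channel::mpsc, executor, StreamExt};

fn main() {
    let (tx, mut transport) = mpsc::unbounded::<&'static str>();
    tx.unbounded_send("initial head").unwrap();
    tx.unbounded_send("imported head").unwrap();
    drop(tx); // nothing further will be sent

    // Drain exactly two notifications, without inspecting their payload...
    let first_two = executor::block_on((&mut transport).take(2).collect::<Vec<_>>());
    assert_eq!(first_two.len(), 2);
    // ...then assert there are no more notifications on this channel.
    assert!(executor::block_on(transport.next()).is_none());
}

Taking the stream by `&mut` keeps `transport` usable for the final `next()` call, which is why the tests above switch the binding to `mut transport`.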
diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs
index 434859a39c2f4..832585db4854c 100644
--- a/client/rpc/src/lib.rs
+++ b/client/rpc/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -22,13 +22,15 @@

 #![warn(missing_docs)]

-use futures::{compat::Future01CompatExt, FutureExt};
-use rpc::futures::future::{Executor, ExecuteError, Future};
+use futures::{
+    task::{FutureObj, Spawn, SpawnError},
+    FutureExt,
+};
 use sp_core::traits::SpawnNamed;
 use std::sync::Arc;

-pub use sc_rpc_api::{DenyUnsafe, Metadata};
 pub use rpc::IoHandlerExtension as RpcExtension;
+pub use sc_rpc_api::{DenyUnsafe, Metadata};

 pub mod author;
 pub mod chain;
@@ -50,12 +52,13 @@ impl SubscriptionTaskExecutor {
     }
 }

-impl Executor<Box<dyn Future<Item = (), Error = ()> + Send>> for SubscriptionTaskExecutor {
-    fn execute(
-        &self,
-        future: Box<dyn Future<Item = (), Error = ()> + Send>,
-    ) -> Result<(), ExecuteError<Box<dyn Future<Item = (), Error = ()> + Send>>> {
-        self.0.spawn("substrate-rpc-subscription", future.compat().map(drop).boxed());
+impl Spawn for SubscriptionTaskExecutor {
+    fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
+        self.0.spawn("substrate-rpc-subscription", future.map(drop).boxed());
+        Ok(())
+    }
+
+    fn status(&self) -> Result<(), SpawnError> {
         Ok(())
     }
 }
diff --git a/client/rpc/src/offchain/mod.rs b/client/rpc/src/offchain/mod.rs
index f8d2bb6a50f9c..9d1cc702b51e0 100644
--- a/client/rpc/src/offchain/mod.rs
+++ b/client/rpc/src/offchain/mod.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -21,15 +21,15 @@
 #[cfg(test)]
 mod tests;

+use self::error::{Error, Result};
+use parking_lot::RwLock;
 /// Re-export the API for backward compatibility.
 pub use sc_rpc_api::offchain::*;
 use sc_rpc_api::DenyUnsafe;
-use self::error::{Error, Result};
 use sp_core::{
-    Bytes, offchain::{OffchainStorage, StorageKind},
+    offchain::{OffchainStorage, StorageKind},
+    Bytes,
 };
-use parking_lot::RwLock;
 use std::sync::Arc;

 /// Offchain API
@@ -43,10 +43,7 @@ pub struct Offchain<T: OffchainStorage> {
 impl<T: OffchainStorage> Offchain<T> {
     /// Create new instance of Offchain API.
     pub fn new(storage: T, deny_unsafe: DenyUnsafe) -> Self {
-        Offchain {
-            storage: Arc::new(RwLock::new(storage)),
-            deny_unsafe,
-        }
+        Offchain { storage: Arc::new(RwLock::new(storage)), deny_unsafe }
     }
 }
diff --git a/client/rpc/src/offchain/tests.rs b/client/rpc/src/offchain/tests.rs
index f65971a7ffe8a..f9629e70198a3 100644
--- a/client/rpc/src/offchain/tests.rs
+++ b/client/rpc/src/offchain/tests.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,7 +18,7 @@ use super::*; use assert_matches::assert_matches; -use sp_core::{Bytes, offchain::storage::InMemOffchainStorage}; +use sp_core::{offchain::storage::InMemOffchainStorage, Bytes}; #[test] fn local_storage_should_work() { diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 8573b3cf82551..80eccc2c97deb 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,32 +24,37 @@ mod state_light; #[cfg(test)] mod tests; +use futures::FutureExt; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use rpc::Result as RpcResult; use std::sync::Arc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use rpc::{Result as RpcResult, futures::{Future, future::result}}; -use sc_rpc_api::{DenyUnsafe, state::ReadProof}; -use sc_client_api::light::{RemoteBlockchain, Fetcher}; -use sp_core::{Bytes, storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}}; -use sp_version::RuntimeVersion; +use sc_client_api::light::{Fetcher, RemoteBlockchain}; +use sc_rpc_api::{state::ReadProof, DenyUnsafe}; +use sp_core::{ + storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, + Bytes, +}; use sp_runtime::traits::Block as BlockT; +use sp_version::RuntimeVersion; -use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; +use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; use self::error::{Error, FutureResult}; -pub use sc_rpc_api::state::*; -pub use sc_rpc_api::child_state::*; -use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend, ProofProvider}; -use sp_blockchain::{HeaderMetadata, HeaderBackend}; +use sc_client_api::{ + Backend, BlockBackend, BlockchainEvents, ExecutorProvider, ProofProvider, StorageProvider, +}; +pub use sc_rpc_api::{child_state::*, state::*}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; /// State backend API. pub trait StateBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { /// Call runtime method at given block. fn call( @@ -112,7 +117,8 @@ pub trait StateBackend: Send + Sync + 'static /// Get the runtime version. fn runtime_version(&self, block: Option) -> FutureResult; - /// Query historical storage entries (by key) starting from a block given as the second parameter. + /// Query historical storage entries (by key) starting from a block given as the second + /// parameter. /// /// NOTE This first returned result contains the initial state of storage for all keys. /// Subsequent values in the vector represent changes to the previous state (diffs). @@ -127,7 +133,7 @@ pub trait StateBackend: Send + Sync + 'static fn query_storage_at( &self, keys: Vec, - at: Option + at: Option, ) -> FutureResult>>; /// Returns proof of storage entries at a specific block's state. 
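From here on, almost every method of this trait returns its pre-computed `Result` by wrapping it in an `async move` block and boxing it, where futures 0.1 used `Box::new(result(...))`. A hedged sketch of that return-type pattern; the `FutureResult` alias and the `String` error here are stand-ins for this crate's actual `error` module types:

use futures::{executor, future::BoxFuture, FutureExt};

// Assumed alias: a boxed future of a Result, like this crate's `FutureResult`.
type FutureResult<T> = BoxFuture<'static, Result<T, String>>;

fn storage_keys_paged(count: u32) -> FutureResult<u32> {
    const MAX: u32 = 1000; // mirrors STORAGE_KEYS_PAGED_MAX_COUNT
    if count > MAX {
        // futures 0.1 (old): return Box::new(result(Err(...)))
        // futures 0.3 (new): build the ready value, then box an async block
        return async move { Err(format!("count {} exceeds maximum {}", count, MAX)) }.boxed()
    }
    async move { Ok(count) }.boxed()
}

fn main() {
    assert_eq!(executor::block_on(storage_keys_paged(10)), Ok(10));
    assert!(executor::block_on(storage_keys_paged(5000)).is_err());
}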
@@ -165,6 +171,15 @@ pub trait StateBackend: Send + Sync + 'static _meta: Option, id: SubscriptionId, ) -> RpcResult; + + /// Trace storage changes for block + fn trace_block( + &self, + block: Block::Hash, + targets: Option, + storage_keys: Option, + methods: Option, + ) -> FutureResult; } /// Create new state API that works on full node. @@ -172,20 +187,33 @@ pub fn new_full( client: Arc, subscriptions: SubscriptionManager, deny_unsafe: DenyUnsafe, + rpc_max_payload: Option, ) -> (State, ChildState) - where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend - + HeaderMetadata + BlockchainEvents - + CallApiAt - + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: Metadata, +where + Block: BlockT + 'static, + Block::Hash: Unpin, + BE: Backend + 'static, + Client: ExecutorProvider + + StorageProvider + + ProofProvider + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + HeaderBackend + + BlockBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: Metadata, { - let child_backend = Box::new( - self::state_full::FullState::new(client.clone(), subscriptions.clone()) - ); - let backend = Box::new(self::state_full::FullState::new(client, subscriptions)); + let child_backend = Box::new(self::state_full::FullState::new( + client.clone(), + subscriptions.clone(), + rpc_max_payload, + )); + let backend = + Box::new(self::state_full::FullState::new(client, subscriptions, rpc_max_payload)); (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } @@ -197,27 +225,33 @@ pub fn new_light>( fetcher: Arc, deny_unsafe: DenyUnsafe, ) -> (State, ChildState) - where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider - + HeaderMetadata - + ProvideRuntimeApi + HeaderBackend + BlockchainEvents - + Send + Sync + 'static, - F: Send + Sync + 'static, +where + Block: BlockT + 'static, + Block::Hash: Unpin, + BE: Backend + 'static, + Client: ExecutorProvider + + StorageProvider + + HeaderMetadata + + ProvideRuntimeApi + + HeaderBackend + + BlockchainEvents + + Send + + Sync + + 'static, + F: Send + Sync + 'static, { let child_backend = Box::new(self::state_light::LightState::new( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - fetcher.clone(), + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + fetcher.clone(), )); let backend = Box::new(self::state_light::LightState::new( - client, - subscriptions, - remote_blockchain, - fetcher, + client, + subscriptions, + remote_blockchain, + fetcher, )); (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } @@ -230,9 +264,9 @@ pub struct State { } impl StateApi for State - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { type Metadata = crate::Metadata; @@ -254,7 +288,7 @@ impl StateApi for State block: Option, ) -> FutureResult> { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Box::new(result(Err(err.into()))) + return async move { Err(err.into()) }.boxed() } self.backend.storage_pairs(block, key_prefix) @@ -268,25 +302,35 @@ impl StateApi for State block: Option, ) -> FutureResult> { if count > STORAGE_KEYS_PAGED_MAX_COUNT { - return Box::new(result(Err( - Error::InvalidCount { - value: count, - max: STORAGE_KEYS_PAGED_MAX_COUNT, - } - ))); + return async move { + Err(Error::InvalidCount { value: count, max: STORAGE_KEYS_PAGED_MAX_COUNT }) + 
} + .boxed() } self.backend.storage_keys_paged(block, prefix, count, start_key) } - fn storage(&self, key: StorageKey, block: Option) -> FutureResult> { + fn storage( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { self.backend.storage(block, key) } - fn storage_hash(&self, key: StorageKey, block: Option) -> FutureResult> { + fn storage_hash( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { self.backend.storage_hash(block, key) } - fn storage_size(&self, key: StorageKey, block: Option) -> FutureResult> { + fn storage_size( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { self.backend.storage_size(block, key) } @@ -298,10 +342,10 @@ impl StateApi for State &self, keys: Vec, from: Block::Hash, - to: Option + to: Option, ) -> FutureResult>> { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Box::new(result(Err(err.into()))) + return async move { Err(err.into()) }.boxed() } self.backend.query_storage(from, to, keys) @@ -310,12 +354,16 @@ impl StateApi for State fn query_storage_at( &self, keys: Vec, - at: Option + at: Option, ) -> FutureResult>> { self.backend.query_storage_at(keys, at) } - fn read_proof(&self, keys: Vec, block: Option) -> FutureResult> { + fn read_proof( + &self, + keys: Vec, + block: Option, + ) -> FutureResult> { self.backend.read_proof(block, keys) } @@ -323,12 +371,16 @@ impl StateApi for State &self, meta: Self::Metadata, subscriber: Subscriber>, - keys: Option> + keys: Option>, ) { self.backend.subscribe_storage(meta, subscriber, keys); } - fn unsubscribe_storage(&self, meta: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_storage( + &self, + meta: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_storage(meta, id) } @@ -336,7 +388,11 @@ impl StateApi for State self.backend.runtime_version(at) } - fn subscribe_runtime_version(&self, meta: Self::Metadata, subscriber: Subscriber) { + fn subscribe_runtime_version( + &self, + meta: Self::Metadata, + subscriber: Subscriber, + ) { self.backend.subscribe_runtime_version(meta, subscriber); } @@ -347,14 +403,41 @@ impl StateApi for State ) -> RpcResult { self.backend.unsubscribe_runtime_version(meta, id) } + + /// Re-execute the given block with the tracing targets given in `targets` + /// and capture all state changes. + /// + /// Note: requires the node to run with `--rpc-methods=Unsafe`. + /// Note: requires runtimes compiled with wasm tracing support, `--features with-tracing`. + fn trace_block( + &self, + block: Block::Hash, + targets: Option, + storage_keys: Option, + methods: Option, + ) -> FutureResult { + if let Err(err) = self.deny_unsafe.check_if_safe() { + return async move { Err(err.into()) }.boxed() + } + + self.backend.trace_block(block, targets, storage_keys, methods) + } } /// Child state backend API. pub trait ChildStateBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { + /// Returns proof of storage for a child key entries at a specific block's state. + fn read_child_proof( + &self, + block: Option, + storage_key: PrefixedStorageKey, + keys: Vec, + ) -> FutureResult>; + /// Returns the keys with prefix from a child storage, /// leave prefix empty to get all the keys. fn storage_keys( @@ -364,6 +447,16 @@ pub trait ChildStateBackend: Send + Sync + 'static prefix: StorageKey, ) -> FutureResult>; + /// Returns the keys with prefix from a child storage with pagination support. 
+ fn storage_keys_paged( + &self, + block: Option, + storage_key: PrefixedStorageKey, + prefix: Option, + count: u32, + start_key: Option, + ) -> FutureResult>; + /// Returns a child storage entry at a specific block's state. fn storage( &self, @@ -372,6 +465,14 @@ pub trait ChildStateBackend: Send + Sync + 'static key: StorageKey, ) -> FutureResult>; + /// Returns child storage entries at a specific block's state. + fn storage_entries( + &self, + block: Option, + storage_key: PrefixedStorageKey, + keys: Vec, + ) -> FutureResult>>; + /// Returns the hash of a child storage entry at a block's state. fn storage_hash( &self, @@ -387,8 +488,9 @@ pub trait ChildStateBackend: Send + Sync + 'static storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { - Box::new(self.storage(block, storage_key, key) - .map(|x| x.map(|x| x.0.len() as u64))) + self.storage(block, storage_key, key) + .map(|x| x.map(|r| r.map(|v| v.0.len() as u64))) + .boxed() } } @@ -398,35 +500,64 @@ pub struct ChildState { } impl ChildStateApi for ChildState - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { type Metadata = crate::Metadata; + fn read_child_proof( + &self, + child_storage_key: PrefixedStorageKey, + keys: Vec, + block: Option, + ) -> FutureResult> { + self.backend.read_child_proof(block, child_storage_key, keys) + } + fn storage( &self, storage_key: PrefixedStorageKey, key: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage(block, storage_key, key) } + fn storage_entries( + &self, + storage_key: PrefixedStorageKey, + keys: Vec, + block: Option, + ) -> FutureResult>> { + self.backend.storage_entries(block, storage_key, keys) + } + fn storage_keys( &self, storage_key: PrefixedStorageKey, key_prefix: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage_keys(block, storage_key, key_prefix) } + fn storage_keys_paged( + &self, + storage_key: PrefixedStorageKey, + prefix: Option, + count: u32, + start_key: Option, + block: Option, + ) -> FutureResult> { + self.backend.storage_keys_paged(block, storage_key, prefix, count, start_key) + } + fn storage_hash( &self, storage_key: PrefixedStorageKey, key: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage_hash(block, storage_key, key) } @@ -435,7 +566,7 @@ impl ChildStateApi for ChildState &self, storage_key: PrefixedStorageKey, key: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage_size(block, storage_key, key) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index fda73cea27110..97f77a4077962 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -1,47 +1,66 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. 
-// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! State API backend for full nodes. -use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::ops::Range; -use futures::{future, StreamExt as _, TryStreamExt as _}; +use futures::{ + future, + future::{err, try_join_all}, + stream, FutureExt, SinkExt, StreamExt, +}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use log::warn; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use rpc::{Result as RpcResult, futures::{stream, Future, Sink, Stream, future::result}}; +use rpc::Result as RpcResult; +use std::{ + collections::{BTreeMap, HashMap}, + ops::Range, + sync::Arc, +}; use sc_rpc_api::state::ReadProof; -use sc_client_api::backend::Backend; -use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend}; -use sc_client_api::BlockchainEvents; +use sp_blockchain::{ + CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata, + Result as ClientResult, +}; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, - ChildInfo, ChildType, PrefixedStorageKey}, + storage::{ + ChildInfo, ChildType, PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey, + }, + Bytes, }; -use sp_version::RuntimeVersion; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion, CheckedSub}, + generic::BlockId, + traits::{Block as BlockT, CheckedSub, NumberFor, SaturatedConversion}, }; +use sp_version::RuntimeVersion; -use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; +use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; +use super::{ + client_err, + error::{Error, FutureResult, Result}, + ChildStateBackend, StateBackend, +}; +use sc_client_api::{ + Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider, + StorageProvider, +}; use std::marker::PhantomData; -use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider, ProofProvider}; /// Ranges to query in state_queryStorage. struct QueryStorageRange { @@ -61,19 +80,26 @@ struct QueryStorageRange { pub struct FullState { client: Arc, subscriptions: SubscriptionManager, - _phantom: PhantomData<(BE, Block)> + _phantom: PhantomData<(BE, Block)>, + rpc_max_payload: Option, } impl FullState - where - BE: Backend, - Client: StorageProvider + HeaderBackend - + HeaderMetadata, - Block: BlockT + 'static, +where + BE: Backend, + Client: StorageProvider + + HeaderBackend + + BlockBackend + + HeaderMetadata, + Block: BlockT + 'static, { /// Create new state API backend for full nodes. 
- pub fn new(client: Arc, subscriptions: SubscriptionManager) -> Self { - Self { client, subscriptions, _phantom: PhantomData } + pub fn new( + client: Arc, + subscriptions: SubscriptionManager, + rpc_max_payload: Option, + ) -> Self { + Self { client, subscriptions, _phantom: PhantomData, rpc_max_payload } } /// Returns given block hash or best block hash if None is passed. @@ -87,16 +113,23 @@ impl FullState fn split_query_storage_range( &self, from: Block::Hash, - to: Option + to: Option, ) -> Result> { - let to = self.block_or_best(to).map_err(|e| invalid_block::(from, to, e.to_string()))?; + let to = self + .block_or_best(to) + .map_err(|e| invalid_block::(from, to, e.to_string()))?; - let invalid_block_err = |e: ClientError| invalid_block::(from, Some(to), e.to_string()); + let invalid_block_err = + |e: ClientError| invalid_block::(from, Some(to), e.to_string()); let from_meta = self.client.header_metadata(from).map_err(invalid_block_err)?; let to_meta = self.client.header_metadata(to).map_err(invalid_block_err)?; if from_meta.number > to_meta.number { - return Err(invalid_block_range(&from_meta, &to_meta, "from number > to number".to_owned())) + return Err(invalid_block_range( + &from_meta, + &to_meta, + "from number > to number".to_owned(), + )) } // check if we can get from `to` to `from` by going through parent_hashes. @@ -105,28 +138,33 @@ impl FullState let mut hashes = vec![to_meta.hash]; let mut last = to_meta.clone(); while last.number > from_number { - let header_metadata = self.client + let header_metadata = self + .client .header_metadata(last.parent) .map_err(|e| invalid_block_range::(&last, &to_meta, e.to_string()))?; hashes.push(header_metadata.hash); last = header_metadata; } if last.hash != from_meta.hash { - return Err(invalid_block_range(&from_meta, &to_meta, "from and to are on different forks".to_owned())) + return Err(invalid_block_range( + &from_meta, + &to_meta, + "from and to are on different forks".to_owned(), + )) } hashes.reverse(); hashes }; // check if we can filter blocks-with-changes from some (sub)range using changes tries - let changes_trie_range = self.client + let changes_trie_range = self + .client .max_key_changes_range(from_number, BlockId::Hash(to_meta.hash)) .map_err(client_err)?; - let filtered_range_begin = changes_trie_range - .and_then(|(begin, _)| { - // avoids a corner case where begin < from_number (happens when querying genesis) - begin.checked_sub(&from_number).map(|x| x.saturated_into::()) - }); + let filtered_range_begin = changes_trie_range.and_then(|(begin, _)| { + // avoids a corner case where begin < from_number (happens when querying genesis) + begin.checked_sub(&from_number).map(|x| x.saturated_into::()) + }); let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin); Ok(QueryStorageRange { @@ -147,7 +185,8 @@ impl FullState ) -> Result<()> { for block in range.unfiltered_range.start..range.unfiltered_range.end { let block_hash = range.hashes[block].clone(); - let mut block_changes = StorageChangeSet { block: block_hash.clone(), changes: Vec::new() }; + let mut block_changes = + StorageChangeSet { block: block_hash.clone(), changes: Vec::new() }; let id = BlockId::hash(block_hash); for key in keys { let (has_changed, data) = { @@ -169,7 +208,8 @@ impl FullState Ok(()) } - /// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes. 
+ /// Iterates through all blocks that are changing keys within range.filtered_range and collects + /// these changes. fn query_storage_filtered( &self, range: &QueryStorageRange, @@ -180,30 +220,34 @@ impl FullState let (begin, end) = match range.filtered_range { Some(ref filtered_range) => ( range.first_number + filtered_range.start.saturated_into(), - BlockId::Hash(range.hashes[filtered_range.end - 1].clone()) + BlockId::Hash(range.hashes[filtered_range.end - 1].clone()), ), None => return Ok(()), }; - let mut changes_map: BTreeMap, StorageChangeSet> = BTreeMap::new(); + let mut changes_map: BTreeMap, StorageChangeSet> = + BTreeMap::new(); for key in keys { let mut last_block = None; let mut last_value = last_values.get(key).cloned().unwrap_or_default(); let key_changes = self.client.key_changes(begin, end, None, key).map_err(client_err)?; for (block, _) in key_changes.into_iter().rev() { if last_block == Some(block) { - continue; + continue } - let block_hash = range.hashes[(block - range.first_number).saturated_into::()].clone(); + let block_hash = + range.hashes[(block - range.first_number).saturated_into::()].clone(); let id = BlockId::Hash(block_hash); let value_at_block = self.client.storage(&id, key).map_err(client_err)?; if last_value == value_at_block { - continue; + continue } - changes_map.entry(block) + changes_map + .entry(block) .or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() }) - .changes.push((key.clone(), value_at_block.clone())); + .changes + .push((key.clone(), value_at_block.clone())); last_block = Some(block); last_value = value_at_block; } @@ -216,14 +260,24 @@ impl FullState } } -impl StateBackend for FullState where +impl StateBackend for FullState +where Block: BlockT + 'static, + Block::Hash: Unpin, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend - + HeaderMetadata + BlockchainEvents - + CallApiAt + ProvideRuntimeApi - + Send + Sync + 'static, - Client::Api: Metadata, + Client: ExecutorProvider + + StorageProvider + + ProofProvider + + HeaderBackend + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + ProvideRuntimeApi + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: Metadata, { fn call( &self, @@ -231,20 +285,22 @@ impl StateBackend for FullState FutureResult { - let r = self.block_or_best(block) - .and_then(|block| self - .client - .executor() - .call( - &BlockId::Hash(block), - &method, - &*call_data, - self.client.execution_extensions().strategies().other, - None, - ) - .map(Into::into) - ).map_err(client_err); - Box::new(result(r)) + let r = self + .block_or_best(block) + .and_then(|block| { + self.client + .executor() + .call( + &BlockId::Hash(block), + &method, + &*call_data, + self.client.execution_extensions().strategies().other, + None, + ) + .map(Into::into) + }) + .map_err(client_err); + async move { r }.boxed() } fn storage_keys( @@ -252,10 +308,11 @@ impl StateBackend for FullState, prefix: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix)) - .map_err(client_err))) + let r = self + .block_or_best(block) + .and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix)) + .map_err(client_err); + async move { r }.boxed() } fn storage_pairs( @@ -263,10 +320,11 @@ impl StateBackend for FullState, prefix: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| 
self.client.storage_pairs(&BlockId::Hash(block), &prefix)) - .map_err(client_err))) + let r = self + .block_or_best(block) + .and_then(|block| self.client.storage_pairs(&BlockId::Hash(block), &prefix)) + .map_err(client_err); + async move { r }.boxed() } fn storage_keys_paged( @@ -276,15 +334,18 @@ impl StateBackend for FullState, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| - self.client.storage_keys_iter( - &BlockId::Hash(block), prefix.as_ref(), start_key.as_ref() - ) + let r = self + .block_or_best(block) + .and_then(|block| { + self.client.storage_keys_iter( + &BlockId::Hash(block), + prefix.as_ref(), + start_key.as_ref(), ) - .map(|v| v.take(count as usize).collect()) - .map_err(client_err))) + }) + .map(|iter| iter.take(count as usize).collect()) + .map_err(client_err); + async move { r }.boxed() } fn storage( @@ -292,10 +353,11 @@ impl StateBackend for FullState, key: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.storage(&BlockId::Hash(block), &key)) - .map_err(client_err))) + let r = self + .block_or_best(block) + .and_then(|block| self.client.storage(&BlockId::Hash(block), &key)) + .map_err(client_err); + async move { r }.boxed() } fn storage_size( @@ -305,27 +367,28 @@ impl StateBackend for FullState FutureResult> { let block = match self.block_or_best(block) { Ok(b) => b, - Err(e) => return Box::new(result(Err(client_err(e)))), + Err(e) => return async move { Err(client_err(e)) }.boxed(), }; match self.client.storage(&BlockId::Hash(block), &key) { - Ok(Some(d)) => return Box::new(result(Ok(Some(d.0.len() as u64)))), - Err(e) => return Box::new(result(Err(client_err(e)))), + Ok(Some(d)) => return async move { Ok(Some(d.0.len() as u64)) }.boxed(), + Err(e) => return async move { Err(client_err(e)) }.boxed(), Ok(None) => {}, } - Box::new(result( - self.client.storage_pairs(&BlockId::Hash(block), &key) - .map(|kv| { - let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); - if item_sum > 0 { - Some(item_sum) - } else { - None - } - }) - .map_err(client_err) - )) + let r = self + .client + .storage_pairs(&BlockId::Hash(block), &key) + .map(|kv| { + let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); + if item_sum > 0 { + Some(item_sum) + } else { + None + } + }) + .map_err(client_err); + async move { r }.boxed() } fn storage_hash( @@ -333,26 +396,31 @@ impl StateBackend for FullState, key: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key)) - .map_err(client_err))) + let r = self + .block_or_best(block) + .and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key)) + .map_err(client_err); + async move { r }.boxed() } fn metadata(&self, block: Option) -> FutureResult { - Box::new(result( - self.block_or_best(block) - .and_then(|block| - self.client.runtime_api().metadata(&BlockId::Hash(block)).map(Into::into) - ) - .map_err(client_err))) + let r = self.block_or_best(block).map_err(client_err).and_then(|block| { + self.client + .runtime_api() + .metadata(&BlockId::Hash(block)) + .map(Into::into) + .map_err(|e| Error::Client(Box::new(e))) + }); + async move { r }.boxed() } fn runtime_version(&self, block: Option) -> FutureResult { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.runtime_version_at(&BlockId::Hash(block))) - .map_err(client_err))) + let r = 
self.block_or_best(block).map_err(client_err).and_then(|block| { + self.client + .runtime_version_at(&BlockId::Hash(block)) + .map_err(|e| Error::Client(Box::new(e))) + }); + async move { r }.boxed() } fn query_storage( @@ -369,13 +437,15 @@ impl StateBackend for FullState, - at: Option + at: Option, ) -> FutureResult>> { let at = at.unwrap_or_else(|| self.client.info().best_hash); self.query_storage(at, Some(at), keys) @@ -386,19 +456,16 @@ impl StateBackend for FullState, keys: Vec, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - self.client - .read_proof( - &BlockId::Hash(block), - &mut keys.iter().map(|key| key.0.as_ref()), - ) - .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) - .map(|proof| ReadProof { at: block, proof }) - }) - .map_err(client_err), - )) + let r = self + .block_or_best(block) + .and_then(|block| { + self.client + .read_proof(&BlockId::Hash(block), &mut keys.iter().map(|key| key.0.as_ref())) + .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) + .map(|proof| ReadProof { at: block, proof }) + }) + .map_err(client_err); + async move { r }.boxed() } fn subscribe_runtime_version( @@ -406,47 +473,39 @@ impl StateBackend for FullState, ) { - let stream = match self.client.storage_changes_notification_stream( - Some(&[StorageKey(well_known_keys::CODE.to_vec())]), - None, - ) { - Ok(stream) => stream, - Err(err) => { - let _ = subscriber.reject(Error::from(client_err(err)).into()); - return; - } - }; - self.subscriptions.add(subscriber, |sink| { - let version = self.runtime_version(None.into()) - .map_err(Into::into) - .wait(); + let version = self + .block_or_best(None) + .and_then(|block| { + self.client.runtime_version_at(&BlockId::Hash(block)).map_err(Into::into) + }) + .map_err(client_err) + .map_err(Into::into); let client = self.client.clone(); let mut previous_version = version.clone(); - let stream = stream - .filter_map(move |_| { - let info = client.info(); - let version = client - .runtime_version_at(&BlockId::hash(info.best_hash)) - .map_err(client_err) - .map_err(Into::into); - if previous_version != version { - previous_version = version.clone(); - future::ready(Some(Ok::<_, ()>(version))) - } else { - future::ready(None) - } - }) - .compat(); + // A stream of all best blocks. 
+ let stream = + client.import_notification_stream().filter(|n| future::ready(n.is_new_best)); + + let stream = stream.filter_map(move |n| { + let version = client + .runtime_version_at(&BlockId::hash(n.hash)) + .map_err(|e| Error::Client(Box::new(e))) + .map_err(Into::into); + + if previous_version != version { + previous_version = version.clone(); + future::ready(Some(Ok::<_, ()>(version))) + } else { + future::ready(None) + } + }); - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all( - stream::iter_result(vec![Ok(version)]) - .chain(stream) - ) + stream::iter(vec![Ok(version)]) + .chain(stream) + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) }); @@ -467,46 +526,47 @@ impl StateBackend for FullState>, ) { let keys = Into::>>::into(keys); - let stream = match self.client.storage_changes_notification_stream( - keys.as_ref().map(|x| &**x), - None - ) { + let stream = match self + .client + .storage_changes_notification_stream(keys.as_ref().map(|x| &**x), None) + { Ok(stream) => stream, Err(err) => { let _ = subscriber.reject(client_err(err).into()); - return; + return }, }; // initial values - let initial = stream::iter_result(keys - .map(|keys| { + let initial = stream::iter( + keys.map(|keys| { let block = self.client.info().best_hash; let changes = keys .into_iter() - .map(|key| StateBackend::storage(self, Some(block.clone()).into(), key.clone()) - .map(|val| (key.clone(), val)) - .wait() - .unwrap_or_else(|_| (key, None)) - ) + .map(|key| { + let v = self.client.storage(&BlockId::Hash(block), &key).ok().flatten(); + (key, v) + }) .collect(); vec![Ok(Ok(StorageChangeSet { block, changes }))] - }).unwrap_or_default()); + }) + .unwrap_or_default(), + ); self.subscriptions.add(subscriber, |sink| { - let stream = stream - .map(|(block, changes)| Ok::<_, ()>(Ok(StorageChangeSet { + let stream = stream.map(|(block, changes)| { + Ok(Ok::<_, rpc::Error>(StorageChangeSet { block, - changes: changes.iter() - .filter_map(|(o_sk, k, v)| if o_sk.is_none() { - Some((k.clone(),v.cloned())) - } else { None }).collect(), - }))) - .compat(); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all(initial.chain(stream)) + changes: changes + .iter() + .filter_map(|(o_sk, k, v)| o_sk.is_none().then(|| (k.clone(), v.cloned()))) + .collect(), + })) + }); + + initial + .chain(stream) + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) }); @@ -519,37 +579,123 @@ impl StateBackend for FullState RpcResult { Ok(self.subscriptions.cancel(id)) } + + fn trace_block( + &self, + block: Block::Hash, + targets: Option, + storage_keys: Option, + methods: Option, + ) -> FutureResult { + let block_executor = sc_tracing::block::BlockExecutor::new( + self.client.clone(), + block, + targets, + storage_keys, + methods, + self.rpc_max_payload, + ); + let r = block_executor + .trace_block() + .map_err(|e| invalid_block::(block, None, e.to_string())); + async move { r }.boxed() + } } -impl ChildStateBackend for FullState where +impl ChildStateBackend for FullState +where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + HeaderBackend - + HeaderMetadata + BlockchainEvents - + CallApiAt + ProvideRuntimeApi - + Send + Sync + 'static, - Client::Api: Metadata, + Client: 
ExecutorProvider + + StorageProvider + + ProofProvider + + HeaderBackend + + BlockBackend + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: Metadata, { - fn storage_keys( + fn read_child_proof( &self, block: Option, storage_key: PrefixedStorageKey, - prefix: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage_keys( + keys: Vec, + ) -> FutureResult> { + let r = self + .block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), + }; + self.client + .read_child_proof( &BlockId::Hash(block), &child_info, - &prefix, + &mut keys.iter().map(|key| key.0.as_ref()), ) - }) - .map_err(client_err))) + .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) + .map(|proof| ReadProof { at: block, proof }) + }) + .map_err(client_err); + + async move { r }.boxed() + } + + fn storage_keys( + &self, + block: Option, + storage_key: PrefixedStorageKey, + prefix: StorageKey, + ) -> FutureResult> { + let r = self + .block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), + }; + self.client.child_storage_keys(&BlockId::Hash(block), &child_info, &prefix) + }) + .map_err(client_err); + + async move { r }.boxed() + } + + fn storage_keys_paged( + &self, + block: Option, + storage_key: PrefixedStorageKey, + prefix: Option, + count: u32, + start_key: Option, + ) -> FutureResult> { + let r = self + .block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), + }; + self.client.child_storage_keys_iter( + &BlockId::Hash(block), + child_info, + prefix.as_ref(), + start_key.as_ref(), + ) + }) + .map(|iter| iter.take(count as usize).collect()) + .map_err(client_err); + + async move { r }.boxed() } fn storage( @@ -558,20 +704,46 @@ impl ChildStateBackend for FullState FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage( - &BlockId::Hash(block), - &child_info, - &key, - ) - }) - .map_err(client_err))) + let r = self + .block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), + }; + self.client.child_storage(&BlockId::Hash(block), &child_info, &key) + }) + .map_err(client_err); + + async move { r }.boxed() + } + + fn storage_entries( + &self, + block: 
Option, + storage_key: PrefixedStorageKey, + keys: Vec, + ) -> FutureResult>> { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + Arc::new(ChildInfo::new_default(storage_key)), + None => return err(client_err(sp_blockchain::Error::InvalidChildStorageKey)).boxed(), + }; + let block = match self.block_or_best(block) { + Ok(b) => b, + Err(e) => return err(client_err(e)).boxed(), + }; + let client = self.client.clone(); + try_join_all(keys.into_iter().map(move |key| { + let res = client + .clone() + .child_storage(&BlockId::Hash(block), &child_info, &key) + .map_err(client_err); + + async move { res } + })) + .boxed() } fn storage_hash( @@ -580,27 +752,29 @@ impl ChildStateBackend for FullState FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage_hash( - &BlockId::Hash(block), - &child_info, - &key, - ) - }) - .map_err(client_err))) + let r = self + .block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(sp_blockchain::Error::InvalidChildStorageKey), + }; + self.client.child_storage_hash(&BlockId::Hash(block), &child_info, &key) + }) + .map_err(client_err); + + async move { r }.boxed() } } /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. -pub(crate) fn split_range(size: usize, middle: Option) -> (Range, Option>) { +pub(crate) fn split_range( + size: usize, + middle: Option, +) -> (Range, Option>) { // check if we can filter blocks-with-changes from some (sub)range using changes tries let range2_begin = match middle { // some of required changes tries are pruned => use available tries @@ -625,21 +799,9 @@ fn invalid_block_range( ) -> Error { let to_string = |h: &CachedHeaderMetadata| format!("{} ({:?})", h.number, h.hash); - Error::InvalidBlockRange { - from: to_string(from), - to: to_string(to), - details, - } + Error::InvalidBlockRange { from: to_string(from), to: to_string(to), details } } -fn invalid_block( - from: B::Hash, - to: Option, - details: String, -) -> Error { - Error::InvalidBlockRange { - from: format!("{:?}", from), - to: format!("{:?}", to), - details, - } +fn invalid_block(from: B::Hash, to: Option, details: String) -> Error { + Error::InvalidBlockRange { from: format!("{:?}", from), to: format!("{:?}", to), details } } diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 8f4dce08b3fb6..749e57c365cc0 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -1,60 +1,63 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! State API backend for light nodes. -use std::{ - sync::Arc, - collections::{HashSet, HashMap, hash_map::Entry}, -}; use codec::Decode; use futures::{ - future::{ready, Either}, channel::oneshot::{channel, Sender}, - FutureExt, TryFutureExt, - StreamExt as _, TryStreamExt as _, + future::{ready, Either}, + Future, FutureExt, SinkExt, Stream, StreamExt as _, TryFutureExt, TryStreamExt as _, }; use hash_db::Hasher; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use log::warn; use parking_lot::Mutex; -use rpc::{ - Result as RpcResult, - futures::Sink, - futures::future::{result, Future}, - futures::stream::Stream, +use rpc::Result as RpcResult; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::Arc, }; -use sc_rpc_api::state::ReadProof; -use sp_blockchain::{Error as ClientError, HeaderBackend}; use sc_client_api::{ - BlockchainEvents, light::{ - RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest, - RemoteBlockchain, Fetcher, future_header, + future_header, Fetcher, RemoteBlockchain, RemoteCallRequest, RemoteReadChildRequest, + RemoteReadRequest, }, + BlockchainEvents, }; +use sc_rpc_api::state::ReadProof; +use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_core::{ + storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, Bytes, OpaqueMetadata, - storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor}, }; use sp_version::RuntimeVersion; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err}; +use super::{ + client_err, + error::{Error, FutureResult}, + ChildStateBackend, StateBackend, +}; /// Storage data map of storage keys => (optional) storage value. type StorageMap = HashMap>; @@ -75,11 +78,7 @@ trait SharedRequests: Clone + Send + Sync { /// Tries to listen for already issued request, or issues request. /// /// Returns true if requests has been issued. - fn listen_request( - &self, - block: Hash, - sender: Sender>, - ) -> bool; + fn listen_request(&self, block: Hash, sender: Sender>) -> bool; /// Returns (and forgets) all listeners for given request. 
fn on_response_received(&self, block: Hash) -> Vec>>; @@ -95,12 +94,10 @@ struct StorageSubscriptions { subscriptions_by_key: HashMap>, } -impl SharedRequests for Arc>> { - fn listen_request( - &self, - block: Block::Hash, - sender: Sender>, - ) -> bool { +impl SharedRequests + for Arc>> +{ + fn listen_request(&self, block: Block::Hash, sender: Sender>) -> bool { let mut subscriptions = self.lock(); let active_requests_at = subscriptions.active_requests.entry(block).or_default(); active_requests_at.push(sender); @@ -115,15 +112,12 @@ impl SharedRequests for Arc = Arc>>>>>; -impl SharedRequests for SimpleSubscriptions where +impl SharedRequests for SimpleSubscriptions +where Hash: Send + Eq + std::hash::Hash, V: Send, { - fn listen_request( - &self, - block: Hash, - sender: Sender>, - ) -> bool { + fn listen_request(&self, block: Hash, sender: Sender>) -> bool { let mut subscriptions = self.lock(); let active_requests_at = subscriptions.entry(block).or_default(); active_requests_at.push(sender); @@ -136,9 +130,9 @@ impl SharedRequests for SimpleSubscriptions where } impl + 'static, Client> LightState - where - Block: BlockT, - Client: HeaderBackend + Send + Sync + 'static, +where + Block: BlockT, + Client: HeaderBackend + Send + Sync + 'static, { /// Create new state API backend for light nodes. pub fn new( @@ -168,10 +162,11 @@ impl + 'static, Client> LightState StateBackend for LightState - where - Block: BlockT, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + 'static +where + Block: BlockT, + Block::Hash: Unpin, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static, { fn call( &self, @@ -179,13 +174,14 @@ impl StateBackend for LightState FutureResult { - Box::new(call( + call( &*self.remote_blockchain, self.fetcher.clone(), self.block_or_best(block), method, call_data, - ).boxed().compat()) + ) + .boxed() } fn storage_keys( @@ -193,7 +189,7 @@ impl StateBackend for LightState, _prefix: StorageKey, ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } fn storage_pairs( @@ -201,7 +197,7 @@ impl StateBackend for LightState, _prefix: StorageKey, ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } fn storage_keys_paged( @@ -211,15 +207,11 @@ impl StateBackend for LightState, ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } - fn storage_size( - &self, - _: Option, - _: StorageKey, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + fn storage_size(&self, _: Option, _: StorageKey) -> FutureResult> { + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } fn storage( @@ -227,15 +219,18 @@ impl StateBackend for LightState, key: StorageKey, ) -> FutureResult> { - Box::new(storage( + storage( &*self.remote_blockchain, self.fetcher.clone(), self.block_or_best(block), vec![key.0.clone()], - ).boxed().compat().map(move |mut values| values - .remove(&key) - .expect("successful request has entries for all requested keys; qed") - )) + ) + .map_ok(move |mut values| { + values + .remove(&key) + .expect("successful request has entries for all requested keys; qed") + }) + .boxed() } fn 
storage_hash( @@ -243,31 +238,28 @@ impl StateBackend for LightState, key: StorageKey, ) -> FutureResult> { - Box::new(StateBackend::storage(self, block, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) - ) + let res = StateBackend::storage(self, block, key); + async move { res.await.map(|r| r.map(|s| HashFor::::hash(&s.0))) }.boxed() } fn metadata(&self, block: Option) -> FutureResult { - let metadata = self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) - .and_then(|metadata| OpaqueMetadata::decode(&mut &metadata.0[..]) - .map(Into::into) - .map_err(|decode_err| client_err(ClientError::CallResultDecode( - "Unable to decode metadata", - decode_err, - )))); - - Box::new(metadata) + self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) + .and_then(|metadata| async move { + OpaqueMetadata::decode(&mut &metadata.0[..]) + .map(Into::into) + .map_err(|decode_err| { + client_err(ClientError::CallResultDecode( + "Unable to decode metadata", + decode_err, + )) + }) + }) + .boxed() } fn runtime_version(&self, block: Option) -> FutureResult { - Box::new(runtime_version( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - ).boxed().compat()) + runtime_version(&*self.remote_blockchain, self.fetcher.clone(), self.block_or_best(block)) + .boxed() } fn query_storage( @@ -276,15 +268,15 @@ impl StateBackend for LightState, _keys: Vec, ) -> FutureResult>> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } fn query_storage_at( &self, _keys: Vec, - _at: Option + _at: Option, ) -> FutureResult>> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } fn read_proof( @@ -292,21 +284,21 @@ impl StateBackend for LightState, _keys: Vec, ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } fn subscribe_storage( &self, _meta: crate::Metadata, subscriber: Subscriber>, - keys: Option> + keys: Option>, ) { let keys = match keys { Some(keys) if !keys.is_empty() => keys, _ => { warn!("Cannot subscribe to all keys on light client. 
Subscription rejected."); - return; - } + return + }, }; let keys = keys.iter().cloned().collect::>(); @@ -320,16 +312,11 @@ impl StateBackend for LightState( storage_subscriptions.clone(), - self.client - .import_notification_stream() - .map(|notification| Ok::<_, ()>(notification.hash)) - .compat(), - display_error(storage( - &*remote_blockchain, - fetcher.clone(), - initial_block, - initial_keys, - ).map(move |r| r.map(|r| (initial_block, r)))), + self.client.import_notification_stream().map(|notification| notification.hash), + display_error( + storage(&*remote_blockchain, fetcher.clone(), initial_block, initial_keys) + .map(move |r| r.map(|r| (initial_block, r))), + ), move |block| { // there'll be single request per block for all active subscriptions // with all subscribed keys @@ -340,12 +327,7 @@ impl StateBackend for LightState StateBackend for LightState Some(StorageChangeSet { - block, - changes: new_value - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(), - }), - false => None, - } - } + + value_differs.then(|| StorageChangeSet { + block, + changes: new_value.iter().map(|(k, v)| (k.clone(), v.clone())).collect(), + }) + }, ); - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all(changes_stream.map(|changes| Ok(changes))) + changes_stream + .map_ok(Ok) + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) }); // remember keys associated with this subscription let mut storage_subscriptions = self.storage_subscriptions.lock(); - storage_subscriptions.keys_by_subscription.insert(subscription_id.clone(), keys.clone()); + storage_subscriptions + .keys_by_subscription + .insert(subscription_id.clone(), keys.clone()); for key in keys { storage_subscriptions .subscriptions_by_key @@ -396,7 +375,7 @@ impl StateBackend for LightState RpcResult { if !self.subscriptions.cancel(id.clone()) { - return Ok(false); + return Ok(false) } // forget subscription keys @@ -404,14 +383,16 @@ impl StateBackend for LightState unreachable!("every key from keys_by_subscription has\ - corresponding entry in subscriptions_by_key; qed"), + Entry::Vacant(_) => unreachable!( + "every key from keys_by_subscription has\ + corresponding entry in subscriptions_by_key; qed" + ), Entry::Occupied(mut entry) => { entry.get_mut().remove(&id); if entry.get().is_empty() { entry.remove(); } - } + }, } } @@ -431,35 +412,25 @@ impl StateBackend for LightState( version_subscriptions, - self.client - .import_notification_stream() - .map(|notification| Ok::<_, ()>(notification.hash)) - .compat(), - display_error(runtime_version( - &*remote_blockchain, - fetcher.clone(), - initial_block, - ).map(move |r| r.map(|r| (initial_block, r)))), - move |block| runtime_version( - &*remote_blockchain, - fetcher.clone(), - block, + self.client.import_notification_stream().map(|notification| notification.hash), + display_error( + runtime_version(&*remote_blockchain, fetcher.clone(), initial_block) + .map(move |r| r.map(|r| (initial_block, r))), ), + move |block| runtime_version(&*remote_blockchain, fetcher.clone(), block), |_, old_version, new_version| { let version_differs = old_version .as_ref() .map(|old_version| *old_version != new_version) .unwrap_or(true); - match version_differs { - true => Some(new_version.clone()), - false => None, - } - } + + version_differs.then(|| new_version.clone()) + }, ); - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) 
- .send_all(versions_stream.map(|version| Ok(version))) + versions_stream + .map_ok(Ok) + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) }); @@ -472,21 +443,51 @@ impl StateBackend for LightState RpcResult { Ok(self.subscriptions.cancel(id)) } + + fn trace_block( + &self, + _block: Block::Hash, + _targets: Option, + _storage_keys: Option, + _methods: Option, + ) -> FutureResult { + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() + } } impl ChildStateBackend for LightState - where - Block: BlockT, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + 'static +where + Block: BlockT, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static, { + fn read_child_proof( + &self, + _block: Option, + _storage_key: PrefixedStorageKey, + _keys: Vec, + ) -> FutureResult> { + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() + } + fn storage_keys( &self, _block: Option, _storage_key: PrefixedStorageKey, _prefix: StorageKey, ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() + } + + fn storage_keys_paged( + &self, + _block: Option, + _storage_key: PrefixedStorageKey, + _prefix: Option, + _count: u32, + _start_key: Option, + ) -> FutureResult> { + async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() } fn storage( @@ -497,26 +498,81 @@ impl ChildStateBackend for LightState FutureResult> { let block = self.block_or_best(block); let fetcher = self.fetcher.clone(); - let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key, - keys: vec![key.0.clone()], - retry_count: Default::default(), - }).then(move |result| ready(result - .map(|mut data| data - .remove(&key.0) - .expect("successful result has entry for all keys; qed") - .map(StorageData) - ) - .map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), + let child_storage = + resolve_header(&*self.remote_blockchain, &*self.fetcher, block).then(move |result| { + match result { + Ok(header) => Either::Left( + fetcher + .remote_read_child(RemoteReadChildRequest { + block, + header, + storage_key, + keys: vec![key.0.clone()], + retry_count: Default::default(), + }) + .then(move |result| { + ready( + result + .map(|mut data| { + data.remove(&key.0) + .expect( + "successful result has entry for all keys; qed", + ) + .map(StorageData) + }) + .map_err(client_err), + ) + }), + ), + Err(error) => Either::Right(ready(Err(error))), + } }); - Box::new(child_storage.boxed().compat()) + child_storage.boxed() + } + + fn storage_entries( + &self, + block: Option, + storage_key: PrefixedStorageKey, + keys: Vec, + ) -> FutureResult>> { + let block = self.block_or_best(block); + let fetcher = self.fetcher.clone(); + let keys = keys.iter().map(|k| k.0.clone()).collect::>(); + let child_storage = + resolve_header(&*self.remote_blockchain, &*self.fetcher, block).then(move |result| { + match result { + Ok(header) => Either::Left( + fetcher + .remote_read_child(RemoteReadChildRequest { + block, + header, + storage_key, + keys: keys.clone(), + retry_count: Default::default(), + }) + .then(move 
|result| { + ready( + result + .map(|data| { + data.iter() + .filter_map(|(k, d)| { + keys.contains(k).then(|| { + d.as_ref().map(|v| StorageData(v.to_vec())) + }) + }) + .collect::>() + }) + .map_err(client_err), + ) + }), + ), + Err(error) => Either::Right(ready(Err(error))), + } + }); + + child_storage.boxed() } fn storage_hash( @@ -525,11 +581,9 @@ impl ChildStateBackend for LightState FutureResult> { - Box::new(ChildStateBackend::storage(self, block, storage_key, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) - ) + let child_storage = ChildStateBackend::storage(self, block, storage_key, key); + + async move { child_storage.await.map(|r| r.map(|s| HashFor::::hash(&s.0))) }.boxed() } } @@ -539,17 +593,17 @@ fn resolve_header>( fetcher: &F, block: Block::Hash, ) -> impl std::future::Future> { - let maybe_header = future_header( - remote_blockchain, - fetcher, - BlockId::Hash(block), - ); - - maybe_header.then(move |result| - ready(result.and_then(|maybe_header| - maybe_header.ok_or_else(|| ClientError::UnknownBlock(format!("{}", block))) - ).map_err(client_err)), - ) + let maybe_header = future_header(remote_blockchain, fetcher, BlockId::Hash(block)); + + maybe_header.then(move |result| { + ready( + result + .and_then(|maybe_header| { + maybe_header.ok_or_else(|| ClientError::UnknownBlock(format!("{}", block))) + }) + .map_err(client_err), + ) + }) } /// Call runtime method at given block @@ -560,17 +614,20 @@ fn call>( method: String, call_data: Bytes, ) -> impl std::future::Future> { - resolve_header(remote_blockchain, &*fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_call(RemoteCallRequest { - block, - header, - method, - call_data: call_data.0, - retry_count: Default::default(), - }).then(|result| ready(result.map(Bytes).map_err(client_err)))), - Err(error) => Either::Right(ready(Err(error))), - }) + resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { + Ok(header) => Either::Left( + fetcher + .remote_call(RemoteCallRequest { + block, + header, + method, + call_data: call_data.0, + retry_count: Default::default(), + }) + .then(|result| ready(result.map(Bytes).map_err(client_err))), + ), + Err(error) => Either::Right(ready(Err(error))), + }) } /// Get runtime version at given block. @@ -579,17 +636,14 @@ fn runtime_version>( fetcher: Arc, block: Block::Hash, ) -> impl std::future::Future> { - call( - remote_blockchain, - fetcher, - block, - "Core_version".into(), - Bytes(Vec::new()), + call(remote_blockchain, fetcher, block, "Core_version".into(), Bytes(Vec::new())).then( + |version| { + ready(version.and_then(|version| { + Decode::decode(&mut &version.0[..]) + .map_err(|e| client_err(ClientError::VersionInvalid(e.to_string()))) + })) + }, ) - .then(|version| ready(version.and_then(|version| - Decode::decode(&mut &version.0[..]) - .map_err(|e| client_err(ClientError::VersionInvalid(e.what().into()))) - ))) } /// Get storage value at given key at given block. 
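The hunks above and below all apply one mechanical migration: futures 0.1 returns built with `Box::new(...)` and bridged via `.boxed().compat()` become native futures 0.3 values, either through combinators (`then`, `map_ok`) or through an `async` block boxed with `FutureExt::boxed`. A minimal, self-contained sketch of the new shape — the `FutureResult` alias and the plain `String` error are stand-ins for the crate's real types, not the actual definitions:

use futures::future::{BoxFuture, FutureExt};

// Stand-in for the crate's `FutureResult<T>` alias: a boxed future of a Result.
// The real error type is the RPC `Error`; a `String` keeps the sketch self-contained.
type FutureResult<T> = BoxFuture<'static, Result<T, String>>;

// Before: Box::new(result(Err(err)))      (futures 0.1, callers need .compat())
// After:  async move { Err(err) }.boxed() (futures 0.3, returned as-is)
fn not_available() -> FutureResult<Vec<u8>> {
    async move { Err("not available on light client".to_string()) }.boxed()
}

fn main() {
    // Callers now drive the future directly, with no compat layer in between.
    assert!(futures::executor::block_on(not_available()).is_err());
}

Dropping the `compat` bridge at every return site is what lets the rest of this diff delete the `futures01` plumbing and call `executor::block_on` directly in the tests further down.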
@@ -599,22 +653,30 @@ fn storage>( block: Block::Hash, keys: Vec>, ) -> impl std::future::Future>, Error>> { - resolve_header(remote_blockchain, &*fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read(RemoteReadRequest { - block, - header, - keys, - retry_count: Default::default(), - }).then(|result| ready(result - .map(|result| result - .into_iter() - .map(|(key, value)| (StorageKey(key), value.map(StorageData))) - .collect() - ).map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), - }) + resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { + Ok(header) => Either::Left( + fetcher + .remote_read(RemoteReadRequest { + block, + header, + keys, + retry_count: Default::default(), + }) + .then(|result| { + ready( + result + .map(|result| { + result + .into_iter() + .map(|(key, value)| (StorageKey(key), value.map(StorageData))) + .collect() + }) + .map_err(client_err), + ) + }), + ), + Err(error) => Either::Right(ready(Err(error))), + }) } /// Returns subscription stream that issues request on every imported block and @@ -623,9 +685,11 @@ fn subscription_stream< Block, Requests, FutureBlocksStream, - V, N, + V, + N, InitialRequestFuture, - IssueRequest, IssueRequestFuture, + IssueRequest, + IssueRequestFuture, CompareValues, >( shared_requests: Requests, @@ -633,47 +697,51 @@ fn subscription_stream< initial_request: InitialRequestFuture, issue_request: IssueRequest, compare_values: CompareValues, -) -> impl Stream where +) -> impl Stream> +where Block: BlockT, Requests: 'static + SharedRequests, - FutureBlocksStream: Stream, + FutureBlocksStream: Stream, V: Send + 'static + Clone, - InitialRequestFuture: std::future::Future> + Send + 'static, + InitialRequestFuture: Future> + Send + 'static, IssueRequest: 'static + Fn(Block::Hash) -> IssueRequestFuture, - IssueRequestFuture: std::future::Future> + Send + 'static, + IssueRequestFuture: Future> + Send + 'static, CompareValues: Fn(Block::Hash, Option<&V>, &V) -> Option, { // we need to send initial value first, then we'll only be sending if value has changed let previous_value = Arc::new(Mutex::new(None)); // prepare 'stream' of initial values - let initial_value_stream = ignore_error(initial_request) - .boxed() - .compat() - .into_stream(); + let initial_value_stream = initial_request.into_stream(); // prepare stream of future values // // we do not want to stop stream if single request fails // (the warning should have been already issued by the request issuer) let future_values_stream = future_blocks_stream - .and_then(move |block| ignore_error(maybe_share_remote_request::( - shared_requests.clone(), - block, - &issue_request, - ).map(move |r| r.map(|v| (block, v)))).boxed().compat()); + .then(move |block| { + maybe_share_remote_request::( + shared_requests.clone(), + block, + &issue_request, + ) + .map(move |r| r.map(|v| (block, v))) + }) + .filter(|r| ready(r.is_ok())); // now let's return changed values for selected blocks initial_value_stream .chain(future_values_stream) - .filter_map(move |block_and_new_value| block_and_new_value.and_then(|(block, new_value)| { + .try_filter_map(move |(block, new_value)| { let mut previous_value = previous_value.lock(); - compare_values(block, previous_value.as_ref(), &new_value) - .map(|notification_value| { + let res = compare_values(block, previous_value.as_ref(), &new_value).map( + |notification_value| { *previous_value = Some(new_value); notification_value - }) - })) + }, + ); + async move { Ok(res) 
} + }) .map_err(|_| ()) } @@ -683,7 +751,8 @@ fn maybe_share_remote_request impl std::future::Future> where +) -> impl std::future::Future> +where V: Clone, Requests: SharedRequests, IssueRequest: Fn(Block::Hash) -> IssueRequestFuture, @@ -694,64 +763,50 @@ fn maybe_share_remote_request(future: F) -> impl std::future::Future> where - F: std::future::Future> +fn display_error(future: F) -> impl std::future::Future> +where + F: std::future::Future>, { - future.then(|result| ready(match result { - Ok(result) => Ok(result), - Err(err) => { + future.then(|result| { + ready(result.or_else(|err| { warn!("Remote request for subscription data has failed with: {:?}", err); Err(()) - }, - })) -} - -/// Convert successful future result into Ok(Some(result)) and error into Ok(None), -/// displaying warning. -fn ignore_error(future: F) -> impl std::future::Future, ()>> where - F: std::future::Future> -{ - future.then(|result| ready(match result { - Ok(result) => Ok(Some(result)), - Err(()) => Ok(None), - })) + })) + }) } #[cfg(test)] mod tests { - use rpc::futures::stream::futures_ordered; - use substrate_test_runtime_client::runtime::Block; - use sp_core::H256; use super::*; + use futures::{executor, stream}; + use sp_core::H256; + use substrate_test_runtime_client::runtime::Block; #[test] fn subscription_stream_works() { let stream = subscription_stream::( SimpleSubscriptions::default(), - futures_ordered(vec![result(Ok(H256::from([2; 32]))), result(Ok(H256::from([3; 32])))]), + stream::iter(vec![H256::from([2; 32]), H256::from([3; 32])]), ready(Ok((H256::from([1; 32]), 100))), |block| match block[0] { 2 => ready(Ok(100)), @@ -761,20 +816,17 @@ mod tests { |_, old_value, new_value| match old_value == Some(new_value) { true => None, false => Some(new_value.clone()), - } + }, ); - assert_eq!( - stream.collect().wait(), - Ok(vec![100, 200]) - ); + assert_eq!(executor::block_on(stream.collect::>()), vec![Ok(100), Ok(200)]); } #[test] fn subscription_stream_ignores_failed_requests() { let stream = subscription_stream::( SimpleSubscriptions::default(), - futures_ordered(vec![result(Ok(H256::from([2; 32]))), result(Ok(H256::from([3; 32])))]), + stream::iter(vec![H256::from([2; 32]), H256::from([3; 32])]), ready(Ok((H256::from([1; 32]), 100))), |block| match block[0] { 2 => ready(Err(client_err(ClientError::NotAvailableOnLightClient))), @@ -784,13 +836,10 @@ mod tests { |_, old_value, new_value| match old_value == Some(new_value) { true => None, false => Some(new_value.clone()), - } + }, ); - assert_eq!( - stream.collect().wait(), - Ok(vec![100, 200]) - ); + assert_eq!(executor::block_on(stream.collect::>()), vec![Ok(100), Ok(200)]); } #[test] @@ -800,10 +849,7 @@ mod tests { let shared_requests = SimpleSubscriptions::default(); // let's 'issue' requests for B1 - shared_requests.lock().insert( - H256::from([1; 32]), - vec![channel().0], - ); + shared_requests.lock().insert(H256::from([1; 32]), vec![channel().0]); // make sure that no additional requests are issued when we're asking for B1 let _ = maybe_share_remote_request::( diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index d145ac5e5510b..712fe00c54386 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,26 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use super::*; -use super::state_full::split_range; use self::error::Error; - -use std::sync::Arc; +use super::{state_full::split_range, *}; +use crate::testing::TaskExecutor; use assert_matches::assert_matches; -use futures01::stream::Stream; -use sp_core::{storage::ChildInfo, ChangesTrieConfiguration}; -use sp_core::hash::H256; +use futures::{executor, StreamExt}; use sc_block_builder::BlockBuilderProvider; -use sp_io::hashing::blake2_256; -use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, - runtime, -}; use sc_rpc_api::DenyUnsafe; +use sp_consensus::BlockOrigin; +use sp_core::{hash::H256, storage::ChildInfo, ChangesTrieConfiguration}; +use sp_io::hashing::blake2_256; use sp_runtime::generic::BlockId; -use crate::testing::TaskExecutor; -use futures::{executor, compat::Future01CompatExt}; +use std::sync::Arc; +use substrate_test_runtime_client::{prelude::*, runtime}; const STORAGE_KEY: &[u8] = b"child"; @@ -63,75 +56,184 @@ fn should_return_storage() { Arc::new(client), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let key = StorageKey(KEY.to_vec()); assert_eq!( - client.storage(key.clone(), Some(genesis_hash).into()).wait() - .map(|x| x.map(|x| x.0.len())).unwrap().unwrap() as usize, + executor::block_on(client.storage(key.clone(), Some(genesis_hash).into())) + .map(|x| x.map(|x| x.0.len())) + .unwrap() + .unwrap() as usize, VALUE.len(), ); assert_matches!( - client.storage_hash(key.clone(), Some(genesis_hash).into()).wait() + executor::block_on(client.storage_hash(key.clone(), Some(genesis_hash).into())) .map(|x| x.is_some()), Ok(true) ); assert_eq!( - client.storage_size(key.clone(), None).wait().unwrap().unwrap() as usize, + executor::block_on(client.storage_size(key.clone(), None)).unwrap().unwrap() as usize, VALUE.len(), ); assert_eq!( - client.storage_size(StorageKey(b":map".to_vec()), None).wait().unwrap().unwrap() as usize, + executor::block_on(client.storage_size(StorageKey(b":map".to_vec()), None)) + .unwrap() + .unwrap() as usize, 2 + 3, ); assert_eq!( executor::block_on( - child.storage(prefixed_storage_key(), key, Some(genesis_hash).into()) - .map(|x| x.map(|x| x.0.len())) - .compat(), - ).unwrap().unwrap() as usize, + child + .storage(prefixed_storage_key(), key, Some(genesis_hash).into()) + .map(|x| x.map(|x| x.unwrap().0.len())) + ) + .unwrap() as usize, CHILD_VALUE.len(), ); } #[test] -fn should_return_child_storage() { +fn should_return_storage_entries() { + const KEY1: &[u8] = b":mock"; + const KEY2: &[u8] = b":turtle"; + const VALUE: &[u8] = b"hello world"; + const CHILD_VALUE1: &[u8] = b"hello world !"; + const CHILD_VALUE2: &[u8] = b"hello world !"; + let child_info = ChildInfo::new_default(STORAGE_KEY); - let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage(&child_info, "key", vec![42_u8]) - .build()); + let client = TestClientBuilder::new() + .add_extra_storage(KEY1.to_vec(), VALUE.to_vec()) + .add_extra_child_storage(&child_info, KEY1.to_vec(), CHILD_VALUE1.to_vec()) + .add_extra_child_storage(&child_info, KEY2.to_vec(), CHILD_VALUE2.to_vec()) + .build(); let genesis_hash = client.genesis_hash(); let (_client, child) = new_full( - client, + Arc::new(client), 
SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, + ); + + let keys = &[StorageKey(KEY1.to_vec()), StorageKey(KEY2.to_vec())]; + assert_eq!( + executor::block_on(child.storage_entries( + prefixed_storage_key(), + keys.to_vec(), + Some(genesis_hash).into() + )) + .map(|x| x.into_iter().map(|x| x.map(|x| x.0.len()).unwrap()).sum::()) + .unwrap(), + CHILD_VALUE1.len() + CHILD_VALUE2.len() + ); + + // should fail if not all keys exist. + let mut failing_keys = vec![StorageKey(b":soup".to_vec())]; + failing_keys.extend_from_slice(keys); + assert_matches!( + executor::block_on(child.storage_entries( + prefixed_storage_key(), + failing_keys, + Some(genesis_hash).into() + )) + .map(|x| x.iter().all(|x| x.is_some())), + Ok(false) ); +} + +#[test] +fn should_return_child_storage() { + let child_info = ChildInfo::new_default(STORAGE_KEY); + let client = Arc::new( + substrate_test_runtime_client::TestClientBuilder::new() + .add_child_storage(&child_info, "key", vec![42_u8]) + .build(), + ); + let genesis_hash = client.genesis_hash(); + let (_client, child) = + new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); - assert_matches!( - child.storage( + executor::block_on(child.storage( child_key.clone(), key.clone(), Some(genesis_hash).into(), - ).wait(), + )), Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); + + // should fail if key does not exist. + let failing_key = StorageKey(b":soup".to_vec()); + assert_matches!( + executor::block_on(child.storage( + prefixed_storage_key(), + failing_key, + Some(genesis_hash).into() + )) + .map(|x| x.is_some()), + Ok(false) + ); + assert_matches!( - child.storage_hash( + executor::block_on(child.storage_hash( child_key.clone(), key.clone(), Some(genesis_hash).into(), - ).wait().map(|x| x.is_some()), + )) + .map(|x| x.is_some()), Ok(true) ); assert_matches!( - child.storage_size( + executor::block_on(child.storage_size(child_key.clone(), key.clone(), None)), + Ok(Some(1)) + ); +} + +#[test] +fn should_return_child_storage_entries() { + let child_info = ChildInfo::new_default(STORAGE_KEY); + let client = Arc::new( + substrate_test_runtime_client::TestClientBuilder::new() + .add_child_storage(&child_info, "key1", vec![42_u8]) + .add_child_storage(&child_info, "key2", vec![43_u8, 44]) + .build(), + ); + let genesis_hash = client.genesis_hash(); + let (_client, child) = + new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); + let child_key = prefixed_storage_key(); + let keys = vec![StorageKey(b"key1".to_vec()), StorageKey(b"key2".to_vec())]; + + let res = executor::block_on(child.storage_entries( + child_key.clone(), + keys.clone(), + Some(genesis_hash).into(), + )) + .unwrap(); + + assert_matches!( + res[0], + Some(StorageData(ref d)) + if d[0] == 42 && d.len() == 1 + ); + assert_matches!( + res[1], + Some(StorageData(ref d)) + if d[0] == 43 && d[1] == 44 && d.len() == 2 + ); + assert_matches!( + executor::block_on(child.storage_hash( child_key.clone(), - key.clone(), - None, - ).wait(), + keys[0].clone(), + Some(genesis_hash).into() + )) + .map(|x| x.is_some()), + Ok(true) + ); + assert_matches!( + executor::block_on(child.storage_size(child_key.clone(), keys[0].clone(), None)), Ok(Some(1)) ); } @@ -140,21 +242,22 @@ fn should_return_child_storage() { fn should_call_contract() { let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); 
- let (client, _child) = new_full( - client, - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - ); + let (client, _child) = + new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); assert_matches!( - client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(), + executor::block_on(client.call( + "balanceOf".into(), + Bytes(vec![1, 2, 3]), + Some(genesis_hash).into() + )), Err(Error::Client(_)) ) } #[test] fn should_notify_about_storage_changes() { - let (subscriber, id, transport) = Subscriber::new_test("test"); + let (subscriber, id, mut transport) = Subscriber::new_test("test"); { let mut client = Arc::new(substrate_test_runtime_client::new()); @@ -162,37 +265,35 @@ fn should_notify_about_storage_changes() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); api.subscribe_storage(Default::default(), subscriber, None.into()); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); + assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } - // assert notification sent to transport - let (notification, next) = executor::block_on(transport.into_future().compat()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(executor::block_on(next.into_future().compat()).unwrap().0, None); + // Check notification sent to transport + executor::block_on((&mut transport).take(2).collect::>()); + assert!(executor::block_on(transport.next()).is_none()); } #[test] fn should_send_initial_storage_changes_and_notifications() { - let (subscriber, id, transport) = Subscriber::new_test("test"); + let (subscriber, id, mut transport) = Subscriber::new_test("test"); { let mut client = Arc::new(substrate_test_runtime_client::new()); @@ -200,39 +301,37 @@ fn should_send_initial_storage_changes_and_notifications() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); - let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); + let alice_balance_key = + blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); - api.subscribe_storage(Default::default(), subscriber, Some(vec![ - StorageKey(alice_balance_key.to_vec()), - ]).into()); + api.subscribe_storage( + Default::default(), + subscriber, + Some(vec![StorageKey(alice_balance_key.to_vec())]).into(), + ); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); + assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(runtime::Transfer { + 
from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } - // assert initial values sent to transport - let (notification, next) = executor::block_on(transport.into_future().compat()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = executor::block_on(next.into_future().compat()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(executor::block_on(next.into_future().compat()).unwrap().0, None); + // Check for the correct number of notifications + executor::block_on((&mut transport).take(2).collect::>()); + assert!(executor::block_on(transport.next()).is_none()); } #[test] @@ -242,6 +341,7 @@ fn should_query_storage() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let mut add_block = |nonce| { @@ -251,14 +351,18 @@ fn should_query_storage() { // fake change: None -> Some(value) -> Some(value) builder.push_storage_change(vec![2], Some(vec![2])).unwrap(); // actual change: None -> Some(value) -> None - builder.push_storage_change(vec![3], if nonce == 0 { Some(vec![3]) } else { None }).unwrap(); + builder + .push_storage_change(vec![3], if nonce == 0 { Some(vec![3]) } else { None }) + .unwrap(); // actual change: None -> Some(value) - builder.push_storage_change(vec![4], if nonce == 0 { None } else { Some(vec![4]) }).unwrap(); + builder + .push_storage_change(vec![4], if nonce == 0 { None } else { Some(vec![4]) }) + .unwrap(); // actual change: Some(value1) -> Some(value2) builder.push_storage_change(vec![5], Some(vec![nonce as u8])).unwrap(); let block = builder.build().unwrap().block; let hash = block.header.hash(); - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); hash }; let block1_hash = add_block(0); @@ -295,20 +399,12 @@ fn should_query_storage() { // Query changes only up to block1 let keys = (1..6).map(|k| StorageKey(vec![k])).collect::>(); - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(block1_hash).into(), - ); + let result = api.query_storage(keys.clone(), genesis_hash, Some(block1_hash).into()); - assert_eq!(result.wait().unwrap(), expected); + assert_eq!(executor::block_on(result).unwrap(), expected); // Query all changes - let result = api.query_storage( - keys.clone(), - genesis_hash, - None.into(), - ); + let result = api.query_storage(keys.clone(), genesis_hash, None.into()); expected.push(StorageChangeSet { block: block2_hash, @@ -318,120 +414,108 @@ fn should_query_storage() { (StorageKey(vec![5]), Some(StorageData(vec![1]))), ], }); - assert_eq!(result.wait().unwrap(), expected); + assert_eq!(executor::block_on(result).unwrap(), expected); // Query changes up to block2. - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(block2_hash), - ); + let result = api.query_storage(keys.clone(), genesis_hash, Some(block2_hash)); - assert_eq!(result.wait().unwrap(), expected); + assert_eq!(executor::block_on(result).unwrap(), expected); // Inverted range. 
- let result = api.query_storage( - keys.clone(), - block1_hash, - Some(genesis_hash), - ); + let result = api.query_storage(keys.clone(), block1_hash, Some(genesis_hash)); assert_eq!( - result.wait().map_err(|e| e.to_string()), + executor::block_on(result).map_err(|e| e.to_string()), Err(Error::InvalidBlockRange { from: format!("1 ({:?})", block1_hash), to: format!("0 ({:?})", genesis_hash), details: "from number > to number".to_owned(), - }).map_err(|e| e.to_string()) + }) + .map_err(|e| e.to_string()) ); let random_hash1 = H256::random(); let random_hash2 = H256::random(); // Invalid second hash. - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(random_hash1), - ); + let result = api.query_storage(keys.clone(), genesis_hash, Some(random_hash1)); assert_eq!( - result.wait().map_err(|e| e.to_string()), + executor::block_on(result).map_err(|e| e.to_string()), Err(Error::InvalidBlockRange { from: format!("{:?}", genesis_hash), to: format!("{:?}", Some(random_hash1)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()) + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + }) + .map_err(|e| e.to_string()) ); // Invalid first hash with Some other hash. - let result = api.query_storage( - keys.clone(), - random_hash1, - Some(genesis_hash), - ); + let result = api.query_storage(keys.clone(), random_hash1, Some(genesis_hash)); assert_eq!( - result.wait().map_err(|e| e.to_string()), + executor::block_on(result).map_err(|e| e.to_string()), Err(Error::InvalidBlockRange { from: format!("{:?}", random_hash1), to: format!("{:?}", Some(genesis_hash)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + }) + .map_err(|e| e.to_string()), ); // Invalid first hash with None. - let result = api.query_storage( - keys.clone(), - random_hash1, - None, - ); + let result = api.query_storage(keys.clone(), random_hash1, None); assert_eq!( - result.wait().map_err(|e| e.to_string()), + executor::block_on(result).map_err(|e| e.to_string()), Err(Error::InvalidBlockRange { from: format!("{:?}", random_hash1), to: format!("{:?}", Some(block2_hash)), // Best block hash. - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + }) + .map_err(|e| e.to_string()), ); // Both hashes invalid. - let result = api.query_storage( - keys.clone(), - random_hash1, - Some(random_hash2), - ); + let result = api.query_storage(keys.clone(), random_hash1, Some(random_hash2)); assert_eq!( - result.wait().map_err(|e| e.to_string()), + executor::block_on(result).map_err(|e| e.to_string()), Err(Error::InvalidBlockRange { from: format!("{:?}", random_hash1), // First hash not found. 
to: format!("{:?}", Some(random_hash2)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + }) + .map_err(|e| e.to_string()), ); // single block range - let result = api.query_storage_at( - keys.clone(), - Some(block1_hash), - ); + let result = api.query_storage_at(keys.clone(), Some(block1_hash)); assert_eq!( - result.wait().unwrap(), - vec![ - StorageChangeSet { - block: block1_hash, - changes: vec![ - (StorageKey(vec![1_u8]), None), - (StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))), - (StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))), - (StorageKey(vec![4_u8]), None), - (StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))), - ] - } - ] + executor::block_on(result).unwrap(), + vec![StorageChangeSet { + block: block1_hash, + changes: vec![ + (StorageKey(vec![1_u8]), None), + (StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))), + (StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))), + (StorageKey(vec![4_u8]), None), + (StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))), + ] + }] ); } @@ -455,7 +539,6 @@ fn should_split_ranges() { assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100))); } - #[test] fn should_return_runtime_version() { let client = Arc::new(substrate_test_runtime_client::new()); @@ -463,16 +546,17 @@ fn should_return_runtime_version() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\ - [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",2],[\"0x40fe3ad401f8959a\",4],\ + [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",5],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",2],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ \"transactionVersion\":1}"; - let runtime_version = api.runtime_version(None.into()).wait().unwrap(); + let runtime_version = executor::block_on(api.runtime_version(None.into())).unwrap(); let serialized = serde_json::to_string(&runtime_version).unwrap(); assert_eq!(serialized, result); @@ -482,7 +566,7 @@ fn should_return_runtime_version() { #[test] fn should_notify_on_runtime_version_initially() { - let (subscriber, id, transport) = Subscriber::new_test("test"); + let (subscriber, id, mut transport) = Subscriber::new_test("test"); { let client = Arc::new(substrate_test_runtime_client::new()); @@ -490,23 +574,18 @@ fn should_notify_on_runtime_version_initially() { client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, + None, ); api.subscribe_runtime_version(Default::default(), subscriber); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); - + assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); } // assert initial version sent. 
- let (notification, next) = executor::block_on(transport.into_future().compat()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(executor::block_on(next.into_future().compat()).unwrap().0, None); + executor::block_on((&mut transport).take(1).collect::>()); + assert!(executor::block_on(transport.next()).is_none()); } #[test] diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index 17fb6b77a5710..f99994e41a1be 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,25 +18,27 @@ //! Substrate system API. -#[cfg(test)] -mod tests; - -use futures::{future::BoxFuture, FutureExt, TryFutureExt}; -use futures::{channel::oneshot, compat::Compat}; +use self::error::Result; +use futures::{channel::oneshot, FutureExt}; use sc_rpc_api::{DenyUnsafe, Receiver}; -use sp_utils::mpsc::TracingUnboundedSender; +use sc_tracing::logging; +use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{self, Header as HeaderT}; -use self::error::Result; - +pub use self::{ + gen_client::Client as SystemClient, + helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}, +}; pub use sc_rpc_api::system::*; -pub use self::helpers::{SystemInfo, Health, PeerInfo, NodeRole, SyncState}; -pub use self::gen_client::Client as SystemClient; +#[cfg(test)] +mod tests; + +/// Early exit for RPCs that require `--rpc-methods=Unsafe` to be enabled macro_rules! bail_if_unsafe { ($value: expr) => { if let Err(err) = $value.check_if_safe() { - return async move { Err(err.into()) }.boxed().compat(); + return async move { Err(err.into()) }.boxed() } }; } @@ -65,6 +67,8 @@ pub enum Request { NetworkAddReservedPeer(String, oneshot::Sender>), /// Must return any potential parse error. NetworkRemoveReservedPeer(String, oneshot::Sender>), + /// Must return the list of reserved peers + NetworkReservedPeers(oneshot::Sender>), /// Must return the node role. NodeRoles(oneshot::Sender>), /// Must return the state of the node syncing. 
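In the `System` RPC hunks below, the existing design is kept — each method pushes a `Request` variant holding a `oneshot::Sender` onto the node's unbounded channel — but the futures 0.1 `Compat` wrapper around the receiver goes away: `Receiver(Compat::new(rx))` becomes `Receiver(rx)`, and the peers/network-state methods return a plain `rpc::BoxFuture`. A minimal sketch of that request/reply round trip, assuming pared-down stand-ins for `Request` and `Health` (the real enum carries many more variants):

use futures::{
    channel::{mpsc, oneshot},
    executor,
    future::{self, BoxFuture},
    FutureExt, StreamExt,
};

#[derive(Debug, PartialEq)]
struct Health {
    peers: usize,
}

// Pared-down stand-in for the crate's `Request` enum.
enum Request {
    Health(oneshot::Sender<Health>),
}

// RPC side: queue the request and return a boxed future awaiting the reply.
fn system_health(
    send_back: &mpsc::UnboundedSender<Request>,
) -> BoxFuture<'static, Result<Health, ()>> {
    let (tx, rx) = oneshot::channel();
    let _ = send_back.unbounded_send(Request::Health(tx));
    async move { rx.await.map_err(|_| ()) }.boxed()
}

fn main() {
    let (tx, mut rx) = mpsc::unbounded();
    // Node side: answer a single queued request.
    let handler = async move {
        if let Some(Request::Health(sender)) = rx.next().await {
            let _ = sender.send(Health { peers: 3 });
        }
    };
    let (_, res) = executor::block_on(future::join(handler, system_health(&tx)));
    assert_eq!(res, Ok(Health { peers: 3 }));
}

The same shape is reused unchanged for the `NetworkReservedPeers` request this diff adds.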
@@ -81,11 +85,7 @@ impl System { send_back: TracingUnboundedSender>, deny_unsafe: DenyUnsafe, ) -> Self { - System { - info, - send_back, - deny_unsafe, - } + System { info, send_back, deny_unsafe } } } @@ -102,61 +102,53 @@ impl SystemApi::Number> for Sy Ok(self.info.chain_name.clone()) } - fn system_type(&self) -> Result { + fn system_type(&self) -> Result { Ok(self.info.chain_type.clone()) } - fn system_properties(&self) -> Result { + fn system_properties(&self) -> Result { Ok(self.info.properties.clone()) } fn system_health(&self) -> Receiver { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::Health(tx)); - Receiver(Compat::new(rx)) + Receiver(rx) } fn system_local_peer_id(&self) -> Receiver { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::LocalPeerId(tx)); - Receiver(Compat::new(rx)) + Receiver(rx) } fn system_local_listen_addresses(&self) -> Receiver> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::LocalListenAddresses(tx)); - Receiver(Compat::new(rx)) + Receiver(rx) } - fn system_peers(&self) - -> Compat::Number>>>>> - { + fn system_peers( + &self, + ) -> rpc::BoxFuture::Number>>>> { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::Peers(tx)); - async move { - rx.await.map_err(|_| rpc::Error::internal_error()) - }.boxed().compat() + async move { rx.await.map_err(|_| rpc::Error::internal_error()) }.boxed() } - fn system_network_state(&self) - -> Compat>> - { + fn system_network_state(&self) -> rpc::BoxFuture> { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkState(tx)); - async move { - rx.await.map_err(|_| rpc::Error::internal_error()) - }.boxed().compat() + async move { rx.await.map_err(|_| rpc::Error::internal_error()) }.boxed() } - fn system_add_reserved_peer(&self, peer: String) - -> Compat>> - { + fn system_add_reserved_peer(&self, peer: String) -> rpc::BoxFuture> { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); @@ -167,12 +159,11 @@ impl SystemApi::Number> for Sy Ok(Err(e)) => Err(rpc::Error::from(e)), Err(_) => Err(rpc::Error::internal_error()), } - }.boxed().compat() + } + .boxed() } - fn system_remove_reserved_peer(&self, peer: String) - -> Compat>> - { + fn system_remove_reserved_peer(&self, peer: String) -> rpc::BoxFuture> { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); @@ -183,18 +174,36 @@ impl SystemApi::Number> for Sy Ok(Err(e)) => Err(rpc::Error::from(e)), Err(_) => Err(rpc::Error::internal_error()), } - }.boxed().compat() + } + .boxed() + } + + fn system_reserved_peers(&self) -> Receiver> { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NetworkReservedPeers(tx)); + Receiver(rx) } fn system_node_roles(&self) -> Receiver> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NodeRoles(tx)); - Receiver(Compat::new(rx)) + Receiver(rx) } fn system_sync_state(&self) -> Receiver::Number>> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::SyncState(tx)); - Receiver(Compat::new(rx)) + Receiver(rx) + } + + fn system_add_log_filter(&self, directives: String) -> rpc::Result<()> { + self.deny_unsafe.check_if_safe()?; + logging::add_directives(&directives); + logging::reload_filter().map_err(|_e| rpc::Error::internal_error()) + } + + fn system_reset_log_filter(&self) -> rpc::Result<()> { + 
self.deny_unsafe.check_if_safe()?; + logging::reset_log_filter().map_err(|_e| rpc::Error::internal_error()) } } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 61f1940dc2010..14997545031df 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,13 +18,17 @@ use super::*; -use sc_network::{self, PeerId}; -use sc_network::config::Role; -use substrate_test_runtime_client::runtime::Block; use assert_matches::assert_matches; -use futures::prelude::*; -use sp_utils::mpsc::tracing_unbounded; -use std::thread; +use futures::{executor, prelude::*}; +use sc_network::{self, config::Role, PeerId}; +use sc_utils::mpsc::tracing_unbounded; +use std::{ + env, + io::{BufRead, BufReader, Write}, + process::{Command, Stdio}, + thread, +}; +use substrate_test_runtime_client::runtime::Block; struct Status { pub peers: usize, @@ -35,12 +39,7 @@ struct Status { impl Default for Status { fn default() -> Status { - Status { - peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: false, - } + Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: false } } } @@ -59,7 +58,8 @@ fn api>>(sync: T) -> System { }); }, Request::LocalPeerId(sender) => { - let _ = sender.send("QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()); + let _ = + sender.send("QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()); }, Request::LocalListenAddresses(sender) => { let _ = sender.send(vec![ @@ -78,39 +78,48 @@ fn api>>(sync: T) -> System { }); } let _ = sender.send(peers); - } + }, Request::NetworkState(sender) => { - let _ = sender.send(serde_json::to_value(&sc_network::network_state::NetworkState { - peer_id: String::new(), - listened_addresses: Default::default(), - external_addresses: Default::default(), - connected_peers: Default::default(), - not_connected_peers: Default::default(), - peerset: serde_json::Value::Null, - }).unwrap()); + let _ = sender.send( + serde_json::to_value(&sc_network::network_state::NetworkState { + peer_id: String::new(), + listened_addresses: Default::default(), + external_addresses: Default::default(), + connected_peers: Default::default(), + not_connected_peers: Default::default(), + peerset: serde_json::Value::Null, + }) + .unwrap(), + ); }, Request::NetworkAddReservedPeer(peer, sender) => { let _ = match sc_network::config::parse_str_addr(&peer) { Ok(_) => sender.send(Ok(())), - Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), + Err(s) => + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), }; }, Request::NetworkRemoveReservedPeer(peer, sender) => { let _ = match peer.parse::() { Ok(_) => sender.send(Ok(())), - Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), + Err(s) => + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), }; - } + }, + Request::NetworkReservedPeers(sender) => { + let _ = sender + .send(vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()]); + }, Request::NodeRoles(sender) => { let _ = sender.send(vec![NodeRole::Authority]); - } + }, Request::SyncState(sender) => { let _ = sender.send(SyncState { starting_block: 1, current_block: 2, highest_block: Some(3), }); 
- } + }, }; future::ready(()) @@ -125,106 +134,68 @@ fn api>>(sync: T) -> System { chain_type: Default::default(), }, tx, - sc_rpc_api::DenyUnsafe::No + sc_rpc_api::DenyUnsafe::No, ) } fn wait_receiver(rx: Receiver) -> T { - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); - runtime.block_on(rx).unwrap() + futures::executor::block_on(rx).unwrap() } #[test] fn system_name_works() { - assert_eq!( - api(None).system_name().unwrap(), - "testclient".to_owned(), - ); + assert_eq!(api(None).system_name().unwrap(), "testclient".to_owned()); } #[test] fn system_version_works() { - assert_eq!( - api(None).system_version().unwrap(), - "0.2.0".to_owned(), - ); + assert_eq!(api(None).system_version().unwrap(), "0.2.0".to_owned()); } #[test] fn system_chain_works() { - assert_eq!( - api(None).system_chain().unwrap(), - "testchain".to_owned(), - ); + assert_eq!(api(None).system_chain().unwrap(), "testchain".to_owned()); } #[test] fn system_properties_works() { - assert_eq!( - api(None).system_properties().unwrap(), - serde_json::map::Map::new(), - ); + assert_eq!(api(None).system_properties().unwrap(), serde_json::map::Map::new()); } #[test] fn system_type_works() { - assert_eq!( - api(None).system_type().unwrap(), - Default::default(), - ); + assert_eq!(api(None).system_type().unwrap(), Default::default()); } #[test] fn system_health() { assert_matches!( wait_receiver(api(None).system_health()), - Health { - peers: 0, - is_syncing: false, - should_have_peers: true, - } + Health { peers: 0, is_syncing: false, should_have_peers: true } ); assert_matches!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: true, - is_dev: true, - }).system_health()), - Health { - peers: 5, - is_syncing: true, - should_have_peers: false, - } + wait_receiver( + api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: true, is_dev: true }) + .system_health() + ), + Health { peers: 5, is_syncing: true, should_have_peers: false } ); assert_eq!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: false, - is_dev: false, - }).system_health()), - Health { - peers: 5, - is_syncing: false, - should_have_peers: true, - } + wait_receiver( + api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: false, is_dev: false }) + .system_health() + ), + Health { peers: 5, is_syncing: false, should_have_peers: true } ); assert_eq!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: true, - }).system_health()), - Health { - peers: 0, - is_syncing: false, - should_have_peers: false, - } + wait_receiver( + api(Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: true }) + .system_health() + ), + Health { peers: 0, is_syncing: false, should_have_peers: false } ); } @@ -241,24 +212,19 @@ fn system_local_listen_addresses_works() { assert_eq!( wait_receiver(api(None).system_local_listen_addresses()), vec![ - "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), - "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" + .to_string(), + "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" + .to_string(), ] ); } #[test] fn system_peers() { - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); - let peer_id = PeerId::random(); - let req = api(Status { - peer_id: 
peer_id.clone(), - peers: 1, - is_syncing: false, - is_dev: true, - }).system_peers(); - let res = runtime.block_on(req).unwrap(); + let req = api(Status { peer_id, peers: 1, is_syncing: false, is_dev: true }).system_peers(); + let res = executor::block_on(req).unwrap(); assert_eq!( res, @@ -273,9 +239,8 @@ fn system_peers() { #[test] fn system_network_state() { - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); let req = api(None).system_network_state(); - let res = runtime.block_on(req).unwrap(); + let res = executor::block_on(req).unwrap(); assert_eq!( serde_json::from_value::(res).unwrap(), @@ -292,44 +257,122 @@ fn system_network_state() { #[test] fn system_node_roles() { - assert_eq!( - wait_receiver(api(None).system_node_roles()), - vec![NodeRole::Authority] - ); + assert_eq!(wait_receiver(api(None).system_node_roles()), vec![NodeRole::Authority]); } #[test] fn system_sync_state() { assert_eq!( wait_receiver(api(None).system_sync_state()), - SyncState { - starting_block: 1, - current_block: 2, - highest_block: Some(3), - } + SyncState { starting_block: 1, current_block: 2, highest_block: Some(3) } ); } #[test] fn system_network_add_reserved() { - let good_peer_id = "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; + let good_peer_id = + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; let bad_peer_id = "/ip4/198.51.100.19/tcp/30333"; - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); let good_fut = api(None).system_add_reserved_peer(good_peer_id.into()); let bad_fut = api(None).system_add_reserved_peer(bad_peer_id.into()); - assert_eq!(runtime.block_on(good_fut), Ok(())); - assert!(runtime.block_on(bad_fut).is_err()); + assert_eq!(executor::block_on(good_fut), Ok(())); + assert!(executor::block_on(bad_fut).is_err()); } #[test] fn system_network_remove_reserved() { let good_peer_id = "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let bad_peer_id = "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); + let bad_peer_id = + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; let good_fut = api(None).system_remove_reserved_peer(good_peer_id.into()); let bad_fut = api(None).system_remove_reserved_peer(bad_peer_id.into()); - assert_eq!(runtime.block_on(good_fut), Ok(())); - assert!(runtime.block_on(bad_fut).is_err()); + assert_eq!(executor::block_on(good_fut), Ok(())); + assert!(executor::block_on(bad_fut).is_err()); +} + +#[test] +fn system_network_reserved_peers() { + assert_eq!( + wait_receiver(api(None).system_reserved_peers()), + vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()] + ); +} + +#[test] +fn test_add_reset_log_filter() { + const EXPECTED_BEFORE_ADD: &'static str = "EXPECTED_BEFORE_ADD"; + const EXPECTED_AFTER_ADD: &'static str = "EXPECTED_AFTER_ADD"; + const EXPECTED_WITH_TRACE: &'static str = "EXPECTED_WITH_TRACE"; + + // Enter log generation / filter reload + if std::env::var("TEST_LOG_FILTER").is_ok() { + sc_tracing::logging::LoggerBuilder::new("test_before_add=debug").init().unwrap(); + for line in std::io::stdin().lock().lines() { + let line = line.expect("Failed to read bytes"); + if line.contains("add_reload") { + api(None) + .system_add_log_filter("test_after_add".into()) + .expect("`system_add_log_filter` failed"); + } else if line.contains("add_trace") { + api(None) + 
.system_add_log_filter("test_before_add=trace".into()) + .expect("`system_add_log_filter` failed"); + } else if line.contains("reset") { + api(None).system_reset_log_filter().expect("`system_reset_log_filter` failed"); + } else if line.contains("exit") { + return + } + log::trace!(target: "test_before_add", "{}", EXPECTED_WITH_TRACE); + log::debug!(target: "test_before_add", "{}", EXPECTED_BEFORE_ADD); + log::debug!(target: "test_after_add", "{}", EXPECTED_AFTER_ADD); + } + } + + // Call this test again to enter the log generation / filter reload block + let test_executable = env::current_exe().expect("Unable to get current executable!"); + let mut child_process = Command::new(test_executable) + .env("TEST_LOG_FILTER", "1") + .args(&["--nocapture", "test_add_reset_log_filter"]) + .stdin(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .unwrap(); + + let child_stderr = child_process.stderr.take().expect("Could not get child stderr"); + let mut child_out = BufReader::new(child_stderr); + let mut child_in = child_process.stdin.take().expect("Could not get child stdin"); + + let mut read_line = || { + let mut line = String::new(); + child_out.read_line(&mut line).expect("Reading a line"); + line + }; + + // Initiate logs loop in child process + child_in.write(b"\n").unwrap(); + assert!(read_line().contains(EXPECTED_BEFORE_ADD)); + + // Initiate add directive & reload in child process + child_in.write(b"add_reload\n").unwrap(); + assert!(read_line().contains(EXPECTED_BEFORE_ADD)); + assert!(read_line().contains(EXPECTED_AFTER_ADD)); + + // Check that increasing the max log level works + child_in.write(b"add_trace\n").unwrap(); + assert!(read_line().contains(EXPECTED_WITH_TRACE)); + assert!(read_line().contains(EXPECTED_BEFORE_ADD)); + assert!(read_line().contains(EXPECTED_AFTER_ADD)); + + // Initiate logs filter reset in child process + child_in.write(b"reset\n").unwrap(); + assert!(read_line().contains(EXPECTED_BEFORE_ADD)); + + // Return from child process + child_in.write(b"exit\n").unwrap(); + assert!(child_process.wait().expect("Error waiting for child process").success()); + + // Check for EOF + assert_eq!(child_out.read_line(&mut String::new()).unwrap(), 0); } diff --git a/client/rpc/src/testing.rs b/client/rpc/src/testing.rs index 9530ff0020644..23071ba10e0d6 100644 --- a/client/rpc/src/testing.rs +++ b/client/rpc/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,8 +18,10 @@ //! Testing utils used by the RPC tests. -use rpc::futures::future as future01; -use futures::{executor, compat::Future01CompatExt, FutureExt}; +use futures::{ + executor, + task::{FutureObj, Spawn, SpawnError}, +}; // Executor shared by all tests. // @@ -30,16 +32,15 @@ lazy_static::lazy_static! 
{ .expect("Failed to create thread pool executor for tests"); } -type Boxed01Future01 = Box + Send + 'static>; - /// Executor for use in testing pub struct TaskExecutor; -impl future01::Executor for TaskExecutor { - fn execute( - &self, - future: Boxed01Future01, - ) -> std::result::Result<(), future01::ExecuteError>{ - EXECUTOR.spawn_ok(future.compat().map(drop)); +impl Spawn for TaskExecutor { + fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { + EXECUTOR.spawn_ok(future); + Ok(()) + } + + fn status(&self) -> Result<(), SpawnError> { Ok(()) } } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 6395cbe125b6d..5120cc8f4dfaa 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -17,78 +17,73 @@ default = ["db"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. db = ["sc-client-db/with-kvdb-rocksdb", "sc-client-db/with-parity-db"] -wasmtime = [ - "sc-executor/wasmtime", -] +wasmtime = ["sc-executor/wasmtime"] # exposes the client type test-helpers = [] [dependencies] -derive_more = "0.99.2" -futures01 = { package = "futures", version = "0.1.29" } -futures = { version = "0.3.4", features = ["compat"] } -jsonrpc-pubsub = "15.0" -jsonrpc-core = "15.0" +thiserror = "1.0.21" +futures = "0.3.16" +jsonrpc-pubsub = "18.0" +jsonrpc-core = "18.0" rand = "0.7.3" -parking_lot = "0.10.0" -lazy_static = "1.4.0" -log = "0.4.8" -slog = { version = "2.5.2", features = ["nested-values"] } +parking_lot = "0.11.1" +log = "0.4.11" futures-timer = "3.0.1" -wasm-timer = "0.2" exit-future = "0.2.0" -pin-project = "0.4.8" +pin-project = "1.0.4" hash-db = "0.15.2" -serde = "1.0.101" -serde_json = "1.0.41" -sc-keystore = { version = "2.0.0", path = "../keystore" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sp-session = { version = "2.0.0", path = "../../primitives/session" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sc-network = { version = "0.8.0", path = "../network" } -sc-chain-spec = { version = "2.0.0", path = "../chain-spec" } -sc-light = { version = "2.0.0", path = "../light" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sc-client-db = { version = "0.8.0", default-features = false, path = "../db" } -codec = { package = "parity-scale-codec", version = "1.3.4" } -sc-executor = { version = "0.8.0", 
path = "../executor" } -sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-rpc-server = { version = "2.0.0", path = "../rpc-servers" } -sc-rpc = { version = "2.0.0", path = "../rpc" } -sc-block-builder = { version = "0.8.0", path = "../block-builder" } -sp-block-builder = { version = "2.0.0", path = "../../primitives/block-builder" } -sc-informant = { version = "0.8.0", path = "../informant" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sc-offchain = { version = "2.0.0", path = "../offchain" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0"} -sc-tracing = { version = "2.0.0", path = "../tracing" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -tokio = "0.2.0" -tracing = "0.1.19" +serde = "1.0.126" +serde_json = "1.0.68" +sc-keystore = { version = "4.0.0-dev", path = "../keystore" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } +sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } +sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } +sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } +sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } +sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } +sc-light = { version = "4.0.0-dev", path = "../light" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-executor = { version = "0.10.0-dev", path = "../executor" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } +sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } +sp-transaction-storage-proof = { version = "4.0.0-dev", path = "../../primitives/transaction-storage-proof" } +sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } +sc-rpc = { version = "4.0.0-dev", path = "../rpc" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-builder" } +sc-informant = { version = "0.10.0-dev", path = "../informant" } +sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } +sc-offchain = { version = "4.0.0-dev", path = 
"../offchain" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0" } +sc-tracing = { version = "4.0.0-dev", path = "../tracing" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +tracing = "0.1.25" tracing-futures = { version = "0.2.4" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } - -[target.'cfg(not(target_os = "unknown"))'.dependencies] +parity-util-mem = { version = "0.10.0", default-features = false, features = [ + "primitive-types", +] } +async-trait = "0.1.50" +tokio = { version = "1.10", features = ["time", "rt-multi-thread"] } tempfile = "3.1.0" -directories = "2.0.2" +directories = "3.0.2" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0", path = "../../primitives/consensus/babe" } -grandpa = { version = "0.8.0", package = "sc-finality-grandpa", path = "../finality-grandpa" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } -tokio = { version = "0.2", default-features = false } +substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime/" } async-std = { version = "1.6.5", default-features = false } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 4aeace47a943f..75f9ebc8cc768 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,57 +17,51 @@ // along with this program. If not, see . 
use crate::{ - error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, - TelemetryConnectionSinks, RpcHandlers, NetworkStatusSinks, - start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, - metrics::MetricsService, + build_network_future, client::{light, Client, ClientConfig}, - config::{Configuration, KeystoreConfig, PrometheusConfig}, -}; -use sc_client_api::{ - light::RemoteBlockchain, ForkBlocks, BadBlocks, UsageProvider, ExecutorProvider, -}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sc_chain_spec::get_extension; -use sp_consensus::{ - block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain}, - import_queue::ImportQueue, + config::{Configuration, KeystoreConfig, PrometheusConfig, TransactionStorageMode}, + error::Error, + metrics::MetricsService, + start_rpc_servers, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; +use futures::{channel::oneshot, future::ready, FutureExt, StreamExt}; use jsonrpc_pubsub::manager::SubscriptionManager; -use futures::{ - FutureExt, StreamExt, - future::ready, - channel::oneshot, +use log::info; +use prometheus_endpoint::Registry; +use sc_chain_spec::get_extension; +use sc_client_api::{ + execution_extensions::ExecutionExtensions, light::RemoteBlockchain, + proof_provider::ProofProvider, BadBlocks, BlockBackend, BlockchainEvents, ExecutorProvider, + ForkBlocks, StorageProvider, UsageProvider, }; +use sc_client_db::{Backend, DatabaseSettings}; +use sc_consensus::import_queue::ImportQueue; +use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; -use log::{info, warn}; -use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; -use sc_network::NetworkService; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, SaturatedConversion, HashFor, Zero, BlockIdTo, +use sc_network::{ + block_request_handler::{self, BlockRequestHandler}, + config::{OnDemand, Role, SyncMode}, + light_client_requests::{self, handler::LightClientRequestHandler}, + state_request_handler::{self, StateRequestHandler}, + warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider}, + NetworkService, }; -use sp_api::{ProvideRuntimeApi, CallApiAt}; -use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; -use std::sync::Arc; -use wasm_timer::SystemTime; -use sc_telemetry::{telemetry, SUBSTRATE_INFO}; -use sp_transaction_pool::MaintainedTransactionPool; -use prometheus_endpoint::Registry; -use sc_client_db::{Backend, DatabaseSettings}; -use sp_core::traits::{ - CodeExecutor, - SpawnNamed, +use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; +use sc_transaction_pool_api::MaintainedTransactionPool; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; +use sp_api::{CallApiAt, ProvideRuntimeApi}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_consensus::block_validation::{ + BlockAnnounceValidator, Chain, DefaultBlockAnnounceValidator, }; -use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; -use sp_runtime::BuildStorage; -use sc_client_api::{ - BlockBackend, BlockchainEvents, - backend::StorageProvider, - proof_provider::ProofProvider, - execution_extensions::ExecutionExtensions +use sp_core::traits::{CodeExecutor, SpawnNamed}; +use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, BlockIdTo, HashFor, Zero}, + 
BuildStorage, }; -use sp_blockchain::{HeaderMetadata, HeaderBackend}; +use std::{str::FromStr, sync::Arc, time::SystemTime}; /// A utility trait for building an RPC extension given a `DenyUnsafe` instance. /// This is useful since at service definition time we don't know whether the @@ -84,11 +78,12 @@ pub trait RpcExtensionBuilder { &self, deny: sc_rpc::DenyUnsafe, subscription_executor: sc_rpc::SubscriptionTaskExecutor, - ) -> Self::Output; + ) -> Result; } -impl RpcExtensionBuilder for F where - F: Fn(sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> R, +impl RpcExtensionBuilder for F +where + F: Fn(sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> Result, R: sc_rpc::RpcExtension, { type Output = R; @@ -97,7 +92,7 @@ impl RpcExtensionBuilder for F where &self, deny: sc_rpc::DenyUnsafe, subscription_executor: sc_rpc::SubscriptionTaskExecutor, - ) -> Self::Output { + ) -> Result { (*self)(deny, subscription_executor) } } @@ -107,7 +102,8 @@ impl RpcExtensionBuilder for F where /// `DenyUnsafe` instance and return a static `RpcExtension` instance. pub struct NoopRpcExtensionBuilder(pub R); -impl RpcExtensionBuilder for NoopRpcExtensionBuilder where +impl RpcExtensionBuilder for NoopRpcExtensionBuilder +where R: Clone + sc_rpc::RpcExtension, { type Output = R; @@ -116,12 +112,13 @@ impl RpcExtensionBuilder for NoopRpcExtensionBuilder where &self, _deny: sc_rpc::DenyUnsafe, _subscription_executor: sc_rpc::SubscriptionTaskExecutor, - ) -> Self::Output { - self.0.clone() + ) -> Result { + Ok(self.0.clone()) } } -impl From for NoopRpcExtensionBuilder where +impl From for NoopRpcExtensionBuilder +where R: sc_rpc::RpcExtension, { fn from(e: R) -> NoopRpcExtensionBuilder { @@ -129,59 +126,40 @@ impl From for NoopRpcExtensionBuilder where } } - /// Full client type. -pub type TFullClient = Client< - TFullBackend, - TFullCallExecutor, - TBl, - TRtApi, ->; +pub type TFullClient = + Client, TFullCallExecutor, TBl, TRtApi>; /// Full client backend type. pub type TFullBackend = sc_client_db::Backend; /// Full client call executor type. -pub type TFullCallExecutor = crate::client::LocalCallExecutor< - sc_client_db::Backend, - NativeExecutor, ->; +pub type TFullCallExecutor = + crate::client::LocalCallExecutor, TExec>; /// Light client type. -pub type TLightClient = TLightClientWithBackend< - TBl, TRtApi, TExecDisp, TLightBackend ->; +pub type TLightClient = + TLightClientWithBackend>; /// Light client backend type. -pub type TLightBackend = sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor, ->; +pub type TLightBackend = + sc_light::Backend, HashFor>; /// Light call executor type. -pub type TLightCallExecutor = sc_light::GenesisCallExecutor< - sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor - >, +pub type TLightCallExecutor = sc_light::GenesisCallExecutor< + sc_light::Backend, HashFor>, crate::client::LocalCallExecutor< - sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor - >, - NativeExecutor + TBl, + sc_light::Backend, HashFor>, + TExec, >, >; -type TFullParts = ( - TFullClient, - Arc>, - KeystoreContainer, - TaskManager, -); +type TFullParts = + (TFullClient, Arc>, KeystoreContainer, TaskManager); -type TLightParts = ( - Arc>, +type TLightParts = ( + Arc>, Arc>, KeystoreContainer, TaskManager, @@ -189,73 +167,120 @@ type TLightParts = ( ); /// Light client backend type with a specific hash type. 
-pub type TLightBackendWithHash<TBl, THash> = sc_light::Backend< - sc_client_db::light::LightStorage<TBl>, - THash, ->; +pub type TLightBackendWithHash<TBl, THash> = + sc_light::Backend<sc_client_db::light::LightStorage<TBl>, THash>; /// Light client type with a specific backend. -pub type TLightClientWithBackend<TBl, TRtApi, TExecDisp, TBackend> = Client< +pub type TLightClientWithBackend<TBl, TRtApi, TExec, TBackend> = Client< TBackend, - sc_light::GenesisCallExecutor< - TBackend, - crate::client::LocalCallExecutor<TBackend, NativeExecutor<TExecDisp>>, - >, + sc_light::GenesisCallExecutor<TBackend, crate::client::LocalCallExecutor<TBl, TBackend, TExec>>, TBl, TRtApi, >; +trait AsCryptoStoreRef { + fn keystore_ref(&self) -> Arc<dyn CryptoStore>; + fn sync_keystore_ref(&self) -> Arc<dyn SyncCryptoStore>; +} + +impl<T> AsCryptoStoreRef for Arc<T> +where + T: CryptoStore + SyncCryptoStore + 'static, +{ + fn keystore_ref(&self) -> Arc<dyn CryptoStore> { + self.clone() + } + fn sync_keystore_ref(&self) -> Arc<dyn SyncCryptoStore> { + self.clone() + } +} + /// Construct and hold different layers of Keystore wrappers pub struct KeystoreContainer { - keystore: Arc<LocalKeystore>, - sync_keystore: SyncCryptoStorePtr, + remote: Option<Box<dyn AsCryptoStoreRef>>, + local: Arc<LocalKeystore>, } impl KeystoreContainer { /// Construct KeystoreContainer pub fn new(config: &KeystoreConfig) -> Result<Self, Error> { let keystore = Arc::new(match config { - KeystoreConfig::Path { path, password } => LocalKeystore::open( - path.clone(), - password.clone(), - )?, + KeystoreConfig::Path { path, password } => + LocalKeystore::open(path.clone(), password.clone())?, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); - let sync_keystore = keystore.clone() as SyncCryptoStorePtr; - Ok(Self { - keystore, - sync_keystore, - }) + Ok(Self { remote: Default::default(), local: keystore }) + } + + /// Set the remote keystore. + /// Should be called right away at startup and not at runtime: + /// even though this overrides any previously set remote store, it + /// does not reset any references previously handed out - they will + /// stick around. + pub fn set_remote_keystore<T>(&mut self, remote: Arc<T>) + where + T: CryptoStore + SyncCryptoStore + 'static, + { + self.remote = Some(Box::new(remote)) } /// Returns an adapter to the asynchronous keystore that implements `CryptoStore` pub fn keystore(&self) -> Arc<dyn CryptoStore> { - self.keystore.clone() + if let Some(c) = self.remote.as_ref() { + c.keystore_ref() + } else { + self.local.clone() + } } - /// Returns the synchrnous keystore wrapper + /// Returns the synchronous keystore wrapper pub fn sync_keystore(&self) -> SyncCryptoStorePtr { - self.sync_keystore.clone() + if let Some(c) = self.remote.as_ref() { + c.sync_keystore_ref() + } else { + self.local.clone() as SyncCryptoStorePtr + } + } + + /// Returns the local keystore if available + /// + /// The function will return None if the available keystore is not a local keystore. + /// + /// # Note + /// + /// Using the [`LocalKeystore`] will result in losing the ability to use any other keystore + /// implementation, like a remote keystore for example. Only use this if you are certain that you + /// require it! + pub fn local_keystore(&self) -> Option<Arc<LocalKeystore>> { + Some(self.local.clone()) + } } /// Creates a new full client for the given config. -pub fn new_full_client<TBl, TRtApi, TExecDisp>( - config: &Configuration -) -> Result<TFullClient<TBl, TRtApi, TExecDisp>, Error> where +pub fn new_full_client<TBl, TRtApi, TExec>( + config: &Configuration, + telemetry: Option<TelemetryHandle>, + executor: TExec, +) -> Result<TFullClient<TBl, TRtApi, TExec>, Error> +where TBl: BlockT, - TExecDisp: NativeExecutionDispatch + 'static, + TExec: CodeExecutor + RuntimeVersionOf + Clone, + TBl::Hash: FromStr, { - new_full_parts(config).map(|parts| parts.0) + new_full_parts(config, telemetry, executor).map(|parts| parts.0) } /// Create the initial parts of a full node. 
-pub fn new_full_parts( - config: &Configuration -) -> Result, Error> where +pub fn new_full_parts( + config: &Configuration, + telemetry: Option, + executor: TExec, +) -> Result, Error> +where TBl: BlockT, - TExecDisp: NativeExecutionDispatch + 'static, + TExec: CodeExecutor + RuntimeVersionOf + Clone, + TBl::Hash: FromStr, { let keystore_container = KeystoreContainer::new(&config.keystore)?; @@ -263,15 +288,9 @@ pub fn new_full_parts( let task_manager = { let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.task_executor.clone(), ipfs_rt, registry)? + TaskManager::new(config.tokio_handle.clone(), ipfs_rt, registry)? }; - let executor = NativeExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - let chain_spec = &config.chain_spec; let fork_blocks = get_extension::>(chain_spec.extensions()) .cloned() @@ -284,19 +303,38 @@ pub fn new_full_parts( let (client, backend) = { let db_config = sc_client_db::DatabaseSettings { state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - pruning: config.pruning.clone(), + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + state_pruning: config.state_pruning.clone(), source: config.database.clone(), + keep_blocks: config.keep_blocks.clone(), + transaction_storage: config.transaction_storage.clone(), }; + let backend = new_db_backend(db_config)?; + let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( config.execution_strategies.clone(), Some(keystore_container.sync_keystore()), + sc_offchain::OffchainDb::factory_from_backend(&*backend), ); - new_client( - db_config, + let wasm_runtime_substitutes = config + .chain_spec + .code_substitutes() + .into_iter() + .map(|(h, c)| { + let hash = TBl::Hash::from_str(&h).map_err(|_| { + Error::Application(Box::from(format!( + "Failed to parse `{}` as block hash for code substitutes.", + h + ))) + })?; + Ok((hash, c)) + }) + .collect::, Error>>()?; + + let client = new_client( + backend.clone(), executor, chain_spec.as_storage_builder(), fork_blocks, @@ -304,59 +342,59 @@ pub fn new_full_parts( extensions, Box::new(task_manager.spawn_handle()), config.prometheus_config.as_ref().map(|config| config.registry.clone()), + telemetry, ClientConfig { - offchain_worker_enabled : config.offchain_worker.enabled , + offchain_worker_enabled: config.offchain_worker.enabled, offchain_indexing_api: config.offchain_worker.indexing_enabled, + wasm_runtime_overrides: config.wasm_runtime_overrides.clone(), + no_genesis: matches!( + config.network.sync_mode, + sc_network::config::SyncMode::Fast { .. } | sc_network::config::SyncMode::Warp + ), + wasm_runtime_substitutes, }, - )? + )?; + + (client, backend) }; - Ok(( - client, - backend, - keystore_container, - task_manager, - )) + Ok((client, backend, keystore_container, task_manager)) } /// Create the initial parts of a light node. 
-pub fn new_light_parts<TBl, TRtApi, TExecDisp>( - config: &Configuration -) -> Result<TLightParts<TBl, TRtApi, TExecDisp>, Error> where +pub fn new_light_parts<TBl, TRtApi, TExec>( + config: &Configuration, + telemetry: Option<TelemetryHandle>, + executor: TExec, +) -> Result<TLightParts<TBl, TRtApi, TExec>, Error> +where TBl: BlockT, - TExecDisp: NativeExecutionDispatch + 'static, + TExec: CodeExecutor + RuntimeVersionOf + Clone, { let keystore_container = KeystoreContainer::new(&config.keystore)?; let ipfs_rt = tokio::runtime::Runtime::new().expect("couldn't start the IPFS runtime"); let task_manager = { let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.task_executor.clone(), ipfs_rt, registry)? + TaskManager::new(config.tokio_handle.clone(), ipfs_rt, registry)? }; - let executor = NativeExecutor::<TExecDisp>::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - let db_storage = { let db_settings = sc_client_db::DatabaseSettings { state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - pruning: config.pruning.clone(), + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + state_pruning: config.state_pruning.clone(), source: config.database.clone(), + keep_blocks: config.keep_blocks.clone(), + transaction_storage: config.transaction_storage.clone(), }; sc_client_db::light::LightStorage::new(db_settings)? }; let light_blockchain = sc_light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new( - sc_light::new_fetch_checker::<_, TBl, _>( - light_blockchain.clone(), - executor.clone(), - Box::new(task_manager.spawn_handle()), - ), - ); + let fetch_checker = Arc::new(sc_light::new_fetch_checker::<_, TBl, _>( + light_blockchain.clone(), + executor.clone(), + Box::new(task_manager.spawn_handle()), + )); let on_demand = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); let backend = sc_light::new_light_backend(light_blockchain); let client = Arc::new(light::new_light( @@ -365,14 +403,27 @@ pub fn new_light_parts<TBl, TRtApi, TExecDisp>( executor, Box::new(task_manager.spawn_handle()), config.prometheus_config.as_ref().map(|config| config.registry.clone()), + telemetry, )?); Ok((client, backend, keystore_container, task_manager, on_demand)) } -/// Create an instance of db-backed client. -pub fn new_client<E, Block, RA>( +/// Create an instance of the default DB backend. +pub fn new_db_backend<Block>( settings: DatabaseSettings, +) -> Result<Arc<Backend<Block>>, sp_blockchain::Error> +where + Block: BlockT, +{ + const CANONICALIZATION_DELAY: u64 = 4096; + + Ok(Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?)) +} + +/// Create an instance of a client backed by the given backend. 
+pub fn new_client( + backend: Arc>, executor: E, genesis_storage: &dyn BuildStorage, fork_blocks: ForkBlocks, @@ -380,39 +431,38 @@ pub fn new_client( execution_extensions: ExecutionExtensions, spawn_handle: Box, prometheus_registry: Option, - config: ClientConfig, -) -> Result<( + telemetry: Option, + config: ClientConfig, +) -> Result< crate::client::Client< Backend, - crate::client::LocalCallExecutor, E>, + crate::client::LocalCallExecutor, E>, Block, RA, >, - Arc>, -), sp_blockchain::Error, > - where - Block: BlockT, - E: CodeExecutor + RuntimeInfo, +where + Block: BlockT, + E: CodeExecutor + RuntimeVersionOf, { - const CANONICALIZATION_DELAY: u64 = 4096; - - let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); - let executor = crate::client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone()); - Ok(( - crate::client::Client::new( - backend.clone(), - executor, - genesis_storage, - fork_blocks, - bad_blocks, - execution_extensions, - prometheus_registry, - config, - )?, + let executor = crate::client::LocalCallExecutor::new( + backend.clone(), + executor, + spawn_handle, + config.clone(), + )?; + Ok(crate::client::Client::new( backend, - )) + executor, + genesis_storage, + fork_blocks, + bad_blocks, + execution_extensions, + prometheus_registry, + telemetry, + config, + )?) } /// Parameters to pass into `build`. @@ -438,38 +488,26 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub remote_blockchain: Option>>, /// A shared network instance. pub network: Arc::Hash>>, - /// Sinks to propagate network status updates. - pub network_status_sinks: NetworkStatusSinks, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, - /// Shared Telemetry connection sinks, - pub telemetry_connection_sinks: TelemetryConnectionSinks, + /// Telemetry instance for this node. + pub telemetry: Option<&'a mut Telemetry>, } /// Build a shared offchain workers instance. 
-pub fn build_offchain_workers( +pub fn build_offchain_workers( config: &Configuration, - backend: Arc, spawn_handle: SpawnTaskHandle, client: Arc, network: Arc::Hash>>, ipfs_rt: Arc>, -) -> Option>> - where - TBl: BlockT, TBackend: sc_client_api::Backend, - >::OffchainStorage: 'static, - TCl: Send + Sync + ProvideRuntimeApi + BlockchainEvents + 'static, - >::Api: sc_offchain::OffchainWorkerApi, +) -> Option>> +where + TBl: BlockT, + TCl: Send + Sync + ProvideRuntimeApi + BlockchainEvents + 'static, + >::Api: sc_offchain::OffchainWorkerApi, { - let offchain_workers = match backend.offchain_storage() { - Some(db) => { - Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db, ipfs_rt))) - }, - None => { - warn!("Offchain workers disabled, due to lack of offchain storage support in backend."); - None - }, - }; + let offchain_workers = Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), ipfs_rt))); // Inform the offchain worker about new imported blocks if let Some(offchain) = offchain_workers.clone() { @@ -481,7 +519,7 @@ pub fn build_offchain_workers( offchain, Clone::clone(&spawn_handle), network.clone(), - ) + ), ); } @@ -492,24 +530,34 @@ pub fn build_offchain_workers( pub fn spawn_tasks( params: SpawnTasksParams, ) -> Result - where - TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + - HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + - StorageProvider + CallApiAt + - Send + 'static, - >::Api: - sp_api::Metadata + - sc_offchain::OffchainWorkerApi + - sp_transaction_pool::runtime_api::TaggedTransactionQueue + - sp_session::SessionKeys + - sp_api::ApiErrorExt + - sp_api::ApiExt, - TBl: BlockT, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TExPool: MaintainedTransactionPool::Hash> + - MallocSizeOfWasm + 'static, - TRpc: sc_rpc::RpcExtension +where + TCl: ProvideRuntimeApi + + HeaderMetadata + + Chain + + BlockBackend + + BlockIdTo + + ProofProvider + + HeaderBackend + + BlockchainEvents + + ExecutorProvider + + UsageProvider + + StorageProvider + + CallApiAt + + Send + + 'static, + >::Api: sp_api::Metadata + + sc_offchain::OffchainWorkerApi + + sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_session::SessionKeys + + sp_api::ApiExt, + TBl: BlockT, + TBl::Hash: Unpin, + TBl::Header: Unpin, + TBackend: 'static + sc_client_api::backend::Backend + Send, + TExPool: MaintainedTransactionPool::Hash> + + parity_util_mem::MallocSizeOf + + 'static, + TRpc: sc_rpc::RpcExtension, { let SpawnTasksParams { mut config, @@ -522,9 +570,8 @@ pub fn spawn_tasks( rpc_extensions_builder, remote_blockchain, network, - network_status_sinks, system_rpc_tx, - telemetry_connection_sinks, + telemetry, } = params; let chain_info = client.usage_info().chain; @@ -533,15 +580,14 @@ pub fn spawn_tasks( client.clone(), &BlockId::Hash(chain_info.best_hash), config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; + ) + .map_err(|e| Error::Application(Box::new(e)))?; + + let telemetry = telemetry + .map(|telemetry| init_telemetry(&mut config, network.clone(), client.clone(), telemetry)) + .transpose()?; info!("📦 Highest known block at #{}", chain_info.best_number); - telemetry!( - SUBSTRATE_INFO; - "node.start"; - "height" => chain_info.best_number.saturated_into::(), - "best" => ?chain_info.best_hash - ); let spawn_handle = task_manager.spawn_handle(); @@ -553,154 +599,130 @@ pub fn spawn_tasks( spawn_handle.spawn( "on-transaction-imported", - 
transaction_notifications(transaction_pool.clone(), network.clone()), + transaction_notifications(transaction_pool.clone(), network.clone(), telemetry.clone()), ); // Prometheus metrics. - let metrics_service = if let Some(PrometheusConfig { port, registry }) = - config.prometheus_config.clone() - { - // Set static metrics. - let metrics = MetricsService::with_prometheus(®istry, &config)?; - spawn_handle.spawn( - "prometheus-endpoint", - prometheus_endpoint::init_prometheus(port, registry).map(drop) - ); + let metrics_service = + if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + // Set static metrics. + let metrics = MetricsService::with_prometheus(telemetry.clone(), ®istry, &config)?; + spawn_handle.spawn( + "prometheus-endpoint", + prometheus_endpoint::init_prometheus(port, registry).map(drop), + ); - metrics - } else { - MetricsService::new() - }; + metrics + } else { + MetricsService::new(telemetry.clone()) + }; // Periodically updated metrics and telemetry updates. - spawn_handle.spawn("telemetry-periodic-send", - metrics_service.run( - client.clone(), - transaction_pool.clone(), - network_status_sinks.clone() - ) + spawn_handle.spawn( + "telemetry-periodic-send", + metrics_service.run(client.clone(), transaction_pool.clone(), network.clone()), ); // RPC - let gen_handler = | - deny_unsafe: sc_rpc::DenyUnsafe, - rpc_middleware: sc_rpc_server::RpcMiddleware - | gen_handler( - deny_unsafe, rpc_middleware, &config, task_manager.spawn_handle(), - client.clone(), transaction_pool.clone(), keystore.clone(), - on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, - backend.offchain_storage(), system_rpc_tx.clone() - ); - let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry()).ok(); - let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.as_ref())?; + let gen_handler = |deny_unsafe: sc_rpc::DenyUnsafe, + rpc_middleware: sc_rpc_server::RpcMiddleware| { + gen_handler( + deny_unsafe, + rpc_middleware, + &config, + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + keystore.clone(), + on_demand.clone(), + remote_blockchain.clone(), + &*rpc_extensions_builder, + backend.offchain_storage(), + system_rpc_tx.clone(), + ) + }; + let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; + let server_metrics = sc_rpc_server::ServerMetrics::new(config.prometheus_registry())?; + let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.clone(), server_metrics)?; // This is used internally, so don't restrict access to unsafe RPC - let rpc_handlers = RpcHandlers(Arc::new(gen_handler( - sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.as_ref().cloned(), "inbrowser") - ).into())); - - // Telemetry - let telemetry = config.telemetry_endpoints.clone().and_then(|endpoints| { - if endpoints.is_empty() { - // we don't want the telemetry to be initialized if telemetry_endpoints == Some([]) - return None; - } - - let genesis_hash = match client.block_hash(Zero::zero()) { - Ok(Some(hash)) => hash, - _ => Default::default(), - }; - - Some(build_telemetry( - &mut config, endpoints, telemetry_connection_sinks.clone(), network.clone(), - task_manager.spawn_handle(), genesis_hash, - )) - }); + let known_rpc_method_names = + sc_rpc_server::method_names(|m| gen_handler(sc_rpc::DenyUnsafe::No, m))?; + let rpc_handlers = RpcHandlers(Arc::new( + gen_handler( + sc_rpc::DenyUnsafe::No, + sc_rpc_server::RpcMiddleware::new(rpc_metrics, known_rpc_method_names, 
"inbrowser"), + )? + .into(), + )); // Spawn informant task - spawn_handle.spawn("informant", sc_informant::build( - client.clone(), - network_status_sinks.status.clone(), - transaction_pool.clone(), - config.informant_output_format, - )); + spawn_handle.spawn( + "informant", + sc_informant::build( + client.clone(), + network.clone(), + transaction_pool.clone(), + config.informant_output_format, + ), + ); - task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); + task_manager.keep_alive((config.base_path, rpc, rpc_handlers.clone())); Ok(rpc_handlers) } async fn transaction_notifications( transaction_pool: Arc, - network: Arc::Hash>> -) - where - TBl: BlockT, - TExPool: MaintainedTransactionPool::Hash>, + network: Arc::Hash>>, + telemetry: Option, +) where + TBl: BlockT, + TExPool: MaintainedTransactionPool::Hash>, { // transaction notifications - transaction_pool.import_notification_stream() + transaction_pool + .import_notification_stream() .for_each(move |hash| { network.propagate_transaction(hash); let status = transaction_pool.status(); - telemetry!(SUBSTRATE_INFO; "txpool.import"; + telemetry!( + telemetry; + SUBSTRATE_INFO; + "txpool.import"; "ready" => status.ready, - "future" => status.future + "future" => status.future, ); ready(()) }) .await; } -fn build_telemetry( +fn init_telemetry>( config: &mut Configuration, - endpoints: sc_telemetry::TelemetryEndpoints, - telemetry_connection_sinks: TelemetryConnectionSinks, network: Arc::Hash>>, - spawn_handle: SpawnTaskHandle, - genesis_hash: ::Hash, -) -> sc_telemetry::Telemetry { - let is_authority = config.role.is_authority(); - let network_id = network.local_peer_id().to_base58(); - let name = config.network.node_name.clone(); - let impl_name = config.impl_name.clone(); - let impl_version = config.impl_version.clone(); - let chain_name = config.chain_spec.name().to_owned(); - let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { - endpoints, - wasm_external_transport: config.telemetry_external_transport.take(), - }); - let startup_time = SystemTime::UNIX_EPOCH.elapsed() - .map(|dur| dur.as_millis()) - .unwrap_or(0); + client: Arc, + telemetry: &mut Telemetry, +) -> sc_telemetry::Result { + let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); + let connection_message = ConnectionMessage { + name: config.network.node_name.to_owned(), + implementation: config.impl_name.to_owned(), + version: config.impl_version.to_owned(), + config: String::new(), + chain: config.chain_spec.name().to_owned(), + genesis_hash: format!("{:?}", genesis_hash), + authority: config.role.is_authority(), + startup_time: SystemTime::UNIX_EPOCH + .elapsed() + .map(|dur| dur.as_millis()) + .unwrap_or(0) + .to_string(), + network_id: network.local_peer_id().to_base58(), + }; - spawn_handle.spawn( - "telemetry-worker", - telemetry.clone() - .for_each(move |event| { - // Safe-guard in case we add more events in the future. 
- let sc_telemetry::TelemetryEvent::Connected = event; - - telemetry!(SUBSTRATE_INFO; "system.connected"; - "name" => name.clone(), - "implementation" => impl_name.clone(), - "version" => impl_version.clone(), - "config" => "", - "chain" => chain_name.clone(), - "genesis_hash" => ?genesis_hash, - "authority" => is_authority, - "startup_time" => startup_time, - "network_id" => network_id.clone() - ); - - telemetry_connection_sinks.0.lock().retain(|sink| { - sink.unbounded_send(()).is_ok() - }); - ready(()) - }) - ); + telemetry.start_telemetry(connection_message)?; - telemetry + Ok(telemetry.handle()) } fn gen_handler( @@ -715,22 +737,30 @@ fn gen_handler( remote_blockchain: Option>>, rpc_extensions_builder: &(dyn RpcExtensionBuilder + Send), offchain_storage: Option<>::OffchainStorage>, - system_rpc_tx: TracingUnboundedSender> -) -> sc_rpc_server::RpcHandler - where - TBl: BlockT, - TCl: ProvideRuntimeApi + BlockchainEvents + HeaderBackend + - HeaderMetadata + ExecutorProvider + - CallApiAt + ProofProvider + - StorageProvider + BlockBackend + Send + Sync + 'static, - TExPool: MaintainedTransactionPool::Hash> + 'static, - TBackend: sc_client_api::backend::Backend + 'static, - TRpc: sc_rpc::RpcExtension, - >::Api: - sp_session::SessionKeys + - sp_api::Metadata, + system_rpc_tx: TracingUnboundedSender>, +) -> Result, Error> +where + TBl: BlockT, + TCl: ProvideRuntimeApi + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + ExecutorProvider + + CallApiAt + + ProofProvider + + StorageProvider + + BlockBackend + + Send + + Sync + + 'static, + TExPool: MaintainedTransactionPool::Hash> + 'static, + TBackend: sc_client_api::backend::Backend + 'static, + TRpc: sc_rpc::RpcExtension, + >::Api: sp_session::SessionKeys + sp_api::Metadata, + TBl::Hash: Unpin, + TBl::Header: Unpin, { - use sc_rpc::{chain, state, author, system, offchain}; + use sc_rpc::{author, chain, offchain, state, system}; let system_info = sc_rpc::system::SystemInfo { chain_name: config.chain_spec.name().into(), @@ -743,42 +773,37 @@ fn gen_handler( let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle); let subscriptions = SubscriptionManager::new(Arc::new(task_executor.clone())); - let (chain, state, child_state) = if let (Some(remote_blockchain), Some(on_demand)) = - (remote_blockchain, on_demand) { - // Light clients - let chain = sc_rpc::chain::new_light( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - on_demand.clone(), - ); - let (state, child_state) = sc_rpc::state::new_light( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - on_demand, - deny_unsafe, - ); - (chain, state, child_state) - - } else { - // Full nodes - let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); - let (state, child_state) = sc_rpc::state::new_full( - client.clone(), - subscriptions.clone(), - deny_unsafe, - ); - (chain, state, child_state) - }; + let (chain, state, child_state) = + if let (Some(remote_blockchain), Some(on_demand)) = (remote_blockchain, on_demand) { + // Light clients + let chain = sc_rpc::chain::new_light( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + on_demand.clone(), + ); + let (state, child_state) = sc_rpc::state::new_light( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + on_demand, + deny_unsafe, + ); + (chain, state, child_state) + } else { + // Full nodes + let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); + let (state, child_state) = 
sc_rpc::state::new_full( + client.clone(), + subscriptions.clone(), + deny_unsafe, + config.rpc_max_payload, + ); + (chain, state, child_state) + }; - let author = sc_rpc::author::Author::new( - client, - transaction_pool, - subscriptions, - keystore, - deny_unsafe, - ); + let author = + sc_rpc::author::Author::new(client, transaction_pool, subscriptions, keystore, deny_unsafe); let system = system::System::new(system_info, system_rpc_tx, deny_unsafe); let maybe_offchain_rpc = offchain_storage.map(|storage| { @@ -786,7 +811,7 @@ fn gen_handler( offchain::OffchainApi::to_delegate(offchain) }); - sc_rpc_server::rpc_handler( + Ok(sc_rpc_server::rpc_handler( ( state::StateApi::to_delegate(state), state::ChildStateApi::to_delegate(child_state), @@ -794,10 +819,10 @@ fn gen_handler( maybe_offchain_rpc, author::AuthorApi::to_delegate(author), system::SystemApi::to_delegate(system), - rpc_extensions_builder.build(deny_unsafe, task_executor), + rpc_extensions_builder.build(deny_unsafe, task_executor)?, ), - rpc_middleware - ) + rpc_middleware, + )) } /// Parameters to pass into `build_network`. @@ -814,39 +839,47 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { pub import_queue: TImpQu, /// An optional, shared data fetcher for light clients. pub on_demand: Option>>, - /// A block annouce validator builder. - pub block_announce_validator_builder: Option) -> Box + Send> + Send - >>, - /// An optional finality proof request builder. - pub finality_proof_request_builder: Option>, - /// An optional, shared finality proof request provider. - pub finality_proof_provider: Option>>, + /// A block announce validator builder. + pub block_announce_validator_builder: + Option) -> Box + Send> + Send>>, + /// An optional warp sync provider. + pub warp_sync: Option>>, } /// Build the network service, the network status sinks and an RPC sender. 
pub fn build_network( - params: BuildNetworkParams + params: BuildNetworkParams, ) -> Result< ( Arc::Hash>>, - NetworkStatusSinks, TracingUnboundedSender>, NetworkStarter, ), - Error + Error, > - where - TBl: BlockT, - TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + - HeaderBackend + BlockchainEvents + 'static, - TExPool: MaintainedTransactionPool::Hash> + 'static, - TImpQu: ImportQueue + 'static, +where + TBl: BlockT, + TCl: ProvideRuntimeApi + + HeaderMetadata + + Chain + + BlockBackend + + BlockIdTo + + ProofProvider + + HeaderBackend + + BlockchainEvents + + 'static, + TExPool: MaintainedTransactionPool::Hash> + 'static, + TImpQu: ImportQueue + 'static, { let BuildNetworkParams { - config, client, transaction_pool, spawn_handle, import_queue, on_demand, - block_announce_validator_builder, finality_proof_request_builder, finality_proof_provider, + config, + client, + transaction_pool, + spawn_handle, + import_queue, + on_demand, + block_announce_validator_builder, + warp_sync, } = params; let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { @@ -855,18 +888,7 @@ pub fn build_network( client: client.clone(), }); - let protocol_id = { - let protocol_id_full = match config.chain_spec.protocol_id() { - Some(pid) => pid, - None => { - warn!("Using default protocol ID {:?} because none is configured in the \ - chain specs", DEFAULT_PROTOCOL_ID - ); - DEFAULT_PROTOCOL_ID - } - }; - sc_network::config::ProtocolId::from(protocol_id_full) - }; + let protocol_id = config.protocol_id(); let block_announce_validator = if let Some(f) = block_announce_validator_builder { f(client.clone()) @@ -874,7 +896,68 @@ pub fn build_network( Box::new(DefaultBlockAnnounceValidator) }; - let network_params = sc_network::config::Params { + let block_request_protocol_config = { + if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + block_request_handler::generate_protocol_config(&protocol_id) + } else { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = BlockRequestHandler::new( + &protocol_id, + client.clone(), + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, + ); + spawn_handle.spawn("block_request_handler", handler.run()); + protocol_config + } + }; + + let state_request_protocol_config = { + if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + state_request_handler::generate_protocol_config(&protocol_id) + } else { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = StateRequestHandler::new( + &protocol_id, + client.clone(), + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, + ); + spawn_handle.spawn("state_request_handler", handler.run()); + protocol_config + } + }; + + let warp_sync_params = warp_sync.map(|provider| { + let protocol_config = if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + warp_request_handler::generate_request_response_config(protocol_id.clone()) + } else { + // Allow both outgoing and incoming requests. 
+ let (handler, protocol_config) = + WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); + spawn_handle.spawn("warp_sync_request_handler", handler.run()); + protocol_config + }; + (provider, protocol_config) + }); + + let light_client_request_protocol_config = { + if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + light_client_requests::generate_protocol_config(&protocol_id) + } else { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, client.clone()); + spawn_handle.spawn("light_client_request_handler", handler.run()); + protocol_config + } + }; + + let mut network_params = sc_network::config::Params { role: config.role.clone(), executor: { let spawn_handle = Clone::clone(&spawn_handle); @@ -882,22 +965,36 @@ pub fn build_network( spawn_handle.spawn("libp2p-node", fut); })) }, + transactions_handler_executor: { + let spawn_handle = Clone::clone(&spawn_handle); + Box::new(move |fut| { + spawn_handle.spawn("network-transactions-handler", fut); + }) + }, network_config: config.network.clone(), chain: client.clone(), - finality_proof_provider, - finality_proof_request_builder, - on_demand: on_demand, + on_demand, transaction_pool: transaction_pool_adapter as _, import_queue: Box::new(import_queue), protocol_id, block_announce_validator, - metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()) + metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), + block_request_protocol_config, + state_request_protocol_config, + warp_sync: warp_sync_params, + light_client_request_protocol_config, }; + // Storage chains don't keep full block history and can't be synced in full mode. + // Force fast sync when storage chain mode is enabled. + if matches!(config.transaction_storage, TransactionStorageMode::StorageChain) { + network_params.network_config.sync_mode = + SyncMode::Fast { storage_chain_mode: true, skip_proofs: false }; + } + let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); - let network_status_sinks = NetworkStatusSinks::new(); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); @@ -905,7 +1002,6 @@ pub fn build_network( config.role.clone(), network_mut, client, - network_status_sinks.clone(), system_rpc_rx, has_bootnodes, config.announce_block, @@ -936,19 +1032,18 @@ pub fn build_network( // future using `spawn_blocking`. spawn_handle.spawn_blocking("network-worker", async move { if network_start_rx.await.is_err() { - debug_assert!(false); log::warn!( "The NetworkStart returned as part of `build_network` has been silently dropped" ); // This `return` might seem unnecessary, but we don't want to make it look like // everything is working as normal even though the user is clearly misusing the API. - return; + return } future.await }); - Ok((network, network_status_sinks, system_rpc_tx, NetworkStarter(network_start_tx))) + Ok((network, system_rpc_tx, NetworkStarter(network_start_tx))) } /// Object used to start the network. 
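The `RpcExtensionBuilder` change at the top of builder.rs makes RPC extension construction fallible: the blanket impl now covers closures returning `Result`, so setup errors propagate through `spawn_tasks` instead of panicking. A self-contained model of that closure-blanket-impl pattern, using illustrative stand-in types (`ExtensionBuilder`, `Error`) rather than sc-service's actual ones:

// Sketch only: models the fallible builder trait with a blanket impl over closures.
#[derive(Debug)]
struct Error(String);

trait ExtensionBuilder {
    type Output;
    fn build(&self, deny_unsafe: bool) -> Result<Self::Output, Error>;
}

// Any closure of the right shape is a builder, mirroring the
// `impl RpcExtensionBuilder for F` blanket impl in the diff above.
impl<F, R> ExtensionBuilder for F
where
    F: Fn(bool) -> Result<R, Error>,
{
    type Output = R;

    fn build(&self, deny_unsafe: bool) -> Result<R, Error> {
        (*self)(deny_unsafe)
    }
}

fn main() {
    // A builder that can now report failure instead of panicking mid-setup.
    let builder = |deny_unsafe: bool| -> Result<Vec<&'static str>, Error> {
        // Pretend one extension depends on a resource that may be missing,
        // much like the offchain RPC depends on offchain storage above.
        let offchain_db_available = true;
        if !offchain_db_available {
            return Err(Error("offchain storage not available".into()));
        }
        let mut apis = vec!["state", "chain"];
        if !deny_unsafe {
            apis.push("author");
        }
        Ok(apis)
    };

    match builder.build(false) {
        Ok(apis) => println!("mounted RPC extensions: {:?}", apis),
        Err(Error(e)) => eprintln!("RPC setup failed: {}", e),
    }
}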
diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs index 34baeb55445a8..4728e014540ee 100644 --- a/client/service/src/chain_ops/check_block.rs +++ b/client/service/src/chain_ops/check_block.rs @@ -1,36 +1,36 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::error::Error; -use futures::{future, prelude::*}; -use sp_runtime::traits::Block as BlockT; -use sp_runtime::generic::BlockId; use codec::Encode; -use sp_consensus::import_queue::ImportQueue; +use futures::{future, prelude::*}; use sc_client_api::{BlockBackend, UsageProvider}; +use sc_consensus::import_queue::ImportQueue; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::pin::Pin; -use std::sync::Arc; use crate::chain_ops::import_blocks; +use std::{pin::Pin, sync::Arc}; /// Re-validate known block. pub fn check_block( client: Arc, import_queue: IQ, - block_id: BlockId + block_id: BlockId, ) -> Pin> + Send>> where C: BlockBackend + UsageProvider + Send + Sync + 'static, @@ -44,7 +44,7 @@ where block.encode_to(&mut buf); let reader = std::io::Cursor::new(buf); import_blocks(client, import_queue, reader, true, true) - } + }, Ok(None) => Box::pin(future::err("Unknown block".into())), Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), } diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs index 2f32cbf7fbdb7..8887180103182 100644 --- a/client/service/src/chain_ops/export_blocks.rs +++ b/client/service/src/chain_ops/export_blocks.rs @@ -1,32 +1,32 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::error::Error; -use log::info; +use codec::Encode; use futures::{future, prelude::*}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, One, Zero, SaturatedConversion +use log::info; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor, One, SaturatedConversion, Zero}, }; -use sp_runtime::generic::BlockId; -use codec::Encode; -use std::{io::Write, pin::Pin}; use sc_client_api::{BlockBackend, UsageProvider}; -use std::sync::Arc; -use std::task::Poll; +use std::{io::Write, pin::Pin, sync::Arc, task::Poll}; /// Performs the blocks export. pub fn export_blocks( @@ -34,7 +34,7 @@ pub fn export_blocks( mut output: impl Write + 'static, from: NumberFor, to: Option>, - binary: bool + binary: bool, ) -> Pin>>> where C: BlockBackend + UsageProvider + 'static, @@ -61,7 +61,7 @@ where let client = &client; if last < block { - return Poll::Ready(Err("Invalid block range specified".into())); + return Poll::Ready(Err("Invalid block range specified".into())) } if !wrote_header { @@ -76,22 +76,21 @@ where } match client.block(&BlockId::number(block))? { - Some(block) => { + Some(block) => if binary { output.write_all(&block.encode())?; } else { serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; - } - }, + }, // Reached end of the chain. None => return Poll::Ready(Ok(())), } - if (block % 10000.into()).is_zero() { + if (block % 10000u32.into()).is_zero() { info!("#{}", block); } if block == last { - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } block += One::one(); diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index 3fe44dbdb142d..975149c61cfab 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -1,24 +1,25 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . 
use crate::error::Error; -use sp_runtime::traits::Block as BlockT; -use sp_runtime::generic::BlockId; -use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; use sc_client_api::{StorageProvider, UsageProvider}; +use sp_core::storage::{well_known_keys, ChildInfo, Storage, StorageChild, StorageKey, StorageMap}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{collections::HashMap, sync::Arc}; @@ -33,9 +34,7 @@ where B: BlockT, BA: sc_client_api::backend::Backend, { - let block = block.unwrap_or_else( - || BlockId::Hash(client.usage_info().chain.best_hash) - ); + let block = block.unwrap_or_else(|| BlockId::Hash(client.usage_info().chain.best_hash)); let empty_key = StorageKey(Vec::new()); let mut top_storage = client.storage_pairs(&block, &empty_key)?; @@ -45,12 +44,12 @@ where // pairs. while let Some(pos) = top_storage .iter() - .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { + .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) + { let (key, _) = top_storage.swap_remove(pos); - let key = StorageKey( - key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), - ); + let key = + StorageKey(key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec()); let child_info = ChildInfo::new_default(&key.0); let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index 46ad0d0501d93..1ba9e0bd61444 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,29 +16,31 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
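The export_raw_state change above walks the top-level pairs and pulls out every key under the well-known default child-storage prefix, exporting each child trie separately. A simplified, self-contained model of that splitting step, assuming only that the prefix is sp-core's `:child_storage:default:` (the real code builds `Storage`/`StorageChild` values and enumerates each child trie with `child_storage_keys`):

// Sketch only: separates default child-trie keys from plain top-level pairs.
use std::collections::HashMap;

const CHILD_PREFIX: &[u8] = b":child_storage:default:";

fn split_storage(
    mut top: Vec<(Vec<u8>, Vec<u8>)>,
) -> (HashMap<Vec<u8>, Vec<u8>>, Vec<Vec<u8>>) {
    let mut child_tries = Vec::new();
    // Repeatedly pull out entries under the child-storage prefix, like the
    // `while let Some(pos) = ... position(...)` loop in the diff; whatever
    // remains in `top` is the plain top-level key/value set.
    while let Some(pos) = top.iter().position(|(k, _)| k.starts_with(CHILD_PREFIX)) {
        let (key, _root) = top.swap_remove(pos);
        // Strip the prefix: the remainder identifies the child trie, whose own
        // key/value pairs are then fetched separately in the real exporter.
        child_tries.push(key[CHILD_PREFIX.len()..].to_vec());
    }
    (top.into_iter().collect(), child_tries)
}

fn main() {
    let mut child_key = CHILD_PREFIX.to_vec();
    child_key.extend_from_slice(b"my-child-trie");
    let top = vec![
        (b"code".to_vec(), b"<wasm blob>".to_vec()),
        (child_key, b"child root".to_vec()),
    ];
    let (top, children) = split_storage(top);
    println!("{} top-level keys, {} child tries", top.len(), children.len());
}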
-use crate::error; -use crate::error::Error; -use sc_chain_spec::ChainSpec; -use log::{warn, info}; -use futures::{future, prelude::*}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, Zero, Header, MaybeSerializeDeserialize, -}; -use sp_runtime::generic::SignedBlock; +use crate::{error, error::Error}; use codec::{Decode, IoReader as CodecIoReader}; -use sp_consensus::{ - BlockOrigin, - import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, -}; - -use std::{io::{Read, Seek}, pin::Pin}; -use std::time::{Duration, Instant}; +use futures::{future, prelude::*}; use futures_timer::Delay; -use std::task::Poll; -use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; -use std::convert::{TryFrom, TryInto}; -use sp_runtime::traits::{CheckedDiv, Saturating}; +use log::{info, warn}; +use sc_chain_spec::ChainSpec; use sc_client_api::UsageProvider; +use sc_consensus::import_queue::{ + BlockImportError, BlockImportStatus, ImportQueue, IncomingBlock, Link, +}; +use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; +use sp_consensus::BlockOrigin; +use sp_runtime::{ + generic::SignedBlock, + traits::{ + Block as BlockT, CheckedDiv, Header, MaybeSerializeDeserialize, NumberFor, Saturating, Zero, + }, +}; +use std::{ + convert::{TryFrom, TryInto}, + io::{Read, Seek}, + pin::Pin, + task::Poll, + time::{Duration, Instant}, +}; /// Number of blocks we will add to the queue before waiting for the queue to catch up. const MAX_PENDING_BLOCKS: u64 = 1_024; @@ -56,11 +58,11 @@ pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { spec.as_json(raw).map_err(Into::into) } - /// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder /// (from serde_json). Implements the Iterator Trait, calling `next()` will decode the next /// SignedBlock and return it. -enum BlockIter where +enum BlockIter +where R: std::io::Read + std::io::Seek, { Binary { @@ -79,7 +81,8 @@ enum BlockIter where }, } -impl BlockIter where +impl BlockIter +where R: Read + Seek + 'static, B: BlockT + MaybeSerializeDeserialize, { @@ -90,40 +93,32 @@ impl BlockIter where // of blocks that are going to be decoded. We read it and add it to our enum struct. let num_expected_blocks: u64 = Decode::decode(&mut reader) .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?; - Ok(BlockIter::Binary { - num_expected_blocks, - read_block_count: 0, - reader, - }) + Ok(BlockIter::Binary { num_expected_blocks, read_block_count: 0, reader }) } else { - let stream_deser = Deserializer::from_reader(input) - .into_iter::>(); - Ok(BlockIter::Json { - reader: stream_deser, - read_block_count: 0, - }) + let stream_deser = Deserializer::from_reader(input).into_iter::>(); + Ok(BlockIter::Json { reader: stream_deser, read_block_count: 0 }) } } /// Returns the number of blocks read thus far. fn read_block_count(&self) -> u64 { match self { - BlockIter::Binary { read_block_count, .. } - | BlockIter::Json { read_block_count, .. } - => *read_block_count, + BlockIter::Binary { read_block_count, .. } | + BlockIter::Json { read_block_count, .. } => *read_block_count, } } /// Returns the total number of blocks to be imported, if possible. fn num_expected_blocks(&self) -> Option { match self { - BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks), - BlockIter::Json {..} => None + BlockIter::Binary { num_expected_blocks, .. } => Some(*num_expected_blocks), + BlockIter::Json { .. 
} => None, } } } -impl Iterator for BlockIter where +impl Iterator for BlockIter +where R: Read + Seek + 'static, B: BlockT + MaybeSerializeDeserialize, { @@ -133,20 +128,20 @@ impl Iterator for BlockIter where match self { BlockIter::Binary { num_expected_blocks, read_block_count, reader } => { if read_block_count < num_expected_blocks { - let block_result: Result, _> = SignedBlock::::decode(reader) - .map_err(|e| e.to_string()); + let block_result: Result, _> = + SignedBlock::::decode(reader).map_err(|e| e.to_string()); *read_block_count += 1; Some(block_result) } else { // `read_block_count` == `num_expected_blocks` so we've read enough blocks. None } - } + }, BlockIter::Json { reader, read_block_count } => { let res = Some(reader.next()?.map_err(|e| e.to_string())); *read_block_count += 1; res - } + }, } } } @@ -155,7 +150,7 @@ impl Iterator for BlockIter where fn import_block_to_queue( signed_block: SignedBlock, queue: &mut TImpQu, - force: bool + force: bool, ) where TBl: BlockT + MaybeSerializeDeserialize, TImpQu: 'static + ImportQueue, @@ -163,24 +158,28 @@ fn import_block_to_queue( let (header, extrinsics) = signed_block.block.deconstruct(); let hash = header.hash(); // import queue handles verification and importing it into the client. - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock:: { + queue.import_blocks( + BlockOrigin::File, + vec![IncomingBlock:: { hash, header: Some(header), body: Some(extrinsics), - justification: signed_block.justification, + indexed_body: None, + justifications: signed_block.justifications, origin: None, allow_missing_state: false, import_existing: force, - } - ]); + state: None, + skip_execution: false, + }], + ); } /// Returns true if we have imported every block we were supposed to import, else returns false. fn importing_is_done( num_expected_blocks: Option, read_block_count: u64, - imported_blocks: u64 + imported_blocks: u64, ) -> bool { if let Some(num_expected_blocks) = num_expected_blocks { imported_blocks >= num_expected_blocks @@ -200,13 +199,13 @@ impl Speedometer { /// Creates a fresh Speedometer. fn new() -> Self { Self { - best_number: NumberFor::::from(0), + best_number: NumberFor::::from(0u32), last_number: None, last_update: Instant::now(), } } - /// Calculates `(best_number - last_number) / (now - last_update)` and + /// Calculates `(best_number - last_number) / (now - last_update)` and /// logs the speed of import. fn display_speed(&self) { // Number of milliseconds elapsed since last time. @@ -220,24 +219,28 @@ impl Speedometer { // Number of blocks that have been imported since last time. let diff = match self.last_number { None => return, - Some(n) => self.best_number.saturating_sub(n) + Some(n) => self.best_number.saturating_sub(n), }; if let Ok(diff) = TryInto::::try_into(diff) { // If the number of blocks can be converted to a regular integer, then it's easy: just // do the math and turn it into a `f64`. - let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / 10.0; + let speed = diff + .saturating_mul(10_000) + .checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) / + 10.0; info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); } else { // If the number of blocks can't be converted to a regular integer, then we need a more // algebraic approach and we stay within the realm of integers. 
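The fast path above turns the block delta into a one-decimal blocks-per-second figure using fixed-point arithmetic: multiplying by 10_000 and dividing by the elapsed milliseconds yields tenths of a block per second, so floating point only enters for the final division by 10. A standalone sketch of the same arithmetic on plain integers (names are illustrative):

/// One-decimal blocks-per-second: `blocks * 10_000 / elapsed_ms` is the
/// speed in tenths of a block per second; f64 enters only at the end.
fn bps(blocks: u128, elapsed_ms: u128) -> f64 {
    blocks
        .saturating_mul(10_000)
        .checked_div(elapsed_ms)
        .map_or(0.0, |tenths| tenths as f64) /
        10.0
}

fn main() {
    // 500 blocks in 2 seconds => 250.0 bps.
    assert_eq!(bps(500, 2_000), 250.0);
    // A zero interval degrades to 0.0 rather than dividing by zero.
    assert_eq!(bps(500, 0), 0.0);
}

The branch that follows is the integer-only fallback for block-number types too wide to convert, trading the decimal digit for staying entirely within integers.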
- let one_thousand = NumberFor::::from(1_000); - let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::max_value()) - ); + let one_thousand = NumberFor::::from(1_000u32); + let elapsed = + NumberFor::::from(>::try_from(elapsed_ms).unwrap_or(u32::MAX)); - let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) + let speed = diff + .saturating_mul(one_thousand) + .checked_div(&elapsed) .unwrap_or_else(Zero::zero); info!("📦 Current best block: {} ({} bps)", self.best_number, speed) } @@ -262,22 +265,23 @@ impl Speedometer { } /// Different State that the `import_blocks` future could be in. -enum ImportState where +enum ImportState +where R: Read + Seek + 'static, B: BlockT + MaybeSerializeDeserialize, { /// We are reading from the BlockIter structure, adding those blocks to the queue if possible. - Reading{block_iter: BlockIter}, - /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to - /// catch up. - WaitingForImportQueueToCatchUp{ + Reading { block_iter: BlockIter }, + /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it + /// to catch up. + WaitingForImportQueueToCatchUp { block_iter: BlockIter, delay: Delay, - block: SignedBlock + block: SignedBlock, }, // We have added all the blocks to the queue but they are still being processed. - WaitingForImportQueueToFinish{ - num_expected_blocks: Option, + WaitingForImportQueueToFinish { + num_expected_blocks: Option, read_block_count: u64, delay: Delay, }, @@ -303,10 +307,7 @@ where impl WaitLink { fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, - } + WaitLink { imported_blocks: 0, has_error: false } } } @@ -315,7 +316,7 @@ where &mut self, imported: usize, _num_expected_blocks: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { self.imported_blocks += imported as u64; @@ -323,7 +324,7 @@ where if let (Err(err), hash) = result { warn!("There was an error importing block with hash {:?}: {:?}", hash, err); self.has_error = true; - break; + break } } } @@ -335,13 +336,13 @@ where let block_iter = match block_iter_res { Ok(block_iter) => block_iter, Err(e) => { - // We've encountered an error while creating the block iterator + // We've encountered an error while creating the block iterator // so we can just return a future that returns an error. return future::ready(Err(Error::Other(e))).boxed() - } + }, }; - let mut state = Some(ImportState::Reading{block_iter}); + let mut state = Some(ImportState::Reading { block_iter }); let mut speedometer = Speedometer::::new(); // Importing blocks is implemented as a future, because we want the operation to be @@ -355,7 +356,7 @@ where let client = &client; let queue = &mut import_queue; match state.take().expect("state should never be None; qed") { - ImportState::Reading{mut block_iter} => { + ImportState::Reading { mut block_iter } => { match block_iter.next() { None => { // The iterator is over: we now need to wait for the import queue to finish. @@ -363,7 +364,9 @@ where let read_block_count = block_iter.read_block_count(); let delay = Delay::new(Duration::from_millis(DELAY_TIME)); state = Some(ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, delay + num_expected_blocks, + read_block_count, + delay, }); }, Some(block_result) => { @@ -375,32 +378,35 @@ where // until the queue has made some progress. 
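That wait state is plain backpressure: once the gap between blocks decoded and blocks actually imported reaches `MAX_PENDING_BLOCKS`, the future stops reading and parks behind a timer instead. Reduced to its two counters, the decision is just this (a sketch; the constant is the one declared near the top of this file):

/// Backpressure rule: keep decoding only while fewer than this many
/// decoded-but-not-yet-imported blocks are outstanding.
const MAX_PENDING_BLOCKS: u64 = 1_024;

enum Step {
    KeepReading,
    /// In the real future this arms a `Delay` before polling again.
    WaitForQueue,
}

fn next_step(read_block_count: u64, imported_blocks: u64) -> Step {
    if read_block_count - imported_blocks >= MAX_PENDING_BLOCKS {
        Step::WaitForQueue
    } else {
        Step::KeepReading
    }
}

fn main() {
    assert!(matches!(next_step(1_024, 0), Step::WaitForQueue));
    assert!(matches!(next_step(1_024, 1), Step::KeepReading));
}

The code below arms exactly that delay before the state machine is polled again.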
let delay = Delay::new(Duration::from_millis(DELAY_TIME)); state = Some(ImportState::WaitingForImportQueueToCatchUp { - block_iter, delay, block + block_iter, + delay, + block, }); } else { // Queue is not full, we can keep on adding blocks to the queue. import_block_to_queue(block, queue, force); - state = Some(ImportState::Reading{block_iter}); + state = Some(ImportState::Reading { block_iter }); } - } - Err(e) => { - return Poll::Ready( - Err(Error::Other( - format!("Error reading block #{}: {}", read_block_count, e) - ))) - } + }, + Err(e) => + return Poll::Ready(Err(Error::Other(format!( + "Error reading block #{}: {}", + read_block_count, e + )))), } - } + }, } }, - ImportState::WaitingForImportQueueToCatchUp{block_iter, mut delay, block} => { + ImportState::WaitingForImportQueueToCatchUp { block_iter, mut delay, block } => { let read_block_count = block_iter.read_block_count(); if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { // Queue is still full, so wait until there is room to insert our block. match Pin::new(&mut delay).poll(cx) { Poll::Pending => { state = Some(ImportState::WaitingForImportQueueToCatchUp { - block_iter, delay, block + block_iter, + delay, + block, }); return Poll::Pending }, @@ -409,25 +415,30 @@ where }, } state = Some(ImportState::WaitingForImportQueueToCatchUp { - block_iter, delay, block + block_iter, + delay, + block, }); } else { // Queue is no longer full, so we can add our block to the queue. import_block_to_queue(block, queue, force); // Switch back to Reading state. - state = Some(ImportState::Reading{block_iter}); + state = Some(ImportState::Reading { block_iter }); } }, ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, mut delay + num_expected_blocks, + read_block_count, + mut delay, } => { - // All the blocks have been added to the queue, which doesn't mean they + // All the blocks have been added to the queue, which doesn't mean they // have all been properly imported. if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { // Importing is done, we can log the result and return. info!( "🎉 Imported {} blocks. Best: #{}", - read_block_count, client.usage_info().chain.best_number + read_block_count, + client.usage_info().chain.best_number ); return Poll::Ready(Ok(())) } else { @@ -436,7 +447,9 @@ where match Pin::new(&mut delay).poll(cx) { Poll::Pending => { state = Some(ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, delay + num_expected_blocks, + read_block_count, + delay, }); return Poll::Pending }, @@ -446,10 +459,12 @@ where } state = Some(ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, delay + num_expected_blocks, + read_block_count, + delay, }); } - } + }, } queue.poll_actions(cx, &mut link); @@ -458,11 +473,10 @@ where speedometer.notify_user(best_number); if link.has_error { - return Poll::Ready(Err( - Error::Other( - format!("Stopping after #{} blocks because of an error", link.imported_blocks) - ) - )) + return Poll::Ready(Err(Error::Other(format!( + "Stopping after #{} blocks because of an error", + link.imported_blocks + )))) } cx.waker().wake_by_ref(); diff --git a/client/service/src/chain_ops/mod.rs b/client/service/src/chain_ops/mod.rs index af6e6f632fc06..c213e745a5d6b 100644 --- a/client/service/src/chain_ops/mod.rs +++ b/client/service/src/chain_ops/mod.rs @@ -1,18 +1,20 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Chain utilities. diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs index eaee2c03f9b31..63f1cbd15dd63 100644 --- a/client/service/src/chain_ops/revert_chain.rs +++ b/client/service/src/chain_ops/revert_chain.rs @@ -1,30 +1,32 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::error::Error; use log::info; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use sc_client_api::{Backend, UsageProvider}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::sync::Arc; /// Performs a revert of `blocks` blocks. pub fn revert_chain( client: Arc, backend: Arc, - blocks: NumberFor + blocks: NumberFor, ) -> Result<(), Error> where B: BlockT, diff --git a/client/service/src/client/block_rules.rs b/client/service/src/client/block_rules.rs index be84614c2a590..4bdf338362960 100644 --- a/client/service/src/client/block_rules.rs +++ b/client/service/src/client/block_rules.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,11 +20,9 @@ use std::collections::{HashMap, HashSet}; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sc_client_api::{ForkBlocks, BadBlocks}; +use sc_client_api::{BadBlocks, ForkBlocks}; /// Chain specification rules lookup result. pub enum LookupResult { @@ -33,7 +31,7 @@ pub enum LookupResult { /// The block is known to be bad and should not be imported KnownBad, /// There is a specified canonical block hash for the given height - Expected(B::Hash) + Expected(B::Hash), } /// Chain-specific block filtering rules. @@ -47,10 +45,7 @@ pub struct BlockRules { impl BlockRules { /// New block rules with provided black and white lists. - pub fn new( - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, - ) -> Self { + pub fn new(fork_blocks: ForkBlocks, bad_blocks: BadBlocks) -> Self { Self { bad: bad_blocks.unwrap_or_else(|| HashSet::new()), forks: fork_blocks.unwrap_or_else(|| vec![]).into_iter().collect(), @@ -66,7 +61,7 @@ impl BlockRules { pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { if let Some(hash_for_height) = self.forks.get(&number) { if hash_for_height != hash { - return LookupResult::Expected(hash_for_height.clone()); + return LookupResult::Expected(hash_for_height.clone()) } } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 1919c76ff489b..9b8774ce6d497 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,66 +16,128 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
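`BlockRules` above reduces chain-spec block filtering to two collections: a set of known-bad hashes and a fork map pinning certain heights to an expected canonical hash. A self-contained sketch of the lookup order, with integers standing in for hashes and block numbers (the real type is generic over the block type, and the bad-set check shown here mirrors what the struct's fields imply):

use std::collections::{HashMap, HashSet};

enum LookupResult {
    /// Specification rules do not contain the block.
    NotSpecial,
    /// The block is known to be bad and should not be imported.
    KnownBad,
    /// There is a specified canonical block hash for the given height.
    Expected(u64),
}

struct BlockRules {
    bad: HashSet<u64>,
    forks: HashMap<u32, u64>, // height -> expected canonical hash
}

impl BlockRules {
    fn lookup(&self, number: u32, hash: u64) -> LookupResult {
        // A height pinned to a different hash wins over everything else.
        if let Some(&expected) = self.forks.get(&number) {
            if expected != hash {
                return LookupResult::Expected(expected)
            }
        }
        if self.bad.contains(&hash) {
            return LookupResult::KnownBad
        }
        LookupResult::NotSpecial
    }
}

fn main() {
    let rules = BlockRules {
        bad: [0xbad].into_iter().collect(),
        forks: [(100, 0xcafe)].into_iter().collect(),
    };
    assert!(matches!(rules.lookup(100, 0xbad), LookupResult::Expected(0xcafe)));
    assert!(matches!(rules.lookup(7, 0xbad), LookupResult::KnownBad));
}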
-use std::{sync::Arc, panic::UnwindSafe, result, cell::RefCell}; -use codec::{Encode, Decode}; +use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use codec::{Decode, Encode}; +use sc_client_api::{backend, call_executor::CallExecutor, HeaderBackend}; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; +use sp_api::{ProofRecorder, StorageTransactionCache}; +use sp_core::{ + traits::{CodeExecutor, RuntimeCode, SpawnNamed}, + NativeOrEncoded, NeverNativeValue, +}; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, + generic::BlockId, + traits::{Block as BlockT, NumberFor}, }; use sp_state_machine::{ - self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::Backend as _, StorageProof, -}; -use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; -use sp_externalities::Extensions; -use sp_core::{ - NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, SpawnNamed}, - offchain::storage::OffchainOverlayedChanges, + self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, + StateMachine, StorageProof, }; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; -use sc_client_api::{backend, call_executor::CallExecutor}; -use super::client::ClientConfig; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; /// Call executor that executes methods locally, querying all required /// data from local backend. -pub struct LocalCallExecutor { +pub struct LocalCallExecutor { backend: Arc, executor: E, + wasm_override: Option>, + wasm_substitutes: WasmSubstitutes, spawn_handle: Box, - client_config: ClientConfig, + client_config: ClientConfig, } -impl LocalCallExecutor { +impl LocalCallExecutor +where + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + B: backend::Backend, +{ /// Creates new instance of local call executor. pub fn new( backend: Arc, executor: E, spawn_handle: Box, - client_config: ClientConfig, - ) -> Self { - LocalCallExecutor { + client_config: ClientConfig, + ) -> sp_blockchain::Result { + let wasm_override = client_config + .wasm_runtime_overrides + .as_ref() + .map(|p| WasmOverride::new(p.clone(), executor.clone())) + .transpose()?; + + let wasm_substitutes = WasmSubstitutes::new( + client_config.wasm_runtime_substitutes.clone(), + executor.clone(), + backend.clone(), + )?; + + Ok(LocalCallExecutor { backend, executor, + wasm_override, spawn_handle, client_config, - } + wasm_substitutes, + }) + } + + /// Check if local runtime code overrides are enabled and one is available + /// for the given `BlockId`. If yes, return it; otherwise return the same + /// `RuntimeCode` instance that was passed. 
+ fn check_override<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + id: &BlockId, + ) -> sp_blockchain::Result> + where + Block: BlockT, + B: backend::Backend, + { + let spec = self.runtime_version(id)?.spec_version; + let code = if let Some(d) = self + .wasm_override + .as_ref() + .map(|o| o.get(&spec, onchain_code.heap_pages)) + .flatten() + { + log::debug!(target: "wasm_overrides", "using WASM override for block {}", id); + d + } else if let Some(s) = self.wasm_substitutes.get(spec, onchain_code.heap_pages, id) { + log::debug!(target: "wasm_substitutes", "Using WASM substitute for block {:?}", id); + s + } else { + log::debug!( + target: "wasm_overrides", + "No WASM override available for block {}, using onchain code", + id + ); + onchain_code + }; + + Ok(code) } } -impl Clone for LocalCallExecutor where E: Clone { +impl Clone for LocalCallExecutor +where + E: Clone, +{ fn clone(&self) -> Self { LocalCallExecutor { backend: self.backend.clone(), executor: self.executor.clone(), + wasm_override: self.wasm_override.clone(), spawn_handle: self.spawn_handle.clone(), client_config: self.client_config.clone(), + wasm_substitutes: self.wasm_substitutes.clone(), } } } -impl CallExecutor for LocalCallExecutor +impl CallExecutor for LocalCallExecutor where B: backend::Backend, - E: CodeExecutor + RuntimeInfo + Clone + 'static, + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, Block: BlockT, { type Error = E::Error; @@ -84,35 +146,39 @@ where fn call( &self, - id: &BlockId, + at: &BlockId, method: &str, call_data: &[u8], strategy: ExecutionStrategy, extensions: Option, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); - let mut offchain_changes = if self.client_config.offchain_indexing_api { - OffchainOverlayedChanges::enabled() - } else { - OffchainOverlayedChanges::disabled() - }; - let changes_trie = backend::changes_tries_state_at_block( - id, self.backend.changes_trie_storage() - )?; - let state = self.backend.state_at(*id)?; + let changes_trie = + backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; + let state = self.backend.state_at(*at)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + + let runtime_code = self.check_override(runtime_code, at)?; + + let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) + })?; + let return_data = StateMachine::new( &state, changes_trie, &mut changes, - &mut offchain_changes, &self.executor, method, call_data, extensions.unwrap_or_default(), - &state_runtime_code.runtime_code()?, + &runtime_code, self.spawn_handle.clone(), - ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + ) + .set_parent_hash(at_hash) + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( strategy.get_manager(), None, )?; @@ -121,59 +187,54 @@ where } fn contextual_call< - 'a, - IB: Fn() -> sp_blockchain::Result<()>, EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - initialize_block_fn: IB, at: &BlockId, method: &str, call_data: &[u8], changes: &RefCell, - offchain_changes: &RefCell, - storage_transaction_cache: Option<&RefCell< - 
StorageTransactionCache - >>, - initialize_block: InitializeBlock<'a, Block>, + storage_transaction_cache: Option<&RefCell>>, execution_manager: ExecutionManager, native_call: Option, recorder: &Option>, extensions: Option, - ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { - match initialize_block { - InitializeBlock::Do(ref init_block) - if init_block.borrow().as_ref().map(|id| id != at).unwrap_or(true) => { - initialize_block_fn()?; - }, - // We don't need to initialize the runtime at a block. - _ => {}, - } - - let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; + ) -> Result, sp_blockchain::Error> + where + ExecutionManager: Clone, + { + let changes_trie_state = + backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - let mut state = self.backend.state_at(*at)?; + let state = self.backend.state_at(*at)?; let changes = &mut *changes.borrow_mut(); - let offchain_changes = &mut *offchain_changes.borrow_mut(); + + let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) + })?; + + // It is important to extract the runtime code here before we create the proof + // recorder to not record it. We also need to fetch the runtime code from `state` to + // make sure we use the caching layers. + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; match recorder { Some(recorder) => { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box - )?; - - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); - // It is important to extract the runtime code here before we create the proof - // recorder. 
- let runtime_code = state_runtime_code.runtime_code()?; + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; let backend = sp_state_machine::ProvingBackend::new_with_recorder( trie_state, @@ -184,98 +245,183 @@ where &backend, changes_trie_state, changes, - offchain_changes, &self.executor, method, call_data, extensions.unwrap_or_default(), &runtime_code, self.spawn_handle.clone(), - ); + ) + .set_parent_hash(at_hash); // TODO: https://github.com/paritytech/substrate/issues/4455 - // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) }, None => { - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = state_runtime_code.runtime_code()?; let mut state_machine = StateMachine::new( &state, changes_trie_state, changes, - offchain_changes, &self.executor, method, call_data, extensions.unwrap_or_default(), &runtime_code, self.spawn_handle.clone(), - ).with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)); - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) - } - }.map_err(Into::into) + ) + .with_storage_transaction_cache( + storage_transaction_cache.as_mut().map(|c| &mut **c), + ) + .set_parent_hash(at_hash); + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) + }, + } + .map_err(Into::into) } fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); - let changes_trie_state = backend::changes_tries_state_at_block( - id, - self.backend.changes_trie_storage(), - )?; + let changes_trie_state = + backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; let state = self.backend.state_at(*id)?; let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new( - &mut overlay, - &mut offchain_overlay, - &mut cache, - &state, - changes_trie_state, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &state, changes_trie_state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - self.executor.runtime_version(&mut ext, &state_runtime_code.runtime_code()?) 
+ let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + self.executor + .runtime_version(&mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } - fn prove_at_trie_state>>( + fn prove_execution( &self, - trie_state: &sp_state_machine::TrieBackend>, - overlay: &mut OverlayedChanges, + at: &BlockId, method: &str, - call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { + call_data: &[u8], + ) -> sp_blockchain::Result<(Vec, StorageProof)> { + let state = self.backend.state_at(*at)?; + + let trie_backend = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; + sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>( - trie_state, - overlay, + &trie_backend, + &mut Default::default(), &self.executor, self.spawn_handle.clone(), method, call_data, - &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, + &runtime_code, ) .map_err(Into::into) } +} - fn native_runtime_version(&self) -> Option<&NativeVersion> { - Some(self.executor.native_version()) +impl sp_version::GetRuntimeVersionAt for LocalCallExecutor +where + B: backend::Backend, + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + Block: BlockT, +{ + fn runtime_version(&self, at: &BlockId) -> Result { + CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) } } -impl sp_version::GetRuntimeVersion for LocalCallExecutor - where - B: backend::Backend, - E: CodeExecutor + RuntimeInfo + Clone + 'static, - Block: BlockT, +impl sp_version::GetNativeVersion for LocalCallExecutor +where + B: backend::Backend, + E: CodeExecutor + sp_version::GetNativeVersion + Clone + 'static, + Block: BlockT, { fn native_version(&self) -> &sp_version::NativeVersion { self.executor.native_version() } +} - fn runtime_version( - &self, - at: &BlockId, - ) -> Result { - CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) +#[cfg(test)] +mod tests { + use super::*; + use sc_client_api::in_mem; + use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; + use sp_core::{ + testing::TaskExecutor, + traits::{FetchRuntimeCode, WrappedRuntimeCode}, + }; + use substrate_test_runtime_client::{runtime, GenesisInit, LocalExecutorDispatch}; + + #[test] + fn should_get_override_if_exists() { + let executor = NativeElseWasmExecutor::::new( + WasmExecutionMethod::Interpreted, + Some(128), + 1, + ); + + let overrides = crate::client::wasm_override::dummy_overrides(&executor); + let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &onchain_code, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + // client is used for the convenience of creating and inserting the genesis block. 
+ let _client = substrate_test_runtime_client::client::new_with_backend::< + _, + _, + runtime::Block, + _, + runtime::RuntimeApi, + >( + backend.clone(), + executor.clone(), + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + None, + Box::new(TaskExecutor::new()), + None, + None, + Default::default(), + ) + .expect("Creates a client"); + + let call_executor = LocalCallExecutor { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Some(overrides), + spawn_handle: Box::new(TaskExecutor::new()), + client_config, + wasm_substitutes: WasmSubstitutes::new( + Default::default(), + executor.clone(), + backend.clone(), + ) + .unwrap(), + }; + + let check = call_executor + .check_override(onchain_code, &BlockId::Number(Default::default())) + .expect("RuntimeCode override"); + + assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); } } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 64b00f81905dc..f7d93d036a3fa 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,92 +18,95 @@ //! Substrate Client -use std::{ - marker::PhantomData, - collections::{HashSet, BTreeMap, HashMap}, - sync::Arc, panic::UnwindSafe, result, +use super::{ + block_rules::{BlockRules, LookupResult as BlockLookupResult}, + genesis, }; +use codec::{Decode, Encode}; +use hash_db::Prefix; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; -use codec::{Encode, Decode}; -use hash_db::Prefix; +use prometheus_endpoint::Registry; +use rand::Rng; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider, RecordProof}; +use sc_client_api::{ + backend::{ + self, apply_aux, changes_tries_state_at_block, BlockImportOperation, ClientImportOperation, + Finalizer, ImportSummary, LockImportRun, NewBlockState, PrunableStateChangesTrieStorage, + StorageProvider, + }, + cht, + client::{ + BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo, + FinalityNotification, FinalityNotifications, ForkBlocks, ImportNotifications, + ProvideUncles, + }, + execution_extensions::ExecutionExtensions, + notifications::{StorageEventStream, StorageNotifications}, + CallExecutor, ExecutorProvider, KeyIterator, ProofProvider, UsageProvider, +}; +use sc_consensus::{ + BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, +}; +use sc_executor::RuntimeVersion; +use sc_light::fetcher::ChangesProof; +use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; +use sp_api::{ + ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi, + ProvideRuntimeApi, +}; +use sp_blockchain::{ + self as blockchain, well_known_cache_keys::Id as CacheKeyId, Backend as ChainBackend, Cache, + CachedHeaderMetadata, Error, HeaderBackend as ChainHeaderBackend, HeaderMetadata, ProvideCache, +}; +use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; + +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ convert_hash, storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey}, - ChangesTrieConfiguration, ExecutionContext, 
NativeOrEncoded, + ChangesTrieConfiguration, NativeOrEncoded, }; -#[cfg(feature="test-helpers")] +#[cfg(feature = "test-helpers")] use sp_keystore::SyncCryptoStorePtr; -use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::{ - Justification, BuildStorage, - generic::{BlockId, SignedBlock, DigestItem}, + generic::{BlockId, DigestItem, SignedBlock}, traits::{ - Block as BlockT, Header as HeaderT, Zero, NumberFor, - HashFor, SaturatedConversion, One, DigestFor, + Block as BlockT, DigestFor, HashFor, Header as HeaderT, NumberFor, One, + SaturatedConversion, Zero, }, + BuildStorage, Justification, Justifications, }; use sp_state_machine::{ - DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, - prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, - ChangesTrieConfigurationRange, key_changes, key_changes_proof, -}; -use sc_executor::RuntimeVersion; -use sp_consensus::{ - Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, - ImportResult, BlockOrigin, ForkChoiceStrategy, RecordProof, -}; -use sp_blockchain::{ - self as blockchain, - Backend as ChainBackend, - HeaderBackend as ChainHeaderBackend, ProvideCache, Cache, - well_known_cache_keys::Id as CacheKeyId, - HeaderMetadata, CachedHeaderMetadata, + key_changes, key_changes_proof, prove_child_read, prove_range_read_with_size, prove_read, + read_range_proof_check, Backend as StateBackend, ChangesTrieAnchorBlockId, + ChangesTrieConfigurationRange, ChangesTrieRootsStorage, ChangesTrieStorage, DBValue, }; use sp_trie::StorageProof; -use sp_api::{ - CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, - CallApiAtParams, -}; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; -use sc_client_api::{ - backend::{ - self, BlockImportOperation, PrunableStateChangesTrieStorage, - ClientImportOperation, Finalizer, ImportSummary, NewBlockState, - changes_tries_state_at_block, StorageProvider, - LockImportRun, apply_aux, - }, - client::{ - ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification, - ClientInfo, BlockchainEvents, BlockBackend, ProvideUncles, BadBlocks, ForkBlocks, - BlockOf, - }, - execution_extensions::ExecutionExtensions, - notifications::{StorageNotifications, StorageEventStream}, - KeyIterator, CallExecutor, ExecutorProvider, ProofProvider, - cht, UsageProvider -}; -use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded}; -use sp_blockchain::Error; -use prometheus_endpoint::Registry; -use super::{ - genesis, block_rules::{BlockRules, LookupResult as BlockLookupResult}, +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + panic::UnwindSafe, + path::PathBuf, + result, + sync::Arc, }; -use sc_light::{call_executor::prove_execution, fetcher::ChangesProof}; -use rand::Rng; -#[cfg(feature="test-helpers")] +#[cfg(feature = "test-helpers")] use { - sp_core::traits::{CodeExecutor, SpawnNamed}, - sc_client_api::in_mem, - sc_executor::RuntimeInfo, super::call_executor::LocalCallExecutor, + sc_client_api::in_mem, + sc_executor::RuntimeVersionOf, + sp_core::traits::{CodeExecutor, SpawnNamed}, }; type NotificationSinks = Mutex>>; /// Substrate Client -pub struct Client where Block: BlockT { +pub struct Client +where + Block: BlockT, +{ backend: Arc, executor: E, storage_notifications: Mutex>, @@ -113,21 +116,23 @@ pub struct Client where Block: BlockT { importing_block: RwLock>, block_rules: BlockRules, execution_extensions: ExecutionExtensions, - config: ClientConfig, + config: 
ClientConfig, + telemetry: Option, _phantom: PhantomData, } -// used in importing a block, where additional changes are made after the runtime -// executed. +/// Used in importing a block, where additional changes are made after the runtime +/// executed. enum PrePostHeader { - // they are the same: no post-runtime digest items. + /// they are the same: no post-runtime digest items. Same(H), - // different headers (pre, post). + /// different headers (pre, post). Different(H, H), } impl PrePostHeader { - // get a reference to the "post-header" -- the header as it should be after all changes are applied. + /// get a reference to the "post-header" -- the header as it should be + /// after all changes are applied. fn post(&self) -> &H { match *self { PrePostHeader::Same(ref h) => h, @@ -135,7 +140,8 @@ impl PrePostHeader { } } - // convert to the "post-header" -- the header as it should be after all changes are applied. + /// convert to the "post-header" -- the header as it should be after + /// all changes are applied. fn into_post(self) -> H { match self { PrePostHeader::Same(h) => h, @@ -144,22 +150,26 @@ impl PrePostHeader { } } +enum PrepareStorageChangesResult, Block: BlockT> { + Discard(ImportResult), + Import(Option>>), +} + /// Create an instance of in-memory client. -#[cfg(feature="test-helpers")] +#[cfg(feature = "test-helpers")] pub fn new_in_mem( executor: E, genesis_storage: &S, keystore: Option, prometheus_registry: Option, + telemetry: Option, spawn_handle: Box, - config: ClientConfig, -) -> sp_blockchain::Result, - LocalCallExecutor, E>, - Block, - RA ->> where - E: CodeExecutor + RuntimeInfo, + config: ClientConfig, +) -> sp_blockchain::Result< + Client, LocalCallExecutor, E>, Block, RA>, +> +where + E: CodeExecutor + RuntimeVersionOf, S: BuildStorage, Block: BlockT, { @@ -170,22 +180,42 @@ pub fn new_in_mem( keystore, spawn_handle, prometheus_registry, + telemetry, config, ) } /// Relevant client configuration items relevant for the client. -#[derive(Debug,Clone,Default)] -pub struct ClientConfig { +#[derive(Debug, Clone)] +pub struct ClientConfig { /// Enable the offchain worker db. pub offchain_worker_enabled: bool, /// If true, allows access from the runtime to write into offchain worker db. pub offchain_indexing_api: bool, + /// Path where WASM files exist to override the on-chain WASM. + pub wasm_runtime_overrides: Option, + /// Skip writing genesis state on first start. + pub no_genesis: bool, + /// Map of WASM runtime substitute starting at the child of the given block until the runtime + /// version doesn't match anymore. + pub wasm_runtime_substitutes: HashMap>, +} + +impl Default for ClientConfig { + fn default() -> Self { + Self { + offchain_worker_enabled: false, + offchain_indexing_api: false, + wasm_runtime_overrides: None, + no_genesis: false, + wasm_runtime_substitutes: HashMap::new(), + } + } } /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. 
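Swapping the derived `Default` for a manual impl is the standard move once `ClientConfig` gains a type parameter: `#[derive(Default)]` would add a `Block: Default` bound that block types do not satisfy, even though no field actually needs it. A sketch of the pattern under simplified field types (the substitutes map key and the `PhantomData` are stand-ins):

use std::{collections::HashMap, marker::PhantomData, path::PathBuf};

#[allow(dead_code)]
struct ClientConfig<Block> {
    offchain_worker_enabled: bool,
    offchain_indexing_api: bool,
    wasm_runtime_overrides: Option<PathBuf>,
    no_genesis: bool,
    wasm_runtime_substitutes: HashMap<u64, Vec<u8>>, // simplified key type
    _marker: PhantomData<Block>,
}

// Manual impl: no `Block: Default` bound, unlike the derive.
impl<Block> Default for ClientConfig<Block> {
    fn default() -> Self {
        Self {
            offchain_worker_enabled: false,
            offchain_indexing_api: false,
            wasm_runtime_overrides: None,
            no_genesis: false,
            wasm_runtime_substitutes: HashMap::new(),
            _marker: PhantomData,
        }
    }
}

fn main() {
    #[allow(dead_code)]
    struct Block; // hypothetical block type without a Default impl
    let _cfg = ClientConfig::<Block>::default();
}

`new_with_backend`, which follows, threads such a config through to the `LocalCallExecutor`.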
-#[cfg(feature="test-helpers")] +#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, @@ -193,16 +223,22 @@ pub fn new_with_backend( keystore: Option, spawn_handle: Box, prometheus_registry: Option, - config: ClientConfig, -) -> sp_blockchain::Result, Block, RA>> - where - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, - Block: BlockT, - B: backend::LocalBackend + 'static, + telemetry: Option, + config: ClientConfig, +) -> sp_blockchain::Result, Block, RA>> +where + E: CodeExecutor + RuntimeVersionOf, + S: BuildStorage, + Block: BlockT, + B: backend::LocalBackend + 'static, { - let call_executor = LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone()); - let extensions = ExecutionExtensions::new(Default::default(), keystore); + let call_executor = + LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone())?; + let extensions = ExecutionExtensions::new( + Default::default(), + keystore, + sc_offchain::OffchainDb::factory_from_backend(&*backend), + ); Client::new( backend, call_executor, @@ -211,11 +247,13 @@ pub fn new_with_backend( Default::default(), extensions, prometheus_registry, + telemetry, config, ) } -impl BlockOf for Client where +impl BlockOf for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -224,15 +262,15 @@ impl BlockOf for Client where } impl LockImportRun for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, { let inner = || { let _import_lock = self.backend.get_import_lock().write(); @@ -262,21 +300,22 @@ impl LockImportRun for Client } impl LockImportRun for &Client - where - Block: BlockT, - B: backend::Backend, - E: CallExecutor, +where + Block: BlockT, + B: backend::Backend, + E: CallExecutor, { fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, { (**self).lock_import_and_run(f) } } -impl Client where +impl Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -291,23 +330,34 @@ impl Client where bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, prometheus_registry: Option, - config: ClientConfig, + telemetry: Option, + config: ClientConfig, ) -> sp_blockchain::Result { - if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { - let genesis_storage = build_genesis_storage.build_storage()?; + let info = backend.blockchain().info(); + if info.finalized_state.is_none() { + let genesis_storage = + build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; let mut op = backend.begin_operation()?; - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; - let state_root = op.reset_storage(genesis_storage)?; + let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); - info!("🔨 Initializing Genesis block/state (state: {}, header-hash: {})", + info!( + "🔨 Initializing Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash() ); + // Genesis may be written after some blocks have been imported and finalized. 
+ // So we only finalize it when the database is empty. + let block_state = if info.best_hash == Default::default() { + NewBlockState::Final + } else { + NewBlockState::Normal + }; op.set_block_data( genesis_block.deconstruct().0, Some(vec![]), None, - NewBlockState::Final + None, + block_state, )?; backend.commit_operation(op)?; } @@ -322,6 +372,7 @@ impl Client where block_rules: BlockRules::new(fork_blocks, bad_blocks), execution_extensions, config, + telemetry, _phantom: Default::default(), }) } @@ -346,8 +397,11 @@ impl Client where /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? - .expect("None is returned if there's no value stored for the given key;\ - ':code' key is always defined; qed").0) + .expect( + "None is returned if there's no value stored for the given key;\ + ':code' key is always defined; qed", + ) + .0) } /// Get the RuntimeVersion at a given block. @@ -361,7 +415,9 @@ impl Client where id: &BlockId, cht_size: NumberFor, ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - let proof_error = || sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)); + let proof_error = || { + sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)) + }; let header = self.backend.blockchain().expect_header(*id)?; let block_num = *header.number(); let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; @@ -397,14 +453,15 @@ impl Client where storage: &'a dyn ChangesTrieStorage, NumberFor>, min: NumberFor, required_roots_proofs: Mutex, Block::Hash>>, - }; + } - impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> for - AccessedRootsRecorder<'a, Block> + impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> + for AccessedRootsRecorder<'a, Block> { - fn build_anchor(&self, hash: Block::Hash) - -> Result>, String> - { + fn build_anchor( + &self, + hash: Block::Hash, + ) -> Result>, String> { self.storage.build_anchor(hash) } @@ -416,22 +473,19 @@ impl Client where let root = self.storage.root(anchor, block)?; if block < self.min { if let Some(ref root) = root { - self.required_roots_proofs.lock().insert( - block, - root.clone() - ); + self.required_roots_proofs.lock().insert(block, root.clone()); } } Ok(root) } } - impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> for - AccessedRootsRecorder<'a, Block> + impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> + for AccessedRootsRecorder<'a, Block> { - fn as_roots_storage(&self) - -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> - { + fn as_roots_storage( + &self, + ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { self } @@ -448,10 +502,11 @@ impl Client where } } - let first_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(first))?; + let first_number = + self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(first))?; let (storage, configs) = self.require_changes_trie(first_number, last, true)?; - let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; + let min_number = + self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { storage: storage.storage(), @@ -467,8 +522,8 @@ impl Client where // fetch key changes proof let mut proof = Vec::new(); for (config_zero, config_end, config) in configs { - let 
last_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(last))?; + let last_number = + self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(last))?; let config_range = ChangesTrieConfigurationRange { config: &config, zero: config_zero, @@ -478,10 +533,7 @@ impl Client where config_range, &recording_storage, first_number, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last), - number: last_number, - }, + &ChangesTrieAnchorBlockId { hash: convert_hash(&last), number: last_number }, max_number, storage_key, &key.0, @@ -504,30 +556,37 @@ impl Client where } /// Generate CHT-based proof for roots of changes tries at given blocks. - fn changes_trie_roots_proof>>( + fn changes_trie_roots_proof>>( &self, cht_size: NumberFor, - blocks: I + blocks: I, ) -> sp_blockchain::Result { // most probably we have touched several changes tries that are parts of the single CHT // => GroupBy changes tries by CHT number and then gather proof for the whole group at once let mut proofs = Vec::new(); - cht::for_each_cht_group::(cht_size, blocks, |_, cht_num, cht_blocks| { - let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; - proofs.push(cht_proof); - Ok(()) - }, ())?; + cht::for_each_cht_group::( + cht_size, + blocks, + |_, cht_num, cht_blocks| { + let cht_proof = + self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; + proofs.push(cht_proof); + Ok(()) + }, + (), + )?; Ok(StorageProof::merge(proofs)) } - /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). + /// Generates CHT-based proof for roots of changes tries at given blocks + /// (that are part of a single CHT). fn changes_trie_roots_proof_at_cht( &self, cht_size: NumberFor, cht_num: NumberFor, - blocks: Vec> + blocks: Vec>, ) -> sp_blockchain::Result { let cht_start = cht::start_number(cht_size, cht_num); let mut current_num = cht_start; @@ -536,25 +595,24 @@ impl Client where current_num = current_num + One::one(); Some(old_current_num) }); - let roots = cht_range - .map(|num| self.header(&BlockId::Number(num)) - .map(|block| - block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())) - ); + let roots = cht_range.map(|num| { + self.header(&BlockId::Number(num)).map(|block| { + block + .and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned()) + }) + }); let proof = cht::build_proof::, _, _>( - cht_size, - cht_num, - blocks, - roots, + cht_size, cht_num, blocks, roots, )?; Ok(proof) } - /// Returns changes trie storage and all configurations that have been active in the range [first; last]. + /// Returns changes trie storage and all configurations that have been active + /// in the range [first; last]. /// /// Configurations are returned in descending order (and obviously never overlap). - /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and - /// stopping on either first, or when CT have been disabled. + /// If fail_if_disabled is false, returns maximal consecutive configuration ranges, + /// starting from last and stopping on either first, or when CT have been disabled. /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled /// inside first..last blocks range.
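Enumerating every header of a CHT window, as above, needs an open-ended counting iterator: generic block numbers only guarantee cloning and `+ One`, not the `Step` trait that `Range` iteration requires, hence the closure over a mutable counter. The pattern in isolation (bound names are illustrative):

/// Counts upward from `start` using only `Clone + Add`, the way generic
/// block numbers are iterated when `Range`'s `Step` bound is unavailable.
fn count_from<N>(start: N) -> impl Iterator<Item = N>
where
    N: Clone + std::ops::Add<Output = N> + From<u8>,
{
    let mut current = start;
    std::iter::from_fn(move || {
        let old = current.clone();
        current = current.clone() + N::from(1u8);
        Some(old)
    })
}

fn main() {
    let window: Vec<u64> = count_from(5u64).take(3).collect();
    assert_eq!(window, vec![5, 6, 7]);
}

`require_changes_trie`, defined next, applies the range rules spelled out in the doc comment above.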
fn require_changes_trie( @@ -566,10 +624,10 @@ impl Client where &dyn PrunableStateChangesTrieStorage, Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, )> { - let storage = match self.backend.changes_trie_storage() { - Some(storage) => storage, - None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), - }; + let storage = self + .backend + .changes_trie_storage() + .ok_or_else(|| sp_blockchain::Error::ChangesTriesNotSupported)?; let mut configs = Vec::with_capacity(1); let mut current = last; @@ -582,10 +640,14 @@ impl Client where } if config_range.zero.0 < first { - break; + break } - current = *self.backend.blockchain().expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); + current = *self + .backend + .blockchain() + .expect_header(BlockId::Hash(config_range.zero.1))? + .parent_hash(); } Ok((storage, configs)) @@ -598,18 +660,22 @@ impl Client where operation: &mut ClientImportOperation, import_block: BlockImportParams>, new_cache: HashMap>, - ) -> sp_blockchain::Result where + storage_changes: Option< + sc_consensus::StorageChanges>, + >, + ) -> sp_blockchain::Result + where Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + >::Api: + CoreApi + ApiExt, { let BlockImportParams { origin, header, - justification, + justifications, post_digests, body, - storage_changes, + indexed_body, finalized, auxiliary, fork_choice, @@ -618,7 +684,7 @@ impl Client where .. } = import_block; - assert!(justification.is_some() && finalized || justification.is_none()); + assert!(justifications.is_some() && finalized || justifications.is_none()); if !intermediates.is_empty() { return Err(Error::IncompletePipeline) @@ -646,8 +712,9 @@ impl Client where origin, hash, import_headers, - justification, + justifications, body, + indexed_body, storage_changes, new_cache, finalized, @@ -661,10 +728,11 @@ impl Client where // don't send telemetry block import events during initial sync for every // block to avoid spamming the telemetry server, these events will be randomly // sent at a rate of 1/10. - if origin != BlockOrigin::NetworkInitialSync || - rand::thread_rng().gen_bool(0.1) - { - telemetry!(SUBSTRATE_INFO; "block.import"; + if origin != BlockOrigin::NetworkInitialSync || rand::thread_rng().gen_bool(0.1) { + telemetry!( + self.telemetry; + SUBSTRATE_INFO; + "block.import"; "height" => height, "best" => ?hash, "origin" => ?origin @@ -682,52 +750,95 @@ impl Client where origin: BlockOrigin, hash: Block::Hash, import_headers: PrePostHeader, - justification: Option, + justifications: Option, body: Option>, - storage_changes: Option, Block>>, + indexed_body: Option>>, + storage_changes: Option< + sc_consensus::StorageChanges>, + >, new_cache: HashMap>, finalized: bool, aux: Vec<(Vec, Option>)>, fork_choice: ForkChoiceStrategy, import_existing: bool, - ) -> sp_blockchain::Result where + ) -> sp_blockchain::Result + where Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + >::Api: + CoreApi + ApiExt, { let parent_hash = import_headers.post().parent_hash().clone(); let status = self.backend.blockchain().status(BlockId::Hash(hash))?; + let parent_exists = self.backend.blockchain().status(BlockId::Hash(parent_hash))? 
== + blockchain::BlockStatus::InChain; match (import_existing, status) { (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), (false, blockchain::BlockStatus::Unknown) => {}, - (true, blockchain::BlockStatus::InChain) => {}, - (true, blockchain::BlockStatus::Unknown) => - return Err(Error::UnknownBlock(format!("{:?}", hash))), + (true, blockchain::BlockStatus::InChain) => {}, + (true, blockchain::BlockStatus::Unknown) => {}, } let info = self.backend.blockchain().info(); // the block is lower than our last finalized block so it must revert // finality, refusing import. - if *import_headers.post().number() <= info.finalized_number { - return Err(sp_blockchain::Error::NotInFinalizedChain); + if status == blockchain::BlockStatus::Unknown && + *import_headers.post().number() <= info.finalized_number + { + return Err(sp_blockchain::Error::NotInFinalizedChain) } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => + true, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; let storage_changes = match storage_changes { Some(storage_changes) => { - self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; - - // ensure parent block is finalized to maintain invariant that - // finality is called sequentially. - if finalized { + let storage_changes = match storage_changes { + sc_consensus::StorageChanges::Changes(storage_changes) => { + self.backend + .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + let (main_sc, child_sc, offchain_sc, tx, _, changes_trie_tx, tx_index) = + storage_changes.into_inner(); + + if self.config.offchain_indexing_api { + operation.op.update_offchain_storage(offchain_sc)?; + } + + operation.op.update_db_storage(tx)?; + operation.op.update_storage(main_sc.clone(), child_sc.clone())?; + operation.op.update_transaction_index(tx_index)?; + + if let Some(changes_trie_transaction) = changes_trie_tx { + operation.op.update_changes_trie(changes_trie_transaction)?; + } + Some((main_sc, child_sc)) + }, + sc_consensus::StorageChanges::Import(changes) => { + let storage = sp_storage::Storage { + top: changes.state.into_iter().collect(), + children_default: Default::default(), + }; + + let state_root = operation.op.reset_storage(storage)?; + if state_root != *import_headers.post().state_root() { + // State root mismatch when importing state. This should not happen in + // safe fast sync mode, but may happen in unsafe mode. + warn!("Error importing state: State root mismatch."); + return Err(Error::InvalidStateRoot) + } + None + }, + }; + // Ensure parent chain is finalized to maintain invariant that + // finality is called sequentially. This will also send finality + // notifications for top 250 newly finalized blocks.
+ if finalized && parent_exists { self.apply_finality_with_block_hash( operation, parent_hash, @@ -738,35 +849,17 @@ impl Client where } operation.op.update_cache(new_cache); - - let ( - main_sc, - child_sc, - offchain_sc, - tx, _, - changes_trie_tx, - ) = storage_changes.into_inner(); - - if self.config.offchain_indexing_api { - operation.op.update_offchain_storage(offchain_sc)?; - } - - operation.op.update_db_storage(tx)?; - operation.op.update_storage(main_sc.clone(), child_sc.clone())?; - - if let Some(changes_trie_transaction) = changes_trie_tx { - operation.op.update_changes_trie(changes_trie_transaction)?; - } - - Some((main_sc, child_sc)) + storage_changes }, None => None, }; - let is_new_best = finalized || match fork_choice { - ForkChoiceStrategy::LongestChain => import_headers.post().number() > &info.best_number, - ForkChoiceStrategy::Custom(v) => v, - }; + let is_new_best = finalized || + match fork_choice { + ForkChoiceStrategy::LongestChain => + import_headers.post().number() > &info.best_number, + ForkChoiceStrategy::Custom(v) => v, + }; let leaf_state = if finalized { NewBlockState::Final @@ -776,12 +869,9 @@ impl Client where NewBlockState::Normal }; - let tree_route = if is_new_best && info.best_hash != parent_hash { - let route_from_best = sp_blockchain::tree_route( - self.backend.blockchain(), - info.best_hash, - parent_hash, - )?; + let tree_route = if is_new_best && info.best_hash != parent_hash && parent_exists { + let route_from_best = + sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, parent_hash)?; Some(route_from_best) } else { None @@ -798,13 +888,15 @@ impl Client where operation.op.set_block_data( import_headers.post().clone(), body, - justification, + indexed_body, + justifications, leaf_state, )?; operation.op.insert_aux(aux)?; - // we only notify when we are already synced to the tip of the chain or if this import triggers a re-org + // we only notify when we are already synced to the tip of the chain + // or if this import triggers a re-org if make_notifications || tree_route.is_some() { if finalized { operation.notify_finalized.push(hash); @@ -831,35 +923,42 @@ impl Client where fn prepare_block_storage_changes( &self, import_block: &mut BlockImportParams>, - ) -> sp_blockchain::Result> - where - Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + ) -> sp_blockchain::Result> + where + Self: ProvideRuntimeApi, + >::Api: + CoreApi + ApiExt, { let parent_hash = import_block.header.parent_hash(); let at = BlockId::Hash(*parent_hash); - let enact_state = match self.block_status(&at)? 
{ - BlockStatus::Unknown => return Ok(Some(ImportResult::UnknownParent)), - BlockStatus::InChainWithState | BlockStatus::Queued => true, - BlockStatus::InChainPruned if import_block.allow_missing_state => false, - BlockStatus::InChainPruned => return Ok(Some(ImportResult::MissingState)), - BlockStatus::KnownBad => return Ok(Some(ImportResult::KnownBad)), + let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); + let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { + (BlockStatus::KnownBad, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), + ( + BlockStatus::InChainPruned, + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), + ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), + (BlockStatus::Unknown, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (_, StateAction::Skip) => (false, None), + (BlockStatus::InChainPruned, StateAction::Execute) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), + (_, StateAction::Execute) => (true, None), + (_, StateAction::ExecuteIfPossible) => (true, None), }; - match (enact_state, &mut import_block.storage_changes, &mut import_block.body) { + let storage_changes = match (enact_state, storage_changes, &import_block.body) { // We have storage changes and should enact the state, so we don't need to do anything // here - (true, Some(_), _) => {}, + (true, changes @ Some(_), _) => changes, // We should enact state, but don't have any storage changes, so we need to execute the // block. - (true, ref mut storage_changes @ None, Some(ref body)) => { + (true, None, Some(ref body)) => { let runtime_api = self.runtime_api(); - let execution_context = if import_block.origin == BlockOrigin::NetworkInitialSync { - ExecutionContext::Syncing - } else { - ExecutionContext::Importing - }; + let execution_context = import_block.origin.into(); runtime_api.execute_block_with_context( &at, @@ -868,34 +967,26 @@ impl Client where )?; let state = self.backend.state_at(at)?; - let changes_trie_state = changes_tries_state_at_block( - &at, - self.backend.changes_trie_storage(), - )?; + let changes_trie_state = + changes_tries_state_at_block(&at, self.backend.changes_trie_storage())?; - let gen_storage_changes = runtime_api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - *parent_hash, - )?; + let gen_storage_changes = runtime_api + .into_storage_changes(&state, changes_trie_state.as_ref(), *parent_hash) + .map_err(sp_blockchain::Error::Storage)?; - if import_block.header.state_root() - != &gen_storage_changes.transaction_storage_root + if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { return Err(Error::InvalidStateRoot) - } else { - **storage_changes = Some(gen_storage_changes); } + Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, // No block body, no storage changes - (true, None, None) => {}, + (true, None, None) => None, // We should not enact the state, so we set the storage changes to `None`. 
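The `(block_status, state_action)` match above is effectively a decision table: it determines whether the state transition should be enacted and whether pre-supplied storage changes can be reused, discarding the import when required state is missing or the parent is unknown. A condensed, self-contained restatement of that table; `Computed`/`Imported` are illustrative stand-ins for `sc_consensus::StorageChanges::Changes`/`::Import`, and the string reasons stand in for the `ImportResult` variants:

```rust
#[allow(dead_code)]
enum BlockStatus { Unknown, InChainWithState, InChainPruned, Queued, KnownBad }

/// Whether ready-made changes came from local execution or a state download.
enum Changes { Computed, Imported }

enum StateAction { ApplyChanges(Changes), Execute, ExecuteIfPossible, Skip }

enum Decision {
    /// Discard the import with the given reason.
    Discard(&'static str),
    /// Proceed; `enact_state` says whether the state transition is applied.
    Proceed { enact_state: bool },
}

// Condensed restatement of the table in `prepare_block_storage_changes`;
// arm order matters, exactly as in the original match.
fn decide(status: BlockStatus, action: StateAction) -> Decision {
    use {BlockStatus::*, Changes::*, Decision::*, StateAction::*};
    match (status, action) {
        (KnownBad, _) => Discard("known bad"),
        (InChainPruned, ApplyChanges(Computed)) => Discard("missing state"),
        (_, ApplyChanges(_)) => Proceed { enact_state: true },
        (Unknown, _) => Discard("unknown parent"),
        (_, Skip) => Proceed { enact_state: false },
        (InChainPruned, Execute) => Discard("missing state"),
        (InChainPruned, ExecuteIfPossible) => Proceed { enact_state: false },
        (_, Execute) | (_, ExecuteIfPossible) => Proceed { enact_state: true },
    }
}

fn main() {
    assert!(matches!(
        decide(BlockStatus::InChainPruned, StateAction::Execute),
        Decision::Discard(_)
    ));
    // A downloaded state may be applied even where executed changes cannot.
    assert!(matches!(
        decide(BlockStatus::InChainPruned, StateAction::ApplyChanges(Changes::Imported)),
        Decision::Proceed { enact_state: true }
    ));
}
```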
- (false, changes, _) => { - changes.take(); - } + (false, _, _) => None, }; - Ok(None) + Ok(PrepareStorageChangesResult::Import(storage_changes)) } fn apply_finality_with_block_hash( @@ -910,20 +1001,28 @@ impl Client where let last_finalized = self.backend.blockchain().last_finalized()?; if block == last_finalized { - warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized); - return Ok(()); + warn!( + "Possible safety violation: attempted to re-finalize last finalized block {:?} ", + last_finalized + ); + return Ok(()) } - let route_from_finalized = sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; + let route_from_finalized = + sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; if let Some(retracted) = route_from_finalized.retracted().get(0) { - warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \ - same chain as last finalized {:?}", retracted, last_finalized); + warn!( + "Safety violation: attempted to revert finalized block {:?} which is not in the \ + same chain as last finalized {:?}", + retracted, last_finalized + ); - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } - let route_from_best = sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; + let route_from_best = + sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; // if the block is not a direct ancestor of the current best chain, // then some other block is the common ancestor. @@ -960,10 +1059,7 @@ impl Client where Ok(()) } - fn notify_finalized( - &self, - notify_finalized: Vec, - ) -> sp_blockchain::Result<()> { + fn notify_finalized(&self, notify_finalized: Vec) -> sp_blockchain::Result<()> { let mut sinks = self.finality_notification_sinks.lock(); if notify_finalized.is_empty() { @@ -972,35 +1068,33 @@ impl Client where // would also remove any closed sinks. sinks.retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) } // We assume the list is sorted and only want to inform the // telemetry once about the finalized block. if let Some(last) = notify_finalized.last() { - let header = self.header(&BlockId::Hash(*last))? - .expect( - "Header already known to exist in DB because it is \ - indicated in the tree route; qed" - ); + let header = self.header(&BlockId::Hash(*last))?.expect( + "Header already known to exist in DB because it is indicated in the tree route; \ + qed", + ); - telemetry!(SUBSTRATE_INFO; "notify.finalized"; + telemetry!( + self.telemetry; + SUBSTRATE_INFO; + "notify.finalized"; "height" => format!("{}", header.number()), "best" => ?last, ); } for finalized_hash in notify_finalized { - let header = self.header(&BlockId::Hash(finalized_hash))? - .expect( - "Header already known to exist in DB because it is \ - indicated in the tree route; qed" - ); + let header = self.header(&BlockId::Hash(finalized_hash))?.expect( + "Header already known to exist in DB because it is indicated in the tree route; \ + qed", + ); - let notification = FinalityNotification { - header, - hash: finalized_hash, - }; + let notification = FinalityNotification { header, hash: finalized_hash }; sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); } @@ -1021,22 +1115,19 @@ impl Client where // won't send any import notifications which could lead to a // temporary leak of closed/discarded notification sinks (e.g. // from consensus code). 
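The comment above points at a subtle piece of the notification plumbing: even when there is nothing to send, closed sinks are pruned so that dropped receivers do not accumulate, and when sending, only sinks that accept the message are retained. A small sketch of the same retain-based pattern, assuming only the `futures` crate:

```rust
use futures::channel::mpsc::{unbounded, UnboundedSender};

/// Prune sinks whose receiver is gone, then fan a notification out to the
/// survivors, dropping any sink that fails mid-send.
fn notify(sinks: &mut Vec<UnboundedSender<String>>, msg: Option<String>) {
    match msg {
        // Nothing to send: still prune closed sinks so they don't leak.
        None => sinks.retain(|sink| !sink.is_closed()),
        // Keep only the sinks that accepted the notification.
        Some(msg) => sinks.retain(|sink| sink.unbounded_send(msg.clone()).is_ok()),
    }
}

fn main() {
    let (tx_live, mut rx_live) = unbounded::<String>();
    let (tx_dead, rx_dead) = unbounded::<String>();
    drop(rx_dead); // a subscriber that went away

    let mut sinks = vec![tx_live, tx_dead];
    notify(&mut sinks, None);
    assert_eq!(sinks.len(), 1); // the closed sink was pruned

    notify(&mut sinks, Some(String::from("block imported")));
    assert_eq!(rx_live.try_next().unwrap(), Some(String::from("block imported")));
}
```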
- self.import_notification_sinks - .lock() - .retain(|sink| !sink.is_closed()); + self.import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()); - } + return Ok(()) + }, }; if let Some(storage_changes) = notify_import.storage_changes { // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? - self.storage_notifications.lock() - .trigger( - ¬ify_import.hash, - storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), - ); + self.storage_notifications.lock().trigger( + ¬ify_import.hash, + storage_changes.0.into_iter(), + storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), + ); } let notification = BlockImportNotification:: { @@ -1047,7 +1138,8 @@ impl Client where tree_route: notify_import.tree_route.map(Arc::new), }; - self.import_notification_sinks.lock() + self.import_notification_sinks + .lock() .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); Ok(()) @@ -1094,7 +1186,7 @@ impl Client where // this can probably be implemented more efficiently if let BlockId::Hash(ref h) = id { if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); + return Ok(BlockStatus::Queued) } } let hash_and_number = match id.clone() { @@ -1102,38 +1194,49 @@ impl Client where BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), }; match hash_and_number { - Some((hash, number)) => { + Some((hash, number)) => if self.backend.have_state_at(&hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - } - } + }, None => Ok(BlockStatus::Unknown), } } /// Get block header by id. - pub fn header(&self, id: &BlockId) -> sp_blockchain::Result::Header>> { + pub fn header( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Header>> { self.backend.blockchain().header(*id) } /// Get block body by id. - pub fn body(&self, id: &BlockId) -> sp_blockchain::Result::Extrinsic>>> { + pub fn body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { self.backend.blockchain().body(*id) } /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. - pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { + pub fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result> { let load_header = |id: Block::Hash| -> sp_blockchain::Result { - match self.backend.blockchain().header(BlockId::Hash(id))? { - Some(hdr) => Ok(hdr), - None => Err(Error::UnknownBlock(format!("{:?}", id))), - } + self.backend + .blockchain() + .header(BlockId::Hash(id))? 
+ .ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))) }; let genesis_hash = self.backend.blockchain().info().genesis_hash; - if genesis_hash == target_hash { return Ok(Vec::new()); } + if genesis_hash == target_hash { + return Ok(Vec::new()) + } let mut current_hash = target_hash; let mut current = load_header(current_hash)?; @@ -1141,47 +1244,40 @@ impl Client where let mut ancestor = load_header(ancestor_hash)?; let mut uncles = Vec::new(); - for _generation in 0..max_generation.saturated_into() { + let mut generation: NumberFor = Zero::zero(); + while generation < max_generation { let children = self.backend.blockchain().children(ancestor_hash)?; uncles.extend(children.into_iter().filter(|h| h != ¤t_hash)); current_hash = ancestor_hash; - if genesis_hash == current_hash { break; } + + if genesis_hash == current_hash { + break + } + current = ancestor; ancestor_hash = *current.parent_hash(); ancestor = load_header(ancestor_hash)?; + generation += One::one(); } trace!("Collected {} uncles", uncles.len()); Ok(uncles) } - - /// Prepare in-memory header that is used in execution environment. - fn prepare_environment_block(&self, parent: &BlockId) -> sp_blockchain::Result { - let parent_header = self.backend.blockchain().expect_header(*parent)?; - Ok(<::Header as HeaderT>::new( - self.backend.blockchain().expect_block_number_from_id(parent)? + One::one(), - Default::default(), - Default::default(), - parent_header.hash(), - Default::default(), - )) - } } -impl UsageProvider for Client where +impl UsageProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, { /// Get usage info about current client. fn usage_info(&self) -> ClientInfo { - ClientInfo { - chain: self.chain_info(), - usage: self.backend.usage_info(), - } + ClientInfo { chain: self.chain_info(), usage: self.backend.usage_info() } } } -impl ProofProvider for Client where +impl ProofProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1189,29 +1285,26 @@ impl ProofProvider for Client where fn read_proof( &self, id: &BlockId, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(id) - .and_then(|state| prove_read(state, keys) - .map_err(Into::into)) + self.state_at(id).and_then(|state| prove_read(state, keys).map_err(Into::into)) } fn read_child_proof( &self, id: &BlockId, child_info: &ChildInfo, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result { self.state_at(id) - .and_then(|state| prove_child_read(state, child_info, keys) - .map_err(Into::into)) + .and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into)) } fn execution_proof( &self, id: &BlockId, method: &str, - call_data: &[u8] + call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. 
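The reworked `uncles` loop above swaps the old `for _generation in 0..max_generation` counter for an explicit counter in the block-number type, but the traversal is unchanged: walk up to `max_generation` ancestors and collect each ancestor's children that are not on the path already taken. A self-contained sketch of that traversal over a toy parent/children map (not the real blockchain backend):

```rust
use std::collections::HashMap;

type Hash = u32;

/// Collect "uncles": children of the ancestors of `target` that are not
/// themselves on the ancestor path, up to `max_generation` levels deep.
fn uncles(
    parents: &HashMap<Hash, Hash>,       // child -> parent
    children: &HashMap<Hash, Vec<Hash>>, // parent -> children
    target: Hash,
    max_generation: u32,
) -> Vec<Hash> {
    let mut current = target;
    let mut collected = Vec::new();
    for _ in 0..max_generation {
        // Stop at the genesis/root, which has no parent.
        let Some(&ancestor) = parents.get(&current) else { break };
        collected.extend(
            children
                .get(&ancestor)
                .into_iter()
                .flatten()
                .copied()
                .filter(|h| *h != current),
        );
        current = ancestor;
    }
    collected
}

fn main() {
    // 1 -> {2, 3}, 2 -> {4, 5}; uncles of 4 within two generations: 5 and 3.
    let parents = HashMap::from([(2, 1), (3, 1), (4, 2), (5, 2)]);
    let children = HashMap::from([(1, vec![2, 3]), (2, vec![4, 5])]);
    assert_eq!(uncles(&parents, &children, 4, 2), vec![5, 3]);
}
```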
@@ -1222,20 +1315,15 @@ impl ProofProvider for Client where &mut [well_known_keys::CODE, well_known_keys::HEAP_PAGES].iter().map(|v| *v), )?; - let state = self.state_at(id)?; - let header = self.prepare_environment_block(id)?; - prove_execution( - state, - header, - &self.executor, - method, - call_data, - ).map(|(r, p)| { - (r, StorageProof::merge(vec![p, code_proof])) - }) + self.executor + .prove_execution(id, method, call_data) + .map(|(r, p)| (r, StorageProof::merge(vec![p, code_proof]))) } - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + fn header_proof( + &self, + id: &BlockId, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { self.header_proof_with_cht_size(id, cht::size()) } @@ -1248,27 +1336,79 @@ impl ProofProvider for Client where storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result> { - self.key_changes_proof_with_cht_size( - first, - last, - min, - max, - storage_key, - key, - cht::size(), - ) + self.key_changes_proof_with_cht_size(first, last, min, max, storage_key, key, cht::size()) } -} + fn read_proof_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result<(StorageProof, u32)> { + let state = self.state_at(id)?; + Ok(prove_range_read_with_size::<_, HashFor>( + state, + None, + None, + size_limit, + Some(start_key), + )?) + } + + fn storage_collection( + &self, + id: &BlockId, + start_key: &[u8], + size_limit: usize, + ) -> sp_blockchain::Result, Vec)>> { + let state = self.state_at(id)?; + let mut current_key = start_key.to_vec(); + let mut total_size = 0; + let mut entries = Vec::new(); + while let Some(next_key) = state + .next_storage_key(¤t_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + { + let value = state + .storage(next_key.as_ref()) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .unwrap_or_default(); + let size = value.len() + next_key.len(); + if total_size + size > size_limit && !entries.is_empty() { + break + } + total_size += size; + entries.push((next_key.clone(), value)); + current_key = next_key; + } + Ok(entries) + } + + fn verify_range_proof( + &self, + root: Block::Hash, + proof: StorageProof, + start_key: &[u8], + ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)> { + Ok(read_range_proof_check::>( + root, + proof, + None, + None, + None, + Some(start_key), + )?) 
+ } +} impl BlockBuilderProvider for Client - where - B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static, - Block: BlockT, - Self: ChainHeaderBackend + ProvideRuntimeApi, - >::Api: ApiExt> - + BlockBuilderApi, +where + B: backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + 'static, + Block: BlockT, + Self: ChainHeaderBackend + ProvideRuntimeApi, + >::Api: + ApiExt> + BlockBuilderApi, { fn new_block_at>( &self, @@ -1282,7 +1422,7 @@ impl BlockBuilderProvider for Client BlockBuilderProvider for Client ExecutorProvider for Client where +impl ExecutorProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1318,19 +1459,26 @@ impl ExecutorProvider for Client where } } -impl StorageProvider for Client where +impl StorageProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, { - fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result> { + fn storage_keys( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result> { let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); Ok(keys) } - fn storage_pairs(&self, id: &BlockId, key_prefix: &StorageKey) - -> sp_blockchain::Result> - { + fn storage_pairs( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result> { let state = self.state_at(id)?; let keys = state .keys(&key_prefix.0) @@ -1343,51 +1491,60 @@ impl StorageProvider for Client wher Ok(keys) } - fn storage_keys_iter<'a>( &self, id: &BlockId, prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> + start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { let state = self.state_at(id)?; - let start_key = start_key - .or(prefix) - .map(|key| key.0.clone()) - .unwrap_or_else(Vec::new); + let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new(state, prefix, start_key)) } + fn child_storage_keys_iter<'a>( + &self, + id: &BlockId, + child_info: ChildInfo, + prefix: Option<&'a StorageKey>, + start_key: Option<&StorageKey>, + ) -> sp_blockchain::Result> { + let state = self.state_at(id)?; + let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); + Ok(KeyIterator::new_child(state, child_info, prefix, start_key)) + } fn storage( &self, id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .map(StorageData) - ) + Ok(self + .state_at(id)? + .storage(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData)) } - fn storage_hash( &self, id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - ) + Ok(self + .state_at(id)? + .storage_hash(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?) } fn child_storage_keys( &self, id: &BlockId, child_info: &ChildInfo, - key_prefix: &StorageKey + key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let keys = self.state_at(id)? + let keys = self + .state_at(id)? .child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) @@ -1399,9 +1556,10 @@ impl StorageProvider for Client wher &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? + Ok(self + .state_at(id)? 
.child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1411,12 +1569,12 @@ impl StorageProvider for Client wher &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? + Ok(self + .state_at(id)? .child_storage_hash(child_info, &key.0) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - ) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?) } fn max_key_changes_range( @@ -1427,7 +1585,9 @@ impl StorageProvider for Client wher let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; if first > last_number { - return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); + return Err(sp_blockchain::Error::ChangesTrieAccessFailed( + "Invalid changes trie range".into(), + )) } let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { @@ -1442,7 +1602,7 @@ impl StorageProvider for Client wher let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); Ok(Some((first, last))) }, - None => Ok(None) + None => Ok(None), } } @@ -1451,7 +1611,7 @@ impl StorageProvider for Client wher first: NumberFor, last: BlockId, storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result, u32)>> { let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; @@ -1462,12 +1622,20 @@ impl StorageProvider for Client wher for (config_zero, config_end, config) in configs { let range_first = ::std::cmp::max(first, config_zero + One::one()); let range_anchor = match config_end { - Some((config_end_number, config_end_hash)) => if last_number > config_end_number { - ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } - } else { - ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } - }, - None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, + Some((config_end_number, config_end_hash)) => + if last_number > config_end_number { + ChangesTrieAnchorBlockId { + hash: config_end_hash, + number: config_end_number, + } + } else { + ChangesTrieAnchorBlockId { + hash: convert_hash(&last_hash), + number: last_number, + } + }, + None => + ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, }; let config_range = ChangesTrieConfigurationRange { @@ -1482,9 +1650,10 @@ impl StorageProvider for Client wher &range_anchor, best_number, storage_key, - &key.0) - .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + &key.0, + ) + .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; result.extend(result_range); } @@ -1492,14 +1661,18 @@ impl StorageProvider for Client wher } } -impl HeaderMetadata for Client where +impl HeaderMetadata for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, { type Error = sp_blockchain::Error; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { self.backend.blockchain().header_metadata(hash) } @@ -1512,21 +1685,26 @@ 
impl HeaderMetadata for Client where } } -impl ProvideUncles for Client where +impl ProvideUncles for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, { - fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { + fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result> { Ok(Client::uncles(self, target_hash, max_generation)? .into_iter() .filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None)) - .collect() - ) + .collect()) } } -impl ChainHeaderBackend for Client where +impl ChainHeaderBackend for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1544,7 +1722,10 @@ impl ChainHeaderBackend for Client wher self.backend.blockchain().status(id) } - fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + fn number( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { self.backend.blockchain().number(hash) } @@ -1553,7 +1734,8 @@ impl ChainHeaderBackend for Client wher } } -impl sp_runtime::traits::BlockIdTo for Client where +impl sp_runtime::traits::BlockIdTo for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1565,12 +1747,16 @@ impl sp_runtime::traits::BlockIdTo for Client) -> sp_blockchain::Result>> { + fn to_number( + &self, + block_id: &BlockId, + ) -> sp_blockchain::Result>> { self.block_number_from_id(block_id) } } -impl ChainHeaderBackend for &Client where +impl ChainHeaderBackend for &Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1588,7 +1774,10 @@ impl ChainHeaderBackend for &Client whe (**self).status(id) } - fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + fn number( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { (**self).number(hash) } @@ -1597,7 +1786,8 @@ impl ChainHeaderBackend for &Client whe } } -impl ProvideCache for Client where +impl ProvideCache for Client +where B: backend::Backend, Block: BlockT, { @@ -1606,7 +1796,8 @@ impl ProvideCache for Client where } } -impl ProvideRuntimeApi for Client where +impl ProvideRuntimeApi for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1619,62 +1810,61 @@ impl ProvideRuntimeApi for Client where } } -impl CallApiAt for Client where +impl CallApiAt for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, { - type Error = Error; type StateBackend = B::State; fn call_api_at< 'a, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - C: CoreApi, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - params: CallApiAtParams<'a, Block, C, NC, B::State>, - ) -> sp_blockchain::Result> { - let core_api = params.core_api; + params: CallApiAtParams<'a, Block, NC, B::State>, + ) -> Result, sp_api::ApiError> { let at = params.at; - let (manager, extensions) = self.execution_extensions.manager_and_extensions( - at, - params.context, - ); - - self.executor.contextual_call::<_, fn(_,_) -> _,_,_>( - || core_api.initialize_block(at, &self.prepare_environment_block(at)?), - at, - params.function, - ¶ms.arguments, - params.overlayed_changes, - params.offchain_changes, - Some(params.storage_transaction_cache), - params.initialize_block, - manager, - params.native_call, - params.recorder, - Some(extensions), - ) + let (manager, extensions) = + 
self.execution_extensions.manager_and_extensions(at, params.context); + + self.executor + .contextual_call:: _, _, _>( + at, + params.function, + ¶ms.arguments, + params.overlayed_changes, + Some(params.storage_transaction_cache), + manager, + params.native_call, + params.recorder, + Some(extensions), + ) + .map_err(Into::into) } - fn runtime_version_at(&self, at: &BlockId) -> sp_blockchain::Result { - self.runtime_version_at(at) + fn runtime_version_at(&self, at: &BlockId) -> Result { + self.runtime_version_at(at).map_err(Into::into) } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport /// objects. Otherwise, importing blocks directly into the client would be bypassing /// important verification work. -impl sp_consensus::BlockImport for &Client where +#[async_trait::async_trait] +impl sc_consensus::BlockImport for &Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi + - ApiExt, + as ProvideRuntimeApi>::Api: + CoreApi + ApiExt, + RA: Sync + Send, + backend::TransactionFor: Send + 'static, { type Error = ConsensusError; type Transaction = backend::TransactionFor; @@ -1688,7 +1878,7 @@ impl sp_consensus::BlockImport for &Client>, new_cache: HashMap>, @@ -1696,38 +1886,44 @@ impl sp_consensus::BlockImport for &Client return Ok(res), + PrepareStorageChangesResult::Import(storage_changes) => storage_changes, + }; self.lock_import_and_run(|operation| { - self.apply_block(operation, import_block, new_cache) - }).map_err(|e| { + self.apply_block(operation, import_block, new_cache, storage_changes) + }) + .map_err(|e| { warn!("Block import error:\n{:?}", e); ConsensusError::ClientImport(e.to_string()).into() }) } /// Check block preconditions. - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = block; + let BlockCheckParams { + hash, + number, + parent_hash, + allow_missing_state, + import_existing, + allow_missing_parent, + } = block; // Check the block against white and black lists if any are defined // (i.e. fork blocks and bad blocks respectively) match self.block_rules.lookup(number, &hash) { BlockLookupResult::KnownBad => { - trace!( - "Rejecting known bad block: #{} {:?}", - number, - hash, - ); - return Ok(ImportResult::KnownBad); + trace!("Rejecting known bad block: #{} {:?}", number, hash); + return Ok(ImportResult::KnownBad) }, BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1736,66 +1932,76 @@ impl sp_consensus::BlockImport for &Client {} + BlockLookupResult::NotSpecial => {}, } // Own status must be checked first. If the block and ancestry is pruned // this function must return `AlreadyInChain` rather than `MissingState` - match self.block_status(&BlockId::Hash(hash)) + match self + .block_status(&BlockId::Hash(hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
{ - BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => + return Ok(ImportResult::AlreadyInChain), BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::InChainPruned => return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainPruned if !import_existing => + return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), } - match self.block_status(&BlockId::Hash(parent_hash)) + match self + .block_status(&BlockId::Hash(parent_hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - { - BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), - BlockStatus::InChainPruned if allow_missing_state => {}, - BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), - BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), - } - + { + BlockStatus::InChainWithState | BlockStatus::Queued => {}, + BlockStatus::Unknown if allow_missing_parent => {}, + BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), + BlockStatus::InChainPruned if allow_missing_state => {}, + BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } Ok(ImportResult::imported(false)) } } -impl sp_consensus::BlockImport for Client where +#[async_trait::async_trait] +impl sc_consensus::BlockImport for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + >::Api: + CoreApi + ApiExt, + RA: Sync + Send, + backend::TransactionFor: Send + 'static, { type Error = ConsensusError; type Transaction = backend::TransactionFor; - fn import_block( + async fn import_block( &mut self, import_block: BlockImportParams, new_cache: HashMap>, ) -> Result { - (&*self).import_block(import_block, new_cache) + (&*self).import_block(import_block, new_cache).await } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (&*self).check_block(block) + (&*self).check_block(block).await } } -impl Finalizer for Client where +impl Finalizer for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1830,8 +2036,8 @@ impl Finalizer for Client where } } - -impl Finalizer for &Client where +impl Finalizer for &Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1885,10 +2091,10 @@ where } impl BlockBackend for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { fn block_body( &self, @@ -1897,70 +2103,67 @@ impl BlockBackend for Client self.body(id) } - fn block(&self, id: &BlockId) -> sp_blockchain::Result>> - { - Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { - (Some(header), Some(extrinsics), justification) => - Some(SignedBlock { block: Block::new(header, extrinsics), justification }), + fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { + Ok(match (self.header(id)?, self.body(id)?, self.justifications(id)?) 
{ + (Some(header), Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, }) } fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { - // this can probably be implemented more efficiently - if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); - } - } - let hash_and_number = match id.clone() { - BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), - BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), - }; - match hash_and_number { - Some((hash, number)) => { - if self.backend.have_state_at(&hash, number) { - Ok(BlockStatus::InChainWithState) - } else { - Ok(BlockStatus::InChainPruned) - } - } - None => Ok(BlockStatus::Unknown), - } + Client::block_status(self, id) } - fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().justification(*id) + fn justifications(&self, id: &BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().justifications(*id) } fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { self.backend.blockchain().hash(number) } + + fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>> { + self.backend.blockchain().indexed_transaction(hash) + } + + fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + self.backend.blockchain().has_indexed_transaction(hash) + } + + fn block_indexed_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result>>> { + self.backend.blockchain().block_indexed_body(*id) + } } impl backend::AuxStore for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - Self: ProvideRuntimeApi, - >::Api: CoreApi, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Self: ProvideRuntimeApi, + >::Api: CoreApi, { /// Insert auxiliary data into key-value store. fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { // Import is locked here because we may have other block import // operations that tries to set aux data. Note that for consensus // layer, one can always use atomic operations to make sure // import is only locked once. - self.lock_import_and_run(|operation| { - apply_aux(operation, insert, delete) - }) + self.lock_import_and_run(|operation| apply_aux(operation, insert, delete)) } /// Query auxiliary data from key-value store. 
fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { @@ -1969,20 +2172,24 @@ impl backend::AuxStore for Client } impl backend::AuxStore for &Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: CoreApi, { fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { (**self).insert_aux(insert, delete) } @@ -1992,9 +2199,10 @@ impl backend::AuxStore for &Client } impl sp_consensus::block_validation::Chain for Client - where BE: backend::Backend, - E: CallExecutor, - B: BlockT +where + BE: backend::Backend, + E: CallExecutor, + B: BlockT, { fn block_status( &self, @@ -2003,3 +2211,30 @@ impl sp_consensus::block_validation::Chain for Client) } } + +impl sp_transaction_storage_proof::IndexedBody for Client +where + BE: backend::Backend, + E: CallExecutor, + B: BlockT, +{ + fn block_indexed_body( + &self, + number: NumberFor, + ) -> Result>>, sp_transaction_storage_proof::Error> { + self.backend + .blockchain() + .block_indexed_body(BlockId::number(number)) + .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) + } + + fn number( + &self, + hash: B::Hash, + ) -> Result>, sp_transaction_storage_proof::Error> { + self.backend + .blockchain() + .number(hash) + .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) + } +} diff --git a/client/service/src/client/genesis.rs b/client/service/src/client/genesis.rs index 4df08025e3826..e764e8e24f105 100644 --- a/client/service/src/client/genesis.rs +++ b/client/service/src/client/genesis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,17 +18,12 @@ //! Tool for creating the genesis block. -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Zero}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero}; /// Create a genesis block, given the initial storage. -pub fn construct_genesis_block< - Block: BlockT -> ( - state_root: Block::Hash -) -> Block { - let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - Vec::new(), - ); +pub fn construct_genesis_block(state_root: Block::Hash) -> Block { + let extrinsics_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root(Vec::new()); Block::new( <::Header as HeaderT>::new( @@ -36,8 +31,8 @@ pub fn construct_genesis_block< extrinsics_root, state_root, Default::default(), - Default::default() + Default::default(), ), - Default::default() + Default::default(), ) } diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs index e8e1286eccdb0..7c13b98843e05 100644 --- a/client/service/src/client/light.rs +++ b/client/service/src/client/light.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,18 +20,23 @@ use std::sync::Arc; -use sc_executor::RuntimeInfo; -use sp_core::traits::{CodeExecutor, SpawnNamed}; -use sp_runtime::BuildStorage; -use sp_runtime::traits::{Block as BlockT, HashFor}; -use sp_blockchain::Result as ClientResult; use prometheus_endpoint::Registry; +use sc_executor::RuntimeVersionOf; +use sc_telemetry::TelemetryHandle; +use sp_blockchain::Result as ClientResult; +use sp_core::traits::{CodeExecutor, SpawnNamed}; +use sp_runtime::{ + traits::{Block as BlockT, HashFor}, + BuildStorage, +}; -use super::{call_executor::LocalCallExecutor, client::{Client, ClientConfig}}; +use super::{ + call_executor::LocalCallExecutor, + client::{Client, ClientConfig}, +}; use sc_client_api::light::Storage as BlockchainStorage; use sc_light::{Backend, GenesisCallExecutor}; - /// Create an instance of light client. pub fn new_light( backend: Arc>>, @@ -39,28 +44,29 @@ pub fn new_light( code_executor: E, spawn_handle: Box, prometheus_registry: Option, + telemetry: Option, ) -> ClientResult< - Client< + Client< + Backend>, + GenesisCallExecutor< Backend>, - GenesisCallExecutor< - Backend>, - LocalCallExecutor>, E> - >, - B, - RA - > - > - where - B: BlockT, - S: BlockchainStorage + 'static, - E: CodeExecutor + RuntimeInfo + Clone + 'static, + LocalCallExecutor>, E>, + >, + B, + RA, + >, +> +where + B: BlockT, + S: BlockchainStorage + 'static, + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, { let local_executor = LocalCallExecutor::new( backend.clone(), code_executor, spawn_handle.clone(), - ClientConfig::default() - ); + ClientConfig::default(), + )?; let executor = GenesisCallExecutor::new(backend.clone(), local_executor); Client::new( backend, @@ -70,6 +76,7 @@ pub fn new_light( Default::default(), Default::default(), prometheus_registry, + telemetry, ClientConfig::default(), ) } diff --git a/client/service/src/client/mod.rs b/client/service/src/client/mod.rs index 7c96f61a7867a..754309e864ebd 100644 --- a/client/service/src/client/mod.rs +++ b/client/service/src/client/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -23,37 +23,40 @@ //! //! - A database containing the blocks and chain state, generally referred to as //! the [`Backend`](sc_client_api::backend::Backend). -//! - A runtime environment, generally referred to as the [`Executor`](CallExecutor). +//! - A runtime environment, generally referred to as the +//! [`Executor`](sc_client_api::call_executor::CallExecutor). //! //! # Initialization //! //! Creating a [`Client`] is done by calling the `new` method and passing to it a -//! [`Backend`](sc_client_api::backend::Backend) and an [`Executor`](CallExecutor). +//! [`Backend`](sc_client_api::backend::Backend) and an +//! [`Executor`](sc_client_api::call_executor::CallExecutor). //! //! The former is typically provided by the `sc-client-db` crate. //! //! The latter typically requires passing one of: //! //! - A [`LocalCallExecutor`] running the runtime locally. -//! - A [`RemoteCallExecutor`](light::call_executor::RemoteCallRequest) that will ask a +//! - A [`RemoteCallExecutor`](sc_client_api::light::RemoteCallRequest) that will ask a //! 
//! third-party to perform the executions. -//! - A [`RemoteOrLocalCallExecutor`](light::call_executor::RemoteOrLocalCallExecutor), combination -//! of the two. +//! - A [`RemoteOrLocalCallExecutor`](sc_client_api::light::LocalOrRemote), combination of the two. //! //! Additionally, the fourth generic parameter of the `Client` is a marker type representing //! the ways in which the runtime can interface with the outside. Any code that builds a `Client` //! is responsible for putting the right marker. -pub mod genesis; -pub mod light; +mod block_rules; mod call_executor; mod client; -mod block_rules; +pub mod genesis; +pub mod light; +mod wasm_override; +mod wasm_substitutes; pub use self::{ call_executor::LocalCallExecutor, client::{Client, ClientConfig}, }; -#[cfg(feature="test-helpers")] -pub use self::client::{new_with_backend, new_in_mem}; +#[cfg(feature = "test-helpers")] +pub use self::client::{new_in_mem, new_with_backend}; diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs new file mode 100644 index 0000000000000..6d5a071269d4d --- /dev/null +++ b/client/service/src/client/wasm_override.rs @@ -0,0 +1,284 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! # WASM Local Blob-Override +//! +//! WASM Local blob override provides tools to replace on-chain WASM with custom WASM. +//! These customized WASM blobs may include functionality that is not included in the +//! on-chain WASM, such as tracing or debugging information. This extra information is especially +//! useful in external scenarios, like exchanges or archive nodes. +//! +//! ## Usage +//! +//! WASM overrides may be enabled with the `--wasm-runtime-overrides` argument. The argument +//! expects a path to a directory that holds custom WASM. +//! +//! Any file ending in '.wasm' will be scraped and instantiated as a WASM blob. WASM can be built by +//! compiling the required runtime with the changes needed. For example, compiling a runtime with +//! tracing enabled would produce a WASM blob that can be used. +//! +//! A custom WASM blob will override on-chain WASM if the spec version matches. If it is +//! required to override multiple runtimes, multiple WASM blobs matching each of the spec versions +//! needed must be provided in the given directory. +use sc_executor::RuntimeVersionOf; +use sp_blockchain::Result; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode}; +use sp_state_machine::BasicExternalities; +use sp_version::RuntimeVersion; +use std::{ + collections::{hash_map::DefaultHasher, HashMap}, + fs, + hash::Hasher as _, + path::{Path, PathBuf}, +}; + +#[derive(Clone, Debug, PartialEq)] +/// Auxiliary structure that holds a wasm blob and its hash.
+struct WasmBlob { + code: Vec<u8>, + hash: Vec<u8>, +} + +impl WasmBlob { + fn new(code: Vec<u8>) -> Self { + let hash = make_hash(&code); + Self { code, hash } + } + + fn runtime_code(&self, heap_pages: Option<u64>) -> RuntimeCode { + RuntimeCode { code_fetcher: self, hash: self.hash.clone(), heap_pages } + } +} + +/// Make a hash out of a byte string using the default rust hasher +fn make_hash<K: std::hash::Hash + ?Sized>(val: &K) -> Vec<u8> { + let mut state = DefaultHasher::new(); + val.hash(&mut state); + state.finish().to_le_bytes().to_vec() +} + +impl FetchRuntimeCode for WasmBlob { + fn fetch_runtime_code<'a>(&'a self) -> Option<Cow<'a, [u8]>> { + Some(self.code.as_slice().into()) + } +} + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum WasmOverrideError { + #[error("Failed to get runtime version: {0}")] + VersionInvalid(String), + + #[error("WASM override IO error")] + Io(PathBuf, #[source] std::io::Error), + + #[error("Overwriting WASM requires a directory where local \ WASM is stored. {} is not a directory", .0.display())] + NotADirectory(PathBuf), + + #[error("Duplicate WASM Runtimes found: \n{}\n", .0.join("\n") )] + DuplicateRuntime(Vec<String>), +} + +impl From<WasmOverrideError> for sp_blockchain::Error { + fn from(err: WasmOverrideError) -> Self { + Self::Application(Box::new(err)) + } +} + +/// Scrapes WASM from a folder and returns WASM from that folder +/// if the runtime spec version matches. +#[derive(Clone, Debug)] +pub struct WasmOverride<E> { + // Map of runtime spec version -> Wasm Blob + overrides: HashMap<u32, WasmBlob>, + executor: E, +} + +impl<E> WasmOverride<E> +where + E: RuntimeVersionOf + Clone + 'static, +{ + pub fn new<P>(path: P, executor: E) -> Result<Self> + where + P: AsRef<Path>, + { + let overrides = Self::scrape_overrides(path.as_ref(), &executor)?; + Ok(Self { overrides, executor }) + } + + /// Gets an override by its runtime spec version. + /// + /// Returns `None` if an override for a spec version does not exist. + pub fn get<'a, 'b: 'a>(&'b self, spec: &u32, pages: Option<u64>) -> Option<RuntimeCode<'a>> { + self.overrides.get(spec).map(|w| w.runtime_code(pages)) + } + + /// Scrapes a folder for WASM runtimes. + /// Returns a hashmap of the runtime version and wasm runtime code. + fn scrape_overrides(dir: &Path, executor: &E) -> Result<HashMap<u32, WasmBlob>> { + let handle_err = |e: std::io::Error| -> sp_blockchain::Error { + WasmOverrideError::Io(dir.to_owned(), e).into() + }; + + if !dir.is_dir() { + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()) + } + + let mut overrides = HashMap::new(); + let mut duplicates = Vec::new(); + for entry in fs::read_dir(dir).map_err(handle_err)? { + let entry = entry.map_err(handle_err)?; + let path = entry.path(); + match path.extension().map(|e| e.to_str()).flatten() { + Some("wasm") => { + let wasm = WasmBlob::new(fs::read(&path).map_err(handle_err)?); + let version = Self::runtime_version(executor, &wasm, Some(128))?; + log::info!( + target: "wasm_overrides", + "Found wasm override in file: `{:?}`, version: {}", + path.to_str(), + version, + ); + if let Some(_duplicate) = overrides.insert(version.spec_version, wasm) { + log::info!( + target: "wasm_overrides", + "Found duplicate spec version for runtime in file: `{:?}`, version: {}", + path.to_str(), + version, + ); + duplicates.push(format!("{}", path.display())); + } + }, + _ => (), + } + } + + if !duplicates.is_empty() { + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()) + } + + Ok(overrides) + } + + fn runtime_version( + executor: &E, + code: &WasmBlob, + heap_pages: Option<u64>, + ) -> Result<RuntimeVersion> { + let mut ext = BasicExternalities::default(); + executor + .runtime_version(&mut ext, &code.runtime_code(heap_pages)) + .map_err(|e| WasmOverrideError::VersionInvalid(format!("{:?}", e)).into()) + } +} + +/// Returns a WasmOverride struct filled with dummy data for testing.
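For readability, here is a stripped-down sketch of the `scrape_overrides` control flow above: collect every `*.wasm` file, key it by spec version, and treat a second blob for the same version as a hard error. The `spec_version_of` stub stands in for instantiating the blob and querying its `RuntimeVersion`; everything else is plain `std`:

```rust
use std::{collections::HashMap, fs, io, path::Path};

/// Stub: the real code runs the blob and reads its `spec_version`; hashing
/// the length keeps this example self-contained.
fn spec_version_of(wasm: &[u8]) -> u32 {
    wasm.len() as u32
}

/// Scan `dir` for `*.wasm` files and build a spec_version -> blob map,
/// rejecting duplicate spec versions like `scrape_overrides` does.
fn scan_overrides(dir: &Path) -> io::Result<HashMap<u32, Vec<u8>>> {
    let mut overrides = HashMap::new();
    let mut duplicates = Vec::new();
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        if path.extension().and_then(|e| e.to_str()) == Some("wasm") {
            let wasm = fs::read(&path)?;
            let version = spec_version_of(&wasm);
            if overrides.insert(version, wasm).is_some() {
                duplicates.push(path.display().to_string());
            }
        }
    }
    if duplicates.is_empty() {
        Ok(overrides)
    } else {
        Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("duplicate runtimes: {}", duplicates.join(", ")),
        ))
    }
}

fn main() -> io::Result<()> {
    let overrides = scan_overrides(Path::new("."))?;
    println!("found {} override(s)", overrides.len());
    Ok(())
}
```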
+#[cfg(test)] +pub fn dummy_overrides(executor: &E) -> WasmOverride +where + E: RuntimeVersionOf + Clone + 'static, +{ + let mut overrides = HashMap::new(); + overrides.insert(0, WasmBlob::new(vec![0, 0, 0, 0, 0, 0, 0, 0])); + overrides.insert(1, WasmBlob::new(vec![1, 1, 1, 1, 1, 1, 1, 1])); + overrides.insert(2, WasmBlob::new(vec![2, 2, 2, 2, 2, 2, 2, 2])); + WasmOverride { overrides, executor: executor.clone() } +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; + use std::fs::{self, File}; + use substrate_test_runtime_client::LocalExecutorDispatch; + + fn wasm_test(fun: F) + where + F: Fn(&Path, &[u8], &NativeElseWasmExecutor), + { + let exec = + NativeElseWasmExecutor::::new( + WasmExecutionMethod::Interpreted, + Some(128), + 1, + ); + let bytes = substrate_test_runtime::wasm_binary_unwrap(); + let dir = tempfile::tempdir().expect("Create a temporary directory"); + fun(dir.path(), bytes, &exec); + dir.close().expect("Temporary Directory should close"); + } + + #[test] + fn should_get_runtime_version() { + let wasm = WasmBlob::new(substrate_test_runtime::wasm_binary_unwrap().to_vec()); + let executor = NativeElseWasmExecutor::::new( + WasmExecutionMethod::Interpreted, + Some(128), + 1, + ); + + let version = WasmOverride::runtime_version(&executor, &wasm, Some(128)) + .expect("should get the `RuntimeVersion` of the test-runtime wasm blob"); + assert_eq!(version.spec_version, 2); + } + + #[test] + fn should_scrape_wasm() { + wasm_test(|dir, wasm_bytes, exec| { + fs::write(dir.join("test.wasm"), wasm_bytes).expect("Create test file"); + let overrides = + WasmOverride::scrape_overrides(dir, exec).expect("HashMap of u32 and WasmBlob"); + let wasm = overrides.get(&2).expect("WASM binary"); + assert_eq!(wasm.code, substrate_test_runtime::wasm_binary_unwrap().to_vec()) + }); + } + + #[test] + fn should_check_for_duplicates() { + wasm_test(|dir, wasm_bytes, exec| { + fs::write(dir.join("test0.wasm"), wasm_bytes).expect("Create test file"); + fs::write(dir.join("test1.wasm"), wasm_bytes).expect("Create test file"); + let scraped = WasmOverride::scrape_overrides(dir, exec); + + match scraped { + Err(sp_blockchain::Error::Application(e)) => { + match e.downcast_ref::() { + Some(WasmOverrideError::DuplicateRuntime(duplicates)) => { + assert_eq!(duplicates.len(), 1); + }, + _ => panic!("Test should end with Msg Error Variant"), + } + }, + _ => panic!("Test should end in error"), + } + }); + } + + #[test] + fn should_ignore_non_wasm() { + wasm_test(|dir, wasm_bytes, exec| { + File::create(dir.join("README.md")).expect("Create test file"); + File::create(dir.join("LICENSE")).expect("Create a test file"); + fs::write(dir.join("test0.wasm"), wasm_bytes).expect("Create test file"); + let scraped = + WasmOverride::scrape_overrides(dir, exec).expect("HashMap of u32 and WasmBlob"); + assert_eq!(scraped.len(), 1); + }); + } +} diff --git a/client/service/src/client/wasm_substitutes.rs b/client/service/src/client/wasm_substitutes.rs new file mode 100644 index 0000000000000..28975790e9b57 --- /dev/null +++ b/client/service/src/client/wasm_substitutes.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! # WASM substitutes + +use parking_lot::RwLock; +use sc_client_api::backend; +use sc_executor::RuntimeVersionOf; +use sp_blockchain::{HeaderBackend, Result}; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; +use sp_state_machine::BasicExternalities; +use sp_version::RuntimeVersion; +use std::{ + collections::{hash_map::DefaultHasher, HashMap}, + hash::Hasher as _, + sync::Arc, +}; + +/// A wasm substitute for the on-chain wasm. +#[derive(Debug)] +struct WasmSubstitute<Block: BlockT> { + code: Vec<u8>, + hash: Vec<u8>, + /// The hash of the block starting from which the substitute should be used. + block_hash: Block::Hash, + /// The block number of `block_hash`. If `None`, the block is still unknown. + block_number: RwLock<Option<NumberFor<Block>>>, +} + +impl<Block: BlockT> WasmSubstitute<Block> { + fn new( + code: Vec<u8>, + block_hash: Block::Hash, + backend: &impl backend::Backend<Block>, + ) -> Result<Self> { + let block_number = RwLock::new(backend.blockchain().number(block_hash)?); + let hash = make_hash(&code); + Ok(Self { code, hash, block_hash, block_number }) + } + + fn runtime_code(&self, heap_pages: Option<u64>) -> RuntimeCode { + RuntimeCode { code_fetcher: self, hash: self.hash.clone(), heap_pages } + } + + /// Returns `true` when the substitute matches for the given `block_id`. + fn matches(&self, block_id: &BlockId<Block>, backend: &impl backend::Backend<Block>) -> bool { + let block_number = *self.block_number.read(); + let block_number = if let Some(block_number) = block_number { + block_number + } else { + let block_number = match backend.blockchain().number(self.block_hash) { + Ok(Some(n)) => n, + // still unknown + Ok(None) => return false, + Err(e) => { + log::debug!( + target: "wasm_substitutes", + "Failed to get block number for block hash {:?}: {:?}", + self.block_hash, + e, + ); + return false + }, + }; + *self.block_number.write() = Some(block_number); + block_number + }; + + let requested_block_number = + backend.blockchain().block_number_from_id(&block_id).ok().flatten(); + + Some(block_number) <= requested_block_number + } +} + +/// Make a hash out of a byte string using the default rust hasher +fn make_hash<K: std::hash::Hash + ?Sized>(val: &K) -> Vec<u8> { + let mut state = DefaultHasher::new(); + val.hash(&mut state); + state.finish().to_le_bytes().to_vec() +} + +impl<Block: BlockT> FetchRuntimeCode for WasmSubstitute<Block> { + fn fetch_runtime_code<'a>(&'a self) -> Option<Cow<'a, [u8]>> { + Some(self.code.as_slice().into()) + } +} + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum WasmSubstituteError { + #[error("Failed to get runtime version: {0}")] + VersionInvalid(String), +} + +impl From<WasmSubstituteError> for sp_blockchain::Error { + fn from(err: WasmSubstituteError) -> Self { + Self::Application(Box::new(err)) + } +} + +/// Substitutes the on-chain wasm with some hard-coded blobs.
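One easy-to-miss detail of `matches` above: the final `Some(block_number) <= requested_block_number` comparison relies on `Option`'s derived ordering, in which `None` sorts before every `Some`. If the requested block's number cannot be resolved, the comparison is therefore false and the substitute is not applied; otherwise it applies from the activation block onward. A tiny demonstration of that ordering:

```rust
fn main() {
    let activation: u64 = 100;

    // `None < Some(_)` by `Option`'s derived `Ord`, so an unresolvable
    // requested block never matches.
    assert!(!(Some(activation) <= None::<u64>));

    // At or after the activation block the substitute applies...
    assert!(Some(activation) <= Some(100));
    assert!(Some(activation) <= Some(150));

    // ...and before it, it does not.
    assert!(!(Some(activation) <= Some(99)));
}
```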
+/// Substitutes the on-chain wasm with some hard coded blobs. +#[derive(Debug)] +pub struct WasmSubstitutes<Block: BlockT, Executor, Backend> { + /// spec_version -> WasmSubstitute + substitutes: Arc<HashMap<u32, WasmSubstitute<Block>>>, + executor: Executor, + backend: Arc<Backend>, +} + +impl<Block: BlockT, Executor: Clone, Backend> Clone for WasmSubstitutes<Block, Executor, Backend> { + fn clone(&self) -> Self { + Self { + substitutes: self.substitutes.clone(), + executor: self.executor.clone(), + backend: self.backend.clone(), + } + } +} + +impl<Executor, Backend, Block> WasmSubstitutes<Block, Executor, Backend> +where + Executor: RuntimeVersionOf + Clone + 'static, + Backend: backend::Backend<Block>, + Block: BlockT, +{ + /// Create a new instance. + pub fn new( + substitutes: HashMap<Block::Hash, Vec<u8>>, + executor: Executor, + backend: Arc<Backend>, + ) -> Result<Self> { + let substitutes = substitutes + .into_iter() + .map(|(parent_block_hash, code)| { + let substitute = WasmSubstitute::new(code, parent_block_hash, &*backend)?; + let version = Self::runtime_version(&executor, &substitute)?; + Ok((version.spec_version, substitute)) + }) + .collect::<Result<HashMap<_, _>>>()?; + + Ok(Self { executor, substitutes: Arc::new(substitutes), backend }) + } + + /// Get a substitute. + /// + /// Returns `None` if there isn't any substitute required. + pub fn get( + &self, + spec: u32, + pages: Option<u64>, + block_id: &BlockId<Block>, + ) -> Option<RuntimeCode<'_>> { + let s = self.substitutes.get(&spec)?; + s.matches(block_id, &*self.backend).then(|| s.runtime_code(pages)) + } + + fn runtime_version( + executor: &Executor, + code: &WasmSubstitute<Block>, + ) -> Result<RuntimeVersion> { + let mut ext = BasicExternalities::default(); + executor + .runtime_version(&mut ext, &code.runtime_code(None)) + .map_err(|e| WasmSubstituteError::VersionInvalid(format!("{:?}", e)).into()) + } +}
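To make the `get` policy above concrete — lookup by `spec_version`, then a block-height gate via `matches` — here is a minimal sketch with plain std types (the struct and field names are illustrative, not the real ones):

```rust
use std::collections::HashMap;

struct Substitute {
    code: Vec<u8>,
    /// Block number from which this substitute applies.
    active_from: u64,
}

fn get(subs: &HashMap<u32, Substitute>, spec: u32, at: u64) -> Option<&[u8]> {
    let s = subs.get(&spec)?;
    // Mirrors `s.matches(..).then(|| s.runtime_code(..))` in the real code.
    (s.active_from <= at).then(|| s.code.as_slice())
}

fn main() {
    let mut subs = HashMap::new();
    subs.insert(268, Substitute { code: vec![1, 2, 3], active_from: 100 });
    assert!(get(&subs, 268, 99).is_none()); // before the activation block
    assert!(get(&subs, 268, 100).is_some()); // from the activation block on
}
```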
diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 15783a87f9917..a98a34b473cee 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,19 +18,28 @@ //! Service configuration. -pub use sc_client_db::{Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig}; -pub use sc_network::Multiaddr; -pub use sc_network::config::{ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig}; +pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; +pub use sc_client_db::{Database, DatabaseSource, KeepBlocks, PruningMode, TransactionStorageMode}; pub use sc_executor::WasmExecutionMethod; -use sc_client_api::execution_extensions::ExecutionStrategies; +pub use sc_network::{ + config::{ + IncomingRequest, MultiaddrWithPeerId, NetworkConfiguration, NodeKeyConfig, + NonDefaultSetConfig, OutgoingResponse, RequestResponseConfig, Role, SetConfig, + TransportConfig, + }, + Multiaddr, +}; -use std::{io, future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; -pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +use prometheus_endpoint::Registry; use sc_chain_spec::ChainSpec; -use sp_core::crypto::SecretString; pub use sc_telemetry::TelemetryEndpoints; -use prometheus_endpoint::Registry; -#[cfg(not(target_os = "unknown"))] +pub use sc_transaction_pool::Options as TransactionPoolOptions; +use sp_core::crypto::SecretString; +use std::{ + io, + net::SocketAddr, + path::{Path, PathBuf}, +}; use tempfile::TempDir; /// Service configuration. @@ -42,26 +51,36 @@ pub struct Configuration { pub impl_version: String, /// Node role. pub role: Role, - /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. - pub task_executor: TaskExecutor, + /// Handle to the tokio runtime. Will be used to spawn futures by the task manager. + pub tokio_handle: tokio::runtime::Handle, /// Extrinsic pool configuration. pub transaction_pool: TransactionPoolOptions, /// Network configuration. pub network: NetworkConfiguration, /// Configuration for the keystore. pub keystore: KeystoreConfig, + /// Remote URI to connect to for async keystore support + pub keystore_remote: Option<String>, /// Configuration for the database. - pub database: DatabaseConfig, + pub database: DatabaseSource, /// Size of internal state cache in Bytes pub state_cache_size: usize, /// Size in percent of cache size dedicated to child tries pub state_cache_child_ratio: Option<usize>, - /// Pruning settings. - pub pruning: PruningMode, + /// State pruning settings. + pub state_pruning: PruningMode, + /// Number of blocks to keep in the db. + pub keep_blocks: KeepBlocks, + /// Transaction storage scheme. + pub transaction_storage: TransactionStorageMode, /// Chain configuration. pub chain_spec: Box<dyn ChainSpec>, /// Wasm execution method. pub wasm_method: WasmExecutionMethod, + /// Directory where local WASM runtimes live. These runtimes take precedence + /// over on-chain runtimes when the spec version matches. Set to `None` to + /// disable overrides (default). + pub wasm_runtime_overrides: Option<PathBuf>, /// Execution strategies. pub execution_strategies: ExecutionStrategies, /// RPC over HTTP binding address. `None` if disabled. @@ -76,13 +95,12 @@ pub struct Configuration { pub rpc_cors: Option<Vec<String>>, /// RPC methods to expose (by default only a safe subset or all of them). pub rpc_methods: RpcMethods, + /// Maximum payload of rpc request/responses. + pub rpc_max_payload: Option<usize>, /// Prometheus endpoint configuration. `None` if disabled. pub prometheus_config: Option<PrometheusConfig>, /// Telemetry service URL. `None` if disabled. pub telemetry_endpoints: Option<TelemetryEndpoints>, - /// External WASM transport for the telemetry. If `Some`, when connection to a telemetry - /// endpoint, this transport will be tried in priority before all others. - pub telemetry_external_transport: Option<ExtTransport>, /// The default number of 64KB pages to allocate for Wasm execution pub default_heap_pages: Option<u64>, /// Should offchain workers be executed. @@ -93,12 +111,15 @@ pub struct Configuration { pub disable_grandpa: bool, /// Development key seed. /// - /// When running in development mode, the seed will be used to generate authority keys by the keystore. + /// When running in development mode, the seed will be used to generate authority keys by the + /// keystore. /// /// Should only be set when `node` is running development mode. pub dev_key_seed: Option<String>, /// Tracing targets pub tracing_targets: Option<String>, + /// Is log filter reloading disabled + pub disable_log_reloading: bool, /// Tracing receiver pub tracing_receiver: sc_tracing::TracingReceiver, /// The size of the instances cache. @@ -130,7 +151,7 @@ pub enum KeystoreConfig { /// The path of the keystore. path: PathBuf, /// Node keystore's password. - password: Option<SecretString> + password: Option<SecretString>, }, /// In-memory keystore. Recommended for in-browser nodes.
InMemory, @@ -171,7 +192,7 @@ impl PrometheusConfig { Self { port, registry: Registry::new_custom(Some("substrate".into()), None) - .expect("this can only fail if the prefix is empty") + .expect("this can only fail if the prefix is empty"), } } } @@ -183,9 +204,25 @@ impl Configuration { } /// Returns the prometheus metrics registry, if available. - pub fn prometheus_registry<'a>(&'a self) -> Option<&'a Registry> { + pub fn prometheus_registry(&self) -> Option<&Registry> { self.prometheus_config.as_ref().map(|config| &config.registry) } + + /// Returns the network protocol id from the chain spec, or the default. + pub fn protocol_id(&self) -> sc_network::config::ProtocolId { + let protocol_id_full = match self.chain_spec.protocol_id() { + Some(pid) => pid, + None => { + log::warn!( + "Using default protocol ID {:?} because none is configured in the \ + chain specs", + crate::DEFAULT_PROTOCOL_ID + ); + crate::DEFAULT_PROTOCOL_ID + }, + }; + sc_network::config::ProtocolId::from(protocol_id_full) + } } /// Available RPC methods. @@ -210,7 +247,6 @@ impl Default for RpcMethods { #[derive(Debug)] pub enum BasePath { /// A temporary directory is used as base path and will be deleted when dropped. - #[cfg(not(target_os = "unknown"))] Temporary(TempDir), /// A path on the disk. Permanenent(PathBuf), @@ -222,11 +258,8 @@ impl BasePath { /// /// Note: the temporary directory will be created automatically and deleted when the `BasePath` /// instance is dropped. - #[cfg(not(target_os = "unknown"))] pub fn new_temp_dir() -> io::Result<BasePath> { - Ok(BasePath::Temporary( - tempfile::Builder::new().prefix("substrate").tempdir()?, - )) + Ok(BasePath::Temporary(tempfile::Builder::new().prefix("substrate").tempdir()?)) } /// Create a `BasePath` instance based on an existing path on disk. @@ -238,7 +271,6 @@ impl BasePath { } /// Create a base path from values describing the project. - #[cfg(not(target_os = "unknown"))] pub fn from_project(qualifier: &str, organization: &str, application: &str) -> BasePath { BasePath::new( directories::ProjectDirs::from(qualifier, organization, application) @@ -250,11 +282,17 @@ impl BasePath { /// Retrieve the base path. pub fn path(&self) -> &Path { match self { - #[cfg(not(target_os = "unknown"))] BasePath::Temporary(temp_dir) => temp_dir.path(), BasePath::Permanenent(path) => path.as_path(), } } + + /// Returns the configuration directory inside this base path. + /// + /// The path looks like `$base_path/chains/$chain_id` + pub fn config_dir(&self, chain_id: &str) -> PathBuf { + self.path().join("chains").join(chain_id) + } } impl std::convert::From<PathBuf> for BasePath { @@ -262,62 +300,3 @@ impl std::convert::From<PathBuf> for BasePath { BasePath::new(path) } }
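A quick std-only check of the `config_dir` layout documented above; no Substrate types are involved:

```rust
use std::path::{Path, PathBuf};

/// `$base_path/chains/$chain_id`, as documented on `BasePath::config_dir`.
fn config_dir(base_path: &Path, chain_id: &str) -> PathBuf {
    base_path.join("chains").join(chain_id)
}

fn main() {
    let dir = config_dir(Path::new("/var/lib/node"), "dev");
    assert_eq!(dir, Path::new("/var/lib/node/chains/dev"));
}
```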
- -// NOTE: here for code readability. -pub(crate) type SomeFuture = Pin<Box<dyn Future<Output = ()> + Send>>; -pub(crate) type JoinFuture = Pin<Box<dyn Future<Output = ()> + Send>>; - -/// Callable object that execute tasks. -/// -/// This struct can be created easily using `Into`. -/// -/// # Examples -/// -/// ## Using tokio -/// -/// ``` -/// # use sc_service::TaskExecutor; -/// use futures::future::FutureExt; -/// use tokio::runtime::Runtime; -/// -/// let runtime = Runtime::new().unwrap(); -/// let handle = runtime.handle().clone(); -/// let task_executor: TaskExecutor = (move |future, _task_type| { -/// handle.spawn(future).map(|_| ()) -/// }).into(); -/// ``` -/// -/// ## Using async-std -/// -/// ``` -/// # use sc_service::TaskExecutor; -/// let task_executor: TaskExecutor = (|future, _task_type| { -/// // NOTE: async-std's JoinHandle is not a Result so we don't need to map the result -/// async_std::task::spawn(future) -/// }).into(); -/// ``` -#[derive(Clone)] -pub struct TaskExecutor(Arc<dyn Fn(SomeFuture, TaskType) -> JoinFuture + Send + Sync>); - -impl std::fmt::Debug for TaskExecutor { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "TaskExecutor") - } -} - -impl<F, FUT> std::convert::From<F> for TaskExecutor -where - F: Fn(SomeFuture, TaskType) -> FUT + Send + Sync + 'static, - FUT: Future<Output = ()> + Send + 'static, -{ - fn from(func: F) -> Self { - Self(Arc::new(move |fut, tt| Box::pin(func(fut, tt)))) - } -} - -impl TaskExecutor { - /// Spawns a new asynchronous task. - pub fn spawn(&self, future: SomeFuture, task_type: TaskType) -> JoinFuture { - self.0(future, task_type) - } -}
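Everything the removed `TaskExecutor` abstraction did is taken over by a plain `tokio::runtime::Handle` (stored in `Configuration::tokio_handle` above). A minimal sketch of the new shape, assuming tokio 1.x with the `rt-multi-thread` feature:

```rust
use tokio::runtime::Runtime;

fn main() {
    let runtime = Runtime::new().unwrap();
    // Previously wrapped in a closure-based TaskExecutor; now passed around directly.
    let handle = runtime.handle().clone();
    let join = handle.spawn(async { 40 + 2 });
    assert_eq!(runtime.block_on(join).unwrap(), 42);
}
```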
+ + #[error("Prometheus metrics error")] + Prometheus(#[from] prometheus_endpoint::PrometheusError), + + #[error("Application")] + Application(#[from] Box), + + #[error("Other: {0}")] Other(String), } @@ -55,21 +71,8 @@ impl<'a> From<&'a str> for Error { } } -impl From for Error { - fn from(e: prometheus_endpoint::PrometheusError) -> Self { - Error::Other(format!("Prometheus error: {}", e)) - } -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(err), - Error::Io(ref err) => Some(err), - Error::Consensus(ref err) => Some(err), - Error::Network(ref err) => Some(err), - Error::Keystore(ref err) => Some(err), - _ => None, - } +impl<'a> From for Error { + fn from(s: String) -> Self { + Error::Other(s) } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index cb741c2920b06..7284747424aa9 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,81 +22,69 @@ #![warn(missing_docs)] #![recursion_limit = "1024"] -pub mod config; pub mod chain_ops; +pub mod config; pub mod error; -mod metrics; mod builder; #[cfg(feature = "test-helpers")] pub mod client; #[cfg(not(feature = "test-helpers"))] mod client; +mod metrics; mod task_manager; -use std::{io, pin::Pin}; -use std::net::SocketAddr; -use std::collections::HashMap; -use std::time::Duration; -use std::task::Poll; -use parking_lot::Mutex; - -use futures::{Future, FutureExt, Stream, StreamExt, stream, compat::*}; -use sc_network::{NetworkStatus, network_state::NetworkState, PeerId}; -use log::{warn, debug, error}; -use codec::{Encode, Decode}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use parity_util_mem::MallocSizeOf; -use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}}; - -pub use self::error::Error; -pub use self::builder::{ - new_full_client, new_client, new_full_parts, new_light_parts, - spawn_tasks, build_network, build_offchain_workers, - BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullClient, TLightClient, - TFullBackend, TLightBackend, TLightBackendWithHash, TLightClientWithBackend, - TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, +use std::{collections::HashMap, io, net::SocketAddr, pin::Pin, task::Poll}; + +use codec::{Decode, Encode}; +use futures::{stream, Future, FutureExt, Stream, StreamExt}; +use log::{debug, error, warn}; +use sc_network::PeerId; +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; + +pub use self::{ + builder::{ + build_network, build_offchain_workers, new_client, new_db_backend, new_full_client, + new_full_parts, new_light_parts, spawn_tasks, BuildNetworkParams, KeystoreContainer, + NetworkStarter, NoopRpcExtensionBuilder, RpcExtensionBuilder, SpawnTasksParams, + TFullBackend, TFullCallExecutor, TFullClient, TLightBackend, TLightBackendWithHash, + TLightCallExecutor, TLightClient, TLightClientWithBackend, + }, + client::{ClientConfig, LocalCallExecutor}, + error::Error, }; pub use config::{ - BasePath, 
Configuration, DatabaseConfig, PruningMode, Role, RpcMethods, TaskExecutor, TaskType, + BasePath, Configuration, DatabaseSource, KeepBlocks, PruningMode, Role, RpcMethods, TaskType, + TransactionStorageMode, }; pub use sc_chain_spec::{ - ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension, - NoExtension, ChainType, + ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, + Properties, RuntimeGenesis, }; -pub use sp_transaction_pool::{TransactionPool, InPoolTransaction, error::IntoPoolError}; -pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; -pub use sc_rpc::Metadata as RpcMetadata; +use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; +pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use std::{ops::Deref, result::Result, sync::Arc}; -#[doc(hidden)] -pub use sc_network::config::{ FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder, TransactionImport, TransactionImportFuture, }; +pub use sc_network::config::{OnDemand, TransactionImport, TransactionImportFuture}; +pub use sc_rpc::Metadata as RpcMetadata; pub use sc_tracing::TracingReceiver; -pub use task_manager::SpawnTaskHandle; -pub use task_manager::TaskManager; -pub use sp_consensus::import_queue::ImportQueue; -use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; +pub use sc_transaction_pool::Options as TransactionPoolOptions; +pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; +#[doc(hidden)] +pub use std::{ops::Deref, result::Result, sync::Arc}; +pub use task_manager::{SpawnTaskHandle, TaskManager}; const DEFAULT_PROTOCOL_ID: &str = "sup"; -/// A type that implements `MallocSizeOf` on native but not wasm. -#[cfg(not(target_os = "unknown"))] -pub trait MallocSizeOfWasm: MallocSizeOf {} -#[cfg(target_os = "unknown")] -pub trait MallocSizeOfWasm {} -#[cfg(not(target_os = "unknown"))] -impl<T: MallocSizeOf> MallocSizeOfWasm for T {} -#[cfg(target_os = "unknown")] -impl<T> MallocSizeOfWasm for T {} - /// RPC handlers that can perform RPC queries. #[derive(Clone)] -pub struct RpcHandlers(Arc<jsonrpc_core::MetaIoHandler<sc_rpc::Metadata, sc_rpc_server::RpcMiddleware>>); +pub struct RpcHandlers( + Arc<jsonrpc_core::MetaIoHandler<sc_rpc::Metadata, sc_rpc_server::RpcMiddleware>>, +); impl RpcHandlers { /// Starts an RPC query. @@ -108,71 +96,23 @@ impl RpcHandlers { /// /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to /// send back spontaneous events. - pub fn rpc_query(&self, mem: &RpcSession, request: &str) - -> Pin<Box<dyn Future<Output = Option<String>> + Send>> { - self.0.handle_request(request, mem.metadata.clone()) - .compat() - .map(|res| res.expect("this should never fail")) - .boxed() + pub fn rpc_query( + &self, + mem: &RpcSession, + request: &str, + ) -> Pin<Box<dyn Future<Output = Option<String>> + Send>> { + self.0.handle_request(request, mem.metadata.clone()).boxed() } /// Provides access to the underlying `MetaIoHandler` - pub fn io_handler(&self) - -> Arc<jsonrpc_core::MetaIoHandler<sc_rpc::Metadata, sc_rpc_server::RpcMiddleware>> { + pub fn io_handler( + &self, + ) -> Arc<jsonrpc_core::MetaIoHandler<sc_rpc::Metadata, sc_rpc_server::RpcMiddleware>> { self.0.clone() } } -/// Sinks to propagate network status updates. -/// For each element, every time the `Interval` fires we push an element on the sender. -#[derive(Clone)] -pub struct NetworkStatusSinks<B: BlockT> { - status: Arc<status_sinks::StatusSinks<NetworkStatus<B>>>, - state: Arc<status_sinks::StatusSinks<NetworkState>>, -} - -impl<B: BlockT> NetworkStatusSinks<B> { - fn new() -> Self { - Self { - status: Arc::new(status_sinks::StatusSinks::new()), - state: Arc::new(status_sinks::StatusSinks::new()), - } - } - - /// Returns a receiver that periodically yields a [`NetworkStatus`].
- pub fn status_stream(&self, interval: Duration) - -> TracingUnboundedReceiver<NetworkStatus<B>> - { - let (sink, stream) = tracing_unbounded("mpsc_network_status"); - self.status.push(interval, sink); - stream - } - - /// Returns a receiver that periodically yields a [`NetworkState`]. - pub fn state_stream(&self, interval: Duration) - -> TracingUnboundedReceiver<NetworkState> - { - let (sink, stream) = tracing_unbounded("mpsc_network_state"); - self.state.push(interval, sink); - stream - } - -} - -/// Sinks to propagate telemetry connection established events. -#[derive(Default, Clone)] -pub struct TelemetryConnectionSinks(Arc<Mutex<Vec<TracingUnboundedSender<()>>>>); - -impl TelemetryConnectionSinks { - /// Get event stream for telemetry connection established events. - pub fn on_connect_stream(&self) -> TracingUnboundedReceiver<()> { - let (sink, stream) = tracing_unbounded("mpsc_telemetry_on_connect"); - self.0.lock().push(sink); - stream - } -} - -/// An imcomplete set of chain components, but enough to run the chain ops subcommands. +/// An incomplete set of chain components, but enough to run the chain ops subcommands. pub struct PartialComponents<Client, Backend, SelectChain, ImportQueue, TransactionPool, Other> { /// A shared client instance. pub client: Arc<Client>, @@ -188,8 +128,6 @@ pub struct PartialComponents<Client, Backend, SelectChain, ImportQueue, TransactionPool, Other> pub transaction_pool: Arc<TransactionPool>, - /// A registry of all providers of `InherentData`. - pub inherent_data_providers: sp_inherents::InherentDataProviders, /// Everything else that needs to be passed into the main build function. pub other: Other, } @@ -200,12 +138,11 @@ async fn build_network_future< B: BlockT, - C: BlockchainEvents<B>, + C: BlockchainEvents<B> + HeaderBackend<B>, - H: sc_network::ExHashT -> ( + H: sc_network::ExHashT, +>( role: Role, mut network: sc_network::NetworkWorker<B, H>, client: Arc<C>, - status_sinks: NetworkStatusSinks<B>, mut rpc_rx: TracingUnboundedReceiver<sc_rpc::system::Request<B>>, should_have_peers: bool, announce_imported_blocks: bool, ) { @@ -223,7 +160,9 @@ // ready. This way, we only get the latest finalized block. stream::poll_fn(move |cx| { let mut last = None; - while let Poll::Ready(Some(item)) = Pin::new(&mut finality_notification_stream).poll_next(cx) { + while let Poll::Ready(Some(item)) = + Pin::new(&mut finality_notification_stream).poll_next(cx) + { last = Some(item); } if let Some(last) = last { @@ -231,11 +170,12 @@ } else { Poll::Pending } - }).fuse() + }) + .fuse() }; loop { - futures::select!{ + futures::select! { // List of blocks that the client has imported. notification = imported_blocks_stream.next() => { let notification = match notification { @@ -246,11 +186,11 @@ }; if announce_imported_blocks { - network.service().announce_block(notification.hash, Vec::new()); + network.service().announce_block(notification.hash, None); } - if let sp_consensus::BlockOrigin::Own = notification.origin { - network.service().own_block_imported( + if notification.is_new_best { + network.service().new_best_block_imported( notification.hash, notification.header.number().clone(), ); @@ -314,6 +254,14 @@ ))), }; } + sc_rpc::system::Request::NetworkReservedPeers(sender) => { + let reserved_peers = network.reserved_peers(); + let reserved_peers = reserved_peers + .map(|peer_id| peer_id.to_base58()) + .collect(); + + let _ = sender.send(reserved_peers); + } sc_rpc::system::Request::NodeRoles(sender) => { use sc_rpc::system::NodeRole; @@ -321,7 +269,6 @@ Role::Authority { .. } => NodeRole::Authority, Role::Light => NodeRole::LightClient, Role::Full => NodeRole::Full, - Role::Sentry { ..
} => NodeRole::Sentry, }; let _ = sender.send(vec![node_role]); @@ -342,23 +289,10 @@ async fn build_network_future< // used in the future to perform actions in response of things that happened on // the network. _ = (&mut network).fuse() => {} - - // At a regular interval, we send high-level status as well as - // detailed state information of the network on what are called - // "status sinks". - - status_sink = status_sinks.status.next().fuse() => { - status_sink.send(network.status()); - } - - state_sink = status_sinks.state.next().fuse() => { - state_sink.send(network.network_state()); - } } } } -#[cfg(not(target_os = "unknown"))] // Wrapper for HTTP and WS servers that makes sure they are properly shut down. mod waiting { pub struct HttpServer(pub Option); @@ -392,89 +326,113 @@ mod waiting { } } -/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. -#[cfg(not(target_os = "unknown"))] +/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them +/// alive. fn start_rpc_servers< - H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) - -> sc_rpc_server::RpcHandler + H: FnMut( + sc_rpc::DenyUnsafe, + sc_rpc_server::RpcMiddleware, + ) -> Result, Error>, >( config: &Configuration, mut gen_handler: H, - rpc_metrics: Option<&sc_rpc_server::RpcMetrics> -) -> Result, error::Error> { - fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> - where F: FnMut(&SocketAddr) -> Result, + rpc_metrics: Option, + server_metrics: sc_rpc_server::ServerMetrics, +) -> Result, Error> { + fn maybe_start_server( + address: Option, + mut start: F, + ) -> Result, Error> + where + F: FnMut(&SocketAddr) -> Result, { - Ok(match address { - Some(mut address) => Some(start(&address) - .or_else(|e| match e.kind() { - io::ErrorKind::AddrInUse | - io::ErrorKind::PermissionDenied => { - warn!("Unable to bind RPC server to {}. Trying random port.", address); - address.set_port(0); - start(&address) + address + .map(|mut address| { + start(&address).or_else(|e| match e { + Error::Io(e) => match e.kind() { + io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => { + warn!("Unable to bind RPC server to {}. 
Trying random port.", address); + address.set_port(0); + start(&address) + }, + _ => Err(e.into()), + }, - _ => Err(e), - })?), - None => None, - }) + e => Err(e), + }) + }) + .transpose() } fn deny_unsafe(addr: &SocketAddr, methods: &RpcMethods) -> sc_rpc::DenyUnsafe { let is_exposed_addr = !addr.ip().is_loopback(); match (is_exposed_addr, methods) { - | (_, RpcMethods::Unsafe) - | (false, RpcMethods::Auto) => sc_rpc::DenyUnsafe::No, - _ => sc_rpc::DenyUnsafe::Yes + | (_, RpcMethods::Unsafe) | (false, RpcMethods::Auto) => sc_rpc::DenyUnsafe::No, + _ => sc_rpc::DenyUnsafe::Yes, } } + let rpc_method_names = sc_rpc_server::method_names(|m| gen_handler(sc_rpc::DenyUnsafe::No, m))?; Ok(Box::new(( - config.rpc_ipc.as_ref().map(|path| sc_rpc_server::start_ipc( - &*path, gen_handler( - sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ipc") - ) - )), - maybe_start_server( - config.rpc_http, - |address| sc_rpc_server::start_http( + config + .rpc_ipc + .as_ref() + .map(|path| { + sc_rpc_server::start_ipc( + &*path, + gen_handler( + sc_rpc::DenyUnsafe::No, + sc_rpc_server::RpcMiddleware::new( + rpc_metrics.clone(), + rpc_method_names.clone(), + "ipc", + ), + )?, + server_metrics.clone(), + ) + .map_err(Error::from) + }) + .transpose()?, + maybe_start_server(config.rpc_http, |address| { + sc_rpc_server::start_http( address, config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "http") - ), - ), - )?.map(|s| waiting::HttpServer(Some(s))), - maybe_start_server( - config.rpc_ws, - |address| sc_rpc_server::start_ws( + sc_rpc_server::RpcMiddleware::new( + rpc_metrics.clone(), + rpc_method_names.clone(), + "http", + ), + )?, + config.rpc_max_payload, + config.tokio_handle.clone(), + ) + .map_err(Error::from) + })? + .map(|s| waiting::HttpServer(Some(s))), + maybe_start_server(config.rpc_ws, |address| { + sc_rpc_server::start_ws( address, config.rpc_ws_max_connections, config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ws") - ), - ), - )?.map(|s| waiting::WsServer(Some(s))), + sc_rpc_server::RpcMiddleware::new( + rpc_metrics.clone(), + rpc_method_names.clone(), + "ws", + ), + )?, + config.rpc_max_payload, + server_metrics.clone(), + config.tokio_handle.clone(), + ) + .map_err(Error::from) + })? + .map(|s| waiting::WsServer(Some(s))), ))) } -/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. -#[cfg(target_os = "unknown")] -fn start_rpc_servers< - H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) - -> sc_rpc_server::RpcHandler<sc_rpc::Metadata> ->( - _: &Configuration, - _: H, - _: Option<&sc_rpc_server::RpcMetrics> -) -> Result<Box<dyn std::any::Any + Send>, error::Error> { - Ok(Box::new(())) -} - /// An RPC session. Used to perform in-memory RPC queries (ie. RPC queries that don't go through /// the HTTP or WebSockets server). #[derive(Clone)] @@ -489,10 +447,8 @@ impl RpcSession { /// messages. /// /// The `RpcSession` must be kept alive in order to receive messages on the sender. - pub fn new(sender: futures01::sync::mpsc::Sender<String>) -> RpcSession { - RpcSession { - metadata: sender.into(), - } + pub fn new(sender: futures::channel::mpsc::UnboundedSender<String>) -> RpcSession { + RpcSession { metadata: sender.into() } } } @@ -506,13 +462,12 @@ pub struct TransactionPoolAdapter<C, P> {
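The reworked `maybe_start_server` above keeps one behavior worth calling out: if the configured port is taken or privileged, it falls back to an OS-assigned port rather than failing startup. The same logic isolated on a plain `TcpListener` (std only):

```rust
use std::io;
use std::net::{SocketAddr, TcpListener};

fn bind_with_fallback(mut addr: SocketAddr) -> io::Result<TcpListener> {
    TcpListener::bind(addr).or_else(|e| match e.kind() {
        io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => {
            addr.set_port(0); // let the OS pick a free port
            TcpListener::bind(addr)
        },
        _ => Err(e),
    })
}

fn main() {
    // Port 80 is usually privileged, so this typically lands on a random port.
    let listener = bind_with_fallback("127.0.0.1:80".parse().unwrap()).unwrap();
    println!("bound to {}", listener.local_addr().unwrap());
}
```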
/// Get transactions for propagation. /// /// Function extracted to simplify the test and prevent creating `ServiceFactory`. -fn transactions_to_propagate<Pool, B, H, E>(pool: &Pool) - -> Vec<(H, B::Extrinsic)> +fn transactions_to_propagate<Pool, B, H, E>(pool: &Pool) -> Vec<(H, B::Extrinsic)> where - Pool: TransactionPool<Block = B, Hash = H, Error = E>, + Pool: TransactionPool<Block = B, Hash = H, Error = E>, B: BlockT, H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, - E: IntoPoolError + From<sp_transaction_pool::error::Error>, + E: IntoPoolError + From<sc_transaction_pool_api::error::Error>, { pool.ready() .filter(|t| t.is_propagable()) @@ -524,14 +479,13 @@ where .collect() } -impl<B, H, C, Pool, E> sc_network::config::TransactionPool<H, B> for - TransactionPoolAdapter<C, Pool> +impl<B, H, C, Pool, E> sc_network::config::TransactionPool<H, B> for TransactionPoolAdapter<C, Pool> where C: sc_network::config::Client<B, H> + Send + Sync, - Pool: 'static + TransactionPool<Block = B, Hash = H, Error = E>, + Pool: 'static + TransactionPool<Block = B, Hash = H, Error = E>, B: BlockT, H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, - E: 'static + IntoPoolError + From<sp_transaction_pool::error::Error>, + E: 'static + IntoPoolError + From<sc_transaction_pool_api::error::Error>, { fn transactions(&self) -> Vec<(H, B::Extrinsic)> { transactions_to_propagate(&*self.pool) @@ -541,10 +495,7 @@ where self.pool.hash_of(transaction) } - fn import( - &self, - transaction: B::Extrinsic, - ) -> TransactionImportFuture { + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture { if !self.imports_external_transactions { debug!("Transaction rejected"); Box::pin(futures::future::ready(TransactionImport::None)); } let uxt = match B::Extrinsic::decode(&mut &transaction[..]) { Ok(uxt) => uxt, Err(e) => { debug!("Transaction invalid: {:?}", e); - return Box::pin(futures::future::ready(TransactionImport::Bad)); - } + return Box::pin(futures::future::ready(TransactionImport::Bad)) + }, }; let best_block_id = BlockId::hash(self.client.info().best_hash); - let import_future = self.pool.submit_one(&best_block_id, sp_transaction_pool::TransactionSource::External, uxt); + let import_future = self.pool.submit_one( + &best_block_id, + sc_transaction_pool_api::TransactionSource::External, + uxt, + ); Box::pin(async move { match import_future.await { Ok(_) => TransactionImport::NewGood, Err(e) => match e.into_pool_error() { - Ok(sp_transaction_pool::error::Error::AlreadyImported(_)) => TransactionImport::KnownGood, + Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => + TransactionImport::KnownGood, Ok(e) => { debug!("Error adding transaction to the pool: {:?}", e); TransactionImport::Bad - } + }, Err(e) => { debug!("Error converting pool error: {:?}", e); - // it is not bad at least, just some internal node logic error, so peer is innocent. + // it is not bad at least, just some internal node logic error, so peer is + // innocent. TransactionImport::KnownGood - } - } + }, + }, } }) } @@ -586,11 +543,10 @@ where } fn transaction(&self, hash: &H) -> Option<B::Extrinsic> { - self.pool.ready_transaction(hash) - .and_then( - // Only propagable transactions should be resolved for network service. - |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None } - ) + self.pool.ready_transaction(hash).and_then( + // Only propagable transactions should be resolved for network service.
+ |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None }, + ) } } @@ -598,10 +554,13 @@ where mod tests { use super::*; use futures::executor::block_on; + use sc_transaction_pool::BasicPool; use sp_consensus::SelectChain; use sp_runtime::traits::BlindCheckable; - use substrate_test_runtime_client::{prelude::*, runtime::{Extrinsic, Transfer}}; - use sc_transaction_pool::BasicPool; + use substrate_test_runtime_client::{ + prelude::*, + runtime::{Extrinsic, Transfer}, + }; #[test] fn should_not_propagate_transactions_that_are_marked_as_such() { @@ -609,26 +568,25 @@ mod tests { let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let source = sp_runtime::transaction_validity::TransactionSource::External; - let best = longest_chain.best_chain().unwrap(); + let best = block_on(longest_chain.best_chain()).unwrap(); let transaction = Transfer { amount: 5, nonce: 0, from: AccountKeyring::Alice.into(), to: Default::default(), - }.into_signed_tx(); - block_on(pool.submit_one( - &BlockId::hash(best.hash()), source, transaction.clone()), - ).unwrap(); + } + .into_signed_tx(); + block_on(pool.submit_one(&BlockId::hash(best.hash()), source, transaction.clone())) + .unwrap(); block_on(pool.submit_one( - &BlockId::hash(best.hash()), source, Extrinsic::IncludeData(vec![1])), - ).unwrap(); + &BlockId::hash(best.hash()), + source, + Extrinsic::IncludeData(vec![1]), + )) + .unwrap(); assert_eq!(pool.status().ready, 2); // when diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 0af393b53f517..4d3c6df92fee7 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,20 +18,20 @@ use std::{convert::TryFrom, time::SystemTime}; -use crate::{NetworkStatus, NetworkState, NetworkStatusSinks, config::Configuration}; +use crate::config::Configuration; use futures_timer::Delay; -use prometheus_endpoint::{register, Gauge, U64, Registry, PrometheusError, Opts, GaugeVec}; -use sc_telemetry::{telemetry, SUBSTRATE_INFO}; -use sp_api::ProvideRuntimeApi; -use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; -use sp_transaction_pool::{PoolStatus, MaintainedTransactionPool}; -use sp_utils::metrics::register_globals; -use sp_utils::mpsc::TracingUnboundedReceiver; +use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; -use sc_network::config::Role; -use std::sync::Arc; -use std::time::Duration; -use wasm_timer::Instant; +use sc_network::{config::Role, NetworkService, NetworkStatus}; +use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; +use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; +use sc_utils::metrics::register_globals; +use sp_api::ProvideRuntimeApi; +use sp_runtime::traits::{Block, NumberFor, SaturatedConversion, UniqueSaturatedInto}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; struct PrometheusMetrics { // generic info @@ -52,54 +52,74 @@ impl PrometheusMetrics { version: &str, roles: u64, ) -> Result<Self, PrometheusError> { - register(Gauge::<U64>::with_opts( - Opts::new( - "build_info", - "A metric with a constant '1' value labeled by name, version" - ) + register( + Gauge::<U64>::with_opts( + Opts::new( + "build_info", + "A metric with a constant '1' value labeled by name, version", + ) .const_label("name", name) - .const_label("version", version) - )?, &registry)?.set(1); + .const_label("version", version), + )?, + &registry, + )? + .set(1); - register(Gauge::<U64>::new( - "node_roles", "The roles the node is running as", - )?, &registry)?.set(roles); + register(Gauge::<U64>::new("node_roles", "The roles the node is running as")?, &registry)? + .set(roles); register_globals(registry)?; - let start_time_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) - .unwrap_or_default(); - register(Gauge::<U64>::new( - "process_start_time_seconds", - "Number of seconds between the UNIX epoch and the moment the process started", - )?, registry)?.set(start_time_since_epoch.as_secs()); + let start_time_since_epoch = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default(); + register( + Gauge::<U64>::new( + "process_start_time_seconds", + "Number of seconds between the UNIX epoch and the moment the process started", + )?, + registry, + )?
+ .set(start_time_since_epoch.as_secs()); Ok(Self { // generic internals - block_height: register(GaugeVec::new( - Opts::new("block_height", "Block height info of the chain"), - &["status"] - )?, registry)?, - - number_leaves: register(Gauge::new( - "number_leaves", "Number of known chain leaves (aka forks)", - )?, registry)?, - - ready_transactions_number: register(Gauge::new( - "ready_transactions_number", "Number of transactions in the ready queue", - )?, registry)?, + block_height: register( + GaugeVec::new( + Opts::new("block_height", "Block height info of the chain"), + &["status"], + )?, + registry, + )?, + + number_leaves: register( + Gauge::new("number_leaves", "Number of known chain leaves (aka forks)")?, + registry, + )?, + + ready_transactions_number: register( + Gauge::new( + "ready_transactions_number", + "Number of transactions in the ready queue", + )?, + registry, + )?, // I/ O - database_cache: register(Gauge::new( - "database_cache_bytes", "RocksDB cache size in bytes", - )?, registry)?, - state_cache: register(Gauge::new( - "state_cache_bytes", "State cache size in bytes", - )?, registry)?, - state_db: register(GaugeVec::new( - Opts::new("state_db_cache_bytes", "State DB cache in bytes"), - &["subtype"] - )?, registry)?, + database_cache: register( + Gauge::new("database_cache_bytes", "RocksDB cache size in bytes")?, + registry, + )?, + state_cache: register( + Gauge::new("state_cache_bytes", "State cache size in bytes")?, + registry, + )?, + state_db: register( + GaugeVec::new( + Opts::new("state_db_cache_bytes", "State DB cache in bytes"), + &["subtype"], + )?, + registry, + )?, }) } } @@ -112,30 +132,32 @@ pub struct MetricsService { last_update: Instant, last_total_bytes_inbound: u64, last_total_bytes_outbound: u64, + telemetry: Option<TelemetryHandle>, } impl MetricsService { /// Creates a `MetricsService` that only sends information /// to the telemetry. - pub fn new() -> Self { + pub fn new(telemetry: Option<TelemetryHandle>) -> Self { MetricsService { metrics: None, last_total_bytes_inbound: 0, last_total_bytes_outbound: 0, last_update: Instant::now(), + telemetry, } } /// Creates a `MetricsService` that sends metrics /// to prometheus alongside the telemetry. pub fn with_prometheus( + telemetry: Option<TelemetryHandle>, registry: &Registry, config: &Configuration, ) -> Result<Self, PrometheusError> { let role_bits = match config.role { Role::Full => 1u64, Role::Light => 2u64, - Role::Sentry { .. } => 3u64, Role::Authority { .. } => 4u64, }; @@ -150,6 +172,7 @@ impl MetricsService { last_total_bytes_inbound: 0, last_total_bytes_outbound: 0, last_update: Instant::now(), + telemetry, }) } @@ -160,7 +183,7 @@ impl MetricsService { mut self, client: Arc<TCl>, transactions: Arc<TExPool>, - network: NetworkStatusSinks<TBl>, + network: Arc<NetworkService<TBl, <TBl as Block>::Hash>>, ) where TBl: Block, TCl: ProvideRuntimeApi<TBl> + UsageProvider<TBl>, { let mut timer = Delay::new(Duration::from_secs(0)); let timer_interval = Duration::from_secs(5); - // Metric and telemetry update interval. - let net_status_interval = timer_interval; - let net_state_interval = Duration::from_secs(30); - - // Source of network information. - let mut net_status_rx = Some(network.status_stream(net_status_interval)); - let mut net_state_rx = Some(network.state_stream(net_state_interval)); - loop { // Wait for the next tick of the timer. (&mut timer).await; // Try to get the latest network information.
- let mut net_status = None; - let mut net_state = None; - if let Some(rx) = net_status_rx.as_mut() { - match Self::latest(rx) { - Ok(status) => { net_status = status; } - Err(()) => { net_status_rx = None; } - } - } - if let Some(rx) = net_state_rx.as_mut() { - match Self::latest(rx) { - Ok(state) => { net_state = state; } - Err(()) => { net_state_rx = None; } - } - } + let net_status = network.status().await.ok(); // Update / Send the metrics. - self.update( - &client.usage_info(), - &transactions.status(), - net_status, - net_state, - ); + self.update(&client.usage_info(), &transactions.status(), net_status); // Schedule next tick. timer.reset(timer_interval); } } - // Try to get the latest value from a receiver, dropping intermediate values. - fn latest<T>(rx: &mut TracingUnboundedReceiver<T>) -> Result<Option<T>, ()> { - let mut value = None; - - while let Ok(next) = rx.try_next() { - match next { - Some(v) => { - value = Some(v) - } - None => { - log::error!("Receiver closed unexpectedly."); - return Err(()) - } - } - } - - Ok(value) - } - fn update<T: Block>( &mut self, info: &ClientInfo<T>, txpool_status: &PoolStatus, net_status: Option<NetworkStatus<T>>, - net_state: Option<NetworkState>, ) { let now = Instant::now(); let elapsed = (now - self.last_update).as_secs(); @@ -246,6 +223,7 @@ impl MetricsService { // Update/send metrics that are always available. telemetry!( + self.telemetry; SUBSTRATE_INFO; "system.interval"; "height" => best_number, @@ -259,14 +237,8 @@ ); if let Some(metrics) = self.metrics.as_ref() { - metrics - .block_height - .with_label_values(&["finalized"]) - .set(finalized_number); - metrics - .block_height - .with_label_values(&["best"]) - .set(best_number); + metrics.block_height.with_label_values(&["finalized"]).set(finalized_number); + metrics.block_height.with_label_values(&["best"]).set(best_number); if let Ok(leaves) = u64::try_from(info.chain.number_leaves) { metrics.number_leaves.set(leaves); @@ -278,15 +250,17 @@ metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64); metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64); - metrics.state_db.with_label_values(&["non_canonical"]).set( - info.memory.state_db.non_canonical.as_bytes() as u64, - ); + metrics + .state_db + .with_label_values(&["non_canonical"]) + .set(info.memory.state_db.non_canonical.as_bytes() as u64); if let Some(pruning) = info.memory.state_db.pruning { metrics.state_db.with_label_values(&["pruning"]).set(pruning.as_bytes() as u64); } - metrics.state_db.with_label_values(&["pinned"]).set( - info.memory.state_db.pinned.as_bytes() as u64, - ); + metrics + .state_db + .with_label_values(&["pinned"]) + .set(info.memory.state_db.pinned.as_bytes() as u64); } } @@ -298,16 +272,16 @@ let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; - let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = - if elapsed > 0 { - self.last_total_bytes_inbound = total_bytes_inbound; - self.last_total_bytes_outbound = total_bytes_outbound; - (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) - } else { - (diff_bytes_inbound, diff_bytes_outbound) - }; + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound,
diff_bytes_outbound) + }; telemetry!( + self.telemetry; SUBSTRATE_INFO; "system.interval"; "peers" => num_peers, @@ -316,23 +290,15 @@ ); if let Some(metrics) = self.metrics.as_ref() { - let best_seen_block = net_status - .best_seen_block - .map(|num: NumberFor<T>| num.unique_saturated_into() as u64); + let best_seen_block: Option<u64> = + net_status.best_seen_block.map(|num: NumberFor<T>| { + UniqueSaturatedInto::<u64>::unique_saturated_into(num) + }); if let Some(best_seen_block) = best_seen_block { metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); } } } - - // Send network state information, if any. - if let Some(net_state) = net_state { - telemetry!( - SUBSTRATE_INFO; - "system.network_state"; - "state" => net_state, - ); - } } } diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index 961435965403f..5657a80332109 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -1,34 +1,38 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + //! Substrate service tasks management module. -use std::{panic, result::Result, pin::Pin}; +use crate::{config::TaskType, Error}; use exit_future::Signal; -use log::{debug, error}; use futures::{ + future::{join_all, pending, select, try_join_all, BoxFuture, Either}, Future, FutureExt, StreamExt, - future::{select, Either, BoxFuture, join_all, try_join_all, pending}, - sink::SinkExt, }; +use log::debug; use prometheus_endpoint::{ - exponential_buckets, register, - PrometheusError, - CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64 + exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, + Registry, U64, }; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{panic, pin::Pin, result::Result}; +use tokio::{runtime::Handle, task::JoinHandle}; use tracing_futures::Instrument; -use crate::{config::{TaskExecutor, TaskType, JoinFuture}, Error}; mod prometheus_future; #[cfg(test)] @@ -38,9 +42,9 @@ mod tests; #[derive(Clone)] pub struct SpawnTaskHandle { on_exit: exit_future::Exit, - executor: TaskExecutor, + tokio_handle: Handle, metrics: Option<Metrics>, - task_notifier: TracingUnboundedSender<JoinFuture>, + task_notifier: TracingUnboundedSender<JoinHandle<()>>, } impl SpawnTaskHandle { @@ -57,7 +61,11 @@ impl SpawnTaskHandle { }
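The `spawn_inner` hunk that follows bridges `TaskType::Blocking` futures onto tokio's blocking pool and drives them there with `Handle::block_on`. A standalone sketch of that bridge, assuming tokio 1.x:

```rust
use tokio::runtime::Runtime;

fn main() {
    let rt = Runtime::new().unwrap();
    let handle = rt.handle().clone();
    // The blocking-pool thread may block freely; block_on drives the future there.
    let join = rt.spawn_blocking(move || {
        handle.block_on(async {
            std::thread::sleep(std::time::Duration::from_millis(10));
            7
        })
    });
    assert_eq!(rt.block_on(join).unwrap(), 7);
}
```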
- pub fn spawn_blocking(&self, name: &'static str, task: impl Future + Send + 'static) { + pub fn spawn_blocking( + &self, + name: &'static str, + task: impl Future + Send + 'static, + ) { self.spawn_inner(name, task, TaskType::Blocking) } @@ -70,7 +78,7 @@ impl SpawnTaskHandle { ) { if self.task_notifier.is_closed() { debug!("Attempt to spawn a new task has been prevented: {}", name); - return; + return } let on_exit = self.on_exit.clone(); @@ -90,7 +98,8 @@ impl SpawnTaskHandle { let task = { let poll_duration = metrics.poll_duration.with_label_values(&[name]); let poll_start = metrics.poll_start.with_label_values(&[name]); - let inner = prometheus_future::with_poll_durations(poll_duration, poll_start, task); + let inner = + prometheus_future::with_poll_durations(poll_duration, poll_start, task); // The logic of `AssertUnwindSafe` here is ok considering that we throw // away the `Future` after it has panicked. panic::AssertUnwindSafe(inner).catch_unwind() @@ -101,32 +110,33 @@ impl SpawnTaskHandle { Either::Right((Err(payload), _)) => { metrics.tasks_ended.with_label_values(&[name, "panic"]).inc(); panic::resume_unwind(payload) - } + }, Either::Right((Ok(()), _)) => { metrics.tasks_ended.with_label_values(&[name, "finished"]).inc(); - } + }, Either::Left(((), _)) => { // The `on_exit` has triggered. metrics.tasks_ended.with_label_values(&[name, "interrupted"]).inc(); - } + }, } - } else { futures::pin_mut!(task); let _ = select(on_exit, task).await; } + } + .in_current_span(); + + let join_handle = match task_type { + TaskType::Async => self.tokio_handle.spawn(future), + TaskType::Blocking => { + let handle = self.tokio_handle.clone(); + self.tokio_handle.spawn_blocking(move || { + handle.block_on(future); + }) + }, }; - let join_handle = self.executor.spawn(Box::pin(future.in_current_span()), task_type); - let mut task_notifier = self.task_notifier.clone(); - self.executor.spawn( - Box::pin(async move { - if let Err(err) = task_notifier.send(join_handle).await { - error!("Could not send spawned task handle to queue: {}", err); - } - }), - TaskType::Async, - ); + let _ = self.task_notifier.unbounded_send(join_handle); } } @@ -144,6 +154,7 @@ impl sp_core::traits::SpawnNamed for SpawnTaskHandle { /// task spawned through it fails. The service should be on the receiver side /// and will shut itself down whenever it receives any message, i.e. an /// essential task has failed. +#[derive(Clone)] pub struct SpawnEssentialTaskHandle { essential_failed_tx: TracingUnboundedSender<()>, inner: SpawnTaskHandle, @@ -155,10 +166,7 @@ impl SpawnEssentialTaskHandle { essential_failed_tx: TracingUnboundedSender<()>, spawn_task_handle: SpawnTaskHandle, ) -> SpawnEssentialTaskHandle { - SpawnEssentialTaskHandle { - essential_failed_tx, - inner: spawn_task_handle, - } + SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle } } /// Spawns the given task with the given name. @@ -186,17 +194,25 @@ impl SpawnEssentialTaskHandle { task_type: TaskType, ) { let essential_failed = self.essential_failed_tx.clone(); - let essential_task = std::panic::AssertUnwindSafe(task) - .catch_unwind() - .map(move |_| { - log::error!("Essential task `{}` failed. Shutting down service.", name); - let _ = essential_failed.close_channel(); - }); + let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| { + log::error!("Essential task `{}` failed. 
Shutting down service.", name); + let _ = essential_failed.close_channel(); + }); let _ = self.inner.spawn_inner(name, essential_task, task_type); } } +impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle { + fn spawn_essential_blocking(&self, name: &'static str, future: BoxFuture<'static, ()>) { + self.spawn_blocking(name, future); + } + + fn spawn_essential(&self, name: &'static str, future: BoxFuture<'static, ()>) { + self.spawn(name, future); + } +} + /// Helper struct to manage background/async tasks in Service. pub struct TaskManager { /// A future that resolves when the service has exited, this is useful to @@ -204,8 +220,8 @@ pub struct TaskManager { on_exit: exit_future::Exit, /// A signal that makes the exit future above resolve, fired on service drop. signal: Option<Signal>, - /// How to spawn background tasks. - executor: TaskExecutor, + /// Tokio runtime handle that is used to spawn futures. + tokio_handle: Handle, /// Prometheus metric where to report the polling times. metrics: Option<Metrics>, /// Send a signal when a spawned essential task has concluded. The next time /// the service future is polled it should complete with an error. essential_failed_tx: TracingUnboundedSender<()>, /// A receiver for spawned essential-tasks concluding. essential_failed_rx: TracingUnboundedReceiver<()>, /// Things to keep alive until the task manager is dropped. - keep_alive: Box<dyn std::any::Any + Send + Sync>, + keep_alive: Box<dyn std::any::Any + Send>, /// A sender to a stream of background tasks. This is used for the completion future. - task_notifier: TracingUnboundedSender<JoinFuture>, + task_notifier: TracingUnboundedSender<JoinHandle<()>>, /// This future will complete when all the tasks are joined and the stream is closed. - completion_future: JoinFuture, + completion_future: JoinHandle<()>, /// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. @@ -228,12 +244,12 @@ } impl TaskManager { - /// If a Prometheus registry is passed, it will be used to report statistics about the - /// service tasks. - pub(super) fn new( - executor: TaskExecutor, + /// If a Prometheus registry is passed, it will be used to report statistics about the + /// service tasks. + pub fn new( + tokio_handle: Handle, ipfs_rt: tokio::runtime::Runtime, - prometheus_registry: Option<&Registry> + prometheus_registry: Option<&Registry>, ) -> Result<Self, PrometheusError> { let (signal, on_exit) = exit_future::signal(); @@ -246,17 +262,17 @@ impl TaskManager { // NOTE: for_each_concurrent will await on all the JoinHandle futures at the same time. It // is possible to limit this but it's actually better for the memory foot print to await // them all to not accumulate anything on that stream.
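A runnable miniature of the drain described in the NOTE above: every `JoinHandle` sent over the channel is awaited through a single `for_each_concurrent` future, so nothing accumulates on the stream (futures + tokio 1.x assumed):

```rust
use futures::StreamExt;
use tokio::runtime::Runtime;

fn main() {
    let rt = Runtime::new().unwrap();
    let (tx, rx) = futures::channel::mpsc::unbounded();
    // Await all incoming JoinHandles concurrently, swallowing their results.
    let completion =
        rt.spawn(rx.for_each_concurrent(None, |h: tokio::task::JoinHandle<()>| async move {
            let _ = h.await;
        }));
    tx.unbounded_send(rt.spawn(async {})).unwrap();
    drop(tx); // closing the channel lets the completion future finish
    rt.block_on(completion).unwrap();
}
```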
- let completion_future = executor.spawn( - Box::pin(background_tasks.for_each_concurrent(None, |x| x)), - TaskType::Async, - ); + let completion_future = + tokio_handle.spawn(background_tasks.for_each_concurrent(None, |x| async move { + let _ = x.await; + })); let ipfs_rt = std::sync::Arc::new(parking_lot::Mutex::new(ipfs_rt)); Ok(Self { on_exit, signal: Some(signal), - executor, + tokio_handle, metrics, essential_failed_tx, essential_failed_rx, @@ -272,7 +288,7 @@ impl TaskManager { pub fn spawn_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { on_exit: self.on_exit.clone(), - executor: self.executor.clone(), + tokio_handle: self.tokio_handle.clone(), metrics: self.metrics.clone(), task_notifier: self.task_notifier.clone(), } @@ -300,8 +316,9 @@ impl TaskManager { Box::pin(async move { join_all(children_shutdowns).await; - completion_future.await; - drop(keep_alive); + let _ = completion_future.await; + + let _ = keep_alive; }) } @@ -312,16 +329,21 @@ /// /// This function will not wait until the end of the remaining task. You must call and await /// `clean_shutdown()` after this. - pub fn future<'a>(&'a mut self) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> { + pub fn future<'a>( + &'a mut self, + ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> { Box::pin(async move { let mut t1 = self.essential_failed_rx.next().fuse(); let mut t2 = self.on_exit.clone().fuse(); let mut t3 = try_join_all( - self.children.iter_mut().map(|x| x.future()) + self.children + .iter_mut() + .map(|x| x.future()) // Never end this future if there is no error because if there is no children, // it must not stop - .chain(std::iter::once(pending().boxed())) - ).fuse(); + .chain(std::iter::once(pending().boxed())), + ) + .fuse(); futures::select! { _ = t1 => Err(Error::Other("Essential task failed.".into())), @@ -344,7 +366,7 @@ } /// Set what the task manager should keep alive, can be called multiple times. - pub fn keep_alive<T: 'static + Send + Sync>(&mut self, to_keep_alive: T) { + pub fn keep_alive<T: 'static + Send>(&mut self, to_keep_alive: T) { // allows this fn to safely called multiple times. use std::mem; let old = mem::replace(&mut self.keep_alive, Box::new(())); diff --git a/client/service/src/task_manager/prometheus_future.rs b/client/service/src/task_manager/prometheus_future.rs index 53bd59aa7a507..43a76a0f596c2 100644 --- a/client/service/src/task_manager/prometheus_future.rs +++ b/client/service/src/task_manager/prometheus_future.rs @@ -1,34 +1,39 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + //!
diff --git a/client/service/src/task_manager/prometheus_future.rs b/client/service/src/task_manager/prometheus_future.rs
index 53bd59aa7a507..43a76a0f596c2 100644
--- a/client/service/src/task_manager/prometheus_future.rs
+++ b/client/service/src/task_manager/prometheus_future.rs
@@ -1,34 +1,39 @@
-// Copyright 2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.

-// Substrate is distributed in the hope that it will be useful,
+// This program is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
 //! Wrapper around a `Future` that reports statistics about when the `Future` is polled.

 use futures::prelude::*;
 use prometheus_endpoint::{Counter, Histogram, U64};
-use std::{fmt, pin::Pin, task::{Context, Poll}};
+use std::{
+	fmt,
+	pin::Pin,
+	task::{Context, Poll},
+};

 /// Wraps around a `Future`. Report the polling duration to the `Histogram` and when the polling
 /// starts to the `Counter`.
 pub fn with_poll_durations<T>(
 	poll_duration: Histogram,
 	poll_start: Counter<U64>,
-	inner: T
+	inner: T,
 ) -> PrometheusFuture<T> {
-	PrometheusFuture {
-		inner,
-		poll_duration,
-		poll_start,
-	}
+	PrometheusFuture { inner, poll_duration, poll_start }
 }

 /// Wraps around `Future` and adds diagnostics to it.
diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs
index 27d9b0b9e9ad9..291d71ebaf03b 100644
--- a/client/service/src/task_manager/tests.rs
+++ b/client/service/src/task_manager/tests.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -16,13 +16,10 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-use crate::config::TaskExecutor;
 use crate::task_manager::TaskManager;
 use futures::{future::FutureExt, pin_mut, select};
 use parking_lot::Mutex;
-use std::any::Any;
-use std::sync::Arc;
-use std::time::Duration;
+use std::{any::Any, sync::Arc, time::Duration};

 #[derive(Clone, Debug)]
 struct DropTester(Arc<Mutex<usize>>);
@@ -38,6 +35,12 @@ impl DropTester {
 		*self.0.lock() += 1;
 		DropTesterRef(self.clone())
 	}
+
+	fn wait_on_drop(&self) {
+		while *self != 0 {
+			std::thread::sleep(std::time::Duration::from_millis(10));
+		}
+	}
 }

 impl PartialEq<usize> for DropTester {
@@ -68,7 +71,7 @@ fn ensure_drop_tester_working() {

 async fn run_background_task(_keep_alive: impl Any) {
 	loop {
-		tokio::time::delay_for(Duration::from_secs(1)).await;
+		tokio::time::sleep(Duration::from_secs(1)).await;
 	}
 }

@@ -77,55 +80,56 @@ async fn run_background_task_blocking(duration: Duration, _keep_alive: impl Any) {
 		// block for X sec (not interruptible)
 		std::thread::sleep(duration);
 		// await for 1 sec (interruptible)
-		tokio::time::delay_for(Duration::from_secs(1)).await;
+		tokio::time::sleep(Duration::from_secs(1)).await;
 	}
 }

+fn new_task_manager(tokio_handle: tokio::runtime::Handle) -> TaskManager {
+	TaskManager::new(tokio_handle, None).unwrap()
+}
+
 #[test]
 fn ensure_tasks_are_awaited_on_shutdown() {
-	let mut runtime = tokio::runtime::Runtime::new().unwrap();
+	let runtime = tokio::runtime::Runtime::new().unwrap();
 	let handle = runtime.handle().clone();
-	let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into();
-	let task_manager = TaskManager::new(task_executor, None).unwrap();
+	let task_manager = new_task_manager(handle);
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
 	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
 	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 2);
 	// allow the tasks to even start
-	runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await });
+	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
 	assert_eq!(drop_tester, 2);
 	runtime.block_on(task_manager.clean_shutdown());
-
assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_keep_alive_during_shutdown() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(handle); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); task_manager.keep_alive(drop_tester.new_ref()); spawn_handle.spawn("task1", run_background_task(())); assert_eq!(drop_tester, 1); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 1); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_blocking_futures_are_awaited_on_shutdown() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let task_manager = TaskManager::new(task_executor, None).unwrap(); + let task_manager = new_task_manager(handle); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn( @@ -138,7 +142,7 @@ fn ensure_blocking_futures_are_awaited_on_shutdown() { ); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); runtime.block_on(task_manager.clean_shutdown()); assert_eq!(drop_tester, 0); @@ -146,39 +150,37 @@ fn ensure_blocking_futures_are_awaited_on_shutdown() { #[test] fn ensure_no_task_can_be_spawn_after_terminate() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(handle); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); task_manager.terminate(); spawn_handle.spawn("task3", run_background_task(drop_tester.new_ref())); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_ends_when_task_manager_terminated() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = 
TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(handle); let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); task_manager.terminate(); runtime.block_on(task_manager.future()).expect("future has ended without error"); @@ -188,11 +190,10 @@ fn ensure_task_manager_future_ends_when_task_manager_terminated() { #[test] fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let mut task_manager = new_task_manager(handle); let spawn_handle = task_manager.spawn_handle(); let spawn_essential_handle = task_manager.spawn_essential_handle(); let drop_tester = DropTester::new(); @@ -200,25 +201,26 @@ fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 2); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); spawn_essential_handle.spawn("task3", async { panic!("task failed") }); - runtime.block_on(task_manager.future()).expect_err("future()'s Result must be Err"); + runtime + .block_on(task_manager.future()) + .expect_err("future()'s Result must be Err"); assert_eq!(drop_tester, 2); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_children_tasks_ends_when_task_manager_terminated() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); - let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let mut task_manager = new_task_manager(handle.clone()); + let child_1 = new_task_manager(handle.clone()); let spawn_handle_child_1 = child_1.spawn_handle(); - let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_2 = new_task_manager(handle.clone()); let spawn_handle_child_2 = child_2.spawn_handle(); task_manager.add_child(child_1); task_manager.add_child(child_2); @@ -230,25 +232,24 @@ fn ensure_children_tasks_ends_when_task_manager_terminated() { spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 4); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 4); task_manager.terminate(); 
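Between these two statements sits the whole termination contract: `terminate()` only fires the manager's exit signal, and the `future()` polled on the next line is what observes it, resolving `Ok` on orderly exit or `Err` if an essential task failed first. Underneath is the `exit-future` crate; a minimal sketch, assuming its published `signal()`/`fire()` API:

use futures::{executor::block_on, future::select, pin_mut};

fn main() {
    // One Signal fans out to every clone of its Exit future.
    let (signal, exit) = exit_future::signal();

    let task = async { /* would run until exit resolves */ };
    pin_mut!(task);
    pin_mut!(exit);

    // Firing the signal resolves `exit`; a task raced against it via
    // `select` is dropped at its next poll, which is how spawned tasks
    // get unwound on termination.
    let _ = signal.fire();
    block_on(select(exit, task));
}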
runtime.block_on(task_manager.future()).expect("future has ended without error"); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); - let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let mut task_manager = new_task_manager(handle.clone()); + let child_1 = new_task_manager(handle.clone()); let spawn_handle_child_1 = child_1.spawn_handle(); let spawn_essential_handle_child_1 = child_1.spawn_essential_handle(); - let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_2 = new_task_manager(handle.clone()); let spawn_handle_child_2 = child_2.spawn_handle(); task_manager.add_child(child_1); task_manager.add_child(child_2); @@ -260,25 +261,26 @@ fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 4); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 4); spawn_essential_handle_child_1.spawn("task5", async { panic!("task failed") }); - runtime.block_on(task_manager.future()).expect_err("future()'s Result must be Err"); + runtime + .block_on(task_manager.future()) + .expect_err("future()'s Result must be Err"); assert_eq!(drop_tester, 4); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); - let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); - let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); - let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let mut task_manager = new_task_manager(handle.clone()); + let child_1 = new_task_manager(handle.clone()); let spawn_handle_child_1 = child_1.spawn_handle(); - let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_2 = new_task_manager(handle.clone()); let spawn_handle_child_2 = child_2.spawn_handle(); task_manager.add_child(child_1); task_manager.add_child(child_2); @@ -290,12 +292,12 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); assert_eq!(drop_tester, 4); // allow the tasks to even start - runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 4); spawn_handle_child_1.spawn("task5", async { panic!("task failed") }); runtime.block_on(async { let t1 = task_manager.future().fuse(); - let t2 = tokio::time::delay_for(Duration::from_secs(3)).fuse(); + let t2 = 
tokio::time::sleep(Duration::from_secs(3)).fuse(); pin_mut!(t1, t2); @@ -306,5 +308,5 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { }); assert_eq!(drop_tester, 4); runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); + drop_tester.wait_on_drop(); } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 84ac84e630d00..85a6dcc9e8b29 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -14,31 +14,31 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] hex-literal = "0.3.1" tempfile = "3.1.0" -tokio = "0.1.22" -futures01 = { package = "futures", version = "0.1.29" } +tokio = { version = "1.10.0", features = ["time"] } log = "0.4.8" fdlimit = "0.2.1" -parking_lot = "0.10.0" -sc-light = { version = "2.0.0", path = "../../light" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } -sp-storage = { version = "2.0.0", path = "../../../primitives/storage" } -sc-client-db = { version = "0.8.0", default-features = false, path = "../../db" } -futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../service" } -sc-network = { version = "0.8.0", path = "../../network" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +parking_lot = "0.11.1" +sc-light = { version = "4.0.0-dev", path = "../../light" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } +sp-externalities = { version = "0.10.0-dev", path = "../../../primitives/externalities" } +sp-trie = { version = "4.0.0-dev", path = "../../../primitives/trie" } +sp-storage = { version = "4.0.0-dev", path = "../../../primitives/storage" } +sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../../db" } +futures = "0.3.16" +sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } +sc-network = { version = "0.10.0-dev", path = "../../network" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sc-block-builder = { version = "0.8.0", path = "../../block-builder" } -sc-executor = { 
version = "0.8.0", path = "../../executor" }
-sp-panic-handler = { version = "2.0.0", path = "../../../primitives/panic-handler" }
-parity-scale-codec = "1.3.4"
-sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" }
+sc-client-api = { version = "4.0.0-dev", path = "../../api" }
+sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" }
+sc-executor = { version = "0.10.0-dev", path = "../../executor" }
+sp-panic-handler = { version = "3.0.0", path = "../../../primitives/panic-handler" }
+parity-scale-codec = "2.0.0"
+sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" }
diff --git a/client/service/test/src/client/db.rs b/client/service/test/src/client/db.rs
index 36d49732246e5..5278c9a13a4d7 100644
--- a/client/service/test/src/client/db.rs
+++ b/client/service/test/src/client/db.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -16,7 +16,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-use sp_core::offchain::{OffchainStorage, storage::InMemOffchainStorage};
+use sp_core::offchain::{storage::InMemOffchainStorage, OffchainStorage};
 use std::sync::Arc;

 type TestBackend = sc_client_api::in_mem::Backend<substrate_test_runtime_client::runtime::Block>;
@@ -32,12 +32,13 @@ fn test_leaves_with_complex_block_tree() {
 fn test_blockchain_query_by_number_gets_canonical() {
 	let backend = Arc::new(TestBackend::new());
-	substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend);
+	substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(
+		backend,
+	);
 }

 #[test]
 fn in_memory_offchain_storage() {
-
 	let mut storage = InMemOffchainStorage::default();
 	assert_eq!(storage.get(b"A", b"B"), None);
 	assert_eq!(storage.get(b"B", b"A"), None);
diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs
index f38aef008e11c..fb9566d208f76 100644
--- a/client/service/test/src/client/light.rs
+++ b/client/service/test/src/client/light.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

 // This program is free software: you can redistribute it and/or modify
@@ -16,53 +16,52 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
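The `in_memory_offchain_storage` test above exercises the `OffchainStorage` contract: values live under a `(prefix, key)` pair, so different prefixes are disjoint namespaces. A compile-check sketch of the same API surface, assuming sp-core's published trait and its in-memory implementation:

use sp_core::offchain::{storage::InMemOffchainStorage, OffchainStorage};

fn main() {
    let mut storage = InMemOffchainStorage::default();
    assert_eq!(storage.get(b"A", b"B"), None);

    storage.set(b"A", b"B", b"C");
    assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec()));

    // The same key under another prefix is a different slot entirely.
    assert_eq!(storage.get(b"B", b"B"), None);
}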
+use super::prepare_client_with_key_changes; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::Mutex; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + backend::NewBlockState, + blockchain::Info, + cht, + in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, + AuxStore, Backend as ClientBackend, BlockBackend, BlockImportOperation, CallExecutor, + ChangesProof, ExecutionStrategy, FetchChecker, ProofProvider, ProvideChtRoots, + RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, + RemoteReadChildRequest, RemoteReadRequest, Storage, StorageProof, StorageProvider, +}; +use sc_executor::{NativeElseWasmExecutor, RuntimeVersion, WasmExecutionMethod}; use sc_light::{ - call_executor::{ - GenesisCallExecutor, - check_execution_proof, - check_execution_proof_with_make_header, - }, - fetcher::LightDataChecker, - blockchain::{BlockchainCache, Blockchain}, backend::{Backend, GenesisOrUnavailableState}, + blockchain::{Blockchain, BlockchainCache}, + call_executor::{check_execution_proof, GenesisCallExecutor}, + fetcher::LightDataChecker, }; -use std::sync::Arc; -use sp_runtime::{ - traits::{BlakeTwo256, HashFor, NumberFor}, - generic::BlockId, traits::{Block as _, Header as HeaderT}, Digest, -}; -use std::collections::HashMap; -use parking_lot::Mutex; -use substrate_test_runtime_client::{ - runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, +use sp_api::{ProofRecorder, StorageTransactionCache}; +use sp_blockchain::{ + well_known_cache_keys, BlockStatus, CachedHeaderMetadata, Error as ClientError, HeaderBackend, + Result as ClientResult, }; -use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOverlayedChanges}; use sp_consensus::BlockOrigin; -use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; -use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; -use sc_client_api::{ - blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, - in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, ProvideChtRoots, - AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, - RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, - RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend, -}; +use sp_core::{testing::TaskExecutor, NativeOrEncoded, H256}; use sp_externalities::Extensions; -use sc_block_builder::BlockBuilderProvider; -use sp_blockchain::{ - BlockStatus, Result as ClientResult, Error as ClientError, CachedHeaderMetadata, - HeaderBackend, well_known_cache_keys +use sp_runtime::{ + generic::BlockId, + traits::{BlakeTwo256, Block as _, Header as HeaderT, NumberFor}, + Digest, Justifications, }; -use std::panic::UnwindSafe; -use std::cell::RefCell; -use sp_state_machine::{OverlayedChanges, ExecutionManager}; -use parity_scale_codec::{Decode, Encode}; -use super::prepare_client_with_key_changes; +use sp_state_machine::{ExecutionManager, OverlayedChanges}; +use std::{cell::RefCell, collections::HashMap, panic::UnwindSafe, sync::Arc}; use substrate_test_runtime_client::{ - AccountKeyring, runtime::{self, Extrinsic}, + runtime::{self, Block, Extrinsic, Hash, Header}, + AccountKeyring, ClientBlockImportExt, TestClient, }; -use sp_core::{blake2_256, ChangesTrieConfiguration, storage::{well_known_keys, StorageKey, ChildInfo}}; +use sp_core::{ + blake2_256, + storage::{well_known_keys, ChildInfo, StorageKey}, + 
ChangesTrieConfiguration, +}; use sp_state_machine::Backend as _; pub type DummyBlockchain = Blockchain; @@ -115,7 +114,8 @@ impl sp_blockchain::HeaderMetadata for DummyStorage { type Error = ClientError; fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) + self.header(BlockId::hash(hash))? + .map(|header| CachedHeaderMetadata::from(&header)) .ok_or(ClientError::UnknownBlock("header not found".to_owned())) } fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} @@ -127,9 +127,13 @@ impl AuxStore for DummyStorage { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, _delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + _delete: D, + ) -> ClientResult<()> { for (k, v) in insert.into_iter() { self.aux_store.lock().insert(k.to_vec(), v.to_vec()); } @@ -182,9 +186,10 @@ impl ProvideChtRoots for DummyStorage { cht::block_to_cht_number(cht_size, block) .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) .cloned() - .ok_or_else(|| ClientError::Backend( - format!("Test error: CHT for block #{} not found", block) - ).into()) + .ok_or_else(|| { + ClientError::Backend(format!("Test error: CHT for block #{} not found", block)) + .into() + }) .map(Some) } } @@ -208,34 +213,34 @@ impl CallExecutor for DummyCallExecutor { } fn contextual_call< - 'a, - IB: Fn() -> ClientResult<()>, EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> Result + UnwindSafe, + NC: FnOnce() -> Result + UnwindSafe, >( &self, - _initialize_block_fn: IB, _at: &BlockId, _method: &str, _call_data: &[u8], _changes: &RefCell, - _offchain_changes: &RefCell, - _storage_transaction_cache: Option<&RefCell< - StorageTransactionCache< - Block, - >::State, - > - >>, - _initialize_block: InitializeBlock<'a, Block>, + _storage_transaction_cache: Option< + &RefCell< + StorageTransactionCache< + Block, + >::State, + >, + >, + >, _execution_manager: ExecutionManager, _native_call: Option, _proof_recorder: &Option>, _extensions: Option, - ) -> ClientResult> where ExecutionManager: Clone { + ) -> ClientResult> + where + ExecutionManager: Clone, + { unreachable!() } @@ -243,36 +248,32 @@ impl CallExecutor for DummyCallExecutor { unreachable!() } - fn prove_at_trie_state>>( + fn prove_execution( &self, - _trie_state: &sp_state_machine::TrieBackend>, - _overlay: &mut OverlayedChanges, - _method: &str, - _call_data: &[u8] + _: &BlockId, + _: &str, + _: &[u8], ) -> Result<(Vec, StorageProof), ClientError> { unreachable!() } - - fn native_runtime_version(&self) -> Option<&NativeVersion> { - unreachable!() - } } -fn local_executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) +fn local_executor() -> NativeElseWasmExecutor +{ + NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } #[test] fn local_state_is_created_when_genesis_state_is_available() { let def = Default::default(); - let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); + let header0 = + substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - let backend: Backend<_, BlakeTwo256> = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); + let backend: Backend<_, BlakeTwo256> = + 
Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); - op.reset_storage(Default::default()).unwrap(); + op.set_block_data(header0, None, None, None, NewBlockState::Final).unwrap(); + op.set_genesis_state(Default::default(), true).unwrap(); backend.commit_operation(op).unwrap(); match backend.state_at(BlockId::Number(0)).unwrap() { @@ -283,9 +284,8 @@ fn local_state_is_created_when_genesis_state_is_available() { #[test] fn unavailable_state_is_created_when_genesis_state_is_unavailable() { - let backend: Backend<_, BlakeTwo256> = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); + let backend: Backend<_, BlakeTwo256> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); match backend.state_at(BlockId::Number(0)).unwrap() { GenesisOrUnavailableState::Unavailable => (), @@ -310,11 +310,8 @@ fn execution_proof_is_generated_and_checked() { let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); // 'fetch' execution proof from remote node - let (remote_result, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - &[] - ).unwrap(); + let (remote_result, remote_execution_proof) = + remote_client.execution_proof(&remote_block_id, method, &[]).unwrap(); // check remote execution proof locally let local_result = check_execution_proof::<_, _, BlakeTwo256>( @@ -328,41 +325,51 @@ fn execution_proof_is_generated_and_checked() { retry_count: None, }, remote_execution_proof, - ).unwrap(); + ) + .unwrap(); (remote_result, local_result) } - fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { + fn execute_with_proof_failure(remote_client: &TestClient, at: u64) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); // 'fetch' execution proof from remote node - let (_, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - &[] - ).unwrap(); + let (_, remote_execution_proof) = remote_client + .execution_proof( + &remote_block_id, + "Core_initialize_block", + &Header::new( + at, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + .encode(), + ) + .unwrap(); // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( + let execution_result = check_execution_proof::<_, _, BlakeTwo256>( &local_executor(), Box::new(TaskExecutor::new()), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header, - method: method.into(), - call_data: vec![], + header: remote_header.clone(), + method: "Core_initialize_block".into(), + call_data: Header::new( + at + 1, + Default::default(), + Default::default(), + remote_header.hash(), + remote_header.digest().clone(), // this makes next header wrong + ) + .encode(), retry_count: None, }, remote_execution_proof, - |header|
<Header>
::new(
-			at + 1,
-			Default::default(),
-			Default::default(),
-			header.hash(),
-			header.digest().clone(), // this makes next header wrong
-		),
 	);
 	match execution_result {
 		Err(sp_blockchain::Error::Execution(_)) => (),
@@ -375,11 +382,12 @@ fn execution_proof_is_generated_and_checked() {
 	for i in 1u32..3u32 {
 		let mut digest = Digest::default();
 		digest.push(sp_runtime::generic::DigestItem::Other::<H256>(i.to_le_bytes().to_vec()));
-		remote_client.import_justified(
+		futures::executor::block_on(remote_client.import_justified(
 			BlockOrigin::Own,
 			remote_client.new_block(digest).unwrap().build().unwrap().block,
-			Default::default(),
-		).unwrap();
+			Justifications::from((*b"TEST", Default::default())),
+		))
+		.unwrap();
 	}

 	// check method that doesn't require an environment
@@ -389,43 +397,38 @@ fn execution_proof_is_generated_and_checked() {
 	let (remote, local) = execute(&remote_client, 2, "Core_version");
 	assert_eq!(remote, local);

-	// check method that requires environment
-	let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block");
-	let local_block: Header = Decode::decode(&mut &block[..]).unwrap();
-	assert_eq!(local_block.number, 1);
-
-	let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block");
-	let local_block: Header = Decode::decode(&mut &block[..]).unwrap();
-	assert_eq!(local_block.number, 3);
-
 	// check that proof check doesn't panic even if proof is incorrect AND no panic handler is set
-	execute_with_proof_failure(&remote_client, 2, "Core_version");
+	execute_with_proof_failure(&remote_client, 2);

 	// check that proof check doesn't panic even if proof is incorrect AND panic handler is set
 	sp_panic_handler::set("TEST", "1.2.3");
-	execute_with_proof_failure(&remote_client, 2, "Core_version");
+	execute_with_proof_failure(&remote_client, 2);
 }

 #[test]
 fn code_is_executed_at_genesis_only() {
 	let backend = Arc::new(InMemBackend::<Block>::new());
 	let def = H256::default();
-	let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default());
+	let header0 =
+		substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default());
 	let hash0 = header0.hash();
-	let header1 = substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default());
+	let header1 =
+		substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default());
 	let hash1 = header1.hash();
-	backend.blockchain().insert(hash0, header0, None, None, NewBlockState::Final).unwrap();
-	backend.blockchain().insert(hash1, header1, None, None, NewBlockState::Final).unwrap();
+	backend
+		.blockchain()
+		.insert(hash0, header0, None, None, NewBlockState::Final)
+		.unwrap();
+	backend
+		.blockchain()
+		.insert(hash1, header1, None, None, NewBlockState::Final)
+		.unwrap();

 	let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor);
 	assert_eq!(
-		genesis_executor.call(
-			&BlockId::Number(0),
-			"test_method",
-			&[],
-			ExecutionStrategy::NativeElseWasm,
-			None,
-		).unwrap(),
+		genesis_executor
+			.call(&BlockId::Number(0), "test_method", &[], ExecutionStrategy::NativeElseWasm, None,)
+			.unwrap(),
 		vec![42],
 	);

@@ -443,10 +446,8 @@ fn code_is_executed_at_genesis_only() {
 	}
 }

-
 type TestChecker = LightDataChecker<
-	NativeExecutor<substrate_test_runtime_client::LocalExecutor>,
-	BlakeTwo256,
+	NativeElseWasmExecutor<substrate_test_runtime_client::LocalExecutorDispatch>,
	Block,
	DummyStorage,
 >;
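The `import_justified` rewrite above reflects an API shift: a block now carries at most one justification per consensus engine, so the old raw `Vec<u8>` argument became a `Justifications` set keyed by `ConsensusEngineId`. A sketch of the container's behaviour, assuming sp-runtime's published methods (`*b"TEST"` is the engine id these tests use):

use sp_runtime::{ConsensusEngineId, Justifications};

fn main() {
    const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST";

    // `From<(ConsensusEngineId, Vec<u8>)>` builds a one-entry set.
    let mut justifications = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3]));

    // `append` refuses a second justification for the same engine.
    assert!(!justifications.append((TEST_ENGINE_ID, vec![4, 5, 6])));
    assert_eq!(justifications.get(TEST_ENGINE_ID), Some(&vec![1, 2, 3]));
}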
@@ -457,27 +458,28 @@ fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) {
 	let remote_block_id = BlockId::Number(0);
 	let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap();
 	let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap();
-	remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap()
-		.storage_root(::std::iter::empty()).0.into();
+	remote_block_header.state_root = remote_client
+		.state_at(&remote_block_id)
+		.unwrap()
+		.storage_root(::std::iter::empty())
+		.0
+		.into();

 	// 'fetch' read proof from remote node
-	let heap_pages = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec()))
+	let heap_pages = remote_client
+		.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec()))
 		.unwrap()
-		.and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap();
-	let remote_read_proof = remote_client.read_proof(
-		&remote_block_id,
-		&mut std::iter::once(well_known_keys::HEAP_PAGES),
-	).unwrap();
+		.and_then(|v| Decode::decode(&mut &v.0[..]).ok())
+		.unwrap();
+	let remote_read_proof = remote_client
+		.read_proof(&remote_block_id, &mut std::iter::once(well_known_keys::HEAP_PAGES))
+		.unwrap();

 	// check remote read proof locally
 	let local_storage = InMemoryBlockchain::<Block>::new();
-	local_storage.insert(
-		remote_block_hash,
-		remote_block_header.clone(),
-		None,
-		None,
-		NewBlockState::Final,
-	).unwrap();
+	local_storage
+		.insert(remote_block_hash, remote_block_header.clone(), None, None, NewBlockState::Final)
+		.unwrap();
 	let local_checker = LightDataChecker::new(
 		Arc::new(DummyBlockchain::new(DummyStorage::new())),
 		local_executor(),
@@ -487,45 +489,39 @@ fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) {
 }

 fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec<u8>) {
-	use substrate_test_runtime_client::DefaultTestClientBuilderExt;
-	use substrate_test_runtime_client::TestClientBuilderExt;
+	use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt};
 	let child_info = ChildInfo::new_default(b"child1");
 	let child_info = &child_info;
 	// prepare remote client
 	let remote_client = substrate_test_runtime_client::TestClientBuilder::new()
-		.add_extra_child_storage(
-			child_info,
-			b"key1".to_vec(),
-			b"value1".to_vec(),
-		).build();
+		.add_extra_child_storage(child_info, b"key1".to_vec(), b"value1".to_vec())
+		.build();
 	let remote_block_id = BlockId::Number(0);
 	let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap();
 	let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap();
-	remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap()
-		.storage_root(::std::iter::empty()).0.into();
+	remote_block_header.state_root = remote_client
+		.state_at(&remote_block_id)
+		.unwrap()
+		.storage_root(::std::iter::empty())
+		.0
+		.into();

 	// 'fetch' child read proof from remote node
-	let child_value = remote_client.child_storage(
-		&remote_block_id,
-		child_info,
-		&StorageKey(b"key1".to_vec()),
-	).unwrap().unwrap().0;
+	let child_value = remote_client
+		.child_storage(&remote_block_id, child_info, &StorageKey(b"key1".to_vec()))
+		.unwrap()
+		.unwrap()
+		.0;
 	assert_eq!(b"value1"[..], child_value[..]);
-	let remote_read_proof = remote_client.read_child_proof(
-		&remote_block_id,
-		child_info,
-		&mut std::iter::once("key1".as_bytes()),
-	).unwrap();
+	let remote_read_proof = remote_client
+		.read_child_proof(&remote_block_id, child_info, &mut std::iter::once("key1".as_bytes()))
+		.unwrap();

 	// check locally
 	let local_storage = InMemoryBlockchain::<Block>::new();
-	local_storage.insert(
-		remote_block_hash,
-		remote_block_header.clone(),
-		None,
-		None,
-		NewBlockState::Final,
-	).unwrap();
+	local_storage
+		.insert(remote_block_hash, remote_block_header.clone(), None, None, NewBlockState::Final)
+		.unwrap();
 	let local_checker = LightDataChecker::new(
 		Arc::new(DummyBlockchain::new(DummyStorage::new())),
 		local_executor(),
@@ -540,20 +536,23 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) {
 	let mut local_headers_hashes = Vec::new();
 	for i in 0..4 {
 		let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block;
-		remote_client.import(BlockOrigin::Own, block).unwrap();
+		futures::executor::block_on(remote_client.import(BlockOrigin::Own, block)).unwrap();
 		local_headers_hashes.push(
-			remote_client.block_hash(i + 1)
-				.map_err(|_| ClientError::Backend("TestError".into()))
+			remote_client
+				.block_hash(i + 1)
+				.map_err(|_| ClientError::Backend("TestError".into())),
 		);
 	}

 	// 'fetch' header proof from remote node
 	let remote_block_id = BlockId::Number(1);
-	let (remote_block_header, remote_header_proof) = remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap();
+	let (remote_block_header, remote_header_proof) =
+		remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap();

 	// check remote read proof locally
 	let local_storage = InMemoryBlockchain::<Block>::new();
-	let local_cht_root = cht::compute_root::<Header, BlakeTwo256, _>(4, 0, local_headers_hashes).unwrap();
+	let local_cht_root =
+		cht::compute_root::<Header, BlakeTwo256, _>(4, 0, local_headers_hashes).unwrap();
 	if insert_cht {
 		local_storage.insert_cht_root(1, local_cht_root);
 	}
@@ -566,7 +565,7 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) {
 }

 fn header_with_computed_extrinsics_root(extrinsics: Vec<Extrinsic>) -> Header {
-	use sp_trie::{TrieConfiguration, trie_types::Layout};
+	use sp_trie::{trie_types::Layout, TrieConfiguration};
 	let iter = extrinsics.iter().map(Encode::encode);
 	let extrinsics_root = Layout::<BlakeTwo256>::ordered_trie_root(iter);
@@ -576,66 +575,106 @@ fn header_with_computed_extrinsics_root(extrinsics: Vec<Extrinsic>) -> Header {

 #[test]
 fn storage_read_proof_is_generated_and_checked() {
-	let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check();
-	assert_eq!((&local_checker as &dyn FetchChecker<Block>).check_read_proof(&RemoteReadRequest::
<Block> {
-		block: remote_block_header.hash(),
-		header: remote_block_header,
-		keys: vec![well_known_keys::HEAP_PAGES.to_vec()],
-		retry_count: None,
-	}, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8);
+	let (local_checker, remote_block_header, remote_read_proof, heap_pages) =
+		prepare_for_read_proof_check();
+	assert_eq!(
+		(&local_checker as &dyn FetchChecker<Block>)
+			.check_read_proof(
+				&RemoteReadRequest::<Block> {
+					block: remote_block_header.hash(),
+					header: remote_block_header,
+					keys: vec![well_known_keys::HEAP_PAGES.to_vec()],
+					retry_count: None,
+				},
+				remote_read_proof
+			)
+			.unwrap()
+			.remove(well_known_keys::HEAP_PAGES)
+			.unwrap()
+			.unwrap()[0],
+		heap_pages as u8
+	);
 }

 #[test]
 fn storage_child_read_proof_is_generated_and_checked() {
 	let child_info = ChildInfo::new_default(&b"child1"[..]);
-	let (
-		local_checker,
-		remote_block_header,
-		remote_read_proof,
-		result,
-	) = prepare_for_read_child_proof_check();
-	assert_eq!((&local_checker as &dyn FetchChecker<Block>).check_read_child_proof(
-		&RemoteReadChildRequest::<Block> {
-			block: remote_block_header.hash(),
-			header: remote_block_header,
-			storage_key: child_info.prefixed_storage_key(),
-			keys: vec![b"key1".to_vec()],
-			retry_count: None,
-		},
-		remote_read_proof
-	).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result);
+	let (local_checker, remote_block_header, remote_read_proof, result) =
+		prepare_for_read_child_proof_check();
+	assert_eq!(
+		(&local_checker as &dyn FetchChecker<Block>)
+			.check_read_child_proof(
+				&RemoteReadChildRequest::<Block> {
+					block: remote_block_header.hash(),
+					header: remote_block_header,
+					storage_key: child_info.prefixed_storage_key(),
+					keys: vec![b"key1".to_vec()],
+					retry_count: None,
+				},
+				remote_read_proof
+			)
+			.unwrap()
+			.remove(b"key1".as_ref())
+			.unwrap()
+			.unwrap(),
+		result
+	);
 }

 #[test]
 fn header_proof_is_generated_and_checked() {
-	let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true);
-	assert_eq!((&local_checker as &dyn FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Block> {
-		cht_root: local_cht_root,
-		block: 1,
-		retry_count: None,
-	}, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header);
+	let (local_checker, local_cht_root, remote_block_header, remote_header_proof) =
+		prepare_for_header_proof_check(true);
+	assert_eq!(
+		(&local_checker as &dyn FetchChecker<Block>)
+			.check_header_proof(
+				&RemoteHeaderRequest::<Block> {
+					cht_root: local_cht_root,
+					block: 1,
+					retry_count: None,
+				},
+				Some(remote_block_header.clone()),
+				remote_header_proof
+			)
+			.unwrap(),
+		remote_block_header
+	);
 }

 #[test]
 fn check_header_proof_fails_if_cht_root_is_invalid() {
-	let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true);
+	let (local_checker, _, mut remote_block_header, remote_header_proof) =
+		prepare_for_header_proof_check(true);
 	remote_block_header.number = 100;
-	assert!((&local_checker as &dyn FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Block> {
-		cht_root: Default::default(),
-		block: 1,
-		retry_count: None,
-	}, Some(remote_block_header.clone()), remote_header_proof).is_err());
+	assert!((&local_checker as &dyn FetchChecker<Block>)
+		.check_header_proof(
+			&RemoteHeaderRequest::<Block> {
+				cht_root: Default::default(),
+				block: 1,
+				retry_count: None,
+			},
+			Some(remote_block_header.clone()),
+			remote_header_proof
+		)
+		.is_err());
 }

 #[test]
 fn check_header_proof_fails_if_invalid_header_provided() {
-	let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true);
+	let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) =
+		prepare_for_header_proof_check(true);
 	remote_block_header.number = 100;
-	assert!((&local_checker as &dyn FetchChecker<Block>).check_header_proof(&RemoteHeaderRequest::<Block> {
-		cht_root: local_cht_root,
-		block: 1,
-		retry_count: None,
-	}, Some(remote_block_header.clone()), remote_header_proof).is_err());
+	assert!((&local_checker as &dyn FetchChecker<Block>)
+		.check_header_proof(
+			&RemoteHeaderRequest::<Block> {
+				cht_root: local_cht_root,
+				block: 1,
+				retry_count: None,
+			},
+			Some(remote_block_header.clone()),
+			remote_header_proof
+		)
+		.is_err());
 }

 #[test]
@@ -656,9 +695,9 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() {
 		// 'fetch' changes proof from remote node
 		let key = StorageKey(key);
-		let remote_proof = remote_client.key_changes_proof(
-			begin_hash, end_hash, begin_hash, max_hash, None, &key
-		).unwrap();
+		let remote_proof = remote_client
+			.key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key)
+			.unwrap();

 		// check proof on local client
 		let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec();
@@ -677,18 +716,24 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() {
 			storage_key: None,
 			retry_count: None,
 		};
-		let local_result = local_checker.check_changes_proof(&request, ChangesProof {
-			max_block: remote_proof.max_block,
-			proof: remote_proof.proof,
-			roots: remote_proof.roots,
-			roots_proof: remote_proof.roots_proof,
-		}).unwrap();
+		let local_result = local_checker
+			.check_changes_proof(
+				&request,
+				ChangesProof {
+					max_block: remote_proof.max_block,
+					proof: remote_proof.proof,
+					roots: remote_proof.roots,
+					roots_proof: remote_proof.roots_proof,
+				},
+			)
+			.unwrap();

 		// ..and ensure that result is the same as on remote node
-		match local_result == expected_result {
-			true => (),
-			false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}",
-				index, local_result, expected_result)),
+		if local_result != expected_result {
+			panic!(
+				"Failed test {}: local = {:?}, expected = {:?}",
+				index, local_result, expected_result,
+			);
 		}
 	}
 }
@@ -708,12 +753,17 @@ fn changes_proof_is_generated_and_checked_when_headers_are_pruned() {
 	let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap();
 	let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap();
 	let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap();
-	let remote_proof = remote_client.key_changes_proof_with_cht_size(
-		b1, b4, b3, b4, None, &dave, 4
-	).unwrap();
+	let remote_proof = remote_client
+		.key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4)
+		.unwrap();

 	// prepare local checker, having a root of changes trie CHT#0
-	let local_cht_root = cht::compute_root::<Header, BlakeTwo256, _>(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap();
+	let local_cht_root = cht::compute_root::<Header, BlakeTwo256, _>(
+		4,
+		0,
+		remote_roots.iter().cloned().map(|ct| Ok(Some(ct))),
+	)
+	.unwrap();
 	let mut local_storage = DummyStorage::new();
 	local_storage.changes_tries_cht_roots.insert(0, local_cht_root);
 	let local_checker = TestChecker::new(
@@ -738,12 +788,18 @@ fn changes_proof_is_generated_and_checked_when_headers_are_pruned() {
 		key: dave.0,
 		retry_count: None,
 	};
-	let local_result = local_checker.check_changes_proof_with_cht_size(&request, ChangesProof {
-		max_block: remote_proof.max_block,
-		proof: remote_proof.proof,
-		roots: remote_proof.roots,
-		roots_proof: remote_proof.roots_proof,
-	}, 4).unwrap();
+	let local_result = local_checker
+		.check_changes_proof_with_cht_size(
+			&request,
+			ChangesProof {
+				max_block: remote_proof.max_block,
+				proof: remote_proof.proof,
+				roots: remote_proof.roots,
+				roots_proof: remote_proof.roots_proof,
+			},
+			4,
+		)
+		.unwrap();
 	assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]);
 }
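Both pruned-header tests above rest on the same primitive: a Canonical Hash Trie (CHT) commits to a fixed window of per-block roots, so a light client can still check data about headers the serving node has pruned, given only the CHT root. A standalone sketch of building one with the helper these tests call (window size 4 as above; the dummy hashes are illustrative):

use sc_client_api::cht;
use sp_core::H256;
use sp_runtime::traits::BlakeTwo256;
use substrate_test_runtime_client::runtime::Header;

fn main() {
    // One Ok(Some(hash)) per block in the CHT window.
    let hashes = (0..4).map(|_| Ok(Some(H256::default())));

    // CHT number 0, size 4: commits to the first four blocks in one root.
    let root = cht::compute_root::<Header, BlakeTwo256, _>(4, 0, hashes).unwrap();
    println!("CHT#0 root: {:?}", root);
}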
@@ -766,8 +822,9 @@
 	// 'fetch' changes proof from remote node
 	let key = StorageKey(key);
-	let remote_proof = remote_client.key_changes_proof(
-		begin_hash, end_hash, begin_hash, max_hash, None, &key).unwrap();
+	let remote_proof = remote_client
+		.key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key)
+		.unwrap();

 	let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec();
 	let config = ChangesTrieConfiguration::new(4, 2);
@@ -787,34 +844,54 @@
 	};

 	// check proof on local client using max from the future
-	assert!(local_checker.check_changes_proof(&request, ChangesProof {
-		max_block: remote_proof.max_block + 1,
-		proof: remote_proof.proof.clone(),
-		roots: remote_proof.roots.clone(),
-		roots_proof: remote_proof.roots_proof.clone(),
-	}).is_err());
+	assert!(local_checker
+		.check_changes_proof(
+			&request,
+			ChangesProof {
+				max_block: remote_proof.max_block + 1,
+				proof: remote_proof.proof.clone(),
+				roots: remote_proof.roots.clone(),
+				roots_proof: remote_proof.roots_proof.clone(),
+			}
+		)
+		.is_err());

 	// check proof on local client using broken proof
-	assert!(local_checker.check_changes_proof(&request, ChangesProof {
-		max_block: remote_proof.max_block,
-		proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(),
-		roots: remote_proof.roots,
-		roots_proof: remote_proof.roots_proof,
-	}).is_err());
+	assert!(local_checker
+		.check_changes_proof(
+			&request,
+			ChangesProof {
+				max_block: remote_proof.max_block,
+				proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(),
+				roots: remote_proof.roots,
+				roots_proof: remote_proof.roots_proof,
+			}
+		)
+		.is_err());

 	// extra roots proofs are provided
-	assert!(local_checker.check_changes_proof(&request, ChangesProof {
-		max_block: remote_proof.max_block,
-		proof: remote_proof.proof.clone(),
-		roots: vec![(begin - 1, Default::default())].into_iter().collect(),
-		roots_proof: StorageProof::empty(),
-	}).is_err());
-	assert!(local_checker.check_changes_proof(&request, ChangesProof {
-		max_block: remote_proof.max_block,
-		proof: remote_proof.proof.clone(),
-		roots: vec![(end + 1, Default::default())].into_iter().collect(),
-		roots_proof: StorageProof::empty(),
-	}).is_err());
+	assert!(local_checker
+		.check_changes_proof(
+			&request,
+			ChangesProof {
+				max_block: remote_proof.max_block,
+				proof: remote_proof.proof.clone(),
+				roots: vec![(begin - 1, Default::default())].into_iter().collect(),
+				roots_proof: StorageProof::empty(),
+			}
+		)
+		.is_err());
+	assert!(local_checker
+		.check_changes_proof(
+			&request,
+			ChangesProof {
+				max_block: remote_proof.max_block,
+				proof: remote_proof.proof.clone(),
+				roots: vec![(end + 1, Default::default())].into_iter().collect(),
+				roots_proof: StorageProof::empty(),
+			}
+		)
+		.is_err());
 }

 #[test]
@@ -823,7 +900,11 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() {
 	// (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]),
 	let (remote_client, remote_roots, _) = prepare_client_with_key_changes();
 	let local_cht_root = cht::compute_root::<Header, BlakeTwo256, _>(
-		4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap();
+		4,
+		0,
+		remote_roots.iter().cloned().map(|ct| Ok(Some(ct))),
+	)
+	.unwrap();
 	let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec();
 	let dave = StorageKey(dave);

 	let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap();
 	let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap();
 	let b4 =
remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, None, &dave, 4 - ).unwrap(); + let remote_proof = remote_client + .key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4) + .unwrap(); // fails when changes trie CHT is missing from the local db let local_checker = TestChecker::new( @@ -844,8 +925,9 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { local_executor(), Box::new(TaskExecutor::new()), ); - assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, - remote_proof.roots_proof.clone()).is_err()); + assert!(local_checker + .check_changes_tries_proof(4, &remote_proof.roots, remote_proof.roots_proof.clone()) + .is_err()); // fails when proof is broken let mut local_storage = DummyStorage::new(); @@ -855,17 +937,15 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { local_executor(), Box::new(TaskExecutor::new()), ); - let result = local_checker.check_changes_tries_proof( - 4, &remote_proof.roots, StorageProof::empty() - ); + let result = + local_checker.check_changes_tries_proof(4, &remote_proof.roots, StorageProof::empty()); assert!(result.is_err()); } #[test] fn check_body_proof_faulty() { - let header = header_with_computed_extrinsics_root( - vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])] - ); + let header = + header_with_computed_extrinsics_root(vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])]); let block = Block::new(header.clone(), Vec::new()); let local_checker = TestChecker::new( @@ -874,10 +954,7 @@ fn check_body_proof_faulty() { Box::new(TaskExecutor::new()), ); - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; + let body_request = RemoteBodyRequest { header: header.clone(), retry_count: None }; assert!( local_checker.check_body_proof(&body_request, block.extrinsics).is_err(), @@ -898,10 +975,7 @@ fn check_body_proof_of_same_data_should_succeed() { Box::new(TaskExecutor::new()), ); - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; + let body_request = RemoteBodyRequest { header: header.clone(), retry_count: None }; assert!(local_checker.check_body_proof(&body_request, block.extrinsics).is_ok()); } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 23d6a34297328..295e941f7ceb1 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,72 +16,86 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
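The body-proof checks above hinge on one invariant: a block's `extrinsics_root` must equal the ordered trie root of its SCALE-encoded extrinsics, which is exactly what `header_with_computed_extrinsics_root` computes. A standalone sketch with plain byte blobs standing in for extrinsics (assuming sp-trie's `Layout`, as the tests themselves import):

use parity_scale_codec::Encode;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::{trie_types::Layout, TrieConfiguration};

fn main() {
    // Stand-ins for extrinsics; in the tests these are `Extrinsic` values.
    let extrinsics: Vec<Vec<u8>> = vec![vec![1, 2, 3, 4], vec![5, 6]];

    // The root a verifier recomputes before accepting a block body.
    let root = Layout::<BlakeTwo256>::ordered_trie_root(extrinsics.iter().map(Encode::encode));
    println!("extrinsics root: {:?}", root);
}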
-use parity_scale_codec::{Encode, Decode, Joiner}; -use sc_executor::native_executor_instance; -use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryBackend}; -use substrate_test_runtime_client::{ - prelude::*, - runtime::{ - self, genesismap::{GenesisConfig, insert_genesis_block}, - Hash, Transfer, Block, BlockNumber, Header, Digest, RuntimeApi, - }, - AccountKeyring, Sr25519Keyring, TestClientBuilder, ClientBlockImportExt, - BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, +use futures::executor::block_on; +use hex_literal::hex; +use parity_scale_codec::{Decode, Encode, Joiner}; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{in_mem, BlockBackend, BlockchainEvents, StorageProvider}; +use sc_client_db::{ + Backend, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode, TransactionStorageMode, }; -use sc_client_api::{ - StorageProvider, BlockBackend, in_mem, BlockchainEvents, +use sc_consensus::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; -use sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}; -use sc_block_builder::BlockBuilderProvider; -use sc_service::client::{self, Client, LocalCallExecutor, new_in_mem}; -use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, Header as HeaderT, +use sc_service::client::{self, new_in_mem, Client, LocalCallExecutor}; +use sp_api::ProvideRuntimeApi; +use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError, SelectChain}; +use sp_core::{blake2_256, testing::TaskExecutor, ChangesTrieConfiguration, H256}; +use sp_runtime::{ + generic::BlockId, + traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, + ConsensusEngineId, DigestItem, Justifications, +}; +use sp_state_machine::{ + backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, +}; +use sp_storage::{ChildInfo, StorageKey}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; use substrate_test_runtime::TestAPI; -use sp_state_machine::backend::Backend as _; -use sp_api::{ProvideRuntimeApi, OffchainOverlayedChanges}; -use sp_core::{H256, ChangesTrieConfiguration, blake2_256, testing::TaskExecutor}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use sp_consensus::{ - BlockOrigin, SelectChain, BlockImport, Error as ConsensusError, BlockCheckParams, ImportResult, - BlockStatus, BlockImportParams, ForkChoiceStrategy, +use substrate_test_runtime_client::{ + prelude::*, + runtime::{ + self, + genesismap::{insert_genesis_block, GenesisConfig}, + Block, BlockNumber, Digest, Hash, Header, RuntimeApi, Transfer, + }, + AccountKeyring, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, }; -use sp_storage::StorageKey; -use sp_trie::{TrieConfiguration, trie_types::Layout}; -use sp_runtime::{generic::BlockId, DigestItem}; -use hex_literal::hex; -mod light; mod db; +mod light; -native_executor_instance!( - Executor, - substrate_test_runtime_client::runtime::api::dispatch, - substrate_test_runtime_client::runtime::native_version, -); +const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST"; -fn executor() -> sc_executor::NativeExecutor { - sc_executor::NativeExecutor::new( - sc_executor::WasmExecutionMethod::Interpreted, - None, - 8, - ) +pub struct ExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + type 
ExtendHostFunctions = (); + + fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> { + substrate_test_runtime_client::runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + substrate_test_runtime_client::runtime::native_version() + } +} + +fn executor() -> sc_executor::NativeElseWasmExecutor<ExecutorDispatch> { + sc_executor::NativeElseWasmExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) } pub fn prepare_client_with_key_changes() -> ( client::Client< substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, + substrate_test_runtime_client::ExecutorDispatch, Block, - RuntimeApi + RuntimeApi, >, Vec<H256>, Vec<(u64, u64, Vec<u8>, Vec<(u64, u32)>)>, ) { // prepare block structure let blocks_transfers = vec![ - vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)], + vec![ + (AccountKeyring::Alice, AccountKeyring::Dave), + (AccountKeyring::Bob, AccountKeyring::Dave), + ], vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], vec![], vec![(AccountKeyring::Alice, AccountKeyring::Dave)], @@ -95,18 +109,22 @@ pub fn prepare_client_with_key_changes() -> ( for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { let mut builder = remote_client.new_block(Default::default()).unwrap(); for (from, to) in block_transfers { - builder.push_transfer(Transfer { - from: from.into(), - to: to.into(), - amount: 1, - nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(), - }).unwrap(); + builder + .push_transfer(Transfer { + from: from.into(), + to: to.into(), + amount: 1, + nonce: *nonces.entry(from).and_modify(|n| *n = *n + 1).or_default(), + }) + .unwrap(); } let block = builder.build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); + block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); - let trie_root = header.digest().log(DigestItem::as_changes_trie_root) + let trie_root = header + .digest() + .log(DigestItem::as_changes_trie_root) .map(|root| H256::from_slice(root.as_ref())) .unwrap(); local_roots.push(trie_root); @@ -115,10 +133,12 @@ pub fn prepare_client_with_key_changes() -> ( // prepare test cases let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); - let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); + let charlie = + blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); - let ferdie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); + let ferdie = + blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); let test_cases = vec![ (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), (1, 3, alice.clone(), vec![(1, 0)]), @@ -161,8 +181,7 @@ fn construct_block( }; let hash = header.hash(); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(backend); let runtime_code
= backend_runtime_code.runtime_code().expect("Code is part of the backend"); let task_executor = Box::new(TaskExecutor::new()); @@ -170,48 +189,45 @@ fn construct_block( backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_initialize_block", &header.encode(), Default::default(), &runtime_code, task_executor.clone() as Box<_>, - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); for tx in transactions.iter() { StateMachine::new( backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "BlockBuilder_apply_extrinsic", &tx.encode(), Default::default(), &runtime_code, task_executor.clone() as Box<_>, - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); } let ret_data = StateMachine::new( backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "BlockBuilder_finalize_block", &[], Default::default(), &runtime_code, task_executor.clone() as Box<_>, - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); header = Header::decode(&mut &ret_data[..]).unwrap(); (vec![].and(&Block { header, extrinsics: transactions }), hash) @@ -241,7 +257,8 @@ fn construct_genesis_should_work_with_native() { 1000, None, Default::default(), - ).genesis_map(); + ) + .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from(storage); @@ -250,22 +267,20 @@ fn construct_genesis_should_work_with_native() { let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let _ = StateMachine::new( &backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_execute_block", &b1data, Default::default(), &runtime_code, TaskExecutor::new(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); } #[test] @@ -277,7 +292,8 @@ fn construct_genesis_should_work_with_wasm() { 1000, None, Default::default(), - ).genesis_map(); + ) + .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from(storage); @@ -286,22 +302,20 @@ fn construct_genesis_should_work_with_wasm() { let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let _ = StateMachine::new( &backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_execute_block", &b1data, Default::default(), &runtime_code, TaskExecutor::new(), - ).execute( - ExecutionStrategy::AlwaysWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::AlwaysWasm) + .unwrap(); } #[test] @@ -313,7 +327,8 @@ fn construct_genesis_with_bad_transaction_should_panic() { 68, None, Default::default(), - ).genesis_map(); + ) + .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from(storage); @@ -322,42 +337,44 @@ fn construct_genesis_with_bad_transaction_should_panic() { let 
runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let r = StateMachine::new( &backend, sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, - &mut offchain_overlay, &executor(), "Core_execute_block", &b1data, Default::default(), &runtime_code, TaskExecutor::new(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ); + ) + .execute(ExecutionStrategy::NativeElseWasm); assert!(r.is_err()); } - #[test] fn client_initializes_from_genesis_ok() { let client = substrate_test_runtime_client::new(); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ) + .unwrap(), 1000 ); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ) + .unwrap(), 0 ); } @@ -368,7 +385,7 @@ fn block_builder_works_with_no_transactions() { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); } @@ -379,15 +396,17 @@ fn block_builder_works_with_transactions() { let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); assert_ne!( @@ -395,17 +414,23 @@ fn block_builder_works_with_transactions() { client.state_at(&BlockId::Number(0)).unwrap().pairs() ); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ) + .unwrap(), 958 ); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ) + .unwrap(), 42 ); } @@ -416,24 +441,26 @@ fn block_builder_does_not_include_invalid() { let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); - assert!( - builder.push_transfer(Transfer { + assert!(builder + .push_transfer(Transfer { from: AccountKeyring::Eve.into(), to: AccountKeyring::Alice.into(), 
amount: 42, nonce: 0, - }).is_err() - ); + }) + .is_err()); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); assert_ne!( @@ -454,7 +481,9 @@ fn best_containing_with_genesis_block() { assert_eq!( genesis_hash.clone(), - longest_chain_select.finality_target(genesis_hash.clone(), None).unwrap().unwrap() + block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)) + .unwrap() + .unwrap(), ); } @@ -469,7 +498,8 @@ fn best_containing_with_hash_not_found() { assert_eq!( None, - longest_chain_select.finality_target(uninserted_block.hash().clone(), None).unwrap() + block_on(longest_chain_select.finality_target(uninserted_block.hash().clone(), None)) + .unwrap(), ); } @@ -481,11 +511,11 @@ fn uncles_with_only_ancestors() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let v: Vec<H256> = Vec::new(); assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); } @@ -495,109 +525,115 @@ fn uncles_with_multiple_forks() { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // B2 -> C3 + // A1 -> D2 let mut client = substrate_test_runtime_client::new(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, -
).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -629,17 +665,32 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, 
a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a1.hash(), None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a2.hash(), None).unwrap().unwrap()); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), None)) + .unwrap() + .unwrap() + ); } #[test] @@ -647,114 +698,120 @@ fn best_containing_on_longest_chain_with_multiple_forks() { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // B2 -> C3 + // A1 -> D2 let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); 
let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + // B2 -> C3 + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); assert_eq!(client.chain_info().best_hash, a5.hash()); let genesis_hash = client.chain_info().genesis_hash; - let leaves = longest_chain_select.leaves().unwrap(); + let leaves = block_on(longest_chain_select.leaves()).unwrap(); assert!(leaves.contains(&a5.hash())); assert!(leaves.contains(&b4.hash())); @@ -763,205 +820,342 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(leaves.len(), 4); // search without restriction - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - genesis_hash, None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), 
longest_chain_select.finality_target( - a5.hash(), None).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), None).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), None).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), None).unwrap().unwrap()); - + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a4.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a5.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b4.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), None)) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), None)) + .unwrap() + .unwrap() + ); // search only blocks with number <= 5. 
equivalent to without restriction for this scenario - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - genesis_hash, Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a5.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(5)).unwrap().unwrap()); - + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a4.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + block_on(longest_chain_select.finality_target(a5.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b4.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), Some(5))) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(5))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 4 - - assert_eq!(a4.hash(), longest_chain_select.finality_target( - genesis_hash, Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a1.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a4.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(4)).unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(4)).unwrap().unwrap()); 
- - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(4)).unwrap().unwrap()); - + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + block_on(longest_chain_select.finality_target(a4.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(4))).unwrap()); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + block_on(longest_chain_select.finality_target(b4.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), Some(4))) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(4))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 3 - - assert_eq!(a3.hash(), longest_chain_select.finality_target( - genesis_hash, Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a1.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(3)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(3)).unwrap()); - - assert_eq!(b3.hash(), longest_chain_select.finality_target( - b2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(b3.hash(), longest_chain_select.finality_target( - b3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(3)).unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(3)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(3)).unwrap().unwrap()); - + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + block_on(longest_chain_select.finality_target(a3.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(3))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(3))).unwrap()); + assert_eq!( + b3.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( 
+ b3.hash(), + block_on(longest_chain_select.finality_target(b3.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(3))).unwrap()); + assert_eq!( + c3.hash(), + block_on(longest_chain_select.finality_target(c3.hash(), Some(3))) + .unwrap() + .unwrap() + ); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(3))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 2 - - assert_eq!(a2.hash(), longest_chain_select.finality_target( - genesis_hash, Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a1.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(2)).unwrap()); - - assert_eq!(b2.hash(), longest_chain_select.finality_target( - b2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(2)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(2)).unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(2)).unwrap().unwrap()); - + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(a2.hash(), Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(2))).unwrap()); + assert_eq!( + b2.hash(), + block_on(longest_chain_select.finality_target(b2.hash(), Some(2))) + .unwrap() + .unwrap() + ); + assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(2))).unwrap()); + assert_eq!( + d2.hash(), + block_on(longest_chain_select.finality_target(d2.hash(), Some(2))) + .unwrap() + .unwrap() + ); // search only blocks with number <= 1 + assert_eq!( + a1.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(1))) + .unwrap() + .unwrap() + ); + assert_eq!( + a1.hash(), + block_on(longest_chain_select.finality_target(a1.hash(), Some(1))) + .unwrap() + .unwrap() + ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(1))).unwrap()); - assert_eq!(a1.hash(), longest_chain_select.finality_target( - genesis_hash, 
Some(1)).unwrap().unwrap()); - assert_eq!(a1.hash(), longest_chain_select.finality_target( - a1.hash(), Some(1)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - d2.hash(), Some(1)).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(d2.hash(), Some(1))).unwrap()); // search only blocks with number <= 0 - - assert_eq!(genesis_hash, longest_chain_select.finality_target( - genesis_hash, Some(0)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a1.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash().clone(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - d2.hash().clone(), Some(0)).unwrap()); + assert_eq!( + genesis_hash, + block_on(longest_chain_select.finality_target(genesis_hash, Some(0))) + .unwrap() + .unwrap() + ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a1.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(0))).unwrap()); + assert_eq!( + None, + block_on(longest_chain_select.finality_target(c3.hash().clone(), Some(0))).unwrap(), + ); + assert_eq!( + None, + 
block_on(longest_chain_select.finality_target(d2.hash().clone(), Some(0))).unwrap(), + ); } #[test] @@ -973,15 +1167,20 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap()); + assert_eq!( + a2.hash(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(10))) + .unwrap() + .unwrap(), + ); } #[test] @@ -990,16 +1189,13 @@ fn key_changes_works() { for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { let end = client.block_hash(end).unwrap().unwrap(); - let actual_result = client.key_changes( - begin, - BlockId::Hash(end), - None, - &StorageKey(key), - ).unwrap(); - match actual_result == expected_result { - true => (), - false => panic!(format!("Failed test {}: actual = {:?}, expected = {:?}", - index, actual_result, expected_result)), + let actual_result = + client.key_changes(begin, BlockId::Hash(end), None, &StorageKey(key)).unwrap(); + if actual_result != expected_result { + panic!( + "Failed test {}: actual = {:?}, expected = {:?}", + index, actual_result, expected_result, + ); } } } @@ -1010,44 +1206,35 @@ fn import_with_justification() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); + client.finalize_block(BlockId::hash(a2.hash()), None).unwrap(); // A2 -> A3 - let justification = vec![1, 2, 3]; - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap(); + let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone())).unwrap(); - assert_eq!( - client.chain_info().finalized_hash, - a3.hash(), - ); + assert_eq!(client.chain_info().finalized_hash, a3.hash()); - assert_eq!( - client.justification(&BlockId::Hash(a3.hash())).unwrap(), - Some(justification), - ); + assert_eq!(client.justifications(&BlockId::Hash(a3.hash())).unwrap(), Some(justification)); - assert_eq!( - client.justification(&BlockId::Hash(a1.hash())).unwrap(), - None, - ); + assert_eq!(client.justifications(&BlockId::Hash(a1.hash())).unwrap(), None); - assert_eq!( - client.justification(&BlockId::Hash(a2.hash())).unwrap(), - None, - ); + 
assert_eq!(client.justifications(&BlockId::Hash(a2.hash())).unwrap(), None); } #[test] @@ -1057,54 +1244,44 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { // G -> A1 -> A2 // \ // -> B1 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); // create but don't import B1 just yet let b1 = b1.build().unwrap().block; // A2 is the current best since it's the longest chain - assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); + assert_eq!(client.chain_info().best_hash, a2.hash()); // importing B1 as finalized should trigger a re-org and set it as new best - let justification = vec![1, 2, 3]; - client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap(); + let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); + block_on(client.import_justified(BlockOrigin::Own, b1.clone(), justification)).unwrap(); - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().best_hash, b1.hash()); - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().finalized_hash, b1.hash()); } #[test] @@ -1114,84 +1291,70 @@ fn finalizing_diverged_block_should_trigger_reorg() { // G -> A1 -> A2 // \ // -> B1 -> B2 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; 
- client.import(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); - let b2 = client.new_block_at( - &BlockId::Hash(b1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // A2 is the current best since it's the longest chain - assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); + assert_eq!(client.chain_info().best_hash, a2.hash()); // we finalize block B1 which is on a different branch from current best // which should trigger a re-org. ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); // B1 should now be the latest finalized - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().finalized_hash, b1.hash()); // and B1 should be the new best block (`finalize_block` as no way of // knowing about B2) - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().best_hash, b1.hash()); // `SelectChain` should report B2 as best block though - assert_eq!( - select_chain.best_chain().unwrap().hash(), - b2.hash(), - ); + assert_eq!(block_on(select_chain.best_chain()).unwrap().hash(), b2.hash()); // after we build B3 on top of B2 and import it // it should be the new best block, - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); - assert_eq!( - client.chain_info().best_hash, - b3.hash(), - ); + assert_eq!(client.chain_info().best_hash, b3.hash()); } #[test] @@ -1209,58 +1372,56 @@ fn state_reverted_on_reorg() { sp_tracing::try_init_simple(); let mut client = substrate_test_runtime_client::new(); - let current_balance = |client: &substrate_test_runtime_client::TestClient| - client.runtime_api().balance_of( - &BlockId::number(client.chain_info().best_number), AccountKeyring::Alice.into(), - ).unwrap(); + let current_balance = |client: &substrate_test_runtime_client::TestClient| { + client + .runtime_api() + .balance_of( + &BlockId::number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ) + .unwrap() + }; // G -> A1 -> A2 // \ // -> B1 - let mut a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); a1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 10, nonce: 0, - }).unwrap(); + }) + .unwrap(); let a1 = a1.build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Ferdie.into(), amount: 50, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = 
b1.build().unwrap().block; // Reorg to B1 - client.import_as_best(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import_as_best(BlockOrigin::Own, b1.clone())).unwrap(); assert_eq!(950, current_balance(&client)); - let mut a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); a2.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 10, nonce: 1, - }).unwrap(); + }) + .unwrap(); let a2 = a2.build().unwrap().block; // Re-org to A2 - client.import_as_best(BlockOrigin::Own, a2).unwrap(); + block_on(client.import_as_best(BlockOrigin::Own, a2)).unwrap(); assert_eq!(980, current_balance(&client)); } @@ -1271,18 +1432,20 @@ fn doesnt_import_blocks_that_revert_finality() { // we need to run with archive pruning to avoid pruning non-canonical // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 1024, + let backend = Arc::new( + Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, - }, - u64::max_value(), - ).unwrap()); + u64::MAX, + ) + .unwrap(), + ); let mut client = TestClientBuilder::with_backend(backend).build(); @@ -1292,19 +1455,21 @@ fn doesnt_import_blocks_that_revert_finality() { // \ // -> B1 -> B2 -> B3 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1314,32 +1479,41 @@ fn doesnt_import_blocks_that_revert_finality() { to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); - let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // prepare B3 before we finalize A2, because otherwise we won't be able to // read changes trie configuration after A2 is finalized - let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) - 
.unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesn't include it ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); - let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); + let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() + sp_blockchain::Error::RuntimeApiError(sp_api::ApiError::Application(Box::new( + sp_blockchain::Error::NotInFinalizedChain, + ))) + .to_string(), ); - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); + assert_eq!(import_err.to_string(), expected_err.to_string()); // adding a C1 block which is lower than the last finalized should also // fail (with a cheaper check that doesn't require checking ancestry). @@ -1351,21 +1525,17 @@ fn doesnt_import_blocks_that_revert_finality() { to: AccountKeyring::Ferdie.into(), amount: 2, nonce: 0, - }).unwrap(); + }) + .unwrap(); let c1 = c1.build().unwrap().block; - let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); - let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() - ); + let import_err = block_on(client.import(BlockOrigin::Own, c1)).err().unwrap(); + let expected_err = + ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string()); - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); + assert_eq!(import_err.to_string(), expected_err.to_string()); } - #[test] fn respects_block_rules() { fn run_test( @@ -1377,28 +1547,30 @@ fn respects_block_rules() { TestClientBuilder::new().build() } else { TestClientBuilder::new() - .set_block_rules( - Some(fork_rules.clone()), - Some(known_bad.clone()), - ) + .set_block_rules(Some(fork_rules.clone()), Some(known_bad.clone())) .build() }; - let block_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; + let block_ok = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; let params = BlockCheckParams { hash: block_ok.hash().clone(), number: 0, parent_hash: block_ok.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 - let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap(); + let mut block_not_ok = + client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; @@ -1407,20 +1579,21 @@ fn respects_block_rules() { number: 0, parent_hash: block_not_ok.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; if record_only { known_bad.insert(block_not_ok.hash()); } else { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + 
assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::KnownBad); } // Now going to the fork - client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, block_ok)).unwrap(); // And check good fork - let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); + let mut block_ok = + client.new_block_at(&BlockId::Number(1), Default::default(), false).unwrap(); block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); let block_ok = block_ok.build().unwrap().block; @@ -1429,16 +1602,17 @@ fn respects_block_rules() { number: 1, parent_hash: block_ok.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; if record_only { fork_rules.push((1, block_ok.hash().clone())); } - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // And now try bad fork - let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); + let mut block_not_ok = + client.new_block_at(&BlockId::Number(1), Default::default(), false).unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; @@ -1447,11 +1621,12 @@ fn respects_block_rules() { number: 1, parent_hash: block_not_ok.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; if !record_only { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::KnownBad); } } @@ -1472,23 +1647,29 @@ fn returns_status_for_pruned_blocks() { // set to prune after 1 block // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - pruning: PruningMode::keep_blocks(1), - source: DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 1024, + let backend = Arc::new( + Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + state_pruning: PruningMode::keep_blocks(1), + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, - }, - u64::max_value(), - ).unwrap()); + u64::MAX, + ) + .unwrap(), + ); let mut client = TestClientBuilder::with_backend(backend).build(); - let a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1498,7 +1679,8 @@ fn returns_status_for_pruned_blocks() { to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; let check_block_a1 = BlockCheckParams { @@ -1506,66 +1688,129 @@ fn returns_status_for_pruned_blocks() { number: 0, parent_hash: a1.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false)); - 
assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::Unknown); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::imported(false), + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::Unknown, + ); - client.import_as_final(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, a1.clone())).unwrap(); - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainWithState, + ); - let a2 = client.new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import_as_final(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import_as_final(BlockOrigin::Own, a2.clone())).unwrap(); let check_block_a2 = BlockCheckParams { hash: a2.hash().clone(), number: 1, parent_hash: a1.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a2.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), + BlockStatus::InChainWithState, + ); - let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; - client.import_as_final(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, a3.clone())).unwrap(); let check_block_a3 = BlockCheckParams { hash: a3.hash().clone(), number: 2, parent_hash: a2.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; // a1 and a2 are both pruned at this point - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a3.clone()).unwrap(), 
ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a2.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a3.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), + BlockStatus::InChainWithState, + ); let mut check_block_b1 = BlockCheckParams { hash: b1.hash().clone(), number: 0, parent_hash: b1.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::MissingState, + ); check_block_b1.allow_missing_state = true; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::imported(false)); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::imported(false), + ); check_block_b1.parent_hash = H256::random(); - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::UnknownParent, + ); } #[test] @@ -1575,7 +1820,8 @@ fn imports_blocks_with_changes_tries_config_change() { .changes_trie_config(Some(ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2, - })).build(); + })) + .build(); // =================================================================== // blocks 1,2,3,4,5,6,7,8,9,10 are empty @@ -1587,78 +1833,123 @@ fn imports_blocks_with_changes_tries_config_change() { // blocks 24,25 are changing the key // block 26 is empty // block 27 changes the key - // block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to 3^1 + // block 28 is the L1 digest (NOT SKEWED!!!) 
that covers changes AND changes configuration to + // `3^1` // =================================================================== // block 29 is empty // block 30 changes the key // block 31 is L1 digest that covers this change // =================================================================== (1..11).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (11..12).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (12..23).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (23..24).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 5, - digest_levels: 1, - })).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 5, + digest_levels: 1, + })) + .unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (24..26).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (26..27).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (27..28).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], 
Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (28..29).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 3, - digest_levels: 1, - })).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 3, + digest_levels: 1, + })) + .unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (29..30).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (30..31).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (31..32).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); // now check that configuration cache works @@ -1670,27 +1961,67 @@ fn imports_blocks_with_changes_tries_config_change() { #[test] fn storage_keys_iter_prefix_and_start_key_works() { - let client = substrate_test_runtime_client::new(); - + let child_info = ChildInfo::new_default(b"child"); + let client = TestClientBuilder::new() + .add_extra_child_storage(&child_info, b"first".to_vec(), vec![0u8; 32]) + .add_extra_child_storage(&child_info, b"second".to_vec(), vec![0u8; 32]) + .add_extra_child_storage(&child_info, b"third".to_vec(), vec![0u8; 32]) + .build(); + + let child_root = b":child_storage:default:child".to_vec(); let prefix = StorageKey(hex!("3a").to_vec()); + let child_prefix = StorageKey(b"sec".to_vec()); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + let res: Vec<_> = client + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() .map(|x| x.0) .collect(); - assert_eq!(res, [hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec()]); + assert_eq!( + res, + 
[child_root.clone(), hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec(),] + ); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a636f6465").to_vec())), + ) .unwrap() .map(|x| x.0) .collect(); assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a686561707061676573").to_vec()))) + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a686561707061676573").to_vec())), + ) .unwrap() .map(|x| x.0) .collect(); assert_eq!(res, Vec::>::new()); + + let res: Vec<_> = client + .child_storage_keys_iter(&BlockId::Number(0), child_info.clone(), Some(&child_prefix), None) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, [b"second".to_vec()]); + + let res: Vec<_> = client + .child_storage_keys_iter( + &BlockId::Number(0), + child_info, + None, + Some(&StorageKey(b"second".to_vec())), + ) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, [b"third".to_vec()]); } #[test] @@ -1699,30 +2030,52 @@ fn storage_keys_iter_works() { let prefix = StorageKey(hex!("").to_vec()); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + let res: Vec<_> = client + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() .take(2) .map(|x| x.0) .collect(); - assert_eq!(res, [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]); + assert_eq!( + res, + [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()] + ); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a636f6465").to_vec())), + ) .unwrap() .take(3) .map(|x| x.0) .collect(); - assert_eq!(res, [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - ]); + assert_eq!( + res, + [ + hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ] + ); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec()))) + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey( + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + )), + ) .unwrap() .take(1) .map(|x| x.0) .collect(); - assert_eq!(res, [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]); + assert_eq!( + res, + [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()] + ); } #[test] @@ -1732,25 +2085,29 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. 
- let mut client = - new_in_mem::< - _, - substrate_test_runtime_client::runtime::Block, - _, - substrate_test_runtime_client::runtime::RuntimeApi, - >( - substrate_test_runtime_client::new_native_executor(), - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - None, - None, - Box::new(TaskExecutor::new()), - Default::default(), - ) - .unwrap(); + let mut client = new_in_mem::< + _, + substrate_test_runtime_client::runtime::Block, + _, + substrate_test_runtime_client::runtime::RuntimeApi, + >( + substrate_test_runtime_client::new_native_executor(), + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + None, + None, + None, + Box::new(TaskExecutor::new()), + Default::default(), + ) + .unwrap(); type TestClient = Client< in_mem::Backend, - LocalCallExecutor, sc_executor::NativeExecutor>, + LocalCallExecutor< + Block, + in_mem::Backend, + sc_executor::NativeElseWasmExecutor, + >, substrate_test_runtime_client::runtime::Block, substrate_test_runtime_client::runtime::RuntimeApi, >; @@ -1762,18 +2119,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // for some reason I can't seem to use `ClientBlockImportExt` let bake_and_import_block = |client: &mut TestClient, origin| { - let block = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - client.import_block(import, Default::default()).unwrap(); + block_on(client.import_block(import, Default::default())).unwrap(); }; // after importing a block we should still have 4 notification sinks @@ -1807,47 +2159,46 @@ fn cleans_up_closed_notification_sinks_on_block_import() { fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifications() { let mut client = TestClientBuilder::new().build(); - let mut notification_stream = futures::executor::block_on_stream( - client.import_notification_stream() - ); + let mut notification_stream = + futures::executor::block_on_stream(client.import_notification_stream()); - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, a1.clone()).unwrap(); + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::NetworkInitialSync, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::NetworkInitialSync, a2.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; - 
client.import(BlockOrigin::NetworkInitialSync, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::NetworkInitialSync, b1.clone())).unwrap(); - let b2 = client.new_block_at( - &BlockId::Hash(b1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; // Should trigger a notification because we reorg - client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone()).unwrap(); + block_on(client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone())).unwrap(); // There should be one notification let notification = notification_stream.next().unwrap(); diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 6d8b4decb18c1..8000c536cdf93 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,37 +18,26 @@ //! Service integration test utils. -use std::iter; -use std::sync::Arc; -use std::net::Ipv4Addr; -use std::pin::Pin; -use std::time::Duration; -use log::{info, debug}; -use futures01::{Future, Stream, Poll}; -use futures::{FutureExt as _, TryFutureExt as _}; -use tempfile::TempDir; -use tokio::{runtime::Runtime, prelude::FutureExt}; -use tokio::timer::Interval; +use futures::{task::Poll, Future, TryFutureExt as _}; +use log::{debug, info}; +use parking_lot::Mutex; +use sc_client_api::{Backend, CallExecutor}; +use sc_network::{ + config::{NetworkConfiguration, TransportConfig}, + multiaddr, Multiaddr, +}; use sc_service::{ - TaskManager, - SpawnTaskHandle, - GenericChainSpec, - ChainSpecExtension, - Configuration, - config::{BasePath, DatabaseConfig, KeystoreConfig}, - RuntimeGenesis, - Role, - Error, - TaskExecutor, client::Client, + config::{BasePath, DatabaseSource, KeystoreConfig}, + ChainSpecExtension, Configuration, Error, GenericChainSpec, KeepBlocks, Role, RuntimeGenesis, + SpawnTaskHandle, TaskManager, TransactionStorageMode, }; +use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderBackend; -use sc_network::{multiaddr, Multiaddr}; -use sc_network::config::{NetworkConfiguration, TransportConfig}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use sp_transaction_pool::TransactionPool; -use sc_client_api::{Backend, CallExecutor}; -use parking_lot::Mutex; +use std::{iter, net::Ipv4Addr, pin::Pin, sync::Arc, task::Context, time::Duration}; +use tempfile::TempDir; +use tokio::{runtime::Runtime, time}; #[cfg(test)] mod client; @@ -66,7 +55,19 @@ struct TestNet { nodes: usize, } -pub trait TestNetNode: Clone + Future + Send + 'static { +impl Drop for TestNet { + fn drop(&mut self) { + // Drop the nodes before dropping the runtime, as the runtime otherwise waits for all + // futures to be ended and we run into a dead lock. 
+ self.full_nodes.drain(..); + self.light_nodes.drain(..); + self.authority_nodes.drain(..); + } +} + +pub trait TestNetNode: + Clone + Future> + Send + 'static +{ type Block: BlockT; type Backend: Backend; type Executor: CallExecutor + Send + Sync; @@ -75,7 +76,9 @@ pub trait TestNetNode: Clone + Future + Se fn client(&self) -> Arc>; fn transaction_pool(&self) -> Arc; - fn network(&self) -> Arc::Hash>>; + fn network( + &self, + ) -> Arc::Hash>>; fn spawn_handle(&self) -> SpawnTaskHandle; } @@ -87,23 +90,21 @@ pub struct TestNetComponents { } impl -TestNetComponents { + TestNetComponents +{ pub fn new( task_manager: TaskManager, client: Arc>, network: Arc::Hash>>, transaction_pool: Arc, ) -> Self { - Self { - client, transaction_pool, network, - task_manager: Arc::new(Mutex::new(task_manager)), - } + Self { client, transaction_pool, network, task_manager: Arc::new(Mutex::new(task_manager)) } } } - -impl Clone for -TestNetComponents { +impl Clone + for TestNetComponents +{ fn clone(&self) -> Self { Self { task_manager: self.task_manager.clone(), @@ -114,25 +115,24 @@ TestNetComponents { } } -impl Future for - TestNetComponents +impl Future + for TestNetComponents { - type Item = (); - type Error = sc_service::Error; + type Output = Result<(), sc_service::Error>; - fn poll(&mut self) -> Poll { - futures::compat::Compat::new(&mut self.task_manager.lock().future()).poll() + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + Pin::new(&mut self.task_manager.lock().future()).poll(cx) } } -impl TestNetNode for -TestNetComponents - where - TBl: BlockT, - TBackend: sc_client_api::Backend + Send + Sync + 'static, - TExec: CallExecutor + Send + Sync + 'static, - TRtApi: Send + Sync + 'static, - TExPool: TransactionPool + Send + Sync + 'static, +impl TestNetNode + for TestNetComponents +where + TBl: BlockT, + TBackend: sc_client_api::Backend + Send + Sync + 'static, + TExec: CallExecutor + Send + Sync + 'static, + TRtApi: Send + Sync + 'static, + TExPool: TransactionPool + Send + Sync + 'static, { type Block = TBl; type Backend = TBackend; @@ -146,7 +146,9 @@ TestNetComponents fn transaction_pool(&self) -> Arc { self.transaction_pool.clone() } - fn network(&self) -> Arc::Hash>> { + fn network( + &self, + ) -> Arc::Hash>> { self.network.clone() } fn spawn_handle(&self) -> SpawnTaskHandle { @@ -155,60 +157,64 @@ TestNetComponents } impl TestNet -where F: Clone + Send + 'static, L: Clone + Send +'static, U: Clone + Send + 'static +where + F: Clone + Send + 'static, + L: Clone + Send + 'static, + U: Clone + Send + 'static, { - pub fn run_until_all_full( - &mut self, - full_predicate: FP, - light_predicate: LP, - ) - where - FP: Send + Fn(usize, &F) -> bool + 'static, - LP: Send + Fn(usize, &L) -> bool + 'static, + pub fn run_until_all_full(&mut self, full_predicate: FP, light_predicate: LP) + where + FP: Send + Fn(usize, &F) -> bool + 'static, + LP: Send + Fn(usize, &L) -> bool + 'static, { let full_nodes = self.full_nodes.clone(); let light_nodes = self.light_nodes.clone(); - let interval = Interval::new_interval(Duration::from_millis(100)) - .map_err(|_| ()) - .for_each(move |_| { - let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)| - full_predicate(*id, service) - ); + let future = async move { + let mut interval = time::interval(Duration::from_millis(100)); + + loop { + interval.tick().await; + + let full_ready = full_nodes + .iter() + .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)); if !full_ready { - return Ok(()); + continue } - let light_ready = 
light_nodes.iter().all(|&(ref id, ref service, _)| - light_predicate(*id, service) - ); + let light_ready = light_nodes + .iter() + .all(|&(ref id, ref service, _)| light_predicate(*id, service)); - if !light_ready { - Ok(()) - } else { - Err(()) + if light_ready { + return } - }) - .timeout(MAX_WAIT_TIME); + } + }; - match self.runtime.block_on(interval) { - Ok(()) => unreachable!("interval always fails; qed"), - Err(ref err) if err.is_inner() => (), - Err(_) => panic!("Waited for too long"), + if self + .runtime + .block_on(async move { time::timeout(MAX_WAIT_TIME, future).await }) + .is_err() + { + panic!("Waited for too long"); } } } -fn node_config ( +fn node_config< + G: RuntimeGenesis + 'static, + E: ChainSpecExtension + Clone + 'static + Send + Sync, +>( index: usize, spec: &GenericChainSpec, role: Role, - task_executor: TaskExecutor, + tokio_handle: tokio::runtime::Handle, key_seed: Option, base_port: u16, root: &TempDir, -) -> Configuration -{ +) -> Configuration { let root = root.path().join(format!("node-{}", index)); let mut network_config = NetworkConfiguration::new( @@ -223,35 +229,30 @@ fn node_config TestNet where +impl TestNet +where F: TestNetNode, L: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send + Sync, @@ -287,11 +290,8 @@ impl TestNet where spec: GenericChainSpec, full: impl Iterator Result<(F, U), Error>>, light: impl Iterator Result>, - authorities: impl Iterator Result<(F, U), Error> - )>, - base_port: u16 + authorities: impl Iterator Result<(F, U), Error>)>, + base_port: u16, ) -> TestNet { sp_tracing::try_init_simple(); fdlimit::raise_fd_limit(); @@ -314,32 +314,27 @@ impl TestNet where temp: &TempDir, full: impl Iterator Result<(F, U), Error>>, light: impl Iterator Result>, - authorities: impl Iterator Result<(F, U), Error>)> + authorities: impl Iterator Result<(F, U), Error>)>, ) { - let executor = self.runtime.executor(); - let task_executor: TaskExecutor = { - let executor = executor.clone(); - (move |fut: Pin + Send>>, _| { - executor.spawn(fut.unit_error().compat()); - async {} - }).into() - }; + let handle = self.runtime.handle().clone(); for (key, authority) in authorities { let node_config = node_config( self.nodes, &self.chain_spec, - Role::Authority { sentry_nodes: Vec::new() }, - task_executor.clone(), + Role::Authority, + handle.clone(), Some(key), self.base_port, &temp, ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let (service, user_data) = authority(node_config).expect("Error creating test node service"); + let (service, user_data) = + authority(node_config).expect("Error creating test node service"); - executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + handle.spawn(service.clone().map_err(|_| ())); + let addr = addr + .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -349,7 +344,7 @@ impl TestNet where self.nodes, &self.chain_spec, Role::Full, - task_executor.clone(), + handle.clone(), None, self.base_port, &temp, @@ -357,8 +352,9 @@ impl TestNet where let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let (service, user_data) = full(node_config).expect("Error creating test node service"); - executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + 
handle.spawn(service.clone().map_err(|_| ())); + let addr = addr + .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -368,7 +364,7 @@ impl TestNet where self.nodes, &self.chain_spec, Role::Light, - task_executor.clone(), + handle.clone(), None, self.base_port, &temp, @@ -376,8 +372,9 @@ impl TestNet where let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let service = light(node_config).expect("Error creating test node service"); - executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + handle.spawn(service.clone().map_err(|_| ())); + let addr = addr + .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.light_nodes.push((self.nodes, service, addr)); self.nodes += 1; } @@ -385,7 +382,10 @@ impl TestNet where } fn tempdir_with_prefix(prefix: &str) -> TempDir { - tempfile::Builder::new().prefix(prefix).tempdir().expect("Error creating test dir") + tempfile::Builder::new() + .prefix(prefix) + .tempdir() + .expect("Error creating test dir") } pub fn connectivity( @@ -408,12 +408,12 @@ pub fn connectivity( { let temp = tempdir_with_prefix("substrate-connectivity-test"); - let runtime = { + { let mut network = TestNet::new( &temp, spec.clone(), - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -422,11 +422,15 @@ pub fn connectivity( info!("Checking star topology"); let first_address = network.full_nodes[0].3.clone(); for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } @@ -442,12 +446,8 @@ pub fn connectivity( connected == expected_light_connections }, ); - - network.runtime }; - runtime.shutdown_now().wait().expect("Error shutting down runtime"); - temp.close().expect("Error removing temp dir"); } { @@ -456,8 +456,8 @@ pub fn connectivity( let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. 
(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -469,14 +469,18 @@ pub fn connectivity( for i in 0..max_nodes { if i != 0 { if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { - service.network().add_reserved_peer(address.to_string()) + service + .network() + .add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = node_id.clone(); } } if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service.network().add_reserved_peer(address.to_string()) + service + .network() + .add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = node_id.clone(); } @@ -504,7 +508,7 @@ pub fn sync( full_builder: Fb, light_builder: Lb, mut make_block_and_import: B, - mut extrinsic_factory: ExF + mut extrinsic_factory: ExF, ) where Fb: Fn(Configuration) -> Result<(F, U), Error>, F: TestNetNode, @@ -524,8 +528,8 @@ pub fn sync( let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg)), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), @@ -534,31 +538,37 @@ pub fn sync( info!("Checking block sync"); let first_address = { let &mut (_, ref first_service, ref mut first_user_data, _) = &mut network.full_nodes[0]; - for i in 0 .. NUM_BLOCKS { + for i in 0..NUM_BLOCKS { if i % 128 == 0 { info!("Generating #{}", i + 1); } make_block_and_import(&first_service, first_user_data); } - network.full_nodes[0].1.network().update_chain(); + let info = network.full_nodes[0].1.client().info(); + network.full_nodes[0] + .1 + .network() + .new_best_block_imported(info.best_hash, info.best_number); network.full_nodes[0].3.clone() }; info!("Running sync"); for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full( - |_index, service| - service.client().info().best_number == (NUM_BLOCKS as u32).into(), - |_index, service| - service.client().info().best_number == (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().best_number == (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().best_number == (NUM_BLOCKS as u32).into(), ); info!("Checking extrinsic propagation"); @@ -566,11 +576,14 @@ pub fn sync( let first_user_data = &network.full_nodes[0].2; let best_block = BlockId::number(first_service.client().info().best_number); let extrinsic = extrinsic_factory(&first_service, first_user_data); - let source = sp_transaction_pool::TransactionSource::External; + let source = sc_transaction_pool_api::TransactionSource::External; - futures::executor::block_on( - first_service.transaction_pool().submit_one(&best_block, source, extrinsic) - ).expect("failed to submit extrinsic"); + futures::executor::block_on(first_service.transaction_pool().submit_one( + &best_block, + source, + extrinsic, + )) + 
.expect("failed to submit extrinsic"); network.run_until_all_full( |_index, service| service.transaction_pool().ready().count() == 1, @@ -582,7 +595,7 @@ pub fn consensus( spec: GenericChainSpec, full_builder: Fb, light_builder: Lb, - authorities: impl IntoIterator + authorities: impl IntoIterator, ) where Fb: Fn(Configuration) -> Result, F: TestNetNode, @@ -598,54 +611,64 @@ pub fn consensus( let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), - authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), + (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), + authorities + .into_iter() + .map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30600, ); info!("Checking consensus"); let first_address = network.authority_nodes[0].3.clone(); for (_, service, _, _) in network.full_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _, _) in network.authority_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full( - |_index, service| - service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), - |_index, service| - service.client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), + |_index, service| { + service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into() + }, + |_index, service| service.client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), ); info!("Adding more peers"); network.insert_nodes( &temp, - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. 
(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), ); for (_, service, _, _) in network.full_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full( - |_index, service| - service.client().info().finalized_number >= (NUM_BLOCKS as u32).into(), - |_index, service| - service.client().info().best_number >= (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().finalized_number >= (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().best_number >= (NUM_BLOCKS as u32).into(), ); } diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 4d3e736d9539e..93d5e1464b39b 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-state-db" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.10.0" -log = "0.4.8" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +parking_lot = "0.11.1" +log = "0.4.11" +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 61470894e487e..44629975d7813 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,35 +16,47 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! State database maintenance. Handles canonicalization and pruning in the database. The input to -//! this module is a `ChangeSet` which is basically a list of key-value pairs (trie nodes) that -//! were added or deleted during block execution. +//! State database maintenance. Handles canonicalization and pruning in the database. //! //! # Canonicalization. //! Canonicalization window tracks a tree of blocks identified by header hash. The in-memory -//! overlay allows to get any node that was inserted in any of the blocks within the window. -//! The tree is journaled to the backing database and rebuilt on startup. -//! Canonicalization function selects one root from the top of the tree and discards all other roots and -//! their subtrees. +//! 
overlay allows getting any trie node that was inserted in any of the blocks within the window.
+//! The overlay is journaled to the backing database and rebuilt on startup.
+//! There's a limit of 32 blocks that may have the same block number in the canonicalization window.
+//!
+//! The canonicalization function selects one root from the top of the tree and discards all other
+//! roots and their subtrees. Upon canonicalization all trie nodes that were inserted in the block
+//! are added to the backing DB and block tracking is moved to the pruning window, where no forks
+//! are allowed.
+//!
+//! # Canonicalization vs Finality
+//! The database engine uses a notion of canonicality rather than finality. A canonical block may
+//! not yet be finalized from the perspective of the consensus engine, but it still can't be
+//! reverted in the database. During normal operation, the last canonical block is usually the same
+//! as the last finalized one. However, if finality stalls for a long time, only a limited number
+//! of blocks fit in the non-canonical overlay, so canonicalization of an unfinalized block may be
+//! forced.
 //!
 //! # Pruning.
-//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until pruning
-//! constraints are satisfied.
+//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until
+//! pruning constraints are satisfied.
 
 mod noncanonical;
 mod pruning;
 #[cfg(test)]
 mod test;
 
-use std::fmt;
-use parking_lot::RwLock;
 use codec::Codec;
-use std::collections::{HashMap, hash_map::Entry};
+use log::trace;
 use noncanonical::NonCanonicalOverlay;
+use parity_util_mem::{malloc_size, MallocSizeOf};
+use parking_lot::RwLock;
 use pruning::RefWindow;
-use log::trace;
-use parity_util_mem::{MallocSizeOf, malloc_size};
-use sc_client_api::{StateDbMemoryInfo, MemorySize};
+use sc_client_api::{MemorySize, StateDbMemoryInfo};
+use std::{
+	collections::{hash_map::Entry, HashMap},
+	fmt,
+};
 
 const PRUNING_MODE: &[u8] = b"mode";
 const PRUNING_MODE_ARCHIVE: &[u8] = b"archive";
@@ -55,8 +67,35 @@ const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained";
 pub type DBValue = Vec<u8>;
 
 /// Basic set of requirements for the Block hash and node key types.
-pub trait Hash: Send + Sync + Sized + Eq + PartialEq + Clone + Default + fmt::Debug + Codec + std::hash::Hash + 'static {}
-impl<T: Send + Sync + Sized + Eq + PartialEq + Clone + Default + fmt::Debug + Codec + std::hash::Hash + 'static> Hash for T {}
+pub trait Hash:
+	Send
+	+ Sync
+	+ Sized
+	+ Eq
+	+ PartialEq
+	+ Clone
+	+ Default
+	+ fmt::Debug
+	+ Codec
+	+ std::hash::Hash
+	+ 'static
+{
+}
+impl<
+		T: Send
+			+ Sync
+			+ Sized
+			+ Eq
+			+ PartialEq
+			+ Clone
+			+ Default
+			+ fmt::Debug
+			+ Codec
+			+ std::hash::Hash
+			+ 'static,
+	> Hash for T
+{
+}
 
 /// Backend database trait. Read-only.
 pub trait MetaDb {
@@ -89,6 +128,8 @@ pub enum Error<E: fmt::Debug> {
 	InvalidParent,
 	/// Invalid pruning mode specified. Contains expected mode.
 	InvalidPruningMode(String),
+	/// Too many unfinalized sibling blocks inserted.
+	TooManySiblingBlocks,
 }
 
 /// Pinning error type.
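
For reference, the insert-then-canonicalize flow described in the module docs above looks roughly like this from a backend's point of view. This is a minimal sketch, not code from this patch: `TestDb` stands in for any backend implementing `MetaDb` and `NodeDb` (analogous to the helper used by this crate's tests), and the exact generic parameters are assumptions inferred from the signatures visible in this diff.

```rust
use sc_state_db::{ChangeSet, PruningMode, StateDb};
use sp_core::H256;
use std::io;

// `TestDb` is a hypothetical backend implementing `MetaDb` + `NodeDb`.
fn insert_and_finalize(db: &mut TestDb, hash: H256, parent: H256) {
	// Keep only the last finalized state, mirroring `PruningMode::keep_blocks`.
	let state_db: StateDb<H256, H256> =
		StateDb::new(PruningMode::keep_blocks(1), false, db).unwrap();

	// Journal the block as non-canonical; the returned `CommitSet` must be
	// persisted by the caller (an empty change set here, for illustration).
	let commit =
		state_db.insert_block::<io::Error>(&hash, 1, &parent, ChangeSet::default()).unwrap();
	db.commit(&commit);

	// Once consensus finalizes the block, canonicalize it: its trie nodes move
	// from the in-memory overlay into the pruning window, and states beyond
	// the pruning constraint are discarded.
	let commit = state_db.canonicalize_block::<io::Error>(&hash).unwrap();
	db.commit(&commit);
}
```
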
@@ -107,11 +148,12 @@ impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Db(e) => e.fmt(f), - Error::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e.what()), + Error::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e), Error::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), Error::InvalidPruningMode(e) => write!(f, "Expected pruning mode: {}", e), + Error::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), } } } @@ -137,7 +179,8 @@ pub struct CommitSet { /// Pruning constraints. If none are specified pruning is #[derive(Default, Debug, Clone, Eq, PartialEq)] pub struct Constraints { - /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical states. + /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical + /// states. pub max_blocks: Option, /// Maximum memory in the pruning overlay. pub max_mem: Option, @@ -157,17 +200,14 @@ pub enum PruningMode { impl PruningMode { /// Create a mode that keeps given number of blocks. pub fn keep_blocks(n: u32) -> PruningMode { - PruningMode::Constrained(Constraints { - max_blocks: Some(n), - max_mem: None, - }) + PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None }) } /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode? pub fn is_archive(&self) -> bool { match *self { PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => true, - PruningMode::Constrained(_) => false + PruningMode::Constrained(_) => false, } } @@ -213,20 +253,12 @@ impl StateDbSync = NonCanonicalOverlay::new(db)?; let pruning: Option> = match mode { - PruningMode::Constrained(Constraints { - max_mem: Some(_), - .. - }) => unimplemented!(), + PruningMode::Constrained(Constraints { max_mem: Some(_), .. 
}) => unimplemented!(), PruningMode::Constrained(_) => Some(RefWindow::new(db, ref_counting)?), PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, }; - Ok(StateDbSync { - mode, - non_canonical, - pruning, - pinned: Default::default(), - }) + Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default() }) } fn check_meta(mode: &PruningMode, db: &D) -> Result<(), Error> { @@ -259,10 +291,7 @@ impl StateDbSync { changeset.deleted.clear(); // write changes immediately - Ok(CommitSet { - data: changeset, - meta, - }) + Ok(CommitSet { data: changeset, meta }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); @@ -270,7 +299,7 @@ impl StateDbSync StateDbSync { + Ok(()) => if self.mode == PruningMode::ArchiveCanonical { commit.data.deleted.clear(); - } - } + }, Err(e) => return Err(e), }; if let Some(ref mut pruning) = self.pruning { @@ -308,31 +336,30 @@ impl StateDbSync c).unwrap_or(true) { !self.non_canonical.have_block(hash) } else { - self.pruning - .as_ref() - .map_or( - false, - |pruning| number < pruning.pending() || !pruning.have_block(hash), - ) + self.pruning.as_ref().map_or(false, |pruning| { + number < pruning.pending() || !pruning.have_block(hash) + }) } - } + }, } } fn prune(&mut self, commit: &mut CommitSet) { - if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) { + if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = + (&mut self.pruning, &self.mode) + { loop { if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { - break; + break } if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) { - break; + break } let pinned = &self.pinned; if pruning.next_hash().map_or(false, |h| pinned.contains_key(&h)) { - break; + break } pruning.prune_one(commit); } @@ -344,12 +371,17 @@ impl StateDbSync Option> { match self.mode { - PruningMode::ArchiveAll => { - Some(CommitSet::default()) - }, - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - self.non_canonical.revert_one() - }, + PruningMode::ArchiveAll => Some(CommitSet::default()), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => + self.non_canonical.revert_one(), + } + } + + fn remove(&mut self, hash: &BlockHash) -> Option> { + match self.mode { + PruningMode::ArchiveAll => Some(CommitSet::default()), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => + self.non_canonical.remove(hash), } } @@ -370,7 +402,7 @@ impl StateDbSync StateDbSync(&self, key: &Q, db: &D) -> Result, Error> + pub fn get( + &self, + key: &Q, + db: &D, + ) -> Result, Error> where Q: AsRef, Key: std::borrow::Borrow, Q: std::hash::Hash + Eq, { if let Some(value) = self.non_canonical.get(key) { - return Ok(Some(value)); + return Ok(Some(value)) } db.get(key.as_ref()).map_err(|e| Error::Db(e)) } @@ -447,9 +483,7 @@ impl StateDb Result, Error> { - Ok(StateDb { - db: RwLock::new(StateDbSync::new(mode, ref_counting, db)?) - }) + Ok(StateDb { db: RwLock::new(StateDbSync::new(mode, ref_counting, db)?) }) } /// Add a new non-canonical block. 
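
The `pinned` check in the prune loop above is what block pinning hooks into: while a hash is pinned, `prune_one` stops before it. A rough usage sketch follows; `TestDb` is the same hypothetical backend as before, and the `pin`/`unpin`/`get` signatures are assumptions inferred from this file rather than a guaranteed API.

```rust
use sc_state_db::StateDb;
use sp_core::H256;

// Read state that may be about to fall out of the pruning window.
fn read_pinned(state_db: &StateDb<H256, H256>, db: &TestDb, hash: H256, key: H256) {
	// Pin the block so its state outlives the pruning window, e.g. while a
	// long-running query reads it; the prune loop above skips pinned hashes.
	state_db.pin(&hash).expect("block is still in the overlay or pruning window");

	// While pinned, reads through `get` keep working even past the window.
	let _value = state_db.get(&key, db).unwrap();

	// Unpin so the next canonicalization can prune the block as usual.
	state_db.unpin(&hash);
}
```
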
@@ -482,11 +516,15 @@ impl StateDb(&self, key: &Q, db: &D) -> Result, Error> - where - Q: AsRef, - Key: std::borrow::Borrow, - Q: std::hash::Hash + Eq, + pub fn get( + &self, + key: &Q, + db: &D, + ) -> Result, Error> + where + Q: AsRef, + Key: std::borrow::Borrow, + Q: std::hash::Hash + Eq, { self.db.read().get(key, db) } @@ -498,6 +536,12 @@ impl StateDb Option> { + self.db.write().remove(hash) + } + /// Returns last finalized block number. pub fn best_canonical(&self) -> Option { return self.db.read().best_canonical() @@ -526,10 +570,12 @@ impl StateDb (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); @@ -651,13 +697,13 @@ mod tests { let state_db = StateDb::new(PruningMode::ArchiveAll, false, &db).unwrap(); db.commit( &state_db - .insert_block::( - &H256::from_low_u64_be(0), - 0, - &H256::from_low_u64_be(0), - make_changeset(&[], &[]), - ) - .unwrap(), + .insert_block::( + &H256::from_low_u64_be(0), + 0, + &H256::from_low_u64_be(0), + make_changeset(&[], &[]), + ) + .unwrap(), ); let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); let state_db: Result, _> = StateDb::new(new_mode, false, &db); diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index d77f20c50d05f..c726ceae4b058 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -22,29 +22,59 @@ //! All pending changes are kept in memory until next call to `apply_pending` or //! `revert_pending` -use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; -use codec::{Encode, Decode}; +use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb}; +use codec::{Decode, Encode}; use log::trace; +use std::{ + collections::{hash_map::Entry, HashMap, VecDeque}, + fmt, +}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; +const MAX_BLOCKS_PER_LEVEL: u64 = 32; /// See module documentation. #[derive(parity_util_mem_derive::MallocSizeOf)] pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, - levels: VecDeque>>, + levels: VecDeque>, parents: HashMap, pending_canonicalizations: Vec, pending_insertions: Vec, - values: HashMap, //ref counted - //would be deleted but kept around because block is pinned, ref counted. + values: HashMap, // ref counted + // would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, pinned_insertions: HashMap, u32)>, } +#[derive(parity_util_mem_derive::MallocSizeOf)] +#[cfg_attr(test, derive(PartialEq, Debug))] +struct OverlayLevel { + blocks: Vec>, + used_indicies: u64, // Bitmask of available journal indicies. 
+} + +impl OverlayLevel { + fn push(&mut self, overlay: BlockOverlay) { + self.used_indicies |= 1 << overlay.journal_index; + self.blocks.push(overlay) + } + + fn available_index(&self) -> u64 { + self.used_indicies.trailing_ones() as u64 + } + + fn remove(&mut self, index: usize) -> BlockOverlay { + self.used_indicies &= !(1 << self.blocks[index].journal_index); + self.blocks.remove(index) + } + + fn new() -> OverlayLevel { + OverlayLevel { blocks: Vec::new(), used_indicies: 0 } + } +} + #[derive(Encode, Decode)] struct JournalRecord { hash: BlockHash, @@ -61,12 +91,16 @@ fn to_journal_key(block: u64, index: u64) -> Vec { #[derive(parity_util_mem_derive::MallocSizeOf)] struct BlockOverlay { hash: BlockHash, + journal_index: u64, journal_key: Vec, inserted: Vec, deleted: Vec, } -fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { +fn insert_values( + values: &mut HashMap, + inserted: Vec<(Key, DBValue)>, +) { for (k, v) in inserted { debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); @@ -86,13 +120,13 @@ fn discard_values(values: &mut HashMap, inserted }, Entry::Vacant(_) => { debug_assert!(false, "Trying to discard missing value"); - } + }, } } } fn discard_descendants( - levels: &mut (&mut [Vec>], &mut [Vec>]), + levels: &mut (&mut [OverlayLevel], &mut [OverlayLevel]), mut values: &mut HashMap, parents: &mut HashMap, pinned: &HashMap, @@ -110,36 +144,34 @@ fn discard_descendants( }; let mut pinned_children = 0; if let Some(level) = first { - *level = level.drain(..).filter_map(|overlay| { - let parent = parents.get(&overlay.hash) - .expect("there is a parent entry for each entry in levels; qed"); - - if parent == hash { - let mut num_pinned = discard_descendants( - &mut remainder, - values, - parents, - pinned, - pinned_insertions, - &overlay.hash - ); - if pinned.contains_key(&overlay.hash) { - num_pinned += 1; - } - if num_pinned != 0 { - // save to be discarded later. - pinned_insertions.insert(overlay.hash.clone(), (overlay.inserted, num_pinned)); - pinned_children += num_pinned; - } else { - // discard immediately. - parents.remove(&overlay.hash); - discard_values(&mut values, overlay.inserted); - } - None + while let Some(i) = level.blocks.iter().position(|overlay| { + parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") == + hash + }) { + let overlay = level.remove(i); + let mut num_pinned = discard_descendants( + &mut remainder, + values, + parents, + pinned, + pinned_insertions, + &overlay.hash, + ); + if pinned.contains_key(&overlay.hash) { + num_pinned += 1; + } + if num_pinned != 0 { + // save to be discarded later. + pinned_insertions.insert(overlay.hash.clone(), (overlay.inserted, num_pinned)); + pinned_children += num_pinned; } else { - Some(overlay) + // discard immediately. + parents.remove(&overlay.hash); + discard_values(&mut values, overlay.inserted); } - }).collect(); + } } pinned_children } @@ -147,12 +179,11 @@ fn discard_descendants( impl NonCanonicalOverlay { /// Creates a new instance. Does not expect any metadata to be present in the DB. 
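Journal keys are derived from `(block_number, journal_index)`, and now that sibling blocks can be discarded individually, the indices within a level may develop holes. `OverlayLevel` therefore tracks slot occupancy in a `u64` bitmask, and `available_index` returns the lowest free slot via `trailing_ones`. A tiny standalone illustration of the allocation scheme (`IndexMask` is a hypothetical type, not part of the crate):

```rust
/// Bit `i` set means journal index `i` is in use on this level.
struct IndexMask(u64);

impl IndexMask {
    /// Lowest free index: the length of the run of 1-bits at the bottom.
    fn available_index(&self) -> u64 {
        self.0.trailing_ones() as u64
    }
    fn take(&mut self, i: u64) {
        self.0 |= 1 << i;
    }
    fn release(&mut self, i: u64) {
        self.0 &= !(1 << i);
    }
}

fn main() {
    let mut m = IndexMask(0);
    for i in 0..3 {
        m.take(i);
    }
    assert_eq!(m.available_index(), 3); // indices 0..=2 used, 3 is next
    m.release(1);                       // a sibling block was discarded
    assert_eq!(m.available_index(), 1); // its journal slot is reused first
}
```

This is also why the restore loop below probes every index in `0..MAX_BLOCKS_PER_LEVEL` instead of stopping at the first missing journal entry: discarded siblings can leave gaps, which the `index_reuse` and `restore_from_journal_after_canonicalize_no_first` tests exercise.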
pub fn new(db: &D) -> Result, Error> { - let last_canonicalized = db.get_meta(&to_meta_key(LAST_CANONICAL, &())) - .map_err(|e| Error::Db(e))?; - let last_canonicalized = match last_canonicalized { - Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?), - None => None, - }; + let last_canonicalized = + db.get_meta(&to_meta_key(LAST_CANONICAL, &())).map_err(|e| Error::Db(e))?; + let last_canonicalized = last_canonicalized + .map(|buffer| <(BlockHash, u64)>::decode(&mut buffer.as_slice())) + .transpose()?; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); let mut values = HashMap::new(); @@ -162,32 +193,36 @@ impl NonCanonicalOverlay { let mut total: u64 = 0; block += 1; loop { - let mut index: u64 = 0; - let mut level = Vec::new(); - loop { + let mut level = OverlayLevel::new(); + for index in 0..MAX_BLOCKS_PER_LEVEL { let journal_key = to_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted: inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; - }, - None => break, + if let Some(record) = db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + let record: JournalRecord = + Decode::decode(&mut record.as_slice())?; + let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_index: index, + journal_key, + inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!( + target: "state-db", + "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", + block, + index, + overlay.inserted.len(), + overlay.deleted.len() + ); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + total += 1; } } - if level.is_empty() { - break; + if level.blocks.is_empty() { + break } levels.push_back(level); block += 1; @@ -202,53 +237,76 @@ impl NonCanonicalOverlay { pending_insertions: Default::default(), pinned: Default::default(), pinned_insertions: Default::default(), - values: values, + values, }) } - /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. - pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + /// Insert a new block into the overlay. If inserted on the second level or lover expects parent + /// to be present in the window. 
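In other words, a block inserted at the second level or lower expects its parent to already be present in the window. The block-number arithmetic behind the `InvalidBlockNumber` check and the append-a-new-level case can be isolated in a few lines (a hypothetical `level_for` helper mirroring `insert`'s checks):

```rust
/// Hypothetical helper mirroring `insert`'s addressing rules: a block with
/// height `number` belongs to level `number - front`, where `front` is the
/// height right after the last canonicalized block and `len` is the number
/// of levels currently in the window.
fn level_for(number: u64, front: u64, len: u64) -> Result<usize, &'static str> {
    if number < front || number > front + len {
        return Err("InvalidBlockNumber") // outside the window
    }
    // `number == front + len` means a brand new level is appended;
    // anything below that lands in an existing level.
    Ok((number - front) as usize)
}

fn main() {
    assert_eq!(level_for(10, 10, 0), Ok(0)); // first block after the canonical one
    assert_eq!(level_for(12, 10, 2), Ok(2)); // appends a new level
    assert!(level_for(13, 10, 2).is_err());  // would leave a gap
    assert!(level_for(9, 10, 2).is_err());   // already canonicalized
}
```

On top of this, the new code caps each level at `MAX_BLOCKS_PER_LEVEL` siblings and returns `Error::TooManySiblingBlocks` once a level is full.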
+ pub fn insert( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { // assume that parent was canonicalized let last_canonicalized = (parent_hash.clone(), number - 1); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode())); + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode())); self.last_canonicalized = Some(last_canonicalized); } else if self.last_canonicalized.is_some() { - if number < front_block_number || number >= front_block_number + self.levels.len() as u64 + 1 { + if number < front_block_number || + number >= front_block_number + self.levels.len() as u64 + 1 + { trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})", number, front_block_number, front_block_number + self.levels.len() as u64, ); - return Err(Error::InvalidBlockNumber); + return Err(Error::InvalidBlockNumber) } // check for valid parent if inserting on second level or higher if number == front_block_number { - if !self.last_canonicalized.as_ref().map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) { - return Err(Error::InvalidParent); + if !self + .last_canonicalized + .as_ref() + .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) + { + return Err(Error::InvalidParent) } } else if !self.parents.contains_key(&parent_hash) { - return Err(Error::InvalidParent); + return Err(Error::InvalidParent) } } - let level = if self.levels.is_empty() || number == front_block_number + self.levels.len() as u64 { - self.levels.push_back(Vec::new()); + let level = if self.levels.is_empty() || + number == front_block_number + self.levels.len() as u64 + { + self.levels.push_back(OverlayLevel::new()); self.levels.back_mut().expect("can't be empty after insertion; qed") } else { self.levels.get_mut((number - front_block_number) as usize) .expect("number is [front_block_number .. 
front_block_number + levels.len()) is asserted in precondition; qed") }; - let index = level.len() as u64; + if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize { + return Err(Error::TooManySiblingBlocks) + } + + let index = level.available_index(); let journal_key = to_journal_key(number, index); let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); let overlay = BlockOverlay { hash: hash.clone(), + journal_index: index, journal_key: journal_key.clone(), - inserted: inserted, + inserted, deleted: changeset.deleted.clone(), }; level.push(overlay); @@ -271,15 +329,24 @@ impl NonCanonicalOverlay { level_index: usize, discarded_journals: &mut Vec>, discarded_blocks: &mut Vec, - hash: &BlockHash + hash: &BlockHash, ) { if let Some(level) = self.levels.get(level_index) { - level.iter().for_each(|overlay| { - let parent = self.parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone(); + level.blocks.iter().for_each(|overlay| { + let parent = self + .parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") + .clone(); if parent == *hash { discarded_journals.push(overlay.journal_key.clone()); discarded_blocks.push(overlay.hash.clone()); - self.discard_journals(level_index + 1, discarded_journals, discarded_blocks, &overlay.hash); + self.discard_journals( + level_index + 1, + discarded_journals, + discarded_blocks, + &overlay.hash, + ); } }); } @@ -292,7 +359,8 @@ impl NonCanonicalOverlay { pub fn last_canonicalized_block_number(&self) -> Option { match self.last_canonicalized.as_ref().map(|&(_, n)| n) { Some(n) => Some(n + self.pending_canonicalizations.len() as u64), - None if !self.pending_canonicalizations.is_empty() => Some(self.pending_canonicalizations.len() as u64), + None if !self.pending_canonicalizations.is_empty() => + Some(self.pending_canonicalizations.len() as u64), _ => None, } } @@ -305,7 +373,7 @@ impl NonCanonicalOverlay { let start = self.last_canonicalized_block_number().unwrap_or(0); self.levels .get(self.pending_canonicalizations.len()) - .map(|level| level.iter().map(|r| (r.hash.clone(), start)).collect()) + .map(|level| level.blocks.iter().map(|r| (r.hash.clone(), start)).collect()) .unwrap_or_default() } @@ -317,21 +385,25 @@ impl NonCanonicalOverlay { commit: &mut CommitSet, ) -> Result<(), Error> { trace!(target: "state-db", "Canonicalizing {:?}", hash); - let level = self.levels.get(self.pending_canonicalizations.len()).ok_or_else(|| Error::InvalidBlock)?; + let level = self + .levels + .get(self.pending_canonicalizations.len()) + .ok_or_else(|| Error::InvalidBlock)?; let index = level + .blocks .iter() .position(|overlay| overlay.hash == *hash) .ok_or_else(|| Error::InvalidBlock)?; let mut discarded_journals = Vec::new(); let mut discarded_blocks = Vec::new(); - for (i, overlay) in level.iter().enumerate() { + for (i, overlay) in level.blocks.iter().enumerate() { if i != index { self.discard_journals( self.pending_canonicalizations.len() + 1, &mut discarded_journals, &mut discarded_blocks, - &overlay.hash + &overlay.hash, ); } discarded_journals.push(overlay.journal_key.clone()); @@ -339,14 +411,26 @@ impl NonCanonicalOverlay { } // get the one we need to canonicalize - let overlay = &level[index]; - commit.data.inserted.extend(overlay.inserted.iter() - .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); + let overlay = &level.blocks[index]; + commit.data.inserted.extend(overlay.inserted.iter().map(|k| 
{ + ( + k.clone(), + self.values + .get(k) + .expect("For each key in overlays there's a value in values") + .1 + .clone(), + ) + })); commit.data.deleted.extend(overlay.deleted.clone()); commit.meta.deleted.append(&mut discarded_journals); - let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); + let canonicalized = + (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); self.pending_canonicalizations.push(hash.clone()); Ok(()) @@ -357,14 +441,16 @@ impl NonCanonicalOverlay { let count = self.pending_canonicalizations.len() as u64; for hash in self.pending_canonicalizations.drain(..) { trace!(target: "state-db", "Post canonicalizing {:?}", hash); - let level = self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); + let level = + self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); let index = level + .blocks .iter() .position(|overlay| overlay.hash == hash) .expect("Hash validity is checked in `canonicalize`"); // discard unfinalized overlays and values - for (i, overlay) in level.into_iter().enumerate() { + for (i, overlay) in level.blocks.into_iter().enumerate() { let mut pinned_children = if i != index { discard_descendants( &mut self.levels.as_mut_slices(), @@ -381,7 +467,8 @@ impl NonCanonicalOverlay { pinned_children += 1; } if pinned_children != 0 { - self.pinned_insertions.insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); + self.pinned_insertions + .insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); } else { self.parents.remove(&overlay.hash); discard_values(&mut self.values, overlay.inserted); @@ -389,7 +476,10 @@ impl NonCanonicalOverlay { } } if let Some(hash) = last { - let last_canonicalized = (hash, self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1)); + let last_canonicalized = ( + hash, + self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1), + ); self.last_canonicalized = Some(last_canonicalized); } } @@ -401,22 +491,23 @@ impl NonCanonicalOverlay { Q: std::hash::Hash + Eq, { if let Some((_, value)) = self.values.get(&key) { - return Some(value.clone()); + return Some(value.clone()) } None } /// Check if the block is in the canonicalization queue. pub fn have_block(&self, hash: &BlockHash) -> bool { - (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) - && !self.pending_canonicalizations.contains(hash) + (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) && + !self.pending_canonicalizations.contains(hash) } - /// Revert a single level. Returns commit set that deletes the journal or `None` if not possible. + /// Revert a single level. Returns commit set that deletes the journal or `None` if not + /// possible. pub fn revert_one(&mut self) -> Option> { self.levels.pop_back().map(|level| { let mut commit = CommitSet::default(); - for overlay in level.into_iter() { + for overlay in level.blocks.into_iter() { commit.meta.deleted.push(overlay.journal_key); self.parents.remove(&overlay.hash); discard_values(&mut self.values, overlay.inserted); @@ -425,19 +516,56 @@ impl NonCanonicalOverlay { }) } + /// Revert a single block. 
Returns commit set that deletes the journal or `None` if not + /// possible. + pub fn remove(&mut self, hash: &BlockHash) -> Option> { + let mut commit = CommitSet::default(); + let level_count = self.levels.len(); + for (level_index, level) in self.levels.iter_mut().enumerate().rev() { + let index = match level.blocks.iter().position(|overlay| &overlay.hash == hash) { + Some(index) => index, + None => continue, + }; + // Check that it does not have any children + if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) { + log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash); + return None + } + let overlay = level.remove(index); + commit.meta.deleted.push(overlay.journal_key); + self.parents.remove(&overlay.hash); + discard_values(&mut self.values, overlay.inserted); + break + } + if self.levels.back().map_or(false, |l| l.blocks.is_empty()) { + self.levels.pop_back(); + } + if !commit.meta.deleted.is_empty() { + Some(commit) + } else { + None + } + } + fn revert_insertions(&mut self) { self.pending_insertions.reverse(); for hash in self.pending_insertions.drain(..) { self.parents.remove(&hash); - // find a level. When iterating insertions backwards the hash is always last in the level. - let level_index = - self.levels.iter().position(|level| - level.last().expect("Hash is added in `insert` in reverse order").hash == hash) + // find a level. When iterating insertions backwards the hash is always last in the + // level. + let level_index = self + .levels + .iter() + .position(|level| { + level.blocks.last().expect("Hash is added in `insert` in reverse order").hash == + hash + }) .expect("Hash is added in insert"); - let overlay = self.levels[level_index].pop().expect("Empty levels are not allowed in self.levels"); + let overlay_index = self.levels[level_index].blocks.len() - 1; + let overlay = self.levels[level_index].remove(overlay_index); discard_values(&mut self.values, overlay.inserted); - if self.levels[level_index].is_empty() { + if self.levels[level_index].blocks.is_empty() { debug_assert_eq!(level_index, self.levels.len() - 1); self.levels.pop_back(); } @@ -459,8 +587,9 @@ impl NonCanonicalOverlay { /// Pin state values in memory pub fn pin(&mut self, hash: &BlockHash) { if self.pending_insertions.contains(hash) { - debug_assert!(false, "Trying to pin pending state"); - return; + // Pinning pending state is not implemented. Pending states + // won't be pruned for quite some time anyway, so it's not a big deal. 
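The `remove` method added above is the new public entry point for deleting a single non-canonical block, and it refuses whenever another block still builds on the target. A simplified model of the child check and level bookkeeping (toy `Overlay` type; the real method also records the deleted journal key in a `CommitSet` and drops a trailing empty level):

```rust
use std::collections::HashMap;

/// Toy fork overlay: blocks grouped by height, each block knows its parent.
struct Overlay {
    levels: Vec<Vec<u64>>,      // block hashes per height, oldest level first
    parents: HashMap<u64, u64>, // child hash -> parent hash
}

impl Overlay {
    /// Remove a single block, refusing if any block still builds on it.
    fn remove(&mut self, hash: u64) -> Option<u64> {
        let level_count = self.levels.len();
        for (height, level) in self.levels.iter_mut().enumerate().rev() {
            let Some(i) = level.iter().position(|h| *h == hash) else { continue };
            // Blocks on the topmost level can never have children.
            if height != level_count - 1 && self.parents.values().any(|p| *p == hash) {
                return None // refuse: the block still has descendants
            }
            let removed = level.remove(i);
            self.parents.remove(&removed);
            return Some(removed)
        }
        None
    }
}

fn main() {
    let mut o = Overlay {
        levels: vec![vec![1], vec![2, 3]], // 2 and 3 both extend 1
        parents: [(2, 1), (3, 1)].into_iter().collect(),
    };
    assert_eq!(o.remove(1), None); // 1 still has children
    assert_eq!(o.remove(3), Some(3));
    assert_eq!(o.remove(3), None); // already gone
}
```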
+ return } let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { @@ -510,14 +639,17 @@ impl NonCanonicalOverlay { #[cfg(test)] mod tests { - use std::io; + use super::{to_journal_key, NonCanonicalOverlay}; + use crate::{ + test::{make_changeset, make_db}, + ChangeSet, CommitSet, MetaDb, + }; use sp_core::H256; - use super::{NonCanonicalOverlay, to_journal_key}; - use crate::{ChangeSet, CommitSet}; - use crate::test::{make_db, make_changeset}; + use std::io; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&H256::from_low_u64_be(key)) == + Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] @@ -545,7 +677,9 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 2, &H256::default(), ChangeSet::default()) + .unwrap(); overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); } @@ -556,7 +690,9 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); } @@ -567,8 +703,12 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 2, &H256::default(), ChangeSet::default()) + .unwrap(); } #[test] @@ -578,7 +718,9 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); let mut commit = CommitSet::default(); overlay.canonicalize::(&h2, &mut commit).unwrap(); } @@ -589,7 +731,9 @@ mod tests { let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let changeset = make_changeset(&[3, 4], &[2]); - let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); + let insertion = overlay + .insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(); assert_eq!(insertion.data.inserted.len(), 0); assert_eq!(insertion.data.deleted.len(), 0); assert_eq!(insertion.meta.inserted.len(), 2); @@ -611,7 +755,11 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); assert_eq!(db.meta.len(), 3); @@ -627,7 +775,11 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], 
&[2])).unwrap()); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&h1, &mut commit).unwrap(); @@ -702,7 +854,11 @@ mod tests { let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let changeset = make_changeset(&[], &[]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap()); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(), + ); db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); overlay.apply_pending(); let mut commit = CommitSet::default(); @@ -716,9 +872,9 @@ mod tests { #[test] fn complex_tree() { - use crate::MetaDb; let mut db = make_db(&[]); + #[rustfmt::skip] // - 1 - 1_1 - 1_1_1 // \ 1_2 - 1_2_1 // \ 1_2_2 @@ -876,6 +1032,7 @@ mod tests { fn keeps_pinned() { let mut db = make_db(&[]); + #[rustfmt::skip] // - 0 - 1_1 // \ 1_2 @@ -902,6 +1059,7 @@ mod tests { fn keeps_pinned_ref_count() { let mut db = make_db(&[]); + #[rustfmt::skip] // - 0 - 1_1 // \ 1_2 // \ 1_3 @@ -933,6 +1091,7 @@ mod tests { fn pin_keeps_parent() { let mut db = make_db(&[]); + #[rustfmt::skip] // - 0 - 1_1 - 2_1 // \ 1_2 @@ -958,4 +1117,118 @@ mod tests { assert!(!contains(&overlay, 1)); assert!(overlay.pinned.is_empty()); } + + #[test] + fn restore_from_journal_after_canonicalize_no_first() { + // This test discards a branch that is journaled under a non-zero index on level 1, + // making sure all journals are loaded for each level even if some of them are missing. + let root = H256::random(); + let h1 = H256::random(); + let h2 = H256::random(); + let h11 = H256::random(); + let h21 = H256::random(); + let mut db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&root, 10, &H256::default(), make_changeset(&[], &[])) + .unwrap(), + ); + db.commit(&overlay.insert::(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); + db.commit(&overlay.insert::(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); + db.commit(&overlay.insert::(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&root, &mut commit).unwrap(); + overlay.canonicalize::(&h2, &mut commit).unwrap(); // h11 should stay in the DB + db.commit(&commit); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + assert!(contains(&overlay, 21)); + assert!(!contains(&overlay, 11)); + assert!(db.get_meta(&to_journal_key(12, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key(12, 0)).unwrap().is_none()); + + // Restore into a new overlay and check that journaled value exists. + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + assert!(contains(&overlay, 21)); + + let mut commit = CommitSet::default(); + overlay.canonicalize::(&h21, &mut commit).unwrap(); // h11 should stay in the DB + db.commit(&commit); + overlay.apply_pending(); + assert!(!contains(&overlay, 21)); + } + + #[test] + fn index_reuse() { + // This test discards a branch that is journaled under a non-zero index on level 1, + // making sure all journals are loaded for each level even if some of them are missing. 
+ let root = H256::random(); + let h1 = H256::random(); + let h2 = H256::random(); + let h11 = H256::random(); + let h21 = H256::random(); + let mut db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&root, 10, &H256::default(), make_changeset(&[], &[])) + .unwrap(), + ); + db.commit(&overlay.insert::(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); + db.commit(&overlay.insert::(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); + db.commit(&overlay.insert::(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&root, &mut commit).unwrap(); + overlay.canonicalize::(&h2, &mut commit).unwrap(); // h11 should stay in the DB + db.commit(&commit); + overlay.apply_pending(); + + // add another block at top level. It should reuse journal index 0 of previously discarded + // block + let h22 = H256::random(); + db.commit(&overlay.insert::(&h22, 12, &h2, make_changeset(&[22], &[])).unwrap()); + assert_eq!(overlay.levels[0].blocks[0].journal_index, 1); + assert_eq!(overlay.levels[0].blocks[1].journal_index, 0); + + // Restore into a new overlay and check that journaled value exists. + let overlay = NonCanonicalOverlay::::new(&db).unwrap(); + assert_eq!(overlay.parents.len(), 2); + assert!(contains(&overlay, 21)); + assert!(contains(&overlay, 22)); + } + + #[test] + fn remove_works() { + let root = H256::random(); + let h1 = H256::random(); + let h2 = H256::random(); + let h11 = H256::random(); + let h21 = H256::random(); + let mut db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&root, 10, &H256::default(), make_changeset(&[], &[])) + .unwrap(), + ); + db.commit(&overlay.insert::(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); + db.commit(&overlay.insert::(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); + db.commit(&overlay.insert::(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); + assert!(overlay.remove(&h1).is_none()); + assert!(overlay.remove(&h2).is_none()); + assert_eq!(overlay.levels.len(), 3); + + db.commit(&overlay.remove(&h11).unwrap()); + assert!(!contains(&overlay, 11)); + + db.commit(&overlay.remove(&h21).unwrap()); + assert_eq!(overlay.levels.len(), 2); + + db.commit(&overlay.remove(&h2).unwrap()); + assert!(!contains(&overlay, 2)); + } } diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 69b07c285fad8..465c1ecda6cc1 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -24,10 +24,10 @@ //! the death list. //! The changes are journaled in the DB. 
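Concretely, the window keeps one "death row" per canonicalized block plus an index from each key to the block that scheduled its deletion; when `count_insertions` is enabled, a key that is re-inserted later is rescued from its death row so pruning cannot delete live data. A compact model of that bookkeeping (illustrative `Window` type, without the DB journal the real `RefWindow` maintains):

```rust
use std::collections::{HashMap, HashSet, VecDeque};

/// One death row per canonicalized block: keys to delete when it is pruned.
struct Window {
    death_rows: VecDeque<(u64, HashSet<u64>)>, // (block number, doomed keys)
    death_index: HashMap<u64, u64>,            // key -> block dooming it
    pending_number: u64,                       // number of the front row
}

impl Window {
    /// Import a newly canonicalized block's changes into the window.
    fn import(&mut self, inserted: &[u64], deleted: &[u64]) {
        let block = self.pending_number + self.death_rows.len() as u64;
        // A re-inserted key must be rescued from the row that doomed it.
        for k in inserted {
            if let Some(b) = self.death_index.remove(k) {
                self.death_rows[(b - self.pending_number) as usize].1.remove(k);
            }
        }
        for k in deleted {
            self.death_index.insert(*k, block);
        }
        self.death_rows.push_back((block, deleted.iter().copied().collect()));
    }

    /// Prune the oldest block, yielding the keys that may now be deleted.
    fn prune_one(&mut self) -> Option<HashSet<u64>> {
        let (_, doomed) = self.death_rows.pop_front()?;
        for k in &doomed {
            self.death_index.remove(k);
        }
        self.pending_number += 1;
        Some(doomed)
    }
}

fn main() {
    let mut w =
        Window { death_rows: VecDeque::new(), death_index: HashMap::new(), pending_number: 0 };
    w.import(&[], &[7]); // block 0 deletes key 7
    w.import(&[7], &[]); // block 1 re-inserts key 7
    // Pruning block 0 must not delete key 7 any more:
    assert!(w.prune_one().unwrap().is_empty());
}
```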
-use std::collections::{HashMap, HashSet, VecDeque}; -use codec::{Encode, Decode}; -use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; +use crate::{to_meta_key, CommitSet, Error, Hash, MetaDb}; +use codec::{Decode, Encode}; use log::{trace, warn}; +use std::collections::{HashMap, HashSet, VecDeque}; const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; @@ -72,9 +72,11 @@ fn to_journal_key(block: u64) -> Vec { } impl RefWindow { - pub fn new(db: &D, count_insertions: bool) -> Result, Error> { - let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())) - .map_err(|e| Error::Db(e))?; + pub fn new( + db: &D, + count_insertions: bool, + ) -> Result, Error> { + let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(|e| Error::Db(e))?; let pending_number: u64 = match last_pruned { Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, None => 0, @@ -83,7 +85,7 @@ impl RefWindow { let mut pruning = RefWindow { death_rows: Default::default(), death_index: Default::default(), - pending_number: pending_number, + pending_number, pending_canonicalizations: 0, pending_prunings: 0, count_insertions, @@ -94,9 +96,15 @@ impl RefWindow { let journal_key = to_journal_key(block); match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + let record: JournalRecord = + Decode::decode(&mut record.as_slice())?; trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + pruning.import( + &record.hash, + journal_key, + record.inserted.into_iter(), + record.deleted, + ); }, None => break, } @@ -105,7 +113,13 @@ impl RefWindow { Ok(pruning) } - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { + fn import>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Vec, + ) { if self.count_insertions { // remove all re-inserted keys from death rows for k in inserted { @@ -120,13 +134,11 @@ impl RefWindow { self.death_index.insert(k.clone(), imported_block); } } - self.death_rows.push_back( - DeathRow { - hash: hash.clone(), - deleted: deleted.into_iter().collect(), - journal_key: journal_key, - } - ); + self.death_rows.push_back(DeathRow { + hash: hash.clone(), + deleted: deleted.into_iter().collect(), + journal_key, + }); } pub fn window_size(&self) -> u64 { @@ -172,23 +184,27 @@ impl RefWindow { Default::default() }; let deleted = ::std::mem::take(&mut commit.data.deleted); - let journal_record = JournalRecord { - hash: hash.clone(), - inserted, - deleted, - }; + let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted }; let block = self.pending_number + self.death_rows.len() as u64; let journal_key = to_journal_key(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); - self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); + self.import( + &journal_record.hash, + journal_key, + journal_record.inserted.into_iter(), + journal_record.deleted, + ); self.pending_canonicalizations += 1; } /// Apply all pending changes pub fn apply_pending(&mut self) { self.pending_canonicalizations = 0; - for _ in 0 .. 
self.pending_prunings { - let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); + for _ in 0..self.pending_prunings { + let pruned = self + .death_rows + .pop_front() + .expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); if self.count_insertions { for k in pruned.deleted.iter() { @@ -203,9 +219,10 @@ impl RefWindow { /// Revert all pending changes pub fn revert_pending(&mut self) { // Revert pending deletions. - // Note that pending insertions might cause some existing deletions to be removed from `death_index` - // We don't bother to track and revert that for now. This means that a few nodes might end up no being - // deleted in case transaction fails and `revert_pending` is called. + // Note that pending insertions might cause some existing deletions to be removed from + // `death_index` We don't bother to track and revert that for now. This means that a few + // nodes might end up no being deleted in case transaction fails and `revert_pending` is + // called. self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); if self.count_insertions { let new_max_block = self.death_rows.len() as u64 + self.pending_number; @@ -219,9 +236,11 @@ impl RefWindow { #[cfg(test)] mod tests { use super::RefWindow; + use crate::{ + test::{make_commit, make_db, TestDb}, + CommitSet, + }; use sp_core::H256; - use crate::CommitSet; - use crate::test::{make_db, make_commit, TestDb}; fn check_journal(pruning: &RefWindow, db: &TestDb) { let restored: RefWindow = RefWindow::new(db, pruning.count_insertions).unwrap(); @@ -419,5 +438,4 @@ mod tests { assert!(db.data_eq(&make_db(&[1, 3]))); assert!(pruning.death_index.is_empty()); } - } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 11ce4ad822620..ad5ce8e874cc7 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,9 +18,9 @@ //! 
Test utils -use std::collections::HashMap; +use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb}; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; +use std::collections::HashMap; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { @@ -67,30 +67,22 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { ChangeSet { inserted: inserted .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) + .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) .collect(), deleted: deleted.iter().map(|v| H256::from_low_u64_be(*v)).collect(), } } pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { - CommitSet { - data: make_changeset(inserted, deleted), - meta: ChangeSet::default(), - } + CommitSet { data: make_changeset(inserted, deleted), meta: ChangeSet::default() } } pub fn make_db(inserted: &[u64]) -> TestDb { TestDb { data: inserted .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) + .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) .collect(), meta: Default::default(), } } - diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 8da372db94ffc..b81fd1fd5c611 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-sync-state-rpc" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "A RPC handler to create sync states for light clients." edition = "2018" @@ -13,15 +13,18 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpc-core = "15.0" -jsonrpc-core-client = "15.0" -jsonrpc-derive = "15.0" -sc-chain-spec = { version = "2.0.0", path = "../chain-spec" } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-consensus-babe = { version = "0.8.0", path = "../consensus/babe" } -sc-consensus-epochs = { version = "0.8.0", path = "../consensus/epochs" } -sc-finality-grandpa = { version = "0.8.0", path = "../finality-grandpa" } -sc-rpc-api = { version = "0.8.0", path = "../rpc-api" } -serde_json = "1.0.58" -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +thiserror = "1.0.21" +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-consensus-babe = { version = "0.10.0-dev", path = "../consensus/babe" } +sc-consensus-epochs = { version = "0.10.0-dev", path = "../consensus/epochs" } +sc-finality-grandpa = { version = "0.10.0-dev", path = "../finality-grandpa" } +sc-rpc-api = { version = "0.10.0-dev", path = "../rpc-api" } +serde_json = "1.0.68" +serde = { version = "1.0.126", features = ["derive"] } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0" } diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index fa433e5e31d2d..a1621e3986d76 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -1,128 +1,214 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! A RPC handler to create sync states for light clients. +//! //! Currently only usable with BABE + GRANDPA. - -use sp_runtime::traits::{Block as BlockT, NumberFor}; +//! +//! # Usage +//! +//! To use the light sync state, it needs to be added as an extension to the chain spec: +//! +//! ``` +//! use sc_sync_state_rpc::LightSyncStateExtension; +//! +//! #[derive(Default, Clone, serde::Serialize, serde::Deserialize, sc_chain_spec::ChainSpecExtension)] +//! #[serde(rename_all = "camelCase")] +//! pub struct Extensions { +//! light_sync_state: LightSyncStateExtension, +//! } +//! +//! type ChainSpec = sc_chain_spec::GenericChainSpec<(), Extensions>; +//! ``` +//! +//! If the [`LightSyncStateExtension`] is not added as an extension to the chain spec, +//! the [`SyncStateRpcHandler`] will fail at instantiation. + +#![deny(unused_crate_dependencies)] + +use sc_client_api::StorageData; use sp_blockchain::HeaderBackend; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; use std::sync::Arc; -use sp_runtime::generic::BlockId; use jsonrpc_derive::rpc; type SharedAuthoritySet = sc_finality_grandpa::SharedAuthoritySet<::Hash, NumberFor>; -type SharedEpochChanges = sc_consensus_epochs::SharedEpochChanges; - -struct Error(sp_blockchain::Error); +type SharedEpochChanges = + sc_consensus_epochs::SharedEpochChanges; + +/// Error type used by this crate. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error { + #[error(transparent)] + Blockchain(#[from] sp_blockchain::Error), + + #[error("Failed to load the block weight for block {0:?}")] + LoadingBlockWeightFailed(Block::Hash), + + #[error("JsonRpc error: {0}")] + JsonRpc(String), + + #[error( + "The light sync state extension is not provided by the chain spec. \ + Read the `sc-sync-state-rpc` crate docs on how to do this!" + )] + LightSyncStateExtensionNotFound, +} -impl From for jsonrpc_core::Error { - fn from(error: Error) -> Self { - jsonrpc_core::Error { - message: error.0.to_string(), - code: jsonrpc_core::ErrorCode::ServerError(1), - data: None, - } +impl From> for jsonrpc_core::Error { + fn from(error: Error) -> Self { + let message = match error { + Error::JsonRpc(s) => s, + _ => error.to_string(), + }; + jsonrpc_core::Error { message, code: jsonrpc_core::ErrorCode::ServerError(1), data: None } } } +/// Serialize the given `val` by encoding it with SCALE codec and serializing it as hex. 
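A self-contained illustration of this serialization pattern, assuming the `parity-scale-codec`, `hex`, and `serde_json` crates (`serialize_scale_hex` and `Demo` are hypothetical; the crate's own helper instead wraps the encoded bytes in `sc_client_api::StorageData`, which serializes to the same 0x-prefixed hex):

```rust
use parity_scale_codec::Encode;
use serde::{Serialize, Serializer};

/// SCALE-encode the value, then serialize the bytes as 0x-prefixed hex.
fn serialize_scale_hex<S: Serializer, T: Encode>(val: &T, s: S) -> Result<S::Ok, S::Error> {
    s.serialize_str(&format!("0x{}", hex::encode(val.encode())))
}

#[derive(Serialize)]
struct Demo {
    #[serde(serialize_with = "serialize_scale_hex")]
    number: u32,
}

fn main() {
    // 42u32 SCALE-encodes to the little-endian bytes 2a 00 00 00.
    let json = serde_json::to_string(&Demo { number: 42 }).unwrap();
    assert_eq!(json, r#"{"number":"0x2a000000"}"#);
}
```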
+fn serialize_encoded( + val: &T, + s: S, +) -> Result { + let encoded = StorageData(val.encode()); + serde::Serialize::serialize(&encoded, s) +} + +/// The light sync state extension. +/// +/// This represents a JSON serialized [`LightSyncState`]. It is required to be added to the +/// chain-spec as an extension. +pub type LightSyncStateExtension = Option; + +/// Hardcoded infomation that allows light clients to sync quickly. +#[derive(serde::Serialize, Clone)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct LightSyncState { + /// The header of the best finalized block. + #[serde(serialize_with = "serialize_encoded")] + pub finalized_block_header: ::Header, + /// The epoch changes tree for babe. + #[serde(serialize_with = "serialize_encoded")] + pub babe_epoch_changes: sc_consensus_epochs::EpochChangesFor, + /// The babe weight of the finalized block. + pub babe_finalized_block_weight: sc_consensus_babe::BabeBlockWeight, + /// The authority set for grandpa. + #[serde(serialize_with = "serialize_encoded")] + pub grandpa_authority_set: + sc_finality_grandpa::AuthoritySet<::Hash, NumberFor>, +} + /// An api for sync state RPC calls. #[rpc] pub trait SyncStateRpcApi { /// Returns the json-serialized chainspec running the node, with a sync state. #[rpc(name = "sync_state_genSyncSpec", returns = "jsonrpc_core::Value")] - fn system_gen_sync_spec(&self, raw: bool) - -> jsonrpc_core::Result; + fn system_gen_sync_spec(&self, raw: bool) -> jsonrpc_core::Result; } /// The handler for sync state RPC calls. -pub struct SyncStateRpcHandler { +pub struct SyncStateRpcHandler { chain_spec: Box, - client: Arc, - shared_authority_set: SharedAuthoritySet, - shared_epoch_changes: SharedEpochChanges, + client: Arc, + shared_authority_set: SharedAuthoritySet, + shared_epoch_changes: SharedEpochChanges, deny_unsafe: sc_rpc_api::DenyUnsafe, } -impl SyncStateRpcHandler - where - TBl: BlockT, - TCl: HeaderBackend + sc_client_api::AuxStore + 'static, +impl SyncStateRpcHandler +where + Block: BlockT, + Backend: HeaderBackend + sc_client_api::AuxStore + 'static, { /// Create a new handler. pub fn new( chain_spec: Box, - client: Arc, - shared_authority_set: SharedAuthoritySet, - shared_epoch_changes: SharedEpochChanges, + client: Arc, + shared_authority_set: SharedAuthoritySet, + shared_epoch_changes: SharedEpochChanges, deny_unsafe: sc_rpc_api::DenyUnsafe, - ) -> Self { - Self { - chain_spec, client, shared_authority_set, shared_epoch_changes, deny_unsafe, + ) -> Result> { + if sc_chain_spec::get_extension::(chain_spec.extensions()) + .is_some() + { + Ok(Self { chain_spec, client, shared_authority_set, shared_epoch_changes, deny_unsafe }) + } else { + Err(Error::::LightSyncStateExtensionNotFound) } } - - fn build_sync_state(&self) -> Result, sp_blockchain::Error> { + + fn build_sync_state(&self) -> Result, Error> { let finalized_hash = self.client.info().finalized_hash; - let finalized_header = self.client.header(BlockId::Hash(finalized_hash))? - .ok_or_else(|| sp_blockchain::Error::Msg( - format!("Failed to get the header for block {:?}", finalized_hash) - ))?; - - let finalized_block_weight = sc_consensus_babe::aux_schema::load_block_weight( - &*self.client, - finalized_hash, - )? - .ok_or_else(|| sp_blockchain::Error::Msg( - format!("Failed to load the block weight for block {:?}", finalized_hash) - ))?; - - Ok(sc_chain_spec::LightSyncState { + let finalized_header = self + .client + .header(BlockId::Hash(finalized_hash))? 
+ .ok_or_else(|| sp_blockchain::Error::MissingHeader(finalized_hash.to_string()))?; + + let finalized_block_weight = + sc_consensus_babe::aux_schema::load_block_weight(&*self.client, finalized_hash)? + .ok_or_else(|| Error::LoadingBlockWeightFailed(finalized_hash))?; + + Ok(LightSyncState { finalized_block_header: finalized_header, - babe_epoch_changes: self.shared_epoch_changes.lock().clone(), + babe_epoch_changes: self.shared_epoch_changes.shared_data().clone(), babe_finalized_block_weight: finalized_block_weight, grandpa_authority_set: self.shared_authority_set.clone_inner(), }) } } -impl SyncStateRpcApi for SyncStateRpcHandler - where - TBl: BlockT, - TCl: HeaderBackend + sc_client_api::AuxStore + 'static, +impl SyncStateRpcApi for SyncStateRpcHandler +where + Block: BlockT, + Backend: HeaderBackend + sc_client_api::AuxStore + 'static, { - fn system_gen_sync_spec(&self, raw: bool) - -> jsonrpc_core::Result - { + fn system_gen_sync_spec(&self, raw: bool) -> jsonrpc_core::Result { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Err(err.into()); + return Err(err.into()) } let mut chain_spec = self.chain_spec.cloned_box(); - let sync_state = self.build_sync_state().map_err(Error)?; + let sync_state = self.build_sync_state().map_err(map_error::>)?; + + let extension = sc_chain_spec::get_extension_mut::( + chain_spec.extensions_mut(), + ) + .ok_or_else(|| { + Error::::JsonRpc("Could not find `LightSyncState` chain-spec extension!".into()) + })?; + + *extension = + Some(serde_json::to_value(&sync_state).map_err(|err| map_error::(err))?); - chain_spec.set_light_sync_state(sync_state.to_serializable()); - let string = chain_spec.as_json(raw).map_err(map_error)?; + let json_string = chain_spec.as_json(raw).map_err(map_error::)?; - serde_json::from_str(&string).map_err(|err| map_error(err.to_string())) + serde_json::from_str(&json_string).map_err(|err| map_error::(err)) } } -fn map_error(error: String) -> jsonrpc_core::Error { - Error(sp_blockchain::Error::Msg(error)).into() +fn map_error(error: S) -> jsonrpc_core::Error { + Error::::JsonRpc(error.to_string()).into() } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index be7c88f68ae79..f115017f09701 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-telemetry" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" @@ -15,17 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.10.0" -futures = "0.3.4" -futures-timer = "3.0.1" -wasm-timer = "0.2.0" -libp2p = { version = "0.28.1", default-features = false, features = ["dns", "tcp-async-std", "wasm-ext", "websocket"] } +parking_lot = "0.11.1" +futures = "0.3.9" +wasm-timer = "0.2.5" +libp2p = { version = "0.39.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.8" -pin-project = "0.4.6" +pin-project = "1.0.4" rand = "0.7.2" -serde = { version = "1.0.101", features = ["derive"] } -slog = { version = "2.5.2", features = ["nested-values"] } -slog-json = { version = "2.3.0", features = ["nested-values"] } -slog-scope = "4.1.2" -take_mut = "0.2.2" -void = "1.0.2" +serde = { version = "1.0.126", features = ["derive"] } +serde_json = "1.0.68" +chrono = "0.4.19" +thiserror = "1.0.21" diff --git a/client/telemetry/README.md b/client/telemetry/README.md index 8fdf9e500722d..2e3e19bd2f628 100644 --- a/client/telemetry/README.md +++ 
b/client/telemetry/README.md @@ -1,45 +1,21 @@ -Telemetry utilities. +# sc-telemetry -Calling `init_telemetry` registers a global `slog` logger using `slog_scope::set_global_logger`. -After that, calling `slog_scope::with_logger` will return a logger that sends information to -the telemetry endpoints. The `telemetry!` macro is a short-cut for calling -`slog_scope::with_logger` followed with `slog_log!`. +Substrate's client telemetry is a part of substrate that allows ingesting telemetry data +with for example [Polkadot telemetry](https://github.com/paritytech/substrate-telemetry). -Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at -the moment. Substrate may eventually be reworked to get proper `slog` support, including sending -information to the telemetry. +It works using Tokio's [tracing](https://github.com/tokio-rs/tracing/) library. The telemetry +information uses tracing's logging to report the telemetry data which is then retrieved by a +tracing `Layer`. This layer will then send the data through an asynchronous channel to a +background task called [`TelemetryWorker`] which will send the information to the configured +remote telemetry servers. -The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a -background thread/task) in order for the telemetry to properly function. Dropping the object -will also deregister the global logger and replace it with a logger that discards messages. -The `Stream` generates [`TelemetryEvent`]s. +If multiple substrate nodes are running in the same process, it uses a `tracing::Span` to +identify which substrate node is reporting the telemetry. Every task spawned using sc-service's +`TaskManager` automatically inherit this span. -> **Note**: Cloning the [`Telemetry`] and polling from multiple clones has an unspecified behaviour. +Substrate's nodes initialize/register with the [`TelemetryWorker`] using a [`TelemetryHandle`]. +This handle can be cloned and passed around. It uses an asynchronous channel to communicate with +the running [`TelemetryWorker`] dedicated to registration. Registering can happen at any point +in time during the process execution. -# Example - -```rust -use futures::prelude::*; - -let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { - endpoints: sc_telemetry::TelemetryEndpoints::new(vec![ - // The `0` is the maximum verbosity level of messages to send to this endpoint. - ("wss://example.com".into(), 0) - ]).expect("Invalid URL or multiaddr provided"), - // Can be used to pass an external implementation of WebSockets. - wasm_external_transport: None, -}); - -// The `telemetry` object implements `Stream` and must be processed. -std::thread::spawn(move || { - futures::executor::block_on(telemetry.for_each(|_| future::ready(()))); -}); - -// Sends a message on the telemetry. -sc_telemetry::telemetry!(sc_telemetry::SUBSTRATE_INFO; "test"; - "foo" => "bar", -) -``` - - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/telemetry/src/async_record.rs b/client/telemetry/src/async_record.rs deleted file mode 100644 index 34b7c1435afa1..0000000000000 --- a/client/telemetry/src/async_record.rs +++ /dev/null @@ -1,155 +0,0 @@ -//! # Internal types to ssync drain slog -//! FIXME: REMOVE THIS ONCE THE PR WAS MERGE -//! 
https://github.com/slog-rs/async/pull/14 - -use slog::{Record, RecordStatic, Level, SingleKV, KV, BorrowedKV}; -use slog::{Serializer, OwnedKVList, Key}; - -use std::fmt; -use take_mut::take; - -struct ToSendSerializer { - kv: Box, -} - -impl ToSendSerializer { - fn new() -> Self { - ToSendSerializer { kv: Box::new(()) } - } - - fn finish(self) -> Box { - self.kv - } -} - -impl Serializer for ToSendSerializer { - fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_unit(&mut self, key: Key) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ())))); - Ok(()) - } - fn emit_none(&mut self, key: Key) -> slog::Result { - let val: Option<()> = None; - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_char(&mut self, key: Key, val: char) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { - let val = val.to_owned(); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_arguments( - &mut self, - key: Key, - val: &fmt::Arguments, - ) -> slog::Result { - let val = fmt::format(*val); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - - fn emit_serde(&mut self, key: Key, value: &dyn slog::SerdeValue) -> slog::Result { - let val = value.to_sendable(); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } -} - -pub(crate) struct AsyncRecord { - msg: String, - level: Level, - location: Box, - tag: String, - logger_values: OwnedKVList, - kv: Box, -} - -impl AsyncRecord { - /// Serializes a `Record` and an `OwnedKVList`. 
- pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self { - let mut ser = ToSendSerializer::new(); - record - .kv() - .serialize(record, &mut ser) - .expect("`ToSendSerializer` can't fail"); - - AsyncRecord { - msg: fmt::format(*record.msg()), - level: record.level(), - location: Box::new(*record.location()), - tag: String::from(record.tag()), - logger_values: logger_values.clone(), - kv: ser.finish(), - } - } - - /// Deconstruct this `AsyncRecord` into a record and `OwnedKVList`. - pub fn as_record_values(&self, mut f: impl FnMut(&Record, &OwnedKVList)) { - let rs = RecordStatic { - location: &*self.location, - level: self.level, - tag: &self.tag, - }; - - f(&Record::new( - &rs, - &format_args!("{}", self.msg), - BorrowedKV(&self.kv), - ), &self.logger_values) - } -} diff --git a/client/telemetry/src/endpoints.rs b/client/telemetry/src/endpoints.rs new file mode 100644 index 0000000000000..62e6180311980 --- /dev/null +++ b/client/telemetry/src/endpoints.rs @@ -0,0 +1,115 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use libp2p::Multiaddr; +use serde::{Deserialize, Deserializer, Serialize}; + +/// List of telemetry servers we want to talk to. Contains the URL of the server, and the +/// maximum verbosity level. +/// +/// The URL string can be either a URL or a multiaddress. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct TelemetryEndpoints( + #[serde(deserialize_with = "url_or_multiaddr_deser")] pub(crate) Vec<(Multiaddr, u8)>, +); + +/// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr. +fn url_or_multiaddr_deser<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + Vec::<(String, u8)>::deserialize(deserializer)? + .iter() + .map(|e| url_to_multiaddr(&e.0).map_err(serde::de::Error::custom).map(|m| (m, e.1))) + .collect() +} + +impl TelemetryEndpoints { + /// Create a `TelemetryEndpoints` based on a list of `(String, u8)`. + pub fn new(endpoints: Vec<(String, u8)>) -> Result { + let endpoints: Result, libp2p::multiaddr::Error> = + endpoints.iter().map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))).collect(); + endpoints.map(Self) + } +} + +impl TelemetryEndpoints { + /// Return `true` if there are no telemetry endpoints, `false` otherwise. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// Parses a WebSocket URL into a libp2p `Multiaddr`. +fn url_to_multiaddr(url: &str) -> Result { + // First, assume that we have a `Multiaddr`. + let parse_error = match url.parse() { + Ok(ma) => return Ok(ma), + Err(err) => err, + }; + + // If not, try the `ws://path/url` format. 
+	if let Ok(ma) = libp2p::multiaddr::from_url(url) {
+		return Ok(ma)
+	}
+
+	// If we have no clue about the format of that string, assume that we were expecting a
+	// `Multiaddr`.
+	Err(parse_error)
+}
+
+#[cfg(test)]
+mod tests {
+	use super::{url_to_multiaddr, TelemetryEndpoints};
+	use libp2p::Multiaddr;
+
+	#[test]
+	fn valid_endpoints() {
+		let endp = vec![
+			("wss://telemetry.polkadot.io/submit/".into(), 3),
+			("/ip4/80.123.90.4/tcp/5432".into(), 4),
+		];
+		let telem =
+			TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid");
+		let mut res: Vec<(Multiaddr, u8)> = vec![];
+		for (a, b) in endp.iter() {
+			res.push((url_to_multiaddr(a).expect("provided url should be valid"), *b))
+		}
+		assert_eq!(telem.0, res);
+	}
+
+	#[test]
+	fn invalid_endpoints() {
+		let endp = vec![
+			("/ip4/...80.123.90.4/tcp/5432".into(), 3),
+			("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4),
+		];
+		let telem = TelemetryEndpoints::new(endp);
+		assert!(telem.is_err());
+	}
+
+	#[test]
+	fn valid_and_invalid_endpoints() {
+		let endp = vec![
+			("/ip4/80.123.90.4/tcp/5432".into(), 3),
+			("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4),
+		];
+		let telem = TelemetryEndpoints::new(endp);
+		assert!(telem.is_err());
+	}
+}
diff --git a/client/telemetry/src/error.rs b/client/telemetry/src/error.rs
new file mode 100644
index 0000000000000..90a8018f4e1d3
--- /dev/null
+++ b/client/telemetry/src/error.rs
@@ -0,0 +1,31 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+#[allow(missing_docs)]
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+	#[error("IO Error")]
+	IoError(#[from] std::io::Error),
+	#[error("This telemetry instance has already been initialized!")]
+	TelemetryAlreadyInitialized,
+	#[error("The telemetry worker has been dropped already.")]
+	TelemetryWorkerDropped,
+}
+
+#[allow(missing_docs)]
+pub type Result = std::result::Result;
diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs
index 6a5ac0e0cb312..9fb86f57d8392 100644
--- a/client/telemetry/src/lib.rs
+++ b/client/telemetry/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
 
 // This program is free software: you can redistribute it and/or modify
@@ -16,339 +16,505 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see .
 
-//! Telemetry utilities.
+//! Substrate's client telemetry is the part of Substrate that allows ingesting telemetry data
+//! into, for example, [Polkadot telemetry](https://github.com/paritytech/substrate-telemetry).
 //!
-//! Calling `init_telemetry` registers a global `slog` logger using `slog_scope::set_global_logger`.
-//! After that, calling `slog_scope::with_logger` will return a logger that sends information to
-//! the telemetry endpoints. The `telemetry!` macro is a short-cut for calling
-//! `slog_scope::with_logger` followed with `slog_log!`.
+//! It works using Tokio's [tracing](https://github.com/tokio-rs/tracing/) library. Telemetry
+//! information is reported through tracing's logging facilities and is then retrieved by a
+//! tracing `Layer`. This layer sends the data through an asynchronous channel to a
+//! background task called [`TelemetryWorker`], which forwards it to the configured
+//! remote telemetry servers.
 //!
-//! Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at
-//! the moment. Substrate may eventually be reworked to get proper `slog` support, including sending
-//! information to the telemetry.
-//!
-//! The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a
-//! background thread/task) in order for the telemetry to properly function. Dropping the object
-//! will also deregister the global logger and replace it with a logger that discards messages.
-//! The `Stream` generates [`TelemetryEvent`]s.
-//!
-//! > **Note**: Cloning the [`Telemetry`] and polling from multiple clones has an unspecified behaviour.
-//!
-//! # Example
-//!
-//! ```no_run
-//! use futures::prelude::*;
-//!
-//! let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig {
-//! 	endpoints: sc_telemetry::TelemetryEndpoints::new(vec![
-//! 		// The `0` is the maximum verbosity level of messages to send to this endpoint.
-//! 		("wss://example.com".into(), 0)
-//! 	]).expect("Invalid URL or multiaddr provided"),
-//! 	// Can be used to pass an external implementation of WebSockets.
-//! 	wasm_external_transport: None,
-//! });
-//!
-//! // The `telemetry` object implements `Stream` and must be processed.
-//! std::thread::spawn(move || {
-//! 	futures::executor::block_on(telemetry.for_each(|_| future::ready(())));
-//! });
-//!
-//! // Sends a message on the telemetry.
-//! sc_telemetry::telemetry!(sc_telemetry::SUBSTRATE_INFO; "test";
-//! 	"foo" => "bar",
-//! )
-//! ```
+//! If multiple substrate nodes are running in the same process, a `tracing::Span` is used to
+//! identify which substrate node is reporting the telemetry. Every task spawned using sc-service's
+//! `TaskManager` automatically inherits this span.
 //!
+//! Substrate's nodes initialize/register with the [`TelemetryWorker`] using a
+//! [`TelemetryWorkerHandle`]. This handle can be cloned and passed around. It uses an asynchronous
+//! channel to communicate with the running [`TelemetryWorker`] dedicated to registration.
+//! Registering can happen at any point in time during the process execution.
+
+#![warn(missing_docs)]
 
-use futures::{prelude::*, channel::mpsc};
-use libp2p::{Multiaddr, wasm_ext};
+use futures::{channel::mpsc, prelude::*};
+use libp2p::Multiaddr;
 use log::{error, warn};
 use parking_lot::Mutex;
-use serde::{Serialize, Deserialize, Deserializer};
-use std::{pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration};
-use wasm_timer::Instant;
-
-pub use libp2p::wasm_ext::ExtTransport;
-pub use slog_scope::with_logger;
-pub use slog;
-
-mod async_record;
-mod worker;
-
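Taken together, the new API described in the crate docs is wired up roughly as follows. This is an illustrative sketch, not code from this diff: it assumes the crate is consumed as `sc_telemetry`, that the `futures` crate is available to drive the worker, and all endpoint and identity values are placeholders.

```rust
use sc_telemetry::{
	telemetry, ConnectionMessage, TelemetryEndpoints, TelemetryWorker, SUBSTRATE_INFO,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
	// One worker per process; 16 is an arbitrary channel buffer size.
	let worker = TelemetryWorker::new(16)?;
	let mut worker_handle = worker.handle();

	// The worker must be driven in the background for anything to be sent out.
	std::thread::spawn(move || futures::executor::block_on(worker.run()));

	// One `Telemetry` per node; `0` is the endpoint's maximum verbosity level.
	let endpoints = TelemetryEndpoints::new(vec![("wss://example.com/submit/".into(), 0)])?;
	let mut telemetry = worker_handle.new_telemetry(endpoints);

	// Placeholder identity data; a real node fills this in from its configuration.
	telemetry.start_telemetry(ConnectionMessage {
		name: "my-node".into(),
		implementation: "example".into(),
		version: "0.1.0".into(),
		config: String::new(),
		chain: "dev".into(),
		genesis_hash: "0x00".into(),
		authority: false,
		startup_time: "0".into(),
		network_id: String::new(),
	})?;

	// Reporting happens through a cheap, cloneable handle wrapped in an `Option`.
	let handle = Some(telemetry.handle());
	telemetry!(handle; SUBSTRATE_INFO; "system.interval"; "peers" => 0);
	Ok(())
}
```

In Substrate itself the worker is normally driven by the service's task manager rather than a dedicated thread; the thread above only keeps the sketch self-contained.

-/// Configuration for telemetry.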
-pub struct TelemetryConfig { - /// Collection of telemetry WebSocket servers with a corresponding verbosity level. - pub endpoints: TelemetryEndpoints, - - /// Optional external implementation of a libp2p transport. Used in WASM contexts where we need - /// some binding between the networking provided by the operating system or environment and - /// libp2p. - /// - /// This parameter exists whatever the target platform is, but it is expected to be set to - /// `Some` only when compiling for WASM. - /// - /// > **Important**: Each individual call to `write` corresponds to one message. There is no - /// > internal buffering going on. In the context of WebSockets, each `write` - /// > must be one individual WebSockets frame. - pub wasm_external_transport: Option, +use serde::Serialize; +use std::{ + collections::HashMap, + sync::{atomic, Arc}, +}; + +pub use log; +pub use serde_json; + +mod endpoints; +mod error; +mod node; +mod transport; + +pub use endpoints::*; +pub use error::*; +use node::*; +use transport::*; + +/// Substrate DEBUG log level. +pub const SUBSTRATE_DEBUG: VerbosityLevel = 9; +/// Substrate INFO log level. +pub const SUBSTRATE_INFO: VerbosityLevel = 0; + +/// Consensus TRACE log level. +pub const CONSENSUS_TRACE: VerbosityLevel = 9; +/// Consensus DEBUG log level. +pub const CONSENSUS_DEBUG: VerbosityLevel = 5; +/// Consensus WARN log level. +pub const CONSENSUS_WARN: VerbosityLevel = 4; +/// Consensus INFO log level. +pub const CONSENSUS_INFO: VerbosityLevel = 1; + +/// Telemetry message verbosity. +pub type VerbosityLevel = u8; + +pub(crate) type Id = u64; +pub(crate) type TelemetryPayload = serde_json::Map; +pub(crate) type TelemetryMessage = (Id, VerbosityLevel, TelemetryPayload); + +/// Message sent when the connection (re-)establishes. +#[derive(Debug, Serialize)] +pub struct ConnectionMessage { + /// Node's name. + pub name: String, + /// Node's implementation. + pub implementation: String, + /// Node's version. + pub version: String, + /// Node's configuration. + pub config: String, + /// Node's chain. + pub chain: String, + /// Node's genesis hash. + pub genesis_hash: String, + /// Node is an authority. + pub authority: bool, + /// Node's startup time. + pub startup_time: String, + /// Node's network ID. + pub network_id: String, } -/// List of telemetry servers we want to talk to. Contains the URL of the server, and the -/// maximum verbosity level. +/// Telemetry worker. /// -/// The URL string can be either a URL or a multiaddress. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct TelemetryEndpoints( - #[serde(deserialize_with = "url_or_multiaddr_deser")] - Vec<(Multiaddr, u8)> -); - -/// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr. -fn url_or_multiaddr_deser<'de, D>(deserializer: D) -> Result, D::Error> - where D: Deserializer<'de> -{ - Vec::<(String, u8)>::deserialize(deserializer)? - .iter() - .map(|e| Ok((url_to_multiaddr(&e.0) - .map_err(serde::de::Error::custom)?, e.1))) - .collect() -} - -impl TelemetryEndpoints { - pub fn new(endpoints: Vec<(String, u8)>) -> Result { - let endpoints: Result, libp2p::multiaddr::Error> = endpoints.iter() - .map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))) - .collect(); - endpoints.map(Self) - } +/// It should run as a background task using the [`TelemetryWorker::run`] method. This method +/// will consume the object and any further attempts of initializing a new telemetry through its +/// handle will fail (without being fatal). 
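One detail worth noting about the worker/handle design that follows: all handles share a single atomic counter, so every `Telemetry` created anywhere in the process gets a distinct id, no matter how many cloned handles or threads are involved. A tiny self-contained sketch of that mechanism (hypothetical, mirroring the `id_counter` field):

```rust
use std::sync::{atomic, Arc};

fn main() {
	// Shared counter, as in `TelemetryWorkerHandle`: every `new_telemetry`
	// call reserves a distinct id, even across cloned handles and threads.
	let id_counter = Arc::new(atomic::AtomicU64::new(1));
	let first = id_counter.fetch_add(1, atomic::Ordering::Relaxed);
	let second = id_counter.fetch_add(1, atomic::Ordering::Relaxed);
	assert_eq!((first, second), (1, 2));
}
```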
+#[derive(Debug)] +pub struct TelemetryWorker { + message_receiver: mpsc::Receiver, + message_sender: mpsc::Sender, + register_receiver: mpsc::UnboundedReceiver, + register_sender: mpsc::UnboundedSender, + id_counter: Arc, + transport: WsTrans, } -impl TelemetryEndpoints { - /// Return `true` if there are no telemetry endpoints, `false` otherwise. - pub fn is_empty(&self) -> bool { - self.0.is_empty() +impl TelemetryWorker { + /// Instantiate a new [`TelemetryWorker`] which can run in background. + /// + /// Only one is needed per process. + pub fn new(buffer_size: usize) -> Result { + let transport = initialize_transport()?; + let (message_sender, message_receiver) = mpsc::channel(buffer_size); + let (register_sender, register_receiver) = mpsc::unbounded(); + + Ok(Self { + message_receiver, + message_sender, + register_receiver, + register_sender, + id_counter: Arc::new(atomic::AtomicU64::new(1)), + transport, + }) } -} -/// Parses a WebSocket URL into a libp2p `Multiaddr`. -fn url_to_multiaddr(url: &str) -> Result { - // First, assume that we have a `Multiaddr`. - let parse_error = match url.parse() { - Ok(ma) => return Ok(ma), - Err(err) => err, - }; - - // If not, try the `ws://path/url` format. - if let Ok(ma) = libp2p::multiaddr::from_url(url) { - return Ok(ma) + /// Get a new [`TelemetryWorkerHandle`]. + /// + /// This is used when you want to register with the [`TelemetryWorker`]. + pub fn handle(&self) -> TelemetryWorkerHandle { + TelemetryWorkerHandle { + message_sender: self.message_sender.clone(), + register_sender: self.register_sender.clone(), + id_counter: self.id_counter.clone(), + } } - // If we have no clue about the format of that string, assume that we were expecting a - // `Multiaddr`. - Err(parse_error) -} - -/// Log levels. -pub const SUBSTRATE_DEBUG: &str = "9"; -pub const SUBSTRATE_INFO: &str = "0"; - -pub const CONSENSUS_TRACE: &str = "9"; -pub const CONSENSUS_DEBUG: &str = "5"; -pub const CONSENSUS_WARN: &str = "4"; -pub const CONSENSUS_INFO: &str = "1"; - -/// Telemetry object. Implements `Future` and must be polled regularly. -/// Contains an `Arc` and can be cloned and pass around. Only one clone needs to be polled -/// regularly and should be polled regularly. -/// Dropping all the clones unregisters the telemetry. -#[derive(Clone)] -pub struct Telemetry { - inner: Arc>, - /// Slog guard so that we don't get deregistered. - _guard: Arc, -} - -/// Behind the `Mutex` in `Telemetry`. -/// -/// Note that ideally we wouldn't have to make the `Telemetry` cloneable, as that would remove the -/// need for a `Mutex`. However there is currently a weird hack in place in `sc-service` -/// where we extract the telemetry registration so that it continues running during the shutdown -/// process. -struct TelemetryInner { - /// Worker for the telemetry. `None` if it failed to initialize. - worker: Option, - /// Receives log entries for them to be dispatched to the worker. - receiver: mpsc::Receiver, -} - -/// Implements `slog::Drain`. -struct TelemetryDrain { - /// Sends log entries. - sender: std::panic::AssertUnwindSafe>, -} + /// Run the telemetry worker. + /// + /// This should be run in a background task. + pub async fn run(mut self) { + let mut node_map: HashMap> = HashMap::new(); + let mut node_pool: HashMap = HashMap::new(); + let mut pending_connection_notifications: Vec<_> = Vec::new(); -/// Initializes the telemetry. See the crate root documentation for more information. -/// -/// Please be careful to not call this function twice in the same program. 
The `slog` crate -/// doesn't provide any way of knowing whether a global logger has already been registered. -pub fn init_telemetry(config: TelemetryConfig) -> Telemetry { - // Build the list of telemetry endpoints. - let (endpoints, wasm_external_transport) = (config.endpoints.0, config.wasm_external_transport); - - let (sender, receiver) = mpsc::channel(16); - let guard = { - let logger = TelemetryDrain { sender: std::panic::AssertUnwindSafe(sender) }; - let root = slog::Logger::root(slog::Drain::fuse(logger), slog::o!()); - slog_scope::set_global_logger(root) - }; - - let worker = match worker::TelemetryWorker::new(endpoints, wasm_external_transport) { - Ok(w) => Some(w), - Err(err) => { - error!(target: "telemetry", "Failed to initialize telemetry worker: {:?}", err); - None + loop { + futures::select! { + message = self.message_receiver.next() => Self::process_message( + message, + &mut node_pool, + &node_map, + ).await, + init_payload = self.register_receiver.next() => Self::process_register( + init_payload, + &mut node_pool, + &mut node_map, + &mut pending_connection_notifications, + self.transport.clone(), + ).await, + } } - }; - - Telemetry { - inner: Arc::new(Mutex::new(TelemetryInner { - worker, - receiver, - })), - _guard: Arc::new(guard), } -} -/// Event generated when polling the worker. -#[derive(Debug)] -pub enum TelemetryEvent { - /// We have established a connection to one of the telemetry endpoint, either for the first - /// time or after having been disconnected earlier. - Connected, -} + async fn process_register( + input: Option, + node_pool: &mut HashMap>, + node_map: &mut HashMap>, + pending_connection_notifications: &mut Vec<(Multiaddr, ConnectionNotifierSender)>, + transport: WsTrans, + ) { + let input = input.expect("the stream is never closed; qed"); + + match input { + Register::Telemetry { id, endpoints, connection_message } => { + let endpoints = endpoints.0; + + let connection_message = match serde_json::to_value(&connection_message) { + Ok(serde_json::Value::Object(mut value)) => { + value.insert("msg".into(), "system.connected".into()); + let mut obj = serde_json::Map::new(); + obj.insert("id".to_string(), id.into()); + obj.insert("payload".to_string(), value.into()); + Some(obj) + }, + Ok(_) => { + unreachable!("ConnectionMessage always serialize to an object; qed") + }, + Err(err) => { + log::error!( + target: "telemetry", + "Could not serialize connection message: {}", + err, + ); + None + }, + }; + + for (addr, verbosity) in endpoints { + log::trace!( + target: "telemetry", + "Initializing telemetry for: {:?}", + addr, + ); + node_map.entry(id.clone()).or_default().push((verbosity, addr.clone())); + + let node = node_pool.entry(addr.clone()).or_insert_with(|| { + Node::new(transport.clone(), addr.clone(), Vec::new(), Vec::new()) + }); + + node.connection_messages.extend(connection_message.clone()); + + pending_connection_notifications.retain(|(addr_b, connection_message)| { + if *addr_b == addr { + node.telemetry_connection_notifier.push(connection_message.clone()); + false + } else { + true + } + }); + } + }, + Register::Notifier { addresses, connection_notifier } => { + for addr in addresses { + // If the Node has been initialized, we directly push the connection_notifier. + // Otherwise we push it to a queue that will be consumed when the connection + // initializes, thus ensuring that the connection notifier will be sent to the + // Node when it becomes available. 
+					if let Some(node) = node_pool.get_mut(&addr) {
+						node.telemetry_connection_notifier.push(connection_notifier.clone());
+					} else {
+						pending_connection_notifications.push((addr, connection_notifier.clone()));
+					}
+				}
+			},
+		}
+	}
-impl Stream for Telemetry {
-	type Item = TelemetryEvent;
-
-	fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> {
-		let before = Instant::now();
-
-		// Because the `Telemetry` is cloneable, we need to put the actual fields behind a `Mutex`.
-		// However, the user is only ever supposed to poll from one instance of `Telemetry`, while
-		// the other instances are used only for RAII purposes.
-		// We assume that the user is following this advice and therefore that the `Mutex` is only
-		// ever locked once at a time.
-		let mut inner = match self.inner.try_lock() {
-			Some(l) => l,
-			None => {
-				warn!(
-					target: "telemetry",
-					"The telemetry seems to be polled multiple times simultaneously"
-				);
-				// Returning `Pending` here means that we may never get polled again, but this is
-				// ok because we're in a situation where something else is actually currently doing
-				// the polling.
-				return Poll::Pending;
-			}
+	// dispatch messages to the telemetry nodes
+	async fn process_message(
+		input: Option,
+		node_pool: &mut HashMap>,
+		node_map: &HashMap>,
+	) {
+		let (id, verbosity, payload) = input.expect("the stream is never closed; qed");
+
+		let ts = chrono::Local::now().to_rfc3339().to_string();
+		let mut message = serde_json::Map::new();
+		message.insert("id".into(), id.into());
+		message.insert("ts".into(), ts.into());
+		message.insert("payload".into(), payload.into());
+
+		let nodes = if let Some(nodes) = node_map.get(&id) {
+			nodes
+		} else {
+			// This is expected, because the telemetry ID is allocated before the telemetry is
+			// initialized, so a message can arrive for an id that has not registered yet.
+			log::trace!(
+				target: "telemetry",
+				"Received telemetry log for unknown id ({:?}): {}",
+				id,
+				serde_json::to_string(&message)
+					.unwrap_or_else(|err| format!(
+						"could not be serialized ({}): {:?}",
+						err,
+						message,
+					)),
+			);
+			return
 		};
-		let mut has_connected = false;
-
-		// The polling pattern is: poll the worker so that it processes its queue, then add one
-		// message from the receiver (if possible), then poll the worker again, and so on.
-		loop {
-			if let Some(worker) = inner.worker.as_mut() {
-				while let Poll::Ready(event) = worker.poll(cx) {
-					// Right now we only have one possible event. This line is here in order to not
-					// forget to handle any possible new event type.
-					let worker::TelemetryWorkerEvent::Connected = event;
-					has_connected = true;
-				}
+		for (node_max_verbosity, addr) in nodes {
+			if verbosity > *node_max_verbosity {
+				continue
 			}
-			if let Poll::Ready(Some(log_entry)) = Stream::poll_next(Pin::new(&mut inner.receiver), cx) {
-				if let Some(worker) = inner.worker.as_mut() {
-					log_entry.as_record_values(|rec, val| { let _ = worker.log(rec, val); });
-				}
+			if let Some(node) = node_pool.get_mut(&addr) {
+				let _ = node.send(message.clone()).await;
 			} else {
-				break;
+				log::debug!(
+					target: "telemetry",
+					"Received message for unknown node ({}). This is a bug. \
+					Message sent: {}",
+					addr,
+					serde_json::to_string(&message)
+						.unwrap_or_else(|err| format!(
+							"could not be serialized ({}): {:?}",
+							err,
+							message,
+						)),
+				);
 			}
+		}
+	}
+}
-		}
-
-		if before.elapsed() > Duration::from_millis(200) {
-			warn!(target: "telemetry", "Polling the telemetry took more than 200ms");
-		}
-
-		if has_connected {
-			Poll::Ready(Some(TelemetryEvent::Connected))
-		} else {
-			Poll::Pending
-		}
-	}
-}
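The dispatch path in `process_message` above does two simple things per message: it wraps the payload in an `{id, ts, payload}` envelope, and it forwards the result only to endpoints whose configured maximum verbosity is not exceeded. A self-contained sketch of both rules (field values are placeholders):

```rust
use serde_json::json;

// A message is forwarded to an endpoint only if its verbosity does not
// exceed the endpoint's configured maximum.
fn should_forward(message_verbosity: u8, endpoint_max: u8) -> bool {
	message_verbosity <= endpoint_max
}

fn main() {
	// Envelope shape assembled in `process_message`.
	let envelope = json!({
		"id": 1,                                 // telemetry id of the reporting node
		"ts": "2021-01-01T00:00:00+00:00",       // RFC 3339 timestamp
		"payload": { "msg": "system.interval" }, // caller-provided payload
	});
	println!("{}", envelope);

	// An endpoint with maximum verbosity 0 receives INFO (0) but not DEBUG (9).
	assert!(should_forward(0, 0));
	assert!(!should_forward(9, 0));
}
```

+/// Handle to the [`TelemetryWorker`] that allows initializing the telemetry for a Substrate node.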
+#[derive(Debug, Clone)]
+pub struct TelemetryWorkerHandle {
+	message_sender: mpsc::Sender,
+	register_sender: mpsc::UnboundedSender,
+	id_counter: Arc,
+}

impl TelemetryWorkerHandle {
+	/// Instantiate a new [`Telemetry`] object.
+	pub fn new_telemetry(&mut self, endpoints: TelemetryEndpoints) -> Telemetry {
+		let addresses = endpoints.0.iter().map(|(addr, _)| addr.clone()).collect();
+
+		Telemetry {
+			message_sender: self.message_sender.clone(),
+			register_sender: self.register_sender.clone(),
+			id: self.id_counter.fetch_add(1, atomic::Ordering::Relaxed),
+			connection_notifier: TelemetryConnectionNotifier {
+				register_sender: self.register_sender.clone(),
+				addresses,
+			},
+			endpoints: Some(endpoints),
+		}
+	}
}

-impl slog::Drain for TelemetryDrain {
-	type Ok = ();
-	type Err = ();
+/// A telemetry instance that can be used to send telemetry messages.
+#[derive(Debug)]
+pub struct Telemetry {
+	message_sender: mpsc::Sender,
+	register_sender: mpsc::UnboundedSender,
+	id: Id,
+	connection_notifier: TelemetryConnectionNotifier,
+	endpoints: Option,
}

-	fn log(&self, record: &slog::Record, values: &slog::OwnedKVList) -> Result {
-		let before = Instant::now();
+impl Telemetry {
+	/// Initialize the telemetry with the endpoints provided when this [`Telemetry`] was created
+	/// (see [`TelemetryWorkerHandle::new_telemetry`]) for the current substrate node.
+	///
+	/// This method must be called during the substrate node initialization.
+	///
+	/// The endpoints are a collection of telemetry WebSocket servers with a corresponding
+	/// verbosity level.
+	///
+	/// The `connection_message` argument is a JSON object that is sent every time the connection
+	/// (re-)establishes.
+	pub fn start_telemetry(&mut self, connection_message: ConnectionMessage) -> Result<()> {
+		let endpoints = self.endpoints.take().ok_or_else(|| Error::TelemetryAlreadyInitialized)?;
+
+		self.register_sender
+			.unbounded_send(Register::Telemetry { id: self.id, endpoints, connection_message })
+			.map_err(|_| Error::TelemetryWorkerDropped)
+	}

-		let serialized = async_record::AsyncRecord::from(record, values);
-		// Note: interestingly, `try_send` requires a `&mut` because it modifies some internal value, while `clone()`
-		// is lock-free.
-		if let Err(err) = self.sender.clone().try_send(serialized) {
-			warn!(target: "telemetry", "Ignored telemetry message because of error on channel: {:?}", err);
+	/// Make a new cloneable handle to this [`Telemetry`]. This is used for reporting telemetry
+	/// messages.
+	pub fn handle(&self) -> TelemetryHandle {
+		TelemetryHandle {
+			message_sender: Arc::new(Mutex::new(self.message_sender.clone())),
+			id: self.id,
+			connection_notifier: self.connection_notifier.clone(),
 		}
+	}
+}
+
+/// Handle to a [`Telemetry`].
+///
+/// Used to report telemetry messages.
+#[derive(Debug, Clone)]
+pub struct TelemetryHandle {
+	message_sender: Arc>>,
+	id: Id,
+	connection_notifier: TelemetryConnectionNotifier,
+}

-		if before.elapsed() > Duration::from_millis(50) {
-			warn!(target: "telemetry", "Writing a telemetry log took more than 50ms");
+impl TelemetryHandle {
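The reporting path is deliberately lossy: `send_telemetry` below uses the non-blocking `try_send`, so a full channel drops the payload with a trace log instead of applying back-pressure to the node. A self-contained sketch of that behaviour with a plain `futures` channel:

```rust
use futures::channel::mpsc;

fn main() {
	// Tiny buffer for demonstration; each sender also gets one guaranteed slot.
	let (mut tx, _rx) = mpsc::channel::<u32>(0);
	assert!(tx.try_send(1).is_ok()); // fills the only slot
	let err = tx.try_send(2).unwrap_err(); // channel is now full
	assert!(err.is_full()); // telemetry would log at trace level and drop
}
```

+	/// Send telemetry messages.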
+ pub fn send_telemetry(&self, verbosity: VerbosityLevel, payload: TelemetryPayload) { + match self.message_sender.lock().try_send((self.id, verbosity, payload)) { + Ok(()) => {}, + Err(err) if err.is_full() => log::trace!( + target: "telemetry", + "Telemetry channel full.", + ), + Err(_) => log::trace!( + target: "telemetry", + "Telemetry channel closed.", + ), } + } - Ok(()) + /// Get event stream for telemetry connection established events. + /// + /// This function will return an error if the telemetry has already been started by + /// [`Telemetry::start_telemetry`]. + pub fn on_connect_stream(&self) -> ConnectionNotifierReceiver { + self.connection_notifier.on_connect_stream() } } -/// Translates to `slog_scope::info`, but contains an additional verbosity -/// parameter which the log record is tagged with. Additionally the verbosity -/// parameter is added to the record as a key-value pair. -#[macro_export] -macro_rules! telemetry { - ( $a:expr; $b:expr; $( $t:tt )* ) => { - $crate::with_logger(|l| { - $crate::slog::slog_info!(l, #$a, $b; $($t)* ) - }) - } +/// Used to create a stream of events with only one event: when a telemetry connection +/// (re-)establishes. +#[derive(Clone, Debug)] +pub struct TelemetryConnectionNotifier { + register_sender: mpsc::UnboundedSender, + addresses: Vec, } -#[cfg(test)] -mod telemetry_endpoints_tests { - use libp2p::Multiaddr; - use super::TelemetryEndpoints; - use super::url_to_multiaddr; - - #[test] - fn valid_endpoints() { - let endp = vec![("wss://telemetry.polkadot.io/submit/".into(), 3), ("/ip4/80.123.90.4/tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid"); - let mut res: Vec<(Multiaddr, u8)> = vec![]; - for (a, b) in endp.iter() { - res.push((url_to_multiaddr(a).expect("provided url should be valid"), *b)) +impl TelemetryConnectionNotifier { + fn on_connect_stream(&self) -> ConnectionNotifierReceiver { + let (message_sender, message_receiver) = connection_notifier_channel(); + if let Err(err) = self.register_sender.unbounded_send(Register::Notifier { + addresses: self.addresses.clone(), + connection_notifier: message_sender, + }) { + error!( + target: "telemetry", + "Could not create a telemetry connection notifier: \ + the telemetry is probably already running: {}", + err, + ); } - assert_eq!(telem.0, res); + message_receiver } +} - #[test] - fn invalid_endpoints() { - let endp = vec![("/ip4/...80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp); - assert!(telem.is_err()); - } +#[derive(Debug)] +enum Register { + Telemetry { id: Id, endpoints: TelemetryEndpoints, connection_message: ConnectionMessage }, + Notifier { addresses: Vec, connection_notifier: ConnectionNotifierSender }, +} - #[test] - fn valid_and_invalid_endpoints() { - let endp = vec![("/ip4/80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp); - assert!(telem.is_err()); - } +/// Report a telemetry. +/// +/// Translates to [`tracing::info`], but contains an additional verbosity parameter which the log +/// record is tagged with. Additionally the verbosity parameter is added to the record as a +/// key-value pair. 
+/// +/// # Example +/// +/// ```no_run +/// # use sc_telemetry::*; +/// # let authority_id = 42_u64; +/// # let set_id = (43_u64, 44_u64); +/// # let authorities = vec![45_u64]; +/// # let telemetry: Option = None; +/// telemetry!( +/// telemetry; // an `Option` +/// CONSENSUS_INFO; +/// "afg.authority_set"; +/// "authority_id" => authority_id.to_string(), +/// "authority_set_id" => ?set_id, +/// "authorities" => authorities, +/// ); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! telemetry { + ( $telemetry:expr; $verbosity:expr; $msg:expr; $( $t:tt )* ) => {{ + if let Some(telemetry) = $telemetry.as_ref() { + let verbosity: $crate::VerbosityLevel = $verbosity; + match format_fields_to_json!($($t)*) { + Err(err) => { + $crate::log::debug!( + target: "telemetry", + "Could not serialize value for telemetry: {}", + err, + ); + }, + Ok(mut json) => { + json.insert("msg".into(), $msg.into()); + telemetry.send_telemetry(verbosity, json); + }, + } + } + }}; +} + +#[macro_export(local_inner_macros)] +#[doc(hidden)] +macro_rules! format_fields_to_json { + ( $k:literal => $v:expr $(,)? $(, $($t:tt)+ )? ) => {{ + $crate::serde_json::to_value(&$v) + .map(|value| { + let mut map = $crate::serde_json::Map::new(); + map.insert($k.into(), value); + map + }) + $( + .and_then(|mut prev_map| { + format_fields_to_json!($($t)*) + .map(move |mut other_map| { + prev_map.append(&mut other_map); + prev_map + }) + }) + )* + }}; + ( $k:literal => ? $v:expr $(,)? $(, $($t:tt)+ )? ) => {{ + let mut map = $crate::serde_json::Map::new(); + map.insert($k.into(), std::format!("{:?}", &$v).into()); + $crate::serde_json::Result::Ok(map) + $( + .and_then(|mut prev_map| { + format_fields_to_json!($($t)*) + .map(move |mut other_map| { + prev_map.append(&mut other_map); + prev_map + }) + }) + )* + }}; } diff --git a/client/telemetry/src/node.rs b/client/telemetry/src/node.rs new file mode 100644 index 0000000000000..4d845c328fe89 --- /dev/null +++ b/client/telemetry/src/node.rs @@ -0,0 +1,313 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::TelemetryPayload; +use futures::{channel::mpsc, prelude::*}; +use libp2p::{core::transport::Transport, Multiaddr}; +use rand::Rng as _; +use std::{ + fmt, mem, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; +use wasm_timer::Delay; + +pub(crate) type ConnectionNotifierSender = mpsc::Sender<()>; +pub(crate) type ConnectionNotifierReceiver = mpsc::Receiver<()>; + +pub(crate) fn connection_notifier_channel() -> (ConnectionNotifierSender, ConnectionNotifierReceiver) +{ + mpsc::channel(0) +} + +/// Handler for a single telemetry node. 
+///
+/// This is a wrapper `Sink` around a network `Sink` with 3 particularities:
+/// - It is infallible: if the connection stops, it will reconnect automatically when the server
+///   becomes available again.
+/// - It holds a list of "connection messages" which are sent automatically when the connection is
+///   (re-)established. This is used for the "system.connected" message that needs to be sent for
+///   every substrate node that connects.
+/// - It doesn't stay pending while waiting for a connection. Instead, it moves data into the void
+///   if the connection could not be established. This is important for the `Dispatcher` `Sink`,
+///   which we don't want to block if one connection is broken.
+#[derive(Debug)]
+pub(crate) struct Node {
+	/// Address of the node.
+	addr: Multiaddr,
+	/// State of the connection.
+	socket: NodeSocket,
+	/// Transport used to establish new connections.
+	transport: TTrans,
+	/// Messages that are sent when the connection (re-)establishes.
+	pub(crate) connection_messages: Vec,
+	/// Notifier for when the connection (re-)establishes.
+	pub(crate) telemetry_connection_notifier: Vec,
+}
+
+enum NodeSocket {
+	/// We're connected to the node. This is the normal state.
+	Connected(NodeSocketConnected),
+	/// We are currently dialing the node.
+	Dialing(TTrans::Dial),
+	/// A new connection should be started as soon as possible.
+	ReconnectNow,
+	/// Waiting before attempting to dial again.
+	WaitingReconnect(Delay),
+	/// Temporary transition state.
+	Poisoned,
+}
+
+impl NodeSocket {
+	fn wait_reconnect() -> NodeSocket {
+		let random_delay = rand::thread_rng().gen_range(10, 20);
+		let delay = Delay::new(Duration::from_secs(random_delay));
+		log::trace!(target: "telemetry", "Pausing for {} secs before reconnecting", random_delay);
+		NodeSocket::WaitingReconnect(delay)
+	}
+}
+
+struct NodeSocketConnected {
+	/// Where to send data.
+	sink: TTrans::Output,
+	/// Queue of packets to send before accepting new packets.
+	buf: Vec>,
+}
+
+impl Node {
+	/// Builds a new node handler.
+	pub(crate) fn new(
+		transport: TTrans,
+		addr: Multiaddr,
+		connection_messages: Vec>,
+		telemetry_connection_notifier: Vec,
+	) -> Self {
+		Node {
+			addr,
+			socket: NodeSocket::ReconnectNow,
+			transport,
+			connection_messages,
+			telemetry_connection_notifier,
+		}
+	}
+}
+
+impl Node
+where
+	TTrans: Clone + Unpin,
+	TTrans::Dial: Unpin,
+	TTrans::Output:
+		Sink, Error = TSinkErr> + Stream, TSinkErr>> + Unpin,
+	TSinkErr: fmt::Debug,
+{
+	// NOTE: this code has been inspired from `Buffer` (`futures_util::sink::Buffer`).
+ // https://docs.rs/futures-util/0.3.8/src/futures_util/sink/buffer.rs.html#32 + fn try_send_connection_messages( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + conn: &mut NodeSocketConnected, + ) -> Poll> { + while let Some(item) = conn.buf.pop() { + if let Err(e) = conn.sink.start_send_unpin(item) { + return Poll::Ready(Err(e)) + } + futures::ready!(conn.sink.poll_ready_unpin(cx))?; + } + Poll::Ready(Ok(())) + } +} + +pub(crate) enum Infallible {} + +impl Sink for Node +where + TTrans: Clone + Unpin, + TTrans::Dial: Unpin, + TTrans::Output: + Sink, Error = TSinkErr> + Stream, TSinkErr>> + Unpin, + TSinkErr: fmt::Debug, +{ + type Error = Infallible; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut socket = mem::replace(&mut self.socket, NodeSocket::Poisoned); + self.socket = loop { + match socket { + NodeSocket::Connected(mut conn) => match conn.sink.poll_ready_unpin(cx) { + Poll::Ready(Ok(())) => { + match self.as_mut().try_send_connection_messages(cx, &mut conn) { + Poll::Ready(Err(err)) => { + log::warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); + socket = NodeSocket::wait_reconnect(); + }, + Poll::Ready(Ok(())) => { + self.socket = NodeSocket::Connected(conn); + return Poll::Ready(Ok(())) + }, + Poll::Pending => { + self.socket = NodeSocket::Connected(conn); + return Poll::Pending + }, + } + }, + Poll::Ready(Err(err)) => { + log::warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); + socket = NodeSocket::wait_reconnect(); + }, + Poll::Pending => { + self.socket = NodeSocket::Connected(conn); + return Poll::Pending + }, + }, + NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { + Poll::Ready(Ok(sink)) => { + log::debug!(target: "telemetry", "✅ Connected to {}", self.addr); + + for sender in self.telemetry_connection_notifier.iter_mut() { + let _ = sender.send(()); + } + + let buf = self + .connection_messages + .iter() + .map(|json| { + let mut json = json.clone(); + json.insert( + "ts".to_string(), + chrono::Local::now().to_rfc3339().into(), + ); + json + }) + .filter_map(|json| match serde_json::to_vec(&json) { + Ok(message) => Some(message), + Err(err) => { + log::error!( + target: "telemetry", + "An error occurred while generating new connection \ + messages: {}", + err, + ); + None + }, + }) + .collect(); + + socket = NodeSocket::Connected(NodeSocketConnected { sink, buf }); + }, + Poll::Pending => break NodeSocket::Dialing(s), + Poll::Ready(Err(err)) => { + log::warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); + socket = NodeSocket::wait_reconnect(); + }, + }, + NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { + Ok(d) => { + log::trace!(target: "telemetry", "Re-dialing {}", self.addr); + socket = NodeSocket::Dialing(d); + }, + Err(err) => { + log::warn!(target: "telemetry", "❌ Error while re-dialing {}: {:?}", self.addr, err); + socket = NodeSocket::wait_reconnect(); + }, + }, + NodeSocket::WaitingReconnect(mut s) => { + if let Poll::Ready(_) = Future::poll(Pin::new(&mut s), cx) { + socket = NodeSocket::ReconnectNow; + } else { + break NodeSocket::WaitingReconnect(s) + } + }, + NodeSocket::Poisoned => { + log::error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); + break NodeSocket::Poisoned + }, + } + }; + + // The Dispatcher blocks when the Node syncs blocks. 
This is why it is important that the + // Node sinks don't go into "Pending" state while waiting for reconnection but rather + // discard the excess of telemetry messages. + Poll::Ready(Ok(())) + } + + fn start_send(mut self: Pin<&mut Self>, item: TelemetryPayload) -> Result<(), Self::Error> { + // Any buffered outgoing telemetry messages are discarded while (re-)connecting. + match &mut self.socket { + NodeSocket::Connected(conn) => match serde_json::to_vec(&item) { + Ok(data) => { + log::trace!(target: "telemetry", "Sending {} bytes", data.len()); + let _ = conn.sink.start_send_unpin(data); + }, + Err(err) => log::debug!( + target: "telemetry", + "Could not serialize payload: {}", + err, + ), + }, + // We are currently dialing the node. + NodeSocket::Dialing(_) => log::trace!(target: "telemetry", "Dialing"), + // A new connection should be started as soon as possible. + NodeSocket::ReconnectNow => log::trace!(target: "telemetry", "Reconnecting"), + // Waiting before attempting to dial again. + NodeSocket::WaitingReconnect(_) => {}, + // Temporary transition state. + NodeSocket::Poisoned => log::trace!(target: "telemetry", "Poisoned"), + } + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut self.socket { + NodeSocket::Connected(conn) => match conn.sink.poll_flush_unpin(cx) { + Poll::Ready(Err(e)) => { + // When `telemetry` closes the websocket connection we end + // up here, which is sub-optimal. See + // https://github.com/libp2p/rust-libp2p/issues/2021 for + // what we could do to improve this. + log::trace!(target: "telemetry", "[poll_flush] Error: {:?}", e); + self.socket = NodeSocket::wait_reconnect(); + Poll::Ready(Ok(())) + }, + Poll::Ready(Ok(())) => Poll::Ready(Ok(())), + Poll::Pending => Poll::Pending, + }, + _ => Poll::Ready(Ok(())), + } + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match &mut self.socket { + NodeSocket::Connected(conn) => conn.sink.poll_close_unpin(cx).map(|_| Ok(())), + _ => Poll::Ready(Ok(())), + } + } +} + +impl fmt::Debug for NodeSocket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use NodeSocket::*; + f.write_str(match self { + Connected(_) => "Connected", + Dialing(_) => "Dialing", + ReconnectNow => "ReconnectNow", + WaitingReconnect(_) => "WaitingReconnect", + Poisoned => "Poisoned", + }) + } +} diff --git a/client/telemetry/src/transport.rs b/client/telemetry/src/transport.rs new file mode 100644 index 0000000000000..04ec79ebf5645 --- /dev/null +++ b/client/telemetry/src/transport.rs @@ -0,0 +1,144 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
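Stepping back from `node.rs`: the `Sink` implementation above is, at heart, a five-state connection machine. A stripped-down sketch of its transitions (hypothetical simplified names; sockets, timers and the randomized 10 to 20 second backoff are elided):

```rust
// Hypothetical, simplified mirror of `NodeSocket` in node.rs.
#[derive(Debug, PartialEq)]
enum State {
	Connected,
	Dialing,
	ReconnectNow,
	WaitingReconnect, // real code: a `Delay` of random 10..20 seconds
	Poisoned,         // placeholder installed while the real state is moved out
}

// Any I/O failure while connected or dialing falls back to a backoff wait.
fn on_error(state: State) -> State {
	match state {
		State::Connected | State::Dialing => State::WaitingReconnect,
		other => other,
	}
}

// When the backoff timer fires, dial again immediately.
fn on_timer_fired(state: State) -> State {
	if state == State::WaitingReconnect { State::ReconnectNow } else { state }
}

fn main() {
	let s = on_error(State::Connected);
	let s = on_timer_fired(s);
	assert_eq!(s, State::ReconnectNow);
}
```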
+ +use futures::{ + executor::block_on, + prelude::*, + ready, + task::{Context, Poll}, +}; +use libp2p::{core::transport::timeout::TransportTimeout, Transport}; +use std::{io, pin::Pin, time::Duration}; + +/// Timeout after which a connection attempt is considered failed. Includes the WebSocket HTTP +/// upgrading. +const CONNECT_TIMEOUT: Duration = Duration::from_secs(20); + +pub(crate) fn initialize_transport() -> Result { + let transport = { + let inner = block_on(libp2p::dns::DnsConfig::system(libp2p::tcp::TcpConfig::new()))?; + libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { + let connec = connec + .with(|item| { + let item = libp2p::websocket::framed::OutgoingData::Binary(item); + future::ready(Ok::<_, io::Error>(item)) + }) + .try_filter(|item| future::ready(item.is_data())) + .map_ok(|data| data.into_bytes()); + future::ready(Ok::<_, io::Error>(connec)) + }) + }; + + Ok(TransportTimeout::new( + transport.map(|out, _| { + let out = out + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .sink_map_err(|err| io::Error::new(io::ErrorKind::Other, err)); + Box::pin(out) as Pin> + }), + CONNECT_TIMEOUT, + ) + .boxed()) +} + +/// A trait that implements `Stream` and `Sink`. +pub(crate) trait StreamAndSink: Stream + Sink {} +impl, I> StreamAndSink for T {} + +/// A type alias for the WebSocket transport. +pub(crate) type WsTrans = libp2p::core::transport::Boxed< + Pin< + Box< + dyn StreamAndSink, Item = Result, io::Error>, Error = io::Error> + Send, + >, + >, +>; + +/// Wraps around an `AsyncWrite` and implements `Sink`. Guarantees that each item being sent maps +/// to one call of `write`. +#[pin_project::pin_project] +pub(crate) struct StreamSink(#[pin] T, Option>); + +impl From for StreamSink { + fn from(inner: T) -> StreamSink { + StreamSink(inner, None) + } +} + +impl Stream for StreamSink { + type Item = Result, io::Error>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); + let mut buf = vec![0; 128]; + match ready!(AsyncRead::poll_read(this.0, cx, &mut buf)) { + Ok(0) => Poll::Ready(None), + Ok(n) => { + buf.truncate(n); + Poll::Ready(Some(Ok(buf))) + }, + Err(err) => Poll::Ready(Some(Err(err))), + } + } +} + +impl StreamSink { + fn poll_flush_buffer(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); + + if let Some(buffer) = this.1 { + if ready!(this.0.poll_write(cx, &buffer[..]))? 
!= buffer.len() { + log::error!(target: "telemetry", + "Detected some internal buffering happening in the telemetry"); + let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); + return Poll::Ready(Err(err)) + } + } + + *this.1 = None; + Poll::Ready(Ok(())) + } +} + +impl Sink> for StreamSink { + type Error = io::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(StreamSink::poll_flush_buffer(self, cx))?; + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { + let this = self.project(); + debug_assert!(this.1.is_none()); + *this.1 = Some(item); + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.as_mut().poll_flush_buffer(cx))?; + let this = self.project(); + AsyncWrite::poll_flush(this.0, cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.as_mut().poll_flush_buffer(cx))?; + let this = self.project(); + AsyncWrite::poll_close(this.0, cx) + } +} diff --git a/client/telemetry/src/worker.rs b/client/telemetry/src/worker.rs deleted file mode 100644 index e01ac62d12dc3..0000000000000 --- a/client/telemetry/src/worker.rs +++ /dev/null @@ -1,260 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Contains the object that makes the telemetry work. -//! -//! # Usage -//! -//! - Create a `TelemetryWorker` with `TelemetryWorker::new`. -//! - Send messages to the telemetry with `TelemetryWorker::send_message`. Messages will only be -//! sent to the appropriate targets. Messages may be ignored if the target happens to be -//! temporarily unreachable. -//! - You must appropriately poll the worker with `TelemetryWorker::poll`. Polling will/may produce -//! events indicating what happened since the latest polling. -//! - -use futures::{prelude::*, ready}; -use libp2p::{core::transport::OptionalTransport, Multiaddr, Transport, wasm_ext}; -use log::{trace, warn, error}; -use slog::Drain; -use std::{io, pin::Pin, task::Context, task::Poll, time}; - -mod node; - -/// Timeout after which a connection attempt is considered failed. Includes the WebSocket HTTP -/// upgrading. -const CONNECT_TIMEOUT: time::Duration = time::Duration::from_secs(20); - -/// Event generated when polling the worker. -#[derive(Debug)] -pub enum TelemetryWorkerEvent { - /// We have established a connection to one of the telemetry endpoint, either for the first - /// time or after having been disconnected earlier. - Connected, -} - -/// Telemetry processing machine. -#[derive(Debug)] -pub struct TelemetryWorker { - /// List of nodes with their maximum verbosity level. 
- nodes: Vec<(node::Node, u8)>, -} - -trait StreamAndSink: Stream + Sink {} -impl, I> StreamAndSink for T {} - -type WsTrans = libp2p::core::transport::boxed::Boxed< - Pin, - Item = Result, io::Error>, - Error = io::Error - > + Send>>, - io::Error ->; - -impl TelemetryWorker { - /// Builds a new `TelemetryWorker`. - /// - /// The endpoints must be a list of targets, plus a verbosity level. When you send a message - /// to the telemetry, only the targets whose verbosity is higher than the verbosity of the - /// message will receive it. - pub fn new( - endpoints: impl IntoIterator, - wasm_external_transport: impl Into> - ) -> Result { - let transport = match wasm_external_transport.into() { - Some(t) => OptionalTransport::some(t), - None => OptionalTransport::none() - }.map((|inner, _| StreamSink::from(inner)) as fn(_, _) -> _); - - // The main transport is the `wasm_external_transport`, but if we're on desktop we add - // support for TCP+WebSocket+DNS as a fallback. In practice, you're not expected to pass - // an external transport on desktop and the fallback is used all the time. - #[cfg(not(target_os = "unknown"))] - let transport = transport.or_transport({ - let inner = libp2p::dns::DnsConfig::new(libp2p::tcp::TcpConfig::new())?; - libp2p::websocket::framed::WsConfig::new(inner) - .and_then(|connec, _| { - let connec = connec - .with(|item| { - let item = libp2p::websocket::framed::OutgoingData::Binary(item); - future::ready(Ok::<_, io::Error>(item)) - }) - .try_filter(|item| future::ready(item.is_data())) - .map_ok(|data| data.into_bytes()); - future::ready(Ok::<_, io::Error>(connec)) - }) - }); - - let transport = transport - .timeout(CONNECT_TIMEOUT) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .map(|out, _| { - let out = out - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .sink_map_err(|err| io::Error::new(io::ErrorKind::Other, err)); - Box::pin(out) as Pin> - }) - .boxed(); - - Ok(TelemetryWorker { - nodes: endpoints.into_iter().map(|(addr, verbosity)| { - let node = node::Node::new(transport.clone(), addr); - (node, verbosity) - }).collect() - }) - } - - /// Polls the worker for events that happened. - pub fn poll(&mut self, cx: &mut Context) -> Poll { - for (node, _) in &mut self.nodes { - loop { - match node::Node::poll(Pin::new(node), cx) { - Poll::Ready(node::NodeEvent::Connected) => - return Poll::Ready(TelemetryWorkerEvent::Connected), - Poll::Ready(node::NodeEvent::Disconnected(_)) => continue, - Poll::Pending => break, - } - } - } - - Poll::Pending - } - - /// Equivalent to `slog::Drain::log`, but takes `self` by `&mut` instead, which is more convenient. - /// - /// Keep in mind that you should call `TelemetryWorker::poll` in order to process the messages. - /// You should call this function right after calling `slog::Drain::log`. - pub fn log(&mut self, record: &slog::Record, values: &slog::OwnedKVList) -> Result<(), ()> { - let msg_verbosity = match record.tag().parse::() { - Ok(v) => v, - Err(err) => { - warn!(target: "telemetry", "Failed to parse telemetry tag {:?}: {:?}", - record.tag(), err); - return Err(()) - } - }; - - // None of the nodes want that verbosity, so just return without doing any serialization. - if self.nodes.iter().all(|(_, node_max_verbosity)| msg_verbosity > *node_max_verbosity) { - trace!( - target: "telemetry", - "Skipping log entry because verbosity {:?} is too high for all endpoints", - msg_verbosity - ); - return Ok(()) - } - - // Turn the message into JSON. 
- let serialized = { - let mut out = Vec::new(); - slog_json::Json::default(&mut out).log(record, values).map_err(|_| ())?; - out - }; - - for (node, node_max_verbosity) in &mut self.nodes { - if msg_verbosity > *node_max_verbosity { - trace!(target: "telemetry", "Skipping {:?} for log entry with verbosity {:?}", - node.addr(), msg_verbosity); - continue; - } - - // `send_message` returns an error if we're not connected, which we silently ignore. - let _ = node.send_message(&serialized.clone()[..]); - } - - Ok(()) - } -} - -/// Wraps around an `AsyncWrite` and implements `Sink`. Guarantees that each item being sent maps -/// to one call of `write`. -/// -/// For some context, we put this object around the `wasm_ext::ExtTransport` in order to make sure -/// that each telemetry message maps to one single call to `write` in the WASM FFI. -#[pin_project::pin_project] -struct StreamSink(#[pin] T, Option>); - -impl From for StreamSink { - fn from(inner: T) -> StreamSink { - StreamSink(inner, None) - } -} - -impl Stream for StreamSink { - type Item = Result, io::Error>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let this = self.project(); - let mut buf = vec![0; 128]; - match ready!(AsyncRead::poll_read(this.0, cx, &mut buf)) { - Ok(0) => Poll::Ready(None), - Ok(n) => { - buf.truncate(n); - Poll::Ready(Some(Ok(buf))) - }, - Err(err) => Poll::Ready(Some(Err(err))), - } - } -} - -impl StreamSink { - fn poll_flush_buffer(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let this = self.project(); - - if let Some(buffer) = this.1 { - if ready!(this.0.poll_write(cx, &buffer[..]))? != buffer.len() { - error!(target: "telemetry", - "Detected some internal buffering happening in the telemetry"); - let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); - return Poll::Ready(Err(err)); - } - } - - *this.1 = None; - Poll::Ready(Ok(())) - } -} - -impl Sink> for StreamSink { - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(StreamSink::poll_flush_buffer(self, cx))?; - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { - let this = self.project(); - debug_assert!(this.1.is_none()); - *this.1 = Some(item); - Ok(()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - let this = self.project(); - AsyncWrite::poll_flush(this.0, cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - let this = self.project(); - AsyncWrite::poll_close(this.0, cx) - } -} diff --git a/client/telemetry/src/worker/node.rs b/client/telemetry/src/worker/node.rs deleted file mode 100644 index eef7ca7e81553..0000000000000 --- a/client/telemetry/src/worker/node.rs +++ /dev/null @@ -1,305 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Contains the `Node` struct, which handles communications with a single telemetry endpoint. - -use futures::prelude::*; -use futures_timer::Delay; -use libp2p::Multiaddr; -use libp2p::core::transport::Transport; -use log::{trace, debug, warn, error}; -use rand::Rng as _; -use std::{collections::VecDeque, fmt, mem, pin::Pin, task::Context, task::Poll, time::Duration}; - -/// Maximum number of pending telemetry messages. -const MAX_PENDING: usize = 10; - -/// Handler for a single telemetry node. -pub struct Node { - /// Address of the node. - addr: Multiaddr, - /// State of the connection. - socket: NodeSocket, - /// Transport used to establish new connections. - transport: TTrans, -} - -enum NodeSocket { - /// We're connected to the node. This is the normal state. - Connected(NodeSocketConnected), - /// We are currently dialing the node. - Dialing(TTrans::Dial), - /// A new connection should be started as soon as possible. - ReconnectNow, - /// Waiting before attempting to dial again. - WaitingReconnect(Delay), - /// Temporary transition state. - Poisoned, -} - -struct NodeSocketConnected { - /// Where to send data. - sink: TTrans::Output, - /// Queue of packets to send. - pending: VecDeque>, - /// If true, we need to flush the sink. - need_flush: bool, - /// A timeout for the socket to write data. - timeout: Option, -} - -/// Event that can happen with this node. -#[derive(Debug)] -pub enum NodeEvent { - /// We are now connected to this node. - Connected, - /// We are now disconnected from this node. - Disconnected(ConnectionError), -} - -/// Reason for disconnecting from a node. -#[derive(Debug)] -pub enum ConnectionError { - /// The connection timed-out. - Timeout, - /// Reading from the socket returned and end-of-file, indicating that the socket has been - /// closed. - Closed, - /// The sink errored. - Sink(TSinkErr), -} - -impl Node { - /// Builds a new node handler. - pub fn new(transport: TTrans, addr: Multiaddr) -> Self { - Node { - addr, - socket: NodeSocket::ReconnectNow, - transport, - } - } - - /// Returns the address that was passed to `new`. - pub fn addr(&self) -> &Multiaddr { - &self.addr - } -} - -impl Node -where TTrans: Clone + Unpin, TTrans::Dial: Unpin, - TTrans::Output: Sink, Error = TSinkErr> - + Stream, TSinkErr>> - + Unpin, - TSinkErr: fmt::Debug -{ - /// Sends a WebSocket frame to the node. Returns an error if we are not connected to the node. - /// - /// After calling this method, you should call `poll` in order for it to be properly processed. - pub fn send_message(&mut self, payload: impl Into>) -> Result<(), ()> { - if let NodeSocket::Connected(NodeSocketConnected { pending, .. }) = &mut self.socket { - if pending.len() <= MAX_PENDING { - trace!(target: "telemetry", "Adding log entry to queue for {:?}", self.addr); - pending.push_back(payload.into()); - Ok(()) - } else { - warn!(target: "telemetry", "⚠️ Rejected log entry because queue is full for {:?}", - self.addr); - Err(()) - } - } else { - Err(()) - } - } - - /// Polls the node for updates. Must be performed regularly. 
- pub fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let mut socket = mem::replace(&mut self.socket, NodeSocket::Poisoned); - self.socket = loop { - match socket { - NodeSocket::Connected(mut conn) => { - match NodeSocketConnected::poll(Pin::new(&mut conn), cx, &self.addr) { - Poll::Ready(Ok(v)) => match v {}, - Poll::Pending => { - break NodeSocket::Connected(conn) - }, - Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - self.socket = NodeSocket::WaitingReconnect(timeout); - return Poll::Ready(NodeEvent::Disconnected(err)) - } - } - } - NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { - Poll::Ready(Ok(sink)) => { - debug!(target: "telemetry", "✅ Connected to {}", self.addr); - let conn = NodeSocketConnected { - sink, - pending: VecDeque::new(), - need_flush: false, - timeout: None, - }; - self.socket = NodeSocket::Connected(conn); - return Poll::Ready(NodeEvent::Connected) - }, - Poll::Pending => break NodeSocket::Dialing(s), - Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - socket = NodeSocket::WaitingReconnect(timeout); - } - } - NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { - Ok(d) => { - debug!(target: "telemetry", "Started dialing {}", self.addr); - socket = NodeSocket::Dialing(d); - } - Err(err) => { - warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - socket = NodeSocket::WaitingReconnect(timeout); - } - } - NodeSocket::WaitingReconnect(mut s) => - if let Poll::Ready(_) = Future::poll(Pin::new(&mut s), cx) { - socket = NodeSocket::ReconnectNow; - } else { - break NodeSocket::WaitingReconnect(s) - } - NodeSocket::Poisoned => { - error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); - break NodeSocket::Poisoned - } - } - }; - - Poll::Pending - } -} - -/// Generates a `Delay` object with a random timeout. -/// -/// If there are general connection issues, not all endpoints should be synchronized in their -/// re-connection time. -fn gen_rand_reconnect_delay() -> Delay { - let random_delay = rand::thread_rng().gen_range(5, 10); - Delay::new(Duration::from_secs(random_delay)) -} - -impl NodeSocketConnected -where TTrans::Output: Sink, Error = TSinkErr> - + Stream, TSinkErr>> - + Unpin -{ - /// Processes the queue of messages for the connected socket. - /// - /// The address is passed for logging purposes only. 
- fn poll( - mut self: Pin<&mut Self>, - cx: &mut Context, - my_addr: &Multiaddr, - ) -> Poll>> { - - while let Some(item) = self.pending.pop_front() { - if let Poll::Ready(result) = Sink::poll_ready(Pin::new(&mut self.sink), cx) { - if let Err(err) = result { - return Poll::Ready(Err(ConnectionError::Sink(err))) - } - - let item_len = item.len(); - if let Err(err) = Sink::start_send(Pin::new(&mut self.sink), item) { - return Poll::Ready(Err(ConnectionError::Sink(err))) - } - trace!( - target: "telemetry", "Successfully sent {:?} bytes message to {}", - item_len, my_addr - ); - self.need_flush = true; - - } else { - self.pending.push_front(item); - if self.timeout.is_none() { - self.timeout = Some(Delay::new(Duration::from_secs(10))); - } - break; - } - } - - if self.need_flush { - match Sink::poll_flush(Pin::new(&mut self.sink), cx) { - Poll::Pending => { - if self.timeout.is_none() { - self.timeout = Some(Delay::new(Duration::from_secs(10))); - } - }, - Poll::Ready(Err(err)) => { - self.timeout = None; - return Poll::Ready(Err(ConnectionError::Sink(err))) - }, - Poll::Ready(Ok(())) => { - self.timeout = None; - self.need_flush = false; - }, - } - } - - if let Some(timeout) = self.timeout.as_mut() { - match Future::poll(Pin::new(timeout), cx) { - Poll::Pending => {}, - Poll::Ready(()) => { - self.timeout = None; - return Poll::Ready(Err(ConnectionError::Timeout)) - } - } - } - - match Stream::poll_next(Pin::new(&mut self.sink), cx) { - Poll::Ready(Some(Ok(_))) => { - // We poll the telemetry `Stream` because the underlying implementation relies on - // this in order to answer PINGs. - // We don't do anything with incoming messages, however. - }, - Poll::Ready(Some(Err(err))) => { - return Poll::Ready(Err(ConnectionError::Sink(err))) - }, - Poll::Ready(None) => { - return Poll::Ready(Err(ConnectionError::Closed)) - }, - Poll::Pending => {}, - } - - Poll::Pending - } -} - -impl fmt::Debug for Node { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let state = match self.socket { - NodeSocket::Connected(_) => "Connected", - NodeSocket::Dialing(_) => "Dialing", - NodeSocket::ReconnectNow => "Pending reconnect", - NodeSocket::WaitingReconnect(_) => "Pending reconnect", - NodeSocket::Poisoned => "Poisoned", - }; - - f.debug_struct("Node") - .field("addr", &self.addr) - .field("state", &state) - .finish() - } -} diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 35db326c94929..3e314a82aa583 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-tracing" -version = "2.0.0" +version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,15 +13,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -erased-serde = "0.3.9" +ansi_term = "0.12.1" +atty = "0.2.13" +lazy_static = "1.4.0" log = { version = "0.4.8" } -parking_lot = "0.10.0" +once_cell = "1.4.1" +parking_lot = "0.11.1" +regex = "1.4.2" rustc-hash = "1.1.0" -serde = "1.0.101" -serde_json = "1.0.41" -slog = { version = "2.5.2", features = ["nested-values"] } -tracing = "0.1.21" -tracing-core = "0.1.17" -tracing-subscriber = "0.2.13" -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } +serde = "1.0.126" +thiserror = "1.0.21" +tracing = "0.1.25" +tracing-log = "0.1.2" +tracing-subscriber = "0.2.19" +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +sp-rpc = 
{ version = "4.0.0-dev", path = "../../primitives/rpc" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-tracing-proc-macro = { version = "4.0.0-dev", path = "./proc-macro" } +sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } diff --git a/client/cli/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml similarity index 68% rename from client/cli/proc-macro/Cargo.toml rename to client/tracing/proc-macro/Cargo.toml index 9b9d134c5a836..002370b515f28 100644 --- a/client/cli/proc-macro/Cargo.toml +++ b/client/tracing/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "sc-cli-proc-macro" -version = "2.0.0" +name = "sc-tracing-proc-macro" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "0.1.4" -proc-macro2 = "1.0.6" +proc-macro-crate = "1.0.0" +proc-macro2 = "1.0.29" quote = { version = "1.0.3", features = ["proc-macro"] } -syn = { version = "1.0.7", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "1.0.58", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/client/cli/proc-macro/src/lib.rs b/client/tracing/proc-macro/src/lib.rs similarity index 90% rename from client/cli/proc-macro/src/lib.rs rename to client/tracing/proc-macro/src/lib.rs index 775d1eb96ea38..e9a4f58705b41 100644 --- a/client/cli/proc-macro/src/lib.rs +++ b/client/tracing/proc-macro/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,7 +18,7 @@ use proc_macro::TokenStream; use proc_macro2::Span; -use proc_macro_crate::crate_name; +use proc_macro_crate::{crate_name, FoundCrate}; use quote::quote; use syn::{Error, Expr, Ident, ItemFn}; @@ -113,37 +113,24 @@ pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream { "missing argument: name of the node. Example: sc_cli::prefix_logs_with()", ) .to_compile_error() - .into(); + .into() } let name = syn::parse_macro_input!(arg as Expr); - let crate_name = if std::env::var("CARGO_PKG_NAME") - .expect("cargo env var always there when compiling; qed") - == "sc-cli" - { - Ident::new("sc_cli", Span::call_site().into()) - } else { - let crate_name = match crate_name("sc-cli") { - Ok(x) => x, - Err(err) => return Error::new(Span::call_site(), err).to_compile_error().into(), - }; - - Ident::new(&crate_name, Span::call_site().into()) + let crate_name = match crate_name("sc-tracing") { + Ok(FoundCrate::Itself) => Ident::from(Ident::new("sc_tracing", Span::call_site())), + Ok(FoundCrate::Name(crate_name)) => Ident::new(&crate_name, Span::call_site()), + Err(e) => return Error::new(Span::call_site(), e).to_compile_error().into(), }; - let ItemFn { - attrs, - vis, - sig, - block, - } = item_fn; + let ItemFn { attrs, vis, sig, block } = item_fn; (quote! 
{ #(#attrs)* #vis #sig { let span = #crate_name::tracing::info_span!( - #crate_name::PREFIX_LOG_SPAN, + #crate_name::logging::PREFIX_LOG_SPAN, name = #name, ); let _enter = span.enter(); diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs new file mode 100644 index 0000000000000..8280d4613a189 --- /dev/null +++ b/client/tracing/src/block/mod.rs @@ -0,0 +1,361 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Utilities for tracing block execution + +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Instant, +}; + +use parking_lot::Mutex; +use tracing::{ + dispatcher, + span::{Attributes, Id, Record}, + Dispatch, Level, Subscriber, +}; + +use crate::{SpanDatum, TraceEvent, Values}; +use sc_client_api::BlockBackend; +use sc_rpc_server::RPC_MAX_PAYLOAD_DEFAULT; +use sp_api::{Core, Encode, Metadata, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; +use sp_core::hexdisplay::HexDisplay; +use sp_rpc::tracing::{BlockTrace, Span, TraceBlockResponse, TraceError}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header}, +}; +use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; + +// Heuristic for average event size in bytes. +const AVG_EVENT: usize = 600 * 8; +// Heuristic for average span size in bytes. +const AVG_SPAN: usize = 100 * 8; +// Estimate of the max base RPC payload size when the Id is bound as a u64. If strings +// are used for the RPC Id this may need to be adjusted. Note: The base payload +// does not include the RPC result. +// +// The estimate is based on the JSONRPC response message which has the following format: +// `{"jsonrpc":"2.0","result":[],"id":18446744073709551615}`. +// +// We care about the total size of the payload because jsonrpc-server will simply ignore +// messages larger than `sc_rpc_server::MAX_PAYLOAD` and the caller will not get any +// response. +const BASE_PAYLOAD: usize = 100; +// Default to only pallet, frame support and state related traces +const DEFAULT_TARGETS: &str = "pallet,frame,state"; +const TRACE_TARGET: &str = "block_trace"; +// The name of a field required for all events. 
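// For reference, the `prefix_logs_with` expansion shown earlier in this hunk means that
// a function such as the following (assuming a node named "alice"; the name is
// illustrative only)
//
//     #[sc_tracing::logging::prefix_logs_with("alice")]
//     fn run_node() { log::info!("starting"); }
//
// expands to, roughly:
//
//     fn run_node() {
//         let span = sc_tracing::tracing::info_span!(
//             sc_tracing::logging::PREFIX_LOG_SPAN,
//             name = "alice",
//         );
//         let _enter = span.enter();
//         log::info!("starting"); // later rendered by the event formatter as: [alice] starting
//     }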
+const REQUIRED_EVENT_FIELD: &str = "method";
+const MEGABYTE: usize = 1024 * 1024;
+
+/// Tracing Block Result type alias
+pub type TraceBlockResult<T> = Result<T, Error>;
+
+/// Tracing Block error
+#[derive(Debug, thiserror::Error)]
+#[allow(missing_docs)]
+#[non_exhaustive]
+pub enum Error {
+	#[error("Invalid block Id: {0}")]
+	InvalidBlockId(#[from] sp_blockchain::Error),
+	#[error("Missing block component: {0}")]
+	MissingBlockComponent(String),
+	#[error("Dispatch error: {0}")]
+	Dispatch(String),
+}
+
+struct BlockSubscriber {
+	targets: Vec<(String, Level)>,
+	next_id: AtomicU64,
+	spans: Mutex<HashMap<Id, SpanDatum>>,
+	events: Mutex<Vec<TraceEvent>>,
+}
+
+impl BlockSubscriber {
+	fn new(targets: &str) -> Self {
+		let next_id = AtomicU64::new(1);
+		let mut targets: Vec<_> = targets.split(',').map(crate::parse_target).collect();
+		// Ensure that WASM traces are always enabled
+		// Filtering happens when decoding the actual target / level
+		targets.push((WASM_TRACE_IDENTIFIER.to_owned(), Level::TRACE));
+		BlockSubscriber {
+			targets,
+			next_id,
+			spans: Mutex::new(HashMap::new()),
+			events: Mutex::new(Vec::new()),
+		}
+	}
+}
+
+impl Subscriber for BlockSubscriber {
+	fn enabled(&self, metadata: &tracing::Metadata<'_>) -> bool {
+		if !metadata.is_span() && !metadata.fields().field(REQUIRED_EVENT_FIELD).is_some() {
+			return false
+		}
+		for (target, level) in &self.targets {
+			if metadata.level() <= level && metadata.target().starts_with(target) {
+				return true
+			}
+		}
+		false
+	}
+
+	fn new_span(&self, attrs: &Attributes<'_>) -> Id {
+		let id = Id::from_u64(self.next_id.fetch_add(1, Ordering::Relaxed));
+		let mut values = Values::default();
+		attrs.record(&mut values);
+		let parent_id = attrs.parent().cloned();
+		let span = SpanDatum {
+			id: id.clone(),
+			parent_id,
+			name: attrs.metadata().name().to_owned(),
+			target: attrs.metadata().target().to_owned(),
+			level: *attrs.metadata().level(),
+			line: attrs.metadata().line().unwrap_or(0),
+			start_time: Instant::now(),
+			values,
+			overall_time: Default::default(),
+		};
+
+		self.spans.lock().insert(id.clone(), span);
+		id
+	}
+
+	fn record(&self, span: &Id, values: &Record<'_>) {
+		let mut span_data = self.spans.lock();
+		if let Some(s) = span_data.get_mut(span) {
+			values.record(&mut s.values);
+		}
+	}
+
+	fn record_follows_from(&self, _span: &Id, _follows: &Id) {
+		// Not currently used
+		unimplemented!("record_follows_from is not implemented");
+	}
+
+	fn event(&self, event: &tracing::Event<'_>) {
+		let mut values = crate::Values::default();
+		event.record(&mut values);
+		let parent_id = event.parent().cloned();
+		let trace_event = TraceEvent {
+			name: event.metadata().name().to_owned(),
+			target: event.metadata().target().to_owned(),
+			level: *event.metadata().level(),
+			values,
+			parent_id,
+		};
+		self.events.lock().push(trace_event);
+	}
+
+	fn enter(&self, _id: &Id) {}
+
+	fn exit(&self, _span: &Id) {}
+}
+
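// Sketch of the filtering rules implemented by `enabled` above, given
// `BlockSubscriber::new("pallet,frame,state")` (each target defaults to TRACE, and the
// WASM identifier is appended automatically):
//
//     // Prefix match on "pallet" and DEBUG <= TRACE, so this is recorded:
//     tracing::event!(target: "pallet_balances", tracing::Level::DEBUG, method = "transfer");
//     // No configured prefix matches "libp2p_swarm", so this is dropped:
//     tracing::event!(target: "libp2p_swarm", tracing::Level::INFO, method = "dial");
//     // Events missing the required "method" field are rejected up front.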
+/// Holds a reference to the client in order to execute the given block.
+/// Records spans & events for the supplied targets (e.g. "pallet,frame,state") and
+/// only records events with the specified hex encoded storage key prefixes.
+/// Note: if `targets` or `storage_keys` is an empty string then nothing is
+/// filtered out.
+pub struct BlockExecutor<Block: BlockT, Client> {
+	client: Arc<Client>,
+	block: Block::Hash,
+	targets: Option<String>,
+	storage_keys: Option<String>,
+	methods: Option<String>,
+	rpc_max_payload: usize,
+}
+
+impl<Block, Client> BlockExecutor<Block, Client>
+where
+	Block: BlockT + 'static,
+	Client: HeaderBackend<Block>
+		+ BlockBackend<Block>
+		+ ProvideRuntimeApi<Block>
+		+ Send
+		+ Sync
+		+ 'static,
+	Client::Api: Metadata<Block>,
+{
+	/// Create a new `BlockExecutor`
+	pub fn new(
+		client: Arc<Client>,
+		block: Block::Hash,
+		targets: Option<String>,
+		storage_keys: Option<String>,
+		methods: Option<String>,
+		rpc_max_payload: Option<usize>,
+	) -> Self {
+		let rpc_max_payload = rpc_max_payload
+			.map(|mb| mb.saturating_mul(MEGABYTE))
+			.unwrap_or(RPC_MAX_PAYLOAD_DEFAULT);
+		Self { client, block, targets, storage_keys, methods, rpc_max_payload }
+	}
+
+	/// Execute block, record all spans and events belonging to `Self::targets`
+	/// and filter out events which do not have keys starting with one of the
+	/// prefixes in `Self::storage_keys`.
+	pub fn trace_block(&self) -> TraceBlockResult<TraceBlockResponse> {
+		tracing::debug!(target: "state_tracing", "Tracing block: {}", self.block);
+		// Prepare the block
+		let id = BlockId::Hash(self.block);
+		let mut header = self
+			.client
+			.header(id)
+			.map_err(|e| Error::InvalidBlockId(e))?
+			.ok_or_else(|| Error::MissingBlockComponent("Header not found".to_string()))?;
+		let extrinsics = self
+			.client
+			.block_body(&id)
+			.map_err(|e| Error::InvalidBlockId(e))?
+			.ok_or_else(|| Error::MissingBlockComponent("Extrinsics not found".to_string()))?;
+		tracing::debug!(target: "state_tracing", "Found {} extrinsics", extrinsics.len());
+		let parent_hash = *header.parent_hash();
+		let parent_id = BlockId::Hash(parent_hash);
+		// Remove all `Seal`s as they are added by the consensus engines after building the block.
+		// On import they are normally removed by the consensus engine.
+		header.digest_mut().logs.retain(|d| d.as_seal().is_none());
+		let block = Block::new(header, extrinsics);
+
+		let targets = if let Some(t) = &self.targets { t } else { DEFAULT_TARGETS };
+		let block_subscriber = BlockSubscriber::new(targets);
+		let dispatch = Dispatch::new(block_subscriber);
+
+		{
+			let dispatcher_span = tracing::debug_span!(
+				target: "state_tracing",
+				"execute_block",
+				extrinsics_len = block.extrinsics().len(),
+			);
+			let _guard = dispatcher_span.enter();
+			if let Err(e) = dispatcher::with_default(&dispatch, || {
+				let span = tracing::info_span!(target: TRACE_TARGET, "trace_block");
+				let _enter = span.enter();
+				self.client.runtime_api().execute_block(&parent_id, block)
+			}) {
+				return Err(Error::Dispatch(
+					format!("Failed to collect traces and execute block: {:?}", e).to_string(),
+				))
+			}
+		}
+
+		let block_subscriber =
+			dispatch.downcast_ref::<BlockSubscriber>().ok_or(Error::Dispatch(
+				"Cannot downcast Dispatch to BlockSubscriber after tracing block".to_string(),
+			))?;
+		let spans: Vec<_> = block_subscriber
+			.spans
+			.lock()
+			.drain()
+			// Patch wasm identifiers
+			.filter_map(|(_, s)| patch_and_filter(SpanDatum::from(s), targets))
+			.collect();
+		let events: Vec<_> = block_subscriber
+			.events
+			.lock()
+			.drain(..)
+ .filter(|e| { + self.storage_keys + .as_ref() + .map(|keys| event_values_filter(e, "key", keys)) + .unwrap_or(false) + }) + .filter(|e| { + self.methods + .as_ref() + .map(|methods| event_values_filter(e, "method", methods)) + .unwrap_or(false) + }) + .map(|s| s.into()) + .collect(); + tracing::debug!(target: "state_tracing", "Captured {} spans and {} events", spans.len(), events.len()); + + let approx_payload_size = BASE_PAYLOAD + events.len() * AVG_EVENT + spans.len() * AVG_SPAN; + let response = if approx_payload_size > self.rpc_max_payload { + TraceBlockResponse::TraceError(TraceError { + error: "Payload likely exceeds max payload size of RPC server.".to_string(), + }) + } else { + TraceBlockResponse::BlockTrace(BlockTrace { + block_hash: block_id_as_string(id), + parent_hash: block_id_as_string(parent_id), + tracing_targets: targets.to_string(), + storage_keys: self.storage_keys.clone().unwrap_or_default(), + methods: self.methods.clone().unwrap_or_default(), + spans, + events, + }) + }; + + Ok(response) + } +} + +fn event_values_filter(event: &TraceEvent, filter_kind: &str, values: &str) -> bool { + event + .values + .string_values + .get(filter_kind) + .and_then(|value| Some(check_target(values, value, &event.level))) + .unwrap_or(false) +} + +/// Filter out spans that do not match our targets and if the span is from WASM update its `name` +/// and `target` fields to the WASM values for those fields. +// The `tracing` crate requires trace metadata to be static. This does not work for wasm code in +// substrate, as it is regularly updated with new code from on-chain events. The workaround for this +// is for substrate's WASM tracing wrappers to put the `name` and `target` data in the `values` map +// (normally they would be in the static metadata assembled at compile time). Here, if a special +// WASM `name` or `target` key is found in the `values` we remove it and put the key value pair in +// the span's metadata, making it consistent with spans that come from native code. +fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { + if span.name == WASM_TRACE_IDENTIFIER { + span.values.bool_values.insert("wasm".to_owned(), true); + if let Some(n) = span.values.string_values.remove(WASM_NAME_KEY) { + span.name = n; + } + if let Some(t) = span.values.string_values.remove(WASM_TARGET_KEY) { + span.target = t; + } + if !check_target(targets, &span.target, &span.level) { + return None + } + } + Some(span.into()) +} + +/// Check if a `target` matches any `targets` by prefix +fn check_target(targets: &str, target: &str, level: &Level) -> bool { + for (t, l) in targets.split(',').map(crate::parse_target) { + if target.starts_with(t.as_str()) && level <= &l { + return true + } + } + false +} + +fn block_id_as_string(block_id: BlockId) -> String { + match block_id { + BlockId::Hash(h) => HexDisplay::from(&h.encode()).to_string(), + BlockId::Number(n) => HexDisplay::from(&n.encode()).to_string(), + } +} diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 6690f283464ea..bf6e3d780c6ed 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -1,18 +1,20 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
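// Rough numbers behind the size guard above: with AVG_EVENT = 600 * 8 = 4_800 and
// AVG_SPAN = 100 * 8 = 800, a block trace with 2_000 events and 500 spans is estimated
// at
//
//     BASE_PAYLOAD + 2_000 * AVG_EVENT + 500 * AVG_SPAN
//         = 100 + 9_600_000 + 400_000
//         = 10_000_100 bytes (roughly 9.5 MiB)
//
// so tracing a busy block typically requires passing an `rpc_max_payload` (interpreted
// in MiB via the MEGABYTE constant) larger than the server default, otherwise the
// `TraceError` branch is returned instead of the trace.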
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . //! Instrumentation implementation for substrate. //! @@ -22,33 +24,41 @@ //! //! See `sp-tracing` for examples on how to use tracing. //! -//! Currently we provide `Log` (default), `Telemetry` variants for `Receiver` +//! Currently we only provide `Log` (default). -use rustc_hash::FxHashMap; -use std::fmt; -use std::time::{Duration, Instant}; +#![warn(missing_docs)] + +pub mod block; +pub mod logging; -use parking_lot::Mutex; -use serde::ser::{Serialize, Serializer, SerializeMap}; +use rustc_hash::FxHashMap; +use serde::ser::{Serialize, SerializeMap, Serializer}; +use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; +use std::{ + fmt, + time::{Duration, Instant}, +}; use tracing::{ event::Event, - field::{Visit, Field}, - Level, + field::{Field, Visit}, span::{Attributes, Id, Record}, subscriber::Subscriber, + Level, +}; +use tracing_subscriber::{ + layer::{Context, Layer}, + registry::LookupSpan, }; -use tracing_subscriber::{CurrentSpan, layer::{Layer, Context}}; -use sc_telemetry::{telemetry, SUBSTRATE_INFO}; -use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; +#[doc(hidden)] +pub use tracing; + const ZERO_DURATION: Duration = Duration::from_nanos(0); /// Responsible for assigning ids to new spans, which are not re-used. pub struct ProfilingLayer { targets: Vec<(String, Level)>, trace_handler: Box, - span_data: Mutex>, - current_span: CurrentSpan, } /// Used to configure how to receive the metrics @@ -56,8 +66,6 @@ pub struct ProfilingLayer { pub enum TracingReceiver { /// Output to logger Log, - /// Output to telemetry - Telemetry, } impl Default for TracingReceiver { @@ -77,10 +85,15 @@ pub trait TraceHandler: Send + Sync { /// Represents a tracing event, complete with values #[derive(Debug)] pub struct TraceEvent { - pub name: &'static str, + /// Name of the event. + pub name: String, + /// Target of the event. pub target: String, + /// Level of the event. pub level: Level, + /// Values for this event. pub values: Values, + /// Id of the parent tracing event, if any. 
pub parent_id: Option, } @@ -110,13 +123,13 @@ pub struct SpanDatum { /// Holds associated values for a tracing span #[derive(Default, Clone, Debug)] pub struct Values { - /// HashMap of `bool` values + /// FxHashMap of `bool` values pub bool_values: FxHashMap, - /// HashMap of `i64` values + /// FxHashMap of `i64` values pub i64_values: FxHashMap, - /// HashMap of `u64` values + /// FxHashMap of `u64` values pub u64_values: FxHashMap, - /// HashMap of `String` values + /// FxHashMap of `String` values pub string_values: FxHashMap, } @@ -153,15 +166,20 @@ impl Visit for Values { } fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - self.string_values.insert(field.name().to_string(), format!("{:?}", value).to_owned()); + self.string_values + .insert(field.name().to_string(), format!("{:?}", value).to_owned()); } } impl Serialize for Values { fn serialize(&self, serializer: S) -> Result - where S: Serializer, + where + S: Serializer, { - let len = self.bool_values.len() + self.i64_values.len() + self.u64_values.len() + self.string_values.len(); + let len = self.bool_values.len() + + self.i64_values.len() + + self.u64_values.len() + + self.string_values.len(); let mut map = serializer.serialize_map(Some(len))?; for (k, v) in &self.bool_values { map.serialize_entry(k, v)?; @@ -185,32 +203,16 @@ impl fmt::Display for Values { let i64_iter = self.i64_values.iter().map(|(k, v)| format!("{}={}", k, v)); let u64_iter = self.u64_values.iter().map(|(k, v)| format!("{}={}", k, v)); let string_iter = self.string_values.iter().map(|(k, v)| format!("{}=\"{}\"", k, v)); - let values = bool_iter.chain(i64_iter).chain(u64_iter).chain(string_iter).collect::>().join(", "); + let values = bool_iter + .chain(i64_iter) + .chain(u64_iter) + .chain(string_iter) + .collect::>() + .join(", "); write!(f, "{}", values) } } -impl slog::SerdeValue for Values { - fn as_serde(&self) -> &dyn erased_serde::Serialize { - self - } - - fn to_sendable(&self) -> Box { - Box::new(self.clone()) - } -} - -impl slog::Value for Values { - fn serialize( - &self, - _record: &slog::Record, - key: slog::Key, - ser: &mut dyn slog::Serializer, - ) -> slog::Result { - ser.emit_serde(key, self) - } -} - impl ProfilingLayer { /// Takes a `TracingReceiver` and a comma separated list of targets, /// either with a level: "pallet=trace,frame=debug" @@ -219,10 +221,6 @@ impl ProfilingLayer { pub fn new(receiver: TracingReceiver, targets: &str) -> Self { match receiver { TracingReceiver::Log => Self::new_with_handler(Box::new(LogTraceHandler), targets), - TracingReceiver::Telemetry => Self::new_with_handler( - Box::new(TelemetryTraceHandler), - targets, - ), } } @@ -231,22 +229,15 @@ impl ProfilingLayer { /// either with a level, eg: "pallet=trace" /// or without: "pallet" in which case the level defaults to `trace`. 
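// Anything implementing `TraceHandler` can be plugged into `new_with_handler` below;
// a minimal custom handler sketch (the counter is illustrative; the trait only
// requires Send + Sync):
//
//     use std::sync::atomic::{AtomicU64, Ordering};
//
//     struct CountingHandler(AtomicU64);
//
//     impl sc_tracing::TraceHandler for CountingHandler {
//         fn handle_span(&self, _span: sc_tracing::SpanDatum) {
//             self.0.fetch_add(1, Ordering::Relaxed);
//         }
//         fn handle_event(&self, _event: sc_tracing::TraceEvent) {}
//     }
//
//     let layer = sc_tracing::ProfilingLayer::new_with_handler(
//         Box::new(CountingHandler(AtomicU64::new(0))),
//         "pallet,frame=debug",
//     );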
/// wasm_tracing indicates whether to enable wasm traces - pub fn new_with_handler(trace_handler: Box, targets: &str) - -> Self - { + pub fn new_with_handler(trace_handler: Box, targets: &str) -> Self { let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); - Self { - targets, - trace_handler, - span_data: Mutex::new(FxHashMap::default()), - current_span: Default::default() - } + Self { targets, trace_handler } } fn check_target(&self, target: &str, level: &Level) -> bool { for t in &self.targets { if target.starts_with(t.0.as_str()) && level <= &t.1 { - return true; + return true } } false @@ -260,93 +251,113 @@ fn parse_target(s: &str) -> (String, Level) { Some(i) => { let target = s[0..i].to_string(); if s.len() > i { - let level = s[i + 1..s.len()].parse::().unwrap_or(Level::TRACE); + let level = s[i + 1..].parse::().unwrap_or(Level::TRACE); (target, level) } else { (target, Level::TRACE) } - } - None => (s.to_string(), Level::TRACE) + }, + None => (s.to_string(), Level::TRACE), } } -impl Layer for ProfilingLayer { - fn new_span(&self, attrs: &Attributes<'_>, id: &Id, _ctx: Context) { - let mut values = Values::default(); - attrs.record(&mut values); - let span_datum = SpanDatum { - id: id.clone(), - parent_id: attrs.parent().cloned().or_else(|| self.current_span.id()), - name: attrs.metadata().name().to_owned(), - target: attrs.metadata().target().to_owned(), - level: attrs.metadata().level().clone(), - line: attrs.metadata().line().unwrap_or(0), - start_time: Instant::now(), - overall_time: ZERO_DURATION, - values, - }; - self.span_data.lock().insert(id.clone(), span_datum); +impl Layer for ProfilingLayer +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context) { + if let Some(span) = ctx.span(id) { + let mut extension = span.extensions_mut(); + let parent_id = attrs.parent().cloned().or_else(|| { + if attrs.is_contextual() { + ctx.lookup_current().map(|span| span.id()) + } else { + None + } + }); + + let mut values = Values::default(); + attrs.record(&mut values); + let span_datum = SpanDatum { + id: id.clone(), + parent_id, + name: attrs.metadata().name().to_owned(), + target: attrs.metadata().target().to_owned(), + level: *attrs.metadata().level(), + line: attrs.metadata().line().unwrap_or(0), + start_time: Instant::now(), + overall_time: ZERO_DURATION, + values, + }; + extension.insert(span_datum); + } } - fn on_record(&self, span: &Id, values: &Record<'_>, _ctx: Context) { - let mut span_data = self.span_data.lock(); - if let Some(s) = span_data.get_mut(span) { - values.record(&mut s.values); + fn on_record(&self, id: &Id, values: &Record<'_>, ctx: Context) { + if let Some(span) = ctx.span(id) { + let mut extensions = span.extensions_mut(); + if let Some(s) = extensions.get_mut::() { + values.record(&mut s.values); + } } } - fn on_event(&self, event: &Event<'_>, _ctx: Context) { + fn on_event(&self, event: &Event<'_>, ctx: Context) { + let parent_id = event.parent().cloned().or_else(|| { + if event.is_contextual() { + ctx.lookup_current().map(|span| span.id()) + } else { + None + } + }); + let mut values = Values::default(); event.record(&mut values); let trace_event = TraceEvent { - name: event.metadata().name(), + name: event.metadata().name().to_owned(), target: event.metadata().target().to_owned(), - level: event.metadata().level().clone(), + level: *event.metadata().level(), values, - parent_id: event.parent().cloned().or_else(|| self.current_span.id()), + parent_id, }; 
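// The contextual-parent logic above is what the tests at the bottom of this file rely
// on. A sketch of the observable behaviour, assuming the layer is installed:
//
//     let span = tracing::info_span!(target: "test_target", "outer");
//     let _guard = span.enter();
//     // Contextual event: no explicit parent, so `parent_id` becomes the id of "outer".
//     tracing::event!(target: "test_target", tracing::Level::INFO, "inside");
//     // `parent: None` opts out of contextual lookup, so `parent_id` stays `None`.
//     tracing::event!(target: "test_target", parent: None, tracing::Level::INFO, "detached");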
self.trace_handler.handle_event(trace_event); } - fn on_enter(&self, span: &Id, _ctx: Context) { - self.current_span.enter(span.clone()); - let mut span_data = self.span_data.lock(); - let start_time = Instant::now(); - if let Some(mut s) = span_data.get_mut(&span) { - s.start_time = start_time; + fn on_enter(&self, span: &Id, ctx: Context) { + if let Some(span) = ctx.span(span) { + let mut extensions = span.extensions_mut(); + if let Some(s) = extensions.get_mut::() { + let start_time = Instant::now(); + s.start_time = start_time; + } } } - fn on_exit(&self, span: &Id, _ctx: Context) { - self.current_span.exit(); - let end_time = Instant::now(); - let span_datum = { - let mut span_data = self.span_data.lock(); - span_data.remove(&span) - }; - - if let Some(mut span_datum) = span_datum { - span_datum.overall_time += end_time - span_datum.start_time; - if span_datum.name == WASM_TRACE_IDENTIFIER { - span_datum.values.bool_values.insert("wasm".to_owned(), true); - if let Some(n) = span_datum.values.string_values.remove(WASM_NAME_KEY) { - span_datum.name = n; - } - if let Some(t) = span_datum.values.string_values.remove(WASM_TARGET_KEY) { - span_datum.target = t; - } - if self.check_target(&span_datum.target, &span_datum.level) { + fn on_exit(&self, span: &Id, ctx: Context) { + if let Some(span) = ctx.span(span) { + let end_time = Instant::now(); + let mut extensions = span.extensions_mut(); + if let Some(mut span_datum) = extensions.remove::() { + span_datum.overall_time += end_time - span_datum.start_time; + if span_datum.name == WASM_TRACE_IDENTIFIER { + span_datum.values.bool_values.insert("wasm".to_owned(), true); + if let Some(n) = span_datum.values.string_values.remove(WASM_NAME_KEY) { + span_datum.name = n; + } + if let Some(t) = span_datum.values.string_values.remove(WASM_TARGET_KEY) { + span_datum.target = t; + } + if self.check_target(&span_datum.target, &span_datum.level) { + self.trace_handler.handle_span(span_datum); + } + } else { self.trace_handler.handle_span(span_datum); } - } else { - self.trace_handler.handle_span(span_datum); } - }; + } } - fn on_close(&self, span: Id, ctx: Context) { - self.on_exit(&span, ctx) - } + fn on_close(&self, _span: Id, _ctx: Context) {} } /// TraceHandler for sending span data to the logger @@ -399,36 +410,34 @@ impl TraceHandler for LogTraceHandler { } } -/// TraceHandler for sending span data to telemetry, -/// Please see telemetry documentation for details on how to specify endpoints and -/// set the required telemetry level to activate tracing messages -pub struct TelemetryTraceHandler; - -impl TraceHandler for TelemetryTraceHandler { - fn handle_span(&self, span_datum: SpanDatum) { - telemetry!(SUBSTRATE_INFO; "tracing.profiling"; - "name" => span_datum.name, - "target" => span_datum.target, - "time" => span_datum.overall_time.as_nanos(), - "id" => span_datum.id.into_u64(), - "parent_id" => span_datum.parent_id.map(|i| i.into_u64()), - "values" => span_datum.values - ); +impl From for sp_rpc::tracing::Event { + fn from(trace_event: TraceEvent) -> Self { + let data = sp_rpc::tracing::Data { string_values: trace_event.values.string_values }; + sp_rpc::tracing::Event { + target: trace_event.target, + data, + parent_id: trace_event.parent_id.map(|id| id.into_u64()), + } } +} - fn handle_event(&self, event: TraceEvent) { - telemetry!(SUBSTRATE_INFO; "tracing.event"; - "name" => event.name, - "target" => event.target, - "parent_id" => event.parent_id.map(|i| i.into_u64()), - "values" => event.values - ); +impl From for sp_rpc::tracing::Span { + 
fn from(span_datum: SpanDatum) -> Self { + let wasm = span_datum.values.bool_values.get("wasm").is_some(); + sp_rpc::tracing::Span { + id: span_datum.id.into_u64(), + parent_id: span_datum.parent_id.map(|id| id.into_u64()), + name: span_datum.name, + target: span_datum.target, + wasm, + } } } #[cfg(test)] mod tests { use super::*; + use parking_lot::Mutex; use std::sync::Arc; use tracing_subscriber::layer::SubscriberExt; @@ -447,23 +456,16 @@ mod tests { } } - type TestSubscriber = tracing_subscriber::layer::Layered< - ProfilingLayer, - tracing_subscriber::fmt::Subscriber - >; - - fn setup_subscriber() -> (TestSubscriber, Arc>>, Arc>>) { + fn setup_subscriber() -> ( + impl tracing::Subscriber + Send + Sync, + Arc>>, + Arc>>, + ) { let spans = Arc::new(Mutex::new(Vec::new())); let events = Arc::new(Mutex::new(Vec::new())); - let handler = TestTraceHandler { - spans: spans.clone(), - events: events.clone(), - }; - let layer = ProfilingLayer::new_with_handler( - Box::new(handler), - "test_target" - ); - let subscriber = tracing_subscriber::fmt().finish().with(layer); + let handler = TestTraceHandler { spans: spans.clone(), events: events.clone() }; + let layer = ProfilingLayer::new_with_handler(Box::new(handler), "test_target"); + let subscriber = tracing_subscriber::fmt().with_writer(std::io::sink).finish().with(layer); (subscriber, spans, events) } @@ -540,7 +542,10 @@ mod tests { let _sub_guard = tracing::subscriber::set_default(sub); tracing::event!(target: "test_target", tracing::Level::INFO, "test_event"); let mut te1 = events.lock().remove(0); - assert_eq!(te1.values.string_values.remove(&"message".to_owned()).unwrap(), "test_event".to_owned()); + assert_eq!( + te1.values.string_values.remove(&"message".to_owned()).unwrap(), + "test_event".to_owned() + ); } #[test] @@ -555,7 +560,7 @@ mod tests { // emit event tracing::event!(target: "test_target", tracing::Level::INFO, "test_event"); - //exit span + // exit span drop(_guard1); drop(span1); @@ -567,64 +572,76 @@ mod tests { #[test] fn test_parent_id_with_threads() { - use std::sync::mpsc; - use std::thread; - - let (sub, spans, events) = setup_subscriber(); - let _sub_guard = tracing::subscriber::set_global_default(sub); - let span1 = tracing::info_span!(target: "test_target", "test_span1"); - let _guard1 = span1.enter(); - - let (tx, rx) = mpsc::channel(); - let handle = thread::spawn(move || { - let span2 = tracing::info_span!(target: "test_target", "test_span2"); - let _guard2 = span2.enter(); - // emit event - tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); - for msg in rx.recv() { - if msg == false { - break; + use std::{sync::mpsc, thread}; + + if std::env::var("RUN_TEST_PARENT_ID_WITH_THREADS").is_err() { + let executable = std::env::current_exe().unwrap(); + let mut command = std::process::Command::new(executable); + + let res = command + .env("RUN_TEST_PARENT_ID_WITH_THREADS", "1") + .args(&["--nocapture", "test_parent_id_with_threads"]) + .output() + .unwrap() + .status; + assert!(res.success()); + } else { + let (sub, spans, events) = setup_subscriber(); + let _sub_guard = tracing::subscriber::set_global_default(sub); + let span1 = tracing::info_span!(target: "test_target", "test_span1"); + let _guard1 = span1.enter(); + + let (tx, rx) = mpsc::channel(); + let handle = thread::spawn(move || { + let span2 = tracing::info_span!(target: "test_target", "test_span2"); + let _guard2 = span2.enter(); + // emit event + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); + for msg 
in rx.recv() { + if msg == false { + break + } } - } - // gard2 and span2 dropped / exited - }); + // guard2 and span2 dropped / exited + }); - // wait for Event to be dispatched and stored - while events.lock().is_empty() { - thread::sleep(Duration::from_millis(1)); - } + // wait for Event to be dispatched and stored + while events.lock().is_empty() { + thread::sleep(Duration::from_millis(1)); + } - // emit new event (will be second item in Vec) while span2 still active in other thread - tracing::event!(target: "test_target", tracing::Level::INFO, "test_event2"); + // emit new event (will be second item in Vec) while span2 still active in other thread + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event2"); - // stop thread and drop span - let _ = tx.send(false); - let _ = handle.join(); + // stop thread and drop span + let _ = tx.send(false); + let _ = handle.join(); - // wait for Span to be dispatched and stored - while spans.lock().is_empty() { - thread::sleep(Duration::from_millis(1)); + // wait for Span to be dispatched and stored + while spans.lock().is_empty() { + thread::sleep(Duration::from_millis(1)); + } + let span2 = spans.lock().remove(0); + let event1 = events.lock().remove(0); + drop(_guard1); + drop(span1); + + // emit event with no parent + tracing::event!(target: "test_target", tracing::Level::INFO, "test_event3"); + + let span1 = spans.lock().remove(0); + let event2 = events.lock().remove(0); + + assert_eq!(event1.values.string_values.get("message").unwrap(), "test_event1"); + assert_eq!(event2.values.string_values.get("message").unwrap(), "test_event2"); + assert!(span1.parent_id.is_none()); + assert!(span2.parent_id.is_none()); + assert_eq!(span2.id, event1.parent_id.unwrap()); + assert_eq!(span1.id, event2.parent_id.unwrap()); + assert_ne!(span2.id, span1.id); + + let event3 = events.lock().remove(0); + assert!(event3.parent_id.is_none()); } - let span2 = spans.lock().remove(0); - let event1 = events.lock().remove(0); - drop(_guard1); - drop(span1); - - // emit event with no parent - tracing::event!(target: "test_target", tracing::Level::INFO, "test_event3"); - - let span1 = spans.lock().remove(0); - let event2 = events.lock().remove(0); - - assert_eq!(event1.values.string_values.get("message").unwrap(), "test_event1"); - assert_eq!(event2.values.string_values.get("message").unwrap(), "test_event2"); - assert!(span1.parent_id.is_none()); - assert!(span2.parent_id.is_none()); - assert_eq!(span2.id, event1.parent_id.unwrap()); - assert_eq!(span1.id, event2.parent_id.unwrap()); - assert_ne!(span2.id, span1.id); - - let event3 = events.lock().remove(0); - assert!(event3.parent_id.is_none()); } } diff --git a/client/tracing/src/logging/directives.rs b/client/tracing/src/logging/directives.rs new file mode 100644 index 0000000000000..5aaeb4d17e7d3 --- /dev/null +++ b/client/tracing/src/logging/directives.rs @@ -0,0 +1,114 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
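// The env-var guard in the test above is the standard re-exec pattern for tests that
// must call `tracing::subscriber::set_global_default`: the global dispatcher can only
// be installed once per process, so the test spawns itself as a child process and only
// runs the body there. Reduced to its skeleton:
//
//     if std::env::var("RUN_TEST_PARENT_ID_WITH_THREADS").is_err() {
//         let status = std::process::Command::new(std::env::current_exe().unwrap())
//             .env("RUN_TEST_PARENT_ID_WITH_THREADS", "1")
//             .args(&["--nocapture", "test_parent_id_with_threads"])
//             .status()
//             .unwrap();
//         assert!(status.success());
//         return
//     }
//     // ... the real test body executes only in the child process ...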
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+use once_cell::sync::OnceCell;
+use parking_lot::Mutex;
+use tracing_subscriber::{
+	filter::Directive, fmt as tracing_fmt, fmt::time::ChronoLocal, layer, reload::Handle,
+	EnvFilter, Registry,
+};
+
+// Handle to reload the tracing log filter
+static FILTER_RELOAD_HANDLE: OnceCell<Handle<EnvFilter, SCSubscriber>> = OnceCell::new();
+// Directives that are defaulted to when resetting the log filter
+static DEFAULT_DIRECTIVES: OnceCell<Mutex<Vec<String>>> = OnceCell::new();
+// Current state of log filter
+static CURRENT_DIRECTIVES: OnceCell<Mutex<Vec<String>>> = OnceCell::new();
+
+/// Add log filter directive(s) to the defaults
+///
+/// The syntax is identical to the CLI `<target>=<level>`:
+///
+/// `sync=debug,state=trace`
+pub(crate) fn add_default_directives(directives: &str) {
+	DEFAULT_DIRECTIVES
+		.get_or_init(|| Mutex::new(Vec::new()))
+		.lock()
+		.push(directives.to_owned());
+	add_directives(directives);
+}
+
+/// Add directives to current directives
+pub fn add_directives(directives: &str) {
+	CURRENT_DIRECTIVES
+		.get_or_init(|| Mutex::new(Vec::new()))
+		.lock()
+		.push(directives.to_owned());
+}
+
+/// Parse `Directive` and add to default directives if successful.
+///
+/// Ensures the supplied directive will be restored when resetting the log filter.
+pub(crate) fn parse_default_directive(directive: &str) -> super::Result<Directive> {
+	let dir = directive.parse()?;
+	add_default_directives(directive);
+	Ok(dir)
+}
+
+/// Reload the logging filter with the supplied directives added to the existing directives
+pub fn reload_filter() -> Result<(), String> {
+	let mut env_filter = EnvFilter::default();
+	if let Some(current_directives) = CURRENT_DIRECTIVES.get() {
+		// Join and re-split in case multiple directives were supplied as one
+		// comma-separated string
+		for directive in current_directives.lock().join(",").split(',').map(|d| d.parse()) {
+			match directive {
+				Ok(dir) => env_filter = env_filter.add_directive(dir),
+				Err(invalid_directive) => {
+					log::warn!(
+						target: "tracing",
+						"Unable to parse directive while setting log filter: {:?}",
+						invalid_directive,
+					);
+				},
+			}
+		}
+	}
+
+	// Set the max logging level for the `log` macros.
+	let max_level_hint =
+		tracing_subscriber::Layer::<Registry>::max_level_hint(&env_filter);
+	log::set_max_level(super::to_log_level_filter(max_level_hint));
+
+	log::debug!(target: "tracing", "Reloading log filter with: {}", env_filter);
+	FILTER_RELOAD_HANDLE
+		.get()
+		.ok_or("No reload handle present".to_string())?
+		.reload(env_filter)
+		.map_err(|e| format!("{}", e))
+}
+
+/// Resets the log filter back to the original state when the node was started.
+///
+/// Includes substrate defaults and CLI supplied directives.
+pub fn reset_log_filter() -> Result<(), String> {
+	let directive = DEFAULT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().clone();
+
+	*CURRENT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock() = directive;
+	reload_filter()
+}
+
+/// Initialize FILTER_RELOAD_HANDLE, only possible once
+pub(crate) fn set_reload_handle(handle: Handle<EnvFilter, SCSubscriber>) {
+	let _ = FILTER_RELOAD_HANDLE.set(handle);
+}
+
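// How the directive machinery above is driven at runtime, e.g. from the
// `system_addLogFilter` / `system_resetLogFilter` RPCs (a sketch):
//
//     // Tighten two targets on a running node and apply the change:
//     sc_tracing::logging::add_directives("sync=debug,afg=trace");
//     sc_tracing::logging::reload_filter().expect("reload handle was set during init");
//
//     // Later, restore the defaults captured at startup (substrate defaults + CLI flags):
//     sc_tracing::logging::reset_log_filter().expect("reload handle was set during init");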
+// The layered Subscriber as built up in `LoggerBuilder::init()`.
+// Used in the reload `Handle`.
+type SCSubscriber<
+	N = tracing_fmt::format::DefaultFields,
+	E = crate::logging::EventFormat<ChronoLocal>,
+	W = fn() -> std::io::Stderr,
+> = layer::Layered<tracing_fmt::Layer<Registry, N, E, W>, Registry>;
diff --git a/client/cli/src/logging.rs b/client/tracing/src/logging/event_format.rs
similarity index 55%
rename from client/cli/src/logging.rs
rename to client/tracing/src/logging/event_format.rs
index 3b87d95fe0643..61d7fe77aec68 100644
--- a/client/cli/src/logging.rs
+++ b/client/tracing/src/logging/event_format.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
 
 // This program is free software: you can redistribute it and/or modify
@@ -16,55 +16,61 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use ansi_term::{Colour, Style};
-use std::{fmt::{self, Write as _}, iter};
-use tracing::{
-	span::{self, Attributes},
-	Event, Id, Level, Subscriber,
-};
+use ansi_term::Colour;
+use regex::Regex;
+use std::fmt::{self, Write};
+use tracing::{Event, Level, Subscriber};
 use tracing_log::NormalizeEvent;
 use tracing_subscriber::{
+	field::RecordFields,
 	fmt::{
 		time::{FormatTime, SystemTime},
 		FmtContext, FormatEvent, FormatFields,
 	},
 	layer::Context,
-	registry::LookupSpan,
-	Layer,
+	registry::{LookupSpan, SpanRef},
 };
 
-/// Span name used for the logging prefix. See macro `sc_cli::prefix_logs_with!`
-pub const PREFIX_LOG_SPAN: &str = "substrate-log-prefix";
-
-pub(crate) struct EventFormat<T = SystemTime> {
-	pub(crate) timer: T,
-	pub(crate) ansi: bool,
-	pub(crate) display_target: bool,
-	pub(crate) display_level: bool,
-	pub(crate) display_thread_name: bool,
+/// A pre-configured event formatter.
+pub struct EventFormat<T = SystemTime> {
+	/// Use the given timer for log message timestamps.
+	pub timer: T,
+	/// Sets whether or not an event's target is displayed.
+	pub display_target: bool,
+	/// Sets whether or not an event's level is displayed.
+	pub display_level: bool,
+	/// Sets whether or not the name of the current thread is displayed when formatting events.
+	pub display_thread_name: bool,
+	/// Enable ANSI terminal colors for formatted output.
+	pub enable_color: bool,
+	/// Duplicate INFO, WARN and ERROR messages to stdout.
+ pub dup_to_stdout: bool, } -// NOTE: the following code took inspiration from tracing-subscriber -// -// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449 -impl FormatEvent for EventFormat +impl EventFormat where - S: Subscriber + for<'a> LookupSpan<'a>, - N: for<'a> FormatFields<'a> + 'static, T: FormatTime, { - fn format_event( + // NOTE: the following code took inspiration from tracing-subscriber + // + // https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449 + pub(crate) fn format_event_custom<'b, S, N>( &self, - ctx: &FmtContext, + ctx: CustomFmtContext<'b, S, N>, writer: &mut dyn fmt::Write, event: &Event, - ) -> fmt::Result { + ) -> fmt::Result + where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, + { + let writer = &mut MaybeColorWriter::new(self.enable_color, writer); let normalized_meta = event.normalized_metadata(); let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); - time::write(&self.timer, writer, self.ansi)?; + time::write(&self.timer, writer, self.enable_color)?; if self.display_level { - let fmt_level = { FmtLevel::new(meta.level(), self.ansi) }; + let fmt_level = { FmtLevel::new(meta.level(), self.enable_color) }; write!(writer, "{} ", fmt_level)?; } @@ -73,92 +79,64 @@ where match current_thread.name() { Some(name) => { write!(writer, "{} ", FmtThreadName::new(name))?; - } + }, // fall-back to thread id when name is absent and ids are not enabled None => { write!(writer, "{:0>2?} ", current_thread.id())?; - } + }, } } + if self.display_target { + write!(writer, "{}: ", meta.target())?; + } + // Custom code to display node name if let Some(span) = ctx.lookup_current() { - let parents = span.parents(); - for span in std::iter::once(span).chain(parents) { + for span in span.scope() { let exts = span.extensions(); - if let Some(node_name) = exts.get::() { - write!(writer, "{}", node_name.as_str())?; - break; + if let Some(prefix) = exts.get::() { + write!(writer, "{}", prefix.as_str())?; + break } } } - let fmt_ctx = { FmtCtx::new(&ctx, event.parent(), self.ansi) }; - write!(writer, "{}", fmt_ctx)?; - if self.display_target { - write!(writer, "{}:", meta.target())?; - } ctx.format_fields(writer, event)?; - writeln!(writer) + writeln!(writer)?; + + writer.write() } } -pub(crate) struct NodeNameLayer; - -impl Layer for NodeNameLayer +// NOTE: the following code took inspiration from tracing-subscriber +// +// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449 +impl FormatEvent for EventFormat where S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, + T: FormatTime, { - fn new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) { - let span = ctx - .span(id) - .expect("new_span has been called for this span; qed"); - - if span.name() != PREFIX_LOG_SPAN { - return; - } - - let mut extensions = span.extensions_mut(); - - if extensions.get_mut::().is_none() { - let mut s = String::new(); - let mut v = NodeNameVisitor(&mut s); - attrs.record(&mut v); - - if !s.is_empty() { - let fmt_fields = NodeName(s); - extensions.insert(fmt_fields); - } - } - } -} - -struct NodeNameVisitor<'a, W: std::fmt::Write>(&'a mut W); - -macro_rules! 
write_node_name { - ($method:ident, $type:ty, $format:expr) => { - fn $method(&mut self, field: &tracing::field::Field, value: $type) { - if field.name() == "name" { - write!(self.0, $format, value).expect("no way to return the err; qed"); - } + fn format_event( + &self, + ctx: &FmtContext, + writer: &mut dyn fmt::Write, + event: &Event, + ) -> fmt::Result { + if self.dup_to_stdout && + (event.metadata().level() == &Level::INFO || + event.metadata().level() == &Level::WARN || + event.metadata().level() == &Level::ERROR) + { + let mut out = String::new(); + self.format_event_custom(CustomFmtContext::FmtContext(ctx), &mut out, event)?; + writer.write_str(&out)?; + print!("{}", out); + Ok(()) + } else { + self.format_event_custom(CustomFmtContext::FmtContext(ctx), writer, event) } - }; -} - -impl<'a, W: std::fmt::Write> tracing::field::Visit for NodeNameVisitor<'a, W> { - write_node_name!(record_debug, &dyn std::fmt::Debug, "[{:?}] "); - write_node_name!(record_str, &str, "[{}] "); - write_node_name!(record_i64, i64, "[{}] "); - write_node_name!(record_u64, u64, "[{}] "); - write_node_name!(record_bool, bool, "[{}] "); -} - -#[derive(Debug)] -struct NodeName(String); - -impl NodeName { - fn as_str(&self) -> &str { - self.0.as_str() } } @@ -247,91 +225,114 @@ impl<'a> fmt::Display for FmtThreadName<'a> { } } -struct FmtCtx<'a, S, N> { - ctx: &'a FmtContext<'a, S, N>, - span: Option<&'a span::Id>, - ansi: bool, +// NOTE: the following code has been duplicated from tracing-subscriber +// +// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/time/mod.rs#L252 +mod time { + use ansi_term::Style; + use std::fmt; + use tracing_subscriber::fmt::time::FormatTime; + + pub(crate) fn write(timer: T, writer: &mut dyn fmt::Write, with_ansi: bool) -> fmt::Result + where + T: FormatTime, + { + if with_ansi { + let style = Style::new().dimmed(); + write!(writer, "{}", style.prefix())?; + timer.format_time(writer)?; + write!(writer, "{}", style.suffix())?; + } else { + timer.format_time(writer)?; + } + writer.write_char(' ')?; + Ok(()) + } +} + +// NOTE: `FmtContext`'s fields are private. 
This enum allows us to make a `format_event` function +// that works with `FmtContext` or `Context` with `FormatFields` +#[allow(dead_code)] +pub(crate) enum CustomFmtContext<'a, S, N> { + FmtContext(&'a FmtContext<'a, S, N>), + ContextWithFormatFields(&'a Context<'a, S>, &'a N), } -impl<'a, S, N: 'a> FmtCtx<'a, S, N> +impl<'a, S, N> FormatFields<'a> for CustomFmtContext<'a, S, N> where S: Subscriber + for<'lookup> LookupSpan<'lookup>, N: for<'writer> FormatFields<'writer> + 'static, { - pub(crate) fn new( - ctx: &'a FmtContext<'_, S, N>, - span: Option<&'a span::Id>, - ansi: bool, - ) -> Self { - Self { ctx, ansi, span } - } - - fn bold(&self) -> Style { - if self.ansi { - return Style::new().bold(); + fn format_fields( + &self, + writer: &'a mut dyn fmt::Write, + fields: R, + ) -> fmt::Result { + match self { + CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.format_fields(writer, fields), + CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => + fmt_fields.format_fields(writer, fields), } - - Style::new() } } -// NOTE: the following code took inspiration from tracing-subscriber +// NOTE: the following code has been duplicated from tracing-subscriber // -// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L711 -impl<'a, S, N: 'a> fmt::Display for FmtCtx<'a, S, N> +// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/fmt_layer.rs#L788 +impl<'a, S, N> CustomFmtContext<'a, S, N> where S: Subscriber + for<'lookup> LookupSpan<'lookup>, N: for<'writer> FormatFields<'writer> + 'static, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let bold = self.bold(); - let mut seen = false; - - let span = self - .span - .and_then(|id| self.ctx.span(&id)) - .or_else(|| self.ctx.lookup_current()); - - let scope = span - .into_iter() - .flat_map(|span| span.from_root().chain(iter::once(span))); - - for name in scope - .map(|span| span.metadata().name()) - .filter(|&x| x != "substrate-node") - { - seen = true; - write!(f, "{}:", bold.paint(name))?; + #[inline] + pub fn lookup_current(&self) -> Option> + where + S: for<'lookup> LookupSpan<'lookup>, + { + match self { + CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.lookup_current(), + CustomFmtContext::ContextWithFormatFields(ctx, _) => ctx.lookup_current(), } + } +} - if seen { - f.write_char(' ')?; - } +/// A writer that may write to `inner_writer` with colors. +/// +/// This is used by [`EventFormat`] to kill colors when `enable_color` is `false`. +/// +/// It is required to call [`MaybeColorWriter::write`] after all writes are done, +/// because the content of these writes is buffered and will only be written to the +/// `inner_writer` at that point. +struct MaybeColorWriter<'a> { + enable_color: bool, + buffer: String, + inner_writer: &'a mut dyn fmt::Write, +} + +impl<'a> fmt::Write for MaybeColorWriter<'a> { + fn write_str(&mut self, buf: &str) -> fmt::Result { + self.buffer.push_str(buf); Ok(()) } } -// NOTE: the following code has been duplicated from tracing-subscriber -// -// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/time/mod.rs#L252 -mod time { - use ansi_term::Style; - use std::fmt; - use tracing_subscriber::fmt::time::FormatTime; +impl<'a> MaybeColorWriter<'a> { + /// Creates a new instance. 
+ fn new(enable_color: bool, inner_writer: &'a mut dyn fmt::Write) -> Self { + Self { enable_color, inner_writer, buffer: String::new() } + } - pub(crate) fn write(timer: T, writer: &mut dyn fmt::Write, with_ansi: bool) -> fmt::Result - where - T: FormatTime, - { - if with_ansi { - let style = Style::new().dimmed(); - write!(writer, "{}", style.prefix())?; - timer.format_time(writer)?; - write!(writer, "{}", style.suffix())?; + /// Write the buffered content to the `inner_writer`. + fn write(&mut self) -> fmt::Result { + lazy_static::lazy_static! { + static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); + } + + if !self.enable_color { + let replaced = RE.replace_all(&self.buffer, ""); + self.inner_writer.write_str(&replaced) } else { - timer.format_time(writer)?; + self.inner_writer.write_str(&self.buffer) } - writer.write_char(' ')?; - Ok(()) } } diff --git a/client/transaction-pool/src/testing/mod.rs b/client/tracing/src/logging/layers/mod.rs similarity index 87% rename from client/transaction-pool/src/testing/mod.rs rename to client/tracing/src/logging/layers/mod.rs index 350c4137c37b2..7dd0c4d120ad7 100644 --- a/client/transaction-pool/src/testing/mod.rs +++ b/client/tracing/src/logging/layers/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,6 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Tests for top-level transaction pool api +mod prefix_layer; -mod pool; +pub use prefix_layer::*; diff --git a/client/tracing/src/logging/layers/prefix_layer.rs b/client/tracing/src/logging/layers/prefix_layer.rs new file mode 100644 index 0000000000000..2ad786a092233 --- /dev/null +++ b/client/tracing/src/logging/layers/prefix_layer.rs @@ -0,0 +1,95 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use tracing::{span::Attributes, Id, Subscriber}; +use tracing_subscriber::{layer::Context, registry::LookupSpan, Layer}; + +/// Span name used for the logging prefix. See macro `sc_tracing::logging::prefix_logs_with!` +pub const PREFIX_LOG_SPAN: &str = "substrate-log-prefix"; + +/// A `Layer` that captures the prefix span ([`PREFIX_LOG_SPAN`]) which is then used by +/// [`crate::logging::EventFormat`] to prefix the log lines by customizable string. +/// +/// See the macro `sc_cli::prefix_logs_with!` for more details. 
+pub struct PrefixLayer;
+
+impl<S> Layer<S> for PrefixLayer
+where
+ S: Subscriber + for<'a> LookupSpan<'a>,
+{
+ fn new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
+ let span = match ctx.span(id) {
+ Some(span) => span,
+ None => {
+ // this shouldn't happen!
+ debug_assert!(
+ false,
+ "newly created span with ID {:?} did not exist in the registry; this is a bug!",
+ id
+ );
+ return
+ },
+ };
+
+ if span.name() != PREFIX_LOG_SPAN {
+ return
+ }
+
+ let mut extensions = span.extensions_mut();
+
+ if extensions.get_mut::<Prefix>().is_none() {
+ let mut s = String::new();
+ let mut v = PrefixVisitor(&mut s);
+ attrs.record(&mut v);
+
+ if !s.is_empty() {
+ let fmt_fields = Prefix(s);
+ extensions.insert(fmt_fields);
+ }
+ }
+ }
+}
+
+struct PrefixVisitor<'a, W: std::fmt::Write>(&'a mut W);
+
+macro_rules! write_node_name {
+ ($method:ident, $type:ty, $format:expr) => {
+ fn $method(&mut self, field: &tracing::field::Field, value: $type) {
+ if field.name() == "name" {
+ let _ = write!(self.0, $format, value);
+ }
+ }
+ };
+}
+
+impl<'a, W: std::fmt::Write> tracing::field::Visit for PrefixVisitor<'a, W> {
+ write_node_name!(record_debug, &dyn std::fmt::Debug, "[{:?}] ");
+ write_node_name!(record_str, &str, "[{}] ");
+ write_node_name!(record_i64, i64, "[{}] ");
+ write_node_name!(record_u64, u64, "[{}] ");
+ write_node_name!(record_bool, bool, "[{}] ");
+}
+
+#[derive(Debug)]
+pub(crate) struct Prefix(String);
+
+impl Prefix {
+ pub(crate) fn as_str(&self) -> &str {
+ self.0.as_str()
+ }
+}
diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs new file mode 100644 index 0000000000000..dd4830fe89752 --- /dev/null +++ b/client/tracing/src/logging/mod.rs
@@ -0,0 +1,484 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Substrate logging library.
+//!
+//! This crate uses tokio's [tracing](https://github.com/tokio-rs/tracing/) library for logging.
+
+#![warn(missing_docs)]
+
+mod directives;
+mod event_format;
+mod layers;
+
+pub use directives::*;
+pub use sc_tracing_proc_macro::*;
+
+use std::io;
+use tracing::Subscriber;
+use tracing_subscriber::{
+ filter::LevelFilter,
+ fmt::{
+ format, time::ChronoLocal, FormatEvent, FormatFields, Formatter, Layer as FmtLayer,
+ MakeWriter, SubscriberBuilder,
+ },
+ layer::{self, SubscriberExt},
+ registry::LookupSpan,
+ EnvFilter, FmtSubscriber, Layer, Registry,
+};
+
+pub use event_format::*;
+pub use layers::*;
+
+/// Logging Result typedef.
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// Logging errors.
+#[derive(Debug, thiserror::Error)]
+#[allow(missing_docs)]
+#[non_exhaustive]
+#[error(transparent)]
+pub enum Error {
+ IoError(#[from] io::Error),
+ SetGlobalDefaultError(#[from] tracing::subscriber::SetGlobalDefaultError),
+ DirectiveParseError(#[from] tracing_subscriber::filter::ParseError),
+ SetLoggerError(#[from] tracing_log::log_tracer::SetLoggerError),
+}
+
+macro_rules! enable_log_reloading {
+ ($builder:expr) => {{
+ let builder = $builder.with_filter_reloading();
+ let handle = builder.reload_handle();
+ set_reload_handle(handle);
+ builder
+ }};
+}
+
+/// Convert a `Option<LevelFilter>` to a [`log::LevelFilter`].
+///
+/// `None` is interpreted as `Info`.
+fn to_log_level_filter(level_filter: Option<LevelFilter>) -> log::LevelFilter {
+ match level_filter {
+ Some(LevelFilter::INFO) | None => log::LevelFilter::Info,
+ Some(LevelFilter::TRACE) => log::LevelFilter::Trace,
+ Some(LevelFilter::WARN) => log::LevelFilter::Warn,
+ Some(LevelFilter::ERROR) => log::LevelFilter::Error,
+ Some(LevelFilter::DEBUG) => log::LevelFilter::Debug,
+ Some(LevelFilter::OFF) => log::LevelFilter::Off,
+ }
+}
+
+/// Common implementation to get the subscriber.
+fn prepare_subscriber<N, E, F, W>(
+ directives: &str,
+ profiling_targets: Option<&str>,
+ force_colors: Option<bool>,
+ builder_hook: impl Fn(
+ SubscriberBuilder<
+ format::DefaultFields,
+ EventFormat,
+ EnvFilter,
+ fn() -> std::io::Stderr,
+ >,
+ ) -> SubscriberBuilder<N, E, F, W>,
+) -> Result<impl Subscriber + for<'a> LookupSpan<'a>>
+where
+ N: for<'writer> FormatFields<'writer> + 'static,
+ E: FormatEvent<Registry, N> + 'static,
+ W: MakeWriter + 'static,
+ F: layer::Layer<Formatter<N, E, W>> + Send + Sync + 'static,
+ FmtLayer<Registry, N, E, W>: layer::Layer<Registry> + Send + Sync + 'static,
+{
+ // Accept all valid directives and print invalid ones
+ fn parse_user_directives(mut env_filter: EnvFilter, dirs: &str) -> Result<EnvFilter> {
+ for dir in dirs.split(',') {
+ env_filter = env_filter.add_directive(parse_default_directive(&dir)?);
+ }
+ Ok(env_filter)
+ }
+
+ // Initialize filter - ensure to use `parse_default_directive` for any defaults to persist
+ // after log filter reloading by RPC
+ let mut env_filter = EnvFilter::default()
+ // Enable info
+ .add_directive(parse_default_directive("info").expect("provided directive is valid"))
+ // Disable info logging by default for some modules.
+ .add_directive(parse_default_directive("ws=off").expect("provided directive is valid"))
+ .add_directive(parse_default_directive("yamux=off").expect("provided directive is valid"))
+ .add_directive(
+ parse_default_directive("regalloc=off").expect("provided directive is valid"),
+ )
+ .add_directive(
+ parse_default_directive("cranelift_codegen=off").expect("provided directive is valid"),
+ )
+ // Set warn logging by default for some modules.
+ .add_directive(
+ parse_default_directive("cranelift_wasm=warn").expect("provided directive is valid"),
+ )
+ .add_directive(parse_default_directive("hyper=warn").expect("provided directive is valid"));
+
+ if let Ok(lvl) = std::env::var("RUST_LOG") {
+ if lvl != "" {
+ env_filter = parse_user_directives(env_filter, &lvl)?;
+ }
+ }
+
+ if directives != "" {
+ env_filter = parse_user_directives(env_filter, directives)?;
+ }
+
+ if let Some(profiling_targets) = profiling_targets {
+ env_filter = parse_user_directives(env_filter, profiling_targets)?;
+ env_filter = env_filter.add_directive(
+ parse_default_directive("sc_tracing=trace").expect("provided directive is valid"),
+ );
+ }
+
+ let max_level_hint = Layer::<FmtSubscriber>::max_level_hint(&env_filter);
+ let max_level = to_log_level_filter(max_level_hint);
+
+ tracing_log::LogTracer::builder().with_max_level(max_level).init()?;
+
+ // If we're only logging `INFO` entries then we'll use a simplified logging format.
+ let simple = match max_level_hint {
+ Some(level) if level <= tracing_subscriber::filter::LevelFilter::INFO => true,
+ _ => false,
+ };
+
+ let enable_color = force_colors.unwrap_or_else(|| atty::is(atty::Stream::Stderr));
+ let timer = ChronoLocal::with_format(if simple {
+ "%Y-%m-%d %H:%M:%S".to_string()
+ } else {
+ "%Y-%m-%d %H:%M:%S%.3f".to_string()
+ });
+
+ let event_format = EventFormat {
+ timer,
+ display_target: !simple,
+ display_level: !simple,
+ display_thread_name: !simple,
+ enable_color,
+ dup_to_stdout: !atty::is(atty::Stream::Stderr) && atty::is(atty::Stream::Stdout),
+ };
+ let builder = FmtSubscriber::builder().with_env_filter(env_filter);
+
+ let builder = builder.with_span_events(format::FmtSpan::NONE);
+
+ let builder = builder.with_writer(std::io::stderr as _);
+
+ let builder = builder.event_format(event_format);
+
+ let builder = builder_hook(builder);
+
+ let subscriber = builder.finish().with(PrefixLayer);
+
+ Ok(subscriber)
+}
+
+/// A builder that is used to initialize the global logger.
+pub struct LoggerBuilder {
+ directives: String,
+ profiling: Option<(crate::TracingReceiver, String)>,
+ log_reloading: bool,
+ force_colors: Option<bool>,
+}
+
+impl LoggerBuilder {
+ /// Create a new [`LoggerBuilder`] which can be used to initialize the global logger.
+ pub fn new<S: Into<String>>(directives: S) -> Self {
+ Self {
+ directives: directives.into(),
+ profiling: None,
+ log_reloading: true,
+ force_colors: None,
+ }
+ }
+
+ /// Set up the profiling.
+ pub fn with_profiling<S: Into<String>>(
+ &mut self,
+ tracing_receiver: crate::TracingReceiver,
+ profiling_targets: S,
+ ) -> &mut Self {
+ self.profiling = Some((tracing_receiver, profiling_targets.into()));
+ self
+ }
+
+ /// Whether or not to enable log reloading.
+ pub fn with_log_reloading(&mut self, enabled: bool) -> &mut Self {
+ self.log_reloading = enabled;
+ self
+ }
+
+ /// Force enable/disable colors.
+ pub fn with_colors(&mut self, enable: bool) -> &mut Self {
+ self.force_colors = Some(enable);
+ self
+ }
+
+ /// Initialize the global logger
+ ///
+ /// This sets various global logging and tracing instances and thus may only be called once.
+ pub fn init(self) -> Result<()> { + if let Some((tracing_receiver, profiling_targets)) = self.profiling { + if self.log_reloading { + let subscriber = prepare_subscriber( + &self.directives, + Some(&profiling_targets), + self.force_colors, + |builder| enable_log_reloading!(builder), + )?; + let profiling = crate::ProfilingLayer::new(tracing_receiver, &profiling_targets); + + tracing::subscriber::set_global_default(subscriber.with(profiling))?; + + Ok(()) + } else { + let subscriber = prepare_subscriber( + &self.directives, + Some(&profiling_targets), + self.force_colors, + |builder| builder, + )?; + let profiling = crate::ProfilingLayer::new(tracing_receiver, &profiling_targets); + + tracing::subscriber::set_global_default(subscriber.with(profiling))?; + + Ok(()) + } + } else { + if self.log_reloading { + let subscriber = + prepare_subscriber(&self.directives, None, self.force_colors, |builder| { + enable_log_reloading!(builder) + })?; + + tracing::subscriber::set_global_default(subscriber)?; + + Ok(()) + } else { + let subscriber = + prepare_subscriber(&self.directives, None, self.force_colors, |builder| { + builder + })?; + + tracing::subscriber::set_global_default(subscriber)?; + + Ok(()) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate as sc_tracing; + use std::{env, process::Command}; + use tracing::{metadata::Kind, subscriber::Interest, Callsite, Level, Metadata}; + + const EXPECTED_LOG_MESSAGE: &'static str = "yeah logging works as expected"; + const EXPECTED_NODE_NAME: &'static str = "THE_NODE"; + + fn init_logger(directives: &str) { + let _ = LoggerBuilder::new(directives).init().unwrap(); + } + + #[test] + fn test_logger_filters() { + if env::var("RUN_TEST_LOGGER_FILTERS").is_ok() { + let test_directives = + "afg=debug,sync=trace,client=warn,telemetry,something-with-dash=error"; + init_logger(&test_directives); + + tracing::dispatcher::get_default(|dispatcher| { + let test_filter = |target, level| { + struct DummyCallSite; + impl Callsite for DummyCallSite { + fn set_interest(&self, _: Interest) {} + fn metadata(&self) -> &Metadata<'_> { + unreachable!(); + } + } + + let metadata = tracing::metadata!( + name: "", + target: target, + level: level, + fields: &[], + callsite: &DummyCallSite, + kind: Kind::SPAN, + ); + + dispatcher.enabled(&metadata) + }; + + assert!(test_filter("afg", Level::INFO)); + assert!(test_filter("afg", Level::DEBUG)); + assert!(!test_filter("afg", Level::TRACE)); + + assert!(test_filter("sync", Level::TRACE)); + assert!(test_filter("client", Level::WARN)); + + assert!(test_filter("telemetry", Level::TRACE)); + assert!(test_filter("something-with-dash", Level::ERROR)); + }); + } else { + let status = Command::new(env::current_exe().unwrap()) + .arg("test_logger_filters") + .env("RUN_TEST_LOGGER_FILTERS", "1") + .output() + .unwrap() + .status; + assert!(status.success()); + } + } + + /// This test ensures that using dash (`-`) in the target name in logs and directives actually + /// work. + #[test] + fn dash_in_target_name_works() { + let executable = env::current_exe().unwrap(); + let output = Command::new(executable) + .env("ENABLE_LOGGING", "1") + .args(&["--nocapture", "log_something_with_dash_target_name"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stderr).unwrap(); + assert!(output.contains(EXPECTED_LOG_MESSAGE)); + } + + /// This is not an actual test, it is used by the `dash_in_target_name_works` test. 
+ /// The given test will call the test executable and only execute this one test that + /// only prints `EXPECTED_LOG_MESSAGE` through logging while using a target + /// name that contains a dash. This ensures that target names with dashes work. + #[test] + fn log_something_with_dash_target_name() { + if env::var("ENABLE_LOGGING").is_ok() { + let test_directives = "test-target=info"; + let _guard = init_logger(&test_directives); + + log::info!(target: "test-target", "{}", EXPECTED_LOG_MESSAGE); + } + } + + #[test] + fn prefix_in_log_lines() { + let re = regex::Regex::new(&format!( + r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} \[{}\] {}$", + EXPECTED_NODE_NAME, EXPECTED_LOG_MESSAGE, + )) + .unwrap(); + let executable = env::current_exe().unwrap(); + let output = Command::new(executable) + .env("ENABLE_LOGGING", "1") + .args(&["--nocapture", "prefix_in_log_lines_entrypoint"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stderr).unwrap(); + assert!(re.is_match(output.trim()), "Expected:\n{}\nGot:\n{}", re, output); + } + + /// This is not an actual test, it is used by the `prefix_in_log_lines` test. + /// The given test will call the test executable and only execute this one test that + /// only prints a log line prefixed by the node name `EXPECTED_NODE_NAME`. + #[test] + fn prefix_in_log_lines_entrypoint() { + if env::var("ENABLE_LOGGING").is_ok() { + let _guard = init_logger(""); + prefix_in_log_lines_process(); + } + } + + #[crate::logging::prefix_logs_with(EXPECTED_NODE_NAME)] + fn prefix_in_log_lines_process() { + log::info!("{}", EXPECTED_LOG_MESSAGE); + } + + /// This is not an actual test, it is used by the `do_not_write_with_colors_on_tty` test. + /// The given test will call the test executable and only execute this one test that + /// only prints a log line with some colors in it. 
+ #[test]
+ fn do_not_write_with_colors_on_tty_entrypoint() {
+ if env::var("ENABLE_LOGGING").is_ok() {
+ let _guard = init_logger("");
+ log::info!("{}", ansi_term::Colour::Yellow.paint(EXPECTED_LOG_MESSAGE));
+ }
+ }
+
+ #[test]
+ fn do_not_write_with_colors_on_tty() {
+ let re = regex::Regex::new(&format!(
+ r"^\d{{4}}-\d{{2}}-\d{{2}} \d{{2}}:\d{{2}}:\d{{2}} {}$",
+ EXPECTED_LOG_MESSAGE,
+ ))
+ .unwrap();
+ let executable = env::current_exe().unwrap();
+ let output = Command::new(executable)
+ .env("ENABLE_LOGGING", "1")
+ .args(&["--nocapture", "do_not_write_with_colors_on_tty_entrypoint"])
+ .output()
+ .unwrap();
+
+ let output = String::from_utf8(output.stderr).unwrap();
+ assert!(re.is_match(output.trim()), "Expected:\n{}\nGot:\n{}", re, output);
+ }
+
+ #[test]
+ fn log_max_level_is_set_properly() {
+ fn run_test(rust_log: Option<String>, tracing_targets: Option<String>) -> String {
+ let executable = env::current_exe().unwrap();
+ let mut command = Command::new(executable);
+
+ command
+ .env("PRINT_MAX_LOG_LEVEL", "1")
+ .args(&["--nocapture", "log_max_level_is_set_properly"]);
+
+ if let Some(rust_log) = rust_log {
+ command.env("RUST_LOG", rust_log);
+ }
+
+ if let Some(tracing_targets) = tracing_targets {
+ command.env("TRACING_TARGETS", tracing_targets);
+ }
+
+ let output = command.output().unwrap();
+
+ dbg!(String::from_utf8(output.stderr)).unwrap()
+ }
+
+ if env::var("PRINT_MAX_LOG_LEVEL").is_ok() {
+ let mut builder = LoggerBuilder::new("");
+
+ if let Ok(targets) = env::var("TRACING_TARGETS") {
+ builder.with_profiling(crate::TracingReceiver::Log, targets);
+ }
+
+ builder.init().unwrap();
+
+ eprint!("MAX_LOG_LEVEL={:?}", log::max_level());
+ } else {
+ assert_eq!("MAX_LOG_LEVEL=Info", run_test(None, None));
+ assert_eq!("MAX_LOG_LEVEL=Trace", run_test(Some("test=trace".into()), None));
+ assert_eq!("MAX_LOG_LEVEL=Debug", run_test(Some("test=debug".into()), None));
+ assert_eq!("MAX_LOG_LEVEL=Trace", run_test(None, Some("test=info".into())));
+ }
+ }
+}
diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 5db37f5368387..2184af819adf7 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "sc-transaction-pool"
-version = "2.0.0"
+version = "4.0.0-dev"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
@@ -13,31 +13,41 @@
readme = "README.md"
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-codec = { package = "parity-scale-codec", version = "1.3.4" }
-derive_more = "0.99.2"
-futures = { version = "0.3.1", features = ["compat"] }
-futures-diagnose = "1.0"
{ version = "2.0.0", path = "../../primitives/blockchain" } -sp-utils = { version = "2.0.0", path = "../../primitives/utils" } -wasm-timer = "0.2" +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } +parking_lot = "0.11.1" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "./api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sc-utils = { version = "4.0.0-dev", path = "../utils" } +serde = { version = "1.0.126", features = ["derive"] } +linked-hash-map = "0.5.4" +retain_mut = "0.1.3" [dev-dependencies] assert_matches = "1.3.0" hex = "0.4" -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0", path = "../block-builder" } +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +criterion = "0.3" + +[[bench]] +name = "basics" +harness = false + +[features] +test-helpers = [] diff --git a/client/transaction-pool/README.md b/client/transaction-pool/README.md index 15e4641c1f48d..e4f8ccb3d8105 100644 --- a/client/transaction-pool/README.md +++ b/client/transaction-pool/README.md @@ -1,3 +1,367 @@ Substrate transaction pool implementation. -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 + +# Problem Statement + +The transaction pool is responsible for maintaining a set of transactions that +possible to include by block authors in upcoming blocks. Transactions are received +either from networking (gossiped by other peers) or RPC (submitted locally). + +The main task of the pool is to prepare an ordered list of transactions for block +authorship module. The same list is useful for gossiping to other peers, but note +that it's not a hard requirement for the gossiped transactions to be exactly the +same (see implementation notes below). + +It's within block author incentives to have the transactions stored and ordered in +such a way to: + +1. Maximize block author's profits (value of the produced block) +2. Minimize block author's amount of work (time to produce block) + +In the case of FRAME the first property is simply making sure that the fee per weight +unit is the highest (high `tip` values), the second is about avoiding feeding +transactions that cannot be part of the next block (they are invalid, obsolete, etc). 
+
+From the transaction pool's PoV, transactions are simply opaque blobs of bytes;
+it's required to query the runtime (via the `TaggedTransactionQueue` Runtime API) to
+verify a transaction's intrinsic correctness and extract any information about how the
+transaction relates to other transactions in the pool and the current on-chain state.
+Only valid transactions should be stored in the pool.
+
+Each imported block can affect the validity of transactions already in the pool. Block
+authors expect the pool to provide the most up-to-date information about transactions
+that can be included in the block that they are going to build on top of the just
+imported one. The process of ensuring this property is called *pruning*. During
+pruning the pool should remove transactions which are considered invalid by the
+runtime (queried at the current best imported block).
+
+Since the blockchain is not always linear, forks need to be correctly handled by
+the transaction pool as well. In case of a fork, some blocks are *retracted*
+from the canonical chain, and some other blocks get *enacted* on top of some
+common ancestor. The transactions from retracted blocks could simply be discarded,
+but it's desirable to make sure they are still considered for inclusion in case they
+are deemed valid by the runtime state at the best, recently enacted block (the fork the
+chain re-organized to).
+
+The transaction pool should also offer a way of tracking a transaction's lifecycle in the
+pool: its broadcast status, block inclusion, finality, etc.
+
+## Transaction Validity details
+
+Information retrieved from the runtime is encapsulated in the `TransactionValidity`
+type.
+
+```rust
+pub type TransactionValidity = Result<ValidTransaction, TransactionValidityError>;
+
+pub struct ValidTransaction {
+ pub requires: Vec<TransactionTag>,
+ pub provides: Vec<TransactionTag>,
+ pub priority: TransactionPriority,
+ pub longevity: TransactionLongevity,
+ pub propagate: bool,
+}
+
+pub enum TransactionValidityError {
+ Invalid(/* details */),
+ Unknown(/* details */),
+}
+```
+
+We will now go through each of the parameters to understand the requirements they
+create for transaction ordering.
+
+The runtime is expected to return these values in a deterministic fashion. Calling
+the API multiple times given exactly the same state must return the same results.
+Field-specific rules are described below.
+
+### `requires` / `provides`
+
+These two fields contain a set of `TransactionTag`s (opaque blobs) associated with
+a given transaction. This is a mechanism for the runtime to be able to
+express dependencies between transactions (that this transaction pool can take
+account of). By looking at these fields we can establish a transaction's readiness
+for block inclusion.
+
+The `provides` set contains properties that will be *satisfied* in case the transaction
+is successfully added to a block. Only a transaction in a block may provide a specific
+tag. `requires` contains properties that must be satisfied **before** the transaction
+can be included in a block.
+
+Note that a transaction with an empty `requires` set can be added to a block immediately;
+there are no other transactions that it expects to be included first.
+
+For some given series of transactions the `provides` and `requires` fields will create
+a (simple) directed acyclic graph. The *sources* in such a graph, if they don't have
+any extra `requires` tags (i.e. they have all their dependencies *satisfied*), should
+be considered for block inclusion first. Multiple transactions that are ready for
+block inclusion should be ordered by `priority` (see below).
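The readiness rule can be illustrated with a small, self-contained sketch; the types here are hypothetical simplifications, not the pool's actual data structures. A transaction is ready once every tag in its `requires` set has been satisfied, either by on-chain state or by a previously selected transaction's `provides` set:

```rust
use std::collections::HashSet;

type Tag = Vec<u8>;

/// Hypothetical pool entry; only the fields relevant to readiness.
struct PoolTx {
    requires: Vec<Tag>,
    provides: Vec<Tag>,
}

/// Return the indices of transactions that become ready, in a valid inclusion
/// order. `satisfied` starts as the set of tags already satisfied by on-chain state.
fn ready_order(txs: &[PoolTx], mut satisfied: HashSet<Tag>) -> Vec<usize> {
    let mut order = Vec::new();
    let mut progress = true;
    while progress {
        progress = false;
        for (i, tx) in txs.iter().enumerate() {
            if order.contains(&i) {
                continue
            }
            // Ready once every `requires` tag is satisfied; selecting the
            // transaction then satisfies its `provides` tags for later ones.
            if tx.requires.iter().all(|tag| satisfied.contains(tag)) {
                satisfied.extend(tx.provides.iter().cloned());
                order.push(i);
                progress = true;
            }
        }
    }
    order
}
```

Note the sketch ignores `priority`, which the real pool uses to break ties among transactions that are ready at the same time.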
+
+Note the process of including transactions in a block is basically building the graph,
+then selecting "the best" source vertex (transaction) with all tags satisfied and
+removing it from that graph.
+
+#### Examples
+
+- A transaction in a Bitcoin-like chain will `provide` generated UTXOs and will `require`
+ UTXOs it is still awaiting (note that these are not necessarily all of its inputs,
+ since some of them might already be spendable (i.e. the UTXO is in state))
+
+- A transaction in an account-based chain will `provide` a `(sender, transaction_index/nonce)`
+ (as one tag), and will `require` `(sender, nonce - 1)` in case
+ `on_chain_nonce < nonce - 1`.
+
+#### Rules & caveats
+
+- `provides` must not be empty
+- transactions with an overlap in `provides` tags are mutually exclusive
+- checking the validity of a transaction that `requires` tag `A` after including
+ a transaction that provides that tag must not return `A` in `requires` again
+- runtime developers should avoid re-using a `provides` tag (i.e. it should be unique)
+- there should be no cycles in transaction dependencies
+- caveat: on-chain state conditions may render a transaction invalid despite no
+ `requires` tags
+- caveat: on-chain state conditions may render a transaction valid despite some
+ `requires` tags
+- caveat: including transactions in a chain might make them valid again right away
+ (for instance a UTXO transaction gets in, but since we don't store spent outputs
+ it will be valid again, awaiting the same inputs/tags to be satisfied)
+
+### `priority`
+
+Transaction priority describes the importance of the transaction relative to other transactions
+in the pool. Block authors can expect to benefit from including such transactions
+before others.
+
+Note that we can't simply order transactions in the pool by `priority`, because first
+we need to make sure that all of a transaction's requirements are satisfied (see
+the `requires`/`provides` section). However if we consider a set of transactions
+which all have their requirements (tags) satisfied, the block author should be
+choosing the ones with the highest priority to include in the next block first.
+
+`priority` can be any number from `0` (lowest inclusion priority) to `u64::MAX`
+(highest inclusion priority).
+
+#### Rules & caveats
+
+- the `priority` of a transaction may change over time
+- on-chain conditions may affect `priority`
+- given two transactions with overlapping `provides` tags, the one with higher
+ `priority` should be preferred. However we can also look at the total priority
+ of a subtree rooted at that transaction and compare that instead (i.e. even though
+ the transaction itself has lower `priority` it "unlocks" other high priority transactions).
+
+### `longevity`
+
+Longevity describes how long (in blocks) the transaction is expected to be
+valid. This parameter only gives the transaction pool a hint about how long
+the current transaction may still be valid. Note that it does not guarantee
+the transaction is valid all that time though.
+
+#### Rules & caveats
+
+- the `longevity` of a transaction may change over time
+- on-chain conditions may affect `longevity`
+- after `longevity` lapses, the transaction may still be valid
+
+### `propagate`
+
+This parameter instructs the pool to propagate/gossip a transaction to node peers.
+By default this should be `true`, however in some cases it might be undesirable
+to propagate transactions further.
Examples might include heavy transactions
+produced by block authors in offchain workers (DoS), or the risk of being front-run
+by someone else after finding some non-trivial solution or equivocation, etc.
+
+### `TransactionSource`
+
+To make it possible for the runtime to distinguish whether the transaction that is
+being validated was received over the network, submitted using local RPC, or
+is simply part of a block that is being imported, the transaction pool
+should pass an additional `TransactionSource` parameter to the validity function
+runtime call.
+
+This can be used by runtime developers to quickly reject transactions that, for
+instance, are not expected to be gossiped in the network.
+
+
+### `Invalid` transaction
+
+In case the runtime returns an `Invalid` error, it means the transaction cannot
+be added to a block at all. Extracting the actual reason for invalidity gives
+more details about the source. For instance, `Stale` indicates that
+the transaction was already included in a block, while `BadProof` signifies an
+invalid signature.
+Invalidity might also be temporary. In the case of `ExhaustsResources`, the
+transaction does not fit into the current block, but it might be okay for the next
+one.
+
+### `Unknown` transaction
+
+In the case of `Unknown` validity, the runtime cannot determine if the transaction
+is valid or not in the current block. However, this situation might be temporary, so
+it is expected for the transaction to be retried in the future.
+
+# Implementation
+
+An ideal transaction pool should store only transactions that are considered
+valid by the runtime at the current best imported block.
+After every block is imported, the pool should:
+
+1. Revalidate all transactions in the pool and remove the invalid ones.
+1. Construct the transaction inclusion graph based on `provides`/`requires` tags.
+ Some transactions might not be reachable (have unsatisfied dependencies);
+ they should simply be left waiting in the pool.
+1. On a block author's request, the graph should be copied and transactions should
+ be removed one-by-one from the graph, starting from the one with the highest
+ priority and all conditions satisfied.
+
+With the current gossip protocol, networking should propagate transactions in the
+same order as a block author would include them. Most likely it's fine if we
+propagate transactions with cumulative weight not exceeding the upcoming `N`
+blocks (choosing `N` is subject to networking conditions and block times).
+
+Note that it's not a strict requirement though to propagate exactly the same
+transactions that are prepared for block inclusion. Propagation is best
+effort, especially for block authors, and is not directly incentivised.
+However, the networking protocol might penalise peers that send invalid or
+useless transactions, so we should be nice to others. Also see below a proposal
+to have other peers request the transactions they are interested in, instead of
+gossiping everything.
+
+Since the pool is expected to store more transactions than can fit
+in a single block, validating the entire pool on every block might not be
+feasible, so the actual implementation might need to take some shortcuts.
+
+## Suggestions & caveats
+
+1. The validity of a transaction should not change significantly from block to
+ block. I.e. changes in validity should happen predictably, e.g. `longevity`
+ decrements by 1, `priority` stays the same, `requires` changes if a transaction
+ that provided a tag was included in a block, `provides` does not change, etc.
+
+1.
That means we don't have to revalidate every transaction after every block
+ import, but we need to take care of removing potentially stale transactions.
+
+1. Transactions with exactly the same bytes are most likely going to give the
+ same validity results. We can essentially treat them as identical.
+
+1. Watch out for re-organisations and re-importing transactions from retracted
+ blocks.
+
+1. In the past there were many issues found when running small networks with a
+ lot of re-orgs. Make sure that transactions are never lost.
+
+1. The UTXO model is quite challenging. A transaction becomes valid right after
+ it's included in a block, however it is waiting for exactly the same inputs to
+ be spent, so it will never really be included again.
+
+1. Note that in a non-ideal implementation the state of the pool will most
+ likely always be a bit off, i.e. some transactions might still be in the pool,
+ but they are invalid. The hard decision is about the trade-offs you take.
+
+1. Note that import notifications are not reliable - you might not receive a
+ notification about every imported block.
+
+## Potential implementation ideas
+
+1. Block authors remove transactions from the pool when they author a block. We
+ still store them around to re-import in case the block does not end up
+ canonical. This only works if the node is actively authoring blocks (also
+ see below).
+
+1. We don't prune, but rather remove a fixed number of transactions from the front
+ of the pool (the number based on average/max transactions per block from the
+ past) and re-validate them, re-importing the ones that are still valid (see
+ the sketch after this list).
+
+1. We periodically validate all transactions in the pool in batches.
+
+1. To minimize runtime calls, we introduce a batch-verify call. Note it should reset
+ the state (overlay) after every verification.
+
+1. Consider leveraging finality. Maybe we could verify against the latest finalised
+ block instead. With this, the pool in different nodes can be more similar,
+ which might help with gossiping (see set reconciliation). Note that finality
+ is not a strict requirement for a Substrate chain to have though.
+
+1. Perhaps we could avoid maintaining ready/future queues as we do currently; rather,
+ if a transaction doesn't have all its requirements satisfied by existing
+ transactions, we attempt to re-import it in the future.
+
+1. Instead of maintaining a full pool with total ordering, we attempt to maintain
+ a set of the next (couple of) blocks. We could introduce a batch-validate runtime
+ API method that pretty much attempts to simulate actual block inclusion of
+ a set of such transactions (without necessarily fully running/dispatching
+ them). Importing a transaction would consist of figuring out which next block
+ this transaction has a chance to be included in and then attempting to
+ either push it back or replace some of the existing transactions.
+
+1. Perhaps we could use some immutable graph structure to easily add/remove
+ transactions. We need some traversal method that takes priority and
+ reachability into account.
+
+1. It was discussed in the past to use set reconciliation strategies instead of
+simply broadcasting all/some transactions to all/selected peers. Ethereum's
+[EIP-2464](https://github.com/ethereum/EIPs/blob/5b9685bb9c7ba0f5f921e4d3f23504f7ef08d5b1/EIPS/eip-2464.md)
+might be a good first approach to reduce transaction gossip.
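As a rough illustration of the second idea above, here is a hedged sketch of draining a fixed-size batch from the front of the pool, revalidating it, and re-importing the survivors; `Tx`, the `VecDeque` pool, and `validate` are illustrative stand-ins, not the real API:

```rust
use std::collections::VecDeque;

/// Drain a fixed-size batch from the front of the pool, revalidate it, and
/// re-import the transactions that are still valid.
fn revalidate_front<Tx>(
    pool: &mut VecDeque<Tx>,
    batch_size: usize,
    validate: impl Fn(&Tx) -> bool,
) {
    let take = batch_size.min(pool.len());
    let batch: Vec<Tx> = pool.drain(..take).collect();
    for tx in batch {
        // Invalid transactions are dropped; valid ones go to the back and
        // will be re-ordered by the normal traversal logic.
        if validate(&tx) {
            pool.push_back(tx);
        }
    }
}
```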
+
+# Current implementation
+
+The current implementation of the pool is a result of experience with Ethereum's
+pool implementation, but it also has some warts coming from the learning process of
+Substrate's generic nature and light client support.
+
+The pool consists of basically two independent parts:
+
+1. The transaction pool itself.
+2. Maintenance background task.
+
+The pool is split into a `ready` pool and a `future` pool. The latter contains
+transactions that don't have their requirements satisfied, and the former holds
+transactions that can be used to build a graph of dependencies. Note that the
+graph is built ad-hoc during the traversal process (getting the `ready`
+iterator). This makes the importing process cheaper (we don't need to find the
+exact position in the queue or graph), but the traversal process slower
+(logarithmic). However, most of the time we will only need the beginning of the
+total ordering of transactions for block inclusion or network propagation, hence
+the decision.
+
+The maintenance task is responsible for:
+
+1. Periodically revalidating the pool's transactions (revalidation queue).
+1. Handling block import notifications and doing pruning + re-importing of
+ transactions from retracted blocks.
+1. Handling finality notifications and relaying that to transaction-specific
+ listeners.
+
+Additionally, we maintain a list of recently included/rejected transactions
+(`PoolRotator`) to quickly reject transactions that are unlikely to be valid,
+to limit the number of runtime verification calls.
+
+Each time a transaction is imported, we first verify its validity and later
+find out if the tags it `requires` can be satisfied by transactions already in
+the `ready` pool. In case the transaction is imported into the `ready` pool, we
+additionally *promote* transactions from the `future` pool if the transaction
+happened to fulfill their requirements.
+Note we need to cater for cases where a transaction might replace an already
+existing transaction in the pool. In such cases we check the entire sub-tree of
+transactions that we are about to replace and compare their cumulative priority
+to determine which subtree to keep.
+
+After a block is imported, we kick off the pruning procedure. We first attempt to
+figure out what tags were satisfied by the transactions in that block. For each block
+transaction we either call into the runtime to get its `ValidTransaction` object,
+or we check whether the pool already knows that transaction, to spare the runtime
+call. From this we gather the full set of `provides` tags and perform pruning of
+the `ready` pool based on that. Also, we promote all transactions from `future` that
+have their tags satisfied.
+
+In case we remove transactions that we are unsure were already included
+in the current block or some block in the past, they are added to the revalidation
+queue and the background task attempts to re-import them in the future.
+
+Runtime calls to verify transactions are performed from a separate (limited)
+thread pool to avoid interfering too much with other subsystems of the node. We
+definitely don't want to have all cores validating network transactions, because
+all of these transactions need to be considered untrusted (potentially DoS).
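The import and promotion flow described above can be summarized in a heavily simplified sketch; the types are hypothetical, and the real pool additionally tracks priorities, limits, banned hashes, and replacement subtrees:

```rust
use std::collections::HashSet;

type Tag = Vec<u8>;

struct Tx {
    requires: Vec<Tag>,
    provides: Vec<Tag>,
}

/// Import a verified transaction into `ready` or `future`, then promote any
/// `future` transactions whose requirements have just become satisfied.
fn import(tx: Tx, ready: &mut Vec<Tx>, future: &mut Vec<Tx>, satisfied: &mut HashSet<Tag>) {
    if !tx.requires.iter().all(|tag| satisfied.contains(tag)) {
        future.push(tx);
        return
    }
    satisfied.extend(tx.provides.iter().cloned());
    ready.push(tx);
    // Newly satisfied tags may unlock `future` transactions, which in turn
    // may unlock more, so keep promoting until a fixed point is reached.
    while let Some(i) = future
        .iter()
        .position(|f| f.requires.iter().all(|tag| satisfied.contains(tag)))
    {
        let promoted = future.remove(i);
        satisfied.extend(promoted.provides.iter().cloned());
        ready.push(promoted);
    }
}
```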
diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml new file mode 100644 index 0000000000000..efef36071f083 --- /dev/null +++ b/client/transaction-pool/api/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "sc-transaction-pool-api"
+version = "4.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "Transaction pool client facing API."
+
+[dependencies]
+futures = { version = "0.3.1" }
+log = { version = "0.4.8" }
+serde = { version = "1.0.126", features = ["derive"] }
+thiserror = { version = "1.0.21" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" }
+
+derive_more = { version = "0.99.11" }
+sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" }
diff --git a/client/transaction-pool/graph/src/error.rs b/client/transaction-pool/api/src/error.rs similarity index 57% rename from client/transaction-pool/graph/src/error.rs rename to client/transaction-pool/api/src/error.rs index 392ddaa39be6f..feee3b0a949c2 100644 --- a/client/transaction-pool/graph/src/error.rs +++ b/client/transaction-pool/api/src/error.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
@@ -19,63 +19,73 @@
//! Transaction pool errors.
use sp_runtime::transaction_validity::{
- TransactionPriority as Priority, InvalidTransaction, UnknownTransaction,
+ InvalidTransaction, TransactionPriority as Priority, UnknownTransaction,
};
/// Transaction pool result.
pub type Result<T> = std::result::Result<T, Error>;
/// Transaction pool error type.
-#[derive(Debug, derive_more::Display, derive_more::From)]
+#[derive(Debug, thiserror::Error, derive_more::From)]
+#[allow(missing_docs)]
pub enum Error {
- /// Transaction is not verifiable yet, but might be in the future.
- #[display(fmt="Unknown transaction validity: {:?}", _0)]
+ #[error("Unknown transaction validity: {0:?}")]
UnknownTransaction(UnknownTransaction),
- /// Transaction is invalid.
- #[display(fmt="Invalid transaction validity: {:?}", _0)]
+
+ #[error("Invalid transaction validity: {0:?}")]
InvalidTransaction(InvalidTransaction),
+
/// The transaction validity returned no "provides" tag.
///
/// Such transactions are not accepted to the pool, since we use those tags
/// to define identity of transactions (occupance of the same "slot").
- #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")]
+ #[error("Transaction does not provide any tags, so the pool can't identify it")]
NoTagsProvided,
- /// The transaction is temporarily banned.
- #[display(fmt="Temporarily Banned")]
+
+ #[error("Transaction temporarily Banned")]
TemporarilyBanned,
- /// The transaction is already in the pool.
- #[display(fmt="[{:?}] Already imported", _0)]
+
+ #[error("[{0:?}] Already imported")]
AlreadyImported(Box<dyn std::any::Any + Send>),
- /// The transaction cannot be imported cause it's a replacement and has too low priority.
- #[display(fmt="Too low priority ({} > {})", old, new)]
+
+ #[error("Too low priority ({} > {})", old, new)]
TooLowPriority {
/// Transaction already in the pool.
old: Priority,
/// Transaction entering the pool.
- new: Priority
+ new: Priority,
},
- /// Deps cycle detected and we couldn't import transaction.
- #[display(fmt="Cycle Detected")]
+ #[error("Transaction with cyclic dependency")]
CycleDetected,
- /// Transaction was dropped immediately after it got inserted.
- #[display(fmt="Transaction couldn't enter the pool because of the limit.")]
+
+ #[error("Transaction couldn't enter the pool because of the limit")]
ImmediatelyDropped,
- /// Invalid block id.
+
+ #[error("Transaction cannot be propagated and the local node does not author blocks")]
+ Unactionable,
+
+ #[from(ignore)]
+ #[error("{0}")]
InvalidBlockId(String),
-}
-impl std::error::Error for Error {}
+ #[error("The pool is not accepting future transactions")]
+ RejectedFutureTransaction,
+}
/// Transaction pool error conversion.
-pub trait IntoPoolError: ::std::error::Error + Send + Sized {
+pub trait IntoPoolError: std::error::Error + Send + Sized {
/// Try to extract original `Error`
///
/// This implementation is optional and used only to
/// provide more descriptive error messages for end users
/// of RPC API.
- fn into_pool_error(self) -> ::std::result::Result<Error, Self> { Err(self) }
+ fn into_pool_error(self) -> std::result::Result<Error, Self> {
+ Err(self)
+ }
}
impl IntoPoolError for Error {
- fn into_pool_error(self) -> ::std::result::Result<Error, Self> { Ok(self) }
+ fn into_pool_error(self) -> std::result::Result<Error, Self> {
+ Ok(self)
+ }
}
diff --git a/primitives/transaction-pool/src/pool.rs b/client/transaction-pool/api/src/lib.rs similarity index 83% rename from primitives/transaction-pool/src/pool.rs rename to client/transaction-pool/api/src/lib.rs index 6235ca7cdfcf3..a6252f1373c5d 100644 --- a/primitives/transaction-pool/src/pool.rs +++ b/client/transaction-pool/api/src/lib.rs
@@ -1,37 +1,36 @@
// This file is part of Substrate.
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Transaction pool primitives types & Runtime API.
-
-use std::{
- collections::HashMap,
- hash::Hash,
- sync::Arc,
- pin::Pin,
-};
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Transaction pool client facing API.
+#![warn(missing_docs)]
+
+pub mod error;
+
use futures::{Future, Stream};
use serde::{Deserialize, Serialize};
+pub use sp_runtime::transaction_validity::{
+ TransactionLongevity, TransactionPriority, TransactionSource, TransactionTag,
+};
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Member, NumberFor},
- transaction_validity::{
- TransactionLongevity, TransactionPriority, TransactionTag, TransactionSource,
- },
};
+use std::{collections::HashMap, hash::Hash, pin::Pin, sync::Arc};
/// Transaction pool status.
#[derive(Debug)]
@@ -60,20 +59,20 @@ impl PoolStatus {
///
/// The status events can be grouped based on their kinds as:
/// 1. Entering/Moving within the pool:
-/// - `Future`
-/// - `Ready`
+/// - `Future`
+/// - `Ready`
/// 2. Inside `Ready` queue:
-/// - `Broadcast`
+/// - `Broadcast`
/// 3. Leaving the pool:
-/// - `InBlock`
-/// - `Invalid`
-/// - `Usurped`
-/// - `Dropped`
-/// 4. Re-entering the pool:
-/// - `Retracted`
-/// 5. Block finalized:
-/// - `Finalized`
-/// - `FinalityTimeout`
+/// - `InBlock`
+/// - `Invalid`
+/// - `Usurped`
+/// - `Dropped`
+/// 4. Re-entering the pool:
+/// - `Retracted`
+/// 5. Block finalized:
+/// - `Finalized`
+/// - `FinalityTimeout`
///
/// The events will always be received in the order described above, however
/// there might be cases where transactions alternate between `Future` and `Ready`
@@ -127,7 +126,8 @@ pub enum TransactionStatus<Hash, BlockHash> {
}
/// The stream of transaction events.
-pub type TransactionStatusStream<Hash, BlockHash> = dyn Stream<Item = TransactionStatus<Hash, BlockHash>> + Send + Unpin;
+pub type TransactionStatusStream<Hash, BlockHash> =
+ dyn Stream<Item = TransactionStatus<Hash, BlockHash>> + Send;
/// The import notification event stream.
pub type ImportNotificationStream<H> = futures::channel::mpsc::Receiver<H>;
@@ -144,7 +144,7 @@ pub type TransactionStatusStreamFor<P> = TransactionStatusStream<TxHash<P>, BlockHash<P>>;
pub type LocalTransactionFor<P> = <<P as LocalTransactionPool>::Block as BlockT>::Extrinsic;
/// Typical future type used in transaction pool api.
-pub type PoolFuture<T, E> = std::pin::Pin<Box<dyn Future<Output=Result<T, E>> + Send>>;
+pub type PoolFuture<T, E> = std::pin::Pin<Box<dyn Future<Output = Result<T, E>> + Send>>;
/// In-pool transaction interface.
///
@@ -181,7 +181,7 @@ pub trait TransactionPool: Send + Sync {
/// In-pool transaction type.
type InPoolTransaction: InPoolTransaction<
Transaction = TransactionFor<Self>,
- Hash = TxHash<Self>
+ Hash = TxHash<Self>,
>;
/// Error type.
type Error: From<crate::error::Error> + crate::error::IntoPoolError;
@@ -204,24 +204,32 @@ pub trait TransactionPool: Send + Sync {
xt: TransactionFor<Self>,
) -> PoolFuture<TxHash<Self>, Self::Error>;
- /// Returns a future that imports a single transaction and starts to watch its progress in the pool.
+ /// Returns a future that imports a single transaction and starts to watch its progress in the
+ /// pool.
fn submit_and_watch(
&self,
at: &BlockId<Self::Block>,
source: TransactionSource,
xt: TransactionFor<Self>,
- ) -> PoolFuture<Box<TransactionStatusStreamFor<Self>>, Self::Error>;
+ ) -> PoolFuture<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error>;
// *** Block production / Networking
/// Get an iterator for ready transactions ordered by priority.
///
/// Guarantees to return only when transaction pool got updated at `at` block.
/// Guarantees to return immediately when `None` is passed.
- fn ready_at(&self, at: NumberFor<Self::Block>)
- -> Pin<Box<dyn Future<Output=Box<dyn Iterator<Item=Arc<Self::InPoolTransaction>> + Send>> + Send>>;
+ fn ready_at(
+ &self,
+ at: NumberFor<Self::Block>,
+ ) -> Pin<
+ Box<
+ dyn Future<Output = Box<dyn Iterator<Item = Arc<Self::InPoolTransaction>> + Send>>
+ + Send,
+ >,
+ >;
/// Get an iterator for ready transactions ordered by priority.
- fn ready(&self) -> Box<dyn Iterator<Item=Arc<Self::InPoolTransaction>> + Send>;
+ fn ready(&self) -> Box<dyn Iterator<Item = Arc<Self::InPoolTransaction>> + Send>;
// *** Block production
/// Remove transactions identified by given hashes (and dependent transactions) from the pool.
@@ -267,7 +275,7 @@ pub enum ChainEvent<B: BlockT> {
/// Trait for transaction pool maintenance.
pub trait MaintainedTransactionPool: TransactionPool {
/// Perform maintenance
- fn maintain(&self, event: ChainEvent<Self::Block>) -> Pin<Box<dyn Future<Output=()> + Send>>;
+ fn maintain(&self, event: ChainEvent<Self::Block>) -> Pin<Box<dyn Future<Output = ()> + Send>>;
}
/// Transaction pool interface for submitting local transactions that exposes a
@@ -303,11 +311,7 @@ pub trait OffchainSubmitTransaction<Block: BlockT>: Send + Sync {
/// Submit transaction.
///
/// The transaction will end up in the pool and be propagated to others.
- fn submit_at(
- &self,
- at: &BlockId<Block>,
- extrinsic: Block::Extrinsic,
- ) -> Result<(), ()>;
+ fn submit_at(&self, at: &BlockId<Block>, extrinsic: Block::Extrinsic) -> Result<(), ()>;
}
impl<TPool: LocalTransactionPool> OffchainSubmitTransaction<TPool::Block> for TPool {
diff --git a/client/transaction-pool/graph/benches/basics.rs b/client/transaction-pool/benches/basics.rs similarity index 67% rename from client/transaction-pool/graph/benches/basics.rs rename to client/transaction-pool/benches/basics.rs index bb10086bd4a55..cf30a0200ad76 100644 --- a/client/transaction-pool/graph/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
@@ -18,18 +18,22 @@
use criterion::{criterion_group, criterion_main, Criterion};
-use futures::{future::{ready, Ready}, executor::block_on};
-use sc_transaction_graph::*;
use codec::Encode;
-use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId};
+use futures::{
+ executor::block_on,
+ future::{ready, Ready},
+};
+use sc_transaction_pool::{test_helpers::*, *};
+use sp_core::blake2_256;
use sp_runtime::{
generic::BlockId,
+ traits::Block as BlockT,
transaction_validity::{
- ValidTransaction, InvalidTransaction, TransactionValidity, TransactionTag as Tag,
- TransactionSource,
+ InvalidTransaction, TransactionSource, TransactionTag as Tag, TransactionValidity,
+ ValidTransaction,
},
};
-use sp_core::blake2_256;
+use substrate_test_runtime::{AccountId, Block, Extrinsic, Transfer, H256};
#[derive(Clone, Debug, Default)]
struct TestApi {
@@ -51,45 +55,41 @@ fn to_tag(nonce: u64, from: AccountId) -> Tag {
impl ChainApi for TestApi {
type Block = Block;
- type Error = sp_transaction_pool::error::Error;
- type ValidationFuture = Ready<sp_transaction_pool::error::Result<TransactionValidity>>;
- type BodyFuture = Ready<sp_transaction_pool::error::Result<Option<Vec<Extrinsic>>>>;
+ type Error = sc_transaction_pool_api::error::Error;
+ type ValidationFuture = Ready<sc_transaction_pool_api::error::Result<TransactionValidity>>;
+ type BodyFuture = Ready<sc_transaction_pool_api::error::Result<Option<Vec<Extrinsic>>>>;
fn validate_transaction(
&self,
at: &BlockId<Self::Block>,
_source: TransactionSource,
- uxt: ExtrinsicFor<Self>,
+ uxt: test_helpers::ExtrinsicFor<Self>,
) -> Self::ValidationFuture {
let nonce = uxt.transfer().nonce;
let from = uxt.transfer().from.clone();
match self.block_id_to_number(at) {
- Ok(Some(num)) if num > 5 => {
- return ready(
- Ok(Err(InvalidTransaction::Stale.into()))
- )
- },
+ Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))),
_ => {},
}
- ready(
- Ok(Ok(ValidTransaction {
- priority: 4,
- requires: if nonce > 1 && self.nonce_dependant {
- vec![to_tag(nonce-1, from.clone())]
- } else { vec![] },
- provides: vec![to_tag(nonce, from)],
- longevity: 10,
- propagate: true,
- }))
- )
+ ready(Ok(Ok(ValidTransaction {
+ priority: 4,
+ requires: if nonce > 1 && self.nonce_dependant {
+ vec![to_tag(nonce - 1, from.clone())]
+ } else {
+ vec![]
+ },
+ provides: vec![to_tag(nonce, from)],
+ longevity: 10,
+ propagate: true,
+ })))
}
fn block_id_to_number(
&self,
at: &BlockId<Self::Block>,
- ) -> Result<Option<NumberFor<Self>>, Self::Error> {
+ ) -> Result<Option<test_helpers::NumberFor<Self>>, Self::Error> {
Ok(match at {
BlockId::Number(num) => Some(*num),
BlockId::Hash(_) => None,
@@ -99,14 +99,14 @@ impl ChainApi for TestApi {
fn block_id_to_hash(
&self,
at: &BlockId<Self::Block>,
- ) -> Result<Option<BlockHash<Self>>, Self::Error> {
+ ) -> Result<Option<test_helpers::BlockHash<Self>>, Self::Error> {
Ok(match at {
BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(),
BlockId::Hash(_) => None,
})
}
- fn hash_and_length(&self, uxt: &ExtrinsicFor<Self>) -> (H256, usize) {
+ fn hash_and_length(&self, uxt: &test_helpers::ExtrinsicFor<Self>) -> (H256, usize) {
let encoded = uxt.encode();
(blake2_256(&encoded).into(), encoded.len())
}
@@ -114,6 +114,13 @@ impl ChainApi for TestApi {
fn block_body(&self, _id: &BlockId<Self::Block>) -> Self::BodyFuture {
ready(Ok(None))
}
+
+ fn block_header(
+ &self,
+ _: &BlockId<Self::Block>,
+ ) -> Result<Option<<Self::Block as BlockT>::Header>, Self::Error> {
+ Ok(None)
+ }
}
fn uxt(transfer: Transfer) -> Extrinsic {
@@ -149,11 +156,7 @@ fn bench_configured(pool: Pool<TestApi>, number: u64) {
// Prune all transactions.
let block_num = 6; - block_on(pool.prune_tags( - &BlockId::Number(block_num), - tags, - vec![], - )).expect("Prune failed"); + block_on(pool.prune_tags(&BlockId::Number(block_num), tags, vec![])).expect("Prune failed"); // pool is empty assert_eq!(pool.validated_pool().status().ready, 0); @@ -161,16 +164,21 @@ fn bench_configured(pool: Pool, number: u64) { } fn benchmark_main(c: &mut Criterion) { - c.bench_function("sequential 50 tx", |b| { b.iter(|| { - bench_configured(Pool::new(Default::default(), TestApi::new_dependant().into()), 50); + bench_configured( + Pool::new(Default::default(), true.into(), TestApi::new_dependant().into()), + 50, + ); }); }); c.bench_function("random 100 tx", |b| { b.iter(|| { - bench_configured(Pool::new(Default::default(), TestApi::default().into()), 100); + bench_configured( + Pool::new(Default::default(), true.into(), TestApi::default().into()), + 100, + ); }); }); } diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index c5850e765fcfa..b49cadc51c33c 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-graph" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" @@ -14,23 +14,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -futures = "0.3.4" +thiserror = "1.0.21" +futures = "0.3.9" log = "0.4.8" -parking_lot = "0.10.0" +parking_lot = "0.11.1" serde = { version = "1.0.101", features = ["derive"] } -wasm-timer = "0.2" -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-utils = { version = "2.0.0", path = "../../../primitives/utils" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -linked-hash-map = "0.5.2" -retain_mut = "0.1.1" +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "4.0.0-dev", path = "../../../primitives/transaction-pool" } +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } +linked-hash-map = "0.5.4" +retain_mut = "0.1.3" [dev-dependencies] assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "1.3.4" } +codec = { package = "parity-scale-codec", version = "2.0.0" } substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } criterion = "0.3" diff --git a/client/transaction-pool/graph/README.md b/client/transaction-pool/graph/README.md deleted file mode 100644 index bc9cd929122f1..0000000000000 --- a/client/transaction-pool/graph/README.md +++ /dev/null @@ -1,8 +0,0 @@ -Generic Transaction Pool - -The pool is based on dependency graph between transactions -and their priority. -The pool is able to return an iterator that traverses transaction -graph in the correct order taking into account priorities and dependencies. 
- -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 853b66f6e74bb..a735c67d846ce 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,31 +18,63 @@ //! Chain api required for the transaction pool. -use std::{marker::PhantomData, pin::Pin, sync::Arc}; use codec::{Decode, Encode}; use futures::{ - channel::oneshot, executor::{ThreadPool, ThreadPoolBuilder}, future::{Future, FutureExt, ready, Ready}, + channel::{mpsc, oneshot}, + future::{ready, Future, FutureExt, Ready}, + lock::Mutex, + SinkExt, StreamExt, }; +use std::{marker::PhantomData, pin::Pin, sync::Arc}; +use prometheus_endpoint::Registry as PrometheusRegistry; use sc_client_api::{ - blockchain::HeaderBackend, light::{Fetcher, RemoteCallRequest, RemoteBodyRequest}, BlockBackend, + blockchain::HeaderBackend, + light::{Fetcher, RemoteBodyRequest, RemoteCallRequest}, + BlockBackend, }; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ - generic::BlockId, traits::{self, Block as BlockT, BlockIdTo, Header as HeaderT, Hash as HashT}, - transaction_validity::{TransactionValidity, TransactionSource}, + generic::BlockId, + traits::{self, Block as BlockT, BlockIdTo, Hash as HashT, Header as HeaderT}, + transaction_validity::{TransactionSource, TransactionValidity}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; -use sp_api::{ProvideRuntimeApi, ApiExt}; -use prometheus_endpoint::Registry as PrometheusRegistry; -use crate::{metrics::{ApiMetrics, ApiMetricsExt}, error::{self, Error}}; +use crate::{ + error::{self, Error}, + graph, + metrics::{ApiMetrics, ApiMetricsExt}, +}; /// The transaction pool logic for full client. pub struct FullChainApi { client: Arc, - pool: ThreadPool, _marker: PhantomData, metrics: Option>, + validation_pool: Arc + Send>>>>>, +} + +/// Spawn a validation task that will be used by the transaction pool to validate transactions. 
+fn spawn_validation_pool_task( + name: &'static str, + receiver: Arc + Send>>>>>, + spawner: &impl SpawnEssentialNamed, +) { + spawner.spawn_essential_blocking( + name, + async move { + loop { + let task = receiver.lock().await.next().await; + match task { + None => return, + Some(task) => task.await, + } + } + } + .boxed(), + ); } impl FullChainApi { @@ -50,47 +82,47 @@ impl FullChainApi { pub fn new( client: Arc, prometheus: Option<&PrometheusRegistry>, + spawner: &impl SpawnEssentialNamed, ) -> Self { - let metrics = prometheus.map(ApiMetrics::register).and_then(|r| { - match r { - Err(err) => { - log::warn!( - target: "txpool", - "Failed to register transaction pool api prometheus metrics: {:?}", - err, - ); - None - }, - Ok(api) => Some(Arc::new(api)) - } + let metrics = prometheus.map(ApiMetrics::register).and_then(|r| match r { + Err(err) => { + log::warn!( + target: "txpool", + "Failed to register transaction pool api prometheus metrics: {:?}", + err, + ); + None + }, + Ok(api) => Some(Arc::new(api)), }); + let (sender, receiver) = mpsc::channel(0); + + let receiver = Arc::new(Mutex::new(receiver)); + spawn_validation_pool_task("transaction-pool-task-0", receiver.clone(), spawner); + spawn_validation_pool_task("transaction-pool-task-1", receiver, spawner); + FullChainApi { client, - pool: ThreadPoolBuilder::new() - .pool_size(2) - .name_prefix("txpool-verifier") - .create() - .expect("Failed to spawn verifier threads, that are critical for node operation."), + validation_pool: Arc::new(Mutex::new(sender)), _marker: Default::default(), metrics, } } } -impl sc_transaction_graph::ChainApi for FullChainApi +impl graph::ChainApi for FullChainApi where Block: BlockT, - Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { type Block = Block; type Error = error::Error; - type ValidationFuture = Pin< - Box> + Send> - >; + type ValidationFuture = + Pin> + Send>>; type BodyFuture = Ready::Extrinsic>>>>; fn block_body(&self, id: &BlockId) -> Self::BodyFuture { @@ -101,55 +133,69 @@ where &self, at: &BlockId, source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor, + uxt: graph::ExtrinsicFor, ) -> Self::ValidationFuture { let (tx, rx) = oneshot::channel(); let client = self.client.clone(); let at = at.clone(); - + let validation_pool = self.validation_pool.clone(); let metrics = self.metrics.clone(); - metrics.report(|m| m.validations_scheduled.inc()); - - self.pool.spawn_ok(futures_diagnose::diagnose( - "validate-transaction", - async move { - let res = validate_transaction_blocking(&*client, &at, source, uxt); - if let Err(e) = tx.send(res) { - log::warn!("Unable to send a validate transaction result: {:?}", e); - } - metrics.report(|m| m.validations_finished.inc()); - }, - )); - Box::pin(async move { + async move { + metrics.report(|m| m.validations_scheduled.inc()); + + validation_pool + .lock() + .await + .send( + async move { + let res = validate_transaction_blocking(&*client, &at, source, uxt); + let _ = tx.send(res); + metrics.report(|m| m.validations_finished.inc()); + } + .boxed(), + ) + .await + .map_err(|e| Error::RuntimeApi(format!("Validation pool down: {:?}", e)))?; + match rx.await { Ok(r) => r, Err(_) => Err(Error::RuntimeApi("Validation was canceled".into())), } - }) + } + .boxed() } fn block_id_to_number( &self, at: &BlockId, - ) -> error::Result>> { - 
self.client.to_number(at).map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) + ) -> error::Result>> { + self.client + .to_number(at) + .map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) } fn block_id_to_hash( &self, at: &BlockId, - ) -> error::Result>> { - self.client.to_hash(at).map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) + ) -> error::Result>> { + self.client + .to_hash(at) + .map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) } fn hash_and_length( &self, - ex: &sc_transaction_graph::ExtrinsicFor, - ) -> (sc_transaction_graph::ExtrinsicHash, usize) { - ex.using_encoded(|x| { - ( as traits::Hash>::hash(x), x.len()) - }) + ex: &graph::ExtrinsicFor, + ) -> (graph::ExtrinsicHash, usize) { + ex.using_encoded(|x| ( as traits::Hash>::hash(x), x.len())) + } + + fn block_header( + &self, + at: &BlockId, + ) -> Result::Header>, Self::Error> { + self.client.header(*at).map_err(Into::into) } } @@ -159,46 +205,76 @@ fn validate_transaction_blocking( client: &Client, at: &BlockId, source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor>, + uxt: graph::ExtrinsicFor>, ) -> error::Result where Block: BlockT, - Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; { let runtime_api = client.runtime_api(); - let has_v2 = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; + let api_version = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; runtime_api - .has_api_with::, _>(&at, |v| v >= 2) - .unwrap_or_default() - }; + .api_version::>(&at) + .map_err(|e| Error::RuntimeApi(e.to_string()))? + .ok_or_else(|| Error::RuntimeApi( + format!("Could not find `TaggedTransactionQueue` api for block `{:?}`.", at) + )) + }?; + + let block_hash = client.to_hash(at) + .map_err(|e| Error::RuntimeApi(format!("{:?}", e)))? + .ok_or_else(|| Error::RuntimeApi(format!("Could not get hash for block `{:?}`.", at)))?; + + use sp_api::Core; - let res = sp_tracing::within_span!( + sp_tracing::within_span!( sp_tracing::Level::TRACE, "runtime::validate_transaction"; { - if has_v2 { - runtime_api.validate_transaction(&at, source, uxt) + if api_version >= 3 { + runtime_api.validate_transaction(&at, source, uxt, block_hash) + .map_err(|e| Error::RuntimeApi(e.to_string())) } else { - #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_2(&at, uxt) + let block_number = client.to_number(at) + .map_err(|e| Error::RuntimeApi(format!("{:?}", e)))? + .ok_or_else(|| + Error::RuntimeApi(format!("Could not get number for block `{:?}`.", at)) + )?; + + // The old versions require us to call `initialize_block` before. 
+ runtime_api.initialize_block(at, &sp_runtime::traits::Header::new( + block_number + sp_runtime::traits::One::one(), + Default::default(), + Default::default(), + block_hash, + Default::default()), + ).map_err(|e| Error::RuntimeApi(e.to_string()))?; + + if api_version == 2 { + #[allow(deprecated)] // old validate_transaction + runtime_api.validate_transaction_before_version_3(&at, source, uxt) + .map_err(|e| Error::RuntimeApi(e.to_string())) + } else { + #[allow(deprecated)] // old validate_transaction + runtime_api.validate_transaction_before_version_2(&at, uxt) + .map_err(|e| Error::RuntimeApi(e.to_string())) + } } - }); - - res.map_err(|e| Error::RuntimeApi(e.to_string())) + }) }) } impl FullChainApi where Block: BlockT, - Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { /// Validates a transaction by calling into the runtime, same as /// `validate_transaction` but blocks the current thread when performing @@ -208,7 +284,7 @@ where &self, at: &BlockId, source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor, + uxt: graph::ExtrinsicFor, ) -> error::Result { validate_transaction_blocking(&*self.client, at, source, uxt) } @@ -224,42 +300,39 @@ pub struct LightChainApi { impl LightChainApi { /// Create new transaction pool logic. pub fn new(client: Arc, fetcher: Arc) -> Self { - LightChainApi { - client, - fetcher, - _phantom: Default::default(), - } + LightChainApi { client, fetcher, _phantom: Default::default() } } } -impl sc_transaction_graph::ChainApi for - LightChainApi where - Block: BlockT, - Client: HeaderBackend + 'static, - F: Fetcher + 'static, +impl graph::ChainApi for LightChainApi +where + Block: BlockT, + Client: HeaderBackend + 'static, + F: Fetcher + 'static, { type Block = Block; type Error = error::Error; - type ValidationFuture = Box< - dyn Future> + Send + Unpin - >; + type ValidationFuture = + Box> + Send + Unpin>; type BodyFuture = Pin< Box< dyn Future::Extrinsic>>>> - + Send - > + + Send, + >, >; fn validate_transaction( &self, at: &BlockId, source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor, + uxt: graph::ExtrinsicFor, ) -> Self::ValidationFuture { let header_hash = self.client.expect_block_hash_from_id(at); - let header_and_hash = header_hash - .and_then(|header_hash| self.client.expect_header(BlockId::Hash(header_hash)) - .map(|header| (header_hash, header))); + let header_and_hash = header_hash.and_then(|header_hash| { + self.client + .expect_header(BlockId::Hash(header_hash)) + .map(|header| (header_hash, header)) + }); let (block, header) = match header_and_hash { Ok((header_hash, header)) => (header_hash, header), Err(err) => return Box::new(ready(Err(err.into()))), @@ -268,17 +341,16 @@ impl sc_transaction_graph::ChainApi for block, header, method: "TaggedTransactionQueue_validate_transaction".into(), - call_data: (source, uxt).encode(), + call_data: (source, uxt, block).encode(), retry_count: None, }); let remote_validation_request = remote_validation_request.then(move |result| { - let result: error::Result = result - .map_err(Into::into) - .and_then(|result| Decode::decode(&mut &result[..]) - .map_err(|e| Error::RuntimeApi( - format!("Error decoding tx validation result: {:?}", e) - )) - ); + let result: error::Result = + result.map_err(Into::into).and_then(|result| { + Decode::decode(&mut &result[..]).map_err(|e| { + 
Error::RuntimeApi(format!("Error decoding tx validation result: {:?}", e)) + }) + }); ready(result) }); @@ -288,45 +360,41 @@ impl sc_transaction_graph::ChainApi for fn block_id_to_number( &self, at: &BlockId, - ) -> error::Result>> { + ) -> error::Result>> { Ok(self.client.block_number_from_id(at)?) } fn block_id_to_hash( &self, at: &BlockId, - ) -> error::Result>> { + ) -> error::Result>> { Ok(self.client.block_hash_from_id(at)?) } fn hash_and_length( &self, - ex: &sc_transaction_graph::ExtrinsicFor, - ) -> (sc_transaction_graph::ExtrinsicHash, usize) { - ex.using_encoded(|x| { - (<::Hashing as HashT>::hash(x), x.len()) - }) + ex: &graph::ExtrinsicFor, + ) -> (graph::ExtrinsicHash, usize) { + ex.using_encoded(|x| (<::Hashing as HashT>::hash(x), x.len())) } fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - let header = self.client.header(*id) + let header = self + .client + .header(*id) .and_then(|h| h.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))); let header = match header { Ok(header) => header, Err(err) => { log::warn!(target: "txpool", "Failed to query header: {:?}", err); - return Box::pin(ready(Ok(None))); - } + return Box::pin(ready(Ok(None))) + }, }; let fetcher = self.fetcher.clone(); async move { - let transactions = fetcher.remote_body({ - RemoteBodyRequest { - header, - retry_count: None, - } - }) + let transactions = fetcher + .remote_body(RemoteBodyRequest { header, retry_count: None }) .await .unwrap_or_else(|e| { log::warn!(target: "txpool", "Failed to fetch block body: {:?}", e); @@ -334,6 +402,14 @@ impl sc_transaction_graph::ChainApi for }); Ok(Some(transactions)) - }.boxed() + } + .boxed() + } + + fn block_header( + &self, + at: &BlockId, + ) -> Result::Header>, Self::Error> { + self.client.header(*at).map_err(Into::into) } } diff --git a/client/transaction-pool/src/error.rs b/client/transaction-pool/src/error.rs index c0f795df1801a..b14e0569f0830 100644 --- a/client/transaction-pool/src/error.rs +++ b/client/transaction-pool/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,38 +18,29 @@ //! Transaction pool error. -use sp_transaction_pool::error::Error as TxPoolError; +use sc_transaction_pool_api::error::Error as TxPoolError; /// Transaction pool result. pub type Result = std::result::Result; /// Transaction pool error type. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum Error { - /// Pool error. - Pool(TxPoolError), - /// Blockchain error. - Blockchain(sp_blockchain::Error), - /// Error while converting a `BlockId`. - #[from(ignore)] + #[error("Transaction pool error")] + Pool(#[from] TxPoolError), + + #[error("Blockchain error")] + Blockchain(#[from] sp_blockchain::Error), + + #[error("Block conversion error: {0}")] BlockIdConversion(String), - /// Error while calling the runtime api. 
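// The error.rs hunk here swaps a hand-written `std::error::Error` impl (via
// `derive_more`) for `thiserror`. A minimal sketch of the same pattern with
// hypothetical stand-in types: `#[error(...)]` supplies `Display`, and
// `#[from]` generates the `From` impl and wires up `source()`, so the manual
// impl block below can be deleted entirely.

use thiserror::Error;

#[derive(Debug, Error)]
#[error("underlying pool error")]
struct PoolError;

#[derive(Debug, Error)]
enum Error {
    // `#[from]` both converts automatically and reports the inner error as
    // this variant's source.
    #[error("Transaction pool error")]
    Pool(#[from] PoolError),

    #[error("Runtime error: {0}")]
    RuntimeApi(String),
}

fn main() {
    let err: Error = PoolError.into();
    // `source()` walks to the wrapped error without any hand-written impl.
    assert!(std::error::Error::source(&err).is_some());
    println!("{}", err);
}
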
- #[from(ignore)] - RuntimeApi(String), -} -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Pool(ref err) => Some(err), - Error::Blockchain(ref err) => Some(err), - Error::BlockIdConversion(_) => None, - Error::RuntimeApi(_) => None, - } - } + #[error("Runtime error: {0}")] + RuntimeApi(String), } -impl sp_transaction_pool::error::IntoPoolError for Error { +impl sc_transaction_pool_api::error::IntoPoolError for Error { fn into_pool_error(self) -> std::result::Result { match self { Error::Pool(e) => Ok(e), diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/src/graph/base_pool.rs similarity index 69% rename from client/transaction-pool/graph/src/base_pool.rs rename to client/transaction-pool/src/graph/base_pool.rs index 81d8e802c2c9e..890a87e82929d 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/src/graph/base_pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -20,27 +20,24 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{ - collections::HashSet, - fmt, - hash, - sync::Arc, -}; +use std::{collections::HashSet, fmt, hash, sync::Arc}; -use log::{trace, debug, warn}; +use log::{debug, trace, warn}; +use sc_transaction_pool_api::{error, InPoolTransaction, PoolStatus}; use serde::Serialize; use sp_core::hexdisplay::HexDisplay; -use sp_runtime::traits::Member; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, - TransactionLongevity as Longevity, - TransactionPriority as Priority, - TransactionSource as Source, +use sp_runtime::{ + traits::Member, + transaction_validity::{ + TransactionLongevity as Longevity, TransactionPriority as Priority, + TransactionSource as Source, TransactionTag as Tag, + }, }; -use sp_transaction_pool::{error, PoolStatus, InPoolTransaction}; -use crate::future::{FutureTransactions, WaitingTransaction}; -use crate::ready::ReadyTransactions; +use super::{ + future::{FutureTransactions, WaitingTransaction}, + ready::ReadyTransactions, +}; /// Successful import result. #[derive(Debug, PartialEq, Eq)] @@ -60,7 +57,7 @@ pub enum Imported { Future { /// Hash of transaction that was successfully imported. hash: Hash, - } + }, } impl Imported { @@ -131,7 +128,7 @@ impl InPoolTransaction for Transaction { &self.priority } - fn longevity(&self) ->&Longevity { + fn longevity(&self) -> &Longevity { &self.valid_till } @@ -155,13 +152,13 @@ impl Transaction { /// every reason to be commented. That's why we `Transaction` is not `Clone`, /// but there's explicit `duplicate` method. 
pub fn duplicate(&self) -> Self { - Transaction { + Self { data: self.data.clone(), - bytes: self.bytes.clone(), + bytes: self.bytes, hash: self.hash.clone(), - priority: self.priority.clone(), + priority: self.priority, source: self.source, - valid_till: self.valid_till.clone(), + valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), propagate: self.propagate, @@ -169,21 +166,18 @@ impl Transaction { } } -impl fmt::Debug for Transaction where +impl fmt::Debug for Transaction +where Hash: fmt::Debug, Extrinsic: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fn print_tags(fmt: &mut fmt::Formatter, tags: &[Tag]) -> fmt::Result { - let mut it = tags.iter(); - if let Some(t) = it.next() { - write!(fmt, "{}", HexDisplay::from(t))?; - } - for t in it { - write!(fmt, ",{}", HexDisplay::from(t))?; - } - Ok(()) - } + let join_tags = |tags: &[Tag]| { + tags.iter() + .map(|tag| HexDisplay::from(tag).to_string()) + .collect::>() + .join(", ") + }; write!(fmt, "Transaction {{ ")?; write!(fmt, "hash: {:?}, ", &self.hash)?; @@ -192,11 +186,8 @@ impl fmt::Debug for Transaction where write!(fmt, "bytes: {:?}, ", &self.bytes)?; write!(fmt, "propagate: {:?}, ", &self.propagate)?; write!(fmt, "source: {:?}, ", &self.source)?; - write!(fmt, "requires: [")?; - print_tags(fmt, &self.requires)?; - write!(fmt, "], provides: [")?; - print_tags(fmt, &self.provides)?; - write!(fmt, "], ")?; + write!(fmt, "requires: [{}], ", join_tags(&self.requires))?; + write!(fmt, "provides: [{}], ", join_tags(&self.provides))?; write!(fmt, "data: {:?}", &self.data)?; write!(fmt, "}}")?; Ok(()) @@ -216,8 +207,7 @@ const RECENTLY_PRUNED_TAGS: usize = 2; /// as-is for the second time will fail or produce unwanted results. /// Most likely it is required to revalidate them and recompute set of /// required tags. -#[derive(Debug)] -#[cfg_attr(not(target_os = "unknown"), derive(parity_util_mem::MallocSizeOf))] +#[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct BasePool { reject_future_transactions: bool, future: FutureTransactions, @@ -239,7 +229,7 @@ impl Default for Bas impl BasePool { /// Create new pool given reject_future_transactions flag. 
pub fn new(reject_future_transactions: bool) -> Self { - BasePool { + Self { reject_future_transactions, future: Default::default(), ready: Default::default(), @@ -253,7 +243,10 @@ impl BasePool(&mut self, closure: impl FnOnce(&mut Self, bool) -> T) -> T { + pub(crate) fn with_futures_enabled( + &mut self, + closure: impl FnOnce(&mut Self, bool) -> T, + ) -> T { let previous = self.reject_future_transactions; self.reject_future_transactions = false; let return_value = closure(self, previous); @@ -273,19 +266,12 @@ impl BasePool, - ) -> error::Result> { + pub fn import(&mut self, tx: Transaction) -> error::Result> { if self.is_imported(&tx.hash) { return Err(error::Error::AlreadyImported(Box::new(tx.hash))) } - let tx = WaitingTransaction::new( - tx, - self.ready.provided_tags(), - &self.recently_pruned, - ); + let tx = WaitingTransaction::new(tx, self.ready.provided_tags(), &self.recently_pruned); trace!(target: "txpool", "[{:?}] {:?}", tx.transaction.hash, tx); debug!( target: "txpool", @@ -297,12 +283,12 @@ impl BasePool BasePool) -> error::Result> { + fn import_to_ready( + &mut self, + tx: WaitingTransaction, + ) -> error::Result> { let hash = tx.transaction.hash.clone(); let mut promoted = vec![]; let mut failed = vec![]; @@ -320,13 +309,8 @@ impl BasePool tx, - None => break, - }; - + // take first transaction from the list + while let Some(tx) = to_import.pop() { // find transactions in Future that it unlocks to_import.append(&mut self.future.satisfy_tags(&tx.transaction.provides)); @@ -337,16 +321,18 @@ impl BasePool if first { - debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); - return Err(e) - } else { - failed.push(current_hash); - }, + Err(e) => + if first { + debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); + return Err(e) + } else { + failed.push(current_hash); + }, } first = false; } @@ -365,21 +351,16 @@ impl BasePool impl Iterator>> { + pub fn ready(&self) -> impl Iterator>> { self.ready.get() } /// Returns an iterator over future transactions in the pool. - pub fn futures(&self) -> impl Iterator> { + pub fn futures(&self) -> impl Iterator> { self.future.all() } @@ -391,11 +372,7 @@ impl BasePool BasePool Vec>> { + /// Removes and returns worst transactions from the queues and all transactions that depend on + /// them. Technically the worst transaction should be evaluated by computing the entire pending + /// set. We use a simplified approach to remove the transaction that occupies the pool for the + /// longest time. 
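// `enforce_limits` (below) evicts transactions while either queue exceeds its
// configured bounds, using the simplified "drop whatever has occupied the pool
// longest" policy described in the doc comment. A small sketch of the limit
// check itself, with a `Limit` mirroring the one in `base_pool` (field names
// match the diff; the queue and eviction loop here are illustrative only):

struct Limit {
    count: usize,
    total_bytes: usize,
}

impl Limit {
    fn is_exceeded(&self, count: usize, bytes: usize) -> bool {
        self.count < count || self.total_bytes < bytes
    }
}

fn main() {
    let ready = Limit { count: 2, total_bytes: 1024 };
    // (hash, encoded size) pairs, oldest first.
    let mut queue: Vec<(u64, usize)> = vec![(1, 400), (2, 400), (3, 400)];
    let mut bytes: usize = queue.iter().map(|(_, b)| *b).sum();
    // Evict the oldest entry until both the count and byte bounds hold.
    while ready.is_exceeded(queue.len(), bytes) {
        let (hash, b) = queue.remove(0);
        bytes -= b;
        println!("evicted {}", hash);
    }
    assert_eq!(queue.len(), 2);
}
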
+ pub fn enforce_limits( + &mut self, + ready: &Limit, + future: &Limit, + ) -> Vec>> { let mut removed = vec![]; while ready.is_exceeded(self.ready.len(), self.ready.bytes()) { // find the worst transaction - let minimal = self.ready - .fold(|minimal, current| { - let transaction = ¤t.transaction; - match minimal { - None => Some(transaction.clone()), - Some(ref tx) if tx.insertion_id > transaction.insertion_id => { - Some(transaction.clone()) - }, - other => other, - } - }); + let minimal = self.ready.fold(|minimal, current| { + let transaction = ¤t.transaction; + match minimal { + None => Some(transaction.clone()), + Some(ref tx) if tx.insertion_id > transaction.insertion_id => + Some(transaction.clone()), + other => other, + } + }); if let Some(minimal) = minimal { removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) } else { - break; + break } } while future.is_exceeded(self.future.len(), self.future.bytes()) { // find the worst transaction - let minimal = self.future - .fold(|minimal, current| { - match minimal { - None => Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => { - Some(current.clone()) - }, - other => other, - } - }); + let minimal = self.future.fold(|minimal, current| match minimal { + None => Some(current.clone()), + Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), + other => other, + }); if let Some(minimal) = minimal { removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) } else { - break; + break } } @@ -480,7 +455,7 @@ impl BasePool) -> PruneStatus { + pub fn prune_tags(&mut self, tags: impl IntoIterator) -> PruneStatus { let mut to_import = vec![]; let mut pruned = vec![]; let recently_pruned = &mut self.recently_pruned[self.recently_pruned_index]; @@ -509,11 +484,7 @@ impl BasePool> = Transaction { + data: vec![], + bytes: 1, + hash: 1u64, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![], + propagate: true, + source: Source::External, + }; + #[test] fn should_import_transaction_to_ready() { // given let mut pool = pool(); // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1u64, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap(); // then assert_eq!(pool.ready().count(), 1); @@ -582,35 +556,16 @@ mod tests { let mut pool = pool(); // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap_err(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap_err(); // then assert_eq!(pool.ready().count(), 1); assert_eq!(pool.ready.len(), 1); } - #[test] fn should_import_transaction_to_future_and_promote_it_later() { // given @@ -619,28 +574,20 @@ mod tests { // when pool.import(Transaction { data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, requires: 
vec![vec![0]], provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); pool.import(Transaction { data: vec![2u8], - bytes: 1, hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![], provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then assert_eq!(pool.ready().count(), 2); @@ -655,62 +602,45 @@ mod tests { // when pool.import(Transaction { data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], - bytes: 1, hash: 3, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![2]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![2u8], - bytes: 1, hash: 2, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![1]], provides: vec![vec![3], vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![4u8], - bytes: 1, hash: 4, priority: 1_000u64, - valid_till: 64u64, requires: vec![vec![3], vec![4]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); - let res = pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0], vec![4]], - propagate: true, - source: Source::External, - }).unwrap(); + let res = pool + .import(Transaction { + data: vec![5u8], + hash: 5, + provides: vec![vec![0], vec![4]], + ..DEFAULT_TX.clone() + }) + .unwrap(); // then let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); @@ -721,12 +651,15 @@ mod tests { assert_eq!(it.next(), Some(4)); assert_eq!(it.next(), Some(3)); assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 5, - promoted: vec![1, 2, 3, 4], - failed: vec![], - removed: vec![], - }); + assert_eq!( + res, + Imported::Ready { + hash: 5, + promoted: vec![1, 2, 3, 4], + failed: vec![], + removed: vec![], + } + ); } #[test] @@ -735,41 +668,31 @@ mod tests { let mut pool = pool(); pool.import(Transaction { data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], - bytes: 1, hash: 3, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![1]], provides: vec![vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); // when pool.import(Transaction { data: vec![2u8], - bytes: 1, hash: 2, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![2]], provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then { @@ -780,28 +703,24 @@ mod tests { assert_eq!(pool.future.len(), 3); // let's close the cycle with one additional transaction - let res = 
pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 50u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); + let res = pool + .import(Transaction { + data: vec![4u8], + hash: 4, + priority: 50u64, + provides: vec![vec![0]], + ..DEFAULT_TX.clone() + }) + .unwrap(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); assert_eq!(it.next(), Some(4)); assert_eq!(it.next(), Some(1)); assert_eq!(it.next(), Some(3)); assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 4, - promoted: vec![1, 3], - failed: vec![2], - removed: vec![], - }); + assert_eq!( + res, + Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } + ); assert_eq!(pool.future.len(), 0); } @@ -811,41 +730,31 @@ mod tests { let mut pool = pool(); pool.import(Transaction { data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], - bytes: 1, hash: 3, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![1]], provides: vec![vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); // when pool.import(Transaction { data: vec![2u8], - bytes: 1, hash: 2, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![2]], provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then { @@ -856,17 +765,15 @@ mod tests { assert_eq!(pool.future.len(), 3); // let's close the cycle with one additional transaction - let err = pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1u64, // lower priority than Tx(2) - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap_err(); + let err = pool + .import(Transaction { + data: vec![4u8], + hash: 4, + priority: 1u64, // lower priority than Tx(2) + provides: vec![vec![0]], + ..DEFAULT_TX.clone() + }) + .unwrap_err(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); assert_eq!(it.next(), None); assert_eq!(pool.ready.len(), 0); @@ -882,26 +789,18 @@ mod tests { let mut pool = pool(); pool.import(Transaction { data: vec![5u8; 1024], - bytes: 1, hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], provides: vec![vec![0], vec![4]], - propagate: true, - source: Source::External, - }).expect("import 1 should be ok"); + ..DEFAULT_TX.clone() + }) + .expect("import 1 should be ok"); pool.import(Transaction { data: vec![3u8; 1024], - bytes: 1, hash: 7, - priority: 5u64, - valid_till: 64u64, - requires: vec![], provides: vec![vec![2], vec![7]], - propagate: true, - source: Source::External, - }).expect("import 2 should be ok"); + ..DEFAULT_TX.clone() + }) + .expect("import 2 should be ok"); assert!(parity_util_mem::malloc_size(&pool) > 5000); } @@ -912,71 +811,50 @@ mod tests { let mut pool = pool(); pool.import(Transaction { data: vec![5u8], - bytes: 1, hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], provides: vec![vec![0], vec![4]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![1u8], - bytes: 1, - hash: 1, 
- priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], - bytes: 1, hash: 3, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![2]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![2u8], - bytes: 1, hash: 2, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![1]], provides: vec![vec![3], vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![4u8], - bytes: 1, hash: 4, priority: 1_000u64, - valid_till: 64u64, requires: vec![vec![3], vec![4]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // future pool.import(Transaction { data: vec![6u8], - bytes: 1, hash: 6, priority: 1_000u64, - valid_till: 64u64, requires: vec![vec![11]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 5); assert_eq!(pool.future.len(), 1); @@ -995,60 +873,40 @@ mod tests { // future (waiting for 0) pool.import(Transaction { data: vec![5u8], - bytes: 1, hash: 5, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], provides: vec![vec![100]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // ready - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap(); pool.import(Transaction { data: vec![2u8], - bytes: 1, hash: 2, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![2]], provides: vec![vec![3]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], - bytes: 1, hash: 3, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![1]], provides: vec![vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![4u8], - bytes: 1, hash: 4, priority: 1_000u64, - valid_till: 64u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 4); assert_eq!(pool.future.len(), 1); @@ -1059,12 +917,10 @@ mod tests { // then assert_eq!(result.pruned.len(), 2); assert_eq!(result.failed.len(), 0); - assert_eq!(result.promoted[0], Imported::Ready { - hash: 5, - promoted: vec![], - failed: vec![], - removed: vec![], - }); + assert_eq!( + result.promoted[0], + Imported::Ready { hash: 5, promoted: vec![], failed: vec![], removed: vec![] } + ); assert_eq!(result.promoted.len(), 1); assert_eq!(pool.future.len(), 0); assert_eq!(pool.ready.len(), 3); @@ -1074,48 +930,52 @@ mod tests { #[test] fn transaction_debug() { assert_eq!( - format!("{:?}", Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![2]], - provides: vec![vec![4]], - propagate: true, - 
source: Source::External, - }), + format!( + "{:?}", + Transaction { + data: vec![4u8], + hash: 4, + priority: 1_000u64, + requires: vec![vec![3], vec![2]], + provides: vec![vec![4]], + ..DEFAULT_TX.clone() + } + ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03,02], provides: [04], data: [4]}".to_owned() +source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" + .to_owned() ); } #[test] fn transaction_propagation() { - assert_eq!(Transaction { + assert_eq!( + Transaction { data: vec![4u8], - bytes: 1, hash: 4, priority: 1_000u64, - valid_till: 64u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], - propagate: true, - source: Source::External, - }.is_propagable(), true); + ..DEFAULT_TX.clone() + } + .is_propagable(), + true + ); - assert_eq!(Transaction { + assert_eq!( + Transaction { data: vec![4u8], - bytes: 1, hash: 4, priority: 1_000u64, - valid_till: 64u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], propagate: false, - source: Source::External, - }.is_propagable(), false); + ..DEFAULT_TX.clone() + } + .is_propagable(), + false + ); } #[test] @@ -1129,14 +989,9 @@ source: TransactionSource::External, requires: [03,02], provides: [04], data: [4 // then let err = pool.import(Transaction { data: vec![5u8], - bytes: 1, hash: 5, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], - provides: vec![], - propagate: true, - source: Source::External, + ..DEFAULT_TX.clone() }); if let Err(error::Error::RejectedFutureTransaction) = err { @@ -1153,15 +1008,11 @@ source: TransactionSource::External, requires: [03,02], provides: [04], data: [4 // when pool.import(Transaction { data: vec![5u8], - bytes: 1, hash: 5, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then assert_eq!(pool.future.len(), 1); @@ -1183,15 +1034,11 @@ source: TransactionSource::External, requires: [03,02], provides: [04], data: [4 let flag_value = pool.with_futures_enabled(|pool, flag| { pool.import(Transaction { data: vec![5u8], - bytes: 1, hash: 5, - priority: 5u64, - valid_till: 64u64, requires: vec![vec![0]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); flag }); diff --git a/client/transaction-pool/graph/src/future.rs b/client/transaction-pool/src/graph/future.rs similarity index 82% rename from client/transaction-pool/graph/src/future.rs rename to client/transaction-pool/src/graph/future.rs index 80e6825d4ff9d..6ed1f1014304f 100644 --- a/client/transaction-pool/graph/src/future.rs +++ b/client/transaction-pool/src/graph/future.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,20 +18,17 @@ use std::{ collections::{HashMap, HashSet}, - fmt, - hash, + fmt, hash, sync::Arc, }; use sp_core::hexdisplay::HexDisplay; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, -}; -use wasm_timer::Instant; +use sp_runtime::transaction_validity::TransactionTag as Tag; +use std::time::Instant; -use crate::base_pool::Transaction; +use super::base_pool::Transaction; -#[cfg_attr(not(target_os = "unknown"), derive(parity_util_mem::MallocSizeOf))] +#[derive(parity_util_mem::MallocSizeOf)] /// Transaction with partially satisfied dependencies. pub struct WaitingTransaction { /// Transaction details. @@ -47,24 +44,25 @@ impl fmt::Debug for WaitingTransaction>() + .join(", "), + )?; + write!(fmt, "}}") } } impl Clone for WaitingTransaction { fn clone(&self) -> Self { - WaitingTransaction { + Self { transaction: self.transaction.clone(), missing_tags: self.missing_tags.clone(), - imported_at: self.imported_at.clone(), + imported_at: self.imported_at, } } } @@ -79,22 +77,20 @@ impl WaitingTransaction { provided: &HashMap, recently_pruned: &[HashSet], ) -> Self { - let missing_tags = transaction.requires + let missing_tags = transaction + .requires .iter() .filter(|tag| { // is true if the tag is already satisfied either via transaction in the pool // or one that was recently included. - let is_provided = provided.contains_key(&**tag) || recently_pruned.iter().any(|x| x.contains(&**tag)); + let is_provided = provided.contains_key(&**tag) || + recently_pruned.iter().any(|x| x.contains(&**tag)); !is_provided }) .cloned() .collect(); - WaitingTransaction { - transaction: Arc::new(transaction), - missing_tags, - imported_at: Instant::now(), - } + Self { transaction: Arc::new(transaction), missing_tags, imported_at: Instant::now() } } /// Marks the tag as satisfied. @@ -112,8 +108,7 @@ impl WaitingTransaction { /// /// Contains transactions that are still awaiting for some other transactions that /// could provide a tag that they require. -#[derive(Debug)] -#[cfg_attr(not(target_os = "unknown"), derive(parity_util_mem::MallocSizeOf))] +#[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct FutureTransactions { /// tags that are not yet provided by any transaction and we await for them wanted_tags: HashMap>, @@ -123,10 +118,7 @@ pub struct FutureTransactions { impl Default for FutureTransactions { fn default() -> Self { - FutureTransactions { - wanted_tags: Default::default(), - waiting: Default::default(), - } + Self { wanted_tags: Default::default(), waiting: Default::default() } } } @@ -146,7 +138,10 @@ impl FutureTransactions { /// we should remove the transactions from here and move them to the Ready queue. pub fn import(&mut self, tx: WaitingTransaction) { assert!(!tx.is_ready(), "Transaction is ready."); - assert!(!self.waiting.contains_key(&tx.transaction.hash), "Transaction is already imported."); + assert!( + !self.waiting.contains_key(&tx.transaction.hash), + "Transaction is already imported." 
+ ); // Add all tags that are missing for tag in &tx.missing_tags { @@ -165,14 +160,20 @@ impl FutureTransactions { /// Returns a list of known transactions pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { - hashes.iter().map(|h| self.waiting.get(h).map(|x| x.transaction.clone())).collect() + hashes + .iter() + .map(|h| self.waiting.get(h).map(|x| x.transaction.clone())) + .collect() } /// Satisfies provided tags in transactions that are waiting for them. /// /// Returns (and removes) transactions that became ready after their last tag got /// satisfied and now we can remove them from Future and move to Ready queue. - pub fn satisfy_tags>(&mut self, tags: impl IntoIterator) -> Vec> { + pub fn satisfy_tags>( + &mut self, + tags: impl IntoIterator, + ) -> Vec> { let mut became_ready = vec![]; for tag in tags { @@ -207,7 +208,9 @@ impl FutureTransactions { let remove = if let Some(wanted) = self.wanted_tags.get_mut(&tag) { wanted.remove(hash); wanted.is_empty() - } else { false }; + } else { + false + }; if remove { self.wanted_tags.remove(&tag); } @@ -220,14 +223,15 @@ impl FutureTransactions { } /// Fold a list of future transactions to compute a single value. - pub fn fold, &WaitingTransaction) -> Option>(&mut self, f: F) -> Option { - self.waiting - .values() - .fold(None, f) + pub fn fold, &WaitingTransaction) -> Option>( + &mut self, + f: F, + ) -> Option { + self.waiting.values().fold(None, f) } /// Returns iterator over all future transactions - pub fn all(&self) -> impl Iterator> { + pub fn all(&self) -> impl Iterator> { self.waiting.values().map(|waiting| &*waiting.transaction) } @@ -267,7 +271,8 @@ mod tests { provides: vec![vec![3], vec![4]], propagate: true, source: TransactionSource::External, - }.into(), + } + .into(), missing_tags: vec![vec![1u8], vec![2u8]].into_iter().collect(), imported_at: std::time::Instant::now(), }); diff --git a/client/transaction-pool/graph/src/listener.rs b/client/transaction-pool/src/graph/listener.rs similarity index 86% rename from client/transaction-pool/graph/src/listener.rs rename to client/transaction-pool/src/graph/listener.rs index 1bc3720fa6b85..b8149018f7836 100644 --- a/client/transaction-pool/graph/src/listener.rs +++ b/client/transaction-pool/src/graph/listener.rs @@ -1,7 +1,6 @@ - // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,15 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{ - collections::HashMap, hash, fmt::Debug, -}; +use std::{collections::HashMap, fmt::Debug, hash}; + use linked_hash_map::LinkedHashMap; +use log::{debug, trace}; use serde::Serialize; -use crate::{watcher, ChainApi, ExtrinsicHash, BlockHash}; -use log::{debug, trace, warn}; use sp_runtime::traits; +use super::{watcher, BlockHash, ChainApi, ExtrinsicHash}; + /// Extrinsic pool default listener. 
pub struct Listener { watchers: HashMap>>, @@ -37,15 +36,15 @@ const MAX_FINALITY_WATCHERS: usize = 512; impl Default for Listener { fn default() -> Self { - Listener { - watchers: Default::default(), - finality_watchers: Default::default(), - } + Self { watchers: Default::default(), finality_watchers: Default::default() } } } impl Listener { - fn fire(&mut self, hash: &H, fun: F) where F: FnOnce(&mut watcher::Sender>) { + fn fire(&mut self, hash: &H, fun: F) + where + F: FnOnce(&mut watcher::Sender>), + { let clean = if let Some(h) = self.watchers.get_mut(hash) { fun(h); h.is_done() @@ -97,12 +96,8 @@ impl Listener { } /// Transaction was removed as invalid. - pub fn invalid(&mut self, tx: &H, warn: bool) { - if warn { - warn!(target: "txpool", "[{:?}] Extrinsic invalid", tx); - } else { - debug!(target: "txpool", "[{:?}] Extrinsic invalid", tx); - } + pub fn invalid(&mut self, tx: &H) { + debug!(target: "txpool", "[{:?}] Extrinsic invalid", tx); self.fire(tx, |watcher| watcher.invalid()); } @@ -115,7 +110,7 @@ impl Listener { while self.finality_watchers.len() > MAX_FINALITY_WATCHERS { if let Some((hash, txs)) = self.finality_watchers.pop_front() { for tx in txs { - self.fire(&tx, |s| s.finality_timeout(hash.clone())); + self.fire(&tx, |s| s.finality_timeout(hash)); } } } diff --git a/client/transaction-pool/graph/src/lib.rs b/client/transaction-pool/src/graph/mod.rs similarity index 81% rename from client/transaction-pool/graph/src/lib.rs rename to client/transaction-pool/src/graph/mod.rs index bf220ce22973a..3ecfb8fe68c60 100644 --- a/client/transaction-pool/graph/src/lib.rs +++ b/client/transaction-pool/src/graph/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -31,14 +31,17 @@ mod listener; mod pool; mod ready; mod rotator; -mod validated_pool; mod tracked_map; +mod validated_pool; pub mod base_pool; pub mod watcher; -pub use self::base_pool::Transaction; -pub use self::pool::{ - Pool, Options, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, - BlockHash, NumberFor, TransactionFor, ValidatedTransaction, +pub use self::{ + base_pool::Transaction, + pool::{ + BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, + TransactionFor, + }, }; +pub use validated_pool::{IsValidator, ValidatedTransaction}; diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/src/graph/pool.rs similarity index 69% rename from client/transaction-pool/graph/src/pool.rs rename to client/transaction-pool/src/graph/pool.rs index 56ff550d7754f..2af5a8a19a5a9 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,27 +16,24 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
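// The `Listener::fire` hunk above applies a closure to the sender registered
// for a hash and drops the entry once the watcher reports itself done. A
// stand-alone sketch of that "fire, then clean up" pattern over a HashMap —
// the watcher here is just a counter that is done after two events, purely
// for illustration:

use std::collections::HashMap;

#[derive(Default)]
struct Watcher {
    events: u32,
}

impl Watcher {
    fn is_done(&self) -> bool {
        self.events >= 2
    }
}

struct Listener {
    watchers: HashMap<u64, Watcher>,
}

impl Listener {
    fn fire<F>(&mut self, hash: &u64, fun: F)
    where
        F: FnOnce(&mut Watcher),
    {
        let clean = if let Some(w) = self.watchers.get_mut(hash) {
            fun(w);
            w.is_done()
        } else {
            false
        };
        if clean {
            // As in the real listener: once a watcher has nothing more to
            // report, its entry is removed from the map.
            self.watchers.remove(hash);
        }
    }
}

fn main() {
    let mut l = Listener { watchers: HashMap::from([(1, Watcher::default())]) };
    l.fire(&1, |w| w.events += 1);
    assert!(l.watchers.contains_key(&1));
    l.fire(&1, |w| w.events += 1);
    assert!(!l.watchers.contains_key(&1));
}
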
-use std::{ - collections::HashMap, - sync::Arc, -}; - -use crate::{base_pool as base, watcher::Watcher}; +use std::{collections::HashMap, sync::Arc}; -use futures::Future; +use futures::{channel::mpsc::Receiver, Future}; +use sc_transaction_pool_api::error; use sp_runtime::{ generic::BlockId, - traits::{self, SaturatedConversion, Block as BlockT}, + traits::{self, Block as BlockT, SaturatedConversion}, transaction_validity::{ - TransactionValidity, TransactionTag as Tag, TransactionValidityError, TransactionSource, + TransactionSource, TransactionTag as Tag, TransactionValidity, TransactionValidityError, }, }; -use sp_transaction_pool::error; -use wasm_timer::Instant; -use futures::channel::mpsc::Receiver; +use std::time::Instant; -use crate::validated_pool::ValidatedPool; -pub use crate::validated_pool::ValidatedTransaction; +use super::{ + base_pool as base, + validated_pool::{IsValidator, ValidatedPool, ValidatedTransaction}, + watcher::Watcher, +}; /// Modification notification event stream type; pub type EventStream = Receiver; @@ -52,11 +49,8 @@ pub type NumberFor = traits::NumberFor<::Block>; /// A type of transaction stored in the pool pub type TransactionFor = Arc, ExtrinsicFor>>; /// A type of validated transaction stored in the pool. -pub type ValidatedTransactionFor = ValidatedTransaction< - ExtrinsicHash, - ExtrinsicFor, - ::Error, ->; +pub type ValidatedTransactionFor = + ValidatedTransaction, ExtrinsicFor, ::Error>; /// Concrete extrinsic validation and query logic. pub trait ChainApi: Send + Sync { @@ -65,11 +59,12 @@ pub trait ChainApi: Send + Sync { /// Error type. type Error: From + error::IntoPoolError; /// Validate transaction future. - type ValidationFuture: Future> + Send + Unpin; + type ValidationFuture: Future> + Send + Unpin; /// Body future (since block body might be remote) - type BodyFuture: Future< - Output = Result::Extrinsic>>, Self::Error> - > + Unpin + Send + 'static; + type BodyFuture: Future::Extrinsic>>, Self::Error>> + + Unpin + + Send + + 'static; /// Verify extrinsic at given block. fn validate_transaction( @@ -96,6 +91,12 @@ pub trait ChainApi: Send + Sync { /// Returns a block body given the block id. fn block_body(&self, at: &BlockId) -> Self::BodyFuture; + + /// Returns a block header given the block id. + fn block_header( + &self, + at: &BlockId, + ) -> Result::Header>, Self::Error>; } /// Pool configuration options. @@ -111,15 +112,9 @@ pub struct Options { impl Default for Options { fn default() -> Self { - Options { - ready: base::Limit { - count: 8192, - total_bytes: 20 * 1024 * 1024, - }, - future: base::Limit { - count: 512, - total_bytes: 1 * 1024 * 1024, - }, + Self { + ready: base::Limit { count: 8192, total_bytes: 20 * 1024 * 1024 }, + future: base::Limit { count: 512, total_bytes: 1 * 1024 * 1024 }, reject_future_transactions: false, } } @@ -138,7 +133,6 @@ pub struct Pool { validated_pool: Arc>, } -#[cfg(not(target_os = "unknown"))] impl parity_util_mem::MallocSizeOf for Pool where ExtrinsicFor: parity_util_mem::MallocSizeOf, @@ -150,10 +144,8 @@ where impl Pool { /// Create a new transaction pool. 
- pub fn new(options: Options, api: Arc<B>) -> Self {
- Pool {
- validated_pool: Arc::new(ValidatedPool::new(options, api)),
- }
+ pub fn new(options: Options, is_validator: IsValidator, api: Arc<B>) -> Self {
+ Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)) }
 }

 /// Imports a bunch of unverified extrinsics to the pool
@@ -161,7 +153,7 @@ impl<B: ChainApi> Pool<B> {
 &self,
 at: &BlockId<B::Block>,
 source: TransactionSource,
- xts: impl IntoIterator<Item=ExtrinsicFor<B>>,
+ xts: impl IntoIterator<Item = ExtrinsicFor<B>>,
 ) -> Result<Vec<Result<ExtrinsicHash<B>, B::Error>>, B::Error> {
 let xts = xts.into_iter().map(|xt| (source, xt));
 let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await?;
@@ -175,7 +167,7 @@ impl<B: ChainApi> Pool<B> {
 &self,
 at: &BlockId<B::Block>,
 source: TransactionSource,
- xts: impl IntoIterator<Item=ExtrinsicFor<B>>,
+ xts: impl IntoIterator<Item = ExtrinsicFor<B>>,
 ) -> Result<Vec<Result<ExtrinsicHash<B>, B::Error>>, B::Error> {
 let xts = xts.into_iter().map(|xt| (source, xt));
 let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await?;
@@ -193,7 +185,7 @@ impl<B: ChainApi> Pool<B> {
 res.expect("One extrinsic passed; one result returned; qed")
 }

- /// Import a single extrinsic and starts to watch their progress in the pool.
+ /// Import a single extrinsic and start to watch its progress in the pool.
 pub async fn submit_and_watch(
 &self,
 at: &BlockId<B::Block>,
@@ -201,13 +193,9 @@ impl<B: ChainApi> Pool<B> {
 xt: ExtrinsicFor<B>,
 ) -> Result<Watcher<ExtrinsicHash<B>, ExtrinsicHash<B>>, B::Error> {
 let block_number = self.resolve_block_number(at)?;
- let (_, tx) = self.verify_one(
- at,
- block_number,
- source,
- xt,
- CheckBannedBeforeVerify::Yes,
- ).await;
+ let (_, tx) = self
+ .verify_one(at, block_number, source, xt, CheckBannedBeforeVerify::Yes)
+ .await;
 self.validated_pool.submit_and_watch(tx)
 }

@@ -216,7 +204,6 @@ impl<B: ChainApi> Pool<B> {
 &self,
 revalidated_transactions: HashMap<ExtrinsicHash<B>, ValidatedTransactionFor<B>>,
 ) {
- let now = Instant::now();
 self.validated_pool.resubmit(revalidated_transactions);
 log::debug!(target: "txpool",
@@ -237,13 +224,17 @@ impl<B: ChainApi> Pool<B> {
 hashes: &[ExtrinsicHash<B>],
 ) -> Result<(), B::Error> {
 // Get details of all extrinsics that are already in the pool
- let in_pool_tags = self.validated_pool.extrinsics_tags(hashes)
- .into_iter().filter_map(|x| x).flat_map(|x| x);
+ let in_pool_tags = self
+ .validated_pool
+ .extrinsics_tags(hashes)
+ .into_iter()
+ .filter_map(|x| x)
+ .flatten();

 // Prune all transactions that provide given tags
 let prune_status = self.validated_pool.prune_tags(in_pool_tags)?;
- let pruned_transactions = hashes.into_iter().cloned()
- .chain(prune_status.pruned.iter().map(|tx| tx.hash.clone()));
+ let pruned_transactions =
+ hashes.iter().cloned().chain(prune_status.pruned.iter().map(|tx| tx.hash));
 self.validated_pool.fire_pruned(at, pruned_transactions)
 }

@@ -266,10 +257,12 @@ impl<B: ChainApi> Pool<B> {
 extrinsics.len()
 );
 // Get details of all extrinsics that are already in the pool
- let in_pool_hashes = extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::<Vec<_>>();
+ let in_pool_hashes =
+ extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::<Vec<_>>();
 let in_pool_tags = self.validated_pool.extrinsics_tags(&in_pool_hashes);

- // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option<Vec<Tag>>)`)
+ // Zip the ones from the pool with the full list (we get pairs `(Extrinsic,
+ // Option<Vec<Tag>>)`)
 let all = extrinsics.iter().zip(in_pool_tags.into_iter());

 let mut future_tags = Vec::new();
@@ -280,7 +273,9 @@
 // if it's not found in the pool query the runtime at parent block
 // to get validity info and tags that the extrinsic provides.
 None => {
- let validity = self.validated_pool.api()
+ let validity = self
+ .validated_pool
+ .api()
 .validate_transaction(parent, TransactionSource::InBlock, extrinsic.clone())
 .await;

@@ -318,35 +313,27 @@ impl<B: ChainApi> Pool<B> {
 pub async fn prune_tags(
 &self,
 at: &BlockId<B::Block>,
- tags: impl IntoIterator<Item=Tag>,
- known_imported_hashes: impl IntoIterator<Item=ExtrinsicHash<B>> + Clone,
+ tags: impl IntoIterator<Item = Tag>,
+ known_imported_hashes: impl IntoIterator<Item = ExtrinsicHash<B>> + Clone,
 ) -> Result<(), B::Error> {
 log::debug!(target: "txpool", "Pruning at {:?}", at);
 // Prune all transactions that provide given tags
- let prune_status = match self.validated_pool.prune_tags(tags) {
- Ok(prune_status) => prune_status,
- Err(e) => return Err(e),
- };
+ let prune_status = self.validated_pool.prune_tags(tags)?;

 // Make sure that we don't revalidate extrinsics that were part of the recently
 // imported block. This is especially important for UTXO-like chains cause the
 // inputs are pruned so such transaction would go to future again.
- self.validated_pool.ban(&Instant::now(), known_imported_hashes.clone().into_iter());
+ self.validated_pool
+ .ban(&Instant::now(), known_imported_hashes.clone().into_iter());

 // Try to re-validate pruned transactions since some of them might be still valid.
 // note that `known_imported_hashes` will be rejected here due to temporary ban.
- let pruned_hashes = prune_status.pruned
- .iter()
- .map(|tx| tx.hash.clone()).collect::<Vec<_>>();
- let pruned_transactions = prune_status.pruned
- .into_iter()
- .map(|tx| (tx.source, tx.data.clone()));
+ let pruned_hashes = prune_status.pruned.iter().map(|tx| tx.hash).collect::<Vec<_>>();
+ let pruned_transactions =
+ prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone()));

- let reverified_transactions = self.verify(
- at,
- pruned_transactions,
- CheckBannedBeforeVerify::Yes,
- ).await?;
+ let reverified_transactions =
+ self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await?;

 log::trace!(target: "txpool", "Pruning at {:?}. Resubmitting transactions.", at);
 // And finally - submit reverified transactions back to the pool
@@ -366,16 +353,16 @@ impl<B: ChainApi> Pool<B> {

 /// Resolves block number by id.
 fn resolve_block_number(&self, at: &BlockId<B::Block>) -> Result<NumberFor<B>, B::Error> {
- self.validated_pool.api().block_id_to_number(at)
- .and_then(|number| number.ok_or_else(||
- error::Error::InvalidBlockId(format!("{:?}", at)).into()))
+ self.validated_pool.api().block_id_to_number(at).and_then(|number| {
+ number.ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())
+ })
 }

 /// Returns future that validates a bunch of transactions at given block.
 async fn verify(
 &self,
 at: &BlockId<B::Block>,
- xts: impl IntoIterator<Item=(TransactionSource, ExtrinsicFor<B>)>,
+ xts: impl IntoIterator<Item = (TransactionSource, ExtrinsicFor<B>)>,
 check: CheckBannedBeforeVerify,
 ) -> Result<HashMap<ExtrinsicHash<B>, ValidatedTransactionFor<B>>, B::Error> {
 // we need a block number to compute tx validity
@@ -383,8 +370,11 @@
 let res = futures::future::join_all(
 xts.into_iter()
- .map(|(source, xt)| self.verify_one(at, block_number, source, xt, check))
- ).await.into_iter().collect::<HashMap<_, _>>();
+ .map(|(source, xt)| self.verify_one(at, block_number, source, xt, check)),
+ )
+ .await
+ .into_iter()
+ .collect::<HashMap<_, _>>();

 Ok(res)
 }

@@ -402,74 +392,72 @@
 let ignore_banned = matches!(check, CheckBannedBeforeVerify::No);
 if let Err(err) = self.validated_pool.check_is_known(&hash, ignore_banned) {
- return (hash.clone(), ValidatedTransaction::Invalid(hash, err.into()))
+ return (hash, ValidatedTransaction::Invalid(hash, err))
 }

- let validation_result = self.validated_pool.api().validate_transaction(
- block_id,
- source,
- xt.clone(),
- ).await;
+ let validation_result = self
+ .validated_pool
+ .api()
+ .validate_transaction(block_id, source, xt.clone())
+ .await;

 let status = match validation_result {
 Ok(status) => status,
- Err(e) => return (hash.clone(), ValidatedTransaction::Invalid(hash, e)),
+ Err(e) => return (hash, ValidatedTransaction::Invalid(hash, e)),
 };

 let validity = match status {
- Ok(validity) => {
+ Ok(validity) =>
 if validity.provides.is_empty() {
- ValidatedTransaction::Invalid(hash.clone(), error::Error::NoTagsProvided.into())
+ ValidatedTransaction::Invalid(hash, error::Error::NoTagsProvided.into())
 } else {
 ValidatedTransaction::valid_at(
 block_number.saturated_into::<u64>(),
- hash.clone(),
+ hash,
 source,
 xt,
 bytes,
 validity,
 )
- }
- },
+ },
 Err(TransactionValidityError::Invalid(e)) =>
- ValidatedTransaction::Invalid(hash.clone(), error::Error::InvalidTransaction(e).into()),
+ ValidatedTransaction::Invalid(hash, error::Error::InvalidTransaction(e).into()),
 Err(TransactionValidityError::Unknown(e)) =>
- ValidatedTransaction::Unknown(hash.clone(), error::Error::UnknownTransaction(e).into()),
+ ValidatedTransaction::Unknown(hash, error::Error::UnknownTransaction(e).into()),
 };

 (hash, validity)
 }

 /// get a reference to the underlying validated pool.
- pub fn validated_pool(&self) -> &ValidatedPool { + pub fn validated_pool(&self) -> &ValidatedPool { &self.validated_pool } } impl Clone for Pool { fn clone(&self) -> Self { - Self { - validated_pool: self.validated_pool.clone(), - } + Self { validated_pool: self.validated_pool.clone() } } } #[cfg(test)] mod tests { - use std::collections::{HashMap, HashSet}; - use parking_lot::Mutex; + use super::{super::base_pool::Limit, *}; + use assert_matches::assert_matches; + use codec::Encode; use futures::executor::block_on; - use super::*; - use sp_transaction_pool::TransactionStatus; + use parking_lot::Mutex; + use sc_transaction_pool_api::TransactionStatus; use sp_runtime::{ traits::Hash, - transaction_validity::{ValidTransaction, InvalidTransaction, TransactionSource}, + transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; - use codec::Encode; - use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId, Hashing}; - use assert_matches::assert_matches; - use wasm_timer::Instant; - use crate::base_pool::Limit; + use std::{ + collections::{HashMap, HashSet}, + time::Instant, + }; + use substrate_test_runtime::{AccountId, Block, Extrinsic, Hashing, Transfer, H256}; const INVALID_NONCE: u64 = 254; const SOURCE: TransactionSource = TransactionSource::External; @@ -497,43 +485,64 @@ mod tests { ) -> Self::ValidationFuture { let hash = self.hash_and_length(&uxt).0; let block_number = self.block_id_to_number(at).unwrap().unwrap(); - let nonce = uxt.transfer().nonce; - - // This is used to control the test flow. - if nonce > 0 { - let opt = self.delay.lock().take(); - if let Some(delay) = opt { - if delay.recv().is_err() { - println!("Error waiting for delay!"); + + let res = match uxt { + Extrinsic::Transfer { transfer, .. } => { + let nonce = transfer.nonce; + + // This is used to control the test flow. 
+ if nonce > 0 { + let opt = self.delay.lock().take(); + if let Some(delay) = opt { + if delay.recv().is_err() { + println!("Error waiting for delay!"); + } + } } - } - } - if self.invalidate.lock().contains(&hash) { - return futures::future::ready(Ok(InvalidTransaction::Custom(0).into())); - } + if self.invalidate.lock().contains(&hash) { + InvalidTransaction::Custom(0).into() + } else if nonce < block_number { + InvalidTransaction::Stale.into() + } else { + let mut transaction = ValidTransaction { + priority: 4, + requires: if nonce > block_number { + vec![vec![nonce as u8 - 1]] + } else { + vec![] + }, + provides: if nonce == INVALID_NONCE { + vec![] + } else { + vec![vec![nonce as u8]] + }, + longevity: 3, + propagate: true, + }; + + if self.clear_requirements.lock().contains(&hash) { + transaction.requires.clear(); + } + + if self.add_requirements.lock().contains(&hash) { + transaction.requires.push(vec![128]); + } + + Ok(transaction) + } + }, + Extrinsic::IncludeData(_) => Ok(ValidTransaction { + priority: 9001, + requires: vec![], + provides: vec![vec![42]], + longevity: 9001, + propagate: false, + }), + _ => unimplemented!(), + }; - futures::future::ready(if nonce < block_number { - Ok(InvalidTransaction::Stale.into()) - } else { - let mut transaction = ValidTransaction { - priority: 4, - requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, - provides: if nonce == INVALID_NONCE { vec![] } else { vec![vec![nonce as u8]] }, - longevity: 3, - propagate: true, - }; - - if self.clear_requirements.lock().contains(&hash) { - transaction.requires.clear(); - } - - if self.add_requirements.lock().contains(&hash) { - transaction.requires.push(vec![128]); - } - - Ok(Ok(transaction)) - }) + futures::future::ready(Ok(res)) } /// Returns a block number given the block id. 
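The `TestApi` used by these tests encodes per-account ordering entirely through validity tags: a transfer with nonce `n` provides the tag `[n]` and, when `n` is ahead of the chain's block number, requires the tag `[n - 1]` produced by its predecessor. A minimal sketch of that scheme (the helper name `mock_validity` is illustrative and the `INVALID_NONCE` special case is omitted):

use sp_runtime::transaction_validity::ValidTransaction;

fn mock_validity(nonce: u64, block_number: u64) -> ValidTransaction {
    ValidTransaction {
        priority: 4,
        // a transaction from the future waits on the tag of its predecessor
        requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] },
        // providing the own-nonce tag is what unlocks the next transaction
        provides: vec![vec![nonce as u8]],
        longevity: 3,
        propagate: true,
    }
}

Because `verify_one` rejects any validity that provides no tags (`error::Error::NoTagsProvided`), the `INVALID_NONCE` branch with its empty `provides` is how the tests manufacture a transaction the pool must refuse.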
@@ -568,6 +577,13 @@ mod tests { fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { futures::future::ready(Ok(None)) } + + fn block_header( + &self, + _: &BlockId, + ) -> Result::Header>, Self::Error> { + Ok(None) + } } fn uxt(transfer: Transfer) -> Extrinsic { @@ -579,7 +595,7 @@ mod tests { } fn pool() -> Pool { - Pool::new(Default::default(), TestApi::default().into()) + Pool::new(Default::default(), true.into(), TestApi::default().into()) } #[test] @@ -588,12 +604,17 @@ mod tests { let pool = pool(); // when - let hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); // then assert_eq!(pool.validated_pool().ready().map(|v| v.hash).collect::>(), vec![hash]); @@ -620,6 +641,26 @@ mod tests { assert_matches!(res.unwrap_err(), error::Error::TemporarilyBanned); } + #[test] + fn should_reject_unactionable_transactions() { + // given + let pool = Pool::new( + Default::default(), + // the node does not author blocks + false.into(), + TestApi::default().into(), + ); + + // after validation `IncludeData` will be set to non-propagable + let uxt = Extrinsic::IncludeData(vec![42]); + + // when + let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); + + // then + assert_matches!(res.unwrap_err(), error::Error::Unactionable); + } + #[test] fn should_notify_about_pool_events() { let (stream, hash0, hash1) = { @@ -628,25 +669,40 @@ mod tests { let stream = pool.validated_pool().import_notification_stream(); // when - let hash0 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); + let hash0 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); // future doesn't count - let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }))).unwrap(); + let _hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); assert_eq!(pool.validated_pool().status().future, 1); @@ -665,24 +721,39 @@ mod tests { fn should_clear_stale_transactions() { // given let pool = 
pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); - let hash3 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }))).unwrap(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + let hash2 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); + let hash3 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + )) + .unwrap(); // when pool.validated_pool.clear_stale(&BlockId::Number(5)).unwrap(); @@ -701,12 +772,17 @@ mod tests { fn should_ban_mined_transactions() { // given let pool = pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); // when block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()])).unwrap(); @@ -718,31 +794,37 @@ mod tests { #[test] fn should_limit_futures() { // given - let limit = Limit { - count: 100, - total_bytes: 200, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }, TestApi::default().into()); + let limit = Limit { count: 100, total_bytes: 200 }; - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); + let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; + + let pool = Pool::new(options, true.into(), TestApi::default().into()); + + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().future, 1); // when - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(2)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 10, - }))).unwrap(); + let hash2 = block_on(pool.submit_one( + 
&BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(2)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 10, + }), + )) + .unwrap(); // then assert_eq!(pool.validated_pool().status().future, 1); @@ -753,23 +835,24 @@ mod tests { #[test] fn should_error_if_reject_immediately() { // given - let limit = Limit { - count: 100, - total_bytes: 10, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }, TestApi::default().into()); + let limit = Limit { count: 100, total_bytes: 10 }; + + let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; + + let pool = Pool::new(options, true.into(), TestApi::default().into()); // when - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap_err(); + block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap_err(); // then assert_eq!(pool.validated_pool().status().ready, 0); @@ -782,12 +865,17 @@ mod tests { let pool = pool(); // when - let err = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: INVALID_NONCE, - }))).unwrap_err(); + let err = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: INVALID_NONCE, + }), + )) + .unwrap_err(); // then assert_eq!(pool.validated_pool().status().ready, 0); @@ -802,12 +890,17 @@ mod tests { fn should_trigger_ready_and_finalized() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -829,19 +922,27 @@ mod tests { fn should_trigger_ready_and_finalized_when_pruning_via_hash() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); // when - block_on( - pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![watcher.hash().clone()]), - ).unwrap(); + block_on(pool.prune_tags( + 
&BlockId::Number(2), + vec![vec![0u8]], + vec![watcher.hash().clone()], + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -858,22 +959,32 @@ mod tests { fn should_trigger_future_and_ready_after_promoted() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 1); // when - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // then @@ -892,13 +1003,13 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + let watcher = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when pool.validated_pool.remove_invalid(&[*watcher.hash()]); - // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -916,7 +1027,8 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + let watcher = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -925,7 +1037,6 @@ mod tests { map.insert(*watcher.hash(), peers.clone()); pool.validated_pool().on_broadcasted(map); - // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -935,15 +1046,11 @@ mod tests { #[test] fn should_trigger_dropped() { // given - let limit = Limit { - count: 1, - total_bytes: 1000, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }, TestApi::default().into()); + let limit = Limit { count: 1, total_bytes: 1000 }; + let options = + Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; + + let pool = Pool::new(options, true.into(), TestApi::default().into()); let xt = uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), @@ -977,7 +1084,7 @@ mod tests { let (tx, rx) = std::sync::mpsc::sync_channel(1); let mut api = TestApi::default(); api.delay = Arc::new(Mutex::new(rx.into())); - let pool = Arc::new(Pool::new(Default::default(), api.into())); + let pool = Arc::new(Pool::new(Default::default(), true.into(), api.into())); // when let xt = uxt(Transfer { @@ -1007,14 +1114,14 @@ mod tests { block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); 
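// Note: `TestApi::validate_transaction` parks on the `delay` channel the first time
// it validates a transfer with `nonce > 0` (the channel is `take`n exactly once), so
// such a transaction sits in verification until the `tx.send(())` further below
// releases it.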
assert_eq!(pool.validated_pool().status().ready, 1); - // Now block import happens before the second transaction is able to finish verification. + // Now block import happens before the second transaction is able to finish + // verification. block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); - // so when we release the verification of the previous one it will have - // something in `requires`, but should go to ready directly, since the previous transaction was imported - // correctly. + // something in `requires`, but should go to ready directly, since the previous + // transaction was imported correctly. tx.send(()).unwrap(); // then diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/src/graph/ready.rs similarity index 86% rename from client/transaction-pool/graph/src/ready.rs rename to client/transaction-pool/src/graph/ready.rs index cbdb25078931e..03689aeb32e6d 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/src/graph/ready.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,21 +17,18 @@ // along with this program. If not, see . use std::{ - collections::{HashMap, HashSet, BTreeSet}, cmp, + collections::{BTreeSet, HashMap, HashSet}, hash, sync::Arc, }; -use serde::Serialize; use log::trace; -use sp_runtime::traits::Member; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, -}; -use sp_transaction_pool::error; +use sc_transaction_pool_api::error; +use serde::Serialize; +use sp_runtime::{traits::Member, transaction_validity::TransactionTag as Tag}; -use crate::{ +use super::{ base_pool::Transaction, future::WaitingTransaction, tracked_map::{self, ReadOnlyTrackedMap, TrackedMap}, @@ -50,16 +47,15 @@ pub struct TransactionRef { impl Clone for TransactionRef { fn clone(&self) -> Self { - TransactionRef { - transaction: self.transaction.clone(), - insertion_id: self.insertion_id, - } + Self { transaction: self.transaction.clone(), insertion_id: self.insertion_id } } } impl Ord for TransactionRef { fn cmp(&self, other: &Self) -> cmp::Ordering { - self.transaction.priority.cmp(&other.transaction.priority) + self.transaction + .priority + .cmp(&other.transaction.priority) .then_with(|| other.transaction.valid_till.cmp(&self.transaction.valid_till)) .then_with(|| other.insertion_id.cmp(&self.insertion_id)) } @@ -93,7 +89,7 @@ pub struct ReadyTx { impl Clone for ReadyTx { fn clone(&self) -> Self { - ReadyTx { + Self { transaction: self.transaction.clone(), unlocks: self.unlocks.clone(), requires_offset: self.requires_offset, @@ -108,15 +104,18 @@ Hence every hash retrieved from `provided_tags` is always present in `ready`; qed "#; +/// Validated transactions that are block ready with all their dependencies met. #[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct ReadyTransactions { - /// Insertion id + /// Next free insertion id (used to indicate when a transaction was inserted into the pool). insertion_id: u64, /// tags that are provided by Ready transactions + /// (only a single transaction can provide a specific tag) provided_tags: HashMap, /// Transactions that are ready (i.e. 
don't have any requirements external to the pool) ready: TrackedMap>, - /// Best transactions that are ready to be included to the block without any other previous transaction. + /// Best transactions that are ready to be included to the block without any other previous + /// transaction. best: BTreeSet>, } @@ -128,7 +127,7 @@ impl tracked_map::Size for ReadyTx { impl Default for ReadyTransactions { fn default() -> Self { - ReadyTransactions { + Self { insertion_id: Default::default(), provided_tags: Default::default(), ready: Default::default(), @@ -147,14 +146,17 @@ impl ReadyTransactions { /// /// Transactions are returned in order: /// 1. First by the dependencies: - /// - never return transaction that requires a tag, which was not provided by one of the previously returned transactions + /// - never return transaction that requires a tag, which was not provided by one of the + /// previously + /// returned transactions /// 2. Then by priority: - /// - If there are two transactions with all requirements satisfied the one with higher priority goes first. + /// - If there are two transactions with all requirements satisfied the one with higher priority + /// goes first. /// 3. Then by the ttl that's left /// - transactions that are valid for a shorter time go first /// 4. Lastly we sort by the time in the queue /// - transactions that are longer in the queue go first - pub fn get(&self) -> impl Iterator>> { + pub fn get(&self) -> impl Iterator>> { BestIterator { all: self.ready.clone(), best: self.best.clone(), @@ -173,9 +175,13 @@ impl ReadyTransactions { ) -> error::Result>>> { assert!( tx.is_ready(), - "Only ready transactions can be imported. Missing: {:?}", tx.missing_tags + "Only ready transactions can be imported. Missing: {:?}", + tx.missing_tags + ); + assert!( + !self.ready.read().contains_key(&tx.transaction.hash), + "Transaction is already imported." ); - assert!(!self.ready.read().contains_key(&tx.transaction.hash), "Transaction is already imported."); self.insertion_id += 1; let insertion_id = self.insertion_id; @@ -198,7 +204,7 @@ impl ReadyTransactions { } else { requires_offset += 1; } - } + } // update provided_tags // call to replace_previous guarantees that we will be overwriting @@ -207,10 +213,7 @@ impl ReadyTransactions { self.provided_tags.insert(tag.clone(), hash.clone()); } - let transaction = TransactionRef { - insertion_id, - transaction - }; + let transaction = TransactionRef { insertion_id, transaction }; // insert to best if it doesn't require any other transaction to be included before it if goes_to_best { @@ -218,29 +221,25 @@ impl ReadyTransactions { } // insert to Ready - ready.insert(hash, ReadyTx { - transaction, - unlocks, - requires_offset, - }); + ready.insert(hash, ReadyTx { transaction, unlocks, requires_offset }); Ok(replaced) } /// Fold a list of ready transactions to compute a single value. - pub fn fold, &ReadyTx) -> Option>(&mut self, f: F) -> Option { - self.ready - .read() - .values() - .fold(None, f) + pub fn fold, &ReadyTx) -> Option>( + &mut self, + f: F, + ) -> Option { + self.ready.read().values().fold(None, f) } - /// Returns true if given hash is part of the queue. + /// Returns true if given transaction is part of the queue. 
pub fn contains(&self, hash: &Hash) -> bool { self.ready.read().contains_key(hash) } - /// Retrive transaction by hash + /// Retrieve transaction by hash pub fn by_hash(&self, hash: &Hash) -> Option>> { self.by_hashes(&[hash.clone()]).into_iter().next().unwrap_or(None) } @@ -248,18 +247,19 @@ impl ReadyTransactions { /// Retrieve transactions by hash pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { let ready = self.ready.read(); - hashes.iter().map(|hash| { - ready.get(hash).map(|x| x.transaction.transaction.clone()) - }).collect() + hashes + .iter() + .map(|hash| ready.get(hash).map(|x| x.transaction.transaction.clone())) + .collect() } /// Removes a subtree of transactions from the ready pool. /// - /// NOTE removing a transaction will also cause a removal of all transactions that depend on that one - /// (i.e. the entire subgraph that this transaction is a start of will be removed). + /// NOTE removing a transaction will also cause a removal of all transactions that depend on + /// that one (i.e. the entire subgraph that this transaction is a start of will be removed). /// All removed transactions are returned. pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec>> { - let to_remove = hashes.iter().cloned().collect::>(); + let to_remove = hashes.to_vec(); self.remove_subtree_with_tag_filter(to_remove, None) } @@ -277,13 +277,12 @@ impl ReadyTransactions { let mut ready = self.ready.write(); while let Some(hash) = to_remove.pop() { if let Some(mut tx) = ready.remove(&hash) { - let invalidated = tx.transaction.transaction.provides - .iter() - .filter(|tag| provides_tag_filter + let invalidated = tx.transaction.transaction.provides.iter().filter(|tag| { + provides_tag_filter .as_ref() .map(|filter| !filter.contains(&**tag)) .unwrap_or(true) - ); + }); let mut removed_some_tags = false; // remove entries from provided_tags @@ -328,7 +327,9 @@ impl ReadyTransactions { let mut to_remove = vec![tag]; while let Some(tag) = to_remove.pop() { - let res = self.provided_tags.remove(&tag) + let res = self + .provided_tags + .remove(&tag) .and_then(|hash| self.ready.write().remove(&hash)); if let Some(tx) = res { @@ -414,19 +415,18 @@ impl ReadyTransactions { fn replace_previous( &mut self, tx: &Transaction, - ) -> error::Result< - (Vec>>, Vec) - > { + ) -> error::Result<(Vec>>, Vec)> { let (to_remove, unlocks) = { // check if we are replacing a transaction - let replace_hashes = tx.provides + let replace_hashes = tx + .provides .iter() .filter_map(|tag| self.provided_tags.get(tag)) .collect::>(); // early exit if we are not replacing anything. if replace_hashes.is_empty() { - return Ok((vec![], vec![])); + return Ok((vec![], vec![])) } // now check if collective priority is lower than the replacement transaction. 
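The `replace_previous` path continued below enforces a simple eviction rule: a new transaction may take over `provides` tags that are already claimed only if its own priority beats the combined priority of every transaction it would evict. A rough sketch of the comparison (the function name `may_replace` is illustrative, not part of the patch):

fn may_replace(new_priority: u64, old_priorities: &[u64]) -> bool {
    // combined priority of everything that would be evicted, accumulated
    // with saturating arithmetic just like the fold in the hunk below
    let old_priority = old_priorities.iter().fold(0u64, |total, p| total.saturating_add(*p));
    new_priority > old_priority
}

When the comparison fails, the code takes the "bail" branch noted right after the fold and refuses the replacement instead of removing the old subtree.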
@@ -435,9 +435,9 @@ impl ReadyTransactions { replace_hashes .iter() .filter_map(|hash| ready.get(hash)) - .fold(0u64, |total, tx| + .fold(0u64, |total, tx| { total.saturating_add(tx.transaction.transaction.priority) - ) + }) }; // bail - the transaction has too low priority to replace the old ones @@ -448,28 +448,22 @@ impl ReadyTransactions { // construct a list of unlocked transactions let unlocks = { let ready = self.ready.read(); - replace_hashes - .iter() - .filter_map(|hash| ready.get(hash)) - .fold(vec![], |mut list, tx| { + replace_hashes.iter().filter_map(|hash| ready.get(hash)).fold( + vec![], + |mut list, tx| { list.extend(tx.unlocks.iter().cloned()); list - }) + }, + ) }; - ( - replace_hashes.into_iter().cloned().collect::>(), - unlocks - ) + (replace_hashes.into_iter().cloned().collect::>(), unlocks) }; let new_provides = tx.provides.iter().cloned().collect::>(); let removed = self.remove_subtree_with_tag_filter(to_remove, Some(new_provides)); - Ok(( - removed, - unlocks - )) + Ok((removed, unlocks)) } /// Returns number of transactions in this queue. @@ -497,7 +491,6 @@ impl BestIterator { if satisfied >= tx_ref.transaction.requires.len() { // If we have satisfied all deps insert to best self.best.insert(tx_ref); - } else { // otherwise we're still awaiting for some deps self.awaiting.insert(tx_ref.transaction.hash.clone(), (satisfied, tx_ref)); @@ -527,12 +520,12 @@ impl Iterator for BestIterator { satisfied += 1; Some((satisfied, tx_ref)) // then get from the pool - } else if let Some(next) = self.all.read().get(hash) { - Some((next.requires_offset + 1, next.transaction.clone())) } else { - None + self.all + .read() + .get(hash) + .map(|next| (next.requires_offset + 1, next.transaction.clone())) }; - if let Some((satisfied, tx_ref)) = res { self.best_or_awaiting(satisfied, tx_ref) } @@ -571,7 +564,7 @@ mod tests { fn import( ready: &mut ReadyTransactions, - tx: Transaction + tx: Transaction, ) -> error::Result>>> { let x = WaitingTransaction::new(tx, ready.provided_tags(), &[]); ready.import(x) @@ -662,7 +655,7 @@ mod tests { bytes: 1, hash: 5, priority: 1, - valid_till: u64::max_value(), // use the max_value() here for testing. + valid_till: u64::MAX, // use the max here for testing. requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, @@ -695,7 +688,7 @@ mod tests { bytes: 1, hash: 5, priority: 1, - valid_till: u64::max_value(), // use the max_value() here for testing. + valid_till: u64::MAX, // use the max here for testing. 
requires: vec![], provides: vec![], propagate: true, @@ -717,28 +710,19 @@ mod tests { tx }; // higher priority = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(2, 3)), - insertion_id: 2, - }); + assert!( + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(2, 3)), insertion_id: 2 } + ); // lower validity = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 2)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); + assert!( + TransactionRef { transaction: Arc::new(with_priority(3, 2)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + ); // lower insertion_id = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); + assert!( + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + ); } } diff --git a/client/transaction-pool/graph/src/rotator.rs b/client/transaction-pool/src/graph/rotator.rs similarity index 88% rename from client/transaction-pool/graph/src/rotator.rs rename to client/transaction-pool/src/graph/rotator.rs index 65e21d0d4b506..910f86b5ed5b8 100644 --- a/client/transaction-pool/graph/src/rotator.rs +++ b/client/transaction-pool/src/graph/rotator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,16 +21,14 @@ //! Keeps only recent extrinsic and discard the ones kept for a significant amount of time. //! Discarded extrinsics are banned so that they don't get re-imported again. +use parking_lot::RwLock; use std::{ collections::HashMap, - hash, - iter, - time::Duration, + hash, iter, + time::{Duration, Instant}, }; -use parking_lot::RwLock; -use wasm_timer::Instant; -use crate::base_pool::Transaction; +use super::base_pool::Transaction; /// Expected size of the banned extrinsics cache. const EXPECTED_SIZE: usize = 2048; @@ -48,10 +46,7 @@ pub struct PoolRotator { impl Default for PoolRotator { fn default() -> Self { - PoolRotator { - ban_time: Duration::from_secs(60 * 30), - banned_until: Default::default(), - } + Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default() } } } @@ -62,7 +57,7 @@ impl PoolRotator { } /// Bans given set of hashes. - pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { + pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { let mut banned = self.banned_until.write(); for hash in hashes { @@ -78,13 +73,17 @@ impl PoolRotator { } } - /// Bans extrinsic if it's stale. /// /// Returns `true` if extrinsic is stale and got banned. 
- pub fn ban_if_stale(&self, now: &Instant, current_block: u64, xt: &Transaction) -> bool { + pub fn ban_if_stale( + &self, + now: &Instant, + current_block: u64, + xt: &Transaction, + ) -> bool { if xt.valid_till > current_block { - return false; + return false } self.ban(now, iter::once(xt.hash.clone())); @@ -108,10 +107,7 @@ mod tests { type Ex = (); fn rotator() -> PoolRotator { - PoolRotator { - ban_time: Duration::from_millis(10), - ..Default::default() - } + PoolRotator { ban_time: Duration::from_millis(10), ..Default::default() } } fn tx() -> (Hash, Transaction) { @@ -161,7 +157,6 @@ mod tests { assert!(rotator.is_banned(&hash)); } - #[test] fn should_clear_banned() { // given @@ -202,14 +197,14 @@ mod tests { let past_block = 0; // when - for i in 0..2*EXPECTED_SIZE { + for i in 0..2 * EXPECTED_SIZE { let tx = tx_with(i as u64, past_block); assert!(rotator.ban_if_stale(&now, past_block, &tx)); } - assert_eq!(rotator.banned_until.read().len(), 2*EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE); // then - let tx = tx_with(2*EXPECTED_SIZE as u64, past_block); + let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block); // trigger a garbage collection assert!(rotator.ban_if_stale(&now, past_block, &tx)); assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); diff --git a/client/transaction-pool/graph/src/tracked_map.rs b/client/transaction-pool/src/graph/tracked_map.rs similarity index 82% rename from client/transaction-pool/graph/src/tracked_map.rs rename to client/transaction-pool/src/graph/tracked_map.rs index c799eb0b96ea1..c1fdda227c6ae 100644 --- a/client/transaction-pool/graph/src/tracked_map.rs +++ b/client/transaction-pool/src/graph/tracked_map.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,13 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{ collections::HashMap, - sync::{Arc, atomic::{AtomicIsize, Ordering as AtomicOrdering}}, + sync::{ + atomic::{AtomicIsize, Ordering as AtomicOrdering}, + Arc, + }, }; -use parking_lot::{RwLock, RwLockWriteGuard, RwLockReadGuard}; -/// Something that can report it's size. +/// Something that can report its size. pub trait Size { fn size(&self) -> usize; } @@ -39,11 +42,7 @@ pub struct TrackedMap { impl Default for TrackedMap { fn default() -> Self { - Self { - index: Arc::new(HashMap::default().into()), - bytes: 0.into(), - length: 0.into(), - } + Self { index: Arc::new(HashMap::default().into()), bytes: 0.into(), length: 0.into() } } } @@ -64,14 +63,12 @@ impl TrackedMap { } /// Lock map for read. - pub fn read<'a>(&'a self) -> TrackedMapReadAccess<'a, K, V> { - TrackedMapReadAccess { - inner_guard: self.index.read(), - } + pub fn read(&self) -> TrackedMapReadAccess { + TrackedMapReadAccess { inner_guard: self.index.read() } } /// Lock map for write. 
- pub fn write<'a>(&'a self) -> TrackedMapWriteAccess<'a, K, V> { + pub fn write(&self) -> TrackedMapWriteAccess { TrackedMapWriteAccess { inner_guard: self.index.write(), bytes: &self.bytes, @@ -87,13 +84,11 @@ pub struct ReadOnlyTrackedMap(Arc>>); impl ReadOnlyTrackedMap where - K: Eq + std::hash::Hash + K: Eq + std::hash::Hash, { /// Lock map for read. - pub fn read<'a>(&'a self) -> TrackedMapReadAccess<'a, K, V> { - TrackedMapReadAccess { - inner_guard: self.0.read(), - } + pub fn read(&self) -> TrackedMapReadAccess { + TrackedMapReadAccess { inner_guard: self.0.read() } } } @@ -103,7 +98,7 @@ pub struct TrackedMapReadAccess<'a, K, V> { impl<'a, K, V> TrackedMapReadAccess<'a, K, V> where - K: Eq + std::hash::Hash + K: Eq + std::hash::Hash, { /// Returns true if map contains key. pub fn contains_key(&self, key: &K) -> bool { @@ -129,17 +124,18 @@ pub struct TrackedMapWriteAccess<'a, K, V> { impl<'a, K, V> TrackedMapWriteAccess<'a, K, V> where - K: Eq + std::hash::Hash, V: Size + K: Eq + std::hash::Hash, + V: Size, { /// Insert value and return previous (if any). pub fn insert(&mut self, key: K, val: V) -> Option { let new_bytes = val.size(); self.bytes.fetch_add(new_bytes as isize, AtomicOrdering::Relaxed); self.length.fetch_add(1, AtomicOrdering::Relaxed); - self.inner_guard.insert(key, val).and_then(|old_val| { + self.inner_guard.insert(key, val).map(|old_val| { self.bytes.fetch_sub(old_val.size() as isize, AtomicOrdering::Relaxed); self.length.fetch_sub(1, AtomicOrdering::Relaxed); - Some(old_val) + old_val }) } @@ -165,7 +161,9 @@ mod tests { use super::*; impl Size for i32 { - fn size(&self) -> usize { *self as usize / 10 } + fn size(&self) -> usize { + *self as usize / 10 + } } #[test] @@ -186,4 +184,4 @@ mod tests { assert_eq!(map.bytes(), 1); assert_eq!(map.len(), 1); } -} \ No newline at end of file +} diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs similarity index 77% rename from client/transaction-pool/graph/src/validated_pool.rs rename to client/transaction-pool/src/graph/validated_pool.rs index 86c2e75832f07..e4aad7f342b5b 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,31 +17,31 @@ // along with this program. If not, see . 
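Before the `validated_pool.rs` changes, it is worth spelling out the `TrackedMap` bookkeeping that the hunk above tidies up (`and_then` becomes `map`): `insert` credits the new value's size and count first and, if an old value is displaced, debits it back, so the two counters always reflect the map's contents. A simplified, single-threaded sketch (the type name `SizeTrackedMap` is illustrative; the real code keeps `AtomicIsize` counters updated with relaxed ordering):

use std::collections::HashMap;

struct SizeTrackedMap {
    index: HashMap<u64, Vec<u8>>,
    bytes: isize,
    length: isize,
}

impl SizeTrackedMap {
    fn insert(&mut self, key: u64, val: Vec<u8>) -> Option<Vec<u8>> {
        // credit the incoming value up front
        self.bytes += val.len() as isize;
        self.length += 1;
        self.index.insert(key, val).map(|old| {
            // a replaced value hands its accounting back
            self.bytes -= old.len() as isize;
            self.length -= 1;
            old
        })
    }
}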
use std::{ - collections::{HashSet, HashMap}, + collections::{HashMap, HashSet}, hash, sync::Arc, }; -use crate::base_pool as base; -use crate::listener::Listener; -use crate::rotator::PoolRotator; -use crate::watcher::Watcher; -use serde::Serialize; - +use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; +use retain_mut::RetainMut; +use sc_transaction_pool_api::{error, PoolStatus}; +use serde::Serialize; use sp_runtime::{ generic::BlockId, traits::{self, SaturatedConversion}, - transaction_validity::{TransactionTag as Tag, ValidTransaction, TransactionSource}, + transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, }; -use sp_transaction_pool::{error, PoolStatus}; -use wasm_timer::Instant; -use futures::channel::mpsc::{channel, Sender}; -use retain_mut::RetainMut; - -use crate::base_pool::PruneStatus; -use crate::pool::{ - EventStream, Options, ChainApi, BlockHash, ExtrinsicHash, ExtrinsicFor, TransactionFor, +use std::time::Instant; + +use super::{ + base_pool::{self as base, PruneStatus}, + listener::Listener, + pool::{ + BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, Options, TransactionFor, + }, + rotator::PoolRotator, + watcher::Watcher, }; /// Pre-validated transaction. Validated pool only accepts transactions wrapped in this enum. @@ -76,34 +76,41 @@ impl ValidatedTransaction { requires: validity.requires, provides: validity.provides, propagate: validity.propagate, - valid_till: at - .saturated_into::() - .saturating_add(validity.longevity), + valid_till: at.saturated_into::().saturating_add(validity.longevity), }) } } /// A type of validated transaction stored in the pool. -pub type ValidatedTransactionFor = ValidatedTransaction< - ExtrinsicHash, - ExtrinsicFor, - ::Error, ->; +pub type ValidatedTransactionFor = + ValidatedTransaction, ExtrinsicFor, ::Error>; + +/// A closure that returns true if the local node is a validator that can author blocks. +pub struct IsValidator(Box bool + Send + Sync>); + +impl From for IsValidator { + fn from(is_validator: bool) -> Self { + Self(Box::new(move || is_validator)) + } +} + +impl From bool + Send + Sync>> for IsValidator { + fn from(is_validator: Box bool + Send + Sync>) -> Self { + Self(is_validator) + } +} /// Pool that deals with validated transactions. pub struct ValidatedPool { api: Arc, + is_validator: IsValidator, options: Options, listener: RwLock, B>>, - pool: RwLock, - ExtrinsicFor, - >>, + pool: RwLock, ExtrinsicFor>>, import_notification_sinks: Mutex>>>, rotator: PoolRotator>, } -#[cfg(not(target_os = "unknown"))] impl parity_util_mem::MallocSizeOf for ValidatedPool where ExtrinsicFor: parity_util_mem::MallocSizeOf, @@ -116,9 +123,10 @@ where impl ValidatedPool { /// Create a new transaction pool. - pub fn new(options: Options, api: Arc) -> Self { + pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { let base_pool = base::BasePool::new(options.reject_future_transactions); - ValidatedPool { + Self { + is_validator, options, listener: Default::default(), api, @@ -129,7 +137,7 @@ impl ValidatedPool { } /// Bans given set of hashes. - pub fn ban(&self, now: &Instant, hashes: impl IntoIterator>) { + pub fn ban(&self, now: &Instant, hashes: impl IntoIterator>) { self.rotator.ban(now, hashes) } @@ -140,7 +148,7 @@ impl ValidatedPool { /// A fast check before doing any further processing of a transaction, like validation. /// - /// If `ingore_banned` is `true`, it will not check if the transaction is banned. 
+ /// If `ignore_banned` is `true`, it will not check if the transaction is banned. /// /// It checks if the transaction is already imported or banned. If so, it returns an error. pub fn check_is_known( @@ -151,7 +159,7 @@ impl ValidatedPool { if !ignore_banned && self.is_banned(tx_hash) { Err(error::Error::TemporarilyBanned.into()) } else if self.pool.read().is_imported(tx_hash) { - Err(error::Error::AlreadyImported(Box::new(tx_hash.clone())).into()) + Err(error::Error::AlreadyImported(Box::new(*tx_hash)).into()) } else { Ok(()) } @@ -160,9 +168,10 @@ impl ValidatedPool { /// Imports a bunch of pre-validated transactions to the pool. pub fn submit( &self, - txs: impl IntoIterator>, + txs: impl IntoIterator>, ) -> Vec, B::Error>> { - let results = txs.into_iter() + let results = txs + .into_iter() .map(|validated_tx| self.submit_one(validated_tx)) .collect::>(); @@ -173,46 +182,56 @@ impl ValidatedPool { Default::default() }; - results.into_iter().map(|res| match res { - Ok(ref hash) if removed.contains(hash) => Err(error::Error::ImmediatelyDropped.into()), - other => other, - }).collect() + results + .into_iter() + .map(|res| match res { + Ok(ref hash) if removed.contains(hash) => + Err(error::Error::ImmediatelyDropped.into()), + other => other, + }) + .collect() } /// Submit single pre-validated transaction to the pool. fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { + if !tx.propagate && !(self.is_validator.0)() { + return Err(error::Error::Unactionable.into()) + } + let imported = self.pool.write().import(tx)?; if let base::Imported::Ready { ref hash, .. } = imported { - self.import_notification_sinks.lock() - .retain_mut(|sink| { - match sink.try_send(hash.clone()) { - Ok(()) => true, - Err(e) => { - if e.is_full() { - log::warn!(target: "txpool", "[{:?}] Trying to notify an import but the channel is full", hash); - true - } else { - false - } + self.import_notification_sinks.lock().retain_mut(|sink| { + match sink.try_send(*hash) { + Ok(()) => true, + Err(e) => + if e.is_full() { + log::warn!( + target: "txpool", + "[{:?}] Trying to notify an import but the channel is full", + hash, + ); + true + } else { + false }, - } - }); + } + }); } let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(imported.hash().clone()) + Ok(*imported.hash()) }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), std::iter::once(hash)); - Err(err.into()) + Err(err) }, ValidatedTransaction::Unknown(hash, err) => { - self.listener.write().invalid(&hash, false); - Err(err.into()) + self.listener.write().invalid(&hash); + Err(err) }, } } @@ -223,8 +242,8 @@ impl ValidatedPool { let future_limit = &self.options.future; log::debug!(target: "txpool", "Pool Status: {:?}", status); - if ready_limit.is_exceeded(status.ready, status.ready_bytes) - || future_limit.is_exceeded(status.future, status.future_bytes) + if ready_limit.is_exceeded(status.ready, status.ready_bytes) || + future_limit.is_exceeded(status.future, status.future_bytes) { log::debug!( target: "txpool", @@ -236,10 +255,13 @@ impl ValidatedPool { // clean up the pool let removed = { let mut pool = self.pool.write(); - let removed = pool.enforce_limits(ready_limit, future_limit) - .into_iter().map(|x| x.hash.clone()).collect::>(); + let removed = pool + .enforce_limits(ready_limit, future_limit) + .into_iter() + .map(|x| x.hash) + .collect::>(); // ban all removed transactions - self.rotator.ban(&Instant::now(), 
removed.iter().map(|x| x.clone())); + self.rotator.ban(&Instant::now(), removed.iter().copied()); removed }; if !removed.is_empty() { @@ -274,9 +296,9 @@ impl ValidatedPool { }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), std::iter::once(hash)); - Err(err.into()) + Err(err) }, - ValidatedTransaction::Unknown(_, err) => Err(err.into()), + ValidatedTransaction::Unknown(_, err) => Err(err), } } @@ -284,9 +306,17 @@ impl ValidatedPool { /// /// Removes and then submits passed transactions and all dependent transactions. /// Transactions that are missing from the pool are not submitted. - pub fn resubmit(&self, mut updated_transactions: HashMap, ValidatedTransactionFor>) { + pub fn resubmit( + &self, + mut updated_transactions: HashMap, ValidatedTransactionFor>, + ) { #[derive(Debug, Clone, Copy, PartialEq)] - enum Status { Future, Ready, Failed, Dropped }; + enum Status { + Future, + Ready, + Failed, + Dropped, + } let (mut initial_statuses, final_statuses) = { let mut pool = self.pool.write(); @@ -301,14 +331,18 @@ impl ValidatedPool { let mut initial_statuses = HashMap::new(); let mut txs_to_resubmit = Vec::with_capacity(updated_transactions.len()); while !updated_transactions.is_empty() { - let hash = updated_transactions.keys().next().cloned().expect("transactions is not empty; qed"); + let hash = updated_transactions + .keys() + .next() + .cloned() + .expect("transactions is not empty; qed"); // note we are not considering tx with hash invalid here - we just want // to remove it along with dependent transactions and `remove_subtree()` // does exactly what we need - let removed = pool.remove_subtree(&[hash.clone()]); + let removed = pool.remove_subtree(&[hash]); for removed_tx in removed { - let removed_hash = removed_tx.hash.clone(); + let removed_hash = removed_tx.hash; let updated_transaction = updated_transactions.remove(&removed_hash); let tx_to_resubmit = if let Some(updated_tx) = updated_transaction { updated_tx @@ -322,7 +356,7 @@ impl ValidatedPool { ValidatedTransaction::Valid(transaction) }; - initial_statuses.insert(removed_hash.clone(), Status::Ready); + initial_statuses.insert(removed_hash, Status::Ready); txs_to_resubmit.push((removed_hash, tx_to_resubmit)); } // make sure to remove the hash even if it's not present in the pool any more. @@ -349,7 +383,7 @@ impl ValidatedPool { final_statuses.insert(hash, Status::Failed); } for tx in removed { - final_statuses.insert(tx.hash.clone(), Status::Dropped); + final_statuses.insert(tx.hash, Status::Dropped); } }, base::Imported::Future { .. 
} => { @@ -358,8 +392,9 @@ impl ValidatedPool { }, Err(err) => { // we do not want to fail if single transaction import has failed - // nor we do want to propagate this error, because it could tx unknown to caller - // => let's just notify listeners (and issue debug message) + // nor we do want to propagate this error, because it could tx + // unknown to caller => let's just notify listeners (and issue debug + // message) log::warn!( target: "txpool", "[{:?}] Removing invalid transaction from update: {}", @@ -369,7 +404,8 @@ impl ValidatedPool { final_statuses.insert(hash, Status::Failed); }, }, - ValidatedTransaction::Invalid(_, _) | ValidatedTransaction::Unknown(_, _) => { + ValidatedTransaction::Invalid(_, _) | + ValidatedTransaction::Unknown(_, _) => { final_statuses.insert(hash, Status::Failed); }, } @@ -379,7 +415,7 @@ impl ValidatedPool { // queue, updating final statuses as required if reject_future_transactions { for future_tx in pool.clear_future() { - final_statuses.insert(future_tx.hash.clone(), Status::Dropped); + final_statuses.insert(future_tx.hash, Status::Dropped); } } @@ -396,7 +432,7 @@ impl ValidatedPool { Status::Future => listener.future(&hash), Status::Ready => listener.ready(&hash, None), Status::Dropped => listener.dropped(&hash, None), - Status::Failed => listener.invalid(&hash, initial_status.is_some()), + Status::Failed => listener.invalid(&hash), } } } @@ -404,10 +440,13 @@ impl ValidatedPool { /// For each extrinsic, returns tags that it provides (if known), or None (if it is unknown). pub fn extrinsics_tags(&self, hashes: &[ExtrinsicHash]) -> Vec>> { - self.pool.read().by_hashes(&hashes) + self.pool + .read() + .by_hashes(&hashes) .into_iter() - .map(|existing_in_pool| existing_in_pool - .map(|transaction| transaction.provides.iter().cloned().collect())) + .map(|existing_in_pool| { + existing_in_pool.map(|transaction| transaction.provides.to_vec()) + }) .collect() } @@ -419,7 +458,7 @@ impl ValidatedPool { /// Prunes ready transactions that provide given list of tags. pub fn prune_tags( &self, - tags: impl IntoIterator, + tags: impl IntoIterator, ) -> Result, ExtrinsicFor>, B::Error> { // Perform tag-based pruning in the base pool let status = self.pool.write().prune_tags(tags); @@ -442,7 +481,7 @@ impl ValidatedPool { pub fn resubmit_pruned( &self, at: &BlockId, - known_imported_hashes: impl IntoIterator> + Clone, + known_imported_hashes: impl IntoIterator> + Clone, pruned_hashes: Vec>, pruned_xts: Vec>, ) -> Result<(), B::Error> { @@ -451,14 +490,14 @@ impl ValidatedPool { // Resubmit pruned transactions let results = self.submit(pruned_xts); - // Collect the hashes of transactions that now became invalid (meaning that they are successfully pruned). - let hashes = results - .into_iter() - .enumerate() - .filter_map(|(idx, r)| match r.map_err(error::IntoPoolError::into_pool_error) { - Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx].clone()), + // Collect the hashes of transactions that now became invalid (meaning that they are + // successfully pruned). + let hashes = results.into_iter().enumerate().filter_map(|(idx, r)| { + match r.map_err(error::IntoPoolError::into_pool_error) { + Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx]), _ => None, - }); + } + }); // Fire `pruned` notifications for collected hashes and make sure to include // `known_imported_hashes` since they were just imported as part of the block. 
let hashes = hashes.chain(known_imported_hashes.into_iter()); @@ -474,10 +513,12 @@ impl ValidatedPool { pub fn fire_pruned( &self, at: &BlockId, - hashes: impl Iterator>, + hashes: impl Iterator>, ) -> Result<(), B::Error> { - let header_hash = self.api.block_id_to_hash(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())?; + let header_hash = self + .api + .block_id_to_hash(at)? + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))?; let mut listener = self.listener.write(); let mut set = HashSet::with_capacity(hashes.size_hint().0); for h in hashes { @@ -497,14 +538,16 @@ impl ValidatedPool { /// Note this function does not remove transactions that are already included in the chain. /// See `prune_tags` if you want this. pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { - let block_number = self.api.block_id_to_number(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())? + let block_number = self + .api + .block_id_to_number(at)? + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))? .saturated_into::(); let now = Instant::now(); let to_remove = { self.ready() .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) - .map(|tx| tx.hash.clone()) + .map(|tx| tx.hash) .collect::>() }; let futures_to_remove: Vec> = { @@ -512,7 +555,7 @@ impl ValidatedPool { let mut hashes = Vec::new(); for tx in p.futures() { if self.rotator.ban_if_stale(&now, block_number, &tx) { - hashes.push(tx.hash.clone()); + hashes.push(tx.hash); } } hashes @@ -527,7 +570,7 @@ impl ValidatedPool { } /// Get rotator reference. - #[cfg(test)] + #[cfg(feature = "test-helpers")] pub fn rotator(&self) -> &PoolRotator> { &self.rotator } @@ -566,7 +609,7 @@ impl ValidatedPool { pub fn remove_invalid(&self, hashes: &[ExtrinsicHash]) -> Vec> { // early exit in case there is no invalid transactions. if hashes.is_empty() { - return vec![]; + return vec![] } log::debug!(target: "txpool", "Removing invalid transactions: {:?}", hashes); @@ -580,17 +623,26 @@ impl ValidatedPool { let mut listener = self.listener.write(); for tx in &invalid { - listener.invalid(&tx.hash, true); + listener.invalid(&tx.hash); } invalid } /// Get an iterator for ready transactions ordered by priority - pub fn ready(&self) -> impl Iterator> + Send { + pub fn ready(&self) -> impl Iterator> + Send { self.pool.read().ready() } + /// Returns a Vec of hashes and extrinsics in the future pool. + pub fn futures(&self) -> Vec<(ExtrinsicHash, ExtrinsicFor)> { + self.pool + .read() + .futures() + .map(|tx| (tx.hash.clone(), tx.data.clone())) + .collect() + } + /// Returns pool status. 
pub fn status(&self) -> PoolStatus { self.pool.read().status() @@ -609,28 +661,18 @@ impl ValidatedPool { } } -fn fire_events( - listener: &mut Listener, - imported: &base::Imported, -) where +fn fire_events(listener: &mut Listener, imported: &base::Imported) +where H: hash::Hash + Eq + traits::Member + Serialize, B: ChainApi, { match *imported { base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { listener.ready(hash, None); - for f in failed { - listener.invalid(f, true); - } - for r in removed { - listener.dropped(&r.hash, Some(hash)); - } - for p in promoted { - listener.ready(p, None); - } - }, - base::Imported::Future { ref hash } => { - listener.future(hash) + failed.into_iter().for_each(|f| listener.invalid(f)); + removed.into_iter().for_each(|r| listener.dropped(&r.hash, Some(hash))); + promoted.into_iter().for_each(|p| listener.ready(p, None)); }, + base::Imported::Future { ref hash } => listener.future(hash), } } diff --git a/client/transaction-pool/graph/src/watcher.rs b/client/transaction-pool/src/graph/watcher.rs similarity index 88% rename from client/transaction-pool/graph/src/watcher.rs rename to client/transaction-pool/src/graph/watcher.rs index 9d9a91bb23f69..975ee6608886b 100644 --- a/client/transaction-pool/graph/src/watcher.rs +++ b/client/transaction-pool/src/graph/watcher.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,15 +19,16 @@ //! Extrinsics status updates. use futures::Stream; -use sp_transaction_pool::TransactionStatus; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sc_transaction_pool_api::TransactionStatus; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; /// Extrinsic watcher. /// -/// Represents a stream of status updates for particular extrinsic. +/// Represents a stream of status updates for a particular extrinsic. #[derive(Debug)] pub struct Watcher { receiver: TracingUnboundedReceiver>, + /// transaction hash of watched extrinsic hash: H, } @@ -40,7 +41,7 @@ impl Watcher { /// Pipe the notifications to given sink. /// /// Make sure to drive the future to completion. - pub fn into_stream(self) -> impl Stream> { + pub fn into_stream(self) -> impl Stream> { self.receiver } } @@ -54,10 +55,7 @@ pub struct Sender { impl Default for Sender { fn default() -> Self { - Sender { - receivers: Default::default(), - is_finalized: false, - } + Sender { receivers: Default::default(), is_finalized: false } } } @@ -66,10 +64,7 @@ impl Sender { pub fn new_watcher(&mut self, hash: H) -> Watcher { let (tx, receiver) = tracing_unbounded("mpsc_txpool_watcher"); self.receivers.push(tx); - Watcher { - receiver, - hash, - } + Watcher { receiver, hash } } /// Transaction became ready. diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 0b6a1e935b9d0..6eb5bd2f332ec 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,64 +18,77 @@ //! Substrate transaction pool implementation. -#![recursion_limit="256"] +#![recursion_limit = "256"] #![warn(missing_docs)] #![warn(unused_extern_crates)] mod api; -mod revalidation; +mod graph; mod metrics; +mod revalidation; pub mod error; -#[cfg(test)] -pub mod testing; +/// Common types for testing the transaction pool +#[cfg(feature = "test-helpers")] +pub mod test_helpers { + pub use super::{ + graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool}, + revalidation::RevalidationQueue, + }; +} -pub use sc_transaction_graph as txpool; pub use crate::api::{FullChainApi, LightChainApi}; - -use std::{collections::{HashMap, HashSet}, sync::Arc, pin::Pin, convert::TryInto}; -use futures::{prelude::*, future::{self, ready}, channel::oneshot}; +use futures::{ + channel::oneshot, + future::{self, ready}, + prelude::*, +}; +pub use graph::{ChainApi, Options, Pool, Transaction}; use parking_lot::Mutex; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, + pin::Pin, + sync::Arc, +}; +use graph::{ExtrinsicHash, IsValidator}; +use sc_transaction_pool_api::{ + ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolFuture, PoolStatus, + TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, +}; +use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, NumberFor, AtLeast32Bit, Extrinsic, Zero}, + traits::{AtLeast32Bit, Block as BlockT, Extrinsic, Header as HeaderT, NumberFor, Zero}, }; -use sp_core::traits::SpawnNamed; -use sp_transaction_pool::{ - TransactionPool, PoolStatus, ImportNotificationStream, TxHash, TransactionFor, - TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent, - TransactionSource, -}; -use sc_transaction_graph::{ChainApi, ExtrinsicHash}; -use wasm_timer::Instant; +use std::time::Instant; -use prometheus_endpoint::Registry as PrometheusRegistry; use crate::metrics::MetricsLink as PrometheusMetrics; +use prometheus_endpoint::Registry as PrometheusRegistry; -type BoxedReadyIterator = Box< - dyn Iterator>> + Send ->; +type BoxedReadyIterator = + Box>> + Send>; -type ReadyIteratorFor = BoxedReadyIterator< - sc_transaction_graph::ExtrinsicHash, sc_transaction_graph::ExtrinsicFor ->; +type ReadyIteratorFor = + BoxedReadyIterator, graph::ExtrinsicFor>; -type PolledIterator = Pin> + Send>>; +type PolledIterator = Pin> + Send>>; /// A transaction pool for a full node. pub type FullPool = BasicPool, Block>; /// A transaction pool for a light node. -pub type LightPool = BasicPool, Block>; +pub type LightPool = + BasicPool, Block>; /// Basic implementation of transaction pool that can be customized by providing PoolApi. 
pub struct BasicPool - where - Block: BlockT, - PoolApi: ChainApi, +where + Block: BlockT, + PoolApi: graph::ChainApi, { - pool: Arc>, + pool: Arc>, api: Arc, revalidation_strategy: Arc>>>, revalidation_queue: Arc>, @@ -90,14 +103,15 @@ struct ReadyPoll { impl Default for ReadyPoll { fn default() -> Self { - Self { - updated_at: NumberFor::::zero(), - pollers: Default::default(), - } + Self { updated_at: NumberFor::::zero(), pollers: Default::default() } } } impl ReadyPoll { + fn new(best_block_number: NumberFor) -> Self { + Self { updated_at: best_block_number, pollers: Default::default() } + } + fn trigger(&mut self, number: NumberFor, iterator_factory: impl Fn() -> T) { self.updated_at = number; @@ -124,10 +138,9 @@ impl ReadyPoll { } } -#[cfg(not(target_os = "unknown"))] impl parity_util_mem::MallocSizeOf for BasicPool where - PoolApi: ChainApi, + PoolApi: graph::ChainApi, Block: BlockT, { fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { @@ -154,20 +167,20 @@ pub enum RevalidationType { } impl BasicPool - where - Block: BlockT, - PoolApi: ChainApi + 'static, +where + Block: BlockT, + PoolApi: graph::ChainApi + 'static, { /// Create new basic transaction pool with provided api, for tests. - #[cfg(test)] + #[cfg(feature = "test-helpers")] pub fn new_test( pool_api: Arc, - ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { - let pool = Arc::new(sc_transaction_graph::Pool::new(Default::default(), pool_api.clone())); + ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { + let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); let (revalidation_queue, background_task, notifier) = revalidation::RevalidationQueue::new_test(pool_api.clone(), pool.clone()); ( - BasicPool { + Self { api: pool_api, pool, revalidation_queue: Arc::new(revalidation_queue), @@ -183,56 +196,63 @@ impl BasicPool /// Create new basic transaction pool with provided api and custom /// revalidation type. 
pub fn with_revalidation_type( - options: sc_transaction_graph::Options, + options: graph::Options, + is_validator: IsValidator, pool_api: Arc, prometheus: Option<&PrometheusRegistry>, revalidation_type: RevalidationType, - spawner: impl SpawnNamed, + spawner: impl SpawnEssentialNamed, + best_block_number: NumberFor, ) -> Self { - let pool = Arc::new(sc_transaction_graph::Pool::new(options, pool_api.clone())); + let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { - RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), + RevalidationType::Light => + (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), RevalidationType::Full => { - let (queue, background) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); + let (queue, background) = + revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); (queue, Some(background)) }, }; if let Some(background_task) = background_task { - spawner.spawn("txpool-background", background_task); + spawner.spawn_essential("txpool-background", background_task); } - BasicPool { + Self { api: pool_api, pool, revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new( - match revalidation_type { - RevalidationType::Light => RevalidationStrategy::Light(RevalidationStatus::NotScheduled), - RevalidationType::Full => RevalidationStrategy::Always, - } - )), - ready_poll: Default::default(), + revalidation_strategy: Arc::new(Mutex::new(match revalidation_type { + RevalidationType::Light => + RevalidationStrategy::Light(RevalidationStatus::NotScheduled), + RevalidationType::Full => RevalidationStrategy::Always, + })), + ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), metrics: PrometheusMetrics::new(prometheus), } } /// Gets shared reference to the underlying pool. 
- pub fn pool(&self) -> &Arc> { + pub fn pool(&self) -> &Arc> { &self.pool } + + /// Get access to the underlying api + #[cfg(feature = "test-helpers")] + pub fn api(&self) -> &PoolApi { + &self.api + } } impl TransactionPool for BasicPool - where - Block: BlockT, - PoolApi: 'static + ChainApi, +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, { type Block = PoolApi::Block; - type Hash = sc_transaction_graph::ExtrinsicHash; - type InPoolTransaction = sc_transaction_graph::base_pool::Transaction< - TxHash, TransactionFor - >; + type Hash = graph::ExtrinsicHash; + type InPoolTransaction = graph::base_pool::Transaction, TransactionFor>; type Error = PoolApi::Error; fn submit_at( @@ -244,7 +264,8 @@ impl TransactionPool for BasicPool let pool = self.pool.clone(); let at = *at; - self.metrics.report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); + self.metrics + .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); async move { pool.submit_at(&at, source, xts).await }.boxed() } @@ -268,22 +289,24 @@ impl TransactionPool for BasicPool at: &BlockId, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>, Self::Error> { + ) -> PoolFuture>>, Self::Error> { let at = *at; let pool = self.pool.clone(); self.metrics.report(|metrics| metrics.submitted_transactions.inc()); async move { - pool.submit_and_watch(&at, source, xt) - .map(|result| result.map(|watcher| Box::new(watcher.into_stream()) as _)) - .await - }.boxed() + let watcher = pool.submit_and_watch(&at, source, xt).await?; + + Ok(watcher.into_stream().boxed()) + } + .boxed() } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { let removed = self.pool.validated_pool().remove_invalid(hashes); - self.metrics.report(|metrics| metrics.validations_invalid.inc_by(removed.len() as u64)); + self.metrics + .report(|metrics| metrics.validations_invalid.inc_by(removed.len() as u64)); removed } @@ -308,21 +331,31 @@ impl TransactionPool for BasicPool } fn ready_at(&self, at: NumberFor) -> PolledIterator { + let status = self.status(); + // If there are no transactions in the pool, it is fine to return early. + // + // There could be transactions being added because of a re-org happening at the relevant + // block, but this is relatively unlikely. + if status.ready == 0 && status.future == 0 { + return async { Box::new(std::iter::empty()) as Box<_> }.boxed() + } + if self.ready_poll.lock().updated_at() >= at { log::trace!(target: "txpool", "Transaction pool already processed block #{}", at); let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return Box::pin(futures::future::ready(iterator)); + return async move { iterator }.boxed() } - Box::pin( - self.ready_poll - .lock() - .add(at) - .map(|received| received.unwrap_or_else(|e| { + self.ready_poll + .lock() + .add(at) + .map(|received| { + received.unwrap_or_else(|e| { log::warn!("Error receiving pending set: {:?}", e); - Box::new(vec![].into_iter()) - })) - ) + Box::new(std::iter::empty()) + }) + }) + .boxed() } fn ready(&self) -> ReadyIteratorFor { @@ -333,20 +366,26 @@ impl TransactionPool for BasicPool impl LightPool where Block: BlockT, - Client: sp_blockchain::HeaderBackend + 'static, + Client: sp_blockchain::HeaderBackend + sc_client_api::UsageProvider + 'static, Fetcher: sc_client_api::Fetcher + 'static, { /// Create new basic transaction pool for a light node with the provided api.
pub fn new_light( - options: sc_transaction_graph::Options, + options: graph::Options, prometheus: Option<&PrometheusRegistry>, - spawner: impl SpawnNamed, + spawner: impl SpawnEssentialNamed, client: Arc, fetcher: Arc, ) -> Self { - let pool_api = Arc::new(LightChainApi::new(client, fetcher)); + let pool_api = Arc::new(LightChainApi::new(client.clone(), fetcher)); Self::with_revalidation_type( - options, pool_api, prometheus, RevalidationType::Light, spawner, + options, + false.into(), + pool_api, + prometheus, + RevalidationType::Light, + spawner, + client.usage_info().chain.best_number, ) } } @@ -356,21 +395,32 @@ where Block: BlockT, Client: sp_api::ProvideRuntimeApi + sc_client_api::BlockBackend - + sp_runtime::traits::BlockIdTo, - Client: sc_client_api::ExecutorProvider + Send + Sync + 'static, + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sc_client_api::ExecutorProvider + + sc_client_api::UsageProvider + + Send + + Sync + + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { /// Create new basic transaction pool for a full node with the provided api. pub fn new_full( - options: sc_transaction_graph::Options, + options: graph::Options, + is_validator: IsValidator, prometheus: Option<&PrometheusRegistry>, - spawner: impl SpawnNamed, + spawner: impl SpawnEssentialNamed, client: Arc, ) -> Arc { - let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus)); + let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus, &spawner)); let pool = Arc::new(Self::with_revalidation_type( - options, pool_api, prometheus, RevalidationType::Full, spawner + options, + is_validator, + pool_api, + prometheus, + RevalidationType::Full, + spawner, + client.usage_info().chain.best_number, )); // make transaction pool available for off-chain runtime calls. 
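Call sites need updating accordingly: `new_full` now threads two extra inputs through `with_revalidation_type`, an `IsValidator` flag and the node's current best block number (read from the client via `UsageProvider`), and it takes an essential spawner. A minimal call-site sketch, assuming the usual service-builder wiring; `config`, `task_manager`, and `client` are illustrative names, not part of this diff:

    // Sketch only: constructing the full pool against the new signature.
    // `.into()` converts a plain bool into the new `IsValidator` wrapper.
    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
        Default::default(),                    // graph::Options
        config.role.is_authority().into(),     // IsValidator
        config.prometheus_registry(),
        task_manager.spawn_essential_handle(), // impl SpawnEssentialNamed
        client.clone(),
    );

Spawning the revalidation worker via `spawn_essential` (instead of a plain `spawn`) means the node is brought down if the background task ever dies, rather than running on silently without revalidation.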
@@ -380,29 +430,30 @@ where } } -impl sp_transaction_pool::LocalTransactionPool +impl sc_transaction_pool_api::LocalTransactionPool for BasicPool, Block> where Block: BlockT, Client: sp_api::ProvideRuntimeApi + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + sp_runtime::traits::BlockIdTo, Client: Send + Sync + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { type Block = Block; - type Hash = sc_transaction_graph::ExtrinsicHash>; - type Error = as ChainApi>::Error; + type Hash = graph::ExtrinsicHash>; + type Error = as graph::ChainApi>::Error; fn submit_local( &self, at: &BlockId, - xt: sp_transaction_pool::LocalTransactionFor, + xt: sc_transaction_pool_api::LocalTransactionFor, ) -> Result { - use sc_transaction_graph::ValidatedTransaction; - use sp_runtime::traits::SaturatedConversion; - use sp_runtime::transaction_validity::TransactionValidityError; + use graph::ValidatedTransaction; + use sp_runtime::{ + traits::SaturatedConversion, transaction_validity::TransactionValidityError, + }; let validity = self .api @@ -475,10 +526,7 @@ impl RevalidationStrategy { ), resubmit: false, }, - Self::Always => RevalidationAction { - revalidate: true, - resubmit: true, - } + Self::Always => RevalidationAction { revalidate: true, resubmit: true }, } } } @@ -503,39 +551,54 @@ impl RevalidationStatus { revalidate_block_period.map(|period| block + period), ); false - } + }, Self::Scheduled(revalidate_at_time, revalidate_at_block) => { - let is_required = revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) - || revalidate_at_block.map(|at| block >= at).unwrap_or(false); + let is_required = + revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) || + revalidate_at_block.map(|at| block >= at).unwrap_or(false); if is_required { *self = Self::InProgress; } is_required - } + }, Self::InProgress => false, } } } /// Prune the known txs for the given block. 
-async fn prune_known_txs_for_block>( +async fn prune_known_txs_for_block>( block_id: BlockId, api: &Api, - pool: &sc_transaction_graph::Pool, + pool: &graph::Pool, ) -> Vec> { - let hashes = api.block_body(&block_id).await + let extrinsics = api + .block_body(&block_id) + .await .unwrap_or_else(|e| { log::warn!("Prune known transactions: error request {:?}!", e); None }) - .unwrap_or_default() - .into_iter() - .map(|tx| pool.hash_of(&tx)) - .collect::>(); + .unwrap_or_default(); + + let hashes = extrinsics.iter().map(|tx| pool.hash_of(&tx)).collect::>(); log::trace!(target: "txpool", "Pruning transactions: {:?}", hashes); - if let Err(e) = pool.prune_known(&block_id, &hashes) { + let header = match api.block_header(&block_id) { + Ok(Some(h)) => h, + Ok(None) => { + log::debug!(target: "txpool", "Could not find header for {:?}.", block_id); + return hashes + }, + Err(e) => { + log::debug!(target: "txpool", "Error retrieving header for {:?}: {:?}", block_id, e); + return hashes + }, + }; + + if let Err(e) = pool.prune(&block_id, &BlockId::hash(*header.parent_hash()), &extrinsics).await + { log::error!("Cannot prune known in the pool {:?}!", e); } @@ -543,11 +606,11 @@ async fn prune_known_txs_for_block>( } impl MaintainedTransactionPool for BasicPool - where - Block: BlockT, - PoolApi: 'static + ChainApi, +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, { - fn maintain(&self, event: ChainEvent) -> Pin + Send>> { + fn maintain(&self, event: ChainEvent) -> Pin + Send>> { match event { ChainEvent::NewBestBlock { hash, tree_route } => { let pool = self.pool.clone(); @@ -562,14 +625,14 @@ impl MaintainedTransactionPool for BasicPool "Skipping chain event - no number for that block {:?}", id, ); - return Box::pin(ready(())); - } + return Box::pin(ready(())) + }, }; let next_action = self.revalidation_strategy.lock().next( block_number, Some(std::time::Duration::from_secs(60)), - Some(20.into()), + Some(20u32.into()), ); let revalidation_strategy = self.revalidation_strategy.clone(); let revalidation_queue = self.revalidation_queue.clone(); @@ -578,7 +641,7 @@ impl MaintainedTransactionPool for BasicPool async move { // We keep track of everything we prune so that later we won't add - // tranactions with those hashes from the retracted blocks. + // transactions with those hashes from the retracted blocks. 
let mut pruned_log = HashSet::>::new(); // If there is a tree route, we use this to prune known tx based on the enacted @@ -592,27 +655,21 @@ impl MaintainedTransactionPool for BasicPool pool.validated_pool().on_block_retracted(retracted.hash.clone()); } - future::join_all( - tree_route - .enacted() - .iter() - .map(|h| - prune_known_txs_for_block( - BlockId::Hash(h.hash.clone()), - &*api, - &*pool, - ), - ), - ).await.into_iter().for_each(|enacted_log|{ + future::join_all(tree_route.enacted().iter().map(|h| { + prune_known_txs_for_block(BlockId::Hash(h.hash.clone()), &*api, &*pool) + })) + .await + .into_iter() + .for_each(|enacted_log| { pruned_log.extend(enacted_log); }) } pruned_log.extend(prune_known_txs_for_block(id.clone(), &*api, &*pool).await); - metrics.report( - |metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) - ); + metrics.report(|metrics| { + metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) + }); if let (true, Some(tree_route)) = (next_action.resubmit, tree_route) { let mut resubmit_transactions = Vec::new(); @@ -620,7 +677,8 @@ impl MaintainedTransactionPool for BasicPool for retracted in tree_route.retracted() { let hash = retracted.hash.clone(); - let block_transactions = api.block_body(&BlockId::hash(hash)) + let block_transactions = api + .block_body(&BlockId::hash(hash)) .await .unwrap_or_else(|e| { log::warn!("Failed to fetch block body {:?}!", e); @@ -632,8 +690,8 @@ impl MaintainedTransactionPool for BasicPool let mut resubmitted_to_report = 0; - resubmit_transactions.extend( - block_transactions.into_iter().filter(|tx| { + resubmit_transactions.extend(block_transactions.into_iter().filter( + |tx| { let tx_hash = pool.hash_of(&tx); let contains = pruned_log.contains(&tx_hash); @@ -649,21 +707,24 @@ impl MaintainedTransactionPool for BasicPool ); } !contains - }) - ); + }, + )); - metrics.report( - |metrics| metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) - ); + metrics.report(|metrics| { + metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) + }); } - if let Err(e) = pool.resubmit_at( - &id, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ).await { + if let Err(e) = pool + .resubmit_at( + &id, + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + ) + .await + { log::debug!( target: "txpool", "[{:?}] Error re-submitting transactions: {:?}", @@ -676,22 +737,20 @@ impl MaintainedTransactionPool for BasicPool let extra_pool = pool.clone(); // After #5200 lands, this arguably might be moved to the // handler of "all blocks notification". 
- ready_poll.lock().trigger( - block_number, - move || Box::new(extra_pool.validated_pool().ready()), - ); + ready_poll.lock().trigger(block_number, move || { + Box::new(extra_pool.validated_pool().ready()) + }); if next_action.revalidate { - let hashes = pool.validated_pool() - .ready() - .map(|tx| tx.hash.clone()) - .collect(); + let hashes = + pool.validated_pool().ready().map(|tx| tx.hash.clone()).collect(); revalidation_queue.revalidate_later(block_number, hashes).await; revalidation_strategy.lock().clear(); } - }.boxed() - } + } + .boxed() + }, ChainEvent::Finalized { hash } => { let pool = self.pool.clone(); async move { @@ -702,28 +761,25 @@ impl MaintainedTransactionPool for BasicPool e, hash ) } - }.boxed() - } + } + .boxed() + }, } } } /// Inform the transaction pool about imported and finalized blocks. -pub async fn notification_future( - client: Arc, - txpool: Arc -) - where - Block: BlockT, - Client: sc_client_api::BlockchainEvents, - Pool: MaintainedTransactionPool, +pub async fn notification_future(client: Arc, txpool: Arc) +where + Block: BlockT, + Client: sc_client_api::BlockchainEvents, + Pool: MaintainedTransactionPool, { - let import_stream = client.import_notification_stream() + let import_stream = client + .import_notification_stream() .filter_map(|n| ready(n.try_into().ok())) .fuse(); - let finality_stream = client.finality_notification_stream() - .map(Into::into) - .fuse(); + let finality_stream = client.finality_notification_stream().map(Into::into).fuse(); futures::stream::select(import_stream, finality_stream) .for_each(|evt| txpool.maintain(evt)) diff --git a/client/transaction-pool/src/metrics.rs b/client/transaction-pool/src/metrics.rs index 376e6dfe94488..d62d64f13a0a4 100644 --- a/client/transaction-pool/src/metrics.rs +++ b/client/transaction-pool/src/metrics.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -27,13 +27,13 @@ pub struct MetricsLink(Arc>); impl MetricsLink { pub fn new(registry: Option<&Registry>) -> Self { - Self(Arc::new( - registry.and_then(|registry| - Metrics::register(registry) - .map_err(|err| { log::warn!("Failed to register prometheus metrics: {}", err); }) - .ok() - ) - )) + Self(Arc::new(registry.and_then(|registry| { + Metrics::register(registry) + .map_err(|err| { + log::warn!("Failed to register prometheus metrics: {}", err); + }) + .ok() + }))) } pub fn report(&self, do_this: impl FnOnce(&Metrics)) { diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index 7be8688eaea5d..a8b2c1d32036a 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,20 +18,26 @@ //! Pool periodic revalidation. 
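With the test constructors below moving behind the `test-helpers` feature instead of `#[cfg(test)]`, external crates can also wire the queue up themselves. A minimal sketch of the intended flow, assuming `api` and `pool` are the `Arc`-wrapped chain api and pool built as in `lib.rs` above and `spawner` implements `SpawnEssentialNamed` (all names illustrative):

    // Sketch only: manual wiring of the background revalidation queue; in
    // practice `BasicPool::with_revalidation_type` performs these steps.
    let (queue, background) = RevalidationQueue::new_background(api.clone(), pool.clone());
    spawner.spawn_essential("txpool-background", background);
    // Later, e.g. from `maintain`: queue the currently ready transactions for
    // revalidation; the worker picks them up in batches on its interval.
    queue.revalidate_later(best_block_number, ready_hashes).await;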
-use std::{sync::Arc, pin::Pin, collections::{HashMap, HashSet, BTreeMap}}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + pin::Pin, + sync::Arc, +}; -use sc_transaction_graph::{ChainApi, Pool, ExtrinsicHash, NumberFor, ValidatedTransaction}; -use sp_runtime::traits::{Zero, SaturatedConversion}; -use sp_runtime::generic::BlockId; -use sp_runtime::transaction_validity::TransactionValidityError; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use crate::graph::{ChainApi, ExtrinsicHash, NumberFor, Pool, ValidatedTransaction}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_runtime::{ + generic::BlockId, + traits::{SaturatedConversion, Zero}, + transaction_validity::TransactionValidityError, +}; use futures::prelude::*; use std::time::Duration; -#[cfg(not(test))] +#[cfg(not(feature = "test-helpers"))] const BACKGROUND_REVALIDATION_INTERVAL: Duration = Duration::from_millis(200); -#[cfg(test)] +#[cfg(feature = "test-helpers")] pub const BACKGROUND_REVALIDATION_INTERVAL: Duration = Duration::from_millis(1); const MIN_BACKGROUND_REVALIDATION_BATCH_SIZE: usize = 20; @@ -63,19 +69,18 @@ async fn batch_revalidate( pool: Arc>, api: Arc, at: NumberFor, - batch: impl IntoIterator>, + batch: impl IntoIterator>, ) { let mut invalid_hashes = Vec::new(); let mut revalidated = HashMap::new(); - let validation_results = futures::future::join_all( - batch.into_iter().filter_map(|ext_hash| { - pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()) - .map(move |validation_result| (validation_result, ext_hash, ext)) - }) + let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { + pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { + api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()) + .map(move |validation_result| (validation_result, ext_hash, ext)) }) - ).await; + })) + .await; for (validation_result, ext_hash, ext) in validation_results { match validation_result { @@ -98,7 +103,7 @@ async fn batch_revalidate( ext.data.clone(), api.hash_and_length(&ext.data).1, validity, - ) + ), ); }, Err(validation_err) => { @@ -109,7 +114,7 @@ async fn batch_revalidate( validation_err ); invalid_hashes.push(ext_hash); - } + }, } } @@ -120,10 +125,7 @@ async fn batch_revalidate( } impl RevalidationWorker { - fn new( - api: Arc, - pool: Arc>, - ) -> Self { + fn new(api: Arc, pool: Arc>) -> Self { Self { api, pool, @@ -135,7 +137,8 @@ impl RevalidationWorker { fn prepare_batch(&mut self) -> Vec> { let mut queued_exts = Vec::new(); - let mut left = std::cmp::max(MIN_BACKGROUND_REVALIDATION_BATCH_SIZE, self.members.len() / 4); + let mut left = + std::cmp::max(MIN_BACKGROUND_REVALIDATION_BATCH_SIZE, self.members.len() / 4); // Take maximum of count transaction by order // which they got into the pool @@ -188,11 +191,14 @@ impl RevalidationWorker { ext_hash, ); - continue; + continue } - self.block_ordered.entry(block_number) - .and_modify(|value| { value.insert(ext_hash.clone()); }) + self.block_ordered + .entry(block_number) + .and_modify(|value| { + value.insert(ext_hash.clone()); + }) .or_insert_with(|| { let mut bt = HashSet::new(); bt.insert(ext_hash.clone()); @@ -211,7 +217,10 @@ impl RevalidationWorker { mut self, from_queue: TracingUnboundedReceiver>, interval: R, - ) where R: Send, R::Guard: Send { + ) where + R: Send, + R::Guard: Send, + { let interval = 
interval.into_stream().fuse(); let from_queue = from_queue.fuse(); futures::pin_mut!(interval, from_queue); @@ -225,7 +234,7 @@ impl RevalidationWorker { batch_revalidate(this.pool.clone(), this.api.clone(), this.best_block, next_batch).await; - #[cfg(test)] + #[cfg(feature = "test-helpers")] { use intervalier::Guard; // only trigger test events if something was processed @@ -269,7 +278,6 @@ impl RevalidationWorker { } } - /// Revalidation queue. /// /// Can be configured background (`new_background`) @@ -286,45 +294,48 @@ where { /// New revalidation queue without background worker. pub fn new(api: Arc, pool: Arc>) -> Self { - Self { - api, - pool, - background: None, - } + Self { api, pool, background: None } } + /// New revalidation queue with background worker. pub fn new_with_interval( api: Arc, pool: Arc>, interval: R, - ) -> (Self, Pin + Send>>) where R: Send + 'static, R::Guard: Send { + ) -> (Self, Pin + Send>>) + where + R: Send + 'static, + R::Guard: Send, + { let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue"); let worker = RevalidationWorker::new(api.clone(), pool.clone()); - let queue = - Self { - api, - pool, - background: Some(to_worker), - }; + let queue = Self { api, pool, background: Some(to_worker) }; (queue, worker.run(from_queue, interval).boxed()) } /// New revalidation queue with background worker. - pub fn new_background(api: Arc, pool: Arc>) -> - (Self, Pin + Send>>) - { - Self::new_with_interval(api, pool, intervalier::Interval::new(BACKGROUND_REVALIDATION_INTERVAL)) + pub fn new_background( + api: Arc, + pool: Arc>, + ) -> (Self, Pin + Send>>) { + Self::new_with_interval( + api, + pool, + intervalier::Interval::new(BACKGROUND_REVALIDATION_INTERVAL), + ) } /// New revalidation queue with background worker and test signal. 
- #[cfg(test)] - pub fn new_test(api: Arc, pool: Arc>) -> - (Self, Pin + Send>>, intervalier::BackSignalControl) - { - let (interval, notifier) = intervalier::BackSignalInterval::new(BACKGROUND_REVALIDATION_INTERVAL); + #[cfg(feature = "test-helpers")] + pub fn new_test( + api: Arc, + pool: Arc>, + ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { + let (interval, notifier) = + intervalier::BackSignalInterval::new(BACKGROUND_REVALIDATION_INTERVAL); let (queue, background) = Self::new_with_interval(api, pool, interval); (queue, background, notifier) @@ -360,36 +371,4 @@ where } #[cfg(test)] -mod tests { - use super::*; - use sc_transaction_graph::Pool; - use sp_transaction_pool::TransactionSource; - use substrate_test_runtime_transaction_pool::{TestApi, uxt}; - use futures::executor::block_on; - use substrate_test_runtime_client::AccountKeyring::*; - - fn setup() -> (Arc, Pool) { - let test_api = Arc::new(TestApi::empty()); - let pool = Pool::new(Default::default(), test_api.clone()); - (test_api, pool) - } - - #[test] - fn smoky() { - let (api, pool) = setup(); - let pool = Arc::new(pool); - let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); - - let uxt = uxt(Alice, 0); - let uxt_hash = block_on( - pool.submit_one(&BlockId::number(0), TransactionSource::External, uxt.clone()) - ).expect("Should be valid"); - - block_on(queue.revalidate_later(0, vec![uxt_hash])); - - // revalidated in sync offload 2nd time - assert_eq!(api.validation_requests().len(), 2); - // number of ready - assert_eq!(pool.validated_pool().status().ready, 1); - } -} +mod tests {} diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/tests/pool.rs similarity index 68% rename from client/transaction-pool/src/testing/pool.rs rename to client/transaction-pool/tests/pool.rs index 8fa742cd419a3..6c34d05cd5dcb 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,38 +16,41 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::*; -use sp_transaction_pool::TransactionStatus; -use futures::executor::{block_on, block_on_stream}; -use txpool::{self, Pool}; +//! 
Tests for top-level transaction pool api +use codec::Encode; +use futures::{ + executor::{block_on, block_on_stream}, + prelude::*, + task::Poll, +}; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::client::BlockchainEvents; +use sc_transaction_pool::{test_helpers::*, *}; +use sc_transaction_pool_api::{ + ChainEvent, MaintainedTransactionPool, TransactionPool, TransactionStatus, +}; +use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, - transaction_validity::{ValidTransaction, TransactionSource, InvalidTransaction}, + traits::Block as _, + transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; +use std::{collections::BTreeSet, convert::TryInto, sync::Arc}; use substrate_test_runtime_client::{ - runtime::{Block, Hash, Index, Header, Extrinsic, Transfer}, AccountKeyring::*, + runtime::{Block, Extrinsic, Hash, Header, Index, Transfer}, + AccountKeyring::*, ClientBlockImportExt, }; -use substrate_test_runtime_transaction_pool::{TestApi, uxt}; -use futures::{prelude::*, task::Poll}; -use codec::Encode; -use std::collections::BTreeSet; -use sc_client_api::client::BlockchainEvents; -use sc_block_builder::BlockBuilderProvider; -use sp_consensus::BlockOrigin; +use substrate_test_runtime_transaction_pool::{uxt, TestApi}; fn pool() -> Pool { - Pool::new(Default::default(), TestApi::with_alice_nonce(209).into()) + Pool::new(Default::default(), true.into(), TestApi::with_alice_nonce(209).into()) } -fn maintained_pool() -> ( - BasicPool, - futures::executor::ThreadPool, - intervalier::BackSignalControl, -) { - let (pool, background_task, notifier) = BasicPool::new_test( - Arc::new(TestApi::with_alice_nonce(209)), - ); +fn maintained_pool( +) -> (BasicPool, futures::executor::ThreadPool, intervalier::BackSignalControl) { + let (pool, background_task, notifier) = + BasicPool::new_test(Arc::new(TestApi::with_alice_nonce(209))); let thread_pool = futures::executor::ThreadPool::new().unwrap(); thread_pool.spawn_ok(background_task); @@ -107,13 +110,8 @@ fn prune_tags_should_work() { assert_eq!(pending, vec![209, 210]); pool.validated_pool().api().push_block(1, Vec::new(), true); - block_on( - pool.prune_tags( - &BlockId::number(1), - vec![vec![209]], - vec![hash209], - ) - ).expect("Prune tags"); + block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![hash209])) + .expect("Prune tags"); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![210]); @@ -140,17 +138,13 @@ fn only_prune_on_new_best() { let pool = maintained_pool().0; let uxt = uxt(Alice, 209); - let _ = block_on( - pool.submit_and_watch(&BlockId::number(0), SOURCE, uxt.clone()) - ).expect("1. Imported"); - pool.api.push_block(1, vec![uxt.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, uxt.clone())) + .expect("1. 
Imported"); + pool.api().push_block(1, vec![uxt.clone()], true); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(2, vec![uxt], true); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let header = pool.api().push_block(2, vec![uxt], true); + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); } @@ -161,7 +155,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| { v.provides.push(vec![155]); })); - let pool = Pool::new(Default::default(), api.clone()); + let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt = uxt(Alice, 209); block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); @@ -193,10 +187,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { } fn block_event(header: Header) -> ChainEvent { - ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - } + ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None } } fn block_event_with_retracted( @@ -204,12 +195,10 @@ fn block_event_with_retracted( retracted_start: Hash, api: &TestApi, ) -> ChainEvent { - let tree_route = api.tree_route(retracted_start, header.parent_hash).expect("Tree route exists"); + let tree_route = + api.tree_route(retracted_start, header.parent_hash).expect("Tree route exists"); - ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: Some(Arc::new(tree_route)), - } + ChainEvent::NewBestBlock { hash: header.hash(), tree_route: Some(Arc::new(tree_route)) } } #[test] @@ -221,7 +210,7 @@ fn should_prune_old_during_maintenance() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![xt.clone()], true); + let header = pool.api().push_block(1, vec![xt.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); @@ -236,16 +225,16 @@ fn should_revalidate_during_maintenance() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt2.clone())).expect("2. Imported"); assert_eq!(pool.status().ready, 2); - assert_eq!(pool.api.validation_requests().len(), 2); + assert_eq!(pool.api().validation_requests().len(), 2); - let header = pool.api.push_block(1, vec![xt1.clone()], true); + let header = pool.api().push_block(1, vec![xt1.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 1); block_on(notifier.next()); // test that pool revalidated transaction that left ready and not included in the block - assert_eq!(pool.api.validation_requests().len(), 3); + assert_eq!(pool.api().validation_requests().len(), 3); } #[test] @@ -257,16 +246,15 @@ fn should_resubmit_from_retracted_during_maintenance() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![], true); - let fork_header = pool.api.push_block(1, vec![], false); + let header = pool.api().push_block(1, vec![], true); + let fork_header = pool.api().push_block(1, vec![], false); - let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api); + let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 1); } - #[test] fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacted() { let xt = uxt(Alice, 209); @@ -276,10 +264,10 @@ fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacte block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![xt.clone()], true); - let fork_header = pool.api.push_block(1, vec![xt], false); + let header = pool.api().push_block(1, vec![xt.clone()], true); + let fork_header = pool.api().push_block(1, vec![xt], false); - let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api); + let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -294,11 +282,11 @@ fn should_not_retain_invalid_hashes_from_retracted() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![], true); - let fork_header = pool.api.push_block(1, vec![xt.clone()], false); - pool.api.add_invalid(&xt); + let header = pool.api().push_block(1, vec![], true); + let fork_header = pool.api().push_block(1, vec![xt.clone()], false); + pool.api().add_invalid(&xt); - let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api); + let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api()); block_on(pool.maintain(event)); block_on(notifier.next()); @@ -306,31 +294,6 @@ fn should_not_retain_invalid_hashes_from_retracted() { assert_eq!(pool.status().ready, 0); } -#[test] -fn should_revalidate_transaction_multiple_times() { - let xt = uxt(Alice, 209); - - let (pool, _guard, mut notifier) = maintained_pool(); - - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 1); - - let header = pool.api.push_block(1, vec![xt.clone()], true); - - block_on(pool.maintain(block_event(header))); - - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 1); - - let header = pool.api.push_block(2, vec![], true); - pool.api.add_invalid(&xt); - - block_on(pool.maintain(block_event(header))); - block_on(notifier.next()); - - assert_eq!(pool.status().ready, 0); -} - #[test] fn should_revalidate_across_many_blocks() { let xt1 = uxt(Alice, 209); @@ -343,25 +306,24 @@ fn should_revalidate_across_many_blocks() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt2.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 2); - let header = pool.api.push_block(1, vec![], true); + let header = pool.api().push_block(1, vec![], true); block_on(pool.maintain(block_event(header))); block_on(notifier.next()); block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt3.clone())).expect("1. 
Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api.push_block(2, vec![xt1.clone()], true); + let header = pool.api().push_block(2, vec![xt1.clone()], true); block_on(pool.maintain(block_event(header))); block_on(notifier.next()); assert_eq!(pool.status().ready, 2); // xt1 and xt2 validated twice, then xt3 once, then xt2 and xt3 again - assert_eq!(pool.api.validation_requests().len(), 7); + assert_eq!(pool.api().validation_requests().len(), 7); } - #[test] -fn should_push_watchers_during_maintaince() { +fn should_push_watchers_during_maintenance() { fn alice_uxt(nonce: u64) -> Extrinsic { uxt(Alice, 209 + nonce) } @@ -370,33 +332,28 @@ fn should_push_watchers_during_maintaince() { let (pool, _guard, mut notifier) = maintained_pool(); let tx0 = alice_uxt(0); - let watcher0 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone()) - ).unwrap(); + let watcher0 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone())).unwrap(); let tx1 = alice_uxt(1); - let watcher1 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone()) - ).unwrap(); + let watcher1 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone())).unwrap(); let tx2 = alice_uxt(2); - let watcher2 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone()) - ).unwrap(); + let watcher2 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone())).unwrap(); let tx3 = alice_uxt(3); - let watcher3 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone()) - ).unwrap(); + let watcher3 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone())).unwrap(); let tx4 = alice_uxt(4); - let watcher4 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone()) - ).unwrap(); + let watcher4 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone())).unwrap(); assert_eq!(pool.status().ready, 5); // when - pool.api.add_invalid(&tx3); - pool.api.add_invalid(&tx4); + pool.api().add_invalid(&tx3); + pool.api().add_invalid(&tx4); // clear timer events if any - let header = pool.api.push_block(1, vec![], true); + let header = pool.api().push_block(1, vec![], true); block_on(pool.maintain(block_event(header))); block_on(notifier.next()); @@ -414,7 +371,7 @@ fn should_push_watchers_during_maintaince() { ); // when - let header = pool.api.push_block(2, vec![tx0, tx1, tx2], true); + let header = pool.api().push_block(2, vec![tx0, tx1, tx2], true); let header_hash = header.hash(); block_on(pool.maintain(block_event(header))); @@ -430,21 +387,24 @@ fn should_push_watchers_during_maintaince() { vec![ TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone())], + TransactionStatus::Finalized(header_hash.clone()) + ], ); assert_eq!( futures::executor::block_on_stream(watcher1).collect::>(), vec![ TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone())], + TransactionStatus::Finalized(header_hash.clone()) + ], ); assert_eq!( futures::executor::block_on_stream(watcher2).collect::>(), vec![ TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone())], + TransactionStatus::Finalized(header_hash.clone()) + ], ); } @@ -465,16 +425,12 @@ fn finalization() { let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![], true); let (pool, _background, _) = 
BasicPool::new_test(api.into()); - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) - ).expect("1. Imported"); - pool.api.push_block(2, vec![xt.clone()], true); - - let header = pool.api.chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + .expect("1. Imported"); + pool.api().push_block(2, vec![xt.clone()], true); + + let header = pool.api().chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); let event = ChainEvent::Finalized { hash: header.hash() }; @@ -500,10 +456,10 @@ fn fork_aware_finalization() { let from_dave = uxt(Dave, 2); let from_bob = uxt(Bob, 1); let from_charlie = uxt(Charlie, 1); - pool.api.increment_nonce(Alice.into()); - pool.api.increment_nonce(Dave.into()); - pool.api.increment_nonce(Charlie.into()); - pool.api.increment_nonce(Bob.into()); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Dave.into()); + pool.api().increment_nonce(Charlie.into()); + pool.api().increment_nonce(Bob.into()); let from_dave_watcher; let from_bob_watcher; @@ -514,17 +470,14 @@ fn fork_aware_finalization() { // block B1 { - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![from_alice.clone()], true); + let watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = pool.api().push_block(2, vec![from_alice.clone()], true); canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; b1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -534,15 +487,12 @@ fn fork_aware_finalization() { // block C2 { - let header = pool.api.push_block_with_parent(b1, vec![from_dave.clone()], true); - from_dave_watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone()) - ).expect("1. Imported"); + let header = pool.api().push_block_with_parent(b1, vec![from_dave.clone()], true); + from_dave_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; c2 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -550,16 +500,13 @@ fn fork_aware_finalization() { // block D2 { - from_bob_watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()) - ).expect("1. Imported"); + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block_with_parent(c2, vec![from_bob.clone()], true); + let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d2 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -567,14 +514,14 @@ fn fork_aware_finalization() { // block C1 { - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone()) - ).expect("1.Imported"); + let watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) + .expect("1.Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(3, vec![from_charlie.clone()], true); + let header = pool.api().push_block(3, vec![from_charlie.clone()], true); canon_watchers.push((watcher, header.hash())); - let event = block_event_with_retracted(header.clone(), d2, &*pool.api); + let event = block_event_with_retracted(header.clone(), d2, &*pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); @@ -585,17 +532,13 @@ fn fork_aware_finalization() { // block D1 { let xt = uxt(Eve, 0); - let w = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) - ).expect("1. Imported"); + let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api.push_block(4, vec![xt.clone()], true); + let header = pool.api().push_block(4, vec![xt.clone()], true); canon_watchers.push((w, header.hash())); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); @@ -607,18 +550,14 @@ fn fork_aware_finalization() { // block e1 { - let header = pool.api.push_block(5, vec![from_dave, from_bob], true); + let header = pool.api().push_block(5, vec![from_dave, from_bob], true); e1 = header.hash(); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); block_on(pool.maintain(ChainEvent::Finalized { hash: e1 })); } - for (canon_watcher, h) in canon_watchers { let mut stream = futures::executor::block_on_stream(canon_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -627,7 +566,6 @@ fn fork_aware_finalization() { assert_eq!(stream.next(), None); } - { let mut stream = futures::executor::block_on_stream(from_dave_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -662,21 +600,17 @@ fn prune_and_retract_tx_at_same_time() { let (pool, _background, _) = BasicPool::new_test(api.into()); let from_alice = uxt(Alice, 1); - pool.api.increment_nonce(Alice.into()); + pool.api().increment_nonce(Alice.into()); - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()) - ).expect("1. Imported"); + let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. 
Imported"); // Block B1 let b1 = { - let header = pool.api.push_block(2, vec![from_alice.clone()], true); + let header = pool.api().push_block(2, vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); header.hash() @@ -684,10 +618,10 @@ fn prune_and_retract_tx_at_same_time() { // Block B2 let b2 = { - let header = pool.api.push_block(2, vec![from_alice.clone()], false); + let header = pool.api().push_block(2, vec![from_alice.clone()], false); assert_eq!(pool.status().ready, 0); - let event = block_event_with_retracted(header.clone(), b1, &*pool.api); + let event = block_event_with_retracted(header.clone(), b1, &*pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -708,7 +642,6 @@ fn prune_and_retract_tx_at_same_time() { } } - /// This test ensures that transactions from a fork are re-submitted if /// the forked block is not part of the retracted blocks. This happens as the /// retracted block list only contains the route from the old best to the new @@ -734,23 +667,19 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); - pool.api.increment_nonce(Alice.into()); - pool.api.increment_nonce(Dave.into()); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Dave.into()); let d0; // Block D0 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![tx0.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) + .expect("1. Imported"); + let header = pool.api().push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d0 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -758,17 +687,16 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // Block D1 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone()) - ).expect("1. Imported"); - pool.api.push_block(2, vec![tx1.clone()], false); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) + .expect("1. 
Imported"); + pool.api().push_block(2, vec![tx1.clone()], false); assert_eq!(pool.status().ready, 1); } // Block D2 { - let header = pool.api.push_block(2, vec![], false); - let event = block_event_with_retracted(header, d0, &*pool.api); + let header = pool.api().push_block(2, vec![], false); + let event = block_event_with_retracted(header, d0, &*pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); } @@ -791,19 +719,18 @@ fn resubmit_from_retracted_fork() { let tx4 = uxt(Ferdie, 2); let tx5 = uxt(One, 3); - pool.api.increment_nonce(Alice.into()); - pool.api.increment_nonce(Dave.into()); - pool.api.increment_nonce(Bob.into()); - pool.api.increment_nonce(Eve.into()); - pool.api.increment_nonce(Ferdie.into()); - pool.api.increment_nonce(One.into()); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Dave.into()); + pool.api().increment_nonce(Bob.into()); + pool.api().increment_nonce(Eve.into()); + pool.api().increment_nonce(Ferdie.into()); + pool.api().increment_nonce(One.into()); // Block D0 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![tx0.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) + .expect("1. Imported"); + let header = pool.api().push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); block_on(pool.maintain(block_event(header))); @@ -812,20 +739,18 @@ fn resubmit_from_retracted_fork() { // Block E0 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block(3, vec![tx1.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) + .expect("1. Imported"); + let header = pool.api().push_block(3, vec![tx1.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); } // Block F0 let f0 = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx2.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block(4, vec![tx2.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx2.clone())) + .expect("1. Imported"); + let header = pool.api().push_block(4, vec![tx2.clone()], true); block_on(pool.maintain(block_event(header.clone()))); assert_eq!(pool.status().ready, 0); header.hash() @@ -833,30 +758,27 @@ fn resubmit_from_retracted_fork() { // Block D1 let d1 = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx3.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![tx3.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx3.clone())) + .expect("1. Imported"); + let header = pool.api().push_block(2, vec![tx3.clone()], true); assert_eq!(pool.status().ready, 1); header.hash() }; // Block E1 let e1 = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block_with_parent(d1.clone(), vec![tx4.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone())) + .expect("1. 
Imported"); + let header = pool.api().push_block_with_parent(d1.clone(), vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() }; // Block F1 let f1_header = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block_with_parent(e1.clone(), vec![tx5.clone()], true); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone())) + .expect("1. Imported"); + let header = pool.api().push_block_with_parent(e1.clone(), vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. assert_eq!(pool.status().ready, 3); @@ -867,7 +789,7 @@ fn resubmit_from_retracted_fork() { let expected_ready = vec![tx3, tx4, tx5].iter().map(Encode::encode).collect::>(); assert_eq!(expected_ready, ready); - let event = block_event_with_retracted(f1_header, f0, &*pool.api); + let event = block_event_with_retracted(f1_header, f0, &*pool.api()); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 3); @@ -888,7 +810,7 @@ fn ready_set_should_not_resolve_before_block_update() { #[test] fn ready_set_should_resolve_after_block_update() { let (pool, _guard, _notifier) = maintained_pool(); - let header = pool.api.push_block(1, vec![], true); + let header = pool.api().push_block(1, vec![], true); let xt1 = uxt(Alice, 209); @@ -901,7 +823,7 @@ fn ready_set_should_resolve_after_block_update() { #[test] fn ready_set_should_eventually_resolve_when_block_update_arrives() { let (pool, _guard, _notifier) = maintained_pool(); - let header = pool.api.push_block(1, vec![], true); + let header = pool.api().push_block(1, vec![], true); let xt1 = uxt(Alice, 209); @@ -917,14 +839,14 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { block_on(pool.maintain(block_event(header))); - match ready_set_future.poll_unpin(&mut context) { + match ready_set_future.poll_unpin(&mut context) { Poll::Pending => { panic!("Ready set should become ready after block update!"); }, Poll::Ready(iterator) => { let data = iterator.collect::>(); assert_eq!(data.len(), 1); - } + }, } } @@ -935,21 +857,26 @@ fn should_not_accept_old_signatures() { let client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new(client, None))).0 + BasicPool::new_test(Arc::new(FullChainApi::new( + client, + None, + &sp_core::testing::TaskExecutor::new(), + ))) + .0, ); - let transfer = Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 0, - amount: 1, - }; + let transfer = Transfer { from: Alice.into(), to: Bob.into(), nonce: 0, amount: 1 }; let _bytes: sp_core::sr25519::Signature = transfer.using_encoded(|e| Alice.sign(e)).into(); // generated with schnorrkel 0.1.1 from `_bytes` - let old_singature = sp_core::sr25519::Signature::try_from(&hex::decode( - "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108" - ).expect("hex invalid")[..]).expect("signature construction failed"); + let old_singature = sp_core::sr25519::Signature::try_from( + &hex::decode( + "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426\ + cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108", + ) + .expect("hex invalid")[..], + ) + .expect("signature construction failed"); let xt = Extrinsic::Transfer { transfer, @@ -959,9 +886,9 @@ fn should_not_accept_old_signatures() { assert_matches::assert_matches!( 
block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())), - Err(error::Error::Pool( - sp_transaction_pool::error::Error::InvalidTransaction(InvalidTransaction::BadProof) - )), + Err(error::Error::Pool(sc_transaction_pool_api::error::Error::InvalidTransaction( + InvalidTransaction::BadProof + ))), "Should be invalid transaction with bad proof", ); } @@ -971,7 +898,12 @@ fn import_notification_to_pool_maintain_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new(client.clone(), None))).0 + BasicPool::new_test(Arc::new(FullChainApi::new( + client.clone(), + None, + &sp_core::testing::TaskExecutor::new(), + ))) + .0, ); // Prepare the extrisic, push it to the pool and check that it was added. @@ -985,7 +917,7 @@ fn import_notification_to_pool_maintain_works() { let mut block_builder = client.new_block(Default::default()).unwrap(); block_builder.push(xt).unwrap(); let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Get the notification of the block import and maintain the pool with it, // Now, the pool should not contain any transactions. @@ -1002,21 +934,13 @@ fn pruning_a_transaction_should_remove_it_from_best_transaction() { let xt1 = Extrinsic::IncludeData(Vec::new()); block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); - let header = pool.api.push_block(1, vec![xt1.clone()], true); + assert_eq!(pool.status().ready, 1); + let header = pool.api().push_block(1, vec![xt1.clone()], true); // This will prune `xt1`. block_on(pool.maintain(block_event(header))); - // Submit the tx again. - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("2. Imported"); - - let mut iterator = block_on(pool.ready_at(1)); - - assert_eq!(iterator.next().unwrap().data, xt1.clone()); - - // If the tx was not removed from the best txs, the tx would be - // returned a second time by the iterator. - assert!(iterator.next().is_none()); + assert_eq!(pool.status().ready, 0); } #[test] @@ -1028,13 +952,58 @@ fn only_revalidate_on_best_block() { block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(1, vec![], true); + let header = pool.api().push_block(1, vec![], true); - pool.api.push_block(2, vec![], false); - pool.api.push_block(2, vec![], false); + pool.api().push_block(2, vec![], false); + pool.api().push_block(2, vec![], false); block_on(pool.maintain(block_event(header))); block_on(notifier.next()); assert_eq!(pool.status().ready, 1); } + +#[test] +fn stale_transactions_are_pruned() { + sp_tracing::try_init_simple(); + + // Our initial transactions + let xts = vec![ + Transfer { from: Alice.into(), to: Bob.into(), nonce: 1, amount: 1 }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 2, amount: 1 }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 3, amount: 1 }, + ]; + + let (pool, _guard, _notifier) = maintained_pool(); + + xts.into_iter().for_each(|xt| { + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.into_signed_tx())) + .expect("1. 
Imported"); + }); + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 3); + + // Almost the same as our initial transactions, but with some different `amount`s to make them + // generate a different hash + let xts = vec![ + Transfer { from: Alice.into(), to: Bob.into(), nonce: 1, amount: 2 }.into_signed_tx(), + Transfer { from: Alice.into(), to: Bob.into(), nonce: 2, amount: 2 }.into_signed_tx(), + Transfer { from: Alice.into(), to: Bob.into(), nonce: 3, amount: 2 }.into_signed_tx(), + ]; + + // Import block + let header = pool.api().push_block(1, xts, true); + block_on(pool.maintain(block_event(header))); + // The imported transactions have a different hash and should not evict our initial + // transactions. + assert_eq!(pool.status().future, 3); + + // Import enough blocks to make our transactions stale + for n in 1..66 { + let header = pool.api().push_block(n, vec![], true); + block_on(pool.maintain(block_event(header))); + } + + assert_eq!(pool.status().future, 0); + assert_eq!(pool.status().ready, 0); +} diff --git a/client/transaction-pool/tests/revalidation.rs b/client/transaction-pool/tests/revalidation.rs new file mode 100644 index 0000000000000..b2c8225b78f58 --- /dev/null +++ b/client/transaction-pool/tests/revalidation.rs @@ -0,0 +1,32 @@ +use futures::executor::block_on; +use sc_transaction_pool::test_helpers::{Pool, RevalidationQueue}; +use sc_transaction_pool_api::TransactionSource; +use sp_runtime::generic::BlockId; +use std::sync::Arc; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::{uxt, TestApi}; + +fn setup() -> (Arc, Pool) { + let test_api = Arc::new(TestApi::empty()); + let pool = Pool::new(Default::default(), true.into(), test_api.clone()); + (test_api, pool) +} + +#[test] +fn smoky() { + let (api, pool) = setup(); + let pool = Arc::new(pool); + let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); + + let uxt = uxt(Alice, 0); + let uxt_hash = + block_on(pool.submit_one(&BlockId::number(0), TransactionSource::External, uxt.clone())) + .expect("Should be valid"); + + block_on(queue.revalidate_later(0, vec![uxt_hash])); + + // revalidated in sync offload 2nd time + assert_eq!(api.validation_requests().len(), 2); + // number of ready + assert_eq!(pool.validated_pool().status().ready, 1); +} diff --git a/primitives/utils/Cargo.toml b/client/utils/Cargo.toml similarity index 72% rename from primitives/utils/Cargo.toml rename to client/utils/Cargo.toml index 80329d2e59ea9..99765dd501dd5 100644 --- a/primitives/utils/Cargo.toml +++ b/client/utils/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "sp-utils" -version = "2.0.0" +name = "sc-utils" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -10,10 +10,9 @@ description = "I/O for Substrate runtimes" readme = "README.md" [dependencies] -futures = "0.3.4" -futures-core = "0.3.4" +futures = "0.3.9" lazy_static = "1.4.0" -prometheus = { version = "0.10.0", default-features = false } +prometheus = { version = "0.11.0", default-features = false } futures-timer = "3.0.2" [features] diff --git a/client/utils/README.md b/client/utils/README.md new file mode 100644 index 0000000000000..2da70f09ccbc5 --- /dev/null +++ b/client/utils/README.md @@ -0,0 +1,20 @@ +Utilities Primitives for Substrate + +## Features + +### metered + +This feature changes the behaviour of the function `mpsc::tracing_unbounded`. +With the disabled feature this function is an alias to `futures::channel::mpsc::unbounded`. 
+However, when the feature is enabled it creates wrapper types around `UnboundedSender`
+and `UnboundedReceiver` that register every `send`/`received`/`dropped` action that happens
+on the channel.
+
+This feature also creates and registers a Prometheus counter vector named
+`unbounded_channel_len` with the labels:
+
+| Label        | Description                                   |
+| ------------ | --------------------------------------------- |
+| entity       | Name of channel passed to `tracing_unbounded` |
+| action       | One of `send`/`received`/`dropped`            |
+
+License: Apache-2.0
diff --git a/client/utils/src/lib.rs b/client/utils/src/lib.rs
new file mode 100644
index 0000000000000..b49cd60d67b13
--- /dev/null
+++ b/client/utils/src/lib.rs
@@ -0,0 +1,41 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Utilities Primitives for Substrate
+//!
+//! # Features
+//!
+//! ## metered
+//!
+//! This feature changes the behaviour of the function `mpsc::tracing_unbounded`.
+//! With the disabled feature this function is an alias to `futures::channel::mpsc::unbounded`.
+//! However, when the feature is enabled it creates wrapper types around `UnboundedSender`
+//! and `UnboundedReceiver` that register every `send`/`received`/`dropped` action that
+//! happens on the channel.
+//!
+//! This feature also creates and registers a Prometheus counter vector named
+//! `unbounded_channel_len` with the labels:
+//!
+//! | Label        | Description                                   |
+//! | ------------ | --------------------------------------------- |
+//! | entity       | Name of channel passed to `tracing_unbounded` |
+//! | action       | One of `send`/`received`/`dropped`            |
+
+pub mod metrics;
+pub mod mpsc;
+pub mod status_sinks;
diff --git a/client/utils/src/metrics.rs b/client/utils/src/metrics.rs
new file mode 100644
index 0000000000000..85ccce626bc25
--- /dev/null
+++ b/client/utils/src/metrics.rs
@@ -0,0 +1,57 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! 
Metering primitives and globals + +use lazy_static::lazy_static; +use prometheus::{ + core::{AtomicU64, GenericCounter, GenericGauge}, + Error as PrometheusError, Registry, +}; + +#[cfg(feature = "metered")] +use prometheus::{core::GenericCounterVec, Opts}; + +lazy_static! { + pub static ref TOKIO_THREADS_TOTAL: GenericCounter = + GenericCounter::new("tokio_threads_total", "Total number of threads created") + .expect("Creating of statics doesn't fail. qed"); + pub static ref TOKIO_THREADS_ALIVE: GenericGauge = + GenericGauge::new("tokio_threads_alive", "Number of threads alive right now") + .expect("Creating of statics doesn't fail. qed"); +} + +#[cfg(feature = "metered")] +lazy_static! { + pub static ref UNBOUNDED_CHANNELS_COUNTER : GenericCounterVec = GenericCounterVec::new( + Opts::new("unbounded_channel_len", "Items in each mpsc::unbounded instance"), + &["entity", "action"] // 'name of channel, send|received|dropped + ).expect("Creating of statics doesn't fail. qed"); + +} + +/// Register the statics to report to registry +pub fn register_globals(registry: &Registry) -> Result<(), PrometheusError> { + registry.register(Box::new(TOKIO_THREADS_ALIVE.clone()))?; + registry.register(Box::new(TOKIO_THREADS_TOTAL.clone()))?; + + #[cfg(feature = "metered")] + registry.register(Box::new(UNBOUNDED_CHANNELS_COUNTER.clone()))?; + + Ok(()) +} diff --git a/primitives/utils/src/mpsc.rs b/client/utils/src/mpsc.rs similarity index 63% rename from primitives/utils/src/mpsc.rs rename to client/utils/src/mpsc.rs index 70baa006bdcdc..1739af5e9015c 100644 --- a/primitives/utils/src/mpsc.rs +++ b/client/utils/src/mpsc.rs @@ -1,19 +1,20 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . //! 
Features to meter unbounded channels @@ -25,22 +26,26 @@ mod inner { pub type TracingUnboundedReceiver = UnboundedReceiver; /// Alias `mpsc::unbounded` - pub fn tracing_unbounded(_key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { + pub fn tracing_unbounded( + _key: &'static str, + ) -> (TracingUnboundedSender, TracingUnboundedReceiver) { mpsc::unbounded() } } - #[cfg(feature = "metered")] mod inner { - //tracing implementation - use futures::channel::mpsc::{self, - UnboundedReceiver, UnboundedSender, - TryRecvError, TrySendError, SendError + // tracing implementation + use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; + use futures::{ + channel::mpsc::{ + self, SendError, TryRecvError, TrySendError, UnboundedReceiver, UnboundedSender, + }, + sink::Sink, + stream::{FusedStream, Stream}, + task::{Context, Poll}, }; - use futures::{sink::Sink, task::{Poll, Context}, stream::{Stream, FusedStream}}; use std::pin::Pin; - use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; /// Wrapper Type around `UnboundedSender` that increases the global /// measure when a message is added @@ -61,9 +66,11 @@ mod inner { /// Wrapper around `mpsc::unbounded` that tracks the in- and outflow via /// `UNBOUNDED_CHANNELS_COUNTER` - pub fn tracing_unbounded(key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { + pub fn tracing_unbounded( + key: &'static str, + ) -> (TracingUnboundedSender, TracingUnboundedReceiver) { let (s, r) = mpsc::unbounded(); - (TracingUnboundedSender(key.clone(), s), TracingUnboundedReceiver(key,r)) + (TracingUnboundedSender(key, s), TracingUnboundedReceiver(key, r)) } impl TracingUnboundedSender { @@ -94,8 +101,8 @@ mod inner { /// Proxy function to mpsc::UnboundedSender pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { - self.1.unbounded_send(msg).map(|s|{ - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"send"]).inc(); + self.1.unbounded_send(msg).map(|s| { + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "send"]).inc(); s }) } @@ -107,25 +114,23 @@ mod inner { } impl TracingUnboundedReceiver { - fn consume(&mut self) { // consume all items, make sure to reflect the updated count let mut count = 0; loop { if self.1.is_terminated() { - break; + break } match self.try_next() { Ok(Some(..)) => count += 1, - _ => break + _ => break, } } // and discount the messages if count > 0 { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"dropped"]).inc_by(count); + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "dropped"]).inc_by(count); } - } /// Proxy function to mpsc::UnboundedReceiver @@ -140,7 +145,7 @@ mod inner { pub fn try_next(&mut self) -> Result, TryRecvError> { self.1.try_next().map(|s| { if s.is_some() { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"received"]).inc(); + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "received"]).inc(); } s }) @@ -158,21 +163,16 @@ mod inner { impl Stream for TracingUnboundedReceiver { type Item = T; - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let s = self.get_mut(); match Pin::new(&mut s.1).poll_next(cx) { Poll::Ready(msg) => { if msg.is_some() { UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[s.0, "received"]).inc(); - } + } Poll::Ready(msg) - } - Poll::Pending => { - Poll::Pending - } + }, + Poll::Pending => Poll::Pending, } } } @@ -186,24 +186,15 @@ mod inner { impl Sink for TracingUnboundedSender { type Error = SendError; - fn poll_ready( - 
self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { TracingUnboundedSender::poll_ready(&*self, cx) } - fn start_send( - mut self: Pin<&mut Self>, - msg: T, - ) -> Result<(), Self::Error> { + fn start_send(mut self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { TracingUnboundedSender::start_send(&mut *self, msg) } - fn poll_flush( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -219,33 +210,23 @@ mod inner { impl Sink for &TracingUnboundedSender { type Error = SendError; - fn poll_ready( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { TracingUnboundedSender::poll_ready(*self, cx) } fn start_send(self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { - self.unbounded_send(msg) - .map_err(TrySendError::into_send_error) + self.unbounded_send(msg).map_err(TrySendError::into_send_error) } - fn poll_flush( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn poll_close( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { self.close_channel(); Poll::Ready(Ok(())) } } } -pub use inner::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +pub use inner::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; diff --git a/primitives/utils/src/status_sinks.rs b/client/utils/src/status_sinks.rs similarity index 86% rename from primitives/utils/src/status_sinks.rs rename to client/utils/src/status_sinks.rs index 65a560af4eaa5..a87f0e0ad6e8f 100644 --- a/primitives/utils/src/status_sinks.rs +++ b/client/utils/src/status_sinks.rs @@ -1,23 +1,29 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. -// Substrate is distributed in the hope that it will be useful, +// This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// along with this program. If not, see . use crate::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use futures::{prelude::*, lock::Mutex}; +use futures::{lock::Mutex, prelude::*}; use futures_timer::Delay; -use std::{pin::Pin, task::{Poll, Context}, time::Duration}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; /// Holds a list of `UnboundedSender`s, each associated with a certain time period. Every time the /// period elapses, we push an element on the sender. 
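A minimal usage sketch of this interface, modelled on the `works` test at the bottom of this file; the `push`/`next` entry points are assumed from the surrounding hunks, so treat this as an illustration rather than a verbatim API reference:

```rust
use sc_utils::{mpsc::tracing_unbounded, status_sinks::StatusSinks};
use futures::prelude::*;
use std::time::Duration;

fn main() {
    let status_sinks = StatusSinks::new();

    // Register a sink: roughly every 100ms the holder is asked for a value.
    let (tx, rx) = tracing_unbounded("status-demo");
    status_sinks.push(Duration::from_millis(100), tx);

    futures::executor::block_on(futures::future::select(
        // Producer: answer each elapsed period with a fresh status value.
        Box::pin(async {
            loop {
                status_sinks.next().await.send(42u32);
            }
        }),
        // Consumer: the receiving end sees one value per elapsed period.
        Box::pin(async move {
            let values: Vec<u32> = rx.take(3).collect().await;
            assert_eq!(values, [42, 42, 42]);
        }),
    ));
}
```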
@@ -43,16 +49,19 @@ struct YieldAfter<T> {
 sender: Option<TracingUnboundedSender<T>>,
 }
 
+impl<T> Default for StatusSinks<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
 impl<T> StatusSinks<T> {
 /// Builds a new empty collection.
 pub fn new() -> StatusSinks<T> {
 let (entries_tx, entries_rx) = tracing_unbounded("status-sinks-entries");
 
 StatusSinks {
- inner: Mutex::new(Inner {
- entries: stream::FuturesUnordered::new(),
- entries_rx,
- }),
+ inner: Mutex::new(Inner { entries: stream::FuturesUnordered::new(), entries_rx }),
 entries_tx,
 }
 }
@@ -78,8 +87,8 @@ impl<T> StatusSinks<T> {
 let inner = &mut *inner;
 
 loop {
- // Future that produces the next ready entry in `entries`, or doesn't produce anything if
- // the list is empty.
+ // Future that produces the next ready entry in `entries`, or doesn't produce anything
+ // if the list is empty.
 let next_ready_entry = {
 let entries = &mut inner.entries;
 async move {
@@ -93,7 +102,7 @@ impl<T> StatusSinks<T> {
 }
 };
 
- futures::select!{
+ futures::select! {
 new_entry = inner.entries_rx.next() => {
 if let Some(new_entry) = new_entry {
 inner.entries.push(new_entry);
@@ -142,7 +151,7 @@ impl<'a, T> Drop for ReadySinkEvent<'a, T> {
 fn drop(&mut self) {
 if let Some(sender) = self.sender.take() {
 if sender.is_closed() {
- return;
+ return
 }
 
 let _ = self.sinks.entries_tx.unbounded_send(YieldAfter {
@@ -163,18 +172,20 @@ impl<T> futures::Future for YieldAfter<T> {
 match Pin::new(&mut this.delay).poll(cx) {
 Poll::Pending => Poll::Pending,
 Poll::Ready(()) => {
- let sender = this.sender.take()
+ let sender = this
+ .sender
+ .take()
 .expect("sender is always Some unless the future is finished; qed");
 Poll::Ready((sender, this.interval))
- }
+ },
 }
 }
 }
 
 #[cfg(test)]
 mod tests {
- use crate::mpsc::tracing_unbounded;
 use super::StatusSinks;
+ use crate::mpsc::tracing_unbounded;
 use futures::prelude::*;
 use std::time::Duration;
 
@@ -201,7 +212,7 @@ mod tests {
 Box::pin(async {
 let items: Vec<i32> = rx.take(3).collect().await;
 assert_eq!(items, [6, 7, 8]);
- })
+ }),
 ));
 }
 }
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 1582eee5d9265..c867a245739ff 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -6,6 +6,159 @@ The format is based on [Keep a Changelog].
 
 ## Unreleased
 
+## 2.0.1 -> 3.0.0 - Apollo 14
+
+Most notably, this is the first release of the new FRAME (2.0) with its new macro syntax, some changes in types, and pallet versioning. This release also incorporates the faster and improved version 2.0 of parity-scale-codec and upgraded dependencies all-around. While the `FinalityTracker` pallet has been dropped, this release marks the first public appearance of a few new pallets, too: Bounties, Lottery, Tips (extracted from the `Treasury`-pallet, see #7536) and Merkle-Mountain-Ranges (MMR).
+
+On the client side, the most notable changes are around the keystore, making it async and switching to a different signing model allowing for remote-signing to be implemented; and various changes to improve networking and light-client support, like adding the Grandpa warp sync request-response protocol (#7711).
+
+_Contracts_: Please note that the contracts pallet _is not part_ of this release. The pallet is not yet ready and will be released separately in the coming weeks. The currently released contracts pallet _is not compatible_ with the new FRAME, so if you need the contracts pallet, we recommend you wait with the upgrade until it has been released, too.
+
+### Upgrade instructions
+
+Not too much has changed on the top and API level for developing Substrate between 2.0 and 3.0. 
The easiest and quickest path for upgrading is just to take the latest node-template and try applying your changes to it:
+1. take a diff between 2.0 and your changes
+2. store that diff
+3. remove everything, copy over the 3.0 node-template
+4. try re-applying your diff, manually, a hunk at a time.
+
+If that doesn't work for you, we are working on an in-depth guide for all major changes that took place and how you need to adapt your code for them. [You can find the upgrade guide under `docs/` in the repo](https://github.com/paritytech/substrate/blob/master/docs/Upgrading-2.0-to-3.0.md); if you have further questions or problems, please [feel free to ask in the GitHub discussion board](https://github.com/paritytech/substrate/discussions).
+
+
+Runtime
+-------
+
+* contracts: Charge rent for code storage (#7935)
+* contracts: Emit event on contract termination (#8014)
+* Fix elections-phragmen and proxy issue (#7040)
+* Allow validators to block and kick their nominator set. (#7930)
+* Decouple Staking and Election - Part1: Support traits (#7908)
+* Introduces account existence providers reference counting (#7363)
+* contracts: Cap the surcharge reward by the amount of rent that was paid by a contract (#7870)
+* Use checked math when calculating storage size (#7885)
+* Fix clear prefix check to avoid erasing child trie roots. (#7848)
+* contracts: Collect rent for the first block during deployment (#7847)
+* contracts: Add configurable per-storage item cost (#7819)
+* babe: expose next epoch data (#7829)
+* fix: remove `_{ }` syntax from benchmark macro (#7822)
+* Define ss58 prefix inside the runtime (#7810)
+* Allow council to slash treasury tip (#7753)
+* Don't allow self proxies (#7803)
+* add a `current_epoch` to BabeApi (#7789)
+* Add `pallet` attribute macro to declare pallets (#6877)
+* Make it possible to calculate the storage root as often as you want (#7714)
+* Issue 7143 | Refactor Treasury Pallet into Bounties, Tips, and Proposals (#7536)
+* Participating in Council Governance is Free for First Time Voters and Successful Closing (#7661)
+* Streamline frame_system weight parametrization (#6629)
+* Features needed for reserve-backed stablecoins (#7152)
+* `sudo_as` should return a result (#7620)
+* More Extensible Multiaddress Format (#7380)
+* Fix `on_runtime_upgrade` weight recording (#7480)
+* Implement batch_all and update Utility pallet for weight refunds (#7188)
+* Fix wrong outgoing calculation in election (#7384)
+* Implements pallet versioning (#7208)
+* Runtime worker threads (#7089)
+* Allow `schedule_after(0, ...)` to work (#7284)
+* Fix offchain election to respect the weight (#7215)
+* Fix weight for inner call with new origin (#7196)
+* Move proxies migration (#7205)
+* Introduce `cancel_proposal` to rid us of those pesky proposals (#7111)
+
+Client
+------
+
+* Remove backwards-compatibility networking hack (#8068)
+* Extend SS58 network identifiers (#8039)
+* Update dependencies ahead of next release (#8015)
+* Storage chains: serve transactions over IPFS/bitswap (#7963)
+* Add a send_request function to NetworkService (#8008)
+* Rename system_networkState to system_unstable_networkState (#8001)
+* Allow transaction for offchain indexing (#7290)
+* Grandpa warp sync request-response protocol (#7711)
+* Add explicit limits to notifications sizes and adjust yamux buffer size (#7925)
+* Rework priority groups, take 2 (#7700)
+* Define ss58 prefix inside the runtime (#7810)
+* Expand remote keystore interface to allow for hybrid mode (#7628)
+* Allow capping the 
amount of work performed when deleting a child trie (#7671) +* RPC to allow setting the log filter (#7474) +* Remove sc_network::NetworkService::register_notifications_protocol and partially refactor Grandpa tests (#7646) +* minor fix and improvements on localkeystore (#7626) +* contracts: Add `salt` argument to contract instantiation (#7482) +* contracts: Rework contracts_call RPC (#7468) +* Make sure to use the optimized method instead of reading the storage. (#7445) +* WASM Local-blob override (#7317) +* client/network: Allow configuring Kademlia's disjoint query paths (#7356) +* client/network: Remove option to disable yamux flow control (#7358) +* Make `queryStorage` and `storagePairs` unsafe RPC functions (#7342) +* No longer actively open legacy substreams (#7076) +* Make `run_node_until_exit` take a future (#7318) +* Add an system_syncState RPC method (#7315) +* Async keystore + Authority-Discovery async/await (#7000) +* Fixes logging of target names with dashes (#7281) +* Refactor CurrencyToVote (#6896) +* client/network: Stop sending noise legacy handshake (#7211) + +API +--- + +* pallet macro: easier syntax for `#[pallet::pallet]` with `struct Pallet(_)` (#8091) +* WasmExecutor takes a cache directory (#8057) +* Remove PalletInfo impl for () (#8090) +* Migrate assets pallet to new macros (#7984) +* contracts: Make ChainExtension trait generic over the runtime (#8003) +* Decouple the session validators from im-online (#7127) +* Update parity-scale-codec to 2.0 (#7994) +* Merkle Mountain Range pallet improvements (#7891) +* Cleaner GRANDPA RPC API for proving finality (#7339) +* Migrate frame-system to pallet attribute macro (#7898) +* Introduces account existence providers reference counting (#7363) +* contracts: Lazy storage removal (#7740) +* contracts: Allow runtime authors to define a chain extension (#7548) +* Define ss58 prefix inside the runtime (#7810) +* Add `pallet` attribute macro to declare pallets (#6877) +* Add keccak-512 to host functions. (#7531) +* Merkle Mountain Range pallet (#7312) +* Allow capping the amount of work performed when deleting a child trie (#7671) +* add an upgrade_keys method for pallet-session (#7688) +* Streamline frame_system weight parametrization (#6629) +* Rename pallet trait `Trait` to `Config` (#7599) +* contracts: Add `salt` argument to contract instantiation (#7482) +* pallet-evm: move to Frontier (Part IV) (#7573) +* refactor subtrait/elevated trait as not needed (#7497) +* Allow BabeConsensusDataProvider fork existing chain (#7078) +* decouple transaction payment and currency (#6912) +* contracts: Refactor the runtime API in order to simplify node integration (#7409) +* client/authority-discovery: Remove sentry node logic (#7368) +* client/network: Make NetworkService::set_priority_group async (#7352) +* *: Bump async-std to v1.6.5 (#7306) +* babe: make secondary slot randomness available on-chain (#7053) +* allow where clause in decl_error (#7324) +* reschedule (#6860) +* SystemOrigin trait (#7226) +* permit setting treasury pallet initial funding through genesis (#7214) + +Runtime Migrations +------------------ + +* Migrate assets pallet to new macros (#7984) +* Fix elections-phragmen and proxy issue (#7040) +* Allow validators to block and kick their nominator set. (#7930) +* Migrate frame-system to pallet attribute macro (#7898) +* Implements pallet versioning (#7208) +* Move proxies migration (#7205) + + +## 2.0.0-> 2.0.1 + +Patch release with backports to fix broken nightly builds. 
+Namely contains backports of + +* [#7381: Make Substrate compile with latest nightly](https://github.com/paritytech/substrate/pull/7381) +* [#7238: Fix compilation with environmental on latest nightly](https://github.com/paritytech/substrate/pull/7238) +* [#7395: Make benchmarks compile with latest nightly](https://github.com/paritytech/substrate/pull/7395) +* [#7838: Fix incorrect use of syn::exports](https://github.com/paritytech/substrate/pull/7838) (partially) +* [#7854: Update to futures 0.3.9](https://github.com/paritytech/substrate/pull/7854) + + ## 2.0.0-rc6 -> 2.0.0 – two dot 😮 Runtime @@ -62,7 +215,7 @@ Runtime Migrations Runtime ------- -* Custom Codec Implenetation for NPoS Election (#6720) +* Custom Codec Implementation for NPoS Election (#6720) * Successful `note_imminent_preimage` is free (#6793) * pallet-democracy use of weightinfo (#6783) * Update Balances Pallet to use `WeightInfo` (#6610) @@ -123,7 +276,7 @@ Runtime Client ------ -* Update wasmtime to (almost) lastest master (#6662) +* Update wasmtime to (almost) latest master (#6662) * Update to latest sysinfo prevents leaking fd-handlers (#6708) * Tracing values (#6679) * Graceful shutdown for the task manager (#6654) @@ -156,7 +309,7 @@ Runtime * `pallet-scheduler`: Check that `when` is not in the past (#6480) * Fix `sp-api` handling of multiple arguments (#6484) * Fix issues with `Operational` transactions validity and prioritization. (#6435) -* pallet-atomic-swap: generialized swap action (#6421) +* pallet-atomic-swap: generalized swap action (#6421) * Avoid multisig reentrancy (#6445) * Root origin use no filter by default. Scheduler and Democracy dispatch without asserting BaseCallFilter (#6408) * Scale and increase validator count (#6417) @@ -181,7 +334,7 @@ Client * Remove penalty on duplicate Status message (#6377) * Fix the broken weight multiplier update function (#6334) * client/authority-discovery: Don't add own address to priority group (#6370) -* Split the service initialisation up into seperate functions (#6332) +* Split the service initialisation up into separate functions (#6332) * Fix transaction pool event sending (#6341) * Add a [prefix]_process_start_time_seconds metric (#6315) * new crate sc-light (#6235) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index d9342de399503..42d25a0a228f7 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -18,17 +18,15 @@ # are more recognizable on GitHub, you can use them for mentioning unlike an email. # - The latest matching rule, if multiple, takes precedence. 
-# Block production -/client/basic-authorship/ @NikVolf +# CI +/.maintain/ @paritytech/ci +/.github/ @paritytech/ci +/.gitlab-ci.yml @paritytech/ci # Sandboxing capability of Substrate Runtime /primitives/sr-sandbox/ @pepyakin /primitives/core/src/sandbox.rs @pepyakin -# Transaction pool -/client/transaction-pool/ @NikVolf -/primitives/transaction-pool/ @NikVolf - # Offchain /client/offchain/ @tomusdrw /primitives/offchain/ @tomusdrw @@ -43,10 +41,7 @@ /primitives/consensus/pow/ @sorpaas # Contracts -/frame/contracts/ @pepyakin - -# EVM -/frame/evm/ @sorpaas +/frame/contracts/ @athei # NPoS and election /frame/staking/ @kianenigma @@ -59,10 +54,3 @@ # Transaction weight stuff /frame/support/src/weights.rs @shawntabrizi - -# Authority discovery -/client/authority-discovery/ @mxinden -/frame/authority-discovery/ @mxinden - -# Prometheus endpoint -/utils/prometheus/ @mxinden diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index 491e24aeaec85..b0eaec04455e4 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -10,18 +10,18 @@ Individuals making significant and valuable contributions are given commit-acces There are a few basic ground-rules for contributors (including the maintainer(s) of the project): -. **No `--force` pushes** or modifying the master branch history in any way. If you need to rebase, ensure you do it in your own repo. +. **No `--force` pushes** or modifying the master branch history in any way. If you need to rebase, ensure you do it in your own repo. No rewriting of the history after the code has been shared (e.g. through a Pull-Request). . **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be used for ongoing work. . **All modifications** must be made in a **pull-request** to solicit feedback from other contributors. . A pull-request *must not be merged until CI* has finished successfully. -. Contributors should adhere to the ./STYLE_GUIDE.md[house coding style]. +. Contributors should adhere to the link:STYLE_GUIDE.md[house coding style]. == Merge Process *In General* -A PR needs to be reviewed and approved by project maintainers unless: +A Pull Request (PR) needs to be reviewed and approved by project maintainers unless: - it does not alter any logic (e.g. comments, dependencies, docs), then it may be tagged https://github.com/paritytech/substrate/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+label%3AA2-insubstantial[`insubstantial`] and merged by its author once CI is complete. - it is an urgent fix with no large change to logic, then it may be merged after a non-author contributor has approved the review once CI is complete. @@ -35,12 +35,21 @@ A PR needs to be reviewed and approved by project maintainers unless: *Process:* -. Please tag each PR with exactly one `A`, `B` and `C` label at the minimum. +. Please tag each PR with exactly one `A`, `B`, `C` and `D` label at the minimum. . Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-pleasereview[`A0-pleasereview`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. . If the first review is not an approval, swap `A0-pleasereview` to any label `[A3, A7]` to indicate that the PR has received some feedback, but needs further work. For example. 
https://github.com/paritytech/substrate/labels/A3-inprogress[`A3-inprogress`] is a general indicator that the PR is work in progress and https://github.com/paritytech/substrate/labels/A4-gotissues[`A4-gotissues`] means that it has significant problems that need fixing. Once the work is done, change the label back to `A0-pleasereview`. You might end up swapping a few times back and forth to climb up the A label group. Once a PR is https://github.com/paritytech/substrate/labels/A8-mergeoncegreen[`A8-mergeoncegreen`], it is ready to merge.
-. PRs must be tagged with respect to _release notes_ with https://github.com/paritytech/substrate/labels/B0-silent[`B0-silent`] and `B1-..`. The former indicates that no changes should be mentioned in any release notes. The latter indicates that the changes should be reported in the corresponding release note
-. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/D2-breaksapi[`D2-breaksapi`], when it changes the FRAME or consensus of running system with https://github.com/paritytech/substrate/labels/B3-breaksconsensus[`B3-breaksconsensus`].
-. PRs should be labeled with their release importance via the `C1-C9`.
+. PRs must be tagged with their release notes requirements via the `B1-B9` labels.
+. PRs must be tagged with their release importance via the `C1-C9` labels.
+. PRs must be tagged with their audit requirements via the `D1-D9` labels.
+. PRs that must be backported to a stable branch must be tagged with https://github.com/paritytech/substrate/labels/E0-patchthis[`E0-patchthis`].
+. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E1-runtimemigration`].
+. PRs that introduce irreversible database migrations must be tagged with https://github.com/paritytech/substrate/labels/E2-databasemigration[`E2-databasemigration`].
+. PRs that add host functions must be tagged with https://github.com/paritytech/substrate/labels/E4-newhostfunctions[`E4-newhostfunctions`].
+. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/E5-breaksapi[`E5-breaksapi`].
+. PRs that materially change the FRAME/runtime semantics must be tagged with https://github.com/paritytech/substrate/labels/E6-transactionversion[`E6-transactionversion`].
+. PRs that change the mechanism for block authoring in a backwards-incompatible way must be tagged with https://github.com/paritytech/substrate/labels/E7-breaksauthoring[`E7-breaksauthoring`].
+. PRs that "break everything" must be tagged with https://github.com/paritytech/substrate/labels/E8-breakseverything[`E8-breakseverything`].
+. PRs that block a new release must be tagged with https://github.com/paritytech/substrate/labels/E9-blocker%20%E2%9B%94%EF%B8%8F[`E9-blocker`].
 . PRs should be categorized into projects.
 . No PR should be merged until all reviews' comments are addressed and CI is successful.
@@ -67,12 +76,20 @@ When reviewing a pull request, the end-goal is to suggest useful changes to the
 
 To create a Polkadot companion PR:
 
 . Pull latest Polkadot master (or clone it, if you haven't yet).
-. Override your local cargo config to point to your local substrate (pointing to your WIP branch): place `paths = ["path/to/substrate"]` in `~/.cargo/config`.
+. Override substrate deps to point to your local path or branch using https://github.com/bkchr/diener. (E.g. 
from the polkadot clone dir run `diener patch --crates-to-patch ../substrate --substrate` assuming substrate clone is in a sibling dir. If you do use diener, ensure that you _do not_ commit the changes diener makes to the Cargo.tomls.)
 . Make the changes required and build polkadot locally.
-. Submit all this as a PR against the Polkadot Repo. Link to your Polkadot PR in the _description_ of your Substrate PR as "polkadot companion: [URL]"
+. Submit all this as a PR against the Polkadot Repo.
+. Link to your Polkadot PR in the _description_ of your _Substrate_ PR as "polkadot companion: [URL]"
 . Now you should see that the `check_polkadot` CI job will build your Substrate PR against the mentioned Polkadot branch in your PR description.
-. Wait for reviews on both
-. Once both PRs have been green lit, they can both be merged 🍻.
+. Someone will need to approve the Polkadot PR before the Substrate CI will go green. (The Polkadot CI failing can be ignored as long as the polkadot job in the _substrate_ PR is green).
+. Wait for reviews on both the Substrate and the Polkadot PRs.
+. Once the Substrate PR runs green, a member of the `parity` github group can comment on the Substrate PR with `bot merge` which will:
+  - Merge the Substrate PR.
+  - The bot will push a commit to the Polkadot PR updating its Substrate reference.
+  - If the polkadot PR originates from a fork then a project member may need to press `approve run` on the polkadot PR.
+  - The bot will merge the Polkadot PR once all its CI `{"build_allow_failure":false}` checks are green.
+
+ Note: The merge-bot currently doesn't work with forks on org accounts, only individual accounts.
 
 If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/A7-needspolkadotpr[`A7-needspolkadotpr`] to prevent it from getting automatically merged.
diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/PULL_REQUEST_TEMPLATE.md
index 8ca6ba9b01fe2..77f5f79f60d40 100644
--- a/docs/PULL_REQUEST_TEMPLATE.md
+++ b/docs/PULL_REQUEST_TEMPLATE.md
@@ -14,7 +14,7 @@ Before you submitting, please check that:
 - [ ] Github's project assignment
 - [ ] You mentioned a related issue if this PR is related to it, e.g. `Fixes #228` or `Related #1337`.
 - [ ] You asked any particular reviewers to review. If you aren't sure, start with GH suggestions.
-- [ ] Your PR adheres to [the style guide](https://wiki.parity.io/Substrate-Style-Guide)
+- [ ] Your PR adheres to [the style guide](https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md)
 - In particular, mind the maximal line length of 100 (120 in exceptional circumstances).
 - There is no commented code checked in unless necessary.
 - Any panickers have a proof or are removed.
diff --git a/docs/README.adoc b/docs/README.adoc
index 7f3d50faac7d6..71052420b1aa9 100644
--- a/docs/README.adoc
+++ b/docs/README.adoc
@@ -308,28 +308,6 @@ cargo run --release \-- \
 
 Additional Substrate CLI usage options are available and may be shown by running `cargo run \-- --help`.
 
-=== WASM binaries
-
-The WASM binaries are built during the normal `cargo build` process. To control the WASM binary building,
-we support multiple environment variables:
-
-* `SKIP_WASM_BUILD` - Skips building any WASM binary. This is useful when only native should be recompiled.
-* `BUILD_DUMMY_WASM_BINARY` - Builds dummy WASM binaries. These dummy binaries are empty and useful
-  for `cargo check` runs.
-* `WASM_BUILD_TYPE` - Sets the build type for building WASM binaries. 
Supported values are `release` or `debug`.
-  By default the build type is equal to the build type used by the main build.
-* `FORCE_WASM_BUILD` - Can be set to force a WASM build. On subsequent calls the value of the variable
-  needs to change. As WASM builder instructs `cargo` to watch for file changes
-  this environment variable should only be required in certain circumstances.
-* `WASM_TARGET_DIRECTORY` - Will copy release build WASM binary to the given directory. The path needs
-  to be absolute.
-* `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary.
-* `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build.
-
-Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`.
-Where `PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will
-be `NODE_RUNTIME`.
-
 [[flaming-fir]]
 === Joining the Flaming Fir Testnet
diff --git a/docs/SECURITY.md b/docs/SECURITY.md
index 7240218fa8729..19f5b145feb5e 100644
--- a/docs/SECURITY.md
+++ b/docs/SECURITY.md
@@ -1,3 +1,4 @@
+
 # Security Policy
 
 Parity Technologies is committed to resolving security vulnerabilities in our software quickly and carefully. We take the necessary steps to minimize risk, provide timely information, and deliver vulnerability fixes and mitigations required to address security issues.
diff --git a/docs/STYLE_GUIDE.md b/docs/STYLE_GUIDE.md
index e6f217f2b4859..ea070cdbc59f3 100644
--- a/docs/STYLE_GUIDE.md
+++ b/docs/STYLE_GUIDE.md
@@ -2,6 +2,9 @@
 title: Style Guide for Rust in Substrate
 ---
 
+Where possible these styles are enforced by settings in `rustfmt.toml`, so if you run `cargo fmt`
+then you will adhere to most of these style guidelines automatically.
+
 # Formatting
 
 - Indent using tabs.
diff --git a/docs/Structure.adoc b/docs/Structure.adoc
index c8cd63506a347..6c810a83c51b9 100644
--- a/docs/Structure.adoc
+++ b/docs/Structure.adoc
@@ -33,7 +33,7 @@ In the lowest level, Substrate defines primitives, interfaces and traits to impl
 === Client
 
 * _found in_: `/client`
-* _crates prefix_: `substrate-`
+* _crates prefix_: `sc-`
 * _constraints_:
 ** crates may not (dev-)depend on any `frame-`-crates
diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md
new file mode 100644
index 0000000000000..45da3811220f4
--- /dev/null
+++ b/docs/Upgrading-2.0-to-3.0.md
@@ -0,0 +1,1057 @@
+# Upgrading from Substrate 2.0 to 3.0
+
+An incomplete guide.
+
+## Refreshing the node-template
+
+Not much has changed on the top and API level for developing Substrate between 2.0 and 3.0. If you've made only small changes to the node-template, we recommend doing the following - it is the easiest and quickest path forward:
+1. take a diff between 2.0 and your changes
+2. store that diff
+3. remove everything, copy over the 3.0 node-template
+4. try re-applying your diff, manually, a hunk at a time.
+
+## In-Depth guide on the changes
+
+If you've made significant changes or diverged from the node-template a lot, starting out with that probably won't help. For that case, we'll take a look at all changes between 2.0 and 3.0 to the fully-implemented node and explain them one by one, so you can follow up on what needs to change for your node.
+
+_Note_: Of course, step 1 is to upgrade your `Cargo.toml`'s to use the latest version of Substrate and all dependencies. 
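+
+As a purely hypothetical illustration of that first step (the crate names and old versions below are placeholders - your actual dependency list will differ), the bump in a `Cargo.toml` looks like:
+
+```diff
+-sp-core = "2.0.1"
+-sc-service = "0.8.1"
++sp-core = "3.0.0"
++sc-service = "0.9.0"
+```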
+
+We'll be taking the diff from 2.0.1 to 3.0.0 on `bin/node` as the baseline of what has changed between these two versions in terms of adapting one's code base. We will not be covering the changes made to the tests and benchmarking, as they are mostly reactions to the other changes.
+
+### Versions upgrade
+
+First and foremost, you have to upgrade the versions of the dependencies, of course: that's `0.8.x -> 0.9.0` and `2.0.x -> 3.0.0` for all `sc-`, `sp-`, `frame-`, and `pallet-` crates coming from Parity. Furthermore, this release also upgraded its own dependencies; most notably, we are now using `parity-scale-codec 2.0`, `parking_lot 0.11` and `substrate-wasm-builder 3.0.0` (as a build dependency). All other dependency upgrades should resolve automatically or are just internal. However, you might see errors where a type coming from one of your other dependencies and a type from one of our upgraded crates don't match up; if so, please check the version of said dependency - we've probably upgraded it.
+
+### WASM-Builder
+
+The new version of wasm-builder has gotten a bit smarter and a lot faster (you should definitely switch). Once you've upgraded the dependency, in most cases you just have to remove the now obsolete `with_wasm_builder_from_crates_or_path`-function and you are good to go:
+
+```diff: rust
+--- a/bin/node/runtime/build.rs
++++ b/bin/node/runtime/build.rs
+@@ -15,12 +15,11 @@
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+ 
+-use wasm_builder_runner::WasmBuilder;
++use substrate_wasm_builder::WasmBuilder;
+ 
+ fn main() {
+ WasmBuilder::new()
+ .with_current_project()
+- .with_wasm_builder_from_crates_or_path("2.0.0", "../../../utils/wasm-builder")
+ .export_heap_base()
+ .import_memory()
+ .build()
+```
+
+### Runtime
+
+#### FRAME 2.0
+
+The new FRAME 2.0 macros are a lot nicer to use and easier to read. While we were on that change though, we also cleaned up some mainly internal names and traits. The old macros still work and also produce the new structure; however, when plugging all that together as a Runtime, there are some things we have to adapt now:
+
+##### `::Trait for Runtime` becomes `::Config for Runtime`
+
+The most visible and significant change is that the macros no longer generate the `$pallet::Trait` but now a much more aptly named `$pallet::Config`. Thus, we need to rename all `::Trait for Runtime` into `::Config for Runtime`, e.g. for the `sudo` pallet we must do:
+
+```diff
+-impl pallet_sudo::Trait for Runtime {
++impl pallet_sudo::Config for Runtime {
+```
+
+The same goes for all `<T as Trait>` and alike, which simply becomes `<T as Config>`.
+
+#### SS58 Prefix is now a runtime param
+
+Since [#7810](https://github.com/paritytech/substrate/pull/7810) we don't define the ss58 prefix in the chainspec anymore but have moved it into the runtime. Namely, `frame_system` now needs a new `SS58Prefix`, which in the substrate node we have defined for ourselves as `pub const SS58Prefix: u8 = 42;`. Use your own chain-specific value there.
+
+#### Weight Definition
+
+`type WeightInfo` has changed: instead of `weights::pallet_$name::WeightInfo`, it is now bound to the Runtime as `pallet_$name::weights::SubstrateWeight<Runtime>`. As a result we have to change the type definitions everywhere in our Runtime accordingly:
+
+```diff
+- type WeightInfo = weights::pallet_$name::WeightInfo;
++ type WeightInfo = pallet_$name::weights::SubstrateWeight<Runtime>;
+```
+
+e.g. 
+```diff +- type WeightInfo = weights::pallet_collective::WeightInfo; ++ type WeightInfo = pallet_collective::weights::SubstrateWeight; +``` +and + +```diff +- type WeightInfo = weights::pallet_proxy::WeightInfo; ++ type WeightInfo = pallet_proxy::weights::SubstrateWeight; +``` + +And update the overall definition for weights on frame and a few related types and runtime parameters: + +```diff= + +-const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_percent(10); ++/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. ++/// This is used to limit the maximal weight of a single extrinsic. ++const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); ++/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used ++/// by Operational extrinsics. ++const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); ++/// We allow for 2 seconds of compute with a 6 second average block time. ++const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; ++ + parameter_types! { + pub const BlockHashCount: BlockNumber = 2400; +- /// We allow for 2 seconds of compute with a 6 second average block time. +- pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; +- pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); +- /// Assume 10% of weight for average on_initialize calls. +- pub MaximumExtrinsicWeight: Weight = +- AvailableBlockRatio::get().saturating_sub(AVERAGE_ON_INITIALIZE_WEIGHT) +- * MaximumBlockWeight::get(); +- pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; + pub const Version: RuntimeVersion = VERSION; +-} +- +-const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); +- +-impl frame_system::Trait for Runtime { ++ pub RuntimeBlockLength: BlockLength = ++ BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); ++ pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() ++ .base_block(BlockExecutionWeight::get()) ++ .for_class(DispatchClass::all(), |weights| { ++ weights.base_extrinsic = ExtrinsicBaseWeight::get(); ++ }) ++ .for_class(DispatchClass::Normal, |weights| { ++ weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); ++ }) ++ .for_class(DispatchClass::Operational, |weights| { ++ weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); ++ // Operational transactions have some extra reserved space, so that they ++ // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. 
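++ // (Worked example: with `MAXIMUM_BLOCK_WEIGHT = 2 * WEIGHT_PER_SECOND` and
++ // `NORMAL_DISPATCH_RATIO = 75%`, the reserved space below comes out at 25%,
++ // i.e. roughly half a second of weight kept free for `Operational` extrinsics.)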
++ weights.reserved = Some(
++ MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT
++ );
++ })
++ .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO)
++ .build_or_panic();
++}
++
++const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct());
++
++impl frame_system::Config for Runtime {
+ type BaseCallFilter = frame_support::traits::AllowAll;
++ type BlockWeights = RuntimeBlockWeights;
++ type BlockLength = RuntimeBlockLength;
++ type DbWeight = RocksDbWeight;
+ type Origin = Origin;
+ type Call = Call;
+ type Index = Index;
+@@ -171,25 +198,19 @@ impl frame_system::Trait for Runtime {
+ type Header = generic::Header<BlockNumber, BlakeTwo256>;
+ type Event = Event;
+ type BlockHashCount = BlockHashCount;
+- type MaximumBlockWeight = MaximumBlockWeight;
+- type DbWeight = RocksDbWeight;
+- type BlockExecutionWeight = BlockExecutionWeight;
+- type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
+- type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
+- type MaximumBlockLength = MaximumBlockLength;
+- type AvailableBlockRatio = AvailableBlockRatio;
+ type Version = Version;
+ type PalletInfo = PalletInfo;
+ type AccountData = pallet_balances::AccountData<Balance>;
+ type OnNewAccount = ();
+ type OnKilledAccount = ();
+- type SystemWeightInfo = weights::frame_system::WeightInfo;
++ type SystemWeightInfo = frame_system::weights::SubstrateWeight<Runtime>;
+```
+
+#### Pallets:
+
+##### Assets
+
+The assets pallet has seen a variety of changes:
+- [Features needed for reserve-backed stablecoins #7152 ](https://github.com/paritytech/substrate/pull/7152)
+- [Freeze Assets and Asset Metadata #7346 ](https://github.com/paritytech/substrate/pull/7346)
+- [Introduces account existence providers reference counting #7363 ](https://github.com/paritytech/substrate/pull/7363)
+
+have all altered the feature set and changed the concepts. However, it has some of the best documentation and explains the current state very well. If you are using the assets pallet and need to upgrade from an earlier version, we recommend you use the current docs to guide your way!
+
+##### Contracts
+
+As noted in the changelog, the `contracts`-pallet is still undergoing massive changes and is not yet part of this release. We expect it to be released a few weeks after this one. If your chain depends on this pallet, we recommend waiting until it has been released, as the currently released version is not compatible with FRAME 2.0.
+
+#### (changes) Treasury
+
+As mentioned above, Bounties, Tips and Lottery have been extracted out of treasury into their own pallets - removing these options here. Secondly, we must now specify the `BurnDestination` and `SpendFunds`, which now go to the `Bounties` pallet. 
+
+```diff
+- type Tippers = Elections;
+- type TipCountdown = TipCountdown;
+- type TipFindersFee = TipFindersFee;
+- type TipReportDepositBase = TipReportDepositBase;
+- type DataDepositPerByte = DataDepositPerByte;
+ type Event = Event;
+ type OnSlash = ();
+ type ProposalBond = ProposalBond;
+ type ProposalBondMinimum = ProposalBondMinimum;
+ type SpendPeriod = SpendPeriod;
+ type Burn = Burn;
++ type BurnDestination = ();
++ type SpendFunds = Bounties;
+```
+
+Factoring out Bounties and Tips means most of these definitions have now moved there, while the parameter types can be left as they were:
+
+###### 🆕 Bounties
+
+```rust=
+impl pallet_bounties::Config for Runtime {
+ type Event = Event;
+ type BountyDepositBase = BountyDepositBase;
+ type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
+ type BountyUpdatePeriod = BountyUpdatePeriod;
+ type BountyCuratorDeposit = BountyCuratorDeposit;
+ type BountyValueMinimum = BountyValueMinimum;
+ type DataDepositPerByte = DataDepositPerByte;
+ type MaximumReasonLength = MaximumReasonLength;
+ type WeightInfo = pallet_bounties::weights::SubstrateWeight<Runtime>;
+}
+```
+
+###### 🆕 Tips
+
+```rust=
+impl pallet_tips::Config for Runtime {
+ type Event = Event;
+ type DataDepositPerByte = DataDepositPerByte;
+ type MaximumReasonLength = MaximumReasonLength;
+ type Tippers = Elections;
+ type TipCountdown = TipCountdown;
+ type TipFindersFee = TipFindersFee;
+ type TipReportDepositBase = TipReportDepositBase;
+ type WeightInfo = pallet_tips::weights::SubstrateWeight<Runtime>;
+}
+```
+
+#### `FinalityTracker` removed
+
+The Finality Tracker has been removed in favor of a different approach to handle the issue in GRANDPA, [see #7228 for details](https://github.com/paritytech/substrate/pull/7228). With the latest GRANDPA, it is no longer needed and can be removed without worry.
+
+#### (changes) Elections Phragmen
+
+The pallet has been moved to a new system in which the exact amount of deposit for each voter, candidate, member, or runner-up is now deposited on-chain. Moreover, the concept of a `defunct_voter` is removed, since votes now have an adequate deposit associated with them. A number of configuration parameters have changed to reflect this, as shown below:
+
+```diff=
+ parameter_types! {
+ pub const CandidacyBond: Balance = 10 * DOLLARS;
+- pub const VotingBond: Balance = 1 * DOLLARS;
++ // 1 storage item created, key size is 32 bytes, value size is 16+16.
++ pub const VotingBondBase: Balance = deposit(1, 64);
++ // additional data per vote is 32 bytes (account id).
++ pub const VotingBondFactor: Balance = deposit(0, 32);
+ pub const TermDuration: BlockNumber = 7 * DAYS;
+ pub const DesiredMembers: u32 = 13;
+ pub const DesiredRunnersUp: u32 = 7;
+
+@@ -559,16 +600,16 @@ impl pallet_elections_phragmen::Trait for Runtime {
+ // NOTE: this implies that council's genesis members cannot be set directly and must come from
+ // this module.
+ type InitializeMembers = Council;
+- type CurrencyToVote = CurrencyToVoteHandler;
++ type CurrencyToVote = U128CurrencyToVote;
+ type CandidacyBond = CandidacyBond;
+- type VotingBond = VotingBond;
++ type VotingBondBase = VotingBondBase;
++ type VotingBondFactor = VotingBondFactor;
+ type LoserCandidate = ();
+- type BadReport = ();
+ type KickedMember = ();
+ type DesiredMembers = DesiredMembers;
+ type DesiredRunnersUp = DesiredRunnersUp;
+ type TermDuration = TermDuration;
+ ```
+
+ **This upgrade requires storage [migration](https://github.com/paritytech/substrate/blob/master/frame/elections-phragmen/src/migrations_3_0_0.rs)**. Further details can be found in the [pallet-specific changelog](https://github.com/paritytech/substrate/blob/master/frame/elections-phragmen/CHANGELOG.md#security).
+
+#### (changes) Democracy
+
+Democracy brings three new settings with this release, all to allow for better control over the influx of proposals and over spam. Namely, they allow you to specify the maximum number of proposals at a time, who can blacklist proposals, and who can cancel them. This diff acts as a good starting point:
+
+```diff=
+@@ -508,6 +537,14 @@ impl pallet_democracy::Trait for Runtime {
+ type FastTrackVotingPeriod = FastTrackVotingPeriod;
+ // To cancel a proposal which has been passed, 2/3 of the council must agree to it.
+ type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>;
++ // To cancel a proposal before it has been passed, the technical committee must be unanimous or
++ // Root must agree.
++ type CancelProposalOrigin = EnsureOneOf<
++ AccountId,
++ EnsureRoot<AccountId>,
++ pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>,
++ >;
++ type BlacklistOrigin = EnsureRoot<AccountId>;
+ // Any single technical committee member may veto a coming council proposal, however they can
+ // only do it once and it lasts only for the cooloff period.
+ type VetoOrigin = pallet_collective::EnsureMember<AccountId, TechnicalCollective>;
+@@ -518,7 +555,8 @@ impl pallet_democracy::Trait for Runtime {
+ type Scheduler = Scheduler;
+ type PalletsOrigin = OriginCaller;
+ type MaxVotes = MaxVotes;
++ type MaxProposals = MaxProposals;
+ }
+```
+
+----
+
+### Primitives
+
+The shared primitives define the API between Client and Runtime. Usually, you neither have to touch nor directly interact with them, unless you have created your own client or frame-less runtime. We therefore expect you to determine yourself whether you are affected by these changes and how to update your code.
+
+----
+
+### Client
+
+#### CLI
+
+A few minor things have changed in the `cli` (compared to 2.0.1):
+
+1. We've [replaced the newly added `BuildSyncSpec` subcommand with an RPC API](https://github.com/paritytech/substrate/commit/65cc9af9b8df8d36928f6144ee7474cefbd70454#diff-c57da6fbeff8c46ce15f55ea42fedaa5a4684d79578006ce4af01ae04fd6b8f8) in an ongoing effort to make light-client support smoother; see below.
+2. We've [removed double accounts from our chainspec-builder](https://github.com/paritytech/substrate/commit/31499cd29ed30df932fb71b7459796f7160d0272).
+3. We [no longer fall back to `--chain flaming-fir`](https://github.com/paritytech/substrate/commit/13cdf1c8cd2ee62d411f82b64dc7eba860c9c6c6); if no chain is given, our substrate-node will error.
+4. [The `subkey` integration has seen a fix to the `insert` command](https://github.com/paritytech/substrate/commit/54bde60cfd2c544c54e9e8623b6b8725b99557f8) that now requires you to pass `&cli` as a parameter:
+ ```diff=
+ --- a/bin/node/cli/src/command.rs
+ +++ b/bin/node/cli/src/command.rs
+ @@ -92,7 +97,7 @@ pub fn run() -> Result<()> {
+  You can enable it with `--features runtime-benchmarks`.".into())
+  }
+  }
+ - Some(Subcommand::Key(cmd)) => cmd.run(),
+ + Some(Subcommand::Key(cmd)) => cmd.run(&cli),
+  Some(Subcommand::Sign(cmd)) => cmd.run(),
+  Some(Subcommand::Verify(cmd)) => cmd.run(),
+  Some(Subcommand::Vanity(cmd)) => cmd.run(),
+ ```
+
+
+#### Service Builder Upgrades
+
+##### Light client support
+
+As mentioned, we've added a new optional RPC service for improved light client support. For that to work, we need to pass the `chain_spec` and give access to the `AuxStore` to our `rpc`:
+
+
+```diff=
+
+--- a/bin/node/rpc/src/lib.rs
++++ b/bin/node/rpc/src/lib.rs
+@@ -49,6 +49,7 @@ use sp_consensus::SelectChain;
+ use sp_consensus_babe::BabeApi;
+ use sc_rpc::SubscriptionTaskExecutor;
+ use sp_transaction_pool::TransactionPool;
++use sc_client_api::AuxStore;
+
+ /// Light client extra dependencies.
+ pub struct LightDeps<C, F, P> {
+@@ -94,6 +95,8 @@ pub struct FullDeps<C, P, SC, B> {
+ pub pool: Arc<P>,
+ /// The SelectChain Strategy
+ pub select_chain: SC,
++ /// A copy of the chain spec.
++ pub chain_spec: Box<dyn sc_chain_spec::ChainSpec>,
+ /// Whether to deny unsafe calls
+ pub deny_unsafe: DenyUnsafe,
+ /// BABE specific dependencies.
+@@ -109,9 +112,8 @@ pub type IoHandler = jsonrpc_core::IoHandler<sc_rpc::Metadata>;
+ pub fn create_full<C, P, SC, B>(
+ deps: FullDeps<C, P, SC, B>,
+ ) -> jsonrpc_core::IoHandler<sc_rpc::Metadata> where
+- C: ProvideRuntimeApi<Block>,
+- C: HeaderBackend<Block> + HeaderMetadata<Block, Error=BlockChainError> + 'static,
+- C: Send + Sync + 'static,
++ C: ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore +
++ HeaderMetadata<Block, Error=BlockChainError> + Sync + Send + 'static,
+ C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Index>,
+ C::Api: pallet_contracts_rpc::ContractsRuntimeApi<Block, AccountId, Balance, BlockNumber>,
+ C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
+@@ -131,6 +133,7 @@ pub fn create_full<C, P, SC, B>(
+ client,
+ pool,
+ select_chain,
++ chain_spec,
+ deny_unsafe,
+ babe,
+ grandpa,
+@@ -164,8 +167,8 @@ pub fn create_full<C, P, SC, B>(
+ io.extend_with(
+ sc_consensus_babe_rpc::BabeApi::to_delegate(
+ BabeRpcHandler::new(
+- client,
+- shared_epoch_changes,
++ client.clone(),
++ shared_epoch_changes.clone(),
+ keystore,
+ babe_config,
+ select_chain,
+@@ -176,7 +179,7 @@ pub fn create_full<C, P, SC, B>(
+ io.extend_with(
+ sc_finality_grandpa_rpc::GrandpaApi::to_delegate(
+ GrandpaRpcHandler::new(
+- shared_authority_set,
++ shared_authority_set.clone(),
+ shared_voter_state,
+ justification_stream,
+ subscription_executor,
+
+```
+
+and add the new service:
+
+```diff=
+--- a/bin/node/rpc/src/lib.rs
++++ b/bin/node/rpc/src/lib.rs
+@@ -185,6 +188,18 @@ pub fn create_full<C, P, SC, B>(
+ )
+ );
+
++ io.extend_with(
++ sc_sync_state_rpc::SyncStateRpcApi::to_delegate(
++ sc_sync_state_rpc::SyncStateRpcHandler::new(
++ chain_spec,
++ client,
++ shared_authority_set,
++ shared_epoch_changes,
++ deny_unsafe,
++ )
++ )
++ );
++
+ io
+ }
+```
+
+##### Telemetry
+
+The telemetry subsystem has seen a few fixes and refactorings to allow for more flexible handling, in particular with regard to parachains. Most notably, `sc_service::spawn_tasks` now returns the `telemetry_connection_notifier` as the second member of the tuple (`let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(`), which should now be passed to `telemetry_on_connect` of `new_full_base`: `telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()),` (see the service section below for a full diff).
+
+##### Async & Remote Keystore support
+
+In order to allow for remote keystores, the keystore subsystem has been reworked to support async operations and generally refactored to not hand out the keys themselves but only to sign on request. This means a remote keystore never needs to release its keys, so any Substrate-based node can be operated without the private keys ever being held in local system memory.
+
+There are some operations, however, for which the keystore must be local, for performance reasons or because a remote keystore won't work (in particular around parachains). As such, the keystore always has a local instance alongside an optional slot for a remote one: some operations bind hard to the local variant, while most subsystems simply ask the generic keystore, which prefers the remote signer if one is given. To reflect this change, `sc_service::new_full_parts` now returns a `KeystoreContainer` rather than the keystore, and the other subsystems (e.g. `sc_service::PartialComponents`) expect to be given that.
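+
+To make the "sign on request" contract concrete, here is a minimal sketch of ours (it is not part of the upstream diffs; it only reuses calls that also appear in the test diff further below, and the key type and payload are placeholders):
+
+```rust=
+use sp_core::Public;
+use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
+use sp_runtime::key_types::BABE;
+
+// `keystore_container` is the `KeystoreContainer` returned by
+// `sc_service::new_full_parts`.
+let keystore: SyncCryptoStorePtr = keystore_container.sync_keystore();
+
+// Keys are generated (or looked up) *inside* the keystore; only the public
+// part ever crosses the API boundary.
+let public: sp_consensus_babe::AuthorityId =
+	SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, None)
+		.expect("generates a key inside the keystore")
+		.into();
+
+// Instead of fetching a private key, we hand the payload to the keystore,
+// which signs remotely if a remote signer is configured, locally otherwise.
+let raw_signature = SyncCryptoStore::sign_with(
+	&*keystore,
+	BABE,
+	&public.to_public_crypto_pair(),
+	b"example payload",
+).expect("signs the payload");
+```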
+
+###### on RPC:
+
+The changes are most visible in the RPC, where we switch from the previous `KeyStorePtr` to the new `SyncCryptoStorePtr`:
+
+```diff
+
+--- a/bin/node/rpc/src/lib.rs
++++ b/bin/node/rpc/src/lib.rs
+@@ -32,6 +32,7 @@
+
+ use std::sync::Arc;
+
++use sp_keystore::SyncCryptoStorePtr;
+ use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash};
+ use sc_consensus_babe::{Config, Epoch};
+ use sc_consensus_babe_rpc::BabeRpcHandler;
+@@ -40,7 +41,6 @@ use sc_finality_grandpa::{
+ SharedVoterState, SharedAuthoritySet, FinalityProofProvider, GrandpaJustificationStream
+ };
+ use sc_finality_grandpa_rpc::GrandpaRpcHandler;
+-use sc_keystore::KeyStorePtr;
+ pub use sc_rpc_api::DenyUnsafe;
+ use sp_api::ProvideRuntimeApi;
+ use sp_block_builder::BlockBuilder;
+ pub struct LightDeps<C, F, P> {
+@@ -69,7 +70,7 @@ pub struct BabeDeps {
+ /// BABE pending epoch changes.
+ pub shared_epoch_changes: SharedEpochChanges<Block, Epoch>,
+ /// The keystore that manages the keys of the node.
+- pub keystore: KeyStorePtr,
++ pub keystore: SyncCryptoStorePtr,
+ }
+
+```
+
+##### GRANDPA
+
+As already noted in the changelog, a few significant things have changed with regard to GRANDPA: the finality tracker has been replaced, an RPC command has been added, and warp-sync support for faster light client startup has been implemented. All this means we have to make a few changes to our GRANDPA setup procedures in the client.
+
+First and foremost, GRANDPA internalised a few aspects, and thus `new_partial` no longer expects a tuple but only the `grandpa::SharedVoterState` as input, and unpacking that again later is not needed anymore either. On the other side, `grandpa::FinalityProofProvider::new_for_service` now requires `Some(shared_authority_set)` to be passed as a new third parameter. This set also becomes relevant when adding warp-sync support, which is added as an extra protocol layer to the networking:
+```diff=
+
++ config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
++
++ #[cfg(feature = "cli")]
++ config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain(
++ &config, task_manager.spawn_handle(), backend.clone(),
++ ));
+```
+
+As these changes pull through the entirety of `cli/src/service.rs`, we recommend looking at the final diff below for guidance.
+
+##### In a nutshell
+
+Altogether this accumulates to the following diff for `node/cli/src/service.rs`.
If you want these features and have modified your chain you should probably try to apply these patches: + + +```diff= +--- a/bin/node/cli/src/service.rs ++++ b/bin/node/cli/src/service.rs +@@ -22,11 +22,10 @@ + + use std::sync::Arc; + use sc_consensus_babe; +-use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; + use node_primitives::Block; + use node_runtime::RuntimeApi; + use sc_service::{ +- config::{Role, Configuration}, error::{Error as ServiceError}, ++ config::{Configuration}, error::{Error as ServiceError}, + RpcHandlers, TaskManager, + }; + use sp_inherents::InherentDataProviders; +@@ -34,8 +33,8 @@ use sc_network::{Event, NetworkService}; + use sp_runtime::traits::Block as BlockT; + use futures::prelude::*; + use sc_client_api::{ExecutorProvider, RemoteBackend}; +-use sp_core::traits::BareCryptoStorePtr; + use node_executor::Executor; ++use sc_telemetry::TelemetryConnectionNotifier; + + type FullClient = sc_service::TFullClient; + type FullBackend = sc_service::TFullBackend; +@@ -58,13 +57,10 @@ pub fn new_partial(config: &Configuration) -> Result, + sc_consensus_babe::BabeLink, + ), +- ( +- grandpa::SharedVoterState, +- Arc>, +- ), ++ grandpa::SharedVoterState, + ) + >, ServiceError> { +- let (client, backend, keystore, task_manager) = ++ let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::(&config)?; + let client = Arc::new(client); + +@@ -94,7 +90,6 @@ pub fn new_partial(config: &Configuration) -> Result Result Result Result, + &sc_consensus_babe::BabeLink, + ) + ) -> Result { + let sc_service::PartialComponents { +- client, backend, mut task_manager, import_queue, keystore, select_chain, transaction_pool, ++ client, ++ backend, ++ mut task_manager, ++ import_queue, ++ keystore_container, ++ select_chain, ++ transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup), + } = new_partial(&config)?; + +- let (shared_voter_state, finality_proof_provider) = rpc_setup; ++ let shared_voter_state = rpc_setup; ++ ++ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); ++ ++ #[cfg(feature = "cli")] ++ config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain( ++ &config, task_manager.spawn_handle(), backend.clone(), ++ )); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { +@@ -191,8 +209,6 @@ pub fn new_full_base( + import_queue, + on_demand: None, + block_announce_validator_builder: None, +- finality_proof_request_builder: None, +- finality_proof_provider: Some(finality_proof_provider.clone()), + })?; + + if config.offchain_worker.enabled { +@@ -203,26 +219,28 @@ pub fn new_full_base( + + let role = config.role.clone(); + let force_authoring = config.force_authoring; ++ let backoff_authoring_blocks = ++ Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); +- let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); + +- sc_service::spawn_tasks(sc_service::SpawnTasksParams { +- config, +- backend: backend.clone(), +- client: client.clone(), +- keystore: keystore.clone(), +- network: network.clone(), +- rpc_extensions_builder: Box::new(rpc_extensions_builder), +- transaction_pool: transaction_pool.clone(), +- task_manager: &mut 
task_manager, +- on_demand: None, +- remote_blockchain: None, +- telemetry_connection_sinks: telemetry_connection_sinks.clone(), +- network_status_sinks: network_status_sinks.clone(), +- system_rpc_tx, +- })?; ++ let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks( ++ sc_service::SpawnTasksParams { ++ config, ++ backend: backend.clone(), ++ client: client.clone(), ++ keystore: keystore_container.sync_keystore(), ++ network: network.clone(), ++ rpc_extensions_builder: Box::new(rpc_extensions_builder), ++ transaction_pool: transaction_pool.clone(), ++ task_manager: &mut task_manager, ++ on_demand: None, ++ remote_blockchain: None, ++ network_status_sinks: network_status_sinks.clone(), ++ system_rpc_tx, ++ }, ++ )?; + + let (block_import, grandpa_link, babe_link) = import_setup; + +@@ -230,6 +248,7 @@ pub fn new_full_base( + + if let sc_service::config::Role::Authority { .. } = &role { + let proposer = sc_basic_authorship::ProposerFactory::new( ++ task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), +@@ -239,7 +258,7 @@ pub fn new_full_base( + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let babe_config = sc_consensus_babe::BabeParams { +- keystore: keystore.clone(), ++ keystore: keystore_container.sync_keystore(), + client: client.clone(), + select_chain, + env: proposer, +@@ -247,6 +266,7 @@ pub fn new_full_base( + sync_oracle: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring, ++ backoff_authoring_blocks, + babe_link, + can_author_with, + }; +@@ -256,42 +276,30 @@ pub fn new_full_base( + } + + // Spawn authority discovery module. +- if matches!(role, Role::Authority{..} | Role::Sentry {..}) { +- let (sentries, authority_discovery_role) = match role { +- sc_service::config::Role::Authority { ref sentry_nodes } => ( +- sentry_nodes.clone(), +- sc_authority_discovery::Role::Authority ( +- keystore.clone(), +- ), +- ), +- sc_service::config::Role::Sentry {..} => ( +- vec![], +- sc_authority_discovery::Role::Sentry, +- ), +- _ => unreachable!("Due to outer matches! constraint; qed.") +- }; +- ++ if role.is_authority() { ++ let authority_discovery_role = sc_authority_discovery::Role::PublishAndDiscover( ++ keystore_container.keystore(), ++ ); + let dht_event_stream = network.event_stream("authority-discovery") + .filter_map(|e| async move { match e { + Event::Dht(e) => Some(e), + _ => None, +- }}).boxed(); ++ }}); + let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service( + client.clone(), + network.clone(), +- sentries, +- dht_event_stream, ++ Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); + +- task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker); ++ task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker.run()); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. 
+ let keystore = if role.is_authority() { +- Some(keystore as BareCryptoStorePtr) ++ Some(keystore_container.sync_keystore()) + } else { + None + }; +@@ -317,8 +325,7 @@ pub fn new_full_base( + config, + link: grandpa_link, + network: network.clone(), +- inherent_data_providers: inherent_data_providers.clone(), +- telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), ++ telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), + voting_rule: grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state, +@@ -330,17 +337,15 @@ pub fn new_full_base( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? + ); +- } else { +- grandpa::setup_disabled_grandpa( +- client.clone(), +- &inherent_data_providers, +- network.clone(), +- )?; + } + + network_starter.start_network(); + Ok(NewFullBase { +- task_manager, inherent_data_providers, client, network, network_status_sinks, ++ task_manager, ++ inherent_data_providers, ++ client, ++ network, ++ network_status_sinks, + transaction_pool, + }) + } +@@ -353,14 +358,16 @@ pub fn new_full(config: Configuration) + }) + } + +-pub fn new_light_base(config: Configuration) -> Result<( +- TaskManager, RpcHandlers, Arc, ++pub fn new_light_base(mut config: Configuration) -> Result<( ++ TaskManager, RpcHandlers, Option, Arc, + Arc::Hash>>, + Arc>> + ), ServiceError> { +- let (client, backend, keystore, mut task_manager, on_demand) = ++ let (client, backend, keystore_container, mut task_manager, on_demand) = + sc_service::new_light_parts::(&config)?; + ++ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); ++ + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( +@@ -371,14 +378,12 @@ pub fn new_light_base(config: Configuration) -> Result<( + on_demand.clone(), + )); + +- let grandpa_block_import = grandpa::light_block_import( +- client.clone(), backend.clone(), &(client.clone() as Arc<_>), +- Arc::new(on_demand.checker().clone()), ++ let (grandpa_block_import, _) = grandpa::block_import( ++ client.clone(), ++ &(client.clone() as Arc<_>), ++ select_chain.clone(), + )?; +- +- let finality_proof_import = grandpa_block_import.clone(); +- let finality_proof_request_builder = +- finality_proof_import.create_finality_proof_request_builder(); ++ let justification_import = grandpa_block_import.clone(); + + let (babe_block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, +@@ -391,8 +396,7 @@ pub fn new_light_base(config: Configuration) -> Result<( + let import_queue = sc_consensus_babe::import_queue( + babe_link, + babe_block_import, +- None, +- Some(Box::new(finality_proof_import)), ++ Some(Box::new(justification_import)), + client.clone(), + select_chain.clone(), + inherent_data_providers.clone(), +@@ -401,9 +405,6 @@ pub fn new_light_base(config: Configuration) -> Result<( + sp_consensus::NeverCanAuthor, + )?; + +- let finality_proof_provider = +- GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); +- + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, +@@ -413,8 +414,6 @@ pub fn new_light_base(config: Configuration) -> Result<( + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, +- finality_proof_request_builder: 
Some(finality_proof_request_builder), +- finality_proof_provider: Some(finality_proof_provider), + })?; + network_starter.start_network(); + +@@ -433,32 +432,39 @@ pub fn new_light_base(config: Configuration) -> Result<( + + let rpc_extensions = node_rpc::create_light(light_deps); + +- let rpc_handlers = ++ let (rpc_handlers, telemetry_connection_notifier) = + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + on_demand: Some(on_demand), + remote_blockchain: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), + client: client.clone(), + transaction_pool: transaction_pool.clone(), +- config, keystore, backend, network_status_sinks, system_rpc_tx, ++ keystore: keystore_container.sync_keystore(), ++ config, backend, network_status_sinks, system_rpc_tx, + network: network.clone(), +- telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), + task_manager: &mut task_manager, + })?; + +- Ok((task_manager, rpc_handlers, client, network, transaction_pool)) ++ Ok(( ++ task_manager, ++ rpc_handlers, ++ telemetry_connection_notifier, ++ client, ++ network, ++ transaction_pool, ++ )) + } + + /// Builds a new service for a light client. + pub fn new_light(config: Configuration) -> Result { +- new_light_base(config).map(|(task_manager, _, _, _, _)| { ++ new_light_base(config).map(|(task_manager, _, _, _, _, _)| { + task_manager + }) + } + + #[cfg(test)] + mod tests { +- use std::{sync::Arc, borrow::Cow, any::Any}; ++ use std::{sync::Arc, borrow::Cow, any::Any, convert::TryInto}; + use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY}; + use sc_consensus_epochs::descendent_query; + use sp_consensus::{ +@@ -469,20 +475,25 @@ mod tests { + use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; + use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION}; + use codec::Encode; +- use sp_core::{crypto::Pair as CryptoPair, H256}; ++ use sp_core::{ ++ crypto::Pair as CryptoPair, ++ H256, ++ Public ++ }; ++ use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; + use sp_runtime::{ + generic::{BlockId, Era, Digest, SignedPayload}, + traits::{Block as BlockT, Header as HeaderT}, + traits::Verify, + }; + use sp_timestamp; +- use sp_finality_tracker; + use sp_keyring::AccountKeyring; + use sc_service_test::TestNetNode; + use crate::service::{new_full_base, new_light_base, NewFullBase}; +- use sp_runtime::traits::IdentifyAccount; ++ use sp_runtime::{key_types::BABE, traits::IdentifyAccount, RuntimeAppPublic}; + use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; + use sc_client_api::BlockBackend; ++ use sc_keystore::LocalKeystore; + + type AccountPublic = ::Signer; + +@@ -492,15 +503,15 @@ mod tests { + #[ignore] + fn test_sync() { + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); +- let keystore = sc_keystore::Store::open(keystore_path.path(), None) +- .expect("Creates keystore"); +- let alice = keystore.write().insert_ephemeral_from_seed::("//Alice") +- .expect("Creates authority pair"); ++ let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) ++ .expect("Creates keystore")); ++ let alice: sp_consensus_babe::AuthorityId = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) ++ .expect("Creates authority pair").into(); + + let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); + + // For the block factory +- let mut slot_num = 1u64; ++ 
let mut slot = 1u64; + + // For the extrinsics factory + let bob = Arc::new(AccountKeyring::Bob.pair()); +@@ -528,14 +539,13 @@ mod tests { + Ok((node, (inherent_data_providers, setup_handles.unwrap()))) + }, + |config| { +- let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; ++ let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + }, + |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { + let mut inherent_data = inherent_data_providers + .create_inherent_data() + .expect("Creates inherent data."); +- inherent_data.replace_data(sp_finality_tracker::INHERENT_IDENTIFIER, &1u64); + + let parent_id = BlockId::number(service.client().chain_info().best_number); + let parent_header = service.client().header(&parent_id).unwrap().unwrap(); +@@ -552,6 +562,7 @@ mod tests { + ); + + let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( ++ service.spawn_handle(), + service.client(), + service.transaction_pool(), + None, +@@ -561,7 +572,7 @@ mod tests { + descendent_query(&*service.client()), + &parent_hash, + parent_number, +- slot_num, ++ slot.into(), + ).unwrap().unwrap(); + + let mut digest = Digest::::default(); +@@ -569,18 +580,18 @@ mod tests { + // even though there's only one authority some slots might be empty, + // so we must keep trying the next slots until we can claim one. + let babe_pre_digest = loop { +- inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); ++ inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot * SLOT_DURATION)); + if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( +- slot_num, ++ slot.into(), + &parent_header, + &*service.client(), +- &keystore, ++ keystore.clone(), + &babe_link, + ) { + break babe_pre_digest; + } + +- slot_num += 1; ++ slot += 1; + }; + + digest.push(::babe_pre_digest(babe_pre_digest)); +@@ -600,11 +611,18 @@ mod tests { + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let to_sign = pre_hash.encode(); +- let signature = alice.sign(&to_sign[..]); ++ let signature = SyncCryptoStore::sign_with( ++ &*keystore, ++ sp_consensus_babe::AuthorityId::ID, ++ &alice.to_public_crypto_pair(), ++ &to_sign, ++ ).unwrap() ++ .try_into() ++ .unwrap(); + let item = ::babe_seal( +- signature.into(), ++ signature, + ); +- slot_num += 1; ++ slot += 1; + + let mut params = BlockImportParams::new(BlockOrigin::File, new_header); + params.post_digests.push(item); +@@ -679,7 +697,7 @@ mod tests { + Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) + }, + |config| { +- let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; ++ let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; + Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + }, + vec![ +``` diff --git a/docs/license_header.txt b/docs/license_header.txt deleted file mode 100644 index f9c1daa1ad1c1..0000000000000 --- a/docs/license_header.txt +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
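+
+If your node is a close fork of the upstream `bin/node` layout, one way to take these changes over (a suggestion on our part, assuming your file paths still match; adapt as needed) is to hold on to the updated file and let `cargo` guide the remaining compile errors. A sketch of the minimal consuming code, reusing only the names from the diff above:
+
+```rust=
+use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+
+use crate::service::{new_full_base, NewFullBase};
+
+/// Spawn a full node. `new_full_base` now owns the keystore (via the
+/// `KeystoreContainer`) and the GRANDPA shared voter state internally.
+pub fn run_node(config: Configuration) -> Result<TaskManager, ServiceError> {
+	// The closure receives the BABE block import and link, in case the caller
+	// wants to hold on to them (the tests above do); here we ignore both.
+	let NewFullBase { task_manager, .. } =
+		new_full_base(config, |_block_import, _babe_link| ())?;
+	Ok(task_manager)
+}
+```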
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <https://www.gnu.org/licenses/>.
diff --git a/docs/node-template-release.md b/docs/node-template-release.md
new file mode 100644
index 0000000000000..25834ae99f438
--- /dev/null
+++ b/docs/node-template-release.md
@@ -0,0 +1,78 @@
+# Substrate Node Template Release Process
+
+1. This release process has to be run from a Git checkout of the Substrate repository with your work
+committed to `https://github.com/paritytech/substrate/`, because the build script will check
+that your current git commit ID exists in the remote repository.
+
+   Assuming you are in the root directory of Substrate, run:
+
+   ```bash
+   cd .maintain/
+   ./node-template-release.sh <output tar.gz file>
+   ```
+
+2. Expand the output tar gzipped file and replace the files in the current Substrate Node Template
+by running the following command.
+
+   ```bash
+   # This is where the tar.gz file was uncompressed
+   cd substrate-node-template
+   # rsync with force copying. Note the slash at the destination directory is important
+   rsync -avh * <destination node-template directory>/
+   # For a dry run, add the `-n` argument
+   # rsync -avhn * <destination node-template directory>/
+   ```
+
+   The above command only copies existing files from the source to the destination, but does not
+   delete files/directories that are removed from the source. So you need to manually check and
+   remove them in the destination.
+
+3. There are actually three packages in the Node Template: `node-template` (the node),
+`node-template-runtime` (the runtime), and `pallet-template`, and each has its own `Cargo.toml`.
+Inside these three files, dependencies are listed in expanded form and linked to a certain git
+commit in the Substrate remote repository, such as:
+
+   ```toml
+   [dev-dependencies.sp-core]
+   default-features = false
+   git = 'https://github.com/paritytech/substrate.git'
+   rev = 'c1fe59d060600a10eebb4ace277af1fee20bad17'
+   version = '3.0.0'
+   ```
+
+   We will update each of them to the shortened form and link them to the Rust
+   [crate registry](https://crates.io/). After confirming the versioned package is published on
+   the registry, the above will become:
+
+   ```toml
+   [dev-dependencies]
+   sp-core = { version = '3.0.0', default-features = false }
+   ```
+
+   P.S.: This step can be automated if we update the `node-template-release` package in
+   `.maintain/node-template-release`.
+
+4. Once the three `Cargo.toml`s are updated, compile and confirm that the Node Template builds. Then
+commit the changes to a new branch in [Substrate Node Template](https://github.com/substrate-developer-hub/substrate-node-template), and make a PR.
+
+   > Note that there is a chance the code in the Substrate Node Template works with the linked Substrate git
+   commit but not with the published packages due to the latest (as yet) unpublished features. In this case,
+   roll back that section of the Node Template to its previous version to ensure the Node Template builds.
+
+5. Once the PR is merged, tag the merged commit in the master branch with the version number
+`vX.Y.Z+A` (e.g. `v3.0.0+1`). The `X` (major), `Y` (minor), and `Z` (patch) version numbers should
+follow the Substrate release version. The last digit counts any significant fixes made in the Substrate
+Node Template apart from Substrate itself. When the Substrate version is updated, this digit is reset to 0.
+
+## Troubleshooting
+
+- Running the script `./node-template-release.sh <output tar.gz file>` fails with the following
+  error message, even though all tests passed successfully:
+
+  ```
+  thread 'main' panicked at 'Creates output file: Os { code: 2, kind: NotFound, message: "No such file or directory" }', src/main.rs:250:10
+  note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
+  ```
+
+  This is likely because your output path is not a valid `tar.gz` filename or because you don't have
+  write permission to the destination. Try again with a simple output path such as `~/node-tpl.tar.gz`.
diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml
index d1742e567cfac..05e7912dd07c6 100644
--- a/frame/assets/Cargo.toml
+++ b/frame/assets/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-assets"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,26 +13,37 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-serde = { version = "1.0.101", optional = true }
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
 # Needed for various traits. In our case, `OnFinalize`.
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
 # Needed for type-safe access to storage DB.
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
 # `system` module provides us with all sorts of useful stuff and macros depend on it being around.
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
+frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true }
 
 [dev-dependencies]
-sp-core = { version = "2.0.0", path = "../../primitives/core" }
-sp-std = { version = "2.0.0", path = "../../primitives/std" }
-sp-io = { version = "2.0.0", path = "../../primitives/io" }
+sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
+sp-std = { version = "4.0.0-dev", path = "../../primitives/std" }
+sp-io = { version = "4.0.0-dev", path = "../../primitives/io" }
+pallet-balances = { version = "4.0.0-dev", path = "../balances" }
 
 [features]
 default = ["std"]
 std = [
-	"serde",
 	"codec/std",
+	"scale-info/std",
+	"sp-std/std",
 	"sp-runtime/std",
 	"frame-support/std",
 	"frame-system/std",
+	"frame-benchmarking/std",
 ]
+runtime-benchmarks = [
+	"frame-benchmarking",
+	"sp-runtime/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+]
+try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/assets/README.md b/frame/assets/README.md
index 6b3fe21e52775..a99b60fa33d56 100644
--- a/frame/assets/README.md
+++ b/frame/assets/README.md
@@ -11,9 +11,9 @@ with a fixed supply, including:
 * Asset Transfer
 * Asset Destruction
 
-To use it in your runtime, you need to implement the assets [`Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html).
+To use it in your runtime, you need to implement the assets [`assets::Config`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/trait.Config.html).
 
-The supported dispatchable functions are documented in the [`Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum.
+The supported dispatchable functions are documented in the [`assets::Call`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/enum.Call.html) enum.
 
 ### Terminology
 
@@ -51,7 +51,7 @@ Please refer to the [`Call`](https://docs.rs/pallet-assets/latest/pallet_assets/
 * `balance` - Get the asset `id` balance of `who`.
 * `total_supply` - Get the total supply of an asset `id`.
 
-Please refer to the [`Module`](https://docs.rs/pallet-assets/latest/pallet_assets/struct.Module.html) struct for details on publicly available functions.
+Please refer to the [`Pallet`](https://docs.rs/pallet-assets/latest/pallet_assets/pallet/struct.Pallet.html) struct for details on publicly available functions.
 
 ## Usage
 
@@ -71,11 +71,12 @@ Import the Assets module and types and derive your runtime's configuration trait
 use pallet_assets as assets;
 use frame_support::{decl_module, dispatch, ensure};
 use frame_system::ensure_signed;
+use sp_runtime::ArithmeticError;
 
-pub trait Trait: assets::Trait { }
+pub trait Config: assets::Config { }
 
 decl_module! {
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+	pub struct Module<T: Config> for enum Call where origin: T::Origin {
 		pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult {
 			let sender = ensure_signed(origin).map_err(|e| e.as_str())?;
 
@@ -84,7 +85,7 @@ decl_module! {
 			const COUNT_AIRDROP_RECIPIENTS: u64 = 2;
 			const TOKENS_FIXED_SUPPLY: u64 = 100;
 
-			ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), "Divide by zero error.");
+			ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), ArithmeticError::DivisionByZero);
 
 			let asset_id = Self::next_asset_id();
 
@@ -106,11 +107,11 @@ Below are assumptions that must be held when using this module.
If any of them are violated, the behavior of this module is undefined. * The total count of assets should be less than - `Trait::AssetId::max_value()`. + `Config::AssetId::max_value()`. ## Related Modules * [`System`](https://docs.rs/frame-system/latest/frame_system/) * [`Support`](https://docs.rs/frame-support/latest/frame_support/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs new file mode 100644 index 0000000000000..43eadffbe8497 --- /dev/null +++ b/frame/assets/src/benchmarking.rs @@ -0,0 +1,443 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Assets pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::{ + account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, + whitelisted_caller, +}; +use frame_support::{ + dispatch::UnfilteredDispatchable, + traits::{EnsureOrigin, Get}, +}; +use frame_system::RawOrigin as SystemOrigin; +use sp_runtime::traits::Bounded; +use sp_std::prelude::*; + +use crate::Pallet as Assets; + +const SEED: u32 = 0; + +fn create_default_asset, I: 'static>( + is_sufficient: bool, +) -> (T::AccountId, ::Source) { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let root = SystemOrigin::Root.into(); + assert!(Assets::::force_create( + root, + Default::default(), + caller_lookup.clone(), + is_sufficient, + 1u32.into(), + ) + .is_ok()); + (caller, caller_lookup) +} + +fn create_default_minted_asset, I: 'static>( + is_sufficient: bool, + amount: T::Balance, +) -> (T::AccountId, ::Source) { + let (caller, caller_lookup) = create_default_asset::(is_sufficient); + if !is_sufficient { + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); + } + assert!(Assets::::mint( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + caller_lookup.clone(), + amount, + ) + .is_ok()); + (caller, caller_lookup) +} + +fn swap_is_sufficient, I: 'static>(s: &mut bool) { + Asset::::mutate(&T::AssetId::default(), |maybe_a| { + if let Some(ref mut a) = maybe_a { + sp_std::mem::swap(s, &mut a.is_sufficient) + } + }); +} + +fn add_consumers, I: 'static>(minter: T::AccountId, n: u32) { + let origin = SystemOrigin::Signed(minter); + let mut s = false; + swap_is_sufficient::(&mut s); + for i in 0..n { + let target = account("consumer", i, SEED); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + let target_lookup = T::Lookup::unlookup(target); + assert!(Assets::::mint( + origin.clone().into(), + Default::default(), + target_lookup, + 100u32.into() + ) + .is_ok()); + } + swap_is_sufficient::(&mut s); +} + +fn add_sufficients, I: 'static>(minter: T::AccountId, n: u32) { + let origin = SystemOrigin::Signed(minter); + let mut s = true; + 
swap_is_sufficient::(&mut s); + for i in 0..n { + let target = account("sufficient", i, SEED); + let target_lookup = T::Lookup::unlookup(target); + assert!(Assets::::mint( + origin.clone().into(), + Default::default(), + target_lookup, + 100u32.into() + ) + .is_ok()); + } + swap_is_sufficient::(&mut s); +} + +fn add_approvals, I: 'static>(minter: T::AccountId, n: u32) { + T::Currency::deposit_creating(&minter, T::ApprovalDeposit::get() * n.into()); + let minter_lookup = T::Lookup::unlookup(minter.clone()); + let origin = SystemOrigin::Signed(minter); + Assets::::mint( + origin.clone().into(), + Default::default(), + minter_lookup, + (100 * (n + 1)).into(), + ) + .unwrap(); + for i in 0..n { + let target = account("approval", i, SEED); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + let target_lookup = T::Lookup::unlookup(target); + Assets::::approve_transfer( + origin.clone().into(), + Default::default(), + target_lookup, + 100u32.into(), + ) + .unwrap(); + } +} + +fn assert_last_event, I: 'static>(generic_event: >::Event) { + frame_system::Pallet::::assert_last_event(generic_event.into()); +} + +fn assert_event, I: 'static>(generic_event: >::Event) { + frame_system::Pallet::::assert_has_event(generic_event.into()); +} + +benchmarks_instance_pallet! { + create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, 1u32.into()) + verify { + assert_last_event::(Event::Created(Default::default(), caller.clone(), caller).into()); + } + + force_create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + }: _(SystemOrigin::Root, Default::default(), caller_lookup, true, 1u32.into()) + verify { + assert_last_event::(Event::ForceCreated(Default::default(), caller).into()); + } + + destroy { + let c in 0 .. 5_000; + let s in 0 .. 5_000; + let a in 0 .. 
5_00; + let (caller, _) = create_default_asset::(true); + add_consumers::(caller.clone(), c); + add_sufficients::(caller.clone(), s); + add_approvals::(caller.clone(), a); + let witness = Asset::::get(T::AssetId::default()).unwrap().destroy_witness(); + }: _(SystemOrigin::Signed(caller), Default::default(), witness) + verify { + assert_last_event::(Event::Destroyed(Default::default()).into()); + } + + mint { + let (caller, caller_lookup) = create_default_asset::(true); + let amount = T::Balance::from(100u32); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) + verify { + assert_last_event::(Event::Issued(Default::default(), caller, amount).into()); + } + + burn { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(true, amount); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) + verify { + assert_last_event::(Event::Burned(Default::default(), caller, amount).into()); + } + + transfer { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(true, amount); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) + verify { + assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); + } + + transfer_keep_alive { + let mint_amount = T::Balance::from(200u32); + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(true, mint_amount); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) + verify { + assert!(frame_system::Pallet::::account_exists(&caller)); + assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); + } + + force_transfer { + let amount = T::Balance::from(100u32); + let (caller, caller_lookup) = create_default_minted_asset::(true, amount); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, target_lookup, amount) + verify { + assert_last_event::( + Event::Transferred(Default::default(), caller, target, amount).into() + ); + } + + freeze { + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) + verify { + assert_last_event::(Event::Frozen(Default::default(), caller).into()); + } + + thaw { + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + Assets::::freeze( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + caller_lookup.clone(), + )?; + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) + verify { + assert_last_event::(Event::Thawed(Default::default(), caller).into()); + } + + freeze_asset { + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default()) + verify { + assert_last_event::(Event::AssetFrozen(Default::default()).into()); + } + + thaw_asset { + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + Assets::::freeze_asset( + 
SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + )?; + }: _(SystemOrigin::Signed(caller.clone()), Default::default()) + verify { + assert_last_event::(Event::AssetThawed(Default::default()).into()); + } + + transfer_ownership { + let (caller, _) = create_default_asset::(true); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller), Default::default(), target_lookup) + verify { + assert_last_event::(Event::OwnerChanged(Default::default(), target).into()); + } + + set_team { + let (caller, _) = create_default_asset::(true); + let target0 = T::Lookup::unlookup(account("target", 0, SEED)); + let target1 = T::Lookup::unlookup(account("target", 1, SEED)); + let target2 = T::Lookup::unlookup(account("target", 2, SEED)); + }: _(SystemOrigin::Signed(caller), Default::default(), target0.clone(), target1.clone(), target2.clone()) + verify { + assert_last_event::(Event::TeamChanged( + Default::default(), + account("target", 0, SEED), + account("target", 1, SEED), + account("target", 2, SEED), + ).into()); + } + + set_metadata { + let n in 0 .. T::StringLimit::get(); + let s in 0 .. T::StringLimit::get(); + + let name = vec![0u8; n as usize]; + let symbol = vec![0u8; s as usize]; + let decimals = 12; + + let (caller, _) = create_default_asset::(true); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller), Default::default(), name.clone(), symbol.clone(), decimals) + verify { + let id = Default::default(); + assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); + } + + clear_metadata { + let (caller, _) = create_default_asset::(true); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let dummy = vec![0u8; T::StringLimit::get() as usize]; + let origin = SystemOrigin::Signed(caller.clone()).into(); + Assets::::set_metadata(origin, Default::default(), dummy.clone(), dummy, 12)?; + }: _(SystemOrigin::Signed(caller), Default::default()) + verify { + assert_last_event::(Event::MetadataCleared(Default::default()).into()); + } + + force_set_metadata { + let n in 0 .. T::StringLimit::get(); + let s in 0 .. T::StringLimit::get(); + + let name = vec![0u8; n as usize]; + let symbol = vec![0u8; s as usize]; + let decimals = 12; + + create_default_asset::(true); + + let origin = T::ForceOrigin::successful_origin(); + let call = Call::::force_set_metadata { + id: Default::default(), + name: name.clone(), + symbol: symbol.clone(), + decimals, + is_frozen: false, + }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + let id = Default::default(); + assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); + } + + force_clear_metadata { + let (caller, _) = create_default_asset::(true); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let dummy = vec![0u8; T::StringLimit::get() as usize]; + let origin = SystemOrigin::Signed(caller.clone()).into(); + Assets::::set_metadata(origin, Default::default(), dummy.clone(), dummy, 12)?; + + let origin = T::ForceOrigin::successful_origin(); + let call = Call::::force_clear_metadata { id: Default::default() }; + }: { call.dispatch_bypass_filter(origin)? 
} + verify { + assert_last_event::(Event::MetadataCleared(Default::default()).into()); + } + + force_asset_status { + let (caller, caller_lookup) = create_default_asset::(true); + + let origin = T::ForceOrigin::successful_origin(); + let call = Call::::force_asset_status { + id: Default::default(), + owner: caller_lookup.clone(), + issuer: caller_lookup.clone(), + admin: caller_lookup.clone(), + freezer: caller_lookup.clone(), + min_balance: 100u32.into(), + is_sufficient: true, + is_frozen: false, + }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_last_event::(Event::AssetStatusChanged(Default::default()).into()); + } + + approve_transfer { + let (caller, _) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + + let id = Default::default(); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let amount = 100u32.into(); + }: _(SystemOrigin::Signed(caller.clone()), id, delegate_lookup, amount) + verify { + assert_last_event::(Event::ApprovedTransfer(id, caller, delegate, amount).into()); + } + + transfer_approved { + let (owner, owner_lookup) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&owner, DepositBalanceOf::::max_value()); + + let id = Default::default(); + let delegate: T::AccountId = account("delegate", 0, SEED); + whitelist_account!(delegate); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let amount = 100u32.into(); + let origin = SystemOrigin::Signed(owner.clone()).into(); + Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; + + let dest: T::AccountId = account("dest", 0, SEED); + let dest_lookup = T::Lookup::unlookup(dest.clone()); + }: _(SystemOrigin::Signed(delegate.clone()), id, owner_lookup, dest_lookup, amount) + verify { + assert!(T::Currency::reserved_balance(&owner).is_zero()); + assert_event::(Event::Transferred(id, owner, dest, amount).into()); + } + + cancel_approval { + let (caller, _) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + + let id = Default::default(); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let amount = 100u32.into(); + let origin = SystemOrigin::Signed(caller.clone()).into(); + Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; + }: _(SystemOrigin::Signed(caller.clone()), id, delegate_lookup) + verify { + assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); + } + + force_cancel_approval { + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + + let id = Default::default(); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let amount = 100u32.into(); + let origin = SystemOrigin::Signed(caller.clone()).into(); + Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; + }: _(SystemOrigin::Signed(caller.clone()), id, caller_lookup, delegate_lookup) + verify { + assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); + } +} + +impl_benchmark_test_suite!(Assets, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/assets/src/extra_mutator.rs 
b/frame/assets/src/extra_mutator.rs
new file mode 100644
index 0000000000000..8c601b746346c
--- /dev/null
+++ b/frame/assets/src/extra_mutator.rs
@@ -0,0 +1,105 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Datatype for easy mutation of the extra "sidecar" data.
+
+use super::*;
+
+/// A mutator type allowing inspection and possible modification of the extra "sidecar" data.
+///
+/// This may be used as a `Deref` for the pallet's extra data. If mutated (using `DerefMut`), then
+/// any uncommitted changes (see `commit` function) will be automatically committed to storage when
+/// dropped. Changes, even after committed, may be reverted to their original values with the
+/// `revert` function.
+pub struct ExtraMutator<T: Config<I>, I: 'static = ()> {
+	id: T::AssetId,
+	who: T::AccountId,
+	original: T::Extra,
+	pending: Option<T::Extra>,
+}
+
+impl<T: Config<I>, I: 'static> Drop for ExtraMutator<T, I> {
+	fn drop(&mut self) {
+		debug_assert!(self.commit().is_ok(), "attempt to write to non-existent asset account");
+	}
+}
+
+impl<T: Config<I>, I: 'static> sp_std::ops::Deref for ExtraMutator<T, I> {
+	type Target = T::Extra;
+	fn deref(&self) -> &T::Extra {
+		match self.pending {
+			Some(ref value) => value,
+			None => &self.original,
+		}
+	}
+}
+
+impl<T: Config<I>, I: 'static> sp_std::ops::DerefMut for ExtraMutator<T, I> {
+	fn deref_mut(&mut self) -> &mut T::Extra {
+		if self.pending.is_none() {
+			self.pending = Some(self.original.clone());
+		}
+		self.pending.as_mut().unwrap()
+	}
+}
+
+impl<T: Config<I>, I: 'static> ExtraMutator<T, I> {
+	pub(super) fn maybe_new(
+		id: T::AssetId,
+		who: impl sp_std::borrow::Borrow<T::AccountId>,
+	) -> Option<ExtraMutator<T, I>> {
+		if Account::<T, I>::contains_key(id, who.borrow()) {
+			Some(ExtraMutator::<T, I> {
+				id,
+				who: who.borrow().clone(),
+				original: Account::<T, I>::get(id, who.borrow()).extra,
+				pending: None,
+			})
+		} else {
+			None
+		}
+	}
+
+	/// Commit any changes to storage.
+	pub fn commit(&mut self) -> Result<(), ()> {
+		if let Some(extra) = self.pending.take() {
+			Account::<T, I>::try_mutate_exists(self.id, self.who.borrow(), |maybe_account| {
+				if let Some(ref mut account) = maybe_account {
+					account.extra = extra;
+					Ok(())
+				} else {
+					Err(())
+				}
+			})
+		} else {
+			Ok(())
+		}
+	}
+
+	/// Revert any changes, even those already committed by `self` and drop self.
+	pub fn revert(mut self) -> Result<(), ()> {
+		self.pending = None;
+		Account::<T, I>::try_mutate_exists(self.id, self.who.borrow(), |maybe_account| {
+			if let Some(ref mut account) = maybe_account {
+				account.extra = self.original.clone();
+				Ok(())
+			} else {
+				Err(())
+			}
+		})
+	}
+}
diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs
new file mode 100644
index 0000000000000..81b490eaf877c
--- /dev/null
+++ b/frame/assets/src/functions.rs
@@ -0,0 +1,481 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Functions for the Assets pallet. + +use super::*; + +// The main implementation block for the module. +impl, I: 'static> Pallet { + // Public immutables + + /// Return the extra "sidecar" data for `id`/`who`, or `None` if the account doesn't exist. + pub fn adjust_extra( + id: T::AssetId, + who: impl sp_std::borrow::Borrow, + ) -> Option> { + ExtraMutator::maybe_new(id, who) + } + + /// Get the asset `id` balance of `who`. + pub fn balance(id: T::AssetId, who: impl sp_std::borrow::Borrow) -> T::Balance { + Account::::get(id, who.borrow()).balance + } + + /// Get the total supply of an asset `id`. + pub fn total_supply(id: T::AssetId) -> T::Balance { + Asset::::get(id).map(|x| x.supply).unwrap_or_else(Zero::zero) + } + + pub(super) fn new_account( + who: &T::AccountId, + d: &mut AssetDetails>, + ) -> Result { + let accounts = d.accounts.checked_add(1).ok_or(ArithmeticError::Overflow)?; + let is_sufficient = if d.is_sufficient { + frame_system::Pallet::::inc_sufficients(who); + d.sufficients += 1; + true + } else { + frame_system::Pallet::::inc_consumers(who).map_err(|_| Error::::NoProvider)?; + false + }; + d.accounts = accounts; + Ok(is_sufficient) + } + + pub(super) fn dead_account( + what: T::AssetId, + who: &T::AccountId, + d: &mut AssetDetails>, + sufficient: bool, + ) { + if sufficient { + d.sufficients = d.sufficients.saturating_sub(1); + frame_system::Pallet::::dec_sufficients(who); + } else { + frame_system::Pallet::::dec_consumers(who); + } + d.accounts = d.accounts.saturating_sub(1); + T::Freezer::died(what, who) + } + + pub(super) fn can_increase( + id: T::AssetId, + who: &T::AccountId, + amount: T::Balance, + ) -> DepositConsequence { + let details = match Asset::::get(id) { + Some(details) => details, + None => return DepositConsequence::UnknownAsset, + }; + if details.supply.checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + let account = Account::::get(id, who); + if account.balance.checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + if account.balance.is_zero() { + if amount < details.min_balance { + return DepositConsequence::BelowMinimum + } + if !details.is_sufficient && frame_system::Pallet::::providers(who) == 0 { + return DepositConsequence::CannotCreate + } + if details.is_sufficient && details.sufficients.checked_add(1).is_none() { + return DepositConsequence::Overflow + } + } + + DepositConsequence::Success + } + + /// Return the consequence of a withdraw.
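The `DepositConsequence` returned by `can_increase` above (and the `WithdrawConsequence` from `can_decrease` just below) lets callers pre-flight an operation without touching storage. A minimal consumer-side sketch, assuming only the variants visible in this file and the `tokens` module path used in this diff's imports; the helper itself is hypothetical, not part of this patch:

use frame_support::traits::tokens::DepositConsequence;

// Hypothetical helper: report whether a deposit of some amount would go
// through, before actually dispatching a mint or transfer-in.
fn deposit_would_succeed(conseq: DepositConsequence) -> bool {
    match conseq {
        DepositConsequence::Success => true,
        // UnknownAsset, Overflow, BelowMinimum and CannotCreate all mean
        // the corresponding deposit would fail.
        _ => false,
    }
}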
+ pub(super) fn can_decrease( + id: T::AssetId, + who: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> WithdrawConsequence { + use WithdrawConsequence::*; + let details = match Asset::::get(id) { + Some(details) => details, + None => return UnknownAsset, + }; + if details.supply.checked_sub(&amount).is_none() { + return Underflow + } + if details.is_frozen { + return Frozen + } + let account = Account::::get(id, who); + if account.is_frozen { + return Frozen + } + if let Some(rest) = account.balance.checked_sub(&amount) { + if let Some(frozen) = T::Freezer::frozen_balance(id, who) { + match frozen.checked_add(&details.min_balance) { + Some(required) if rest < required => return Frozen, + None => return Overflow, + _ => {}, + } + } + + let is_provider = false; + let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); + let must_keep_alive = keep_alive || is_required; + + if rest < details.min_balance { + if must_keep_alive { + WouldDie + } else { + ReducedToZero(rest) + } + } else { + Success + } + } else { + NoFunds + } + } + + // Maximum `amount` that can be passed into `can_withdraw` to result in a `WithdrawConsequence` + // of `Success`. + pub(super) fn reducible_balance( + id: T::AssetId, + who: &T::AccountId, + keep_alive: bool, + ) -> Result { + let details = Asset::::get(id).ok_or_else(|| Error::::Unknown)?; + ensure!(!details.is_frozen, Error::::Frozen); + + let account = Account::::get(id, who); + ensure!(!account.is_frozen, Error::::Frozen); + + let amount = if let Some(frozen) = T::Freezer::frozen_balance(id, who) { + // Frozen balance: account CANNOT be deleted + let required = + frozen.checked_add(&details.min_balance).ok_or(ArithmeticError::Overflow)?; + account.balance.saturating_sub(required) + } else { + let is_provider = false; + let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); + if keep_alive || is_required { + // We want to keep the account around. + account.balance.saturating_sub(details.min_balance) + } else { + // Don't care if the account dies + account.balance + } + }; + Ok(amount.min(details.supply)) + } + + /// Make preparatory checks for debiting some funds from an account. Flags indicate requirements + /// of the debit. + /// + /// - `amount`: The amount desired to be debited. The actual amount returned for debit may be + /// less (in the case of `best_effort` being `true`) or greater by up to the minimum balance + /// less one. + /// - `keep_alive`: Require that `target` must stay alive. + /// - `respect_freezer`: Respect any freezes on the account or token (or not). + /// - `best_effort`: The debit amount may be less than `amount`. + /// + /// On success, the amount which should be debited (this will always be at least `amount` unless + /// `best_effort` is `true`) together with an optional value indicating the argument which must + /// be passed into the `melted` function of the `T::Freezer` if `Some`. + /// + /// If no valid debit can be made then return an `Err`. 
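`prep_debit` below is driven entirely by the two `DebitFlags` fields (the struct itself lives in types.rs, which this hunk does not show; the field names are taken from the call sites later in this diff). A quick sketch of the combinations the dispatchables use, assuming that struct shape:

// `burn`: take as much as possible up to `amount`; reaping the account is fine.
let burn_flags = DebitFlags { keep_alive: false, best_effort: true };
// `transfer`: exact amount; the sender may be reaped if left below min_balance.
let transfer_flags = DebitFlags { keep_alive: false, best_effort: false };
// `transfer_keep_alive`: exact amount, but the sender must survive.
let keep_alive_flags = DebitFlags { keep_alive: true, best_effort: false };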
+ pub(super) fn prep_debit( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + f: DebitFlags, + ) -> Result { + let actual = Self::reducible_balance(id, target, f.keep_alive)?.min(amount); + ensure!(f.best_effort || actual >= amount, Error::::BalanceLow); + + let conseq = Self::can_decrease(id, target, actual, f.keep_alive); + let actual = match conseq.into_result() { + Ok(dust) => actual.saturating_add(dust), //< guaranteed by reducible_balance + Err(e) => { + debug_assert!(false, "passed from reducible_balance; qed"); + return Err(e.into()) + }, + }; + + Ok(actual) + } + + /// Make preparatory checks for crediting some funds from an account. Flags indicate + /// requirements of the credit. + /// + /// - `amount`: The amount desired to be credited. + /// - `debit`: The amount by which some other account has been debited. If this is greater than + /// `amount`, then the `burn_dust` parameter takes effect. + /// - `burn_dust`: Indicates that in the case of debit being greater than amount, the additional + /// (dust) value should be burned, rather than credited. + /// + /// On success, the amount which should be credited (this will always be at least `amount`) + /// together with an optional value indicating the value which should be burned. The latter + /// will always be `None` as long as `burn_dust` is `false` or `debit` is no greater than + /// `amount`. + /// + /// If no valid credit can be made then return an `Err`. + pub(super) fn prep_credit( + id: T::AssetId, + dest: &T::AccountId, + amount: T::Balance, + debit: T::Balance, + burn_dust: bool, + ) -> Result<(T::Balance, Option), DispatchError> { + let (credit, maybe_burn) = match (burn_dust, debit.checked_sub(&amount)) { + (true, Some(dust)) => (amount, Some(dust)), + _ => (debit, None), + }; + Self::can_increase(id, &dest, credit).into_result()?; + Ok((credit, maybe_burn)) + } + + /// Increases the asset `id` balance of `beneficiary` by `amount`. + /// + /// This alters the registered supply of the asset and emits an event. + /// + /// Will return an error or will increase the amount by exactly `amount`. + pub(super) fn do_mint( + id: T::AssetId, + beneficiary: &T::AccountId, + amount: T::Balance, + maybe_check_issuer: Option, + ) -> DispatchResult { + Self::increase_balance(id, beneficiary, amount, |details| -> DispatchResult { + if let Some(check_issuer) = maybe_check_issuer { + ensure!(&check_issuer == &details.issuer, Error::::NoPermission); + } + debug_assert!( + T::Balance::max_value() - details.supply >= amount, + "checked in prep; qed" + ); + details.supply = details.supply.saturating_add(amount); + Ok(()) + })?; + Self::deposit_event(Event::Issued(id, beneficiary.clone(), amount)); + Ok(()) + } + + /// Increases the asset `id` balance of `beneficiary` by `amount`. + /// + /// LOW-LEVEL: Does not alter the supply of asset or emit an event. Use `do_mint` if you need + /// that. This is not intended to be used alone. + /// + /// Will return an error or will increase the amount by exactly `amount`. 
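The dust rule in `prep_credit` above is easiest to see with concrete numbers. A self-contained sketch using `u64` in place of `T::Balance`, mirroring the `(burn_dust, debit.checked_sub(&amount))` match:

fn split_credit(amount: u64, debit: u64, burn_dust: bool) -> (u64, Option<u64>) {
    match (burn_dust, debit.checked_sub(amount)) {
        // Debit exceeded the requested amount: credit the amount, burn the rest.
        (true, Some(dust)) => (amount, Some(dust)),
        // Otherwise the full debit lands on the destination.
        _ => (debit, None),
    }
}

fn main() {
    // e.g. a best-effort debit of 103 against a requested 100:
    assert_eq!(split_credit(100, 103, true), (100, Some(3)));
    // With burn_dust off, the whole 103 is credited instead.
    assert_eq!(split_credit(100, 103, false), (103, None));
}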
+ pub(super) fn increase_balance( + id: T::AssetId, + beneficiary: &T::AccountId, + amount: T::Balance, + check: impl FnOnce( + &mut AssetDetails>, + ) -> DispatchResult, + ) -> DispatchResult { + if amount.is_zero() { + return Ok(()) + } + + Self::can_increase(id, beneficiary, amount).into_result()?; + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + check(details)?; + + Account::::try_mutate(id, beneficiary, |t| -> DispatchResult { + let new_balance = t.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, TokenError::BelowMinimum); + if t.balance.is_zero() { + t.sufficient = Self::new_account(beneficiary, details)?; + } + t.balance = new_balance; + Ok(()) + })?; + Ok(()) + })?; + Ok(()) + } + + /// Reduces asset `id` balance of `target` by `amount`. Flags `f` can be given to alter whether + /// it attempts a `best_effort` or makes sure to `keep_alive` the account. + /// + /// This alters the registered supply of the asset and emits an event. + /// + /// Will return an error and do nothing or will decrease the amount and return the amount + /// reduced by. + pub(super) fn do_burn( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + maybe_check_admin: Option, + f: DebitFlags, + ) -> Result { + let actual = Self::decrease_balance(id, target, amount, f, |actual, details| { + // Check admin rights. + if let Some(check_admin) = maybe_check_admin { + ensure!(&check_admin == &details.admin, Error::::NoPermission); + } + + debug_assert!(details.supply >= actual, "checked in prep; qed"); + details.supply = details.supply.saturating_sub(actual); + + Ok(()) + })?; + Self::deposit_event(Event::Burned(id, target.clone(), actual)); + Ok(actual) + } + + /// Reduces asset `id` balance of `target` by `amount`. Flags `f` can be given to alter whether + /// it attempts a `best_effort` or makes sure to `keep_alive` the account. + /// + /// LOW-LEVEL: Does not alter the supply of asset or emit an event. Use `do_burn` if you need + /// that. This is not intended to be used alone. + /// + /// Will return an error and do nothing or will decrease the amount and return the amount + /// reduced by. + pub(super) fn decrease_balance( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + f: DebitFlags, + check: impl FnOnce( + T::Balance, + &mut AssetDetails>, + ) -> DispatchResult, + ) -> Result { + if amount.is_zero() { + return Ok(amount) + } + + let actual = Self::prep_debit(id, target, amount, f)?; + + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + check(actual, details)?; + + Account::::try_mutate_exists(id, target, |maybe_account| -> DispatchResult { + let mut account = maybe_account.take().unwrap_or_default(); + debug_assert!(account.balance >= actual, "checked in prep; qed"); + + // Make the debit. + account.balance = account.balance.saturating_sub(actual); + *maybe_account = if account.balance < details.min_balance { + debug_assert!(account.balance.is_zero(), "checked in prep; qed"); + Self::dead_account(id, target, details, account.sufficient); + None + } else { + Some(account) + }; + Ok(()) + })?; + + Ok(()) + })?; + + Ok(actual) + } + + /// Reduces the asset `id` balance of `source` by some `amount` and increases the balance of + /// `dest` by (similar) amount. + /// + /// Returns the actual amount placed into `dest`. Exact semantics are determined by the flags + /// `f`. 
+ /// + /// Will fail if the amount transferred is so small that it cannot create the destination due + /// to minimum balance requirements. + pub(super) fn do_transfer( + id: T::AssetId, + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + maybe_need_admin: Option, + f: TransferFlags, + ) -> Result { + // Early exit if no-op. + if amount.is_zero() { + Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), amount)); + return Ok(amount) + } + + // Figure out the debit and credit, together with side-effects. + let debit = Self::prep_debit(id, &source, amount, f.into())?; + let (credit, maybe_burn) = Self::prep_credit(id, &dest, amount, debit, f.burn_dust)?; + + let mut source_account = Account::::get(id, &source); + + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + // Check admin rights. + if let Some(need_admin) = maybe_need_admin { + ensure!(&need_admin == &details.admin, Error::::NoPermission); + } + + // Skip if source == dest + if source == dest { + return Ok(()) + } + + // Burn any dust if needed. + if let Some(burn) = maybe_burn { + // Debit dust from supply; this will not saturate since it's already checked in + // prep. + debug_assert!(details.supply >= burn, "checked in prep; qed"); + details.supply = details.supply.saturating_sub(burn); + } + + // Debit balance from source; this will not saturate since it's already checked in prep. + debug_assert!(source_account.balance >= debit, "checked in prep; qed"); + source_account.balance = source_account.balance.saturating_sub(debit); + + Account::::try_mutate(id, &dest, |a| -> DispatchResult { + // Calculate new balance; this will not saturate since it's already checked in prep. + debug_assert!(a.balance.checked_add(&credit).is_some(), "checked in prep; qed"); + let new_balance = a.balance.saturating_add(credit); + + // Create a new account if there wasn't one already. + if a.balance.is_zero() { + a.sufficient = Self::new_account(&dest, details)?; + } + + a.balance = new_balance; + Ok(()) + })?; + + // Remove source account if it's now dead. + if source_account.balance < details.min_balance { + debug_assert!(source_account.balance.is_zero(), "checked in prep; qed"); + Self::dead_account(id, &source, details, source_account.sufficient); + Account::::remove(id, &source); + } else { + Account::::insert(id, &source, &source_account) + } + + Ok(()) + })?; + + Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), credit)); + Ok(credit) + } +} diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs new file mode 100644 index 0000000000000..4e85b20a1fbb1 --- /dev/null +++ b/frame/assets/src/impl_fungibles.rs @@ -0,0 +1,149 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for the fungibles traits.
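These trait implementations are what let other pallets depend on the `fungibles` abstractions rather than on pallet-assets concretely. A hedged sketch of that consumer-side pattern, using the trait paths and the `transfer`/`reducible_balance` signatures shown in the hunks that follow; the helper and its bounds are assumptions for the sketch, not part of this patch:

use frame_support::traits::tokens::fungibles::{Inspect, Transfer};
use sp_runtime::DispatchError;

// Hypothetical helper: refuse early rather than letting the transfer
// fail deep inside the implementation.
fn checked_move<AccountId, F>(
    asset: F::AssetId,
    from: &AccountId,
    to: &AccountId,
    amount: F::Balance,
) -> Result<F::Balance, DispatchError>
where
    F: Inspect<AccountId> + Transfer<AccountId>,
    F::AssetId: Copy,
{
    // reducible_balance answers "how much can leave without reaping?".
    if F::reducible_balance(asset, from, true) < amount {
        return Err(DispatchError::Other("insufficient reducible balance"));
    }
    F::transfer(asset, from, to, amount, true)
}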
+ +use super::*; + +impl, I: 'static> fungibles::Inspect<::AccountId> for Pallet { + type AssetId = T::AssetId; + type Balance = T::Balance; + + fn total_issuance(asset: Self::AssetId) -> Self::Balance { + Asset::::get(asset).map(|x| x.supply).unwrap_or_else(Zero::zero) + } + + fn minimum_balance(asset: Self::AssetId) -> Self::Balance { + Asset::::get(asset).map(|x| x.min_balance).unwrap_or_else(Zero::zero) + } + + fn balance(asset: Self::AssetId, who: &::AccountId) -> Self::Balance { + Pallet::::balance(asset, who) + } + + fn reducible_balance( + asset: Self::AssetId, + who: &::AccountId, + keep_alive: bool, + ) -> Self::Balance { + Pallet::::reducible_balance(asset, who, keep_alive).unwrap_or(Zero::zero()) + } + + fn can_deposit( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> DepositConsequence { + Pallet::::can_increase(asset, who, amount) + } + + fn can_withdraw( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + Pallet::::can_decrease(asset, who, amount, false) + } +} + +impl, I: 'static> fungibles::Mutate<::AccountId> for Pallet { + fn mint_into( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> DispatchResult { + Self::do_mint(asset, who, amount, None) + } + + fn burn_from( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> Result { + let f = DebitFlags { keep_alive: false, best_effort: false }; + Self::do_burn(asset, who, amount, None, f) + } + + fn slash( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> Result { + let f = DebitFlags { keep_alive: false, best_effort: true }; + Self::do_burn(asset, who, amount, None, f) + } +} + +impl, I: 'static> fungibles::Transfer for Pallet { + fn transfer( + asset: Self::AssetId, + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> Result { + let f = TransferFlags { keep_alive, best_effort: false, burn_dust: false }; + Self::do_transfer(asset, source, dest, amount, None, f) + } +} + +impl, I: 'static> fungibles::Unbalanced for Pallet { + fn set_balance(_: Self::AssetId, _: &T::AccountId, _: Self::Balance) -> DispatchResult { + unreachable!("set_balance is not used if other functions are impl'd"); + } + fn set_total_issuance(id: T::AssetId, amount: Self::Balance) { + Asset::::mutate_exists(id, |maybe_asset| { + if let Some(ref mut asset) = maybe_asset { + asset.supply = amount + } + }); + } + fn decrease_balance( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Result { + let f = DebitFlags { keep_alive: false, best_effort: false }; + Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) + } + fn decrease_balance_at_most( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Self::Balance { + let f = DebitFlags { keep_alive: false, best_effort: true }; + Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())).unwrap_or(Zero::zero()) + } + fn increase_balance( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Result { + Self::increase_balance(asset, who, amount, |_| Ok(()))?; + Ok(amount) + } + fn increase_balance_at_most( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Self::Balance { + match Self::increase_balance(asset, who, amount, |_| Ok(())) { + Ok(()) => amount, + Err(_) => Zero::zero(), + } + } +} diff --git a/frame/assets/src/impl_stored_map.rs b/frame/assets/src/impl_stored_map.rs new file mode 100644 index 
0000000000000..4c1ff1a0c6027 --- /dev/null +++ b/frame/assets/src/impl_stored_map.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Assets pallet's `StoredMap` implementation. + +use super::*; + +impl, I: 'static> StoredMap<(T::AssetId, T::AccountId), T::Extra> for Pallet { + fn get(id_who: &(T::AssetId, T::AccountId)) -> T::Extra { + let &(id, ref who) = id_who; + if Account::::contains_key(id, who) { + Account::::get(id, who).extra + } else { + Default::default() + } + } + + fn try_mutate_exists>( + id_who: &(T::AssetId, T::AccountId), + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + let &(id, ref who) = id_who; + let mut maybe_extra = Some(Account::::get(id, who).extra); + let r = f(&mut maybe_extra)?; + // They want to write some value or delete it. + // If the account existed and they want to write a value, then we write. + // If the account didn't exist and they want to delete it, then we let it pass. + // Otherwise, we fail. + Account::::try_mutate_exists(id, who, |maybe_account| { + if let Some(extra) = maybe_extra { + // They want to write a value. Let this happen only if the account actually exists. + if let Some(ref mut account) = maybe_account { + account.extra = extra; + } else { + Err(DispatchError::NoProviders)?; + } + } else { + // They want to delete it. Let this pass if the item never existed anyway. + ensure!(maybe_account.is_none(), DispatchError::ConsumerRemaining); + } + Ok(r) + }) + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index e5ad2ae352eb8..797a3ae7ee9fb 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Assets Module +//! # Assets Pallet //! //! A simple, secure module for dealing with fungible assets. //! @@ -24,43 +24,88 @@ //! The Assets module provides functionality for asset management of fungible asset classes //! with a fixed supply, including: //! -//! * Asset Issuance -//! * Asset Transfer -//! * Asset Destruction +//! * Asset Issuance (Minting) +//! * Asset Transferal +//! * Asset Freezing +//! * Asset Destruction (Burning) +//! * Delegated Asset Transfers ("Approval API") //! -//! To use it in your runtime, you need to implement the assets [`Trait`](./trait.Trait.html). +//! To use it in your runtime, you need to implement the assets [`Config`]. //! -//! The supported dispatchable functions are documented in the [`Call`](./enum.Call.html) enum. +//! The supported dispatchable functions are documented in the [`Call`] enum. //! //! 
### Terminology //! -//! * **Asset issuance:** The creation of a new asset, whose total supply will belong to the -//! account that issues the asset. -//! * **Asset transfer:** The action of transferring assets from one account to another. -//! * **Asset destruction:** The process of an account removing its entire holding of an asset. -//! * **Fungible asset:** An asset whose units are interchangeable. -//! * **Non-fungible asset:** An asset for which each unit has unique characteristics. +//! * **Admin**: An account ID uniquely privileged to be able to unfreeze (thaw) an account and its +//! assets, as well as forcibly transfer a particular class of assets between arbitrary accounts +//! and reduce the balance of a particular class of assets of arbitrary accounts. +//! * **Asset issuance/minting**: The creation of a new asset, whose total supply will belong to the +//! account that issues the asset. This is a privileged operation. +//! * **Asset transfer**: The reduction of the balance of an asset of one account with the +//! corresponding increase in the balance of another. +//! * **Asset destruction**: The process of reducing the balance of an asset of one account. This is a +//! privileged operation. +//! * **Fungible asset**: An asset whose units are interchangeable. +//! * **Issuer**: An account ID uniquely privileged to be able to mint a particular class of assets. +//! * **Freezer**: An account ID uniquely privileged to be able to freeze an account from +//! transferring a particular class of assets. +//! * **Freezing**: Removing the possibility of an unpermissioned transfer of an asset from a +//! particular account. +//! * **Non-fungible asset**: An asset for which each unit has unique characteristics. +//! * **Owner**: An account ID uniquely privileged to be able to destroy a particular asset class, +//! or to set the Issuer, Freezer or Admin of that asset class. +//! * **Approval**: The act of granting an account permission to transfer some balance of an asset +//! from the approving account into some third-party destination account. +//! * **Sufficiency**: The idea of a minimum-balance of an asset being sufficient to allow the +//! account's existence on the system without requiring any other existential-deposit. +//! //! ### Goals //! //! The assets system in Substrate is designed to make the following possible: //! -//! * Issue a unique asset to its creator's account. +//! * Issue new assets in a permissioned or permissionless way; if permissionless, then a +//! deposit is required. +//! * Allow accounts to be delegated the ability to transfer assets without otherwise existing +//! on-chain (*approvals*). //! * Move assets between accounts. -//! * Remove an account's balance of an asset when requested by that account's owner and update -//! the asset's total supply. +//! * Update the asset's total supply. +//! * Allow administrative activities by specially privileged accounts including freezing account +//! balances and minting/burning assets. //! //! ## Interface //! -//! ### Dispatchable Functions +//! ### Permissionless Functions //! -//! * `issue` - Issues the total supply of a new fungible asset to the account of the caller of the function. -//! * `transfer` - Transfers an `amount` of units of fungible asset `id` from the balance of -//! the function caller's account (`origin`) to a `target` account. -//! * `destroy` - Destroys the entire holding of a fungible asset `id` associated with the account -//! that called the function. +//!
* `create`: Creates a new asset class, taking the required deposit. +//! * `transfer`: Transfer sender's assets to another account. +//! * `transfer_keep_alive`: Transfer sender's assets to another account, keeping the sender alive. +//! * `set_metadata`: Set the metadata of an asset class. +//! * `clear_metadata`: Remove the metadata of an asset class. +//! * `approve_transfer`: Create or increase a delegated transfer. +//! * `cancel_approval`: Rescind a previous approval. +//! * `transfer_approved`: Transfer a third party's assets to another account. //! -//! Please refer to the [`Call`](./enum.Call.html) enum and its associated variants for documentation on each function. +//! ### Permissioned Functions +//! +//! * `force_create`: Creates a new asset class without taking any deposit. +//! * `force_set_metadata`: Set the metadata of an asset class. +//! * `force_clear_metadata`: Remove the metadata of an asset class. +//! * `force_asset_status`: Alter an asset class's attributes. +//! * `force_cancel_approval`: Rescind a previous approval. +//! +//! ### Privileged Functions +//! +//! * `destroy`: Destroys an entire asset class; called by the asset class's Owner. +//! * `mint`: Increases the asset balance of an account; called by the asset class's Issuer. +//! * `burn`: Decreases the asset balance of an account; called by the asset class's Admin. +//! * `force_transfer`: Transfers between arbitrary accounts; called by the asset class's Admin. +//! * `freeze`: Disallows further `transfer`s from an account; called by the asset class's Freezer. +//! * `thaw`: Allows further `transfer`s from an account; called by the asset class's Admin. +//! * `transfer_ownership`: Changes an asset class's Owner; called by the asset class's Owner. +//! * `set_team`: Changes an asset class's Admin, Freezer and Issuer; called by the asset class's +//! Owner. +//! +//! Please refer to the [`Call`] enum and its associated variants for documentation on each +//! function. //! //! ### Public Functions //! @@ -68,62 +113,7 @@ //! * `balance` - Get the asset `id` balance of `who`. //! * `total_supply` - Get the total supply of an asset `id`. //! -//! Please refer to the [`Module`](./struct.Module.html) struct for details on publicly available functions. -//! -//! ## Usage -//! -//! The following example shows how to use the Assets module in your runtime by exposing public functions to: -//! -//! * Issue a new fungible asset for a token distribution event (airdrop). -//! * Query the fungible asset holding balance of an account. -//! * Query the total supply of a fungible asset that has been issued. -//! -//! ### Prerequisites -//! -//! Import the Assets module and types and derive your runtime's configuration traits from the Assets module trait. -//! -//! ### Simple Code Snippet -//! -//! ```rust,ignore -//! use pallet_assets as assets; -//! use frame_support::{decl_module, dispatch, ensure}; -//! use frame_system::ensure_signed; -//! -//! pub trait Trait: assets::Trait { } -//! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult { -//! let sender = ensure_signed(origin).map_err(|e| e.as_str())?; -//! -//! const ACCOUNT_ALICE: u64 = 1; -//! const ACCOUNT_BOB: u64 = 2; -//! const COUNT_AIRDROP_RECIPIENTS: u64 = 2; -//! const TOKENS_FIXED_SUPPLY: u64 = 100; -//! -//! ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), "Divide by zero error."); -//! -//! let asset_id = Self::next_asset_id(); -//! -//!
>::mutate(|asset_id| *asset_id += 1); -//! >::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); -//! >::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); -//! >::insert(asset_id, TOKENS_FIXED_SUPPLY); -//! -//! Self::deposit_event(RawEvent::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY)); -//! Ok(()) -//! } -//! } -//! } -//! ``` -//! -//! ## Assumptions -//! -//! Below are assumptions that must be held when using this module. If any of -//! them are violated, the behavior of this module is undefined. -//! -//! * The total count of assets should be less than -//! `Trait::AssetId::max_value()`. +//! Please refer to the [`Pallet`] struct for details on publicly available functions. //! //! ## Related Modules //! @@ -133,292 +123,1165 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{Parameter, decl_module, decl_event, decl_storage, decl_error, ensure}; -use sp_runtime::traits::{Member, AtLeast32Bit, AtLeast32BitUnsigned, Zero, StaticLookup}; -use frame_system::ensure_signed; -use sp_runtime::traits::One; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +pub mod mock; +#[cfg(test)] +mod tests; +pub mod weights; -/// The module configuration trait. -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +mod extra_mutator; +pub use extra_mutator::*; +mod functions; +mod impl_fungibles; +mod impl_stored_map; +mod types; +pub use types::*; - /// The units in which we record balances. - type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; +use codec::HasCompact; +use frame_support::{ + dispatch::{DispatchError, DispatchResult}, + ensure, + traits::{ + tokens::{fungibles, DepositConsequence, WithdrawConsequence}, + BalanceStatus::Reserved, + Currency, ReservableCurrency, StoredMap, + }, +}; +use frame_system::Config as SystemConfig; +use sp_runtime::{ + traits::{ + AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, Saturating, StaticLookup, Zero, + }, + ArithmeticError, TokenError, +}; +use sp_std::{borrow::Borrow, convert::TryInto, prelude::*}; - /// The arithmetic type of asset identifier. - type AssetId: Parameter + AtLeast32Bit + Default + Copy; -} +pub use pallet::*; +pub use weights::WeightInfo; -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - /// Issue a new class of fungible assets. There are, and will only ever be, `total` - /// such assets and they'll all belong to the `origin` initially. It will have an - /// identifier `AssetId` instance: this will be specified in the `Issued` event. - /// - /// # - /// - `O(1)` - /// - 1 storage mutation (codec `O(1)`). - /// - 2 storage writes (condec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn issue(origin, #[compact] total: T::Balance) { - let origin = ensure_signed(origin)?; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; + use frame_system::pallet_prelude::*; - let id = Self::next_asset_id(); - >::mutate(|id| *id += One::one()); + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(_); - >::insert((id, &origin), total); - >::insert(id, total); + #[pallet::config] + /// The module configuration trait. 
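For orientation, here is how a runtime might satisfy the `Config` trait defined just below. This is a hedged sketch in the spirit of the pallet's mock; everything named here (`Runtime`, `Balances`, `Event`, the constant values) is illustrative rather than part of this diff:

use frame_support::parameter_types;

parameter_types! {
    pub const AssetDeposit: u64 = 100;
    pub const ApprovalDeposit: u64 = 1;
    pub const StringLimit: u32 = 50;
    pub const MetadataDepositBase: u64 = 10;
    pub const MetadataDepositPerByte: u64 = 1;
}

impl pallet_assets::Config for Runtime {
    type Event = Event;
    type Balance = u64;
    type AssetId = u32;
    type Currency = Balances; // the runtime's pallet-balances instance
    type ForceOrigin = frame_system::EnsureRoot<u64>;
    type AssetDeposit = AssetDeposit;
    type MetadataDepositBase = MetadataDepositBase;
    type MetadataDepositPerByte = MetadataDepositPerByte;
    type ApprovalDeposit = ApprovalDeposit;
    type StringLimit = StringLimit;
    type Freezer = (); // `()` performs no per-account freezing
    type Extra = ();
    type WeightInfo = ();
}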
+ pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - Self::deposit_event(RawEvent::Issued(id, origin, total)); - } + /// The units in which we record balances. + type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen; - /// Move some assets from one holder to another. - /// - /// # - /// - `O(1)` - /// - 1 static lookup - /// - 2 storage mutations (codec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn transfer(origin, - #[compact] id: T::AssetId, - target: ::Source, - #[compact] amount: T::Balance - ) { - let origin = ensure_signed(origin)?; - let origin_account = (id, origin.clone()); - let origin_balance = >::get(&origin_account); - let target = T::Lookup::lookup(target)?; - ensure!(!amount.is_zero(), Error::::AmountZero); - ensure!(origin_balance >= amount, Error::::BalanceLow); - - Self::deposit_event(RawEvent::Transferred(id, origin, target.clone(), amount)); - >::insert(origin_account, origin_balance - amount); - >::mutate((id, target), |balance| *balance += amount); - } + /// Identifier for the class of asset. + type AssetId: Member + Parameter + Default + Copy + HasCompact + MaxEncodedLen; - /// Destroy any assets of `id` owned by `origin`. - /// - /// # - /// - `O(1)` - /// - 1 storage mutation (codec `O(1)`). - /// - 1 storage deletion (codec `O(1)`). - /// - 1 event. - /// # - #[weight = 0] - fn destroy(origin, #[compact] id: T::AssetId) { - let origin = ensure_signed(origin)?; - let balance = >::take((id, &origin)); - ensure!(!balance.is_zero(), Error::::BalanceZero); + /// The currency mechanism. + type Currency: ReservableCurrency; - >::mutate(id, |total_supply| *total_supply -= balance); - Self::deposit_event(RawEvent::Destroyed(id, origin, balance)); - } + /// The origin which may forcibly create or destroy an asset or otherwise alter privileged + /// attributes. + type ForceOrigin: EnsureOrigin; + + /// The basic amount of funds that must be reserved for an asset. + #[pallet::constant] + type AssetDeposit: Get>; + + /// The basic amount of funds that must be reserved when adding metadata to your asset. + #[pallet::constant] + type MetadataDepositBase: Get>; + + /// The additional funds that must be reserved for the number of bytes you store in your + /// metadata. + #[pallet::constant] + type MetadataDepositPerByte: Get>; + + /// The amount of funds that must be reserved when creating a new approval. + #[pallet::constant] + type ApprovalDeposit: Get>; + + /// The maximum length of a name or symbol stored on-chain. + #[pallet::constant] + type StringLimit: Get; + + /// A hook to allow a per-asset, per-account minimum balance to be enforced. This must be + /// respected in all permissionless operations. + type Freezer: FrozenBalance; + + /// Additional data to be stored with an account's asset balance. + type Extra: Member + Parameter + Default + MaxEncodedLen; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } -} -decl_event! { - pub enum Event where - ::AccountId, - ::Balance, - ::AssetId, - { + #[pallet::storage] + /// Details of an asset. + pub(super) type Asset, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AssetId, + AssetDetails>, + >; + + #[pallet::storage] + /// The number of units of assets held by any given account. 
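The storage items defined next key balances by `(asset, account)` and approvals by `(asset, owner, delegate)`. A sketch of pallet-internal reads, with the `T`/`I` generics written out in turbofish form; the `amount`/`deposit` field names follow the `Approval` struct in types.rs, which this hunk does not show:

// Balance record for `who` in asset `id`; ValueQuery yields a default
// (zero) record for unknown accounts.
let balance = Account::<T, I>::get(id, &who).balance;

// Approval that `owner` granted `delegate` for asset `id`, if any.
if let Some(approval) = Approvals::<T, I>::get((id, &owner, &delegate)) {
    // `amount` still approved, and the currency `deposit` held for storage.
    let _ = (approval.amount, approval.deposit);
}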
+ pub(super) type Account, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::AssetId, + Blake2_128Concat, + T::AccountId, + AssetBalance, + ValueQuery, + GetDefault, + ConstU32<300_000>, + >; + + #[pallet::storage] + /// Approved balance transfers. First balance is the amount approved for transfer. Second + /// is the amount of `T::Currency` reserved for storing this. + /// First key is the asset ID, second key is the owner and third key is the delegate. + pub(super) type Approvals, I: 'static = ()> = StorageNMap< + _, + ( + NMapKey, + NMapKey, // owner + NMapKey, // delegate + ), + Approval>, + OptionQuery, + GetDefault, + ConstU32<300_000>, + >; + + #[pallet::storage] + /// Metadata of an asset. + pub(super) type Metadata, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AssetId, + AssetMetadata, BoundedVec>, + ValueQuery, + GetDefault, + ConstU32<300_000>, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// Some asset class was created. \[asset_id, creator, owner\] + Created(T::AssetId, T::AccountId, T::AccountId), /// Some assets were issued. \[asset_id, owner, total_supply\] - Issued(AssetId, AccountId, Balance), + Issued(T::AssetId, T::AccountId, T::Balance), /// Some assets were transferred. \[asset_id, from, to, amount\] - Transferred(AssetId, AccountId, AccountId, Balance), + Transferred(T::AssetId, T::AccountId, T::AccountId, T::Balance), /// Some assets were destroyed. \[asset_id, owner, balance\] - Destroyed(AssetId, AccountId, Balance), + Burned(T::AssetId, T::AccountId, T::Balance), + /// The management team changed \[asset_id, issuer, admin, freezer\] + TeamChanged(T::AssetId, T::AccountId, T::AccountId, T::AccountId), + /// The owner changed \[asset_id, owner\] + OwnerChanged(T::AssetId, T::AccountId), + /// Some account `who` was frozen. \[asset_id, who\] + Frozen(T::AssetId, T::AccountId), + /// Some account `who` was thawed. \[asset_id, who\] + Thawed(T::AssetId, T::AccountId), + /// Some asset `asset_id` was frozen. \[asset_id\] + AssetFrozen(T::AssetId), + /// Some asset `asset_id` was thawed. \[asset_id\] + AssetThawed(T::AssetId), + /// An asset class was destroyed. + Destroyed(T::AssetId), + /// Some asset class was force-created. \[asset_id, owner\] + ForceCreated(T::AssetId, T::AccountId), + /// New metadata has been set for an asset. \[asset_id, name, symbol, decimals, is_frozen\] + MetadataSet(T::AssetId, Vec, Vec, u8, bool), + /// Metadata has been cleared for an asset. \[asset_id\] + MetadataCleared(T::AssetId), + /// (Additional) funds have been approved for transfer to a destination account. + /// \[asset_id, source, delegate, amount\] + ApprovedTransfer(T::AssetId, T::AccountId, T::AccountId, T::Balance), + /// An approval for account `delegate` was cancelled by `owner`. + /// \[id, owner, delegate\] + ApprovalCancelled(T::AssetId, T::AccountId, T::AccountId), + /// An `amount` was transferred in its entirety from `owner` to `destination` by + /// the approved `delegate`. + /// \[id, owner, delegate, destination\] + TransferredApproved(T::AssetId, T::AccountId, T::AccountId, T::AccountId, T::Balance), + /// An asset has had its attributes changed by the `Force` origin. + /// \[id\] + AssetStatusChanged(T::AssetId), } -} -decl_error! 
{ - pub enum Error for Module { - /// Transfer amount should be non-zero - AmountZero, - /// Account balance must be greater than or equal to the transfer amount + #[pallet::error] + pub enum Error { + /// Account balance must be greater than or equal to the transfer amount. BalanceLow, - /// Balance should be non-zero + /// Balance should be non-zero. BalanceZero, + /// The signing account has no permission to do the operation. + NoPermission, + /// The given asset ID is unknown. + Unknown, + /// The origin account is frozen. + Frozen, + /// The asset ID is already taken. + InUse, + /// Invalid witness data given. + BadWitness, + /// Minimum balance should be non-zero. + MinBalanceZero, + /// No provider reference exists to allow a non-zero balance of a non-self-sufficient + /// asset. + NoProvider, + /// Invalid metadata given. + BadMetadata, + /// No approval exists that would allow the transfer. + Unapproved, + /// The source account would not survive the transfer and it needs to stay alive. + WouldDie, } -} -decl_storage! { - trait Store for Module as Assets { - /// The number of units of assets held by any given account. - Balances: map hasher(blake2_128_concat) (T::AssetId, T::AccountId) => T::Balance; - /// The next asset identifier up for grabs. - NextAssetId get(fn next_asset_id): T::AssetId; - /// The total unit supply of an asset. + #[pallet::call] + impl, I: 'static> Pallet { + /// Issue a new class of fungible assets from a public origin. /// - /// TWOX-NOTE: `AssetId` is trusted, so this is safe. - TotalSupply: map hasher(twox_64_concat) T::AssetId => T::Balance; - } -} + /// This new asset class has no assets initially and its owner is the origin. + /// + /// The origin must be Signed and the sender must have sufficient funds free. + /// + /// Funds of sender are reserved by `AssetDeposit`. + /// + /// Parameters: + /// - `id`: The identifier of the new asset. This must not be currently in use to identify + /// an existing asset. + /// - `admin`: The admin of this class of assets. The admin is the initial address of each + /// member of the asset class's admin team. + /// - `min_balance`: The minimum balance of this new asset that any single account must + /// have. If an account's balance is reduced below this, then it collapses to zero. + /// + /// Emits `Created` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::create())] + pub fn create( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + admin: ::Source, + min_balance: T::Balance, + ) -> DispatchResult { + let owner = ensure_signed(origin)?; + let admin = T::Lookup::lookup(admin)?; -// The main implementation block for the module. -impl Module { - // Public immutables + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); - /// Get the asset `id` balance of `who`. - pub fn balance(id: T::AssetId, who: T::AccountId) -> T::Balance { - >::get((id, who)) - } + let deposit = T::AssetDeposit::get(); + T::Currency::reserve(&owner, deposit)?; - /// Get the total supply of an asset `id`. 
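As a usage note for the `create` dispatchable being defined here: asset IDs are now claimed explicitly rather than auto-incremented as under the old `issue`. A test-style sketch, assuming u64 accounts with identity lookup and a `Test` runtime as in this pallet's mock (not shown in this hunk); `assert_ok!`/`assert_noop!` are the frame_support test macros:

// Account 1 reserves AssetDeposit and becomes Owner (and whole team) of
// asset 0, whose holders must each keep at least 1 unit.
assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1));
// A second claim of the same ID fails: the ID is in use.
assert_noop!(Assets::create(Origin::signed(1), 0, 1, 1), Error::<Test>::InUse);
// Supply starts at zero until the Issuer mints.
assert_eq!(Assets::total_supply(0), 0);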
- pub fn total_supply(id: T::AssetId) -> T::Balance { - >::get(id) - } -} + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: admin.clone(), + admin: admin.clone(), + freezer: admin.clone(), + supply: Zero::zero(), + deposit, + min_balance, + is_sufficient: false, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, + }, + ); + Self::deposit_event(Event::Created(id, owner, admin)); + Ok(()) + } -#[cfg(test)] -mod tests { - use super::*; + /// Issue a new class of fungible assets from a privileged origin. + /// + /// This new asset class has no assets initially. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// Unlike `create`, no funds are reserved. + /// + /// - `id`: The identifier of the new asset. This must not be currently in use to identify + /// an existing asset. + /// - `owner`: The owner of this class of assets. The owner has full superuser permissions + /// over this asset, but may later change and configure the permissions using + /// `transfer_ownership` and `set_team`. + /// - `min_balance`: The minimum balance of this new asset that any single account must + /// have. If an account's balance is reduced below this, then it collapses to zero. + /// + /// Emits `ForceCreated` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_create())] + pub fn force_create( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + owner: ::Source, + is_sufficient: bool, + #[pallet::compact] min_balance: T::Balance, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + let owner = T::Lookup::lookup(owner)?; - use frame_support::{impl_outer_origin, assert_ok, assert_noop, parameter_types, weights::Weight}; - use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + min_balance, + is_sufficient, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, + }, + ); + Self::deposit_event(Event::ForceCreated(id, owner)); + Ok(()) + } - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type Call = (); - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type PalletInfo = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - } - impl Trait for Test { - type Event = (); - type Balance = u64; - type AssetId = u32; - } - type Assets = Module; + /// Destroy a class of fungible assets. + /// + /// The origin must conform to `ForceOrigin` or must be Signed and the sender must be the + /// owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be destroyed. This must identify an existing + /// asset. + /// + /// Emits `Destroyed` event when successful. + /// + /// NOTE: It can be helpful to first freeze an asset before destroying it so that you + /// can provide accurate witness information and prevent users from manipulating state + /// in a way that can make it harder to destroy. + /// + /// Weight: `O(c + s + a)` where: + /// - `c = (witness.accounts - witness.sufficients)` + /// - `s = witness.sufficients` + /// - `a = witness.approvals` + #[pallet::weight(T::WeightInfo::destroy( + witness.accounts.saturating_sub(witness.sufficients), + witness.sufficients, + witness.approvals, + ))] + pub fn destroy( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + witness: DestroyWitness, + ) -> DispatchResultWithPostInfo { + let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { + Ok(_) => None, + Err(origin) => Some(ensure_signed(origin)?), + }; + Asset::::try_mutate_exists(id, |maybe_details| { + let mut details = maybe_details.take().ok_or(Error::::Unknown)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(details.owner == check_owner, Error::::NoPermission); + } + ensure!(details.accounts <= witness.accounts, Error::::BadWitness); + ensure!(details.sufficients <= witness.sufficients, Error::::BadWitness); + ensure!(details.approvals <= witness.approvals, Error::::BadWitness); - fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() - } + for (who, v) in Account::::drain_prefix(id) { + Self::dead_account(id, &who, &mut details, v.sufficient); + } + debug_assert_eq!(details.accounts, 0); + debug_assert_eq!(details.sufficients, 0); - #[test] - fn issuing_asset_units_to_issuer_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - }); - } + let metadata = Metadata::::take(&id); + T::Currency::unreserve( + &details.owner, + details.deposit.saturating_add(metadata.deposit), + ); - #[test] - fn querying_total_supply_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0,
1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 19); - assert_eq!(Assets::balance(0, 3), 31); - assert_ok!(Assets::destroy(Origin::signed(3), 0)); - assert_eq!(Assets::total_supply(0), 69); - }); - } + for ((owner, _), approval) in Approvals::::drain_prefix((&id,)) { + T::Currency::unreserve(&owner, approval.deposit); + } + Self::deposit_event(Event::Destroyed(id)); - #[test] - fn transferring_amount_above_available_balance_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 50); - }); - } + Ok(Some(T::WeightInfo::destroy( + details.accounts.saturating_sub(details.sufficients), + details.sufficients, + details.approvals, + )) + .into()) + }) + } - #[test] - fn transferring_amount_more_than_available_balance_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); - assert_eq!(Assets::balance(0, 1), 0); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::BalanceLow); - }); - } + /// Mint assets of a particular class. + /// + /// The origin must be Signed and the sender must be the Issuer of the asset `id`. + /// + /// - `id`: The identifier of the asset to have some amount minted. + /// - `beneficiary`: The account to be credited with the minted assets. + /// - `amount`: The amount of the asset to be minted. + /// + /// Emits `Issued` event when successful. + /// + /// Weight: `O(1)` + /// Modes: Pre-existing balance of `beneficiary`; Account pre-existence of `beneficiary`. + #[pallet::weight(T::WeightInfo::mint())] + pub fn mint( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + beneficiary: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + Self::do_mint(id, &beneficiary, amount, Some(origin))?; + Ok(()) + } - #[test] - fn transferring_less_than_one_unit_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 0), Error::::AmountZero); - }); - } + /// Reduce the balance of `who` by as much as possible up to `amount` assets of `id`. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// Bails with `BalanceZero` if the `who` is already dead. + /// + /// - `id`: The identifier of the asset to have some amount burned. + /// - `who`: The account to be debited from. + /// - `amount`: The maximum amount by which `who`'s balance should be reduced. + /// + /// Emits `Burned` with the actual amount burned. If this takes the balance to below the + /// minimum for the asset, then the amount burned is increased to take it to zero.
+ /// + /// Weight: `O(1)` + /// Modes: Post-existence of `who`; Pre & post Zombie-status of `who`. + #[pallet::weight(T::WeightInfo::burn())] + pub fn burn( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + who: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; - #[test] - fn transferring_more_units_than_total_supply_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), Error::::BalanceLow); - }); - } + let f = DebitFlags { keep_alive: false, best_effort: true }; + let _ = Self::do_burn(id, &who, amount, Some(origin), f)?; + Ok(()) + } - #[test] - fn destroying_asset_balance_with_positive_balance_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); - }); - } + /// Move some assets from the sender account to another. + /// + /// Origin must be Signed. + /// + /// - `id`: The identifier of the asset to have some amount transferred. + /// - `target`: The account to be credited. + /// - `amount`: The amount by which the sender's balance of assets should be reduced and + /// `target`'s balance increased. The amount actually transferred may be slightly greater in + /// the case that the transfer would otherwise take the sender balance above zero but below + /// the minimum balance. Must be greater than zero. + /// + /// Emits `Transferred` with the actual amount transferred. If this takes the source balance + /// to below the minimum for the asset, then the amount transferred is increased to take it + /// to zero. + /// + /// Weight: `O(1)` + /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of + /// `target`. + #[pallet::weight(T::WeightInfo::transfer())] + pub fn transfer( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + target: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(target)?; + + let f = TransferFlags { keep_alive: false, best_effort: false, burn_dust: false }; + Self::do_transfer(id, &origin, &dest, amount, None, f).map(|_| ()) + } + + /// Move some assets from the sender account to another, keeping the sender account alive. + /// + /// Origin must be Signed. + /// + /// - `id`: The identifier of the asset to have some amount transferred. + /// - `target`: The account to be credited. + /// - `amount`: The amount by which the sender's balance of assets should be reduced and + /// `target`'s balance increased. The amount actually transferred may be slightly greater in + /// the case that the transfer would otherwise take the sender balance above zero but below + /// the minimum balance. Must be greater than zero. + /// + /// Emits `Transferred` with the actual amount transferred. If this takes the source balance + /// to below the minimum for the asset, then the amount transferred is increased to take it + /// to zero. + /// + /// Weight: `O(1)` + /// Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of + /// `target`. 
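The only difference between `transfer` above and `transfer_keep_alive` below is the `keep_alive` debit flag, which shrinks the sender's reducible balance by `min_balance`. A test-style sketch of the observable behaviour; these are two alternative calls from the same starting state, not a sequence, and the numbers are illustrative (asset 0 with min_balance 10, account 1 holding 100, mock types as in this pallet's tests):

// Plain `transfer` may reap the sender, so sending the full 100
// succeeds and account 1's asset account dies.
assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 100));

// `transfer_keep_alive` must leave min_balance (10) behind: only 90 is
// reducible, so the full 100 is refused outright.
assert_noop!(
    Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 100),
    Error::<Test>::BalanceLow,
);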
+ #[pallet::weight(T::WeightInfo::transfer_keep_alive())] + pub fn transfer_keep_alive( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + target: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResult { + let source = ensure_signed(origin)?; + let dest = T::Lookup::lookup(target)?; + + let f = TransferFlags { keep_alive: true, best_effort: false, burn_dust: false }; + Self::do_transfer(id, &source, &dest, amount, None, f).map(|_| ()) + } + + /// Move some assets from one account to another. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to have some amount transferred. + /// - `source`: The account to be debited. + /// - `dest`: The account to be credited. + /// - `amount`: The amount by which the `source`'s balance of assets should be reduced and + /// `dest`'s balance increased. The amount actually transferred may be slightly greater in + /// the case that the transfer would otherwise take the `source` balance above zero but + /// below the minimum balance. Must be greater than zero. + /// + /// Emits `Transferred` with the actual amount transferred. If this takes the source balance + /// to below the minimum for the asset, then the amount transferred is increased to take it + /// to zero. + /// + /// Weight: `O(1)` + /// Modes: Pre-existence of `dest`; Post-existence of `source`; Account pre-existence of + /// `dest`. + #[pallet::weight(T::WeightInfo::force_transfer())] + pub fn force_transfer( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + source: ::Source, + dest: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let source = T::Lookup::lookup(source)?; + let dest = T::Lookup::lookup(dest)?; + + let f = TransferFlags { keep_alive: false, best_effort: false, burn_dust: false }; + Self::do_transfer(id, &source, &dest, amount, Some(origin), f).map(|_| ()) + } + + /// Disallow further unprivileged transfers from an account. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `who`: The account to be frozen. + /// + /// Emits `Frozen`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze())] + pub fn freeze( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + who: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.freezer, Error::::NoPermission); + let who = T::Lookup::lookup(who)?; + ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + + Account::::mutate(id, &who, |a| a.is_frozen = true); + + Self::deposit_event(Event::::Frozen(id, who)); + Ok(()) + } + + /// Allow unprivileged transfers from an account again. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to be thawed. + /// - `who`: The account to be unfrozen. + /// + /// Emits `Thawed`.
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw())] + pub fn thaw( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + who: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let details = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &details.admin, Error::::NoPermission); + let who = T::Lookup::lookup(who)?; + ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + + Account::::mutate(id, &who, |a| a.is_frozen = false); + + Self::deposit_event(Event::::Thawed(id, who)); + Ok(()) + } + + /// Disallow further unprivileged transfers for the asset class. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// + /// Emits `Frozen`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze_asset())] + pub fn freeze_asset( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.freezer, Error::::NoPermission); + + d.is_frozen = true; + + Self::deposit_event(Event::::AssetFrozen(id)); + Ok(()) + }) + } + + /// Allow unprivileged transfers for the asset again. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `id`. + /// + /// - `id`: The identifier of the asset to be thawed. + /// + /// Emits `Thawed`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw_asset())] + pub fn thaw_asset( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.admin, Error::::NoPermission); + + d.is_frozen = false; + + Self::deposit_event(Event::::AssetThawed(id)); + Ok(()) + }) + } + + /// Change the Owner of an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// - `id`: The identifier of the asset. + /// - `owner`: The new Owner of this asset. + /// + /// Emits `OwnerChanged`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::transfer_ownership())] + pub fn transfer_ownership( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + owner: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + if details.owner == owner { + return Ok(()) + } + + let metadata_deposit = Metadata::::get(id).deposit; + let deposit = details.deposit + metadata_deposit; + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved(&details.owner, &owner, deposit, Reserved)?; + + details.owner = owner.clone(); + + Self::deposit_event(Event::OwnerChanged(id, owner)); + Ok(()) + }) + } + + /// Change the Issuer, Admin and Freezer of an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// - `id`: The identifier of the asset to be frozen. + /// - `issuer`: The new Issuer of this asset. + /// - `admin`: The new Admin of this asset. + /// - `freezer`: The new Freezer of this asset. + /// + /// Emits `TeamChanged`. 
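`transfer_ownership` above moves the whole reserved deposit, asset plus metadata, to the new owner in a single `repatriate_reserved`. A worked version of that sum under the mock runtime's constants (`AssetDeposit`, `MetadataDepositBase` and `MetadataDepositPerByte` are all 1 there); the helper name is illustrative:

// Deposit that follows the asset on an ownership transfer (sketch).
fn deposit_to_move(asset_deposit: u64, base: u64, per_byte: u64, name_len: u64, symbol_len: u64) -> u64 {
    asset_deposit + base + per_byte * (name_len + symbol_len)
}

fn main() {
    // Matches `transfer_owner_should_work` further down: after setting a
    // 10-byte name and symbol, 22 units end up reserved on the new owner.
    assert_eq!(deposit_to_move(1, 1, 1, 10, 10), 22);
}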
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_team())] + pub fn set_team( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let issuer = T::Lookup::lookup(issuer)?; + let admin = T::Lookup::lookup(admin)?; + let freezer = T::Lookup::lookup(freezer)?; + + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + + details.issuer = issuer.clone(); + details.admin = admin.clone(); + details.freezer = freezer.clone(); + + Self::deposit_event(Event::TeamChanged(id, issuer, admin, freezer)); + Ok(()) + }) + } + + /// Set the metadata for an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// Funds of sender are reserved according to the formula: + /// `MetadataDepositBase + MetadataDepositPerByte * (name.len + symbol.len)` taking into + /// account any already reserved funds. + /// + /// - `id`: The identifier of the asset to update. + /// - `name`: The user friendly name of this asset. Limited in length by `StringLimit`. + /// - `symbol`: The exchange symbol for this asset. Limited in length by `StringLimit`. + /// - `decimals`: The number of decimals this asset uses to represent one unit. + /// + /// Emits `MetadataSet`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_metadata(name.len() as u32, symbol.len() as u32))] + pub fn set_metadata( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + name: Vec, + symbol: Vec, + decimals: u8, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let bounded_name: BoundedVec = + name.clone().try_into().map_err(|_| Error::::BadMetadata)?; + let bounded_symbol: BoundedVec = + symbol.clone().try_into().map_err(|_| Error::::BadMetadata)?; + + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.owner, Error::::NoPermission); + + Metadata::::try_mutate_exists(id, |metadata| { + ensure!( + metadata.as_ref().map_or(true, |m| !m.is_frozen), + Error::::NoPermission + ); + + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + let new_deposit = T::MetadataDepositPerByte::get() + .saturating_mul(((name.len() + symbol.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + + if new_deposit > old_deposit { + T::Currency::reserve(&origin, new_deposit - old_deposit)?; + } else { + T::Currency::unreserve(&origin, old_deposit - new_deposit); + } + + *metadata = Some(AssetMetadata { + deposit: new_deposit, + name: bounded_name, + symbol: bounded_symbol, + decimals, + is_frozen: false, + }); + + Self::deposit_event(Event::MetadataSet(id, name, symbol, decimals, false)); + Ok(()) + }) + } + + /// Clear the metadata for an asset. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `id`. + /// + /// Any deposit is freed for the asset owner. + /// + /// - `id`: The identifier of the asset to clear. + /// + /// Emits `MetadataCleared`. 
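`set_metadata` above recomputes the deposit as `MetadataDepositBase + MetadataDepositPerByte * (name.len + symbol.len)` and then only reserves or unreserves the difference against what is already held. A worked version of that arithmetic with the mock constants (base and per-byte both 1), matching the balances checked later in `set_metadata_should_work`:

// Sketch of the metadata deposit formula (mock constants assumed).
fn metadata_deposit(base: u64, per_byte: u64, name_len: u64, symbol_len: u64) -> u64 {
    base + per_byte * (name_len + symbol_len)
}

fn main() {
    // 10-byte name + 10-byte symbol: deposit 21, so a free balance of 30 drops to 9.
    assert_eq!(metadata_deposit(1, 1, 10, 10), 21);
    // Shrinking the symbol to 5 bytes unreserves 5 (free balance 9 -> 14)...
    assert_eq!(metadata_deposit(1, 1, 10, 5), 16);
    // ...and growing it to 15 bytes reserves 10 more (free balance 14 -> 4).
    assert_eq!(metadata_deposit(1, 1, 10, 15), 26);
}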
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::clear_metadata())] + pub fn clear_metadata( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; - #[test] - fn destroying_asset_balance_with_zero_balance_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 2), 0); - assert_noop!(Assets::destroy(Origin::signed(2), 0), Error::::BalanceZero); - }); + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.owner, Error::::NoPermission); + + Metadata::::try_mutate_exists(id, |metadata| { + let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; + T::Currency::unreserve(&d.owner, deposit); + Self::deposit_event(Event::MetadataCleared(id)); + Ok(()) + }) + } + + /// Force the metadata for an asset to some value. + /// + /// Origin must be ForceOrigin. + /// + /// Any deposit is left alone. + /// + /// - `id`: The identifier of the asset to update. + /// - `name`: The user friendly name of this asset. Limited in length by `StringLimit`. + /// - `symbol`: The exchange symbol for this asset. Limited in length by `StringLimit`. + /// - `decimals`: The number of decimals this asset uses to represent one unit. + /// + /// Emits `MetadataSet`. + /// + /// Weight: `O(N + S)` where N and S are the length of the name and symbol respectively. + #[pallet::weight(T::WeightInfo::force_set_metadata(name.len() as u32, symbol.len() as u32))] + pub fn force_set_metadata( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + name: Vec, + symbol: Vec, + decimals: u8, + is_frozen: bool, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + + let bounded_name: BoundedVec = + name.clone().try_into().map_err(|_| Error::::BadMetadata)?; + + let bounded_symbol: BoundedVec = + symbol.clone().try_into().map_err(|_| Error::::BadMetadata)?; + + ensure!(Asset::::contains_key(id), Error::::Unknown); + Metadata::::try_mutate_exists(id, |metadata| { + let deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + *metadata = Some(AssetMetadata { + deposit, + name: bounded_name, + symbol: bounded_symbol, + decimals, + is_frozen, + }); + + Self::deposit_event(Event::MetadataSet(id, name, symbol, decimals, is_frozen)); + Ok(()) + }) + } + + /// Clear the metadata for an asset. + /// + /// Origin must be ForceOrigin. + /// + /// Any deposit is returned. + /// + /// - `id`: The identifier of the asset to clear. + /// + /// Emits `MetadataCleared`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_clear_metadata())] + pub fn force_clear_metadata( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + Metadata::::try_mutate_exists(id, |metadata| { + let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; + T::Currency::unreserve(&d.owner, deposit); + Self::deposit_event(Event::MetadataCleared(id)); + Ok(()) + }) + } + + /// Alter the attributes of a given asset. + /// + /// Origin must be `ForceOrigin`. + /// + /// - `id`: The identifier of the asset. + /// - `owner`: The new Owner of this asset. + /// - `issuer`: The new Issuer of this asset. + /// - `admin`: The new Admin of this asset. + /// - `freezer`: The new Freezer of this asset. + /// - `min_balance`: The minimum balance of this new asset that any single account must + /// have. 
If an account's balance is reduced below this, then it collapses to zero. + /// - `is_sufficient`: Whether a non-zero balance of this asset is deposit of sufficient + /// value to account for the state bloat associated with its balance storage. If set to + /// `true`, then non-zero balances may be stored without a `consumer` reference (and thus + /// an ED in the Balances pallet or whatever else is used to control user-account state + /// growth). + /// - `is_frozen`: Whether this asset class is frozen except for permissioned/admin + /// instructions. + /// + /// Emits `AssetStatusChanged` with the identity of the asset. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_asset_status())] + pub fn force_asset_status( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + owner: ::Source, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, + #[pallet::compact] min_balance: T::Balance, + is_sufficient: bool, + is_frozen: bool, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + + Asset::::try_mutate(id, |maybe_asset| { + let mut asset = maybe_asset.take().ok_or(Error::::Unknown)?; + asset.owner = T::Lookup::lookup(owner)?; + asset.issuer = T::Lookup::lookup(issuer)?; + asset.admin = T::Lookup::lookup(admin)?; + asset.freezer = T::Lookup::lookup(freezer)?; + asset.min_balance = min_balance; + asset.is_sufficient = is_sufficient; + asset.is_frozen = is_frozen; + *maybe_asset = Some(asset); + + Self::deposit_event(Event::AssetStatusChanged(id)); + Ok(()) + }) + } + + /// Approve an amount of asset for transfer by a delegated third-party account. + /// + /// Origin must be Signed. + /// + /// Ensures that `ApprovalDeposit` worth of `Currency` is reserved from signing account + /// for the purpose of holding the approval. If some non-zero amount of assets is already + /// approved from signing account to `delegate`, then it is topped up or unreserved to + /// meet the right value. + /// + /// NOTE: The signing account does not need to own `amount` of assets at the point of + /// making this call. + /// + /// - `id`: The identifier of the asset. + /// - `delegate`: The account to delegate permission to transfer asset. + /// - `amount`: The amount of asset that may be transferred by `delegate`. If there is + /// already an approval in place, then this acts additively. + /// + /// Emits `ApprovedTransfer` on success. 
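As documented above, `approve_transfer` acts additively on an existing approval, while the storage deposit is only ever topped up to the fixed `ApprovalDeposit`. A standalone sketch of that bookkeeping; the `Approval` struct mirrors the one in `types.rs`, but `APPROVAL_DEPOSIT` and the `reserved` counter are illustrative stand-ins:

#[derive(Default)]
struct Approval {
    amount: u64,
    deposit: u64,
}

const APPROVAL_DEPOSIT: u64 = 1;

// Top up the deposit to the required level, then add to the approved amount.
fn approve(existing: Option<Approval>, amount: u64, reserved: &mut u64) -> Approval {
    let mut a = existing.unwrap_or_default();
    if a.deposit < APPROVAL_DEPOSIT {
        *reserved += APPROVAL_DEPOSIT - a.deposit;
        a.deposit = APPROVAL_DEPOSIT;
    }
    a.amount += amount; // additive on repeat approvals
    a
}

fn main() {
    let mut reserved = 0;
    let first = approve(None, 50, &mut reserved);
    let second = approve(Some(first), 25, &mut reserved);
    assert_eq!((second.amount, second.deposit, reserved), (75, 1, 1));
}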
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::approve_transfer())] + pub fn approve_transfer( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + delegate: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResult { + let owner = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + + let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(!d.is_frozen, Error::::Frozen); + Approvals::::try_mutate( + (id, &owner, &delegate), + |maybe_approved| -> DispatchResult { + let mut approved = match maybe_approved.take() { + // an approval already exists and is being updated + Some(a) => a, + // a new approval is created + None => { + d.approvals.saturating_inc(); + Default::default() + }, + }; + let deposit_required = T::ApprovalDeposit::get(); + if approved.deposit < deposit_required { + T::Currency::reserve(&owner, deposit_required - approved.deposit)?; + approved.deposit = deposit_required; + } + approved.amount = approved.amount.saturating_add(amount); + *maybe_approved = Some(approved); + Ok(()) + }, + )?; + Asset::::insert(id, d); + Self::deposit_event(Event::ApprovedTransfer(id, owner, delegate, amount)); + + Ok(()) + } + + /// Cancel all of some asset approved for delegated transfer by a third-party account. + /// + /// Origin must be Signed and there must be an approval in place between signer and + /// `delegate`. + /// + /// Unreserves any deposit previously reserved by `approve_transfer` for the approval. + /// + /// - `id`: The identifier of the asset. + /// - `delegate`: The account delegated permission to transfer asset. + /// + /// Emits `ApprovalCancelled` on success. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::cancel_approval())] + pub fn cancel_approval( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + delegate: ::Source, + ) -> DispatchResult { + let owner = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + let approval = + Approvals::::take((id, &owner, &delegate)).ok_or(Error::::Unknown)?; + T::Currency::unreserve(&owner, approval.deposit); + + d.approvals.saturating_dec(); + Asset::::insert(id, d); + + Self::deposit_event(Event::ApprovalCancelled(id, owner, delegate)); + Ok(()) + } + + /// Cancel all of some asset approved for delegated transfer by a third-party account. + /// + /// Origin must be either ForceOrigin or Signed origin with the signer being the Admin + /// account of the asset `id`. + /// + /// Unreserves any deposit previously reserved by `approve_transfer` for the approval. + /// + /// - `id`: The identifier of the asset. + /// - `delegate`: The account delegated permission to transfer asset. + /// + /// Emits `ApprovalCancelled` on success. 
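The `force_cancel_approval` dispatchable that follows accepts either `ForceOrigin` or a signed Admin: the privileged origin is tried first, and on failure the call falls back to a signed-origin permission check. A std-only sketch of that fallback; the `Origin` enum here is an illustrative stand-in for FRAME's origin machinery:

enum Origin {
    Root,
    Signed(u64),
}

// Accept the privileged origin, else require a signed admin (sketch).
fn ensure_force_or_admin(origin: Origin, admin: u64) -> Result<(), &'static str> {
    match origin {
        Origin::Root => Ok(()),
        Origin::Signed(who) if who == admin => Ok(()),
        Origin::Signed(_) => Err("NoPermission"),
    }
}

fn main() {
    assert!(ensure_force_or_admin(Origin::Root, 1).is_ok());
    assert!(ensure_force_or_admin(Origin::Signed(1), 1).is_ok());
    assert_eq!(ensure_force_or_admin(Origin::Signed(2), 1), Err("NoPermission"));
}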
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_cancel_approval())] + pub fn force_cancel_approval( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + owner: ::Source, + delegate: ::Source, + ) -> DispatchResult { + let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + T::ForceOrigin::try_origin(origin) + .map(|_| ()) + .or_else(|origin| -> DispatchResult { + let origin = ensure_signed(origin)?; + ensure!(&origin == &d.admin, Error::::NoPermission); + Ok(()) + })?; + + let owner = T::Lookup::lookup(owner)?; + let delegate = T::Lookup::lookup(delegate)?; + + let approval = + Approvals::::take((id, &owner, &delegate)).ok_or(Error::::Unknown)?; + T::Currency::unreserve(&owner, approval.deposit); + d.approvals.saturating_dec(); + Asset::::insert(id, d); + + Self::deposit_event(Event::ApprovalCancelled(id, owner, delegate)); + Ok(()) + } + + /// Transfer some asset balance from a previously delegated account to some third-party + /// account. + /// + /// Origin must be Signed and there must be an approval in place by the `owner` to the + /// signer. + /// + /// If the entire amount approved for transfer is transferred, then any deposit previously + /// reserved by `approve_transfer` is unreserved. + /// + /// - `id`: The identifier of the asset. + /// - `owner`: The account which previously approved for a transfer of at least `amount` and + /// from which the asset balance will be withdrawn. + /// - `destination`: The account to which the asset balance of `amount` will be transferred. + /// - `amount`: The amount of assets to transfer. + /// + /// Emits `TransferredApproved` on success. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::transfer_approved())] + pub fn transfer_approved( + origin: OriginFor, + #[pallet::compact] id: T::AssetId, + owner: ::Source, + destination: ::Source, + #[pallet::compact] amount: T::Balance, + ) -> DispatchResult { + let delegate = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + let destination = T::Lookup::lookup(destination)?; + + Approvals::::try_mutate_exists( + (id, &owner, delegate), + |maybe_approved| -> DispatchResult { + let mut approved = maybe_approved.take().ok_or(Error::::Unapproved)?; + let remaining = + approved.amount.checked_sub(&amount).ok_or(Error::::Unapproved)?; + + let f = + TransferFlags { keep_alive: false, best_effort: false, burn_dust: false }; + Self::do_transfer(id, &owner, &destination, amount, None, f)?; + + if remaining.is_zero() { + T::Currency::unreserve(&owner, approved.deposit); + Asset::::mutate(id, |maybe_details| { + if let Some(details) = maybe_details { + details.approvals.saturating_dec(); + } + }); + } else { + approved.amount = remaining; + *maybe_approved = Some(approved); + } + Ok(()) + }, + )?; + Ok(()) + } } } diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs new file mode 100644 index 0000000000000..1b2602792d844 --- /dev/null +++ b/frame/assets/src/mock.rs @@ -0,0 +1,152 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test environment for Assets pallet.
+
+use super::*;
+use crate as pallet_assets;
+
+use frame_support::{construct_runtime, parameter_types};
+use sp_core::H256;
+use sp_runtime::{
+    testing::Header,
+    traits::{BlakeTwo256, IdentityLookup},
+};
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+construct_runtime!(
+    pub enum Test where
+        Block = Block,
+        NodeBlock = Block,
+        UncheckedExtrinsic = UncheckedExtrinsic,
+    {
+        System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+        Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+        Assets: pallet_assets::{Pallet, Call, Storage, Event<T>},
+    }
+);
+
+parameter_types! {
+    pub const BlockHashCount: u64 = 250;
+}
+impl frame_system::Config for Test {
+    type BaseCallFilter = frame_support::traits::Everything;
+    type BlockWeights = ();
+    type BlockLength = ();
+    type Origin = Origin;
+    type Call = Call;
+    type Index = u64;
+    type BlockNumber = u64;
+    type Hash = H256;
+    type Hashing = BlakeTwo256;
+    type AccountId = u64;
+    type Lookup = IdentityLookup<u64>;
+    type Header = Header;
+    type Event = Event;
+    type BlockHashCount = BlockHashCount;
+    type DbWeight = ();
+    type Version = ();
+    type PalletInfo = PalletInfo;
+    type AccountData = pallet_balances::AccountData<u64>;
+    type OnNewAccount = ();
+    type OnKilledAccount = ();
+    type SystemWeightInfo = ();
+    type SS58Prefix = ();
+    type OnSetCode = ();
+}
+
+parameter_types! {
+    pub const ExistentialDeposit: u64 = 1;
+}
+
+impl pallet_balances::Config for Test {
+    type Balance = u64;
+    type DustRemoval = ();
+    type Event = Event;
+    type ExistentialDeposit = ExistentialDeposit;
+    type AccountStore = System;
+    type WeightInfo = ();
+    type MaxLocks = ();
+    type MaxReserves = ();
+    type ReserveIdentifier = [u8; 8];
+}
+
+parameter_types! {
+    pub const AssetDeposit: u64 = 1;
+    pub const ApprovalDeposit: u64 = 1;
+    pub const StringLimit: u32 = 50;
+    pub const MetadataDepositBase: u64 = 1;
+    pub const MetadataDepositPerByte: u64 = 1;
+}
+
+impl Config for Test {
+    type Event = Event;
+    type Balance = u64;
+    type AssetId = u32;
+    type Currency = Balances;
+    type ForceOrigin = frame_system::EnsureRoot<u64>;
+    type AssetDeposit = AssetDeposit;
+    type MetadataDepositBase = MetadataDepositBase;
+    type MetadataDepositPerByte = MetadataDepositPerByte;
+    type ApprovalDeposit = ApprovalDeposit;
+    type StringLimit = StringLimit;
+    type Freezer = TestFreezer;
+    type WeightInfo = ();
+    type Extra = ();
+}
+
+use std::{cell::RefCell, collections::HashMap};
+
+#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+pub(crate) enum Hook {
+    Died(u32, u64),
+}
+thread_local! {
+    static FROZEN: RefCell<HashMap<(u32, u64), u64>> = RefCell::new(Default::default());
+    static HOOKS: RefCell<Vec<Hook>> = RefCell::new(Default::default());
+}
+
+pub struct TestFreezer;
+impl FrozenBalance<u32, u64, u64> for TestFreezer {
+    fn frozen_balance(asset: u32, who: &u64) -> Option<u64> {
+        FROZEN.with(|f| f.borrow().get(&(asset, who.clone())).cloned())
+    }
+
+    fn died(asset: u32, who: &u64) {
+        HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, who.clone())));
+    }
+}
+
+pub(crate) fn set_frozen_balance(asset: u32, who: u64, amount: u64) {
+    FROZEN.with(|f| f.borrow_mut().insert((asset, who), amount));
+}
+pub(crate) fn clear_frozen_balance(asset: u32, who: u64) {
+    FROZEN.with(|f| f.borrow_mut().remove(&(asset, who)));
+}
+pub(crate) fn hooks() -> Vec<Hook> {
+    HOOKS.with(|h| h.borrow().clone())
+}
+
+pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
+    let t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+
+    let mut ext = sp_io::TestExternalities::new(t);
+    ext.execute_with(|| System::set_block_number(1));
+    ext
+}
diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs
new file mode 100644
index 0000000000000..aab534a6e4efc
--- /dev/null
+++ b/frame/assets/src/tests.rs
@@ -0,0 +1,786 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Tests for Assets pallet.
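The mock above keeps `TestFreezer` state in thread-local `RefCell`s so each test thread gets its own `FROZEN` map and `HOOKS` log without any global synchronization. A minimal standalone version of that recording pattern:

use std::cell::RefCell;

thread_local! {
    // Per-thread event log; each test thread sees only its own events.
    static EVENTS: RefCell<Vec<&'static str>> = RefCell::new(Vec::new());
}

fn record(event: &'static str) {
    EVENTS.with(|e| e.borrow_mut().push(event));
}

fn recorded() -> Vec<&'static str> {
    EVENTS.with(|e| e.borrow().clone())
}

fn main() {
    record("died(0, 1)");
    assert_eq!(recorded(), vec!["died(0, 1)"]);
}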
+ +use super::*; +use crate::{mock::*, Error}; +use frame_support::{assert_noop, assert_ok, traits::Currency}; +use pallet_balances::Error as BalancesError; +use sp_runtime::{traits::ConvertInto, TokenError}; + +#[test] +fn basic_minting_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_eq!(Assets::balance(0, 2), 100); + }); +} + +#[test] +fn approval_lifecycle_works() { + new_test_ext().execute_with(|| { + // can't approve non-existent token + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); + // so we create it :) + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + assert_eq!(Balances::reserved_balance(&1), 1); + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 40)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); + assert_eq!(Assets::balance(0, 1), 60); + assert_eq!(Assets::balance(0, 3), 40); + assert_eq!(Balances::reserved_balance(&1), 0); + }); +} + +#[test] +fn transfer_approved_all_funds() { + new_test_ext().execute_with(|| { + // can't approve non-existent token + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); + // so we create it :) + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + assert_eq!(Balances::reserved_balance(&1), 1); + + // transfer the full amount, which should trigger auto-cleanup + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 3), 50); + assert_eq!(Balances::reserved_balance(&1), 0); + }); +} + +#[test] +fn approval_deposits_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + let e = BalancesError::::InsufficientBalance; + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), e); + + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Balances::reserved_balance(&1), 1); + + assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 50)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_eq!(Balances::reserved_balance(&1), 0); + }); +} + +#[test] +fn cannot_transfer_more_than_approved() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + let e = 
Error::::Unapproved; + assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 51), e); + }); +} + +#[test] +fn cannot_transfer_more_than_exists() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 101)); + let e = Error::::BalanceLow; + assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 101), e); + }); +} + +#[test] +fn cancel_approval_works() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + assert_noop!(Assets::cancel_approval(Origin::signed(1), 1, 2), Error::::Unknown); + assert_noop!(Assets::cancel_approval(Origin::signed(2), 0, 2), Error::::Unknown); + assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 3), Error::::Unknown); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); + assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 2), Error::::Unknown); + }); +} + +#[test] +fn force_cancel_approval_works() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + let e = Error::::NoPermission; + assert_noop!(Assets::force_cancel_approval(Origin::signed(2), 0, 1, 2), e); + assert_noop!( + Assets::force_cancel_approval(Origin::signed(1), 1, 1, 2), + Error::::Unknown + ); + assert_noop!( + Assets::force_cancel_approval(Origin::signed(1), 0, 2, 2), + Error::::Unknown + ); + assert_noop!( + Assets::force_cancel_approval(Origin::signed(1), 0, 1, 3), + Error::::Unknown + ); + assert_eq!(Asset::::get(0).unwrap().approvals, 1); + assert_ok!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2)); + assert_eq!(Asset::::get(0).unwrap().approvals, 0); + assert_noop!( + Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2), + Error::::Unknown + ); + }); +} + +#[test] +fn lifecycle_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); + assert_eq!(Balances::reserved_balance(&1), 1); + assert!(Asset::::contains_key(0)); + + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); + assert_eq!(Balances::reserved_balance(&1), 4); + assert!(Metadata::::contains_key(0)); + + Balances::make_free_balance_be(&10, 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + Balances::make_free_balance_be(&20, 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); + assert_eq!(Account::::iter_prefix(0).count(), 2); + + let w = Asset::::get(0).unwrap().destroy_witness(); + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert!(!Asset::::contains_key(0)); + assert!(!Metadata::::contains_key(0)); + assert_eq!(Account::::iter_prefix(0).count(), 0); + + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); + 
assert_eq!(Balances::reserved_balance(&1), 1); + assert!(Asset::::contains_key(0)); + + assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); + assert_eq!(Balances::reserved_balance(&1), 4); + assert!(Metadata::::contains_key(0)); + + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); + assert_eq!(Account::::iter_prefix(0).count(), 2); + + let w = Asset::::get(0).unwrap().destroy_witness(); + assert_ok!(Assets::destroy(Origin::root(), 0, w)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert!(!Asset::::contains_key(0)); + assert!(!Metadata::::contains_key(0)); + assert_eq!(Account::::iter_prefix(0).count(), 0); + }); +} + +#[test] +fn destroy_with_bad_witness_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + let mut w = Asset::::get(0).unwrap().destroy_witness(); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + // witness too low + assert_noop!(Assets::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + // witness too high is okay though + w.accounts += 2; + w.sufficients += 2; + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + }); +} + +#[test] +fn destroy_should_refund_approvals() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 3, 50)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 4, 50)); + assert_eq!(Balances::reserved_balance(&1), 3); + + let w = Asset::::get(0).unwrap().destroy_witness(); + assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + assert_eq!(Balances::reserved_balance(&1), 0); + + // all approvals are removed + assert!(Approvals::::iter().count().is_zero()) + }); +} + +#[test] +fn non_providing_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + + Balances::make_free_balance_be(&0, 100); + assert_ok!(Assets::mint(Origin::signed(1), 0, 0, 100)); + + // Cannot mint into account 2 since it doesn't (yet) exist... + assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); + // ...or transfer... + assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), TokenError::CannotCreate); + // ...or force-transfer + assert_noop!( + Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), + TokenError::CannotCreate + ); + + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + assert_ok!(Assets::transfer(Origin::signed(0), 0, 1, 25)); + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 0, 2, 25)); + }); +} + +#[test] +fn min_balance_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + // Cannot create a new account with a balance that is below minimum... 
+ assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!( + Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), + TokenError::BelowMinimum + ); + + // When deducting from an account to below minimum, it should be reaped. + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91)); + assert!(Assets::balance(0, 1).is_zero()); + assert_eq!(Assets::balance(0, 2), 100); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 2, 1, 91)); + assert!(Assets::balance(0, 2).is_zero()); + assert_eq!(Assets::balance(0, 1), 100); + assert_eq!(Asset::::get(0).unwrap().accounts, 1); + + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, 91)); + assert!(Assets::balance(0, 1).is_zero()); + assert_eq!(Asset::::get(0).unwrap().accounts, 0); + }); +} + +#[test] +fn querying_total_supply_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 50); + assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 19); + assert_eq!(Assets::balance(0, 3), 31); + assert_ok!(Assets::burn(Origin::signed(1), 0, 3, u64::MAX)); + assert_eq!(Assets::total_supply(0), 69); + }); +} + +#[test] +fn transferring_amount_below_available_balance_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 50); + }); +} + +#[test] +fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_noop!( + Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), + Error::::BalanceLow + ); + assert_ok!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 90)); + assert_eq!(Assets::balance(0, 1), 10); + assert_eq!(Assets::balance(0, 2), 90); + }); +} + +#[test] +fn transferring_frozen_user_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::freeze(Origin::signed(1), 0, 1)); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw(Origin::signed(1), 0, 1)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + }); +} + +#[test] +fn transferring_frozen_asset_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + 
assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + }); +} + +#[test] +fn approve_transfer_frozen_asset_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); + assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + }); +} + +#[test] +fn origin_guards_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_noop!( + Assets::transfer_ownership(Origin::signed(2), 0, 2), + Error::::NoPermission + ); + assert_noop!(Assets::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Assets::freeze(Origin::signed(2), 0, 1), Error::::NoPermission); + assert_noop!(Assets::thaw(Origin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!(Assets::mint(Origin::signed(2), 0, 2, 100), Error::::NoPermission); + assert_noop!(Assets::burn(Origin::signed(2), 0, 1, 100), Error::::NoPermission); + assert_noop!( + Assets::force_transfer(Origin::signed(2), 0, 1, 2, 100), + Error::::NoPermission + ); + let w = Asset::::get(0).unwrap().destroy_witness(); + assert_noop!(Assets::destroy(Origin::signed(2), 0, w), Error::::NoPermission); + }); +} + +#[test] +fn transfer_owner_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); + + assert_eq!(Balances::reserved_balance(&1), 1); + + assert_ok!(Assets::transfer_ownership(Origin::signed(1), 0, 2)); + assert_eq!(Balances::reserved_balance(&2), 1); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert_noop!( + Assets::transfer_ownership(Origin::signed(1), 0, 1), + Error::::NoPermission + ); + + // Set metadata now and make sure that deposit gets transferred back. 
+ assert_ok!(Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12)); + assert_ok!(Assets::transfer_ownership(Origin::signed(2), 0, 1)); + assert_eq!(Balances::reserved_balance(&1), 22); + assert_eq!(Balances::reserved_balance(&2), 0); + }); +} + +#[test] +fn set_team_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Assets::mint(Origin::signed(2), 0, 2, 100)); + assert_ok!(Assets::freeze(Origin::signed(4), 0, 2)); + assert_ok!(Assets::thaw(Origin::signed(3), 0, 2)); + assert_ok!(Assets::force_transfer(Origin::signed(3), 0, 2, 3, 100)); + assert_ok!(Assets::burn(Origin::signed(3), 0, 3, 100)); + }); +} + +#[test] +fn transferring_to_frozen_account_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_eq!(Assets::balance(0, 2), 100); + assert_ok!(Assets::freeze(Origin::signed(1), 0, 2)); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 2), 150); + }); +} + +#[test] +fn transferring_amount_more_than_available_balance_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 50); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); + assert_eq!(Assets::balance(0, 1), 0); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::BalanceLow); + assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 51), Error::::BalanceLow); + }); +} + +#[test] +fn transferring_less_than_one_unit_is_fine() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 0)); + System::assert_last_event(mock::Event::Assets(crate::Event::Transferred(0, 1, 2, 0))); + }); +} + +#[test] +fn transferring_more_units_than_total_supply_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), Error::::BalanceLow); + }); +} + +#[test] +fn burning_asset_balance_with_positive_balance_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); + assert_eq!(Assets::balance(0, 1), 0); + }); +} + +#[test] +fn burning_asset_balance_with_zero_balance_does_nothing() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 2), 0); + assert_ok!(Assets::burn(Origin::signed(1), 0, 2, u64::MAX)); + 
assert_eq!(Assets::balance(0, 2), 0);
+        assert_eq!(Assets::total_supply(0), 100);
+    });
+}
+
+#[test]
+fn set_metadata_should_work() {
+    new_test_ext().execute_with(|| {
+        // Cannot add metadata to unknown asset
+        assert_noop!(
+            Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12),
+            Error::<Test>::Unknown,
+        );
+        assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1));
+        // Cannot add metadata to unowned asset
+        assert_noop!(
+            Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12),
+            Error::<Test>::NoPermission,
+        );
+
+        // Cannot add oversized metadata
+        assert_noop!(
+            Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12),
+            Error::<Test>::BadMetadata,
+        );
+        assert_noop!(
+            Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12),
+            Error::<Test>::BadMetadata,
+        );
+
+        // Successfully add metadata and take deposit
+        Balances::make_free_balance_be(&1, 30);
+        assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12));
+        assert_eq!(Balances::free_balance(&1), 9);
+
+        // Update deposit
+        assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 5], 12));
+        assert_eq!(Balances::free_balance(&1), 14);
+        assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 15], 12));
+        assert_eq!(Balances::free_balance(&1), 4);
+
+        // Cannot over-reserve
+        assert_noop!(
+            Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12),
+            BalancesError::<Test>::InsufficientBalance,
+        );
+
+        // Clear Metadata
+        assert!(Metadata::<Test>::contains_key(0));
+        assert_noop!(Assets::clear_metadata(Origin::signed(2), 0), Error::<Test>::NoPermission);
+        assert_noop!(Assets::clear_metadata(Origin::signed(1), 1), Error::<Test>::Unknown);
+        assert_ok!(Assets::clear_metadata(Origin::signed(1), 0));
+        assert!(!Metadata::<Test>::contains_key(0));
+    });
+}
+
+#[test]
+fn freezer_should_work() {
+    new_test_ext().execute_with(|| {
+        assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10));
+        assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100));
+        assert_eq!(Assets::balance(0, 1), 100);
+
+        // freeze 50 of it.
+        set_frozen_balance(0, 1, 50);
+
+        assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 20));
+        // cannot transfer another 21 away as this would take the non-frozen balance (30) to below
+        // the minimum balance (10).
+        assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 21), Error::<Test>::BalanceLow);
+
+        // create an approved transfer...
+        Balances::make_free_balance_be(&1, 100);
+        assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50));
+        let e = Error::<Test>::BalanceLow;
+        // ...but that won't work either:
+        assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 2, 21), e);
+        // nor will a force transfer:
+        let e = Error::<Test>::BalanceLow;
+        assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21), e);
+
+        // reduce it to only 49 frozen...
+        set_frozen_balance(0, 1, 49);
+        // ...and it's all good:
+        assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21));
+
+        // and if we clear it, we can remove the account completely.
+        clear_frozen_balance(0, 1);
+        assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
+        assert_eq!(hooks(), vec![Hook::Died(0, 1)]);
+    });
+}
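Worked arithmetic behind `freezer_should_work` above: the freezer's frozen amount sits on top of the asset's `min_balance`, so the spendable amount is the balance minus both. A small sketch checking the two thresholds the test exercises:

// Spendable balance under an external freeze (sketch; assumes no overflow).
fn spendable(balance: u64, frozen: u64, min_balance: u64) -> u64 {
    balance.saturating_sub(frozen + min_balance)
}

fn main() {
    // 100 - (50 + 10) = 40: transferring 20 is fine, another 21 is not.
    assert_eq!(spendable(100, 50, 10), 40);
    // After the 20-unit transfer, lowering the freeze to 49 frees exactly 21.
    assert_eq!(spendable(80, 49, 10), 21);
}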
+
+#[test]
+fn imbalances_should_work() {
+    use frame_support::traits::tokens::fungibles::Balanced;
+
+    new_test_ext().execute_with(|| {
+        assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1));
+
+        let imb = Assets::issue(0, 100);
+        assert_eq!(Assets::total_supply(0), 100);
+        assert_eq!(imb.peek(), 100);
+
+        let (imb1, imb2) = imb.split(30);
+        assert_eq!(imb1.peek(), 30);
+        assert_eq!(imb2.peek(), 70);
+
+        drop(imb2);
+        assert_eq!(Assets::total_supply(0), 30);
+
+        assert!(Assets::resolve(&1, imb1).is_ok());
+        assert_eq!(Assets::balance(0, 1), 30);
+        assert_eq!(Assets::total_supply(0), 30);
+    });
+}
+
+#[test]
+fn force_metadata_should_work() {
+    new_test_ext().execute_with(|| {
+        // force set metadata works
+        assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1));
+        assert_ok!(Assets::force_set_metadata(
+            Origin::root(),
+            0,
+            vec![0u8; 10],
+            vec![0u8; 10],
+            8,
+            false
+        ));
+        assert!(Metadata::<Test>::contains_key(0));
+
+        // overwrites existing metadata
+        let asset_original_metadata = Metadata::<Test>::get(0);
+        assert_ok!(Assets::force_set_metadata(
+            Origin::root(),
+            0,
+            vec![1u8; 10],
+            vec![1u8; 10],
+            8,
+            false
+        ));
+        assert_ne!(Metadata::<Test>::get(0), asset_original_metadata);
+
+        // attempt to set metadata for non-existent asset class
+        assert_noop!(
+            Assets::force_set_metadata(Origin::root(), 1, vec![0u8; 10], vec![0u8; 10], 8, false),
+            Error::<Test>::Unknown
+        );
+
+        // string length limit check
+        let limit = StringLimit::get() as usize;
+        assert_noop!(
+            Assets::force_set_metadata(
+                Origin::root(),
+                0,
+                vec![0u8; limit + 1],
+                vec![0u8; 10],
+                8,
+                false
+            ),
+            Error::<Test>::BadMetadata
+        );
+        assert_noop!(
+            Assets::force_set_metadata(
+                Origin::root(),
+                0,
+                vec![0u8; 10],
+                vec![0u8; limit + 1],
+                8,
+                false
+            ),
+            Error::<Test>::BadMetadata
+        );
+
+        // force clear metadata works
+        assert!(Metadata::<Test>::contains_key(0));
+        assert_ok!(Assets::force_clear_metadata(Origin::root(), 0));
+        assert!(!Metadata::<Test>::contains_key(0));
+
+        // Error handles clearing non-existent asset class
+        assert_noop!(Assets::force_clear_metadata(Origin::root(), 1), Error::<Test>::Unknown);
+    });
+}
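A worked check of the drain-to-completion behaviour exercised at the end of the next test: per the `transfer` documentation, when a transfer would strand the source below `min_balance`, the transferred amount is bumped so the account empties instead. The helper below models only that rule and assumes `amount <= balance`:

// Simplified model of transfer dust-sweeping (not pallet code).
fn amount_sent(balance: u64, amount: u64, min_balance: u64) -> u64 {
    let remainder = balance - amount; // assumes amount <= balance
    if remainder < min_balance { balance } else { amount }
}

fn main() {
    // With min_balance forced to 110, sending 110 of 200 would leave 90 < 110,
    // so the whole 200 moves and the sender is reaped.
    assert_eq!(amount_sent(200, 110, 110), 200);
    assert_eq!(amount_sent(200, 50, 10), 50);
}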
+
+#[test]
+fn force_asset_status_should_work() {
+    new_test_ext().execute_with(|| {
+        Balances::make_free_balance_be(&1, 10);
+        Balances::make_free_balance_be(&2, 10);
+        assert_ok!(Assets::create(Origin::signed(1), 0, 1, 30));
+        assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 50));
+        assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 150));
+
+        // force asset status to change min_balance > balance
+        assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 100, true, false));
+        assert_eq!(Assets::balance(0, 1), 50);
+
+        // account can receive assets for balance < min_balance
+        assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 1));
+        assert_eq!(Assets::balance(0, 1), 51);
+
+        // account on outbound transfer will cleanup for balance < min_balance
+        assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 1));
+        assert_eq!(Assets::balance(0, 1), 0);
+
+        // won't create new account with balance below min_balance
+        assert_noop!(Assets::transfer(Origin::signed(2), 0, 3, 50), TokenError::BelowMinimum);
+
+        // force asset status will not execute for non-existent class
+        assert_noop!(
+            Assets::force_asset_status(Origin::root(), 1, 1, 1, 1, 1, 90, true, false),
+            Error::<Test>::Unknown
+        );
+
+        // account drains to completion when funds dip below min_balance
+        assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 110, true, false));
+        assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 110));
+        assert_eq!(Assets::balance(0, 1), 200);
+        assert_eq!(Assets::balance(0, 2), 0);
+        assert_eq!(Assets::total_supply(0), 200);
+    });
+}
+
+#[test]
+fn balance_conversion_should_work() {
+    new_test_ext().execute_with(|| {
+        use frame_support::traits::tokens::BalanceConversion;
+
+        let id = 42;
+        assert_ok!(Assets::force_create(Origin::root(), id, 1, true, 10));
+        let not_sufficient = 23;
+        assert_ok!(Assets::force_create(Origin::root(), not_sufficient, 1, false, 10));
+
+        assert_eq!(
+            BalanceToAssetBalance::<Balances, Test, ConvertInto>::to_asset_balance(100, 1234),
+            Err(ConversionError::AssetMissing)
+        );
+        assert_eq!(
+            BalanceToAssetBalance::<Balances, Test, ConvertInto>::to_asset_balance(
+                100,
+                not_sufficient
+            ),
+            Err(ConversionError::AssetNotSufficient)
+        );
+        // 10 / 1 == 10 -> the conversion should 10x the value
+        assert_eq!(
+            BalanceToAssetBalance::<Balances, Test, ConvertInto>::to_asset_balance(100, id),
+            Ok(100 * 10)
+        );
+    });
+}
diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs
new file mode 100644
index 0000000000000..bc2edce848a64
--- /dev/null
+++ b/frame/assets/src/types.rs
@@ -0,0 +1,235 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Various basic types for use in the assets pallet.
+
+use super::*;
+use frame_support::pallet_prelude::*;
+use scale_info::TypeInfo;
+
+use frame_support::traits::{fungible, tokens::BalanceConversion};
+use sp_runtime::{traits::Convert, FixedPointNumber, FixedPointOperand, FixedU128};
+
+pub(super) type DepositBalanceOf<T, I = ()> =
+    <<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
+
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+pub struct AssetDetails<Balance, AccountId, DepositBalance> {
+    /// Can change `owner`, `issuer`, `freezer` and `admin` accounts.
+    pub(super) owner: AccountId,
+    /// Can mint tokens.
+    pub(super) issuer: AccountId,
+    /// Can thaw tokens, force transfers and burn tokens from any account.
+    pub(super) admin: AccountId,
+    /// Can freeze tokens.
+    pub(super) freezer: AccountId,
+    /// The total supply across all accounts.
+    pub(super) supply: Balance,
+    /// The balance deposited for this asset. This pays for the data stored here.
+    pub(super) deposit: DepositBalance,
+    /// The ED for virtual accounts.
+    pub(super) min_balance: Balance,
+    /// If `true`, then any account with this asset is given a provider reference. Otherwise, it
+    /// requires a consumer reference.
+    pub(super) is_sufficient: bool,
+    /// The total number of accounts.
+    pub(super) accounts: u32,
+    /// The total number of accounts for which we have placed a self-sufficient reference.
+    pub(super) sufficients: u32,
+    /// The total number of approvals.
+    pub(super) approvals: u32,
+    /// Whether the asset is frozen for non-admin transfers.
+    pub(super) is_frozen: bool,
+}
+
+impl<Balance, AccountId, DepositBalance> AssetDetails<Balance, AccountId, DepositBalance> {
+    pub fn destroy_witness(&self) -> DestroyWitness {
+        DestroyWitness {
+            accounts: self.accounts,
+            sufficients: self.sufficients,
+            approvals: self.approvals,
+        }
+    }
+}
+
+/// Data concerning an approval.
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, MaxEncodedLen, TypeInfo)]
+pub struct Approval<Balance, DepositBalance> {
+    /// The amount of funds approved for the balance transfer from the owner to some delegated
+    /// target.
+    pub(super) amount: Balance,
+    /// The amount reserved on the owner's account to hold this item in storage.
+    pub(super) deposit: DepositBalance,
+}
+
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, MaxEncodedLen, TypeInfo)]
+pub struct AssetBalance<Balance, Extra> {
+    /// The balance.
+    pub(super) balance: Balance,
+    /// Whether the account is frozen.
+    pub(super) is_frozen: bool,
+    /// `true` if this balance gave the account a self-sufficient reference.
+    pub(super) sufficient: bool,
+    /// Additional "sidecar" data, in case some other pallet wants to use this storage item.
+    pub(super) extra: Extra,
+}
+
+#[derive(Clone, Encode, Decode, Eq, PartialEq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+pub struct AssetMetadata<DepositBalance, BoundedString> {
+    /// The balance deposited for this metadata.
+    ///
+    /// This pays for the data stored in this struct.
+    pub(super) deposit: DepositBalance,
+    /// The user friendly name of this asset. Limited in length by `StringLimit`.
+    pub(super) name: BoundedString,
+    /// The ticker symbol for this asset. Limited in length by `StringLimit`.
+    pub(super) symbol: BoundedString,
+    /// The number of decimals this asset uses to represent one unit.
+    pub(super) decimals: u8,
+    /// Whether the asset metadata may be changed by a non Force origin.
+    pub(super) is_frozen: bool,
+}
+
+/// Witness data for the destroy transactions.
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+pub struct DestroyWitness {
+    /// The number of accounts holding the asset.
+    #[codec(compact)]
+    pub(super) accounts: u32,
+    /// The number of accounts holding the asset with a self-sufficient reference.
+    #[codec(compact)]
+    pub(super) sufficients: u32,
+    /// The number of transfer-approvals of the asset.
+    #[codec(compact)]
+    pub(super) approvals: u32,
+}
+
+/// Trait for allowing a minimum balance on the account to be specified, beyond the
+/// `minimum_balance` of the asset. This is additive - the `minimum_balance` of the asset must be
+/// met *and then* anything here in addition.
+pub trait FrozenBalance<AssetId, AccountId, Balance> {
+    /// Return the frozen balance. Under normal behaviour, this amount should always be
+    /// withdrawable.
+    ///
+    /// In reality, the balance of every account must be at least the sum of this (if `Some`) and
+    /// the asset's minimum_balance, since there may be complications to destroying an asset's
+    /// account completely.
+    ///
+    /// If `None` is returned, then nothing special is enforced.
+    ///
+    /// If any operation ever breaks this requirement (which will only happen through some sort of
+    /// privileged intervention), then `died` is called to do any cleanup.
+    fn frozen_balance(asset: AssetId, who: &AccountId) -> Option<Balance>;
+
+    /// Called when an account has been removed.
+    fn died(asset: AssetId, who: &AccountId);
+}
+
+impl<AssetId, AccountId, Balance> FrozenBalance<AssetId, AccountId, Balance> for () {
+    fn frozen_balance(_: AssetId, _: &AccountId) -> Option<Balance> {
+        None
+    }
+    fn died(_: AssetId, _: &AccountId) {}
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub(super) struct TransferFlags {
+    /// The debited account must stay alive at the end of the operation; an error is returned if
+    /// this cannot be achieved legally.
+    pub(super) keep_alive: bool,
+    /// Less than the amount specified needs to be debited by the operation for it to be
+    /// considered successful. If `false`, then the amount debited will always be at least the
+    /// amount specified.
+    pub(super) best_effort: bool,
+    /// Any additional funds debited (due to minimum balance requirements) should be burned rather
+    /// than credited to the destination account.
+    pub(super) burn_dust: bool,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub(super) struct DebitFlags {
+    /// The debited account must stay alive at the end of the operation; an error is returned if
+    /// this cannot be achieved legally.
+    pub(super) keep_alive: bool,
+    /// Less than the amount specified needs to be debited by the operation for it to be
+    /// considered successful. If `false`, then the amount debited will always be at least the
+    /// amount specified.
+    pub(super) best_effort: bool,
+}
+
+impl From<TransferFlags> for DebitFlags {
+    fn from(f: TransferFlags) -> Self {
+        Self { keep_alive: f.keep_alive, best_effort: f.best_effort }
+    }
+}
+
+/// Possible errors when converting between external and asset balances.
+#[derive(Eq, PartialEq, Copy, Clone, RuntimeDebug, Encode, Decode)]
+pub enum ConversionError {
+    /// The external minimum balance must not be zero.
+    MinBalanceZero,
+    /// The asset is not present in storage.
+    AssetMissing,
+    /// The asset is not sufficient and thus does not have a reliable `min_balance` so it cannot
+    /// be converted.
+    AssetNotSufficient,
+}
+
+// Type alias for `frame_system`'s account id.
+type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
+// This pallet's asset id and balance type.
+type AssetIdOf<T, I> = <T as Config<I>>::AssetId;
+type AssetBalanceOf<T, I> = <T as Config<I>>::Balance;
+// Generic fungible balance type.
+type BalanceOf<F, T> = <F as fungible::Inspect<AccountIdOf<T>>>::Balance;
+
+/// Converts a balance value into an asset balance based on the ratio between the fungible's
+/// minimum balance and the minimum asset balance.
+pub struct BalanceToAssetBalance<F, T, CON, I = ()>(PhantomData<(F, T, CON, I)>);
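The trait implementation that follows is easier to read as plain integer math: scale the balance by the ratio `asset.min_balance / fungible_min_balance`. This sketch mirrors the numbers in `balance_conversion_should_work` (fungible ED of 1, asset `min_balance` of 10); the real code uses a saturating `FixedU128` rational rather than bare division:

// Integer-math stand-in for BalanceToAssetBalance::to_asset_balance.
fn to_asset_balance(balance: u128, asset_min: u128, fungible_min: u128) -> Option<u128> {
    if fungible_min == 0 {
        return None; // mirrors ConversionError::MinBalanceZero
    }
    Some(balance * asset_min / fungible_min)
}

fn main() {
    // ED = 1, asset min_balance = 10: the conversion 10x-es the value.
    assert_eq!(to_asset_balance(100, 10, 1), Some(1000));
}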
+
+// Type alias for `frame_system`'s account id.
+type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
+// This pallet's asset id and balance type.
+type AssetIdOf<T, I> = <T as Config<I>>::AssetId;
+type AssetBalanceOf<T, I> = <T as Config<I>>::Balance;
+// Generic fungible balance type.
+type BalanceOf<F, T> = <F as fungible::Inspect<AccountIdOf<T>>>::Balance;
+
+/// Converts a balance value into an asset balance based on the ratio between the fungible's
+/// minimum balance and the minimum asset balance.
+pub struct BalanceToAssetBalance<F, T, CON, I = ()>(PhantomData<(F, T, CON, I)>);
+impl<F, T, CON, I> BalanceConversion<BalanceOf<F, T>, AssetIdOf<T, I>, AssetBalanceOf<T, I>>
+	for BalanceToAssetBalance<F, T, CON, I>
+where
+	F: fungible::Inspect<AccountIdOf<T>>,
+	T: Config<I>,
+	I: 'static,
+	CON: Convert<BalanceOf<F, T>, AssetBalanceOf<T, I>>,
+	BalanceOf<F, T>: FixedPointOperand + Zero,
+	AssetBalanceOf<T, I>: FixedPointOperand + Zero,
+{
+	type Error = ConversionError;
+
+	/// Convert the given balance value into an asset balance based on the ratio between the
+	/// fungible's minimum balance and the minimum asset balance.
+	///
+	/// Will return `Err` if the asset is not found, not sufficient or the fungible's minimum
+	/// balance is zero.
+	fn to_asset_balance(
+		balance: BalanceOf<F, T>,
+		asset_id: AssetIdOf<T, I>,
+	) -> Result<AssetBalanceOf<T, I>, ConversionError> {
+		let asset = Asset::<T, I>::get(asset_id).ok_or(ConversionError::AssetMissing)?;
+		// only sufficient assets have a min balance with reliable value
+		ensure!(asset.is_sufficient, ConversionError::AssetNotSufficient);
+		let min_balance = CON::convert(F::minimum_balance());
+		// make sure we don't divide by zero
+		ensure!(!min_balance.is_zero(), ConversionError::MinBalanceZero);
+		let balance = CON::convert(balance);
+		// balance * asset.min_balance / min_balance
+		Ok(FixedU128::saturating_from_rational(asset.min_balance, min_balance)
+			.saturating_mul_int(balance))
+	}
+}
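The returned value is a fixed-point rendering of `balance * asset.min_balance / min_balance`. A worked check with assumed figures (none of these numbers come from the diff itself):

```rust
use sp_arithmetic::{FixedPointNumber, FixedU128};

fn main() {
	// Assumed for illustration: the converted native minimum balance is 100,
	// the asset's `min_balance` is 25, and the value to convert is 1_000.
	// Expected result: 1_000 * 25 / 100 = 250 asset units.
	let ratio = FixedU128::saturating_from_rational(25u128, 100u128); // = 0.25
	assert_eq!(ratio.saturating_mul_int(1_000u128), 250);
}
```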
diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs
new file mode 100644
index 0000000000000..912ebcf7e8510
--- /dev/null
+++ b/frame/assets/src/weights.rs
@@ -0,0 +1,442 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_assets
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_assets
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/assets/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_assets.
+pub trait WeightInfo {
+	fn create() -> Weight;
+	fn force_create() -> Weight;
+	fn destroy(c: u32, s: u32, a: u32, ) -> Weight;
+	fn mint() -> Weight;
+	fn burn() -> Weight;
+	fn transfer() -> Weight;
+	fn transfer_keep_alive() -> Weight;
+	fn force_transfer() -> Weight;
+	fn freeze() -> Weight;
+	fn thaw() -> Weight;
+	fn freeze_asset() -> Weight;
+	fn thaw_asset() -> Weight;
+	fn transfer_ownership() -> Weight;
+	fn set_team() -> Weight;
+	fn set_metadata(n: u32, s: u32, ) -> Weight;
+	fn clear_metadata() -> Weight;
+	fn force_set_metadata(n: u32, s: u32, ) -> Weight;
+	fn force_clear_metadata() -> Weight;
+	fn force_asset_status() -> Weight;
+	fn approve_transfer() -> Weight;
+	fn transfer_approved() -> Weight;
+	fn cancel_approval() -> Weight;
+	fn force_cancel_approval() -> Weight;
+}
+
+/// Weights for pallet_assets using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// Storage: Assets Asset (r:1 w:1)
+	fn create() -> Weight {
+		(41_651_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:1)
+	fn force_create() -> Weight {
+		(21_378_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:1)
+	// Storage: Assets Account (r:5002 w:5001)
+	// Storage: System Account (r:5000 w:5000)
+	// Storage: Assets Metadata (r:1 w:0)
+	// Storage: Assets Approvals (r:501 w:500)
+	fn destroy(c: u32, s: u32, a: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 32_000
+			.saturating_add((21_163_000 as Weight).saturating_mul(c as Weight))
+			// Standard Error: 32_000
+			.saturating_add((26_932_000 as Weight).saturating_mul(s as Weight))
+			// Standard Error: 329_000
+			.saturating_add((29_714_000 as Weight).saturating_mul(a as Weight))
+			.saturating_add(T::DbWeight::get().reads(5 as Weight))
+			.saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight)))
+			.saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight)))
+			.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(a as Weight)))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(c as Weight)))
+			.saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight)))
+			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight)))
+	}
+	// Storage: Assets Asset (r:1 w:1)
+	// Storage: Assets Account (r:1 w:1)
+	fn mint() -> Weight {
+		(47_913_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:1)
+	// Storage: Assets Account (r:1 w:1)
+	fn burn() -> Weight {
+		(55_759_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:1)
+	// Storage: Assets Account (r:2 w:2)
+	// Storage: System Account (r:1 w:1)
+	fn transfer() -> Weight {
+		(83_205_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(4 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:1)
+	// Storage: Assets Account (r:2 w:2)
+	// Storage: System Account (r:1 w:1)
+	fn transfer_keep_alive() -> Weight {
+		(70_665_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(4 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:1)
+	// Storage: Assets Account (r:2 w:2)
+	// Storage: System Account (r:1 w:1)
+	fn force_transfer() -> Weight {
+		(81_458_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(4 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:0)
+	// Storage: Assets Account (r:1 w:1)
+	fn freeze() -> Weight {
+		(32_845_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Assets Asset (r:1 w:0)
+	// Storage: Assets Account (r:1 w:1)
+	fn thaw() -> Weight {
+		(33_303_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn freeze_asset() -> Weight { + (23_434_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn thaw_asset() -> Weight { + (24_173_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Metadata (r:1 w:0) + fn transfer_ownership() -> Weight { + (27_466_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn set_team() -> Weight { + (24_608_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn set_metadata(n: u32, s: u32, ) -> Weight { + (49_515_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_000 + .saturating_add((6_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn clear_metadata() -> Weight { + (48_163_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + (26_722_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn force_clear_metadata() -> Weight { + (47_923_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn force_asset_status() -> Weight { + (23_081_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Approvals (r:1 w:1) + fn approve_transfer() -> Weight { + (56_998_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Assets Approvals (r:1 w:1) + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:2 w:2) + // Storage: System Account (r:1 w:1) + fn transfer_approved() -> Weight { + (107_171_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Approvals (r:1 w:1) + fn cancel_approval() -> Weight { + (57_358_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Approvals (r:1 w:1) + fn force_cancel_approval() -> Weight { + (58_330_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as 
Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Assets Asset (r:1 w:1) + fn create() -> Weight { + (41_651_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn force_create() -> Weight { + (21_378_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:5002 w:5001) + // Storage: System Account (r:5000 w:5000) + // Storage: Assets Metadata (r:1 w:0) + // Storage: Assets Approvals (r:501 w:500) + fn destroy(c: u32, s: u32, a: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 32_000 + .saturating_add((21_163_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 32_000 + .saturating_add((26_932_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 329_000 + .saturating_add((29_714_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:1 w:1) + fn mint() -> Weight { + (47_913_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:1 w:1) + fn burn() -> Weight { + (55_759_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:2 w:2) + // Storage: System Account (r:1 w:1) + fn transfer() -> Weight { + (83_205_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:2 w:2) + // Storage: System Account (r:1 w:1) + fn transfer_keep_alive() -> Weight { + (70_665_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:2 w:2) + // Storage: System Account (r:1 w:1) + fn force_transfer() -> Weight { + (81_458_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Account (r:1 w:1) + fn freeze() -> Weight { + (32_845_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Account (r:1 w:1) + fn thaw() -> Weight { + 
(33_303_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn freeze_asset() -> Weight { + (23_434_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn thaw_asset() -> Weight { + (24_173_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Metadata (r:1 w:0) + fn transfer_ownership() -> Weight { + (27_466_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn set_team() -> Weight { + (24_608_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn set_metadata(n: u32, s: u32, ) -> Weight { + (49_515_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_000 + .saturating_add((6_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn clear_metadata() -> Weight { + (48_163_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn force_set_metadata(_n: u32, s: u32, ) -> Weight { + (26_722_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:0) + // Storage: Assets Metadata (r:1 w:1) + fn force_clear_metadata() -> Weight { + (47_923_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + fn force_asset_status() -> Weight { + (23_081_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Approvals (r:1 w:1) + fn approve_transfer() -> Weight { + (56_998_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Assets Approvals (r:1 w:1) + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Account (r:2 w:2) + // Storage: System Account (r:1 w:1) + fn transfer_approved() -> Weight { + (107_171_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets Approvals (r:1 w:1) + fn cancel_approval() -> Weight { + (57_358_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Assets Asset (r:1 w:1) + // Storage: Assets 
Approvals (r:1 w:1) + fn force_cancel_approval() -> Weight { + (58_330_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index a65632289426b..53a8c3a81165b 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-atomic-swap" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,23 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "frame-support/std", "frame-system/std", "sp-runtime/std", @@ -37,3 +37,4 @@ std = [ "sp-io/std", "sp-core/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/atomic-swap/README.md b/frame/atomic-swap/README.md index 1287e90bc0da5..888a64ec7e065 100644 --- a/frame/atomic-swap/README.md +++ b/frame/atomic-swap/README.md @@ -2,7 +2,7 @@ A module for atomically sending funds. -- [`atomic_swap::Trait`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/trait.Trait.html) +- [`atomic_swap::Config`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/trait.Config.html) - [`Call`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/enum.Call.html) - [`Module`](https://docs.rs/pallet-atomic-swap/latest/pallet_atomic_swap/struct.Module.html) @@ -20,4 +20,4 @@ claimed within a specified duration of time, the sender may cancel it. 
 * `claim_swap` - called by the target to approve a swap
 * `cancel_swap` - may be called by a sender after a specified duration
 
-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs
index 31f0c0f426525..9cf92c3bd2337 100644
--- a/frame/atomic-swap/src/lib.rs
+++ b/frame/atomic-swap/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,15 +17,15 @@
 //! # Atomic Swap
 //!
-//! A module for atomically sending funds.
+//! A pallet for atomically sending funds.
 //!
-//! - [`atomic_swap::Trait`](./trait.Trait.html)
-//! - [`Call`](./enum.Call.html)
-//! - [`Module`](./struct.Module.html)
+//! - [`Config`]
+//! - [`Call`]
+//! - [`Pallet`]
 //!
 //! ## Overview
 //!
-//! A module for atomically sending funds from an origin to a target. A proof
+//! A pallet for atomically sending funds from an origin to a target. A proof
 //! is used to allow the target to approve (claim) the swap. If the swap is not
 //! claimed within a specified duration of time, the sender may cancel it.
 //!
@@ -33,30 +33,35 @@
 //!
 //! ### Dispatchable Functions
 //!
-//! * `create_swap` - called by a sender to register a new atomic swap
-//! * `claim_swap` - called by the target to approve a swap
-//! * `cancel_swap` - may be called by a sender after a specified duration
+//! * [`create_swap`](Call::create_swap) - called by a sender to register a new atomic swap
+//! * [`claim_swap`](Call::claim_swap) - called by the target to approve a swap
+//! * [`cancel_swap`](Call::cancel_swap) - may be called by a sender after a specified duration
 
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
 
 mod tests;
 
-use sp_std::{prelude::*, marker::PhantomData, ops::{Deref, DerefMut}};
-use sp_io::hashing::blake2_256;
+use codec::{Decode, Encode};
 use frame_support::{
-	Parameter, decl_module, decl_storage, decl_event, decl_error, ensure,
-	traits::{Get, Currency, ReservableCurrency, BalanceStatus},
-	weights::Weight, dispatch::DispatchResult,
+	traits::{BalanceStatus, Currency, Get, ReservableCurrency},
+	weights::Weight,
+	RuntimeDebugNoBound,
 };
-use frame_system::{self as system, ensure_signed};
-use codec::{Encode, Decode};
+use scale_info::TypeInfo;
+use sp_io::hashing::blake2_256;
 use sp_runtime::RuntimeDebug;
+use sp_std::{
+	marker::PhantomData,
+	ops::{Deref, DerefMut},
+	prelude::*,
+};
 
 /// Pending atomic swap operation.
-#[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)]
-pub struct PendingSwap<T: Trait> {
+#[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct PendingSwap<T: Config> {
 	/// Source of the swap.
 	pub source: T::AccountId,
 	/// Action of this swap.
@@ -74,7 +79,7 @@ pub type HashedProof = [u8; 32];
 /// succeeds with best efforts.
 /// - **Claim**: claim any resources reserved in the first phase.
 /// - **Cancel**: cancel any resources reserved in the first phase.
-pub trait SwapAction<AccountId, T: Trait> {
+pub trait SwapAction<AccountId, T: Config> {
 	/// Reserve the resources needed for the swap, from the given `source`. The reservation is
 	/// allowed to fail. If that is the case, the full swap creation operation is cancelled.
 	fn reserve(&self, source: &AccountId) -> DispatchResult;
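The dispatchables hinge on a simple commitment scheme: a swap is stored under `blake2_256(proof)` and released once the preimage is revealed. A standalone sketch of that scheme, with an invented secret (the byte string is purely illustrative and must stay within `ProofLimit`):

```rust
use sp_io::hashing::blake2_256;

fn main() {
	// The sender commits to the hash; the counterparty later reveals `proof`.
	let proof: Vec<u8> = b"my-secret-proof".to_vec();
	let hashed_proof: [u8; 32] = blake2_256(&proof);

	// `create_swap(origin, target, hashed_proof, action, duration)` stores the
	// swap keyed by (target, hashed_proof); `claim_swap(origin, proof, action)`
	// recomputes blake2_256(proof) and succeeds only if an entry matches.
	assert_eq!(blake2_256(&proof), hashed_proof);
}
```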
@@ -88,20 +93,27 @@ pub trait SwapAction<AccountId, T: Trait> {
 }
 
 /// A swap action that only allows transferring balances.
-#[derive(Clone, RuntimeDebug, Eq, PartialEq, Encode, Decode)]
+#[derive(Clone, RuntimeDebug, Eq, PartialEq, Encode, Decode, TypeInfo)]
+#[scale_info(skip_type_params(C))]
 pub struct BalanceSwapAction<AccountId, C: ReservableCurrency<AccountId>> {
 	value: <C as Currency<AccountId>>::Balance,
 	_marker: PhantomData<C>,
 }
 
-impl<AccountId, C> BalanceSwapAction<AccountId, C> where C: ReservableCurrency<AccountId> {
+impl<AccountId, C> BalanceSwapAction<AccountId, C>
+where
+	C: ReservableCurrency<AccountId>,
+{
 	/// Create a new swap action value of balance.
 	pub fn new(value: <C as Currency<AccountId>>::Balance) -> Self {
 		Self { value, _marker: PhantomData }
 	}
 }
 
-impl<AccountId, C> Deref for BalanceSwapAction<AccountId, C> where C: ReservableCurrency<AccountId> {
+impl<AccountId, C> Deref for BalanceSwapAction<AccountId, C>
+where
+	C: ReservableCurrency<AccountId>,
+{
 	type Target = <C as Currency<AccountId>>::Balance;
 
 	fn deref(&self) -> &Self::Target {
@@ -109,14 +121,18 @@ impl<AccountId, C> Deref for BalanceSwapAction<AccountId, C> where C: Reservable
 	}
 }
 
-impl<AccountId, C> DerefMut for BalanceSwapAction<AccountId, C> where C: ReservableCurrency<AccountId> {
+impl<AccountId, C> DerefMut for BalanceSwapAction<AccountId, C>
+where
+	C: ReservableCurrency<AccountId>,
+{
 	fn deref_mut(&mut self) -> &mut Self::Target {
 		&mut self.value
 	}
 }
 
-impl<T: Trait, AccountId, C> SwapAction<AccountId, T> for BalanceSwapAction<AccountId, C>
-	where C: ReservableCurrency<AccountId>
+impl<T: Config, AccountId, C> SwapAction<AccountId, T> for BalanceSwapAction<AccountId, C>
+where
+	C: ReservableCurrency<AccountId>,
 {
 	fn reserve(&self, source: &AccountId) -> DispatchResult {
 		C::reserve(&source, self.value)
@@ -135,35 +151,51 @@ impl<T: Trait, AccountId, C> SwapAction<AccountId, T> for BalanceSwapAction<AccountId, C>
-pub trait Trait: frame_system::Trait {
-	/// The overarching event type.
-	type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
-	/// Swap action.
-	type SwapAction: SwapAction<Self::AccountId, Self> + Parameter;
-	/// Limit of proof size.
-	///
-	/// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs
-	/// on-chain. If A is the one that generates the proof, then it requires that either:
-	/// - A's blockchain has the same proof length limit as B's blockchain.
-	/// - Or A's blockchain has shorter proof length limit as B's blockchain.
-	///
-	/// If B sees A is on a blockchain with larger proof length limit, then it should kindly refuse
-	/// to accept the atomic swap request if A generates the proof, and asks that B generates the
-	/// proof instead.
-	type ProofLimit: Get<u32>;
-}
-
-decl_storage! {
-	trait Store for Module<T: Trait> as AtomicSwap {
-		pub PendingSwaps: double_map
-			hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) HashedProof
-			=> Option<PendingSwap<T>>;
+pub use pallet::*;
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	/// Atomic swap's pallet configuration trait.
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching event type.
+		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+		/// Swap action.
+		type SwapAction: SwapAction<Self::AccountId, Self> + Parameter;
+		/// Limit of proof size.
+		///
+		/// Atomic swap is only atomic if once the proof is revealed, both parties can submit the
+		/// proofs on-chain. If A is the one that generates the proof, then it requires that either:
+		/// - A's blockchain has the same proof length limit as B's blockchain.
+		/// - Or A's blockchain has shorter proof length limit as B's blockchain.
+		///
+		/// If B sees A is on a blockchain with larger proof length limit, then it should kindly
+		/// refuse to accept the atomic swap request if A generates the proof, and asks that B
+		/// generates the proof instead.
+		#[pallet::constant]
+		type ProofLimit: Get<u32>;
 	}
-}
-decl_error!
{ - pub enum Error for Module { + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::storage] + pub type PendingSwaps = StorageDoubleMap< + _, + Twox64Concat, + T::AccountId, + Blake2_128Concat, + HashedProof, + PendingSwap, + >; + + #[pallet::error] + pub enum Error { /// Swap already exists. AlreadyExist, /// Swap proof is invalid. @@ -181,31 +213,26 @@ decl_error! { /// Duration has not yet passed for the swap to be cancelled. DurationNotPassed, } -} -decl_event!( /// Event of atomic swap pallet. - pub enum Event where - AccountId = ::AccountId, - PendingSwap = PendingSwap, - { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// Swap created. \[account, proof, swap\] - NewSwap(AccountId, HashedProof, PendingSwap), - /// Swap claimed. The last parameter indicates whether the execution succeeds. + NewSwap(T::AccountId, HashedProof, PendingSwap), + /// Swap claimed. The last parameter indicates whether the execution succeeds. /// \[account, proof, success\] - SwapClaimed(AccountId, HashedProof, bool), + SwapClaimed(T::AccountId, HashedProof, bool), /// Swap cancelled. \[account, proof\] - SwapCancelled(AccountId, HashedProof), + SwapCancelled(T::AccountId, HashedProof), } -); -decl_module! { - /// Module definition of atomic swap pallet. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + #[pallet::call] + impl Pallet { /// Register a new atomic swap, declaring an intention to send funds from origin to target /// on the current blockchain. The target can claim the fund using the revealed proof. If /// the fund is not claimed after `duration` blocks, then the sender can cancel the swap. @@ -218,14 +245,14 @@ decl_module! { /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. - #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] - fn create_swap( - origin, + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + pub fn create_swap( + origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, action: T::SwapAction, duration: T::BlockNumber, - ) { + ) -> DispatchResult { let source = ensure_signed(origin)?; ensure!( !PendingSwaps::::contains_key(&target, hashed_proof), @@ -237,13 +264,13 @@ decl_module! { let swap = PendingSwap { source, action, - end_block: frame_system::Module::::block_number() + duration, + end_block: frame_system::Pallet::::block_number() + duration, }; PendingSwaps::::insert(target.clone(), hashed_proof.clone(), swap.clone()); - Self::deposit_event( - RawEvent::NewSwap(target, hashed_proof, swap) - ); + Self::deposit_event(Event::NewSwap(target, hashed_proof, swap)); + + Ok(()) } /// Claim an atomic swap. @@ -253,35 +280,31 @@ decl_module! { /// - `proof`: Revealed proof of the claim. /// - `action`: Action defined in the swap, it must match the entry in blockchain. Otherwise /// the operation fails. This is used for weight calculation. 
- #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(40_000_000) - .saturating_add((proof.len() as Weight).saturating_mul(100)) - .saturating_add(action.weight()) - ] - fn claim_swap( - origin, + #[pallet::weight( + T::DbWeight::get().reads_writes(1, 1) + .saturating_add(40_000_000) + .saturating_add((proof.len() as Weight).saturating_mul(100)) + .saturating_add(action.weight()) + )] + pub fn claim_swap( + origin: OriginFor, proof: Vec, action: T::SwapAction, ) -> DispatchResult { - ensure!( - proof.len() <= T::ProofLimit::get() as usize, - Error::::ProofTooLarge, - ); + ensure!(proof.len() <= T::ProofLimit::get() as usize, Error::::ProofTooLarge); let target = ensure_signed(origin)?; let hashed_proof = blake2_256(&proof); - let swap = PendingSwaps::::get(&target, hashed_proof) - .ok_or(Error::::InvalidProof)?; + let swap = + PendingSwaps::::get(&target, hashed_proof).ok_or(Error::::InvalidProof)?; ensure!(swap.action == action, Error::::ClaimActionMismatch); let succeeded = swap.action.claim(&swap.source, &target); PendingSwaps::::remove(target.clone(), hashed_proof.clone()); - Self::deposit_event( - RawEvent::SwapClaimed(target, hashed_proof, succeeded) - ); + Self::deposit_event(Event::SwapClaimed(target, hashed_proof, succeeded)); Ok(()) } @@ -292,31 +315,27 @@ decl_module! { /// /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. - #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] - fn cancel_swap( - origin, + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + pub fn cancel_swap( + origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, - ) { + ) -> DispatchResult { let source = ensure_signed(origin)?; - let swap = PendingSwaps::::get(&target, hashed_proof) - .ok_or(Error::::NotExist)?; - ensure!( - swap.source == source, - Error::::SourceMismatch, - ); + let swap = PendingSwaps::::get(&target, hashed_proof).ok_or(Error::::NotExist)?; + ensure!(swap.source == source, Error::::SourceMismatch); ensure!( - frame_system::Module::::block_number() >= swap.end_block, + frame_system::Pallet::::block_number() >= swap.end_block, Error::::DurationNotPassed, ); swap.action.cancel(&swap.source); PendingSwaps::::remove(&target, hashed_proof.clone()); - Self::deposit_event( - RawEvent::SwapCancelled(target, hashed_proof) - ); + Self::deposit_event(Event::SwapCancelled(target, hashed_proof)); + + Ok(()) } } } diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 060411c8815da..a76d0f20ffa3b 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -1,64 +1,70 @@ #![cfg(test)] use super::*; +use crate as pallet_atomic_swap; -use frame_support::{ - impl_outer_origin, parameter_types, weights::Weight, -}; +use frame_support::parameter_types; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + AtomicSwap: pallet_atomic_swap::{Pallet, Call, Event}, + } +); -#[derive(Clone, Eq, Debug, PartialEq)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type Event = (); + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -67,26 +73,18 @@ parameter_types! 
{ pub const ProofLimit: u32 = 1024; pub const ExpireDuration: u64 = 100; } -impl Trait for Test { - type Event = (); +impl Config for Test { + type Event = Event; type SwapAction = BalanceSwapAction; type ProofLimit = ProofLimit; } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type AtomicSwap = Module; const A: u64 = 1; const B: u64 = 2; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let genesis = pallet_balances::GenesisConfig:: { - balances: vec![ - (A, 100), - (B, 200), - ], - }; + let genesis = pallet_balances::GenesisConfig:: { balances: vec![(A, 100), (B, 200)] }; genesis.assimilate_storage(&mut t).unwrap(); t.into() } @@ -109,7 +107,8 @@ fn two_party_successful_swap() { hashed_proof.clone(), BalanceSwapAction::new(50), 1000, - ).unwrap(); + ) + .unwrap(); assert_eq!(Balances::free_balance(A), 100 - 50); assert_eq!(Balances::free_balance(B), 200); @@ -123,7 +122,8 @@ fn two_party_successful_swap() { hashed_proof.clone(), BalanceSwapAction::new(75), 1000, - ).unwrap(); + ) + .unwrap(); assert_eq!(Balances::free_balance(A), 100); assert_eq!(Balances::free_balance(B), 200 - 75); @@ -131,11 +131,8 @@ fn two_party_successful_swap() { // A reveals the proof and claims the swap on chain2. chain2.execute_with(|| { - AtomicSwap::claim_swap( - Origin::signed(A), - proof.to_vec(), - BalanceSwapAction::new(75), - ).unwrap(); + AtomicSwap::claim_swap(Origin::signed(A), proof.to_vec(), BalanceSwapAction::new(75)) + .unwrap(); assert_eq!(Balances::free_balance(A), 100 + 75); assert_eq!(Balances::free_balance(B), 200 - 75); @@ -143,11 +140,8 @@ fn two_party_successful_swap() { // B use the revealed proof to claim the swap on chain1. chain1.execute_with(|| { - AtomicSwap::claim_swap( - Origin::signed(B), - proof.to_vec(), - BalanceSwapAction::new(50), - ).unwrap(); + AtomicSwap::claim_swap(Origin::signed(B), proof.to_vec(), BalanceSwapAction::new(50)) + .unwrap(); assert_eq!(Balances::free_balance(A), 100 - 50); assert_eq!(Balances::free_balance(B), 200 + 50); diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index 27a579e0f9f8b..8f5c42bc3c465 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-aura" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,38 +13,31 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -sp-consensus-aura = { version = "0.8.0", path = "../../primitives/consensus/aura", default-features = false } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" 
} -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } - +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +sp-consensus-aura = { version = "0.10.0-dev", path = "../../primitives/consensus/aura", default-features = false } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } [dev-dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -lazy_static = "1.4.0" -parking_lot = "0.10.0" +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } [features] default = ["std"] std = [ "sp-application-crypto/std", "codec/std", - "sp-inherents/std", + "scale-info/std", "sp-std/std", - "serde", "sp-runtime/std", "frame-support/std", "sp-consensus-aura/std", "frame-system/std", - "sp-timestamp/std", "pallet-timestamp/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/aura/README.md b/frame/aura/README.md index 4f3eacbad8a06..263f158d79068 100644 --- a/frame/aura/README.md +++ b/frame/aura/README.md @@ -1,7 +1,7 @@ # Aura Module -- [`aura::Trait`](https://docs.rs/pallet-aura/latest/pallet_aura/trait.Trait.html) -- [`Module`](https://docs.rs/pallet-aura/latest/pallet_aura/struct.Module.html) +- [`aura::Config`](https://docs.rs/pallet-aura/latest/pallet_aura/pallet/trait.Config.html) +- [`Pallet`](https://docs.rs/pallet-aura/latest/pallet_aura/pallet/struct.Pallet.html) ## Overview @@ -25,4 +25,4 @@ If you're interested in hacking on this module, it is useful to understand the i [`ProvideInherent`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherent.html) and [`ProvideInherentData`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherentData.html) to create and check inherents. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index ca3d1f15f421b..e8b68f928e087 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ //! # Aura Module //! -//! - [`aura::Trait`](./trait.Trait.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Pallet`] //! //! ## Overview //! @@ -28,109 +28,195 @@ //! //! ### Public Functions //! -//! - `slot_duration` - Determine the Aura slot-duration based on the Timestamp module configuration. +//! - `slot_duration` - Determine the Aura slot-duration based on the Timestamp module +//! configuration. //! //! 
## Related Modules //! //! - [Timestamp](../pallet_timestamp/index.html): The Timestamp module is used in Aura to track //! consensus rounds (via `slots`). -//! -//! ## References -//! -//! If you're interested in hacking on this module, it is useful to understand the interaction with -//! `substrate/primitives/inherents/src/lib.rs` and, specifically, the required implementation of -//! [`ProvideInherent`](../sp_inherents/trait.ProvideInherent.html) and -//! [`ProvideInherentData`](../sp_inherents/trait.ProvideInherentData.html) to create and check inherents. #![cfg_attr(not(feature = "std"), no_std)] -use pallet_timestamp; - -use sp_std::{result, prelude::*}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - decl_storage, decl_module, Parameter, traits::{Get, FindAuthor}, - ConsensusEngineId, + traits::{DisabledValidators, FindAuthor, Get, OnTimestampSet, OneSessionHandler}, + BoundedSlice, ConsensusEngineId, Parameter, WeakBoundedVec, }; +use sp_consensus_aura::{AuthorityIndex, ConsensusLog, Slot, AURA_ENGINE_ID}; use sp_runtime::{ + generic::DigestItem, + traits::{IsMember, Member, SaturatedConversion, Saturating, Zero}, RuntimeAppPublic, - traits::{SaturatedConversion, Saturating, Zero, Member, IsMember}, generic::DigestItem, -}; -use sp_timestamp::OnTimestampSet; -use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; -use sp_consensus_aura::{ - AURA_ENGINE_ID, ConsensusLog, AuthorityIndex, - inherents::{INHERENT_IDENTIFIER, AuraInherentData}, }; +use sp_std::{convert::TryFrom, vec::Vec}; +pub mod migrations; mod mock; mod tests; -pub trait Trait: pallet_timestamp::Trait { - /// The identifier type for an authority. - type AuthorityId: Member + Parameter + RuntimeAppPublic + Default; -} +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: pallet_timestamp::Config + frame_system::Config { + /// The identifier type for an authority. + type AuthorityId: Member + + Parameter + + RuntimeAppPublic + + Default + + MaybeSerializeDeserialize + + MaxEncodedLen; + /// The maximum number of authorities that the pallet can hold. + type MaxAuthorities: Get; + + /// A way to check whether a given validator is disabled and should not be authoring blocks. + /// Blocks authored by a disabled validator will lead to a panic as part of this module's + /// initialization. + type DisabledValidators: DisabledValidators; + } + + #[pallet::pallet] + #[pallet::generate_storage_info] + pub struct Pallet(sp_std::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_: T::BlockNumber) -> Weight { + if let Some(new_slot) = Self::current_slot_from_digests() { + let current_slot = CurrentSlot::::get(); + + assert!(current_slot < new_slot, "Slot must increase"); + CurrentSlot::::put(new_slot); + + if let Some(n_authorities) = >::decode_len() { + let authority_index = *new_slot % n_authorities as u64; + if T::DisabledValidators::is_disabled(authority_index as u32) { + panic!( + "Validator with index {:?} is disabled and should not be attempting to author blocks.", + authority_index, + ); + } + } -decl_storage! { - trait Store for Module as Aura { - /// The last timestamp. - LastTimestamp get(fn last) build(|_| 0.into()): T::Moment; + // TODO [#3398] Generate offence report for all authorities that skipped their + // slots. 
- /// The current authorities - pub Authorities get(fn authorities): Vec; + T::DbWeight::get().reads_writes(2, 1) + } else { + T::DbWeight::get().reads(1) + } + } } - add_extra_genesis { - config(authorities): Vec; - build(|config| Module::::initialize_authorities(&config.authorities)) + + /// The current authority set. + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub(super) type Authorities = + StorageValue<_, WeakBoundedVec, ValueQuery>; + + /// The current slot of this block. + /// + /// This will be set in `on_initialize`. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub(super) type CurrentSlot = StorageValue<_, Slot, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub authorities: Vec, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { authorities: Vec::new() } + } } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { } + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_authorities(&self.authorities); + } + } } -impl Module { - fn change_authorities(new: Vec) { +impl Pallet { + fn change_authorities(new: WeakBoundedVec) { >::put(&new); let log: DigestItem = DigestItem::Consensus( AURA_ENGINE_ID, - ConsensusLog::AuthoritiesChange(new).encode() + ConsensusLog::AuthoritiesChange(new.into_inner()).encode(), ); - >::deposit_log(log.into()); + >::deposit_log(log.into()); } fn initialize_authorities(authorities: &[T::AuthorityId]) { if !authorities.is_empty() { assert!(>::get().is_empty(), "Authorities are already initialized!"); - >::put(authorities); + let bounded = >::try_from(authorities) + .expect("Initial authority set must be less than T::MaxAuthorities"); + >::put(bounded); + } + } + + /// Get the current slot from the pre-runtime digests. + fn current_slot_from_digests() -> Option { + let digest = frame_system::Pallet::::digest(); + let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); + for (id, mut data) in pre_runtime_digests { + if id == AURA_ENGINE_ID { + return Slot::decode(&mut data).ok() + } } + + None + } + + /// Determine the Aura slot-duration based on the Timestamp module configuration. + pub fn slot_duration() -> T::Moment { + // we double the minimum block-period so each author can always propose within + // the majority of its slot. 
+ ::MinimumPeriod::get().saturating_mul(2u32.into()) } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = T::AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Pallet { type Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where + I: Iterator, { let authorities = validators.map(|(_, k)| k).collect::>(); Self::initialize_authorities(&authorities); } fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) - where I: Iterator + where + I: Iterator, { // instant changes if changed { let next_authorities = validators.map(|(_, k)| k).collect::>(); - let last_authorities = >::authorities(); - if next_authorities != last_authorities { - Self::change_authorities(next_authorities); + let last_authorities = Self::authorities(); + if last_authorities != next_authorities { + let bounded = >::force_from( + next_authorities, + Some("AuRa new session"), + ); + Self::change_authorities(bounded); } } } @@ -141,20 +227,20 @@ impl pallet_session::OneSessionHandler for Module { ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), ); - >::deposit_log(log.into()); + >::deposit_log(log.into()); } } -impl FindAuthor for Module { - fn find_author<'a, I>(digests: I) -> Option where - I: 'a + IntoIterator +impl FindAuthor for Pallet { + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, { for (id, mut data) in digests.into_iter() { if id == AURA_ENGINE_ID { - if let Ok(slot_num) = u64::decode(&mut data) { - let author_index = slot_num % Self::authorities().len() as u64; - return Some(author_index as u32) - } + let slot = Slot::decode(&mut data).ok()?; + let author_index = *slot % Self::authorities().len() as u64; + return Some(author_index as u32) } } @@ -162,92 +248,45 @@ impl FindAuthor for Module { } } -/// We can not implement `FindAuthor` twice, because the compiler does not know if +/// We can not implement `FindAuthor` twice, because the compiler does not know if /// `u32 == T::AuthorityId` and thus, prevents us to implement the trait twice. #[doc(hidden)] pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); -impl> FindAuthor +impl> FindAuthor for FindAccountFromAuthorIndex { fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator + where + I: 'a + IntoIterator, { let i = Inner::find_author(digests)?; - let validators = >::authorities(); + let validators = >::authorities(); validators.get(i as usize).map(|k| k.clone()) } } /// Find the authority ID of the Aura authority who authored the current block. -pub type AuraAuthorId = FindAccountFromAuthorIndex>; +pub type AuraAuthorId = FindAccountFromAuthorIndex>; -impl IsMember for Module { +impl IsMember for Pallet { fn is_member(authority_id: &T::AuthorityId) -> bool { - Self::authorities() - .iter() - .any(|id| id == authority_id) + Self::authorities().iter().any(|id| id == authority_id) } } -impl Module { - /// Determine the Aura slot-duration based on the Timestamp module configuration. - pub fn slot_duration() -> T::Moment { - // we double the minimum block-period so each author can always propose within - // the majority of its slot. 
- ::MinimumPeriod::get().saturating_mul(2.into()) - } - - fn on_timestamp_set(now: T::Moment, slot_duration: T::Moment) { - let last = Self::last(); - ::LastTimestamp::put(now); - - if last.is_zero() { - return; - } - - assert!(!slot_duration.is_zero(), "Aura slot duration cannot be zero."); - - let last_slot = last / slot_duration; - let cur_slot = now / slot_duration; - - assert!(last_slot < cur_slot, "Only one block may be authored per slot."); - - // TODO [#3398] Generate offence report for all authorities that skipped their slots. - } -} - -impl OnTimestampSet for Module { +impl OnTimestampSet for Pallet { fn on_timestamp_set(moment: T::Moment) { - Self::on_timestamp_set(moment, Self::slot_duration()) - } -} - -impl ProvideInherent for Module { - type Call = pallet_timestamp::Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_: &InherentData) -> Option { - None - } - - /// Verify the validity of the inherent using the timestamp. - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - let timestamp = match call { - pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), - _ => return Ok(()), - }; - - let timestamp_based_slot = timestamp / Self::slot_duration(); + let slot_duration = Self::slot_duration(); + assert!(!slot_duration.is_zero(), "Aura slot duration cannot be zero."); - let seal_slot = data.aura_inherent_data()?.saturated_into(); + let timestamp_slot = moment / slot_duration; + let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - if timestamp_based_slot == seal_slot { - Ok(()) - } else { - Err(sp_inherents::Error::from("timestamp set in block doesn't match slot in seal").into()) - } + assert!( + CurrentSlot::::get() == timestamp_slot, + "Timestamp slot must match `CurrentSlot`" + ); } } diff --git a/frame/aura/src/migrations.rs b/frame/aura/src/migrations.rs new file mode 100644 index 0000000000000..e194c17406b63 --- /dev/null +++ b/frame/aura/src/migrations.rs @@ -0,0 +1,45 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Migrations for the AURA pallet. + +use frame_support::{pallet_prelude::*, traits::Get, weights::Weight}; + +struct __LastTimestamp(sp_std::marker::PhantomData); +impl frame_support::traits::StorageInstance for __LastTimestamp { + fn pallet_prefix() -> &'static str { + T::PalletPrefix::get() + } + const STORAGE_PREFIX: &'static str = "LastTimestamp"; +} + +type LastTimestamp = StorageValue<__LastTimestamp, (), ValueQuery>; + +pub trait RemoveLastTimestamp: super::Config { + type PalletPrefix: Get<&'static str>; +} + +/// Remove the `LastTimestamp` storage value. +/// +/// This storage value was removed and replaced by `CurrentSlot`. As we only remove this storage +/// value, it is safe to call this method multiple times. 
+/// +/// This migration requires a type `T` that implements [`RemoveLastTimestamp`]. +pub fn remove_last_timestamp() -> Weight { + LastTimestamp::::kill(); + T::DbWeight::get().writes(1) +} diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index a3875727e47c2..4418d9e85ae24 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,77 +19,113 @@ #![cfg(test)] -use crate::{Trait, Module, GenesisConfig}; -use sp_consensus_aura::ed25519::AuthorityId; +use crate as pallet_aura; +use frame_support::{ + parameter_types, + traits::{DisabledValidators, GenesisBuild}, +}; +use sp_consensus_aura::{ed25519::AuthorityId, AuthorityIndex}; +use sp_core::H256; use sp_runtime::{ - traits::IdentityLookup, Perbill, testing::{Header, UintAuthorityId}, + traits::IdentityLookup, }; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; -use sp_io; -use sp_core::H256; +use sp_std::cell::RefCell; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Aura: pallet_aura::{Pallet, Storage, Config}, + } +); parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); pub const MinimumPeriod: u64 = 1; } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl Trait for Test { +parameter_types! 
{
+	pub const MaxAuthorities: u32 = 10;
+}
+
+thread_local! {
+	static DISABLED_VALIDATORS: RefCell<Vec<AuthorityIndex>> = RefCell::new(Default::default());
+}
+
+pub struct MockDisabledValidators;
+
+impl MockDisabledValidators {
+	pub fn disable_validator(index: AuthorityIndex) {
+		DISABLED_VALIDATORS.with(|v| {
+			let mut disabled = v.borrow_mut();
+			if let Err(i) = disabled.binary_search(&index) {
+				disabled.insert(i, index);
+			}
+		})
+	}
+}
+
+impl DisabledValidators for MockDisabledValidators {
+	fn is_disabled(index: AuthorityIndex) -> bool {
+		DISABLED_VALIDATORS.with(|v| v.borrow().binary_search(&index).is_ok())
+	}
+}
+
+impl pallet_aura::Config for Test {
 	type AuthorityId = AuthorityId;
+	type DisabledValidators = MockDisabledValidators;
+	type MaxAuthorities = MaxAuthorities;
 }
 
 pub fn new_test_ext(authorities: Vec<u64>) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
-	GenesisConfig::<Test>{
+	pallet_aura::GenesisConfig::<Test> {
 		authorities: authorities.into_iter().map(|a| UintAuthorityId(a).to_public_key()).collect(),
-	}.assimilate_storage(&mut t).unwrap();
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	t.into()
 }
-
-pub type Aura = Module<Test>;
diff --git a/frame/aura/src/tests.rs b/frame/aura/src/tests.rs
index ca0fc3de37638..596858aac7c92 100644
--- a/frame/aura/src/tests.rs
+++ b/frame/aura/src/tests.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,12 +19,38 @@
 
 #![cfg(test)]
 
-use crate::mock::{Aura, new_test_ext};
+use crate::mock::{new_test_ext, Aura, MockDisabledValidators, System};
+use codec::Encode;
+use frame_support::traits::OnInitialize;
+use frame_system::InitKind;
+use sp_consensus_aura::{Slot, AURA_ENGINE_ID};
+use sp_runtime::{Digest, DigestItem};
 
 #[test]
 fn initial_values() {
 	new_test_ext(vec![0, 1, 2, 3]).execute_with(|| {
-		assert_eq!(Aura::last(), 0u64);
+		assert_eq!(Aura::current_slot(), 0u64);
 		assert_eq!(Aura::authorities().len(), 4);
 	});
 }
+
+#[test]
+#[should_panic(
+	expected = "Validator with index 1 is disabled and should not be attempting to author blocks."
+)] +fn disabled_validators_cannot_author_blocks() { + new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { + // slot 1 should be authored by validator at index 1 + let slot = Slot::from(1); + let pre_digest = + Digest { logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())] }; + + System::initialize(&42, &System::parent_hash(), &pre_digest, InitKind::Full); + + // let's disable the validator + MockDisabledValidators::disable_validator(1); + + // and we should not be able to initialize the block + Aura::on_initialize(42); + }); +} diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index 0e1db74632786..80a320c31e77f 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authority-discovery" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,20 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-authority-discovery = { version = "2.0.0", default-features = false, path = "../../primitives/authority-discovery" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", features = ["historical" ], path = "../session", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-authority-discovery = { version = "4.0.0-dev", default-features = false, path = "../../primitives/authority-discovery" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +pallet-session = { version = "4.0.0-dev", features = [ + "historical", +], path = "../session", default-features = false } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } [features] default = ["std"] @@ -34,10 +37,11 @@ std = [ "sp-application-crypto/std", "sp-authority-discovery/std", "codec/std", + "scale-info/std", "sp-std/std", - "serde", "pallet-session/std", "sp-runtime/std", "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git 
a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 09be533474fca..d093b1533c693 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,56 +15,116 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Authority discovery module. +//! # Authority discovery pallet. //! -//! This module is used by the `client/authority-discovery` to retrieve the -//! current set of authorities. +//! This pallet is used by the `client/authority-discovery` and by polkadot's parachain logic +//! to retrieve the current and the next set of authorities. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; -use frame_support::{decl_module, decl_storage}; +use frame_support::{ + traits::{Get, OneSessionHandler}, + WeakBoundedVec, +}; use sp_authority_discovery::AuthorityId; +use sp_std::prelude::*; -/// The module's config trait. -pub trait Trait: frame_system::Trait + pallet_session::Trait {} +use core::convert::TryFrom; -decl_storage! { - trait Store for Module as AuthorityDiscovery { - /// Keys of the current and next authority set. - Keys get(fn keys): Vec; +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(_); + + #[pallet::config] + /// The pallet's config trait. + pub trait Config: frame_system::Config + pallet_session::Config { + /// The maximum number of authorities that can be added. + type MaxAuthorities: Get; } - add_extra_genesis { - config(keys): Vec; - build(|config| Module::::initialize_keys(&config.keys)) + + #[pallet::storage] + #[pallet::getter(fn keys)] + /// Keys of the current authority set. + pub(super) type Keys = + StorageValue<_, WeakBoundedVec, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn next_keys)] + /// Keys of the next authority set. + pub(super) type NextKeys = + StorageValue<_, WeakBoundedVec, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub keys: Vec, } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { keys: Default::default() } + } + } + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_keys(&self.keys) + } } } -impl Module { - /// Retrieve authority identifiers of the current and next authority set. +impl Pallet { + /// Retrieve authority identifiers of the current and next authority set + /// sorted and deduplicated. pub fn authorities() -> Vec { - Keys::get() + let mut keys = Keys::::get().to_vec(); + let next = NextKeys::::get().to_vec(); + + keys.extend(next); + keys.sort(); + keys.dedup(); + + keys.to_vec() } - fn initialize_keys(keys: &[AuthorityId]) { + /// Retrieve authority identifiers of the current authority set in the original order. 
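// A minimal standalone sketch of the merge performed by `authorities()` above, with
// plain `u8` standing in for `AuthorityId` (illustration only, not pallet code):
//
//     fn merged(current: Vec<u8>, next: Vec<u8>) -> Vec<u8> {
//         let mut keys = current;
//         keys.extend(next);
//         keys.sort();
//         keys.dedup();
//         keys
//     }
//
//     assert_eq!(merged(vec![3, 1], vec![2, 1]), vec![1, 2, 3]);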
+ pub fn current_authorities() -> WeakBoundedVec { + Keys::::get() + } + + /// Retrieve authority identifiers of the next authority set in the original order. + pub fn next_authorities() -> WeakBoundedVec { + NextKeys::::get() + } + + fn initialize_keys(keys: &Vec) { if !keys.is_empty() { - assert!(Keys::get().is_empty(), "Keys are already initialized!"); - Keys::put(keys); + assert!(Keys::::get().is_empty(), "Keys are already initialized!"); + + let bounded_keys = + WeakBoundedVec::::try_from((*keys).clone()) + .expect("Keys vec too big"); + + Keys::::put(&bounded_keys); + NextKeys::::put(&bounded_keys); } } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Pallet { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(authorities: I) @@ -80,8 +140,29 @@ impl pallet_session::OneSessionHandler for Module { { // Remember who the authorities are for the new and next session. if changed { - let keys = validators.chain(queued_validators).map(|x| x.1).collect::>(); - Keys::put(keys.into_iter().collect::>()); + let keys = validators.map(|x| x.1).collect::>(); + + let bounded_keys = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + keys, + Some( + "Warning: The session has more validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + Keys::::put(bounded_keys); + + let next_keys = queued_validators.map(|x| x.1).collect::>(); + + let next_bounded_keys = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_keys, + Some( + "Warning: The session has more queued validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + NextKeys::::put(next_bounded_keys); } } @@ -93,32 +174,48 @@ impl pallet_session::OneSessionHandler for Module { #[cfg(test)] mod tests { use super::*; - use sp_authority_discovery::{AuthorityPair}; + use crate as pallet_authority_discovery; + use frame_support::{parameter_types, traits::GenesisBuild}; use sp_application_crypto::Pair; + use sp_authority_discovery::AuthorityPair; use sp_core::{crypto::key_types, H256}; use sp_io::TestExternalities; use sp_runtime::{ - testing::{Header, UintAuthorityId}, traits::{ConvertInto, IdentityLookup, OpaqueKeys}, - Perbill, KeyTypeId, + testing::{Header, UintAuthorityId}, + traits::{ConvertInto, IdentityLookup, OpaqueKeys}, + KeyTypeId, Perbill, }; - use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; - type AuthorityDiscovery = Module; - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl Trait for Test {} + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, + } + ); parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); + pub const MaxAuthorities: u32 = 100; } - impl pallet_session::Trait for Test { + impl Config for Test { + type MaxAuthorities = MaxAuthorities; + } + + impl pallet_session::Config for Test { type SessionManager = (); type Keys = UintAuthorityId; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = TestSessionHandler; - type Event = (); + type Event = Event; type ValidatorId = AuthorityId; type ValidatorIdOf = ConvertInto; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; @@ -126,7 +223,7 @@ mod tests { type WeightInfo = (); } - impl pallet_session::historical::Trait for Test { + impl pallet_session::historical::Config for Test { type FullIdentification = (); type FullIdentificationOf = (); } @@ -138,41 +235,34 @@ mod tests { pub const Offset: BlockNumber = 0; pub const UncleGenerations: u64 = 0; pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { - type BaseCallFilter = (); + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AuthorityId; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); - } - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + type SS58Prefix = (); + type OnSetCode = (); } pub struct TestSessionHandler; @@ -193,55 +283,58 @@ mod tests { #[test] fn authorities_returns_current_and_next_authority_set() { - // The whole authority discovery module ignores account ids, but we still need them for + // The whole authority discovery pallet ignores account ids, but we still need them for // `pallet_session::OneSessionHandler::on_new_session`, thus its safe to use the same value // everywhere. let account_id = AuthorityPair::from_seed_slice(vec![10; 32].as_ref()).unwrap().public(); - let mut first_authorities: Vec = vec![0, 1].into_iter() + let mut first_authorities: Vec = vec![0, 1] + .into_iter() .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) .map(AuthorityId::from) .collect(); - let second_authorities: Vec = vec![2, 3].into_iter() + let second_authorities: Vec = vec![2, 3] + .into_iter() .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) .map(AuthorityId::from) .collect(); // Needed for `pallet_session::OneSessionHandler::on_new_session`. 
- let second_authorities_and_account_ids = second_authorities.clone() + let second_authorities_and_account_ids = second_authorities + .clone() .into_iter() .map(|id| (&account_id, id)) - .collect:: >(); + .collect::>(); - let mut third_authorities: Vec = vec![4, 5].into_iter() + let mut third_authorities: Vec = vec![4, 5] + .into_iter() .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) .map(AuthorityId::from) .collect(); // Needed for `pallet_session::OneSessionHandler::on_new_session`. - let third_authorities_and_account_ids = third_authorities.clone() + let third_authorities_and_account_ids = third_authorities + .clone() .into_iter() .map(|id| (&account_id, id)) - .collect:: >(); + .collect::>(); // Build genesis. - let mut t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig { - keys: vec![], - } - .assimilate_storage::(&mut t) + GenesisBuild::::assimilate_storage( + &pallet_authority_discovery::GenesisConfig { keys: vec![] }, + &mut t, + ) .unwrap(); // Create externalities. let mut externalities = TestExternalities::new(t); externalities.execute_with(|| { - use pallet_session::OneSessionHandler; + use frame_support::traits::OneSessionHandler; AuthorityDiscovery::on_genesis_session( - first_authorities.iter().map(|id| (id, id.clone())) + first_authorities.iter().map(|id| (id, id.clone())), ); first_authorities.sort(); let mut authorities_returned = AuthorityDiscovery::authorities(); @@ -254,11 +347,9 @@ mod tests { second_authorities_and_account_ids.clone().into_iter(), third_authorities_and_account_ids.clone().into_iter(), ); - let mut authorities_returned = AuthorityDiscovery::authorities(); - authorities_returned.sort(); + let authorities_returned = AuthorityDiscovery::authorities(); assert_eq!( - first_authorities, - authorities_returned, + first_authorities, authorities_returned, "Expected authority set not to change as `changed` was set to false.", ); @@ -268,7 +359,8 @@ mod tests { second_authorities_and_account_ids.into_iter(), third_authorities_and_account_ids.clone().into_iter(), ); - let mut second_and_third_authorities = second_authorities.iter() + let mut second_and_third_authorities = second_authorities + .iter() .chain(third_authorities.iter()) .cloned() .collect::>(); diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index e8b6444583821..120b72f8e6511 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authorship" -version = "2.0.0" +version = "4.0.0-dev" description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" @@ -13,27 +13,30 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-authorship = { version = "2.0.0", default-features = false, path = "../../primitives/authorship" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } 
-impl-trait-for-tuples = "0.1.3" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-authorship = { version = "4.0.0-dev", default-features = false, path = "../../primitives/authorship" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } [features] default = ["std"] std = [ "codec/std", - "sp-inherents/std", + "scale-info/std", "sp-runtime/std", "sp-std/std", "frame-support/std", "frame-system/std", "sp-authorship/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 0a10c8849571b..5d36adabe888f 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,45 +21,20 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{result, prelude::*}; -use sp_std::collections::btree_set::BTreeSet; -use frame_support::{decl_module, decl_storage, decl_error, dispatch, ensure}; -use frame_support::traits::{FindAuthor, VerifySeal, Get}; -use codec::{Encode, Decode}; -use frame_system::ensure_none; -use sp_runtime::traits::{Header as HeaderT, One, Zero}; -use frame_support::weights::{Weight, DispatchClass}; -use sp_inherents::{InherentIdentifier, ProvideInherent, InherentData}; -use sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError}; +use codec::{Decode, Encode}; +use frame_support::{ + dispatch, + traits::{FindAuthor, Get, VerifySeal}, +}; +use sp_authorship::{InherentError, UnclesInherentData, INHERENT_IDENTIFIER}; +use sp_runtime::traits::{Header as HeaderT, One, Saturating}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*, result}; const MAX_UNCLES: usize = 10; -pub trait Trait: frame_system::Trait { - /// Find the author of a block. - type FindAuthor: FindAuthor; - /// The number of blocks back we should accept uncles. - /// This means that we will deal with uncle-parents that are - /// `UncleGenerations + 1` before `now`. - type UncleGenerations: Get; - /// A filter for uncles within a block. This is for implementing - /// further constraints on what uncles can be included, other than their ancestry. - /// - /// For PoW, as long as the seals are checked, there is no need to use anything - /// but the `VerifySeal` implementation as the filter. This is because the cost of making many equivocating - /// uncles is high. - /// - /// For PoS, there is no such limitation, so a further constraint must be imposed - /// beyond a seal check in order to prevent an arbitrary number of - /// equivocating uncles from being included. 
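// For illustration, a minimal filter satisfying the `FilterUncle` trait shape defined
// below; the `RejectAllUncles` type is an assumption, not part of this pallet:
//
//     pub struct RejectAllUncles;
//
//     impl<Header, Author> FilterUncle<Header, Author> for RejectAllUncles {
//         type Accumulator = ();
//
//         fn filter_uncle(_: &Header, _: &mut ()) -> Result<Option<Author>, &'static str> {
//             Err("uncles are not accepted by this runtime")
//         }
//     }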
- /// - /// The `OnePerAuthorPerHeight` filter is good for many slot-based PoS - /// engines. - type FilterUncle: FilterUncle; - /// An event handler for authored blocks. - type EventHandler: EventHandler; -} +pub use pallet::*; -/// An event handler for the authorship module. There is a dummy implementation +/// An event handler for the authorship pallet. There is a dummy implementation /// for `()`, which does nothing. #[impl_trait_for_tuples::impl_for_tuples(30)] pub trait EventHandler { @@ -82,15 +57,15 @@ pub trait FilterUncle { /// Do additional filtering on a seal-checked uncle block, with the accumulated /// filter. - fn filter_uncle(header: &Header, acc: &mut Self::Accumulator) - -> Result, &'static str>; + fn filter_uncle( + header: &Header, + acc: &mut Self::Accumulator, + ) -> Result, &'static str>; } impl FilterUncle for () { type Accumulator = (); - fn filter_uncle(_: &H, _acc: &mut Self::Accumulator) - -> Result, &'static str> - { + fn filter_uncle(_: &H, _acc: &mut Self::Accumulator) -> Result, &'static str> { Ok(None) } } @@ -100,14 +75,10 @@ impl FilterUncle for () { /// equivocating is high. pub struct SealVerify(sp_std::marker::PhantomData); -impl> FilterUncle - for SealVerify -{ +impl> FilterUncle for SealVerify { type Accumulator = (); - fn filter_uncle(header: &Header, _acc: &mut ()) - -> Result, &'static str> - { + fn filter_uncle(header: &Header, _acc: &mut ()) -> Result, &'static str> { T::verify_seal(header) } } @@ -118,8 +89,7 @@ impl> FilterUncle /// This does O(n log n) work in the number of uncles included. pub struct OnePerAuthorPerHeight(sp_std::marker::PhantomData<(T, N)>); -impl FilterUncle - for OnePerAuthorPerHeight +impl FilterUncle for OnePerAuthorPerHeight where Header: HeaderT + PartialEq, Header::Number: Ord, @@ -128,15 +98,16 @@ where { type Accumulator = BTreeSet<(Header::Number, Author)>; - fn filter_uncle(header: &Header, acc: &mut Self::Accumulator) - -> Result, &'static str> - { + fn filter_uncle( + header: &Header, + acc: &mut Self::Accumulator, + ) -> Result, &'static str> { let author = T::verify_seal(header)?; let number = header.number(); if let Some(ref author) = author { if !acc.insert((number.clone(), author.clone())) { - return Err("more than one uncle per number per author included"); + return Err("more than one uncle per number per author included") } } @@ -144,48 +115,51 @@ where } } -#[derive(Encode, Decode, sp_runtime::RuntimeDebug)] +#[derive(Encode, Decode, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] #[cfg_attr(any(feature = "std", test), derive(PartialEq))] enum UncleEntryItem { InclusionHeight(BlockNumber), Uncle(Hash, Option), } - -decl_storage! { - trait Store for Module as Authorship { - /// Uncles - Uncles: Vec>; - /// Author of current block. - Author: Option; - /// Whether uncles were already set in this block. - DidSetUncles: bool; - } -} - -decl_error! { - /// Error for the authorship module. - pub enum Error for Module { - /// The uncle parent not in the chain. - InvalidUncleParent, - /// Uncles already set in the block. - UnclesAlreadySet, - /// Too many uncles. - TooManyUncles, - /// The uncle is genesis. - GenesisUncle, - /// The uncle is too high in chain. - TooHighUncle, - /// The uncle is already included. - UncleAlreadyIncluded, - /// The uncle isn't recent enough to be included. 
- OldUncle, +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Find the author of a block. + type FindAuthor: FindAuthor; + /// The number of blocks back we should accept uncles. + /// This means that we will deal with uncle-parents that are + /// `UncleGenerations + 1` before `now`. + #[pallet::constant] + type UncleGenerations: Get; + /// A filter for uncles within a block. This is for implementing + /// further constraints on what uncles can be included, other than their ancestry. + /// + /// For PoW, as long as the seals are checked, there is no need to use anything + /// but the `VerifySeal` implementation as the filter. This is because the cost of making + /// many equivocating uncles is high. + /// + /// For PoS, there is no such limitation, so a further constraint must be imposed + /// beyond a seal check in order to prevent an arbitrary number of + /// equivocating uncles from being included. + /// + /// The `OnePerAuthorPerHeight` filter is good for many slot-based PoS + /// engines. + type FilterUncle: FilterUncle; + /// An event handler for authored blocks. + type EventHandler: EventHandler; } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + #[pallet::hooks] + impl Hooks> for Pallet { fn on_initialize(now: T::BlockNumber) -> Weight { let uncle_generations = T::UncleGenerations::get(); // prune uncles that are older than the allowed number of generations. @@ -194,50 +168,148 @@ decl_module! { Self::prune_old_uncles(minimum_height) } - ::DidSetUncles::put(false); + >::put(false); T::EventHandler::note_author(Self::author()); 0 } - fn on_finalize() { + fn on_finalize(_: T::BlockNumber) { // ensure we never go to trie with these values. - ::Author::kill(); - ::DidSetUncles::kill(); + >::kill(); + >::kill(); } + } + + #[pallet::storage] + /// Uncles + pub(super) type Uncles = + StorageValue<_, Vec>, ValueQuery>; + + #[pallet::storage] + /// Author of current block. + pub(super) type Author = StorageValue<_, T::AccountId, OptionQuery>; + + #[pallet::storage] + /// Whether uncles were already set in this block. + pub(super) type DidSetUncles = StorageValue<_, bool, ValueQuery>; + + #[pallet::error] + pub enum Error { + /// The uncle parent not in the chain. + InvalidUncleParent, + /// Uncles already set in the block. + UnclesAlreadySet, + /// Too many uncles. + TooManyUncles, + /// The uncle is genesis. + GenesisUncle, + /// The uncle is too high in chain. + TooHighUncle, + /// The uncle is already included. + UncleAlreadyIncluded, + /// The uncle isn't recent enough to be included. + OldUncle, + } + #[pallet::call] + impl Pallet { /// Provide a set of uncles. - #[weight = (0, DispatchClass::Mandatory)] - fn set_uncles(origin, new_uncles: Vec) -> dispatch::DispatchResult { + #[pallet::weight((0, DispatchClass::Mandatory))] + pub fn set_uncles(origin: OriginFor, new_uncles: Vec) -> DispatchResult { ensure_none(origin)?; ensure!(new_uncles.len() <= MAX_UNCLES, Error::::TooManyUncles); - if ::DidSetUncles::get() { + if >::get() { Err(Error::::UnclesAlreadySet)? 
} - ::DidSetUncles::put(true); + >::put(true); Self::verify_and_import_uncles(new_uncles) } } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let uncles = data.uncles().unwrap_or_default(); + let mut new_uncles = Vec::new(); + + if !uncles.is_empty() { + let prev_uncles = >::get(); + let mut existing_hashes: Vec<_> = prev_uncles + .into_iter() + .filter_map(|entry| match entry { + UncleEntryItem::InclusionHeight(_) => None, + UncleEntryItem::Uncle(h, _) => Some(h), + }) + .collect(); + + let mut acc: >::Accumulator = + Default::default(); + + for uncle in uncles { + match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) { + Ok(_) => { + let hash = uncle.hash(); + new_uncles.push(uncle); + existing_hashes.push(hash); + + if new_uncles.len() == MAX_UNCLES { + break + } + }, + Err(_) => { + // skip this uncle + }, + } + } + } + + if new_uncles.is_empty() { + None + } else { + Some(Call::set_uncles { new_uncles }) + } + } + + fn check_inherent( + call: &Self::Call, + _data: &InherentData, + ) -> result::Result<(), Self::Error> { + match call { + Call::set_uncles { ref new_uncles } if new_uncles.len() > MAX_UNCLES => + Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())), + _ => Ok(()), + } + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::set_uncles { .. }) + } + } } -impl Module { +impl Pallet { /// Fetch the author of the block. /// /// This is safe to invoke in `on_initialize` implementations, as well /// as afterwards. pub fn author() -> T::AccountId { // Check the memoized storage value. - if let Some(author) = ::Author::get() { - return author; + if let Some(author) = >::get() { + return author } - let digest = >::digest(); + let digest = >::digest(); let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); if let Some(author) = T::FindAuthor::find_author(pre_runtime_digests) { - ::Author::put(&author); + >::put(&author); author } else { Default::default() @@ -245,19 +317,18 @@ impl Module { } fn verify_and_import_uncles(new_uncles: Vec) -> dispatch::DispatchResult { - let now = >::block_number(); + let now = >::block_number(); - let mut uncles = ::Uncles::get(); + let mut uncles = >::get(); uncles.push(UncleEntryItem::InclusionHeight(now)); let mut acc: >::Accumulator = Default::default(); for uncle in new_uncles { - let prev_uncles = uncles.iter().filter_map(|entry| - match entry { - UncleEntryItem::InclusionHeight(_) => None, - UncleEntryItem::Uncle(h, _) => Some(h), - }); + let prev_uncles = uncles.iter().filter_map(|entry| match entry { + UncleEntryItem::InclusionHeight(_) => None, + UncleEntryItem::Uncle(h, _) => Some(h), + }); let author = Self::verify_uncle(&uncle, prev_uncles, &mut acc)?; let hash = uncle.hash(); @@ -268,25 +339,20 @@ impl Module { uncles.push(UncleEntryItem::Uncle(hash, author)); } - ::Uncles::put(&uncles); + >::put(&uncles); Ok(()) } - fn verify_uncle<'a, I: IntoIterator>( + fn verify_uncle<'a, I: IntoIterator>( uncle: &T::Header, existing_uncles: I, accumulator: &mut >::Accumulator, - ) -> Result, dispatch::DispatchError> - { - let now = >::block_number(); + ) -> Result, dispatch::DispatchError> { + let now = >::block_number(); let (minimum_height, maximum_height) = { let uncle_generations = T::UncleGenerations::get(); - let min = if now >= uncle_generations { - now - uncle_generations - } else { - 
Zero::zero() - }; + let min = now.saturating_sub(uncle_generations); (min, now) }; @@ -294,27 +360,27 @@ impl Module { let hash = uncle.hash(); if uncle.number() < &One::one() { - return Err(Error::::GenesisUncle.into()); + return Err(Error::::GenesisUncle.into()) } if uncle.number() > &maximum_height { - return Err(Error::::TooHighUncle.into()); + return Err(Error::::TooHighUncle.into()) } { let parent_number = uncle.number().clone() - One::one(); - let parent_hash = >::block_hash(&parent_number); + let parent_hash = >::block_hash(&parent_number); if &parent_hash != uncle.parent_hash() { - return Err(Error::::InvalidUncleParent.into()); + return Err(Error::::InvalidUncleParent.into()) } } if uncle.number() < &minimum_height { - return Err(Error::::OldUncle.into()); + return Err(Error::::OldUncle.into()) } - let duplicate = existing_uncles.into_iter().find(|h| **h == hash).is_some(); - let in_chain = >::block_hash(uncle.number()) == hash; + let duplicate = existing_uncles.into_iter().any(|h| *h == hash); + let in_chain = >::block_hash(uncle.number()) == hash; if duplicate || in_chain { return Err(Error::::UncleAlreadyIncluded.into()) @@ -325,151 +391,98 @@ impl Module { } fn prune_old_uncles(minimum_height: T::BlockNumber) { - let mut uncles = ::Uncles::get(); + let uncles = >::get(); let prune_entries = uncles.iter().take_while(|item| match item { UncleEntryItem::Uncle(_, _) => true, UncleEntryItem::InclusionHeight(height) => height < &minimum_height, }); let prune_index = prune_entries.count(); - let _ = uncles.drain(..prune_index); - ::Uncles::put(uncles); - } -} - -impl ProvideInherent for Module { - type Call = Call; - type Error = InherentError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let uncles = data.uncles().unwrap_or_default(); - let mut set_uncles = Vec::new(); - - if !uncles.is_empty() { - let prev_uncles = ::Uncles::get(); - let mut existing_hashes: Vec<_> = prev_uncles.into_iter().filter_map(|entry| - match entry { - UncleEntryItem::InclusionHeight(_) => None, - UncleEntryItem::Uncle(h, _) => Some(h), - } - ).collect(); - - let mut acc: >::Accumulator = Default::default(); - - for uncle in uncles { - match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) { - Ok(_) => { - let hash = uncle.hash(); - set_uncles.push(uncle); - existing_hashes.push(hash); - - if set_uncles.len() == MAX_UNCLES { - break - } - } - Err(_) => { - // skip this uncle - } - } - } - } - - if set_uncles.is_empty() { - None - } else { - Some(Call::set_uncles(set_uncles)) - } - } - - fn check_inherent(call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { - match call { - Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES => { - Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())) - }, - _ => { - Ok(()) - }, - } + >::put(&uncles[prune_index..]); } } #[cfg(test)] mod tests { use super::*; + use crate as pallet_authorship; + use frame_support::{parameter_types, ConsensusEngineId}; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, generic::DigestItem, Perbill, + generic::DigestItem, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, }; - use frame_support::{parameter_types, impl_outer_origin, ConsensusEngineId, weights::Weight}; - impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = 
frame_system::mocking::MockBlock; - #[derive(Clone, Eq, PartialEq)] - pub struct Test; + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, + } + ); parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { - type BaseCallFilter = (); + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const UncleGenerations: u64 = 5; } - impl Trait for Test { + impl pallet::Config for Test { type FindAuthor = AuthorGiven; type UncleGenerations = UncleGenerations; type FilterUncle = SealVerify; type EventHandler = (); } - type System = frame_system::Module; - type Authorship = Module; - const TEST_ID: ConsensusEngineId = [1, 2, 3, 4]; pub struct AuthorGiven; impl FindAuthor for AuthorGiven { fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator + where + I: 'a + IntoIterator, { for (id, data) in digests { if id == TEST_ID { - return u64::decode(&mut &data[..]).ok(); + return u64::decode(&mut &data[..]).ok() } } @@ -484,10 +497,8 @@ mod tests { let pre_runtime_digests = header.digest.logs.iter().filter_map(|d| d.as_pre_runtime()); let seals = header.digest.logs.iter().filter_map(|d| d.as_seal()); - let author = match AuthorGiven::find_author(pre_runtime_digests) { - None => return Err("no author"), - Some(author) => author, - }; + let author = + AuthorGiven::find_author(pre_runtime_digests).ok_or_else(|| "no author")?; for (id, seal) in seals { if id == TEST_ID { @@ -495,10 +506,10 @@ mod tests { Err(_) => return Err("wrong seal"), Ok(a) => { if a != author { - return Err("wrong author in seal"); + return Err("wrong author in seal") } break - } + }, } } } @@ -518,13 +529,7 @@ mod tests { } fn create_header(number: u64, parent_hash: H256, state_root: H256) -> Header { - Header::new( - number, - Default::default(), - state_root, - parent_hash, - Default::default(), - ) + Header::new(number, Default::default(), state_root, parent_hash, Default::default()) } fn new_test_ext() -> sp_io::TestExternalities { @@ -539,9 +544,14 @@ mod tests { let hash = Default::default(); let author = Default::default(); let uncles = vec![ - 
InclusionHeight(1u64), Uncle(hash, Some(author)), Uncle(hash, None), Uncle(hash, None), - InclusionHeight(2u64), Uncle(hash, None), - InclusionHeight(3u64), Uncle(hash, None), + InclusionHeight(1u64), + Uncle(hash, Some(author)), + Uncle(hash, None), + Uncle(hash, None), + InclusionHeight(2u64), + Uncle(hash, None), + InclusionHeight(3u64), + Uncle(hash, None), ]; ::Uncles::put(uncles); @@ -580,16 +590,15 @@ mod tests { } let mut canon_chain = CanonChain { - inner: vec![seal_header(create_header(0, Default::default(), Default::default()), 999)], + inner: vec![seal_header( + create_header(0, Default::default(), Default::default()), + 999, + )], }; - let initialize_block = |number, hash: H256| System::initialize( - &number, - &hash, - &Default::default(), - &Default::default(), - Default::default() - ); + let initialize_block = |number, hash: H256| { + System::initialize(&number, &hash, &Default::default(), Default::default()) + }; for number in 1..8 { initialize_block(number, canon_chain.best_hash()); @@ -677,19 +686,11 @@ mod tests { fn sets_author_lazily() { new_test_ext().execute_with(|| { let author = 42; - let mut header = seal_header( - create_header(1, Default::default(), [1; 32].into()), - author, - ); + let mut header = + seal_header(create_header(1, Default::default(), [1; 32].into()), author); header.digest_mut().pop(); // pop the seal off. - System::initialize( - &1, - &Default::default(), - &Default::default(), - header.digest(), - Default::default(), - ); + System::initialize(&1, &Default::default(), header.digest(), Default::default()); assert_eq!(Authorship::author(), author); }); @@ -703,27 +704,15 @@ mod tests { let author_b = 43; let mut acc: >::Accumulator = Default::default(); - let header_a1 = seal_header( - create_header(1, Default::default(), [1; 32].into()), - author_a, - ); - let header_b1 = seal_header( - create_header(1, Default::default(), [1; 32].into()), - author_b, - ); - - let header_a2_1 = seal_header( - create_header(2, Default::default(), [1; 32].into()), - author_a, - ); - let header_a2_2 = seal_header( - create_header(2, Default::default(), [2; 32].into()), - author_a, - ); - - let mut check_filter = move |uncle| { - Filter::filter_uncle(uncle, &mut acc) - }; + let header_a1 = seal_header(create_header(1, Default::default(), [1; 32].into()), author_a); + let header_b1 = seal_header(create_header(1, Default::default(), [1; 32].into()), author_b); + + let header_a2_1 = + seal_header(create_header(2, Default::default(), [1; 32].into()), author_a); + let header_a2_2 = + seal_header(create_header(2, Default::default(), [2; 32].into()), author_a); + + let mut check_filter = move |uncle| Filter::filter_uncle(uncle, &mut acc); // same height, different author is OK. 
assert_eq!(check_filter(&header_a1), Ok(Some(author_a))); diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index a210a2a8ef06a..d95f1419fd035 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-babe" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,53 +13,52 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -serde = { version = "1.0.101", optional = true } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/babe" } -sp-consensus-vrf = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/vrf" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } +pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } +sp-consensus-vrf = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/vrf" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-session = { version = "4.0.0-dev", 
default-features = false, path = "../../primitives/session" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-offences = { version = "2.0.0", path = "../offences" } -pallet-staking = { version = "2.0.0", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-offences = { version = "4.0.0-dev", path = "../offences" } +pallet-staking = { version = "4.0.0-dev", path = "../staking" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "pallet-authorship/std", "pallet-session/std", "pallet-timestamp/std", - "serde", "sp-application-crypto/std", "sp-consensus-babe/std", "sp-consensus-vrf/std", - "sp-inherents/std", "sp-io/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", "sp-std/std", - "sp-timestamp/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs index 8ee4a5913c885..372dfa532a894 100644 --- a/frame/babe/src/benchmarking.rs +++ b/frame/babe/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,8 +23,6 @@ use frame_benchmarking::benchmarks; type Header = sp_runtime::generic::Header; benchmarks! { - _ { } - check_equivocation_proof { let x in 0 .. 1; @@ -71,14 +69,12 @@ benchmarks! { mod tests { use super::*; use crate::mock::*; - use frame_support::assert_ok; - #[test] - fn test_benchmarks() { - new_test_ext(3).execute_with(|| { - assert_ok!(test_benchmark_check_equivocation_proof::()); - }) - } + frame_benchmarking::impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(3), + crate::mock::Test, + ); #[test] fn test_generate_equivocation_report_blob() { @@ -93,14 +89,11 @@ mod tests { let equivocation_proof = generate_equivocation_proof( offending_authority_index, offending_authority_pair, - CurrentSlot::get() + 1, + CurrentSlot::::get() + 1, ); println!("equivocation_proof: {:?}", equivocation_proof); - println!( - "equivocation_proof.encode(): {:?}", - equivocation_proof.encode() - ); + println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); }); } } diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index a0e13781961cc..20ac9b961fc8d 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,10 +19,15 @@ //! This file was not auto-generated. use frame_support::weights::{ - Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, + constants::{RocksDbWeight as DbWeight, WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, + Weight, }; impl crate::WeightInfo for () { + fn plan_config_change() -> Weight { + DbWeight::get().writes(1) + } + fn report_equivocation(validator_count: u32) -> Weight { // we take the validator set count from the membership proof to // calculate the weight but we set a floor of 100 validators. diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index 322dff92f2398..2397918d1ef13 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! //! An opt-in utility module for reporting equivocations. //! //! This module defines an offence type for BABE equivocations @@ -33,29 +32,34 @@ //! When using this module for enabling equivocation reporting it is required //! that the `ValidateUnsigned` for the BABE pallet is used in the runtime //! definition. -//! -use frame_support::{debug, traits::KeyOwnerProofSystem}; -use sp_consensus_babe::{EquivocationProof, SlotNumber}; -use sp_runtime::transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, - TransactionValidityError, ValidTransaction, +use frame_support::traits::{Get, KeyOwnerProofSystem}; +use sp_consensus_babe::{EquivocationProof, Slot}; +use sp_runtime::{ + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + TransactionValidityError, ValidTransaction, + }, + DispatchResult, Perbill, }; -use sp_runtime::{DispatchResult, Perbill}; use sp_staking::{ offence::{Kind, Offence, OffenceError, ReportOffence}, SessionIndex, }; use sp_std::prelude::*; -use crate::{Call, Module, Trait}; +use crate::{Call, Config, Pallet}; /// A trait with utility methods for handling equivocation reports in BABE. /// The trait provides methods for reporting an offence triggered by a valid /// equivocation report, checking the current block author (to declare as the /// reporter), and also for creating and submitting equivocation report /// extrinsics (useful only in offchain context). -pub trait HandleEquivocation { +pub trait HandleEquivocation { + /// The longevity, in blocks, that the equivocation report is valid for. When using the staking + /// pallet this should be equal to the bonding duration (in blocks, not eras). + type ReportLongevity: Get; + /// Report an offence proved by the given reporters. fn report_offence( reporters: Vec, @@ -63,7 +67,7 @@ pub trait HandleEquivocation { ) -> Result<(), OffenceError>; /// Returns true if all of the offenders at the given time slot have already been reported. 
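// A hypothetical runtime wiring of this handler; `Runtime`, `Offences`, and the
// constants behind `ReportLongevity` are assumptions that depend on the concrete
// runtime (with pallet-staking, the longevity should equal the bonding duration in
// blocks):
//
//     parameter_types! {
//         pub ReportLongevity: u64 =
//             BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * EpochDuration::get();
//     }
//
//     impl pallet_babe::Config for Runtime {
//         // ... other associated types ...
//         type HandleEquivocation = pallet_babe::EquivocationHandler<
//             Self::KeyOwnerIdentification,
//             Offences,
//             ReportLongevity,
//         >;
//     }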
- fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &SlotNumber) -> bool; + fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &Slot) -> bool; /// Create and dispatch an equivocation report extrinsic. fn submit_unsigned_equivocation_report( @@ -75,7 +79,9 @@ pub trait HandleEquivocation { fn block_author() -> Option; } -impl HandleEquivocation for () { +impl HandleEquivocation for () { + type ReportLongevity = (); + fn report_offence( _reporters: Vec, _offence: BabeEquivocationOffence, @@ -83,7 +89,7 @@ impl HandleEquivocation for () { Ok(()) } - fn is_known_offence(_offenders: &[T::KeyOwnerIdentification], _time_slot: &SlotNumber) -> bool { + fn is_known_offence(_offenders: &[T::KeyOwnerIdentification], _time_slot: &Slot) -> bool { true } @@ -103,24 +109,22 @@ impl HandleEquivocation for () { /// using existing subsystems that are part of frame (type bounds described /// below) and will dispatch to them directly, it's only purpose is to wire all /// subsystems together. -pub struct EquivocationHandler { - _phantom: sp_std::marker::PhantomData<(I, R)>, +pub struct EquivocationHandler { + _phantom: sp_std::marker::PhantomData<(I, R, L)>, } -impl Default for EquivocationHandler { +impl Default for EquivocationHandler { fn default() -> Self { - Self { - _phantom: Default::default(), - } + Self { _phantom: Default::default() } } } -impl HandleEquivocation for EquivocationHandler +impl HandleEquivocation for EquivocationHandler where // We use the authorship pallet to fetch the current block author and use // `offchain::SendTransactionTypes` for unsigned extrinsic creation and // submission. - T: Trait + pallet_authorship::Trait + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, // A system for reporting offences after valid equivocation reports are // processed. R: ReportOffence< @@ -128,7 +132,12 @@ where T::KeyOwnerIdentification, BabeEquivocationOffence, >, + // The longevity (in blocks) that the equivocation report is valid for. When using the staking + // pallet this should be the bonding duration. + L: Get, { + type ReportLongevity = L; + fn report_offence( reporters: Vec, offence: BabeEquivocationOffence, @@ -136,7 +145,7 @@ where R::report_offence(reporters, offence) } - fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &SlotNumber) -> bool { + fn is_known_offence(offenders: &[T::KeyOwnerIdentification], time_slot: &Slot) -> bool { R::is_known_offence(offenders, time_slot) } @@ -146,49 +155,63 @@ where ) -> DispatchResult { use frame_system::offchain::SubmitTransaction; - let call = Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof); + let call = Call::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }; match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(()) => debug::info!("Submitted BABE equivocation report."), - Err(e) => debug::error!("Error submitting equivocation report: {:?}", e), + Ok(()) => log::info!( + target: "runtime::babe", + "Submitted BABE equivocation report.", + ), + Err(e) => log::error!( + target: "runtime::babe", + "Error submitting equivocation report: {:?}", + e, + ), } Ok(()) } fn block_author() -> Option { - Some(>::author()) + Some(>::author()) } } -/// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` -/// to local calls (i.e. 
extrinsics generated on this node) or that already in a block. This -/// guarantees that only block authors can include unsigned equivocation reports. -impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::report_equivocation_unsigned(equivocation_proof, _) = call { +/// Methods for the `ValidateUnsigned` implementation: +/// It restricts calls to `report_equivocation_unsigned` to local calls (i.e. extrinsics generated +/// on this node) or that already in a block. This guarantees that only block authors can include +/// unsigned equivocation reports. +impl Pallet { + pub fn validate_unsigned(source: TransactionSource, call: &Call) -> TransactionValidity { + if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { // discard equivocation report not coming from the local node match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, _ => { - debug::warn!( - target: "babe", - "rejecting unsigned report equivocation transaction because it is not local/in-block." + log::warn!( + target: "runtime::babe", + "rejecting unsigned report equivocation transaction because it is not local/in-block.", ); - return InvalidTransaction::Call.into(); - } + return InvalidTransaction::Call.into() + }, } + // check report staleness + is_known_offence::(equivocation_proof, key_owner_proof)?; + + let longevity = + >::ReportLongevity::get(); + ValidTransaction::with_tag_prefix("BabeEquivocation") // We assign the maximum priority for any equivocation report. .priority(TransactionPriority::max_value()) // Only one equivocation report for the same offender at the same slot. - .and_provides(( - equivocation_proof.offender.clone(), - equivocation_proof.slot_number, - )) + .and_provides((equivocation_proof.offender.clone(), *equivocation_proof.slot)) + .longevity(longevity) // We don't propagate this. This can never be included on a remote node. .propagate(false) .build() @@ -197,41 +220,40 @@ impl frame_support::unsigned::ValidateUnsigned for Module { } } - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { - // check the membership proof to extract the offender's id - let key = ( - sp_consensus_babe::KEY_TYPE, - equivocation_proof.offender.clone(), - ); - - let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) - .ok_or(InvalidTransaction::BadProof)?; - - // check if the offence has already been reported, - // and if so then we can discard the report. 
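For readers following the `validate_unsigned` hunk above, here is a standalone model of the decision it encodes: reject anything that is not local or in-block, reject stale reports before dispatch, and otherwise build a maximum-priority, longevity-bounded, non-propagating transaction whose `provides` tag is the (offender, slot) pair, so duplicate reports for the same offence never enter the pool. The types are simplified stand-ins for the `sp_runtime` originals.

```rust
#[derive(Debug, PartialEq)]
enum Source {
    Local,
    InBlock,
    External,
}

#[derive(Debug)]
struct ValidTransaction {
    priority: u64,
    provides: Vec<(String, u64)>, // ("BabeEquivocation/<offender>", slot)
    longevity: u64,               // ReportLongevity in the pallet
    propagate: bool,
}

fn validate_unsigned(
    source: Source,
    offender: &str,
    slot: u64,
    report_longevity: u64,
    is_known_offence: bool,
) -> Result<ValidTransaction, &'static str> {
    // only the block author's own node may inject this transaction
    if source == Source::External {
        return Err("Call: not local/in-block");
    }
    // stale reports are rejected before they ever reach dispatch
    if is_known_offence {
        return Err("Stale");
    }
    Ok(ValidTransaction {
        priority: u64::MAX,
        provides: vec![(format!("BabeEquivocation/{}", offender), slot)],
        longevity: report_longevity,
        propagate: false, // can never be included on a remote node
    })
}

fn main() {
    let tx = validate_unsigned(Source::Local, "alice", 42, 27, false).unwrap();
    println!("{:?}", tx);
    assert_eq!(tx.longevity, 27);
    assert!(validate_unsigned(Source::External, "alice", 42, 27, false).is_err());
}
```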
- let is_known_offence = T::HandleEquivocation::is_known_offence( - &[offender], - &equivocation_proof.slot_number, - ); - - if is_known_offence { - Err(InvalidTransaction::Stale.into()) - } else { - Ok(()) - } + pub fn pre_dispatch(call: &Call) -> Result<(), TransactionValidityError> { + if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { + is_known_offence::(equivocation_proof, key_owner_proof) } else { Err(InvalidTransaction::Call.into()) } } } +fn is_known_offence( + equivocation_proof: &EquivocationProof, + key_owner_proof: &T::KeyOwnerProof, +) -> Result<(), TransactionValidityError> { + // check the membership proof to extract the offender's id + let key = (sp_consensus_babe::KEY_TYPE, equivocation_proof.offender.clone()); + + let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) + .ok_or(InvalidTransaction::BadProof)?; + + // check if the offence has already been reported, + // and if so then we can discard the report. + if T::HandleEquivocation::is_known_offence(&[offender], &equivocation_proof.slot) { + Err(InvalidTransaction::Stale.into()) + } else { + Ok(()) + } +} + /// A BABE equivocation offence report. /// /// When a validator released two or more blocks at the same slot. pub struct BabeEquivocationOffence { - /// A babe slot number in which this incident happened. - pub slot: SlotNumber, + /// A babe slot in which this incident happened. + pub slot: Slot, /// The session index in which the incident happened. pub session_index: SessionIndex, /// The size of the validator set at the time of the offence. @@ -244,7 +266,7 @@ impl Offence for BabeEquivocationOffence { const ID: Kind = *b"babe:equivocatio"; - type TimeSlot = SlotNumber; + type TimeSlot = Slot; fn offenders(&self) -> Vec { vec![self.offender.clone()] @@ -264,7 +286,7 @@ impl Offence fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index efada5f18cbf8..b39074bb3f057 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
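The `slash_fraction` hunk above keeps the formula `min((3k / n)^2, 1)` and only swaps `from_rational_approximation` for `from_rational`. A worked standalone example in parts-per-billion arithmetic (which is what `Perbill` uses), assuming `from_rational` saturates at 100% as the real type does:

```rust
const BILLION: u64 = 1_000_000_000; // Perbill's denominator

fn from_rational(num: u64, den: u64) -> u64 {
    // clamp at 100%, mirroring Perbill's saturation
    (num.saturating_mul(BILLION) / den.max(1)).min(BILLION)
}

fn square(x: u64) -> u64 {
    x * x / BILLION
}

fn slash_fraction(offenders_count: u64, validator_set_count: u64) -> u64 {
    // the formula is min((3k / n)^2, 1)
    square(from_rational(3 * offenders_count, validator_set_count))
}

fn main() {
    // 1 offender among 100 validators: (3/100)^2 = 0.09% of stake slashed
    assert_eq!(slash_fraction(1, 100), 900_000);
    // a third or more of the set equivocating saturates at 100%
    assert_eq!(slash_fraction(34, 100), BILLION);
}
```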
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,35 +23,33 @@ use codec::{Decode, Encode}; use frame_support::{ - decl_error, decl_module, decl_storage, dispatch::DispatchResultWithPostInfo, - traits::{FindAuthor, Get, KeyOwnerProofSystem, Randomness as RandomnessT}, + traits::{ + DisabledValidators, FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, OneSessionHandler, + }, weights::{Pays, Weight}, - Parameter, }; -use frame_system::{ensure_none, ensure_signed}; use sp_application_crypto::Public; use sp_runtime::{ generic::DigestItem, - traits::{Hash, IsMember, One, SaturatedConversion, Saturating}, - ConsensusEngineId, KeyTypeId, + traits::{IsMember, One, SaturatedConversion, Saturating, Zero}, + ConsensusEngineId, KeyTypeId, Permill, }; use sp_session::{GetSessionNumber, GetValidatorCount}; -use sp_std::{prelude::*, result}; -use sp_timestamp::OnTimestampSet; +use sp_std::prelude::*; use sp_consensus_babe::{ digests::{NextConfigDescriptor, NextEpochDescriptor, PreDigest}, - inherents::{BabeInherentData, INHERENT_IDENTIFIER}, - BabeAuthorityWeight, ConsensusLog, EquivocationProof, SlotNumber, BABE_ENGINE_ID, + BabeAuthorityWeight, BabeEpochConfiguration, ConsensusLog, Epoch, EquivocationProof, Slot, + BABE_ENGINE_ID, }; use sp_consensus_vrf::schnorrkel; -use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; pub use sp_consensus_babe::{AuthorityId, PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH}; -mod equivocation; mod default_weights; +mod equivocation; +mod randomness; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; @@ -61,53 +59,14 @@ mod mock; mod tests; pub use equivocation::{BabeEquivocationOffence, EquivocationHandler, HandleEquivocation}; +pub use randomness::{ + CurrentBlockRandomness, RandomnessFromOneEpochAgo, RandomnessFromTwoEpochsAgo, +}; -pub trait Trait: pallet_timestamp::Trait { - /// The amount of time, in slots, that each epoch should last. - type EpochDuration: Get; - - /// The expected average block time at which BABE should be creating - /// blocks. Since BABE is probabilistic it is not trivial to figure out - /// what the expected average block time should be based on the slot - /// duration and the security parameter `c` (where `1 - c` represents - /// the probability of a slot being empty). - type ExpectedBlockTime: Get; - - /// BABE requires some logic to be triggered on every block to query for whether an epoch - /// has ended and to perform the transition to the next epoch. - /// - /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be used - /// when no other module is responsible for changing authority set. - type EpochChangeTrigger: EpochChangeTrigger; - - /// The proof of key ownership, used for validating equivocation reports. - /// The proof must include the session index and validator count of the - /// session at which the equivocation occurred. - type KeyOwnerProof: Parameter + GetSessionNumber + GetValidatorCount; - - /// The identification of a key owner, used when reporting equivocations. - type KeyOwnerIdentification: Parameter; - - /// A system for proving ownership of keys, i.e. that a given key was part - /// of a validator set, needed for validating equivocation reports. 
- type KeyOwnerProofSystem: KeyOwnerProofSystem< - (KeyTypeId, AuthorityId), - Proof = Self::KeyOwnerProof, - IdentificationTuple = Self::KeyOwnerIdentification, - >; - - /// The equivocation handling subsystem, defines methods to report an - /// offence (after the equivocation has been validated) and for submitting a - /// transaction to report an equivocation (from an offchain context). - /// NOTE: when enabling equivocation handling (i.e. this type isn't set to - /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime - /// definition. - type HandleEquivocation: HandleEquivocation; - - type WeightInfo: WeightInfo; -} +pub use pallet::*; pub trait WeightInfo { + fn plan_config_change() -> Weight; fn report_equivocation(validator_count: u32) -> Weight; } @@ -115,7 +74,7 @@ pub trait WeightInfo { pub trait EpochChangeTrigger { /// Trigger an epoch change, if any should take place. This should be called /// during every block, after initialization is done. - fn trigger(now: T::BlockNumber); + fn trigger(now: T::BlockNumber); } /// A type signifying to BABE that an external trigger @@ -123,7 +82,7 @@ pub trait EpochChangeTrigger { pub struct ExternalTrigger; impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. + fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. } /// A type signifying to BABE that it should perform epoch changes @@ -131,12 +90,12 @@ impl EpochChangeTrigger for ExternalTrigger { pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); + fn trigger(now: T::BlockNumber) { + if >::should_epoch_change(now) { + let authorities = >::authorities(); let next_authorities = authorities.clone(); - >::enact_epoch_change(authorities, next_authorities); + >::enact_epoch_change(authorities, next_authorities); } } } @@ -145,8 +104,75 @@ const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; type MaybeRandomness = Option; -decl_error! { - pub enum Error for Module { +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The BABE Pallet + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: pallet_timestamp::Config { + /// The amount of time, in slots, that each epoch should last. + /// NOTE: Currently it is not possible to change the epoch duration after + /// the chain has started. Attempting to do so will brick block production. + #[pallet::constant] + type EpochDuration: Get; + + /// The expected average block time at which BABE should be creating + /// blocks. Since BABE is probabilistic it is not trivial to figure out + /// what the expected average block time should be based on the slot + /// duration and the security parameter `c` (where `1 - c` represents + /// the probability of a slot being empty). + #[pallet::constant] + type ExpectedBlockTime: Get; + + /// BABE requires some logic to be triggered on every block to query for whether an epoch + /// has ended and to perform the transition to the next epoch. + /// + /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be + /// used when no other module is responsible for changing authority set. 
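Since the inlined hunks above drop the generic parameters of `EpochChangeTrigger::trigger` (in the pallet it is generic over `Config`), a standalone sketch of the two shipped strategies may help; plain functions stand in for the pallet's `should_epoch_change` and `enact_epoch_change` calls.

```rust
fn should_epoch_change(now: u64) -> bool {
    now % 3 == 0 // stand-in: a 3-block epoch, like the mock's EpochDuration
}

fn enact_epoch_change() {
    println!("epoch change enacted");
}

trait EpochChangeTrigger {
    /// Called every block, after initialization is done.
    fn trigger(now: u64);
}

/// Epoch changes are driven from outside, e.g. by pallet-session.
struct ExternalTrigger;
impl EpochChangeTrigger for ExternalTrigger {
    fn trigger(_: u64) {} // nothing - trigger is external.
}

/// The pallet re-enacts the same authority set every epoch by itself.
struct SameAuthoritiesForever;
impl EpochChangeTrigger for SameAuthoritiesForever {
    fn trigger(now: u64) {
        if should_epoch_change(now) {
            enact_epoch_change();
        }
    }
}

fn main() {
    SameAuthoritiesForever::trigger(6); // prints once: block 6 ends an epoch
}
```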
+ type EpochChangeTrigger: EpochChangeTrigger; + + /// A way to check whether a given validator is disabled and should not be authoring blocks. + /// Blocks authored by a disabled validator will lead to a panic as part of this module's + /// initialization. + type DisabledValidators: DisabledValidators; + + /// The proof of key ownership, used for validating equivocation reports. + /// The proof must include the session index and validator count of the + /// session at which the equivocation occurred. + type KeyOwnerProof: Parameter + GetSessionNumber + GetValidatorCount; + + /// The identification of a key owner, used when reporting equivocations. + type KeyOwnerIdentification: Parameter; + + /// A system for proving ownership of keys, i.e. that a given key was part + /// of a validator set, needed for validating equivocation reports. + type KeyOwnerProofSystem: KeyOwnerProofSystem< + (KeyTypeId, AuthorityId), + Proof = Self::KeyOwnerProof, + IdentificationTuple = Self::KeyOwnerIdentification, + >; + + /// The equivocation handling subsystem, defines methods to report an + /// offence (after the equivocation has been validated) and for submitting a + /// transaction to report an equivocation (from an offchain context). + /// NOTE: when enabling equivocation handling (i.e. this type isn't set to + /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime + /// definition. + type HandleEquivocation: HandleEquivocation; + + type WeightInfo: WeightInfo; + } + + #[pallet::error] + pub enum Error { /// An equivocation proof provided as part of an equivocation report is invalid. InvalidEquivocationProof, /// A key ownership proof provided as part of an equivocation report is invalid. @@ -154,136 +180,182 @@ decl_error! { /// A given equivocation report is valid but already previously reported. DuplicateOffenceReport, } -} - -decl_storage! { - trait Store for Module as Babe { - /// Current epoch index. - pub EpochIndex get(fn epoch_index): u64; - - /// Current epoch authorities. - pub Authorities get(fn authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; - /// The slot at which the first epoch actually started. This is 0 - /// until the first block of the chain. - pub GenesisSlot get(fn genesis_slot): u64; + /// Current epoch index. + #[pallet::storage] + #[pallet::getter(fn epoch_index)] + pub type EpochIndex = StorageValue<_, u64, ValueQuery>; - /// Current slot number. - pub CurrentSlot get(fn current_slot): u64; + /// Current epoch authorities. + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub type Authorities = StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; - /// The epoch randomness for the *current* epoch. - /// - /// # Security - /// - /// This MUST NOT be used for gambling, as it can be influenced by a - /// malicious validator in the short term. It MAY be used in many - /// cryptographic protocols, however, so long as one remembers that this - /// (like everything else on-chain) it is public. For example, it can be - /// used where a number is needed that cannot have been chosen by an - /// adversary, for purposes such as public-coin zero-knowledge proofs. - // NOTE: the following fields don't use the constants to define the - // array size because the metadata API currently doesn't resolve the - // variable to its underlying value. - pub Randomness get(fn randomness): schnorrkel::Randomness; - - /// Next epoch configuration, if changed. - NextEpochConfig: Option; - - /// Next epoch randomness. 
- NextRandomness: schnorrkel::Randomness; - - /// Randomness under construction. - /// - /// We make a tradeoff between storage accesses and list length. - /// We store the under-construction randomness in segments of up to - /// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`. - /// - /// Once a segment reaches this length, we begin the next one. - /// We reset all segments and return to `0` at the beginning of every - /// epoch. - SegmentIndex build(|_| 0): u32; + /// The slot at which the first epoch actually started. This is 0 + /// until the first block of the chain. + #[pallet::storage] + #[pallet::getter(fn genesis_slot)] + pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; - /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. - UnderConstruction: map hasher(twox_64_concat) u32 => Vec; + /// Current slot number. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; - /// Temporary value (cleared at block finalization) which is `Some` - /// if per-block initialization has already been called for current block. - Initialized get(fn initialized): Option; - - /// Temporary value (cleared at block finalization) that includes the VRF output generated - /// at this block. This field should always be populated during block processing unless - /// secondary plain slots are enabled (which don't contain a VRF output). - AuthorVrfRandomness get(fn author_vrf_randomness): MaybeRandomness; - - /// How late the current block is compared to its parent. - /// - /// This entry is populated as part of block execution and is cleaned up - /// on block finalization. Querying this storage entry outside of block - /// execution context should always yield zero. - Lateness get(fn lateness): T::BlockNumber; - } - add_extra_genesis { - config(authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; - build(|config| Module::::initialize_authorities(&config.authorities)) + /// The epoch randomness for the *current* epoch. + /// + /// # Security + /// + /// This MUST NOT be used for gambling, as it can be influenced by a + /// malicious validator in the short term. It MAY be used in many + /// cryptographic protocols, however, so long as one remembers that this + /// (like everything else on-chain) it is public. For example, it can be + /// used where a number is needed that cannot have been chosen by an + /// adversary, for purposes such as public-coin zero-knowledge proofs. + // NOTE: the following fields don't use the constants to define the + // array size because the metadata API currently doesn't resolve the + // variable to its underlying value. + #[pallet::storage] + #[pallet::getter(fn randomness)] + pub type Randomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + + /// Pending epoch configuration change that will be applied when the next epoch is enacted. + #[pallet::storage] + pub(super) type PendingEpochConfigChange = StorageValue<_, NextConfigDescriptor>; + + /// Next epoch randomness. + #[pallet::storage] + pub(super) type NextRandomness = StorageValue<_, schnorrkel::Randomness, ValueQuery>; + + /// Next epoch authorities. + #[pallet::storage] + pub(super) type NextAuthorities = + StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; + + /// Randomness under construction. + /// + /// We make a tradeoff between storage accesses and list length. + /// We store the under-construction randomness in segments of up to + /// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`. 
+ /// + /// Once a segment reaches this length, we begin the next one. + /// We reset all segments and return to `0` at the beginning of every + /// epoch. + #[pallet::storage] + pub(super) type SegmentIndex = StorageValue<_, u32, ValueQuery>; + + /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. + #[pallet::storage] + pub(super) type UnderConstruction = + StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; + + /// Temporary value (cleared at block finalization) which is `Some` + /// if per-block initialization has already been called for current block. + #[pallet::storage] + #[pallet::getter(fn initialized)] + pub(super) type Initialized = StorageValue<_, MaybeRandomness>; + + /// This field should always be populated during block processing unless + /// secondary plain slots are enabled (which don't contain a VRF output). + /// + /// It is set in `on_initialize`, before it will contain the value from the last block. + #[pallet::storage] + #[pallet::getter(fn author_vrf_randomness)] + pub(super) type AuthorVrfRandomness = StorageValue<_, MaybeRandomness, ValueQuery>; + + /// The block numbers when the last and current epoch have started, respectively `N-1` and + /// `N`. + /// NOTE: We track this is in order to annotate the block number when a given pool of + /// entropy was fixed (i.e. it was known to chain observers). Since epochs are defined in + /// slots, which may be skipped, the block numbers may not line up with the slot numbers. + #[pallet::storage] + pub(super) type EpochStart = + StorageValue<_, (T::BlockNumber, T::BlockNumber), ValueQuery>; + + /// How late the current block is compared to its parent. + /// + /// This entry is populated as part of block execution and is cleaned up + /// on block finalization. Querying this storage entry outside of block + /// execution context should always yield zero. + #[pallet::storage] + #[pallet::getter(fn lateness)] + pub(super) type Lateness = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// The configuration for the current epoch. Should never be `None` as it is initialized in + /// genesis. + #[pallet::storage] + pub(super) type EpochConfig = StorageValue<_, BabeEpochConfiguration>; + + /// The configuration for the next epoch, `None` if the config will not change + /// (you can fallback to `EpochConfig` instead in that case). + #[pallet::storage] + pub(super) type NextEpochConfig = StorageValue<_, BabeEpochConfiguration>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + pub epoch_config: Option, } -} -decl_module! { - /// The BABE Pallet - pub struct Module for enum Call where origin: T::Origin { - /// The number of **slots** that an epoch takes. We couple sessions to - /// epochs, i.e. we start a new session once the new epoch begins. - const EpochDuration: u64 = T::EpochDuration::get(); + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { authorities: Default::default(), epoch_config: Default::default() } + } + } - /// The expected average block time at which BABE should be creating - /// blocks. Since BABE is probabilistic it is not trivial to figure out - /// what the expected average block time should be based on the slot - /// duration and the security parameter `c` (where `1 - c` represents - /// the probability of a slot being empty). 
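The storage declarations above mix `ValueQuery` items (such as `EpochIndex`) with plain `Option`-returning ones (such as `EpochConfig`). A minimal standalone model of the difference between the two query kinds, with a `HashMap` standing in for the trie:

```rust
use std::collections::HashMap;

struct Storage(HashMap<&'static str, u64>);

impl Storage {
    // OptionQuery-style read: absence is observable (like `EpochConfig` above,
    // which is why the pallet guards its reads with expect())
    fn get_opt(&self, key: &'static str) -> Option<u64> {
        self.0.get(key).copied()
    }
    // ValueQuery-style read: absence collapses to Default (like `EpochIndex`)
    fn get_or_default(&self, key: &'static str) -> u64 {
        self.get_opt(key).unwrap_or_default()
    }
}

fn main() {
    let storage = Storage(HashMap::new());
    assert_eq!(storage.get_opt("EpochConfig"), None);
    assert_eq!(storage.get_or_default("EpochIndex"), 0);
}
```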
- const ExpectedBlockTime: T::Moment = T::ExpectedBlockTime::get(); + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + SegmentIndex::::put(0); + Pallet::::initialize_authorities(&self.authorities); + EpochConfig::::put( + self.epoch_config.clone().expect("epoch_config must not be None"), + ); + } + } + #[pallet::hooks] + impl Hooks> for Pallet { /// Initialization - fn on_initialize(now: T::BlockNumber) -> Weight { + fn on_initialize(now: BlockNumberFor) -> Weight { Self::do_initialize(now); - 0 } /// Block finalization - fn on_finalize() { + fn on_finalize(_n: BlockNumberFor) { // at the end of the block, we can safely include the new VRF output // from this block into the under-construction randomness. If we've determined // that this block was the first in a new epoch, the changeover logic has // already occurred at this point, so the under-construction randomness // will only contain outputs from the right epoch. - if let Some(Some(randomness)) = Initialized::take() { + if let Some(Some(randomness)) = Initialized::::take() { Self::deposit_randomness(&randomness); } - // The stored author generated VRF output is ephemeral. - AuthorVrfRandomness::kill(); - // remove temporary "environment" entry from storage Lateness::::kill(); } + } + #[pallet::call] + impl Pallet { /// Report authority equivocation/misbehavior. This method will verify /// the equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence will /// be reported. - #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] - fn report_equivocation( - origin, - equivocation_proof: EquivocationProof, + #[pallet::weight(::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + ))] + pub fn report_equivocation( + origin: OriginFor, + equivocation_proof: Box>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; - Self::do_report_equivocation( - Some(reporter), - equivocation_proof, - key_owner_proof, - ) + Self::do_report_equivocation(Some(reporter), *equivocation_proof, key_owner_proof) } /// Report authority equivocation/misbehavior. This method will verify @@ -294,54 +366,58 @@ decl_module! { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. - #[weight = ::WeightInfo::report_equivocation(key_owner_proof.validator_count())] - fn report_equivocation_unsigned( - origin, - equivocation_proof: EquivocationProof, + #[pallet::weight(::WeightInfo::report_equivocation( + key_owner_proof.validator_count(), + ))] + pub fn report_equivocation_unsigned( + origin: OriginFor, + equivocation_proof: Box>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; Self::do_report_equivocation( T::HandleEquivocation::block_author(), - equivocation_proof, + *equivocation_proof, key_owner_proof, ) } + + /// Plan an epoch config change. The epoch config change is recorded and will be enacted on + /// the next call to `enact_epoch_change`. The config will be activated one epoch after. + /// Multiple calls to this method will replace any existing planned config change that had + /// not been enacted yet. 
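The dispatchables above now take `Box<EquivocationProof<...>>` and are constructed with struct-style variants elsewhere in this diff. A plausible motivation, and a standard FRAME pattern, is keeping the outer `Call` enum small, since every value of the enum pays for its largest variant; the size assertions below assume a 64-bit target.

```rust
#[allow(dead_code)]
enum InlineCall {
    // a ~512-byte proof carried inline inflates every value of the enum
    ReportEquivocationUnsigned { equivocation_proof: [u8; 512], key_owner_proof: u8 },
    Noop,
}

#[allow(dead_code)]
enum BoxedCall {
    // boxed: only a pointer is stored inline
    ReportEquivocationUnsigned { equivocation_proof: Box<[u8; 512]>, key_owner_proof: u8 },
    Noop,
}

fn main() {
    // struct-style construction, as in `Call::report_equivocation_unsigned { .. }` above
    let _call = BoxedCall::ReportEquivocationUnsigned {
        equivocation_proof: Box::new([0u8; 512]),
        key_owner_proof: 0,
    };
    assert!(std::mem::size_of::<InlineCall>() >= 512);
    assert!(std::mem::size_of::<BoxedCall>() <= 16); // pointer plus tag on 64-bit
}
```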
+ #[pallet::weight(::WeightInfo::plan_config_change())] + pub fn plan_config_change( + origin: OriginFor, + config: NextConfigDescriptor, + ) -> DispatchResult { + ensure_root(origin)?; + PendingEpochConfigChange::::put(config); + Ok(()) + } } -} -impl RandomnessT<::Hash> for Module { - /// Some BABE blocks have VRF outputs where the block producer has exactly one bit of influence, - /// either they make the block or they do not make the block and thus someone else makes the - /// next block. Yet, this randomness is not fresh in all BABE blocks. - /// - /// If that is an insufficient security guarantee then two things can be used to improve this - /// randomness: - /// - /// - Name, in advance, the block number whose random value will be used; ensure your module - /// retains a buffer of previous random values for its subject and then index into these in - /// order to obviate the ability of your user to look up the parent hash and choose when to - /// transact based upon it. - /// - Require your user to first commit to an additional value by first posting its hash. - /// Require them to reveal the value to determine the final result, hashing it with the - /// output of this random function. This reduces the ability of a cabal of block producers - /// from conspiring against individuals. - fn random(subject: &[u8]) -> T::Hash { - let mut subject = subject.to_vec(); - subject.reserve(VRF_OUTPUT_LENGTH); - subject.extend_from_slice(&Self::randomness()[..]); - - ::Hashing::hash(&subject[..]) + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + Self::validate_unsigned(source, call) + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + Self::pre_dispatch(call) + } } } /// A BABE public key pub type BabeKey = [u8; PUBLIC_KEY_LENGTH]; -impl FindAuthor for Module { - fn find_author<'a, I>(digests: I) -> Option where - I: 'a + IntoIterator +impl FindAuthor for Pallet { + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, { for (id, mut data) in digests.into_iter() { if id == BABE_ENGINE_ID { @@ -350,19 +426,17 @@ impl FindAuthor for Module { } } - return None; + return None } } -impl IsMember for Module { +impl IsMember for Pallet { fn is_member(authority_id: &AuthorityId) -> bool { - >::authorities() - .iter() - .any(|id| &id.0 == authority_id) + >::authorities().iter().any(|id| &id.0 == authority_id) } } -impl pallet_session::ShouldEndSession for Module { +impl pallet_session::ShouldEndSession for Pallet { fn should_end_session(now: T::BlockNumber) -> bool { // it might be (and it is in current implementation) that session module is calling // should_end_session() from it's own on_initialize() handler @@ -374,12 +448,12 @@ impl pallet_session::ShouldEndSession for Module { } } -impl Module { +impl Pallet { /// Determine the BABE slot duration based on the Timestamp module configuration. pub fn slot_duration() -> T::Moment { // we double the minimum block-period so each author can always propose within // the majority of their slot. - ::MinimumPeriod::get().saturating_mul(2.into()) + ::MinimumPeriod::get().saturating_mul(2u32.into()) } /// Determine whether an epoch change should take place at this block. @@ -394,8 +468,8 @@ impl Module { // the same randomness and validator set as signalled in the genesis, // so we don't rotate the epoch. 
now != One::one() && { - let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); - diff >= T::EpochDuration::get() + let diff = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()); + *diff >= T::EpochDuration::get() } } @@ -407,37 +481,27 @@ impl Module { /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot /// number will grow while the block number will not. Hence, the result can be interpreted as an /// upper bound. - // -------------- IMPORTANT NOTE -------------- + // ## IMPORTANT NOTE + // // This implementation is linked to how [`should_epoch_change`] is working. This might need to // be updated accordingly, if the underlying mechanics of slot and epochs change. // - // WEIGHT NOTE: This function is tied to the weight of `EstimateNextSessionRotation`. If you update - // this function, you must also update the corresponding weight. + // WEIGHT NOTE: This function is tied to the weight of `EstimateNextSessionRotation`. If you + // update this function, you must also update the corresponding weight. pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); - next_slot - .checked_sub(CurrentSlot::get()) - .map(|slots_remaining| { - // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. - let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); - now.saturating_add(blocks_remaining) - }) + next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }) } - /// Plan an epoch config change. The epoch config change is recorded and will be enacted on the - /// next call to `enact_epoch_change`. The config will be activated one epoch after. Multiple calls to this - /// method will replace any existing planned config change that had not been enacted yet. - pub fn plan_config_change( - config: NextConfigDescriptor, - ) { - NextEpochConfig::put(config); - } - - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, - /// and the caller is the only caller of this function. + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` + /// has returned `true`, and the caller is the only caller of this function. /// - /// Typically, this is not handled directly by the user, but by higher-level validator-set manager logic like - /// `pallet-session`. + /// Typically, this is not handled directly by the user, but by higher-level validator-set + /// manager logic like `pallet-session`. pub fn enact_epoch_change( authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, next_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, @@ -447,12 +511,12 @@ impl Module { debug_assert!(Self::initialized().is_some()); // Update epoch index - let epoch_index = EpochIndex::get() + let epoch_index = EpochIndex::::get() .checked_add(1) .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - EpochIndex::put(epoch_index); - Authorities::put(authorities); + EpochIndex::::put(epoch_index); + Authorities::::put(authorities); // Update epoch randomness. 
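A worked example of the `next_expected_epoch_change` estimate above, under its stated best-effort assumption of one slot per block; the numbers reuse the mock's 3-slot `EpochDuration`:

```rust
fn next_expected_epoch_change(
    now: u64,
    current_slot: u64,
    current_epoch_start: u64,
    epoch_duration: u64,
) -> Option<u64> {
    let next_slot = current_epoch_start.saturating_add(epoch_duration);
    // None when the boundary slot is already behind us
    next_slot
        .checked_sub(current_slot)
        .map(|slots_remaining| now.saturating_add(slots_remaining))
}

fn main() {
    // epoch started at slot 9, we are at slot 10 in block 100:
    // the boundary is slot 12, so expect the change around block 102
    assert_eq!(next_expected_epoch_change(100, 10, 9, 3), Some(102));
    // already past the boundary: no estimate
    assert_eq!(next_expected_epoch_change(100, 13, 9, 3), None);
}
```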
let next_epoch_index = epoch_index @@ -462,47 +526,108 @@ impl Module { // Returns randomness for the current epoch and computes the *next* // epoch randomness. let randomness = Self::randomness_change_epoch(next_epoch_index); - Randomness::put(randomness); + Randomness::::put(randomness); + + // Update the next epoch authorities. + NextAuthorities::::put(&next_authorities); + + // Update the start blocks of the previous and new current epoch. + >::mutate(|(previous_epoch_start_block, current_epoch_start_block)| { + *previous_epoch_start_block = sp_std::mem::take(current_epoch_start_block); + *current_epoch_start_block = >::block_number(); + }); // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. - let next_randomness = NextRandomness::get(); + let next_randomness = NextRandomness::::get(); - let next_epoch = NextEpochDescriptor { - authorities: next_authorities, - randomness: next_randomness, - }; + let next_epoch = + NextEpochDescriptor { authorities: next_authorities, randomness: next_randomness }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); - if let Some(next_config) = NextEpochConfig::take() { - Self::deposit_consensus(ConsensusLog::NextConfigData(next_config)); + if let Some(next_config) = NextEpochConfig::::get() { + EpochConfig::::put(next_config); + } + + if let Some(pending_epoch_config_change) = PendingEpochConfigChange::::take() { + let next_epoch_config: BabeEpochConfiguration = + pending_epoch_config_change.clone().into(); + NextEpochConfig::::put(next_epoch_config); + + Self::deposit_consensus(ConsensusLog::NextConfigData(pending_epoch_config_change)); + } + } + + /// Finds the start slot of the current epoch. only guaranteed to + /// give correct results after `do_initialize` of the first block + /// in the chain (as its result is based off of `GenesisSlot`). + pub fn current_epoch_start() -> Slot { + Self::epoch_start(EpochIndex::::get()) + } + + /// Produces information about the current epoch. + pub fn current_epoch() -> Epoch { + Epoch { + epoch_index: EpochIndex::::get(), + start_slot: Self::current_epoch_start(), + duration: T::EpochDuration::get(), + authorities: Self::authorities(), + randomness: Self::randomness(), + config: EpochConfig::::get() + .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), + } + } + + /// Produces information about the next epoch (which was already previously + /// announced). + pub fn next_epoch() -> Epoch { + let next_epoch_index = EpochIndex::::get().checked_add(1).expect( + "epoch index is u64; it is always only incremented by one; \ + if u64 is not enough we should crash for safety; qed.", + ); + + Epoch { + epoch_index: next_epoch_index, + start_slot: Self::epoch_start(next_epoch_index), + duration: T::EpochDuration::get(), + authorities: NextAuthorities::::get(), + randomness: NextRandomness::::get(), + config: NextEpochConfig::::get().unwrap_or_else(|| { + EpochConfig::::get().expect( + "EpochConfig is initialized in genesis; we never `take` or `kill` it; qed", + ) + }), } } - // finds the start slot of the current epoch. only guaranteed to - // give correct results after `do_initialize` of the first block - // in the chain (as its result is based off of `GenesisSlot`). 
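The new `EpochStart` bookkeeping above shifts the current epoch's start block into the "previous" position with `mem::take` before recording the new one. A standalone model of that rotation:

```rust
fn rotate_epoch_start(epoch_start: &mut (u64, u64), now: u64) {
    // (previous_epoch_start_block, current_epoch_start_block)
    epoch_start.0 = std::mem::take(&mut epoch_start.1);
    epoch_start.1 = now;
}

fn main() {
    let mut epoch_start = (0, 0); // genesis: neither epoch has started
    rotate_epoch_start(&mut epoch_start, 5);
    rotate_epoch_start(&mut epoch_start, 11);
    // epochs N-1 and N started at blocks 5 and 11 respectively
    assert_eq!(epoch_start, (5, 11));
}
```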
- pub fn current_epoch_start() -> SlotNumber { - (EpochIndex::get() * T::EpochDuration::get()) + GenesisSlot::get() + fn epoch_start(epoch_index: u64) -> Slot { + // (epoch_index * epoch_duration) + genesis_slot + + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + + let epoch_start = epoch_index.checked_mul(T::EpochDuration::get()).expect(PROOF); + + epoch_start.checked_add(*GenesisSlot::::get()).expect(PROOF).into() } fn deposit_consensus(new: U) { let log: DigestItem = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); - >::deposit_log(log.into()) + >::deposit_log(log.into()) } fn deposit_randomness(randomness: &schnorrkel::Randomness) { - let segment_idx = ::get(); - let mut segment = ::get(&segment_idx); + let segment_idx = SegmentIndex::::get(); + let mut segment = UnderConstruction::::get(&segment_idx); if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { // push onto current segment: not full. segment.push(*randomness); - ::insert(&segment_idx, &segment); + UnderConstruction::::insert(&segment_idx, &segment); } else { // move onto the next segment and update the index. let segment_idx = segment_idx + 1; - ::insert(&segment_idx, &vec![randomness.clone()]); - ::put(&segment_idx); + UnderConstruction::::insert(&segment_idx, &vec![randomness.clone()]); + SegmentIndex::::put(&segment_idx); } } @@ -511,19 +636,22 @@ impl Module { // => let's ensure that we only modify the storage once per block let initialized = Self::initialized().is_some(); if initialized { - return; + return } - let maybe_pre_digest: Option = >::digest() - .logs - .iter() - .filter_map(|s| s.as_pre_runtime()) - .filter_map(|(id, mut data)| if id == BABE_ENGINE_ID { - PreDigest::decode(&mut data).ok() - } else { - None - }) - .next(); + let maybe_pre_digest: Option = + >::digest() + .logs + .iter() + .filter_map(|s| s.as_pre_runtime()) + .filter_map(|(id, mut data)| { + if id == BABE_ENGINE_ID { + PreDigest::decode(&mut data).ok() + } else { + None + } + }) + .next(); let is_primary = matches!(maybe_pre_digest, Some(PreDigest::Primary(..))); @@ -531,9 +659,9 @@ impl Module { // on the first non-zero block (i.e. block #1) // this is where the first epoch (epoch #0) actually starts. // we need to adjust internal storage accordingly. 
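The `deposit_randomness` hunk above appends VRF outputs in fixed-size segments so that no single storage value grows without bound within an epoch. A standalone model, with 256 mirroring `UNDER_CONSTRUCTION_SEGMENT_LENGTH` and a `HashMap` standing in for the `UnderConstruction` map:

```rust
use std::collections::HashMap;

const SEGMENT_LEN: usize = 256; // UNDER_CONSTRUCTION_SEGMENT_LENGTH

struct UnderConstruction {
    segment_index: u32,
    segments: HashMap<u32, Vec<[u8; 32]>>,
}

impl UnderConstruction {
    fn deposit(&mut self, randomness: [u8; 32]) {
        let segment = self.segments.entry(self.segment_index).or_default();
        if segment.len() < SEGMENT_LEN {
            segment.push(randomness); // current segment still has room
        } else {
            self.segment_index += 1; // move onto the next segment
            self.segments.insert(self.segment_index, vec![randomness]);
        }
    }
}

fn main() {
    let mut uc = UnderConstruction { segment_index: 0, segments: HashMap::new() };
    for i in 0..300u32 {
        uc.deposit([i as u8; 32]);
    }
    assert_eq!(uc.segment_index, 1);
    assert_eq!(uc.segments[&0].len(), 256);
    assert_eq!(uc.segments[&1].len(), 44);
}
```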
- if GenesisSlot::get() == 0 { - GenesisSlot::put(digest.slot_number()); - debug_assert_ne!(GenesisSlot::get(), 0); + if *GenesisSlot::::get() == 0 { + GenesisSlot::::put(digest.slot()); + debug_assert_ne!(*GenesisSlot::::get(), 0); // deposit a log because this is the first block in epoch #0 // we use the same values as genesis because we haven't collected any @@ -547,53 +675,51 @@ impl Module { } // the slot number of the current block being initialized - let current_slot = digest.slot_number(); + let current_slot = digest.slot(); // how many slots were skipped between current and last block - let lateness = current_slot.saturating_sub(CurrentSlot::get() + 1); - let lateness = T::BlockNumber::from(lateness as u32); + let lateness = current_slot.saturating_sub(CurrentSlot::::get() + 1); + let lateness = T::BlockNumber::from(*lateness as u32); Lateness::::put(lateness); - CurrentSlot::put(current_slot); + CurrentSlot::::put(current_slot); let authority_index = digest.authority_index(); + if T::DisabledValidators::is_disabled(authority_index) { + panic!( + "Validator with index {:?} is disabled and should not be attempting to author blocks.", + authority_index, + ); + } + // Extract out the VRF output if we have it - digest - .vrf_output() - .and_then(|vrf_output| { - // Reconstruct the bytes of VRFInOut using the authority id. - Authorities::get() - .get(authority_index as usize) - .and_then(|author| { - schnorrkel::PublicKey::from_bytes(author.0.as_slice()).ok() - }) - .and_then(|pubkey| { - let transcript = sp_consensus_babe::make_transcript( - &Self::randomness(), - current_slot, - EpochIndex::get(), - ); - - vrf_output.0.attach_input_hash( - &pubkey, - transcript - ).ok() - }) - .map(|inout| { - inout.make_bytes(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT) - }) - }) + digest.vrf_output().and_then(|vrf_output| { + // Reconstruct the bytes of VRFInOut using the authority id. + Authorities::::get() + .get(authority_index as usize) + .and_then(|author| schnorrkel::PublicKey::from_bytes(author.0.as_slice()).ok()) + .and_then(|pubkey| { + let transcript = sp_consensus_babe::make_transcript( + &Self::randomness(), + current_slot, + EpochIndex::::get(), + ); + + vrf_output.0.attach_input_hash(&pubkey, transcript).ok() + }) + .map(|inout| inout.make_bytes(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT)) + }) }); // For primary VRF output we place it in the `Initialized` storage // item and it'll be put onto the under-construction randomness later, // once we've decided which epoch this block is in. - Initialized::put(if is_primary { maybe_randomness } else { None }); + Initialized::::put(if is_primary { maybe_randomness } else { None }); // Place either the primary or secondary VRF output into the // `AuthorVrfRandomness` storage item. - AuthorVrfRandomness::put(maybe_randomness); + AuthorVrfRandomness::::put(maybe_randomness); // enact epoch change, if necessary. T::EpochChangeTrigger::trigger::(now) @@ -602,8 +728,8 @@ impl Module { /// Call this function exactly once when an epoch changes, to update the /// randomness. Returns the new randomness. fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { - let this_randomness = NextRandomness::get(); - let segment_idx: u32 = ::mutate(|s| sp_std::mem::replace(s, 0)); + let this_randomness = NextRandomness::::get(); + let segment_idx: u32 = SegmentIndex::::mutate(|s| sp_std::mem::replace(s, 0)); // overestimate to the segment being full. 
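Two of the per-block checks in `do_initialize` above, modeled standalone: lateness counts the slots skipped since the parent block, and a pre-digest naming a disabled authority index aborts block execution outright (the new `DisabledValidators` hook).

```rust
fn do_initialize_checks(
    parent_slot: u64,
    current_slot: u64,
    authority_index: u32,
    disabled: &[u32],
) -> u64 {
    // T::DisabledValidators::is_disabled(authority_index) in the pallet
    if disabled.contains(&authority_index) {
        panic!(
            "Validator with index {:?} is disabled and should not be attempting to author blocks.",
            authority_index,
        );
    }
    // lateness: the slots strictly between parent and current were skipped
    current_slot.saturating_sub(parent_slot + 1)
}

fn main() {
    assert_eq!(do_initialize_checks(7, 8, 0, &[3]), 0); // consecutive slots: on time
    assert_eq!(do_initialize_checks(7, 11, 0, &[3]), 3); // slots 8, 9, 10 skipped
}
```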
let rho_size = segment_idx.saturating_add(1) as usize * UNDER_CONSTRUCTION_SEGMENT_LENGTH; @@ -611,17 +737,18 @@ impl Module { let next_randomness = compute_randomness( this_randomness, next_epoch_index, - (0..segment_idx).flat_map(|i| ::take(&i)), + (0..segment_idx).flat_map(|i| UnderConstruction::::take(&i)), Some(rho_size), ); - NextRandomness::put(&next_randomness); + NextRandomness::::put(&next_randomness); this_randomness } fn initialize_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) { if !authorities.is_empty() { - assert!(Authorities::get().is_empty(), "Authorities are already initialized!"); - Authorities::put(authorities); + assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); + Authorities::::put(authorities); + NextAuthorities::::put(authorities); } } @@ -631,23 +758,23 @@ impl Module { key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { let offender = equivocation_proof.offender.clone(); - let slot_number = equivocation_proof.slot_number; + let slot = equivocation_proof.slot; // validate the equivocation proof if !sp_consensus_babe::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } let validator_set_count = key_owner_proof.validator_count(); let session_index = key_owner_proof.session(); - let epoch_index = (slot_number.saturating_sub(GenesisSlot::get()) / T::EpochDuration::get()) + let epoch_index = (*slot.saturating_sub(GenesisSlot::::get()) / T::EpochDuration::get()) .saturated_into::(); // check that the slot number is consistent with the session index // in the key ownership proof (i.e. slot is for that epoch) if epoch_index != session_index { - return Err(Error::::InvalidKeyOwnershipProof.into()); + return Err(Error::::InvalidKeyOwnershipProof.into()) } // check the membership proof and extract the offender's id @@ -655,12 +782,8 @@ impl Module { let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof) .ok_or(Error::::InvalidKeyOwnershipProof)?; - let offence = BabeEquivocationOffence { - slot: slot_number, - validator_set_count, - offender, - session_index, - }; + let offence = + BabeEquivocationOffence { slot, validator_set_count, offender, session_index }; let reporters = match reporter { Some(id) => vec![id], @@ -690,52 +813,73 @@ impl Module { } } -impl OnTimestampSet for Module { - fn on_timestamp_set(_moment: T::Moment) { } +impl OnTimestampSet for Pallet { + fn on_timestamp_set(moment: T::Moment) { + let slot_duration = Self::slot_duration(); + assert!(!slot_duration.is_zero(), "Babe slot duration cannot be zero."); + + let timestamp_slot = moment / slot_duration; + let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); + + assert!( + CurrentSlot::::get() == timestamp_slot, + "Timestamp slot must match `CurrentSlot`" + ); + } } -impl frame_support::traits::EstimateNextSessionRotation for Module { - fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { - Self::next_expected_epoch_change(now) +impl frame_support::traits::EstimateNextSessionRotation for Pallet { + fn average_session_length() -> T::BlockNumber { + T::EpochDuration::get().saturated_into() + } + + fn estimate_current_session_progress(_now: T::BlockNumber) -> (Option, Weight) { + let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; + + ( + Some(Permill::from_rational(*elapsed, T::EpochDuration::get())), + // Read: Current Slot, Epoch Index, Genesis Slot + 
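The `on_timestamp_set` hunk above replaces a no-op with a consistency assertion: the slot implied by the timestamp inherent must equal the slot claimed in the block's pre-digest. A standalone model, using the pallet's rule that slot duration is twice the timestamp pallet's `MinimumPeriod`:

```rust
fn on_timestamp_set(moment: u64, minimum_period: u64, current_slot: u64) {
    // slot_duration() doubles MinimumPeriod so each author can always
    // propose within the majority of their slot
    let slot_duration = minimum_period.saturating_mul(2);
    assert!(slot_duration != 0, "Babe slot duration cannot be zero.");

    let timestamp_slot = moment / slot_duration;
    assert!(current_slot == timestamp_slot, "Timestamp slot must match `CurrentSlot`");
}

fn main() {
    // a 3000ms minimum period gives 6000ms slots; t = 12_300ms lies in slot 2
    on_timestamp_set(12_300, 3_000, 2);
}
```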
T::DbWeight::get().reads(3), + ) } - // The validity of this weight depends on the implementation of `estimate_next_session_rotation` - fn weight(_now: T::BlockNumber) -> Weight { - // Read: Current Slot, Epoch Index, Genesis Slot - T::DbWeight::get().reads(3) + fn estimate_next_session_rotation(now: T::BlockNumber) -> (Option, Weight) { + ( + Self::next_expected_epoch_change(now), + // Read: Current Slot, Epoch Index, Genesis Slot + T::DbWeight::get().reads(3), + ) } } -impl frame_support::traits::Lateness for Module { +impl frame_support::traits::Lateness for Pallet { fn lateness(&self) -> T::BlockNumber { Self::lateness() } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Pallet { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where + I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); Self::initialize_authorities(&authorities); } fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) - where I: Iterator + where + I: Iterator, { - let authorities = validators.map(|(_account, k)| { - (k, 1) - }).collect::>(); + let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); - let next_authorities = queued_validators.map(|(_account, k)| { - (k, 1) - }).collect::>(); + let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); Self::enact_epoch_change(authorities, next_authorities) } @@ -752,7 +896,7 @@ impl pallet_session::OneSessionHandler for Module { fn compute_randomness( last_epoch_randomness: schnorrkel::Randomness, epoch_index: u64, - rho: impl Iterator, + rho: impl Iterator, rho_size_hint: Option, ) -> schnorrkel::Randomness { let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); @@ -766,28 +910,49 @@ fn compute_randomness( sp_io::hashing::blake2_256(&s) } -impl ProvideInherent for Module { - type Call = pallet_timestamp::Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; +pub mod migrations { + use super::*; + use frame_support::pallet_prelude::{StorageValue, ValueQuery}; - fn create_inherent(_: &InherentData) -> Option { - None + /// Something that can return the storage prefix of the `Babe` pallet. + pub trait BabePalletPrefix: Config { + fn pallet_prefix() -> &'static str; } - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - let timestamp = match call { - pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), - _ => return Ok(()), - }; + struct __OldNextEpochConfig(sp_std::marker::PhantomData); + impl frame_support::traits::StorageInstance for __OldNextEpochConfig { + fn pallet_prefix() -> &'static str { + T::pallet_prefix() + } + const STORAGE_PREFIX: &'static str = "NextEpochConfig"; + } - let timestamp_based_slot = (timestamp / Self::slot_duration()).saturated_into::(); - let seal_slot = data.babe_inherent_data()?; + type OldNextEpochConfig = + StorageValue<__OldNextEpochConfig, Option, ValueQuery>; - if timestamp_based_slot == seal_slot { - Ok(()) - } else { - Err(sp_inherents::Error::from("timestamp set in block doesn't match slot in seal").into()) + /// A storage migration that adds the current epoch configuration for Babe + /// to storage. 
+ pub fn add_epoch_configuration( + epoch_config: BabeEpochConfiguration, + ) -> Weight { + let mut writes = 0; + let mut reads = 0; + + if let Some(pending_change) = OldNextEpochConfig::::get() { + PendingEpochConfigChange::::put(pending_change); + + writes += 1; } + + reads += 1; + + OldNextEpochConfig::::kill(); + + EpochConfig::::put(epoch_config.clone()); + NextEpochConfig::::put(epoch_config); + + writes += 3; + + T::DbWeight::get().writes(writes) + T::DbWeight::get().reads(reads) } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 9f00a4ddfc3cd..bc0be32624cba 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,56 +17,67 @@ //! Test utilities +use crate::{self as pallet_babe, Config, CurrentSlot}; use codec::Encode; -use super::{Trait, Module, CurrentSlot}; -use sp_runtime::{ - Perbill, impl_opaque_keys, - curve::PiecewiseLinear, - testing::{Digest, DigestItem, Header, TestXt,}, - traits::{Header as _, IdentityLookup, OpaqueKeys}, +use frame_election_provider_support::onchain; +use frame_support::{ + parameter_types, + traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, }; use frame_system::InitKind; -use frame_support::{ - impl_outer_dispatch, impl_outer_origin, parameter_types, StorageValue, - traits::{KeyOwnerProofSystem, OnInitialize}, - weights::Weight, +use pallet_session::historical as pallet_session_historical; +use pallet_staking::EraIndex; +use sp_consensus_babe::{AuthorityId, AuthorityPair, Slot}; +use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_core::{ + crypto::{IsWrappedBy, KeyTypeId, Pair}, + H256, U256, }; use sp_io; -use sp_core::{H256, U256, crypto::{IsWrappedBy, KeyTypeId, Pair}}; -use sp_consensus_babe::{AuthorityId, AuthorityPair, SlotNumber}; -use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_runtime::{ + curve::PiecewiseLinear, + impl_opaque_keys, + testing::{Digest, DigestItem, Header, TestXt}, + traits::{Header as _, IdentityLookup, OpaqueKeys}, + Perbill, +}; use sp_staking::SessionIndex; -use pallet_staking::EraIndex; - -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - babe::Babe, - staking::Staking, - } -} type DummyValidatorId = u64; -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
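A hedged standalone model of the `add_epoch_configuration` migration above: move any pending `NextEpochConfig` value into `PendingEpochConfigChange`, seed both the current and next epoch configs, and account reads and writes the same way the real function does. `Option<u32>` fields stand in for the descriptor and configuration types, and the returned pair stands in for the weight.

```rust
struct Db {
    old_next_epoch_config: Option<u32>, // the pre-migration NextEpochConfig key
    pending_epoch_config_change: Option<u32>,
    epoch_config: Option<u32>,
    next_epoch_config: Option<u32>,
}

fn add_epoch_configuration(db: &mut Db, epoch_config: u32) -> (u64, u64) {
    let (mut reads, mut writes) = (0u64, 0u64);

    reads += 1; // the old value is read whether or not it is Some
    if let Some(pending) = db.old_next_epoch_config.take() {
        db.pending_epoch_config_change = Some(pending);
        writes += 1;
    }

    db.epoch_config = Some(epoch_config);
    db.next_epoch_config = Some(epoch_config);
    writes += 3; // the kill of the old key plus the two puts

    (reads, writes)
}

fn main() {
    let mut db = Db {
        old_next_epoch_config: Some(7),
        pending_epoch_config_change: None,
        epoch_config: None,
        next_epoch_config: None,
    };
    assert_eq!(add_epoch_configuration(&mut db, 1), (1, 4));
    assert_eq!(db.pending_epoch_config_change, Some(7));
    assert_eq!(db.epoch_config, Some(1));
}
```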
-#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Historical: pallet_session_historical::{Pallet}, + Offences: pallet_offences::{Pallet, Storage, Event}, + Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, + Staking: pallet_staking::{Pallet, Call, Storage, Config, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + } +); parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const EpochDuration: u64 = 3; - pub const ExpectedBlockTime: u64 = 1; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -77,20 +88,15 @@ impl frame_system::Trait for Test { type AccountId = DummyValidatorId; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } impl frame_system::offchain::SendTransactionTypes for Test @@ -103,13 +109,13 @@ where impl_opaque_keys! { pub struct MockSessionKeys { - pub babe_authority: super::Module, + pub babe_authority: super::Pallet, } } -impl pallet_session::Trait for Test { - type Event = (); - type ValidatorId = ::AccountId; +impl pallet_session::Config for Test { + type Event = Event; + type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; @@ -120,7 +126,7 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -129,7 +135,7 @@ parameter_types! 
{ pub const UncleGenerations: u64 = 0; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -140,7 +146,7 @@ parameter_types! { pub const MinimumPeriod: u64 = 1; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -151,11 +157,13 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type Event = (); + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -180,13 +188,19 @@ parameter_types! { pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const ElectionLookahead: u64 = 0; - pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; + pub const StakingUnsignedPriority: u64 = u64::MAX / 2; } -impl pallet_staking::Trait for Test { +impl onchain::Config for Test { + type Accuracy = Perbill; + type DataProvider = Staking; +} + +impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type Event = (); + type Event = Event; type Currency = Balances; type Slash = (); type Reward = (); @@ -195,34 +209,33 @@ impl pallet_staking::Trait for Test { type SlashDeferDuration = SlashDeferDuration; type SlashCancelOrigin = frame_system::EnsureRoot; type SessionInterface = Self; - type UnixTime = pallet_timestamp::Module; - type RewardCurve = RewardCurve; + type UnixTime = pallet_timestamp::Pallet; + type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type UnsignedPriority = StakingUnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } -parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); -} - -impl pallet_offences::Trait for Test { - type Event = (); +impl pallet_offences::Config for Test { + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; - type WeightSoftLimit = OffencesWeightSoftLimit; } -impl Trait for Test { +parameter_types! 
{
+	pub const EpochDuration: u64 = 3;
+	pub const ExpectedBlockTime: u64 = 1;
+	pub const ReportLongevity: u64 =
+		BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * EpochDuration::get();
+}
+
+impl Config for Test {
 	type EpochDuration = EpochDuration;
 	type ExpectedBlockTime = ExpectedBlockTime;
 	type EpochChangeTrigger = crate::ExternalTrigger;
+	type DisabledValidators = Session;
 	type KeyOwnerProofSystem = Historical;
@@ -234,23 +247,16 @@ impl Trait for Test {
 		AuthorityId,
 	)>>::IdentificationTuple;
-	type HandleEquivocation = super::EquivocationHandler;
+	type HandleEquivocation =
+		super::EquivocationHandler;
+	type WeightInfo = ();
 }
 
-pub type Balances = pallet_balances::Module;
-pub type Historical = pallet_session::historical::Module;
-pub type Offences = pallet_offences::Module;
-pub type Session = pallet_session::Module;
-pub type Staking = pallet_staking::Module;
-pub type System = frame_system::Module;
-pub type Timestamp = pallet_timestamp::Module;
-pub type Babe = Module;
-
 pub fn go_to_block(n: u64, s: u64) {
 	use frame_support::traits::OnFinalize;
 
-	System::on_finalize(System::block_number());
+	Babe::on_finalize(System::block_number());
 	Session::on_finalize(System::block_number());
 	Staking::on_finalize(System::block_number());
@@ -261,25 +267,19 @@ pub fn go_to_block(n: u64, s: u64) {
 		System::parent_hash()
 	};
 
-	let pre_digest = make_secondary_plain_pre_digest(0, s);
+	let pre_digest = make_secondary_plain_pre_digest(0, s.into());
 
-	System::initialize(&n, &parent_hash, &Default::default(), &pre_digest, InitKind::Full);
-	System::set_block_number(n);
-	Timestamp::set_timestamp(n);
-
-	if s > 1 {
-		CurrentSlot::put(s);
-	}
+	System::initialize(&n, &parent_hash, &pre_digest, InitKind::Full);
 
-	System::on_initialize(n);
+	Babe::on_initialize(n);
 	Session::on_initialize(n);
 	Staking::on_initialize(n);
 }
 
 /// Slots will grow according to blocks
 pub fn progress_to_block(n: u64) {
-	let mut slot = Babe::current_slot() + 1;
-	for i in System::block_number()+1..=n {
+	let mut slot = u64::from(Babe::current_slot()) + 1;
+	for i in System::block_number() + 1..=n {
 		go_to_block(i, slot);
 		slot += 1;
 	}
@@ -298,19 +298,19 @@ pub fn start_era(era_index: EraIndex) {
 	assert_eq!(Staking::current_era(), Some(era_index));
 }
 
-pub fn make_pre_digest(
+pub fn make_primary_pre_digest(
 	authority_index: sp_consensus_babe::AuthorityIndex,
-	slot_number: sp_consensus_babe::SlotNumber,
+	slot: sp_consensus_babe::Slot,
 	vrf_output: VRFOutput,
 	vrf_proof: VRFProof,
 ) -> Digest {
 	let digest_data = sp_consensus_babe::digests::PreDigest::Primary(
 		sp_consensus_babe::digests::PrimaryPreDigest {
 			authority_index,
-			slot_number,
+			slot,
 			vrf_output,
 			vrf_proof,
-		}
+		},
 	);
 	let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode());
 	Digest { logs: vec![log] }
@@ -318,13 +318,10 @@ pub fn make_pre_digest(
 pub fn make_secondary_plain_pre_digest(
 	authority_index: sp_consensus_babe::AuthorityIndex,
-	slot_number: sp_consensus_babe::SlotNumber,
+	slot: sp_consensus_babe::Slot,
 ) -> Digest {
 	let digest_data = sp_consensus_babe::digests::PreDigest::SecondaryPlain(
-		sp_consensus_babe::digests::SecondaryPlainPreDigest {
-			authority_index,
-			slot_number,
-		}
+		sp_consensus_babe::digests::SecondaryPlainPreDigest { authority_index, slot },
 	);
 	let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode());
 	Digest { logs: vec![log] }
@@ -332,31 +329,31 @@ pub fn make_secondary_plain_pre_digest(
 pub fn make_secondary_vrf_pre_digest(
 	authority_index:
sp_consensus_babe::AuthorityIndex, - slot_number: sp_consensus_babe::SlotNumber, + slot: sp_consensus_babe::Slot, vrf_output: VRFOutput, vrf_proof: VRFProof, ) -> Digest { let digest_data = sp_consensus_babe::digests::PreDigest::SecondaryVRF( sp_consensus_babe::digests::SecondaryVRFPreDigest { authority_index, - slot_number, + slot, vrf_output, vrf_proof, - } + }, ); let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); Digest { logs: vec![log] } } pub fn make_vrf_output( - slot_number: u64, - pair: &sp_consensus_babe::AuthorityPair + slot: Slot, + pair: &sp_consensus_babe::AuthorityPair, ) -> (VRFOutput, VRFProof, [u8; 32]) { let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); - let transcript = sp_consensus_babe::make_transcript(&Babe::randomness(), slot_number, 0); + let transcript = sp_consensus_babe::make_transcript(&Babe::randomness(), slot, 0); let vrf_inout = pair.vrf_sign(transcript); - let vrf_randomness: sp_consensus_vrf::schnorrkel::Randomness = vrf_inout.0 - .make_bytes::<[u8; 32]>(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); + let vrf_randomness: sp_consensus_vrf::schnorrkel::Randomness = + vrf_inout.0.make_bytes::<[u8; 32]>(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); let vrf_output = VRFOutput(vrf_inout.0.to_output()); let vrf_proof = VRFProof(vrf_inout.1); @@ -367,10 +364,12 @@ pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { new_test_ext_with_pairs(authorities_len).1 } -pub fn new_test_ext_with_pairs(authorities_len: usize) -> (Vec, sp_io::TestExternalities) { - let pairs = (0..authorities_len).map(|i| { - AuthorityPair::from_seed(&U256::from(i).into()) - }).collect::>(); +pub fn new_test_ext_with_pairs( + authorities_len: usize, +) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len) + .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); let public = pairs.iter().map(|p| p.public()).collect(); @@ -378,8 +377,12 @@ pub fn new_test_ext_with_pairs(authorities_len: usize) -> (Vec, s } pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) .unwrap(); // stashes are the index. 
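For orientation, the mock helpers defined above are consumed from the test module roughly as follows. This is a minimal sketch and not part of the diff; it assumes the surrounding `mock.rs` items (`new_test_ext_with_pairs`, `make_vrf_output`, `make_primary_pre_digest`) and `sp_consensus_babe::Slot` are in scope, mirroring how `tests.rs` below uses them:

    // Sketch only: how the mock helpers above are typically combined in a test.
    #[test]
    fn example_primary_pre_digest() {
        let (pairs, mut ext) = new_test_ext_with_pairs(3);
        ext.execute_with(|| {
            let slot = Slot::from(10);
            // Produce a VRF output/proof for authority 0 at `slot`...
            let (vrf_output, vrf_proof, _randomness) = make_vrf_output(slot, &pairs[0]);
            // ...and wrap it in a primary pre-digest for block initialization.
            let _pre_digest = make_primary_pre_digest(0, slot, vrf_output, vrf_proof);
        });
    }
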
@@ -387,41 +390,22 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes .iter() .enumerate() .map(|(i, k)| { - ( - i as u64, - i as u64, - MockSessionKeys { - babe_authority: AuthorityId::from(k.clone()), - }, - ) + (i as u64, i as u64, MockSessionKeys { babe_authority: AuthorityId::from(k.clone()) }) }) .collect(); - // controllers are the index + 1000 - let stakers: Vec<_> = (0..authorities.len()) - .map(|i| { - ( - i as u64, - i as u64 + 1000, - 10_000, - pallet_staking::StakerStatus::::Validator, - ) - }) - .collect(); - - let balances: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, 10_000_000)) - .collect(); - // NOTE: this will initialize the babe authorities // through OneSessionHandler::on_genesis_session pallet_session::GenesisConfig:: { keys: session_keys } .assimilate_storage(&mut t) .unwrap(); - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); + // controllers are the index + 1000 + let stakers: Vec<_> = (0..authorities.len()) + .map(|i| { + (i as u64, i as u64 + 1000, 10_000, pallet_staking::StakerStatus::::Validator) + }) + .collect(); let staking_config = pallet_staking::GenesisConfig:: { stakers, @@ -441,17 +425,17 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes pub fn generate_equivocation_proof( offender_authority_index: u32, offender_authority_pair: &AuthorityPair, - slot_number: SlotNumber, + slot: Slot, ) -> sp_consensus_babe::EquivocationProof

{
 	use sp_consensus_babe::digests::CompatibleDigestItem;
 
 	let current_block = System::block_number();
-	let current_slot = CurrentSlot::get();
+	let current_slot = CurrentSlot::::get();
 
 	let make_header = || {
 		let parent_hash = System::parent_hash();
-		let pre_digest = make_secondary_plain_pre_digest(offender_authority_index, slot_number);
-		System::initialize(&current_block, &parent_hash, &Default::default(), &pre_digest, InitKind::Full);
+		let pre_digest = make_secondary_plain_pre_digest(offender_authority_index, slot);
+		System::initialize(&current_block, &parent_hash, &pre_digest, InitKind::Full);
 		System::set_block_number(current_block);
 		Timestamp::set_timestamp(current_block);
 		System::finalize()
@@ -475,10 +459,10 @@ pub fn generate_equivocation_proof(
 	seal_header(&mut h2);
 
 	// restore previous runtime state
-	go_to_block(current_block, current_slot);
+	go_to_block(current_block, *current_slot);
 
 	sp_consensus_babe::EquivocationProof {
-		slot_number,
+		slot,
 		offender: offender_authority_pair.public(),
 		first_header: h1,
 		second_header: h2,
diff --git a/frame/babe/src/randomness.rs b/frame/babe/src/randomness.rs
new file mode 100644
index 0000000000000..7d18629050213
--- /dev/null
+++ b/frame/babe/src/randomness.rs
@@ -0,0 +1,148 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provides multiple implementations of the randomness trait based on the on-chain epoch
+//! randomness collected from VRF outputs.
+
+use super::{
+	AuthorVrfRandomness, Config, EpochStart, NextRandomness, Randomness, VRF_OUTPUT_LENGTH,
+};
+use frame_support::traits::Randomness as RandomnessT;
+use sp_runtime::traits::Hash;
+
+/// Randomness usable by consensus protocols that **depend** upon finality and take action
+/// based upon on-chain commitments made during the epoch before the previous epoch.
+///
+/// An off-chain consensus protocol requires randomness be finalized before usage, but one
+/// extra epoch delay beyond `RandomnessFromOneEpochAgo` suffices, under the assumption
+/// that finality never stalls for longer than one epoch.
+///
+/// All randomness is relative to commitments to any other inputs to the computation: If
+/// Alice samples randomness near perfectly using radioactive decay, but then afterwards
+/// Eve selects an arbitrary value with which to xor Alice's randomness, then Eve always
+/// wins whatever game they play.
+///
+/// All input commitments used with `RandomnessFromTwoEpochsAgo` should come from at least
+/// three epochs ago. We require BABE session keys be registered at least three epochs
+/// before being used to derive `CurrentBlockRandomness` for example.
+///
+/// All users learn `RandomnessFromTwoEpochsAgo` when epoch `current_epoch - 1` starts,
+/// although some learn it a few blocks earlier inside epoch `current_epoch - 2`.
+///
+/// Adversaries with enough block producers could bias this randomness by choosing upon
+/// what their block producers build at the end of epoch `current_epoch - 2` or the
+/// beginning of epoch `current_epoch - 1`, or skipping slots at the end of epoch
+/// `current_epoch - 2`.
+///
+/// Adversaries should not possess many block production slots towards the beginning or
+/// end of every epoch, but they possess some influence over when they possess more slots.
+pub struct RandomnessFromTwoEpochsAgo(sp_std::marker::PhantomData);
+
+/// Randomness usable by on-chain code that **does not depend** upon finality and takes
+/// action based upon on-chain commitments made during the previous epoch.
+///
+/// All randomness is relative to commitments to any other inputs to the computation: If
+/// Alice samples randomness near perfectly using radioactive decay, but then afterwards
+/// Eve selects an arbitrary value with which to xor Alice's randomness, then Eve always
+/// wins whatever game they play.
+///
+/// All input commitments used with `RandomnessFromOneEpochAgo` should come from at least
+/// two epochs ago, although the previous epoch might work in special cases under
+/// additional assumptions.
+///
+/// All users learn `RandomnessFromOneEpochAgo` at the end of the previous epoch, although
+/// some block producers learn it several blocks earlier.
+///
+/// Adversaries with enough block producers could bias this randomness by choosing upon
+/// what their block producers build at either the end of the previous epoch or the
+/// beginning of the current epoch, or electing to skip some of their own block
+/// production slots towards the end of the previous epoch.
+///
+/// Adversaries should not possess many block production slots towards the beginning or
+/// end of every epoch, but they possess some influence over when they possess more slots.
+///
+/// As an example usage, we determine parachain auction ending times in Polkadot using
+/// `RandomnessFromOneEpochAgo` because it reduces bias from `CurrentBlockRandomness` and
+/// does not require the extra finality delay of `RandomnessFromTwoEpochsAgo`.
+pub struct RandomnessFromOneEpochAgo(sp_std::marker::PhantomData);
+
+/// Randomness produced semi-freshly with each block, but inherits limitations of
+/// `RandomnessFromTwoEpochsAgo` from which it derives.
+///
+/// All randomness is relative to commitments to any other inputs to the computation: If
+/// Alice samples randomness near perfectly using radioactive decay, but then afterwards
+/// Eve selects an arbitrary value with which to xor Alice's randomness, then Eve always
+/// wins whatever game they play.
+///
+/// As with `RandomnessFromTwoEpochsAgo`, all input commitments combined with
+/// `CurrentBlockRandomness` should come from at least two epochs ago, except preferably
+/// not near epoch ending, and thus ideally three epochs ago.
+///
+/// Almost all users learn this randomness for a block when the block producer announces
+/// the block, which makes this randomness appear quite fresh. Yet, the block producer
+/// themselves learned this randomness at the beginning of epoch `current_epoch - 2`, at
+/// the same time as they learn `RandomnessFromTwoEpochsAgo`.
+///
+/// Aside from just biasing `RandomnessFromTwoEpochsAgo`, adversaries could also bias
+/// `CurrentBlockRandomness` by never announcing their block if doing so yields an
+/// unfavorable randomness. As such, `CurrentBlockRandomness` should be considered weaker
+/// than both other randomness sources provided by BABE, but `CurrentBlockRandomness`
+/// remains constrained by declared staking, while a randomness source like block hash is
+/// only constrained by adversaries' unknowable computational power.
+///
+/// As an example use, parachains could assign block production slots based upon the
+/// `CurrentBlockRandomness` of their relay parent or relay parent's parent, provided the
+/// parachain registers collators but avoids censorship-sensitive functionality like
+/// slashing. Any parachain with slashing could operate BABE itself or perhaps better yet
+/// a BABE-like approach that derives its `CurrentBlockRandomness`, and authorizes block
+/// production, based upon the relay parent's `CurrentBlockRandomness` or more likely the
+/// relay parent's `RandomnessFromTwoEpochsAgo`.
+pub struct CurrentBlockRandomness(sp_std::marker::PhantomData);
+
+impl RandomnessT for RandomnessFromTwoEpochsAgo {
+	fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) {
+		let mut subject = subject.to_vec();
+		subject.reserve(VRF_OUTPUT_LENGTH);
+		subject.extend_from_slice(&Randomness::::get()[..]);
+
+		(T::Hashing::hash(&subject[..]), EpochStart::::get().0)
+	}
+}
+
+impl RandomnessT for RandomnessFromOneEpochAgo {
+	fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) {
+		let mut subject = subject.to_vec();
+		subject.reserve(VRF_OUTPUT_LENGTH);
+		subject.extend_from_slice(&NextRandomness::::get()[..]);
+
+		(T::Hashing::hash(&subject[..]), EpochStart::::get().1)
+	}
+}
+
+impl RandomnessT, T::BlockNumber> for CurrentBlockRandomness {
+	fn random(subject: &[u8]) -> (Option, T::BlockNumber) {
+		let random = AuthorVrfRandomness::::get().map(|random| {
+			let mut subject = subject.to_vec();
+			subject.reserve(VRF_OUTPUT_LENGTH);
+			subject.extend_from_slice(&random);
+
+			T::Hashing::hash(&subject[..])
+		});
+
+		(random, >::block_number())
+	}
+}
diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs
index 06bf84614ca6d..dc2f74c719519 100644
--- a/frame/babe/src/tests.rs
+++ b/frame/babe/src/tests.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
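Taken together, the three sources documented above all implement `frame_support::traits::Randomness`, so a downstream pallet picks its bias/freshness trade-off purely through an associated type. A minimal consumer sketch, not taken from this diff: `MyRandomness` and `pick_seed` are hypothetical names, and the binding to a concrete source such as `RandomnessFromOneEpochAgo<Runtime>` would happen when the runtime is assembled:

    use frame_support::traits::Randomness;

    // Hypothetical consumer pallet configuration.
    pub trait Config: frame_system::Config {
        // Bound to one of the BABE sources, e.g. `RandomnessFromOneEpochAgo<Runtime>`,
        // in the concrete runtime.
        type MyRandomness: Randomness<Self::Hash, Self::BlockNumber>;
    }

    // The subject salts the epoch randomness; the returned block number tells the
    // caller when the underlying randomness was actually fixed on chain.
    fn pick_seed<T: Config>(context: &[u8]) -> (T::Hash, T::BlockNumber) {
        T::MyRandomness::random(context)
    }
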
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,20 +19,18 @@ use super::{Call, *}; use frame_support::{ - assert_err, assert_ok, - traits::{Currency, OnFinalize}, + assert_err, assert_noop, assert_ok, + traits::{Currency, EstimateNextSessionRotation, OnFinalize}, weights::{GetDispatchInfo, Pays}, }; use mock::*; use pallet_session::ShouldEndSession; -use sp_consensus_babe::AllowedSlots; +use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot}; use sp_core::crypto::Pair; const EMPTY_RANDOMNESS: [u8; 32] = [ - 74, 25, 49, 128, 53, 97, 244, 49, - 222, 202, 176, 2, 231, 66, 95, 10, - 133, 49, 213, 228, 86, 161, 164, 127, - 217, 153, 138, 37, 48, 192, 248, 0, + 74, 25, 49, 128, 53, 97, 244, 49, 222, 202, 176, 2, 231, 66, 95, 10, 133, 49, 213, 228, 86, + 161, 164, 127, 217, 153, 138, 37, 48, 192, 248, 0, ]; #[test] @@ -43,17 +41,17 @@ fn empty_randomness_is_correct() { #[test] fn initial_values() { - new_test_ext(4).execute_with(|| { - assert_eq!(Babe::authorities().len(), 4) - }) + new_test_ext(4).execute_with(|| assert_eq!(Babe::authorities().len(), 4)) } #[test] fn check_module() { new_test_ext(4).execute_with(|| { assert!(!Babe::should_end_session(0), "Genesis does not change sessions"); - assert!(!Babe::should_end_session(200000), - "BABE does not include the block number in epoch calculations"); + assert!( + !Babe::should_end_session(200000), + "BABE does not include the block number in epoch calculations" + ); }) } @@ -62,25 +60,14 @@ fn first_block_epoch_zero_start() { let (pairs, mut ext) = new_test_ext_with_pairs(4); ext.execute_with(|| { - let genesis_slot = 100; + let genesis_slot = Slot::from(100); let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); let first_vrf = vrf_output; - let pre_digest = make_pre_digest( - 0, - genesis_slot, - first_vrf.clone(), - vrf_proof, - ); + let pre_digest = make_primary_pre_digest(0, genesis_slot, first_vrf.clone(), vrf_proof); - assert_eq!(Babe::genesis_slot(), 0); - System::initialize( - &1, - &Default::default(), - &Default::default(), - &pre_digest, - Default::default(), - ); + assert_eq!(Babe::genesis_slot(), Slot::from(0)); + System::initialize(&1, &Default::default(), &pre_digest, Default::default()); // see implementation of the function for details why: we issue an // epoch-change digest but don't do it via the normal session mechanism. 
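As `check_module` above asserts, BABE never feeds block numbers into epoch calculations: epochs are measured purely in slots. A worked sketch of that arithmetic (a simplification of the pallet's internal computation, using the mock's `EpochDuration = 3` and the genesis slot of 6 exercised by the epoch tests further below):

    // Simplified model of the pallet's slot-based epoch arithmetic.
    fn epoch_index(current_slot: u64, genesis_slot: u64, epoch_duration: u64) -> u64 {
        current_slot.saturating_sub(genesis_slot) / epoch_duration
    }

    #[test]
    fn example_epoch_arithmetic() {
        // With EpochDuration = 3 and a genesis slot of 6 (as in
        // `can_predict_next_epoch_change`), slot 10 falls in epoch (10 - 6) / 3 = 1,
        // regardless of how many blocks were actually produced in between.
        assert_eq!(epoch_index(10, 6, 3), 1);
        assert_eq!(epoch_index(200_000, 6, 3), (200_000 - 6) / 3);
    }
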
@@ -93,11 +80,11 @@ fn first_block_epoch_zero_start() { Babe::on_finalize(1); let header = System::finalize(); - assert_eq!(SegmentIndex::get(), 0); - assert_eq!(UnderConstruction::get(0), vec![vrf_randomness]); + assert_eq!(SegmentIndex::::get(), 0); + assert_eq!(UnderConstruction::::get(0), vec![vrf_randomness]); assert_eq!(Babe::randomness(), [0; 32]); - assert_eq!(Babe::author_vrf_randomness(), None); - assert_eq!(NextRandomness::get(), [0; 32]); + assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); + assert_eq!(NextRandomness::::get(), [0; 32]); assert_eq!(header.digest.logs.len(), 2); assert_eq!(pre_digest.logs.len(), 1); @@ -107,7 +94,7 @@ fn first_block_epoch_zero_start() { sp_consensus_babe::digests::NextEpochDescriptor { authorities: Babe::authorities(), randomness: Babe::randomness(), - } + }, ); let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); @@ -121,25 +108,18 @@ fn author_vrf_output_for_primary() { let (pairs, mut ext) = new_test_ext_with_pairs(1); ext.execute_with(|| { - let genesis_slot = 10; + let genesis_slot = Slot::from(10); let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); - let primary_pre_digest = make_pre_digest(0, genesis_slot, vrf_output, vrf_proof); + let primary_pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_output, vrf_proof); - System::initialize( - &1, - &Default::default(), - &Default::default(), - &primary_pre_digest, - Default::default(), - ); - assert_eq!(Babe::author_vrf_randomness(), None); + System::initialize(&1, &Default::default(), &primary_pre_digest, Default::default()); Babe::do_initialize(1); assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); Babe::on_finalize(1); System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); }) } @@ -148,38 +128,31 @@ fn author_vrf_output_for_secondary_vrf() { let (pairs, mut ext) = new_test_ext_with_pairs(1); ext.execute_with(|| { - let genesis_slot = 10; + let genesis_slot = Slot::from(10); let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); - let secondary_vrf_pre_digest = make_secondary_vrf_pre_digest(0, genesis_slot, vrf_output, vrf_proof); + let secondary_vrf_pre_digest = + make_secondary_vrf_pre_digest(0, genesis_slot, vrf_output, vrf_proof); - System::initialize( - &1, - &Default::default(), - &Default::default(), - &secondary_vrf_pre_digest, - Default::default(), - ); - assert_eq!(Babe::author_vrf_randomness(), None); + System::initialize(&1, &Default::default(), &secondary_vrf_pre_digest, Default::default()); Babe::do_initialize(1); assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); Babe::on_finalize(1); System::finalize(); - assert_eq!(Babe::author_vrf_randomness(), None); + assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); }) } #[test] fn no_author_vrf_output_for_secondary_plain() { new_test_ext(1).execute_with(|| { - let genesis_slot = 10; + let genesis_slot = Slot::from(10); let secondary_plain_pre_digest = make_secondary_plain_pre_digest(0, genesis_slot); System::initialize( &1, &Default::default(), - &Default::default(), &secondary_plain_pre_digest, Default::default(), ); @@ -198,64 +171,233 @@ fn no_author_vrf_output_for_secondary_plain() { fn authority_index() { new_test_ext(4).execute_with(|| { assert_eq!( - Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), None, - "Trivially invalid authorities are ignored") + 
Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), + None, + "Trivially invalid authorities are ignored" + ) }) } #[test] fn can_predict_next_epoch_change() { new_test_ext(1).execute_with(|| { - assert_eq!(::EpochDuration::get(), 3); + assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(Babe::genesis_slot(), 6); - assert_eq!(Babe::current_slot(), 6); + assert_eq!(*Babe::genesis_slot(), 6); + assert_eq!(*Babe::current_slot(), 6); assert_eq!(Babe::epoch_index(), 0); progress_to_block(5); assert_eq!(Babe::epoch_index(), 5 / 3); - assert_eq!(Babe::current_slot(), 10); + assert_eq!(*Babe::current_slot(), 10); // next epoch change will be at - assert_eq!(Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now + assert_eq!(*Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now assert_eq!(Babe::next_expected_epoch_change(System::block_number()), Some(5 + 2)); }) } +#[test] +fn can_estimate_current_epoch_progress() { + new_test_ext(1).execute_with(|| { + assert_eq!(::EpochDuration::get(), 3); + + // with BABE the genesis block is not part of any epoch, the first epoch starts at block #1, + // therefore its last block should be #3 + for i in 1u64..4 { + progress_to_block(i); + + assert_eq!(Babe::estimate_next_session_rotation(i).0.unwrap(), 4); + + // the last block of the epoch must have 100% progress. + if Babe::estimate_next_session_rotation(i).0.unwrap() - 1 == i { + assert_eq!( + Babe::estimate_current_session_progress(i).0.unwrap(), + Permill::from_percent(100) + ); + } else { + assert!( + Babe::estimate_current_session_progress(i).0.unwrap() < + Permill::from_percent(100) + ); + } + } + + // the first block of the new epoch counts towards the epoch progress as well + progress_to_block(4); + assert_eq!( + Babe::estimate_current_session_progress(4).0.unwrap(), + Permill::from_float(1.0 / 3.0), + ); + }) +} + #[test] fn can_enact_next_config() { new_test_ext(1).execute_with(|| { - assert_eq!(::EpochDuration::get(), 3); + assert_eq!(::EpochDuration::get(), 3); // this sets the genesis slot to 6; go_to_block(1, 6); - assert_eq!(Babe::genesis_slot(), 6); - assert_eq!(Babe::current_slot(), 6); + assert_eq!(*Babe::genesis_slot(), 6); + assert_eq!(*Babe::current_slot(), 6); assert_eq!(Babe::epoch_index(), 0); go_to_block(2, 7); - Babe::plan_config_change(NextConfigDescriptor::V1 { + let current_config = BabeEpochConfiguration { + c: (0, 4), + allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, + }; + + let next_config = BabeEpochConfiguration { c: (1, 4), - allowed_slots: AllowedSlots::PrimarySlots, - }); + allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, + }; + + let next_next_config = BabeEpochConfiguration { + c: (2, 4), + allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, + }; + + EpochConfig::::put(current_config); + NextEpochConfig::::put(next_config.clone()); + + assert_eq!(NextEpochConfig::::get(), Some(next_config.clone())); + + Babe::plan_config_change( + Origin::root(), + NextConfigDescriptor::V1 { + c: next_next_config.c, + allowed_slots: next_next_config.allowed_slots, + }, + ) + .unwrap(); progress_to_block(4); Babe::on_finalize(9); let header = System::finalize(); - let consensus_log = sp_consensus_babe::ConsensusLog::NextConfigData( - sp_consensus_babe::digests::NextConfigDescriptor::V1 { - c: (1, 4), - allowed_slots: AllowedSlots::PrimarySlots, - } - ); + assert_eq!(EpochConfig::::get(), Some(next_config)); + 
assert_eq!(NextEpochConfig::::get(), Some(next_next_config.clone())); + + let consensus_log = + sp_consensus_babe::ConsensusLog::NextConfigData(NextConfigDescriptor::V1 { + c: next_next_config.c, + allowed_slots: next_next_config.allowed_slots, + }); let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); assert_eq!(header.digest.logs[2], consensus_digest.clone()) }); } +#[test] +fn only_root_can_enact_config_change() { + use sp_runtime::DispatchError; + + new_test_ext(1).execute_with(|| { + let next_config = + NextConfigDescriptor::V1 { c: (1, 4), allowed_slots: AllowedSlots::PrimarySlots }; + + let res = Babe::plan_config_change(Origin::none(), next_config.clone()); + + assert_noop!(res, DispatchError::BadOrigin); + + let res = Babe::plan_config_change(Origin::signed(1), next_config.clone()); + + assert_noop!(res, DispatchError::BadOrigin); + + let res = Babe::plan_config_change(Origin::root(), next_config); + + assert!(res.is_ok()); + }); +} + +#[test] +fn can_fetch_current_and_next_epoch_data() { + new_test_ext(5).execute_with(|| { + EpochConfig::::put(BabeEpochConfiguration { + c: (1, 4), + allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, + }); + + // genesis authorities should be used for the first and second epoch + assert_eq!(Babe::current_epoch().authorities, Babe::next_epoch().authorities); + // 1 era = 3 epochs + // 1 epoch = 3 slots + // Eras start from 0. + // Therefore at era 1 we should be starting epoch 3 with slot 10. + start_era(1); + + let current_epoch = Babe::current_epoch(); + assert_eq!(current_epoch.epoch_index, 3); + assert_eq!(*current_epoch.start_slot, 10); + assert_eq!(current_epoch.authorities.len(), 5); + + let next_epoch = Babe::next_epoch(); + assert_eq!(next_epoch.epoch_index, 4); + assert_eq!(*next_epoch.start_slot, 13); + assert_eq!(next_epoch.authorities.len(), 5); + + // the on-chain randomness should always change across epochs + assert!(current_epoch.randomness != next_epoch.randomness); + + // but in this case the authorities stay the same + assert!(current_epoch.authorities == next_epoch.authorities); + }); +} + +#[test] +fn tracks_block_numbers_when_current_and_previous_epoch_started() { + new_test_ext(5).execute_with(|| { + // an epoch is 3 slots therefore at block 8 we should be in epoch #3 + // with the previous epochs having the following blocks: + // epoch 1 - [1, 2, 3] + // epoch 2 - [4, 5, 6] + // epoch 3 - [7, 8, 9] + progress_to_block(8); + + let (last_epoch, current_epoch) = EpochStart::::get(); + + assert_eq!(last_epoch, 4); + assert_eq!(current_epoch, 7); + + // once we reach block 10 we switch to epoch #4 + progress_to_block(10); + + let (last_epoch, current_epoch) = EpochStart::::get(); + + assert_eq!(last_epoch, 7); + assert_eq!(current_epoch, 10); + }); +} + +#[test] +#[should_panic( + expected = "Validator with index 0 is disabled and should not be attempting to author blocks." 
+)] +fn disabled_validators_cannot_author_blocks() { + new_test_ext(4).execute_with(|| { + start_era(1); + + // let's disable the validator at index 1 + Session::disable_index(1); + + // the mocking infrastructure always authors all blocks using authority index 0, + // so we should still be able to author blocks + start_era(2); + + assert_eq!(Staking::current_era().unwrap(), 2); + + // let's disable the validator at index 0 + Session::disable_index(0); + + // this should now panic as the validator authoring blocks is disabled + start_era(3); + }); +} + #[test] fn report_equivocation_current_session_works() { let (pairs, mut ext) = new_test_ext_with_pairs(3); @@ -273,16 +415,12 @@ fn report_equivocation_current_session_works() { assert_eq!( Staking::eras_stakers(1, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } - // we will use the validator at index 0 as the offending authority - let offending_validator_index = 0; + // we will use the validator at index 1 as the offending authority + let offending_validator_index = 1; let offending_validator_id = Session::validators()[offending_validator_index]; let offending_authority_pair = pairs .into_iter() @@ -294,54 +432,44 @@ fn report_equivocation_current_session_works() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); // report the equivocation - Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) - .unwrap(); + Babe::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) + .unwrap(); // start a new era so that the results of the offence report // are applied at era end start_era(2); // check that the balance of offending validator is slashed 100%. - assert_eq!( - Balances::total_balance(&offending_validator_id), - 10_000_000 - 10_000 - ); + assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( Staking::eras_stakers(2, offending_validator_id), - pallet_staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); // check that the balances of all other validators are left intact. 
for validator in &validators { if *validator == offending_validator_id { - continue; + continue } assert_eq!(Balances::total_balance(validator), 10_000_000); assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( Staking::eras_stakers(2, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } }) @@ -357,7 +485,7 @@ fn report_equivocation_old_session_works() { let authorities = Babe::authorities(); // we will use the validator at index 0 as the offending authority - let offending_validator_index = 0; + let offending_validator_index = 1; let offending_validator_id = Session::validators()[offending_validator_index]; let offending_authority_pair = pairs .into_iter() @@ -368,14 +496,11 @@ fn report_equivocation_old_session_works() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); // start a new era and report the equivocation @@ -384,32 +509,26 @@ fn report_equivocation_old_session_works() { // check the balance of the offending validator assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000); - assert_eq!( - Staking::slashable_balance_of(&offending_validator_id), - 10_000 - ); + assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 10_000); // report the equivocation - Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) - .unwrap(); + Babe::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) + .unwrap(); // start a new era so that the results of the offence report // are applied at era end start_era(3); // check that the balance of offending validator is slashed 100%. 
- assert_eq!( - Balances::total_balance(&offending_validator_id), - 10_000_000 - 10_000 - ); + assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( Staking::eras_stakers(3, offending_validator_id), - pallet_staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); }) } @@ -434,14 +553,11 @@ fn report_equivocation_invalid_key_owner_proof() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let mut key_owner_proof = Historical::prove(key).unwrap(); // we change the session index in the key ownership proof @@ -450,7 +566,7 @@ fn report_equivocation_invalid_key_owner_proof() { assert_err!( Babe::report_equivocation_unsigned( Origin::none(), - equivocation_proof.clone(), + Box::new(equivocation_proof.clone()), key_owner_proof ), Error::::InvalidKeyOwnershipProof, @@ -468,7 +584,11 @@ fn report_equivocation_invalid_key_owner_proof() { start_era(2); assert_err!( - Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof), + Babe::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ), Error::::InvalidKeyOwnershipProof, ); }) @@ -493,17 +613,14 @@ fn report_equivocation_invalid_equivocation_proof() { .unwrap(); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); let assert_invalid_equivocation = |equivocation_proof| { assert_err!( Babe::report_equivocation_unsigned( Origin::none(), - equivocation_proof, + Box::new(equivocation_proof), key_owner_proof.clone(), ), Error::::InvalidEquivocationProof, @@ -514,7 +631,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); equivocation_proof.second_header = equivocation_proof.first_header.clone(); assert_invalid_equivocation(equivocation_proof); @@ -523,7 +640,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); equivocation_proof.first_header.digest_mut().logs.remove(0); assert_invalid_equivocation(equivocation_proof); @@ -532,7 +649,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); equivocation_proof.first_header.digest_mut().logs.remove(1); assert_invalid_equivocation(equivocation_proof); @@ -541,9 +658,9 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); - equivocation_proof.slot_number = 0; + 
equivocation_proof.slot = Slot::from(0); assert_invalid_equivocation(equivocation_proof.clone()); // different slot numbers in headers @@ -551,7 +668,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get() + 1, + CurrentSlot::::get() + 1, ); // use the header from the previous equivocation generated @@ -564,7 +681,7 @@ fn report_equivocation_invalid_equivocation_proof() { let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get() + 1, + CurrentSlot::::get() + 1, ); // replace the seal digest with the digest from the @@ -582,8 +699,8 @@ fn report_equivocation_invalid_equivocation_proof() { #[test] fn report_equivocation_validate_unsigned_prevents_duplicates() { use sp_runtime::transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, - TransactionValidity, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, }; let (pairs, mut ext) = new_test_ext_with_pairs(3); @@ -603,17 +720,16 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, - CurrentSlot::get(), + CurrentSlot::::get(), ); - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); - let inner = - Call::report_equivocation_unsigned(equivocation_proof.clone(), key_owner_proof.clone()); + let inner = Call::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof.clone()), + key_owner_proof: key_owner_proof.clone(), + }; // only local/inblock reports are allowed assert_eq!( @@ -625,7 +741,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { ); // the transaction is valid when passed as local - let tx_tag = (offending_authority_pair.public(), CurrentSlot::get()); + let tx_tag = (offending_authority_pair.public(), CurrentSlot::::get()); assert_eq!( ::validate_unsigned( TransactionSource::Local, @@ -635,7 +751,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { priority: TransactionPriority::max_value(), requires: vec![], provides: vec![("BabeEquivocation", tx_tag).encode()], - longevity: TransactionLongevity::max_value(), + longevity: ReportLongevity::get(), propagate: false, }) ); @@ -644,10 +760,23 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { assert_ok!(::pre_dispatch(&inner)); // we submit the report - Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) - .unwrap(); + Babe::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) + .unwrap(); + + // the report should now be considered stale and the transaction is invalid. 
+ // the check for staleness should be done on both `validate_unsigned` and on `pre_dispatch` + assert_err!( + ::validate_unsigned( + TransactionSource::Local, + &inner, + ), + InvalidTransaction::Stale, + ); - // the report should now be considered stale and the transaction is invalid assert_err!( ::pre_dispatch(&inner), InvalidTransaction::Stale, @@ -659,23 +788,19 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { fn report_equivocation_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. - assert!( - (1..=100) - .map(::WeightInfo::report_equivocation) - .collect::>() - .windows(2) - .all(|w| w[0] == w[1]) - ); + assert!((1..=100) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] == w[1])); // after 100 validators the weight should keep increasing // with every extra validator. - assert!( - (100..=1000) - .map(::WeightInfo::report_equivocation) - .collect::>() - .windows(2) - .all(|w| w[0] < w[1]) - ); + assert!((100..=1000) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] < w[1])); } #[test] @@ -689,20 +814,18 @@ fn valid_equivocation_reports_dont_pay_fees() { // generate an equivocation proof. let equivocation_proof = - generate_equivocation_proof(0, &offending_authority_pair, CurrentSlot::get()); + generate_equivocation_proof(0, &offending_authority_pair, CurrentSlot::::get()); // create the key ownership proof. - let key_owner_proof = Historical::prove(( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - )) - .unwrap(); + let key_owner_proof = + Historical::prove((sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public())) + .unwrap(); // check the dispatch info for the call. - let info = Call::::report_equivocation_unsigned( - equivocation_proof.clone(), - key_owner_proof.clone(), - ) + let info = Call::::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof.clone()), + key_owner_proof: key_owner_proof.clone(), + } .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. @@ -712,7 +835,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation. let post_info = Babe::report_equivocation_unsigned( Origin::none(), - equivocation_proof.clone(), + Box::new(equivocation_proof.clone()), key_owner_proof.clone(), ) .unwrap(); @@ -724,14 +847,59 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation again which is invalid now since it is // duplicate. - let post_info = - Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) - .err() - .unwrap() - .post_info; + let post_info = Babe::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) + .err() + .unwrap() + .post_info; // the fee is not waived and the original weight is kept. 
assert!(post_info.actual_weight.is_none()); assert_eq!(post_info.pays_fee, Pays::Yes); }) } + +#[test] +fn add_epoch_configurations_migration_works() { + use frame_support::storage::migration::{get_storage_value, put_storage_value}; + + impl crate::migrations::BabePalletPrefix for Test { + fn pallet_prefix() -> &'static str { + "Babe" + } + } + + new_test_ext(1).execute_with(|| { + let next_config_descriptor = + NextConfigDescriptor::V1 { c: (3, 4), allowed_slots: AllowedSlots::PrimarySlots }; + + put_storage_value(b"Babe", b"NextEpochConfig", &[], Some(next_config_descriptor.clone())); + + assert!(get_storage_value::>( + b"Babe", + b"NextEpochConfig", + &[], + ) + .is_some()); + + let current_epoch = BabeEpochConfiguration { + c: (1, 4), + allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, + }; + + crate::migrations::add_epoch_configuration::(current_epoch.clone()); + + assert!(get_storage_value::>( + b"Babe", + b"NextEpochConfig", + &[], + ) + .is_none()); + + assert_eq!(EpochConfig::::get(), Some(current_epoch)); + assert_eq!(PendingEpochConfigChange::::get(), Some(next_config_descriptor)); + }); +} diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 21c8abbc24a6c..2263387d6d8ef 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-balances" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,28 +13,31 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-transaction-payment = { version = "4.0.0-dev", path = "../transaction-payment" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-std/std", "sp-runtime/std", "frame-benchmarking/std", 
"frame-support/std", "frame-system/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/balances/README.md b/frame/balances/README.md index 4104fdc641975..93e424a89c721 100644 --- a/frame/balances/README.md +++ b/frame/balances/README.md @@ -2,9 +2,9 @@ The Balances module provides functionality for handling accounts and balances. -- [`balances::Trait`](https://docs.rs/pallet-balances/latest/pallet_balances/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-balances/latest/pallet_balances/enum.Call.html) -- [`Module`](https://docs.rs/pallet-balances/latest/pallet_balances/struct.Module.html) +- [`Config`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/trait.Config.html) +- [`Call`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/enum.Call.html) +- [`Pallet`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/struct.Pallet.html) ## Overview @@ -62,7 +62,7 @@ dealing with accounts that allow liquidity restrictions. - [`Imbalance`](https://docs.rs/frame-support/latest/frame_support/traits/trait.Imbalance.html): Functions for handling imbalances between total issuance in the system and account balances. Must be used when a function creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). -- [`IsDeadAccount`](https://docs.rs/frame-system/latest/frame_system/trait.IsDeadAccount.html): Determiner to say whether a +- [`IsDeadAccount`](https://docs.rs/frame-support/latest/frame_support/traits/trait.IsDeadAccount.html): Determiner to say whether a given account is unused. ## Interface @@ -83,8 +83,8 @@ The Contract module uses the `Currency` trait to handle gas payment, and its typ ```rust use frame_support::traits::Currency; -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; ``` @@ -93,11 +93,11 @@ The Staking module uses the `LockableCurrency` trait to lock a stash account's f ```rust use frame_support::traits::{WithdrawReasons, LockableCurrency}; use sp_runtime::traits::Bounded; -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { type Currency: LockableCurrency; } -fn update_ledger( +fn update_ledger( controller: &T::AccountId, ledger: &StakingLedger ) { @@ -113,10 +113,10 @@ fn update_ledger( ## Genesis config -The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-balances/latest/pallet_balances/struct.GenesisConfig.html). +The Balances module depends on the [`GenesisConfig`](https://docs.rs/pallet-balances/latest/pallet_balances/pallet/struct.GenesisConfig.html). ## Assumptions -* Total issued balanced of all accounts should be less than `Trait::Balance::max_value()`. +* Total issued balanced of all accounts should be less than `Config::Balance::max_value()`. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 21f43c7c63640..06d202ea37002 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,20 +21,19 @@ use super::*; +use frame_benchmarking::{ + account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelisted_caller, +}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; -use crate::Module as Balances; +use crate::Pallet as Balances; const SEED: u32 = 0; // existential deposit multiplier const ED_MULTIPLIER: u32 = 10; - -benchmarks! { - _ { } - +benchmarks_instance_pallet! { // Benchmark `transfer` extrinsic with the worst possible conditions: // * Transfer will kill the sender account. // * Transfer will create the recipient account. @@ -42,18 +41,19 @@ benchmarks! { let existential_deposit = T::ExistentialDeposit::get(); let caller = whitelisted_caller(); - // Give some multiple of the existential deposit + creation fee + transfer fee + // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. + // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, + // and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); - let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1.into(); + let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { - assert_eq!(Balances::::free_balance(&caller), Zero::zero()); - assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } // Benchmark `transfer` with the best possible condition: @@ -65,16 +65,16 @@ benchmarks! { let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds for transfer (their account will never reasonably be killed). - let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); // Give the recipient account existential deposit (thus their account already exists). let existential_deposit = T::ExistentialDeposit::get(); - let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); + let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { - assert!(!Balances::::free_balance(&caller).is_zero()); - assert!(!Balances::::free_balance(&recipient).is_zero()); + assert!(!Balances::::free_balance(&caller).is_zero()); + assert!(!Balances::::free_balance(&recipient).is_zero()); } // Benchmark `transfer_keep_alive` with the worst possible condition: @@ -85,13 +85,13 @@ benchmarks! { let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds, thus a transfer will not kill account. 
- let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); let existential_deposit = T::ExistentialDeposit::get(); let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { - assert!(!Balances::::free_balance(&caller).is_zero()); - assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + assert!(!Balances::::free_balance(&caller).is_zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } // Benchmark `set_balance` coming from ROOT account. This always creates an account. @@ -102,11 +102,11 @@ benchmarks! { // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); }: set_balance(RawOrigin::Root, user_lookup, balance_amount, balance_amount) verify { - assert_eq!(Balances::::free_balance(&user), balance_amount); - assert_eq!(Balances::::reserved_balance(&user), balance_amount); + assert_eq!(Balances::::free_balance(&user), balance_amount); + assert_eq!(Balances::::reserved_balance(&user), balance_amount); } // Benchmark `set_balance` coming from ROOT account. This always kills an account. @@ -117,10 +117,10 @@ benchmarks! { // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); }: set_balance(RawOrigin::Root, user_lookup, Zero::zero(), Zero::zero()) verify { - assert!(Balances::::free_balance(&user).is_zero()); + assert!(Balances::::free_balance(&user).is_zero()); } // Benchmark `force_transfer` extrinsic with the worst possible conditions: @@ -131,66 +131,94 @@ benchmarks! { let source: T::AccountId = account("source", 0, SEED); let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); - // Give some multiple of the existential deposit + creation fee + transfer fee + // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&source, balance); + let _ = as Currency<_>>::make_free_balance_be(&source, balance); // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. 
 		let recipient: T::AccountId = account("recipient", 0, SEED);
 		let recipient_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(recipient.clone());
-		let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1.into();
+		let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into();
 	}: force_transfer(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount)
 	verify {
-		assert_eq!(Balances::<T>::free_balance(&source), Zero::zero());
-		assert_eq!(Balances::<T>::free_balance(&recipient), transfer_amount);
+		assert_eq!(Balances::<T, I>::free_balance(&source), Zero::zero());
+		assert_eq!(Balances::<T, I>::free_balance(&recipient), transfer_amount);
 	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use crate::tests_composite::{ExtBuilder, Test};
-	use frame_support::assert_ok;
-
-	#[test]
-	fn transfer() {
-		ExtBuilder::default().build().execute_with(|| {
-			assert_ok!(test_benchmark_transfer::<Test>());
-		});
-	}
+	// This benchmark performs the same operation as `transfer` in the worst case scenario,
+	// but additionally introduces many new users into the storage, increasing the Merkle
+	// trie and PoV size.
+	#[extra]
+	transfer_increasing_users {
+		// 1_000 is not very much, but this upper bound can be controlled by the CLI.
+		let u in 0 .. 1_000;
+		let existential_deposit = T::ExistentialDeposit::get();
+		let caller = whitelisted_caller();
-
-	#[test]
-	fn transfer_best_case() {
-		ExtBuilder::default().build().execute_with(|| {
-			assert_ok!(test_benchmark_transfer_best_case::<Test>());
-		});
-	}
+		// Give some multiple of the existential deposit
+		let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into());
+		let _ = <Balances<T, I> as Currency<_>>::make_free_balance_be(&caller, balance);
-
-	#[test]
-	fn transfer_keep_alive() {
-		ExtBuilder::default().build().execute_with(|| {
-			assert_ok!(test_benchmark_transfer_keep_alive::<Test>());
-		});
+		// Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account,
+		// and reap this user.
+		let recipient: T::AccountId = account("recipient", 0, SEED);
+		let recipient_lookup: <T::Lookup as StaticLookup>::Source = T::Lookup::unlookup(recipient.clone());
+		let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into();
+
+		// Create a bunch of users in storage.
+		for i in 0 .. u {
+			// The `account` function uses `blake2_256` to generate unique accounts, so these
+			// should be quite random and evenly distributed in the trie.
+ let new_user: T::AccountId = account("new_user", i, SEED); + let _ = as Currency<_>>::make_free_balance_be(&new_user, balance); + } + }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) + verify { + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } - #[test] - fn transfer_set_balance_creating() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_set_balance_creating::()); - }); - } + // Benchmark `transfer_all` with the worst possible condition: + // * The recipient account is created + // * The sender is killed + transfer_all { + let caller = whitelisted_caller(); + let recipient: T::AccountId = account("recipient", 0, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); - #[test] - fn transfer_set_balance_killing() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_set_balance_killing::()); - }); + // Give some multiple of the existential deposit + let existential_deposit = T::ExistentialDeposit::get(); + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, false) + verify { + assert!(Balances::::free_balance(&caller).is_zero()); + assert_eq!(Balances::::free_balance(&recipient), balance); } - #[test] - fn force_transfer() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_force_transfer::()); - }); + force_unreserve { + let user: T::AccountId = account("user", 0, SEED); + let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + + // Give some multiple of the existential deposit + let existential_deposit = T::ExistentialDeposit::get(); + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + let _ = as Currency<_>>::make_free_balance_be(&user, balance); + + // Reserve the balance + as ReservableCurrency<_>>::reserve(&user, balance)?; + assert_eq!(Balances::::reserved_balance(&user), balance); + assert!(Balances::::free_balance(&user).is_zero()); + + }: _(RawOrigin::Root, user_lookup, balance) + verify { + assert!(Balances::::reserved_balance(&user).is_zero()); + assert_eq!(Balances::::free_balance(&user), balance); } } + +impl_benchmark_test_suite!( + Balances, + crate::tests_composite::ExtBuilder::default().build(), + crate::tests_composite::Test, +); diff --git a/frame/balances/src/default_weight.rs b/frame/balances/src/default_weight.rs deleted file mode 100644 index 47a9199600564..0000000000000 --- a/frame/balances/src/default_weight.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Weights for the Balances Pallet - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn transfer() -> Weight { - (65949000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn transfer_keep_alive() -> Weight { - (46665000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_balance_creating() -> Weight { - (27086000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_balance_killing() -> Weight { - (33424000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn force_transfer() -> Weight { - (65343000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } -} diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 422e112bdf276..afd2331c8e3cf 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,17 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Balances Module +//! # Balances Pallet //! -//! The Balances module provides functionality for handling accounts and balances. +//! The Balances pallet provides functionality for handling accounts and balances. //! -//! - [`balances::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! ## Overview //! -//! The Balances module provides functions for: +//! The Balances pallet provides functions for: //! //! - Getting and setting free balances. //! - Retrieving total, reserved and unreserved balances. @@ -38,17 +38,18 @@ //! //! ### Terminology //! -//! - **Existential Deposit:** The minimum balance required to create or keep an account open. This prevents -//! "dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) -//! fall below this, then the account is said to be dead; and it loses its functionality as well as any -//! prior history and all information on it is removed from the chain's state. -//! No account should ever have a total balance that is strictly between 0 and the existential -//! deposit (exclusive). If this ever happens, it indicates either a bug in this module or an -//! erroneous raw mutation of storage. +//! - **Existential Deposit:** The minimum balance required to create or keep an account open. This +//! prevents "dust accounts" from filling storage. When the free plus the reserved balance (i.e. +//! the total balance) fall below this, then the account is said to be dead; and it loses its +//! functionality as well as any prior history and all information on it is removed from the +//! chain's state. No account should ever have a total balance that is strictly between 0 and the +//! existential deposit (exclusive). If this ever happens, it indicates either a bug in this +//! pallet or an erroneous raw mutation of storage. //! //! 
- **Total Issuance:** The total number of units in existence in a system. //! -//! - **Reaping an account:** The act of removing an account by resetting its nonce. Happens after its +//! - **Reaping an account:** The act of removing an account by resetting its nonce. Happens after +//! its //! total balance has become zero (or, strictly speaking, less than the Existential Deposit). //! //! - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only @@ -57,30 +58,32 @@ //! - **Reserved Balance:** Reserved balance still belongs to the account holder, but is suspended. //! Reserved balance can still be slashed, but only after all the free balance has been slashed. //! -//! - **Imbalance:** A condition when some funds were credited or debited without equal and opposite accounting -//! (i.e. a difference between total issuance and account balances). Functions that result in an imbalance will -//! return an object of the `Imbalance` trait that can be managed within your runtime logic. (If an imbalance is -//! simply dropped, it should automatically maintain any book-keeping such as total issuance.) +//! - **Imbalance:** A condition when some funds were credited or debited without equal and opposite +//! accounting +//! (i.e. a difference between total issuance and account balances). Functions that result in an +//! imbalance will return an object of the `Imbalance` trait that can be managed within your runtime +//! logic. (If an imbalance is simply dropped, it should automatically maintain any book-keeping +//! such as total issuance.) //! -//! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block number. Multiple +//! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block +//! number. Multiple //! locks always operate over the same funds, so they "overlay" rather than "stack". //! //! ### Implementations //! -//! The Balances module provides implementations for the following traits. If these traits provide the functionality -//! that you need, then you can avoid coupling with the Balances module. +//! The Balances pallet provides implementations for the following traits. If these traits provide +//! the functionality that you need, then you can avoid coupling with the Balances pallet. //! -//! - [`Currency`](../frame_support/traits/trait.Currency.html): Functions for dealing with a +//! - [`Currency`](frame_support::traits::Currency): Functions for dealing with a //! fungible assets system. -//! - [`ReservableCurrency`](../frame_support/traits/trait.ReservableCurrency.html): +//! - [`ReservableCurrency`](frame_support::traits::ReservableCurrency): +//! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. -//! - [`LockableCurrency`](../frame_support/traits/trait.LockableCurrency.html): Functions for +//! - [`LockableCurrency`](frame_support::traits::LockableCurrency): Functions for //! dealing with accounts that allow liquidity restrictions. -//! - [`Imbalance`](../frame_support/traits/trait.Imbalance.html): Functions for handling -//! imbalances between total issuance in the system and account balances. Must be used when a function -//! creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee). -//! - [`IsDeadAccount`](../frame_system/trait.IsDeadAccount.html): Determiner to say whether a -//! given account is unused. +//! 
- [`Imbalance`](frame_support::traits::Imbalance): Functions for handling
+//!   imbalances between total issuance in the system and account balances. Must be used when a
+//!   function creates new funds (e.g. a reward) or destroys some funds (e.g. a system fee).
 //!
 //! ## Interface
 //!
@@ -91,40 +94,41 @@
 //!
 //! ## Usage
 //!
-//! The following examples show how to use the Balances module in your custom module.
+//! The following examples show how to use the Balances pallet in your custom pallet.
 //!
 //! ### Examples from the FRAME
 //!
-//! The Contract module uses the `Currency` trait to handle gas payment, and its types inherit from `Currency`:
+//! The Contract pallet uses the `Currency` trait to handle gas payment, and its types inherit from
+//! `Currency`:
 //!
 //! ```
 //! use frame_support::traits::Currency;
-//! # pub trait Trait: frame_system::Trait {
+//! # pub trait Config: frame_system::Config {
 //! # 	type Currency: Currency<Self::AccountId>;
 //! # }
 //!
-//! pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
-//! pub type NegativeImbalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::NegativeImbalance;
+//! pub type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+//! pub type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::NegativeImbalance;
 //!
 //! # fn main() {}
 //! ```
 //!
-//! The Staking module uses the `LockableCurrency` trait to lock a stash account's funds:
+//! The Staking pallet uses the `LockableCurrency` trait to lock a stash account's funds:
 //!
 //! ```
 //! use frame_support::traits::{WithdrawReasons, LockableCurrency};
 //! use sp_runtime::traits::Bounded;
-//! pub trait Trait: frame_system::Trait {
+//! pub trait Config: frame_system::Config {
 //! 	type Currency: LockableCurrency<Self::AccountId, Moment=Self::BlockNumber>;
 //! }
-//! # struct StakingLedger<T: Trait> {
-//! # 	stash: <T as frame_system::Trait>::AccountId,
-//! # 	total: <<T as Trait>::Currency as frame_support::traits::Currency<<T as frame_system::Trait>::AccountId>>::Balance,
+//! # struct StakingLedger<T: Config> {
+//! # 	stash: <T as frame_system::Config>::AccountId,
+//! # 	total: <<T as Config>::Currency as frame_support::traits::Currency<<T as frame_system::Config>::AccountId>>::Balance,
 //! # 	phantom: std::marker::PhantomData<T>,
 //! # }
 //! # const STAKING_ID: [u8; 8] = *b"staking ";
 //!
-//! fn update_ledger<T: Trait>(
+//! fn update_ledger<T: Config>(
 //! 	controller: &T::AccountId,
 //! 	ledger: &StakingLedger<T>
 //! ) {
@@ -141,139 +145,346 @@
 //!
 //! ## Genesis config
 //!
-//! The Balances module depends on the [`GenesisConfig`](./struct.GenesisConfig.html).
+//! The Balances pallet depends on the [`GenesisConfig`].
 //!
 //! ## Assumptions
 //!
-//! * Total issued balanced of all accounts should be less than `Trait::Balance::max_value()`.
+//! * Total issued balance of all accounts should be less than `Config::Balance::max_value()`.
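The `Config` trait that follows gains two items the old `Trait` did not have, `MaxReserves` and `ReserveIdentifier`, both serving the new named-reserves feature. A runtime wires the pallet up roughly like this (a sketch: `Runtime`, `System` and `Event` are assumed runtime names, and `SubstrateWeight` is the struct conventionally exposed by an autogenerated `weights.rs`):

```rust
use frame_support::parameter_types;

parameter_types! {
	pub const ExistentialDeposit: u64 = 1;
	pub const MaxLocks: u32 = 50;
	pub const MaxReserves: u32 = 50;
}

impl pallet_balances::Config for Runtime {
	type Balance = u64;
	type DustRemoval = ();
	type Event = Event;
	type ExistentialDeposit = ExistentialDeposit;
	type AccountStore = System;
	type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
	type MaxLocks = MaxLocks;
	type MaxReserves = MaxReserves;
	// Named reserves are keyed by a small `Ord + MaxEncodedLen` id; a fixed
	// byte array is the usual choice.
	type ReserveIdentifier = [u8; 8];
}
```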
#![cfg_attr(not(feature = "std"), no_std)] #[macro_use] mod tests; -mod tests_local; -mod tests_composite; mod benchmarking; -mod default_weight; +mod tests_composite; +mod tests_local; +mod tests_reentrancy; +pub mod weights; -use sp_std::prelude::*; -use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr, convert::Infallible}; -use codec::{Codec, Encode, Decode}; +pub use self::imbalances::{NegativeImbalance, PositiveImbalance}; +use codec::{Codec, Decode, Encode, MaxEncodedLen}; +#[cfg(feature = "std")] +use frame_support::traits::GenesisBuild; use frame_support::{ - StorageValue, Parameter, decl_event, decl_storage, decl_module, decl_error, ensure, - weights::Weight, + ensure, + pallet_prelude::DispatchResult, traits::{ - Currency, OnKilledAccount, OnUnbalanced, TryDrop, StoredMap, - WithdrawReason, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, - Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, - ExistenceRequirement::AllowDeath, IsDeadAccount, BalanceStatus as Status, - } + tokens::{fungible, BalanceStatus as Status, DepositConsequence, WithdrawConsequence}, + Currency, ExistenceRequirement, + ExistenceRequirement::{AllowDeath, KeepAlive}, + Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency, OnUnbalanced, + ReservableCurrency, SignedImbalance, StoredMap, TryDrop, WithdrawReasons, + }, + WeakBoundedVec, }; +use frame_system as system; +use scale_info::TypeInfo; use sp_runtime::{ - RuntimeDebug, DispatchResult, DispatchError, traits::{ - Zero, AtLeast32BitUnsigned, StaticLookup, Member, CheckedAdd, CheckedSub, - MaybeSerializeDeserialize, Saturating, Bounded, + AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, + Saturating, StaticLookup, Zero, }, + ArithmeticError, DispatchError, RuntimeDebug, }; -use frame_system::{self as system, ensure_signed, ensure_root}; -pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; - -pub trait WeightInfo { - fn transfer() -> Weight; - fn transfer_keep_alive() -> Weight; - fn set_balance_creating() -> Weight; - fn set_balance_killing() -> Weight; - fn force_transfer() -> Weight; -} +use sp_std::{cmp, fmt::Debug, mem, ops::BitOr, prelude::*, result}; +pub use weights::WeightInfo; -pub trait Subtrait: frame_system::Trait { - /// The balance of an account. - type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + - MaybeSerializeDeserialize + Debug; +pub use pallet::*; - /// The minimum amount required to keep an account open. - type ExistentialDeposit: Get; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - /// The means of storing the balances of an account. - type AccountStore: StoredMap>; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The balance of an account. + type Balance: Parameter + + Member + + AtLeast32BitUnsigned + + Codec + + Default + + Copy + + MaybeSerializeDeserialize + + Debug + + MaxEncodedLen + + TypeInfo; + + /// Handler for the unbalanced reduction when removing a dust account. + type DustRemoval: OnUnbalanced>; + + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// Weight information for the extrinsics in this pallet. - type WeightInfo: WeightInfo; + /// The minimum amount required to keep an account open. + #[pallet::constant] + type ExistentialDeposit: Get; - /// The maximum number of locks that should exist on an account. 
- /// Not strictly enforced, but used for weight estimation. - type MaxLocks: Get; -} + /// The means of storing the balances of an account. + type AccountStore: StoredMap>; -pub trait Trait: frame_system::Trait { - /// The balance of an account. - type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + - MaybeSerializeDeserialize + Debug; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; - /// Handler for the unbalanced reduction when removing a dust account. - type DustRemoval: OnUnbalanced>; + /// The maximum number of locks that should exist on an account. + /// Not strictly enforced, but used for weight estimation. + #[pallet::constant] + type MaxLocks: Get; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The maximum number of named reserves that can exist on an account. + #[pallet::constant] + type MaxReserves: Get; - /// The minimum amount required to keep an account open. - type ExistentialDeposit: Get; + /// The id type for named reserves. + type ReserveIdentifier: Parameter + Member + MaxEncodedLen + Ord + Copy; + } - /// The means of storing the balances of an account. - type AccountStore: StoredMap>; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(PhantomData<(T, I)>); - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; + #[pallet::call] + impl, I: 'static> Pallet { + /// Transfer some liquid free balance to another account. + /// + /// `transfer` will set the `FreeBalance` of the sender and receiver. + /// It will decrease the total issuance of the system by the `TransferFee`. + /// If the sender's account is below the existential deposit as a result + /// of the transfer, the account will be reaped. + /// + /// The dispatch origin for this call must be `Signed` by the transactor. + /// + /// # + /// - Dependent on arguments but not critical, given proper implementations for input config + /// types. See related functions below. + /// - It contains a limited number of reads and writes internally and no complex + /// computation. + /// + /// Related functions: + /// + /// - `ensure_can_withdraw` is always called internally but has a bounded complexity. + /// - Transferring balances to accounts that did not exist before will cause + /// `T::OnNewAccount::on_new_account` to be called. + /// - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`. + /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional check + /// that the transfer will not kill the origin account. + /// --------------------------------- + /// - Base Weight: 73.64 µs, worst case scenario (account created, account removed) + /// - DB Weight: 1 Read and 1 Write to destination account + /// - Origin account is already in memory, so no DB operations for them. + /// # + #[pallet::weight(T::WeightInfo::transfer())] + pub fn transfer( + origin: OriginFor, + dest: ::Source, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + let transactor = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer( + &transactor, + &dest, + value, + ExistenceRequirement::AllowDeath, + )?; + Ok(().into()) + } - /// The maximum number of locks that should exist on an account. - /// Not strictly enforced, but used for weight estimation. - type MaxLocks: Get; -} + /// Set the balances of a given account. 
+ /// + /// This will alter `FreeBalance` and `ReservedBalance` in storage. it will + /// also decrease the total issuance of the system (`TotalIssuance`). + /// If the new free or reserved balance is below the existential deposit, + /// it will reset the account nonce (`frame_system::AccountNonce`). + /// + /// The dispatch origin for this call is `root`. + /// + /// # + /// - Independent of the arguments. + /// - Contains a limited number of reads and writes. + /// --------------------- + /// - Base Weight: + /// - Creating: 27.56 µs + /// - Killing: 35.11 µs + /// - DB Weight: 1 Read, 1 Write to `who` + /// # + #[pallet::weight( + T::WeightInfo::set_balance_creating() // Creates a new account. + .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. + )] + pub fn set_balance( + origin: OriginFor, + who: ::Source, + #[pallet::compact] new_free: T::Balance, + #[pallet::compact] new_reserved: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let existential_deposit = T::ExistentialDeposit::get(); -impl, I: Instance> Subtrait for T { - type Balance = T::Balance; - type ExistentialDeposit = T::ExistentialDeposit; - type AccountStore = T::AccountStore; - type WeightInfo = >::WeightInfo; - type MaxLocks = T::MaxLocks; -} + let wipeout = new_free + new_reserved < existential_deposit; + let new_free = if wipeout { Zero::zero() } else { new_free }; + let new_reserved = if wipeout { Zero::zero() } else { new_reserved }; + + let (free, reserved) = Self::mutate_account(&who, |account| { + if new_free > account.free { + mem::drop(PositiveImbalance::::new(new_free - account.free)); + } else if new_free < account.free { + mem::drop(NegativeImbalance::::new(account.free - new_free)); + } + + if new_reserved > account.reserved { + mem::drop(PositiveImbalance::::new(new_reserved - account.reserved)); + } else if new_reserved < account.reserved { + mem::drop(NegativeImbalance::::new(account.reserved - new_reserved)); + } -decl_event!( - pub enum Event where - ::AccountId, - >::Balance - { + account.free = new_free; + account.reserved = new_reserved; + + (account.free, account.reserved) + })?; + Self::deposit_event(Event::BalanceSet(who, free, reserved)); + Ok(().into()) + } + + /// Exactly as `transfer`, except the origin must be root and the source account may be + /// specified. + /// # + /// - Same as transfer, but additional read and write because the source account is not + /// assumed to be in the overlay. + /// # + #[pallet::weight(T::WeightInfo::force_transfer())] + pub fn force_transfer( + origin: OriginFor, + source: ::Source, + dest: ::Source, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let source = T::Lookup::lookup(source)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer( + &source, + &dest, + value, + ExistenceRequirement::AllowDeath, + )?; + Ok(().into()) + } + + /// Same as the [`transfer`] call, but with a check that the transfer will not kill the + /// origin account. + /// + /// 99% of the time you want [`transfer`] instead. + /// + /// [`transfer`]: struct.Pallet.html#method.transfer + /// # + /// - Cheaper than transfer because account cannot be killed. 
+ /// - Base Weight: 51.4 µs + /// - DB Weight: 1 Read and 1 Write to dest (sender is in overlay already) + /// # + #[pallet::weight(T::WeightInfo::transfer_keep_alive())] + pub fn transfer_keep_alive( + origin: OriginFor, + dest: ::Source, + #[pallet::compact] value: T::Balance, + ) -> DispatchResultWithPostInfo { + let transactor = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&transactor, &dest, value, KeepAlive)?; + Ok(().into()) + } + + /// Transfer the entire transferable balance from the caller account. + /// + /// NOTE: This function only attempts to transfer _transferable_ balances. This means that + /// any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be + /// transferred by this function. To ensure that this function results in a killed account, + /// you might need to prepare the account by removing any reference counters, storage + /// deposits, etc... + /// + /// The dispatch origin of this call must be Signed. + /// + /// - `dest`: The recipient of the transfer. + /// - `keep_alive`: A boolean to determine if the `transfer_all` operation should send all + /// of the funds the account has, causing the sender account to be killed (false), or + /// transfer everything except at least the existential deposit, which will guarantee to + /// keep the sender account alive (true). # + /// - O(1). Just like transfer, but reading the user's transferable balance first. + /// # + #[pallet::weight(T::WeightInfo::transfer_all())] + pub fn transfer_all( + origin: OriginFor, + dest: ::Source, + keep_alive: bool, + ) -> DispatchResult { + use fungible::Inspect; + let transactor = ensure_signed(origin)?; + let reducible_balance = Self::reducible_balance(&transactor, keep_alive); + let dest = T::Lookup::lookup(dest)?; + let keep_alive = if keep_alive { KeepAlive } else { AllowDeath }; + >::transfer( + &transactor, + &dest, + reducible_balance, + keep_alive.into(), + )?; + Ok(()) + } + + /// Unreserve some balance from a user by force. + /// + /// Can only be called by ROOT. + #[pallet::weight(T::WeightInfo::force_unreserve())] + pub fn force_unreserve( + origin: OriginFor, + who: ::Source, + amount: T::Balance, + ) -> DispatchResult { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let _leftover = >::unreserve(&who, amount); + Ok(()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { /// An account was created with some free balance. \[account, free_balance\] - Endowed(AccountId, Balance), + Endowed(T::AccountId, T::Balance), /// An account was removed whose balance was non-zero but below ExistentialDeposit, /// resulting in an outright loss. \[account, balance\] - DustLost(AccountId, Balance), + DustLost(T::AccountId, T::Balance), /// Transfer succeeded. \[from, to, value\] - Transfer(AccountId, AccountId, Balance), + Transfer(T::AccountId, T::AccountId, T::Balance), /// A balance was set by root. \[who, free, reserved\] - BalanceSet(AccountId, Balance, Balance), + BalanceSet(T::AccountId, T::Balance, T::Balance), /// Some amount was deposited (e.g. for transaction fees). \[who, deposit\] - Deposit(AccountId, Balance), + Deposit(T::AccountId, T::Balance), /// Some balance was reserved (moved from free to reserved). \[who, value\] - Reserved(AccountId, Balance), + Reserved(T::AccountId, T::Balance), /// Some balance was unreserved (moved from reserved to free). 
\[who, value\] - Unreserved(AccountId, Balance), + Unreserved(T::AccountId, T::Balance), /// Some balance was moved from the reserve of the first account to the second account. /// Final argument indicates the destination balance type. /// \[from, to, balance, destination_status\] - ReserveRepatriated(AccountId, AccountId, Balance, Status), + ReserveRepatriated(T::AccountId, T::AccountId, T::Balance, Status), } -); -decl_error! { - pub enum Error for Module, I: Instance> { + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { /// Vesting balance too high to send value VestingBalance, /// Account liquidity restrictions prevent withdrawal LiquidityRestrictions, - /// Got an overflow after adding - Overflow, /// Balance too low to send value InsufficientBalance, /// Value too low to create account due to existential deposit @@ -284,11 +495,128 @@ decl_error! { ExistingVestingSchedule, /// Beneficiary account must pre-exist DeadAccount, + /// Number of named reserves exceed MaxReserves + TooManyReserves, + } + + /// The total units issued in the system. + #[pallet::storage] + #[pallet::getter(fn total_issuance)] + pub type TotalIssuance, I: 'static = ()> = StorageValue<_, T::Balance, ValueQuery>; + + /// The balance of an account. + /// + /// NOTE: This is only used in the case that this pallet is used to store balances. + #[pallet::storage] + pub type Account, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + AccountData, + ValueQuery, + GetDefault, + ConstU32<300_000>, + >; + + /// Any liquidity locks on some account balances. + /// NOTE: Should only be accessed when setting, changing and freeing a lock. + #[pallet::storage] + #[pallet::getter(fn locks)] + pub type Locks, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + WeakBoundedVec, T::MaxLocks>, + ValueQuery, + GetDefault, + ConstU32<300_000>, + >; + + /// Named reserves on some account balances. + #[pallet::storage] + #[pallet::getter(fn reserves)] + pub type Reserves, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::AccountId, + BoundedVec, T::MaxReserves>, + ValueQuery, + >; + + /// Storage version of the pallet. + /// + /// This is set to v2.0.0 for new networks. + #[pallet::storage] + pub(super) type StorageVersion, I: 'static = ()> = + StorageValue<_, Releases, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + pub balances: Vec<(T::AccountId, T::Balance)>, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { balances: Default::default() } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + let total = self.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n); + >::put(total); + + >::put(Releases::V2_0_0); + + for (_, balance) in &self.balances { + assert!( + *balance >= >::ExistentialDeposit::get(), + "the balance of any account should always be at least the existential deposit.", + ) + } + + // ensure no duplicates exist. + let endowed_accounts = self + .balances + .iter() + .map(|(x, _)| x) + .cloned() + .collect::>(); + + assert!( + endowed_accounts.len() == self.balances.len(), + "duplicate balances in genesis." 
+ ); + + for &(ref who, free) in self.balances.iter() { + assert!(T::AccountStore::insert(who, AccountData { free, ..Default::default() }) + .is_ok()); + } + } + } +} + +#[cfg(feature = "std")] +impl, I: 'static> GenesisConfig { + /// Direct implementation of `GenesisBuild::build_storage`. + /// + /// Kept in order not to break dependency. + pub fn build_storage(&self) -> Result { + >::build_storage(self) + } + + /// Direct implementation of `GenesisBuild::assimilate_storage`. + /// + /// Kept in order not to break dependency. + pub fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { + >::assimilate_storage(self, storage) } } /// Simplified reasons for withdrawing balance. -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] pub enum Reasons { /// Paying system transaction fees. Fee = 0, @@ -300,9 +628,9 @@ pub enum Reasons { impl From for Reasons { fn from(r: WithdrawReasons) -> Reasons { - if r == WithdrawReasons::from(WithdrawReason::TransactionPayment) { + if r == WithdrawReasons::from(WithdrawReasons::TRANSACTION_PAYMENT) { Reasons::Fee - } else if r.contains(WithdrawReason::TransactionPayment) { + } else if r.contains(WithdrawReasons::TRANSACTION_PAYMENT) { Reasons::All } else { Reasons::Misc @@ -313,14 +641,16 @@ impl From for Reasons { impl BitOr for Reasons { type Output = Reasons; fn bitor(self, other: Reasons) -> Reasons { - if self == other { return self } + if self == other { + return self + } Reasons::All } } /// A single lock on a balance. There can be many of these on an account and they "overlap", so the /// same balance is frozen by multiple locks. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] pub struct BalanceLock { /// An identifier for this lock. Only one lock may be in existence for each identifier. pub id: LockIdentifier, @@ -330,8 +660,17 @@ pub struct BalanceLock { pub reasons: Reasons, } +/// Store named reserved balance. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct ReserveData { + /// The identifier for the named reserve. + pub id: ReserveIdentifier, + /// The amount of the named reserve. + pub amount: Balance, +} + /// All balance information for an account. -#[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug, MaxEncodedLen, TypeInfo)] pub struct AccountData { /// Non-reserved part of the balance. There may still be restrictions on this, but it is the /// total pool what may in principle be transferred, reserved and used for tipping. @@ -345,6 +684,7 @@ pub struct AccountData { /// /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens /// that are still 'owned' by the account holder, but which are suspendable. + /// This includes named reserve and unnamed reserve. pub reserved: Balance, /// The amount that `free` may not drop below when withdrawing for *anything except transaction /// fee payment*. @@ -377,7 +717,7 @@ impl AccountData { // A value placed in storage that represents the current version of the Balances storage. // This value is used by the `on_runtime_upgrade` logic to determine whether we run // storage migration logic. This should match directly with the semantic versions of the Rust crate. 
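The `genesis_build` logic above enforces the same invariants the removed `add_extra_genesis` block did: every endowed balance must be at least the existential deposit, and no account may appear twice. Driving it from a test looks roughly like this (a sketch; `Test` and the numeric account ids are assumed to come from one of this pallet's mock runtimes):

```rust
#[test]
fn genesis_endows_accounts() {
	use frame_support::traits::GenesisBuild;

	// Build the base storage for the mock runtime, then endow two accounts.
	let mut storage =
		frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
	pallet_balances::GenesisConfig::<Test> {
		// Each balance must be >= the existential deposit and ids must be
		// unique, or the asserts in `genesis_build` above will panic.
		balances: vec![(1, 100), (2, 200)],
	}
	.assimilate_storage(&mut storage)
	.unwrap();

	let mut ext = sp_io::TestExternalities::new(storage);
	ext.execute_with(|| {
		// TotalIssuance was set to the sum of all endowed balances.
		assert_eq!(pallet_balances::Pallet::<Test>::total_issuance(), 300);
	});
}
```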
-#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] enum Releases { V1_0_0, V2_0_0, @@ -389,193 +729,20 @@ impl Default for Releases { } } -decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Balances { - /// The total units issued in the system. - pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { - config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n) - }): T::Balance; - - /// The balance of an account. - /// - /// NOTE: This is only used in the case that this module is used to store balances. - pub Account: map hasher(blake2_128_concat) T::AccountId => AccountData; - - /// Any liquidity locks on some account balances. - /// NOTE: Should only be accessed when setting, changing and freeing a lock. - pub Locks get(fn locks): map hasher(blake2_128_concat) T::AccountId => Vec>; - - /// Storage version of the pallet. - /// - /// This is set to v2.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V2_0_0): Releases; - } - add_extra_genesis { - config(balances): Vec<(T::AccountId, T::Balance)>; - // ^^ begin, length, amount liquid at genesis - build(|config: &GenesisConfig| { - for (_, balance) in &config.balances { - assert!( - *balance >= >::ExistentialDeposit::get(), - "the balance of any account should always be more than existential deposit.", - ) - } - for &(ref who, free) in config.balances.iter() { - T::AccountStore::insert(who, AccountData { free, .. Default::default() }); - } - }); - } -} - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum amount required to keep an account open. - const ExistentialDeposit: T::Balance = T::ExistentialDeposit::get(); - - fn deposit_event() = default; - - /// Transfer some liquid free balance to another account. - /// - /// `transfer` will set the `FreeBalance` of the sender and receiver. - /// It will decrease the total issuance of the system by the `TransferFee`. - /// If the sender's account is below the existential deposit as a result - /// of the transfer, the account will be reaped. - /// - /// The dispatch origin for this call must be `Signed` by the transactor. - /// - /// # - /// - Dependent on arguments but not critical, given proper implementations for - /// input config types. See related functions below. - /// - It contains a limited number of reads and writes internally and no complex computation. - /// - /// Related functions: - /// - /// - `ensure_can_withdraw` is always called internally but has a bounded complexity. - /// - Transferring balances to accounts that did not exist before will cause - /// `T::OnNewAccount::on_new_account` to be called. - /// - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`. - /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional - /// check that the transfer will not kill the origin account. - /// --------------------------------- - /// - Base Weight: 73.64 µs, worst case scenario (account created, account removed) - /// - DB Weight: 1 Read and 1 Write to destination account - /// - Origin account is already in memory, so no DB operations for them. 
- /// # - #[weight = T::WeightInfo::transfer()] - pub fn transfer( - origin, - dest: ::Source, - #[compact] value: T::Balance - ) { - let transactor = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?; - } - - /// Set the balances of a given account. - /// - /// This will alter `FreeBalance` and `ReservedBalance` in storage. it will - /// also decrease the total issuance of the system (`TotalIssuance`). - /// If the new free or reserved balance is below the existential deposit, - /// it will reset the account nonce (`frame_system::AccountNonce`). - /// - /// The dispatch origin for this call is `root`. - /// - /// # - /// - Independent of the arguments. - /// - Contains a limited number of reads and writes. - /// --------------------- - /// - Base Weight: - /// - Creating: 27.56 µs - /// - Killing: 35.11 µs - /// - DB Weight: 1 Read, 1 Write to `who` - /// # - #[weight = T::WeightInfo::set_balance_creating() // Creates a new account. - .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. - ] - fn set_balance( - origin, - who: ::Source, - #[compact] new_free: T::Balance, - #[compact] new_reserved: T::Balance - ) { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - let existential_deposit = T::ExistentialDeposit::get(); - - let wipeout = new_free + new_reserved < existential_deposit; - let new_free = if wipeout { Zero::zero() } else { new_free }; - let new_reserved = if wipeout { Zero::zero() } else { new_reserved }; - - let (free, reserved) = Self::mutate_account(&who, |account| { - if new_free > account.free { - mem::drop(PositiveImbalance::::new(new_free - account.free)); - } else if new_free < account.free { - mem::drop(NegativeImbalance::::new(account.free - new_free)); - } - - if new_reserved > account.reserved { - mem::drop(PositiveImbalance::::new(new_reserved - account.reserved)); - } else if new_reserved < account.reserved { - mem::drop(NegativeImbalance::::new(account.reserved - new_reserved)); - } - - account.free = new_free; - account.reserved = new_reserved; - - (account.free, account.reserved) - }); - Self::deposit_event(RawEvent::BalanceSet(who, free, reserved)); - } - - /// Exactly as `transfer`, except the origin must be root and the source account may be - /// specified. - /// # - /// - Same as transfer, but additional read and write because the source account is - /// not assumed to be in the overlay. - /// # - #[weight = T::WeightInfo::force_transfer()] - pub fn force_transfer( - origin, - source: ::Source, - dest: ::Source, - #[compact] value: T::Balance - ) { - ensure_root(origin)?; - let source = T::Lookup::lookup(source)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?; - } +pub struct DustCleaner, I: 'static = ()>( + Option<(T::AccountId, NegativeImbalance)>, +); - /// Same as the [`transfer`] call, but with a check that the transfer will not kill the - /// origin account. - /// - /// 99% of the time you want [`transfer`] instead. - /// - /// [`transfer`]: struct.Module.html#method.transfer - /// # - /// - Cheaper than transfer because account cannot be killed. 
-		/// - Base Weight: 51.4 µs
-		/// - DB Weight: 1 Read and 1 Write to dest (sender is in overlay already)
-		/// # </weight>
-		#[weight = T::WeightInfo::transfer_keep_alive()]
-		pub fn transfer_keep_alive(
-			origin,
-			dest: <T::Lookup as StaticLookup>::Source,
-			#[compact] value: T::Balance
-		) {
-			let transactor = ensure_signed(origin)?;
-			let dest = T::Lookup::lookup(dest)?;
-			<Self as Currency<_>>::transfer(&transactor, &dest, value, KeepAlive)?;
+impl<T: Config<I>, I: 'static> Drop for DustCleaner<T, I> {
+	fn drop(&mut self) {
+		if let Some((who, dust)) = self.0.take() {
+			Pallet::<T, I>::deposit_event(Event::DustLost(who, dust.peek()));
+			T::DustRemoval::on_unbalanced(dust);
 		}
 	}
 }

-impl<T: Trait<I>, I: Instance> Module<T, I> {
-	// PRIVATE MUTABLES
-
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	/// Get the free balance of an account.
 	pub fn free_balance(who: impl sp_std::borrow::Borrow<T::AccountId>) -> T::Balance {
 		Self::account(who.borrow()).free
@@ -603,82 +770,192 @@ impl<T: Trait<I>, I: Instance> Module<T, I> {
 		T::AccountStore::get(&who)
 	}

-	/// Places the `free` and `reserved` parts of `new` into `account`. Also does any steps needed
-	/// after mutating an account. This includes DustRemoval unbalancing, in the case than the `new`
-	/// account's total balance is non-zero but below ED.
+	/// Handles any steps needed after mutating an account.
+	///
+	/// This includes DustRemoval unbalancing, in the case that the `new` account's total balance
+	/// is non-zero but below ED.
 	///
-	/// Returns the final free balance, iff the account was previously of total balance zero, known
-	/// as its "endowment".
+	/// Returns two values:
+	/// - `Some` containing the `new` account, iff the account has sufficient balance.
+	/// - `Some` containing the dust to be dropped, iff some dust should be dropped.
 	fn post_mutation(
-		who: &T::AccountId,
+		_who: &T::AccountId,
 		new: AccountData<T::Balance>,
-	) -> Option<AccountData<T::Balance>> {
+	) -> (Option<AccountData<T::Balance>>, Option<NegativeImbalance<T, I>>) {
 		let total = new.total();
 		if total < T::ExistentialDeposit::get() {
-			if !total.is_zero() {
-				T::DustRemoval::on_unbalanced(NegativeImbalance::new(total));
-				Self::deposit_event(RawEvent::DustLost(who.clone(), total));
+			if total.is_zero() {
+				(None, None)
+			} else {
+				(None, Some(NegativeImbalance::new(total)))
+			}
+		} else {
+			(Some(new), None)
+		}
+	}
+
+	fn deposit_consequence(
+		_who: &T::AccountId,
+		amount: T::Balance,
+		account: &AccountData<T::Balance>,
+	) -> DepositConsequence {
+		if amount.is_zero() {
+			return DepositConsequence::Success
+		}
+
+		if TotalIssuance::<T, I>::get().checked_add(&amount).is_none() {
+			return DepositConsequence::Overflow
+		}
+
+		let new_total_balance = match account.total().checked_add(&amount) {
+			Some(x) => x,
+			None => return DepositConsequence::Overflow,
+		};
+
+		if new_total_balance < T::ExistentialDeposit::get() {
+			return DepositConsequence::BelowMinimum
+		}
+
+		// NOTE: We assume that we are a provider, so don't need to do any checks in the
+		// case of account creation.
+
+		DepositConsequence::Success
+	}
+
+	fn withdraw_consequence(
+		who: &T::AccountId,
+		amount: T::Balance,
+		account: &AccountData<T::Balance>,
+	) -> WithdrawConsequence<T::Balance> {
+		if amount.is_zero() {
+			return WithdrawConsequence::Success
+		}
+
+		if TotalIssuance::<T, I>::get().checked_sub(&amount).is_none() {
+			return WithdrawConsequence::Underflow
+		}
+
+		let new_total_balance = match account.total().checked_sub(&amount) {
+			Some(x) => x,
+			None => return WithdrawConsequence::NoFunds,
+		};
+
+		// Provider restriction - total account balance cannot be reduced to zero if it cannot
+		// sustain the loss of a provider reference.
+		// NOTE: This assumes that the pallet is a provider (which is true). If this ever changes,
+		// then this will need to adapt accordingly.
+		let ed = T::ExistentialDeposit::get();
+		let success = if new_total_balance < ed {
+			if frame_system::Pallet::<T>::can_dec_provider(who) {
+				WithdrawConsequence::ReducedToZero(new_total_balance)
+			} else {
+				return WithdrawConsequence::WouldDie
 			}
-			None
 		} else {
-			Some(new)
+			WithdrawConsequence::Success
+		};
+
+		// Enough free funds to have them be reduced.
+		let new_free_balance = match account.free.checked_sub(&amount) {
+			Some(b) => b,
+			None => return WithdrawConsequence::NoFunds,
+		};
+
+		// Eventual free funds must be no less than the frozen balance.
+		let min_balance = account.frozen(Reasons::All);
+		if new_free_balance < min_balance {
+			return WithdrawConsequence::Frozen
 		}
+
+		success
 	}

+	/// Mutate an account to some new value, or delete it entirely with `None`. Will enforce
+	/// `ExistentialDeposit` law, annulling the account as needed.
+	///
+	/// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used
+	/// when it is known that the account already exists.
+	///
+	/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
+	/// the caller will do this.
+	pub fn mutate_account<R>(
+		who: &T::AccountId,
+		f: impl FnOnce(&mut AccountData<T::Balance>) -> R,
+	) -> Result<R, DispatchError> {
+		Self::try_mutate_account(who, |a, _| -> Result<R, DispatchError> { Ok(f(a)) })
+	}

 	/// Mutate an account to some new value, or delete it entirely with `None`. Will enforce
-	/// `ExistentialDeposit` law, annulling the account as needed.
+	/// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the
+	/// result of `f` is an `Err`.
 	///
 	/// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used
 	/// when it is known that the account already exists.
 	///
 	/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
 	/// the caller will do this.
-	pub fn mutate_account<R>(
+	fn try_mutate_account<R, E: From<DispatchError>>(
 		who: &T::AccountId,
-		f: impl FnOnce(&mut AccountData<T::Balance>) -> R
-	) -> R {
-		Self::try_mutate_account(who, |a, _| -> Result<R, Infallible> { Ok(f(a)) })
-			.expect("Error is infallible; qed")
+		f: impl FnOnce(&mut AccountData<T::Balance>, bool) -> Result<R, E>,
+	) -> Result<R, E> {
+		Self::try_mutate_account_with_dust(who, f).map(|(result, dust_cleaner)| {
+			drop(dust_cleaner);
+			result
+		})
 	}

 	/// Mutate an account to some new value, or delete it entirely with `None`. Will enforce
 	/// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the
 	/// result of `f` is an `Err`.
 	///
+	/// It returns both the result from the closure, and an optional `DustCleaner` instance which
+	/// should be dropped once it is known that all nested mutates that could affect storage items
+	/// that the dust handler touches have completed.
+	///
 	/// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used
 	/// when it is known that the account already exists.
 	///
 	/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
 	/// the caller will do this.
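The `try_mutate_account_with_dust` function whose new signature appears next returns the dust wrapped in a `DustCleaner` drop guard rather than handling it inline. The calling pattern inside the pallet is roughly as follows (a sketch; `who` and `fee` are illustrative bindings, and the closure body is invented):

```rust
let (_result, dust_cleaner) = Self::try_mutate_account_with_dust(
	&who,
	|account, _is_new| -> Result<(), DispatchError> {
		// Illustrative mutation only.
		account.free = account.free.saturating_sub(fee);
		Ok(())
	},
)?;
// Any other account mutations that must not race with dust handling go here.
// Dropping the guard emits `DustLost` and calls `T::DustRemoval`.
drop(dust_cleaner);
```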
- fn try_mutate_account( + fn try_mutate_account_with_dust>( who: &T::AccountId, - f: impl FnOnce(&mut AccountData, bool) -> Result - ) -> Result { - T::AccountStore::try_mutate_exists(who, |maybe_account| { + f: impl FnOnce(&mut AccountData, bool) -> Result, + ) -> Result<(R, DustCleaner), E> { + let result = T::AccountStore::try_mutate_exists(who, |maybe_account| { let is_new = maybe_account.is_none(); let mut account = maybe_account.take().unwrap_or_default(); f(&mut account, is_new).map(move |result| { let maybe_endowed = if is_new { Some(account.free) } else { None }; - *maybe_account = Self::post_mutation(who, account); - (maybe_endowed, result) + let maybe_account_maybe_dust = Self::post_mutation(who, account); + *maybe_account = maybe_account_maybe_dust.0; + (maybe_endowed, maybe_account_maybe_dust.1, result) }) - }).map(|(maybe_endowed, result)| { + }); + result.map(|(maybe_endowed, maybe_dust, result)| { if let Some(endowed) = maybe_endowed { - Self::deposit_event(RawEvent::Endowed(who.clone(), endowed)); + Self::deposit_event(Event::Endowed(who.clone(), endowed)); } - result + let dust_cleaner = DustCleaner(maybe_dust.map(|dust| (who.clone(), dust))); + (result, dust_cleaner) }) } /// Update the account entry for `who`, given the locks. fn update_locks(who: &T::AccountId, locks: &[BalanceLock]) { + let bounded_locks = WeakBoundedVec::<_, T::MaxLocks>::force_from( + locks.to_vec(), + Some("Balances Update Locks"), + ); + if locks.len() as u32 > T::MaxLocks::get() { - frame_support::debug::warn!( + log::warn!( + target: "runtime::balances", "Warning: A user has more currency locks than expected. \ A runtime configuration adjustment may be needed." ); } - Self::mutate_account(who, |b| { + // No way this can fail since we do not alter the existential balances. + let res = Self::mutate_account(who, |b| { b.misc_frozen = Zero::zero(); b.fee_frozen = Zero::zero(); for l in locks.iter() { @@ -690,6 +967,7 @@ impl, I: Instance> Module { } } }); + debug_assert!(res.is_ok()); let existed = Locks::::contains_key(who); if locks.is_empty() { @@ -697,32 +975,257 @@ impl, I: Instance> Module { if existed { // TODO: use Locks::::hashed_key // https://github.com/paritytech/substrate/issues/4969 - system::Module::::dec_ref(who); + system::Pallet::::dec_consumers(who); } } else { - Locks::::insert(who, locks); + Locks::::insert(who, bounded_locks); if !existed { - system::Module::::inc_ref(who); + if system::Pallet::::inc_consumers(who).is_err() { + // No providers for the locks. This is impossible under normal circumstances + // since the funds that are under the lock will themselves be stored in the + // account and therefore will need a reference. + log::warn!( + target: "runtime::balances", + "Warning: Attempt to introduce lock consumer reference, yet no providers. \ + This is unexpected but should be safe." + ); + } + } + } + } + + /// Move the reserved balance of one account into the balance of another, according to `status`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. 
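`do_transfer_reserved`, defined next, is the common backend for `ReservableCurrency::repatriate_reserved` and the new `fungible::MutateHold::transfer_held`. From calling code it behaves roughly like this (a sketch, assuming `Balances`, `alice` and `bob` come from a mock runtime):

```rust
use frame_support::traits::{BalanceStatus, ReservableCurrency};

// Move up to 50 units from `alice`'s reserved balance into `bob`'s free
// balance. Because the underlying call is best-effort, the Ok value is the
// portion that could NOT be moved (0 when `alice` had at least 50 reserved).
let unmoved = Balances::repatriate_reserved(&alice, &bob, 50, BalanceStatus::Free)?;
assert!(unmoved <= 50);
```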
+ fn do_transfer_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: T::Balance, + best_effort: bool, + status: Status, + ) -> Result { + if value.is_zero() { + return Ok(Zero::zero()) + } + + if slashed == beneficiary { + return match status { + Status::Free => Ok(Self::unreserve(slashed, value)), + Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), } } + + let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( + beneficiary, + |to_account, is_new| -> Result<(T::Balance, DustCleaner), DispatchError> { + ensure!(!is_new, Error::::DeadAccount); + Self::try_mutate_account_with_dust( + slashed, + |from_account, _| -> Result { + let actual = cmp::min(from_account.reserved, value); + ensure!(best_effort || actual == value, Error::::InsufficientBalance); + match status { + Status::Free => + to_account.free = to_account + .free + .checked_add(&actual) + .ok_or(ArithmeticError::Overflow)?, + Status::Reserved => + to_account.reserved = to_account + .reserved + .checked_add(&actual) + .ok_or(ArithmeticError::Overflow)?, + } + from_account.reserved -= actual; + Ok(actual) + }, + ) + }, + )?; + + Self::deposit_event(Event::ReserveRepatriated( + slashed.clone(), + beneficiary.clone(), + actual, + status, + )); + Ok(actual) + } +} + +impl, I: 'static> fungible::Inspect for Pallet { + type Balance = T::Balance; + + fn total_issuance() -> Self::Balance { + TotalIssuance::::get() + } + fn minimum_balance() -> Self::Balance { + T::ExistentialDeposit::get() + } + fn balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).total() + } + fn reducible_balance(who: &T::AccountId, keep_alive: bool) -> Self::Balance { + let a = Self::account(who); + // Liquid balance is what is neither reserved nor locked/frozen. + let liquid = a.free.saturating_sub(a.fee_frozen.max(a.misc_frozen)); + if frame_system::Pallet::::can_dec_provider(who) && !keep_alive { + liquid + } else { + // `must_remain_to_exist` is the part of liquid balance which must remain to keep total + // over ED. 
+ let must_remain_to_exist = + T::ExistentialDeposit::get().saturating_sub(a.total() - liquid); + liquid.saturating_sub(must_remain_to_exist) + } + } + fn can_deposit(who: &T::AccountId, amount: Self::Balance) -> DepositConsequence { + Self::deposit_consequence(who, amount, &Self::account(who)) + } + fn can_withdraw( + who: &T::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + Self::withdraw_consequence(who, amount, &Self::account(who)) + } +} + +impl, I: 'static> fungible::Mutate for Pallet { + fn mint_into(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { + return Ok(()) + } + Self::try_mutate_account(who, |account, _is_new| -> DispatchResult { + Self::deposit_consequence(who, amount, &account).into_result()?; + account.free += amount; + Ok(()) + })?; + TotalIssuance::::mutate(|t| *t += amount); + Ok(()) + } + + fn burn_from( + who: &T::AccountId, + amount: Self::Balance, + ) -> Result { + if amount.is_zero() { + return Ok(Self::Balance::zero()) + } + let actual = Self::try_mutate_account( + who, + |account, _is_new| -> Result { + let extra = Self::withdraw_consequence(who, amount, &account).into_result()?; + let actual = amount + extra; + account.free -= actual; + Ok(actual) + }, + )?; + TotalIssuance::::mutate(|t| *t -= actual); + Ok(actual) + } +} + +impl, I: 'static> fungible::Transfer for Pallet { + fn transfer( + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> Result { + let er = if keep_alive { KeepAlive } else { AllowDeath }; + >::transfer(source, dest, amount, er).map(|_| amount) + } +} + +impl, I: 'static> fungible::Unbalanced for Pallet { + fn set_balance(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + Self::mutate_account(who, |account| account.free = amount)?; + Ok(()) + } + + fn set_total_issuance(amount: Self::Balance) { + TotalIssuance::::mutate(|t| *t = amount); + } +} + +impl, I: 'static> fungible::InspectHold for Pallet { + fn balance_on_hold(who: &T::AccountId) -> T::Balance { + Self::account(who).reserved + } + fn can_hold(who: &T::AccountId, amount: T::Balance) -> bool { + let a = Self::account(who); + let min_balance = T::ExistentialDeposit::get().max(a.frozen(Reasons::All)); + if a.reserved.checked_add(&amount).is_none() { + return false + } + // We require it to be min_balance + amount to ensure that the full reserved funds may be + // slashed without compromising locked funds or destroying the account. + let required_free = match min_balance.checked_add(&amount) { + Some(x) => x, + None => return false, + }; + a.free >= required_free + } +} +impl, I: 'static> fungible::MutateHold for Pallet { + fn hold(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { + return Ok(()) + } + ensure!(Self::can_reserve(who, amount), Error::::InsufficientBalance); + Self::mutate_account(who, |a| { + a.free -= amount; + a.reserved += amount; + })?; + Ok(()) + } + fn release( + who: &T::AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result { + if amount.is_zero() { + return Ok(amount) + } + // Done on a best-effort basis. 
+ Self::try_mutate_account(who, |a, _| { + let new_free = a.free.saturating_add(amount.min(a.reserved)); + let actual = new_free - a.free; + ensure!(best_effort || actual == amount, Error::::InsufficientBalance); + // ^^^ Guaranteed to be <= amount and <= a.reserved + a.free = new_free; + a.reserved = a.reserved.saturating_sub(actual.clone()); + Ok(actual) + }) + } + fn transfer_held( + source: &T::AccountId, + dest: &T::AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result { + let status = if on_hold { Status::Reserved } else { Status::Free }; + Self::do_transfer_reserved(source, dest, amount, best_effort, status) } } // wrapping these imbalances in a private module is necessary to ensure absolute privacy // of the inner member. mod imbalances { - use super::{ - result, Subtrait, DefaultInstance, Imbalance, Trait, Zero, Instance, Saturating, - StorageValue, TryDrop, - }; + use super::{result, Config, Imbalance, RuntimeDebug, Saturating, TryDrop, Zero}; + use frame_support::traits::SameOrOther; use sp_std::mem; /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been created without any equal and opposite accounting. #[must_use] - pub struct PositiveImbalance, I: Instance=DefaultInstance>(T::Balance); + #[derive(RuntimeDebug, PartialEq, Eq)] + pub struct PositiveImbalance, I: 'static = ()>(T::Balance); - impl, I: Instance> PositiveImbalance { + impl, I: 'static> PositiveImbalance { /// Create a new positive imbalance from a balance. pub fn new(amount: T::Balance) -> Self { PositiveImbalance(amount) @@ -732,22 +1235,29 @@ mod imbalances { /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been destroyed without any equal and opposite accounting. #[must_use] - pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); + #[derive(RuntimeDebug, PartialEq, Eq)] + pub struct NegativeImbalance, I: 'static = ()>(T::Balance); - impl, I: Instance> NegativeImbalance { + impl, I: 'static> NegativeImbalance { /// Create a new negative imbalance from a balance. 
pub fn new(amount: T::Balance) -> Self { NegativeImbalance(amount) } } - impl, I: Instance> TryDrop for PositiveImbalance { + impl, I: 'static> TryDrop for PositiveImbalance { fn try_drop(self) -> result::Result<(), Self> { self.drop_zero() } } - impl, I: Instance> Imbalance for PositiveImbalance { + impl, I: 'static> Default for PositiveImbalance { + fn default() -> Self { + Self::zero() + } + } + + impl, I: 'static> Imbalance for PositiveImbalance { type Opposite = NegativeImbalance; fn zero() -> Self { @@ -777,14 +1287,16 @@ mod imbalances { self.0 = self.0.saturating_add(other.0); mem::forget(other); } - fn offset(self, other: Self::Opposite) -> result::Result { + fn offset(self, other: Self::Opposite) -> SameOrOther { let (a, b) = (self.0, other.0); mem::forget((self, other)); - if a >= b { - Ok(Self(a - b)) + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(NegativeImbalance::new(b - a)) } else { - Err(NegativeImbalance::new(b - a)) + SameOrOther::None } } fn peek(&self) -> T::Balance { @@ -792,13 +1304,19 @@ mod imbalances { } } - impl, I: Instance> TryDrop for NegativeImbalance { + impl, I: 'static> TryDrop for NegativeImbalance { fn try_drop(self) -> result::Result<(), Self> { self.drop_zero() } } - impl, I: Instance> Imbalance for NegativeImbalance { + impl, I: 'static> Default for NegativeImbalance { + fn default() -> Self { + Self::zero() + } + } + + impl, I: 'static> Imbalance for NegativeImbalance { type Opposite = PositiveImbalance; fn zero() -> Self { @@ -828,14 +1346,16 @@ mod imbalances { self.0 = self.0.saturating_add(other.0); mem::forget(other); } - fn offset(self, other: Self::Opposite) -> result::Result { + fn offset(self, other: Self::Opposite) -> SameOrOther { let (a, b) = (self.0, other.0); mem::forget((self, other)); - if a >= b { - Ok(Self(a - b)) + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(PositiveImbalance::new(b - a)) } else { - Err(PositiveImbalance::new(b - a)) + SameOrOther::None } } fn peek(&self) -> T::Balance { @@ -843,83 +1363,24 @@ mod imbalances { } } - impl, I: Instance> Drop for PositiveImbalance { + impl, I: 'static> Drop for PositiveImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { - , I>>::mutate( - |v| *v = v.saturating_add(self.0) - ); + >::mutate(|v| *v = v.saturating_add(self.0)); } } - impl, I: Instance> Drop for NegativeImbalance { + impl, I: 'static> Drop for NegativeImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { - , I>>::mutate( - |v| *v = v.saturating_sub(self.0) - ); + >::mutate(|v| *v = v.saturating_sub(self.0)); } } } -// TODO: #2052 -// Somewhat ugly hack in order to gain access to module's `increase_total_issuance_by` -// using only the Subtrait (which defines only the types that are not dependent -// on Positive/NegativeImbalance). Subtrait must be used otherwise we end up with a -// circular dependency with Trait having some types be dependent on PositiveImbalance -// and PositiveImbalance itself depending back on Trait for its Drop impl (and thus -// its type declaration). -// This works as long as `increase_total_issuance_by` doesn't use the Imbalance -// types (basically for charging fees). -// This should eventually be refactored so that the type item that -// depends on the Imbalance type (DustRemoval) is placed in its own pallet. 
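// Illustrative sketch (hypothetical types): the three-way `offset` contract introduced
// above. The real type is `frame_support::traits::SameOrOther`, whose third variant is
// `None`; it is renamed here only to keep this model self-contained.
enum OffsetOutcome {
    Same(u64),  // surplus remains an imbalance of the same sign
    Other(u64), // surplus flips to the opposite sign
    Neither,    // exact cancellation, nothing left over
}
fn offset_model(pos: u64, neg: u64) -> OffsetOutcome {
    if pos > neg {
        OffsetOutcome::Same(pos - neg)
    } else if neg > pos {
        OffsetOutcome::Other(neg - pos)
    } else {
        OffsetOutcome::Neither
    }
}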
-struct ElevatedTrait, I: Instance>(T, I); -impl, I: Instance> Clone for ElevatedTrait { - fn clone(&self) -> Self { unimplemented!() } -} -impl, I: Instance> PartialEq for ElevatedTrait { - fn eq(&self, _: &Self) -> bool { unimplemented!() } -} -impl, I: Instance> Eq for ElevatedTrait {} -impl, I: Instance> frame_system::Trait for ElevatedTrait { - type BaseCallFilter = T::BaseCallFilter; - type Origin = T::Origin; - type Call = T::Call; - type Index = T::Index; - type BlockNumber = T::BlockNumber; - type Hash = T::Hash; - type Hashing = T::Hashing; - type AccountId = T::AccountId; - type Lookup = T::Lookup; - type Header = T::Header; - type Event = (); - type BlockHashCount = T::BlockHashCount; - type MaximumBlockWeight = T::MaximumBlockWeight; - type DbWeight = T::DbWeight; - type BlockExecutionWeight = T::BlockExecutionWeight; - type ExtrinsicBaseWeight = T::ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = T::MaximumBlockWeight; - type MaximumBlockLength = T::MaximumBlockLength; - type AvailableBlockRatio = T::AvailableBlockRatio; - type Version = T::Version; - type PalletInfo = T::PalletInfo; - type OnNewAccount = T::OnNewAccount; - type OnKilledAccount = T::OnKilledAccount; - type AccountData = T::AccountData; - type SystemWeightInfo = T::SystemWeightInfo; -} -impl, I: Instance> Trait for ElevatedTrait { - type Balance = T::Balance; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = T::ExistentialDeposit; - type AccountStore = T::AccountStore; - type WeightInfo = >::WeightInfo; - type MaxLocks = T::MaxLocks; -} - -impl, I: Instance> Currency for Module where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: 'static> Currency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, { type Balance = T::Balance; type PositiveImbalance = PositiveImbalance; @@ -931,7 +1392,9 @@ impl, I: Instance> Currency for Module where // Check if `value` amount of free balance can be slashed from `who`. fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { return true } + if value.is_zero() { + return true + } Self::free_balance(who) >= value } @@ -946,7 +1409,9 @@ impl, I: Instance> Currency for Module where // Burn funds from the total issuance, returning a positive imbalance for the amount burned. // Is a no-op if amount to be burned is zero. fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { - if amount.is_zero() { return PositiveImbalance::zero() } + if amount.is_zero() { + return PositiveImbalance::zero() + } >::mutate(|issued| { *issued = issued.checked_sub(&amount).unwrap_or_else(|| { amount = *issued; @@ -960,13 +1425,15 @@ impl, I: Instance> Currency for Module where // for the amount issued. // Is a no-op if amount to be issued it zero. fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { - if amount.is_zero() { return NegativeImbalance::zero() } - >::mutate(|issued| + if amount.is_zero() { + return NegativeImbalance::zero() + } + >::mutate(|issued| { *issued = issued.checked_add(&amount).unwrap_or_else(|| { amount = Self::Balance::max_value() - *issued; Self::Balance::max_value() }) - ); + }); NegativeImbalance::new(amount) } @@ -980,7 +1447,7 @@ impl, I: Instance> Currency for Module where // // # // Despite iterating over a list of locks, they are limited by the number of - // lock IDs, which means the number of runtime modules that intend to use and create locks. + // lock IDs, which means the number of runtime pallets that intend to use and create locks. 
// # fn ensure_can_withdraw( who: &T::AccountId, @@ -988,7 +1455,9 @@ impl, I: Instance> Currency for Module where reasons: WithdrawReasons, new_balance: T::Balance, ) -> DispatchResult { - if amount.is_zero() { return Ok(()) } + if amount.is_zero() { + return Ok(()) + } let min_balance = Self::account(who).frozen(reasons.into()); ensure!(new_balance >= min_balance, Error::::LiquidityRestrictions); Ok(()) @@ -1002,37 +1471,56 @@ impl, I: Instance> Currency for Module where value: Self::Balance, existence_requirement: ExistenceRequirement, ) -> DispatchResult { - if value.is_zero() || transactor == dest { return Ok(()) } - - Self::try_mutate_account(dest, |to_account, _| -> DispatchResult { - Self::try_mutate_account(transactor, |from_account, _| -> DispatchResult { - from_account.free = from_account.free.checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; - - // NOTE: total stake being stored in the same type means that this could never overflow - // but better to be safe than sorry. - to_account.free = to_account.free.checked_add(&value).ok_or(Error::::Overflow)?; - - let ed = T::ExistentialDeposit::get(); - ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); + if value.is_zero() || transactor == dest { + return Ok(()) + } - Self::ensure_can_withdraw( + Self::try_mutate_account_with_dust( + dest, + |to_account, _| -> Result, DispatchError> { + Self::try_mutate_account_with_dust( transactor, - value, - WithdrawReason::Transfer.into(), - from_account.free, - )?; - - let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; - let allow_death = allow_death && system::Module::::allow_death(transactor); - ensure!(allow_death || from_account.free >= ed, Error::::KeepAlive); - - Ok(()) - }) - })?; + |from_account, _| -> DispatchResult { + from_account.free = from_account + .free + .checked_sub(&value) + .ok_or(Error::::InsufficientBalance)?; + + // NOTE: total stake being stored in the same type means that this could + // never overflow but better to be safe than sorry. + to_account.free = + to_account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + + let ed = T::ExistentialDeposit::get(); + ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); + + Self::ensure_can_withdraw( + transactor, + value, + WithdrawReasons::TRANSFER, + from_account.free, + ) + .map_err(|_| Error::::LiquidityRestrictions)?; + + // TODO: This is over-conservative. There may now be other providers, and + // this pallet may not even be a provider. + let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; + let allow_death = + allow_death && system::Pallet::::can_dec_provider(transactor); + ensure!( + allow_death || from_account.total() >= ed, + Error::::KeepAlive + ); + + Ok(()) + }, + ) + .map(|(_, maybe_dust_cleaner)| maybe_dust_cleaner) + }, + )?; // Emit transfer event. - Self::deposit_event(RawEvent::Transfer(transactor.clone(), dest.clone(), value)); + Self::deposit_event(Event::Transfer(transactor.clone(), dest.clone(), value)); Ok(()) } @@ -1040,31 +1528,68 @@ impl, I: Instance> Currency for Module where /// Slash a target account `who`, returning the negative imbalance created and any left over /// amount that could not be slashed. /// - /// Is a no-op if `value` to be slashed is zero. + /// Is a no-op if `value` to be slashed is zero or the account does not exist. /// /// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn - /// from in extreme circumstances. 
`can_slash()` should be used prior to `slash()` to avoid having - /// to draw from reserved funds, however we err on the side of punishment if things are inconsistent - /// or `can_slash` wasn't used appropriately. - fn slash( - who: &T::AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } - - Self::mutate_account(who, |account| { - let free_slash = cmp::min(account.free, value); - account.free -= free_slash; + /// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid + /// having to draw from reserved funds, however we err on the side of punishment if things are + /// inconsistent or `can_slash` wasn't used appropriately. + fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } + if Self::total_balance(&who).is_zero() { + return (NegativeImbalance::zero(), value) + } - let remaining_slash = value - free_slash; - if !remaining_slash.is_zero() { - let reserved_slash = cmp::min(account.reserved, remaining_slash); - account.reserved -= reserved_slash; - (NegativeImbalance::new(free_slash + reserved_slash), remaining_slash - reserved_slash) - } else { - (NegativeImbalance::new(value), Zero::zero()) + for attempt in 0..2 { + match Self::try_mutate_account( + who, + |account, + _is_new| + -> Result<(Self::NegativeImbalance, Self::Balance), DispatchError> { + // Best value is the most amount we can slash following liveness rules. + let best_value = match attempt { + // First attempt we try to slash the full amount, and see if liveness issues + // happen. + 0 => value, + // If acting as a critical provider (i.e. first attempt failed), then slash + // as much as possible while leaving at least at ED. + _ => value.min( + (account.free + account.reserved) + .saturating_sub(T::ExistentialDeposit::get()), + ), + }; + + let free_slash = cmp::min(account.free, best_value); + account.free -= free_slash; // Safe because of above check + let remaining_slash = best_value - free_slash; // Safe because of above check + + if !remaining_slash.is_zero() { + // If we have remaining slash, take it from reserved balance. + let reserved_slash = cmp::min(account.reserved, remaining_slash); + account.reserved -= reserved_slash; // Safe because of above check + Ok(( + NegativeImbalance::new(free_slash + reserved_slash), + value - free_slash - reserved_slash, /* Safe because value is gt or + * eq total slashed */ + )) + } else { + // Else we are done! + Ok(( + NegativeImbalance::new(free_slash), + value - free_slash, // Safe because value is gt or eq to total slashed + )) + } + }, + ) { + Ok(r) => return r, + Err(_) => (), } - }) + } + + // Should never get here. But we'll be defensive anyway. + (Self::NegativeImbalance::zero(), value) } /// Deposit some `value` into the free balance of an existing target account `who`. @@ -1072,40 +1597,55 @@ impl, I: Instance> Currency for Module where /// Is a no-op if the `value` to be deposited is zero. 
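// Illustrative sketch (hypothetical helper): the two-attempt slashing rule above, on plain
// u64s. Attempt 0 tries the full value; if the liveness rules reject that, attempt 1 caps
// the slash so at least the existential deposit survives.
fn best_slash_model(free: u64, reserved: u64, ed: u64, value: u64, attempt: u32) -> u64 {
    match attempt {
        0 => value,
        _ => value.min((free + reserved).saturating_sub(ed)),
    }
}
// With free = 1_000, reserved = 0, ed = 100 and a consumer reference keeping the account
// alive, a slash of 1_300 settles at 900 on the second attempt, leaving 400 unslashed --
// the same figures as in `slash_loop_works` further down.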
fn deposit_into_existing( who: &T::AccountId, - value: Self::Balance + value: Self::Balance, ) -> Result { - if value.is_zero() { return Ok(PositiveImbalance::zero()) } + if value.is_zero() { + return Ok(PositiveImbalance::zero()) + } - Self::try_mutate_account(who, |account, is_new| -> Result { - ensure!(!is_new, Error::::DeadAccount); - account.free = account.free.checked_add(&value).ok_or(Error::::Overflow)?; - Ok(PositiveImbalance::new(value)) - }) + Self::try_mutate_account( + who, + |account, is_new| -> Result { + ensure!(!is_new, Error::::DeadAccount); + account.free = account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + Ok(PositiveImbalance::new(value)) + }, + ) } /// Deposit some `value` into the free balance of `who`, possibly creating a new account. /// /// This function is a no-op if: /// - the `value` to be deposited is zero; or - /// - if the `value` to be deposited is less than the ED and the account does not yet exist; or + /// - the `value` to be deposited is less than the required ED and the account does not yet + /// exist; or + /// - the deposit would necessitate the account to exist and there are no provider references; + /// or /// - `value` is so large it would cause the balance of `who` to overflow. - fn deposit_creating( - who: &T::AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance { - if value.is_zero() { return Self::PositiveImbalance::zero() } + fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { + if value.is_zero() { + return Self::PositiveImbalance::zero() + } + + let r = Self::try_mutate_account( + who, + |account, is_new| -> Result { + let ed = T::ExistentialDeposit::get(); + ensure!(value >= ed || !is_new, Error::::ExistentialDeposit); - Self::try_mutate_account(who, |account, is_new| -> Result { - // bail if not yet created and this operation wouldn't be enough to create it. - let ed = T::ExistentialDeposit::get(); - ensure!(value >= ed || !is_new, Self::PositiveImbalance::zero()); + // defensive only: overflow should never happen, however in case it does, then this + // operation is a no-op. + account.free = match account.free.checked_add(&value) { + Some(x) => x, + None => return Ok(Self::PositiveImbalance::zero()), + }; - // defensive only: overflow should never happen, however in case it does, then this - // operation is a no-op. - account.free = account.free.checked_add(&value).ok_or_else(|| Self::PositiveImbalance::zero())?; + Ok(PositiveImbalance::new(value)) + }, + ) + .unwrap_or_else(|_| Self::PositiveImbalance::zero()); - Ok(PositiveImbalance::new(value)) - }).unwrap_or_else(|x| x) + r } /// Withdraw some free balance from an account, respecting existence requirements. @@ -1117,69 +1657,79 @@ impl, I: Instance> Currency for Module where reasons: WithdrawReasons, liveness: ExistenceRequirement, ) -> result::Result { - if value.is_zero() { return Ok(NegativeImbalance::zero()); } + if value.is_zero() { + return Ok(NegativeImbalance::zero()) + } - Self::try_mutate_account(who, |account, _| - -> Result - { - let new_free_account = account.free.checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; + Self::try_mutate_account( + who, + |account, _| -> Result { + let new_free_account = + account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; - // bail if we need to keep the account alive and this would kill it. 
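// Illustrative sketch (hypothetical helper): the liveness test both the old and the new
// `withdraw` bodies perform, on plain u64s; `value` is already checked to be <= free.
fn would_kill_model(free: u64, reserved: u64, value: u64, ed: u64) -> bool {
    let would_be_dead = (free - value) + reserved < ed;
    // Only a transition to dead counts: an account already under the ED (e.g. after the ED
    // was raised) is not considered "killed" by this withdrawal.
    would_be_dead && free + reserved >= ed
}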
- let ed = T::ExistentialDeposit::get(); - let would_be_dead = new_free_account + account.reserved < ed; - let would_kill = would_be_dead && account.free + account.reserved >= ed; - ensure!(liveness == AllowDeath || !would_kill, Error::::KeepAlive); + // bail if we need to keep the account alive and this would kill it. + let ed = T::ExistentialDeposit::get(); + let would_be_dead = new_free_account + account.reserved < ed; + let would_kill = would_be_dead && account.free + account.reserved >= ed; + ensure!(liveness == AllowDeath || !would_kill, Error::::KeepAlive); - Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; + Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; - account.free = new_free_account; + account.free = new_free_account; - Ok(NegativeImbalance::new(value)) - }) + Ok(NegativeImbalance::new(value)) + }, + ) } /// Force the new free balance of a target account `who` to some new value `balance`. - fn make_free_balance_be(who: &T::AccountId, value: Self::Balance) - -> SignedImbalance - { - Self::try_mutate_account(who, |account, is_new| - -> Result, ()> - { - let ed = T::ExistentialDeposit::get(); - // If we're attempting to set an existing account to less than ED, then - // bypass the entire operation. It's a no-op if you follow it through, but - // since this is an instance where we might account for a negative imbalance - // (in the dust cleaner of set_account) before we account for its actual - // equal and opposite cause (returned as an Imbalance), then in the - // instance that there's no other accounts on the system at all, we might - // underflow the issuance and our arithmetic will be off. - ensure!(value.saturating_add(account.reserved) >= ed || !is_new, ()); - - let imbalance = if account.free <= value { - SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) - } else { - SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) - }; - account.free = value; - Ok(imbalance) - }).unwrap_or_else(|_| SignedImbalance::Positive(Self::PositiveImbalance::zero())) + fn make_free_balance_be( + who: &T::AccountId, + value: Self::Balance, + ) -> SignedImbalance { + Self::try_mutate_account( + who, + |account, + is_new| + -> Result, DispatchError> { + let ed = T::ExistentialDeposit::get(); + let total = value.saturating_add(account.reserved); + // If we're attempting to set an existing account to less than ED, then + // bypass the entire operation. It's a no-op if you follow it through, but + // since this is an instance where we might account for a negative imbalance + // (in the dust cleaner of set_account) before we account for its actual + // equal and opposite cause (returned as an Imbalance), then in the + // instance that there's no other accounts on the system at all, we might + // underflow the issuance and our arithmetic will be off. + ensure!(total >= ed || !is_new, Error::::ExistentialDeposit); + + let imbalance = if account.free <= value { + SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) + } else { + SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) + }; + account.free = value; + Ok(imbalance) + }, + ) + .unwrap_or_else(|_| SignedImbalance::Positive(Self::PositiveImbalance::zero())) } } -impl, I: Instance> ReservableCurrency for Module where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: 'static> ReservableCurrency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, { /// Check if `who` can reserve `value` from their free balance. 
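// Illustrative sketch (hypothetical helper): the `can_reserve` check documented below, on
// plain u64s. The `frozen` figure stands in for the lock inspection that
// `ensure_can_withdraw` performs against the new balance.
fn can_reserve_model(free: u64, frozen: u64, value: u64) -> bool {
    match free.checked_sub(value) {
        Some(new_free) => new_free >= frozen,
        None => false,
    }
}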
/// /// Always `true` if value to be reserved is zero. fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { return true } - Self::account(who).free - .checked_sub(&value) - .map_or(false, |new_balance| - Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve.into(), new_balance).is_ok() - ) + if value.is_zero() { + return true + } + Self::account(who).free.checked_sub(&value).map_or(false, |new_balance| { + Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, new_balance).is_ok() + }) } fn reserved_balance(who: &T::AccountId) -> Self::Balance { @@ -1190,53 +1740,97 @@ impl, I: Instance> ReservableCurrency for Module /// /// Is a no-op if value to be reserved is zero. fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { - if value.is_zero() { return Ok(()) } + if value.is_zero() { + return Ok(()) + } Self::try_mutate_account(who, |account, _| -> DispatchResult { - account.free = account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; - account.reserved = account.reserved.checked_add(&value).ok_or(Error::::Overflow)?; - Self::ensure_can_withdraw(&who, value.clone(), WithdrawReason::Reserve.into(), account.free) + account.free = + account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; + account.reserved = + account.reserved.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + Self::ensure_can_withdraw(&who, value.clone(), WithdrawReasons::RESERVE, account.free) })?; - Self::deposit_event(RawEvent::Reserved(who.clone(), value)); + Self::deposit_event(Event::Reserved(who.clone(), value)); Ok(()) } /// Unreserve some funds, returning any amount that was unable to be unreserved. /// - /// Is a no-op if the value to be unreserved is zero. + /// Is a no-op if the value to be unreserved is zero or the account does not exist. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { - if value.is_zero() { return Zero::zero() } + if value.is_zero() { + return Zero::zero() + } + if Self::total_balance(&who).is_zero() { + return value + } - let actual = Self::mutate_account(who, |account| { + let actual = match Self::mutate_account(who, |account| { let actual = cmp::min(account.reserved, value); account.reserved -= actual; - // defensive only: this can never fail since total issuance which is at least free+reserved - // fits into the same data type. + // defensive only: this can never fail since total issuance which is at least + // free+reserved fits into the same data type. account.free = account.free.saturating_add(actual); actual - }); - - Self::deposit_event(RawEvent::Unreserved(who.clone(), actual.clone())); + }) { + Ok(x) => x, + Err(_) => { + // This should never happen since we don't alter the total amount in the account. + // If it ever does, then we should fail gracefully though, indicating that nothing + // could be done. + return value + }, + }; + + Self::deposit_event(Event::Unreserved(who.clone(), actual.clone())); value - actual } /// Slash from reserved balance, returning the negative imbalance created, /// and any amount that was unable to be slashed. /// - /// Is a no-op if the value to be slashed is zero. + /// Is a no-op if the value to be slashed is zero or the account does not exist. 
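// Illustrative sketch (hypothetical helper): `unreserve`'s return convention above, on
// plain u64s -- the result is the portion of `value` that could NOT be unreserved.
fn unreserve_model(free: &mut u64, reserved: &mut u64, value: u64) -> u64 {
    let actual = value.min(*reserved);
    *reserved -= actual;
    *free = free.saturating_add(actual);
    value - actual
}
// unreserve_model(&mut 0, &mut 5, 6) == 1, matching the events test below where
// unreserving 6 against a 5-unit reserve emits `Unreserved(1, 5)`.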
fn slash_reserved( who: &T::AccountId, - value: Self::Balance + value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } + if Self::total_balance(&who).is_zero() { + return (NegativeImbalance::zero(), value) + } - Self::mutate_account(who, |account| { - // underflow should never happen, but if it does, there's nothing to be done here. - let actual = cmp::min(account.reserved, value); - account.reserved -= actual; - (NegativeImbalance::new(actual), value - actual) - }) + // NOTE: `mutate_account` may fail if it attempts to reduce the balance to the point that an + // account is attempted to be illegally destroyed. + + for attempt in 0..2 { + match Self::mutate_account(who, |account| { + let best_value = match attempt { + 0 => value, + // If acting as a critical provider (i.e. first attempt failed), then ensure + // slash leaves at least the ED. + _ => value.min( + (account.free + account.reserved) + .saturating_sub(T::ExistentialDeposit::get()), + ), + }; + + let actual = cmp::min(account.reserved, best_value); + account.reserved -= actual; + + // underflow should never happen, but if it does, there's nothing to be done here. + (NegativeImbalance::new(actual), value - actual) + }) { + Ok(r) => return r, + Err(_) => (), + } + } + // Should never get here as we ensure that ED is left in the second attempt. + // In case we do, though, then we fail gracefully. + (Self::NegativeImbalance::zero(), value) } /// Move the reserved balance of one account into the balance of another, according to `status`. @@ -1250,54 +1844,240 @@ impl<T: Trait<I>, I: Instance> ReservableCurrency<T::AccountId> for Module<T, I> value: Self::Balance, status: Status, ) -> Result<Self::Balance, DispatchError> { - if value.is_zero() { return Ok(Zero::zero()) } + let actual = Self::do_transfer_reserved(slashed, beneficiary, value, true, status)?; + Ok(value.saturating_sub(actual)) + } +} - if slashed == beneficiary { - return match status { - Status::Free => Ok(Self::unreserve(slashed, value)), - Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), +impl<T: Config<I>, I: 'static> NamedReservableCurrency<T::AccountId> for Pallet<T, I> +where + T::Balance: MaybeSerializeDeserialize + Debug, +{ + type ReserveIdentifier = T::ReserveIdentifier; + + fn reserved_balance_named(id: &Self::ReserveIdentifier, who: &T::AccountId) -> Self::Balance { + let reserves = Self::reserves(who); + reserves + .binary_search_by_key(id, |data| data.id) + .map(|index| reserves[index].amount) + .unwrap_or_default() + } + + /// Move `value` from the free balance of `who` to a named reserve balance. + /// + /// Is a no-op if value to be reserved is zero. + fn reserve_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> DispatchResult { + if value.is_zero() { + return Ok(()) + } + + Reserves::<T, I>::try_mutate(who, |reserves| -> DispatchResult { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + // this add can't overflow but just to be defensive. + reserves[index].amount = reserves[index].amount.saturating_add(value); + }, + Err(index) => { + reserves + .try_insert(index, ReserveData { id: id.clone(), amount: value }) + .map_err(|_| Error::<T, I>::TooManyReserves)?; + }, }; + <Self as ReservableCurrency<_>>::reserve(who, value)?; + Ok(()) + }) + } + + /// Unreserve some funds, returning any amount that was unable to be unreserved. + /// + /// Is a no-op if the value to be unreserved is zero.
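// Illustrative sketch (hypothetical types): the sorted-vec bookkeeping behind the named
// reserves above. The pallet keeps a bounded vec of `ReserveData` ordered by `id`, so
// `binary_search_by_key` can find or place an entry; the real code uses `try_insert`
// against a bound (failing with `TooManyReserves`) where this model just inserts.
struct ReserveEntry {
    id: [u8; 8],
    amount: u64,
}
fn reserve_named_model(reserves: &mut Vec<ReserveEntry>, id: [u8; 8], value: u64) {
    match reserves.binary_search_by_key(&id, |d| d.id) {
        // Existing named reserve: top it up.
        Ok(i) => reserves[i].amount = reserves[i].amount.saturating_add(value),
        // New id: insert at the position that keeps the vec sorted.
        Err(i) => reserves.insert(i, ReserveEntry { id, amount: value }),
    }
}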
+ fn unreserve_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> Self::Balance { + if value.is_zero() { + return Zero::zero() } - let actual = Self::try_mutate_account(beneficiary, |to_account, is_new|-> Result { - ensure!(!is_new, Error::::DeadAccount); - Self::try_mutate_account(slashed, |from_account, _| -> Result { - let actual = cmp::min(from_account.reserved, value); - match status { - Status::Free => to_account.free = to_account.free.checked_add(&actual).ok_or(Error::::Overflow)?, - Status::Reserved => to_account.reserved = to_account.reserved.checked_add(&actual).ok_or(Error::::Overflow)?, + Reserves::::mutate_exists(who, |maybe_reserves| -> Self::Balance { + if let Some(reserves) = maybe_reserves.as_mut() { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let remain = >::unreserve(who, to_change); + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + if reserves[index].amount.is_zero() { + if reserves.len() == 1 { + // no more named reserves + *maybe_reserves = None; + } else { + // remove this named reserve + reserves.remove(index); + } + } + + value - actual + }, + Err(_) => value, } - from_account.reserved -= actual; - Ok(actual) - }) - })?; + } else { + value + } + }) + } + + /// Slash from reserved balance, returning the negative imbalance created, + /// and any amount that was unable to be slashed. + /// + /// Is a no-op if the value to be slashed is zero. + fn slash_reserved_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } - Self::deposit_event(RawEvent::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); - Ok(value - actual) + Reserves::::mutate(who, |reserves| -> (Self::NegativeImbalance, Self::Balance) { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let (imb, remain) = + >::slash_reserved(who, to_change); + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + (imb, value - actual) + }, + Err(_) => (NegativeImbalance::zero(), value), + } + }) } -} -/// Implement `OnKilledAccount` to remove the local account, if using local account storage. -/// -/// NOTE: You probably won't need to use this! This only needs to be "wired in" to System module -/// if you're using the local balance storage. **If you're using the composite system account -/// storage (which is the default in most examples and tests) then there's no need.** -impl, I: Instance> OnKilledAccount for Module { - fn on_killed_account(who: &T::AccountId) { - Account::::mutate_exists(who, |account| { - let total = account.as_ref().map(|acc| acc.total()).unwrap_or_default(); - if !total.is_zero() { - T::DustRemoval::on_unbalanced(NegativeImbalance::new(total)); - Self::deposit_event(RawEvent::DustLost(who.clone(), total)); + /// Move the reserved balance of one account into the balance of another, according to `status`. 
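// Illustrative sketch (hypothetical helper): the per-entry repatriation bookkeeping that
// the body below performs, on plain u64s -- move what is actually held, and hand the
// unmoved remainder back to the caller.
fn repatriate_model(slashed_reserved: &mut u64, beneficiary_side: &mut u64, value: u64) -> u64 {
    let actual = value.min(*slashed_reserved);
    *slashed_reserved -= actual;
    *beneficiary_side += actual; // lands in free or reserved, depending on `status`
    value - actual
}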
+ /// If `status` is `Reserved`, the balance will be reserved with given `id`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id is equal to `beneficiary` and the `status` is `Reserved`. + fn repatriate_reserved_named( + id: &Self::ReserveIdentifier, + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: Self::Balance, + status: Status, + ) -> Result<Self::Balance, DispatchError> { + if value.is_zero() { + return Ok(Zero::zero()) + } + + if slashed == beneficiary { + return match status { + Status::Free => Ok(Self::unreserve_named(id, slashed, value)), + Status::Reserved => + Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), } - *account = None; - }); + } + + Reserves::<T, I>::try_mutate(slashed, |reserves| -> Result<Self::Balance, DispatchError> { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let to_change = cmp::min(reserves[index].amount, value); + + let actual = if status == Status::Reserved { + // move it into the reserve under the same identifier + Reserves::<T, I>::try_mutate( + beneficiary, + |reserves| -> Result<Self::Balance, DispatchError> { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let remain = + <Self as ReservableCurrency<_>>::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive + // here + let actual = to_change.saturating_sub(remain); + + // this add can't overflow but just to be defensive. + reserves[index].amount = + reserves[index].amount.saturating_add(actual); + + Ok(actual) + }, + Err(index) => { + let remain = + <Self as ReservableCurrency<_>>::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive + // here + let actual = to_change.saturating_sub(remain); + + reserves + .try_insert( + index, + ReserveData { id: id.clone(), amount: actual }, + ) + .map_err(|_| Error::<T, I>::TooManyReserves)?; + + Ok(actual) + }, + } + }, + )?
+ } else { + let remain = >::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive here + to_change.saturating_sub(remain) + }; + + // `actual <= to_change` and `to_change <= amount`; qed; + reserves[index].amount -= actual; + + Ok(value - actual) + }, + Err(_) => Ok(value), + } + }) } } -impl, I: Instance> LockableCurrency for Module +impl, I: 'static> LockableCurrency for Pallet where - T::Balance: MaybeSerializeDeserialize + Debug + T::Balance: MaybeSerializeDeserialize + Debug, { type Moment = T::BlockNumber; @@ -1311,9 +2091,12 @@ where amount: T::Balance, reasons: WithdrawReasons, ) { - if amount.is_zero() || reasons.is_none() { return } + if amount.is_zero() || reasons.is_empty() { + return + } let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who).into_iter() + let mut locks = Self::locks(who) + .into_iter() .filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) }) .collect::>(); if let Some(lock) = new_lock { @@ -1330,41 +2113,33 @@ where amount: T::Balance, reasons: WithdrawReasons, ) { - if amount.is_zero() || reasons.is_none() { return } + if amount.is_zero() || reasons.is_empty() { + return + } let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who).into_iter().filter_map(|l| - if l.id == id { - new_lock.take().map(|nl| { - BalanceLock { + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| { + if l.id == id { + new_lock.take().map(|nl| BalanceLock { id: l.id, amount: l.amount.max(nl.amount), reasons: l.reasons | nl.reasons, - } - }) - } else { - Some(l) - }).collect::>(); + }) + } else { + Some(l) + } + }) + .collect::>(); if let Some(lock) = new_lock { locks.push(lock) } Self::update_locks(who, &locks[..]); } - fn remove_lock( - id: LockIdentifier, - who: &T::AccountId, - ) { + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { let mut locks = Self::locks(who); locks.retain(|l| l.id != id); Self::update_locks(who, &locks[..]); } } - -impl, I: Instance> IsDeadAccount for Module where - T::Balance: MaybeSerializeDeserialize + Debug -{ - fn is_dead_account(who: &T::AccountId) -> bool { - // this should always be exactly equivalent to `Self::account(who).total().is_zero()` if ExistentialDeposit > 0 - !T::AccountStore::is_explicit(who) - } -} diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 210c75631da63..a08643821eba8 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,31 +19,17 @@ #![cfg(test)] -#[derive(Debug)] -pub struct CallWithDispatchInfo; -impl sp_runtime::traits::Dispatchable for CallWithDispatchInfo { - type Origin = (); - type Trait = (); - type Info = frame_support::weights::DispatchInfo; - type PostInfo = frame_support::weights::PostDispatchInfo; - - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); - } -} - #[macro_export] macro_rules! 
decl_tests { ($test:ty, $ext_builder:ty, $existential_deposit:expr) => { use crate::*; - use sp_runtime::{FixedPointNumber, traits::{SignedExtension, BadOrigin}}; + use sp_runtime::{ArithmeticError, FixedPointNumber, traits::{SignedExtension, BadOrigin}}; use frame_support::{ - assert_noop, assert_ok, assert_err, + assert_noop, assert_storage_noop, assert_ok, assert_err, traits::{ - LockableCurrency, LockIdentifier, WithdrawReason, WithdrawReasons, - Currency, ReservableCurrency, ExistenceRequirement::AllowDeath, StoredMap + LockableCurrency, LockIdentifier, WithdrawReasons, + Currency, ReservableCurrency, ExistenceRequirement::AllowDeath } }; use pallet_transaction_payment::{ChargeTransactionPayment, Multiplier}; @@ -52,10 +38,8 @@ macro_rules! decl_tests { const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; - pub type System = frame_system::Module<$test>; - pub type Balances = Module<$test>; - - pub const CALL: &<$test as frame_system::Trait>::Call = &$crate::tests::CallWithDispatchInfo; + pub const CALL: &<$test as frame_system::Config>::Call = + &Call::Balances(pallet_balances::Call::transfer { dest: 0, value: 0 }); /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { @@ -70,10 +54,6 @@ macro_rules! decl_tests { evt } - fn last_event() -> Event { - system::Module::::events().pop().expect("Event expected").event - } - #[test] fn basic_locking_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { @@ -91,7 +71,32 @@ macro_rules! decl_tests { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); - assert!(!<::AccountStore as StoredMap>>::is_explicit(&1)); + // Check that the account is dead. + assert!(!frame_system::Account::::contains_key(&1)); + }); + } + + #[test] + fn reap_failed_due_to_provider_and_consumer() { + <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { + // SCENARIO: only one provider and there are remaining consumers. + assert_ok!(System::inc_consumers(&1)); + assert!(!System::can_dec_provider(&1)); + assert_noop!( + >::transfer(&1, &2, 10, AllowDeath), + Error::<$test, _>::KeepAlive + ); + assert!(System::account_exists(&1)); + assert_eq!(Balances::free_balance(1), 10); + + // SCENARIO: more than one provider, but will not kill account due to other provider. + assert_eq!(System::inc_providers(&1), frame_system::IncRefStatus::Existed); + assert_eq!(System::providers(&1), 2); + assert!(System::can_dec_provider(&1)); + assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); + assert_eq!(System::providers(&1), 1); + assert!(System::account_exists(&1)); + assert_eq!(Balances::free_balance(1), 0); }); } @@ -106,7 +111,7 @@ macro_rules! decl_tests { #[test] fn lock_removal_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::max_value(), WithdrawReasons::all()); + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); Balances::remove_lock(ID_1, &1); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); @@ -115,7 +120,7 @@ macro_rules! 
decl_tests { #[test] fn lock_replacement_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::max_value(), WithdrawReasons::all()); + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::all()); Balances::set_lock(ID_1, &1, 5, WithdrawReasons::all()); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); @@ -133,7 +138,7 @@ macro_rules! decl_tests { #[test] fn combination_locking_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, u64::max_value(), WithdrawReasons::none()); + Balances::set_lock(ID_1, &1, u64::MAX, WithdrawReasons::empty()); Balances::set_lock(ID_2, &1, 0, WithdrawReasons::all()); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); }); @@ -167,8 +172,10 @@ macro_rules! decl_tests { .monied(true) .build() .execute_with(|| { - pallet_transaction_payment::NextFeeMultiplier::put(Multiplier::saturating_from_integer(1)); - Balances::set_lock(ID_1, &1, 10, WithdrawReason::Reserve.into()); + pallet_transaction_payment::NextFeeMultiplier::<$test>::put( + Multiplier::saturating_from_integer(1) + ); + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::RESERVE); assert_noop!( >::transfer(&1, &2, 1, AllowDeath), Error::<$test, _>::LiquidityRestrictions @@ -184,15 +191,15 @@ macro_rules! decl_tests { &info_from_weight(1), 1, ).is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert_ok!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, &info_from_weight(1), 1, - ).is_ok()); + )); - Balances::set_lock(ID_1, &1, 10, WithdrawReason::TransactionPayment.into()); + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::TRANSACTION_PAYMENT); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); assert_ok!(>::reserve(&1, 1)); assert!( as SignedExtension>::pre_dispatch( @@ -237,17 +244,17 @@ macro_rules! decl_tests { #[test] fn lock_reasons_extension_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { - Balances::set_lock(ID_1, &1, 10, WithdrawReason::Transfer.into()); + Balances::set_lock(ID_1, &1, 10, WithdrawReasons::TRANSFER); assert_noop!( >::transfer(&1, &2, 6, AllowDeath), Error::<$test, _>::LiquidityRestrictions ); - Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::none()); + Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::empty()); assert_noop!( >::transfer(&1, &2, 6, AllowDeath), Error::<$test, _>::LiquidityRestrictions ); - Balances::extend_lock(ID_1, &1, 10, WithdrawReason::Reserve.into()); + Balances::extend_lock(ID_1, &1, 10, WithdrawReasons::RESERVE); assert_noop!( >::transfer(&1, &2, 6, AllowDeath), Error::<$test, _>::LiquidityRestrictions @@ -262,14 +269,12 @@ macro_rules! decl_tests { .monied(true) .build() .execute_with(|| { - assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist // ext_deposit is 10, value is 9, not satisfies for ext_deposit assert_noop!( Balances::transfer(Some(1).into(), 5, 9), Error::<$test, _>::ExistentialDeposit, ); - assert_eq!(Balances::is_dead_account(&5), true); // account 5 should not exist assert_eq!(Balances::free_balance(1), 100); }); } @@ -282,31 +287,25 @@ macro_rules! 
decl_tests { .build() .execute_with(|| { System::inc_account_nonce(&2); - assert_eq!(Balances::is_dead_account(&2), false); - assert_eq!(Balances::is_dead_account(&5), true); assert_eq!(Balances::total_balance(&2), 256 * 20); assert_ok!(Balances::reserve(&2, 256 * 19 + 1)); // account 2 becomes mostly reserved assert_eq!(Balances::free_balance(2), 255); // "free" account deleted." assert_eq!(Balances::total_balance(&2), 256 * 20); // reserve still exists. - assert_eq!(Balances::is_dead_account(&2), false); assert_eq!(System::account_nonce(&2), 1); // account 4 tries to take index 1 for account 5. assert_ok!(Balances::transfer(Some(4).into(), 5, 256 * 1 + 0x69)); assert_eq!(Balances::total_balance(&5), 256 * 1 + 0x69); - assert_eq!(Balances::is_dead_account(&5), false); assert!(Balances::slash(&2, 256 * 19 + 2).1.is_zero()); // account 2 gets slashed // "reserve" account reduced to 255 (below ED) so account deleted assert_eq!(Balances::total_balance(&2), 0); assert_eq!(System::account_nonce(&2), 0); // nonce zero - assert_eq!(Balances::is_dead_account(&2), true); // account 4 tries to take index 1 again for account 6. assert_ok!(Balances::transfer(Some(4).into(), 6, 256 * 1 + 0x69)); assert_eq!(Balances::total_balance(&6), 256 * 1 + 0x69); - assert_eq!(Balances::is_dead_account(&6), false); }); } @@ -417,7 +416,7 @@ macro_rules! decl_tests { fn refunding_balance_should_work() { <$ext_builder>::default().build().execute_with(|| { let _ = Balances::deposit_creating(&1, 42); - Balances::mutate_account(&1, |a| a.reserved = 69); + assert_ok!(Balances::mutate_account(&1, |a| a.reserved = 69)); Balances::unreserve(&1, 69); assert_eq!(Balances::free_balance(1), 111); assert_eq!(Balances::reserved_balance(1), 0); @@ -490,9 +489,8 @@ macro_rules! decl_tests { let _ = Balances::deposit_creating(&2, 1); assert_ok!(Balances::reserve(&1, 110)); assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Free), 0); - assert_eq!( - last_event(), - Event::balances(RawEvent::ReserveRepatriated(1, 2, 41, Status::Free)), + System::assert_last_event( + Event::Balances(crate::Event::ReserveRepatriated(1, 2, 41, Status::Free)) ); assert_eq!(Balances::reserved_balance(1), 69); assert_eq!(Balances::free_balance(1), 0); @@ -541,15 +539,15 @@ macro_rules! decl_tests { #[test] fn transferring_too_high_value_should_not_panic() { <$ext_builder>::default().build().execute_with(|| { - Balances::make_free_balance_be(&1, u64::max_value()); + Balances::make_free_balance_be(&1, u64::MAX); Balances::make_free_balance_be(&2, 1); assert_err!( - Balances::transfer(Some(1).into(), 2, u64::max_value()), - Error::<$test, _>::Overflow, + Balances::transfer(Some(1).into(), 2, u64::MAX), + ArithmeticError::Overflow, ); - assert_eq!(Balances::free_balance(1), u64::max_value()); + assert_eq!(Balances::free_balance(1), u64::MAX); assert_eq!(Balances::free_balance(2), 1); }); } @@ -623,22 +621,30 @@ macro_rules! 
decl_tests { Balances::transfer_keep_alive(Some(1).into(), 2, 100), Error::<$test, _>::KeepAlive ); - assert_eq!(Balances::is_dead_account(&1), false); assert_eq!(Balances::total_balance(&1), 100); assert_eq!(Balances::total_balance(&2), 0); }); } #[test] - #[should_panic = "the balance of any account should always be more than existential deposit."] + #[should_panic = "the balance of any account should always be at least the existential deposit."] fn cannot_set_genesis_value_below_ed() { ($existential_deposit).with(|v| *v.borrow_mut() = 11); let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); - let _ = GenesisConfig::<$test> { + let _ = pallet_balances::GenesisConfig::<$test> { balances: vec![(1, 10)], }.assimilate_storage(&mut t).unwrap(); } + #[test] + #[should_panic = "duplicate balances in genesis."] + fn cannot_set_genesis_value_twice() { + let mut t = frame_system::GenesisConfig::default().build_storage::<$test>().unwrap(); + let _ = pallet_balances::GenesisConfig::<$test> { + balances: vec![(1, 10), (2, 20), (1, 15)], + }.assimilate_storage(&mut t).unwrap(); + } + #[test] fn dust_moves_between_free_and_reserved() { <$ext_builder>::default() @@ -684,9 +690,10 @@ macro_rules! decl_tests { assert_eq!(Balances::reserved_balance(1), 50); // Reserve some free balance - let _ = Balances::slash(&1, 1); + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); + // The account should be dead. - assert!(Balances::is_dead_account(&1)); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -700,29 +707,20 @@ macro_rules! decl_tests { let _ = Balances::deposit_creating(&1, 100); System::set_block_number(2); - let _ = Balances::reserve(&1, 10); + assert_ok!(Balances::reserve(&1, 10)); - assert_eq!( - last_event(), - Event::balances(RawEvent::Reserved(1, 10)), - ); + System::assert_last_event(Event::Balances(crate::Event::Reserved(1, 10))); System::set_block_number(3); - let _ = Balances::unreserve(&1, 5); + assert!(Balances::unreserve(&1, 5).is_zero()); - assert_eq!( - last_event(), - Event::balances(RawEvent::Unreserved(1, 5)), - ); + System::assert_last_event(Event::Balances(crate::Event::Unreserved(1, 5))); System::set_block_number(4); - let _ = Balances::unreserve(&1, 6); + assert_eq!(Balances::unreserve(&1, 6), 1); // should only unreserve 5 - assert_eq!( - last_event(), - Event::balances(RawEvent::Unreserved(1, 5)), - ); + System::assert_last_event(Event::Balances(crate::Event::Unreserved(1, 5))); }); } @@ -737,19 +735,20 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::system(system::RawEvent::NewAccount(1)), - Event::balances(RawEvent::Endowed(1, 100)), - Event::balances(RawEvent::BalanceSet(1, 100, 0)), + Event::System(system::Event::NewAccount(1)), + Event::Balances(crate::Event::Endowed(1, 100)), + Event::Balances(crate::Event::BalanceSet(1, 100, 0)), ] ); - let _ = Balances::slash(&1, 1); + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); assert_eq!( events(), [ - Event::balances(RawEvent::DustLost(1, 99)), - Event::system(system::RawEvent::KilledAccount(1)) + Event::System(system::Event::KilledAccount(1)), + Event::Balances(crate::Event::DustLost(1, 99)), ] ); }); @@ -758,7 +757,7 @@ macro_rules! 
decl_tests { #[test] fn emit_events_with_no_existential_deposit_suicide() { <$ext_builder>::default() - .existential_deposit(0) + .existential_deposit(1) .build() .execute_with(|| { assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); @@ -766,26 +765,461 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::system(system::RawEvent::NewAccount(1)), - Event::balances(RawEvent::Endowed(1, 100)), - Event::balances(RawEvent::BalanceSet(1, 100, 0)), + Event::System(system::Event::NewAccount(1)), + Event::Balances(crate::Event::Endowed(1, 100)), + Event::Balances(crate::Event::BalanceSet(1, 100, 0)), ] ); - let _ = Balances::slash(&1, 100); - - // no events - assert_eq!(events(), []); - - assert_ok!(System::suicide(Origin::signed(1))); + let res = Balances::slash(&1, 100); + assert_eq!(res, (NegativeImbalance::new(100), 0)); assert_eq!( events(), [ - Event::system(system::RawEvent::KilledAccount(1)) + Event::System(system::Event::KilledAccount(1)) ] ); }); } + + #[test] + fn slash_loop_works() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + /* User has no reference counter, so they can die in these scenarios */ + + // SCENARIO: Slash would not kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash will kill account because not enough balance left. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(950), 0)); + // Account is killed + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash will kill account, and report missing slash amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed full free_balance, and reports 300 not slashed + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1000), 300)); + // Account is dead + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, but keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, and kill. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); + // Account is dead because 50 reserved balance is not enough to keep alive + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash can take as much as possible from reserved, kill, and report missing amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); + // Account is super dead + assert!(!System::account_exists(&1)); + + /* User will now have a reference counter on them, keeping them alive in these scenarios */ + + // SCENARIO: Slash would not kill account. 
+ assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests + // Slashed completed in full + assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash will take as much as possible without killing account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed completed in full + assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(900), 50)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash will not kill account, and report missing slash amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + // Slashed full free_balance minus ED, and reports 400 not slashed + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(900), 400)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, but keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take from reserved, but keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); + // Slashed full free_balance and 250 of reserved balance to leave ED + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash can take as much as possible from reserved and report missing amount. + assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); + // Slashed full free_balance and 300 of reserved balance + assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1150), 150)); + // Account is still alive + assert!(System::account_exists(&1)); + + // Slash on non-existent account is okay. + assert_eq!(Balances::slash(&12345, 1_300), (NegativeImbalance::new(0), 1300)); + }); + } + + #[test] + fn slash_reserved_loop_works() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + /* User has no reference counter, so they can die in these scenarios */ + + // SCENARIO: Slash would not kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash would kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(1_000), 0)); + // Account is dead + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash would kill account, and reports left over slash. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); + // Account is dead + assert!(!System::account_exists(&1)); + + // SCENARIO: Over-slash does not take from free balance. 
+ assert_ok!(Balances::set_balance(Origin::root(), 1, 300, 1_000)); + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); + // Account is alive because of free balance + assert!(System::account_exists(&1)); + + /* User has a reference counter, so they cannot die */ + + // SCENARIO: Slash would not kill account. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests + // Slashed completed in full + assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Slash as much as possible without killing. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed as much as possible + assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(950), 50)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash reports correctly, where reserved is needed to keep alive. + assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + // Slashed as much as possible + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(950), 350)); + // Account is still alive + assert!(System::account_exists(&1)); + + // SCENARIO: Over-slash reports correctly, where full reserved is removed. + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 1_000)); + // Slashed as much as possible + assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); + // Account is still alive + assert!(System::account_exists(&1)); + + // Slash on non-existent account is okay. + assert_eq!(Balances::slash_reserved(&12345, 1_300), (NegativeImbalance::new(0), 1300)); + }); + } + + #[test] + fn operations_on_dead_account_should_not_change_state() { + // These functions all use `mutate_account` which may introduce a storage change when + // the account never existed to begin with, and shouldn't exist in the end. 
+ <$ext_builder>::default() + .existential_deposit(0) + .build() + .execute_with(|| { + assert!(!frame_system::Account::::contains_key(&1337)); + + // Unreserve + assert_storage_noop!(assert_eq!(Balances::unreserve(&1337, 42), 42)); + // Reserve + assert_noop!(Balances::reserve(&1337, 42), Error::::InsufficientBalance); + // Slash Reserve + assert_storage_noop!(assert_eq!(Balances::slash_reserved(&1337, 42).1, 42)); + // Repatriate Reserve + assert_noop!(Balances::repatriate_reserved(&1337, &1338, 42, Status::Free), Error::::DeadAccount); + // Slash + assert_storage_noop!(assert_eq!(Balances::slash(&1337, 42).1, 42)); + }); + } + + #[test] + fn transfer_keep_alive_all_free_succeed() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + assert_ok!(Balances::set_balance(Origin::root(), 1, 100, 100)); + assert_ok!(Balances::transfer_keep_alive(Some(1).into(), 2, 100)); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 100); + }); + } + + #[test] + fn transfer_all_works() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and allow death + assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); + assert_eq!(Balances::total_balance(&1), 0); + assert_eq!(Balances::total_balance(&2), 200); + + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and keep alive + assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 100); + + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and allow death w/ reserved + assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); + assert_eq!(Balances::total_balance(&1), 0); + assert_eq!(Balances::total_balance(&2), 200); + + // setup + assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + // transfer all and keep alive w/ reserved + assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 110); + }); + } + + #[test] + fn named_reserve_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id_1 = [1u8; 8]; + let id_2 = [2u8; 8]; + let id_3 = [3u8; 8]; + + // reserve + + assert_noop!(Balances::reserve_named(&id_1, &1, 112), Error::::InsufficientBalance); + + assert_ok!(Balances::reserve_named(&id_1, &1, 12)); + + assert_eq!(Balances::reserved_balance(1), 12); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 12); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_ok!(Balances::reserve_named(&id_1, &1, 2)); + + assert_eq!(Balances::reserved_balance(1), 14); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_ok!(Balances::reserve_named(&id_2, &1, 23)); + + assert_eq!(Balances::reserved_balance(1), 37); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + 
assert_ok!(Balances::reserve(&1, 34)); + + assert_eq!(Balances::reserved_balance(1), 71); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 14); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 40); + + assert_noop!(Balances::reserve_named(&id_3, &1, 2), Error::::TooManyReserves); + + // unreserve + + assert_eq!(Balances::unreserve_named(&id_1, &1, 10), 0); + + assert_eq!(Balances::reserved_balance(1), 61); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 4); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::unreserve_named(&id_1, &1, 5), 1); + + assert_eq!(Balances::reserved_balance(1), 57); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 23); + + assert_eq!(Balances::unreserve_named(&id_2, &1, 3), 0); + + assert_eq!(Balances::reserved_balance(1), 54); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); + + assert_eq!(Balances::total_balance(&1), 111); + assert_eq!(Balances::free_balance(1), 57); + + // slash_reserved_named + + assert_ok!(Balances::reserve_named(&id_1, &1, 10)); + + assert_eq!(Balances::slash_reserved_named(&id_1, &1, 25).1, 15); + + assert_eq!(Balances::reserved_balance(1), 54); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 20); + assert_eq!(Balances::total_balance(&1), 101); + + assert_eq!(Balances::slash_reserved_named(&id_2, &1, 5).1, 0); + + assert_eq!(Balances::reserved_balance(1), 49); + assert_eq!(Balances::reserved_balance_named(&id_1, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); + assert_eq!(Balances::total_balance(&1), 96); + + // repatriate_reserved_named + + let _ = Balances::deposit_creating(&2, 100); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Status::Reserved).unwrap(), 0); + + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 10); + assert_eq!(Balances::reserved_balance(&2), 10); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &2, &1, 11, Status::Reserved).unwrap(), 1); + + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 15); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); + assert_eq!(Balances::reserved_balance(&2), 0); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &2, 10, Status::Free).unwrap(), 0); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &2), 0); + assert_eq!(Balances::free_balance(&2), 110); + + // repatriate_reserved_named to self + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 10, Status::Reserved).unwrap(), 5); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 5); + + assert_eq!(Balances::free_balance(&1), 47); + + assert_eq!(Balances::repatriate_reserved_named(&id_2, &1, &1, 15, Status::Free).unwrap(), 10); + assert_eq!(Balances::reserved_balance_named(&id_2, &1), 0); + + assert_eq!(Balances::free_balance(&1), 52); + }); + } + + #[test] + fn ensure_reserved_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = [1u8; 8]; + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 15)); + assert_eq!(Balances::reserved_balance_named(&id, 
&1), 15); + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 10)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 10); + + assert_ok!(Balances::ensure_reserved_named(&id, &1, 20)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 20); + }); + } + + #[test] + fn unreserve_all_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_eq!(Balances::unreserve_all_named(&id, &1), 15); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::free_balance(&1), 111); + + assert_eq!(Balances::unreserve_all_named(&id, &1), 0); + }); + } + + #[test] + fn slash_all_reserved_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 15); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::free_balance(&1), 96); + + assert_eq!(Balances::slash_all_reserved_named(&id, &1).peek(), 0); + }); + } + + #[test] + fn repatriate_all_reserved_named_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 111); + let _ = Balances::deposit_creating(&2, 10); + let _ = Balances::deposit_creating(&3, 10); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 15)); + + assert_ok!(Balances::repatriate_all_reserved_named(&id, &1, &2, Status::Reserved)); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + assert_eq!(Balances::reserved_balance_named(&id, &2), 15); + + assert_ok!(Balances::repatriate_all_reserved_named(&id, &2, &3, Status::Free)); + assert_eq!(Balances::reserved_balance_named(&id, &2), 0); + assert_eq!(Balances::free_balance(&3), 25); + }); + } } } diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 0ee488d097294..f6faebed39316 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,59 +19,45 @@ #![cfg(test)] -use sp_runtime::{ - Perbill, - traits::IdentityLookup, - testing::Header, +use crate::{self as pallet_balances, decl_tests, Config, Pallet}; +use frame_support::{ + parameter_types, + weights::{DispatchInfo, IdentityFee, Weight}, }; +use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; use sp_io; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; -use frame_support::traits::Get; -use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; -use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; +use sp_runtime::{testing::Header, traits::IdentityLookup}; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Test {} -} - -mod balances { - pub use crate::Event; -} - -impl_outer_event! 
{ - pub enum Event for Test { - system, - balances, +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, } -} - -thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); -} - -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } -} +); -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); + pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = CallWithDispatchInfo; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; @@ -79,38 +65,38 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = super::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const TransactionByteFee: u64 = 1; } -impl pallet_transaction_payment::Trait for Test { - type Currency = Module; - type OnTransactionPayment = (); +impl pallet_transaction_payment::Config for Test { + type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } -impl Trait for Test { +parameter_types! 
{ + pub const MaxReserves: u32 = 2; +} + +impl Config for Test { type Balance = u64; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; - type AccountStore = system::Module; + type AccountStore = frame_system::Pallet; type MaxLocks = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } @@ -120,10 +106,7 @@ pub struct ExtBuilder { } impl Default for ExtBuilder { fn default() -> Self { - Self { - existential_deposit: 1, - monied: false, - } + Self { existential_deposit: 1, monied: false } } } impl ExtBuilder { @@ -141,19 +124,21 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { + pallet_balances::GenesisConfig:: { balances: if self.monied { vec![ (1, 10 * self.existential_deposit), (2, 20 * self.existential_deposit), (3, 30 * self.existential_deposit), (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) + (12, 10 * self.existential_deposit), ] } else { vec![] }, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); @@ -161,4 +146,4 @@ impl ExtBuilder { } } -decl_tests!{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } +decl_tests! { Test, ExtBuilder, EXISTENTIAL_DEPOSIT } diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 4efcdad8ca334..d8c07aa9c42e5 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,59 +19,47 @@ #![cfg(test)] -use sp_runtime::{ - Perbill, - traits::IdentityLookup, - testing::Header, +use crate::{self as pallet_balances, decl_tests, Config, Pallet}; +use frame_support::{ + parameter_types, + traits::StorageMapShim, + weights::{DispatchInfo, IdentityFee, Weight}, }; +use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; use sp_io; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types}; -use frame_support::traits::{Get, StorageMapShim}; -use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; -use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; - -use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Test {} -} - -mod balances { - pub use crate::Event; -} - -impl_outer_event! { - pub enum Event for Test { - system, - balances, +use sp_runtime::{testing::Header, traits::IdentityLookup}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, } -} +); -thread_local! 
{ - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); -} - -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } -} - -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); + pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = CallWithDispatchInfo; + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; @@ -79,45 +67,38 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); - type AccountData = super::AccountData; + type PalletInfo = PalletInfo; + type AccountData = (); type OnNewAccount = (); - type OnKilledAccount = Module; + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const TransactionByteFee: u64 = 1; } -impl pallet_transaction_payment::Trait for Test { - type Currency = Module; - type OnTransactionPayment = (); +impl pallet_transaction_payment::Config for Test { + type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } parameter_types! 
{ pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 2; } -impl Trait for Test { +impl Config for Test { type Balance = u64; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; - type AccountStore = StorageMapShim< - super::Account, - system::CallOnCreatedAccount, - system::CallKillAccount, - u64, super::AccountData - >; + type AccountStore = + StorageMapShim, system::Provider, u64, super::AccountData>; type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } @@ -127,10 +108,7 @@ pub struct ExtBuilder { } impl Default for ExtBuilder { fn default() -> Self { - Self { - existential_deposit: 1, - monied: false, - } + Self { existential_deposit: 1, monied: false } } } impl ExtBuilder { @@ -151,19 +129,21 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { + pallet_balances::GenesisConfig:: { balances: if self.monied { vec![ (1, 10 * self.existential_deposit), (2, 20 * self.existential_deposit), (3, 30 * self.existential_deposit), (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) + (12, 10 * self.existential_deposit), ] } else { vec![] }, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); @@ -171,38 +151,37 @@ impl ExtBuilder { } } -decl_tests!{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } +decl_tests! { Test, ExtBuilder, EXISTENTIAL_DEPOSIT } #[test] fn emit_events_with_no_existential_deposit_suicide_with_dust() { - ::default() - .existential_deposit(0) - .build() - .execute_with(|| { - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); - - assert_eq!( - events(), - [ - Event::system(system::RawEvent::NewAccount(1)), - Event::balances(RawEvent::Endowed(1, 100)), - Event::balances(RawEvent::BalanceSet(1, 100, 0)), - ] - ); - - let _ = Balances::slash(&1, 99); - - // no events - assert_eq!(events(), []); - - assert_ok!(System::suicide(Origin::signed(1))); - - assert_eq!( - events(), - [ - Event::balances(RawEvent::DustLost(1, 1)), - Event::system(system::RawEvent::KilledAccount(1)) - ] - ); - }); + ::default().existential_deposit(2).build().execute_with(|| { + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); + + assert_eq!( + events(), + [ + Event::System(system::Event::NewAccount(1)), + Event::Balances(crate::Event::Endowed(1, 100)), + Event::Balances(crate::Event::BalanceSet(1, 100, 0)), + ] + ); + + let res = Balances::slash(&1, 98); + assert_eq!(res, (NegativeImbalance::new(98), 0)); + + // no events + assert_eq!(events(), []); + + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); + + assert_eq!( + events(), + [ + Event::System(system::Event::KilledAccount(1)), + Event::Balances(crate::Event::DustLost(1, 1)), + ] + ); + }); } diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs new file mode 100644 index 0000000000000..9c7ba3e1ec824 --- /dev/null +++ b/frame/balances/src/tests_reentrancy.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test setup for potential reentrancy and lost updates of nested mutations.
+
+#![cfg(test)]
+
+use crate::{self as pallet_balances, Config, Pallet};
+use frame_support::{parameter_types, traits::StorageMapShim, weights::IdentityFee};
+use pallet_transaction_payment::CurrencyAdapter;
+use sp_core::H256;
+use sp_io;
+use sp_runtime::{testing::Header, traits::IdentityLookup};
+
+use crate::*;
+use frame_support::{
+	assert_ok,
+	traits::{Currency, ReservableCurrency},
+};
+use frame_system::RawOrigin;
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+	}
+);
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+	pub BlockWeights: frame_system::limits::BlockWeights =
+		frame_system::limits::BlockWeights::simple_max(1024);
+	pub static ExistentialDeposit: u64 = 0;
+}
+impl frame_system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = BlockWeights;
+	type BlockLength = ();
+	type DbWeight = ();
+	type Origin = Origin;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Call = Call;
+	type Hash = H256;
+	type Hashing = ::sp_runtime::traits::BlakeTwo256;
+	type AccountId = u64;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Header = Header;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = ();
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+parameter_types! {
+	pub const TransactionByteFee: u64 = 1;
+}
+impl pallet_transaction_payment::Config for Test {
+	type OnChargeTransaction = CurrencyAdapter<Pallet<Test>, ()>;
+	type TransactionByteFee = TransactionByteFee;
+	type WeightToFee = IdentityFee<u64>;
+	type FeeMultiplierUpdate = ();
+}
+
+pub struct OnDustRemoval;
+impl OnUnbalanced<NegativeImbalance<Test>> for OnDustRemoval {
+	fn on_nonzero_unbalanced(amount: NegativeImbalance<Test>) {
+		assert_ok!(Balances::resolve_into_existing(&1, amount));
+	}
+}
+parameter_types! {
+	pub const MaxLocks: u32 = 50;
+	pub const MaxReserves: u32 = 2;
+}
+impl Config for Test {
+	type Balance = u64;
+	type DustRemoval = OnDustRemoval;
+	type Event = Event;
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore =
+		StorageMapShim<super::Account<Test>, system::Provider<Test>, u64, super::AccountData<u64>>;
+	type MaxLocks = MaxLocks;
+	type MaxReserves = MaxReserves;
+	type ReserveIdentifier = [u8; 8];
+	type WeightInfo = ();
+}
+
+pub struct ExtBuilder {
+	existential_deposit: u64,
+}
+impl Default for ExtBuilder {
+	fn default() -> Self {
+		Self { existential_deposit: 1 }
+	}
+}
+impl ExtBuilder {
+	pub fn existential_deposit(mut self, existential_deposit: u64) -> Self {
+		self.existential_deposit = existential_deposit;
+		self
+	}
+
+	pub fn set_associated_consts(&self) {
+		EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit);
+	}
+
+	pub fn build(self) -> sp_io::TestExternalities {
+		self.set_associated_consts();
+		let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+		pallet_balances::GenesisConfig::<Test> { balances: vec![] }
+			.assimilate_storage(&mut t)
+			.unwrap();
+		let mut ext = sp_io::TestExternalities::new(t);
+		ext.execute_with(|| System::set_block_number(1));
+		ext
+	}
+}
+
+#[test]
+fn transfer_dust_removal_tst1_should_work() {
+	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
+		// Verification of reentrancy in dust removal
+		assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0));
+		assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0));
+
+		// In this transaction, account 2's free balance
+		// drops below the existential deposit,
+		// and the dust balance is removed from account 2
+		assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 3, 450));
+
+		// As expected, the dust balance is removed.
+		assert_eq!(Balances::free_balance(&2), 0);
+
+		// As expected, beneficiary account 3
+		// received the transferred funds.
+		assert_eq!(Balances::free_balance(&3), 450);
+
+		// Dust balance is deposited to account 1
+		// during the process of dust removal.
+		assert_eq!(Balances::free_balance(&1), 1050);
+
+		// Verify the events
+		// Number of events expected is 11
+		assert_eq!(System::events().len(), 11);
+
+		System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 3, 450)));
+		System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50)));
+	});
+}
+
+#[test]
+fn transfer_dust_removal_tst2_should_work() {
+	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
+		// Verification of reentrancy in dust removal
+		assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0));
+		assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0));
+
+		// In this transaction, account 2's free balance
+		// drops below the existential deposit,
+		// and the dust balance is removed from account 2
+		assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 1, 450));
+
+		// As expected, the dust balance is removed.
+		assert_eq!(Balances::free_balance(&2), 0);
+
+		// Dust balance is deposited to account 1
+		// during the process of dust removal.
+		assert_eq!(Balances::free_balance(&1), 1500);
+
+		// Verify the events
+		// Number of events expected is 9
+		assert_eq!(System::events().len(), 9);
+
+		System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 1, 450)));
+		System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50)));
+	});
+}
+
+#[test]
+fn repatriating_reserved_balance_dust_removal_should_work() {
+	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
+		// Verification of reentrancy in dust removal
+		assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0));
+		assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0));
+
+		// Reserve a value on account 2
+		// such that its free balance is lower than
+		// the existential deposit.
+		assert_ok!(Balances::reserve(&2, 450));
+
+		// Transfer of reserved fund from slashed account 2 to
+		// beneficiary account 1
+		assert_ok!(Balances::repatriate_reserved(&2, &1, 450, Status::Free), 0);
+
+		// Since the free balance of account 2 is lower than
+		// the existential deposit, the dust amount is
+		// removed from account 2
+		assert_eq!(Balances::reserved_balance(2), 0);
+		assert_eq!(Balances::free_balance(2), 0);
+
+		// Account 1 is credited with the reserved amount
+		// together with the dust balance during dust
+		// removal.
+		assert_eq!(Balances::reserved_balance(1), 0);
+		assert_eq!(Balances::free_balance(1), 1500);
+
+		// Verify the events
+		// Number of events expected is 10
+		assert_eq!(System::events().len(), 10);
+
+		System::assert_has_event(Event::Balances(crate::Event::ReserveRepatriated(
+			2,
+			1,
+			450,
+			Status::Free,
+		)));
+
+		System::assert_last_event(Event::Balances(crate::Event::DustLost(2, 50)));
+	});
+}
diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs
new file mode 100644
index 0000000000000..6f333bfc0500f
--- /dev/null
+++ b/frame/balances/src/weights.rs
@@ -0,0 +1,148 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_balances
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-09-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_balances
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/balances/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_balances.
+pub trait WeightInfo { + fn transfer() -> Weight; + fn transfer_keep_alive() -> Weight; + fn set_balance_creating() -> Weight; + fn set_balance_killing() -> Weight; + fn force_transfer() -> Weight; + fn transfer_all() -> Weight; + fn force_unreserve() -> Weight; +} + +/// Weights for pallet_balances using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: System Account (r:1 w:1) + fn transfer() -> Weight { + (70_952_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn transfer_keep_alive() -> Weight { + (54_410_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn set_balance_creating() -> Weight { + (29_176_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn set_balance_killing() -> Weight { + (35_214_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:2 w:2) + fn force_transfer() -> Weight { + (71_780_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn transfer_all() -> Weight { + (66_475_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn force_unreserve() -> Weight { + (27_766_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: System Account (r:1 w:1) + fn transfer() -> Weight { + (70_952_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn transfer_keep_alive() -> Weight { + (54_410_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn set_balance_creating() -> Weight { + (29_176_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn set_balance_killing() -> Weight { + (35_214_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:2 w:2) + fn force_transfer() -> Weight { + (71_780_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn transfer_all() -> Weight { + (66_475_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn force_unreserve() -> Weight { + (27_766_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 924ffc8627abc..ea690d966c979 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,29 +13,34 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -linregress = "0.1" -paste = "0.1" -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-api = { version = "2.0.0", path = "../../primitives/api", default-features = false } -sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface", default-features = false } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime", default-features = false } -sp-std = { version = "2.0.0", path = "../../primitives/std", default-features = false } -sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } -sp-storage = { version = "2.0.0", path = "../../primitives/storage", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +linregress = { version = "0.4.3", optional = true } +paste = "1.0" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api", default-features = false } +sp-runtime-interface = { version = "4.0.0-dev", path = "../../primitives/runtime-interface", default-features = false } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../../primitives/std", default-features = false } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } +sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage", default-features = false } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] hex-literal = "0.3.1" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", + "scale-info/std", "sp-runtime-interface/std", "sp-runtime/std", "sp-api/std", "sp-std/std", "frame-support/std", "frame-system/std", + "linregress", + "log/std", ] diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md index bf4bf951aa2b2..38c683cb8db5b 100644 --- a/frame/benchmarking/README.md +++ b/frame/benchmarking/README.md @@ -43,7 +43,7 @@ The benchmarking framework comes with the following tools: * [A set of macros](./src/lib.rs) (`benchmarks!`, `add_benchmark!`, etc...) to make it easy to write, test, and add runtime benchmarks. * [A set of linear regression analysis functions](./src/analysis.rs) for processing benchmark data. -* [A CLI extension](../../utils/benchmarking-cli/) to make it easy to execute benchmarks on your +* [A CLI extension](../../utils/frame/benchmarking-cli/) to make it easy to execute benchmarks on your node. The end-to-end benchmarking pipeline is disabled by default when compiling a node. 
If you want to
@@ -116,10 +116,15 @@ need to move into your node's binary folder. For example, with the Substrate rep
 you would test the Balances pallet's benchmarks:
 
 ```bash
-cd bin/node/cli
 cargo test -p pallet-balances --features runtime-benchmarks
 ```
 
+> NOTE: Substrate uses a virtual workspace which does not allow you to compile with feature flags.
+> ```
+> error: --features is not allowed in the root of a virtual workspace
+> ```
+> To solve this, navigate to the folder of the node (`cd bin/node/cli`) or pallet (`cd frame/pallet`) and run the command there.
+
 ## Adding Benchmarks
 
 The benchmarks included with each pallet are not automatically added to your node. To actually
@@ -163,14 +168,14 @@ Then you can run a benchmark like so:
 
 ```bash
 ./target/release/substrate benchmark \
- --chain dev \ # Configurable Chain Spec
- --execution=wasm \ # Always test with Wasm
- --wasm-execution=compiled \ # Always used `wasm-time`
- --pallet pallet_balances \ # Select the pallet
- --extrinsic transfer \ # Select the extrinsic
- --steps 50 \ # Number of samples across component ranges
- --repeat 20 \ # Number of times we repeat a benchmark
- --output \ # Output benchmark results into a Rust file
+ --chain dev \ # Configurable Chain Spec
+ --execution=wasm \ # Always test with Wasm
+ --wasm-execution=compiled \ # Always use `wasmtime`
+ --pallet pallet_balances \ # Select the pallet
+ --extrinsic transfer \ # Select the extrinsic
+ --steps 50 \ # Number of samples across component ranges
+ --repeat 20 \ # Number of times we repeat a benchmark
+ --output \ # Output benchmark results into a folder or file
 ```
 
 This will output a file `pallet_name.rs` which implements the `WeightInfo` trait you should include
@@ -179,6 +184,19 @@ implementation of the `WeightInfo` trait. This means that you will be able to us
 Substrate pallets while still keeping your network safe for your specific configuration and
 requirements.
 
+The benchmarking CLI uses a Handlebars template to format the final output file. You can optionally
+pass the flag `--template` pointing to a custom template that can be used instead. Within the
+template, you have access to all the data provided by the `TemplateData` struct in the
+[benchmarking CLI writer](../../utils/frame/benchmarking-cli/src/writer.rs). You can find the
+default template used [here](../../utils/frame/benchmarking-cli/src/template.hbs).
+
+There are some custom Handlebars helpers included with our output generation:
+
+* `underscore`: Add an underscore to every 3rd character from the right of a string. Primarily to be
+used for delimiting large numbers.
+* `join`: Join an array of strings into a space-separated string for the template. Primarily to be
+used for joining all the arguments passed to the CLI.
+
 To get a full list of available options when running benchmarks, run:
 
 ```bash
diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs
index dafb4a74b669f..2bb20ebe2e7f8 100644
--- a/frame/benchmarking/src/analysis.rs
+++ b/frame/benchmarking/src/analysis.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,9 +17,12 @@
 //! Tools for analyzing the benchmark results.
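(Editorial aside on the README's `underscore` helper described above, before the `analysis.rs` diff continues: the documented behavior, an underscore every third character from the right, can be sketched in plain Rust. This is an illustrative reimplementation under that description, not the benchmarking CLI's actual code.)

```rust
// Sketch of the documented `underscore` behavior: group characters in threes
// from the right, e.g. "70952000" -> "70_952_000". Illustrative only; the real
// helper lives in the benchmarking CLI's Handlebars setup.
fn underscore(s: &str) -> String {
    let chars: Vec<char> = s.chars().collect();
    let mut out = String::new();
    for (i, c) in chars.iter().enumerate() {
        // Distance from the right end decides where separators go.
        let from_right = chars.len() - i;
        if i != 0 && from_right % 3 == 0 {
            out.push('_');
        }
        out.push(*c);
    }
    out
}

fn main() {
    assert_eq!(underscore("70952000"), "70_952_000");
    assert_eq!(underscore("1000"), "1_000");
}
```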
+use crate::BenchmarkResult; +use core::convert::TryFrom; +use linregress::{FormulaRegressionBuilder, RegressionDataBuilder}; use std::collections::BTreeMap; -use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel}; -use crate::BenchmarkResults; + +pub use linregress::RegressionModel; pub struct Analysis { pub base: u128, @@ -29,27 +32,65 @@ pub struct Analysis { pub model: Option, } +#[derive(Clone, Copy)] pub enum BenchmarkSelector { ExtrinsicTime, StorageRootTime, Reads, Writes, + ProofSize, +} + +#[derive(Debug)] +pub enum AnalysisChoice { + /// Use minimum squares regression for analyzing the benchmarking results. + MinSquares, + /// Use median slopes for analyzing the benchmarking results. + MedianSlopes, + /// Use the maximum values among all other analysis functions for the benchmarking results. + Max, +} + +impl Default for AnalysisChoice { + fn default() -> Self { + AnalysisChoice::MinSquares + } +} + +impl TryFrom> for AnalysisChoice { + type Error = &'static str; + + fn try_from(s: Option) -> Result { + match s { + None => Ok(AnalysisChoice::default()), + Some(i) => match &i[..] { + "min-squares" | "min_squares" => Ok(AnalysisChoice::MinSquares), + "median-slopes" | "median_slopes" => Ok(AnalysisChoice::MedianSlopes), + "max" => Ok(AnalysisChoice::Max), + _ => Err("invalid analysis string"), + }, + } + } } impl Analysis { - // Useful for when there are no components, and we just need an median value of the benchmark results. - // Note: We choose the median value because it is more robust to outliers. - fn median_value(r: &Vec, selector: BenchmarkSelector) -> Option { - if r.is_empty() { return None } + // Useful for when there are no components, and we just need an median value of the benchmark + // results. Note: We choose the median value because it is more robust to outliers. 
+ fn median_value(r: &Vec, selector: BenchmarkSelector) -> Option { + if r.is_empty() { + return None + } - let mut values: Vec = r.iter().map(|result| - match selector { + let mut values: Vec = r + .iter() + .map(|result| match selector { BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, BenchmarkSelector::StorageRootTime => result.storage_root_time, BenchmarkSelector::Reads => result.reads.into(), BenchmarkSelector::Writes => result.writes.into(), - } - ).collect(); + BenchmarkSelector::ProofSize => result.proof_size.into(), + }) + .collect(); values.sort(); let mid = values.len() / 2; @@ -63,64 +104,81 @@ impl Analysis { }) } - pub fn median_slopes(r: &Vec, selector: BenchmarkSelector) -> Option { - if r[0].components.is_empty() { return Self::median_value(r, selector) } + pub fn median_slopes(r: &Vec, selector: BenchmarkSelector) -> Option { + if r[0].components.is_empty() { + return Self::median_value(r, selector) + } - let results = r[0].components.iter().enumerate().map(|(i, &(param, _))| { - let mut counted = BTreeMap::, usize>::new(); - for result in r.iter() { - let mut p = result.components.iter().map(|x| x.1).collect::>(); - p[i] = 0; - *counted.entry(p).or_default() += 1; - } - let others: Vec = counted.iter().max_by_key(|i| i.1).expect("r is not empty; qed").0.clone(); - let values = r.iter() - .filter(|v| - v.components.iter() - .map(|x| x.1) - .zip(others.iter()) - .enumerate() - .all(|(j, (v1, v2))| j == i || v1 == *v2) - ).map(|result| { - // Extract the data we are interested in analyzing - let data = match selector { - BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, - BenchmarkSelector::StorageRootTime => result.storage_root_time, - BenchmarkSelector::Reads => result.reads.into(), - BenchmarkSelector::Writes => result.writes.into(), - }; - (result.components[i].1, data) - }) - .collect::>(); - (format!("{:?}", param), i, others, values) - }).collect::>(); - - let models = results.iter().map(|(_, _, _, ref values)| { - let mut slopes = vec![]; - for (i, &(x1, y1)) in values.iter().enumerate() { - for &(x2, y2) in values.iter().skip(i + 1) { - if x1 != x2 { - slopes.push((y1 as f64 - y2 as f64) / (x1 as f64 - x2 as f64)); + let results = r[0] + .components + .iter() + .enumerate() + .map(|(i, &(param, _))| { + let mut counted = BTreeMap::, usize>::new(); + for result in r.iter() { + let mut p = result.components.iter().map(|x| x.1).collect::>(); + p[i] = 0; + *counted.entry(p).or_default() += 1; + } + let others: Vec = + counted.iter().max_by_key(|i| i.1).expect("r is not empty; qed").0.clone(); + let values = r + .iter() + .filter(|v| { + v.components + .iter() + .map(|x| x.1) + .zip(others.iter()) + .enumerate() + .all(|(j, (v1, v2))| j == i || v1 == *v2) + }) + .map(|result| { + // Extract the data we are interested in analyzing + let data = match selector { + BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, + BenchmarkSelector::StorageRootTime => result.storage_root_time, + BenchmarkSelector::Reads => result.reads.into(), + BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), + }; + (result.components[i].1, data) + }) + .collect::>(); + (format!("{:?}", param), i, others, values) + }) + .collect::>(); + + let models = results + .iter() + .map(|(_, _, _, ref values)| { + let mut slopes = vec![]; + for (i, &(x1, y1)) in values.iter().enumerate() { + for &(x2, y2) in values.iter().skip(i + 1) { + if x1 != x2 { + slopes.push((y1 as f64 - y2 as f64) / (x1 as f64 - x2 as f64)); + } } } - 
} - slopes.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); - let slope = slopes[slopes.len() / 2]; + slopes.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let slope = slopes[slopes.len() / 2]; - let mut offsets = vec![]; - for &(x, y) in values.iter() { - offsets.push(y as f64 - slope * x as f64); - } - offsets.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); - let offset = offsets[offsets.len() / 2]; + let mut offsets = vec![]; + for &(x, y) in values.iter() { + offsets.push(y as f64 - slope * x as f64); + } + offsets.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let offset = offsets[offsets.len() / 2]; - (offset, slope) - }).collect::>(); + (offset, slope) + }) + .collect::>(); - let models = models.iter() + let models = models + .iter() .zip(results.iter()) .map(|((offset, slope), (_, i, others, _))| { - let over = others.iter() + let over = others + .iter() .enumerate() .filter(|(j, _)| j != i) .map(|(j, v)| models[j].1 * *v as f64) @@ -141,18 +199,21 @@ impl Analysis { }) } - pub fn min_squares_iqr(r: &Vec, selector: BenchmarkSelector) -> Option { - if r[0].components.is_empty() { return Self::median_value(r, selector) } + pub fn min_squares_iqr(r: &Vec, selector: BenchmarkSelector) -> Option { + if r[0].components.is_empty() { + return Self::median_value(r, selector) + } let mut results = BTreeMap::, Vec>::new(); for result in r.iter() { let p = result.components.iter().map(|x| x.1).collect::>(); results.entry(p).or_default().push(match selector { - BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, - BenchmarkSelector::StorageRootTime => result.storage_root_time, - BenchmarkSelector::Reads => result.reads.into(), - BenchmarkSelector::Writes => result.writes.into(), - }) + BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, + BenchmarkSelector::StorageRootTime => result.storage_root_time, + BenchmarkSelector::Reads => result.reads.into(), + BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), + }) } for (_, rs) in results.iter_mut() { @@ -161,21 +222,19 @@ impl Analysis { *rs = rs[ql..rs.len() - ql].to_vec(); } - let mut data = vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; + let mut data = + vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; let names = r[0].components.iter().map(|x| format!("{:?}", x.0)).collect::>(); - data.extend(names.iter() - .enumerate() - .map(|(i, p)| ( + data.extend(names.iter().enumerate().map(|(i, p)| { + ( p.as_str(), - results.iter() - .flat_map(|x| Some(x.0[i] as f64) - .into_iter() - .cycle() - .take(x.1.len()) - ).collect::>() - )) - ); + results + .iter() + .flat_map(|x| Some(x.0[i] as f64).into_iter().cycle().take(x.1.len())) + .collect::>(), + ) + })); let data = RegressionDataBuilder::new().build_from(data).ok()?; @@ -185,25 +244,31 @@ impl Analysis { .fit() .ok()?; - let slopes = model.parameters.regressor_values.iter() + let slopes = model + .parameters + .regressor_values + .iter() .enumerate() .map(|(_, x)| (*x + 0.5) as u128) .collect(); - let value_dists = results.iter().map(|(p, vs)| { - // Avoid divide by zero - if vs.len() == 0 { return (p.clone(), 0, 0) } - let total = vs.iter() - .fold(0u128, |acc, v| acc + *v); - let mean = total / vs.len() as u128; - let sum_sq_diff = vs.iter() - .fold(0u128, |acc, v| { + let value_dists = results + .iter() + .map(|(p, vs)| { + // Avoid divide by zero + if vs.len() == 0 { + 
return (p.clone(), 0, 0) + } + let total = vs.iter().fold(0u128, |acc, v| acc + *v); + let mean = total / vs.len() as u128; + let sum_sq_diff = vs.iter().fold(0u128, |acc, v| { let d = mean.max(*v) - mean.min(*v); acc + d * d }); - let stddev = (sum_sq_diff as f64 / vs.len() as f64).sqrt() as u128; - (p.clone(), mean, stddev) - }).collect::>(); + let stddev = (sum_sq_diff as f64 / vs.len() as f64).sqrt() as u128; + (p.clone(), mean, stddev) + }) + .collect::>(); Some(Self { base: (model.parameters.intercept_value + 0.5) as u128, @@ -213,6 +278,37 @@ impl Analysis { model: Some(model), }) } + + pub fn max(r: &Vec, selector: BenchmarkSelector) -> Option { + let median_slopes = Self::median_slopes(r, selector); + let min_squares = Self::min_squares_iqr(r, selector); + + if median_slopes.is_none() || min_squares.is_none() { + return None + } + + let median_slopes = median_slopes.unwrap(); + let min_squares = min_squares.unwrap(); + + let base = median_slopes.base.max(min_squares.base); + let slopes = median_slopes + .slopes + .into_iter() + .zip(min_squares.slopes.into_iter()) + .map(|(a, b): (u128, u128)| a.max(b)) + .collect::>(); + // components should always be in the same order + median_slopes + .names + .iter() + .zip(min_squares.names.iter()) + .for_each(|(a, b)| assert!(a == b, "benchmark results not in the same order")); + let names = median_slopes.names; + let value_dists = min_squares.value_dists; + let model = min_squares.model; + + Some(Self { base, slopes, names, value_dists, model }) + } } fn ms(mut nanos: u128) -> String { @@ -220,7 +316,7 @@ fn ms(mut nanos: u128) -> String { while x > 1 { if nanos > x * 1_000 { nanos = nanos / x * x; - break; + break } x /= 10; } @@ -231,19 +327,35 @@ impl std::fmt::Display for Analysis { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if let Some(ref value_dists) = self.value_dists { writeln!(f, "\nData points distribution:")?; - writeln!(f, "{} mean µs sigma µs %", self.names.iter().map(|p| format!("{:>5}", p)).collect::>().join(" "))?; + writeln!( + f, + "{} mean µs sigma µs %", + self.names.iter().map(|p| format!("{:>5}", p)).collect::>().join(" ") + )?; for (param_values, mean, sigma) in value_dists.iter() { if *mean == 0 { - writeln!(f, "{} {:>8} {:>8} {:>3}.{}%", - param_values.iter().map(|v| format!("{:>5}", v)).collect::>().join(" "), + writeln!( + f, + "{} {:>8} {:>8} {:>3}.{}%", + param_values + .iter() + .map(|v| format!("{:>5}", v)) + .collect::>() + .join(" "), ms(*mean), ms(*sigma), "?", "?" 
)?; } else { - writeln!(f, "{} {:>8} {:>8} {:>3}.{}%", - param_values.iter().map(|v| format!("{:>5}", v)).collect::>().join(" "), + writeln!( + f, + "{} {:>8} {:>8} {:>3}.{}%", + param_values + .iter() + .map(|v| format!("{:>5}", v)) + .collect::>() + .join(" "), ms(*mean), ms(*sigma), (sigma * 100 / mean), @@ -275,7 +387,7 @@ impl std::fmt::Debug for Analysis { for (&m, n) in self.slopes.iter().zip(self.names.iter()) { write!(f, " + ({} * {})", m, n)?; } - write!(f,"") + write!(f, "") } } @@ -290,8 +402,8 @@ mod tests { storage_root_time: u128, reads: u32, writes: u32, - ) -> BenchmarkResults { - BenchmarkResults { + ) -> BenchmarkResult { + BenchmarkResult { components, extrinsic_time, storage_root_time, @@ -299,23 +411,74 @@ mod tests { repeat_reads: 0, writes, repeat_writes: 0, + proof_size: 0, + keys: vec![], } } #[test] fn analysis_median_slopes_should_work() { let data = vec![ - benchmark_result(vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0, 3, 10), - benchmark_result(vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0, 4, 10), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0, 5, 10), - benchmark_result(vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0, 6, 10), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0, 5, 2), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0, 5, 6), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0, 5, 14), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0, 5, 20), + benchmark_result( + vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], + 11_500_000, + 0, + 3, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], + 12_500_000, + 0, + 4, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], + 13_500_000, + 0, + 5, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], + 14_500_000, + 0, + 6, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], + 13_100_000, + 0, + 5, + 2, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], + 13_300_000, + 0, + 5, + 6, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], + 13_700_000, + 0, + 5, + 14, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], + 14_000_000, + 0, + 5, + 20, + ), ]; - let extrinsic_time = Analysis::median_slopes(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); + let extrinsic_time = + Analysis::median_slopes(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); assert_eq!(extrinsic_time.base, 10_000_000); assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); @@ -331,17 +494,66 @@ mod tests { #[test] fn analysis_median_min_squares_should_work() { let data = vec![ - benchmark_result(vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0, 3, 10), - benchmark_result(vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0, 4, 10), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0, 5, 10), - benchmark_result(vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0, 6, 10), - 
benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0, 5, 2), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0, 5, 6), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0, 5, 14), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0, 5, 20), + benchmark_result( + vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], + 11_500_000, + 0, + 3, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], + 12_500_000, + 0, + 4, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], + 13_500_000, + 0, + 5, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], + 14_500_000, + 0, + 6, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], + 13_100_000, + 0, + 5, + 2, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], + 13_300_000, + 0, + 5, + 6, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], + 13_700_000, + 0, + 5, + 14, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], + 14_000_000, + 0, + 5, + 20, + ), ]; - let extrinsic_time = Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); + let extrinsic_time = + Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); assert_eq!(extrinsic_time.base, 10_000_000); assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index b189cdb6e705e..6c124a8a75761 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,20 +19,41 @@ #![cfg_attr(not(feature = "std"), no_std)] -mod tests; -mod utils; #[cfg(feature = "std")] mod analysis; +#[cfg(test)] +mod tests; +#[cfg(test)] +mod tests_instance; +mod utils; -pub use utils::*; #[cfg(feature = "std")] -pub use analysis::{Analysis, BenchmarkSelector}; +pub use analysis::{Analysis, AnalysisChoice, BenchmarkSelector, RegressionModel}; #[doc(hidden)] -pub use sp_io::storage::root as storage_root; -pub use sp_runtime::traits::Zero; pub use frame_support; +#[doc(hidden)] +pub use log; +#[doc(hidden)] pub use paste; +#[doc(hidden)] +pub use sp_io::storage::root as storage_root; +#[doc(hidden)] +pub use sp_runtime::traits::Zero; +#[doc(hidden)] +pub use sp_std::{self, boxed::Box, prelude::Vec, str, vec}; +#[doc(hidden)] pub use sp_storage::TrackedStorageKey; +pub use utils::*; + +/// Whitelist the given account. +#[macro_export] +macro_rules! whitelist { + ($acc:ident) => { + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::Account::<T>::hashed_key_for(&$acc).into(), + ); + }; +} /// Construct pallet benchmarks for weighing dispatchables. /// @@ -68,10 +89,6 @@ pub use sp_storage::TrackedStorageKey; /// for arbitrary expressions to be evaluated in a benchmark (including for example, /// `on_initialize`). /// -/// The macro allows for common parameters whose ranges and instancing expressions may be drawn upon -/// (or not) by each arm.
Syntax is available to allow for only the range to be drawn upon if -/// desired, allowing an alternative instancing expression to be given. -/// /// Note that the ranges are *inclusive* on both sides. This is in contrast to ranges in Rust which /// are left-inclusive right-exclusive. /// @@ -80,39 +97,29 @@ pub use sp_storage::TrackedStorageKey; /// at any time. Local variables are shared between the two pre- and post- code blocks, but do not /// leak from the interior of any instancing expressions. /// -/// Any common parameters that are unused in an arm do not have their instancing expressions -/// evaluated. -/// /// Example: /// ```ignore /// benchmarks! { /// where_clause { where T::A: From<u32> } // Optional line to give additional bound on `T`. /// -/// // common parameter; just one for this example. -/// // will be `1`, `MAX_LENGTH` or any value inbetween -/// _ { -/// let l in 1 .. MAX_LENGTH => initialize_l(l); -/// } -/// /// // first dispatchable: foo; this is a user dispatchable and operates on a `u8` vector of -/// // size `l`, which we allow to be initialized as usual. +/// // size `l` /// foo { /// let caller = account::<T::AccountId>(b"caller", 0, benchmarks_seed); -/// let l = ...; +/// let l in 1 .. MAX_LENGTH => initialize_l(l); /// }: _(Origin::Signed(caller), vec![0u8; l]) /// /// // second dispatchable: bar; this is a root dispatchable and accepts a `u8` vector of size -/// // `l`. We don't want it pre-initialized like before so we override using the `=> ()` notation. +/// // `l`. /// // In this case, we explicitly name the call using `bar` instead of `_`. /// bar { -/// let l = _ .. _ => (); +/// let l in 1 .. MAX_LENGTH => initialize_l(l); /// }: bar(Origin::Root, vec![0u8; l]) /// /// // third dispatchable: baz; this is a user dispatchable. It isn't dependent on length like the /// // other two but has its own complexity `c` that needs setting up. It uses `caller` (in the /// // pre-instancing block) within the code block. This is only allowed in the param instancers -/// // of arms. Instancers of common params cannot optimistically draw upon hypothetical variables -/// // that the arm's pre-instancing code block might have declared. +/// // of arms. /// baz1 { /// let caller = account::<T::AccountId>(b"caller", 0, benchmarks_seed); /// let c in 0 .. 10 => setup_c(&caller, c); @@ -136,8 +143,8 @@ pub use sp_storage::TrackedStorageKey; /// ``` /// /// Test functions are automatically generated for each benchmark and are accessible to you when you -/// run `cargo test`. All tests are named `test_benchmark_<benchmark_name>`, expect you to pass them -/// the Runtime Trait, and run them in a test externalities environment. The test function runs your +/// run `cargo test`. All tests are named `test_benchmark_<benchmark_name>`, implemented on the +/// Pallet struct, and run in a test externalities environment. The test function runs your /// benchmark just like a regular benchmark, but only testing at the lowest and highest values for /// each component. The function will return `Ok(())` if the benchmarks return no errors.
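/// /// Individual arms may additionally be prefixed with the `#[extra]` or `#[skip_meta]` tags, which the `benchmarks_iter!` arms below detect and extract. A minimal sketch, reusing `bar` from the example above (the arm names here are placeholders): /// /// ```ignore /// // `#[extra]` benchmarks are excluded from listings and runs unless extra benchmarks are requested. /// #[extra] /// bar_extra { /// let l in 1 .. MAX_LENGTH => initialize_l(l); /// }: bar(Origin::Root, vec![0u8; l]) /// /// // `#[skip_meta]` benchmarks do not track the individual storage keys they touch; their results /// // record a `Skipped Metadata` placeholder instead. /// #[skip_meta] /// bar_skip_meta { /// let l in 1 .. MAX_LENGTH => initialize_l(l); /// }: bar(Origin::Root, vec![0u8; l]) /// ```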
/// @@ -166,28 +173,22 @@ pub use sp_storage::TrackedStorageKey; /// #[test] /// fn test_benchmarks() { /// new_test_ext().execute_with(|| { -/// assert_ok!(test_benchmark_dummy::<Test>()); -/// assert_err!(test_benchmark_other_name::<Test>(), "Bad origin"); -/// assert_ok!(test_benchmark_sort_vector::<Test>()); -/// assert_err!(test_benchmark_broken_benchmark::<Test>(), "You forgot to sort!"); +/// assert_ok!(Pallet::<Test>::test_benchmark_dummy()); +/// assert_err!(Pallet::<Test>::test_benchmark_other_name(), "Bad origin"); +/// assert_ok!(Pallet::<Test>::test_benchmark_sort_vector()); +/// assert_err!(Pallet::<Test>::test_benchmark_broken_benchmark(), "You forgot to sort!"); /// }); /// } /// ``` #[macro_export] macro_rules! benchmarks { ( - $( where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } )? - _ { - $( - let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; - )* - } $( $rest:tt )* ) => { $crate::benchmarks_iter!( { } - { $( $( $where_ty: $where_bound ),* )? } - { $( { $common , $common_from , $common_to , $common_instancer } )* } + { } + ( ) ( ) ( ) $( $rest )* @@ -196,21 +197,36 @@ macro_rules! benchmarks { } /// Same as [`benchmarks`] but for instantiable module. +/// +/// NOTE: For pallet declared with [`frame_support::pallet`], use [`benchmarks_instance_pallet`]. #[macro_export] macro_rules! benchmarks_instance { ( - $( where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } )? - _ { - $( - let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; - )* - } $( $rest:tt )* ) => { $crate::benchmarks_iter!( - { I } - { $( $( $where_ty: $where_bound ),* )? } - { $( { $common , $common_from , $common_to , $common_instancer } )* } + { I: Instance } + { } + ( ) + ( ) + ( ) + $( $rest )* + ); + } +} + +/// Same as [`benchmarks`] but for instantiable pallet declared [`frame_support::pallet`]. +/// +/// NOTE: For pallet declared with `decl_module!`, use [`benchmarks_instance`]. +#[macro_export] +macro_rules! benchmarks_instance_pallet { + ( + $( $rest:tt )* + ) => { + $crate::benchmarks_iter!( + { I: 'static } + { } + ( ) ( ) ( ) $( $rest )* @@ -221,44 +237,84 @@ macro_rules! benchmarks_instance { #[macro_export] #[doc(hidden)] macro_rules! benchmarks_iter { - // detect and extract extra tag: + // detect and extract where clause: + ( + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + where_clause { where $( $where_bound:tt )* } + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $( $instance: $instance_bound)? } + { $( $where_bound )* } + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $( $rest )* + } + }; + // detect and extract `#[skip_meta]` tag: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + #[skip_meta] + $name:ident + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $( $instance: $instance_bound )? } + { $( $where_clause )* } + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* $name ) + $name + $( $rest )* + } + }; + // detect and extract `#[extra]` tag: + ( + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) #[extra] $name:ident $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)?
} + { $( $instance: $instance_bound )? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* $name ) + ( $( $names_skip_meta )* ) $name $( $rest )* } }; // mutation arm: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) // This contains $( $( { $instance } )? $name:ident )* ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) $name { $( $code )* }: $name ( $origin $( , $arg )* ) verify $postcode $( $rest )* @@ -266,48 +322,64 @@ macro_rules! benchmarks_iter { }; // mutation arm: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { - $crate::benchmarks_iter! { - { $( $instance)? } - { $( $where_clause )* } - { $( $common )* } - ( $( $names )* ) - ( $( $names_extra )* ) - $name { $( $code )* }: { - < - Call as $crate::frame_support::traits::UnfilteredDispatchable - >::dispatch_bypass_filter( - Call::::$dispatch($($arg),*), $origin.into() - )?; + $crate::paste::paste! { + $crate::benchmarks_iter! { + { $( $instance: $instance_bound )? } + { $( $where_clause )* } + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $name { + $( $code )* + let __call = Call::< + T + $( , $instance )? + >:: [< new_call_variant_ $dispatch >] ( + $($arg),* + ); + let __benchmarked_call_encoded = $crate::frame_support::codec::Encode::encode( + &__call + ); + }: { + let call_decoded = < + Call + as $crate::frame_support::codec::Decode + >::decode(&mut &__benchmarked_call_encoded[..]) + .expect("call is encoded above, encoding must be correct"); + + < + Call as $crate::frame_support::traits::UnfilteredDispatchable + >::dispatch_bypass_filter(call_decoded, $origin.into())?; + } + verify $postcode + $( $rest )* } - verify $postcode - $( $rest )* } }; // iteration arm: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) $name:ident { $( $code:tt )* }: $eval:block verify $postcode:block $( $rest:tt )* ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } - { $( $common )* } { } { $eval } { $( $code )* } @@ -317,55 +389,56 @@ macro_rules! benchmarks_iter { #[cfg(test)] $crate::impl_benchmark_test!( { $( $where_clause )* } - { $( $instance)? } + { $( $instance: $instance_bound )? } $name ); $crate::benchmarks_iter!( - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* { $( $instance )? } $name ) ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) $( $rest )* ); }; // iteration-exit arm ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? 
} { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) ) => { $crate::selected_benchmark!( { $( $where_clause)* } - { $( $instance)? } + { $( $instance: $instance_bound )? } $( $names )* ); $crate::impl_benchmark!( { $( $where_clause )* } - { $( $instance)? } + { $( $instance: $instance_bound )? } ( $( $names )* ) ( $( $names_extra ),* ) + ( $( $names_skip_meta ),* ) ); }; // add verify block to _() format ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) $name { $( $code )* }: _ ( $origin $( , $arg )* ) verify { } $( $rest )* @@ -373,20 +446,20 @@ macro_rules! benchmarks_iter { }; // add verify block to name() format ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) verify { } $( $rest )* @@ -394,20 +467,20 @@ macro_rules! benchmarks_iter { }; // add verify block to {} format ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } - { $( $common:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) $name:ident { $( $code:tt )* }: $eval:block $( $rest:tt )* ) => { $crate::benchmarks_iter!( - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } - { $( $common )* } ( $( $names )* ) ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) $name { $( $code )* }: $eval verify { } $( $rest )* @@ -420,10 +493,9 @@ macro_rules! benchmarks_iter { macro_rules! benchmark_backend { // parsing arms ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( PRE { $( $pre_parsed:tt )* } )* } { $eval:block } { @@ -433,10 +505,9 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } - { $( $common )* } { $( PRE { $( $pre_parsed )* } )* PRE { $pre_id , $pre_ty , $pre_ex } @@ -447,10 +518,9 @@ macro_rules! benchmark_backend { } }; ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -460,10 +530,9 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* PARAM { $param , $param_from , $param_to , $param_instancer } @@ -473,74 +542,11 @@ macro_rules! 
benchmark_backend { $postcode } }; - // mutation arm to look after defaulting to a common param - ( - { $( $instance:ident )? } - $name:ident - { $( $where_clause:tt )* } - { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } - { $( $parsed:tt )* } - { $eval:block } - { - let $param:ident in ...; - $( $rest:tt )* - } - $postcode:block - ) => { - $crate::benchmark_backend! { - { $( $instance)? } - $name - { $( $where_clause )* } - { $( { $common , $common_from , $common_to , $common_instancer } )* } - { $( $parsed )* } - { $eval } - { - let $param - in ({ $( let $common = $common_from; )* $param }) - .. ({ $( let $common = $common_to; )* $param }) - => ({ $( let $common = || -> Result<(), &'static str> { $common_instancer ; Ok(()) }; )* $param()? }); - $( $rest )* - } - $postcode - } - }; - // mutation arm to look after defaulting only the range to common param - ( - { $( $instance:ident )? } - $name:ident - { $( $where_clause:tt )* } - { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } - { $( $parsed:tt )* } - { $eval:block } - { - let $param:ident in _ .. _ => $param_instancer:expr ; - $( $rest:tt )* - } - $postcode:block - ) => { - $crate::benchmark_backend! { - { $( $instance)? } - $name - { $( $where_clause )* } - { $( { $common , $common_from , $common_to , $common_instancer } )* } - { $( $parsed )* } - { $eval } - { - let $param - in ({ $( let $common = $common_from; )* $param }) - .. ({ $( let $common = $common_to; )* $param }) - => $param_instancer ; - $( $rest )* - } - $postcode - } - }; // mutation arm to look after a single tt for param_from. ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -550,10 +556,9 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* } { $eval } { @@ -565,10 +570,9 @@ macro_rules! benchmark_backend { }; // mutation arm to look after the default tail of `=> ()` ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -578,10 +582,9 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* } { $eval } { @@ -593,10 +596,9 @@ macro_rules! benchmark_backend { }; // mutation arm to look after `let _ =` ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } - { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { @@ -606,10 +608,9 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } - { $( $common )* } { $( $parsed )* } { $eval } { @@ -621,10 +622,9 @@ macro_rules! benchmark_backend { }; // actioning arm ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? 
} $name:ident { $( $where_clause:tt )* } - { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* @@ -636,12 +636,12 @@ macro_rules! benchmark_backend { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] - impl, I: Instance)? > + impl, $instance: $instance_bound )? > $crate::BenchmarkingSetup for $name where $( $where_clause )* { - fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { - vec! [ + fn components(&self) -> $crate::Vec<($crate::BenchmarkParameter, u32, u32)> { + $crate::vec! [ $( ($crate::BenchmarkParameter::$param, $param_from, $param_to) ),* @@ -652,10 +652,7 @@ macro_rules! benchmark_backend { &self, components: &[($crate::BenchmarkParameter, u32)], verify: bool - ) -> Result Result<(), &'static str>>, &'static str> { - $( - let $common = $common_from; - )* + ) -> Result<$crate::Box Result<(), $crate::BenchmarkError>>, $crate::BenchmarkError> { $( // Prepare instance let $param = components.iter() @@ -669,7 +666,7 @@ macro_rules! benchmark_backend { $( $param_instancer ; )* $( $post )* - Ok(Box::new(move || -> Result<(), &'static str> { + Ok($crate::Box::new(move || -> Result<(), $crate::BenchmarkError> { $eval; if verify { $postcode; @@ -686,7 +683,7 @@ macro_rules! benchmark_backend { // Every variant must implement [`BenchmarkingSetup`]. // // ```nocompile -// +// // struct Transfer; // impl BenchmarkingSetup for Transfer { ... } // @@ -700,7 +697,7 @@ macro_rules! benchmark_backend { macro_rules! selected_benchmark { ( { $( $where_clause:tt )* } - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $( { $( $bench_inst:ident )? } $bench:ident )* ) => { // The list of available benchmarks for this pallet. @@ -710,11 +707,11 @@ macro_rules! selected_benchmark { } // Allow us to select a benchmark from the list of available benchmarks. - impl, I: Instance )? > + impl, $instance: $instance_bound )? > $crate::BenchmarkingSetup for SelectedBenchmark where $( $where_clause )* { - fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { + fn components(&self) -> $crate::Vec<($crate::BenchmarkParameter, u32, u32)> { match self { $( Self::$bench => < @@ -728,7 +725,7 @@ macro_rules! selected_benchmark { &self, components: &[($crate::BenchmarkParameter, u32)], verify: bool - ) -> Result Result<(), &'static str>>, &'static str> { + ) -> Result<$crate::Box Result<(), $crate::BenchmarkError>>, $crate::BenchmarkError> { match self { $( Self::$bench => < @@ -746,191 +743,173 @@ macro_rules! selected_benchmark { macro_rules! impl_benchmark { ( { $( $where_clause:tt )* } - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } ( $( { $( $name_inst:ident )? } $name:ident )* ) ( $( $name_extra:ident ),* ) + ( $( $name_skip_meta:ident ),* ) ) => { - impl, I: Instance)? > - $crate::Benchmarking<$crate::BenchmarkResults> for Module - where T: frame_system::Trait, $( $where_clause )* + impl, $instance: $instance_bound )? 
> + $crate::Benchmarking for Pallet + where T: frame_system::Config, $( $where_clause )* { - fn benchmarks(extra: bool) -> Vec<&'static [u8]> { - let mut all = vec![ $( stringify!($name).as_ref() ),* ]; + fn benchmarks(extra: bool) -> $crate::Vec<$crate::BenchmarkMetadata> { + let mut all_names = $crate::vec![ $( stringify!($name).as_ref() ),* ]; if !extra { let extra = [ $( stringify!($name_extra).as_ref() ),* ]; - all.retain(|x| !extra.contains(x)); + all_names.retain(|x| !extra.contains(x)); } - all + all_names.into_iter().map(|benchmark| { + let selected_benchmark = match benchmark { + $( stringify!($name) => SelectedBenchmark::$name, )* + _ => panic!("all benchmarks should be selectable"), + }; + let components = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::components(&selected_benchmark); + + $crate::BenchmarkMetadata { + name: benchmark.as_bytes().to_vec(), + components, + } + }).collect::<$crate::Vec<_>>() } fn run_benchmark( extrinsic: &[u8], - lowest_range_values: &[u32], - highest_range_values: &[u32], - steps: &[u32], - repeat: u32, + c: &[($crate::BenchmarkParameter, u32)], whitelist: &[$crate::TrackedStorageKey], verify: bool, - ) -> Result, &'static str> { + internal_repeats: u32, + ) -> Result<$crate::Vec<$crate::BenchmarkResult>, $crate::BenchmarkError> { // Map the input to the selected benchmark. - let extrinsic = sp_std::str::from_utf8(extrinsic) + let extrinsic = $crate::str::from_utf8(extrinsic) .map_err(|_| "`extrinsic` is not a valid utf8 string!")?; let selected_benchmark = match extrinsic { $( stringify!($name) => SelectedBenchmark::$name, )* - _ => return Err("Could not find extrinsic."), + _ => return Err("Could not find extrinsic.".into()), }; - let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); - if repeat == 0 { - return Ok(results); - } // Add whitelist to DB including whitelisted caller let mut whitelist = whitelist.to_vec(); let whitelisted_caller_key = - as frame_support::storage::StorageMap<_,_>>::hashed_key_for( + as $crate::frame_support::storage::StorageMap<_,_>>::hashed_key_for( $crate::whitelisted_caller::() ); whitelist.push(whitelisted_caller_key.into()); $crate::benchmarking::set_whitelist(whitelist); - // Warm up the DB - $crate::benchmarking::commit_db(); - $crate::benchmarking::wipe_db(); - - let components = < - SelectedBenchmark as $crate::BenchmarkingSetup - >::components(&selected_benchmark); - - // Default number of steps for a component. - let mut prev_steps = 10; + let mut results: $crate::Vec<$crate::BenchmarkResult> = $crate::Vec::new(); - let repeat_benchmark = | - repeat: u32, - c: &[($crate::BenchmarkParameter, u32)], - results: &mut Vec<$crate::BenchmarkResults>, - verify: bool, - | -> Result<(), &'static str> { - // Run the benchmark `repeat` times. - for _ in 0..repeat { - // Set up the externalities environment for the setup we want to - // benchmark. - let closure_to_benchmark = < - SelectedBenchmark as $crate::BenchmarkingSetup - >::instance(&selected_benchmark, c, verify)?; + // Always do at least one internal repeat... + for _ in 0 .. internal_repeats.max(1) { + // Set up the externalities environment for the setup we want to + // benchmark. + let closure_to_benchmark = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::instance(&selected_benchmark, c, verify)?; - // Set the block number to at least 1 so events are deposited. 
- if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1.into()); - } - - // Commit the externalities to the database, flushing the DB cache. - // This will enable worst case scenario for reading from the database. - $crate::benchmarking::commit_db(); - - // Reset the read/write counter so we don't count operations in the setup process. - $crate::benchmarking::reset_read_write_count(); - - if verify { - closure_to_benchmark()?; - } else { - // Time the extrinsic logic. - frame_support::debug::trace!( - target: "benchmark", - "Start Benchmark: {:?}", c - ); - - let start_extrinsic = $crate::benchmarking::current_time(); + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Pallet::::block_number()) { + frame_system::Pallet::::set_block_number(1u32.into()); + } - closure_to_benchmark()?; + // Commit the externalities to the database, flushing the DB cache. + // This will enable worst case scenario for reading from the database. + $crate::benchmarking::commit_db(); - let finish_extrinsic = $crate::benchmarking::current_time(); - let elapsed_extrinsic = finish_extrinsic - start_extrinsic; - // Commit the changes to get proper write count - $crate::benchmarking::commit_db(); - frame_support::debug::trace!( - target: "benchmark", - "End Benchmark: {} ns", elapsed_extrinsic - ); - let read_write_count = $crate::benchmarking::read_write_count(); - frame_support::debug::trace!( - target: "benchmark", - "Read/Write Count {:?}", read_write_count - ); + // Reset the read/write counter so we don't count operations in the setup process. + $crate::benchmarking::reset_read_write_count(); - // Time the storage root recalculation. - let start_storage_root = $crate::benchmarking::current_time(); - $crate::storage_root(); - let finish_storage_root = $crate::benchmarking::current_time(); - let elapsed_storage_root = finish_storage_root - start_storage_root; + // Time the extrinsic logic. + $crate::log::trace!( + target: "benchmark", + "Start Benchmark: {:?}", c + ); - results.push($crate::BenchmarkResults { - components: c.to_vec(), - extrinsic_time: elapsed_extrinsic, - storage_root_time: elapsed_storage_root, - reads: read_write_count.0, - repeat_reads: read_write_count.1, - writes: read_write_count.2, - repeat_writes: read_write_count.3, - }); - } + let start_pov = $crate::benchmarking::proof_size(); + let start_extrinsic = $crate::benchmarking::current_time(); - // Wipe the DB back to the genesis state. - $crate::benchmarking::wipe_db(); - } + closure_to_benchmark()?; - Ok(()) - }; + let finish_extrinsic = $crate::benchmarking::current_time(); + let end_pov = $crate::benchmarking::proof_size(); - if components.is_empty() { - if verify { - // If `--verify` is used, run the benchmark once to verify it would complete. - repeat_benchmark(1, Default::default(), &mut Vec::new(), true)?; - } - repeat_benchmark(repeat, Default::default(), &mut results, false)?; - } else { - // Select the component we will be benchmarking. Each component will be benchmarked. - for (idx, (name, low, high)) in components.iter().enumerate() { - // Get the number of steps for this component. - let steps = steps.get(idx).cloned().unwrap_or(prev_steps); - prev_steps = steps; + // Calculate the diff caused by the benchmark. 
+ let elapsed_extrinsic = finish_extrinsic.saturating_sub(start_extrinsic); + let diff_pov = match (start_pov, end_pov) { + (Some(start), Some(end)) => end.saturating_sub(start), + _ => Default::default(), + }; - // Skip this loop if steps is zero - if steps == 0 { continue } + // Commit the changes to get proper write count + $crate::benchmarking::commit_db(); + $crate::log::trace!( + target: "benchmark", + "End Benchmark: {} ns", elapsed_extrinsic + ); + let read_write_count = $crate::benchmarking::read_write_count(); + $crate::log::trace!( + target: "benchmark", + "Read/Write Count {:?}", read_write_count + ); - let lowest = lowest_range_values.get(idx).cloned().unwrap_or(*low); - let highest = highest_range_values.get(idx).cloned().unwrap_or(*high); + // Time the storage root recalculation. + let start_storage_root = $crate::benchmarking::current_time(); + $crate::storage_root(); + let finish_storage_root = $crate::benchmarking::current_time(); + let elapsed_storage_root = finish_storage_root - start_storage_root; - let diff = highest - lowest; + let skip_meta = [ $( stringify!($name_skip_meta).as_ref() ),* ]; + let read_and_written_keys = if skip_meta.contains(&extrinsic) { + $crate::vec![(b"Skipped Metadata".to_vec(), 0, 0, false)] + } else { + $crate::benchmarking::get_read_and_written_keys() + }; - // Create up to `STEPS` steps for that component between high and low. - let step_size = (diff / steps).max(1); - let num_of_steps = diff / step_size + 1; + results.push($crate::BenchmarkResult { + components: c.to_vec(), + extrinsic_time: elapsed_extrinsic, + storage_root_time: elapsed_storage_root, + reads: read_write_count.0, + repeat_reads: read_write_count.1, + writes: read_write_count.2, + repeat_writes: read_write_count.3, + proof_size: diff_pov, + keys: read_and_written_keys, + }); - for s in 0..num_of_steps { - // This is the value we will be testing for component `name` - let component_value = lowest + step_size * s; + // Wipe the DB back to the genesis state. + $crate::benchmarking::wipe_db(); + } - // Select the max value for all the other components. - let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() - .enumerate() - .map(|(idx, (n, _, h))| - if n == name { - (*n, component_value) - } else { - (*n, *highest_range_values.get(idx).unwrap_or(h)) - } - ) - .collect(); + return Ok(results); + } + } - if verify { - // If `--verify` is used, run the benchmark once to verify it would complete. - repeat_benchmark(1, &c, &mut Vec::new(), true)?; - } - repeat_benchmark(repeat, &c, &mut results, false)?; - } - } + #[cfg(test)] + impl, $instance: $instance_bound )? > + Pallet + where T: frame_system::Config, $( $where_clause )* + { + /// Test a particular benchmark by name. + /// + /// This isn't called `test_benchmark_by_name` just in case some end-user eventually + /// writes a benchmark, itself called `by_name`; the function would be shadowed in + /// that case. + /// + /// This is generally intended to be used by child test modules such as those created + /// by the `impl_benchmark_test_suite` macro. However, it is not an error if a pallet + /// author chooses not to implement benchmarks. + #[allow(unused)] + fn test_bench_by_name(name: &[u8]) -> Result<(), $crate::BenchmarkError> { + let name = $crate::str::from_utf8(name) + .map_err(|_| -> $crate::BenchmarkError { "`name` is not a valid utf8 string!".into() })?; + match name { + $( stringify!($name) => { + $crate::paste::paste! 
{ Self::[< test_benchmark_ $name >]() } + } )* + _ => Err("Could not find test for requested benchmark.".into()), } - return Ok(results); } } }; @@ -944,68 +923,371 @@ macro_rules! impl_benchmark { macro_rules! impl_benchmark_test { ( { $( $where_clause:tt )* } - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident ) => { $crate::paste::item! { - fn [] () -> Result<(), &'static str> - where T: frame_system::Trait, $( $where_clause )* + #[cfg(test)] + impl, $instance: $instance_bound )? > + Pallet + where T: frame_system::Config, $( $where_clause )* { - let selected_benchmark = SelectedBenchmark::$name; - let components = < - SelectedBenchmark as $crate::BenchmarkingSetup - >::components(&selected_benchmark); - - let execute_benchmark = | - c: Vec<($crate::BenchmarkParameter, u32)> - | -> Result<(), &'static str> { - // Set up the benchmark, return execution + verification function. - let closure_to_verify = < + #[allow(unused)] + fn [] () -> Result<(), $crate::BenchmarkError> { + let selected_benchmark = SelectedBenchmark::$name; + let components = < SelectedBenchmark as $crate::BenchmarkingSetup - >::instance(&selected_benchmark, &c, true)?; + >::components(&selected_benchmark); - // Set the block number to at least 1 so events are deposited. - if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { - frame_system::Module::::set_block_number(1.into()); - } + let execute_benchmark = | + c: $crate::Vec<($crate::BenchmarkParameter, u32)> + | -> Result<(), $crate::BenchmarkError> { + // Set up the benchmark, return execution + verification function. + let closure_to_verify = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::instance(&selected_benchmark, &c, true)?; - // Run execution + verification - closure_to_verify()?; + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Pallet::::block_number()) { + frame_system::Pallet::::set_block_number(1u32.into()); + } - // Reset the state - $crate::benchmarking::wipe_db(); + // Run execution + verification + closure_to_verify()?; - Ok(()) - }; + // Reset the state + $crate::benchmarking::wipe_db(); - if components.is_empty() { - execute_benchmark(Default::default())?; - } else { - for (_, (name, low, high)) in components.iter().enumerate() { - // Test only the low and high value, assuming values in the middle won't break - for component_value in vec![low, high] { - // Select the max value for all the other components. - let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() - .enumerate() - .map(|(_, (n, _, h))| - if n == name { - (*n, *component_value) - } else { - (*n, *h) - } - ) - .collect(); + Ok(()) + }; + + if components.is_empty() { + execute_benchmark(Default::default())?; + } else { + for (name, low, high) in components.iter() { + // Test only the low and high value, assuming values in the middle + // won't break + for component_value in $crate::vec![low, high] { + // Select the max value for all the other components. + let c: $crate::Vec<($crate::BenchmarkParameter, u32)> = components + .iter() + .map(|(n, _, h)| + if n == name { + (*n, *component_value) + } else { + (*n, *h) + } + ) + .collect(); - execute_benchmark(c)?; + execute_benchmark(c)?; + } } } + Ok(()) } - Ok(()) } } }; } +/// This creates a test suite which runs the module's benchmarks. 
+/// +/// When called in `pallet_example` as +/// +/// ```rust,ignore +/// impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +/// ``` +/// +/// It expands to the equivalent of: +/// +/// ```rust,ignore +/// #[cfg(test)] +/// mod tests { +/// use super::*; +/// use crate::tests::{new_test_ext, Test}; +/// use frame_support::assert_ok; +/// +/// #[test] +/// fn test_benchmarks() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_accumulate_dummy::()); +/// assert_ok!(test_benchmark_set_dummy::()); +/// assert_ok!(test_benchmark_another_set_dummy::()); +/// assert_ok!(test_benchmark_sort_vector::()); +/// }); +/// } +/// } +/// ``` +/// +/// ## Arguments +/// +/// The first argument, `module`, must be the path to this crate's module. +/// +/// The second argument, `new_test_ext`, must be a function call which returns either a +/// `sp_io::TestExternalities`, or some other type with a similar interface. +/// +/// Note that this function call is _not_ evaluated at compile time, but is instead copied textually +/// into each appropriate invocation site. +/// +/// The third argument, `test`, must be the path to the runtime. The item to which this must refer +/// will generally take the form: +/// +/// ```rust,ignore +/// frame_support::construct_runtime!( +/// pub enum Test where ... +/// { ... } +/// ); +/// ``` +/// +/// There is an optional fourth argument, with keyword syntax: `benchmarks_path = +/// path_to_benchmarks_invocation`. In the typical case in which this macro is in the same module as +/// the `benchmarks!` invocation, you don't need to supply this. However, if the +/// `impl_benchmark_test_suite!` invocation is in a different module than the `benchmarks!` +/// invocation, then you should provide the path to the module containing the `benchmarks!` +/// invocation: +/// +/// ```rust,ignore +/// mod benches { +/// benchmarks!{ +/// ... +/// } +/// } +/// +/// mod tests { +/// // because of macro syntax limitations, neither Pallet nor benches can be paths, but both have +/// // to be idents in the scope of `impl_benchmark_test_suite`. +/// use crate::{benches, Pallet}; +/// +/// impl_benchmark_test_suite!(Pallet, new_test_ext(), Test, benchmarks_path = benches); +/// +/// // new_test_ext and the Test item are defined later in this module +/// } +/// ``` +/// +/// There is an optional fifth argument, with keyword syntax: `extra = true` or `extra = false`. +/// By default, this generates a test suite which iterates over all benchmarks, including those +/// marked with the `#[extra]` annotation. Setting `extra = false` excludes those. +/// +/// There is an optional sixth argument, with keyword syntax: `exec_name = custom_exec_name`. +/// By default, this macro uses `execute_with` for this parameter. This argument, if set, is subject +/// to these restrictions: +/// +/// - It must be the name of a method applied to the output of the `new_test_ext` argument. +/// - That method must have a signature capable of receiving a single argument of the form `impl +/// FnOnce()`. +// ## Notes (not for rustdoc) +// +// The biggest challenge for this macro is communicating the actual test functions to be run. We +// can't just build an array of function pointers to each test function and iterate over it, because +// the test functions are parameterized by the `Test` type. 
That's incompatible with +// monomorphization: if it were legal, then even if the compiler detected and monomorphized the +// functions into only the types of the callers, which implementation would the function pointer +// point to? There would need to be some kind of syntax for selecting the destination of the pointer +// according to a generic argument, and in general it would be a huge mess and not worth it. +// +// Instead, we're going to steal a trick from `fn run_benchmark`: generate a function which is +// itself parametrized by `Test`, which accepts a `&[u8]` parameter containing the name of the +// benchmark, and dispatches based on that to the appropriate real test implementation. Then, we can +// just iterate over the `Benchmarking::benchmarks` list to run the actual implementations. +#[macro_export] +macro_rules! impl_benchmark_test_suite { + // user might or might not have set some keyword arguments; set the defaults + // + // The weird syntax indicates that `rest` comes only after a comma, which is otherwise optional + ( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $rest:tt )* )? + ) => { + $crate::impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = super, + extra = true, + exec_name = execute_with, + @user: + $( $( $rest )* )? + ); + }; + // pick off the benchmarks_path keyword argument + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $old:ident, + extra = $extra:expr, + exec_name = $exec_name:ident, + @user: + benchmarks_path = $benchmarks_path:ident + $(, $( $rest:tt )* )? + ) => { + $crate::impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = $benchmarks_path, + extra = $extra, + exec_name = $exec_name, + @user: + $( $( $rest )* )? + ); + }; + // pick off the extra keyword argument + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $benchmarks_path:ident, + extra = $old:expr, + exec_name = $exec_name:ident, + @user: + extra = $extra:expr + $(, $( $rest:tt )* )? + ) => { + $crate::impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = $benchmarks_path, + extra = $extra, + exec_name = $exec_name, + @user: + $( $( $rest )* )? + ); + }; + // pick off the exec_name keyword argument + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $benchmarks_path:ident, + extra = $extra:expr, + exec_name = $old:ident, + @user: + exec_name = $exec_name:ident + $(, $( $rest:tt )* )? + ) => { + $crate::impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = $benchmarks_path, + extra = $extra, + exec_name = $exec_name, + @user: + $( $( $rest )* )? + ); + }; + // all options set; nothing else in user-provided keyword arguments + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $path_to_benchmarks_invocation:ident, + extra = $extra:expr, + exec_name = $exec_name:ident, + @user: + $(,)? 
+ ) => { + #[cfg(test)] + mod benchmark_tests { + use super::$bench_module; + + #[test] + fn test_benchmarks() { + $new_test_ext.$exec_name(|| { + use $crate::Benchmarking; + + let mut anything_failed = false; + println!("failing benchmark tests:"); + for benchmark_metadata in $bench_module::<$test>::benchmarks($extra) { + let benchmark_name = &benchmark_metadata.name; + match std::panic::catch_unwind(|| { + $bench_module::<$test>::test_bench_by_name(benchmark_name) + }) { + Err(err) => { + println!( + "{}: {:?}", + $crate::str::from_utf8(benchmark_name) + .expect("benchmark name is always a valid string!"), + err, + ); + anything_failed = true; + }, + Ok(Err(err)) => { + match err { + $crate::BenchmarkError::Stop(err) => { + println!( + "{}: {:?}", + $crate::str::from_utf8(benchmark_name) + .expect("benchmark name is always a valid string!"), + err, + ); + anything_failed = true; + }, + $crate::BenchmarkError::Override(_) => { + // This is still considered a success condition. + $crate::log::error!( + "WARNING: benchmark error overrided - {}", + $crate::str::from_utf8(benchmark_name) + .expect("benchmark name is always a valid string!"), + ); + }, + $crate::BenchmarkError::Skip => { + // This is considered a success condition. + $crate::log::error!( + "WARNING: benchmark error skipped - {}", + $crate::str::from_utf8(benchmark_name) + .expect("benchmark name is always a valid string!"), + ); + } + } + }, + Ok(Ok(())) => (), + } + } + assert!(!anything_failed); + }); + } + } + }; +} + +/// show error message and debugging info for the case of an error happening +/// during a benchmark +pub fn show_benchmark_debug_info( + instance_string: &[u8], + benchmark: &[u8], + components: &[(BenchmarkParameter, u32)], + verify: &bool, + error_message: &str, +) -> sp_runtime::RuntimeString { + sp_runtime::format_runtime_string!( + "\n* Pallet: {}\n\ + * Benchmark: {}\n\ + * Components: {:?}\n\ + * Verify: {:?}\n\ + * Error message: {}", + sp_std::str::from_utf8(instance_string) + .expect("it's all just strings ran through the wasm interface. qed"), + sp_std::str::from_utf8(benchmark) + .expect("it's all just strings ran through the wasm interface. qed"), + components, + verify, + error_message, + ) +} /// This macro adds pallet benchmarks to a `Vec` object. /// @@ -1016,8 +1298,8 @@ macro_rules! impl_benchmark_test { /// ``` /// /// The `whitelist` is a parameter you pass to control the DB read/write tracking. -/// We use a vector of [TrackedStorageKey](./struct.TrackedStorageKey.html), which is a simple struct used to set -/// if a key has been read or written to. +/// We use a vector of [TrackedStorageKey](./struct.TrackedStorageKey.html), which is a simple +/// struct used to set if a key has been read or written to. /// /// For values that should be skipped entirely, we can just pass `key.into()`. For example: /// @@ -1052,53 +1334,123 @@ macro_rules! impl_benchmark_test { /// ``` /// /// At the end of `dispatch_benchmark`, you should return this batches object. +/// +/// In the case where you have multiple instances of a pallet that you need to separately benchmark, +/// the name of your module struct will be used as a suffix to your outputted weight file. 
For +/// example: +/// +/// ```ignore +/// add_benchmark!(params, batches, pallet_balances, Balances); // pallet_balances.rs +/// add_benchmark!(params, batches, pallet_collective, Council); // pallet_collective_council.rs +/// add_benchmark!(params, batches, pallet_collective, TechnicalCommittee); // pallet_collective_technical_committee.rs +/// ``` +/// +/// You can manipulate this suffixed string by using a type alias if needed. For example: +/// +/// ```ignore +/// type Council2 = TechnicalCommittee; +/// add_benchmark!(params, batches, pallet_collective, Council2); // pallet_collective_council_2.rs +/// ``` + #[macro_export] macro_rules! add_benchmark { - ( $params:ident, $batches:ident, $name:ident, $( $location:tt )* ) => ( + ( $params:ident, $batches:ident, $name:path, $( $location:tt )* ) => ( let name_string = stringify!($name).as_bytes(); + let instance_string = stringify!( $( $location )* ).as_bytes(); let (config, whitelist) = $params; let $crate::BenchmarkConfig { pallet, benchmark, - lowest_range_values, - highest_range_values, - steps, - repeat, + selected_components, verify, - extra, + internal_repeats, } = config; - if &pallet[..] == &name_string[..] || &pallet[..] == &b"*"[..] { - if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] { - for benchmark in $( $location )*::benchmarks(*extra).into_iter() { - $batches.push($crate::BenchmarkBatch { - results: $( $location )*::run_benchmark( - benchmark, - &lowest_range_values[..], - &highest_range_values[..], - &steps[..], - *repeat, - whitelist, - *verify, - )?, - pallet: name_string.to_vec(), - benchmark: benchmark.to_vec(), - }); + if &pallet[..] == &name_string[..] { + let benchmark_result = $( $location )*::run_benchmark( + &benchmark[..], + &selected_components[..], + whitelist, + *verify, + *internal_repeats, + ); + + let final_results = match benchmark_result { + Ok(results) => Some(results), + Err($crate::BenchmarkError::Override(mut result)) => { + // Insert override warning as the first storage key. + $crate::log::error!( + "WARNING: benchmark error overrided - {}", + $crate::str::from_utf8(benchmark) + .expect("benchmark name is always a valid string!") + ); + result.keys.insert(0, + (b"Benchmark Override".to_vec(), 0, 0, false) + ); + Some($crate::vec![result]) + }, + Err($crate::BenchmarkError::Stop(e)) => { + $crate::show_benchmark_debug_info( + instance_string, + benchmark, + selected_components, + verify, + e, + ); + return Err(e.into()); + }, + Err($crate::BenchmarkError::Skip) => { + $crate::log::error!( + "WARNING: benchmark error skipped - {}", + $crate::str::from_utf8(benchmark) + .expect("benchmark name is always a valid string!") + ); + None } - } else { + }; + + if let Some(final_results) = final_results { $batches.push($crate::BenchmarkBatch { - results: $( $location )*::run_benchmark( - &benchmark[..], - &lowest_range_values[..], - &highest_range_values[..], - &steps[..], - *repeat, - whitelist, - *verify, - )?, pallet: name_string.to_vec(), + instance: instance_string.to_vec(), benchmark: benchmark.clone(), + results: final_results, }); } } ) } + +/// This macro allows users to easily generate a list of benchmarks for the pallets configured +/// in the runtime. 
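+/// +/// Unlike `add_benchmark!`, nothing is executed here: each invocation only collects the pallet's benchmark metadata (the benchmark names and component ranges reported by `Benchmarking::benchmarks`) into the list.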
+/// +/// To use this macro, first create an object to store the list: +/// +/// ```ignore +/// let mut list = Vec::<BenchmarkList>::new(); +/// ``` +/// +/// Then pass this `list` to the macro, along with the `extra` boolean, the pallet crate, and +/// pallet struct: +/// +/// ```ignore +/// list_benchmark!(list, extra, pallet_balances, Balances); +/// list_benchmark!(list, extra, pallet_session, SessionBench::<Runtime>); +/// list_benchmark!(list, extra, frame_system, SystemBench::<Runtime>); +/// ``` +/// +/// This should match what exists with the `add_benchmark!` macro. + +#[macro_export] +macro_rules! list_benchmark { + ( $list:ident, $extra:ident, $name:path, $( $location:tt )* ) => ( + let pallet_string = stringify!($name).as_bytes(); + let instance_string = stringify!( $( $location )* ).as_bytes(); + let benchmarks = $( $location )*::benchmarks($extra); + let pallet_benchmarks = BenchmarkList { + pallet: pallet_string.to_vec(), + instance: instance_string.to_vec(), + benchmarks: benchmarks.to_vec(), + }; + $list.push(pallet_benchmarks) + ) +} diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 0429d98e18618..a2cf381e6ecf8 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,241 +20,335 @@ #![cfg(test)] use super::*; -use sp_std::prelude::*; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}}; -use frame_support::{ - dispatch::DispatchResult, - decl_module, decl_storage, impl_outer_origin, assert_ok, assert_err, ensure +use frame_support::parameter_types; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -use frame_system::{RawOrigin, ensure_signed, ensure_none}; +use sp_std::prelude::*; -decl_storage! { - trait Store for Module<T: Trait> as Test where - <T as OtherTrait>::OtherEvent: Into<<T as Trait>::Event> - { - Value get(fn value): Option<u32>; +#[frame_support::pallet] +mod pallet_test { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet<T>(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type LowerBound: Get<u32>; + type UpperBound: Get<u32>; + type MaybeItem: Get<Option<u32>>; } -} -decl_module! { - pub struct Module<T: Trait> for enum Call where - origin: T::Origin, <T as OtherTrait>::OtherEvent: Into<<T as Trait>::Event> - { - #[weight = 0] - fn set_value(origin, n: u32) -> DispatchResult { - let _sender = ensure_signed(origin)?; - Value::put(n); + #[pallet::storage] + #[pallet::getter(fn heartbeat_after)] + pub(crate) type Value<T> = StorageValue<_, u32, OptionQuery>; + + #[pallet::call] + impl<T: Config> Pallet<T> { + #[pallet::weight(0)] + pub fn set_value(origin: OriginFor<T>, n: u32) -> DispatchResult { + let _sender = frame_system::ensure_signed(origin)?; + Value::<T>::put(n); Ok(()) } - #[weight = 0] - fn dummy(origin, _n: u32) -> DispatchResult { - let _sender = ensure_none(origin)?; + #[pallet::weight(0)] + pub fn dummy(origin: OriginFor<T>, _n: u32) -> DispatchResult { + let _sender = frame_system::ensure_none(origin)?; Ok(()) } - } -} -impl_outer_origin!
{ - pub enum Origin for Test where system = frame_system {} -} -pub trait OtherTrait { - type OtherEvent; + #[pallet::weight(0)] + pub fn always_error(_origin: OriginFor) -> DispatchResult { + return Err("I always fail".into()) + } + } } -pub trait Trait: frame_system::Trait + OtherTrait - where Self::OtherEvent: Into<::Event> -{ - type Event; -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -#[derive(Clone, Eq, PartialEq)] -pub struct Test; +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + TestPallet: pallet_test::{Pallet, Call, Storage}, + } +); -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type MaximumBlockLength = (); - type AvailableBlockRatio = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl Trait for Test { - type Event = (); +parameter_types! { + pub const LowerBound: u32 = 1; + pub const UpperBound: u32 = 100; + pub const MaybeItem: Option = None; } -impl OtherTrait for Test { - type OtherEvent = (); +impl pallet_test::Config for Test { + type LowerBound = LowerBound; + type UpperBound = UpperBound; + type MaybeItem = MaybeItem; } fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + GenesisConfig::default().build_storage().unwrap().into() } -benchmarks!{ - where_clause { where ::OtherEvent: Into<::Event> } - - _ { - // Define a common range for `b`. - let b in 1 .. 1000 => (); - } - - set_value { - let b in ...; - let caller = account::("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - verify { - assert_eq!(Value::get(), Some(b)); - } +mod benchmarks { + use super::{new_test_ext, pallet_test::Value, Test}; + use crate::{account, BenchmarkError, BenchmarkParameter, BenchmarkResult, BenchmarkingSetup}; + use frame_support::{assert_err, assert_ok, ensure, traits::Get}; + use frame_system::RawOrigin; + use sp_std::prelude::*; - other_name { - let b in ...; - }: dummy (RawOrigin::None, b.into()) + // Additional used internally by the benchmark macro. + use super::pallet_test::{Call, Config, Pallet}; - sort_vector { - let x in 1 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); + crate::benchmarks! { + where_clause { + where + crate::tests::Origin: From::AccountId>>, } - }: { - m.sort(); - } verify { - ensure!(m[0] == 0, "You forgot to sort!") - } - bad_origin { - let b in ...; - let caller = account::("caller", 0, 0); - }: dummy (RawOrigin::Signed(caller), b.into()) + set_value { + let b in 1 .. 
1000; + let caller = account::("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + verify { + assert_eq!(Value::::get(), Some(b)); + } - bad_verify { - let x in 1 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); + other_name { + let b in 1 .. 1000; + }: dummy (RawOrigin::None, b.into()) + + sort_vector { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + m.sort(); + } verify { + ensure!(m[0] == 0, "You forgot to sort!") } - }: { } - verify { - ensure!(m[0] == 0, "You forgot to sort!") - } - no_components { - let caller = account::("caller", 0, 0); - }: set_value(RawOrigin::Signed(caller), 0) -} + bad_origin { + let b in 1 .. 1000; + let caller = account::("caller", 0, 0); + }: dummy (RawOrigin::Signed(caller), b.into()) + + bad_verify { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { } + verify { + ensure!(m[0] == 0, "You forgot to sort!") + } -#[test] -fn benchmarks_macro_works() { - // Check benchmark creation for `set_value`. - let selected = SelectedBenchmark::set_value; + no_components { + let caller = account::("caller", 0, 0); + }: set_value(RawOrigin::Signed(caller), 0) + + variable_components { + let b in ( T::LowerBound::get() ) .. T::UpperBound::get(); + }: dummy (RawOrigin::None, b.into()) + + #[extra] + extra_benchmark { + let b in 1 .. 1000; + let caller = account::("caller", 0, 0); + }: set_value(RawOrigin::Signed(caller), b.into()) + verify { + assert_eq!(Value::::get(), Some(b)); + } - let components = >::components(&selected); - assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + #[skip_meta] + skip_meta_benchmark { + let b in 1 .. 1000; + let caller = account::("caller", 0, 0); + }: set_value(RawOrigin::Signed(caller), b.into()) + verify { + assert_eq!(Value::::get(), Some(b)); + } - let closure = >::instance( - &selected, - &[(BenchmarkParameter::b, 1)], - true, - ).expect("failed to create closure"); + override_benchmark { + let b in 1 .. 1000; + let caller = account::("caller", 0, 0); + }: { + Err(BenchmarkError::Override( + BenchmarkResult { + extrinsic_time: 1_234_567_890, + reads: 1337, + writes: 420, + ..Default::default() + } + ))?; + } - new_test_ext().execute_with(|| { - assert_ok!(closure()); - }); -} + skip_benchmark { + let value = T::MaybeItem::get().ok_or(BenchmarkError::Skip)?; + }: { + // This should never be reached. + assert!(value > 100); + } + } -#[test] -fn benchmarks_macro_rename_works() { - // Check benchmark creation for `other_dummy`. - let selected = SelectedBenchmark::other_name; - let components = >::components(&selected); - assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + #[test] + fn benchmarks_macro_works() { + // Check benchmark creation for `set_value`. 
+ let selected = SelectedBenchmark::set_value; - let closure = >::instance( - &selected, - &[(BenchmarkParameter::b, 1)], - true, - ).expect("failed to create closure"); + let components = >::components(&selected); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); - new_test_ext().execute_with(|| { - assert_ok!(closure()); - }); -} - -#[test] -fn benchmarks_macro_works_for_non_dispatchable() { - let selected = SelectedBenchmark::sort_vector; + let closure = >::instance( + &selected, + &[(BenchmarkParameter::b, 1)], + true, + ) + .expect("failed to create closure"); - let components = >::components(&selected); - assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); + } - let closure = >::instance( - &selected, - &[(BenchmarkParameter::x, 1)], - true, - ).expect("failed to create closure"); + #[test] + fn benchmarks_macro_rename_works() { + // Check benchmark creation for `other_dummy`. + let selected = SelectedBenchmark::other_name; + let components = >::components(&selected); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::b, 1)], + true, + ) + .expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); + } - assert_ok!(closure()); -} + #[test] + fn benchmarks_macro_works_for_non_dispatchable() { + let selected = SelectedBenchmark::sort_vector; -#[test] -fn benchmarks_macro_verify_works() { - // Check postcondition for benchmark `set_value` is valid. - let selected = SelectedBenchmark::set_value; + let components = >::components(&selected); + assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); - let closure = >::instance( - &selected, - &[(BenchmarkParameter::b, 1)], - true, - ).expect("failed to create closure"); + let closure = >::instance( + &selected, + &[(BenchmarkParameter::x, 1)], + true, + ) + .expect("failed to create closure"); - new_test_ext().execute_with(|| { assert_ok!(closure()); - }); - - // Check postcondition for benchmark `bad_verify` is invalid. - let selected = SelectedBenchmark::bad_verify; + } - let closure = >::instance( - &selected, - &[(BenchmarkParameter::x, 10000)], - true, - ).expect("failed to create closure"); + #[test] + fn benchmarks_macro_verify_works() { + // Check postcondition for benchmark `set_value` is valid. + let selected = SelectedBenchmark::set_value; + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::b, 1)], + true, + ) + .expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); + + // Check postcondition for benchmark `bad_verify` is invalid. 
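+		// The closure is built with `verify == true`, so it also executes the
+		// benchmark's `verify` block; `bad_verify` deliberately skips the sort, so
+		// its postcondition must fail with "You forgot to sort!".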
+ let selected = SelectedBenchmark::bad_verify; + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::x, 10000)], + true, + ) + .expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_err!(closure(), "You forgot to sort!"); + }); + } - new_test_ext().execute_with(|| { - assert_err!(closure(), "You forgot to sort!"); - }); -} + #[test] + fn benchmark_override_works() { + let selected = SelectedBenchmark::override_benchmark; + + let closure = >::instance( + &selected, + &[(BenchmarkParameter::b, 1)], + true, + ) + .expect("failed to create closure"); + + new_test_ext().execute_with(|| { + let result = closure(); + assert!(matches!(result, Err(BenchmarkError::Override(_)))); + }); + } -#[test] -fn benchmarks_generate_unit_tests() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_value::()); - assert_ok!(test_benchmark_other_name::()); - assert_ok!(test_benchmark_sort_vector::()); - assert_err!(test_benchmark_bad_origin::(), "Bad origin"); - assert_err!(test_benchmark_bad_verify::(), "You forgot to sort!"); - assert_ok!(test_benchmark_no_components::()); - }); + #[test] + fn benchmarks_generate_unit_tests() { + new_test_ext().execute_with(|| { + assert_ok!(Pallet::::test_benchmark_set_value()); + assert_ok!(Pallet::::test_benchmark_other_name()); + assert_ok!(Pallet::::test_benchmark_sort_vector()); + assert_err!(Pallet::::test_benchmark_bad_origin(), "Bad origin"); + assert_err!(Pallet::::test_benchmark_bad_verify(), "You forgot to sort!"); + assert_ok!(Pallet::::test_benchmark_no_components()); + assert_ok!(Pallet::::test_benchmark_variable_components()); + assert!(matches!( + Pallet::::test_benchmark_override_benchmark(), + Err(BenchmarkError::Override(_)), + )); + assert_eq!(Pallet::::test_benchmark_skip_benchmark(), Err(BenchmarkError::Skip),); + }); + } } diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs new file mode 100644 index 0000000000000..caccebd39c70b --- /dev/null +++ b/frame/benchmarking/src/tests_instance.rs @@ -0,0 +1,183 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for the benchmark macro for instantiable modules + +#![cfg(test)] + +use super::*; +use frame_support::parameter_types; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; +use sp_std::prelude::*; + +mod pallet_test { + use frame_support::pallet_prelude::Get; + + frame_support::decl_storage! { + trait Store for Module, I: Instance = DefaultInstance> as Test where + ::OtherEvent: Into<>::Event> + { + pub Value get(fn value): Option; + } + } + + frame_support::decl_module! 
{ + pub struct Module, I: Instance = DefaultInstance> for enum Call where + origin: T::Origin, ::OtherEvent: Into<>::Event> + { + #[weight = 0] + fn set_value(origin, n: u32) -> frame_support::dispatch::DispatchResult { + let _sender = frame_system::ensure_signed(origin)?; + Value::::put(n); + Ok(()) + } + + #[weight = 0] + fn dummy(origin, _n: u32) -> frame_support::dispatch::DispatchResult { + let _sender = frame_system::ensure_none(origin)?; + Ok(()) + } + } + } + + pub trait OtherConfig { + type OtherEvent; + } + + pub trait Config: frame_system::Config + OtherConfig + where + Self::OtherEvent: Into<>::Event>, + { + type Event; + type LowerBound: Get; + type UpperBound: Get; + } +} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + TestPallet: pallet_test::{Pallet, Call, Storage}, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +parameter_types! { + pub const LowerBound: u32 = 1; + pub const UpperBound: u32 = 100; +} + +impl pallet_test::Config for Test { + type Event = Event; + type LowerBound = LowerBound; + type UpperBound = UpperBound; +} + +impl pallet_test::OtherConfig for Test { + type OtherEvent = Event; +} + +fn new_test_ext() -> sp_io::TestExternalities { + GenesisConfig::default().build_storage().unwrap().into() +} + +mod benchmarks { + use super::pallet_test::{self, Value}; + use crate::account; + use frame_support::{ensure, StorageValue}; + use frame_system::RawOrigin; + use sp_std::prelude::*; + + // Additional used internally by the benchmark macro. + use super::pallet_test::{Call, Config, Pallet}; + use frame_support::traits::Instance; + + crate::benchmarks_instance! { + where_clause { + where + ::OtherEvent: Clone + + Into<>::Event>, + >::Event: Clone, + } + + set_value { + let b in 1 .. 1000; + let caller = account::("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + verify { + assert_eq!(Value::::get(), Some(b)); + } + + other_name { + let b in 1 .. 1000; + }: dummy (RawOrigin::None, b.into()) + + sort_vector { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + m.sort(); + } verify { + ensure!(m[0] == 0, "You forgot to sort!") + } + } + + crate::impl_benchmark_test_suite!( + Pallet, + crate::tests_instance::new_test_ext(), + crate::tests_instance::Test + ); +} diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 042f4b707aef4..158f5c5b57573 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,14 @@ //! Interfaces, types and utils for benchmarking a FRAME runtime. -use codec::{Encode, Decode}; -use sp_std::{vec::Vec, prelude::Box}; +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{DispatchError, DispatchErrorWithPostInfo}, + pallet_prelude::*, + traits::StorageInfo, +}; use sp_io::hashing::blake2_256; -use sp_runtime::RuntimeString; +use sp_std::{prelude::Box, vec::Vec}; use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. @@ -28,7 +32,32 @@ use sp_storage::TrackedStorageKey; #[allow(missing_docs)] #[allow(non_camel_case_types)] pub enum BenchmarkParameter { - a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, + a, + b, + c, + d, + e, + f, + g, + h, + i, + j, + k, + l, + m, + n, + o, + p, + q, + r, + s, + t, + u, + v, + w, + x, + y, + z, } #[cfg(feature = "std")] @@ -43,17 +72,35 @@ impl std::fmt::Display for BenchmarkParameter { pub struct BenchmarkBatch { /// The pallet containing this benchmark. pub pallet: Vec, + /// The instance of this pallet being benchmarked. + pub instance: Vec, /// The extrinsic (or benchmark name) of this benchmark. pub benchmark: Vec, /// The results from this benchmark. - pub results: Vec, + pub results: Vec, } -/// Results from running benchmarks on a FRAME pallet. +// TODO: could probably make API cleaner here. +/// The results of a single of benchmark, where time and db results are separated. +#[derive(Encode, Decode, Clone, PartialEq, Debug)] +pub struct BenchmarkBatchSplitResults { + /// The pallet containing this benchmark. + pub pallet: Vec, + /// The instance of this pallet being benchmarked. + pub instance: Vec, + /// The extrinsic (or benchmark name) of this benchmark. + pub benchmark: Vec, + /// The extrinsic timing results from this benchmark. + pub time_results: Vec, + /// The db tracking results from this benchmark. + pub db_results: Vec, +} + +/// Result from running benchmarks on a FRAME pallet. /// Contains duration of the function call in nanoseconds along with the benchmark parameters /// used for that benchmark result. #[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] -pub struct BenchmarkResults { +pub struct BenchmarkResult { pub components: Vec<(BenchmarkParameter, u32)>, pub extrinsic_time: u128, pub storage_root_time: u128, @@ -61,6 +108,55 @@ pub struct BenchmarkResults { pub repeat_reads: u32, pub writes: u32, pub repeat_writes: u32, + pub proof_size: u32, + pub keys: Vec<(Vec, u32, u32, bool)>, +} + +impl BenchmarkResult { + pub fn from_weight(w: Weight) -> Self { + Self { extrinsic_time: (w as u128) / 1_000, ..Default::default() } + } +} + +/// Possible errors returned from the benchmarking pipeline. +#[derive(Clone, PartialEq, Debug)] +pub enum BenchmarkError { + /// The benchmarking pipeline should stop and return the inner string. + Stop(&'static str), + /// The benchmarking pipeline is allowed to fail here, and we should use the + /// included weight instead. + Override(BenchmarkResult), + /// The benchmarking pipeline is allowed to fail here, and we should simply + /// skip processing these results. 
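+	/// (For example, a benchmark may return this when a required configuration
+	/// item is `None`, as `skip_benchmark` does in this crate's tests.)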
+ Skip, +} + +impl From for &'static str { + fn from(e: BenchmarkError) -> Self { + match e { + BenchmarkError::Stop(s) => s, + BenchmarkError::Override(_) => "benchmark override", + BenchmarkError::Skip => "benchmark skip", + } + } +} + +impl From<&'static str> for BenchmarkError { + fn from(s: &'static str) -> Self { + Self::Stop(s) + } +} + +impl From for BenchmarkError { + fn from(e: DispatchErrorWithPostInfo) -> Self { + Self::Stop(e.into()) + } +} + +impl From for BenchmarkError { + fn from(e: DispatchError) -> Self { + Self::Stop(e.into()) + } } /// Configuration used to setup and run runtime benchmarks. @@ -70,25 +166,42 @@ pub struct BenchmarkConfig { pub pallet: Vec, /// The encoded name of the benchmark/extrinsic to run. pub benchmark: Vec, - /// An optional manual override to the lowest values used in the `steps` range. - pub lowest_range_values: Vec, - /// An optional manual override to the highest values used in the `steps` range. - pub highest_range_values: Vec, - /// The number of samples to take across the range of values for components. - pub steps: Vec, - /// The number of times to repeat a benchmark. - pub repeat: u32, + /// The selected component values to use when running the benchmark. + pub selected_components: Vec<(BenchmarkParameter, u32)>, /// Enable an extra benchmark iteration which runs the verification logic for a benchmark. pub verify: bool, - /// Enable benchmarking of "extra" extrinsics, i.e. those that are not directly used in a pallet. - pub extra: bool, + /// Number of times to repeat benchmark within the Wasm environment. (versus in the client) + pub internal_repeats: u32, +} + +/// A list of benchmarks available for a particular pallet and instance. +/// +/// All `Vec` must be valid utf8 strings. +#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] +pub struct BenchmarkList { + pub pallet: Vec, + pub instance: Vec, + pub benchmarks: Vec, +} + +#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] +pub struct BenchmarkMetadata { + pub name: Vec, + pub components: Vec<(BenchmarkParameter, u32, u32)>, } sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. pub trait Benchmark { + /// Get the benchmark metadata available for this runtime. + /// + /// Parameters + /// - `extra`: Also list benchmarks marked "extra" which would otherwise not be + /// needed for weight calculation. + fn benchmark_metadata(extra: bool) -> (Vec, Vec); + /// Dispatch the given benchmark. - fn dispatch_benchmark(config: BenchmarkConfig) -> Result, RuntimeString>; + fn dispatch_benchmark(config: BenchmarkConfig) -> Result, sp_runtime::RuntimeString>; } } @@ -100,7 +213,8 @@ pub trait Benchmarking { /// WARNING! This is a non-deterministic call. Do not use this within /// consensus critical logic. fn current_time() -> u128 { - std::time::SystemTime::now().duration_since(std::time::SystemTime::UNIX_EPOCH) + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) .expect("Unix time doesn't go backwards; qed") .as_nanos() } @@ -139,18 +253,17 @@ pub trait Benchmarking { fn add_to_whitelist(&mut self, add: TrackedStorageKey) { let mut whitelist = self.get_whitelist(); match whitelist.iter_mut().find(|x| x.key == add.key) { - // If we already have this key in the whitelist, update to be the most constrained value. + // If we already have this key in the whitelist, update to be the most constrained + // value. 
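+			// Merging keeps the most constrained entry: observed reads and writes
+			// accumulate, and the key stays whitelisted if either entry was.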
Some(item) => { - *item = TrackedStorageKey { - key: add.key, - has_been_read: item.has_been_read || add.has_been_read, - has_been_written: item.has_been_written || add.has_been_written, - } + item.reads += add.reads; + item.writes += add.writes; + item.whitelisted = item.whitelisted || add.whitelisted; }, // If the key does not exist, add it. None => { whitelist.push(add); - } + }, } self.set_whitelist(whitelist); } @@ -161,36 +274,35 @@ pub trait Benchmarking { whitelist.retain(|x| x.key != remove); self.set_whitelist(whitelist); } + + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + self.get_read_and_written_keys() + } + + /// Get current estimated proof size. + fn proof_size(&self) -> Option { + self.proof_size() + } } /// The pallet benchmarking trait. -pub trait Benchmarking { +pub trait Benchmarking { /// Get the benchmarks available for this pallet. Generally there is one benchmark per /// extrinsic, so these are sometimes just called "extrinsics". /// /// Parameters - /// - `extra`: Also return benchmarks marked "extra" which would otherwise not be - /// needed for weight calculation. - fn benchmarks(extra: bool) -> Vec<&'static [u8]>; + /// - `extra`: Also return benchmarks marked "extra" which would otherwise not be needed for + /// weight calculation. + fn benchmarks(extra: bool) -> Vec; /// Run the benchmarks for this pallet. - /// - /// Parameters - /// - `name`: The name of extrinsic function or benchmark you want to benchmark encoded as - /// bytes. - /// - `steps`: The number of sample points you want to take across the range of parameters. - /// - `lowest_range_values`: The lowest number for each range of parameters. - /// - `highest_range_values`: The highest number for each range of parameters. - /// - `repeat`: The number of times you want to repeat a benchmark. fn run_benchmark( name: &[u8], - lowest_range_values: &[u32], - highest_range_values: &[u32], - steps: &[u32], - repeat: u32, + selected_components: &[(BenchmarkParameter, u32)], whitelist: &[TrackedStorageKey], verify: bool, - ) -> Result, &'static str>; + internal_repeats: u32, + ) -> Result, BenchmarkError>; } /// The required setup for creating a benchmark. @@ -205,12 +317,16 @@ pub trait BenchmarkingSetup { fn instance( &self, components: &[(BenchmarkParameter, u32)], - verify: bool - ) -> Result Result<(), &'static str>>, &'static str>; + verify: bool, + ) -> Result Result<(), BenchmarkError>>, BenchmarkError>; } /// Grab an account, seeded by a name and index. -pub fn account(name: &'static str, index: u32, seed: u32) -> AccountId { +pub fn account( + name: &'static str, + index: u32, + seed: u32, +) -> AccountId { let entropy = (name, index, seed).using_encoded(blake2_256); AccountId::decode(&mut &entropy[..]).unwrap_or_default() } @@ -224,7 +340,7 @@ pub fn whitelisted_caller() -> AccountId { macro_rules! 
whitelist_account {
	($acc:ident) => {
		frame_benchmarking::benchmarking::add_to_whitelist(
-			frame_system::Account::<T>::hashed_key_for(&$acc).into()
+			frame_system::Account::<T>::hashed_key_for(&$acc).into(),
		);
-	}
+	};
}
diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml
new file mode 100644
index 0000000000000..3bb184d5b3393
--- /dev/null
+++ b/frame/bounties/Cargo.toml
@@ -0,0 +1,49 @@
+[package]
+name = "pallet-bounties"
+version = "4.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "Apache-2.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "FRAME pallet to manage bounties"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [
+	"derive",
+] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
+pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../treasury" }
+
+frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true }
+
+[dev-dependencies]
+sp-io = { version = "4.0.0-dev", path = "../../primitives/io" }
+sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
+pallet-balances = { version = "4.0.0-dev", path = "../balances" }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"scale-info/std",
+	"sp-std/std",
+	"sp-runtime/std",
+	"frame-support/std",
+	"frame-system/std",
+	"pallet-treasury/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+]
+try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/bounties/README.md b/frame/bounties/README.md
new file mode 100644
index 0000000000000..bf63fca5f34b2
--- /dev/null
+++ b/frame/bounties/README.md
@@ -0,0 +1,52 @@
+# Bounties Module ( pallet-bounties )
+
+## Bounty
+
+**Note:** This pallet is tightly coupled with pallet-treasury.
+
+A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that
+needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after
+the bounty is approved and funded by Council, to be delegated with the responsibility of assigning a
+payout address once the specified set of objectives is completed.
+
+After the Council has activated a bounty, it delegates the work that requires expertise to a curator
+in exchange for a deposit. Once the curator accepts the bounty, they get to close the active bounty.
+Closing the active bounty enacts a delayed payout to the payout address, the curator fee and the
+return of the curator deposit. The delay allows for intervention through regular democracy. The
+Council gets to unassign the curator, resulting in a new curator election. The Council also gets to
+cancel the bounty if deemed necessary before assigning a curator or once the bounty is active or
+payout is pending, resulting in the slash of the curator's deposit.
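+
+For example, the expected happy-path call sequence looks like this (a sketch against a mock
+runtime such as the one in this pallet's tests, where root acts as both `ApproveOrigin` and
+`RejectOrigin`; account numbers and amounts are illustrative, and advancing blocks past the
+spend period and payout delay is elided):
+
+```rust
+// Proposer (account 0) stakes a deposit and proposes a 50-unit bounty.
+assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"docs".to_vec()));
+// Council approves; the bounty is funded from the pot at the next spend period.
+assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+// Council proposes account 4 as curator for a fee of 4; the curator accepts,
+// reserving a curator deposit taken from the fee.
+assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4));
+assert_ok!(Bounties::accept_curator(Origin::signed(4), 0));
+// The curator awards the bounty to beneficiary 3; once the payout delay has
+// passed, anyone may trigger the claim, paying the beneficiary and the curator.
+assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3));
+assert_ok!(Bounties::claim_bounty(Origin::signed(3), 0));
+```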
+
+### Terminology
+
+- **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by
+  the Treasury.
+- **Proposer:** An account proposing a bounty spending.
+- **Curator:** An account managing the bounty and assigning a payout address receiving the reward
+  for the completion of work.
+- **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on
+  deposit per byte within the bounty description.
+- **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The
+  deposit is returned when/if the bounty is completed.
+- **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is
+  rewarded.
+- **Payout address:** The account to which the total or part of the bounty is assigned.
+- **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming.
+- **Curator fee:** The reserved upfront payment for a curator for work related to the bounty.
+
+## Interface
+
+### Dispatchable Functions
+
+Bounty protocol:
+- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of
+  tasks and stake the required deposit.
+- `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of
+  work.
+- `propose_curator` - Assign an account to a bounty as candidate curator.
+- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit.
+- `extend_bounty_expiry` - Extend the expiry block number of the bounty, keeping it active.
+- `award_bounty` - Close and pay out the specified amount for the completed work.
+- `claim_bounty` - Claim a specific bounty amount from the Payout Address.
+- `unassign_curator` - Unassign an accepted curator from a specific earmark.
+- `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty.
diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs
new file mode 100644
index 0000000000000..798d929d241f7
--- /dev/null
+++ b/frame/bounties/src/benchmarking.rs
@@ -0,0 +1,216 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! bounties pallet benchmarking.
+
+#![cfg(feature = "runtime-benchmarks")]
+
+use super::*;
+
+use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller};
+use frame_support::traits::OnInitialize;
+use frame_system::RawOrigin;
+use sp_runtime::traits::Bounded;
+
+use crate::Module as Bounties;
+use pallet_treasury::Pallet as Treasury;
+
+const SEED: u32 = 0;
+
+// Create bounties that are approved for use in `on_initialize`.
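+// Each one is driven through `propose_bounty` + `approve_bounty`, leaving `n`
+// entries in `BountyApprovals` for the treasury's `spend_funds` path to process.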
+fn create_approved_bounties<T: Config>(n: u32) -> Result<(), &'static str> {
+	for i in 0..n {
+		let (caller, _curator, _fee, value, reason) = setup_bounty::<T>(i, MAX_BYTES);
+		Bounties::<T>::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?;
+		let bounty_id = BountyCount::get() - 1;
+		Bounties::<T>::approve_bounty(RawOrigin::Root.into(), bounty_id)?;
+	}
+	ensure!(BountyApprovals::get().len() == n as usize, "Not all bounty approved");
+	Ok(())
+}
+
+// Create the pre-requisite information needed to create a treasury `propose_bounty`.
+fn setup_bounty<T: Config>(
+	u: u32,
+	d: u32,
+) -> (T::AccountId, T::AccountId, BalanceOf<T>, BalanceOf<T>, Vec<u8>) {
+	let caller = account("caller", u, SEED);
+	let value: BalanceOf<T> = T::BountyValueMinimum::get().saturating_mul(100u32.into());
+	let fee = value / 2u32.into();
+	let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * MAX_BYTES.into();
+	let _ = T::Currency::make_free_balance_be(&caller, deposit);
+	let curator = account("curator", u, SEED);
+	let _ = T::Currency::make_free_balance_be(&curator, fee / 2u32.into());
+	let reason = vec![0; d as usize];
+	(caller, curator, fee, value, reason)
+}
+
+fn create_bounty<T: Config>(
+) -> Result<(<T::Lookup as StaticLookup>::Source, BountyIndex), &'static str> {
+	let (caller, curator, fee, value, reason) = setup_bounty::<T>(0, MAX_BYTES);
+	let curator_lookup = T::Lookup::unlookup(curator.clone());
+	Bounties::<T>::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?;
+	let bounty_id = BountyCount::get() - 1;
+	Bounties::<T>::approve_bounty(RawOrigin::Root.into(), bounty_id)?;
+	Treasury::<T>::on_initialize(T::BlockNumber::zero());
+	Bounties::<T>::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?;
+	Bounties::<T>::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?;
+	Ok((curator_lookup, bounty_id))
+}
+
+fn setup_pot_account<T: Config>() {
+	let pot_account = Bounties::<T>::account_id();
+	let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into());
+	let _ = T::Currency::make_free_balance_be(&pot_account, value);
+}
+
+fn assert_last_event<T: Config>(generic_event: <T as frame_system::Config>::Event) {
+	frame_system::Pallet::<T>::assert_last_event(generic_event.into());
+}
+
+const MAX_BYTES: u32 = 16384;
+
+benchmarks! {
+	propose_bounty {
+		let d in 0 .. MAX_BYTES;
+
+		let (caller, curator, fee, value, description) = setup_bounty::<T>(0, d);
+	}: _(RawOrigin::Signed(caller), value, description)
+
+	approve_bounty {
+		let (caller, curator, fee, value, reason) = setup_bounty::<T>(0, MAX_BYTES);
+		Bounties::<T>::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?;
+		let bounty_id = BountyCount::get() - 1;
+	}: _(RawOrigin::Root, bounty_id)
+
+	propose_curator {
+		setup_pot_account::<T>();
+		let (caller, curator, fee, value, reason) = setup_bounty::<T>(0, MAX_BYTES);
+		let curator_lookup = T::Lookup::unlookup(curator.clone());
+		Bounties::<T>::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?;
+		let bounty_id = BountyCount::get() - 1;
+		Bounties::<T>::approve_bounty(RawOrigin::Root.into(), bounty_id)?;
+		Bounties::<T>::on_initialize(T::BlockNumber::zero());
+	}: _(RawOrigin::Root, bounty_id, curator_lookup, fee)
+
+	// Worst case when curator is inactive and any sender unassigns the curator.
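+	// (A curator is "inactive" once their `update_due` block has passed; the block
+	// number is pushed past `BountyUpdatePeriod` below so the slashing branch runs.)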
+ unassign_curator { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::get() - 1; + frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), bounty_id) + + accept_curator { + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + Bounties::::on_initialize(T::BlockNumber::zero()); + Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; + }: _(RawOrigin::Signed(curator), bounty_id) + + award_bounty { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; + + let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); + }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) + + claim_bounty { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; + + + let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); + let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); + Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; + + frame_system::Pallet::::set_block_number(T::BountyDepositPayoutDelay::get()); + ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary already has balance"); + + }: _(RawOrigin::Signed(curator), bounty_id) + verify { + ensure!(!T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary didn't get paid"); + } + + close_bounty_proposed { + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + + close_bounty_active { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + let bounty_id = BountyCount::get() - 1; + }: close_bounty(RawOrigin::Root, bounty_id) + verify { + assert_last_event::(RawEvent::BountyCanceled(bounty_id).into()) + } + + extend_bounty_expiry { + setup_pot_account::(); + let (curator_lookup, bounty_id) = create_bounty::()?; + Bounties::::on_initialize(T::BlockNumber::zero()); + + let bounty_id = BountyCount::get() - 1; + let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; + }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) + verify { + assert_last_event::(RawEvent::BountyExtended(bounty_id).into()) + } + + spend_funds { + let b in 1 .. 
100;
+		setup_pot_account::<T>();
+		create_approved_bounties::<T>(b)?;
+
+		let mut budget_remaining = BalanceOf::<T>::max_value();
+		let mut imbalance = PositiveImbalanceOf::<T>::zero();
+		let mut total_weight = Weight::zero();
+		let mut missed_any = false;
+	}: {
+		<Bounties<T> as pallet_treasury::SpendFunds<T>>::spend_funds(
+			&mut budget_remaining,
+			&mut imbalance,
+			&mut total_weight,
+			&mut missed_any,
+		);
+	}
+	verify {
+		ensure!(budget_remaining < BalanceOf::<T>::max_value(), "Budget not used");
+		ensure!(missed_any == false, "Missed some");
+		assert_last_event::<T>(RawEvent::BountyBecameActive(b - 1).into())
+	}
+}
+
+impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test);
diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs
new file mode 100644
index 0000000000000..77a8e47174019
--- /dev/null
+++ b/frame/bounties/src/lib.rs
@@ -0,0 +1,774 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Bounties Module ( pallet-bounties )
+//!
+//! ## Bounty
+//!
+//! > NOTE: This pallet is tightly coupled with pallet-treasury.
+//!
+//! A Bounty Spending is a reward for a specified body of work - or specified set of objectives -
+//! that needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned
+//! after the bounty is approved and funded by Council, to be delegated with the responsibility of
+//! assigning a payout address once the specified set of objectives is completed.
+//!
+//! After the Council has activated a bounty, it delegates the work that requires expertise to a
+//! curator in exchange for a deposit. Once the curator accepts the bounty, they get to close the
+//! active bounty. Closing the active bounty enacts a delayed payout to the payout address, the
+//! curator fee and the return of the curator deposit. The delay allows for intervention through
+//! regular democracy. The Council gets to unassign the curator, resulting in a new curator
+//! election. The Council also gets to cancel the bounty if deemed necessary before assigning a
+//! curator or once the bounty is active or payout is pending, resulting in the slash of the
+//! curator's deposit.
+//!
+//!
+//! ### Terminology
+//!
+//! Bounty:
+//! - **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion
+//!   by the Treasury.
+//! - **Proposer:** An account proposing a bounty spending.
+//! - **Curator:** An account managing the bounty and assigning a payout address receiving the
+//!   reward for the completion of work.
+//! - **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on
+//!   deposit per byte within the bounty description.
+//! - **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The
+//!   deposit is returned when/if the bounty is completed.
+//! - **Bounty value:** The total amount that should be paid to the Payout Address if the bounty
+//!   is rewarded.
+//! - **Payout address:** The account to which the total or part of the bounty is assigned.
+//! - **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before
+//!   claiming.
+//! - **Curator fee:** The reserved upfront payment for a curator for work related to the bounty.
+//!
+//! ## Interface
+//!
+//! ### Dispatchable Functions
+//!
+//! Bounty protocol:
+//! - `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of
+//!   tasks and stake the required deposit.
+//! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of
+//!   work.
+//! - `propose_curator` - Assign an account to a bounty as candidate curator.
+//! - `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit.
+//! - `extend_bounty_expiry` - Extend the expiry block number of the bounty, keeping it active.
+//! - `award_bounty` - Close and pay out the specified amount for the completed work.
+//! - `claim_bounty` - Claim a specific bounty amount from the Payout Address.
+//! - `unassign_curator` - Unassign an accepted curator from a specific earmark.
+//! - `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+mod benchmarking;
+mod tests;
+pub mod weights;
+
+use sp_std::prelude::*;
+
+use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure};
+
+use frame_support::traits::{
+	Currency, ExistenceRequirement::AllowDeath, Get, Imbalance, OnUnbalanced, ReservableCurrency,
+};
+
+use sp_runtime::{
+	traits::{AccountIdConversion, BadOrigin, Saturating, StaticLookup, Zero},
+	DispatchResult, Permill, RuntimeDebug,
+};
+
+use frame_support::{dispatch::DispatchResultWithPostInfo, traits::EnsureOrigin};
+
+use frame_support::weights::Weight;
+
+use codec::{Decode, Encode};
+use frame_system::{self as system, ensure_signed};
+use scale_info::TypeInfo;
+pub use weights::WeightInfo;
+
+type BalanceOf<T> = pallet_treasury::BalanceOf<T>;
+
+type PositiveImbalanceOf<T> = pallet_treasury::PositiveImbalanceOf<T>;
+
+pub trait Config: frame_system::Config + pallet_treasury::Config {
+	/// The amount held on deposit for placing a bounty proposal.
+	type BountyDepositBase: Get<BalanceOf<Self>>;
+
+	/// The delay period for which a bounty beneficiary needs to wait before claiming the payout.
+	type BountyDepositPayoutDelay: Get<Self::BlockNumber>;
+
+	/// Bounty duration in blocks.
+	type BountyUpdatePeriod: Get<Self::BlockNumber>;
+
+	/// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator.
+	type BountyCuratorDeposit: Get<Permill>;
+
+	/// Minimum value for a bounty.
+	type BountyValueMinimum: Get<BalanceOf<Self>>;
+
+	/// The amount held on deposit per byte within the tip report reason or bounty description.
+	type DataDepositPerByte: Get<BalanceOf<Self>>;
+
+	/// The overarching event type.
+	type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event>;
+
+	/// Maximum acceptable reason length.
+	type MaximumReasonLength: Get<u32>;
+
+	/// Weight information for extrinsics in this pallet.
+	type WeightInfo: WeightInfo;
+}
+
+/// An index of a bounty. Just a `u32`.
+pub type BountyIndex = u32;
+
+/// A bounty proposal.
+#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)]
+pub struct Bounty<AccountId, Balance, BlockNumber> {
+	/// The account proposing it.
+	proposer: AccountId,
+	/// The (total) amount that should be paid if the bounty is rewarded.
+	value: Balance,
+	/// The curator fee. Included in value.
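+	/// (On payout, the curator receives this fee and the beneficiary the remaining balance.)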
+ fee: Balance, + /// The deposit of curator. + curator_deposit: Balance, + /// The amount held on deposit (reserved) for making this proposal. + bond: Balance, + /// The status of this bounty. + status: BountyStatus, +} + +/// The status of a bounty proposal. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub enum BountyStatus { + /// The bounty is proposed and waiting for approval. + Proposed, + /// The bounty is approved and waiting to become active at next spend period. + Approved, + /// The bounty is funded and waiting for curator assignment. + Funded, + /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the + /// curator. + CuratorProposed { + /// The assigned curator of this bounty. + curator: AccountId, + }, + /// The bounty is active and waiting to be awarded. + Active { + /// The curator of this bounty. + curator: AccountId, + /// An update from the curator is due by this block, else they are considered inactive. + update_due: BlockNumber, + }, + /// The bounty is awarded and waiting to released after a delay. + PendingPayout { + /// The curator of this bounty. + curator: AccountId, + /// The beneficiary of the bounty. + beneficiary: AccountId, + /// When the bounty can be claimed. + unlock_at: BlockNumber, + }, +} + +// Note :: For backward compatibility reasons, +// pallet-bounties uses Treasury for storage. +// This is temporary solution, soon will get replaced with +// Own storage identifier. +decl_storage! { + trait Store for Module as Treasury { + + /// Number of bounty proposals that have been made. + pub BountyCount get(fn bounty_count): BountyIndex; + + /// Bounties that have been made. + pub Bounties get(fn bounties): + map hasher(twox_64_concat) BountyIndex + => Option, T::BlockNumber>>; + + /// The description of each bounty. + pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; + + /// Bounty indices that have been approved but not yet funded. + pub BountyApprovals get(fn bounty_approvals): Vec; + } +} + +decl_event!( + pub enum Event + where + Balance = BalanceOf, + ::AccountId, + { + /// New bounty proposal. \[index\] + BountyProposed(BountyIndex), + /// A bounty proposal was rejected; funds were slashed. \[index, bond\] + BountyRejected(BountyIndex, Balance), + /// A bounty proposal is funded and became active. \[index\] + BountyBecameActive(BountyIndex), + /// A bounty is awarded to a beneficiary. \[index, beneficiary\] + BountyAwarded(BountyIndex, AccountId), + /// A bounty is claimed by beneficiary. \[index, payout, beneficiary\] + BountyClaimed(BountyIndex, Balance, AccountId), + /// A bounty is cancelled. \[index\] + BountyCanceled(BountyIndex), + /// A bounty expiry is extended. \[index\] + BountyExtended(BountyIndex), + } +); + +decl_error! { + /// Error for the treasury module. + pub enum Error for Module { + /// Proposer's balance is too low. + InsufficientProposersBalance, + /// No proposal or bounty at that index. + InvalidIndex, + /// The reason given is just too big. + ReasonTooBig, + /// The bounty status is unexpected. + UnexpectedStatus, + /// Require bounty curator. + RequireCurator, + /// Invalid bounty value. + InvalidValue, + /// Invalid bounty fee. + InvalidFee, + /// A bounty payout is pending. + /// To cancel the bounty, you must unassign and slash the curator. + PendingPayout, + /// The bounties cannot be claimed/closed because it's still in the countdown period. + Premature, + } +} + +decl_module! 
{ + pub struct Module + for enum Call + where origin: T::Origin + { + /// The amount held on deposit per byte within bounty description. + const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); + + /// The amount held on deposit for placing a bounty proposal. + const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); + + /// The delay period for which a bounty beneficiary need to wait before claim the payout. + const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); + + /// Bounty duration in blocks. + const BountyUpdatePeriod: T::BlockNumber = T::BountyUpdatePeriod::get(); + + /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. + const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); + + /// Minimum value for a bounty. + const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); + + /// Maximum acceptable reason length. + const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); + + type Error = Error; + + fn deposit_event() = default; + + /// Propose a new bounty. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as + /// `DataDepositPerByte` for each byte in `reason`. It will be unreserved upon approval, + /// or slashed when rejected. + /// + /// - `curator`: The curator account whom will manage this bounty. + /// - `fee`: The curator fee. + /// - `value`: The total payment amount of this bounty, curator fee included. + /// - `description`: The description of this bounty. + #[weight = ::WeightInfo::propose_bounty(description.len() as u32)] + fn propose_bounty( + origin, + #[compact] value: BalanceOf, + description: Vec, + ) { + let proposer = ensure_signed(origin)?; + Self::create_bounty(proposer, description, value)?; + } + + /// Approve a bounty proposal. At a later time, the bounty will be funded and become active + /// and the original deposit will be returned. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::approve_bounty()] + fn approve_bounty(origin, #[compact] bounty_id: BountyIndex) { + T::ApproveOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); + + bounty.status = BountyStatus::Approved; + + BountyApprovals::append(bounty_id); + + Ok(()) + })?; + } + + /// Assign a curator to a funded bounty. + /// + /// May only be called from `T::ApproveOrigin`. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::propose_curator()] + fn propose_curator( + origin, + #[compact] bounty_id: BountyIndex, + curator: ::Source, + #[compact] fee: BalanceOf, + ) { + T::ApproveOrigin::ensure_origin(origin)?; + + let curator = T::Lookup::lookup(curator)?; + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match bounty.status { + BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => {}, + _ => return Err(Error::::UnexpectedStatus.into()), + }; + + ensure!(fee < bounty.value, Error::::InvalidFee); + + bounty.status = BountyStatus::CuratorProposed { curator }; + bounty.fee = fee; + + Ok(()) + })?; + } + + /// Unassign curator from a bounty. 
+ /// + /// This function can only be called by the `RejectOrigin` a signed origin. + /// + /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious + /// or inactive. As a result, we will slash the curator when possible. + /// + /// If the origin is the curator, we take this as a sign they are unable to do their job and + /// they willingly give up. We could slash them, but for now we allow them to recover their + /// deposit and exit without issue. (We may want to change this if it is abused.) + /// + /// Finally, the origin can be anyone if and only if the curator is "inactive". This allows + /// anyone in the community to call out that a curator is not doing their due diligence, and + /// we should pick a new curator. In this case the curator should also be slashed. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::unassign_curator()] + fn unassign_curator( + origin, + #[compact] bounty_id: BountyIndex, + ) { + let maybe_sender = ensure_signed(origin.clone()) + .map(Some) + .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; + + match bounty.status { + BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { + // No curator to unassign at this point. + return Err(Error::::UnexpectedStatus.into()) + } + BountyStatus::CuratorProposed { ref curator } => { + // A curator has been proposed, but not accepted yet. + // Either `RejectOrigin` or the proposed curator can unassign the curator. + ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin); + }, + BountyStatus::Active { ref curator, ref update_due } => { + // The bounty is active. + match maybe_sender { + // If the `RejectOrigin` is calling this function, slash the curator. + None => { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + }, + Some(sender) => { + // If the sender is not the curator, and the curator is inactive, + // slash the curator. + if sender != *curator { + let block_number = system::Pallet::::block_number(); + if *update_due < block_number { + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } else { + // Curator has more time to give an update. + return Err(Error::::Premature.into()) + } + } else { + // Else this is the curator, willingly giving up their role. + // Give back their deposit. + let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); + debug_assert!(err_amount.is_zero()); + // Continue to change bounty status below... + } + }, + } + }, + BountyStatus::PendingPayout { ref curator, .. } => { + // The bounty is pending payout, so only council can unassign a curator. + // By doing so, they are claiming the curator is acting maliciously, so + // we slash the curator. + ensure!(maybe_sender.is_none(), BadOrigin); + slash_curator(curator, &mut bounty.curator_deposit); + // Continue to change bounty status below... + } + }; + + bounty.status = BountyStatus::Funded; + Ok(()) + })?; + } + + /// Accept the curator role for a bounty. 
+ /// A deposit will be reserved from curator and refund upon successful payout. + /// + /// May only be called from the curator. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::accept_curator()] + fn accept_curator(origin, #[compact] bounty_id: BountyIndex) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::CuratorProposed { ref curator } => { + ensure!(signer == *curator, Error::::RequireCurator); + + let deposit = T::BountyCuratorDeposit::get() * bounty.fee; + T::Currency::reserve(curator, deposit)?; + bounty.curator_deposit = deposit; + + let update_due = system::Pallet::::block_number() + T::BountyUpdatePeriod::get(); + bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; + + Ok(()) + }, + _ => Err(Error::::UnexpectedStatus.into()), + } + })?; + } + + /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to award. + /// - `beneficiary`: The beneficiary account whom will receive the payout. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::award_bounty()] + fn award_bounty(origin, #[compact] bounty_id: BountyIndex, beneficiary: ::Source) { + let signer = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + match &bounty.status { + BountyStatus::Active { + curator, + .. + } => { + ensure!(signer == *curator, Error::::RequireCurator); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + bounty.status = BountyStatus::PendingPayout { + curator: signer, + beneficiary: beneficiary.clone(), + unlock_at: system::Pallet::::block_number() + T::BountyDepositPayoutDelay::get(), + }; + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); + } + + /// Claim the payout from an awarded bounty after payout delay. + /// + /// The dispatch origin for this call must be the beneficiary of this bounty. + /// + /// - `bounty_id`: Bounty ID to claim. + /// + /// # + /// - O(1). 
+ /// # + #[weight = ::WeightInfo::claim_bounty()] + fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { + let _ = ensure_signed(origin)?; // anyone can trigger claim + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; + if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { + ensure!(system::Pallet::::block_number() >= unlock_at, Error::::Premature); + let bounty_account = Self::bounty_account_id(bounty_id); + let balance = T::Currency::free_balance(&bounty_account); + let fee = bounty.fee.min(balance); // just to be safe + let payout = balance.saturating_sub(fee); + let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); + debug_assert!(err_amount.is_zero()); + let res = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail + debug_assert!(res.is_ok()); + let res = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail + debug_assert!(res.is_ok()); + + *maybe_bounty = None; + + BountyDescriptions::remove(bounty_id); + + Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); + Ok(()) + } else { + Err(Error::::UnexpectedStatus.into()) + } + })?; + } + + /// Cancel a proposed or active bounty. All the funds will be sent to treasury and + /// the curator deposit will be unreserved if possible. + /// + /// Only `T::RejectOrigin` is able to cancel a bounty. + /// + /// - `bounty_id`: Bounty ID to cancel. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::close_bounty_proposed().max(::WeightInfo::close_bounty_active())] + fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { + T::RejectOrigin::ensure_origin(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { + let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; + + match &bounty.status { + BountyStatus::Proposed => { + // The reject origin would like to cancel a proposed bounty. + BountyDescriptions::remove(bounty_id); + let value = bounty.bond; + let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; + T::OnSlash::on_unbalanced(imbalance); + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyRejected(bounty_id, value)); + // Return early, nothing else to do. + return Ok(Some(::WeightInfo::close_bounty_proposed()).into()) + }, + BountyStatus::Approved => { + // For weight reasons, we don't allow a council to cancel in this phase. + // We ask for them to wait until it is funded before they can cancel. + return Err(Error::::UnexpectedStatus.into()) + }, + BountyStatus::Funded | + BountyStatus::CuratorProposed { .. } => { + // Nothing extra to do besides the removal of the bounty below. + }, + BountyStatus::Active { curator, .. } => { + // Cancelled by council, refund deposit of the working curator. + let err_amount = T::Currency::unreserve(&curator, bounty.curator_deposit); + debug_assert!(err_amount.is_zero()); + // Then execute removal of the bounty below. + }, + BountyStatus::PendingPayout { .. } => { + // Bounty is already pending payout. If council wants to cancel + // this bounty, it should mean the curator was acting maliciously. + // So the council should first unassign the curator, slashing their + // deposit. 
+ return Err(Error::::PendingPayout.into()) + } + } + + let bounty_account = Self::bounty_account_id(bounty_id); + + BountyDescriptions::remove(bounty_id); + + let balance = T::Currency::free_balance(&bounty_account); + let res = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail + debug_assert!(res.is_ok()); + *maybe_bounty = None; + + Self::deposit_event(Event::::BountyCanceled(bounty_id)); + Ok(Some(::WeightInfo::close_bounty_active()).into()) + }) + } + + /// Extend the expiry time of an active bounty. + /// + /// The dispatch origin for this call must be the curator of this bounty. + /// + /// - `bounty_id`: Bounty ID to extend. + /// - `remark`: additional information. + /// + /// # + /// - O(1). + /// # + #[weight = ::WeightInfo::extend_bounty_expiry()] + fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { + let signer = ensure_signed(origin)?; + + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + + match bounty.status { + BountyStatus::Active { ref curator, ref mut update_due } => { + ensure!(*curator == signer, Error::::RequireCurator); + *update_due = (system::Pallet::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); + }, + _ => return Err(Error::::UnexpectedStatus.into()), + } + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyExtended(bounty_id)); + } + } +} + +impl Module { + // Add public immutables and private mutables. + + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::PalletId::get().into_account() + } + + /// The account ID of a bounty account + pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { + // only use two byte prefix to support 16 byte account id (used by test) + // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index + T::PalletId::get().into_sub_account(("bt", id)) + } + + fn create_bounty( + proposer: T::AccountId, + description: Vec, + value: BalanceOf, + ) -> DispatchResult { + ensure!( + description.len() <= T::MaximumReasonLength::get() as usize, + Error::::ReasonTooBig + ); + ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); + + let index = Self::bounty_count(); + + // reserve deposit for new bounty + let bond = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * (description.len() as u32).into(); + T::Currency::reserve(&proposer, bond) + .map_err(|_| Error::::InsufficientProposersBalance)?; + + BountyCount::put(index + 1); + + let bounty = Bounty { + proposer, + value, + fee: 0u32.into(), + curator_deposit: 0u32.into(), + bond, + status: BountyStatus::Proposed, + }; + + Bounties::::insert(index, &bounty); + BountyDescriptions::insert(index, description); + + Self::deposit_event(RawEvent::BountyProposed(index)); + + Ok(()) + } +} + +impl pallet_treasury::SpendFunds for Module { + fn spend_funds( + budget_remaining: &mut BalanceOf, + imbalance: &mut PositiveImbalanceOf, + total_weight: &mut Weight, + missed_any: &mut bool, + ) { + let bounties_len = BountyApprovals::mutate(|v| { + let bounties_approval_len = v.len() as u32; + v.retain(|&index| { + Bounties::::mutate(index, |bounty| { + // Should always be true, but shouldn't panic if false or we're screwed. 
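+				// The closure returns `false` to drop a funded (or missing) bounty from
+				// the approvals queue, and `true` to retry an unaffordable one on a
+				// later spend period.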
+					if let Some(bounty) = bounty {
+						if bounty.value <= *budget_remaining {
+							*budget_remaining -= bounty.value;
+
+							bounty.status = BountyStatus::Funded;
+
+							// return their deposit.
+							let err_amount = T::Currency::unreserve(&bounty.proposer, bounty.bond);
+							debug_assert!(err_amount.is_zero());
+
+							// fund the bounty account
+							imbalance.subsume(T::Currency::deposit_creating(
+								&Self::bounty_account_id(index),
+								bounty.value,
+							));
+
+							Self::deposit_event(RawEvent::BountyBecameActive(index));
+							false
+						} else {
+							*missed_any = true;
+							true
+						}
+					} else {
+						false
+					}
+				})
+			});
+			bounties_approval_len
+		});
+
+		*total_weight += <T as Config>::WeightInfo::spend_funds(bounties_len);
+	}
+}
diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs
new file mode 100644
index 0000000000000..ff058a3601e07
--- /dev/null
+++ b/frame/bounties/src/tests.rs
@@ -0,0 +1,954 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! bounties pallet tests.
+
+#![cfg(test)]
+
+use super::*;
+use crate as pallet_bounties;
+use std::cell::RefCell;
+
+use frame_support::{
+	assert_noop, assert_ok, pallet_prelude::GenesisBuild, parameter_types, traits::OnInitialize,
+	weights::Weight, PalletId,
+};
+
+use sp_core::H256;
+use sp_runtime::{
+	testing::Header,
+	traits::{BadOrigin, BlakeTwo256, IdentityLookup},
+	Perbill,
+};
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+		Bounties: pallet_bounties::{Pallet, Call, Storage, Event<T>},
+		Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event<T>},
+	}
+);
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+	pub const MaximumBlockWeight: Weight = 1024;
+	pub const MaximumBlockLength: u32 = 2 * 1024;
+	pub const AvailableBlockRatio: Perbill = Perbill::one();
+}
+
+impl frame_system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = ();
+	type BlockLength = ();
+	type DbWeight = ();
+	type Origin = Origin;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Call = Call;
+	type Hash = H256;
+	type Hashing = BlakeTwo256;
+	type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Header = Header;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = pallet_balances::AccountData<u64>;
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+parameter_types! {
+	pub const ExistentialDeposit: u64 = 1;
+}
+impl pallet_balances::Config for Test {
+	type MaxLocks = ();
+	type MaxReserves = ();
+	type ReserveIdentifier = [u8; 8];
+	type Balance = u64;
+	type Event = Event;
+	type DustRemoval = ();
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+	type WeightInfo = ();
+}
+thread_local! {
+	static TEN_TO_FOURTEEN: RefCell<Vec<u128>> = RefCell::new(vec![10,11,12,13,14]);
+}
+parameter_types! {
+	pub const ProposalBond: Permill = Permill::from_percent(5);
+	pub const ProposalBondMinimum: u64 = 1;
+	pub const SpendPeriod: u64 = 2;
+	pub const Burn: Permill = Permill::from_percent(50);
+	pub const DataDepositPerByte: u64 = 1;
+	pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
+	pub const MaxApprovals: u32 = 100;
+}
+impl pallet_treasury::Config for Test {
+	type PalletId = TreasuryPalletId;
+	type Currency = pallet_balances::Pallet<Test>;
+	type ApproveOrigin = frame_system::EnsureRoot<u128>;
+	type RejectOrigin = frame_system::EnsureRoot<u128>;
+	type Event = Event;
+	type OnSlash = ();
+	type ProposalBond = ProposalBond;
+	type ProposalBondMinimum = ProposalBondMinimum;
+	type SpendPeriod = SpendPeriod;
+	type Burn = Burn;
+	type BurnDestination = (); // Just gets burned.
+	type WeightInfo = ();
+	type SpendFunds = Bounties;
+	type MaxApprovals = MaxApprovals;
+}
+parameter_types! {
+	pub const BountyDepositBase: u64 = 80;
+	pub const BountyDepositPayoutDelay: u64 = 3;
+	pub const BountyUpdatePeriod: u32 = 20;
+	pub const BountyCuratorDeposit: Permill = Permill::from_percent(50);
+	pub const BountyValueMinimum: u64 = 1;
+	pub const MaximumReasonLength: u32 = 16384;
+}
+impl Config for Test {
+	type Event = Event;
+	type BountyDepositBase = BountyDepositBase;
+	type BountyDepositPayoutDelay = BountyDepositPayoutDelay;
+	type BountyUpdatePeriod = BountyUpdatePeriod;
+	type BountyCuratorDeposit = BountyCuratorDeposit;
+	type BountyValueMinimum = BountyValueMinimum;
+	type DataDepositPerByte = DataDepositPerByte;
+	type MaximumReasonLength = MaximumReasonLength;
+	type WeightInfo = ();
+}
+
+type TreasuryError = pallet_treasury::Error<Test>;
+
+pub fn new_test_ext() -> sp_io::TestExternalities {
+	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+	pallet_balances::GenesisConfig::<Test> {
+		// Total issuance will be 200 with treasury account initialized at ED.
+		balances: vec![(0, 100), (1, 98), (2, 1)],
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+	GenesisBuild::<Test>::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap();
+	t.into()
+}
+
+fn last_event() -> RawEvent<u64, u128> {
+	System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| if let Event::Bounties(inner) = e { Some(inner) } else { None })
+		.last()
+		.unwrap()
+}
+
+#[test]
+fn genesis_config_works() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(Treasury::pot(), 0);
+		assert_eq!(Treasury::proposal_count(), 0);
+	});
+}
+
+#[test]
+fn minting_works() {
+	new_test_ext().execute_with(|| {
+		// Check that accumulate works when we have Some value in Dummy already.
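+		// `pot()` is the treasury account's free balance minus the existential
+		// deposit (1 in this mock), so 101 - 1 = 100.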
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+	});
+}
+
+#[test]
+fn spend_proposal_takes_min_deposit() {
+	new_test_ext().execute_with(|| {
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3));
+		assert_eq!(Balances::free_balance(0), 99);
+		assert_eq!(Balances::reserved_balance(0), 1);
+	});
+}
+
+#[test]
+fn spend_proposal_takes_proportional_deposit() {
+	new_test_ext().execute_with(|| {
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
+		assert_eq!(Balances::free_balance(0), 95);
+		assert_eq!(Balances::reserved_balance(0), 5);
+	});
+}
+
+#[test]
+fn spend_proposal_fails_when_proposer_poor() {
+	new_test_ext().execute_with(|| {
+		assert_noop!(
+			Treasury::propose_spend(Origin::signed(2), 100, 3),
+			TreasuryError::InsufficientProposersBalance,
+		);
+	});
+}
+
+#[test]
+fn accepted_spend_proposal_ignored_outside_spend_period() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(1);
+		assert_eq!(Balances::free_balance(3), 0);
+		assert_eq!(Treasury::pot(), 100);
+	});
+}
+
+#[test]
+fn unused_pot_should_diminish() {
+	new_test_ext().execute_with(|| {
+		let init_total_issuance = Balances::total_issuance();
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Balances::total_issuance(), init_total_issuance + 100);
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Treasury::pot(), 50);
+		assert_eq!(Balances::total_issuance(), init_total_issuance + 50);
+	});
+}
+
+#[test]
+fn rejected_spend_proposal_ignored_on_spend_period() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
+		assert_ok!(Treasury::reject_proposal(Origin::root(), 0));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Balances::free_balance(3), 0);
+		assert_eq!(Treasury::pot(), 50);
+	});
+}
+
+#[test]
+fn reject_already_rejected_spend_proposal_fails() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
+		assert_ok!(Treasury::reject_proposal(Origin::root(), 0));
+		assert_noop!(Treasury::reject_proposal(Origin::root(), 0), TreasuryError::InvalidIndex);
+	});
+}
+
+#[test]
+fn reject_non_existent_spend_proposal_fails() {
+	new_test_ext().execute_with(|| {
+		assert_noop!(
+			Treasury::reject_proposal(Origin::root(), 0),
+			pallet_treasury::Error::<Test>::InvalidIndex
+		);
+	});
+}
+
+#[test]
+fn accept_non_existent_spend_proposal_fails() {
+	new_test_ext().execute_with(|| {
+		assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex);
+	});
+}
+
+#[test]
+fn accept_already_rejected_spend_proposal_fails() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
+		assert_ok!(Treasury::reject_proposal(Origin::root(), 0));
+		assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex);
+	});
+}
+
+#[test]
+fn accepted_spend_proposal_enacted_on_spend_period() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));
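+		// Approval only queues the spend; payment happens at the next spend period.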
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Balances::free_balance(3), 100);
+		assert_eq!(Treasury::pot(), 0);
+	});
+}
+
+#[test]
+fn pot_underflow_should_not_diminish() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Treasury::pot(), 100); // Pot hasn't changed
+
+		assert_ok!(Balances::deposit_into_existing(&Treasury::account_id(), 100));
+		<Treasury as OnInitialize<u64>>::on_initialize(4);
+		assert_eq!(Balances::free_balance(3), 150); // Fund has been spent
+		assert_eq!(Treasury::pot(), 25); // Pot has finally changed
+	});
+}
+
+// Treasury account doesn't get deleted if amount approved to spend is all its free balance.
+// i.e. pot should not include existential deposit needed for account survival.
+#[test]
+fn treasury_account_doesnt_get_deleted() {
+	new_test_ext().execute_with(|| {
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+		let treasury_balance = Balances::free_balance(&Treasury::account_id());
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Treasury::pot(), 100); // Pot hasn't changed
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 1));
+
+		<Treasury as OnInitialize<u64>>::on_initialize(4);
+		assert_eq!(Treasury::pot(), 0); // Pot is emptied
+		assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there
+	});
+}
+
+// In case the treasury account does not exist yet, everything still works fine.
+// This is useful for chains that are just upgrading the runtime.
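+// (The treasury account is created lazily, the first time funds are deposited into it.)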
+#[test]
+fn inexistent_account_works() {
+	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+	pallet_balances::GenesisConfig::<Test> { balances: vec![(0, 100), (1, 99), (2, 1)] }
+		.assimilate_storage(&mut t)
+		.unwrap();
+	// Treasury genesis config is not built, thus the treasury account does not exist.
+	let mut t: sp_io::TestExternalities = t.into();
+
+	t.execute_with(|| {
+		assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist
+		assert_eq!(Treasury::pot(), 0); // Pot is empty
+
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 0));
+		assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3));
+		assert_ok!(Treasury::approve_proposal(Origin::root(), 1));
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+		assert_eq!(Treasury::pot(), 0); // Pot hasn't changed
+		assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed
+
+		Balances::make_free_balance_be(&Treasury::account_id(), 100);
+		assert_eq!(Treasury::pot(), 99); // Pot now contains funds
+		assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist
+
+		<Treasury as OnInitialize<u64>>::on_initialize(4);
+
+		assert_eq!(Treasury::pot(), 0); // Pot has changed
+		assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed
+	});
+}
+
+#[test]
+fn propose_bounty_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec()));
+
+		assert_eq!(last_event(), RawEvent::BountyProposed(0));
+
+		// `BountyDepositBase` (80) + `DataDepositPerByte` (1) * 10-byte description.
+		let deposit: u64 = 80 + 10;
+		assert_eq!(Balances::reserved_balance(0), deposit);
+		assert_eq!(Balances::free_balance(0), 100 - deposit);
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 0,
+				curator_deposit: 0,
+				value: 10,
+				bond: deposit,
+				status: BountyStatus::Proposed,
+			}
+		);
+
+		assert_eq!(Bounties::bounty_descriptions(0).unwrap(), b"1234567890".to_vec());
+
+		assert_eq!(Bounties::bounty_count(), 1);
+	});
+}
+
+#[test]
+fn propose_bounty_validation_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_eq!(Treasury::pot(), 100);
+
+		assert_noop!(
+			Bounties::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()),
+			Error::<Test>::ReasonTooBig
+		);
+
+		assert_noop!(
+			Bounties::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()),
+			Error::<Test>::InsufficientProposersBalance
+		);
+
+		assert_noop!(
+			Bounties::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()),
+			Error::<Test>::InvalidValue
+		);
+	});
+}
+
+#[test]
+fn close_bounty_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::<Test>::InvalidIndex);
+
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"12345".to_vec()));
+
+		assert_ok!(Bounties::close_bounty(Origin::root(), 0));
+
+		let deposit: u64 = 80 + 5;
+
+		assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit));
+
+		assert_eq!(Balances::reserved_balance(0), 0);
+		assert_eq!(Balances::free_balance(0), 100 - deposit);
+
+		assert_eq!(Bounties::bounties(0), None);
+		assert!(!pallet_treasury::Proposals::<Test>::contains_key(0));
+
+		assert_eq!(Bounties::bounty_descriptions(0), None);
+	});
+}
+
+#[test]
+fn approve_bounty_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_noop!(Bounties::approve_bounty(Origin::root(), 0), Error::<Test>::InvalidIndex);
+
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		let deposit: u64 = 80 + 5;
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 0,
+				value: 50,
+				curator_deposit: 0,
+				bond: deposit,
+				status: BountyStatus::Approved,
+			}
+		);
+		assert_eq!(Bounties::bounty_approvals(), vec![0]);
+
+		assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::<Test>::UnexpectedStatus);
+
+		// deposit not returned yet
+		assert_eq!(Balances::reserved_balance(0), deposit);
+		assert_eq!(Balances::free_balance(0), 100 - deposit);
+
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		// return deposit
+		assert_eq!(Balances::reserved_balance(0), 0);
+		assert_eq!(Balances::free_balance(0), 100);
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 0,
+				curator_deposit: 0,
+				value: 50,
+				bond: deposit,
+				status: BountyStatus::Funded,
+			}
+		);
+
+		assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25
+		assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50);
+	});
+}
+
+#[test]
+fn assign_curator_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+
+		assert_noop!(
+			Bounties::propose_curator(Origin::root(), 0, 4, 4),
+			Error::<Test>::InvalidIndex
+		);
+
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_noop!(
+			Bounties::propose_curator(Origin::root(), 0, 4, 50),
+			Error::<Test>::InvalidFee
+		);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 4,
+				curator_deposit: 0,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::CuratorProposed { curator: 4 },
+			}
+		);
+
+		assert_noop!(Bounties::accept_curator(Origin::signed(1), 0), Error::<Test>::RequireCurator);
+		assert_noop!(
+			Bounties::accept_curator(Origin::signed(4), 0),
+			pallet_balances::Error::<Test, _>::InsufficientBalance
+		);
+
+		Balances::make_free_balance_be(&4, 10);
+
+		assert_ok!(Bounties::accept_curator(Origin::signed(4), 0));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 4,
+				curator_deposit: 2,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::Active { curator: 4, update_due: 22 },
+			}
+		);
+
+		assert_eq!(Balances::free_balance(&4), 8);
+		assert_eq!(Balances::reserved_balance(&4), 2);
+	});
+}
+
+#[test]
+fn unassign_curator_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4));
+
+		assert_noop!(Bounties::unassign_curator(Origin::signed(1), 0), BadOrigin);
+
+		assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 4,
+				curator_deposit: 0,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::Funded,
+			}
+		);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4));
+
+		Balances::make_free_balance_be(&4, 10);
+
+		assert_ok!(Bounties::accept_curator(Origin::signed(4), 0));
+
+		assert_ok!(Bounties::unassign_curator(Origin::root(), 0));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 4,
+				curator_deposit: 0,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::Funded,
+			}
+		);
+
+		assert_eq!(Balances::free_balance(&4), 8);
+		assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2
+	});
+}
+
+#[test]
+fn award_and_claim_bounty_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		Balances::make_free_balance_be(&4, 10);
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4));
+		assert_ok!(Bounties::accept_curator(Origin::signed(4), 0));
+
+		assert_eq!(Balances::free_balance(4), 8); // initial 10 - 2 deposit
+
+		assert_noop!(
+			Bounties::award_bounty(Origin::signed(1), 0, 3),
+			Error::<Test>::RequireCurator
+		);
+
+		assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 4,
+				curator_deposit: 2,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::PendingPayout { curator: 4, beneficiary: 3, unlock_at: 5 },
+			}
+		);
+
+		assert_noop!(Bounties::claim_bounty(Origin::signed(1), 0), Error::<Test>::Premature);
+
+		System::set_block_number(5);
+		<Treasury as OnInitialize<u64>>::on_initialize(5);
+
+		assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10));
+
+		assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0));
+
+		assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3));
+
+		assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4
+
+		assert_eq!(Balances::free_balance(3), 56);
+		assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0);
+
+		assert_eq!(Bounties::bounties(0), None);
+		assert_eq!(Bounties::bounty_descriptions(0), None);
+	});
+}
+
+#[test]
+fn claim_handles_high_fee() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		Balances::make_free_balance_be(&4, 30);
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 49));
+		assert_ok!(Bounties::accept_curator(Origin::signed(4), 0));
+
+		assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3));
+
+		System::set_block_number(5);
+		<Treasury as OnInitialize<u64>>::on_initialize(5);
+
+		// make fee > balance
+		let res = Balances::slash(&Bounties::bounty_account_id(0), 10);
+		assert_eq!(res.0.peek(), 10);
+
+		assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0));
+
+		assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3));
+
+		assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10
+		assert_eq!(Balances::free_balance(3), 0);
+		assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0);
+
+		assert_eq!(Bounties::bounties(0), None);
+		assert_eq!(Bounties::bounty_descriptions(0), None);
+	});
+}
+
+#[test]
+fn cancel_and_refund() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 0,
+				curator_deposit: 0,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::Funded,
+			}
+		);
+
+		assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 60);
+
+		assert_noop!(Bounties::close_bounty(Origin::signed(0), 0), BadOrigin);
+
+		assert_ok!(Bounties::close_bounty(Origin::root(), 0));
+
+		// `- 25 + 10`
+		assert_eq!(Treasury::pot(), 85);
+	});
+}
+
+#[test]
+fn award_and_cancel() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 0, 10));
+		assert_ok!(Bounties::accept_curator(Origin::signed(0), 0));
+
+		assert_eq!(Balances::free_balance(0), 95);
+		assert_eq!(Balances::reserved_balance(0), 5);
+
+		assert_ok!(Bounties::award_bounty(Origin::signed(0), 0, 3));
+
+		// Cannot close bounty directly when payout is happening...
+		assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::<Test>::PendingPayout);
+
+		// Instead unassign the curator to slash them and then close.
+		assert_ok!(Bounties::unassign_curator(Origin::root(), 0));
+		assert_ok!(Bounties::close_bounty(Origin::root(), 0));
+
+		assert_eq!(last_event(), RawEvent::BountyCanceled(0));
+
+		assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0);
+
+		// Slashed.
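+		// The curator deposit of 5 (50% of the 10 fee) was slashed by the unassign,
+		// leaving only the free balance of 95.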
+		assert_eq!(Balances::free_balance(0), 95);
+		assert_eq!(Balances::reserved_balance(0), 0);
+
+		assert_eq!(Bounties::bounties(0), None);
+		assert_eq!(Bounties::bounty_descriptions(0), None);
+	});
+}
+
+#[test]
+fn expire_and_unassign() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 1, 10));
+		assert_ok!(Bounties::accept_curator(Origin::signed(1), 0));
+
+		assert_eq!(Balances::free_balance(1), 93);
+		assert_eq!(Balances::reserved_balance(1), 5);
+
+		System::set_block_number(22);
+		<Treasury as OnInitialize<u64>>::on_initialize(22);
+
+		assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::<Test>::Premature);
+
+		System::set_block_number(23);
+		<Treasury as OnInitialize<u64>>::on_initialize(23);
+
+		assert_ok!(Bounties::unassign_curator(Origin::signed(0), 0));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 10,
+				curator_deposit: 0,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::Funded,
+			}
+		);
+
+		assert_eq!(Balances::free_balance(1), 93);
+		assert_eq!(Balances::reserved_balance(1), 0); // slashed
+	});
+}
+
+#[test]
+fn extend_expiry() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(1);
+		Balances::make_free_balance_be(&Treasury::account_id(), 101);
+		Balances::make_free_balance_be(&4, 10);
+		assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec()));
+
+		assert_ok!(Bounties::approve_bounty(Origin::root(), 0));
+
+		assert_noop!(
+			Bounties::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()),
+			Error::<Test>::UnexpectedStatus
+		);
+
+		System::set_block_number(2);
+		<Treasury as OnInitialize<u64>>::on_initialize(2);
+
+		assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 10));
+		assert_ok!(Bounties::accept_curator(Origin::signed(4), 0));
+
+		assert_eq!(Balances::free_balance(4), 5);
+		assert_eq!(Balances::reserved_balance(4), 5);
+
+		System::set_block_number(10);
+		<Treasury as OnInitialize<u64>>::on_initialize(10);
+
+		assert_noop!(
+			Bounties::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()),
+			Error::<Test>::RequireCurator
+		);
+		assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new()));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 10,
+				curator_deposit: 5,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::Active { curator: 4, update_due: 30 },
+			}
+		);
+
+		assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new()));
+
+		assert_eq!(
+			Bounties::bounties(0).unwrap(),
+			Bounty {
+				proposer: 0,
+				fee: 10,
+				curator_deposit: 5,
+				value: 50,
+				bond: 85,
+				status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same
+			}
+		);
+
+		System::set_block_number(25);
+		<Treasury as OnInitialize<u64>>::on_initialize(25);
+
+		assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::<Test>::Premature);
+		assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0));
+
+		assert_eq!(Balances::free_balance(4), 10); // not slashed
+		assert_eq!(Balances::reserved_balance(4), 0);
+	});
+}
+
+#[test]
+fn genesis_funding_works() {
+	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+	let initial_funding = 100;
+	pallet_balances::GenesisConfig::<Test> {
+		// Total issuance will be 200 with treasury account initialized with 100.
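+		// The pot will therefore report 100 - 1 = 99, keeping the existential deposit aside.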
+		balances: vec![(0, 100), (Treasury::account_id(), initial_funding)],
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+	GenesisBuild::<Test>::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap();
+	let mut t: sp_io::TestExternalities = t.into();
+
+	t.execute_with(|| {
+		assert_eq!(Balances::free_balance(Treasury::account_id()), initial_funding);
+		assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance());
+	});
+}
diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs
new file mode 100644
index 0000000000000..be93636424399
--- /dev/null
+++ b/frame/bounties/src/weights.rs
@@ -0,0 +1,240 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_bounties
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_bounties
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/bounties/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_bounties.
+pub trait WeightInfo {
+	fn propose_bounty(d: u32, ) -> Weight;
+	fn approve_bounty() -> Weight;
+	fn propose_curator() -> Weight;
+	fn unassign_curator() -> Weight;
+	fn accept_curator() -> Weight;
+	fn award_bounty() -> Weight;
+	fn claim_bounty() -> Weight;
+	fn close_bounty_proposed() -> Weight;
+	fn close_bounty_active() -> Weight;
+	fn extend_bounty_expiry() -> Weight;
+	fn spend_funds(b: u32, ) -> Weight;
+}
+
+/// Weights for pallet_bounties using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// Storage: Treasury BountyCount (r:1 w:1)
+	// Storage: System Account (r:1 w:1)
+	// Storage: Treasury BountyDescriptions (r:0 w:1)
+	// Storage: Treasury Bounties (r:0 w:1)
+	fn propose_bounty(d: u32, ) -> Weight {
+		(44_482_000 as Weight)
+			// Standard Error: 0
+			.saturating_add((1_000 as Weight).saturating_mul(d as Weight))
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	// Storage: Treasury BountyApprovals (r:1 w:1)
+	fn approve_bounty() -> Weight {
+		(11_955_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	fn propose_curator() -> Weight {
+		(9_771_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	// Storage: System Account (r:1 w:1)
+	fn unassign_curator() -> Weight {
+		(40_683_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	// Storage: System Account (r:1 w:1)
+	fn accept_curator() -> Weight {
+		(36_390_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	fn award_bounty() -> Weight {
+		(25_187_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	// Storage: System Account (r:3 w:3)
+	// Storage: Treasury BountyDescriptions (r:0 w:1)
+	fn claim_bounty() -> Weight {
+		(124_785_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(4 as Weight))
+			.saturating_add(T::DbWeight::get().writes(5 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	// Storage: System Account (r:1 w:1)
+	// Storage: Treasury BountyDescriptions (r:0 w:1)
+	fn close_bounty_proposed() -> Weight {
+		(39_483_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	// Storage: System Account (r:2 w:2)
+	// Storage: Treasury BountyDescriptions (r:0 w:1)
+	fn close_bounty_active() -> Weight {
+		(83_453_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(3 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	// Storage: Treasury Bounties (r:1 w:1)
+	fn extend_bounty_expiry() -> Weight {
+		(24_151_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Treasury BountyApprovals (r:1 w:1)
+	// Storage: Treasury Bounties (r:1 w:1)
+	// Storage: System Account (r:2 w:2)
+	fn spend_funds(b: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 16_000
+			.saturating_add((58_004_000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight)))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight)))
+	}
+}
+
+// For backwards compatibility and tests
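+// As a sketch (not part of this file): a runtime would typically select these
+// benchmarked weights through the pallet's `Config`, where `Runtime` stands in
+// for the concrete runtime type:
+//
+//   impl pallet_bounties::Config for Runtime {
+//       // ...
+//       type WeightInfo = pallet_bounties::weights::SubstrateWeight<Runtime>;
+//   }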
+impl WeightInfo for () { + // Storage: Treasury BountyCount (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Treasury BountyDescriptions (r:0 w:1) + // Storage: Treasury Bounties (r:0 w:1) + fn propose_bounty(d: u32, ) -> Weight { + (44_482_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + // Storage: Treasury BountyApprovals (r:1 w:1) + fn approve_bounty() -> Weight { + (11_955_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + fn propose_curator() -> Weight { + (9_771_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unassign_curator() -> Weight { + (40_683_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn accept_curator() -> Weight { + (36_390_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + fn award_bounty() -> Weight { + (25_187_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + // Storage: System Account (r:3 w:3) + // Storage: Treasury BountyDescriptions (r:0 w:1) + fn claim_bounty() -> Weight { + (124_785_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Treasury BountyDescriptions (r:0 w:1) + fn close_bounty_proposed() -> Weight { + (39_483_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + // Storage: System Account (r:2 w:2) + // Storage: Treasury BountyDescriptions (r:0 w:1) + fn close_bounty_active() -> Weight { + (83_453_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Treasury Bounties (r:1 w:1) + fn extend_bounty_expiry() -> Weight { + (24_151_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Treasury BountyApprovals (r:1 w:1) + // Storage: Treasury Bounties (r:1 w:1) + // Storage: System Account (r:2 w:2) + fn spend_funds(b: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 16_000 + .saturating_add((58_004_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + } +} diff --git a/frame/collective/Cargo.toml 
b/frame/collective/Cargo.toml
index fd302fb836579..e88f28d417730 100644
--- a/frame/collective/Cargo.toml
+++ b/frame/collective/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-collective"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,34 +13,37 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-serde = { version = "1.0.101", optional = true }
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
-frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true }
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+log = { version = "0.4.14", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
-
-[dev-dependencies]
-hex-literal = "0.3.1"
-pallet-balances = { version = "2.0.0", path = "../balances" }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" }
+sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
+
+frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
+	"log/std",
+	"scale-info/std",
 	"sp-core/std",
-	"sp-std/std",
-	"serde",
 	"sp-io/std",
-	"frame-support/std",
 	"sp-runtime/std",
+	"sp-std/std",
+	"frame-benchmarking/std",
+	"frame-support/std",
 	"frame-system/std",
 ]
 runtime-benchmarks = [
 	"frame-benchmarking",
 	"sp-runtime/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
+try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/collective/README.md b/frame/collective/README.md
index f62df65f728cd..444927e51da22 100644
--- a/frame/collective/README.md
+++ b/frame/collective/README.md
@@ -7,19 +7,19 @@ The pallet assumes that the amount of members stays at or below `MaxMembers` for
 calculations, but enforces this neither in `set_members` nor in `change_members_sorted`.
 
 A "prime" member may be set to help determine the default vote behavior based on chain
-config. If `PreimDefaultVote` is used, the prime vote acts as the default vote in case of any
+config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any
 abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then
 abstentions will first follow the majority of the collective voting, and then the prime member.
-Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a
+Voting happens through motions comprising a proposal (i.e. a dispatchable) plus a
 number of approvals required for it to pass and be called. Motions are open for members to
-vote on for a minimum period given by `MotionDuration`. As soon as the needed number of
+vote on for a minimum period given by `MotionDuration`. As soon as the required number of
 approvals is given, the motion is closed and executed. If the number of approvals is not reached
 during the voting period, then `close` may be called by any account in order to force the end
-the motion explicitly. If a prime member is defined then their vote is used in place of any
+the motion explicitly. If a prime member is defined, then their vote is used instead of any
 abstentions and the proposal is executed if there are enough approvals counting the new votes.
-If there are not, or if no prime is set, then the motion is dropped without being executed.
+If there are not, or if no prime member is set, then the motion is dropped without being executed.
 
-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs
index d4e80d515941f..c7e695babf27d 100644
--- a/frame/collective/src/benchmarking.rs
+++ b/frame/collective/src/benchmarking.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,32 +18,25 @@
 //! Staking pallet benchmarking.
 
 use super::*;
+use crate::Pallet as Collective;
 
-use frame_system::RawOrigin as SystemOrigin;
-use frame_system::EventRecord;
-use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller};
 use sp_runtime::traits::Bounded;
 use sp_std::mem::size_of;
-use frame_system::Call as SystemCall;
-use frame_system::Module as System;
-use crate::Module as Collective;
+use frame_benchmarking::{
+	account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelisted_caller,
+};
+use frame_system::{Call as SystemCall, Pallet as System, RawOrigin as SystemOrigin};
 
 const SEED: u32 = 0;
 
 const MAX_BYTES: u32 = 1_024;
 
-fn assert_last_event<T: Config<I>, I: Instance>(generic_event: <T as Config<I>>::Event) {
-	let events = System::<T>::events();
-	let system_event: <T as frame_system::Config>::Event = generic_event.into();
-	// compare to the last event record
-	let EventRecord { event, .. } = &events[events.len() - 1];
-	assert_eq!(event, &system_event);
+fn assert_last_event<T: Config<I>, I: 'static>(generic_event: <T as Config<I>>::Event) {
+	frame_system::Pallet::<T>::assert_last_event(generic_event.into());
 }
 
-benchmarks_instance! {
-	_{ }
-
+benchmarks_instance_pallet! {
 	set_members {
 		let m in 1 .. T::MaxMembers::get();
 		let n in 1 .. T::MaxMembers::get();
@@ -59,7 +52,7 @@ benchmarks_instance! {
 		}
 		let old_members_count = old_members.len() as u32;
 
-		Collective::<T, _>::set_members(
+		Collective::<T, I>::set_members(
 			SystemOrigin::Root.into(),
 			old_members.clone(),
 			Some(last_old_member.clone()),
@@ -72,8 +65,10 @@
 		let length = 100;
 		for i in 0 ..
p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; length]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { + remark: vec![i as u8; length] + }.into(); + Collective::::propose( SystemOrigin::Signed(last_old_member.clone()).into(), threshold, Box::new(proposal.clone()), @@ -86,7 +81,7 @@ benchmarks_instance! { for j in 2 .. m - 1 { let voter = &old_members[j as usize]; let approve = true; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), hash, i, @@ -107,7 +102,7 @@ benchmarks_instance! { }: _(SystemOrigin::Root, new_members.clone(), Some(last_member), T::MaxMembers::get()) verify { new_members.sort(); - assert_eq!(Collective::::members(), new_members); + assert_eq!(Collective::::members(), new_members); } execute { @@ -126,16 +121,16 @@ benchmarks_instance! { let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; + Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; - let proposal: T::Proposal = SystemCall::::remark(vec![1; b as usize]).into(); + let proposal: T::Proposal = SystemCall::::remark { remark: vec![1; b as usize] }.into(); }: _(SystemOrigin::Signed(caller), Box::new(proposal.clone()), bytes_in_storage) verify { let proposal_hash = T::Hashing::hash_of(&proposal); // Note that execution fails due to mis-matched origin assert_last_event::( - RawEvent::MemberExecuted(proposal_hash, Err(DispatchError::BadOrigin)).into() + Event::MemberExecuted(proposal_hash, Err(DispatchError::BadOrigin)).into() ); } @@ -156,9 +151,9 @@ benchmarks_instance! { let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; + Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; - let proposal: T::Proposal = SystemCall::::remark(vec![1; b as usize]).into(); + let proposal: T::Proposal = SystemCall::::remark { remark: vec![1; b as usize] }.into(); let threshold = 1; }: propose(SystemOrigin::Signed(caller), threshold, Box::new(proposal.clone()), bytes_in_storage) @@ -166,7 +161,7 @@ benchmarks_instance! { let proposal_hash = T::Hashing::hash_of(&proposal); // Note that execution fails due to mis-matched origin assert_last_event::( - RawEvent::Executed(proposal_hash, Err(DispatchError::BadOrigin)).into() + Event::Executed(proposal_hash, Err(DispatchError::BadOrigin)).into() ); } @@ -186,14 +181,14 @@ benchmarks_instance! { } let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; + Collective::::set_members(SystemOrigin::Root.into(), members, None, T::MaxMembers::get())?; let threshold = m; // Add previous proposals. for i in 0 .. p - 1 { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { remark: vec![i as u8; b as usize] }.into(); + Collective::::propose( SystemOrigin::Signed(caller.clone()).into(), threshold, Box::new(proposal), @@ -201,16 +196,16 @@ benchmarks_instance! 
{ )?; } - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - let proposal: T::Proposal = SystemCall::::remark(vec![p as u8; b as usize]).into(); + let proposal: T::Proposal = SystemCall::::remark { remark: vec![p as u8; b as usize] }.into(); }: propose(SystemOrigin::Signed(caller.clone()), threshold, Box::new(proposal.clone()), bytes_in_storage) verify { // New proposal is recorded - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Collective::::proposals().len(), p as usize); let proposal_hash = T::Hashing::hash_of(&proposal); - assert_last_event::(RawEvent::Proposed(caller, p - 1, proposal_hash, threshold).into()); + assert_last_event::(Event::Proposed(caller, p - 1, proposal_hash, threshold).into()); } vote { @@ -231,7 +226,7 @@ benchmarks_instance! { } let voter: T::AccountId = account("voter", 0, SEED); members.push(voter.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; + Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; // Threshold is 1 less than the number of members so that one person can vote nay let threshold = m - 1; @@ -240,8 +235,8 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { remark: vec![i as u8; b as usize] }.into(); + Collective::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, Box::new(proposal.clone()), @@ -252,11 +247,10 @@ benchmarks_instance! { let index = p - 1; // Have almost everyone vote aye on last proposal, while keeping it from passing. - // Proposer already voted aye so we start at 1. - for j in 1 .. m - 3 { + for j in 0 .. m - 3 { let voter = &members[j as usize]; let approve = true; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), index, @@ -265,14 +259,14 @@ benchmarks_instance! { } // Voter votes aye without resolving the vote. let approve = true; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), index, approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Collective::::proposals().len(), p as usize); // Voter switches vote to nay, but does not kill the vote, just updates + inserts let approve = false; @@ -283,8 +277,8 @@ benchmarks_instance! { }: _(SystemOrigin::Signed(voter), last_hash.clone(), index, approve) verify { // All proposals exist and the last proposal has just been updated. - assert_eq!(Collective::::proposals().len(), p as usize); - let voting = Collective::::voting(&last_hash).ok_or(Error::::ProposalMissing)?; + assert_eq!(Collective::::proposals().len(), p as usize); + let voting = Collective::::voting(&last_hash).ok_or("Proposal Missing")?; assert_eq!(voting.ayes.len(), (m - 3) as usize); assert_eq!(voting.nays.len(), 1); } @@ -307,7 +301,7 @@ benchmarks_instance! 
{ } let voter: T::AccountId = account("voter", 0, SEED); members.push(voter.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; + Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; // Threshold is total members so that one nay will disapprove the vote let threshold = m; @@ -316,8 +310,10 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; bytes as usize]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { + remark: vec![i as u8; bytes as usize] + }.into(); + Collective::::propose( SystemOrigin::Signed(proposer.clone()).into(), threshold, Box::new(proposal.clone()), @@ -328,11 +324,10 @@ benchmarks_instance! { let index = p - 1; // Have most everyone vote aye on last proposal, while keeping it from passing. - // Proposer already voted aye so we start at 1. - for j in 1 .. m - 2 { + for j in 0 .. m - 2 { let voter = &members[j as usize]; let approve = true; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), index, @@ -341,18 +336,18 @@ benchmarks_instance! { } // Voter votes aye without resolving the vote. let approve = true; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), index, approve, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Collective::::proposals().len(), p as usize); // Voter switches vote to nay, which kills the vote let approve = false; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), index, @@ -365,8 +360,8 @@ benchmarks_instance! { }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(RawEvent::Disapproved(last_hash).into()); + assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_last_event::(Event::Disapproved(last_hash).into()); } close_early_approved { @@ -385,7 +380,7 @@ benchmarks_instance! { } let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; + Collective::::set_members(SystemOrigin::Root.into(), members.clone(), None, T::MaxMembers::get())?; // Threshold is 2 so any two ayes will approve the vote let threshold = 2; @@ -394,8 +389,8 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { remark: vec![i as u8; b as usize] }.into(); + Collective::::propose( SystemOrigin::Signed(caller.clone()).into(), threshold, Box::new(proposal.clone()), @@ -405,7 +400,7 @@ benchmarks_instance! { } // Caller switches vote to nay on their own proposal, allowing them to be the deciding approval vote - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(caller.clone()).into(), last_hash.clone(), p - 1, @@ -416,7 +411,7 @@ benchmarks_instance! { for j in 2 .. 
m - 1 { let voter = &members[j as usize]; let approve = false; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), p - 1, @@ -425,19 +420,19 @@ benchmarks_instance! { } // Member zero is the first aye - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(members[0].clone()).into(), last_hash.clone(), p - 1, true, )?; - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Collective::::proposals().len(), p as usize); // Caller switches vote to aye, which passes the vote let index = p - 1; let approve = true; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(caller.clone()).into(), last_hash.clone(), index, approve, @@ -446,8 +441,8 @@ benchmarks_instance! { }: close(SystemOrigin::Signed(caller), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) verify { // The last proposal is removed. - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(RawEvent::Executed(last_hash, Err(DispatchError::BadOrigin)).into()); + assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_last_event::(Event::Executed(last_hash, Err(DispatchError::BadOrigin)).into()); } close_disapproved { @@ -466,7 +461,7 @@ benchmarks_instance! { } let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members( + Collective::::set_members( SystemOrigin::Root.into(), members.clone(), Some(caller.clone()), @@ -480,8 +475,10 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; bytes as usize]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { + remark: vec![i as u8; bytes as usize] + }.into(); + Collective::::propose( SystemOrigin::Signed(caller.clone()).into(), threshold, Box::new(proposal.clone()), @@ -496,7 +493,7 @@ benchmarks_instance! { for j in 2 .. m - 1 { let voter = &members[j as usize]; let approve = true; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), index, @@ -505,7 +502,7 @@ benchmarks_instance! { } // caller is prime, prime votes nay - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(caller.clone()).into(), last_hash.clone(), index, @@ -513,13 +510,13 @@ benchmarks_instance! { )?; System::::set_block_number(T::BlockNumber::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Collective::::proposals().len(), p as usize); // Prime nay will close it as disapproved }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(RawEvent::Disapproved(last_hash).into()); + assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_last_event::(Event::Disapproved(last_hash).into()); } close_approved { @@ -538,7 +535,7 @@ benchmarks_instance! { } let caller: T::AccountId = whitelisted_caller(); members.push(caller.clone()); - Collective::::set_members( + Collective::::set_members( SystemOrigin::Root.into(), members.clone(), Some(caller.clone()), @@ -552,8 +549,8 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. 
p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { remark: vec![i as u8; b as usize] }.into(); + Collective::::propose( SystemOrigin::Signed(caller.clone()).into(), threshold, Box::new(proposal.clone()), @@ -562,12 +559,20 @@ benchmarks_instance! { last_hash = T::Hashing::hash_of(&proposal); } + // The prime member votes aye, so abstentions default to aye. + Collective::::vote( + SystemOrigin::Signed(caller.clone()).into(), + last_hash.clone(), + p - 1, + true // Vote aye. + )?; + // Have almost everyone vote nay on last proposal, while keeping it from failing. // A few abstainers will be the aye votes needed to pass the vote. for j in 2 .. m - 1 { let voter = &members[j as usize]; let approve = false; - Collective::::vote( + Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash.clone(), p - 1, @@ -577,13 +582,13 @@ benchmarks_instance! { // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(T::BlockNumber::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Collective::::proposals().len(), p as usize); // Prime aye will close it as approved }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::max_value(), bytes_in_storage) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(RawEvent::Executed(last_hash, Err(DispatchError::BadOrigin)).into()); + assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_last_event::(Event::Executed(last_hash, Err(DispatchError::BadOrigin)).into()); } disapprove_proposal { @@ -601,7 +606,7 @@ benchmarks_instance! { } let caller: T::AccountId = account("caller", 0, SEED); members.push(caller.clone()); - Collective::::set_members( + Collective::::set_members( SystemOrigin::Root.into(), members.clone(), Some(caller.clone()), @@ -615,8 +620,8 @@ benchmarks_instance! { let mut last_hash = T::Hash::default(); for i in 0 .. p { // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark(vec![i as u8; b as usize]).into(); - Collective::::propose( + let proposal: T::Proposal = SystemCall::::remark { remark: vec![i as u8; b as usize] }.into(); + Collective::::propose( SystemOrigin::Signed(caller.clone()).into(), threshold, Box::new(proposal.clone()), @@ -626,88 +631,13 @@ benchmarks_instance! 
{ } System::::set_block_number(T::BlockNumber::max_value()); - assert_eq!(Collective::::proposals().len(), p as usize); + assert_eq!(Collective::::proposals().len(), p as usize); }: _(SystemOrigin::Root, last_hash) verify { - assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(RawEvent::Disapproved(last_hash).into()); + assert_eq!(Collective::::proposals().len(), (p - 1) as usize); + assert_last_event::(Event::Disapproved(last_hash).into()); } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn set_members() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_members::()); - }); - } - - #[test] - fn execute() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_execute::()); - }); - } - - #[test] - fn propose_execute() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_execute::()); - }); - } - - #[test] - fn propose_proposed() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_proposed::()); - }); - } - - #[test] - fn vote() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_vote::()); - }); - } - - #[test] - fn close_early_disapproved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_early_disapproved::()); - }); - } - - #[test] - fn close_early_approved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_early_approved::()); - }); - } - - #[test] - fn close_disapproved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_disapproved::()); - }); - } - - #[test] - fn close_approved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_approved::()); - }); - } - - #[test] - fn disapprove_proposal() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_disapprove_proposal::()); - }); - } -} +impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/collective/src/default_weight.rs b/frame/collective/src/default_weight.rs deleted file mode 100644 index bb6fe0ea25312..0000000000000 --- a/frame/collective/src/default_weight.rs +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Default weights for the Collective Pallet -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - (0 as Weight) - .saturating_add((21040000 as Weight).saturating_mul(m as Weight)) - .saturating_add((173000 as Weight).saturating_mul(n as Weight)) - .saturating_add((31595000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) - .saturating_add(DbWeight::get().writes(2 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) - } - fn execute(b: u32, m: u32, ) -> Weight { - (43359000 as Weight) - .saturating_add((4000 as Weight).saturating_mul(b as Weight)) - .saturating_add((123000 as Weight).saturating_mul(m as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - } - fn propose_execute(b: u32, m: u32, ) -> Weight { - (54134000 as Weight) - .saturating_add((4000 as Weight).saturating_mul(b as Weight)) - .saturating_add((239000 as Weight).saturating_mul(m as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - } - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (90650000 as Weight) - .saturating_add((5000 as Weight).saturating_mul(b as Weight)) - .saturating_add((152000 as Weight).saturating_mul(m as Weight)) - .saturating_add((970000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - } - fn vote(m: u32, ) -> Weight { - (74460000 as Weight) - .saturating_add((290000 as Weight).saturating_mul(m as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (86360000 as Weight) - .saturating_add((232000 as Weight).saturating_mul(m as Weight)) - .saturating_add((954000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (123653000 as Weight) - .saturating_add((1000 as Weight).saturating_mul(b as Weight)) - .saturating_add((287000 as Weight).saturating_mul(m as Weight)) - .saturating_add((920000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn close_disapproved(m: u32, p: u32, ) -> Weight { - (95395000 as Weight) - .saturating_add((236000 as Weight).saturating_mul(m as Weight)) - .saturating_add((965000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (135284000 as Weight) - .saturating_add((4000 as Weight).saturating_mul(b as Weight)) - .saturating_add((218000 as Weight).saturating_mul(m as Weight)) - .saturating_add((951000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn disapprove_proposal(p: u32, ) -> Weight { - (50500000 as Weight) - .saturating_add((966000 as Weight).saturating_mul(p as Weight)) - 
.saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } -} diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index dd44f5e2aea9e..89d4c8a150c36 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,9 +24,9 @@ //! calculations, but enforces this neither in `set_members` nor in `change_members_sorted`. //! //! A "prime" member may be set to help determine the default vote behavior based on chain -//! config. If `PreimDefaultVote` is used, the prime vote acts as the default vote in case of any +//! config. If `PrimeDefaultVote` is used, the prime vote acts as the default vote in case of any //! abstentions after the voting period. If `MoreThanMajorityThenPrimeDefaultVote` is used, then -//! abstentations will first follow the majority of the collective voting, and then the prime +//! abstentions will first follow the majority of the collective voting, and then the prime //! member. //! //! Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a @@ -40,30 +40,34 @@ //! If there are not, or if no prime is set, then the motion is dropped without being executed. #![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_std::{prelude::*, result}; +use scale_info::TypeInfo; use sp_core::u32_trait::Value as U32; use sp_io::storage; -use sp_runtime::{RuntimeDebug, traits::Hash}; +use sp_runtime::{traits::Hash, RuntimeDebug}; +use sp_std::{marker::PhantomData, prelude::*, result}; use frame_support::{ codec::{Decode, Encode}, - debug, decl_error, decl_event, decl_module, decl_storage, - dispatch::{ - DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable, Parameter, - PostDispatchInfo, - }, + dispatch::{DispatchError, DispatchResultWithPostInfo, Dispatchable, PostDispatchInfo}, ensure, - traits::{ChangeMembers, EnsureOrigin, Get, InitializeMembers}, - weights::{DispatchClass, GetDispatchInfo, Weight}, + traits::{ + Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, + }, + weights::{GetDispatchInfo, Weight}, }; -use frame_system::{self as system, ensure_signed, ensure_root}; + +#[cfg(test)] +mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migrations; +pub mod weights; -mod default_weight; +pub use pallet::*; +pub use weights::WeightInfo; /// Simple index type for proposal counting. pub type ProposalIndex = u32; @@ -105,7 +109,7 @@ impl DefaultVote for PrimeDefaultVote { } /// First see if yes vote are over majority of the whole collective. If so, set the default vote -/// as yes. Otherwise, use the prime meber's vote as the default vote. +/// as yes. Otherwise, use the prime member's vote as the default vote. 
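// A minimal sketch, not part of this change, of a custom strategy implementing
// the `DefaultVote` trait defined earlier in this file (its signature is
// assumed from the crate's two built-in implementations). The hypothetical
// `NayAsDefaultVote` counts every abstention as a nay, ignoring any prime
// member; a runtime would opt in with `type DefaultVote = NayAsDefaultVote;`.

pub struct NayAsDefaultVote;

impl DefaultVote for NayAsDefaultVote {
	fn default_vote(
		_prime_vote: Option<bool>,
		_yes_votes: MemberCount,
		_no_votes: MemberCount,
		_len: MemberCount,
	) -> bool {
		false
	}
}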
pub struct MoreThanMajorityThenPrimeDefaultVote;

impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote {
@@ -120,68 +124,29 @@ impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote {
	}
}

-pub trait WeightInfo {
-	fn set_members(m: u32, n: u32, p: u32, ) -> Weight;
-	fn execute(b: u32, m: u32, ) -> Weight;
-	fn propose_execute(b: u32, m: u32, ) -> Weight;
-	fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight;
-	fn vote(m: u32, ) -> Weight;
-	fn close_early_disapproved(m: u32, p: u32, ) -> Weight;
-	fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight;
-	fn close_disapproved(m: u32, p: u32, ) -> Weight;
-	fn close_approved(b: u32, m: u32, p: u32, ) -> Weight;
-	fn disapprove_proposal(p: u32, ) -> Weight;
-}
-
-pub trait Trait<I: Instance=DefaultInstance>: frame_system::Trait {
-	/// The outer origin type.
-	type Origin: From<RawOrigin<Self::AccountId, I>>;
-
-	/// The outer call dispatch type.
-	type Proposal: Parameter
-		+ Dispatchable<Origin=<Self as Trait<I>>::Origin, PostInfo=PostDispatchInfo>
-		+ From<frame_system::Call<Self>>
-		+ GetDispatchInfo;
-
-	/// The outer event type.
-	type Event: From<Event<Self, I>> + Into<<Self as frame_system::Trait>::Event>;
-
-	/// The time-out for council motions.
-	type MotionDuration: Get<Self::BlockNumber>;
-
-	/// Maximum number of proposals allowed to be active in parallel.
-	type MaxProposals: Get<u32>;
-
-	/// The maximum number of members supported by the pallet. Used for weight estimation.
-	///
-	/// NOTE:
-	/// + Benchmarks will need to be re-run and weights adjusted if this changes.
-	/// + This pallet assumes that dependents keep to the limit without enforcing it.
-	type MaxMembers: Get<u32>;
-
-	/// Default vote strategy of this collective.
-	type DefaultVote: DefaultVote;
-
-	/// Weight information for extrinsics in this pallet.
-	type WeightInfo: WeightInfo;
-}
-
 /// Origin for the collective module.
-#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode)]
+#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo)]
+#[scale_info(skip_type_params(I))]
 pub enum RawOrigin<AccountId, I> {
	/// It has been condoned by a given number of members of the collective from a given total.
	Members(MemberCount, MemberCount),
	/// It has been condoned by a single member of the collective.
	Member(AccountId),
	/// Dummy to manage the fact we have instancing.
-	_Phantom(sp_std::marker::PhantomData<I>),
+	_Phantom(PhantomData<I>),
 }

-/// Origin for the collective module.
-pub type Origin<T, I=DefaultInstance> = RawOrigin<<T as frame_system::Trait>::AccountId, I>;
+impl<AccountId, I> GetBacking for RawOrigin<AccountId, I> {
+	fn get_backing(&self) -> Option<Backing> {
+		match self {
+			RawOrigin::Members(n, d) => Some(Backing { approvals: *n, eligible: *d }),
+			_ => None,
+		}
+	}
+}
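// Illustration only, not part of this change: with the `GetBacking`
// implementation above, code that receives a collective origin can recover how
// strongly the dispatch was backed. A small sketch, assuming `u64` account ids
// and `()` as the instance type (`backing_of` is a hypothetical helper):

fn backing_of(origin: &RawOrigin<u64, ()>) -> Option<Backing> {
	// `RawOrigin::Members(3, 5)` yields `Some(Backing { approvals: 3, eligible: 5 })`,
	// while `Member(..)` and `_Phantom(..)` yield `None`.
	origin.get_backing()
}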
-#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
 /// Info for keeping track of a motion being voted on.
+#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
 pub struct Votes<AccountId, BlockNumber> {
	/// The proposal's unique index.
	index: ProposalIndex,
@@ -195,63 +160,154 @@ pub struct Votes<AccountId, BlockNumber> {
	end: BlockNumber,
 }

-decl_storage! {
-	trait Store for Module<T: Trait<I>, I: Instance=DefaultInstance> as Collective {
-		/// The hashes of the active proposals.
-		pub Proposals get(fn proposals): Vec<T::Hash>;
-		/// Actual proposal for a given hash, if it's current.
-		pub ProposalOf get(fn proposal_of):
-			map hasher(identity) T::Hash => Option<<T as Trait<I>>::Proposal>;
-		/// Votes on a given proposal, if it is ongoing.
-		pub Voting get(fn voting):
-			map hasher(identity) T::Hash => Option<Votes<T::AccountId, T::BlockNumber>>;
-		/// Proposals so far.
-		pub ProposalCount get(fn proposal_count): u32;
-		/// The current members of the collective. This is stored sorted (just by value).
-		pub Members get(fn members): Vec<T::AccountId>;
-		/// The prime member that helps determine the default vote behavior in case of absentations.
-		pub Prime get(fn prime): Option<T::AccountId>;
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	/// The current storage version.
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(4);
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	#[pallet::storage_version(STORAGE_VERSION)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		/// The outer origin type.
+		type Origin: From<RawOrigin<Self::AccountId, I>>;
+
+		/// The outer call dispatch type.
+		type Proposal: Parameter
+			+ Dispatchable<Origin = <Self as Config<I>>::Origin, PostInfo = PostDispatchInfo>
+			+ From<frame_system::Call<Self>>
+			+ GetDispatchInfo;
+
+		/// The outer event type.
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;
+
+		/// The time-out for council motions.
+		type MotionDuration: Get<Self::BlockNumber>;
+
+		/// Maximum number of proposals allowed to be active in parallel.
+		type MaxProposals: Get<u32>;
+
+		/// The maximum number of members supported by the pallet. Used for weight estimation.
+		///
+		/// NOTE:
+		/// + Benchmarks will need to be re-run and weights adjusted if this changes.
+		/// + This pallet assumes that dependents keep to the limit without enforcing it.
+		type MaxMembers: Get<u32>;
+
+		/// Default vote strategy of this collective.
+		type DefaultVote: DefaultVote;
+
+		/// Weight information for extrinsics in this pallet.
+		type WeightInfo: WeightInfo;
	}
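// A sketch of how a downstream runtime might implement the `Config` trait above
// for a council-style instance. All names here (`Runtime`, the `Council*`
// constants) are hypothetical; the runtime's aggregate `Origin`, `Call` and
// `Event` types are assumed to come from `construct_runtime!`, the block number
// type is assumed to be `u32`, and `WeightInfo` is left at `()` for brevity.

use frame_support::{instances::Instance1, parameter_types};

parameter_types! {
	pub const CouncilMotionDuration: u32 = 100;
	pub const CouncilMaxProposals: u32 = 100;
	pub const CouncilMaxMembers: u32 = 100;
}

impl pallet_collective::Config<Instance1> for Runtime {
	type Origin = Origin;
	type Proposal = Call;
	type Event = Event;
	type MotionDuration = CouncilMotionDuration;
	type MaxProposals = CouncilMaxProposals;
	type MaxMembers = CouncilMaxMembers;
	type DefaultVote = pallet_collective::PrimeDefaultVote;
	type WeightInfo = ();
}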
-	add_extra_genesis {
-		config(phantom): sp_std::marker::PhantomData<I>;
-		config(members): Vec<T::AccountId>;
-		build(|config| Module::<T, I>::initialize_members(&config.members))
+
+	#[pallet::genesis_config]
+	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
+		pub phantom: PhantomData<I>,
+		pub members: Vec<T::AccountId>,
+	}
+
+	#[cfg(feature = "std")]
+	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
+		fn default() -> Self {
+			Self { phantom: Default::default(), members: Default::default() }
+		}
	}
-}

-decl_event! {
-	pub enum Event<T, I> where
-		<T as frame_system::Trait>::Hash,
-		<T as frame_system::Trait>::AccountId,
-	{
+	#[pallet::genesis_build]
+	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig<T, I> {
+		fn build(&self) {
+			use sp_std::collections::btree_set::BTreeSet;
+			let members_set: BTreeSet<_> = self.members.iter().collect();
+			assert_eq!(
+				members_set.len(),
+				self.members.len(),
+				"Members cannot contain duplicate accounts."
+			);
+
+			Pallet::<T, I>::initialize_members(&self.members)
+		}
+	}
+
+	/// Origin for the collective pallet.
+	#[pallet::origin]
+	pub type Origin<T, I = ()> = RawOrigin<<T as frame_system::Config>::AccountId, I>;
+
+	/// The hashes of the active proposals.
+	#[pallet::storage]
+	#[pallet::getter(fn proposals)]
+	pub type Proposals<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, BoundedVec<T::Hash, T::MaxProposals>, ValueQuery>;
+
+	/// Actual proposal for a given hash, if it's current.
+	#[pallet::storage]
+	#[pallet::getter(fn proposal_of)]
+	pub type ProposalOf<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, T::Hash, <T as Config<I>>::Proposal, OptionQuery>;
+
+	/// Votes on a given proposal, if it is ongoing.
+	#[pallet::storage]
+	#[pallet::getter(fn voting)]
+	pub type Voting<T: Config<I>, I: 'static = ()> =
		StorageMap<_, Identity, T::Hash, Votes<T::AccountId, T::BlockNumber>, OptionQuery>;
+
+	/// Proposals so far.
+	#[pallet::storage]
+	#[pallet::getter(fn proposal_count)]
+	pub type ProposalCount<T: Config<I>, I: 'static = ()> = StorageValue<_, u32, ValueQuery>;
+
+	/// The current members of the collective. This is stored sorted (just by value).
+	#[pallet::storage]
+	#[pallet::getter(fn members)]
+	pub type Members<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, Vec<T::AccountId>, ValueQuery>;
+
+	/// The prime member that helps determine the default vote behavior in case of abstentions.
+	#[pallet::storage]
+	#[pallet::getter(fn prime)]
+	pub type Prime<T: Config<I>, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>;
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config<I>, I: 'static = ()> {
		/// A motion (given hash) has been proposed (by given account) with a threshold (given
		/// `MemberCount`).
		/// \[account, proposal_index, proposal_hash, threshold\]
-		Proposed(AccountId, ProposalIndex, Hash, MemberCount),
+		Proposed(T::AccountId, ProposalIndex, T::Hash, MemberCount),
		/// A motion (given hash) has been voted on by given account, leaving
		/// a tally (yes votes and no votes given respectively as `MemberCount`).
		/// \[account, proposal_hash, voted, yes, no\]
-		Voted(AccountId, Hash, bool, MemberCount, MemberCount),
+		Voted(T::AccountId, T::Hash, bool, MemberCount, MemberCount),
		/// A motion was approved by the required threshold.
		/// \[proposal_hash\]
-		Approved(Hash),
+		Approved(T::Hash),
		/// A motion was not approved by the required threshold.
		/// \[proposal_hash\]
-		Disapproved(Hash),
+		Disapproved(T::Hash),
		/// A motion was executed; result will be `Ok` if it returned without error.
		/// \[proposal_hash, result\]
-		Executed(Hash, DispatchResult),
+		Executed(T::Hash, DispatchResult),
		/// A single member did some action; result will be `Ok` if it returned without error.
		/// \[proposal_hash, result\]
-		MemberExecuted(Hash, DispatchResult),
+		MemberExecuted(T::Hash, DispatchResult),
		/// A proposal was closed because its threshold was reached or after its duration was up.
		/// \[proposal_hash, yes, no\]
-		Closed(Hash, MemberCount, MemberCount),
+		Closed(T::Hash, MemberCount, MemberCount),
	}
-}

-decl_error! {
-	pub enum Error<T: Trait<I>, I: Instance> for Module<T, I> {
+	/// Old name generated by `decl_event`.
+	#[deprecated(note = "use `Event` instead")]
+	pub type RawEvent<T, I = ()> = Event<T, I>;
+
+	#[pallet::error]
+	pub enum Error<T, I = ()> {
		/// Account is not a member
		NotMember,
		/// Duplicate proposals not allowed
@@ -273,32 +329,16 @@
		/// The given length bound for the proposal was too low.
		WrongProposalLength,
	}
-}
-
-/// Return the weight of a dispatch call result as an `Option<Weight>`.
-///
-/// Will return the weight regardless of what the state of the result is.
-fn get_result_weight(result: DispatchResultWithPostInfo) -> Option<Weight> {
-	match result {
-		Ok(post_info) => post_info.actual_weight,
-		Err(err) => err.post_info.actual_weight,
-	}
-}
-
-
-// Note that councillor operations are assigned to the operational class.
-decl_module! {
-	pub struct Module<T: Trait<I>, I: Instance=DefaultInstance> for enum Call where origin: <T as frame_system::Trait>::Origin {
-		type Error = Error<T, I>;
-
-		fn deposit_event() = default;
+	// Note that councillor operations are assigned to the operational class.
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
		/// Set the collective's membership.
		///
		/// - `new_members`: The new member list. Be nice to the chain and provide it sorted.
		/// - `prime`: The prime member whose vote sets the default.
-		/// - `old_count`: The upper bound for the previous number of members in storage.
-		///   Used for weight estimation.
+		/// - `old_count`: The upper bound for the previous number of members in storage. Used for
+		///   weight estimation.
		///
		/// Requires root origin.
		///
@@ -312,39 +352,43 @@ decl_module!
{ /// - `N` new-members-count (code- and governance-bounded) /// - `P` proposals-count (code-bounded) /// - DB: - /// - 1 storage mutation (codec `O(M)` read, `O(N)` write) for reading and writing the members + /// - 1 storage mutation (codec `O(M)` read, `O(N)` write) for reading and writing the + /// members /// - 1 storage read (codec `O(P)`) for reading the proposals /// - `P` storage mutations (codec `O(M)`) for updating the votes for each proposal /// - 1 storage write (codec `O(1)`) for deleting the old `prime` and setting the new one /// # - #[weight = ( + #[pallet::weight(( T::WeightInfo::set_members( *old_count, // M new_members.len() as u32, // N T::MaxProposals::get() // P ), DispatchClass::Operational - )] - fn set_members(origin, + ))] + pub fn set_members( + origin: OriginFor, new_members: Vec, prime: Option, old_count: MemberCount, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; if new_members.len() > T::MaxMembers::get() as usize { - debug::error!( - "New members count exceeds maximum amount of members expected. (expected: {}, actual: {})", + log::error!( + target: "runtime::collective", + "New members count ({}) exceeds maximum amount of members expected ({}).", + new_members.len(), T::MaxMembers::get(), - new_members.len() ); } let old = Members::::get(); if old.len() > old_count as usize { - debug::warn!( - "Wrong count used to estimate set_members weight. (expected: {}, actual: {})", + log::warn!( + target: "runtime::collective", + "Wrong count used to estimate set_members weight. expected ({}) vs actual ({})", old_count, - old.len() + old.len(), ); } let mut new_members = new_members; @@ -353,10 +397,11 @@ decl_module! { Prime::::set(prime); Ok(Some(T::WeightInfo::set_members( - old.len() as u32, // M + old.len() as u32, // M new_members.len() as u32, // N - T::MaxProposals::get(), // P - )).into()) + T::MaxProposals::get(), // P + )) + .into()) } /// Dispatch a proposal from a member using the `Member` origin. @@ -365,20 +410,22 @@ decl_module! { /// /// # /// ## Weight - /// - `O(M + P)` where `M` members-count (code-bounded) and `P` complexity of dispatching `proposal` + /// - `O(M + P)` where `M` members-count (code-bounded) and `P` complexity of dispatching + /// `proposal` /// - DB: 1 read (codec `O(M)`) + DB access of `proposal` /// - 1 event /// # - #[weight = ( + #[pallet::weight(( T::WeightInfo::execute( *length_bound, // B T::MaxMembers::get(), // M ).saturating_add(proposal.get_dispatch_info().weight), // P DispatchClass::Operational - )] - fn execute(origin, - proposal: Box<>::Proposal>, - #[compact] length_bound: u32, + ))] + pub fn execute( + origin: OriginFor, + proposal: Box<>::Proposal>, + #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let members = Self::members(); @@ -388,16 +435,20 @@ decl_module! 
{ let proposal_hash = T::Hashing::hash_of(&proposal); let result = proposal.dispatch(RawOrigin::Member(who).into()); - Self::deposit_event( - RawEvent::MemberExecuted(proposal_hash, result.map(|_| ()).map_err(|e| e.error)) - ); - - Ok(get_result_weight(result).map(|w| { - T::WeightInfo::execute( - proposal_len as u32, // B - members.len() as u32, // M - ).saturating_add(w) // P - }).into()) + Self::deposit_event(Event::MemberExecuted( + proposal_hash, + result.map(|_| ()).map_err(|e| e.error), + )); + + Ok(get_result_weight(result) + .map(|w| { + T::WeightInfo::execute( + proposal_len as u32, // B + members.len() as u32, // M + ) + .saturating_add(w) // P + }) + .into()) } /// Add a new proposal to either be voted on or executed directly. @@ -427,7 +478,7 @@ decl_module! { /// - 1 storage write `Voting` (codec `O(M)`) /// - 1 event /// # - #[weight = ( + #[pallet::weight(( if *threshold < 2 { T::WeightInfo::propose_execute( *length_bound, // B @@ -441,11 +492,12 @@ decl_module! { ) }, DispatchClass::Operational - )] - fn propose(origin, - #[compact] threshold: MemberCount, - proposal: Box<>::Proposal>, - #[compact] length_bound: u32 + ))] + pub fn propose( + origin: OriginFor, + #[pallet::compact] threshold: MemberCount, + proposal: Box<>::Proposal>, + #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let members = Self::members(); @@ -454,45 +506,53 @@ decl_module! { let proposal_len = proposal.using_encoded(|x| x.len()); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); let proposal_hash = T::Hashing::hash_of(&proposal); - ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); + ensure!( + !>::contains_key(proposal_hash), + Error::::DuplicateProposal + ); if threshold < 2 { let seats = Self::members().len() as MemberCount; let result = proposal.dispatch(RawOrigin::Members(1, seats).into()); - Self::deposit_event( - RawEvent::Executed(proposal_hash, result.map(|_| ()).map_err(|e| e.error)) - ); - - Ok(get_result_weight(result).map(|w| { - T::WeightInfo::propose_execute( - proposal_len as u32, // B - members.len() as u32, // M - ).saturating_add(w) // P1 - }).into()) + Self::deposit_event(Event::Executed( + proposal_hash, + result.map(|_| ()).map_err(|e| e.error), + )); + + Ok(get_result_weight(result) + .map(|w| { + T::WeightInfo::propose_execute( + proposal_len as u32, // B + members.len() as u32, // M + ) + .saturating_add(w) // P1 + }) + .into()) } else { let active_proposals = >::try_mutate(|proposals| -> Result { - proposals.push(proposal_hash); - ensure!( - proposals.len() <= T::MaxProposals::get() as usize, - Error::::TooManyProposals - ); + proposals + .try_push(proposal_hash) + .map_err(|_| Error::::TooManyProposals)?; Ok(proposals.len()) })?; let index = Self::proposal_count(); - >::mutate(|i| *i += 1); + >::mutate(|i| *i += 1); >::insert(proposal_hash, *proposal); - let end = system::Module::::block_number() + T::MotionDuration::get(); - let votes = Votes { index, threshold, ayes: vec![who.clone()], nays: vec![], end }; + let votes = { + let end = frame_system::Pallet::::block_number() + T::MotionDuration::get(); + Votes { index, threshold, ayes: vec![], nays: vec![], end } + }; >::insert(proposal_hash, votes); - Self::deposit_event(RawEvent::Proposed(who, index, proposal_hash, threshold)); + Self::deposit_event(Event::Proposed(who, index, proposal_hash, threshold)); Ok(Some(T::WeightInfo::propose_proposed( - proposal_len as u32, // B - members.len() as u32, // M + proposal_len as 
u32, // B + members.len() as u32, // M active_proposals as u32, // P2 - )).into()) + )) + .into()) } } @@ -500,6 +560,9 @@ decl_module! { /// /// Requires the sender to be a member. /// + /// Transaction fees will be waived if the member is voting on any particular proposal + /// for the first time and the call is successful. Subsequent vote changes will charge a + /// fee. /// # /// ## Weight /// - `O(M)` where `M` is members-count (code- and governance-bounded) @@ -508,13 +571,11 @@ decl_module! { /// - 1 storage mutation `Voting` (codec `O(M)`) /// - 1 event /// # - #[weight = ( - T::WeightInfo::vote(T::MaxMembers::get()), - DispatchClass::Operational - )] - fn vote(origin, + #[pallet::weight((T::WeightInfo::vote(T::MaxMembers::get()), DispatchClass::Operational))] + pub fn vote( + origin: OriginFor, proposal: T::Hash, - #[compact] index: ProposalIndex, + #[pallet::compact] index: ProposalIndex, approve: bool, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -527,11 +588,14 @@ decl_module! { let position_yes = voting.ayes.iter().position(|a| a == &who); let position_no = voting.nays.iter().position(|a| a == &who); + // Detects first vote of the member in the motion + let is_account_voting_first_time = position_yes.is_none() && position_no.is_none(); + if approve { if position_yes.is_none() { voting.ayes.push(who.clone()); } else { - Err(Error::::DuplicateVote)? + return Err(Error::::DuplicateVote.into()) } if let Some(pos) = position_no { voting.nays.swap_remove(pos); @@ -540,7 +604,7 @@ decl_module! { if position_no.is_none() { voting.nays.push(who.clone()); } else { - Err(Error::::DuplicateVote)? + return Err(Error::::DuplicateVote.into()) } if let Some(pos) = position_yes { voting.ayes.swap_remove(pos); @@ -549,11 +613,15 @@ decl_module! { let yes_votes = voting.ayes.len() as MemberCount; let no_votes = voting.nays.len() as MemberCount; - Self::deposit_event(RawEvent::Voted(who, proposal, approve, yes_votes, no_votes)); + Self::deposit_event(Event::Voted(who, proposal, approve, yes_votes, no_votes)); Voting::::insert(&proposal, voting); - Ok(Some(T::WeightInfo::vote(members.len() as u32)).into()) + if is_account_voting_first_time { + Ok((Some(T::WeightInfo::vote(members.len() as u32)), Pays::No).into()) + } else { + Ok((Some(T::WeightInfo::vote(members.len() as u32)), Pays::Yes).into()) + } } /// Close a vote that is either approved, disapproved or whose voting period has ended. @@ -566,9 +634,13 @@ decl_module! { /// If called after the end of the voting period abstentions are counted as rejections /// unless there is a prime member set and the prime member cast an approval. /// - /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed proposal. + /// If the close operation completes successfully with disapproval, the transaction fee will + /// be waived. Otherwise execution of the approved operation will be charged to the caller. + /// + /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed + /// proposal. /// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via - /// `storage::read` so it is `size_of::() == 4` larger than the pure length. + /// `storage::read` so it is `size_of::() == 4` larger than the pure length. /// /// # /// ## Weight @@ -579,11 +651,12 @@ decl_module! 
{ /// - `P2` is proposal-count (code-bounded) /// - DB: /// - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`) - /// - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec `O(P2)`) + /// - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec + /// `O(P2)`) /// - any mutations done while executing `proposal` (`P1`) /// - up to 3 events /// # - #[weight = ( + #[pallet::weight(( { let b = *length_bound; let m = T::MaxMembers::get(); @@ -596,12 +669,13 @@ decl_module! { .saturating_add(p1) }, DispatchClass::Operational - )] - fn close(origin, + ))] + pub fn close( + origin: OriginFor, proposal_hash: T::Hash, - #[compact] index: ProposalIndex, - #[compact] proposal_weight_bound: Weight, - #[compact] length_bound: u32 + #[pallet::compact] index: ProposalIndex, + #[pallet::compact] proposal_weight_bound: Weight, + #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { let _ = ensure_signed(origin)?; @@ -618,25 +692,34 @@ decl_module! { let (proposal, len) = Self::validate_and_get_proposal( &proposal_hash, length_bound, - proposal_weight_bound + proposal_weight_bound, )?; - Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); let (proposal_weight, proposal_count) = - Self::do_approve_proposal(seats, voting, proposal_hash, proposal); - return Ok(Some( - T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight) - ).into()); + Self::do_approve_proposal(seats, yes_votes, proposal_hash, proposal); + return Ok(( + Some( + T::WeightInfo::close_early_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight), + ), + Pays::Yes, + ) + .into()) } else if disapproved { - Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); let proposal_count = Self::do_disapprove_proposal(proposal_hash); - return Ok(Some( - T::WeightInfo::close_early_disapproved(seats, proposal_count) - ).into()); + return Ok(( + Some(T::WeightInfo::close_early_disapproved(seats, proposal_count)), + Pays::No, + ) + .into()) } // Only allow actual closing of the proposal after the voting period has ended. - ensure!(system::Module::::block_number() >= voting.end, Error::::TooEarly); + ensure!( + frame_system::Pallet::::block_number() >= voting.end, + Error::::TooEarly + ); let prime_vote = Self::prime().map(|who| voting.ayes.iter().any(|a| a == &who)); @@ -654,25 +737,28 @@ decl_module! 
{ let (proposal, len) = Self::validate_and_get_proposal( &proposal_hash, length_bound, - proposal_weight_bound + proposal_weight_bound, )?; - Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); let (proposal_weight, proposal_count) = - Self::do_approve_proposal(seats, voting, proposal_hash, proposal); - return Ok(Some( - T::WeightInfo::close_approved(len as u32, seats, proposal_count) - .saturating_add(proposal_weight) - ).into()); + Self::do_approve_proposal(seats, yes_votes, proposal_hash, proposal); + Ok(( + Some( + T::WeightInfo::close_approved(len as u32, seats, proposal_count) + .saturating_add(proposal_weight), + ), + Pays::Yes, + ) + .into()) } else { - Self::deposit_event(RawEvent::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); let proposal_count = Self::do_disapprove_proposal(proposal_hash); - return Ok(Some( - T::WeightInfo::close_disapproved(seats, proposal_count) - ).into()); + Ok((Some(T::WeightInfo::close_disapproved(seats, proposal_count)), Pays::No).into()) } } - /// Disapprove a proposal, close, and remove it from the system, regardless of its current state. + /// Disapprove a proposal, close, and remove it from the system, regardless of its current + /// state. /// /// Must be called by the Root origin. /// @@ -685,8 +771,11 @@ decl_module! { /// * Reads: Proposals /// * Writes: Voting, Proposals, ProposalOf /// # - #[weight = T::WeightInfo::disapprove_proposal(T::MaxProposals::get())] - fn disapprove_proposal(origin, proposal_hash: T::Hash) -> DispatchResultWithPostInfo { + #[pallet::weight(T::WeightInfo::disapprove_proposal(T::MaxProposals::get()))] + pub fn disapprove_proposal( + origin: OriginFor, + proposal_hash: T::Hash, + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; let proposal_count = Self::do_disapprove_proposal(proposal_hash); Ok(Some(T::WeightInfo::disapprove_proposal(proposal_count)).into()) @@ -694,7 +783,17 @@ decl_module! { } } -impl, I: Instance> Module { +/// Return the weight of a dispatch call result as an `Option`. +/// +/// Will return the weight regardless of what the state of the result is. +fn get_result_weight(result: DispatchResultWithPostInfo) -> Option { + match result { + Ok(post_info) => post_info.actual_weight, + Err(err) => err.post_info.actual_weight, + } +} + +impl, I: 'static> Pallet { /// Check whether `who` is a member of the collective. 
	pub fn is_member(who: &T::AccountId) -> bool {
		// Note: The dispatchables *do not* use this to check membership so make sure
@@ -709,12 +808,12 @@ impl<T: Trait<I>, I: Instance> Module<T, I> {
	fn validate_and_get_proposal(
		hash: &T::Hash,
		length_bound: u32,
-		weight_bound: Weight
-	) -> Result<(<T as Trait<I>>::Proposal, usize), DispatchError> {
+		weight_bound: Weight,
+	) -> Result<(<T as Config<I>>::Proposal, usize), DispatchError> {
		let key = ProposalOf::<T, I>::hashed_key_for(hash);
		// read the length of the proposal storage entry directly
-		let proposal_len = storage::read(&key, &mut [0; 0], 0)
-			.ok_or(Error::<T, I>::ProposalMissing)?;
+		let proposal_len =
+			storage::read(&key, &mut [0; 0], 0).ok_or(Error::<T, I>::ProposalMissing)?;
		ensure!(proposal_len <= length_bound, Error::<T, I>::WrongProposalLength);
		let proposal = ProposalOf::<T, I>::get(hash).ok_or(Error::<T, I>::ProposalMissing)?;
		let proposal_weight = proposal.get_dispatch_info().weight;
@@ -738,18 +837,19 @@ impl<T: Trait<I>, I: Instance> Module<T, I> {
	/// - `P` is number of active proposals
	fn do_approve_proposal(
		seats: MemberCount,
-		voting: Votes<T::AccountId, T::BlockNumber>,
+		yes_votes: MemberCount,
		proposal_hash: T::Hash,
-		proposal: <T as Trait<I>>::Proposal,
+		proposal: <T as Config<I>>::Proposal,
	) -> (Weight, u32) {
-		Self::deposit_event(RawEvent::Approved(proposal_hash));
+		Self::deposit_event(Event::Approved(proposal_hash));

		let dispatch_weight = proposal.get_dispatch_info().weight;
-		let origin = RawOrigin::Members(voting.threshold, seats).into();
+		let origin = RawOrigin::Members(yes_votes, seats).into();
		let result = proposal.dispatch(origin);
-		Self::deposit_event(
-			RawEvent::Executed(proposal_hash, result.map(|_| ()).map_err(|e| e.error))
-		);
+		Self::deposit_event(Event::Executed(
+			proposal_hash,
+			result.map(|_| ()).map_err(|e| e.error),
+		));
		// default to the dispatch info weight for safety
		let proposal_weight = get_result_weight(result).unwrap_or(dispatch_weight); // P1
@@ -759,7 +859,7 @@ impl<T: Trait<I>, I: Instance> Module<T, I> {

	fn do_disapprove_proposal(proposal_hash: T::Hash) -> u32 {
		// disapproved
-		Self::deposit_event(RawEvent::Disapproved(proposal_hash));
+		Self::deposit_event(Event::Disapproved(proposal_hash));
		Self::remove_proposal(proposal_hash)
	}
@@ -776,7 +876,7 @@ impl<T: Trait<I>, I: Instance> Module<T, I> {
	}
}

-impl<T: Trait<I>, I: Instance> ChangeMembers<T::AccountId> for Module<T, I> {
+impl<T: Config<I>, I: 'static> ChangeMembers<T::AccountId> for Pallet<T, I> {
	/// Update the members of the collective. Votes are updated and the prime is reset.
	///
	/// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but
@@ -800,27 +900,32 @@ impl<T: Trait<I>, I: Instance> ChangeMembers<T::AccountId> for Module<T, I> {
		new: &[T::AccountId],
	) {
		if new.len() > T::MaxMembers::get() as usize {
-			debug::error!(
-				"New members count exceeds maximum amount of members expected. (expected: {}, actual: {})",
+			log::error!(
+				target: "runtime::collective",
+				"New members count ({}) exceeds maximum amount of members expected ({}).",
+				new.len(),
				T::MaxMembers::get(),
-				new.len()
			);
		}
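// Illustration, a sketch that is not part of this change: a membership-managing
// pallet typically drives the hook below. With current members `[1, 2, 3]` and
// `u64` account ids (as in this pallet's tests, where the pallet is named
// `Collective`), swapping member 1 for member 4 looks like:
//
//     Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]);
//
// Afterwards every aye or nay recorded for account 1 is pruned from ongoing
// motions by the filtering loop that follows, and the prime member is cleared.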
		// remove accounts from all current voting in motions.
		let mut outgoing = outgoing.to_vec();
		outgoing.sort();
		for h in Self::proposals().into_iter() {
-			<Voting<T, I>>::mutate(h, |v|
+			<Voting<T, I>>::mutate(h, |v| {
				if let Some(mut votes) = v.take() {
-					votes.ayes = votes.ayes.into_iter()
+					votes.ayes = votes
+						.ayes
+						.into_iter()
						.filter(|i| outgoing.binary_search(i).is_err())
						.collect();
-					votes.nays = votes.nays.into_iter()
+					votes.nays = votes
+						.nays
+						.into_iter()
						.filter(|i| outgoing.binary_search(i).is_err())
						.collect();
					*v = Some(votes);
				}
-			);
+			});
		}
		Members::<T, I>::put(new);
		Prime::<T, I>::kill();
@@ -829,9 +934,13 @@ impl<T: Trait<I>, I: Instance> ChangeMembers<T::AccountId> for Module<T, I> {
	fn set_prime(prime: Option<T::AccountId>) {
		Prime::<T, I>::set(prime);
	}
+
+	fn get_prime() -> Option<T::AccountId> {
+		Prime::<T, I>::get()
+	}
}

-impl<T: Trait<I>, I: Instance> InitializeMembers<T::AccountId> for Module<T, I> {
+impl<T: Config<I>, I: 'static> InitializeMembers<T::AccountId> for Pallet<T, I> {
	fn initialize_members(members: &[T::AccountId]) {
		if !members.is_empty() {
			assert!(<Members<T, I>>::get().is_empty(), "Members are already initialized!");
@@ -842,10 +951,12 @@

/// Ensure that the origin `o` represents at least `n` members. Returns `Ok` or an `Err`
/// otherwise.
-pub fn ensure_members<OuterOrigin, AccountId, I>(o: OuterOrigin, n: MemberCount)
-	-> result::Result<MemberCount, &'static str>
+pub fn ensure_members<OuterOrigin, AccountId, I>(
+	o: OuterOrigin,
+	n: MemberCount,
+) -> result::Result<MemberCount, &'static str>
where
-	OuterOrigin: Into<result::Result<RawOrigin<AccountId, I>, OuterOrigin>>
+	OuterOrigin: Into<result::Result<RawOrigin<AccountId, I>, OuterOrigin>>,
{
	match o.into() {
		Ok(RawOrigin::Members(x, _)) if x >= n => Ok(n),
@@ -853,12 +964,13 @@ where
	}
}

-pub struct EnsureMember<AccountId, I=DefaultInstance>(sp_std::marker::PhantomData<(AccountId, I)>);
+pub struct EnsureMember<AccountId, I: 'static>(PhantomData<(AccountId, I)>);
impl<
-	O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
-	AccountId: Default,
-	I,
-> EnsureOrigin<O> for EnsureMember<AccountId, I> {
+		O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
+		AccountId: Default,
+		I,
+	> EnsureOrigin<O> for EnsureMember<AccountId, I>
+{
	type Success = AccountId;
	fn try_origin(o: O) -> Result<Self::Success, O> {
		o.into().and_then(|o| match o {
@@ -873,13 +985,14 @@ impl<
	}
}

-pub struct EnsureMembers<N: U32, AccountId, I=DefaultInstance>(sp_std::marker::PhantomData<(N, AccountId, I)>);
+pub struct EnsureMembers<N: U32, AccountId, I: 'static>(PhantomData<(N, AccountId, I)>);
impl<
-	O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
-	N: U32,
-	AccountId,
-	I,
-> EnsureOrigin<O> for EnsureMembers<N, AccountId, I> {
+		O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
+		N: U32,
+		AccountId,
+		I,
+	> EnsureOrigin<O> for EnsureMembers<N, AccountId, I>
+{
	type Success = (MemberCount, MemberCount);
	fn try_origin(o: O) -> Result<Self::Success, O> {
		o.into().and_then(|o| match o {
@@ -894,16 +1007,17 @@ impl<
	}
}

-pub struct EnsureProportionMoreThan<N: U32, D: U32, AccountId, I=DefaultInstance>(
-	sp_std::marker::PhantomData<(N, D, AccountId, I)>
+pub struct EnsureProportionMoreThan<N: U32, D: U32, AccountId, I: 'static>(
+	PhantomData<(N, D, AccountId, I)>,
);
impl<
-	O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
-	N: U32,
-	D: U32,
-	AccountId,
-	I,
-> EnsureOrigin<O> for EnsureProportionMoreThan<N, D, AccountId, I> {
+		O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
+		N: U32,
+		D: U32,
+		AccountId,
+		I,
+	> EnsureOrigin<O> for EnsureProportionMoreThan<N, D, AccountId, I>
+{
	type Success = ();
	fn try_origin(o: O) -> Result<Self::Success, O> {
		o.into().and_then(|o| match o {
@@ -918,16 +1032,17 @@ impl<
	}
}

-pub struct EnsureProportionAtLeast<N: U32, D: U32, AccountId, I=DefaultInstance>(
-	sp_std::marker::PhantomData<(N, D, AccountId, I)>
+pub struct EnsureProportionAtLeast<N: U32, D: U32, AccountId, I: 'static>(
+	PhantomData<(N, D, AccountId, I)>,
);
impl<
-	O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
-	N: U32,
-	D: U32,
-	AccountId,
-	I,
-> EnsureOrigin<O> for EnsureProportionAtLeast<N, D, AccountId, I> {
+		O: Into<Result<RawOrigin<AccountId, I>, O>> + From<RawOrigin<AccountId, I>>,
+		N: U32,
+		D: U32,
+		AccountId,
+		I,
+	> EnsureOrigin<O> for EnsureProportionAtLeast<N, D, AccountId, I>
+{
	type Success = ();
	fn try_origin(o: O) -> Result<Self::Success, O> {
		o.into().and_then(|o| match o {
@@ -941,691 +1056,3 @@ impl<
		O::from(RawOrigin::Members(0u32, 0u32))
	}
}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use frame_support::{Hashable, assert_ok, assert_noop, parameter_types, weights::Weight};
-	use frame_system::{self
as system, EventRecord, Phase}; - use hex_literal::hex; - use sp_core::H256; - use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, testing::Header, - BuildStorage, - }; - use crate as collective; - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MotionDuration: u64 = 3; - pub const MaxProposals: u32 = 100; - pub const MaxMembers: u32 = 100; - } - impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = Call; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type PalletInfo = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - } - impl Trait for Test { - type Origin = Origin; - type Proposal = Call; - type Event = Event; - type MotionDuration = MotionDuration; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = PrimeDefaultVote; - type WeightInfo = (); - } - impl Trait for Test { - type Origin = Origin; - type Proposal = Call; - type Event = Event; - type MotionDuration = MotionDuration; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; - type WeightInfo = (); - } - impl Trait for Test { - type Origin = Origin; - type Proposal = Call; - type Event = Event; - type MotionDuration = MotionDuration; - type MaxProposals = MaxProposals; - type MaxMembers = MaxMembers; - type DefaultVote = PrimeDefaultVote; - type WeightInfo = (); - } - - pub type Block = sp_runtime::generic::Block; - pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; - - frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: system::{Module, Call, Event}, - Collective: collective::::{Module, Call, Event, Origin, Config}, - CollectiveMajority: collective::::{Module, Call, Event, Origin, Config}, - DefaultCollective: collective::{Module, Call, Event, Origin, Config}, - } - ); - - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig { - collective_Instance1: Some(collective::GenesisConfig { - members: vec![1, 2, 3], - phantom: Default::default(), - }), - collective_Instance2: Some(collective::GenesisConfig { - members: vec![1, 2, 3, 4, 5], - phantom: Default::default(), - }), - collective: None, - }.build_storage().unwrap().into(); - ext.execute_with(|| System::set_block_number(1)); - ext - } - - fn make_proposal(value: u64) -> Call { - Call::System(frame_system::Call::remark(value.encode())) - } - - #[test] - fn motions_basic_environment_works() { - new_test_ext().execute_with(|| { - assert_eq!(Collective::members(), vec![1, 2, 3]); - assert_eq!(Collective::proposals(), Vec::::new()); - }); - } - - #[test] - fn close_works() { - 
new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - - System::set_block_number(3); - assert_noop!( - Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len), - Error::::TooEarly - ); - - System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), - record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) - ]); - }); - } - - #[test] - fn proposal_weight_limit_works_on_approve() { - new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - // Set 1 as prime voter - Prime::::set(Some(1)); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - // With 1's prime vote, this should pass - System::set_block_number(4); - assert_noop!( - Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight - 100, proposal_len), - Error::::WrongProposalWeight - ); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - }) - } - - #[test] - fn proposal_weight_limit_ignored_on_disapprove() { - new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - // No votes, this proposal wont pass - System::set_block_number(4); - assert_ok!( - Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight - 100, proposal_len) - ); - }) - } - - #[test] - fn close_with_prime_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(3), MaxMembers::get())); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - - System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - 
record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), - record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) - ]); - }); - } - - #[test] - fn close_with_voting_prime_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(1), MaxMembers::get())); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - - System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 3, 0))), - record(Event::collective_Instance1(RawEvent::Approved(hash.clone()))), - record(Event::collective_Instance1(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) - ]); - }); - } - - #[test] - fn close_with_no_prime_but_majority_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(CollectiveMajority::set_members(Origin::root(), vec![1, 2, 3, 4, 5], Some(5), MaxMembers::get())); - - assert_ok!(CollectiveMajority::propose(Origin::signed(1), 5, Box::new(proposal.clone()), proposal_len)); - assert_ok!(CollectiveMajority::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_ok!(CollectiveMajority::vote(Origin::signed(3), hash.clone(), 0, true)); - - System::set_block_number(4); - assert_ok!(CollectiveMajority::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::collective_Instance2(RawEvent::Proposed(1, 0, hash.clone(), 5))), - record(Event::collective_Instance2(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance2(RawEvent::Voted(3, hash.clone(), true, 3, 0))), - record(Event::collective_Instance2(RawEvent::Closed(hash.clone(), 5, 0))), - record(Event::collective_Instance2(RawEvent::Approved(hash.clone()))), - record(Event::collective_Instance2(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) - ]); - }); - } - - #[test] - fn removal_of_old_voters_votes_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_eq!( - 
Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) - ); - Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) - ); - - let proposal = make_proposal(69); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(2), 2, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) - ); - Collective::change_members_sorted(&[], &[3], &[2, 4]); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) - ); - }); - } - - #[test] - fn removal_of_old_voters_votes_works_with_set_members() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) - ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 3, 4], None, MaxMembers::get())); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) - ); - - let proposal = make_proposal(69); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(2), 2, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) - ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 4], None, MaxMembers::get())); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) - ); - }); - } - - #[test] - fn propose_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash = proposal.blake2_256().into(); - let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_eq!(Collective::proposals(), vec![hash]); - assert_eq!(Collective::proposal_of(&hash), Some(proposal)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1], nays: vec![], end }) - ); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 3, - )), - topics: vec![], - } - ]); - }); - } - - #[test] - fn limit_active_proposals() { - new_test_ext().execute_with(|| { - for i in 0..MaxProposals::get() { - let proposal = make_proposal(i as u64); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - assert_ok!(Collective::propose(Origin::signed(1), 3, 
-			Box::new(proposal.clone()), proposal_len));
-		}
-		let proposal = make_proposal(MaxProposals::get() as u64 + 1);
-		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
-		assert_noop!(
-			Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len),
-			Error::<Test, Instance1>::TooManyProposals
-		);
-	})
-}
-
-#[test]
-fn correct_validate_and_get_proposal() {
-	new_test_ext().execute_with(|| {
-		let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get()));
-		let length = proposal.encode().len() as u32;
-		assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), length));
-
-		let hash = BlakeTwo256::hash_of(&proposal);
-		let weight = proposal.get_dispatch_info().weight;
-		assert_noop!(
-			Collective::validate_and_get_proposal(&BlakeTwo256::hash_of(&vec![3; 4]), length, weight),
-			Error::<Test, Instance1>::ProposalMissing
-		);
-		assert_noop!(
-			Collective::validate_and_get_proposal(&hash, length - 2, weight),
-			Error::<Test, Instance1>::WrongProposalLength
-		);
-		assert_noop!(
-			Collective::validate_and_get_proposal(&hash, length, weight - 10),
-			Error::<Test, Instance1>::WrongProposalWeight
-		);
-		let res = Collective::validate_and_get_proposal(&hash, length, weight);
-		assert_ok!(res.clone());
-		let (retrieved_proposal, len) = res.unwrap();
-		assert_eq!(length as usize, len);
-		assert_eq!(proposal, retrieved_proposal);
-	})
-}
-
-#[test]
-fn motions_ignoring_non_collective_proposals_works() {
-	new_test_ext().execute_with(|| {
-		let proposal = make_proposal(42);
-		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
-		assert_noop!(
-			Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone()), proposal_len),
-			Error::<Test, Instance1>::NotMember
-		);
-	});
-}
-
-#[test]
-fn motions_ignoring_non_collective_votes_works() {
-	new_test_ext().execute_with(|| {
-		let proposal = make_proposal(42);
-		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
-		let hash: H256 = proposal.blake2_256().into();
-		assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len));
-		assert_noop!(
-			Collective::vote(Origin::signed(42), hash.clone(), 0, true),
-			Error::<Test, Instance1>::NotMember,
-		);
-	});
-}
-
-#[test]
-fn motions_ignoring_bad_index_collective_vote_works() {
-	new_test_ext().execute_with(|| {
-		System::set_block_number(3);
-		let proposal = make_proposal(42);
-		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
-		let hash: H256 = proposal.blake2_256().into();
-		assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len));
-		assert_noop!(
-			Collective::vote(Origin::signed(2), hash.clone(), 1, true),
-			Error::<Test, Instance1>::WrongIndex,
-		);
-	});
-}
-
-#[test]
-fn motions_revoting_works() {
-	new_test_ext().execute_with(|| {
-		let proposal = make_proposal(42);
-		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
-		let hash: H256 = proposal.blake2_256().into();
-		let end = 4;
-		assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len));
-		assert_eq!(
-			Collective::voting(&hash),
-			Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end })
-		);
-		assert_noop!(
-			Collective::vote(Origin::signed(1), hash.clone(), 0, true),
-			Error::<Test, Instance1>::DuplicateVote,
-		);
-		assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, false));
-		assert_eq!(
-			Collective::voting(&hash),
-			Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end })
-		);
-		assert_noop!(
-			Collective::vote(Origin::signed(1), hash.clone(), 0,
false), - Error::::DuplicateVote, - ); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 2, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( - 1, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - false, - 0, - 1, - )), - topics: vec![], - } - ]); - }); - } - - #[test] - fn motions_reproposing_disapproved_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); - assert_eq!(Collective::proposals(), vec![]); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); - assert_eq!(Collective::proposals(), vec![hash]); - }); - } - - #[test] - fn motions_disapproval_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1( - RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 3, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( - 2, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - false, - 1, - 1, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Closed( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), 1, 1, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Disapproved( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - )), - topics: vec![], - } - ]); - }); - } - - #[test] - fn motions_approval_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - 
event: Event::collective_Instance1(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 2, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( - 2, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - true, - 2, - 0, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Closed( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), 2, 0, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Approved( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Executed( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - Err(DispatchError::BadOrigin), - )), - topics: vec![], - } - ]); - }); - } - - #[test] - fn close_disapprove_does_not_care_about_weight_or_len() { - // This test confirms that if you close a proposal that would be disapproved, - // we do not care about the proposal length or proposal weight since it will - // not be read from storage or executed. - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); - // First we make the proposal succeed - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - // It will not close with bad weight/len information - assert_noop!( - Collective::close(Origin::signed(2), hash.clone(), 0, 0, 0), - Error::::WrongProposalLength, - ); - assert_noop!( - Collective::close(Origin::signed(2), hash.clone(), 0, 0, proposal_len), - Error::::WrongProposalWeight, - ); - // Now we make the proposal fail - assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, false)); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); - // It can close even if the weight/len information is bad - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, 0, 0)); - }) - } - - #[test] - fn disapprove_proposal_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); - // Proposal would normally succeed - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - // But Root can disapprove and remove it anyway - assert_ok!(Collective::disapprove_proposal(Origin::root(), hash.clone())); - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 2))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))), - ]); - }) - } -} diff --git a/frame/collective/src/migrations/mod.rs b/frame/collective/src/migrations/mod.rs new file mode 100644 index 0000000000000..26d07a0cd5ac8 --- /dev/null 
+++ b/frame/collective/src/migrations/mod.rs
@@ -0,0 +1,19 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/// Version 4.
+pub mod v4;
diff --git a/frame/collective/src/migrations/v4.rs b/frame/collective/src/migrations/v4.rs
new file mode 100644
index 0000000000000..68284ba4df91d
--- /dev/null
+++ b/frame/collective/src/migrations/v4.rs
@@ -0,0 +1,147 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use sp_io::hashing::twox_128;
+
+use frame_support::{
+	traits::{
+		Get, GetStorageVersion, PalletInfoAccess, StorageVersion,
+		STORAGE_VERSION_STORAGE_KEY_POSTFIX,
+	},
+	weights::Weight,
+};
+
+/// Migrate the entire storage of this pallet to a new prefix.
+///
+/// This new prefix must be the same as the one set in construct_runtime. For safety, use
+/// `PalletInfo` to get it, as:
+/// `<Runtime as frame_system::Config>::PalletInfo::name::<CollectivePallet>`.
+///
+/// The migration will look into the storage version in order not to trigger a migration on an up
+/// to date storage. Thus the on chain storage version must be less than 4 in order to trigger the
+/// migration.
+pub fn migrate<T: frame_system::Config, P: GetStorageVersion + PalletInfoAccess, N: AsRef<str>>(
+	old_pallet_name: N,
+) -> Weight {
+	let old_pallet_name = old_pallet_name.as_ref();
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	if new_pallet_name == old_pallet_name {
+		log::info!(
+			target: "runtime::collective",
+			"New pallet name is equal to the old pallet name. No migration needs to be done.",
+		);
+		return 0
+	}
+
+	let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();
+	log::info!(
+		target: "runtime::collective",
+		"Running migration to v4 for collective with storage version {:?}",
+		on_chain_storage_version,
+	);
+
+	if on_chain_storage_version < 4 {
+		frame_support::storage::migration::move_pallet(
+			old_pallet_name.as_bytes(),
+			new_pallet_name.as_bytes(),
+		);
+		log_migration("migration", old_pallet_name, new_pallet_name);
+
+		StorageVersion::new(4).put::<P>();
+		<T as frame_system::Config>::BlockWeights::get().max_block
+	} else {
+		log::warn!(
+			target: "runtime::collective",
+			"Attempted to apply migration to v4 but failed because storage version is {:?}",
+			on_chain_storage_version,
+		);
+		0
+	}
+}
+
+/// Some checks prior to migration. This can be linked to
+/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn pre_migrate<P: GetStorageVersion + PalletInfoAccess, N: AsRef<str>>(old_pallet_name: N) {
+	let old_pallet_name = old_pallet_name.as_ref();
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+	log_migration("pre-migration", old_pallet_name, new_pallet_name);
+
+	if new_pallet_name == old_pallet_name {
+		return
+	}
+
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX);
+
+	let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|key| Ok(key.to_vec()),
+	);
+
+	// Ensure nothing except the storage_version_key is stored in the new prefix.
+	assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key));
+
+	assert!(<P as GetStorageVersion>::on_chain_storage_version() < 4);
+}
+
+/// Some checks for after migration. This can be linked to
+/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn post_migrate<P: GetStorageVersion + PalletInfoAccess, N: AsRef<str>>(old_pallet_name: N) {
+	let old_pallet_name = old_pallet_name.as_ref();
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+	log_migration("post-migration", old_pallet_name, new_pallet_name);
+
+	if new_pallet_name == old_pallet_name {
+		return
+	}
+
+	// Assert that nothing remains at the old prefix.
+	let old_pallet_prefix = twox_128(old_pallet_name.as_bytes());
+	let old_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		old_pallet_prefix.to_vec(),
+		old_pallet_prefix.to_vec(),
+		|_| Ok(()),
+	);
+	assert_eq!(old_pallet_prefix_iter.count(), 0);
+
+	// NOTE: storage_version_key is already in the new prefix.
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|_| Ok(()),
+	);
+	assert!(new_pallet_prefix_iter.count() >= 1);
+
+	assert_eq!(<P as GetStorageVersion>::on_chain_storage_version(), 4);
+}
+
+fn log_migration(stage: &str, old_pallet_name: &str, new_pallet_name: &str) {
+	log::info!(
+		target: "runtime::collective",
+		"{}, prefix: '{}' to '{}'",
+		stage,
+		old_pallet_name,
+		new_pallet_name,
+	);
+}
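To see how this migration is meant to be driven, the following is a minimal sketch (not part of this diff) of a runtime wiring `migrate`, `pre_migrate` and `post_migrate` into an `OnRuntimeUpgrade` hook. The names `Runtime`, `Council` and the old prefix `"Instance1Collective"` are assumed placeholders; substitute the pallet struct from your `construct_runtime` and whatever name the pallet carried before the rename.

```rust
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

/// One-shot runtime upgrade that moves the collective's storage to its new prefix.
pub struct CouncilStoragePrefixMigration;

impl OnRuntimeUpgrade for CouncilStoragePrefixMigration {
	fn on_runtime_upgrade() -> Weight {
		// `Council` is the pallet struct from `construct_runtime`; the string is
		// the pallet's previous name, i.e. the old storage prefix.
		pallet_collective::migrations::v4::migrate::<Runtime, Council, _>("Instance1Collective")
	}

	#[cfg(feature = "try-runtime")]
	fn pre_upgrade() -> Result<(), &'static str> {
		pallet_collective::migrations::v4::pre_migrate::<Council, _>("Instance1Collective");
		Ok(())
	}

	#[cfg(feature = "try-runtime")]
	fn post_upgrade() -> Result<(), &'static str> {
		pallet_collective::migrations::v4::post_migrate::<Council, _>("Instance1Collective");
		Ok(())
	}
}
```

The struct would then be listed in the `Executive` type's migration tuple so it runs exactly once, guarded by the storage-version check inside `migrate`.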
-License: Unlicense
\ No newline at end of file
+License: Unlicense
diff --git a/frame/example/src/benchmarking.rs b/frame/example/src/benchmarking.rs
new file mode 100644
index 0000000000000..cdf6c152a4880
--- /dev/null
+++ b/frame/example/src/benchmarking.rs
@@ -0,0 +1,76 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Benchmarking for pallet-example.
+
+#![cfg(feature = "runtime-benchmarks")]
+
+use crate::*;
+use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller};
+use frame_system::RawOrigin;
+
+// To actually run this benchmark on pallet-example, we need to put this pallet into the
+// runtime and compile it with the `runtime-benchmarks` feature. The detailed procedure is
+// documented at:
+// https://substrate.dev/docs/en/knowledgebase/runtime/benchmarking#how-to-benchmark
+//
+// The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file.
+// The exact command used to generate the estimate is printed at the top of that file.
+
+// Details on using the benchmarks macro can be seen at:
+// https://substrate.dev/rustdocs/v3.0.0/frame_benchmarking/macro.benchmarks.html
+benchmarks! {
+	// This will measure the execution time of `set_dummy` for b in the [1..1000] range.
+	set_dummy_benchmark {
+		// This is the benchmark setup phase
+		let b in 1 .. 1000;
+	}: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running the `set_dummy` extrinsic call
+	verify {
+		// This is the optional benchmark verification phase, asserting certain states.
+		assert_eq!(Pallet::<T>::dummy(), Some(b.into()))
+	}
+
+	// This will measure the execution time of `accumulate_dummy` for b in the [1..1000] range.
+	// The benchmark execution phase is shorthanded when the name of the benchmark case is the
+	// same as the extrinsic call: `_(...)` is used to represent the extrinsic name.
+	// The benchmark verification phase is omitted.
+	accumulate_dummy {
+		let b in 1 .. 1000;
+		// The caller account is whitelisted for DB reads/writes by the benchmarking macro.
+		let caller: T::AccountId = whitelisted_caller();
+	}: _(RawOrigin::Signed(caller), b.into())
+
+	// This will measure the execution time of sorting a vector.
+	sort_vector {
+		let x in 0 .. 10000;
+		let mut m = Vec::<u32>::new();
+		for i in (0..x).rev() {
+			m.push(i);
+		}
+	}: {
+		// The benchmark execution phase could also be a closure with custom code
+		m.sort();
+	}
+}
+
+// This line generates test cases for benchmarking, which can be run with:
+// `cargo test -p pallet-example --all-features`; you will see an additional line,
+// `test benchmarking::benchmark_tests::test_benchmarks ... ok`, in the result.
+//
+// The line generates three steps per benchmark, with repeat=1, and the three steps are the
+// [low, mid, high] of the range.
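For orientation, a rough sketch of what the `impl_benchmark_test_suite!` invocation on the next line expands to. This is illustrative only, not the macro's literal output; it mirrors the hand-written `test_benchmarks` module that this PR removes from `lib.rs` further below, where the generated `test_benchmark_*` runners are called one by one.

```rust
// Approximate shape of the generated suite (names assumed from the
// `benchmarks!` cases above; the real expansion differs in detail).
#[cfg(test)]
mod benchmark_tests {
	use super::*;

	#[test]
	fn test_benchmarks() {
		crate::tests::new_test_ext().execute_with(|| {
			// Each runner executes the case's setup, call and verify blocks
			// at the low, mid and high points of every declared range.
			assert!(test_benchmark_set_dummy_benchmark::<crate::tests::Test>().is_ok());
			assert!(test_benchmark_accumulate_dummy::<crate::tests::Test>().is_ok());
			assert!(test_benchmark_sort_vector::<crate::tests::Test>().is_ok());
		});
	}
}
```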
+impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 4b10804fb10f7..23c4951c1a603 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! //! # Example Pallet //! //! @@ -25,27 +26,35 @@ //! //! ### Documentation Guidelines: //! -//! -//! +//! //! //! //! ### Documentation Template:
@@ -62,9 +71,9 @@ //! // Include the following links that shows what trait needs to be implemented to use the pallet //! // and the supported dispatchables that are documented in the Call enum. //! -//! - \[`::Trait`](./trait.Trait.html) -//! - \[`Call`](./enum.Call.html) -//! - \[`Module`](./struct.Module.html) +//! - \[`Config`] +//! - \[`Call`] +//! - \[`Pallet`] //! //! \## Overview //! @@ -83,12 +92,13 @@ //! //! \## Terminology //! -//! // Add terminology used in the custom pallet. Include concepts, storage items, or actions that you think -//! // deserve to be noted to give context to the rest of the documentation or pallet usage. The author needs to -//! // use some judgment about what is included. We don't want a list of every storage item nor types - the user -//! // can go to the code for that. For example, "transfer fee" is obvious and should not be included, but -//! // "free balance" and "reserved balance" should be noted to give context to the pallet. -//! // Please do not link to outside resources. The reference docs should be the ultimate source of truth. +//! // Add terminology used in the custom pallet. Include concepts, storage items, or actions that +//! you think // deserve to be noted to give context to the rest of the documentation or pallet +//! usage. The author needs to // use some judgment about what is included. We don't want a list of +//! every storage item nor types - the user // can go to the code for that. For example, "transfer +//! fee" is obvious and should not be included, but // "free balance" and "reserved balance" should +//! be noted to give context to the pallet. // Please do not link to outside resources. The +//! reference docs should be the ultimate source of truth. //! //! //! @@ -105,7 +115,8 @@ //! \#### //! //! // Describe requirements prior to interacting with the custom pallet. -//! // Describe the process of interacting with the custom pallet for this scenario and public API functions used. +//! // Describe the process of interacting with the custom pallet for this scenario and public API +//! functions used. //! //! \## Interface //! @@ -129,14 +140,16 @@ //! //! //! -//! // Reference documentation of aspects such as `storageItems` and `dispatchable` functions should only be -//! // included in the https://docs.rs Rustdocs for Substrate and not repeated in the README file. +//! // Reference documentation of aspects such as `storageItems` and `dispatchable` functions should +//! // only be included in the Rustdocs for Substrate and not repeated in the +//! // README file. //! //! \### Dispatchable Functions //! //! //! -//! // A brief description of dispatchable functions and a link to the rustdoc with their actual documentation. +//! // A brief description of dispatchable functions and a link to the rustdoc with their actual +//! documentation. //! //! // MUST have link to Call enum //! // MUST have origin information included in function doc @@ -153,7 +166,8 @@ //! //! //! -//! // It is up to the writer of the respective pallet (with respect to how much information to provide). +//! // It is up to the writer of the respective pallet (with respect to how much information to +//! provide). //! //! \#### Public Inspection functions - Immutable (getters) //! @@ -211,20 +225,21 @@ //! \```rust //! use ; //! -//! pub trait Trait: ::Trait { } +//! pub trait Config: ::Config { } //! \``` //! //! \### Simple Code Snippet //! -//! // Show a simple example (e.g. how to query a public getter function of ) +//! // Show a simple example (e.g. 
how to query a public getter function of +//! ) //! //! \### Example from FRAME //! //! // Show a usage example in an actual runtime //! //! // See: -//! // - Substrate TCR https://github.com/parity-samples/substrate-tcr -//! // - Substrate Kitties https://shawntabrizi.github.io/substrate-collectables-workshop/#/ +//! // - Substrate TCR +//! // - Substrate Kitties //! //! \## Genesis Config //! @@ -254,30 +269,44 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::marker::PhantomData; +use codec::{Decode, Encode}; use frame_support::{ - dispatch::DispatchResult, decl_module, decl_storage, decl_event, traits::IsSubType, - weights::{DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, Pays}, + dispatch::DispatchResult, + traits::IsSubType, + weights::{ClassifyDispatch, DispatchClass, Pays, PaysFee, WeighData, Weight}, }; -use sp_std::prelude::*; -use frame_system::{ensure_signed, ensure_root}; -use codec::{Encode, Decode}; +use frame_system::ensure_signed; +use log::info; +use scale_info::TypeInfo; use sp_runtime::{ - traits::{ - SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, - }, + traits::{Bounded, DispatchInfoOf, SaturatedConversion, Saturating, SignedExtension}, transaction_validity::{ - ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, }; +use sp_std::{marker::PhantomData, prelude::*}; + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +#[cfg(test)] +mod tests; + +mod benchmarking; +pub mod weights; +pub use weights::*; + +/// A type alias for the balance type from this pallet's point of view. +type BalanceOf = ::Balance; +const MILLICENTS: u32 = 1_000_000_000; // A custom weight calculator tailored for the dispatch call `set_dummy()`. This actually examines // the arguments and makes a decision based upon them. // // The `WeightData` trait has access to the arguments of the dispatch that it wants to assign a -// weight to. Nonetheless, the trait itself can not make any assumptions about what the generic type +// weight to. Nonetheless, the trait itself cannot make any assumptions about what the generic type // of the arguments (`T`) is. Based on our needs, we could replace `T` with a more concrete type -// while implementing the trait. The `decl_module!` expects whatever implements `WeighData` to +// while implementing the trait. The `pallet::weight` expects whatever implements `WeighData` to // replace `T` with a tuple of the dispatch arguments. This is exactly how we will craft the // implementation below. // @@ -285,17 +314,25 @@ use sp_runtime::{ // - The final weight of each dispatch is calculated as the argument of the call multiplied by the // parameter given to the `WeightForSetDummy`'s constructor. // - assigns a dispatch class `operational` if the argument of the call is more than 1000. -struct WeightForSetDummy(BalanceOf); +// +// More information can be read at: +// - https://substrate.dev/docs/en/knowledgebase/learn-substrate/weight +// - https://substrate.dev/docs/en/knowledgebase/runtime/fees#default-weight-annotations +// +// Manually configuring weight is an advanced operation and what you really need may well be +// fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. 
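Before the actual `WeightForSetDummy` implementation below, a self-contained toy model may help to see the policy the comment describes. Here `u64` stands in for the pallet's balance type, and the multiplier and cut-off values mirror the ones used later in this diff (100 and 1000); the function names are invented for illustration.

```rust
// Toy model of the custom weight policy: weight is derived from the call
// argument (scaled to "cents" and multiplied), and large transfers are
// classified as Operational.
const MILLICENTS: u64 = 1_000_000_000;

// Weight = (amount in cents) * multiplier.
fn weigh(amount: u64, multiplier: u64) -> u64 {
	(amount / MILLICENTS) * multiplier
}

// Calls with an argument above 1000 raw units are Operational.
fn is_operational(amount: u64) -> bool {
	amount > 1000
}

fn main() {
	// 5 full cents at a multiplier of 100 yields a weight of 500 ...
	assert_eq!(weigh(5 * MILLICENTS, 100), 500);
	// ... and such a large argument is well past the Operational cut-off.
	assert!(is_operational(5 * MILLICENTS));
}
```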
+struct WeightForSetDummy(BalanceOf); -impl WeighData<(&BalanceOf,)> for WeightForSetDummy -{ +impl WeighData<(&BalanceOf,)> for WeightForSetDummy { fn weigh_data(&self, target: (&BalanceOf,)) -> Weight { let multiplier = self.0; - (*target.0 * multiplier).saturated_into::() + // *target.0 is the amount passed into the extrinsic + let cents = *target.0 / >::from(MILLICENTS); + (cents * multiplier).saturated_into::() } } -impl ClassifyDispatch<(&BalanceOf,)> for WeightForSetDummy { +impl ClassifyDispatch<(&BalanceOf,)> for WeightForSetDummy { fn classify_dispatch(&self, target: (&BalanceOf,)) -> DispatchClass { if *target.0 > >::from(1000u32) { DispatchClass::Operational @@ -305,120 +342,107 @@ impl ClassifyDispatch<(&BalanceOf,)> for WeightFor } } -impl PaysFee<(&BalanceOf,)> for WeightForSetDummy { +impl PaysFee<(&BalanceOf,)> for WeightForSetDummy { fn pays_fee(&self, _target: (&BalanceOf,)) -> Pays { Pays::Yes } } -/// A type alias for the balance type from this pallet's point of view. -type BalanceOf = ::Balance; - -/// Our pallet's configuration trait. All our types and constants go in here. If the -/// pallet is dependent on specific other pallets, then their configuration traits -/// should be added to our implied traits list. -/// -/// `frame_system::Trait` should always be included in our implied traits. -pub trait Trait: pallet_balances::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; -} +// Definition of the pallet logic, to be aggregated at runtime definition through +// `construct_runtime`. +#[frame_support::pallet] +pub mod pallet { + // Import various types used to declare pallet in scope. + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// Our pallet's configuration trait. All our types and constants go in here. If the + /// pallet is dependent on specific other pallets, then their configuration traits + /// should be added to our implied traits list. + /// + /// `frame_system::Config` should always be included. + #[pallet::config] + pub trait Config: pallet_balances::Config + frame_system::Config { + // Setting a constant config parameter from the runtime + #[pallet::constant] + type MagicNumber: Get; + + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// Type representing the weight of this pallet + type WeightInfo: WeightInfo; + } -decl_storage! { - // A macro for the Storage trait, and its implementation, for this pallet. - // This allows for type-safe usage of the Substrate storage database, so you can - // keep things around between blocks. - // - // It is important to update your storage name so that your pallet's - // storage items are isolated from other pallets. - // ---------------------------------vvvvvvv - trait Store for Module as Example { - // Any storage declarations of the form: - // `pub? Name get(fn getter_name)? [config()|config(myname)] [build(|_| {...})] : (= )?;` - // where `` is either: - // - `Type` (a basic value item); or - // - `map hasher(HasherKind) KeyType => ValueType` (a map item). - // - // Note that there are two optional modifiers for the storage type declaration. - // - `Foo: Option`: - // - `Foo::put(1); Foo::get()` returns `Some(1)`; - // - `Foo::kill(); Foo::get()` returns `None`. - // - `Foo: u32`: - // - `Foo::put(1); Foo::get()` returns `1`; - // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). - // e.g. Foo: u32; - // e.g. 
pub Bar get(fn bar): map hasher(blake2_128_concat) T::AccountId => Vec<(T::Balance, u64)>; + // Simple declaration of the `Pallet` type. It is placeholder we use to implement traits and + // method. + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + // Pallet implements [`Hooks`] trait to define some logic to execute in some context. + #[pallet::hooks] + impl Hooks> for Pallet { + // `on_initialize` is executed at the beginning of the block before any extrinsic are + // dispatched. // - // For basic value items, you'll get a type which implements - // `frame_support::StorageValue`. For map items, you'll get a type which - // implements `frame_support::StorageMap`. - // - // If they have a getter (`get(getter_name)`), then your pallet will come - // equipped with `fn getter_name() -> Type` for basic value items or - // `fn getter_name(key: KeyType) -> ValueType` for map items. - Dummy get(fn dummy) config(): Option; - - // A map that has enumerable entries. - Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; + // This function must return the weight consumed by `on_initialize` and `on_finalize`. + fn on_initialize(_n: T::BlockNumber) -> Weight { + // Anything that needs to be done at the start of the block. + // We don't do anything here. + 0 + } - // this one uses the default, we'll demonstrate the usage of 'mutate' API. - Foo get(fn foo) config(): T::Balance; - } -} + // `on_finalize` is executed at the end of block after all extrinsic are dispatched. + fn on_finalize(_n: T::BlockNumber) { + // Perform necessary data/state clean up here. + } -decl_event!( - /// Events are a simple means of reporting specific conditions and - /// circumstances that have happened that users, Dapps and/or chain explorers would find - /// interesting and otherwise difficult to detect. - pub enum Event where B = ::Balance { - // Just a normal `enum`, here's a dummy event to ensure it compiles. - /// Dummy event, just here so there's a generic type that's used. - Dummy(B), + // A runtime code run after every block and have access to extended set of APIs. + // + // For instance you can generate extrinsics for the upcoming produced block. + fn offchain_worker(_n: T::BlockNumber) { + // We don't do anything here. + // but we could dispatch extrinsic (transaction/unsigned/inherent) using + // sp_io::submit_extrinsic. + // To see example on offchain worker, please refer to example-offchain-worker pallet + // accompanied in this repository. + } } -); -// The module declaration. This states the entry points that we handle. The -// macro takes care of the marshalling of arguments and dispatch. -// -// Anyone can have these functions execute by signing and submitting -// an extrinsic. Ensure that calls into each of these execute in a time, memory and -// using storage space proportional to any costs paid for by the caller or otherwise the -// difficulty of forcing the call to happen. -// -// Generally you'll want to split these into three groups: -// - Public calls that are signed by an external account. -// - Root calls that are allowed to be made only by the governance system. -// - Unsigned calls that can be of two kinds: -// * "Inherent extrinsics" that are opinions generally held by the block -// authors that build child blocks. -// * Unsigned Transactions that are of intrinsic recognizable utility to the -// network, and are validated by the runtime. 
-// -// Information about where this dispatch initiated from is provided as the first argument -// "origin". As such functions must always look like: -// -// `fn foo(origin, bar: Bar, baz: Baz) -> Result;` -// -// The `Result` is required as part of the syntax (and expands to the conventional dispatch -// result of `Result<(), &'static str>`). -// -// When you come to `impl` them later in the pallet, you must specify the full type for `origin`: -// -// `fn foo(origin: T::Origin, bar: Bar, baz: Baz) { ... }` -// -// There are three entries in the `frame_system::Origin` enum that correspond -// to the above bullets: `::Signed(AccountId)`, `::Root` and `::None`. You should always match -// against them as the first thing you do in your function. There are three convenience calls -// in system that do the matching for you and return a convenient result: `ensure_signed`, -// `ensure_root` and `ensure_none`. -decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what its working on. - pub struct Module for enum Call where origin: T::Origin { - /// Deposit one of this pallet's events by using the default implementation. - /// It is also possible to provide a custom implementation. - /// For non-generic events, the generic parameter just needs to be dropped, so that it - /// looks like: `fn deposit_event() = default;`. - fn deposit_event() = default; + // The call declaration. This states the entry points that we handle. The + // macro takes care of the marshalling of arguments and dispatch. + // + // Anyone can have these functions execute by signing and submitting + // an extrinsic. Ensure that calls into each of these execute in a time, memory and + // using storage space proportional to any costs paid for by the caller or otherwise the + // difficulty of forcing the call to happen. + // + // Generally you'll want to split these into three groups: + // - Public calls that are signed by an external account. + // - Root calls that are allowed to be made only by the governance system. + // - Unsigned calls that can be of two kinds: + // * "Inherent extrinsics" that are opinions generally held by the block authors that build + // child blocks. + // * Unsigned Transactions that are of intrinsic recognizable utility to the network, and are + // validated by the runtime. + // + // Information about where this dispatch initiated from is provided as the first argument + // "origin". As such functions must always look like: + // + // `fn foo(origin: OriginFor, bar: Bar, baz: Baz) -> DispatchResultWithPostInfo { ... }` + // + // The `DispatchResultWithPostInfo` is required as part of the syntax (and can be found at + // `pallet_prelude::DispatchResultWithPostInfo`). + // + // There are three entries in the `frame_system::Origin` enum that correspond + // to the above bullets: `::Signed(AccountId)`, `::Root` and `::None`. You should always match + // against them as the first thing you do in your function. There are three convenience calls + // in system that do the matching for you and return a convenient result: `ensure_signed`, + // `ensure_root` and `ensure_none`. + #[pallet::call] + impl Pallet { /// This is your public interface. Be extremely careful. /// This is just a simple example of how to interact with the pallet from the external /// world. @@ -457,18 +481,24 @@ decl_module! { // // If you don't respect these rules, it is likely that your chain will be attackable. 
// - // Each transaction can define an optional `#[weight]` attribute to convey a set of static - // information about its dispatch. FRAME System and FRAME Executive pallet then use this - // information to properly execute the transaction, whilst keeping the total load of the - // chain in a moderate rate. + // Each transaction must define a `#[pallet::weight(..)]` attribute to convey a set of + // static information about its dispatch. FRAME System and FRAME Executive pallet then use + // this information to properly execute the transaction, whilst keeping the total load of + // the chain in a moderate rate. + // + // The parenthesized value of the `#[pallet::weight(..)]` attribute can be any type that + // implements a set of traits, namely [`WeighData`] and [`ClassifyDispatch`]. + // The former conveys the weight (a numeric representation of pure execution time and + // difficulty) of the transaction and the latter demonstrates the [`DispatchClass`] of the + // call. A higher weight means a larger transaction (less of which can be placed in a + // single block). // - // The _right-hand-side_ value of the `#[weight]` attribute can be any type that implements - // a set of traits, namely [`WeighData`] and [`ClassifyDispatch`]. The former conveys the - // weight (a numeric representation of pure execution time and difficulty) of the - // transaction and the latter demonstrates the [`DispatchClass`] of the call. A higher - // weight means a larger transaction (less of which can be placed in a single block). - #[weight = 0] - fn accumulate_dummy(origin, increase_by: T::Balance) -> DispatchResult { + // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the + // benchmark toolchain. + #[pallet::weight( + ::WeightInfo::accumulate_dummy((*increase_by).saturated_into()) + )] + pub fn accumulate_dummy(origin: OriginFor, increase_by: T::Balance) -> DispatchResult { // This is a public call, so we ensure that the origin is some signed account. let _sender = ensure_signed(origin)?; @@ -487,14 +517,15 @@ decl_module! { // Here's the new one of read and then modify the value. >::mutate(|dummy| { - let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); + // Using `saturating_add` instead of a regular `+` to avoid overflowing + let new_dummy = dummy.map_or(increase_by, |d| d.saturating_add(increase_by)); *dummy = Some(new_dummy); }); // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(RawEvent::Dummy(increase_by)); + Self::deposit_event(Event::AccumulateDummy(increase_by)); - // All good. + // All good, no refund. Ok(()) } @@ -505,39 +536,103 @@ decl_module! { // calls to be executed - we don't need to care why. Because it's privileged, we can // assume it's a one-off operation and substantial processing/storage/memory can be used // without worrying about gameability or attack scenarios. - // If you do not specify `Result` explicitly as return value, it will be added automatically - // for you and `Ok(())` will be returned. 
- #[weight = WeightForSetDummy::(>::from(100u32))] - fn set_dummy(origin, #[compact] new_value: T::Balance) { + // + // The weight for this extrinsic we use our own weight object `WeightForSetDummy` to + // determine its weight + #[pallet::weight(WeightForSetDummy::(>::from(100u32)))] + pub fn set_dummy( + origin: OriginFor, + #[pallet::compact] new_value: T::Balance, + ) -> DispatchResult { ensure_root(origin)?; + + // Print out log or debug message in the console via log::{error, warn, info, debug, + // trace}, accepting format strings similar to `println!`. + // https://substrate.dev/rustdocs/v3.0.0/log/index.html + info!("New value is now: {:?}", new_value); + // Put the new value into storage. >::put(new_value); - } - // The signature could also look like: `fn on_initialize()`. - // This function could also very well have a weight annotation, similar to any other. The - // only difference is that it mut be returned, not annotated. - fn on_initialize(_n: T::BlockNumber) -> Weight { - // Anything that needs to be done at the start of the block. - // We don't do anything here. + Self::deposit_event(Event::SetDummy(new_value)); - 0 + // All good, no refund. + Ok(()) } + } - // The signature could also look like: `fn on_finalize()` - fn on_finalize(_n: T::BlockNumber) { - // Anything that needs to be done at the end of the block. - // We just kill our dummy storage item. - >::kill(); + /// Events are a simple means of reporting specific conditions and + /// circumstances that have happened that users, Dapps and/or chain explorers would find + /// interesting and otherwise difficult to detect. + #[pallet::event] + /// This attribute generate the function `deposit_event` to deposit one of this pallet event, + /// it is optional, it is also possible to provide a custom implementation. + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + // Just a normal `enum`, here's a dummy event to ensure it compiles. + /// Dummy event, just here so there's a generic type that's used. + AccumulateDummy(BalanceOf), + SetDummy(BalanceOf), + SetBar(T::AccountId, BalanceOf), + } + + // pallet::storage attributes allow for type-safe usage of the Substrate storage database, + // so you can keep things around between blocks. + // + // Any storage must be one of `StorageValue`, `StorageMap` or `StorageDoubleMap`. + // The first generic holds the prefix to use and is generated by the macro. + // The query kind is either `OptionQuery` (the default) or `ValueQuery`. + // - for `type Foo = StorageValue<_, u32, OptionQuery>`: + // - `Foo::put(1); Foo::get()` returns `Some(1)`; + // - `Foo::kill(); Foo::get()` returns `None`. + // - for `type Foo = StorageValue<_, u32, ValueQuery>`: + // - `Foo::put(1); Foo::get()` returns `1`; + // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). + #[pallet::storage] + // The getter attribute generate a function on `Pallet` placeholder: + // `fn getter_name() -> Type` for basic value items or + // `fn getter_name(key: KeyType) -> ValueType` for map items. + #[pallet::getter(fn dummy)] + pub(super) type Dummy = StorageValue<_, T::Balance>; + + // A map that has enumerable entries. + #[pallet::storage] + #[pallet::getter(fn bar)] + pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance>; + + // this one uses the query kind: `ValueQuery`, we'll demonstrate the usage of 'mutate' API. 
+ #[pallet::storage] + #[pallet::getter(fn foo)] + pub(super) type Foo = StorageValue<_, T::Balance, ValueQuery>; + + #[pallet::storage] + pub type CountedMap = CountedStorageMap<_, Blake2_128Concat, u8, u16>; + + // The genesis config type. + #[pallet::genesis_config] + pub struct GenesisConfig { + pub dummy: T::Balance, + pub bar: Vec<(T::AccountId, T::Balance)>, + pub foo: T::Balance, + } + + // The default value for the genesis config type. + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { dummy: Default::default(), bar: Default::default(), foo: Default::default() } } + } - // A runtime code run after every block and have access to extended set of APIs. - // - // For instance you can generate extrinsics for the upcoming produced block. - fn offchain_worker(_n: T::BlockNumber) { - // We don't do anything here. - // but we could dispatch extrinsic (transaction/unsigned/inherent) using - // sp_io::submit_extrinsic + // The build of genesis for the pallet. + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.dummy); + for (a, b) in &self.bar { + >::insert(a, b); + } + >::put(&self.foo); } } } @@ -547,16 +642,17 @@ decl_module! { // - Public interface. These are functions that are `pub` and generally fall into inspector // functions that do not write to storage and operation functions that do. // - Private functions. These are your usual private utilities unavailable to other pallets. -impl Module { +impl Pallet { // Add public immutables and private mutables. #[allow(dead_code)] fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { let _sender = ensure_signed(origin)?; let prev = >::get(); - // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an Option<> type. + // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an + // Option<> type. let result = >::mutate(|foo| { - *foo = *foo + increase_by; + *foo = foo.saturating_add(increase_by); *foo }); assert!(prev + increase_by == result); @@ -570,13 +666,14 @@ impl Module { // decodable type that implements `SignedExtension`. See the trait definition for the full list of // bounds. As a convention, you can follow this approach to create an extension for your pallet: // - If the extension does not carry any data, then use a tuple struct with just a `marker` -// (needed for the compiler to accept `T: Trait`) will suffice. +// (needed for the compiler to accept `T: Config`) will suffice. // - Otherwise, create a tuple struct which contains the external data. Of course, for the entire // struct to be decodable, each individual item also needs to be decodable. // // Note that a signed extension can also indicate that a particular data must be present in the // _signing payload_ of a transaction by providing an implementation for the `additional_signed` -// method. This example will not cover this type of extension. See `CheckRuntime` in FRAME System +// method. This example will not cover this type of extension. See `CheckSpecVersion` in +// [FRAME System](https://github.com/paritytech/substrate/tree/master/frame/system#signed-extensions) // for an example. // // Using the extension, you can add some hooks to the life cycle of each transaction. Note that by @@ -600,26 +697,29 @@ impl Module { /// /// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No /// particular reason why, just to demonstrate the power of signed extensions. 
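Before the extension type defined next, it may help to see where such an extension plugs in. The sketch below (assumed runtime name `Runtime`; not part of this diff) shows a runtime's `SignedExtra` tuple with the pallet's extension appended after the standard system checks. Every member of this tuple runs for every transaction, which is how `WatchDummy` gets to inspect each call.

```rust
// A sketch of opting in to a custom signed extension at the runtime level.
// The frame_system extensions listed are the standard ones; only the last
// entry is specific to this example pallet.
pub type SignedExtra = (
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckTxVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckEra<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_example::WatchDummy<Runtime>,
);
```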
-#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct WatchDummy(PhantomData); +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct WatchDummy(PhantomData); -impl sp_std::fmt::Debug for WatchDummy { +impl sp_std::fmt::Debug for WatchDummy { fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "WatchDummy") } } -impl SignedExtension for WatchDummy +impl SignedExtension for WatchDummy where - ::Call: IsSubType>, + ::Call: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = (); type Pre = (); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } fn validate( &self, @@ -635,234 +735,14 @@ where // check for `set_dummy` match call.is_sub_type() { - Some(Call::set_dummy(..)) => { + Some(Call::set_dummy { .. }) => { sp_runtime::print("set_dummy was received."); let mut valid_tx = ValidTransaction::default(); valid_tx.priority = Bounded::max_value(); Ok(valid_tx) - } + }, _ => Ok(Default::default()), } } } - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking { - use super::*; - use frame_benchmarking::{benchmarks, account}; - use frame_system::RawOrigin; - - benchmarks!{ - _ { - // Define a common range for `b`. - let b in 1 .. 1000 => (); - } - - // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. - accumulate_dummy { - let b in ...; - let caller = account("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..1000] range. - set_dummy { - let b in ...; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..10] range. - another_set_dummy { - let b in 1 .. 10; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of sorting a vector. - sort_vector { - let x in 0 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); - } - }: { - m.sort(); - } - } - - #[cfg(test)] - mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_accumulate_dummy::()); - assert_ok!(test_benchmark_set_dummy::()); - assert_ok!(test_benchmark_another_set_dummy::()); - assert_ok!(test_benchmark_sort_vector::()); - }); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::{DispatchInfo, GetDispatchInfo}, traits::{OnInitialize, OnFinalize} - }; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - impl_outer_dispatch! { - pub enum OuterCall for Test where origin: Origin { - self::Example, - } - } - - // For testing the pallet, we construct most of a mock runtime. 
This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = OuterCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type PalletInfo = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Test { - type MaxLocks = (); - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - } - impl Trait for Test { - type Event = (); - } - type System = frame_system::Module; - type Example = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ - dummy: 42, - // we configure the map with (key, value) pairs. - bar: vec![(1, 2), (2, 3)], - foo: 24, - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - #[test] - fn it_works_for_optional_value() { - new_test_ext().execute_with(|| { - // Check that GenesisBuilder works properly. - assert_eq!(Example::dummy(), Some(42)); - - // Check that accumulate works when we have Some value in Dummy already. - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 27)); - assert_eq!(Example::dummy(), Some(69)); - - // Check that finalizing the block removes Dummy from storage. - >::on_finalize(1); - assert_eq!(Example::dummy(), None); - - // Check that accumulate works when we Dummy has None in it. 
- >::on_initialize(2); - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 42)); - assert_eq!(Example::dummy(), Some(42)); - }); - } - - #[test] - fn it_works_for_default_value() { - new_test_ext().execute_with(|| { - assert_eq!(Example::foo(), 24); - assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); - assert_eq!(Example::foo(), 25); - }); - } - - #[test] - fn signed_ext_watch_dummy_works() { - new_test_ext().execute_with(|| { - let call = >::set_dummy(10).into(); - let info = DispatchInfo::default(); - - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 150) - .unwrap() - .priority, - u64::max_value(), - ); - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 250), - InvalidTransaction::ExhaustsResources.into(), - ); - }) - } - - #[test] - fn weights_work() { - // must have a defined weight. - let default_call = >::accumulate_dummy(10); - let info = default_call.get_dispatch_info(); - // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert_eq!(info.weight, 0); - - // must have a custom weight of `100 * arg = 2000` - let custom_call = >::set_dummy(20); - let info = custom_call.get_dispatch_info(); - assert_eq!(info.weight, 2000); - } -} diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs new file mode 100644 index 0000000000000..4c2274572db81 --- /dev/null +++ b/frame/example/src/tests.rs @@ -0,0 +1,205 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-example. + +use crate::*; +use frame_support::{ + assert_ok, parameter_types, + traits::OnInitialize, + weights::{DispatchInfo, GetDispatchInfo}, +}; +use sp_core::H256; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; +// Reexport crate as its pallet name for construct_runtime. +use crate as pallet_example; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// For testing the pallet, we construct a mock runtime. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Example: pallet_example::{Pallet, Call, Storage, Config, Event}, + } +); + +parameter_types! 
diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs
new file mode 100644
index 0000000000000..4c2274572db81
--- /dev/null
+++ b/frame/example/src/tests.rs
@@ -0,0 +1,205 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Tests for pallet-example.
+
+use crate::*;
+use frame_support::{
+	assert_ok, parameter_types,
+	traits::OnInitialize,
+	weights::{DispatchInfo, GetDispatchInfo},
+};
+use sp_core::H256;
+// The testing primitives are very useful for avoiding having to work with signatures
+// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
+	BuildStorage,
+};
+// Reexport crate as its pallet name for construct_runtime.
+use crate as pallet_example;
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+// For testing the pallet, we construct a mock runtime.
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+		Example: pallet_example::{Pallet, Call, Storage, Config<T>, Event<T>},
+	}
+);
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+	pub BlockWeights: frame_system::limits::BlockWeights =
+		frame_system::limits::BlockWeights::simple_max(1024);
+}
+impl frame_system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = ();
+	type BlockLength = ();
+	type DbWeight = ();
+	type Origin = Origin;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Hash = H256;
+	type Call = Call;
+	type Hashing = BlakeTwo256;
+	type AccountId = u64;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Header = Header;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = pallet_balances::AccountData<u64>;
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+parameter_types! {
+	pub const ExistentialDeposit: u64 = 1;
+}
+impl pallet_balances::Config for Test {
+	type MaxLocks = ();
+	type MaxReserves = ();
+	type ReserveIdentifier = [u8; 8];
+	type Balance = u64;
+	type DustRemoval = ();
+	type Event = Event;
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+	type WeightInfo = ();
+}
+
+parameter_types! {
+	pub const MagicNumber: u64 = 1_000_000_000;
+}
+impl Config for Test {
+	type MagicNumber = MagicNumber;
+	type Event = Event;
+	type WeightInfo = ();
+}
+
+// This function basically just builds a genesis storage key/value store according to
+// our desired mockup.
+pub fn new_test_ext() -> sp_io::TestExternalities {
+	let t = GenesisConfig {
+		// We use default for brevity, but you can configure as desired if needed.
+		system: Default::default(),
+		balances: Default::default(),
+		example: pallet_example::GenesisConfig {
+			dummy: 42,
+			// we configure the map with (key, value) pairs.
+			bar: vec![(1, 2), (2, 3)],
+			foo: 24,
+		},
+	}
+	.build_storage()
+	.unwrap();
+	t.into()
+}
+
+#[test]
+fn it_works_for_optional_value() {
+	new_test_ext().execute_with(|| {
+		// Check that GenesisBuilder works properly.
+		let val1 = 42;
+		let val2 = 27;
+		assert_eq!(Example::dummy(), Some(val1));
+
+		// Check that accumulate works when we have Some value in Dummy already.
+		assert_ok!(Example::accumulate_dummy(Origin::signed(1), val2));
+		assert_eq!(Example::dummy(), Some(val1 + val2));
+
+		// Check that accumulate works when Dummy has None in it.
+		<Example as OnInitialize<u64>>::on_initialize(2);
+		assert_ok!(Example::accumulate_dummy(Origin::signed(1), val1));
+		assert_eq!(Example::dummy(), Some(val1 + val2 + val1));
+	});
+}
+
+#[test]
+fn it_works_for_default_value() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(Example::foo(), 24);
+		assert_ok!(Example::accumulate_foo(Origin::signed(1), 1));
+		assert_eq!(Example::foo(), 25);
+	});
+}
+
+#[test]
+fn set_dummy_works() {
+	new_test_ext().execute_with(|| {
+		let test_val = 133;
+		assert_ok!(Example::set_dummy(Origin::root(), test_val.into()));
+		assert_eq!(Example::dummy(), Some(test_val));
+	});
+}
+
+#[test]
+fn signed_ext_watch_dummy_works() {
+	new_test_ext().execute_with(|| {
+		let call = pallet_example::Call::set_dummy { new_value: 10 }.into();
+		let info = DispatchInfo::default();
+
+		assert_eq!(
+			WatchDummy::<Test>(PhantomData)
+				.validate(&1, &call, &info, 150)
+				.unwrap()
+				.priority,
+			u64::MAX,
+		);
+		assert_eq!(
+			WatchDummy::<Test>(PhantomData).validate(&1, &call, &info, 250),
+			InvalidTransaction::ExhaustsResources.into(),
+		);
+	})
+}
+
+#[test]
+fn counted_map_works() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(CountedMap::<Test>::count(), 0);
+		CountedMap::<Test>::insert(3, 3);
+		assert_eq!(CountedMap::<Test>::count(), 1);
+	})
+}
+
+#[test]
+fn weights_work() {
+	// must have a defined weight.
+	let default_call = pallet_example::Call::<Test>::accumulate_dummy { increase_by: 10 };
+	let info1 = default_call.get_dispatch_info();
+	// aka. `let info = <Call<Test> as GetDispatchInfo>::get_dispatch_info(&default_call);`
+	assert!(info1.weight > 0);
+
+	// `set_dummy` is simpler than `accumulate_dummy`, and the weight
+	// should be less.
+	let custom_call = pallet_example::Call::<Test>::set_dummy { new_value: 20 };
+	let info2 = custom_call.get_dispatch_info();
+	assert!(info1.weight > info2.weight);
+}
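The autogenerated `weights.rs` added next consists purely of arithmetic over benchmark-measured constants: a base weight, a per-parameter slope, and per-access database weights. A standalone sketch of how one such formula evaluates; the base and slope mirror `accumulate_dummy` below, but the DB constants are illustrative placeholders, not the real `RocksDbWeight` values:

```rust
type Weight = u64;

// Illustrative constants; a real runtime takes these from benchmarking.
const BASE: Weight = 51_353_000; // measured base execution time
const PER_B: Weight = 14_000; // slope per unit of the `b` parameter
const DB_READ: Weight = 25_000_000; // assumed per-read cost
const DB_WRITE: Weight = 100_000_000; // assumed per-write cost

fn accumulate_dummy_weight(b: Weight) -> Weight {
	BASE.saturating_add(PER_B.saturating_mul(b))
		.saturating_add(DB_READ) // reads(1)
		.saturating_add(DB_WRITE) // writes(1)
}

fn main() {
	// One storage read, one write, plus the measured execution time.
	assert_eq!(accumulate_dummy_weight(0), 51_353_000 + 25_000_000 + 100_000_000);
}
```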
diff --git a/frame/example/src/weights.rs b/frame/example/src/weights.rs
new file mode 100644
index 0000000000000..efcfdc6729b53
--- /dev/null
+++ b/frame/example/src/weights.rs
@@ -0,0 +1,101 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_example
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
+//! DATE: 2021-03-15, STEPS: `[100, ]`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// ./target/release/substrate
+// benchmark
+// --chain
+// dev
+// --execution
+// wasm
+// --wasm-execution
+// compiled
+// --pallet
+// pallet_example
+// --extrinsic
+// *
+// --steps
+// 100
+// --repeat
+// 10
+// --raw
+// --output
+// ./
+// --template
+// ./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_example.
+pub trait WeightInfo {
+	fn set_dummy_benchmark(b: u32, ) -> Weight;
+	fn accumulate_dummy(b: u32, ) -> Weight;
+	fn sort_vector(x: u32, ) -> Weight;
+}
+
+/// Weights for pallet_example using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	fn set_dummy_benchmark(b: u32, ) -> Weight {
+		(5_834_000 as Weight)
+			.saturating_add((24_000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	fn accumulate_dummy(b: u32, ) -> Weight {
+		(51_353_000 as Weight)
+			.saturating_add((14_000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	fn sort_vector(x: u32, ) -> Weight {
+		(2_569_000 as Weight)
+			// Standard Error: 0
+			.saturating_add((4_000 as Weight).saturating_mul(x as Weight))
+	}
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+	fn set_dummy_benchmark(b: u32, ) -> Weight {
+		(5_834_000 as Weight)
+			.saturating_add((24_000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+	}
+	fn accumulate_dummy(b: u32, ) -> Weight {
+		(51_353_000 as Weight)
+			.saturating_add((14_000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(RocksDbWeight::get().reads(1 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(1 as Weight))
+	}
+	fn sort_vector(x: u32, ) -> Weight {
+		(2_569_000 as Weight)
+			// Standard Error: 0
+			.saturating_add((4_000 as Weight).saturating_mul(x as Weight))
+	}
+}
diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml
index 3bd8da04e6cf1..1abbf50e6a4c4 100644
--- a/frame/executive/Cargo.toml
+++ b/frame/executive/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "frame-executive"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,37 +13,38 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
-serde = { version = "1.0.101", optional = true }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
-sp-tracing = { version = "2.0.0", default-features = false, path = "../../primitives/tracing" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" }
-sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" }
+codec = { package = 
"parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "4.0.0-dev", default-features = false, path = "../../primitives/tracing" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } [dev-dependencies] hex-literal = "0.3.1" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -pallet-indices = { version = "2.0.0", path = "../indices" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-transaction-payment = { version = "4.0.0-dev", path = "../transaction-payment" } +sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } +sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } [features] default = ["std"] -with-tracing = [ - "sp-tracing/with-tracing" -] +with-tracing = ["sp-tracing/with-tracing"] std = [ "codec/std", + "scale-info/std", "frame-support/std", "frame-system/std", - "serde", "sp-core/std", "sp-runtime/std", "sp-tracing/std", "sp-std/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/executive/README.md b/frame/executive/README.md index 24b354902e876..ae3bbf1a9d994 100644 --- a/frame/executive/README.md +++ b/frame/executive/README.md @@ -22,7 +22,6 @@ The Executive module provides functions to: The Executive module provides the following implementations: -- `ExecuteBlock`: Trait that can be used to execute a block. - `Executive`: Type that can be used to make the FRAME available from the runtime. ## Usage @@ -36,7 +35,7 @@ The default Substrate node template declares the [`Executive`](https://docs.rs/f ```rust # /// Executive: handles dispatch to the various modules. -pub type Executive = executive::Executive; +pub type Executive = executive::Executive; ``` ### Custom `OnRuntimeUpgrade` logic @@ -55,7 +54,7 @@ impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { } } -pub type Executive = executive::Executive; +pub type Executive = executive::Executive; ``` -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index bbd077227a29e..655a38fe1b540 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,7 +44,8 @@ //! //! ## Usage //! -//! 
The default Substrate node template declares the [`Executive`](./struct.Executive.html) type in its library. +//! The default Substrate node template declares the [`Executive`](./struct.Executive.html) type in +//! its library. //! //! ### Example //! @@ -58,7 +59,7 @@ //! # type Context = frame_system::ChainContext; //! # pub type Block = generic::Block; //! # pub type Balances = u64; -//! # pub type AllModules = u64; +//! # pub type AllPallets = u64; //! # pub enum Runtime {}; //! # use sp_runtime::transaction_validity::{ //! # TransactionValidity, UnknownTransaction, TransactionSource, @@ -72,7 +73,7 @@ //! # } //! # } //! /// Executive: handles dispatch to the various modules. -//! pub type Executive = executive::Executive; +//! pub type Executive = executive::Executive; //! ``` //! //! ### Custom `OnRuntimeUpgrade` logic @@ -89,7 +90,7 @@ //! # type Context = frame_system::ChainContext; //! # pub type Block = generic::Block; //! # pub type Balances = u64; -//! # pub type AllModules = u64; +//! # pub type AllPallets = u64; //! # pub enum Runtime {}; //! # use sp_runtime::transaction_validity::{ //! # TransactionValidity, UnknownTransaction, TransactionSource, @@ -110,33 +111,31 @@ //! } //! } //! -//! pub type Executive = executive::Executive; +//! pub type Executive = executive::Executive; //! ``` #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, marker::PhantomData}; +use codec::{Codec, Encode}; use frame_support::{ - storage::StorageValue, weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, - traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade, OffchainWorker}, dispatch::PostDispatchInfo, + traits::{ + EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, OnFinalize, OnIdle, OnInitialize, + OnRuntimeUpgrade, + }, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, }; +use frame_system::DigestOf; use sp_runtime::{ - generic::Digest, ApplyExtrinsicResult, + generic::Digest, traits::{ - self, Header, Zero, One, Checkable, Applyable, CheckEqual, ValidateUnsigned, NumberFor, - Block as BlockT, Dispatchable, Saturating, + self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, Saturating, + ValidateUnsigned, Zero, }, - transaction_validity::{TransactionValidity, TransactionSource}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, }; -use codec::{Codec, Encode}; -use frame_system::{extrinsics_root, DigestOf}; - -/// Trait that can be used to execute a block. -pub trait ExecuteBlock { - /// Actually execute all transitions for `block`. - fn execute_block(block: Block); -} +use sp_std::{marker::PhantomData, prelude::*}; pub type CheckedOf = >::Checked; pub type CallOf = as Applyable>::Call; @@ -145,124 +144,161 @@ pub type OriginOf = as Dispatchable>::Origin; /// Main entry point for certain runtime actions as e.g. `execute_block`. /// /// Generic parameters: -/// - `System`: Something that implements `frame_system::Trait` +/// - `System`: Something that implements `frame_system::Config` /// - `Block`: The block type of the runtime /// - `Context`: The context that is used when checking an extrinsic. /// - `UnsignedValidator`: The unsigned transaction validator of the runtime. -/// - `AllModules`: Tuple that contains all modules. Will be used to call e.g. `on_initialize`. +/// - `AllPallets`: Tuple that contains all modules. Will be used to call e.g. `on_initialize`. /// - `OnRuntimeUpgrade`: Custom logic that should be called after a runtime upgrade. 
Modules are -/// already called by `AllModules`. It will be called before all modules will -/// be called. -pub struct Executive( - PhantomData<(System, Block, Context, UnsignedValidator, AllModules, OnRuntimeUpgrade)> +/// already called by `AllPallets`. It will be called before all modules will be called. +pub struct Executive( + PhantomData<(System, Block, Context, UnsignedValidator, AllPallets, OnRuntimeUpgrade)>, ); impl< - System: frame_system::Trait, - Block: traits::Block, - Context: Default, - UnsignedValidator, - AllModules: - OnRuntimeUpgrade + - OnInitialize + - OnFinalize + - OffchainWorker, - COnRuntimeUpgrade: OnRuntimeUpgrade, -> ExecuteBlock for - Executive + System: frame_system::Config + EnsureInherentsAreFirst, + Block: traits::Block
, + Context: Default, + UnsignedValidator, + AllPallets: OnRuntimeUpgrade + + OnInitialize + + OnIdle + + OnFinalize + + OffchainWorker, + COnRuntimeUpgrade: OnRuntimeUpgrade, + > ExecuteBlock + for Executive where Block::Extrinsic: Checkable + Codec, - CheckedOf: - Applyable + - GetDispatchInfo, - CallOf: Dispatchable, + CheckedOf: Applyable + GetDispatchInfo, + CallOf: + Dispatchable, OriginOf: From>, - UnsignedValidator: ValidateUnsigned>, + UnsignedValidator: ValidateUnsigned>, { fn execute_block(block: Block) { - Executive::::execute_block(block); + Executive::< + System, + Block, + Context, + UnsignedValidator, + AllPallets, + COnRuntimeUpgrade, + >::execute_block(block); } } impl< - System: frame_system::Trait, - Block: traits::Block, - Context: Default, - UnsignedValidator, - AllModules: - OnRuntimeUpgrade + - OnInitialize + - OnFinalize + - OffchainWorker, - COnRuntimeUpgrade: OnRuntimeUpgrade, -> Executive + System: frame_system::Config + EnsureInherentsAreFirst, + Block: traits::Block
, + Context: Default, + UnsignedValidator, + AllPallets: OnRuntimeUpgrade + + OnInitialize + + OnIdle + + OnFinalize + + OffchainWorker, + COnRuntimeUpgrade: OnRuntimeUpgrade, + > Executive where Block::Extrinsic: Checkable + Codec, - CheckedOf: - Applyable + - GetDispatchInfo, - CallOf: Dispatchable, + CheckedOf: Applyable + GetDispatchInfo, + CallOf: + Dispatchable, OriginOf: From>, - UnsignedValidator: ValidateUnsigned>, + UnsignedValidator: ValidateUnsigned>, { + /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight. + pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight { + let mut weight = 0; + weight = weight.saturating_add(COnRuntimeUpgrade::on_runtime_upgrade()); + weight = weight.saturating_add( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + ); + weight = weight.saturating_add(::on_runtime_upgrade()); + + weight + } + + /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks. + /// + /// This should only be used for testing. + #[cfg(feature = "try-runtime")] + pub fn try_runtime_upgrade() -> Result { + < + (frame_system::Pallet::, COnRuntimeUpgrade, AllPallets) + as + OnRuntimeUpgrade + >::pre_upgrade()?; + + let weight = Self::execute_on_runtime_upgrade(); + + < + (frame_system::Pallet::, COnRuntimeUpgrade, AllPallets) + as + OnRuntimeUpgrade + >::post_upgrade()?; + + Ok(weight) + } + /// Start the execution of a particular block. pub fn initialize_block(header: &System::Header) { sp_io::init_tracing(); sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block"); let digests = Self::extract_pre_digest(&header); - Self::initialize_block_impl( - header.number(), - header.parent_hash(), - header.extrinsics_root(), - &digests - ); + Self::initialize_block_impl(header.number(), header.parent_hash(), &digests); } fn extract_pre_digest(header: &System::Header) -> DigestOf { let mut digest = >::default(); - header.digest().logs() - .iter() - .for_each(|d| if d.as_pre_runtime().is_some() { + header.digest().logs().iter().for_each(|d| { + if d.as_pre_runtime().is_some() { digest.push(d.clone()) - }); + } + }); digest } fn initialize_block_impl( block_number: &System::BlockNumber, parent_hash: &System::Hash, - extrinsics_root: &System::Hash, digest: &Digest, ) { + let mut weight = 0; if Self::runtime_upgraded() { - // System is not part of `AllModules`, so we need to call this manually. 
- let mut weight = as OnRuntimeUpgrade>::on_runtime_upgrade(); - weight = weight.saturating_add(COnRuntimeUpgrade::on_runtime_upgrade()); - weight = weight.saturating_add(::on_runtime_upgrade()); - >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); + weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); } - >::initialize( + >::initialize( block_number, parent_hash, - extrinsics_root, digest, frame_system::InitKind::Full, ); - as OnInitialize>::on_initialize(*block_number); - let weight = >::on_initialize(*block_number) - .saturating_add(>::get()); - >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); + weight = weight.saturating_add( as OnInitialize< + System::BlockNumber, + >>::on_initialize(*block_number)); + weight = weight.saturating_add( + >::on_initialize(*block_number), + ); + weight = weight.saturating_add( + >::get().base_block, + ); + >::register_extra_weight_unchecked( + weight, + DispatchClass::Mandatory, + ); - frame_system::Module::::note_finished_initialize(); + frame_system::Pallet::::note_finished_initialize(); } /// Returns if the runtime was upgraded since the last time this function was called. fn runtime_upgraded() -> bool { - let last = frame_system::LastRuntimeUpgrade::get(); + let last = frame_system::LastRuntimeUpgrade::::get(); let current = >::get(); if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { - frame_system::LastRuntimeUpgrade::put( + frame_system::LastRuntimeUpgrade::::put( frame_system::LastRuntimeUpgradeInfo::from(current), ); true @@ -278,23 +314,23 @@ where // Check that `parent_hash` is correct. let n = header.number().clone(); assert!( - n > System::BlockNumber::zero() - && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), - "Parent hash should be valid." + n > System::BlockNumber::zero() && + >::block_hash(n - System::BlockNumber::one()) == + *header.parent_hash(), + "Parent hash should be valid.", ); - // Check that transaction trie root represents the transactions. - let xts_root = extrinsics_root::(&block.extrinsics()); - header.extrinsics_root().check_equal(&xts_root); - assert!(header.extrinsics_root() == &xts_root, "Transaction trie root must be valid."); + if let Err(i) = System::ensure_inherents_are_first(block) { + panic!("Invalid inherent position for extrinsic at index {}", i); + } } /// Actually execute all transitions for `block`. pub fn execute_block(block: Block) { sp_io::init_tracing(); sp_tracing::within_span! { - sp_tracing::info_span!( "execute_block", ?block); - { + sp_tracing::info_span!("execute_block", ?block); + Self::initialize_block(block.header()); // any initial checks @@ -312,32 +348,67 @@ where // any final checks Self::final_checks(&header); - } }; + } } /// Execute given extrinsics and take care of post-extrinsics book-keeping. 
- fn execute_extrinsics_with_book_keeping(extrinsics: Vec, block_number: NumberFor) { - extrinsics.into_iter().for_each(Self::apply_extrinsic_no_note); + fn execute_extrinsics_with_book_keeping( + extrinsics: Vec, + block_number: NumberFor, + ) { + extrinsics.into_iter().for_each(|e| { + if let Err(e) = Self::apply_extrinsic(e) { + let err: &'static str = e.into(); + panic!("{}", err) + } + }); // post-extrinsics book-keeping - >::note_finished_extrinsics(); - as OnFinalize>::on_finalize(block_number); - >::on_finalize(block_number); + >::note_finished_extrinsics(); + + Self::idle_and_finalize_hook(block_number); } /// Finalize the block - it is up the caller to ensure that all header fields are valid /// except state-root. pub fn finalize_block() -> System::Header { sp_io::init_tracing(); - sp_tracing::enter_span!( sp_tracing::Level::TRACE, "finalize_block" ); - >::note_finished_extrinsics(); - let block_number = >::block_number(); - as OnFinalize>::on_finalize(block_number); - >::on_finalize(block_number); - - // set up extrinsics - >::derive_extrinsics(); - >::finalize() + sp_tracing::enter_span!(sp_tracing::Level::TRACE, "finalize_block"); + >::note_finished_extrinsics(); + let block_number = >::block_number(); + + Self::idle_and_finalize_hook(block_number); + + >::finalize() + } + + fn idle_and_finalize_hook(block_number: NumberFor) { + let weight = >::block_weight(); + let max_weight = >::get().max_block; + let mut remaining_weight = max_weight.saturating_sub(weight.total()); + + if remaining_weight > 0 { + let mut used_weight = + as OnIdle>::on_idle( + block_number, + remaining_weight, + ); + remaining_weight = remaining_weight.saturating_sub(used_weight); + used_weight = >::on_idle( + block_number, + remaining_weight, + ) + .saturating_add(used_weight); + >::register_extra_weight_unchecked( + used_weight, + DispatchClass::Mandatory, + ); + } + + as OnFinalize>::on_finalize( + block_number, + ); + >::on_finalize(block_number); } /// Apply extrinsic outside of the block execution function. @@ -348,37 +419,24 @@ where sp_io::init_tracing(); let encoded = uxt.encode(); let encoded_len = encoded.len(); - Self::apply_extrinsic_with_len(uxt, encoded_len, Some(encoded)) - } - - /// Apply an extrinsic inside the block execution function. - fn apply_extrinsic_no_note(uxt: Block::Extrinsic) { - let l = uxt.encode().len(); - match Self::apply_extrinsic_with_len(uxt, l, None) { - Ok(_) => (), - Err(e) => { let err: &'static str = e.into(); panic!(err) }, - } + Self::apply_extrinsic_with_len(uxt, encoded_len, encoded) } /// Actually apply an extrinsic given its `encoded_len`; this doesn't note its hash. fn apply_extrinsic_with_len( uxt: Block::Extrinsic, encoded_len: usize, - to_note: Option>, + to_note: Vec, ) -> ApplyExtrinsicResult { - sp_tracing::enter_span!( - sp_tracing::info_span!("apply_extrinsic", - ext=?sp_core::hexdisplay::HexDisplay::from(&uxt.encode())) - ); + sp_tracing::enter_span!(sp_tracing::info_span!("apply_extrinsic", + ext=?sp_core::hexdisplay::HexDisplay::from(&uxt.encode()))); // Verify that the signature is good. let xt = uxt.check(&Default::default())?; // We don't need to make sure to `note_extrinsic` only after we know it's going to be // executed to prevent it from leaking in storage since at this point, it will either // execute or panic (and revert storage changes). - if let Some(encoded) = to_note { - >::note_extrinsic(encoded); - } + >::note_extrinsic(to_note); // AUDIT: Under no circumstances may this function panic from here onwards. 
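The hunk above routes both block paths through the new `idle_and_finalize_hook`, which hands whatever weight is still unused in the block to `on_idle` and registers the result as `Mandatory` weight. A minimal sketch of the contract an `on_idle` implementation must honour; all names and costs here are hypothetical:

```rust
type Weight = u64;

const ITEM_WEIGHT: Weight = 10_000; // hypothetical cost of processing one item

fn on_idle(_block_number: u64, remaining_weight: Weight) -> Weight {
	// Only take on as much work as the leftover block weight can pay for.
	let budget = remaining_weight / ITEM_WEIGHT;
	let mut used: Weight = 0;
	for _ in 0..budget {
		// ... process one queued item ...
		used = used.saturating_add(ITEM_WEIGHT);
	}
	// The executive records this as consumed weight, so it must not
	// understate what the hook actually did.
	used
}

fn main() {
	assert!(on_idle(1, 100_000) <= 100_000);
}
```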
@@ -386,7 +444,7 @@ where let dispatch_info = xt.get_dispatch_info(); let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; - >::note_applied_extrinsic(&r, dispatch_info); + >::note_applied_extrinsic(&r, dispatch_info); Ok(r.map(|_| ()).map_err(|e| e.error)) } @@ -394,7 +452,7 @@ where fn final_checks(header: &System::Header) { sp_tracing::enter_span!(sp_tracing::Level::TRACE, "final_checks"); // remove temporaries - let new_header = >::finalize(); + let new_header = >::finalize(); // check digest assert_eq!( @@ -412,30 +470,44 @@ where let storage_root = new_header.state_root(); header.state_root().check_equal(&storage_root); assert!(header.state_root() == storage_root, "Storage root must match that calculated."); + + assert!( + header.extrinsics_root() == new_header.extrinsics_root(), + "Transaction trie root must be valid.", + ); } /// Check a given signed transaction for validity. This doesn't execute any - /// side-effects; it merely checks whether the transaction would panic if it were included or not. + /// side-effects; it merely checks whether the transaction would panic if it were included or + /// not. /// /// Changes made to storage should be discarded. pub fn validate_transaction( source: TransactionSource, uxt: Block::Extrinsic, + block_hash: Block::Hash, ) -> TransactionValidity { sp_io::init_tracing(); use sp_tracing::{enter_span, within_span}; - enter_span!{ sp_tracing::Level::TRACE, "validate_transaction" }; + >::initialize( + &(frame_system::Pallet::::block_number() + One::one()), + &block_hash, + &Default::default(), + frame_system::InitKind::Inspection, + ); + + enter_span! { sp_tracing::Level::TRACE, "validate_transaction" }; - let encoded_len = within_span!{ sp_tracing::Level::TRACE, "using_encoded"; + let encoded_len = within_span! { sp_tracing::Level::TRACE, "using_encoded"; uxt.using_encoded(|d| d.len()) }; - let xt = within_span!{ sp_tracing::Level::TRACE, "check"; + let xt = within_span! { sp_tracing::Level::TRACE, "check"; uxt.check(&Default::default()) }?; - let dispatch_info = within_span!{ sp_tracing::Level::TRACE, "dispatch_info"; + let dispatch_info = within_span! { sp_tracing::Level::TRACE, "dispatch_info"; xt.get_dispatch_info() }; @@ -451,82 +523,85 @@ where // We need to keep events available for offchain workers, // hence we initialize the block manually. // OffchainWorker RuntimeApi should skip initialization. - let digests = Self::extract_pre_digest(header); + let digests = header.digest().clone(); - >::initialize( + >::initialize( header.number(), header.parent_hash(), - header.extrinsics_root(), &digests, frame_system::InitKind::Inspection, ); - // Initialize logger, so the log messages are visible - // also when running WASM. - frame_support::debug::RuntimeLogger::init(); + // Frame system only inserts the parent hash into the block hashes as normally we don't know + // the hash for the header before. However, here we are aware of the hash and we can add it + // as well. + frame_system::BlockHash::::insert(header.number(), header.hash()); - >::offchain_worker( - // to maintain backward compatibility we call module offchain workers - // with parent block number. 
- header.number().saturating_sub(1.into()) - ) + >::offchain_worker(*header.number()) } } - #[cfg(test)] mod tests { use super::*; + use frame_support::{ + assert_err, parameter_types, + traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, + weights::{IdentityFee, RuntimeDbWeight, Weight, WeightToFeePolynomial}, + }; + use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use hex_literal::hex; + use pallet_balances::Call as BalancesCall; + use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; use sp_runtime::{ - generic::Era, Perbill, DispatchError, testing::{Digest, Header, Block}, - traits::{Header as HeaderT, BlakeTwo256, IdentityLookup}, + generic::{DigestItem, Era}, + testing::{Block, Digest, Header}, + traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, IdentityLookup}, transaction_validity::{ - InvalidTransaction, ValidTransaction, TransactionValidityError, UnknownTransaction + InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, }, + DispatchError, }; - use frame_support::{ - parameter_types, - weights::{Weight, RuntimeDbWeight, IdentityFee, WeightToFeePolynomial}, - traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons, WithdrawReason}, - }; - use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; - use pallet_balances::Call as BalancesCall; - use hex_literal::hex; const TEST_KEY: &[u8] = &*b":test:key:"; mod custom { - use frame_support::weights::{Weight, DispatchClass}; + use frame_support::weights::{DispatchClass, Weight}; use sp_runtime::transaction_validity::{ - UnknownTransaction, TransactionSource, TransactionValidity + TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, }; - pub trait Trait: frame_system::Trait {} + pub trait Config: frame_system::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 100] fn some_function(origin) { // NOTE: does not make any different. - let _ = frame_system::ensure_signed(origin); + frame_system::ensure_signed(origin)?; } #[weight = (200, DispatchClass::Operational)] fn some_root_operation(origin) { - let _ = frame_system::ensure_root(origin); + frame_system::ensure_root(origin)?; } #[weight = 0] fn some_unsigned_message(origin) { - let _ = frame_system::ensure_none(origin); + frame_system::ensure_none(origin)?; } #[weight = 0] fn allowed_unsigned(origin) { - let _ = frame_system::ensure_root(origin)?; + frame_system::ensure_root(origin)?; } #[weight = 0] fn unallowed_unsigned(origin) { - let _ = frame_system::ensure_root(origin)?; + frame_system::ensure_root(origin)?; + } + + #[weight = 0] + fn inherent_call(origin) { + let _ = frame_system::ensure_none(origin)?; } // module hooks. 
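The next hunk gives the test module an `inherent_call` and a `ProvideInherent` implementation so the new ordering check can be exercised: `execute_block` now panics if any inherent appears after a signed extrinsic. A self-contained sketch of that rule, using toy types rather than the actual `EnsureInherentsAreFirst` trait:

```rust
enum Extrinsic {
	Inherent,
	Signed,
}

// Returns the index of the first misplaced inherent, mirroring how the
// executive panics with "Invalid inherent position for extrinsic at index {i}".
fn ensure_inherents_are_first(block: &[Extrinsic]) -> Result<(), usize> {
	let mut signed_seen = false;
	for (i, xt) in block.iter().enumerate() {
		match xt {
			Extrinsic::Signed => signed_seen = true,
			Extrinsic::Inherent if signed_seen => return Err(i),
			Extrinsic::Inherent => {},
		}
	}
	Ok(())
}

fn main() {
	assert_eq!(ensure_inherents_are_first(&[Extrinsic::Inherent, Extrinsic::Signed]), Ok(()));
	assert_eq!(ensure_inherents_are_first(&[Extrinsic::Signed, Extrinsic::Inherent]), Err(1));
}
```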
@@ -536,29 +611,66 @@ mod tests { 175 } + fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { + println!("on_idle{}, {})", n, remaining_weight); + 175 + } + fn on_finalize() { println!("on_finalize(?)"); } fn on_runtime_upgrade() -> Weight { sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - 0 + 200 } + + fn offchain_worker(n: T::BlockNumber) { + assert_eq!(T::BlockNumber::from(1u32), n); + } + + #[weight = 0] + fn calculate_storage_root(_origin) { + let root = sp_io::storage::root(); + sp_io::storage::set("storage_root".as_bytes(), &root); + } + } + } + + impl frame_support::inherent::ProvideInherent for Module { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; + fn create_inherent(_data: &sp_inherents::InherentData) -> Option { + None + } + fn is_inherent(call: &Self::Call) -> bool { + *call == Call::::inherent_call {} } } - impl sp_runtime::traits::ValidateUnsigned for Module { + impl sp_runtime::traits::ValidateUnsigned for Module { type Call = Call; + // Inherent call is not validated as unsigned fn validate_unsigned( _source: TransactionSource, call: &Self::Call, ) -> TransactionValidity { match call { - Call::allowed_unsigned(..) => Ok(Default::default()), + Call::allowed_unsigned { .. } => Ok(Default::default()), _ => UnknownTransaction::NoUnsignedValidator.into(), } } + + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } => Ok(()), + Call::inherent_call { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } } } @@ -568,26 +680,31 @@ mod tests { NodeBlock = TestBlock, UncheckedExtrinsic = TestUncheckedExtrinsic { - System: frame_system::{Module, Call, Config, Storage, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Custom: custom::{Module, Call, ValidateUnsigned}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + Custom: custom::{Pallet, Call, ValidateUnsigned, Inherent}, } ); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const BlockExecutionWeight: Weight = 10; - pub const ExtrinsicBaseWeight: Weight = 5; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::builder() + .base_block(10) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = 5) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = 1024.into()) + .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, write: 100, }; } - impl frame_system::Trait for Runtime { - type BaseCallFilter = (); + impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type Call = Call; @@ -599,46 +716,42 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = DbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = RuntimeVersion; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } type Balance = u64; parameter_types! { pub const ExistentialDeposit: Balance = 1; } - impl pallet_balances::Trait for Runtime { + impl pallet_balances::Config for Runtime { type Balance = Balance; type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } parameter_types! { pub const TransactionByteFee: Balance = 0; } - impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = (); + impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } - impl custom::Trait for Runtime {} + impl custom::Config for Runtime {} pub struct RuntimeVersion; impl frame_support::traits::Get for RuntimeVersion { @@ -660,12 +773,7 @@ mod tests { ); type TestXt = sp_runtime::testing::TestXt; type TestBlock = Block; - type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< - ::AccountId, - ::Call, - (), - SignedExtra, - >; + type TestUncheckedExtrinsic = TestXt; // Will contain `true` when the custom runtime logic was called. 
const CUSTOM_ON_RUNTIME_KEY: &[u8] = &*b":custom:on_runtime"; @@ -675,7 +783,7 @@ mod tests { fn on_runtime_upgrade() -> Weight { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); - 0 + 100 } } @@ -684,8 +792,8 @@ mod tests { Block, ChainContext, Runtime, - AllModules, - CustomOnRuntimeUpgrade + AllPallets, + CustomOnRuntimeUpgrade, >; fn extra(nonce: u64, fee: Balance) -> SignedExtra { @@ -693,7 +801,7 @@ mod tests { frame_system::CheckEra::from(Era::Immortal), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(fee) + pallet_transaction_payment::ChargeTransactionPayment::from(fee), ) } @@ -701,16 +809,23 @@ mod tests { Some((who, extra(nonce, fee))) } + fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) + } + #[test] fn balance_transfer_dispatch_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 211)], - }.assimilate_storage(&mut t).unwrap(); - let xt = TestXt::new(Call::Balances(BalancesCall::transfer(2, 69)), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + ::ExtrinsicBaseWeight::get(); - let fee: Balance - = ::WeightToFee::calc(&weight); + pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } + .assimilate_storage(&mut t) + .unwrap(); + let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let fee: Balance = + ::WeightToFee::calc(&weight); let mut t = sp_io::TestExternalities::new(t); t.execute_with(|| { Executive::initialize_block(&Header::new( @@ -722,16 +837,16 @@ mod tests { )); let r = Executive::apply_extrinsic(xt); assert!(r.is_ok()); - assert_eq!(>::total_balance(&1), 142 - fee); - assert_eq!(>::total_balance(&2), 69); + assert_eq!(>::total_balance(&1), 142 - fee); + assert_eq!(>::total_balance(&2), 69); }); } fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 111 * balance_factor)], - }.assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); t.into() } @@ -742,9 +857,15 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("465a1569d309039bdf84b0479d28064ea29e6584584dc7d788904bb14489c6f6").into(), - extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), - digest: Digest { logs: vec![], }, + state_root: hex!( + "1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5" + ) + .into(), + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), + digest: Digest { logs: vec![] }, }, extrinsics: vec![], }); @@ -760,8 +881,11 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root: [0u8; 32].into(), - extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), - digest: Digest { logs: vec![], }, + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), + digest: Digest { logs: vec![] }, }, extrinsics: vec![], }); @@ -776,9 +900,12 
@@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48").into(), + state_root: hex!( + "49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48" + ) + .into(), extrinsics_root: [0u8; 32].into(), - digest: Digest { logs: vec![], }, + digest: Digest { logs: vec![] }, }, extrinsics: vec![], }); @@ -789,7 +916,7 @@ mod tests { fn bad_extrinsic_not_inserted() { let mut t = new_test_ext(1); // bad nonce check! - let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 69)), sign_extra(1, 30, 0)); + let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); t.execute_with(|| { Executive::initialize_block(&Header::new( 1, @@ -798,8 +925,11 @@ mod tests { [69u8; 32].into(), Digest::default(), )); - assert!(Executive::apply_extrinsic(xt).is_err()); - assert_eq!(>::extrinsic_index(), Some(0)); + assert_err!( + Executive::apply_extrinsic(xt), + TransactionValidityError::Invalid(InvalidTransaction::Future) + ); + assert_eq!(>::extrinsic_index(), Some(0)); }); } @@ -807,12 +937,16 @@ mod tests { fn block_weight_limit_enforced() { let mut t = new_test_ext(10000); // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); let encoded = xt.encode(); let encoded_len = encoded.len() as Weight; - // Block execution weight + on_initialize weight - let base_block_weight = 175 + ::BlockExecutionWeight::get(); - let limit = AvailableBlockRatio::get() * MaximumBlockWeight::get() - base_block_weight; + // on_initialize weight + base block execution weight + let block_weights = ::BlockWeights::get(); + let base_block_weight = 175 + block_weights.base_block; + let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; let num_to_exhaust_block = limit / (encoded_len + 5); t.execute_with(|| { Executive::initialize_block(&Header::new( @@ -823,21 +957,25 @@ mod tests { Digest::default(), )); // Base block execution weight + `on_initialize` weight from the custom module. 
- assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::block_weight().total(), base_block_weight); for nonce in 0..=num_to_exhaust_block { let xt = TestXt::new( - Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, nonce.into(), 0), + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + sign_extra(1, nonce.into(), 0), ); let res = Executive::apply_extrinsic(xt); if nonce != num_to_exhaust_block { assert!(res.is_ok()); assert_eq!( - >::block_weight().total(), + >::block_weight().total(), //--------------------- on_initialize + block_execution + extrinsic_base weight (encoded_len + 5) * (nonce + 1) + base_block_weight, ); - assert_eq!(>::extrinsic_index(), Some(nonce as u32 + 1)); + assert_eq!( + >::extrinsic_index(), + Some(nonce as u32 + 1) + ); } else { assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); } @@ -847,14 +985,24 @@ mod tests { #[test] fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); - let x1 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 1, 0)); - let x2 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 2, 0)); + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let x1 = TestXt::new( + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + sign_extra(1, 1, 0), + ); + let x2 = TestXt::new( + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + sign_extra(1, 2, 0), + ); let len = xt.clone().encode().len() as u32; let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = 175 + ::BlockExecutionWeight::get(); + let base_block_weight = + 175 + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -864,24 +1012,27 @@ mod tests { Digest::default(), )); - assert_eq!(>::block_weight().total(), base_block_weight); - assert_eq!(>::all_extrinsics_len(), 0); + assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::all_extrinsics_len(), 0); assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = len as Weight + ::ExtrinsicBaseWeight::get(); + let extrinsic_weight = len as Weight + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; assert_eq!( - >::block_weight().total(), + >::block_weight().total(), base_block_weight + 3 * extrinsic_weight, ); - assert_eq!(>::all_extrinsics_len(), 3 * len); + assert_eq!(>::all_extrinsics_len(), 3 * len); - let _ = >::finalize(); + let _ = >::finalize(); // All extrinsics length cleaned on `System::finalize` - assert_eq!(>::all_extrinsics_len(), 0); + assert_eq!(>::all_extrinsics_len(), 0); // New Block Executive::initialize_block(&Header::new( @@ -893,25 +1044,33 @@ mod tests { )); // Block weight cleaned up on `System::initialize` - assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::block_weight().total(), base_block_weight); }); } #[test] fn validate_unsigned() { - let valid = TestXt::new(Call::Custom(custom::Call::allowed_unsigned()), None); - let invalid = TestXt::new(Call::Custom(custom::Call::unallowed_unsigned()), None); + let valid = TestXt::new(Call::Custom(custom::Call::allowed_unsigned {}), None); + let invalid = TestXt::new(Call::Custom(custom::Call::unallowed_unsigned {}), None); let mut t = new_test_ext(1); let mut default_with_prio_3 = ValidTransaction::default(); default_with_prio_3.priority = 3; t.execute_with(|| { assert_eq!( - Executive::validate_transaction(TransactionSource::InBlock, valid.clone()), + Executive::validate_transaction( + TransactionSource::InBlock, + valid.clone(), + Default::default(), + ), Ok(default_with_prio_3), ); assert_eq!( - Executive::validate_transaction(TransactionSource::InBlock, invalid.clone()), + Executive::validate_transaction( + TransactionSource::InBlock, + invalid.clone(), + Default::default(), + ), Err(TransactionValidityError::Unknown(UnknownTransaction::NoUnsignedValidator)), ); assert_eq!(Executive::apply_extrinsic(valid), Ok(Err(DispatchError::BadOrigin))); @@ -928,20 +1087,19 @@ mod tests { let execute_with_lock = |lock: WithdrawReasons| { let mut t = new_test_ext(1); t.execute_with(|| { - as LockableCurrency>::set_lock( - id, - &1, - 110, - lock, + as LockableCurrency>::set_lock( + id, &1, 110, lock, ); let xt = TestXt::new( - Call::System(SystemCall::remark(vec![1u8])), + Call::System(SystemCall::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), ); - let weight = xt.get_dispatch_info().weight - + ::ExtrinsicBaseWeight::get(); + let weight = xt.get_dispatch_info().weight + + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; let fee: Balance = - ::WeightToFee::calc(&weight); + ::WeightToFee::calc(&weight); Executive::initialize_block(&Header::new( 1, H256::default(), @@ -950,33 +1108,33 @@ mod tests { Digest::default(), )); - if lock == WithdrawReasons::except(WithdrawReason::TransactionPayment) { + if lock == WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT) { assert!(Executive::apply_extrinsic(xt).unwrap().is_ok()); // tx fee has been deducted. 
- assert_eq!(>::total_balance(&1), 111 - fee); + assert_eq!(>::total_balance(&1), 111 - fee); } else { assert_eq!( Executive::apply_extrinsic(xt), Err(InvalidTransaction::Payment.into()), ); - assert_eq!(>::total_balance(&1), 111); + assert_eq!(>::total_balance(&1), 111); } }); }; execute_with_lock(WithdrawReasons::all()); - execute_with_lock(WithdrawReasons::except(WithdrawReason::TransactionPayment)); + execute_with_lock(WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT)); } #[test] fn block_hooks_weight_is_stored() { new_test_ext(1).execute_with(|| { - Executive::initialize_block(&Header::new_from_number(1)); + Executive::finalize_block(); // NOTE: might need updates over time if new weights are introduced. // For now it only accounts for the base block execution weight and // the `on_initialize` weight defined in the custom test module. - assert_eq!(>::block_weight().total(), 175 + 10); + assert_eq!(>::block_weight().total(), 175 + 175 + 10); }) } @@ -985,43 +1143,47 @@ mod tests { new_test_ext(1).execute_with(|| { RUNTIME_VERSION.with(|v| *v.borrow_mut() = Default::default()); // It should be added at genesis - assert!(frame_system::LastRuntimeUpgrade::exists()); + assert!(frame_system::LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); - RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { - spec_version: 1, - ..Default::default() + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); assert!(Executive::runtime_upgraded()); assert_eq!( Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), - frame_system::LastRuntimeUpgrade::get(), + frame_system::LastRuntimeUpgrade::::get(), ); - RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), - ..Default::default() + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + ..Default::default() + } }); assert!(Executive::runtime_upgraded()); assert_eq!( Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::get(), + frame_system::LastRuntimeUpgrade::::get(), ); - RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), - impl_version: 2, - ..Default::default() + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + impl_version: 2, + ..Default::default() + } }); assert!(!Executive::runtime_upgraded()); - frame_system::LastRuntimeUpgrade::take(); + frame_system::LastRuntimeUpgrade::::take(); assert!(Executive::runtime_upgraded()); assert_eq!( Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::get(), + frame_system::LastRuntimeUpgrade::::get(), ); }) } @@ -1056,11 +1218,41 @@ mod tests { fn custom_runtime_upgrade_is_called_before_modules() { new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. 
- RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { - spec_version: 1, - ..Default::default() + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); + assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + }); + } + + /// Regression test that ensures that the custom on runtime upgrade is called when executive is + /// used through the `ExecuteBlock` trait. + #[test] + fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + + let header = new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); + // Let's build some fake block. Executive::initialize_block(&Header::new( 1, H256::default(), @@ -1069,8 +1261,168 @@ mod tests { Digest::default(), )); + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + // Reset to get the correct new genesis below. + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } + }); + + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + >>::execute_block(Block::new(header, vec![xt])); + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); }); } + + #[test] + fn all_weights_are_recorded_correctly() { + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called for maximum complexity + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = + sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + }); + + let block_number = 1; + + Executive::initialize_block(&Header::new( + block_number, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + // All weights that show up in the `initialize_block_impl` + let frame_system_upgrade_weight = frame_system::Pallet::::on_runtime_upgrade(); + let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); + let runtime_upgrade_weight = ::on_runtime_upgrade(); + let frame_system_on_initialize_weight = + frame_system::Pallet::::on_initialize(block_number); + let on_initialize_weight = + >::on_initialize(block_number); + let base_block_weight = + ::BlockWeights::get().base_block; + + // Weights are recorded correctly + assert_eq!( + frame_system::Pallet::::block_weight().total(), + frame_system_upgrade_weight + + custom_runtime_upgrade_weight + + runtime_upgrade_weight + + frame_system_on_initialize_weight + + on_initialize_weight + base_block_weight, + ); + }); + } + + #[test] + fn offchain_worker_works_as_expected() { + new_test_ext(1).execute_with(|| { + let parent_hash = sp_core::H256::from([69u8; 32]); + let mut digest = Digest::default(); + digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8])); + + let header = + Header::new(1, H256::default(), H256::default(), parent_hash, 
digest.clone()); + + Executive::offchain_worker(&header); + + assert_eq!(digest, System::digest()); + assert_eq!(parent_hash, System::block_hash(0)); + assert_eq!(header.hash(), System::block_hash(1)); + }); + } + + #[test] + fn calculating_storage_root_twice_works() { + let call = Call::Custom(custom::Call::calculate_storage_root {}); + let xt = TestXt::new(call, sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt])); + }); + } + + #[test] + #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] + fn invalid_inherent_position_fail() { + let xt1 = TestXt::new( + Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + sign_extra(1, 0, 0), + ); + let xt2 = TestXt::new(Call::Custom(custom::Call::inherent_call {}), None); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); + } + + #[test] + fn valid_inherents_position_works() { + let xt1 = TestXt::new(Call::Custom(custom::Call::inherent_call {}), None); + let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); + } } diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml new file mode 100644 index 0000000000000..c275b693d8f27 --- /dev/null +++ b/frame/gilt/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pallet-gilt" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for rewarding account freezing." 
+readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../primitives/arithmetic" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } + +[dev-dependencies] +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-std/std", + "sp-runtime/std", + "sp-arithmetic/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/gilt/README.md b/frame/gilt/README.md new file mode 100644 index 0000000000000..4eaddae1786e7 --- /dev/null +++ b/frame/gilt/README.md @@ -0,0 +1,2 @@ + +License: Apache-2.0 diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs new file mode 100644 index 0000000000000..55d34a35a7ce4 --- /dev/null +++ b/frame/gilt/src/benchmarking.rs @@ -0,0 +1,136 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for Gilt Pallet + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::{ + dispatch::UnfilteredDispatchable, + traits::{Currency, EnsureOrigin, Get}, +}; +use frame_system::RawOrigin; +use sp_arithmetic::Perquintill; +use sp_runtime::traits::{Bounded, Zero}; +use sp_std::prelude::*; + +use crate::Pallet as Gilt; + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +benchmarks! 
{ + place_bid { + let l in 0..(T::MaxQueueLen::get() - 1); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for i in 0..l { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + }: _(RawOrigin::Signed(caller.clone()), T::MinFreeze::get() * BalanceOf::::from(2u32), 1) + verify { + assert_eq!(QueueTotals::::get()[0], (l + 1, T::MinFreeze::get() * BalanceOf::::from(l + 2))); + } + + place_bid_max { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for i in 0..T::MaxQueueLen::get() { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + }: { + Gilt::::place_bid( + RawOrigin::Signed(caller.clone()).into(), + T::MinFreeze::get() * BalanceOf::::from(2u32), + 1, + )? + } + verify { + assert_eq!(QueueTotals::::get()[0], ( + T::MaxQueueLen::get(), + T::MinFreeze::get() * BalanceOf::::from(T::MaxQueueLen::get() + 1), + )); + } + + retract_bid { + let l in 1..T::MaxQueueLen::get(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for i in 0..l { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + }: _(RawOrigin::Signed(caller.clone()), T::MinFreeze::get(), 1) + verify { + assert_eq!(QueueTotals::::get()[0], (l - 1, T::MinFreeze::get() * BalanceOf::::from(l - 1))); + } + + set_target { + let call = Call::::set_target { target: Default::default() }; + let origin = T::AdminOrigin::successful_origin(); + }: { call.dispatch_bypass_filter(origin)? } + + thaw { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(3u32)); + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + Gilt::::enlarge(T::MinFreeze::get() * BalanceOf::::from(2u32), 2); + Active::::mutate(0, |m_g| if let Some(ref mut g) = m_g { g.expiry = Zero::zero() }); + }: _(RawOrigin::Signed(caller.clone()), 0) + verify { + assert!(Active::::get(0).is_none()); + } + + pursue_target_noop { + }: { Gilt::::pursue_target(0) } + + pursue_target_per_item { + // bids taken + let b in 1..T::MaxQueueLen::get(); + + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(b + 1)); + + for _ in 0..b { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + + Call::::set_target { target: Perquintill::from_percent(100) } + .dispatch_bypass_filter(T::AdminOrigin::successful_origin())?; + + }: { Gilt::::pursue_target(b) } + + pursue_target_per_queue { + // total queues hit + let q in 1..T::QueueCount::get(); + + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(q + 1)); + + for i in 0..q { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), i + 1)?; + } + + Call::::set_target { target: Perquintill::from_percent(100) } + .dispatch_bypass_filter(T::AdminOrigin::successful_origin())?; + + }: { Gilt::::pursue_target(q) } +} + +impl_benchmark_test_suite!(Gilt, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs new file mode 100644 index 
0000000000000..de114e4bb87de
--- /dev/null
+++ b/frame/gilt/src/lib.rs
@@ -0,0 +1,631 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Gilt Pallet
+//! A pallet allowing accounts to auction for being frozen and receive open-ended
+//! inflation-protection in return.
+//!
+//! ## Overview
+//!
+//! Lock up tokens, for at least as long as you offer, and be free from both inflation and
+//! intermediate reward or exchange until the tokens become unlocked.
+//!
+//! ## Design
+//!
+//! Queues for each of 1-`QueueCount` periods, given in blocks (`Period`). Queues are limited in
+//! size to something sensible, `MaxQueueLen`. A secondary storage item with `QueueCount` x `u32`
+//! elements records the number of items in each queue.
+//!
+//! Queues are split into two parts. The first part is a priority queue based on bid size. The
+//! second part is just a FIFO (the size of the second part is set with `FifoQueueLen`). Items are
+//! always prepended so that removal is always O(1) since removal often happens many times under a
+//! single weighed function (`on_initialize`) yet placing bids only ever happens once per weighed
+//! function (`place_bid`). If the queue has a priority portion, then it remains sorted in order of
+//! bid size so that smaller bids fall off as it gets too large.
+//!
+//! An account may enqueue a balance with some number of `Period`s of lock-up, up to a maximum of
+//! `QueueCount`. The balance gets reserved. There's a minimum of `MinFreeze` to avoid dust.
+//!
+//! Until your bid is turned into an issued gilt you can retract it instantly and the funds are
+//! unreserved.
+//!
+//! There's a target proportion of effective total issuance (i.e. accounting for existing gilts)
+//! which we attempt to have frozen at any one time. It will likely be gradually increased over
+//! time by governance.
+//!
+//! When the proportion of funds frozen under gilts drops below the target proportion of the total
+//! effective issuance, bids are taken from queues and turned into gilts, with the queue of the
+//! greatest period taking priority. If a bid's locked amount is greater than the amount left to
+//! be frozen, it is split: part becomes frozen under gilt and the remainder is returned to the
+//! queue.
+//!
+//! Once an account's balance is frozen, it remains frozen until the owner thaws the balance of
+//! the account. This may happen no earlier than the queue's period after the point at which the
+//! gilt is issued.
+//!
+//! ## Suggested Values
+//!
+//! - `QueueCount`: 300
+//! - `Period`: 432,000
+//! - `MaxQueueLen`: 1000
+//! - `MinFreeze`: Around CHF 100 in value.
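The arithmetic behind this design can be made concrete. The pallet recovers the effective issuance from the non-gilt issuance and the recorded frozen proportion, and values a gilt at its proportion of that effective issuance. A minimal standalone sketch with hypothetical numbers (plain floats here for readability; the pallet itself works in `Perquintill` fixed-point, not floating point):

// Illustrative sketch only: the gilt valuation arithmetic with hypothetical numbers.
fn main() {
    // Suppose issuance net of `IgnoredIssuance` is 400 units, of which 40 are
    // frozen under gilts, recorded as a proportion of 10%.
    let total_issuance: f64 = 400.0;
    let frozen: f64 = 40.0;
    let proportion: f64 = 0.10;

    // Non-gilt issuance, and the effective issuance recovered from it; the pallet
    // computes this as `proportion.left_from_one().saturating_reciprocal_mul(non_gilt)`.
    let non_gilt = total_issuance - frozen; // 360
    let effective = non_gilt / (1.0 - proportion); // 400

    // A gilt thaws at its proportion of the *current* effective issuance, so its
    // value floats with the non-gilt supply rather than staying nominal.
    let gilt_value = proportion * effective; // 40 while issuance is unchanged
    assert!((gilt_value - 40.0).abs() < 1e-9);
    println!("effective issuance: {}, gilt value: {}", effective, gilt_value);
}

Had the non-gilt issuance doubled to 720 by expiry, the same 10% gilt would thaw at 80 units, with the difference minted and accounted through the `Deficit` handler; this is the inflation protection described above.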
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; + +#[frame_support::pallet] +pub mod pallet { + pub use crate::weights::WeightInfo; + use frame_support::{ + pallet_prelude::*, + traits::{Currency, OnUnbalanced, ReservableCurrency}, + }; + use frame_system::pallet_prelude::*; + use scale_info::TypeInfo; + use sp_arithmetic::{PerThing, Perquintill}; + use sp_runtime::traits::{Saturating, Zero}; + use sp_std::prelude::*; + + type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + type PositiveImbalanceOf = <::Currency as Currency< + ::AccountId, + >>::PositiveImbalance; + type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, + >>::NegativeImbalance; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Overarching event type. + type Event: From> + IsType<::Event>; + + /// Currency type that this works on. + type Currency: ReservableCurrency; + + /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to + /// `From`. + type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned + + codec::FullCodec + + Copy + + MaybeSerializeDeserialize + + sp_std::fmt::Debug + + Default + + From + + TypeInfo; + + /// Origin required for setting the target proportion to be under gilt. + type AdminOrigin: EnsureOrigin; + + /// Unbalanced handler to account for funds created (in case of a higher total issuance over + /// freezing period). + type Deficit: OnUnbalanced>; + + /// Unbalanced handler to account for funds destroyed (in case of a lower total issuance + /// over freezing period). + type Surplus: OnUnbalanced>; + + /// The issuance to ignore. This is subtracted from the `Currency`'s `total_issuance` to get + /// the issuance by which we inflate or deflate the gilt. + #[pallet::constant] + type IgnoredIssuance: Get>; + + /// Number of duration queues in total. This sets the maximum duration supported, which is + /// this value multiplied by `Period`. + #[pallet::constant] + type QueueCount: Get; + + /// Maximum number of items that may be in each duration queue. + #[pallet::constant] + type MaxQueueLen: Get; + + /// Portion of the queue which is free from ordering and just a FIFO. + /// + /// Must be no greater than `MaxQueueLen`. + #[pallet::constant] + type FifoQueueLen: Get; + + /// The base period for the duration queues. This is the common multiple across all + /// supported freezing durations that can be bid upon. + #[pallet::constant] + type Period: Get; + + /// The minimum amount of funds that may be offered to freeze for a gilt. Note that this + /// does not actually limit the amount which may be frozen in a gilt since gilts may be + /// split up in order to satisfy the desired amount of funds under gilts. + /// + /// It should be at least big enough to ensure that there is no possible storage spam attack + /// or queue-filling attack. + #[pallet::constant] + type MinFreeze: Get>; + + /// The number of blocks between consecutive attempts to issue more gilts in an effort to + /// get to the target amount to be frozen. + /// + /// A larger value results in fewer storage hits each block, but a slower period to get to + /// the target. + #[pallet::constant] + type IntakePeriod: Get; + + /// The maximum amount of bids that can be turned into issued gilts each block. A larger + /// value here means less of the block available for transactions should there be a glut of + /// bids to make into gilts to reach the target. 
+ #[pallet::constant] + type MaxIntakeBids: Get; + + /// Information on runtime weights. + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// A single bid on a gilt, an item of a *queue* in `Queues`. + #[derive(Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct GiltBid { + /// The amount bid. + pub amount: Balance, + /// The owner of the bid. + pub who: AccountId, + } + + /// Information representing an active gilt. + #[derive(Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct ActiveGilt { + /// The proportion of the effective total issuance (i.e. accounting for any eventual gilt + /// expansion or contraction that may eventually be claimed). + pub proportion: Perquintill, + /// The amount reserved under this gilt. + pub amount: Balance, + /// The account to whom this gilt belongs. + pub who: AccountId, + /// The time after which this gilt can be redeemed for the proportional amount of balance. + pub expiry: BlockNumber, + } + + /// An index for a gilt. + pub type ActiveIndex = u32; + + /// Overall information package on the active gilts. + /// + /// The way of determining the net issuance (i.e. after factoring in all maturing frozen funds) + /// is: + /// + /// `issuance - frozen + proportion * issuance` + /// + /// where `issuance = total_issuance - IgnoredIssuance` + #[derive(Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct ActiveGiltsTotal { + /// The total amount of funds held in reserve for all active gilts. + pub frozen: Balance, + /// The proportion of funds that the `frozen` balance represents to total issuance. + pub proportion: Perquintill, + /// The total number of gilts issued so far. + pub index: ActiveIndex, + /// The target proportion of gilts within total issuance. + pub target: Perquintill, + } + + /// The totals of items and balances within each queue. Saves a lot of storage reads in the + /// case of sparsely packed queues. + /// + /// The vector is indexed by duration in `Period`s, offset by one, so information on the queue + /// whose duration is one `Period` would be storage `0`. + #[pallet::storage] + pub type QueueTotals = StorageValue<_, Vec<(u32, BalanceOf)>, ValueQuery>; + + /// The queues of bids ready to become gilts. Indexed by duration (in `Period`s). + #[pallet::storage] + pub type Queues = + StorageMap<_, Blake2_128Concat, u32, Vec, T::AccountId>>, ValueQuery>; + + /// Information relating to the gilts currently active. + #[pallet::storage] + pub type ActiveTotal = StorageValue<_, ActiveGiltsTotal>, ValueQuery>; + + /// The currently active gilts, indexed according to the order of creation. + #[pallet::storage] + pub type Active = StorageMap< + _, + Blake2_128Concat, + ActiveIndex, + ActiveGilt< + BalanceOf, + ::AccountId, + ::BlockNumber, + >, + OptionQuery, + >; + + #[pallet::genesis_config] + #[derive(Default)] + pub struct GenesisConfig; + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + QueueTotals::::put(vec![(0, BalanceOf::::zero()); T::QueueCount::get() as usize]); + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A bid was successfully placed. + /// \[ who, amount, duration \] + BidPlaced(T::AccountId, BalanceOf, u32), + /// A bid was successfully removed (before being accepted as a gilt). 
+        /// \[ who, amount, duration \]
+        BidRetracted(T::AccountId, BalanceOf<T>, u32),
+        /// A bid was accepted as a gilt. The balance may not be released until expiry.
+        /// \[ index, expiry, who, amount \]
+        GiltIssued(ActiveIndex, T::BlockNumber, T::AccountId, BalanceOf<T>),
+        /// An expired gilt has been thawed.
+        /// \[ index, who, original_amount, additional_amount \]
+        GiltThawed(ActiveIndex, T::AccountId, BalanceOf<T>, BalanceOf<T>),
+    }
+
+    #[pallet::error]
+    pub enum Error<T> {
+        /// The duration of the bid is less than one.
+        DurationTooSmall,
+        /// The duration of the bid is greater than the number of queues.
+        DurationTooBig,
+        /// The amount of the bid is less than the minimum allowed.
+        AmountTooSmall,
+        /// The queue for the bid's duration is full and the amount bid is too low to get in
+        /// through replacing an existing bid.
+        BidTooLow,
+        /// Gilt index is unknown.
+        Unknown,
+        /// Not the owner of the gilt.
+        NotOwner,
+        /// Gilt not yet at expiry date.
+        NotExpired,
+        /// The given bid for retraction is not found.
+        NotFound,
+    }
+
+    #[pallet::hooks]
+    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+        fn on_initialize(n: T::BlockNumber) -> Weight {
+            if (n % T::IntakePeriod::get()).is_zero() {
+                Self::pursue_target(T::MaxIntakeBids::get())
+            } else {
+                0
+            }
+        }
+    }
+
+    #[pallet::call]
+    impl<T: Config> Pallet<T> {
+        /// Place a bid for a gilt to be issued.
+        ///
+        /// Origin must be Signed, and the account must have at least `amount` in free balance.
+        ///
+        /// - `amount`: The amount of the bid; these funds will be reserved. If the bid is
+        /// successfully elevated into an issued gilt, then these funds will continue to be
+        /// reserved until the gilt expires. Must be at least `MinFreeze`.
+        /// - `duration`: The number of periods for which the funds will be locked if the gilt is
+        /// issued. It will expire only after this period has elapsed after the point of issuance.
+        /// Must be greater than 0 and no more than `QueueCount`.
+        ///
+        /// Complexities:
+        /// - `Queues[duration].len()` (just take max).
+        #[pallet::weight(T::WeightInfo::place_bid_max())]
+        pub fn place_bid(
+            origin: OriginFor<T>,
+            #[pallet::compact] amount: BalanceOf<T>,
+            duration: u32,
+        ) -> DispatchResultWithPostInfo {
+            let who = ensure_signed(origin)?;
+
+            ensure!(amount >= T::MinFreeze::get(), Error::<T>::AmountTooSmall);
+            let queue_count = T::QueueCount::get() as usize;
+            let queue_index = duration.checked_sub(1).ok_or(Error::<T>::DurationTooSmall)? as usize;
+            ensure!(queue_index < queue_count, Error::<T>::DurationTooBig);
+
+            let net = Queues::<T>::try_mutate(
+                duration,
+                |q| -> Result<(u32, BalanceOf<T>), DispatchError> {
+                    let queue_full = q.len() == T::MaxQueueLen::get() as usize;
+                    ensure!(!queue_full || q[0].amount < amount, Error::<T>::BidTooLow);
+                    T::Currency::reserve(&who, amount)?;
+
+                    // The front of the queue is ordered by bid size (smallest first); the
+                    // last `FifoQueueLen` items form an unordered FIFO tail.
+                    let mut bid = GiltBid { amount, who: who.clone() };
+                    let net = if queue_full {
+                        sp_std::mem::swap(&mut q[0], &mut bid);
+                        T::Currency::unreserve(&bid.who, bid.amount);
+                        (0, amount - bid.amount)
+                    } else {
+                        q.insert(0, bid);
+                        (1, amount)
+                    };
+
+                    let sorted_item_count = q.len().saturating_sub(T::FifoQueueLen::get() as usize);
+                    if sorted_item_count > 1 {
+                        q[0..sorted_item_count].sort_by_key(|x| x.amount);
+                    }
+
+                    Ok(net)
+                },
+            )?;
+            QueueTotals::<T>::mutate(|qs| {
+                qs.resize(queue_count, (0, Zero::zero()));
+                qs[queue_index].0 += net.0;
+                qs[queue_index].1 = qs[queue_index].1.saturating_add(net.1);
+            });
+            Self::deposit_event(Event::BidPlaced(who.clone(), amount, duration));
+
+            Ok(().into())
+        }
+
+        /// Retract a previously placed bid.
+ /// + /// Origin must be Signed, and the account should have previously issued a still-active bid + /// of `amount` for `duration`. + /// + /// - `amount`: The amount of the previous bid. + /// - `duration`: The duration of the previous bid. + #[pallet::weight(T::WeightInfo::place_bid(T::MaxQueueLen::get()))] + pub fn retract_bid( + origin: OriginFor, + #[pallet::compact] amount: BalanceOf, + duration: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + let queue_count = T::QueueCount::get() as usize; + let queue_index = duration.checked_sub(1).ok_or(Error::::DurationTooSmall)? as usize; + ensure!(queue_index < queue_count, Error::::DurationTooBig); + + let bid = GiltBid { amount, who }; + let new_len = Queues::::try_mutate(duration, |q| -> Result { + let pos = q.iter().position(|i| i == &bid).ok_or(Error::::NotFound)?; + q.remove(pos); + Ok(q.len() as u32) + })?; + + QueueTotals::::mutate(|qs| { + qs.resize(queue_count, (0, Zero::zero())); + qs[queue_index].0 = new_len; + qs[queue_index].1 = qs[queue_index].1.saturating_sub(bid.amount); + }); + + T::Currency::unreserve(&bid.who, bid.amount); + Self::deposit_event(Event::BidRetracted(bid.who, bid.amount, duration)); + + Ok(().into()) + } + + /// Set target proportion of gilt-funds. + /// + /// Origin must be `AdminOrigin`. + /// + /// - `target`: The target proportion of effective issued funds that should be under gilts + /// at any one time. + #[pallet::weight(T::WeightInfo::set_target())] + pub fn set_target( + origin: OriginFor, + #[pallet::compact] target: Perquintill, + ) -> DispatchResultWithPostInfo { + T::AdminOrigin::ensure_origin(origin)?; + ActiveTotal::::mutate(|totals| totals.target = target); + Ok(().into()) + } + + /// Remove an active but expired gilt. Reserved funds under gilt are freed and balance is + /// adjusted to ensure that the funds grow or shrink to maintain the equivalent proportion + /// of effective total issued funds. + /// + /// Origin must be Signed and the account must be the owner of the gilt of the given index. + /// + /// - `index`: The index of the gilt to be thawed. + #[pallet::weight(T::WeightInfo::thaw())] + pub fn thaw( + origin: OriginFor, + #[pallet::compact] index: ActiveIndex, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + // Look for `index` + let gilt = Active::::get(index).ok_or(Error::::Unknown)?; + // If found, check the owner is `who`. + ensure!(gilt.who == who, Error::::NotOwner); + let now = frame_system::Pallet::::block_number(); + ensure!(now >= gilt.expiry, Error::::NotExpired); + // Remove it + Active::::remove(index); + + // Multiply the proportion it is by the total issued. + let total_issuance = + T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get()); + ActiveTotal::::mutate(|totals| { + let nongilt_issuance = total_issuance.saturating_sub(totals.frozen); + let effective_issuance = + totals.proportion.left_from_one().saturating_reciprocal_mul(nongilt_issuance); + let gilt_value = gilt.proportion * effective_issuance; + + totals.frozen = totals.frozen.saturating_sub(gilt.amount); + totals.proportion = totals.proportion.saturating_sub(gilt.proportion); + + // Remove or mint the additional to the amount using `Deficit`/`Surplus`. + if gilt_value > gilt.amount { + // Unreserve full amount. 
+ T::Currency::unreserve(&gilt.who, gilt.amount); + let amount = gilt_value - gilt.amount; + let deficit = T::Currency::deposit_creating(&gilt.who, amount); + T::Deficit::on_unbalanced(deficit); + } else { + if gilt_value < gilt.amount { + // We take anything reserved beyond the gilt's final value. + let rest = gilt.amount - gilt_value; + // `slash` might seem a little aggressive, but it's the only way to do it + // in case it's locked into the staking system. + let surplus = T::Currency::slash_reserved(&gilt.who, rest).0; + T::Surplus::on_unbalanced(surplus); + } + // Unreserve only its new value (less than the amount reserved). Everything + // should add up, but (defensive) in case it doesn't, unreserve takes lower + // priority over the funds. + let err_amt = T::Currency::unreserve(&gilt.who, gilt_value); + debug_assert!(err_amt.is_zero()); + } + + let e = Event::GiltThawed(index, gilt.who, gilt.amount, gilt_value); + Self::deposit_event(e); + }); + + Ok(().into()) + } + } + + /// Issuance information returned by `issuance()`. + pub struct IssuanceInfo { + /// The balance held in reserve over all active gilts. + pub reserved: Balance, + /// The issuance not held in reserve for active gilts. Together with `reserved` this sums + /// to `Currency::total_issuance`. + pub non_gilt: Balance, + /// The balance that `reserved` is effectively worth, at present. This is not issued funds + /// and could be less than `reserved` (though in most cases should be greater). + pub effective: Balance, + } + + impl Pallet { + /// Get the target amount of Gilts that we're aiming for. + pub fn target() -> Perquintill { + ActiveTotal::::get().target + } + + /// Returns information on the issuance of gilts. + pub fn issuance() -> IssuanceInfo> { + let totals = ActiveTotal::::get(); + + let total_issuance = T::Currency::total_issuance(); + let non_gilt = total_issuance.saturating_sub(totals.frozen); + let effective = totals.proportion.left_from_one().saturating_reciprocal_mul(non_gilt); + + IssuanceInfo { reserved: totals.frozen, non_gilt, effective } + } + + /// Attempt to enlarge our gilt-set from bids in order to satisfy our desired target amount + /// of funds frozen into gilts. + pub fn pursue_target(max_bids: u32) -> Weight { + let totals = ActiveTotal::::get(); + if totals.proportion < totals.target { + let missing = totals.target.saturating_sub(totals.proportion); + + let total_issuance = + T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get()); + let nongilt_issuance = total_issuance.saturating_sub(totals.frozen); + let effective_issuance = + totals.proportion.left_from_one().saturating_reciprocal_mul(nongilt_issuance); + let intake = missing * effective_issuance; + + let (bids_taken, queues_hit) = Self::enlarge(intake, max_bids); + let first_from_each_queue = T::WeightInfo::pursue_target_per_queue(queues_hit); + let rest_from_each_queue = T::WeightInfo::pursue_target_per_item(bids_taken) + .saturating_sub(T::WeightInfo::pursue_target_per_item(queues_hit)); + first_from_each_queue + rest_from_each_queue + } else { + T::WeightInfo::pursue_target_noop() + } + } + + /// Freeze additional funds from queue of bids up to `amount`. Use at most `max_bids` + /// from the queue. + /// + /// Return the number of bids taken and the number of distinct queues taken from. 
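+        ///
+        /// As a rough illustration with hypothetical numbers: if `amount` is 100 and the
+        /// next bids popped from a queue are 60 and 70, the 60-unit bid is issued whole
+        /// (leaving 40), the 70-unit bid is split into a 40-unit issued gilt and a
+        /// 30-unit bid pushed back onto the queue, and the call returns `(2, 1)`: two
+        /// bids taken from one queue.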
+ pub fn enlarge(amount: BalanceOf, max_bids: u32) -> (u32, u32) { + let total_issuance = + T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get()); + let mut remaining = amount; + let mut bids_taken = 0; + let mut queues_hit = 0; + let now = frame_system::Pallet::::block_number(); + + ActiveTotal::::mutate(|totals| { + QueueTotals::::mutate(|qs| { + for duration in (1..=T::QueueCount::get()).rev() { + if qs[duration as usize - 1].0 == 0 { + continue + } + let queue_index = duration as usize - 1; + let expiry = + now.saturating_add(T::Period::get().saturating_mul(duration.into())); + Queues::::mutate(duration, |q| { + while let Some(mut bid) = q.pop() { + if remaining < bid.amount { + let overflow = bid.amount - remaining; + bid.amount = remaining; + q.push(GiltBid { amount: overflow, who: bid.who.clone() }); + } + let amount = bid.amount; + // Can never overflow due to block above. + remaining -= amount; + // Should never underflow since it should track the total of the + // bids exactly, but we'll be defensive. + qs[queue_index].1 = qs[queue_index].1.saturating_sub(bid.amount); + + // Now to activate the bid... + let nongilt_issuance = total_issuance.saturating_sub(totals.frozen); + let effective_issuance = totals + .proportion + .left_from_one() + .saturating_reciprocal_mul(nongilt_issuance); + let n = amount; + let d = effective_issuance; + let proportion = Perquintill::from_rational(n, d); + let who = bid.who; + let index = totals.index; + totals.frozen += bid.amount; + totals.proportion = totals.proportion.saturating_add(proportion); + totals.index += 1; + let e = Event::GiltIssued(index, expiry, who.clone(), amount); + Self::deposit_event(e); + let gilt = ActiveGilt { amount, proportion, who, expiry }; + Active::::insert(index, gilt); + + bids_taken += 1; + + if remaining.is_zero() || bids_taken == max_bids { + break + } + } + queues_hit += 1; + qs[queue_index].0 = q.len() as u32; + }); + if remaining.is_zero() || bids_taken == max_bids { + break + } + } + }); + }); + (bids_taken, queues_hit) + } + } +} diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs new file mode 100644 index 0000000000000..ac3f4df1b71dd --- /dev/null +++ b/frame/gilt/src/mock.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Gilt pallet. + +use crate as pallet_gilt; + +use frame_support::{ + ord_parameter_types, parameter_types, + traits::{Currency, GenesisBuild, OnFinalize, OnInitialize}, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. 
+frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, + Gilt: pallet_gilt::{Pallet, Call, Config, Storage, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; +} + +parameter_types! { + pub IgnoredIssuance: u64 = Balances::total_balance(&0); // Account zero is ignored. + pub const QueueCount: u32 = 3; + pub const MaxQueueLen: u32 = 3; + pub const FifoQueueLen: u32 = 1; + pub const Period: u64 = 3; + pub const MinFreeze: u64 = 2; + pub const IntakePeriod: u64 = 2; + pub const MaxIntakeBids: u32 = 2; +} +ord_parameter_types! { + pub const One: u64 = 1; +} + +impl pallet_gilt::Config for Test { + type Event = Event; + type Currency = Balances; + type CurrencyBalance = ::Balance; + type AdminOrigin = frame_system::EnsureSignedBy; + type Deficit = (); + type Surplus = (); + type IgnoredIssuance = IgnoredIssuance; + type QueueCount = QueueCount; + type MaxQueueLen = MaxQueueLen; + type FifoQueueLen = FifoQueueLen; + type Period = Period; + type MinFreeze = MinFreeze; + type IntakePeriod = IntakePeriod; + type MaxIntakeBids = MaxIntakeBids; + type WeightInfo = (); +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 100), (2, 100), (3, 100), (4, 100)], + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); + t.into() +} + +pub fn run_to_block(n: u64) { + while System::block_number() < n { + Gilt::on_finalize(System::block_number()); + Balances::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Balances::on_initialize(System::block_number()); + Gilt::on_initialize(System::block_number()); + } +} diff --git a/frame/gilt/src/tests.rs b/frame/gilt/src/tests.rs new file mode 100644 index 0000000000000..80315141e2325 --- /dev/null +++ b/frame/gilt/src/tests.rs @@ -0,0 +1,567 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Gilt pallet. + +use super::*; +use crate::{mock::*, Error}; +use frame_support::{assert_noop, assert_ok, dispatch::DispatchError, traits::Currency}; +use pallet_balances::Error as BalancesError; +use sp_arithmetic::Perquintill; + +#[test] +fn basic_setup_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + + for q in 0..3 { + assert!(Queues::::get(q).is_empty()); + } + assert_eq!( + ActiveTotal::::get(), + ActiveGiltsTotal { + frozen: 0, + proportion: Perquintill::zero(), + index: 0, + target: Perquintill::zero(), + } + ); + assert_eq!(QueueTotals::::get(), vec![(0, 0); 3]); + }); +} + +#[test] +fn set_target_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + let e = DispatchError::BadOrigin; + assert_noop!(Gilt::set_target(Origin::signed(2), Perquintill::from_percent(50)), e); + assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(50))); + + assert_eq!( + ActiveTotal::::get(), + ActiveGiltsTotal { + frozen: 0, + proportion: Perquintill::zero(), + index: 0, + target: Perquintill::from_percent(50), + } + ); + }); +} + +#[test] +fn place_bid_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_noop!(Gilt::place_bid(Origin::signed(1), 1, 2), Error::::AmountTooSmall); + assert_noop!( + Gilt::place_bid(Origin::signed(1), 101, 2), + BalancesError::::InsufficientBalance + ); + assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 4), Error::::DurationTooBig); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_eq!(Balances::reserved_balance(1), 10); + assert_eq!(Queues::::get(2), vec![GiltBid { amount: 10, who: 1 }]); + assert_eq!(QueueTotals::::get(), vec![(0, 0), (1, 10), (0, 0)]); + }); +} + +#[test] +fn place_bid_queuing_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 20, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 5, 2)); + assert_noop!(Gilt::place_bid(Origin::signed(1), 5, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(Origin::signed(1), 15, 2)); + assert_eq!(Balances::reserved_balance(1), 45); + + assert_ok!(Gilt::place_bid(Origin::signed(1), 25, 2)); + assert_eq!(Balances::reserved_balance(1), 60); + assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 2), Error::::BidTooLow); + assert_eq!( + Queues::::get(2), + vec![ + GiltBid { amount: 15, who: 1 }, + GiltBid { amount: 25, who: 1 }, + GiltBid { amount: 20, who: 1 }, + ] + ); + assert_eq!(QueueTotals::::get(), vec![(0, 0), (3, 60), (0, 0)]); + }); +} + +#[test] +fn place_bid_fails_when_queue_full() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 10, 2)); + 
assert_noop!(Gilt::place_bid(Origin::signed(4), 10, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(Origin::signed(4), 10, 3)); + }); +} + +#[test] +fn multiple_place_bids_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 3)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + + assert_eq!(Balances::reserved_balance(1), 40); + assert_eq!(Balances::reserved_balance(2), 10); + assert_eq!(Queues::::get(1), vec![GiltBid { amount: 10, who: 1 },]); + assert_eq!( + Queues::::get(2), + vec![ + GiltBid { amount: 10, who: 2 }, + GiltBid { amount: 10, who: 1 }, + GiltBid { amount: 10, who: 1 }, + ] + ); + assert_eq!(Queues::::get(3), vec![GiltBid { amount: 10, who: 1 },]); + assert_eq!(QueueTotals::::get(), vec![(1, 10), (3, 30), (1, 10)]); + }); +} + +#[test] +fn retract_single_item_queue_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 1)); + + assert_eq!(Balances::reserved_balance(1), 10); + assert_eq!(Queues::::get(1), vec![]); + assert_eq!(Queues::::get(2), vec![GiltBid { amount: 10, who: 1 }]); + assert_eq!(QueueTotals::::get(), vec![(0, 0), (1, 10), (0, 0)]); + }); +} + +#[test] +fn retract_with_other_and_duplicate_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + + assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 2)); + assert_eq!(Balances::reserved_balance(1), 20); + assert_eq!(Balances::reserved_balance(2), 10); + assert_eq!(Queues::::get(1), vec![GiltBid { amount: 10, who: 1 },]); + assert_eq!( + Queues::::get(2), + vec![GiltBid { amount: 10, who: 2 }, GiltBid { amount: 10, who: 1 },] + ); + assert_eq!(QueueTotals::::get(), vec![(1, 10), (2, 20), (0, 0)]); + }); +} + +#[test] +fn retract_non_existent_item_fails() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 1), Error::::NotFound); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 20, 1), Error::::NotFound); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 2), Error::::NotFound); + assert_noop!(Gilt::retract_bid(Origin::signed(2), 10, 1), Error::::NotFound); + }); +} + +#[test] +fn basic_enlarge_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + Gilt::enlarge(40, 2); + + // Takes 2/2, then stopped because it reaches its max amount + assert_eq!(Balances::reserved_balance(1), 40); + assert_eq!(Balances::reserved_balance(2), 40); + assert_eq!(Queues::::get(1), vec![GiltBid { amount: 40, who: 1 }]); + assert_eq!(Queues::::get(2), vec![]); + assert_eq!(QueueTotals::::get(), vec![(1, 40), (0, 0), (0, 0)]); + + assert_eq!( + ActiveTotal::::get(), + ActiveGiltsTotal { + frozen: 40, + proportion: Perquintill::from_percent(10), + index: 1, + target: Perquintill::zero(), + } + ); + assert_eq!( + Active::::get(0).unwrap(), + 
ActiveGilt { proportion: Perquintill::from_percent(10), amount: 40, who: 2, expiry: 7 } + ); + }); +} + +#[test] +fn enlarge_respects_bids_limit() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(4), 40, 3)); + Gilt::enlarge(100, 2); + + // Should have taken 4/3 and 2/2, then stopped because it's only allowed 2. + assert_eq!(Queues::::get(1), vec![GiltBid { amount: 40, who: 1 }]); + assert_eq!(Queues::::get(2), vec![GiltBid { amount: 40, who: 3 }]); + assert_eq!(Queues::::get(3), vec![]); + assert_eq!(QueueTotals::::get(), vec![(1, 40), (1, 40), (0, 0)]); + + assert_eq!( + Active::::get(0).unwrap(), + ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 4, + expiry: 10, + } + ); + assert_eq!( + Active::::get(1).unwrap(), + ActiveGilt { proportion: Perquintill::from_percent(10), amount: 40, who: 2, expiry: 7 } + ); + assert_eq!( + ActiveTotal::::get(), + ActiveGiltsTotal { + frozen: 80, + proportion: Perquintill::from_percent(20), + index: 2, + target: Perquintill::zero(), + } + ); + }); +} + +#[test] +fn enlarge_respects_amount_limit_and_will_split() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 80, 1)); + Gilt::enlarge(40, 2); + + // Takes 2/2, then stopped because it reaches its max amount + assert_eq!(Queues::::get(1), vec![GiltBid { amount: 40, who: 1 }]); + assert_eq!(QueueTotals::::get(), vec![(1, 40), (0, 0), (0, 0)]); + + assert_eq!( + Active::::get(0).unwrap(), + ActiveGilt { proportion: Perquintill::from_percent(10), amount: 40, who: 1, expiry: 4 } + ); + assert_eq!( + ActiveTotal::::get(), + ActiveGiltsTotal { + frozen: 40, + proportion: Perquintill::from_percent(10), + index: 1, + target: Perquintill::zero(), + } + ); + }); +} + +#[test] +fn basic_thaw_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + Gilt::enlarge(40, 1); + run_to_block(3); + assert_noop!(Gilt::thaw(Origin::signed(1), 0), Error::::NotExpired); + run_to_block(4); + assert_noop!(Gilt::thaw(Origin::signed(1), 1), Error::::Unknown); + assert_noop!(Gilt::thaw(Origin::signed(2), 0), Error::::NotOwner); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + assert_eq!( + ActiveTotal::::get(), + ActiveGiltsTotal { + frozen: 0, + proportion: Perquintill::zero(), + index: 1, + target: Perquintill::zero(), + } + ); + assert_eq!(Active::::get(0), None); + assert_eq!(Balances::free_balance(1), 100); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn thaw_when_issuance_higher_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + Gilt::enlarge(100, 1); + + // Everybody else's balances goes up by 50% + Balances::make_free_balance_be(&2, 150); + Balances::make_free_balance_be(&3, 150); + Balances::make_free_balance_be(&4, 150); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 150); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn thaw_with_ignored_issuance_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + // Give account zero some balance. 
+ Balances::make_free_balance_be(&0, 200); + + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + Gilt::enlarge(100, 1); + + // Account zero transfers 50 into everyone else's accounts. + assert_ok!(Balances::transfer(Origin::signed(0), 2, 50)); + assert_ok!(Balances::transfer(Origin::signed(0), 3, 50)); + assert_ok!(Balances::transfer(Origin::signed(0), 4, 50)); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + // Account zero changes have been ignored. + assert_eq!(Balances::free_balance(1), 150); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn thaw_when_issuance_lower_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + Gilt::enlarge(100, 1); + + // Everybody else's balances goes down by 25% + Balances::make_free_balance_be(&2, 75); + Balances::make_free_balance_be(&3, 75); + Balances::make_free_balance_be(&4, 75); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 75); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn multiple_thaws_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); + Gilt::enlarge(200, 3); + + // Double everyone's free balances. + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 200); + Balances::make_free_balance_be(&4, 200); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_ok!(Gilt::thaw(Origin::signed(1), 1)); + assert_ok!(Gilt::thaw(Origin::signed(2), 2)); + + assert_eq!(Balances::free_balance(1), 200); + assert_eq!(Balances::free_balance(2), 200); + }); +} + +#[test] +fn multiple_thaws_works_in_alternative_thaw_order() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); + Gilt::enlarge(200, 3); + + // Double everyone's free balances. 
+        Balances::make_free_balance_be(&2, 100);
+        Balances::make_free_balance_be(&3, 200);
+        Balances::make_free_balance_be(&4, 200);
+
+        run_to_block(4);
+        assert_ok!(Gilt::thaw(Origin::signed(2), 2));
+        assert_ok!(Gilt::thaw(Origin::signed(1), 1));
+        assert_ok!(Gilt::thaw(Origin::signed(1), 0));
+
+        assert_eq!(Balances::free_balance(1), 200);
+        assert_eq!(Balances::free_balance(2), 200);
+    });
+}
+
+#[test]
+fn enlargement_to_target_works() {
+    new_test_ext().execute_with(|| {
+        run_to_block(2);
+        assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1));
+        assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 2));
+        assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2));
+        assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 3));
+        assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 3));
+        assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(40)));
+
+        run_to_block(3);
+        assert_eq!(Queues::<Test>::get(1), vec![GiltBid { amount: 40, who: 1 },]);
+        assert_eq!(
+            Queues::<Test>::get(2),
+            vec![GiltBid { amount: 40, who: 2 }, GiltBid { amount: 40, who: 1 },]
+        );
+        assert_eq!(
+            Queues::<Test>::get(3),
+            vec![GiltBid { amount: 40, who: 3 }, GiltBid { amount: 40, who: 2 },]
+        );
+        assert_eq!(QueueTotals::<Test>::get(), vec![(1, 40), (2, 80), (2, 80)]);
+
+        run_to_block(4);
+        // Two new gilts should have been issued to 2 & 3 for 40 each & duration of 3.
+        assert_eq!(
+            Active::<Test>::get(0).unwrap(),
+            ActiveGilt {
+                proportion: Perquintill::from_percent(10),
+                amount: 40,
+                who: 2,
+                expiry: 13,
+            }
+        );
+        assert_eq!(
+            Active::<Test>::get(1).unwrap(),
+            ActiveGilt {
+                proportion: Perquintill::from_percent(10),
+                amount: 40,
+                who: 3,
+                expiry: 13,
+            }
+        );
+        assert_eq!(
+            ActiveTotal::<Test>::get(),
+            ActiveGiltsTotal {
+                frozen: 80,
+                proportion: Perquintill::from_percent(20),
+                index: 2,
+                target: Perquintill::from_percent(40),
+            }
+        );
+
+        run_to_block(5);
+        // No change
+        assert_eq!(
+            ActiveTotal::<Test>::get(),
+            ActiveGiltsTotal {
+                frozen: 80,
+                proportion: Perquintill::from_percent(20),
+                index: 2,
+                target: Perquintill::from_percent(40),
+            }
+        );
+
+        run_to_block(6);
+        // Two new gilts should have been issued to 1 & 2 for 40 each & duration of 2.
+        assert_eq!(
+            Active::<Test>::get(2).unwrap(),
+            ActiveGilt {
+                proportion: Perquintill::from_percent(10),
+                amount: 40,
+                who: 1,
+                expiry: 12,
+            }
+        );
+        assert_eq!(
+            Active::<Test>::get(3).unwrap(),
+            ActiveGilt {
+                proportion: Perquintill::from_percent(10),
+                amount: 40,
+                who: 2,
+                expiry: 12,
+            }
+        );
+        assert_eq!(
+            ActiveTotal::<Test>::get(),
+            ActiveGiltsTotal {
+                frozen: 160,
+                proportion: Perquintill::from_percent(40),
+                index: 4,
+                target: Perquintill::from_percent(40),
+            }
+        );
+
+        run_to_block(8);
+        // No change now.
+        assert_eq!(
+            ActiveTotal::<Test>::get(),
+            ActiveGiltsTotal {
+                frozen: 160,
+                proportion: Perquintill::from_percent(40),
+                index: 4,
+                target: Perquintill::from_percent(40),
+            }
+        );
+
+        // Set target a bit higher to use up the remaining bid.
+        assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(60)));
+        run_to_block(10);
+
+        // One new gilt should have been issued to 1 for 40 with a duration of 1.
+ assert_eq!( + Active::::get(4).unwrap(), + ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 1, + expiry: 13, + } + ); + + assert_eq!( + ActiveTotal::::get(), + ActiveGiltsTotal { + frozen: 200, + proportion: Perquintill::from_percent(50), + index: 5, + target: Perquintill::from_percent(60), + } + ); + }); +} diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs new file mode 100644 index 0000000000000..f54d917cc160c --- /dev/null +++ b/frame/gilt/src/weights.rs @@ -0,0 +1,201 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_gilt +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_gilt +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/gilt/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_gilt. +pub trait WeightInfo { + fn place_bid(l: u32, ) -> Weight; + fn place_bid_max() -> Weight; + fn retract_bid(l: u32, ) -> Weight; + fn set_target() -> Weight; + fn thaw() -> Weight; + fn pursue_target_noop() -> Weight; + fn pursue_target_per_item(b: u32, ) -> Weight; + fn pursue_target_per_queue(q: u32, ) -> Weight; +} + +/// Weights for pallet_gilt using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + fn place_bid(l: u32, ) -> Weight { + (59_219_000 as Weight) + // Standard Error: 0 + .saturating_add((156_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + fn place_bid_max() -> Weight { + (184_943_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + fn retract_bid(l: u32, ) -> Weight { + (59_352_000 as Weight) + // Standard Error: 0 + .saturating_add((129_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt ActiveTotal (r:1 w:1) + fn set_target() -> Weight { + (5_444_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Gilt Active (r:1 w:1) + // Storage: Gilt ActiveTotal (r:1 w:1) + fn thaw() -> Weight { + (71_399_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt ActiveTotal (r:1 w:0) + fn pursue_target_noop() -> Weight { + (3_044_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + // Storage: Gilt ActiveTotal (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt Active (r:0 w:1) + fn pursue_target_per_item(b: u32, ) -> Weight { + (54_478_000 as Weight) + // Standard Error: 2_000 + .saturating_add((10_150_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + } + // Storage: Gilt ActiveTotal (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt Active (r:0 w:1) + fn pursue_target_per_queue(q: u32, ) -> Weight { + (20_099_000 as Weight) + // Standard Error: 7_000 + .saturating_add((16_603_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + fn place_bid(l: u32, ) -> Weight { + (59_219_000 as Weight) + // Standard Error: 0 + .saturating_add((156_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + fn place_bid_max() -> Weight { + (184_943_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt 
QueueTotals (r:1 w:1) + fn retract_bid(l: u32, ) -> Weight { + (59_352_000 as Weight) + // Standard Error: 0 + .saturating_add((129_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt ActiveTotal (r:1 w:1) + fn set_target() -> Weight { + (5_444_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Gilt Active (r:1 w:1) + // Storage: Gilt ActiveTotal (r:1 w:1) + fn thaw() -> Weight { + (71_399_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Gilt ActiveTotal (r:1 w:0) + fn pursue_target_noop() -> Weight { + (3_044_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + // Storage: Gilt ActiveTotal (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt Active (r:0 w:1) + fn pursue_target_per_item(b: u32, ) -> Weight { + (54_478_000 as Weight) + // Standard Error: 2_000 + .saturating_add((10_150_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + } + // Storage: Gilt ActiveTotal (r:1 w:1) + // Storage: Gilt QueueTotals (r:1 w:1) + // Storage: Gilt Queues (r:1 w:1) + // Storage: Gilt Active (r:0 w:1) + fn pursue_target_per_queue(q: u32, ) -> Weight { + (20_099_000 as Weight) + // Standard Error: 7_000 + .saturating_add((16_603_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) + } +} diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 6cde1061df87c..53ab443783e5d 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-grandpa" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,37 +13,39 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0", default-features = false, path = "../../primitives/finality-grandpa" } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = 
{ version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } +pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } -grandpa = { package = "finality-grandpa", version = "0.12.3", features = ["derive-codec"] } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-offences = { version = "2.0.0", path = "../offences" } -pallet-staking = { version = "2.0.0", path = "../staking" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } +frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } +grandpa = { package = "finality-grandpa", version = "0.14.1", features = ["derive-codec"] } +sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-offences = { version = "4.0.0-dev", path = "../offences" } +pallet-staking = { version = "4.0.0-dev", path = "../staking" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } +pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "frame-benchmarking/std", "sp-application-crypto/std", "sp-core/std", @@ -56,5 +58,7 @@ std = [ "frame-system/std", "pallet-authorship/std", "pallet-session/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] 
+try-runtime = ["frame-support/try-runtime"] diff --git a/frame/grandpa/src/benchmarking.rs b/frame/grandpa/src/benchmarking.rs index 048f99fff7a9b..b0f70adb6061d 100644 --- a/frame/grandpa/src/benchmarking.rs +++ b/frame/grandpa/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,14 +19,12 @@ #![cfg_attr(not(feature = "std"), no_std)] -use super::{*, Module as Grandpa}; +use super::{Pallet as Grandpa, *}; use frame_benchmarking::benchmarks; use frame_system::RawOrigin; use sp_core::H256; benchmarks! { - _ { } - check_equivocation_proof { let x in 0 .. 1; @@ -65,8 +63,8 @@ benchmarks! { } note_stalled { - let delay = 1000.into(); - let best_finalized_block_number = 1.into(); + let delay = 1000u32.into(); + let best_finalized_block_number = 1u32.into(); }: _(RawOrigin::Root, delay, best_finalized_block_number) verify { @@ -78,15 +76,12 @@ benchmarks! { mod tests { use super::*; use crate::mock::*; - use frame_support::assert_ok; - #[test] - fn test_benchmarks() { - new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - assert_ok!(test_benchmark_check_equivocation_proof::()); - assert_ok!(test_benchmark_note_stalled::()); - }) - } + frame_benchmarking::impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), + crate::mock::Test, + ); #[test] fn test_generate_equivocation_report_blob() { @@ -108,10 +103,7 @@ mod tests { ); println!("equivocation_proof: {:?}", equivocation_proof); - println!( - "equivocation_proof.encode(): {:?}", - equivocation_proof.encode() - ); + println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); }); } } diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs index 4893fc2cf1860..edc18a7ff8c93 100644 --- a/frame/grandpa/src/default_weights.rs +++ b/frame/grandpa/src/default_weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,8 @@ //! This file was not auto-generated. use frame_support::weights::{ - Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, + constants::{RocksDbWeight as DbWeight, WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, + Weight, }; impl crate::WeightInfo for () { @@ -48,7 +49,6 @@ impl crate::WeightInfo for () { } fn note_stalled() -> Weight { - (3 * WEIGHT_PER_MICROS) - .saturating_add(DbWeight::get().writes(1)) + (3 * WEIGHT_PER_MICROS).saturating_add(DbWeight::get().writes(1)) } } diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index e9662a726c40e..8a23ce6e1ef1e 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! //! An opt-in utility module for reporting equivocations. //! //! 
This module defines an offence type for GRANDPA equivocations @@ -35,12 +34,11 @@ //! When using this module for enabling equivocation reporting it is required //! that the `ValidateUnsigned` for the GRANDPA pallet is used in the runtime //! definition. -//! use sp_std::prelude::*; use codec::{self as codec, Decode, Encode}; -use frame_support::{debug, traits::KeyOwnerProofSystem}; +use frame_support::traits::{Get, KeyOwnerProofSystem}; use sp_finality_grandpa::{EquivocationProof, RoundNumber, SetId}; use sp_runtime::{ transaction_validity::{ @@ -54,16 +52,20 @@ use sp_staking::{ SessionIndex, }; -use super::{Call, Module, Trait}; +use super::{Call, Config, Pallet}; /// A trait with utility methods for handling equivocation reports in GRANDPA. /// The offence type is generic, and the trait provides methods for reporting an offence /// triggered by a valid equivocation report, and also for creating and /// submitting equivocation report extrinsics (useful only in offchain context). -pub trait HandleEquivocation { +pub trait HandleEquivocation { /// The offence type used for reporting offences on valid equivocation reports. type Offence: GrandpaOffence; + /// The longevity, in blocks, that the equivocation report is valid for. When using the staking + /// pallet this should be equal to the bonding duration (in blocks, not eras). + type ReportLongevity: Get; + /// Report an offence proved by the given reporters. fn report_offence( reporters: Vec, @@ -86,8 +88,9 @@ pub trait HandleEquivocation { fn block_author() -> Option; } -impl HandleEquivocation for () { +impl HandleEquivocation for () { type Offence = GrandpaEquivocationOffence; + type ReportLongevity = (); fn report_offence( _reporters: Vec, @@ -119,31 +122,33 @@ impl HandleEquivocation for () { /// using existing subsystems that are part of frame (type bounds described /// below) and will dispatch to them directly; its only purpose is to wire all /// subsystems together. -pub struct EquivocationHandler> { - _phantom: sp_std::marker::PhantomData<(I, R, O)>, +pub struct EquivocationHandler> { + _phantom: sp_std::marker::PhantomData<(I, R, L, O)>, } -impl Default for EquivocationHandler { +impl Default for EquivocationHandler { fn default() -> Self { - Self { - _phantom: Default::default(), - } + Self { _phantom: Default::default() } } } -impl HandleEquivocation for EquivocationHandler +impl HandleEquivocation for EquivocationHandler where // We use the authorship pallet to fetch the current block author and use // `offchain::SendTransactionTypes` for unsigned extrinsic creation and // submission. - T: Trait + pallet_authorship::Trait + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, // A system for reporting offences after valid equivocation reports are // processed. R: ReportOffence, + // The longevity (in blocks) that the equivocation report is valid for. When using the staking + // pallet this should be the bonding duration. + L: Get, // The offence type that should be used when reporting. 
O: GrandpaOffence, { type Offence = O; + type ReportLongevity = L; fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { R::report_offence(reporters, offence) @@ -159,18 +164,28 @@ where ) -> DispatchResult { use frame_system::offchain::SubmitTransaction; - let call = Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof); + let call = Call::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof), + key_owner_proof, + }; match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(()) => debug::info!("Submitted GRANDPA equivocation report."), - Err(e) => debug::error!("Error submitting equivocation report: {:?}", e), + Ok(()) => log::info!( + target: "runtime::afg", + "Submitted GRANDPA equivocation report.", + ), + Err(e) => log::error!( + target: "runtime::afg", + "Error submitting equivocation report: {:?}", + e, + ), } Ok(()) } fn block_author() -> Option { - Some(>::author()) + Some(>::author()) } } @@ -184,26 +199,32 @@ pub struct GrandpaTimeSlot { pub round: RoundNumber, } -/// A `ValidateUnsigned` implementation that restricts calls to `report_equivocation_unsigned` -/// to local calls (i.e. extrinsics generated on this node) or that already in a block. This -/// guarantees that only block authors can include unsigned equivocation reports. -impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::report_equivocation_unsigned(equivocation_proof, _) = call { +/// Methods for the `ValidateUnsigned` implementation: +/// It restricts calls to `report_equivocation_unsigned` to local calls (i.e. extrinsics generated +/// on this node) or that are already in a block. This guarantees that only block authors can include +/// unsigned equivocation reports. +impl Pallet { + pub fn validate_unsigned(source: TransactionSource, call: &Call) -> TransactionValidity { + if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { // discard equivocation report not coming from the local node match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, _ => { - debug::warn!( - target: "afg", + log::warn!( + target: "runtime::afg", "rejecting unsigned report equivocation transaction because it is not local/in-block." ); - return InvalidTransaction::Call.into(); - } + return InvalidTransaction::Call.into() + }, } + // check report staleness + is_known_offence::(equivocation_proof, key_owner_proof)?; + + let longevity = + >::ReportLongevity::get(); + ValidTransaction::with_tag_prefix("GrandpaEquivocation") // We assign the maximum priority for any equivocation report. .priority(TransactionPriority::max_value()) @@ -213,6 +234,7 @@ impl frame_support::unsigned::ValidateUnsigned for Module { equivocation_proof.set_id(), equivocation_proof.round(), )) + .longevity(longevity) // We don't propagate this. This can never be included on a remote node. 
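// Note (illustrative): `longevity` here is measured in blocks, so the unsigned
// report stays valid in the transaction pool for `ReportLongevity` blocks. A
// runtime that wants reports to remain valid for the staking bonding period
// would typically derive the constant as `BondingDuration * SessionsPerEra *
// Period`, as the test runtime further down in this diff does.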
.propagate(false) .build() @@ -221,38 +243,41 @@ impl frame_support::unsigned::ValidateUnsigned for Module { } } - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { - // check the membership proof to extract the offender's id - let key = ( - sp_finality_grandpa::KEY_TYPE, - equivocation_proof.offender().clone(), - ); - - let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) - .ok_or(InvalidTransaction::BadProof)?; - - // check if the offence has already been reported, - // and if so then we can discard the report. - let time_slot = - >::Offence::new_time_slot( - equivocation_proof.set_id(), - equivocation_proof.round(), - ); - - let is_known_offence = T::HandleEquivocation::is_known_offence(&[offender], &time_slot); - - if is_known_offence { - Err(InvalidTransaction::Stale.into()) - } else { - Ok(()) - } + pub fn pre_dispatch(call: &Call) -> Result<(), TransactionValidityError> { + if let Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } = call { + is_known_offence::(equivocation_proof, key_owner_proof) } else { Err(InvalidTransaction::Call.into()) } } } +fn is_known_offence( + equivocation_proof: &EquivocationProof, + key_owner_proof: &T::KeyOwnerProof, +) -> Result<(), TransactionValidityError> { + // check the membership proof to extract the offender's id + let key = (sp_finality_grandpa::KEY_TYPE, equivocation_proof.offender().clone()); + + let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) + .ok_or(InvalidTransaction::BadProof)?; + + // check if the offence has already been reported, + // and if so then we can discard the report. + let time_slot = >::Offence::new_time_slot( + equivocation_proof.set_id(), + equivocation_proof.round(), + ); + + let is_known_offence = T::HandleEquivocation::is_known_offence(&[offender], &time_slot); + + if is_known_offence { + Err(InvalidTransaction::Stale.into()) + } else { + Ok(()) + } +} + /// A grandpa equivocation offence report. #[allow(dead_code)] pub struct GrandpaEquivocationOffence { @@ -330,7 +355,7 @@ impl Offence fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index d4612e1760057..cd75deea770b4 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
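A quick sanity check on the `slash_fraction` hunk above: the formula is min((3k / n)^2, 1), and `Perbill::from_rational` saturates at 100%, which supplies the cap. An illustrative test, not part of the diff:

#[test]
fn slash_fraction_sanity() {
    use sp_runtime::Perbill;
    // k = 1 equivocator among n = 100 validators: (3 / 100)^2 = 0.09% of stake.
    let x = Perbill::from_rational(3u32, 100u32); // 3%
    assert_eq!(x.square(), Perbill::from_parts(900_000)); // 0.09%
    // With k >= n / 3 the ratio saturates at 100%, so the whole stake is slashed.
    let y = Perbill::from_rational(120u32, 100u32);
    assert_eq!(y.square(), Perbill::one());
}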
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,20 +40,18 @@ use fg_primitives::{ GRANDPA_ENGINE_ID, }; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResultWithPostInfo, - storage, traits::KeyOwnerProofSystem, weights::{Pays, Weight}, Parameter, -}; -use frame_system::{ensure_none, ensure_root, ensure_signed}; -use sp_runtime::{ - generic::DigestItem, - traits::Zero, - DispatchResult, KeyTypeId, + dispatch::DispatchResultWithPostInfo, + storage, + traits::{KeyOwnerProofSystem, OneSessionHandler, StorageVersion}, + weights::{Pays, Weight}, }; +use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult, KeyTypeId}; use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::SessionIndex; -mod equivocation; mod default_weights; +mod equivocation; +pub mod migrations; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; @@ -67,202 +65,139 @@ pub use equivocation::{ HandleEquivocation, }; -pub trait Trait: frame_system::Trait { - /// The event type of this module. - type Event: From + Into<::Event>; - - /// The function call. - type Call: From>; - - /// The proof of key ownership, used for validating equivocation reports. - /// The proof must include the session index and validator count of the - /// session at which the equivocation occurred. - type KeyOwnerProof: Parameter + GetSessionNumber + GetValidatorCount; - - /// The identification of a key owner, used when reporting equivocations. - type KeyOwnerIdentification: Parameter; - - /// A system for proving ownership of keys, i.e. that a given key was part - /// of a validator set, needed for validating equivocation reports. - type KeyOwnerProofSystem: KeyOwnerProofSystem< - (KeyTypeId, AuthorityId), - Proof = Self::KeyOwnerProof, - IdentificationTuple = Self::KeyOwnerIdentification, - >; - - /// The equivocation handling subsystem, defines methods to report an - /// offence (after the equivocation has been validated) and for submitting a - /// transaction to report an equivocation (from an offchain context). - /// NOTE: when enabling equivocation handling (i.e. this type isn't set to - /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime - /// definition. - type HandleEquivocation: HandleEquivocation; - - /// Weights for this pallet. - type WeightInfo: WeightInfo; -} - -pub trait WeightInfo { - fn report_equivocation(validator_count: u32) -> Weight; - fn note_stalled() -> Weight; -} - -/// A stored pending change, old format. -// TODO: remove shim -// https://github.com/paritytech/substrate/issues/1614 -#[derive(Encode, Decode)] -pub struct OldStoredPendingChange { - /// The block number this was scheduled at. - pub scheduled_at: N, - /// The delay in blocks until it will be applied. - pub delay: N, - /// The next authority set. - pub next_authorities: AuthorityList, -} - -/// A stored pending change. -#[derive(Encode)] -pub struct StoredPendingChange { - /// The block number this was scheduled at. - pub scheduled_at: N, - /// The delay in blocks until it will be applied. - pub delay: N, - /// The next authority set. - pub next_authorities: AuthorityList, - /// If defined it means the change was forced and the given block number - /// indicates the median last finalized block when the change was signaled. 
- pub forced: Option, -} - -impl Decode for StoredPendingChange { - fn decode(value: &mut I) -> core::result::Result { - let old = OldStoredPendingChange::decode(value)?; - let forced = >::decode(value).unwrap_or(None); - - Ok(StoredPendingChange { - scheduled_at: old.scheduled_at, - delay: old.delay, - next_authorities: old.next_authorities, - forced, - }) +pub use pallet::*; + +use scale_info::TypeInfo; + +/// The current storage version. +const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The event type of this module. + type Event: From + + Into<::Event> + + IsType<::Event>; + + /// The function call. + type Call: From>; + + /// The proof of key ownership, used for validating equivocation reports + /// The proof must include the session index and validator count of the + /// session at which the equivocation occurred. + type KeyOwnerProof: Parameter + GetSessionNumber + GetValidatorCount; + + /// The identification of a key owner, used when reporting equivocations. + type KeyOwnerIdentification: Parameter; + + /// A system for proving ownership of keys, i.e. that a given key was part + /// of a validator set, needed for validating equivocation reports. + type KeyOwnerProofSystem: KeyOwnerProofSystem< + (KeyTypeId, AuthorityId), + Proof = Self::KeyOwnerProof, + IdentificationTuple = Self::KeyOwnerIdentification, + >; + + /// The equivocation handling subsystem, defines methods to report an + /// offence (after the equivocation has been validated) and for submitting a + /// transaction to report an equivocation (from an offchain context). + /// NOTE: when enabling equivocation handling (i.e. this type isn't set to + /// `()`) you must use this pallet's `ValidateUnsigned` in the runtime + /// definition. + type HandleEquivocation: HandleEquivocation; + + /// Weights for this pallet. + type WeightInfo: WeightInfo; } -} -/// Current state of the GRANDPA authority set. State transitions must happen in -/// the same order of states defined below, e.g. `Paused` implies a prior -/// `PendingPause`. -#[derive(Decode, Encode)] -#[cfg_attr(test, derive(Debug, PartialEq))] -pub enum StoredState { - /// The current authority set is live, and GRANDPA is enabled. - Live, - /// There is a pending pause event which will be enacted at the given block - /// height. - PendingPause { - /// Block at which the intention to pause was scheduled. - scheduled_at: N, - /// Number of blocks after which the change will be enacted. - delay: N - }, - /// The current GRANDPA authority set is paused. - Paused, - /// There is a pending resume event which will be enacted at the given block - /// height. - PendingResume { - /// Block at which the intention to resume was scheduled. - scheduled_at: N, - /// Number of blocks after which the change will be enacted. - delay: N, - }, -} - -decl_event! { - pub enum Event { - /// New authority set has been applied. \[authority_set\] - NewAuthorities(AuthorityList), - /// Current authority set has been paused. - Paused, - /// Current authority set has been resumed. - Resumed, - } -} - -decl_error! 
{ - pub enum Error for Module { - /// Attempt to signal GRANDPA pause when the authority set isn't live - /// (either paused or already pending pause). - PauseFailed, - /// Attempt to signal GRANDPA resume when the authority set isn't paused - /// (either live or already pending resume). - ResumeFailed, - /// Attempt to signal GRANDPA change with one already pending. - ChangePending, - /// Cannot signal forced change so soon after last. - TooSoon, - /// A key ownership proof provided as part of an equivocation report is invalid. - InvalidKeyOwnershipProof, - /// An equivocation proof provided as part of an equivocation report is invalid. - InvalidEquivocationProof, - /// A given equivocation report is valid but already previously reported. - DuplicateOffenceReport, - } -} - -decl_storage! { - trait Store for Module as GrandpaFinality { - /// State of the current authority set. - State get(fn state): StoredState = StoredState::Live; - - /// Pending change: (signaled at, scheduled change). - PendingChange get(fn pending_change): Option>; + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_finalize(block_number: T::BlockNumber) { + // check for scheduled pending authority set changes + if let Some(pending_change) = >::get() { + // emit signal if we're at the block that scheduled the change + if block_number == pending_change.scheduled_at { + if let Some(median) = pending_change.forced { + Self::deposit_log(ConsensusLog::ForcedChange( + median, + ScheduledChange { + delay: pending_change.delay, + next_authorities: pending_change.next_authorities.clone(), + }, + )) + } else { + Self::deposit_log(ConsensusLog::ScheduledChange(ScheduledChange { + delay: pending_change.delay, + next_authorities: pending_change.next_authorities.clone(), + })); + } + } - /// next block number where we can force a change. - NextForced get(fn next_forced): Option; + // enact the change if we've reached the enacting block + if block_number == pending_change.scheduled_at + pending_change.delay { + Self::set_grandpa_authorities(&pending_change.next_authorities); + Self::deposit_event(Event::NewAuthorities(pending_change.next_authorities)); + >::kill(); + } + } - /// `true` if we are currently stalled. - Stalled get(fn stalled): Option<(T::BlockNumber, T::BlockNumber)>; + // check for scheduled pending state changes + match >::get() { + StoredState::PendingPause { scheduled_at, delay } => { + // signal change to pause + if block_number == scheduled_at { + Self::deposit_log(ConsensusLog::Pause(delay)); + } - /// The number of changes (both in terms of keys and underlying economic responsibilities) - /// in the "set" of Grandpa validators from genesis. - CurrentSetId get(fn current_set_id) build(|_| fg_primitives::SetId::default()): SetId; + // enact change to paused state + if block_number == scheduled_at + delay { + >::put(StoredState::Paused); + Self::deposit_event(Event::Paused); + } + }, + StoredState::PendingResume { scheduled_at, delay } => { + // signal change to resume + if block_number == scheduled_at { + Self::deposit_log(ConsensusLog::Resume(delay)); + } - /// A mapping from grandpa set ID to the index of the *most recent* session for which its - /// members were responsible. - /// - /// TWOX-NOTE: `SetId` is not under user control. 
- SetIdSession get(fn session_for_set): map hasher(twox_64_concat) SetId => Option; - } - add_extra_genesis { - config(authorities): AuthorityList; - build(|config| { - Module::::initialize(&config.authorities) - }) + // enact change to live state + if block_number == scheduled_at + delay { + >::put(StoredState::Live); + Self::deposit_event(Event::Resumed); + } + }, + _ => {}, + } + } } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; + #[pallet::call] + impl Pallet { /// Report voter equivocation/misbehavior. This method will verify the /// equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence /// will be reported. - #[weight = T::WeightInfo::report_equivocation(key_owner_proof.validator_count())] - fn report_equivocation( - origin, - equivocation_proof: EquivocationProof, + #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] + pub fn report_equivocation( + origin: OriginFor, + equivocation_proof: Box>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; - Self::do_report_equivocation( - Some(reporter), - equivocation_proof, - key_owner_proof, - ) + Self::do_report_equivocation(Some(reporter), *equivocation_proof, key_owner_proof) } /// Report voter equivocation/misbehavior. This method will verify the @@ -274,17 +209,17 @@ decl_module! { /// block authors will call it (validated in `ValidateUnsigned`), as such /// if the block author is defined it will be defined as the equivocation /// reporter. - #[weight = T::WeightInfo::report_equivocation(key_owner_proof.validator_count())] - fn report_equivocation_unsigned( - origin, - equivocation_proof: EquivocationProof, + #[pallet::weight(T::WeightInfo::report_equivocation(key_owner_proof.validator_count()))] + pub fn report_equivocation_unsigned( + origin: OriginFor, + equivocation_proof: Box>, key_owner_proof: T::KeyOwnerProof, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; Self::do_report_equivocation( T::HandleEquivocation::block_author(), - equivocation_proof, + *equivocation_proof, key_owner_proof, ) } @@ -296,83 +231,173 @@ decl_module! { /// forced change will not be re-orged (e.g. 1000 blocks). The GRANDPA voters /// will start the new authority set using the given finalized block as base. /// Only callable by root. 
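A minimal usage sketch for `note_stalled`, borrowing the mock-runtime names defined later in this diff (illustrative only):

// Root forces an authority set change: enacted after a 2-block delay, with
// block 1 taken as the base (best finalized) block.
assert_ok!(Grandpa::note_stalled(Origin::root(), 2, 1));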
- #[weight = T::WeightInfo::note_stalled()] - fn note_stalled( - origin, + #[pallet::weight(T::WeightInfo::note_stalled())] + pub fn note_stalled( + origin: OriginFor, delay: T::BlockNumber, best_finalized_block_number: T::BlockNumber, - ) { + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - Self::on_stalled(delay, best_finalized_block_number) + Ok(Self::on_stalled(delay, best_finalized_block_number).into()) } + } - fn on_finalize(block_number: T::BlockNumber) { - // check for scheduled pending authority set changes - if let Some(pending_change) = >::get() { - // emit signal if we're at the block that scheduled the change - if block_number == pending_change.scheduled_at { - if let Some(median) = pending_change.forced { - Self::deposit_log(ConsensusLog::ForcedChange( - median, - ScheduledChange { - delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), - } - )) - } else { - Self::deposit_log(ConsensusLog::ScheduledChange( - ScheduledChange { - delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), - } - )); - } - } + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event { + /// New authority set has been applied. \[authority_set\] + NewAuthorities(AuthorityList), + /// Current authority set has been paused. + Paused, + /// Current authority set has been resumed. + Resumed, + } - // enact the change if we've reached the enacting block - if block_number == pending_change.scheduled_at + pending_change.delay { - Self::set_grandpa_authorities(&pending_change.next_authorities); - Self::deposit_event( - Event::NewAuthorities(pending_change.next_authorities) - ); - >::kill(); - } - } + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; - // check for scheduled pending state changes - match >::get() { - StoredState::PendingPause { scheduled_at, delay } => { - // signal change to pause - if block_number == scheduled_at { - Self::deposit_log(ConsensusLog::Pause(delay)); - } + #[pallet::error] + pub enum Error { + /// Attempt to signal GRANDPA pause when the authority set isn't live + /// (either paused or already pending pause). + PauseFailed, + /// Attempt to signal GRANDPA resume when the authority set isn't paused + /// (either live or already pending resume). + ResumeFailed, + /// Attempt to signal GRANDPA change with one already pending. + ChangePending, + /// Cannot signal forced change so soon after last. + TooSoon, + /// A key ownership proof provided as part of an equivocation report is invalid. + InvalidKeyOwnershipProof, + /// An equivocation proof provided as part of an equivocation report is invalid. + InvalidEquivocationProof, + /// A given equivocation report is valid but already previously reported. + DuplicateOffenceReport, + } - // enact change to paused state - if block_number == scheduled_at + delay { - >::put(StoredState::Paused); - Self::deposit_event(Event::Paused); - } - }, - StoredState::PendingResume { scheduled_at, delay } => { - // signal change to resume - if block_number == scheduled_at { - Self::deposit_log(ConsensusLog::Resume(delay)); - } + #[pallet::type_value] + pub(super) fn DefaultForState() -> StoredState { + StoredState::Live + } - // enact change to live state - if block_number == scheduled_at + delay { - >::put(StoredState::Live); - Self::deposit_event(Event::Resumed); - } - }, - _ => {}, - } + /// State of the current authority set. 
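// (Under the attribute macros, the `#[pallet::type_value]` function above plus
// `ValueQuery` replaces the old `decl_storage!` default `= StoredState::Live`.)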
+ #[pallet::storage] + #[pallet::getter(fn state)] + pub(super) type State = + StorageValue<_, StoredState, ValueQuery, DefaultForState>; + + /// Pending change: (signaled at, scheduled change). + #[pallet::storage] + #[pallet::getter(fn pending_change)] + pub(super) type PendingChange = StorageValue<_, StoredPendingChange>; + + /// next block number where we can force a change. + #[pallet::storage] + #[pallet::getter(fn next_forced)] + pub(super) type NextForced = StorageValue<_, T::BlockNumber>; + + /// `true` if we are currently stalled. + #[pallet::storage] + #[pallet::getter(fn stalled)] + pub(super) type Stalled = StorageValue<_, (T::BlockNumber, T::BlockNumber)>; + + /// The number of changes (both in terms of keys and underlying economic responsibilities) + /// in the "set" of Grandpa validators from genesis. + #[pallet::storage] + #[pallet::getter(fn current_set_id)] + pub(super) type CurrentSetId = StorageValue<_, SetId, ValueQuery>; + + /// A mapping from grandpa set ID to the index of the *most recent* session for which its + /// members were responsible. + /// + /// TWOX-NOTE: `SetId` is not under user control. + #[pallet::storage] + #[pallet::getter(fn session_for_set)] + pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub authorities: AuthorityList, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { authorities: Default::default() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + CurrentSetId::::put(fg_primitives::SetId::default()); + Pallet::::initialize(&self.authorities) } } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + Self::validate_unsigned(source, call) + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + Self::pre_dispatch(call) + } + } +} + +pub trait WeightInfo { + fn report_equivocation(validator_count: u32) -> Weight; + fn note_stalled() -> Weight; +} + +/// A stored pending change. +#[derive(Encode, Decode, TypeInfo)] +pub struct StoredPendingChange { + /// The block number this was scheduled at. + pub scheduled_at: N, + /// The delay in blocks until it will be applied. + pub delay: N, + /// The next authority set. + pub next_authorities: AuthorityList, + /// If defined it means the change was forced and the given block number + /// indicates the median last finalized block when the change was signaled. + pub forced: Option, +} + +/// Current state of the GRANDPA authority set. State transitions must happen in +/// the same order of states defined below, e.g. `Paused` implies a prior +/// `PendingPause`. +#[derive(Decode, Encode, TypeInfo)] +#[cfg_attr(test, derive(Debug, PartialEq))] +pub enum StoredState { + /// The current authority set is live, and GRANDPA is enabled. + Live, + /// There is a pending pause event which will be enacted at the given block + /// height. + PendingPause { + /// Block at which the intention to pause was scheduled. + scheduled_at: N, + /// Number of blocks after which the change will be enacted. + delay: N, + }, + /// The current GRANDPA authority set is paused. + Paused, + /// There is a pending resume event which will be enacted at the given block + /// height. + PendingResume { + /// Block at which the intention to resume was scheduled. 
+ scheduled_at: N, + /// Number of blocks after which the change will be enacted. + delay: N, + }, } -impl Module { +impl Pallet { /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() @@ -380,21 +405,15 @@ impl Module { /// Set the current set of authorities, along with their respective weights. fn set_grandpa_authorities(authorities: &AuthorityList) { - storage::unhashed::put( - GRANDPA_AUTHORITIES_KEY, - &VersionedAuthorityList::from(authorities), - ); + storage::unhashed::put(GRANDPA_AUTHORITIES_KEY, &VersionedAuthorityList::from(authorities)); } /// Schedule GRANDPA to pause starting in the given number of blocks. /// Cannot be done when already paused. pub fn schedule_pause(in_blocks: T::BlockNumber) -> DispatchResult { if let StoredState::Live = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingPause { - delay: in_blocks, - scheduled_at, - }); + let scheduled_at = >::block_number(); + >::put(StoredState::PendingPause { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -405,11 +424,8 @@ impl Module { /// Schedule a resume of GRANDPA after pausing. pub fn schedule_resume(in_blocks: T::BlockNumber) -> DispatchResult { if let StoredState::Paused = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingResume { - delay: in_blocks, - scheduled_at, - }); + let scheduled_at = >::block_number(); + >::put(StoredState::PendingResume { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -437,7 +453,7 @@ impl Module { forced: Option, ) -> DispatchResult { if !>::exists() { - let scheduled_at = >::block_number(); + let scheduled_at = >::block_number(); if let Some(_) = forced { if Self::next_forced().map_or(false, |next| next > scheduled_at) { @@ -446,7 +462,7 @@ impl Module { // only allow the next forced change when twice the window has passed since // this one. - >::put(scheduled_at + in_blocks * 2.into()); + >::put(scheduled_at + in_blocks * 2u32.into()); } >::put(StoredPendingChange { @@ -465,24 +481,21 @@ impl Module { /// Deposit one of this module's logs. fn deposit_log(log: ConsensusLog) { let log: DigestItem = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); - >::deposit_log(log.into()); + >::deposit_log(log.into()); } // Perform module initialization, abstracted so that it can be called either through genesis // config builder or through `on_genesis_session`. fn initialize(authorities: &AuthorityList) { if !authorities.is_empty() { - assert!( - Self::grandpa_authorities().is_empty(), - "Authorities are already initialized!" - ); + assert!(Self::grandpa_authorities().is_empty(), "Authorities are already initialized!"); Self::set_grandpa_authorities(authorities); } // NOTE: initialize first session of first set. this is necessary for // the genesis set and session since we only update the set -> session // mapping whenever a new session starts, i.e. through `on_new_session`. - SetIdSession::insert(0, 0); + SetIdSession::::insert(0, 0); } fn do_report_equivocation( @@ -500,16 +513,16 @@ impl Module { let validator_count = key_owner_proof.validator_count(); // validate the key ownership proof extracting the id of the offender. 
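// A key ownership proof commits to the session index and validator count (see
// the `Config` docs above); `check_proof` only yields the offender's
// identification if the reported key really was part of that session's set.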
- let offender = - T::KeyOwnerProofSystem::check_proof( - (fg_primitives::KEY_TYPE, equivocation_proof.offender().clone()), - key_owner_proof, - ).ok_or(Error::::InvalidKeyOwnershipProof)?; + let offender = T::KeyOwnerProofSystem::check_proof( + (fg_primitives::KEY_TYPE, equivocation_proof.offender().clone()), + key_owner_proof, + ) + .ok_or(Error::::InvalidKeyOwnershipProof)?; // validate equivocation proof (check votes are different and // signatures are valid). if !sp_finality_grandpa::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } // fetch the current and previous sets last session index. on the @@ -517,31 +530,23 @@ impl Module { let previous_set_id_session_index = if set_id == 0 { None } else { - let session_index = - if let Some(session_id) = Self::session_for_set(set_id - 1) { - session_id - } else { - return Err(Error::::InvalidEquivocationProof.into()); - }; + let session_index = Self::session_for_set(set_id - 1) + .ok_or_else(|| Error::::InvalidEquivocationProof)?; Some(session_index) }; let set_id_session_index = - if let Some(session_id) = Self::session_for_set(set_id) { - session_id - } else { - return Err(Error::::InvalidEquivocationProof.into()); - }; + Self::session_for_set(set_id).ok_or_else(|| Error::::InvalidEquivocationProof)?; // check that the session id for the membership proof is within the // bounds of the set id reported in the equivocation. if session_index > set_id_session_index || previous_set_id_session_index - .map(|previous_index| session_index <= previous_index) - .unwrap_or(false) + .map(|previous_index| session_index <= previous_index) + .unwrap_or(false) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } // report to the offences module rewarding the sender. @@ -554,7 +559,8 @@ impl Module { set_id, round, ), - ).map_err(|_| Error::::DuplicateOffenceReport)?; + ) + .map_err(|_| Error::::DuplicateOffenceReport)?; // waive the fee since the report is valid and beneficial Ok(Pays::No.into()) @@ -583,24 +589,27 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } -impl pallet_session::OneSessionHandler for Module - where T: pallet_session::Trait +impl OneSessionHandler for Pallet +where + T: pallet_session::Config, { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where + I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); Self::initialize(&authorities); } fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) - where I: Iterator + where + I: Iterator, { // Always issue a change if `session` says that the validators have changed. // Even if their session keys are the same as before, the underlying economic @@ -615,7 +624,7 @@ impl pallet_session::OneSessionHandler for Module }; if res.is_ok() { - CurrentSetId::mutate(|s| { + CurrentSetId::::mutate(|s| { *s += 1; *s }) @@ -633,8 +642,8 @@ impl pallet_session::OneSessionHandler for Module // if we didn't issue a change, we update the mapping to note that the current // set corresponds to the latest equivalent session (i.e. now). 
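// Keeping `SetIdSession` up to date is what allows `do_report_equivocation`
// above to bound the session of a key-ownership proof to the set id named in
// the equivocation report.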
- let session_index = >::current_index(); - SetIdSession::insert(current_set_id, &session_index); + let session_index = >::current_index(); + SetIdSession::::insert(current_set_id, &session_index); } fn on_disabled(i: usize) { diff --git a/frame/grandpa/src/migrations.rs b/frame/grandpa/src/migrations.rs new file mode 100644 index 0000000000000..05c24e11b3939 --- /dev/null +++ b/frame/grandpa/src/migrations.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Version 4. +pub mod v4; diff --git a/frame/grandpa/src/migrations/v4.rs b/frame/grandpa/src/migrations/v4.rs new file mode 100644 index 0000000000000..094f276efef31 --- /dev/null +++ b/frame/grandpa/src/migrations/v4.rs @@ -0,0 +1,110 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{ + traits::{Get, StorageVersion}, + weights::Weight, +}; +use sp_io::hashing::twox_128; + +/// The old prefix. +pub const OLD_PREFIX: &[u8] = b"GrandpaFinality"; + +/// Migrate the entire storage of this pallet to a new prefix. +/// +/// This new prefix must be the same as the one set in construct_runtime. For safety, use +/// `PalletInfo` to get it, as: +/// `::PalletInfo::name::`. +/// +/// The old storage prefix, `GrandpaFinality`, is hardcoded in the migration code. +pub fn migrate>(new_pallet_name: N) -> Weight { + if new_pallet_name.as_ref().as_bytes() == OLD_PREFIX { + log::info!( + target: "runtime::afg", + "New pallet name is equal to the old prefix. No migration needs to be done.", + ); + return 0 + } + let storage_version = StorageVersion::get::>(); + log::info!( + target: "runtime::afg", + "Running migration to v4 for grandpa with storage version {:?}", + storage_version, + ); + + if storage_version <= 3 { + log::info!("new prefix: {}", new_pallet_name.as_ref()); + frame_support::storage::migration::move_pallet( + OLD_PREFIX, + new_pallet_name.as_ref().as_bytes(), + ); + + StorageVersion::new(4).put::>(); + + ::BlockWeights::get().max_block + } else { + 0 + } +} + +/// Some checks prior to migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. 
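For orientation, a runtime would call the new `migrate` function from an `OnRuntimeUpgrade` hook, with the pre-/post-migration helpers here wired into the corresponding try-runtime checks. A minimal sketch, assuming the signature `migrate<T: Config, N: AsRef<str>>` and a runtime whose `construct_runtime!` names this pallet `Grandpa` (the `Runtime` and `Grandpa` identifiers are illustrative, not part of this diff):

pub struct GrandpaStoragePrefixMigration;
impl frame_support::traits::OnRuntimeUpgrade for GrandpaStoragePrefixMigration {
    fn on_runtime_upgrade() -> frame_support::weights::Weight {
        use frame_support::traits::PalletInfo;
        // Resolve the prefix from `construct_runtime!` rather than hardcoding it,
        // as the doc comment on `migrate` advises.
        let name = <Runtime as frame_system::Config>::PalletInfo::name::<Grandpa>()
            .expect("grandpa is declared in construct_runtime; qed");
        pallet_grandpa::migrations::v4::migrate::<Runtime, &str>(name)
    }
}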
+pub fn pre_migration>(new: N) { + let new = new.as_ref(); + log::info!("pre-migration grandpa test with new = {}", new); + + // the next key must exist, and start with the hash of `OLD_PREFIX`. + let next_key = sp_io::storage::next_key(&twox_128(OLD_PREFIX)).unwrap(); + assert!(next_key.starts_with(&twox_128(OLD_PREFIX))); + + // The pallet version is already stored using the pallet name + let storage_key = StorageVersion::storage_key::>(); + + // ensure nothing is stored in the new prefix. + assert!( + sp_io::storage::next_key(&twox_128(new.as_bytes())).map_or( + // either nothing is there + true, + // or we ensure that it has no common prefix with twox_128(new), + // or isn't the pallet version that is already stored using the pallet name + |next_key| { + !next_key.starts_with(&twox_128(new.as_bytes())) || next_key == storage_key + }, + ), + "unexpected next_key({}) = {:?}", + new, + sp_core::hexdisplay::HexDisplay::from( + &sp_io::storage::next_key(&twox_128(new.as_bytes())).unwrap() + ), + ); + // ensure storage version is 3. + assert_eq!(StorageVersion::get::>(), 3); +} + +/// Some checks for after migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn post_migration() { + log::info!("post-migration grandpa"); + + // Assert that nothing remains at the old prefix + assert!(sp_io::storage::next_key(&twox_128(OLD_PREFIX)) + .map_or(true, |next_key| !next_key.starts_with(&twox_128(OLD_PREFIX)))); +} diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index d3461eec12dc4..26dda514516a3 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,18 +19,18 @@ #![cfg(test)] -use crate::{AuthorityId, AuthorityList, ConsensusLog, Module, Trait}; +use crate::{self as pallet_grandpa, AuthorityId, AuthorityList, Config, ConsensusLog}; use ::grandpa as finality_grandpa; use codec::Encode; +use frame_election_provider_support::onchain; use frame_support::{ - impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, - traits::{KeyOwnerProofSystem, OnFinalize, OnInitialize}, - weights::Weight, + parameter_types, + traits::{GenesisBuild, KeyOwnerProofSystem, OnFinalize, OnInitialize}, }; +use pallet_session::historical as pallet_session_historical; use pallet_staking::EraIndex; use sp_core::{crypto::KeyTypeId, H256}; use sp_finality_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; -use sp_io; use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, @@ -41,46 +41,44 @@ use sp_runtime::{ }; use sp_staking::SessionIndex; -impl_outer_origin! { - pub enum Origin for Test {} -} - -impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - pallet_grandpa::Grandpa, - pallet_staking::Staking, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned}, + Offences: pallet_offences::{Pallet, Storage, Event}, + Historical: pallet_session_historical::{Pallet}, } -} +); impl_opaque_keys! { pub struct TestSessionKeys { - pub grandpa_authority: super::Module, - } -} - -impl_outer_event! { - pub enum TestEvent for Test { - frame_system, - pallet_balances, - pallet_grandpa, - pallet_offences, - pallet_session, - pallet_staking, + pub grandpa_authority: super::Pallet, } } -#[derive(Clone, Eq, PartialEq)] -pub struct Test; - parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -90,21 +88,16 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } impl frame_system::offchain::SendTransactionTypes for Test @@ -122,8 +115,8 @@ parameter_types! { } /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. -impl pallet_session::Trait for Test { - type Event = TestEvent; +impl pallet_session::Config for Test { + type Event = Event; type ValidatorId = u64; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -135,7 +128,7 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -144,7 +137,7 @@ parameter_types! 
{ pub const UncleGenerations: u64 = 0; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = (); type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -155,11 +148,13 @@ parameter_types! { pub const ExistentialDeposit: u128 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type Event = TestEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -169,7 +164,7 @@ parameter_types! { pub const MinimumPeriod: u64 = 3; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -195,13 +190,19 @@ parameter_types! { pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const ElectionLookahead: u64 = 0; - pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; + pub const StakingUnsignedPriority: u64 = u64::MAX / 2; } -impl pallet_staking::Trait for Test { +impl onchain::Config for Test { + type Accuracy = Perbill; + type DataProvider = Staking; +} + +impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type Event = TestEvent; + type Event = Event; type Currency = Balances; type Slash = (); type Reward = (); @@ -210,32 +211,28 @@ impl pallet_staking::Trait for Test { type SlashDeferDuration = SlashDeferDuration; type SlashCancelOrigin = frame_system::EnsureRoot; type SessionInterface = Self; - type UnixTime = pallet_timestamp::Module; - type RewardCurve = RewardCurve; + type UnixTime = pallet_timestamp::Pallet; + type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type UnsignedPriority = StakingUnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } -parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); -} - -impl pallet_offences::Trait for Test { - type Event = TestEvent; +impl pallet_offences::Config for Test { + type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; - type WeightSoftLimit = OffencesWeightSoftLimit; } -impl Trait for Test { - type Event = TestEvent; +parameter_types! 
{ + pub const ReportLongevity: u64 = + BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * Period::get(); +} + +impl Config for Test { + type Event = Event; type Call = Call; type KeyOwnerProofSystem = Historical; @@ -248,24 +245,12 @@ impl Trait for Test { AuthorityId, )>>::IdentificationTuple; - type HandleEquivocation = super::EquivocationHandler; + type HandleEquivocation = + super::EquivocationHandler; type WeightInfo = (); } -mod pallet_grandpa { - pub use crate::Event; -} - -pub type Balances = pallet_balances::Module; -pub type Historical = pallet_session::historical::Module; -pub type Offences = pallet_offences::Module; -pub type Session = pallet_session::Module; -pub type Staking = pallet_staking::Module; -pub type System = frame_system::Module; -pub type Timestamp = pallet_timestamp::Module; -pub type Grandpa = Module; - pub fn grandpa_log(log: ConsensusLog) -> DigestItem { DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()) } @@ -287,8 +272,12 @@ pub fn new_test_ext(vec: Vec<(u64, u64)>) -> sp_io::TestExternalities { } pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) .unwrap(); // stashes are the index. @@ -299,38 +288,23 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx ( i as u64, i as u64, - TestSessionKeys { - grandpa_authority: AuthorityId::from(k.clone()), - }, - ) - }) - .collect(); - - // controllers are the index + 1000 - let stakers: Vec<_> = (0..authorities.len()) - .map(|i| { - ( - i as u64, - i as u64 + 1000, - 10_000, - pallet_staking::StakerStatus::::Validator, + TestSessionKeys { grandpa_authority: AuthorityId::from(k.clone()) }, ) }) .collect(); - let balances: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, 10_000_000)) - .collect(); - // NOTE: this will initialize the grandpa authorities // through OneSessionHandler::on_genesis_session pallet_session::GenesisConfig:: { keys: session_keys } .assimilate_storage(&mut t) .unwrap(); - pallet_balances::GenesisConfig:: { balances } - .assimilate_storage(&mut t) - .unwrap(); + // controllers are the index + 1000 + let stakers: Vec<_> = (0..authorities.len()) + .map(|i| { + (i as u64, i as u64 + 1000, 10_000, pallet_staking::StakerStatus::::Validator) + }) + .collect(); let staking_config = pallet_staking::GenesisConfig:: { stakers, @@ -360,13 +334,7 @@ pub fn start_session(session_index: SessionIndex) { System::parent_hash() }; - System::initialize( - &(i as u64 + 1), - &parent_hash, - &Default::default(), - &Default::default(), - Default::default(), - ); + System::initialize(&(i as u64 + 1), &parent_hash, &Default::default(), Default::default()); System::set_block_number((i + 1).into()); Timestamp::set_timestamp(System::block_number() * 6000); @@ -385,13 +353,7 @@ pub fn start_era(era_index: EraIndex) { } pub fn initialize_block(number: u64, parent_hash: H256) { - System::initialize( - &number, - &parent_hash, - &Default::default(), - &Default::default(), - Default::default(), - ); + System::initialize(&number, &parent_hash, &Default::default(), Default::default()); } pub fn generate_equivocation_proof( @@ -400,10 +362,7 @@ pub fn generate_equivocation_proof( vote2: (RoundNumber, H256, u64, 
&Ed25519Keyring), ) -> sp_finality_grandpa::EquivocationProof { let signed_prevote = |round, hash, number, keyring: &Ed25519Keyring| { - let prevote = finality_grandpa::Prevote { - target_hash: hash, - target_number: number, - }; + let prevote = finality_grandpa::Prevote { target_hash: hash, target_number: number }; let prevote_msg = finality_grandpa::Message::Prevote(prevote.clone()); let payload = sp_finality_grandpa::localized_payload(round, set_id, &prevote_msg); diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 4916808fe000f..98f54f966fadc 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,17 +19,16 @@ #![cfg(test)] -use super::{Call, *}; +use super::{Call, Event, *}; use crate::mock::*; -use codec::{Decode, Encode}; +use codec::Encode; use fg_primitives::ScheduledChange; use frame_support::{ - assert_err, assert_ok, - traits::{Currency, OnFinalize}, + assert_err, assert_noop, assert_ok, + traits::{Currency, OnFinalize, OneSessionHandler}, weights::{GetDispatchInfo, Pays}, }; use frame_system::{EventRecord, Phase}; -use pallet_session::OneSessionHandler; use sp_core::H256; use sp_keyring::Ed25519Keyring; use sp_runtime::testing::Digest; @@ -44,21 +43,24 @@ fn authorities_change_logged() { Grandpa::on_finalize(1); let header = System::finalize(); - assert_eq!(header.digest, Digest { - logs: vec![ - grandpa_log(ConsensusLog::ScheduledChange( - ScheduledChange { delay: 0, next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) } - )), - ], - }); - - assert_eq!(System::events(), vec![ - EventRecord { + assert_eq!( + header.digest, + Digest { + logs: vec![grandpa_log(ConsensusLog::ScheduledChange(ScheduledChange { + delay: 0, + next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + })),], + } + ); + + assert_eq!( + System::events(), + vec![EventRecord { phase: Phase::Finalization, event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), topics: vec![], - }, - ]); + },] + ); }); } @@ -69,13 +71,15 @@ fn authorities_change_logged_after_delay() { Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); Grandpa::on_finalize(1); let header = System::finalize(); - assert_eq!(header.digest, Digest { - logs: vec![ - grandpa_log(ConsensusLog::ScheduledChange( - ScheduledChange { delay: 1, next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) } - )), - ], - }); + assert_eq!( + header.digest, + Digest { + logs: vec![grandpa_log(ConsensusLog::ScheduledChange(ScheduledChange { + delay: 1, + next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + })),], + } + ); // no change at this height. 
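// A GRANDPA equivocation proof, as assembled by the helper above, boils down
// to two prevotes signed by the same authority in the same round and set id
// but for different targets. A minimal std-only sketch of that invariant;
// `SketchPrevote` and `is_equivocation` are simplified stand-ins for
// illustration, not the sp_finality_grandpa types:

#[derive(Clone, PartialEq, Eq, Debug)]
struct SketchPrevote {
	set_id: u64,
	round: u64,
	authority: u64, // stand-in for an Ed25519 public key
	target_hash: [u8; 32],
}

fn is_equivocation(a: &SketchPrevote, b: &SketchPrevote) -> bool {
	a.set_id == b.set_id &&
		a.round == b.round &&
		a.authority == b.authority &&
		a.target_hash != b.target_hash
}

#[test]
fn sketch_detects_double_votes() {
	let a = SketchPrevote { set_id: 1, round: 10, authority: 7, target_hash: [0; 32] };
	let b = SketchPrevote { target_hash: [1; 32], ..a.clone() };
	assert!(is_equivocation(&a, &b));
	// Signing the same target twice is not an equivocation.
	assert!(!is_equivocation(&a, &a.clone()));
}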
assert_eq!(System::events(), vec![]); @@ -85,13 +89,14 @@ fn authorities_change_logged_after_delay() { Grandpa::on_finalize(2); let _header = System::finalize(); - assert_eq!(System::events(), vec![ - EventRecord { + assert_eq!( + System::events(), + vec![EventRecord { phase: Phase::Finalization, event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), topics: vec![], - }, - ]); + },] + ); }); } @@ -101,55 +106,44 @@ fn cannot_schedule_change_when_one_pending() { initialize_block(1, Default::default()); Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); Grandpa::on_finalize(1); let header = System::finalize(); initialize_block(2, header.hash()); assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); Grandpa::on_finalize(2); let header = System::finalize(); initialize_block(3, header.hash()); assert!(!>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); + assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None)); Grandpa::on_finalize(3); let _header = System::finalize(); }); } -#[test] -fn new_decodes_from_old() { - let old = OldStoredPendingChange { - scheduled_at: 5u32, - delay: 100u32, - next_authorities: to_authorities(vec![(1, 5), (2, 10), (3, 2)]), - }; - - let encoded = old.encode(); - let new = StoredPendingChange::::decode(&mut &encoded[..]).unwrap(); - assert!(new.forced.is_none()); - assert_eq!(new.scheduled_at, old.scheduled_at); - assert_eq!(new.delay, old.delay); - assert_eq!(new.next_authorities, old.next_authorities); -} - #[test] fn dispatch_forced_change() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { initialize_block(1, Default::default()); - Grandpa::schedule_change( - to_authorities(vec![(4, 1), (5, 1), (6, 1)]), - 5, - Some(0), - ).unwrap(); + Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap(); assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)), + Error::::ChangePending + ); Grandpa::on_finalize(1); let mut header = System::finalize(); @@ -158,8 +152,14 @@ fn dispatch_forced_change() { initialize_block(i, header.hash()); assert!(>::get().unwrap().forced.is_some()); assert_eq!(Grandpa::next_forced(), Some(11)); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)), + Error::::ChangePending + ); Grandpa::on_finalize(i); header = System::finalize(); @@ -170,8 +170,11 @@ fn dispatch_forced_change() { { initialize_block(7, header.hash()); assert!(!>::exists()); - assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); + assert_eq!( + 
Grandpa::grandpa_authorities(), + to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + ); + assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None)); Grandpa::on_finalize(7); header = System::finalize(); } @@ -180,8 +183,14 @@ fn dispatch_forced_change() { { initialize_block(8, header.hash()); assert!(>::exists()); - assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + assert_eq!( + Grandpa::grandpa_authorities(), + to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + ); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); Grandpa::on_finalize(8); header = System::finalize(); } @@ -193,7 +202,10 @@ fn dispatch_forced_change() { assert!(!>::exists()); assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)])); assert_eq!(Grandpa::next_forced(), Some(11)); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)), + Error::::TooSoon + ); Grandpa::on_finalize(i); header = System::finalize(); } @@ -201,7 +213,11 @@ fn dispatch_forced_change() { { initialize_block(11, header.hash()); assert!(!>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1), (7, 1)]), 5, Some(0)).is_ok()); + assert_ok!(Grandpa::schedule_change( + to_authorities(vec![(5, 1), (6, 1), (7, 1)]), + 5, + Some(0) + )); assert_eq!(Grandpa::next_forced(), Some(21)); Grandpa::on_finalize(11); header = System::finalize(); @@ -218,13 +234,7 @@ fn schedule_pause_only_when_live() { Grandpa::schedule_pause(1).unwrap(); // we've switched to the pending pause state - assert_eq!( - Grandpa::state(), - StoredState::PendingPause { - scheduled_at: 1u64, - delay: 1, - }, - ); + assert_eq!(Grandpa::state(), StoredState::PendingPause { scheduled_at: 1u64, delay: 1 }); Grandpa::on_finalize(1); let _ = System::finalize(); @@ -232,16 +242,13 @@ fn schedule_pause_only_when_live() { initialize_block(2, Default::default()); // signaling a pause now should fail - assert!(Grandpa::schedule_pause(1).is_err()); + assert_noop!(Grandpa::schedule_pause(1), Error::::PauseFailed); Grandpa::on_finalize(2); let _ = System::finalize(); // after finalizing block 2 the set should have switched to paused state - assert_eq!( - Grandpa::state(), - StoredState::Paused, - ); + assert_eq!(Grandpa::state(), StoredState::Paused); }); } @@ -251,22 +258,16 @@ fn schedule_resume_only_when_paused() { initialize_block(1, Default::default()); // the set is currently live, resuming it is an error - assert!(Grandpa::schedule_resume(1).is_err()); + assert_noop!(Grandpa::schedule_resume(1), Error::::ResumeFailed); - assert_eq!( - Grandpa::state(), - StoredState::Live, - ); + assert_eq!(Grandpa::state(), StoredState::Live); // we schedule a pause to be applied instantly Grandpa::schedule_pause(0).unwrap(); Grandpa::on_finalize(1); let _ = System::finalize(); - assert_eq!( - Grandpa::state(), - StoredState::Paused, - ); + assert_eq!(Grandpa::state(), StoredState::Paused); // we schedule the set to go back live in 2 blocks initialize_block(2, Default::default()); @@ -283,10 +284,7 @@ fn schedule_resume_only_when_paused() { let _ = System::finalize(); // it should be live at block 4 - assert_eq!( - Grandpa::state(), - StoredState::Live, - ); + assert_eq!(Grandpa::state(), StoredState::Live); }); } @@ -294,26 +292,11 @@ fn 
schedule_resume_only_when_paused() { fn time_slot_have_sane_ord() { // Ensure that `Ord` implementation is sane. const FIXTURE: &[GrandpaTimeSlot] = &[ - GrandpaTimeSlot { - set_id: 0, - round: 0, - }, - GrandpaTimeSlot { - set_id: 0, - round: 1, - }, - GrandpaTimeSlot { - set_id: 1, - round: 0, - }, - GrandpaTimeSlot { - set_id: 1, - round: 1, - }, - GrandpaTimeSlot { - set_id: 1, - round: 2, - } + GrandpaTimeSlot { set_id: 0, round: 0 }, + GrandpaTimeSlot { set_id: 0, round: 1 }, + GrandpaTimeSlot { set_id: 1, round: 0 }, + GrandpaTimeSlot { set_id: 1, round: 1 }, + GrandpaTimeSlot { set_id: 1, round: 2 }, ]; assert!(FIXTURE.windows(2).all(|f| f[0] < f[1])); } @@ -321,16 +304,9 @@ fn time_slot_have_sane_ord() { /// Returns a list with 3 authorities with known keys: /// Alice, Bob and Charlie. pub fn test_authorities() -> AuthorityList { - let authorities = vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ]; + let authorities = vec![Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - authorities - .into_iter() - .map(|id| (id.public().into(), 1u64)) - .collect() + authorities.into_iter().map(|id| (id.public().into(), 1u64)).collect() } #[test] @@ -353,11 +329,7 @@ fn report_equivocation_current_set_works() { assert_eq!( Staking::eras_stakers(1, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -380,13 +352,11 @@ fn report_equivocation_current_set_works() { Historical::prove((sp_finality_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); // report the equivocation and the tx should be dispatched successfully - assert_ok!( - Grandpa::report_equivocation_unsigned( - Origin::none(), - equivocation_proof, - key_owner_proof, - ), - ); + assert_ok!(Grandpa::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ),); start_era(2); @@ -397,17 +367,13 @@ fn report_equivocation_current_set_works() { assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( Staking::eras_stakers(2, equivocation_validator_id), - pallet_staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); // check that the balances of all other validators are left intact. 
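// `time_slot_have_sane_ord` above leans on the derived lexicographic `Ord` of
// (set_id, round) and checks strict monotonicity with `windows(2)`. The same
// pattern in a std-only sketch, with a simplified stand-in type:

#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
struct TimeSlotSketch {
	set_id: u64,
	round: u64,
}

#[test]
fn derived_ord_is_lexicographic() {
	// `set_id` is compared first because it is declared first; `round`
	// breaks ties, mirroring the FIXTURE ordering above.
	let fixture = [
		TimeSlotSketch { set_id: 0, round: 0 },
		TimeSlotSketch { set_id: 0, round: 1 },
		TimeSlotSketch { set_id: 1, round: 0 },
		TimeSlotSketch { set_id: 1, round: 2 },
	];
	assert!(fixture.windows(2).all(|w| w[0] < w[1]));
}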
for validator in &validators { if *validator == equivocation_validator_id { - continue; + continue } assert_eq!(Balances::total_balance(validator), 10_000_000); @@ -415,11 +381,7 @@ fn report_equivocation_current_set_works() { assert_eq!( Staking::eras_stakers(2, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } }); @@ -451,11 +413,7 @@ fn report_equivocation_old_set_works() { assert_eq!( Staking::eras_stakers(2, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -472,13 +430,11 @@ fn report_equivocation_old_set_works() { // report the equivocation using the key ownership proof generated on // the old set, the tx should be dispatched successfully - assert_ok!( - Grandpa::report_equivocation_unsigned( - Origin::none(), - equivocation_proof, - key_owner_proof, - ), - ); + assert_ok!(Grandpa::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ),); start_era(3); @@ -490,17 +446,13 @@ fn report_equivocation_old_set_works() { assert_eq!( Staking::eras_stakers(3, equivocation_validator_id), - pallet_staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); // check that the balances of all other validators are left intact. for validator in &validators { if *validator == equivocation_validator_id { - continue; + continue } assert_eq!(Balances::total_balance(validator), 10_000_000); @@ -508,11 +460,7 @@ fn report_equivocation_old_set_works() { assert_eq!( Staking::eras_stakers(3, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } }); @@ -547,7 +495,7 @@ fn report_equivocation_invalid_set_id() { assert_err!( Grandpa::report_equivocation_unsigned( Origin::none(), - equivocation_proof, + Box::new(equivocation_proof), key_owner_proof, ), Error::::InvalidEquivocationProof, @@ -588,7 +536,7 @@ fn report_equivocation_invalid_session() { assert_err!( Grandpa::report_equivocation_unsigned( Origin::none(), - equivocation_proof, + Box::new(equivocation_proof), key_owner_proof, ), Error::::InvalidEquivocationProof, @@ -633,7 +581,7 @@ fn report_equivocation_invalid_key_owner_proof() { assert_err!( Grandpa::report_equivocation_unsigned( Origin::none(), - equivocation_proof, + Box::new(equivocation_proof), invalid_key_owner_proof, ), Error::::InvalidKeyOwnershipProof, @@ -664,7 +612,7 @@ fn report_equivocation_invalid_equivocation_proof() { assert_err!( Grandpa::report_equivocation_unsigned( Origin::none(), - equivocation_proof, + Box::new(equivocation_proof), key_owner_proof.clone(), ), Error::::InvalidEquivocationProof, @@ -707,8 +655,8 @@ fn report_equivocation_invalid_equivocation_proof() { #[test] fn report_equivocation_validate_unsigned_prevents_duplicates() { use sp_runtime::transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, - TransactionValidity, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, }; let authorities = test_authorities(); @@ -733,10 +681,10 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { let key_owner_proof = 
Historical::prove((sp_finality_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); - let call = Call::report_equivocation_unsigned( - equivocation_proof.clone(), - key_owner_proof.clone(), - ); + let call = Call::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof.clone()), + key_owner_proof: key_owner_proof.clone(), + }; // only local/inblock reports are allowed assert_eq!( @@ -748,11 +696,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { ); // the transaction is valid when passed as local - let tx_tag = ( - equivocation_key, - set_id, - 1u64, - ); + let tx_tag = (equivocation_key, set_id, 1u64); assert_eq!( ::validate_unsigned( @@ -763,7 +707,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { priority: TransactionPriority::max_value(), requires: vec![], provides: vec![("GrandpaEquivocation", tx_tag).encode()], - longevity: TransactionLongevity::max_value(), + longevity: ReportLongevity::get(), propagate: false, }) ); @@ -772,10 +716,23 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { assert_ok!(::pre_dispatch(&call)); // we submit the report - Grandpa::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) - .unwrap(); + Grandpa::report_equivocation_unsigned( + Origin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) + .unwrap(); // the report should now be considered stale and the transaction is invalid + // the check for staleness should be done on both `validate_unsigned` and on `pre_dispatch` + assert_err!( + ::validate_unsigned( + TransactionSource::Local, + &call, + ), + InvalidTransaction::Stale, + ); + assert_err!( ::pre_dispatch(&call), InvalidTransaction::Stale, @@ -848,23 +805,19 @@ fn always_schedules_a_change_on_new_session_when_stalled() { fn report_equivocation_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. - assert!( - (1..=100) - .map(::WeightInfo::report_equivocation) - .collect::>() - .windows(2) - .all(|w| w[0] == w[1]) - ); + assert!((1..=100) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] == w[1])); // after 100 validators the weight should keep increasing // with every extra validator. - assert!( - (100..=1000) - .map(::WeightInfo::report_equivocation) - .collect::>() - .windows(2) - .all(|w| w[0] < w[1]) - ); + assert!((100..=1000) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] < w[1])); } #[test] @@ -890,10 +843,10 @@ fn valid_equivocation_reports_dont_pay_fees() { Historical::prove((sp_finality_grandpa::KEY_TYPE, &equivocation_key)).unwrap(); // check the dispatch info for the call. - let info = Call::::report_equivocation_unsigned( - equivocation_proof.clone(), - key_owner_proof.clone(), - ) + let info = Call::::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof.clone()), + key_owner_proof: key_owner_proof.clone(), + } .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. @@ -903,7 +856,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation. let post_info = Grandpa::report_equivocation_unsigned( Origin::none(), - equivocation_proof.clone(), + Box::new(equivocation_proof.clone()), key_owner_proof.clone(), ) .unwrap(); @@ -917,7 +870,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // duplicate. 
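// The `validate_unsigned` assertions above pin down two properties of the
// report transaction: a `provides` tag that deduplicates reports per
// (offender key, set id, round), and a finite longevity of `ReportLongevity`
// instead of `TransactionLongevity::max_value()`. A std-only sketch with
// illustrative values; the real constants come from the mock runtime, not
// from here:

fn report_longevity(bonding_duration: u64, sessions_per_era: u64, period: u64) -> u64 {
	// Mirrors `ReportLongevity = BondingDuration * SessionsPerEra * Period`:
	// keep the transaction valid for as long as the offender stays slashable.
	bonding_duration * sessions_per_era * period
}

#[test]
fn duplicate_reports_share_a_tag() {
	use std::collections::HashSet;

	// Stand-in for the SCALE-encoded ("GrandpaEquivocation", tx_tag) bytes.
	let tag = |key: u64, set_id: u64, round: u64| {
		format!("GrandpaEquivocation/{}/{}/{}", key, set_id, round)
	};

	let mut pool = HashSet::new();
	assert!(pool.insert(tag(7, 1, 1))); // the first report is accepted
	assert!(!pool.insert(tag(7, 1, 1))); // an identical report is a duplicate
	assert!(pool.insert(tag(7, 1, 2))); // a new round yields a new tag

	// With, say, BondingDuration = 3 eras, SessionsPerEra = 3 sessions and
	// Period = 1 block, a report stays valid for 9 blocks in this toy setup.
	assert_eq!(report_longevity(3, 3, 1), 9);
}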
let post_info = Grandpa::report_equivocation_unsigned( Origin::none(), - equivocation_proof, + Box::new(equivocation_proof), key_owner_proof, ) .err() diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 08777c44ad2b1..598be25c5ef38 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-identity" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,25 +13,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-std/std", "sp-io/std", "sp-runtime/std", @@ -39,4 +39,9 @@ std = [ "frame-support/std", "frame-system/std", ] -runtime-benchmarks = ["frame-benchmarking"] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/identity/README.md b/frame/identity/README.md index 8927febec6bbd..a67c259e2537a 100644 --- a/frame/identity/README.md +++ b/frame/identity/README.md @@ -1,6 +1,6 @@ # Identity Module -- [`identity::Trait`](https://docs.rs/pallet-identity/latest/pallet_identity/trait.Trait.html) +- [`identity::Config`](https://docs.rs/pallet-identity/latest/pallet_identity/trait.Config.html) - [`Call`](https://docs.rs/pallet-identity/latest/pallet_identity/enum.Call.html) ## Overview @@ -51,6 +51,6 @@ no state-bloat attack is viable. * `kill_identity` - Forcibly remove the associated identity; the deposit is lost. 
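// `kill_identity` above slashes the whole deposit backing an identity. A
// std-only sketch of how that deposit accumulates under the pallet's scheme;
// the constants are illustrative, the real ones are the runtime parameters
// BasicDeposit, FieldDeposit and SubAccountDeposit:

fn identity_deposit(basic: u128, per_field: u128, fields: u128, per_sub: u128, subs: u128) -> u128 {
	// One basic deposit, plus one field deposit per additional field, plus
	// one sub-account deposit per registered sub-identity.
	basic + per_field * fields + per_sub * subs
}

#[test]
fn deposit_grows_with_the_state_used() {
	// e.g. basic = 10, 2 per extra field, 5 per sub-account.
	assert_eq!(identity_deposit(10, 2, 0, 5, 0), 10);
	assert_eq!(identity_deposit(10, 2, 3, 5, 2), 10 + 6 + 10);
}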
[`Call`]: ./enum.Call.html
-[`Trait`]: ./trait.Trait.html
+[`Config`]: ./trait.Config.html

-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs
index d39df27017b71..8bda24ddc73e1 100644
--- a/frame/identity/src/benchmarking.rs
+++ b/frame/identity/src/benchmarking.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,33 +21,37 @@

 use super::*;

-use frame_system::{EventRecord, RawOrigin};
-use frame_benchmarking::{benchmarks, account, whitelisted_caller};
+use crate::Pallet as Identity;
+use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller};
+use frame_support::{ensure, traits::Get};
+use frame_system::RawOrigin;
 use sp_runtime::traits::Bounded;

-use crate::Module as Identity;
-
 const SEED: u32 = 0;

-fn assert_last_event<T: Trait>(generic_event: <T as Trait>::Event) {
-	let events = frame_system::Module::<T>::events();
-	let system_event: <T as frame_system::Trait>::Event = generic_event.into();
-	// compare to the last event record
-	let EventRecord { event, .. } = &events[events.len() - 1];
-	assert_eq!(event, &system_event);
+fn assert_last_event<T: Config>(generic_event: <T as Config>::Event) {
+	frame_system::Pallet::<T>::assert_last_event(generic_event.into());
 }

 // Adds `r` registrars to the Identity Pallet. These registrars will have set fees and fields.
-fn add_registrars<T: Trait>(r: u32) -> Result<(), &'static str> {
+fn add_registrars<T: Config>(r: u32) -> Result<(), &'static str> {
 	for i in 0..r {
 		let registrar: T::AccountId = account("registrar", i, SEED);
 		let _ = T::Currency::make_free_balance_be(&registrar, BalanceOf::<T>::max_value());
 		Identity::<T>::add_registrar(RawOrigin::Root.into(), registrar.clone())?;
-		Identity::<T>::set_fee(RawOrigin::Signed(registrar.clone()).into(), i.into(), 10.into())?;
-		let fields = IdentityFields(
-			IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot
-			| IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter
-		);
+		Identity::<T>::set_fee(
+			RawOrigin::Signed(registrar.clone()).into(),
+			i.into(),
+			10u32.into(),
+		)?;
+		let fields =
+			IdentityFields(
+				IdentityField::Display |
+				IdentityField::Legal | IdentityField::Web |
+				IdentityField::Riot | IdentityField::Email |
+				IdentityField::PgpFingerprint |
+				IdentityField::Image | IdentityField::Twitter,
+			);
 		Identity::<T>::set_fields(RawOrigin::Signed(registrar.clone()).into(), i.into(), fields)?;
 	}
@@ -57,10 +61,13 @@ fn add_registrars<T: Trait>(r: u32) -> Result<(), &'static str> {
 // Create `s` sub-accounts for the identity of `who` and return them.
 // Each will have 32 bytes of raw data added to it.
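// The ranged components in this file (`let r in 1 .. T::MaxRegistrars::get()`
// and friends) are what the benchmark CLI fits into affine weight functions
// of the shape kept in the (here deleted) default_weights.rs further down. A
// std-only sketch of that shape; the coefficients are copied from the deleted
// `set_identity` entry purely for illustration:

fn set_identity_weight_sketch(r: u64, x: u64) -> u64 {
	const BASE: u64 = 110_679_000; // fixed overhead of the call
	const PER_R: u64 = 389_000; // marginal cost per registrar judgement reset
	const PER_X: u64 = 2_985_000; // marginal cost per additional field
	BASE.saturating_add(PER_R.saturating_mul(r))
		.saturating_add(PER_X.saturating_mul(x))
}

#[test]
fn weight_is_monotonic_in_each_component() {
	assert!(set_identity_weight_sketch(2, 1) > set_identity_weight_sketch(1, 1));
	assert!(set_identity_weight_sketch(1, 2) > set_identity_weight_sketch(1, 1));
}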
-fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { +fn create_sub_accounts( + who: &T::AccountId, + s: u32, +) -> Result, &'static str> { let mut subs = Vec::new(); let who_origin = RawOrigin::Signed(who.clone()); - let data = Data::Raw(vec![0; 32]); + let data = Data::Raw(vec![0; 32].try_into().unwrap()); for i in 0..s { let sub_account = account("sub", i, SEED); @@ -70,14 +77,17 @@ fn create_sub_accounts(who: &T::AccountId, s: u32) -> Result::max_value()); let info = create_identity_info::(1); - Identity::::set_identity(who_origin.clone().into(), info)?; + Identity::::set_identity(who_origin.clone().into(), Box::new(info))?; Ok(subs) } // Adds `s` sub-accounts to the identity of `who`. Each will have 32 bytes of raw data added to it. // This additionally returns the vector of sub-accounts so it can be modified if needed. -fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { +fn add_sub_accounts( + who: &T::AccountId, + s: u32, +) -> Result, &'static str> { let who_origin = RawOrigin::Signed(who.clone()); let subs = create_sub_accounts::(who, s)?; @@ -88,11 +98,11 @@ fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result(num_fields: u32) -> IdentityInfo { - let data = Data::Raw(vec![0; 32]); +fn create_identity_info(num_fields: u32) -> IdentityInfo { + let data = Data::Raw(vec![0; 32].try_into().unwrap()); let info = IdentityInfo { - additional: vec![(data.clone(), data.clone()); num_fields as usize], + additional: vec![(data.clone(), data.clone()); num_fields as usize].try_into().unwrap(), display: data.clone(), legal: data.clone(), web: data.clone(), @@ -107,25 +117,6 @@ fn create_identity_info(num_fields: u32) -> IdentityInfo { } benchmarks! { - // These are the common parameters along with their instancing. - _ { - let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - // extra parameter for the set_subs bench for previous sub accounts - let p in 1 .. T::MaxSubAccounts::get() => (); - let s in 1 .. T::MaxSubAccounts::get() => { - // Give them s many sub accounts - let caller: T::AccountId = whitelisted_caller(); - let _ = add_sub_accounts::(&caller, s)?; - }; - let x in 1 .. T::MaxAdditionalFields::get() => { - // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); - Identity::::set_identity(caller_origin, info)?; - }; - } - add_registrar { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); @@ -135,24 +126,22 @@ benchmarks! { } set_identity { - let r in ...; - // This X doesn't affect the caller ID up front like with the others, so we don't use the - // standard preparation. - let x in _ .. _ => (); + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let x in 1 .. 
T::MaxAdditionalFields::get(); let caller = { // The target user let caller: T::AccountId = whitelisted_caller(); let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Add an initial identity let initial_info = create_identity_info::(1); - Identity::::set_identity(caller_origin.clone(), initial_info)?; + Identity::::set_identity(caller_origin.clone(), Box::new(initial_info))?; // User requests judgement from all the registrars, and they approve for i in 0..r { - Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; + Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( RawOrigin::Signed(account("registrar", i, SEED)).into(), i, @@ -162,7 +151,7 @@ benchmarks! { } caller }; - }: _(RawOrigin::Signed(caller.clone()), create_identity_info::(x)) + }: _(RawOrigin::Signed(caller.clone()), Box::new(create_identity_info::(x))) verify { assert_last_event::(Event::::IdentitySet(caller).into()); } @@ -200,17 +189,27 @@ benchmarks! { clear_identity { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in ...; - let s in ...; - let x in ...; + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let s in 1 .. T::MaxSubAccounts::get() => { + // Give them s many sub accounts + let caller: T::AccountId = whitelisted_caller(); + let _ = add_sub_accounts::(&caller, s)?; + }; + let x in 1 .. T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, Box::new(info))?; + }; // User requests judgement from all the registrars, and they approve for i in 0..r { - Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; + Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( RawOrigin::Signed(account("registrar", i, SEED)).into(), i, @@ -228,22 +227,34 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in ...; - let x in ...; - }: _(RawOrigin::Signed(caller.clone()), r - 1, 10.into()) + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let x in 1 .. 
T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, Box::new(info))?; + }; + }: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()) verify { assert_last_event::(Event::::JudgementRequested(caller, r-1).into()); } cancel_request { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let r in ...; - let x in ...; + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let x in 1 .. T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, Box::new(info))?; + }; - Identity::::request_judgement(caller_origin, r - 1, 10.into())?; + Identity::::request_judgement(caller_origin, r - 1, 10u32.into())?; }: _(RawOrigin::Signed(caller.clone()), r - 1) verify { assert_last_event::(Event::::JudgementUnrequested(caller, r-1).into()); @@ -256,11 +267,11 @@ benchmarks! { Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fee == 0.into(), "Fee already set."); - }: _(RawOrigin::Signed(caller), r, 100.into()) + ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set."); + }: _(RawOrigin::Signed(caller), r, 100u32.into()) verify { let registrars = Registrars::::get(); - ensure!(registrars[r as usize].as_ref().unwrap().fee == 100.into(), "Fee not changed."); + ensure!(registrars[r as usize].as_ref().unwrap().fee == 100u32.into(), "Fee not changed."); } set_account_id { @@ -300,7 +311,7 @@ benchmarks! { provide_judgement { // The user let user: T::AccountId = account("user", r, SEED); - let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); + let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); @@ -308,37 +319,35 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - // For this x, it's the user identity that gts the fields, not the caller. - let x in _ .. _ => { + let x in 1 .. T::MaxAdditionalFields::get() => { let info = create_identity_info::(x); - Identity::::set_identity(user_origin.clone(), info)?; + Identity::::set_identity(user_origin.clone(), Box::new(info))?; }; Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - Identity::::request_judgement(user_origin.clone(), r, 10.into())?; + Identity::::request_judgement(user_origin.clone(), r, 10u32.into())?; }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) verify { assert_last_event::(Event::::JudgementGiven(user, r).into()) } kill_identity { - let r in ...; - // Setting up our own account below. - let s in _ .. _ => {}; - let x in _ .. _ => {}; + let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; + let s in 1 .. 
T::MaxSubAccounts::get(); + let x in 1 .. T::MaxAdditionalFields::get(); let target: T::AccountId = account("target", 0, SEED); - let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); + let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); let info = create_identity_info::(x); - Identity::::set_identity(target_origin.clone(), info)?; + Identity::::set_identity(target_origin.clone(), Box::new(info))?; let _ = add_sub_accounts::(&target, s)?; // User requests judgement from all the registrars, and they approve for i in 0..r { - Identity::::request_judgement(target_origin.clone(), i, 10.into())?; + Identity::::request_judgement(target_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( RawOrigin::Signed(account("registrar", i, SEED)).into(), i, @@ -358,7 +367,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; let sub = account("new_sub", 0, SEED); - let data = Data::Raw(vec![0; 32]); + let data = Data::Raw(vec![0; 32].try_into().unwrap()); ensure!(SubsOf::::get(&caller).1.len() as u32 == s, "Subs not set."); }: _(RawOrigin::Signed(caller.clone()), T::Lookup::unlookup(sub), data) verify { @@ -370,7 +379,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let (sub, _) = add_sub_accounts::(&caller, s)?.remove(0); - let data = Data::Raw(vec![1; 32]); + let data = Data::Raw(vec![1; 32].try_into().unwrap()); ensure!(SuperOf::::get(&sub).unwrap().1 != data, "data already set"); }: _(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()), data.clone()) verify { @@ -395,7 +404,7 @@ benchmarks! { let sup = account("super", 0, SEED); let _ = add_sub_accounts::(&sup, s)?; let sup_origin = RawOrigin::Signed(sup).into(); - Identity::::add_sub(sup_origin, T::Lookup::unlookup(caller.clone()), Data::Raw(vec![0; 32]))?; + Identity::::add_sub(sup_origin, T::Lookup::unlookup(caller.clone()), Data::Raw(vec![0; 32].try_into().unwrap()))?; ensure!(SuperOf::::contains_key(&caller), "Sub doesn't exists"); }: _(RawOrigin::Signed(caller.clone())) verify { @@ -404,31 +413,4 @@ benchmarks! 
{ } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_add_registrar::()); - assert_ok!(test_benchmark_set_identity::()); - assert_ok!(test_benchmark_set_subs_new::()); - assert_ok!(test_benchmark_set_subs_old::()); - assert_ok!(test_benchmark_clear_identity::()); - assert_ok!(test_benchmark_request_judgement::()); - assert_ok!(test_benchmark_cancel_request::()); - assert_ok!(test_benchmark_set_fee::()); - assert_ok!(test_benchmark_set_account_id::()); - assert_ok!(test_benchmark_set_fields::()); - assert_ok!(test_benchmark_provide_judgement::()); - assert_ok!(test_benchmark_kill_identity::()); - assert_ok!(test_benchmark_add_sub::()); - assert_ok!(test_benchmark_rename_sub::()); - assert_ok!(test_benchmark_remove_sub::()); - assert_ok!(test_benchmark_quit_sub::()); - }); - } -} +impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/identity/src/default_weights.rs b/frame/identity/src/default_weights.rs deleted file mode 100644 index 93b1c89ab93dd..0000000000000 --- a/frame/identity/src/default_weights.rs +++ /dev/null @@ -1,135 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn add_registrar(r: u32, ) -> Weight { - (39_603_000 as Weight) - .saturating_add((418_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_identity(r: u32, x: u32, ) -> Weight { - (110_679_000 as Weight) - .saturating_add((389_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_985_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_subs_new(s: u32, ) -> Weight { - (78_697_000 as Weight) - .saturating_add((15_225_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn set_subs_old(p: u32, ) -> Weight { - (71_308_000 as Weight) - .saturating_add((5_772_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) - } - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (91_553_000 as Weight) - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((5_749_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_621_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn request_judgement(r: u32, x: u32, ) -> Weight { - (110_856_000 as Weight) - .saturating_add((496_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_221_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn cancel_request(r: u32, x: u32, ) -> Weight { - (96_857_000 as Weight) - .saturating_add((311_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_204_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_fee(r: u32, ) -> Weight { - (16_276_000 as Weight) - .saturating_add((381_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_account_id(r: u32, ) -> Weight { - (18_530_000 as Weight) - .saturating_add((391_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_fields(r: u32, ) -> Weight { - (16_359_000 as Weight) - .saturating_add((379_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn provide_judgement(r: u32, x: u32, ) -> Weight { - (72_869_000 as Weight) - .saturating_add((423_000 as 
Weight).saturating_mul(r as Weight)) - .saturating_add((3_187_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - (123_199_000 as Weight) - .saturating_add((71_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((5_730_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn add_sub(s: u32, ) -> Weight { - (110_070_000 as Weight) - .saturating_add((262_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn rename_sub(s: u32, ) -> Weight { - (37_130_000 as Weight) - .saturating_add((79_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn remove_sub(s: u32, ) -> Weight { - (103_295_000 as Weight) - .saturating_add((235_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn quit_sub(s: u32, ) -> Weight { - (65_716_000 as Weight) - .saturating_add((227_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } -} diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 1ff69af9a9036..a91381f1edd8b 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,10 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Identity Module +//! # Identity Pallet //! -//! - [`identity::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -68,414 +68,142 @@ //! * `kill_identity` - Forcibly remove the associated identity; the deposit is lost. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! 
[`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_std::{fmt::Debug, ops::Add, iter::once}; -use enumflags2::BitFlags; -use codec::{Encode, Decode}; -use sp_runtime::{DispatchError, RuntimeDebug, DispatchResult}; -use sp_runtime::traits::{StaticLookup, Zero, AppendZerosInput, Saturating}; -use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, - dispatch::DispatchResultWithPostInfo, - traits::{Currency, ReservableCurrency, OnUnbalanced, Get, BalanceStatus, EnsureOrigin}, - weights::Weight, -}; -use frame_system::ensure_signed; - +mod benchmarking; #[cfg(test)] mod tests; -mod benchmarking; -mod default_weights; - -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; - -pub trait WeightInfo { - fn add_registrar(r: u32, ) -> Weight; - fn set_identity(r: u32, x: u32, ) -> Weight; - fn set_subs_new(s: u32, ) -> Weight; - fn set_subs_old(p: u32, ) -> Weight; - fn add_sub(p: u32, ) -> Weight; - fn rename_sub(p: u32, ) -> Weight; - fn remove_sub(p: u32, ) -> Weight; - fn quit_sub(p: u32, ) -> Weight; - fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight; - fn request_judgement(r: u32, x: u32, ) -> Weight; - fn cancel_request(r: u32, x: u32, ) -> Weight; - fn set_fee(r: u32, ) -> Weight; - fn set_account_id(r: u32, ) -> Weight; - fn set_fields(r: u32, ) -> Weight; - fn provide_judgement(r: u32, x: u32, ) -> Weight; - fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight; -} - -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The currency trait. - type Currency: ReservableCurrency; - - /// The amount held on deposit for a registered identity. - type BasicDeposit: Get>; - - /// The amount held on deposit per additional field for a registered identity. - type FieldDeposit: Get>; - - /// The amount held on deposit for a registered subaccount. This should account for the fact - /// that one storage item's value will increase by the size of an account ID, and there will be - /// another trie item whose value is the size of an account ID plus 32 bytes. - type SubAccountDeposit: Get>; +mod types; +pub mod weights; + +use frame_support::traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency}; +use sp_runtime::traits::{AppendZerosInput, Saturating, StaticLookup, Zero}; +use sp_std::{convert::TryInto, prelude::*}; +pub use weights::WeightInfo; + +pub use pallet::*; +pub use types::{ + Data, IdentityField, IdentityFields, IdentityInfo, Judgement, RegistrarIndex, RegistrarInfo, + Registration, +}; - /// The maximum number of sub-accounts allowed per identified account. - type MaxSubAccounts: Get; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; - /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O - /// required to access an identity, but can be pretty high. - type MaxAdditionalFields: Get; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - /// Maxmimum number of registrars allowed in the system. Needed to bound the complexity - /// of, e.g., updating judgements. - type MaxRegistrars: Get; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. 
+ type Event: From> + IsType<::Event>; - /// What to do with slashed funds. - type Slashed: OnUnbalanced>; + /// The currency trait. + type Currency: ReservableCurrency; - /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + /// The amount held on deposit for a registered identity + #[pallet::constant] + type BasicDeposit: Get>; - /// The origin which may add or remove registrars. Root can always do this. - type RegistrarOrigin: EnsureOrigin; + /// The amount held on deposit per additional field for a registered identity. + #[pallet::constant] + type FieldDeposit: Get>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// The amount held on deposit for a registered subaccount. This should account for the fact + /// that one storage item's value will increase by the size of an account ID, and there will + /// be another trie item whose value is the size of an account ID plus 32 bytes. + #[pallet::constant] + type SubAccountDeposit: Get>; -/// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater -/// than 32-bytes then it will be truncated when encoding. -/// -/// Can also be `None`. -#[derive(Clone, Eq, PartialEq, RuntimeDebug)] -pub enum Data { - /// No data here. - None, - /// The data is stored directly. - Raw(Vec), - /// Only the Blake2 hash of the data is stored. The preimage of the hash may be retrieved - /// through some hash-lookup service. - BlakeTwo256([u8; 32]), - /// Only the SHA2-256 hash of the data is stored. The preimage of the hash may be retrieved - /// through some hash-lookup service. - Sha256([u8; 32]), - /// Only the Keccak-256 hash of the data is stored. The preimage of the hash may be retrieved - /// through some hash-lookup service. - Keccak256([u8; 32]), - /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved - /// through some hash-lookup service. - ShaThree256([u8; 32]), -} + /// The maximum number of sub-accounts allowed per identified account. + #[pallet::constant] + type MaxSubAccounts: Get; -impl Decode for Data { - fn decode(input: &mut I) -> sp_std::result::Result { - let b = input.read_byte()?; - Ok(match b { - 0 => Data::None, - n @ 1 ..= 33 => { - let mut r = vec![0u8; n as usize - 1]; - input.read(&mut r[..])?; - Data::Raw(r) - } - 34 => Data::BlakeTwo256(<[u8; 32]>::decode(input)?), - 35 => Data::Sha256(<[u8; 32]>::decode(input)?), - 36 => Data::Keccak256(<[u8; 32]>::decode(input)?), - 37 => Data::ShaThree256(<[u8; 32]>::decode(input)?), - _ => return Err(codec::Error::from("invalid leading byte")), - }) - } -} + /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O + /// required to access an identity, but can be pretty high. + #[pallet::constant] + type MaxAdditionalFields: Get; -impl Encode for Data { - fn encode(&self) -> Vec { - match self { - Data::None => vec![0u8; 1], - Data::Raw(ref x) => { - let l = x.len().min(32); - let mut r = vec![l as u8 + 1; l + 1]; - &mut r[1..].copy_from_slice(&x[..l as usize]); - r - } - Data::BlakeTwo256(ref h) => once(34u8).chain(h.iter().cloned()).collect(), - Data::Sha256(ref h) => once(35u8).chain(h.iter().cloned()).collect(), - Data::Keccak256(ref h) => once(36u8).chain(h.iter().cloned()).collect(), - Data::ShaThree256(ref h) => once(37u8).chain(h.iter().cloned()).collect(), - } - } -} -impl codec::EncodeLike for Data {} + /// Maxmimum number of registrars allowed in the system. 
Needed to bound the complexity + /// of, e.g., updating judgements. + #[pallet::constant] + type MaxRegistrars: Get; -impl Default for Data { - fn default() -> Self { - Self::None - } -} + /// What to do with slashed funds. + type Slashed: OnUnbalanced>; -/// An identifier for a single name registrar/identity verification service. -pub type RegistrarIndex = u32; - -/// An attestation of a registrar over how accurate some `IdentityInfo` is in describing an account. -/// -/// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear -/// which fields their attestation is relevant for by off-chain means. -#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub enum Judgement< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq -> { - /// The default value; no opinion is held. - Unknown, - /// No judgement is yet in place, but a deposit is reserved as payment for providing one. - FeePaid(Balance), - /// The data appears to be reasonably acceptable in terms of its accuracy, however no in depth - /// checks (such as in-person meetings or formal KYC) have been conducted. - Reasonable, - /// The target is known directly by the registrar and the registrar can fully attest to the - /// the data's accuracy. - KnownGood, - /// The data was once good but is currently out of date. There is no malicious intent in the - /// inaccuracy. This judgement can be removed through updating the data. - OutOfDate, - /// The data is imprecise or of sufficiently low-quality to be problematic. It is not - /// indicative of malicious intent. This judgement can be removed through updating the data. - LowQuality, - /// The data is erroneous. This may be indicative of malicious intent. This cannot be removed - /// except by the registrar. - Erroneous, -} + /// The origin which may forcibly set or remove a name. Root can always do this. + type ForceOrigin: EnsureOrigin; -impl< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq -> Judgement { - /// Returns `true` if this judgement is indicative of a deposit being currently held. This means - /// it should not be cleared or replaced except by an operation which utilizes the deposit. - fn has_deposit(&self) -> bool { - match self { - Judgement::FeePaid(_) => true, - _ => false, - } - } + /// The origin which may add or remove registrars. Root can always do this. + type RegistrarOrigin: EnsureOrigin; - /// Returns `true` if this judgement is one that should not be generally be replaced outside - /// of specialized handlers. Examples include "malicious" judgements and deposit-holding - /// judgements. - fn is_sticky(&self) -> bool { - match self { - Judgement::FeePaid(_) | Judgement::Erroneous => true, - _ => false, - } + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } -} -/// The fields that we use to identify the owner of an account with. Each corresponds to a field -/// in the `IdentityInfo` struct. 
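// The removed `Data` codec above packs the variant tag and the raw length
// into one leading byte: 0 is `None`, 1..=33 is `Raw` of length n - 1, and
// 34..=37 select the four 32-byte hash variants. A std-only sketch of the
// `Raw` arm (the real impls moved to the new `types` module and now sit on a
// `BoundedVec`):

fn encode_raw(data: &[u8]) -> Vec<u8> {
	let len = data.len().min(32); // anything longer is truncated on encode
	let mut out = Vec::with_capacity(len + 1);
	out.push(len as u8 + 1); // leading byte in 1..=33
	out.extend_from_slice(&data[..len]);
	out
}

fn decode_raw(input: &[u8]) -> Option<Vec<u8>> {
	let (&first, rest) = input.split_first()?;
	match first {
		0 => Some(Vec::new()), // `Data::None`, shown here as empty bytes
		n @ 1..=33 => rest.get(..n as usize - 1).map(|s| s.to_vec()),
		_ => None, // 34..=37, the hash variants, are out of scope here
	}
}

#[test]
fn raw_data_round_trips() {
	let encoded = encode_raw(b"hello");
	assert_eq!(encoded[0], 6); // length 5, stored as 5 + 1
	assert_eq!(decode_raw(&encoded), Some(b"hello".to_vec()));
}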
-#[repr(u64)] -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, BitFlags, RuntimeDebug)] -pub enum IdentityField { - Display = 0b0000000000000000000000000000000000000000000000000000000000000001, - Legal = 0b0000000000000000000000000000000000000000000000000000000000000010, - Web = 0b0000000000000000000000000000000000000000000000000000000000000100, - Riot = 0b0000000000000000000000000000000000000000000000000000000000001000, - Email = 0b0000000000000000000000000000000000000000000000000000000000010000, - PgpFingerprint = 0b0000000000000000000000000000000000000000000000000000000000100000, - Image = 0b0000000000000000000000000000000000000000000000000000000001000000, - Twitter = 0b0000000000000000000000000000000000000000000000000000000010000000, -} + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(_); -/// Wrapper type for `BitFlags` that implements `Codec`. -#[derive(Clone, Copy, PartialEq, Default, RuntimeDebug)] -pub struct IdentityFields(BitFlags); - -impl Eq for IdentityFields {} -impl Encode for IdentityFields { - fn using_encoded R>(&self, f: F) -> R { - self.0.bits().using_encoded(f) - } -} -impl Decode for IdentityFields { - fn decode(input: &mut I) -> sp_std::result::Result { - let field = u64::decode(input)?; - Ok(Self(>::from_bits(field as u64).map_err(|_| "invalid value")?)) - } -} - -/// Information concerning the identity of the controller of an account. -/// -/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra -/// fields in a backwards compatible way through a specialized `Decode` impl. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -#[cfg_attr(test, derive(Default))] -pub struct IdentityInfo { - /// Additional fields of the identity that are not catered for with the struct's explicit - /// fields. - pub additional: Vec<(Data, Data)>, - - /// A reasonable display name for the controller of the account. This should be whatever it is - /// that it is typically known as and should not be confusable with other entities, given - /// reasonable context. + /// Information that is pertinent to identify the entity behind an account. /// - /// Stored as UTF-8. - pub display: Data, - - /// The full legal name in the local jurisdiction of the entity. This might be a bit - /// long-winded. + /// TWOX-NOTE: OK ― `AccountId` is a secure hash. + #[pallet::storage] + #[pallet::getter(fn identity)] + pub(super) type IdentityOf = StorageMap< + _, + Twox64Concat, + T::AccountId, + Registration, T::MaxRegistrars, T::MaxAdditionalFields>, + OptionQuery, + >; + + /// The super-identity of an alternative "sub" identity together with its name, within that + /// context. If the account is not some other account's sub-identity, then just `None`. + #[pallet::storage] + #[pallet::getter(fn super_of)] + pub(super) type SuperOf = + StorageMap<_, Blake2_128Concat, T::AccountId, (T::AccountId, Data), OptionQuery>; + + /// Alternative "sub" identities of this account. /// - /// Stored as UTF-8. - pub legal: Data, - - /// A representative website held by the controller of the account. + /// The first item is the deposit, the second is a vector of the accounts. /// - /// NOTE: `https://` is automatically prepended. + /// TWOX-NOTE: OK ― `AccountId` is a secure hash. + #[pallet::storage] + #[pallet::getter(fn subs_of)] + pub(super) type SubsOf = StorageMap< + _, + Twox64Concat, + T::AccountId, + (BalanceOf, BoundedVec), + ValueQuery, + >; + + /// The set of registrars. 
Not expected to get very big as can only be added through a + /// special origin (likely a council motion). /// - /// Stored as UTF-8. - pub web: Data, - - /// The Riot/Matrix handle held by the controller of the account. - /// - /// Stored as UTF-8. - pub riot: Data, - - /// The email address of the controller of the account. - /// - /// Stored as UTF-8. - pub email: Data, - - /// The PGP/GPG public key of the controller of the account. - pub pgp_fingerprint: Option<[u8; 20]>, - - /// A graphic image representing the controller of the account. Should be a company, - /// organization or project logo or a headshot in the case of a human. - pub image: Data, - - /// The Twitter identity. The leading `@` character may be elided. - pub twitter: Data, -} - -/// Information concerning the identity of the controller of an account. -/// -/// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a -/// backwards compatible way through a specialized `Decode` impl. -#[derive(Clone, Encode, Eq, PartialEq, RuntimeDebug)] -pub struct Registration< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq -> { - /// Judgements from the registrars on this identity. Stored ordered by `RegistrarIndex`. There - /// may be only a single judgement from each registrar. - pub judgements: Vec<(RegistrarIndex, Judgement)>, - - /// Amount held on deposit for this information. - pub deposit: Balance, - - /// Information on the identity. - pub info: IdentityInfo, -} - -impl < - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq + Zero + Add, -> Registration { - fn total_deposit(&self) -> Balance { - self.deposit + self.judgements.iter() - .map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() }) - .fold(Zero::zero(), |a, i| a + i) - } -} - -impl< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq, -> Decode for Registration { - fn decode(input: &mut I) -> sp_std::result::Result { - let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; - Ok(Self { judgements, deposit, info }) - } -} - -/// Information concerning a registrar. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct RegistrarInfo< - Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, - AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq -> { - /// The account of the registrar. - pub account: AccountId, - - /// Amount required to be given to the registrar for them to provide judgement. - pub fee: Balance, - - /// Relevant fields for this registrar. Registrar judgements are limited to attestations on - /// these fields. - pub fields: IdentityFields, -} - -decl_storage! { - trait Store for Module as Identity { - /// Information that is pertinent to identify the entity behind an account. - /// - /// TWOX-NOTE: OK ― `AccountId` is a secure hash. - pub IdentityOf get(fn identity): - map hasher(twox_64_concat) T::AccountId => Option>>; - - /// The super-identity of an alternative "sub" identity together with its name, within that - /// context. If the account is not some other account's sub-identity, then just `None`. - pub SuperOf get(fn super_of): - map hasher(blake2_128_concat) T::AccountId => Option<(T::AccountId, Data)>; - - /// Alternative "sub" identities of this account. - /// - /// The first item is the deposit, the second is a vector of the accounts. - /// - /// TWOX-NOTE: OK ― `AccountId` is a secure hash. 
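// ---------------------------------------------------------------------------
// Aside (illustrative helper, not in the diff): reading the storage items
// above from runtime code. An account's display name is found by first
// following `SuperOf` (in case the account is a sub-identity) and then
// looking up `IdentityOf` on the main account.
// ---------------------------------------------------------------------------
fn display_of<T: Config>(who: &T::AccountId) -> Option<Data> {
	// Sub-identities point at their "super" account; stand-alone identities
	// have no `SuperOf` entry.
	let main = match SuperOf::<T>::get(who) {
		Some((main, _sub_name)) => main,
		None => who.clone(),
	};
	IdentityOf::<T>::get(&main).map(|registration| registration.info.display)
}
// --------------------------------------------------------------- end aside --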
- pub SubsOf get(fn subs_of): - map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); - - /// The set of registrars. Not expected to get very big as can only be added through a - /// special origin (likely a council motion). - /// - /// The index into this can be cast to `RegistrarIndex` to get a valid value. - pub Registrars get(fn registrars): Vec, T::AccountId>>>; - } -} - -decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { - /// A name was set or reset (which will remove all judgements). \[who\] - IdentitySet(AccountId), - /// A name was cleared, and the given balance returned. \[who, deposit\] - IdentityCleared(AccountId, Balance), - /// A name was removed and the given balance slashed. \[who, deposit\] - IdentityKilled(AccountId, Balance), - /// A judgement was asked from a registrar. \[who, registrar_index\] - JudgementRequested(AccountId, RegistrarIndex), - /// A judgement request was retracted. \[who, registrar_index\] - JudgementUnrequested(AccountId, RegistrarIndex), - /// A judgement was given by a registrar. \[target, registrar_index\] - JudgementGiven(AccountId, RegistrarIndex), - /// A registrar was added. \[registrar_index\] - RegistrarAdded(RegistrarIndex), - /// A sub-identity was added to an identity and the deposit paid. \[sub, main, deposit\] - SubIdentityAdded(AccountId, AccountId, Balance), - /// A sub-identity was removed from an identity and the deposit freed. - /// \[sub, main, deposit\] - SubIdentityRemoved(AccountId, AccountId, Balance), - /// A sub-identity was cleared, and the given deposit repatriated from the - /// main identity account to the sub-identity account. \[sub, main, deposit\] - SubIdentityRevoked(AccountId, AccountId, Balance), - } -); - -decl_error! { - /// Error for the identity module. - pub enum Error for Module { + /// The index into this can be cast to `RegistrarIndex` to get a valid value. + #[pallet::storage] + #[pallet::getter(fn registrars)] + pub(super) type Registrars = StorageValue< + _, + BoundedVec, T::AccountId>>, T::MaxRegistrars>, + ValueQuery, + >; + + #[pallet::error] + pub enum Error { /// Too many subs-accounts. TooManySubAccounts, /// Account isn't found. @@ -507,39 +235,39 @@ decl_error! { /// Sender is not a sub-account. NotSub, /// Sub-account isn't owned by sender. - NotOwned + NotOwned, } -} - -decl_module! { - /// Identity module declaration. - pub struct Module for enum Call where origin: T::Origin { - /// The amount held on deposit for a registered identity. - const BasicDeposit: BalanceOf = T::BasicDeposit::get(); - - /// The amount held on deposit per additional field for a registered identity. - const FieldDeposit: BalanceOf = T::FieldDeposit::get(); - - /// The amount held on deposit for a registered subaccount. This should account for the fact - /// that one storage item's value will increase by the size of an account ID, and there will be - /// another trie item whose value is the size of an account ID plus 32 bytes. - const SubAccountDeposit: BalanceOf = T::SubAccountDeposit::get(); - - /// The maximum number of sub-accounts allowed per identified account. - const MaxSubAccounts: u32 = T::MaxSubAccounts::get(); - /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O - /// required to access an identity, but can be pretty high. - const MaxAdditionalFields: u32 = T::MaxAdditionalFields::get(); - - /// Maxmimum number of registrars allowed in the system. Needed to bound the complexity - /// of, e.g., updating judgements. 
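// ---------------------------------------------------------------------------
// Aside (self-contained sketch, not in the diff): the behaviour that makes
// the bounded `Registrars` storage above safe. A `BoundedVec` refuses to grow
// past its compile-time bound, so the pallet can surface the overflow as an
// ordinary `TooManyRegistrars` error instead of checking lengths by hand.
// ---------------------------------------------------------------------------
use frame_support::{traits::ConstU32, BoundedVec};

fn bounded_vec_demo() {
	let mut regs: BoundedVec<u8, ConstU32<2>> = BoundedVec::default();
	assert!(regs.try_push(1).is_ok());
	assert!(regs.try_push(2).is_ok());
	// A third push exceeds the bound; `add_registrar` maps this failure to
	// `Error::<T>::TooManyRegistrars`.
	assert!(regs.try_push(3).is_err());
}
// --------------------------------------------------------------- end aside --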
-		const MaxRegistrars: u32 = T::MaxRegistrars::get();
-
-		type Error = Error<T>;
-
-		fn deposit_event() = default;
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// A name was set or reset (which will remove all judgements). \[who\]
+		IdentitySet(T::AccountId),
+		/// A name was cleared, and the given balance returned. \[who, deposit\]
+		IdentityCleared(T::AccountId, BalanceOf<T>),
+		/// A name was removed and the given balance slashed. \[who, deposit\]
+		IdentityKilled(T::AccountId, BalanceOf<T>),
+		/// A judgement was asked from a registrar. \[who, registrar_index\]
+		JudgementRequested(T::AccountId, RegistrarIndex),
+		/// A judgement request was retracted. \[who, registrar_index\]
+		JudgementUnrequested(T::AccountId, RegistrarIndex),
+		/// A judgement was given by a registrar. \[target, registrar_index\]
+		JudgementGiven(T::AccountId, RegistrarIndex),
+		/// A registrar was added. \[registrar_index\]
+		RegistrarAdded(RegistrarIndex),
+		/// A sub-identity was added to an identity and the deposit paid. \[sub, main, deposit\]
+		SubIdentityAdded(T::AccountId, T::AccountId, BalanceOf<T>),
+		/// A sub-identity was removed from an identity and the deposit freed.
+		/// \[sub, main, deposit\]
+		SubIdentityRemoved(T::AccountId, T::AccountId, BalanceOf<T>),
+		/// A sub-identity was cleared, and the given deposit repatriated from the
+		/// main identity account to the sub-identity account. \[sub, main, deposit\]
+		SubIdentityRevoked(T::AccountId, T::AccountId, BalanceOf<T>),
+	}

+	#[pallet::call]
+	/// Identity pallet declaration.
+	impl<T: Config> Pallet<T> {
		/// Add a registrar to the system.
		///
		/// The dispatch origin for this call must be `T::RegistrarOrigin`.
@@ -553,21 +281,27 @@
		/// - One storage mutation (codec `O(R)`).
		/// - One event.
		/// # </weight>
-		#[weight = T::WeightInfo::add_registrar(T::MaxRegistrars::get()) ]
-		fn add_registrar(origin, account: T::AccountId) -> DispatchResultWithPostInfo {
+		#[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))]
+		pub fn add_registrar(
+			origin: OriginFor<T>,
+			account: T::AccountId,
+		) -> DispatchResultWithPostInfo {
			T::RegistrarOrigin::ensure_origin(origin)?;

			let (i, registrar_count) = <Registrars<T>>::try_mutate(
				|registrars| -> Result<(RegistrarIndex, usize), DispatchError> {
-					ensure!(registrars.len() < T::MaxRegistrars::get() as usize, Error::<T>::TooManyRegistrars);
-					registrars.push(Some(RegistrarInfo {
-						account, fee: Zero::zero(), fields: Default::default()
-					}));
+					registrars
+						.try_push(Some(RegistrarInfo {
+							account,
+							fee: Zero::zero(),
+							fields: Default::default(),
+						}))
+						.map_err(|_| Error::<T>::TooManyRegistrars)?;
					Ok(((registrars.len() - 1) as RegistrarIndex, registrars.len()))
-				}
+				},
			)?;

-			Self::deposit_event(RawEvent::RegistrarAdded(i));
+			Self::deposit_event(Event::RegistrarAdded(i));

			Ok(Some(T::WeightInfo::add_registrar(registrar_count as u32)).into())
		}
@@ -591,11 +325,14 @@ decl_module! {
		/// - One storage mutation (codec-read `O(X' + R)`, codec-write `O(X + R)`).
		/// - One event.
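// ---------------------------------------------------------------------------
// Aside (illustrative, not in the diff): the `DispatchResultWithPostInfo`
// refund pattern used by `add_registrar` above and the calls that follow. The
// declared weight charges for the worst case (e.g. `T::MaxRegistrars`), and
// the call then reports the weight for the *actual* number of items so the
// overcharge is refunded post-dispatch.
// ---------------------------------------------------------------------------
use frame_support::dispatch::DispatchResultWithPostInfo;
use frame_support::weights::Weight;

// Hypothetical per-item weight formula standing in for `T::WeightInfo`.
fn actual_weight(items: u32) -> Weight {
	10_000u64.saturating_mul(items as u64)
}

fn refunding_call(actual_items: u32) -> DispatchResultWithPostInfo {
	// ... perform the call's work, counting `actual_items` along the way ...
	// Returning `Some(weight)` overrides the worst-case pre-dispatch estimate.
	Ok(Some(actual_weight(actual_items)).into())
}
// --------------------------------------------------------------- end aside --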
/// # - #[weight = T::WeightInfo::set_identity( + #[pallet::weight( T::WeightInfo::set_identity( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X - )] - fn set_identity(origin, info: IdentityInfo) -> DispatchResultWithPostInfo { + ))] + pub fn set_identity( + origin: OriginFor, + info: Box>, + ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let extra_fields = info.additional.len() as u32; ensure!(extra_fields <= T::MaxAdditionalFields::get(), Error::::TooManyFields); @@ -605,10 +342,14 @@ decl_module! { Some(mut id) => { // Only keep non-positive judgements. id.judgements.retain(|j| j.1.is_sticky()); - id.info = info; + id.info = *info; id - } - None => Registration { info, judgements: Vec::new(), deposit: Zero::zero() }, + }, + None => Registration { + info: *info, + judgements: BoundedVec::default(), + deposit: Zero::zero(), + }, }; let old_deposit = id.deposit; @@ -617,17 +358,19 @@ decl_module! { T::Currency::reserve(&sender, id.deposit - old_deposit)?; } if old_deposit > id.deposit { - let _ = T::Currency::unreserve(&sender, old_deposit - id.deposit); + let err_amount = T::Currency::unreserve(&sender, old_deposit - id.deposit); + debug_assert!(err_amount.is_zero()); } let judgements = id.judgements.len(); >::insert(&sender, id); - Self::deposit_event(RawEvent::IdentitySet(sender)); + Self::deposit_event(Event::IdentitySet(sender)); Ok(Some(T::WeightInfo::set_identity( judgements as u32, // R - extra_fields // X - )).into()) + extra_fields, // X + )) + .into()) } /// Set the sub-accounts of the sender. @@ -657,34 +400,43 @@ decl_module! { // N storage items for N sub accounts. Right now the weight on this function // is a large overestimate due to the fact that it could potentially write // to 2 x T::MaxSubAccounts::get(). - #[weight = T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed. + #[pallet::weight(T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed. .saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) // S: Assume all subs are new. - ] - fn set_subs(origin, subs: Vec<(T::AccountId, Data)>) -> DispatchResultWithPostInfo { + )] + pub fn set_subs( + origin: OriginFor, + subs: Vec<(T::AccountId, Data)>, + ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; ensure!(>::contains_key(&sender), Error::::NotFound); - ensure!(subs.len() <= T::MaxSubAccounts::get() as usize, Error::::TooManySubAccounts); + ensure!( + subs.len() <= T::MaxSubAccounts::get() as usize, + Error::::TooManySubAccounts + ); let (old_deposit, old_ids) = >::get(&sender); let new_deposit = T::SubAccountDeposit::get() * >::from(subs.len() as u32); - let not_other_sub = subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| &i.0 == &sender); + let not_other_sub = + subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| &i.0 == &sender); ensure!(not_other_sub, Error::::AlreadyClaimed); if old_deposit < new_deposit { T::Currency::reserve(&sender, new_deposit - old_deposit)?; } else if old_deposit > new_deposit { - let _ = T::Currency::unreserve(&sender, old_deposit - new_deposit); + let err_amount = T::Currency::unreserve(&sender, old_deposit - new_deposit); + debug_assert!(err_amount.is_zero()); } // do nothing if they're equal. 
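// Aside (explanatory note, not in the diff): both `set_identity` and
// `set_subs` above adjust deposits by moving only the *difference*:
//   old < new  => `reserve(new - old)` takes the shortfall (and can fail),
//   old > new  => `unreserve(old - new)` releases the surplus.
// `unreserve` returns the amount it could NOT release; with consistent
// bookkeeping that is always zero, which is why this rewrite upgrades the
// ignored `let _ = ...` into `debug_assert!(err_amount.is_zero())` — a free
// sanity check in debug builds and a no-op in release builds.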
for s in old_ids.iter() { >::remove(s); } - let ids = subs.into_iter().map(|(id, name)| { + let mut ids = BoundedVec::::default(); + for (id, name) in subs { >::insert(&id, (sender.clone(), name)); - id - }).collect::>(); + ids.try_push(id).expect("subs length is less than T::MaxSubAccounts; qed"); + } let new_subs = ids.len(); if ids.is_empty() { @@ -695,8 +447,10 @@ decl_module! { Ok(Some( T::WeightInfo::set_subs_old(old_ids.len() as u32) // P: Real number of old accounts removed. - .saturating_add(T::WeightInfo::set_subs_new(new_subs as u32)) // S: New subs added. - ).into()) + // S: New subs added + .saturating_add(T::WeightInfo::set_subs_new(new_subs as u32)), + ) + .into()) } /// Clear an account's identity info and all sub-accounts and return all deposits. @@ -717,12 +471,12 @@ decl_module! { /// - `2` storage reads and `S + 2` storage deletions. /// - One event. /// # - #[weight = T::WeightInfo::clear_identity( + #[pallet::weight(T::WeightInfo::clear_identity( T::MaxRegistrars::get().into(), // R T::MaxSubAccounts::get().into(), // S T::MaxAdditionalFields::get().into(), // X - )] - fn clear_identity(origin) -> DispatchResultWithPostInfo { + ))] + pub fn clear_identity(origin: OriginFor) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let (subs_deposit, sub_ids) = >::take(&sender); @@ -732,15 +486,17 @@ decl_module! { >::remove(sub); } - let _ = T::Currency::unreserve(&sender, deposit.clone()); + let err_amount = T::Currency::unreserve(&sender, deposit.clone()); + debug_assert!(err_amount.is_zero()); - Self::deposit_event(RawEvent::IdentityCleared(sender, deposit)); + Self::deposit_event(Event::IdentityCleared(sender, deposit)); Ok(Some(T::WeightInfo::clear_identity( - id.judgements.len() as u32, // R - sub_ids.len() as u32, // S - id.info.additional.len() as u32 // X - )).into()) + id.judgements.len() as u32, // R + sub_ids.len() as u32, // S + id.info.additional.len() as u32, // X + )) + .into()) } /// Request a judgement from a registrar. @@ -766,29 +522,34 @@ decl_module! { /// - Storage: 1 read `O(R)`, 1 mutate `O(X + R)`. /// - One event. /// # - #[weight = T::WeightInfo::request_judgement( + #[pallet::weight(T::WeightInfo::request_judgement( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X - )] - fn request_judgement(origin, - #[compact] reg_index: RegistrarIndex, - #[compact] max_fee: BalanceOf, + ))] + pub fn request_judgement( + origin: OriginFor, + #[pallet::compact] reg_index: RegistrarIndex, + #[pallet::compact] max_fee: BalanceOf, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let registrars = >::get(); - let registrar = registrars.get(reg_index as usize).and_then(Option::as_ref) + let registrar = registrars + .get(reg_index as usize) + .and_then(Option::as_ref) .ok_or(Error::::EmptyIndex)?; ensure!(max_fee >= registrar.fee, Error::::FeeChanged); let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; let item = (reg_index, Judgement::FeePaid(registrar.fee)); match id.judgements.binary_search_by_key(®_index, |x| x.0) { - Ok(i) => if id.judgements[i].1.is_sticky() { - Err(Error::::StickyJudgement)? - } else { - id.judgements[i] = item - }, - Err(i) => id.judgements.insert(i, item), + Ok(i) => + if id.judgements[i].1.is_sticky() { + Err(Error::::StickyJudgement)? + } else { + id.judgements[i] = item + }, + Err(i) => + id.judgements.try_insert(i, item).map_err(|_| Error::::TooManyRegistrars)?, } T::Currency::reserve(&sender, registrar.fee)?; @@ -797,12 +558,10 @@ decl_module! 
{ let extra_fields = id.info.additional.len(); >::insert(&sender, id); - Self::deposit_event(RawEvent::JudgementRequested(sender, reg_index)); + Self::deposit_event(Event::JudgementRequested(sender, reg_index)); - Ok(Some(T::WeightInfo::request_judgement( - judgements as u32, - extra_fields as u32, - )).into()) + Ok(Some(T::WeightInfo::request_judgement(judgements as u32, extra_fields as u32)) + .into()) } /// Cancel a previous request. @@ -822,15 +581,20 @@ decl_module! { /// - One storage mutation `O(R + X)`. /// - One event /// # - #[weight = T::WeightInfo::cancel_request( + #[pallet::weight(T::WeightInfo::cancel_request( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X - )] - fn cancel_request(origin, reg_index: RegistrarIndex) -> DispatchResultWithPostInfo { + ))] + pub fn cancel_request( + origin: OriginFor, + reg_index: RegistrarIndex, + ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; - let pos = id.judgements.binary_search_by_key(®_index, |x| x.0) + let pos = id + .judgements + .binary_search_by_key(®_index, |x| x.0) .map_err(|_| Error::::NotFound)?; let fee = if let Judgement::FeePaid(fee) = id.judgements.remove(pos).1 { fee @@ -838,17 +602,15 @@ decl_module! { Err(Error::::JudgementGiven)? }; - let _ = T::Currency::unreserve(&sender, fee); + let err_amount = T::Currency::unreserve(&sender, fee); + debug_assert!(err_amount.is_zero()); let judgements = id.judgements.len(); let extra_fields = id.info.additional.len(); >::insert(&sender, id); - Self::deposit_event(RawEvent::JudgementUnrequested(sender, reg_index)); + Self::deposit_event(Event::JudgementUnrequested(sender, reg_index)); - Ok(Some(T::WeightInfo::cancel_request( - judgements as u32, - extra_fields as u32 - )).into()) + Ok(Some(T::WeightInfo::cancel_request(judgements as u32, extra_fields as u32)).into()) } /// Set the fee required for a judgement to be requested from a registrar. @@ -864,17 +626,25 @@ decl_module! { /// - One storage mutation `O(R)`. /// - Benchmark: 7.315 + R * 0.329 µs (min squares analysis) /// # - #[weight = T::WeightInfo::set_fee(T::MaxRegistrars::get())] // R - fn set_fee(origin, - #[compact] index: RegistrarIndex, - #[compact] fee: BalanceOf, + #[pallet::weight(T::WeightInfo::set_fee(T::MaxRegistrars::get()))] // R + pub fn set_fee( + origin: OriginFor, + #[pallet::compact] index: RegistrarIndex, + #[pallet::compact] fee: BalanceOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.fee = fee; Some(()) } else { None }) + .and_then(|r| { + if r.account == who { + r.fee = fee; + Some(()) + } else { + None + } + }) .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; @@ -894,9 +664,10 @@ decl_module! { /// - One storage mutation `O(R)`. /// - Benchmark: 8.823 + R * 0.32 µs (min squares analysis) /// # - #[weight = T::WeightInfo::set_account_id(T::MaxRegistrars::get())] // R - fn set_account_id(origin, - #[compact] index: RegistrarIndex, + #[pallet::weight(T::WeightInfo::set_account_id(T::MaxRegistrars::get()))] // R + pub fn set_account_id( + origin: OriginFor, + #[pallet::compact] index: RegistrarIndex, new: T::AccountId, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -904,7 +675,14 @@ decl_module! 
{ let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.account = new; Some(()) } else { None }) + .and_then(|r| { + if r.account == who { + r.account = new; + Some(()) + } else { + None + } + }) .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; @@ -924,9 +702,10 @@ decl_module! { /// - One storage mutation `O(R)`. /// - Benchmark: 7.464 + R * 0.325 µs (min squares analysis) /// # - #[weight = T::WeightInfo::set_fields(T::MaxRegistrars::get())] // R - fn set_fields(origin, - #[compact] index: RegistrarIndex, + #[pallet::weight(T::WeightInfo::set_fields(T::MaxRegistrars::get()))] // R + pub fn set_fields( + origin: OriginFor, + #[pallet::compact] index: RegistrarIndex, fields: IdentityFields, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -934,13 +713,21 @@ decl_module! { let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.fields = fields; Some(()) } else { None }) + .and_then(|r| { + if r.account == who { + r.fields = fields; + Some(()) + } else { + None + } + }) .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; Ok(Some(T::WeightInfo::set_fields( - registrars as u32 // R - )).into()) + registrars as u32, // R + )) + .into()) } /// Provide a judgement for an account's identity. @@ -962,12 +749,13 @@ decl_module! { /// - Storage: 1 read `O(R)`, 1 mutate `O(R + X)`. /// - One event. /// # - #[weight = T::WeightInfo::provide_judgement( + #[pallet::weight(T::WeightInfo::provide_judgement( T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X - )] - fn provide_judgement(origin, - #[compact] reg_index: RegistrarIndex, + ))] + pub fn provide_judgement( + origin: OriginFor, + #[pallet::compact] reg_index: RegistrarIndex, target: ::Source, judgement: Judgement>, ) -> DispatchResultWithPostInfo { @@ -985,22 +773,28 @@ decl_module! { match id.judgements.binary_search_by_key(®_index, |x| x.0) { Ok(position) => { if let Judgement::FeePaid(fee) = id.judgements[position].1 { - let _ = T::Currency::repatriate_reserved(&target, &sender, fee, BalanceStatus::Free); + let _ = T::Currency::repatriate_reserved( + &target, + &sender, + fee, + BalanceStatus::Free, + ); } id.judgements[position] = item - } - Err(position) => id.judgements.insert(position, item), + }, + Err(position) => id + .judgements + .try_insert(position, item) + .map_err(|_| Error::::TooManyRegistrars)?, } let judgements = id.judgements.len(); let extra_fields = id.info.additional.len(); >::insert(&target, id); - Self::deposit_event(RawEvent::JudgementGiven(target, reg_index)); + Self::deposit_event(Event::JudgementGiven(target, reg_index)); - Ok(Some(T::WeightInfo::provide_judgement( - judgements as u32, - extra_fields as u32, - )).into()) + Ok(Some(T::WeightInfo::provide_judgement(judgements as u32, extra_fields as u32)) + .into()) } /// Remove an account's identity and sub-account information and slash the deposits. @@ -1022,12 +816,15 @@ decl_module! { /// - `S + 2` storage mutations. /// - One event. 
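// ---------------------------------------------------------------------------
// Aside (test sketch, assuming the mock runtime defined in tests.rs below):
// `provide_judgement` above settles a pending `FeePaid` request by
// repatriating the requester's reserved fee to the registrar as free balance.
// ---------------------------------------------------------------------------
#[test]
fn judgement_pays_the_registrar_sketch() {
	new_test_ext().execute_with(|| {
		assert_ok!(Identity::add_registrar(Origin::signed(1), 3));
		assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10));
		assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten())));
		// Requester 10 reserves the 10-unit fee...
		assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10));
		// ...and the registrar receives it as free balance upon judging.
		assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable));
		assert_eq!(Balances::free_balance(3), 20); // genesis 10 + fee 10
	});
}
// --------------------------------------------------------------- end aside --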
/// # - #[weight = T::WeightInfo::kill_identity( + #[pallet::weight(T::WeightInfo::kill_identity( T::MaxRegistrars::get().into(), // R T::MaxSubAccounts::get().into(), // S T::MaxAdditionalFields::get().into(), // X - )] - fn kill_identity(origin, target: ::Source) -> DispatchResultWithPostInfo { + ))] + pub fn kill_identity( + origin: OriginFor, + target: ::Source, + ) -> DispatchResultWithPostInfo { T::ForceOrigin::ensure_origin(origin)?; // Figure out who we're meant to be clearing. @@ -1042,13 +839,14 @@ decl_module! { // Slash their deposit from them. T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit).0); - Self::deposit_event(RawEvent::IdentityKilled(target, deposit)); + Self::deposit_event(Event::IdentityKilled(target, deposit)); Ok(Some(T::WeightInfo::kill_identity( - id.judgements.len() as u32, // R - sub_ids.len() as u32, // S - id.info.additional.len() as u32 // X - )).into()) + id.judgements.len() as u32, // R + sub_ids.len() as u32, // S + id.info.additional.len() as u32, // X + )) + .into()) } /// Add the given account to the sender's subs. @@ -1058,8 +856,12 @@ decl_module! { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. - #[weight = T::WeightInfo::add_sub(T::MaxSubAccounts::get())] - fn add_sub(origin, sub: ::Source, data: Data) -> DispatchResult { + #[pallet::weight(T::WeightInfo::add_sub(T::MaxSubAccounts::get()))] + pub fn add_sub( + origin: OriginFor, + sub: ::Source, + data: Data, + ) -> DispatchResult { let sender = ensure_signed(origin)?; let sub = T::Lookup::lookup(sub)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); @@ -1069,15 +871,18 @@ decl_module! { SubsOf::::try_mutate(&sender, |(ref mut subs_deposit, ref mut sub_ids)| { // Ensure there is space and that the deposit is paid. - ensure!(sub_ids.len() < T::MaxSubAccounts::get() as usize, Error::::TooManySubAccounts); + ensure!( + sub_ids.len() < T::MaxSubAccounts::get() as usize, + Error::::TooManySubAccounts + ); let deposit = T::SubAccountDeposit::get(); T::Currency::reserve(&sender, deposit)?; SuperOf::::insert(&sub, (sender.clone(), data)); - sub_ids.push(sub.clone()); + sub_ids.try_push(sub.clone()).expect("sub ids length checked above; qed"); *subs_deposit = subs_deposit.saturating_add(deposit); - Self::deposit_event(RawEvent::SubIdentityAdded(sub, sender.clone(), deposit)); + Self::deposit_event(Event::SubIdentityAdded(sub, sender.clone(), deposit)); Ok(()) }) } @@ -1086,13 +891,18 @@ decl_module! { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. - #[weight = T::WeightInfo::rename_sub(T::MaxSubAccounts::get())] - fn rename_sub(origin, sub: ::Source, data: Data) { + #[pallet::weight(T::WeightInfo::rename_sub(T::MaxSubAccounts::get()))] + pub fn rename_sub( + origin: OriginFor, + sub: ::Source, + data: Data, + ) -> DispatchResult { let sender = ensure_signed(origin)?; let sub = T::Lookup::lookup(sub)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); ensure!(SuperOf::::get(&sub).map_or(false, |x| x.0 == sender), Error::::NotOwned); SuperOf::::insert(&sub, (sender, data)); + Ok(()) } /// Remove the given account from the sender's subs. @@ -1102,8 +912,11 @@ decl_module! { /// /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. 
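// ---------------------------------------------------------------------------
// Aside (illustrative sketch with hypothetical names, not in the diff): the
// slash funnel used by `kill_identity` above. `slash_reserved` returns the
// slashed imbalance plus any shortfall; forwarding the imbalance (`.0`) to
// `T::Slashed` (e.g. a treasury) accounts for the funds instead of burning
// them, which is what silently dropping the imbalance would do.
// ---------------------------------------------------------------------------
use frame_support::traits::{OnUnbalanced, ReservableCurrency};

fn slash_deposit<AccountId, C, Handler>(who: &AccountId, deposit: C::Balance)
where
	C: ReservableCurrency<AccountId>,
	Handler: OnUnbalanced<C::NegativeImbalance>,
{
	// `.0` is the imbalance actually slashed; `.1` is what could not be.
	let (imbalance, _shortfall) = C::slash_reserved(who, deposit);
	Handler::on_unbalanced(imbalance);
}
// --------------------------------------------------------------- end aside --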
- #[weight = T::WeightInfo::remove_sub(T::MaxSubAccounts::get())] - fn remove_sub(origin, sub: ::Source) { + #[pallet::weight(T::WeightInfo::remove_sub(T::MaxSubAccounts::get()))] + pub fn remove_sub( + origin: OriginFor, + sub: ::Source, + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); let sub = T::Lookup::lookup(sub)?; @@ -1114,9 +927,11 @@ decl_module! { sub_ids.retain(|x| x != &sub); let deposit = T::SubAccountDeposit::get().min(*subs_deposit); *subs_deposit -= deposit; - let _ = T::Currency::unreserve(&sender, deposit); - Self::deposit_event(RawEvent::SubIdentityRemoved(sub, sender, deposit)); + let err_amount = T::Currency::unreserve(&sender, deposit); + debug_assert!(err_amount.is_zero()); + Self::deposit_event(Event::SubIdentityRemoved(sub, sender, deposit)); }); + Ok(()) } /// Remove the sender as a sub-account. @@ -1129,25 +944,28 @@ decl_module! { /// /// NOTE: This should not normally be used, but is provided in the case that the non- /// controller of an account is maliciously registered as a sub-account. - #[weight = T::WeightInfo::quit_sub(T::MaxSubAccounts::get())] - fn quit_sub(origin) { + #[pallet::weight(T::WeightInfo::quit_sub(T::MaxSubAccounts::get()))] + pub fn quit_sub(origin: OriginFor) -> DispatchResult { let sender = ensure_signed(origin)?; let (sup, _) = SuperOf::::take(&sender).ok_or(Error::::NotSub)?; SubsOf::::mutate(&sup, |(ref mut subs_deposit, ref mut sub_ids)| { sub_ids.retain(|x| x != &sender); let deposit = T::SubAccountDeposit::get().min(*subs_deposit); *subs_deposit -= deposit; - let _ = T::Currency::repatriate_reserved(&sup, &sender, deposit, BalanceStatus::Free); - Self::deposit_event(RawEvent::SubIdentityRevoked(sender, sup.clone(), deposit)); + let _ = + T::Currency::repatriate_reserved(&sup, &sender, deposit, BalanceStatus::Free); + Self::deposit_event(Event::SubIdentityRevoked(sender, sup.clone(), deposit)); }); + Ok(()) } } } -impl Module { +impl Pallet { /// Get the subs of an account. pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { - SubsOf::::get(who).1 + SubsOf::::get(who) + .1 .into_iter() .filter_map(|a| SuperOf::::get(&a).map(|x| (a, x.1))) .collect() diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 0637ac6aafc5f..c842b0e2f64be 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,67 +18,74 @@ // Tests for Identity Pallet use super::*; +use crate as pallet_identity; -use sp_runtime::traits::BadOrigin; -use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types, -}; +use codec::{Decode, Encode}; +use frame_support::{assert_noop, assert_ok, ord_parameter_types, parameter_types, BoundedVec}; +use frame_system::{EnsureOneOf, EnsureRoot, EnsureSignedBy}; use sp_core::H256; -use frame_system::{EnsureSignedBy, EnsureOneOf, EnsureRoot}; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, }; -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Identity: pallet_identity::{Pallet, Call, Storage, Event}, + } +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type Balance = u64; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } parameter_types! { @@ -93,18 +100,10 @@ ord_parameter_types! 
{ pub const One: u64 = 1; pub const Two: u64 = 2; } -type EnsureOneOrRoot = EnsureOneOf< - u64, - EnsureRoot, - EnsureSignedBy ->; -type EnsureTwoOrRoot = EnsureOneOf< - u64, - EnsureRoot, - EnsureSignedBy ->; -impl Trait for Test { - type Event = (); +type EnsureOneOrRoot = EnsureOneOf, EnsureSignedBy>; +type EnsureTwoOrRoot = EnsureOneOf, EnsureSignedBy>; +impl pallet_identity::Config for Test { + type Event = Event; type Currency = Balances; type Slashed = (); type BasicDeposit = BasicDeposit; @@ -117,49 +116,41 @@ impl Trait for Test { type ForceOrigin = EnsureTwoOrRoot; type WeightInfo = (); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Identity = Module; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10), - (2, 10), - (3, 10), - (10, 100), - (20, 100), - (30, 100), - ], - }.assimilate_storage(&mut t).unwrap(); + balances: vec![(1, 10), (2, 10), (3, 10), (10, 100), (20, 100), (30, 100)], + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } -fn ten() -> IdentityInfo { +fn ten() -> IdentityInfo { IdentityInfo { - display: Data::Raw(b"ten".to_vec()), - legal: Data::Raw(b"The Right Ordinal Ten, Esq.".to_vec()), - .. Default::default() + display: Data::Raw(b"ten".to_vec().try_into().unwrap()), + legal: Data::Raw(b"The Right Ordinal Ten, Esq.".to_vec().try_into().unwrap()), + ..Default::default() } } -fn twenty() -> IdentityInfo { +fn twenty() -> IdentityInfo { IdentityInfo { - display: Data::Raw(b"twenty".to_vec()), - legal: Data::Raw(b"The Right Ordinal Twenty, Esq.".to_vec()), - .. Default::default() + display: Data::Raw(b"twenty".to_vec().try_into().unwrap()), + legal: Data::Raw(b"The Right Ordinal Twenty, Esq.".to_vec().try_into().unwrap()), + ..Default::default() } } #[test] fn editing_subaccounts_should_work() { new_test_ext().execute_with(|| { - let data = |x| Data::Raw(vec![x; 1]); + let data = |x| Data::Raw(vec![x; 1].try_into().unwrap()); assert_noop!(Identity::add_sub(Origin::signed(10), 20, data(1)), Error::::NoIdentity); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); // first sub account assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); @@ -173,7 +164,10 @@ fn editing_subaccounts_should_work() { assert_eq!(Balances::free_balance(10), 70); // third sub account is too many - assert_noop!(Identity::add_sub(Origin::signed(10), 3, data(3)), Error::::TooManySubAccounts); + assert_noop!( + Identity::add_sub(Origin::signed(10), 3, data(3)), + Error::::TooManySubAccounts + ); // rename first sub account assert_ok!(Identity::rename_sub(Origin::signed(10), 1, data(11))); @@ -199,10 +193,10 @@ fn editing_subaccounts_should_work() { #[test] fn resolving_subaccount_ownership_works() { new_test_ext().execute_with(|| { - let data = |x| Data::Raw(vec![x; 1]); + let data = |x| Data::Raw(vec![x; 1].try_into().unwrap()); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_identity(Origin::signed(20), twenty())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(Origin::signed(20), Box::new(twenty()))); // 10 claims 1 as a subaccount assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); @@ -210,7 +204,10 @@ fn resolving_subaccount_ownership_works() { assert_eq!(Balances::free_balance(10), 80); 
assert_eq!(Balances::reserved_balance(10), 20); // 20 cannot claim 1 now - assert_noop!(Identity::add_sub(Origin::signed(20), 1, data(1)), Error::::AlreadyClaimed); + assert_noop!( + Identity::add_sub(Origin::signed(20), 1, data(1)), + Error::::AlreadyClaimed + ); // 1 wants to be with 20 so it quits from 10 assert_ok!(Identity::quit_sub(Origin::signed(1))); // 1 gets the 10 that 10 paid. @@ -224,11 +221,11 @@ fn resolving_subaccount_ownership_works() { #[test] fn trailing_zeros_decodes_into_default_data() { - let encoded = Data::Raw(b"Hello".to_vec()).encode(); + let encoded = Data::Raw(b"Hello".to_vec().try_into().unwrap()).encode(); assert!(<(Data, Data)>::decode(&mut &encoded[..]).is_err()); let input = &mut &encoded[..]; let (a, b) = <(Data, Data)>::decode(&mut AppendZerosInput::new(input)).unwrap(); - assert_eq!(a, Data::Raw(b"Hello".to_vec())); + assert_eq!(a, Data::Raw(b"Hello".to_vec().try_into().unwrap())); assert_eq!(b, Data::None); } @@ -239,9 +236,10 @@ fn adding_registrar_should_work() { assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields)); - assert_eq!(Identity::registrars(), vec![ - Some(RegistrarInfo { account: 3, fee: 10, fields }) - ]); + assert_eq!( + Identity::registrars(), + vec![Some(RegistrarInfo { account: 3, fee: 10, fields })] + ); }); } @@ -265,14 +263,10 @@ fn registration_should_work() { assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); let mut three_fields = ten(); - three_fields.additional.push(Default::default()); - three_fields.additional.push(Default::default()); - three_fields.additional.push(Default::default()); - assert_noop!( - Identity::set_identity(Origin::signed(10), three_fields), - Error::::TooManyFields - ); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + three_fields.additional.try_push(Default::default()).unwrap(); + three_fields.additional.try_push(Default::default()).unwrap(); + assert_eq!(three_fields.additional.try_push(Default::default()), Err(())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_eq!(Identity::identity(10).unwrap().info, ten()); assert_eq!(Balances::free_balance(10), 90); assert_ok!(Identity::clear_identity(Origin::signed(10))); @@ -295,7 +289,7 @@ fn uninvited_judgement_should_work() { Error::::InvalidTarget ); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_noop!( Identity::provide_judgement(Origin::signed(10), 0, 10, Judgement::Reasonable), Error::::InvalidIndex @@ -314,7 +308,7 @@ fn uninvited_judgement_should_work() { fn clearing_judgement_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); assert_ok!(Identity::clear_identity(Origin::signed(10))); assert_eq!(Identity::identity(10), None); @@ -324,7 +318,7 @@ fn clearing_judgement_should_work() { #[test] fn killing_slashing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); 
assert_noop!(Identity::kill_identity(Origin::signed(1), 10), BadOrigin); assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); assert_eq!(Identity::identity(10), None); @@ -336,49 +330,55 @@ fn killing_slashing_should_work() { #[test] fn setting_subaccounts_should_work() { new_test_ext().execute_with(|| { - let mut subs = vec![(20, Data::Raw(vec![40; 1]))]; + let mut subs = vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]; assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::NotFound); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Identity::subs_of(10), (10, vec![20])); - assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); + assert_eq!(Identity::subs_of(10), (10, vec![20].try_into().unwrap())); + assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); // push another item and re-set it. - subs.push((30, Data::Raw(vec![50; 1]))); + subs.push((30, Data::Raw(vec![50; 1].try_into().unwrap()))); assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); - assert_eq!(Identity::subs_of(10), (20, vec![20, 30])); - assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); - assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); + assert_eq!(Identity::subs_of(10), (20, vec![20, 30].try_into().unwrap())); + assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); + assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1].try_into().unwrap())))); // switch out one of the items and re-set. 
- subs[0] = (40, Data::Raw(vec![60; 1])); + subs[0] = (40, Data::Raw(vec![60; 1].try_into().unwrap())); assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); // no change in the balance - assert_eq!(Identity::subs_of(10), (20, vec![40, 30])); + assert_eq!(Identity::subs_of(10), (20, vec![40, 30].try_into().unwrap())); assert_eq!(Identity::super_of(20), None); - assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); - assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1])))); + assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1].try_into().unwrap())))); + assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1].try_into().unwrap())))); // clear assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); assert_eq!(Balances::free_balance(10), 90); - assert_eq!(Identity::subs_of(10), (0, vec![])); + assert_eq!(Identity::subs_of(10), (0, BoundedVec::default())); assert_eq!(Identity::super_of(30), None); assert_eq!(Identity::super_of(40), None); - subs.push((20, Data::Raw(vec![40; 1]))); - assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::TooManySubAccounts); + subs.push((20, Data::Raw(vec![40; 1].try_into().unwrap()))); + assert_noop!( + Identity::set_subs(Origin::signed(10), subs.clone()), + Error::::TooManySubAccounts + ); }); } #[test] fn clearing_account_should_remove_subaccounts_and_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_subs( + Origin::signed(10), + vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] + )); assert_ok!(Identity::clear_identity(Origin::signed(10))); assert_eq!(Balances::free_balance(10), 100); assert!(Identity::super_of(20).is_none()); @@ -388,8 +388,11 @@ fn clearing_account_should_remove_subaccounts_and_refund() { #[test] fn killing_account_should_remove_subaccounts_and_not_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_subs( + Origin::signed(10), + vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] + )); assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); assert_eq!(Balances::free_balance(10), 80); assert!(Identity::super_of(20).is_none()); @@ -402,14 +405,17 @@ fn cancelling_requested_judgement_should_work() { assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NoIdentity); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); assert_ok!(Identity::cancel_request(Origin::signed(10), 0)); assert_eq!(Balances::free_balance(10), 90); assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NotFound); assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::JudgementGiven); + assert_noop!( + 
Identity::cancel_request(Origin::signed(10), 0), + Error::::JudgementGiven + ); }); } @@ -418,20 +424,29 @@ fn requesting_judgement_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 9), Error::::FeeChanged); + assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_noop!( + Identity::request_judgement(Origin::signed(10), 0, 9), + Error::::FeeChanged + ); assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); // 10 for the judgement request, 10 for the identity. assert_eq!(Balances::free_balance(10), 80); // Re-requesting won't work as we already paid. - assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); + assert_noop!( + Identity::request_judgement(Origin::signed(10), 0, 10), + Error::::StickyJudgement + ); assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Erroneous)); // Registrar got their payment now. assert_eq!(Balances::free_balance(3), 20); // Re-requesting still won't work as it's erroneous. - assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); + assert_noop!( + Identity::request_judgement(Origin::signed(10), 0, 10), + Error::::StickyJudgement + ); // Requesting from a second registrar still works. assert_ok!(Identity::add_registrar(Origin::signed(1), 4)); @@ -448,12 +463,24 @@ fn field_deposit_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(Origin::signed(10), IdentityInfo { - additional: vec![ - (Data::Raw(b"number".to_vec()), Data::Raw(10u32.encode())), - (Data::Raw(b"text".to_vec()), Data::Raw(b"10".to_vec())), - ], .. Default::default() - })); + assert_ok!(Identity::set_identity( + Origin::signed(10), + Box::new(IdentityInfo { + additional: vec![ + ( + Data::Raw(b"number".to_vec().try_into().unwrap()), + Data::Raw(10u32.encode().try_into().unwrap()) + ), + ( + Data::Raw(b"text".to_vec().try_into().unwrap()), + Data::Raw(b"10".to_vec().try_into().unwrap()) + ), + ] + .try_into() + .unwrap(), + ..Default::default() + }) + )); assert_eq!(Balances::free_balance(10), 70); }); } @@ -463,7 +490,10 @@ fn setting_account_id_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); // account 4 cannot change the first registrar's identity since it's owned by 3. - assert_noop!(Identity::set_account_id(Origin::signed(4), 0, 3), Error::::InvalidIndex); + assert_noop!( + Identity::set_account_id(Origin::signed(4), 0, 3), + Error::::InvalidIndex + ); // account 3 can, because that's the registrar's current account. assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4)); // account 4 can now, because that's their new ID. diff --git a/frame/identity/src/types.rs b/frame/identity/src/types.rs new file mode 100644 index 0000000000000..ed6aeb18e96a1 --- /dev/null +++ b/frame/identity/src/types.rs @@ -0,0 +1,473 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use codec::{Decode, Encode, MaxEncodedLen}; +use enumflags2::BitFlags; +use frame_support::{ + traits::{ConstU32, Get}, + BoundedVec, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, +}; +use scale_info::{ + build::{Fields, Variants}, + meta_type, Path, Type, TypeInfo, TypeParameter, +}; +use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_std::{fmt::Debug, iter::once, ops::Add, prelude::*}; + +/// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater +/// than 32-bytes then it will be truncated when encoding. +/// +/// Can also be `None`. +#[derive(Clone, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)] +pub enum Data { + /// No data here. + None, + /// The data is stored directly. + Raw(BoundedVec>), + /// Only the Blake2 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. + BlakeTwo256([u8; 32]), + /// Only the SHA2-256 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. + Sha256([u8; 32]), + /// Only the Keccak-256 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. + Keccak256([u8; 32]), + /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. + ShaThree256([u8; 32]), +} + +impl Decode for Data { + fn decode(input: &mut I) -> sp_std::result::Result { + let b = input.read_byte()?; + Ok(match b { + 0 => Data::None, + n @ 1..=33 => { + let mut r: BoundedVec<_, _> = vec![0u8; n as usize - 1] + .try_into() + .expect("bound checked in match arm condition; qed"); + input.read(&mut r[..])?; + Data::Raw(r) + }, + 34 => Data::BlakeTwo256(<[u8; 32]>::decode(input)?), + 35 => Data::Sha256(<[u8; 32]>::decode(input)?), + 36 => Data::Keccak256(<[u8; 32]>::decode(input)?), + 37 => Data::ShaThree256(<[u8; 32]>::decode(input)?), + _ => return Err(codec::Error::from("invalid leading byte")), + }) + } +} + +impl Encode for Data { + fn encode(&self) -> Vec { + match self { + Data::None => vec![0u8; 1], + Data::Raw(ref x) => { + let l = x.len().min(32); + let mut r = vec![l as u8 + 1; l + 1]; + r[1..].copy_from_slice(&x[..l as usize]); + r + }, + Data::BlakeTwo256(ref h) => once(34u8).chain(h.iter().cloned()).collect(), + Data::Sha256(ref h) => once(35u8).chain(h.iter().cloned()).collect(), + Data::Keccak256(ref h) => once(36u8).chain(h.iter().cloned()).collect(), + Data::ShaThree256(ref h) => once(37u8).chain(h.iter().cloned()).collect(), + } + } +} +impl codec::EncodeLike for Data {} + +/// Add a Raw variant with the given index and a fixed sized byte array +macro_rules! 
data_raw_variants { + ($variants:ident, $(($index:literal, $size:literal)),* ) => { + $variants + $( + .variant(concat!("Raw", stringify!($size)), |v| v + .index($index) + .fields(Fields::unnamed().field(|f| f.ty::<[u8; $size]>())) + ) + )* + } +} + +impl TypeInfo for Data { + type Identity = Self; + + fn type_info() -> Type { + let variants = Variants::new().variant("None", |v| v.index(0)); + + // create a variant for all sizes of Raw data from 0-32 + let variants = data_raw_variants!( + variants, + (1, 0), + (2, 1), + (3, 2), + (4, 3), + (5, 4), + (6, 5), + (7, 6), + (8, 7), + (9, 8), + (10, 9), + (11, 10), + (12, 11), + (13, 12), + (14, 13), + (15, 14), + (16, 15), + (17, 16), + (18, 17), + (19, 18), + (20, 19), + (21, 20), + (22, 21), + (23, 22), + (24, 23), + (25, 24), + (26, 25), + (27, 26), + (28, 27), + (29, 28), + (30, 29), + (31, 30), + (32, 31), + (33, 32) + ); + + let variants = variants + .variant("BlakeTwo256", |v| { + v.index(34).fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>())) + }) + .variant("Sha256", |v| { + v.index(35).fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>())) + }) + .variant("Keccak256", |v| { + v.index(36).fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>())) + }) + .variant("ShaThree256", |v| { + v.index(37).fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>())) + }); + + Type::builder().path(Path::new("Data", module_path!())).variant(variants) + } +} + +impl Default for Data { + fn default() -> Self { + Self::None + } +} + +/// An identifier for a single name registrar/identity verification service. +pub type RegistrarIndex = u32; + +/// An attestation of a registrar over how accurate some `IdentityInfo` is in describing an account. +/// +/// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear +/// which fields their attestation is relevant for by off-chain means. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub enum Judgement +{ + /// The default value; no opinion is held. + Unknown, + /// No judgement is yet in place, but a deposit is reserved as payment for providing one. + FeePaid(Balance), + /// The data appears to be reasonably acceptable in terms of its accuracy, however no in depth + /// checks (such as in-person meetings or formal KYC) have been conducted. + Reasonable, + /// The target is known directly by the registrar and the registrar can fully attest to the + /// the data's accuracy. + KnownGood, + /// The data was once good but is currently out of date. There is no malicious intent in the + /// inaccuracy. This judgement can be removed through updating the data. + OutOfDate, + /// The data is imprecise or of sufficiently low-quality to be problematic. It is not + /// indicative of malicious intent. This judgement can be removed through updating the data. + LowQuality, + /// The data is erroneous. This may be indicative of malicious intent. This cannot be removed + /// except by the registrar. + Erroneous, +} + +impl + Judgement +{ + /// Returns `true` if this judgement is indicative of a deposit being currently held. This means + /// it should not be cleared or replaced except by an operation which utilizes the deposit. + pub(crate) fn has_deposit(&self) -> bool { + match self { + Judgement::FeePaid(_) => true, + _ => false, + } + } + + /// Returns `true` if this judgement is one that should not be generally be replaced outside + /// of specialized handlers. Examples include "malicious" judgements and deposit-holding + /// judgements. 
+ pub(crate) fn is_sticky(&self) -> bool { + match self { + Judgement::FeePaid(_) | Judgement::Erroneous => true, + _ => false, + } + } +} + +/// The fields that we use to identify the owner of an account with. Each corresponds to a field +/// in the `IdentityInfo` struct. +#[repr(u64)] +#[derive(Clone, Copy, PartialEq, Eq, BitFlags, RuntimeDebug, TypeInfo)] +pub enum IdentityField { + Display = 0b0000000000000000000000000000000000000000000000000000000000000001, + Legal = 0b0000000000000000000000000000000000000000000000000000000000000010, + Web = 0b0000000000000000000000000000000000000000000000000000000000000100, + Riot = 0b0000000000000000000000000000000000000000000000000000000000001000, + Email = 0b0000000000000000000000000000000000000000000000000000000000010000, + PgpFingerprint = 0b0000000000000000000000000000000000000000000000000000000000100000, + Image = 0b0000000000000000000000000000000000000000000000000000000001000000, + Twitter = 0b0000000000000000000000000000000000000000000000000000000010000000, +} + +/// Wrapper type for `BitFlags` that implements `Codec`. +#[derive(Clone, Copy, PartialEq, Default, RuntimeDebug)] +pub struct IdentityFields(pub(crate) BitFlags); + +impl MaxEncodedLen for IdentityFields { + fn max_encoded_len() -> usize { + u64::max_encoded_len() + } +} + +impl Eq for IdentityFields {} +impl Encode for IdentityFields { + fn using_encoded R>(&self, f: F) -> R { + self.0.bits().using_encoded(f) + } +} +impl Decode for IdentityFields { + fn decode(input: &mut I) -> sp_std::result::Result { + let field = u64::decode(input)?; + Ok(Self(>::from_bits(field as u64).map_err(|_| "invalid value")?)) + } +} +impl TypeInfo for IdentityFields { + type Identity = Self; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("BitFlags", module_path!())) + .type_params(vec![TypeParameter::new("T", Some(meta_type::()))]) + .composite(Fields::unnamed().field(|f| f.ty::().type_name("IdentityField"))) + } +} + +/// Information concerning the identity of the controller of an account. +/// +/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra +/// fields in a backwards compatible way through a specialized `Decode` impl. +#[derive( + CloneNoBound, Encode, Decode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo, +)] +#[codec(mel_bound(FieldLimit: Get))] +#[cfg_attr(test, derive(frame_support::DefaultNoBound))] +#[scale_info(skip_type_params(FieldLimit))] +pub struct IdentityInfo> { + /// Additional fields of the identity that are not catered for with the struct's explicit + /// fields. + pub additional: BoundedVec<(Data, Data), FieldLimit>, + + /// A reasonable display name for the controller of the account. This should be whatever it is + /// that it is typically known as and should not be confusable with other entities, given + /// reasonable context. + /// + /// Stored as UTF-8. + pub display: Data, + + /// The full legal name in the local jurisdiction of the entity. This might be a bit + /// long-winded. + /// + /// Stored as UTF-8. + pub legal: Data, + + /// A representative website held by the controller of the account. + /// + /// NOTE: `https://` is automatically prepended. + /// + /// Stored as UTF-8. + pub web: Data, + + /// The Riot/Matrix handle held by the controller of the account. + /// + /// Stored as UTF-8. + pub riot: Data, + + /// The email address of the controller of the account. + /// + /// Stored as UTF-8. + pub email: Data, + + /// The PGP/GPG public key of the controller of the account. 
+	pub pgp_fingerprint: Option<[u8; 20]>,
+
+	/// A graphic image representing the controller of the account. Should be a company,
+	/// organization or project logo or a headshot in the case of a human.
+	pub image: Data,
+
+	/// The Twitter identity. The leading `@` character may be elided.
+	pub twitter: Data,
+}
+
+/// Information concerning the identity of the controller of an account.
+///
+/// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a
+/// backwards compatible way through a specialized `Decode` impl.
+#[derive(
+	CloneNoBound, Encode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo,
+)]
+#[codec(mel_bound(
+	Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq + Zero + Add,
+	MaxJudgements: Get<u32>,
+	MaxAdditionalFields: Get<u32>,
+))]
+#[scale_info(skip_type_params(MaxJudgements, MaxAdditionalFields))]
+pub struct Registration<
+	Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq,
+	MaxJudgements: Get<u32>,
+	MaxAdditionalFields: Get<u32>,
+> {
+	/// Judgements from the registrars on this identity. Stored ordered by `RegistrarIndex`. There
+	/// may be only a single judgement from each registrar.
+	pub judgements: BoundedVec<(RegistrarIndex, Judgement<Balance>), MaxJudgements>,
+
+	/// Amount held on deposit for this information.
+	pub deposit: Balance,
+
+	/// Information on the identity.
+	pub info: IdentityInfo<MaxAdditionalFields>,
+}
+
+impl<
+		Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq + Zero + Add,
+		MaxJudgements: Get<u32>,
+		MaxAdditionalFields: Get<u32>,
+	> Registration<Balance, MaxJudgements, MaxAdditionalFields>
+{
+	pub(crate) fn total_deposit(&self) -> Balance {
+		self.deposit +
+			self.judgements
+				.iter()
+				.map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() })
+				.fold(Zero::zero(), |a, i| a + i)
+	}
+}
+
+impl<
+		Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq,
+		MaxJudgements: Get<u32>,
+		MaxAdditionalFields: Get<u32>,
+	> Decode for Registration<Balance, MaxJudgements, MaxAdditionalFields>
+{
+	fn decode<I: codec::Input>(input: &mut I) -> sp_std::result::Result<Self, codec::Error> {
+		let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?;
+		Ok(Self { judgements, deposit, info })
+	}
+}
+
+/// Information concerning a registrar.
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
+pub struct RegistrarInfo<
+	Balance: Encode + Decode + Clone + Debug + Eq + PartialEq,
+	AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq,
+> {
+	/// The account of the registrar.
+	pub account: AccountId,
+
+	/// Amount required to be given to the registrar for them to provide judgement.
+	pub fee: Balance,
+
+	/// Relevant fields for this registrar. Registrar judgements are limited to attestations on
+	/// these fields.
+	pub fields: IdentityFields,
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn manual_data_type_info() {
+		let mut registry = scale_info::Registry::new();
+		let type_id = registry.register_type(&scale_info::meta_type::<Data>());
+		let registry: scale_info::PortableRegistry = registry.into();
+		let type_info = registry.resolve(type_id.id()).unwrap();
+
+		let check_type_info = |data: &Data| {
+			let variant_name = match data {
+				Data::None => "None".to_string(),
+				Data::BlakeTwo256(_) => "BlakeTwo256".to_string(),
+				Data::Sha256(_) => "Sha256".to_string(),
+				Data::Keccak256(_) => "Keccak256".to_string(),
+				Data::ShaThree256(_) => "ShaThree256".to_string(),
+				Data::Raw(bytes) => format!("Raw{}", bytes.len()),
+			};
+			if let scale_info::TypeDef::Variant(variant) = type_info.type_def() {
+				let variant = variant
+					.variants()
+					.iter()
+					.find(|v| v.name() == &variant_name)
+					.expect(&format!("Expected to find variant {}", variant_name));
+
+				let field_arr_len = variant
+					.fields()
+					.first()
+					.and_then(|f| registry.resolve(f.ty().id()))
+					.map(|ty| {
+						if let scale_info::TypeDef::Array(arr) = ty.type_def() {
+							arr.len()
+						} else {
+							panic!("Should be an array type")
+						}
+					})
+					.unwrap_or(0);
+
+				let encoded = data.encode();
+				assert_eq!(encoded[0], variant.index());
+				assert_eq!(encoded.len() as u32 - 1, field_arr_len);
+			} else {
+				panic!("Should be a variant type")
+			};
+		};
+
+		let mut data = vec![
+			Data::None,
+			Data::BlakeTwo256(Default::default()),
+			Data::Sha256(Default::default()),
+			Data::Keccak256(Default::default()),
+			Data::ShaThree256(Default::default()),
+		];
+
+		// A Raw instance for all possible sizes of the Raw data
+		for n in 0..32 {
+			data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap()))
+		}
+
+		for d in data.iter() {
+			check_type_info(d);
+		}
+	}
+}
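Editor's aside on the invariant this test pins down: `Data`'s hand-written codec stores `Raw(n)` at variant index `n + 1`, which is exactly the mapping `data_raw_variants!` registers in the manual `TypeInfo` impl above. A minimal sketch of the consequence, with hypothetical values that are not part of the PR:

	// `Raw4` is registered with `.index(5)` above, matching the hand-written Encode impl.
	let d = Data::Raw(vec![0xAB; 4].try_into().unwrap());
	let encoded = d.encode();
	assert_eq!(encoded[0], 5); // variant index = payload length + 1
	assert_eq!(encoded.len(), 1 + 4); // one index byte, then the raw payload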
diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs
new file mode 100644
index 0000000000000..611909f326eab
--- /dev/null
+++ b/frame/identity/src/weights.rs
@@ -0,0 +1,401 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_identity
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_identity
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/identity/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_identity.
+pub trait WeightInfo {
+	fn add_registrar(r: u32, ) -> Weight;
+	fn set_identity(r: u32, x: u32, ) -> Weight;
+	fn set_subs_new(s: u32, ) -> Weight;
+	fn set_subs_old(p: u32, ) -> Weight;
+	fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight;
+	fn request_judgement(r: u32, x: u32, ) -> Weight;
+	fn cancel_request(r: u32, x: u32, ) -> Weight;
+	fn set_fee(r: u32, ) -> Weight;
+	fn set_account_id(r: u32, ) -> Weight;
+	fn set_fields(r: u32, ) -> Weight;
+	fn provide_judgement(r: u32, x: u32, ) -> Weight;
+	fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight;
+	fn add_sub(s: u32, ) -> Weight;
+	fn rename_sub(s: u32, ) -> Weight;
+	fn remove_sub(s: u32, ) -> Weight;
+	fn quit_sub(s: u32, ) -> Weight;
+}
+
+/// Weights for pallet_identity using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// Storage: Identity Registrars (r:1 w:1)
+	fn add_registrar(r: u32, ) -> Weight {
+		(22_152_000 as Weight)
+			// Standard Error: 6_000
+			.saturating_add((339_000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Identity IdentityOf (r:1 w:1)
+	fn set_identity(r: u32, x: u32, ) -> Weight {
+		(53_017_000 as Weight)
+			// Standard Error: 14_000
+			.saturating_add((279_000 as Weight).saturating_mul(r as Weight))
+			// Standard Error: 1_000
+			.saturating_add((1_081_000 as Weight).saturating_mul(x as Weight))
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Identity IdentityOf (r:1 w:0)
+	// Storage: Identity SubsOf (r:1 w:1)
+	// Storage: Identity SuperOf (r:1 w:1)
+	fn set_subs_new(s: u32, ) -> Weight {
+		(44_693_000 as Weight)
+			// Standard Error: 1_000
+			.saturating_add((6_631_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight)))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight)))
+	}
+	// Storage: Identity IdentityOf (r:1 w:0)
+	// Storage: Identity SubsOf (r:1 w:1)
+	// Storage: Identity SuperOf (r:0 w:1)
+	fn set_subs_old(p: u32, ) -> Weight {
+		(42_017_000 as Weight)
+			// Standard Error: 1_000
+			.saturating_add((2_193_000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
+	}
+	// Storage: Identity SubsOf (r:1 w:1)
+	// Storage: Identity IdentityOf (r:1 w:1)
+	// Storage: Identity SuperOf (r:0 w:100)
+	fn clear_identity(r: u32, s: u32, x: u32, ) 
-> Weight { + (50_989_000 as Weight) + // Standard Error: 11_000 + .saturating_add((258_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 1_000 + .saturating_add((579_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Identity Registrars (r:1 w:0) + // Storage: Identity IdentityOf (r:1 w:1) + fn request_judgement(r: u32, x: u32, ) -> Weight { + (55_562_000 as Weight) + // Standard Error: 5_000 + .saturating_add((317_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_137_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:1) + fn cancel_request(r: u32, x: u32, ) -> Weight { + (51_744_000 as Weight) + // Standard Error: 6_000 + .saturating_add((192_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_131_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:1) + fn set_fee(r: u32, ) -> Weight { + (9_472_000 as Weight) + // Standard Error: 3_000 + .saturating_add((321_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:1) + fn set_account_id(r: u32, ) -> Weight { + (9_705_000 as Weight) + // Standard Error: 3_000 + .saturating_add((312_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:1) + fn set_fields(r: u32, ) -> Weight { + (9_537_000 as Weight) + // Standard Error: 3_000 + .saturating_add((318_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:0) + // Storage: Identity IdentityOf (r:1 w:1) + fn provide_judgement(r: u32, x: u32, ) -> Weight { + (36_298_000 as Weight) + // Standard Error: 5_000 + .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_141_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Identity SubsOf (r:1 w:1) + // Storage: Identity IdentityOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Identity SuperOf (r:0 w:100) + fn kill_identity(r: u32, s: u32, _x: u32, ) -> Weight { + (63_238_000 as Weight) + // Standard Error: 10_000 + .saturating_add((246_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Identity IdentityOf (r:1 w:0) + // 
Storage: Identity SuperOf (r:1 w:1) + // Storage: Identity SubsOf (r:1 w:1) + fn add_sub(s: u32, ) -> Weight { + (57_394_000 as Weight) + // Standard Error: 1_000 + .saturating_add((208_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:0) + // Storage: Identity SuperOf (r:1 w:1) + fn rename_sub(s: u32, ) -> Weight { + (18_274_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:0) + // Storage: Identity SuperOf (r:1 w:1) + // Storage: Identity SubsOf (r:1 w:1) + fn remove_sub(s: u32, ) -> Weight { + (58_184_000 as Weight) + // Standard Error: 1_000 + .saturating_add((195_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Identity SuperOf (r:1 w:1) + // Storage: Identity SubsOf (r:1 w:1) + fn quit_sub(s: u32, ) -> Weight { + (36_304_000 as Weight) + // Standard Error: 1_000 + .saturating_add((191_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Identity Registrars (r:1 w:1) + fn add_registrar(r: u32, ) -> Weight { + (22_152_000 as Weight) + // Standard Error: 6_000 + .saturating_add((339_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:1) + fn set_identity(r: u32, x: u32, ) -> Weight { + (53_017_000 as Weight) + // Standard Error: 14_000 + .saturating_add((279_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((1_081_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:0) + // Storage: Identity SubsOf (r:1 w:1) + // Storage: Identity SuperOf (r:1 w:1) + fn set_subs_new(s: u32, ) -> Weight { + (44_693_000 as Weight) + // Standard Error: 1_000 + .saturating_add((6_631_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Identity IdentityOf (r:1 w:0) + // Storage: Identity SubsOf (r:1 w:1) + // Storage: Identity SuperOf (r:0 w:1) + fn set_subs_old(p: u32, ) -> Weight { + (42_017_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_193_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + // Storage: Identity SubsOf (r:1 w:1) + // Storage: Identity IdentityOf (r:1 w:1) + // Storage: Identity SuperOf (r:0 w:100) + fn clear_identity(r: u32, s: u32, x: u32, ) -> 
Weight { + (50_989_000 as Weight) + // Standard Error: 11_000 + .saturating_add((258_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 1_000 + .saturating_add((579_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Identity Registrars (r:1 w:0) + // Storage: Identity IdentityOf (r:1 w:1) + fn request_judgement(r: u32, x: u32, ) -> Weight { + (55_562_000 as Weight) + // Standard Error: 5_000 + .saturating_add((317_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_137_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:1) + fn cancel_request(r: u32, x: u32, ) -> Weight { + (51_744_000 as Weight) + // Standard Error: 6_000 + .saturating_add((192_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_131_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:1) + fn set_fee(r: u32, ) -> Weight { + (9_472_000 as Weight) + // Standard Error: 3_000 + .saturating_add((321_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:1) + fn set_account_id(r: u32, ) -> Weight { + (9_705_000 as Weight) + // Standard Error: 3_000 + .saturating_add((312_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:1) + fn set_fields(r: u32, ) -> Weight { + (9_537_000 as Weight) + // Standard Error: 3_000 + .saturating_add((318_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity Registrars (r:1 w:0) + // Storage: Identity IdentityOf (r:1 w:1) + fn provide_judgement(r: u32, x: u32, ) -> Weight { + (36_298_000 as Weight) + // Standard Error: 5_000 + .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((1_141_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity SubsOf (r:1 w:1) + // Storage: Identity IdentityOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Identity SuperOf (r:0 w:100) + fn kill_identity(r: u32, s: u32, _x: u32, ) -> Weight { + (63_238_000 as Weight) + // Standard Error: 10_000 + .saturating_add((246_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: 
Identity IdentityOf (r:1 w:0) + // Storage: Identity SuperOf (r:1 w:1) + // Storage: Identity SubsOf (r:1 w:1) + fn add_sub(s: u32, ) -> Weight { + (57_394_000 as Weight) + // Standard Error: 1_000 + .saturating_add((208_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:0) + // Storage: Identity SuperOf (r:1 w:1) + fn rename_sub(s: u32, ) -> Weight { + (18_274_000 as Weight) + // Standard Error: 0 + .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Identity IdentityOf (r:1 w:0) + // Storage: Identity SuperOf (r:1 w:1) + // Storage: Identity SubsOf (r:1 w:1) + fn remove_sub(s: u32, ) -> Weight { + (58_184_000 as Weight) + // Standard Error: 1_000 + .saturating_add((195_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Identity SuperOf (r:1 w:1) + // Storage: Identity SubsOf (r:1 w:1) + fn quit_sub(s: u32, ) -> Weight { + (36_304_000 as Weight) + // Standard Error: 1_000 + .saturating_add((191_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index ef22d67688732..a1efd626c0690 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-im-online" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,35 +13,39 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../primitives/application-crypto" } +pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-core = { version = "4.0.0-dev", default-features = false, path = 
"../../primitives/core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +pallet-session = { version = "4.0.0-dev", path = "../session" } [features] -default = ["std", "pallet-session/historical"] +default = ["std"] std = [ "sp-application-crypto/std", "pallet-authorship/std", "codec/std", + "scale-info/std", "sp-core/std", "sp-std/std", - "serde", - "pallet-session/std", "sp-io/std", "sp-runtime/std", "sp-staking/std", "frame-support/std", "frame-system/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/im-online/README.md b/frame/im-online/README.md index 9a65bb6a98086..46b2268f18b12 100644 --- a/frame/im-online/README.md +++ b/frame/im-online/README.md @@ -13,7 +13,7 @@ and includes the recent best block number of the local validators chain as well as the `NetworkState`. It is submitted as an Unsigned Transaction via off-chain workers. -- [`im_online::Trait`](https://docs.rs/pallet-im-online/latest/pallet_im_online/trait.Trait.html) +- [`im_online::Config`](https://docs.rs/pallet-im-online/latest/pallet_im_online/trait.Config.html) - [`Call`](https://docs.rs/pallet-im-online/latest/pallet_im_online/enum.Call.html) - [`Module`](https://docs.rs/pallet-im-online/latest/pallet_im_online/struct.Module.html) @@ -30,10 +30,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_im_online::{self as im_online}; -pub trait Trait: im_online::Trait {} +pub trait Config: im_online::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -48,4 +48,4 @@ decl_module! { This module depends on the [Session module](https://docs.rs/pallet-session/latest/pallet_session/). -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index b92be023ce480..1043a97f67def 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs
index b92be023ce480..1043a97f67def 100644
--- a/frame/im-online/src/benchmarking.rs
+++ b/frame/im-online/src/benchmarking.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,22 +21,27 @@

use super::*;

-use frame_system::RawOrigin;
-use frame_benchmarking::benchmarks;
-use sp_core::OpaquePeerId;
-use sp_core::offchain::OpaqueMultiaddr;
-use sp_runtime::traits::{ValidateUnsigned, Zero};
-use sp_runtime::transaction_validity::TransactionSource;
+use frame_benchmarking::{benchmarks, impl_benchmark_test_suite};
use frame_support::traits::UnfilteredDispatchable;
+use frame_system::RawOrigin;
+use sp_core::{offchain::OpaqueMultiaddr, OpaquePeerId};
+use sp_runtime::{
+	traits::{ValidateUnsigned, Zero},
+	transaction_validity::TransactionSource,
+};

-use crate::Module as ImOnline;
+use crate::Pallet as ImOnline;

const MAX_KEYS: u32 = 1000;
const MAX_EXTERNAL_ADDRESSES: u32 = 100;

-pub fn create_heartbeat<T: Trait>(k: u32, e: u32) ->
-	Result<(crate::Heartbeat<T::BlockNumber>, <T::AuthorityId as RuntimeAppPublic>::Signature), &'static str>
-{
+pub fn create_heartbeat<T: Config>(
+	k: u32,
+	e: u32,
+) -> Result<
+	(crate::Heartbeat<T::BlockNumber>, <T::AuthorityId as RuntimeAppPublic>::Signature),
+	&'static str,
+> {
	let mut keys = Vec::new();
	for _ in 0..k {
		keys.push(T::AuthorityId::generate_pair(None));
@@ -51,20 +56,18 @@
		block_number: T::BlockNumber::zero(),
		network_state,
		session_index: 0,
-		authority_index: k-1,
+		authority_index: k - 1,
		validators_len: keys.len() as u32,
	};

	let encoded_heartbeat = input_heartbeat.encode();

-	let authority_id = keys.get((k-1) as usize).ok_or("out of range")?;
+	let authority_id = keys.get((k - 1) as usize).ok_or("out of range")?;
	let signature = authority_id.sign(&encoded_heartbeat).ok_or("couldn't make signature")?;

	Ok((input_heartbeat, signature))
}

benchmarks! {
-	_{ }
-
	#[extra]
	heartbeat {
		let k in 1 .. MAX_KEYS;
@@ -77,34 +80,22 @@
		let k in 1 .. MAX_KEYS;
		let e in 1 .. MAX_EXTERNAL_ADDRESSES;
		let (input_heartbeat, signature) = create_heartbeat::<T>(k, e)?;
-		let call = Call::heartbeat(input_heartbeat, signature);
+		let call = Call::heartbeat { heartbeat: input_heartbeat, signature };
	}: {
-		ImOnline::<T>::validate_unsigned(TransactionSource::InBlock, &call)?;
+		ImOnline::<T>::validate_unsigned(TransactionSource::InBlock, &call)
+			.map_err(<&str>::from)?;
	}

	validate_unsigned_and_then_heartbeat {
		let k in 1 .. MAX_KEYS;
		let e in 1 .. MAX_EXTERNAL_ADDRESSES;
		let (input_heartbeat, signature) = create_heartbeat::<T>(k, e)?;
-		let call = Call::heartbeat(input_heartbeat, signature);
+		let call = Call::heartbeat { heartbeat: input_heartbeat, signature };
	}: {
-		ImOnline::<T>::validate_unsigned(TransactionSource::InBlock, &call)?;
+		ImOnline::<T>::validate_unsigned(TransactionSource::InBlock, &call)
+			.map_err(<&str>::from)?;
		call.dispatch_bypass_filter(RawOrigin::None.into())?;
	}
}

-#[cfg(test)]
-mod tests {
-	use super::*;
-	use crate::mock::{new_test_ext, Runtime};
-	use frame_support::assert_ok;
-
-	#[test]
-	fn test_benchmarks() {
-		new_test_ext().execute_with(|| {
-			assert_ok!(test_benchmark_heartbeat::<Runtime>());
-			assert_ok!(test_benchmark_validate_unsigned::<Runtime>());
-			assert_ok!(test_benchmark_validate_unsigned_and_then_heartbeat::<Runtime>());
-		});
-	}
-}
+impl_benchmark_test_suite!(ImOnline, crate::mock::new_test_ext(), crate::mock::Runtime);
diff --git a/frame/im-online/src/default_weight.rs b/frame/im-online/src/default_weight.rs
deleted file mode 100644
index e6efb42f2e3d8..0000000000000
--- a/frame/im-online/src/default_weight.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6
-
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-
-use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
-
-impl crate::WeightInfo for () {
-	fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight {
-		(139830000 as Weight)
-			.saturating_add((211000 as Weight).saturating_mul(k as Weight))
-			.saturating_add((654000 as Weight).saturating_mul(e as Weight))
-			.saturating_add(DbWeight::get().reads(4 as Weight))
-			.saturating_add(DbWeight::get().writes(1 as Weight))
-	}
-}
diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs
index ef9c6b9182af6..ab4f7001574e5 100644
--- a/frame/im-online/src/lib.rs
+++ b/frame/im-online/src/lib.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-//! # I'm online Module
+//! # I'm online Pallet
//!
//! If the local node is a validator (i.e. contains an authority key), this module
//! gossips a heartbeat transaction with each new session. The heartbeat functions
@@ -30,9 +30,9 @@
//! as the [NetworkState](../../client/offchain/struct.NetworkState.html).
//! It is submitted as an Unsigned Transaction via off-chain workers.
//!
-//! - [`im_online::Trait`](./trait.Trait.html)
-//! - [`Call`](./enum.Call.html)
-//! - [`Module`](./struct.Module.html)
+//! - [`Config`]
+//! - [`Call`]
+//! - [`Pallet`]
//!
//! ## Interface
//!
@@ -47,14 +47,14 @@
//! use frame_system::ensure_signed;
//! use pallet_im_online::{self as im_online};
//!
-//! pub trait Trait: im_online::Trait {}
+//! pub trait Config: im_online::Config {}
//!
//! decl_module! {
-//! 	pub struct Module<T: Trait> for enum Call<T> where origin: T::Origin {
+//! 	pub struct Module<T: Config> for enum Call<T> where origin: T::Origin {
//! 		#[weight = 0]
//! 		pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult {
//! 			let _sender = ensure_signed(origin)?;
-//! 			let _is_online = <im_online::Module<T>>::is_online(authority_index);
+//! 			let _is_online = <im_online::Pallet<T>>::is_online(authority_index);
//! 			Ok(())
//! 		}
//! 	}
@@ -69,40 +69,31 @@
// Ensure we're `no_std` when compiling for Wasm.
#![cfg_attr(not(feature = "std"), no_std)] +mod benchmarking; mod mock; mod tests; -mod benchmarking; -mod default_weight; +pub mod weights; +use codec::{Decode, Encode}; +use frame_support::traits::{ + EstimateNextSessionRotation, OneSessionHandler, ValidatorSet, ValidatorSetWithIdentification, +}; +use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; +pub use pallet::*; +use scale_info::TypeInfo; use sp_application_crypto::RuntimeAppPublic; -use codec::{Encode, Decode}; use sp_core::offchain::OpaqueNetworkState; -use sp_std::prelude::*; -use sp_std::convert::TryInto; -use pallet_session::historical::IdentificationTuple; use sp_runtime::{ - offchain::storage::StorageValueRef, - RuntimeDebug, - traits::{Convert, Member, Saturating, AtLeast32BitUnsigned}, Perbill, - transaction_validity::{ - TransactionValidity, ValidTransaction, InvalidTransaction, TransactionSource, - TransactionPriority, - }, + offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, + traits::{AtLeast32BitUnsigned, Convert, Saturating, TrailingZeroInput}, + PerThing, Perbill, Permill, RuntimeDebug, SaturatedConversion, }; use sp_staking::{ + offence::{Kind, Offence, ReportOffence}, SessionIndex, - offence::{ReportOffence, Offence, Kind}, -}; -use frame_support::{ - decl_module, decl_event, decl_storage, Parameter, debug, decl_error, - traits::Get, - weights::Weight, -}; -use frame_system::ensure_none; -use frame_system::offchain::{ - SendTransactionTypes, - SubmitTransaction, }; +use sp_std::{convert::TryInto, prelude::*}; +pub use weights::WeightInfo; pub mod sr25519 { mod app_sr25519 { @@ -124,7 +115,7 @@ pub mod sr25519 { pub mod ed25519 { mod app_ed25519 { - use sp_application_crypto::{app_crypto, key_types::IM_ONLINE, ed25519}; + use sp_application_crypto::{app_crypto, ed25519, key_types::IM_ONLINE}; app_crypto!(ed25519, IM_ONLINE); } @@ -150,7 +141,7 @@ const INCLUDE_THRESHOLD: u32 = 3; /// This stores the block number at which heartbeat was requested and when the worker /// has actually managed to produce it. /// Note we store such status for every `authority_index` separately. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] struct HeartbeatStatus { /// An index of the session that we are supposed to send heartbeat for. pub session_index: SessionIndex, @@ -182,7 +173,7 @@ impl HeartbeatStatus { - TooEarly(BlockNumber), + TooEarly, WaitingForInclusion(BlockNumber), AlreadyOnline(u32), FailedSigning, @@ -194,12 +185,13 @@ enum OffchainErr { impl sp_std::fmt::Debug for OffchainErr { fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { match *self { - OffchainErr::TooEarly(ref block) => - write!(fmt, "Too early to send heartbeat, next expected at {:?}", block), - OffchainErr::WaitingForInclusion(ref block) => - write!(fmt, "Heartbeat already sent at {:?}. Waiting for inclusion.", block), - OffchainErr::AlreadyOnline(auth_idx) => - write!(fmt, "Authority {} is already online", auth_idx), + OffchainErr::TooEarly => write!(fmt, "Too early to send heartbeat."), + OffchainErr::WaitingForInclusion(ref block) => { + write!(fmt, "Heartbeat already sent at {:?}. 
+			OffchainErr::WaitingForInclusion(ref block) => {
+				write!(fmt, "Heartbeat already sent at {:?}. Waiting for inclusion.", block)
+			},
+			OffchainErr::AlreadyOnline(auth_idx) => {
+				write!(fmt, "Authority {} is already online", auth_idx)
+			},
			OffchainErr::FailedSigning => write!(fmt, "Failed to sign heartbeat"),
			OffchainErr::FailedToAcquireLock => write!(fmt, "Failed to acquire lock"),
			OffchainErr::NetworkState => write!(fmt, "Failed to fetch network state"),
@@ -211,9 +203,10 @@ impl<BlockNumber: sp_std::fmt::Debug> sp_std::fmt::Debug for OffchainErr<BlockNumber>
pub struct Heartbeat<BlockNumber>
-	where BlockNumber: PartialEq + Eq + Decode + Encode,
+where
+	BlockNumber: PartialEq + Eq + Decode + Encode,
{
	/// Block number at the time heartbeat is created.
	pub block_number: BlockNumber,
@@ -227,107 +220,166 @@ pub struct Heartbeat<BlockNumber>
	pub validators_len: u32,
}

-pub trait WeightInfo {
-	fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight;
-}
-
-pub trait Trait: SendTransactionTypes<Call<Self>> + pallet_session::historical::Trait {
-	/// The identifier type for an authority.
-	type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord;
-
-	/// The overarching event type.
-	type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
+/// A type for representing the validator id in a session.
+pub type ValidatorId<T> = <<T as Config>::ValidatorSet as ValidatorSet<
+	<T as frame_system::Config>::AccountId,
+>>::ValidatorId;
+
+/// A tuple of (ValidatorId, Identification) where `Identification` is the full identification of
+/// `ValidatorId`.
+pub type IdentificationTuple<T> = (
+	ValidatorId<T>,
+	<<T as Config>::ValidatorSet as ValidatorSetWithIdentification<
+		<T as frame_system::Config>::AccountId,
+	>>::Identification,
);

-	/// An expected duration of the session.
-	///
-	/// This parameter is used to determine the longevity of `heartbeat` transaction
-	/// and a rough time when we should start considering sending heartbeats,
-	/// since the workers avoids sending them at the very beginning of the session, assuming
-	/// there is a chance the authority will produce a block and they won't be necessary.
-	type SessionDuration: Get<Self::BlockNumber>;
-
-	/// A type that gives us the ability to submit unresponsiveness offence reports.
-	type ReportUnresponsiveness:
-		ReportOffence<
+type OffchainResult<T, A> = Result<A, OffchainErr<<T as frame_system::Config>::BlockNumber>>;
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::{pallet_prelude::*, traits::Get, Parameter};
+	use frame_system::{ensure_none, pallet_prelude::*};
+	use sp_runtime::{
+		traits::{MaybeSerializeDeserialize, Member},
+		transaction_validity::{
+			InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity,
+			ValidTransaction,
+		},
+	};
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: SendTransactionTypes<Call<Self>> + frame_system::Config {
+		/// The identifier type for an authority.
+		type AuthorityId: Member
+			+ Parameter
+			+ RuntimeAppPublic
+			+ Default
+			+ Ord
+			+ MaybeSerializeDeserialize;
+
+		/// The overarching event type.
+		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+
+		/// A type for retrieving the validators supposed to be online in a session.
+		type ValidatorSet: ValidatorSetWithIdentification<Self::AccountId>;
+
+		/// A trait that allows us to estimate the current session progress and also the
+		/// average session length.
+		///
+		/// This parameter is used to determine the longevity of the `heartbeat` transaction and a
+		/// rough time when we should start considering sending heartbeats, since the worker
+		/// avoids sending them at the very beginning of the session, assuming there is a
+		/// chance the authority will produce a block and they won't be necessary.
+		type NextSessionRotation: EstimateNextSessionRotation<Self::BlockNumber>;
+
+		/// A type that gives us the ability to submit unresponsiveness offence reports.
+		type ReportUnresponsiveness: ReportOffence<
			Self::AccountId,
			IdentificationTuple<Self>,
			UnresponsivenessOffence<IdentificationTuple<Self>>,
		>;

-	/// A configuration for base priority of unsigned transactions.
-	///
-	/// This is exposed so that it can be tuned for particular runtime, when
-	/// multiple pallets send unsigned transactions.
-	type UnsignedPriority: Get<TransactionPriority>;
+		/// A configuration for base priority of unsigned transactions.
+		///
+		/// This is exposed so that it can be tuned for a particular runtime, when
+		/// multiple pallets send unsigned transactions.
+		#[pallet::constant]
+		type UnsignedPriority: Get<TransactionPriority>;

-	/// Weight information for extrinsics in this pallet.
-	type WeightInfo: WeightInfo;
-}
+		/// Weight information for extrinsics in this pallet.
+		type WeightInfo: WeightInfo;
+	}

-decl_event!(
-	pub enum Event<T> where
-		<T as Trait>::AuthorityId,
-		IdentificationTuple = IdentificationTuple<T>,
-	{
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
		/// A new heartbeat was received from `AuthorityId` \[authority_id\]
-		HeartbeatReceived(AuthorityId),
+		HeartbeatReceived(T::AuthorityId),
		/// At the end of the session, no offence was committed.
		AllGood,
		/// At the end of the session, at least one validator was found to be \[offline\].
-		SomeOffline(Vec<IdentificationTuple>),
+		SomeOffline(Vec<IdentificationTuple<T>>),
	}
-);

-decl_storage! {
-	trait Store for Module<T: Trait> as ImOnline {
-		/// The block number after which it's ok to send heartbeats in current session.
-		///
-		/// At the beginning of each session we set this to a value that should
-		/// fall roughly in the middle of the session duration.
-		/// The idea is to first wait for the validators to produce a block
-		/// in the current session, so that the heartbeat later on will not be necessary.
-		HeartbeatAfter get(fn heartbeat_after): T::BlockNumber;
-
-		/// The current set of keys that may issue a heartbeat.
-		Keys get(fn keys): Vec<T::AuthorityId>;
-
-		/// For each session index, we keep a mapping of `AuthIndex` to
-		/// `offchain::OpaqueNetworkState`.
-		ReceivedHeartbeats get(fn received_heartbeats):
-			double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) AuthIndex
-			=> Option<Vec<u8>>;
-
-		/// For each session index, we keep a mapping of `T::ValidatorId` to the
-		/// number of blocks authored by the given authority.
-		AuthoredBlocks get(fn authored_blocks):
-			double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) T::ValidatorId
-			=> u32;
-	}
-	add_extra_genesis {
-		config(keys): Vec<T::AuthorityId>;
-		build(|config| Module::<T>::initialize_keys(&config.keys))
-	}
-}
-
-decl_error! {
-	/// Error for the im-online module.
-	pub enum Error for Module<T: Trait> {
+	#[pallet::error]
+	pub enum Error<T> {
		/// Non existent public key.
		InvalidKey,
		/// Duplicated heartbeat.
		DuplicatedHeartbeat,
	}
-}

-decl_module! {
-	pub struct Module<T: Trait> for enum Call<T> where origin: T::Origin {
-		type Error = Error<T>;
+	/// The block number after which it's ok to send heartbeats in the current
+	/// session.
+	///
+	/// At the beginning of each session we set this to a value that should fall
+	/// roughly in the middle of the session duration. The idea is to first wait for
+	/// the validators to produce a block in the current session, so that the
+	/// heartbeat later on will not be necessary.
+	///
+	/// This value will only be used as a fallback if we fail to get a proper session
+	/// progress estimate from `NextSessionRotation`, as those estimates should be
+	/// more accurate than the value we calculate for `HeartbeatAfter`.
+	#[pallet::storage]
+	#[pallet::getter(fn heartbeat_after)]
+	pub(crate) type HeartbeatAfter<T: Config> = StorageValue<_, T::BlockNumber, ValueQuery>;
+
+	/// The current set of keys that may issue a heartbeat.
+	#[pallet::storage]
+	#[pallet::getter(fn keys)]
+	pub(crate) type Keys<T: Config> = StorageValue<_, Vec<T::AuthorityId>, ValueQuery>;
+
+	/// For each session index, we keep a mapping of `AuthIndex` to
+	/// `offchain::OpaqueNetworkState`.
+	#[pallet::storage]
+	#[pallet::getter(fn received_heartbeats)]
+	pub(crate) type ReceivedHeartbeats<T: Config> =
+		StorageDoubleMap<_, Twox64Concat, SessionIndex, Twox64Concat, AuthIndex, Vec<u8>>;
+
+	/// For each session index, we keep a mapping of `ValidatorId<T>` to the
+	/// number of blocks authored by the given authority.
+	#[pallet::storage]
+	#[pallet::getter(fn authored_blocks)]
+	pub(crate) type AuthoredBlocks<T: Config> = StorageDoubleMap<
+		_,
+		Twox64Concat,
+		SessionIndex,
+		Twox64Concat,
+		ValidatorId<T>,
+		u32,
+		ValueQuery,
+	>;
+
+	#[pallet::genesis_config]
+	pub struct GenesisConfig<T: Config> {
+		pub keys: Vec<T::AuthorityId>,
+	}
+
+	#[cfg(feature = "std")]
+	impl<T: Config> Default for GenesisConfig<T> {
+		fn default() -> Self {
+			GenesisConfig { keys: Default::default() }
+		}
+	}

-		fn deposit_event() = default;
+	#[pallet::genesis_build]
+	impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
+		fn build(&self) {
+			Pallet::<T>::initialize_keys(&self.keys);
+		}
+	}

+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
		/// # <weight>
-		/// - Complexity: `O(K + E)` where K is length of `Keys` (heartbeat.validators_len)
-		///   and E is length of `heartbeat.network_state.external_address`
+		/// - Complexity: `O(K + E)` where K is length of `Keys` (heartbeat.validators_len) and E is
+		///   length of `heartbeat.network_state.external_address`
		///   - `O(K)`: decoding of length `K`
		///   - `O(E)`: decoding/encoding of length `E`
		/// - DbReads: pallet_session `Validators`, pallet_session `CurrentIndex`, `Keys`,
@@ -336,50 +388,52 @@ decl_module! {
		/// # </weight>
		// NOTE: the weight includes the cost of validate_unsigned as it is part of the cost to
		// import block with such an extrinsic.
-		#[weight = <T as Trait>::WeightInfo::validate_unsigned_and_then_heartbeat(
+		#[pallet::weight(<T as Config>::WeightInfo::validate_unsigned_and_then_heartbeat(
			heartbeat.validators_len as u32,
			heartbeat.network_state.external_addresses.len() as u32,
-		)]
-		fn heartbeat(
-			origin,
+		))]
+		pub fn heartbeat(
+			origin: OriginFor<T>,
			heartbeat: Heartbeat<T::BlockNumber>,
			// since signature verification is done in `validate_unsigned`
			// we can skip doing it here again.
			_signature: <T::AuthorityId as RuntimeAppPublic>::Signature,
-		) {
+		) -> DispatchResult {
			ensure_none(origin)?;

-			let current_session = <pallet_session::Module<T>>::current_index();
-			let exists = <ReceivedHeartbeats>::contains_key(
-				&current_session,
-				&heartbeat.authority_index
-			);
+			let current_session = T::ValidatorSet::session_index();
+			let exists =
+				ReceivedHeartbeats::<T>::contains_key(&current_session, &heartbeat.authority_index);
			let keys = Keys::<T>::get();
			let public = keys.get(heartbeat.authority_index as usize);
			if let (false, Some(public)) = (exists, public) {
				Self::deposit_event(Event::<T>::HeartbeatReceived(public.clone()));

				let network_state = heartbeat.network_state.encode();
-				<ReceivedHeartbeats>::insert(
+				ReceivedHeartbeats::<T>::insert(
					&current_session,
					&heartbeat.authority_index,
-					&network_state
+					&network_state,
				);
+
+				Ok(())
			} else if exists {
				Err(Error::<T>::DuplicatedHeartbeat)?
			} else {
				Err(Error::<T>::InvalidKey)?
			}
		}
+	}
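For context, a runtime adopting the reworked `Config` wires these items roughly as follows. This is a hedged sketch modeled on the Substrate node runtime; `ImOnlineId`, `Babe`, `Offences` and `ImOnlineUnsignedPriority` are assumed names from that runtime, and `Historical` refers to the `pallet_session::historical` pallet, which supplies the `ValidatorSetWithIdentification` implementation:

impl pallet_im_online::Config for Runtime {
	type AuthorityId = ImOnlineId;
	type Event = Event;
	type ValidatorSet = Historical<Runtime>;
	type NextSessionRotation = Babe;
	type ReportUnresponsiveness = Offences;
	type UnsignedPriority = ImOnlineUnsignedPriority;
	type WeightInfo = pallet_im_online::weights::SubstrateWeight<Runtime>;
}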
-		// Runs after every block.
-		fn offchain_worker(now: T::BlockNumber) {
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn offchain_worker(now: BlockNumberFor<T>) {
			// Only send messages if we are a potential validator.
			if sp_io::offchain::is_validator() {
				for res in Self::send_heartbeats(now).into_iter().flatten() {
					if let Err(e) = res {
-						debug::debug!(
-							target: "imonline",
+						log::debug!(
+							target: "runtime::im-online",
							"Skipping heartbeat at {:?}: {:?}",
							now,
							e,
@@ -387,40 +441,97 @@
					}
				}
			} else {
-				debug::trace!(
-					target: "imonline",
+				log::trace!(
+					target: "runtime::im-online",
					"Skipping heartbeat at {:?}. Not a validator.",
					now,
				)
			}
		}
	}
-}

-type OffchainResult<T, A> = Result<A, OffchainErr<<T as frame_system::Trait>::BlockNumber>>;
+	/// Invalid transaction custom error. Returned when validators_len field in heartbeat is
+	/// incorrect.
+	pub(crate) const INVALID_VALIDATORS_LEN: u8 = 10;
+
+	#[pallet::validate_unsigned]
+	impl<T: Config> ValidateUnsigned for Pallet<T> {
+		type Call = Call<T>;
+
+		fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
+			if let Call::heartbeat { heartbeat, signature } = call {
+				if <Pallet<T>>::is_online(heartbeat.authority_index) {
+					// we already received a heartbeat for this authority
+					return InvalidTransaction::Stale.into()
+				}
+
+				// check if session index from heartbeat is recent
+				let current_session = T::ValidatorSet::session_index();
+				if heartbeat.session_index != current_session {
+					return InvalidTransaction::Stale.into()
+				}
+
+				// verify that the incoming (unverified) pubkey is actually an authority id
+				let keys = Keys::<T>::get();
+				if keys.len() as u32 != heartbeat.validators_len {
+					return InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into()
+				}
+				let authority_id = match keys.get(heartbeat.authority_index as usize) {
+					Some(id) => id,
+					None => return InvalidTransaction::BadProof.into(),
+				};
+
+				// check signature (this is expensive so we do it last).
+				let signature_valid = heartbeat.using_encoded(|encoded_heartbeat| {
+					authority_id.verify(&encoded_heartbeat, &signature)
+				});
+
+				if !signature_valid {
+					return InvalidTransaction::BadProof.into()
+				}
+
+				ValidTransaction::with_tag_prefix("ImOnline")
+					.priority(T::UnsignedPriority::get())
+					.and_provides((current_session, authority_id))
+					.longevity(
+						TryInto::<u64>::try_into(
+							T::NextSessionRotation::average_session_length() / 2u32.into(),
+						)
+						.unwrap_or(64_u64),
+					)
+					.propagate(true)
+					.build()
+			} else {
+				InvalidTransaction::Call.into()
+			}
+		}
+	}
+}

/// Keep track of number of authored blocks per authority, uncles are counted as
/// well since they're a valid proof of being online.
-impl<T: Trait> pallet_authorship::EventHandler<T::ValidatorId, T::BlockNumber> for Module<T> {
-	fn note_author(author: T::ValidatorId) {
+impl<T: Config + pallet_authorship::Config>
+	pallet_authorship::EventHandler<ValidatorId<T>, T::BlockNumber> for Pallet<T>
+{
+	fn note_author(author: ValidatorId<T>) {
		Self::note_authorship(author);
	}

-	fn note_uncle(author: T::ValidatorId, _age: T::BlockNumber) {
+	fn note_uncle(author: ValidatorId<T>, _age: T::BlockNumber) {
		Self::note_authorship(author);
	}
}
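// Editor's aside on the `validate_unsigned` logic above (illustrative numbers, not
// from the PR): with an average session length of, say, 600 blocks, the longevity
// becomes 600 / 2 = 300 blocks, after which an unincluded heartbeat transaction
// simply expires from the pool. The `and_provides((current_session, authority_id))`
// tag means the transaction pool treats any second heartbeat from the same authority
// in the same session as a duplicate, rejecting it before the expensive signature
// check has to run again.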
-impl<T: Trait> Module<T> {
+impl<T: Config> Pallet<T> {
	/// Returns `true` if a heartbeat has been received for the authority at
	/// `authority_index` in the authorities series or if the authority has
	/// authored at least one block, during the current session. Otherwise
	/// `false`.
	pub fn is_online(authority_index: AuthIndex) -> bool {
-		let current_validators = <pallet_session::Module<T>>::validators();
+		let current_validators = T::ValidatorSet::validators();

		if authority_index >= current_validators.len() as u32 {
-			return false;
+			return false
		}

		let authority = &current_validators[authority_index as usize];
@@ -428,57 +539,86 @@ impl<T: Config> Pallet<T> {
		Self::is_online_aux(authority_index, authority)
	}

-	fn is_online_aux(authority_index: AuthIndex, authority: &T::ValidatorId) -> bool {
-		let current_session = <pallet_session::Module<T>>::current_index();
+	fn is_online_aux(authority_index: AuthIndex, authority: &ValidatorId<T>) -> bool {
+		let current_session = T::ValidatorSet::session_index();

-		<ReceivedHeartbeats>::contains_key(&current_session, &authority_index) ||
-			<AuthoredBlocks<T>>::get(
-				&current_session,
-				authority,
-			) != 0
+		ReceivedHeartbeats::<T>::contains_key(&current_session, &authority_index) ||
+			AuthoredBlocks::<T>::get(&current_session, authority) != 0
	}

	/// Returns `true` if a heartbeat has been received for the authority at `authority_index` in
	/// the authorities series, during the current session. Otherwise `false`.
	pub fn received_heartbeat_in_current_session(authority_index: AuthIndex) -> bool {
-		let current_session = <pallet_session::Module<T>>::current_index();
-		<ReceivedHeartbeats>::contains_key(&current_session, &authority_index)
+		let current_session = T::ValidatorSet::session_index();
+		ReceivedHeartbeats::<T>::contains_key(&current_session, &authority_index)
	}

	/// Note that the given authority has authored a block in the current session.
-	fn note_authorship(author: T::ValidatorId) {
-		let current_session = <pallet_session::Module<T>>::current_index();
+	fn note_authorship(author: ValidatorId<T>) {
+		let current_session = T::ValidatorSet::session_index();

-		<AuthoredBlocks<T>>::mutate(
-			&current_session,
-			author,
-			|authored| *authored += 1,
-		);
+		AuthoredBlocks::<T>::mutate(&current_session, author, |authored| *authored += 1);
	}

-	pub(crate) fn send_heartbeats(block_number: T::BlockNumber)
-		-> OffchainResult<T, impl Iterator<Item = OffchainResult<T, ()>>>
-	{
-		let heartbeat_after = <HeartbeatAfter<T>>::get();
-		if block_number < heartbeat_after {
-			return Err(OffchainErr::TooEarly(heartbeat_after))
+	pub(crate) fn send_heartbeats(
+		block_number: T::BlockNumber,
+	) -> OffchainResult<T, impl Iterator<Item = OffchainResult<T, ()>>> {
+		const START_HEARTBEAT_RANDOM_PERIOD: Permill = Permill::from_percent(10);
+		const START_HEARTBEAT_FINAL_PERIOD: Permill = Permill::from_percent(80);
+
+		// this should give us a residual probability of 1/SESSION_LENGTH of sending a heartbeat,
+		// i.e. all heartbeats spread uniformly, over most of the session. as the session progresses
+		// the probability of sending a heartbeat starts to increase exponentially.
+		let random_choice = |progress: Permill| {
+			// given session progress `p` and session length `l`
+			// the threshold formula is: p^6 + 1/l
+			let session_length = T::NextSessionRotation::average_session_length();
+			let residual = Permill::from_rational(1u32, session_length.saturated_into());
+			let threshold: Permill = progress.saturating_pow(6).saturating_add(residual);
+
+			let seed = sp_io::offchain::random_seed();
+			let random = <u32>::decode(&mut TrailingZeroInput::new(seed.as_ref()))
+				.expect("input is padded with zeroes; qed");
+			let random = Permill::from_parts(random % Permill::ACCURACY);
+
+			random <= threshold
+		};
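+		// Editor's aside, with illustrative numbers for the `p^6 + 1/l` threshold above:
+		// for an average session length of 600 blocks the residual is 1/600, about 0.17%,
+		// so at 10% progress the send probability is still residual-dominated, at 50% it
+		// is 0.5^6 + 1/600, about 1.7%, and at 80% it is 0.8^6 + 1/600, about 26%; past
+		// the 80% mark the code below stops rolling dice and sends unconditionally.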
+		let should_heartbeat = if let (Some(progress), _) =
+			T::NextSessionRotation::estimate_current_session_progress(block_number)
+		{
+			// we try to get an estimate of the current session progress first since it should
+			// provide more accurate results. we will start an early heartbeat period where we'll
+			// randomly pick whether to heartbeat. after 80% of the session has elapsed, if we
+			// haven't sent a heartbeat yet we'll send one unconditionally. the idea is to prevent
+			// all nodes from sending the heartbeats at the same block and causing a temporary (but
+			// deterministic) spike in transactions.
+			progress >= START_HEARTBEAT_FINAL_PERIOD ||
+				progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress)
+		} else {
+			// otherwise we fallback to using the block number calculated at the beginning
+			// of the session that should roughly correspond to the middle of the session
+			let heartbeat_after = <HeartbeatAfter<T>>::get();
+			block_number >= heartbeat_after
+		};
+
+		if !should_heartbeat {
+			return Err(OffchainErr::TooEarly)
		}

-		let session_index = <pallet_session::Module<T>>::current_index();
-		let validators_len = <pallet_session::Module<T>>::validators().len() as u32;
-
-		Ok(Self::local_authority_keys()
-			.map(move |(authority_index, key)|
-				Self::send_single_heartbeat(
-					authority_index,
-					key,
-					session_index,
-					block_number,
-					validators_len,
-				)
-			))
-	}
+		let session_index = T::ValidatorSet::session_index();
+		let validators_len = Keys::<T>::decode_len().unwrap_or_default() as u32;

+		Ok(Self::local_authority_keys().map(move |(authority_index, key)| {
+			Self::send_single_heartbeat(
+				authority_index,
+				key,
+				session_index,
+				block_number,
+				validators_len,
+			)
+		}))
+	}

	fn send_single_heartbeat(
		authority_index: u32,
@@ -489,9 +629,9 @@ impl<T: Trait> Module<T> {
	) -> OffchainResult<T, ()> {
		// A helper function to prepare heartbeat call.
		let prepare_heartbeat = || -> OffchainResult<T, Call<T>> {
-			let network_state = sp_io::offchain::network_state()
-				.map_err(|_| OffchainErr::NetworkState)?;
-			let heartbeat_data = Heartbeat {
+			let network_state =
+				sp_io::offchain::network_state().map_err(|_| OffchainErr::NetworkState)?;
+			let heartbeat = Heartbeat {
				block_number,
				network_state,
				session_index,
@@ -499,41 +639,36 @@ impl<T: Trait> Module<T> {
				validators_len,
			};

-			let signature = key.sign(&heartbeat_data.encode()).ok_or(OffchainErr::FailedSigning)?;
+			let signature = key.sign(&heartbeat.encode()).ok_or(OffchainErr::FailedSigning)?;

-			Ok(Call::heartbeat(heartbeat_data, signature))
+			Ok(Call::heartbeat { heartbeat, signature })
		};

		if Self::is_online(authority_index) {
-			return Err(OffchainErr::AlreadyOnline(authority_index));
+			return Err(OffchainErr::AlreadyOnline(authority_index))
		}

		// acquire lock for that authority at current heartbeat to make sure we don't
		// send concurrent heartbeats.
-		Self::with_heartbeat_lock(
-			authority_index,
-			session_index,
-			block_number,
-			|| {
-				let call = prepare_heartbeat()?;
-				debug::info!(
-					target: "imonline",
-					"[index: {:?}] Reporting im-online at block: {:?} (session: {:?}): {:?}",
-					authority_index,
-					block_number,
-					session_index,
-					call,
-				);
+		Self::with_heartbeat_lock(authority_index, session_index, block_number, || {
+			let call = prepare_heartbeat()?;
+			log::info!(
+				target: "runtime::im-online",
+				"[index: {:?}] Reporting im-online at block: {:?} (session: {:?}): {:?}",
+				authority_index,
+				block_number,
+				session_index,
+				call,
+			);

-				SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(call.into())
-					.map_err(|_| OffchainErr::SubmitTransaction)?;
+			SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(call.into())
+				.map_err(|_| OffchainErr::SubmitTransaction)?;

-				Ok(())
-			},
-		)
+			Ok(())
+		})
	}

-	fn local_authority_keys() -> impl Iterator<Item = (u32, T::AuthorityId)> {
+	fn local_authority_keys() -> impl Iterator<Item = (u32, T::AuthorityId)> {
		// on-chain storage
		//
		// At index `idx`:
@@ -548,13 +683,12 @@ impl<T: Trait> Module<T> {
		local_keys.sort();

-		authorities.into_iter()
-			.enumerate()
-			.filter_map(move |(index, authority)| {
-				local_keys.binary_search(&authority)
-					.ok()
-					.map(|location| (index as u32, local_keys[location].clone()))
-			})
+		authorities.into_iter().enumerate().filter_map(move |(index, authority)| {
+			local_keys
+				.binary_search(&authority)
+				.ok()
+				.map(|location| (index as u32, local_keys[location].clone()))
+		})
	}

	fn with_heartbeat_lock(
@@ -569,23 +703,24 @@ impl<T: Trait> Module<T> {
			key
		};
		let storage = StorageValueRef::persistent(&key);
-		let res = storage.mutate(|status: Option<Option<HeartbeatStatus<T::BlockNumber>>>| {
-			// Check if there is already a lock for that particular block.
-			// This means that the heartbeat has already been sent, and we are just waiting
-			// for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD
-			// we will re-send it.
-			match status {
-				// we are still waiting for inclusion.
-				Some(Some(status)) if status.is_recent(session_index, now) => {
-					Err(OffchainErr::WaitingForInclusion(status.sent_at))
-				},
-				// attempt to set new status
-				_ => Ok(HeartbeatStatus {
-					session_index,
-					sent_at: now,
-				}),
-			}
-		})?;
+		let res = storage.mutate(
+			|status: Result<Option<HeartbeatStatus<T::BlockNumber>>, StorageRetrievalError>| {
+				// Check if there is already a lock for that particular block.
+				// This means that the heartbeat has already been sent, and we are just waiting
+				// for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD
+				// we will re-send it.
+				match status {
+					// we are still waiting for inclusion.
+					Ok(Some(status)) if status.is_recent(session_index, now) =>
+						Err(OffchainErr::WaitingForInclusion(status.sent_at)),
+					// attempt to set new status
+					_ => Ok(HeartbeatStatus { session_index, sent_at: now }),
+				}
+			},
+		);
+		if let Err(MutateStorageError::ValueFunctionFailed(err)) = res {
+			return Err(err)
+		}

		let mut new_status = res.map_err(|_| OffchainErr::FailedToAcquireLock)?;

@@ -594,7 +729,7 @@ impl<T: Trait> Module<T> {
		// clear the lock in case we have failed to send transaction.
if res.is_err() { - new_status.sent_at = 0.into(); + new_status.sent_at = 0u32.into(); storage.set(&new_status); } @@ -614,28 +749,30 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = T::AuthorityId; } -impl pallet_session::OneSessionHandler for Module { +impl OneSessionHandler for Pallet { type Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where + I: Iterator, { let keys = validators.map(|x| x.1).collect::>(); Self::initialize_keys(&keys); } fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, _queued_validators: I) - where I: Iterator + where + I: Iterator, { // Tell the offchain worker to start making the next session's heartbeats. // Since we consider producing blocks as being online, // the heartbeat is deferred a bit to prevent spamming. - let block_number = >::block_number(); - let half_session = T::SessionDuration::get() / 2.into(); + let block_number = >::block_number(); + let half_session = T::NextSessionRotation::average_session_length() / 2u32.into(); >::put(block_number + half_session); // Remember who the authorities are for the new session. @@ -643,27 +780,31 @@ impl pallet_session::OneSessionHandler for Module { } fn on_before_session_ending() { - let session_index = >::current_index(); + let session_index = T::ValidatorSet::session_index(); let keys = Keys::::get(); - let current_validators = >::validators(); + let current_validators = T::ValidatorSet::validators(); - let offenders = current_validators.into_iter().enumerate() - .filter(|(index, id)| - !Self::is_online_aux(*index as u32, id) - ).filter_map(|(_, id)| - T::FullIdentificationOf::convert(id.clone()).map(|full_id| (id, full_id)) - ).collect::>>(); + let offenders = current_validators + .into_iter() + .enumerate() + .filter(|(index, id)| !Self::is_online_aux(*index as u32, id)) + .filter_map(|(_, id)| { + >::IdentificationOf::convert( + id.clone() + ).map(|full_id| (id, full_id)) + }) + .collect::>>(); // Remove all received heartbeats and number of authored blocks from the // current session, they have already been processed and won't be needed // anymore. - ::remove_prefix(&>::current_index()); - >::remove_prefix(&>::current_index()); + ReceivedHeartbeats::::remove_prefix(&T::ValidatorSet::session_index(), None); + AuthoredBlocks::::remove_prefix(&T::ValidatorSet::session_index(), None); if offenders.is_empty() { - Self::deposit_event(RawEvent::AllGood); + Self::deposit_event(Event::::AllGood); } else { - Self::deposit_event(RawEvent::SomeOffline(offenders.clone())); + Self::deposit_event(Event::::SomeOffline(offenders.clone())); let validator_set_count = keys.len() as u32; let offence = UnresponsivenessOffence { session_index, validator_set_count, offenders }; @@ -678,63 +819,8 @@ impl pallet_session::OneSessionHandler for Module { } } -/// Invalid transaction custom error. Returned when validators_len field in heartbeat is incorrect. 
-const INVALID_VALIDATORS_LEN: u8 = 10; - -impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - if let Call::heartbeat(heartbeat, signature) = call { - if >::is_online(heartbeat.authority_index) { - // we already received a heartbeat for this authority - return InvalidTransaction::Stale.into(); - } - - // check if session index from heartbeat is recent - let current_session = >::current_index(); - if heartbeat.session_index != current_session { - return InvalidTransaction::Stale.into(); - } - - // verify that the incoming (unverified) pubkey is actually an authority id - let keys = Keys::::get(); - if keys.len() as u32 != heartbeat.validators_len { - return InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into(); - } - let authority_id = match keys.get(heartbeat.authority_index as usize) { - Some(id) => id, - None => return InvalidTransaction::BadProof.into(), - }; - - // check signature (this is expensive so we do it last). - let signature_valid = heartbeat.using_encoded(|encoded_heartbeat| { - authority_id.verify(&encoded_heartbeat, &signature) - }); - - if !signature_valid { - return InvalidTransaction::BadProof.into(); - } - - ValidTransaction::with_tag_prefix("ImOnline") - .priority(T::UnsignedPriority::get()) - .and_provides((current_session, authority_id)) - .longevity(TryInto::::try_into( - T::SessionDuration::get() / 2.into() - ).unwrap_or(64_u64)) - .propagate(true) - .build() - } else { - InvalidTransaction::Call.into() - } - } -} - /// An offence that is filed if a validator didn't send a heartbeat message. -#[derive(RuntimeDebug)] +#[derive(RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Clone, PartialEq, Eq))] pub struct UnresponsivenessOffence { /// The current session index in which we report the unresponsive validators. @@ -773,7 +859,7 @@ impl Offence for UnresponsivenessOffence { // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% // when 13/30 are offline (around 5% when 1/3 are offline). if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { - let x = Perbill::from_rational_approximation(3 * threshold, validator_set_count); + let x = Perbill::from_rational(3 * threshold, validator_set_count); x.saturating_mul(Perbill::from_percent(7)) } else { Perbill::default() diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index dae4bb3447e56..e4031b04271b9 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,23 +21,37 @@ use std::cell::RefCell; -use crate::{Module, Trait}; -use sp_runtime::Perbill; -use sp_staking::{SessionIndex, offence::{ReportOffence, OffenceError}}; -use sp_runtime::testing::{Header, UintAuthorityId, TestXt}; -use sp_runtime::traits::{IdentityLookup, BlakeTwo256, ConvertInto}; +use frame_support::{parameter_types, weights::Weight}; +use pallet_session::historical as pallet_session_historical; use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types, weights::Weight}; - -impl_outer_origin!{ - pub enum Origin for Runtime {} -} - -impl_outer_dispatch! 
{ - pub enum Call for Runtime where origin: Origin { - imonline::ImOnline, +use sp_runtime::{ + testing::{Header, TestXt, UintAuthorityId}, + traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + Perbill, Permill, +}; +use sp_staking::{ + offence::{OffenceError, ReportOffence}, + SessionIndex, +}; + +use crate as imonline; +use crate::Config; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + ImOnline: imonline::{Pallet, Call, Storage, Config, Event}, + Historical: pallet_session_historical::{Pallet}, } -} +); thread_local! { pub static VALIDATORS: RefCell>> = RefCell::new(Some(vec![ @@ -58,13 +72,11 @@ impl pallet_session::SessionManager for TestSessionManager { impl pallet_session::historical::SessionManager for TestSessionManager { fn new_session(_new_index: SessionIndex) -> Option> { - VALIDATORS.with(|l| l - .borrow_mut() - .take() - .map(|validators| { - validators.iter().map(|v| (*v, *v)).collect() - }) - ) + VALIDATORS.with(|l| { + l.borrow_mut() + .take() + .map(|validators| validators.iter().map(|v| (*v, *v)).collect()) + }) } fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} @@ -93,24 +105,21 @@ impl ReportOffence for OffenceHandler { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); t.into() } -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Runtime; - parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Runtime { - type BaseCallFilter = (); +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -120,21 +129,16 @@ impl frame_system::Trait for Runtime { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -146,20 +150,21 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type SessionHandler = (ImOnline, ); + type SessionManager = + pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = (ImOnline,); type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type Keys = UintAuthorityId; - type Event = (); + type Event = Event; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; type WeightInfo = (); } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = u64; type FullIdentificationOf = ConvertInto; } @@ -168,38 +173,70 @@ parameter_types! { pub const UncleGenerations: u32 = 5; } -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = (); type UncleGenerations = UncleGenerations; type FilterUncle = (); type EventHandler = ImOnline; } +thread_local! { + pub static MOCK_CURRENT_SESSION_PROGRESS: RefCell>> = RefCell::new(None); +} + +thread_local! { + pub static MOCK_AVERAGE_SESSION_LENGTH: RefCell> = RefCell::new(None); +} + +pub struct TestNextSessionRotation; + +impl frame_support::traits::EstimateNextSessionRotation for TestNextSessionRotation { + fn average_session_length() -> u64 { + // take the mock result if any and return it + let mock = MOCK_AVERAGE_SESSION_LENGTH.with(|p| p.borrow_mut().take()); + + mock.unwrap_or(pallet_session::PeriodicSessions::::average_session_length()) + } + + fn estimate_current_session_progress(now: u64) -> (Option, Weight) { + let (estimate, weight) = + pallet_session::PeriodicSessions::::estimate_current_session_progress( + now, + ); + + // take the mock result if any and return it + let mock = MOCK_CURRENT_SESSION_PROGRESS.with(|p| p.borrow_mut().take()); + + (mock.unwrap_or(estimate), weight) + } + + fn estimate_next_session_rotation(now: u64) -> (Option, Weight) { + pallet_session::PeriodicSessions::::estimate_next_session_rotation(now) + } +} + parameter_types! { pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Runtime { +impl Config for Runtime { type AuthorityId = UintAuthorityId; - type Event = (); + type Event = Event; + type ValidatorSet = Historical; + type NextSessionRotation = TestNextSessionRotation; type ReportUnresponsiveness = OffenceHandler; - type SessionDuration = Period; type UnsignedPriority = UnsignedPriority; type WeightInfo = (); } -impl frame_system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime +where Call: From, { type OverarchingCall = Call; type Extrinsic = Extrinsic; } -/// Im Online module. -pub type ImOnline = Module; -pub type System = frame_system::Module; -pub type Session = pallet_session::Module; - pub fn advance_session() { let now = System::block_number().max(1); System::set_block_number(now + 1); diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 22c6b4464c370..bb2c4c7cae548 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,22 +21,23 @@ use super::*; use crate::mock::*; -use sp_core::OpaquePeerId; -use sp_core::offchain::{ - OffchainExt, - TransactionPoolExt, - testing::{TestOffchainExt, TestTransactionPoolExt}, +use frame_support::{assert_noop, dispatch}; +use sp_core::{ + offchain::{ + testing::{TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, + }, + OpaquePeerId, +}; +use sp_runtime::{ + testing::UintAuthorityId, + transaction_validity::{InvalidTransaction, TransactionValidityError}, }; -use frame_support::{dispatch, assert_noop}; -use sp_runtime::{testing::UintAuthorityId, transaction_validity::TransactionValidityError}; #[test] fn test_unresponsiveness_slash_fraction() { // A single case of unresponsiveness is not slashed. - assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(1, 50), - Perbill::zero(), - ); + assert_eq!(UnresponsivenessOffence::<()>::slash_fraction(1, 50), Perbill::zero()); assert_eq!( UnresponsivenessOffence::<()>::slash_fraction(5, 50), @@ -74,17 +75,17 @@ fn should_report_offline_validators() { // then let offences = OFFENCES.with(|l| l.replace(vec![])); - assert_eq!(offences, vec![ - (vec![], UnresponsivenessOffence { - session_index: 2, - validator_set_count: 3, - offenders: vec![ - (1, 1), - (2, 2), - (3, 3), - ], - }) - ]); + assert_eq!( + offences, + vec![( + vec![], + UnresponsivenessOffence { + session_index: 2, + validator_set_count: 3, + offenders: vec![(1, 1), (2, 2), (3, 3),], + } + )] + ); // should not report when heartbeat is sent for (idx, v) in validators.into_iter().take(4).enumerate() { @@ -94,16 +95,17 @@ fn should_report_offline_validators() { // then let offences = OFFENCES.with(|l| l.replace(vec![])); - assert_eq!(offences, vec![ - (vec![], UnresponsivenessOffence { - session_index: 3, - validator_set_count: 6, - offenders: vec![ - (5, 5), - (6, 6), - ], - }) - ]); + assert_eq!( + offences, + vec![( + vec![], + UnresponsivenessOffence { + session_index: 3, + validator_set_count: 6, + offenders: vec![(5, 5), (6, 6),], + } + )] + ); }); } @@ -128,17 +130,16 @@ fn heartbeat( }; let signature = id.sign(&heartbeat.encode()).unwrap(); - ImOnline::pre_dispatch(&crate::Call::heartbeat(heartbeat.clone(), signature.clone())) - .map_err(|e| match e { - TransactionValidityError::Invalid(InvalidTransaction::Custom(INVALID_VALIDATORS_LEN)) => - "invalid validators len", - e @ _ => <&'static str>::from(e), - })?; - ImOnline::heartbeat( - Origin::none(), - heartbeat, - signature, - ) + ImOnline::pre_dispatch(&crate::Call::heartbeat { + heartbeat: heartbeat.clone(), + signature: signature.clone(), + }) + .map_err(|e| match e { + TransactionValidityError::Invalid(InvalidTransaction::Custom(INVALID_VALIDATORS_LEN)) => + "invalid validators len", + e @ _ => <&'static str>::from(e), + })?; + ImOnline::heartbeat(Origin::none(), heartbeat, signature) } #[test] @@ -190,8 +191,14 @@ fn late_heartbeat_and_invalid_keys_len_should_fail() { assert_eq!(Session::validators(), vec![1, 2, 3]); // when - assert_noop!(heartbeat(1, 3, 0, 1.into(), Session::validators()), "Transaction is outdated"); - assert_noop!(heartbeat(1, 1, 0, 1.into(), Session::validators()), "Transaction is outdated"); + assert_noop!( + heartbeat(1, 3, 0, 1.into(), Session::validators()), + "Transaction is outdated" + ); + assert_noop!( + heartbeat(1, 1, 0, 1.into(), Session::validators()), + "Transaction is outdated" + ); // invalid validators_len 
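One aside before the heartbeat assertions continue (the next one exercises the invalid `validators_len` case flagged by the comment above): the curve checked by `test_unresponsiveness_slash_fraction` comes from `slash_fraction` in `lib.rs`: offenders up to `n/10` go unslashed, and past that the fraction is `from_rational(3 * (k - n/10 - 1), n)`, saturating at 100%, scaled by 7%. A standalone check of that arithmetic in Perbill-style fixed point; the non-zero sample points are computed from the formula (and agree with the "around 5% when 1/3 are offline" remark in `lib.rs`):

```rust
// Perbill-style fixed point: 10^9 parts = 100%.
const BILL: u64 = 1_000_000_000;

/// Slash fraction (in parts-per-billion) for `offenders` out of
/// `validator_set_count` validators, mirroring the pallet's formula.
fn slash_fraction(offenders: u64, validator_set_count: u64) -> u64 {
    match offenders.checked_sub(validator_set_count / 10 + 1) {
        Some(threshold) => {
            // from_rational(3 * threshold, n) saturates at 100%...
            let x = (3 * threshold * BILL / validator_set_count).min(BILL);
            // ...and is then scaled by 7%.
            x * 70_000_000 / BILL
        },
        // up to 10% of the set can be offline with no slash at all
        None => 0,
    }
}

fn main() {
    assert_eq!(slash_fraction(1, 50), 0); // a single offender is not slashed
    assert_eq!(slash_fraction(5, 50), 0); // 10% offline: still free
    assert_eq!(slash_fraction(7, 50), 4_200_000); // 0.42%
    assert_eq!(slash_fraction(17, 50), 46_200_000); // ~1/3 offline: 4.62%
}
```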
assert_noop!(heartbeat(1, 2, 0, 1.into(), vec![]), "invalid validators len"); @@ -205,7 +212,8 @@ fn should_generate_heartbeats() { let mut ext = new_test_ext(); let (offchain, _state) = TestOffchainExt::new(); let (pool, state) = TestTransactionPoolExt::new(); - ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); ext.register_extension(TransactionPoolExt::new(pool)); ext.execute_with(|| { @@ -230,17 +238,20 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::Call::ImOnline(crate::Call::heartbeat(h, ..)) => h, + crate::mock::Call::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), }; - assert_eq!(heartbeat, Heartbeat { - block_number: block, - network_state: sp_io::offchain::network_state().unwrap(), - session_index: 2, - authority_index: 2, - validators_len: 3, - }); + assert_eq!( + heartbeat, + Heartbeat { + block_number: block, + network_state: sp_io::offchain::network_state().unwrap(), + session_index: 2, + authority_index: 2, + validators_len: 3, + } + ); }); } @@ -310,7 +321,8 @@ fn should_not_send_a_report_if_already_online() { let mut ext = new_test_ext(); let (offchain, _state) = TestOffchainExt::new(); let (pool, pool_state) = TestTransactionPoolExt::new(); - ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); ext.register_extension(TransactionPoolExt::new(pool)); ext.execute_with(|| { @@ -341,16 +353,176 @@ fn should_not_send_a_report_if_already_online() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::Call::ImOnline(crate::Call::heartbeat(h, ..)) => h, + crate::mock::Call::ImOnline(crate::Call::heartbeat { heartbeat, .. 
}) => heartbeat,
            e => panic!("Unexpected call: {:?}", e),
        };

-        assert_eq!(heartbeat, Heartbeat {
-            block_number: 4,
-            network_state: sp_io::offchain::network_state().unwrap(),
-            session_index: 2,
-            authority_index: 0,
-            validators_len: 3,
-        });
+        assert_eq!(
+            heartbeat,
+            Heartbeat {
+                block_number: 4,
+                network_state: sp_io::offchain::network_state().unwrap(),
+                session_index: 2,
+                authority_index: 0,
+                validators_len: 3,
+            }
+        );
+    });
+}
+
+#[test]
+fn should_handle_missing_progress_estimates() {
+    use frame_support::traits::OffchainWorker;
+
+    let mut ext = new_test_ext();
+    let (offchain, _state) = TestOffchainExt::new();
+    let (pool, state) = TestTransactionPoolExt::new();
+    ext.register_extension(OffchainDbExt::new(offchain.clone()));
+    ext.register_extension(OffchainWorkerExt::new(offchain));
+    ext.register_extension(TransactionPoolExt::new(pool));
+
+    ext.execute_with(|| {
+        let block = 1;
+
+        System::set_block_number(block);
+        UintAuthorityId::set_all_keys(vec![0, 1, 2]);
+
+        // buffer new validators
+        Session::rotate_session();
+
+        // enact the change and buffer another one
+        VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![0, 1, 2]));
+        Session::rotate_session();
+
+        // we will return `None` on the next call to `estimate_current_session_progress`
+        // and the offchain worker should fall back to checking `HeartbeatAfter`
+        MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None));
+        ImOnline::offchain_worker(block);
+
+        assert_eq!(state.read().transactions.len(), 3);
+    });
+}
+
+#[test]
+fn should_handle_non_linear_session_progress() {
+    // NOTE: this is the reason why we started using `EstimateNextSessionRotation` to figure out
+    // if we should send a heartbeat: it's possible that between successive blocks we progress
+    // through the session by more than one block increment (in BABE the session length is
+    // defined in slots, not block numbers).
+
+    let mut ext = new_test_ext();
+    let (offchain, _state) = TestOffchainExt::new();
+    let (pool, _) = TestTransactionPoolExt::new();
+    ext.register_extension(OffchainDbExt::new(offchain.clone()));
+    ext.register_extension(OffchainWorkerExt::new(offchain));
+    ext.register_extension(TransactionPoolExt::new(pool));
+
+    ext.execute_with(|| {
+        UintAuthorityId::set_all_keys(vec![0, 1, 2]);
+
+        // buffer new validator
+        Session::rotate_session();
+
+        // enact the change and buffer another one
+        VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![0, 1, 2]));
+
+        // mock the session length as being 10, which should make us assume the fallback for
+        // half a session will be reached by block 5.
+        MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(10));
+
+        Session::rotate_session();
+
+        // if we don't have valid results for the current session progress then
+        // we'll fall back to `HeartbeatAfter` and only heartbeat on block 5.
+ MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); + assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); + + MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); + assert!(ImOnline::send_heartbeats(5).ok().is_some()); + + // if we have a valid current session progress then we'll heartbeat as soon + // as we're past 80% of the session regardless of the block number + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_percent(81)))); + + assert!(ImOnline::send_heartbeats(2).ok().is_some()); + }); +} + +#[test] +fn test_does_not_heartbeat_early_in_the_session() { + let mut ext = new_test_ext(); + let (offchain, _state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + // mock current session progress as being 5%. we only randomly start + // heartbeating after 10% of the session has elapsed. + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(0.05)))); + assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); + }); +} + +#[test] +fn test_probability_of_heartbeating_increases_with_session_progress() { + let mut ext = new_test_ext(); + let (offchain, state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + let set_test = |progress, random: f64| { + // the average session length is 100 blocks, therefore the residual + // probability of sending a heartbeat is 1% + MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(100)); + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(progress)))); + + let mut seed = [0u8; 32]; + let encoded = ((random * Permill::ACCURACY as f64) as u32).encode(); + seed[0..4].copy_from_slice(&encoded); + state.write().seed = seed; + }; + + let assert_too_early = |progress, random| { + set_test(progress, random); + assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); + }; + + let assert_heartbeat_ok = |progress, random| { + set_test(progress, random); + assert!(ImOnline::send_heartbeats(2).ok().is_some()); + }; + + assert_too_early(0.05, 1.0); + + assert_too_early(0.1, 0.1); + assert_too_early(0.1, 0.011); + assert_heartbeat_ok(0.1, 0.010); + + assert_too_early(0.4, 0.015); + assert_heartbeat_ok(0.4, 0.014); + + assert_too_early(0.5, 0.026); + assert_heartbeat_ok(0.5, 0.025); + + assert_too_early(0.6, 0.057); + assert_heartbeat_ok(0.6, 0.056); + + assert_too_early(0.65, 0.086); + assert_heartbeat_ok(0.65, 0.085); + + assert_too_early(0.7, 0.13); + assert_heartbeat_ok(0.7, 0.12); + + assert_too_early(0.75, 0.19); + assert_heartbeat_ok(0.75, 0.18); }); } diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs new file mode 100644 index 0000000000000..1eadd63cc9d6c --- /dev/null +++ b/frame/im-online/src/weights.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_im_online +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_im_online +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/im-online/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_im_online. +pub trait WeightInfo { + fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight; +} + +/// Weights for pallet_im_online using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Session Validators (r:1 w:0) + // Storage: Session CurrentIndex (r:1 w:0) + // Storage: ImOnline ReceivedHeartbeats (r:1 w:1) + // Storage: ImOnline AuthoredBlocks (r:1 w:0) + // Storage: ImOnline Keys (r:1 w:0) + fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { + (93_400_000 as Weight) + // Standard Error: 0 + .saturating_add((144_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 0 + .saturating_add((335_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Session Validators (r:1 w:0) + // Storage: Session CurrentIndex (r:1 w:0) + // Storage: ImOnline ReceivedHeartbeats (r:1 w:1) + // Storage: ImOnline AuthoredBlocks (r:1 w:0) + // Storage: ImOnline Keys (r:1 w:0) + fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { + (93_400_000 as Weight) + // Standard Error: 0 + .saturating_add((144_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 0 + .saturating_add((335_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index aea8dbf1a866e..17d04c43fa5d9 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-indices" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,27 +13,27 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = 
"1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-keyring = { version = "4.0.0-dev", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "sp-keyring", "codec/std", + "scale-info/std", "sp-core/std", "sp-std/std", "sp-io/std", @@ -44,4 +44,6 @@ std = [ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/indices/src/address.rs b/frame/indices/src/address.rs deleted file mode 100644 index 0fd8933381328..0000000000000 --- a/frame/indices/src/address.rs +++ /dev/null @@ -1,159 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Address type that is union of index and id for an account. - -#[cfg(feature = "std")] -use std::fmt; -use sp_std::convert::TryInto; -use crate::Member; -use codec::{Encode, Decode, Input, Output, Error}; - -/// An indices-aware address, which can be either a direct `AccountId` or -/// an index. 
-#[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Hash))] -pub enum Address where - AccountId: Member, - AccountIndex: Member, -{ - /// It's an account ID (pubkey). - Id(AccountId), - /// It's an account index. - Index(AccountIndex), -} - -#[cfg(feature = "std")] -impl fmt::Display for Address where - AccountId: Member, - AccountIndex: Member, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl From for Address where - AccountId: Member, - AccountIndex: Member, -{ - fn from(a: AccountId) -> Self { - Address::Id(a) - } -} - -fn need_more_than(a: T, b: T) -> Result { - if a < b { Ok(b) } else { Err("Invalid range".into()) } -} - -impl Decode for Address where - AccountId: Member + Decode, - AccountIndex: Member + Decode + PartialOrd + Ord + From + Copy, -{ - fn decode(input: &mut I) -> Result { - Ok(match input.read_byte()? { - x @ 0x00..=0xef => Address::Index(AccountIndex::from(x as u32)), - 0xfc => Address::Index(AccountIndex::from( - need_more_than(0xef, u16::decode(input)?)? as u32 - )), - 0xfd => Address::Index(AccountIndex::from( - need_more_than(0xffff, u32::decode(input)?)? - )), - 0xfe => Address::Index( - need_more_than(0xffffffffu32.into(), Decode::decode(input)?)? - ), - 0xff => Address::Id(Decode::decode(input)?), - _ => return Err("Invalid address variant".into()), - }) - } -} - -impl Encode for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, -{ - fn encode_to(&self, dest: &mut T) { - match *self { - Address::Id(ref i) => { - dest.push_byte(255); - dest.push(i); - } - Address::Index(i) => { - let maybe_u32: Result = i.try_into(); - if let Ok(x) = maybe_u32 { - if x > 0xffff { - dest.push_byte(253); - dest.push(&x); - } - else if x >= 0xf0 { - dest.push_byte(252); - dest.push(&(x as u16)); - } - else { - dest.push_byte(x as u8); - } - - } else { - dest.push_byte(254); - dest.push(&i); - } - }, - } - } -} - -impl codec::EncodeLike for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, -{} - -impl Default for Address where - AccountId: Member + Default, - AccountIndex: Member, -{ - fn default() -> Self { - Address::Id(Default::default()) - } -} - -#[cfg(test)] -mod tests { - use codec::{Encode, Decode}; - - type Address = super::Address<[u8; 8], u32>; - fn index(i: u32) -> Address { super::Address::Index(i) } - fn id(i: [u8; 8]) -> Address { super::Address::Id(i) } - - fn compare(a: Option
, d: &[u8]) { - if let Some(ref a) = a { - assert_eq!(d, &a.encode()[..]); - } - assert_eq!(Address::decode(&mut &d[..]).ok(), a); - } - - #[test] - fn it_should_work() { - compare(Some(index(2)), &[2][..]); - compare(None, &[240][..]); - compare(None, &[252, 239, 0][..]); - compare(Some(index(240)), &[252, 240, 0][..]); - compare(Some(index(304)), &[252, 48, 1][..]); - compare(None, &[253, 255, 255, 0, 0][..]); - compare(Some(index(0x10000)), &[253, 0, 0, 1, 0][..]); - compare(Some(id([42, 69, 42, 69, 42, 69, 42, 69])), &[255, 42, 69, 42, 69, 42, 69, 42, 69][..]); - } -} diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index 382bf07f1136e..ba0152008c41e 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,17 +20,15 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use sp_runtime::traits::Bounded; -use crate::Module as Indices; +use crate::Pallet as Indices; const SEED: u32 = 0; benchmarks! { - _ { } - claim { let account_index = T::AccountIndex::from(SEED); let caller: T::AccountId = whitelisted_caller(); @@ -95,20 +93,4 @@ benchmarks! { // TODO in another PR: lookup and unlookup trait weights (not critical) } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_claim::()); - assert_ok!(test_benchmark_transfer::()); - assert_ok!(test_benchmark_free::()); - assert_ok!(test_benchmark_force_transfer::()); - assert_ok!(test_benchmark_freeze::()); - }); - } -} +impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/indices/src/default_weights.rs b/frame/indices/src/default_weights.rs deleted file mode 100644 index 6b3b9c13e40a0..0000000000000 --- a/frame/indices/src/default_weights.rs +++ /dev/null @@ -1,51 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn claim() -> Weight { - (56_237_000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn transfer() -> Weight { - (63_665_000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn free() -> Weight { - (50_736_000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn force_transfer() -> Weight { - (52_361_000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn freeze() -> Weight { - (46_483_000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } -} diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index edbaed17e536f..0901a89d41ad6 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,105 +20,64 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::Codec; -use sp_runtime::traits::{ - StaticLookup, Member, LookupError, Zero, Saturating, AtLeast32Bit -}; -use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage, ensure}; -use frame_support::dispatch::DispatchResult; -use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; -use frame_support::weights::Weight; -use frame_system::{ensure_signed, ensure_root}; -use self::address::Address as RawAddress; - +mod benchmarking; mod mock; -pub mod address; mod tests; -mod benchmarking; -mod default_weights; - -pub type Address = RawAddress<::AccountId, ::AccountIndex>; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -pub trait WeightInfo { - fn claim() -> Weight; - fn transfer() -> Weight; - fn free() -> Weight; - fn force_transfer() -> Weight; - fn freeze() -> Weight; -} - -/// The module's config trait. -pub trait Trait: frame_system::Trait { - /// Type used for storing an account's index; implies the maximum number of accounts the system - /// can hold. - type AccountIndex: Parameter + Member + Codec + Default + AtLeast32Bit + Copy; +pub mod weights; - /// The currency trait. - type Currency: ReservableCurrency; - - /// The deposit needed for reserving an index. - type Deposit: Get>; +use codec::Codec; +use frame_support::traits::{BalanceStatus::Reserved, Currency, ReservableCurrency}; +use sp_runtime::{ + traits::{AtLeast32Bit, LookupError, Saturating, StaticLookup, Zero}, + MultiAddress, +}; +use sp_std::prelude::*; +pub use weights::WeightInfo; + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The module's config trait. 
+    #[pallet::config]
+    pub trait Config: frame_system::Config {
+        /// Type used for storing an account's index; implies the maximum number of accounts the
+        /// system can hold.
+        type AccountIndex: Parameter
+            + Member
+            + MaybeSerializeDeserialize
+            + Codec
+            + Default
+            + AtLeast32Bit
+            + Copy;
+
+        /// The currency trait.
+        type Currency: ReservableCurrency<Self::AccountId>;

-    /// The overarching event type.
-    type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
+        /// The deposit needed for reserving an index.
+        #[pallet::constant]
+        type Deposit: Get<BalanceOf<Self>>;

-    /// Weight information for extrinsics in this pallet.
-    type WeightInfo: WeightInfo;
-}
+        /// The overarching event type.
+        type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;

-decl_storage! {
-    trait Store for Module<T: Trait> as Indices {
-        /// The lookup from index to account.
-        pub Accounts build(|config: &GenesisConfig<T>|
-            config.indices.iter()
-                .cloned()
-                .map(|(a, b)| (a, (b, Zero::zero(), false)))
-                .collect::<Vec<_>>()
-        ): map hasher(blake2_128_concat) T::AccountIndex => Option<(T::AccountId, BalanceOf<T>, bool)>;
+        /// Weight information for extrinsics in this pallet.
+        type WeightInfo: WeightInfo;
    }
-    add_extra_genesis {
-        config(indices): Vec<(T::AccountIndex, T::AccountId)>;
-    }
-}

-decl_event!(
-    pub enum Event<T> where
-        <T as frame_system::Trait>::AccountId,
-        <T as Trait>::AccountIndex
-    {
-        /// A account index was assigned. \[who, index\]
-        IndexAssigned(AccountId, AccountIndex),
-        /// A account index has been freed up (unassigned). \[index\]
-        IndexFreed(AccountIndex),
-        /// A account index has been frozen to its current account ID. \[who, index\]
-        IndexFrozen(AccountIndex, AccountId),
-    }
-);
-
-decl_error! {
-    pub enum Error for Module<T: Trait> {
-        /// The index was not already assigned.
-        NotAssigned,
-        /// The index is assigned to another account.
-        NotOwner,
-        /// The index was not available.
-        InUse,
-        /// The source and destination accounts are identical.
-        NotTransfer,
-        /// The index is permanent and may not be freed/changed.
-        Permanent,
-    }
-}
-
-decl_module! {
-    pub struct Module<T: Trait> for enum Call where origin: T::Origin, system = frame_system {
-        /// The deposit needed for reserving an index.
-        const Deposit: BalanceOf<T> = T::Deposit::get();
-
-        fn deposit_event() = default;
+    #[pallet::pallet]
+    #[pallet::generate_store(pub(super) trait Store)]
+    pub struct Pallet<T>(PhantomData<T>);

+    #[pallet::call]
+    impl<T: Config> Pallet<T> {
        /// Assign a previously unassigned index.
        ///
        /// Payment: `Deposit` is reserved from the sender account.
@@ -137,8 +96,8 @@ decl_module! {
        /// -------------------
        /// - DB Weight: 1 Read/Write (Accounts)
        /// #
-        #[weight = T::WeightInfo::claim()]
-        fn claim(origin, index: T::AccountIndex) {
+        #[pallet::weight(T::WeightInfo::claim())]
+        pub fn claim(origin: OriginFor<T>, index: T::AccountIndex) -> DispatchResult {
            let who = ensure_signed(origin)?;

            Accounts::<T>::try_mutate(index, |maybe_value| {
@@ -146,7 +105,8 @@ decl_module! {
                *maybe_value = Some((who.clone(), T::Deposit::get(), false));
                T::Currency::reserve(&who, T::Deposit::get())
            })?;
-            Self::deposit_event(RawEvent::IndexAssigned(who, index));
+            Self::deposit_event(Event::IndexAssigned(who, index));
+            Ok(())
        }

        /// Assign an index already owned by the sender to another account. The balance reservation
@@ -169,8 +129,12 @@ decl_module! {
        /// - Reads: Indices Accounts, System Account (recipient)
        /// - Writes: Indices Accounts, System Account (recipient)
        /// #
-        #[weight = T::WeightInfo::transfer()]
-        fn transfer(origin, new: T::AccountId, index: T::AccountIndex) {
+        #[pallet::weight(T::WeightInfo::transfer())]
+        pub fn transfer(
+            origin: OriginFor<T>,
+            new: T::AccountId,
+            index: T::AccountIndex,
+        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            ensure!(who != new, Error::<T>::NotTransfer);

@@ -182,7 +146,8 @@ decl_module! {
                *maybe_value = Some((new.clone(), amount.saturating_sub(lost), false));
                Ok(())
            })?;
-            Self::deposit_event(RawEvent::IndexAssigned(new, index));
+            Self::deposit_event(Event::IndexAssigned(new, index));
+            Ok(())
        }

        /// Free up an index owned by the sender.
@@ -203,8 +168,8 @@ decl_module! {
        /// -------------------
        /// - DB Weight: 1 Read/Write (Accounts)
        /// #
-        #[weight = T::WeightInfo::free()]
-        fn free(origin, index: T::AccountIndex) {
+        #[pallet::weight(T::WeightInfo::free())]
+        pub fn free(origin: OriginFor<T>, index: T::AccountIndex) -> DispatchResult {
            let who = ensure_signed(origin)?;

            Accounts::<T>::try_mutate(index, |maybe_value| -> DispatchResult {
@@ -214,7 +179,8 @@ decl_module! {
                T::Currency::unreserve(&who, amount);
                Ok(())
            })?;
-            Self::deposit_event(RawEvent::IndexFreed(index));
+            Self::deposit_event(Event::IndexFreed(index));
+            Ok(())
        }

        /// Force an index to an account. This doesn't require a deposit. If the index is already
@@ -238,8 +204,13 @@ decl_module! {
        /// - Reads: Indices Accounts, System Account (original owner)
        /// - Writes: Indices Accounts, System Account (original owner)
        /// #
-        #[weight = T::WeightInfo::force_transfer()]
-        fn force_transfer(origin, new: T::AccountId, index: T::AccountIndex, freeze: bool) {
+        #[pallet::weight(T::WeightInfo::force_transfer())]
+        pub fn force_transfer(
+            origin: OriginFor<T>,
+            new: T::AccountId,
+            index: T::AccountIndex,
+            freeze: bool,
+        ) -> DispatchResult {
            ensure_root(origin)?;

            Accounts::<T>::mutate(index, |maybe_value| {
@@ -248,10 +219,12 @@ decl_module! {
                }
                *maybe_value = Some((new.clone(), Zero::zero(), freeze));
            });
-            Self::deposit_event(RawEvent::IndexAssigned(new, index));
+            Self::deposit_event(Event::IndexAssigned(new, index));
+            Ok(())
        }

-        /// Freeze an index so it will always point to the sender account. This consumes the deposit.
+        /// Freeze an index so it will always point to the sender account. This consumes the
+        /// deposit.
        ///
        /// The dispatch origin for this call must be _Signed_ and the signing account must have a
        /// non-frozen account `index`.
@@ -268,8 +241,8 @@ decl_module! {
        /// -------------------
        /// - DB Weight: 1 Read/Write (Accounts)
        /// #
-        #[weight = T::WeightInfo::freeze()]
-        fn freeze(origin, index: T::AccountIndex) {
+        #[pallet::weight(T::WeightInfo::freeze())]
+        pub fn freeze(origin: OriginFor<T>, index: T::AccountIndex) -> DispatchResult {
            let who = ensure_signed(origin)?;

            Accounts::<T>::try_mutate(index, |maybe_value| -> DispatchResult {
@@ -280,12 +253,68 @@ decl_module! {
                *maybe_value = Some((account, Zero::zero(), true));
                Ok(())
            })?;
-            Self::deposit_event(RawEvent::IndexFrozen(index, who));
+            Self::deposit_event(Event::IndexFrozen(index, who));
+            Ok(())
+        }
+    }
+
+    #[pallet::event]
+    #[pallet::generate_deposit(pub(super) fn deposit_event)]
+    pub enum Event<T: Config> {
+        /// An account index was assigned. \[who, index\]
+        IndexAssigned(T::AccountId, T::AccountIndex),
+        /// An account index has been freed up (unassigned). \[index\]
+        IndexFreed(T::AccountIndex),
+        /// An account index has been frozen to its current account ID.
\[index, who\] + IndexFrozen(T::AccountIndex, T::AccountId), + } + + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { + /// The index was not already assigned. + NotAssigned, + /// The index is assigned to another account. + NotOwner, + /// The index was not available. + InUse, + /// The source and destination accounts are identical. + NotTransfer, + /// The index is permanent and may not be freed/changed. + Permanent, + } + + /// The lookup from index to account. + #[pallet::storage] + pub type Accounts = + StorageMap<_, Blake2_128Concat, T::AccountIndex, (T::AccountId, BalanceOf, bool)>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub indices: Vec<(T::AccountIndex, T::AccountId)>, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { indices: Default::default() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + for (a, b) in &self.indices { + >::insert(a, (b, >::zero(), false)) + } } } } -impl Module { +impl Pallet { // PUBLIC IMMUTABLES /// Lookup an T::AccountIndex to get an Id, if there's one there. @@ -294,18 +323,17 @@ impl Module { } /// Lookup an address to get an Id, if there's one there. - pub fn lookup_address( - a: address::Address - ) -> Option { + pub fn lookup_address(a: MultiAddress) -> Option { match a { - address::Address::Id(i) => Some(i), - address::Address::Index(i) => Self::lookup_index(i), + MultiAddress::Id(i) => Some(i), + MultiAddress::Index(i) => Self::lookup_index(i), + _ => None, } } } -impl StaticLookup for Module { - type Source = address::Address; +impl StaticLookup for Pallet { + type Source = MultiAddress; type Target = T::AccountId; fn lookup(a: Self::Source) -> Result { @@ -313,6 +341,6 @@ impl StaticLookup for Module { } fn unlookup(a: Self::Target) -> Self::Source { - address::Address::Id(a) + MultiAddress::Id(a) } } diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index cfbd2e38c3d3f..f4c87016141b5 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,40 +19,39 @@ #![cfg(test)] -use sp_runtime::testing::Header; -use sp_runtime::Perbill; +use crate::{self as pallet_indices, Config}; +use frame_support::parameter_types; use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types, weights::Weight}; -use crate::{self as indices, Module, Trait}; -use frame_system as system; -use pallet_balances as balances; +use sp_runtime::testing::Header; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} -} -impl_outer_event!{ - pub enum MetaEvent for Test { - system, - balances, - indices, - } -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. 
-#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Test; +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, + } +); parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; - type Call = (); + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -60,32 +59,29 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = Indices; type Header = Header; - type Event = MetaEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type Event = MetaEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -95,22 +91,20 @@ parameter_types! { pub const Deposit: u64 = 1; } -impl Trait for Test { +impl Config for Test { type AccountIndex = u64; type Currency = Balances; type Deposit = Deposit; - type Event = MetaEvent; + type Event = Event; type WeightInfo = (); } pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } - -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; -pub type Indices = Module; diff --git a/frame/indices/src/tests.rs b/frame/indices/src/tests.rs index e288871d55307..37df20e9b9288 100644 --- a/frame/indices/src/tests.rs +++ b/frame/indices/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,15 +19,17 @@ #![cfg(test)] -use super::*; -use super::mock::*; -use frame_support::{assert_ok, assert_noop}; +use super::{mock::*, *}; +use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; #[test] fn claiming_should_work() { new_test_ext().execute_with(|| { - assert_noop!(Indices::claim(Some(0).into(), 0), BalancesError::::InsufficientBalance); + assert_noop!( + Indices::claim(Some(0).into(), 0), + BalancesError::::InsufficientBalance + ); assert_ok!(Indices::claim(Some(1).into(), 0)); assert_noop!(Indices::claim(Some(2).into(), 0), Error::::InUse); assert_eq!(Balances::reserved_balance(1), 1); diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs new file mode 100644 index 0000000000000..97db589739534 --- /dev/null +++ b/frame/indices/src/weights.rs @@ -0,0 +1,126 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_indices +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_indices +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/indices/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_indices. +pub trait WeightInfo { + fn claim() -> Weight; + fn transfer() -> Weight; + fn free() -> Weight; + fn force_transfer() -> Weight; + fn freeze() -> Weight; +} + +/// Weights for pallet_indices using the Substrate node and recommended hardware. 
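A quick sanity check on how the figures below compose: each weight function is a benchmarked base time plus the storage reads and writes annotated above it. The sketch assumes the default `RocksDbWeight` constants of this era (25 µs per read, 100 µs per write, with 1 weight unit = 1 picosecond); those constants are not part of this file, so treat them as an assumption:

```rust
// Back-of-the-envelope composition of `claim()`'s weight.
const WEIGHT_PER_MICRO: u64 = 1_000_000; // 1 weight = 1 ps, so 10^6 per µs
const ROCKS_DB_READ: u64 = 25 * WEIGHT_PER_MICRO; // assumed default read cost
const ROCKS_DB_WRITE: u64 = 100 * WEIGHT_PER_MICRO; // assumed default write cost

fn claim_weight() -> u64 {
    // Benchmarked base time plus the annotated 1 read + 1 write of
    // `Indices Accounts`.
    38_814_000 + ROCKS_DB_READ + ROCKS_DB_WRITE
}

fn main() {
    let w = claim_weight();
    assert_eq!(w, 163_814_000); // ~163.8 µs of reference hardware time
    println!("claim(): {} weight (~{:.1} µs)", w, w as f64 / WEIGHT_PER_MICRO as f64);
}
```

The DB charge dominates the benchmarked compute time here, which is typical, and is why the storage comments above each function matter as much as the base figure.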
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Indices Accounts (r:1 w:1) + fn claim() -> Weight { + (38_814_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn transfer() -> Weight { + (47_274_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + fn free() -> Weight { + (39_692_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn force_transfer() -> Weight { + (40_250_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + fn freeze() -> Weight { + (37_358_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Indices Accounts (r:1 w:1) + fn claim() -> Weight { + (38_814_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn transfer() -> Weight { + (47_274_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + fn free() -> Weight { + (39_692_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn force_transfer() -> Weight { + (40_250_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Indices Accounts (r:1 w:1) + fn freeze() -> Weight { + (37_358_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml new file mode 100644 index 0000000000000..f14d65310cc70 --- /dev/null +++ b/frame/lottery/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "pallet-lottery" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Participation Lottery Pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = 
"4.0.0-dev", default-features = false, path = "../system" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +frame-support-test = { version = "3.0.0", path = "../support/test" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-std/std", + "frame-support/std", + "sp-runtime/std", + "frame-system/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-system/runtime-benchmarks", + "frame-support/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs new file mode 100644 index 0000000000000..3b7035c72deb0 --- /dev/null +++ b/frame/lottery/src/benchmarking.rs @@ -0,0 +1,173 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Lottery pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::traits::{EnsureOrigin, OnInitialize, UnfilteredDispatchable}; +use frame_system::RawOrigin; +use sp_runtime::traits::{Bounded, Zero}; + +use crate::Pallet as Lottery; + +// Set up and start a lottery +fn setup_lottery(repeat: bool) -> Result<(), &'static str> { + let price = T::Currency::minimum_balance(); + let length = 10u32.into(); + let delay = 5u32.into(); + // Calls will be maximum length... + let mut calls = vec![ + frame_system::Call::::set_code { code: vec![] }.into(); + T::MaxCalls::get().saturating_sub(1) as usize + ]; + // Last call will be the match for worst case scenario. + calls.push(frame_system::Call::::remark { remark: vec![] }.into()); + let origin = T::ManagerOrigin::successful_origin(); + Lottery::::set_calls(origin.clone(), calls)?; + Lottery::::start_lottery(origin, price, length, delay, repeat)?; + Ok(()) +} + +benchmarks! { + buy_ticket { + let caller = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + setup_lottery::(false)?; + // force user to have a long vec of calls participating + let set_code_index: CallIndex = Lottery::::call_to_index( + &frame_system::Call::::set_code{ code: vec![] }.into() + )?; + let already_called: (u32, Vec) = ( + LotteryIndex::::get(), + vec![ + set_code_index; + T::MaxCalls::get().saturating_sub(1) as usize + ], + ); + Participants::::insert(&caller, already_called); + + let call = frame_system::Call::::remark { remark: vec![] }; + }: _(RawOrigin::Signed(caller), Box::new(call.into())) + verify { + assert_eq!(TicketsCount::::get(), 1); + } + + set_calls { + let n in 0 .. 
T::MaxCalls::get() as u32; + let calls = vec![frame_system::Call::::remark { remark: vec![] }.into(); n as usize]; + + let call = Call::::set_calls { calls }; + let origin = T::ManagerOrigin::successful_origin(); + assert!(CallIndices::::get().is_empty()); + }: { call.dispatch_bypass_filter(origin)? } + verify { + if !n.is_zero() { + assert!(!CallIndices::::get().is_empty()); + } + } + + start_lottery { + let price = BalanceOf::::max_value(); + let end = 10u32.into(); + let payout = 5u32.into(); + + let call = Call::::start_lottery { price, length: end, delay: payout, repeat: true }; + let origin = T::ManagerOrigin::successful_origin(); + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert!(crate::Lottery::::get().is_some()); + } + + stop_repeat { + setup_lottery::(true)?; + assert_eq!(crate::Lottery::::get().unwrap().repeat, true); + let call = Call::::stop_repeat {}; + let origin = T::ManagerOrigin::successful_origin(); + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_eq!(crate::Lottery::::get().unwrap().repeat, false); + } + + on_initialize_end { + setup_lottery::(false)?; + let winner = account("winner", 0, 0); + // User needs more than min balance to get ticket + T::Currency::make_free_balance_be(&winner, T::Currency::minimum_balance() * 10u32.into()); + // Make sure lottery account has at least min balance too + let lottery_account = Lottery::::account_id(); + T::Currency::make_free_balance_be(&lottery_account, T::Currency::minimum_balance() * 10u32.into()); + // Buy a ticket + let call = frame_system::Call::::remark { remark: vec![] }; + Lottery::::buy_ticket(RawOrigin::Signed(winner.clone()).into(), Box::new(call.into()))?; + // Kill user account for worst case + T::Currency::make_free_balance_be(&winner, 0u32.into()); + // Assert that lotto is set up for winner + assert_eq!(TicketsCount::::get(), 1); + assert!(!Lottery::::pot().1.is_zero()); + }: { + // Generate `MaxGenerateRandom` numbers for worst case scenario + for i in 0 .. T::MaxGenerateRandom::get() { + Lottery::::generate_random_number(i); + } + // Start lottery has block 15 configured for payout + Lottery::::on_initialize(15u32.into()); + } + verify { + assert!(crate::Lottery::::get().is_none()); + assert_eq!(TicketsCount::::get(), 0); + assert_eq!(Lottery::::pot().1, 0u32.into()); + assert!(!T::Currency::free_balance(&winner).is_zero()) + } + + on_initialize_repeat { + setup_lottery::(true)?; + let winner = account("winner", 0, 0); + // User needs more than min balance to get ticket + T::Currency::make_free_balance_be(&winner, T::Currency::minimum_balance() * 10u32.into()); + // Make sure lottery account has at least min balance too + let lottery_account = Lottery::::account_id(); + T::Currency::make_free_balance_be(&lottery_account, T::Currency::minimum_balance() * 10u32.into()); + // Buy a ticket + let call = frame_system::Call::::remark { remark: vec![] }; + Lottery::::buy_ticket(RawOrigin::Signed(winner.clone()).into(), Box::new(call.into()))?; + // Kill user account for worst case + T::Currency::make_free_balance_be(&winner, 0u32.into()); + // Assert that lotto is set up for winner + assert_eq!(TicketsCount::::get(), 1); + assert!(!Lottery::::pot().1.is_zero()); + }: { + // Generate `MaxGenerateRandom` numbers for worst case scenario + for i in 0 .. 
T::MaxGenerateRandom::get() { + Lottery::<T>::generate_random_number(i); + } + // Start lottery has block 15 configured for payout + Lottery::<T>::on_initialize(15u32.into()); + } + verify { + assert!(crate::Lottery::<T>::get().is_some()); + assert_eq!(LotteryIndex::<T>::get(), 2); + assert_eq!(TicketsCount::<T>::get(), 0); + assert_eq!(Lottery::<T>::pot().1, 0u32.into()); + assert!(!T::Currency::free_balance(&winner).is_zero()) + } +} + +impl_benchmark_test_suite!(Lottery, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs new file mode 100644 index 0000000000000..260b4c2d76ae9 --- /dev/null +++ b/frame/lottery/src/lib.rs @@ -0,0 +1,488 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A lottery pallet that uses participation in the network to purchase tickets. +//! +//! With this pallet, you can configure a lottery, which is a pot of money that +//! users contribute to, and that is reallocated to a single user at the end of +//! the lottery period. Just like a normal lottery system, to participate, you +//! need to "buy a ticket", which is used to fund the pot. +//! +//! The unique feature of this lottery system is that tickets can only be +//! purchased by making a "valid call" dispatched through this pallet. +//! By configuring certain calls to be valid for the lottery, you can encourage +//! users to make those calls on your network. An example of how this could be +//! used is to set validator nominations as a valid lottery call. If the lottery +//! is set to repeat every month, then users would be encouraged to re-nominate +//! validators every month. A user can only purchase one ticket per valid call +//! per lottery. +//! +//! This pallet can be configured to use dynamically set calls or statically set +//! calls. Call validation happens through the `ValidateCall` implementation. +//! This pallet provides one implementation of this using the `CallIndices` +//! storage item. You can also make your own implementation at the runtime level +//! which can contain much more complex logic, such as validation of the +//! parameters, which this pallet alone cannot do. +//! +//! This pallet uses the modulus operator to pick a random winner. It is known +//! that this might introduce a bias if the random number is chosen from a range +//! that is not perfectly divisible by the total number of participants. The +//! `MaxGenerateRandom` configuration can help mitigate this by generating new +//! numbers until we hit the limit or we find a "fair" number. This is best +//! effort only.
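// A minimal, self-contained sketch of the best-effort rejection sampling
// described above, assuming `total > 0`; `draw` and `choose_unbiased` are
// illustrative names standing in for the pallet's randomness plumbing.
fn choose_unbiased(mut draw: impl FnMut() -> u32, total: u32, max_attempts: u32) -> u32 {
    // Draws below this threshold are uniformly distributed modulo `total`,
    // because the threshold is the largest multiple of `total` that fits in a u32.
    let threshold = u32::MAX - u32::MAX % total;
    let mut x = draw();
    for _ in 1..max_attempts {
        if x < threshold {
            break
        }
        x = draw();
    }
    x % total
}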
+ +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; + +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{DispatchResult, Dispatchable, GetDispatchInfo}, + ensure, + traits::{Currency, ExistenceRequirement::KeepAlive, Get, Randomness, ReservableCurrency}, + PalletId, RuntimeDebug, +}; +pub use pallet::*; +use sp_runtime::{ + traits::{AccountIdConversion, Saturating, Zero}, + ArithmeticError, DispatchError, +}; +use sp_std::prelude::*; +pub use weights::WeightInfo; + +type BalanceOf<T> = + <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; + +// Any runtime call can be encoded into two bytes which represent the pallet and call index. +// We use this to uniquely match someone's incoming call with the calls configured for the lottery. +type CallIndex = (u8, u8); + +#[derive(Encode, Decode, Default, Eq, PartialEq, RuntimeDebug, scale_info::TypeInfo)] +pub struct LotteryConfig<Balance, BlockNumber> { + /// Price per entry. + price: Balance, + /// Starting block of the lottery. + start: BlockNumber, + /// Length of the lottery (start + length = end). + length: BlockNumber, + /// Delay for choosing the winner of the lottery. (start + length + delay = payout). + /// Randomness in the "payout" block will be used to determine the winner. + delay: BlockNumber, + /// Whether this lottery will repeat after it completes. + repeat: bool, +} + +pub trait ValidateCall<T: Config> { + fn validate_call(call: &<T as Config>::Call) -> bool; +} + +impl<T: Config> ValidateCall<T> for () { + fn validate_call(_: &<T as Config>::Call) -> bool { + false + } +} + +impl<T: Config> ValidateCall<T> for Pallet<T> { + fn validate_call(call: &<T as Config>::Call) -> bool { + let valid_calls = CallIndices::<T>::get(); + let call_index = match Self::call_to_index(&call) { + Ok(call_index) => call_index, + Err(_) => return false, + }; + valid_calls.iter().any(|c| call_index == *c) + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{pallet_prelude::*, traits::EnsureOrigin, weights::Weight, Parameter}; + use frame_system::{ensure_signed, pallet_prelude::*}; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet<T>(_); + + /// The pallet's config trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The Lottery's pallet id. + #[pallet::constant] + type PalletId: Get<PalletId>; + + /// A dispatchable call. + type Call: Parameter + + Dispatchable<Origin = Self::Origin> + + GetDispatchInfo + + From<frame_system::Call<Self>>; + + /// The currency trait. + type Currency: ReservableCurrency<Self::AccountId>; + + /// Something that provides randomness in the runtime. + type Randomness: Randomness<Self::Hash, Self::BlockNumber>; + + /// The overarching event type. + type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; + + /// The manager origin. + type ManagerOrigin: EnsureOrigin<Self::Origin>; + + /// The max number of calls available in a single lottery. + #[pallet::constant] + type MaxCalls: Get<u32>; + + /// Used to determine if a call would be valid for purchasing a ticket. + /// + /// Be conscious of the implementation used here. We assume at worst that + /// a vector of `MaxCalls` indices is queried for any call validation. + /// You may need to provide a custom benchmark if this assumption is broken. + type ValidateCall: ValidateCall<Self>; + + /// Number of times we should try to generate a random number that has no modulo bias. + /// The larger this number, the more potential computation is used for picking the winner, + /// but also the more likely it is that the winner is chosen fairly.
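// A small sketch of the two-byte `CallIndex` convention introduced above,
// assuming SCALE encoding where byte 0 identifies the pallet and byte 1 the
// call within it. `to_call_index` is an illustrative name; the pallet's own
// `call_to_index` returns a `Result` with `EncodingFailed` instead of `Option`.
fn to_call_index(encoded_call: &[u8]) -> Option<(u8, u8)> {
    // Calls shorter than two bytes cannot carry a (pallet, call) pair.
    if encoded_call.len() < 2 {
        return None
    }
    Some((encoded_call[0], encoded_call[1]))
}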
+ #[pallet::constant] + type MaxGenerateRandom: Get<u32>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event<T: Config> { + /// A lottery has been started! + LotteryStarted, + /// A new set of calls has been set! + CallsUpdated, + /// A winner has been chosen! + Winner(T::AccountId, BalanceOf<T>), + /// A ticket has been bought! + TicketBought(T::AccountId, CallIndex), + } + + #[pallet::error] + pub enum Error<T> { + /// A lottery has not been configured. + NotConfigured, + /// A lottery is already in progress. + InProgress, + /// A lottery has already ended. + AlreadyEnded, + /// The call is not valid for an open lottery. + InvalidCall, + /// You are already participating in the lottery with this call. + AlreadyParticipating, + /// Too many calls for a single lottery. + TooManyCalls, + /// Failed to encode calls. + EncodingFailed, + } + + #[pallet::storage] + pub(crate) type LotteryIndex<T> = StorageValue<_, u32, ValueQuery>; + + /// The configuration for the current lottery. + #[pallet::storage] + pub(crate) type Lottery<T: Config> = + StorageValue<_, LotteryConfig<BalanceOf<T>, T::BlockNumber>>; + + /// Users who have purchased a ticket. (Lottery Index, Tickets Purchased) + #[pallet::storage] + pub(crate) type Participants<T: Config> = + StorageMap<_, Twox64Concat, T::AccountId, (u32, Vec<CallIndex>), ValueQuery>; + + /// Total number of tickets sold. + #[pallet::storage] + pub(crate) type TicketsCount<T> = StorageValue<_, u32, ValueQuery>; + + /// Each ticket's owner. + /// + /// May have residual storage from previous lotteries. Use `TicketsCount` to see which ones + /// are actually valid ticket mappings. + #[pallet::storage] + pub(crate) type Tickets<T: Config> = StorageMap<_, Twox64Concat, u32, T::AccountId>; + + /// The calls stored in this pallet to be used in an active lottery if configured + /// by `Config::ValidateCall`. + #[pallet::storage] + pub(crate) type CallIndices<T> = StorageValue<_, Vec<CallIndex>, ValueQuery>; + + #[pallet::hooks] + impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { + fn on_initialize(n: T::BlockNumber) -> Weight { + Lottery::<T>::mutate(|mut lottery| -> Weight { + if let Some(config) = &mut lottery { + let payout_block = + config.start.saturating_add(config.length).saturating_add(config.delay); + if payout_block <= n { + let (lottery_account, lottery_balance) = Self::pot(); + let ticket_count = TicketsCount::<T>::get(); + + let winning_number = Self::choose_winner(ticket_count); + let winner = Tickets::<T>::get(winning_number).unwrap_or(lottery_account); + // Not much we can do if this fails... + let res = T::Currency::transfer( + &Self::account_id(), + &winner, + lottery_balance, + KeepAlive, + ); + debug_assert!(res.is_ok()); + + Self::deposit_event(Event::<T>::Winner(winner, lottery_balance)); + + TicketsCount::<T>::kill(); + + if config.repeat { + // If lottery should repeat, increment index by 1. + LotteryIndex::<T>::mutate(|index| *index = index.saturating_add(1)); + // Set a new start with the current block. + config.start = n; + return T::WeightInfo::on_initialize_repeat() + } else { + // Else, kill the lottery storage. + *lottery = None; + return T::WeightInfo::on_initialize_end() + } + // We choose not to kill Participants and Tickets to avoid a large + // number of writes at one time. Instead, data persists between lotteries, + // but is not used if it is not relevant. + } + } + return T::DbWeight::get().reads(1) + }) + } + } + + #[pallet::call] + impl<T: Config> Pallet<T> { + /// Buy a ticket to enter the lottery.
+ /// + /// This extrinsic acts as a passthrough function for `call`. In all + /// situations where `call` alone would succeed, this extrinsic should + /// succeed. + /// + /// If `call` is successful, then we will attempt to purchase a ticket, + /// which may fail silently. To detect success of a ticket purchase, you + /// should listen for the `TicketBought` event. + /// + /// This extrinsic must be called by a signed origin. + #[pallet::weight( + T::WeightInfo::buy_ticket() + .saturating_add(call.get_dispatch_info().weight) + )] + pub fn buy_ticket(origin: OriginFor, call: Box<::Call>) -> DispatchResult { + let caller = ensure_signed(origin.clone())?; + call.clone().dispatch(origin).map_err(|e| e.error)?; + + let _ = Self::do_buy_ticket(&caller, &call); + Ok(()) + } + + /// Set calls in storage which can be used to purchase a lottery ticket. + /// + /// This function only matters if you use the `ValidateCall` implementation + /// provided by this pallet, which uses storage to determine the valid calls. + /// + /// This extrinsic must be called by the Manager origin. + #[pallet::weight(T::WeightInfo::set_calls(calls.len() as u32))] + pub fn set_calls(origin: OriginFor, calls: Vec<::Call>) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + ensure!(calls.len() <= T::MaxCalls::get() as usize, Error::::TooManyCalls); + if calls.is_empty() { + CallIndices::::kill(); + } else { + let indices = Self::calls_to_indices(&calls)?; + CallIndices::::put(indices); + } + Self::deposit_event(Event::::CallsUpdated); + Ok(()) + } + + /// Start a lottery using the provided configuration. + /// + /// This extrinsic must be called by the `ManagerOrigin`. + /// + /// Parameters: + /// + /// * `price`: The cost of a single ticket. + /// * `length`: How long the lottery should run for starting at the current block. + /// * `delay`: How long after the lottery end we should wait before picking a winner. + /// * `repeat`: If the lottery should repeat when completed. + #[pallet::weight(T::WeightInfo::start_lottery())] + pub fn start_lottery( + origin: OriginFor, + price: BalanceOf, + length: T::BlockNumber, + delay: T::BlockNumber, + repeat: bool, + ) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + Lottery::::try_mutate(|lottery| -> DispatchResult { + ensure!(lottery.is_none(), Error::::InProgress); + let index = LotteryIndex::::get(); + let new_index = index.checked_add(1).ok_or(ArithmeticError::Overflow)?; + let start = frame_system::Pallet::::block_number(); + // Use new_index to more easily track everything with the current state. + *lottery = Some(LotteryConfig { price, start, length, delay, repeat }); + LotteryIndex::::put(new_index); + Ok(()) + })?; + // Make sure pot exists. + let lottery_account = Self::account_id(); + if T::Currency::total_balance(&lottery_account).is_zero() { + T::Currency::deposit_creating(&lottery_account, T::Currency::minimum_balance()); + } + Self::deposit_event(Event::::LotteryStarted); + Ok(()) + } + + /// If a lottery is repeating, you can use this to stop the repeat. + /// The lottery will continue to run to completion. + /// + /// This extrinsic must be called by the `ManagerOrigin`. + #[pallet::weight(T::WeightInfo::stop_repeat())] + pub fn stop_repeat(origin: OriginFor) -> DispatchResult { + T::ManagerOrigin::ensure_origin(origin)?; + Lottery::::mutate(|mut lottery| { + if let Some(config) = &mut lottery { + config.repeat = false + } + }); + Ok(()) + } + } +} + +impl Pallet { + /// The account ID of the lottery pot. 
+ /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::PalletId::get().into_account() + } + + /// Return the pot account and amount of money in the pot. + // The existential deposit is not part of the pot so the lottery account never gets deleted. + fn pot() -> (T::AccountId, BalanceOf<T>) { + let account_id = Self::account_id(); + let balance = + T::Currency::free_balance(&account_id).saturating_sub(T::Currency::minimum_balance()); + + (account_id, balance) + } + + // Converts a vector of calls into a vector of call indices. + fn calls_to_indices(calls: &[<T as Config>::Call]) -> Result<Vec<CallIndex>, DispatchError> { + let mut indices = Vec::with_capacity(calls.len()); + for c in calls.iter() { + let index = Self::call_to_index(c)?; + indices.push(index) + } + Ok(indices) + } + + // Convert a call to its call index by encoding the call and taking the first two bytes. + fn call_to_index(call: &<T as Config>::Call) -> Result<CallIndex, DispatchError> { + let encoded_call = call.encode(); + if encoded_call.len() < 2 { + Err(Error::<T>::EncodingFailed)? + } + return Ok((encoded_call[0], encoded_call[1])) + } + + // Logic for buying a ticket. + fn do_buy_ticket(caller: &T::AccountId, call: &<T as Config>::Call) -> DispatchResult { + // Check the call is valid for the current lottery. + let config = Lottery::<T>::get().ok_or(Error::<T>::NotConfigured)?; + let block_number = frame_system::Pallet::<T>::block_number(); + ensure!( + block_number < config.start.saturating_add(config.length), + Error::<T>::AlreadyEnded + ); + ensure!(T::ValidateCall::validate_call(call), Error::<T>::InvalidCall); + let call_index = Self::call_to_index(call)?; + let ticket_count = TicketsCount::<T>::get(); + let new_ticket_count = ticket_count.checked_add(1).ok_or(ArithmeticError::Overflow)?; + // Try to update the participant status + Participants::<T>::try_mutate( + &caller, + |(lottery_index, participating_calls)| -> DispatchResult { + let index = LotteryIndex::<T>::get(); + // If lottery index doesn't match, then reset participating calls and index. + if *lottery_index != index { + *participating_calls = Vec::new(); + *lottery_index = index; + } else { + // Check that user is not already participating under this call. + ensure!( + !participating_calls.iter().any(|c| call_index == *c), + Error::<T>::AlreadyParticipating + ); + } + // Check user has enough funds and send it to the Lottery account. + T::Currency::transfer(caller, &Self::account_id(), config.price, KeepAlive)?; + // Create a new ticket. + TicketsCount::<T>::put(new_ticket_count); + Tickets::<T>::insert(ticket_count, caller.clone()); + participating_calls.push(call_index); + Ok(()) + }, + )?; + + Self::deposit_event(Event::<T>::TicketBought(caller.clone(), call_index)); + + Ok(()) + } + + // Randomly choose a winner from among the total number of participants. + fn choose_winner(total: u32) -> u32 { + let mut random_number = Self::generate_random_number(0); + + // Best effort attempt to remove bias from modulus operator. + for i in 1..T::MaxGenerateRandom::get() { + if random_number < u32::MAX - u32::MAX % total { + break + } + + random_number = Self::generate_random_number(i); + } + + random_number % total + } + + // Generate a random number from a given seed. + // Note that there is potential bias introduced by using the modulus operator. + // You should call this function with different seed values until the random + // number lies within `u32::MAX - u32::MAX % n`.
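// A sketch of the decode step performed in `generate_random_number` below,
// assuming the randomness source yields a hash of at least four bytes:
// SCALE-decoding a `u32` reads the first four bytes, little-endian.
// `hash_to_u32` is an illustrative name, not part of the pallet.
fn hash_to_u32(hash: &[u8]) -> u32 {
    let mut word = [0u8; 4];
    word.copy_from_slice(&hash[..4]); // panics if `hash` is shorter than 4 bytes
    u32::from_le_bytes(word)
}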
+ // TODO: deal with randomness freshness + // https://github.com/paritytech/substrate/issues/8311 + fn generate_random_number(seed: u32) -> u32 { + let (random_seed, _) = T::Randomness::random(&(T::PalletId::get(), seed).encode()); + let random_number = ::decode(&mut random_seed.as_ref()) + .expect("secure hashes should always be bigger than u32; qed"); + random_number + } +} diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs new file mode 100644 index 0000000000000..d1f090aa26dcb --- /dev/null +++ b/frame/lottery/src/mock.rs @@ -0,0 +1,143 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities + +use super::*; +use crate as pallet_lottery; + +use frame_support::{ + parameter_types, + traits::{OnFinalize, OnInitialize}, +}; +use frame_support_test::TestRandomness; +use frame_system::EnsureRoot; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: u32 = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const LotteryPalletId: PalletId = PalletId(*b"py/lotto"); + pub const MaxCalls: u32 = 2; + pub const MaxGenerateRandom: u32 = 10; +} + +impl Config for Test { + type PalletId = LotteryPalletId; + type Call = Call; + type Currency = Balances; + type Randomness = TestRandomness; + type Event = Event; + type ManagerOrigin = EnsureRoot; + type MaxCalls = MaxCalls; + type ValidateCall = Lottery; + type MaxGenerateRandom = MaxGenerateRandom; + type WeightInfo = (); +} + +pub type SystemCall = frame_system::Call; +pub type BalancesCall = pallet_balances::Call; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() +} + +/// Run until a particular block. +pub fn run_to_block(n: u64) { + while System::block_number() < n { + if System::block_number() > 1 { + Lottery::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + } + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Lottery::on_initialize(System::block_number()); + } +} diff --git a/frame/lottery/src/tests.rs b/frame/lottery/src/tests.rs new file mode 100644 index 0000000000000..623beea4a6b5b --- /dev/null +++ b/frame/lottery/src/tests.rs @@ -0,0 +1,253 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for the module. 
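// A minimal sketch of the test pattern used throughout this file, assuming
// the `mock` helpers and the imports declared just below: build externalities
// with `new_test_ext`, then drive blocks with `run_to_block` so the lottery's
// `on_initialize` hook fires. The test name is illustrative.
#[test]
fn sketch_lottery_lifecycle() {
    new_test_ext().execute_with(|| {
        // Allow one call, start a non-repeating lottery, and buy one ticket.
        let calls = vec![Call::System(SystemCall::remark { remark: vec![] })];
        assert_ok!(Lottery::set_calls(Origin::root(), calls));
        assert_ok!(Lottery::start_lottery(Origin::root(), 1, 10, 5, false));
        let ticket = Box::new(Call::System(SystemCall::remark { remark: vec![] }));
        assert_ok!(Lottery::buy_ticket(Origin::signed(1), ticket));
        // `length` (10) + `delay` (5) blocks after the start block, the winner
        // is paid out and a non-repeating lottery's config is removed.
        run_to_block(16);
        assert!(crate::Lottery::<Test>::get().is_none());
    });
}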
+ +use super::*; +use frame_support::{assert_noop, assert_ok}; +use mock::{ + new_test_ext, run_to_block, Balances, BalancesCall, Call, Lottery, Origin, SystemCall, Test, +}; +use pallet_balances::Error as BalancesError; +use sp_runtime::traits::BadOrigin; + +#[test] +fn initial_state() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(Lottery::account_id()), 0); + assert!(crate::Lottery::::get().is_none()); + assert_eq!(Participants::::get(&1), (0, vec![])); + assert_eq!(TicketsCount::::get(), 0); + assert!(Tickets::::get(0).is_none()); + }); +} + +#[test] +fn basic_end_to_end_works() { + new_test_ext().execute_with(|| { + let price = 10; + let length = 20; + let delay = 5; + let calls = vec![ + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + ]; + + // Set calls for the lottery + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + + // Start lottery, it repeats + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, true)); + assert!(crate::Lottery::::get().is_some()); + + assert_eq!(Balances::free_balance(&1), 100); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + // 20 from the transfer, 10 from buying a ticket + assert_eq!(Balances::free_balance(&1), 100 - 20 - 10); + assert_eq!(Participants::::get(&1).1.len(), 1); + assert_eq!(TicketsCount::::get(), 1); + // 1 owns the 0 ticket + assert_eq!(Tickets::::get(0), Some(1)); + + // More ticket purchases + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(3), call.clone())); + assert_ok!(Lottery::buy_ticket(Origin::signed(4), call.clone())); + assert_eq!(TicketsCount::::get(), 4); + + // Go to end + run_to_block(20); + assert_ok!(Lottery::buy_ticket(Origin::signed(5), call.clone())); + // Ticket isn't bought + assert_eq!(TicketsCount::::get(), 4); + + // Go to payout + run_to_block(25); + // User 1 wins + assert_eq!(Balances::free_balance(&1), 70 + 40); + // Lottery is reset and restarted + assert_eq!(TicketsCount::::get(), 0); + assert_eq!(LotteryIndex::::get(), 2); + assert_eq!( + crate::Lottery::::get().unwrap(), + LotteryConfig { price, start: 25, length, delay, repeat: true } + ); + }); +} + +#[test] +fn set_calls_works() { + new_test_ext().execute_with(|| { + assert!(!CallIndices::::exists()); + + let calls = vec![ + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + ]; + + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + assert!(CallIndices::::exists()); + + let too_many_calls = vec![ + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + Call::System(SystemCall::remark { remark: vec![] }), + ]; + + assert_noop!( + Lottery::set_calls(Origin::root(), too_many_calls), + Error::::TooManyCalls, + ); + + // Clear calls + assert_ok!(Lottery::set_calls(Origin::root(), vec![])); + assert!(CallIndices::::get().is_empty()); + }); +} + +#[test] +fn start_lottery_works() { + new_test_ext().execute_with(|| { + let price = 10; + let length = 20; + let delay = 5; + + // Setup ignores bad origin + assert_noop!( + Lottery::start_lottery(Origin::signed(1), price, length, delay, false), + BadOrigin, + ); + + // All good + 
assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); + + // Can't open another one if lottery is already present + assert_noop!( + Lottery::start_lottery(Origin::root(), price, length, delay, false), + Error::::InProgress, + ); + }); +} + +#[test] +fn buy_ticket_works_as_simple_passthrough() { + // This test checks that even if the user could not buy a ticket, that `buy_ticket` acts + // as a simple passthrough to the real call. + new_test_ext().execute_with(|| { + // No lottery set up + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + // This is just a basic transfer then + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(Balances::free_balance(&1), 100 - 20); + assert_eq!(TicketsCount::::get(), 0); + + // Lottery is set up, but too expensive to enter, so `do_buy_ticket` fails. + let calls = vec![ + Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + ]; + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + + // Ticket price of 60 would kill the user's account + assert_ok!(Lottery::start_lottery(Origin::root(), 60, 10, 5, false)); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(Balances::free_balance(&1), 100 - 20 - 20); + assert_eq!(TicketsCount::::get(), 0); + + // If call would fail, the whole thing still fails the same + let fail_call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1000 })); + assert_noop!( + Lottery::buy_ticket(Origin::signed(1), fail_call), + BalancesError::::InsufficientBalance, + ); + + let bad_origin_call = + Box::new(Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 })); + assert_noop!(Lottery::buy_ticket(Origin::signed(1), bad_origin_call), BadOrigin,); + + // User can call other txs, but doesn't get a ticket + let remark_call = + Box::new(Call::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), remark_call)); + assert_eq!(TicketsCount::::get(), 0); + + let successful_call = + Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), successful_call)); + assert_eq!(TicketsCount::::get(), 1); + }); +} + +#[test] +fn buy_ticket_works() { + new_test_ext().execute_with(|| { + // Set calls for the lottery. 
+ let calls = vec![ + Call::System(SystemCall::remark { remark: vec![] }), + Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + ]; + assert_ok!(Lottery::set_calls(Origin::root(), calls)); + + // Can't buy ticket before start + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(TicketsCount::::get(), 0); + + // Start lottery + assert_ok!(Lottery::start_lottery(Origin::root(), 1, 20, 5, false)); + + // Go to start, buy ticket for transfer + run_to_block(5); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); + assert_eq!(TicketsCount::::get(), 1); + + // Can't buy another of the same ticket (even if call is slightly changed) + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 3, value: 30 })); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); + assert_eq!(TicketsCount::::get(), 1); + + // Buy ticket for remark + let call = Box::new(Call::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); + assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_eq!(TicketsCount::::get(), 2); + + // Go to end, can't buy tickets anymore + run_to_block(20); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_eq!(TicketsCount::::get(), 2); + + // Go to payout, can't buy tickets when there is no lottery open + run_to_block(25); + assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_eq!(TicketsCount::::get(), 0); + assert_eq!(LotteryIndex::::get(), 1); + }); +} + +#[test] +fn start_lottery_will_create_account() { + new_test_ext().execute_with(|| { + let price = 10; + let length = 20; + let delay = 5; + + assert_eq!(Balances::total_balance(&Lottery::account_id()), 0); + assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); + assert_eq!(Balances::total_balance(&Lottery::account_id()), 1); + }); +} diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs new file mode 100644 index 0000000000000..5fbc61a32e577 --- /dev/null +++ b/frame/lottery/src/weights.rs @@ -0,0 +1,171 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_lottery +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_lottery +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/lottery/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_lottery. +pub trait WeightInfo { + fn buy_ticket() -> Weight; + fn set_calls(n: u32, ) -> Weight; + fn start_lottery() -> Weight; + fn stop_repeat() -> Weight; + fn on_initialize_end() -> Weight; + fn on_initialize_repeat() -> Weight; +} + +/// Weights for pallet_lottery using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Lottery Lottery (r:1 w:0) + // Storage: Lottery CallIndices (r:1 w:0) + // Storage: Lottery TicketsCount (r:1 w:1) + // Storage: Lottery Participants (r:1 w:1) + // Storage: Lottery LotteryIndex (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Lottery Tickets (r:0 w:1) + fn buy_ticket() -> Weight { + (70_034_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Lottery CallIndices (r:0 w:1) + fn set_calls(n: u32, ) -> Weight { + (15_243_000 as Weight) + // Standard Error: 8_000 + .saturating_add((312_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Lottery Lottery (r:1 w:1) + // Storage: Lottery LotteryIndex (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn start_lottery() -> Weight { + (57_312_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Lottery Lottery (r:1 w:1) + fn stop_repeat() -> Weight { + (6_964_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + // Storage: Lottery Lottery (r:1 w:1) + // Storage: System Account (r:2 w:2) + // Storage: Lottery TicketsCount (r:1 w:1) + // Storage: Lottery Tickets (r:1 w:0) + fn on_initialize_end() -> Weight { + (110_470_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + // Storage: Lottery Lottery (r:1 w:1) + // Storage: System Account (r:2 w:2) + // Storage: Lottery TicketsCount (r:1 w:1) + // Storage: Lottery Tickets (r:1 w:0) + // Storage: Lottery LotteryIndex (r:1 w:1) + fn on_initialize_repeat() -> Weight { + (114_794_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Lottery Lottery (r:1 w:0) + // Storage: Lottery CallIndices (r:1 w:0) + // Storage: Lottery TicketsCount (r:1 w:1) + // Storage: Lottery Participants (r:1 w:1) + // Storage: Lottery LotteryIndex (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Lottery Tickets (r:0 
w:1) + fn buy_ticket() -> Weight { + (70_034_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Lottery CallIndices (r:0 w:1) + fn set_calls(n: u32, ) -> Weight { + (15_243_000 as Weight) + // Standard Error: 8_000 + .saturating_add((312_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Lottery Lottery (r:1 w:1) + // Storage: Lottery LotteryIndex (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn start_lottery() -> Weight { + (57_312_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Lottery Lottery (r:1 w:1) + fn stop_repeat() -> Weight { + (6_964_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + // Storage: Lottery Lottery (r:1 w:1) + // Storage: System Account (r:2 w:2) + // Storage: Lottery TicketsCount (r:1 w:1) + // Storage: Lottery Tickets (r:1 w:0) + fn on_initialize_end() -> Weight { + (110_470_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + // Storage: Lottery Lottery (r:1 w:1) + // Storage: System Account (r:2 w:2) + // Storage: Lottery TicketsCount (r:1 w:1) + // Storage: Lottery Tickets (r:1 w:0) + // Storage: Lottery LotteryIndex (r:1 w:1) + fn on_initialize_repeat() -> Weight { + (114_794_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } +} diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 1cac5d38c5f18..acc82f7678de6 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-membership" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,25 +13,37 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +log = { version = "0.4.0", default-features = false } -[dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = 
"../../primitives/std" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", + "log/std", + "sp-core/std", + "sp-io/std", "sp-runtime/std", "sp-std/std", - "sp-io/std", "frame-support/std", "frame-system/std", + "frame-benchmarking/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 492fda88dd170..7922d9efaf569 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,71 +18,120 @@ //! # Membership Module //! //! Allows control of membership of a set of `AccountId`s, useful for managing membership of of a -//! collective. A prime member may be set. +//! collective. A prime member may be set // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, - traits::{ChangeMembers, InitializeMembers, EnsureOrigin, Contains}, +use frame_support::traits::{ + ChangeMembers, Contains, Get, InitializeMembers, SortedMembers, StorageVersion, }; -use frame_system::ensure_signed; +use sp_std::prelude::*; -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +pub mod migrations; +pub mod weights; - /// Required origin for adding a member (though can always be Root). - type AddOrigin: EnsureOrigin; +pub use pallet::*; +pub use weights::WeightInfo; - /// Required origin for removing a member (though can always be Root). - type RemoveOrigin: EnsureOrigin; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - /// Required origin for adding and removing a member in a single action. - type SwapOrigin: EnsureOrigin; + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); - /// Required origin for resetting membership. - type ResetOrigin: EnsureOrigin; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(PhantomData<(T, I)>); - /// Required origin for setting or resetting the prime member. - type PrimeOrigin: EnsureOrigin; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// The receiver of the signal for when the membership has been initialized. This happens pre- - /// genesis and will usually be the same as `MembershipChanged`. If you need to do something - /// different on initialization, then you can change this accordingly. - type MembershipInitialized: InitializeMembers; + /// Required origin for adding a member (though can always be Root). 
+ type AddOrigin: EnsureOrigin; - /// The receiver of the signal for when the membership has changed. - type MembershipChanged: ChangeMembers; -} + /// Required origin for removing a member (though can always be Root). + type RemoveOrigin: EnsureOrigin; + + /// Required origin for adding and removing a member in a single action. + type SwapOrigin: EnsureOrigin; + + /// Required origin for resetting membership. + type ResetOrigin: EnsureOrigin; + + /// Required origin for setting or resetting the prime member. + type PrimeOrigin: EnsureOrigin; -decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Membership { - /// The current membership, stored as an ordered Vec. - Members get(fn members): Vec; + /// The receiver of the signal for when the membership has been initialized. This happens + /// pre-genesis and will usually be the same as `MembershipChanged`. If you need to do + /// something different on initialization, then you can change this accordingly. + type MembershipInitialized: InitializeMembers; + + /// The receiver of the signal for when the membership has changed. + type MembershipChanged: ChangeMembers; + + /// The maximum number of members that this membership can have. + /// + /// This is used for benchmarking. Re-run the benchmarks if this changes. + /// + /// This is not enforced in the code; the membership size can exceed this limit. + type MaxMembers: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } - /// The current prime member, if one exists. - Prime get(fn prime): Option; + /// The current membership, stored as an ordered Vec. + #[pallet::storage] + #[pallet::getter(fn members)] + pub type Members, I: 'static = ()> = + StorageValue<_, Vec, ValueQuery>; + + /// The current prime member, if one exists. + #[pallet::storage] + #[pallet::getter(fn prime)] + pub type Prime, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + pub members: Vec, + pub phantom: PhantomData, } - add_extra_genesis { - config(members): Vec; - config(phantom): sp_std::marker::PhantomData; - build(|config: &Self| { - let mut members = config.members.clone(); + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { members: Vec::new(), phantom: Default::default() } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + use sp_std::collections::btree_set::BTreeSet; + let members_set: BTreeSet<_> = self.members.iter().collect(); + assert_eq!( + members_set.len(), + self.members.len(), + "Members cannot contain duplicate accounts." + ); + + let mut members = self.members.clone(); members.sort(); T::MembershipInitialized::initialize_members(&members); >::put(members); - }) + } } -} -decl_event!( - pub enum Event where - ::AccountId, - >::Event, - { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { /// The given member was added; see the transaction for who. MemberAdded, /// The given member was removed; see the transaction for who. @@ -94,60 +143,62 @@ decl_event!( /// One of the members' keys changed. KeyChanged, /// Phantom member, never used. - Dummy(sp_std::marker::PhantomData<(AccountId, Event)>), + Dummy(PhantomData<(T::AccountId, >::Event)>), } -); -decl_error! { - /// Error for the nicks module. 
- pub enum Error for Module, I: Instance> { + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { /// Already a member. AlreadyMember, /// Not a member. NotMember, } -} - -decl_module! { - pub struct Module, I: Instance=DefaultInstance> - for enum Call - where origin: T::Origin - { - fn deposit_event() = default; + #[pallet::call] + impl, I: 'static> Pallet { /// Add a member `who` to the set. /// /// May only be called from `T::AddOrigin`. - #[weight = 50_000_000] - pub fn add_member(origin, who: T::AccountId) { + #[pallet::weight(50_000_000)] + pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; let mut members = >::get(); let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; members.insert(location, who.clone()); + + Self::maybe_warn_max_members(&members); >::put(&members); T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]); - Self::deposit_event(RawEvent::MemberAdded); + Self::deposit_event(Event::MemberAdded); + Ok(()) } /// Remove a member `who` from the set. /// /// May only be called from `T::RemoveOrigin`. - #[weight = 50_000_000] - pub fn remove_member(origin, who: T::AccountId) { + #[pallet::weight(50_000_000)] + pub fn remove_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; let mut members = >::get(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; members.remove(location); + + Self::maybe_warn_max_members(&members); >::put(&members); T::MembershipChanged::change_members_sorted(&[], &[who], &members[..]); Self::rejig_prime(&members); - Self::deposit_event(RawEvent::MemberRemoved); + Self::deposit_event(Event::MemberRemoved); + Ok(()) } /// Swap out one member `remove` for another `add`. @@ -155,35 +206,40 @@ decl_module! { /// May only be called from `T::SwapOrigin`. /// /// Prime membership is *not* passed from `remove` to `add`, if extant. - #[weight = 50_000_000] - pub fn swap_member(origin, remove: T::AccountId, add: T::AccountId) { + #[pallet::weight(50_000_000)] + pub fn swap_member( + origin: OriginFor, + remove: T::AccountId, + add: T::AccountId, + ) -> DispatchResult { T::SwapOrigin::ensure_origin(origin)?; - if remove == add { return Ok(()) } + if remove == add { + return Ok(()) + } let mut members = >::get(); let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; let _ = members.binary_search(&add).err().ok_or(Error::::AlreadyMember)?; members[location] = add.clone(); members.sort(); + + Self::maybe_warn_max_members(&members); >::put(&members); - T::MembershipChanged::change_members_sorted( - &[add], - &[remove], - &members[..], - ); + T::MembershipChanged::change_members_sorted(&[add], &[remove], &members[..]); Self::rejig_prime(&members); - Self::deposit_event(RawEvent::MembersSwapped); + Self::deposit_event(Event::MembersSwapped); + Ok(()) } /// Change the membership to a new set, disregarding the existing membership. Be nice and /// pass `members` pre-sorted. /// /// May only be called from `T::ResetOrigin`. - #[weight = 50_000_000] - pub fn reset_members(origin, members: Vec) { + #[pallet::weight(50_000_000)] + pub fn reset_members(origin: OriginFor, members: Vec) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; let mut members = members; @@ -191,11 +247,12 @@ decl_module! 
{ >::mutate(|m| { T::MembershipChanged::set_members_sorted(&members[..], m); Self::rejig_prime(&members); + Self::maybe_warn_max_members(&members); *m = members; }); - - Self::deposit_event(RawEvent::MembersReset); + Self::deposit_event(Event::MembersReset); + Ok(()) } /// Swap out the sending member for some other key `new`. @@ -203,16 +260,19 @@ decl_module! { /// May only be called from `Signed` origin of a current member. /// /// Prime membership is passed from the origin account to `new`, if extant. - #[weight = 50_000_000] - pub fn change_key(origin, new: T::AccountId) { + #[pallet::weight(50_000_000)] + pub fn change_key(origin: OriginFor, new: T::AccountId) -> DispatchResult { let remove = ensure_signed(origin)?; if remove != new { let mut members = >::get(); - let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; + let location = + members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; members[location] = new.clone(); members.sort(); + + Self::maybe_warn_max_members(&members); >::put(&members); T::MembershipChanged::change_members_sorted( @@ -227,33 +287,36 @@ decl_module! { } } - Self::deposit_event(RawEvent::KeyChanged); + Self::deposit_event(Event::KeyChanged); + Ok(()) } /// Set the prime member. Must be a current member. /// /// May only be called from `T::PrimeOrigin`. - #[weight = 50_000_000] - pub fn set_prime(origin, who: T::AccountId) { + #[pallet::weight(50_000_000)] + pub fn set_prime(origin: OriginFor, who: T::AccountId) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; Prime::::put(&who); T::MembershipChanged::set_prime(Some(who)); + Ok(()) } /// Remove the prime member if it exists. /// /// May only be called from `T::PrimeOrigin`. 
- #[weight = 50_000_000] - pub fn clear_prime(origin) { + #[pallet::weight(50_000_000)] + pub fn clear_prime(origin: OriginFor) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; Prime::::kill(); T::MembershipChanged::set_prime(None); + Ok(()) } } } -impl, I: Instance> Module { +impl, I: 'static> Pallet { fn rejig_prime(members: &[T::AccountId]) { if let Some(prime) = Prime::::get() { match members.binary_search(&prime) { @@ -262,9 +325,26 @@ impl, I: Instance> Module { } } } + + fn maybe_warn_max_members(members: &[T::AccountId]) { + if members.len() as u32 > T::MaxMembers::get() { + log::error!( + target: "runtime::membership", + "maximum number of members used for weight is exceeded, weights can be underestimated [{} > {}].", + members.len(), + T::MaxMembers::get(), + ) + } + } +} + +impl, I: 'static> Contains for Pallet { + fn contains(t: &T::AccountId) -> bool { + Self::members().binary_search(t).is_ok() + } } -impl, I: Instance> Contains for Module { +impl, I: 'static> SortedMembers for Pallet { fn sorted_members() -> Vec { Self::members() } @@ -274,57 +354,214 @@ impl, I: Instance> Contains for Module { } } +#[cfg(feature = "runtime-benchmarks")] +mod benchmark { + use super::{Pallet as Membership, *}; + use frame_benchmarking::{ + account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist, + }; + use frame_support::{assert_ok, traits::EnsureOrigin}; + use frame_system::RawOrigin; + + const SEED: u32 = 0; + + fn set_members, I: 'static>(members: Vec, prime: Option) { + let reset_origin = T::ResetOrigin::successful_origin(); + let prime_origin = T::PrimeOrigin::successful_origin(); + + assert_ok!(>::reset_members(reset_origin, members.clone())); + if let Some(prime) = prime.map(|i| members[i].clone()) { + assert_ok!(>::set_prime(prime_origin, prime)); + } else { + assert_ok!(>::clear_prime(prime_origin)); + } + } + + benchmarks_instance_pallet! { + add_member { + let m in 1 .. T::MaxMembers::get(); + + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), None); + let new_member = account::("add", m, SEED); + }: { + assert_ok!(>::add_member(T::AddOrigin::successful_origin(), new_member.clone())); + } + verify { + assert!(>::get().contains(&new_member)); + #[cfg(test)] crate::tests::clean(); + } + + // the case of no prime or the prime being removed is surely cheaper than the case of + // reporting a new prime via `MembershipChanged`. + remove_member { + let m in 2 .. T::MaxMembers::get(); + + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), Some(members.len() - 1)); + + let to_remove = members.first().cloned().unwrap(); + }: { + assert_ok!(>::remove_member(T::RemoveOrigin::successful_origin(), to_remove.clone())); + } verify { + assert!(!>::get().contains(&to_remove)); + // prime is rejigged + assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + // we remove a non-prime to make sure it needs to be set again. + swap_member { + let m in 2 .. 
T::MaxMembers::get(); + + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), Some(members.len() - 1)); + let add = account::("member", m, SEED); + let remove = members.first().cloned().unwrap(); + }: { + assert_ok!(>::swap_member( + T::SwapOrigin::successful_origin(), + remove.clone(), + add.clone(), + )); + } verify { + assert!(!>::get().contains(&remove)); + assert!(>::get().contains(&add)); + // prime is rejigged + assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + // er keep the prime common between incoming and outgoing to make sure it is rejigged. + reset_member { + let m in 1 .. T::MaxMembers::get(); + + let members = (1..m+1).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), Some(members.len() - 1)); + let mut new_members = (m..2*m).map(|i| account("member", i, SEED)).collect::>(); + }: { + assert_ok!(>::reset_members(T::ResetOrigin::successful_origin(), new_members.clone())); + } verify { + new_members.sort(); + assert_eq!(>::get(), new_members); + // prime is rejigged + assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + change_key { + let m in 1 .. T::MaxMembers::get(); + + // worse case would be to change the prime + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let prime = members.last().cloned().unwrap(); + set_members::(members.clone(), Some(members.len() - 1)); + + let add = account::("member", m, SEED); + whitelist!(prime); + }: { + assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add.clone())); + } verify { + assert!(!>::get().contains(&prime)); + assert!(>::get().contains(&add)); + // prime is rejigged + assert_eq!(>::get().unwrap(), add); + #[cfg(test)] crate::tests::clean(); + } + + set_prime { + let m in 1 .. T::MaxMembers::get(); + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let prime = members.last().cloned().unwrap(); + set_members::(members, None); + }: { + assert_ok!(>::set_prime(T::PrimeOrigin::successful_origin(), prime)); + } verify { + assert!(>::get().is_some()); + assert!(::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + clear_prime { + let m in 1 .. T::MaxMembers::get(); + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let prime = members.last().cloned().unwrap(); + set_members::(members, None); + }: { + assert_ok!(>::clear_prime(T::PrimeOrigin::successful_origin())); + } verify { + assert!(>::get().is_none()); + assert!(::get_prime().is_none()); + #[cfg(test)] crate::tests::clean(); + } + } + + impl_benchmark_test_suite!(Membership, crate::tests::new_bench_ext(), crate::tests::Test); +} + #[cfg(test)] mod tests { use super::*; + use crate as pallet_membership; + + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + }; - use std::cell::RefCell; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types + assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::GenesisBuild, }; - use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; use frame_system::EnsureSignedBy; - impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Membership: pallet_membership::{Pallet, Call, Storage, Config, Event}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const MaxMembers: u32 = 10; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); + pub static Members: Vec = vec![]; + pub static Prime: Option = None; } - impl frame_system::Trait for Test { - type BaseCallFilter = (); + + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } ord_parameter_types! { pub const One: u64 = 1; @@ -334,15 +571,10 @@ mod tests { pub const Five: u64 = 5; } - thread_local! 
{ - static MEMBERS: RefCell> = RefCell::new(vec![]); - static PRIME: RefCell> = RefCell::new(None); - } - pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); + let mut old_plus_incoming = Members::get(); old_plus_incoming.extend_from_slice(incoming); old_plus_incoming.sort(); let mut new_plus_outgoing = new.to_vec(); @@ -350,21 +582,25 @@ mod tests { new_plus_outgoing.sort(); assert_eq!(old_plus_incoming, new_plus_outgoing); - MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); - PRIME.with(|p| *p.borrow_mut() = None); + Members::set(new.to_vec()); + Prime::set(None); } fn set_prime(who: Option) { - PRIME.with(|p| *p.borrow_mut() = who); + Prime::set(who); + } + fn get_prime() -> Option { + Prime::get() } } + impl InitializeMembers for TestChangeMembers { fn initialize_members(members: &[u64]) { MEMBERS.with(|m| *m.borrow_mut() = members.to_vec()); } } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type AddOrigin = EnsureSignedBy; type RemoveOrigin = EnsureSignedBy; type SwapOrigin = EnsureSignedBy; @@ -372,20 +608,33 @@ mod tests { type PrimeOrigin = EnsureSignedBy; type MembershipInitialized = TestChangeMembers; type MembershipChanged = TestChangeMembers; + type MaxMembers = MaxMembers; + type WeightInfo = (); } - type Membership = Module; - - fn new_test_ext() -> sp_io::TestExternalities { + pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. - GenesisConfig::{ + pallet_membership::GenesisConfig:: { members: vec![10, 20, 30], - .. 
Default::default() - }.assimilate_storage(&mut t).unwrap(); + ..Default::default() + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } + #[cfg(feature = "runtime-benchmarks")] + pub(crate) fn new_bench_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default().build_storage::().unwrap().into() + } + + #[cfg(feature = "runtime-benchmarks")] + pub(crate) fn clean() { + Members::set(vec![]); + Prime::set(None); + } + #[test] fn query_membership_works() { new_test_ext().execute_with(|| { @@ -413,7 +662,10 @@ mod tests { fn add_member_works() { new_test_ext().execute_with(|| { assert_noop!(Membership::add_member(Origin::signed(5), 15), BadOrigin); - assert_noop!(Membership::add_member(Origin::signed(1), 10), Error::::AlreadyMember); + assert_noop!( + Membership::add_member(Origin::signed(1), 10), + Error::::AlreadyMember + ); assert_ok!(Membership::add_member(Origin::signed(1), 15)); assert_eq!(Membership::members(), vec![10, 15, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); @@ -424,7 +676,10 @@ mod tests { fn remove_member_works() { new_test_ext().execute_with(|| { assert_noop!(Membership::remove_member(Origin::signed(5), 20), BadOrigin); - assert_noop!(Membership::remove_member(Origin::signed(2), 15), Error::::NotMember); + assert_noop!( + Membership::remove_member(Origin::signed(2), 15), + Error::::NotMember + ); assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_ok!(Membership::remove_member(Origin::signed(2), 20)); assert_eq!(Membership::members(), vec![10, 30]); @@ -438,8 +693,14 @@ mod tests { fn swap_member_works() { new_test_ext().execute_with(|| { assert_noop!(Membership::swap_member(Origin::signed(5), 10, 25), BadOrigin); - assert_noop!(Membership::swap_member(Origin::signed(3), 15, 25), Error::::NotMember); - assert_noop!(Membership::swap_member(Origin::signed(3), 10, 30), Error::::AlreadyMember); + assert_noop!( + Membership::swap_member(Origin::signed(3), 15, 25), + Error::::NotMember + ); + assert_noop!( + Membership::swap_member(Origin::signed(3), 10, 30), + Error::::AlreadyMember + ); assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_ok!(Membership::swap_member(Origin::signed(3), 20, 20)); @@ -469,8 +730,14 @@ mod tests { fn change_key_works() { new_test_ext().execute_with(|| { assert_ok!(Membership::set_prime(Origin::signed(5), 10)); - assert_noop!(Membership::change_key(Origin::signed(3), 25), Error::::NotMember); - assert_noop!(Membership::change_key(Origin::signed(10), 20), Error::::AlreadyMember); + assert_noop!( + Membership::change_key(Origin::signed(3), 25), + Error::::NotMember + ); + assert_noop!( + Membership::change_key(Origin::signed(10), 20), + Error::::AlreadyMember + ); assert_ok!(Membership::change_key(Origin::signed(10), 40)); assert_eq!(Membership::members(), vec![20, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); @@ -507,4 +774,29 @@ mod tests { assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); }); } + + #[test] + #[should_panic(expected = "Members cannot contain duplicate accounts.")] + fn genesis_build_panics_with_duplicate_members() { + pallet_membership::GenesisConfig:: { + members: vec![1, 2, 3, 1], + phantom: Default::default(), + } + .build_storage() + .unwrap(); + } + + #[test] + fn migration_v4() { + new_test_ext().execute_with(|| { + use frame_support::traits::PalletInfo; + let old_pallet_name = + ::PalletInfo::name::().unwrap(); + let new_pallet_name = "NewMembership"; + + 
crate::migrations::v4::pre_migrate::<Membership, _>(old_pallet_name, new_pallet_name); + crate::migrations::v4::migrate::<Test, Membership, _>(old_pallet_name, new_pallet_name); + crate::migrations::v4::post_migrate::<Membership, _>(old_pallet_name, new_pallet_name); + }); + } } diff --git a/frame/membership/src/migrations/mod.rs b/frame/membership/src/migrations/mod.rs new file mode 100644 index 0000000000000..26d07a0cd5ac8 --- /dev/null +++ b/frame/membership/src/migrations/mod.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Version 4. +pub mod v4; diff --git a/frame/membership/src/migrations/v4.rs b/frame/membership/src/migrations/v4.rs new file mode 100644 index 0000000000000..9f4b15e468b38 --- /dev/null +++ b/frame/membership/src/migrations/v4.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_core::hexdisplay::HexDisplay; +use sp_io::{hashing::twox_128, storage}; + +use frame_support::{ + traits::{ + Get, GetStorageVersion, PalletInfoAccess, StorageVersion, + STORAGE_VERSION_STORAGE_KEY_POSTFIX, + }, + weights::Weight, +}; + +/// Migrate the entire storage of this pallet to a new prefix. +/// +/// This new prefix must be the same as the one set in construct_runtime. For safety, use +/// `PalletInfo` to get it, as: +/// `<T as frame_system::Config>::PalletInfo::name::<MembershipPallet>`. +/// +/// The migration checks the storage version so that it does not run on storage that is already +/// up to date: the on-chain storage version must be less than 4 for the migration to trigger. +pub fn migrate<T: frame_system::Config, P: GetStorageVersion + PalletInfoAccess, N: AsRef<str>>( + old_pallet_name: N, + new_pallet_name: N, +) -> Weight { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = new_pallet_name.as_ref(); + + if new_pallet_name == old_pallet_name { + log::info!( + target: "runtime::membership", + "New pallet name is equal to the old prefix. No migration needs to be done.", + ); + return 0 + } + + let on_chain_storage_version =
<P as GetStorageVersion>::on_chain_storage_version(); + log::info!( + target: "runtime::membership", + "Running migration to v4 for membership with storage version {:?}", + on_chain_storage_version, + ); + + if on_chain_storage_version < 4 { + frame_support::storage::migration::move_pallet( + old_pallet_name.as_bytes(), + new_pallet_name.as_bytes(), + ); + log_migration("migration", old_pallet_name, new_pallet_name); + + StorageVersion::new(4).put::<P>();
+ <T as frame_system::Config>::BlockWeights::get().max_block + } else { + log::warn!( + target: "runtime::membership", + "Attempted to apply migration to v4 but failed because storage version is {:?}", + on_chain_storage_version, + ); + 0 + } +} + +/// Some checks prior to migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn pre_migrate<P: GetStorageVersion, N: AsRef<str>>(old_pallet_name: N, new_pallet_name: N) { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = new_pallet_name.as_ref(); + log_migration("pre-migration", old_pallet_name, new_pallet_name); + + let old_pallet_prefix = twox_128(old_pallet_name.as_bytes()); + assert!(storage::next_key(&old_pallet_prefix) + .map_or(true, |next_key| next_key.starts_with(&old_pallet_prefix))); + + let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); + let storage_version_key = + [&new_pallet_prefix, &twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX)[..]].concat(); + // ensure nothing is stored in the new prefix. + assert!( + storage::next_key(&new_pallet_prefix).map_or( + // either nothing is there + true, + // or we ensure that it has no common prefix with twox_128(new), + // or isn't the storage version that is already stored using the pallet name + |next_key| { + !next_key.starts_with(&new_pallet_prefix) || next_key == storage_version_key + }, + ), + "unexpected next_key({}) = {:?}", + new_pallet_name, + HexDisplay::from(&storage::next_key(&new_pallet_prefix).unwrap()), + ); + assert!(
<P as GetStorageVersion>::on_chain_storage_version() < 4); +} + +/// Some checks for after migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn post_migrate<P: GetStorageVersion, N: AsRef<str>>(old_pallet_name: N, new_pallet_name: N) { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = new_pallet_name.as_ref(); + log_migration("post-migration", old_pallet_name, new_pallet_name); + + let old_pallet_prefix = twox_128(old_pallet_name.as_bytes()); + #[cfg(test)] + { + let storage_version_key = + [&old_pallet_prefix, &twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX)[..]].concat(); + assert!(storage::next_key(&old_pallet_prefix) + .map_or(true, |next_key| !next_key.starts_with(&old_pallet_prefix) || + next_key == storage_version_key)); + } + #[cfg(not(test))] + { + // Assert that nothing remains at the old prefix + assert!(storage::next_key(&old_pallet_prefix) + .map_or(true, |next_key| !next_key.starts_with(&old_pallet_prefix))); + } + + let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); + // Assert that the storages have been moved to the new prefix + assert!(storage::next_key(&new_pallet_prefix) + .map_or(true, |next_key| next_key.starts_with(&new_pallet_prefix))); + + assert_eq!(
<P as GetStorageVersion>::on_chain_storage_version(), 4); +} + +fn log_migration(stage: &str, old_pallet_name: &str, new_pallet_name: &str) { + log::info!( + target: "runtime::membership", + "{}, prefix: '{}' ==> '{}'", + stage, + old_pallet_name, + new_pallet_name, + ); +} diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs new file mode 100644 index 0000000000000..81a1b073faac3 --- /dev/null +++ b/frame/membership/src/weights.rs @@ -0,0 +1,218 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_membership +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_membership +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/membership/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_membership. +pub trait WeightInfo { + fn add_member(m: u32, ) -> Weight; + fn remove_member(m: u32, ) -> Weight; + fn swap_member(m: u32, ) -> Weight; + fn reset_member(m: u32, ) -> Weight; + fn change_key(m: u32, ) -> Weight; + fn set_prime(m: u32, ) -> Weight; + fn clear_prime(m: u32, ) -> Weight; +} + +/// Weights for pallet_membership using the Substrate node and recommended hardware.
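+///
+/// A runtime selects this implementation through the pallet's `Config`. A minimal sketch
+/// (the `Runtime` name is an assumption of this example, not something this file defines):
+///
+/// ```ignore
+/// impl pallet_membership::Config for Runtime {
+///     // ... origins, membership hooks, `MaxMembers`, etc. ...
+///     type WeightInfo = pallet_membership::weights::SubstrateWeight<Runtime>;
+/// }
+/// ```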
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn add_member(m: u32, ) -> Weight { + (23_668_000 as Weight) + // Standard Error: 3_000 + .saturating_add((142_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn remove_member(m: u32, ) -> Weight { + (29_149_000 as Weight) + // Standard Error: 0 + .saturating_add((111_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn swap_member(m: u32, ) -> Weight { + (29_289_000 as Weight) + // Standard Error: 0 + .saturating_add((126_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn reset_member(m: u32, ) -> Weight { + (30_178_000 as Weight) + // Standard Error: 1_000 + .saturating_add((286_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:1) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn change_key(m: u32, ) -> Weight { + (31_049_000 as Weight) + // Standard Error: 0 + .saturating_add((121_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:0) + // Storage: Instance1Membership Prime (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn set_prime(m: u32, ) -> Weight { + (8_006_000 as Weight) + // Standard Error: 0 + .saturating_add((89_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Instance1Membership Prime (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn clear_prime(m: u32, ) -> Weight { + (3_452_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Instance1Membership Members (r:1 w:1) 
+ // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn add_member(m: u32, ) -> Weight { + (23_668_000 as Weight) + // Standard Error: 3_000 + .saturating_add((142_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn remove_member(m: u32, ) -> Weight { + (29_149_000 as Weight) + // Standard Error: 0 + .saturating_add((111_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn swap_member(m: u32, ) -> Weight { + (29_289_000 as Weight) + // Standard Error: 0 + .saturating_add((126_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:0) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn reset_member(m: u32, ) -> Weight { + (30_178_000 as Weight) + // Standard Error: 1_000 + .saturating_add((286_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:1) + // Storage: Instance2Collective Proposals (r:1 w:0) + // Storage: Instance1Membership Prime (r:1 w:1) + // Storage: Instance2Collective Members (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn change_key(m: u32, ) -> Weight { + (31_049_000 as Weight) + // Standard Error: 0 + .saturating_add((121_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Instance1Membership Members (r:1 w:0) + // Storage: Instance1Membership Prime (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn set_prime(m: u32, ) -> Weight { + (8_006_000 as Weight) + // Standard Error: 0 + .saturating_add((89_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Instance1Membership Prime (r:0 w:1) + // Storage: Instance2Collective Prime (r:0 w:1) + fn clear_prime(m: u32, ) -> Weight { + (3_452_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml new file mode 100644 index 0000000000000..02b4be182ef82 --- /dev/null +++ b/frame/merkle-mountain-range/Cargo.toml @@ -0,0 +1,50 @@ 
+[package] +name = "pallet-mmr" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Merkle Mountain Range pallet." + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +mmr-lib = { package = "ckb-merkle-mountain-range", default-features = false, version = "0.3.1" } + +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } + +pallet-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "./primitives" } + +[dev-dependencies] +env_logger = "0.9" +hex-literal = "0.3" + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "mmr-lib/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "pallet-mmr-primitives/std", +] +runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/merkle-mountain-range/primitives/Cargo.toml b/frame/merkle-mountain-range/primitives/Cargo.toml new file mode 100644 index 0000000000000..07b2f8ae3a3a4 --- /dev/null +++ b/frame/merkle-mountain-range/primitives/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "pallet-mmr-primitives" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Merkle Mountain Range primitives." 
+ +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } +serde = { version = "1.0.126", optional = true, features = ["derive"] } + +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } + +[dev-dependencies] +hex-literal = "0.3" + +[features] +default = ["std"] +std = [ + "codec/std", + "log/std", + "serde", + "sp-api/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", + "frame-support/std", + "frame-system/std", +] diff --git a/frame/merkle-mountain-range/primitives/src/lib.rs b/frame/merkle-mountain-range/primitives/src/lib.rs new file mode 100644 index 0000000000000..dac57bd42cd35 --- /dev/null +++ b/frame/merkle-mountain-range/primitives/src/lib.rs @@ -0,0 +1,603 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Merkle Mountain Range primitive types. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +use frame_support::RuntimeDebug; +use sp_runtime::traits::{self, One, Saturating}; +use sp_std::fmt; +#[cfg(not(feature = "std"))] +use sp_std::prelude::Vec; + +/// A provider of the MMR's leaf data. +pub trait LeafDataProvider { + /// A type that should end up in the leaf of MMR. + type LeafData: FullLeaf + codec::Decode; + + /// The method to return leaf data that should be placed + /// in the leaf node appended MMR at this block. + /// + /// This is being called by the `on_initialize` method of + /// this pallet at the very beginning of each block. + fn leaf_data() -> Self::LeafData; +} + +impl LeafDataProvider for () { + type LeafData = (); + + fn leaf_data() -> Self::LeafData { + () + } +} + +/// The most common use case for MMRs is to store historical block hashes, +/// so that any point in time in the future we can receive a proof about some past +/// blocks without using excessive on-chain storage. +/// +/// Hence we implement the [LeafDataProvider] for [frame_system::Pallet]. Since the +/// current block hash is not available (since the block is not finished yet), +/// we use the `parent_hash` here along with parent block number. 
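+///
+/// Concretely: the leaf appended while block `N` is being processed is
+/// `(N - 1, hash_of_block_N_minus_1)`.
+///
+/// Providing custom leaf data follows the same shape. A minimal sketch (the
+/// `AnswerProvider` type is illustrative only, not part of this crate):
+///
+/// ```ignore
+/// pub struct AnswerProvider;
+/// impl LeafDataProvider for AnswerProvider {
+///     // `u64` satisfies `FullLeaf + Decode` via the blanket impl below.
+///     type LeafData = u64;
+///     fn leaf_data() -> Self::LeafData { 42 }
+/// }
+/// ```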
+impl LeafDataProvider for frame_system::Pallet { + type LeafData = (::BlockNumber, ::Hash); + + fn leaf_data() -> Self::LeafData { + (Self::block_number().saturating_sub(One::one()), Self::parent_hash()) + } +} + +/// New MMR root notification hook. +pub trait OnNewRoot { + /// Function called by the pallet in case new MMR root has been computed. + fn on_new_root(root: &Hash); +} + +/// No-op implementation of [OnNewRoot]. +impl OnNewRoot for () { + fn on_new_root(_root: &Hash) {} +} + +/// A full leaf content stored in the offchain-db. +pub trait FullLeaf: Clone + PartialEq + fmt::Debug { + /// Encode the leaf either in it's full or compact form. + /// + /// NOTE the encoding returned here MUST be `Decode`able into `FullLeaf`. + fn using_encoded R>(&self, f: F, compact: bool) -> R; +} + +impl FullLeaf for T { + fn using_encoded R>(&self, f: F, _compact: bool) -> R { + codec::Encode::using_encoded(self, f) + } +} + +/// An element representing either full data or it's hash. +/// +/// See [Compact] to see how it may be used in practice to reduce the size +/// of proofs in case multiple [LeafDataProvider]s are composed together. +/// This is also used internally by the MMR to differentiate leaf nodes (data) +/// and inner nodes (hashes). +/// +/// [DataOrHash::hash] method calculates the hash of this element in it's compact form, +/// so should be used instead of hashing the encoded form (which will always be non-compact). +#[derive(RuntimeDebug, Clone, PartialEq)] +pub enum DataOrHash { + /// Arbitrary data in it's full form. + Data(L), + /// A hash of some data. + Hash(H::Output), +} + +impl From for DataOrHash { + fn from(l: L) -> Self { + Self::Data(l) + } +} + +mod encoding { + use super::*; + + /// A helper type to implement [codec::Codec] for [DataOrHash]. + #[derive(codec::Encode, codec::Decode)] + enum Either { + Left(A), + Right(B), + } + + impl codec::Encode for DataOrHash { + fn encode_to(&self, dest: &mut T) { + match self { + Self::Data(l) => l.using_encoded( + |data| Either::<&[u8], &H::Output>::Left(data).encode_to(dest), + false, + ), + Self::Hash(h) => Either::<&[u8], &H::Output>::Right(h).encode_to(dest), + } + } + } + + impl codec::Decode for DataOrHash { + fn decode(value: &mut I) -> Result { + let decoded: Either, H::Output> = Either::decode(value)?; + Ok(match decoded { + Either::Left(l) => DataOrHash::Data(L::decode(&mut &*l)?), + Either::Right(r) => DataOrHash::Hash(r), + }) + } + } +} + +impl DataOrHash { + /// Retrieve a hash of this item. + /// + /// Depending on the node type it's going to either be a contained value for [DataOrHash::Hash] + /// node, or a hash of SCALE-encoded [DataOrHash::Data] data. + pub fn hash(&self) -> H::Output { + match *self { + Self::Data(ref leaf) => leaf.using_encoded(::hash, true), + Self::Hash(ref hash) => hash.clone(), + } + } +} + +/// A composition of multiple leaf elements with compact form representation. +/// +/// When composing together multiple [LeafDataProvider]s you will end up with +/// a tuple of `LeafData` that each element provides. +/// +/// However this will cause the leaves to have significant size, while for some +/// use cases it will be enough to prove only one element of the tuple. +/// That's the rationale for [Compact] struct. We wrap each element of the tuple +/// into [DataOrHash] and each tuple element is hashed first before constructing +/// the final hash of the entire tuple. This allows you to replace tuple elements +/// you don't care about with their hashes. 
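+///
+/// A sketch of the equivalence this buys, mirroring the `compact_should_work` test at the
+/// bottom of this file (note that the `hash` helper on `Compact` is test-only):
+///
+/// ```ignore
+/// type Leaf = DataOrHash<Keccak256, String>;
+/// let a = Leaf::Data("Hello World!".into());
+/// let b = Leaf::Data("".into());
+/// let full = Compact::<Keccak256, _>::new((a.clone(), b.clone()));
+/// let pruned = Compact::<Keccak256, _>::new((Leaf::Hash(a.hash()), Leaf::Hash(b.hash())));
+/// // Replacing tuple elements with their hashes does not change the tuple hash:
+/// assert_eq!(full.hash(), pruned.hash());
+/// ```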
+#[derive(RuntimeDebug, Clone, PartialEq)] +pub struct Compact { + /// Internal tuple representation. + pub tuple: T, + _hash: sp_std::marker::PhantomData, +} + +impl sp_std::ops::Deref for Compact { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.tuple + } +} + +impl Compact { + /// Create a new [Compact] wrapper for a tuple. + pub fn new(tuple: T) -> Self { + Self { tuple, _hash: Default::default() } + } +} + +impl codec::Decode for Compact { + fn decode(value: &mut I) -> Result { + T::decode(value).map(Compact::new) + } +} + +macro_rules! impl_leaf_data_for_tuple { + ( $( $name:ident : $id:tt ),+ ) => { + /// [FullLeaf] implementation for `Compact, ...)>` + impl FullLeaf for Compact, )+ )> where + H: traits::Hash, + $( $name: FullLeaf ),+ + { + fn using_encoded R>(&self, f: F, compact: bool) -> R { + if compact { + codec::Encode::using_encoded(&( + $( DataOrHash::::Hash(self.tuple.$id.hash()), )+ + ), f) + } else { + codec::Encode::using_encoded(&self.tuple, f) + } + } + } + + /// [LeafDataProvider] implementation for `Compact, ...)>` + /// + /// This provides a compact-form encoding for tuples wrapped in [Compact]. + impl LeafDataProvider for Compact where + H: traits::Hash, + $( $name: LeafDataProvider ),+ + { + type LeafData = Compact< + H, + ( $( DataOrHash, )+ ), + >; + + fn leaf_data() -> Self::LeafData { + let tuple = ( + $( DataOrHash::Data($name::leaf_data()), )+ + ); + Compact::new(tuple) + } + } + + /// [LeafDataProvider] implementation for `(Tuple, ...)` + /// + /// This provides regular (non-compactable) composition of [LeafDataProvider]s. + impl<$( $name ),+> LeafDataProvider for ( $( $name, )+ ) where + ( $( $name::LeafData, )+ ): FullLeaf, + $( $name: LeafDataProvider ),+ + { + type LeafData = ( $( $name::LeafData, )+ ); + + fn leaf_data() -> Self::LeafData { + ( + $( $name::leaf_data(), )+ + ) + } + } + } +} + +/// Test functions implementation for `Compact, ...)>` +#[cfg(test)] +impl Compact, DataOrHash)> +where + H: traits::Hash, + A: FullLeaf, + B: FullLeaf, +{ + /// Retrieve a hash of this item in it's compact form. + pub fn hash(&self) -> H::Output { + self.using_encoded(::hash, true) + } +} + +impl_leaf_data_for_tuple!(A:0); +impl_leaf_data_for_tuple!(A:0, B:1); +impl_leaf_data_for_tuple!(A:0, B:1, C:2); +impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); +impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); + +/// A MMR proof data for one of the leaves. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct Proof { + /// The index of the leaf the proof is for. + pub leaf_index: u64, + /// Number of leaves in MMR, when the proof was generated. + pub leaf_count: u64, + /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). + pub items: Vec, +} + +/// Merkle Mountain Range operation error. +#[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)] +pub enum Error { + /// Error while pushing new node. + Push, + /// Error getting the new root. + GetRoot, + /// Error commiting changes. + Commit, + /// Error during proof generation. + GenerateProof, + /// Proof verification error. + Verify, + /// Leaf not found in the storage. + LeafNotFound, +} + +impl Error { + #![allow(unused_variables)] + /// Consume given error `e` with `self` and generate a native log entry with error details. 
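+	///
+	/// Intended to be used at the error site, e.g. `.map_err(|e| Error::GetRoot.log_error(e))`
+	/// (illustrative; the pallet internals are where this pattern applies).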
+ pub fn log_error(self, e: impl fmt::Debug) -> Self { + log::error!( + target: "runtime::mmr", + "[{:?}] MMR error: {:?}", + self, + e, + ); + self + } + + /// Consume given error `e` with `self` and generate a native log entry with error details. + pub fn log_debug(self, e: impl fmt::Debug) -> Self { + log::debug!( + target: "runtime::mmr", + "[{:?}] MMR error: {:?}", + self, + e, + ); + self + } +} + +/// A helper type to allow using arbitrary SCALE-encoded leaf data in the RuntimeApi. +/// +/// The point is to be able to verify MMR proofs from external MMRs, where we don't +/// know the exact leaf type, but it's enough for us to have it SCALE-encoded. +/// +/// Note the leaf type should be encoded in its compact form when passed through this type. +/// See [FullLeaf] documentation for details. +/// +/// This type does not implement SCALE encoding/decoding on purpose to avoid confusion, +/// it would have to be SCALE-compatible with the concrete leaf type, but due to SCALE limitations +/// it's not possible to know how many bytes the encoding of concrete leaf type uses. +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[derive(RuntimeDebug, Clone, PartialEq)] +pub struct OpaqueLeaf( + /// Raw bytes of the leaf type encoded in its compact form. + /// + /// NOTE it DOES NOT include length prefix (like `Vec` encoding would). + #[cfg_attr(feature = "std", serde(with = "sp_core::bytes"))] + pub Vec, +); + +impl OpaqueLeaf { + /// Convert a concrete MMR leaf into an opaque type. + pub fn from_leaf(leaf: &T) -> Self { + let encoded_leaf = leaf.using_encoded(|d| d.to_vec(), true); + OpaqueLeaf::from_encoded_leaf(encoded_leaf) + } + + /// Create a `OpaqueLeaf` given raw bytes of compact-encoded leaf. + pub fn from_encoded_leaf(encoded_leaf: Vec) -> Self { + OpaqueLeaf(encoded_leaf) + } + + /// Attempt to decode the leaf into expected concrete type. + pub fn try_decode(&self) -> Option { + codec::Decode::decode(&mut &*self.0).ok() + } +} + +impl FullLeaf for OpaqueLeaf { + fn using_encoded R>(&self, f: F, _compact: bool) -> R { + f(&self.0) + } +} + +/// A type-safe wrapper for the concrete leaf type. +/// +/// This structure serves merely to avoid passing raw `Vec` around. +/// It must be `Vec`-encoding compatible. +/// +/// It is different from [`OpaqueLeaf`], because it does implement `Codec` +/// and the encoding has to match raw `Vec` encoding. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, PartialEq, Eq)] +pub struct EncodableOpaqueLeaf(pub Vec); + +impl EncodableOpaqueLeaf { + /// Convert a concrete leaf into encodable opaque version. + pub fn from_leaf(leaf: &T) -> Self { + let opaque = OpaqueLeaf::from_leaf(leaf); + Self::from_opaque_leaf(opaque) + } + + /// Given an opaque leaf, make it encodable. + pub fn from_opaque_leaf(opaque: OpaqueLeaf) -> Self { + Self(opaque.0) + } + + /// Try to convert into a [OpaqueLeaf]. + pub fn into_opaque_leaf(self) -> OpaqueLeaf { + // wrap into `OpaqueLeaf` type + OpaqueLeaf::from_encoded_leaf(self.0) + } +} + +sp_api::decl_runtime_apis! { + /// API to interact with MMR pallet. + pub trait MmrApi { + /// Generate MMR proof for a leaf under given index. + fn generate_proof(leaf_index: u64) -> Result<(EncodableOpaqueLeaf, Proof), Error>; + + /// Verify MMR proof against on-chain MMR. + /// + /// Note this function will use on-chain MMR root hash and check if the proof + /// matches the hash. + /// See [Self::verify_proof_stateless] for a stateless verifier. 
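+		///
+		/// From the node side this is reachable through the runtime API, roughly as
+		/// `client.runtime_api().verify_proof(&at, leaf, proof)` (a sketch; the `client`
+		/// handle and block id `at` are assumptions taken from the calling context).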
+ fn verify_proof(leaf: EncodableOpaqueLeaf, proof: Proof) -> Result<(), Error>; + + /// Verify MMR proof against given root hash. + /// + /// Note this function does not require any on-chain storage - the + /// proof is verified against given MMR root hash. + /// + /// The leaf data is expected to be encoded in it's compact form. + fn verify_proof_stateless(root: Hash, leaf: EncodableOpaqueLeaf, proof: Proof) + -> Result<(), Error>; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use codec::Decode; + use sp_core::H256; + use sp_runtime::traits::Keccak256; + + pub(crate) fn hex(s: &str) -> H256 { + s.parse().unwrap() + } + + type Test = DataOrHash; + type TestCompact = Compact; + type TestProof = Proof<::Output>; + + #[test] + fn should_encode_decode_proof() { + // given + let proof: TestProof = Proof { + leaf_index: 5, + leaf_count: 10, + items: vec![ + hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + hex("d3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + hex("e3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), + ], + }; + + // when + let encoded = codec::Encode::encode(&proof); + let decoded = TestProof::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(proof)); + } + + #[test] + fn should_encode_decode_correctly_if_no_compact() { + // given + let cases = vec![ + Test::Data("Hello World!".into()), + Test::Hash(hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")), + Test::Data("".into()), + Test::Data("3e48d6bcd417fb22e044747242451e2c0f3e602d1bcad2767c34808621956417".into()), + ]; + + // when + let encoded = cases.iter().map(codec::Encode::encode).collect::>(); + + let decoded = encoded.iter().map(|x| Test::decode(&mut &**x)).collect::>(); + + // then + assert_eq!( + decoded, + cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>() + ); + // check encoding correctness + assert_eq!(&encoded[0], &hex_literal::hex!("00343048656c6c6f20576f726c6421")); + assert_eq!( + encoded[1].as_slice(), + hex_literal::hex!("01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd") + .as_ref() + ); + } + + #[test] + fn should_return_the_hash_correctly() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Hash(hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")); + + // when + let a = a.hash(); + let b = b.hash(); + + // then + assert_eq!(a, hex("a9c321be8c24ba4dc2bd73f5300bde67dc57228ab8b68b607bb4c39c5374fac9")); + assert_eq!(b, hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd")); + } + + #[test] + fn compact_should_work() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + // when + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new((Test::Hash(a.hash()), Test::Hash(b.hash()))); + + // then + assert_eq!(c.hash(), d.hash()); + } + + #[test] + fn compact_should_encode_decode_correctly() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new((Test::Hash(a.hash()), Test::Hash(b.hash()))); + let cases = vec![c, d.clone()]; + + // when + let encoded_compact = + cases.iter().map(|c| c.using_encoded(|x| x.to_vec(), true)).collect::>(); + + let encoded = + cases.iter().map(|c| c.using_encoded(|x| x.to_vec(), false)).collect::>(); + + let decoded_compact = encoded_compact + .iter() + .map(|x| 
TestCompact::decode(&mut &**x)) + .collect::>(); + + let decoded = encoded.iter().map(|x| TestCompact::decode(&mut &**x)).collect::>(); + + // then + assert_eq!( + decoded, + cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>() + ); + + assert_eq!(decoded_compact, vec![Ok(d.clone()), Ok(d.clone())]); + } + + #[test] + fn opaque_leaves_should_be_full_leaf_compatible() { + // given + let a = Test::Data("Hello World!".into()); + let b = Test::Data("".into()); + + let c: TestCompact = Compact::new((a.clone(), b.clone())); + let d: TestCompact = Compact::new((Test::Hash(a.hash()), Test::Hash(b.hash()))); + let cases = vec![c, d.clone()]; + + let encoded_compact = cases + .iter() + .map(|c| c.using_encoded(|x| x.to_vec(), true)) + .map(OpaqueLeaf::from_encoded_leaf) + .collect::>(); + + let opaque = cases.iter().map(OpaqueLeaf::from_leaf).collect::>(); + + // then + assert_eq!(encoded_compact, opaque); + } + + #[test] + fn encode_opaque_leaf_should_be_scale_compatible() { + use codec::Encode; + + // given + let a = Test::Data("Hello World!".into()); + let case1 = EncodableOpaqueLeaf::from_leaf(&a); + let case2 = EncodableOpaqueLeaf::from_opaque_leaf(OpaqueLeaf(a.encode())); + let case3 = a.encode().encode(); + + // when + let encoded = vec![&case1, &case2].into_iter().map(|x| x.encode()).collect::>(); + let decoded = vec![&*encoded[0], &*encoded[1], &*case3] + .into_iter() + .map(|x| EncodableOpaqueLeaf::decode(&mut &*x)) + .collect::>(); + + // then + assert_eq!(case1, case2); + assert_eq!(encoded[0], encoded[1]); + // then encoding should also match double-encoded leaf. + assert_eq!(encoded[0], case3); + + assert_eq!(decoded[0], decoded[1]); + assert_eq!(decoded[1], decoded[2]); + assert_eq!(decoded[0], Ok(case2)); + assert_eq!(decoded[1], Ok(case1)); + } +} diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml new file mode 100644 index 0000000000000..5a0f114e50173 --- /dev/null +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "pallet-mmr-rpc" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Node-specific RPC methods for interaction with Merkle Mountain Range pallet." +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +serde = { version = "1.0.126", features = ["derive"] } + +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } + +pallet-mmr-primitives = { version = "4.0.0-dev", path = "../primitives" } + +[dev-dependencies] +serde_json = "1.0.68" diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs new file mode 100644 index 0000000000000..4719893778f6a --- /dev/null +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -0,0 +1,208 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![warn(missing_docs)] + +//! Node-specific RPC methods for interaction with Merkle Mountain Range pallet. + +use std::sync::Arc; + +use codec::{Codec, Encode}; +use jsonrpc_core::{Error, ErrorCode, Result}; +use jsonrpc_derive::rpc; +use serde::{Deserialize, Serialize}; + +use pallet_mmr_primitives::{Error as MmrError, Proof}; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_core::Bytes; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; + +pub use pallet_mmr_primitives::MmrApi as MmrRuntimeApi; + +/// Retrieved MMR leaf and its proof. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct LeafProof { + /// Block hash the proof was generated for. + pub block_hash: BlockHash, + /// SCALE-encoded leaf data. + pub leaf: Bytes, + /// SCALE-encoded proof data. See [pallet_mmr_primitives::Proof]. + pub proof: Bytes, +} + +impl LeafProof { + /// Create new `LeafProof` from given concrete `leaf` and `proof`. + pub fn new(block_hash: BlockHash, leaf: Leaf, proof: Proof) -> Self + where + Leaf: Encode, + MmrHash: Encode, + { + Self { block_hash, leaf: Bytes(leaf.encode()), proof: Bytes(proof.encode()) } + } +} + +/// MMR RPC methods. +#[rpc] +pub trait MmrApi { + /// Generate MMR proof for given leaf index. + /// + /// This method calls into a runtime with MMR pallet included and attempts to generate + /// MMR proof for leaf at given `leaf_index`. + /// Optionally, a block hash at which the runtime should be queried can be specified. + /// + /// Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of + /// the leaf). Both parameters are SCALE-encoded. + #[rpc(name = "mmr_generateProof")] + fn generate_proof( + &self, + leaf_index: u64, + at: Option, + ) -> Result>; +} + +/// An implementation of MMR specific RPC methods. +pub struct Mmr { + client: Arc, + _marker: std::marker::PhantomData, +} + +impl Mmr { + /// Create new `Mmr` with the given reference to the client. + pub fn new(client: Arc) -> Self { + Self { client, _marker: Default::default() } + } +} + +impl MmrApi<::Hash> for Mmr +where + Block: BlockT, + C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + C::Api: MmrRuntimeApi, + MmrHash: Codec + Send + Sync + 'static, +{ + fn generate_proof( + &self, + leaf_index: u64, + at: Option<::Hash>, + ) -> Result::Hash>> { + let api = self.client.runtime_api(); + let block_hash = at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash); + + let (leaf, proof) = api + .generate_proof_with_context( + &BlockId::hash(block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaf_index, + ) + .map_err(runtime_error_into_rpc_error)? 
+
+/// An implementation of MMR-specific RPC methods.
+pub struct Mmr<C, B> {
+	client: Arc<C>,
+	_marker: std::marker::PhantomData<B>,
+}
+
+impl<C, B> Mmr<C, B> {
+	/// Create a new `Mmr` with the given reference to the client.
+	pub fn new(client: Arc<C>) -> Self {
+		Self { client, _marker: Default::default() }
+	}
+}
+
+impl<C, Block, MmrHash> MmrApi<<Block as BlockT>::Hash> for Mmr<C, Block>
+where
+	Block: BlockT,
+	C: Send + Sync + 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block>,
+	C::Api: MmrRuntimeApi<Block, MmrHash>,
+	MmrHash: Codec + Send + Sync + 'static,
+{
+	fn generate_proof(
+		&self,
+		leaf_index: u64,
+		at: Option<<Block as BlockT>::Hash>,
+	) -> Result<LeafProof<<Block as BlockT>::Hash>> {
+		let api = self.client.runtime_api();
+		let block_hash = at.unwrap_or_else(||
+			// If the block hash is not supplied assume the best block.
+			self.client.info().best_hash);
+
+		let (leaf, proof) = api
+			.generate_proof_with_context(
+				&BlockId::hash(block_hash),
+				sp_core::ExecutionContext::OffchainCall(None),
+				leaf_index,
+			)
+			.map_err(runtime_error_into_rpc_error)?
+			.map_err(mmr_error_into_rpc_error)?;
+
+		Ok(LeafProof::new(block_hash, leaf, proof))
+	}
+}
+
+const RUNTIME_ERROR: i64 = 8000;
+const MMR_ERROR: i64 = 8010;
+
+/// Converts an MMR-specific error into an RPC error.
+fn mmr_error_into_rpc_error(err: MmrError) -> Error {
+	match err {
+		MmrError::LeafNotFound => Error {
+			code: ErrorCode::ServerError(MMR_ERROR + 1),
+			message: "Leaf was not found".into(),
+			data: Some(format!("{:?}", err).into()),
+		},
+		MmrError::GenerateProof => Error {
+			code: ErrorCode::ServerError(MMR_ERROR + 2),
+			message: "Error while generating the proof".into(),
+			data: Some(format!("{:?}", err).into()),
+		},
+		_ => Error {
+			code: ErrorCode::ServerError(MMR_ERROR),
+			message: "Unexpected MMR error".into(),
+			data: Some(format!("{:?}", err).into()),
+		},
+	}
+}
+
+/// Converts a runtime trap into an RPC error.
+fn runtime_error_into_rpc_error(err: impl std::fmt::Debug) -> Error {
+	Error {
+		code: ErrorCode::ServerError(RUNTIME_ERROR),
+		message: "Runtime trapped".into(),
+		data: Some(format!("{:?}", err).into()),
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use sp_core::H256;
+
+	#[test]
+	fn should_serialize_leaf_proof() {
+		// given
+		let leaf = vec![1_u8, 2, 3, 4];
+		let proof = Proof {
+			leaf_index: 1,
+			leaf_count: 9,
+			items: vec![H256::repeat_byte(1), H256::repeat_byte(2)],
+		};
+
+		let leaf_proof = LeafProof::new(H256::repeat_byte(0), leaf, proof);
+
+		// when
+		let actual = serde_json::to_string(&leaf_proof).unwrap();
+
+		// then
+		assert_eq!(
+			actual,
+			r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaf":"0x1001020304","proof":"0x010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"#
		);
+	}
+
+	#[test]
+	fn should_deserialize_leaf_proof() {
+		// given
+		let expected = LeafProof {
+			block_hash: H256::repeat_byte(0),
+			leaf: Bytes(vec![1_u8, 2, 3, 4].encode()),
+			proof: Bytes(
+				Proof {
+					leaf_index: 1,
+					leaf_count: 9,
+					items: vec![H256::repeat_byte(1), H256::repeat_byte(2)],
+				}
+				.encode(),
+			),
+		};
+
+		// when
+		let actual: LeafProof<H256> = serde_json::from_str(r#"{
+			"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
+			"leaf":"0x1001020304",
+			"proof":"0x010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"
+		}"#).unwrap();
+
+		// then
+		assert_eq!(actual, expected);
+	}
+}
diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs
new file mode 100644
index 0000000000000..2680b3d030067
--- /dev/null
+++ b/frame/merkle-mountain-range/src/benchmarking.rs
@@ -0,0 +1,40 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Benchmarks for the MMR pallet.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use crate::*;
+use frame_benchmarking::{benchmarks_instance_pallet, impl_benchmark_test_suite};
+use frame_support::traits::OnInitialize;
+
+benchmarks_instance_pallet! {
+	on_initialize {
+		let x in 1 .. 1_000;
+
+		let leaves = x as u64;
+	}: {
+		for b in 0..leaves {
+			Pallet::<T, I>::on_initialize((b as u32).into());
+		}
+	} verify {
+		assert_eq!(crate::NumberOfLeaves::<T, I>::get(), leaves);
+	}
+}
+
+impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::mock::Test);
diff --git a/frame/session/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs
similarity index 52%
rename from frame/session/src/default_weights.rs
rename to frame/merkle-mountain-range/src/default_weights.rs
index f3082981c78bf..6308975ce7d22 100644
--- a/frame/session/src/default_weights.rs
+++ b/frame/merkle-mountain-range/src/default_weights.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,22 +15,26 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6
+//! Default weights for the MMR pallet.
+//! This file was not auto-generated.
 
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-
-use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+use frame_support::weights::{
+	constants::{RocksDbWeight as DbWeight, WEIGHT_PER_NANOS},
+	Weight,
+};
 
 impl crate::WeightInfo for () {
-	fn set_keys() -> Weight {
-		(88_411_000 as Weight)
-			.saturating_add(DbWeight::get().reads(6 as Weight))
-			.saturating_add(DbWeight::get().writes(5 as Weight))
-	}
-	fn purge_keys() -> Weight {
-		(51_843_000 as Weight)
-			.saturating_add(DbWeight::get().reads(2 as Weight))
-			.saturating_add(DbWeight::get().writes(5 as Weight))
+	fn on_initialize(peaks: u64) -> Weight {
+		// Reading the parent hash.
+		let leaf_weight = DbWeight::get().reads(1);
+		// Blake2 hash cost.
+		let hash_weight = 2 * WEIGHT_PER_NANOS;
+		// No-op hook.
+		let hook_weight = 0;
+
+		leaf_weight
+			.saturating_add(hash_weight)
+			.saturating_add(hook_weight)
+			.saturating_add(DbWeight::get().reads_writes(2 + peaks, 2 + peaks))
 	}
 }
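Plugging numbers into the formula above makes its shape clearer. A standalone back-of-envelope evaluation; the RocksDB read/write costs used here (25 µs and 100 µs, with `WEIGHT_PER_NANOS = 1_000`) are assumptions matching the stock `RocksDbWeight` constants, not values taken from this diff:

```rust
const WEIGHT_PER_NANOS: u64 = 1_000;
const DB_READ: u64 = 25_000 * WEIGHT_PER_NANOS; // assumed RocksDB read cost
const DB_WRITE: u64 = 100_000 * WEIGHT_PER_NANOS; // assumed RocksDB write cost

fn on_initialize_weight(peaks: u64) -> u64 {
    let leaf_weight = DB_READ; // reading the parent hash
    let hash_weight = 2 * WEIGHT_PER_NANOS; // Blake2 hash cost
    let hook_weight = 0; // no-op `OnNewRoot` hook
    leaf_weight + hash_weight + hook_weight + (2 + peaks) * (DB_READ + DB_WRITE)
}

fn main() {
    // The variable part is (2 + peaks) reads and writes; since the number of
    // peaks is the pop-count of the leaf count, it grows at most like log2(leaves).
    for peaks in [1, 4, 8] {
        println!("peaks = {}: weight = {}", peaks, on_initialize_weight(peaks));
    }
}
```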
diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs
new file mode 100644
index 0000000000000..01bf1b2254f09
--- /dev/null
+++ b/frame/merkle-mountain-range/src/lib.rs
@@ -0,0 +1,274 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Merkle Mountain Range
+//!
+//! ## Overview
+//!
+//! Details on Merkle Mountain Ranges (MMRs) can be found here:
+//! <https://github.com/mimblewimble/grin/blob/master/doc/mmr.md>
+//!
+//! The MMR pallet constructs an MMR from leaf data obtained on every block from
+//! `LeafDataProvider`. MMR nodes are stored both in:
+//! - on-chain storage (hashes only; not the full leaf content);
+//! - off-chain storage - via the Indexing API we push the full leaf content (and all internal
+//!   nodes as well) to the Off-chain DB, so that the data is available to Off-chain workers.
+//!
+//! Hashing used for the MMR is configurable independently from the rest of the runtime (i.e. not
+//! using `frame_system::Hashing`), so something compatible with external chains can be used
+//! (like Keccak256 for Ethereum compatibility).
+//!
+//! Depending on the usage context (off-chain vs on-chain) the pallet is able to:
+//! - verify MMR leaf proofs (on-chain)
+//! - generate leaf proofs (off-chain)
+//!
+//! See [primitives::Compact] documentation for how you can optimize proof size for leaves that
+//! are composed of multiple elements.
+//!
+//! ## What for?
+//!
+//! The primary use case for this pallet is to generate MMR root hashes that can later on be used
+//! by the BEEFY protocol (see <https://github.com/paritytech/grandpa-bridge-gadget>).
+//! MMR root hashes along with BEEFY will make it possible to build Super Light Clients (SLCs) of
+//! Substrate-based chains. An SLC will be able to follow finality and can be shown proofs of
+//! events that happened on the source chain.
+//! In that case the chain which contains the pallet generates the root hashes and proofs, which
+//! are then presented to another chain acting as a light client, which can verify them.
+//!
+//! A secondary use case is to archive historical data while still being able to retrieve it on
+//! demand if needed. For instance, if parent block hashes are stored in the MMR, it's possible at
+//! any point in time to provide an MMR proof about some past block hash, while this data can be
+//! safely pruned from on-chain storage.
+//!
+//! NOTE: This pallet is experimental and not proven to work in production.
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::Encode;
+use frame_support::weights::Weight;
+use sp_runtime::traits;
+
+#[cfg(any(feature = "runtime-benchmarks", test))]
+mod benchmarking;
+mod default_weights;
+mod mmr;
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+
+pub use pallet::*;
+pub use pallet_mmr_primitives as primitives;
+
+pub trait WeightInfo {
+	fn on_initialize(peaks: u64) -> Weight;
+}
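Before the pallet module itself, it may help to see the node representation the docs above describe in isolation: on-chain storage keeps only hashes, the off-chain DB keeps full data, and both answer to the same digest. A standalone miniature of the primitives crate's `DataOrHash` idea, with a toy 8-byte hash in place of the configured hasher (the real type hashes the SCALE encoding of the data, which this sketch skips):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

fn toy_hash(data: &[u8]) -> [u8; 8] {
    let mut h = DefaultHasher::new();
    h.write(data);
    h.finish().to_be_bytes()
}

// Miniature of `DataOrHash`: a node is either pruned to its hash (on-chain
// form) or carried in full (off-chain form).
enum Node {
    Hash([u8; 8]),
    Data(Vec<u8>),
}

impl Node {
    fn hash(&self) -> [u8; 8] {
        match self {
            Node::Hash(h) => *h,
            Node::Data(d) => toy_hash(d),
        }
    }
}

fn main() {
    let full = Node::Data(b"leaf data".to_vec()); // off-chain form
    let pruned = Node::Hash(full.hash()); // on-chain form
    assert_eq!(full.hash(), pruned.hash());
    println!("{:02x?}", full.hash());
}
```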
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	/// This pallet's configuration trait.
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		/// Prefix for elements stored in the Off-chain DB via the Indexing API.
+		///
+		/// Each node of the MMR is inserted both on-chain and off-chain via the Indexing API.
+		/// The former does not store the full leaf content, just its compact version (a hash),
+		/// and some of the inner MMR nodes might be pruned from on-chain storage.
+		/// The latter will contain all the entries in their full form.
+		///
+		/// Each node is stored in the Off-chain DB under a key derived from
+		/// [`Self::INDEXING_PREFIX`] and its in-tree index (MMR position).
+		const INDEXING_PREFIX: &'static [u8];
+
+		/// A hasher type for MMR.
+		///
+		/// To construct tree nodes that result from merging (bagging) two peaks, depending on
+		/// the node kind we take either:
+		/// - the node (hash) itself, if it's an inner node;
+		/// - the hash of the SCALE-encoding of the leaf data, if it's a leaf node.
+		///
+		/// Then we create a tuple of these two hashes, SCALE-encode it (concatenate) and hash
+		/// it, to obtain a new MMR inner node - the new peak.
+		type Hashing: traits::Hash<Output = <Self as Config<I>>::Hash>;
+
+		/// The hashing output type.
+		///
+		/// This type is actually going to be stored in the MMR.
+		/// Required to be provided again, to satisfy trait bounds for storage items.
+		type Hash: traits::Member
+			+ traits::MaybeSerializeDeserialize
+			+ sp_std::fmt::Debug
+			+ sp_std::hash::Hash
+			+ AsRef<[u8]>
+			+ AsMut<[u8]>
+			+ Copy
+			+ Default
+			+ codec::Codec
+			+ codec::EncodeLike
+			+ scale_info::TypeInfo;
+
+		/// Data stored in the leaf nodes.
+		///
+		/// The [LeafData](primitives::LeafDataProvider) is responsible for returning the entire
+		/// leaf data that will be inserted into the MMR.
+		/// [LeafDataProvider](primitives::LeafDataProvider)s can be composed into tuples to put
+		/// multiple elements into the tree. In such a case it might be worth using
+		/// [primitives::Compact] to make the MMR proof for a single element of the tuple leaner.
+		///
+		/// Note that the leaf at each block MUST be unique. You may want to include a block hash
+		/// or block number as the easiest way to ensure that.
+		type LeafData: primitives::LeafDataProvider;
+
+		/// A hook to act on the new MMR root.
+		///
+		/// For some applications it might be beneficial to make the MMR root available
+		/// externally, apart from having it in the storage. For instance you might output it in
+		/// the header digest (see [`frame_system::Pallet::deposit_log`]) to make it available
+		/// for Light Clients. Hook complexity should be `O(1)`.
+		type OnNewRoot: primitives::OnNewRoot<<Self as Config<I>>::Hash>;
+
+		/// Weights for this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	/// Latest MMR Root hash.
+	#[pallet::storage]
+	#[pallet::getter(fn mmr_root_hash)]
+	pub type RootHash<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, <T as Config<I>>::Hash, ValueQuery>;
+
+	/// Current size of the MMR (number of leaves).
+	#[pallet::storage]
+	#[pallet::getter(fn mmr_leaves)]
+	pub type NumberOfLeaves<T, I = ()> = StorageValue<_, u64, ValueQuery>;
+
+	/// Hashes of the nodes in the MMR.
+	///
+	/// Note this collection only contains MMR peaks, the inner nodes (and leaves)
+	/// are pruned and only stored in the Offchain DB.
+	#[pallet::storage]
+	#[pallet::getter(fn mmr_peak)]
+	pub type Nodes<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, u64, <T as Config<I>>::Hash, OptionQuery>;
+
+	#[pallet::hooks]
+	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
+		fn on_initialize(_n: T::BlockNumber) -> Weight {
+			use primitives::LeafDataProvider;
+			let leaves = Self::mmr_leaves();
+			let peaks_before = mmr::utils::NodesUtils::new(leaves).number_of_peaks();
+			let data = T::LeafData::leaf_data();
+			// append new leaf to MMR
+			let mut mmr: ModuleMmr<mmr::storage::RuntimeStorage, T, I> = mmr::Mmr::new(leaves);
+			mmr.push(data).expect("MMR push never fails.");
+
+			// update the size
+			let (leaves, root) = mmr.finalize().expect("MMR finalize never fails.");
+			<T::OnNewRoot as primitives::OnNewRoot<_>>::on_new_root(&root);
+
+			<NumberOfLeaves<T, I>>::put(leaves);
+			<RootHash<T, I>>::put(root);
+
+			let peaks_after = mmr::utils::NodesUtils::new(leaves).number_of_peaks();
+			T::WeightInfo::on_initialize(peaks_before.max(peaks_after))
+		}
+	}
+}
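The `OnNewRoot` hook configured above is the pallet's one outward notification per block. A simplified, standalone stand-in for `primitives::OnNewRoot` (the real trait takes the hash type from the pallet config), showing its shape and the no-op `()` implementation the mock uses later:

```rust
// Simplified stand-in: called once per block with the freshly computed root.
trait OnNewRoot<Hash> {
    fn on_new_root(root: &Hash);
}

// The no-op impl for `()`, as used by the mock runtime further below.
impl<Hash> OnNewRoot<Hash> for () {
    fn on_new_root(_root: &Hash) {}
}

// A toy hook that just prints the root; a real runtime would rather deposit
// it into the header digest so light clients can pick it up.
struct DebugHook;
impl OnNewRoot<[u8; 32]> for DebugHook {
    fn on_new_root(root: &[u8; 32]) {
        println!("new MMR root: {:02x?}", root);
    }
}

fn main() {
    <() as OnNewRoot<[u8; 32]>>::on_new_root(&[0u8; 32]);
    <DebugHook as OnNewRoot<[u8; 32]>>::on_new_root(&[7u8; 32]);
}
```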
+
+/// An MMR specific to the pallet.
+type ModuleMmr<StorageType, T, I> = mmr::Mmr<StorageType, T, I, LeafOf<T, I>>;
+
+/// Leaf data.
+type LeafOf<T, I> = <<T as Config<I>>::LeafData as primitives::LeafDataProvider>::LeafData;
+
+/// Hashing used for the pallet.
+pub(crate) type HashingOf<T, I> = <T as Config<I>>::Hashing;
+
+/// Stateless MMR proof verification.
+///
+/// This function can be used to verify a received MMR proof (`proof`)
+/// for given leaf data (`leaf`) against a known MMR root hash (`root`).
+///
+/// The verification does not require any storage access.
+pub fn verify_leaf_proof<H, L>(
+	root: H::Output,
+	leaf: mmr::Node<H, L>,
+	proof: primitives::Proof<H::Output>,
+) -> Result<(), primitives::Error>
+where
+	H: traits::Hash,
+	L: primitives::FullLeaf,
+{
+	let is_valid = mmr::verify_leaf_proof::<H, L>(root, leaf, proof)?;
+	if is_valid {
+		Ok(())
+	} else {
+		Err(primitives::Error::Verify.log_debug(("The proof is incorrect.", root)))
+	}
+}
+
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
+	fn offchain_key(pos: u64) -> sp_std::prelude::Vec<u8> {
+		(T::INDEXING_PREFIX, pos).encode()
+	}
+
+	/// Generate an MMR proof for the given `leaf_index`.
+	///
+	/// Note this method can only be used from an off-chain context
+	/// (Offchain Worker or Runtime API call), since it requires
+	/// all the leaves to be present.
+	/// It may return an error or panic if used incorrectly.
+	pub fn generate_proof(
+		leaf_index: u64,
+	) -> Result<(LeafOf<T, I>, primitives::Proof<<T as Config<I>>::Hash>), primitives::Error> {
+		let mmr: ModuleMmr<mmr::storage::OffchainStorage, T, I> =
+			mmr::Mmr::new(Self::mmr_leaves());
+		mmr.generate_proof(leaf_index)
+	}
+
+	/// Verify an MMR proof for the given `leaf`.
+	///
+	/// This method is safe to use within the runtime code.
+	/// It will return `Ok(())` if the proof is valid
+	/// and an `Err(..)` if the MMR is inconsistent (some leaves are missing)
+	/// or the proof is invalid.
+	pub fn verify_leaf(
+		leaf: LeafOf<T, I>,
+		proof: primitives::Proof<<T as Config<I>>::Hash>,
+	) -> Result<(), primitives::Error> {
+		if proof.leaf_count > Self::mmr_leaves() ||
+			proof.leaf_count == 0 ||
+			proof.items.len() as u32 > mmr::utils::NodesUtils::new(proof.leaf_count).depth()
+		{
+			return Err(primitives::Error::Verify
+				.log_debug("The proof has incorrect number of leaves or proof items."))
+		}
+
+		let mmr: ModuleMmr<mmr::storage::RuntimeStorage, T, I> = mmr::Mmr::new(proof.leaf_count);
+		let is_valid = mmr.verify_leaf_proof(leaf, proof)?;
+		if is_valid {
+			Ok(())
+		} else {
+			Err(primitives::Error::Verify.log_debug("The proof is incorrect."))
+		}
+	}
+}
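The `offchain_key` helper above is nothing more than SCALE encoding of a `(prefix, position)` tuple. A standalone check of what it produces, using the `mmr-` prefix of the mock runtime further down; only the `parity-scale-codec` crate is required:

```rust
use codec::Encode; // parity-scale-codec

fn main() {
    let prefix: &[u8] = b"mmr-";
    let pos: u64 = 42;
    let key = (prefix, pos).encode();

    // Compact length prefix for 4 bytes (4 << 2 = 0x10), the prefix bytes,
    // then the position as a little-endian u64.
    assert_eq!(key, vec![0x10, b'm', b'm', b'r', b'-', 42, 0, 0, 0, 0, 0, 0, 0]);
    println!("{:02x?}", key);
}
```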
diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs
new file mode 100644
index 0000000000000..d5036e58f432e
--- /dev/null
+++ b/frame/merkle-mountain-range/src/mmr/mmr.rs
@@ -0,0 +1,162 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	mmr::{
+		storage::{OffchainStorage, RuntimeStorage, Storage},
+		utils::NodesUtils,
+		Hasher, Node, NodeOf,
+	},
+	primitives::{self, Error},
+	Config, HashingOf,
+};
+#[cfg(not(feature = "std"))]
+use sp_std::vec;
+
+/// Stateless verification of the leaf proof.
+pub fn verify_leaf_proof<H, L>(
+	root: H::Output,
+	leaf: Node<H, L>,
+	proof: primitives::Proof<H::Output>,
+) -> Result<bool, Error>
+where
+	H: sp_runtime::traits::Hash,
+	L: primitives::FullLeaf,
+{
+	let size = NodesUtils::new(proof.leaf_count).size();
+	let leaf_position = mmr_lib::leaf_index_to_pos(proof.leaf_index);
+
+	let p = mmr_lib::MerkleProof::<Node<H, L>, Hasher<H, L>>::new(
+		size,
+		proof.items.into_iter().map(Node::Hash).collect(),
+	);
+	p.verify(Node::Hash(root), vec![(leaf_position, leaf)])
+		.map_err(|e| Error::Verify.log_debug(e))
+}
+
+/// A wrapper around an MMR library to expose limited functionality.
+///
+/// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage)
+/// vs [Off-chain](crate::mmr::storage::OffchainStorage)).
+pub struct Mmr<StorageType, T, I, L>
+where
+	T: Config<I>,
+	I: 'static,
+	L: primitives::FullLeaf,
+	Storage<StorageType, T, I, L>: mmr_lib::MMRStore<NodeOf<T, I, L>>,
+{
+	mmr: mmr_lib::MMR<NodeOf<T, I, L>, Hasher<HashingOf<T, I>, L>, Storage<StorageType, T, I, L>>,
+	leaves: u64,
+}
+
+impl<StorageType, T, I, L> Mmr<StorageType, T, I, L>
+where
+	T: Config<I>,
+	I: 'static,
+	L: primitives::FullLeaf,
+	Storage<StorageType, T, I, L>: mmr_lib::MMRStore<NodeOf<T, I, L>>,
+{
+	/// Create a pointer to an existing MMR with the given number of leaves.
+	pub fn new(leaves: u64) -> Self {
+		let size = NodesUtils::new(leaves).size();
+		Self { mmr: mmr_lib::MMR::new(size, Default::default()), leaves }
+	}
+
+	/// Verify proof of a single leaf.
+	pub fn verify_leaf_proof(
+		&self,
+		leaf: L,
+		proof: primitives::Proof<<T as Config<I>>::Hash>,
+	) -> Result<bool, Error> {
+		let p = mmr_lib::MerkleProof::<NodeOf<T, I, L>, Hasher<HashingOf<T, I>, L>>::new(
+			self.mmr.mmr_size(),
+			proof.items.into_iter().map(Node::Hash).collect(),
+		);
+		let position = mmr_lib::leaf_index_to_pos(proof.leaf_index);
+		let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?;
+		p.verify(root, vec![(position, Node::Data(leaf))])
+			.map_err(|e| Error::Verify.log_debug(e))
+	}
+
+	/// Return the internal size of the MMR (number of nodes).
+	#[cfg(test)]
+	pub fn size(&self) -> u64 {
+		self.mmr.mmr_size()
+	}
+}
+
+/// Runtime specific MMR functions.
+impl<T, I, L> Mmr<RuntimeStorage, T, I, L>
+where
+	T: Config<I>,
+	I: 'static,
+	L: primitives::FullLeaf,
+{
+	/// Push another item to the MMR.
+	///
+	/// Returns the element position (index) in the MMR.
+	pub fn push(&mut self, leaf: L) -> Option<u64> {
+		let position =
+			self.mmr.push(Node::Data(leaf)).map_err(|e| Error::Push.log_error(e)).ok()?;
+
+		self.leaves += 1;
+
+		Some(position)
+	}
+
+	/// Commit the changes to underlying storage, return the current number of leaves and
+	/// calculate the new MMR root hash.
+	pub fn finalize(self) -> Result<(u64, <T as Config<I>>::Hash), Error> {
+		let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?;
+		self.mmr.commit().map_err(|e| Error::Commit.log_error(e))?;
+		Ok((self.leaves, root.hash()))
+	}
+}
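Both verification paths above rely on `mmr_lib::leaf_index_to_pos` to translate a leaf index into its in-tree position. That mapping has a closed form; the sketch below is a re-derivation for illustration, not code taken from the library:

```rust
/// Position of the i-th (0-based) leaf in an MMR: every earlier leaf
/// contributes itself plus the inner nodes it completes, which totals
/// 2*i - popcount(i).
fn leaf_index_to_pos(leaf_index: u64) -> u64 {
    2 * leaf_index - u64::from(leaf_index.count_ones())
}

fn main() {
    // Leaves land at positions 0, 1, 3, 4, 7, 8, 10, 11, ...
    // (positions 2, 5 + 1, 9, ... in between are inner nodes).
    let positions: Vec<u64> = (0..8).map(leaf_index_to_pos).collect();
    assert_eq!(positions, vec![0, 1, 3, 4, 7, 8, 10, 11]);
    println!("{:?}", positions);
}
```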
+
+/// Off-chain specific MMR functions.
+impl<T, I, L> Mmr<OffchainStorage, T, I, L>
+where
+	T: Config<I>,
+	I: 'static,
+	L: primitives::FullLeaf + codec::Decode,
+{
+	/// Generate a proof for the given leaf index.
+	///
+	/// Proof generation requires all the nodes (or their hashes) to be available in the storage
+	/// (i.e. you can't run it against pruned storage).
+	pub fn generate_proof(
+		&self,
+		leaf_index: u64,
+	) -> Result<(L, primitives::Proof<<T as Config<I>>::Hash>), Error> {
+		let position = mmr_lib::leaf_index_to_pos(leaf_index);
+		let store = <Storage<OffchainStorage, T, I, L>>::default();
+		let leaf = match mmr_lib::MMRStore::get_elem(&store, position) {
+			Ok(Some(Node::Data(leaf))) => leaf,
+			e => return Err(Error::LeafNotFound.log_debug(e)),
+		};
+		let leaf_count = self.leaves;
+		self.mmr
+			.gen_proof(vec![position])
+			.map_err(|e| Error::GenerateProof.log_error(e))
+			.map(|p| primitives::Proof {
+				leaf_index,
+				leaf_count,
+				items: p.proof_items().iter().map(|x| x.hash()).collect(),
+			})
+			.map(|p| (leaf, p))
+	}
+}
diff --git a/frame/merkle-mountain-range/src/mmr/mod.rs b/frame/merkle-mountain-range/src/mmr/mod.rs
new file mode 100644
index 0000000000000..ec2dfe245bd41
--- /dev/null
+++ b/frame/merkle-mountain-range/src/mmr/mod.rs
@@ -0,0 +1,45 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod mmr;
+pub mod storage;
+pub mod utils;
+
+use crate::primitives::FullLeaf;
+use sp_runtime::traits;
+
+pub use self::mmr::{verify_leaf_proof, Mmr};
+
+/// Node type for runtime `T`.
+pub type NodeOf<T, I, L> = Node<<T as crate::Config<I>>::Hashing, L>;
+
+/// A node stored in the MMR.
+pub type Node<H, L> = crate::primitives::DataOrHash<H, L>;
+
+/// Default Merging & Hashing behavior for MMR.
+pub struct Hasher<H, L>(sp_std::marker::PhantomData<(H, L)>);
+
+impl<H: traits::Hash, L: FullLeaf> mmr_lib::Merge for Hasher<H, L> {
+	type Item = Node<H, L>;
+
+	fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item {
+		let mut concat = left.hash().as_ref().to_vec();
+		concat.extend_from_slice(right.hash().as_ref());
+
+		Node::Hash(<H as traits::Hash>::hash(&concat))
+	}
+}
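The `merge` implementation above is the whole "bagging" step: concatenate the two child digests and hash the result. The same shape, standalone, with a toy 64-bit hash standing in for the configured `traits::Hash` type:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

fn toy_hash(data: &[u8]) -> [u8; 8] {
    let mut h = DefaultHasher::new();
    h.write(data);
    h.finish().to_be_bytes()
}

// Mirror of `Hasher::merge`: concatenate the child hashes, hash the result.
fn merge(left: [u8; 8], right: [u8; 8]) -> [u8; 8] {
    let mut concat = left.to_vec();
    concat.extend_from_slice(&right);
    toy_hash(&concat)
}

fn main() {
    let peak = merge(toy_hash(b"leaf 0"), toy_hash(b"leaf 1"));
    // Order matters: merging (right, left) yields a different parent.
    assert_ne!(peak, merge(toy_hash(b"leaf 1"), toy_hash(b"leaf 0")));
    println!("{:02x?}", peak);
}
```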
diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs
new file mode 100644
index 0000000000000..09e24017816ec
--- /dev/null
+++ b/frame/merkle-mountain-range/src/mmr/storage.rs
@@ -0,0 +1,109 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! MMR storage implementations.
+
+use codec::Encode;
+#[cfg(not(feature = "std"))]
+use sp_std::prelude::Vec;
+
+use crate::{
+	mmr::{Node, NodeOf},
+	primitives, Config, Nodes, NumberOfLeaves, Pallet,
+};
+
+/// A marker type for runtime-specific storage implementation.
+///
+/// Allows appending new items to the MMR and proof verification.
+/// MMR nodes are appended to two different storages:
+/// 1. We add node (leaf) hashes to the on-chain storage (see [crate::Nodes]).
+/// 2. We add full leaves (and all inner nodes as well) into the `IndexingAPI` during block
+///    processing, so the values end up in the Offchain DB if indexing is enabled.
+pub struct RuntimeStorage;
+
+/// A marker type for offchain-specific storage implementation.
+///
+/// Allows proof generation and verification, but DOES NOT support appending new items to the
+/// MMR. MMR nodes are assumed to be stored in the Off-Chain DB.
+pub struct OffchainStorage;
+
+/// A storage layer for MMR.
+///
+/// There are two different implementations depending on the use case.
+/// See docs for [RuntimeStorage] and [OffchainStorage].
+pub struct Storage<StorageType, T, I, L>(sp_std::marker::PhantomData<(StorageType, T, I, L)>);
+
+impl<StorageType, T, I, L> Default for Storage<StorageType, T, I, L> {
+	fn default() -> Self {
+		Self(Default::default())
+	}
+}
+
+impl<T, I, L> mmr_lib::MMRStore<NodeOf<T, I, L>> for Storage<OffchainStorage, T, I, L>
+where
+	T: Config<I>,
+	I: 'static,
+	L: primitives::FullLeaf + codec::Decode,
+{
+	fn get_elem(&self, pos: u64) -> mmr_lib::Result<Option<NodeOf<T, I, L>>> {
+		let key = Pallet::<T, I>::offchain_key(pos);
+		// Retrieve the element from the Off-chain DB.
+		Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key)
+			.and_then(|v| codec::Decode::decode(&mut &*v).ok()))
+	}
+
+	fn append(&mut self, _: u64, _: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> {
+		panic!("MMR must not be altered in the off-chain context.")
+	}
+}
+
+impl<T, I, L> mmr_lib::MMRStore<NodeOf<T, I, L>> for Storage<RuntimeStorage, T, I, L>
+where
+	T: Config<I>,
+	I: 'static,
+	L: primitives::FullLeaf,
+{
+	fn get_elem(&self, pos: u64) -> mmr_lib::Result<Option<NodeOf<T, I, L>>> {
+		Ok(<Nodes<T, I>>::get(pos).map(Node::Hash))
+	}
+
+	fn append(&mut self, pos: u64, elems: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> {
+		let mut leaves = crate::NumberOfLeaves::<T, I>::get();
+		let mut size = crate::mmr::utils::NodesUtils::new(leaves).size();
+		if pos != size {
+			return Err(mmr_lib::Error::InconsistentStore)
+		}
+
+		for elem in elems {
+			// On-chain we only store the hash (even if it's a leaf).
+			<Nodes<T, I>>::insert(size, elem.hash());
+			// The Indexing API is used to store the full leaf content.
+			let key = Pallet::<T, I>::offchain_key(size);
+			elem.using_encoded(|elem| sp_io::offchain_index::set(&key, elem));
+			size += 1;
+
+			if let Node::Data(..) = elem {
+				leaves += 1;
+			}
+		}
+
+		NumberOfLeaves::<T, I>::put(leaves);
+
+		Ok(())
+	}
+}
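The runtime-side `append` above only accepts writes at the current end of the tree and bumps the leaf count as full leaves go past. A toy in-memory analogue of that contract — an illustration, not the pallet's actual storage:

```rust
// Vec-backed stand-in for the runtime `MMRStore`: appends must land exactly
// at the current size, mirroring the `InconsistentStore` check above.
struct VecStore {
    nodes: Vec<u64>, // stand-in for node hashes
}

impl VecStore {
    fn get_elem(&self, pos: u64) -> Option<u64> {
        self.nodes.get(pos as usize).copied()
    }

    fn append(&mut self, pos: u64, elems: Vec<u64>) -> Result<(), &'static str> {
        if pos != self.nodes.len() as u64 {
            return Err("inconsistent store");
        }
        self.nodes.extend(elems);
        Ok(())
    }
}

fn main() {
    let mut store = VecStore { nodes: vec![] };
    store.append(0, vec![1, 2, 3]).unwrap();
    assert_eq!(store.get_elem(1), Some(2));
    assert!(store.append(5, vec![4]).is_err()); // gap -> rejected
}
```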
diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs
new file mode 100644
index 0000000000000..8fc725f11e72f
--- /dev/null
+++ b/frame/merkle-mountain-range/src/mmr/utils.rs
@@ -0,0 +1,126 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Merkle Mountain Range utilities.
+
+/// MMR nodes & size related utilities.
+pub struct NodesUtils {
+	no_of_leaves: u64,
+}
+
+impl NodesUtils {
+	/// Create a new instance of MMR nodes utilities for the given number of leaves.
+	pub fn new(no_of_leaves: u64) -> Self {
+		Self { no_of_leaves }
+	}
+
+	/// Calculate the number of peaks in the MMR.
+	pub fn number_of_peaks(&self) -> u64 {
+		self.number_of_leaves().count_ones() as u64
+	}
+
+	/// Return the number of leaves in the MMR.
+	pub fn number_of_leaves(&self) -> u64 {
+		self.no_of_leaves
+	}
+
+	/// Calculate the total size of the MMR (number of nodes).
+	pub fn size(&self) -> u64 {
+		2 * self.no_of_leaves - self.number_of_peaks()
+	}
+
+	/// Calculate the maximal depth of the MMR.
+	pub fn depth(&self) -> u32 {
+		if self.no_of_leaves == 0 {
+			return 0
+		}
+
+		64 - self.no_of_leaves.next_power_of_two().leading_zeros()
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn should_calculate_depth_correctly() {
+		assert_eq!(
+			vec![0, 1, 2, 3, 4, 9, 15, 21]
+				.into_iter()
+				.map(|n| NodesUtils::new(n).depth())
+				.collect::<Vec<_>>(),
+			vec![0, 1, 2, 3, 3, 5, 5, 6]
+		);
+	}
+
+	#[test]
+	fn should_calculate_number_of_leaves_correctly() {
+		assert_eq!(
+			vec![0, 1, 2, 3, 4, 9, 15, 21]
+				.into_iter()
+				.map(|n| NodesUtils::new(n).number_of_leaves())
+				.collect::<Vec<_>>(),
+			vec![0, 1, 2, 3, 4, 9, 15, 21]
+		);
+	}
+
+	#[test]
+	fn should_calculate_number_of_peaks_correctly() {
+		assert_eq!(
+			vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21]
+				.into_iter()
+				.map(|n| NodesUtils::new(n).number_of_peaks())
+				.collect::<Vec<_>>(),
+			vec![0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 3]
+		);
+	}
+
+	#[test]
+	fn should_calculate_the_size_correctly() {
+		let _ = env_logger::try_init();
+
+		let leaves = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 21];
+		let sizes = vec![0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 39];
+		assert_eq!(
+			leaves
+				.clone()
+				.into_iter()
+				.map(|n| NodesUtils::new(n).size())
+				.collect::<Vec<_>>(),
+			sizes.clone()
+		);
+
+		// size cross-check
+		let mut actual_sizes = vec![];
+		for s in &leaves[1..] {
+			crate::tests::new_test_ext().execute_with(|| {
+				let mut mmr = crate::mmr::Mmr::<
+					crate::mmr::storage::RuntimeStorage,
+					crate::mock::Test,
+					_,
+					_,
+				>::new(0);
+				for i in 0..*s {
+					mmr.push(i);
+				}
+				actual_sizes.push(mmr.size());
+			})
+		}
+		assert_eq!(sizes[1..], actual_sizes[..]);
+	}
+}
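These formulas are small enough to sanity-check outside the runtime: the peaks are the pop-count of the leaf count, and the size follows because only the current peaks remain unmerged. A standalone re-derivation matching the test vectors above:

```rust
fn number_of_peaks(leaves: u64) -> u64 {
    u64::from(leaves.count_ones())
}

fn size(leaves: u64) -> u64 {
    // Every leaf adds one node; every completed pair also adds a parent,
    // so only the unmerged peaks are "missing" from 2 * leaves.
    2 * leaves - number_of_peaks(leaves)
}

fn depth(leaves: u64) -> u32 {
    if leaves == 0 {
        return 0;
    }
    64 - leaves.next_power_of_two().leading_zeros()
}

fn main() {
    assert_eq!((0..=4).map(size).collect::<Vec<_>>(), vec![0, 1, 3, 4, 7]);
    assert_eq!(number_of_peaks(0b10101), 3); // 21 leaves -> 3 peaks
    assert_eq!(depth(21), 6);
    println!("ok");
}
```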
diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs
new file mode 100644
index 0000000000000..3616a8d1d5242
--- /dev/null
+++ b/frame/merkle-mountain-range/src/mock.rs
@@ -0,0 +1,106 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate as pallet_mmr;
+use crate::*;
+
+use codec::{Decode, Encode};
+use frame_support::parameter_types;
+use pallet_mmr_primitives::{Compact, LeafDataProvider};
+use sp_core::H256;
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup, Keccak256},
+};
+use sp_std::{cell::RefCell, prelude::*};
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		MMR: pallet_mmr::{Pallet, Storage},
+	}
+);
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+}
+impl frame_system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type Origin = Origin;
+	type Call = Call;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Hash = H256;
+	type Hashing = BlakeTwo256;
+	type AccountId = sp_core::sr25519::Public;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Header = Header;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type DbWeight = ();
+	type BlockWeights = ();
+	type BlockLength = ();
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = ();
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+
+impl Config for Test {
+	const INDEXING_PREFIX: &'static [u8] = b"mmr-";
+
+	type Hashing = Keccak256;
+	type Hash = H256;
+	type LeafData = Compact<Keccak256, (frame_system::Pallet<Test>, LeafData)>;
+	type OnNewRoot = ();
+	type WeightInfo = ();
+}
+
+#[derive(Encode, Decode, Clone, Default, Eq, PartialEq, Debug)]
+pub struct LeafData {
+	pub a: u64,
+	pub b: Vec<u8>,
+}
+
+impl LeafData {
+	pub fn new(a: u64) -> Self {
+		Self { a, b: Default::default() }
+	}
+}
+
+thread_local! {
+	pub static LEAF_DATA: RefCell<LeafData> = RefCell::new(Default::default());
+}
+
+impl LeafDataProvider for LeafData {
+	type LeafData = Self;
+
+	fn leaf_data() -> Self::LeafData {
+		LEAF_DATA.with(|r| r.borrow().clone())
+	}
+}
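The mock feeds fresh leaf data to the pallet through a thread-local cell, which the tests' `new_block` helper mutates right before `on_initialize` runs. The pattern in miniature, standalone:

```rust
use std::cell::RefCell;

thread_local! {
    // Stand-in for the mock's LEAF_DATA cell.
    static LEAF: RefCell<u64> = RefCell::new(0);
}

// The test driver mutates the cell ...
fn set_leaf(v: u64) {
    LEAF.with(|l| *l.borrow_mut() = v);
}

// ... and the provider (here a plain function) reads it back when asked.
fn leaf_data() -> u64 {
    LEAF.with(|l| *l.borrow())
}

fn main() {
    set_leaf(7);
    assert_eq!(leaf_data(), 7);
}
```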
diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs
new file mode 100644
index 0000000000000..50512e9286951
--- /dev/null
+++ b/frame/merkle-mountain-range/src/tests.rs
@@ -0,0 +1,324 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{mock::*, *};
+
+use frame_support::traits::OnInitialize;
+use pallet_mmr_primitives::{Compact, Proof};
+use sp_core::{
+	offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt},
+	H256,
+};
+
+pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
+	frame_system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
+}
+
+fn register_offchain_ext(ext: &mut sp_io::TestExternalities) {
+	let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db());
+	ext.register_extension(OffchainDbExt::new(offchain.clone()));
+	ext.register_extension(OffchainWorkerExt::new(offchain));
+}
+
+fn new_block() -> u64 {
+	let number = frame_system::Pallet::<Test>::block_number() + 1;
+	let hash = H256::repeat_byte(number as u8);
+	LEAF_DATA.with(|r| r.borrow_mut().a = number);
+
+	frame_system::Pallet::<Test>::initialize(
+		&number,
+		&hash,
+		&Default::default(),
+		frame_system::InitKind::Full,
+	);
+	MMR::on_initialize(number)
+}
+
+pub(crate) fn hex(s: &str) -> H256 {
+	s.parse().unwrap()
+}
+
+type BlockNumber = <Test as frame_system::Config>::BlockNumber;
+
+fn decode_node(
+	v: Vec<u8>,
+) -> mmr::Node<<Test as Config>::Hashing, ((BlockNumber, H256), LeafData)> {
+	use crate::primitives::DataOrHash;
+	type A = DataOrHash<<Test as Config>::Hashing, (BlockNumber, H256)>;
+	type B = DataOrHash<<Test as Config>::Hashing, LeafData>;
+	type Node = mmr::Node<<Test as Config>::Hashing, (A, B)>;
+	let tuple: Node = codec::Decode::decode(&mut &v[..]).unwrap();
+
+	match tuple {
+		mmr::Node::Data((DataOrHash::Data(a), DataOrHash::Data(b))) => mmr::Node::Data((a, b)),
+		mmr::Node::Hash(hash) => mmr::Node::Hash(hash),
+		_ => unreachable!(),
+	}
+}
+
+fn init_chain(blocks: usize) {
+	// given
+	for _ in 0..blocks {
+		new_block();
+	}
+}
+
+#[test]
+fn should_start_empty() {
+	let _ = env_logger::try_init();
+	new_test_ext().execute_with(|| {
+		// given
+		assert_eq!(
+			crate::RootHash::<Test>::get(),
+			"0000000000000000000000000000000000000000000000000000000000000000"
+				.parse()
+				.unwrap()
+		);
+		assert_eq!(crate::NumberOfLeaves::<Test>::get(), 0);
+		assert_eq!(crate::Nodes::<Test>::get(0), None);
+
+		// when
+		let weight = new_block();
+
+		// then
+		assert_eq!(crate::NumberOfLeaves::<Test>::get(), 1);
+		assert_eq!(
+			crate::Nodes::<Test>::get(0),
+			Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0"))
+		);
+		assert_eq!(
+			crate::RootHash::<Test>::get(),
+			hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")
+		);
+		assert!(weight != 0);
+	});
+}
+
+#[test]
+fn should_append_to_mmr_when_on_initialize_is_called() {
+	let _ = env_logger::try_init();
+	let mut ext = new_test_ext();
+	ext.execute_with(|| {
+		// when
+		new_block();
+		new_block();
+
+		// then
+		assert_eq!(crate::NumberOfLeaves::<Test>::get(), 2);
+		assert_eq!(
+			(
+				crate::Nodes::<Test>::get(0),
+				crate::Nodes::<Test>::get(1),
+				crate::Nodes::<Test>::get(2),
+				crate::Nodes::<Test>::get(3),
+				crate::RootHash::<Test>::get(),
+			),
+			(
+				Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")),
+				Some(hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705")),
+				Some(hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")),
+				None,
+				hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"),
+			)
+		);
+	});
+
+	// make sure the leaves end up in the offchain DB
+	ext.persist_offchain_overlay();
+	let offchain_db = ext.offchain_db();
+	assert_eq!(
+		offchain_db.get(&MMR::offchain_key(0)).map(decode_node),
+		Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1),)))
+	);
+	assert_eq!(
+		offchain_db.get(&MMR::offchain_key(1)).map(decode_node),
+		Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2),)))
+	);
+	assert_eq!(
+		offchain_db.get(&MMR::offchain_key(2)).map(decode_node),
+		Some(mmr::Node::Hash(hex(
+			"672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"
+		)))
+	);
+	assert_eq!(offchain_db.get(&MMR::offchain_key(3)), None);
+}
+
+#[test]
+fn should_construct_larger_mmr_correctly() {
+	let _ = env_logger::try_init();
+	new_test_ext().execute_with(|| {
+		// when
+		init_chain(7);
+
+		// then
+		assert_eq!(crate::NumberOfLeaves::<Test>::get(), 7);
+		assert_eq!(
+			(
+				crate::Nodes::<Test>::get(0),
+				crate::Nodes::<Test>::get(10),
+				crate::RootHash::<Test>::get(),
+			),
+			(
+				Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")),
+				Some(hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c")),
+				hex("e45e25259f7930626431347fa4dd9aae7ac83b4966126d425ca70ab343709d2c"),
+			)
+		);
+	});
+}
+
+#[test]
+fn should_generate_proofs_correctly() {
+	let _ = env_logger::try_init();
+	let mut ext = new_test_ext();
+	// given
+	ext.execute_with(|| init_chain(7));
+	ext.persist_offchain_overlay();
+
+	// Try to generate proofs now. This requires the offchain extensions to be present
+	// to retrieve full leaf data.
+	register_offchain_ext(&mut ext);
+	ext.execute_with(|| {
+		// when generate proofs for all leaves
+		let proofs = (0_u64..crate::NumberOfLeaves::<Test>::get())
+			.into_iter()
+			.map(|leaf_index| crate::Pallet::<Test>::generate_proof(leaf_index).unwrap())
+			.collect::<Vec<_>>();
+
+		// then
+		assert_eq!(
+			proofs[0],
+			(
+				Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),)),
+				Proof {
+					leaf_index: 0,
+					leaf_count: 7,
+					items: vec![
+						hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"),
+						hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"),
+						hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"),
+					],
+				}
+			)
+		);
+		assert_eq!(
+			proofs[4],
+			(
+				Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),)),
+				Proof {
+					leaf_index: 4,
+					leaf_count: 7,
+					items: vec![
+						hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"),
+						hex("8ed25570209d8f753d02df07c1884ddb36a3d9d4770e4608b188322151c657fe"),
+						hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c"),
+					],
+				}
+			)
+		);
+		assert_eq!(
+			proofs[6],
+			(
+				Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),)),
+				Proof {
+					leaf_index: 6,
+					leaf_count: 7,
+					items: vec![
+						hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"),
+						hex("7e4316ae2ebf7c3b6821cb3a46ca8b7a4f9351a9b40fcf014bb0a4fd8e8f29da"),
+					],
+				}
+			)
+		);
+	});
+}
+
+#[test]
+fn should_verify() {
+	let _ = env_logger::try_init();
+
+	// Start off with chain initialisation and storing indexing data off-chain
+	// (MMR leaves).
+	let mut ext = new_test_ext();
+	ext.execute_with(|| init_chain(7));
+	ext.persist_offchain_overlay();
+
+	// Try to generate a proof now. This requires the offchain extensions to be present
+	// to retrieve full leaf data.
+	register_offchain_ext(&mut ext);
+	let (leaf, proof5) = ext.execute_with(|| {
+		// when
+		crate::Pallet::<Test>::generate_proof(5).unwrap()
+	});
+
+	// Now to verify the proof, we really shouldn't require offchain storage or extension.
+	// Hence we initialize the storage once again, using different externalities and then
+	// verify.
+	let mut ext2 = new_test_ext();
+	ext2.execute_with(|| {
+		init_chain(7);
+		// then
+		assert_eq!(crate::Pallet::<Test>::verify_leaf(leaf, proof5), Ok(()));
+	});
+}
+
+#[test]
+fn verification_should_be_stateless() {
+	let _ = env_logger::try_init();
+
+	// Start off with chain initialisation and storing indexing data off-chain
+	// (MMR leaves).
+	let mut ext = new_test_ext();
+	ext.execute_with(|| init_chain(7));
+	ext.persist_offchain_overlay();
+
+	// Try to generate a proof now. This requires the offchain extensions to be present
+	// to retrieve full leaf data.
+	register_offchain_ext(&mut ext);
+	let (leaf, proof5) = ext.execute_with(|| {
+		// when
+		crate::Pallet::<Test>::generate_proof(5).unwrap()
+	});
+	let root = ext.execute_with(|| crate::Pallet::<Test>::mmr_root_hash());
+
+	// Verify proof without relying on any on-chain data.
+	let leaf = crate::primitives::DataOrHash::Data(leaf);
+	assert_eq!(
+		crate::verify_leaf_proof::<<Test as Config>::Hashing, _>(root, leaf, proof5),
+		Ok(())
+	);
+}
+
+#[test]
+fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() {
+	let _ = env_logger::try_init();
+	let mut ext = new_test_ext();
+	// given
+	ext.execute_with(|| init_chain(7));
+
+	ext.persist_offchain_overlay();
+	register_offchain_ext(&mut ext);
+
+	ext.execute_with(|| {
+		// when
+		let (leaf, proof5) = crate::Pallet::<Test>::generate_proof(5).unwrap();
+		new_block();
+
+		// then
+		assert_eq!(crate::Pallet::<Test>::verify_leaf(leaf, proof5), Ok(()));
+	});
+}
diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml
deleted file mode 100644
index 2934b15562c43..0000000000000
--- a/frame/metadata/Cargo.toml
+++ /dev/null
@@ -1,28 +0,0 @@
-[package]
-name = "frame-metadata"
-version = "12.0.0"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
-license = "Apache-2.0"
-homepage = "https://substrate.dev"
-repository = "https://github.com/paritytech/substrate/"
-description = "Decodable variant of the RuntimeMetadata."
-readme = "README.md"
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[dependencies]
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-serde = { version = "1.0.101", optional = true, features = ["derive"] }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" }
-
-[features]
-default = ["std"]
-std = [
-	"codec/std",
-	"sp-std/std",
-	"sp-core/std",
-	"serde",
-]
diff --git a/frame/metadata/README.md b/frame/metadata/README.md
deleted file mode 100644
index 423af8602e3f0..0000000000000
--- a/frame/metadata/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Decodable variant of the RuntimeMetadata.
-
-This really doesn't belong here, but is necessary for the moment. In the future
-it should be removed entirely to an external module for shimming on to the
-codec-encoded metadata.
-
-License: Apache-2.0
\ No newline at end of file
diff --git a/frame/metadata/src/lib.rs b/frame/metadata/src/lib.rs
deleted file mode 100644
index 109f33f420191..0000000000000
--- a/frame/metadata/src/lib.rs
+++ /dev/null
@@ -1,430 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Decodable variant of the RuntimeMetadata. -//! -//! This really doesn't belong here, but is necessary for the moment. In the future -//! it should be removed entirely to an external module for shimming on to the -//! codec-encoded metadata. - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(feature = "std")] -use serde::Serialize; -#[cfg(feature = "std")] -use codec::{Decode, Input, Error}; -use codec::{Encode, Output}; -use sp_std::vec::Vec; -use sp_core::RuntimeDebug; - -#[cfg(feature = "std")] -type StringBuf = String; - -/// Current prefix of metadata -pub const META_RESERVED: u32 = 0x6174656d; // 'meta' warn endianness - -/// On `no_std` we do not support `Decode` and thus `StringBuf` is just `&'static str`. -/// So, if someone tries to decode this stuff on `no_std`, they will get a compilation error. -#[cfg(not(feature = "std"))] -type StringBuf = &'static str; - -/// A type that decodes to a different type than it encodes. -/// The user needs to make sure that both types use the same encoding. -/// -/// For example a `&'static [ &'static str ]` can be decoded to a `Vec`. -#[derive(Clone)] -pub enum DecodeDifferent where B: 'static, O: 'static { - Encode(B), - Decoded(O), -} - -impl Encode for DecodeDifferent where B: Encode + 'static, O: Encode + 'static { - fn encode_to(&self, dest: &mut W) { - match self { - DecodeDifferent::Encode(b) => b.encode_to(dest), - DecodeDifferent::Decoded(o) => o.encode_to(dest), - } - } -} - -impl codec::EncodeLike for DecodeDifferent where B: Encode + 'static, O: Encode + 'static {} - -#[cfg(feature = "std")] -impl Decode for DecodeDifferent where B: 'static, O: Decode + 'static { - fn decode(input: &mut I) -> Result { - ::decode(input).map(|val| { - DecodeDifferent::Decoded(val) - }) - } -} - -impl PartialEq for DecodeDifferent -where - B: Encode + Eq + PartialEq + 'static, - O: Encode + Eq + PartialEq + 'static, -{ - fn eq(&self, other: &Self) -> bool { - self.encode() == other.encode() - } -} - -impl Eq for DecodeDifferent - where B: Encode + Eq + PartialEq + 'static, O: Encode + Eq + PartialEq + 'static -{} - -impl sp_std::fmt::Debug for DecodeDifferent - where - B: sp_std::fmt::Debug + Eq + 'static, - O: sp_std::fmt::Debug + Eq + 'static, -{ - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - match self { - DecodeDifferent::Encode(b) => b.fmt(f), - DecodeDifferent::Decoded(o) => o.fmt(f), - } - } -} - -#[cfg(feature = "std")] -impl serde::Serialize for DecodeDifferent - where - B: serde::Serialize + 'static, - O: serde::Serialize + 'static, -{ - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { - match self { - DecodeDifferent::Encode(b) => b.serialize(serializer), - DecodeDifferent::Decoded(o) => o.serialize(serializer), - } - } -} - -pub type DecodeDifferentArray = DecodeDifferent<&'static [B], Vec>; - -type DecodeDifferentStr = DecodeDifferent<&'static str, StringBuf>; - -/// All the metadata about a function. 
-#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct FunctionMetadata { - pub name: DecodeDifferentStr, - pub arguments: DecodeDifferentArray, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, -} - -/// All the metadata about a function argument. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct FunctionArgumentMetadata { - pub name: DecodeDifferentStr, - pub ty: DecodeDifferentStr, -} - -/// Newtype wrapper for support encoding functions (actual the result of the function). -#[derive(Clone, Eq)] -pub struct FnEncode(pub fn() -> E) where E: Encode + 'static; - -impl Encode for FnEncode { - fn encode_to(&self, dest: &mut W) { - self.0().encode_to(dest); - } -} - -impl codec::EncodeLike for FnEncode {} - -impl PartialEq for FnEncode { - fn eq(&self, other: &Self) -> bool { - self.0().eq(&other.0()) - } -} - -impl sp_std::fmt::Debug for FnEncode { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - self.0().fmt(f) - } -} - -#[cfg(feature = "std")] -impl serde::Serialize for FnEncode { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { - self.0().serialize(serializer) - } -} - -/// All the metadata about an outer event. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct OuterEventMetadata { - pub name: DecodeDifferentStr, - pub events: DecodeDifferentArray< - (&'static str, FnEncode<&'static [EventMetadata]>), - (StringBuf, Vec) - >, -} - -/// All the metadata about an event. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct EventMetadata { - pub name: DecodeDifferentStr, - pub arguments: DecodeDifferentArray<&'static str, StringBuf>, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, -} - -/// All the metadata about one storage entry. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct StorageEntryMetadata { - pub name: DecodeDifferentStr, - pub modifier: StorageEntryModifier, - pub ty: StorageEntryType, - pub default: ByteGetter, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, -} - -/// All the metadata about one module constant. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct ModuleConstantMetadata { - pub name: DecodeDifferentStr, - pub ty: DecodeDifferentStr, - pub value: ByteGetter, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, -} - -/// All the metadata about a module error. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct ErrorMetadata { - pub name: DecodeDifferentStr, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, -} - -/// All the metadata about errors in a module. -pub trait ModuleErrorMetadata { - fn metadata() -> &'static [ErrorMetadata]; -} - -impl ModuleErrorMetadata for &'static str { - fn metadata() -> &'static [ErrorMetadata] { - &[] - } -} - -/// A technical trait to store lazy initiated vec value as static dyn pointer. -pub trait DefaultByte: Send + Sync { - fn default_byte(&self) -> Vec; -} - -/// Wrapper over dyn pointer for accessing a cached once byte value. 
-#[derive(Clone)] -pub struct DefaultByteGetter(pub &'static dyn DefaultByte); - -/// Decode different for static lazy initiated byte value. -pub type ByteGetter = DecodeDifferent>; - -impl Encode for DefaultByteGetter { - fn encode_to(&self, dest: &mut W) { - self.0.default_byte().encode_to(dest) - } -} - -impl codec::EncodeLike for DefaultByteGetter {} - -impl PartialEq for DefaultByteGetter { - fn eq(&self, other: &DefaultByteGetter) -> bool { - let left = self.0.default_byte(); - let right = other.0.default_byte(); - left.eq(&right) - } -} - -impl Eq for DefaultByteGetter { } - -#[cfg(feature = "std")] -impl serde::Serialize for DefaultByteGetter { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { - self.0.default_byte().serialize(serializer) - } -} - -impl sp_std::fmt::Debug for DefaultByteGetter { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - self.0.default_byte().fmt(f) - } -} - -/// Hasher used by storage maps -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub enum StorageHasher { - Blake2_128, - Blake2_256, - Blake2_128Concat, - Twox128, - Twox256, - Twox64Concat, - Identity, -} - -/// A storage entry type. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub enum StorageEntryType { - Plain(DecodeDifferentStr), - Map { - hasher: StorageHasher, - key: DecodeDifferentStr, - value: DecodeDifferentStr, - // is_linked flag previously, unused now to keep backwards compat - unused: bool, - }, - DoubleMap { - hasher: StorageHasher, - key1: DecodeDifferentStr, - key2: DecodeDifferentStr, - value: DecodeDifferentStr, - key2_hasher: StorageHasher, - }, -} - -/// A storage entry modifier. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub enum StorageEntryModifier { - Optional, - Default, -} - -/// All metadata of the storage. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct StorageMetadata { - /// The common prefix used by all storage entries. - pub prefix: DecodeDifferent<&'static str, StringBuf>, - pub entries: DecodeDifferent<&'static [StorageEntryMetadata], Vec>, -} - -/// Metadata prefixed by a u32 for reserved usage -#[derive(Eq, Encode, PartialEq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct RuntimeMetadataPrefixed(pub u32, pub RuntimeMetadata); - -/// Metadata of the extrinsic used by the runtime. -#[derive(Eq, Encode, PartialEq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct ExtrinsicMetadata { - /// Extrinsic version. - pub version: u8, - /// The signed extensions in the order they appear in the extrinsic. - pub signed_extensions: Vec, -} - -/// The metadata of a runtime. -/// The version ID encoded/decoded through -/// the enum nature of `RuntimeMetadata`. -#[derive(Eq, Encode, PartialEq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub enum RuntimeMetadata { - /// Unused; enum filler. - V0(RuntimeMetadataDeprecated), - /// Version 1 for runtime metadata. No longer used. - V1(RuntimeMetadataDeprecated), - /// Version 2 for runtime metadata. No longer used. - V2(RuntimeMetadataDeprecated), - /// Version 3 for runtime metadata. No longer used. - V3(RuntimeMetadataDeprecated), - /// Version 4 for runtime metadata. No longer used. 
- V4(RuntimeMetadataDeprecated), - /// Version 5 for runtime metadata. No longer used. - V5(RuntimeMetadataDeprecated), - /// Version 6 for runtime metadata. No longer used. - V6(RuntimeMetadataDeprecated), - /// Version 7 for runtime metadata. No longer used. - V7(RuntimeMetadataDeprecated), - /// Version 8 for runtime metadata. No longer used. - V8(RuntimeMetadataDeprecated), - /// Version 9 for runtime metadata. No longer used. - V9(RuntimeMetadataDeprecated), - /// Version 10 for runtime metadata. No longer used. - V10(RuntimeMetadataDeprecated), - /// Version 11 for runtime metadata. No longer used. - V11(RuntimeMetadataDeprecated), - /// Version 12 for runtime metadata. - V12(RuntimeMetadataV12), -} - -/// Enum that should fail. -#[derive(Eq, PartialEq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize))] -pub enum RuntimeMetadataDeprecated { } - -impl Encode for RuntimeMetadataDeprecated { - fn encode_to(&self, _dest: &mut W) {} -} - -impl codec::EncodeLike for RuntimeMetadataDeprecated {} - -#[cfg(feature = "std")] -impl Decode for RuntimeMetadataDeprecated { - fn decode(_input: &mut I) -> Result { - Err("Decoding is not supported".into()) - } -} - -/// The metadata of a runtime. -#[derive(Eq, Encode, PartialEq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct RuntimeMetadataV12 { - /// Metadata of all the modules. - pub modules: DecodeDifferentArray, - /// Metadata of the extrinsic. - pub extrinsic: ExtrinsicMetadata, -} - -/// The latest version of the metadata. -pub type RuntimeMetadataLastVersion = RuntimeMetadataV12; - -/// All metadata about an runtime module. -#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode, Serialize))] -pub struct ModuleMetadata { - pub name: DecodeDifferentStr, - pub storage: Option, StorageMetadata>>, - pub calls: ODFnA, - pub event: ODFnA, - pub constants: DFnA, - pub errors: DFnA, - /// Define the index of the module, this index will be used for the encoding of module event, - /// call and origin variants. 
- pub index: u8, -} - -type ODFnA = Option>; -type DFnA = DecodeDifferent, Vec>; - -impl Into for RuntimeMetadataPrefixed { - fn into(self) -> sp_core::OpaqueMetadata { - sp_core::OpaqueMetadata::new(self.encode()) - } -} - -impl Into for RuntimeMetadataLastVersion { - fn into(self) -> RuntimeMetadataPrefixed { - RuntimeMetadataPrefixed(META_RESERVED, RuntimeMetadata::V12(self)) - } -} diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 2be66ebb722c1..177334d4ccf8d 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-multisig" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,26 +13,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-runtime/std", "frame-support/std", "frame-system/std", @@ -42,4 +41,6 @@ std = [ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/multisig/README.md b/frame/multisig/README.md index 2209e876f8441..4eab00d108204 100644 --- a/frame/multisig/README.md +++ b/frame/multisig/README.md @@ -1,7 +1,7 @@ # Multisig Module A module for doing multisig dispatch. -- [`multisig::Trait`](https://docs.rs/pallet-multisig/latest/pallet_multisig/trait.Trait.html) +- [`multisig::Config`](https://docs.rs/pallet-multisig/latest/pallet_multisig/trait.Config.html) - [`Call`](https://docs.rs/pallet-multisig/latest/pallet_multisig/enum.Call.html) ## Overview @@ -24,6 +24,6 @@ not available or desired. 
* `cancel_as_multi` - Cancel a call from a composite origin. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index bf89ec8b09bd4..2e23dff156e07 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,20 +20,18 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use core::convert::TryInto; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; use sp_runtime::traits::Bounded; -use core::convert::TryInto; -use crate::Module as Multisig; +use crate::Pallet as Multisig; const SEED: u32 = 0; -fn setup_multi(s: u32, z: u32) - -> Result<(Vec, Vec), &'static str> -{ +fn setup_multi(s: u32, z: u32) -> Result<(Vec, Vec), &'static str> { let mut signatories: Vec = Vec::new(); - for i in 0 .. s { + for i in 0..s { let signatory = account("signatory", i, SEED); // Give them some balance for a possible deposit let balance = BalanceOf::::max_value(); @@ -42,20 +40,21 @@ fn setup_multi(s: u32, z: u32) } signatories.sort(); // Must first convert to outer call type. - let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); + let call: ::Call = + frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let call_data = call.encode(); return Ok((signatories, call_data)) } benchmarks! { - _ { } - as_multi_threshold_1 { // Transaction Length let z in 0 .. 10_000; let max_signatories = T::MaxSignatories::get().into(); let (mut signatories, _) = setup_multi::(max_signatories, z)?; - let call: ::Call = frame_system::Call::::remark(vec![0; z as usize]).into(); + let call: ::Call = frame_system::Call::::remark { + remark: vec![0; z as usize] + }.into(); let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, 1); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; @@ -300,25 +299,4 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_as_multi_threshold_1::()); - assert_ok!(test_benchmark_as_multi_create::()); - assert_ok!(test_benchmark_as_multi_create_store::()); - assert_ok!(test_benchmark_as_multi_approve::()); - assert_ok!(test_benchmark_as_multi_approve_store::()); - assert_ok!(test_benchmark_as_multi_complete::()); - assert_ok!(test_benchmark_approve_as_multi_create::()); - assert_ok!(test_benchmark_approve_as_multi_approve::()); - assert_ok!(test_benchmark_approve_as_multi_complete::()); - assert_ok!(test_benchmark_cancel_as_multi::()); - }); - } -} +impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/multisig/src/default_weights.rs b/frame/multisig/src/default_weights.rs deleted file mode 100644 index 19d1528d9aaa6..0000000000000 --- a/frame/multisig/src/default_weights.rs +++ /dev/null @@ -1,89 +0,0 @@ -// This file is part of Substrate. 
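Two mechanical changes recur in the benchmarking diff above: FRAME call variants are now generated as structs with named fields, so `remark(vec![0; z])` becomes `remark { remark: vec![0; z] }`, and the hand-written `#[cfg(test)]` module is replaced by `impl_benchmark_test_suite!`, which expands to one `#[test]` per benchmark so a newly added benchmark cannot be forgotten in the test list. A small sketch of why the named-field form is the safer construction syntax (plain enums, names illustrative):

#[allow(dead_code)]
enum OldCall {
    // Tuple variant: arguments are positional, so two fields of the same
    // type could be swapped at a call site without a compile error.
    Remark(Vec<u8>),
}

enum NewCall {
    // Struct variant: every construction site must name the field.
    Remark { remark: Vec<u8> },
}

fn main() {
    let _old = OldCall::Remark(vec![0u8; 32]);
    let new = NewCall::Remark { remark: vec![0u8; 32] };
    if let NewCall::Remark { remark } = new {
        assert_eq!(remark.len(), 32);
    }
}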
- -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn as_multi_threshold_1(z: u32, ) -> Weight { - (17_161_000 as Weight) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - } - fn as_multi_create(s: u32, z: u32, ) -> Weight { - (79_857_000 as Weight) - .saturating_add((131_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (90_218_000 as Weight) - .saturating_add((129_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (48_402_000 as Weight) - .saturating_add((132_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (88_390_000 as Weight) - .saturating_add((120_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (98_960_000 as Weight) - .saturating_add((276_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((6_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn approve_as_multi_create(s: u32, ) -> Weight { - (80_185_000 as Weight) - .saturating_add((121_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn approve_as_multi_approve(s: u32, ) -> Weight { - (48_386_000 as Weight) - .saturating_add((143_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn approve_as_multi_complete(s: u32, ) -> Weight { - (177_181_000 as Weight) - .saturating_add((273_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn cancel_as_multi(s: u32, ) -> Weight { - (126_334_000 as Weight) - .saturating_add((124_000 as 
Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } -} diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index b0119984038a6..43040ada45a98 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,15 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Multisig Module -//! A module for doing multisig dispatch. +//! # Multisig pallet +//! A pallet for doing multisig dispatch. //! -//! - [`multisig::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! This module contains functionality for multi-signature dispatch, a (potentially) stateful +//! This pallet contains functionality for multi-signature dispatch, a (potentially) stateful //! operation, allowing multiple signed //! origins (accounts) to coordinate and dispatch a call from a well-known origin, derivable //! deterministically from the set of account IDs and the threshold number of accounts from the @@ -41,79 +41,46 @@ //! * `cancel_as_multi` - Cancel a call from a composite origin. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! [`Config`]: ./trait.Config.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::{Encode, Decode}; +mod benchmarking; +mod tests; +pub mod weights; + +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{ + DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo, + }, + ensure, + traits::{Currency, Get, ReservableCurrency}, + weights::{GetDispatchInfo, Weight}, + RuntimeDebug, +}; +use frame_system::{self as system, RawOrigin}; +use scale_info::TypeInfo; use sp_io::hashing::blake2_256; -use frame_support::{decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug}; -use frame_support::{traits::{Get, ReservableCurrency, Currency}, - weights::{Weight, GetDispatchInfo}, - dispatch::{DispatchResultWithPostInfo, DispatchErrorWithPostInfo, PostDispatchInfo}, +use sp_runtime::{ + traits::{Dispatchable, Zero}, + DispatchError, }; -use frame_system::{self as system, ensure_signed, RawOrigin}; -use sp_runtime::{DispatchError, DispatchResult, traits::{Dispatchable, Zero}}; +use sp_std::prelude::*; +pub use weights::WeightInfo; -mod tests; -mod benchmarking; -mod default_weights; +pub use pallet::*; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// Just a bunch of bytes, but they should decode to a valid `Call`. 
pub type OpaqueCall = Vec; -pub trait WeightInfo { - fn as_multi_threshold_1(z: u32, ) -> Weight; - fn as_multi_create(s: u32, z: u32, ) -> Weight; - fn as_multi_create_store(s: u32, z: u32, ) -> Weight; - fn as_multi_approve(s: u32, z: u32, ) -> Weight; - fn as_multi_approve_store(s: u32, z: u32, ) -> Weight; - fn as_multi_complete(s: u32, z: u32, ) -> Weight; - fn approve_as_multi_create(s: u32, ) -> Weight; - fn approve_as_multi_approve(s: u32, ) -> Weight; - fn approve_as_multi_complete(s: u32, ) -> Weight; - fn cancel_as_multi(s: u32, ) -> Weight; -} - -/// Configuration trait. -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The overarching call type. - type Call: Parameter + Dispatchable - + GetDispatchInfo + From>; - - /// The currency mechanism. - type Currency: ReservableCurrency; - - /// The base amount of currency needed to reserve for creating a multisig execution or to store - /// a dispatch call for later. - /// - /// This is held for an additional storage item whose value size is - /// `4 + sizeof((BlockNumber, Balance, AccountId))` bytes and whose key size is - /// `32 + sizeof(AccountId)` bytes. - type DepositBase: Get>; - - /// The amount of currency needed per unit threshold when creating a multisig execution. - /// - /// This is held for adding 32 bytes more into a pre-existing storage value. - type DepositFactor: Get>; - - /// The maximum amount of signatories allowed in the multisig. - type MaxSignatories: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} - /// A global extrinsic index, formed as the extrinsic index within a block, together with that /// block's height. This allows a transaction in which a multisig operation of a particular /// composite was created to be uniquely identified. -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] pub struct Timepoint { /// The height of the chain at the point in time. height: BlockNumber, @@ -122,7 +89,7 @@ pub struct Timepoint { } /// An open multisig operation. -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] pub struct Multisig { /// The extrinsic when the multisig operation was opened. when: Timepoint, @@ -134,19 +101,77 @@ pub struct Multisig { approvals: Vec, } -decl_storage! { - trait Store for Module as Multisig { - /// The set of open multisig operations. - pub Multisigs: double_map - hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) [u8; 32] - => Option, T::AccountId>>; +type CallHash = [u8; 32]; - pub Calls: map hasher(identity) [u8; 32] => Option<(OpaqueCall, T::AccountId, BalanceOf)>; - } +enum CallOrHash { + Call(OpaqueCall, bool), + Hash([u8; 32]), } -decl_error! { - pub enum Error for Module { +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// The overarching call type. + type Call: Parameter + + Dispatchable + + GetDispatchInfo + + From>; + + /// The currency mechanism. 
+ type Currency: ReservableCurrency; + + /// The base amount of currency needed to reserve for creating a multisig execution or to + /// store a dispatch call for later. + /// + /// This is held for an additional storage item whose value size is + /// `4 + sizeof((BlockNumber, Balance, AccountId))` bytes and whose key size is + /// `32 + sizeof(AccountId)` bytes. + #[pallet::constant] + type DepositBase: Get>; + + /// The amount of currency needed per unit threshold when creating a multisig execution. + /// + /// This is held for adding 32 bytes more into a pre-existing storage value. + #[pallet::constant] + type DepositFactor: Get>; + + /// The maximum amount of signatories allowed in the multisig. + #[pallet::constant] + type MaxSignatories: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// The set of open multisig operations. + #[pallet::storage] + pub type Multisigs = StorageDoubleMap< + _, + Twox64Concat, + T::AccountId, + Blake2_128Concat, + [u8; 32], + Multisig, T::AccountId>, + >; + + #[pallet::storage] + pub type Calls = + StorageMap<_, Identity, [u8; 32], (OpaqueCall, T::AccountId, BalanceOf)>; + + #[pallet::error] + pub enum Error { /// Threshold must be 2 or greater. MinimumThreshold, /// Call is already approved by this signatory. @@ -172,53 +197,36 @@ decl_error! { /// A timepoint was given, yet no multisig operation is underway. UnexpectedTimepoint, /// The maximum weight information provided was too low. - WeightTooLow, + MaxWeightTooLow, /// The data to be stored is already stored. AlreadyStored, } -} -decl_event! { - /// Events type. - pub enum Event where - AccountId = ::AccountId, - BlockNumber = ::BlockNumber, - CallHash = [u8; 32] - { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// A new multisig operation has begun. \[approving, multisig, call_hash\] - NewMultisig(AccountId, AccountId, CallHash), + NewMultisig(T::AccountId, T::AccountId, CallHash), /// A multisig operation has been approved by someone. /// \[approving, timepoint, multisig, call_hash\] - MultisigApproval(AccountId, Timepoint, AccountId, CallHash), + MultisigApproval(T::AccountId, Timepoint, T::AccountId, CallHash), /// A multisig operation has been executed. \[approving, timepoint, multisig, call_hash\] - MultisigExecuted(AccountId, Timepoint, AccountId, CallHash, DispatchResult), + MultisigExecuted( + T::AccountId, + Timepoint, + T::AccountId, + CallHash, + DispatchResult, + ), /// A multisig operation has been cancelled. \[cancelling, timepoint, multisig, call_hash\] - MultisigCancelled(AccountId, Timepoint, AccountId, CallHash), + MultisigCancelled(T::AccountId, Timepoint, T::AccountId, CallHash), } -} - -enum CallOrHash { - Call(OpaqueCall, bool), - Hash([u8; 32]), -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; - - /// The base amount of currency needed to reserve for creating a multisig execution or to store - /// a dispatch call for later. - const DepositBase: BalanceOf = T::DepositBase::get(); - - /// The amount of currency needed per unit threshold when creating a multisig execution. - const DepositFactor: BalanceOf = T::DepositFactor::get(); - /// The maximum amount of signatories allowed for a given multisig. 
- const MaxSignatories: u16 = T::MaxSignatories::get(); + #[pallet::hooks] + impl Hooks> for Pallet {} + #[pallet::call] + impl Pallet { /// Immediately dispatch a multi-signature call using a single approval from the caller. /// /// The dispatch origin for this call must be _Signed_. @@ -235,16 +243,20 @@ decl_module! { /// - DB Weight: None /// - Plus Call Weight /// # - #[weight = ( - T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32)) - .saturating_add(call.get_dispatch_info().weight) - // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), - call.get_dispatch_info().class, - )] - fn as_multi_threshold_1(origin, + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32)) + .saturating_add(dispatch_info.weight) + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + dispatch_info.class, + ) + })] + pub fn as_multi_threshold_1( + origin: OriginFor, other_signatories: Vec, - call: Box<::Call>, + call: Box<::Call>, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let max_sigs = T::MaxSignatories::get() as usize; @@ -258,21 +270,26 @@ decl_module! { let call_len = call.using_encoded(|c| c.len()); let result = call.dispatch(RawOrigin::Signed(id).into()); - result.map(|post_dispatch_info| post_dispatch_info.actual_weight - .map(|actual_weight| - T::WeightInfo::as_multi_threshold_1(call_len as u32) - .saturating_add(actual_weight) - ).into() - ).map_err(|err| match err.post_info.actual_weight { - Some(actual_weight) => { - let weight_used = T::WeightInfo::as_multi_threshold_1(call_len as u32) - .saturating_add(actual_weight); - let post_info = Some(weight_used).into(); - let error = err.error.into(); - DispatchErrorWithPostInfo { post_info, error } - }, - None => err, - }) + result + .map(|post_dispatch_info| { + post_dispatch_info + .actual_weight + .map(|actual_weight| { + T::WeightInfo::as_multi_threshold_1(call_len as u32) + .saturating_add(actual_weight) + }) + .into() + }) + .map_err(|err| match err.post_info.actual_weight { + Some(actual_weight) => { + let weight_used = T::WeightInfo::as_multi_threshold_1(call_len as u32) + .saturating_add(actual_weight); + let post_info = Some(weight_used).into(); + let error = err.error.into(); + DispatchErrorWithPostInfo { post_info, error } + }, + None => err, + }) } /// Register approval for a dispatch to be made from a deterministic composite account if @@ -312,16 +329,15 @@ decl_module! { /// - I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove. /// - One event. /// - The weight of the `call`. - /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a - /// deposit taken for its lifetime of - /// `DepositBase + threshold * DepositFactor`. + /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit + /// taken for its lifetime of `DepositBase + threshold * DepositFactor`. /// ------------------------------- /// - DB Weight: /// - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`) /// - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`) /// - Plus Call Weight /// # - #[weight = { + #[pallet::weight({ let s = other_signatories.len() as u32; let z = call.len() as u32; @@ -330,8 +346,9 @@ decl_module! 
{ .max(T::WeightInfo::as_multi_approve(s, z)) .max(T::WeightInfo::as_multi_complete(s, z)) .saturating_add(*max_weight) - }] - fn as_multi(origin, + })] + pub fn as_multi( + origin: OriginFor, threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, @@ -340,7 +357,14 @@ decl_module! { max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - Self::operate(who, threshold, other_signatories, maybe_timepoint, CallOrHash::Call(call, store_call), max_weight) + Self::operate( + who, + threshold, + other_signatories, + maybe_timepoint, + CallOrHash::Call(call, store_call), + max_weight, + ) } /// Register approval for a dispatch to be made from a deterministic composite account if @@ -371,23 +395,23 @@ decl_module! { /// - Up to one binary search and insert (`O(logS + S)`). /// - I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove. /// - One event. - /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a - /// deposit taken for its lifetime of - /// `DepositBase + threshold * DepositFactor`. + /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit + /// taken for its lifetime of `DepositBase + threshold * DepositFactor`. /// ---------------------------------- /// - DB Weight: /// - Read: Multisig Storage, [Caller Account] /// - Write: Multisig Storage, [Caller Account] /// # - #[weight = { + #[pallet::weight({ let s = other_signatories.len() as u32; T::WeightInfo::approve_as_multi_create(s) .max(T::WeightInfo::approve_as_multi_approve(s)) .max(T::WeightInfo::approve_as_multi_complete(s)) .saturating_add(*max_weight) - }] - fn approve_as_multi(origin, + })] + pub fn approve_as_multi( + origin: OriginFor, threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, @@ -395,7 +419,14 @@ decl_module! { max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - Self::operate(who, threshold, other_signatories, maybe_timepoint, CallOrHash::Hash(call_hash), max_weight) + Self::operate( + who, + threshold, + other_signatories, + maybe_timepoint, + CallOrHash::Hash(call_hash), + max_weight, + ) } /// Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously @@ -424,8 +455,9 @@ decl_module! { /// - Read: Multisig Storage, [Caller Account], Refund Account, Calls /// - Write: Multisig Storage, [Caller Account], Refund Account, Calls /// # - #[weight = T::WeightInfo::cancel_as_multi(other_signatories.len() as u32)] - fn cancel_as_multi(origin, + #[pallet::weight(T::WeightInfo::cancel_as_multi(other_signatories.len() as u32))] + pub fn cancel_as_multi( + origin: OriginFor, threshold: u16, other_signatories: Vec, timepoint: Timepoint, @@ -440,22 +472,22 @@ decl_module! 
{ let id = Self::multi_account_id(&signatories, threshold); - let m = >::get(&id, call_hash) - .ok_or(Error::::NotFound)?; + let m = >::get(&id, call_hash).ok_or(Error::::NotFound)?; ensure!(m.when == timepoint, Error::::WrongTimepoint); ensure!(m.depositor == who, Error::::NotOwner); - let _ = T::Currency::unreserve(&m.depositor, m.deposit); + let err_amount = T::Currency::unreserve(&m.depositor, m.deposit); + debug_assert!(err_amount.is_zero()); >::remove(&id, &call_hash); Self::clear_call(&call_hash); - Self::deposit_event(RawEvent::MultisigCancelled(who, timepoint, id, call_hash)); + Self::deposit_event(Event::MultisigCancelled(who, timepoint, id, call_hash)); Ok(()) } } } -impl Module { +impl Pallet { /// Derive a multi-account ID from the sorted list of accounts and the threshold that are /// required. /// @@ -488,7 +520,7 @@ impl Module { let call_hash = blake2_256(&call); let call_len = call.len(); (call_hash, call_len, Some(call), should_store) - } + }, CallOrHash::Hash(h) => (h, 0, None, false), }; @@ -503,16 +535,20 @@ impl Module { // We only bother with the approval if we're below threshold. let maybe_pos = m.approvals.binary_search(&who).err().filter(|_| approvals < threshold); // Bump approvals if not yet voted and the vote is needed. - if maybe_pos.is_some() { approvals += 1; } + if maybe_pos.is_some() { + approvals += 1; + } // We only bother fetching/decoding call if we know that we're ready to execute. let maybe_approved_call = if approvals >= threshold { Self::get_call(&call_hash, maybe_call.as_ref().map(|c| c.as_ref())) - } else { None }; + } else { + None + }; if let Some((call, call_len)) = maybe_approved_call { // verify weight - ensure!(call.get_dispatch_info().weight <= max_weight, Error::::WeightTooLow); + ensure!(call.get_dispatch_info().weight <= max_weight, Error::::MaxWeightTooLow); // Clean up storage before executing call to avoid an possibility of reentrancy // attack. @@ -521,22 +557,34 @@ impl Module { T::Currency::unreserve(&m.depositor, m.deposit); let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); - Self::deposit_event(RawEvent::MultisigExecuted( - who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) + Self::deposit_event(Event::MultisigExecuted( + who, + timepoint, + id, + call_hash, + result.map(|_| ()).map_err(|e| e.error), )); - Ok(get_result_weight(result).map(|actual_weight| - T::WeightInfo::as_multi_complete( - other_signatories_len as u32, - call_len as u32 - ).saturating_add(actual_weight) - ).into()) + Ok(get_result_weight(result) + .map(|actual_weight| { + T::WeightInfo::as_multi_complete( + other_signatories_len as u32, + call_len as u32, + ) + .saturating_add(actual_weight) + }) + .into()) } else { // We cannot dispatch the call now; either it isn't available, or it is, but we // don't have threshold approvals even with our signature. // Store the call if desired. let stored = if let Some(data) = maybe_call.filter(|_| store) { - Self::store_call_and_reserve(who.clone(), &call_hash, data, BalanceOf::::zero())?; + Self::store_call_and_reserve( + who.clone(), + &call_hash, + data, + BalanceOf::::zero(), + )?; true } else { false @@ -546,7 +594,7 @@ impl Module { // Record approval. 
m.approvals.insert(pos, who.clone()); >::insert(&id, call_hash, m); - Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); + Self::deposit_event(Event::MultisigApproval(who, timepoint, id, call_hash)); } else { // If we already approved and didn't store the Call, then this was useless and // we report an error. @@ -559,10 +607,7 @@ impl Module { call_len as u32, ) } else { - T::WeightInfo::as_multi_approve( - other_signatories_len as u32, - call_len as u32, - ) + T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32) }; // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) @@ -583,24 +628,22 @@ impl Module { false }; - >::insert(&id, call_hash, Multisig { - when: Self::timepoint(), - deposit, - depositor: who.clone(), - approvals: vec![who.clone()], - }); - Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); + >::insert( + &id, + call_hash, + Multisig { + when: Self::timepoint(), + deposit, + depositor: who.clone(), + approvals: vec![who.clone()], + }, + ); + Self::deposit_event(Event::NewMultisig(who, id, call_hash)); let final_weight = if stored { - T::WeightInfo::as_multi_create_store( - other_signatories_len as u32, - call_len as u32, - ) + T::WeightInfo::as_multi_create_store(other_signatories_len as u32, call_len as u32) } else { - T::WeightInfo::as_multi_create( - other_signatories_len as u32, - call_len as u32, - ) + T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32) }; // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) @@ -619,22 +662,27 @@ impl Module { other_deposit: BalanceOf, ) -> DispatchResult { ensure!(!Calls::::contains_key(hash), Error::::AlreadyStored); - let deposit = other_deposit + T::DepositBase::get() - + T::DepositFactor::get() * BalanceOf::::from(((data.len() + 31) / 32) as u32); + let deposit = other_deposit + + T::DepositBase::get() + + T::DepositFactor::get() * BalanceOf::::from(((data.len() + 31) / 32) as u32); T::Currency::reserve(&who, deposit)?; Calls::::insert(&hash, (data, who, deposit)); Ok(()) } /// Attempt to decode and return the call, provided by the user or from storage. - fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<(::Call, usize)> { - maybe_known.map_or_else(|| { - Calls::::get(hash).and_then(|(data, ..)| { - Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) - }) - }, |data| { - Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) - }) + fn get_call( + hash: &[u8; 32], + maybe_known: Option<&[u8]>, + ) -> Option<(::Call, usize)> { + maybe_known.map_or_else( + || { + Calls::::get(hash).and_then(|(data, ..)| { + Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) + }) + }, + |data| Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())), + ) } /// Attempt to remove a call from storage, returning any deposit on it to the owner. @@ -647,15 +695,16 @@ impl Module { /// The current `Timepoint`. pub fn timepoint() -> Timepoint { Timepoint { - height: >::block_number(), - index: >::extrinsic_index().unwrap_or_default(), + height: >::block_number(), + index: >::extrinsic_index().unwrap_or_default(), } } /// Check that signatories is sorted and doesn't contain sender, then insert sender. 
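The deposit arithmetic threaded through `operate` and `store_call_and_reserve` above is worth spelling out: opening an operation reserves `DepositBase + threshold * DepositFactor`, and storing the call data adds one `DepositFactor` per started 32-byte chunk, with `(len + 31) / 32` doing the round-up in integer arithmetic. A worked standalone example, assuming `DepositBase = 1` and `DepositFactor = 1`, the values implied by the reserved balances asserted in the tests further down:

// Assumed mock-runtime constants; real runtimes set their own values.
const DEPOSIT_BASE: u64 = 1;
const DEPOSIT_FACTOR: u64 = 1;

// Reserved from the depositor when a multisig operation is opened.
fn open_deposit(threshold: u64) -> u64 {
    DEPOSIT_BASE + threshold * DEPOSIT_FACTOR
}

// Extra reservation when the call data itself is stored on-chain.
fn call_storage_deposit(len: u64) -> u64 {
    DEPOSIT_FACTOR * ((len + 31) / 32)
}

fn main() {
    assert_eq!(open_deposit(2), 3);          // matches reserved_balance(1) == 3 in the tests
    assert_eq!(call_storage_deposit(1), 1);  // a single byte still pays for a full chunk
    assert_eq!(call_storage_deposit(33), 2); // byte 33 starts a second chunk
}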
- fn ensure_sorted_and_insert(other_signatories: Vec, who: T::AccountId) - -> Result, DispatchError> - { + fn ensure_sorted_and_insert( + other_signatories: Vec, + who: T::AccountId, + ) -> Result, DispatchError> { let mut signatories = other_signatories; let mut maybe_last = None; let mut index = 0; diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index ca15e04597eaa..3d311cf5d3dc8 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,46 +21,39 @@ use super::*; -use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event, traits::Filter, -}; +use crate as pallet_multisig; +use frame_support::{assert_noop, assert_ok, parameter_types, traits::Contains}; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use crate as multisig; - -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; -impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - multisig, - } -} -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - frame_system::System, - pallet_balances::Balances, - multisig::Multisig, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, } -} +); -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -70,29 +63,26 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = TestEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -104,18 +94,18 @@ parameter_types! { pub const MaxSignatories: u16 = 3; } pub struct TestBaseCallFilter; -impl Filter for TestBaseCallFilter { - fn filter(c: &Call) -> bool { +impl Contains for TestBaseCallFilter { + fn contains(c: &Call) -> bool { match *c { Call::Balances(_) => true, // Needed for benchmarking - Call::System(frame_system::Call::remark(_)) => true, + Call::System(frame_system::Call::remark { .. 
}) => true, _ => false, } } } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type Currency = Balances; type DepositBase = DepositBase; @@ -123,35 +113,29 @@ impl Trait for Test { type MaxSignatories = MaxSignatories; type WeightInfo = (); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Multisig = Module; -use pallet_balances::Call as BalancesCall; -use pallet_balances::Error as BalancesError; +use pallet_balances::{Call as BalancesCall, Error as BalancesError}; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } -fn last_event() -> TestEvent { - system::Module::::events().pop().map(|e| e.event).expect("Event expected") -} - -fn expect_event>(e: E) { - assert_eq!(last_event(), e.into()); -} - fn now() -> Timepoint { Multisig::timepoint() } +fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) +} + #[test] fn multisig_deposit_is_taken_and_returned() { new_test_ext().execute_with(|| { @@ -160,14 +144,30 @@ fn multisig_deposit_is_taken_and_returned() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); assert_eq!(Balances::free_balance(1), 2); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -181,7 +181,7 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); @@ -189,7 +189,14 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() { assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::reserved_balance(1), 5); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, call_weight)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash, + call_weight + )); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -203,22 +210,44 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = 
Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(1), 1); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), data, true, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + data, + true, + 0 + )); assert_eq!(Balances::free_balance(2), 3); assert_eq!(Balances::reserved_balance(2), 2); assert_eq!(Balances::free_balance(1), 1); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::approve_as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), hash, call_weight)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + hash, + call_weight + )); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::free_balance(2), 5); @@ -229,15 +258,33 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { #[test] fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(1), 6); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!( - Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -251,11 +298,18 @@ fn timepoint_checking_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone(), 0), + Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + ), Error::::UnexpectedTimepoint, ); @@ -265,9 +319,17 @@ fn timepoint_checking_works() { Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone(), false, 0), Error::::NoTimepoint, ); - let later = Timepoint { index: 1, .. 
now() }; + let later = Timepoint { index: 1, ..now() }; assert_noop!( - Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone(), false, 0), + Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(later), + call.clone(), + false, + 0 + ), Error::::WrongTimepoint, ); }); @@ -281,14 +343,21 @@ fn multisig_2_of_3_works_with_call_storing() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data, true, 0)); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, call_weight)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -301,14 +370,22 @@ fn multisig_2_of_3_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -321,15 +398,37 @@ fn multisig_3_of_3_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -337,35 +436,64 @@ fn multisig_3_of_3_works() { #[test] fn cancel_multisig_works() { new_test_ext().execute_with(|| { - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + 
Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_noop!( Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), Error::::NotOwner, ); - assert_ok!( - Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); }); } #[test] fn cancel_multisig_with_call_storage_works() { new_test_ext().execute_with(|| { - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::as_multi(Origin::signed(1), 3, vec![2, 3], None, call, true, 0)); assert_eq!(Balances::free_balance(1), 4); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_noop!( Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), Error::::NotOwner, ); - assert_ok!( - Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); assert_eq!(Balances::free_balance(1), 10); }); } @@ -373,11 +501,26 @@ fn cancel_multisig_with_call_storage_works() { #[test] fn cancel_multisig_with_alt_call_storage_works() { new_test_ext().execute_with(|| { - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(1), 6); - assert_ok!(Multisig::as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), call, true, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + call, + true, + 0 + )); assert_eq!(Balances::free_balance(2), 8); assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); assert_eq!(Balances::free_balance(1), 10); @@ -393,13 +536,29 @@ fn multisig_2_of_3_as_multi_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -412,17 +571,49 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call1 = 
Call::Balances(BalancesCall::transfer(6, 10)); + let call1 = call_transfer(6, 10); let call1_weight = call1.get_dispatch_info().weight; let data1 = call1.encode(); - let call2 = Call::Balances(BalancesCall::transfer(7, 5)); + let call2 = call_transfer(7, 5); let call2_weight = call2.get_dispatch_info().weight; let data2 = call2.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data1.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, data2.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data1, false, call1_weight)); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data2, false, call2_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data1.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + None, + data2.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + data1, + false, + call1_weight + )); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + data2, + false, + call2_weight + )); assert_eq!(Balances::free_balance(6), 10); assert_eq!(Balances::free_balance(7), 5); @@ -437,26 +628,60 @@ fn multisig_2_of_3_cannot_reissue_same_call() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 10)); + let call = call_transfer(6, 10); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data.clone(), false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data.clone(), + false, + call_weight + )); assert_eq!(Balances::free_balance(multi), 5); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data.clone(), false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + data.clone(), + false, + call_weight + )); let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); - expect_event(RawEvent::MultisigExecuted(3, now(), multi, hash, Err(err))); + System::assert_last_event( + pallet_multisig::Event::MultisigExecuted(3, now(), multi, hash, Err(err)).into(), + ); }); } #[test] fn minimum_threshold_check_works() { new_test_ext().execute_with(|| { - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); assert_noop!( Multisig::as_multi(Origin::signed(1), 0, vec![2], None, call.clone(), false, 0), Error::::MinimumThreshold, @@ -471,7 +696,7 @@ fn minimum_threshold_check_works() { #[test] fn too_many_signatories_fails() { new_test_ext().execute_with(|| { - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); assert_noop!( 
Multisig::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone(), false, 0), Error::::TooManySignatories, @@ -482,16 +707,44 @@ fn too_many_signatories_fails() { #[test] fn duplicate_approvals_are_ignored() { new_test_ext().execute_with(|| { - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash.clone(), + 0 + )); assert_noop!( - Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone(), 0), + Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + Some(now()), + hash.clone(), + 0 + ), Error::::AlreadyApproved, ); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_noop!( - Multisig::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone(), 0), + Multisig::approve_as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + hash.clone(), + 0 + ), Error::::AlreadyApproved, ); }); @@ -505,7 +758,7 @@ fn multisig_1_of_3_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); + let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_noop!( Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone(), 0), @@ -515,7 +768,7 @@ fn multisig_1_of_3_works() { Multisig::as_multi(Origin::signed(1), 1, vec![2, 3], None, call.clone(), false, 0), Error::::MinimumThreshold, ); - let boxed_call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let boxed_call = Box::new(call_transfer(6, 15)); assert_ok!(Multisig::as_multi_threshold_1(Origin::signed(1), vec![2, 3], boxed_call)); assert_eq!(Balances::free_balance(6), 15); @@ -525,7 +778,7 @@ fn multisig_1_of_3_works() { #[test] fn multisig_filters() { new_test_ext().execute_with(|| { - let call = Box::new(Call::System(frame_system::Call::set_code(vec![]))); + let call = Box::new(Call::System(frame_system::Call::set_code { code: vec![] })); assert_noop!( Multisig::as_multi_threshold_1(Origin::signed(1), vec![2], call.clone()), DispatchError::BadOrigin, @@ -541,38 +794,76 @@ fn weight_check_works() { assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let data = call.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); assert_eq!(Balances::free_balance(6), 0); assert_noop!( Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, 0), - Error::::WeightTooLow, + Error::::MaxWeightTooLow, ); }); } #[test] fn multisig_handles_no_preimage_after_all_approve() { - // This test checks the situation where everyone approves a multi-sig, but no-one provides the call data. - // In the end, any of the multisig callers can approve again with the call data and the call will go through. 
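The test that follows exercises the hash-first flow: every co-signer can approve knowing only the 32-byte call hash, and whoever finally supplies the call data triggers dispatch. That hash is BLAKE2b-256 over the SCALE-encoded call, so any party can recompute it offline. A minimal sketch using `sp_io::hashing::blake2_256`, with an illustrative stand-in call type rather than the runtime's real `Call`:

use codec::Encode;
use sp_io::hashing::blake2_256;

// Stand-in for a real runtime call; only its SCALE encoding matters here.
#[derive(Encode)]
struct Transfer {
    dest: u64,
    value: u64,
}

fn main() {
    let call = Transfer { dest: 6, value: 15 };
    let data = call.encode();

    // Signers 1 and 2 approve with `hash` alone (approve_as_multi); signer 3
    // later submits `data` (as_multi), which must hash back to the same
    // 32 bytes before anything is dispatched.
    let hash: [u8; 32] = blake2_256(&data);

    // Tampered call data can never satisfy a previously approved hash.
    let mut tampered = data.clone();
    tampered[0] ^= 1;
    assert_ne!(blake2_256(&tampered), hash);
}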
+ // This test checks the situation where everyone approves a multi-sig, but no-one provides the + // call data. In the end, any of the multisig callers can approve again with the call data and + // the call will go through. new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - let call = Call::Balances(BalancesCall::transfer(6, 15)); + let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs new file mode 100644 index 0000000000000..1bc72d251808f --- /dev/null +++ b/frame/multisig/src/weights.rs @@ -0,0 +1,263 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_multisig +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_multisig +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/multisig/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_multisig. 
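Everything from here down is generator output; hand edits would be overwritten on the next benchmark run. Each function is a polynomial in the benchmark components plus the declared database traffic, costed against `T::DbWeight` (or `RocksDbWeight` in the `()` fallback). A worked example of how the generated `as_multi_create` evaluates, assuming RocksDbWeight costs of 25_000_000 per read and 100_000_000 per write (the usual defaults of that era, not stated in this diff):

// Assumed database costs; the real values come from RocksDbWeight.
const READ: u64 = 25_000_000;
const WRITE: u64 = 100_000_000;

// Mirrors the generated `as_multi_create`: a base cost, a per-signatory
// slope, a per-byte slope, then 2 declared reads and 1 declared write.
fn as_multi_create(s: u64, z: u64) -> u64 {
    54_364_000 + 163_000 * s + 2_000 * z + 2 * READ + WRITE
}

fn main() {
    // 3 co-signatories and a 100-byte call: 205_053_000 weight units,
    // roughly 205 microseconds at the conventional 10^12 weight per second.
    assert_eq!(as_multi_create(3, 100), 205_053_000);
}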
+pub trait WeightInfo { + fn as_multi_threshold_1(z: u32, ) -> Weight; + fn as_multi_create(s: u32, z: u32, ) -> Weight; + fn as_multi_create_store(s: u32, z: u32, ) -> Weight; + fn as_multi_approve(s: u32, z: u32, ) -> Weight; + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight; + fn as_multi_complete(s: u32, z: u32, ) -> Weight; + fn approve_as_multi_create(s: u32, ) -> Weight; + fn approve_as_multi_approve(s: u32, ) -> Weight; + fn approve_as_multi_complete(s: u32, ) -> Weight; + fn cancel_as_multi(s: u32, ) -> Weight; +} + +/// Weights for pallet_multisig using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn as_multi_threshold_1(z: u32, ) -> Weight { + (19_405_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn as_multi_create(s: u32, z: u32, ) -> Weight { + (54_364_000 as Weight) + // Standard Error: 0 + .saturating_add((163_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn as_multi_create_store(s: u32, z: u32, ) -> Weight { + (59_545_000 as Weight) + // Standard Error: 0 + .saturating_add((168_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + (32_721_000 as Weight) + // Standard Error: 0 + .saturating_add((176_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { + (56_596_000 as Weight) + // Standard Error: 1_000 + .saturating_add((183_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + (72_391_000 as Weight) + // Standard Error: 1_000 + .saturating_add((268_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn approve_as_multi_create(s: u32, ) -> Weight { + (52_543_000 as Weight) + // Standard Error: 0 + .saturating_add((164_000 as Weight).saturating_mul(s as Weight)) + 
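// Two storage reads (the multisig entry and the extrinsic index) and one write, + // priced via T::DbWeight per the storage annotations above: +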
.saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:0) + fn approve_as_multi_approve(s: u32, ) -> Weight { + (30_764_000 as Weight) + // Standard Error: 0 + .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn approve_as_multi_complete(s: u32, ) -> Weight { + (113_631_000 as Weight) + // Standard Error: 3_000 + .saturating_add((283_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + fn cancel_as_multi(s: u32, ) -> Weight { + (86_310_000 as Weight) + // Standard Error: 0 + .saturating_add((166_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn as_multi_threshold_1(z: u32, ) -> Weight { + (19_405_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn as_multi_create(s: u32, z: u32, ) -> Weight { + (54_364_000 as Weight) + // Standard Error: 0 + .saturating_add((163_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn as_multi_create_store(s: u32, z: u32, ) -> Weight { + (59_545_000 as Weight) + // Standard Error: 0 + .saturating_add((168_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + (32_721_000 as Weight) + // Standard Error: 0 + .saturating_add((176_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { + (56_596_000 as Weight) + // Standard Error: 1_000 + .saturating_add((183_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn 
as_multi_complete(s: u32, z: u32, ) -> Weight { + (72_391_000 as Weight) + // Standard Error: 1_000 + .saturating_add((268_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + fn approve_as_multi_create(s: u32, ) -> Weight { + (52_543_000 as Weight) + // Standard Error: 0 + .saturating_add((164_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:0) + fn approve_as_multi_approve(s: u32, ) -> Weight { + (30_764_000 as Weight) + // Standard Error: 0 + .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn approve_as_multi_complete(s: u32, ) -> Weight { + (113_631_000 as Weight) + // Standard Error: 3_000 + .saturating_add((283_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Multisig Multisigs (r:1 w:1) + // Storage: Multisig Calls (r:1 w:1) + fn cancel_as_multi(s: u32, ) -> Weight { + (86_310_000 as Weight) + // Standard Error: 0 + .saturating_add((166_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 8f348d665b7e3..431ee2c84157c 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-nicks" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,26 +13,27 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { 
version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-std/std", "sp-io/std", "sp-runtime/std", "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/nicks/README.md b/frame/nicks/README.md index b4c88eff43152..a2a897b044f10 100644 --- a/frame/nicks/README.md +++ b/frame/nicks/README.md @@ -1,6 +1,6 @@ # Nicks Module -- [`nicks::Trait`](https://docs.rs/pallet-nicks/latest/pallet_nicks/trait.Trait.html) +- [`nicks::Config`](https://docs.rs/pallet-nicks/latest/pallet_nicks/trait.Config.html) - [`Call`](https://docs.rs/pallet-nicks/latest/pallet_nicks/enum.Call.html) ## Overview @@ -20,6 +20,6 @@ have not been designed to be economically secure. Do not use this pallet as-is i * `kill_name` - Forcibly remove the associated name; the deposit is lost. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index ddeadfb7680fe..16c7e2042dda0 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,16 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Nicks Module +//! # Nicks Pallet //! -//! - [`nicks::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! Nicks is an example module for keeping track of account names on-chain. It makes no effort to +//! Nicks is an example pallet for keeping track of account names on-chain. It makes no effort to //! create a name hierarchy, be a DNS replacement or provide reverse lookups. Furthermore, the -//! weights attached to this module's dispatchable functions are for demonstration purposes only and +//! weights attached to this pallet's dispatchable functions are for demonstration purposes only and //! have not been designed to be economically secure. Do not use this pallet as-is in production. //! //! ## Interface @@ -37,71 +37,76 @@ //! * `kill_name` - Forcibly remove the associated name; the deposit is lost. //! //! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! 
[`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] +use frame_support::traits::{Currency, OnUnbalanced, ReservableCurrency}; +pub use pallet::*; +use sp_runtime::traits::{StaticLookup, Zero}; use sp_std::prelude::*; -use sp_runtime::{ - traits::{StaticLookup, Zero} -}; -use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, - traits::{Currency, EnsureOrigin, ReservableCurrency, OnUnbalanced, Get}, -}; -use frame_system::ensure_signed; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + ensure, + pallet_prelude::*, + traits::{EnsureOrigin, Get}, + }; + use frame_system::{ensure_signed, pallet_prelude::*}; - /// The currency trait. - type Currency: ReservableCurrency; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// Reservation fee. - type ReservationFee: Get>; + /// The currency trait. + type Currency: ReservableCurrency; - /// What to do with slashed funds. - type Slashed: OnUnbalanced>; + /// Reservation fee. + #[pallet::constant] + type ReservationFee: Get>; - /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + /// What to do with slashed funds. + type Slashed: OnUnbalanced>; - /// The minimum length a name may be. - type MinLength: Get; + /// The origin which may forcibly set or remove a name. Root can always do this. + type ForceOrigin: EnsureOrigin; - /// The maximum length a name may be. - type MaxLength: Get; -} + /// The minimum length a name may be. + #[pallet::constant] + type MinLength: Get; -decl_storage! { - trait Store for Module as Nicks { - /// The lookup table for names. - NameOf: map hasher(twox_64_concat) T::AccountId => Option<(Vec, BalanceOf)>; + /// The maximum length a name may be. + #[pallet::constant] + type MaxLength: Get; } -} -decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// A name was set. \[who\] - NameSet(AccountId), + NameSet(T::AccountId), /// A name was forcibly set. \[target\] - NameForced(AccountId), + NameForced(T::AccountId), /// A name was changed. \[who\] - NameChanged(AccountId), + NameChanged(T::AccountId), /// A name was cleared, and the given balance returned. \[who, deposit\] - NameCleared(AccountId, Balance), + NameCleared(T::AccountId, BalanceOf), /// A name was removed and the given balance slashed. \[target, deposit\] - NameKilled(AccountId, Balance), + NameKilled(T::AccountId, BalanceOf), } -); -decl_error! { - /// Error for the nicks module. - pub enum Error for Module { + /// Error for the nicks pallet. + #[pallet::error] + pub enum Error { /// A name is too short. TooShort, /// A name is too long. @@ -109,24 +114,18 @@ decl_error! { /// An account isn't named. Unnamed, } -} - -decl_module! { - /// Nicks module declaration. 
- pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - fn deposit_event() = default; + /// The lookup table for names. + #[pallet::storage] + pub(super) type NameOf = + StorageMap<_, Twox64Concat, T::AccountId, (Vec, BalanceOf)>; - /// Reservation fee. - const ReservationFee: BalanceOf = T::ReservationFee::get(); - - /// The minimum length a name may be. - const MinLength: u32 = T::MinLength::get() as u32; - - /// The maximum length a name may be. - const MaxLength: u32 = T::MaxLength::get() as u32; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + #[pallet::call] + impl Pallet { /// Set an account's name. The name should be a UTF-8-encoded string by convention, though /// we don't check it. /// @@ -143,24 +142,25 @@ decl_module! { /// - One storage read/write. /// - One event. /// # - #[weight = 50_000_000] - fn set_name(origin, name: Vec) { + #[pallet::weight(50_000_000)] + pub fn set_name(origin: OriginFor, name: Vec) -> DispatchResult { let sender = ensure_signed(origin)?; - ensure!(name.len() >= T::MinLength::get(), Error::::TooShort); - ensure!(name.len() <= T::MaxLength::get(), Error::::TooLong); + ensure!(name.len() >= T::MinLength::get() as usize, Error::::TooShort); + ensure!(name.len() <= T::MaxLength::get() as usize, Error::::TooLong); let deposit = if let Some((_, deposit)) = >::get(&sender) { - Self::deposit_event(RawEvent::NameChanged(sender.clone())); + Self::deposit_event(Event::::NameChanged(sender.clone())); deposit } else { let deposit = T::ReservationFee::get(); T::Currency::reserve(&sender, deposit.clone())?; - Self::deposit_event(RawEvent::NameSet(sender.clone())); + Self::deposit_event(Event::::NameSet(sender.clone())); deposit }; >::insert(&sender, (name, deposit)); + Ok(()) } /// Clear an account's name and return the deposit. Fails if the account was not named. @@ -173,15 +173,17 @@ decl_module! { /// - One storage read/write. /// - One event. /// # - #[weight = 70_000_000] - fn clear_name(origin) { + #[pallet::weight(70_000_000)] + pub fn clear_name(origin: OriginFor) -> DispatchResult { let sender = ensure_signed(origin)?; let deposit = >::take(&sender).ok_or(Error::::Unnamed)?.1; - let _ = T::Currency::unreserve(&sender, deposit.clone()); + let err_amount = T::Currency::unreserve(&sender, deposit.clone()); + debug_assert!(err_amount.is_zero()); - Self::deposit_event(RawEvent::NameCleared(sender, deposit)); + Self::deposit_event(Event::::NameCleared(sender, deposit)); + Ok(()) } /// Remove an account's name and take charge of the deposit. @@ -197,8 +199,11 @@ decl_module! { /// - One storage read/write. /// - One event. /// # - #[weight = 70_000_000] - fn kill_name(origin, target: ::Source) { + #[pallet::weight(70_000_000)] + pub fn kill_name( + origin: OriginFor, + target: ::Source, + ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; // Figure out who we're meant to be clearing. @@ -208,7 +213,8 @@ decl_module! { // Slash their deposit from them. T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit.clone()).0); - Self::deposit_event(RawEvent::NameKilled(target, deposit)); + Self::deposit_event(Event::::NameKilled(target, deposit)); + Ok(()) } /// Set a third-party account's name with no deposit. @@ -223,15 +229,20 @@ decl_module! { /// - One storage read/write. /// - One event. 
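/// - A fixed weight of 70_000_000 is charged (a demonstration value, not benchmarked).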
/// # - #[weight = 70_000_000] - fn force_name(origin, target: ::Source, name: Vec) { + #[pallet::weight(70_000_000)] + pub fn force_name( + origin: OriginFor, + target: ::Source, + name: Vec, + ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let target = T::Lookup::lookup(target)?; let deposit = >::get(&target).map(|x| x.1).unwrap_or_else(Zero::zero); >::insert(&target, (name, deposit)); - Self::deposit_event(RawEvent::NameForced(target)); + Self::deposit_event(Event::::NameForced(target)); + Ok(()) } } } @@ -239,63 +250,70 @@ decl_module! { #[cfg(test)] mod tests { use super::*; + use crate as pallet_nicks; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types - }; - use sp_core::H256; + use frame_support::{assert_noop, assert_ok, ord_parameter_types, parameter_types}; use frame_system::EnsureSignedBy; + use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, }; - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Nicks: pallet_nicks::{Pallet, Call, Storage, Event}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { - type BaseCallFilter = (); + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -303,14 +321,14 @@ mod tests { } parameter_types! 
{ pub const ReservationFee: u64 = 2; - pub const MinLength: usize = 3; - pub const MaxLength: usize = 16; + pub const MinLength: u32 = 3; + pub const MaxLength: u32 = 16; } ord_parameter_types! { pub const One: u64 = 1; } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type Currency = Balances; type ReservationFee = ReservationFee; type Slashed = (); @@ -318,18 +336,12 @@ mod tests { type MinLength = MinLength; type MaxLength = MaxLength; } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Nicks = Module; fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10), - (2, 10), - ], - }.assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10)] } + .assimilate_storage(&mut t) + .unwrap(); t.into() } @@ -389,7 +401,10 @@ mod tests { pallet_balances::Error::::InsufficientBalance ); - assert_noop!(Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), Error::::TooShort); + assert_noop!( + Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), + Error::::TooShort + ); assert_noop!( Nicks::set_name(Origin::signed(1), b"Gavin James Wood, Esquire".to_vec()), Error::::TooLong diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index 1448e99bd2a14..635e72e3a8b8a 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-node-authorization" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,24 +12,27 @@ description = "FRAME pallet for node authorization" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +log = { version = "0.4.14", default-features = false } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "frame-support/std", "frame-system/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", + "log/std", 
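+# "log" supplies the offchain worker diagnostics previously emitted through frame_support::debug.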
] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 91f89ad1d9100..016f12d2eb838 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,119 +37,119 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub mod weights; + +pub use pallet::*; use sp_core::OpaquePeerId as PeerId; -use sp_std::{ - collections::btree_set::BTreeSet, - iter::FromIterator, - prelude::*, -}; -use codec::Decode; -use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, - debug, ensure, - weights::{DispatchClass, Weight}, - traits::{Get, EnsureOrigin}, -}; -use frame_system::ensure_signed; - -pub trait WeightInfo { - fn add_well_known_node() -> Weight; - fn remove_well_known_node() -> Weight; - fn swap_well_known_node() -> Weight; - fn reset_well_known_nodes() -> Weight; - fn claim_node() -> Weight; - fn remove_claim() -> Weight; - fn transfer_node() -> Weight; - fn add_connections() -> Weight; - fn remove_connections() -> Weight; -} +use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*}; +pub use weights::WeightInfo; -impl WeightInfo for () { - fn add_well_known_node() -> Weight { 50_000_000 } - fn remove_well_known_node() -> Weight { 50_000_000 } - fn swap_well_known_node() -> Weight { 50_000_000 } - fn reset_well_known_nodes() -> Weight { 50_000_000 } - fn claim_node() -> Weight { 50_000_000 } - fn remove_claim() -> Weight { 50_000_000 } - fn transfer_node() -> Weight { 50_000_000 } - fn add_connections() -> Weight { 50_000_000 } - fn remove_connections() -> Weight { 50_000_000 } -} +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; + use frame_system::pallet_prelude::*; -pub trait Trait: frame_system::Trait { - /// The event type of this module. - type Event: From> + Into<::Event>; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// The maximum number of well known nodes that are allowed to set - type MaxWellKnownNodes: Get; + /// The module configuration trait + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// The maximum length in bytes of PeerId - type MaxPeerIdLength: Get; + /// The maximum number of well known nodes that are allowed to set + #[pallet::constant] + type MaxWellKnownNodes: Get; - /// The origin which can add a well known node. - type AddOrigin: EnsureOrigin; + /// The maximum length in bytes of PeerId + #[pallet::constant] + type MaxPeerIdLength: Get; - /// The origin which can remove a well known node. - type RemoveOrigin: EnsureOrigin; + /// The origin which can add a well known node. + type AddOrigin: EnsureOrigin; - /// The origin which can swap the well known nodes. - type SwapOrigin: EnsureOrigin; + /// The origin which can remove a well known node. + type RemoveOrigin: EnsureOrigin; - /// The origin which can reset the well known nodes. - type ResetOrigin: EnsureOrigin; + /// The origin which can swap the well known nodes. 
+ type SwapOrigin: EnsureOrigin; - /// The origin which can reset the well known nodes. - type ResetOrigin: EnsureOrigin; + /// The origin which can reset the well known nodes. + type ResetOrigin: EnsureOrigin; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} -decl_storage! { - trait Store for Module as NodeAuthorization { - /// The set of well known nodes. This is stored sorted (just by value). - pub WellKnownNodes get(fn well_known_nodes): BTreeSet; - /// A map that maintains the ownership of each node. - pub Owners get(fn owners): - map hasher(blake2_128_concat) PeerId => T::AccountId; - /// The additional adapative connections of each node. - pub AdditionalConnections get(fn additional_connection): - map hasher(blake2_128_concat) PeerId => BTreeSet; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } - add_extra_genesis { - config(nodes): Vec<(PeerId, T::AccountId)>; - build(|config: &GenesisConfig| { - >::initialize_nodes(&config.nodes) - }) + + /// The set of well known nodes. This is stored sorted (just by value). + #[pallet::storage] + #[pallet::getter(fn well_known_nodes)] + pub type WellKnownNodes = StorageValue<_, BTreeSet, ValueQuery>; + + /// A map that maintains the ownership of each node. + #[pallet::storage] + #[pallet::getter(fn owners)] + pub type Owners = StorageMap<_, Blake2_128Concat, PeerId, T::AccountId>; + + /// The additional adaptive connections of each node. + #[pallet::storage] + #[pallet::getter(fn additional_connection)] + pub type AdditionalConnections = + StorageMap<_, Blake2_128Concat, PeerId, BTreeSet, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub nodes: Vec<(PeerId, T::AccountId)>, } -decl_event! { - pub enum Event where - ::AccountId, - { + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { nodes: Vec::new() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_nodes(&self.nodes); + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// The given well known node was added. - NodeAdded(PeerId, AccountId), + NodeAdded(PeerId, T::AccountId), /// The given well known node was removed. NodeRemoved(PeerId), /// The given well known node was swapped; first item was removed, /// the latter was added. NodeSwapped(PeerId, PeerId), /// The given well known nodes were reset. - NodesReset(Vec<(PeerId, AccountId)>), + NodesReset(Vec<(PeerId, T::AccountId)>), /// The given node was claimed by a user. - NodeClaimed(PeerId, AccountId), + NodeClaimed(PeerId, T::AccountId), /// The given claim was removed by its owner. - ClaimRemoved(PeerId, AccountId), + ClaimRemoved(PeerId, T::AccountId), /// The node was transferred to another account. - NodeTransferred(PeerId, AccountId), + NodeTransferred(PeerId, T::AccountId), /// The allowed connections were added to a node. ConnectionsAdded(PeerId, Vec), /// The allowed connections were removed from a node. ConnectionsRemoved(PeerId, Vec), } -} -decl_error! { - /// Error for the node authorization module. - pub enum Error for Module { + #[pallet::error] + pub enum Error { /// The PeerId is too long. PeerIdTooLong, /// Too many well known nodes. @@ -167,41 +167,65 @@ decl_error! { /// No permission to perform a specific operation. PermissionDenied, } -} - -decl_module!
{ - pub struct Module for enum Call where origin: T::Origin { - /// The maximum number of authorized well known nodes - const MaxWellKnownNodes: u32 = T::MaxWellKnownNodes::get(); - - /// The maximum length in bytes of PeerId - const MaxPeerIdLength: u32 = T::MaxPeerIdLength::get(); - type Error = Error; - - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet { + /// Set reserved node every block. It may not be enabled, depending on the offchain + /// worker settings when starting the node. + fn offchain_worker(now: T::BlockNumber) { + let network_state = sp_io::offchain::network_state(); + match network_state { + Err(_) => log::error!( + target: "runtime::node-authorization", + "Error: failed to get network state of node at {:?}", + now, + ), + Ok(state) => { + let encoded_peer = state.peer_id.0; + match Decode::decode(&mut &encoded_peer[..]) { + Err(_) => log::error!( + target: "runtime::node-authorization", + "Error: failed to decode PeerId at {:?}", + now, + ), + Ok(node) => sp_io::offchain::set_authorized_nodes( + Self::get_authorized_nodes(&PeerId(node)), + true, + ), + } + }, + } + } + } + #[pallet::call] + impl Pallet { /// Add a node to the set of well known nodes. If the node is already claimed, the owner /// will be updated and the existing additional connections kept unchanged. /// /// May only be called from `T::AddOrigin`. /// /// - `node`: identifier of the node. - #[weight = (T::WeightInfo::add_well_known_node(), DispatchClass::Operational)] - pub fn add_well_known_node(origin, node: PeerId, owner: T::AccountId) { + #[pallet::weight((T::WeightInfo::add_well_known_node(), DispatchClass::Operational))] + pub fn add_well_known_node( + origin: OriginFor, + node: PeerId, + owner: T::AccountId, + ) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - let mut nodes = WellKnownNodes::get(); + let mut nodes = WellKnownNodes::::get(); ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); ensure!(!nodes.contains(&node), Error::::AlreadyJoined); nodes.insert(node.clone()); - WellKnownNodes::put(&nodes); + WellKnownNodes::::put(&nodes); >::insert(&node, &owner); - Self::deposit_event(RawEvent::NodeAdded(node, owner)); + Self::deposit_event(Event::NodeAdded(node, owner)); + Ok(()) } /// Remove a node from the set of well known nodes. The ownership and additional @@ -210,21 +234,22 @@ decl_module! { /// May only be called from `T::RemoveOrigin`. /// /// - `node`: identifier of the node. - #[weight = (T::WeightInfo::remove_well_known_node(), DispatchClass::Operational)] - pub fn remove_well_known_node(origin, node: PeerId) { + #[pallet::weight((T::WeightInfo::remove_well_known_node(), DispatchClass::Operational))] + pub fn remove_well_known_node(origin: OriginFor, node: PeerId) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - let mut nodes = WellKnownNodes::get(); + let mut nodes = WellKnownNodes::::get(); ensure!(nodes.contains(&node), Error::::NotExist); nodes.remove(&node); - WellKnownNodes::put(&nodes); + WellKnownNodes::::put(&nodes); >::remove(&node); - AdditionalConnections::remove(&node); + AdditionalConnections::::remove(&node); - Self::deposit_event(RawEvent::NodeRemoved(node)); + Self::deposit_event(Event::NodeRemoved(node)); + Ok(()) } /// Swap a well known node to another.
Both the ownership and additional connections @@ -234,26 +259,33 @@ decl_module! { /// /// - `remove`: the node which will be moved out from the list. /// - `add`: the node which will be put in the list. - #[weight = (T::WeightInfo::swap_well_known_node(), DispatchClass::Operational)] - pub fn swap_well_known_node(origin, remove: PeerId, add: PeerId) { + #[pallet::weight((T::WeightInfo::swap_well_known_node(), DispatchClass::Operational))] + pub fn swap_well_known_node( + origin: OriginFor, + remove: PeerId, + add: PeerId, + ) -> DispatchResult { T::SwapOrigin::ensure_origin(origin)?; ensure!(remove.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); ensure!(add.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - if remove == add { return Ok(()) } + if remove == add { + return Ok(()) + } - let mut nodes = WellKnownNodes::get(); + let mut nodes = WellKnownNodes::::get(); ensure!(nodes.contains(&remove), Error::::NotExist); ensure!(!nodes.contains(&add), Error::::AlreadyJoined); nodes.remove(&remove); nodes.insert(add.clone()); - WellKnownNodes::put(&nodes); + WellKnownNodes::::put(&nodes); Owners::::swap(&remove, &add); - AdditionalConnections::swap(&remove, &add); + AdditionalConnections::::swap(&remove, &add); - Self::deposit_event(RawEvent::NodeSwapped(remove, add)); + Self::deposit_event(Event::NodeSwapped(remove, add)); + Ok(()) } /// Reset all the well known nodes. This will not remove the ownership and additional @@ -263,29 +295,34 @@ decl_module! { /// May only be called from `T::ResetOrigin`. /// /// - `nodes`: the new nodes for the allow list. - #[weight = (T::WeightInfo::reset_well_known_nodes(), DispatchClass::Operational)] - pub fn reset_well_known_nodes(origin, nodes: Vec<(PeerId, T::AccountId)>) { + #[pallet::weight((T::WeightInfo::reset_well_known_nodes(), DispatchClass::Operational))] + pub fn reset_well_known_nodes( + origin: OriginFor, + nodes: Vec<(PeerId, T::AccountId)>, + ) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); - + Self::initialize_nodes(&nodes); - Self::deposit_event(RawEvent::NodesReset(nodes)); + Self::deposit_event(Event::NodesReset(nodes)); + Ok(()) } /// A given node can be claimed by anyone. The owner should be the first to know its /// PeerId, so claim it right away! /// /// - `node`: identifier of the node. - #[weight = T::WeightInfo::claim_node()] - pub fn claim_node(origin, node: PeerId) { + #[pallet::weight(T::WeightInfo::claim_node())] + pub fn claim_node(origin: OriginFor, node: PeerId) -> DispatchResult { let sender = ensure_signed(origin)?; - + ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(!Owners::::contains_key(&node),Error::::AlreadyClaimed); + ensure!(!Owners::::contains_key(&node), Error::::AlreadyClaimed); Owners::::insert(&node, &sender); - Self::deposit_event(RawEvent::NodeClaimed(node, sender)); + Self::deposit_event(Event::NodeClaimed(node, sender)); + Ok(()) } /// A claim can be removed by its owner and get back the reservation. The additional /// connections are also removed. You can't remove a claim on well known nodes, as it /// needs to reach consensus among the network participants. /// /// - `node`: identifier of the node.
- #[weight = T::WeightInfo::remove_claim()] - pub fn remove_claim(origin, node: PeerId) { + #[pallet::weight(T::WeightInfo::remove_claim())] + pub fn remove_claim(origin: OriginFor, node: PeerId) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); - ensure!(!WellKnownNodes::get().contains(&node), Error::::PermissionDenied); + let owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(owner == sender, Error::::NotOwner); + ensure!(!WellKnownNodes::::get().contains(&node), Error::::PermissionDenied); Owners::::remove(&node); - AdditionalConnections::remove(&node); + AdditionalConnections::::remove(&node); - Self::deposit_event(RawEvent::ClaimRemoved(node, sender)); + Self::deposit_event(Event::ClaimRemoved(node, sender)); + Ok(()) } /// A node can be transferred to a new owner. /// /// - `node`: identifier of the node. /// - `owner`: new owner of the node. - #[weight = T::WeightInfo::transfer_node()] - pub fn transfer_node(origin, node: PeerId, owner: T::AccountId) { + #[pallet::weight(T::WeightInfo::transfer_node())] + pub fn transfer_node( + origin: OriginFor, + node: PeerId, + owner: T::AccountId, + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); + let pre_owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(pre_owner == sender, Error::::NotOwner); Owners::::insert(&node, &owner); - Self::deposit_event(RawEvent::NodeTransferred(node, owner)); + Self::deposit_event(Event::NodeTransferred(node, owner)); + Ok(()) } /// Add additional connections to a given node. /// /// - `node`: identifier of the node. /// - `connections`: additional nodes from which the connections are allowed. - #[weight = T::WeightInfo::add_connections()] + #[pallet::weight(T::WeightInfo::add_connections())] pub fn add_connections( - origin, + origin: OriginFor, node: PeerId, - connections: Vec - ) { + connections: Vec, + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); + let owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(owner == sender, Error::::NotOwner); - let mut nodes = AdditionalConnections::get(&node); + let mut nodes = AdditionalConnections::::get(&node); for add_node in connections.iter() { if *add_node == node { - continue; + continue } nodes.insert(add_node.clone()); } - AdditionalConnections::insert(&node, nodes); + AdditionalConnections::::insert(&node, nodes); - Self::deposit_event(RawEvent::ConnectionsAdded(node, connections)); + Self::deposit_event(Event::ConnectionsAdded(node, connections)); + Ok(()) } /// Remove additional connections of a given node. /// /// - `node`: identifier of the node. /// - `connections`: additional nodes from which the connections are not allowed anymore.
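///
/// Note: this only shrinks the node's additional connections set; the well known list itself
/// is never modified here.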
- #[weight = T::WeightInfo::remove_connections()] + #[pallet::weight(T::WeightInfo::remove_connections())] pub fn remove_connections( - origin, + origin: OriginFor, node: PeerId, - connections: Vec - ) { + connections: Vec, + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(Owners::::contains_key(&node), Error::::NotClaimed); - ensure!(Owners::::get(&node) == sender, Error::::NotOwner); + let owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; + ensure!(owner == sender, Error::::NotOwner); - let mut nodes = AdditionalConnections::get(&node); + let mut nodes = AdditionalConnections::::get(&node); for remove_node in connections.iter() { nodes.remove(remove_node); } - AdditionalConnections::insert(&node, nodes); + AdditionalConnections::::insert(&node, nodes); - Self::deposit_event(RawEvent::ConnectionsRemoved(node, connections)); - } - - /// Set reserved node every block. It may not be enabled depends on the offchain - /// worker settings when starting the node. - fn offchain_worker(now: T::BlockNumber) { - let network_state = sp_io::offchain::network_state(); - match network_state { - Err(_) => debug::error!("Error: failed to get network state of node at {:?}", now), - Ok(state) => { - let encoded_peer = state.peer_id.0; - match Decode::decode(&mut &encoded_peer[..]) { - Err(_) => debug::error!("Error: failed to decode PeerId at {:?}", now), - Ok(node) => sp_io::offchain::set_authorized_nodes( - Self::get_authorized_nodes(&PeerId(node)), - true - ) - } - } - } + Self::deposit_event(Event::ConnectionsRemoved(node, connections)); + Ok(()) } } } -impl Module { +impl Pallet { fn initialize_nodes(nodes: &Vec<(PeerId, T::AccountId)>) { - let peer_ids = nodes.iter() - .map(|item| item.0.clone()) - .collect::>(); - WellKnownNodes::put(&peer_ids); + let peer_ids = nodes.iter().map(|item| item.0.clone()).collect::>(); + WellKnownNodes::::put(&peer_ids); for (node, who) in nodes.iter() { Owners::::insert(node, who); @@ -416,9 +440,9 @@ impl Module { } fn get_authorized_nodes(node: &PeerId) -> Vec { - let mut nodes = AdditionalConnections::get(node); + let mut nodes = AdditionalConnections::::get(node); - let mut well_known_nodes = WellKnownNodes::get(); + let mut well_known_nodes = WellKnownNodes::::get(); if well_known_nodes.contains(node) { well_known_nodes.remove(node); nodes.extend(well_known_nodes); @@ -427,435 +451,3 @@ impl Module { Vec::from_iter(nodes) } } - -#[cfg(test)] -mod tests { - use super::*; - - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, weights::Weight, - parameter_types, ord_parameter_types, - }; - use frame_system::EnsureSignedBy; - use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type PalletInfo = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - } - - ord_parameter_types! { - pub const One: u64 = 1; - pub const Two: u64 = 2; - pub const Three: u64 = 3; - pub const Four: u64 = 4; - } - parameter_types! { - pub const MaxWellKnownNodes: u32 = 4; - pub const MaxPeerIdLength: u32 = 2; - } - impl Trait for Test { - type Event = (); - type MaxWellKnownNodes = MaxWellKnownNodes; - type MaxPeerIdLength = MaxPeerIdLength; - type AddOrigin = EnsureSignedBy; - type RemoveOrigin = EnsureSignedBy; - type SwapOrigin = EnsureSignedBy; - type ResetOrigin = EnsureSignedBy; - type WeightInfo = (); - } - - type NodeAuthorization = Module; - - fn test_node(id: u8) -> PeerId { - PeerId(vec![id]) - } - - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { - nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - #[test] - fn add_well_known_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(15), 15), - BadOrigin - ); - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), PeerId(vec![1, 2, 3]), 15), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(20), 20), - Error::::AlreadyJoined - ); - - assert_ok!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) - ); - assert_eq!(Owners::::get(test_node(10)), 10); - assert_eq!(Owners::::get(test_node(20)), 20); - assert_eq!(Owners::::get(test_node(30)), 30); - assert_eq!(Owners::::get(test_node(15)), 15); - - assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(25), 25), - Error::::TooManyNodes - ); - }); - } - - #[test] - fn remove_well_known_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(3), test_node(20)), - BadOrigin - ); - assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), PeerId(vec![1, 2, 3])), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(40)), - Error::::NotExist - ); - - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(40)]) - ); - assert!(AdditionalConnections::contains_key(test_node(20))); - - 
assert_ok!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20)) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(10), test_node(30)]) - ); - assert!(!Owners::::contains_key(test_node(20))); - assert!(!AdditionalConnections::contains_key(test_node(20))); - }); - } - - #[test] - fn swap_well_known_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(4), test_node(20), test_node(5) - ), - BadOrigin - ); - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), PeerId(vec![1, 2, 3]), test_node(20) - ), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), PeerId(vec![1, 2, 3]) - ), - Error::::PeerIdTooLong - ); - - assert_ok!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(20) - ) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(10), test_node(20), test_node(30)]) - ); - - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(15), test_node(5) - ), - Error::::NotExist - ); - assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(30) - ), - Error::::AlreadyJoined - ); - - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(15)]) - ); - assert_ok!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(5) - ) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(5), test_node(10), test_node(30)]) - ); - assert!(!Owners::::contains_key(test_node(20))); - assert_eq!(Owners::::get(test_node(5)), 20); - assert!(!AdditionalConnections::contains_key(test_node(20))); - assert_eq!( - AdditionalConnections::get(test_node(5)), - BTreeSet::from_iter(vec![test_node(15)]) - ); - }); - } - - #[test] - fn reset_well_known_nodes_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::reset_well_known_nodes( - Origin::signed(3), - vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] - ), - BadOrigin - ); - assert_noop!( - NodeAuthorization::reset_well_known_nodes( - Origin::signed(4), - vec![ - (test_node(15), 15), - (test_node(5), 5), - (test_node(20), 20), - (test_node(25), 25), - ] - ), - Error::::TooManyNodes - ); - - assert_ok!( - NodeAuthorization::reset_well_known_nodes( - Origin::signed(4), - vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] - ) - ); - assert_eq!( - WellKnownNodes::get(), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(20)]) - ); - assert_eq!(Owners::::get(test_node(5)), 5); - assert_eq!(Owners::::get(test_node(15)), 15); - assert_eq!(Owners::::get(test_node(20)), 20); - }); - } - - #[test] - fn claim_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::claim_node(Origin::signed(1), PeerId(vec![1, 2, 3])), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::claim_node(Origin::signed(1), test_node(20)), - Error::::AlreadyClaimed - ); - - assert_ok!(NodeAuthorization::claim_node(Origin::signed(15), test_node(15))); - assert_eq!(Owners::::get(test_node(15)), 15); - }); - } - - #[test] - fn remove_claim_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), PeerId(vec![1, 2, 3])), - Error::::PeerIdTooLong - ); - assert_noop!( - 
NodeAuthorization::remove_claim(Origin::signed(15), test_node(15)), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), test_node(20)), - Error::::NotOwner - ); - - assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(20), test_node(20)), - Error::::PermissionDenied - ); - - Owners::::insert(test_node(15), 15); - AdditionalConnections::insert( - test_node(15), - BTreeSet::from_iter(vec![test_node(20)]) - ); - assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); - assert!(!Owners::::contains_key(test_node(15))); - assert!(!AdditionalConnections::contains_key(test_node(15))); - }); - } - - #[test] - fn transfer_node_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), PeerId(vec![1, 2, 3]), 10), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), test_node(15), 10), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), test_node(20), 10), - Error::::NotOwner - ); - - assert_ok!(NodeAuthorization::transfer_node(Origin::signed(20), test_node(20), 15)); - assert_eq!(Owners::::get(test_node(20)), 15); - }); - } - - #[test] - fn add_connections_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::add_connections( - Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] - ), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::add_connections( - Origin::signed(15), test_node(15), vec![test_node(5)] - ), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::add_connections( - Origin::signed(15), test_node(20), vec![test_node(5)] - ), - Error::::NotOwner - ); - - assert_ok!( - NodeAuthorization::add_connections( - Origin::signed(20), - test_node(20), - vec![test_node(15), test_node(5), test_node(25), test_node(20)] - ) - ); - assert_eq!( - AdditionalConnections::get(test_node(20)), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) - ); - }); - } - - #[test] - fn remove_connections_works() { - new_test_ext().execute_with(|| { - assert_noop!( - NodeAuthorization::remove_connections( - Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] - ), - Error::::PeerIdTooLong - ); - assert_noop!( - NodeAuthorization::remove_connections( - Origin::signed(15), test_node(15), vec![test_node(5)] - ), - Error::::NotClaimed - ); - - assert_noop!( - NodeAuthorization::remove_connections( - Origin::signed(15), test_node(20), vec![test_node(5)] - ), - Error::::NotOwner - ); - - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) - ); - assert_ok!( - NodeAuthorization::remove_connections( - Origin::signed(20), - test_node(20), - vec![test_node(15), test_node(5)] - ) - ); - assert_eq!( - AdditionalConnections::get(test_node(20)), - BTreeSet::from_iter(vec![test_node(25)]) - ); - }); - } - - #[test] - fn get_authorized_nodes_works() { - new_test_ext().execute_with(|| { - AdditionalConnections::insert( - test_node(20), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) - ); - - let mut authorized_nodes = Module::::get_authorized_nodes(&test_node(20)); - authorized_nodes.sort(); - assert_eq!( - authorized_nodes, - vec![test_node(5), test_node(10), test_node(15), test_node(25), test_node(30)] - ); - }); - } -} diff --git a/frame/node-authorization/src/mock.rs b/frame/node-authorization/src/mock.rs new file 
mode 100644
index 0000000000000..6c79f601c197d
--- /dev/null
+++ b/frame/node-authorization/src/mock.rs
@@ -0,0 +1,109 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test environment for node-authorization pallet.
+
+use super::*;
+use crate as pallet_node_authorization;
+
+use frame_support::{ord_parameter_types, parameter_types, traits::GenesisBuild};
+use frame_system::EnsureSignedBy;
+use sp_core::H256;
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
+};
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		NodeAuthorization: pallet_node_authorization::{
+			Pallet, Call, Storage, Config<T>, Event<T>,
+		},
+	}
+);
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+}
+impl frame_system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type DbWeight = ();
+	type BlockWeights = ();
+	type BlockLength = ();
+	type Origin = Origin;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Hash = H256;
+	type Call = Call;
+	type Hashing = BlakeTwo256;
+	type AccountId = u64;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Header = Header;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = ();
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+
+ord_parameter_types! {
+	pub const One: u64 = 1;
+	pub const Two: u64 = 2;
+	pub const Three: u64 = 3;
+	pub const Four: u64 = 4;
+}
+parameter_types! {
+	pub const MaxWellKnownNodes: u32 = 4;
+	pub const MaxPeerIdLength: u32 = 2;
+}
+impl Config for Test {
+	type Event = Event;
+	type MaxWellKnownNodes = MaxWellKnownNodes;
+	type MaxPeerIdLength = MaxPeerIdLength;
+	type AddOrigin = EnsureSignedBy<One, u64>;
+	type RemoveOrigin = EnsureSignedBy<Two, u64>;
+	type SwapOrigin = EnsureSignedBy<Three, u64>;
+	type ResetOrigin = EnsureSignedBy<Four, u64>;
+	type WeightInfo = ();
+}
+
+pub fn test_node(id: u8) -> PeerId {
+	PeerId(vec![id])
+}
+
+pub fn new_test_ext() -> sp_io::TestExternalities {
+	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+	pallet_node_authorization::GenesisConfig::<Test> {
+		nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)],
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+	t.into()
+}
diff --git a/frame/node-authorization/src/tests.rs b/frame/node-authorization/src/tests.rs
new file mode 100644
index 0000000000000..530904fa73488
--- /dev/null
+++ b/frame/node-authorization/src/tests.rs
@@ -0,0 +1,370 @@
+// This file is part of Substrate.
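
The mock above wires accounts 1 through 4 to the pallet's add, remove, swap, and reset origins and seeds three well-known nodes at genesis. As a quick orientation before the real tests in tests.rs, here is a minimal sketch (not part of the patch; the test name is invented, everything else comes from the mock) of how this scaffolding is driven:

```rust
// Illustrative sketch only; mirrors the pattern of the real tests that follow.
use crate::mock::*;
use frame_support::{assert_noop, assert_ok};
use sp_runtime::traits::BadOrigin;

#[test]
fn example_add_origin_is_enforced() {
	new_test_ext().execute_with(|| {
		// Account 1 is AddOrigin (EnsureSignedBy<One, u64>), so it may add a node.
		assert_ok!(NodeAuthorization::add_well_known_node(
			Origin::signed(1),
			test_node(15),
			15
		));
		// Any other signer is rejected before the call body runs.
		assert_noop!(
			NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(16), 16),
			BadOrigin
		);
	});
}
```

The real tests below follow exactly this shape, one extrinsic per test function.
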
+ +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for node-authorization pallet. + +use super::*; +use crate::mock::*; +use frame_support::{assert_noop, assert_ok}; +use sp_runtime::traits::BadOrigin; + +#[test] +fn add_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(15), 15), + BadOrigin + ); + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), PeerId(vec![1, 2, 3]), 15), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(20), 20), + Error::::AlreadyJoined + ); + + assert_ok!(NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15)); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) + ); + assert_eq!(Owners::::get(test_node(10)), Some(10)); + assert_eq!(Owners::::get(test_node(20)), Some(20)); + assert_eq!(Owners::::get(test_node(30)), Some(30)); + assert_eq!(Owners::::get(test_node(15)), Some(15)); + + assert_noop!( + NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(25), 25), + Error::::TooManyNodes + ); + }); +} + +#[test] +fn remove_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(3), test_node(20)), + BadOrigin + ); + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(40)), + Error::::NotExist + ); + + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(40)]), + ); + assert!(AdditionalConnections::::contains_key(test_node(20))); + + assert_ok!(NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20))); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(30)]) + ); + assert!(!Owners::::contains_key(test_node(20))); + assert!(!AdditionalConnections::::contains_key(test_node(20))); + }); +} + +#[test] +fn swap_well_known_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::swap_well_known_node(Origin::signed(4), test_node(20), test_node(5)), + BadOrigin + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), + PeerId(vec![1, 2, 3]), + test_node(20) + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), + test_node(20), + PeerId(vec![1, 2, 3]) + ), + Error::::PeerIdTooLong + ); + + assert_ok!(NodeAuthorization::swap_well_known_node( + Origin::signed(3), + test_node(20), + test_node(20) + )); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(10), test_node(20), test_node(30)]) + ); + 
+ assert_noop!( + NodeAuthorization::swap_well_known_node(Origin::signed(3), test_node(15), test_node(5)), + Error::::NotExist + ); + assert_noop!( + NodeAuthorization::swap_well_known_node( + Origin::signed(3), + test_node(20), + test_node(30) + ), + Error::::AlreadyJoined + ); + + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(15)]), + ); + assert_ok!(NodeAuthorization::swap_well_known_node( + Origin::signed(3), + test_node(20), + test_node(5) + )); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(5), test_node(10), test_node(30)]) + ); + assert!(!Owners::::contains_key(test_node(20))); + assert_eq!(Owners::::get(test_node(5)), Some(20)); + assert!(!AdditionalConnections::::contains_key(test_node(20))); + assert_eq!( + AdditionalConnections::::get(test_node(5)), + BTreeSet::from_iter(vec![test_node(15)]) + ); + }); +} + +#[test] +fn reset_well_known_nodes_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(3), + vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] + ), + BadOrigin + ); + assert_noop!( + NodeAuthorization::reset_well_known_nodes( + Origin::signed(4), + vec![ + (test_node(15), 15), + (test_node(5), 5), + (test_node(20), 20), + (test_node(25), 25), + ] + ), + Error::::TooManyNodes + ); + + assert_ok!(NodeAuthorization::reset_well_known_nodes( + Origin::signed(4), + vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] + )); + assert_eq!( + WellKnownNodes::::get(), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(20)]) + ); + assert_eq!(Owners::::get(test_node(5)), Some(5)); + assert_eq!(Owners::::get(test_node(15)), Some(15)); + assert_eq!(Owners::::get(test_node(20)), Some(20)); + }); +} + +#[test] +fn claim_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::claim_node(Origin::signed(1), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::claim_node(Origin::signed(1), test_node(20)), + Error::::AlreadyClaimed + ); + + assert_ok!(NodeAuthorization::claim_node(Origin::signed(15), test_node(15))); + assert_eq!(Owners::::get(test_node(15)), Some(15)); + }); +} + +#[test] +fn remove_claim_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), PeerId(vec![1, 2, 3])), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), test_node(15)), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(15), test_node(20)), + Error::::NotOwner + ); + + assert_noop!( + NodeAuthorization::remove_claim(Origin::signed(20), test_node(20)), + Error::::PermissionDenied + ); + + Owners::::insert(test_node(15), 15); + AdditionalConnections::::insert( + test_node(15), + BTreeSet::from_iter(vec![test_node(20)]), + ); + assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); + assert!(!Owners::::contains_key(test_node(15))); + assert!(!AdditionalConnections::::contains_key(test_node(15))); + }); +} + +#[test] +fn transfer_node_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), PeerId(vec![1, 2, 3]), 10), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), test_node(15), 10), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::transfer_node(Origin::signed(15), 
test_node(20), 10), + Error::::NotOwner + ); + + assert_ok!(NodeAuthorization::transfer_node(Origin::signed(20), test_node(20), 15)); + assert_eq!(Owners::::get(test_node(20)), Some(15)); + }); +} + +#[test] +fn add_connections_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), + PeerId(vec![1, 2, 3]), + vec![test_node(5)] + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), + test_node(15), + vec![test_node(5)] + ), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::add_connections( + Origin::signed(15), + test_node(20), + vec![test_node(5)] + ), + Error::::NotOwner + ); + + assert_ok!(NodeAuthorization::add_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5), test_node(25), test_node(20)] + )); + assert_eq!( + AdditionalConnections::::get(test_node(20)), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + ); + }); +} + +#[test] +fn remove_connections_works() { + new_test_ext().execute_with(|| { + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), + PeerId(vec![1, 2, 3]), + vec![test_node(5)] + ), + Error::::PeerIdTooLong + ); + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), + test_node(15), + vec![test_node(5)] + ), + Error::::NotClaimed + ); + + assert_noop!( + NodeAuthorization::remove_connections( + Origin::signed(15), + test_node(20), + vec![test_node(5)] + ), + Error::::NotOwner + ); + + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]), + ); + assert_ok!(NodeAuthorization::remove_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5)] + )); + assert_eq!( + AdditionalConnections::::get(test_node(20)), + BTreeSet::from_iter(vec![test_node(25)]) + ); + }); +} + +#[test] +fn get_authorized_nodes_works() { + new_test_ext().execute_with(|| { + AdditionalConnections::::insert( + test_node(20), + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]), + ); + + let mut authorized_nodes = Pallet::::get_authorized_nodes(&test_node(20)); + authorized_nodes.sort(); + assert_eq!( + authorized_nodes, + vec![test_node(5), test_node(10), test_node(15), test_node(25), test_node(30)] + ); + }); +} diff --git a/frame/node-authorization/src/weights.rs b/frame/node-authorization/src/weights.rs new file mode 100644 index 0000000000000..dbb7956cff967 --- /dev/null +++ b/frame/node-authorization/src/weights.rs @@ -0,0 +1,49 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Autogenerated weights for pallet_node_authorization
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+pub trait WeightInfo {
+	fn add_well_known_node() -> Weight;
+	fn remove_well_known_node() -> Weight;
+	fn swap_well_known_node() -> Weight;
+	fn reset_well_known_nodes() -> Weight;
+	fn claim_node() -> Weight;
+	fn remove_claim() -> Weight;
+	fn transfer_node() -> Weight;
+	fn add_connections() -> Weight;
+	fn remove_connections() -> Weight;
+}
+
+impl WeightInfo for () {
+	fn add_well_known_node() -> Weight { 50_000_000 }
+	fn remove_well_known_node() -> Weight { 50_000_000 }
+	fn swap_well_known_node() -> Weight { 50_000_000 }
+	fn reset_well_known_nodes() -> Weight { 50_000_000 }
+	fn claim_node() -> Weight { 50_000_000 }
+	fn remove_claim() -> Weight { 50_000_000 }
+	fn transfer_node() -> Weight { 50_000_000 }
+	fn add_connections() -> Weight { 50_000_000 }
+	fn remove_connections() -> Weight { 50_000_000 }
+}
diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml
index c5c8881007c22..8fdcbf46fa3e1 100644
--- a/frame/offences/Cargo.toml
+++ b/frame/offences/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-offences"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,29 +13,34 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" }
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-serde = { version = "1.0.101", optional = true }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
-sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" }
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
+pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
+serde = { version = "1.0.126", optional = true }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
+sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
+log = { version = "0.4.14", default-features = false }
 
 [dev-dependencies]
-sp-io = { version = "2.0.0", path = "../../primitives/io" }
-sp-core = { version = "2.0.0", path = "../../primitives/core" }
+sp-io = { version = "4.0.0-dev", path = "../../primitives/io" }
+sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
 
 [features]
 default = ["std"]
 std = [
 	"pallet-balances/std",
 	"codec/std",
+	"scale-info/std",
 	"sp-std/std",
 	"serde",
 	"sp-runtime/std",
 	"sp-staking/std",
 	"frame-support/std",
"frame-system/std", + "log/std", ] runtime-benchmarks = [] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 7a95cebc4fb21..b21e6cf9b7e13 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences-benchmarking" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,27 +13,32 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } -frame-support = { version = "2.0.0", default-features = false, path = "../../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../../system" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../babe" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../../balances" } -pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../grandpa" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../im-online" } -pallet-offences = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } -pallet-session = { version = "2.0.0", default-features = false, path = "../../session" } -pallet-staking = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../../primitives/staking" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../babe" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } +pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../grandpa" } +pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../im-online" } +pallet-offences = { version = "4.0.0-dev", default-features = false, features = [ + "runtime-benchmarks", +], path = "../../offences" } +pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } +pallet-staking = { version = "4.0.0-dev", default-features = false, features = [ + "runtime-benchmarks", +], path = "../../staking" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } +frame-election-provider-support = { version = "4.0.0-dev", default-features = false, 
path = "../../election-provider-support" } [dev-dependencies] -pallet-staking-reward-curve = { version = "2.0.0", path = "../../staking/reward-curve" } -pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } -serde = { version = "1.0.101" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } +pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } [features] default = ["std"] @@ -50,6 +55,8 @@ std = [ "pallet-staking/std", "sp-runtime/std", "sp-staking/std", + "frame-election-provider-support/std", "sp-std/std", "codec/std", + "scale-info/std", ] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index e35050992368a..35e3c1aec9403 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,26 +21,30 @@ mod mock; -use sp_std::prelude::*; -use sp_std::vec; +use sp_std::{prelude::*, vec}; -use frame_system::{RawOrigin, Module as System, Trait as SystemTrait}; -use frame_benchmarking::{benchmarks, account}; -use frame_support::traits::{Currency, OnInitialize}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; +use frame_support::traits::{Currency, ValidatorSet, ValidatorSetWithIdentification}; +use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; -use sp_runtime::{Perbill, traits::{Convert, StaticLookup, Saturating, UniqueSaturatedInto}}; -use sp_staking::offence::{ReportOffence, Offence, OffenceDetails}; +use sp_runtime::{ + traits::{Convert, Saturating, StaticLookup, UniqueSaturatedInto}, + Perbill, +}; +use sp_staking::offence::{Offence, ReportOffence}; -use pallet_balances::{Trait as BalancesTrait}; use pallet_babe::BabeEquivocationOffence; +use pallet_balances::Config as BalancesConfig; use pallet_grandpa::{GrandpaEquivocationOffence, GrandpaTimeSlot}; -use pallet_im_online::{Trait as ImOnlineTrait, Module as ImOnline, UnresponsivenessOffence}; -use pallet_offences::{Trait as OffencesTrait, Module as Offences}; -use pallet_session::historical::{Trait as HistoricalTrait, IdentificationTuple}; -use pallet_session::{Trait as SessionTrait, SessionManager}; +use pallet_im_online::{Config as ImOnlineConfig, Pallet as ImOnline, UnresponsivenessOffence}; +use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; +use pallet_session::{ + historical::{Config as HistoricalConfig, IdentificationTuple}, + Config as SessionConfig, SessionManager, +}; use pallet_staking::{ - Module as Staking, Trait as StakingTrait, RewardDestination, ValidatorPrefs, - Exposure, IndividualExposure, ElectionStatus, MAX_NOMINATIONS, Event as StakingEvent + Config as StakingConfig, Event as StakingEvent, Exposure, IndividualExposure, + Pallet as Staking, RewardDestination, ValidatorPrefs, }; const SEED: u32 = 0; @@ -48,56 +52,58 @@ const SEED: u32 = 0; const MAX_REPORTERS: u32 = 100; const MAX_OFFENDERS: u32 = 100; const MAX_NOMINATORS: u32 = 100; -const MAX_DEFERRED_OFFENCES: u32 = 
100; -pub struct Module(Offences); +pub struct Pallet(Offences); -pub trait Trait: - SessionTrait - + StakingTrait - + OffencesTrait - + ImOnlineTrait - + HistoricalTrait - + BalancesTrait +pub trait Config: + SessionConfig + + StakingConfig + + OffencesConfig + + ImOnlineConfig + + HistoricalConfig + + BalancesConfig + IdTupleConvert -{} +{ +} /// A helper trait to make sure we can convert `IdentificationTuple` coming from historical /// and the one required by offences. -pub trait IdTupleConvert { +pub trait IdTupleConvert { /// Convert identification tuple from `historical` trait to the one expected by `offences`. - fn convert(id: IdentificationTuple) -> ::IdentificationTuple; + fn convert(id: IdentificationTuple) -> ::IdentificationTuple; } -impl IdTupleConvert for T where - ::IdentificationTuple: From> +impl IdTupleConvert for T +where + ::IdentificationTuple: From>, { - fn convert(id: IdentificationTuple) -> ::IdentificationTuple { + fn convert(id: IdentificationTuple) -> ::IdentificationTuple { id.into() } } -type LookupSourceOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type LookupSourceOf = <::Lookup as StaticLookup>::Source; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; -struct Offender { +struct Offender { pub controller: T::AccountId, pub stash: T::AccountId, pub nominator_stashes: Vec, } -fn bond_amount() -> BalanceOf { - T::Currency::minimum_balance().saturating_mul(10_000.into()) +fn bond_amount() -> BalanceOf { + T::Currency::minimum_balance().saturating_mul(10_000u32.into()) } -fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { +fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { let stash: T::AccountId = account("stash", n, SEED); let controller: T::AccountId = account("controller", n, SEED); let controller_lookup: LookupSourceOf = T::Lookup::unlookup(controller.clone()); let reward_destination = RewardDestination::Staked; let raw_amount = bond_amount::(); // add twice as much balance to prevent the account from being killed. - let free_amount = raw_amount.saturating_mul(2.into()); + let free_amount = raw_amount.saturating_mul(2u32.into()); T::Currency::make_free_balance_be(&stash, free_amount); let amount: BalanceOf = raw_amount.into(); Staking::::bond( @@ -107,18 +113,20 @@ fn create_offender(n: u32, nominators: u32) -> Result, &'s reward_destination.clone(), )?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller.clone()).into(), validator_prefs)?; let mut individual_exposures = vec![]; let mut nominator_stashes = vec![]; // Create n nominators - for i in 0 .. 
nominators { - let nominator_stash: T::AccountId = account("nominator stash", n * MAX_NOMINATORS + i, SEED); - let nominator_controller: T::AccountId = account("nominator controller", n * MAX_NOMINATORS + i, SEED); - let nominator_controller_lookup: LookupSourceOf = T::Lookup::unlookup(nominator_controller.clone()); + for i in 0..nominators { + let nominator_stash: T::AccountId = + account("nominator stash", n * MAX_NOMINATORS + i, SEED); + let nominator_controller: T::AccountId = + account("nominator controller", n * MAX_NOMINATORS + i, SEED); + let nominator_controller_lookup: LookupSourceOf = + T::Lookup::unlookup(nominator_controller.clone()); T::Currency::make_free_balance_be(&nominator_stash, free_amount.into()); Staking::::bond( @@ -129,56 +137,92 @@ fn create_offender(n: u32, nominators: u32) -> Result, &'s )?; let selected_validators: Vec> = vec![controller_lookup.clone()]; - Staking::::nominate(RawOrigin::Signed(nominator_controller.clone()).into(), selected_validators)?; + Staking::::nominate( + RawOrigin::Signed(nominator_controller.clone()).into(), + selected_validators, + )?; - individual_exposures.push(IndividualExposure { - who: nominator_stash.clone(), - value: amount.clone(), - }); + individual_exposures + .push(IndividualExposure { who: nominator_stash.clone(), value: amount.clone() }); nominator_stashes.push(nominator_stash.clone()); } - let exposure = Exposure { - total: amount.clone() * n.into(), - own: amount, - others: individual_exposures, - }; + let exposure = + Exposure { total: amount.clone() * n.into(), own: amount, others: individual_exposures }; let current_era = 0u32; Staking::::add_era_stakers(current_era.into(), stash.clone().into(), exposure); Ok(Offender { controller, stash, nominator_stashes }) } -fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< - (Vec>, Vec>), - &'static str -> { +fn make_offenders( + num_offenders: u32, + num_nominators: u32, +) -> Result<(Vec>, Vec>), &'static str> { Staking::::new_session(0); let mut offenders = vec![]; - for i in 0 .. 
num_offenders { + for i in 0..num_offenders { let offender = create_offender::(i + 1, num_nominators)?; offenders.push(offender); } Staking::::start_session(0); - let id_tuples = offenders.iter() - .map(|offender| - ::ValidatorIdOf::convert(offender.controller.clone()) - .expect("failed to get validator id from account id")) - .map(|validator_id| - ::FullIdentificationOf::convert(validator_id.clone()) - .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification")) + let id_tuples = offenders + .iter() + .map(|offender| { + ::ValidatorIdOf::convert(offender.controller.clone()) + .expect("failed to get validator id from account id") + }) + .map(|validator_id| { + ::FullIdentificationOf::convert(validator_id.clone()) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification") + }) .collect::>>(); Ok((id_tuples, offenders)) } +fn make_offenders_im_online( + num_offenders: u32, + num_nominators: u32, +) -> Result<(Vec>, Vec>), &'static str> { + Staking::::new_session(0); + + let mut offenders = vec![]; + for i in 0..num_offenders { + let offender = create_offender::(i + 1, num_nominators)?; + offenders.push(offender); + } + + Staking::::start_session(0); + + let id_tuples = offenders + .iter() + .map(|offender| { + < + ::ValidatorSet as ValidatorSet + >::ValidatorIdOf::convert(offender.controller.clone()) + .expect("failed to get validator id from account id") + }) + .map(|validator_id| { + < + ::ValidatorSet as ValidatorSetWithIdentification + >::IdentificationOf::convert(validator_id.clone()) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification") + }) + .collect::>>(); + Ok((id_tuples, offenders)) +} + #[cfg(test)] -fn check_events::Event>>(expected: I) { - let events = System::::events() .into_iter() - .map(|frame_system::EventRecord { event, .. }| event).collect::>(); +fn check_events::Event>>(expected: I) { + let events = System::::events() + .into_iter() + .map(|frame_system::EventRecord { event, .. }| event) + .collect::>(); let expected = expected.collect::>(); let lengths = (events.len(), expected.len()); let length_mismatch = if lengths.0 != lengths.1 { @@ -191,25 +235,25 @@ fn check_events::Event>>(expecte pretty("--Got:", &events); pretty("--Expected:", &expected); format!("Mismatching length. Got: {}, expected: {}", lengths.0, lengths.1) - } else { Default::default() }; + } else { + Default::default() + }; for (idx, (a, b)) in events.into_iter().zip(expected).enumerate() { assert_eq!(a, b, "Mismatch at: {}. {}", idx, length_mismatch); } if !length_mismatch.is_empty() { - panic!(length_mismatch); + panic!("{}", length_mismatch); } } benchmarks! { - _ { } - report_offence_im_online { let r in 1 .. MAX_REPORTERS; // we skip 1 offender, because in such case there is no slashing let o in 2 .. MAX_OFFENDERS; - let n in 0 .. MAX_NOMINATORS.min(MAX_NOMINATIONS as u32); + let n in 0 .. MAX_NOMINATORS.min(::MAX_NOMINATIONS); // Make r reporters let mut reporters = vec![]; @@ -221,7 +265,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (offenders, raw_offenders) = make_offenders::(o, n)?; + let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; let keys = ImOnline::::keys(); let validator_set_count = keys.len() as u32; @@ -235,34 +279,40 @@ benchmarks! 
{ }; assert_eq!(System::::event_count(), 0); }: { - let _ = ::ReportUnresponsiveness::report_offence( + let _ = ::ReportUnresponsiveness::report_offence( reporters.clone(), offence ); } verify { - // make sure the report was not deferred - assert!(Offences::::deferred_offences().is_empty()); - let slash_amount = slash_fraction * bond_amount::().unique_saturated_into() as u32; + let bond_amount: u32 = UniqueSaturatedInto::::unique_saturated_into(bond_amount::()); + let slash_amount = slash_fraction * bond_amount; let reward_amount = slash_amount * (1 + n) / 2; + let slash = |id| core::iter::once( + ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) + ); + let chill = |id| core::iter::once( + ::Event::from(StakingEvent::::Chilled(id)) + ); let mut slash_events = raw_offenders.into_iter() .flat_map(|offender| { - core::iter::once(offender.stash).chain(offender.nominator_stashes.into_iter()) + let nom_slashes = offender.nominator_stashes.into_iter().flat_map(|nom| slash(nom)); + chill(offender.stash.clone()) + .chain(slash(offender.stash)) + .chain(nom_slashes) }) - .map(|stash| ::Event::from( - StakingEvent::::Slash(stash, BalanceOf::::from(slash_amount)) - )) .collect::>(); let reward_events = reporters.into_iter() .flat_map(|reporter| vec![ frame_system::Event::::NewAccount(reporter.clone()).into(), - ::Event::from( + ::Event::from( pallet_balances::Event::::Endowed(reporter, (reward_amount / r).into()) ).into() ]); - // rewards are applied after first offender and it's nominators - let slash_rest = slash_events.split_off(1 + n as usize); + // Rewards are applied after first offender and it's nominators. + // We split after: offender slash + offender chill + nominator slashes. + let slash_rest = slash_events.split_off(2 + n as usize); // make sure that all slashes have been applied #[cfg(test)] @@ -271,18 +321,17 @@ benchmarks! { .chain(slash_events.into_iter().map(Into::into)) .chain(reward_events) .chain(slash_rest.into_iter().map(Into::into)) - .chain(std::iter::once(::Event::from( + .chain(std::iter::once(::Event::from( pallet_offences::Event::Offence( UnresponsivenessOffence::::ID, 0_u32.to_le_bytes().to_vec(), - true ) ).into())) ); } report_offence_grandpa { - let n in 0 .. MAX_NOMINATORS.min(MAX_NOMINATIONS as u32); + let n in 0 .. MAX_NOMINATORS.min(::MAX_NOMINATIONS); // for grandpa equivocation reports the number of reporters // and offenders is always 1 @@ -305,20 +354,19 @@ benchmarks! { let _ = Offences::::report_offence(reporters, offence); } verify { - // make sure the report was not deferred - assert!(Offences::::deferred_offences().is_empty()); // make sure that all slashes have been applied assert_eq!( System::::event_count(), 0 + 1 // offence + 2 // reporter (reward + endowment) + 1 // offenders slashed + + 1 // offenders chilled + n // nominators slashed ); } report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(MAX_NOMINATIONS as u32); + let n in 0 .. MAX_NOMINATORS.min(::MAX_NOMINATIONS); // for babe equivocation reports the number of reporters // and offenders is always 1 @@ -331,7 +379,7 @@ benchmarks! { let keys = ImOnline::::keys(); let offence = BabeEquivocationOffence { - slot: 0, + slot: 0u64.into(), session_index: 0, validator_set_count: keys.len() as u32, offender: T::convert(offenders.pop().unwrap()), @@ -341,70 +389,16 @@ benchmarks! 
{ let _ = Offences::::report_offence(reporters, offence); } verify { - // make sure the report was not deferred - assert!(Offences::::deferred_offences().is_empty()); // make sure that all slashes have been applied assert_eq!( System::::event_count(), 0 + 1 // offence + 2 // reporter (reward + endowment) + 1 // offenders slashed + + 1 // offenders chilled + n // nominators slashed ); } - - on_initialize { - let d in 1 .. MAX_DEFERRED_OFFENCES; - let o = 10; - let n = 100; - - Staking::::put_election_status(ElectionStatus::Closed); - - let mut deferred_offences = vec![]; - let offenders = make_offenders::(o, n)?.0; - let offence_details = offenders.into_iter() - .map(|offender| OffenceDetails { - offender: T::convert(offender), - reporters: vec![], - }) - .collect::>(); - - for i in 0 .. d { - let fractions = offence_details.iter() - .map(|_| Perbill::from_percent(100 * (i + 1) / MAX_DEFERRED_OFFENCES)) - .collect::>(); - deferred_offences.push((offence_details.clone(), fractions.clone(), 0u32)); - } - - Offences::::set_deferred_offences(deferred_offences); - assert!(!Offences::::deferred_offences().is_empty()); - }: { - Offences::::on_initialize(0.into()); - } - verify { - // make sure that all deferred offences were reported with Ok status. - assert!(Offences::::deferred_offences().is_empty()); - assert_eq!( - System::::event_count(), d * (0 - + o // offenders slashed - + o * n // nominators slashed - )); - } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_report_offence_im_online::()); - assert_ok!(test_benchmark_report_offence_grandpa::()); - assert_ok!(test_benchmark_report_offence_babe::()); - assert_ok!(test_benchmark_on_initialize::()); - }); - } -} +impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 527e0ede81ab9..c4fd88def0e33 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,28 +20,30 @@ #![cfg(test)] use super::*; -use frame_support::{ - parameter_types, - weights::{Weight, constants::WEIGHT_PER_SECOND}, -}; +use frame_election_provider_support::onchain; +use frame_support::{parameter_types, weights::constants::WEIGHT_PER_SECOND}; use frame_system as system; +use pallet_session::historical as pallet_session_historical; use sp_runtime::{ - traits::{IdentityLookup, Block as BlockT}, testing::{Header, UintAuthorityId}, + traits::IdentityLookup, }; - type AccountId = u64; type AccountIndex = u32; type BlockNumber = u64; type Balance = u64; parameter_types! 
{ - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -53,25 +55,22 @@ impl frame_system::Trait for Test { type Header = sp_runtime::testing::Header; type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = (Balances,); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; type Event = Event; type DustRemoval = (); @@ -83,13 +82,13 @@ impl pallet_balances::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -110,7 +109,8 @@ impl pallet_session::SessionHandler for TestSessionHandler { _: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)], - ) {} + ) { + } fn on_disabled(_: usize) {} } @@ -120,7 +120,7 @@ parameter_types! { pub const Offset: u64 = 0; } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -132,6 +132,7 @@ impl pallet_session::Trait for Test { type DisabledValidatorsThreshold = (); type WeightInfo = (); } + pallet_staking_reward_curve::build! { const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( min_inflation: 0_025_000, @@ -149,9 +150,15 @@ parameter_types! 
{ pub type Extrinsic = sp_runtime::testing::TestXt; -impl pallet_staking::Trait for Test { +impl onchain::Config for Test { + type Accuracy = Perbill; + type DataProvider = Staking; +} + +impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; - type UnixTime = pallet_timestamp::Module; + type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); type Event = Event; @@ -162,44 +169,39 @@ impl pallet_staking::Trait for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type ElectionLookahead = (); - type Call = Call; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = (); - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } -impl pallet_im_online::Trait for Test { +impl pallet_im_online::Config for Test { type AuthorityId = UintAuthorityId; type Event = Event; - type SessionDuration = Period; + type ValidatorSet = Historical; + type NextSessionRotation = pallet_session::PeriodicSessions; type ReportUnresponsiveness = Offences; type UnsignedPriority = (); type WeightInfo = (); } -parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); -} - -impl pallet_offences::Trait for Test { +impl pallet_offences::Config for Test { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; - type WeightSoftLimit = OffencesWeightSoftLimit; } -impl frame_system::offchain::SendTransactionTypes for Test where Call: From { +impl frame_system::offchain::SendTransactionTypes for Test +where + Call: From, +{ type Extrinsic = Extrinsic; type OverarchingCall = Call; } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; @@ -210,12 +212,13 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Balances: pallet_balances::{Module, Call, Storage, Config, Event}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, - Session: pallet_session::{Module, Call, Storage, Event, Config}, - ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, - Offences: pallet_offences::{Module, Call, Storage, Event}, + System: system::{Pallet, Call, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config}, + Offences: pallet_offences::{Pallet, Storage, Event}, + Historical: pallet_session_historical::{Pallet}, } ); diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index bec1981301219..3392cd6e4a884 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
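
The hunk that follows rewrites frame/offences/src/lib.rs into the attribute-macro pallet style and drops the deferred-offence queue: reports are now handed straight to the `OnOffenceHandler`, and the `Offence` event loses its trailing `applied` flag. A hedged sketch of the resulting call shape from a reporting component (the function `submit_offence` and its bounds are illustrative; `report_offence`, `OffenceError`, and `GrandpaEquivocationOffence` are the real items, used the same way in the benchmarks above):

```rust
use pallet_grandpa::GrandpaEquivocationOffence;
use sp_staking::offence::{OffenceError, ReportOffence};

// Illustrative only: hand a constructed offence to the offences pallet.
fn submit_offence<T: pallet_offences::Config>(
	reporter: T::AccountId,
	offence: GrandpaEquivocationOffence<T::IdentificationTuple>,
) -> Result<(), OffenceError>
where
	T::IdentificationTuple: Clone,
{
	// After this rewrite the slash is applied immediately (no deferral);
	// a report consisting solely of already-seen offenders returns
	// OffenceError::DuplicateReport instead of depositing a new event.
	pallet_offences::Pallet::<T>::report_offence(vec![reporter], offence)
}
```
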
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,151 +15,134 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Offences Module +//! # Offences Pallet //! //! Tracks reported offences // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +mod migration; mod mock; mod tests; -use sp_std::vec::Vec; -use frame_support::{ - decl_module, decl_event, decl_storage, Parameter, debug, - traits::Get, - weights::Weight, -}; -use sp_runtime::{traits::{Hash, Zero}, Perbill}; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; +use sp_runtime::{traits::Hash, Perbill}; use sp_staking::{ + offence::{Kind, Offence, OffenceDetails, OffenceError, OnOffenceHandler, ReportOffence}, SessionIndex, - offence::{Offence, ReportOffence, Kind, OnOffenceHandler, OffenceDetails, OffenceError}, }; -use codec::{Encode, Decode}; +use sp_std::prelude::*; + +pub use pallet::*; /// A binary blob which represents a SCALE codec-encoded `O::TimeSlot`. type OpaqueTimeSlot = Vec; /// A type alias for a report identifier. -type ReportIdOf = ::Hash; - -/// Type of data stored as a deferred offence -pub type DeferredOffenceOf = ( - Vec::AccountId, ::IdentificationTuple>>, - Vec, - SessionIndex, -); +type ReportIdOf = ::Hash; pub trait WeightInfo { - fn report_offence_im_online(r: u32, o: u32, n: u32, ) -> Weight; - fn report_offence_grandpa(r: u32, n: u32, ) -> Weight; - fn report_offence_babe(r: u32, n: u32, ) -> Weight; - fn on_initialize(d: u32, ) -> Weight; + fn report_offence_im_online(r: u32, o: u32, n: u32) -> Weight; + fn report_offence_grandpa(r: u32, n: u32) -> Weight; + fn report_offence_babe(r: u32, n: u32) -> Weight; + fn on_initialize(d: u32) -> Weight; } impl WeightInfo for () { - fn report_offence_im_online(_r: u32, _o: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn report_offence_grandpa(_r: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn report_offence_babe(_r: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn on_initialize(_d: u32, ) -> Weight { 1_000_000_000 } -} - -/// Offences trait -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From + Into<::Event>; - /// Full identification of the validator. - type IdentificationTuple: Parameter + Ord; - /// A handler called for every offence report. - type OnOffenceHandler: OnOffenceHandler; - /// The a soft limit on maximum weight that may be consumed while dispatching deferred offences in - /// `on_initialize`. - /// Note it's going to be exceeded before we stop adding to it, so it has to be set conservatively. - type WeightSoftLimit: Get; + fn report_offence_im_online(_r: u32, _o: u32, _n: u32) -> Weight { + 1_000_000_000 + } + fn report_offence_grandpa(_r: u32, _n: u32) -> Weight { + 1_000_000_000 + } + fn report_offence_babe(_r: u32, _n: u32) -> Weight { + 1_000_000_000 + } + fn on_initialize(_d: u32) -> Weight { + 1_000_000_000 + } } -decl_storage! { - trait Store for Module as Offences { - /// The primary structure that holds all offence records keyed by report identifiers. - Reports get(fn reports): - map hasher(twox_64_concat) ReportIdOf - => Option>; - - /// Deferred reports that have been rejected by the offence handler and need to be submitted - /// at a later time. 
- DeferredOffences get(fn deferred_offences): Vec>; - - /// A vector of reports of the same kind that happened at the same time slot. - ConcurrentReportsIndex: - double_map hasher(twox_64_concat) Kind, hasher(twox_64_concat) OpaqueTimeSlot - => Vec>; - - /// Enumerates all reports of a kind along with the time they happened. - /// - /// All reports are sorted by the time of offence. - /// - /// Note that the actual type of this mapping is `Vec`, this is because values of - /// different types are not supported at the moment so we are doing the manual serialization. - ReportsByKindIndex: map hasher(twox_64_concat) Kind => Vec; // (O::TimeSlot, ReportIdOf) +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// The pallet's config trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From + IsType<::Event>; + /// Full identification of the validator. + type IdentificationTuple: Parameter + Ord; + /// A handler called for every offence report. + type OnOffenceHandler: OnOffenceHandler; } -} -decl_event!( + /// The primary structure that holds all offence records keyed by report identifiers. + #[pallet::storage] + #[pallet::getter(fn reports)] + pub type Reports = StorageMap< + _, + Twox64Concat, + ReportIdOf, + OffenceDetails, + >; + + /// A vector of reports of the same kind that happened at the same time slot. + #[pallet::storage] + pub type ConcurrentReportsIndex = StorageDoubleMap< + _, + Twox64Concat, + Kind, + Twox64Concat, + OpaqueTimeSlot, + Vec>, + ValueQuery, + >; + + /// Enumerates all reports of a kind along with the time they happened. + /// + /// All reports are sorted by the time of offence. + /// + /// Note that the actual type of this mapping is `Vec`, this is because values of + /// different types are not supported at the moment so we are doing the manual serialization. + #[pallet::storage] + pub type ReportsByKindIndex = StorageMap< + _, + Twox64Concat, + Kind, + Vec, // (O::TimeSlot, ReportIdOf) + ValueQuery, + >; + + /// Events type. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// There is an offence reported of the given `kind` happened at the `session_index` and - /// (kind-specific) time slot. This event is not deposited for duplicate slashes. last - /// element indicates of the offence was applied (true) or queued (false) - /// \[kind, timeslot, applied\]. - Offence(Kind, OpaqueTimeSlot, bool), + /// (kind-specific) time slot. This event is not deposited for duplicate slashes. + /// \[kind, timeslot\]. + Offence(Kind, OpaqueTimeSlot), } -); -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - fn on_initialize(now: T::BlockNumber) -> Weight { - // only decode storage if we can actually submit anything again. - if !T::OnOffenceHandler::can_report() { - return 0; - } - - let limit = T::WeightSoftLimit::get(); - let mut consumed = Weight::zero(); - - >::mutate(|deferred| { - deferred.retain(|(offences, perbill, session)| { - if consumed >= limit { - true - } else { - // keep those that fail to be reported again. An error log is emitted here; this - // should not happen if staking's `can_report` is implemented properly. 
- match T::OnOffenceHandler::on_offence(&offences, &perbill, *session) { - Ok(weight) => { - consumed += weight; - false - }, - Err(_) => { - debug::native::error!( - target: "pallet-offences", - "re-submitting a deferred slash returned Err at {}. This should not happen with pallet-staking", - now, - ); - true - }, - } - } - }) - }); - - consumed + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + migration::remove_deferred_storage::() } } } -impl> - ReportOffence for Module +impl> + ReportOffence for Pallet where T::IdentificationTuple: Clone, { @@ -168,34 +151,31 @@ where let time_slot = offence.time_slot(); let validator_set_count = offence.validator_set_count(); - // Go through all offenders in the offence report and find all offenders that was spotted + // Go through all offenders in the offence report and find all offenders that were spotted // in unique reports. - let TriageOutcome { concurrent_offenders } = match Self::triage_offence_report::( - reporters, - &time_slot, - offenders, - ) { - Some(triage) => triage, - // The report contained only duplicates, so there is no need to slash again. - None => return Err(OffenceError::DuplicateReport), - }; + let TriageOutcome { concurrent_offenders } = + match Self::triage_offence_report::(reporters, &time_slot, offenders) { + Some(triage) => triage, + // The report contained only duplicates, so there is no need to slash again. + None => return Err(OffenceError::DuplicateReport), + }; let offenders_count = concurrent_offenders.len() as u32; // The amount new offenders are slashed let new_fraction = O::slash_fraction(offenders_count, validator_set_count); - let slash_perbill: Vec<_> = (0..concurrent_offenders.len()) - .map(|_| new_fraction.clone()).collect(); + let slash_perbill: Vec<_> = + (0..concurrent_offenders.len()).map(|_| new_fraction.clone()).collect(); - let applied = Self::report_or_store_offence( + T::OnOffenceHandler::on_offence( &concurrent_offenders, &slash_perbill, offence.session_index(), ); // Deposit the event. - Self::deposit_event(Event::Offence(O::ID, time_slot.encode(), applied)); + Self::deposit_event(Event::Offence(O::ID, time_slot.encode())); Ok(()) } @@ -210,29 +190,7 @@ where } } -impl Module { - /// Tries (without checking) to report an offence. Stores them in [`DeferredOffences`] in case - /// it fails. Returns false in case it has to store the offence. - fn report_or_store_offence( - concurrent_offenders: &[OffenceDetails], - slash_perbill: &[Perbill], - session_index: SessionIndex, - ) -> bool { - match T::OnOffenceHandler::on_offence( - &concurrent_offenders, - &slash_perbill, - session_index, - ) { - Ok(_) => true, - Err(_) => { - >::mutate(|d| - d.push((concurrent_offenders.to_vec(), slash_perbill.to_vec(), session_index)) - ); - false - } - } - } - +impl Pallet { /// Compute the ID for the given report properties. /// /// The report id depends on the offence kind, time slot and the id of offender. @@ -260,10 +218,7 @@ impl Module { any_new = true; >::insert( &report_id, - OffenceDetails { - offender, - reporters: reporters.clone(), - }, + OffenceDetails { offender, reporters: reporters.clone() }, ); storage.insert(time_slot, report_id); @@ -272,28 +227,22 @@ impl Module { if any_new { // Load report details for the all reports happened at the same time. 
- let concurrent_offenders = storage.concurrent_reports + let concurrent_offenders = storage + .concurrent_reports .iter() .filter_map(|report_id| >::get(report_id)) .collect::>(); storage.save(); - Some(TriageOutcome { - concurrent_offenders, - }) + Some(TriageOutcome { concurrent_offenders }) } else { None } } - - #[cfg(feature = "runtime-benchmarks")] - pub fn set_deferred_offences(offences: Vec>) { - >::put(offences); - } } -struct TriageOutcome { +struct TriageOutcome { /// Other reports for the same report kinds. concurrent_offenders: Vec>, } @@ -304,44 +253,33 @@ struct TriageOutcome { /// This struct is responsible for aggregating storage writes and the underlying storage should not /// accessed directly meanwhile. #[must_use = "The changes are not saved without called `save`"] -struct ReportIndexStorage> { +struct ReportIndexStorage> { opaque_time_slot: OpaqueTimeSlot, concurrent_reports: Vec>, same_kind_reports: Vec<(O::TimeSlot, ReportIdOf)>, } -impl> ReportIndexStorage { +impl> ReportIndexStorage { /// Preload indexes from the storage for the specific `time_slot` and the kind of the offence. fn load(time_slot: &O::TimeSlot) -> Self { let opaque_time_slot = time_slot.encode(); - let same_kind_reports = ::get(&O::ID); + let same_kind_reports = ReportsByKindIndex::::get(&O::ID); let same_kind_reports = Vec::<(O::TimeSlot, ReportIdOf)>::decode(&mut &same_kind_reports[..]) .unwrap_or_default(); let concurrent_reports = >::get(&O::ID, &opaque_time_slot); - Self { - opaque_time_slot, - concurrent_reports, - same_kind_reports, - } + Self { opaque_time_slot, concurrent_reports, same_kind_reports } } /// Insert a new report to the index. fn insert(&mut self, time_slot: &O::TimeSlot, report_id: ReportIdOf) { // Insert the report id into the list while maintaining the ordering by the time // slot. - let pos = match self - .same_kind_reports - .binary_search_by_key(&time_slot, |&(ref when, _)| when) - { - Ok(pos) => pos, - Err(pos) => pos, - }; - self.same_kind_reports - .insert(pos, (time_slot.clone(), report_id)); + let pos = self.same_kind_reports.partition_point(|&(ref when, _)| when <= time_slot); + self.same_kind_reports.insert(pos, (time_slot.clone(), report_id)); // Update the list of concurrent reports. self.concurrent_reports.push(report_id); @@ -349,7 +287,7 @@ impl> ReportIndexStorage { /// Dump the indexes to the storage. fn save(self) { - ::insert(&O::ID, self.same_kind_reports.encode()); + ReportsByKindIndex::::insert(&O::ID, self.same_kind_reports.encode()); >::insert( &O::ID, &self.opaque_time_slot, diff --git a/frame/offences/src/migration.rs b/frame/offences/src/migration.rs new file mode 100644 index 0000000000000..b6e32cbe69e26 --- /dev/null +++ b/frame/offences/src/migration.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
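
One detail worth calling out in the `ReportIndexStorage::insert` hunk above: the hand-rolled `binary_search_by_key` match is replaced by `partition_point`, which returns the first index at which the predicate stops holding. Because the predicate is `when <= time_slot`, entries with an equal time slot stay in front of the new one, so the index remains sorted by time of offence with stable insertion order. A self-contained illustration (plain integers stand in for time slots):

```rust
fn main() {
	let mut reports = vec![(1, "a"), (3, "b"), (3, "c"), (5, "d")];
	// First index whose key exceeds the new key 3: existing equal keys
	// stay in front, so ordering by time slot is preserved.
	let pos = reports.partition_point(|&(when, _)| when <= 3);
	assert_eq!(pos, 3);
	reports.insert(pos, (3, "e"));
	assert_eq!(reports[3], (3, "e"));
}
```
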
+ +use super::{Config, OffenceDetails, Perbill, SessionIndex}; +use frame_support::{ + generate_storage_alias, pallet_prelude::ValueQuery, traits::Get, weights::Weight, +}; +use sp_staking::offence::OnOffenceHandler; +use sp_std::vec::Vec; + +/// Type of data stored as a deferred offence +type DeferredOffenceOf = ( + Vec::AccountId, ::IdentificationTuple>>, + Vec, + SessionIndex, +); + +// Deferred reports that have been rejected by the offence handler and need to be submitted +// at a later time. +generate_storage_alias!( + Offences, + DeferredOffences => Value>, ValueQuery> +); + +pub fn remove_deferred_storage() -> Weight { + let mut weight = T::DbWeight::get().reads_writes(1, 1); + let deferred = >::take(); + log::info!(target: "runtime::offences", "have {} deferred offences, applying.", deferred.len()); + for (offences, perbill, session) in deferred.iter() { + let consumed = T::OnOffenceHandler::on_offence(&offences, &perbill, *session); + weight = weight.saturating_add(consumed); + } + + weight +} + +#[cfg(test)] +mod test { + use super::*; + use crate::mock::{new_test_ext, with_on_offence_fractions, Offences, Runtime as T}; + use frame_support::traits::OnRuntimeUpgrade; + use sp_runtime::Perbill; + use sp_staking::offence::OffenceDetails; + + #[test] + fn should_resubmit_deferred_offences() { + new_test_ext().execute_with(|| { + // given + assert_eq!(>::get().len(), 0); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![]); + }); + + let offence_details = OffenceDetails::< + ::AccountId, + ::IdentificationTuple, + > { + offender: 5, + reporters: vec![], + }; + + // push deferred offence + >::append(( + vec![offence_details], + vec![Perbill::from_percent(5 + 1 * 100 / 5)], + 1, + )); + + // when + assert_eq!( + Offences::on_runtime_upgrade(), + ::DbWeight::get().reads_writes(1, 1), + ); + + // then + assert!(!>::exists()); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(5 + 1 * 100 / 5)]); + }); + }) + } +} diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 58ee97a9bcbb5..5e4c94944b6fd 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,130 +19,104 @@ #![cfg(test)] -use std::cell::RefCell; -use crate::{Module, Trait}; +use crate as offences; +use crate::Config; use codec::Encode; -use sp_runtime::Perbill; -use sp_staking::{ - SessionIndex, - offence::{self, Kind, OffenceDetails}, +use frame_support::{ + parameter_types, + weights::{ + constants::{RocksDbWeight, WEIGHT_PER_SECOND}, + Weight, + }, }; -use sp_runtime::testing::Header; -use sp_runtime::traits::{IdentityLookup, BlakeTwo256}; use sp_core::H256; -use frame_support::{ - impl_outer_origin, impl_outer_event, parameter_types, StorageMap, StorageDoubleMap, - weights::{Weight, constants::{WEIGHT_PER_SECOND, RocksDbWeight}}, +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -use frame_system as system; - -impl_outer_origin!{ - pub enum Origin for Runtime {} -} +use sp_staking::{ + offence::{self, Kind, OffenceDetails}, + SessionIndex, +}; +use std::cell::RefCell; pub struct OnOffenceHandler; thread_local! 
{ pub static ON_OFFENCE_PERBILL: RefCell> = RefCell::new(Default::default()); - pub static CAN_REPORT: RefCell = RefCell::new(true); pub static OFFENCE_WEIGHT: RefCell = RefCell::new(Default::default()); } -impl - offence::OnOffenceHandler for OnOffenceHandler +impl offence::OnOffenceHandler + for OnOffenceHandler { fn on_offence( _offenders: &[OffenceDetails], slash_fraction: &[Perbill], _offence_session: SessionIndex, - ) -> Result { - if >::can_report() { - ON_OFFENCE_PERBILL.with(|f| { - *f.borrow_mut() = slash_fraction.to_vec(); - }); - - Ok(OFFENCE_WEIGHT.with(|w| *w.borrow())) - } else { - Err(()) - } - } + ) -> Weight { + ON_OFFENCE_PERBILL.with(|f| { + *f.borrow_mut() = slash_fraction.to_vec(); + }); - fn can_report() -> bool { - CAN_REPORT.with(|c| *c.borrow()) + OFFENCE_WEIGHT.with(|w| *w.borrow()) } } -pub fn set_can_report(can_report: bool) { - CAN_REPORT.with(|c| *c.borrow_mut() = can_report); -} - pub fn with_on_offence_fractions) -> R>(f: F) -> R { - ON_OFFENCE_PERBILL.with(|fractions| { - f(&mut *fractions.borrow_mut()) - }) + ON_OFFENCE_PERBILL.with(|fractions| f(&mut *fractions.borrow_mut())) } -pub fn set_offence_weight(new: Weight) { - OFFENCE_WEIGHT.with(|w| *w.borrow_mut() = new); -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Offences: offences::{Pallet, Storage, Event}, + } +); -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Runtime; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); } -impl frame_system::Trait for Runtime { - type BaseCallFilter = (); +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -parameter_types! 
{ - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); -} - -impl Trait for Runtime { - type Event = TestEvent; +impl Config for Runtime { + type Event = Event; type IdentificationTuple = u64; type OnOffenceHandler = OnOffenceHandler; - type WeightSoftLimit = OffencesWeightSoftLimit; -} - -mod offences { - pub use crate::Event; -} - -impl_outer_event! { - pub enum TestEvent for Runtime { - system, - offences, - } } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -152,10 +126,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -/// Offences module. -pub type Offences = Module; -pub type System = frame_system::Module; - pub const KIND: [u8; 16] = *b"test_report_1234"; /// Returns all offence details for the specific `kind` happened at the specific time slot. @@ -196,10 +166,12 @@ impl offence::Offence for Offence { 1 } - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill { + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) } } + +/// Create the report id for the given `offender` and `time_slot` combination. +pub fn report_id(time_slot: u128, offender: u64) -> H256 { + Offences::report_id::>(&time_slot, &offender) +} diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index ca9f46a198820..18cfa9410a6c6 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,12 +21,11 @@ use super::*; use crate::mock::{ - Offences, System, Offence, TestEvent, KIND, new_test_ext, with_on_offence_fractions, - offence_reports, set_can_report, set_offence_weight, + new_test_ext, offence_reports, report_id, with_on_offence_fractions, Event, Offence, Offences, + System, KIND, }; -use sp_runtime::Perbill; -use frame_support::traits::OnInitialize; use frame_system::{EventRecord, Phase}; +use sp_runtime::Perbill; #[test] fn should_report_an_authority_and_trigger_on_offence() { @@ -35,11 +34,7 @@ fn should_report_an_authority_and_trigger_on_offence() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; // when Offences::report_offence(vec![], offence).unwrap(); @@ -58,11 +53,7 @@ fn should_not_report_the_same_authority_twice_in_the_same_slot() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -80,7 +71,6 @@ fn should_not_report_the_same_authority_twice_in_the_same_slot() { }); } - #[test] fn should_report_in_different_time_slot() { new_test_ext().execute_with(|| { @@ -88,11 +78,7 @@ fn should_report_in_different_time_slot() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let mut offence = Offence { - validator_set_count: 5, - time_slot, - 
offenders: vec![5], - }; + let mut offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -118,11 +104,7 @@ fn should_deposit_event() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; // when Offences::report_offence(vec![], offence).unwrap(); @@ -132,7 +114,7 @@ fn should_deposit_event() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), + event: Event::Offences(crate::Event::Offence(KIND, time_slot.encode())), topics: vec![], }] ); @@ -146,11 +128,7 @@ fn doesnt_deposit_event_for_dups() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -167,7 +145,7 @@ fn doesnt_deposit_event_for_dups() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), + event: Event::Offences(crate::Event::Offence(KIND, time_slot.encode())), topics: vec![], }] ); @@ -182,33 +160,26 @@ fn reports_if_an_offence_is_dup() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = |time_slot, offenders| TestOffence { - validator_set_count: 5, - time_slot, - offenders, - }; + let offence = + |time_slot, offenders| TestOffence { validator_set_count: 5, time_slot, offenders }; let mut test_offence = offence(time_slot, vec![0]); // the report for authority 0 at time slot 42 should not be a known // offence - assert!( - !>::is_known_offence( - &test_offence.offenders, - &test_offence.time_slot - ) - ); + assert!(!>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + )); // we report an offence for authority 0 at time slot 42 Offences::report_offence(vec![], test_offence.clone()).unwrap(); // the same report should be a known offence now - assert!( - >::is_known_offence( - &test_offence.offenders, - &test_offence.time_slot - ) - ); + assert!(>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + )); // and reporting it again should yield a duplicate report error assert_eq!( @@ -220,28 +191,21 @@ fn reports_if_an_offence_is_dup() { test_offence.offenders.push(1); // it should not be a known offence anymore - assert!( - !>::is_known_offence( - &test_offence.offenders, - &test_offence.time_slot - ) - ); + assert!(!>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + )); // and reporting it again should work without any error - assert_eq!( - Offences::report_offence(vec![], test_offence.clone()), - Ok(()) - ); + assert_eq!(Offences::report_offence(vec![], test_offence.clone()), Ok(())); // creating a new offence for the same authorities on the next slot // should be considered a new offence and thefore not known let test_offence_next_slot = offence(time_slot + 1, vec![0, 1]); - assert!( - !>::is_known_offence( - 
&test_offence_next_slot.offenders, - &test_offence_next_slot.time_slot - ) - ); + assert!(!>::is_known_offence( + &test_offence_next_slot.offenders, + &test_offence_next_slot.time_slot + )); }); } @@ -254,16 +218,8 @@ fn should_properly_count_offences() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence1 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - let offence2 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![4], - }; + let offence1 = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; + let offence2 = Offence { validator_set_count: 5, time_slot, offenders: vec![4] }; Offences::report_offence(vec![], offence1).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -286,98 +242,47 @@ fn should_properly_count_offences() { }); } +/// We insert offences in sorted order using the time slot in the `same_kind_reports`. +/// This test ensures that it works as expected. #[test] -fn should_queue_and_resubmit_rejected_offence() { +fn should_properly_sort_offences() { new_test_ext().execute_with(|| { - set_can_report(false); - - // will get deferred - let offence = Offence { - validator_set_count: 5, - time_slot: 42, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - assert_eq!(Offences::deferred_offences().len(), 1); - // event also indicates unapplied. - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, 42u128.encode(), false)), - topics: vec![], - }] - ); - - // will not dequeue - Offences::on_initialize(2); - - // again - let offence = Offence { - validator_set_count: 5, - time_slot: 62, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - assert_eq!(Offences::deferred_offences().len(), 2); - - set_can_report(true); - - // can be submitted - let offence = Offence { - validator_set_count: 5, - time_slot: 72, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - assert_eq!(Offences::deferred_offences().len(), 2); - - Offences::on_initialize(3); - assert_eq!(Offences::deferred_offences().len(), 0); - }) -} - -#[test] -fn weight_soft_limit_is_used() { - new_test_ext().execute_with(|| { - set_can_report(false); - // Only 2 can fit in one block - set_offence_weight(::WeightSoftLimit::get() / 2); - - // Queue 3 offences - // #1 - let offence = Offence { - validator_set_count: 5, - time_slot: 42, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - // #2 - let offence = Offence { - validator_set_count: 5, - time_slot: 62, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - // #3 - let offence = Offence { - validator_set_count: 5, - time_slot: 72, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - // 3 are queued - assert_eq!(Offences::deferred_offences().len(), 3); + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); - // Allow reporting - set_can_report(true); + let offence1 = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; + let offence2 = Offence { validator_set_count: 5, time_slot, offenders: vec![4] }; + let offence3 = + Offence { validator_set_count: 5, time_slot: time_slot + 1, offenders: vec![6, 7] }; + let offence4 = + Offence { validator_set_count: 5, time_slot: time_slot - 1, offenders: vec![3] }; + 
Offences::report_offence(vec![], offence1).unwrap(); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + f.clear(); + }); - Offences::on_initialize(3); - // Two are completed, one is left in the queue - assert_eq!(Offences::deferred_offences().len(), 1); + // when + // report for the second time + Offences::report_offence(vec![], offence2).unwrap(); + Offences::report_offence(vec![], offence3).unwrap(); + Offences::report_offence(vec![], offence4).unwrap(); - Offences::on_initialize(4); - // All are done now - assert_eq!(Offences::deferred_offences().len(), 0); - }) + // then + let same_kind_reports = Vec::<(u128, sp_core::H256)>::decode( + &mut &crate::ReportsByKindIndex::::get(KIND)[..], + ) + .unwrap(); + assert_eq!( + same_kind_reports, + vec![ + (time_slot - 1, report_id(time_slot - 1, 3)), + (time_slot, report_id(time_slot, 5)), + (time_slot, report_id(time_slot, 4)), + (time_slot + 1, report_id(time_slot + 1, 6)), + (time_slot + 1, report_id(time_slot + 1, 7)), + ] + ); + }); } diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 219e72502e0e4..83db82990d105 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-proxy" -version = "2.0.1" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,34 +13,34 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["max-encoded-len"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-utility = { version = "2.0.0", path = "../utility" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-utility = { version = "4.0.0-dev", path = "../utility" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-runtime/std", 
"frame-support/std", "frame-system/std", "sp-std/std", - "sp-io/std" + "sp-io/std", ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/proxy/README.md b/frame/proxy/README.md index 26969db638289..2eb83fab6d727 100644 --- a/frame/proxy/README.md +++ b/frame/proxy/README.md @@ -6,7 +6,7 @@ The accounts to which permission is delegated may be requied to announce the act wish to execute some duration prior to execution happens. In this case, the target account may reject the announcement and in doing so, veto the execution. -- [`proxy::Trait`](https://docs.rs/pallet-proxy/latest/pallet_proxy/trait.Trait.html) +- [`proxy::Config`](https://docs.rs/pallet-proxy/latest/pallet_proxy/trait.Config.html) - [`Call`](https://docs.rs/pallet-proxy/latest/pallet_proxy/enum.Call.html) ## Overview @@ -16,6 +16,6 @@ reject the announcement and in doing so, veto the execution. ### Dispatchable Functions [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 5f1d79741dd8e..e66f6782c19e1 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,22 +20,18 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_system::{RawOrigin, EventRecord}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use crate::Pallet as Proxy; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Module as Proxy; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { - let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); - // compare to the last event record - let EventRecord { event, .. } = &events[events.len() - 1]; - assert_eq!(event, &system_event); +fn assert_last_event(generic_event: ::Event) { + frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { +fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| whitelisted_caller()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); for i in 0..n { @@ -49,10 +45,10 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), Ok(()) } -fn add_announcements( +fn add_announcements( n: u32, maybe_who: Option, - maybe_real: Option + maybe_real: Option, ) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -80,33 +76,29 @@ fn add_announcements( } benchmarks! { - _ { - let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; - } - proxy { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... 
and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) verify { - assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + assert_last_event::(Event::ProxyExecuted(Ok(())).into()) } proxy_announced { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("anonymous", 0, SEED); let delegate: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), real.clone(), @@ -115,18 +107,18 @@ benchmarks! { add_announcements::(a, Some(delegate.clone()), None)?; }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) verify { - assert_last_event::(RawEvent::ProxyExecuted(Ok(())).into()) + assert_last_event::(Event::ProxyExecuted(Ok(())).into()) } remove_announcement { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real.clone(), @@ -141,13 +133,13 @@ benchmarks! { reject_announcement { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real.clone(), @@ -162,22 +154,22 @@ benchmarks! { announce { let a in 0 .. T::MaxPending::get() - 1; - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::Call = frame_system::Call::::remark(vec![]).into(); + let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) verify { - assert_last_event::(RawEvent::Announced(real, caller, call_hash).into()); + assert_last_event::(Event::Announced(real, caller, call_hash).into()); } add_proxy { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( RawOrigin::Signed(caller.clone()), @@ -191,7 +183,7 @@ benchmarks! { } remove_proxy { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( RawOrigin::Signed(caller.clone()), @@ -205,7 +197,7 @@ benchmarks! { } remove_proxies { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _(RawOrigin::Signed(caller.clone())) verify { @@ -214,7 +206,7 @@ benchmarks! { } anonymous { - let p in ...; + let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( RawOrigin::Signed(caller.clone()), @@ -223,8 +215,8 @@ benchmarks! { 0 ) verify { - let anon_account = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(RawEvent::AnonymousCreated( + let anon_account = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + assert_last_event::(Event::AnonymousCreated( anon_account, caller, T::ProxyType::default(), @@ -237,15 +229,15 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - Module::::anonymous( + Pallet::::anonymous( RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), T::BlockNumber::zero(), 0 )?; - let height = system::Module::::block_number(); - let ext_index = system::Module::::extrinsic_index().unwrap_or(0); - let anon = Module::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + let height = system::Pallet::::block_number(); + let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); + let anon = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); add_proxies::(p, Some(anon.clone()))?; ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); @@ -255,25 +247,4 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_proxy::()); - assert_ok!(test_benchmark_proxy_announced::()); - assert_ok!(test_benchmark_remove_announcement::()); - assert_ok!(test_benchmark_reject_announcement::()); - assert_ok!(test_benchmark_announce::()); - assert_ok!(test_benchmark_add_proxy::()); - assert_ok!(test_benchmark_remove_proxy::()); - assert_ok!(test_benchmark_remove_proxies::()); - assert_ok!(test_benchmark_anonymous::()); - assert_ok!(test_benchmark_kill_anonymous::()); - }); - } -} +impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/proxy/src/default_weight.rs b/frame/proxy/src/default_weight.rs deleted file mode 100644 index 183c0b81c8a07..0000000000000 --- a/frame/proxy/src/default_weight.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn proxy(p: u32, ) -> Weight { - (26127000 as Weight) - .saturating_add((214000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - } - fn proxy_announced(a: u32, p: u32, ) -> Weight { - (55405000 as Weight) - .saturating_add((774000 as Weight).saturating_mul(a as Weight)) - .saturating_add((209000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn remove_announcement(a: u32, p: u32, ) -> Weight { - (35879000 as Weight) - .saturating_add((783000 as Weight).saturating_mul(a as Weight)) - .saturating_add((20000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn reject_announcement(a: u32, p: u32, ) -> Weight { - (36097000 as Weight) - .saturating_add((780000 as Weight).saturating_mul(a as Weight)) - .saturating_add((12000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn announce(a: u32, p: u32, ) -> Weight { - (53769000 as Weight) - .saturating_add((675000 as Weight).saturating_mul(a as Weight)) - .saturating_add((214000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn add_proxy(p: u32, ) -> Weight { - (36082000 as Weight) - .saturating_add((234000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn 
remove_proxy(p: u32, ) -> Weight { - (32885000 as Weight) - .saturating_add((267000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn remove_proxies(p: u32, ) -> Weight { - (31735000 as Weight) - .saturating_add((215000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn anonymous(p: u32, ) -> Weight { - (50907000 as Weight) - .saturating_add((61000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn kill_anonymous(p: u32, ) -> Weight { - (33926000 as Weight) - .saturating_add((208000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } -} diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 7649fe0ad440f..b73101fa73486 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,131 +15,76 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Proxy Module -//! A module allowing accounts to give permission to other accounts to dispatch types of calls from +//! # Proxy Pallet +//! A pallet allowing accounts to give permission to other accounts to dispatch types of calls from //! their signed origin. //! -//! The accounts to which permission is delegated may be requied to announce the action that they +//! The accounts to which permission is delegated may be required to announce the action that they //! wish to execute some duration prior to execution happens. In this case, the target account may //! reject the announcement and in doing so, veto the execution. //! -//! - [`proxy::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) -//! -//! ## Overview -//! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html +//! - [`Config`] +//! - [`Call`] // Ensure we're `no_std` when compiling for Wasm. 
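The remainder of this file's diff ports pallet-proxy from the `decl_*` macro family to the FRAME v2 attribute macros. For orientation, the target shape looks roughly like the following — a schematic sketch with placeholder names, not this pallet's actual items, and it only compiles inside a pallet crate depending on `frame_support`/`frame_system` of this era:

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    // Replaces `decl_module!`'s `Module` struct.
    #[pallet::pallet]
    pub struct Pallet<T>(_);

    // Replaces the old `pub trait Trait: frame_system::Trait`.
    #[pallet::config]
    pub trait Config: frame_system::Config {
        type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
    }

    // Replaces `decl_event!`; `generate_deposit` emits `deposit_event`.
    #[pallet::event]
    #[pallet::generate_deposit(pub(super) fn deposit_event)]
    pub enum Event<T: Config> {
        SomethingStored(u32, T::AccountId),
    }

    // Replaces `decl_error!`.
    #[pallet::error]
    pub enum Error<T> {
        SomethingWrong,
    }

    // Replaces `decl_storage!`; one attribute per storage item.
    #[pallet::storage]
    pub type Value<T> = StorageValue<_, u32, ValueQuery>;

    // Dispatchables move out of `decl_module!` into a plain impl block.
    #[pallet::call]
    impl<T: Config> Pallet<T> {
        #[pallet::weight(10_000)]
        pub fn do_something(origin: OriginFor<T>, value: u32) -> DispatchResult {
            let who = ensure_signed(origin)?;
            Value::<T>::put(value);
            Self::deposit_event(Event::SomethingStored(value, who));
            Ok(())
        }
    }
}
```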
#![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_io::hashing::blake2_256; -use sp_runtime::{DispatchResult, traits::{Dispatchable, Zero, Hash, Member, Saturating}}; -use frame_support::{ - decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug, traits::{ - Get, ReservableCurrency, Currency, InstanceFilter, OriginTrait, IsType, IsSubType, - }, weights::{Weight, GetDispatchInfo}, dispatch::PostDispatchInfo, storage::IterableStorageMap, -}; -use frame_system::{self as system, ensure_signed}; -use frame_support::dispatch::DispatchError; - -mod tests; mod benchmarking; -mod default_weight; - -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -pub trait WeightInfo { - fn proxy_announced(a: u32, p: u32, ) -> Weight; - fn remove_announcement(a: u32, p: u32, ) -> Weight; - fn reject_announcement(a: u32, p: u32, ) -> Weight; - fn announce(a: u32, p: u32, ) -> Weight; - fn proxy(p: u32, ) -> Weight; - fn add_proxy(p: u32, ) -> Weight; - fn remove_proxy(p: u32, ) -> Weight; - fn remove_proxies(p: u32, ) -> Weight; - fn anonymous(p: u32, ) -> Weight; - fn kill_anonymous(p: u32, ) -> Weight; -} - -/// Configuration trait. -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The overarching call type. - type Call: Parameter + Dispatchable - + GetDispatchInfo + From> + IsSubType> - + IsType<::Call>; - - /// The currency mechanism. - type Currency: ReservableCurrency; - - /// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` fitler. - /// The instance filter determines whether a given call may be proxied under this type. - /// - /// IMPORTANT: `Default` must be provided and MUST BE the the *most permissive* value. - type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> - + Default; - - /// The base amount of currency needed to reserve for creating a proxy. - /// - /// This is held for an additional storage item whose value size is - /// `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes. - type ProxyDepositBase: Get>; - - /// The amount of currency needed per proxy added. - /// - /// This is held for adding 32 bytes plus an instance of `ProxyType` more into a pre-existing - /// storage value. - type ProxyDepositFactor: Get>; - - /// The maximum amount of proxies allowed for a single account. - type MaxProxies: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; +mod tests; +pub mod weights; - /// The maximum amount of time-delayed announcements that are allowed to be pending. - type MaxPending: Get; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{ + dispatch::DispatchError, + ensure, + traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, + weights::GetDispatchInfo, + RuntimeDebug, +}; +use frame_system::{self as system}; +use scale_info::TypeInfo; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + traits::{Dispatchable, Hash, Saturating, Zero}, + DispatchResult, +}; +use sp_std::{convert::TryInto, prelude::*}; +pub use weights::WeightInfo; - /// The type of hash used for hashing the call. - type CallHasher: Hash; +pub use pallet::*; - /// The base amount of currency needed to reserve for creating an announcement. - /// - /// This is held when a new storage item holding a `Balance` is created (typically 16 bytes). 
- type AnnouncementDepositBase: Get>; +type CallHashOf = <::CallHasher as Hash>::Output; - /// The amount of currency needed per announcement made. - /// - /// This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes) - /// into a pre-existing storage value. - type AnnouncementDepositFactor: Get>; -} +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// The parameters under which a particular account has a proxy relationship with some other /// account. -#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +#[derive( + Encode, + Decode, + Clone, + Copy, + Eq, + PartialEq, + Ord, + PartialOrd, + RuntimeDebug, + MaxEncodedLen, + TypeInfo, +)] pub struct ProxyDefinition { /// The account which may act on behalf of another. - delegate: AccountId, + pub delegate: AccountId, /// A value defining the subset of calls that it is allowed to make. - proxy_type: ProxyType, - /// The number of blocks that an announcement must be in place for before the corresponding call - /// may be dispatched. If zero, then no announcement is needed. - delay: BlockNumber, + pub proxy_type: ProxyType, + /// The number of blocks that an announcement must be in place for before the corresponding + /// call may be dispatched. If zero, then no announcement is needed. + pub delay: BlockNumber, } /// Details surrounding a specific instance of an announcement to make a call. -#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)] pub struct Announcement { /// The account which made the announcement. real: AccountId, @@ -149,82 +94,92 @@ pub struct Announcement { height: BlockNumber, } -type CallHashOf = <::CallHasher as Hash>::Output; - -decl_storage! { - trait Store for Module as Proxy { - /// The set of account proxies. Maps the account which has delegated to the accounts - /// which are being delegated to, together with the amount held on deposit. - pub Proxies get(fn proxies): map hasher(twox_64_concat) T::AccountId - => (Vec>, BalanceOf); - - /// The announcements made by the proxy (key). - pub Announcements get(fn announcements): map hasher(twox_64_concat) T::AccountId - => (Vec, T::BlockNumber>>, BalanceOf); - } -} - -decl_error! { - pub enum Error for Module { - /// There are too many proxies registered or too many announcements pending. - TooMany, - /// Proxy registration not found. - NotFound, - /// Sender is not a proxy of the account to be proxied. - NotProxy, - /// A call which is incompatible with the proxy type's filter was attempted. - Unproxyable, - /// Account is already a proxy. - Duplicate, - /// Call may not be made by proxy because it may escalate its privileges. - NoPermission, - /// Announcement, if made at all, was made too recently. - Unannounced, - } -} - -decl_event! { - /// Events type. - pub enum Event where - AccountId = ::AccountId, - ProxyType = ::ProxyType, - Hash = CallHashOf, - { - /// A proxy was executed correctly, with the given \[result\]. - ProxyExecuted(DispatchResult), - /// Anonymous account has been created by new proxy with given - /// disambiguation index and proxy type. \[anonymous, who, proxy_type, disambiguation_index\] - AnonymousCreated(AccountId, AccountId, ProxyType, u16), - /// An announcement was placed to make a call in the future. \[real, proxy, call_hash\] - Announced(AccountId, AccountId, Hash), - } -} - -decl_module! 
{
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
-		type Error = Error<T>;
-
-		/// Deposit one of this module's events by using the default implementation.
-		fn deposit_event() = default;
+#[frame_support::pallet]
+pub mod pallet {
+	use super::{DispatchResult, *};
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	#[pallet::generate_storage_info]
+	pub struct Pallet<T>(_);
+
+	/// Configuration trait.
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching event type.
+		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+
+		/// The overarching call type.
+		type Call: Parameter
+			+ Dispatchable<Origin = Self::Origin>
+			+ GetDispatchInfo
+			+ From<frame_system::Call<Self>>
+			+ IsSubType<Call<Self>>
+			+ IsType<<Self as frame_system::Config>::Call>;
+
+		/// The currency mechanism.
+		type Currency: ReservableCurrency<Self::AccountId>;
+
+		/// A kind of proxy; specified with the proxy and passed in to the `IsProxyable` filter.
+		/// The instance filter determines whether a given call may be proxied under this type.
+		///
+		/// IMPORTANT: `Default` must be provided and MUST BE the *most permissive* value.
+		type ProxyType: Parameter
+			+ Member
+			+ Ord
+			+ PartialOrd
+			+ InstanceFilter<<Self as Config>::Call>
+			+ Default
+			+ MaxEncodedLen;

 		/// The base amount of currency needed to reserve for creating a proxy.
-		const ProxyDepositBase: BalanceOf<T> = T::ProxyDepositBase::get();
+		///
+		/// This is held for an additional storage item whose value size is
+		/// `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes.
+		#[pallet::constant]
+		type ProxyDepositBase: Get<BalanceOf<Self>>;

 		/// The amount of currency needed per proxy added.
-		const ProxyDepositFactor: BalanceOf<T> = T::ProxyDepositFactor::get();
+		///
+		/// This is held for adding 32 bytes plus an instance of `ProxyType` more into a
+		/// pre-existing storage value. Thus, when configuring `ProxyDepositFactor` one should take
+		/// into account `32 + proxy_type.encode().len()` bytes of data.
+		#[pallet::constant]
+		type ProxyDepositFactor: Get<BalanceOf<Self>>;

 		/// The maximum amount of proxies allowed for a single account.
-		const MaxProxies: u16 = T::MaxProxies::get();
+		#[pallet::constant]
+		type MaxProxies: Get<u32>;

-		/// `MaxPending` metadata shadow.
-		const MaxPending: u32 = T::MaxPending::get();
+		/// Weight information for extrinsics in this pallet.
+		type WeightInfo: WeightInfo;

-		/// `AnnouncementDepositBase` metadata shadow.
-		const AnnouncementDepositBase: BalanceOf<T> = T::AnnouncementDepositBase::get();
+		/// The maximum amount of time-delayed announcements that are allowed to be pending.
+		#[pallet::constant]
+		type MaxPending: Get<u32>;

-		/// `AnnouncementDepositFactor` metadata shadow.
-		const AnnouncementDepositFactor: BalanceOf<T> = T::AnnouncementDepositFactor::get();
+		/// The type of hash used for hashing the call.
+		type CallHasher: Hash;
+
+		/// The base amount of currency needed to reserve for creating an announcement.
+		///
+		/// This is held when a new storage item holding a `Balance` is created (typically 16
+		/// bytes).
+		#[pallet::constant]
+		type AnnouncementDepositBase: Get<BalanceOf<Self>>;
+
+		/// The amount of currency needed per announcement made.
+		///
+		/// This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes)
+		/// into a pre-existing storage value.
+		#[pallet::constant]
+		type AnnouncementDepositFactor: Get<BalanceOf<Self>>;
+	}

+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
 		/// Dispatch the given `call` from an account that the sender is authorised for through
 		/// `add_proxy`.
 		///
@@ -240,24 +195,27 @@ decl_module!
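The deposit schedule these constants define is linear in the number of proxies, and only the delta is reserved or unreserved when the list grows or shrinks (see `rejig_deposit` further down). A quick plain-Rust sketch of that arithmetic, with made-up balance values:

```rust
/// Deposit owed for `n` proxies under a base-plus-per-item schedule;
/// nothing is held once the last proxy is removed.
fn required_deposit(base: u64, factor: u64, n: u64) -> u64 {
    if n == 0 { 0 } else { base + factor * n }
}

fn main() {
    let (base, factor) = (100, 30); // illustrative values only
    let held = required_deposit(base, factor, 2); // 160 currently reserved
    let owed = required_deposit(base, factor, 3); // 190 after adding one proxy
    // Adding one proxy reserves only the difference:
    assert_eq!(owed - held, factor);
    // Removing all proxies releases the whole deposit:
    assert_eq!(required_deposit(base, factor, 0), 0);
}
```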
{ /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = { + #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy(T::MaxProxies::get().into()) .saturating_add(di.weight) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) - }] - fn proxy(origin, + })] + pub fn proxy( + origin: OriginFor, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call>, - ) { + call: Box<::Call>, + ) -> DispatchResult { let who = ensure_signed(origin)?; let def = Self::find_proxy(&real, &who, force_proxy_type)?; ensure!(def.delay.is_zero(), Error::::Unannounced); Self::do_proxy(def, real, *call); + + Ok(()) } /// Register a proxy account for the sender that is able to make calls on its behalf. @@ -273,8 +231,9 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::add_proxy(T::MaxProxies::get().into())] - fn add_proxy(origin, + #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get().into()))] + pub fn add_proxy( + origin: OriginFor, delegate: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, @@ -294,8 +253,9 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::remove_proxy(T::MaxProxies::get().into())] - fn remove_proxy(origin, + #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get().into()))] + pub fn remove_proxy( + origin: OriginFor, delegate: T::AccountId, proxy_type: T::ProxyType, delay: T::BlockNumber, @@ -314,11 +274,13 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::remove_proxies(T::MaxProxies::get().into())] - fn remove_proxies(origin) { + #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get().into()))] + pub fn remove_proxies(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let (_, old_deposit) = Proxies::::take(&who); T::Currency::unreserve(&who, old_deposit); + + Ok(()) } /// Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and @@ -344,21 +306,30 @@ decl_module! { /// Weight is a function of the number of proxies the user has (P). 
/// # /// TODO: Might be over counting 1 read - #[weight = T::WeightInfo::anonymous(T::MaxProxies::get().into())] - fn anonymous(origin, proxy_type: T::ProxyType, delay: T::BlockNumber, index: u16) { + #[pallet::weight(T::WeightInfo::anonymous(T::MaxProxies::get().into()))] + pub fn anonymous( + origin: OriginFor, + proxy_type: T::ProxyType, + delay: T::BlockNumber, + index: u16, + ) -> DispatchResult { let who = ensure_signed(origin)?; let anonymous = Self::anonymous_account(&who, &proxy_type, index, None); ensure!(!Proxies::::contains_key(&anonymous), Error::::Duplicate); + + let proxy_def = + ProxyDefinition { delegate: who.clone(), proxy_type: proxy_type.clone(), delay }; + let bounded_proxies: BoundedVec<_, T::MaxProxies> = + vec![proxy_def].try_into().map_err(|_| Error::::TooMany)?; + let deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get(); T::Currency::reserve(&who, deposit)?; - let proxy_def = ProxyDefinition { - delegate: who.clone(), - proxy_type: proxy_type.clone(), - delay, - }; - Proxies::::insert(&anonymous, (vec![proxy_def], deposit)); - Self::deposit_event(RawEvent::AnonymousCreated(anonymous, who, proxy_type, index)); + + Proxies::::insert(&anonymous, (bounded_proxies, deposit)); + Self::deposit_event(Event::AnonymousCreated(anonymous, who, proxy_type, index)); + + Ok(()) } /// Removes a previously spawned anonymous proxy. @@ -381,14 +352,15 @@ decl_module! { /// # /// Weight is a function of the number of proxies the user has (P). /// # - #[weight = T::WeightInfo::kill_anonymous(T::MaxProxies::get().into())] - fn kill_anonymous(origin, + #[pallet::weight(T::WeightInfo::kill_anonymous(T::MaxProxies::get().into()))] + pub fn kill_anonymous( + origin: OriginFor, spawner: T::AccountId, proxy_type: T::ProxyType, index: u16, - #[compact] height: T::BlockNumber, - #[compact] ext_index: u32, - ) { + #[pallet::compact] height: T::BlockNumber, + #[pallet::compact] ext_index: u32, + ) -> DispatchResult { let who = ensure_signed(origin)?; let when = (height, ext_index); @@ -397,6 +369,8 @@ decl_module! { let (_, deposit) = Proxies::::take(&who); T::Currency::unreserve(&spawner, deposit); + + Ok(()) } /// Publish the hash of a proxy-call that will be made in the future. @@ -420,32 +394,42 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. 
/// # - #[weight = T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get().into())] - fn announce(origin, real: T::AccountId, call_hash: CallHashOf) { + #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get().into()))] + pub fn announce( + origin: OriginFor, + real: T::AccountId, + call_hash: CallHashOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; - Proxies::::get(&real).0.into_iter() + Proxies::::get(&real) + .0 + .into_iter() .find(|x| &x.delegate == &who) .ok_or(Error::::NotProxy)?; let announcement = Announcement { real: real.clone(), call_hash: call_hash.clone(), - height: system::Module::::block_number(), + height: system::Pallet::::block_number(), }; Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { - ensure!(pending.len() < T::MaxPending::get() as usize, Error::::TooMany); - pending.push(announcement); + pending.try_push(announcement).map_err(|_| Error::::TooMany)?; Self::rejig_deposit( &who, *deposit, T::AnnouncementDepositBase::get(), T::AnnouncementDepositFactor::get(), pending.len(), - ).map(|d| d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed")) + ) + .map(|d| { + d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed") + }) .map(|d| *deposit = d) })?; - Self::deposit_event(RawEvent::Announced(real, who, call_hash)); + Self::deposit_event(Event::Announced(real, who, call_hash)); + + Ok(()) } /// Remove a given announcement. @@ -464,10 +448,18 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. /// # - #[weight = T::WeightInfo::remove_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] - fn remove_announcement(origin, real: T::AccountId, call_hash: CallHashOf) { + #[pallet::weight( + T::WeightInfo::remove_announcement(T::MaxPending::get(), T::MaxProxies::get().into()) + )] + pub fn remove_announcement( + origin: OriginFor, + real: T::AccountId, + call_hash: CallHashOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; + + Ok(()) } /// Remove the given announcement of a delegate. @@ -486,13 +478,23 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. /// # - #[weight = T::WeightInfo::reject_announcement(T::MaxPending::get(), T::MaxProxies::get().into())] - fn reject_announcement(origin, delegate: T::AccountId, call_hash: CallHashOf) { + #[pallet::weight( + T::WeightInfo::reject_announcement(T::MaxPending::get(), T::MaxProxies::get().into()) + )] + pub fn reject_announcement( + origin: OriginFor, + delegate: T::AccountId, + call_hash: CallHashOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; - Self::edit_announcements(&delegate, |ann| ann.real != who || ann.call_hash != call_hash)?; + Self::edit_announcements(&delegate, |ann| { + ann.real != who || ann.call_hash != call_hash + })?; + + Ok(()) } - /// Dispatch the given `call` from an account that the sender is authorised for through + /// Dispatch the given `call` from an account that the sender is authorized for through /// `add_proxy`. /// /// Removes any corresponding announcement(s). @@ -509,36 +511,109 @@ decl_module! { /// - A: the number of announcements made. /// - P: the number of proxies the user has. 
/// # - #[weight = { + #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get().into()) .saturating_add(di.weight) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)), di.class) - }] - fn proxy_announced(origin, + })] + pub fn proxy_announced( + origin: OriginFor, delegate: T::AccountId, real: T::AccountId, force_proxy_type: Option, - call: Box<::Call>, - ) { + call: Box<::Call>, + ) -> DispatchResult { ensure_signed(origin)?; let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); - let now = system::Module::::block_number(); - Self::edit_announcements(&delegate, |ann| - ann.real != real || ann.call_hash != call_hash || now.saturating_sub(ann.height) < def.delay - ).map_err(|_| Error::::Unannounced)?; + let now = system::Pallet::::block_number(); + Self::edit_announcements(&delegate, |ann| { + ann.real != real || + ann.call_hash != call_hash || + now.saturating_sub(ann.height) < def.delay + }) + .map_err(|_| Error::::Unannounced)?; Self::do_proxy(def, real, *call); + + Ok(()) } } -} -impl Module { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A proxy was executed correctly, with the given \[result\]. + ProxyExecuted(DispatchResult), + /// Anonymous account has been created by new proxy with given + /// disambiguation index and proxy type. \[anonymous, who, proxy_type, + /// disambiguation_index\] + AnonymousCreated(T::AccountId, T::AccountId, T::ProxyType, u16), + /// An announcement was placed to make a call in the future. \[real, proxy, call_hash\] + Announced(T::AccountId, T::AccountId, CallHashOf), + /// A proxy was added. \[delegator, delegatee, proxy_type, delay\] + ProxyAdded(T::AccountId, T::AccountId, T::ProxyType, T::BlockNumber), + } + + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + #[pallet::error] + pub enum Error { + /// There are too many proxies registered or too many announcements pending. + TooMany, + /// Proxy registration not found. + NotFound, + /// Sender is not a proxy of the account to be proxied. + NotProxy, + /// A call which is incompatible with the proxy type's filter was attempted. + Unproxyable, + /// Account is already a proxy. + Duplicate, + /// Call may not be made by proxy because it may escalate its privileges. + NoPermission, + /// Announcement, if made at all, was made too recently. + Unannounced, + /// Cannot add self as proxy. + NoSelfProxy, + } + + /// The set of account proxies. Maps the account which has delegated to the accounts + /// which are being delegated to, together with the amount held on deposit. + #[pallet::storage] + #[pallet::getter(fn proxies)] + pub type Proxies = StorageMap< + _, + Twox64Concat, + T::AccountId, + ( + BoundedVec, T::MaxProxies>, + BalanceOf, + ), + ValueQuery, + >; + + /// The announcements made by the proxy (key). + #[pallet::storage] + #[pallet::getter(fn announcements)] + pub type Announcements = StorageMap< + _, + Twox64Concat, + T::AccountId, + ( + BoundedVec, T::BlockNumber>, T::MaxPending>, + BalanceOf, + ), + ValueQuery, + >; +} + +impl Pallet { /// Calculate the address of an anonymous account. /// /// - `who`: The spawner account. 
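The move from plain `Vec`s guarded by `ensure!(pending.len() < T::MaxPending::get() as usize, ...)` to the `BoundedVec`-backed `Proxies` and `Announcements` storage above pushes the length check into the type itself, which is what allows `generate_storage_info`/`MaxEncodedLen` to bound the pallet's storage footprint. In plain Rust the behavioural change looks like this — a hand-rolled stand-in for illustration, not `frame_support`'s actual `BoundedVec`:

```rust
/// A vector that refuses to grow past `max`, mimicking the semantics
/// the pallet now relies on via `try_push`/`try_insert`.
struct Bounded<T> {
    items: Vec<T>,
    max: usize,
}

impl<T> Bounded<T> {
    /// Reject the element (returning it to the caller) when full,
    /// instead of growing unboundedly like `Vec::push`.
    fn try_push(&mut self, item: T) -> Result<(), T> {
        if self.items.len() >= self.max {
            Err(item)
        } else {
            self.items.push(item);
            Ok(())
        }
    }
}

fn main() {
    let mut proxies = Bounded { items: Vec::new(), max: 2 };
    assert!(proxies.try_push("alice").is_ok());
    assert!(proxies.try_push("bob").is_ok());
    // A third push fails; the pallet maps this to `Error::<T>::TooMany`.
    assert!(proxies.try_push("carol").is_err());
}
```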
@@ -556,10 +631,12 @@ impl<T: Trait> Module<T> {
		index: u16,
		maybe_when: Option<(T::BlockNumber, u32)>,
	) -> T::AccountId {
-		let (height, ext_index) = maybe_when.unwrap_or_else(|| (
-			system::Module::<T>::block_number(),
-			system::Module::<T>::extrinsic_index().unwrap_or_default()
-		));
+		let (height, ext_index) = maybe_when.unwrap_or_else(|| {
+			(
+				system::Pallet::<T>::block_number(),
+				system::Pallet::<T>::extrinsic_index().unwrap_or_default(),
+			)
+		});
		let entropy = (b"modlpy/proxy____", who, height, ext_index, proxy_type, index)
			.using_encoded(blake2_256);
		T::AccountId::decode(&mut &entropy[..]).unwrap_or_default()
@@ -579,11 +656,15 @@ impl<T: Trait> Module<T> {
		proxy_type: T::ProxyType,
		delay: T::BlockNumber,
	) -> DispatchResult {
+		ensure!(delegator != &delegatee, Error::<T>::NoSelfProxy);
		Proxies::<T>::try_mutate(delegator, |(ref mut proxies, ref mut deposit)| {
-			ensure!(proxies.len() < T::MaxProxies::get() as usize, Error::<T>::TooMany);
-			let proxy_def = ProxyDefinition { delegate: delegatee, proxy_type, delay };
+			let proxy_def = ProxyDefinition {
+				delegate: delegatee.clone(),
+				proxy_type: proxy_type.clone(),
+				delay,
+			};
			let i = proxies.binary_search(&proxy_def).err().ok_or(Error::<T>::Duplicate)?;
-			proxies.insert(i, proxy_def);
+			proxies.try_insert(i, proxy_def).map_err(|_| Error::<T>::TooMany)?;
			let new_deposit = Self::deposit(proxies.len() as u32);
			if new_deposit > *deposit {
				T::Currency::reserve(delegator, new_deposit - *deposit)?;
@@ -591,6 +672,12 @@ impl<T: Trait> Module<T> {
				T::Currency::unreserve(delegator, *deposit - new_deposit);
			}
			*deposit = new_deposit;
+			Self::deposit_event(Event::<T>::ProxyAdded(
+				delegator.clone(),
+				delegatee,
+				proxy_type,
+				delay,
+			));
			Ok(())
		})
	}
@@ -642,26 +729,22 @@ impl<T: Trait> Module<T> {
		factor: BalanceOf<T>,
		len: usize,
	) -> Result<Option<BalanceOf<T>>, DispatchError> {
-		let new_deposit = if len == 0 {
-			BalanceOf::<T>::zero()
-		} else {
-			base + factor * (len as u32).into()
-		};
+		let new_deposit =
+			if len == 0 { BalanceOf::<T>::zero() } else { base + factor * (len as u32).into() };
		if new_deposit > old_deposit {
			T::Currency::reserve(&who, new_deposit - old_deposit)?;
		} else if new_deposit < old_deposit {
			T::Currency::unreserve(&who, old_deposit - new_deposit);
		}
-		Ok(if len == 0 {
-			None
-		} else {
-			Some(new_deposit)
-		})
+		Ok(if len == 0 { None } else { Some(new_deposit) })
	}

	fn edit_announcements<
-		F: FnMut(&Announcement<T::AccountId, CallHashOf<T>, T::BlockNumber>) -> bool
-	>(delegate: &T::AccountId, f: F) -> DispatchResult {
+		F: FnMut(&Announcement<T::AccountId, CallHashOf<T>, T::BlockNumber>) -> bool,
+	>(
+		delegate: &T::AccountId,
+		f: F,
+	) -> DispatchResult {
		Announcements::<T>::try_mutate_exists(delegate, |x| {
			let (mut pending, old_deposit) = x.take().ok_or(Error::<T>::NotFound)?;
			let orig_pending_len = pending.len();
@@ -673,18 +756,20 @@ impl<T: Trait> Module<T> {
				T::AnnouncementDepositBase::get(),
				T::AnnouncementDepositFactor::get(),
				pending.len(),
-			)?.map(|deposit| (pending, deposit));
+			)?
+			.map(|deposit| (pending, deposit));
			Ok(())
		})
	}

-	fn find_proxy(
+	pub fn find_proxy(
		real: &T::AccountId,
		delegate: &T::AccountId,
		force_proxy_type: Option<T::ProxyType>,
	) -> Result<ProxyDefinition<T::AccountId, T::ProxyType, T::BlockNumber>, DispatchError> {
		let f = |x: &ProxyDefinition<T::AccountId, T::ProxyType, T::BlockNumber>| -> bool {
-			&x.delegate == delegate && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y)
+			&x.delegate == delegate &&
				force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y)
		};
		Ok(Proxies::<T>::get(real).0.into_iter().find(f).ok_or(Error::<T>::NotProxy)?)
	}
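// Worked example of the deposit formula in `rejig_deposit` above (a sketch;
// `deposit_sketch` is an illustrative helper, not part of the pallet, and the
// concrete base/factor values simply match the reserved-balance assertions in
// tests.rs): the held deposit is `base + factor * len` while any items remain,
// and zero once the list is empty.
fn deposit_sketch(base: u64, factor: u64, len: u64) -> u64 {
	if len == 0 { 0 } else { base + factor * len }
}
// With base = 1 and factor = 1 (the mock runtime's values), one announcement
// reserves 2, a second raises the reservation to 3, and removing both releases
// the whole deposit.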
@@ -692,53 +777,29 @@ impl<T: Trait> Module<T> {
	fn do_proxy(
		def: ProxyDefinition<T::AccountId, T::ProxyType, T::BlockNumber>,
		real: T::AccountId,
-		call: <T as Trait>::Call,
+		call: <T as Config>::Call,
	) {
		// This is a freshly authenticated new account; the usual origin restrictions don't apply.
		let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into();
-		origin.add_filter(move |c: &<T as frame_system::Trait>::Call| {
-			let c = <T as Trait>::Call::from_ref(c);
+		origin.add_filter(move |c: &<T as frame_system::Config>::Call| {
+			let c = <T as Config>::Call::from_ref(c);
			// Make sure the proxy call cannot use this pallet to change or modify proxies
			// beyond what its own proxy type permits.
			match c.is_sub_type() {
-				// Proxy call cannot add or remove a proxy with more permissions than it already has.
-				Some(Call::add_proxy(_, ref pt, _)) | Some(Call::remove_proxy(_, ref pt, _))
-					if !def.proxy_type.is_superset(&pt) => false,
-				// Proxy call cannot remove all proxies or kill anonymous proxies unless it has full permissions.
-				Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..))
-					if def.proxy_type != T::ProxyType::default() => false,
-				_ => def.proxy_type.filter(c)
+				// Proxy call cannot add or remove a proxy with more permissions than it already
+				// has.
+				Some(Call::add_proxy { ref proxy_type, .. }) |
+				Some(Call::remove_proxy { ref proxy_type, .. })
+					if !def.proxy_type.is_superset(&proxy_type) =>
+					false,
+				// Proxy call cannot remove all proxies or kill anonymous proxies unless it has full
+				// permissions.
+				Some(Call::remove_proxies { .. }) | Some(Call::kill_anonymous { .. })
+					if def.proxy_type != T::ProxyType::default() =>
+					false,
+				_ => def.proxy_type.filter(c),
			}
		});
		let e = call.dispatch(origin);
-		Self::deposit_event(RawEvent::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error)));
-	}
-}
-
-/// Migration utilities for upgrading the Proxy pallet between its different versions.
-pub mod migration {
-	use super::*;
-
-	/// Migration code for https://github.com/paritytech/substrate/pull/6770
-	///
-	/// Details: This migration was introduced between Substrate 2.0-RC6 and Substrate 2.0 releases.
-	/// Before this migration, the `Proxies` storage item used a tuple of `AccountId` and
-	/// `ProxyType` to represent the proxy definition. After #6770, we switched to use a struct
-	/// `ProxyDefinition` which additionally included a `BlockNumber` delay value. This function,
-	/// simply takes any existing proxies using the old tuple format, and migrates it to the new
-	/// struct by setting the delay to zero.
-	pub fn migrate_to_time_delayed_proxies<T: Trait>() -> Weight {
-		Proxies::<T>::translate::<(Vec<(T::AccountId, T::ProxyType)>, BalanceOf<T>), _>(
-			|_, (targets, deposit)| Some((
-				targets.into_iter()
-					.map(|(a, t)| ProxyDefinition {
-						delegate: a,
-						proxy_type: t,
-						delay: Zero::zero(),
-					})
-					.collect::<Vec<_>>(),
-				deposit,
-			))
-		);
-		T::MaximumBlockWeight::get()
+		Self::deposit_event(Event::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error)));
	}
}
diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs
index bcf3b678ed644..d319ebb1a5ab0 100644
--- a/frame/proxy/src/tests.rs
+++ b/frame/proxy/src/tests.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,48 +21,44 @@ use super::*; +use crate as proxy; +use codec::{Decode, Encode}; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event, RuntimeDebug, dispatch::DispatchError, traits::Filter, + assert_noop, assert_ok, dispatch::DispatchError, parameter_types, traits::Contains, + RuntimeDebug, }; -use codec::{Encode, Decode}; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use crate as proxy; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} -impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - proxy, - pallet_utility, - } -} -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - frame_system::System, - pallet_balances::Balances, - proxy::Proxy, - pallet_utility::Utility, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Proxy: proxy::{Pallet, Call, Storage, Event}, + Utility: pallet_utility::{Pallet, Call, Event}, } -} +); -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -72,36 +68,33 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = TestEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } -impl pallet_utility::Trait for Test { - type Event = TestEvent; +impl pallet_utility::Config for Test { + type Event = Event; type Call = Call; type WeightInfo = (); } @@ -113,19 +106,37 @@ parameter_types! { pub const AnnouncementDepositBase: u64 = 1; pub const AnnouncementDepositFactor: u64 = 1; } -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] pub enum ProxyType { Any, JustTransfer, JustUtility, } -impl Default for ProxyType { fn default() -> Self { Self::Any } } +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} impl InstanceFilter for ProxyType { fn filter(&self, c: &Call) -> bool { match self { ProxyType::Any => true, - ProxyType::JustTransfer => matches!(c, Call::Balances(pallet_balances::Call::transfer(..))), - ProxyType::JustUtility => matches!(c, Call::Utility(..)), + ProxyType::JustTransfer => { + matches!(c, Call::Balances(pallet_balances::Call::transfer { .. })) + }, + ProxyType::JustUtility => matches!(c, Call::Utility { .. }), } } fn is_superset(&self, o: &Self) -> bool { @@ -133,18 +144,18 @@ impl InstanceFilter for ProxyType { } } pub struct BaseFilter; -impl Filter for BaseFilter { - fn filter(c: &Call) -> bool { +impl Contains for BaseFilter { + fn contains(c: &Call) -> bool { match *c { // Remark is used as a no-op call in the benchmarking - Call::System(SystemCall::remark(_)) => true, + Call::System(SystemCall::remark { .. 
}) => true, Call::System(_) => false, _ => true, } } } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type Currency = Balances; type ProxyType = ProxyType; @@ -158,74 +169,67 @@ impl Trait for Test { type AnnouncementDepositFactor = AnnouncementDepositFactor; } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Utility = pallet_utility::Module; -type Proxy = Module; - +use super::{Call as ProxyCall, Event as ProxyEvent}; use frame_system::Call as SystemCall; -use pallet_balances::Call as BalancesCall; -use pallet_balances::Error as BalancesError; -use pallet_balances::Event as BalancesEvent; -use pallet_utility::Call as UtilityCall; -use pallet_utility::Event as UtilityEvent; -use super::Call as ProxyCall; +use pallet_balances::{Call as BalancesCall, Error as BalancesError, Event as BalancesEvent}; +use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } -fn last_event() -> TestEvent { - system::Module::::events().pop().expect("Event expected").event -} - -fn expect_event>(e: E) { - assert_eq!(last_event(), e.into()); +fn last_events(n: usize) -> Vec { + system::Pallet::::events() + .into_iter() + .rev() + .take(n) + .rev() + .map(|e| e.event) + .collect() } -fn last_events(n: usize) -> Vec { - system::Module::::events().into_iter().rev().take(n).rev().map(|e| e.event).collect() +fn expect_events(e: Vec) { + assert_eq!(last_events(e.len()), e); } -fn expect_events(e: Vec) { - assert_eq!(last_events(e.len()), e); +fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) } #[test] fn announcement_works() { new_test_ext().execute_with(|| { assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + System::assert_last_event(ProxyEvent::ProxyAdded(1, 3, ProxyType::Any, 1).into()); assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); assert_eq!(Balances::reserved_balance(3), 0); assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); - assert_eq!(Announcements::::get(3), (vec![Announcement { - real: 1, - call_hash: [1; 32].into(), - height: 1, - }], 2)); - assert_eq!(Balances::reserved_balance(3), 2); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { real: 1, call_hash: [1; 32].into(), height: 1 }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); - assert_eq!(Announcements::::get(3), (vec![ - Announcement { - real: 1, - call_hash: [1; 32].into(), - height: 1, - }, - Announcement { - real: 2, - call_hash: [2; 32].into(), - height: 1, - }, - ], 3)); - assert_eq!(Balances::reserved_balance(3), 3); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![ + Announcement { real: 1, call_hash: [1; 32].into(), height: 1 }, + Announcement { real: 2, call_hash: [2; 32].into(), height: 1 }, + ] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); assert_noop!(Proxy::announce(Origin::signed(3), 2, [3; 32].into()), 
Error::::TooMany); }); @@ -241,12 +245,12 @@ fn remove_announcement_works() { let e = Error::::NotFound; assert_noop!(Proxy::remove_announcement(Origin::signed(3), 1, [0; 32].into()), e); assert_ok!(Proxy::remove_announcement(Origin::signed(3), 1, [1; 32].into())); - assert_eq!(Announcements::::get(3), (vec![Announcement { - real: 2, - call_hash: [2; 32].into(), - height: 1, - }], 2)); - assert_eq!(Balances::reserved_balance(3), 2); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { real: 2, call_hash: [2; 32].into(), height: 1 }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); }); } @@ -262,12 +266,12 @@ fn reject_announcement_works() { let e = Error::::NotFound; assert_noop!(Proxy::reject_announcement(Origin::signed(4), 3, [1; 32].into()), e); assert_ok!(Proxy::reject_announcement(Origin::signed(1), 3, [1; 32].into())); - assert_eq!(Announcements::::get(3), (vec![Announcement { - real: 2, - call_hash: [2; 32].into(), - height: 1, - }], 2)); - assert_eq!(Balances::reserved_balance(3), 2); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { real: 2, call_hash: [2; 32].into(), height: 1 }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); }); } @@ -282,14 +286,14 @@ fn announcer_must_be_proxy() { fn delayed_requires_pre_announcement() { new_test_ext().execute_with(|| { assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 1)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let call = Box::new(call_transfer(6, 1)); let e = Error::::Unannounced; assert_noop!(Proxy::proxy(Origin::signed(2), 1, None, call.clone()), e); let e = Error::::Unannounced; assert_noop!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone()), e); let call_hash = BlakeTwo256::hash_of(&call); assert_ok!(Proxy::announce(Origin::signed(2), 1, call_hash)); - system::Module::::set_block_number(2); + system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone())); }); } @@ -299,7 +303,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { new_test_ext().execute_with(|| { assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let call = Box::new(call_transfer(6, 1)); let call_hash = BlakeTwo256::hash_of(&call); assert_ok!(Proxy::announce(Origin::signed(3), 1, call_hash)); assert_ok!(Proxy::announce(Origin::signed(3), 2, call_hash)); @@ -307,75 +311,83 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { let e = Error::::Unannounced; assert_noop!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone()), e); - system::Module::::set_block_number(2); + system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone())); - assert_eq!(Announcements::::get(3), (vec![Announcement { - real: 2, - call_hash, - height: 1, - }], 2)); - assert_eq!(Balances::reserved_balance(3), 2); + let announcements = Announcements::::get(3); + assert_eq!(announcements.0, vec![Announcement { real: 2, call_hash, height: 1 }]); + assert_eq!(Balances::reserved_balance(3), announcements.1); }); } #[test] fn filtering_works() { new_test_ext().execute_with(|| { - Balances::mutate_account(&1, |a| a.free = 1000); + assert!(Balances::mutate_account(&1, |a| a.free = 1000).is_ok()); 
assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0)); assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let call = Box::new(call_transfer(6, 1)); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); let derivative_id = Utility::derivative_account_id(1, 0); - Balances::mutate_account(&derivative_id, |a| a.free = 1000); - let inner = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + assert!(Balances::mutate_account(&derivative_id, |a| a.free = 1000).is_ok()); + let inner = Box::new(call_transfer(6, 1)); - let call = Box::new(Call::Utility(UtilityCall::as_derivative(0, inner.clone()))); + let call = + Box::new(Call::Utility(UtilityCall::as_derivative { index: 0, call: inner.clone() })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); - let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); + let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![UtilityEvent::BatchCompleted.into(), RawEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![ + UtilityEvent::BatchCompleted.into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), + ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted(0, DispatchError::BadOrigin).into(), - RawEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), ]); - let inner = Box::new(Call::Proxy(ProxyCall::add_proxy(5, ProxyType::Any, 0))); - let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); + let inner = + Box::new(Call::Proxy(ProxyCall::new_call_variant_add_proxy(5, ProxyType::Any, 0))); + let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![UtilityEvent::BatchCompleted.into(), RawEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![ + 
UtilityEvent::BatchCompleted.into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), + ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted(0, DispatchError::BadOrigin).into(), - RawEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), ]); - let call = Box::new(Call::Proxy(ProxyCall::remove_proxies())); + let call = Box::new(Call::Proxy(ProxyCall::remove_proxies {})); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![BalancesEvent::::Unreserved(1, 5).into(), RawEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![ + BalancesEvent::::Unreserved(1, 5).into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), + ]); }); } @@ -383,7 +395,10 @@ fn filtering_works() { fn add_remove_proxies_works() { new_test_ext().execute_with(|| { assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), Error::::Duplicate); + assert_noop!( + Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), + Error::::Duplicate + ); assert_eq!(Balances::reserved_balance(1), 2); assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 3); @@ -391,8 +406,14 @@ fn add_remove_proxies_works() { assert_eq!(Balances::reserved_balance(1), 4); assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 5); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), Error::::TooMany); - assert_noop!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), Error::::NotFound); + assert_noop!( + Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), + Error::::TooMany + ); + assert_noop!( + Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), + Error::::NotFound + ); assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 4); assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); @@ -401,6 +422,10 @@ fn add_remove_proxies_works() { assert_eq!(Balances::reserved_balance(1), 2); assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 0); + assert_noop!( + Proxy::add_proxy(Origin::signed(1), 1, ProxyType::Any, 0), + Error::::NoSelfProxy + ); }); } @@ -422,25 +447,30 @@ fn proxying_works() { assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); - assert_noop!(Proxy::proxy(Origin::signed(4), 1, None, call.clone()), Error::::NotProxy); + let 
call = Box::new(call_transfer(6, 1));
+		assert_noop!(
+			Proxy::proxy(Origin::signed(4), 1, None, call.clone()),
+			Error::<Test>::NotProxy
+		);
		assert_noop!(
			Proxy::proxy(Origin::signed(2), 1, Some(ProxyType::Any), call.clone()),
			Error::<Test>::NotProxy
		);
		assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone()));
-		expect_event(RawEvent::ProxyExecuted(Ok(())));
+		System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into());
		assert_eq!(Balances::free_balance(6), 1);

-		let call = Box::new(Call::System(SystemCall::set_code(vec![])));
+		let call = Box::new(Call::System(SystemCall::set_code { code: vec![] }));
		assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone()));
-		expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin)));
+		System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into());

-		let call = Box::new(Call::Balances(BalancesCall::transfer_keep_alive(6, 1)));
-		assert_ok!(Call::Proxy(super::Call::proxy(1, None, call.clone())).dispatch(Origin::signed(2)));
-		expect_event(RawEvent::ProxyExecuted(Err(DispatchError::BadOrigin)));
+		let call =
+			Box::new(Call::Balances(BalancesCall::transfer_keep_alive { dest: 6, value: 1 }));
+		assert_ok!(Call::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone()))
+			.dispatch(Origin::signed(2)));
+		System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into());
		assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone()));
-		expect_event(RawEvent::ProxyExecuted(Ok(())));
+		System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into());
		assert_eq!(Balances::free_balance(6), 2);
	});
}

@@ -450,30 +480,41 @@ fn anonymous_works() {
	new_test_ext().execute_with(|| {
		assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0));
		let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None);
-		expect_event(RawEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0));
+		System::assert_last_event(
+			ProxyEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0).into(),
+		);

		// Other calls to `anonymous` are allowed as long as they're not exactly the same.
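		// A sketch of the address derivation being exercised here (it mirrors
		// `anonymous_account` in lib.rs; `derived_anonymous` is a local illustrative
		// helper, not part of the pallet API): the address commits to the spawner,
		// proxy type, disambiguation index, block number, and extrinsic index, which
		// is why repeating the exact same call at the same point in the chain fails
		// with `Duplicate` below.
		fn derived_anonymous(who: u64, proxy_type: &ProxyType, index: u16, height: u64, ext_index: u32) -> u64 {
			let entropy = (b"modlpy/proxy____", who, height, ext_index, proxy_type, index)
				.using_encoded(sp_io::hashing::blake2_256);
			u64::decode(&mut &entropy[..]).unwrap_or_default()
		}
		assert_eq!(derived_anonymous(1, &ProxyType::Any, 0, 1, 0), anon);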
assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0, 0)); assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 1)); let anon2 = Proxy::anonymous_account(&2, &ProxyType::Any, 0, None); assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0, 0)); - assert_noop!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), Error::::Duplicate); + assert_noop!( + Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), + Error::::Duplicate + ); System::set_extrinsic_index(1); assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); System::set_extrinsic_index(0); System::set_block_number(2); assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); + let call = Box::new(call_transfer(6, 1)); assert_ok!(Balances::transfer(Origin::signed(3), anon, 5)); assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call)); - expect_event(RawEvent::ProxyExecuted(Ok(()))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); assert_eq!(Balances::free_balance(6), 1); - let call = Box::new(Call::Proxy(ProxyCall::kill_anonymous(1, ProxyType::Any, 0, 1, 0))); + let call = Box::new(Call::Proxy(ProxyCall::new_call_variant_kill_anonymous( + 1, + ProxyType::Any, + 0, + 1, + 0, + ))); assert_ok!(Proxy::proxy(Origin::signed(2), anon2, None, call.clone())); let de = DispatchError::from(Error::::NoPermission).stripped(); - expect_event(RawEvent::ProxyExecuted(Err(de))); + System::assert_last_event(ProxyEvent::ProxyExecuted(Err(de)).into()); assert_noop!( Proxy::kill_anonymous(Origin::signed(1), 1, ProxyType::Any, 0, 1, 0), Error::::NoPermission @@ -481,6 +522,9 @@ fn anonymous_works() { assert_eq!(Balances::free_balance(1), 0); assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call.clone())); assert_eq!(Balances::free_balance(1), 2); - assert_noop!(Proxy::proxy(Origin::signed(1), anon, None, call.clone()), Error::::NotProxy); + assert_noop!( + Proxy::proxy(Origin::signed(1), anon, None, call.clone()), + Error::::NotProxy + ); }); } diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs new file mode 100644 index 0000000000000..41aa3034bece1 --- /dev/null +++ b/frame/proxy/src/weights.rs @@ -0,0 +1,247 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_proxy +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_proxy +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/proxy/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_proxy. +pub trait WeightInfo { + fn proxy(p: u32, ) -> Weight; + fn proxy_announced(a: u32, p: u32, ) -> Weight; + fn remove_announcement(a: u32, p: u32, ) -> Weight; + fn reject_announcement(a: u32, p: u32, ) -> Weight; + fn announce(a: u32, p: u32, ) -> Weight; + fn add_proxy(p: u32, ) -> Weight; + fn remove_proxy(p: u32, ) -> Weight; + fn remove_proxies(p: u32, ) -> Weight; + fn anonymous(p: u32, ) -> Weight; + fn kill_anonymous(p: u32, ) -> Weight; +} + +/// Weights for pallet_proxy using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Proxy Proxies (r:1 w:0) + fn proxy(p: u32, ) -> Weight { + (23_213_000 as Weight) + // Standard Error: 2_000 + .saturating_add((153_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:0) + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (53_286_000 as Weight) + // Standard Error: 2_000 + .saturating_add((549_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((138_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn remove_announcement(a: u32, _p: u32, ) -> Weight { + (36_864_000 as Weight) + // Standard Error: 2_000 + .saturating_add((550_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn reject_announcement(a: u32, _p: u32, ) -> Weight { + (36_755_000 as Weight) + // Standard Error: 1_000 + .saturating_add((550_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:0) + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn announce(a: u32, p: u32, ) -> Weight { + (50_765_000 as Weight) + // Standard Error: 2_000 + .saturating_add((547_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((141_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn add_proxy(p: u32, ) -> Weight { + (35_556_000 as Weight) + // Standard Error: 3_000 + .saturating_add((211_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn remove_proxy(p: u32, ) -> Weight { + (35_284_000 as Weight) + // Standard Error: 3_000 + .saturating_add((229_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn remove_proxies(p: u32, ) -> Weight { + (34_449_000 as Weight) + // Standard Error: 2_000 + .saturating_add((146_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + // Storage: Proxy Proxies (r:1 w:1) + fn anonymous(p: u32, ) -> Weight { + (49_149_000 as Weight) + // Standard Error: 2_000 + .saturating_add((15_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn kill_anonymous(p: u32, ) -> Weight { + (36_399_000 as Weight) + // Standard Error: 2_000 + .saturating_add((152_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Proxy Proxies (r:1 w:0) + fn proxy(p: u32, ) -> Weight { + (23_213_000 as Weight) + // Standard Error: 2_000 + .saturating_add((153_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:0) + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (53_286_000 as Weight) + // Standard Error: 2_000 + .saturating_add((549_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((138_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn remove_announcement(a: u32, _p: u32, ) -> Weight { + (36_864_000 as Weight) + // Standard Error: 2_000 + .saturating_add((550_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn reject_announcement(a: u32, _p: u32, ) -> Weight { + (36_755_000 as Weight) + // Standard Error: 1_000 + .saturating_add((550_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:0) + // Storage: Proxy Announcements (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn announce(a: u32, p: u32, ) -> Weight { + (50_765_000 as Weight) + // Standard Error: 2_000 + .saturating_add((547_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 2_000 + .saturating_add((141_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn add_proxy(p: u32, ) 
-> Weight { + (35_556_000 as Weight) + // Standard Error: 3_000 + .saturating_add((211_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn remove_proxy(p: u32, ) -> Weight { + (35_284_000 as Weight) + // Standard Error: 3_000 + .saturating_add((229_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn remove_proxies(p: u32, ) -> Weight { + (34_449_000 as Weight) + // Standard Error: 2_000 + .saturating_add((146_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + // Storage: Proxy Proxies (r:1 w:1) + fn anonymous(p: u32, ) -> Weight { + (49_149_000 as Weight) + // Standard Error: 2_000 + .saturating_add((15_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Proxy Proxies (r:1 w:1) + fn kill_anonymous(p: u32, ) -> Weight { + (36_399_000 as Weight) + // Standard Error: 2_000 + .saturating_add((152_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index d35f6960af5d9..5e8eb6b082879 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-randomness-collective-flip" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,23 +14,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] safe-mix = { version = "1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } [features] default = ["std"] std = [ "safe-mix/std", - 
"frame-system/std", "codec/std", - "frame-support/std", + "scale-info/std", "sp-runtime/std", "sp-std/std", + "frame-system/std", + "frame-support/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/randomness-collective-flip/README.md b/frame/randomness-collective-flip/README.md index 2af18d3d2f7b5..9885c734d9fad 100644 --- a/frame/randomness-collective-flip/README.md +++ b/frame/randomness-collective-flip/README.md @@ -22,10 +22,10 @@ the system trait. ```rust use frame_support::{decl_module, dispatch, traits::Randomness}; -pub trait Trait: frame_system::Trait {} +pub trait Config: frame_system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn random_module_example(origin) -> dispatch::DispatchResult { let _random_value = >::random(&b"my context"[..]); diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 6b1b9f4f37448..1b1d5cb5cd823 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,7 +25,7 @@ //! //! ## Public Functions //! -//! See the [`Module`](./struct.Module.html) struct for details of publicly available functions. +//! See the [`Module`] struct for details of publicly available functions. //! //! ## Usage //! @@ -37,69 +37,92 @@ //! ### Example - Get random seed for the current block //! //! ``` -//! use frame_support::{decl_module, dispatch, traits::Randomness}; +//! use frame_support::traits::Randomness; //! -//! pub trait Trait: frame_system::Trait {} +//! #[frame_support::pallet] +//! pub mod pallet { +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; +//! use super::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn random_module_example(origin) -> dispatch::DispatchResult { -//! let _random_value = >::random(&b"my context"[..]); -//! Ok(()) -//! } -//! } +//! #[pallet::pallet] +//! #[pallet::generate_store(pub(super) trait Store)] +//! pub struct Pallet(_); +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config + pallet_randomness_collective_flip::Config {} +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn random_module_example(origin: OriginFor) -> DispatchResult { +//! let _random_value = >::random(&b"my context"[..]); +//! Ok(()) +//! } +//! } //! } //! # fn main() { } //! 
``` #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, convert::TryInto}; -use sp_runtime::traits::Hash; -use frame_support::{ - decl_module, decl_storage, traits::Randomness, - weights::Weight -}; use safe_mix::TripletMix; + use codec::Encode; -use frame_system::Trait; +use frame_support::traits::Randomness; +use sp_runtime::traits::{Hash, Saturating}; +use sp_std::{convert::TryInto, prelude::*}; const RANDOM_MATERIAL_LEN: u32 = 81; -fn block_number_to_index(block_number: T::BlockNumber) -> usize { +fn block_number_to_index(block_number: T::BlockNumber) -> usize { // on_initialize is called on the first block after genesis - let index = (block_number - 1.into()) % RANDOM_MATERIAL_LEN.into(); + let index = (block_number - 1u32.into()) % RANDOM_MATERIAL_LEN.into(); index.try_into().ok().expect("Something % 81 is always smaller than usize; qed") } -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn on_initialize(block_number: T::BlockNumber) -> Weight { - let parent_hash = >::parent_hash(); +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - >::mutate(|ref mut values| if values.len() < RANDOM_MATERIAL_LEN as usize { - values.push(parent_hash) - } else { - let index = block_number_to_index::(block_number); - values[index] = parent_hash; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(block_number: T::BlockNumber) -> Weight { + let parent_hash = >::parent_hash(); + + >::mutate(|ref mut values| { + if values.len() < RANDOM_MATERIAL_LEN as usize { + values.push(parent_hash) + } else { + let index = block_number_to_index::(block_number); + values[index] = parent_hash; + } }); - 0 + T::DbWeight::get().reads_writes(1, 1) } } -} -decl_storage! { - trait Store for Module as RandomnessCollectiveFlip { - /// Series of block headers from the last 81 blocks that acts as random seed material. This - /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of - /// the oldest hash. - RandomMaterial get(fn random_material): Vec; - } + /// Series of block headers from the last 81 blocks that acts as random seed material. This + /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of + /// the oldest hash. + #[pallet::storage] + #[pallet::getter(fn random_material)] + pub(super) type RandomMaterial = StorageValue<_, Vec, ValueQuery>; } -impl Randomness for Module { +impl Randomness for Pallet { /// This randomness uses a low-influence function, drawing upon the block hashes from the /// previous 81 blocks. Its result for any given subject will be known far in advance by anyone /// observing the chain. Any block producer has significant influence over their block hashes @@ -110,14 +133,15 @@ impl Randomness for Module { /// WARNING: Hashing the result of this function will remove any low-influence properties it has /// and mean that all bits of the resulting value are entirely manipulatable by the author of /// the parent block, who can determine the value of `parent_hash`. 
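	// Worked example of the ring-buffer arithmetic (a sketch; the numbers mirror
	// the `known_since` assertion in the tests below): with RANDOM_MATERIAL_LEN
	// = 81, block 162 stores its parent hash at index (162 - 1) % 81 = 80, and a
	// seed drawn at block 162 has been determinable since block 162 - 81 = 81.
	fn _ring_buffer_arithmetic_sketch() {
		let block_number: u64 = 162;
		assert_eq!((block_number - 1) % (RANDOM_MATERIAL_LEN as u64), 80);
		assert_eq!(block_number.saturating_sub(RANDOM_MATERIAL_LEN as u64), 81);
	}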
- fn random(subject: &[u8]) -> T::Hash { - let block_number = >::block_number(); + fn random(subject: &[u8]) -> (T::Hash, T::BlockNumber) { + let block_number = >::block_number(); let index = block_number_to_index::(block_number); let hash_series = >::get(); - if !hash_series.is_empty() { + let seed = if !hash_series.is_empty() { // Always the case after block 1 is initialized. - hash_series.iter() + hash_series + .iter() .cycle() .skip(index) .take(RANDOM_MATERIAL_LEN as usize) @@ -126,67 +150,78 @@ impl Randomness for Module { .triplet_mix() } else { T::Hash::default() - } + }; + + (seed, block_number.saturating_sub(RANDOM_MATERIAL_LEN.into())) } } #[cfg(test)] mod tests { use super::*; + use crate as pallet_randomness_collective_flip; + use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, Header as _, IdentityLookup}, }; + use frame_support::{ - impl_outer_origin, parameter_types, weights::Weight, traits::{Randomness, OnInitialize}, + parameter_types, + traits::{OnInitialize, Randomness}, }; - - #[derive(Clone, PartialEq, Eq)] - pub struct Test; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + use frame_system::limits; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + CollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, + } + ); parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights + ::simple_max(1024); + pub BlockLength: limits::BlockLength = limits::BlockLength + ::max(2 * 1024); } - impl frame_system::Trait for Test { - type BaseCallFilter = (); + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = BlockLength; + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } - type System = frame_system::Module; - type CollectiveFlip = Module; + impl pallet_randomness_collective_flip::Config for Test {} fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -195,7 +230,7 @@ mod tests { #[test] fn test_block_number_to_index() { - for i in 1 .. 
1000 { + for i in 1..1000 { assert_eq!((i - 1) as usize % 81, block_number_to_index::(i)); } } @@ -203,14 +238,8 @@ mod tests { fn setup_blocks(blocks: u64) { let mut parent_hash = System::parent_hash(); - for i in 1 .. (blocks + 1) { - System::initialize( - &i, - &parent_hash, - &Default::default(), - &Default::default(), - frame_system::InitKind::Full, - ); + for i in 1..(blocks + 1) { + System::initialize(&i, &parent_hash, &Default::default(), frame_system::InitKind::Full); CollectiveFlip::on_initialize(i); let header = System::finalize(); @@ -272,8 +301,9 @@ mod tests { assert_eq!(CollectiveFlip::random_seed(), CollectiveFlip::random_seed()); assert_ne!(CollectiveFlip::random(b"random_1"), CollectiveFlip::random(b"random_2")); - let random = CollectiveFlip::random_seed(); + let (random, known_since) = CollectiveFlip::random_seed(); + assert_eq!(known_since, 162 - RANDOM_MATERIAL_LEN as u64); assert_ne!(random, H256::zero()); assert!(!CollectiveFlip::random_material().contains(&random)); }); diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 0ba2f5437c614..40a89e9b59f89 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-recovery" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,27 +13,27 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-std/std", "sp-io/std", "sp-runtime/std", "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/recovery/README.md b/frame/recovery/README.md index b6d3ae5aceeb3..31416c65c46a5 100644 --- a/frame/recovery/README.md +++ b/frame/recovery/README.md @@ -1,6 +1,6 @@ # Recovery Pallet -- [`recovery::Trait`](https://docs.rs/pallet-recovery/latest/pallet_recovery/trait.Trait.html) +- 
[`recovery::Config`](https://docs.rs/pallet-recovery/latest/pallet_recovery/trait.Config.html) - [`Call`](https://docs.rs/pallet-recovery/latest/pallet_recovery/enum.Call.html) ## Overview @@ -131,4 +131,4 @@ of this pallet are: * `set_recovered` - The ROOT origin is able to skip the recovery process and directly allow one account to access another. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index c97824497fded..797581788077b 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ //! # Recovery Pallet //! -//! - [`recovery::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -33,12 +33,12 @@ //! //! The recovery process for each recoverable account can be configured by the account owner. //! They are able to choose: -//! * `friends` - The list of friends that the account owner trusts to protect the -//! recovery process for their account. -//! * `threshold` - The number of friends that need to approve a recovery process for -//! the account to be successfully recovered. -//! * `delay_period` - The minimum number of blocks after the beginning of the recovery -//! process that need to pass before the account can be successfully recovered. +//! * `friends` - The list of friends that the account owner trusts to protect the recovery process +//! for their account. +//! * `threshold` - The number of friends that need to approve a recovery process for the account to +//! be successfully recovered. +//! * `delay_period` - The minimum number of blocks after the beginning of the recovery process that +//! need to pass before the account can be successfully recovered. //! //! There is a configurable deposit that all users need to pay to create a recovery //! configuration. This deposit is composed of a base deposit plus a multiplier for @@ -101,25 +101,23 @@ //! security of an account if used incorrectly. Some recommended practices for users //! of this pallet are: //! -//! * Configure a significant `delay_period` for your recovery process: As long as you -//! have access to your recoverable account, you need only check the blockchain once -//! every `delay_period` blocks to ensure that no recovery attempt is successful -//! against your account. Using off-chain notification systems can help with this, -//! but ultimately, setting a large `delay_period` means that even the most skilled -//! attacker will need to wait this long before they can access your account. -//! * Use a high threshold of approvals: Setting a value of 1 for the threshold means -//! that any of your friends would be able to recover your account. They would -//! simply need to start a recovery process and approve their own process. Similarly, -//! a threshold of 2 would mean that any 2 friends could work together to gain -//! access to your account. The only way to prevent against these kinds of attacks -//! is to choose a high threshold of approvals and select from a diverse friend -//! group that would not be able to reasonably coordinate with one another. -//! * Reset your configuration over time: Since the entire deposit of creating a -//! 
recovery configuration is returned to the user, the only cost of updating -//! your recovery configuration is the transaction fees for the calls. Thus, -//! it is strongly encouraged to regularly update your recovery configuration -//! as your life changes and your relationship with new and existing friends -//! change as well. +//! * Configure a significant `delay_period` for your recovery process: As long as you have access +//! to your recoverable account, you need only check the blockchain once every `delay_period` +//! blocks to ensure that no recovery attempt is successful against your account. Using off-chain +//! notification systems can help with this, but ultimately, setting a large `delay_period` means +//! that even the most skilled attacker will need to wait this long before they can access your +//! account. +//! * Use a high threshold of approvals: Setting a value of 1 for the threshold means that any of +//! your friends would be able to recover your account. They would simply need to start a recovery +//! process and approve their own process. Similarly, a threshold of 2 would mean that any 2 +//! friends could work together to gain access to your account. The only way to protect against +//! these kinds of attacks is to choose a high threshold of approvals and select from a diverse +//! friend group that would not be able to reasonably coordinate with one another. +//! * Reset your configuration over time: Since the entire deposit of creating a recovery +//! configuration is returned to the user, the only cost of updating your recovery configuration +//! is the transaction fees for the calls. Thus, it is strongly encouraged to regularly update +//! your recovery configuration as your life changes and your relationships with new and existing +//! friends change as well. //! //! ## Interface //! @@ -131,40 +129,44 @@ //! * `initiate_recovery` - Start the recovery process for a recoverable account. //! //! #### For Friends of a Recoverable Account -//! * `vouch_recovery` - As a `friend` of a recoverable account, vouch for a recovery attempt on the account. +//! * `vouch_recovery` - As a `friend` of a recoverable account, vouch for a recovery attempt on the +//! account. //! //! #### For a User Who Successfully Recovered an Account //! -//! * `claim_recovery` - Claim access to the account that you have successfully completed the recovery process for. -//! * `as_recovered` - Send a transaction as an account that you have recovered. See other functions below. +//! * `claim_recovery` - Claim access to the account that you have successfully completed the +//! recovery process for. +//! * `as_recovered` - Send a transaction as an account that you have recovered. See other functions +//! below. //! //! #### For the Recoverable Account //! -//! * `close_recovery` - Close an active recovery process for your account and reclaim the recovery deposit. -//! * `remove_recovery` - Remove the recovery configuration from the account, making it un-recoverable. +//! * `close_recovery` - Close an active recovery process for your account and reclaim the recovery +//! deposit. +//! * `remove_recovery` - Remove the recovery configuration from the account, making it +//! un-recoverable. //! //! #### For Super Users //! -//! * `set_recovered` - The ROOT origin is able to skip the recovery process and directly allow -//! one account to access another. +//! * `set_recovered` - The ROOT origin is able to skip the recovery process and directly allow one +//! account to access another. 
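The pallet doc above lists the interface; the sketch below shows how those dispatchables chain together in practice. It is modelled on the `recovery_life_cycle_works` test further down in this diff and assumes that test file's context: the mock runtime from `frame/recovery/src/mock.rs` (`u64` account ids, balances seeded at 100) and its `new_test_ext`/`run_to_block` helpers. The concrete accounts and parameters are illustrative.

```rust
// Illustrative only, reusing the mock runtime and test helpers from this diff.
new_test_ext().execute_with(|| {
    // Account 5 makes itself recoverable: friends 2, 3 and 4 must all vouch
    // (threshold 3), and claiming must wait 10 blocks after initiation.
    // Reserved: ConfigDepositBase (10) + 3 friends * FriendDepositFactor (1) = 13.
    assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10));
    // The owner of lost account 5 starts a recovery from fresh account 1,
    // reserving `RecoveryDeposit` (10) from account 1.
    assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5));
    // Each friend vouches for the attempt.
    assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1));
    assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1));
    assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1));
    // Once `delay_period` blocks have passed since initiation, account 1 can
    // claim account 5...
    run_to_block(11);
    assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5));
    // ...and dispatch calls on its behalf through `as_recovered`.
    let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 1, value: 50 }));
    assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call));
});
```

If the original owner regains access instead, `close_recovery` repatriates the rescuer's deposit to the recovered account and `remove_recovery` returns the configuration deposit, as the lifecycle test below demonstrates.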
// Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion}; use sp_std::prelude::*; -use sp_runtime::{ - traits::{Dispatchable, SaturatedConversion, CheckedAdd, CheckedMul}, - DispatchResult -}; -use codec::{Encode, Decode}; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, ensure, - Parameter, RuntimeDebug, weights::GetDispatchInfo, - traits::{Currency, ReservableCurrency, Get, BalanceStatus}, dispatch::PostDispatchInfo, + traits::{BalanceStatus, Currency, ReservableCurrency}, + weights::GetDispatchInfo, + RuntimeDebug, }; -use frame_system::{self as system, ensure_signed, ensure_root}; + +pub use pallet::*; #[cfg(test)] mod mock; @@ -172,45 +174,10 @@ mod mock; mod tests; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - -/// Configuration trait. -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The overarching call type. - type Call: Parameter + Dispatchable + GetDispatchInfo; - - /// The currency mechanism. - type Currency: ReservableCurrency; - - /// The base amount of currency needed to reserve for creating a recovery configuration. - /// - /// This is held for an additional storage item whose value size is - /// `2 + sizeof(BlockNumber, Balance)` bytes. - type ConfigDepositBase: Get>; - - /// The amount of currency needed per additional user when creating a recovery configuration. - /// - /// This is held for adding `sizeof(AccountId)` bytes more into a pre-existing storage value. - type FriendDepositFactor: Get>; - - /// The maximum amount of friends allowed in a recovery configuration. - type MaxFriends: Get; - - /// The base amount of currency needed to reserve for starting a recovery. - /// - /// This is primarily held for deterring malicious recovery attempts, and should - /// have a value large enough that a bad actor would choose not to place this - /// deposit. It also acts to fund additional storage item whose value size is - /// `sizeof(BlockNumber, Balance + T * AccountId)` bytes. Where T is a configurable - /// threshold. - type RecoveryDeposit: Get>; -} + <::Currency as Currency<::AccountId>>::Balance; /// An active recovery process. -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] pub struct ActiveRecovery { /// The block number when the recovery process started. created: BlockNumber, @@ -222,7 +189,7 @@ pub struct ActiveRecovery { } /// Configuration for recovering an account. -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] pub struct RecoveryConfig { /// The minimum number of blocks since the start of the recovery process before the account /// can be recovered. @@ -236,55 +203,85 @@ pub struct RecoveryConfig { threshold: u16, } -decl_storage! { - trait Store for Module as Recovery { - /// The set of recoverable accounts and their recovery configuration. 
- pub Recoverable get(fn recovery_config): - map hasher(twox_64_concat) T::AccountId - => Option, T::AccountId>>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ensure, pallet_prelude::*, traits::Get, Parameter}; + use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; + use sp_runtime::ArithmeticError; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// The overarching call type. + type Call: Parameter + + Dispatchable + + GetDispatchInfo; + + /// The currency mechanism. + type Currency: ReservableCurrency; - /// Active recovery attempts. + /// The base amount of currency needed to reserve for creating a recovery configuration. /// - /// First account is the account to be recovered, and the second account - /// is the user trying to recover the account. - pub ActiveRecoveries get(fn active_recovery): - double_map hasher(twox_64_concat) T::AccountId, hasher(twox_64_concat) T::AccountId => - Option, T::AccountId>>; + /// This is held for an additional storage item whose value size is + /// `2 + sizeof(BlockNumber, Balance)` bytes. + #[pallet::constant] + type ConfigDepositBase: Get>; - /// The list of allowed proxy accounts. + /// The amount of currency needed per additional user when creating a recovery + /// configuration. /// - /// Map from the user who can access it to the recovered account. - pub Proxy get(fn proxy): - map hasher(blake2_128_concat) T::AccountId => Option; + /// This is held for adding `sizeof(AccountId)` bytes more into a pre-existing storage + /// value. + #[pallet::constant] + type FriendDepositFactor: Get>; + + /// The maximum amount of friends allowed in a recovery configuration. + #[pallet::constant] + type MaxFriends: Get; + + /// The base amount of currency needed to reserve for starting a recovery. + /// + /// This is primarily held for deterring malicious recovery attempts, and should + /// have a value large enough that a bad actor would choose not to place this + /// deposit. It also acts to fund additional storage item whose value size is + /// `sizeof(BlockNumber, Balance + T * AccountId)` bytes. Where T is a configurable + /// threshold. + #[pallet::constant] + type RecoveryDeposit: Get>; } -} -decl_event! { /// Events type. - pub enum Event where - AccountId = ::AccountId, - { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// A recovery process has been set up for an \[account\]. - RecoveryCreated(AccountId), + RecoveryCreated(T::AccountId), /// A recovery process has been initiated for lost account by rescuer account. /// \[lost, rescuer\] - RecoveryInitiated(AccountId, AccountId), + RecoveryInitiated(T::AccountId, T::AccountId), /// A recovery process for lost account by rescuer account has been vouched for by sender. /// \[lost, rescuer, sender\] - RecoveryVouched(AccountId, AccountId, AccountId), + RecoveryVouched(T::AccountId, T::AccountId, T::AccountId), /// A recovery process for lost account by rescuer account has been closed. /// \[lost, rescuer\] - RecoveryClosed(AccountId, AccountId), + RecoveryClosed(T::AccountId, T::AccountId), /// Lost account has been successfully recovered by rescuer account. 
/// \[lost, rescuer\] - AccountRecovered(AccountId, AccountId), + AccountRecovered(T::AccountId, T::AccountId), /// A recovery process has been removed for an \[account\]. - RecoveryRemoved(AccountId), + RecoveryRemoved(T::AccountId), } -} -decl_error! { - pub enum Error for Module { + #[pallet::error] + pub enum Error { /// User is not allowed to make a call on behalf of this account NotAllowed, /// Threshold must be greater than zero @@ -313,32 +310,46 @@ decl_error! { Threshold, /// There are still active recovery attempts that need to be closed StillActive, - /// There was an overflow in a calculation - Overflow, /// This account is already set up for recovery AlreadyProxy, + /// Some internal state is broken. + BadState, } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - /// The base amount of currency needed to reserve for creating a recovery configuration. - const ConfigDepositBase: BalanceOf = T::ConfigDepositBase::get(); - - /// The amount of currency needed per additional user when creating a recovery configuration. - const FriendDepositFactor: BalanceOf = T::FriendDepositFactor::get(); - - /// The maximum amount of friends allowed in a recovery configuration. - const MaxFriends: u16 = T::MaxFriends::get(); - - /// The base amount of currency needed to reserve for starting a recovery. - const RecoveryDeposit: BalanceOf = T::RecoveryDeposit::get(); - - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; + /// The set of recoverable accounts and their recovery configuration. + #[pallet::storage] + #[pallet::getter(fn recovery_config)] + pub type Recoverable = StorageMap< + _, + Twox64Concat, + T::AccountId, + RecoveryConfig, T::AccountId>, + >; + + /// Active recovery attempts. + /// + /// First account is the account to be recovered, and the second account + /// is the user trying to recover the account. + #[pallet::storage] + #[pallet::getter(fn active_recovery)] + pub type ActiveRecoveries = StorageDoubleMap< + _, + Twox64Concat, + T::AccountId, + Twox64Concat, + T::AccountId, + ActiveRecovery, T::AccountId>, + >; + + /// The list of allowed proxy accounts. + /// + /// Map from the user who can access it to the recovered account. + #[pallet::storage] + #[pallet::getter(fn proxy)] + pub type Proxy = StorageMap<_, Blake2_128Concat, T::AccountId, T::AccountId>; + #[pallet::call] + impl Pallet { /// Send a call through a recovered account. /// /// The dispatch origin for this call must be _Signed_ and registered to @@ -352,23 +363,28 @@ decl_module! { /// - The weight of the `call` + 10,000. /// - One storage lookup to check account is recovered by `who`. O(1) /// # - #[weight = ( - call.get_dispatch_info().weight - .saturating_add(10_000) - // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), - call.get_dispatch_info().class - )] - fn as_recovered(origin, + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + dispatch_info.weight + .saturating_add(10_000) + // AccountData for inner call origin accountdata. 
+ .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + dispatch_info.class, + ) + })] + pub fn as_recovered( + origin: OriginFor, account: T::AccountId, - call: Box<::Call> + call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; ensure!(&target == &account, Error::::NotAllowed); call.dispatch(frame_system::RawOrigin::Signed(account).into()) - .map(|_| ()).map_err(|e| e.error) + .map(|_| ()) + .map_err(|e| e.error) } /// Allow ROOT to bypass the recovery process and set an a rescuer account @@ -384,12 +400,17 @@ decl_module! { /// - One storage write O(1) /// - One event /// # - #[weight = 0] - fn set_recovered(origin, lost: T::AccountId, rescuer: T::AccountId) { + #[pallet::weight(30_000_000)] + pub fn set_recovered( + origin: OriginFor, + lost: T::AccountId, + rescuer: T::AccountId, + ) -> DispatchResult { ensure_root(origin)?; // Create the recovery storage item. >::insert(&rescuer, &lost); - Self::deposit_event(RawEvent::AccountRecovered(lost, rescuer)); + Self::deposit_event(Event::::AccountRecovered(lost, rescuer)); + Ok(()) } /// Create a recovery configuration for your account. This makes your account recoverable. @@ -401,13 +422,13 @@ decl_module! { /// The dispatch origin for this call must be _Signed_. /// /// Parameters: - /// - `friends`: A list of friends you trust to vouch for recovery attempts. - /// Should be ordered and contain no duplicate values. - /// - `threshold`: The number of friends that must vouch for a recovery attempt - /// before the account can be recovered. Should be less than or equal to - /// the length of the list of friends. - /// - `delay_period`: The number of blocks after a recovery attempt is initialized - /// that needs to pass before the account can be recovered. + /// - `friends`: A list of friends you trust to vouch for recovery attempts. Should be + /// ordered and contain no duplicate values. + /// - `threshold`: The number of friends that must vouch for a recovery attempt before the + /// account can be recovered. Should be less than or equal to the length of the list of + /// friends. + /// - `delay_period`: The number of blocks after a recovery attempt is initialized that + /// needs to pass before the account can be recovered. /// /// # /// - Key: F (len of friends) @@ -419,12 +440,13 @@ decl_module! { /// /// Total Complexity: O(F + X) /// # - #[weight = 100_000_000] - fn create_recovery(origin, + #[pallet::weight(100_000_000)] + pub fn create_recovery( + origin: OriginFor, friends: Vec, threshold: u16, - delay_period: T::BlockNumber - ) { + delay_period: T::BlockNumber, + ) -> DispatchResult { let who = ensure_signed(origin)?; // Check account is not already set up for recovery ensure!(!>::contains_key(&who), Error::::AlreadyRecoverable); @@ -438,23 +460,20 @@ decl_module! 
{ // Total deposit is base fee + number of friends * factor fee let friend_deposit = T::FriendDepositFactor::get() .checked_mul(&friends.len().saturated_into()) - .ok_or(Error::::Overflow)?; + .ok_or(ArithmeticError::Overflow)?; let total_deposit = T::ConfigDepositBase::get() .checked_add(&friend_deposit) - .ok_or(Error::::Overflow)?; + .ok_or(ArithmeticError::Overflow)?; // Reserve the deposit T::Currency::reserve(&who, total_deposit)?; // Create the recovery configuration - let recovery_config = RecoveryConfig { - delay_period, - deposit: total_deposit, - friends, - threshold, - }; + let recovery_config = + RecoveryConfig { delay_period, deposit: total_deposit, friends, threshold }; // Create the recovery configuration storage item >::insert(&who, recovery_config); - Self::deposit_event(RawEvent::RecoveryCreated(who)); + Self::deposit_event(Event::::RecoveryCreated(who)); + Ok(()) } /// Initiate the process for recovering a recoverable account. @@ -466,8 +485,8 @@ decl_module! { /// The dispatch origin for this call must be _Signed_. /// /// Parameters: - /// - `account`: The lost account that you want to recover. This account - /// needs to be recoverable (i.e. have a recovery configuration). + /// - `account`: The lost account that you want to recover. This account needs to be + /// recoverable (i.e. have a recovery configuration). /// /// # /// - One storage read to check that account is recoverable. O(F) @@ -479,25 +498,29 @@ decl_module! { /// /// Total Complexity: O(F + X) /// # - #[weight = 100_000_000] - fn initiate_recovery(origin, account: T::AccountId) { + #[pallet::weight(100_000_000)] + pub fn initiate_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Check that the account is recoverable ensure!(>::contains_key(&account), Error::::NotRecoverable); // Check that the recovery process has not already been started - ensure!(!>::contains_key(&account, &who), Error::::AlreadyStarted); + ensure!( + !>::contains_key(&account, &who), + Error::::AlreadyStarted + ); // Take recovery deposit let recovery_deposit = T::RecoveryDeposit::get(); T::Currency::reserve(&who, recovery_deposit)?; // Create an active recovery status let recovery_status = ActiveRecovery { - created: >::block_number(), + created: >::block_number(), deposit: recovery_deposit, friends: vec![], }; // Create the active recovery storage item >::insert(&account, &who, recovery_status); - Self::deposit_event(RawEvent::RecoveryInitiated(account, who)); + Self::deposit_event(Event::::RecoveryInitiated(account, who)); + Ok(()) } /// Allow a "friend" of a recoverable account to vouch for an active recovery @@ -508,8 +531,7 @@ decl_module! { /// /// Parameters: /// - `lost`: The lost account that you want to recover. - /// - `rescuer`: The account trying to rescue the lost account that you - /// want to vouch for. + /// - `rescuer`: The account trying to rescue the lost account that you want to vouch for. /// /// The combination of these two parameters must point to an active recovery /// process. @@ -525,13 +547,18 @@ decl_module! { /// /// Total Complexity: O(F + logF + V + logV) /// # - #[weight = 100_000_000] - fn vouch_recovery(origin, lost: T::AccountId, rescuer: T::AccountId) { + #[pallet::weight(100_000_000)] + pub fn vouch_recovery( + origin: OriginFor, + lost: T::AccountId, + rescuer: T::AccountId, + ) -> DispatchResult { let who = ensure_signed(origin)?; // Get the recovery configuration for the lost account. 
let recovery_config = Self::recovery_config(&lost).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer. - let mut active_recovery = Self::active_recovery(&lost, &rescuer).ok_or(Error::::NotStarted)?; + let mut active_recovery = + Self::active_recovery(&lost, &rescuer).ok_or(Error::::NotStarted)?; // Make sure the voter is a friend ensure!(Self::is_friend(&recovery_config.friends, &who), Error::::NotFriend); // Either insert the vouch, or return an error that the user already vouched. @@ -541,7 +568,8 @@ decl_module! { } // Update storage with the latest details >::insert(&lost, &rescuer, active_recovery); - Self::deposit_event(RawEvent::RecoveryVouched(lost, rescuer, who)); + Self::deposit_event(Event::::RecoveryVouched(lost, rescuer, who)); + Ok(()) } /// Allow a successful rescuer to claim their recovered account. @@ -551,8 +579,8 @@ decl_module! { /// `threshold` or more vouches, waited `delay_period` blocks since initiation. /// /// Parameters: - /// - `account`: The lost account that you want to claim has been successfully - /// recovered by you. + /// - `account`: The lost account that you want to claim has been successfully recovered by + /// you. /// /// # /// Key: F (len of friends in config), V (len of vouching friends) @@ -564,29 +592,33 @@ decl_module! { /// /// Total Complexity: O(F + V) /// # - #[weight = 100_000_000] - fn claim_recovery(origin, account: T::AccountId) { + #[pallet::weight(100_000_000)] + pub fn claim_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Get the recovery configuration for the lost account - let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; + let recovery_config = + Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer - let active_recovery = Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; + let active_recovery = + Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; ensure!(!Proxy::::contains_key(&who), Error::::AlreadyProxy); // Make sure the delay period has passed - let current_block_number = >::block_number(); - let recoverable_block_number = active_recovery.created + let current_block_number = >::block_number(); + let recoverable_block_number = active_recovery + .created .checked_add(&recovery_config.delay_period) - .ok_or(Error::::Overflow)?; + .ok_or(ArithmeticError::Overflow)?; ensure!(recoverable_block_number <= current_block_number, Error::::DelayPeriod); // Make sure the threshold is met ensure!( recovery_config.threshold as usize <= active_recovery.friends.len(), Error::::Threshold ); + frame_system::Pallet::::inc_consumers(&who).map_err(|_| Error::::BadState)?; // Create the recovery storage item Proxy::::insert(&who, &account); - system::Module::::inc_ref(&who); - Self::deposit_event(RawEvent::AccountRecovered(account, who)); + Self::deposit_event(Event::::AccountRecovered(account, who)); + Ok(()) } /// As the controller of a recoverable account, close an active recovery @@ -609,15 +641,23 @@ decl_module! { /// /// Total Complexity: O(V + X) /// # - #[weight = 30_000_000] - fn close_recovery(origin, rescuer: T::AccountId) { + #[pallet::weight(30_000_000)] + pub fn close_recovery(origin: OriginFor, rescuer: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Take the active recovery process started by the rescuer for this account. 
- let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; + let active_recovery = + >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; // Move the reserved funds from the rescuer to the rescued account. // Acts like a slashing mechanism for those who try to maliciously recover accounts. - let _ = T::Currency::repatriate_reserved(&rescuer, &who, active_recovery.deposit, BalanceStatus::Free); - Self::deposit_event(RawEvent::RecoveryClosed(who, rescuer)); + let res = T::Currency::repatriate_reserved( + &rescuer, + &who, + active_recovery.deposit, + BalanceStatus::Free, + ); + debug_assert!(res.is_ok()); + Self::deposit_event(Event::::RecoveryClosed(who, rescuer)); + Ok(()) } /// Remove the recovery process for your account. Recovered accounts are still accessible. @@ -641,8 +681,8 @@ decl_module! { /// /// Total Complexity: O(F + X) /// # - #[weight = 30_000_000] - fn remove_recovery(origin) { + #[pallet::weight(30_000_000)] + pub fn remove_recovery(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; // Check there are no active recoveries let mut active_recoveries = >::iter_prefix_values(&who); @@ -652,7 +692,8 @@ decl_module! { // Unreserve the initial deposit for the recovery configuration. T::Currency::unreserve(&who, recovery_config.deposit); - Self::deposit_event(RawEvent::RecoveryRemoved(who)); + Self::deposit_event(Event::::RecoveryRemoved(who)); + Ok(()) } /// Cancel the ability to use `as_recovered` for `account`. @@ -666,18 +707,19 @@ decl_module! { /// # /// - One storage mutation to check account is recovered by `who`. O(1) /// # - #[weight = 0] - fn cancel_recovered(origin, account: T::AccountId) { + #[pallet::weight(30_000_000)] + pub fn cancel_recovered(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); Proxy::::remove(&who); - system::Module::::dec_ref(&who); + frame_system::Pallet::::dec_consumers(&who); + Ok(()) } } } -impl Module { +impl Pallet { /// Check that friends list is sorted and has no duplicates. fn is_sorted_and_unique(friends: &Vec) -> bool { friends.windows(2).all(|w| w[0] < w[1]) diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 35373562487f7..f6d4a6b159431 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,47 +19,43 @@ use super::*; +use crate as recovery; use frame_support::{ - impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, - weights::Weight, - traits::{OnInitialize, OnFinalize}, + parameter_types, + traits::{OnFinalize, OnInitialize}, }; use sp_core::H256; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, }; -use crate as recovery; - -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} -impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - recovery, - } -} -impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - pallet_balances::Balances, - recovery::Recovery, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Recovery: recovery::{Pallet, Call, Storage, Event}, } -} - -#[derive(Clone, Eq, PartialEq)] -pub struct Test; +); parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -69,32 +65,29 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type Event = TestEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -107,8 +100,8 @@ parameter_types! { pub const RecoveryDeposit: u64 = 10; } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; @@ -117,10 +110,6 @@ impl Trait for Test { type RecoveryDeposit = RecoveryDeposit; } -pub type Recovery = Module; -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; - pub type BalancesCall = pallet_balances::Call; pub type RecoveryCall = super::Call; @@ -128,7 +117,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index 8e9484f0fb089..fe971319bc97c 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,15 +18,11 @@ //! Tests for the module. use super::*; +use frame_support::{assert_noop, assert_ok, traits::Currency}; use mock::{ - Recovery, Balances, Test, Origin, Call, BalancesCall, RecoveryCall, - new_test_ext, run_to_block -}; -use sp_runtime::traits::{BadOrigin}; -use frame_support::{ - assert_noop, assert_ok, - traits::{Currency}, + new_test_ext, run_to_block, Balances, BalancesCall, Call, Origin, Recovery, RecoveryCall, Test, }; +use sp_runtime::traits::BadOrigin; #[test] fn basic_setup_works() { @@ -48,7 +44,7 @@ fn set_recovered_works() { // Root can set a recovered account though assert_ok!(Recovery::set_recovered(Origin::root(), 5, 1)); // Account 1 should now be able to make a call through account 5 - let call = Box::new(Call::Balances(BalancesCall::transfer(1, 100))); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 1, value: 100 })); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 has successfully drained the funds from account 5 assert_eq!(Balances::free_balance(1), 200); @@ -68,7 +64,8 @@ fn recovery_life_cycle_works() { run_to_block(10); // Using account 1, the user begins the recovery process to recover the lost account assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - // Off chain, the user contacts their friends and asks them to vouch for the recovery attempt + // Off chain, the user contacts their friends and asks them to vouch for the recovery + // attempt assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); @@ -79,15 +76,15 @@ fn recovery_life_cycle_works() { assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); // Account 1 can use account 5 to close the active recovery process, claiming the deposited // funds used to initiate the recovery process into account 5. - let call = Box::new(Call::Recovery(RecoveryCall::close_recovery(1))); + let call = Box::new(Call::Recovery(RecoveryCall::close_recovery { rescuer: 1 })); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 can then use account 5 to remove the recovery configuration, claiming the // deposited funds used to create the recovery configuration into account 5. - let call = Box::new(Call::Recovery(RecoveryCall::remove_recovery())); + let call = Box::new(Call::Recovery(RecoveryCall::remove_recovery {})); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 should now be able to make a call through account 5 to get all of their funds assert_eq!(Balances::free_balance(5), 110); - let call = Box::new(Call::Balances(BalancesCall::transfer(1, 110))); + let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 1, value: 110 })); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // All funds have been fully recovered! assert_eq!(Balances::free_balance(1), 200); @@ -118,7 +115,7 @@ fn malicious_recovery_fails() { assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); // shame on you assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // shame on you assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); // shame on you - // We met the threshold, lets try to recover the account...? 
+ // We met the threshold, lets try to recover the account...? assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); // Account 1 needs to wait... run_to_block(19); @@ -136,7 +133,12 @@ fn malicious_recovery_fails() { assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); // Account 5 can remove their recovery config and pick some better friends assert_ok!(Recovery::remove_recovery(Origin::signed(5))); - assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![22, 33, 44], threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + vec![22, 33, 44], + threshold, + delay_period + )); }); } @@ -174,9 +176,7 @@ fn create_recovery_handles_basic_errors() { Error::::NotSorted ); // Already configured - assert_ok!( - Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10) - ); + assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10)); assert_noop!( Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10), Error::::AlreadyRecoverable @@ -191,17 +191,18 @@ fn create_recovery_works() { let threshold = 3; let delay_period = 10; // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Deposit is taken, and scales with the number of friends they pick // Base 10 + 1 per friends = 13 total reserved assert_eq!(Balances::reserved_balance(5), 13); // Recovery configuration is correctly stored - let recovery_config = RecoveryConfig { - delay_period, - deposit: 13, - friends: friends.clone(), - threshold, - }; + let recovery_config = + RecoveryConfig { delay_period, deposit: 13, friends: friends.clone(), threshold }; assert_eq!(Recovery::recovery_config(5), Some(recovery_config)); }); } @@ -218,10 +219,18 @@ fn initiate_recovery_handles_basic_errors() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Same user cannot recover same account twice assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - assert_noop!(Recovery::initiate_recovery(Origin::signed(1), 5), Error::::AlreadyStarted); + assert_noop!( + Recovery::initiate_recovery(Origin::signed(1), 5), + Error::::AlreadyStarted + ); // No double deposit assert_eq!(Balances::reserved_balance(1), 10); }); @@ -234,17 +243,18 @@ fn initiate_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Recovery can be initiated assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Deposit is reserved assert_eq!(Balances::reserved_balance(1), 10); // Recovery status object is created correctly - let recovery_status = ActiveRecovery { - created: 0, - deposit: 10, - friends: vec![], - }; + let recovery_status = ActiveRecovery { created: 0, deposit: 10, friends: vec![] }; assert_eq!(>::get(&5, &1), Some(recovery_status)); // Multiple users can attempt to recover the same account 
assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); @@ -255,12 +265,20 @@ fn initiate_recovery_works() { fn vouch_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot vouch for non-recoverable account - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotRecoverable); + assert_noop!( + Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Error::::NotRecoverable + ); // Create a recovery process for next tests let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Cannot vouch a recovery process that has not started assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotStarted); // Initiate a recovery process @@ -269,7 +287,10 @@ fn vouch_recovery_handles_basic_errors() { assert_noop!(Recovery::vouch_recovery(Origin::signed(22), 5, 1), Error::::NotFriend); // Cannot vouch twice assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::AlreadyVouched); + assert_noop!( + Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Error::::AlreadyVouched + ); }); } @@ -280,7 +301,12 @@ fn vouch_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Vouching works assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); @@ -288,11 +314,7 @@ fn vouch_recovery_works() { assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // Final recovery status object is updated correctly - let recovery_status = ActiveRecovery { - created: 0, - deposit: 10, - friends: vec![2, 3, 4], - }; + let recovery_status = ActiveRecovery { created: 0, deposit: 10, friends: vec![2, 3, 4] }; assert_eq!(>::get(&5, &1), Some(recovery_status)); }); } @@ -306,7 +328,12 @@ fn claim_recovery_handles_basic_errors() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Cannot claim an account which has not started the recovery process assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); @@ -328,7 +355,12 @@ fn claim_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); @@ -372,7 +404,12 @@ fn remove_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - 
assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); // Cannot remove a recovery when there are active recoveries. diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 613762bb689e9..62b21fe04c9df 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scheduler" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -10,34 +10,37 @@ description = "FRAME example pallet" readme = "README.md" [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +log = { version = "0.4.14", default-features = false } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } +substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-runtime/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "sp-io/std", - "sp-std/std" + "sp-std/std", + "log/std", ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/scheduler/README.md b/frame/scheduler/README.md index 47beb71e3a0d1..9a209031d7402 100644 --- a/frame/scheduler/README.md +++ b/frame/scheduler/README.md @@ -1,7 +1,7 @@ # Scheduler A module for scheduling dispatches. 
-- [`scheduler::Trait`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/trait.Trait.html) +- [`scheduler::Config`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/trait.Config.html) - [`Call`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/enum.Call.html) - [`Module`](https://docs.rs/pallet-scheduler/latest/pallet_scheduler/struct.Module.html) @@ -12,7 +12,7 @@ specified block number or at a specified period. These scheduled dispatches may be named or anonymous and may be canceled. **NOTE:** The scheduled calls will be dispatched with the default filter -for the origin: namely `frame_system::Trait::BaseCallFilter` for all origin +for the origin: namely `frame_system::Config::BaseCallFilter` for all origin except root which will get no filter. And not the filter contained in origin use to call `fn schedule`. @@ -31,4 +31,4 @@ then those filter will not be used when dispatching the schedule call. `Vec` parameter that can be used for identification. * `cancel_named` - the named complement to the cancel function. -License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 753e9244628ad..2c164eaede229 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,20 +20,20 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use sp_std::{vec, prelude::*}; -use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_support::{ensure, traits::OnInitialize}; -use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; +use sp_std::{prelude::*, vec}; -use crate::Module as Scheduler; -use frame_system::Module as System; +use crate::Pallet as Scheduler; +use frame_system::Pallet as System; const BLOCK_NUMBER: u32 = 2; // Add `n` named items to the schedule -fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static str> { +fn fill_schedule(when: T::BlockNumber, n: u32) -> Result<(), &'static str> { // Essentially a no-op call. - let call = frame_system::Call::set_storage(vec![]); + let call = frame_system::Call::set_storage { items: vec![] }; for i in 0..n { // Named schedule is strictly heavier than anonymous Scheduler::::do_schedule_named( @@ -52,15 +52,13 @@ fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static } benchmarks! { - _ { } - schedule { let s in 0 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. - let call = Box::new(frame_system::Call::set_storage(vec![]).into()); + let call = Box::new(frame_system::Call::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; }: _(RawOrigin::Root, when, periodic, priority, call) @@ -97,7 +95,7 @@ benchmarks! { let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. - let call = Box::new(frame_system::Call::set_storage(vec![]).into()); + let call = Box::new(frame_system::Call::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; }: _(RawOrigin::Root, id, when, periodic, priority, call) @@ -143,20 +141,4 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_schedule::()); - assert_ok!(test_benchmark_cancel::()); - assert_ok!(test_benchmark_schedule_named::()); - assert_ok!(test_benchmark_cancel_named::()); - assert_ok!(test_benchmark_on_initialize::()); - }); - } -} +impl_benchmark_test_suite!(Scheduler, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/scheduler/src/default_weights.rs b/frame/scheduler/src/default_weights.rs deleted file mode 100644 index 920de1d37a07c..0000000000000 --- a/frame/scheduler/src/default_weights.rs +++ /dev/null @@ -1,50 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn schedule(s: u32, ) -> Weight { - (37_835_000 as Weight) - .saturating_add((81_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn cancel(s: u32, ) -> Weight { - (34_707_000 as Weight) - .saturating_add((3_125_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn schedule_named(s: u32, ) -> Weight { - (48_065_000 as Weight) - .saturating_add((110_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn cancel_named(s: u32, ) -> Weight { - (38_776_000 as Weight) - .saturating_add((3_138_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } -} diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 6bc2d72929633..ca9e15812a76d 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,20 +16,20 @@ // limitations under the License. //! # Scheduler -//! A module for scheduling dispatches. +//! A Pallet for scheduling dispatches. //! -//! - [`scheduler::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! ## Overview //! -//! 
This module exposes capabilities for scheduling dispatches to occur at a +//! This Pallet exposes capabilities for scheduling dispatches to occur at a //! specified block number or at a specified period. These scheduled dispatches //! may be named or anonymous and may be canceled. //! //! **NOTE:** The scheduled calls will be dispatched with the default filter -//! for the origin: namely `frame_system::Trait::BaseCallFilter` for all origin +//! for the origin: namely `frame_system::Config::BaseCallFilter` for all origin //! except root which will get no filter. And not the filter contained in origin //! use to call `fn schedule`. //! @@ -40,71 +40,37 @@ //! //! ### Dispatchable Functions //! -//! * `schedule` - schedule a dispatch, which may be periodic, to occur at a -//! specified block and with a specified priority. -//! * `cancel` - cancel a scheduled dispatch, specified by block number and -//! index. -//! * `schedule_named` - augments the `schedule` interface with an additional -//! `Vec` parameter that can be used for identification. +//! * `schedule` - schedule a dispatch, which may be periodic, to occur at a specified block and +//! with a specified priority. +//! * `cancel` - cancel a scheduled dispatch, specified by block number and index. +//! * `schedule_named` - augments the `schedule` interface with an additional `Vec` parameter +//! that can be used for identification. //! * `cancel_named` - the named complement to the cancel function. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; -mod default_weights; +pub mod weights; -use sp_std::{prelude::*, marker::PhantomData, borrow::Borrow}; -use codec::{Encode, Decode, Codec}; -use sp_runtime::{RuntimeDebug, traits::{Zero, One, BadOrigin, Saturating}}; +use codec::{Codec, Decode, Encode}; use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, IterableStorageMap, - dispatch::{Dispatchable, DispatchError, DispatchResult, Parameter}, - traits::{Get, schedule::{self, DispatchTime}, OriginTrait, EnsureOrigin, IsType}, + dispatch::{DispatchError, DispatchResult, Dispatchable, Parameter}, + traits::{ + schedule::{self, DispatchTime}, + EnsureOrigin, Get, IsType, OriginTrait, + }, weights::{GetDispatchInfo, Weight}, }; use frame_system::{self as system, ensure_signed}; - -pub trait WeightInfo { - fn schedule(s: u32, ) -> Weight; - fn cancel(s: u32, ) -> Weight; - fn schedule_named(s: u32, ) -> Weight; - fn cancel_named(s: u32, ) -> Weight; -} - -/// Our pallet's configuration trait. All our types and constants go in here. If the -/// pallet is dependent on specific other pallets, then their configuration traits -/// should be added to our implied traits list. -/// -/// `system::Trait` should always be included in our implied traits. -pub trait Trait: system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The aggregated origin which the dispatch will take. - type Origin: OriginTrait + From + IsType<::Origin>; - - /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: From> + Codec + Clone + Eq; - - /// The aggregated call type. - type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo + From>; - - /// The maximum weight that may be scheduled per block for any dispatchables of less priority - /// than `schedule::HARD_DEADLINE`. - type MaximumWeight: Get; - - /// Required origin to schedule or cancel calls. 
- type ScheduleOrigin: EnsureOrigin<::Origin>; - - /// The maximum number of scheduled calls in the queue for a single block. - /// Not strictly enforced, but used for weight estimation. - type MaxScheduledPerBlock: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} +pub use pallet::*; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{BadOrigin, One, Saturating, Zero}, + RuntimeDebug, +}; +use sp_std::{borrow::Borrow, marker::PhantomData, prelude::*}; +pub use weights::WeightInfo; /// Just a simple index for naming period tasks. pub type PeriodicIndex = u32; @@ -122,7 +88,7 @@ struct ScheduledV1 { /// Information regarding an item to be executed in the future. #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] -#[derive(Clone, RuntimeDebug, Encode, Decode)] +#[derive(Clone, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct ScheduledV2 { /// The unique identity for this task, if there is one. maybe_id: Option>, @@ -138,12 +104,13 @@ pub struct ScheduledV2 { } /// The current version of Scheduled struct. -pub type Scheduled = ScheduledV2; +pub type Scheduled = + ScheduledV2; // A value placed in storage that represents the current version of the Scheduler storage. // This value is used by the `on_runtime_upgrade` logic to determine whether we run // storage migration logic. -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo)] enum Releases { V1, V2, @@ -155,35 +122,88 @@ impl Default for Releases { } } -decl_storage! { - trait Store for Module as Scheduler { - /// Items to be executed, indexed by the block number that they should be executed on. - pub Agenda: map hasher(twox_64_concat) T::BlockNumber - => Vec::Call, T::BlockNumber, T::PalletsOrigin, T::AccountId>>>; - - /// Lookup from identity to the block number and index of the task. - Lookup: map hasher(twox_64_concat) Vec => Option>; - - /// Storage version of the pallet. - /// - /// New networks start with last version. - StorageVersion build(|_| Releases::V2): Releases; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// `system::Config` should always be included in our implied traits. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// The aggregated origin which the dispatch will take. + type Origin: OriginTrait + + From + + IsType<::Origin>; + + /// The caller origin, overarching type of all pallets origins. + type PalletsOrigin: From> + Codec + Clone + Eq + TypeInfo; + + /// The aggregated call type. + type Call: Parameter + + Dispatchable::Origin> + + GetDispatchInfo + + From>; + + /// The maximum weight that may be scheduled per block for any dispatchables of less + /// priority than `schedule::HARD_DEADLINE`. + #[pallet::constant] + type MaximumWeight: Get; + + /// Required origin to schedule or cancel calls. + type ScheduleOrigin: EnsureOrigin<::Origin>; + + /// The maximum number of scheduled calls in the queue for a single block. + /// Not strictly enforced, but used for weight estimation. + #[pallet::constant] + type MaxScheduledPerBlock: Get; + + /// Weight information for extrinsics in this pallet. 
+ type WeightInfo: WeightInfo; } -} -decl_event!( - pub enum Event where ::BlockNumber { + /// Items to be executed, indexed by the block number that they should be executed on. + #[pallet::storage] + pub type Agenda = StorageMap< + _, + Twox64Concat, + T::BlockNumber, + Vec::Call, T::BlockNumber, T::PalletsOrigin, T::AccountId>>>, + ValueQuery, + >; + + /// Lookup from identity to the block number and index of the task. + #[pallet::storage] + pub(crate) type Lookup = + StorageMap<_, Twox64Concat, Vec, TaskAddress>; + + /// Storage version of the pallet. + /// + /// New networks start with last version. + #[pallet::storage] + pub(crate) type StorageVersion = StorageValue<_, Releases, ValueQuery>; + + /// Events type. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// Scheduled some task. \[when, index\] - Scheduled(BlockNumber, u32), + Scheduled(T::BlockNumber, u32), /// Canceled some task. \[when, index\] - Canceled(BlockNumber, u32), + Canceled(T::BlockNumber, u32), /// Dispatched some task. \[task, id, result\] - Dispatched(TaskAddress, Option>, DispatchResult), + Dispatched(TaskAddress, Option>, DispatchResult), } -); -decl_error! { - pub enum Error for Module { + #[pallet::error] + pub enum Error { /// Failed to schedule a call FailedToSchedule, /// Cannot find the scheduled call. @@ -193,14 +213,135 @@ decl_error! { /// Reschedule failed because it does not change scheduled time. RescheduleNoChange, } -} -decl_module! { - /// Scheduler module declaration. - pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - fn deposit_event() = default; + #[pallet::genesis_config] + pub struct GenesisConfig; + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + StorageVersion::::put(Releases::V2); + } + } + #[pallet::hooks] + impl Hooks> for Pallet { + /// Execute the scheduled calls + /// + /// # + /// - S = Number of already scheduled calls + /// - N = Named scheduled calls + /// - P = Periodic Calls + /// - Base Weight: 9.243 + 23.45 * S µs + /// - DB Weight: + /// - Read: Agenda + Lookup * N + Agenda(Future) * P + /// - Write: Agenda + Lookup * N + Agenda(future) * P + /// # + fn on_initialize(now: T::BlockNumber) -> Weight { + let limit = T::MaximumWeight::get(); + let mut queued = Agenda::::take(now) + .into_iter() + .enumerate() + .filter_map(|(index, s)| s.map(|inner| (index as u32, inner))) + .collect::>(); + if queued.len() as u32 > T::MaxScheduledPerBlock::get() { + log::warn!( + target: "runtime::scheduler", + "Warning: This block has more items queued in Scheduler than \ + expected from the runtime configuration. An update might be needed." + ); + } + queued.sort_by_key(|(_, s)| s.priority); + let base_weight: Weight = T::DbWeight::get().reads_writes(1, 2); // Agenda + Agenda(next) + let mut total_weight: Weight = 0; + queued + .into_iter() + .enumerate() + .scan(base_weight, |cumulative_weight, (order, (index, s))| { + *cumulative_weight = + cumulative_weight.saturating_add(s.call.get_dispatch_info().weight); + + let origin = + <::Origin as From>::from(s.origin.clone()) + .into(); + + if ensure_signed(origin).is_ok() { + // AccountData for inner call origin accountdata. 
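// The branches below pre-charge each task's DB costs before the dispatch
// decision is made: a signed inner origin implies one AccountData read and
// write, a named task adds a `Lookup` write, and a periodic task adds an
// `Agenda` read and write for the block it will be re-queued into.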
+ *cumulative_weight = + cumulative_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + } + + if s.maybe_id.is_some() { + // Remove/Modify Lookup + *cumulative_weight = + cumulative_weight.saturating_add(T::DbWeight::get().writes(1)); + } + if s.maybe_periodic.is_some() { + // Read/Write Agenda for future block + *cumulative_weight = + cumulative_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + } + + Some((order, index, *cumulative_weight, s)) + }) + .filter_map(|(order, index, cumulative_weight, mut s)| { + // We allow a scheduled call if any is true: + // - It's priority is `HARD_DEADLINE` + // - It does not push the weight past the limit. + // - It is the first item in the schedule + if s.priority <= schedule::HARD_DEADLINE || + cumulative_weight <= limit || + order == 0 + { + let r = s.call.clone().dispatch(s.origin.clone().into()); + let maybe_id = s.maybe_id.clone(); + if let &Some((period, count)) = &s.maybe_periodic { + if count > 1 { + s.maybe_periodic = Some((period, count - 1)); + } else { + s.maybe_periodic = None; + } + let next = now + period; + // If scheduled is named, place it's information in `Lookup` + if let Some(ref id) = s.maybe_id { + let next_index = Agenda::::decode_len(now + period).unwrap_or(0); + Lookup::::insert(id, (next, next_index as u32)); + } + Agenda::::append(next, Some(s)); + } else { + if let Some(ref id) = s.maybe_id { + Lookup::::remove(id); + } + } + Self::deposit_event(Event::Dispatched( + (now, index), + maybe_id, + r.map(|_| ()).map_err(|e| e.error), + )); + total_weight = cumulative_weight; + None + } else { + Some(Some(s)) + } + }) + .for_each(|unused| { + let next = now + One::one(); + Agenda::::append(next, unused); + }); + + total_weight + } + } + + #[pallet::call] + impl Pallet { /// Anonymously schedule a task. /// /// # @@ -211,16 +352,24 @@ decl_module! { /// - Write: Agenda /// - Will use base weight of 25 which should be good for up to 30 scheduled calls /// # - #[weight = T::WeightInfo::schedule(T::MaxScheduledPerBlock::get())] - fn schedule(origin, + #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] + pub fn schedule( + origin: OriginFor, when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, - ) { + call: Box<::Call>, + ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); - Self::do_schedule(DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), *call)?; + let origin = ::Origin::from(origin); + Self::do_schedule( + DispatchTime::At(when), + maybe_periodic, + priority, + origin.caller().clone(), + *call, + )?; + Ok(()) } /// Cancel an anonymously scheduled task. @@ -233,11 +382,12 @@ decl_module! { /// - Write: Agenda, Lookup /// - Will use base weight of 100 which should be good for up to 30 scheduled calls /// # - #[weight = T::WeightInfo::cancel(T::MaxScheduledPerBlock::get())] - fn cancel(origin, when: T::BlockNumber, index: u32) { + #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] + pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel(Some(origin.caller().clone()), (when, index))?; + Ok(()) } /// Schedule a named task. @@ -250,19 +400,26 @@ decl_module! 
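// The dispatch-allowance rule used in `on_initialize` above, distilled into a
// standalone predicate for readability; this helper is illustrative and does
// not appear in the diff itself.
fn would_dispatch(
    priority: schedule::Priority,
    cumulative_weight: Weight,
    limit: Weight,
    order: usize,
) -> bool {
    // Run the task if it is a hard deadline, if it still fits in the block's
    // weight budget, or if it is the first task of the block (so the queue
    // always makes progress even when one call alone exceeds the limit).
    priority <= schedule::HARD_DEADLINE || cumulative_weight <= limit || order == 0
}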
{ /// - Write: Agenda, Lookup /// - Will use base weight of 35 which should be good for more than 30 scheduled calls /// # - #[weight = T::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get())] - fn schedule_named(origin, + #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] + pub fn schedule_named( + origin: OriginFor, id: Vec, when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, - ) { + call: Box<::Call>, + ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( - id, DispatchTime::At(when), maybe_periodic, priority, origin.caller().clone(), *call + id, + DispatchTime::At(when), + maybe_periodic, + priority, + origin.caller().clone(), + *call, )?; + Ok(()) } /// Cancel a named scheduled task. @@ -275,11 +432,12 @@ decl_module! { /// - Write: Agenda, Lookup /// - Will use base weight of 100 which should be good for up to 30 scheduled calls /// # - #[weight = T::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get())] - fn cancel_named(origin, id: Vec) { + #[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] + pub fn cancel_named(origin: OriginFor, id: Vec) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; + Ok(()) } /// Anonymously schedule a task after a delay. @@ -287,162 +445,82 @@ decl_module! { /// # /// Same as [`schedule`]. /// # - #[weight = T::WeightInfo::schedule(T::MaxScheduledPerBlock::get())] - fn schedule_after(origin, + #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] + pub fn schedule_after( + origin: OriginFor, after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, - ) { + call: Box<::Call>, + ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule( - DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), *call + DispatchTime::After(after), + maybe_periodic, + priority, + origin.caller().clone(), + *call, )?; + Ok(()) } /// Schedule a named task after a delay. /// /// # - /// Same as [`schedule_named`]. + /// Same as [`schedule_named`](Self::schedule_named). 
/// # - #[weight = T::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get())] - fn schedule_named_after(origin, + #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] + pub fn schedule_named_after( + origin: OriginFor, id: Vec, after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box<::Call>, - ) { + call: Box<::Call>, + ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::Origin::from(origin); Self::do_schedule_named( - id, DispatchTime::After(after), maybe_periodic, priority, origin.caller().clone(), *call + id, + DispatchTime::After(after), + maybe_periodic, + priority, + origin.caller().clone(), + *call, )?; - } - - /// Execute the scheduled calls - /// - /// # - /// - S = Number of already scheduled calls - /// - N = Named scheduled calls - /// - P = Periodic Calls - /// - Base Weight: 9.243 + 23.45 * S µs - /// - DB Weight: - /// - Read: Agenda + Lookup * N + Agenda(Future) * P - /// - Write: Agenda + Lookup * N + Agenda(future) * P - /// # - fn on_initialize(now: T::BlockNumber) -> Weight { - let limit = T::MaximumWeight::get(); - let mut queued = Agenda::::take(now).into_iter() - .enumerate() - .filter_map(|(index, s)| s.map(|inner| (index as u32, inner))) - .collect::>(); - if queued.len() as u32 > T::MaxScheduledPerBlock::get() { - frame_support::debug::warn!( - "Warning: This block has more items queued in Scheduler than \ - expected from the runtime configuration. An update might be needed." - ); - } - queued.sort_by_key(|(_, s)| s.priority); - let base_weight: Weight = T::DbWeight::get().reads_writes(1, 2); // Agenda + Agenda(next) - let mut total_weight: Weight = 0; - queued.into_iter() - .enumerate() - .scan(base_weight, |cumulative_weight, (order, (index, s))| { - *cumulative_weight = cumulative_weight - .saturating_add(s.call.get_dispatch_info().weight); - - let origin = <::Origin as From>::from( - s.origin.clone() - ).into(); - - if ensure_signed(origin).is_ok() { - // AccountData for inner call origin accountdata. - *cumulative_weight = cumulative_weight - .saturating_add(T::DbWeight::get().reads_writes(1, 1)); - } - - if s.maybe_id.is_some() { - // Remove/Modify Lookup - *cumulative_weight = cumulative_weight.saturating_add(T::DbWeight::get().writes(1)); - } - if s.maybe_periodic.is_some() { - // Read/Write Agenda for future block - *cumulative_weight = cumulative_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); - } - - Some((order, index, *cumulative_weight, s)) - }) - .filter_map(|(order, index, cumulative_weight, mut s)| { - // We allow a scheduled call if any is true: - // - It's priority is `HARD_DEADLINE` - // - It does not push the weight past the limit. 
- // - It is the first item in the schedule - if s.priority <= schedule::HARD_DEADLINE || cumulative_weight <= limit || order == 0 { - let r = s.call.clone().dispatch(s.origin.clone().into()); - let maybe_id = s.maybe_id.clone(); - if let &Some((period, count)) = &s.maybe_periodic { - if count > 1 { - s.maybe_periodic = Some((period, count - 1)); - } else { - s.maybe_periodic = None; - } - let next = now + period; - // If scheduled is named, place it's information in `Lookup` - if let Some(ref id) = s.maybe_id { - let next_index = Agenda::::decode_len(now + period).unwrap_or(0); - Lookup::::insert(id, (next, next_index as u32)); - } - Agenda::::append(next, Some(s)); - } else { - if let Some(ref id) = s.maybe_id { - Lookup::::remove(id); - } - } - Self::deposit_event(RawEvent::Dispatched( - (now, index), - maybe_id, - r.map(|_| ()).map_err(|e| e.error) - )); - total_weight = cumulative_weight; - None - } else { - Some(Some(s)) - } - }) - .for_each(|unused| { - let next = now + One::one(); - Agenda::::append(next, unused); - }); - - total_weight + Ok(()) } } } -impl Module { +impl Pallet { /// Migrate storage format from V1 to V2. /// Return true if migration is performed. pub fn migrate_v1_to_t2() -> bool { - if StorageVersion::get() == Releases::V1 { - StorageVersion::put(Releases::V2); + if StorageVersion::::get() == Releases::V1 { + StorageVersion::::put(Releases::V2); Agenda::::translate::< - Vec::Call, T::BlockNumber>>>, _ - >(|_, agenda| Some( - agenda - .into_iter() - .map(|schedule| schedule.map(|schedule| ScheduledV2 { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call, - maybe_periodic: schedule.maybe_periodic, - origin: system::RawOrigin::Root.into(), - _phantom: Default::default(), - })) - .collect::>() - )); + Vec::Call, T::BlockNumber>>>, + _, + >(|_, agenda| { + Some( + agenda + .into_iter() + .map(|schedule| { + schedule.map(|schedule| ScheduledV2 { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call, + maybe_periodic: schedule.maybe_periodic, + origin: system::RawOrigin::Root.into(), + _phantom: Default::default(), + }) + }) + .collect::>(), + ) + }); true } else { @@ -453,30 +531,35 @@ impl Module { /// Helper to migrate scheduler when the pallet origin type has changed. pub fn migrate_origin + codec::Decode>() { Agenda::::translate::< - Vec::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, _ - >(|_, agenda| Some( - agenda - .into_iter() - .map(|schedule| schedule.map(|schedule| Scheduled { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call, - maybe_periodic: schedule.maybe_periodic, - origin: schedule.origin.into(), - _phantom: Default::default(), - })) - .collect::>() - )); + Vec::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, + _, + >(|_, agenda| { + Some( + agenda + .into_iter() + .map(|schedule| { + schedule.map(|schedule| Scheduled { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call, + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin.into(), + _phantom: Default::default(), + }) + }) + .collect::>(), + ) + }); } fn resolve_time(when: DispatchTime) -> Result { - let now = frame_system::Module::::block_number(); + let now = frame_system::Pallet::::block_number(); let when = match when { DispatchTime::At(x) => x, // The current block has already completed it's scheduled tasks, so // Schedule the task at lest one block after this current block. 
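// Concretely: with `now = 2`, `After(0)` resolves to block 3 and `After(3)`
// resolves to block 6, matching the `schedule_after_*` tests further below.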
- DispatchTime::After(x) => now.saturating_add(x).saturating_add(One::one()) + DispatchTime::After(x) => now.saturating_add(x).saturating_add(One::one()), }; if when <= now { @@ -491,7 +574,7 @@ impl Module { maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call + call: ::Call, ) -> Result, DispatchError> { let when = Self::resolve_time(when)?; @@ -501,44 +584,49 @@ impl Module { // Remove one from the number of repetitions since we will schedule one now. .map(|(p, c)| (p, c - 1)); let s = Some(Scheduled { - maybe_id: None, priority, call, maybe_periodic, origin, _phantom: PhantomData::::default(), + maybe_id: None, + priority, + call, + maybe_periodic, + origin, + _phantom: PhantomData::::default(), }); Agenda::::append(when, s); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; if index > T::MaxScheduledPerBlock::get() { - frame_support::debug::warn!( + log::warn!( + target: "runtime::scheduler", "Warning: There are more items queued in the Scheduler than \ - expected from the runtime configuration. An update might be needed." + expected from the runtime configuration. An update might be needed.", ); } - Self::deposit_event(RawEvent::Scheduled(when, index)); + Self::deposit_event(Event::Scheduled(when, index)); Ok((when, index)) } fn do_cancel( origin: Option, - (when, index): TaskAddress + (when, index): TaskAddress, ) -> Result<(), DispatchError> { - let scheduled = Agenda::::try_mutate( - when, - |agenda| { - agenda.get_mut(index as usize) - .map_or(Ok(None), |s| -> Result>, DispatchError> { - if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { - if *o != s.origin { - return Err(BadOrigin.into()); - } - }; - Ok(s.take()) - }) - }, - )?; + let scheduled = Agenda::::try_mutate(when, |agenda| { + agenda.get_mut(index as usize).map_or( + Ok(None), + |s| -> Result>, DispatchError> { + if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { + if *o != s.origin { + return Err(BadOrigin.into()) + } + }; + Ok(s.take()) + }, + ) + })?; if let Some(s) = scheduled { if let Some(id) = s.maybe_id { Lookup::::remove(id); } - Self::deposit_event(RawEvent::Canceled(when, index)); + Self::deposit_event(Event::Canceled(when, index)); Ok(()) } else { Err(Error::::NotFound)? 
@@ -552,7 +640,7 @@ impl Module { let new_time = Self::resolve_time(new_time)?; if new_time == when { - return Err(Error::::RescheduleNoChange.into()); + return Err(Error::::RescheduleNoChange.into()) } Agenda::::try_mutate(when, |agenda| -> DispatchResult { @@ -563,8 +651,8 @@ impl Module { })?; let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; - Self::deposit_event(RawEvent::Canceled(when, index)); - Self::deposit_event(RawEvent::Scheduled(new_time, new_index)); + Self::deposit_event(Event::Canceled(when, index)); + Self::deposit_event(Event::Scheduled(new_time, new_index)); Ok((new_time, new_index)) } @@ -575,7 +663,7 @@ impl Module { maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call, + call: ::Call, ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { @@ -591,19 +679,25 @@ impl Module { .map(|(p, c)| (p, c - 1)); let s = Scheduled { - maybe_id: Some(id.clone()), priority, call, maybe_periodic, origin, _phantom: Default::default() + maybe_id: Some(id.clone()), + priority, + call, + maybe_periodic, + origin, + _phantom: Default::default(), }; Agenda::::append(when, Some(s)); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; if index > T::MaxScheduledPerBlock::get() { - frame_support::debug::warn!( + log::warn!( + target: "runtime::scheduler", "Warning: There are more items queued in the Scheduler than \ - expected from the runtime configuration. An update might be needed." + expected from the runtime configuration. An update might be needed.", ); } let address = (when, index); Lookup::::insert(&id, &address); - Self::deposit_event(RawEvent::Scheduled(when, index)); + Self::deposit_event(Event::Scheduled(when, index)); Ok(address) } @@ -616,14 +710,14 @@ impl Module { if let Some(s) = agenda.get_mut(i) { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { if *o != s.origin { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } } *s = None; } Ok(()) })?; - Self::deposit_event(RawEvent::Canceled(when, index)); + Self::deposit_event(Event::Canceled(when, index)); Ok(()) } else { Err(Error::::NotFound)? 
@@ -637,33 +731,38 @@ impl Module { ) -> Result, DispatchError> { let new_time = Self::resolve_time(new_time)?; - Lookup::::try_mutate_exists(id, |lookup| -> Result, DispatchError> { - let (when, index) = lookup.ok_or(Error::::NotFound)?; + Lookup::::try_mutate_exists( + id, + |lookup| -> Result, DispatchError> { + let (when, index) = lookup.ok_or(Error::::NotFound)?; - if new_time == when { - return Err(Error::::RescheduleNoChange.into()); - } + if new_time == when { + return Err(Error::::RescheduleNoChange.into()) + } - Agenda::::try_mutate(when, |agenda| -> DispatchResult { - let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; - let task = task.take().ok_or(Error::::NotFound)?; - Agenda::::append(new_time, Some(task)); + Agenda::::try_mutate(when, |agenda| -> DispatchResult { + let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; + let task = task.take().ok_or(Error::::NotFound)?; + Agenda::::append(new_time, Some(task)); - Ok(()) - })?; + Ok(()) + })?; - let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; - Self::deposit_event(RawEvent::Canceled(when, index)); - Self::deposit_event(RawEvent::Scheduled(new_time, new_index)); + let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; + Self::deposit_event(Event::Canceled(when, index)); + Self::deposit_event(Event::Scheduled(new_time, new_index)); - *lookup = Some((new_time, new_index)); + *lookup = Some((new_time, new_index)); - Ok((new_time, new_index)) - }) + Ok((new_time, new_index)) + }, + ) } } -impl schedule::Anon::Call, T::PalletsOrigin> for Module { +impl schedule::Anon::Call, T::PalletsOrigin> + for Pallet +{ type Address = TaskAddress; fn schedule( @@ -671,7 +770,7 @@ impl schedule::Anon::Call, T::PalletsOrig maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call + call: ::Call, ) -> Result { Self::do_schedule(when, maybe_periodic, priority, origin, call) } @@ -692,7 +791,9 @@ impl schedule::Anon::Call, T::PalletsOrig } } -impl schedule::Named::Call, T::PalletsOrigin> for Module { +impl schedule::Named::Call, T::PalletsOrigin> + for Pallet +{ type Address = TaskAddress; fn schedule_named( @@ -701,7 +802,7 @@ impl schedule::Named::Call, T::PalletsOri maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: ::Call, + call: ::Call, ) -> Result { Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call).map_err(|_| ()) } @@ -718,7 +819,9 @@ impl schedule::Named::Call, T::PalletsOri } fn next_dispatch_time(id: Vec) -> Result { - Lookup::::get(id).and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)).ok_or(()) + Lookup::::get(id) + .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) + .ok_or(()) } } @@ -726,24 +829,28 @@ impl schedule::Named::Call, T::PalletsOri mod tests { use super::*; + use crate as scheduler; use frame_support::{ - impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, ord_parameter_types, - assert_noop, assert_err, Hashable, - traits::{OnInitialize, OnFinalize, Filter}, + assert_err, assert_noop, assert_ok, ord_parameter_types, parameter_types, + traits::{Contains, OnFinalize, OnInitialize}, weights::constants::RocksDbWeight, + Hashable, }; + use frame_system::{EnsureOneOf, EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; - use frame_system::{EnsureOneOf, EnsureRoot, 
EnsureSignedBy}; use substrate_test_utils::assert_eq_uvec; - use crate as scheduler; - mod logger { + // Logger module to track execution. + #[frame_support::pallet] + pub mod logger { use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; use std::cell::RefCell; thread_local! { @@ -752,78 +859,87 @@ mod tests { pub fn log() -> Vec<(OriginCaller, u32)> { LOG.with(|log| log.borrow().clone()) } - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; - } - decl_event! { - pub enum Event { - Logged(u32, Weight), - } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + IsType<::Event>; } - decl_module! { - pub struct Module for enum Call - where - origin: ::Origin, - ::Origin: OriginTrait - { - fn deposit_event() = default; - - #[weight = *weight] - fn log(origin, i: u32, weight: Weight) { - Self::deposit_event(Event::Logged(i, weight)); - LOG.with(|log| { - log.borrow_mut().push((origin.caller().clone(), i)); - }) - } - #[weight = *weight] - fn log_without_filter(origin, i: u32, weight: Weight) { - Self::deposit_event(Event::Logged(i, weight)); - LOG.with(|log| { - log.borrow_mut().push((origin.caller().clone(), i)); - }) - } - } + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + Logged(u32, Weight), } - } - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + #[pallet::call] + impl Pallet + where + ::Origin: OriginTrait, + { + #[pallet::weight(*weight)] + pub fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { + Self::deposit_event(Event::Logged(i, weight)); + LOG.with(|log| { + log.borrow_mut().push((origin.caller().clone(), i)); + }); + Ok(()) + } - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - system::System, - logger::Logger, + #[pallet::weight(*weight)] + pub fn log_without_filter( + origin: OriginFor, + i: u32, + weight: Weight, + ) -> DispatchResult { + Self::deposit_event(Event::Logged(i, weight)); + LOG.with(|log| { + log.borrow_mut().push((origin.caller().clone(), i)); + }); + Ok(()) + } } } - impl_outer_event! { - pub enum Event for Test { - system, - logger, - scheduler, + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Logger: logger::{Pallet, Call, Event}, + Scheduler: scheduler::{Pallet, Call, Storage, Event}, } - } + ); // Scheduler must dispatch with root and no filter, this tests base filter is indeed not used. pub struct BaseFilter; - impl Filter for BaseFilter { - fn filter(call: &Call) -> bool { - !matches!(call, Call::Logger(logger::Call::log(_, _))) + impl Contains for BaseFilter { + fn contains(call: &Call) -> bool { + !matches!(call, Call::Logger(LoggerCall::log { .. })) } } - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 2_000_000_000_000; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); } - impl system::Trait for Test { + impl system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Call = Call; type Index = u64; @@ -833,35 +949,30 @@ mod tests { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } - impl logger::Trait for Test { - type Event = (); + impl logger::Config for Test { + type Event = Event; } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 10; } ord_parameter_types! { pub const One: u64 = 1; } - impl Trait for Test { - type Event = (); + impl Config for Test { + type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; type Call = Call; @@ -870,9 +981,8 @@ mod tests { type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = (); } - type System = system::Module; - type Logger = logger::Module; - type Scheduler = Module; + + pub type LoggerCall = logger::Call; pub fn new_test_ext() -> sp_io::TestExternalities { let t = system::GenesisConfig::default().build_storage::().unwrap(); @@ -894,8 +1004,8 @@ mod tests { #[test] fn basic_scheduling_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call)); run_to_block(3); assert!(logger::log().is_empty()); @@ -910,8 +1020,8 @@ mod tests { fn schedule_after_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + assert!(!::BaseCallFilter::contains(&call)); // This will schedule the call 3 blocks after the next block... 
so block 3 + 3 = 6 assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call)); run_to_block(5); @@ -927,8 +1037,8 @@ mod tests { fn schedule_after_zero_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call)); // Will trigger on the next block. run_to_block(3); @@ -943,7 +1053,11 @@ mod tests { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), Some((3, 3)), 127, root(), Call::Logger(logger::Call::log(42, 1000)) + DispatchTime::At(4), + Some((3, 3)), + 127, + root(), + Call::Logger(logger::Call::log { i: 42, weight: 1000 }) )); run_to_block(3); assert!(logger::log().is_empty()); @@ -965,16 +1079,22 @@ mod tests { #[test] fn reschedule_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); - assert_eq!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call).unwrap(), (4, 0)); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + assert!(!::BaseCallFilter::contains(&call)); + assert_eq!( + Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call).unwrap(), + (4, 0) + ); run_to_block(3); assert!(logger::log().is_empty()); assert_eq!(Scheduler::do_reschedule((4, 0), DispatchTime::At(6)).unwrap(), (6, 0)); - assert_noop!(Scheduler::do_reschedule((6, 0), DispatchTime::At(6)), Error::::RescheduleNoChange); + assert_noop!( + Scheduler::do_reschedule((6, 0), DispatchTime::At(6)), + Error::::RescheduleNoChange + ); run_to_block(4); assert!(logger::log().is_empty()); @@ -990,18 +1110,33 @@ mod tests { #[test] fn reschedule_named_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); - assert_eq!(Scheduler::do_schedule_named( - 1u32.encode(), DispatchTime::At(4), None, 127, root(), call - ).unwrap(), (4, 0)); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + assert!(!::BaseCallFilter::contains(&call)); + assert_eq!( + Scheduler::do_schedule_named( + 1u32.encode(), + DispatchTime::At(4), + None, + 127, + root(), + call + ) + .unwrap(), + (4, 0) + ); run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), (6, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), + (6, 0) + ); - assert_noop!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)), Error::::RescheduleNoChange); + assert_noop!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)), + Error::::RescheduleNoChange + ); run_to_block(4); assert!(logger::log().is_empty()); @@ -1017,17 +1152,32 @@ mod tests { #[test] fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(logger::Call::log(42, 1000)); - assert!(!::BaseCallFilter::filter(&call)); - assert_eq!(Scheduler::do_schedule_named( - 1u32.encode(), DispatchTime::At(4), Some((3, 3)), 127, root(), call - ).unwrap(), (4, 0)); + let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + assert!(!::BaseCallFilter::contains(&call)); + assert_eq!( + 
Scheduler::do_schedule_named( + 1u32.encode(), + DispatchTime::At(4), + Some((3, 3)), + 127, + root(), + call + ) + .unwrap(), + (4, 0) + ); run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(5)).unwrap(), (5, 0)); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), (6, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(5)).unwrap(), + (5, 0) + ); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), + (6, 0) + ); run_to_block(5); assert!(logger::log().is_empty()); @@ -1035,7 +1185,10 @@ mod tests { run_to_block(6); assert_eq!(logger::log(), vec![(root(), 42u32)]); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(10)).unwrap(), (10, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(10)).unwrap(), + (10, 0) + ); run_to_block(9); assert_eq!(logger::log(), vec![(root(), 42u32)]); @@ -1056,11 +1209,22 @@ mod tests { new_test_ext().execute_with(|| { // at #4. Scheduler::do_schedule_named( - 1u32.encode(), DispatchTime::At(4), None, 127, root(), Call::Logger(logger::Call::log(69, 1000)) - ).unwrap(); + 1u32.encode(), + DispatchTime::At(4), + None, + 127, + root(), + Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + ) + .unwrap(); let i = Scheduler::do_schedule( - DispatchTime::At(4), None, 127, root(), Call::Logger(logger::Call::log(42, 1000)) - ).unwrap(); + DispatchTime::At(4), + None, + 127, + root(), + Call::Logger(LoggerCall::log { i: 42, weight: 1000 }), + ) + .unwrap(); run_to_block(3); assert!(logger::log().is_empty()); assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); @@ -1080,8 +1244,9 @@ mod tests { Some((3, 3)), 127, root(), - Call::Logger(logger::Call::log(42, 1000)) - ).unwrap(); + Call::Logger(LoggerCall::log { i: 42, weight: 1000 }), + ) + .unwrap(); // same id results in error. assert!(Scheduler::do_schedule_named( 1u32.encode(), @@ -1089,12 +1254,19 @@ mod tests { None, 127, root(), - Call::Logger(logger::Call::log(69, 1000)) - ).is_err()); + Call::Logger(LoggerCall::log { i: 69, weight: 1000 }) + ) + .is_err()); // different id is ok. 
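// (ids are opaque byte vectors, so `2u32.encode()` simply yields a key
// distinct from `1u32.encode()`)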
Scheduler::do_schedule_named( - 2u32.encode(), DispatchTime::At(8), None, 127, root(), Call::Logger(logger::Call::log(69, 1000)) - ).unwrap(); + 2u32.encode(), + DispatchTime::At(8), + None, + 127, + root(), + Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + ) + .unwrap(); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); @@ -1114,13 +1286,14 @@ mod tests { None, 127, root(), - Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2)) + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, - root(), Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + root(), + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) )); // 69 and 42 do not fit together run_to_block(4); @@ -1138,16 +1311,17 @@ mod tests { None, 0, root(), - Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2)) + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 0, root(), - Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) )); - // With base weights, 69 and 42 should not fit together, but do because of hard deadlines + // With base weights, 69 and 42 should not fit together, but do because of hard + // deadlines run_to_block(4); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); @@ -1161,14 +1335,14 @@ mod tests { None, 1, root(), - Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 2)) + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 0, root(), - Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) )); run_to_block(4); assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); @@ -1182,19 +1356,25 @@ mod tests { DispatchTime::At(4), None, 255, - root(), Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)) + root(), + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 3 }) )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, - root(), Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + root(), + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 126, - root(), Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2)) + root(), + Call::Logger(LoggerCall::log { + i: 2600, + weight: MaximumSchedulerWeight::get() / 2 + }) )); // 2600 does not fit with 69 or 42, but has higher priority, so will go through @@ -1209,25 +1389,29 @@ mod tests { #[test] fn on_initialize_weight_is_correct() { new_test_ext().execute_with(|| { - let base_weight: Weight = ::DbWeight::get().reads_writes(1, 2); + let base_weight: Weight = + ::DbWeight::get().reads_writes(1, 2); let base_multiplier = 0; - let named_multiplier = ::DbWeight::get().writes(1); - let periodic_multiplier = ::DbWeight::get().reads_writes(1, 1); + let named_multiplier = ::DbWeight::get().writes(1); + let periodic_multiplier = + ::DbWeight::get().reads_writes(1, 1); // Named - assert_ok!( - Scheduler::do_schedule_named( - 1u32.encode(), 
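// Taken together, the weight and priority tests above pin down the dispatch
// rule: tasks in one block run in priority order (lower value first); tasks at
// or below `HARD_DEADLINE` (priority 63) are dispatched even when they exceed
// the weight budget; and among soft deadlines the best-priority task runs
// while anything that no longer fits is re-queued for the following block.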
DispatchTime::At(1), None, 255, root(), - Call::Logger(logger::Call::log(3, MaximumSchedulerWeight::get() / 3)) - ) - ); + assert_ok!(Scheduler::do_schedule_named( + 1u32.encode(), + DispatchTime::At(1), + None, + 255, + root(), + Call::Logger(LoggerCall::log { i: 3, weight: MaximumSchedulerWeight::get() / 3 }) + )); // Anon Periodic assert_ok!(Scheduler::do_schedule( DispatchTime::At(1), Some((1000, 3)), 128, root(), - Call::Logger(logger::Call::log(42, MaximumSchedulerWeight::get() / 3)) + Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 3 }) )); // Anon assert_ok!(Scheduler::do_schedule( @@ -1235,33 +1419,52 @@ mod tests { None, 127, root(), - Call::Logger(logger::Call::log(69, MaximumSchedulerWeight::get() / 2)) + Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) )); // Named Periodic assert_ok!(Scheduler::do_schedule_named( - 2u32.encode(), DispatchTime::At(1), Some((1000, 3)), 126, root(), - Call::Logger(logger::Call::log(2600, MaximumSchedulerWeight::get() / 2))) - ); + 2u32.encode(), + DispatchTime::At(1), + Some((1000, 3)), + 126, + root(), + Call::Logger(LoggerCall::log { + i: 2600, + weight: MaximumSchedulerWeight::get() / 2 + }) + )); // Will include the named periodic only let actual_weight = Scheduler::on_initialize(1); let call_weight = MaximumSchedulerWeight::get() / 2; assert_eq!( - actual_weight, call_weight + base_weight + base_multiplier + named_multiplier + periodic_multiplier + actual_weight, + call_weight + + base_weight + base_multiplier + + named_multiplier + periodic_multiplier ); assert_eq!(logger::log(), vec![(root(), 2600u32)]); // Will include anon and anon periodic let actual_weight = Scheduler::on_initialize(2); let call_weight = MaximumSchedulerWeight::get() / 2 + MaximumSchedulerWeight::get() / 3; - assert_eq!(actual_weight, call_weight + base_weight + base_multiplier * 2 + periodic_multiplier); + assert_eq!( + actual_weight, + call_weight + base_weight + base_multiplier * 2 + periodic_multiplier + ); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); // Will include named only let actual_weight = Scheduler::on_initialize(3); let call_weight = MaximumSchedulerWeight::get() / 3; - assert_eq!(actual_weight, call_weight + base_weight + base_multiplier + named_multiplier); - assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32), (root(), 3u32)]); + assert_eq!( + actual_weight, + call_weight + base_weight + base_multiplier + named_multiplier + ); + assert_eq!( + logger::log(), + vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32), (root(), 3u32)] + ); // Will contain none let actual_weight = Scheduler::on_initialize(4); @@ -1272,9 +1475,16 @@ mod tests { #[test] fn root_calls_works() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(logger::Call::log(69, 1000))); - let call2 = Box::new(Call::Logger(logger::Call::log(42, 1000))); - assert_ok!(Scheduler::schedule_named(Origin::root(), 1u32.encode(), 4, None, 127, call)); + let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 })); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 })); + assert_ok!(Scheduler::schedule_named( + Origin::root(), + 1u32.encode(), + 4, + None, + 127, + call + )); assert_ok!(Scheduler::schedule(Origin::root(), 4, None, 127, call2)); run_to_block(3); // Scheduled calls are in the agenda. 
@@ -1293,8 +1503,8 @@ mod tests { new_test_ext().execute_with(|| { run_to_block(3); - let call = Box::new(Call::Logger(logger::Call::log(69, 1000))); - let call2 = Box::new(Call::Logger(logger::Call::log(42, 1000))); + let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 })); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 })); assert_err!( Scheduler::schedule_named(Origin::root(), 1u32.encode(), 2, None, 127, call), @@ -1316,12 +1526,23 @@ mod tests { #[test] fn should_use_orign() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(logger::Call::log(69, 1000))); - let call2 = Box::new(Call::Logger(logger::Call::log(42, 1000))); - assert_ok!( - Scheduler::schedule_named(system::RawOrigin::Signed(1).into(), 1u32.encode(), 4, None, 127, call) - ); - assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2)); + let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 })); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 })); + assert_ok!(Scheduler::schedule_named( + system::RawOrigin::Signed(1).into(), + 1u32.encode(), + 4, + None, + 127, + call + )); + assert_ok!(Scheduler::schedule( + system::RawOrigin::Signed(1).into(), + 4, + None, + 127, + call2 + )); run_to_block(3); // Scheduled calls are in the agenda. assert_eq!(Agenda::::get(4).len(), 2); @@ -1337,37 +1558,69 @@ mod tests { #[test] fn should_check_orign() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(logger::Call::log(69, 1000))); - let call2 = Box::new(Call::Logger(logger::Call::log(42, 1000))); + let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 })); + let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 })); + assert_noop!( + Scheduler::schedule_named( + system::RawOrigin::Signed(2).into(), + 1u32.encode(), + 4, + None, + 127, + call + ), + BadOrigin + ); assert_noop!( - Scheduler::schedule_named(system::RawOrigin::Signed(2).into(), 1u32.encode(), 4, None, 127, call), + Scheduler::schedule(system::RawOrigin::Signed(2).into(), 4, None, 127, call2), BadOrigin ); - assert_noop!(Scheduler::schedule(system::RawOrigin::Signed(2).into(), 4, None, 127, call2), BadOrigin); }); } #[test] fn should_check_orign_for_cancel() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(logger::Call::log_without_filter(69, 1000))); - let call2 = Box::new(Call::Logger(logger::Call::log_without_filter(42, 1000))); - assert_ok!( - Scheduler::schedule_named(system::RawOrigin::Signed(1).into(), 1u32.encode(), 4, None, 127, call) - ); - assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2)); + let call = + Box::new(Call::Logger(LoggerCall::log_without_filter { i: 69, weight: 1000 })); + let call2 = + Box::new(Call::Logger(LoggerCall::log_without_filter { i: 42, weight: 1000 })); + assert_ok!(Scheduler::schedule_named( + system::RawOrigin::Signed(1).into(), + 1u32.encode(), + 4, + None, + 127, + call + )); + assert_ok!(Scheduler::schedule( + system::RawOrigin::Signed(1).into(), + 4, + None, + 127, + call2 + )); run_to_block(3); // Scheduled calls are in the agenda. 
assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_noop!(Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), 1u32.encode()), BadOrigin); + assert_noop!( + Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), 1u32.encode()), + BadOrigin + ); assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin); - assert_noop!(Scheduler::cancel_named(system::RawOrigin::Root.into(), 1u32.encode()), BadOrigin); + assert_noop!( + Scheduler::cancel_named(system::RawOrigin::Root.into(), 1u32.encode()), + BadOrigin + ); assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin); run_to_block(5); assert_eq!( logger::log(), - vec![(system::RawOrigin::Signed(1).into(), 69u32), (system::RawOrigin::Signed(1).into(), 42u32)] + vec![ + (system::RawOrigin::Signed(1).into(), 69u32), + (system::RawOrigin::Signed(1).into(), 42u32) + ] ); }); } @@ -1381,98 +1634,97 @@ mod tests { Some(ScheduledV1 { maybe_id: None, priority: i as u8 + 10, - call: Call::Logger(logger::Call::log(96, 100)), + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), maybe_periodic: None, }), None, Some(ScheduledV1 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(logger::Call::log(69, 1000)), + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), maybe_periodic: Some((456u64, 10)), }), ]; - frame_support::migration::put_storage_value( - b"Scheduler", - b"Agenda", - &k, - old, - ); + frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); } - assert_eq!(StorageVersion::get(), Releases::V1); + assert_eq!(StorageVersion::::get(), Releases::V1); assert!(Scheduler::migrate_v1_to_t2()); - assert_eq_uvec!(Agenda::::iter().collect::>(), vec![ - ( - 0, - vec![ - Some(ScheduledV2 { - maybe_id: None, - priority: 10, - call: Call::Logger(logger::Call::log(96, 100)), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: Call::Logger(logger::Call::log(69, 1000)), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ]), - ( - 1, - vec![ - Some(ScheduledV2 { - maybe_id: None, - priority: 11, - call: Call::Logger(logger::Call::log(96, 100)), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: Call::Logger(logger::Call::log(69, 1000)), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ), - ( - 2, - vec![ - Some(ScheduledV2 { - maybe_id: None, - priority: 12, - call: Call::Logger(logger::Call::log(96, 100)), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: Call::Logger(logger::Call::log(69, 1000)), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ) - ]); + assert_eq_uvec!( + Agenda::::iter().collect::>(), + vec![ + ( + 0, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 10, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + 
maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 1, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 11, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 2, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 12, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ] + ) + ] + ); - assert_eq!(StorageVersion::get(), Releases::V2); + assert_eq!(StorageVersion::::get(), Releases::V2); }); } @@ -1485,7 +1737,7 @@ mod tests { Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - call: Call::Logger(logger::Call::log(96, 100)), + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), origin: 3u32, maybe_periodic: None, _phantom: Default::default(), @@ -1495,17 +1747,12 @@ mod tests { maybe_id: Some(b"test".to_vec()), priority: 123, origin: 2u32, - call: Call::Logger(logger::Call::log(69, 1000)), + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), maybe_periodic: Some((456u64, 10)), _phantom: Default::default(), }), ]; - frame_support::migration::put_storage_value( - b"Scheduler", - b"Agenda", - &k, - old, - ); + frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); } impl Into for u32 { @@ -1520,73 +1767,77 @@ mod tests { Scheduler::migrate_origin::(); - assert_eq_uvec!(Agenda::::iter().collect::>(), vec![ - ( - 0, - vec![ - Some(ScheduledV2::<_, _, OriginCaller, u64> { - maybe_id: None, - priority: 10, - call: Call::Logger(logger::Call::log(96, 100)), - maybe_periodic: None, - origin: system::RawOrigin::Root.into(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: Call::Logger(logger::Call::log(69, 1000)), - maybe_periodic: Some((456u64, 10)), - origin: system::RawOrigin::None.into(), - _phantom: PhantomData::::default(), - }), - ]), - ( - 1, - vec![ - Some(ScheduledV2 { - maybe_id: None, - priority: 11, - call: Call::Logger(logger::Call::log(96, 100)), - maybe_periodic: None, - origin: system::RawOrigin::Root.into(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: Call::Logger(logger::Call::log(69, 1000)), - maybe_periodic: Some((456u64, 10)), - origin: system::RawOrigin::None.into(), - _phantom: PhantomData::::default(), - }), - ] - ), - ( - 2, - vec![ - Some(ScheduledV2 { - maybe_id: None, - priority: 12, - call: Call::Logger(logger::Call::log(96, 100)), - maybe_periodic: None, - origin: system::RawOrigin::Root.into(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: Call::Logger(logger::Call::log(69, 1000)), - maybe_periodic: Some((456u64, 10)), - origin: system::RawOrigin::None.into(), - _phantom: PhantomData::::default(), - 
}), - ] - ) - ]); + assert_eq_uvec!( + Agenda::::iter().collect::>(), + vec![ + ( + 0, + vec![ + Some(ScheduledV2::<_, _, OriginCaller, u64> { + maybe_id: None, + priority: 10, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + maybe_periodic: None, + origin: system::RawOrigin::Root.into(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + maybe_periodic: Some((456u64, 10)), + origin: system::RawOrigin::None.into(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 1, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 11, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + maybe_periodic: None, + origin: system::RawOrigin::Root.into(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + maybe_periodic: Some((456u64, 10)), + origin: system::RawOrigin::None.into(), + _phantom: PhantomData::::default(), + }), + ] + ), + ( + 2, + vec![ + Some(ScheduledV2 { + maybe_id: None, + priority: 12, + call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + maybe_periodic: None, + origin: system::RawOrigin::Root.into(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV2 { + maybe_id: Some(b"test".to_vec()), + priority: 123, + call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + maybe_periodic: Some((456u64, 10)), + origin: system::RawOrigin::None.into(), + _phantom: PhantomData::::default(), + }), + ] + ) + ] + ); }); } } diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs new file mode 100644 index 0000000000000..d83aefdc453af --- /dev/null +++ b/frame/scheduler/src/weights.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_scheduler +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_scheduler +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/scheduler/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_scheduler. 
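// For intuition, each generated weight function below is affine in `s`, the
// number of already scheduled calls: a benchmarked base, a per-item slope, and
// the storage operations noted above it. For example, `schedule(s)` comes out
// to roughly 24_730_000 + 77_000 * s weight units (about 24.7 µs) plus one
// `Agenda` read and one write.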
+pub trait WeightInfo { + fn schedule(s: u32, ) -> Weight; + fn cancel(s: u32, ) -> Weight; + fn schedule_named(s: u32, ) -> Weight; + fn cancel_named(s: u32, ) -> Weight; +} + +/// Weights for pallet_scheduler using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Scheduler Agenda (r:1 w:1) + fn schedule(s: u32, ) -> Weight { + (24_730_000 as Weight) + // Standard Error: 1_000 + .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn cancel(s: u32, ) -> Weight { + (23_272_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_261_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn schedule_named(s: u32, ) -> Weight { + (30_971_000 as Weight) + // Standard Error: 1_000 + .saturating_add((96_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn cancel_named(s: u32, ) -> Weight { + (25_778_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_270_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Scheduler Agenda (r:1 w:1) + fn schedule(s: u32, ) -> Weight { + (24_730_000 as Weight) + // Standard Error: 1_000 + .saturating_add((77_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Scheduler Agenda (r:1 w:1) + // Storage: Scheduler Lookup (r:0 w:1) + fn cancel(s: u32, ) -> Weight { + (23_272_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_261_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn schedule_named(s: u32, ) -> Weight { + (30_971_000 as Weight) + // Standard Error: 1_000 + .saturating_add((96_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn cancel_named(s: u32, ) -> Weight { + (25_778_000 as Weight) + // Standard Error: 4_000 + .saturating_add((1_270_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index b36bade8e925a..9d5f156c175d5 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scored-pool" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" 
license = "Apache-2.0" @@ -13,26 +13,27 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } [features] default = ["std"] std = [ "codec/std", - "serde", + "scale-info/std", "sp-io/std", "sp-runtime/std", "sp-std/std", "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md index 948d5b497721b..bf20124edf52e 100644 --- a/frame/scored-pool/README.md +++ b/frame/scored-pool/README.md @@ -20,7 +20,7 @@ time. If an entity is currently a member, this results in removal from the `Pool` and `Members`; the entity is immediately replaced by the next highest scoring candidate in the pool, if available. -- [`scored_pool::Trait`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/trait.Trait.html) +- [`scored_pool::Trait`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/trait.Config.html) - [`Call`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/enum.Call.html) - [`Module`](https://docs.rs/pallet-scored-pool/latest/pallet_scored_pool/struct.Module.html) @@ -41,10 +41,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_scored_pool::{self as scored_pool}; -pub trait Trait: scored_pool::Trait {} +pub trait Config: scored_pool::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn candidate(origin) -> dispatch::DispatchResult { let who = ensure_signed(origin)?; @@ -63,4 +63,4 @@ decl_module! { This module depends on the [System module](https://docs.rs/frame-system/latest/frame_system/). 
diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs
index 90d4aca4e42a4..a5cdb6274f995 100644
--- a/frame/scored-pool/src/lib.rs
+++ b/frame/scored-pool/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,9 +15,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! # Scored Pool Module
+//! # Scored Pool Pallet
 //!
-//! The module maintains a scored membership pool. Each entity in the
+//! The pallet maintains a scored membership pool. Each entity in the
 //! pool can be attributed a `Score`. From this pool a set `Members`
 //! is constructed. This set contains the `MemberCount` highest
 //! scoring entities. Unscored entities are never part of `Members`.
@@ -37,9 +37,9 @@
 //! from the `Pool` and `Members`; the entity is immediately replaced
 //! by the next highest scoring candidate in the pool, if available.
 //!
-//! - [`scored_pool::Trait`](./trait.Trait.html)
-//! - [`Call`](./enum.Call.html)
-//! - [`Module`](./struct.Module.html)
+//! - [`Config`]
+//! - [`Call`]
+//! - [`Pallet`]
 //!
 //! ## Interface
 //!
@@ -58,15 +58,15 @@
 //! use frame_system::ensure_signed;
 //! use pallet_scored_pool::{self as scored_pool};
 //!
-//! pub trait Trait: scored_pool::Trait {}
+//! pub trait Config: scored_pool::Config {}
 //!
 //! decl_module! {
-//! 	pub struct Module<T: Trait> for enum Call<T> where origin: T::Origin {
+//! 	pub struct Module<T: Config> for enum Call<T> where origin: T::Origin {
 //! 		#[weight = 0]
 //! 		pub fn candidate(origin) -> dispatch::DispatchResult {
 //! 			let who = ensure_signed(origin)?;
 //!
-//! 			let _ = <scored_pool::Module<T>>::submit_candidacy(
+//! 			let _ = <scored_pool::Pallet<T>>::submit_candidacy(
 //! 				T::Origin::from(Some(who.clone()).into())
 //! 			);
 //! 			Ok(())
@@ -79,7 +79,7 @@
 //!
 //! ## Dependencies
 //!
-//! This module depends on the [System module](../frame_system/index.html).
+//! This pallet depends on the [System pallet](../frame_system/index.html).
 
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
@@ -91,20 +91,17 @@ mod mock;
 mod tests;
 
 use codec::FullCodec;
-use sp_std::{
-	fmt::Debug,
-	prelude::*,
-};
 use frame_support::{
-	decl_module, decl_storage, decl_event, ensure, decl_error,
-	traits::{EnsureOrigin, ChangeMembers, InitializeMembers, Currency, Get, ReservableCurrency},
-	weights::Weight,
+	ensure,
+	traits::{ChangeMembers, Currency, Get, InitializeMembers, ReservableCurrency},
 };
-use frame_system::{ensure_root, ensure_signed};
-use sp_runtime::traits::{AtLeast32Bit, MaybeSerializeDeserialize, Zero, StaticLookup};
+pub use pallet::*;
+use sp_runtime::traits::{AtLeast32Bit, StaticLookup, Zero};
+use sp_std::{fmt::Debug, prelude::*};
 
-type BalanceOf<T, I> = <<T as Trait<I>>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
-type PoolT<T, I> = Vec<(<T as frame_system::Trait>::AccountId, Option<<T as Trait<I>>::Score>)>;
+type BalanceOf<T, I> =
+	<<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+type PoolT<T, I> = Vec<(<T as frame_system::Config>::AccountId, Option<<T as Config<I>>::Score>)>;
 
 /// The enum is supplied when refreshing the members set.
 /// Depending on the enum variant the corresponding associated
@@ -116,96 +113,68 @@ enum ChangeReceiver {
 	MembershipChanged,
 }
 
-pub trait Trait<I=DefaultInstance>: frame_system::Trait {
-	/// The currency used for deposits.
-	type Currency: Currency<Self::AccountId> + ReservableCurrency<Self::AccountId>;
-
-	/// The score attributed to a member or candidate.
-	type Score:
-		AtLeast32Bit + Clone + Copy + Default + FullCodec + MaybeSerializeDeserialize + Debug;
-
-	/// The overarching event type.
-	type Event: From<Event<Self, I>> + Into<<Self as frame_system::Trait>::Event>;
-
-	// The deposit which is reserved from candidates if they want to
-	// start a candidacy. The deposit gets returned when the candidacy is
-	// withdrawn or when the candidate is kicked.
-	type CandidateDeposit: Get<BalanceOf<Self, I>>;
-
-	/// Every `Period` blocks the `Members` are filled with the highest scoring
-	/// members in the `Pool`.
-	type Period: Get<Self::BlockNumber>;
-
-	/// The receiver of the signal for when the membership has been initialized.
-	/// This happens pre-genesis and will usually be the same as `MembershipChanged`.
-	/// If you need to do something different on initialization, then you can change
-	/// this accordingly.
-	type MembershipInitialized: InitializeMembers<Self::AccountId>;
-
-	/// The receiver of the signal for when the members have changed.
-	type MembershipChanged: ChangeMembers<Self::AccountId>;
-
-	/// Allows a configurable origin type to set a score to a candidate in the pool.
-	type ScoreOrigin: EnsureOrigin<Self::Origin>;
-
-	/// Required origin for removing a member (though can always be Root).
-	/// Configurable origin which enables removing an entity. If the entity
-	/// is part of the `Members` it is immediately replaced by the next
-	/// highest scoring candidate, if available.
-	type KickOrigin: EnsureOrigin<Self::Origin>;
-}
-
-decl_storage! {
-	trait Store for Module<T: Trait<I>, I: Instance=DefaultInstance> as ScoredPool {
-		/// The current pool of candidates, stored as an ordered Vec
-		/// (ordered descending by score, `None` last, highest first).
-		Pool get(fn pool) config(): PoolT<T, I>;
-
-		/// A Map of the candidates. The information in this Map is redundant
-		/// to the information in the `Pool`. But the Map enables us to easily
-		/// check if a candidate is already in the pool, without having to
-		/// iterate over the entire pool (the `Pool` is not sorted by
-		/// `T::AccountId`, but by `T::Score` instead).
-		CandidateExists get(fn candidate_exists): map hasher(twox_64_concat) T::AccountId => bool;
-
-		/// The current membership, stored as an ordered Vec.
-		Members get(fn members): Vec<T::AccountId>;
-
-		/// Size of the `Members` set.
-		MemberCount get(fn member_count) config(): u32;
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::{pallet_prelude::*, traits::EnsureOrigin, weights::Weight};
+	use frame_system::{ensure_root, ensure_signed, pallet_prelude::*};
+	use sp_runtime::traits::MaybeSerializeDeserialize;
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T, I = ()>(_);
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		/// The currency used for deposits.
+		type Currency: Currency<Self::AccountId> + ReservableCurrency<Self::AccountId>;
+
+		/// The score attributed to a member or candidate.
+		type Score: AtLeast32Bit
+			+ Clone
+			+ Copy
+			+ Default
+			+ FullCodec
+			+ MaybeSerializeDeserialize
+			+ Debug
+			+ scale_info::TypeInfo;
+
+		/// The overarching event type.
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;
+
+		// The deposit which is reserved from candidates if they want to
+		// start a candidacy. The deposit gets returned when the candidacy is
+		// withdrawn or when the candidate is kicked.
+		#[pallet::constant]
+		type CandidateDeposit: Get<BalanceOf<Self, I>>;
+
+		/// Every `Period` blocks the `Members` are filled with the highest scoring
+		/// members in the `Pool`.
+		#[pallet::constant]
+		type Period: Get<Self::BlockNumber>;
+
+		/// The receiver of the signal for when the membership has been initialized.
+		/// This happens pre-genesis and will usually be the same as `MembershipChanged`.
+		/// If you need to do something different on initialization, then you can change
+		/// this accordingly.
+		type MembershipInitialized: InitializeMembers<Self::AccountId>;
+
+		/// The receiver of the signal for when the members have changed.
+		type MembershipChanged: ChangeMembers<Self::AccountId>;
+
+		/// Allows a configurable origin type to set a score to a candidate in the pool.
+		type ScoreOrigin: EnsureOrigin<Self::Origin>;
+
+		/// Required origin for removing a member (though can always be Root).
+		/// Configurable origin which enables removing an entity. If the entity
+		/// is part of the `Members` it is immediately replaced by the next
+		/// highest scoring candidate, if available.
+		type KickOrigin: EnsureOrigin<Self::Origin>;
 	}
 
-	add_extra_genesis {
-		config(members): Vec<T::AccountId>;
-		config(phantom): sp_std::marker::PhantomData<I>;
-		build(|config| {
-			let mut pool = config.pool.clone();
-
-			// reserve balance for each candidate in the pool.
-			// panicking here is ok, since this just happens one time, pre-genesis.
-			pool
-				.iter()
-				.for_each(|(who, _)| {
-					T::Currency::reserve(&who, T::CandidateDeposit::get())
-						.expect("balance too low to create candidacy");
-					<CandidateExists<T, I>>::insert(who, true);
-				});
-
-			// Sorts the `Pool` by score in a descending order. Entities which
-			// have a score of `None` are sorted to the beginning of the vec.
-			pool.sort_by_key(|(_, maybe_score)|
-				Reverse(maybe_score.unwrap_or_default())
-			);
-
-			<Pool<T, I>>::put(&pool);
-			<Module<T, I>>::refresh_members(pool, ChangeReceiver::MembershipInitialized);
-		})
-	}
-}
-
-decl_event!(
-	pub enum Event<T, I=DefaultInstance> where
-		<T as frame_system::Trait>::AccountId,
-	{
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config<I>, I: 'static = ()> {
 		/// The given member was removed. See the transaction for who.
 		MemberRemoved,
 		/// An entity has issued a candidacy. See the transaction for who.
@@ -218,14 +187,11 @@ decl_event!(
 		/// A score was attributed to the candidate.
 		/// See the transaction for who.
 		CandidateScored,
-		/// Phantom member, never used.
-		Dummy(sp_std::marker::PhantomData<(AccountId, I)>),
 	}
-);
 
-decl_error! {
-	/// Error for the scored-pool module.
-	pub enum Error for Module<T: Trait<I>, I: Instance> {
+	/// Error for the scored-pool pallet.
+	#[pallet::error]
+	pub enum Error<T, I = ()> {
 		/// Already a member.
 		AlreadyInPool,
 		/// Index out of bounds.
@@ -233,27 +199,85 @@ decl_error! {
 		/// Index does not match requested account.
 		WrongAccountIndex,
 	}
-}
 
-decl_module! {
-	pub struct Module<T: Trait<I>, I: Instance=DefaultInstance>
-		for enum Call
-		where origin: T::Origin
-	{
-		type Error = Error<T, I>;
+	/// The current pool of candidates, stored as an ordered Vec
+	/// (ordered descending by score, `None` last, highest first).
+	#[pallet::storage]
+	#[pallet::getter(fn pool)]
+	pub(crate) type Pool<T: Config<I>, I: 'static = ()> = StorageValue<_, PoolT<T, I>, ValueQuery>;
+
+	/// A Map of the candidates. The information in this Map is redundant
+	/// to the information in the `Pool`. But the Map enables us to easily
+	/// check if a candidate is already in the pool, without having to
+	/// iterate over the entire pool (the `Pool` is not sorted by
+	/// `T::AccountId`, but by `T::Score` instead).
+	#[pallet::storage]
+	#[pallet::getter(fn candidate_exists)]
+	pub(crate) type CandidateExists<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Twox64Concat, T::AccountId, bool, ValueQuery>;
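An aside on the ordering invariant these storage items rely on (and which the genesis build below re-establishes): sorting ascending by `Reverse(score)` puts the highest score first, and entries whose score is `None` default to zero and therefore end up at the back, matching the "`None` last, highest first" layout documented on `Pool`. A standalone sketch in plain `std`, independent of FRAME:

```rust
use std::cmp::Reverse;

fn main() {
    let mut pool: Vec<(u64, Option<u64>)> =
        vec![(5, None), (10, Some(1)), (40, Some(3)), (20, Some(2))];
    // Same key function as the pallet's genesis build.
    pool.sort_by_key(|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default()));
    assert_eq!(pool, vec![(40, Some(3)), (20, Some(2)), (10, Some(1)), (5, None)]);
}
```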
+	/// The current membership, stored as an ordered Vec.
+	#[pallet::storage]
+	#[pallet::getter(fn members)]
+	pub(crate) type Members<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, Vec<T::AccountId>, ValueQuery>;
+
+	/// Size of the `Members` set.
+	#[pallet::storage]
+	#[pallet::getter(fn member_count)]
+	pub(crate) type MemberCount<T, I = ()> = StorageValue<_, u32, ValueQuery>;
+
+	#[pallet::genesis_config]
+	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
+		pub pool: PoolT<T, I>,
+		pub member_count: u32,
+	}
 
-		fn deposit_event() = default;
+	#[cfg(feature = "std")]
+	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
+		fn default() -> Self {
+			Self { pool: Default::default(), member_count: Default::default() }
+		}
+	}
 
+	#[pallet::genesis_build]
+	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig<T, I> {
+		fn build(&self) {
+			let mut pool = self.pool.clone();
+
+			// reserve balance for each candidate in the pool.
+			// panicking here is ok, since this just happens one time, pre-genesis.
+			pool.iter().for_each(|(who, _)| {
+				T::Currency::reserve(&who, T::CandidateDeposit::get())
+					.expect("balance too low to create candidacy");
+				<CandidateExists<T, I>>::insert(who, true);
+			});
+
+			// Sorts the `Pool` by score in a descending order. Entities which
+			// have a score of `None` are sorted to the beginning of the vec.
+			pool.sort_by_key(|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default()));
+
+			<MemberCount<T, I>>::put(self.member_count);
+			<Pool<T, I>>::put(&pool);
+			<Pallet<T, I>>::refresh_members(pool, ChangeReceiver::MembershipInitialized);
+		}
+	}
+
+	#[pallet::hooks]
+	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
 		/// Every `Period` blocks the `Members` set is refreshed from the
 		/// highest scoring members in the pool.
 		fn on_initialize(n: T::BlockNumber) -> Weight {
 			if n % T::Period::get() == Zero::zero() {
 				let pool = <Pool<T, I>>::get();
-				<Module<T, I>>::refresh_members(pool, ChangeReceiver::MembershipChanged);
+				<Pallet<T, I>>::refresh_members(pool, ChangeReceiver::MembershipChanged);
 			}
 			0
 		}
+	}
 
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Add `origin` to the pool of candidates.
 		///
 		/// This results in `CandidateDeposit` being reserved from
@@ -265,8 +289,8 @@
 		///
 		/// The `index` parameter of this function must be set to
 		/// the index of the transactor in the `Pool`.
-		#[weight = 0]
-		pub fn submit_candidacy(origin) {
+		#[pallet::weight(0)]
+		pub fn submit_candidacy(origin: OriginFor<T>) -> DispatchResult {
 			let who = ensure_signed(origin)?;
 
 			ensure!(!<CandidateExists<T, I>>::contains_key(&who), Error::<T, I>::AlreadyInPool);
@@ -275,11 +299,12 @@
 
 			// can be inserted as last element in pool, since entities with
 			// `None` are always sorted to the end.
-			<Pool<T, I>>::append((who.clone(), Option::<<T as Trait<I>>::Score>::None));
+			<Pool<T, I>>::append((who.clone(), Option::<<T as Config<I>>::Score>::None));
 
 			<CandidateExists<T, I>>::insert(&who, true);
 
-			Self::deposit_event(RawEvent::CandidateAdded);
+			Self::deposit_event(Event::<T, I>::CandidateAdded);
+			Ok(())
 		}
 
 		/// An entity withdraws candidacy and gets its deposit back.
@@ -292,18 +317,16 @@
 		///
 		/// The `index` parameter of this function must be set to
 		/// the index of the transactor in the `Pool`.
-		#[weight = 0]
-		pub fn withdraw_candidacy(
-			origin,
-			index: u32
-		) {
+		#[pallet::weight(0)]
+		pub fn withdraw_candidacy(origin: OriginFor<T>, index: u32) -> DispatchResult {
 			let who = ensure_signed(origin)?;
 
 			let pool = <Pool<T, I>>::get();
 			Self::ensure_index(&pool, &who, index)?;
 
 			Self::remove_member(pool, who, index)?;
-			Self::deposit_event(RawEvent::CandidateWithdrew);
+			Self::deposit_event(Event::<T, I>::CandidateWithdrew);
+			Ok(())
 		}
 
 		/// Kick a member `who` from the set.
 		///
 		/// May only be called from `T::KickOrigin`.
 		///
 		/// The `index` parameter of this function must be set to
 		/// the index of `dest` in the `Pool`.
-		#[weight = 0]
+		#[pallet::weight(0)]
 		pub fn kick(
-			origin,
+			origin: OriginFor<T>,
 			dest: <T::Lookup as StaticLookup>::Source,
-			index: u32
-		) {
+			index: u32,
+		) -> DispatchResult {
 			T::KickOrigin::ensure_origin(origin)?;
 
 			let who = T::Lookup::lookup(dest)?;
@@ -326,7 +349,8 @@
 			Self::ensure_index(&pool, &who, index)?;
 
 			Self::remove_member(pool, who, index)?;
-			Self::deposit_event(RawEvent::CandidateKicked);
+			Self::deposit_event(Event::<T, I>::CandidateKicked);
+			Ok(())
 		}
 
 		/// Score a member `who` with `score`.
@@ -335,13 +359,13 @@
 		///
 		/// The `index` parameter of this function must be set to
 		/// the index of the `dest` in the `Pool`.
-		#[weight = 0]
+		#[pallet::weight(0)]
 		pub fn score(
-			origin,
+			origin: OriginFor<T>,
 			dest: <T::Lookup as StaticLookup>::Source,
 			index: u32,
-			score: T::Score
-		) {
+			score: T::Score,
+		) -> DispatchResult {
 			T::ScoreOrigin::ensure_origin(origin)?;
 
 			let who = T::Lookup::lookup(dest)?;
@@ -357,15 +381,15 @@
 			// where we can insert while maintaining order.
 			let item = (who, Some(score.clone()));
 			let location = pool
-				.binary_search_by_key(
-					&Reverse(score),
-					|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default())
-				)
+				.binary_search_by_key(&Reverse(score), |(_, maybe_score)| {
+					Reverse(maybe_score.unwrap_or_default())
+				})
 				.unwrap_or_else(|l| l);
 			pool.insert(location, item);
 
 			<Pool<T, I>>::put(&pool);
-			Self::deposit_event(RawEvent::CandidateScored);
+			Self::deposit_event(Event::<T, I>::CandidateScored);
+			Ok(())
 		}
 
 		/// Dispatchable call to change `MemberCount`.
@@ -374,26 +398,23 @@
 		/// (this happens each `Period`).
 		///
 		/// May only be called from root.
-		#[weight = 0]
-		pub fn change_member_count(origin, count: u32) {
+		#[pallet::weight(0)]
+		pub fn change_member_count(origin: OriginFor<T>, count: u32) -> DispatchResult {
 			ensure_root(origin)?;
-			<MemberCount<I>>::put(&count);
+			MemberCount::<T, I>::put(&count);
+			Ok(())
 		}
 	}
 }
 
-impl<T: Trait<I>, I: Instance> Module<T, I> {
-
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	/// Fetches the `MemberCount` highest scoring members from
 	/// `Pool` and puts them into `Members`.
 	///
 	/// The `notify` parameter is used to deduct which associated
 	/// type function to invoke at the end of the method.
-	fn refresh_members(
-		pool: PoolT<T, I>,
-		notify: ChangeReceiver
-	) {
-		let count = <MemberCount<I>>::get();
+	fn refresh_members(pool: PoolT<T, I>, notify: ChangeReceiver) {
+		let count = MemberCount::<T, I>::get();
 
 		let mut new_members: Vec<T::AccountId> = pool
 			.into_iter()
@@ -410,10 +431,7 @@
 			ChangeReceiver::MembershipInitialized =>
 				T::MembershipInitialized::initialize_members(&new_members),
 			ChangeReceiver::MembershipChanged =>
-				T::MembershipChanged::set_members_sorted(
-					&new_members[..],
-					&old_members[..],
-				),
+				T::MembershipChanged::set_members_sorted(&new_members[..], &old_members[..]),
 		}
 	}
 
@@ -424,9 +442,9 @@
 	fn remove_member(
 		mut pool: PoolT<T, I>,
 		remove: T::AccountId,
-		index: u32
+		index: u32,
 	) -> Result<(), Error<T, I>> {
-		// all callers of this function in this module also check
+		// all callers of this function in this pallet also check
 		// the index for validity before calling this function.
 		// nevertheless we check again here, to assert that there was
 		// no mistake when invoking this sensible function.
@@ -445,17 +463,13 @@
 
 		T::Currency::unreserve(&remove, T::CandidateDeposit::get());
 
-		Self::deposit_event(RawEvent::MemberRemoved);
+		Self::deposit_event(Event::<T, I>::MemberRemoved);
 		Ok(())
 	}
 
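The `score` dispatchable above keeps the pool sorted by finding the insertion point with `binary_search_by_key` over the same `Reverse(score)` key. A standalone sketch of just that step, with plain integers standing in for `T::AccountId` and `T::Score`:

```rust
use std::cmp::Reverse;

fn main() {
    // Pool already sorted descending by score, as the pallet maintains it.
    let mut pool: Vec<(u64, Option<u64>)> = vec![(40, Some(3)), (20, Some(2)), (10, Some(1))];
    let (who, score) = (31u64, 2u64);
    let location = pool
        .binary_search_by_key(&Reverse(score), |(_, maybe_score)| {
            Reverse(maybe_score.unwrap_or_default())
        })
        .unwrap_or_else(|l| l);
    pool.insert(location, (who, Some(score)));
    // The new entry lands beside the other score-2 member, ahead of score 1.
    assert_eq!(pool[1].1, Some(2));
    assert_eq!(pool[2].1, Some(2));
}
```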
	/// Checks if `index` is a valid number and if the element found
	/// at `index` in `Pool` is equal to `who`.
-	fn ensure_index(
-		pool: &PoolT<T, I>,
-		who: &T::AccountId,
-		index: u32
-	) -> Result<(), Error<T, I>> {
+	fn ensure_index(pool: &PoolT<T, I>, who: &T::AccountId, index: u32) -> Result<(), Error<T, I>> {
 		ensure!(index < pool.len() as u32, Error::<T, I>::InvalidIndex);
 
 		let (index_who, _index_score) = &pool[index as usize];
diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs
index 59c0dc66cca60..5c5425ae2bdd8 100644
--- a/frame/scored-pool/src/mock.rs
+++ b/frame/scored-pool/src/mock.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,69 +18,77 @@
 //! Test utilities
 
 use super::*;
+use crate as pallet_scored_pool;
 
-use std::cell::RefCell;
-use frame_support::{impl_outer_origin, parameter_types, weights::Weight, ord_parameter_types};
+use frame_support::{ord_parameter_types, parameter_types, traits::GenesisBuild};
+use frame_system::EnsureSignedBy;
 use sp_core::H256;
 use sp_runtime::{
-	Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header,
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
 };
-use frame_system::EnsureSignedBy;
+use std::cell::RefCell;
 
-impl_outer_origin! {
-	pub enum Origin for Test where system = frame_system {}
-}
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+		ScoredPool: pallet_scored_pool::{Pallet, Call, Storage, Config<T>, Event<T>},
+	}
+);
 
-#[derive(Clone, Eq, PartialEq)]
-pub struct Test;
 parameter_types! {
 	pub const CandidateDeposit: u64 = 25;
 	pub const Period: u64 = 4;
-	pub const BlockHashCount: u64 = 250;
-	pub const MaximumBlockWeight: Weight = 1024;
-	pub const MaximumBlockLength: u32 = 2 * 1024;
-	pub const AvailableBlockRatio: Perbill = Perbill::one();
-	pub const ExistentialDeposit: u64 = 1;
+	pub BlockWeights: frame_system::limits::BlockWeights =
+		frame_system::limits::BlockWeights::simple_max(1024);
 }
 
 ord_parameter_types! {
 	pub const KickOrigin: u64 = 2;
 	pub const ScoreOrigin: u64 = 3;
 }
 
-impl frame_system::Trait for Test {
-	type BaseCallFilter = ();
+impl frame_system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = ();
+	type BlockLength = ();
+	type DbWeight = ();
 	type Origin = Origin;
 	type Index = u64;
 	type BlockNumber = u64;
 	type Hash = H256;
-	type Call = ();
+	type Call = Call;
 	type Hashing = BlakeTwo256;
 	type AccountId = u64;
 	type Lookup = IdentityLookup<Self::AccountId>;
 	type Header = Header;
-	type Event = ();
+	type Event = Event;
 	type BlockHashCount = BlockHashCount;
-	type MaximumBlockWeight = MaximumBlockWeight;
-	type DbWeight = ();
-	type BlockExecutionWeight = ();
-	type ExtrinsicBaseWeight = ();
-	type MaximumExtrinsicWeight = MaximumBlockWeight;
-	type MaximumBlockLength = MaximumBlockLength;
-	type AvailableBlockRatio = AvailableBlockRatio;
 	type Version = ();
-	type PalletInfo = ();
+	type PalletInfo = PalletInfo;
 	type AccountData = pallet_balances::AccountData<u64>;
 	type OnNewAccount = ();
 	type OnKilledAccount = ();
 	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
 }
 
-impl pallet_balances::Trait for Test {
+impl pallet_balances::Config for Test {
 	type MaxLocks = ();
+	type MaxReserves = ();
+	type ReserveIdentifier = [u8; 8];
 	type Balance = u64;
-	type Event = ();
+	type Event = Event;
 	type DustRemoval = ();
 	type ExistentialDeposit = ExistentialDeposit;
 	type AccountStore = System;
@@ -114,8 +122,8 @@ impl InitializeMembers<u64> for TestChangeMembers {
 	}
 }
 
-impl Trait for Test {
-	type Event = ();
+impl Config for Test {
+	type Event = Event;
 	type KickOrigin = EnsureSignedBy<KickOrigin, u64>;
 	type MembershipInitialized = TestChangeMembers;
 	type MembershipChanged = TestChangeMembers;
@@ -126,9 +134,6 @@ impl Trait for Test {
 	type ScoreOrigin = EnsureSignedBy<ScoreOrigin, u64>;
 }
 
-type System = frame_system::Module<Test>;
-type Balances = pallet_balances::Module<Test>;
-
 pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
@@ -141,32 +146,26 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 			(40, 500_000),
 			(99, 1),
 		],
-	}.assimilate_storage(&mut t).unwrap();
-	GenesisConfig::<Test>{
-		pool: vec![
-			(5, None),
-			(10, Some(1)),
-			(20, Some(2)),
-			(31, Some(2)),
-			(40, Some(3)),
-		],
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+	pallet_scored_pool::GenesisConfig::<Test> {
+		pool: vec![(5, None), (10, Some(1)), (20, Some(2)), (31, Some(2)), (40, Some(3))],
 		member_count: 2,
-		.. Default::default()
-	}.assimilate_storage(&mut t).unwrap();
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	t.into()
 }
 
 /// Fetch an entity from the pool, if existent.
 pub fn fetch_from_pool(who: u64) -> Option<(u64, Option<u64>)> {
-	<Module<Test>>::pool()
-		.into_iter()
-		.find(|item| item.0 == who)
+	<Pallet<Test>>::pool().into_iter().find(|item| item.0 == who)
 }
 
 /// Find an entity in the pool.
 /// Returns its position in the `Pool` vec, if existent.
 pub fn find_in_pool(who: u64) -> Option<usize> {
-	<Module<Test>>::pool()
-		.into_iter()
-		.position(|item| item.0 == who)
+	<Pallet<Test>>::pool().into_iter().position(|item| item.0 == who)
 }
diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs
index 44b71bc00ba47..0503e308e76a5 100644
--- a/frame/scored-pool/src/tests.rs
+++ b/frame/scored-pool/src/tests.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,17 +15,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Tests for the module.
+//! Tests for the pallet.
 
 use super::*;
 use mock::*;
 
-use frame_support::{assert_ok, assert_noop, traits::OnInitialize};
+use frame_support::{assert_noop, assert_ok, traits::OnInitialize};
 use sp_runtime::traits::BadOrigin;
 
-type ScoredPool = Module<Test>;
-type System = frame_system::Module<Test>;
-type Balances = pallet_balances::Module<Test>;
+type ScoredPool = Pallet<Test>;
+type System = frame_system::Pallet<Test>;
+type Balances = pallet_balances::Pallet<Test>;
 
 #[test]
 fn query_membership_works() {
@@ -142,14 +142,12 @@ fn unscored_entities_must_not_be_used_for_filling_members() {
 
 	// when
 	// we remove every scored member
-	ScoredPool::pool()
-		.into_iter()
-		.for_each(|(who, score)| {
-			if let Some(_) = score {
-				let index = find_in_pool(who).expect("entity must be in pool") as u32;
-				assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index));
-			}
-		});
+	ScoredPool::pool().into_iter().for_each(|(who, score)| {
+		if let Some(_) = score {
+			let index = find_in_pool(who).expect("entity must be in pool") as u32;
+			assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index));
+		}
+	});
 
 	// then
 	// the `None` candidates should not have been filled in
@@ -201,7 +199,10 @@ fn withdraw_candidacy_must_only_work_for_members() {
 	new_test_ext().execute_with(|| {
 		let who = 77;
 		let index = 0;
-		assert_noop!( ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::<Test>::WrongAccountIndex);
+		assert_noop!(
+			ScoredPool::withdraw_candidacy(Origin::signed(who), index),
+			Error::<Test>::WrongAccountIndex
+		);
 	});
 }
 
@@ -210,9 +211,18 @@ fn oob_index_should_abort() {
 	new_test_ext().execute_with(|| {
 		let who = 40;
 		let oob_index = ScoredPool::pool().len() as u32;
-		assert_noop!(ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index), Error::<Test>::InvalidIndex);
-		assert_noop!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99), Error::<Test>::InvalidIndex);
-		assert_noop!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index), Error::<Test>::InvalidIndex);
+		assert_noop!(
+			ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index),
+			Error::<Test>::InvalidIndex
+		);
+		assert_noop!(
+			ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99),
+			Error::<Test>::InvalidIndex
+		);
+		assert_noop!(
+			ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index),
+			Error::<Test>::InvalidIndex
+		);
 	});
 }
 
@@ -221,9 +231,18 @@ fn index_mismatches_should_abort() {
 	new_test_ext().execute_with(|| {
 		let who = 40;
 		let index = 3;
-		assert_noop!(ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::<Test>::WrongAccountIndex);
-		assert_noop!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99), Error::<Test>::WrongAccountIndex);
-		assert_noop!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index), Error::<Test>::WrongAccountIndex);
+		assert_noop!(
+			ScoredPool::withdraw_candidacy(Origin::signed(who), index),
+			Error::<Test>::WrongAccountIndex
+		);
+		assert_noop!(
+			ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99),
+			Error::<Test>::WrongAccountIndex
+		);
+		assert_noop!(
+			ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index),
+			Error::<Test>::WrongAccountIndex
+		);
 	});
 }
diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml
index ea3a3d3cdf7fa..8f07de2e7a6db 100644
--- a/frame/session/Cargo.toml
+++ b/frame/session/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-session"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,30 +13,29 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-serde = { version = "1.0.101", optional = true }
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
-sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" }
-sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" }
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
-pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" }
-sp-trie = { version = "2.0.0", optional = true, default-features = false, path = "../../primitives/trie" }
-impl-trait-for-tuples = "0.1.3"
-
-[dev-dependencies]
-sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" }
-lazy_static = "1.4.0"
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [
+	"derive",
+] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
+sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
+sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" }
+sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
+pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" }
+sp-trie = { version = "4.0.0-dev", optional = true, default-features = false, path = "../../primitives/trie" }
+log = { version = "0.4.0", default-features = false }
+impl-trait-for-tuples = "0.2.1"
 
 [features]
 default = ["std", "historical"]
 historical = ["sp-trie"]
 std = [
-	"serde",
 	"codec/std",
+	"scale-info/std",
 	"sp-std/std",
 	"sp-io/std",
 	"frame-support/std",
@@ -46,4 +45,6 @@ std = [
 	"sp-staking/std",
 	"pallet-timestamp/std",
 	"sp-trie/std",
+	"log/std",
 ]
+try-runtime = ["frame-support/try-runtime"]
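In the same spirit as the README's `validators` helper below, here is a hypothetical read-only helper over the session pallet's public getters; `validators` and `current_index` are existing storage getters, but the helper itself is illustrative and not part of this diff.

```rust
use pallet_session as session;
use sp_staking::SessionIndex;

/// Illustrative only: read the active validator set together with the index
/// of the session currently in progress.
fn session_status<T: session::Config>() -> (Vec<T::ValidatorId>, SessionIndex) {
    (<session::Module<T>>::validators(), <session::Module<T>>::current_index())
}
```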
diff --git a/frame/session/README.md b/frame/session/README.md
index 60da8958f73d0..c47b5610de09c 100644
--- a/frame/session/README.md
+++ b/frame/session/README.md
@@ -3,7 +3,7 @@
 The Session module allows validators to manage their session keys, provides a function for changing
 the session length, and handles session rotation.
 
-- [`session::Trait`](https://docs.rs/pallet-session/latest/pallet_session/trait.Trait.html)
+- [`session::Config`](https://docs.rs/pallet-session/latest/pallet_session/trait.Config.html)
 - [`Call`](https://docs.rs/pallet-session/latest/pallet_session/enum.Call.html)
 - [`Module`](https://docs.rs/pallet-session/latest/pallet_session/struct.Module.html)
 
@@ -71,7 +71,7 @@ The [Staking pallet](https://docs.rs/pallet-staking/latest/pallet_staking/) uses
 
 ```rust
 use pallet_session as session;
 
-fn validators<T: session::Trait>() -> Vec<<T as session::Trait>::ValidatorId> {
+fn validators<T: session::Config>() -> Vec<<T as session::Config>::ValidatorId> {
 	<session::Module<T>>::validators()
 }
 ```
 
@@ -80,4 +80,4 @@ fn validators<T: session::Config>() -> Vec<<T as session::Config>::ValidatorId>
 
 - [Staking](https://docs.rs/pallet-staking/latest/pallet_staking/)
 
-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml
index dea05934cd872..cc242085bf5e4 100644
--- a/frame/session/benchmarking/Cargo.toml
+++ b/frame/session/benchmarking/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-session-benchmarking"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,24 +13,29 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" }
-sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" }
-frame-system = { version = "2.0.0", default-features = false, path = "../../system" }
-frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" }
-frame-support = { version = "2.0.0", default-features = false, path = "../../support" }
-pallet-staking = { version = "2.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" }
-pallet-session = { version = "2.0.0", default-features = false, path = "../../session" }
+sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" }
+frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" }
+pallet-staking = { version = "4.0.0-dev", default-features = false, features = [
+	"runtime-benchmarks",
+], path = "../../staking" }
+pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" }
 rand = { version = "0.7.2", default-features = false }
 
 [dev-dependencies]
-serde = { version = "1.0.101" }
-codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] }
-sp-core = { version = "2.0.0", path = "../../../primitives/core" }
-pallet-staking-reward-curve = { version = "2.0.0", path = "../../staking/reward-curve" }
-sp-io ={ version = "2.0.0", path = "../../../primitives/io" }
-pallet-timestamp = { version = "2.0.0", path = "../../timestamp" }
-pallet-balances = { version = "2.0.0", path = "../../balances" }
+codec = { package = "parity-scale-codec", version = "2.0.0", features = [
+	"derive",
+] }
+scale-info = "1.0"
+sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" }
+pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" }
+sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" }
+pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" }
+pallet-balances = { version = "4.0.0-dev", path = "../../balances" }
+frame-election-provider-support = { version = "4.0.0-dev", path = "../../election-provider-support" }
 
 [features]
 default = ["std"]
diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs
index 277200b269569..8b84145c1acfd 100644
--- a/frame/session/benchmarking/src/lib.rs
+++ b/frame/session/benchmarking/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,46 +22,45 @@
 
 mod mock;
 
-use sp_std::prelude::*;
-use sp_std::vec;
+use sp_std::{prelude::*, vec};
 
-use frame_benchmarking::benchmarks;
+use frame_benchmarking::{benchmarks, impl_benchmark_test_suite};
 use frame_support::{
 	codec::Decode,
-	storage::{StorageValue, StorageMap},
 	traits::{KeyOwnerProofSystem, OnInitialize},
 };
 use frame_system::RawOrigin;
 use pallet_session::{historical::Module as Historical, Module as Session, *};
 use pallet_staking::{
 	benchmarking::create_validator_with_nominators, testing_utils::create_validators,
-	MAX_NOMINATIONS, RewardDestination,
+	RewardDestination,
 };
 use sp_runtime::traits::{One, StaticLookup};
 
 const MAX_VALIDATORS: u32 = 1000;
 
-pub struct Module<T: Trait>(pallet_session::Module<T>);
-pub trait Trait: pallet_session::Trait + pallet_session::historical::Trait + pallet_staking::Trait {}
+pub struct Pallet<T: Config>(pallet_session::Module<T>);
+pub trait Config:
+	pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config
+{
+}
 
-impl<T: Trait> OnInitialize<T::BlockNumber> for Module<T> {
+impl<T: Config> OnInitialize<T::BlockNumber> for Pallet<T> {
 	fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight {
 		pallet_session::Module::<T>::on_initialize(n)
 	}
 }
 
 benchmarks! {
-	_ { }
-
 	set_keys {
-		let n = MAX_NOMINATIONS as u32;
+		let n = <T as pallet_staking::Config>::MAX_NOMINATIONS;
 		let (v_stash, _) = create_validator_with_nominators::<T>(
 			n,
-			MAX_NOMINATIONS as u32,
+			<T as pallet_staking::Config>::MAX_NOMINATIONS,
 			false,
 			RewardDestination::Staked,
 		)?;
-		let v_controller = pallet_staking::Module::<T>::bonded(&v_stash).ok_or("not stash")?;
+		let v_controller = pallet_staking::Pallet::<T>::bonded(&v_stash).ok_or("not stash")?;
 		let keys = T::Keys::default();
 		let proof: Vec<u8> = vec![0,1,2,3];
 		// Whitelist controller account from further DB operations.
@@ -70,14 +69,14 @@ benchmarks! {
 	}: _(RawOrigin::Signed(v_controller), keys, proof)
 
 	purge_keys {
-		let n = MAX_NOMINATIONS as u32;
+		let n = <T as pallet_staking::Config>::MAX_NOMINATIONS;
 		let (v_stash, _) = create_validator_with_nominators::<T>(
 			n,
-			MAX_NOMINATIONS as u32,
+			<T as pallet_staking::Config>::MAX_NOMINATIONS,
 			false,
 			RewardDestination::Staked
 		)?;
-		let v_controller = pallet_staking::Module::<T>::bonded(&v_stash).ok_or("not stash")?;
+		let v_controller = pallet_staking::Pallet::<T>::bonded(&v_stash).ok_or("not stash")?;
 		let keys = T::Keys::default();
 		let proof: Vec<u8> = vec![0,1,2,3];
 		Session::<T>::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?;
@@ -121,25 +120,17 @@ benchmarks! {
 /// Sets up the benchmark for checking a membership proof. It creates the given
 /// number of validators, sets random session keys and then creates a membership
 /// proof for the first authority and returns its key and the proof.
-fn check_membership_proof_setup<T: Trait>(
+fn check_membership_proof_setup<T: Config>(
 	n: u32,
-) -> (
-	(sp_runtime::KeyTypeId, &'static [u8; 32]),
-	sp_session::MembershipProof,
-) {
-	pallet_staking::ValidatorCount::put(n);
+) -> ((sp_runtime::KeyTypeId, &'static [u8; 32]), sp_session::MembershipProof) {
+	pallet_staking::ValidatorCount::<T>::put(n);
 
 	// create validators and set random session keys
-	for (n, who) in create_validators::<T>(n, 1000)
-		.unwrap()
-		.into_iter()
-		.enumerate()
-	{
-		use rand::RngCore;
-		use rand::SeedableRng;
+	for (n, who) in create_validators::<T>(n, 1000).unwrap().into_iter().enumerate() {
+		use rand::{RngCore, SeedableRng};
 
 		let validator = T::Lookup::lookup(who).unwrap();
-		let controller = pallet_staking::Module::<T>::bonded(validator).unwrap();
+		let controller = pallet_staking::Pallet::<T>::bonded(validator).unwrap();
 
 		let keys = {
 			let mut keys = [0u8; 128];
@@ -159,7 +150,7 @@ fn check_membership_proof_setup<T: Config>(
 		Session::<T>::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap();
 	}
 
-	Module::<T>::on_initialize(T::BlockNumber::one());
+	Pallet::<T>::on_initialize(T::BlockNumber::one());
 
 	// skip sessions until the new validator set is enacted
 	while Session::<T>::validators().len() < n as usize {
@@ -171,17 +162,4 @@ fn check_membership_proof_setup<T: Config>(
 	(key, Historical::<T>::prove(key).unwrap())
 }
 
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use crate::mock::{new_test_ext, Test};
-	use frame_support::assert_ok;
-
-	#[test]
-	fn test_benchmarks() {
-		new_test_ext().execute_with(|| {
-			assert_ok!(test_benchmark_set_keys::<Test>());
-			assert_ok!(test_benchmark_purge_keys::<Test>());
-		});
-	}
-}
+impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false);
diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs
index 6a9cfc5f98a1b..c685db2bb2524 100644
--- a/frame/session/benchmarking/src/mock.rs
+++ b/frame/session/benchmarking/src/mock.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,34 +19,36 @@
 
 #![cfg(test)]
 
+use frame_election_provider_support::onchain;
+use frame_support::parameter_types;
 use sp_runtime::traits::IdentityLookup;
-use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types};
 
 type AccountId = u64;
 type AccountIndex = u32;
 type BlockNumber = u64;
 type Balance = u64;
 
-type System = frame_system::Module<Test>;
-type Balances = pallet_balances::Module<Test>;
-type Staking = pallet_staking::Module<Test>;
-type Session = pallet_session::Module<Test>;
-
-impl_outer_origin! {
-	pub enum Origin for Test where system = frame_system {}
-}
-
-impl_outer_dispatch!
{ - pub enum Call for Test where origin: Origin { - pallet_staking::Staking, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } -} - -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; +); -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -56,29 +58,26 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = Balances; + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -88,13 +87,13 @@ impl pallet_balances::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -115,18 +114,19 @@ impl pallet_session::SessionHandler for TestSessionHandler { _: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)], - ) {} + ) { + } fn on_disabled(_: usize) {} } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; type SessionHandler = TestSessionHandler; - type Event = (); + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type DisabledValidatorsThreshold = (); @@ -150,19 +150,26 @@ parameter_types! 
{ pub type Extrinsic = sp_runtime::testing::TestXt; -impl frame_system::offchain::SendTransactionTypes for Test where +impl frame_system::offchain::SendTransactionTypes for Test +where Call: From, { type OverarchingCall = Call; type Extrinsic = Extrinsic; } -impl pallet_staking::Trait for Test { +impl onchain::Config for Test { + type Accuracy = sp_runtime::Perbill; + type DataProvider = Staking; +} + +impl pallet_staking::Config for Test { + const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; - type UnixTime = pallet_timestamp::Module; + type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = (); + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -170,19 +177,15 @@ impl pallet_staking::Trait for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type ElectionLookahead = (); - type Call = Call; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = UnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 20c3d57464c89..c9b13e3c7f262 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,23 +26,30 @@ //! These roots and proofs of inclusion can be generated at any time during the current session. //! Afterwards, the proofs can be fed to a consensus module when reporting misbehavior. -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::KeyTypeId; -use sp_runtime::traits::{Convert, OpaqueKeys}; +use super::{Module as SessionModule, SessionIndex}; +use codec::{Decode, Encode}; +use frame_support::{ + decl_module, decl_storage, print, + traits::{ValidatorSet, ValidatorSetWithIdentification}, + Parameter, +}; +use sp_runtime::{ + traits::{Convert, OpaqueKeys}, + KeyTypeId, +}; use sp_session::{MembershipProof, ValidatorCount}; -use frame_support::{decl_module, decl_storage}; -use frame_support::{Parameter, print}; -use sp_trie::{MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; -use sp_trie::trie_types::{TrieDBMut, TrieDB}; -use super::{SessionIndex, Module as SessionModule}; +use sp_std::prelude::*; +use sp_trie::{ + trie_types::{TrieDB, TrieDBMut}, + MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, +}; -mod shared; pub mod offchain; pub mod onchain; +mod shared; -/// Trait necessary for the historical module. -pub trait Trait: super::Trait { +/// Config necessary for the historical module. +pub trait Config: super::Config { /// Full identification of the validator. 
type FullIdentification: Parameter; @@ -57,7 +64,7 @@ pub trait Trait: super::Trait { } decl_storage! { - trait Store for Module as Session { + trait Store for Module as Session { /// Mapping from historical session indices to session-data root hash and validator count. HistoricalSessions get(fn historical_root): map hasher(twox_64_concat) SessionIndex => Option<(T::Hash, ValidatorCount)>; @@ -71,10 +78,10 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } -impl Module { +impl Module { /// Prune historical stored session roots up to (but not including) /// `up_to`. pub fn prune_up_to(up_to: SessionIndex) { @@ -102,12 +109,37 @@ impl Module { } } +impl ValidatorSet for Module { + type ValidatorId = T::ValidatorId; + type ValidatorIdOf = T::ValidatorIdOf; + + fn session_index() -> sp_staking::SessionIndex { + super::Module::::current_index() + } + + fn validators() -> Vec { + super::Module::::validators() + } +} + +impl ValidatorSetWithIdentification for Module { + type Identification = T::FullIdentification; + type IdentificationOf = T::FullIdentificationOf; +} + /// Specialization of the crate-level `SessionManager` which returns the set of full identification /// when creating a new session. -pub trait SessionManager: crate::SessionManager { +pub trait SessionManager: + crate::SessionManager +{ /// If there was a validator set change, its returns the set of new validators along with their /// full identifications. fn new_session(new_index: SessionIndex) -> Option>; + fn new_session_genesis( + new_index: SessionIndex, + ) -> Option> { + >::new_session(new_index) + } fn start_session(start_index: SessionIndex); fn end_session(end_index: SessionIndex); } @@ -116,19 +148,20 @@ pub trait SessionManager: crate::SessionManager /// sets the historical trie root of the ending session. 
pub struct NoteHistoricalRoot(sp_std::marker::PhantomData<(T, I)>); -impl crate::SessionManager for NoteHistoricalRoot - where I: SessionManager -{ - fn new_session(new_index: SessionIndex) -> Option> { - +impl> NoteHistoricalRoot { + fn do_new_session(new_index: SessionIndex, is_genesis: bool) -> Option> { StoredRange::mutate(|range| { range.get_or_insert_with(|| (new_index, new_index)).1 = new_index + 1; }); - let new_validators_and_id = >::new_session(new_index); - let new_validators = new_validators_and_id.as_ref().map(|new_validators| { - new_validators.iter().map(|(v, _id)| v.clone()).collect() - }); + let new_validators_and_id = if is_genesis { + >::new_session_genesis(new_index) + } else { + >::new_session(new_index) + }; + let new_validators_opt = new_validators_and_id + .as_ref() + .map(|new_validators| new_validators.iter().map(|(v, _id)| v.clone()).collect()); if let Some(new_validators) = new_validators_and_id { let count = new_validators.len() as ValidatorCount; @@ -137,7 +170,7 @@ impl crate::SessionManager for NoteHistoricalRoot { print("Failed to generate historical ancestry-inclusion proof."); print(reason); - } + }, }; } else { let previous_index = new_index.saturating_sub(1); @@ -146,7 +179,20 @@ impl crate::SessionManager for NoteHistoricalRoot crate::SessionManager for NoteHistoricalRoot +where + I: SessionManager, +{ + fn new_session(new_index: SessionIndex) -> Option> { + Self::do_new_session(new_index, false) + } + + fn new_session_genesis(new_index: SessionIndex) -> Option> { + Self::do_new_session(new_index, true) } fn start_session(start_index: SessionIndex) { @@ -160,17 +206,19 @@ impl crate::SessionManager for NoteHistoricalRoot = (::ValidatorId, ::FullIdentification); +pub type IdentificationTuple = + (::ValidatorId, ::FullIdentification); /// A trie instance for checking and generating proofs. -pub struct ProvingTrie { +pub struct ProvingTrie { db: MemoryDB, root: T::Hash, } -impl ProvingTrie { +impl ProvingTrie { fn generate_for(validators: I) -> Result - where I: IntoIterator + where + I: IntoIterator, { let mut db = MemoryDB::default(); let mut root = Default::default(); @@ -189,23 +237,20 @@ impl ProvingTrie { // map each key to the owner index. for key_id in T::Keys::key_ids() { let key = keys.get_raw(*key_id); - let res = (key_id, key).using_encoded(|k| - i.using_encoded(|v| trie.insert(k, v)) - ); + let res = + (key_id, key).using_encoded(|k| i.using_encoded(|v| trie.insert(k, v))); let _ = res.map_err(|_| "failed to insert into trie")?; } // map each owner index to the full identification. - let _ = i.using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) + let _ = i + .using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) .map_err(|_| "failed to insert into trie")?; } } - Ok(ProvingTrie { - db, - root, - }) + Ok(ProvingTrie { db, root }) } fn from_nodes(root: T::Hash, nodes: &[Vec]) -> Self { @@ -216,10 +261,7 @@ impl ProvingTrie { HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]); } - ProvingTrie { - db: memory_db, - root, - } + ProvingTrie { db: memory_db, root } } /// Prove the full verification data for a given key and key ID. @@ -250,17 +292,19 @@ impl ProvingTrie { // nodes within the current `MemoryDB` are insufficient to query the item. fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { let trie = TrieDB::new(&self.db, &self.root).ok()?; - let val_idx = (key_id, key_data).using_encoded(|s| trie.get(s)) + let val_idx = (key_id, key_data) + .using_encoded(|s| trie.get(s)) .ok()? 
.and_then(|raw| u32::decode(&mut &*raw).ok())?; - val_idx.using_encoded(|s| trie.get(s)) + val_idx + .using_encoded(|s| trie.get(s)) .ok()? .and_then(|raw| >::decode(&mut &*raw).ok()) } } -impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> +impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> for Module { type Proof = MembershipProof; @@ -281,12 +325,11 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyTy let trie = ProvingTrie::::generate_for(validators).ok()?; let (id, data) = key; - trie.prove(id, data.as_ref()) - .map(|trie_nodes| MembershipProof { - session, - trie_nodes, - validator_count: count, - }) + trie.prove(id, data.as_ref()).map(|trie_nodes| MembershipProof { + session, + trie_nodes, + validator_count: count, + }) } fn check_proof(key: (KeyTypeId, D), proof: Self::Proof) -> Option> { @@ -298,7 +341,7 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyTy let count = >::validators().len() as ValidatorCount; if count != proof.validator_count { - return None; + return None } Some((owner, id)) @@ -308,7 +351,7 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyTy let (root, count) = >::get(&proof.session)?; if count != proof.validator_count { - return None; + return None } let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); @@ -320,23 +363,28 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyTy #[cfg(test)] pub(crate) mod tests { use super::*; - use sp_runtime::key_types::DUMMY; - use sp_runtime::testing::UintAuthorityId; use crate::mock::{ - NEXT_VALIDATORS, force_new_session, - set_next_validators, Test, System, Session, + force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, }; - use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; + use frame_support::{ + traits::{KeyOwnerProofSystem, OnInitialize}, + BasicExternalities, + }; + use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId}; type Historical = Module; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - crate::GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ), - }.assimilate_storage(&mut t).unwrap(); + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + }); + BasicExternalities::execute_with_storage(&mut t, || { + for (ref k, ..) in &keys { + frame_system::Pallet::::inc_providers(k); + } + }); + crate::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); sp_io::TestExternalities::new(t) } @@ -384,7 +432,6 @@ pub(crate) mod tests { System::set_block_number(i); Session::on_initialize(i); - } assert_eq!(StoredRange::get(), Some((0, 100))); @@ -415,7 +462,6 @@ pub(crate) mod tests { System::set_block_number(i); Session::on_initialize(i); - } assert_eq!(StoredRange::get(), Some((100, 200))); diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index 97655d1a18b32..8583c2bb439be 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,22 +25,26 @@ //! 
This is used in conjunction with [`ProvingTrie`](super::ProvingTrie) and //! the off-chain indexing API. -use sp_runtime::{offchain::storage::StorageValueRef, KeyTypeId}; +use sp_runtime::{ + offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, + KeyTypeId, +}; use sp_session::MembershipProof; -use super::super::{Module as SessionModule, SessionIndex}; -use super::{IdentificationTuple, ProvingTrie, Trait}; +use super::{ + super::{Pallet as SessionModule, SessionIndex}, + Config, IdentificationTuple, ProvingTrie, +}; use super::shared; use sp_std::prelude::*; - /// A set of validators, which was used for a fixed session index. -struct ValidatorSet<T: Trait> { +struct ValidatorSet<T: Config> { validator_set: Vec<IdentificationTuple<T>>, } -impl<T: Trait> ValidatorSet<T> { +impl<T: Config> ValidatorSet<T> { /// Load the set of validators for a particular session index from the off-chain storage. /// /// If no set is found for the given `prefix` and `session`, or if it cannot be decoded, this returns `None`. @@ -49,6 +53,7 @@ impl ValidatorSet { let derived_key = shared::derive_key(shared::PREFIX, session_index); StorageValueRef::persistent(derived_key.as_ref()) .get::<Vec<IdentificationTuple<T>>>() + .ok() .flatten() .map(|validator_set| Self { validator_set }) } @@ -61,7 +66,7 @@ impl ValidatorSet { /// Implement conversion into iterator for usage /// with [ProvingTrie](super::ProvingTrie::generate_for). -impl<T: Trait> sp_std::iter::IntoIterator for ValidatorSet<T> { +impl<T: Config> sp_std::iter::IntoIterator for ValidatorSet<T> { type Item = (T::ValidatorId, T::FullIdentification); type IntoIter = sp_std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { @@ -74,7 +79,7 @@ impl sp_std::iter::IntoIterator for ValidatorSet { /// Based on the yielded `MembershipProof` the implementer may decide what /// to do, e.g. in case of a failed proof, enqueue a transaction back on /// chain reflecting that, with all its consequences such as slashing. -pub fn prove_session_membership<T: Trait, D: AsRef<[u8]>>( +pub fn prove_session_membership<T: Config, D: AsRef<[u8]>>( session_index: SessionIndex, session_key: (KeyTypeId, D), ) -> Option<MembershipProof> { @@ -83,36 +88,36 @@ pub fn prove_session_membership>( let trie = ProvingTrie::<T>::generate_for(validators.into_iter()).ok()?; let (id, data) = session_key; - trie.prove(id, data.as_ref()) - .map(|trie_nodes| MembershipProof { - session: session_index, - trie_nodes, - validator_count: count, - }) + trie.prove(id, data.as_ref()).map(|trie_nodes| MembershipProof { + session: session_index, + trie_nodes, + validator_count: count, + }) } - /// Attempt to prune anything that is older than the `first_to_keep` session index. /// /// Due to re-organisation, the `first_to_keep` index might be less /// than the stored one, in which case the conservative choice is made to keep records /// up to the lesser of the two.
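The function below only ever moves its stored prune cursor forward; the rule can be restated as a pure function (a minimal sketch, with plain `Result`/`Option` values standing in for the pallet's off-chain storage API):

```rust
// Sketch of the pruning-cursor rule in `prune_older_than`: the cursor only
// moves forward, and missing or undecodable state is overwritten with the
// incoming index.
fn next_cursor(stored: Result<Option<u32>, ()>, first_to_keep: u32) -> u32 {
    match stored {
        // advance the cursor
        Ok(Some(current)) if current < first_to_keep => first_to_keep,
        // do not move the cursor backwards (e.g. after a re-organisation)
        Ok(Some(current)) => current,
        // nothing stored yet: start at the incoming index
        Ok(None) => first_to_keep,
        // undecodable data: overwrite, accepting that a few entries may
        // never be purged
        Err(()) => first_to_keep,
    }
}

fn main() {
    assert_eq!(next_cursor(Ok(Some(5)), 8), 8); // moves forward
    assert_eq!(next_cursor(Ok(Some(9)), 8), 9); // re-org: keep the later cursor
    assert_eq!(next_cursor(Ok(None), 8), 8);    // first run
    assert_eq!(next_cursor(Err(()), 8), 8);     // garbage in storage
}
```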
-pub fn prune_older_than(first_to_keep: SessionIndex) { +pub fn prune_older_than(first_to_keep: SessionIndex) { let derived_key = shared::LAST_PRUNE.to_vec(); let entry = StorageValueRef::persistent(derived_key.as_ref()); - match entry.mutate(|current: Option>| -> Result<_, ()> { - match current { - Some(Some(current)) if current < first_to_keep => Ok(first_to_keep), - // do not move the cursor, if the new one would be behind ours - Some(Some(current)) => Ok(current), - None => Ok(first_to_keep), - // if the storage contains undecodable data, overwrite with current anyways - // which might leak some entries being never purged, but that is acceptable - // in this context - Some(None) => Ok(first_to_keep), - } - }) { - Ok(Ok(new_value)) => { + match entry.mutate( + |current: Result, StorageRetrievalError>| -> Result<_, ()> { + match current { + Ok(Some(current)) if current < first_to_keep => Ok(first_to_keep), + // do not move the cursor, if the new one would be behind ours + Ok(Some(current)) => Ok(current), + Ok(None) => Ok(first_to_keep), + // if the storage contains undecodable data, overwrite with current anyways + // which might leak some entries being never purged, but that is acceptable + // in this context + Err(_) => Ok(first_to_keep), + } + }, + ) { + Ok(new_value) => { // on a re-org this is not necessarily true, with the above they might be equal if new_value < first_to_keep { for session_index in new_value..first_to_keep { @@ -120,14 +125,14 @@ pub fn prune_older_than(first_to_keep: SessionIndex) { let _ = StorageValueRef::persistent(derived_key.as_ref()).clear(); } } - } - Ok(Err(_)) => {} // failed to store the value calculated with the given closure - Err(_) => {} // failed to calculate the value to store with the given closure + }, + Err(MutateStorageError::ConcurrentModification(_)) => {}, + Err(MutateStorageError::ValueFunctionFailed(_)) => {}, } } /// Keep the newest `n` items, and prune all items older than that. 
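In `keep_newest` below, the guard `n_to_keep < session_index` implies a pruning boundary of `session_index - n_to_keep`. The actual `prune_older_than` call falls outside the visible hunk, so the boundary here is an inference from that guard, not quoted code; a minimal sketch of the window arithmetic:

```rust
// Window arithmetic implied by the guard in `keep_newest`: with current
// session index `s`, keeping the newest `n` sessions means pruning
// everything older than `s - n` (assumption: the elided call is
// effectively `prune_older_than(s - n)`).
fn first_to_keep(current_session: u32, n_to_keep: u32) -> Option<u32> {
    (n_to_keep < current_session).then(|| current_session - n_to_keep)
}

fn main() {
    assert_eq!(first_to_keep(100, 10), Some(90)); // prune sessions 0..90
    assert_eq!(first_to_keep(5, 10), None);       // window not yet full
}
```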
-pub fn keep_newest(n_to_keep: usize) { +pub fn keep_newest(n_to_keep: usize) { let session_index = >::current_index(); let n_to_keep = n_to_keep as SessionIndex; if n_to_keep < session_index { @@ -137,43 +142,42 @@ pub fn keep_newest(n_to_keep: usize) { #[cfg(test)] mod tests { - use super::super::{onchain, Module}; - use super::*; + use super::{ + super::{onchain, Module}, + *, + }; use crate::mock::{ force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, }; use codec::Encode; use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; - use sp_core::crypto::key_types::DUMMY; - use sp_core::offchain::{ - testing::TestOffchainExt, - OffchainExt, - StorageKind, + use sp_core::{ + crypto::key_types::DUMMY, + offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt, StorageKind}, }; + use frame_support::BasicExternalities; use sp_runtime::testing::UintAuthorityId; type Historical = Module; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext = frame_system::GenesisConfig::default() + let mut t = frame_system::GenesisConfig::default() .build_storage::() .expect("Failed to create test externalities."); - crate::GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| { - l.borrow() - .iter() - .cloned() - .map(|i| (i, i, UintAuthorityId(i).into())) - .collect() - }), - } - .assimilate_storage(&mut ext) - .unwrap(); + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + }); + BasicExternalities::execute_with_storage(&mut t, || { + for (ref k, ..) in &keys { + frame_system::Pallet::::inc_providers(k); + } + }); + crate::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(ext); + let mut ext = sp_io::TestExternalities::new(t); let (offchain, offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); @@ -182,19 +186,20 @@ mod tests { seed[0..4].copy_from_slice(&ITERATIONS.to_le_bytes()); offchain_state.write().seed = seed; - ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); ext } #[test] fn encode_decode_roundtrip() { + use super::super::{super::Config as SessionConfig, Config as HistoricalConfig}; use codec::{Decode, Encode}; - use super::super::super::Trait as SessionTrait; - use super::super::Trait as HistoricalTrait; let sample = ( - 22u32 as ::ValidatorId, - 7_777_777 as ::FullIdentification); + 22u32 as ::ValidatorId, + 7_777_777 as ::FullIdentification, + ); let encoded = sample.encode(); let decoded = Decode::decode(&mut encoded.as_slice()).expect("Must decode"); @@ -205,7 +210,7 @@ mod tests { fn onchain_to_offchain() { let mut ext = new_test_ext(); - const DATA: &[u8] = &[7,8,9,10,11]; + const DATA: &[u8] = &[7, 8, 9, 10, 11]; ext.execute_with(|| { b"alphaomega"[..].using_encoded(|key| sp_io::offchain_index::set(key, DATA)); }); @@ -213,15 +218,13 @@ mod tests { ext.persist_offchain_overlay(); ext.execute_with(|| { - let data = - b"alphaomega"[..].using_encoded(|key| { + let data = b"alphaomega"[..].using_encoded(|key| { sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, key) }); assert_eq!(data, Some(DATA.to_vec())); }); } - #[test] fn historical_proof_offchain() { let mut ext = new_test_ext(); @@ -246,8 +249,6 @@ mod tests { ext.persist_offchain_overlay(); ext.execute_with(|| { - - System::set_block_number(2); Session::on_initialize(2); 
assert_eq!(>::current_index(), 2); diff --git a/frame/session/src/historical/onchain.rs b/frame/session/src/historical/onchain.rs index 745603a49829b..514e343f4e0f6 100644 --- a/frame/session/src/historical/onchain.rs +++ b/frame/session/src/historical/onchain.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +20,10 @@ use codec::Encode; use sp_runtime::traits::Convert; -use super::super::Trait as SessionTrait; -use super::super::{Module as SessionModule, SessionIndex}; -use super::Trait as HistoricalTrait; +use super::{ + super::{Config as SessionConfig, Pallet as SessionModule, SessionIndex}, + Config as HistoricalConfig, +}; use super::shared; use sp_std::prelude::*; @@ -35,14 +36,14 @@ use sp_std::prelude::*; /// `on_initialize(..)` or `on_finalization(..)`. /// **Must** be called during the session, which validator-set is to be stored for further /// off-chain processing. Otherwise the `FullIdentification` might not be available. -pub fn store_session_validator_set_to_offchain( +pub fn store_session_validator_set_to_offchain( session_index: SessionIndex, ) { let encoded_validator_list = >::validators() .into_iter() - .filter_map(|validator_id: ::ValidatorId| { + .filter_map(|validator_id: ::ValidatorId| { let full_identification = - <::FullIdentificationOf>::convert(validator_id.clone()); + <::FullIdentificationOf>::convert(validator_id.clone()); full_identification.map(|full_identification| (validator_id, full_identification)) }) .collect::>(); @@ -55,8 +56,8 @@ pub fn store_session_validator_set_to_offchain() { +pub fn store_current_session_validator_set_to_offchain() { store_session_validator_set_to_offchain::(>::current_index()); } diff --git a/frame/session/src/historical/shared.rs b/frame/session/src/historical/shared.rs index fda0361b05959..e801aa80eef4c 100644 --- a/frame/session/src/historical/shared.rs +++ b/frame/session/src/historical/shared.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,10 +18,9 @@ //! Shared logic between on-chain and off-chain components used for slashing using an off-chain //! worker. - use super::SessionIndex; -use sp_std::prelude::*; use codec::Encode; +use sp_std::prelude::*; pub(super) const PREFIX: &[u8] = b"session_historical"; pub(super) const LAST_PRUNE: &[u8] = b"session_historical_last_prune"; @@ -30,10 +29,11 @@ pub(super) const LAST_PRUNE: &[u8] = b"session_historical_last_prune"; pub(super) fn derive_key>(prefix: P, session_index: SessionIndex) -> Vec { let prefix: &[u8] = prefix.as_ref(); session_index.using_encoded(|encoded_session_index| { - prefix.into_iter() + prefix + .into_iter() .chain(b"/".into_iter()) .chain(encoded_session_index.into_iter()) .copied() .collect::>() }) -} \ No newline at end of file +} diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 1d81f38bdf87b..3f5d853d4fa21 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,46 +17,51 @@ //! # Session Module //! -//! The Session module allows validators to manage their session keys, provides a function for changing -//! the session length, and handles session rotation. +//! The Session module allows validators to manage their session keys, provides a function for +//! changing the session length, and handles session rotation. //! -//! - [`session::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] //! //! ## Overview //! //! ### Terminology //! //! -//! - **Session:** A session is a period of time that has a constant set of validators. Validators can only join -//! or exit the validator set at a session change. It is measured in block numbers. The block where a session is -//! ended is determined by the `ShouldEndSession` trait. When the session is ending, a new validator set -//! can be chosen by `OnSessionEnding` implementations. -//! - **Session key:** A session key is actually several keys kept together that provide the various signing -//! functions required by network authorities/validators in pursuit of their duties. -//! - **Validator ID:** Every account has an associated validator ID. For some simple staking systems, this -//! may just be the same as the account ID. For staking systems using a stash/controller model, -//! the validator ID would be the stash account ID of the controller. +//! - **Session:** A session is a period of time that has a constant set of validators. Validators +//! can only join or exit the validator set at a session change. It is measured in block numbers. +//! The block where a session is ended is determined by the `ShouldEndSession` trait. When the +//! session is ending, a new validator set can be chosen by `OnSessionEnding` implementations. +//! +//! - **Session key:** A session key is actually several keys kept together that provide the various +//! signing functions required by network authorities/validators in pursuit of their duties. +//! - **Validator ID:** Every account has an associated validator ID. For some simple staking +//! systems, this may just be the same as the account ID. For staking systems using a +//! stash/controller model, the validator ID would be the stash account ID of the controller. +//! //! - **Session key configuration process:** Session keys are set using `set_keys` for use not in -//! the next session, but the session after next. They are stored in `NextKeys`, a mapping between -//! the caller's `ValidatorId` and the session keys provided. `set_keys` allows users to set their -//! session key prior to being selected as validator. -//! It is a public call since it uses `ensure_signed`, which checks that the origin is a signed account. -//! As such, the account ID of the origin stored in `NextKeys` may not necessarily be associated with -//! a block author or a validator. The session keys of accounts are removed once their account balance is zero. +//! the next session, but the session after next. They are stored in `NextKeys`, a mapping between +//! the caller's `ValidatorId` and the session keys provided. `set_keys` allows users to set their +//! session key prior to being selected as validator. It is a public call since it uses +//! `ensure_signed`, which checks that the origin is a signed account. As such, the account ID of +//! 
the origin stored in `NextKeys` may not necessarily be associated with a block author or a +//! validator. The session keys of accounts are removed once their account balance is zero. +//! //! - **Session length:** This pallet does not assume anything about the length of each session. -//! Rather, it relies on an implementation of `ShouldEndSession` to dictate a new session's start. -//! This pallet provides the `PeriodicSessions` struct for simple periodic sessions. -//! - **Session rotation configuration:** Configure as either a 'normal' (rewardable session where rewards are -//! applied) or 'exceptional' (slashable) session rotation. +//! Rather, it relies on an implementation of `ShouldEndSession` to dictate a new session's start. +//! This pallet provides the `PeriodicSessions` struct for simple periodic sessions. +//! +//! - **Session rotation configuration:** Configure as either a 'normal' (rewardable session where +//! rewards are applied) or 'exceptional' (slashable) session rotation. +//! //! - **Session rotation process:** At the beginning of each block, the `on_initialize` function -//! queries the provided implementation of `ShouldEndSession`. If the session is to end the newly -//! activated validator IDs and session keys are taken from storage and passed to the -//! `SessionHandler`. The validator set supplied by `SessionManager::new_session` and the corresponding session -//! keys, which may have been registered via `set_keys` during the previous session, are written -//! to storage where they will wait one session before being passed to the `SessionHandler` -//! themselves. +//! queries the provided implementation of `ShouldEndSession`. If the session is to end the newly +//! activated validator IDs and session keys are taken from storage and passed to the +//! `SessionHandler`. The validator set supplied by `SessionManager::new_session` and the +//! corresponding session keys, which may have been registered via `set_keys` during the previous +//! session, are written to storage where they will wait one session before being passed to the +//! `SessionHandler` themselves. //! //! ### Goals //! @@ -75,7 +80,7 @@ //! ### Public Functions //! //! - `rotate_session` - Change to the next session. Register the new authority set. Queue changes -//! for next session rotation. +//! for next session rotation. //! - `disable_index` - Disable a validator by index. //! - `disable` - Disable a validator by Validator ID //! @@ -83,13 +88,14 @@ //! //! ### Example from the FRAME //! -//! The [Staking pallet](../pallet_staking/index.html) uses the Session pallet to get the validator set. +//! The [Staking pallet](../pallet_staking/index.html) uses the Session pallet to get the validator +//! set. //! //! ``` //! use pallet_session as session; //! -//! fn validators() -> Vec<::ValidatorId> { -//! >::validators() +//! fn validators() -> Vec<::ValidatorId> { +//! >::validators() //! } //! # fn main(){} //! 
``` @@ -100,30 +106,38 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, marker::PhantomData, ops::{Sub, Rem}}; +#[cfg(feature = "historical")] +pub mod historical; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; + use codec::Decode; -use sp_runtime::{KeyTypeId, Perbill, RuntimeAppPublic, BoundToRuntimeAppPublic}; -use sp_runtime::traits::{Convert, Zero, Member, OpaqueKeys, Saturating}; -use sp_staking::SessionIndex; use frame_support::{ - ensure, decl_module, decl_event, decl_storage, decl_error, ConsensusEngineId, Parameter, + decl_error, decl_event, decl_module, decl_storage, + dispatch::{self, DispatchError, DispatchResult}, + ensure, traits::{ - Get, FindAuthor, ValidatorRegistration, EstimateNextSessionRotation, EstimateNextNewSession, + EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, OneSessionHandler, + ValidatorRegistration, ValidatorSet, }, - dispatch::{self, DispatchResult, DispatchError}, weights::Weight, + ConsensusEngineId, Parameter, }; use frame_system::ensure_signed; - -#[cfg(test)] -mod mock; -#[cfg(test)] -mod tests; - -#[cfg(feature = "historical")] -pub mod historical; - -mod default_weights; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero}, + KeyTypeId, Perbill, Permill, RuntimeAppPublic, +}; +use sp_staking::SessionIndex; +use sp_std::{ + marker::PhantomData, + ops::{Rem, Sub}, + prelude::*, +}; +pub use weights::WeightInfo; /// Decides whether the session should be ended. pub trait ShouldEndSession { @@ -136,16 +150,14 @@ pub trait ShouldEndSession { /// The first session will have length of `Offset`, and /// the following sessions will have length of `Period`. /// This may prove nonsensical if `Offset` >= `Period`. -pub struct PeriodicSessions< - Period, - Offset, ->(PhantomData<(Period, Offset)>); +pub struct PeriodicSessions(PhantomData<(Period, Offset)>); impl< - BlockNumber: Rem + Sub + Zero + PartialOrd, - Period: Get, - Offset: Get, -> ShouldEndSession for PeriodicSessions { + BlockNumber: Rem + Sub + Zero + PartialOrd, + Period: Get, + Offset: Get, + > ShouldEndSession for PeriodicSessions +{ fn should_end_session(now: BlockNumber) -> bool { let offset = Offset::get(); now >= offset && ((now - offset) % Period::get()).is_zero() @@ -153,33 +165,60 @@ impl< } impl< - BlockNumber: Rem + Sub + Zero + PartialOrd + Saturating + Clone, - Period: Get, - Offset: Get, -> EstimateNextSessionRotation for PeriodicSessions { - fn estimate_next_session_rotation(now: BlockNumber) -> Option { + BlockNumber: AtLeast32BitUnsigned + Clone, + Period: Get, + Offset: Get, + > EstimateNextSessionRotation for PeriodicSessions +{ + fn average_session_length() -> BlockNumber { + Period::get() + } + + fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight) { let offset = Offset::get(); let period = Period::get(); - Some(if now > offset { + + // NOTE: we add one since we assume that the current block has already elapsed, + // i.e. when evaluating the last block in the session the progress should be 100% + // (0% is never returned). + let progress = if now >= offset { + let current = (now - offset) % period.clone() + One::one(); + Some(Permill::from_rational(current.clone(), period.clone())) + } else { + Some(Permill::from_rational(now + One::one(), offset)) + }; + + // Weight note: `estimate_current_session_progress` has no storage reads and trivial + // computational overhead. There should be no risk to the chain having this weight value be + // zero for now. 
However, this value of zero was not properly calculated, and so it would be + // reasonable to come back here and properly calculate the weight of this function. + (progress, Zero::zero()) + } + + fn estimate_next_session_rotation(now: BlockNumber) -> (Option<BlockNumber>, Weight) { + let offset = Offset::get(); + let period = Period::get(); + + let next_session = if now > offset { let block_after_last_session = (now.clone() - offset) % period.clone(); if block_after_last_session > Zero::zero() { - now.saturating_add( - period.saturating_sub(block_after_last_session) - ) + now.saturating_add(period.saturating_sub(block_after_last_session)) } else { - Zero::zero() + // this branch happens when the session is already rotated or will rotate in this + // block (depending on being called before or after `session::on_initialize`). Here, + // we assume the latter, namely that this is called after `session::on_initialize`, + // and thus we add period to it as well. + now + period } } else { offset - }) - } + }; - fn weight(_now: BlockNumber) -> Weight { - // Weight note: `estimate_next_session_rotation` has no storage reads and trivial computational overhead. - // There should be no risk to the chain having this weight value be zero for now. - // However, this value of zero was not properly calculated, and so it would be reasonable - // to come back here and properly calculate the weight of this function. - 0 + // Weight note: `estimate_next_session_rotation` has no storage reads and trivial + // computational overhead. There should be no risk to the chain having this weight value be + // zero for now. However, this value of zero was not properly calculated, and so it would be + // reasonable to come back here and properly calculate the weight of this function. + (Some(next_session), Zero::zero()) } }
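Both estimates above reduce to modular arithmetic over `Period` and `Offset`. Below is a self-contained, plain-integer model of the two functions, with progress expressed as a numerator/denominator pair instead of a `Permill`; the constants and expected values mirror the `periodic_session_works` test further down in this patch:

```rust
// Plain-integer model of the `PeriodicSessions<Period, Offset>` estimates
// (Period = 10, Offset = 3, as in the pallet's unit test).
const PERIOD: u64 = 10;
const OFFSET: u64 = 3;

// Progress as (numerator, denominator). The +1 encodes "the current block
// has already elapsed", so the last block of a session reports 100%.
fn current_session_progress(now: u64) -> (u64, u64) {
    if now >= OFFSET {
        ((now - OFFSET) % PERIOD + 1, PERIOD)
    } else {
        (now + 1, OFFSET)
    }
}

fn next_session_rotation(now: u64) -> u64 {
    if now > OFFSET {
        let block_after_last_session = (now - OFFSET) % PERIOD;
        if block_after_last_session > 0 {
            now + (PERIOD - block_after_last_session)
        } else {
            // at (or right after) a rotation the next one is a period away
            now + PERIOD
        }
    } else {
        OFFSET
    }
}

fn main() {
    assert_eq!(next_session_rotation(0), 3);            // offset phase
    assert_eq!(current_session_progress(2), (3, 3));    // last offset block: 100%
    assert_eq!(current_session_progress(3), (1, 10));   // session rotated at #3
    assert_eq!(next_session_rotation(4), 13);
    assert_eq!(current_session_progress(12), (10, 10)); // last block: 100%
    assert_eq!(next_session_rotation(13), 23);
    assert_eq!(current_session_progress(14), (2, 10));  // 20%
}
```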
@@ -187,31 +226,40 @@ impl< pub trait SessionManager<ValidatorId> { /// Plan a new session, and optionally provide the new validator set. /// - /// Even if the validator-set is the same as before, if any underlying economic - /// conditions have changed (i.e. stake-weights), the new validator set must be returned. - /// This is necessary for consensus engines making use of the session module to - /// issue a validator-set change so misbehavior can be provably associated with the new - /// economic conditions as opposed to the old. - /// The returned validator set, if any, will not be applied until `new_index`. - /// `new_index` is strictly greater than from previous call. + /// Even if the validator-set is the same as before, if any underlying economic conditions have + /// changed (i.e. stake-weights), the new validator set must be returned. This is necessary for + /// consensus engines making use of the session module to issue a validator-set change so + /// misbehavior can be provably associated with the new economic conditions as opposed to the + /// old. The returned validator set, if any, will not be applied until `new_index`. `new_index` + /// is strictly greater than the index from the previous call. /// /// The first session starts at index 0. /// - /// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. + /// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. In other + /// words, a new session must always be planned before an ongoing one can be finished. fn new_session(new_index: SessionIndex) -> Option<Vec<ValidatorId>>; + /// Same as `new_session`, but this should only be called at genesis. + /// + /// The session manager might decide to treat this in a different way. Default impl is simply + /// using [`new_session`](Self::new_session). fn new_session_genesis(new_index: SessionIndex) -> Option<Vec<ValidatorId>> { + Self::new_session(new_index) + } /// End the session. /// /// Because the session pallet can queue validator sets, the ending session index can be lower than the /// last new session index. fn end_session(end_index: SessionIndex); - /// Start the session. + /// Start an already planned session. /// - /// The session start to be used for validation + /// The session starts being used for validation. fn start_session(start_index: SessionIndex); } impl<A> SessionManager<A> for () { - fn new_session(_: SessionIndex) -> Option<Vec<A>> { None } + fn new_session(_: SessionIndex) -> Option<Vec<A>> { + None + } fn start_session(_: SessionIndex) {} fn end_session(_: SessionIndex) {} } @@ -221,7 +269,8 @@ pub trait SessionHandler { /// All the key type ids this session handler can process. /// /// The order must be the same as it expects them in - /// [`on_new_session`](Self::on_new_session) and [`on_genesis_session`](Self::on_genesis_session). + /// [`on_new_session`](Self::on_new_session) and + /// [`on_genesis_session`](Self::on_genesis_session). const KEY_TYPE_IDS: &'static [KeyTypeId]; /// The given validator set will be used for the genesis session. @@ -243,7 +292,7 @@ pub trait SessionHandler { /// A notification for end of the session. /// - /// Note it is triggered before any `SessionManager::end_session` handlers, + /// Note it is triggered before any [`SessionManager::end_session`] handlers, /// so we can still affect the validator set. fn on_before_session_ending() {} /// A validator got disabled. Act accordingly until a new session begins. fn on_disabled(validator_index: usize); } -/// A session handler for specific key type. -pub trait OneSessionHandler<ValidatorId>: BoundToRuntimeAppPublic { - /// The key type expected. - type Key: Decode + Default + RuntimeAppPublic; - - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator<Item = (&'a ValidatorId, Self::Key)>, ValidatorId: 'a; - - /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. - /// - /// `changed` is true when at least one of the session keys - /// or the underlying economic identities/distribution behind one the - /// session keys has changed, false otherwise. - /// - /// The `validators` are the validators of the incoming session, and `queued_validators` - /// will follow. - fn on_new_session<'a, I: 'a>( - changed: bool, - validators: I, - queued_validators: I, - ) where I: Iterator<Item = (&'a ValidatorId, Self::Key)>, ValidatorId: 'a; - - - /// A notification for end of the session. - /// - /// Note it is triggered before any `SessionManager::end_session` handlers, - /// so we can still affect the validator set. - fn on_before_session_ending() {} - - /// A validator got disabled. Act accordingly until a new session begins.
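To make the `SessionManager` contract above concrete, here is a minimal self-contained sketch: the trait is re-declared locally with fixed types so it compiles on its own, and `FixedSet` is a hypothetical manager that supplies a set once at genesis and afterwards signals "keep the queued set" by returning `None`:

```rust
// Local re-declaration of the contract, with concrete types for brevity.
type SessionIndex = u32;
type ValidatorId = u64;

trait SessionManager {
    fn new_session(new_index: SessionIndex) -> Option<Vec<ValidatorId>>;
    // Genesis planning falls back to ordinary planning by default.
    fn new_session_genesis(new_index: SessionIndex) -> Option<Vec<ValidatorId>> {
        Self::new_session(new_index)
    }
    fn end_session(end_index: SessionIndex);
    fn start_session(start_index: SessionIndex);
}

struct FixedSet;
impl SessionManager for FixedSet {
    fn new_session(new_index: SessionIndex) -> Option<Vec<ValidatorId>> {
        // Returning `Some` signals "validator set (re)confirmed under the
        // current economic conditions"; `None` means "keep what is queued".
        if new_index == 0 { Some(vec![1, 2, 3]) } else { None }
    }
    fn end_session(_end_index: SessionIndex) {}
    fn start_session(_start_index: SessionIndex) {}
}

fn main() {
    // new_session(1) is planned before end_session(0) is ever called
    assert_eq!(FixedSet::new_session_genesis(0), Some(vec![1, 2, 3]));
    assert_eq!(FixedSet::new_session(1), None); // keep the queued set
    FixedSet::end_session(0);
    FixedSet::start_session(1);
}
```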
- fn on_disabled(_validator_index: usize); -} - #[impl_trait_for_tuples::impl_for_tuples(1, 30)] -#[tuple_types_no_default_trait_bound] +#[tuple_types_custom_trait_bound(OneSessionHandler)] impl SessionHandler for Tuple { - for_tuples!( where #( Tuple: OneSessionHandler )* ); - for_tuples!( const KEY_TYPE_IDS: &'static [KeyTypeId] = &[ #( ::ID ),* ]; ); @@ -337,30 +350,21 @@ impl SessionHandler for Tuple { pub struct TestSessionHandler; impl SessionHandler for TestSessionHandler { const KEY_TYPE_IDS: &'static [KeyTypeId] = &[sp_runtime::key_types::DUMMY]; - fn on_genesis_session(_: &[(AId, Ks)]) {} - fn on_new_session(_: bool, _: &[(AId, Ks)], _: &[(AId, Ks)]) {} - fn on_before_session_ending() {} - fn on_disabled(_: usize) {} } -impl ValidatorRegistration for Module { +impl ValidatorRegistration for Module { fn is_registered(id: &T::ValidatorId) -> bool { Self::load_keys(id).is_some() } } -pub trait WeightInfo { - fn set_keys() -> Weight; - fn purge_keys() -> Weight; -} - -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From + Into<::Event>; /// A stable ID for a validator. type ValidatorId: Member + Parameter; @@ -398,7 +402,7 @@ pub trait Trait: frame_system::Trait { } decl_storage! { - trait Store for Module as Session { + trait Store for Module as Session { /// The current set of validators. Validators get(fn validators): Vec; @@ -444,10 +448,16 @@ decl_storage! { for (account, val, keys) in config.keys.iter().cloned() { >::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); - frame_system::Module::::inc_ref(&account); + if frame_system::Pallet::::inc_consumers(&account).is_err() { + // This will leak a provider reference, however it only happens once (at + // genesis) so it's really not a big deal and we assume that the user wants to + // do this since it's the only way a non-endowed account can contain a session + // key. + frame_system::Pallet::::inc_providers(&account); + } } - let initial_validators_0 = T::SessionManager::new_session(0) + let initial_validators_0 = T::SessionManager::new_session_genesis(0) .unwrap_or_else(|| { frame_support::print("No initial validator provided by `SessionManager`, use \ session config keys to generate initial validator set."); @@ -455,7 +465,7 @@ decl_storage! { }); assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!"); - let initial_validators_1 = T::SessionManager::new_session(1) + let initial_validators_1 = T::SessionManager::new_session_genesis(1) .unwrap_or_else(|| initial_validators_0.clone()); assert!(!initial_validators_1.is_empty(), "Empty validator set for session 1 in genesis block!"); @@ -481,15 +491,15 @@ decl_storage! { decl_event!( pub enum Event { - /// New session has happened. Note that the argument is the \[session_index\], not the block - /// number as the type might suggest. + /// New session has happened. Note that the argument is the \[session_index\], not the + /// block number as the type might suggest. NewSession(SessionIndex), } ); decl_error! { /// Error for the session module. - pub enum Error for Module { + pub enum Error for Module { /// Invalid ownership proof. InvalidProof, /// No associated validator ID for account. @@ -498,11 +508,13 @@ decl_error! { DuplicatedKey, /// No keys are associated with this account. NoKeys, + /// Key setting account is not live, so it's impossible to associate keys. 
+ NoAccount, } } decl_module! { - pub struct Module<T: Trait> for enum Call where origin: T::Origin { + pub struct Module<T: Config> for enum Call where origin: T::Origin { type Error = Error<T>; fn deposit_event() = default; @@ -542,7 +554,7 @@ decl_module! { /// Actual cost depends on the length of `T::Keys::key_ids()` which is fixed. /// - DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account` /// - DbWrites: `NextKeys`, `origin account` - /// - DbWrites per key id: `KeyOwnder` + /// - DbWrites per key id: `KeyOwner` /// # #[weight = T::WeightInfo::purge_keys()] pub fn purge_keys(origin) { @@ -555,7 +567,7 @@ decl_module! { fn on_initialize(n: T::BlockNumber) -> Weight { if T::ShouldEndSession::should_end_session(n) { Self::rotate_session(); - T::MaximumBlockWeight::get() + T::BlockWeights::get().max_block } else { // NOTE: the non-database part of the weight for `should_end_session(n)` is // included as weight for empty block, the database part is expected to be in @@ -566,25 +578,24 @@ decl_module! { } } -impl<T: Trait> Module<T> { - /// Move on to next session. Register new validator set and session keys. Changes - /// to the validator set have a session of delay to take effect. This allows for - /// equivocation punishment after a fork. +impl<T: Config> Module<T> { + /// Move on to next session. Register new validator set and session keys. Changes to the + /// validator set have a session of delay to take effect. This allows for equivocation + /// punishment after a fork. pub fn rotate_session() { let session_index = CurrentIndex::get(); + log::trace!(target: "runtime::session", "rotating session {:?}", session_index); let changed = QueuedChanged::get(); // Inform the session handlers that a session is going to end. T::SessionHandler::on_before_session_ending(); - T::SessionManager::end_session(session_index); // Get queued session keys and validators. let session_keys = <QueuedKeys<T>>::get(); - let validators = session_keys.iter() - .map(|(validator, _)| validator.clone()) - .collect::<Vec<_>>(); + let validators = + session_keys.iter().map(|(validator, _)| validator.clone()).collect::<Vec<_>>(); <Validators<T>>::put(&validators); if changed { @@ -600,16 +611,15 @@ impl Module { // Get next validator set. let maybe_next_validators = T::SessionManager::new_session(session_index + 1); - let (next_validators, next_identities_changed) - = if let Some(validators) = maybe_next_validators - { - // NOTE: as per the documentation on `OnSessionEnding`, we consider - // the validator set as having changed even if the validators are the - // same as before, as underlying economic conditions may have changed. - (validators, true) - } else { - (<Validators<T>>::get(), false) - }; + let (next_validators, next_identities_changed) = + if let Some(validators) = maybe_next_validators { + // NOTE: as per the documentation on `OnSessionEnding`, we consider + // the validator set as having changed even if the validators are the + // same as before, as underlying economic conditions may have changed. + (validators, true) + } else { + (<Validators<T>>::get(), false) + }; // Queue next session keys. let (queued_amalgamated, next_changed) = { @@ -619,7 +629,9 @@ impl Module { let mut now_session_keys = session_keys.iter(); let mut check_next_changed = |keys: &T::Keys| { - if changed { return } + if changed { + return + } // since a new validator set always leads to `changed` starting // as true, we can ensure that `now_session_keys` and `next_validators` // have the same length. this function is called once per iteration.
@@ -630,7 +642,8 @@ impl Module { } } }; - let queued_amalgamated = next_validators.into_iter() + let queued_amalgamated = next_validators + .into_iter() .map(|a| { let k = Self::load_keys(&a).unwrap_or_default(); check_next_changed(&k); @@ -648,11 +661,7 @@ impl Module { Self::deposit_event(Event::NewSession(session_index)); // Tell everyone about the new session keys. - T::SessionHandler::on_new_session::( - changed, - &session_keys, - &queued_amalgamated, - ); + T::SessionHandler::on_new_session::(changed, &session_keys, &queued_amalgamated); } /// Disable the validator of index `i`. @@ -686,7 +695,61 @@ impl Module { /// session is already disabled. /// If used with the staking module it allows to force a new era in such case. pub fn disable(c: &T::ValidatorId) -> sp_std::result::Result { - Self::validators().iter().position(|i| i == c).map(Self::disable_index).ok_or(()) + Self::validators() + .iter() + .position(|i| i == c) + .map(Self::disable_index) + .ok_or(()) + } + + /// Upgrade the key type from some old type to a new type. Supports adding + /// and removing key types. + /// + /// This function should be used with extreme care and only during an + /// `on_runtime_upgrade` block. Misuse of this function can put your blockchain + /// into an unrecoverable state. + /// + /// Care should be taken that the raw versions of the + /// added keys are unique for every `ValidatorId, KeyTypeId` combination. + /// This is an invariant that the session module typically maintains internally. + /// + /// As the actual values of the keys are typically not known at runtime upgrade, + /// it's recommended to initialize the keys to a (unique) dummy value with the expectation + /// that all validators should invoke `set_keys` before those keys are actually + /// required. + pub fn upgrade_keys(upgrade: F) + where + Old: OpaqueKeys + Member + Decode, + F: Fn(T::ValidatorId, Old) -> T::Keys, + { + let old_ids = Old::key_ids(); + let new_ids = T::Keys::key_ids(); + + // Translate NextKeys, and key ownership relations at the same time. + >::translate::(|val, old_keys| { + // Clear all key ownership relations. Typically the overlap should + // stay the same, but no guarantees by the upgrade function. + for i in old_ids.iter() { + Self::clear_key_owner(*i, old_keys.get_raw(*i)); + } + + let new_keys = upgrade(val.clone(), old_keys); + + // And now set the new ones. + for i in new_ids.iter() { + Self::put_key_owner(*i, new_keys.get_raw(*i), &val); + } + + Some(new_keys) + }); + + let _ = >::translate::, _>(|k| { + k.map(|k| { + k.into_iter() + .map(|(val, old_keys)| (val.clone(), upgrade(val, old_keys))) + .collect::>() + }) + }); } /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. @@ -697,9 +760,11 @@ impl Module { let who = T::ValidatorIdOf::convert(account.clone()) .ok_or(Error::::NoAssociatedValidatorId)?; + ensure!(frame_system::Pallet::::can_inc_consumer(&account), Error::::NoAccount); let old_keys = Self::inner_set_keys(&who, keys)?; if old_keys.is_none() { - frame_system::Module::::inc_ref(&account); + let assertion = frame_system::Pallet::::inc_consumers(&account).is_ok(); + debug_assert!(assertion, "can_inc_consumer() returned true; no change since; qed"); } Ok(()) @@ -711,7 +776,10 @@ impl Module { /// /// This does not ensure that the reference counter in system is incremented appropriately, it /// must be done by the caller or the keys will be leaked in storage. 
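The `can_inc_consumer`/`inc_consumers` pairing in `do_set_keys` above leans on frame_system's account reference counting. A toy model of the rules as this pallet uses them (plain struct fields standing in for the real account storage; the full system tracks more state than this):

```rust
// Toy model of the frame_system reference counting that `do_set_keys` and
// `purge_keys` rely on.
#[derive(Default)]
struct AccountRefs {
    providers: u32, // reasons the account must exist (e.g. a balance)
    consumers: u32, // modules depending on the account (e.g. stored keys)
}

impl AccountRefs {
    // mirrors the `ensure!(can_inc_consumer, NoAccount)` check: keys can
    // only be attached to a "live" account
    fn can_inc_consumer(&self) -> bool {
        self.providers > 0
    }
    fn inc_consumers(&mut self) -> Result<(), ()> {
        if self.can_inc_consumer() {
            self.consumers += 1;
            Ok(())
        } else {
            Err(())
        }
    }
    fn dec_consumers(&mut self) {
        self.consumers = self.consumers.saturating_sub(1);
    }
    // an account with outstanding consumers must not be reaped
    fn is_provider_required(&self) -> bool {
        self.consumers > 0
    }
}

fn main() {
    let mut acc = AccountRefs::default();
    assert!(acc.inc_consumers().is_err()); // no providers: `set_keys` fails

    acc.providers = 1; // the account got endowed / provided for
    acc.inc_consumers().unwrap(); // `set_keys` succeeds
    assert!(acc.is_provider_required()); // not reapable while keys exist

    acc.dec_consumers(); // `purge_keys`
    assert!(!acc.is_provider_required());
}
```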
- fn inner_set_keys(who: &T::ValidatorId, keys: T::Keys) -> Result, DispatchError> { + fn inner_set_keys( + who: &T::ValidatorId, + keys: T::Keys, + ) -> Result, DispatchError> { let old_keys = Self::load_keys(who); for id in T::Keys::key_ids() { @@ -722,10 +790,14 @@ impl Module { Self::key_owner(*id, key).map_or(true, |owner| &owner == who), Error::::DuplicatedKey, ); + } + + for id in T::Keys::key_ids() { + let key = keys.get_raw(*id); if let Some(old) = old_keys.as_ref().map(|k| k.get_raw(*id)) { if key == old { - continue; + continue } Self::clear_key_owner(*id, old); @@ -747,7 +819,7 @@ impl Module { let key_data = old_keys.get_raw(*id); Self::clear_key_owner(*id, key_data); } - frame_system::Module::::dec_ref(&account); + frame_system::Pallet::::dec_consumers(&account); Ok(()) } @@ -764,7 +836,8 @@ impl Module { >::insert(v, keys); } - fn key_owner(id: KeyTypeId, key_data: &[u8]) -> Option { + /// Query the owner of a session key by returning the owner's validator ID. + pub fn key_owner(id: KeyTypeId, key_data: &[u8]) -> Option { >::get((id, key_data)) } @@ -777,16 +850,30 @@ impl Module { } } +impl ValidatorSet for Module { + type ValidatorId = T::ValidatorId; + type ValidatorIdOf = T::ValidatorIdOf; + + fn session_index() -> sp_staking::SessionIndex { + Module::::current_index() + } + + fn validators() -> Vec { + Module::::validators() + } +} + /// Wraps the author-scraping logic for consensus engines that can recover /// the canonical index of an author. This then transforms it into the /// registering account-ID of that session key index. pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); -impl> FindAuthor +impl> FindAuthor for FindAccountFromAuthorIndex { fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator + where + I: 'a + IntoIterator, { let i = Inner::find_author(digests)?; @@ -795,14 +882,20 @@ impl> FindAuthor } } -impl EstimateNextNewSession for Module { +impl EstimateNextNewSession for Module { + fn average_session_length() -> T::BlockNumber { + T::NextSessionRotation::average_session_length() + } + /// This session module always calls new_session and next_session at the same time, hence we /// do a simple proxy and pass the function to next rotation. - fn estimate_next_new_session(now: T::BlockNumber) -> Option { + fn estimate_next_new_session(now: T::BlockNumber) -> (Option, Weight) { T::NextSessionRotation::estimate_next_session_rotation(now) } +} - fn weight(now: T::BlockNumber) -> Weight { - T::NextSessionRotation::weight(now) +impl frame_support::traits::DisabledValidators for Module { + fn is_disabled(index: u32) -> bool { + >::disabled_validators().binary_search(&index).is_ok() } } diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 1d787ac53b438..449acaff5305d 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,15 +18,19 @@ //! Mock helpers for Session. 
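The `DisabledValidators` implementation added above depends on `disabled_validators()` returning a sorted list of indices; a trivial self-contained sketch of that lookup:

```rust
// The pallet stores disabled validator *indices* as a sorted Vec<u32>,
// which is what makes `binary_search` a valid membership test.
fn is_disabled(disabled: &[u32], index: u32) -> bool {
    disabled.binary_search(&index).is_ok()
}

fn main() {
    let disabled = vec![1, 4, 7]; // kept sorted on insertion
    assert!(is_disabled(&disabled, 4));
    assert!(!is_disabled(&disabled, 2));
}
```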
use super::*; -use std::cell::RefCell; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use crate as pallet_session; +#[cfg(feature = "historical")] +use crate::historical as pallet_session_historical; +use frame_support::{parameter_types, BasicExternalities}; use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ - Perbill, impl_opaque_keys, - traits::{BlakeTwo256, IdentityLookup, ConvertInto}, + impl_opaque_keys, testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + Perbill, }; use sp_staking::SessionIndex; +use std::cell::RefCell; impl_opaque_keys! { pub struct MockSessionKeys { @@ -40,10 +44,59 @@ impl From for MockSessionKeys { } } -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} +pub const KEY_ID_A: KeyTypeId = KeyTypeId([4; 4]); +pub const KEY_ID_B: KeyTypeId = KeyTypeId([9; 4]); + +#[derive(Debug, Clone, codec::Encode, codec::Decode, PartialEq, Eq)] +pub struct PreUpgradeMockSessionKeys { + pub a: [u8; 32], + pub b: [u8; 64], +} + +impl OpaqueKeys for PreUpgradeMockSessionKeys { + type KeyTypeIdProviders = (); + + fn key_ids() -> &'static [KeyTypeId] { + &[KEY_ID_A, KEY_ID_B] + } + + fn get_raw(&self, i: KeyTypeId) -> &[u8] { + match i { + i if i == KEY_ID_A => &self.a[..], + i if i == KEY_ID_B => &self.b[..], + _ => &[], + } + } } +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +#[cfg(feature = "historical")] +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Historical: pallet_session_historical::{Pallet}, + } +); + +#[cfg(not(feature = "historical"))] +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + } +); + thread_local! 
{ pub static VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); pub static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); @@ -62,7 +115,12 @@ pub struct TestShouldEndSession; impl ShouldEndSession for TestShouldEndSession { fn should_end_session(now: u64) -> bool { let l = SESSION_LENGTH.with(|l| *l.borrow()); - now % l == 0 || FORCE_SESSION_END.with(|l| { let r = *l.borrow(); *l.borrow_mut() = false; r }) + now % l == 0 || + FORCE_SESSION_END.with(|l| { + let r = *l.borrow(); + *l.borrow_mut() = false; + r + }) } } @@ -76,11 +134,12 @@ impl SessionHandler for TestSessionHandler { _queued_validators: &[(u64, T)], ) { SESSION_CHANGED.with(|l| *l.borrow_mut() = changed); - AUTHORITIES.with(|l| - *l.borrow_mut() = validators.iter() + AUTHORITIES.with(|l| { + *l.borrow_mut() = validators + .iter() .map(|(_, id)| id.get::(DUMMY).unwrap_or_default()) .collect() - ); + }); } fn on_disabled(_validator_index: usize) { DISABLED.with(|l| *l.borrow_mut() = true) @@ -115,9 +174,7 @@ impl SessionManager for TestSessionManager { impl crate::historical::SessionManager for TestSessionManager { fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} - fn new_session(new_index: SessionIndex) - -> Option> - { + fn new_session(new_index: SessionIndex) -> Option> { >::new_session(new_index) .map(|vals| vals.into_iter().map(|val| (val, val)).collect()) } @@ -128,11 +185,11 @@ pub fn authorities() -> Vec { } pub fn force_new_session() { - FORCE_SESSION_END.with(|l| *l.borrow_mut() = true ) + FORCE_SESSION_END.with(|l| *l.borrow_mut() = true) } pub fn set_session_length(x: u64) { - SESSION_LENGTH.with(|l| *l.borrow_mut() = x ) + SESSION_LENGTH.with(|l| *l.borrow_mut() = x) } pub fn session_changed() -> bool { @@ -153,54 +210,56 @@ pub fn reset_before_session_end_called() { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ), - }.assimilate_storage(&mut t).unwrap(); + let keys: Vec<_> = NEXT_VALIDATORS + .with(|l| l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect()); + BasicExternalities::execute_with_storage(&mut t, || { + for (ref k, ..) in &keys { + frame_system::Pallet::::inc_providers(k); + } + frame_system::Pallet::::inc_providers(&4); + // An additional identity that we use. + frame_system::Pallet::::inc_providers(&69); + }); + pallet_session::GenesisConfig:: { keys } + .assimilate_storage(&mut t) + .unwrap(); sp_io::TestExternalities::new(t) } -#[derive(Clone, Eq, PartialEq)] -pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; pub const MinimumPeriod: u64 = 5; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -211,7 +270,7 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } -impl Trait for Test { +impl Config for Test { type ShouldEndSession = TestShouldEndSession; #[cfg(feature = "historical")] type SessionManager = crate::historical::NoteHistoricalRoot; @@ -221,17 +280,14 @@ impl Trait for Test { type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type Keys = MockSessionKeys; - type Event = (); + type Event = Event; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = (); type WeightInfo = (); } #[cfg(feature = "historical")] -impl crate::historical::Trait for Test { +impl crate::historical::Config for Test { type FullIdentification = u64; type FullIdentificationOf = sp_runtime::traits::ConvertInto; } - -pub type System = frame_system::Module; -pub type Session = Module; diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index 75def78046beb..23e1c6a993427 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,16 @@ // Tests for the Session Pallet use super::*; -use frame_support::{traits::OnInitialize, assert_ok}; -use sp_core::crypto::key_types::DUMMY; -use sp_runtime::testing::UintAuthorityId; +use codec::Decode; +use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; use mock::{ - SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, - set_next_validators, set_session_length, session_changed, Origin, System, Session, - reset_before_session_end_called, before_session_end_called, new_test_ext, + authorities, before_session_end_called, force_new_session, new_test_ext, + reset_before_session_end_called, session_changed, set_next_validators, set_session_length, + Origin, PreUpgradeMockSessionKeys, Session, System, Test, SESSION_CHANGED, + TEST_SESSION_CHANGED, }; +use sp_core::crypto::key_types::DUMMY; +use sp_runtime::testing::UintAuthorityId; fn initialize_block(block: u64) { SESSION_CHANGED.with(|l| *l.borrow_mut() = false); @@ -59,9 +61,9 @@ fn keys_cleared_on_kill() { let id = DUMMY; assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); - assert!(!System::allow_death(&1)); + assert!(System::is_provider_required(&1)); assert_ok!(Session::purge_keys(Origin::signed(1))); - assert!(System::allow_death(&1)); + assert!(!System::is_provider_required(&1)); assert_eq!(Session::load_keys(&1), None); assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); @@ -76,10 +78,10 @@ fn authorities_should_track_validators() { set_next_validators(vec![1, 2]); force_new_session(); initialize_block(1); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),] + ); assert_eq!(Session::validators(), vec![1, 2, 3]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); assert!(before_session_end_called()); @@ -87,10 +89,10 @@ fn authorities_should_track_validators() { force_new_session(); initialize_block(2); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),] + ); assert_eq!(Session::validators(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); assert!(before_session_end_called()); @@ -100,22 +102,28 @@ fn authorities_should_track_validators() { assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(4).into(), vec![])); force_new_session(); initialize_block(3); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ] + ); assert_eq!(Session::validators(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); assert!(before_session_end_called()); force_new_session(); initialize_block(4); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, 
UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ] + ); assert_eq!(Session::validators(), vec![1, 2, 4]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)]); }); @@ -179,11 +187,14 @@ fn duplicates_are_not_allowed() { new_test_ext().execute_with(|| { System::set_block_number(1); Session::on_initialize(1); - assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_err()); - assert!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![]).is_ok()); + assert_noop!( + Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]), + Error::::DuplicatedKey, + ); + assert_ok!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![])); // is fine now that 1 has migrated off. - assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_ok()); + assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![])); }); } @@ -251,44 +262,76 @@ fn session_changed_flag_works() { #[test] fn periodic_session_works() { - struct Period; - struct Offset; - - impl Get for Period { - fn get() -> u64 { 10 } - } - - impl Get for Offset { - fn get() -> u64 { 3 } + frame_support::parameter_types! { + const Period: u64 = 10; + const Offset: u64 = 3; } - type P = PeriodicSessions; - for i in 0..3 { + // make sure that offset phase behaves correctly + for i in 0u64..3 { assert!(!P::should_end_session(i)); + assert_eq!(P::estimate_next_session_rotation(i).0.unwrap(), 3); + + // the last block of the session (i.e. the one before session rotation) + // should have progress 100%. + if P::estimate_next_session_rotation(i).0.unwrap() - 1 == i { + assert_eq!( + P::estimate_current_session_progress(i).0.unwrap(), + Permill::from_percent(100) + ); + } else { + assert!( + P::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100) + ); + } } - assert!(P::should_end_session(3)); + // we end the session at block #3 and we consider this block the first one + // from the next session. since we're past the offset phase it represents + // 1/10 of progress. + assert!(P::should_end_session(3u64)); + assert_eq!(P::estimate_next_session_rotation(3u64).0.unwrap(), 3); + assert_eq!(P::estimate_current_session_progress(3u64).0.unwrap(), Permill::from_percent(10)); - for i in (1..10).map(|i| 3 + i) { + for i in (1u64..10).map(|i| 3 + i) { assert!(!P::should_end_session(i)); + assert_eq!(P::estimate_next_session_rotation(i).0.unwrap(), 13); + + // as with the offset phase the last block of the session must have 100% + // progress. + if P::estimate_next_session_rotation(i).0.unwrap() - 1 == i { + assert_eq!( + P::estimate_current_session_progress(i).0.unwrap(), + Permill::from_percent(100) + ); + } else { + assert!( + P::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100) + ); + } } - assert!(P::should_end_session(13)); + // the new session starts and we proceed in 1/10 increments. 
+ assert!(P::should_end_session(13u64)); + assert_eq!(P::estimate_next_session_rotation(13u64).0.unwrap(), 23); + assert_eq!(P::estimate_current_session_progress(13u64).0.unwrap(), Permill::from_percent(10)); + + assert!(!P::should_end_session(14u64)); + assert_eq!(P::estimate_next_session_rotation(14u64).0.unwrap(), 23); + assert_eq!(P::estimate_current_session_progress(14u64).0.unwrap(), Permill::from_percent(20)); } #[test] fn session_keys_generate_output_works_as_set_keys_input() { new_test_ext().execute_with(|| { let new_keys = mock::MockSessionKeys::generate(None); - assert_ok!( - Session::set_keys( - Origin::signed(2), - ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), - vec![], - ) - ); + assert_ok!(Session::set_keys( + Origin::signed(2), + ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), + vec![], + )); }); } @@ -308,3 +351,77 @@ fn return_true_if_more_than_third_is_disabled() { assert_eq!(Session::disable_index(3), true); }); } + +#[test] +fn upgrade_keys() { + use frame_support::storage; + use sp_core::crypto::key_types::DUMMY; + + // This test assumes certain mocks. + assert_eq!(mock::NEXT_VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + assert_eq!(mock::VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + + new_test_ext().execute_with(|| { + let pre_one = PreUpgradeMockSessionKeys { a: [1u8; 32], b: [1u8; 64] }; + + let pre_two = PreUpgradeMockSessionKeys { a: [2u8; 32], b: [2u8; 64] }; + + let pre_three = PreUpgradeMockSessionKeys { a: [3u8; 32], b: [3u8; 64] }; + + let val_keys = vec![(1u64, pre_one), (2u64, pre_two), (3u64, pre_three)]; + + // Set `QueuedKeys`. + { + let storage_key = >::hashed_key(); + assert!(storage::unhashed::exists(&storage_key)); + storage::unhashed::put(&storage_key, &val_keys); + } + + // Set `NextKeys`. + { + for &(i, ref keys) in val_keys.iter() { + let storage_key = >::hashed_key_for(i); + assert!(storage::unhashed::exists(&storage_key)); + storage::unhashed::put(&storage_key, keys); + } + } + + // Set `KeyOwner`. + { + for &(i, ref keys) in val_keys.iter() { + // clear key owner for `UintAuthorityId` keys set in genesis. + let presumed = UintAuthorityId(i); + let raw_prev = presumed.as_ref(); + + assert_eq!(Session::key_owner(DUMMY, raw_prev), Some(i)); + Session::clear_key_owner(DUMMY, raw_prev); + + Session::put_key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A), &i); + Session::put_key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B), &i); + } + } + + // Do the upgrade and check sanity. + let mock_keys_for = |val| mock::MockSessionKeys { dummy: UintAuthorityId(val) }; + Session::upgrade_keys::(|val, _old_keys| mock_keys_for(val)); + + // Check key ownership. + for (i, ref keys) in val_keys.iter() { + assert!(Session::key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A)).is_none()); + assert!(Session::key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B)).is_none()); + + let migrated_key = UintAuthorityId(*i); + assert_eq!(Session::key_owner(DUMMY, migrated_key.as_ref()), Some(*i)); + } + + // Check queued keys. + assert_eq!( + Session::queued_keys(), + vec![(1, mock_keys_for(1)), (2, mock_keys_for(2)), (3, mock_keys_for(3)),], + ); + + for i in 1u64..4 { + assert_eq!(>::get(&i), Some(mock_keys_for(i))); + } + }) +} diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs new file mode 100644 index 0000000000000..64e7ac19ea7a0 --- /dev/null +++ b/frame/session/src/weights.rs @@ -0,0 +1,91 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_session
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_session
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/session/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_session.
+pub trait WeightInfo {
+	fn set_keys() -> Weight;
+	fn purge_keys() -> Weight;
+}
+
+/// Weights for pallet_session using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// Storage: Staking Ledger (r:1 w:0)
+	// Storage: Session NextKeys (r:1 w:1)
+	// Storage: Session KeyOwner (r:4 w:4)
+	fn set_keys() -> Weight {
+		(64_427_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(6 as Weight))
+			.saturating_add(T::DbWeight::get().writes(5 as Weight))
+	}
+	// Storage: Staking Ledger (r:1 w:0)
+	// Storage: Session NextKeys (r:1 w:1)
+	// Storage: Session KeyOwner (r:0 w:4)
+	fn purge_keys() -> Weight {
+		(42_497_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(5 as Weight))
+	}
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+	// Storage: Staking Ledger (r:1 w:0)
+	// Storage: Session NextKeys (r:1 w:1)
+	// Storage: Session KeyOwner (r:4 w:4)
+	fn set_keys() -> Weight {
+		(64_427_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(6 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(5 as Weight))
+	}
+	// Storage: Staking Ledger (r:1 w:0)
+	// Storage: Session NextKeys (r:1 w:1)
+	// Storage: Session KeyOwner (r:0 w:4)
+	fn purge_keys() -> Weight {
+		(42_497_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(2 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(5 as Weight))
+	}
+}
diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml
index 2f3f3adabc2c2..942b2844195f2 100644
--- a/frame/society/Cargo.toml
+++ b/frame/society/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-society"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,24 +13,25 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-serde = { version = "1.0.101", optional = true }
-codec = { package = "parity-scale-codec", version =
"1.3.4", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } rand_chacha = { version = "0.2", default-features = false } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io ={ version = "4.0.0-dev", path = "../../primitives/io" } +frame-support-test = { version = "3.0.0", path = "../support/test" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ "codec/std", - "serde", + "scale-info/std", "sp-runtime/std", "rand_chacha/std", "sp-std/std", @@ -41,3 +42,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/society/README.md b/frame/society/README.md index b4e1fbaf22cba..8099861866429 100644 --- a/frame/society/README.md +++ b/frame/society/README.md @@ -1,6 +1,6 @@ # Society Module -- [`society::Trait`](https://docs.rs/pallet-society/latest/pallet_society/trait.Trait.html) +- [`society::Config`](https://docs.rs/pallet-society/latest/pallet_society/trait.Config.html) - [`Call`](https://docs.rs/pallet-society/latest/pallet_society/enum.Call.html) ## Overview @@ -24,7 +24,7 @@ Of the non-suspended members, there is always a: Of the non-suspended members of the society, a random set of them are chosen as "skeptics". The mechanics of skeptics is explained in the -[member phase](#member-phase) below. +[member phase](https://docs.rs/pallet-society/latest/pallet_society/#member-phase) below. ### Mechanics @@ -225,4 +225,4 @@ make judgement on a suspended candidate. * `set_max_membership` - The ROOT origin can update the maximum member count for the society. The max membership count must be greater than 1. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index cbfe5a00de240..c6d63eed20ac0 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ //! # Society Module //! -//! - [`society::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! 
@@ -251,39 +251,52 @@ mod mock; #[cfg(test)] mod tests; -use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::{Percent, ModuleId, RuntimeDebug, +use codec::{Decode, Encode}; +use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, + dispatch::DispatchResult, + ensure, traits::{ - StaticLookup, AccountIdConversion, Saturating, Zero, IntegerSquareRoot, Hash, - TrailingZeroInput, CheckedSub - } + BalanceStatus, ChangeMembers, Currency, EnsureOrigin, ExistenceRequirement::AllowDeath, + Get, Imbalance, OnUnbalanced, Randomness, ReservableCurrency, + }, + weights::Weight, + PalletId, }; -use frame_support::{decl_error, decl_module, decl_storage, decl_event, ensure, dispatch::DispatchResult}; -use frame_support::weights::Weight; -use frame_support::traits::{ - Currency, ReservableCurrency, Randomness, Get, ChangeMembers, BalanceStatus, - ExistenceRequirement::AllowDeath, EnsureOrigin, OnUnbalanced, Imbalance +use frame_system::{self as system, ensure_root, ensure_signed}; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{ + AccountIdConversion, CheckedSub, Hash, IntegerSquareRoot, Saturating, StaticLookup, + TrailingZeroInput, Zero, + }, + Percent, RuntimeDebug, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use sp_std::prelude::*; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// The module's configuration trait. -pub trait Trait: system::Trait { +pub trait Config: system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The societies's module id - type ModuleId: Get; + type PalletId: Get; /// The currency type used for bidding. type Currency: ReservableCurrency; /// Something that provides randomness in the runtime. - type Randomness: Randomness; + type Randomness: Randomness; /// The minimum amount of a deposit required for a bid to be made. type CandidateDeposit: Get>; @@ -316,10 +329,13 @@ pub trait Trait: system::Trait { /// The number of blocks between membership challenges. type ChallengePeriod: Get; + + /// The maximum number of candidates that we accept per round. + type MaxCandidateIntake: Get; } /// A vote by a member on a candidate application. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum Vote { /// The member has been chosen to be skeptic and has not yet taken any action. Skeptic, @@ -330,7 +346,7 @@ pub enum Vote { } /// A judgement by the suspension judgement origin on a suspended candidate. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum Judgement { /// The suspension judgement origin takes no direct judgment /// and places the candidate back into the bid pool. @@ -342,7 +358,7 @@ pub enum Judgement { } /// Details of a payout given as a per-block linear "trickle". 
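/// (An illustrative reading of the fields below: a `Payout { value: 100,
/// duration: 50, .. }` unlocks at roughly `value / duration = 2` units per
/// block starting at `begin`, with `paid` tracking what was already claimed.)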
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, Default)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, Default, TypeInfo)] pub struct Payout { /// Total value of the payout. value: Balance, @@ -355,7 +371,7 @@ pub struct Payout { } /// Status of a vouching member. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum VouchingStatus { /// Member is currently vouching for a user. Vouching, @@ -367,7 +383,7 @@ pub enum VouchingStatus { pub type StrikeCount = u32; /// A bid for entry into society. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug,)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Bid { /// The bidder/candidate trying to enter society who: AccountId, @@ -378,7 +394,7 @@ pub struct Bid { } /// A vote by a member on a candidate application. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum BidKind { /// The CandidateDeposit was paid for this bid. Deposit(Balance), @@ -403,7 +419,7 @@ impl BidKind { // This module's storage items. decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Society { + trait Store for Module, I: Instance=DefaultInstance> as Society { /// The first member. pub Founder get(fn founder) build(|config: &GenesisConfig| config.members.first().cloned()): Option; @@ -472,7 +488,7 @@ decl_storage! { // The module's dispatchable functions. decl_module! { /// The module declaration. - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { type Error = Error; /// The minimum amount of a deposit required for a bid to be made. const CandidateDeposit: BalanceOf = T::CandidateDeposit::get(); @@ -495,7 +511,10 @@ decl_module! { const ChallengePeriod: T::BlockNumber = T::ChallengePeriod::get(); /// The societies's module id - const ModuleId: ModuleId = T::ModuleId::get(); + const PalletId: PalletId = T::PalletId::get(); + + /// Maximum candidate intake per round. + const MaxCandidateIntake: u32 = T::MaxCandidateIntake::get(); // Used for handling module events. fn deposit_event() = default; @@ -533,7 +552,7 @@ decl_module! { /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn bid(origin, value: BalanceOf) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!>::contains_key(&who), Error::::Suspended); @@ -572,7 +591,7 @@ decl_module! { /// /// Total Complexity: O(B + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn unbid(origin, pos: u32) -> DispatchResult { let who = ensure_signed(origin)?; @@ -584,7 +603,8 @@ decl_module! { // no reason that either should fail. match b.remove(pos).kind { BidKind::Deposit(deposit) => { - let _ = T::Currency::unreserve(&who, deposit); + let err_amount = T::Currency::unreserve(&who, deposit); + debug_assert!(err_amount.is_zero()); } BidKind::Vouch(voucher, _) => { >::remove(&voucher); @@ -642,7 +662,7 @@ decl_module! 
{ /// /// Total Complexity: O(M + B + C + logM + logB + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn vouch(origin, who: T::AccountId, value: BalanceOf, tip: BalanceOf) -> DispatchResult { let voucher = ensure_signed(origin)?; // Check user is not suspended. @@ -683,7 +703,7 @@ decl_module! { /// /// Total Complexity: O(B) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn unvouch(origin, pos: u32) -> DispatchResult { let voucher = ensure_signed(origin)?; ensure!(Self::vouching(&voucher) == Some(VouchingStatus::Vouching), Error::::NotVouching); @@ -721,7 +741,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + C) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn vote(origin, candidate: ::Source, approve: bool) { let voter = ensure_signed(origin)?; let candidate = T::Lookup::lookup(candidate)?; @@ -752,7 +772,7 @@ decl_module! { /// /// Total Complexity: O(M + logM) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn defender_vote(origin, approve: bool) { let voter = ensure_signed(origin)?; let members = >::get(); @@ -784,7 +804,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + P + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] pub fn payout(origin) { let who = ensure_signed(origin)?; @@ -793,7 +813,7 @@ decl_module! { let mut payouts = >::get(&who); if let Some((when, amount)) = payouts.first() { - if when <= &>::block_number() { + if when <= &>::block_number() { T::Currency::transfer(&Self::payouts(), &who, *amount, AllowDeath)?; payouts.remove(0); if payouts.is_empty() { @@ -826,7 +846,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn found(origin, founder: T::AccountId, max_members: u32, rules: Vec) { T::FounderSetOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::AlreadyFounded); @@ -853,7 +873,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn unfound(origin) { let founder = ensure_signed(origin)?; ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); @@ -864,7 +884,7 @@ decl_module! { Founder::::kill(); Rules::::kill(); Candidates::::kill(); - SuspendedCandidates::::remove_all(); + SuspendedCandidates::::remove_all(None); Self::deposit_event(RawEvent::Unfounded(founder)); } @@ -895,7 +915,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + B) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn judge_suspended_member(origin, who: T::AccountId, forgive: bool) { T::SuspensionJudgementOrigin::ensure_origin(origin)?; ensure!(>::contains_key(&who), Error::::NotSuspended); @@ -966,7 +986,7 @@ decl_module! { /// /// Total Complexity: O(M + logM + B + X) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn judge_suspended_candidate(origin, who: T::AccountId, judgement: Judgement) { T::SuspensionJudgementOrigin::ensure_origin(origin)?; if let Some((value, kind)) = >::get(&who) { @@ -981,7 +1001,7 @@ decl_module! 
{ // Reduce next pot by payout >::put(pot - value); // Add payout for new candidate - let maturity = >::block_number() + let maturity = >::block_number() + Self::lock_duration(Self::members().len() as u32); Self::pay_accepted_candidate(&who, value, kind, maturity); } @@ -990,7 +1010,8 @@ decl_module! { match kind { BidKind::Deposit(deposit) => { // Slash deposit and move it to the society account - let _ = T::Currency::repatriate_reserved(&who, &Self::account_id(), deposit, BalanceStatus::Free); + let res = T::Currency::repatriate_reserved(&who, &Self::account_id(), deposit, BalanceStatus::Free); + debug_assert!(res.is_ok()); } BidKind::Vouch(voucher, _) => { // Ban the voucher from vouching again @@ -1026,7 +1047,7 @@ decl_module! { /// /// Total Complexity: O(1) /// # - #[weight = T::MaximumBlockWeight::get() / 10] + #[weight = T::BlockWeights::get().max_block / 10] fn set_max_members(origin, max: u32) { ensure_root(origin)?; ensure!(max > 1, Error::::MaxMembers); @@ -1038,13 +1059,14 @@ decl_module! { let mut members = vec![]; let mut weight = 0; + let weights = T::BlockWeights::get(); // Run a candidate/membership rotation if (n % T::RotationPeriod::get()).is_zero() { members = >::get(); Self::rotate_period(&mut members); - weight += T::MaximumBlockWeight::get() / 20; + weight += weights.max_block / 20; } // Run a challenge rotation @@ -1055,7 +1077,7 @@ decl_module! { } Self::rotate_challenge(&mut members); - weight += T::MaximumBlockWeight::get() / 20; + weight += weights.max_block / 20; } weight @@ -1065,7 +1087,7 @@ decl_module! { decl_error! { /// Errors for this module. - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { /// An incorrect position was provided. BadPosition, /// User is not a member. @@ -1108,7 +1130,7 @@ decl_error! { decl_event! { /// Events for this module. pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, Balance = BalanceOf { /// The society is founded by the given identity. \[founder\] @@ -1151,7 +1173,7 @@ decl_event! { /// Simple ensure origin struct to filter for the founder account. pub struct EnsureFounder(sp_std::marker::PhantomData); -impl EnsureOrigin for EnsureFounder { +impl EnsureOrigin for EnsureFounder { type Success = T::AccountId; fn try_origin(o: T::Origin) -> Result { o.into().and_then(|o| match (o, Founder::::get()) { @@ -1178,18 +1200,17 @@ fn pick_item<'a, R: RngCore, T>(rng: &mut R, items: &'a [T]) -> Option<&'a T> { /// Pick a new PRN, in the range [0, `max`] (inclusive). fn pick_usize<'a, R: RngCore>(rng: &mut R, max: usize) -> usize { - (rng.next_u32() % (max as u32 + 1)) as usize } -impl, I: Instance> Module { +impl, I: Instance> Module { /// Puts a bid into storage ordered by smallest to largest value. /// Allows a maximum of 1000 bids in queue, removing largest value people first. fn put_bid( mut bids: Vec>>, who: &T::AccountId, value: BalanceOf, - bid_kind: BidKind> + bid_kind: BidKind>, ) { const MAX_BID_COUNT: usize = 1000; @@ -1197,7 +1218,8 @@ impl, I: Instance> Module { // Insert new elements after the existing ones. This ensures new bids // with the same bid value are further down the list than existing ones. Ok(pos) => { - let different_bid = bids.iter() + let different_bid = bids + .iter() // Easily extract the index we are on .enumerate() // Skip ahead to the suggested position @@ -1209,36 +1231,25 @@ impl, I: Instance> Module { // If the element is not at the end of the list, insert the new element // in the spot. 
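				// (e.g. inserting a new bid of 300 into bids valued [100, 300, 500]
				// lands between the existing 300 and the 500, so equal-valued bids
				// keep first-come-first-served ordering.)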
if let Some((p, _)) = different_bid { - bids.insert(p, Bid { - value, - who: who.clone(), - kind: bid_kind, - }); + bids.insert(p, Bid { value, who: who.clone(), kind: bid_kind }); // If the element is at the end of the list, push the element on the end. } else { - bids.push(Bid { - value, - who: who.clone(), - kind: bid_kind, - }); + bids.push(Bid { value, who: who.clone(), kind: bid_kind }); } }, - Err(pos) => bids.insert(pos, Bid { - value, - who: who.clone(), - kind: bid_kind, - }), + Err(pos) => bids.insert(pos, Bid { value, who: who.clone(), kind: bid_kind }), } // Keep it reasonably small. if bids.len() > MAX_BID_COUNT { let Bid { who: popped, kind, .. } = bids.pop().expect("b.len() > 1000; qed"); match kind { BidKind::Deposit(deposit) => { - let _ = T::Currency::unreserve(&popped, deposit); - } + let err_amount = T::Currency::unreserve(&popped, deposit); + debug_assert!(err_amount.is_zero()); + }, BidKind::Vouch(voucher, _) => { >::remove(&voucher); - } + }, } Self::deposit_event(RawEvent::AutoUnbid(popped)); } @@ -1253,7 +1264,10 @@ impl, I: Instance> Module { } /// Check a user is a candidate. - fn is_candidate(candidates: &Vec>>, who: &T::AccountId) -> bool { + fn is_candidate( + candidates: &Vec>>, + who: &T::AccountId, + ) -> bool { // Looking up a candidate is the same as looking up a bid Self::is_bid(candidates, who) } @@ -1297,7 +1311,7 @@ impl, I: Instance> Module { T::MembershipChanged::change_members_sorted(&[], &[m.clone()], &members[..]); >::put(members); Ok(()) - } + }, } } @@ -1308,7 +1322,9 @@ impl, I: Instance> Module { let mut pot = >::get(); // we'll need a random seed here. - let seed = T::Randomness::random(phrase); + // TODO: deal with randomness freshness + // https://github.com/paritytech/substrate/issues/8312 + let (seed, _) = T::Randomness::random(phrase); // seed needs to be guaranteed to be 32 bytes. let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) .expect("input is padded with zeroes; qed"); @@ -1317,80 +1333,97 @@ impl, I: Instance> Module { // we assume there's at least one member or this logic won't work. if !members.is_empty() { let candidates = >::take(); - // NOTE: This may cause member length to surpass `MaxMembers`, but results in no consensus - // critical issues or side-effects. This is auto-correcting as members fall out of society. + // NOTE: This may cause member length to surpass `MaxMembers`, but results in no + // consensus critical issues or side-effects. This is auto-correcting as members fall + // out of society. members.reserve(candidates.len()); - let maturity = >::block_number() - + Self::lock_duration(members.len() as u32); + let maturity = + >::block_number() + Self::lock_duration(members.len() as u32); let mut rewardees = Vec::new(); let mut total_approvals = 0; let mut total_slash = >::zero(); let mut total_payouts = >::zero(); - let accepted = candidates.into_iter().filter_map(|Bid {value, who: candidate, kind }| { - let mut approval_count = 0; - - // Creates a vector of (vote, member) for the given candidate - // and tallies total number of approve votes for that candidate. - let votes = members.iter() - .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) - .inspect(|&(v, _)| if v == Vote::Approve { approval_count += 1 }) - .collect::>(); - - // Select one of the votes at random. - // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. 
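					// (Concretely: with recorded votes [Approve, Approve, Reject], one
					// vote is drawn uniformly, so acceptance has probability 2/3 here.)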
- let is_accepted = pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); - - let matching_vote = if is_accepted { Vote::Approve } else { Vote::Reject }; - - let bad_vote = |m: &T::AccountId| { - // Voter voted wrong way (or was just a lazy skeptic) then reduce their payout - // and increase their strikes. after MaxStrikes then they go into suspension. - let amount = Self::slash_payout(m, T::WrongSideDeduction::get()); - - let strikes = >::mutate(m, |s| { - *s += 1; - *s - }); - if strikes >= T::MaxStrikes::get() { - Self::suspend_member(m); - } - amount - }; - - // Collect the voters who had a matching vote. - rewardees.extend(votes.into_iter() - .filter_map(|(v, m)| - if v == matching_vote { Some(m) } else { - total_slash += bad_vote(m); - None + let accepted = candidates + .into_iter() + .filter_map(|Bid { value, who: candidate, kind }| { + let mut approval_count = 0; + + // Creates a vector of (vote, member) for the given candidate + // and tallies total number of approve votes for that candidate. + let votes = members + .iter() + .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) + .inspect(|&(v, _)| { + if v == Vote::Approve { + approval_count += 1 + } + }) + .collect::>(); + + // Select one of the votes at random. + // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. + let is_accepted = + pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); + + let matching_vote = if is_accepted { Vote::Approve } else { Vote::Reject }; + + let bad_vote = |m: &T::AccountId| { + // Voter voted wrong way (or was just a lazy skeptic) then reduce their + // payout and increase their strikes. after MaxStrikes then they go into + // suspension. + let amount = Self::slash_payout(m, T::WrongSideDeduction::get()); + + let strikes = >::mutate(m, |s| { + *s += 1; + *s + }); + if strikes >= T::MaxStrikes::get() { + Self::suspend_member(m); } - ).cloned() - ); + amount + }; + + // Collect the voters who had a matching vote. + rewardees.extend( + votes + .into_iter() + .filter_map(|(v, m)| { + if v == matching_vote { + Some(m) + } else { + total_slash += bad_vote(m); + None + } + }) + .cloned(), + ); - if is_accepted { - total_approvals += approval_count; - total_payouts += value; - members.push(candidate.clone()); + if is_accepted { + total_approvals += approval_count; + total_payouts += value; + members.push(candidate.clone()); - Self::pay_accepted_candidate(&candidate, value, kind, maturity); + Self::pay_accepted_candidate(&candidate, value, kind, maturity); - // We track here the total_approvals so that every candidate has a unique range - // of numbers from 0 to `total_approvals` with length `approval_count` so each - // candidate is proportionally represented when selecting a "primary" below. - Some((candidate, total_approvals, value)) - } else { - // Suspend Candidate - >::insert(&candidate, (value, kind)); - Self::deposit_event(RawEvent::CandidateSuspended(candidate)); - None - } - }).collect::>(); + // We track here the total_approvals so that every candidate has a unique + // range of numbers from 0 to `total_approvals` with length `approval_count` + // so each candidate is proportionally represented when selecting a + // "primary" below. + Some((candidate, total_approvals, value)) + } else { + // Suspend Candidate + >::insert(&candidate, (value, kind)); + Self::deposit_event(RawEvent::CandidateSuspended(candidate)); + None + } + }) + .collect::>(); // Clean up all votes. 
- >::remove_all(); + >::remove_all(None); // Reward one of the voters who voted the right way. if !total_slash.is_zero() { @@ -1399,7 +1432,13 @@ impl, I: Instance> Module { Self::bump_payout(winner, maturity, total_slash); } else { // Move the slashed amount back from payouts account to local treasury. - let _ = T::Currency::transfer(&Self::payouts(), &Self::account_id(), total_slash, AllowDeath); + let res = T::Currency::transfer( + &Self::payouts(), + &Self::account_id(), + total_slash, + AllowDeath, + ); + debug_assert!(res.is_ok()); } } @@ -1410,7 +1449,13 @@ impl, I: Instance> Module { // this should never fail since we ensure we can afford the payouts in a previous // block, but there's not much we can do to recover if it fails anyway. - let _ = T::Currency::transfer(&Self::account_id(), &Self::payouts(), total_payouts, AllowDeath); + let res = T::Currency::transfer( + &Self::account_id(), + &Self::payouts(), + total_payouts, + AllowDeath, + ); + debug_assert!(res.is_ok()); } // if at least one candidate was accepted... @@ -1419,17 +1464,23 @@ impl, I: Instance> Module { // Choose a random number between 0 and `total_approvals` let primary_point = pick_usize(&mut rng, total_approvals - 1); // Find the zero bid or the user who falls on that point - let primary = accepted.iter().find(|e| e.2.is_zero() || e.1 > primary_point) - .expect("e.1 of final item == total_approvals; \ - worst case find will always return that item; qed") - .0.clone(); + let primary = accepted + .iter() + .find(|e| e.2.is_zero() || e.1 > primary_point) + .expect( + "e.1 of final item == total_approvals; \ + worst case find will always return that item; qed", + ) + .0 + .clone(); let accounts = accepted.into_iter().map(|x| x.0).collect::>(); // Then write everything back out, signal the changed membership and leave an event. members.sort(); - // NOTE: This may cause member length to surpass `MaxMembers`, but results in no consensus - // critical issues or side-effects. This is auto-correcting as members fall out of society. + // NOTE: This may cause member length to surpass `MaxMembers`, but results in no + // consensus critical issues or side-effects. This is auto-correcting as members + // fall out of society. >::put(&members[..]); >::put(&primary); @@ -1450,9 +1501,10 @@ impl, I: Instance> Module { >::put(&candidates); // Select sqrt(n) random members from the society and make them skeptics. - let pick_member = |_| pick_item(&mut rng, &members[..]).expect("exited if members empty; qed"); + let pick_member = + |_| pick_item(&mut rng, &members[..]).expect("exited if members empty; qed"); for skeptic in (0..members.len().integer_sqrt()).map(pick_member) { - for Bid{ who: c, .. } in candidates.iter() { + for Bid { who: c, .. } in candidates.iter() { >::insert(c, skeptic, Vote::Skeptic); } } @@ -1473,7 +1525,7 @@ impl, I: Instance> Module { // whole slash is accounted for. *amount -= rest; rest = Zero::zero(); - break; + break } } >::insert(who, &payouts[dropped..]); @@ -1483,10 +1535,12 @@ impl, I: Instance> Module { /// Bump the payout amount of `who`, to be unlocked at the given block number. 
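	/// (Entries stay sorted by unlock block: a binary search by `when` either
	/// tops up an existing entry or inserts a new `(when, value)` pair in place.)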
fn bump_payout(who: &T::AccountId, when: T::BlockNumber, value: BalanceOf) { - if !value.is_zero(){ - >::mutate(who, |payouts| match payouts.binary_search_by_key(&when, |x| x.0) { - Ok(index) => payouts[index].1 += value, - Err(index) => payouts.insert(index, (when, value)), + if !value.is_zero() { + >::mutate(who, |payouts| { + match payouts.binary_search_by_key(&when, |x| x.0) { + Ok(index) => payouts[index].1 += value, + Err(index) => payouts.insert(index, (when, value)), + } }); } } @@ -1511,11 +1565,13 @@ impl, I: Instance> Module { BidKind::Deposit(deposit) => { // In the case that a normal deposit bid is accepted we unreserve // the deposit. - let _ = T::Currency::unreserve(candidate, deposit); + let err_amount = T::Currency::unreserve(candidate, deposit); + debug_assert!(err_amount.is_zero()); value - } + }, BidKind::Vouch(voucher, tip) => { - // Check that the voucher is still vouching, else some other logic may have removed their status. + // Check that the voucher is still vouching, else some other logic may have removed + // their status. if >::take(&voucher) == Some(VouchingStatus::Vouching) { // In the case that a vouched-for bid is accepted we unset the // vouching status and transfer the tip over to the voucher. @@ -1524,7 +1580,7 @@ impl, I: Instance> Module { } else { value } - } + }, }; Self::bump_payout(candidate, maturity, value); @@ -1539,14 +1595,12 @@ impl, I: Instance> Module { let mut approval_count = 0; let mut rejection_count = 0; // Tallies total number of approve and reject votes for the defender. - members.iter() - .filter_map(|m| >::take(m)) - .for_each(|v| { - match v { - Vote::Approve => approval_count += 1, - _ => rejection_count += 1, - } - }); + members.iter().filter_map(|m| >::take(m)).for_each( + |v| match v { + Vote::Approve => approval_count += 1, + _ => rejection_count += 1, + }, + ); if approval_count <= rejection_count { // User has failed the challenge @@ -1555,7 +1609,7 @@ impl, I: Instance> Module { } // Clean up all votes. - >::remove_all(); + >::remove_all(None); } // Avoid challenging if there's only two members since we never challenge the Head or @@ -1564,7 +1618,9 @@ impl, I: Instance> Module { // Start a new defender rotation let phrase = b"society_challenge"; // we'll need a random seed here. - let seed = T::Randomness::random(phrase); + // TODO: deal with randomness freshness + // https://github.com/paritytech/substrate/issues/8312 + let (seed, _) = T::Randomness::random(phrase); // seed needs to be guaranteed to be 32 bytes. let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) .expect("input is padded with zeroes; qed"); @@ -1584,7 +1640,7 @@ impl, I: Instance> Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. pub fn account_id() -> T::AccountId { - T::ModuleId::get().into_account() + T::PalletId::get().into_account() } /// The account ID of the payouts pot. This is where payouts are made from. @@ -1592,7 +1648,7 @@ impl, I: Instance> Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. pub fn payouts() -> T::AccountId { - T::ModuleId::get().into_sub_account(b"payouts") + T::PalletId::get().into_sub_account(b"payouts") } /// Return the duration of the lock, in blocks, with the given number of members. @@ -1610,11 +1666,11 @@ impl, I: Instance> Module { /// May be empty. 
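	/// (Selection is capped at `T::MaxCandidateIntake` per rotation, previously a
	/// hard-coded 10, and never exceeds the room left under the configured
	/// maximum member count; bids are drawn left-most first while their running
	/// total stays below the pot.)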
pub fn take_selected( members_len: usize, - pot: BalanceOf + pot: BalanceOf, ) -> Vec>> { let max_members = MaxMembers::::get() as usize; - // No more than 10 will be returned. - let mut max_selections: usize = 10.min(max_members.saturating_sub(members_len)); + let mut max_selections: usize = + (T::MaxCandidateIntake::get() as usize).min(max_members.saturating_sub(members_len)); if max_selections > 0 { // Get the number of left-most bidders whose bids add up to less than `pot`. @@ -1669,7 +1725,7 @@ impl, I: Instance> Module { } } -impl OnUnbalanced> for Module { +impl OnUnbalanced> for Module { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { let numeric_amount = amount.peek(); diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 212bcfd404ff1..38c2586323135 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,25 +18,35 @@ //! Test utilities use super::*; +use crate as pallet_society; use frame_support::{ - impl_outer_origin, parameter_types, ord_parameter_types, - traits::{OnInitialize, OnFinalize, TestRandomness}, + ord_parameter_types, parameter_types, + traits::{OnFinalize, OnInitialize}, }; +use frame_support_test::TestRandomness; +use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -use frame_system::EnsureSignedBy; -impl_outer_origin! { - pub enum Origin for Test {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Society: pallet_society::{Pallet, Call, Storage, Event, Config}, + } +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! { pub const CandidateDeposit: u64 = 25; pub const WrongSideDeduction: u64 = 2; @@ -45,14 +55,12 @@ parameter_types! { pub const PeriodSpend: u64 = 1000; pub const MaxLockDuration: u64 = 100; pub const ChallengePeriod: u64 = 8; - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: u32 = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const ExistentialDeposit: u64 = 1; - pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); + pub const MaxCandidateIntake: u32 = 10; + pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } ord_parameter_types! { @@ -60,48 +68,48 @@ ord_parameter_types! 
{ pub const SuspensionJudgementSetAccount: u128 = 2; } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = (); + type Call = Call; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type OnNewAccount = (); type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } -impl Trait for Test { - type Event = (); - type Currency = pallet_balances::Module; - type Randomness = TestRandomness; +impl Config for Test { + type Event = Event; + type Currency = pallet_balances::Pallet; + type Randomness = TestRandomness; type CandidateDeposit = CandidateDeposit; type WrongSideDeduction = WrongSideDeduction; type MaxStrikes = MaxStrikes; @@ -112,13 +120,10 @@ impl Trait for Test { type FounderSetOrigin = EnsureSignedBy; type SuspensionJudgementOrigin = EnsureSignedBy; type ChallengePeriod = ChallengePeriod; - type ModuleId = SocietyModuleId; + type MaxCandidateIntake = MaxCandidateIntake; + type PalletId = SocietyPalletId; } -pub type Society = Module; -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; - pub struct EnvBuilder { members: Vec, balance: u64, @@ -151,14 +156,16 @@ impl EnvBuilder { pub fn execute R>(mut self, f: F) -> R { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); self.balances.push((Society::account_id(), self.balance.max(self.pot))); - pallet_balances::GenesisConfig:: { - balances: self.balances, - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ + pallet_balances::GenesisConfig:: { balances: self.balances } + .assimilate_storage(&mut t) + .unwrap(); + pallet_society::GenesisConfig:: { members: self.members, pot: self.pot, max_members: self.max_members, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext: sp_io::TestExternalities = t.into(); ext.execute_with(f) } @@ -205,12 +212,7 @@ pub fn run_to_block(n: u64) { pub fn create_bid( value: Balance, who: AccountId, - kind: BidKind -) -> Bid -{ - Bid { - who, - kind, - value - } + kind: BidKind, +) -> Bid { + Bid { who, kind, value } } diff --git a/frame/society/src/tests.rs b/frame/society/src/tests.rs index 0374c7bcd7a60..9f8e32dea5088 100644 --- a/frame/society/src/tests.rs +++ b/frame/society/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +20,9 @@ use super::*; use mock::*; -use frame_support::{assert_ok, assert_noop}; -use sp_runtime::traits::BadOrigin; +use frame_support::{assert_noop, assert_ok}; use sp_core::blake2_256; +use sp_runtime::traits::BadOrigin; #[test] fn founding_works() { @@ -118,10 +118,13 @@ fn bidding_works() { assert_eq!(Society::pot(), 1000); assert_eq!(Balances::free_balance(Society::account_id()), 10_000); // Choose smallest bidding users whose total is less than pot - assert_eq!(Society::candidates(), vec![ - create_bid(300, 30, BidKind::Deposit(25)), - create_bid(400, 40, BidKind::Deposit(25)), - ]); + assert_eq!( + Society::candidates(), + vec![ + create_bid(300, 30, BidKind::Deposit(25)), + create_bid(400, 40, BidKind::Deposit(25)), + ] + ); // A member votes for these candidates to join the society assert_ok!(Society::vote(Origin::signed(10), 30, true)); assert_ok!(Society::vote(Origin::signed(10), 40, true)); @@ -132,7 +135,7 @@ fn bidding_works() { assert_eq!(Balances::free_balance(Society::account_id()), 9_300); assert_eq!(Society::pot(), 1_300); // Left over from the original bids is 50 who satisfies the condition of bid less than pot. - assert_eq!(Society::candidates(), vec![ create_bid(500, 50, BidKind::Deposit(25)) ]); + assert_eq!(Society::candidates(), vec![create_bid(500, 50, BidKind::Deposit(25))]); // 40, now a member, can vote for 50 assert_ok!(Society::vote(Origin::signed(40), 50, true)); run_to_block(12); @@ -144,7 +147,7 @@ fn bidding_works() { // No more candidates satisfy the requirements assert_eq!(Society::candidates(), vec![]); assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - // Next period + // Next period run_to_block(16); // Same members assert_eq!(Society::members(), vec![10, 30, 40, 50]); @@ -153,7 +156,7 @@ fn bidding_works() { // No payouts assert_eq!(Balances::free_balance(Society::account_id()), 8_800); // Candidate 60 now qualifies based on the increased pot size. - assert_eq!(Society::candidates(), vec![ create_bid(1900, 60, BidKind::Deposit(25)) ]); + assert_eq!(Society::candidates(), vec![create_bid(1900, 60, BidKind::Deposit(25))]); // Candidate 60 is voted in. 
assert_ok!(Society::vote(Origin::signed(50), 60, true)); run_to_block(20); @@ -183,7 +186,7 @@ fn unbidding_works() { assert_eq!(Balances::reserved_balance(30), 0); // 20 wins candidacy run_to_block(4); - assert_eq!(Society::candidates(), vec![ create_bid(1000, 20, BidKind::Deposit(25)) ]); + assert_eq!(Society::candidates(), vec![create_bid(1000, 20, BidKind::Deposit(25))]); }); } @@ -350,7 +353,10 @@ fn suspended_candidate_rejected_works() { assert_eq!(Society::suspended_candidate(20).is_some(), true); // Normal user cannot make judgement on suspended candidate - assert_noop!(Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), BadOrigin); + assert_noop!( + Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), + BadOrigin + ); // Suspension judgement origin makes no direct judgement assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Rebid)); @@ -391,7 +397,10 @@ fn vouch_works() { assert_ok!(Society::vouch(Origin::signed(10), 20, 1000, 100)); assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // A member cannot vouch twice at the same time - assert_noop!(Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching); + assert_noop!( + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching + ); // Vouching creates the right kind of bid assert_eq!(>::get(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); // Vouched user can become candidate @@ -475,7 +484,10 @@ fn unvouch_works() { assert_eq!(Society::members(), vec![10]); // 10 cannot vouch again - assert_noop!(Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching); + assert_noop!( + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching + ); // 10 cannot unvouch either, so they are banned forever. assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::NotVouching); }); @@ -654,7 +666,7 @@ fn bad_vote_slash_works() { assert_eq!(>::get(30), 0); assert_eq!(>::get(40), 0); // Their payout is slashed, a random person is rewarded - assert_eq!(>::get(10), vec![(5, 100), (9,2)]); + assert_eq!(>::get(10), vec![(5, 100), (9, 2)]); assert_eq!(>::get(20), vec![(5, 98)]); assert_eq!(>::get(30), vec![(5, 100)]); assert_eq!(>::get(40), vec![(5, 100)]); @@ -672,7 +684,10 @@ fn user_cannot_bid_twice() { assert_noop!(Society::bid(Origin::signed(30), 100), Error::::AlreadyBid); // Cannot vouch when already bid assert_ok!(Society::add_member(&50)); - assert_noop!(Society::vouch(Origin::signed(50), 20, 100, 100), Error::::AlreadyBid); + assert_noop!( + Society::vouch(Origin::signed(50), 20, 100, 100), + Error::::AlreadyBid + ); }); } @@ -794,7 +809,11 @@ fn max_limits_work() { assert_eq!(Society::candidates().len(), 4); // Fill up members with suspended candidates from the first rotation for i in 100..104 { - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), i, Judgement::Approve)); + assert_ok!(Society::judge_suspended_candidate( + Origin::signed(2), + i, + Judgement::Approve + )); } assert_eq!(Society::members().len(), 100); // Can't add any more members @@ -840,15 +859,18 @@ fn zero_bid_works() { assert_eq!(Society::pot(), 1000); assert_eq!(Balances::free_balance(Society::account_id()), 10_000); // Choose smallest bidding users whose total is less than pot, with only one zero bid. 
- assert_eq!(Society::candidates(), vec![ - create_bid(0, 30, BidKind::Deposit(25)), - create_bid(300, 50, BidKind::Deposit(25)), - create_bid(400, 60, BidKind::Deposit(25)), - ]); - assert_eq!(>::get(), vec![ - create_bid(0, 20, BidKind::Deposit(25)), - create_bid(0, 40, BidKind::Deposit(25)), - ]); + assert_eq!( + Society::candidates(), + vec![ + create_bid(0, 30, BidKind::Deposit(25)), + create_bid(300, 50, BidKind::Deposit(25)), + create_bid(400, 60, BidKind::Deposit(25)), + ] + ); + assert_eq!( + >::get(), + vec![create_bid(0, 20, BidKind::Deposit(25)), create_bid(0, 40, BidKind::Deposit(25)),] + ); // A member votes for these candidates to join the society assert_ok!(Society::vote(Origin::signed(10), 30, true)); assert_ok!(Society::vote(Origin::signed(10), 50, true)); @@ -878,7 +900,7 @@ fn bids_ordered_correctly() { for j in 0..5 { for i in 0..5 { - final_list.push(create_bid(j, 100 + (i * 5 + j) as u128, BidKind::Deposit(25))); + final_list.push(create_bid(j, 100 + (i * 5 + j) as u128, BidKind::Deposit(25))); } } diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 88b8c1270a4e1..aba19ba56357a 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,44 +13,47 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -static_assertions = "1.1.0" -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-npos-elections = { version = "2.0.0", default-features = false, path = "../../primitives/npos-elections" } -sp-io ={ version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0", default-features = false, features = ["historical"], path = "../session" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } +serde = { version = "1.0.126", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-session = { version = "4.0.0-dev", default-features = false, features = [ + "historical", +], 
path = "../session" } +pallet-authorship = { version = "4.0.0-dev", default-features = false, path = "../authorship" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../primitives/application-crypto" } +frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support" } +log = { version = "0.4.14", default-features = false } # Optional imports for benchmarking -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } -sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } -frame-benchmarking = { version = "2.0.0", path = "../benchmarking" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } +substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } +frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } +frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } rand_chacha = { version = "0.2" } -parking_lot = "0.10.2" -hex = "0.4" [features] default = ["std"] std = [ "serde", "codec/std", + "scale-info/std", "sp-std/std", - "sp-npos-elections/std", "sp-io/std", "frame-support/std", "sp-runtime/std", @@ -59,8 +62,12 @@ std = [ "frame-system/std", "pallet-authorship/std", "sp-application-crypto/std", + "log/std", + "frame-election-provider-support/std", ] runtime-benchmarks = [ "frame-benchmarking", + "frame-election-provider-support/runtime-benchmarks", "rand_chacha", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/staking/README.md b/frame/staking/README.md index b7b2141e58a5b..072353b1a586c 100644 --- a/frame/staking/README.md +++ b/frame/staking/README.md @@ -2,7 +2,7 @@ The Staking module is used to manage funds at stake by network maintainers. -- [`staking::Trait`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html) +- [`staking::Config`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html) - [`Call`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html) - [`Module`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Module.html) @@ -57,7 +57,7 @@ There are three possible roles that any staked account pair can be in: `Validato and `Idle` (defined in [`StakerStatus`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.StakerStatus.html)). 
There are three corresponding instructions to change between roles, namely: [`validate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.validate), -[`nominate`](./enum.Call.html#variant.nominate), and [`chill`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.chill). +[`nominate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.nominate), and [`chill`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.chill). #### Validating @@ -81,7 +81,7 @@ between the validator and its nominators. This rule incentivizes the nominators the misbehaving/offline validators as much as possible, simply because the nominators will also lose funds if they vote poorly. -An account can become a nominator via the [`nominate`](enum.Call.html#variant.nominate) call. +An account can become a nominator via the [`nominate`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.nominate) call. #### Rewards and Slash @@ -90,7 +90,7 @@ valid behavior_ while _punishing any misbehavior or lack of availability_. Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each nominator's account. @@ -102,7 +102,7 @@ Slashing logic is further described in the documentation of the `slashing` modul Similar to slashing, rewards are also shared among a validator and its associated nominators. Yet, the reward funds are not always transferred to the stash account and can be configured. See -[Reward Calculation](#reward-calculation) for more details. +[Reward Calculation](https://docs.rs/pallet-staking/latest/pallet_staking/#reward-calculation) for more details. #### Chilling @@ -110,7 +110,7 @@ Finally, any of the roles above can choose to step back temporarily and just chi This means that if they are a nominator, they will not be considered as voters anymore and if they are validators, they will no longer be a candidate for the next election. -An account can step back via the [`chill`](enum.Call.html#variant.chill) call. +An account can step back via the [`chill`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.chill) call. ### Session managing @@ -137,10 +137,10 @@ use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; use pallet_staking::{self as staking}; -pub trait Trait: staking::Trait {} +pub trait Config: staking::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { /// Reward a validator. #[weight = 0] pub fn reward_myself(origin) -> dispatch::DispatchResult { @@ -157,7 +157,7 @@ decl_module! 
{ ### Era payout The era payout is computed using yearly inflation curve defined at -[`T::RewardCurve`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.RewardCurve) as such: +[`T::RewardCurve`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.RewardCurve) as such: ```nocompile staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year @@ -168,7 +168,7 @@ This payout is used to reward stakers as defined in next section remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout ``` The remaining reward is send to the configurable end-point -[`T::RewardRemainder`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Trait.html#associatedtype.RewardRemainder). +[`T::RewardRemainder`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.RewardRemainder). ### Reward Calculation @@ -176,14 +176,14 @@ Validators and nominators are rewarded at the end of each era. The total reward calculated using the era duration and the staking rate (the total amount of tokens staked by nominators and validators, divided by the total token supply). It aims to incentivize toward a defined staking rate. The full specification can be found -[here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model). +[here](https://research.web3.foundation/en/latest/polkadot/economics/1-token-economics.html#inflation-model). Total reward is split among validators and their nominators depending on the number of points they received during the era. Points are added to a validator using [`reward_by_ids`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.reward_by_ids) or [`reward_by_indices`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.reward_by_indices). -[`Module`](./struct.Module.html) implements +[`Module`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Module.html) implements [`pallet_authorship::EventHandler`](https://docs.rs/pallet-authorship/latest/pallet_authorship/trait.EventHandler.html) to add reward points to block producer and block producer of referenced uncles. @@ -198,11 +198,11 @@ validator and all of the nominators that nominated the validator, proportional t staked behind this validator (_i.e._ dividing the [`own`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.own) or [`others`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.others) by -[`total`](./struct.Exposure.html#structfield.total) in [`Exposure`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html)). +[`total`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html#structfield.total) in [`Exposure`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Exposure.html)). All entities who receive a reward have the option to choose their reward destination through the [`Payee`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.Payee.html) storage item (see -[`set_payee`](enum.Call.html#variant.set_payee)), to be one of the following: +[`set_payee`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.set_payee)), to be one of the following: - Controller account, (obviously) not increasing the staked value. - Stash account, not increasing the staked value. 
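To make the era-payout split above concrete, a small self-contained check with made-up inputs (the 7%/10% inflation figures, the issuance and the 365 eras per year are illustrative only, not chain parameters):

```rust
fn main() {
	let total_tokens: f64 = 1_000_000.0;
	let era_per_year: f64 = 365.0;
	// yearly_inflation(npos_token_staked / total_tokens), assumed 7% here.
	let yearly_inflation: f64 = 0.07;
	let max_yearly_inflation: f64 = 0.10;

	let staker_payout = yearly_inflation * total_tokens / era_per_year;
	let remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout;

	// ~191.8 tokens go to stakers and ~82.2 to `T::RewardRemainder` per era.
	println!("stakers: {:.1}, remainder: {:.1}", staker_payout, remaining_payout);
}
```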
@@ -213,14 +213,14 @@ All entities who receive a reward have the option to choose their reward destina
Any funds already placed into stash can be the target of the following operations:

The controller account can free a portion (or all) of the funds using the
-[`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately
-accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/struct.BondingDuration.html)
+[`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond) call. Note that the funds are not immediately
+accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.BondingDuration)
(in number of eras) must pass until the funds can actually be removed. Once the
`BondingDuration` is over, the
[`withdraw_unbonded`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.withdraw_unbonded)
call can be used to actually withdraw the funds.

Note that there is a limitation to the number of fund-chunks that can be scheduled to be
-unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). In case this maximum
+unlocked in the future via [`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond). In case this maximum
(`MAX_UNLOCKING_CHUNKS`) is reached, the bonded account _must_ first wait until a successful
call to `withdraw_unbonded` to remove some of the chunks.

@@ -246,4 +246,4 @@ The Staking module depends on the [`GenesisConfig`](https://docs.rs/pallet-staki
- [Session](https://docs.rs/pallet-session/latest/pallet_session/): Used to manage sessions. Also, a list of new
  validators is stored in the Session module's `Validators` at the end of each era.

-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/staking/fuzzer/.gitignore b/frame/staking/fuzzer/.gitignore
deleted file mode 100644
index 3ebcb104d4a50..0000000000000
--- a/frame/staking/fuzzer/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-hfuzz_target
-hfuzz_workspace
diff --git a/frame/staking/fuzzer/Cargo.lock b/frame/staking/fuzzer/Cargo.lock
deleted file mode 100644
index e451e12d10131..0000000000000
--- a/frame/staking/fuzzer/Cargo.lock
+++ /dev/null
@@ -1,2294 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "ahash" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3" -dependencies = [ - "const-random", -] - -[[package]] -name = "aho-corasick" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" -dependencies = [ - "memchr", -] - -[[package]] -name = "alga" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex", - "num-traits", -] - -[[package]] -name = "approx" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" -dependencies = [ - "num-traits", -] - -[[package]] -name = "arbitrary" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75153c95fdedd7db9732dfbfc3702324a1627eec91ba56e37cd0ac78314ab2ed" - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" - -[[package]] -name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - -[[package]] -name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" - -[[package]] -name = "backtrace" -version = "0.3.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e692897359247cc6bb902933361652380af0f1b7651ae5c5013407f30e109e" -dependencies = [ - "backtrace-sys", - "cfg-if", - "libc", - "rustc-demangle", -] - -[[package]] -name = "backtrace-sys" -version = "0.1.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8aba10a69c8e8d7622c5710229485ec32e9d55fdad160ea559c086fdcd118" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "bitmask" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da9b3d9f6f585199287a473f4f8dfab6566cf827d15c00c219f53c645687ead" - 
-[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium", -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "bumpalo" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" - -[[package]] -name = "byte-slice-cast" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - -[[package]] -name = "cc" -version = "1.0.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "clear_on_drop" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97276801e127ffb46b66ce23f35cc96bd454fa311294bced4bbace7baa8b1d17" -dependencies = [ - "cc", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - -[[package]] -name = "const-random" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f1af9ac737b2dd2d577701e59fd09ba34822f6f2ebdb30a7647405d9e55e16a" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e4c606eb459dd29f7c57b2e0879f2b6f14ee130918c2b78ccb58a9624e6c7a" -dependencies = [ - "getrandom", - "proc-macro-hack", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array", - "subtle 1.0.0", -] - -[[package]] -name = "curve25519-dalek" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" -dependencies = [ - "byteorder", - "digest", - "rand_core 0.5.1", - "subtle 2.2.2", - "zeroize", -] - -[[package]] -name = "derive_more" -version = "0.99.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.0-pre.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" -dependencies = [ - "clear_on_drop", - "curve25519-dalek", - "rand 0.7.3", - "sha2", -] - -[[package]] -name = "either" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" - -[[package]] -name = "environmental" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" - -[[package]] -name = "failure" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fixed-hash" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32529fc42e86ec06e5047092082aab9ad459b070c5d2a76b14f4f5ce70bf2e84" -dependencies = [ - "byteorder", - "rand 0.7.3", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "frame-benchmarking" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "linregress", - "parity-scale-codec", - "sp-api", - "sp-io", - "sp-runtime", - "sp-runtime-interface", - "sp-std", -] - -[[package]] -name = "frame-metadata" -version = "11.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-core", - "sp-std", -] - -[[package]] -name = "frame-support" -version = "2.0.0-alpha.5" -dependencies = [ - "bitmask", - "frame-metadata", - "frame-support-procedural", - "impl-trait-for-tuples", - "log", - "once_cell", - "parity-scale-codec", - "paste", - "serde", - "sp-arithmetic", - "sp-core", - "sp-inherents", - 
"sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "tracing", -] - -[[package]] -name = "frame-support-procedural" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support-procedural-tools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support-procedural-tools-derive", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "2.0.0-alpha.5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-system" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "impl-trait-for-tuples", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "futures" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" - -[[package]] -name = "futures-executor" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" - -[[package]] -name = "futures-macro" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" - -[[package]] -name = "futures-task" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" - -[[package]] -name = "futures-util" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.12.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - -[[package]] -name = "getrandom" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" -dependencies = [ - "ahash", - "autocfg 0.1.7", -] - -[[package]] -name = "heck" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac", - "digest", -] - -[[package]] -name = "hmac-drbg" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" -dependencies = [ - "digest", - "generic-array", - "hmac", -] - -[[package]] -name = "impl-codec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-serde" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-serde" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bbe9ea9b182f0fb1cabbd61f4ff9b7b7b9197955e95a7e4c27de5055eb29ff8" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" - -[[package]] -name = "js-sys" -version = "0.3.37" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" - -[[package]] -name = "libfuzzer-sys" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d718794b8e23533b9069bd2c4597d69e41cc7ab1c02700a502971aca0cdcf24" -dependencies = [ - "arbitrary", - "cc", -] - -[[package]] -name = "libm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" - -[[package]] -name = "libsecp256k1" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" -dependencies = [ - "arrayref", - "crunchy", - "digest", - "hmac-drbg", - "rand 0.7.3", - "sha2", - "subtle 2.2.2", - "typenum", -] - -[[package]] -name = "linregress" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9290cf6f928576eeb9c096c6fad9d8d452a0a1a70a2bbffa6e36064eedc0aac9" -dependencies = [ - "failure", - "nalgebra", - "statrs", -] - -[[package]] -name = "lock_api" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "matrixmultiply" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4f7ec66360130972f34830bfad9ef05c6610a43938a467bcc9ab9369ab3478f" -dependencies = [ - "rawpointer", -] - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "memchr" -version = "2.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" - -[[package]] -name = "memory-db" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58381b20ebe2c578e75dececd9da411414903415349548ccc46aac3209cdfbc" -dependencies = [ - "ahash", - "hash-db", - "hashbrown", - "parity-util-mem", -] - -[[package]] -name = "memory_units" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "nalgebra" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" -dependencies = [ - "alga", - "approx", - "generic-array", - "matrixmultiply", - "num-complex", - "num-rational", - "num-traits", - "rand 0.6.5", - "typenum", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg 1.0.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg 1.0.0", - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" -dependencies = [ - "autocfg 1.0.0", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg 1.0.0", - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" -dependencies = [ - "autocfg 1.0.0", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" -dependencies = [ - "parking_lot 0.9.0", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "pallet-authorship" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-authorship", - "sp-core", - "sp-inherents", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-balances" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-indices" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-keyring", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-session" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - 
"frame-system", - "impl-trait-for-tuples", - "pallet-timestamp", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-staking", - "sp-std", - "sp-trie", -] - -[[package]] -name = "pallet-staking" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-support", - "frame-system", - "pallet-authorship", - "pallet-indices", - "pallet-session", - "parity-scale-codec", - "rand 0.7.3", - "serde", - "sp-application-crypto", - "sp-core", - "sp-io", - "sp-npos-elections", - "sp-runtime", - "sp-staking", - "sp-std", - "static_assertions", -] - -[[package]] -name = "pallet-staking-fuzz" -version = "0.0.0" -dependencies = [ - "frame-support", - "frame-system", - "libfuzzer-sys", - "pallet-balances", - "pallet-indices", - "pallet-session", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-timestamp", - "parity-scale-codec", - "rand 0.7.3", - "sp-core", - "sp-io", - "sp-npos-elections", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-staking-reward-curve" -version = "2.0.0-alpha.5" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pallet-timestamp" -version = "2.0.0-alpha.5" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "serde", - "sp-inherents", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "parity-scale-codec" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "329c8f7f4244ddb5c37c103641027a76c530e65e8e4b8240b29f81ea40508b17" -dependencies = [ - "arrayvec 0.5.1", - "bitvec", - "byte-slice-cast", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-util-mem" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e42755f26e5ea21a6a819d9e63cbd70713e9867a2b767ec2cc65ca7659532c5" -dependencies = [ - "cfg-if", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot 0.10.0", - "primitive-types", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - -[[package]] -name = "parity-wasm" -version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" - -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -dependencies = [ - "lock_api", - "parking_lot_core 0.6.2", - "rustc_version", -] - -[[package]] -name = "parking_lot" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" -dependencies = [ - "lock_api", - "parking_lot_core 0.7.0", -] - -[[package]] -name = "parking_lot_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -dependencies = [ - "cfg-if", - "cloudabi", - "libc", - "redox_syscall", - "rustc_version", - "smallvec 0.6.13", - "winapi", -] - -[[package]] -name = "parking_lot_core" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" -dependencies = [ - "cfg-if", - "cloudabi", - "libc", - "redox_syscall", - "smallvec 1.2.0", - "winapi", -] - -[[package]] -name = "paste" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "092d791bf7847f70bbd49085489fba25fc2c193571752bff9e36e74e72403932" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] - -[[package]] -name = "paste-impl" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406c23fb4c45cc6f68a9bbabb8ec7bd6f8cfcbd17e9e8f72c2460282f8325729" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pbkdf2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" -dependencies = [ - "byteorder", - "crypto-mac", -] - -[[package]] -name = "pin-utils" -version = "0.1.0-alpha.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" - -[[package]] -name = "ppv-lite86" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" - -[[package]] -name = "primitive-types" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5e4b9943a2da369aec5e96f7c10ebc74fcf434d39590d974b0a3460e6f67fbb" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-serde 0.3.0", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e" -dependencies = [ - "toml", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfdefadc3d57ca21cf17990a28ef4c0f7c61383a28cb7604cf4a18e6ede1420" - -[[package]] -name = "proc-macro-nested" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" - -[[package]] -name = "proc-macro2" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "quote" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "rand" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - 
"libc", - "rand_core 0.3.1", - "winapi", -] - -[[package]] -name = "rand" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -dependencies = [ - "autocfg 0.1.7", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.4.2", - "rand_hc 0.1.0", - "rand_isaac", - "rand_jitter", - "rand_os", - "rand_pcg", - "rand_xorshift", - "winapi", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom", -] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rand_jitter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -dependencies = [ - "libc", - "rand_core 0.4.2", - "winapi", -] - -[[package]] -name = "rand_os" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -dependencies = [ - "cloudabi", - "fuchsia-cprng", - "libc", - "rand_core 0.4.2", - "rdrand", - "winapi", -] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = [ - "autocfg 0.1.7", - "rand_core 0.4.2", -] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.1.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" - -[[package]] -name = "regex" -version = "1.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", -] - -[[package]] -name = "regex-syntax" -version = "0.6.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" - -[[package]] -name = "rustc-demangle" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.1", - "curve25519-dalek", - "getrandom", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "sha2", - "subtle 2.2.2", - "zeroize", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "send_wrapper" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" - -[[package]] -name = "serde" -version = "1.0.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e707fbbf255b8fc8c3b99abb91e7257a622caeb20a9818cbadbeeede4e0932ff" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5d00fc561ba2724df6758a17de23df5914f20e41cb00f94d5b7ae42fffaff8" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sha2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" -dependencies = [ - "block-buffer", - "digest", - "fake-simd", - "opaque-debug", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "smallvec" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" - -[[package]] -name = "sp-api" -version = "2.0.0-alpha.5" -dependencies = [ - "hash-db", - "parity-scale-codec", - "sp-api-proc-macro", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-version", -] - -[[package]] -name = "sp-api-proc-macro" -version = "2.0.0-alpha.5" -dependencies = [ - "blake2-rfc", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-application-crypto" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-arithmetic" -version = "2.0.0-alpha.5" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-authorship" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "sp-inherents", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-core" -version = "2.0.0-alpha.5" -dependencies = [ - "base58", - "blake2-rfc", - "byteorder", - "ed25519-dalek", - "futures", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde 0.3.0", - "lazy_static", - "libsecp256k1", - "log", - "num-traits", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.10.0", - "primitive-types", - "rand 0.7.3", - "regex", - "schnorrkel", - "serde", - "sha2", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std", - "sp-storage", - "substrate-bip39", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", -] - -[[package]] -name = "sp-debug-derive" -version = "2.0.0-alpha.5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.8.0-alpha.5" -dependencies = [ - "environmental", - "sp-std", - "sp-storage", -] - -[[package]] -name = "sp-inherents" -version = "2.0.0-alpha.5" -dependencies = [ - "derive_more", - "parity-scale-codec", - "parking_lot 0.10.0", - "sp-core", - "sp-std", -] - -[[package]] -name = "sp-io" -version = "2.0.0-alpha.5" -dependencies = [ - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "sp-core", - "sp-externalities", - "sp-runtime-interface", - "sp-state-machine", - "sp-std", - "sp-trie", - "sp-wasm-interface", -] - -[[package]] -name = "sp-keyring" 
-version = "2.0.0-alpha.5" -dependencies = [ - "lazy_static", - "sp-core", - "sp-runtime", - "strum", -] - -[[package]] -name = "sp-panic-handler" -version = "2.0.0-alpha.5" -dependencies = [ - "backtrace", - "log", -] - -[[package]] -name = "sp-npos-elections" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-npos-elections-compact", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-npos-elections-compact" -version = "2.0.0-rc3" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-runtime" -version = "2.0.0-alpha.5" -dependencies = [ - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-inherents", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-runtime-interface" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std", - "sp-wasm-interface", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "2.0.0-alpha.5" -dependencies = [ - "Inflector", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-staking" -version = "2.0.0-alpha.5" -dependencies = [ - "parity-scale-codec", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-state-machine" -version = "0.8.0-alpha.5" -dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.10.0", - "rand 0.7.3", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-trie", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-std" -version = "2.0.0-alpha.5" - -[[package]] -name = "sp-storage" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-serde 0.2.3", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-timestamp" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime", - "sp-std", - "wasm-timer", -] - -[[package]] -name = "sp-trie" -version = "2.0.0-alpha.5" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "sp-core", - "sp-std", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-version" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-serde 0.2.3", - "parity-scale-codec", - "serde", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-wasm-interface" -version = "2.0.0-alpha.5" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-std", - "wasmi", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "statrs" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" -dependencies = [ - "rand 0.5.6", -] - -[[package]] -name = "strum" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6138f8f88a16d90134763314e3fc76fa3ed6a7db4725d6acf9a3ef95a3188d22" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "substrate-bip39" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c004e8166d6e0aa3a9d5fa673e5b7098ff25f930de1013a21341988151e681bb" -dependencies = [ - "hmac", - "pbkdf2", - "schnorrkel", - "sha2", -] - -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" - -[[package]] -name = "syn" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - -[[package]] -name = "thread_local" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "tiny-bip39" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" -dependencies = [ - "failure", - "hmac", - "once_cell", - "pbkdf2", - "rand 0.7.3", - "rustc-hash", - "sha2", - "unicode-normalization", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2953ca5148619bc99695c1274cb54c5275bbb913c6adad87e72eaf8db9787f69" -dependencies = [ - "crunchy", -] - -[[package]] -name = "toml" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" -dependencies = [ - "serde", -] - -[[package]] -name = "tracing" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1721cc8cf7d770cc4257872507180f35a4797272f5962f24c806af9e7faf52ab" -dependencies = [ - "cfg-if", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fbad39da2f9af1cae3016339ad7f2c7a9e870f12e8fd04c4fd7ef35b30c0d2b" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "trie-db" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9222c50cc325855621271157c973da27a0dcd26fa06f8edf81020bd2333df0" -dependencies = [ - "hash-db", - "hashbrown", - "log", - "rustc-hex", - "smallvec 1.2.0", -] - -[[package]] -name = "trie-root" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" -dependencies = [ - "hash-db", -] - -[[package]] -name = "twox-hash" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" -dependencies = [ - "rand 0.7.3", -] - -[[package]] -name = "typenum" -version = "1.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" - -[[package]] -name = "uint" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d" -dependencies = [ - "byteorder", - "crunchy", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" -dependencies = [ - "smallvec 1.2.0", -] - -[[package]] -name = "unicode-segmentation" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" - -[[package]] -name = "unicode-xid" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasm-bindgen" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" - -[[package]] -name = "wasm-timer" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" -dependencies = [ - "futures", - "js-sys", - "parking_lot 0.9.0", - "pin-utils", - "send_wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "wasmi" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" -dependencies = [ - "libc", - "memory_units", - "num-rational", - "num-traits", - "parity-wasm", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "web-sys" -version = "0.3.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "zeroize" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml deleted file mode 100644 index e1431aa54d4a7..0000000000000 --- a/frame/staking/fuzzer/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "pallet-staking-fuzz" -version = "0.0.0" -authors = ["Automatically generated"] -publish = false -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME pallet staking fuzzing" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -honggfuzz = "0.5" -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -pallet-staking = { version = "2.0.0", path = "..", features = ["runtime-benchmarks"] } -pallet-staking-reward-curve = { version = "2.0.0", path = "../reward-curve" } -pallet-session = { version = "2.0.0", path = "../../session" } -pallet-indices = { version = "2.0.0", path = "../../indices" } -pallet-balances = { version = "2.0.0", path = "../../balances" } -pallet-timestamp = { version = "2.0.0", path = "../../timestamp" } -frame-system = { version = "2.0.0", path = "../../system" } -frame-support = { version = "2.0.0", path = "../../support" } 
-sp-std = { version = "2.0.0", path = "../../../primitives/std" } -sp-io ={ version = "2.0.0", path = "../../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-npos-elections = { version = "2.0.0", path = "../../../primitives/npos-elections" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } - -[[bin]] -name = "submit_solution" -path = "src/submit_solution.rs" diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 96df7674e9f44..921e0d3b48d7d 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,39 +17,36 @@ //! Mock file for staking fuzzing. -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types}; +use frame_support::parameter_types; type AccountId = u64; type AccountIndex = u32; type BlockNumber = u64; type Balance = u64; -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; -pub type Staking = pallet_staking::Module; -pub type Indices = pallet_indices::Module; -pub type Session = pallet_session::Module; - -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - staking::Staking, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event, ValidateUnsigned}, + Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } -} - -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; +); -impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); + type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; type Call = Call; @@ -58,33 +55,34 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = Indices; type Header = sp_runtime::testing::Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = (Balances,); + type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: Balance = 10; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = (); + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } -impl pallet_indices::Trait for Test { +impl pallet_indices::Config for Test { type AccountIndex = AccountIndex; - type Event = (); + type Event = Event; type Currency = Balances; type Deposit = (); type WeightInfo = (); @@ -92,13 +90,13 @@ impl pallet_indices::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -124,13 +122,13 @@ impl pallet_session::SessionHandler for TestSessionHandler { fn on_disabled(_: usize) {} } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; type SessionHandler = TestSessionHandler; - type Event = (); + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type DisabledValidatorsThreshold = (); @@ -154,19 +152,35 @@ parameter_types! 
{ pub type Extrinsic = sp_runtime::testing::TestXt; -impl frame_system::offchain::SendTransactionTypes for Test where +impl frame_system::offchain::SendTransactionTypes for Test +where Call: From, { type OverarchingCall = Call; type Extrinsic = Extrinsic; } -impl pallet_staking::Trait for Test { +pub struct MockElectionProvider; +impl frame_election_provider_support::ElectionProvider + for MockElectionProvider +{ + type Error = (); + type DataProvider = pallet_staking::Module; + + fn elect() -> Result< + (sp_npos_elections::Supports, frame_support::weights::Weight), + Self::Error + > { + Err(()) + } +} + +impl pallet_staking::Config for Test { type Currency = Balances; - type UnixTime = pallet_timestamp::Module; + type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = (); + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -174,7 +188,7 @@ impl pallet_staking::Trait for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = (); type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type ElectionLookahead = (); type Call = Call; @@ -184,4 +198,5 @@ impl pallet_staking::Trait for Test { type UnsignedPriority = (); type OffchainSolutionWeightLimit = (); type WeightInfo = (); + type ElectionProvider = MockElectionProvider; } diff --git a/frame/staking/fuzzer/src/submit_solution.rs b/frame/staking/fuzzer/src/submit_solution.rs deleted file mode 100644 index 4f85066f7f66a..0000000000000 --- a/frame/staking/fuzzer/src/submit_solution.rs +++ /dev/null @@ -1,182 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Fuzzing for staking pallet. -//! -//! HFUZZ_RUN_ARGS="-n 8" cargo hfuzz run submit_solution - -use honggfuzz::fuzz; - -use mock::Test; -use pallet_staking::testing_utils::*; -use frame_support::{assert_ok, storage::StorageValue, traits::UnfilteredDispatchable}; -use frame_system::RawOrigin; -use sp_runtime::DispatchError; -use sp_core::offchain::{testing::TestOffchainExt, OffchainExt}; -use pallet_staking::{EraElectionStatus, ElectionStatus, Module as Staking, Call as StakingCall}; - -mod mock; - -#[repr(u32)] -#[allow(dead_code)] -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum Mode { - /// Initial submission. This will be rather cheap. - InitialSubmission, - /// A better submission that will replace the previous ones. This is the most expensive. - StrongerSubmission, - /// A weak submission that will be rejected. This will be rather cheap. 
- WeakerSubmission, -} - -pub fn new_test_ext(iterations: u32) -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default() - .build_storage::() - .map(Into::into) - .expect("Failed to create test externalities."); - - let (offchain, offchain_state) = TestOffchainExt::new(); - - let mut seed = [0u8; 32]; - seed[0..4].copy_from_slice(&iterations.to_le_bytes()); - offchain_state.write().seed = seed; - - ext.register_extension(OffchainExt::new(offchain)); - - ext -} - -fn main() { - let to_range = |x: u32, a: u32, b: u32| { - let collapsed = x % b; - if collapsed >= a { - collapsed - } else { - collapsed + a - } - }; - loop { - fuzz!(|data: (u32, u32, u32, u32, u32)| { - let (mut num_validators, mut num_nominators, mut edge_per_voter, mut to_elect, mode_u32) = data; - // always run with 5 iterations. - let mut ext = new_test_ext(5); - let mode: Mode = unsafe { std::mem::transmute(mode_u32) }; - num_validators = to_range(num_validators, 50, 1000); - num_nominators = to_range(num_nominators, 50, 2000); - edge_per_voter = to_range(edge_per_voter, 1, 16); - to_elect = to_range(to_elect, 20, num_validators); - - let do_reduce = true; - - println!("+++ instance with params {} / {} / {} / {} / {:?}({})", - num_nominators, - num_validators, - edge_per_voter, - to_elect, - mode, - mode_u32, - ); - - ext.execute_with(|| { - // initial setup - init_active_era(); - - assert_ok!(create_validators_with_nominators_for_era::( - num_validators, - num_nominators, - edge_per_voter as usize, - true, - None, - )); - - >::put(ElectionStatus::Open(1)); - assert!(>::create_stakers_snapshot().0); - - let origin = RawOrigin::Signed(create_funded_user::("fuzzer", 0, 100)); - - // stuff to submit - let (winners, compact, score, size) = match mode { - Mode::InitialSubmission => { - // No need to setup anything - get_seq_phragmen_solution::(do_reduce) - }, - Mode::StrongerSubmission => { - let (winners, compact, score, size) = get_weak_solution::(false); - println!("Weak on chain score = {:?}", score); - assert_ok!( - >::submit_election_solution( - origin.clone().into(), - winners, - compact, - score, - current_era::(), - size, - ) - ); - get_seq_phragmen_solution::(do_reduce) - }, - Mode::WeakerSubmission => { - let (winners, compact, score, size) = get_seq_phragmen_solution::(do_reduce); - println!("Strong on chain score = {:?}", score); - assert_ok!( - >::submit_election_solution( - origin.clone().into(), - winners, - compact, - score, - current_era::(), - size, - ) - ); - get_weak_solution::(false) - } - }; - - // must have chosen correct number of winners. - assert_eq!(winners.len() as u32, >::validator_count()); - - // final call and origin - let call = StakingCall::::submit_election_solution( - winners, - compact, - score, - current_era::(), - size, - ); - - // actually submit - match mode { - Mode::WeakerSubmission => { - assert_eq!( - call.dispatch_bypass_filter(origin.into()).unwrap_err().error, - DispatchError::Module { - index: 0, - error: 16, - message: Some("OffchainElectionWeakSubmission"), - }, - ); - }, - // NOTE: so exhaustive pattern doesn't work here.. maybe some rust issue? - // or due to `#[repr(u32)]`? 
- Mode::InitialSubmission | Mode::StrongerSubmission => { - assert_ok!(call.dispatch_bypass_filter(origin.into())); - } - }; - }) - }); - } -} diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 19f7e51b8f6ce..4cbc2473cb526 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking-reward-curve" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,10 +15,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.7", features = ["full", "visit"] } +syn = { version = "1.0.58", features = ["full", "visit"] } quote = "1.0.3" -proc-macro2 = "1.0.6" -proc-macro-crate = "0.1.4" +proc-macro2 = "1.0.29" +proc-macro-crate = "1.0.0" [dev-dependencies] -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index 275669fe26b3b..06e35d11350e0 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,8 @@ mod log; use log::log2; use proc_macro::TokenStream; -use proc_macro2::{TokenStream as TokenStream2, Span}; -use proc_macro_crate::crate_name; +use proc_macro2::{Span, TokenStream as TokenStream2}; +use proc_macro_crate::{crate_name, FoundCrate}; use quote::{quote, ToTokens}; use std::convert::TryInto; use syn::parse::{Parse, ParseStream}; @@ -32,28 +32,27 @@ use syn::parse::{Parse, ParseStream}; /// [here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model)) /// for those parameters. Parameters are: /// - `min_inflation`: the minimal amount to be rewarded between validators, expressed as a fraction -/// of total issuance. Known as `I_0` in the literature. -/// Expressed in millionth, must be between 0 and 1_000_000. +/// of total issuance. Known as `I_0` in the literature. Expressed in millionth, must be between 0 +/// and 1_000_000. /// /// - `max_inflation`: the maximum amount to be rewarded between validators, expressed as a fraction -/// of total issuance. This is attained only when `ideal_stake` is achieved. -/// Expressed in millionth, must be between min_inflation and 1_000_000. +/// of total issuance. This is attained only when `ideal_stake` is achieved. Expressed in +/// millionth, must be between min_inflation and 1_000_000. /// /// - `ideal_stake`: the fraction of total issued tokens that should be actively staked behind -/// validators. Known as `x_ideal` in the literature. -/// Expressed in millionth, must be between 0_100_000 and 0_900_000. +/// validators. Known as `x_ideal` in the literature. Expressed in millionth, must be between +/// 0_100_000 and 0_900_000. /// /// - `falloff`: Known as `decay_rate` in the literature. A co-efficient dictating the strength of /// the global incentivization to get the `ideal_stake`. A higher number results in less typical -/// inflation at the cost of greater volatility for validators. -/// Expressed in millionth, must be between 0 and 1_000_000. 
+/// inflation at the cost of greater volatility for validators. Expressed in millionth, must be +/// between 0 and 1_000_000. /// /// - `max_piece_count`: The maximum number of pieces in the curve. A greater number uses more -/// resources but results in higher accuracy. -/// Must be between 2 and 1_000. +/// resources but results in higher accuracy. Must be between 2 and 1_000. /// -/// - `test_precision`: The maximum error allowed in the generated test. -/// Expressed in millionth, must be between 0 and 1_000_000. +/// - `test_precision`: The maximum error allowed in the generated test. Expressed in millionth, +/// must be between 0 and 1_000_000. /// /// # Example /// @@ -62,14 +61,14 @@ use syn::parse::{Parse, ParseStream}; /// use sp_runtime::curve::PiecewiseLinear; /// /// pallet_staking_reward_curve::build! { -/// const I_NPOS: PiecewiseLinear<'static> = curve!( -/// min_inflation: 0_025_000, -/// max_inflation: 0_100_000, -/// ideal_stake: 0_500_000, -/// falloff: 0_050_000, -/// max_piece_count: 40, -/// test_precision: 0_005_000, -/// ); +/// const I_NPOS: PiecewiseLinear<'static> = curve!( +/// min_inflation: 0_025_000, +/// max_inflation: 0_100_000, +/// ideal_stake: 0_500_000, +/// falloff: 0_050_000, +/// max_piece_count: 40, +/// test_precision: 0_005_000, +/// ); /// } /// ``` #[proc_macro] @@ -82,11 +81,14 @@ pub fn build(input: TokenStream) -> TokenStream { let test_module = generate_test_module(&input); let imports = match crate_name("sp-runtime") { - Ok(sp_runtime) => { + Ok(FoundCrate::Itself) => quote!( + extern crate sp_runtime as _sp_runtime; + ), + Ok(FoundCrate::Name(sp_runtime)) => { let ident = syn::Ident::new(&sp_runtime, Span::call_site()); quote!( extern crate #ident as _sp_runtime; ) }, - Err(e) => syn::Error::new(Span::call_site(), &e).to_compile_error(), + Err(e) => syn::Error::new(Span::call_site(), e).to_compile_error(), }; let const_name = input.ident; @@ -98,7 +100,8 @@ pub fn build(input: TokenStream) -> TokenStream { #declaration }; #test_module - ).into() + ) + .into() } const MILLION: u32 = 1_000_000; @@ -133,10 +136,10 @@ struct Bounds { impl Bounds { fn check(&self, value: u32) -> bool { - let wrong = (self.min_strict && value <= self.min) - || (!self.min_strict && value < self.min) - || (self.max_strict && value >= self.max) - || (!self.max_strict && value > self.max); + let wrong = (self.min_strict && value <= self.min) || + (!self.min_strict && value < self.min) || + (self.max_strict && value >= self.max) || + (!self.max_strict && value > self.max); !wrong } @@ -155,17 +158,24 @@ impl core::fmt::Display for Bounds { } } -fn parse_field(input: ParseStream, bounds: Bounds) - -> syn::Result -{ - ::parse(&input)?; - ::parse(&input)?; - let value_lit = syn::LitInt::parse(&input)?; +fn parse_field( + input: ParseStream, + bounds: Bounds, +) -> syn::Result { + ::parse(input)?; + ::parse(input)?; + let value_lit = syn::LitInt::parse(input)?; let value: u32 = value_lit.base10_parse()?; if !bounds.check(value) { - return Err(syn::Error::new(value_lit.span(), format!( - "Invalid {}: {}, must be in {}", Token::default().to_token_stream(), value, bounds, - ))); + return Err(syn::Error::new( + value_lit.span(), + format!( + "Invalid {}: {}, must be in {}", + Token::default().to_token_stream(), + value, + bounds, + ), + )) } Ok(value) @@ -175,65 +185,53 @@ impl Parse for INposInput { fn parse(input: ParseStream) -> syn::Result { let args_input; - ::parse(&input)?; - let ident = ::parse(&input)?; - ::parse(&input)?; - let typ = ::parse(&input)?; - 
::parse(&input)?; - ::parse(&input)?; - ::parse(&input)?; + ::parse(input)?; + let ident = ::parse(input)?; + ::parse(input)?; + let typ = ::parse(input)?; + ::parse(input)?; + ::parse(input)?; + ::parse(input)?; syn::parenthesized!(args_input in input); - ::parse(&input)?; + ::parse(input)?; if !input.is_empty() { - return Err(input.error("expected end of input stream, no token expected")); + return Err(input.error("expected end of input stream, no token expected")) } - let min_inflation = parse_field::(&args_input, Bounds { - min: 0, - min_strict: true, - max: 1_000_000, - max_strict: false, - })?; + let min_inflation = parse_field::( + &args_input, + Bounds { min: 0, min_strict: true, max: 1_000_000, max_strict: false }, + )?; ::parse(&args_input)?; - let max_inflation = parse_field::(&args_input, Bounds { - min: min_inflation, - min_strict: true, - max: 1_000_000, - max_strict: false, - })?; + let max_inflation = parse_field::( + &args_input, + Bounds { min: min_inflation, min_strict: true, max: 1_000_000, max_strict: false }, + )?; ::parse(&args_input)?; - let ideal_stake = parse_field::(&args_input, Bounds { - min: 0_100_000, - min_strict: false, - max: 0_900_000, - max_strict: false, - })?; + let ideal_stake = parse_field::( + &args_input, + Bounds { min: 0_100_000, min_strict: false, max: 0_900_000, max_strict: false }, + )?; ::parse(&args_input)?; - let falloff = parse_field::(&args_input, Bounds { - min: 0_010_000, - min_strict: false, - max: 1_000_000, - max_strict: false, - })?; + let falloff = parse_field::( + &args_input, + Bounds { min: 0_010_000, min_strict: false, max: 1_000_000, max_strict: false }, + )?; ::parse(&args_input)?; - let max_piece_count = parse_field::(&args_input, Bounds { - min: 2, - min_strict: false, - max: 1_000, - max_strict: false, - })?; + let max_piece_count = parse_field::( + &args_input, + Bounds { min: 2, min_strict: false, max: 1_000, max_strict: false }, + )?; ::parse(&args_input)?; - let test_precision = parse_field::(&args_input, Bounds { - min: 0, - min_strict: false, - max: 1_000_000, - max_strict: false, - })?; + let test_precision = parse_field::( + &args_input, + Bounds { min: 0, min_strict: false, max: 1_000_000, max_strict: false }, + )?; >::parse(&args_input)?; if !args_input.is_empty() { - return Err(args_input.error("expected end of input stream, no token expected")); + return Err(args_input.error("expected end of input stream, no token expected")) } Ok(Self { @@ -262,7 +260,8 @@ impl INPoS { INPoS { i_0: input.min_inflation, i_ideal: (input.max_inflation as u64 * MILLION as u64 / input.ideal_stake as u64) - .try_into().unwrap(), + .try_into() + .unwrap(), i_ideal_times_x_ideal: input.max_inflation, x_ideal: input.ideal_stake, d: input.falloff, @@ -274,7 +273,7 @@ impl INPoS { // See web3 docs for the details fn compute_opposite_after_x_ideal(&self, y: u32) -> u32 { if y == self.i_0 { - return u32::max_value(); + return u32::MAX } // Note: the log term calculated here represents a per_million value let log = log2(self.i_ideal_times_x_ideal - self.i_0, y - self.i_0); @@ -288,14 +287,12 @@ impl INPoS { fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { let inpos = INPoS::from_input(input); - let mut points = vec![]; - points.push((0, inpos.i_0)); - points.push((inpos.x_ideal, inpos.i_ideal_times_x_ideal)); + let mut points = vec![(0, inpos.i_0), (inpos.x_ideal, inpos.i_ideal_times_x_ideal)]; // For each point p: (next_p.0 - p.0) < segment_length && (next_p.1 - p.1) < segment_length. 
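// ----------------------------------------------------------------------------------
// Editorial note (not part of the patch): a quick numeric check of the fixed-point
// convention used by `INPoS::from_input` above. All curve parameters are expressed
// in millionths, so with the documented example values (max_inflation = 10%,
// ideal_stake = 50%) the inflation rate per staked token at the ideal point is
// i_ideal = max_inflation / ideal_stake = 20%. The demo function name is mine:
fn i_ideal_demo() {
    const MILLION: u64 = 1_000_000;
    let max_inflation = 100_000u64; // 10%, in millionths
    let ideal_stake = 500_000u64; // 50%, in millionths
    assert_eq!(max_inflation * MILLION / ideal_stake, 200_000); // 20%, in millionths
}
// ----------------------------------------------------------------------------------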
// This ensures that the total number of segment doesn't overflow max_piece_count. - let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) - / (input.max_piece_count - 1); + let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) / + (input.max_piece_count - 1); let mut delta_y = max_length; let mut y = input.max_inflation; @@ -321,16 +318,15 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { let prev = points.last().unwrap(); // Compute the y corresponding to x=1_000_000 using the this point and the previous one. - let delta_y: u32 = ( - (next_x - 1_000_000) as u64 - * (prev.1 - next_y) as u64 - / (next_x - prev.0) as u64 - ).try_into().unwrap(); + let delta_y: u32 = ((next_x - 1_000_000) as u64 * (prev.1 - next_y) as u64 / + (next_x - prev.0) as u64) + .try_into() + .unwrap(); let y = next_y + delta_y; points.push((1_000_000, y)); - return points; + return points } points.push((next_x, next_y)); y = next_y; @@ -344,7 +340,8 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { fn generate_piecewise_linear(points: Vec<(u32, u32)>) -> TokenStream2 { let mut points_tokens = quote!(); - let max = points.iter() + let max = points + .iter() .map(|&(_, x)| x) .max() .unwrap_or(0) @@ -353,13 +350,15 @@ fn generate_piecewise_linear(points: Vec<(u32, u32)>) -> TokenStream2 { .unwrap_or(1_000_000_000); for (x, y) in points { - let error = || panic!(format!( - "Generated reward curve approximation doesn't fit into [0, 1] -> [0, 1] \ - because of point: + let error = || { + panic!( + "Generated reward curve approximation doesn't fit into [0, 1] -> [0, 1] because \ + of point: x = {:07} per million y = {:07} per million", - x, y - )); + x, y + ) + }; let x_perbill = x.checked_mul(1_000).unwrap_or_else(error); let y_perbill = y.checked_mul(1_000).unwrap_or_else(error); @@ -385,7 +384,7 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { let ident = &input.ident; let precision = input.test_precision; - let i_0 = inpos.i_0 as f64/ MILLION as f64; + let i_0 = inpos.i_0 as f64 / MILLION as f64; let i_ideal_times_x_ideal = inpos.i_ideal_times_x_ideal as f64 / MILLION as f64; let i_ideal = inpos.i_ideal as f64 / MILLION as f64; let x_ideal = inpos.x_ideal as f64 / MILLION as f64; @@ -407,7 +406,7 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { #[test] fn reward_curve_precision() { - for &base in [MILLION, u32::max_value()].iter() { + for &base in [MILLION, u32::MAX].iter() { let number_of_check = 100_000.min(base); for check_index in 0..=number_of_check { let i = (check_index as u64 * base as u64 / number_of_check as u64) as u32; @@ -420,14 +419,14 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { / float_res as u64 ) as u32; if err > #precision { - panic!(format!("\n\ + panic!("\n\ Generated reward curve approximation differ from real one:\n\t\ for i = {} and base = {}, f(i/base) * base = {},\n\t\ but approximation = {},\n\t\ err = {:07} millionth,\n\t\ try increase the number of segment: {} or the test_error: {}.\n", i, base, float_res, int_res, err, #max_piece_count, #precision - )); + ); } } } @@ -442,5 +441,5 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { ); } } - ).into() + ) } diff --git a/frame/staking/reward-curve/src/log.rs b/frame/staking/reward-curve/src/log.rs index 28acd5deed2bb..c196aaaa31a93 100644 --- a/frame/staking/reward-curve/src/log.rs +++ b/frame/staking/reward-curve/src/log.rs @@ -4,7 +4,7 @@ use std::convert::TryInto; macro_rules! 
pow2 { ($n:expr) => { 1_u32 << $n - } + }; } /// Returns the k_th per_million taylor term for a log2 function @@ -33,7 +33,7 @@ fn taylor_term(k: u32, y_num: u128, y_den: u128) -> u32 { /// * result represents a per-million output of log2 pub fn log2(p: u32, q: u32) -> u32 { assert!(p >= q); // keep p/q bound to [1, inf) - assert!(p <= u32::max_value()/2); + assert!(p <= u32::MAX / 2); // This restriction should not be mandatory. But function is only tested and used for this. assert!(p <= 1_000_000); @@ -46,14 +46,14 @@ pub fn log2(p: u32, q: u32) -> u32 { // find the power of 2 where q * 2^n <= p < q * 2^(n+1) let mut n = 0u32; - while !(p >= pow2!(n) * q) || !(p < pow2!(n + 1) * q) { + while (p < pow2!(n) * q) || (p >= pow2!(n + 1) * q) { n += 1; assert!(n < 32); // cannot represent 2^32 in u32 } assert!(p < pow2!(n + 1) * q); - let y_num: u32 = (p - pow2!(n) * q).try_into().unwrap(); - let y_den: u32 = (p + pow2!(n) * q).try_into().unwrap(); + let y_num: u32 = p - pow2!(n) * q; + let y_den: u32 = p + pow2!(n) * q; // Loop through each Taylor series coefficient until it reaches 10^-6 let mut res = n * 1_000_000u32; @@ -79,7 +79,7 @@ fn test_log() { let p: u32 = (1_000_000 as u64 * p as u64 / div as u64).try_into().unwrap(); let q: u32 = (1_000_000 as u64 * q as u64 / div as u64).try_into().unwrap(); - let res = - (log2(p, q) as i64); + let res = -(log2(p, q) as i64); let expected = ((q as f64 / p as f64).log(2.0) * 1_000_000 as f64).round() as i64; assert!((res - expected).abs() <= 6); } @@ -124,4 +124,4 @@ fn test_log_of_largest_input() { let expected = 19_931_568; let tolerance = 100; assert!((log2(p, q) as i32 - expected as i32).abs() < tolerance); -} \ No newline at end of file +} diff --git a/frame/staking/reward-curve/tests/test.rs b/frame/staking/reward-curve/tests/test.rs index 45ad59e00ad27..fda7df145d0f3 100644 --- a/frame/staking/reward-curve/tests/test.rs +++ b/frame/staking/reward-curve/tests/test.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/staking/reward-fn/Cargo.toml b/frame/staking/reward-fn/Cargo.toml new file mode 100644 index 0000000000000..076e05bf2a61e --- /dev/null +++ b/frame/staking/reward-fn/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "pallet-staking-reward-fn" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Reward function for FRAME staking pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] + +[dependencies] +sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/arithmetic" } +log = { version = "0.4.14", default-features = false } + +[features] +default = ["std"] +std = [ + "sp-arithmetic/std", + "log/std", +] diff --git a/frame/staking/reward-fn/src/lib.rs b/frame/staking/reward-fn/src/lib.rs new file mode 100644 index 0000000000000..dd5e629b3984c --- /dev/null +++ b/frame/staking/reward-fn/src/lib.rs @@ -0,0 +1,227 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
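// ----------------------------------------------------------------------------------
// Editorial note (not part of the patch): the `log.rs` hunk above rewrites the
// double-negated loop condition `!(p >= pow2!(n) * q) || !(p < pow2!(n + 1) * q)`
// into the equivalent direct form; both versions search for the unique `n` with
// `q * 2^n <= p < q * 2^(n+1)`. A minimal float reference for sanity-checking the
// per-million `log2(p, q)` could look like this (helper name is hypothetical):
fn log2_per_million_reference(p: u32, q: u32) -> u32 {
    assert!(q > 0 && p >= q); // same domain as the fixed-point version: p/q in [1, inf)
    ((p as f64 / q as f64).log2() * 1_000_000.0).round() as u32
}
// For example, log2(10/7) ~= 0.514573, so `log2(10, 7)` should land close to
// 514_573; `test_log` above checks the fixed-point result within 6 millionths.
// ----------------------------------------------------------------------------------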
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Useful functions for computing inflation for nominated proof of stake. + +use core::convert::TryFrom; +use sp_arithmetic::{ + biguint::BigUint, + traits::{SaturatedConversion, Zero}, + PerThing, Perquintill, +}; + +/// Compute yearly inflation using the function +/// +/// ```ignore +/// I(x) = for x between 0 and x_ideal: x / x_ideal, +/// for x between x_ideal and 1: 2^((x_ideal - x) / d) +/// ``` +/// +/// where: +/// * x is the stake rate, i.e. the fraction of total issued tokens that are actively staked behind +/// validators. +/// * d is the falloff or `decay_rate` +/// * x_ideal: the ideal stake rate. +/// +/// The result is meant to be scaled with minimum inflation and maximum inflation. +/// +/// (as detailed +/// [here](https://research.web3.foundation/en/latest/polkadot/economics/1-token-economics.html#inflation-model-with-parachains)) +/// +/// Arguments are: +/// * `stake`: The fraction of total issued tokens that are actively staked behind validators. +/// Known as `x` in the literature. Must be between 0 and 1. +/// * `ideal_stake`: The fraction of total issued tokens that should be actively staked behind +/// validators. Known as `x_ideal` in the literature. Must be between 0 and 1. +/// * `falloff`: Known as `decay_rate` in the literature. A co-efficient dictating the strength of +/// the global incentivization to get the `ideal_stake`. A higher number results in less typical +/// inflation at the cost of greater volatility for validators. Must be more than 0.01. +pub fn compute_inflation(stake: P, ideal_stake: P, falloff: P) -> P { + if stake < ideal_stake { + // ideal_stake is more than 0 because it is strictly more than stake + return stake / ideal_stake + } + + if falloff < P::from_percent(1.into()) { + log::error!("Invalid inflation computation: falloff less than 1% is not supported"); + return PerThing::zero() + } + + let accuracy = { + let mut a = BigUint::from(Into::::into(P::ACCURACY)); + a.lstrip(); + a + }; + + let mut falloff = BigUint::from(falloff.deconstruct().into()); + falloff.lstrip(); + + let ln2 = { + /// `ln(2)` expressed in perquintillionths. + const LN2: u64 = 0_693_147_180_559_945_309; + let ln2 = P::from_rational(LN2.into(), Perquintill::ACCURACY.into()); + BigUint::from(ln2.deconstruct().into()) + }; + + // falloff is stripped above.
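// ----------------------------------------------------------------------------------
// Editorial note (not part of the patch): `ln2` and `falloff` are both fixed-point
// values scaled by `accuracy`; dividing one by the other directly would cancel that
// scaling, so `ln2` is first multiplied by `accuracy` (just below) to keep
// `ln2_div_d` scaled by `accuracy` as well. `compute_taylor_term` further down
// multiplies three such scaled values together, which is why it divides by
// `p.accuracy` twice to come back to a single scaling factor.
// ----------------------------------------------------------------------------------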
+ let ln2_div_d = div_by_stripped(ln2.mul(&accuracy), &falloff); + + let inpos_param = INPoSParam { + x_ideal: BigUint::from(ideal_stake.deconstruct().into()), + x: BigUint::from(stake.deconstruct().into()), + accuracy, + ln2_div_d, + }; + + let res = compute_taylor_serie_part(&inpos_param); + + match u128::try_from(res.clone()) { + Ok(res) if res <= Into::::into(P::ACCURACY) => P::from_parts(res.saturated_into()), + // If result is beyond bounds there is nothing we can do + _ => { + log::error!("Invalid inflation computation: unexpected result {:?}", res); + P::zero() + }, + } +} + +/// Internal struct holding parameter info alongside other cached values. +/// +/// All expressed in parts of `accuracy`. +struct INPoSParam { + ln2_div_d: BigUint, + x_ideal: BigUint, + x: BigUint, + /// Must be stripped and have no leading zeros. + accuracy: BigUint, +} + +/// Compute `2^((x_ideal - x) / d)` using a Taylor series. +/// +/// x must be strictly more than x_ideal. +/// +/// The result is expressed with accuracy `INPoSParam.accuracy` +fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { + // The last computed taylor term. + let mut last_taylor_term = p.accuracy.clone(); + + // Whether the taylor sum is positive. + let mut taylor_sum_positive = true; + + // The sum of all taylor terms. + let mut taylor_sum = last_taylor_term.clone(); + + for k in 1..300 { + last_taylor_term = compute_taylor_term(k, &last_taylor_term, p); + + if last_taylor_term.is_zero() { + break + } + + let last_taylor_term_positive = k % 2 == 0; + + if taylor_sum_positive == last_taylor_term_positive { + taylor_sum = taylor_sum.add(&last_taylor_term); + } else { + if taylor_sum >= last_taylor_term { + taylor_sum = taylor_sum + .sub(&last_taylor_term) + // NOTE: Should never happen as checked above + .unwrap_or_else(|e| e); + } else { + taylor_sum_positive = !taylor_sum_positive; + taylor_sum = last_taylor_term + .clone() + .sub(&taylor_sum) + // NOTE: Should never happen as checked above + .unwrap_or_else(|e| e); + } + } + } + + if !taylor_sum_positive { + return BigUint::zero() + } + + taylor_sum.lstrip(); + taylor_sum +} + +/// Return the absolute value of the k-th taylor term of `2^((x_ideal - x) / d)`, i.e. +/// `((x - x_ideal) * ln(2) / d)^k / k!` +/// +/// x must be strictly more than x_ideal. +/// +/// We compute the term from the last term using this formula: +/// +/// `((x - x_ideal) * ln(2) / d)^k / k! == previous_term * (x - x_ideal) * ln(2) / d / k` +/// +/// `previous_taylor_term` and result are expressed with accuracy `INPoSParam.accuracy` +fn compute_taylor_term(k: u32, previous_taylor_term: &BigUint, p: &INPoSParam) -> BigUint { + let x_minus_x_ideal = + p.x.clone() + .sub(&p.x_ideal) + // NOTE: Should never happen, as x must be more than x_ideal + .unwrap_or_else(|_| BigUint::zero()); + + let res = previous_taylor_term.clone().mul(&x_minus_x_ideal).mul(&p.ln2_div_d).div_unit(k); + + // p.accuracy is stripped by definition. + let res = div_by_stripped(res, &p.accuracy); + let mut res = div_by_stripped(res, &p.accuracy); + + res.lstrip(); + res +} + +/// Compute a div b. +/// +/// Requires `b` to be stripped and have no leading zeros.
+fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { + a.lstrip(); + + if b.len() == 0 { + log::error!("Computation error: Invalid division"); + return BigUint::zero() + } + + if b.len() == 1 { + return a.div_unit(b.checked_get(0).unwrap_or(1)) + } + + if b.len() > a.len() { + return BigUint::zero() + } + + if b.len() == a.len() { + // 100_000^2 is more than 2^32-1, thus `new_a` has more limbs than `b`. + let mut new_a = a.mul(&BigUint::from(100_000u64.pow(2))); + new_a.lstrip(); + + debug_assert!(new_a.len() > b.len()); + return new_a + .div(b, false) + .map(|res| res.0) + .unwrap_or_else(|| BigUint::zero()) + .div_unit(100_000) + .div_unit(100_000) + } + + a.div(b, false).map(|res| res.0).unwrap_or_else(|| BigUint::zero()) +} diff --git a/frame/staking/reward-fn/tests/test.rs b/frame/staking/reward-fn/tests/test.rs new file mode 100644 index 0000000000000..dc5b661c4098d --- /dev/null +++ b/frame/staking/reward-fn/tests/test.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_arithmetic::{PerThing, PerU16, Perbill, Percent, Perquintill}; + +/// This tests the precision and panics if the error is too big.
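// ----------------------------------------------------------------------------------
// Editorial sketch (not part of the patch): a worked example of the curve that
// `compute_inflation` above implements. With x_ideal = 0.5 and d = 0.05, a stake
// rate of x = 0.75 falls in the exponential regime:
//   I(0.75) = 2^((0.5 - 0.75) / 0.05) = 2^-5 = 0.03125.
// The demo function and its bounds are my own; the only APIs assumed are the
// `sp_arithmetic` ones already used in the hunks above.
use sp_arithmetic::Perquintill;

fn inflation_demo() {
    let stake = Perquintill::from_rational(75u64, 100u64);
    let ideal_stake = Perquintill::from_rational(50u64, 100u64);
    let falloff = Perquintill::from_rational(5u64, 100u64);
    let i = pallet_staking_reward_fn::compute_inflation(stake, ideal_stake, falloff);
    // Expect ~3.125%, up to the precision exercised by the tests that follow.
    assert!(i >= Perquintill::from_rational(3_120u64, 100_000u64));
    assert!(i <= Perquintill::from_rational(3_130u64, 100_000u64));
}
// ----------------------------------------------------------------------------------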
+/// +/// The error is asserted to be less than or equal to 8/accuracy or 8*f64::EPSILON. +fn test_precision(stake: P, ideal_stake: P, falloff: P) { + let accuracy_f64 = Into::::into(P::ACCURACY) as f64; + let res = pallet_staking_reward_fn::compute_inflation(stake, ideal_stake, falloff); + let res = Into::::into(res.deconstruct()) as f64 / accuracy_f64; + + let expect = float_i_npos(stake, ideal_stake, falloff); + + let error = (res - expect).abs(); + + if error > 8f64 / accuracy_f64 && error > 8.0 * f64::EPSILON { + panic!( + "stake: {:?}, ideal_stake: {:?}, falloff: {:?}, res: {}, expect: {}", + stake, ideal_stake, falloff, res, expect + ); + } +} + +/// Compute the inflation using floats. +fn float_i_npos(stake: P, ideal_stake: P, falloff: P) -> f64 { + let accuracy_f64 = Into::::into(P::ACCURACY) as f64; + + let ideal_stake = Into::::into(ideal_stake.deconstruct()) as f64 / accuracy_f64; + let stake = Into::::into(stake.deconstruct()) as f64 / accuracy_f64; + let falloff = Into::::into(falloff.deconstruct()) as f64 / accuracy_f64; + + let x_ideal = ideal_stake; + let x = stake; + let d = falloff; + + if x < x_ideal { + x / x_ideal + } else { + 2_f64.powf((x_ideal - x) / d) + } +} + +#[test] +fn test_precision_for_minimum_falloff() { + fn test_falloff_precision_for_minimum_falloff() { + for stake in 0..1_000 { + let stake = P::from_rational(stake, 1_000); + let ideal_stake = P::zero(); + let falloff = P::from_rational(1, 100); + test_precision(stake, ideal_stake, falloff); + } + } + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); +} + +#[test] +fn compute_inflation_works() { + fn compute_inflation_works() { + for stake in 0..100 { + for ideal_stake in 0..10 { + for falloff in 1..10 { + let stake = P::from_rational(stake, 100); + let ideal_stake = P::from_rational(ideal_stake, 10); + let falloff = P::from_rational(falloff, 100); + test_precision(stake, ideal_stake, falloff); + } + } + } + } + + compute_inflation_works::(); + + compute_inflation_works::(); + + compute_inflation_works::(); + + compute_inflation_works::(); +} diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index e9467fa50be15..bdc3d81f3c29b 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,48 +18,67 @@ //! Staking pallet benchmarking.
use super::*; -use crate::Module as Staking; +use crate::Pallet as Staking; use testing_utils::*; -use sp_runtime::traits::One; +use frame_support::{ + pallet_prelude::*, + traits::{Currency, Get, Imbalance}, +}; +use sp_runtime::{ + traits::{StaticLookup, Zero}, + Perbill, Percent, +}; +use sp_staking::SessionIndex; +use sp_std::prelude::*; + +pub use frame_benchmarking::{ + account, benchmarks, impl_benchmark_test_suite, whitelist_account, whitelisted_caller, +}; use frame_system::RawOrigin; -pub use frame_benchmarking::{benchmarks, account, whitelisted_caller, whitelist_account}; +use sp_runtime::traits::{Bounded, One}; + const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_VALIDATORS: u32 = 1000; +const MAX_NOMINATORS: u32 = 1000; const MAX_SLASHES: u32 = 1000; // Add slashing spans to a user account. Not relevant for actual use, only to benchmark // read and write operations. -fn add_slashing_spans(who: &T::AccountId, spans: u32) { - if spans == 0 { return } +fn add_slashing_spans(who: &T::AccountId, spans: u32) { + if spans == 0 { + return + } // For the first slashing span, we initialize let mut slashing_spans = crate::slashing::SlashingSpans::new(0); SpanSlash::::insert((who, 0), crate::slashing::SpanRecord::default()); - for i in 1 .. spans { + for i in 1..spans { assert!(slashing_spans.end_span(i)); SpanSlash::::insert((who, i), crate::slashing::SpanRecord::default()); } SlashingSpans::::insert(who, slashing_spans); } -// This function generates one validator being nominated by n nominators, and returns the validator -// stash account and the nominators' stash and controller. It also starts an era and creates pending payouts. -pub fn create_validator_with_nominators( +// This function clears all existing validators and nominators from the set, and generates one new +// validator being nominated by n nominators, and returns the validator stash account and the +// nominators' stash and controller. It also starts an era and creates pending payouts. +pub fn create_validator_with_nominators( n: u32, upper_bound: u32, dead: bool, - destination: RewardDestination + destination: RewardDestination, ) -> Result<(T::AccountId, Vec<(T::AccountId, T::AccountId)>), &'static str> { + // Clean up any existing state. + clear_validators_and_nominators::(); let mut points_total = 0; let mut points_individual = Vec::new(); let (v_stash, v_controller) = create_stash_controller::(0, 100, destination.clone())?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller).into(), validator_prefs)?; let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); @@ -69,25 +88,28 @@ pub fn create_validator_with_nominators( let mut nominators = Vec::new(); // Give the validator n nominators, but keep total users in the system the same. - for i in 0 .. upper_bound { + for i in 0..upper_bound { let (n_stash, n_controller) = if !dead { - create_stash_controller::(u32::max_value() - i, 100, destination.clone())? + create_stash_controller::(u32::MAX - i, 100, destination.clone())? } else { - create_stash_and_dead_controller::(u32::max_value() - i, 100, destination.clone())? + create_stash_and_dead_controller::(u32::MAX - i, 100, destination.clone())? 
}; if i < n { - Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), vec![stash_lookup.clone()])?; + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + vec![stash_lookup.clone()], + )?; nominators.push((n_stash, n_controller)); } } - ValidatorCount::put(1); + ValidatorCount::::put(1); // Start a new Era - let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + let new_validators = Staking::::try_trigger_new_era(SessionIndex::one(), true).unwrap(); - assert!(new_validators.len() == 1); - assert!(new_validators[0] == v_stash, "Our validator was not selected!"); + assert_eq!(new_validators.len(), 1); + assert_eq!(new_validators[0], v_stash, "Our validator was not selected!"); // Give Era Points let reward = EraRewardPoints:: { @@ -95,13 +117,13 @@ pub fn create_validator_with_nominators( individual: points_individual.into_iter().collect(), }; - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); ErasRewardPoints::::insert(current_era, reward); // Create reward pool let total_payout = T::Currency::minimum_balance() .saturating_mul(upper_bound.into()) - .saturating_mul(1000.into()); + .saturating_mul(1000u32.into()); >::insert(current_era, total_payout); Ok((v_stash, nominators)) @@ -110,14 +132,12 @@ pub fn create_validator_with_nominators( const USER_SEED: u32 = 999666; benchmarks! { - _{} - bond { let stash = create_funded_user::("stash", USER_SEED, 100); let controller = create_funded_user::("controller", USER_SEED, 100); let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); let reward_destination = RewardDestination::Staked; - let amount = T::Currency::minimum_balance() * 10.into(); + let amount = T::Currency::minimum_balance() * 10u32.into(); whitelist_account!(stash); }: _(RawOrigin::Signed(stash.clone()), controller_lookup, amount, reward_destination) verify { @@ -127,7 +147,7 @@ benchmarks! { bond_extra { let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; - let max_additional = T::Currency::minimum_balance() * 10.into(); + let max_additional = T::Currency::minimum_balance() * 10u32.into(); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_bonded: BalanceOf = ledger.active; whitelist_account!(stash); @@ -140,7 +160,7 @@ benchmarks! { unbond { let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; - let amount = T::Currency::minimum_balance() * 10.into(); + let amount = T::Currency::minimum_balance() * 10u32.into(); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_bonded: BalanceOf = ledger.active; whitelist_account!(controller); @@ -157,9 +177,9 @@ benchmarks! { let s in 0 .. MAX_SPANS; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; add_slashing_spans::(&stash, s); - let amount = T::Currency::minimum_balance() * 5.into(); // Half of total + let amount = T::Currency::minimum_balance() * 5u32.into(); // Half of total Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; - CurrentEra::put(EraIndex::max_value()); + CurrentEra::::put(EraIndex::max_value()); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_total: BalanceOf = ledger.total; whitelist_account!(controller); @@ -176,9 +196,9 @@ benchmarks! { let s in 0 .. 
MAX_SPANS; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; add_slashing_spans::(&stash, s); - let amount = T::Currency::minimum_balance() * 10.into(); + let amount = T::Currency::minimum_balance() * 10u32.into(); Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; - CurrentEra::put(EraIndex::max_value()); + CurrentEra::::put(EraIndex::max_value()); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_total: BalanceOf = ledger.total; whitelist_account!(controller); @@ -196,9 +216,72 @@ benchmarks! { assert!(Validators::::contains_key(stash)); } - // Worst case scenario, MAX_NOMINATIONS + kick { + // scenario: we want to kick `k` nominators from nominating us (we are a validator). + // we'll assume that `k` is under 128 for the purposes of determining the slope. + // each nominator should have `T::MAX_NOMINATIONS` validators nominated, and our validator + // should be somewhere in there. + let k in 1 .. 128; + + // these are the other validators; there are `T::MAX_NOMINATIONS - 1` of them, so + // there are a total of `T::MAX_NOMINATIONS` validators in the system. + let rest_of_validators = create_validators::(T::MAX_NOMINATIONS - 1, 100)?; + + // this is the validator that will be kicking. + let (stash, controller) = create_stash_controller::( + T::MAX_NOMINATIONS - 1, + 100, + Default::default(), + )?; + let stash_lookup: ::Source = T::Lookup::unlookup(stash.clone()); + + // they start validating. + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), Default::default())?; + + // we now create the nominators. there will be `k` of them; each will nominate all + // validators. we will then kick each of the `k` nominators from the main validator. + let mut nominator_stashes = Vec::with_capacity(k as usize); + for i in 0 .. k { + // create a nominator stash. + let (n_stash, n_controller) = create_stash_controller::( + T::MAX_NOMINATIONS + i, + 100, + Default::default(), + )?; + + // bake the nominations; we first clone them from the rest of the validators. + let mut nominations = rest_of_validators.clone(); + // then insert "our" validator somewhere in there (we vary it) to avoid accidental + // optimisations/pessimisations. + nominations.insert(i as usize % (nominations.len() + 1), stash_lookup.clone()); + // then we nominate. + Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), nominations)?; + + nominator_stashes.push(n_stash); + } + + // all nominators now should be nominating our validator... + for n in nominator_stashes.iter() { + assert!(Nominators::::get(n).unwrap().targets.contains(&stash)); + } + + // we need the unlookuped version of the nominator stash for the kick. + let kicks = nominator_stashes.iter() + .map(|n| T::Lookup::unlookup(n.clone())) + .collect::>(); + + whitelist_account!(controller); + }: _(RawOrigin::Signed(controller), kicks) + verify { + // all nominators now should *not* be nominating our validator... + for n in nominator_stashes.iter() { + assert!(!Nominators::::get(n).unwrap().targets.contains(&stash)); + } + } + + // Worst case scenario, T::MAX_NOMINATIONS nominate { - let n in 1 .. MAX_NOMINATIONS as u32; + let n in 1 .. T::MAX_NOMINATIONS; let (stash, controller) = create_stash_controller::(n + 1, 100, Default::default())?; let validators = create_validators::(n, 100)?; whitelist_account!(controller); @@ -235,17 +318,17 @@ benchmarks! 
{ let validator_count = MAX_VALIDATORS; }: _(RawOrigin::Root, validator_count) verify { - assert_eq!(ValidatorCount::get(), validator_count); + assert_eq!(ValidatorCount::::get(), validator_count); } force_no_eras {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::get(), Forcing::ForceNone); } + verify { assert_eq!(ForceEra::::get(), Forcing::ForceNone); } force_new_era {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::get(), Forcing::ForceNew); } + verify { assert_eq!(ForceEra::::get(), Forcing::ForceNew); } force_new_era_always {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::get(), Forcing::ForceAlways); } + verify { assert_eq!(ForceEra::::get(), Forcing::ForceAlways); } // Worst case scenario, the list of invulnerables is very long. set_invulnerables { @@ -286,8 +369,6 @@ benchmarks! { payout_stakers_dead_controller { let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; - // Clean up existing validators - Validators::::remove_all(); let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -295,7 +376,7 @@ benchmarks! { RewardDestination::Controller, )?; - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); // set the commission for this particular era as well. >::insert(current_era, validator.clone(), >::validators(&validator)); @@ -321,8 +402,6 @@ benchmarks! { payout_stakers_alive_staked { let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; - // Clean up existing validators - Validators::::remove_all(); let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -330,7 +409,7 @@ benchmarks! { RewardDestination::Staked, )?; - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); // set the commission for this particular era as well. >::insert(current_era, validator.clone(), >::validators(&validator)); @@ -362,7 +441,7 @@ benchmarks! { let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); let unlock_chunk = UnlockChunk::> { - value: 1.into(), + value: 1u32.into(), era: EraIndex::zero(), }; for _ in 0 .. l { @@ -380,8 +459,8 @@ benchmarks! { set_history_depth { let e in 1 .. 100; - HistoryDepth::put(e); - CurrentEra::put(e); + HistoryDepth::::put(e); + CurrentEra::::put(e); for i in 0 .. e { >::insert(i, T::AccountId::default(), Exposure::>::default()); >::insert(i, T::AccountId::default(), Exposure::>::default()); @@ -389,32 +468,45 @@ benchmarks! { >::insert(i, BalanceOf::::one()); >::insert(i, EraRewardPoints::::default()); >::insert(i, BalanceOf::::one()); - ErasStartSessionIndex::insert(i, i); + ErasStartSessionIndex::::insert(i, i); } - }: _(RawOrigin::Root, EraIndex::zero(), u32::max_value()) + }: _(RawOrigin::Root, EraIndex::zero(), u32::MAX) verify { - assert_eq!(HistoryDepth::get(), 0); + assert_eq!(HistoryDepth::::get(), 0); } reap_stash { let s in 1 .. 
MAX_SPANS; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; add_slashing_spans::(&stash, s); - T::Currency::make_free_balance_be(&stash, 0.into()); + T::Currency::make_free_balance_be(&stash, T::Currency::minimum_balance()); whitelist_account!(controller); + + assert!(Bonded::::contains_key(&stash)); + assert!(Validators::::contains_key(&stash)); + }: _(RawOrigin::Signed(controller), stash.clone(), s) verify { assert!(!Bonded::::contains_key(&stash)); + assert!(!Validators::::contains_key(&stash)); } new_era { let v in 1 .. 10; let n in 1 .. 100; - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; + create_validators_with_nominators_for_era::( + v, + n, + ::MAX_NOMINATIONS as usize, + false, + None, + )?; let session_index = SessionIndex::one(); }: { - let validators = Staking::::new_era(session_index).ok_or("`new_era` failed")?; + let validators = Staking::::try_trigger_new_era(session_index, true) + .ok_or("`new_era` failed")?; assert!(validators.len() == v as usize); } @@ -422,12 +514,18 @@ benchmarks! { payout_all { let v in 1 .. 10; let n in 1 .. 100; - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; + create_validators_with_nominators_for_era::( + v, + n, + ::MAX_NOMINATIONS as usize, + false, + None, + )?; // Start a new Era - let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + let new_validators = Staking::::try_trigger_new_era(SessionIndex::one(), true).unwrap(); assert!(new_validators.len() == v as usize); - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); let mut points_total = 0; let mut points_individual = Vec::new(); let mut payout_calls_arg = Vec::new(); @@ -447,7 +545,7 @@ benchmarks! { ErasRewardPoints::::insert(current_era, reward); // Create reward pool - let total_payout = T::Currency::minimum_balance() * 1000.into(); + let total_payout = T::Currency::minimum_balance() * 1000u32.into(); >::insert(current_era, total_payout); let caller: T::AccountId = whitelisted_caller(); @@ -463,14 +561,14 @@ benchmarks! { let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); let unlock_chunk = UnlockChunk::> { - value: 1.into(), + value: 1u32.into(), era: EraIndex::zero(), }; for _ in 0 .. l { staking_ledger.unlocking.push(unlock_chunk.clone()) } Ledger::::insert(controller, staking_ledger); - let slash_amount = T::Currency::minimum_balance() * 10.into(); + let slash_amount = T::Currency::minimum_balance() * 10u32.into(); let balance_before = T::Currency::free_balance(&stash); }: { crate::slashing::do_slash::( @@ -484,236 +582,94 @@ benchmarks! { assert!(balance_before > balance_after); } - // This benchmark create `v` validators intent, `n` nominators intent, in total creating `e` - // edges. - #[extra] - submit_solution_initial { - // number of validator intention. This will be equal to `ElectionSize::validators`. - let v in 200 .. 400; - // number of nominator intention. This will be equal to `ElectionSize::nominators`. - let n in 500 .. 1000; - // number of assignments. Basically, number of active nominators. This will be equal to - // `compact.len()`. - let a in 200 .. 400; - // number of winners, also ValidatorCount. This will be equal to `winner.len()`. - let w in 16 .. 
100; - - ensure!(w as usize >= MAX_NOMINATIONS, "doesn't support lower value"); - - let winners = create_validators_with_nominators_for_era::( - v, - n, - MAX_NOMINATIONS, - false, - Some(w), - )?; - - // needed for the solution to be generates. - assert!(>::create_stakers_snapshot().0); - - // set number of winners - ValidatorCount::put(w); - - // create a assignments in total for the w winners. - let (winners, assignments) = create_assignments_for_offchain::(a, winners)?; - - let ( - winners, - compact, - score, - size - ) = offchain_election::prepare_submission::(assignments, winners, false, T::MaximumBlockWeight::get()).unwrap(); - - assert_eq!( - winners.len(), compact.unique_targets().len(), - "unique targets ({}) and winners ({}) count not same. This solution is not valid.", - compact.unique_targets().len(), - winners.len(), - ); - - // needed for the solution to be accepted - >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); - - let era = >::current_era().unwrap_or(0); - let caller: T::AccountId = account("caller", n, SEED); - whitelist_account!(caller); - }: { - let result = >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ); - assert!(result.is_ok()); - } - verify { - // new solution has been accepted. - assert_eq!(>::queued_score().unwrap(), score); - } - - // same as submit_solution_initial but we place a very weak solution on chian first. - submit_solution_better { + get_npos_voters { // number of validator intention. - let v in 200 .. 400; + let v in (MAX_VALIDATORS / 2) .. MAX_VALIDATORS; // number of nominator intention. - let n in 500 .. 1000; - // number of assignments. Basically, number of active nominators. - let a in 200 .. 400; - // number of winners, also ValidatorCount. - let w in 16 .. 100; - - ensure!(w as usize >= MAX_NOMINATIONS, "doesn't support lower value"); - - let winners = create_validators_with_nominators_for_era::( - v, - n, - MAX_NOMINATIONS, - false, - Some(w), - )?; - - // needed for the solution to be generates. - assert!(>::create_stakers_snapshot().0); - - // set number of winners - ValidatorCount::put(w); - - // create a assignments in total for the w winners. - let (winners, assignments) = create_assignments_for_offchain::(a, winners)?; + let n in (MAX_NOMINATORS / 2) .. MAX_NOMINATORS; + // total number of slashing spans. Assigned to validators randomly. + let s in 1 .. 20; - let single_winner = winners[0].0.clone(); + let validators = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)? + .into_iter() + .map(|v| T::Lookup::lookup(v).unwrap()) + .collect::>(); - let ( - winners, - compact, - score, - size - ) = offchain_election::prepare_submission::(assignments, winners, false, T::MaximumBlockWeight::get()).unwrap(); - - assert_eq!( - winners.len(), compact.unique_targets().len(), - "unique targets ({}) and winners ({}) count not same. This solution is not valid.", - compact.unique_targets().len(), - winners.len(), - ); - - // needed for the solution to be accepted - >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); - - let era = >::current_era().unwrap_or(0); - let caller: T::AccountId = account("caller", n, SEED); - whitelist_account!(caller); - - // submit a very bad solution on-chain - { - // this is needed to fool the chain to accept this solution. 
- ValidatorCount::put(1); - let (winners, compact, score, size) = get_single_winner_solution::(single_winner)?; - assert!( - >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ).is_ok()); - - // new solution has been accepted. - assert_eq!(>::queued_score().unwrap(), score); - ValidatorCount::put(w); - } + (0..s).for_each(|index| { + add_slashing_spans::(&validators[index as usize], 10); + }); }: { - let result = >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ); - assert!(result.is_ok()); - } - verify { - // new solution has been accepted. - assert_eq!(>::queued_score().unwrap(), score); + let voters = >::get_npos_voters(); + assert_eq!(voters.len() as u32, v + n); } - // This will be early rejected based on the score. - #[extra] - submit_solution_weaker { + get_npos_targets { // number of validator intention. - let v in 200 .. 400; + let v in (MAX_VALIDATORS / 2) .. MAX_VALIDATORS; // number of nominator intention. - let n in 500 .. 1000; - - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None)?; - - // needed for the solution to be generates. - assert!(>::create_stakers_snapshot().0); - - // needed for the solution to be accepted - >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); - let era = >::current_era().unwrap_or(0); - let caller: T::AccountId = account("caller", n, SEED); - whitelist_account!(caller); - - // submit a seq-phragmen with all the good stuff on chain. - { - let (winners, compact, score, size) = get_seq_phragmen_solution::(true); - assert_eq!( - winners.len(), compact.unique_targets().len(), - "unique targets ({}) and winners ({}) count not same. This solution is not valid.", - compact.unique_targets().len(), - winners.len(), - ); - assert!( - >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ).is_ok() - ); + let n = MAX_NOMINATORS; - // new solution has been accepted. - assert_eq!(>::queued_score().unwrap(), score); - } - - // prepare a bad solution. This will be very early rejected. - let (winners, compact, score, size) = get_weak_solution::(true); + let _ = create_validators_with_nominators_for_era::(v, n, T::MAX_NOMINATIONS as usize, false, None)?; }: { - assert!( - >::submit_election_solution( - RawOrigin::Signed(caller.clone()).into(), - winners, - compact, - score.clone(), - era, - size, - ).is_err() - ); + let targets = >::get_npos_targets(); + assert_eq!(targets.len() as u32, v); + } + + set_staking_limits { + // This function always does the same thing... just write to 4 storage items. 
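+ // In the benchmarking DSL below, `_` expands to a dispatch of the extrinsic that shares
+ // this benchmark's name (`set_staking_limits`), called with the listed arguments; the
+ // `verify` block then asserts on the storage values that the call wrote.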
+ }: _( + RawOrigin::Root, + BalanceOf::::max_value(), + BalanceOf::::max_value(), + Some(u32::MAX), + Some(u32::MAX), + Some(Percent::max_value()) + ) verify { + assert_eq!(MinNominatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MinValidatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MaxNominatorsCount::::get(), Some(u32::MAX)); + assert_eq!(MaxValidatorsCount::::get(), Some(u32::MAX)); + assert_eq!(ChillThreshold::::get(), Some(Percent::from_percent(100))); + } + + chill_other { + let (_, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), ValidatorPrefs::default())?; + Staking::::set_staking_limits( + RawOrigin::Root.into(), + BalanceOf::::max_value(), + BalanceOf::::max_value(), + Some(0), + Some(0), + Some(Percent::from_percent(0)) + )?; + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), controller.clone()) + verify { + assert!(!Validators::::contains_key(controller)); } } #[cfg(test)] mod tests { use super::*; - use crate::mock::{ExtBuilder, Test, Balances, Staking, Origin}; + use crate::mock::{Balances, ExtBuilder, Origin, Staking, Test}; use frame_support::assert_ok; #[test] fn create_validators_with_nominators_for_era_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let v = 10; let n = 100; - create_validators_with_nominators_for_era::(v, n, MAX_NOMINATIONS, false, None) - .unwrap(); + create_validators_with_nominators_for_era::( + v, + n, + ::MAX_NOMINATIONS as usize, + false, + None, + ) + .unwrap(); let count_validators = Validators::::iter().count(); let count_nominators = Nominators::::iter().count(); @@ -725,19 +681,20 @@ mod tests { #[test] fn create_validator_with_nominators_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let n = 10; let (validator_stash, nominators) = create_validator_with_nominators::( n, - ::MaxNominatorRewardedPerValidator::get() as u32, + ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, - ).unwrap(); + ) + .unwrap(); assert_eq!(nominators.len() as u32, n); - let current_era = CurrentEra::get().unwrap(); + let current_era = CurrentEra::::get().unwrap(); let original_free_balance = Balances::free_balance(&validator_stash); assert_ok!(Staking::payout_stakers(Origin::signed(1337), validator_stash, current_era)); @@ -749,15 +706,16 @@ mod tests { #[test] fn add_slashing_spans_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let n = 10; let (validator_stash, _nominators) = create_validator_with_nominators::( n, - ::MaxNominatorRewardedPerValidator::get() as u32, + ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, - ).unwrap(); + ) + .unwrap(); // Add 20 slashing spans let num_of_slashing_spans = 20; @@ -765,14 +723,14 @@ mod tests { let slashing_spans = SlashingSpans::::get(&validator_stash).unwrap(); assert_eq!(slashing_spans.iter().count(), num_of_slashing_spans as usize); - for i in 0 .. 
num_of_slashing_spans { + for i in 0..num_of_slashing_spans { assert!(SpanSlash::::contains_key((&validator_stash, i))); } // Test everything is cleaned up assert_ok!(Staking::kill_stash(&validator_stash, num_of_slashing_spans)); assert!(SlashingSpans::::get(&validator_stash).is_none()); - for i in 0 .. num_of_slashing_spans { + for i in 0..num_of_slashing_spans { assert!(!SpanSlash::::contains_key((&validator_stash, i))); } }); @@ -780,63 +738,31 @@ mod tests { #[test] fn test_payout_all() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { let v = 10; let n = 100; let selected_benchmark = SelectedBenchmark::payout_all; - let c = vec![(frame_benchmarking::BenchmarkParameter::v, v), (frame_benchmarking::BenchmarkParameter::n, n)]; + let c = vec![ + (frame_benchmarking::BenchmarkParameter::v, v), + (frame_benchmarking::BenchmarkParameter::n, n), + ]; let closure_to_benchmark = >::instance( &selected_benchmark, &c, - true - ).unwrap(); + true, + ) + .unwrap(); assert_ok!(closure_to_benchmark()); }); } - - #[test] - fn test_benchmarks() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { - assert_ok!(test_benchmark_bond::()); - assert_ok!(test_benchmark_bond_extra::()); - assert_ok!(test_benchmark_unbond::()); - assert_ok!(test_benchmark_withdraw_unbonded_update::()); - assert_ok!(test_benchmark_withdraw_unbonded_kill::()); - assert_ok!(test_benchmark_validate::()); - assert_ok!(test_benchmark_nominate::()); - assert_ok!(test_benchmark_chill::()); - assert_ok!(test_benchmark_set_payee::()); - assert_ok!(test_benchmark_set_controller::()); - assert_ok!(test_benchmark_set_validator_count::()); - assert_ok!(test_benchmark_force_no_eras::()); - assert_ok!(test_benchmark_force_new_era::()); - assert_ok!(test_benchmark_force_new_era_always::()); - assert_ok!(test_benchmark_set_invulnerables::()); - assert_ok!(test_benchmark_force_unstake::()); - assert_ok!(test_benchmark_cancel_deferred_slash::()); - assert_ok!(test_benchmark_payout_stakers_dead_controller::()); - assert_ok!(test_benchmark_payout_stakers_alive_staked::()); - assert_ok!(test_benchmark_rebond::()); - assert_ok!(test_benchmark_set_history_depth::()); - assert_ok!(test_benchmark_reap_stash::()); - assert_ok!(test_benchmark_new_era::()); - assert_ok!(test_benchmark_do_slash::()); - assert_ok!(test_benchmark_payout_all::()); - // only run one of them to same time on the CI. ignore the other two. - assert_ok!(test_benchmark_submit_solution_initial::()); - }); - } - - #[test] - #[ignore] - fn test_benchmarks_offchain() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { - assert_ok!(test_benchmark_submit_solution_better::()); - assert_ok!(test_benchmark_submit_solution_weaker::()); - }); - } - } + +impl_benchmark_test_suite!( + Staking, + crate::mock::ExtBuilder::default().has_stakers(true), + crate::mock::Test, + exec_name = build_and_execute +); diff --git a/frame/staking/src/default_weights.rs b/frame/staking/src/default_weights.rs deleted file mode 100644 index fa5a05f63824e..0000000000000 --- a/frame/staking/src/default_weights.rs +++ /dev/null @@ -1,169 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Default weights of pallet-staking. -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn bond() -> Weight { - (144278000 as Weight) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - } - fn bond_extra() -> Weight { - (110715000 as Weight) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn unbond() -> Weight { - (99840000 as Weight) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn withdraw_unbonded_update(s: u32, ) -> Weight { - (100728000 as Weight) - .saturating_add((63000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (168879000 as Weight) - .saturating_add((6666000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(7 as Weight)) - .saturating_add(DbWeight::get().writes(8 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn validate() -> Weight { - (35539000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn nominate(n: u32, ) -> Weight { - (48596000 as Weight) - .saturating_add((308000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn chill() -> Weight { - (35144000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn set_payee() -> Weight { - (24255000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_controller() -> Weight { - (52294000 as Weight) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn set_validator_count() -> Weight { - (5185000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn force_no_eras() -> Weight { - (5907000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn force_new_era() -> Weight { - (5917000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn force_new_era_always() -> Weight { - (5952000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_invulnerables(v: u32, ) -> Weight { - (6324000 as Weight) - .saturating_add((9000 as Weight).saturating_mul(v as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn force_unstake(s: u32, ) -> Weight { - (119691000 as Weight) - .saturating_add((6681000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - 
.saturating_add(DbWeight::get().writes(8 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn cancel_deferred_slash(s: u32, ) -> Weight { - (5820201000 as Weight) - .saturating_add((34672000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((92486000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) - } - fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((117324000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) - } - fn rebond(l: u32, ) -> Weight { - (71316000 as Weight) - .saturating_add((142000 as Weight).saturating_mul(l as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn set_history_depth(e: u32, ) -> Weight { - (0 as Weight) - .saturating_add((51901000 as Weight).saturating_mul(e as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - .saturating_add(DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) - } - fn reap_stash(s: u32, ) -> Weight { - (147166000 as Weight) - .saturating_add((6661000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(8 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) - } - fn new_era(v: u32, n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1440459000 as Weight).saturating_mul(v as Weight)) - .saturating_add((182580000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(10 as Weight)) - .saturating_add(DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) - .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(DbWeight::get().writes(8 as Weight)) - .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) - } - fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { - (0 as Weight) - .saturating_add((964000 as Weight).saturating_mul(v as Weight)) - .saturating_add((432000 as Weight).saturating_mul(n as Weight)) - .saturating_add((204294000 as Weight).saturating_mul(a as Weight)) - .saturating_add((9546000 as Weight).saturating_mul(w as Weight)) - .saturating_add(DbWeight::get().reads(6 as Weight)) - .saturating_add(DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) - .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } -} diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index 2161fe20af829..8e44a8c5482e5 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,29 +20,31 @@
 //! The staking rate in NPoS is the total amount of tokens staked by nominators and validators,
 //! divided by the total token supply.

-use sp_runtime::{Perbill, traits::AtLeast32BitUnsigned, curve::PiecewiseLinear};
+use sp_runtime::{curve::PiecewiseLinear, traits::AtLeast32BitUnsigned, Perbill};

 /// The total payout to all validators (and their nominators) per era and maximum payout.
 ///
 /// Defined as such:
-/// `staker-payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year`
-/// `maximum-payout = max_yearly_inflation * total_tokens / era_per_year`
+/// `staker-payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens /
+/// era_per_year`
+/// `maximum-payout = max_yearly_inflation * total_tokens / era_per_year`
 ///
 /// `era_duration` is expressed in milliseconds.
 pub fn compute_total_payout<N>(
     yearly_inflation: &PiecewiseLinear<'static>,
     npos_token_staked: N,
     total_tokens: N,
-    era_duration: u64
-) -> (N, N) where N: AtLeast32BitUnsigned + Clone {
+    era_duration: u64,
+) -> (N, N)
+where
+    N: AtLeast32BitUnsigned + Clone,
+{
     // Milliseconds per year for the Julian year (365.25 days).
     const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100;

-    let portion = Perbill::from_rational_approximation(era_duration as u64, MILLISECONDS_PER_YEAR);
-    let payout = portion * yearly_inflation.calculate_for_fraction_times_denominator(
-        npos_token_staked,
-        total_tokens.clone(),
-    );
+    let portion = Perbill::from_rational(era_duration as u64, MILLISECONDS_PER_YEAR);
+    let payout = portion *
+        yearly_inflation
+            .calculate_for_fraction_times_denominator(npos_token_staked, total_tokens.clone());
     let maximum = portion * (yearly_inflation.maximum * total_tokens);
     (payout, maximum)
 }
@@ -70,7 +72,7 @@ mod test {
     // not 10_000 due to rounding error.
     assert_eq!(super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).1, 9_993);

-    //super::I_NPOS.calculate_for_fraction_times_denominator(25, 100)
+    // super::I_NPOS.calculate_for_fraction_times_denominator(25, 100)
     assert_eq!(super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).0, 2_498);
     assert_eq!(super::compute_total_payout(&I_NPOS, 5_000, 100_000u64, YEAR).0, 3_248);
     assert_eq!(super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, YEAR).0, 6_246);
@@ -98,7 +100,8 @@ mod test {
             2_500_000_000_000_000_000_000_000_000u128,
             5_000_000_000_000_000_000_000_000_000u128,
             HOUR
-        ).0,
+        )
+        .0,
         57_038_500_000_000_000_000_000
     );
 }
diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs
index cd3a71ffabc8a..31b35acdd99aa 100644
--- a/frame/staking/src/lib.rs
+++ b/frame/staking/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,17 +15,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//! # Staking Module
+//! # Staking Pallet
 //!
-//! The Staking module is used to manage funds at stake by network maintainers.
+//! The Staking pallet is used to manage funds at stake by network maintainers.
 //!
-//! - [`staking::Trait`](./trait.Trait.html)
-//! - [`Call`](./enum.Call.html)
-//! - [`Module`](./struct.Module.html)
+//! - [`Config`]
+//! - [`Call`]
+//! - [`Pallet`]
 //!
 //! 
## Overview //! -//! The Staking module is the means by which a set of network maintainers (known as _authorities_ in +//! The Staking pallet is the means by which a set of network maintainers (known as _authorities_ in //! some contexts and _validators_ in others) are chosen based upon those who voluntarily place //! funds under deposit. Under deposit, those funds are rewarded under normal operation but are held //! at pain of _slash_ (expropriation) should the staked maintainer be found not to be discharging @@ -59,22 +59,22 @@ //! //! #### Staking //! -//! Almost any interaction with the Staking module requires a process of _**bonding**_ (also known -//! as being a _staker_). To become *bonded*, a fund-holding account known as the _stash account_, +//! Almost any interaction with the Staking pallet requires a process of _**bonding**_ (also known +//! as being a _staker_). To become *bonded*, a fund-holding register known as the _stash account_, //! which holds some or all of the funds that become frozen in place as part of the staking process, //! is paired with an active **controller** account, which issues instructions on how they shall be //! used. //! -//! An account pair can become bonded using the [`bond`](./enum.Call.html#variant.bond) call. +//! An account pair can become bonded using the [`bond`](Call::bond) call. //! //! Stash accounts can change their associated controller using the -//! [`set_controller`](./enum.Call.html#variant.set_controller) call. +//! [`set_controller`](Call::set_controller) call. //! //! There are three possible roles that any staked account pair can be in: `Validator`, `Nominator` -//! and `Idle` (defined in [`StakerStatus`](./enum.StakerStatus.html)). There are three +//! and `Idle` (defined in [`StakerStatus`]). There are three //! corresponding instructions to change between roles, namely: -//! [`validate`](./enum.Call.html#variant.validate), -//! [`nominate`](./enum.Call.html#variant.nominate), and [`chill`](./enum.Call.html#variant.chill). +//! [`validate`](Call::validate), +//! [`nominate`](Call::nominate), and [`chill`](Call::chill). //! //! #### Validating //! @@ -86,7 +86,7 @@ //! by nominators and their votes. //! //! An account can become a validator candidate via the -//! [`validate`](./enum.Call.html#variant.validate) call. +//! [`validate`](Call::validate) call. //! //! #### Nomination //! @@ -98,16 +98,16 @@ //! the misbehaving/offline validators as much as possible, simply because the nominators will also //! lose funds if they vote poorly. //! -//! An account can become a nominator via the [`nominate`](enum.Call.html#variant.nominate) call. +//! An account can become a nominator via the [`nominate`](Call::nominate) call. //! //! #### Rewards and Slash //! -//! The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace +//! The **reward and slashing** procedure is the core of the Staking pallet, attempting to _embrace //! valid behavior_ while _punishing any misbehavior or lack of availability_. //! //! Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the //! `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to the -//! validator as well as its nominators. Only the [`Trait::MaxNominatorRewardedPerValidator`] +//! validator as well as its nominators. Only the [`Config::MaxNominatorRewardedPerValidator`] //! biggest stakers can claim their reward. This is to limit the i/o cost to mutate storage for each //! nominator's account. //! 
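To picture the clipping that `MaxNominatorRewardedPerValidator` performs, here is a minimal standalone sketch (not part of the patch; `IndividualExposure` is reduced to a `(nominator, stake)` tuple and all names are illustrative):

```rust
/// A simplified individual exposure: (nominator id, stake backing the validator).
type IndividualExposure = (u64, u128);

/// Keep only the `max` largest backers, mirroring how the clipped exposure limits
/// `payout_stakers` I/O to the biggest `MaxNominatorRewardedPerValidator` stakers.
fn clip_exposure(mut others: Vec<IndividualExposure>, max: usize) -> Vec<IndividualExposure> {
    // Sort by stake, largest first.
    others.sort_by(|a, b| b.1.cmp(&a.1));
    // Everything beyond the first `max` entries earns no payout.
    others.truncate(max);
    others
}

fn main() {
    let others = vec![(1, 50), (2, 900), (3, 10), (4, 300)];
    // With a limit of two, only nominators 2 and 4 remain reward-eligible.
    assert_eq!(clip_exposure(others, 2), vec![(2, 900), (4, 300)]);
}
```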
@@ -115,7 +115,7 @@
 //! determined, a value is deducted from the balance of the validator and all the nominators who
 //! voted for this validator (values are deducted from the _stash_ account of the slashed entity).
 //!
 //! Slashing logic is further described in the documentation of the `slashing` module.
 //!
 //! Similar to slashing, rewards are also shared among a validator and its associated nominators.
 //! Yet, the reward funds are not always transferred to the stash account and can be configured. See
@@ -127,23 +127,23 @@
 //! This means that if they are a nominator, they will not be considered as voters anymore and if
 //! they are validators, they will no longer be a candidate for the next election.
 //!
-//! An account can step back via the [`chill`](enum.Call.html#variant.chill) call.
+//! An account can step back via the [`chill`](Call::chill) call.
 //!
 //! ### Session managing
 //!
-//! The module implement the trait `SessionManager`. Which is the only API to query new validator
+//! The pallet implements the trait `SessionManager`, which is the only API to query the new validator
 //! set and to allow this validator set to be rewarded once their era has ended.
 //!
 //! ## Interface
 //!
 //! ### Dispatchable Functions
 //!
-//! The dispatchable functions of the Staking module enable the steps needed for entities to accept
-//! and change their role, alongside some helper functions to get/set the metadata of the module.
+//! The dispatchable functions of the Staking pallet enable the steps needed for entities to accept
+//! and change their role, alongside some helper functions to get/set the metadata of the pallet.
 //!
 //! ### Public Functions
 //!
-//! The Staking module contains many public storage items and (im)mutable functions.
+//! The Staking pallet contains many public storage items and (im)mutable functions.
 //!
 //! ## Usage
 //!
@@ -154,15 +154,15 @@
 //! use frame_system::ensure_signed;
 //! use pallet_staking::{self as staking};
 //!
-//! pub trait Trait: staking::Trait {}
+//! pub trait Config: staking::Config {}
 //!
 //! decl_module! {
-//!     pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+//!     pub struct Module<T: Config> for enum Call where origin: T::Origin {
 //!         /// Reward a validator.
 //!         #[weight = 0]
 //!         pub fn reward_myself(origin) -> dispatch::DispatchResult {
 //!             let reported = ensure_signed(origin)?;
-//!             <staking::Module<T>>::reward_by_ids(vec![(reported, 10)]);
+//!             <staking::Pallet<T>>::reward_by_ids(vec![(reported, 10)]);
 //!             Ok(())
 //!         }
 //!     }
@@ -175,7 +175,7 @@
 //! ### Era payout
 //!
 //! The era payout is computed using the yearly inflation curve defined at
-//! [`T::RewardCurve`](./trait.Trait.html#associatedtype.RewardCurve) as such:
+//! [`Config::EraPayout`] as such:
 //!
 //! ```nocompile
 //! staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year
 //! ```
 //!
 //! This payout is used to reward stakers as defined in the next section.
 //!
 //! ```nocompile
 //! remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout
 //! ```
 //! The remaining reward is sent to the configurable end-point
-//! [`T::RewardRemainder`](./trait.Trait.html#associatedtype.RewardRemainder).
+//! [`Config::RewardRemainder`].
 //!
 //! ### Reward Calculation
 //!
@@ -198,29 +198,28 @@
 //!
 //! Total reward is split among validators and their nominators depending on the number of points
 //! they received during the era. Points are added to a validator using
-//! [`reward_by_ids`](./enum.Call.html#variant.reward_by_ids) or
-//! 
[`reward_by_indices`](./enum.Call.html#variant.reward_by_indices). +//! [`reward_by_ids`](Pallet::reward_by_ids). //! -//! [`Module`](./struct.Module.html) implements -//! [`pallet_authorship::EventHandler`](../pallet_authorship/trait.EventHandler.html) to add reward +//! [`Pallet`] implements +//! [`pallet_authorship::EventHandler`] to add reward //! points to block producer and block producer of referenced uncles. //! //! The validator and its nominator split their reward as following: //! //! The validator can declare an amount, named -//! [`commission`](./struct.ValidatorPrefs.html#structfield.commission), that does not get shared +//! [`commission`](ValidatorPrefs::commission), that does not get shared //! with the nominators at each reward payout through its -//! [`ValidatorPrefs`](./struct.ValidatorPrefs.html). This value gets deducted from the total reward +//! [`ValidatorPrefs`]. This value gets deducted from the total reward //! that is paid to the validator and its nominators. The remaining portion is split among the //! validator and all of the nominators that nominated the validator, proportional to the value //! staked behind this validator (_i.e._ dividing the -//! [`own`](./struct.Exposure.html#structfield.own) or -//! [`others`](./struct.Exposure.html#structfield.others) by -//! [`total`](./struct.Exposure.html#structfield.total) in [`Exposure`](./struct.Exposure.html)). +//! [`own`](Exposure::own) or +//! [`others`](Exposure::others) by +//! [`total`](Exposure::total) in [`Exposure`]). //! //! All entities who receive a reward have the option to choose their reward destination through the -//! [`Payee`](./struct.Payee.html) storage item (see -//! [`set_payee`](enum.Call.html#variant.set_payee)), to be one of the following: +//! [`Payee`] storage item (see +//! [`set_payee`](Call::set_payee)), to be one of the following: //! //! - Controller account, (obviously) not increasing the staked value. //! - Stash account, not increasing the staked value. @@ -231,14 +230,15 @@ //! Any funds already placed into stash can be the target of the following operations: //! //! The controller account can free a portion (or all) of the funds using the -//! [`unbond`](enum.Call.html#variant.unbond) call. Note that the funds are not immediately -//! accessible. Instead, a duration denoted by [`BondingDuration`](./struct.BondingDuration.html) -//! (in number of eras) must pass until the funds can actually be removed. Once the -//! `BondingDuration` is over, the [`withdraw_unbonded`](./enum.Call.html#variant.withdraw_unbonded) -//! call can be used to actually withdraw the funds. +//! [`unbond`](Call::unbond) call. Note that the funds are not immediately +//! accessible. Instead, a duration denoted by +//! [`Config::BondingDuration`] (in number of eras) must +//! pass until the funds can actually be removed. Once the `BondingDuration` is over, the +//! [`withdraw_unbonded`](Call::withdraw_unbonded) call can be used to actually +//! withdraw the funds. //! //! Note that there is a limitation to the number of fund-chunks that can be scheduled to be -//! unlocked in the future via [`unbond`](enum.Call.html#variant.unbond). In case this maximum +//! unlocked in the future via [`unbond`](Call::unbond). In case this maximum //! (`MAX_UNLOCKING_CHUNKS`) is reached, the bonded account _must_ first wait until a successful //! call to `withdraw_unbonded` to remove some of the chunks. //! @@ -255,144 +255,86 @@ //! //! ## GenesisConfig //! -//! 
The Staking module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). The +//! The Staking pallet depends on the [`GenesisConfig`]. The //! `GenesisConfig` is optional and allow to set some initial stakers. //! //! ## Related Modules //! //! - [Balances](../pallet_balances/index.html): Used to manage values at stake. //! - [Session](../pallet_session/index.html): Used to manage sessions. Also, a list of new -//! validators is stored in the Session module's `Validators` at the end of each era. +//! validators is stored in the Session pallet's `Validators` at the end of each era. #![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; #[cfg(test)] mod mock; -#[cfg(test)] -mod tests; #[cfg(any(feature = "runtime-benchmarks", test))] pub mod testing_utils; -#[cfg(any(feature = "runtime-benchmarks", test))] -pub mod benchmarking; +#[cfg(test)] +mod tests; -pub mod slashing; -pub mod offchain_election; pub mod inflation; -pub mod default_weights; +pub mod migrations; +pub mod slashing; +pub mod weights; -use sp_std::{ - result, - prelude::*, - collections::btree_map::BTreeMap, - convert::{TryInto, From}, - mem::size_of, -}; -use codec::{HasCompact, Encode, Decode}; +mod pallet; + +use codec::{Decode, Encode, HasCompact}; use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, - weights::{Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}}, - storage::IterableStorageMap, - dispatch::{ - DispatchResult, DispatchResultWithPostInfo, DispatchErrorWithPostInfo, - WithPostDispatchInfo, - }, - traits::{ - Currency, LockIdentifier, LockableCurrency, WithdrawReasons, OnUnbalanced, Imbalance, Get, - UnixTime, EstimateNextNewSession, EnsureOrigin, CurrencyToVote, IsSubType, - } + traits::{Currency, Get}, + weights::Weight, }; -use pallet_session::historical; +use scale_info::TypeInfo; use sp_runtime::{ - Percent, Perbill, PerU16, PerThing, InnerOf, RuntimeDebug, DispatchError, curve::PiecewiseLinear, - traits::{ - Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, - AtLeast32BitUnsigned, Dispatchable, - }, - transaction_validity::{ - TransactionValidityError, TransactionValidity, ValidTransaction, InvalidTransaction, - TransactionSource, TransactionPriority, - }, + traits::{AtLeast32BitUnsigned, Convert, Saturating, Zero}, + Perbill, RuntimeDebug, }; use sp_staking::{ + offence::{Offence, OffenceError, ReportOffence}, SessionIndex, - offence::{OnOffenceHandler, OffenceDetails, Offence, ReportOffence, OffenceError}, -}; -#[cfg(feature = "std")] -use sp_runtime::{Serialize, Deserialize}; -use frame_system::{ - self as system, ensure_signed, ensure_root, ensure_none, - offchain::SendTransactionTypes, -}; -use sp_npos_elections::{ - ExtendedBalance, Assignment, ElectionScore, ElectionResult as PrimitiveElectionResult, - build_support_map, evaluate_support, seq_phragmen, generate_solution_type, - is_score_better, VotingLimit, SupportMap, VoteWeight, }; +use sp_std::{collections::btree_map::BTreeMap, convert::From, prelude::*}; +pub use weights::WeightInfo; -const STAKING_ID: LockIdentifier = *b"staking "; -pub const MAX_UNLOCKING_CHUNKS: usize = 32; -pub const MAX_NOMINATIONS: usize = ::LIMIT; +pub use pallet::{pallet::*, *}; -pub(crate) const LOG_TARGET: &'static str = "staking"; +pub(crate) const LOG_TARGET: &'static str = "runtime::staking"; // syntactic sugar for logging. #[macro_export] macro_rules! log { ($level:tt, $patter:expr $(, $values:expr)* $(,)?) 
=> { - frame_support::debug::$level!( + log::$level!( target: crate::LOG_TARGET, - $patter $(, $values)* + concat!("[{:?}] 💸 ", $patter), >::block_number() $(, $values)* ) }; } -/// Data type used to index nominators in the compact type -pub type NominatorIndex = u32; - -/// Data type used to index validators in the compact type. -pub type ValidatorIndex = u16; - -// Ensure the size of both ValidatorIndex and NominatorIndex. They both need to be well below usize. -static_assertions::const_assert!(size_of::() <= size_of::()); -static_assertions::const_assert!(size_of::() <= size_of::()); -static_assertions::const_assert!(size_of::() <= size_of::()); -static_assertions::const_assert!(size_of::() <= size_of::()); - -/// Maximum number of stakers that can be stored in a snapshot. -pub(crate) const MAX_VALIDATORS: usize = ValidatorIndex::max_value() as usize; -pub(crate) const MAX_NOMINATORS: usize = NominatorIndex::max_value() as usize; - /// Counter for the number of eras that have passed. pub type EraIndex = u32; /// Counter for the number of "reward" points earned by a given validator. pub type RewardPoint = u32; -// Note: Maximum nomination limit is set here -- 16. -generate_solution_type!( - #[compact] - pub struct CompactAssignments::(16) -); - -/// Accuracy used for on-chain election. -pub type ChainAccuracy = Perbill; - -/// Accuracy used for off-chain election. This better be small. -pub type OffchainAccuracy = PerU16; - -/// The balance type of this module. +/// The balance type of this pallet. pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = - <::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; +type PositiveImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::PositiveImbalance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// Information regarding the active era (era in used in session). -#[derive(Encode, Decode, RuntimeDebug)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ActiveEraInfo { /// Index of era. pub index: EraIndex, @@ -406,7 +348,7 @@ pub struct ActiveEraInfo { /// Reward points of an era. Used to split era total payout between validators. /// /// This points will be used to reward validators and their respective nominators. -#[derive(PartialEq, Encode, Decode, Default, RuntimeDebug)] +#[derive(PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] pub struct EraRewardPoints { /// Total number of points. Equals the sum of reward points for each validator. total: RewardPoint, @@ -415,8 +357,8 @@ pub struct EraRewardPoints { } /// Indicates the initial status of the staker. -#[derive(RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum StakerStatus { /// Chilling. Idle, @@ -427,7 +369,7 @@ pub enum StakerStatus { } /// A destination account for payment. -#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum RewardDestination { /// Pay into the stash account, increasing the amount at stake accordingly. Staked, @@ -437,6 +379,8 @@ pub enum RewardDestination { Controller, /// Pay into a specified account. Account(AccountId), + /// Receive no reward. 
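+ /// (A payout directed here is skipped outright rather than minted, so it leaves total
+ /// issuance untouched.)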
+ None, } impl Default for RewardDestination { @@ -446,24 +390,26 @@ impl Default for RewardDestination { } /// Preference of what happens regarding validation. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ValidatorPrefs { /// Reward that validator takes up-front; only the rest is split between themselves and /// nominators. #[codec(compact)] pub commission: Perbill, + /// Whether or not this validator is accepting more nominations. If `true`, then no nominator + /// who is not already nominating this validator may nominate them. By default, validators + /// are accepting nominations. + pub blocked: bool, } impl Default for ValidatorPrefs { fn default() -> Self { - ValidatorPrefs { - commission: Default::default(), - } + ValidatorPrefs { commission: Default::default(), blocked: false } } } /// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct UnlockChunk { /// Amount of funds to be unlocked. #[codec(compact)] @@ -474,7 +420,7 @@ pub struct UnlockChunk { } /// The ledger of a (bonded) stash. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct StakingLedger { /// The stash account whose balance is actually locked and at stake. pub stash: AccountId, @@ -494,20 +440,23 @@ pub struct StakingLedger { pub claimed_rewards: Vec, } -impl< - AccountId, - Balance: HasCompact + Copy + Saturating + AtLeast32BitUnsigned, -> StakingLedger { +impl + StakingLedger +{ /// Remove entries from `unlocking` that are sufficiently old and reduce the /// total by the sum of their balances. fn consolidate_unlocked(self, current_era: EraIndex) -> Self { let mut total = self.total; - let unlocking = self.unlocking.into_iter() - .filter(|chunk| if chunk.era > current_era { - true - } else { - total = total.saturating_sub(chunk.value); - false + let unlocking = self + .unlocking + .into_iter() + .filter(|chunk| { + if chunk.era > current_era { + true + } else { + total = total.saturating_sub(chunk.value); + false + } }) .collect(); @@ -516,7 +465,7 @@ impl< total, active: self.active, unlocking, - claimed_rewards: self.claimed_rewards + claimed_rewards: self.claimed_rewards, } } @@ -546,7 +495,8 @@ impl< } } -impl StakingLedger where +impl StakingLedger +where Balance: AtLeast32BitUnsigned + Saturating + Copy, { /// Slash the validator for a given amount of balance. This can grow the value @@ -555,47 +505,42 @@ impl StakingLedger where /// /// Slashes from `active` funds first, and then `unlocking`, starting with the /// chunks that are closest to unlocking. 
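 /// For example (illustrative numbers): with `active = 10`, two unlocking chunks of `5` each,
 /// and a slash of `12`, the whole `active` balance is consumed first, and the chunk closest
 /// to unlocking then absorbs the remaining `2`.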
- fn slash( - &mut self, - mut value: Balance, - minimum_balance: Balance, - ) -> Balance { + fn slash(&mut self, mut value: Balance, minimum_balance: Balance) -> Balance { let pre_total = self.total; let total = &mut self.total; let active = &mut self.active; - let slash_out_of = | - total_remaining: &mut Balance, - target: &mut Balance, - value: &mut Balance, - | { - let mut slash_from_target = (*value).min(*target); + let slash_out_of = + |total_remaining: &mut Balance, target: &mut Balance, value: &mut Balance| { + let mut slash_from_target = (*value).min(*target); - if !slash_from_target.is_zero() { - *target -= slash_from_target; + if !slash_from_target.is_zero() { + *target -= slash_from_target; - // don't leave a dust balance in the staking system. - if *target <= minimum_balance { - slash_from_target += *target; - *value += sp_std::mem::replace(target, Zero::zero()); - } + // Don't leave a dust balance in the staking system. + if *target <= minimum_balance { + slash_from_target += *target; + *value += sp_std::mem::replace(target, Zero::zero()); + } - *total_remaining = total_remaining.saturating_sub(slash_from_target); - *value -= slash_from_target; - } - }; + *total_remaining = total_remaining.saturating_sub(slash_from_target); + *value -= slash_from_target; + } + }; slash_out_of(total, active, &mut value); - let i = self.unlocking.iter_mut() + let i = self + .unlocking + .iter_mut() .map(|chunk| { slash_out_of(total, &mut chunk.value, &mut value); chunk.value }) - .take_while(|value| value.is_zero()) // take all fully-consumed chunks out. + .take_while(|value| value.is_zero()) // Take all fully-consumed chunks out. .count(); - // kill all drained chunks. + // Kill all drained chunks. let _ = self.unlocking.drain(..i); pre_total.saturating_sub(*total) @@ -603,7 +548,7 @@ impl StakingLedger where } /// A record of the nominations made by a specific account. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct Nominations { /// The targets of nomination. pub targets: Vec, @@ -619,7 +564,7 @@ pub struct Nominations { } /// The amount of exposure (to slashing) than an individual nominator has. -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct IndividualExposure { /// The stash account of the nominator in question. pub who: AccountId, @@ -629,7 +574,9 @@ pub struct IndividualExposure { } /// A snapshot of the stake backing a single validator in the system. -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, Default, RuntimeDebug)] +#[derive( + PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, Default, RuntimeDebug, TypeInfo, +)] pub struct Exposure { /// The total balance backing this validator. #[codec(compact)] @@ -643,7 +590,7 @@ pub struct Exposure { /// A pending slash record. The value of the slash has been computed but not applied yet, /// rather deferred for several eras. -#[derive(Encode, Decode, Default, RuntimeDebug)] +#[derive(Encode, Decode, Default, RuntimeDebug, TypeInfo)] pub struct UnappliedSlash { /// The stash ID of the offending validator. validator: AccountId, @@ -657,82 +604,10 @@ pub struct UnappliedSlash { payout: Balance, } -/// Indicate how an election round was computed. 
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] -pub enum ElectionCompute { - /// Result was forcefully computed on chain at the end of the session. - OnChain, - /// Result was submitted and accepted to the chain via a signed transaction. - Signed, - /// Result was submitted and accepted to the chain via an unsigned transaction (by an - /// authority). - Unsigned, -} - -/// The result of an election round. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -pub struct ElectionResult { - /// Flat list of validators who have been elected. - elected_stashes: Vec, - /// Flat list of new exposures, to be updated in the [`Exposure`] storage. - exposures: Vec<(AccountId, Exposure)>, - /// Type of the result. This is kept on chain only to track and report the best score's - /// submission type. An optimisation could remove this. - compute: ElectionCompute, -} - -/// The status of the upcoming (offchain) election. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -pub enum ElectionStatus { - /// Nothing has and will happen for now. submission window is not open. - Closed, - /// The submission window has been open since the contained block number. - Open(BlockNumber), -} - -/// Some indications about the size of the election. This must be submitted with the solution. -/// -/// Note that these values must reflect the __total__ number, not only those that are present in the -/// solution. In short, these should be the same size as the size of the values dumped in -/// `SnapshotValidators` and `SnapshotNominators`. -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, Default)] -pub struct ElectionSize { - /// Number of validators in the snapshot of the current election round. - #[codec(compact)] - pub validators: ValidatorIndex, - /// Number of nominators in the snapshot of the current election round. - #[codec(compact)] - pub nominators: NominatorIndex, -} - - -impl ElectionStatus { - pub fn is_open_at(&self, n: BlockNumber) -> bool { - *self == Self::Open(n) - } - - pub fn is_closed(&self) -> bool { - match self { - Self::Closed => true, - _ => false - } - } - - pub fn is_open(&self) -> bool { - !self.is_closed() - } -} - -impl Default for ElectionStatus { - fn default() -> Self { - Self::Closed - } -} - /// Means for interacting with a specialized version of the `session` trait. /// -/// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Trait` -pub trait SessionInterface: frame_system::Trait { +/// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config` +pub trait SessionInterface: frame_system::Config { /// Disable a given validator by stash ID. /// /// Returns `true` if new era should be forced at the end of this session. 
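The shape of this interface is easier to see without the runtime plumbing. A self-contained mirror (not part of the patch; the real trait is generic over the runtime's `AccountId` and is a supertrait of `frame_system::Config`, both simplified to plain types here), plus the kind of no-op implementation a test mock might supply:

```rust
/// Stand-ins for the runtime types used by the real trait.
type AccountId = u64;
type SessionIndex = u32;

/// A standalone mirror of the `SessionInterface` shape.
trait SessionInterface<AccountId> {
    /// Disable a validator; `Ok(true)` means a new era should be forced.
    fn disable_validator(validator: &AccountId) -> Result<bool, ()>;
    /// The current set of validators.
    fn validators() -> Vec<AccountId>;
    /// Prune historical session data up to the given index.
    fn prune_historical_up_to(up_to: SessionIndex);
}

/// A no-op implementation of the kind a mock runtime might provide.
struct NoSession;

impl SessionInterface<AccountId> for NoSession {
    fn disable_validator(_validator: &AccountId) -> Result<bool, ()> {
        // Nothing to disable, and no forced era is requested.
        Ok(false)
    }
    fn validators() -> Vec<AccountId> {
        Vec::new()
    }
    fn prune_historical_up_to(_up_to: SessionIndex) {}
}

fn main() {
    assert_eq!(<NoSession as SessionInterface<AccountId>>::validators(), Vec::<AccountId>::new());
}
```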
@@ -745,162 +620,88 @@ pub trait SessionInterface: frame_system::Trait { fn prune_historical_up_to(up_to: SessionIndex); } -impl SessionInterface<::AccountId> for T where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, +impl SessionInterface<::AccountId> for T +where + T: pallet_session::Config::AccountId>, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, FullIdentificationOf = ExposureOf, >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, - T::ValidatorIdOf: - Convert<::AccountId, Option<::AccountId>>, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::ValidatorIdOf: Convert< + ::AccountId, + Option<::AccountId>, + >, { - fn disable_validator(validator: &::AccountId) -> Result { - >::disable(validator) + fn disable_validator(validator: &::AccountId) -> Result { + >::disable(validator) } - fn validators() -> Vec<::AccountId> { - >::validators() + fn validators() -> Vec<::AccountId> { + >::validators() } fn prune_historical_up_to(up_to: SessionIndex) { - >::prune_up_to(up_to); + >::prune_up_to(up_to); } } -pub trait WeightInfo { - fn bond() -> Weight; - fn bond_extra() -> Weight; - fn unbond() -> Weight; - fn withdraw_unbonded_update(s: u32, ) -> Weight; - fn withdraw_unbonded_kill(s: u32, ) -> Weight; - fn validate() -> Weight; - fn nominate(n: u32, ) -> Weight; - fn chill() -> Weight; - fn set_payee() -> Weight; - fn set_controller() -> Weight; - fn set_validator_count() -> Weight; - fn force_no_eras() -> Weight; - fn force_new_era() -> Weight; - fn force_new_era_always() -> Weight; - fn set_invulnerables(v: u32, ) -> Weight; - fn force_unstake(s: u32, ) -> Weight; - fn cancel_deferred_slash(s: u32, ) -> Weight; - fn payout_stakers_alive_staked(n: u32, ) -> Weight; - fn payout_stakers_dead_controller(n: u32, ) -> Weight; - fn rebond(l: u32, ) -> Weight; - fn set_history_depth(e: u32, ) -> Weight; - fn reap_stash(s: u32, ) -> Weight; - fn new_era(v: u32, n: u32, ) -> Weight; - fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight; -} - -pub trait Trait: frame_system::Trait + SendTransactionTypes> { - /// The staking balance. - type Currency: LockableCurrency; - - /// Time used for computing era duration. - /// - /// It is guaranteed to start being called from the first `on_finalize`. Thus value at genesis - /// is not used. - type UnixTime: UnixTime; - - /// Convert a balance into a number used for election calculation. This must fit into a `u64` - /// but is allowed to be sensibly lossy. The `u64` is used to communicate with the - /// [`sp_npos_elections`] crate which accepts u64 numbers and does operations in 128. - /// Consequently, the backward convert is used convert the u128s from sp-elections back to a - /// [`BalanceOf`]. - type CurrencyToVote: CurrencyToVote>; - - /// Tokens have been minted and are unused for validator-reward. - /// See [Era payout](./index.html#era-payout). - type RewardRemainder: OnUnbalanced>; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// Handler for the unbalanced reduction when slashing a staker. - type Slash: OnUnbalanced>; - - /// Handler for the unbalanced increment when rewarding a staker. - type Reward: OnUnbalanced>; - - /// Number of sessions per era. 
- type SessionsPerEra: Get; - - /// Number of eras that staked funds must remain bonded for. - type BondingDuration: Get; - - /// Number of eras that slashes are deferred by, after computation. - /// - /// This should be less than the bonding duration. Set to 0 if slashes - /// should be applied immediately, without opportunity for intervention. - type SlashDeferDuration: Get; - - /// The origin which can cancel a deferred slash. Root can always do this. - type SlashCancelOrigin: EnsureOrigin; - - /// Interface for interacting with a session module. - type SessionInterface: self::SessionInterface; - - /// The NPoS reward curve used to define yearly inflation. - /// See [Era payout](./index.html#era-payout). - type RewardCurve: Get<&'static PiecewiseLinear<'static>>; - - /// Something that can estimate the next session change, accurately or as a best effort guess. - type NextNewSession: EstimateNextNewSession; - - /// The number of blocks before the end of the era from which election submissions are allowed. - /// - /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will - /// be used. - /// - /// This is bounded by being within the last session. Hence, setting it to a value more than the - /// length of a session will be pointless. - type ElectionLookahead: Get; - - /// The overarching call type. - type Call: Dispatchable + From> + IsSubType> + Clone; - - /// Maximum number of balancing iterations to run in the offchain submission. - /// - /// If set to 0, balance_solution will not be executed at all. - type MaxIterations: Get; - - /// The threshold of improvement that should be provided for a new solution to be accepted. - type MinSolutionScoreBump: Get; - - /// The maximum number of nominators rewarded for each validator. - /// - /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim - /// their reward. This used to limit the i/o cost for the nominator payout. - type MaxNominatorRewardedPerValidator: Get; - - /// A configuration for base priority of unsigned transactions. +/// Handler for determining how much of a balance should be paid out on the current era. +pub trait EraPayout { + /// Determine the payout for this era. /// - /// This is exposed so that it can be tuned for particular runtime, when - /// multiple pallets send unsigned transactions. - type UnsignedPriority: Get; + /// Returns the amount to be paid to stakers in this era, as well as whatever else should be + /// paid out ("the rest"). + fn era_payout( + total_staked: Balance, + total_issuance: Balance, + era_duration_millis: u64, + ) -> (Balance, Balance); +} - /// Maximum weight that the unsigned transaction can have. - /// - /// Chose this value with care. On one hand, it should be as high as possible, so the solution - /// can contain as many nominators/validators as possible. On the other hand, it should be small - /// enough to fit in the block. - type OffchainSolutionWeightLimit: Get; +impl EraPayout for () { + fn era_payout( + _total_staked: Balance, + _total_issuance: Balance, + _era_duration_millis: u64, + ) -> (Balance, Balance) { + (Default::default(), Default::default()) + } +} - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; +/// Adaptor to turn a `PiecewiseLinear` curve definition into an `EraPayout` impl, used for +/// backwards compatibility. 
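+///
+/// For instance, a runtime that already declares a reward curve can keep it by plugging the
+/// curve in as (`RewardCurve` names the runtime's `PiecewiseLinear` constant and is assumed
+/// here for illustration):
+///
+/// ```nocompile
+/// type EraPayout = ConvertCurve<RewardCurve>;
+/// ```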
+pub struct ConvertCurve<T>(sp_std::marker::PhantomData<T>);
+impl<Balance: AtLeast32BitUnsigned + Clone, T: Get<&'static PiecewiseLinear<'static>>>
+    EraPayout<Balance> for ConvertCurve<T>
+{
+    fn era_payout(
+        total_staked: Balance,
+        total_issuance: Balance,
+        era_duration_millis: u64,
+    ) -> (Balance, Balance) {
+        let (validator_payout, max_payout) = inflation::compute_total_payout(
+            &T::get(),
+            total_staked,
+            total_issuance,
+            // Duration of era; more than u64::MAX is rewarded as u64::MAX.
+            era_duration_millis,
+        );
+        let rest = max_payout.saturating_sub(validator_payout.clone());
+        (validator_payout, rest)
+    }
+}

 /// Mode of era-forcing.
-#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)]
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
 pub enum Forcing {
     /// Not forcing anything - just let whatever happen.
     NotForcing,
     /// Force a new era, then reset to `NotForcing` as soon as it is done.
+    /// Note that this will keep forcing elections until a new era is finally triggered: if an
+    /// election fails, the end of the next session will trigger a new election again, until one
+    /// succeeds.
     ForceNew,
     /// Avoid a new era indefinitely.
     ForceNone,
@@ -909,2551 +710,83 @@ pub enum Forcing {
 }

 impl Default for Forcing {
-    fn default() -> Self { Forcing::NotForcing }
+    fn default() -> Self {
+        Forcing::NotForcing
+    }
 }

 // A value placed in storage that represents the current version of the Staking storage. This value
 // is used by the `on_runtime_upgrade` logic to determine whether we run storage migration logic.
 // This should match directly with the semantic versions of the Rust crate.
-#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)]
+#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo)]
 enum Releases {
     V1_0_0Ancient,
     V2_0_0,
     V3_0_0,
     V4_0_0,
+    V5_0_0, // blockable validators.
+    V6_0_0, // removal of all storage associated with offchain phragmen.
+    V7_0_0, // keep track of number of nominators / validators in map
 }

 impl Default for Releases {
     fn default() -> Self {
-        Releases::V4_0_0
+        Releases::V7_0_0
     }
 }

-decl_storage! {
-    trait Store for Module<T: Trait> as Staking {
-        /// Number of eras to keep in history.
-        ///
-        /// Information is kept for eras in `[current_era - history_depth; current_era]`.
-        ///
-        /// Must be more than the number of eras delayed by session otherwise. I.e. active era must
-        /// always be in history. I.e. `active_era > current_era - history_depth` must be
-        /// guaranteed.
-        HistoryDepth get(fn history_depth) config(): u32 = 84;
-
-        /// The ideal number of staking participants.
-        pub ValidatorCount get(fn validator_count) config(): u32;
-
-        /// Minimum number of staking participants before emergency conditions are imposed.
-        pub MinimumValidatorCount get(fn minimum_validator_count) config(): u32;
-
-        /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're
-        /// easy to initialize and the performance hit is minimal (we expect no more than four
-        /// invulnerables) and restricted to testnets.
-        pub Invulnerables get(fn invulnerables) config(): Vec<T::AccountId>;
-
-        /// Map from all locked "stash" accounts to the controller account.
-        pub Bonded get(fn bonded): map hasher(twox_64_concat) T::AccountId => Option<T::AccountId>;
-
-        /// Map from all (unlocked) "controller" accounts to the info regarding the staking.
-        pub Ledger get(fn ledger):
-            map hasher(blake2_128_concat) T::AccountId
-            => Option<StakingLedger<T::AccountId, BalanceOf<T>>>;
-
-        /// Where the reward payment should be made. 
Keyed by stash. - pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination; - - /// The map from (wannabe) validator stash key to the preferences of that validator. - pub Validators get(fn validators): - map hasher(twox_64_concat) T::AccountId => ValidatorPrefs; - - /// The map from nominator stash key to the set of stash keys of all validators to nominate. - pub Nominators get(fn nominators): - map hasher(twox_64_concat) T::AccountId => Option>; - - /// The current era index. - /// - /// This is the latest planned era, depending on how the Session pallet queues the validator - /// set, it might be active or not. - pub CurrentEra get(fn current_era): Option; - - /// The active era information, it holds index and start. - /// - /// The active era is the era currently rewarded. - /// Validator set of this era must be equal to `SessionInterface::validators`. - pub ActiveEra get(fn active_era): Option; - - /// The session index at which the era start for the last `HISTORY_DEPTH` eras. - pub ErasStartSessionIndex get(fn eras_start_session_index): - map hasher(twox_64_concat) EraIndex => Option; - - /// Exposure of validator at era. - /// - /// This is keyed first by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - /// If stakers hasn't been set or has been removed then empty exposure is returned. - pub ErasStakers get(fn eras_stakers): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Exposure>; - - /// Clipped Exposure of validator at era. - /// - /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the - /// `T::MaxNominatorRewardedPerValidator` biggest stakers. - /// (Note: the field `total` and `own` of the exposure remains unchanged). - /// This is used to limit the i/o cost for the nominator payout. - /// - /// This is keyed fist by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - /// If stakers hasn't been set or has been removed then empty exposure is returned. - pub ErasStakersClipped get(fn eras_stakers_clipped): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Exposure>; - - /// Similar to `ErasStakers`, this holds the preferences of validators. - /// - /// This is keyed first by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - // If prefs hasn't been set or has been removed then 0 commission is returned. - pub ErasValidatorPrefs get(fn eras_validator_prefs): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => ValidatorPrefs; - - /// The total validator era payout for the last `HISTORY_DEPTH` eras. - /// - /// Eras that haven't finished yet or has been removed doesn't have reward. - pub ErasValidatorReward get(fn eras_validator_reward): - map hasher(twox_64_concat) EraIndex => Option>; - - /// Rewards for the last `HISTORY_DEPTH` eras. - /// If reward hasn't been set or has been removed then 0 reward is returned. - pub ErasRewardPoints get(fn eras_reward_points): - map hasher(twox_64_concat) EraIndex => EraRewardPoints; - - /// The total amount staked for the last `HISTORY_DEPTH` eras. - /// If total hasn't been set or has been removed then 0 stake is returned. - pub ErasTotalStake get(fn eras_total_stake): - map hasher(twox_64_concat) EraIndex => BalanceOf; - - /// Mode of era forcing. 
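The `Eras*` maps above are keyed by era first so that a whole era can be pruned at once, and the two stakers maps record `Exposure` values. A self-contained sketch with stand-in types showing the shape stored per `(era, stash)` (types simplified, numbers illustrative):

```rust
/// Minimal stand-ins for the pallet's `Exposure`/`IndividualExposure` types.
#[derive(Debug)]
struct IndividualExposure { who: &'static str, value: u128 }
#[derive(Debug)]
struct Exposure { total: u128, own: u128, others: Vec<IndividualExposure> }

fn main() {
	// A validator with 1_000 of its own stake and two nominators.
	let exposure = Exposure {
		own: 1_000,
		others: vec![
			IndividualExposure { who: "alice", value: 600 },
			IndividualExposure { who: "bob", value: 400 },
		],
		total: 2_000,
	};
	// `ErasStakersClipped` would keep only the largest
	// `MaxNominatorRewardedPerValidator` entries of `others`, while
	// `total` and `own` stay unchanged.
	assert_eq!(
		exposure.total,
		exposure.own + exposure.others.iter().map(|i| i.value).sum::<u128>(),
	);
}
```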
- pub ForceEra get(fn force_era) config(): Forcing; - - /// The percentage of the slash that is distributed to reporters. - /// - /// The rest of the slashed value is handled by the `Slash`. - pub SlashRewardFraction get(fn slash_reward_fraction) config(): Perbill; - - /// The amount of currency given to reporters of a slash event which was - /// canceled by extraordinary circumstances (e.g. governance). - pub CanceledSlashPayout get(fn canceled_payout) config(): BalanceOf; - - /// All unapplied slashes that are queued for later. - pub UnappliedSlashes: - map hasher(twox_64_concat) EraIndex => Vec>>; - - /// A mapping from still-bonded eras to the first session index of that era. - /// - /// Must contains information for eras for the range: - /// `[active_era - bounding_duration; active_era]` - BondedEras: Vec<(EraIndex, SessionIndex)>; - - /// All slashing events on validators, mapped by era to the highest slash proportion - /// and slash value of the era. - ValidatorSlashInEra: - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Option<(Perbill, BalanceOf)>; - - /// All slashing events on nominators, mapped by era to the highest slash value of the era. - NominatorSlashInEra: - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Option>; - - /// Slashing spans for stash accounts. - SlashingSpans get(fn slashing_spans): map hasher(twox_64_concat) T::AccountId => Option; - - /// Records information about the maximum slash of a stash within a slashing span, - /// as well as how much reward has been paid out. - SpanSlash: - map hasher(twox_64_concat) (T::AccountId, slashing::SpanIndex) - => slashing::SpanRecord>; - - /// The earliest era for which we have a pending, unapplied slash. - EarliestUnappliedSlash: Option; - - /// Snapshot of validators at the beginning of the current election window. This should only - /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. - pub SnapshotValidators get(fn snapshot_validators): Option>; - - /// Snapshot of nominators at the beginning of the current election window. This should only - /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. - pub SnapshotNominators get(fn snapshot_nominators): Option>; - - /// The next validator set. At the end of an era, if this is available (potentially from the - /// result of an offchain worker), it is immediately used. Otherwise, the on-chain election - /// is executed. - pub QueuedElected get(fn queued_elected): Option>>; - - /// The score of the current [`QueuedElected`]. - pub QueuedScore get(fn queued_score): Option; - - /// Flag to control the execution of the offchain election. When `Open(_)`, we accept - /// solutions to be submitted. - pub EraElectionStatus get(fn era_election_status): ElectionStatus; - - /// True if the current **planned** session is final. Note that this does not take era - /// forcing into account. - pub IsCurrentSessionFinal get(fn is_current_session_final): bool = false; +/// A `Convert` implementation that finds the stash of the given controller account, +/// if any. +pub struct StashOf(sp_std::marker::PhantomData); - /// True if network has been upgraded to this version. - /// Storage version of the pallet. - /// - /// This is set to v3.0.0 for new networks. 
- StorageVersion build(|_: &GenesisConfig| Releases::V4_0_0): Releases; - } - add_extra_genesis { - config(stakers): - Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>; - build(|config: &GenesisConfig| { - for &(ref stash, ref controller, balance, ref status) in &config.stakers { - assert!( - T::Currency::free_balance(&stash) >= balance, - "Stash does not have enough balance to bond." - ); - let _ = >::bond( - T::Origin::from(Some(stash.clone()).into()), - T::Lookup::unlookup(controller.clone()), - balance, - RewardDestination::Staked, - ); - let _ = match status { - StakerStatus::Validator => { - >::validate( - T::Origin::from(Some(controller.clone()).into()), - Default::default(), - ) - }, - StakerStatus::Nominator(votes) => { - >::nominate( - T::Origin::from(Some(controller.clone()).into()), - votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), - ) - }, _ => Ok(()) - }; - } - }); +impl Convert> for StashOf { + fn convert(controller: T::AccountId) -> Option { + >::ledger(&controller).map(|l| l.stash) } } -decl_event!( - pub enum Event where Balance = BalanceOf, ::AccountId { - /// The era payout has been set; the first balance is the validator-payout; the second is - /// the remainder from the maximum amount of reward. - /// \[era_index, validator_payout, remainder\] - EraPayout(EraIndex, Balance, Balance), - /// The staker has been rewarded by this amount. \[stash, amount\] - Reward(AccountId, Balance), - /// One validator (and its nominators) has been slashed by the given amount. - /// \[validator, amount\] - Slash(AccountId, Balance), - /// An old slashing report from a prior era was discarded because it could - /// not be processed. \[session_index\] - OldSlashingReportDiscarded(SessionIndex), - /// A new set of stakers was elected with the given \[compute\]. - StakingElection(ElectionCompute), - /// A new solution for the upcoming election has been stored. \[compute\] - SolutionStored(ElectionCompute), - /// An account has bonded this amount. \[stash, amount\] - /// - /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, - /// it will not be emitted for staking rewards when they are added to stake. - Bonded(AccountId, Balance), - /// An account has unbonded this amount. \[stash, amount\] - Unbonded(AccountId, Balance), - /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` - /// from the unlocking queue. \[stash, amount\] - Withdrawn(AccountId, Balance), - } -); +/// A typed conversion from stash account ID to the active exposure of nominators +/// on that account. +/// +/// Active exposure is the exposure of the validator set currently validating, i.e. in +/// `active_era`. It can differ from the latest planned exposure in `current_era`. +pub struct ExposureOf(sp_std::marker::PhantomData); -decl_error! { - /// Error for the staking module. - pub enum Error for Module { - /// Not a controller account. - NotController, - /// Not a stash account. - NotStash, - /// Stash is already bonded. - AlreadyBonded, - /// Controller is already paired. - AlreadyPaired, - /// Targets cannot be empty. - EmptyTargets, - /// Duplicate index. - DuplicateIndex, - /// Slash record index out of bounds. - InvalidSlashIndex, - /// Can not bond with value less than minimum balance. - InsufficientValue, - /// Can not schedule more unlock chunks. - NoMoreChunks, - /// Can not rebond without unlocking chunks. - NoUnlockChunk, - /// Attempting to target a stash that still has funds. 
- FundedTarget, - /// Invalid era to reward. - InvalidEraToReward, - /// Invalid number of nominations. - InvalidNumberOfNominations, - /// Items are not sorted and unique. - NotSortedAndUnique, - /// Rewards for this era have already been claimed for this validator. - AlreadyClaimed, - /// The submitted result is received out of the open window. - OffchainElectionEarlySubmission, - /// The submitted result is not as good as the one stored on chain. - OffchainElectionWeakSubmission, - /// The snapshot data of the current window is missing. - SnapshotUnavailable, - /// Incorrect number of winners were presented. - OffchainElectionBogusWinnerCount, - /// One of the submitted winners is not an active candidate on chain (index is out of range - /// in snapshot). - OffchainElectionBogusWinner, - /// Error while building the assignment type from the compact. This can happen if an index - /// is invalid, or if the weights _overflow_. - OffchainElectionBogusCompact, - /// One of the submitted nominators is not an active nominator on chain. - OffchainElectionBogusNominator, - /// One of the submitted nominators has an edge to which they have not voted on chain. - OffchainElectionBogusNomination, - /// One of the submitted nominators has an edge which is submitted before the last non-zero - /// slash of the target. - OffchainElectionSlashedNomination, - /// A self vote must only be originated from a validator to ONLY themselves. - OffchainElectionBogusSelfVote, - /// The submitted result has unknown edges that are not among the presented winners. - OffchainElectionBogusEdge, - /// The claimed score does not match with the one computed from the data. - OffchainElectionBogusScore, - /// The election size is invalid. - OffchainElectionBogusElectionSize, - /// The call is not allowed at the given time due to restrictions of election period. - CallNotAllowed, - /// Incorrect previous history depth input provided. - IncorrectHistoryDepth, - /// Incorrect number of slashing spans provided. - IncorrectSlashingSpans, +impl Convert>>> + for ExposureOf +{ + fn convert(validator: T::AccountId) -> Option>> { + >::active_era() + .map(|active_era| >::eras_stakers(active_era.index, &validator)) } } -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Number of sessions per era. - const SessionsPerEra: SessionIndex = T::SessionsPerEra::get(); - - /// Number of eras that staked funds must remain bonded for. - const BondingDuration: EraIndex = T::BondingDuration::get(); - - /// Number of eras that slashes are deferred by, after computation. - /// - /// This should be less than the bonding duration. - /// Set to 0 if slashes should be applied immediately, without opportunity for - /// intervention. - const SlashDeferDuration: EraIndex = T::SlashDeferDuration::get(); - - /// The number of blocks before the end of the era from which election submissions are allowed. - /// - /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will - /// be used. - /// - /// This is bounded by being within the last session. Hence, setting it to a value more than the - /// length of a session will be pointless. - const ElectionLookahead: T::BlockNumber = T::ElectionLookahead::get(); - - /// Maximum number of balancing iterations to run in the offchain submission. - /// - /// If set to 0, balance_solution will not be executed at all. 
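`StashOf` and `ExposureOf` above exist to be plugged into the session pallets. A typical runtime wiring fragment (not compilable on its own; `Runtime`, `AccountId`, `Balance`, and the elided associated types are assumed):

```rust
impl pallet_session::Config for Runtime {
	// Validators are identified by their *stash*; `StashOf` resolves a
	// controller (or any account) to it.
	type ValidatorId = AccountId;
	type ValidatorIdOf = pallet_staking::StashOf<Runtime>;
	// ... remaining associated types elided ...
}

impl pallet_session::historical::Config for Runtime {
	// Offence proofs carry the full exposure that was active at the time.
	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
}
```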
- const MaxIterations: u32 = T::MaxIterations::get(); - - /// The threshold of improvement that should be provided for a new solution to be accepted. - const MinSolutionScoreBump: Perbill = T::MinSolutionScoreBump::get(); - - /// The maximum number of nominators rewarded for each validator. - /// - /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim - /// their reward. This used to limit the i/o cost for the nominator payout. - const MaxNominatorRewardedPerValidator: u32 = T::MaxNominatorRewardedPerValidator::get(); - - type Error = Error; - - fn deposit_event() = default; - - /// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the - /// election window has opened, if we are at the last session and less blocks than - /// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain - /// worker, if applicable, will execute at the end of the current block, and solutions may - /// be submitted. - fn on_initialize(now: T::BlockNumber) -> Weight { - let mut consumed_weight = 0; - let mut add_weight = |reads, writes, weight| { - consumed_weight += T::DbWeight::get().reads_writes(reads, writes); - consumed_weight += weight; - }; - - if - // if we don't have any ongoing offchain compute. - Self::era_election_status().is_closed() && - // either current session final based on the plan, or we're forcing. - (Self::is_current_session_final() || Self::will_era_be_forced()) - { - if let Some(next_session_change) = T::NextNewSession::estimate_next_new_session(now) { - if let Some(remaining) = next_session_change.checked_sub(&now) { - if remaining <= T::ElectionLookahead::get() && !remaining.is_zero() { - // create snapshot. - let (did_snapshot, snapshot_weight) = Self::create_stakers_snapshot(); - add_weight(0, 0, snapshot_weight); - if did_snapshot { - // Set the flag to make sure we don't waste any compute here in the same era - // after we have triggered the offline compute. - >::put( - ElectionStatus::::Open(now) - ); - add_weight(0, 1, 0); - log!(info, "💸 Election window is Open({:?}). Snapshot created", now); - } else { - log!(warn, "💸 Failed to create snapshot at {:?}.", now); - } - } - } - } else { - log!(warn, "💸 Estimating next session change failed."); - } - add_weight(0, 0, T::NextNewSession::weight(now)) - } - // For `era_election_status`, `is_current_session_final`, `will_era_be_forced` - add_weight(3, 0, 0); - // Additional read from `on_finalize` - add_weight(1, 0, 0); - consumed_weight - } - - /// Check if the current block number is the one at which the election window has been set - /// to open. If so, it runs the offchain worker code. - fn offchain_worker(now: T::BlockNumber) { - use offchain_election::{set_check_offchain_execution_status, compute_offchain_election}; - - if Self::era_election_status().is_open_at(now) { - let offchain_status = set_check_offchain_execution_status::(now); - if let Err(why) = offchain_status { - log!(warn, "💸 skipping offchain worker in open election window due to [{}]", why); - } else { - if let Err(e) = compute_offchain_election::() { - log!(error, "💸 Error in election offchain worker: {:?}", e); - } else { - log!(debug, "💸 Executed offchain worker thread without errors."); - } - } - } - } - - fn on_finalize() { - // Set the start of the first era. 
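The window-opening condition in `on_initialize` above reduces to a small predicate on block numbers: the election window opens when the next session change is within `ElectionLookahead` blocks, but not on the boundary block itself. A runnable restatement (hypothetical helper, `u32` block numbers assumed):

```rust
fn should_open_window(now: u32, next_session_change: u32, lookahead: u32) -> bool {
	match next_session_change.checked_sub(now) {
		Some(remaining) => remaining <= lookahead && remaining != 0,
		None => false,
	}
}

fn main() {
	// Session changes at block 100, lookahead of 10 blocks:
	assert!(!should_open_window(89, 100, 10)); // remaining = 11: too early.
	assert!(should_open_window(90, 100, 10));  // remaining = 10: window opens.
	assert!(!should_open_window(100, 100, 10)); // remaining = 0: too late.
}
```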
- if let Some(mut active_era) = Self::active_era() { - if active_era.start.is_none() { - let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - active_era.start = Some(now_as_millis_u64); - // This write only ever happens once, we don't include it in the weight in general - ActiveEra::put(active_era); - } - } - // `on_finalize` weight is tracked in `on_initialize` - } - - fn integrity_test() { - sp_io::TestExternalities::new_empty().execute_with(|| - assert!( - T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, - "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", - T::SlashDeferDuration::get(), - T::BondingDuration::get(), - ) - ); - - use sp_runtime::UpperOf; - // see the documentation of `Assignment::try_normalize`. Now we can ensure that this - // will always return `Ok`. - // 1. Maximum sum of Vec must fit into `UpperOf`. - assert!( - >>::try_into(MAX_NOMINATIONS) - .unwrap() - .checked_mul(::one().deconstruct().try_into().unwrap()) - .is_some() - ); - - // 2. Maximum sum of Vec must fit into `UpperOf`. - assert!( - >>::try_into(MAX_NOMINATIONS) - .unwrap() - .checked_mul(::one().deconstruct().try_into().unwrap()) - .is_some() - ); - } - - /// Take the origin account as a stash and lock up `value` of its balance. `controller` will - /// be the account that controls it. - /// - /// `value` must be more than the `minimum_balance` specified by `T::Currency`. - /// - /// The dispatch origin for this call must be _Signed_ by the stash account. - /// - /// Emits `Bonded`. - /// - /// # - /// - Independent of the arguments. Moderate complexity. - /// - O(1). - /// - Three extra DB entries. - /// - /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned - /// unless the `origin` falls below _existential deposit_ and gets removed as dust. - /// ------------------ - /// Weight: O(1) - /// DB Weight: - /// - Read: Bonded, Ledger, [Origin Account], Current Era, History Depth, Locks - /// - Write: Bonded, Payee, [Origin Account], Locks, Ledger - /// # - #[weight = T::WeightInfo::bond()] - pub fn bond(origin, - controller: ::Source, - #[compact] value: BalanceOf, - payee: RewardDestination, - ) { - let stash = ensure_signed(origin)?; - - if >::contains_key(&stash) { - Err(Error::::AlreadyBonded)? - } - - let controller = T::Lookup::lookup(controller)?; - - if >::contains_key(&controller) { - Err(Error::::AlreadyPaired)? - } - - // reject a bond which is considered to be _dust_. - if value < T::Currency::minimum_balance() { - Err(Error::::InsufficientValue)? - } - - // You're auto-bonded forever, here. We might improve this by only bonding when - // you actually validate/nominate and remove once you unbond __everything__. - >::insert(&stash, &controller); - >::insert(&stash, payee); - - system::Module::::inc_ref(&stash); - - let current_era = CurrentEra::get().unwrap_or(0); - let history_depth = Self::history_depth(); - let last_reward_era = current_era.saturating_sub(history_depth); - - let stash_balance = T::Currency::free_balance(&stash); - let value = value.min(stash_balance); - Self::deposit_event(RawEvent::Bonded(stash.clone(), value)); - let item = StakingLedger { - stash, - total: value, - active: value, - unlocking: vec![], - claimed_rewards: (last_reward_era..current_era).collect(), - }; - Self::update_ledger(&controller, &item); - } - - /// Add some extra amount that have appeared in the stash `free_balance` into the balance up - /// for staking. 
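A hypothetical mock-runtime test for `bond` above, illustrating the stash/controller pairing it creates (`ExtBuilder`, the account numbers, and their funding are assumptions about the pallet's test setup):

```rust
#[test]
fn bond_pairs_stash_and_controller() {
	ExtBuilder::default().build().execute_with(|| {
		// Account 1 is the stash, account 2 the controller; bond 10 units
		// and compound any rewards back into the stake.
		assert_ok!(Staking::bond(Origin::signed(1), 2, 10, RewardDestination::Staked));
		// `Bonded` maps stash -> controller; the ledger lives under the
		// controller key.
		assert_eq!(Staking::bonded(&1), Some(2));
		assert_eq!(Staking::ledger(&2).unwrap().active, 10);
	});
}
```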
- /// - /// Use this if there are additional funds in your stash account that you wish to bond. - /// Unlike [`bond`] or [`unbond`] this function does not impose any limitation on the amount - /// that can be added. - /// - /// The dispatch origin for this call must be _Signed_ by the stash, not the controller and - /// it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// Emits `Bonded`. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - O(1). - /// - One DB entry. - /// ------------ - /// DB Weight: - /// - Read: Era Election Status, Bonded, Ledger, [Origin Account], Locks - /// - Write: [Origin Account], Locks, Ledger - /// # - #[weight = T::WeightInfo::bond_extra()] - fn bond_extra(origin, #[compact] max_additional: BalanceOf) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let stash = ensure_signed(origin)?; - - let controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - - let stash_balance = T::Currency::free_balance(&stash); - - if let Some(extra) = stash_balance.checked_sub(&ledger.total) { - let extra = extra.min(max_additional); - ledger.total += extra; - ledger.active += extra; - Self::deposit_event(RawEvent::Bonded(stash, extra)); - Self::update_ledger(&controller, &ledger); - } - } - - /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond - /// period ends. If this leaves an amount actively bonded less than - /// T::Currency::minimum_balance(), then it is increased to the full amount. - /// - /// Once the unlock period is done, you can call `withdraw_unbonded` to actually move - /// the funds out of management ready for transfer. - /// - /// No more than a limited number of unlocking chunks (see `MAX_UNLOCKING_CHUNKS`) - /// can co-exists at the same time. In that case, [`Call::withdraw_unbonded`] need - /// to be called first to remove some of the chunks (if possible). - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// Emits `Unbonded`. - /// - /// See also [`Call::withdraw_unbonded`]. - /// - /// # - /// - Independent of the arguments. Limited but potentially exploitable complexity. - /// - Contains a limited number of reads. - /// - Each call (requires the remainder of the bonded balance to be above `minimum_balance`) - /// will cause a new entry to be inserted into a vector (`Ledger.unlocking`) kept in storage. - /// The only way to clean the aforementioned storage item is also user-controlled via - /// `withdraw_unbonded`. - /// - One DB entry. - /// ---------- - /// Weight: O(1) - /// DB Weight: - /// - Read: EraElectionStatus, Ledger, CurrentEra, Locks, BalanceOf Stash, - /// - Write: Locks, Ledger, BalanceOf Stash, - /// - #[weight = T::WeightInfo::unbond()] - fn unbond(origin, #[compact] value: BalanceOf) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - ensure!( - ledger.unlocking.len() < MAX_UNLOCKING_CHUNKS, - Error::::NoMoreChunks, - ); - - let mut value = value.min(ledger.active); - - if !value.is_zero() { - ledger.active -= value; - - // Avoid there being a dust balance left in the staking system. 
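The core of `bond_extra` above is a clamp: only balance not already accounted for in the ledger can be added, capped by the caller's `max_additional`. A runnable restatement (hypothetical helper):

```rust
fn extra_to_bond(free_balance: u128, ledger_total: u128, max_additional: u128) -> u128 {
	free_balance.saturating_sub(ledger_total).min(max_additional)
}

fn main() {
	// 300 free, 250 already bonded: at most 50 can be added.
	assert_eq!(extra_to_bond(300, 250, 100), 50);
	assert_eq!(extra_to_bond(300, 250, 30), 30);
}
```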
- if ledger.active < T::Currency::minimum_balance() { - value += ledger.active; - ledger.active = Zero::zero(); - } - - // Note: in case there is no current era it is fine to bond one era more. - let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); - ledger.unlocking.push(UnlockChunk { value, era }); - Self::update_ledger(&controller, &ledger); - Self::deposit_event(RawEvent::Unbonded(ledger.stash, value)); - } - } - - /// Remove any unlocked chunks from the `unlocking` queue from our management. - /// - /// This essentially frees up that balance to be used by the stash account to do - /// whatever it wants. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// Emits `Withdrawn`. - /// - /// See also [`Call::unbond`]. - /// - /// # - /// - Could be dependent on the `origin` argument and how much `unlocking` chunks exist. - /// It implies `consolidate_unlocked` which loops over `Ledger.unlocking`, which is - /// indirectly user-controlled. See [`unbond`] for more detail. - /// - Contains a limited number of reads, yet the size of which could be large based on `ledger`. - /// - Writes are limited to the `origin` account key. - /// --------------- - /// Complexity O(S) where S is the number of slashing spans to remove - /// Update: - /// - Reads: EraElectionStatus, Ledger, Current Era, Locks, [Origin Account] - /// - Writes: [Origin Account], Locks, Ledger - /// Kill: - /// - Reads: EraElectionStatus, Ledger, Current Era, Bonded, Slashing Spans, [Origin - /// Account], Locks, BalanceOf stash - /// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, - /// [Origin Account], Locks, BalanceOf stash. - /// - Writes Each: SpanSlash * S - /// NOTE: Weight annotation is the kill scenario, we refund otherwise. - /// # - #[weight = T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans)] - fn withdraw_unbonded(origin, num_slashing_spans: u32) -> DispatchResultWithPostInfo { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let (stash, old_total) = (ledger.stash.clone(), ledger.total); - if let Some(current_era) = Self::current_era() { - ledger = ledger.consolidate_unlocked(current_era) - } - - let post_info_weight = if ledger.unlocking.is_empty() && ledger.active.is_zero() { - // This account must have called `unbond()` with some value that caused the active - // portion to fall below existential deposit + will have no more unlocking chunks - // left. We can now safely remove all staking-related information. - Self::kill_stash(&stash, num_slashing_spans)?; - // remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); - // This is worst case scenario, so we use the full weight and return None - None - } else { - // This was the consequence of a partial unbond. just update the ledger and move on. - Self::update_ledger(&controller, &ledger); - - // This is only an update, so we use less overall weight. - Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) - }; - - // `old_total` should never be less than the new total because - // `consolidate_unlocked` strictly subtracts balance. - if ledger.total < old_total { - // Already checked that this won't overflow by entry condition. 
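Together, `unbond` and `withdraw_unbonded` encode a simple timeline: a chunk scheduled in era `e` becomes withdrawable once era `e + BondingDuration` is reached. A minimal sketch (stand-in types; the 28-era duration is illustrative):

```rust
#[derive(Debug, PartialEq)]
struct UnlockChunk { value: u128, era: u32 }

/// Mirror of the scheduling in `unbond`: the chunk matures one full bonding
/// duration after the era it was scheduled in.
fn schedule_unbond(current_era: u32, bonding_duration: u32, value: u128) -> UnlockChunk {
	UnlockChunk { value, era: current_era + bonding_duration }
}

/// Mirror of the check `consolidate_unlocked` applies in `withdraw_unbonded`.
fn withdrawable(chunk: &UnlockChunk, current_era: u32) -> bool {
	current_era >= chunk.era
}

fn main() {
	let chunk = schedule_unbond(10, 28, 500);
	assert!(!withdrawable(&chunk, 37));
	assert!(withdrawable(&chunk, 38));
}
```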
- let value = old_total - ledger.total; - Self::deposit_event(RawEvent::Withdrawn(stash, value)); - } - - Ok(post_info_weight.into()) - } - - /// Declare the desire to validate for the origin controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains a limited number of reads. - /// - Writes are limited to the `origin` account key. - /// ----------- - /// Weight: O(1) - /// DB Weight: - /// - Read: Era Election Status, Ledger - /// - Write: Nominators, Validators - /// # - #[weight = T::WeightInfo::validate()] - pub fn validate(origin, prefs: ValidatorPrefs) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let stash = &ledger.stash; - >::remove(stash); - >::insert(stash, prefs); - } - - /// Declare the desire to nominate `targets` for the origin controller. - /// - /// Effects will be felt at the beginning of the next era. This can only be called when - /// [`EraElectionStatus`] is `Closed`. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - The transaction's complexity is proportional to the size of `targets` (N) - /// which is capped at CompactAssignments::LIMIT (MAX_NOMINATIONS). - /// - Both the reads and writes follow a similar pattern. - /// --------- - /// Weight: O(N) - /// where N is the number of targets - /// DB Weight: - /// - Reads: Era Election Status, Ledger, Current Era - /// - Writes: Validators, Nominators - /// # - #[weight = T::WeightInfo::nominate(targets.len() as u32)] - pub fn nominate(origin, targets: Vec<::Source>) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let stash = &ledger.stash; - ensure!(!targets.is_empty(), Error::::EmptyTargets); - let targets = targets.into_iter() - .take(MAX_NOMINATIONS) - .map(|t| T::Lookup::lookup(t)) - .collect::, _>>()?; - - let nominations = Nominations { - targets, - // initial nominations are considered submitted at era 0. See `Nominations` doc - submitted_in: Self::current_era().unwrap_or(0), - suppressed: false, - }; - - >::remove(stash); - >::insert(stash, &nominations); - } - - /// Declare no desire to either validate or nominate. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains one read. - /// - Writes are limited to the `origin` account key. 
- /// -------- - /// Weight: O(1) - /// DB Weight: - /// - Read: EraElectionStatus, Ledger - /// - Write: Validators, Nominators - /// # - #[weight = T::WeightInfo::chill()] - fn chill(origin) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - Self::chill_stash(&ledger.stash); - } - - /// (Re-)set the payment target for a controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains a limited number of reads. - /// - Writes are limited to the `origin` account key. - /// --------- - /// - Weight: O(1) - /// - DB Weight: - /// - Read: Ledger - /// - Write: Payee - /// # - #[weight = T::WeightInfo::set_payee()] - fn set_payee(origin, payee: RewardDestination) { - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let stash = &ledger.stash; - >::insert(stash, payee); - } - - /// (Re-)set the controller of a stash. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains a limited number of reads. - /// - Writes are limited to the `origin` account key. - /// ---------- - /// Weight: O(1) - /// DB Weight: - /// - Read: Bonded, Ledger New Controller, Ledger Old Controller - /// - Write: Bonded, Ledger New Controller, Ledger Old Controller - /// # - #[weight = T::WeightInfo::set_controller()] - fn set_controller(origin, controller: ::Source) { - let stash = ensure_signed(origin)?; - let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; - let controller = T::Lookup::lookup(controller)?; - if >::contains_key(&controller) { - Err(Error::::AlreadyPaired)? - } - if controller != old_controller { - >::insert(&stash, &controller); - if let Some(l) = >::take(&old_controller) { - >::insert(&controller, l); - } - } - } - - /// Sets the ideal number of validators. - /// - /// The dispatch origin must be Root. - /// - /// # - /// Weight: O(1) - /// Write: Validator Count - /// # - #[weight = T::WeightInfo::set_validator_count()] - fn set_validator_count(origin, #[compact] new: u32) { - ensure_root(origin)?; - ValidatorCount::put(new); - } - - /// Increments the ideal number of validators. - /// - /// The dispatch origin must be Root. - /// - /// # - /// Same as [`set_validator_count`]. - /// # - #[weight = T::WeightInfo::set_validator_count()] - fn increase_validator_count(origin, #[compact] additional: u32) { - ensure_root(origin)?; - ValidatorCount::mutate(|n| *n += additional); - } - - /// Scale up the ideal number of validators by a factor. - /// - /// The dispatch origin must be Root. - /// - /// # - /// Same as [`set_validator_count`]. - /// # - #[weight = T::WeightInfo::set_validator_count()] - fn scale_validator_count(origin, factor: Percent) { - ensure_root(origin)?; - ValidatorCount::mutate(|n| *n += factor * *n); - } - - /// Force there to be no new eras indefinitely. - /// - /// The dispatch origin must be Root. - /// - /// # - /// - No arguments. 
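`scale_validator_count` above grows the target validator set multiplicatively via `Percent`. A runnable check of the `*n += factor * *n` arithmetic:

```rust
use sp_runtime::Percent;

fn main() {
	// With factor = 10%, 100 validators become 110.
	let factor = Percent::from_percent(10);
	let mut n: u32 = 100;
	n += factor * n;
	assert_eq!(n, 110);
}
```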
- /// - Weight: O(1) - /// - Write: ForceEra - /// # - #[weight = T::WeightInfo::force_no_eras()] - fn force_no_eras(origin) { - ensure_root(origin)?; - ForceEra::put(Forcing::ForceNone); - } - - /// Force there to be a new era at the end of the next session. After this, it will be - /// reset to normal (non-forced) behaviour. - /// - /// The dispatch origin must be Root. - /// - /// # - /// - No arguments. - /// - Weight: O(1) - /// - Write ForceEra - /// # - #[weight = T::WeightInfo::force_new_era()] - fn force_new_era(origin) { - ensure_root(origin)?; - ForceEra::put(Forcing::ForceNew); - } - - /// Set the validators who cannot be slashed (if any). - /// - /// The dispatch origin must be Root. - /// - /// # - /// - O(V) - /// - Write: Invulnerables - /// # - #[weight = T::WeightInfo::set_invulnerables(invulnerables.len() as u32)] - fn set_invulnerables(origin, invulnerables: Vec) { - ensure_root(origin)?; - >::put(invulnerables); - } - - /// Force a current staker to become completely unstaked, immediately. - /// - /// The dispatch origin must be Root. - /// - /// # - /// O(S) where S is the number of slashing spans to be removed - /// Reads: Bonded, Slashing Spans, Account, Locks - /// Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Account, Locks - /// Writes Each: SpanSlash * S - /// # - #[weight = T::WeightInfo::force_unstake(*num_slashing_spans)] - fn force_unstake(origin, stash: T::AccountId, num_slashing_spans: u32) { - ensure_root(origin)?; +/// Filter historical offences out and only allow those from the bonding period. +pub struct FilterHistoricalOffences { + _inner: sp_std::marker::PhantomData<(T, R)>, +} - // remove all staking-related information. - Self::kill_stash(&stash, num_slashing_spans)?; +impl ReportOffence + for FilterHistoricalOffences, R> +where + T: Config, + R: ReportOffence, + O: Offence, +{ + fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { + // Disallow any slashing from before the current bonding period. + let offence_session = offence.session_index(); + let bonded_eras = BondedEras::::get(); - // remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); + if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { + R::report_offence(reporters, offence) + } else { + >::deposit_event(Event::::OldSlashingReportDiscarded(offence_session)); + Ok(()) } - - /// Force there to be a new era at the end of sessions indefinitely. - /// - /// The dispatch origin must be Root. - /// - /// # - /// - Weight: O(1) - /// - Write: ForceEra - /// # - #[weight = T::WeightInfo::force_new_era_always()] - fn force_new_era_always(origin) { - ensure_root(origin)?; - ForceEra::put(Forcing::ForceAlways); - } - - /// Cancel enactment of a deferred slash. - /// - /// Can be called by the `T::SlashCancelOrigin`. - /// - /// Parameters: era and indices of the slashes for that era to kill. - /// - /// # - /// Complexity: O(U + S) - /// with U unapplied slashes weighted with U=1000 - /// and S is the number of slash indices to be canceled. 
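The filtering rule in `FilterHistoricalOffences::report_offence` above compares the offence's session against the start of the earliest still-bonded era. A runnable restatement of that predicate (hypothetical helper over `(era, first_session)` pairs):

```rust
fn within_bonding_period(bonded_eras: &[(u32, u32)], offence_session: u32) -> bool {
	// Matches `bonded_eras.first().filter(|(_, start)| offence_session >= *start)`.
	bonded_eras.first().map_or(false, |&(_, start)| offence_session >= start)
}

fn main() {
	let bonded_eras = [(3, 30), (4, 40), (5, 50)]; // (era, first session)
	assert!(!within_bonding_period(&bonded_eras, 29)); // too old: discarded.
	assert!(within_bonding_period(&bonded_eras, 30));  // forwarded.
}
```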
- /// - Read: Unapplied Slashes - /// - Write: Unapplied Slashes - /// # - #[weight = T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32)] - fn cancel_deferred_slash(origin, era: EraIndex, slash_indices: Vec) { - T::SlashCancelOrigin::ensure_origin(origin)?; - - ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); - ensure!(is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); - - let mut unapplied = ::UnappliedSlashes::get(&era); - let last_item = slash_indices[slash_indices.len() - 1]; - ensure!((last_item as usize) < unapplied.len(), Error::::InvalidSlashIndex); - - for (removed, index) in slash_indices.into_iter().enumerate() { - let index = (index as usize) - removed; - unapplied.remove(index); - } - - ::UnappliedSlashes::insert(&era, &unapplied); - } - - /// Pay out all the stakers behind a single validator for a single era. - /// - /// - `validator_stash` is the stash account of the validator. Their nominators, up to - /// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards. - /// - `era` may be any era between `[current_era - history_depth; current_era]`. - /// - /// The origin of this call must be _Signed_. Any account can call this function, even if - /// it is not one of the stakers. - /// - /// This can only be called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Time complexity: at most O(MaxNominatorRewardedPerValidator). - /// - Contains a limited number of reads and writes. - /// ----------- - /// N is the Number of payouts for the validator (including the validator) - /// Weight: - /// - Reward Destination Staked: O(N) - /// - Reward Destination Controller (Creating): O(N) - /// DB Weight: - /// - Read: EraElectionStatus, CurrentEra, HistoryDepth, ErasValidatorReward, - /// ErasStakersClipped, ErasRewardPoints, ErasValidatorPrefs (8 items) - /// - Read Each: Bonded, Ledger, Payee, Locks, System Account (5 items) - /// - Write Each: System Account, Locks, Ledger (3 items) - /// - /// NOTE: weights are assuming that payouts are made to alive stash account (Staked). - /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. - /// # - #[weight = T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get())] - fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - ensure_signed(origin)?; - Self::do_payout_stakers(validator_stash, era) - } - - /// Rebond a portion of the stash scheduled to be unlocked. - /// - /// The dispatch origin must be signed by the controller, and it can be only called when - /// [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Time complexity: O(L), where L is unlocking chunks - /// - Bounded by `MAX_UNLOCKING_CHUNKS`. - /// - Storage changes: Can't increase storage, only decrease it. 
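`cancel_deferred_slash` removes vector entries by index while iterating, which only works because `slash_indices` is required to be sorted and unique: each removal shifts the later indices left by exactly the number already removed. A runnable restatement:

```rust
fn cancel_indices<T>(unapplied: &mut Vec<T>, slash_indices: &[u32]) {
	for (removed, index) in slash_indices.iter().enumerate() {
		// Compensate for the elements already removed before this one.
		let index = *index as usize - removed;
		unapplied.remove(index);
	}
}

fn main() {
	let mut slashes = vec!["a", "b", "c", "d", "e"];
	cancel_indices(&mut slashes, &[1, 3]); // drop "b" and "d".
	assert_eq!(slashes, vec!["a", "c", "e"]);
}
```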
- /// --------------- - /// - DB Weight: - /// - Reads: EraElectionStatus, Ledger, Locks, [Origin Account] - /// - Writes: [Origin Account], Locks, Ledger - /// # - #[weight = T::WeightInfo::rebond(MAX_UNLOCKING_CHUNKS as u32)] - fn rebond(origin, #[compact] value: BalanceOf) -> DispatchResultWithPostInfo { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); - - let ledger = ledger.rebond(value); - Self::update_ledger(&controller, &ledger); - Ok(Some( - 35 * WEIGHT_PER_MICROS - + 50 * WEIGHT_PER_NANOS * (ledger.unlocking.len() as Weight) - + T::DbWeight::get().reads_writes(3, 2) - ).into()) - } - - /// Set `HistoryDepth` value. This function will delete any history information - /// when `HistoryDepth` is reduced. - /// - /// Parameters: - /// - `new_history_depth`: The new history depth you would like to set. - /// - `era_items_deleted`: The number of items that will be deleted by this dispatch. - /// This should report all the storage items that will be deleted by clearing old - /// era history. Needed to report an accurate weight for the dispatch. Trusted by - /// `Root` to report an accurate number. - /// - /// Origin must be root. - /// - /// # - /// - E: Number of history depths removed, i.e. 10 -> 7 = 3 - /// - Weight: O(E) - /// - DB Weight: - /// - Reads: Current Era, History Depth - /// - Writes: History Depth - /// - Clear Prefix Each: Era Stakers, EraStakersClipped, ErasValidatorPrefs - /// - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake, ErasStartSessionIndex - /// # - #[weight = T::WeightInfo::set_history_depth(*_era_items_deleted)] - fn set_history_depth(origin, - #[compact] new_history_depth: EraIndex, - #[compact] _era_items_deleted: u32, - ) { - ensure_root(origin)?; - if let Some(current_era) = Self::current_era() { - HistoryDepth::mutate(|history_depth| { - let last_kept = current_era.checked_sub(*history_depth).unwrap_or(0); - let new_last_kept = current_era.checked_sub(new_history_depth).unwrap_or(0); - for era_index in last_kept..new_last_kept { - Self::clear_era_information(era_index); - } - *history_depth = new_history_depth - }) - } - } - - /// Remove all data structure concerning a staker/stash once its balance is zero. - /// This is essentially equivalent to `withdraw_unbonded` except it can be called by anyone - /// and the target `stash` must have no funds left. - /// - /// This can be called from any origin. - /// - /// - `stash`: The stash account to reap. Its balance must be zero. - /// - /// # - /// Complexity: O(S) where S is the number of slashing spans on the account. - /// DB Weight: - /// - Reads: Stash Account, Bonded, Slashing Spans, Locks - /// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, Stash Account, Locks - /// - Writes Each: SpanSlash * S - /// # - #[weight = T::WeightInfo::reap_stash(*num_slashing_spans)] - fn reap_stash(_origin, stash: T::AccountId, num_slashing_spans: u32) { - ensure!(T::Currency::total_balance(&stash).is_zero(), Error::::FundedTarget); - Self::kill_stash(&stash, num_slashing_spans)?; - T::Currency::remove_lock(STAKING_ID, &stash); - } - - /// Submit an election result to the chain. If the solution: - /// - /// 1. is valid. - /// 2. has a better score than a potentially existing solution on chain. - /// - /// then, it will be _put_ on chain. 
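`set_history_depth` prunes eras in the half-open range `last_kept..new_last_kept` when the depth shrinks. A runnable restatement of that range computation:

```rust
fn eras_to_prune(current_era: u32, old_depth: u32, new_depth: u32) -> std::ops::Range<u32> {
	let last_kept = current_era.checked_sub(old_depth).unwrap_or(0);
	let new_last_kept = current_era.checked_sub(new_depth).unwrap_or(0);
	last_kept..new_last_kept
}

fn main() {
	// Era 100 with depth 84 keeps eras 16..=100; shrinking to depth 10
	// prunes eras 16 through 89.
	assert_eq!(eras_to_prune(100, 84, 10), 16..90);
}
```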
- /// - /// A solution consists of two pieces of data: - /// - /// 1. `winners`: a flat vector of all the winners of the round. - /// 2. `assignments`: the compact version of an assignment vector that encodes the edge - /// weights. - /// - /// Both of which may be computed using _phragmen_, or any other algorithm. - /// - /// Additionally, the submitter must provide: - /// - /// - The `score` that they claim their solution has. - /// - /// Both validators and nominators will be represented by indices in the solution. The - /// indices should respect the corresponding types ([`ValidatorIndex`] and - /// [`NominatorIndex`]). Moreover, they should be valid when used to index into - /// [`SnapshotValidators`] and [`SnapshotNominators`]. Any invalid index will cause the - /// solution to be rejected. These two storage items are set during the election window and - /// may be used to determine the indices. - /// - /// A solution is valid if: - /// - /// 0. It is submitted when [`EraElectionStatus`] is `Open`. - /// 1. Its claimed score is equal to the score computed on-chain. - /// 2. Presents the correct number of winners. - /// 3. All indexes must be value according to the snapshot vectors. All edge values must - /// also be correct and should not overflow the granularity of the ratio type (i.e. 256 - /// or billion). - /// 4. For each edge, all targets are actually nominated by the voter. - /// 5. Has correct self-votes. - /// - /// A solutions score is consisted of 3 parameters: - /// - /// 1. `min { support.total }` for each support of a winner. This value should be maximized. - /// 2. `sum { support.total }` for each support of a winner. This value should be minimized. - /// 3. `sum { support.total^2 }` for each support of a winner. This value should be - /// minimized (to ensure less variance) - /// - /// # - /// The transaction is assumed to be the longest path, a better solution. - /// - Initial solution is almost the same. - /// - Worse solution is retraced in pre-dispatch-checks which sets its own weight. - /// # - #[weight = T::WeightInfo::submit_solution_better( - size.validators.into(), - size.nominators.into(), - compact.len() as u32, - winners.len() as u32, - )] - pub fn submit_election_solution( - origin, - winners: Vec, - compact: CompactAssignments, - score: ElectionScore, - era: EraIndex, - size: ElectionSize, - ) -> DispatchResultWithPostInfo { - let _who = ensure_signed(origin)?; - Self::check_and_replace_solution( - winners, - compact, - ElectionCompute::Signed, - score, - era, - size, - ) - } - - /// Unsigned version of `submit_election_solution`. - /// - /// Note that this must pass the [`ValidateUnsigned`] check which only allows transactions - /// from the local node to be included. In other words, only the block author can include a - /// transaction in the block. - /// - /// # - /// See [`submit_election_solution`]. 
- /// # - #[weight = T::WeightInfo::submit_solution_better( - size.validators.into(), - size.nominators.into(), - compact.len() as u32, - winners.len() as u32, - )] - pub fn submit_election_solution_unsigned( - origin, - winners: Vec, - compact: CompactAssignments, - score: ElectionScore, - era: EraIndex, - size: ElectionSize, - ) -> DispatchResultWithPostInfo { - ensure_none(origin)?; - let adjustments = Self::check_and_replace_solution( - winners, - compact, - ElectionCompute::Unsigned, - score, - era, - size, - ).expect( - "An unsigned solution can only be submitted by validators; A validator should \ - always produce correct solutions, else this block should not be imported, thus \ - effectively depriving the validators from their authoring reward. Hence, this panic - is expected." - ); - - Ok(adjustments) - } - } -} - -impl Module { - /// The total balance that can be slashed from a stash account as of right now. - pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { - // Weight note: consider making the stake accessible through stash. - Self::bonded(stash).and_then(Self::ledger).map(|l| l.active).unwrap_or_default() - } - - /// Internal impl of [`slashable_balance_of`] that returns [`VoteWeight`]. - pub fn slashable_balance_of_vote_weight(stash: &T::AccountId, issuance: BalanceOf) -> VoteWeight { - T::CurrencyToVote::to_vote(Self::slashable_balance_of(stash), issuance) - } - - /// Returns a closure around `slashable_balance_of_vote_weight` that can be passed around. - /// - /// This prevents call sites from repeatedly requesting `total_issuance` from backend. But it is - /// important to be only used while the total issuance is not changing. - pub fn slashable_balance_of_fn() -> Box VoteWeight> { - // NOTE: changing this to unboxed `impl Fn(..)` return type and the module will still - // compile, while some types in mock fail to resolve. - let issuance = T::Currency::total_issuance(); - Box::new(move |who: &T::AccountId| -> VoteWeight { - Self::slashable_balance_of_vote_weight(who, issuance) - }) - } - - /// Dump the list of validators and nominators into vectors and keep them on-chain. - /// - /// This data is used to efficiently evaluate election results. returns `true` if the operation - /// is successful. - pub fn create_stakers_snapshot() -> (bool, Weight) { - let mut consumed_weight = 0; - let mut add_db_reads_writes = |reads, writes| { - consumed_weight += T::DbWeight::get().reads_writes(reads, writes); - }; - let validators = >::iter().map(|(v, _)| v).collect::>(); - let mut nominators = >::iter().map(|(n, _)| n).collect::>(); - - let num_validators = validators.len(); - let num_nominators = nominators.len(); - add_db_reads_writes((num_validators + num_nominators) as Weight, 0); - - if - num_validators > MAX_VALIDATORS || - num_nominators.saturating_add(num_validators) > MAX_NOMINATORS - { - log!( - warn, - "💸 Snapshot size too big [{} <> {}][{} <> {}].", - num_validators, - MAX_VALIDATORS, - num_nominators, - MAX_NOMINATORS, - ); - (false, consumed_weight) - } else { - // all validators nominate themselves; - nominators.extend(validators.clone()); - - >::put(validators); - >::put(nominators); - add_db_reads_writes(0, 2); - (true, consumed_weight) - } - } - - /// Clears both snapshots of stakers. 
- fn kill_stakers_snapshot() { - >::kill(); - >::kill(); - } - - fn do_payout_stakers( - validator_stash: T::AccountId, - era: EraIndex, - ) -> DispatchResult { - // Validate input data - let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; - ensure!(era <= current_era, Error::::InvalidEraToReward); - let history_depth = Self::history_depth(); - ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); - - // Note: if era has no reward to be claimed, era may be future. better not to update - // `ledger.claimed_rewards` in this case. - let era_payout = >::get(&era) - .ok_or_else(|| Error::::InvalidEraToReward)?; - - let controller = Self::bonded(&validator_stash).ok_or(Error::::NotStash)?; - let mut ledger = >::get(&controller).ok_or_else(|| Error::::NotController)?; - - ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); - match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => Err(Error::::AlreadyClaimed)?, - Err(pos) => ledger.claimed_rewards.insert(pos, era), - } - - let exposure = >::get(&era, &ledger.stash); - - /* Input data seems good, no errors allowed after this point */ - - >::insert(&controller, &ledger); - - // Get Era reward points. It has TOTAL and INDIVIDUAL - // Find the fraction of the era reward that belongs to the validator - // Take that fraction of the eras rewards to split to nominator and validator - // - // Then look at the validator, figure out the proportion of their reward - // which goes to them and each of their nominators. - - let era_reward_points = >::get(&era); - let total_reward_points = era_reward_points.total; - let validator_reward_points = era_reward_points.individual.get(&ledger.stash) - .map(|points| *points) - .unwrap_or_else(|| Zero::zero()); - - // Nothing to do if they have no reward points. - if validator_reward_points.is_zero() { return Ok(())} - - // This is the fraction of the total reward that the validator and the - // nominators will get. - let validator_total_reward_part = Perbill::from_rational_approximation( - validator_reward_points, - total_reward_points, - ); - - // This is how much validator + nominators are entitled to. - let validator_total_payout = validator_total_reward_part * era_payout; - - let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash); - // Validator first gets a cut off the top. - let validator_commission = validator_prefs.commission; - let validator_commission_payout = validator_commission * validator_total_payout; - - let validator_leftover_payout = validator_total_payout - validator_commission_payout; - // Now let's calculate how this is split to the validator. - let validator_exposure_part = Perbill::from_rational_approximation( - exposure.own, - exposure.total, - ); - let validator_staking_payout = validator_exposure_part * validator_leftover_payout; - - // We can now make total validator payout: - if let Some(imbalance) = Self::make_payout( - &ledger.stash, - validator_staking_payout + validator_commission_payout - ) { - Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek())); - } - - // Lets now calculate how this is split to the nominators. - // Reward only the clipped exposures. Note this is not necessarily sorted. 
- for nominator in exposure.others.iter() { - let nominator_exposure_part = Perbill::from_rational_approximation( - nominator.value, - exposure.total, - ); - - let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; - // We can now make nominator payout: - if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { - Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek())); - } - } - - Ok(()) - } - - /// Update the ledger for a controller. - /// - /// This will also update the stash lock. - fn update_ledger( - controller: &T::AccountId, - ledger: &StakingLedger> - ) { - T::Currency::set_lock( - STAKING_ID, - &ledger.stash, - ledger.total, - WithdrawReasons::all(), - ); - >::insert(controller, ledger); - } - - /// Chill a stash account. - fn chill_stash(stash: &T::AccountId) { - >::remove(stash); - >::remove(stash); - } - - /// Actually make a payment to a staker. This uses the currency's reward function - /// to pay the right payee for the given staker account. - fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { - let dest = Self::payee(stash); - match dest { - RewardDestination::Controller => Self::bonded(stash) - .and_then(|controller| - Some(T::Currency::deposit_creating(&controller, amount)) - ), - RewardDestination::Stash => - T::Currency::deposit_into_existing(stash, amount).ok(), - RewardDestination::Staked => Self::bonded(stash) - .and_then(|c| Self::ledger(&c).map(|l| (c, l))) - .and_then(|(controller, mut l)| { - l.active += amount; - l.total += amount; - let r = T::Currency::deposit_into_existing(stash, amount).ok(); - Self::update_ledger(&controller, &l); - r - }), - RewardDestination::Account(dest_account) => { - Some(T::Currency::deposit_creating(&dest_account, amount)) - } - } - } - - /// Plan a new session potentially trigger a new era. - fn new_session(session_index: SessionIndex) -> Option> { - if let Some(current_era) = Self::current_era() { - // Initial era has been set. - - let current_era_start_session_index = Self::eras_start_session_index(current_era) - .unwrap_or_else(|| { - frame_support::print("Error: start_session_index must be set for current_era"); - 0 - }); - - let era_length = session_index.checked_sub(current_era_start_session_index) - .unwrap_or(0); // Must never happen. - - match ForceEra::get() { - Forcing::ForceNew => ForceEra::kill(), - Forcing::ForceAlways => (), - Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), - _ => { - // Either `ForceNone`, or `NotForcing && era_length < T::SessionsPerEra::get()`. - if era_length + 1 == T::SessionsPerEra::get() { - IsCurrentSessionFinal::put(true); - } else if era_length >= T::SessionsPerEra::get() { - // Should only happen when we are ready to trigger an era but we have ForceNone, - // otherwise previous arm would short circuit. - Self::close_election_window(); - } - return None - }, - } - - // new era. - Self::new_era(session_index) - } else { - // Set initial era - Self::new_era(session_index) - } - } - - /// Basic and cheap checks that we perform in validate unsigned, and in the execution. - /// - /// State reads: ElectionState, CurrentEr, QueuedScore. - /// - /// This function does weight refund in case of errors, which is based upon the fact that it is - /// called at the very beginning of the call site's function. 
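The arithmetic in `do_payout_stakers` above splits an era's payout in three steps: the validator's share of era points, commission off the top, then a pro-rata split of the remainder by exposure. A worked example with illustrative numbers:

```rust
use sp_runtime::Perbill;

fn main() {
	// Era payout 1_000; the validator earned 20 of 100 era points, charges
	// 10% commission, and holds 1_000 of a 4_000 total exposure.
	let era_payout: u128 = 1_000;
	let part = Perbill::from_rational_approximation(20u32, 100u32);
	let total = part * era_payout; // validator + nominators: 200.

	let commission = Perbill::from_percent(10) * total; // off the top: 20.
	let leftover = total - commission; // 180, split pro-rata by exposure.

	let own = Perbill::from_rational_approximation(1_000u128, 4_000u128) * leftover; // 45.
	let validator_payout = commission + own; // 65.
	let nominators_share = leftover - own; // 135, split by nominator exposure.

	assert_eq!((validator_payout, nominators_share), (65, 135));
}
```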
- pub fn pre_dispatch_checks(score: ElectionScore, era: EraIndex) -> DispatchResultWithPostInfo { - // discard solutions that are not in-time - // check window open - ensure!( - Self::era_election_status().is_open(), - Error::::OffchainElectionEarlySubmission.with_weight(T::DbWeight::get().reads(1)), - ); - - // check current era. - if let Some(current_era) = Self::current_era() { - ensure!( - current_era == era, - Error::::OffchainElectionEarlySubmission.with_weight(T::DbWeight::get().reads(2)), - ) - } - - // assume the given score is valid. Is it better than what we have on-chain, if we have any? - if let Some(queued_score) = Self::queued_score() { - ensure!( - is_score_better(score, queued_score, T::MinSolutionScoreBump::get()), - Error::::OffchainElectionWeakSubmission.with_weight(T::DbWeight::get().reads(3)), - ) - } - - Ok(None.into()) - } - - /// Checks a given solution and if correct and improved, writes it on chain as the queued result - /// of the next round. This may be called by both a signed and an unsigned transaction. - pub fn check_and_replace_solution( - winners: Vec, - compact_assignments: CompactAssignments, - compute: ElectionCompute, - claimed_score: ElectionScore, - era: EraIndex, - election_size: ElectionSize, - ) -> DispatchResultWithPostInfo { - // Do the basic checks. era, claimed score and window open. - let _ = Self::pre_dispatch_checks(claimed_score, era)?; - - // before we read any further state, we check that the unique targets in compact is same as - // compact. is a all in-memory check and easy to do. Moreover, it ensures that the solution - // is not full of bogus edges that can cause lots of reads to SlashingSpans. Thus, we can - // assume that the storage access of this function is always O(|winners|), not - // O(|compact.edge_count()|). - ensure!( - compact_assignments.unique_targets().len() == winners.len(), - Error::::OffchainElectionBogusWinnerCount, - ); - - // Check that the number of presented winners is sane. Most often we have more candidates - // than we need. Then it should be `Self::validator_count()`. Else it should be all the - // candidates. - let snapshot_validators_length = >::decode_len() - .map(|l| l as u32) - .ok_or_else(|| Error::::SnapshotUnavailable)?; - - // size of the solution must be correct. - ensure!( - snapshot_validators_length == u32::from(election_size.validators), - Error::::OffchainElectionBogusElectionSize, - ); - - // check the winner length only here and when we know the length of the snapshot validators - // length. - let desired_winners = Self::validator_count().min(snapshot_validators_length); - ensure!(winners.len() as u32 == desired_winners, Error::::OffchainElectionBogusWinnerCount); - - let snapshot_nominators_len = >::decode_len() - .map(|l| l as u32) - .ok_or_else(|| Error::::SnapshotUnavailable)?; - - // rest of the size of the solution must be correct. - ensure!( - snapshot_nominators_len == election_size.nominators, - Error::::OffchainElectionBogusElectionSize, - ); - - // decode snapshot validators. - let snapshot_validators = Self::snapshot_validators() - .ok_or(Error::::SnapshotUnavailable)?; - - // check if all winners were legit; this is rather cheap. Replace with accountId. - let winners = winners.into_iter().map(|widx| { - // NOTE: at the moment, since staking is explicitly blocking any offence until election - // is closed, we don't check here if the account id at `snapshot_validators[widx]` is - // actually a validator. If this ever changes, this loop needs to also check this. 
- snapshot_validators.get(widx as usize).cloned().ok_or(Error::::OffchainElectionBogusWinner) - }).collect::, Error>>()?; - - // decode the rest of the snapshot. - let snapshot_nominators = Self::snapshot_nominators() - .ok_or(Error::::SnapshotUnavailable)?; - - // helpers - let nominator_at = |i: NominatorIndex| -> Option { - snapshot_nominators.get(i as usize).cloned() - }; - let validator_at = |i: ValidatorIndex| -> Option { - snapshot_validators.get(i as usize).cloned() - }; - - // un-compact. - let assignments = compact_assignments.into_assignment( - nominator_at, - validator_at, - ).map_err(|e| { - // log the error since it is not propagated into the runtime error. - log!(warn, "💸 un-compacting solution failed due to {:?}", e); - Error::::OffchainElectionBogusCompact - })?; - - // check all nominators actually including the claimed vote. Also check correct self votes. - // Note that we assume all validators and nominators in `assignments` are properly bonded, - // because they are coming from the snapshot via a given index. - for Assignment { who, distribution } in assignments.iter() { - let is_validator = >::contains_key(&who); - let maybe_nomination = Self::nominators(&who); - - if !(maybe_nomination.is_some() ^ is_validator) { - // all of the indices must map to either a validator or a nominator. If this is ever - // not the case, then the locking system of staking is most likely faulty, or we - // have bigger problems. - log!(error, "💸 detected an error in the staking locking and snapshot."); - // abort. - return Err(Error::::OffchainElectionBogusNominator.into()); - } - - if !is_validator { - // a normal vote - let nomination = maybe_nomination.expect( - "exactly one of `maybe_validator` and `maybe_nomination.is_some` is true. \ - is_validator is false; maybe_nomination is some; qed" - ); - - // NOTE: we don't really have to check here if the sum of all edges are the - // nominator correct. Un-compacting assures this by definition. - - for (t, _) in distribution { - // each target in the provided distribution must be actually nominated by the - // nominator after the last non-zero slash. - if nomination.targets.iter().find(|&tt| tt == t).is_none() { - return Err(Error::::OffchainElectionBogusNomination.into()); - } - - if ::SlashingSpans::get(&t).map_or( - false, - |spans| nomination.submitted_in < spans.last_nonzero_slash(), - ) { - return Err(Error::::OffchainElectionSlashedNomination.into()); - } - } - } else { - // a self vote - ensure!(distribution.len() == 1, Error::::OffchainElectionBogusSelfVote); - ensure!(distribution[0].0 == *who, Error::::OffchainElectionBogusSelfVote); - // defensive only. A compact assignment of length one does NOT encode the weight and - // it is always created to be 100%. - ensure!( - distribution[0].1 == OffchainAccuracy::one(), - Error::::OffchainElectionBogusSelfVote, - ); - } - } - - // convert into staked assignments. - let staked_assignments = sp_npos_elections::assignment_ratio_to_staked( - assignments, - Self::slashable_balance_of_fn(), - ); - - // build the support map thereof in order to evaluate. - let supports = build_support_map::( - &winners, - &staked_assignments, - ).map_err(|_| Error::::OffchainElectionBogusEdge)?; - - // Check if the score is the same as the claimed one. - let submitted_score = evaluate_support(&supports); - ensure!(submitted_score == claimed_score, Error::::OffchainElectionBogusScore); - - // At last, alles Ok. Exposures and store the result. 
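The score check just above relies on `evaluate_support` and `is_score_better` from `sp-npos-elections`. A simplified standalone model of that scoring follows; the real comparison also applies the `MinSolutionScoreBump` epsilon, which is omitted here:

```rust
use std::collections::BTreeMap;

type Balance = u128;

/// `[minimal winner backing, total backing, sum of squared backings]`.
/// Bigger is better for the first two components, smaller for the third.
type Score = [Balance; 3];

fn evaluate(supports: &BTreeMap<&'static str, Vec<Balance>>) -> Score {
    let totals: Vec<Balance> = supports.values().map(|v| v.iter().sum()).collect();
    [
        totals.iter().copied().min().unwrap_or(0),
        totals.iter().sum(),
        totals.iter().map(|t| t * t).sum(),
    ]
}

fn is_better(new: Score, old: Score) -> bool {
    // prefer a larger minimal backing, then a larger total, then a more
    // even distribution (smaller sum of squares), lexicographically.
    (new[0], new[1], old[2]) > (old[0], old[1], new[2])
}

fn main() {
    let even: BTreeMap<_, _> =
        [("v1", vec![50, 50]), ("v2", vec![100])].into_iter().collect();
    let skewed: BTreeMap<_, _> =
        [("v1", vec![10]), ("v2", vec![190])].into_iter().collect();
    // both back the same total stake, but the even solution wins.
    assert!(is_better(evaluate(&even), evaluate(&skewed)));
}
```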
- let exposures = Self::collect_exposure(supports); - log!( - info, - "💸 A better solution (with compute {:?} and score {:?}) has been validated and stored on chain.", - compute, - submitted_score, - ); - - // write new results. - >::put(ElectionResult { - elected_stashes: winners, - compute, - exposures, - }); - QueuedScore::put(submitted_score); - - // emit event. - Self::deposit_event(RawEvent::SolutionStored(compute)); - - Ok(None.into()) - } - - /// Start a session potentially starting an era. - fn start_session(start_session: SessionIndex) { - let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); - if let Some(next_active_era_start_session_index) = - Self::eras_start_session_index(next_active_era) - { - if next_active_era_start_session_index == start_session { - Self::start_era(start_session); - } else if next_active_era_start_session_index < start_session { - // This arm should never happen, but better handle it than to stall the - // staking pallet. - frame_support::print("Warning: A session appears to have been skipped."); - Self::start_era(start_session); - } - } - } - - /// End a session potentially ending an era. - fn end_session(session_index: SessionIndex) { - if let Some(active_era) = Self::active_era() { - if let Some(next_active_era_start_session_index) = - Self::eras_start_session_index(active_era.index + 1) - { - if next_active_era_start_session_index == session_index + 1 { - Self::end_era(active_era, session_index); - } - } - } - } - - /// * Increment `active_era.index`, - /// * reset `active_era.start`, - /// * update `BondedEras` and apply slashes. - fn start_era(start_session: SessionIndex) { - let active_era = ActiveEra::mutate(|active_era| { - let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); - *active_era = Some(ActiveEraInfo { - index: new_index, - // Set new active era start in next `on_finalize`. To guarantee usage of `Time` - start: None, - }); - new_index - }); - - let bonding_duration = T::BondingDuration::get(); - - BondedEras::mutate(|bonded| { - bonded.push((active_era, start_session)); - - if active_era > bonding_duration { - let first_kept = active_era - bonding_duration; - - // prune out everything that's from before the first-kept index. - let n_to_prune = bonded.iter() - .take_while(|&&(era_idx, _)| era_idx < first_kept) - .count(); - - // kill slashing metadata. - for (pruned_era, _) in bonded.drain(..n_to_prune) { - slashing::clear_era_metadata::(pruned_era); - } - - if let Some(&(_, first_session)) = bonded.first() { - T::SessionInterface::prune_historical_up_to(first_session); - } - } - }); - - Self::apply_unapplied_slashes(active_era); - } - - /// Compute payout for era. - fn end_era(active_era: ActiveEraInfo, _session_index: SessionIndex) { - // Note: active_era_start can be None if end era is called during genesis config. - if let Some(active_era_start) = active_era.start { - let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - - let era_duration = now_as_millis_u64 - active_era_start; - let (validator_payout, max_payout) = inflation::compute_total_payout( - &T::RewardCurve::get(), - Self::eras_total_stake(&active_era.index), - T::Currency::total_issuance(), - // Duration of era; more than u64::MAX is rewarded as u64::MAX. - era_duration.saturated_into::(), - ); - let rest = max_payout.saturating_sub(validator_payout); - - Self::deposit_event(RawEvent::EraPayout(active_era.index, validator_payout, rest)); - - // Set ending era reward. 
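The payout computation in `end_era` above works purely off the measured era duration. A standalone sketch of the shape of that calculation, with an assumed flat 10% yearly rate and an assumed 80/20 split standing in for the pallet's piecewise-linear `RewardCurve`:

```rust
/// Milliseconds per Julian year (365.25 days).
const MILLISECONDS_PER_YEAR: u128 = 1000 * 3600 * 24 * 36525 / 100;

/// Pro-rate a yearly maximum over the era's measured duration and split it
/// between validators and the remainder handler.
fn era_payout(total_issuance: u128, era_duration_millis: u128) -> (u128, u128) {
    // assumed flat 10% yearly maximum, pro-rated by era duration.
    let max_payout = total_issuance / 10 * era_duration_millis / MILLISECONDS_PER_YEAR;
    // assumed 80/20 split between validators and the remainder
    // (e.g. routed to the treasury via `RewardRemainder`).
    let validator_payout = max_payout * 8 / 10;
    (validator_payout, max_payout - validator_payout)
}

fn main() {
    let six_hours = 6 * 3600 * 1000;
    let (to_validators, rest) = era_payout(1_000_000_000, six_hours);
    println!("validators: {to_validators}, remainder: {rest}");
    assert!(to_validators + rest <= 1_000_000_000 / 10);
}
```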
- >::insert(&active_era.index, validator_payout); - T::RewardRemainder::on_unbalanced(T::Currency::issue(rest)); - } - } - - /// Plan a new era. Return the potential new staking set. - fn new_era(start_session_index: SessionIndex) -> Option> { - // Increment or set current era. - let current_era = CurrentEra::mutate(|s| { - *s = Some(s.map(|s| s + 1).unwrap_or(0)); - s.unwrap() - }); - ErasStartSessionIndex::insert(¤t_era, &start_session_index); - - // Clean old era information. - if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) { - Self::clear_era_information(old_era); - } - - // Set staking information for new era. - let maybe_new_validators = Self::select_and_update_validators(current_era); - - maybe_new_validators - } - - /// Remove all the storage items associated with the election. - fn close_election_window() { - // Close window. - >::put(ElectionStatus::Closed); - // Kill snapshots. - Self::kill_stakers_snapshot(); - // Don't track final session. - IsCurrentSessionFinal::put(false); - } - - /// Select the new validator set at the end of the era. - /// - /// Runs [`try_do_phragmen`] and updates the following storage items: - /// - [`EraElectionStatus`]: with `None`. - /// - [`ErasStakers`]: with the new staker set. - /// - [`ErasStakersClipped`]. - /// - [`ErasValidatorPrefs`]. - /// - [`ErasTotalStake`]: with the new total stake. - /// - [`SnapshotValidators`] and [`SnapshotNominators`] are both removed. - /// - /// Internally, [`QueuedElected`], snapshots and [`QueuedScore`] are also consumed. - /// - /// If the election has been successful, It passes the new set upwards. - /// - /// This should only be called at the end of an era. - fn select_and_update_validators(current_era: EraIndex) -> Option> { - if let Some(ElectionResult::> { - elected_stashes, - exposures, - compute, - }) = Self::try_do_election() { - // Totally close the election round and data. - Self::close_election_window(); - - // Populate Stakers and write slot stake. - let mut total_stake: BalanceOf = Zero::zero(); - exposures.into_iter().for_each(|(stash, exposure)| { - total_stake = total_stake.saturating_add(exposure.total); - >::insert(current_era, &stash, &exposure); - - let mut exposure_clipped = exposure; - let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; - if exposure_clipped.others.len() > clipped_max_len { - exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); - exposure_clipped.others.truncate(clipped_max_len); - } - >::insert(¤t_era, &stash, exposure_clipped); - }); - - // Insert current era staking information - >::insert(¤t_era, total_stake); - - // collect the pref of all winners - for stash in &elected_stashes { - let pref = Self::validators(stash); - >::insert(¤t_era, stash, pref); - } - - // emit event - Self::deposit_event(RawEvent::StakingElection(compute)); - - log!( - info, - "💸 new validator set of size {:?} has been elected via {:?} for era {:?}", - elected_stashes.len(), - compute, - current_era, - ); - - Some(elected_stashes) - } else { - None - } - } - - /// Select a new validator set from the assembled stakers and their role preferences. It tries - /// first to peek into [`QueuedElected`]. Otherwise, it runs a new on-chain phragmen election. - /// - /// If [`QueuedElected`] and [`QueuedScore`] exists, they are both removed. No further storage - /// is updated. - fn try_do_election() -> Option>> { - // an election result from either a stored submission or locally executed one. 
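The exposure "clipping" in `select_and_update_validators` above keeps `ErasStakersClipped` bounded. A standalone sketch of just that step, with illustrative types:

```rust
#[derive(Debug)]
struct IndividualExposure {
    who: u64,
    value: u128,
}

/// Keep only the `max_rewarded` largest nominator exposures; the rest stay
/// exposed to slashing but are not eligible for reward payout.
fn clip(mut others: Vec<IndividualExposure>, max_rewarded: usize) -> Vec<IndividualExposure> {
    if others.len() > max_rewarded {
        // highest stake first, then drop the tail.
        others.sort_by(|a, b| b.value.cmp(&a.value));
        others.truncate(max_rewarded);
    }
    others
}

fn main() {
    let others = vec![
        IndividualExposure { who: 1, value: 30 },
        IndividualExposure { who: 2, value: 100 },
        IndividualExposure { who: 3, value: 60 },
    ];
    // with a limit of 2, only nominators 2 and 3 remain reward-eligible.
    let clipped = clip(others, 2);
    assert_eq!(clipped.iter().map(|e| e.who).collect::<Vec<_>>(), vec![2, 3]);
}
```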
- let next_result = >::take().or_else(|| - Self::do_on_chain_phragmen() - ); - - // either way, kill this. We remove it here to make sure it always has the exact same - // lifetime as `QueuedElected`. - QueuedScore::kill(); - - next_result - } - - /// Execute election and return the new results. The edge weights are processed into support - /// values. - /// - /// This is basically a wrapper around [`do_phragmen`] which translates - /// `PrimitiveElectionResult` into `ElectionResult`. - /// - /// No storage item is updated. - pub fn do_on_chain_phragmen() -> Option>> { - if let Some(phragmen_result) = Self::do_phragmen::(0) { - let elected_stashes = phragmen_result.winners.iter() - .map(|(s, _)| s.clone()) - .collect::>(); - let assignments = phragmen_result.assignments; - - let staked_assignments = sp_npos_elections::assignment_ratio_to_staked( - assignments, - Self::slashable_balance_of_fn(), - ); - - let supports = build_support_map::( - &elected_stashes, - &staked_assignments, - ) - .map_err(|_| - log!( - error, - "💸 on-chain phragmen is failing due to a problem in the result. This must be a bug." - ) - ) - .ok()?; - - // collect exposures - let exposures = Self::collect_exposure(supports); - - // In order to keep the property required by `on_session_ending` that we must return the - // new validator set even if it's the same as the old, as long as any underlying - // economic conditions have changed, we don't attempt to do any optimization where we - // compare against the prior set. - Some(ElectionResult::> { - elected_stashes, - exposures, - compute: ElectionCompute::OnChain, - }) - } else { - // There were not enough candidates for even our minimal level of functionality. This is - // bad. We should probably disable all functionality except for block production and let - // the chain keep producing blocks until we can decide on a sufficiently substantial - // set. TODO: #2494 - None - } - } - - /// Execute phragmen election and return the new results. No post-processing is applied and the - /// raw edge weights are returned. - /// - /// Self votes are added and nominations before the most recent slashing span are ignored. - /// - /// No storage item is updated. - pub fn do_phragmen(iterations: usize) - -> Option> - where ExtendedBalance: From> - { - let weight_of = Self::slashable_balance_of_fn(); - let mut all_nominators: Vec<(T::AccountId, VoteWeight, Vec)> = Vec::new(); - let mut all_validators = Vec::new(); - for (validator, _) in >::iter() { - // append self vote - let self_vote = (validator.clone(), weight_of(&validator), vec![validator.clone()]); - all_nominators.push(self_vote); - all_validators.push(validator); - } - - let nominator_votes = >::iter().map(|(nominator, nominations)| { - let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; - - // Filter out nomination targets which were nominated before the most recent - // slashing span. - targets.retain(|stash| { - ::SlashingSpans::get(&stash).map_or( - true, - |spans| submitted_in >= spans.last_nonzero_slash(), - ) - }); - - (nominator, targets) - }); - all_nominators.extend(nominator_votes.map(|(n, ns)| { - let s = weight_of(&n); - (n, s, ns) - })); - - if all_validators.len() < Self::minimum_validator_count().max(1) as usize { - // If we don't have enough candidates, nothing to do. - log!(error, "💸 Chain does not have enough staking candidates to operate. 
Era {:?}.", Self::current_era()); - None - } else { - seq_phragmen::<_, Accuracy>( - Self::validator_count() as usize, - all_validators, - all_nominators, - Some((iterations, 0)), // exactly run `iterations` rounds. - ) - .map_err(|err| log!(error, "Call to seq-phragmen failed due to {}", err)) - .ok() - } - } - - /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a [`Exposure`] - fn collect_exposure( - supports: SupportMap, - ) -> Vec<(T::AccountId, Exposure>)> { - let total_issuance = T::Currency::total_issuance(); - let to_currency = |e: ExtendedBalance| T::CurrencyToVote::to_currency(e, total_issuance); - - supports.into_iter().map(|(validator, support)| { - // build `struct exposure` from `support` - let mut others = Vec::with_capacity(support.voters.len()); - let mut own: BalanceOf = Zero::zero(); - let mut total: BalanceOf = Zero::zero(); - support.voters - .into_iter() - .map(|(nominator, weight)| (nominator, to_currency(weight))) - .for_each(|(nominator, stake)| { - if nominator == validator { - own = own.saturating_add(stake); - } else { - others.push(IndividualExposure { who: nominator, value: stake }); - } - total = total.saturating_add(stake); - }); - - let exposure = Exposure { - own, - others, - total, - }; - - (validator, exposure) - }).collect::)>>() - } - - /// Remove all associated data of a stash account from the staking system. - /// - /// Assumes storage is upgraded before calling. - /// - /// This is called: - /// - after a `withdraw_unbond()` call that frees all of a stash's bonded balance. - /// - through `reap_stash()` if the balance has fallen to zero (through slashing). - fn kill_stash(stash: &T::AccountId, num_slashing_spans: u32) -> DispatchResult { - let controller = >::get(stash).ok_or(Error::::NotStash)?; - - slashing::clear_stash_metadata::(stash, num_slashing_spans)?; - - >::remove(stash); - >::remove(&controller); - - >::remove(stash); - >::remove(stash); - >::remove(stash); - - system::Module::::dec_ref(stash); - - Ok(()) - } - - /// Clear all era information for given era. - fn clear_era_information(era_index: EraIndex) { - >::remove_prefix(era_index); - >::remove_prefix(era_index); - >::remove_prefix(era_index); - >::remove(era_index); - >::remove(era_index); - >::remove(era_index); - ErasStartSessionIndex::remove(era_index); - } - - /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. - fn apply_unapplied_slashes(active_era: EraIndex) { - let slash_defer_duration = T::SlashDeferDuration::get(); - ::EarliestUnappliedSlash::mutate(|earliest| if let Some(ref mut earliest) = earliest { - let keep_from = active_era.saturating_sub(slash_defer_duration); - for era in (*earliest)..keep_from { - let era_slashes = ::UnappliedSlashes::take(&era); - for slash in era_slashes { - slashing::apply_slash::(slash); - } - } - - *earliest = (*earliest).max(keep_from) - }) - } - - /// Add reward points to validators using their stash account ID. - /// - /// Validators are keyed by stash account ID and must be in the current elected set. - /// - /// For each element in the iterator the given number of points in u32 is added to the - /// validator, thus duplicates are handled. - /// - /// At the end of the era each the total payout will be distributed among validator - /// relatively to their points. - /// - /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. - /// If you need to reward lots of validator consider using `reward_by_indices`. 
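A standalone sketch of the bookkeeping that `reward_by_ids` (defined next) performs on `ErasRewardPoints`, using plain Rust types: duplicates accumulate, and the era total moves in lockstep with the individual entries.

```rust
use std::collections::BTreeMap;

#[derive(Default, Debug)]
struct EraRewardPoints {
    total: u32,
    individual: BTreeMap<u64, u32>,
}

impl EraRewardPoints {
    fn reward_by_ids(&mut self, points: impl IntoIterator<Item = (u64, u32)>) {
        for (validator, amount) in points {
            // duplicates are fine: they simply accumulate.
            *self.individual.entry(validator).or_default() += amount;
            self.total += amount;
        }
    }
}

fn main() {
    let mut era = EraRewardPoints::default();
    // block author gets 20, an uncle author 1, the author again 2.
    era.reward_by_ids(vec![(11, 20), (21, 1), (11, 2)]);
    assert_eq!(era.individual[&11], 22);
    assert_eq!(era.total, 23);
}
```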
- pub fn reward_by_ids( - validators_points: impl IntoIterator - ) { - if let Some(active_era) = Self::active_era() { - >::mutate(active_era.index, |era_rewards| { - for (validator, points) in validators_points.into_iter() { - *era_rewards.individual.entry(validator).or_default() += points; - era_rewards.total += points; - } - }); - } - } - - /// Ensures that at the end of the current session there will be a new era. - fn ensure_new_era() { - match ForceEra::get() { - Forcing::ForceAlways | Forcing::ForceNew => (), - _ => ForceEra::put(Forcing::ForceNew), - } - } - - fn will_era_be_forced() -> bool { - match ForceEra::get() { - Forcing::ForceAlways | Forcing::ForceNew => true, - Forcing::ForceNone | Forcing::NotForcing => false, - } - } - - #[cfg(feature = "runtime-benchmarks")] - pub fn add_era_stakers(current_era: EraIndex, controller: T::AccountId, exposure: Exposure>) { - >::insert(¤t_era, &controller, &exposure); - } - - #[cfg(feature = "runtime-benchmarks")] - pub fn put_election_status(status: ElectionStatus::) { - >::put(status); - } - - #[cfg(feature = "runtime-benchmarks")] - pub fn set_slash_reward_fraction(fraction: Perbill) { - SlashRewardFraction::put(fraction); - } -} - -/// In this implementation `new_session(session)` must be called before `end_session(session-1)` -/// i.e. the new session must be planned before the ending of the previous session. -/// -/// Once the first new_session is planned, all session must start and then end in order, though -/// some session can lag in between the newest session planned and the latest session started. -impl pallet_session::SessionManager for Module { - fn new_session(new_index: SessionIndex) -> Option> { - Self::new_session(new_index) - } - fn start_session(start_index: SessionIndex) { - Self::start_session(start_index) - } - fn end_session(end_index: SessionIndex) { - Self::end_session(end_index) - } -} - -impl historical::SessionManager>> for Module { - fn new_session(new_index: SessionIndex) - -> Option>)>> - { - >::new_session(new_index).map(|validators| { - let current_era = Self::current_era() - // Must be some as a new era has been created. - .unwrap_or(0); - - validators.into_iter().map(|v| { - let exposure = Self::eras_stakers(current_era, &v); - (v, exposure) - }).collect() - }) - } - fn start_session(start_index: SessionIndex) { - >::start_session(start_index) - } - fn end_session(end_index: SessionIndex) { - >::end_session(end_index) - } -} - -/// Add reward points to block authors: -/// * 20 points to the block producer for producing a (non-uncle) block in the relay chain, -/// * 2 points to the block producer for each reference to a previously unreferenced uncle, and -/// * 1 point to the producer of each referenced uncle block. -impl pallet_authorship::EventHandler for Module - where - T: Trait + pallet_authorship::Trait + pallet_session::Trait -{ - fn note_author(author: T::AccountId) { - Self::reward_by_ids(vec![(author, 20)]) - } - fn note_uncle(author: T::AccountId, _age: T::BlockNumber) { - Self::reward_by_ids(vec![ - (>::author(), 2), - (author, 1) - ]) - } -} - -/// A `Convert` implementation that finds the stash of the given controller account, -/// if any. -pub struct StashOf(sp_std::marker::PhantomData); - -impl Convert> for StashOf { - fn convert(controller: T::AccountId) -> Option { - >::ledger(&controller).map(|l| l.stash) - } -} - -/// A typed conversion from stash account ID to the active exposure of nominators -/// on that account. 
-/// -/// Active exposure is the exposure of the validator set currently validating, i.e. in -/// `active_era`. It can differ from the latest planned exposure in `current_era`. -pub struct ExposureOf(sp_std::marker::PhantomData); - -impl Convert>>> - for ExposureOf -{ - fn convert(validator: T::AccountId) -> Option>> { - if let Some(active_era) = >::active_era() { - Some(>::eras_stakers(active_era.index, &validator)) - } else { - None - } - } -} - -/// This is intended to be used with `FilterHistoricalOffences`. -impl - OnOffenceHandler, Weight> -for Module where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, - FullIdentificationOf = ExposureOf, - >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, - T::ValidatorIdOf: Convert< - ::AccountId, - Option<::AccountId>, - >, -{ - fn on_offence( - offenders: &[OffenceDetails>], - slash_fraction: &[Perbill], - slash_session: SessionIndex, - ) -> Result { - if !Self::can_report() { - return Err(()) - } - - let reward_proportion = SlashRewardFraction::get(); - let mut consumed_weight: Weight = 0; - let mut add_db_reads_writes = |reads, writes| { - consumed_weight += T::DbWeight::get().reads_writes(reads, writes); - }; - - let active_era = { - let active_era = Self::active_era(); - add_db_reads_writes(1, 0); - if active_era.is_none() { - // this offence need not be re-submitted. - return Ok(consumed_weight) - } - active_era.expect("value checked not to be `None`; qed").index - }; - let active_era_start_session_index = Self::eras_start_session_index(active_era) - .unwrap_or_else(|| { - frame_support::print("Error: start_session_index must be set for current_era"); - 0 - }); - add_db_reads_writes(1, 0); - - let window_start = active_era.saturating_sub(T::BondingDuration::get()); - - // fast path for active-era report - most likely. - // `slash_session` cannot be in a future active era. It must be in `active_era` or before. - let slash_era = if slash_session >= active_era_start_session_index { - active_era - } else { - let eras = BondedEras::get(); - add_db_reads_writes(1, 0); - - // reverse because it's more likely to find reports from recent eras. - match eras.iter().rev().filter(|&&(_, ref sesh)| sesh <= &slash_session).next() { - Some(&(ref slash_era, _)) => *slash_era, - // before bonding period. defensive - should be filtered out. - None => return Ok(consumed_weight), - } - }; - - ::EarliestUnappliedSlash::mutate(|earliest| { - if earliest.is_none() { - *earliest = Some(active_era) - } - }); - add_db_reads_writes(1, 1); - - let slash_defer_duration = T::SlashDeferDuration::get(); - - let invulnerables = Self::invulnerables(); - add_db_reads_writes(1, 0); - - for (details, slash_fraction) in offenders.iter().zip(slash_fraction) { - let (stash, exposure) = &details.offender; - - // Skip if the validator is invulnerable. 
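The era-resolution logic above (fast path for the active era, then a reverse scan of `BondedEras`) can be modelled standalone as follows; the tuple types are simplified stand-ins for the pallet's era and session indices:

```rust
/// Resolve the era a report for `slash_session` belongs to. Reports for the
/// active era take the fast path; older ones are resolved by scanning the
/// bonded eras (pairs of era index and start session) from the most recent
/// entry backwards. Reports from before the bonding period yield `None`.
fn slash_era_for(
    slash_session: u32,
    active_era: u32,
    active_era_start_session: u32,
    bonded_eras: &[(u32, u32)], // (era_index, start_session_index)
) -> Option<u32> {
    if slash_session >= active_era_start_session {
        return Some(active_era);
    }
    // reverse scan: recent eras are the most likely match.
    bonded_eras
        .iter()
        .rev()
        .find(|&&(_, start)| start <= slash_session)
        .map(|&(era, _)| era)
}

fn main() {
    let bonded = [(3, 9), (4, 12), (5, 15)];
    assert_eq!(slash_era_for(16, 5, 15, &bonded), Some(5)); // fast path
    assert_eq!(slash_era_for(13, 5, 15, &bonded), Some(4)); // older era
    assert_eq!(slash_era_for(2, 5, 15, &bonded), None); // before bonding period
}
```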
- if invulnerables.contains(stash) { - continue - } - - let unapplied = slashing::compute_slash::(slashing::SlashParams { - stash, - slash: *slash_fraction, - exposure, - slash_era, - window_start, - now: active_era, - reward_proportion, - }); - - if let Some(mut unapplied) = unapplied { - let nominators_len = unapplied.others.len() as u64; - let reporters_len = details.reporters.len() as u64; - - { - let upper_bound = 1 /* Validator/NominatorSlashInEra */ + 2 /* fetch_spans */; - let rw = upper_bound + nominators_len * upper_bound; - add_db_reads_writes(rw, rw); - } - unapplied.reporters = details.reporters.clone(); - if slash_defer_duration == 0 { - // apply right away. - slashing::apply_slash::(unapplied); - { - let slash_cost = (6, 5); - let reward_cost = (2, 2); - add_db_reads_writes( - (1 + nominators_len) * slash_cost.0 + reward_cost.0 * reporters_len, - (1 + nominators_len) * slash_cost.1 + reward_cost.1 * reporters_len - ); - } - } else { - // defer to end of some `slash_defer_duration` from now. - ::UnappliedSlashes::mutate( - active_era, - move |for_later| for_later.push(unapplied), - ); - add_db_reads_writes(1, 1); - } - } else { - add_db_reads_writes(4 /* fetch_spans */, 5 /* kick_out_if_recent */) - } - } - - Ok(consumed_weight) - } - - fn can_report() -> bool { - Self::era_election_status().is_closed() - } -} - -/// Filter historical offences out and only allow those from the bonding period. -pub struct FilterHistoricalOffences { - _inner: sp_std::marker::PhantomData<(T, R)>, -} - -impl ReportOffence - for FilterHistoricalOffences, R> where - T: Trait, - R: ReportOffence, - O: Offence, -{ - fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { - // disallow any slashing from before the current bonding period. - let offence_session = offence.session_index(); - let bonded_eras = BondedEras::get(); - - if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { - R::report_offence(reporters, offence) - } else { - >::deposit_event( - RawEvent::OldSlashingReportDiscarded(offence_session) - ); - Ok(()) - } - } + } fn is_known_offence(offenders: &[Offender], time_slot: &O::TimeSlot) -> bool { R::is_known_offence(offenders, time_slot) } } - -#[allow(deprecated)] -impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::submit_election_solution_unsigned( - _, - _, - score, - era, - _, - ) = call { - use offchain_election::DEFAULT_LONGEVITY; - - // discard solution not coming from the local OCW. - match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } - _ => { - log!(debug, "rejecting unsigned transaction because it is not local/in-block."); - return InvalidTransaction::Call.into(); - } - } - - if let Err(error_with_post_info) = Self::pre_dispatch_checks(*score, *era) { - let invalid = to_invalid(error_with_post_info); - log!( - debug, - "💸 validate unsigned pre dispatch checks failed due to error #{:?}.", - invalid, - ); - return invalid.into(); - } - - log!(debug, "💸 validateUnsigned succeeded for a solution at era {}.", era); - - ValidTransaction::with_tag_prefix("StakingOffchain") - // The higher the score[0], the better a solution is. - .priority(T::UnsignedPriority::get().saturating_add(score[0].saturated_into())) - // Defensive only. A single solution can exist in the pool per era. 
Each validator - // will run OCW at most once per era, hence there should never exist more than one - // transaction anyhow. - .and_provides(era) - // Note: this can be more accurate in the future. We do something like - // `era_end_block - current_block` but that is not needed now as we eagerly run - // offchain workers now and the above should be same as `T::ElectionLookahead` - // without the need to query more storage in the validation phase. If we randomize - // offchain worker, then we might re-consider this. - .longevity(TryInto::::try_into( - T::ElectionLookahead::get()).unwrap_or(DEFAULT_LONGEVITY) - ) - // We don't propagate this. This can never the validated at a remote node. - .propagate(false) - .build() - } else { - InvalidTransaction::Call.into() - } - } - - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - if let Call::submit_election_solution_unsigned( - _, - _, - score, - era, - _, - ) = call { - // IMPORTANT NOTE: These checks are performed in the dispatch call itself, yet we need - // to duplicate them here to prevent a block producer from putting a previously - // validated, yet no longer valid solution on chain. - // OPTIMISATION NOTE: we could skip this in the `submit_election_solution_unsigned` - // since we already do it here. The signed version needs it though. Yer for now we keep - // this duplicate check here so both signed and unsigned can use a singular - // `check_and_replace_solution`. - Self::pre_dispatch_checks(*score, *era) - .map(|_| ()) - .map_err(to_invalid) - .map_err(Into::into) - } else { - Err(InvalidTransaction::Call.into()) - } - } -} - -/// Check that list is sorted and has no duplicates. -fn is_sorted_and_unique(list: &[u32]) -> bool { - list.windows(2).all(|w| w[0] < w[1]) -} - -/// convert a DispatchErrorWithPostInfo to a custom InvalidTransaction with the inner code being the -/// error number. -fn to_invalid(error_with_post_info: DispatchErrorWithPostInfo) -> InvalidTransaction { - let error = error_with_post_info.error; - let error_number = match error { - DispatchError::Module { error, ..} => error, - _ => 0, - }; - InvalidTransaction::Custom(error_number) -} diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs new file mode 100644 index 0000000000000..d7fa2afc63082 --- /dev/null +++ b/frame/staking/src/migrations.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and + +//! Storage migrations for the Staking pallet. 
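Before the module body, a standalone sketch of the pre-check/migrate/version-bump pattern that the `v7` and `v6` modules below follow; the types here are plain Rust stand-ins for the pallet's storage items and weight accounting:

```rust
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, Copy, PartialEq)]
enum Releases {
    V6_0_0,
    V7_0_0,
}

/// Plain stand-in for the pallet's storage.
struct MockState {
    version: Releases,
    validators: Vec<u64>,
    nominators: Vec<u64>,
    counter_for_validators: u32,
    counter_for_nominators: u32,
}

/// Sanity checks that must hold before the migration may run.
fn pre_migrate(s: &MockState) -> Result<(), &'static str> {
    if s.version != Releases::V6_0_0 {
        return Err("unexpected storage version");
    }
    if s.counter_for_validators != 0 || s.counter_for_nominators != 0 {
        return Err("counters already set");
    }
    Ok(())
}

/// Perform the transition, bump the version, and report the work done
/// (reads proportional to the sets iterated, plus the two counter writes).
fn migrate(s: &mut MockState) -> u64 {
    s.counter_for_validators = s.validators.len() as u32;
    s.counter_for_nominators = s.nominators.len() as u32;
    s.version = Releases::V7_0_0;
    (s.validators.len() + s.nominators.len()) as u64 + 2
}

fn main() {
    let mut state = MockState {
        version: Releases::V6_0_0,
        validators: vec![11, 21],
        nominators: vec![101],
        counter_for_validators: 0,
        counter_for_nominators: 0,
    };
    pre_migrate(&state).unwrap();
    let weight = migrate(&mut state);
    assert_eq!((state.counter_for_validators, state.counter_for_nominators), (2, 1));
    assert_eq!(weight, 5);
}
```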
+ +use super::*; + +pub mod v7 { + use super::*; + + pub fn pre_migrate<T: Config>() -> Result<(), &'static str> { + assert!(CounterForValidators::<T>::get().is_zero(), "CounterForValidators already set."); + assert!(CounterForNominators::<T>::get().is_zero(), "CounterForNominators already set."); + assert!(StorageVersion::<T>::get() == Releases::V6_0_0); + Ok(()) + } + + pub fn migrate<T: Config>() -> Weight { + log!(info, "Migrating staking to Releases::V7_0_0"); + let validator_count = Validators::<T>::iter().count() as u32; + let nominator_count = Nominators::<T>::iter().count() as u32; + + CounterForValidators::<T>::put(validator_count); + CounterForNominators::<T>::put(nominator_count); + + StorageVersion::<T>::put(Releases::V7_0_0); + log!(info, "Completed staking migration to Releases::V7_0_0"); + + T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2) + } +} + +pub mod v6 { + use super::*; + use frame_support::{generate_storage_alias, traits::Get, weights::Weight}; + + // NOTE: value type doesn't matter, we just set it to () here. + generate_storage_alias!(Staking, SnapshotValidators => Value<()>); + generate_storage_alias!(Staking, SnapshotNominators => Value<()>); + generate_storage_alias!(Staking, QueuedElected => Value<()>); + generate_storage_alias!(Staking, QueuedScore => Value<()>); + generate_storage_alias!(Staking, EraElectionStatus => Value<()>); + generate_storage_alias!(Staking, IsCurrentSessionFinal => Value<()>); + + /// Check to execute prior to migration. + pub fn pre_migrate<T: Config>() -> Result<(), &'static str> { + // these may or may not exist. + log!(info, "SnapshotValidators.exists()? {:?}", SnapshotValidators::exists()); + log!(info, "SnapshotNominators.exists()? {:?}", SnapshotNominators::exists()); + log!(info, "QueuedElected.exists()? {:?}", QueuedElected::exists()); + log!(info, "QueuedScore.exists()? {:?}", QueuedScore::exists()); + // these must exist. + assert!(IsCurrentSessionFinal::exists(), "IsCurrentSessionFinal storage item not found!"); + assert!(EraElectionStatus::exists(), "EraElectionStatus storage item not found!"); + Ok(()) + } + + /// Migrate storage to v6. + pub fn migrate<T: Config>() -> Weight { + log!(info, "Migrating staking to Releases::V6_0_0"); + + SnapshotValidators::kill(); + SnapshotNominators::kill(); + QueuedElected::kill(); + QueuedScore::kill(); + EraElectionStatus::kill(); + IsCurrentSessionFinal::kill(); + + StorageVersion::<T>::put(Releases::V6_0_0); + log!(info, "Done."); + T::DbWeight::get().writes(6 + 1) + } +} diff --git a/frame/staking/src/mock.rs index 055ebb9730805..0357fa05cb1dd 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,19 @@ //! 
Test utilities +use crate as staking; use crate::*; +use frame_election_provider_support::onchain; use frame_support::{ - assert_ok, impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, - traits::{Currency, FindAuthor, Get, OnFinalize, OnInitialize}, - weights::{constants::RocksDbWeight, Weight}, - IterableStorageMap, StorageDoubleMap, StorageMap, StorageValue, + assert_ok, parameter_types, + traits::{ + Currency, FindAuthor, GenesisBuild, Get, Hooks, Imbalance, OnInitialize, OnUnbalanced, + OneSessionHandler, + }, + weights::constants::RocksDbWeight, }; use sp_core::H256; use sp_io; -use sp_npos_elections::{ - build_support_map, evaluate_support, reduce, ExtendedBalance, StakedAssignment, ElectionScore, -}; use sp_runtime::{ curve::PiecewiseLinear, testing::{Header, TestXt, UintAuthorityId}, @@ -38,6 +39,7 @@ use sp_staking::offence::{OffenceDetails, OnOffenceHandler}; use std::{cell::RefCell, collections::HashSet}; pub const INIT_TIMESTAMP: u64 = 30_000; +pub const BLOCK_TIME: u64 = 1000; /// The AccountId alias in this test module. pub(crate) type AccountId = u64; @@ -47,30 +49,27 @@ pub(crate) type Balance = u128; thread_local! { static SESSION: RefCell<(Vec, HashSet)> = RefCell::new(Default::default()); - static SESSION_PER_ERA: RefCell = RefCell::new(3); - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - static SLASH_DEFER_DURATION: RefCell = RefCell::new(0); - static ELECTION_LOOKAHEAD: RefCell = RefCell::new(0); - static PERIOD: RefCell = RefCell::new(1); - static MAX_ITERATIONS: RefCell = RefCell::new(0); } /// Another session handler struct to test on_disabled. pub struct OtherSessionHandler; -impl pallet_session::OneSessionHandler for OtherSessionHandler { +impl OneSessionHandler for OtherSessionHandler { type Key = UintAuthorityId; fn on_genesis_session<'a, I: 'a>(_: I) - where I: Iterator, AccountId: 'a {} + where + I: Iterator, + AccountId: 'a, + { + } - fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I,) - where I: Iterator, AccountId: 'a + fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I) + where + I: Iterator, + AccountId: 'a, { SESSION.with(|x| { - *x.borrow_mut() = ( - validators.map(|x| x.0.clone()).collect(), - HashSet::new(), - ) + *x.borrow_mut() = (validators.map(|x| x.0.clone()).collect(), HashSet::new()) }); } @@ -92,103 +91,54 @@ pub fn is_disabled(controller: AccountId) -> bool { SESSION.with(|d| d.borrow().1.contains(&stash)) } -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> Balance { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) - } -} - -pub struct SessionsPerEra; -impl Get for SessionsPerEra { - fn get() -> SessionIndex { - SESSION_PER_ERA.with(|v| *v.borrow()) - } -} -impl Get for SessionsPerEra { - fn get() -> BlockNumber { - SESSION_PER_ERA.with(|v| *v.borrow() as BlockNumber) - } -} - -pub struct ElectionLookahead; -impl Get for ElectionLookahead { - fn get() -> BlockNumber { - ELECTION_LOOKAHEAD.with(|v| *v.borrow()) - } -} - -pub struct Period; -impl Get for Period { - fn get() -> BlockNumber { - PERIOD.with(|v| *v.borrow()) - } -} - -pub struct SlashDeferDuration; -impl Get for SlashDeferDuration { - fn get() -> EraIndex { - SLASH_DEFER_DURATION.with(|v| *v.borrow()) - } -} - -pub struct MaxIterations; -impl Get for MaxIterations { - fn get() -> u32 { - MAX_ITERATIONS.with(|v| *v.borrow()) - } -} - -impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - staking::Staking, - } -} - -mod staking { - // Re-export needed for `impl_outer_event!`. - pub use super::super::*; -} -use frame_system as system; -use pallet_balances as balances; -use pallet_session as session; - -impl_outer_event! { - pub enum MetaEvent for Test { - system, - balances, - session, - staking, +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, } -} +); /// Author of block is always 11 pub struct Author11; impl FindAuthor for Author11 { fn find_author<'a, I>(_digests: I) -> Option - where I: 'a + IntoIterator, + where + I: 'a + IntoIterator, { Some(11) } } -// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; - parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = frame_support::weights::constants::WEIGHT_PER_SECOND * 2; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::constants::WEIGHT_PER_SECOND * 2 + ); pub const MaxLocks: u32 = 1024; + pub static SessionsPerEra: SessionIndex = 3; + pub static ExistentialDeposit: Balance = 1; + pub static SlashDeferDuration: EraIndex = 0; + pub static Period: BlockNumber = 5; + pub static Offset: BlockNumber = 0; } -impl frame_system::Trait for Test { - type BaseCallFilter = (); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -198,33 +148,29 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type Event = MetaEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = MetaEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = 
ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } parameter_types! { - pub const Offset: BlockNumber = 0; pub const UncleGenerations: u64 = 0; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(25); } @@ -233,12 +179,12 @@ sp_runtime::impl_opaque_keys! { pub other: OtherSessionHandler, } } -impl pallet_session::Trait for Test { +impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = (OtherSessionHandler,); - type Event = MetaEvent; + type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = crate::StashOf; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; @@ -246,20 +192,20 @@ impl pallet_session::Trait for Test { type WeightInfo = (); } -impl pallet_session::historical::Trait for Test { +impl pallet_session::historical::Config for Test { type FullIdentification = crate::Exposure; type FullIdentificationOf = crate::ExposureOf; } -impl pallet_authorship::Trait for Test { +impl pallet_authorship::Config for Test { type FindAuthor = Author11; type UncleGenerations = UncleGenerations; type FilterUncle = (); - type EventHandler = Module; + type EventHandler = Pallet; } parameter_types! { pub const MinimumPeriod: u64 = 5; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; @@ -279,9 +225,6 @@ parameter_types! { pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; pub const MaxNominatorRewardedPerValidator: u32 = 64; - pub const UnsignedPriority: u64 = 1 << 20; - pub const MinSolutionScoreBump: Perbill = Perbill::zero(); - pub const OffchainSolutionWeightLimit: Weight = MaximumBlockWeight::get(); } thread_local! 
{ @@ -299,12 +242,18 @@ impl OnUnbalanced> for RewardRemainderMock { } } -impl Trait for Test { +impl onchain::Config for Test { + type Accuracy = Perbill; + type DataProvider = Staking; +} + +impl Config for Test { + const MAX_NOMINATIONS: u32 = 16; type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = RewardRemainderMock; - type Event = MetaEvent; + type Event = Event; type Slash = (); type Reward = (); type SessionsPerEra = SessionsPerEra; @@ -312,15 +261,11 @@ impl Trait for Test { type SlashCancelOrigin = frame_system::EnsureRoot; type BondingDuration = BondingDuration; type SessionInterface = Self; - type RewardCurve = RewardCurve; + type EraPayout = ConvertCurve; type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type MaxIterations = MaxIterations; - type MinSolutionScoreBump = MinSolutionScoreBump; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = UnsignedPriority; - type OffchainSolutionWeightLimit = OffchainSolutionWeightLimit; + type ElectionProvider = onchain::OnChainSequentialPhragmen; + type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); } @@ -333,52 +278,46 @@ where } pub type Extrinsic = TestXt; +pub(crate) type StakingCall = crate::Call; +pub(crate) type TestRuntimeCall = ::Call; pub struct ExtBuilder { - session_length: BlockNumber, - election_lookahead: BlockNumber, - session_per_era: SessionIndex, - existential_deposit: Balance, - validator_pool: bool, nominate: bool, validator_count: u32, minimum_validator_count: u32, - slash_defer_duration: EraIndex, - fair: bool, - num_validators: Option, invulnerables: Vec, has_stakers: bool, - max_offchain_iterations: u32, + initialize_first_session: bool, + min_nominator_bond: Balance, + min_validator_bond: Balance, + balance_factor: Balance, + status: BTreeMap>, + stakes: BTreeMap, + stakers: Vec<(AccountId, AccountId, Balance, StakerStatus)>, } impl Default for ExtBuilder { fn default() -> Self { Self { - session_length: 1, - election_lookahead: 0, - session_per_era: 3, - existential_deposit: 1, - validator_pool: false, nominate: true, validator_count: 2, minimum_validator_count: 0, - slash_defer_duration: 0, - fair: true, - num_validators: None, + balance_factor: 1, invulnerables: vec![], has_stakers: true, - max_offchain_iterations: 0, + initialize_first_session: true, + min_nominator_bond: ExistentialDeposit::get(), + min_validator_bond: ExistentialDeposit::get(), + status: Default::default(), + stakes: Default::default(), + stakers: Default::default(), } } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: Balance) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn validator_pool(mut self, validator_pool: bool) -> Self { - self.validator_pool = validator_pool; + pub fn existential_deposit(self, existential_deposit: Balance) -> Self { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = existential_deposit); self } pub fn nominate(mut self, nominate: bool) -> Self { @@ -393,130 +332,167 @@ impl ExtBuilder { self.minimum_validator_count = count; self } - pub fn slash_defer_duration(mut self, eras: EraIndex) -> Self { - self.slash_defer_duration = eras; + pub fn slash_defer_duration(self, eras: EraIndex) -> Self { + SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = eras); self } - pub fn fair(mut self, is_fair: bool) -> Self { - self.fair = is_fair; + pub fn 
invulnerables(mut self, invulnerables: Vec) -> Self { + self.invulnerables = invulnerables; self } - pub fn num_validators(mut self, num_validators: u32) -> Self { - self.num_validators = Some(num_validators); + pub fn session_per_era(self, length: SessionIndex) -> Self { + SESSIONS_PER_ERA.with(|v| *v.borrow_mut() = length); self } - pub fn invulnerables(mut self, invulnerables: Vec) -> Self { - self.invulnerables = invulnerables; + pub fn period(self, length: BlockNumber) -> Self { + PERIOD.with(|v| *v.borrow_mut() = length); self } - pub fn session_per_era(mut self, length: SessionIndex) -> Self { - self.session_per_era = length; + pub fn has_stakers(mut self, has: bool) -> Self { + self.has_stakers = has; self } - pub fn election_lookahead(mut self, look: BlockNumber) -> Self { - self.election_lookahead = look; + pub fn initialize_first_session(mut self, init: bool) -> Self { + self.initialize_first_session = init; self } - pub fn session_length(mut self, length: BlockNumber) -> Self { - self.session_length = length; + pub fn offset(self, offset: BlockNumber) -> Self { + OFFSET.with(|v| *v.borrow_mut() = offset); self } - pub fn has_stakers(mut self, has: bool) -> Self { - self.has_stakers = has; + pub fn min_nominator_bond(mut self, amount: Balance) -> Self { + self.min_nominator_bond = amount; + self + } + pub fn min_validator_bond(mut self, amount: Balance) -> Self { + self.min_validator_bond = amount; + self + } + pub fn set_status(mut self, who: AccountId, status: StakerStatus) -> Self { + self.status.insert(who, status); self } - pub fn max_offchain_iterations(mut self, iterations: u32) -> Self { - self.max_offchain_iterations = iterations; + pub fn set_stake(mut self, who: AccountId, stake: Balance) -> Self { + self.stakes.insert(who, stake); self } - pub fn offchain_election_ext(self) -> Self { - self.session_per_era(4) - .session_length(5) - .election_lookahead(3) + pub fn add_staker( + mut self, + stash: AccountId, + ctrl: AccountId, + stake: Balance, + status: StakerStatus, + ) -> Self { + self.stakers.push((stash, ctrl, stake, status)); + self } - pub fn set_associated_constants(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = self.slash_defer_duration); - SESSION_PER_ERA.with(|v| *v.borrow_mut() = self.session_per_era); - ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = self.election_lookahead); - PERIOD.with(|v| *v.borrow_mut() = self.session_length); - MAX_ITERATIONS.with(|v| *v.borrow_mut() = self.max_offchain_iterations); + pub fn balance_factor(mut self, factor: Balance) -> Self { + self.balance_factor = factor; + self } - pub fn build(self) -> sp_io::TestExternalities { + fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); - self.set_associated_constants(); - let mut storage = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - let balance_factor = if self.existential_deposit > 1 { - 256 - } else { - 1 - }; - - let num_validators = self.num_validators.unwrap_or(self.validator_count); - let validators = (0..num_validators) - .map(|x| ((x + 1) * 10 + 1) as AccountId) - .collect::>(); + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); let _ = pallet_balances::GenesisConfig:: { balances: vec![ - (1, 10 * balance_factor), - (2, 20 * balance_factor), - (3, 300 * balance_factor), - (4, 400 * balance_factor), - (10, balance_factor), - (11, balance_factor * 1000), - (20, balance_factor), - (21, balance_factor * 
2000), - (30, balance_factor), - (31, balance_factor * 2000), - (40, balance_factor), - (41, balance_factor * 2000), - (100, 2000 * balance_factor), - (101, 2000 * balance_factor), + (1, 10 * self.balance_factor), + (2, 20 * self.balance_factor), + (3, 300 * self.balance_factor), + (4, 400 * self.balance_factor), + // controllers + (10, self.balance_factor), + (20, self.balance_factor), + (30, self.balance_factor), + (40, self.balance_factor), + (50, self.balance_factor), + // stashes + (11, self.balance_factor * 1000), + (21, self.balance_factor * 2000), + (31, self.balance_factor * 2000), + (41, self.balance_factor * 2000), + (51, self.balance_factor * 2000), + // optional nominator + (100, self.balance_factor * 2000), + (101, self.balance_factor * 2000), + // aux accounts + (60, self.balance_factor), + (61, self.balance_factor * 2000), + (70, self.balance_factor), + (71, self.balance_factor * 2000), + (80, self.balance_factor), + (81, self.balance_factor * 2000), // This allows us to have a total_payout different from 0. (999, 1_000_000_000_000), ], - }.assimilate_storage(&mut storage); + } + .assimilate_storage(&mut storage); let mut stakers = vec![]; if self.has_stakers { - let stake_21 = if self.fair { 1000 } else { 2000 }; - let stake_31 = if self.validator_pool { balance_factor * 1000 } else { 1 }; - let status_41 = if self.validator_pool { - StakerStatus::::Validator - } else { - StakerStatus::::Idle - }; - let nominated = if self.nominate { vec![11, 21] } else { vec![] }; stakers = vec![ - // (stash, controller, staked_amount, status) - (11, 10, balance_factor * 1000, StakerStatus::::Validator), - (21, 20, stake_21, StakerStatus::::Validator), - (31, 30, stake_31, StakerStatus::::Validator), - (41, 40, balance_factor * 1000, status_41), - // nominator - (101, 100, balance_factor * 500, StakerStatus::::Nominator(nominated)) + // (stash, ctrl, stake, status) + // these two will be elected in the default test where we elect 2. + (11, 10, self.balance_factor * 1000, StakerStatus::::Validator), + (21, 20, self.balance_factor * 1000, StakerStatus::::Validator), + // a loser validator + (31, 30, self.balance_factor * 500, StakerStatus::::Validator), + // an idle validator + (41, 40, self.balance_factor * 1000, StakerStatus::::Idle), ]; + // optionally add a nominator + if self.nominate { + stakers.push(( + 101, + 100, + self.balance_factor * 500, + StakerStatus::::Nominator(vec![11, 21]), + )) + } + // replace any of the statuses if needed. + self.status.into_iter().for_each(|(stash, status)| { + let (_, _, _, ref mut prev_status) = stakers + .iter_mut() + .find(|s| s.0 == stash) + .expect("set_status staker should exist; qed"); + *prev_status = status; + }); + // replace any of the stakes if needed. + self.stakes.into_iter().for_each(|(stash, stake)| { + let (_, _, ref mut prev_stake, _) = stakers + .iter_mut() + .find(|s| s.0 == stash) + .expect("set_stake staker should exist; qed."); + *prev_stake = stake; + }); + // extend stakers if needed. 
+ stakers.extend(self.stakers) } - let _ = GenesisConfig::{ - stakers: stakers, + + let _ = staking::GenesisConfig:: { + stakers, validator_count: self.validator_count, minimum_validator_count: self.minimum_validator_count, invulnerables: self.invulnerables, slash_reward_fraction: Perbill::from_percent(10), + min_nominator_bond: self.min_nominator_bond, + min_validator_bond: self.min_validator_bond, ..Default::default() } .assimilate_storage(&mut storage); let _ = pallet_session::GenesisConfig:: { - keys: validators.iter().map(|x| ( - *x, - *x, - SessionKeys { other: UintAuthorityId(*x as u64) } - )).collect(), - }.assimilate_storage(&mut storage); + keys: if self.has_stakers { + // genesis election will overwrite this, no worries. + Default::default() + } else { + // set some dummy validators in genesis. + (0..self.validator_count as u64) + .map(|x| (x, x, SessionKeys { other: UintAuthorityId(x as u64) })) + .collect() + }, + } + .assimilate_storage(&mut storage); let mut ext = sp_io::TestExternalities::from(storage); ext.execute_with(|| { @@ -524,13 +500,17 @@ impl ExtBuilder { SESSION.with(|x| *x.borrow_mut() = (validators.clone(), HashSet::new())); }); - // We consider all test to start after timestamp is initialized - // This must be ensured by having `timestamp::on_initialize` called before - // `staking::on_initialize` - ext.execute_with(|| { - System::set_block_number(1); - Timestamp::set_timestamp(INIT_TIMESTAMP); - }); + if self.initialize_first_session { + // We consider all test to start after timestamp is initialized This must be ensured by + // having `timestamp::on_initialize` called before `staking::on_initialize`. Also, if + // session length is 1, then it is already triggered. + ext.execute_with(|| { + System::set_block_number(1); + Session::on_initialize(1); + >::on_initialize(1); + Timestamp::set_timestamp(INIT_TIMESTAMP); + }); + } ext } @@ -541,24 +521,18 @@ impl ExtBuilder { } } -pub type System = frame_system::Module; -pub type Balances = pallet_balances::Module; -pub type Session = pallet_session::Module; -pub type Timestamp = pallet_timestamp::Module; -pub type Staking = Module; - -pub(crate) fn current_era() -> EraIndex { - Staking::current_era().unwrap() -} - fn post_conditions() { check_nominators(); check_exposures(); check_ledgers(); + check_count(); } -pub(crate) fn active_era() -> EraIndex { - Staking::active_era().unwrap().index +fn check_count() { + let nominator_count = Nominators::::iter().count() as u32; + let validator_count = Validators::::iter().count() as u32; + assert_eq!(nominator_count, CounterForNominators::::get()); + assert_eq!(validator_count, CounterForValidators::::get()); } fn check_ledgers() { @@ -583,42 +557,46 @@ fn check_nominators() { // in if the nomination was submitted before the current era. let era = active_era(); >::iter() - .filter_map(|(nominator, nomination)| - if nomination.submitted_in > era { - Some(nominator) - } else { - None - }) + .filter_map( + |(nominator, nomination)| { + if nomination.submitted_in > era { + Some(nominator) + } else { + None + } + }, + ) .for_each(|nominator| { - // must be bonded. - assert_is_stash(nominator); - let mut sum = 0; - Session::validators() - .iter() - .map(|v| Staking::eras_stakers(era, v)) - .for_each(|e| { - let individual = e.others.iter().filter(|e| e.who == nominator).collect::>(); - let len = individual.len(); - match len { - 0 => { /* not supporting this validator at all. 
*/ }, - 1 => sum += individual[0].value, - _ => panic!("nominator cannot back a validator more than once."), - }; - }); - - let nominator_stake = Staking::slashable_balance_of(&nominator); - // a nominator cannot over-spend. - assert!( - nominator_stake >= sum, - "failed: Nominator({}) stake({}) >= sum divided({})", - nominator, - nominator_stake, - sum, - ); - - let diff = nominator_stake - sum; - assert!(diff < 100); - }); + // must be bonded. + assert_is_stash(nominator); + let mut sum = 0; + Session::validators() + .iter() + .map(|v| Staking::eras_stakers(era, v)) + .for_each(|e| { + let individual = + e.others.iter().filter(|e| e.who == nominator).collect::>(); + let len = individual.len(); + match len { + 0 => { /* not supporting this validator at all. */ }, + 1 => sum += individual[0].value, + _ => panic!("nominator cannot back a validator more than once."), + }; + }); + + let nominator_stake = Staking::slashable_balance_of(&nominator); + // a nominator cannot over-spend. + assert!( + nominator_stake >= sum, + "failed: Nominator({}) stake({}) >= sum divided({})", + nominator, + nominator_stake, + sum, + ); + + let diff = nominator_stake - sum; + assert!(diff < 100); + }); } fn assert_is_stash(acc: AccountId) { @@ -630,21 +608,28 @@ fn assert_ledger_consistent(ctrl: AccountId) { let ledger = Staking::ledger(ctrl).expect("Not a controller."); let real_total: Balance = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); assert_eq!(real_total, ledger.total); + assert!( + ledger.active >= Balances::minimum_balance() || ledger.active == 0, + "{}: active ledger amount ({}) must be greater than ED {}", + ctrl, + ledger.active, + Balances::minimum_balance() + ); +} + +pub(crate) fn active_era() -> EraIndex { + Staking::active_era().unwrap().index +} + +pub(crate) fn current_era() -> EraIndex { + Staking::current_era().unwrap() } pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&stash, val); let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond( - Origin::signed(stash), - ctrl, - val, - RewardDestination::Controller, - )); - assert_ok!(Staking::validate( - Origin::signed(ctrl), - ValidatorPrefs::default() - )); + assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller)); + assert_ok!(Staking::validate(Origin::signed(ctrl), ValidatorPrefs::default())); } pub(crate) fn bond_nominator( @@ -655,69 +640,105 @@ pub(crate) fn bond_nominator( ) { let _ = Balances::make_free_balance_be(&stash, val); let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond( - Origin::signed(stash), - ctrl, - val, - RewardDestination::Controller, - )); + assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller)); assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); } +/// Progress to the given block, triggering session and era changes as we progress. +/// +/// This will finalize the previous block, initialize up to the given block, essentially simulating +/// a block import/propose process where we first initialize the block, then execute some stuff (not +/// in the function), and then finalize the block. 
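As a usage illustration of the builder and the progression helper defined next, here is a hypothetical test. It assumes the usual `build_and_execute` wrapper around the private `build` above (not shown in this hunk), and the account numbers follow the genesis conventions set up earlier in this file:

```rust
#[test]
fn era_progression_sketch() {
    ExtBuilder::default()
        .validator_count(2)
        .set_stake(31, 1500) // bump the "loser" validator's stake
        .build_and_execute(|| {
            // era 0 is active right after the first session is initialized.
            assert_eq!(active_era(), 0);

            // walk forward one full era: a session every `Period` blocks,
            // an era every `SessionsPerEra` sessions.
            start_active_era(1);
            assert_eq!(active_era(), 1);
            assert_eq!(current_era(), 1);
        });
}
```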
pub(crate) fn run_to_block(n: BlockNumber) { Staking::on_finalize(System::block_number()); - for b in System::block_number() + 1..=n { + for b in (System::block_number() + 1)..=n { System::set_block_number(b); Session::on_initialize(b); - Staking::on_initialize(b); + >::on_initialize(b); + Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); if b != n { Staking::on_finalize(System::block_number()); } } } +/// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. +pub(crate) fn start_session(session_index: SessionIndex) { + let end: u64 = if Offset::get().is_zero() { + (session_index as u64) * Period::get() + } else { + Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get() + }; + run_to_block(end); + // session must have progressed properly. + assert_eq!( + Session::current_index(), + session_index, + "current session index = {}, expected = {}", + Session::current_index(), + session_index, + ); +} + +/// Go one session forward. pub(crate) fn advance_session() { let current_index = Session::current_index(); start_session(current_index + 1); } -pub(crate) fn start_session(session_index: SessionIndex) { - assert_eq!(>::get(), 1, "start_session can only be used with session length 1."); - for i in Session::current_index()..session_index { - Staking::on_finalize(System::block_number()); - System::set_block_number((i + 1).into()); - Timestamp::set_timestamp(System::block_number() * 1000 + INIT_TIMESTAMP); - Session::on_initialize(System::block_number()); - Staking::on_initialize(System::block_number()); - } - - assert_eq!(Session::current_index(), session_index); -} - -// This start and activate the era given. -// Because the mock use pallet-session which delays session by one, this will be one session after -// the election happened, not the first session after the election has happened. -pub(crate) fn start_era(era_index: EraIndex) { +/// Progress until the given era. +pub(crate) fn start_active_era(era_index: EraIndex) { start_session((era_index * >::get()).into()); - assert_eq!(Staking::current_era().unwrap(), era_index); - assert_eq!(Staking::active_era().unwrap().index, era_index); + assert_eq!(active_era(), era_index); + // One way or another, current_era must have changed before the active era, so they must match + // at this point. + assert_eq!(current_era(), active_era()); } pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { - inflation::compute_total_payout( - ::RewardCurve::get(), - Staking::eras_total_stake(Staking::active_era().unwrap().index), + let (payout, _rest) = ::EraPayout::era_payout( + Staking::eras_total_stake(active_era()), Balances::total_issuance(), duration, - ).0 + ); + assert!(payout > 0); + payout +} + +pub(crate) fn maximum_payout_for_duration(duration: u64) -> Balance { + let (payout, rest) = ::EraPayout::era_payout( + Staking::eras_total_stake(active_era()), + Balances::total_issuance(), + duration, + ); + payout + rest +} + +/// Time it takes to finish a session. +/// +/// Note, if you see `time_per_session() - BLOCK_TIME`, it is fine. This is because we set the +/// timestamp after on_initialize, so the timestamp is always one block old. +pub(crate) fn time_per_session() -> u64 { + Period::get() * BLOCK_TIME +} + +/// Time it takes to finish an era. +/// +/// Note, if you see `time_per_era() - BLOCK_TIME`, it is fine. This is because we set the +/// timestamp after on_initialize, so the timestamp is always one block old. 
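// Sketch of the session-end arithmetic used by `start_session` above: with no
// offset, session `i` ends at block `i * period`; with an offset, the first
// session ends at `offset` and each later one lands a `period` apart. Plain
// integers stand in for the mock's `Offset`/`Period` parameter types.
fn session_end_block(session_index: u64, period: u64, offset: u64) -> u64 {
    if offset == 0 {
        session_index * period
    } else {
        offset + session_index.saturating_sub(1) * period
    }
}

fn main() {
    assert_eq!(session_end_block(3, 5, 0), 15);
    assert_eq!(session_end_block(1, 5, 2), 2); // first session ends at the offset
    assert_eq!(session_end_block(3, 5, 2), 12); // 2 + 2 * 5
}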
+pub(crate) fn time_per_era() -> u64 { + time_per_session() * SessionsPerEra::get() as u64 +} + +/// Time that will be calculated for the reward per era. +pub(crate) fn reward_time_per_era() -> u64 { + time_per_era() - BLOCK_TIME } pub(crate) fn reward_all_elected() { - let rewards = ::SessionInterface::validators() - .into_iter() - .map(|v| (v, 1)); + let rewards = ::SessionInterface::validators().into_iter().map(|v| (v, 1)); - >::reward_by_ids(rewards) + >::reward_by_ids(rewards) } pub(crate) fn validator_controllers() -> Vec { @@ -735,30 +756,32 @@ pub(crate) fn on_offence_in_era( slash_fraction: &[Perbill], era: EraIndex, ) { - let bonded_eras = crate::BondedEras::get(); + let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - let _ = Staking::on_offence(offenders, slash_fraction, start_session).unwrap(); - return; + let _ = Staking::on_offence(offenders, slash_fraction, start_session); + return } else if bonded_era > era { - break; + break } } if Staking::active_era().unwrap().index == era { - let _ = - Staking::on_offence( - offenders, - slash_fraction, - Staking::eras_start_session_index(era).unwrap() - ).unwrap(); + let _ = Staking::on_offence( + offenders, + slash_fraction, + Staking::eras_start_session_index(era).unwrap(), + ); } else { panic!("cannot slash in era {}", era); } } pub(crate) fn on_offence_now( - offenders: &[OffenceDetails>], + offenders: &[OffenceDetails< + AccountId, + pallet_session::historical::IdentificationTuple, + >], slash_fraction: &[Perbill], ) { let now = Staking::active_era().unwrap().index; @@ -767,218 +790,25 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( - &[ - OffenceDetails { - offender: (who.clone(), Staking::eras_stakers(Staking::active_era().unwrap().index, who.clone())), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); } -// winners will be chosen by simply their unweighted total backing stake. Nominator stake is -// distributed evenly. -pub(crate) fn horrible_npos_solution( - do_reduce: bool, -) -> (CompactAssignments, Vec, ElectionScore) { - let mut backing_stake_of: BTreeMap = BTreeMap::new(); - - // self stake - >::iter().for_each(|(who, _p)| { - *backing_stake_of.entry(who).or_insert(Zero::zero()) += Staking::slashable_balance_of(&who) - }); - - // add nominator stuff - >::iter().for_each(|(who, nomination)| { - nomination.targets.iter().for_each(|v| { - *backing_stake_of.entry(*v).or_insert(Zero::zero()) += - Staking::slashable_balance_of(&who) - }) - }); - - // elect winners - let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); - sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); - let winners: Vec = sorted - .iter() - .cloned() - .take(Staking::validator_count() as usize) - .collect(); - - // create assignments - let mut staked_assignment: Vec> = Vec::new(); - >::iter().for_each(|(who, nomination)| { - let mut dist: Vec<(AccountId, ExtendedBalance)> = Vec::new(); - nomination.targets.iter().for_each(|v| { - if winners.iter().find(|w| *w == v).is_some() { - dist.push((*v, ExtendedBalance::zero())); - } - }); - - if dist.len() == 0 { - return; - } - - // assign real stakes. just split the stake. 
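// The step below the "just split the stake" comment divides a nominator's
// stake evenly across its backed winners and hands the rounding leftover to
// the last one, so the parts always sum back to the full stake. A std-Rust
// sketch of that split, with u128 standing in for `ExtendedBalance`:
fn split_stake(stake: u128, targets: usize) -> Vec<u128> {
    assert!(targets > 0);
    let part = stake / targets as u128;
    let mut dist = vec![part; targets];
    *dist.last_mut().unwrap() += stake - part * targets as u128; // leftover to last
    dist
}

fn main() {
    let dist = split_stake(100, 3);
    assert_eq!(dist, vec![33, 33, 34]);
    assert_eq!(dist.iter().sum::<u128>(), 100);
}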
- let stake = Staking::slashable_balance_of(&who) as ExtendedBalance; - let mut sum: ExtendedBalance = Zero::zero(); - let dist_len = dist.len(); - { - dist.iter_mut().for_each(|(_, w)| { - let partial = stake / (dist_len as ExtendedBalance); - *w = partial; - sum += partial; - }); - } - - // assign the leftover to last. - { - let leftover = stake - sum; - let last = dist.last_mut().unwrap(); - last.1 += leftover; - } - - staked_assignment.push(StakedAssignment { - who, - distribution: dist, - }); - }); - - // Ensure that this result is worse than seq-phragmen. Otherwise, it should not have been used - // for testing. - let score = { - let (_, _, better_score) = prepare_submission_with(true, true, 0, |_| {}); - - let support = build_support_map::(&winners, &staked_assignment).unwrap(); - let score = evaluate_support(&support); - - assert!(sp_npos_elections::is_score_better::( - better_score, - score, - MinSolutionScoreBump::get(), - )); - - score - }; - - if do_reduce { - reduce(&mut staked_assignment); - } - - let snapshot_validators = Staking::snapshot_validators().unwrap(); - let snapshot_nominators = Staking::snapshot_nominators().unwrap(); - let nominator_index = |a: &AccountId| -> Option { - snapshot_nominators.iter().position(|x| x == a).map(|i| i as NominatorIndex) - }; - let validator_index = |a: &AccountId| -> Option { - snapshot_validators.iter().position(|x| x == a).map(|i| i as ValidatorIndex) - }; - - // convert back to ratio assignment. This takes less space. - let assignments_reduced = - sp_npos_elections::assignment_staked_to_ratio::(staked_assignment); - - let compact = - CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) - .unwrap(); - - // winner ids to index - let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); - - (compact, winners, score) -} - -/// Note: this should always logically reproduce [`offchain_election::prepare_submission`], yet we -/// cannot do it since we want to have `tweak` injected into the process. -/// -/// If the input is being tweaked in a way that the score cannot be compute accurately, -/// `compute_real_score` can be set to true. In this case a `Default` score is returned. -pub(crate) fn prepare_submission_with( - compute_real_score: bool, - do_reduce: bool, - iterations: usize, - tweak: impl FnOnce(&mut Vec>), -) -> (CompactAssignments, Vec, ElectionScore) { - // run election on the default stuff. - let sp_npos_elections::ElectionResult { - winners, - assignments, - } = Staking::do_phragmen::(iterations).unwrap(); - let winners = sp_npos_elections::to_without_backing(winners); - - let mut staked = sp_npos_elections::assignment_ratio_to_staked( - assignments, - Staking::slashable_balance_of_fn(), - ); - - // apply custom tweaks. awesome for testing. - tweak(&mut staked); - - if do_reduce { - reduce(&mut staked); - } - - // convert back to ratio assignment. This takes less space. 
- let snapshot_validators = Staking::snapshot_validators().expect("snapshot not created."); - let snapshot_nominators = Staking::snapshot_nominators().expect("snapshot not created."); - let nominator_index = |a: &AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .map_or_else( - || { println!("unable to find nominator index for {:?}", a); None }, - |i| Some(i as NominatorIndex), - ) - }; - let validator_index = |a: &AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .map_or_else( - || { println!("unable to find validator index for {:?}", a); None }, - |i| Some(i as ValidatorIndex), - ) - }; - - let assignments_reduced = sp_npos_elections::assignment_staked_to_ratio(staked); - - // re-compute score by converting, yet again, into staked type - let score = if compute_real_score { - let staked = sp_npos_elections::assignment_ratio_to_staked( - assignments_reduced.clone(), - Staking::slashable_balance_of_fn(), - ); - - let support_map = build_support_map::( - winners.as_slice(), - staked.as_slice(), - ).unwrap(); - evaluate_support::(&support_map) - } else { - Default::default() - }; - - let compact = - CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) - .expect("Failed to create compact"); - - // winner ids to index - let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); - - (compact, winners, score) -} - /// Make all validator and nominator request their payment pub(crate) fn make_all_reward_payment(era: EraIndex) { - let validators_with_reward = ErasRewardPoints::::get(era).individual.keys() + let validators_with_reward = ErasRewardPoints::::get(era) + .individual + .keys() .cloned() .collect::>(); // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { let ledger = >::get(&validator_controller).unwrap(); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), ledger.stash, era)); } } @@ -994,23 +824,21 @@ macro_rules! assert_session_era { $session, ); assert_eq!( - Staking::active_era().unwrap().index, + Staking::current_era().unwrap(), $era, - "wrong active era {} != {}", - Staking::active_era().unwrap().index, + "wrong current era {} != {}", + Staking::current_era().unwrap(), $era, ); }; } -pub(crate) fn staking_events() -> Vec> { - System::events().into_iter().map(|r| r.event).filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }).collect() +pub(crate) fn staking_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let Event::Staking(inner) = e { Some(inner) } else { None }) + .collect() } pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) { diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs deleted file mode 100644 index cb4d460f68035..0000000000000 --- a/frame/staking/src/offchain_election.rs +++ /dev/null @@ -1,592 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Helpers for offchain worker election. - -use crate::{ - Call, CompactAssignments, ElectionSize, Module, NominatorIndex, Nominators, OffchainAccuracy, - Trait, ValidatorIndex, WeightInfo, -}; -use codec::Decode; -use frame_support::{traits::Get, weights::Weight, IterableStorageMap}; -use frame_system::offchain::SubmitTransaction; -use sp_npos_elections::{ - build_support_map, evaluate_support, reduce, Assignment, ElectionResult, ElectionScore, - ExtendedBalance, -}; -use sp_runtime::{ - offchain::storage::StorageValueRef, traits::TrailingZeroInput, PerThing, RuntimeDebug, -}; -use sp_std::{convert::TryInto, prelude::*}; - -/// Error types related to the offchain election machinery. -#[derive(RuntimeDebug)] -pub enum OffchainElectionError { - /// election returned None. This means less candidate that minimum number of needed - /// validators were present. The chain is in trouble and not much that we can do about it. - ElectionFailed, - /// Submission to the transaction pool failed. - PoolSubmissionFailed, - /// The snapshot data is not available. - SnapshotUnavailable, - /// Error from npos-election crate. This usually relates to compact operation. - InternalElectionError(sp_npos_elections::Error), - /// One of the computed winners is invalid. - InvalidWinner, - /// A nominator is not available in the snapshot. - NominatorSnapshotCorrupt, -} - -impl From for OffchainElectionError { - fn from(e: sp_npos_elections::Error) -> Self { - Self::InternalElectionError(e) - } -} - -/// Storage key used to store the persistent offchain worker status. -pub(crate) const OFFCHAIN_HEAD_DB: &[u8] = b"parity/staking-election/"; -/// The repeat threshold of the offchain worker. This means we won't run the offchain worker twice -/// within a window of 5 blocks. -pub(crate) const OFFCHAIN_REPEAT: u32 = 5; -/// Default number of blocks for which the unsigned transaction should stay in the pool -pub(crate) const DEFAULT_LONGEVITY: u64 = 25; - -/// Checks if an execution of the offchain worker is permitted at the given block number, or not. -/// -/// This essentially makes sure that we don't run on previous blocks in case of a re-org, and we -/// don't run twice within a window of length [`OFFCHAIN_REPEAT`]. -/// -/// Returns `Ok(())` if offchain worker should happen, `Err(reason)` otherwise. -pub(crate) fn set_check_offchain_execution_status( - now: T::BlockNumber, -) -> Result<(), &'static str> { - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - let threshold = T::BlockNumber::from(OFFCHAIN_REPEAT); - - let mutate_stat = - storage.mutate::<_, &'static str, _>(|maybe_head: Option>| { - match maybe_head { - Some(Some(head)) if now < head => Err("fork."), - Some(Some(head)) if now >= head && now <= head + threshold => { - Err("recently executed.") - } - Some(Some(head)) if now > head + threshold => { - // we can run again now. Write the new head. - Ok(now) - } - _ => { - // value doesn't exists. Probably this node just booted up. Write, and run - Ok(now) - } - } - }); - - match mutate_stat { - // all good - Ok(Ok(_)) => Ok(()), - // failed to write. 
- Ok(Err(_)) => Err("failed to write to offchain db."), - // fork etc. - Err(why) => Err(why), - } -} - -/// The internal logic of the offchain worker of this module. This runs the phragmen election, -/// compacts and reduces the solution, computes the score and submits it back to the chain as an -/// unsigned transaction, without any signature. -pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { - let iters = get_balancing_iters::(); - // compute raw solution. Note that we use `OffchainAccuracy`. - let ElectionResult { - winners, - assignments, - } = >::do_phragmen::(iters) - .ok_or(OffchainElectionError::ElectionFailed)?; - - // process and prepare it for submission. - let (winners, compact, score, size) = prepare_submission::( - assignments, - winners, - true, - T::OffchainSolutionWeightLimit::get(), - )?; - - crate::log!( - info, - "💸 prepared a seq-phragmen solution with {} balancing iterations and score {:?}", - iters, - score, - ); - - // defensive-only: current era can never be none except genesis. - let current_era = >::current_era().unwrap_or_default(); - - // send it. - let call = Call::submit_election_solution_unsigned( - winners, - compact, - score, - current_era, - size, - ).into(); - - SubmitTransaction::>::submit_unsigned_transaction(call) - .map_err(|_| OffchainElectionError::PoolSubmissionFailed) -} - -/// Get a random number of iterations to run the balancing. -/// -/// Uses the offchain seed to generate a random number. -pub fn get_balancing_iters() -> usize { - match T::MaxIterations::get() { - 0 => 0, - max @ _ => { - let seed = sp_io::offchain::random_seed(); - let random = ::decode(&mut TrailingZeroInput::new(seed.as_ref())) - .expect("input is padded with zeroes; qed") % max.saturating_add(1); - random as usize - } - } -} - -/// Find the maximum `len` that a compact can have in order to fit into the block weight. -/// -/// This only returns a value between zero and `size.nominators`. -pub fn maximum_compact_len( - winners_len: u32, - size: ElectionSize, - max_weight: Weight, -) -> u32 { - use sp_std::cmp::Ordering; - - if size.nominators < 1 { - return size.nominators; - } - - let max_voters = size.nominators.max(1); - let mut voters = max_voters; - - // helper closures. - let weight_with = |voters: u32| -> Weight { - W::submit_solution_better( - size.validators.into(), - size.nominators.into(), - voters, - winners_len, - ) - }; - - let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - match current_weight.cmp(&max_weight) { - Ordering::Less => { - let next_voters = voters.checked_add(step); - match next_voters { - Some(voters) if voters < max_voters => Ok(voters), - _ => Err(()), - } - }, - Ordering::Greater => voters.checked_sub(step).ok_or(()), - Ordering::Equal => Ok(voters), - } - }; - - // First binary-search the right amount of voters - let mut step = voters / 2; - let mut current_weight = weight_with(voters); - while step > 0 { - match next_voters(current_weight, voters, step) { - // proceed with the binary search - Ok(next) if next != voters => { - voters = next; - }, - // we are out of bounds, break out of the loop. - Err(()) => { - break; - }, - // we found the right value - early exit the function. - Ok(next) => return next - } - step = step / 2; - current_weight = weight_with(voters); - } - - - // Time to finish. - // We might have reduced less than expected due to rounding error. Increase one last time if we - // have any room left, the reduce until we are sure we are below limit. 
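// A compact std-Rust model of the search above: find the largest voter count
// whose weight fits under `max_weight`, assuming (as the test stub further
// below does) that weight grows linearly with voter count. The linear fix-up
// loops that follow in the original handle the slack the halving search can
// leave behind; they are mirrored at the end of this sketch.
fn max_voters_within(max_voters: u32, max_weight: u64, weight_of: impl Fn(u32) -> u64) -> u32 {
    let mut voters = max_voters;
    let mut step = voters / 2;
    while step > 0 {
        if weight_of(voters) > max_weight {
            voters = voters.saturating_sub(step);
        } else if voters + step <= max_voters && weight_of(voters + step) <= max_weight {
            voters += step;
        }
        step /= 2;
    }
    // final linear fix-up, mirroring the loops below.
    while voters + 1 <= max_voters && weight_of(voters + 1) <= max_weight {
        voters += 1;
    }
    while voters > 0 && weight_of(voters) > max_weight {
        voters -= 1;
    }
    voters
}

fn main() {
    let weight_of = |v: u32| 1_000u64 * v as u64; // as in the `submit_solution_better` test stub
    assert_eq!(max_voters_within(10, 5_500, weight_of), 5);
    assert_eq!(max_voters_within(10, 22_000, weight_of), 10);
    assert_eq!(max_voters_within(10, 999, weight_of), 0);
}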
- while voters + 1 <= max_voters && weight_with(voters + 1) < max_weight { - voters += 1; - } - while voters.checked_sub(1).is_some() && weight_with(voters) > max_weight { - voters -= 1; - } - - debug_assert!( - weight_with(voters.min(size.nominators)) <= max_weight, - "weight_with({}) <= {}", voters.min(size.nominators), max_weight, - ); - voters.min(size.nominators) -} - -/// Greedily reduce the size of the a solution to fit into the block, w.r.t. weight. -/// -/// The weight of the solution is foremost a function of the number of voters (i.e. -/// `compact.len()`). Aside from this, the other components of the weight are invariant. The number -/// of winners shall not be changed (otherwise the solution is invalid) and the `ElectionSize` is -/// merely a representation of the total number of stakers. -/// -/// Thus, we reside to stripping away some voters. This means only changing the `compact` struct. -/// -/// Note that the solution is already computed, and the winners are elected based on the merit of -/// teh entire stake in the system. Nonetheless, some of the voters will be removed further down the -/// line. -/// -/// Indeed, the score must be computed **after** this step. If this step reduces the score too much, -/// then the solution will be discarded. -pub fn trim_to_weight( - maximum_allowed_voters: u32, - mut compact: CompactAssignments, - nominator_index: FN, -) -> Result -where - for<'r> FN: Fn(&'r T::AccountId) -> Option, -{ - match compact.len().checked_sub(maximum_allowed_voters as usize) { - Some(to_remove) if to_remove > 0 => { - // grab all voters and sort them by least stake. - let balance_of = >::slashable_balance_of_fn(); - let mut voters_sorted = >::iter() - .map(|(who, _)| (who.clone(), balance_of(&who))) - .collect::>(); - voters_sorted.sort_by_key(|(_, y)| *y); - - // start removing from the least stake. Iterate until we know enough have been removed. - let mut removed = 0; - for (maybe_index, _stake) in voters_sorted - .iter() - .map(|(who, stake)| (nominator_index(&who), stake)) - { - let index = maybe_index.ok_or(OffchainElectionError::NominatorSnapshotCorrupt)?; - if compact.remove_voter(index) { - crate::log!( - trace, - "💸 removed a voter at index {} with stake {:?} from compact to reduce the size", - index, - _stake, - ); - removed += 1 - } - - if removed >= to_remove { - break; - } - } - - crate::log!( - warn, - "💸 {} nominators out of {} had to be removed from compact solution due to size limits.", - removed, - compact.len() + removed, - ); - Ok(compact) - } - _ => { - // nada, return as-is - crate::log!( - info, - "💸 Compact solution did not get trimmed due to block weight limits.", - ); - Ok(compact) - } - } -} - -/// Takes an election result and spits out some data that can be submitted to the chain. -/// -/// This does a lot of stuff; read the inline comments. -pub fn prepare_submission( - assignments: Vec>, - winners: Vec<(T::AccountId, ExtendedBalance)>, - do_reduce: bool, - maximum_weight: Weight, -) -> Result< - ( - Vec, - CompactAssignments, - ElectionScore, - ElectionSize, - ), - OffchainElectionError, -> -where - ExtendedBalance: From<::Inner>, -{ - // make sure that the snapshot is available. - let snapshot_validators = - >::snapshot_validators().ok_or(OffchainElectionError::SnapshotUnavailable)?; - let snapshot_nominators = - >::snapshot_nominators().ok_or(OffchainElectionError::SnapshotUnavailable)?; - - // all helper closures that we'd ever need. 
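// Sketch of the greedy trim in `trim_to_weight` above, in plain Rust: voters
// are dropped starting from the least-staked until the target length is met.
// `(id, stake)` tuples stand in for the compact/snapshot machinery.
fn trim_least_staked(voters: &mut Vec<(u64, u128)>, max_len: usize) {
    if voters.len() > max_len {
        voters.sort_by_key(|&(_, stake)| stake); // least stake first
        let to_remove = voters.len() - max_len;
        voters.drain(..to_remove);
    }
}

fn main() {
    let mut voters = vec![(1, 50), (2, 10), (3, 99), (4, 30)];
    trim_least_staked(&mut voters, 2);
    assert_eq!(voters, vec![(1, 50), (3, 99)]);
}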
- let nominator_index = |a: &T::AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let validator_index = |a: &T::AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let nominator_at = |i: NominatorIndex| -> Option { - snapshot_nominators.get(i as usize).cloned() - }; - - let validator_at = |i: ValidatorIndex| -> Option { - snapshot_validators.get(i as usize).cloned() - }; - - // both conversions are safe; snapshots are not created if they exceed. - let size = ElectionSize { - validators: snapshot_validators.len() as ValidatorIndex, - nominators: snapshot_nominators.len() as NominatorIndex, - }; - - // Clean winners. - let winners = sp_npos_elections::to_without_backing(winners); - - // convert into absolute value and to obtain the reduced version. - let mut staked = sp_npos_elections::assignment_ratio_to_staked( - assignments, - >::slashable_balance_of_fn(), - ); - - // reduce - if do_reduce { - reduce(&mut staked); - } - - // Convert back to ratio assignment. This takes less space. - let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio_normalized(staked) - .map_err(|e| OffchainElectionError::from(e))?; - - // compact encode the assignment. - let compact = CompactAssignments::from_assignment( - low_accuracy_assignment, - nominator_index, - validator_index, - ) - .map_err(|e| OffchainElectionError::from(e))?; - - // potentially reduce the size of the compact to fit weight. - let maximum_allowed_voters = - maximum_compact_len::(winners.len() as u32, size, maximum_weight); - - crate::log!(debug, "💸 Maximum weight = {:?} // current weight = {:?} // maximum voters = {:?} // current votes = {:?}", - maximum_weight, - T::WeightInfo::submit_solution_better( - size.validators.into(), - size.nominators.into(), - compact.len() as u32, - winners.len() as u32, - ), - maximum_allowed_voters, - compact.len(), - ); - - let compact = trim_to_weight::(maximum_allowed_voters, compact, &nominator_index)?; - - // re-compute the score. We re-create what the chain will do. This is a bit verbose and wastes - // CPU time, but it is necessary to ensure that the score that we claim is the same as the one - // calculated by the chain. - let score = { - let compact = compact.clone(); - let assignments = compact.into_assignment(nominator_at, validator_at).unwrap(); - let staked = sp_npos_elections::assignment_ratio_to_staked( - assignments.clone(), - >::slashable_balance_of_fn(), - ); - - let support_map = build_support_map::(&winners, &staked) - .map_err(|_| OffchainElectionError::ElectionFailed)?; - evaluate_support::(&support_map) - }; - - // winners to index. Use a simple for loop for a more expressive early exit in case of error. 
- let mut winners_indexed: Vec = Vec::with_capacity(winners.len()); - for w in winners { - if let Some(idx) = snapshot_validators.iter().position(|v| *v == w) { - let compact_index: ValidatorIndex = idx - .try_into() - .map_err(|_| OffchainElectionError::InvalidWinner)?; - winners_indexed.push(compact_index); - } else { - return Err(OffchainElectionError::InvalidWinner); - } - } - - Ok((winners_indexed, compact, score, size)) -} - -#[cfg(test)] -mod test { - #![allow(unused_variables)] - use super::*; - use crate::ElectionSize; - - struct Staking; - - impl crate::WeightInfo for Staking { - fn bond() -> Weight { - unimplemented!() - } - fn bond_extra() -> Weight { - unimplemented!() - } - fn unbond() -> Weight { - unimplemented!() - } - fn withdraw_unbonded_update(s: u32) -> Weight { - unimplemented!() - } - fn withdraw_unbonded_kill(s: u32) -> Weight { - unimplemented!() - } - fn validate() -> Weight { - unimplemented!() - } - fn nominate(n: u32) -> Weight { - unimplemented!() - } - fn chill() -> Weight { - unimplemented!() - } - fn set_payee() -> Weight { - unimplemented!() - } - fn set_controller() -> Weight { - unimplemented!() - } - fn set_validator_count() -> Weight { - unimplemented!() - } - fn force_no_eras() -> Weight { - unimplemented!() - } - fn force_new_era() -> Weight { - unimplemented!() - } - fn force_new_era_always() -> Weight { - unimplemented!() - } - fn set_invulnerables(v: u32) -> Weight { - unimplemented!() - } - fn force_unstake(s: u32) -> Weight { - unimplemented!() - } - fn cancel_deferred_slash(s: u32) -> Weight { - unimplemented!() - } - fn payout_stakers_dead_controller(n: u32) -> Weight { - unimplemented!() - } - fn payout_stakers_alive_staked(n: u32) -> Weight { - unimplemented!() - } - fn rebond(l: u32) -> Weight { - unimplemented!() - } - fn set_history_depth(e: u32) -> Weight { - unimplemented!() - } - fn reap_stash(s: u32) -> Weight { - unimplemented!() - } - fn new_era(v: u32, n: u32) -> Weight { - unimplemented!() - } - fn submit_solution_better(v: u32, n: u32, a: u32, w: u32) -> Weight { - (0 * v + 0 * n + 1000 * a + 0 * w) as Weight - } - } - - #[test] - fn find_max_voter_binary_search_works() { - let size = ElectionSize { - validators: 0, - nominators: 10, - }; - - assert_eq!(maximum_compact_len::(0, size, 0), 0); - assert_eq!(maximum_compact_len::(0, size, 1), 0); - assert_eq!(maximum_compact_len::(0, size, 999), 0); - assert_eq!(maximum_compact_len::(0, size, 1000), 1); - assert_eq!(maximum_compact_len::(0, size, 1001), 1); - assert_eq!(maximum_compact_len::(0, size, 1990), 1); - assert_eq!(maximum_compact_len::(0, size, 1999), 1); - assert_eq!(maximum_compact_len::(0, size, 2000), 2); - assert_eq!(maximum_compact_len::(0, size, 2001), 2); - assert_eq!(maximum_compact_len::(0, size, 2010), 2); - assert_eq!(maximum_compact_len::(0, size, 2990), 2); - assert_eq!(maximum_compact_len::(0, size, 2999), 2); - assert_eq!(maximum_compact_len::(0, size, 3000), 3); - assert_eq!(maximum_compact_len::(0, size, 3333), 3); - assert_eq!(maximum_compact_len::(0, size, 5500), 5); - assert_eq!(maximum_compact_len::(0, size, 7777), 7); - assert_eq!(maximum_compact_len::(0, size, 9999), 9); - assert_eq!(maximum_compact_len::(0, size, 10_000), 10); - assert_eq!(maximum_compact_len::(0, size, 10_999), 10); - assert_eq!(maximum_compact_len::(0, size, 11_000), 10); - assert_eq!(maximum_compact_len::(0, size, 22_000), 10); - - let size = ElectionSize { - validators: 0, - nominators: 1, - }; - - assert_eq!(maximum_compact_len::(0, size, 0), 0); - 
assert_eq!(maximum_compact_len::(0, size, 1), 0); - assert_eq!(maximum_compact_len::(0, size, 999), 0); - assert_eq!(maximum_compact_len::(0, size, 1000), 1); - assert_eq!(maximum_compact_len::(0, size, 1001), 1); - assert_eq!(maximum_compact_len::(0, size, 1990), 1); - assert_eq!(maximum_compact_len::(0, size, 1999), 1); - assert_eq!(maximum_compact_len::(0, size, 2000), 1); - assert_eq!(maximum_compact_len::(0, size, 2001), 1); - assert_eq!(maximum_compact_len::(0, size, 2010), 1); - assert_eq!(maximum_compact_len::(0, size, 3333), 1); - - let size = ElectionSize { - validators: 0, - nominators: 2, - }; - - assert_eq!(maximum_compact_len::(0, size, 0), 0); - assert_eq!(maximum_compact_len::(0, size, 1), 0); - assert_eq!(maximum_compact_len::(0, size, 999), 0); - assert_eq!(maximum_compact_len::(0, size, 1000), 1); - assert_eq!(maximum_compact_len::(0, size, 1001), 1); - assert_eq!(maximum_compact_len::(0, size, 1999), 1); - assert_eq!(maximum_compact_len::(0, size, 2000), 2); - assert_eq!(maximum_compact_len::(0, size, 2001), 2); - assert_eq!(maximum_compact_len::(0, size, 2010), 2); - assert_eq!(maximum_compact_len::(0, size, 3333), 2); - } -} diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs new file mode 100644 index 0000000000000..fecd493eea022 --- /dev/null +++ b/frame/staking/src/pallet/impls.rs @@ -0,0 +1,1154 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for the Staking FRAME Pallet. + +use frame_election_provider_support::{data_provider, ElectionProvider, Supports, VoteWeight}; +use frame_support::{ + pallet_prelude::*, + traits::{ + Currency, CurrencyToVote, EstimateNextNewSession, Get, Imbalance, LockableCurrency, + OnUnbalanced, UnixTime, WithdrawReasons, + }, + weights::{Weight, WithPostDispatchInfo}, +}; +use pallet_session::historical; +use sp_runtime::{ + traits::{Bounded, Convert, SaturatedConversion, Saturating, Zero}, + Perbill, +}; +use sp_staking::{ + offence::{OffenceDetails, OnOffenceHandler}, + SessionIndex, +}; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; + +use crate::{ + log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraIndex, EraPayout, Exposure, + ExposureOf, Forcing, IndividualExposure, Nominations, PositiveImbalanceOf, RewardDestination, + SessionInterface, StakingLedger, ValidatorPrefs, +}; + +use super::{pallet::*, STAKING_ID}; + +impl Pallet { + /// The total balance that can be slashed from a stash account as of right now. + pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { + // Weight note: consider making the stake accessible through stash. + Self::bonded(stash).and_then(Self::ledger).map(|l| l.active).unwrap_or_default() + } + + /// Internal impl of [`Self::slashable_balance_of`] that returns [`VoteWeight`]. 
+ pub fn slashable_balance_of_vote_weight( + stash: &T::AccountId, + issuance: BalanceOf, + ) -> VoteWeight { + T::CurrencyToVote::to_vote(Self::slashable_balance_of(stash), issuance) + } + + /// Returns a closure around `slashable_balance_of_vote_weight` that can be passed around. + /// + /// This prevents call sites from repeatedly requesting `total_issuance` from backend. But it is + /// important to be only used while the total issuance is not changing. + pub fn slashable_balance_of_fn() -> Box VoteWeight> { + // NOTE: changing this to unboxed `impl Fn(..)` return type and the pallet will still + // compile, while some types in mock fail to resolve. + let issuance = T::Currency::total_issuance(); + Box::new(move |who: &T::AccountId| -> VoteWeight { + Self::slashable_balance_of_vote_weight(who, issuance) + }) + } + + pub(super) fn do_payout_stakers( + validator_stash: T::AccountId, + era: EraIndex, + ) -> DispatchResultWithPostInfo { + // Validate input data + let current_era = CurrentEra::::get().ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + let history_depth = Self::history_depth(); + ensure!( + era <= current_era && era >= current_era.saturating_sub(history_depth), + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + ); + + // Note: if era has no reward to be claimed, era may be future. better not to update + // `ledger.claimed_rewards` in this case. + let era_payout = >::get(&era).ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + let controller = Self::bonded(&validator_stash).ok_or_else(|| { + Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + let mut ledger = >::get(&controller).ok_or(Error::::NotController)?; + + ledger + .claimed_rewards + .retain(|&x| x >= current_era.saturating_sub(history_depth)); + match ledger.claimed_rewards.binary_search(&era) { + Ok(_) => Err(Error::::AlreadyClaimed + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)))?, + Err(pos) => ledger.claimed_rewards.insert(pos, era), + } + + let exposure = >::get(&era, &ledger.stash); + + // Input data seems good, no errors allowed after this point + + >::insert(&controller, &ledger); + + // Get Era reward points. It has TOTAL and INDIVIDUAL + // Find the fraction of the era reward that belongs to the validator + // Take that fraction of the eras rewards to split to nominator and validator + // + // Then look at the validator, figure out the proportion of their reward + // which goes to them and each of their nominators. + + let era_reward_points = >::get(&era); + let total_reward_points = era_reward_points.total; + let validator_reward_points = era_reward_points + .individual + .get(&ledger.stash) + .map(|points| *points) + .unwrap_or_else(|| Zero::zero()); + + // Nothing to do if they have no reward points. + if validator_reward_points.is_zero() { + return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()) + } + + // This is the fraction of the total reward that the validator and the + // nominators will get. + let validator_total_reward_part = + Perbill::from_rational(validator_reward_points, total_reward_points); + + // This is how much validator + nominators are entitled to. + let validator_total_payout = validator_total_reward_part * era_payout; + + let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash); + // Validator first gets a cut off the top. 
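// The `binary_search`/`insert` pair on `claimed_rewards` above keeps the list
// sorted and rejects double claims in O(log n). A minimal std-Rust version of
// the same pattern, with u32 standing in for `EraIndex`:
fn claim_era(claimed: &mut Vec<u32>, era: u32) -> Result<(), &'static str> {
    match claimed.binary_search(&era) {
        Ok(_) => Err("already claimed"),
        Err(pos) => {
            claimed.insert(pos, era);
            Ok(())
        },
    }
}

fn main() {
    let mut claimed = vec![1, 3];
    assert_eq!(claim_era(&mut claimed, 2), Ok(()));
    assert_eq!(claimed, vec![1, 2, 3]);
    assert_eq!(claim_era(&mut claimed, 2), Err("already claimed"));
}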
+ let validator_commission = validator_prefs.commission; + let validator_commission_payout = validator_commission * validator_total_payout; + + let validator_leftover_payout = validator_total_payout - validator_commission_payout; + // Now let's calculate how this is split to the validator. + let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); + let validator_staking_payout = validator_exposure_part * validator_leftover_payout; + + Self::deposit_event(Event::::PayoutStarted(era, ledger.stash.clone())); + + // We can now make total validator payout: + if let Some(imbalance) = + Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout) + { + Self::deposit_event(Event::::Rewarded(ledger.stash, imbalance.peek())); + } + + // Track the number of payout ops to nominators. Note: + // `WeightInfo::payout_stakers_alive_staked` always assumes at least a validator is paid + // out, so we do not need to count their payout op. + let mut nominator_payout_count: u32 = 0; + + // Lets now calculate how this is split to the nominators. + // Reward only the clipped exposures. Note this is not necessarily sorted. + for nominator in exposure.others.iter() { + let nominator_exposure_part = Perbill::from_rational(nominator.value, exposure.total); + + let nominator_reward: BalanceOf = + nominator_exposure_part * validator_leftover_payout; + // We can now make nominator payout: + if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { + // Note: this logic does not count payouts for `RewardDestination::None`. + nominator_payout_count += 1; + let e = Event::::Rewarded(nominator.who.clone(), imbalance.peek()); + Self::deposit_event(e); + } + } + + debug_assert!(nominator_payout_count <= T::MaxNominatorRewardedPerValidator::get()); + Ok(Some(T::WeightInfo::payout_stakers_alive_staked(nominator_payout_count)).into()) + } + + /// Update the ledger for a controller. + /// + /// This will also update the stash lock. + pub(crate) fn update_ledger( + controller: &T::AccountId, + ledger: &StakingLedger>, + ) { + T::Currency::set_lock(STAKING_ID, &ledger.stash, ledger.total, WithdrawReasons::all()); + >::insert(controller, ledger); + } + + /// Chill a stash account. + pub(crate) fn chill_stash(stash: &T::AccountId) { + let chilled_as_validator = Self::do_remove_validator(stash); + let chilled_as_nominator = Self::do_remove_nominator(stash); + if chilled_as_validator || chilled_as_nominator { + Self::deposit_event(Event::::Chilled(stash.clone())); + } + } + + /// Actually make a payment to a staker. This uses the currency's reward function + /// to pay the right payee for the given staker account. + fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { + let dest = Self::payee(stash); + match dest { + RewardDestination::Controller => Self::bonded(stash) + .and_then(|controller| Some(T::Currency::deposit_creating(&controller, amount))), + RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), + RewardDestination::Staked => Self::bonded(stash) + .and_then(|c| Self::ledger(&c).map(|l| (c, l))) + .and_then(|(controller, mut l)| { + l.active += amount; + l.total += amount; + let r = T::Currency::deposit_into_existing(stash, amount).ok(); + Self::update_ledger(&controller, &l); + r + }), + RewardDestination::Account(dest_account) => + Some(T::Currency::deposit_creating(&dest_account, amount)), + RewardDestination::None => None, + } + } + + /// Plan a new session potentially trigger a new era. 
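// Per-billion integer arithmetic mirroring the payout split above: commission
// comes off the top, then the remainder is shared by the own/total exposure
// ratio. Plain u128 balances stand in for `BalanceOf<T>`, and `PER_BILL`
// mimics how `Perbill` represents fractions; this is a model, not the pallet.
const PER_BILL: u128 = 1_000_000_000;

fn part(numer: u128, denom: u128) -> u128 {
    // `Perbill::from_rational`-style ratio in parts-per-billion.
    if denom == 0 { 0 } else { numer * PER_BILL / denom }
}

fn apply(p: u128, x: u128) -> u128 {
    x * p / PER_BILL
}

fn main() {
    let total_payout = 1_000u128;
    let commission = part(10, 100); // 10% commission off the top
    let commission_payout = apply(commission, total_payout);
    let leftover = total_payout - commission_payout;

    // validator's own stake is 250 of a 1000 total exposure.
    let validator_share = apply(part(250, 1_000), leftover);
    // a nominator backing with the remaining 750 of the 1000 total exposure.
    let nominator_share = apply(part(750, 1_000), leftover);

    assert_eq!(commission_payout, 100);
    assert_eq!(validator_share, 225);
    assert_eq!(nominator_share, 675);
    assert_eq!(commission_payout + validator_share + nominator_share, total_payout);
}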
+ fn new_session(session_index: SessionIndex, is_genesis: bool) -> Option> { + if let Some(current_era) = Self::current_era() { + // Initial era has been set. + let current_era_start_session_index = Self::eras_start_session_index(current_era) + .unwrap_or_else(|| { + frame_support::print("Error: start_session_index must be set for current_era"); + 0 + }); + + let era_length = + session_index.checked_sub(current_era_start_session_index).unwrap_or(0); // Must never happen. + + match ForceEra::::get() { + // Will be set to `NotForcing` again if a new era has been triggered. + Forcing::ForceNew => (), + // Short circuit to `try_trigger_new_era`. + Forcing::ForceAlways => (), + // Only go to `try_trigger_new_era` if deadline reached. + Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), + _ => { + // Either `Forcing::ForceNone`, + // or `Forcing::NotForcing if era_length >= T::SessionsPerEra::get()`. + return None + }, + } + + // New era. + let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis); + if maybe_new_era_validators.is_some() && + matches!(ForceEra::::get(), Forcing::ForceNew) + { + ForceEra::::put(Forcing::NotForcing); + } + + maybe_new_era_validators + } else { + // Set initial era. + log!(debug, "Starting the first era."); + Self::try_trigger_new_era(session_index, is_genesis) + } + } + + /// Start a session potentially starting an era. + fn start_session(start_session: SessionIndex) { + let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); + // This is only `Some` when current era has already progressed to the next era, while the + // active era is one behind (i.e. in the *last session of the active era*, or *first session + // of the new current era*, depending on how you look at it). + if let Some(next_active_era_start_session_index) = + Self::eras_start_session_index(next_active_era) + { + if next_active_era_start_session_index == start_session { + Self::start_era(start_session); + } else if next_active_era_start_session_index < start_session { + // This arm should never happen, but better handle it than to stall the staking + // pallet. + frame_support::print("Warning: A session appears to have been skipped."); + Self::start_era(start_session); + } + } + } + + /// End a session potentially ending an era. + fn end_session(session_index: SessionIndex) { + if let Some(active_era) = Self::active_era() { + if let Some(next_active_era_start_session_index) = + Self::eras_start_session_index(active_era.index + 1) + { + if next_active_era_start_session_index == session_index + 1 { + Self::end_era(active_era, session_index); + } + } + } + } + + /// + /// * Increment `active_era.index`, + /// * reset `active_era.start`, + /// * update `BondedEras` and apply slashes. + fn start_era(start_session: SessionIndex) { + let active_era = ActiveEra::::mutate(|active_era| { + let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); + *active_era = Some(ActiveEraInfo { + index: new_index, + // Set new active era start in next `on_finalize`. To guarantee usage of `Time` + start: None, + }); + new_index + }); + + let bonding_duration = T::BondingDuration::get(); + + BondedEras::::mutate(|bonded| { + bonded.push((active_era, start_session)); + + if active_era > bonding_duration { + let first_kept = active_era - bonding_duration; + + // Prune out everything that's from before the first-kept index. 
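// The `ForceEra` match in `new_session` above reduces to: a new era is planned
// iff we are forcing one, always-forcing, or the era has lasted a full
// `SessionsPerEra` under `NotForcing`. A std-Rust sketch of that decision,
// with a local `Forcing` enum mirroring the pallet's:
enum Forcing { NotForcing, ForceNew, ForceNone, ForceAlways }

fn should_plan_new_era(forcing: Forcing, era_length: u32, sessions_per_era: u32) -> bool {
    match forcing {
        Forcing::ForceNew | Forcing::ForceAlways => true,
        Forcing::NotForcing => era_length >= sessions_per_era,
        Forcing::ForceNone => false,
    }
}

fn main() {
    assert!(should_plan_new_era(Forcing::ForceNew, 0, 6));
    assert!(!should_plan_new_era(Forcing::NotForcing, 5, 6));
    assert!(should_plan_new_era(Forcing::NotForcing, 6, 6));
    assert!(!should_plan_new_era(Forcing::ForceNone, 100, 6));
}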
+ let n_to_prune = + bonded.iter().take_while(|&&(era_idx, _)| era_idx < first_kept).count(); + + // Kill slashing metadata. + for (pruned_era, _) in bonded.drain(..n_to_prune) { + slashing::clear_era_metadata::(pruned_era); + } + + if let Some(&(_, first_session)) = bonded.first() { + T::SessionInterface::prune_historical_up_to(first_session); + } + } + }); + + Self::apply_unapplied_slashes(active_era); + } + + /// Compute payout for era. + fn end_era(active_era: ActiveEraInfo, _session_index: SessionIndex) { + // Note: active_era_start can be None if end era is called during genesis config. + if let Some(active_era_start) = active_era.start { + let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); + + let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::(); + let staked = Self::eras_total_stake(&active_era.index); + let issuance = T::Currency::total_issuance(); + let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration); + + Self::deposit_event(Event::::EraPaid(active_era.index, validator_payout, rest)); + + // Set ending era reward. + >::insert(&active_era.index, validator_payout); + T::RewardRemainder::on_unbalanced(T::Currency::issue(rest)); + } + } + + /// Plan a new era. + /// + /// * Bump the current era storage (which holds the latest planned era). + /// * Store start session index for the new planned era. + /// * Clean old era information. + /// * Store staking information for the new planned era + /// + /// Returns the new validator set. + pub fn trigger_new_era( + start_session_index: SessionIndex, + exposures: Vec<(T::AccountId, Exposure>)>, + ) -> Vec { + // Increment or set current era. + let new_planned_era = CurrentEra::::mutate(|s| { + *s = Some(s.map(|s| s + 1).unwrap_or(0)); + s.unwrap() + }); + ErasStartSessionIndex::::insert(&new_planned_era, &start_session_index); + + // Clean old era information. + if let Some(old_era) = new_planned_era.checked_sub(Self::history_depth() + 1) { + Self::clear_era_information(old_era); + } + + // Set staking information for the new era. + Self::store_stakers_info(exposures, new_planned_era) + } + + /// Potentially plan a new era. + /// + /// Get election result from `T::ElectionProvider`. + /// In case election result has more than [`MinimumValidatorCount`] validator trigger a new era. + /// + /// In case a new era is planned, the new validator set is returned. + pub(crate) fn try_trigger_new_era( + start_session_index: SessionIndex, + is_genesis: bool, + ) -> Option> { + let election_result = if is_genesis { + T::GenesisElectionProvider::elect().map_err(|e| { + log!(warn, "genesis election provider failed due to {:?}", e); + Self::deposit_event(Event::StakingElectionFailed); + }) + } else { + T::ElectionProvider::elect().map_err(|e| { + log!(warn, "election provider failed due to {:?}", e); + Self::deposit_event(Event::StakingElectionFailed); + }) + } + .ok()?; + + let exposures = Self::collect_exposures(election_result); + if (exposures.len() as u32) < Self::minimum_validator_count().max(1) { + // Session will panic if we ever return an empty validator set, thus max(1) ^^. + match CurrentEra::::get() { + Some(current_era) if current_era > 0 => log!( + warn, + "chain does not have enough staking candidates to operate for era {:?} ({} \ + elected, minimum is {})", + CurrentEra::::get().unwrap_or(0), + exposures.len(), + Self::minimum_validator_count(), + ), + None => { + // The initial era is allowed to have no exposures. 
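// Sketch of the `BondedEras` pruning above: keep only `(era, start_session)`
// pairs within `bonding_duration` of the new active era and drain the stale
// prefix (the drained eras are exactly the ones whose slashing metadata gets
// cleared). Plain tuples stand in for the storage item.
fn prune_bonded(bonded: &mut Vec<(u32, u32)>, active_era: u32, bonding_duration: u32) {
    if active_era > bonding_duration {
        let first_kept = active_era - bonding_duration;
        let n_to_prune = bonded.iter().take_while(|&&(era, _)| era < first_kept).count();
        bonded.drain(..n_to_prune);
    }
}

fn main() {
    let mut bonded = vec![(1, 10), (2, 20), (3, 30), (4, 40)];
    prune_bonded(&mut bonded, 5, 2); // first kept era = 3
    assert_eq!(bonded, vec![(3, 30), (4, 40)]);
}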
+ // In this case the SessionManager is expected to choose a sensible validator + // set. + // TODO: this should be simplified #8911 + CurrentEra::::put(0); + ErasStartSessionIndex::::insert(&0, &start_session_index); + }, + _ => (), + } + + Self::deposit_event(Event::StakingElectionFailed); + return None + } + + Self::deposit_event(Event::StakersElected); + Some(Self::trigger_new_era(start_session_index, exposures)) + } + + /// Process the output of the election. + /// + /// Store staking information for the new planned era + pub fn store_stakers_info( + exposures: Vec<(T::AccountId, Exposure>)>, + new_planned_era: EraIndex, + ) -> Vec { + let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); + + // Populate stakers, exposures, and the snapshot of validator prefs. + let mut total_stake: BalanceOf = Zero::zero(); + exposures.into_iter().for_each(|(stash, exposure)| { + total_stake = total_stake.saturating_add(exposure.total); + >::insert(new_planned_era, &stash, &exposure); + + let mut exposure_clipped = exposure; + let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; + if exposure_clipped.others.len() > clipped_max_len { + exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); + exposure_clipped.others.truncate(clipped_max_len); + } + >::insert(&new_planned_era, &stash, exposure_clipped); + }); + + // Insert current era staking information + >::insert(&new_planned_era, total_stake); + + // Collect the pref of all winners. + for stash in &elected_stashes { + let pref = Self::validators(stash); + >::insert(&new_planned_era, stash, pref); + } + + if new_planned_era > 0 { + log!( + info, + "new validator set of size {:?} has been processed for era {:?}", + elected_stashes.len(), + new_planned_era, + ); + } + + elected_stashes + } + + /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a + /// [`Exposure`]. + fn collect_exposures( + supports: Supports, + ) -> Vec<(T::AccountId, Exposure>)> { + let total_issuance = T::Currency::total_issuance(); + let to_currency = |e: frame_election_provider_support::ExtendedBalance| { + T::CurrencyToVote::to_currency(e, total_issuance) + }; + + supports + .into_iter() + .map(|(validator, support)| { + // Build `struct exposure` from `support`. + let mut others = Vec::with_capacity(support.voters.len()); + let mut own: BalanceOf = Zero::zero(); + let mut total: BalanceOf = Zero::zero(); + support + .voters + .into_iter() + .map(|(nominator, weight)| (nominator, to_currency(weight))) + .for_each(|(nominator, stake)| { + if nominator == validator { + own = own.saturating_add(stake); + } else { + others.push(IndividualExposure { who: nominator, value: stake }); + } + total = total.saturating_add(stake); + }); + + let exposure = Exposure { own, others, total }; + (validator, exposure) + }) + .collect::)>>() + } + + /// Remove all associated data of a stash account from the staking system. + /// + /// Assumes storage is upgraded before calling. + /// + /// This is called: + /// - after a `withdraw_unbonded()` call that frees all of a stash's bonded balance. + /// - through `reap_stash()` if the balance has fallen to zero (through slashing). 
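// The exposure clipping in `store_stakers_info` above keeps only the largest
// `MaxNominatorRewardedPerValidator` nominator exposures per validator, sorted
// descending by value, for the reward path. A std-Rust sketch with
// `(who, value)` tuples standing in for `IndividualExposure`:
fn clip_exposure(others: &mut Vec<(u64, u128)>, max_len: usize) {
    if others.len() > max_len {
        others.sort_by(|a, b| a.1.cmp(&b.1).reverse()); // biggest backers first
        others.truncate(max_len);
    }
}

fn main() {
    let mut others = vec![(1, 10), (2, 40), (3, 25), (4, 5)];
    clip_exposure(&mut others, 2);
    assert_eq!(others, vec![(2, 40), (3, 25)]);
}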
+ pub(crate) fn kill_stash(stash: &T::AccountId, num_slashing_spans: u32) -> DispatchResult { + let controller = >::get(stash).ok_or(Error::::NotStash)?; + + slashing::clear_stash_metadata::(stash, num_slashing_spans)?; + + >::remove(stash); + >::remove(&controller); + + >::remove(stash); + Self::do_remove_validator(stash); + Self::do_remove_nominator(stash); + + frame_system::Pallet::::dec_consumers(stash); + + Ok(()) + } + + /// Clear all era information for given era. + pub(crate) fn clear_era_information(era_index: EraIndex) { + >::remove_prefix(era_index, None); + >::remove_prefix(era_index, None); + >::remove_prefix(era_index, None); + >::remove(era_index); + >::remove(era_index); + >::remove(era_index); + ErasStartSessionIndex::::remove(era_index); + } + + /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. + fn apply_unapplied_slashes(active_era: EraIndex) { + let slash_defer_duration = T::SlashDeferDuration::get(); + ::EarliestUnappliedSlash::mutate(|earliest| { + if let Some(ref mut earliest) = earliest { + let keep_from = active_era.saturating_sub(slash_defer_duration); + for era in (*earliest)..keep_from { + let era_slashes = ::UnappliedSlashes::take(&era); + for slash in era_slashes { + slashing::apply_slash::(slash); + } + } + + *earliest = (*earliest).max(keep_from) + } + }) + } + + /// Add reward points to validators using their stash account ID. + /// + /// Validators are keyed by stash account ID and must be in the current elected set. + /// + /// For each element in the iterator the given number of points in u32 is added to the + /// validator, thus duplicates are handled. + /// + /// At the end of the era each the total payout will be distributed among validator + /// relatively to their points. + /// + /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. + pub fn reward_by_ids(validators_points: impl IntoIterator) { + if let Some(active_era) = Self::active_era() { + >::mutate(active_era.index, |era_rewards| { + for (validator, points) in validators_points.into_iter() { + *era_rewards.individual.entry(validator).or_default() += points; + era_rewards.total += points; + } + }); + } + } + + /// Ensures that at the end of the current session there will be a new era. + pub(crate) fn ensure_new_era() { + match ForceEra::::get() { + Forcing::ForceAlways | Forcing::ForceNew => (), + _ => ForceEra::::put(Forcing::ForceNew), + } + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn add_era_stakers( + current_era: EraIndex, + controller: T::AccountId, + exposure: Exposure>, + ) { + >::insert(¤t_era, &controller, &exposure); + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn set_slash_reward_fraction(fraction: Perbill) { + SlashRewardFraction::::put(fraction); + } + + /// Get all of the voters that are eligible for the npos election. + /// + /// This will use all on-chain nominators, and all the validators will inject a self vote. + /// + /// This function is self-weighing as [`DispatchClass::Mandatory`]. + /// + /// ### Slashing + /// + /// All nominations that have been submitted before the last non-zero slash of the validator are + /// auto-chilled. + pub fn get_npos_voters() -> Vec<(T::AccountId, VoteWeight, Vec)> { + let weight_of = Self::slashable_balance_of_fn(); + let mut all_voters = Vec::new(); + + let mut validator_count = 0u32; + for (validator, _) in >::iter() { + // Append self vote. 
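// `reward_by_ids` above folds duplicate mentions of a validator into a single
// entry while keeping the era total in sync. A BTreeMap-based sketch of that
// accumulation, with u64 account ids and u32 points as stand-in types:
use std::collections::BTreeMap;

#[derive(Default)]
struct EraRewardPoints {
    total: u32,
    individual: BTreeMap<u64, u32>,
}

fn reward_by_ids(era: &mut EraRewardPoints, points: impl IntoIterator<Item = (u64, u32)>) {
    for (validator, p) in points {
        *era.individual.entry(validator).or_default() += p;
        era.total += p;
    }
}

fn main() {
    let mut era = EraRewardPoints::default();
    reward_by_ids(&mut era, vec![(1, 20), (2, 20), (1, 10)]); // duplicates fold into one key
    assert_eq!(era.individual[&1], 30);
    assert_eq!(era.total, 50);
}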
+ let self_vote = (validator.clone(), weight_of(&validator), vec![validator.clone()]); + all_voters.push(self_vote); + validator_count.saturating_inc(); + } + + // Collect all slashing spans into a BTreeMap for further queries. + let slashing_spans = >::iter().collect::>(); + + let mut nominator_count = 0u32; + for (nominator, nominations) in Nominators::::iter() { + let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; + + // Filter out nomination targets which were nominated before the most recent + // slashing span. + targets.retain(|stash| { + slashing_spans + .get(stash) + .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) + }); + + if !targets.is_empty() { + let vote_weight = weight_of(&nominator); + all_voters.push((nominator, vote_weight, targets)); + nominator_count.saturating_inc(); + } + } + + Self::register_weight(T::WeightInfo::get_npos_voters( + validator_count, + nominator_count, + slashing_spans.len() as u32, + )); + + all_voters + } + + /// Get the targets for an upcoming npos election. + /// + /// This function is self-weighing as [`DispatchClass::Mandatory`]. + pub fn get_npos_targets() -> Vec { + let mut validator_count = 0u32; + let targets = Validators::::iter() + .map(|(v, _)| { + validator_count.saturating_inc(); + v + }) + .collect::>(); + + Self::register_weight(T::WeightInfo::get_npos_targets(validator_count)); + + targets + } + + /// This function will add a nominator to the `Nominators` storage map, + /// and keep track of the `CounterForNominators`. + /// + /// If the nominator already exists, their nominations will be updated. + pub fn do_add_nominator(who: &T::AccountId, nominations: Nominations) { + if !Nominators::::contains_key(who) { + CounterForNominators::::mutate(|x| x.saturating_inc()) + } + Nominators::::insert(who, nominations); + } + + /// This function will remove a nominator from the `Nominators` storage map, + /// and keep track of the `CounterForNominators`. + /// + /// Returns true if `who` was removed from `Nominators`, otherwise false. + pub fn do_remove_nominator(who: &T::AccountId) -> bool { + if Nominators::::contains_key(who) { + Nominators::::remove(who); + CounterForNominators::::mutate(|x| x.saturating_dec()); + true + } else { + false + } + } + + /// This function will add a validator to the `Validators` storage map, + /// and keep track of the `CounterForValidators`. + /// + /// If the validator already exists, their preferences will be updated. + pub fn do_add_validator(who: &T::AccountId, prefs: ValidatorPrefs) { + if !Validators::::contains_key(who) { + CounterForValidators::::mutate(|x| x.saturating_inc()) + } + Validators::::insert(who, prefs); + } + + /// This function will remove a validator from the `Validators` storage map, + /// and keep track of the `CounterForValidators`. + /// + /// Returns true if `who` was removed from `Validators`, otherwise false. + pub fn do_remove_validator(who: &T::AccountId) -> bool { + if Validators::::contains_key(who) { + Validators::::remove(who); + CounterForValidators::::mutate(|x| x.saturating_dec()); + true + } else { + false + } + } + + /// Register some amount of weight directly with the system pallet. + /// + /// This is always mandatory weight. 
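// Sketch of the nomination filtering in `get_npos_voters` above: targets
// nominated before a validator's most recent non-zero slash are dropped.
// A `BTreeMap<id, era>` stands in for the slashing-span lookup of
// `last_nonzero_slash()`; ids and eras are plain integers.
use std::collections::BTreeMap;

fn filter_targets(
    submitted_in: u32,
    targets: &mut Vec<u64>,
    last_nonzero_slash: &BTreeMap<u64, u32>,
) {
    targets.retain(|stash| {
        last_nonzero_slash
            .get(stash)
            // no slashing span at all means the target is kept.
            .map_or(true, |&slashed_in| submitted_in >= slashed_in)
    });
}

fn main() {
    let mut targets = vec![10, 20, 30];
    let slashes: BTreeMap<u64, u32> = BTreeMap::from([(20, 7)]); // validator 20 slashed in era 7
    filter_targets(5, &mut targets, &slashes); // nomination submitted in era 5
    assert_eq!(targets, vec![10, 30]); // 20 was slashed after the nomination, so it is dropped
}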
+ fn register_weight(weight: Weight) { + >::register_extra_weight_unchecked( + weight, + DispatchClass::Mandatory, + ); + } +} + +impl frame_election_provider_support::ElectionDataProvider + for Pallet +{ + const MAXIMUM_VOTES_PER_VOTER: u32 = T::MAX_NOMINATIONS; + fn desired_targets() -> data_provider::Result { + Self::register_weight(T::DbWeight::get().reads(1)); + Ok(Self::validator_count()) + } + + fn voters( + maybe_max_len: Option, + ) -> data_provider::Result)>> { + let nominator_count = CounterForNominators::::get(); + let validator_count = CounterForValidators::::get(); + + let voter_count = nominator_count.saturating_add(validator_count) as usize; + debug_assert!(>::iter().count() as u32 == CounterForNominators::::get()); + debug_assert!(>::iter().count() as u32 == CounterForValidators::::get()); + + // register the extra 2 reads + Self::register_weight(T::DbWeight::get().reads(2)); + + if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { + return Err("Voter snapshot too big") + } + + Ok(Self::get_npos_voters()) + } + + fn targets(maybe_max_len: Option) -> data_provider::Result> { + let target_count = CounterForValidators::::get() as usize; + + // register the extra 1 read + Self::register_weight(T::DbWeight::get().reads(1)); + + if maybe_max_len.map_or(false, |max_len| target_count > max_len) { + return Err("Target snapshot too big") + } + + Ok(Self::get_npos_targets()) + } + + fn next_election_prediction(now: T::BlockNumber) -> T::BlockNumber { + let current_era = Self::current_era().unwrap_or(0); + let current_session = Self::current_planned_session(); + let current_era_start_session_index = + Self::eras_start_session_index(current_era).unwrap_or(0); + // Number of session in the current era or the maximum session per era if reached. + let era_progress = current_session + .saturating_sub(current_era_start_session_index) + .min(T::SessionsPerEra::get()); + + let until_this_session_end = T::NextNewSession::estimate_next_new_session(now) + .0 + .unwrap_or_default() + .saturating_sub(now); + + let session_length = T::NextNewSession::average_session_length(); + + let sessions_left: T::BlockNumber = match ForceEra::::get() { + Forcing::ForceNone => Bounded::max_value(), + Forcing::ForceNew | Forcing::ForceAlways => Zero::zero(), + Forcing::NotForcing if era_progress >= T::SessionsPerEra::get() => Zero::zero(), + Forcing::NotForcing => T::SessionsPerEra::get() + .saturating_sub(era_progress) + // One session is computed in this_session_end. 
+ .saturating_sub(1) + .into(), + }; + + now.saturating_add( + until_this_session_end.saturating_add(sessions_left.saturating_mul(session_length)), + ) + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_voter(voter: T::AccountId, weight: VoteWeight, targets: Vec) { + use sp_std::convert::TryFrom; + let stake = >::try_from(weight).unwrap_or_else(|_| { + panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") + }); + >::insert(voter.clone(), voter.clone()); + >::insert( + voter.clone(), + StakingLedger { + stash: voter.clone(), + active: stake, + total: stake, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); + Self::do_add_nominator(&voter, Nominations { targets, submitted_in: 0, suppressed: false }); + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_target(target: T::AccountId) { + let stake = MinValidatorBond::::get() * 100u32.into(); + >::insert(target.clone(), target.clone()); + >::insert( + target.clone(), + StakingLedger { + stash: target.clone(), + active: stake, + total: stake, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); + Self::do_add_validator( + &target, + ValidatorPrefs { commission: Perbill::zero(), blocked: false }, + ); + } + + #[cfg(feature = "runtime-benchmarks")] + fn clear() { + >::remove_all(None); + >::remove_all(None); + >::remove_all(None); + >::remove_all(None); + } + + #[cfg(feature = "runtime-benchmarks")] + fn put_snapshot( + voters: Vec<(T::AccountId, VoteWeight, Vec)>, + targets: Vec, + target_stake: Option, + ) { + use sp_std::convert::TryFrom; + targets.into_iter().for_each(|v| { + let stake: BalanceOf = target_stake + .and_then(|w| >::try_from(w).ok()) + .unwrap_or_else(|| MinNominatorBond::::get() * 100u32.into()); + >::insert(v.clone(), v.clone()); + >::insert( + v.clone(), + StakingLedger { + stash: v.clone(), + active: stake, + total: stake, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); + Self::do_add_validator( + &v, + ValidatorPrefs { commission: Perbill::zero(), blocked: false }, + ); + }); + + voters.into_iter().for_each(|(v, s, t)| { + let stake = >::try_from(s).unwrap_or_else(|_| { + panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") + }); + >::insert(v.clone(), v.clone()); + >::insert( + v.clone(), + StakingLedger { + stash: v.clone(), + active: stake, + total: stake, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); + Self::do_add_nominator( + &v, + Nominations { targets: t, submitted_in: 0, suppressed: false }, + ); + }); + } +} + +/// In this implementation `new_session(session)` must be called before `end_session(session-1)` +/// i.e. the new session must be planned before the ending of the previous session. +/// +/// Once the first new_session is planned, all session must start and then end in order, though +/// some session can lag in between the newest session planned and the latest session started. 
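
The block-number arithmetic in `next_election_prediction` above reduces, in the common `NotForcing` case, to "blocks left in the current session, plus the remaining whole sessions of the era". A simplified model over plain `u32` block numbers (the real code works over `T::BlockNumber`; numbers here are illustrative):

```rust
// One session's worth of blocks is already counted by
// `until_this_session_end`, hence the extra `- 1` on `sessions_left`.
fn next_election(
    now: u32,
    until_this_session_end: u32,
    session_length: u32,
    era_progress: u32,
    sessions_per_era: u32,
) -> u32 {
    let sessions_left = sessions_per_era
        .saturating_sub(era_progress)
        .saturating_sub(1);
    now.saturating_add(until_this_session_end)
        .saturating_add(sessions_left.saturating_mul(session_length))
}

fn main() {
    // 6 sessions per era, 2 already elapsed, 10-block sessions, 4 blocks left
    // in the current session, current block 100:
    // 3 full sessions remain after this one => 100 + 4 + 30 = 134.
    assert_eq!(next_election(100, 4, 10, 2, 6), 134);
}
```
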
+impl pallet_session::SessionManager for Pallet { + fn new_session(new_index: SessionIndex) -> Option> { + log!(trace, "planning new session {}", new_index); + CurrentPlannedSession::::put(new_index); + Self::new_session(new_index, false) + } + fn new_session_genesis(new_index: SessionIndex) -> Option> { + log!(trace, "planning new session {} at genesis", new_index); + CurrentPlannedSession::::put(new_index); + Self::new_session(new_index, true) + } + fn start_session(start_index: SessionIndex) { + log!(trace, "starting session {}", start_index); + Self::start_session(start_index) + } + fn end_session(end_index: SessionIndex) { + log!(trace, "ending session {}", end_index); + Self::end_session(end_index) + } +} + +impl historical::SessionManager>> + for Pallet +{ + fn new_session( + new_index: SessionIndex, + ) -> Option>)>> { + >::new_session(new_index).map(|validators| { + let current_era = Self::current_era() + // Must be some as a new era has been created. + .unwrap_or(0); + + validators + .into_iter() + .map(|v| { + let exposure = Self::eras_stakers(current_era, &v); + (v, exposure) + }) + .collect() + }) + } + fn new_session_genesis( + new_index: SessionIndex, + ) -> Option>)>> { + >::new_session_genesis(new_index).map( + |validators| { + let current_era = Self::current_era() + // Must be some as a new era has been created. + .unwrap_or(0); + + validators + .into_iter() + .map(|v| { + let exposure = Self::eras_stakers(current_era, &v); + (v, exposure) + }) + .collect() + }, + ) + } + fn start_session(start_index: SessionIndex) { + >::start_session(start_index) + } + fn end_session(end_index: SessionIndex) { + >::end_session(end_index) + } +} + +/// Add reward points to block authors: +/// * 20 points to the block producer for producing a (non-uncle) block in the relay chain, +/// * 2 points to the block producer for each reference to a previously unreferenced uncle, and +/// * 1 point to the producer of each referenced uncle block. +impl pallet_authorship::EventHandler for Pallet +where + T: Config + pallet_authorship::Config + pallet_session::Config, +{ + fn note_author(author: T::AccountId) { + Self::reward_by_ids(vec![(author, 20)]) + } + fn note_uncle(author: T::AccountId, _age: T::BlockNumber) { + Self::reward_by_ids(vec![(>::author(), 2), (author, 1)]) + } +} + +/// This is intended to be used with `FilterHistoricalOffences`. +impl + OnOffenceHandler, Weight> + for Pallet +where + T: pallet_session::Config::AccountId>, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, + FullIdentificationOf = ExposureOf, + >, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::ValidatorIdOf: Convert< + ::AccountId, + Option<::AccountId>, + >, +{ + fn on_offence( + offenders: &[OffenceDetails< + T::AccountId, + pallet_session::historical::IdentificationTuple, + >], + slash_fraction: &[Perbill], + slash_session: SessionIndex, + ) -> Weight { + let reward_proportion = SlashRewardFraction::::get(); + let mut consumed_weight: Weight = 0; + let mut add_db_reads_writes = |reads, writes| { + consumed_weight += T::DbWeight::get().reads_writes(reads, writes); + }; + + let active_era = { + let active_era = Self::active_era(); + add_db_reads_writes(1, 0); + if active_era.is_none() { + // This offence need not be re-submitted. 
+ return consumed_weight + } + active_era.expect("value checked not to be `None`; qed").index + }; + let active_era_start_session_index = Self::eras_start_session_index(active_era) + .unwrap_or_else(|| { + frame_support::print("Error: start_session_index must be set for current_era"); + 0 + }); + add_db_reads_writes(1, 0); + + let window_start = active_era.saturating_sub(T::BondingDuration::get()); + + // Fast path for active-era report - most likely. + // `slash_session` cannot be in a future active era. It must be in `active_era` or before. + let slash_era = if slash_session >= active_era_start_session_index { + active_era + } else { + let eras = BondedEras::::get(); + add_db_reads_writes(1, 0); + + // Reverse because it's more likely to find reports from recent eras. + match eras.iter().rev().filter(|&&(_, ref sesh)| sesh <= &slash_session).next() { + Some(&(ref slash_era, _)) => *slash_era, + // Before bonding period. defensive - should be filtered out. + None => return consumed_weight, + } + }; + + ::EarliestUnappliedSlash::mutate(|earliest| { + if earliest.is_none() { + *earliest = Some(active_era) + } + }); + add_db_reads_writes(1, 1); + + let slash_defer_duration = T::SlashDeferDuration::get(); + + let invulnerables = Self::invulnerables(); + add_db_reads_writes(1, 0); + + for (details, slash_fraction) in offenders.iter().zip(slash_fraction) { + let (stash, exposure) = &details.offender; + + // Skip if the validator is invulnerable. + if invulnerables.contains(stash) { + continue + } + + let unapplied = slashing::compute_slash::(slashing::SlashParams { + stash, + slash: *slash_fraction, + exposure, + slash_era, + window_start, + now: active_era, + reward_proportion, + }); + + if let Some(mut unapplied) = unapplied { + let nominators_len = unapplied.others.len() as u64; + let reporters_len = details.reporters.len() as u64; + + { + let upper_bound = 1 /* Validator/NominatorSlashInEra */ + 2 /* fetch_spans */; + let rw = upper_bound + nominators_len * upper_bound; + add_db_reads_writes(rw, rw); + } + unapplied.reporters = details.reporters.clone(); + if slash_defer_duration == 0 { + // Apply right away. + slashing::apply_slash::(unapplied); + { + let slash_cost = (6, 5); + let reward_cost = (2, 2); + add_db_reads_writes( + (1 + nominators_len) * slash_cost.0 + reward_cost.0 * reporters_len, + (1 + nominators_len) * slash_cost.1 + reward_cost.1 * reporters_len, + ); + } + } else { + // Defer to end of some `slash_defer_duration` from now. + ::UnappliedSlashes::mutate(active_era, move |for_later| { + for_later.push(unapplied) + }); + add_db_reads_writes(1, 1); + } + } else { + add_db_reads_writes(4 /* fetch_spans */, 5 /* kick_out_if_recent */) + } + } + + consumed_weight + } +} diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs new file mode 100644 index 0000000000000..d99cd89f3b06c --- /dev/null +++ b/frame/staking/src/pallet/mod.rs @@ -0,0 +1,1550 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
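
A note on the slash-era lookup in `on_offence` above: the era a reported session belongs to is resolved by scanning `BondedEras` from the newest entry backwards, and reports from before the bonding window are dropped. A minimal sketch of that lookup over `(era, start_session)` pairs:

```rust
// Given `(era, start_session)` pairs as kept in `BondedEras`, find the era
// a session falls into: the most recent era whose start is not after it.
fn slash_era_for(bonded_eras: &[(u32, u32)], slash_session: u32) -> Option<u32> {
    bonded_eras
        .iter()
        .rev()
        .find(|&&(_, start_session)| start_session <= slash_session)
        .map(|&(era, _)| era)
}

fn main() {
    // Eras 3, 4 and 5 started at sessions 18, 24 and 30 respectively.
    let bonded = [(3, 18), (4, 24), (5, 30)];
    assert_eq!(slash_era_for(&bonded, 26), Some(4));
    // A session before the bonding window yields no era: the report is dropped.
    assert_eq!(slash_era_for(&bonded, 10), None);
}
```
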
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Staking FRAME Pallet.
+
+use frame_support::{
+    pallet_prelude::*,
+    traits::{
+        Currency, CurrencyToVote, EnsureOrigin, EstimateNextNewSession, Get, LockIdentifier,
+        LockableCurrency, OnUnbalanced, UnixTime,
+    },
+    weights::Weight,
+};
+use frame_system::{ensure_root, ensure_signed, offchain::SendTransactionTypes, pallet_prelude::*};
+use sp_runtime::{
+    traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero},
+    DispatchError, Perbill, Percent,
+};
+use sp_staking::SessionIndex;
+use sp_std::{convert::From, prelude::*, result};
+
+mod impls;
+
+pub use impls::*;
+
+use crate::{
+    migrations, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraIndex, EraPayout,
+    EraRewardPoints, Exposure, Forcing, NegativeImbalanceOf, Nominations, PositiveImbalanceOf,
+    Releases, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk,
+    ValidatorPrefs,
+};
+
+pub const MAX_UNLOCKING_CHUNKS: usize = 32;
+const STAKING_ID: LockIdentifier = *b"staking ";
+
+#[frame_support::pallet]
+pub mod pallet {
+    use super::*;
+
+    #[pallet::pallet]
+    #[pallet::generate_store(pub(crate) trait Store)]
+    pub struct Pallet<T>(_);
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config + SendTransactionTypes<Call<Self>> {
+        /// The staking balance.
+        type Currency: LockableCurrency<Self::AccountId, Moment = Self::BlockNumber>;
+
+        /// Time used for computing era duration.
+        ///
+        /// It is guaranteed to start being called from the first `on_finalize`. Thus value at
+        /// genesis is not used.
+        type UnixTime: UnixTime;
+
+        /// Convert a balance into a number used for election calculation. This must fit into a
+        /// `u64` but is allowed to be sensibly lossy. The `u64` is used to communicate with the
+        /// [`sp_npos_elections`] crate, which accepts u64 numbers and does its operations in
+        /// u128. Consequently, the backward conversion is used to convert the u128s from
+        /// sp-npos-elections back to a [`BalanceOf`].
+        type CurrencyToVote: CurrencyToVote<BalanceOf<Self>>;
+
+        /// Something that provides the election functionality.
+        type ElectionProvider: frame_election_provider_support::ElectionProvider<
+            Self::AccountId,
+            Self::BlockNumber,
+            // we only accept an election provider that has staking as data provider.
+            DataProvider = Pallet<Self>,
+        >;
+
+        /// Something that provides the election functionality at genesis.
+        type GenesisElectionProvider: frame_election_provider_support::ElectionProvider<
+            Self::AccountId,
+            Self::BlockNumber,
+            DataProvider = Pallet<Self>,
+        >;
+
+        /// Maximum number of nominations per nominator.
+        const MAX_NOMINATIONS: u32;
+
+        /// Tokens that have been minted but are unused for validator rewards.
+        /// See [Era payout](./index.html#era-payout).
+        type RewardRemainder: OnUnbalanced<NegativeImbalanceOf<Self>>;
+
+        /// The overarching event type.
+        type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+
+        /// Handler for the unbalanced reduction when slashing a staker.
+        type Slash: OnUnbalanced<NegativeImbalanceOf<Self>>;
+
+        /// Handler for the unbalanced increment when rewarding a staker.
+        type Reward: OnUnbalanced<PositiveImbalanceOf<Self>>;
+
+        /// Number of sessions per era.
+        #[pallet::constant]
+        type SessionsPerEra: Get<SessionIndex>;
+
+        /// Number of eras that staked funds must remain bonded for.
+        #[pallet::constant]
+        type BondingDuration: Get<EraIndex>;
+
+        /// Number of eras that slashes are deferred by, after computation.
+        ///
+        /// This should be less than the bonding duration. Set to 0 if slashes
+        /// should be applied immediately, without opportunity for intervention.
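
The lossy round trip required of `CurrencyToVote` above is easiest to see with concrete numbers. A sketch of one plausible factor-based scheme (the real implementations, such as `frame_support`'s `U128CurrencyToVote`, work along these lines, but exact behaviour may differ):

```rust
// Choose a factor so that the total issuance, and hence any stake,
// fits in a u64 after division.
fn factor(total_issuance: u128) -> u128 {
    (total_issuance / u64::MAX as u128).max(1)
}

// Balance -> vote weight (lossy but order-preserving).
fn to_vote(balance: u128, total_issuance: u128) -> u64 {
    (balance / factor(total_issuance)) as u64
}

// Vote weight -> balance, the backward conversion used on election results.
fn to_currency(value: u128, total_issuance: u128) -> u128 {
    value.saturating_mul(factor(total_issuance))
}

fn main() {
    let issuance = u64::MAX as u128 * 8; // forces a scaling factor of 8
    let stake = 1_000_000u128;
    let vote = to_vote(stake, issuance);
    assert_eq!(vote, 125_000);
    assert_eq!(to_currency(vote as u128, issuance), 1_000_000);
}
```
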
+ #[pallet::constant] + type SlashDeferDuration: Get; + + /// The origin which can cancel a deferred slash. Root can always do this. + type SlashCancelOrigin: EnsureOrigin; + + /// Interface for interacting with a session pallet. + type SessionInterface: SessionInterface; + + /// The payout for validators and the system for the current era. + /// See [Era payout](./index.html#era-payout). + type EraPayout: EraPayout>; + + /// Something that can estimate the next session change, accurately or as a best effort + /// guess. + type NextNewSession: EstimateNextNewSession; + + /// The maximum number of nominators rewarded for each validator. + /// + /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can + /// claim their reward. This used to limit the i/o cost for the nominator payout. + #[pallet::constant] + type MaxNominatorRewardedPerValidator: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::extra_constants] + impl Pallet { + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + #[allow(non_snake_case)] + fn MaxNominations() -> u32 { + T::MAX_NOMINATIONS + } + } + + #[pallet::type_value] + pub(crate) fn HistoryDepthOnEmpty() -> u32 { + 84u32 + } + + /// Number of eras to keep in history. + /// + /// Information is kept for eras in `[current_era - history_depth; current_era]`. + /// + /// Must be more than the number of eras delayed by session otherwise. I.e. active era must + /// always be in history. I.e. `active_era > current_era - history_depth` must be + /// guaranteed. + #[pallet::storage] + #[pallet::getter(fn history_depth)] + pub(crate) type HistoryDepth = StorageValue<_, u32, ValueQuery, HistoryDepthOnEmpty>; + + /// The ideal number of staking participants. + #[pallet::storage] + #[pallet::getter(fn validator_count)] + pub type ValidatorCount = StorageValue<_, u32, ValueQuery>; + + /// Minimum number of staking participants before emergency conditions are imposed. + #[pallet::storage] + #[pallet::getter(fn minimum_validator_count)] + pub type MinimumValidatorCount = StorageValue<_, u32, ValueQuery>; + + /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're + /// easy to initialize and the performance hit is minimal (we expect no more than four + /// invulnerables) and restricted to testnets. + #[pallet::storage] + #[pallet::getter(fn invulnerables)] + pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; + + /// Map from all locked "stash" accounts to the controller account. + #[pallet::storage] + #[pallet::getter(fn bonded)] + pub type Bonded = StorageMap<_, Twox64Concat, T::AccountId, T::AccountId>; + + /// The minimum active bond to become and maintain the role of a nominator. + #[pallet::storage] + pub type MinNominatorBond = StorageValue<_, BalanceOf, ValueQuery>; + + /// The minimum active bond to become and maintain the role of a validator. + #[pallet::storage] + pub type MinValidatorBond = StorageValue<_, BalanceOf, ValueQuery>; + + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. + #[pallet::storage] + #[pallet::getter(fn ledger)] + pub type Ledger = + StorageMap<_, Blake2_128Concat, T::AccountId, StakingLedger>>; + + /// Where the reward payment should be made. Keyed by stash. 
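
The `Bonded` and `Ledger` maps above encode the stash/controller split: `Bonded` is keyed by stash and yields the controller, while `Ledger` is keyed by controller. A sketch of the resulting two-step lookup, with plain maps and string account IDs standing in for storage:

```rust
use std::collections::BTreeMap;

// Stand-in for the pallet's `StakingLedger`.
struct Ledger {
    total: u64,
    active: u64,
}

// `Bonded`: stash -> controller; `Ledger`: controller -> ledger.
fn ledger_of<'a>(
    bonded: &BTreeMap<&str, &'static str>,
    ledgers: &'a BTreeMap<&'static str, Ledger>,
    stash: &str,
) -> Option<&'a Ledger> {
    bonded.get(stash).and_then(|controller| ledgers.get(controller))
}

fn main() {
    let bonded: BTreeMap<&str, &'static str> = [("stash", "ctrl")].into_iter().collect();
    let mut ledgers = BTreeMap::new();
    ledgers.insert("ctrl", Ledger { total: 100, active: 80 });

    let ledger = ledger_of(&bonded, &ledgers, "stash").expect("stash is bonded");
    assert_eq!((ledger.total, ledger.active), (100, 80));
    // An unbonded account has no controller, hence no ledger.
    assert!(ledger_of(&bonded, &ledgers, "other").is_none());
}
```
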
+    #[pallet::storage]
+    #[pallet::getter(fn payee)]
+    pub type Payee<T: Config> =
+        StorageMap<_, Twox64Concat, T::AccountId, RewardDestination<T::AccountId>, ValueQuery>;
+
+    /// The map from (wannabe) validator stash key to the preferences of that validator.
+    ///
+    /// When updating this storage item, you must also update the `CounterForValidators`.
+    #[pallet::storage]
+    #[pallet::getter(fn validators)]
+    pub type Validators<T: Config> =
+        StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>;
+
+    /// A tracker to keep count of the number of items in the `Validators` map.
+    #[pallet::storage]
+    pub type CounterForValidators<T> = StorageValue<_, u32, ValueQuery>;
+
+    /// The maximum validator count before we stop allowing new validators to join.
+    ///
+    /// When this value is not set, no limits are enforced.
+    #[pallet::storage]
+    pub type MaxValidatorsCount<T> = StorageValue<_, u32, OptionQuery>;
+
+    /// The map from nominator stash key to the set of stash keys of all validators to nominate.
+    ///
+    /// When updating this storage item, you must also update the `CounterForNominators`.
+    #[pallet::storage]
+    #[pallet::getter(fn nominators)]
+    pub type Nominators<T: Config> =
+        StorageMap<_, Twox64Concat, T::AccountId, Nominations<T::AccountId>>;
+
+    /// A tracker to keep count of the number of items in the `Nominators` map.
+    #[pallet::storage]
+    pub type CounterForNominators<T> = StorageValue<_, u32, ValueQuery>;
+
+    /// The maximum nominator count before we stop allowing new nominators to join.
+    ///
+    /// When this value is not set, no limits are enforced.
+    #[pallet::storage]
+    pub type MaxNominatorsCount<T> = StorageValue<_, u32, OptionQuery>;
+
+    /// The current era index.
+    ///
+    /// This is the latest planned era. Depending on how the Session pallet queues the validator
+    /// set, it might be active or not.
+    #[pallet::storage]
+    #[pallet::getter(fn current_era)]
+    pub type CurrentEra<T> = StorageValue<_, EraIndex>;
+
+    /// The active era information; it holds the index and start.
+    ///
+    /// The active era is the era being currently rewarded. Validator set of this era must be
+    /// equal to [`SessionInterface::validators`].
+    #[pallet::storage]
+    #[pallet::getter(fn active_era)]
+    pub type ActiveEra<T> = StorageValue<_, ActiveEraInfo>;
+
+    /// The session index at which the era started, for the last `HISTORY_DEPTH` eras.
+    ///
+    /// Note: This tracks the starting session (i.e. the session index when the era becomes
+    /// active) for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`.
+    #[pallet::storage]
+    #[pallet::getter(fn eras_start_session_index)]
+    pub type ErasStartSessionIndex<T> = StorageMap<_, Twox64Concat, EraIndex, SessionIndex>;
+
+    /// Exposure of validator at era.
+    ///
+    /// This is keyed first by the era index to allow bulk deletion and then the stash account.
+    ///
+    /// It is removed after `HISTORY_DEPTH` eras.
+    /// If stakers haven't been set or have been removed, an empty exposure is returned.
+    #[pallet::storage]
+    #[pallet::getter(fn eras_stakers)]
+    pub type ErasStakers<T: Config> = StorageDoubleMap<
+        _,
+        Twox64Concat,
+        EraIndex,
+        Twox64Concat,
+        T::AccountId,
+        Exposure<T::AccountId, BalanceOf<T>>,
+        ValueQuery,
+    >;
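
`ErasStakers` above is keyed by era first precisely so that `clear_era_information` (earlier in this diff) can drop an entire era with a single `remove_prefix` call. A sketch of that layout using a nested map, which also mimics the `ValueQuery` default of an empty exposure:

```rust
use std::collections::BTreeMap;

// Stand-in for the pallet's `Exposure` value.
#[derive(Default, Clone)]
struct Exposure {
    total: u64,
    own: u64,
}

fn main() {
    // (era -> (stash -> exposure)), modelling the double map.
    let mut eras_stakers: BTreeMap<u32, BTreeMap<&'static str, Exposure>> = BTreeMap::new();
    eras_stakers.entry(42).or_default().insert("alice", Exposure { total: 100, own: 60 });

    // Point lookup, defaulting to an empty exposure like the ValueQuery storage.
    let exposure = eras_stakers
        .get(&42)
        .and_then(|era| era.get("alice"))
        .cloned()
        .unwrap_or_default();
    assert_eq!(exposure.total, 100);
    assert_eq!(exposure.own, 60);

    // Bulk deletion of one era corresponds to `remove_prefix(era)`.
    eras_stakers.remove(&42);
    assert!(eras_stakers.get(&42).is_none());
}
```
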
+    /// Clipped Exposure of validator at era.
+    ///
+    /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the
+    /// `T::MaxNominatorRewardedPerValidator` biggest stakers.
+    /// (Note: the fields `total` and `own` of the exposure remain unchanged).
+    /// This is used to limit the i/o cost for the nominator payout.
+    ///
+    /// This is keyed first by the era index to allow bulk deletion and then the stash account.
+    ///
+    /// It is removed after `HISTORY_DEPTH` eras.
+    /// If stakers haven't been set or have been removed, an empty exposure is returned.
+    #[pallet::storage]
+    #[pallet::getter(fn eras_stakers_clipped)]
+    pub type ErasStakersClipped<T: Config> = StorageDoubleMap<
+        _,
+        Twox64Concat,
+        EraIndex,
+        Twox64Concat,
+        T::AccountId,
+        Exposure<T::AccountId, BalanceOf<T>>,
+        ValueQuery,
+    >;
+
+    /// Similar to `ErasStakers`, this holds the preferences of validators.
+    ///
+    /// This is keyed first by the era index to allow bulk deletion and then the stash account.
+    ///
+    /// It is removed after `HISTORY_DEPTH` eras.
+    // If prefs hasn't been set or has been removed then 0 commission is returned.
+    #[pallet::storage]
+    #[pallet::getter(fn eras_validator_prefs)]
+    pub type ErasValidatorPrefs<T: Config> = StorageDoubleMap<
+        _,
+        Twox64Concat,
+        EraIndex,
+        Twox64Concat,
+        T::AccountId,
+        ValidatorPrefs,
+        ValueQuery,
+    >;
+
+    /// The total validator era payout for the last `HISTORY_DEPTH` eras.
+    ///
+    /// Eras that haven't finished yet or have been removed don't have a reward.
+    #[pallet::storage]
+    #[pallet::getter(fn eras_validator_reward)]
+    pub type ErasValidatorReward<T: Config> = StorageMap<_, Twox64Concat, EraIndex, BalanceOf<T>>;
+
+    /// Rewards for the last `HISTORY_DEPTH` eras.
+    /// If a reward hasn't been set or has been removed, 0 is returned.
+    #[pallet::storage]
+    #[pallet::getter(fn eras_reward_points)]
+    pub type ErasRewardPoints<T: Config> =
+        StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints<T::AccountId>, ValueQuery>;
+
+    /// The total amount staked for the last `HISTORY_DEPTH` eras.
+    /// If the total hasn't been set or has been removed, 0 stake is returned.
+    #[pallet::storage]
+    #[pallet::getter(fn eras_total_stake)]
+    pub type ErasTotalStake<T: Config> =
+        StorageMap<_, Twox64Concat, EraIndex, BalanceOf<T>, ValueQuery>;
+
+    /// Mode of era forcing.
+    #[pallet::storage]
+    #[pallet::getter(fn force_era)]
+    pub type ForceEra<T> = StorageValue<_, Forcing, ValueQuery>;
+
+    /// The percentage of the slash that is distributed to reporters.
+    ///
+    /// The rest of the slashed value is handled by the `Slash`.
+    #[pallet::storage]
+    #[pallet::getter(fn slash_reward_fraction)]
+    pub type SlashRewardFraction<T> = StorageValue<_, Perbill, ValueQuery>;
+
+    /// The amount of currency given to reporters of a slash event which was
+    /// canceled by extraordinary circumstances (e.g. governance).
+    #[pallet::storage]
+    #[pallet::getter(fn canceled_payout)]
+    pub type CanceledSlashPayout<T: Config> = StorageValue<_, BalanceOf<T>, ValueQuery>;
+
+    /// All unapplied slashes that are queued for later.
+    #[pallet::storage]
+    pub type UnappliedSlashes<T: Config> = StorageMap<
+        _,
+        Twox64Concat,
+        EraIndex,
+        Vec<UnappliedSlash<T::AccountId, BalanceOf<T>>>,
+        ValueQuery,
+    >;
+
+    /// A mapping from still-bonded eras to the first session index of that era.
+    ///
+    /// Must contain information for eras for the range:
+    /// `[active_era - bonding_duration; active_era]`
+    #[pallet::storage]
+    pub(crate) type BondedEras<T: Config> =
+        StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>;
+
+    /// All slashing events on validators, mapped by era to the highest slash proportion
+    /// and slash value of the era.
+    #[pallet::storage]
+    pub(crate) type ValidatorSlashInEra<T: Config> = StorageDoubleMap<
+        _,
+        Twox64Concat,
+        EraIndex,
+        Twox64Concat,
+        T::AccountId,
+        (Perbill, BalanceOf<T>),
+    >;
+
+    /// All slashing events on nominators, mapped by era to the highest slash value of the era.
+    #[pallet::storage]
+    pub(crate) type NominatorSlashInEra<T: Config> =
+        StorageDoubleMap<_, Twox64Concat, EraIndex, Twox64Concat, T::AccountId, BalanceOf<T>>;
+
+    /// Slashing spans for stash accounts.
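
`UnappliedSlashes` above queues slashes under the era in which they were found, and `apply_unapplied_slashes` (earlier in this diff) drains the eras below `active_era - SlashDeferDuration` on era change. A sketch of that window arithmetic:

```rust
// With defer duration D, a slash queued under era E stays pending until the
// active era exceeds E + D; `earliest..keep_from` is the range drained.
fn eras_to_apply(
    earliest: u32,
    active_era: u32,
    slash_defer_duration: u32,
) -> std::ops::Range<u32> {
    let keep_from = active_era.saturating_sub(slash_defer_duration);
    earliest..keep_from
}

fn main() {
    // Defer by 2 eras: entering era 7 applies slashes queued for eras 3 and 4.
    let drained: Vec<u32> = eras_to_apply(3, 7, 2).collect();
    assert_eq!(drained, vec![3, 4]);
}
```
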
+ #[pallet::storage] + pub(crate) type SlashingSpans = + StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; + + /// Records information about the maximum slash of a stash within a slashing span, + /// as well as how much reward has been paid out. + #[pallet::storage] + pub(crate) type SpanSlash = StorageMap< + _, + Twox64Concat, + (T::AccountId, slashing::SpanIndex), + slashing::SpanRecord>, + ValueQuery, + >; + + /// The earliest era for which we have a pending, unapplied slash. + #[pallet::storage] + pub(crate) type EarliestUnappliedSlash = StorageValue<_, EraIndex>; + + /// The last planned session scheduled by the session pallet. + /// + /// This is basically in sync with the call to [`pallet_session::SessionManager::new_session`]. + #[pallet::storage] + #[pallet::getter(fn current_planned_session)] + pub type CurrentPlannedSession = StorageValue<_, SessionIndex, ValueQuery>; + + /// True if network has been upgraded to this version. + /// Storage version of the pallet. + /// + /// This is set to v7.0.0 for new networks. + #[pallet::storage] + pub(crate) type StorageVersion = StorageValue<_, Releases, ValueQuery>; + + /// The threshold for when users can start calling `chill_other` for other validators / + /// nominators. The threshold is compared to the actual number of validators / nominators + /// (`CountFor*`) in the system compared to the configured max (`Max*Count`). + #[pallet::storage] + pub(crate) type ChillThreshold = StorageValue<_, Percent, OptionQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub history_depth: u32, + pub validator_count: u32, + pub minimum_validator_count: u32, + pub invulnerables: Vec, + pub force_era: Forcing, + pub slash_reward_fraction: Perbill, + pub canceled_payout: BalanceOf, + pub stakers: + Vec<(T::AccountId, T::AccountId, BalanceOf, crate::StakerStatus)>, + pub min_nominator_bond: BalanceOf, + pub min_validator_bond: BalanceOf, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + history_depth: 84u32, + validator_count: Default::default(), + minimum_validator_count: Default::default(), + invulnerables: Default::default(), + force_era: Default::default(), + slash_reward_fraction: Default::default(), + canceled_payout: Default::default(), + stakers: Default::default(), + min_nominator_bond: Default::default(), + min_validator_bond: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + HistoryDepth::::put(self.history_depth); + ValidatorCount::::put(self.validator_count); + MinimumValidatorCount::::put(self.minimum_validator_count); + Invulnerables::::put(&self.invulnerables); + ForceEra::::put(self.force_era); + CanceledSlashPayout::::put(self.canceled_payout); + SlashRewardFraction::::put(self.slash_reward_fraction); + StorageVersion::::put(Releases::V7_0_0); + MinNominatorBond::::put(self.min_nominator_bond); + MinValidatorBond::::put(self.min_validator_bond); + + for &(ref stash, ref controller, balance, ref status) in &self.stakers { + assert!( + T::Currency::free_balance(&stash) >= balance, + "Stash does not have enough balance to bond." 
+ ); + frame_support::assert_ok!(>::bond( + T::Origin::from(Some(stash.clone()).into()), + T::Lookup::unlookup(controller.clone()), + balance, + RewardDestination::Staked, + )); + frame_support::assert_ok!(match status { + crate::StakerStatus::Validator => >::validate( + T::Origin::from(Some(controller.clone()).into()), + Default::default(), + ), + crate::StakerStatus::Nominator(votes) => >::nominate( + T::Origin::from(Some(controller.clone()).into()), + votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), + ), + _ => Ok(()), + }); + } + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// The era payout has been set; the first balance is the validator-payout; the second is + /// the remainder from the maximum amount of reward. + /// \[era_index, validator_payout, remainder\] + EraPaid(EraIndex, BalanceOf, BalanceOf), + /// The nominator has been rewarded by this amount. \[stash, amount\] + Rewarded(T::AccountId, BalanceOf), + /// One validator (and its nominators) has been slashed by the given amount. + /// \[validator, amount\] + Slashed(T::AccountId, BalanceOf), + /// An old slashing report from a prior era was discarded because it could + /// not be processed. \[session_index\] + OldSlashingReportDiscarded(SessionIndex), + /// A new set of stakers was elected. + StakersElected, + /// An account has bonded this amount. \[stash, amount\] + /// + /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, + /// it will not be emitted for staking rewards when they are added to stake. + Bonded(T::AccountId, BalanceOf), + /// An account has unbonded this amount. \[stash, amount\] + Unbonded(T::AccountId, BalanceOf), + /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` + /// from the unlocking queue. \[stash, amount\] + Withdrawn(T::AccountId, BalanceOf), + /// A nominator has been kicked from a validator. \[nominator, stash\] + Kicked(T::AccountId, T::AccountId), + /// The election failed. No new era is planned. + StakingElectionFailed, + /// An account has stopped participating as either a validator or nominator. + /// \[stash\] + Chilled(T::AccountId), + /// The stakers' rewards are getting paid. \[era_index, validator_stash\] + PayoutStarted(EraIndex, T::AccountId), + } + + #[pallet::error] + pub enum Error { + /// Not a controller account. + NotController, + /// Not a stash account. + NotStash, + /// Stash is already bonded. + AlreadyBonded, + /// Controller is already paired. + AlreadyPaired, + /// Targets cannot be empty. + EmptyTargets, + /// Duplicate index. + DuplicateIndex, + /// Slash record index out of bounds. + InvalidSlashIndex, + /// Can not bond with value less than minimum required. + InsufficientBond, + /// Can not schedule more unlock chunks. + NoMoreChunks, + /// Can not rebond without unlocking chunks. + NoUnlockChunk, + /// Attempting to target a stash that still has funds. + FundedTarget, + /// Invalid era to reward. + InvalidEraToReward, + /// Invalid number of nominations. + InvalidNumberOfNominations, + /// Items are not sorted and unique. + NotSortedAndUnique, + /// Rewards for this era have already been claimed for this validator. + AlreadyClaimed, + /// Incorrect previous history depth input provided. + IncorrectHistoryDepth, + /// Incorrect number of slashing spans provided. + IncorrectSlashingSpans, + /// Internal state has become somehow corrupted and the operation cannot continue. 
+ BadState, + /// Too many nomination targets supplied. + TooManyTargets, + /// A nomination target was supplied that was blocked or otherwise not a validator. + BadTarget, + /// The user has enough bond and thus cannot be chilled forcefully by an external person. + CannotChillOther, + /// There are too many nominators in the system. Governance needs to adjust the staking + /// settings to keep things safe for the runtime. + TooManyNominators, + /// There are too many validators in the system. Governance needs to adjust the staking + /// settings to keep things safe for the runtime. + TooManyValidators, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + if StorageVersion::::get() == Releases::V6_0_0 { + migrations::v7::migrate::() + } else { + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + if StorageVersion::::get() == Releases::V6_0_0 { + migrations::v7::pre_migrate::() + } else { + Ok(()) + } + } + + fn on_initialize(_now: BlockNumberFor) -> Weight { + // just return the weight of the on_finalize. + T::DbWeight::get().reads(1) + } + + fn on_finalize(_n: BlockNumberFor) { + // Set the start of the first era. + if let Some(mut active_era) = Self::active_era() { + if active_era.start.is_none() { + let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); + active_era.start = Some(now_as_millis_u64); + // This write only ever happens once, we don't include it in the weight in + // general + ActiveEra::::put(active_era); + } + } + // `on_finalize` weight is tracked in `on_initialize` + } + + fn integrity_test() { + sp_std::if_std! { + sp_io::TestExternalities::new_empty().execute_with(|| + assert!( + T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, + "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", + T::SlashDeferDuration::get(), + T::BondingDuration::get(), + ) + ); + } + } + } + + #[pallet::call] + impl Pallet { + /// Take the origin account as a stash and lock up `value` of its balance. `controller` will + /// be the account that controls it. + /// + /// `value` must be more than the `minimum_balance` specified by `T::Currency`. + /// + /// The dispatch origin for this call must be _Signed_ by the stash account. + /// + /// Emits `Bonded`. + /// # + /// - Independent of the arguments. Moderate complexity. + /// - O(1). + /// - Three extra DB entries. + /// + /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned + /// unless the `origin` falls below _existential deposit_ and gets removed as dust. + /// ------------------ + /// # + #[pallet::weight(T::WeightInfo::bond())] + pub fn bond( + origin: OriginFor, + controller: ::Source, + #[pallet::compact] value: BalanceOf, + payee: RewardDestination, + ) -> DispatchResult { + let stash = ensure_signed(origin)?; + + if >::contains_key(&stash) { + Err(Error::::AlreadyBonded)? + } + + let controller = T::Lookup::lookup(controller)?; + + if >::contains_key(&controller) { + Err(Error::::AlreadyPaired)? + } + + // Reject a bond which is considered to be _dust_. + if value < T::Currency::minimum_balance() { + Err(Error::::InsufficientBond)? + } + + frame_system::Pallet::::inc_consumers(&stash).map_err(|_| Error::::BadState)?; + + // You're auto-bonded forever, here. We might improve this by only bonding when + // you actually validate/nominate and remove once you unbond __everything__. 
+            <Bonded<T>>::insert(&stash, &controller);
+            <Payee<T>>::insert(&stash, payee);
+
+            let current_era = CurrentEra::<T>::get().unwrap_or(0);
+            let history_depth = Self::history_depth();
+            let last_reward_era = current_era.saturating_sub(history_depth);
+
+            let stash_balance = T::Currency::free_balance(&stash);
+            let value = value.min(stash_balance);
+            Self::deposit_event(Event::<T>::Bonded(stash.clone(), value));
+            let item = StakingLedger {
+                stash,
+                total: value,
+                active: value,
+                unlocking: vec![],
+                claimed_rewards: (last_reward_era..current_era).collect(),
+            };
+            Self::update_ledger(&controller, &item);
+            Ok(())
+        }
+
+        /// Add some extra amount that has appeared in the stash `free_balance` into the balance
+        /// up for staking.
+        ///
+        /// The dispatch origin for this call must be _Signed_ by the stash, not the controller.
+        ///
+        /// Use this if there are additional funds in your stash account that you wish to bond.
+        /// Unlike [`bond`](Self::bond) or [`unbond`](Self::unbond) this function does not impose
+        /// any limitation on the amount that can be added.
+        ///
+        /// Emits `Bonded`.
+        ///
+        /// # <weight>
+        /// - Independent of the arguments. Insignificant complexity.
+        /// - O(1).
+        /// # </weight>
+        #[pallet::weight(T::WeightInfo::bond_extra())]
+        pub fn bond_extra(
+            origin: OriginFor<T>,
+            #[pallet::compact] max_additional: BalanceOf<T>,
+        ) -> DispatchResult {
+            let stash = ensure_signed(origin)?;
+
+            let controller = Self::bonded(&stash).ok_or(Error::<T>::NotStash)?;
+            let mut ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
+
+            let stash_balance = T::Currency::free_balance(&stash);
+            if let Some(extra) = stash_balance.checked_sub(&ledger.total) {
+                let extra = extra.min(max_additional);
+                ledger.total += extra;
+                ledger.active += extra;
+                // Last check: the new active amount of ledger must be more than ED.
+                ensure!(
+                    ledger.active >= T::Currency::minimum_balance(),
+                    Error::<T>::InsufficientBond
+                );
+
+                Self::deposit_event(Event::<T>::Bonded(stash, extra));
+                Self::update_ledger(&controller, &ledger);
+            }
+            Ok(())
+        }
+
+        /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond
+        /// period ends. If this leaves an amount actively bonded less than
+        /// T::Currency::minimum_balance(), then it is increased to the full amount.
+        ///
+        /// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
+        ///
+        /// Once the unlock period is done, you can call `withdraw_unbonded` to actually move
+        /// the funds out of management ready for transfer.
+        ///
+        /// No more than a limited number of unlocking chunks (see `MAX_UNLOCKING_CHUNKS`)
+        /// can co-exist at the same time. In that case, [`Call::withdraw_unbonded`] needs
+        /// to be called first to remove some of the chunks (if possible).
+        ///
+        /// If a user encounters the `InsufficientBond` error when calling this extrinsic,
+        /// they should call `chill` first in order to free up their bonded funds.
+        ///
+        /// Emits `Unbonded`.
+        ///
+        /// See also [`Call::withdraw_unbonded`].
+        #[pallet::weight(T::WeightInfo::unbond())]
+        pub fn unbond(
+            origin: OriginFor<T>,
+            #[pallet::compact] value: BalanceOf<T>,
+        ) -> DispatchResult {
+            let controller = ensure_signed(origin)?;
+            let mut ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
+            ensure!(ledger.unlocking.len() < MAX_UNLOCKING_CHUNKS, Error::<T>::NoMoreChunks);
+
+            let mut value = value.min(ledger.active);
+
+            if !value.is_zero() {
+                ledger.active -= value;
+
+                // Avoid there being a dust balance left in the staking system.
+ if ledger.active < T::Currency::minimum_balance() { + value += ledger.active; + ledger.active = Zero::zero(); + } + + let min_active_bond = if Nominators::::contains_key(&ledger.stash) { + MinNominatorBond::::get() + } else if Validators::::contains_key(&ledger.stash) { + MinValidatorBond::::get() + } else { + Zero::zero() + }; + + // Make sure that the user maintains enough active bond for their role. + // If a user runs into this error, they should chill first. + ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); + + // Note: in case there is no current era it is fine to bond one era more. + let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); + ledger.unlocking.push(UnlockChunk { value, era }); + Self::update_ledger(&controller, &ledger); + Self::deposit_event(Event::::Unbonded(ledger.stash, value)); + } + Ok(()) + } + + /// Remove any unlocked chunks from the `unlocking` queue from our management. + /// + /// This essentially frees up that balance to be used by the stash account to do + /// whatever it wants. + /// + /// The dispatch origin for this call must be _Signed_ by the controller. + /// + /// Emits `Withdrawn`. + /// + /// See also [`Call::unbond`]. + /// + /// # + /// Complexity O(S) where S is the number of slashing spans to remove + /// NOTE: Weight annotation is the kill scenario, we refund otherwise. + /// # + #[pallet::weight(T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans))] + pub fn withdraw_unbonded( + origin: OriginFor, + num_slashing_spans: u32, + ) -> DispatchResultWithPostInfo { + let controller = ensure_signed(origin)?; + let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let (stash, old_total) = (ledger.stash.clone(), ledger.total); + if let Some(current_era) = Self::current_era() { + ledger = ledger.consolidate_unlocked(current_era) + } + + let post_info_weight = if ledger.unlocking.is_empty() && + ledger.active < T::Currency::minimum_balance() + { + // This account must have called `unbond()` with some value that caused the active + // portion to fall below existential deposit + will have no more unlocking chunks + // left. We can now safely remove all staking-related information. + Self::kill_stash(&stash, num_slashing_spans)?; + // Remove the lock. + T::Currency::remove_lock(STAKING_ID, &stash); + // This is worst case scenario, so we use the full weight and return None + None + } else { + // This was the consequence of a partial unbond. just update the ledger and move on. + Self::update_ledger(&controller, &ledger); + + // This is only an update, so we use less overall weight. + Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) + }; + + // `old_total` should never be less than the new total because + // `consolidate_unlocked` strictly subtracts balance. + if ledger.total < old_total { + // Already checked that this won't overflow by entry condition. + let value = old_total - ledger.total; + Self::deposit_event(Event::::Withdrawn(stash, value)); + } + + Ok(post_info_weight.into()) + } + + /// Declare the desire to validate for the origin controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. 
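
`withdraw_unbonded` above relies on `ledger.consolidate_unlocked(current_era)`, defined on `StakingLedger` elsewhere in the crate, to fold matured chunks back into withdrawable balance. A sketch of that consolidation rule, assuming a chunk matures once its era is not later than the current era:

```rust
// Stand-in for the pallet's `UnlockChunk`.
struct UnlockChunk {
    value: u64,
    era: u32,
}

// Chunks whose era has passed are summed into a withdrawable amount;
// the rest stay queued.
fn consolidate(chunks: Vec<UnlockChunk>, current_era: u32) -> (u64, Vec<UnlockChunk>) {
    let mut withdrawable = 0u64;
    let remaining: Vec<UnlockChunk> = chunks
        .into_iter()
        .filter(|chunk| {
            if chunk.era > current_era {
                true
            } else {
                withdrawable += chunk.value;
                false
            }
        })
        .collect();
    (withdrawable, remaining)
}

fn main() {
    let chunks = vec![UnlockChunk { value: 10, era: 4 }, UnlockChunk { value: 5, era: 9 }];
    let (free, left) = consolidate(chunks, 6);
    assert_eq!(free, 10);
    assert_eq!((left.len(), left[0].era), (1, 9));
}
```
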
+ #[pallet::weight(T::WeightInfo::validate())] + pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { + let controller = ensure_signed(origin)?; + + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(ledger.active >= MinValidatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; + + // Only check limits if they are not already a validator. + if !Validators::::contains_key(stash) { + // If this error is reached, we need to adjust the `MinValidatorBond` and start + // calling `chill_other`. Until then, we explicitly block new validators to protect + // the runtime. + if let Some(max_validators) = MaxValidatorsCount::::get() { + ensure!( + CounterForValidators::::get() < max_validators, + Error::::TooManyValidators + ); + } + } + + Self::do_remove_nominator(stash); + Self::do_add_validator(stash, prefs); + Ok(()) + } + + /// Declare the desire to nominate `targets` for the origin controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// # + /// - The transaction's complexity is proportional to the size of `targets` (N) + /// which is capped at CompactAssignments::LIMIT (MAX_NOMINATIONS). + /// - Both the reads and writes follow a similar pattern. + /// # + #[pallet::weight(T::WeightInfo::nominate(targets.len() as u32))] + pub fn nominate( + origin: OriginFor, + targets: Vec<::Source>, + ) -> DispatchResult { + let controller = ensure_signed(origin)?; + + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(ledger.active >= MinNominatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; + + // Only check limits if they are not already a nominator. + if !Nominators::::contains_key(stash) { + // If this error is reached, we need to adjust the `MinNominatorBond` and start + // calling `chill_other`. Until then, we explicitly block new nominators to protect + // the runtime. + if let Some(max_nominators) = MaxNominatorsCount::::get() { + ensure!( + CounterForNominators::::get() < max_nominators, + Error::::TooManyNominators + ); + } + } + + ensure!(!targets.is_empty(), Error::::EmptyTargets); + ensure!(targets.len() <= T::MAX_NOMINATIONS as usize, Error::::TooManyTargets); + + let old = Nominators::::get(stash).map_or_else(Vec::new, |x| x.targets); + + let targets = targets + .into_iter() + .map(|t| T::Lookup::lookup(t).map_err(DispatchError::from)) + .map(|n| { + n.and_then(|n| { + if old.contains(&n) || !Validators::::get(&n).blocked { + Ok(n) + } else { + Err(Error::::BadTarget.into()) + } + }) + }) + .collect::, _>>()?; + + let nominations = Nominations { + targets, + // Initial nominations are considered submitted at era 0. See `Nominations` doc + submitted_in: Self::current_era().unwrap_or(0), + suppressed: false, + }; + + Self::do_remove_validator(stash); + Self::do_add_nominator(stash, nominations); + Ok(()) + } + + /// Declare no desire to either validate or nominate. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - Contains one read. + /// - Writes are limited to the `origin` account key. 
+ /// # + #[pallet::weight(T::WeightInfo::chill())] + pub fn chill(origin: OriginFor) -> DispatchResult { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + Self::chill_stash(&ledger.stash); + Ok(()) + } + + /// (Re-)set the payment target for a controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - Contains a limited number of reads. + /// - Writes are limited to the `origin` account key. + /// --------- + /// - Weight: O(1) + /// - DB Weight: + /// - Read: Ledger + /// - Write: Payee + /// # + #[pallet::weight(T::WeightInfo::set_payee())] + pub fn set_payee( + origin: OriginFor, + payee: RewardDestination, + ) -> DispatchResult { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let stash = &ledger.stash; + >::insert(stash, payee); + Ok(()) + } + + /// (Re-)set the controller of a stash. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - Contains a limited number of reads. + /// - Writes are limited to the `origin` account key. + /// ---------- + /// Weight: O(1) + /// DB Weight: + /// - Read: Bonded, Ledger New Controller, Ledger Old Controller + /// - Write: Bonded, Ledger New Controller, Ledger Old Controller + /// # + #[pallet::weight(T::WeightInfo::set_controller())] + pub fn set_controller( + origin: OriginFor, + controller: ::Source, + ) -> DispatchResult { + let stash = ensure_signed(origin)?; + let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; + let controller = T::Lookup::lookup(controller)?; + if >::contains_key(&controller) { + Err(Error::::AlreadyPaired)? + } + if controller != old_controller { + >::insert(&stash, &controller); + if let Some(l) = >::take(&old_controller) { + >::insert(&controller, l); + } + } + Ok(()) + } + + /// Sets the ideal number of validators. + /// + /// The dispatch origin must be Root. + /// + /// # + /// Weight: O(1) + /// Write: Validator Count + /// # + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn set_validator_count( + origin: OriginFor, + #[pallet::compact] new: u32, + ) -> DispatchResult { + ensure_root(origin)?; + ValidatorCount::::put(new); + Ok(()) + } + + /// Increments the ideal number of validators. + /// + /// The dispatch origin must be Root. + /// + /// # + /// Same as [`Self::set_validator_count`]. + /// # + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn increase_validator_count( + origin: OriginFor, + #[pallet::compact] additional: u32, + ) -> DispatchResult { + ensure_root(origin)?; + ValidatorCount::::mutate(|n| *n += additional); + Ok(()) + } + + /// Scale up the ideal number of validators by a factor. + /// + /// The dispatch origin must be Root. + /// + /// # + /// Same as [`Self::set_validator_count`]. + /// # + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn scale_validator_count(origin: OriginFor, factor: Percent) -> DispatchResult { + ensure_root(origin)?; + ValidatorCount::::mutate(|n| *n += factor * *n); + Ok(()) + } + + /// Force there to be no new eras indefinitely. + /// + /// The dispatch origin must be Root. 
+ /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// Thus the election process may be ongoing when this is called. In this case the + /// election will continue until the next era is triggered. + /// + /// # + /// - No arguments. + /// - Weight: O(1) + /// - Write: ForceEra + /// # + #[pallet::weight(T::WeightInfo::force_no_eras())] + pub fn force_no_eras(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + ForceEra::::put(Forcing::ForceNone); + Ok(()) + } + + /// Force there to be a new era at the end of the next session. After this, it will be + /// reset to normal (non-forced) behaviour. + /// + /// The dispatch origin must be Root. + /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// If this is called just before a new era is triggered, the election process may not + /// have enough blocks to get a result. + /// + /// # + /// - No arguments. + /// - Weight: O(1) + /// - Write ForceEra + /// # + #[pallet::weight(T::WeightInfo::force_new_era())] + pub fn force_new_era(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + ForceEra::::put(Forcing::ForceNew); + Ok(()) + } + + /// Set the validators who cannot be slashed (if any). + /// + /// The dispatch origin must be Root. + /// + /// # + /// - O(V) + /// - Write: Invulnerables + /// # + #[pallet::weight(T::WeightInfo::set_invulnerables(invulnerables.len() as u32))] + pub fn set_invulnerables( + origin: OriginFor, + invulnerables: Vec, + ) -> DispatchResult { + ensure_root(origin)?; + >::put(invulnerables); + Ok(()) + } + + /// Force a current staker to become completely unstaked, immediately. + /// + /// The dispatch origin must be Root. + /// + /// # + /// O(S) where S is the number of slashing spans to be removed + /// Reads: Bonded, Slashing Spans, Account, Locks + /// Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, + /// Account, Locks Writes Each: SpanSlash * S + /// # + #[pallet::weight(T::WeightInfo::force_unstake(*num_slashing_spans))] + pub fn force_unstake( + origin: OriginFor, + stash: T::AccountId, + num_slashing_spans: u32, + ) -> DispatchResult { + ensure_root(origin)?; + + // Remove all staking-related information. + Self::kill_stash(&stash, num_slashing_spans)?; + + // Remove the lock. + T::Currency::remove_lock(STAKING_ID, &stash); + Ok(()) + } + + /// Force there to be a new era at the end of sessions indefinitely. + /// + /// The dispatch origin must be Root. + /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// If this is called just before a new era is triggered, the election process may not + /// have enough blocks to get a result. + /// + /// # + /// - Weight: O(1) + /// - Write: ForceEra + /// # + #[pallet::weight(T::WeightInfo::force_new_era_always())] + pub fn force_new_era_always(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + ForceEra::::put(Forcing::ForceAlways); + Ok(()) + } + + /// Cancel enactment of a deferred slash. + /// + /// Can be called by the `T::SlashCancelOrigin`. + /// + /// Parameters: era and indices of the slashes for that era to kill. + /// + /// # + /// Complexity: O(U + S) + /// with U unapplied slashes weighted with U=1000 + /// and S is the number of slash indices to be canceled. 
+ /// - Read: Unapplied Slashes + /// - Write: Unapplied Slashes + /// # + #[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32))] + pub fn cancel_deferred_slash( + origin: OriginFor, + era: EraIndex, + slash_indices: Vec, + ) -> DispatchResult { + T::SlashCancelOrigin::ensure_origin(origin)?; + + ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); + ensure!(is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); + + let mut unapplied = ::UnappliedSlashes::get(&era); + let last_item = slash_indices[slash_indices.len() - 1]; + ensure!((last_item as usize) < unapplied.len(), Error::::InvalidSlashIndex); + + for (removed, index) in slash_indices.into_iter().enumerate() { + let index = (index as usize) - removed; + unapplied.remove(index); + } + + ::UnappliedSlashes::insert(&era, &unapplied); + Ok(()) + } + + /// Pay out all the stakers behind a single validator for a single era. + /// + /// - `validator_stash` is the stash account of the validator. Their nominators, up to + /// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards. + /// - `era` may be any era between `[current_era - history_depth; current_era]`. + /// + /// The origin of this call must be _Signed_. Any account can call this function, even if + /// it is not one of the stakers. + /// + /// # + /// - Time complexity: at most O(MaxNominatorRewardedPerValidator). + /// - Contains a limited number of reads and writes. + /// ----------- + /// N is the Number of payouts for the validator (including the validator) + /// Weight: + /// - Reward Destination Staked: O(N) + /// - Reward Destination Controller (Creating): O(N) + /// + /// NOTE: weights are assuming that payouts are made to alive stash account (Staked). + /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. + /// # + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked( + T::MaxNominatorRewardedPerValidator::get() + ))] + pub fn payout_stakers( + origin: OriginFor, + validator_stash: T::AccountId, + era: EraIndex, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + Self::do_payout_stakers(validator_stash, era) + } + + /// Rebond a portion of the stash scheduled to be unlocked. + /// + /// The dispatch origin must be signed by the controller. + /// + /// # + /// - Time complexity: O(L), where L is unlocking chunks + /// - Bounded by `MAX_UNLOCKING_CHUNKS`. + /// - Storage changes: Can't increase storage, only decrease it. + /// # + #[pallet::weight(T::WeightInfo::rebond(MAX_UNLOCKING_CHUNKS as u32))] + pub fn rebond( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); + + let initial_unlocking = ledger.unlocking.len() as u32; + let ledger = ledger.rebond(value); + // Last check: the new active amount of ledger must be more than ED. + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); + + Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); + Self::update_ledger(&controller, &ledger); + + let removed_chunks = 1u32 // for the case where the last iterated chunk is not removed + .saturating_add(initial_unlocking) + .saturating_sub(ledger.unlocking.len() as u32); + Ok(Some(T::WeightInfo::rebond(removed_chunks)).into()) + } + + /// Set `HistoryDepth` value. 
+
+		/// Set `HistoryDepth` value. This function will delete any history information
+		/// when `HistoryDepth` is reduced.
+		///
+		/// Parameters:
+		/// - `new_history_depth`: The new history depth you would like to set.
+		/// - `era_items_deleted`: The number of items that will be deleted by this dispatch. This
+		///   should report all the storage items that will be deleted by clearing old era history.
+		///   Needed to report an accurate weight for the dispatch. Trusted by `Root` to report an
+		///   accurate number.
+		///
+		/// Origin must be root.
+		///
+		/// # <weight>
+		/// - E: Number of history depths removed, i.e. 10 -> 7 = 3
+		/// - Weight: O(E)
+		/// - DB Weight:
+		///     - Reads: Current Era, History Depth
+		///     - Writes: History Depth
+		///     - Clear Prefix Each: Era Stakers, EraStakersClipped, ErasValidatorPrefs
+		///     - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake,
+		///       ErasStartSessionIndex
+		/// # </weight>
+		#[pallet::weight(T::WeightInfo::set_history_depth(*_era_items_deleted))]
+		pub fn set_history_depth(
+			origin: OriginFor<T>,
+			#[pallet::compact] new_history_depth: EraIndex,
+			#[pallet::compact] _era_items_deleted: u32,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+			if let Some(current_era) = Self::current_era() {
+				HistoryDepth::<T>::mutate(|history_depth| {
+					let last_kept = current_era.checked_sub(*history_depth).unwrap_or(0);
+					let new_last_kept = current_era.checked_sub(new_history_depth).unwrap_or(0);
+					for era_index in last_kept..new_last_kept {
+						Self::clear_era_information(era_index);
+					}
+					*history_depth = new_history_depth
+				})
+			}
+			Ok(())
+		}
+
+		/// Remove all data structures concerning a staker/stash once its balance is at the
+		/// minimum. This is essentially equivalent to `withdraw_unbonded` except it can be
+		/// called by anyone and the target `stash` must have no funds left beyond the ED.
+		///
+		/// This can be called from any origin.
+		///
+		/// - `stash`: The stash account to reap. Its balance must be zero.
+		///
+		/// # <weight>
+		/// Complexity: O(S) where S is the number of slashing spans on the account.
+		/// DB Weight:
+		/// - Reads: Stash Account, Bonded, Slashing Spans, Locks
+		/// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators,
+		///   Stash Account, Locks
+		/// - Writes Each: SpanSlash * S
+		/// # </weight>
+		#[pallet::weight(T::WeightInfo::reap_stash(*num_slashing_spans))]
+		pub fn reap_stash(
+			_origin: OriginFor<T>,
+			stash: T::AccountId,
+			num_slashing_spans: u32,
+		) -> DispatchResult {
+			let at_minimum = T::Currency::total_balance(&stash) == T::Currency::minimum_balance();
+			ensure!(at_minimum, Error::<T>::FundedTarget);
+			Self::kill_stash(&stash, num_slashing_spans)?;
+			T::Currency::remove_lock(STAKING_ID, &stash);
+			Ok(())
+		}
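+
+		// Aside (illustrative, not pallet logic): with an existential deposit of
+		// 1, a stash holding exactly 1 unit can be reaped by anyone, while a
+		// stash holding 2 units fails with `Error::<T>::FundedTarget`.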
+
+		/// Remove the given nominations from the calling validator.
+		///
+		/// Effects will be felt at the beginning of the next era.
+		///
+		/// The dispatch origin for this call must be _Signed_ by the controller, not the stash.
+		///
+		/// - `who`: A list of nominator stash accounts who are nominating this validator which
+		///   should no longer be nominating this validator.
+		///
+		/// Note: Making this call only makes sense if you first set the validator preferences to
+		/// block any further nominations.
+		#[pallet::weight(T::WeightInfo::kick(who.len() as u32))]
+		pub fn kick(
+			origin: OriginFor<T>,
+			who: Vec<<T::Lookup as StaticLookup>::Source>,
+		) -> DispatchResult {
+			let controller = ensure_signed(origin)?;
+			let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
+			let stash = &ledger.stash;
+
+			for nom_stash in who
+				.into_iter()
+				.map(T::Lookup::lookup)
+				.collect::<Result<Vec<T::AccountId>, _>>()?
+				.into_iter()
+			{
+				Nominators::<T>::mutate(&nom_stash, |maybe_nom| {
+					if let Some(ref mut nom) = maybe_nom {
+						if let Some(pos) = nom.targets.iter().position(|v| v == stash) {
+							nom.targets.swap_remove(pos);
+							Self::deposit_event(Event::<T>::Kicked(
+								nom_stash.clone(),
+								stash.clone(),
+							));
+						}
+					}
+				});
+			}
+
+			Ok(())
+		}
+
+		/// Update the various staking limits of this pallet.
+		///
+		/// * `min_nominator_bond`: The minimum active bond needed to be a nominator.
+		/// * `min_validator_bond`: The minimum active bond needed to be a validator.
+		/// * `max_nominator_count`: The max number of users who can be a nominator at once. When
+		///   set to `None`, no limit is enforced.
+		/// * `max_validator_count`: The max number of users who can be a validator at once. When
+		///   set to `None`, no limit is enforced.
+		/// * `threshold`: The fraction of the nominator/validator count limits that, once
+		///   reached, enables permissionless chilling via `chill_other`. When set to `None`,
+		///   `chill_other` is disabled.
+		///
+		/// Origin must be Root to call this function.
+		///
+		/// NOTE: Existing nominators and validators will not be affected by this update.
+		/// To kick people under the new limits, `chill_other` should be called.
+		#[pallet::weight(T::WeightInfo::set_staking_limits())]
+		pub fn set_staking_limits(
+			origin: OriginFor<T>,
+			min_nominator_bond: BalanceOf<T>,
+			min_validator_bond: BalanceOf<T>,
+			max_nominator_count: Option<u32>,
+			max_validator_count: Option<u32>,
+			threshold: Option<Percent>,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+			MinNominatorBond::<T>::set(min_nominator_bond);
+			MinValidatorBond::<T>::set(min_validator_bond);
+			MaxNominatorsCount::<T>::set(max_nominator_count);
+			MaxValidatorsCount::<T>::set(max_validator_count);
+			ChillThreshold::<T>::set(threshold);
+			Ok(())
+		}
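+
+		// Aside (illustrative numbers, not pallet logic): with a `ChillThreshold`
+		// of 75% and `MaxNominatorsCount` of 1000, third parties may start calling
+		// `chill_other` on under-bonded nominators once more than 750 are
+		// registered, since `Percent::from_percent(75) * 1000 < CounterForNominators`
+		// then holds.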
+
+		/// Declare a `controller` to stop participating as either a validator or nominator.
+		///
+		/// Effects will be felt at the beginning of the next era.
+		///
+		/// The dispatch origin for this call must be _Signed_, but can be called by anyone.
+		///
+		/// If the caller is the same as the controller being targeted, then no further checks are
+		/// enforced, and this function behaves just like `chill`.
+		///
+		/// If the caller is different than the controller being targeted, the following conditions
+		/// must be met:
+		/// * A `ChillThreshold` must be set and checked which defines how close to the max
+		///   nominators or validators we must reach before users can start chilling one-another.
+		/// * A `MaxNominatorCount` and `MaxValidatorCount` must be set which is used to determine
+		///   how close we are to the threshold.
+		/// * A `MinNominatorBond` and `MinValidatorBond` must be set and checked, which determines
+		///   if this is a person that should be chilled because they have not met the threshold
+		///   bond required.
+		///
+		/// This can be helpful if bond requirements are updated, and we need to remove old users
+		/// who do not satisfy these requirements.
+		// TODO: Maybe we can deprecate `chill` in the future.
+		// https://github.com/paritytech/substrate/issues/9111
+		#[pallet::weight(T::WeightInfo::chill_other())]
+		pub fn chill_other(origin: OriginFor<T>, controller: T::AccountId) -> DispatchResult {
+			// Anyone can call this function.
+			let caller = ensure_signed(origin)?;
+			let ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
+			let stash = ledger.stash;
+
+			// In order for one user to chill another user, the following conditions must be met:
+			// * A `ChillThreshold` is set which defines how close to the max nominators or
+			//   validators we must reach before users can start chilling one-another.
+			// * A `MaxNominatorCount` and `MaxValidatorCount` which is used to determine how
+			//   close we are to the threshold.
+			// * A `MinNominatorBond` and `MinValidatorBond` which is the final condition checked
+			//   to determine this is a person that should be chilled because they have not met
+			//   the threshold bond required.
+			//
+			// Otherwise, if caller is the same as the controller, this is just like `chill`.
+			if caller != controller {
+				let threshold = ChillThreshold::<T>::get().ok_or(Error::<T>::CannotChillOther)?;
+				let min_active_bond = if Nominators::<T>::contains_key(&stash) {
+					let max_nominator_count =
+						MaxNominatorsCount::<T>::get().ok_or(Error::<T>::CannotChillOther)?;
+					let current_nominator_count = CounterForNominators::<T>::get();
+					ensure!(
+						threshold * max_nominator_count < current_nominator_count,
+						Error::<T>::CannotChillOther
+					);
+					MinNominatorBond::<T>::get()
+				} else if Validators::<T>::contains_key(&stash) {
+					let max_validator_count =
+						MaxValidatorsCount::<T>::get().ok_or(Error::<T>::CannotChillOther)?;
+					let current_validator_count = CounterForValidators::<T>::get();
+					ensure!(
+						threshold * max_validator_count < current_validator_count,
+						Error::<T>::CannotChillOther
+					);
+					MinValidatorBond::<T>::get()
+				} else {
+					Zero::zero()
+				};
+
+				ensure!(ledger.active < min_active_bond, Error::<T>::CannotChillOther);
+			}
+
+			Self::chill_stash(&stash);
+			Ok(())
+		}
+	}
+}
+
+/// Check that list is sorted and has no duplicates.
+fn is_sorted_and_unique(list: &[u32]) -> bool {
+	list.windows(2).all(|w| w[0] < w[1])
+}
diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs
index af9a92f16a463..15ca85b4d046f 100644
--- a/frame/staking/src/slashing.rs
+++ b/frame/staking/src/slashing.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -47,19 +47,23 @@
 //! has multiple misbehaviors. However, accounting for such cases is necessary
 //! to deter a class of "rage-quit" attacks.
 //!
-//! Based on research at https://research.web3.foundation/en/latest/polkadot/slashing/npos/
+//! Based on research at <https://research.web3.foundation/en/latest/polkadot/slashing/npos/>
 
-use super::{
-	EraIndex, Trait, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface,
-	NegativeImbalanceOf, UnappliedSlash, Error,
+use crate::{
+	BalanceOf, Config, EraIndex, Error, Exposure, NegativeImbalanceOf, Pallet, Perbill,
+	SessionInterface, Store, UnappliedSlash,
 };
-use sp_runtime::{traits::{Zero, Saturating}, RuntimeDebug, DispatchResult};
+use codec::{Decode, Encode};
 use frame_support::{
-	StorageMap, StorageDoubleMap, ensure,
-	traits::{Currency, OnUnbalanced, Imbalance},
+	ensure,
+	traits::{Currency, Imbalance, OnUnbalanced},
+};
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{Saturating, Zero},
+	DispatchResult, RuntimeDebug,
 };
 use sp_std::vec::Vec;
-use codec::{Encode, Decode};
 
 /// The proportion of the slashing reward to be paid out on the first slashing detection.
 /// This is f_1 in the paper.
@@ -69,7 +73,7 @@ const REWARD_F1: Perbill = Perbill::from_percent(50); pub type SpanIndex = u32; // A range of start..end eras for a slashing span. -#[derive(Encode, Decode)] +#[derive(Encode, Decode, TypeInfo)] #[cfg_attr(test, derive(Debug, PartialEq))] pub(crate) struct SlashingSpan { pub(crate) index: SpanIndex, @@ -84,7 +88,7 @@ impl SlashingSpan { } /// An encoding of all of a nominator's slashing spans. -#[derive(Encode, Decode, RuntimeDebug)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] pub struct SlashingSpans { // the index of the current slashing span of the nominator. different for // every stash, resets when the account hits free balance 0. @@ -118,7 +122,9 @@ impl SlashingSpans { // that internal state is unchanged. pub(crate) fn end_span(&mut self, now: EraIndex) -> bool { let next_start = now + 1; - if next_start <= self.last_start { return false } + if next_start <= self.last_start { + return false + } let last_length = next_start - self.last_start; self.prior.insert(0, last_length); @@ -153,7 +159,8 @@ impl SlashingSpans { // If this returns `Some`, then it includes a range start..end of all the span // indices which were pruned. fn prune(&mut self, window_start: EraIndex) -> Option<(SpanIndex, SpanIndex)> { - let old_idx = self.iter() + let old_idx = self + .iter() .skip(1) // skip ongoing span. .position(|span| span.length.map_or(false, |len| span.start + len <= window_start)); @@ -163,7 +170,7 @@ impl SlashingSpans { self.prior.truncate(o); let new_earliest = self.span_index - self.prior.len() as SpanIndex; Some((earliest_span_index, new_earliest)) - } + }, None => None, }; @@ -174,7 +181,7 @@ impl SlashingSpans { } /// A slashing-span record for a particular stash. -#[derive(Encode, Decode, Default)] +#[derive(Encode, Decode, Default, TypeInfo)] pub(crate) struct SpanRecord { slashed: Balance, paid_out: Balance, @@ -190,7 +197,7 @@ impl SpanRecord { /// Parameters for performing a slash. #[derive(Clone)] -pub(crate) struct SlashParams<'a, T: 'a + Trait> { +pub(crate) struct SlashParams<'a, T: 'a + Config> { /// The stash account being slashed. pub(crate) stash: &'a T::AccountId, /// The proportion of the slash. @@ -214,18 +221,11 @@ pub(crate) struct SlashParams<'a, T: 'a + Trait> { /// /// The pending slash record returned does not have initialized reporters. Those have /// to be set at a higher level, if any. -pub(crate) fn compute_slash(params: SlashParams) - -> Option>> -{ - let SlashParams { - stash, - slash, - exposure, - slash_era, - window_start, - now, - reward_proportion, - } = params.clone(); +pub(crate) fn compute_slash( + params: SlashParams, +) -> Option>> { + let SlashParams { stash, slash, exposure, slash_era, window_start, now, reward_proportion } = + params.clone(); let mut reward_payout = Zero::zero(); let mut val_slashed = Zero::zero(); @@ -236,22 +236,17 @@ pub(crate) fn compute_slash(params: SlashParams) // kick out the validator even if they won't be slashed, // as long as the misbehavior is from their most recent slashing span. kick_out_if_recent::(params); - return None; + return None } - let (prior_slash_p, _era_slash) = as Store>::ValidatorSlashInEra::get( - &slash_era, - stash, - ).unwrap_or((Perbill::zero(), Zero::zero())); + let (prior_slash_p, _era_slash) = + as Store>::ValidatorSlashInEra::get(&slash_era, stash) + .unwrap_or((Perbill::zero(), Zero::zero())); // compare slash proportions rather than slash values to avoid issues due to rounding // error. 
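+	// (Numeric aside, illustrative only: a 25% report is stored as
+	// `Perbill::from_percent(25).deconstruct() == 250_000_000` parts per
+	// billion, which compares exactly against a prior 10% == 100_000_000
+	// without touching already-rounded balance amounts.)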
if slash.deconstruct() > prior_slash_p.deconstruct() { - as Store>::ValidatorSlashInEra::insert( - &slash_era, - stash, - &(slash, own_slash), - ); + as Store>::ValidatorSlashInEra::insert(&slash_era, stash, &(slash, own_slash)); } else { // we slash based on the max in era - this new event is not the max, // so neither the validator or any nominators will need an update. @@ -260,7 +255,7 @@ pub(crate) fn compute_slash(params: SlashParams) // pays out some reward even if the latest report is not max-in-era. // we opt to avoid the nominator lookups and edits and leave more rewards // for more drastic misbehavior. - return None; + return None } // apply slash to validator. @@ -273,10 +268,7 @@ pub(crate) fn compute_slash(params: SlashParams) reward_proportion, ); - let target_span = spans.compare_and_update_span_slash( - slash_era, - own_slash, - ); + let target_span = spans.compare_and_update_span_slash(slash_era, own_slash); if target_span == Some(spans.span_index()) { // misbehavior occurred within the current slashing span - take appropriate @@ -285,12 +277,12 @@ pub(crate) fn compute_slash(params: SlashParams) // chill the validator - it misbehaved in the current span and should // not continue in the next election. also end the slashing span. spans.end_span(now); - >::chill_stash(stash); + >::chill_stash(stash); // make sure to disable validator till the end of this session if T::SessionInterface::disable_validator(stash).unwrap_or(false) { // force a new era, to select a new validator set - >::ensure_new_era() + >::ensure_new_era() } } } @@ -309,9 +301,7 @@ pub(crate) fn compute_slash(params: SlashParams) // doesn't apply any slash, but kicks out the validator if the misbehavior is from // the most recent slashing span. -fn kick_out_if_recent( - params: SlashParams, -) { +fn kick_out_if_recent(params: SlashParams) { // these are not updated by era-span or end-span. let mut reward_payout = Zero::zero(); let mut val_slashed = Zero::zero(); @@ -325,12 +315,12 @@ fn kick_out_if_recent( if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) { spans.end_span(params.now); - >::chill_stash(params.stash); + >::chill_stash(params.stash); // make sure to disable validator till the end of this session if T::SessionInterface::disable_validator(params.stash).unwrap_or(false) { // force a new era, to select a new validator set - >::ensure_new_era() + >::ensure_new_era() } } } @@ -338,20 +328,13 @@ fn kick_out_if_recent( /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator. /// /// Returns the amount of reward to pay out. 
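+/// A numeric sketch of the max-in-era accounting below (illustrative only, single
+/// validator case): if a nominator's exposure is 100 units and a prior 10% slash
+/// already took 10 in this era, a later 25% report adds only `25 - 10 = 15`
+/// (`own_slash_difference`), so the nominator's total era slash tracks the
+/// maximum reported proportion rather than the sum of all reports.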
-fn slash_nominators( +fn slash_nominators( params: SlashParams, prior_slash_p: Perbill, nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, ) -> BalanceOf { - let SlashParams { - stash: _, - slash, - exposure, - slash_era, - window_start, - now, - reward_proportion, - } = params; + let SlashParams { stash: _, slash, exposure, slash_era, window_start, now, reward_proportion } = + params; let mut reward_payout = Zero::zero(); @@ -367,18 +350,12 @@ fn slash_nominators( let own_slash_by_validator = slash * nominator.value; let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); - let mut era_slash = as Store>::NominatorSlashInEra::get( - &slash_era, - stash, - ).unwrap_or_else(|| Zero::zero()); + let mut era_slash = as Store>::NominatorSlashInEra::get(&slash_era, stash) + .unwrap_or_else(|| Zero::zero()); era_slash += own_slash_difference; - as Store>::NominatorSlashInEra::insert( - &slash_era, - stash, - &era_slash, - ); + as Store>::NominatorSlashInEra::insert(&slash_era, stash, &era_slash); era_slash }; @@ -393,10 +370,7 @@ fn slash_nominators( reward_proportion, ); - let target_span = spans.compare_and_update_span_slash( - slash_era, - era_slash, - ); + let target_span = spans.compare_and_update_span_slash(slash_era, era_slash); if target_span == Some(spans.span_index()) { // End the span, but don't chill the nominator. its nomination @@ -418,7 +392,7 @@ fn slash_nominators( // dropping this struct applies any necessary slashes, which can lead to free balance // being 0, and the account being garbage-collected -- a dead account should get no new // metadata. -struct InspectingSpans<'a, T: Trait + 'a> { +struct InspectingSpans<'a, T: Config + 'a> { dirty: bool, window_start: EraIndex, stash: &'a T::AccountId, @@ -430,16 +404,16 @@ struct InspectingSpans<'a, T: Trait + 'a> { } // fetches the slashing spans record for a stash account, initializing it if necessary. -fn fetch_spans<'a, T: Trait + 'a>( +fn fetch_spans<'a, T: Config + 'a>( stash: &'a T::AccountId, window_start: EraIndex, paid_out: &'a mut BalanceOf, slash_of: &'a mut BalanceOf, reward_proportion: Perbill, ) -> InspectingSpans<'a, T> { - let spans = as Store>::SlashingSpans::get(stash).unwrap_or_else(|| { + let spans = as Store>::SlashingSpans::get(stash).unwrap_or_else(|| { let spans = SlashingSpans::new(window_start); - as Store>::SlashingSpans::insert(stash, &spans); + as Store>::SlashingSpans::insert(stash, &spans); spans }); @@ -455,7 +429,7 @@ fn fetch_spans<'a, T: Trait + 'a>( } } -impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { +impl<'a, T: 'a + Config> InspectingSpans<'a, T> { fn span_index(&self) -> SpanIndex { self.spans.span_index } @@ -488,7 +462,7 @@ impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { ) -> Option { let target_span = self.era_span(slash_era)?; let span_slash_key = (self.stash.clone(), target_span.index); - let mut span_record = as Store>::SpanSlash::get(&span_slash_key); + let mut span_record = as Store>::SpanSlash::get(&span_slash_key); let mut changed = false; let reward = if span_record.slashed < slash { @@ -497,8 +471,8 @@ impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { span_record.slashed = slash; // compute reward. 
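+			// (Numeric aside, illustrative only: with `reward_proportion` = 10%,
+			// a new max slash of 1000 and nothing paid out yet, this gives
+			// `REWARD_F1 * (100 - 0) = 50` to reporters, i.e. f_1 of the
+			// yet-unpaid reward budget for this span.)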
- let reward = REWARD_F1 - * (self.reward_proportion * slash).saturating_sub(span_record.paid_out); + let reward = + REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out); self.add_slash(difference, slash_era); changed = true; @@ -519,47 +493,52 @@ impl<'a, T: 'a + Trait> InspectingSpans<'a, T> { if changed { self.dirty = true; - as Store>::SpanSlash::insert(&span_slash_key, &span_record); + as Store>::SpanSlash::insert(&span_slash_key, &span_record); } Some(target_span.index) } } -impl<'a, T: 'a + Trait> Drop for InspectingSpans<'a, T> { +impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { fn drop(&mut self) { // only update on disk if we slashed this account. - if !self.dirty { return } + if !self.dirty { + return + } if let Some((start, end)) = self.spans.prune(self.window_start) { for span_index in start..end { - as Store>::SpanSlash::remove(&(self.stash.clone(), span_index)); + as Store>::SpanSlash::remove(&(self.stash.clone(), span_index)); } } - as Store>::SlashingSpans::insert(self.stash, &self.spans); + as Store>::SlashingSpans::insert(self.stash, &self.spans); } } /// Clear slashing metadata for an obsolete era. -pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { - as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); - as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); +pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { + as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era, None); + as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era, None); } /// Clear slashing metadata for a dead account. -pub(crate) fn clear_stash_metadata( +pub(crate) fn clear_stash_metadata( stash: &T::AccountId, num_slashing_spans: u32, ) -> DispatchResult { - let spans = match as Store>::SlashingSpans::get(stash) { + let spans = match as Store>::SlashingSpans::get(stash) { None => return Ok(()), Some(s) => s, }; - ensure!(num_slashing_spans as usize >= spans.iter().count(), Error::::IncorrectSlashingSpans); + ensure!( + num_slashing_spans as usize >= spans.iter().count(), + Error::::IncorrectSlashingSpans + ); - as Store>::SlashingSpans::remove(stash); + as Store>::SlashingSpans::remove(stash); // kill slashing-span metadata for account. // @@ -567,7 +546,7 @@ pub(crate) fn clear_stash_metadata( // in that case, they may re-bond, but it would count again as span 0. Further ancient // slashes would slash into this new bond, since metadata has now been cleared. for span in spans.iter() { - as Store>::SpanSlash::remove(&(stash.clone(), span.index)); + as Store>::SpanSlash::remove(&(stash.clone(), span.index)); } Ok(()) @@ -576,18 +555,18 @@ pub(crate) fn clear_stash_metadata( // apply the slash to a stash account, deducting any missing funds from the reward // payout, saturating at 0. this is mildly unfair but also an edge-case that // can only occur when overlapping locked funds have been slashed. -pub fn do_slash( +pub fn do_slash( stash: &T::AccountId, value: BalanceOf, reward_payout: &mut BalanceOf, slashed_imbalance: &mut NegativeImbalanceOf, ) { - let controller = match >::bonded(stash) { + let controller = match >::bonded(stash) { None => return, // defensive: should always exist. Some(c) => c, }; - let mut ledger = match >::ledger(&controller) { + let mut ledger = match >::ledger(&controller) { Some(ledger) => ledger, None => return, // nothing to do. 
}; @@ -603,17 +582,15 @@ pub fn do_slash( *reward_payout = reward_payout.saturating_sub(missing); } - >::update_ledger(&controller, &ledger); + >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event( - super::RawEvent::Slash(stash.clone(), value) - ); + >::deposit_event(super::Event::::Slashed(stash.clone(), value)); } } /// Apply a previously-unapplied slash. -pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash>) { +pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash>) { let mut slashed_imbalance = NegativeImbalanceOf::::zero(); let mut reward_payout = unapplied_slash.payout; @@ -625,20 +602,14 @@ pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash( - &nominator, - nominator_slash, - &mut reward_payout, - &mut slashed_imbalance, - ); + do_slash::(&nominator, nominator_slash, &mut reward_payout, &mut slashed_imbalance); } pay_reporters::(reward_payout, slashed_imbalance, &unapplied_slash.reporters); } - /// Apply a reward payout to some reporters, paying the rewards out of the slashed imbalance. -fn pay_reporters( +fn pay_reporters( reward_payout: BalanceOf, slashed_imbalance: NegativeImbalanceOf, reporters: &[T::AccountId], @@ -774,17 +745,13 @@ mod tests { assert_eq!(spans.prune(1000), Some((8, 10))); assert_eq!( spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 1000, length: None }, - ], + vec![SlashingSpan { index: 10, start: 1000, length: None },], ); assert_eq!(spans.prune(2000), None); assert_eq!( spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 2000, length: None }, - ], + vec![SlashingSpan { index: 10, start: 2000, length: None },], ); // now all in one shot. @@ -797,9 +764,7 @@ mod tests { assert_eq!(spans.prune(2000), Some((6, 10))); assert_eq!( spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 2000, length: None }, - ], + vec![SlashingSpan { index: 10, start: 2000, length: None },], ); } diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 57ad95bcf586f..795c066d09bb3 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,18 +18,31 @@ //! Testing utils for staking. Provides some common functions to setup staking state, such as //! bonding validators, nominators, and generating different types of solutions. -use crate::*; -use crate::Module as Staking; +use crate::{Pallet as Staking, *}; use frame_benchmarking::account; use frame_system::RawOrigin; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; use sp_io::hashing::blake2_256; -use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; -use sp_npos_elections::*; + +use frame_support::{pallet_prelude::*, traits::Currency}; +use sp_runtime::{traits::StaticLookup, Perbill}; +use sp_std::prelude::*; const SEED: u32 = 0; +/// This function removes all validators and nominators from storage. +pub fn clear_validators_and_nominators() { + Validators::::remove_all(None); + CounterForValidators::::kill(); + Nominators::::remove_all(None); + CounterForNominators::::kill(); +} + /// Grab a funded user. 
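+///
+/// A hedged usage sketch (assumes a mock runtime `Test` implementing this
+/// pallet's `Config`, as in the benchmarks):
+/// ```ignore
+/// let user = create_funded_user::<Test>("caller", 0, 100);
+/// ```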
-pub fn create_funded_user( +pub fn create_funded_user( string: &'static str, n: u32, balance_factor: u32, @@ -43,50 +56,58 @@ pub fn create_funded_user( } /// Create a stash and controller pair. -pub fn create_stash_controller( +pub fn create_stash_controller( n: u32, balance_factor: u32, destination: RewardDestination, -) - -> Result<(T::AccountId, T::AccountId), &'static str> -{ +) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); let controller = create_funded_user::("controller", n, balance_factor); - let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); - Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, destination)?; + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup, + amount, + destination, + )?; return Ok((stash, controller)) } /// Create a stash and controller pair, where the controller is dead, and payouts go to controller. /// This is used to test worst case payout scenarios. -pub fn create_stash_and_dead_controller( +pub fn create_stash_and_dead_controller( n: u32, balance_factor: u32, destination: RewardDestination, -) - -> Result<(T::AccountId, T::AccountId), &'static str> -{ +) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); // controller has no funds let controller = create_funded_user::("controller", n, 0); - let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); - Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, destination)?; + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup, + amount, + destination, + )?; return Ok((stash, controller)) } /// create `max` validators. -pub fn create_validators( +pub fn create_validators( max: u32, balance_factor: u32, ) -> Result::Source>, &'static str> { let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); - for i in 0 .. max { - let (stash, controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; + for i in 0..max { + let (stash, controller) = + create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; let stash_lookup: ::Source = T::Lookup::unlookup(stash); validators.push(stash_lookup); @@ -97,288 +118,75 @@ pub fn create_validators( /// This function generates validators and nominators who are randomly nominating /// `edge_per_nominator` random validators (until `to_nominate` if provided). /// +/// NOTE: This function will remove any existing validators or nominators to ensure +/// we are working with a clean state. +/// /// Parameters: /// - `validators`: number of bonded validators /// - `nominators`: number of bonded nominators. /// - `edge_per_nominator`: number of edge (vote) per nominator. /// - `randomize_stake`: whether to randomize the stakes. 
-/// - `to_nominate`: if `Some(n)`, only the first `n` bonded validator are voted upon. -/// Else, all of them are considered and `edge_per_nominator` random validators are voted for. +/// - `to_nominate`: if `Some(n)`, only the first `n` bonded validator are voted upon. Else, all of +/// them are considered and `edge_per_nominator` random validators are voted for. /// -/// Return the validators choosen to be nominated. -pub fn create_validators_with_nominators_for_era( +/// Return the validators chosen to be nominated. +pub fn create_validators_with_nominators_for_era( validators: u32, nominators: u32, edge_per_nominator: usize, randomize_stake: bool, to_nominate: Option, ) -> Result::Source>, &'static str> { - let mut validators_stash: Vec<::Source> - = Vec::with_capacity(validators as usize); + clear_validators_and_nominators::(); + + let mut validators_stash: Vec<::Source> = + Vec::with_capacity(validators as usize); let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); // Create validators - for i in 0 .. validators { + for i in 0..validators { let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; - let (v_stash, v_controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; + let (v_stash, v_controller) = + create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); + let stash_lookup: ::Source = + T::Lookup::unlookup(v_stash.clone()); validators_stash.push(stash_lookup.clone()); } let to_nominate = to_nominate.unwrap_or(validators_stash.len() as u32) as usize; - let validator_choosen = validators_stash[0..to_nominate].to_vec(); + let validator_chosen = validators_stash[0..to_nominate].to_vec(); // Create nominators - for j in 0 .. nominators { + for j in 0..nominators { let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; - let (_n_stash, n_controller) = create_stash_controller::( - u32::max_value() - j, - balance_factor, - RewardDestination::Staked, - )?; + let (_n_stash, n_controller) = + create_stash_controller::(u32::MAX - j, balance_factor, RewardDestination::Staked)?; // Have them randomly validate - let mut available_validators = validator_choosen.clone(); + let mut available_validators = validator_chosen.clone(); let mut selected_validators: Vec<::Source> = Vec::with_capacity(edge_per_nominator); - for _ in 0 .. validators.min(edge_per_nominator as u32) { + for _ in 0..validators.min(edge_per_nominator as u32) { let selected = rng.next_u32() as usize % available_validators.len(); let validator = available_validators.remove(selected); selected_validators.push(validator); } - Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), selected_validators)?; + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + selected_validators, + )?; } - ValidatorCount::put(validators); + ValidatorCount::::put(validators); - Ok(validator_choosen) + Ok(validator_chosen) } - -/// Build a _really bad_ but acceptable solution for election. This should always yield a solution -/// which has a less score than the seq-phragmen. 
-pub fn get_weak_solution( - do_reduce: bool, -) -> (Vec, CompactAssignments, ElectionScore, ElectionSize) { - let mut backing_stake_of: BTreeMap> = BTreeMap::new(); - - // self stake - >::iter().for_each(|(who, _p)| { - *backing_stake_of.entry(who.clone()).or_insert_with(|| Zero::zero()) += - >::slashable_balance_of(&who) - }); - - // elect winners. We chose the.. least backed ones. - let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); - sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); - let winners: Vec = sorted - .iter() - .rev() - .cloned() - .take(>::validator_count() as usize) - .collect(); - - let mut staked_assignments: Vec> = Vec::new(); - // you could at this point start adding some of the nominator's stake, but for now we don't. - // This solution must be bad. - - // add self support to winners. - winners.iter().for_each(|w| { - staked_assignments.push(StakedAssignment { - who: w.clone(), - distribution: vec![( - w.clone(), - >::slashable_balance_of_vote_weight( - &w, - T::Currency::total_issuance(), - ).into(), - )], - }) - }); - - if do_reduce { - reduce(&mut staked_assignments); - } - - // helpers for building the compact - let snapshot_validators = >::snapshot_validators().unwrap(); - let snapshot_nominators = >::snapshot_nominators().unwrap(); - - let nominator_index = |a: &T::AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let validator_index = |a: &T::AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - - // convert back to ratio assignment. This takes less space. - let low_accuracy_assignment = assignment_staked_to_ratio_normalized(staked_assignments) - .expect("Failed to normalize"); - - // re-calculate score based on what the chain will decode. - let score = { - let staked = assignment_ratio_to_staked::<_, OffchainAccuracy, _>( - low_accuracy_assignment.clone(), - >::slashable_balance_of_fn(), - ); - - let support_map = build_support_map::( - winners.as_slice(), - staked.as_slice(), - ).unwrap(); - evaluate_support::(&support_map) - }; - - // compact encode the assignment. - let compact = CompactAssignments::from_assignment( - low_accuracy_assignment, - nominator_index, - validator_index, - ) - .unwrap(); - - // winners to index. - let winners = winners - .into_iter() - .map(|w| { - snapshot_validators - .iter() - .position(|v| *v == w) - .unwrap() - .try_into() - .unwrap() - }) - .collect::>(); - - let size = ElectionSize { - validators: snapshot_validators.len() as ValidatorIndex, - nominators: snapshot_nominators.len() as NominatorIndex, - }; - - (winners, compact, score, size) -} - -/// Create a solution for seq-phragmen. This uses the same internal function as used by the offchain -/// worker code. -pub fn get_seq_phragmen_solution( - do_reduce: bool, -) -> ( - Vec, - CompactAssignments, - ElectionScore, - ElectionSize, -) { - let iters = offchain_election::get_balancing_iters::(); - - let sp_npos_elections::ElectionResult { - winners, - assignments, - } = >::do_phragmen::(iters).unwrap(); - - offchain_election::prepare_submission::( - assignments, - winners, - do_reduce, - T::MaximumBlockWeight::get(), - ) - .unwrap() -} - -/// Returns a solution in which only one winner is elected with just a self vote. 
-pub fn get_single_winner_solution( - winner: T::AccountId, -) -> Result< - ( - Vec, - CompactAssignments, - ElectionScore, - ElectionSize, - ), - &'static str, -> { - let snapshot_validators = >::snapshot_validators().unwrap(); - let snapshot_nominators = >::snapshot_nominators().unwrap(); - - let val_index = snapshot_validators - .iter() - .position(|x| *x == winner) - .ok_or("not a validator")?; - let nom_index = snapshot_nominators - .iter() - .position(|x| *x == winner) - .ok_or("not a nominator")?; - - let stake = >::slashable_balance_of(&winner); - let stake = - ::to_vote(stake, T::Currency::total_issuance()) as ExtendedBalance; - - let val_index = val_index as ValidatorIndex; - let nom_index = nom_index as NominatorIndex; - - let winners = vec![val_index]; - let compact = CompactAssignments { - votes1: vec![(nom_index, val_index)], - ..Default::default() - }; - let score = [stake, stake, stake * stake]; - let size = ElectionSize { - validators: snapshot_validators.len() as ValidatorIndex, - nominators: snapshot_nominators.len() as NominatorIndex, - }; - - Ok((winners, compact, score, size)) -} - -/// get the active era. -pub fn current_era() -> EraIndex { - >::current_era().unwrap_or(0) -} - -/// initialize the first era. -pub fn init_active_era() { - ActiveEra::put(ActiveEraInfo { - index: 1, - start: None, - }) -} - -/// Create random assignments for the given list of winners. Each assignment will have -/// MAX_NOMINATIONS edges. -pub fn create_assignments_for_offchain( - num_assignments: u32, - winners: Vec<::Source>, -) -> Result< - ( - Vec<(T::AccountId, ExtendedBalance)>, - Vec>, - ), - &'static str -> { - let ratio = OffchainAccuracy::from_rational_approximation(1, MAX_NOMINATIONS); - let assignments: Vec> = >::iter() - .take(num_assignments as usize) - .map(|(n, t)| Assignment { - who: n, - distribution: t.targets.iter().map(|v| (v.clone(), ratio)).collect(), - }) - .collect(); - - ensure!(assignments.len() == num_assignments as usize, "must bench for `a` assignments"); - - let winners = winners.into_iter().map(|v| { - (::lookup(v).unwrap(), 0) - }).collect(); - - Ok((winners, assignments)) +/// get the current era. +pub fn current_era() -> EraIndex { + >::current_era().unwrap_or(0) } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 2a02d87aa2c57..97dfaa39c84a9 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,17 +17,27 @@ //! Tests for the module. 
-use super::*; +use super::{Event, *}; +use frame_election_provider_support::{ElectionProvider, Support}; +use frame_support::{ + assert_noop, assert_ok, + dispatch::WithPostDispatchInfo, + pallet_prelude::*, + traits::{Currency, Get, OnInitialize, ReservableCurrency}, + weights::{extract_actual_weight, GetDispatchInfo}, +}; use mock::*; +use pallet_balances::Error as BalancesError; use sp_runtime::{ - assert_eq_error_rate, traits::BadOrigin, + assert_eq_error_rate, + traits::{BadOrigin, Dispatchable}, + Perbill, Percent, }; -use sp_staking::offence::OffenceDetails; -use frame_support::{ - assert_ok, assert_noop, StorageMap, - traits::{Currency, ReservableCurrency, OnInitialize, OnFinalize}, +use sp_staking::{ + offence::{OffenceDetails, OnOffenceHandler}, + SessionIndex, }; -use pallet_balances::Error as BalancesError; +use sp_std::prelude::*; use substrate_test_utils::assert_eq_uvec; #[test] @@ -45,7 +55,10 @@ fn force_unstake_works() { // Force unstake requires root. assert_noop!(Staking::force_unstake(Origin::signed(11), 11, 2), BadOrigin); // Force unstake needs correct number of slashing spans (for weight calculation) - assert_noop!(Staking::force_unstake(Origin::signed(11), 11, 0), BadOrigin); + assert_noop!( + Staking::force_unstake(Origin::root(), 11, 0), + Error::::IncorrectSlashingSpans + ); // We now force them to unstake assert_ok!(Staking::force_unstake(Origin::root(), 11, 2)); // No longer bonded. @@ -87,55 +100,75 @@ fn basic_setup_works() { // Account 10 controls the stash from account 11, which is 100 * balance_factor units assert_eq!( Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] }) + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + }) ); // Account 20 controls the stash from account 21, which is 200 * balance_factor units assert_eq!( Staking::ledger(&20), - Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] }) + Some(StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + }) ); // Account 1 does not control any stash assert_eq!(Staking::ledger(&1), None); // ValidatorPrefs are default - assert_eq_uvec!(>::iter().collect::>(), vec![ - (31, ValidatorPrefs::default()), - (21, ValidatorPrefs::default()), - (11, ValidatorPrefs::default()) - ]); + assert_eq_uvec!( + >::iter().collect::>(), + vec![ + (31, ValidatorPrefs::default()), + (21, ValidatorPrefs::default()), + (11, ValidatorPrefs::default()) + ] + ); assert_eq!( Staking::ledger(100), - Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![], claimed_rewards: vec![] }) + Some(StakingLedger { + stash: 101, + total: 500, + active: 500, + unlocking: vec![], + claimed_rewards: vec![] + }) ); assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Staking::eras_stakers(active_era(), 11), Exposure { total: 1125, own: 1000, - others: vec![ IndividualExposure { who: 101, value: 125 }] + others: vec![IndividualExposure { who: 101, value: 125 }] }, ); assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + Staking::eras_stakers(active_era(), 21), Exposure { total: 1375, own: 1000, - others: vec![ IndividualExposure { who: 101, value: 375 }] + others: vec![IndividualExposure { who: 101, value: 375 }] }, ); // initial total stake = 1125 + 1375 - 
assert_eq!(Staking::eras_total_stake(Staking::active_era().unwrap().index), 2500); - + assert_eq!(Staking::eras_total_stake(active_era()), 2500); // The number of validators required. assert_eq!(Staking::validator_count(), 2); // Initial Era and session - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); // Account 10 has `balance_factor` free balance assert_eq!(Balances::free_balance(10), 1); @@ -158,7 +191,7 @@ fn change_controller_works() { // change controller assert_ok!(Staking::set_controller(Origin::signed(11), 5)); assert_eq!(Staking::bonded(&11), Some(5)); - mock::start_era(1); + mock::start_active_era(1); // 10 is no longer in control. assert_noop!( @@ -171,12 +204,7 @@ fn change_controller_works() { #[test] fn rewards_should_work() { - // should check that: - // * rewards get recorded per session - // * rewards get paid per Era - // * `RewardRemainder::on_unbalanced` is called - // * Check that nominators are also rewarded - ExtBuilder::default().nominate(true).build_and_execute(|| { + ExtBuilder::default().nominate(true).session_per_era(3).build_and_execute(|| { let init_balance_10 = Balances::total_balance(&10); let init_balance_11 = Balances::total_balance(&11); let init_balance_20 = Balances::total_balance(&20); @@ -184,19 +212,19 @@ fn rewards_should_work() { let init_balance_100 = Balances::total_balance(&100); let init_balance_101 = Balances::total_balance(&101); - // Check state + // Set payees Payee::::insert(11, RewardDestination::Controller); Payee::::insert(21, RewardDestination::Controller); Payee::::insert(101, RewardDestination::Controller); - >::reward_by_ids(vec![(11, 50)]); - >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); // This is the second validator of the current elected set. - >::reward_by_ids(vec![(21, 50)]); + >::reward_by_ids(vec![(21, 50)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something + // Compute total payout now for whole duration of the session. 
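+		// (The era payout is strictly below the maximum payout for the same
+		// duration; the difference is handed to `RewardRemainder::on_unbalanced`,
+		// as the `REWARD_REMAINDER_UNBALANCED` assertions below check.)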
+ let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); + let maximum_payout = maximum_payout_for_duration(reward_time_per_era()); start_session(1); @@ -207,57 +235,87 @@ fn rewards_should_work() { assert_eq!(Balances::total_balance(&100), init_balance_100); assert_eq!(Balances::total_balance(&101), init_balance_101); assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { - total: 50*3, - individual: vec![(11, 100), (21, 50)].into_iter().collect(), - }); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); - let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); - let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + assert_eq!( + Staking::eras_reward_points(active_era()), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); + let part_for_10 = Perbill::from_rational::(1000, 1125); + let part_for_20 = Perbill::from_rational::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational::(375, 1375); start_session(2); start_session(3); - assert_eq!(Staking::active_era().unwrap().index, 1); - assert_eq!(mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), 7050); - assert_eq!(*mock::staking_events().last().unwrap(), RawEvent::EraPayout(0, 2350, 7050)); + assert_eq!(active_era(), 1); + assert_eq!( + mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), + maximum_payout - total_payout_0, + ); + assert_eq!( + *mock::staking_events().last().unwrap(), + Event::EraPaid(0, total_payout_0, maximum_payout - total_payout_0) + ); mock::make_all_reward_payment(0); - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * total_payout_0 * 2 / 3, + 2, + ); assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2, + ); assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); assert_eq_error_rate!( Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * total_payout_0 * 2/3 - + part_for_100_from_20 * total_payout_0 * 1/3, + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2 / 3 + + part_for_100_from_20 * total_payout_0 * 1 / 3, 2 ); assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); assert_eq_uvec!(Session::validators(), vec![11, 21]); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); - mock::start_era(2); - assert_eq!(mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), 7050*2); - assert_eq!(*mock::staking_events().last().unwrap(), RawEvent::EraPayout(1, 2350, 7050)); + mock::start_active_era(2); + assert_eq!( + 
mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), + maximum_payout * 2 - total_payout_0 - total_payout_1, + ); + assert_eq!( + *mock::staking_events().last().unwrap(), + Event::EraPaid(1, total_payout_1, maximum_payout - total_payout_1) + ); mock::make_all_reward_payment(1); - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_0 * 2 / 3 + total_payout_1), + 2, + ); assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2, + ); assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); assert_eq_error_rate!( Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) - + part_for_100_from_20 * total_payout_0 * 1/3, + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1 / 3, 2 ); assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); @@ -266,83 +324,98 @@ fn rewards_should_work() { #[test] fn staking_should_work() { - // should test: - // * new validators can be added to the default set - // * new ones will be chosen per era - // * either one can unlock the stash and back-down from being a validator via `chill`ing. - ExtBuilder::default() - .nominate(false) - .fair(false) // to give 20 more staked value - .build() - .execute_with(|| { - // --- Block 1: - start_session(1); - - // remember + compare this along with the test. - assert_eq_uvec!(validator_controllers(), vec![20, 10]); + ExtBuilder::default().nominate(false).build_and_execute(|| { + // remember + compare this along with the test. + assert_eq_uvec!(validator_controllers(), vec![20, 10]); - // put some money in account that we'll use. - for i in 1..5 { let _ = Balances::make_free_balance_be(&i, 2000); } + // put some money in account that we'll use. + for i in 1..5 { + let _ = Balances::make_free_balance_be(&i, 2000); + } - // --- Block 2: - start_session(2); - // add a new candidate for being a validator. account 3 controlled by 4. - assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); + // --- Block 2: + start_session(2); + // add a new candidate for being a validator. account 3 controlled by 4. + assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); + assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); - // No effects will be seen so far. - assert_eq_uvec!(validator_controllers(), vec![20, 10]); + // No effects will be seen so far. + assert_eq_uvec!(validator_controllers(), vec![20, 10]); - // --- Block 3: - start_session(3); + // --- Block 3: + start_session(3); - // No effects will be seen so far. Era has not been yet triggered. - assert_eq_uvec!(validator_controllers(), vec![20, 10]); + // No effects will be seen so far. Era has not been yet triggered. + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + // --- Block 4: the validators will now be queued. + start_session(4); + assert_eq!(active_era(), 1); - // --- Block 4: the validators will now be queued. 
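+		// (A queued validator set only becomes active once a full session has
+		// elapsed, hence the change is first visible at block 6 below.)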
- start_session(4); - assert_eq!(Staking::active_era().unwrap().index, 1); + // --- Block 5: the validators are still in queue. + start_session(5); - // --- Block 5: the validators are still in queue. - start_session(5); + // --- Block 6: the validators will now be changed. + start_session(6); - // --- Block 6: the validators will now be changed. - start_session(6); + assert_eq_uvec!(validator_controllers(), vec![20, 4]); + // --- Block 6: Unstake 4 as a validator, freeing up the balance stashed in 3 + // 4 will chill + Staking::chill(Origin::signed(4)).unwrap(); - assert_eq_uvec!(validator_controllers(), vec![20, 4]); - // --- Block 6: Unstake 4 as a validator, freeing up the balance stashed in 3 - // 4 will chill - Staking::chill(Origin::signed(4)).unwrap(); + // --- Block 7: nothing. 4 is still there. + start_session(7); + assert_eq_uvec!(validator_controllers(), vec![20, 4]); - // --- Block 7: nothing. 4 is still there. - start_session(7); - assert_eq_uvec!(validator_controllers(), vec![20, 4]); + // --- Block 8: + start_session(8); - // --- Block 8: - start_session(8); + // --- Block 9: 4 will not be a validator. + start_session(9); + assert_eq_uvec!(validator_controllers(), vec![20, 10]); - // --- Block 9: 4 will not be a validator. - start_session(9); - assert_eq_uvec!(validator_controllers(), vec![20, 10]); + // Note: the stashed value of 4 is still lock + assert_eq!( + Staking::ledger(&4), + Some(StakingLedger { + stash: 3, + total: 1500, + active: 1500, + unlocking: vec![], + claimed_rewards: vec![0], + }) + ); + // e.g. it cannot reserve more than 500 that it has free from the total 2000 + assert_noop!(Balances::reserve(&3, 501), BalancesError::::LiquidityRestrictions); + assert_ok!(Balances::reserve(&3, 409)); + }); +} - // Note: the stashed value of 4 is still lock - assert_eq!( - Staking::ledger(&4), - Some(StakingLedger { - stash: 3, - total: 1500, - active: 1500, - unlocking: vec![], - claimed_rewards: vec![0], - }) - ); - // e.g. it cannot reserve more than 500 that it has free from the total 2000 +#[test] +fn blocking_and_kicking_works() { + ExtBuilder::default() + .minimum_validator_count(1) + .validator_count(4) + .nominate(true) + .build_and_execute(|| { + // block validator 10/11 + assert_ok!(Staking::validate( + Origin::signed(10), + ValidatorPrefs { blocked: true, ..Default::default() } + )); + // attempt to nominate from 100/101... + assert_ok!(Staking::nominate(Origin::signed(100), vec![11])); + // should have worked since we're already nominated them + assert_eq!(Nominators::::get(&101).unwrap().targets, vec![11]); + // kick the nominator + assert_ok!(Staking::kick(Origin::signed(10), vec![101])); + // should have been kicked now + assert!(Nominators::::get(&101).unwrap().targets.is_empty()); + // attempt to nominate from 100/101... assert_noop!( - Balances::reserve(&3, 501), - BalancesError::::LiquidityRestrictions + Staking::nominate(Origin::signed(100), vec![11]), + Error::::BadTarget ); - assert_ok!(Balances::reserve(&3, 409)); }); } @@ -352,24 +425,20 @@ fn less_than_needed_candidates_works() { .minimum_validator_count(1) .validator_count(4) .nominate(false) - .num_validators(3) - .build() - .execute_with(|| { + .build_and_execute(|| { assert_eq!(Staking::validator_count(), 4); assert_eq!(Staking::minimum_validator_count(), 1); assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); - mock::start_era(1); + mock::start_active_era(1); // Previous set is selected. NO election algorithm is even executed. 
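+			// (Only 3 candidates exist for a validator_count of 4; since this is
+			// still at or above MinimumValidatorCount = 1, the era simply
+			// continues with the smaller set.)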
assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); // But the exposure is updated in a simple way. No external votes exists. // This is purely self-vote. - assert!( - ErasStakers::::iter_prefix_values(Staking::active_era().unwrap().index) - .all(|exposure| exposure.others.is_empty()) - ); + assert!(ErasStakers::::iter_prefix_values(active_era()) + .all(|exposure| exposure.others.is_empty())); }); } @@ -378,29 +447,39 @@ fn no_candidate_emergency_condition() { ExtBuilder::default() .minimum_validator_count(1) .validator_count(15) - .num_validators(4) - .validator_pool(true) + .set_status(41, StakerStatus::Validator) .nominate(false) - .build() - .execute_with(|| { + .build_and_execute(|| { // initial validators assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); - let prefs = ValidatorPrefs { commission: Perbill::one() }; + let prefs = ValidatorPrefs { commission: Perbill::one(), ..Default::default() }; ::Validators::insert(11, prefs.clone()); // set the minimum validator count. ::MinimumValidatorCount::put(10); // try to chill - let _ = Staking::chill(Origin::signed(10)); + let res = Staking::chill(Origin::signed(10)); + assert_ok!(res); + + let current_era = CurrentEra::::get(); - // trigger era - mock::start_era(1); + // try trigger new era + mock::run_to_block(20); + assert_eq!(*staking_events().last().unwrap(), Event::StakingElectionFailed); + // No new era is created + assert_eq!(current_era, CurrentEra::::get()); - // Previous ones are elected. chill is invalidates. TODO: #2494 + // Go to far further session to see if validator have changed + mock::run_to_block(100); + + // Previous ones are elected. chill is not effective in active era (as era hasn't + // changed) assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); - // Though the validator preferences has been removed. - assert!(Staking::validators(11) != prefs); + // The chill is still pending. + assert!(!::Validators::contains_key(11)); + // No new era is created. + assert_eq!(current_era, CurrentEra::::get()); }); } @@ -408,13 +487,18 @@ fn no_candidate_emergency_condition() { fn nominating_and_rewards_should_work() { ExtBuilder::default() .nominate(false) - .validator_pool(true) - .build() - .execute_with(|| { - // initial validators -- everyone is actually even. - assert_eq_uvec!(validator_controllers(), vec![40, 30]); - - // Set payee to controller + .set_status(41, StakerStatus::Validator) + .set_status(11, StakerStatus::Idle) + .set_status(31, StakerStatus::Idle) + .build_and_execute(|| { + // initial validators. + assert_eq_uvec!(validator_controllers(), vec![40, 20]); + + // re-validate with 11 and 31. + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); + assert_ok!(Staking::validate(Origin::signed(30), Default::default())); + + // Set payee to controller. assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); assert_ok!(Staking::set_payee(Origin::signed(20), RewardDestination::Controller)); assert_ok!(Staking::set_payee(Origin::signed(30), RewardDestination::Controller)); @@ -427,35 +511,33 @@ fn nominating_and_rewards_should_work() { } // bond two account pairs and state interest in nomination. 
- // 2 will nominate for 10, 20, 30 assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31])); - // 4 will nominate for 10, 20, 40 + assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); // the total reward for era 0 - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(41, 1)]); - >::reward_by_ids(vec![(31, 1)]); + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); + >::reward_by_ids(vec![(41, 1)]); + >::reward_by_ids(vec![(21, 1)]); - mock::start_era(1); + mock::start_active_era(1); // 10 and 20 have more votes, they will be chosen. assert_eq_uvec!(validator_controllers(), vec![20, 10]); - // OLD validators must have already received some rewards. + // old validators must have already received some rewards. + let initial_balance_40 = Balances::total_balance(&40); + let mut initial_balance_20 = Balances::total_balance(&20); mock::make_all_reward_payment(0); - assert_eq!(Balances::total_balance(&40), 1 + total_payout_0 / 2); - assert_eq!(Balances::total_balance(&30), 1 + total_payout_0 / 2); - - // ------ check the staked value of all parties. + assert_eq!(Balances::total_balance(&40), initial_balance_40 + total_payout_0 / 2); + assert_eq!(Balances::total_balance(&20), initial_balance_20 + total_payout_0 / 2); + initial_balance_20 = Balances::total_balance(&20); - // 30 and 40 are not chosen anymore - assert_eq!(ErasStakers::::iter_prefix_values(Staking::active_era().unwrap().index).count(), 2); + assert_eq!(ErasStakers::::iter_prefix_values(active_era()).count(), 2); assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Staking::eras_stakers(active_era(), 11), Exposure { total: 1000 + 800, own: 1000, @@ -466,7 +548,7 @@ fn nominating_and_rewards_should_work() { }, ); assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + Staking::eras_stakers(active_era(), 21), Exposure { total: 1000 + 1200, own: 1000, @@ -478,43 +560,45 @@ fn nominating_and_rewards_should_work() { ); // the total reward for era 1 - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(21, 2)]); - >::reward_by_ids(vec![(11, 1)]); + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); + >::reward_by_ids(vec![(21, 2)]); + >::reward_by_ids(vec![(11, 1)]); - mock::start_era(2); + mock::start_active_era(2); - // nothing else will happen, era ends and rewards are paid again, - // it is expected that nominators will also be paid. See below + // nothing else will happen, era ends and rewards are paid again, it is expected that + // nominators will also be paid. See below mock::make_all_reward_payment(1); let payout_for_10 = total_payout_1 / 3; let payout_for_20 = 2 * total_payout_1 / 3; - // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 + // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> + // 2/9 + 3/11 assert_eq_error_rate!( Balances::total_balance(&2), initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, + 2, ); - // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. 
==> 2/9 + 3/11 + // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> + // 2/9 + 3/11 assert_eq_error_rate!( Balances::total_balance(&4), initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, + 2, ); // Validator 10: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 assert_eq_error_rate!( Balances::total_balance(&10), initial_balance + 5 * payout_for_10 / 9, - 1, + 2, ); - // Validator 20: got 1200 / 2200 external stake => 12/22 =? 6/11 => Validator's share = 5/11 + // Validator 20: got 1200 / 2200 external stake => 12/22 =? 6/11 => Validator's share = + // 5/11 assert_eq_error_rate!( Balances::total_balance(&20), - initial_balance + 5 * payout_for_20 / 11, - 1, + initial_balance_20 + 5 * payout_for_20 / 11, + 2, ); }); } @@ -522,14 +606,11 @@ fn nominating_and_rewards_should_work() { #[test] fn nominators_also_get_slashed_pro_rata() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); let slash_percent = Perbill::from_percent(5); let initial_exposure = Staking::eras_stakers(active_era(), 11); // 101 is a nominator for 11 - assert_eq!( - initial_exposure.others.first().unwrap().who, - 101, - ); + assert_eq!(initial_exposure.others.first().unwrap().who, 101); // staked values; let nominator_stake = Staking::ledger(100).unwrap().active; @@ -542,13 +623,7 @@ fn nominators_also_get_slashed_pro_rata() { // 11 goes offline on_offence_now( - &[OffenceDetails { - offender: ( - 11, - initial_exposure.clone(), - ), - reporters: vec![], - }], + &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], &[slash_percent], ); @@ -558,25 +633,17 @@ fn nominators_also_get_slashed_pro_rata() { let slash_amount = slash_percent * exposed_stake; let validator_share = - Perbill::from_rational_approximation(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = Perbill::from_rational_approximation( - exposed_nominator, - exposed_stake, - ) * slash_amount; + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; // both slash amounts need to be positive for the test to make sense. assert!(validator_share > 0); assert!(nominator_share > 0); // both stakes must have been decreased pro-rata. - assert_eq!( - Staking::ledger(100).unwrap().active, - nominator_stake - nominator_share, - ); - assert_eq!( - Staking::ledger(10).unwrap().active, - validator_stake - validator_share, - ); + assert_eq!(Staking::ledger(100).unwrap().active, nominator_stake - nominator_share); + assert_eq!(Staking::ledger(10).unwrap().active, validator_stake - validator_share); assert_eq!( balances(&101).0, // free balance nominator_balance - nominator_share, @@ -599,14 +666,16 @@ fn double_staking_should_fail() { ExtBuilder::default().build_and_execute(|| { let arbitrary_value = 5; // 2 = controller, 1 stashed => ok - assert_ok!( - Staking::bond(Origin::signed(1), 2, arbitrary_value, - RewardDestination::default()) - ); + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + arbitrary_value, + RewardDestination::default() + )); // 4 = not used so far, 1 stashed => not allowed. assert_noop!( - Staking::bond(Origin::signed(1), 4, arbitrary_value, - RewardDestination::default()), Error::::AlreadyBonded, + Staking::bond(Origin::signed(1), 4, arbitrary_value, RewardDestination::default()), + Error::::AlreadyBonded, ); // 1 = stashed => attempting to nominate should fail. 
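        // (1 is a stash, not a controller; staking calls must be dispatched from the
        // controller, hence NotController.)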
assert_noop!(Staking::nominate(Origin::signed(1), vec![1]), Error::::NotController); @@ -618,7 +687,8 @@ fn double_staking_should_fail() { #[test] fn double_controlling_should_fail() { // should test (in the same order): - // * an account already bonded as controller CANNOT be reused as the controller of another account. + // * an account already bonded as controller CANNOT be reused as the controller of another + // account. ExtBuilder::default().build_and_execute(|| { let arbitrary_value = 5; // 2 = controller, 1 stashed => ok @@ -637,40 +707,87 @@ fn double_controlling_should_fail() { } #[test] -fn session_and_eras_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::active_era().unwrap().index, 0); - assert_eq!(Session::current_index(), 0); +fn session_and_eras_work_simple() { + ExtBuilder::default().period(1).build_and_execute(|| { + assert_eq!(active_era(), 0); + assert_eq!(current_era(), 0); + assert_eq!(Session::current_index(), 1); + assert_eq!(System::block_number(), 1); - // Session 1: No change. + // Session 1: this is basically a noop. This has already been started. start_session(1); assert_eq!(Session::current_index(), 1); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 1); // Session 2: No change. start_session(2); assert_eq!(Session::current_index(), 2); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 2); // Session 3: Era increment. start_session(3); assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 3); // Session 4: No change. start_session(4); assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 4); // Session 5: No change. start_session(5); assert_eq!(Session::current_index(), 5); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 5); // Session 6: Era increment. 
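        // (This mock runs 3 sessions per era, so with period 1 the active era bumps at
        // sessions 3, 6, 9, ...)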
start_session(6); assert_eq!(Session::current_index(), 6); - assert_eq!(Staking::active_era().unwrap().index, 2); + assert_eq!(active_era(), 2); + assert_eq!(System::block_number(), 6); + }); +} + +#[test] +fn session_and_eras_work_complex() { + ExtBuilder::default().period(5).build_and_execute(|| { + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + assert_eq!(System::block_number(), 1); + + start_session(1); + assert_eq!(Session::current_index(), 1); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 5); + + start_session(2); + assert_eq!(Session::current_index(), 2); + assert_eq!(active_era(), 0); + assert_eq!(System::block_number(), 10); + + start_session(3); + assert_eq!(Session::current_index(), 3); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 15); + + start_session(4); + assert_eq!(Session::current_index(), 4); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 20); + + start_session(5); + assert_eq!(Session::current_index(), 5); + assert_eq!(active_era(), 1); + assert_eq!(System::block_number(), 25); + + start_session(6); + assert_eq!(Session::current_index(), 6); + assert_eq!(active_era(), 2); + assert_eq!(System::block_number(), 30); }); } @@ -678,54 +795,62 @@ fn session_and_eras_work() { fn forcing_new_era_works() { ExtBuilder::default().build_and_execute(|| { // normal flow of session. - assert_eq!(Staking::active_era().unwrap().index, 0); - start_session(0); - assert_eq!(Staking::active_era().unwrap().index, 0); start_session(1); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + start_session(2); - assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(active_era(), 0); + start_session(3); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); // no era change. - ForceEra::put(Forcing::ForceNone); + ForceEra::::put(Forcing::ForceNone); + start_session(4); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + start_session(5); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + start_session(6); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); + start_session(7); - assert_eq!(Staking::active_era().unwrap().index, 1); + assert_eq!(active_era(), 1); // back to normal. // this immediately starts a new session. 
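        // (NotForcing resumes the regular cadence; note the one-session delay asserted
        // below.)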
- ForceEra::put(Forcing::NotForcing); + ForceEra::::put(Forcing::NotForcing); + start_session(8); - assert_eq!(Staking::active_era().unwrap().index, 1); // There is one session delay - start_session(9); - assert_eq!(Staking::active_era().unwrap().index, 2); + assert_eq!(active_era(), 1); + start_session(9); + assert_eq!(active_era(), 2); // forceful change - ForceEra::put(Forcing::ForceAlways); + ForceEra::::put(Forcing::ForceAlways); + start_session(10); - assert_eq!(Staking::active_era().unwrap().index, 2); // There is one session delay + assert_eq!(active_era(), 2); + start_session(11); - assert_eq!(Staking::active_era().unwrap().index, 3); + assert_eq!(active_era(), 3); + start_session(12); - assert_eq!(Staking::active_era().unwrap().index, 4); + assert_eq!(active_era(), 4); // just one forceful change - ForceEra::put(Forcing::ForceNew); + ForceEra::::put(Forcing::ForceNew); start_session(13); - assert_eq!(Staking::active_era().unwrap().index, 5); - assert_eq!(ForceEra::get(), Forcing::NotForcing); + assert_eq!(active_era(), 5); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + start_session(14); - assert_eq!(Staking::active_era().unwrap().index, 6); - start_session(15); - assert_eq!(Staking::active_era().unwrap().index, 6); + assert_eq!(active_era(), 6); + start_session(15); + assert_eq!(active_era(), 6); }); } @@ -738,7 +863,7 @@ fn cannot_transfer_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( Balances::transfer(Origin::signed(11), 20, 1), @@ -757,13 +882,13 @@ fn cannot_transfer_staked_balance_2() { // Tests that a stash account cannot transfer funds // Same test as above but with 20, and more accurate. 
// 21 has 2000 free balance but 1000 at stake - ExtBuilder::default().nominate(false).fair(true).build_and_execute(|| { + ExtBuilder::default().nominate(false).build_and_execute(|| { // Confirm account 21 is stashed assert_eq!(Staking::bonded(&21), Some(20)); // Confirm account 21 has some free balance assert_eq!(Balances::free_balance(21), 2000); // Confirm account 21 (via controller 20) is totally staked - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), 21).total, 1000); // Confirm account 21 can transfer at most 1000 assert_noop!( Balances::transfer(Origin::signed(21), 20, 1001), @@ -782,12 +907,9 @@ fn cannot_reserve_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, 1000); + assert_eq!(Staking::eras_stakers(active_era(), 11).own, 1000); // Confirm account 11 cannot reserve as a result - assert_noop!( - Balances::reserve(&11, 1), - BalancesError::::LiquidityRestrictions, - ); + assert_noop!(Balances::reserve(&11, 1), BalancesError::::LiquidityRestrictions); // Give account 11 extra free balance let _ = Balances::make_free_balance_be(&11, 10000); @@ -807,20 +929,22 @@ fn reward_destination_works() { // Check the balance of the stash account assert_eq!(Balances::free_balance(11), 1000); // Check how much is at stake - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - })); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); + >::reward_by_ids(vec![(11, 1)]); - mock::start_era(1); + mock::start_active_era(1); mock::make_all_reward_payment(0); // Check that RewardDestination is Staked (default) @@ -828,23 +952,25 @@ fn reward_destination_works() { // Check that reward went to the stash account of validator assert_eq!(Balances::free_balance(11), 1000 + total_payout_0); // Check that amount at stake increased accordingly - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + total_payout_0, - active: 1000 + total_payout_0, - unlocking: vec![], - claimed_rewards: vec![0], - })); - - //Change RewardDestination to Stash + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + total_payout_0, + active: 1000 + total_payout_0, + unlocking: vec![], + claimed_rewards: vec![0], + }) + ); + + // Change RewardDestination to Stash >::insert(&11, RewardDestination::Stash); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); + >::reward_by_ids(vec![(11, 1)]); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); // Check 
that RewardDestination is Stash @@ -854,13 +980,16 @@ fn reward_destination_works() { // Record this value let recorded_stash_balance = 1000 + total_payout_0 + total_payout_1; // Check that amount at stake is NOT increased - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + total_payout_0, - active: 1000 + total_payout_0, - unlocking: vec![], - claimed_rewards: vec![0,1], - })); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + total_payout_0, + active: 1000 + total_payout_0, + unlocking: vec![], + claimed_rewards: vec![0, 1], + }) + ); // Change RewardDestination to Controller >::insert(&11, RewardDestination::Controller); @@ -869,11 +998,10 @@ fn reward_destination_works() { assert_eq!(Balances::free_balance(10), 1); // Compute total payout now for whole duration as other parameter won't change - let total_payout_2 = current_total_payout_for_duration(3000); - assert!(total_payout_2 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); + let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); + >::reward_by_ids(vec![(11, 1)]); - mock::start_era(3); + mock::start_active_era(3); mock::make_all_reward_payment(2); // Check that RewardDestination is Controller @@ -881,13 +1009,16 @@ fn reward_destination_works() { // Check that reward went to the controller account assert_eq!(Balances::free_balance(10), 1 + total_payout_2); // Check that amount at stake is NOT increased - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + total_payout_0, - active: 1000 + total_payout_0, - unlocking: vec![], - claimed_rewards: vec![0,1,2], - })); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + total_payout_0, + active: 1000 + total_payout_0, + unlocking: vec![], + claimed_rewards: vec![0, 1, 2], + }) + ); // Check that amount in staked account is NOT increased. assert_eq!(Balances::free_balance(11), recorded_stash_balance); }); @@ -900,27 +1031,27 @@ fn validator_payment_prefs_work() { // This test will focus on validator payment. ExtBuilder::default().build_and_execute(|| { let commission = Perbill::from_percent(40); - >::insert(&11, ValidatorPrefs { - commission: commission.clone(), - }); + >::insert( + &11, + ValidatorPrefs { commission: commission.clone(), ..Default::default() }, + ); // Reward controller so staked ratio doesn't change. 
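        // (Rewards paid to the controller leave the stash's locked balance, and thus the
        // next era's exposure, untouched.)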
>::insert(&11, RewardDestination::Controller); >::insert(&101, RewardDestination::Controller); - mock::start_era(1); + mock::start_active_era(1); mock::make_all_reward_payment(0); let balance_era_1_10 = Balances::total_balance(&10); let balance_era_1_100 = Balances::total_balance(&100); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something - let exposure_1 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - >::reward_by_ids(vec![(11, 1)]); + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); + let exposure_1 = Staking::eras_stakers(active_era(), 11); + >::reward_by_ids(vec![(11, 1)]); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); let taken_cut = commission * total_payout_1; @@ -930,7 +1061,6 @@ fn validator_payment_prefs_work() { assert_eq_error_rate!(Balances::total_balance(&10), balance_era_1_10 + reward_of_10, 2); assert_eq_error_rate!(Balances::total_balance(&100), balance_era_1_100 + reward_of_100, 2); }); - } #[test] @@ -944,13 +1074,16 @@ fn bond_extra_works() { // Check that account 10 is bonded to account 11 assert_eq!(Staking::bonded(&11), Some(10)); // Check how much is at stake - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - })); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -958,29 +1091,36 @@ fn bond_extra_works() { // Call the bond_extra function from controller, add only 100 assert_ok!(Staking::bond_extra(Origin::signed(11), 100)); // There should be 100 more `total` and `active` in the ledger - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + 100, - active: 1000 + 100, - unlocking: vec![], - claimed_rewards: vec![], - })); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); // Call the bond_extra function with a large number, should handle it assert_ok!(Staking::bond_extra(Origin::signed(11), Balance::max_value())); // The full amount of the funds should now be in the total and active - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000000, - active: 1000000, - unlocking: vec![], - claimed_rewards: vec![], - })); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000000, + active: 1000000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); }); } #[test] fn bond_extra_and_withdraw_unbonded_works() { + // // * Should test // * Given an account being bonded [and chosen as a validator](not mandatory) // * It can add extra funds to the bonded account. @@ -994,63 +1134,71 @@ fn bond_extra_and_withdraw_unbonded_works() { let _ = Balances::make_free_balance_be(&11, 1000000); // Initial config should be correct - assert_eq!(Staking::active_era().unwrap().index, 0); - assert_eq!(Session::current_index(), 0); + assert_eq!(active_era(), 0); // check the balance of a validator accounts. 
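        // (Controller 10 starts with only 1 unit; the 1000 staked units sit on stash 11.)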
assert_eq!(Balances::total_balance(&10), 1); // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); + mock::start_active_era(1); // Initial state of 10 - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - })); assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + assert_eq!( + Staking::eras_stakers(active_era(), 11), Exposure { total: 1000, own: 1000, others: vec![] } ); // deposit the extra 100 units Staking::bond_extra(Origin::signed(11), 100).unwrap(); - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + 100, - active: 1000 + 100, - unlocking: vec![], - claimed_rewards: vec![], - })); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); // Exposure is a snapshot! only updated after the next era update. assert_ne!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Staking::eras_stakers(active_era(), 11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } ); // trigger next era. - mock::start_era(2); - assert_eq!(Staking::active_era().unwrap().index, 2); + mock::start_active_era(2); + assert_eq!(active_era(), 2); // ledger should be the same. - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + 100, - active: 1000 + 100, - unlocking: vec![], - claimed_rewards: vec![], - })); - // Exposure is now updated. assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } - ); - - // Unbond almost all of the funds in stash. + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + // Exposure is now updated. + assert_eq!( + Staking::eras_stakers(active_era(), 11), + Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } + ); + + // Unbond almost all of the funds in stash. Staking::unbond(Origin::signed(10), 1000).unwrap(); assert_eq!( Staking::ledger(&10), @@ -1058,7 +1206,7 @@ fn bond_extra_and_withdraw_unbonded_works() { stash: 11, total: 1000 + 100, active: 100, - unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], + unlocking: vec![UnlockChunk { value: 1000, era: 2 + 3 }], claimed_rewards: vec![] }), ); @@ -1071,13 +1219,13 @@ fn bond_extra_and_withdraw_unbonded_works() { stash: 11, total: 1000 + 100, active: 100, - unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], + unlocking: vec![UnlockChunk { value: 1000, era: 2 + 3 }], claimed_rewards: vec![] }), ); // trigger next era. - mock::start_era(3); + mock::start_active_era(3); // nothing yet assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); @@ -1087,13 +1235,13 @@ fn bond_extra_and_withdraw_unbonded_works() { stash: 11, total: 1000 + 100, active: 100, - unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], + unlocking: vec![UnlockChunk { value: 1000, era: 2 + 3 }], claimed_rewards: vec![] }), ); // trigger next era. - mock::start_era(5); + mock::start_active_era(5); assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); // Now the value is free and the staking ledger is updated. 
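// A minimal sketch (not part of the patch) of the unlock timeline the test above walks
// through, assuming the mock's bonding duration of 3 eras. `Chunk` is a local stand-in
// for the pallet's `UnlockChunk`, whose `era` field stores the era at which the funds
// become free, e.g. the `era: 2 + 3` chunks above.
struct Chunk {
    value: u128,
    era: u32,
}

fn withdrawable(unlocking: &[Chunk], current_era: u32) -> u128 {
    // A chunk unbonded in era 2 carries `era: 5`, so nothing is released at eras 3 and 4
    // (the "nothing yet" checks) and the whole value is released once era 5 starts.
    unlocking.iter().filter(|c| c.era <= current_era).map(|c| c.value).sum()
}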
@@ -1114,18 +1262,18 @@ fn bond_extra_and_withdraw_unbonded_works() { fn too_many_unbond_calls_should_not_work() { ExtBuilder::default().build_and_execute(|| { // locked at era 0 until 3 - for _ in 0..MAX_UNLOCKING_CHUNKS-1 { + for _ in 0..MAX_UNLOCKING_CHUNKS - 1 { assert_ok!(Staking::unbond(Origin::signed(10), 1)); } - mock::start_era(1); + mock::start_active_era(1); // locked at era 1 until 4 assert_ok!(Staking::unbond(Origin::signed(10), 1)); // can't do more. assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); - mock::start_era(3); + mock::start_active_era(3); assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); // free up. @@ -1139,423 +1287,422 @@ fn too_many_unbond_calls_should_not_work() { #[test] fn rebond_works() { + // // * Should test // * Given an account being bonded [and chosen as a validator](not mandatory) // * it can unbond a portion of its funds from the stash account. // * it can re-bond a portion of the funds scheduled to unlock. - ExtBuilder::default() - .nominate(false) - .build() - .execute_with(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee( - Origin::signed(10), - RewardDestination::Controller - )); + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Set payee to controller. avoids confusion + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); - // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); + // confirm that 10 is a normal validator and gets paid at the end of the era. + mock::start_active_era(1); - // Initial state of 10 - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); + // Initial state of 10 + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); - mock::start_era(2); - assert_eq!(Staking::active_era().unwrap().index, 2); + mock::start_active_era(2); + assert_eq!(active_era(), 2); - // Try to rebond some funds. We get an error since no fund is unbonded. - assert_noop!( - Staking::rebond(Origin::signed(10), 500), - Error::::NoUnlockChunk, - ); + // Try to rebond some funds. We get an error since no fund is unbonded. + assert_noop!(Staking::rebond(Origin::signed(10), 500), Error::::NoUnlockChunk); - // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 900).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![UnlockChunk { - value: 900, - era: 2 + 3, - }], - claimed_rewards: vec![], - }) - ); + // Unbond almost all of the funds in stash. + Staking::unbond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![UnlockChunk { value: 900, era: 2 + 3 }], + claimed_rewards: vec![], + }) + ); - // Re-bond all the funds unbonded. 
- Staking::rebond(Origin::signed(10), 900).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); + // Re-bond all the funds unbonded. + Staking::rebond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); - // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 900).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: vec![], - }) - ); + // Unbond almost all of the funds in stash. + Staking::unbond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![UnlockChunk { value: 900, era: 5 }], + claimed_rewards: vec![], + }) + ); - // Re-bond part of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 600, - unlocking: vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: vec![], - }) - ); + // Re-bond part of the funds unbonded. + Staking::rebond(Origin::signed(10), 500).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 600, + unlocking: vec![UnlockChunk { value: 400, era: 5 }], + claimed_rewards: vec![], + }) + ); - // Re-bond the remainder of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); + // Re-bond the remainder of the funds unbonded. + Staking::rebond(Origin::signed(10), 500).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); - // Unbond parts of the funds in stash. - Staking::unbond(Origin::signed(10), 300).unwrap(); - Staking::unbond(Origin::signed(10), 300).unwrap(); - Staking::unbond(Origin::signed(10), 300).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![ - UnlockChunk { value: 300, era: 5 }, - UnlockChunk { value: 300, era: 5 }, - UnlockChunk { value: 300, era: 5 }, - ], - claimed_rewards: vec![], - }) - ); + // Unbond parts of the funds in stash. + Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![ + UnlockChunk { value: 300, era: 5 }, + UnlockChunk { value: 300, era: 5 }, + UnlockChunk { value: 300, era: 5 }, + ], + claimed_rewards: vec![], + }) + ); - // Re-bond part of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 600, - unlocking: vec![ - UnlockChunk { value: 300, era: 5 }, - UnlockChunk { value: 100, era: 5 }, - ], - claimed_rewards: vec![], - }) - ); - }) + // Re-bond part of the funds unbonded. 
+ Staking::rebond(Origin::signed(10), 500).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 600, + unlocking: vec![ + UnlockChunk { value: 300, era: 5 }, + UnlockChunk { value: 100, era: 5 }, + ], + claimed_rewards: vec![], + }) + ); + }) } #[test] fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. - ExtBuilder::default() - .nominate(false) - .build() - .execute_with(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee( - Origin::signed(10), - RewardDestination::Controller - )); + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Set payee to controller. avoids confusion + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); - // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); + // confirm that 10 is a normal validator and gets paid at the end of the era. + mock::start_active_era(1); - // Initial state of 10 - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); + // Initial state of 10 + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); - mock::start_era(2); + mock::start_active_era(2); - // Unbond some of the funds in stash. - Staking::unbond(Origin::signed(10), 400).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 600, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - ], - claimed_rewards: vec![], - }) - ); + // Unbond some of the funds in stash. + Staking::unbond(Origin::signed(10), 400).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 600, + unlocking: vec![UnlockChunk { value: 400, era: 2 + 3 },], + claimed_rewards: vec![], + }) + ); - mock::start_era(3); + mock::start_active_era(3); - // Unbond more of the funds in stash. - Staking::unbond(Origin::signed(10), 300).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 300, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - UnlockChunk { value: 300, era: 3 + 3 }, - ], - claimed_rewards: vec![], - }) - ); + // Unbond more of the funds in stash. + Staking::unbond(Origin::signed(10), 300).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 300, + unlocking: vec![ + UnlockChunk { value: 400, era: 2 + 3 }, + UnlockChunk { value: 300, era: 3 + 3 }, + ], + claimed_rewards: vec![], + }) + ); - mock::start_era(4); + mock::start_active_era(4); - // Unbond yet more of the funds in stash. - Staking::unbond(Origin::signed(10), 200).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - UnlockChunk { value: 300, era: 3 + 3 }, - UnlockChunk { value: 200, era: 4 + 3 }, - ], - claimed_rewards: vec![], - }) - ); + // Unbond yet more of the funds in stash. 
+ Staking::unbond(Origin::signed(10), 200).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![ + UnlockChunk { value: 400, era: 2 + 3 }, + UnlockChunk { value: 300, era: 3 + 3 }, + UnlockChunk { value: 200, era: 4 + 3 }, + ], + claimed_rewards: vec![], + }) + ); - // Re-bond half of the unbonding funds. - Staking::rebond(Origin::signed(10), 400).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 500, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - UnlockChunk { value: 100, era: 3 + 3 }, - ], - claimed_rewards: vec![], - }) - ); - }) + // Re-bond half of the unbonding funds. + Staking::rebond(Origin::signed(10), 400).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 500, + unlocking: vec![ + UnlockChunk { value: 400, era: 2 + 3 }, + UnlockChunk { value: 100, era: 3 + 3 }, + ], + claimed_rewards: vec![], + }) + ); + }) } #[test] fn reward_to_stake_works() { - ExtBuilder::default().nominate(false).fair(false).build_and_execute(|| { - // Confirm validator count is 2 - assert_eq!(Staking::validator_count(), 2); - // Confirm account 10 and 20 are validators - assert!(>::contains_key(&11) && >::contains_key(&21)); - - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 2000); - - // Give the man some money. - let _ = Balances::make_free_balance_be(&10, 1000); - let _ = Balances::make_free_balance_be(&20, 1000); - - // Bypass logic and change current exposure - ErasStakers::::insert(0, 21, Exposure { total: 69, own: 69, others: vec![] }); - - // Now lets lower account 20 stake - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69); - >::insert(&20, StakingLedger { stash: 21, total: 69, active: 69, unlocking: vec![], claimed_rewards: vec![] }); + ExtBuilder::default() + .nominate(false) + .set_status(31, StakerStatus::Idle) + .set_status(41, StakerStatus::Idle) + .set_stake(21, 2000) + .build_and_execute(|| { + assert_eq!(Staking::validator_count(), 2); + // Confirm account 10 and 20 are validators + assert!(>::contains_key(&11) && >::contains_key(&21)); + + assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), 21).total, 2000); + + // Give the man some money. 
+ let _ = Balances::make_free_balance_be(&10, 1000); + let _ = Balances::make_free_balance_be(&20, 1000); + + // Bypass logic and change current exposure + ErasStakers::::insert(0, 21, Exposure { total: 69, own: 69, others: vec![] }); + >::insert( + &20, + StakingLedger { + stash: 21, + total: 69, + active: 69, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); - >::reward_by_ids(vec![(21, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); + >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(21, 1)]); - // New era --> rewards are paid --> stakes are changed - mock::start_era(1); - mock::make_all_reward_payment(0); + // New era --> rewards are paid --> stakes are changed + mock::start_active_era(1); + mock::make_all_reward_payment(0); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69); + assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), 21).total, 69); - let _11_balance = Balances::free_balance(&11); - assert_eq!(_11_balance, 1000 + total_payout_0 / 2); + let _11_balance = Balances::free_balance(&11); + assert_eq!(_11_balance, 1000 + total_payout_0 / 2); - // Trigger another new era as the info are frozen before the era start. - mock::start_era(2); + // Trigger another new era as the info are frozen before the era start. 
+ mock::start_active_era(2); - // -- new infos - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000 + total_payout_0 / 2); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69 + total_payout_0 / 2); - }); + // -- new infos + assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000 + total_payout_0 / 2); + assert_eq!(Staking::eras_stakers(active_era(), 21).total, 69 + total_payout_0 / 2); + }); } #[test] fn on_free_balance_zero_stash_removes_validator() { // Tests that validator storage items are cleaned up when stash is empty // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Check the balance of the validator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Set some storage items which we expect to be cleaned up - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); -} + ExtBuilder::default() + .existential_deposit(10) + .balance_factor(10) + .build_and_execute(|| { + // Check the balance of the validator account + assert_eq!(Balances::free_balance(10), 10); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 10 * 1000); + // Check these two accounts are bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Set payee information + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 10 * 1000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = 
Balances::slash(&11, Balance::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 10); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); +} #[test] fn on_free_balance_zero_stash_removes_nominator() { // Tests that nominator storage items are cleaned up when stash is empty // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Make 10 a nominator - assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); - // Check that account 10 is a nominator - assert!(>::contains_key(11)); - // Check the balance of the nominator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - // Check total balance of account 10 - assert_eq!(Balances::total_balance(&10), 0); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); + ExtBuilder::default() + .existential_deposit(10) + .balance_factor(10) + .build_and_execute(|| { + // Make 10 a nominator + assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); + // Check that account 10 is a nominator + assert!(>::contains_key(11)); + // Check the balance of the nominator account + assert_eq!(Balances::free_balance(10), 10); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 10_000); + + // Set payee information + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + // Check total balance of account 10 + assert_eq!(Balances::total_balance(&10), 0); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 10_000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items 
have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = Balances::slash(&11, Balance::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 10); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); } - #[test] fn switching_roles() { - // Test that it should be possible to switch between roles (nominator, validator, idle) with minimal overhead. + // Test that it should be possible to switch between roles (nominator, validator, idle) with + // minimal overhead. ExtBuilder::default().nominate(false).build_and_execute(|| { // Reset reward destination - for i in &[10, 20] { assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); } + for i in &[10, 20] { + assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); + } assert_eq_uvec!(validator_controllers(), vec![20, 10]); // put some money in account that we'll use. - for i in 1..7 { let _ = Balances::deposit_creating(&i, 5000); } + for i in 1..7 { + let _ = Balances::deposit_creating(&i, 5000); + } // add 2 nominators assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::Controller)); @@ -1568,7 +1715,7 @@ fn switching_roles() { assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller)); assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default())); - mock::start_era(1); + mock::start_active_era(1); // with current nominators 10 and 5 have the most stake assert_eq_uvec!(validator_controllers(), vec![6, 10]); @@ -1582,32 +1729,35 @@ fn switching_roles() { // 2 : 2000 self vote + 250 vote. // Winners: 20 and 2 - mock::start_era(2); + mock::start_active_era(2); assert_eq_uvec!(validator_controllers(), vec![2, 20]); }); } #[test] -fn wrong_vote_is_null() { - ExtBuilder::default().nominate(false).validator_pool(true).build_and_execute(|| { - assert_eq_uvec!(validator_controllers(), vec![40, 30]); - - // put some money in account that we'll use. - for i in 1..3 { let _ = Balances::deposit_creating(&i, 5000); } - - // add 1 nominators - assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::default())); - assert_ok!(Staking::nominate(Origin::signed(2), vec![ - 11, 21, // good votes - 1, 2, 15, 1000, 25 // crap votes. No effect. - ])); +fn wrong_vote_is_moot() { + ExtBuilder::default() + .add_staker( + 61, + 60, + 500, + StakerStatus::Nominator(vec![ + 11, 21, // good votes + 1, 2, 15, 1000, 25, // crap votes. No effect. + ]), + ) + .build_and_execute(|| { + // the genesis validators already reflect the above vote, nonetheless start a new era. 
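+            // (The bad targets -- 1, 2, 15, 1000 and 25 -- are not candidates, so the
+            // election silently drops those votes; only the edges to 11 and 21 count.)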
+ mock::start_active_era(1); - // new block - mock::start_era(1); + // new validators + assert_eq_uvec!(validator_controllers(), vec![20, 10]); - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - }); + // our new voter is taken into account + assert!(Staking::eras_stakers(active_era(), 11).others.iter().any(|i| i.who == 61)); + assert!(Staking::eras_stakers(active_era(), 21).others.iter().any(|i| i.who == 61)); + }); } #[test] @@ -1617,14 +1767,14 @@ fn bond_with_no_staked_value() { ExtBuilder::default() .validator_count(3) .existential_deposit(5) + .balance_factor(5) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // Can't bond with 1 assert_noop!( Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller), - Error::::InsufficientValue, + Error::::InsufficientBond, ); // bonded with absolute minimum value possible. assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); @@ -1638,20 +1788,20 @@ fn bond_with_no_staked_value() { stash: 1, active: 0, total: 5, - unlocking: vec![UnlockChunk {value: 5, era: 3}], + unlocking: vec![UnlockChunk { value: 5, era: 3 }], claimed_rewards: vec![], }) ); - mock::start_era(1); - mock::start_era(2); + mock::start_active_era(1); + mock::start_active_era(2); // not yet removed. assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); assert!(Staking::ledger(2).is_some()); assert_eq!(Balances::locks(&1)[0].amount, 5); - mock::start_era(3); + mock::start_active_era(3); // poof. Account 1 is removed from the staking system. assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); @@ -1662,14 +1812,11 @@ fn bond_with_no_staked_value() { #[test] fn bond_with_little_staked_value_bounded() { - // Behavior when someone bonds with little staked value. - // Particularly when she votes and the candidate is elected. ExtBuilder::default() .validator_count(3) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { + .build_and_execute(|| { // setup assert_ok!(Staking::chill(Origin::signed(30))); assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); @@ -1680,151 +1827,147 @@ fn bond_with_little_staked_value_bounded() { assert_ok!(Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller)); assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); - // reward era 0 - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + // 1 era worth of reward. BUT, we set the timestamp after on_initialize, so outdated by + // one block. + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); + reward_all_elected(); - mock::start_era(1); + mock::start_active_era(1); mock::make_all_reward_payment(0); // 2 is elected. assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - // And has minimal stake - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); // Old ones are rewarded. - assert_eq!(Balances::free_balance(10), init_balance_10 + total_payout_0 / 3); + assert_eq_error_rate!( + Balances::free_balance(10), + init_balance_10 + total_payout_0 / 3, + 1 + ); // no rewards paid to 2. This was initial election. 
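            // (2 was only elected at the start of era 1, so it earned no reward points
            // during era 0.)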
assert_eq!(Balances::free_balance(2), init_balance_2); - // reward era 1 - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something + // reward era 2 + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); reward_all_elected(); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); + assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0); - assert_eq!(Balances::free_balance(2), init_balance_2 + total_payout_1 / 3); - assert_eq!( + // 2 is now rewarded. + assert_eq_error_rate!( + Balances::free_balance(2), + init_balance_2 + total_payout_1 / 3, + 1 + ); + assert_eq_error_rate!( Balances::free_balance(&10), init_balance_10 + total_payout_0 / 3 + total_payout_1 / 3, + 2, ); }); } #[test] -fn bond_with_duplicate_vote_should_be_ignored_by_npos_election() { +fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { ExtBuilder::default() .validator_count(2) .nominate(false) .minimum_validator_count(1) - .build() - .execute_with(|| { - // disable the nominator - assert_ok!(Staking::chill(Origin::signed(100))); - // make stakes equal. - assert_ok!(Staking::bond_extra(Origin::signed(31), 999)); - + .set_stake(31, 1000) + .build_and_execute(|| { + // ensure all have equal stake. assert_eq!( >::iter() .map(|(v, _)| (v, Staking::ledger(v - 1).unwrap().total)) .collect::>(), vec![(31, 1000), (21, 1000), (11, 1000)], ); + // no nominators shall exist. assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); - // give the man some money + // give the man some money. let initial_balance = 1000; - for i in [1, 2, 3, 4,].iter() { + for i in [1, 2, 3, 4].iter() { let _ = Balances::make_free_balance_be(i, initial_balance); } assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31,])); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31])); assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 31])); - // winners should be 21 and 31. Otherwise this election is taking duplicates into account. - let sp_npos_elections::ElectionResult { - winners, - assignments, - } = Staking::do_phragmen::(0).unwrap(); - let winners = sp_npos_elections::to_without_backing(winners); - - assert_eq!(winners, vec![31, 21]); - // only distribution to 21 and 31. - assert_eq!(assignments.iter().find(|a| a.who == 1).unwrap().distribution.len(), 2); + // winners should be 21 and 31. Otherwise this election is taking duplicates into + // account. + let supports = ::ElectionProvider::elect().unwrap(); + assert_eq!( + supports, + vec![ + (21, Support { total: 1800, voters: vec![(21, 1000), (3, 400), (1, 400)] }), + (31, Support { total: 2200, voters: vec![(31, 1000), (3, 600), (1, 600)] }) + ], + ); }); } #[test] -fn bond_with_duplicate_vote_should_be_ignored_by_npos_election_elected() { - // same as above but ensures that even when the duple is being elected, everything is sane. +fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { + // same as above but ensures that even when the dupe is being elected, everything is sane. 
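+    // (Duplicate targets in one nominate call collapse into a single vote: 1's 1000
+    // stake backs 11 just once, giving the (11, 1000 + 500) support asserted below.)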
ExtBuilder::default() .validator_count(2) .nominate(false) + .set_stake(31, 1000) .minimum_validator_count(1) - .build() - .execute_with(|| { - // disable the nominator - assert_ok!(Staking::chill(Origin::signed(100))); - // make stakes equal. - assert_ok!(Staking::bond_extra(Origin::signed(31), 99)); - + .build_and_execute(|| { + // ensure all have equal stake. assert_eq!( >::iter() .map(|(v, _)| (v, Staking::ledger(v - 1).unwrap().total)) .collect::>(), - vec![(31, 100), (21, 1000), (11, 1000)], + vec![(31, 1000), (21, 1000), (11, 1000)], ); - assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); - // give the man some money + // no nominators shall exist. + assert!(>::iter().collect::>().is_empty()); + + // give the man some money. let initial_balance = 1000; - for i in [1, 2, 3, 4,].iter() { + for i in [1, 2, 3, 4].iter() { let _ = Balances::make_free_balance_be(i, initial_balance); } assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31,])); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21])); assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 31])); - - // winners should be 21 and 31. Otherwise this election is taking duplicates into account. + assert_ok!(Staking::nominate(Origin::signed(4), vec![21])); - let sp_npos_elections::ElectionResult { - winners, - assignments, - } = Staking::do_phragmen::(0).unwrap(); - - let winners = sp_npos_elections::to_without_backing(winners); - assert_eq!(winners, vec![21, 11]); - // only distribution to 21 and 31. - assert_eq!(assignments.iter().find(|a| a.who == 1).unwrap().distribution.len(), 2); + // winners should be 21 and 11. + let supports = ::ElectionProvider::elect().unwrap(); + assert_eq!( + supports, + vec![ + (11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }), + (21, Support { total: 2500, voters: vec![(21, 1000), (3, 1000), (1, 500)] }) + ], + ); }); } #[test] fn new_era_elects_correct_number_of_validators() { - ExtBuilder::default() - .nominate(true) - .validator_pool(true) - .fair(true) - .validator_count(1) - .build() - .execute_with(|| { - assert_eq!(Staking::validator_count(), 1); - assert_eq!(validator_controllers().len(), 1); + ExtBuilder::default().nominate(true).validator_count(1).build_and_execute(|| { + assert_eq!(Staking::validator_count(), 1); + assert_eq!(validator_controllers().len(), 1); - Session::on_initialize(System::block_number()); + Session::on_initialize(System::block_number()); - assert_eq!(validator_controllers().len(), 1); - }) + assert_eq!(validator_controllers().len(), 1); + }) } #[test] @@ -1842,7 +1985,7 @@ fn phragmen_should_not_overflow() { bond_nominator(7, 6, Votes::max_value() as Balance, vec![3, 5]); bond_nominator(9, 8, Votes::max_value() as Balance, vec![3, 5]); - mock::start_era(1); + mock::start_active_era(1); assert_eq_uvec!(validator_controllers(), vec![4, 2]); @@ -1855,8 +1998,8 @@ fn phragmen_should_not_overflow() { #[test] fn reward_validator_slashing_validator_does_not_overflow() { ExtBuilder::default().build_and_execute(|| { - let stake = u64::max_value() as Balance * 2; - let reward_slash = u64::max_value() as Balance * 2; + let stake = u64::MAX as Balance * 2; + let reward_slash = u64::MAX as Balance * 2; // Assert multiplication overflows in balance arithmetic. 
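// A standalone sketch of the overflow concern the test above encodes, assuming
// `Balance = u128` as implied by `u64::MAX as Balance * 2`: multiplying two
// stakes of roughly 2^65 exceeds `u128::MAX`, so the arithmetic must be
// checked or saturating rather than a plain `*`.
fn main() {
    type Balance = u128;
    let stake: Balance = u64::MAX as Balance * 2;
    let reward_slash: Balance = u64::MAX as Balance * 2;
    assert!(stake.checked_mul(reward_slash).is_none()); // would overflow
    assert_eq!(stake.saturating_mul(reward_slash), u128::MAX); // clamps instead of wrapping
}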
assert!(stake.checked_mul(reward_slash).is_none()); @@ -1886,20 +2029,22 @@ fn reward_validator_slashing_validator_does_not_overflow() { // it is 0. Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap(); // Override exposure of 11 - ErasStakers::::insert(0, 11, Exposure { - total: stake, - own: 1, - others: vec![ IndividualExposure { who: 2, value: stake - 1 }] - }); + ErasStakers::::insert( + 0, + 11, + Exposure { + total: stake, + own: 1, + others: vec![IndividualExposure { who: 2, value: stake - 1 }], + }, + ); // Check slashing on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], &[Perbill::from_percent(100)], ); @@ -1913,12 +2058,12 @@ fn reward_from_authorship_event_handler_works() { ExtBuilder::default().build_and_execute(|| { use pallet_authorship::EventHandler; - assert_eq!(>::author(), 11); + assert_eq!(>::author(), 11); - >::note_author(11); - >::note_uncle(21, 1); + >::note_author(11); + >::note_uncle(21, 1); // Rewarding the same two times works. - >::note_uncle(11, 1); + >::note_uncle(11, 1); // Not mandatory but must be coherent with rewards assert_eq_uvec!(Session::validators(), vec![11, 21]); @@ -1926,7 +2071,7 @@ fn reward_from_authorship_event_handler_works() { // 21 is rewarded as an uncle producer // 11 is rewarded as a block producer and uncle referencer and uncle producer assert_eq!( - ErasRewardPoints::::get(Staking::active_era().unwrap().index), + ErasRewardPoints::::get(active_era()), EraRewardPoints { individual: vec![(11, 20 + 2 * 2 + 1), (21, 1)].into_iter().collect(), total: 26, @@ -1939,26 +2084,15 @@ fn reward_from_authorship_event_handler_works() { fn add_reward_points_fns_works() { ExtBuilder::default().build_and_execute(|| { // Not mandatory but must be coherent with rewards - assert_eq!(Session::validators(), vec![21, 11]); + assert_eq_uvec!(Session::validators(), vec![21, 11]); - >::reward_by_ids(vec![ - (21, 1), - (11, 1), - (11, 1), - ]); + >::reward_by_ids(vec![(21, 1), (11, 1), (11, 1)]); - >::reward_by_ids(vec![ - (21, 1), - (11, 1), - (11, 1), - ]); + >::reward_by_ids(vec![(21, 1), (11, 1), (11, 1)]); assert_eq!( - ErasRewardPoints::::get(Staking::active_era().unwrap().index), - EraRewardPoints { - individual: vec![(11, 4), (21, 2)].into_iter().collect(), - total: 6, - }, + ErasRewardPoints::::get(active_era()), + EraRewardPoints { individual: vec![(11, 4), (21, 2)].into_iter().collect(), total: 6 }, ); }) } @@ -1969,7 +2103,7 @@ fn unbonded_balance_is_not_slashable() { // total amount staked is slashable. assert_eq!(Staking::slashable_balance_of(&11), 1000); - assert_ok!(Staking::unbond(Origin::signed(10), 800)); + assert_ok!(Staking::unbond(Origin::signed(10), 800)); // only the active portion. 
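// A standalone sketch (not from the pallet) of what
// `unbonded_balance_is_not_slashable` asserts next: only the *active* part of
// the ledger is slashable; an unbonded amount has already left `active` and
// sits in an unlock chunk.
struct Ledger { active: u128, unlocking: Vec<u128> }

fn slashable_balance_of(l: &Ledger) -> u128 { l.active }

fn main() {
    let mut l = Ledger { active: 1000, unlocking: vec![] };
    // unbond 800: moved from `active` into an unlock chunk
    l.active -= 800;
    l.unlocking.push(800);
    assert_eq!(slashable_balance_of(&l), 200);
}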
assert_eq!(Staking::slashable_balance_of(&11), 200); @@ -1983,21 +2117,27 @@ fn era_is_always_same_length() { ExtBuilder::default().build_and_execute(|| { let session_per_era = >::get(); - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era); - mock::start_era(2); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era * 2u32); + mock::start_active_era(2); + assert_eq!( + Staking::eras_start_session_index(current_era()).unwrap(), + session_per_era * 2u32 + ); let session = Session::current_index(); - ForceEra::put(Forcing::ForceNew); + ForceEra::::put(Forcing::ForceNew); advance_session(); advance_session(); assert_eq!(current_era(), 3); assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2); - mock::start_era(4); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2u32 + session_per_era); + mock::start_active_era(4); + assert_eq!( + Staking::eras_start_session_index(current_era()).unwrap(), + session + 2u32 + session_per_era + ); }); } @@ -2006,10 +2146,7 @@ fn offence_forces_new_era() { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }], &[Perbill::from_percent(5)], @@ -2027,10 +2164,7 @@ fn offence_ensures_new_era_without_clobbering() { on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }], &[Perbill::from_percent(5)], @@ -2048,10 +2182,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2060,7 +2191,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { assert_eq!(Staking::force_era(), Forcing::ForceNew); assert!(!>::contains_key(11)); - mock::start_era(1); + mock::start_active_era(1); assert!(!Session::validators().contains(&11)); assert!(!>::contains_key(11)); @@ -2072,19 +2203,12 @@ fn slashing_performed_according_exposure() { // This test checks that slashing is performed according the exposure (or more precisely, // historical exposure), not the current balance. ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, 1000); + assert_eq!(Staking::eras_stakers(active_era(), 11).own, 1000); // Handle an offence with a historical exposure. 
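// A standalone sketch of the rule `slashing_performed_according_exposure`
// checks: the slash is computed from the *historical* exposure recorded for
// the era, not from the stash's current balance.
fn slash_from_exposure(exposure_own: u128, fraction_percent: u128) -> u128 {
    exposure_own * fraction_percent / 100
}

fn main() {
    // current own stake is 1000, but the reported exposure was only 500,
    // so a 50% slash takes 250 and the balance drops to 750
    let slash = slash_from_exposure(500, 50);
    assert_eq!(slash, 250);
    assert_eq!(1000 - slash, 750);
}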
on_offence_now( &[OffenceDetails { - offender: ( - 11, - Exposure { - total: 500, - own: 500, - others: vec![], - }, - ), + offender: (11, Exposure { total: 500, own: 500, others: vec![] }), reporters: vec![], }], &[Perbill::from_percent(50)], @@ -2098,17 +2222,14 @@ fn slashing_performed_according_exposure() { #[test] fn slash_in_old_span_does_not_deselect() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2117,24 +2238,21 @@ fn slash_in_old_span_does_not_deselect() { assert_eq!(Staking::force_era(), Forcing::ForceNew); assert!(!>::contains_key(11)); - mock::start_era(2); + mock::start_active_era(2); Staking::validate(Origin::signed(10), Default::default()).unwrap(); assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(!Session::validators().contains(&11)); - mock::start_era(3); + mock::start_active_era(3); // this staker is in a new slashing span now, having re-registered after // their prior slash. on_offence_in_era( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2148,10 +2266,7 @@ fn slash_in_old_span_does_not_deselect() { on_offence_in_era( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }], // NOTE: A 100% slash here would clean up the account, causing de-registration. @@ -2174,14 +2289,11 @@ fn reporters_receive_their_slice() { // The reporters' reward is calculated from the total exposure. let initial_balance = 1125; - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance); + assert_eq!(Staking::eras_stakers(active_era(), 11).total, initial_balance); on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![1, 2], }], &[Perbill::from_percent(50)], @@ -2204,14 +2316,11 @@ fn subsequent_reports_in_same_span_pay_out_less() { // The reporters' reward is calculated from the total exposure. 
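// A standalone sketch of the reporter payout shape the next two tests probe,
// with a placeholder 10% slash-reward fraction (the mock's actual fraction is
// not shown in this hunk): the reward pool is carved out of the slash, which
// is itself a fraction of the *total* exposure, and split among the reporters.
fn reporters_reward(
    total_exposure: u128,
    slash_percent: u128,
    reward_percent: u128,
    n_reporters: u128,
) -> u128 {
    let slash = total_exposure * slash_percent / 100;
    let pool = slash * reward_percent / 100;
    pool / n_reporters
}

fn main() {
    // 1125 total exposure, 50% slash, two reporters, all integer-rounded down
    assert_eq!(reporters_reward(1125, 50, 10, 2), 28);
}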
let initial_balance = 1125; - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance); + assert_eq!(Staking::eras_stakers(active_era(), 11).total, initial_balance); on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![1], }], &[Perbill::from_percent(20)], @@ -2224,10 +2333,7 @@ fn subsequent_reports_in_same_span_pay_out_less() { on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![1], }], &[Perbill::from_percent(50)], @@ -2249,20 +2355,20 @@ fn invulnerables_are_not_slashed() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(21), 2000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); + let exposure = Staking::eras_stakers(active_era(), 21); let initial_balance = Staking::slashable_balance_of(&21); - let nominator_balances: Vec<_> = exposure.others - .iter().map(|o| Balances::free_balance(&o.who)).collect(); + let nominator_balances: Vec<_> = + exposure.others.iter().map(|o| Balances::free_balance(&o.who)).collect(); on_offence_now( &[ OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }, OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), + offender: (21, Staking::eras_stakers(active_era(), 21)), reporters: vec![], }, ], @@ -2292,10 +2398,7 @@ fn dont_slash_if_fraction_is_zero() { on_offence_now( &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], }], &[Perbill::from_percent(0)], @@ -2315,12 +2418,10 @@ fn only_slash_for_max_in_era() { assert_eq!(Balances::free_balance(11), 1000); on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], &[Perbill::from_percent(50)], ); @@ -2329,12 +2430,10 @@ fn only_slash_for_max_in_era() { assert_eq!(Staking::force_era(), Forcing::ForceNew); on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], &[Perbill::from_percent(25)], ); @@ -2342,12 +2441,10 @@ fn only_slash_for_max_in_era() { assert_eq!(Balances::free_balance(11), 500); on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], &[Perbill::from_percent(60)], ); @@ -2359,49 +2456,51 @@ fn only_slash_for_max_in_era() { #[test] fn garbage_collection_after_slashing() { // ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping. 
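// A standalone sketch (not from the pallet) of the rule
// `only_slash_for_max_in_era` exercises above: repeated offences in one era
// only slash the amount beyond the largest fraction applied so far, which
// reproduces the 50% -> 25% -> 60% sequence from the test.
fn apply_slash(balance: &mut u128, initial: u128, max_so_far: &mut u32, percent: u32) {
    if percent > *max_so_far {
        let target = initial * percent as u128 / 100;
        let already = initial * *max_so_far as u128 / 100;
        *balance -= target - already;
        *max_so_far = percent;
    }
}

fn main() {
    let initial = 1000u128;
    let mut balance = initial;
    let mut max = 0u32;
    apply_slash(&mut balance, initial, &mut max, 50);
    assert_eq!(balance, 500);
    apply_slash(&mut balance, initial, &mut max, 25); // ignored: below current max
    assert_eq!(balance, 500);
    apply_slash(&mut balance, initial, &mut max, 60); // only the extra 10% applies
    assert_eq!(balance, 400);
}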
- ExtBuilder::default().existential_deposit(2).build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 256_000); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), + ExtBuilder::default() + .existential_deposit(2) + .balance_factor(2) + .build_and_execute(|| { + assert_eq!(Balances::free_balance(11), 2000); + + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); + }], + &[Perbill::from_percent(10)], + ); - assert_eq!(Balances::free_balance(11), 256_000 - 25_600); - assert!(::SlashingSpans::get(&11).is_some()); - assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &25_600); + assert_eq!(Balances::free_balance(11), 2000 - 200); + assert!(::SlashingSpans::get(&11).is_some()); + assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &200); - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![], - }, - ], - &[Perbill::from_percent(100)], - ); + }], + &[Perbill::from_percent(100)], + ); - // validator and nominator slash in era are garbage-collected by era change, - // so we don't test those here. + // validator and nominator slash in era are garbage-collected by era change, + // so we don't test those here. - assert_eq!(Balances::free_balance(11), 0); - assert_eq!(Balances::total_balance(&11), 0); + assert_eq!(Balances::free_balance(11), 2); + assert_eq!(Balances::total_balance(&11), 2); - let slashing_spans = ::SlashingSpans::get(&11).unwrap(); - assert_eq!(slashing_spans.iter().count(), 2); + let slashing_spans = ::SlashingSpans::get(&11).unwrap(); + assert_eq!(slashing_spans.iter().count(), 2); - // reap_stash respects num_slashing_spans so that weight is accurate - assert_noop!(Staking::reap_stash(Origin::none(), 11, 0), Error::::IncorrectSlashingSpans); - assert_ok!(Staking::reap_stash(Origin::none(), 11, 2)); + // reap_stash respects num_slashing_spans so that weight is accurate + assert_noop!( + Staking::reap_stash(Origin::none(), 11, 0), + Error::::IncorrectSlashingSpans + ); + assert_ok!(Staking::reap_stash(Origin::none(), 11, 2)); - assert!(::SlashingSpans::get(&11).is_none()); - assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &0); - }) + assert!(::SlashingSpans::get(&11).is_none()); + assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &0); + }) } #[test] @@ -2409,22 +2508,17 @@ fn garbage_collection_on_window_pruning() { // ensures that `ValidatorSlashInEra` and `NominatorSlashInEra` are cleared after // `BondingDuration`. 
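// A standalone sketch of the balance arithmetic in
// `garbage_collection_after_slashing`, under the assumption (hedged, and
// consistent with the test's `free_balance(11) == 2` result) that a slash
// leaves the existential deposit in place so the stash survives until it is
// explicitly reaped via `reap_stash`.
fn slash(free: &mut u128, amount: u128, existential_deposit: u128) -> u128 {
    let slashable = free.saturating_sub(existential_deposit).min(amount);
    *free -= slashable;
    slashable
}

fn main() {
    let ed = 2u128;
    let mut free = 2000u128;
    assert_eq!(slash(&mut free, 200, ed), 200); // the 10% slash
    assert_eq!(free, 1800);
    let all = free;
    slash(&mut free, all, ed); // the 100% slash
    assert_eq!(free, ed); // ED survives until reap_stash
}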
ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Balances::free_balance(11), 1000); - let now = Staking::active_era().unwrap().index; + let now = active_era(); let exposure = Staking::eras_stakers(now, 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(now, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (11, Staking::eras_stakers(now, 11)), reporters: vec![] }], &[Perbill::from_percent(10)], ); @@ -2439,7 +2533,7 @@ fn garbage_collection_on_window_pruning() { assert!(::ValidatorSlashInEra::get(&now, &11).is_some()); assert!(::NominatorSlashInEra::get(&now, &101).is_some()); - mock::start_era(era); + mock::start_active_era(era); } assert!(::ValidatorSlashInEra::get(&now, &11).is_none()); @@ -2450,27 +2544,25 @@ fn garbage_collection_on_window_pruning() { #[test] fn slashing_nominators_by_span_max() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - mock::start_era(2); - mock::start_era(3); + mock::start_active_era(1); + mock::start_active_era(2); + mock::start_active_era(3); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(21), 2000); assert_eq!(Balances::free_balance(101), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); + let exposure_11 = Staking::eras_stakers(active_era(), 11); + let exposure_21 = Staking::eras_stakers(active_era(), 21); let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_in_era( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], &[Perbill::from_percent(10)], 2, ); @@ -2487,24 +2579,16 @@ fn slashing_nominators_by_span_max() { let get_span = |account| ::SlashingSpans::get(&account).unwrap(); - assert_eq!( - get_span(11).iter().collect::>(), - expected_spans, - ); + assert_eq!(get_span(11).iter().collect::>(), expected_spans); - assert_eq!( - get_span(101).iter().collect::>(), - expected_spans, - ); + assert_eq!(get_span(101).iter().collect::>(), expected_spans); // second slash: higher era, higher value, same span. on_offence_in_era( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), 21)), + reporters: vec![], + }], &[Perbill::from_percent(30)], 3, ); @@ -2522,12 +2606,10 @@ fn slashing_nominators_by_span_max() { // third slash: in same era and on same validator as first, higher // in-era value, but lower slash value than slash 2. 
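// A standalone sketch of the pruning rule `garbage_collection_on_window_pruning`
// checks above: per-era slash records are only kept for the bonding window and
// are discarded once `BondingDuration` eras have passed.
use std::collections::BTreeMap;

fn prune_slash_records(records: &mut BTreeMap<u32, u128>, active_era: u32, bonding_duration: u32) {
    records.retain(|&era, _| era + bonding_duration >= active_era);
}

fn main() {
    let mut records = BTreeMap::new();
    records.insert(1, 100); // slash recorded in era 1
    let bonding_duration = 3;
    for era in 2..=4 {
        prune_slash_records(&mut records, era, bonding_duration);
        assert!(records.contains_key(&1)); // still within the window
    }
    prune_slash_records(&mut records, 5, bonding_duration);
    assert!(records.is_empty()); // pruned once the window has passed
}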
on_offence_in_era( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], &[Perbill::from_percent(20)], 2, ); @@ -2548,9 +2630,9 @@ fn slashing_nominators_by_span_max() { #[test] fn slashes_are_summed_across_spans() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - mock::start_era(2); - mock::start_era(3); + mock::start_active_era(1); + mock::start_active_era(2); + mock::start_active_era(3); assert_eq!(Balances::free_balance(21), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); @@ -2558,12 +2640,10 @@ fn slashes_are_summed_across_spans() { let get_span = |account| ::SlashingSpans::get(&account).unwrap(); on_offence_now( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), 21)), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); @@ -2578,17 +2658,15 @@ fn slashes_are_summed_across_spans() { // 21 has been force-chilled. re-signal intent to validate. Staking::validate(Origin::signed(20), Default::default()).unwrap(); - mock::start_era(4); + mock::start_active_era(4); assert_eq!(Staking::slashable_balance_of(&21), 900); on_offence_now( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), 21)), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); @@ -2606,40 +2684,38 @@ fn slashes_are_summed_across_spans() { #[test] fn deferred_slashes_are_deferred() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure = Staking::eras_stakers(active_era(), 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(2); + mock::start_active_era(2); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(3); + mock::start_active_era(3); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. 
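// A standalone sketch of what `slashes_are_summed_across_spans` shows: within
// one slashing span only the maximum fraction counts, but re-validating opens
// a new span with a fresh maximum, so slashes accumulate across spans.
fn main() {
    let mut stake = 1000u128;
    // first slashing span: a 10% slash
    stake -= stake / 10;
    assert_eq!(stake, 900);
    // the stash re-validates, opening a new span; the span-max rule resets,
    // so a second 10% slash applies on top of the first
    stake -= stake / 10;
    assert_eq!(stake, 810);
}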
- mock::start_era(4); + mock::start_active_era(4); assert_eq!(Balances::free_balance(11), 900); assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); @@ -2649,36 +2725,26 @@ fn deferred_slashes_are_deferred() { #[test] fn remove_deferred() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure = Staking::eras_stakers(active_era(), 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(10)], ); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(2); + mock::start_active_era(2); on_offence_in_era( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(15)], 1, ); @@ -2694,20 +2760,20 @@ fn remove_deferred() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(3); + mock::start_active_era(3); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. - mock::start_era(4); + mock::start_active_era(4); // the first slash for 10% was cancelled, so no effect. 
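// A standalone model (not the pallet's implementation) of the deferred-slash
// queue these tests exercise, assuming `slash_defer_duration = 2` as in the
// builder: per the test's comment, a slash found in era N is applied at the
// start of era N + 2 + 1, after at least two full eras of deferral.
use std::collections::BTreeMap;

struct DeferredSlashes {
    defer_duration: u32,
    queue: BTreeMap<u32, Vec<u128>>,
}

impl DeferredSlashes {
    fn defer(&mut self, offence_era: u32, amount: u128) {
        let apply_at = offence_era + self.defer_duration + 1;
        self.queue.entry(apply_at).or_default().push(amount);
    }

    fn on_era_start(&mut self, era: u32, balance: &mut u128) {
        for amount in self.queue.remove(&era).unwrap_or_default() {
            *balance -= amount;
        }
    }
}

fn main() {
    let mut ds = DeferredSlashes { defer_duration: 2, queue: BTreeMap::new() };
    let mut balance = 1000u128;
    ds.defer(1, 100); // 10% offence discovered in era 1
    for era in 2..=3 {
        ds.on_era_start(era, &mut balance);
        assert_eq!(balance, 1000); // nothing applied yet
    }
    ds.on_era_start(4, &mut balance);
    assert_eq!(balance, 900); // applied at the start of era 4
}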
assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(5); + mock::start_active_era(5); let slash_10 = Perbill::from_percent(10); let slash_15 = Perbill::from_percent(15); @@ -2725,60 +2791,38 @@ fn remove_deferred() { #[test] fn remove_multi_deferred() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure = Staking::eras_stakers(active_era(), 11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(10)], ); on_offence_now( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - } - ], + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), 21)), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], ); on_offence_now( - &[ - OffenceDetails { - offender: (42, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (42, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], ); on_offence_now( - &[ - OffenceDetails { - offender: (69, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (69, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], ); @@ -2809,1349 +2853,52 @@ fn remove_multi_deferred() { }) } -mod offchain_election { - use crate::*; - use codec::Encode; - use frame_support::{ - assert_noop, assert_ok, assert_err_with_weight, - dispatch::DispatchResultWithPostInfo, - }; - use sp_runtime::transaction_validity::TransactionSource; - use mock::*; - use parking_lot::RwLock; - use sp_core::offchain::{ - testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, - OffchainExt, TransactionPoolExt, - }; - use sp_io::TestExternalities; - use sp_npos_elections::StakedAssignment; - use frame_support::traits::OffchainWorker; - use std::sync::Arc; - use substrate_test_utils::assert_eq_uvec; - - fn percent(x: u16) -> OffchainAccuracy { - OffchainAccuracy::from_percent(x) - } - - /// setup a new set of validators and nominator storage items independent of the parent mock - /// file. This produces a edge graph that can be reduced. - pub fn build_offchain_election_test_ext() { - for i in (10..=40).step_by(10) { - // Note: we respect the convention of the mock (10, 11 pairs etc.) since these accounts - // have corresponding keys in session which makes everything more ergonomic and - // realistic. 
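// A standalone sketch (hypothetical shape, not the pallet's API) of the
// cancel-by-index operation that `remove_multi_deferred` relies on: queued,
// not-yet-applied slashes are dropped by their position in the unapplied list.
fn cancel_deferred(unapplied: &mut Vec<(u64, u32)>, mut indices: Vec<usize>) {
    indices.sort_unstable();
    indices.dedup();
    for i in indices.into_iter().rev() {
        if i < unapplied.len() {
            unapplied.remove(i);
        }
    }
}

fn main() {
    // (offender, slash percent) queued by the five offences above
    let mut unapplied = vec![(11, 10), (21, 10), (11, 25), (42, 25), (69, 25)];
    cancel_deferred(&mut unapplied, vec![0, 2]); // drop both slashes on 11
    assert_eq!(unapplied, vec![(21, 10), (42, 25), (69, 25)]);
}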
- bond_validator(i + 1, i, 100); - } +#[test] +fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21]); - let mut voter = 1; - bond_nominator(voter, 1000 + voter, 100, vec![11]); - voter = 2; - bond_nominator(voter, 1000 + voter, 100, vec![11, 11]); - voter = 3; - bond_nominator(voter, 1000 + voter, 100, vec![21, 41]); - voter = 4; - bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); - voter = 5; - bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); - } + // pre-slash balance + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - /// convert an externalities to one that can handle offchain worker tests. - fn offchainify(ext: &mut TestExternalities, iterations: u32) -> Arc> { - let (offchain, offchain_state) = TestOffchainExt::new(); - let (pool, pool_state) = TestTransactionPoolExt::new(); + // 11 and 21 both have the support of 100 + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); - let mut seed = [0_u8; 32]; - seed[0..4].copy_from_slice(&iterations.to_le_bytes()); - offchain_state.write().seed = seed; + assert_eq!(exposure_11.total, 1000 + 125); + assert_eq!(exposure_21.total, 1000 + 375); - ext.register_extension(OffchainExt::new(offchain)); - ext.register_extension(TransactionPoolExt::new(pool)); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); - pool_state - } + // post-slash balance + let nominator_slash_amount_11 = 125 / 10; + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); - fn election_size() -> ElectionSize { - ElectionSize { - validators: Staking::snapshot_validators().unwrap().len() as ValidatorIndex, - nominators: Staking::snapshot_nominators().unwrap().len() as NominatorIndex, + // This is the best way to check that the validator was chilled; `get` will + // return default value. + for (stash, _) in ::Validators::iter() { + assert!(stash != 11); } - } - fn submit_solution( - origin: Origin, - winners: Vec, - compact: CompactAssignments, - score: ElectionScore, - ) -> DispatchResultWithPostInfo { - Staking::submit_election_solution( - origin, - winners, - compact, - score, - current_era(), - election_size(), - ) - } + let nominations = ::Nominators::get(&101).unwrap(); - #[test] - fn is_current_session_final_works() { - ExtBuilder::default() - .session_per_era(3) - .build() - .execute_with(|| { - mock::start_era(1); - assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::current_era(), Some(1)); - assert_eq!(Staking::is_current_session_final(), false); - - start_session(4); - assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::current_era(), Some(1)); - assert_eq!(Staking::is_current_session_final(), true); - - start_session(5); - assert_eq!(Session::current_index(), 5); - // era changed. - assert_eq!(Staking::current_era(), Some(2)); - assert_eq!(Staking::is_current_session_final(), false); - }) - } + // and make sure that the vote will be ignored even if the validator + // re-registers. 
+ let last_slash = ::SlashingSpans::get(&11).unwrap().last_nonzero_slash(); + assert!(nominations.submitted_in < last_slash); - #[test] - fn offchain_window_is_triggered() { - ExtBuilder::default() - .session_per_era(5) - .session_length(10) - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(7); - assert_session_era!(0, 0); - - run_to_block(10); - assert_session_era!(1, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - run_to_block(36); - assert_session_era!(3, 0); - - // fist era has session 0, which has 0 blocks length, so we have in total 40 blocks - // in the era. - run_to_block(37); - assert_session_era!(3, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - assert!(Staking::snapshot_nominators().is_some()); - assert!(Staking::snapshot_validators().is_some()); - - run_to_block(38); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - - run_to_block(39); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - - run_to_block(40); - assert_session_era!(4, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - run_to_block(86); - assert_session_era!(8, 1); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - // second era onwards has 50 blocks per era. - run_to_block(87); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(87)); - assert!(Staking::snapshot_nominators().is_some()); - assert!(Staking::snapshot_validators().is_some()); - - run_to_block(90); - assert_session_era!(9, 1); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - }) - } + // actually re-bond the slashed validator + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); - #[test] - fn offchain_window_is_triggered_when_forcing() { - ExtBuilder::default() - .session_per_era(5) - .session_length(10) - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(12); - ForceEra::put(Forcing::ForceNew); - run_to_block(13); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(17); // instead of 47 - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(17)); - - run_to_block(20); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - }) - } - - #[test] - fn offchain_window_is_triggered_when_force_always() { - ExtBuilder::default() - .session_per_era(5) - .session_length(10) - .election_lookahead(3) - .build() - .execute_with(|| { - - ForceEra::put(Forcing::ForceAlways); - run_to_block(16); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(17); // instead of 37 - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(17)); - - run_to_block(20); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(26); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(27); // next one again - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(27)); - }) - } - - #[test] - fn offchain_window_closes_when_forcenone() { - 
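// A standalone sketch of the re-nomination filter the new test checks with
// `nominations.submitted_in < last_slash`: a nomination only counts toward a
// validator if it was submitted at or after that validator's last nonzero
// slash, so stale votes for a freshly slashed validator are ignored even if
// the validator re-registers.
struct Nominations { targets: Vec<u64>, submitted_in: u32 }

fn effective_targets(n: &Nominations, last_nonzero_slash: impl Fn(u64) -> Option<u32>) -> Vec<u64> {
    n.targets
        .iter()
        .copied()
        .filter(|t| last_nonzero_slash(*t).map_or(true, |s| s <= n.submitted_in))
        .collect()
}

fn main() {
    let n = Nominations { targets: vec![11, 21], submitted_in: 0 };
    // 11 was slashed in era 1, after the nomination was submitted
    let last_slash = |who| if who == 11 { Some(1) } else { None };
    assert_eq!(effective_targets(&n, last_slash), vec![21]); // the vote for 11 is ignored
}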
ExtBuilder::default() - .session_per_era(5) - .session_length(10) - .election_lookahead(3) - .build() - .execute_with(|| { - ForceEra::put(Forcing::ForceNone); - - run_to_block(36); - assert_session_era!(3, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - // opens - run_to_block(37); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - assert!(Staking::is_current_session_final()); - assert!(Staking::snapshot_validators().is_some()); - - // closes normally - run_to_block(40); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(!Staking::is_current_session_final()); - assert!(Staking::snapshot_validators().is_none()); - assert_session_era!(4, 0); - - run_to_block(47); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(4, 0); - - run_to_block(57); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(5, 0); - - run_to_block(67); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - // Will not open again as scheduled - run_to_block(87); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(8, 0); - - run_to_block(90); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert_session_era!(9, 0); - }) - } - - #[test] - fn offchain_window_on_chain_fallback_works() { - ExtBuilder::default().build_and_execute(|| { - start_session(1); - start_session(2); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - // some election must have happened by now. - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::OnChain), - ); - }) - } - - #[test] - #[ignore] - fn offchain_wont_work_if_snapshot_fails() { - ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(12); - assert!(Staking::snapshot_validators().is_some()); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - // validate more than the limit - let limit: NominatorIndex = ValidatorIndex::max_value() as NominatorIndex + 1; - let ctrl = 1_000_000; - for i in 0..limit { - bond_validator((1000 + i).into(), (1000 + i + ctrl).into(), 100); - } - - // window stays closed since no snapshot was taken. - run_to_block(27); - assert!(Staking::snapshot_validators().is_none()); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - }) - } - - #[test] - fn staking_is_locked_when_election_window_open() { - ExtBuilder::default() - .offchain_election_ext() - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(12); - assert!(Staking::snapshot_validators().is_some()); - // given - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - // chill et. al. are now not allowed. - assert_noop!( - Staking::chill(Origin::signed(10)), - Error::::CallNotAllowed, - ); - }) - } - - #[test] - fn signed_result_can_be_submitted() { - // should check that we have a new validator set normally, event says that it comes from - // offchain. 
- ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(12); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - assert!(Staking::snapshot_validators().is_some()); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - let queued_result = Staking::queued_elected().unwrap(); - assert_eq!(queued_result.compute, ElectionCompute::Signed); - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::SolutionStored(ElectionCompute::Signed), - ); - - run_to_block(15); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::Signed), - ); - }) - } - - #[test] - fn signed_result_can_be_submitted_later() { - // same as `signed_result_can_be_submitted` but at a later block. - ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(14); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution(Origin::signed(10), winners, compact, score)); - - let queued_result = Staking::queued_elected().unwrap(); - assert_eq!(queued_result.compute, ElectionCompute::Signed); - - run_to_block(15); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::Signed), - ); - }) - } - - #[test] - fn early_solution_submission_is_rejected() { - // should check that we have a new validator set normally, event says that it comes from - // offchain. - ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(11); - // submission is not yet allowed - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - // create all the indices just to build the solution. - Staking::create_stakers_snapshot(); - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - Staking::kill_stakers_snapshot(); - - assert_err_with_weight!( - Staking::submit_election_solution( - Origin::signed(10), - winners.clone(), - compact.clone(), - score, - current_era(), - ElectionSize::default(), - ), - Error::::OffchainElectionEarlySubmission, - Some(::DbWeight::get().reads(1)), - ); - }) - } - - #[test] - fn weak_solution_is_rejected() { - // A solution which is weaker than what we currently have on-chain is rejected. 
- ExtBuilder::default() - .offchain_election_ext() - .has_stakers(false) - .validator_count(4) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - // a good solution - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - // a bad solution - let (compact, winners, score) = horrible_npos_solution(false); - assert_err_with_weight!( - submit_solution( - Origin::signed(10), - winners.clone(), - compact.clone(), - score, - ), - Error::::OffchainElectionWeakSubmission, - Some(::DbWeight::get().reads(3)) - ); - }) - } - - #[test] - fn better_solution_is_accepted() { - // A solution which is better than what we currently have on-chain is accepted. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - // a meeeeh solution - let (compact, winners, score) = horrible_npos_solution(false); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - // a better solution - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - }) - } - - #[test] - fn offchain_worker_runs_when_window_open() { - // at the end of the first finalized block with ElectionStatus::open(_), it should execute. - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) - .build(); - let state = offchainify(&mut ext, 0); - ext.execute_with(|| { - run_to_block(12); - - // local key 11 is in the elected set. - assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(state.read().transactions.len(), 0); - Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - }; - - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Ok(ValidTransaction { - priority: UnsignedPriority::get() + 1125, // the proposed slot stake. - requires: vec![], - provides: vec![("StakingOffchain", current_era()).encode()], - longevity: 3, - propagate: false, - }) - ) - }) - } - - #[test] - fn offchain_worker_runs_with_balancing() { - // Offchain worker balances based on the number provided by randomness. See the difference - // in the priority, which comes from the computed score. - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) - .max_offchain_iterations(2) - .build(); - let state = offchainify(&mut ext, 2); - ext.execute_with(|| { - run_to_block(12); - - // local key 11 is in the elected set. 
- assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(state.read().transactions.len(), 0); - Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - }; - - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Ok(ValidTransaction { - // the proposed slot stake, with balance_solution. - priority: UnsignedPriority::get() + 1250, - requires: vec![], - provides: vec![("StakingOffchain", active_era()).encode()], - longevity: 3, - propagate: false, - }) - ) - }) - } - - #[test] - fn mediocre_submission_from_authority_is_early_rejected() { - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .build(); - let state = offchainify(&mut ext, 0); - ext.execute_with(|| { - run_to_block(12); - // put a good solution on-chain - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - ),); - - // now run the offchain worker in the same chain state. - Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - }; - - // pass this call to ValidateUnsigned - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Err( - InvalidTransaction::Custom(>::OffchainElectionWeakSubmission.as_u8()).into(), - ), - ) - }) - } - - #[test] - fn invalid_election_correct_number_of_winners() { - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - ValidatorCount::put(3); - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - ValidatorCount::put(4); - - assert_eq!(winners.len(), 3); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_election_solution_size() { - ExtBuilder::default() - .offchain_election_ext() - .build() - .execute_with(|| { - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ElectionSize::default(), - ), - Error::::OffchainElectionBogusElectionSize, - ); - }) - } - - #[test] - fn invalid_election_correct_number_of_winners_1() { - // if we have too little validators, then the number of candidates is the bound. 
- ExtBuilder::default() - .offchain_election_ext() - .validator_count(8) // we simply cannot elect 8 - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - ValidatorCount::put(3); - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - ValidatorCount::put(4); - - assert_eq!(winners.len(), 3); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_election_correct_number_of_winners_2() { - // if we have too little validators, then the number of candidates is the bound. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(8) // we simply cannot elect 8 - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - assert_eq!(winners.len(), 4); - - // all good. We chose 4 and it works. - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - ),); - }) - } - - #[test] - fn invalid_election_out_of_bound_nominator_index() { - // A nominator index which is simply invalid - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (mut compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - // index 9 doesn't exist. - compact.votes1.push((9, 2)); - - // The error type sadly cannot be more specific now. - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusCompact, - ); - }) - } - - #[test] - fn invalid_election_out_of_bound_validator_index() { - // A validator index which is out of bound - ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (mut compact, winners, score) = prepare_submission_with(true, true, 2, |_| {}); - - // index 4 doesn't exist. - compact.votes1.iter_mut().for_each(|(_, vidx)| if *vidx == 1 { *vidx = 4 }); - - // The error type sadly cannot be more specific now. - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusCompact, - ); - }) - } - - #[test] - fn invalid_election_out_of_bound_winner_index() { - // A winner index which is simply invalid - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (compact, _, score) = prepare_submission_with(true, true, 2, |_| {}); - - // index 4 doesn't exist. 
- let winners = vec![0, 1, 2, 4]; - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinner, - ); - }) - } - - #[test] - fn invalid_election_non_winner_validator_index() { - // An edge that points to a correct validator index who is NOT a winner. This is very - // similar to the test that raises `OffchainElectionBogusNomination`. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) // we select only 2. - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (compact, winners, score) = prepare_submission_with(false, true, 2, |a| { - // swap all 11 and 41s in the distribution with non-winners. Note that it is - // important that the count of winners and the count of unique targets remain - // valid. - a.iter_mut().for_each(| StakedAssignment { who, distribution } | - distribution.iter_mut().for_each(|(t, _)| { - if *t == 41 { *t = 31 } else { *t = 21 } - // if it is self vote, correct that. - if *who == 41 { *who = 31 } - if *who == 11 { *who = 21 } - }) - ); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusNomination, - ); - }) - } - - #[test] - fn offchain_election_unique_target_count_is_checked() { - // Number of unique targets and and winners.len must match. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(2) // we select only 2. - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - - let (compact, winners, score) = prepare_submission_with(false, true, 2, |a| { - a.iter_mut() - .find(|x| x.who == 5) - // just add any new target. - .map(|x| { - // old value. - assert_eq!(x.distribution, vec![(41, 100)]); - // new value. - x.distribution = vec![(21, 50), (41, 50)] - }); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_election_wrong_self_vote() { - // A self vote for someone else. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |a| { - // mutate a self vote to target someone else. That someone else is still among the - // winners - a.iter_mut().find(|x| x.who == 11).map(|x| { - x.distribution - .iter_mut() - .find(|y| y.0 == 11) - .map(|y| y.0 = 21) - }); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusSelfVote, - ); - }) - } - - #[test] - fn invalid_election_wrong_self_vote_2() { - // A self validator voting for someone else next to self vote. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, true, 2, |a| { - // Remove the self vote. 
- a.retain(|x| x.who != 11); - // add is as a new double vote - a.push(StakedAssignment { - who: 11, - distribution: vec![(11, 50), (21, 50)], - }); - }); - - // This raises score issue. - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusSelfVote, - ); - }) - } - - #[test] - fn invalid_election_over_stake() { - // Someone's edge ratios sums to more than 100%. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - // Note: we don't reduce here to be able to tweak votes3. votes3 will vanish if you - // reduce. - let (mut compact, winners, score) = prepare_submission_with(true, false, 0, |_| {}); - - if let Some(c) = compact.votes3.iter_mut().find(|x| x.0 == 0) { - // by default it should have been (0, [(2, 33%), (1, 33%)], 0) - // now the sum is above 100% - c.1 = [(2, percent(66)), (1, percent(66))]; - } - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusCompact, - ); - }) - } - - #[test] - fn invalid_election_under_stake() { - // at the time of this writing, we cannot under stake someone. The compact assignment works - // in a way that some of the stakes are presented by the submitter, and the last one is read - // from chain by subtracting the rest from total. Hence, the sum is always correct. - // This test is only here as a demonstration. - } - - #[test] - fn invalid_election_invalid_target_stealing() { - // A valid voter who voted for someone who is a candidate, and is a correct winner, but is - // actually NOT nominated by this nominator. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, false, 0, |a| { - // 3 only voted for 20 and 40. We add a fake vote to 30. The stake sum is still - // correctly 100. - a.iter_mut() - .find(|x| x.who == 3) - .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); - }); - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusNomination, - ); - }) - } - - #[test] - fn nomination_slash_filter_is_checked() { - // If a nominator has voted for someone who has been recently slashed, that particular - // nomination should be disabled for the upcoming election. A solution must respect this - // rule. - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - - // finalize the round with fallback. This is needed since all nominator submission - // are in era zero and we want this one to pass with no problems. - run_to_block(15); - - // go to the next session to trigger mock::start_era and bump the active era - run_to_block(20); - - // slash 10. This must happen outside of the election window. - let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - on_offence_now( - &[OffenceDetails { - offender: (11, offender_expo.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(50)], - ); - - // validate 10 again for the next round. But this guy will not have the votes that - // it should have had from 1 and 2. 
- assert_ok!(Staking::validate( - Origin::signed(10), - Default::default() - )); - - // open the election window and create snapshots. - run_to_block(32); - - // a solution that has been prepared after the slash. - let (compact, winners, score) = prepare_submission_with(true, false, 0, |a| { - // no one is allowed to vote for 10, except for itself. - a.into_iter() - .filter(|s| s.who != 11) - .for_each(|s| - assert!(s.distribution.iter().find(|(t, _)| *t == 11).is_none()) - ); - }); - - // can be submitted. - assert_ok!(submit_solution( - Origin::signed(10), - winners, - compact, - score, - )); - - // a wrong solution. - let (compact, winners, score) = prepare_submission_with(true, false, 0, |a| { - // add back the vote that has been filtered out. - a.push(StakedAssignment { - who: 1, - distribution: vec![(11, 100)] - }); - }); - - // is rejected. - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionSlashedNomination, - ); - }) - } - - #[test] - fn invalid_election_wrong_score() { - // A valid voter who's total distributed stake is more than what they bond - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_election_test_ext(); - run_to_block(12); - - let (compact, winners, mut score) = prepare_submission_with(true, true, 2, |_| {}); - score[0] += 1; - - assert_noop!( - submit_solution( - Origin::signed(10), - winners, - compact, - score, - ), - Error::::OffchainElectionBogusScore, - ); - }) - } - - #[test] - fn offchain_storage_is_set() { - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .build(); - let state = offchainify(&mut ext, 0); - - ext.execute_with(|| { - use offchain_election::OFFCHAIN_HEAD_DB; - use sp_runtime::offchain::storage::StorageValueRef; - - run_to_block(12); - - Staking::offchain_worker(12); - // it works - assert_eq!(state.read().transactions.len(), 1); - - // and it is set - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - assert_eq!(storage.get::().unwrap().unwrap(), 12); - }) - } - - #[test] - fn offchain_storage_prevents_duplicate() { - let mut ext = ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .build(); - let _ = offchainify(&mut ext, 0); - - ext.execute_with(|| { - use offchain_election::OFFCHAIN_HEAD_DB; - use sp_runtime::offchain::storage::StorageValueRef; - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - - run_to_block(12); - - // first run -- ok - assert_eq!( - offchain_election::set_check_offchain_execution_status::(12), - Ok(()), - ); - assert_eq!(storage.get::().unwrap().unwrap(), 12); - - // re-execute after the next. not allowed. - assert_eq!( - offchain_election::set_check_offchain_execution_status::(13), - Err("recently executed."), - ); - - // a fork like situation -- re-execute 10, 11, 12. But it won't go through. 
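// --- illustrative sketch (editorial, not part of this patch) ----------------
// The slash filter checked above amounts to dropping every edge that points at
// a recently slashed validator, keeping only its self-vote. Hypothetical
// standalone model:
struct SketchAssignment {
	who: u64,
	distribution: Vec<(u64, u128)>, // (target, stake)
}

fn strip_slashed_votes(assignments: &mut Vec<SketchAssignment>, slashed: u64) {
	for a in assignments.iter_mut() {
		if a.who != slashed {
			// non-self votes for the slashed validator are disallowed.
			a.distribution.retain(|(target, _)| *target != slashed);
		}
	}
	// voters left with an empty distribution carry no weight in the solution.
	assignments.retain(|a| !a.distribution.is_empty());
}
// -----------------------------------------------------------------------------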
- assert_eq!( - offchain_election::set_check_offchain_execution_status::(10), - Err("fork."), - ); - assert_eq!( - offchain_election::set_check_offchain_execution_status::(11), - Err("fork."), - ); - assert_eq!( - offchain_election::set_check_offchain_execution_status::(12), - Err("recently executed."), - ); - }) - } - - #[test] - #[should_panic] - fn offence_is_blocked_when_window_open() { - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - run_to_block(12); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 10); - - // panic from the impl in mock - on_offence_now( - &[OffenceDetails { - offender: (10, offender_expo.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); - }) - } -} - -#[test] -fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - - // pre-slash balance - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - - // 11 and 21 both have the support of 100 - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - - assert_eq!(exposure_11.total, 1000 + 125); - assert_eq!(exposure_21.total, 1000 + 375); - - on_offence_now( - &[OffenceDetails { - offender: (11, exposure_11.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); - - // post-slash balance - let nominator_slash_amount_11 = 125 / 10; - assert_eq!(Balances::free_balance(11), 900); - assert_eq!( - Balances::free_balance(101), - 2000 - nominator_slash_amount_11 - ); - - // This is the best way to check that the validator was chilled; `get` will - // return default value. - for (stash, _) in ::Validators::iter() { - assert!(stash != 11); - } - - let nominations = ::Nominators::get(&101).unwrap(); - - // and make sure that the vote will be ignored even if the validator - // re-registers. 
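// --- illustrative sketch (editorial, not part of this patch) ----------------
// The offchain execution guard exercised above, modelled over plain state. The
// real code persists the mark in the offchain DB under `OFFCHAIN_HEAD_DB`; the
// function name and threshold value here are assumptions for illustration:
const REEXEC_THRESHOLD: u64 = 5; // assumed window size

fn set_check_execution_status_sketch(
	last: &mut Option<u64>,
	now: u64,
) -> Result<(), &'static str> {
	match *last {
		// an older block number re-submitted looks like a fork.
		Some(prev) if now < prev => Err("fork."),
		// too close to the previous run: refuse to re-execute.
		Some(prev) if now.saturating_sub(prev) < REEXEC_THRESHOLD => Err("recently executed."),
		_ => {
			*last = Some(now); // record and allow
			Ok(())
		}
	}
}
// -----------------------------------------------------------------------------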
- let last_slash = ::SlashingSpans::get(&11) - .unwrap() - .last_nonzero_slash(); - assert!(nominations.submitted_in < last_slash); - - // actually re-bond the slashed validator - assert_ok!(Staking::validate(Origin::signed(10), Default::default())); - - mock::start_era(2); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + mock::start_active_era(2); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); // 10 is re-elected, but without the support of 100 assert_eq!(exposure_11.total, 900); @@ -4168,45 +2915,45 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // * an invalid era to claim doesn't update last_reward // * double claim of one era fails ExtBuilder::default().nominate(true).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + let init_balance_10 = Balances::total_balance(&10); let init_balance_100 = Balances::total_balance(&100); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_100 = Perbill::from_rational_approximation::(125, 1125); + let part_for_10 = Perbill::from_rational::(1000, 1125); + let part_for_100 = Perbill::from_rational::(125, 1125); // Check state Payee::::insert(11, RewardDestination::Controller); Payee::::insert(101, RewardDestination::Controller); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something + let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); - mock::start_era(1); + mock::start_active_era(1); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); assert!(total_payout_1 != total_payout_0); - mock::start_era(2); + mock::start_active_era(2); - >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout let _ = Balances::deposit_creating(&999, 1_000_000_000); // Compute total payout now for whole duration as other parameter won't change - let total_payout_2 = current_total_payout_for_duration(3000); - assert!(total_payout_2 > 10); // Test is meaningful if reward something + let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); assert!(total_payout_2 != total_payout_0); assert!(total_payout_2 != total_payout_1); - mock::start_era(Staking::history_depth() + 1); + mock::start_active_era(Staking::history_depth() + 1); - let active_era = Staking::active_era().unwrap().index; + let active_era = active_era(); // This is the latest planned era in staking, not the active era let current_era = Staking::current_era().unwrap(); @@ -4216,19 +2963,19 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { assert_noop!( 
Staking::payout_stakers(Origin::signed(1337), 11, 0), // Fail: Era out of history - Error::::InvalidEraToReward + Error::::InvalidEraToReward.with_weight(err_weight) ); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, 2), // Fail: Double claim - Error::::AlreadyClaimed + Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, active_era), // Fail: Era not finished yet - Error::::InvalidEraToReward + Error::::InvalidEraToReward.with_weight(err_weight) ); // Era 0 can't be rewarded anymore and current era can't be rewarded yet @@ -4248,20 +2995,15 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { #[test] fn zero_slash_keeps_nominators() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); + mock::start_active_era(1); assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure = Staking::eras_stakers(active_era(), 11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(0)], ); @@ -4285,25 +3027,33 @@ fn zero_slash_keeps_nominators() { #[test] fn six_session_delay() { - ExtBuilder::default().build_and_execute(|| { + ExtBuilder::default().initialize_first_session(false).build_and_execute(|| { use pallet_session::SessionManager; let val_set = Session::validators(); let init_session = Session::current_index(); - let init_active_era = Staking::active_era().unwrap().index; + let init_active_era = active_era(); + // pallet-session is delaying session by one, thus the next session to plan is +2. assert_eq!(>::new_session(init_session + 2), None); - assert_eq!(>::new_session(init_session + 3), Some(val_set.clone())); + assert_eq!( + >::new_session(init_session + 3), + Some(val_set.clone()) + ); assert_eq!(>::new_session(init_session + 4), None); assert_eq!(>::new_session(init_session + 5), None); - assert_eq!(>::new_session(init_session + 6), Some(val_set.clone())); + assert_eq!( + >::new_session(init_session + 6), + Some(val_set.clone()) + ); >::end_session(init_session); >::start_session(init_session + 1); - assert_eq!(Staking::active_era().unwrap().index, init_active_era); + assert_eq!(active_era(), init_active_era); + >::end_session(init_session + 1); >::start_session(init_session + 2); - assert_eq!(Staking::active_era().unwrap().index, init_active_era); + assert_eq!(active_era(), init_active_era); // Reward current era Staking::reward_by_ids(vec![(11, 1)]); @@ -4311,13 +3061,15 @@ fn six_session_delay() { // New active era is triggered here. 
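// --- illustrative sketch (editorial, not part of this patch) ----------------
// The planning rhythm asserted in `six_session_delay`: with pallet-session's
// one-session delay, `new_session(i)` hands out a validator set only on era
// boundaries, and the set planned at session `i` becomes active one session
// later. A deliberately simplified standalone model (three sessions per era
// assumed, as in the mock):
fn plans_new_set(session: u32, sessions_per_era: u32) -> bool {
	session % sessions_per_era == 0
}
// e.g. with 3 sessions per era, sets come out at sessions 3 and 6, and the
// active era only bumps when the following session actually starts.
// -----------------------------------------------------------------------------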
>::end_session(init_session + 2); >::start_session(init_session + 3); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + assert_eq!(active_era(), init_active_era + 1); + >::end_session(init_session + 3); >::start_session(init_session + 4); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + assert_eq!(active_era(), init_active_era + 1); + >::end_session(init_session + 4); >::start_session(init_session + 5); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + assert_eq!(active_era(), init_active_era + 1); // Reward current era Staking::reward_by_ids(vec![(21, 2)]); @@ -4325,7 +3077,7 @@ fn six_session_delay() { // New active era is triggered here. >::end_session(init_session + 5); >::start_session(init_session + 6); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 2); + assert_eq!(active_era(), init_active_era + 2); // That reward are correct assert_eq!(Staking::eras_reward_points(init_active_era).total, 1); @@ -4335,38 +3087,31 @@ fn six_session_delay() { #[test] fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward() { - // Test: - // * If nominator nomination is below the $MaxNominatorRewardedPerValidator other nominator - // then the nominator can't claim its reward - // * A nominator can't claim another nominator reward ExtBuilder::default().build_and_execute(|| { - for i in 0..=::MaxNominatorRewardedPerValidator::get() { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { let stash = 10_000 + i as AccountId; let controller = 20_000 + i as AccountId; let balance = 10_000 + i as Balance; Balances::make_free_balance_be(&stash, balance); - assert_ok!( - Staking::bond( - Origin::signed(stash), - controller, - balance, - RewardDestination::Stash - ) - ); + assert_ok!(Staking::bond( + Origin::signed(stash), + controller, + balance, + RewardDestination::Stash + )); assert_ok!(Staking::nominate(Origin::signed(controller), vec![11])); } - mock::start_era(1); + mock::start_active_era(1); - >::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. 
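// --- illustrative sketch (editorial, not part of this patch) ----------------
// Only the largest `MaxNominatorRewardedPerValidator` backers of a validator
// are paid, which the test below probes by bonding one nominator past the cap.
// Standalone model: sort an exposure list by stake and truncate.
fn top_rewarded(mut backers: Vec<(u64, u128)>, max: usize) -> Vec<(u64, u128)> {
	// keep the `max` highest stakes; everyone else gets no payout that era.
	backers.sort_by(|a, b| b.1.cmp(&a.1));
	backers.truncate(max);
	backers
}
// -----------------------------------------------------------------------------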
+ let _ = current_total_payout_for_duration(reward_time_per_era()); - mock::start_era(2); + mock::start_active_era(2); mock::make_all_reward_payment(1); // Assert only nominators from 1 to Max are rewarded - for i in 0..=::MaxNominatorRewardedPerValidator::get() { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; if stash == 10_000 { @@ -4381,7 +3126,7 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( #[test] fn set_history_depth_works() { ExtBuilder::default().build_and_execute(|| { - mock::start_era(10); + mock::start_active_era(10); Staking::set_history_depth(Origin::root(), 20, 0).unwrap(); assert!(::ErasTotalStake::contains_key(10 - 4)); assert!(::ErasTotalStake::contains_key(10 - 5)); @@ -4411,12 +3156,13 @@ fn test_payout_stakers() { bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); } - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(2); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Top 64 nominators of validator 11 automatically paid out, including the validator @@ -4433,30 +3179,42 @@ fn test_payout_stakers() { // We track rewards in `claimed_rewards` vec assert_eq!( Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![1] }) + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![1] + }) ); for i in 3..16 { Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); + + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(i); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); } // We track rewards in `claimed_rewards` vec assert_eq!( Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: (1..=14).collect() }) + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: (1..=14).collect() + }) ); for i in 16..100 { Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); + // compute and ensure the reward amount is greater than zero. 
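// --- illustrative sketch (editorial, not part of this patch) ----------------
// `claimed_rewards` is a sorted vec of era indices: a payout is rejected when
// the era is already present and recorded otherwise, which is what keeps the
// out-of-order claims (15, 23, 42, 69, 98) consistent. Standalone model:
fn try_claim(claimed: &mut Vec<u32>, era: u32) -> Result<(), &'static str> {
	match claimed.binary_search(&era) {
		Ok(_) => Err("AlreadyClaimed"),
		Err(pos) => {
			claimed.insert(pos, era); // keep the vec sorted
			Ok(())
		}
	}
}
// -----------------------------------------------------------------------------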
+ let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(i); } // We clean it up as history passes @@ -4464,7 +3222,13 @@ fn test_payout_stakers() { assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); assert_eq!( Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 98] }) + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![15, 98] + }) ); // Out of order claims works. @@ -4473,7 +3237,13 @@ fn test_payout_stakers() { assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 42)); assert_eq!( Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 23, 42, 69, 98] }) + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![15, 23, 42, 69, 98] + }) ); }); } @@ -4482,6 +3252,9 @@ fn test_payout_stakers() { fn payout_stakers_handles_basic_errors() { // Here we will test payouts handle all errors. ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + // Same setup as the test above let balance = 1000; bond_validator(11, 10, balance); // Default(64) @@ -4491,35 +3264,164 @@ fn payout_stakers_handles_basic_errors() { bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); } - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + + mock::start_active_era(2); // Wrong Era, too big - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 2), Error::::InvalidEraToReward); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 2), + Error::::InvalidEraToReward.with_weight(err_weight) + ); // Wrong Staker - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 10, 1), Error::::NotStash); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 10, 1), + Error::::NotStash.with_weight(err_weight) + ); for i in 3..100 { Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(i); } // We are at era 99, with history depth of 84 // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. 
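// --- illustrative sketch (editorial, not part of this patch) ----------------
// The era-claim window asserted below: with `history_depth` kept eras, only
// eras in `[current_era - history_depth, active_era)` are payable; anything
// older is pruned, anything newer is not finished. Standalone model:
fn claimable(era: u32, current_era: u32, active_era: u32, history_depth: u32) -> bool {
	era >= current_era.saturating_sub(history_depth) && era < active_era
}
// e.g. at era 99 with depth 84: eras 15..=98 pass while 14 and 99 are rejected,
// matching the `InvalidEraToReward` cases around this point in the test.
// -----------------------------------------------------------------------------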
- assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 14), Error::::InvalidEraToReward); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 99), Error::::InvalidEraToReward); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 14), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 99), + Error::::InvalidEraToReward.with_weight(err_weight) + ); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); // Can't claim again - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 15), Error::::AlreadyClaimed); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 98), Error::::AlreadyClaimed); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 15), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 98), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + }); +} + +#[test] +fn payout_stakers_handles_weight_refund() { + // Note: this test relies on the assumption that `payout_stakers_alive_staked` is solely used by + // `payout_stakers` to calculate the weight of each payout op. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let max_nom_rewarded = ::MaxNominatorRewardedPerValidator::get(); + // Make sure the configured value is meaningful for our use. + assert!(max_nom_rewarded >= 4); + let half_max_nom_rewarded = max_nom_rewarded / 2; + // Sanity check our max and half max nominator quantities. + assert!(half_max_nom_rewarded > 0); + assert!(max_nom_rewarded > half_max_nom_rewarded); + + let max_nom_rewarded_weight = + ::WeightInfo::payout_stakers_alive_staked(max_nom_rewarded); + let half_max_nom_rewarded_weight = + ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); + let zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); + assert!(zero_nom_payouts_weight > 0); + assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight); + assert!(max_nom_rewarded_weight > half_max_nom_rewarded_weight); + + let balance = 1000; + bond_validator(11, 10, balance); + + // Era 1 + start_active_era(1); + + // Reward just the validator. + Staking::reward_by_ids(vec![(11, 1)]); + + // Add some `half_max_nom_rewarded` nominators who will start backing the validator in the + // next era. + for i in 0..half_max_nom_rewarded { + bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); + } + + // Era 2 + start_active_era(2); + + // Collect payouts when there are no nominators + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); + + // The validator is not rewarded in this era; so there will be zero payouts to claim for + // this era. + + // Era 3 + start_active_era(3); + + // Collect payouts for an era where the validator did not receive any points. + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); + + // Reward the validator and its nominators. 
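// --- illustrative sketch (editorial, not part of this patch) ----------------
// The refund being asserted via `extract_actual_weight`: the dispatch is
// charged up-front for the worst case and reports its actual weight from the
// real nominator count, so a failing call only consumes the 0-nominator
// weight. Hypothetical linear weight model (constants are assumptions):
fn payout_weight(nominators: u64) -> u64 {
	const BASE: u64 = 100_000;
	const PER_NOMINATOR: u64 = 10_000;
	BASE + PER_NOMINATOR * nominators
}

fn refund(max_nominators: u64, actual_nominators: u64) -> u64 {
	// the caller is refunded the difference between pre-charged and actual.
	payout_weight(max_nominators).saturating_sub(payout_weight(actual_nominators))
}
// -----------------------------------------------------------------------------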
+ Staking::reward_by_ids(vec![(11, 1)]); + + // Era 4 + start_active_era(4); + + // Collect payouts when the validator has `half_max_nom_rewarded` nominators. + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), half_max_nom_rewarded_weight); + + // Add enough nominators so that we are at the limit. They will be active nominators + // in the next era. + for i in half_max_nom_rewarded..max_nom_rewarded { + bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); + } + + // Era 5 + start_active_era(5); + // We now have `max_nom_rewarded` nominators actively nominating our validator. + + // Reward the validator so we can collect for everyone in the next era. + Staking::reward_by_ids(vec![(11, 1)]); + + // Era 6 + start_active_era(6); + + // Collect payouts when the validator had `half_max_nom_rewarded` nominators. + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); + + // Try and collect payouts for an era that has already been collected. + let call = + TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert!(result.is_err()); + // When there is an error the consumed weight == weight when there are 0 nominator payouts. + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); }); } @@ -4538,7 +3440,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { claimed_rewards: vec![], }) ); - mock::start_era(5); + mock::start_active_era(5); bond_validator(11, 10, 1000); assert_eq!( Staking::ledger(&10), @@ -4550,7 +3452,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { claimed_rewards: (0..5).collect(), }) ); - mock::start_era(99); + mock::start_active_era(99); bond_validator(13, 12, 1000); assert_eq!( Staking::ledger(&12), @@ -4569,75 +3471,42 @@ fn bond_during_era_correctly_populates_claimed_rewards() { fn offences_weight_calculated_correctly() { ExtBuilder::default().nominate(true).build_and_execute(|| { // On offence with zero offenders: 4 Reads, 1 Write - let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); - assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), Ok(zero_offence_weight)); + let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); + assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), zero_offence_weight); // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes - let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(4, 5); + let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(4, 5); - let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> + let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> = (1..10).map(|i| OffenceDetails { - offender: (i, Staking::eras_stakers(Staking::active_era().unwrap().index, i)), + offender: (i, Staking::eras_stakers(active_era(), i)), reporters: vec![], } ).collect(); - 
assert_eq!(Staking::on_offence(&offenders, &[Perbill::from_percent(50)], 0), Ok(n_offence_unapplied_weight)); + assert_eq!(Staking::on_offence(&offenders, &[Perbill::from_percent(50)], 0), n_offence_unapplied_weight); // On Offence with one offenders, Applied let one_offender = [ OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), + offender: (11, Staking::eras_stakers(active_era(), 11)), reporters: vec![1], }, ]; let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes - let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(rw, rw) + let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) + + ::DbWeight::get().reads_writes(rw, rw) // One `slash_cost` - + ::DbWeight::get().reads_writes(6, 5) + + ::DbWeight::get().reads_writes(6, 5) // `slash_cost` * nominators (1) - + ::DbWeight::get().reads_writes(6, 5) + + ::DbWeight::get().reads_writes(6, 5) // `reward_cost` * reporters (1) - + ::DbWeight::get().reads_writes(2, 2); + + ::DbWeight::get().reads_writes(2, 2); - assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0), Ok(one_offence_unapplied_weight)); - }); -} - -#[test] -fn on_initialize_weight_is_correct() { - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - assert_eq!(Validators::::iter().count(), 0); - assert_eq!(Nominators::::iter().count(), 0); - // When this pallet has nothing, we do 4 reads each block - let base_weight = ::DbWeight::get().reads(4); - assert_eq!(base_weight, Staking::on_initialize(0)); - }); - - ExtBuilder::default() - .offchain_election_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - crate::tests::offchain_election::build_offchain_election_test_ext(); - run_to_block(11); - Staking::on_finalize(System::block_number()); - System::set_block_number((System::block_number() + 1).into()); - Timestamp::set_timestamp(System::block_number() * 1000 + INIT_TIMESTAMP); - Session::on_initialize(System::block_number()); - - assert_eq!(Validators::::iter().count(), 4); - assert_eq!(Nominators::::iter().count(), 5); - // With 4 validators and 5 nominator, we should increase weight by: - // - (4 + 5) reads - // - 3 Writes - let final_weight = ::DbWeight::get().reads_writes(4 + 9, 3); - assert_eq!(final_weight, Staking::on_initialize(System::block_number())); + assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0), one_offence_unapplied_weight); }); } @@ -4655,12 +3524,11 @@ fn payout_creates_controller() { assert_ok!(Balances::transfer(Origin::signed(1337), 1234, 100)); assert_eq!(Balances::free_balance(1337), 0); - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + // compute and ensure the reward amount is greater than zero. 
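// --- illustrative sketch (editorial, not part of this patch) ----------------
// The offence-weight assertions above are pure `reads_writes` arithmetic over
// per-operation DB costs. Standalone model (the per-op values stand in for
// `RocksDbWeight` and are assumptions):
const READ: u64 = 25_000_000;
const WRITE: u64 = 100_000_000;

fn reads_writes(r: u64, w: u64) -> u64 {
	r * READ + w * WRITE
}

// e.g. an applied offence with `n` offenders starts from the base 4 reads and
// 1 write, then adds per-offender slashing reads/writes in the same fashion as
// the assertions above (slash and reporter-reward costs omitted here).
fn one_offence_weight(n: u64) -> u64 {
	let rw = 3 + 3 * n;
	reads_writes(4, 1) + reads_writes(rw, rw)
}
// -----------------------------------------------------------------------------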
+ let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(2); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Controller is created @@ -4684,15 +3552,683 @@ fn payout_to_any_account_works() { // Reward Destination account doesn't exist assert_eq!(Balances::free_balance(42), 0); - mock::start_era(1); + mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); + // compute and ensure the reward amount is greater than zero. + let _ = current_total_payout_for_duration(reward_time_per_era()); + mock::start_active_era(2); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); // Payment is successful assert!(Balances::free_balance(42) > 0); }) } + +#[test] +fn session_buffering_with_offset() { + // similar to live-chains, have some offset for the first session + ExtBuilder::default() + .offset(2) + .period(5) + .session_per_era(5) + .build_and_execute(|| { + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + + start_session(1); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 1); + assert_eq!(System::block_number(), 2); + + start_session(2); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 2); + assert_eq!(System::block_number(), 7); + + start_session(3); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 3); + assert_eq!(System::block_number(), 12); + + // active era is lagging behind by one session, because of how session module works. + start_session(4); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 4); + assert_eq!(System::block_number(), 17); + + start_session(5); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 1); + assert_eq!(Session::current_index(), 5); + assert_eq!(System::block_number(), 22); + + // go all the way to active 2. + start_active_era(2); + assert_eq!(current_era(), 2); + assert_eq!(active_era(), 2); + assert_eq!(Session::current_index(), 10); + }); +} + +#[test] +fn session_buffering_no_offset() { + // no offset, first session starts immediately + ExtBuilder::default() + .offset(0) + .period(5) + .session_per_era(5) + .build_and_execute(|| { + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 0); + + start_session(1); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 1); + assert_eq!(System::block_number(), 5); + + start_session(2); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 2); + assert_eq!(System::block_number(), 10); + + start_session(3); + assert_eq!(current_era(), 0); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 3); + assert_eq!(System::block_number(), 15); + + // active era is lagging behind by one session, because of how session module works. 
+ start_session(4); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 0); + assert_eq!(Session::current_index(), 4); + assert_eq!(System::block_number(), 20); + + start_session(5); + assert_eq!(current_era(), 1); + assert_eq!(active_era(), 1); + assert_eq!(Session::current_index(), 5); + assert_eq!(System::block_number(), 25); + + // go all the way to active 2. + start_active_era(2); + assert_eq!(current_era(), 2); + assert_eq!(active_era(), 2); + assert_eq!(Session::current_index(), 10); + }); +} + +#[test] +fn cannot_rebond_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(10) + .balance_factor(10) + .build_and_execute(|| { + // initial stuff. + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 10 * 1000, + active: 10 * 1000, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // unbond all of it. must be chilled first. + assert_ok!(Staking::chill(Origin::signed(20))); + assert_ok!(Staking::unbond(Origin::signed(20), 10 * 1000)); + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 10 * 1000, + active: 0, + unlocking: vec![UnlockChunk { value: 10 * 1000, era: 3 }], + claimed_rewards: vec![] + } + ); + + // now bond a wee bit more + assert_noop!(Staking::rebond(Origin::signed(20), 5), Error::::InsufficientBond); + }) +} + +#[test] +fn cannot_bond_extra_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(10) + .balance_factor(10) + .build_and_execute(|| { + // initial stuff. + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 10 * 1000, + active: 10 * 1000, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // unbond all of it. must be chilled first. + assert_ok!(Staking::chill(Origin::signed(20))); + assert_ok!(Staking::unbond(Origin::signed(20), 10 * 1000)); + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 10 * 1000, + active: 0, + unlocking: vec![UnlockChunk { value: 10 * 1000, era: 3 }], + claimed_rewards: vec![] + } + ); + + // now bond a wee bit more + assert_noop!( + Staking::bond_extra(Origin::signed(21), 5), + Error::::InsufficientBond, + ); + }) +} + +#[test] +fn do_not_die_when_active_is_ed() { + let ed = 10; + ExtBuilder::default() + .existential_deposit(ed) + .balance_factor(ed) + .build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000 * ed, + active: 1000 * ed, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // when unbond all of it except ed. 
+ assert_ok!(Staking::unbond(Origin::signed(20), 999 * ed)); + start_active_era(3); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(20), 100)); + + // then + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: ed, + active: ed, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + }) +} + +#[test] +fn on_finalize_weight_is_nonzero() { + ExtBuilder::default().build_and_execute(|| { + let on_finalize_weight = ::DbWeight::get().reads(1); + assert!(>::on_initialize(1) >= on_finalize_weight); + }) +} + +mod election_data_provider { + use super::*; + use frame_election_provider_support::ElectionDataProvider; + + #[test] + fn targets_2sec_block() { + let mut validators = 1000; + while ::WeightInfo::get_npos_targets(validators) < + 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + { + validators += 1; + } + + println!("Can create a snapshot of {} validators in 2sec block", validators); + } + + #[test] + fn voters_2sec_block() { + // we assume a network only wants up to 1000 validators in most cases, thus having 2000 + // candidates is as high as it gets. + let validators = 2000; + // we assume the worse case: each validator also has a slashing span. + let slashing_spans = validators; + let mut nominators = 1000; + + while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) < + 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + { + nominators += 1; + } + + println!( + "Can create a snapshot of {} nominators [{} validators, each 1 slashing] in 2sec block", + nominators, validators + ); + } + + #[test] + fn voters_include_self_vote() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert!(>::iter().map(|(x, _)| x).all(|v| Staking::voters(None) + .unwrap() + .into_iter() + .find(|(w, _, t)| { v == *w && t[0] == *w }) + .is_some())) + }) + } + + #[test] + fn voters_exclude_slashed() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!( + >::voters(None) + .unwrap() + .iter() + .find(|x| x.0 == 101) + .unwrap() + .2, + vec![11, 21] + ); + + start_active_era(1); + add_slash(&11); + + // 11 is gone. + start_active_era(2); + assert_eq!( + >::voters(None) + .unwrap() + .iter() + .find(|x| x.0 == 101) + .unwrap() + .2, + vec![21] + ); + + // resubmit and it is back + assert_ok!(Staking::nominate(Origin::signed(100), vec![11, 21])); + assert_eq!( + >::voters(None) + .unwrap() + .iter() + .find(|x| x.0 == 101) + .unwrap() + .2, + vec![11, 21] + ); + }) + } + + #[test] + fn respects_len_limits() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Staking::voters(Some(1)).unwrap_err(), "Voter snapshot too big"); + assert_eq!(Staking::targets(Some(1)).unwrap_err(), "Target snapshot too big"); + }); + } + + #[test] + fn estimate_next_election_works() { + ExtBuilder::default().session_per_era(5).period(5).build_and_execute(|| { + // first session is always length 0. 
+		for b in 1..20 {
+			run_to_block(b);
+			assert_eq!(Staking::next_election_prediction(System::block_number()), 20);
+		}
+
+		// election
+		run_to_block(20);
+		assert_eq!(Staking::next_election_prediction(System::block_number()), 45);
+		assert_eq!(staking_events().len(), 1);
+		assert_eq!(*staking_events().last().unwrap(), Event::StakersElected);
+
+		for b in 21..45 {
+			run_to_block(b);
+			assert_eq!(Staking::next_election_prediction(System::block_number()), 45);
+		}
+
+		// election
+		run_to_block(45);
+		assert_eq!(Staking::next_election_prediction(System::block_number()), 70);
+		assert_eq!(staking_events().len(), 3);
+		assert_eq!(*staking_events().last().unwrap(), Event::StakersElected);
+
+		Staking::force_no_eras(Origin::root()).unwrap();
+		assert_eq!(Staking::next_election_prediction(System::block_number()), u64::MAX);
+
+		Staking::force_new_era_always(Origin::root()).unwrap();
+		assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5);
+
+		Staking::force_new_era(Origin::root()).unwrap();
+		assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5);
+
+		// Do a fail election
+		MinimumValidatorCount::<Test>::put(1000);
+		run_to_block(50);
+		// Election: failed, next session is a new election
+		assert_eq!(Staking::next_election_prediction(System::block_number()), 50 + 5);
+		// The new era is still forced until a new era is planned.
+		assert_eq!(ForceEra::<Test>::get(), Forcing::ForceNew);
+
+		MinimumValidatorCount::<Test>::put(2);
+		run_to_block(55);
+		assert_eq!(Staking::next_election_prediction(System::block_number()), 55 + 25);
+		assert_eq!(staking_events().len(), 6);
+		assert_eq!(*staking_events().last().unwrap(), Event::StakersElected);
+		// The new era has been planned, forcing is changed from `ForceNew` to `NotForcing`.
+		assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
+		})
+	}
+}
+
+#[test]
+#[should_panic]
+fn count_check_works() {
+	ExtBuilder::default().build_and_execute(|| {
+		// We should never insert into the validators or nominators map directly as this will
+		// not keep track of the count. This test should panic as we verify the count is accurate
+		// after every test using the `post_checks` in `mock`.
+		Validators::<Test>::insert(987654321, ValidatorPrefs::default());
+		Nominators::<Test>::insert(
+			987654321,
+			Nominations { targets: vec![], submitted_in: Default::default(), suppressed: false },
+		);
+	})
+}
+
+#[test]
+fn min_bond_checks_work() {
+	ExtBuilder::default()
+		.existential_deposit(100)
+		.balance_factor(100)
+		.min_nominator_bond(1_000)
+		.min_validator_bond(1_500)
+		.build_and_execute(|| {
+			// 500 is not enough for any role
+			assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller));
+			assert_noop!(
+				Staking::nominate(Origin::signed(4), vec![1]),
+				Error::<Test>::InsufficientBond
+			);
+			assert_noop!(
+				Staking::validate(Origin::signed(4), ValidatorPrefs::default()),
+				Error::<Test>::InsufficientBond,
+			);
+
+			// 1000 is enough for nominator
+			assert_ok!(Staking::bond_extra(Origin::signed(3), 500));
+			assert_ok!(Staking::nominate(Origin::signed(4), vec![1]));
+			assert_noop!(
+				Staking::validate(Origin::signed(4), ValidatorPrefs::default()),
+				Error::<Test>::InsufficientBond,
+			);
+
+			// 1500 is enough for validator
+			assert_ok!(Staking::bond_extra(Origin::signed(3), 500));
+			assert_ok!(Staking::nominate(Origin::signed(4), vec![1]));
+			assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default()));
+
+			// Can't unbond anything as validator
+			assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::<Test>::InsufficientBond);
+
+			// Once they are a nominator, they can unbond 500
+			assert_ok!(Staking::nominate(Origin::signed(4), vec![1]));
+			assert_ok!(Staking::unbond(Origin::signed(4), 500));
+			assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::<Test>::InsufficientBond);
+
+			// Once they are chilled they can unbond everything
+			assert_ok!(Staking::chill(Origin::signed(4)));
+			assert_ok!(Staking::unbond(Origin::signed(4), 1000));
+		})
+}
+
+#[test]
+fn chill_other_works() {
+	ExtBuilder::default()
+		.existential_deposit(100)
+		.balance_factor(100)
+		.min_nominator_bond(1_000)
+		.min_validator_bond(1_500)
+		.build_and_execute(|| {
+			let initial_validators = CounterForValidators::<Test>::get();
+			let initial_nominators = CounterForNominators::<Test>::get();
+			for i in 0..15 {
+				let a = 4 * i;
+				let b = 4 * i + 1;
+				let c = 4 * i + 2;
+				let d = 4 * i + 3;
+				Balances::make_free_balance_be(&a, 100_000);
+				Balances::make_free_balance_be(&b, 100_000);
+				Balances::make_free_balance_be(&c, 100_000);
+				Balances::make_free_balance_be(&d, 100_000);
+
+				// Nominator
+				assert_ok!(Staking::bond(
+					Origin::signed(a),
+					b,
+					1000,
+					RewardDestination::Controller
+				));
+				assert_ok!(Staking::nominate(Origin::signed(b), vec![1]));
+
+				// Validator
+				assert_ok!(Staking::bond(
+					Origin::signed(c),
+					d,
+					1500,
+					RewardDestination::Controller
+				));
+				assert_ok!(Staking::validate(Origin::signed(d), ValidatorPrefs::default()));
+			}
+
+			// To chill other users, we need to:
+			// * Set a minimum bond amount
+			// * Set a limit
+			// * Set a threshold
+			//
+			// If any of these are missing, we do not have enough information to allow the
+			// `chill_other` to succeed from one user to another.
+
+			// Can't chill these users
+			assert_noop!(
+				Staking::chill_other(Origin::signed(1337), 1),
+				Error::<Test>::CannotChillOther
+			);
+			assert_noop!(
+				Staking::chill_other(Origin::signed(1337), 3),
+				Error::<Test>::CannotChillOther
+			);
+
+			// Change the minimum bond... but no limits.
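// --- illustrative sketch (editorial, not part of this patch) ----------------
// `chill_other` only succeeds once all three pieces of configuration are in
// place, as the test walks through. Standalone model with hypothetical config
// (the exact permission rule is simplified here):
struct ChillConfig {
	min_bond: Option<u128>,
	max_count: Option<u32>,
	threshold_percent: Option<u8>,
}

fn can_chill_other(cfg: &ChillConfig, current_count: u32, bond: u128) -> bool {
	match (cfg.min_bond, cfg.max_count, cfg.threshold_percent) {
		(Some(min), Some(max), Some(pct)) => {
			// chill-able only when the target is under-bonded *and* the staker
			// count has grown past the configured share of the cap.
			bond < min && current_count as u64 * 100 > max as u64 * pct as u64
		}
		// any missing knob => CannotChillOther
		_ => false,
	}
}
// -----------------------------------------------------------------------------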
+ assert_ok!(Staking::set_staking_limits(Origin::root(), 1_500, 2_000, None, None, None)); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::::CannotChillOther + ); + + // Add limits, but no threshold + assert_ok!(Staking::set_staking_limits( + Origin::root(), + 1_500, + 2_000, + Some(10), + Some(10), + None + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::::CannotChillOther + ); + + // Add threshold, but no limits + assert_ok!(Staking::set_staking_limits( + Origin::root(), + 1_500, + 2_000, + None, + None, + Some(Percent::from_percent(0)) + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::::CannotChillOther + ); + + // Add threshold and limits + assert_ok!(Staking::set_staking_limits( + Origin::root(), + 1_500, + 2_000, + Some(10), + Some(10), + Some(Percent::from_percent(75)) + )); + + // 16 people total because tests start with 2 active one + assert_eq!(CounterForNominators::::get(), 15 + initial_nominators); + assert_eq!(CounterForValidators::::get(), 15 + initial_validators); + + // Users can now be chilled down to 7 people, so we try to remove 9 of them (starting + // with 16) + for i in 6..15 { + let b = 4 * i + 1; + let d = 4 * i + 3; + assert_ok!(Staking::chill_other(Origin::signed(1337), b)); + assert_ok!(Staking::chill_other(Origin::signed(1337), d)); + } + + // chill a nominator. Limit is not reached, not chill-able + assert_eq!(CounterForNominators::::get(), 7); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::::CannotChillOther + ); + // chill a validator. Limit is reached, chill-able. 
+ assert_eq!(CounterForValidators::::get(), 9); + assert_ok!(Staking::chill_other(Origin::signed(1337), 3)); + }) +} + +#[test] +fn capped_stakers_works() { + ExtBuilder::default().build_and_execute(|| { + let validator_count = CounterForValidators::::get(); + assert_eq!(validator_count, 3); + let nominator_count = CounterForNominators::::get(); + assert_eq!(nominator_count, 1); + + // Change the maximums + let max = 10; + assert_ok!(Staking::set_staking_limits( + Origin::root(), + 10, + 10, + Some(max), + Some(max), + Some(Percent::from_percent(0)) + )); + + // can create `max - validator_count` validators + let mut some_existing_validator = AccountId::default(); + for i in 0..max - validator_count { + let (_, controller) = testing_utils::create_stash_controller::( + i + 10_000_000, + 100, + RewardDestination::Controller, + ) + .unwrap(); + assert_ok!(Staking::validate(Origin::signed(controller), ValidatorPrefs::default())); + some_existing_validator = controller; + } + + // but no more + let (_, last_validator) = testing_utils::create_stash_controller::( + 1337, + 100, + RewardDestination::Controller, + ) + .unwrap(); + + assert_noop!( + Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default()), + Error::::TooManyValidators, + ); + + // same with nominators + let mut some_existing_nominator = AccountId::default(); + for i in 0..max - nominator_count { + let (_, controller) = testing_utils::create_stash_controller::( + i + 20_000_000, + 100, + RewardDestination::Controller, + ) + .unwrap(); + assert_ok!(Staking::nominate(Origin::signed(controller), vec![1])); + some_existing_nominator = controller; + } + + // one more is too many + let (_, last_nominator) = testing_utils::create_stash_controller::( + 30_000_000, + 100, + RewardDestination::Controller, + ) + .unwrap(); + assert_noop!( + Staking::nominate(Origin::signed(last_nominator), vec![1]), + Error::::TooManyNominators + ); + + // Re-nominate works fine + assert_ok!(Staking::nominate(Origin::signed(some_existing_nominator), vec![1])); + // Re-validate works fine + assert_ok!(Staking::validate( + Origin::signed(some_existing_validator), + ValidatorPrefs::default() + )); + + // No problem when we set to `None` again + assert_ok!(Staking::set_staking_limits(Origin::root(), 10, 10, None, None, None)); + assert_ok!(Staking::nominate(Origin::signed(last_nominator), vec![1])); + assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); + }) +} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs new file mode 100644 index 0000000000000..0bcf179e29339 --- /dev/null +++ b/frame/staking/src/weights.rs @@ -0,0 +1,739 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_staking +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2021-08-18, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_staking +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/staking/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_staking. +pub trait WeightInfo { + fn bond() -> Weight; + fn bond_extra() -> Weight; + fn unbond() -> Weight; + fn withdraw_unbonded_update(s: u32, ) -> Weight; + fn withdraw_unbonded_kill(s: u32, ) -> Weight; + fn validate() -> Weight; + fn kick(k: u32, ) -> Weight; + fn nominate(n: u32, ) -> Weight; + fn chill() -> Weight; + fn set_payee() -> Weight; + fn set_controller() -> Weight; + fn set_validator_count() -> Weight; + fn force_no_eras() -> Weight; + fn force_new_era() -> Weight; + fn force_new_era_always() -> Weight; + fn set_invulnerables(v: u32, ) -> Weight; + fn force_unstake(s: u32, ) -> Weight; + fn cancel_deferred_slash(s: u32, ) -> Weight; + fn payout_stakers_dead_controller(n: u32, ) -> Weight; + fn payout_stakers_alive_staked(n: u32, ) -> Weight; + fn rebond(l: u32, ) -> Weight; + fn set_history_depth(e: u32, ) -> Weight; + fn reap_stash(s: u32, ) -> Weight; + fn new_era(v: u32, n: u32, ) -> Weight; + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; + fn get_npos_targets(v: u32, ) -> Weight; + fn set_staking_limits() -> Weight; + fn chill_other() -> Weight; +} + +/// Weights for pallet_staking using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// Storage: Staking Bonded (r:1 w:1)
+	// Storage: Staking Ledger (r:1 w:1)
+	// Storage: Staking CurrentEra (r:1 w:0)
+	// Storage: Staking HistoryDepth (r:1 w:0)
+	// Storage: Balances Locks (r:1 w:1)
+	// Storage: Staking Payee (r:0 w:1)
+	fn bond() -> Weight {
+		(73_523_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(5 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	// Storage: Staking Bonded (r:1 w:0)
+	// Storage: Staking Ledger (r:1 w:1)
+	// Storage: Balances Locks (r:1 w:1)
+	fn bond_extra() -> Weight {
+		(58_129_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(3 as Weight))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+	}
+	// Storage: Staking Ledger (r:1 w:1)
+	// Storage: Staking Nominators (r:1 w:0)
+	// Storage: Staking Validators (r:1 w:0)
+	// Storage: Staking CurrentEra (r:1 w:0)
+	// Storage: Balances Locks (r:1 w:1)
+	// Storage: System Account (r:1 w:1)
+	fn unbond() -> Weight {
+		(61_542_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(6 as Weight))
+			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+	}
+	// Storage: Staking Ledger (r:1 w:1)
+	// Storage: Staking CurrentEra (r:1 w:0)
+	// Storage: Balances Locks (r:1 w:1)
+	// Storage: System Account (r:1 w:1)
+	fn withdraw_unbonded_update(s: u32, ) -> Weight {
+		(53_160_000 as Weight)
+			// Standard Error: 0
+			.saturating_add((53_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(T::DbWeight::get().reads(4 as Weight))
+			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+	}
+	// Storage: Staking Ledger (r:1 w:1)
+	// Storage: Staking CurrentEra (r:1 w:0)
+	// Storage: Staking Bonded (r:1 w:1)
+	// Storage: Staking SlashingSpans (r:1 w:0)
+	// Storage: Staking Validators (r:1 w:0)
+	// Storage: Staking Nominators (r:1 w:0)
+	// Storage: System Account (r:1 w:1)
+	// Storage: Balances Locks (r:1 w:1)
+	// Storage: Staking Payee (r:0 w:1)
+	// Storage: Staking SpanSlash (r:0 w:2)
+	fn withdraw_unbonded_kill(s: u32, ) -> Weight {
+		(85_826_000 as Weight)
+			// Standard Error: 2_000
+			.saturating_add((2_453_000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(T::DbWeight::get().reads(8 as Weight))
+			.saturating_add(T::DbWeight::get().writes(6 as Weight))
+			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight)))
+	}
+	// Storage: Staking Ledger (r:1 w:0)
+	// Storage: Staking MinValidatorBond (r:1 w:0)
+	// Storage: Staking Validators (r:1 w:1)
+	// Storage: Staking MaxValidatorsCount (r:1 w:0)
+	// Storage: Staking Nominators (r:1 w:0)
+	// Storage: Staking CounterForValidators (r:1 w:1)
+	fn validate() -> Weight {
+		(34_936_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(6 as Weight))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+	}
+	// Storage: Staking Ledger (r:1 w:0)
+	// Storage: Staking Nominators (r:1 w:1)
+	fn kick(k: u32, ) -> Weight {
+		(23_493_000 as Weight)
+			// Standard Error: 17_000
+			.saturating_add((16_632_000 as Weight).saturating_mul(k as Weight))
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+			.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight)))
+			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight)))
+	}
+	// Storage: Staking Ledger (r:1 w:0)
+	// Storage: Staking MinNominatorBond (r:1 w:0)
+	// Storage: Staking Nominators (r:1 w:1)
+	// Storage: Staking MaxNominatorsCount (r:1 w:0)
+	// Storage: 
Staking Validators (r:2 w:0) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + fn nominate(n: u32, ) -> Weight { + (41_733_000 as Weight) + // Standard Error: 11_000 + .saturating_add((5_840_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + fn chill() -> Weight { + (17_901_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + fn set_payee() -> Weight { + (13_760_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:2 w:2) + fn set_controller() -> Weight { + (28_388_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Staking ValidatorCount (r:0 w:1) + fn set_validator_count() -> Weight { + (2_537_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Staking ForceEra (r:0 w:1) + fn force_no_eras() -> Weight { + (2_749_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Staking ForceEra (r:0 w:1) + fn force_new_era() -> Weight { + (2_834_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Staking ForceEra (r:0 w:1) + fn force_new_era_always() -> Weight { + (2_800_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Staking Invulnerables (r:0 w:1) + fn set_invulnerables(v: u32, ) -> Weight { + (3_429_000 as Weight) + // Standard Error: 0 + .saturating_add((56_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking SpanSlash (r:0 w:2) + fn force_unstake(s: u32, ) -> Weight { + (61_799_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_451_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Staking UnappliedSlashes (r:1 w:1) + fn cancel_deferred_slash(s: u32, ) -> Weight { + (3_383_988_000 as Weight) + // Standard Error: 223_000 + .saturating_add((19_981_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:0) 
+ // Storage: Staking ErasValidatorPrefs (r:1 w:0) + // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) + fn payout_stakers_dead_controller(n: u32, ) -> Weight { + (124_714_000 as Weight) + // Standard Error: 23_000 + .saturating_add((47_575_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(10 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:1 w:0) + // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) + // Storage: Balances Locks (r:2 w:2) + fn payout_stakers_alive_staked(n: u32, ) -> Weight { + (160_203_000 as Weight) + // Standard Error: 24_000 + .saturating_add((61_321_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(11 as Weight)) + .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn rebond(l: u32, ) -> Weight { + (49_593_000 as Weight) + // Standard Error: 3_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:1) + // Storage: Staking ErasStakersClipped (r:0 w:2) + // Storage: Staking ErasValidatorPrefs (r:0 w:2) + // Storage: Staking ErasValidatorReward (r:0 w:1) + // Storage: Staking ErasRewardPoints (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:2) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + fn set_history_depth(e: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 71_000 + .saturating_add((35_237_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + } + // Storage: System Account (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking SlashingSpans (r:1 w:1) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking CounterForValidators (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking SpanSlash (r:0 w:1) + fn reap_stash(s: u32, ) -> Weight { + (72_484_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_452_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: 
Staking CounterForNominators (r:1 w:0) + // Storage: Staking CounterForValidators (r:1 w:0) + // Storage: Staking Validators (r:2 w:0) + // Storage: Staking Bonded (r:101 w:0) + // Storage: Staking Ledger (r:101 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Nominators (r:101 w:0) + // Storage: Staking ValidatorCount (r:1 w:0) + // Storage: Staking MinimumValidatorCount (r:1 w:0) + // Storage: Staking CurrentEra (r:1 w:1) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasStakersClipped (r:0 w:1) + // Storage: Staking ErasValidatorPrefs (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:1) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + fn new_era(v: u32, n: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 856_000 + .saturating_add((305_057_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 43_000 + .saturating_add((47_890_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(9 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + // Storage: Staking Validators (r:501 w:0) + // Storage: Staking Bonded (r:1500 w:0) + // Storage: Staking Ledger (r:1500 w:0) + // Storage: Staking SlashingSpans (r:21 w:0) + // Storage: Staking Nominators (r:1001 w:0) + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 98_000 + .saturating_add((25_610_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 98_000 + .saturating_add((28_064_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_346_000 + .saturating_add((18_123_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Staking Validators (r:501 w:0) + fn get_npos_targets(v: u32, ) -> Weight { + (30_422_000 as Weight) + // Standard Error: 33_000 + .saturating_add((11_252_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + } + // Storage: Staking MinValidatorBond (r:0 w:1) + // Storage: Staking MaxValidatorsCount (r:0 w:1) + // Storage: Staking ChillThreshold (r:0 w:1) + // Storage: Staking MaxNominatorsCount (r:0 w:1) + // Storage: Staking MinNominatorBond (r:0 w:1) + fn set_staking_limits() -> Weight { + (6_486_000 as Weight) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking ChillThreshold (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking MaxValidatorsCount (r:1 w:0) + // Storage: Staking CounterForValidators (r:1 w:1) + // Storage: Staking MinValidatorBond (r:1 w:0) + fn chill_other() -> Weight { + (58_222_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards 
compatibility and tests +impl WeightInfo for () { + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Payee (r:0 w:1) + fn bond() -> Weight { + (73_523_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn bond_extra() -> Weight { + (58_129_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unbond() -> Weight { + (61_542_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn withdraw_unbonded_update(s: u32, ) -> Weight { + (53_160_000 as Weight) + // Standard Error: 0 + .saturating_add((53_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking SpanSlash (r:0 w:2) + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + (85_826_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_453_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking MinValidatorBond (r:1 w:0) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking MaxValidatorsCount (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking CounterForValidators (r:1 w:1) + fn validate() -> Weight { + (34_936_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + fn kick(k: u32, ) -> Weight { + (23_493_000 as Weight) + // Standard Error: 17_000 + .saturating_add((16_632_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking MinNominatorBond (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking MaxNominatorsCount (r:1 w:0) + // Storage: 
Staking Validators (r:2 w:0) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + fn nominate(n: u32, ) -> Weight { + (41_733_000 as Weight) + // Standard Error: 11_000 + .saturating_add((5_840_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + fn chill() -> Weight { + (17_901_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + fn set_payee() -> Weight { + (13_760_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:2 w:2) + fn set_controller() -> Weight { + (28_388_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Staking ValidatorCount (r:0 w:1) + fn set_validator_count() -> Weight { + (2_537_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Staking ForceEra (r:0 w:1) + fn force_no_eras() -> Weight { + (2_749_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Staking ForceEra (r:0 w:1) + fn force_new_era() -> Weight { + (2_834_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Staking ForceEra (r:0 w:1) + fn force_new_era_always() -> Weight { + (2_800_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Staking Invulnerables (r:0 w:1) + fn set_invulnerables(v: u32, ) -> Weight { + (3_429_000 as Weight) + // Standard Error: 0 + .saturating_add((56_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking SpanSlash (r:0 w:2) + fn force_unstake(s: u32, ) -> Weight { + (61_799_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_451_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Staking UnappliedSlashes (r:1 w:1) + fn cancel_deferred_slash(s: u32, ) -> Weight { + (3_383_988_000 as Weight) + // Standard Error: 223_000 + .saturating_add((19_981_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // 
Storage: Staking ErasRewardPoints (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:1 w:0) + // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) + fn payout_stakers_dead_controller(n: u32, ) -> Weight { + (124_714_000 as Weight) + // Standard Error: 23_000 + .saturating_add((47_575_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(10 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:1 w:0) + // Storage: Staking Payee (r:2 w:0) + // Storage: System Account (r:2 w:2) + // Storage: Balances Locks (r:2 w:2) + fn payout_stakers_alive_staked(n: u32, ) -> Weight { + (160_203_000 as Weight) + // Standard Error: 24_000 + .saturating_add((61_321_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(11 as Weight)) + .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn rebond(l: u32, ) -> Weight { + (49_593_000 as Weight) + // Standard Error: 3_000 + .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:1) + // Storage: Staking ErasStakersClipped (r:0 w:2) + // Storage: Staking ErasValidatorPrefs (r:0 w:2) + // Storage: Staking ErasValidatorReward (r:0 w:1) + // Storage: Staking ErasRewardPoints (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:2) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + fn set_history_depth(e: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 71_000 + .saturating_add((35_237_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + } + // Storage: System Account (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking SlashingSpans (r:1 w:1) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking CounterForValidators (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking SpanSlash (r:0 w:1) + fn reap_stash(s: u32, ) -> Weight { + (72_484_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_452_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Staking CounterForNominators (r:1 w:0) + // Storage: Staking CounterForValidators (r:1 w:0) + // Storage: Staking Validators (r:2 w:0) + // Storage: Staking Bonded (r:101 w:0) + // Storage: Staking Ledger (r:101 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Nominators (r:101 w:0) + // Storage: Staking ValidatorCount (r:1 w:0) + // Storage: Staking MinimumValidatorCount (r:1 w:0) + // Storage: Staking CurrentEra (r:1 w:1) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasStakersClipped (r:0 w:1) + // Storage: Staking ErasValidatorPrefs (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:1) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + fn new_era(v: u32, n: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 856_000 + .saturating_add((305_057_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 43_000 + .saturating_add((47_890_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(9 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + // Storage: Staking Validators (r:501 w:0) + // Storage: Staking Bonded (r:1500 w:0) + // Storage: Staking Ledger (r:1500 w:0) + // Storage: Staking SlashingSpans (r:21 w:0) + // Storage: Staking Nominators (r:1001 w:0) + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 98_000 + .saturating_add((25_610_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 98_000 + .saturating_add((28_064_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_346_000 + .saturating_add((18_123_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + } + // Storage: Staking Validators (r:501 w:0) + fn get_npos_targets(v: u32, ) -> Weight { + (30_422_000 as Weight) + // Standard Error: 33_000 + .saturating_add((11_252_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + } + // Storage: Staking MinValidatorBond (r:0 w:1) + // Storage: Staking MaxValidatorsCount (r:0 w:1) + // Storage: Staking ChillThreshold (r:0 w:1) + // Storage: Staking MaxNominatorsCount (r:0 w:1) + // Storage: Staking MinNominatorBond (r:0 w:1) + fn set_staking_limits() -> Weight { + (6_486_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking ChillThreshold (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking MaxValidatorsCount (r:1 w:0) + // Storage: Staking CounterForValidators (r:1 w:1) + // Storage: Staking MinValidatorBond (r:1 w:0) + fn chill_other() -> Weight { + (58_222_000 as Weight) + 
.saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index 4713baea518f9..baacb66d5c751 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sudo" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,25 +13,26 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-std/std", "sp-io/std", "sp-runtime/std", "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/sudo/README.md b/frame/sudo/README.md index 233727ac1bd28..ac7de01615f3f 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -1,6 +1,6 @@ # Sudo Module -- [`sudo::Trait`](https://docs.rs/pallet-sudo/latest/pallet_sudo/trait.Trait.html) +- [`sudo::Config`](https://docs.rs/pallet-sudo/latest/pallet_sudo/trait.Config.html) - [`Call`](https://docs.rs/pallet-sudo/latest/pallet_sudo/enum.Call.html) ## Overview @@ -38,10 +38,10 @@ This is an example of a module that exposes a privileged function: use frame_support::{decl_module, dispatch}; use frame_system::ensure_root; -pub trait Trait: frame_system::Trait {} +pub trait Config: frame_system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn privileged_function(origin) -> dispatch::DispatchResult { ensure_root(origin)?; @@ -64,7 +64,7 @@ You need to set an initial superuser account as the sudo `key`. 
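One way to set that key is through the pallet's `GenesisConfig`; the sketch below mirrors the test-externalities builder used elsewhere in this change, with `Test` standing in for a concrete runtime that includes this pallet under the name `sudo`:

```rust
use frame_support::traits::GenesisBuild;

// Seed storage with `root_key` as the sudo key (sketch; `Test` is a stand-in runtime).
pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities {
	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
	sudo::GenesisConfig::<Test> { key: root_key }
		.assimilate_storage(&mut t)
		.unwrap();
	t.into()
}
```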
* [Democracy](https://docs.rs/pallet-democracy/latest/pallet_democracy/) [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html [`Origin`]: https://docs.substrate.dev/docs/substrate-types -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 0d21e44326668..bab93ffcee162 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Sudo Module +//! # Sudo Pallet //! -//! - [`sudo::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! The Sudo module allows for a single account (called the "sudo key") +//! The Sudo pallet allows for a single account (called the "sudo key") //! to execute dispatchable functions that require a `Root` call //! or designate a new account to replace them as the sudo key. //! Only one account can be the sudo key at a time. @@ -31,7 +31,7 @@ //! //! ### Dispatchable Functions //! -//! Only the sudo key can call the dispatchable functions from the Sudo module. +//! Only the sudo key can call the dispatchable functions from the Sudo pallet. //! //! * `sudo` - Make a `Root` call to a dispatchable function. //! * `set_key` - Assign a new account to be the sudo key. @@ -40,86 +40,93 @@ //! //! ### Executing Privileged Functions //! -//! The Sudo module itself is not intended to be used within other modules. -//! Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in other modules. -//! You can execute these privileged functions by calling `sudo` with the sudo key account. -//! Privileged functions cannot be directly executed via an extrinsic. +//! The Sudo pallet itself is not intended to be used within other pallets. +//! Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in +//! other pallets. You can execute these privileged functions by calling `sudo` with the sudo key +//! account. Privileged functions cannot be directly executed via an extrinsic. //! //! Learn more about privileged functions and `Root` origin in the [`Origin`] type documentation. //! //! ### Simple Code Snippet //! -//! This is an example of a module that exposes a privileged function: +//! This is an example of a pallet that exposes a privileged function: //! //! ``` -//! use frame_support::{decl_module, dispatch}; -//! use frame_system::ensure_root; +//! +//! #[frame_support::pallet] +//! pub mod logger { +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; +//! use super::*; //! -//! pub trait Trait: frame_system::Trait {} +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn privileged_function(origin) -> dispatch::DispatchResult { +//! #[pallet::pallet] +//! pub struct Pallet(PhantomData); +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn privileged_function(origin: OriginFor) -> DispatchResultWithPostInfo { //! ensure_root(origin)?; //! 
//! // do something... //! -//! Ok(()) +//! Ok(().into()) //! } -//! } +//! } //! } //! # fn main() {} //! ``` //! //! ## Genesis Config //! -//! The Sudo module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +//! The Sudo pallet depends on the [`GenesisConfig`]. //! You need to set an initial superuser account as the sudo `key`. //! -//! ## Related Modules +//! ## Related Pallets //! //! * [Democracy](../pallet_democracy/index.html) //! -//! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html //! [`Origin`]: https://docs.substrate.dev/docs/substrate-types #![cfg_attr(not(feature = "std"), no_std)] +use sp_runtime::{traits::StaticLookup, DispatchResult}; use sp_std::prelude::*; -use sp_runtime::{DispatchResult, traits::StaticLookup}; -use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, -}; -use frame_support::{ - weights::{Weight, GetDispatchInfo, Pays}, - traits::{UnfilteredDispatchable, Get}, - dispatch::DispatchResultWithPostInfo, -}; -use frame_system::ensure_signed; +use frame_support::{traits::UnfilteredDispatchable, weights::GetDispatchInfo}; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +pub use pallet::*; - /// A sudo-able call. - type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; -} +#[frame_support::pallet] +pub mod pallet { + use super::{DispatchResult, *}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; -decl_module! { - /// Sudo module declaration. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - fn deposit_event() = default; + /// A sudo-able call. + type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; + } + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(PhantomData); + + #[pallet::call] + impl Pallet { /// Authenticates the sudo key and dispatches a function call with `Root` origin. /// /// The dispatch origin for this call must be _Signed_. @@ -130,14 +137,20 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # - #[weight = (call.get_dispatch_info().weight + 10_000, call.get_dispatch_info().class)] - fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) + })] + pub fn sudo( + origin: OriginFor, + call: Box<::Call>, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -152,19 +165,24 @@ decl_module! { /// - O(1). /// - The weight of this call is defined by the caller. 
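// Put differently: `sudo` charges the wrapped call's own weight plus a flat
// 10_000, whereas `sudo_unchecked_weight` charges exactly the `_weight` the
// caller supplies. For example, wrapping any call as
// `sudo_unchecked_weight { call, weight: 1_000 }` reports a dispatch weight
// of 1_000 from `get_dispatch_info()`, whatever the inner call would cost.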
/// # - #[weight = (*_weight, call.get_dispatch_info().class)] - fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { + #[pallet::weight((*_weight, call.get_dispatch_info().class))] + pub fn sudo_unchecked_weight( + origin: OriginFor, + call: Box<::Call>, + _weight: Weight, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } - /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo key. + /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo + /// key. /// /// The dispatch origin for this call must be _Signed_. /// @@ -173,14 +191,17 @@ decl_module! { /// - Limited storage reads. /// - One DB change. /// # - #[weight = 0] - fn set_key(origin, new: ::Source) -> DispatchResultWithPostInfo { + #[pallet::weight(0)] + pub fn set_key( + origin: OriginFor, + new: ::Source, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let new = T::Lookup::lookup(new)?; - Self::deposit_event(RawEvent::KeyChanged(Self::key())); + Self::deposit_event(Event::KeyChanged(Self::key())); >::put(new); // Sudo user does not pay a fee. Ok(Pays::No.into()) @@ -197,16 +218,20 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # - #[weight = ( - call.get_dispatch_info().weight - .saturating_add(10_000) - // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), - call.get_dispatch_info().class - )] - fn sudo_as(origin, + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + dispatch_info.weight + .saturating_add(10_000) + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + dispatch_info.class, + ) + })] + pub fn sudo_as( + origin: OriginFor, who: ::Source, - call: Box<::Call> + call: Box<::Call>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -214,43 +239,54 @@ decl_module! { let who = T::Lookup::lookup(who)?; - let res = match call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()) { - Ok(_) => true, - Err(e) => { - sp_runtime::print(e); - false - } - }; + let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()); - Self::deposit_event(RawEvent::SudoAsDone(res)); + Self::deposit_event(Event::SudoAsDone(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } } -} -decl_event!( - pub enum Event where AccountId = ::AccountId { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// A sudo just took place. \[result\] Sudid(DispatchResult), /// The \[sudoer\] just switched identity; the old key is supplied. - KeyChanged(AccountId), + KeyChanged(T::AccountId), /// A sudo just took place. 
\[result\] - SudoAsDone(bool), + SudoAsDone(DispatchResult), + } + + #[pallet::error] + /// Error for the Sudo pallet + pub enum Error { + /// Sender must be the Sudo account + RequireSudo, } -); -decl_storage! { - trait Store for Module as Sudo { + /// The `AccountId` of the sudo key. + #[pallet::storage] + #[pallet::getter(fn key)] + pub(super) type Key = StorageValue<_, T::AccountId, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { /// The `AccountId` of the sudo key. - Key get(fn key) config(): T::AccountId; + pub key: T::AccountId, } -} -decl_error! { - /// Error for the Sudo module - pub enum Error for Module { - /// Sender must be the Sudo account - RequireSudo, + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { key: Default::default() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.key); + } } } diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 7996cd05d071f..dad17384d5603 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,108 +18,113 @@ //! Test utilities use super::*; +use crate as sudo; use frame_support::{ - impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, - weights::Weight, + parameter_types, + traits::{Contains, GenesisBuild}, }; +use frame_system::limits; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use sp_io; -use crate as sudo; -use frame_support::traits::Filter; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; // Logger module to track execution. +#[frame_support::pallet] pub mod logger { use super::*; - use frame_system::ensure_root; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - pub trait Trait: frame_system::Trait { - type Event: From> + Into<::Event>; + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + IsType<::Event>; } - decl_storage! { - trait Store for Module as Logger { - AccountLog get(fn account_log): Vec; - I32Log get(fn i32_log): Vec; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::weight(*weight)] + pub fn privileged_i32_log( + origin: OriginFor, + i: i32, + weight: Weight, + ) -> DispatchResultWithPostInfo { + // Ensure that the `origin` is `Root`. + ensure_root(origin)?; + >::append(i); + Self::deposit_event(Event::AppendI32(i, weight)); + Ok(().into()) } - } - decl_event! { - pub enum Event where AccountId = ::AccountId { - AppendI32(i32, Weight), - AppendI32AndAccount(AccountId, i32, Weight), + #[pallet::weight(*weight)] + pub fn non_privileged_log( + origin: OriginFor, + i: i32, + weight: Weight, + ) -> DispatchResultWithPostInfo { + // Ensure that the `origin` is some signed account. + let sender = ensure_signed(origin)?; + >::append(i); + >::append(sender.clone()); + Self::deposit_event(Event::AppendI32AndAccount(sender, i, weight)); + Ok(().into()) } } - decl_module! 
{ - pub struct Module for enum Call where origin: ::Origin { - fn deposit_event() = default; - - #[weight = *weight] - fn privileged_i32_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is `Root`. - ensure_root(origin)?; - ::append(i); - Self::deposit_event(RawEvent::AppendI32(i, weight)); - } - - #[weight = *weight] - fn non_privileged_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is some signed account. - let sender = ensure_signed(origin)?; - ::append(i); - >::append(sender.clone()); - Self::deposit_event(RawEvent::AppendI32AndAccount(sender, i, weight)); - } - } + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + AppendI32(i32, Weight), + AppendI32AndAccount(T::AccountId, i32, Weight), } -} -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} + #[pallet::storage] + #[pallet::getter(fn account_log)] + pub(super) type AccountLog = StorageValue<_, Vec, ValueQuery>; -mod test_events { - pub use crate::Event; + #[pallet::storage] + #[pallet::getter(fn i32_log)] + pub(super) type I32Log = StorageValue<_, Vec, ValueQuery>; } -impl_outer_event! { - pub enum TestEvent for Test { - frame_system, - sudo, - logger, +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Sudo: sudo::{Pallet, Call, Config, Storage, Event}, + Logger: logger::{Pallet, Call, Storage, Event}, } -} - -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - sudo::Sudo, - logger::Logger, - } -} - -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] -pub struct Test; +); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(1024); } pub struct BlockEverything; -impl Filter for BlockEverything { - fn filter(_: &Call) -> bool { +impl Contains for BlockEverything { + fn contains(_: &Call) -> bool { false } } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BlockEverything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -129,39 +134,29 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -// Implement the logger module's `Trait` on the Test runtime. -impl logger::Trait for Test { - type Event = TestEvent; +// Implement the logger module's `Config` on the Test runtime. +impl logger::Config for Test { + type Event = Event; } -// Implement the sudo module's `Trait` on the Test runtime. -impl Trait for Test { - type Event = TestEvent; +// Implement the sudo module's `Config` on the Test runtime. +impl Config for Test { + type Event = Event; type Call = Call; } -// Assign back to type variables in order to make dispatched calls of these modules later. -pub type Sudo = Module; -pub type Logger = logger::Module; -pub type System = frame_system::Module; - // New types for dispatchable functions. pub type SudoCall = sudo::Call; pub type LoggerCall = logger::Call; @@ -169,8 +164,8 @@ pub type LoggerCall = logger::Call; // Build test environment by setting the root `key` for the Genesis. pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig::{ - key: root_key, - }.assimilate_storage(&mut t).unwrap(); + sudo::GenesisConfig:: { key: root_key } + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index cba1e1cf60540..2eb558e9471c4 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,16 +18,17 @@ //! Tests for the module. 
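Two `frame_support` macros carry most of these tests: `assert_ok!` expects the dispatch to return `Ok`, while `assert_noop!` expects the given error and additionally checks that storage was left untouched. A minimal sketch of the pattern, reusing the mock runtime defined above:

```rust
use frame_support::{assert_noop, assert_ok};

#[test]
fn only_the_key_can_sudo() {
	new_test_ext(1).execute_with(|| {
		let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 7, weight: 1 }));
		// Account 1 is the sudo key, so the dispatch goes through...
		assert_ok!(Sudo::sudo(Origin::signed(1), call.clone()));
		// ...while any other signer is rejected and no storage is mutated.
		assert_noop!(Sudo::sudo(Origin::signed(2), call), Error::<Test>::RequireSudo);
	});
}
```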
use super::*; +use frame_support::{assert_noop, assert_ok}; use mock::{ - Sudo, SudoCall, Origin, Call, Test, new_test_ext, LoggerCall, Logger, System, TestEvent, + new_test_ext, Call, Event as TestEvent, Logger, LoggerCall, Origin, Sudo, SudoCall, System, + Test, }; -use frame_support::{assert_ok, assert_noop}; #[test] fn test_setup_works() { // Environment setup, logger storage, and sudo `key` retrieval should work as expected. new_test_ext(1).execute_with(|| { - assert_eq!(Sudo::key(), 1u64); + assert_eq!(Sudo::key(), 1u64); assert!(Logger::i32_log().is_empty()); assert!(Logger::account_log().is_empty()); }); @@ -38,12 +39,12 @@ fn sudo_basics() { // Configure a default test environment and set the root `key` to 1. new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as `origin`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); assert_ok!(Sudo::sudo(Origin::signed(1), call)); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when `sudo` is passed a non-root `key` as `origin`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); assert_noop!(Sudo::sudo(Origin::signed(2), call), Error::::RequireSudo); }); } @@ -55,10 +56,9 @@ fn sudo_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); assert_ok!(Sudo::sudo(Origin::signed(1), call)); - let expected_event = TestEvent::sudo(RawEvent::Sudid(Ok(()))); - assert!(System::events().iter().any(|a| a.event == expected_event)); + System::assert_has_event(TestEvent::Sudo(Event::Sudid(Ok(())))); }) } @@ -66,12 +66,12 @@ fn sudo_emits_events_correctly() { fn sudo_unchecked_weight_basics() { new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as origin. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); assert_noop!( Sudo::sudo_unchecked_weight(Origin::signed(2), call, 1_000), Error::::RequireSudo, @@ -80,8 +80,8 @@ fn sudo_unchecked_weight_basics() { assert_eq!(Logger::i32_log(), vec![42i32]); // Controls the dispatched weight. 
- let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); - let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight(call, 1_000); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); + let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight { call, weight: 1_000 }; let info = sudo_unchecked_weight_call.get_dispatch_info(); assert_eq!(info.weight, 1_000); }); @@ -94,10 +94,9 @@ fn sudo_unchecked_weight_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); - let expected_event = TestEvent::sudo(RawEvent::Sudid(Ok(()))); - assert!(System::events().iter().any(|a| a.event == expected_event)); + System::assert_has_event(TestEvent::Sudo(Event::Sudid(Ok(())))); }) } @@ -106,11 +105,12 @@ fn set_key_basics() { new_test_ext(1).execute_with(|| { // A root `key` can change the root `key` assert_ok!(Sudo::set_key(Origin::signed(1), 2)); - assert_eq!(Sudo::key(), 2u64); + assert_eq!(Sudo::key(), 2u64); }); new_test_ext(1).execute_with(|| { - // A non-root `key` will trigger a `RequireSudo` error and a non-root `key` cannot change the root `key`. + // A non-root `key` will trigger a `RequireSudo` error and a non-root `key` cannot change + // the root `key`. assert_noop!(Sudo::set_key(Origin::signed(2), 3), Error::::RequireSudo); }); } @@ -123,12 +123,10 @@ fn set_key_emits_events_correctly() { // A root `key` can change the root `key`. assert_ok!(Sudo::set_key(Origin::signed(1), 2)); - let expected_event = TestEvent::sudo(RawEvent::KeyChanged(1)); - assert!(System::events().iter().any(|a| a.event == expected_event)); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged(1))); // Double check. assert_ok!(Sudo::set_key(Origin::signed(2), 4)); - let expected_event = TestEvent::sudo(RawEvent::KeyChanged(2)); - assert!(System::events().iter().any(|a| a.event == expected_event)); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged(2))); }); } @@ -136,34 +134,33 @@ fn set_key_emits_events_correctly() { fn sudo_as_basics() { new_test_ext(1).execute_with(|| { // A privileged function will not work when passed to `sudo_as`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1_000))); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert!(Logger::i32_log().is_empty()); assert!(Logger::account_log().is_empty()); // A non-privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); assert_noop!(Sudo::sudo_as(Origin::signed(3), 2, call), Error::::RequireSudo); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert_eq!(Logger::i32_log(), vec![42i32]); - // The correct user makes the call within `sudo_as`. + // The correct user makes the call within `sudo_as`. 
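// That is, `sudo_as` re-dispatches with `RawOrigin::Signed(2)`, so the
// logger records account 2 (the target), not account 1 (the sudo key):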
assert_eq!(Logger::account_log(), vec![2]); }); } #[test] fn sudo_as_emits_events_correctly() { - new_test_ext(1).execute_with(|| { + new_test_ext(1).execute_with(|| { // Set block number to 1 because events are not emitted on block 0. System::set_block_number(1); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); - let expected_event = TestEvent::sudo(RawEvent::SudoAsDone(true)); - assert!(System::events().iter().any(|a| a.event == expected_event)); + System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone(Ok(())))); }); } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 3d40b65637262..b09ed65a114dc 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,49 +13,51 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4" -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-metadata = { version = "12.0.0", default-features = false, path = "../metadata" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../../primitives/tracing" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "2.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -frame-support-procedural = { version = "2.0.0", default-features = false, path = "./procedural" } -paste = "0.1.6" +serde = { version = "1.0.126", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-metadata = { version = "14.0.0", default-features = false, features = ["v14"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-tracing = { version = "4.0.0-dev", default-features = false, path = "../../primitives/tracing" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../primitives/arithmetic" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "./procedural" 
} +paste = "1.0" once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } -bitmask = { version = "0.5.0", default-features = false } -impl-trait-for-tuples = "0.1.3" +sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../../primitives/state-machine" } +bitflags = "1.3" +impl-trait-for-tuples = "0.2.1" smallvec = "1.4.1" +log = { version = "0.4.14", default-features = false } [dev-dependencies] +assert_matches = "1.3.0" pretty_assertions = "0.6.1" -frame-system = { version = "2.0.0", path = "../system" } -parity-util-mem = { version = "0.7.0", features = ["primitive-types"] } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-api = { version = "2.0.0", default-features = false, path = "../../primitives/api" } +frame-system = { version = "4.0.0-dev", path = "../system" } +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } [features] default = ["std"] std = [ "once_cell", - "bitmask/std", "serde", "sp-io/std", "codec/std", + "scale-info/std", "sp-std/std", "sp-runtime/std", "sp-tracing/std", "sp-arithmetic/std", "frame-metadata/std", "sp-inherents/std", + "sp-staking/std", "sp-state-machine", "frame-support-procedural/std", + "log/std", ] -nightly = [] -strict = [] runtime-benchmarks = [] +try-runtime = [] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 70662d710775d..e1ff6dcf39b7e 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,10 +15,11 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -frame-support-procedural-tools = { version = "2.0.0", path = "./tools" } -proc-macro2 = "1.0.6" +frame-support-procedural-tools = { version = "4.0.0-dev", path = "./tools" } +proc-macro2 = "1.0.29" quote = "1.0.3" -syn = { version = "1.0.7", features = ["full"] } +Inflector = "0.11.4" +syn = { version = "1.0.58", features = ["full"] } [features] default = ["std"] diff --git a/frame/support/procedural/src/clone_no_bound.rs b/frame/support/procedural/src/clone_no_bound.rs index 35854d23f4dbd..747900fd023f6 100644 --- a/frame/support/procedural/src/clone_no_bound.rs +++ b/frame/support/procedural/src/clone_no_bound.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,56 +30,61 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke let impl_ = match input.data { syn::Data::Struct(struct_) => match struct_.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() - .map(|i| &i.ident) - .map(|i| quote::quote_spanned!(i.span() => + let fields = named.named.iter().map(|i| &i.ident).map(|i| { + quote::quote_spanned!(i.span() => #i: core::clone::Clone::clone(&self.#i) - )); + ) + }); quote::quote!( Self { #( #fields, )* } ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() - .map(|(i, _)| syn::Index::from(i)) - .map(|i| quote::quote_spanned!(i.span() => - core::clone::Clone::clone(&self.#i) - )); + let fields = + unnamed.unnamed.iter().enumerate().map(|(i, _)| syn::Index::from(i)).map(|i| { + quote::quote_spanned!(i.span() => + core::clone::Clone::clone(&self.#i) + ) + }); quote::quote!( Self ( #( #fields, )* ) ) }, syn::Fields::Unit => { - quote::quote!( Self ) - } + quote::quote!(Self) + }, }, syn::Data::Enum(enum_) => { - let variants = enum_.variants.iter() - .map(|variant| { - let ident = &variant.ident; - match &variant.fields { - syn::Fields::Named(named) => { - let captured = named.named.iter().map(|i| &i.ident); - let cloned = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => - #i: core::clone::Clone::clone(#i) - )); - quote::quote!( - Self::#ident { #( ref #captured, )* } => Self::#ident { #( #cloned, )*} + let variants = enum_.variants.iter().map(|variant| { + let ident = &variant.ident; + match &variant.fields { + syn::Fields::Named(named) => { + let captured = named.named.iter().map(|i| &i.ident); + let cloned = captured.clone().map(|i| { + quote::quote_spanned!(i.span() => + #i: core::clone::Clone::clone(#i) ) - }, - syn::Fields::Unnamed(unnamed) => { - let captured = unnamed.unnamed.iter().enumerate() - .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); - let cloned = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => - core::clone::Clone::clone(#i) - )); - quote::quote!( - Self::#ident ( #( ref #captured, )* ) => Self::#ident ( #( #cloned, )*) + }); + quote::quote!( + Self::#ident { #( ref #captured, )* } => Self::#ident { #( #cloned, )*} + ) + }, + syn::Fields::Unnamed(unnamed) => { + let captured = unnamed + .unnamed + .iter() + .enumerate() + .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); + let cloned = captured.clone().map(|i| { + quote::quote_spanned!(i.span() => + core::clone::Clone::clone(#i) ) - }, - syn::Fields::Unit => quote::quote!( Self::#ident => Self::#ident ), - } - }); + }); + quote::quote!( + Self::#ident ( #( ref #captured, )* ) => Self::#ident ( #( #cloned, )*) + ) + }, + syn::Fields::Unit => quote::quote!( Self::#ident => Self::#ident ), + } + }); quote::quote!(match self { #( #variants, )* @@ -99,5 +104,6 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke } } }; - ).into() + ) + .into() } diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs new file mode 100644 index 0000000000000..2532a680e21be --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -0,0 +1,146 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::Ident; + +pub fn expand_outer_dispatch( + runtime: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> TokenStream { + let mut variant_defs = TokenStream::new(); + let mut variant_patterns = Vec::new(); + let mut query_call_part_macros = Vec::new(); + let mut pallet_names = Vec::new(); + + let pallets_with_call = pallet_decls.iter().filter(|decl| decl.exists_part("Call")); + + for pallet_declaration in pallets_with_call { + let name = &pallet_declaration.name; + let path = &pallet_declaration.path; + let index = pallet_declaration.index; + + variant_defs.extend( + quote!(#[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ),), + ); + variant_patterns.push(quote!(Call::#name(call))); + pallet_names.push(name); + query_call_part_macros.push(quote! { + #path::__substrate_call_check::is_call_part_defined!(#name); + }); + } + + quote! { + #( #query_call_part_macros )* + + #[derive( + Clone, PartialEq, Eq, + #scrate::codec::Encode, + #scrate::codec::Decode, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + pub enum Call { + #variant_defs + } + impl #scrate::dispatch::GetDispatchInfo for Call { + fn get_dispatch_info(&self) -> #scrate::dispatch::DispatchInfo { + match self { + #( #variant_patterns => call.get_dispatch_info(), )* + } + } + } + impl #scrate::dispatch::GetCallMetadata for Call { + fn get_call_metadata(&self) -> #scrate::dispatch::CallMetadata { + use #scrate::dispatch::GetCallName; + match self { + #( + #variant_patterns => { + let function_name = call.get_call_name(); + let pallet_name = stringify!(#pallet_names); + #scrate::dispatch::CallMetadata { function_name, pallet_name } + } + )* + } + } + + fn get_module_names() -> &'static [&'static str] { + &[#( + stringify!(#pallet_names), + )*] + } + + fn get_call_names(module: &str) -> &'static [&'static str] { + use #scrate::dispatch::{Callable, GetCallName}; + match module { + #( + stringify!(#pallet_names) => + <<#pallet_names as Callable<#runtime>>::Call + as GetCallName>::get_call_names(), + )* + _ => unreachable!(), + } + } + } + impl #scrate::dispatch::Dispatchable for Call { + type Origin = Origin; + type Config = Call; + type Info = #scrate::weights::DispatchInfo; + type PostInfo = #scrate::weights::PostDispatchInfo; + fn dispatch(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { + if !<Self::Origin as #scrate::traits::OriginTrait>::filter_call(&origin, &self) { + return #scrate::sp_std::result::Result::Err(#scrate::dispatch::DispatchError::BadOrigin.into()); + } + + #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) + } + } + impl #scrate::traits::UnfilteredDispatchable for Call { + type Origin = Origin; + fn dispatch_bypass_filter(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { + match self { + #( + #variant_patterns => +
#scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(call, origin), + )* + } + } + } + + #( + impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { + #[allow(unreachable_patterns)] + fn is_sub_type(&self) -> Option<&#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> { + match self { + #variant_patterns => Some(call), + // May be unreachable + _ => None, + } + } + } + + impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { + fn from(call: #scrate::dispatch::CallableCallFor<#pallet_names, #runtime>) -> Self { + #variant_patterns + } + } + )* + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs b/frame/support/procedural/src/construct_runtime/expand/config.rs new file mode 100644 index 0000000000000..5e1b9d94700e6 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -0,0 +1,135 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use inflector::Inflector; +use proc_macro2::TokenStream; +use quote::{format_ident, quote, ToTokens}; +use syn::Ident; + +pub fn expand_outer_config( + runtime: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> TokenStream { + let mut types = TokenStream::new(); + let mut fields = TokenStream::new(); + let mut build_storage_calls = TokenStream::new(); + let mut query_genesis_config_part_macros = Vec::new(); + + for decl in pallet_decls { + if let Some(pallet_entry) = decl.find_part("Config") { + let path = &decl.path; + let pallet_name = &decl.name; + let path_str = path.into_token_stream().to_string(); + let config = format_ident!("{}Config", pallet_name); + let field_name = + &Ident::new(&pallet_name.to_string().to_snake_case(), decl.name.span()); + let part_is_generic = !pallet_entry.generics.params.is_empty(); + + types.extend(expand_config_types(runtime, decl, &config, part_is_generic)); + fields.extend(quote!(pub #field_name: #config,)); + build_storage_calls.extend(expand_config_build_storage_call( + scrate, + runtime, + decl, + &field_name, + )); + query_genesis_config_part_macros.push(quote! { + #path::__substrate_genesis_config_check::is_genesis_config_defined!(#pallet_name); + #[cfg(feature = "std")] + #path::__substrate_genesis_config_check::is_std_enabled_for_genesis!(#pallet_name, #path_str); + }); + } + } + + quote! 
{ + #( #query_genesis_config_part_macros )* + + #types + + #[cfg(any(feature = "std", test))] + use #scrate::serde as __genesis_config_serde_import__; + #[cfg(any(feature = "std", test))] + #[derive(#scrate::serde::Serialize, #scrate::serde::Deserialize, Default)] + #[serde(rename_all = "camelCase")] + #[serde(deny_unknown_fields)] + #[serde(crate = "__genesis_config_serde_import__")] + pub struct GenesisConfig { + #fields + } + + #[cfg(any(feature = "std", test))] + impl #scrate::sp_runtime::BuildStorage for GenesisConfig { + fn assimilate_storage( + &self, + storage: &mut #scrate::sp_runtime::Storage, + ) -> std::result::Result<(), String> { + #build_storage_calls + + #scrate::BasicExternalities::execute_with_storage(storage, || { + <AllPalletsWithSystem as #scrate::traits::OnGenesis>::on_genesis(); + }); + + Ok(()) + } + } + } +} + +fn expand_config_types( + runtime: &Ident, + decl: &Pallet, + config: &Ident, + part_is_generic: bool, +) -> TokenStream { + let path = &decl.path; + + match (decl.instance.as_ref(), part_is_generic) { + (Some(inst), true) => quote! { + #[cfg(any(feature = "std", test))] + pub type #config = #path::GenesisConfig<#runtime, #path::#inst>; + }, + (None, true) => quote! { + #[cfg(any(feature = "std", test))] + pub type #config = #path::GenesisConfig<#runtime>; + }, + (_, false) => quote! { + #[cfg(any(feature = "std", test))] + pub type #config = #path::GenesisConfig; + }, + } +} + +fn expand_config_build_storage_call( + scrate: &TokenStream, + runtime: &Ident, + decl: &Pallet, + field_name: &Ident, +) -> TokenStream { + let path = &decl.path; + let instance = if let Some(inst) = decl.instance.as_ref() { + quote!(#path::#inst) + } else { + quote!(#path::__InherentHiddenInstance) + }; + + quote! { + #scrate::sp_runtime::BuildModuleGenesisStorage:: + <#runtime, #instance>::build_module_genesis_storage(&self.#field_name, storage)?; + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs new file mode 100644 index 0000000000000..798646bf27334 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -0,0 +1,141 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{Generics, Ident}; + +pub fn expand_outer_event( + runtime: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> syn::Result<TokenStream> { + let mut event_variants = TokenStream::new(); + let mut event_conversions = TokenStream::new(); + let mut query_event_part_macros = Vec::new(); + + for pallet_decl in pallet_decls { + if let Some(pallet_entry) = pallet_decl.find_part("Event") { + let path = &pallet_decl.path; + let pallet_name = &pallet_decl.name; + let index = pallet_decl.index; + let instance = pallet_decl.instance.as_ref(); + let generics = &pallet_entry.generics; + + if instance.is_some() && generics.params.is_empty() { + let msg = format!( + "Instantiable pallet with no generic `Event` cannot \ + be constructed: pallet `{}` must have generic `Event`", + pallet_name, + ); + return Err(syn::Error::new(pallet_name.span(), msg)) + } + + let part_is_generic = !generics.params.is_empty(); + let pallet_event = match (instance, part_is_generic) { + (Some(inst), true) => quote!(#path::Event::<#runtime, #path::#inst>), + (Some(inst), false) => quote!(#path::Event::<#path::#inst>), + (None, true) => quote!(#path::Event::<#runtime>), + (None, false) => quote!(#path::Event), + }; + + event_variants.extend(expand_event_variant( + runtime, + pallet_decl, + index, + instance, + generics, + )); + event_conversions.extend(expand_event_conversion(scrate, pallet_decl, &pallet_event)); + query_event_part_macros.push(quote! { + #path::__substrate_event_check::is_event_part_defined!(#pallet_name); + }); + } + } + + Ok(quote! { + #( #query_event_part_macros )* + + #[derive( + Clone, PartialEq, Eq, + #scrate::codec::Encode, + #scrate::codec::Decode, + #scrate::scale_info::TypeInfo, + #scrate::RuntimeDebug, + )] + #[allow(non_camel_case_types)] + pub enum Event { + #event_variants + } + + #event_conversions + }) +} + +fn expand_event_variant( + runtime: &Ident, + pallet: &Pallet, + index: u8, + instance: Option<&Ident>, + generics: &Generics, +) -> TokenStream { + let path = &pallet.path; + let variant_name = &pallet.name; + let part_is_generic = !generics.params.is_empty(); + + match instance { + Some(inst) if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime, #path::#inst>),) + }, + Some(inst) => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#path::#inst>),) + }, + None if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime>),) + }, + None => { + quote!(#[codec(index = #index)] #variant_name(#path::Event),) + }, + } +} + +fn expand_event_conversion( + scrate: &TokenStream, + pallet: &Pallet, + pallet_event: &TokenStream, +) -> TokenStream { + let variant_name = &pallet.name; + + quote! 
{ + impl From<#pallet_event> for Event { + fn from(x: #pallet_event) -> Self { + Event::#variant_name(x) + } + } + impl #scrate::sp_std::convert::TryInto<#pallet_event> for Event { + type Error = (); + + fn try_into(self) -> #scrate::sp_std::result::Result<#pallet_event, Self::Error> { + match self { + Self::#variant_name(evt) => Ok(evt), + _ => Err(()), + } + } + } + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/frame/support/procedural/src/construct_runtime/expand/inherent.rs new file mode 100644 index 0000000000000..fd30416782687 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -0,0 +1,204 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{Ident, TypePath}; + +pub fn expand_outer_inherent( + runtime: &Ident, + block: &TypePath, + unchecked_extrinsic: &TypePath, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> TokenStream { + let mut pallet_names = Vec::new(); + let mut query_inherent_part_macros = Vec::new(); + + for pallet_decl in pallet_decls { + if pallet_decl.exists_part("Inherent") { + let name = &pallet_decl.name; + let path = &pallet_decl.path; + + pallet_names.push(name); + query_inherent_part_macros.push(quote! { + #path::__substrate_inherent_check::is_inherent_part_defined!(#name); + }); + } + } + + quote! { + #( #query_inherent_part_macros )* + + trait InherentDataExt { + fn create_extrinsics(&self) -> + #scrate::inherent::Vec<<#block as #scrate::inherent::BlockT>::Extrinsic>; + fn check_extrinsics(&self, block: &#block) -> #scrate::inherent::CheckInherentsResult; + } + + impl InherentDataExt for #scrate::inherent::InherentData { + fn create_extrinsics(&self) -> + #scrate::inherent::Vec<<#block as #scrate::inherent::BlockT>::Extrinsic> + { + use #scrate::inherent::ProvideInherent; + + let mut inherents = Vec::new(); + + #( + if let Some(inherent) = #pallet_names::create_inherent(self) { + let inherent = <#unchecked_extrinsic as #scrate::inherent::Extrinsic>::new( + inherent.into(), + None, + ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ + `Some`; qed"); + + inherents.push(inherent); + } + )* + + inherents + } + + fn check_extrinsics(&self, block: &#block) -> #scrate::inherent::CheckInherentsResult { + use #scrate::inherent::{ProvideInherent, IsFatalError}; + use #scrate::traits::{IsSubType, ExtrinsicCall}; + use #scrate::sp_runtime::traits::Block as _; + + let mut result = #scrate::inherent::CheckInherentsResult::new(); + + for xt in block.extrinsics() { + // Inherents are before any other extrinsics. + // And signed extrinsics are not inherents. 
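+ // For example: in a block shaped [inherent, inherent, signed, inherent], the
+ // loop below validates the two leading inherents and stops at the signed
+ // extrinsic; the misplaced trailing inherent is instead caught by
+ // `ensure_inherents_are_first` further down, which reports its index as `Err(3)`.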
+ if #scrate::inherent::Extrinsic::is_signed(xt).unwrap_or(false) { + break + } + + let mut is_inherent = false; + + #({ + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(call) { + is_inherent = true; + if let Err(e) = #pallet_names::check_inherent(call, self) { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } + } + } + } + })* + + // Inherents are before any other extrinsics. + // No module marked it as inherent thus it is not. + if !is_inherent { + break + } + } + + #( + match #pallet_names::is_inherent_required(self) { + Ok(Some(e)) => { + let found = block.extrinsics().iter().any(|xt| { + let is_signed = #scrate::inherent::Extrinsic::is_signed(xt) + .unwrap_or(false); + + if !is_signed { + let call = < + #unchecked_extrinsic as ExtrinsicCall + >::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + #pallet_names::is_inherent(&call) + } else { + false + } + } else { + // Signed extrinsics are not inherents. + false + } + }); + + if !found { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } + } + }, + Ok(None) => (), + Err(e) => { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } + }, + } + )* + + result + } + } + + impl #scrate::traits::EnsureInherentsAreFirst<#block> for #runtime { + fn ensure_inherents_are_first(block: &#block) -> Result<(), u32> { + use #scrate::inherent::ProvideInherent; + use #scrate::traits::{IsSubType, ExtrinsicCall}; + use #scrate::sp_runtime::traits::Block as _; + + let mut first_signed_observed = false; + + for (i, xt) in block.extrinsics().iter().enumerate() { + let is_signed = #scrate::inherent::Extrinsic::is_signed(xt).unwrap_or(false); + + let is_inherent = if is_signed { + // Signed extrinsics are not inherents. + false + } else { + let mut is_inherent = false; + #({ + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(&call) { + is_inherent = true; + } + } + })* + is_inherent + }; + + if !is_inherent { + first_signed_observed = true; + } + + if first_signed_observed && is_inherent { + return Err(i as u32) + } + } + + Ok(()) + } + } + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/frame/support/procedural/src/construct_runtime/expand/metadata.rs new file mode 100644 index 0000000000000..c8445e0bbc255 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{Ident, TypePath}; + +pub fn expand_runtime_metadata( + runtime: &Ident, + pallet_declarations: &[Pallet], + scrate: &TokenStream, + extrinsic: &TypePath, +) -> TokenStream { + let pallets = pallet_declarations + .iter() + .filter_map(|pallet_declaration| { + pallet_declaration.find_part("Pallet").map(|_| { + let filtered_names: Vec<_> = pallet_declaration + .pallet_parts() + .iter() + .filter(|part| part.name() != "Pallet") + .map(|part| part.name()) + .collect(); + (pallet_declaration, filtered_names) + }) + }) + .map(|(decl, filtered_names)| { + let name = &decl.name; + let index = &decl.index; + let storage = expand_pallet_metadata_storage(&filtered_names, runtime, decl); + let calls = expand_pallet_metadata_calls(&filtered_names, runtime, decl); + let event = expand_pallet_metadata_events(&filtered_names, runtime, scrate, decl); + let constants = expand_pallet_metadata_constants(runtime, decl); + let errors = expand_pallet_metadata_errors(runtime, decl); + + quote! { + #scrate::metadata::PalletMetadata { + name: stringify!(#name), + index: #index, + storage: #storage, + calls: #calls, + event: #event, + constants: #constants, + error: #errors, + } + } + }) + .collect::<Vec<_>>(); + + quote! { + impl #runtime { + pub fn metadata() -> #scrate::metadata::RuntimeMetadataPrefixed { + #scrate::metadata::RuntimeMetadataLastVersion::new( + #scrate::sp_std::vec![ #(#pallets),* ], + #scrate::metadata::ExtrinsicMetadata { + ty: #scrate::scale_info::meta_type::<#extrinsic>(), + version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, + signed_extensions: < + < + #extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata + >::SignedExtensions as #scrate::sp_runtime::traits::SignedExtension + >::metadata() + .into_iter() + .map(|meta| #scrate::metadata::SignedExtensionMetadata { + identifier: meta.identifier, + ty: meta.ty, + additional_signed: meta.additional_signed, + }) + .collect(), + }, + #scrate::scale_info::meta_type::<#runtime>() + ).into() + } + } + } +} + +fn expand_pallet_metadata_storage( + filtered_names: &[&'static str], + runtime: &Ident, + decl: &Pallet, +) -> TokenStream { + if filtered_names.contains(&"Storage") { + let instance = decl.instance.as_ref().into_iter(); + let path = &decl.path; + + quote! { + Some(#path::Pallet::<#runtime #(, #path::#instance)*>::storage_metadata()) + } + } else { + quote!(None) + } +} + +fn expand_pallet_metadata_calls( + filtered_names: &[&'static str], + runtime: &Ident, + decl: &Pallet, +) -> TokenStream { + if filtered_names.contains(&"Call") { + let instance = decl.instance.as_ref().into_iter(); + let path = &decl.path; + + quote! 
{ + Some(#path::Pallet::<#runtime #(, #path::#instance)*>::call_functions()) + } + } else { + quote!(None) + } +} + +fn expand_pallet_metadata_events( + filtered_names: &[&'static str], + runtime: &Ident, + scrate: &TokenStream, + decl: &Pallet, +) -> TokenStream { + if filtered_names.contains(&"Event") { + let path = &decl.path; + let part_is_generic = !decl + .find_part("Event") + .expect("Event part exists; qed") + .generics + .params + .is_empty(); + let pallet_event = match (decl.instance.as_ref(), part_is_generic) { + (Some(inst), true) => quote!(#path::Event::<#runtime, #path::#inst>), + (Some(inst), false) => quote!(#path::Event::<#path::#inst>), + (None, true) => quote!(#path::Event::<#runtime>), + (None, false) => quote!(#path::Event), + }; + + quote! { + Some( + #scrate::metadata::PalletEventMetadata { + ty: #scrate::scale_info::meta_type::<#pallet_event>() + } + ) + } + } else { + quote!(None) + } +} + +fn expand_pallet_metadata_constants(runtime: &Ident, decl: &Pallet) -> TokenStream { + let path = &decl.path; + let instance = decl.instance.as_ref().into_iter(); + + quote! { + #path::Pallet::<#runtime #(, #path::#instance)*>::pallet_constants_metadata() + } +} + +fn expand_pallet_metadata_errors(runtime: &Ident, decl: &Pallet) -> TokenStream { + let path = &decl.path; + let instance = decl.instance.as_ref().into_iter(); + + quote! { + #path::Pallet::<#runtime #(, #path::#instance)*>::error_metadata() + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/mod.rs b/frame/support/procedural/src/construct_runtime/expand/mod.rs new file mode 100644 index 0000000000000..cf8b5eef8d105 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/mod.rs @@ -0,0 +1,32 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +mod call; +mod config; +mod event; +mod inherent; +mod metadata; +mod origin; +mod unsigned; + +pub use call::expand_outer_dispatch; +pub use config::expand_outer_config; +pub use event::expand_outer_event; +pub use inherent::expand_outer_inherent; +pub use metadata::expand_runtime_metadata; +pub use origin::expand_outer_origin; +pub use unsigned::expand_outer_validate_unsigned; diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs new file mode 100644 index 0000000000000..a65ad78527ff7 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -0,0 +1,355 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{token, Generics, Ident}; + +pub fn expand_outer_origin( + runtime: &Ident, + pallets: &[Pallet], + pallets_token: token::Brace, + scrate: &TokenStream, +) -> syn::Result<TokenStream> { + let system_pallet = + pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| { + syn::Error::new( + pallets_token.span, + "`System` pallet declaration is missing. \ + Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", + ) + })?; + + let mut caller_variants = TokenStream::new(); + let mut pallet_conversions = TokenStream::new(); + let mut query_origin_part_macros = Vec::new(); + + for pallet_decl in pallets.iter().filter(|pallet| pallet.name != SYSTEM_PALLET_NAME) { + if let Some(pallet_entry) = pallet_decl.find_part("Origin") { + let instance = pallet_decl.instance.as_ref(); + let index = pallet_decl.index; + let generics = &pallet_entry.generics; + let name = &pallet_decl.name; + let path = &pallet_decl.path; + + if instance.is_some() && generics.params.is_empty() { + let msg = format!( + "Instantiable pallet with no generic `Origin` cannot \ + be constructed: pallet `{}` must have generic `Origin`", + name + ); + return Err(syn::Error::new(name.span(), msg)) + } + + caller_variants.extend(expand_origin_caller_variant( + runtime, + pallet_decl, + index, + instance, + generics, + )); + pallet_conversions.extend(expand_origin_pallet_conversions( + scrate, + runtime, + pallet_decl, + instance, + generics, + )); + query_origin_part_macros.push(quote! { + #path::__substrate_origin_check::is_origin_part_defined!(#name); + }); + } + } + + let system_path = &system_pallet.path; + let system_index = system_pallet.index; + + Ok(quote! { + #( #query_origin_part_macros )* + + // WARNING: All instances must hold the filter `frame_system::Config::BaseCallFilter`, except + // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. 
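+ //
+ // A minimal standalone sketch of the shape generated below (illustrative only,
+ // not part of the generated code; `RuntimeCall` is a hypothetical stand-in for
+ // the runtime's call enum):
+ //
+ //     use std::rc::Rc;
+ //
+ //     #[derive(Clone)]
+ //     enum RuntimeCall { Transfer, Remark }
+ //
+ //     #[derive(Clone)]
+ //     struct SketchOrigin {
+ //         caller: &'static str, // stands in for `OriginCaller`
+ //         filter: Rc<dyn Fn(&RuntimeCall) -> bool>,
+ //     }
+ //
+ //     impl SketchOrigin {
+ //         // Like `OriginTrait::add_filter`: AND the new predicate onto the old one.
+ //         fn add_filter(&mut self, f: impl Fn(&RuntimeCall) -> bool + 'static) {
+ //             let old = self.filter.clone();
+ //             self.filter = Rc::new(move |call| old(call) && f(call));
+ //         }
+ //     }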
+ #[derive(Clone)] + pub struct Origin { + caller: OriginCaller, + filter: #scrate::sp_std::rc::Rc<Box<dyn Fn(&<#runtime as #system_path::Config>::Call) -> bool>>, + } + + #[cfg(not(feature = "std"))] + impl #scrate::sp_std::fmt::Debug for Origin { + fn fmt( + &self, + fmt: &mut #scrate::sp_std::fmt::Formatter, + ) -> #scrate::sp_std::result::Result<(), #scrate::sp_std::fmt::Error> { + fmt.write_str("<wasm:stripped>") + } + } + + #[cfg(feature = "std")] + impl #scrate::sp_std::fmt::Debug for Origin { + fn fmt( + &self, + fmt: &mut #scrate::sp_std::fmt::Formatter, + ) -> #scrate::sp_std::result::Result<(), #scrate::sp_std::fmt::Error> { + fmt.debug_struct("Origin") + .field("caller", &self.caller) + .field("filter", &"[function ptr]") + .finish() + } + } + + impl #scrate::traits::OriginTrait for Origin { + type Call = <#runtime as #system_path::Config>::Call; + type PalletsOrigin = OriginCaller; + type AccountId = <#runtime as #system_path::Config>::AccountId; + + fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static) { + let f = self.filter.clone(); + + self.filter = #scrate::sp_std::rc::Rc::new(Box::new(move |call| { + f(call) && filter(call) + })); + } + + fn reset_filter(&mut self) { + let filter = < + <#runtime as #system_path::Config>::BaseCallFilter + as #scrate::traits::Contains<<#runtime as #system_path::Config>::Call> + >::contains; + + self.filter = #scrate::sp_std::rc::Rc::new(Box::new(filter)); + } + + fn set_caller_from(&mut self, other: impl Into<Self>) { + self.caller = other.into().caller; + } + + fn filter_call(&self, call: &Self::Call) -> bool { + (self.filter)(call) + } + + fn caller(&self) -> &Self::PalletsOrigin { + &self.caller + } + + fn try_with_caller<R>( + mut self, + f: impl FnOnce(Self::PalletsOrigin) -> Result<R, Self::PalletsOrigin>, + ) -> Result<R, Self> { + match f(self.caller) { + Ok(r) => Ok(r), + Err(caller) => { self.caller = caller; Err(self) } + } + } + + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. + fn none() -> Self { + #system_path::RawOrigin::None.into() + } + /// Create with system root origin and no filter. + fn root() -> Self { + #system_path::RawOrigin::Root.into() + } + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + fn signed(by: <#runtime as #system_path::Config>::AccountId) -> Self { + #system_path::RawOrigin::Signed(by).into() + } + } + + #[derive( + Clone, PartialEq, Eq, #scrate::RuntimeDebug, #scrate::codec::Encode, + #scrate::codec::Decode, #scrate::scale_info::TypeInfo, + )] + #[allow(non_camel_case_types)] + pub enum OriginCaller { + #[codec(index = #system_index)] + system(#system_path::Origin<#runtime>), + #caller_variants + #[allow(dead_code)] + Void(#scrate::Void) + } + + // For backwards compatibility and ease of accessing these functions. + #[allow(dead_code)] + impl Origin { + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. + pub fn none() -> Self { + <Origin as #scrate::traits::OriginTrait>::none() + } + /// Create with system root origin and no filter. + pub fn root() -> Self { + <Origin as #scrate::traits::OriginTrait>::root() + } + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. 
+ pub fn signed(by: <#runtime as #system_path::Config>::AccountId) -> Self { + <Origin as #scrate::traits::OriginTrait>::signed(by) + } + } + + impl From<#system_path::Origin<#runtime>> for OriginCaller { + fn from(x: #system_path::Origin<#runtime>) -> Self { + OriginCaller::system(x) + } + } + + impl #scrate::sp_std::convert::TryFrom<OriginCaller> for #system_path::Origin<#runtime> { + type Error = OriginCaller; + fn try_from(x: OriginCaller) + -> #scrate::sp_std::result::Result<#system_path::Origin<#runtime>, OriginCaller> + { + if let OriginCaller::system(l) = x { + Ok(l) + } else { + Err(x) + } + } + } + + impl From<#system_path::Origin<#runtime>> for Origin { + /// Convert to runtime origin: + /// * root origin is built with no filter + /// * others use `frame-system::Config::BaseCallFilter` + fn from(x: #system_path::Origin<#runtime>) -> Self { + let o: OriginCaller = x.into(); + o.into() + } + } + + impl From<OriginCaller> for Origin { + fn from(x: OriginCaller) -> Self { + let mut o = Origin { + caller: x, + filter: #scrate::sp_std::rc::Rc::new(Box::new(|_| true)), + }; + + // Root has no filter + if !matches!(o.caller, OriginCaller::system(#system_path::Origin::<#runtime>::Root)) { + #scrate::traits::OriginTrait::reset_filter(&mut o); + } + + o + } + } + + impl From<Origin> for #scrate::sp_std::result::Result<#system_path::Origin<#runtime>, Origin> { + /// NOTE: converting to pallet origin loses the origin filter information. + fn from(val: Origin) -> Self { + if let OriginCaller::system(l) = val.caller { + Ok(l) + } else { + Err(val) + } + } + } + impl From<Option<<#runtime as #system_path::Config>::AccountId>> for Origin { + /// Convert to runtime origin with caller being system signed or none and use filter + /// `frame-system::Config::BaseCallFilter`. + fn from(x: Option<<#runtime as #system_path::Config>::AccountId>) -> Self { + <#system_path::Origin<#runtime>>::from(x).into() + } + } + + #pallet_conversions + }) +} + +fn expand_origin_caller_variant( + runtime: &Ident, + pallet: &Pallet, + index: u8, + instance: Option<&Ident>, + generics: &Generics, +) -> TokenStream { + let part_is_generic = !generics.params.is_empty(); + let variant_name = &pallet.name; + let path = &pallet.path; + + match instance { + Some(inst) if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime, #path::#inst>),) + }, + Some(inst) => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#path::#inst>),) + }, + None if part_is_generic => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime>),) + }, + None => { + quote!(#[codec(index = #index)] #variant_name(#path::Origin),) + }, + } +} + +fn expand_origin_pallet_conversions( + scrate: &TokenStream, + runtime: &Ident, + pallet: &Pallet, + instance: Option<&Ident>, + generics: &Generics, +) -> TokenStream { + let path = &pallet.path; + let variant_name = &pallet.name; + + let part_is_generic = !generics.params.is_empty(); + let pallet_origin = match instance { + Some(inst) if part_is_generic => quote!(#path::Origin<#runtime, #path::#inst>), + Some(inst) => quote!(#path::Origin<#path::#inst>), + None if part_is_generic => quote!(#path::Origin<#runtime>), + None => quote!(#path::Origin), + }; + + quote! { + impl From<#pallet_origin> for OriginCaller { + fn from(x: #pallet_origin) -> Self { + OriginCaller::#variant_name(x) + } + } + + impl From<#pallet_origin> for Origin { + /// Convert to runtime origin using `frame-system::Config::BaseCallFilter`. 
+ fn from(x: #pallet_origin) -> Self { + let x: OriginCaller = x.into(); + x.into() + } + } + + impl From<Origin> for #scrate::sp_std::result::Result<#pallet_origin, Origin> { + /// NOTE: converting to pallet origin loses the origin filter information. + fn from(val: Origin) -> Self { + if let OriginCaller::#variant_name(l) = val.caller { + Ok(l) + } else { + Err(val) + } + } + } + + impl #scrate::sp_std::convert::TryFrom<OriginCaller> for #pallet_origin { + type Error = OriginCaller; + fn try_from( + x: OriginCaller, + ) -> #scrate::sp_std::result::Result<#pallet_origin, OriginCaller> { + if let OriginCaller::#variant_name(l) = x { + Ok(l) + } else { + Err(x) + } + } + } + } +} diff --git a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs new file mode 100644 index 0000000000000..d51792dd4a8d5 --- /dev/null +++ b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::TokenStream; +use quote::quote; +use syn::Ident; + +pub fn expand_outer_validate_unsigned( + runtime: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream, +) -> TokenStream { + let mut pallet_names = Vec::new(); + let mut query_validate_unsigned_part_macros = Vec::new(); + + for pallet_decl in pallet_decls { + if pallet_decl.exists_part("ValidateUnsigned") { + let name = &pallet_decl.name; + let path = &pallet_decl.path; + + pallet_names.push(name); + query_validate_unsigned_part_macros.push(quote! { + #path::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined!(#name); + }); + } + } + + quote! { + #( #query_validate_unsigned_part_macros )* + + impl #scrate::unsigned::ValidateUnsigned for #runtime { + type Call = Call; + + fn pre_dispatch(call: &Self::Call) -> Result<(), #scrate::unsigned::TransactionValidityError> { + #[allow(unreachable_patterns)] + match call { + #( Call::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), )* + // pre-dispatch should not stop inherent extrinsics, validation should prevent + // including arbitrary (non-inherent) extrinsics to blocks. 
{ + _ => Ok(()), + } + } + + fn validate_unsigned( + #[allow(unused_variables)] + source: #scrate::unsigned::TransactionSource, + call: &Self::Call, + ) -> #scrate::unsigned::TransactionValidity { + #[allow(unreachable_patterns)] + match call { + #( Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), )* + _ => #scrate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), + } + } + } + } +} diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index f355593defbe1..8aacd8f0aa810 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,88 +15,95 @@ // See the License for the specific language governing permissions and // limitations under the License. +mod expand; mod parse; -use frame_support_procedural_tools::syn_ext as ext; -use frame_support_procedural_tools::{generate_crate_access, generate_hidden_includes}; -use parse::{ModuleDeclaration, RuntimeDefinition, WhereSection, ModulePart}; +use frame_support_procedural_tools::{ + generate_crate_access, generate_hidden_includes, syn_ext as ext, +}; +use parse::{PalletDeclaration, PalletPart, PalletPath, RuntimeDefinition, WhereSection}; use proc_macro::TokenStream; -use proc_macro2::{TokenStream as TokenStream2}; +use proc_macro2::TokenStream as TokenStream2; use quote::quote; -use syn::{Ident, Result, TypePath}; use std::collections::HashMap; +use syn::{Ident, Result}; -/// The fixed name of the system module. +/// The fixed name of the system pallet. -const SYSTEM_MODULE_NAME: &str = "System"; +const SYSTEM_PALLET_NAME: &str = "System"; -/// The complete definition of a module with the resulting fixed index. +/// The complete definition of a pallet with the resulting fixed index. #[derive(Debug, Clone)] -pub struct Module { +pub struct Pallet { pub name: Ident, pub index: u8, - pub module: Ident, + pub path: PalletPath, pub instance: Option<Ident>, - pub module_parts: Vec<ModulePart>, + pub pallet_parts: Vec<PalletPart>, } -impl Module { - /// Get resolved module parts - fn module_parts(&self) -> &[ModulePart] { - &self.module_parts +impl Pallet { + /// Get resolved pallet parts + fn pallet_parts(&self) -> &[PalletPart] { + &self.pallet_parts } /// Find matching parts - fn find_part(&self, name: &str) -> Option<&ModulePart> { - self.module_parts.iter().find(|part| part.name() == name) + fn find_part(&self, name: &str) -> Option<&PalletPart> { + self.pallet_parts.iter().find(|part| part.name() == name) } - /// Return whether module contains part + /// Return whether pallet contains part fn exists_part(&self, name: &str) -> bool { self.find_part(name).is_some() } } -/// Convert from the parsed module to their final information. -/// Assign index to each modules using same rules as rust for fieldless enum. +/// Convert from the parsed pallets to their final information. +/// Assign an index to each pallet using the same rules Rust uses for fieldless enums. /// I.e. implicit indices are assigned incrementally, continuing from the last explicit index or starting at 0. 
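/// For example (illustrative pallet names and paths), given the declarations
/// `System: frame_system = 0, Balances: pallet_balances = 3, Sudo: pallet_sudo,`
/// the implicit `Sudo` receives index 4: one past the last explicit index, 3.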
-fn complete_modules(decl: impl Iterator<Item = ModuleDeclaration>) -> syn::Result<Vec<Module>> { +fn complete_pallets(decl: impl Iterator<Item = PalletDeclaration>) -> syn::Result<Vec<Pallet>> { let mut indices = HashMap::new(); let mut last_index: Option<u8> = None; + let mut names = HashMap::new(); + + decl.map(|pallet| { + let final_index = match pallet.index { + Some(i) => i, + None => last_index.map_or(Some(0), |i| i.checked_add(1)).ok_or_else(|| { + let msg = "Pallet index doesn't fit into u8, index is 256"; + syn::Error::new(pallet.name.span(), msg) + })?, + }; - decl - .map(|module| { - let final_index = match module.index { - Some(i) => i, - None => last_index.map_or(Some(0), |i| i.checked_add(1)) - .ok_or_else(|| { - let msg = "Module index doesn't fit into u8, index is 256"; - syn::Error::new(module.name.span(), msg) - })?, - }; - - last_index = Some(final_index); - - if let Some(used_module) = indices.insert(final_index, module.name.clone()) { - let msg = format!( - "Module indices are conflicting: Both modules {} and {} are at index {}", - used_module, - module.name, - final_index, - ); - let mut err = syn::Error::new(used_module.span(), &msg); - err.combine(syn::Error::new(module.name.span(), msg)); - return Err(err); - } + last_index = Some(final_index); + + if let Some(used_pallet) = indices.insert(final_index, pallet.name.clone()) { + let msg = format!( + "Pallet indices are conflicting: Both pallets {} and {} are at index {}", + used_pallet, pallet.name, final_index, + ); + let mut err = syn::Error::new(used_pallet.span(), &msg); + err.combine(syn::Error::new(pallet.name.span(), msg)); + return Err(err) + } - Ok(Module { - name: module.name, - index: final_index, - module: module.module, - instance: module.instance, - module_parts: module.module_parts, - }) + if let Some(used_pallet) = names.insert(pallet.name.clone(), pallet.name.span()) { + let msg = "Two pallets with the same name!"; + + let mut err = syn::Error::new(used_pallet, &msg); + err.combine(syn::Error::new(pallet.name.span(), &msg)); + return Err(err) + } + + Ok(Pallet { + name: pallet.name, + index: final_index, + path: pallet.path, + instance: pallet.instance, + pallet_parts: pallet.pallet_parts, }) - .collect() + }) + .collect() } pub fn construct_runtime(input: TokenStream) -> TokenStream { @@ -109,61 +116,30 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result<TokenStream2> { let RuntimeDefinition { name, - where_section: WhereSection { - block, - node_block, - unchecked_extrinsic, - .. - }, - modules: - ext::Braces { - content: ext::Punctuated { inner: modules, .. }, - token: modules_token, - }, + where_section: WhereSection { block, node_block, unchecked_extrinsic, .. }, + pallets: + ext::Braces { content: ext::Punctuated { inner: pallets, .. }, token: pallets_token }, .. } = definition; - let modules = complete_modules(modules.into_iter())?; - - let system_module = modules.iter() .find(|decl| decl.name == SYSTEM_MODULE_NAME) .ok_or_else(|| syn::Error::new( modules_token.span, "`System` module declaration is missing. 
\ - Please add this line: `System: frame_system::{Module, Call, Storage, Config, Event},`", - ))?; + let pallets = complete_pallets(pallets.into_iter())?; let hidden_crate_name = "construct_runtime"; let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support"); - let all_but_system_modules = modules.iter().filter(|module| module.name != SYSTEM_MODULE_NAME); - - let outer_event = decl_outer_event( - &name, - modules.iter(), - &scrate, - )?; - - let outer_origin = decl_outer_origin( - &name, - all_but_system_modules, - &system_module, - &scrate, - )?; - let all_modules = decl_all_modules(&name, modules.iter()); - let module_to_index = decl_pallet_runtime_setup(&modules, &scrate); - - let dispatch = decl_outer_dispatch(&name, modules.iter(), &scrate); - let metadata = decl_runtime_metadata(&name, modules.iter(), &scrate, &unchecked_extrinsic); - let outer_config = decl_outer_config(&name, modules.iter(), &scrate); - let inherent = decl_outer_inherent( - &block, - &unchecked_extrinsic, - modules.iter(), - &scrate, - ); - let validate_unsigned = decl_validate_unsigned(&name, modules.iter(), &scrate); + let outer_event = expand::expand_outer_event(&name, &pallets, &scrate)?; + + let outer_origin = expand::expand_outer_origin(&name, &pallets, pallets_token, &scrate)?; + let all_pallets = decl_all_pallets(&name, pallets.iter()); + let pallet_to_index = decl_pallet_runtime_setup(&pallets, &scrate); + + let dispatch = expand::expand_outer_dispatch(&name, &pallets, &scrate); + let metadata = expand::expand_runtime_metadata(&name, &pallets, &scrate, &unchecked_extrinsic); + let outer_config = expand::expand_outer_config(&name, &pallets, &scrate); + let inherent = + expand::expand_outer_inherent(&name, &block, &unchecked_extrinsic, &pallets, &scrate); + let validate_unsigned = expand::expand_outer_validate_unsigned(&name, &pallets, &scrate); let integrity_test = decl_integrity_test(&scrate); let res = quote!( @@ -175,7 +151,10 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result Result Result( - runtime: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> TokenStream2 { - let modules_tokens = module_declarations - .filter(|module_declaration| module_declaration.exists_part("ValidateUnsigned")) - .map(|module_declaration| &module_declaration.name); - quote!( - #scrate::impl_outer_validate_unsigned!( - impl ValidateUnsigned for #runtime { - #( #modules_tokens )* - } - ); - ) -} - -fn decl_outer_inherent<'a>( - block: &'a syn::TypePath, - unchecked_extrinsic: &'a syn::TypePath, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> TokenStream2 { - let modules_tokens = module_declarations.filter_map(|module_declaration| { - let maybe_config_part = module_declaration.find_part("Inherent"); - maybe_config_part.map(|_| { - let name = &module_declaration.name; - quote!(#name,) - }) - }); - quote!( - #scrate::impl_outer_inherent!( - impl Inherents where - Block = #block, - UncheckedExtrinsic = #unchecked_extrinsic - { - #(#modules_tokens)* - } - ); - ) -} - -fn decl_outer_config<'a>( - runtime: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> TokenStream2 { - let modules_tokens = module_declarations - .filter_map(|module_declaration| { - module_declaration.find_part("Config").map(|part| { - let transformed_generics: Vec<_> = part - .generics - .params - .iter() - .map(|param| quote!(<#param>)) - .collect(); - 
(module_declaration, transformed_generics) - }) - }) - .map(|(module_declaration, generics)| { - let module = &module_declaration.module; - let name = Ident::new( - &format!("{}Config", module_declaration.name), - module_declaration.name.span(), - ); - let instance = module_declaration.instance.as_ref().into_iter(); - quote!( - #name => - #module #(#instance)* #(#generics)*, - ) - }); - quote!( - #scrate::sp_runtime::impl_outer_config! { - pub struct GenesisConfig for #runtime { - #(#modules_tokens)* - } - } - ) + Ok(res) } -fn decl_runtime_metadata<'a>( +fn decl_all_pallets<'a>( runtime: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, - extrinsic: &TypePath, -) -> TokenStream2 { - let modules_tokens = module_declarations - .filter_map(|module_declaration| { - module_declaration.find_part("Module").map(|_| { - let filtered_names: Vec<_> = module_declaration - .module_parts() - .into_iter() - .filter(|part| part.name() != "Module") - .map(|part| part.ident()) - .collect(); - (module_declaration, filtered_names) - }) - }) - .map(|(module_declaration, filtered_names)| { - let module = &module_declaration.module; - let name = &module_declaration.name; - let instance = module_declaration - .instance - .as_ref() - .map(|name| quote!(<#name>)) - .into_iter(); - - let index = module_declaration.index; - - quote!( - #module::Module #(#instance)* as #name { index #index } with #(#filtered_names)*, - ) - }); - quote!( - #scrate::impl_runtime_metadata!{ - for #runtime with modules where Extrinsic = #extrinsic - #(#modules_tokens)* - } - ) -} - -fn decl_outer_dispatch<'a>( - runtime: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> TokenStream2 { - let modules_tokens = module_declarations - .filter(|module_declaration| module_declaration.exists_part("Call")) - .map(|module_declaration| { - let module = &module_declaration.module; - let name = &module_declaration.name; - let index = module_declaration.index.to_string(); - quote!(#[codec(index = #index)] #module::#name) - }); - - quote!( - #scrate::impl_outer_dispatch! { - pub enum Call for #runtime where origin: Origin { - #(#modules_tokens,)* - } - } - ) -} - -fn decl_outer_origin<'a>( - runtime_name: &'a Ident, - modules_except_system: impl Iterator, - system_module: &'a Module, - scrate: &'a TokenStream2, -) -> syn::Result { - let mut modules_tokens = TokenStream2::new(); - for module_declaration in modules_except_system { - match module_declaration.find_part("Origin") { - Some(module_entry) => { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; - if instance.is_some() && generics.params.len() == 0 { - let msg = format!( - "Instantiable module with no generic `Origin` cannot \ - be constructed: module `{}` must have generic `Origin`", - module_declaration.name - ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); - } - let index = module_declaration.index.to_string(); - let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); - modules_tokens.extend(tokens); - } - None => {} - } - } - - let system_name = &system_module.module; - let system_index = system_module.index.to_string(); - - Ok(quote!( - #scrate::impl_outer_origin! 
{ - pub enum Origin for #runtime_name where - system = #system_name, - system_index = #system_index - { - #modules_tokens - } - } - )) -} - -fn decl_outer_event<'a>( - runtime_name: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, -) -> syn::Result { - let mut modules_tokens = TokenStream2::new(); - for module_declaration in module_declarations { - match module_declaration.find_part("Event") { - Some(module_entry) => { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; - if instance.is_some() && generics.params.len() == 0 { - let msg = format!( - "Instantiable module with no generic `Event` cannot \ - be constructed: module `{}` must have generic `Event`", - module_declaration.name, - ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); - } - - let index = module_declaration.index.to_string(); - let tokens = quote!(#[codec(index = #index)] #module #instance #generics,); - modules_tokens.extend(tokens); - } - None => {} - } - } - - Ok(quote!( - #scrate::impl_outer_event! { - pub enum Event for #runtime_name { - #modules_tokens - } - } - )) -} - -fn decl_all_modules<'a>( - runtime: &'a Ident, - module_declarations: impl Iterator, + pallet_declarations: impl Iterator, ) -> TokenStream2 { let mut types = TokenStream2::new(); let mut names = Vec::new(); - for module_declaration in module_declarations { - let type_name = &module_declaration.name; - let module = &module_declaration.module; + for pallet_declaration in pallet_declarations { + let type_name = &pallet_declaration.name; + let pallet = &pallet_declaration.path; let mut generics = vec![quote!(#runtime)]; - generics.extend( - module_declaration - .instance - .iter() - .map(|name| quote!(#module::#name)), - ); + generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name))); let type_decl = quote!( - pub type #type_name = #module::Module <#(#generics),*>; + pub type #type_name = #pallet::Pallet <#(#generics),*>; ); types.extend(type_decl); - names.push(&module_declaration.name); + names.push(&pallet_declaration.name); } // Make nested tuple structure like (((Babe, Consensus), Grandpa), ...) - // But ignore the system module. - let all_modules = names.iter() - .filter(|n| **n != SYSTEM_MODULE_NAME) + // But ignore the system pallet. + let all_pallets = names + .iter() + .filter(|n| **n != SYSTEM_PALLET_NAME) + .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); + + let all_pallets_with_system = names + .iter() .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); quote!( #types - type AllModules = ( #all_modules ); + /// All pallets included in the runtime as a nested tuple of types. + /// Excludes the System pallet. + pub type AllPallets = ( #all_pallets ); + /// All pallets included in the runtime as a nested tuple of types. + pub type AllPalletsWithSystem = ( #all_pallets_with_system ); + + /// All modules included in the runtime as a nested tuple of types. + /// Excludes the System pallet. + #[deprecated(note = "use `AllPallets` instead")] + #[allow(dead_code)] + pub type AllModules = ( #all_pallets ); + /// All modules included in the runtime as a nested tuple of types. 
+ #[deprecated(note = "use `AllPalletsWithSystem` instead")] + #[allow(dead_code)] + pub type AllModulesWithSystem = ( #all_pallets_with_system ); ) } fn decl_pallet_runtime_setup( - module_declarations: &[Module], + pallet_declarations: &[Pallet], scrate: &TokenStream2, ) -> TokenStream2 { - let names = module_declarations.iter().map(|d| &d.name); - let names2 = module_declarations.iter().map(|d| &d.name); - let name_strings = module_declarations.iter().map(|d| d.name.to_string()); - let indices = module_declarations.iter() - .map(|module| module.index as usize); + let names = pallet_declarations.iter().map(|d| &d.name); + let names2 = pallet_declarations.iter().map(|d| &d.name); + let name_strings = pallet_declarations.iter().map(|d| d.name.to_string()); + let indices = pallet_declarations.iter().map(|pallet| pallet.index as usize); quote!( /// Provides an implementation of `PalletInfo` to provide information @@ -517,7 +283,7 @@ fn decl_integrity_test(scrate: &TokenStream2) -> TokenStream2 { #[test] pub fn runtime_integrity_tests() { - <AllModules as #scrate::traits::IntegrityTest>::integrity_test(); + <AllPalletsWithSystem as #scrate::traits::IntegrityTest>::integrity_test(); } } ) diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 4a45044d67f25..6f2fd82e73f4b 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,19 +16,21 @@ // limitations under the License. use frame_support_procedural_tools::syn_ext as ext; -use proc_macro2::Span; +use proc_macro2::{Span, TokenStream}; use std::collections::HashSet; use syn::{ + ext::IdentExt, parse::{Parse, ParseStream}, + punctuated::Punctuated, spanned::Spanned, - token, Error, Ident, Result, Token, + token, Error, Ident, Path, PathArguments, PathSegment, Result, Token, }; mod keyword { syn::custom_keyword!(Block); syn::custom_keyword!(NodeBlock); syn::custom_keyword!(UncheckedExtrinsic); - syn::custom_keyword!(Module); + syn::custom_keyword!(Pallet); syn::custom_keyword!(Call); syn::custom_keyword!(Storage); syn::custom_keyword!(Event); @@ -44,7 +46,7 @@ pub struct RuntimeDefinition { pub enum_token: Token![enum], pub name: Ident, pub where_section: WhereSection, - pub modules: ext::Braces<ext::Punctuated<ModuleDeclaration, Token![,]>>, + pub pallets: ext::Braces<ext::Punctuated<PalletDeclaration, Token![,]>>, } impl Parse for RuntimeDefinition { @@ -54,7 +56,7 @@ impl Parse for RuntimeDefinition { enum_token: input.parse()?, name: input.parse()?, where_section: input.parse()?, - modules: input.parse()?, + pallets: input.parse()?, }) } } @@ -75,9 +77,9 @@ impl Parse for WhereSection { definitions.push(definition); if !input.peek(Token![,]) { if !input.peek(token::Brace) { - return Err(input.error("Expected `,` or `{`")); + return Err(input.error("Expected `,` or `{`")) } - break; + break } input.parse::<Token![,]>()?; } @@ -85,23 +87,14 @@ impl Parse for WhereSection { let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; let unchecked_extrinsic = remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; - if let Some(WhereDefinition { - ref kind_span, - ref kind, - .. - }) = definitions.first() - { + if let Some(WhereDefinition { ref kind_span, ref kind, .. }) = definitions.first() { let msg = format!( - "`{:?}` was declared above. 
diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 4a45044d67f25..6f2fd82e73f4b 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,19 +16,21 @@ // limitations under the License. use frame_support_procedural_tools::syn_ext as ext; -use proc_macro2::Span; +use proc_macro2::{Span, TokenStream}; use std::collections::HashSet; use syn::{ + ext::IdentExt, parse::{Parse, ParseStream}, + punctuated::Punctuated, spanned::Spanned, - token, Error, Ident, Result, Token, + token, Error, Ident, Path, PathArguments, PathSegment, Result, Token, }; mod keyword { syn::custom_keyword!(Block); syn::custom_keyword!(NodeBlock); syn::custom_keyword!(UncheckedExtrinsic); - syn::custom_keyword!(Module); + syn::custom_keyword!(Pallet); syn::custom_keyword!(Call); syn::custom_keyword!(Storage); syn::custom_keyword!(Event); @@ -44,7 +46,7 @@ pub struct RuntimeDefinition { pub enum_token: Token![enum], pub name: Ident, pub where_section: WhereSection, - pub modules: ext::Braces<ext::Punctuated<ModuleDeclaration, Token![,]>>, + pub pallets: ext::Braces<ext::Punctuated<PalletDeclaration, Token![,]>>, } impl Parse for RuntimeDefinition { @@ -54,7 +56,7 @@ impl Parse for RuntimeDefinition { enum_token: input.parse()?, name: input.parse()?, where_section: input.parse()?, - modules: input.parse()?, + pallets: input.parse()?, }) } } @@ -75,9 +77,9 @@ impl Parse for WhereSection { definitions.push(definition); if !input.peek(Token![,]) { if !input.peek(token::Brace) { - return Err(input.error("Expected `,` or `{`")); + return Err(input.error("Expected `,` or `{`")) } - break; + break } input.parse::<Token![,]>()?; } @@ -85,23 +87,14 @@ let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; let unchecked_extrinsic = remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; - if let Some(WhereDefinition { - ref kind_span, - ref kind, - .. - }) = definitions.first() - { + if let Some(WhereDefinition { ref kind_span, ref kind, .. }) = definitions.first() { let msg = format!( "`{:?}` was declared above. Please use exactly one declaration for `{:?}`.", kind, kind ); - return Err(Error::new(*kind_span, msg)); + return Err(Error::new(*kind_span, msg)) } - Ok(Self { - block, - node_block, - unchecked_extrinsic, - }) + Ok(Self { block, node_block, unchecked_extrinsic }) } } @@ -125,17 +118,11 @@ impl Parse for WhereDefinition { let (kind_span, kind) = if lookahead.peek(keyword::Block) { (input.parse::<keyword::Block>()?.span(), WhereKind::Block) } else if lookahead.peek(keyword::NodeBlock) { - ( - input.parse::<keyword::NodeBlock>()?.span(), - WhereKind::NodeBlock, - ) + (input.parse::<keyword::NodeBlock>()?.span(), WhereKind::NodeBlock) } else if lookahead.peek(keyword::UncheckedExtrinsic) { - ( - input.parse::<keyword::UncheckedExtrinsic>()?.span(), - WhereKind::UncheckedExtrinsic, - ) + (input.parse::<keyword::UncheckedExtrinsic>()?.span(), WhereKind::UncheckedExtrinsic) } else { - return Err(lookahead.error()); + return Err(lookahead.error()) }; Ok(Self { @@ -150,32 +137,31 @@ } #[derive(Debug, Clone)] -pub struct ModuleDeclaration { +pub struct PalletDeclaration { pub name: Ident, /// Optional fixed index (e.g. `MyPallet ... = 3,`) pub index: Option<u8>, - pub module: Ident, + pub path: PalletPath, pub instance: Option<Ident>, - pub module_parts: Vec<ModulePart>, + pub pallet_parts: Vec<PalletPart>, } -impl Parse for ModuleDeclaration { +impl Parse for PalletDeclaration { fn parse(input: ParseStream) -> Result<Self> { let name = input.parse()?; let _: Token![:] = input.parse()?; - let module = input.parse()?; - let instance = if input.peek(Token![::]) && input.peek3(Token![<]) { - let _: Token![::] = input.parse()?; + let path = input.parse()?; + let instance = if input.peek(Token![<]) { let _: Token![<] = input.parse()?; let res = Some(input.parse()?); let _: Token![>] = input.parse()?; + let _: Token![::] = input.parse()?; res } else { None }; - let _: Token![::] = input.parse()?; - let module_parts = parse_module_parts(input)?; + let pallet_parts = parse_pallet_parts(input)?; let index = if input.peek(Token![=]) { input.parse::<Token![=]>()?; @@ -186,41 +172,84 @@ impl Parse for ModuleDeclaration { None }; - let parsed = Self { - name, - module, - instance, - module_parts, - index, - }; + let parsed = Self { name, path, instance, pallet_parts, index }; Ok(parsed) } } -/// Parse [`ModulePart`]'s from a braces enclosed list that is split by commas, e.g.
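The parser changes above move the instance generic in front of the part list (`Name: path::<Instance>::{Parts} = index`). The segment loop at the heart of the `PalletPath` parser that follows can be exercised in isolation; here is a minimal sketch using the same `syn` APIs (`SimplePath` is an invented helper type, and `IdentExt::parse_any` accepts keyword segments such as `crate`):

```rust
// Minimal stand-alone version of the `::`-separated segment loop used by
// PalletPath::parse (assumes syn 1.x).
use syn::{
    ext::IdentExt,
    parse::{Parse, ParseStream},
    Ident, Result, Token,
};

struct SimplePath {
    segments: Vec<Ident>,
}

impl Parse for SimplePath {
    fn parse(input: ParseStream) -> Result<Self> {
        // `parse_any` also accepts keywords, mirroring the special-casing of
        // `crate`/`self`/`super` as a first segment.
        let mut segments = vec![input.call(Ident::parse_any)?];
        while input.peek(Token![::]) {
            let _: Token![::] = input.parse()?;
            segments.push(input.call(Ident::parse_any)?);
        }
        Ok(Self { segments })
    }
}

fn main() {
    let path: SimplePath = syn::parse_str("crate::pallet_template").unwrap();
    assert_eq!(path.segments.len(), 2);
}
```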
+/// A struct representing a path to a pallet. `PalletPath` is almost identical to the standard +/// Rust path with a few restrictions: +/// - No leading colons allowed +/// - Path segments can only consist of identifiers; angle-bracketed or parenthesized segments will +/// result in a parsing error (except when specifying instances) +#[derive(Debug, Clone)] +pub struct PalletPath { + pub inner: Path, +} + +impl Parse for PalletPath { + fn parse(input: ParseStream) -> Result<Self> { + let mut lookahead = input.lookahead1(); + let mut segments = Punctuated::new(); + + if lookahead.peek(Token![crate]) || + lookahead.peek(Token![self]) || + lookahead.peek(Token![super]) || + lookahead.peek(Ident) + { + let ident = input.call(Ident::parse_any)?; + segments.push(PathSegment { ident, arguments: PathArguments::None }); + let _: Token![::] = input.parse()?; + lookahead = input.lookahead1(); + } else { + return Err(lookahead.error()) + } + + while lookahead.peek(Ident) { + let ident = input.parse()?; + segments.push(PathSegment { ident, arguments: PathArguments::None }); + let _: Token![::] = input.parse()?; + lookahead = input.lookahead1(); + } + + if !lookahead.peek(token::Brace) && !lookahead.peek(Token![<]) { + return Err(lookahead.error()) + } + + Ok(Self { inner: Path { leading_colon: None, segments } }) + } +} + +impl quote::ToTokens for PalletPath { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.inner.to_tokens(tokens); + } +} + +/// Parse [`PalletPart`]'s from a braces enclosed list that is split by commas, e.g. /// /// `{ Call, Event }` -fn parse_module_parts(input: ParseStream) -> Result<Vec<ModulePart>> { - let module_parts :ext::Braces<ext::Punctuated<ModulePart, Token![,]>> = input.parse()?; +fn parse_pallet_parts(input: ParseStream) -> Result<Vec<PalletPart>> { + let pallet_parts: ext::Braces<ext::Punctuated<PalletPart, Token![,]>> = input.parse()?; let mut resolved = HashSet::new(); - for part in module_parts.content.inner.iter() { + for part in pallet_parts.content.inner.iter() { if !resolved.insert(part.name()) { let msg = format!( "`{}` was already declared before. Please remove the duplicate declaration", part.name(), ); - return Err(Error::new(part.keyword.span(), msg)); + return Err(Error::new(part.keyword.span(), msg)) } } - Ok(module_parts.content.inner.into_iter().collect()) + Ok(pallet_parts.content.inner.into_iter().collect()) } #[derive(Debug, Clone)] -pub enum ModulePartKeyword { - Module(keyword::Module), +pub enum PalletPartKeyword { + Pallet(keyword::Pallet), Call(keyword::Call), Storage(keyword::Storage), Event(keyword::Event), @@ -230,12 +259,12 @@ pub enum ModulePartKeyword { ValidateUnsigned(keyword::ValidateUnsigned), } -impl Parse for ModulePartKeyword { +impl Parse for PalletPartKeyword { fn parse(input: ParseStream) -> Result<Self> { let lookahead = input.lookahead1(); - if lookahead.peek(keyword::Module) { - Ok(Self::Module(input.parse()?)) + if lookahead.peek(keyword::Pallet) { + Ok(Self::Pallet(input.parse()?)) } else if lookahead.peek(keyword::Call) { Ok(Self::Call(input.parse()?)) } else if lookahead.peek(keyword::Storage) { @@ -256,11 +285,11 @@ } } -impl ModulePartKeyword { +impl PalletPartKeyword { /// Returns the name of `Self`. fn name(&self) -> &'static str { match self { - Self::Module(_) => "Module", + Self::Pallet(_) => "Pallet", Self::Call(_) => "Call", Self::Storage(_) => "Storage", Self::Event(_) => "Event", @@ -271,26 +300,21 @@ } } - /// Returns the name as `Ident`. - fn ident(&self) -> Ident { - Ident::new(self.name(), self.span()) - } - - /// Returns `true` if this module part is allowed to have generic arguments. 
+ /// Returns `true` if this pallet part is allowed to have generic arguments. fn allows_generic(&self) -> bool { Self::all_generic_arg().iter().any(|n| *n == self.name()) } - /// Returns the names of all module parts that allow to have a generic argument. + /// Returns the names of all pallet parts that allow to have a generic argument. fn all_generic_arg() -> &'static [&'static str] { &["Event", "Origin", "Config"] } } -impl Spanned for ModulePartKeyword { +impl Spanned for PalletPartKeyword { fn span(&self) -> Span { match self { - Self::Module(inner) => inner.span(), + Self::Pallet(inner) => inner.span(), Self::Call(inner) => inner.span(), Self::Storage(inner) => inner.span(), Self::Event(inner) => inner.span(), @@ -303,49 +327,41 @@ impl Spanned for ModulePartKeyword { } #[derive(Debug, Clone)] -pub struct ModulePart { - pub keyword: ModulePartKeyword, +pub struct PalletPart { + pub keyword: PalletPartKeyword, pub generics: syn::Generics, } -impl Parse for ModulePart { +impl Parse for PalletPart { fn parse(input: ParseStream) -> Result { - let keyword: ModulePartKeyword = input.parse()?; + let keyword: PalletPartKeyword = input.parse()?; let generics: syn::Generics = input.parse()?; if !generics.params.is_empty() && !keyword.allows_generic() { - let valid_generics = ModulePart::format_names(ModulePartKeyword::all_generic_arg()); + let valid_generics = PalletPart::format_names(PalletPartKeyword::all_generic_arg()); let msg = format!( "`{}` is not allowed to have generics. \ - Only the following modules are allowed to have generics: {}.", + Only the following pallets are allowed to have generics: {}.", keyword.name(), valid_generics, ); - return Err(syn::Error::new(keyword.span(), msg)); + return Err(syn::Error::new(keyword.span(), msg)) } - Ok(Self { - keyword, - generics, - }) + Ok(Self { keyword, generics }) } } -impl ModulePart { +impl PalletPart { pub fn format_names(names: &[&'static str]) -> String { - let res: Vec<_> = names.into_iter().map(|s| format!("`{}`", s)).collect(); + let res: Vec<_> = names.iter().map(|s| format!("`{}`", s)).collect(); res.join(", ") } - /// The name of this module part. + /// The name of this pallet part. pub fn name(&self) -> &'static str { self.keyword.name() } - - /// The name of this module part as `Ident`. - pub fn ident(&self) -> Ident { - self.keyword.ident() - } } fn remove_kind( diff --git a/frame/support/procedural/src/debug_no_bound.rs b/frame/support/procedural/src/debug_no_bound.rs index 2a818fb205fb8..acfd8d0cabc8a 100644 --- a/frame/support/procedural/src/debug_no_bound.rs +++ b/frame/support/procedural/src/debug_no_bound.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,9 +30,10 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke let impl_ = match input.data { syn::Data::Struct(struct_) => match struct_.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() - .map(|i| &i.ident) - .map(|i| quote::quote_spanned!(i.span() => .field(stringify!(#i), &self.#i) )); + let fields = + named.named.iter().map(|i| &i.ident).map( + |i| quote::quote_spanned!(i.span() => .field(stringify!(#i), &self.#i) ), + ); quote::quote!( fmt.debug_struct(stringify!(#input_ident)) @@ -41,7 +42,10 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() + let fields = unnamed + .unnamed + .iter() + .enumerate() .map(|(i, _)| syn::Index::from(i)) .map(|i| quote::quote_spanned!(i.span() => .field(&self.#i) )); @@ -51,46 +55,50 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke .finish() ) }, - syn::Fields::Unit => quote::quote!( fmt.write_str(stringify!(#input_ident)) ), + syn::Fields::Unit => quote::quote!(fmt.write_str(stringify!(#input_ident))), }, syn::Data::Enum(enum_) => { - let variants = enum_.variants.iter() - .map(|variant| { - let ident = &variant.ident; - let full_variant_str = format!("{}::{}", input_ident, ident); - match &variant.fields { - syn::Fields::Named(named) => { - let captured = named.named.iter().map(|i| &i.ident); - let debugged = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => - .field(stringify!(#i), &#i) - )); - quote::quote!( - Self::#ident { #( ref #captured, )* } => { - fmt.debug_struct(#full_variant_str) - #( #debugged )* - .finish() - } + let variants = enum_.variants.iter().map(|variant| { + let ident = &variant.ident; + let full_variant_str = format!("{}::{}", input_ident, ident); + match &variant.fields { + syn::Fields::Named(named) => { + let captured = named.named.iter().map(|i| &i.ident); + let debugged = captured.clone().map(|i| { + quote::quote_spanned!(i.span() => + .field(stringify!(#i), &#i) ) - }, - syn::Fields::Unnamed(unnamed) => { - let captured = unnamed.unnamed.iter().enumerate() - .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); - let debugged = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => .field(&#i))); - quote::quote!( - Self::#ident ( #( ref #captured, )* ) => { - fmt.debug_tuple(#full_variant_str) - #( #debugged )* - .finish() - } - ) - }, - syn::Fields::Unit => quote::quote!( - Self::#ident => fmt.write_str(#full_variant_str) - ), - } - }); + }); + quote::quote!( + Self::#ident { #( ref #captured, )* } => { + fmt.debug_struct(#full_variant_str) + #( #debugged )* + .finish() + } + ) + }, + syn::Fields::Unnamed(unnamed) => { + let captured = unnamed + .unnamed + .iter() + .enumerate() + .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); + let debugged = captured + .clone() + .map(|i| quote::quote_spanned!(i.span() => .field(&#i))); + quote::quote!( + Self::#ident ( #( ref #captured, )* ) => { + fmt.debug_tuple(#full_variant_str) + #( #debugged )* + .finish() + } + ) + }, + syn::Fields::Unit => quote::quote!( + Self::#ident => fmt.write_str(#full_variant_str) + ), + } + }); quote::quote!(match *self { #( #variants, )* @@ -110,5 +118,6 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke } } }; - ).into() + ) + .into() } diff --git 
a/frame/support/procedural/src/default_no_bound.rs b/frame/support/procedural/src/default_no_bound.rs new file mode 100644 index 0000000000000..38d6e19b1732f --- /dev/null +++ b/frame/support/procedural/src/default_no_bound.rs @@ -0,0 +1,103 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; + +/// Derive `Default` but do not bound any generic. +pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input: syn::DeriveInput = match syn::parse(input) { + Ok(input) => input, + Err(e) => return e.to_compile_error().into(), + }; + + let name = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + + let impl_ = match input.data { + syn::Data::Struct(struct_) => match struct_.fields { + syn::Fields::Named(named) => { + let fields = named.named.iter().map(|i| &i.ident).map(|i| { + quote::quote_spanned!(i.span() => + #i: core::default::Default::default() + ) + }); + + quote::quote!( Self { #( #fields, )* } ) + }, + syn::Fields::Unnamed(unnamed) => { + let fields = + unnamed.unnamed.iter().enumerate().map(|(i, _)| syn::Index::from(i)).map(|i| { + quote::quote_spanned!(i.span() => + core::default::Default::default() + ) + }); + + quote::quote!( Self ( #( #fields, )* ) ) + }, + syn::Fields::Unit => { + quote::quote!(Self) + }, + }, + syn::Data::Enum(enum_) => + if let Some(first_variant) = enum_.variants.first() { + let variant_ident = &first_variant.ident; + match &first_variant.fields { + syn::Fields::Named(named) => { + let fields = named.named.iter().map(|i| &i.ident).map(|i| { + quote::quote_spanned!(i.span() => + #i: core::default::Default::default() + ) + }); + + quote::quote!( #name :: #ty_generics :: #variant_ident { #( #fields, )* } ) + }, + syn::Fields::Unnamed(unnamed) => { + let fields = unnamed + .unnamed + .iter() + .enumerate() + .map(|(i, _)| syn::Index::from(i)) + .map(|i| { + quote::quote_spanned!(i.span() => + core::default::Default::default() + ) + }); + + quote::quote!( #name :: #ty_generics :: #variant_ident ( #( #fields, )* ) ) + }, + syn::Fields::Unit => quote::quote!( #name :: #ty_generics :: #variant_ident ), + } + } else { + quote::quote!(Self) + }, + syn::Data::Union(_) => { + let msg = "Union type not supported by `derive(DefaultNoBound)`"; + return syn::Error::new(input.span(), msg).to_compile_error().into() + }, + }; + + quote::quote!( + const _: () = { + impl #impl_generics core::default::Default for #name #ty_generics #where_clause { + fn default() -> Self { + #impl_ + } + } + }; + ) + .into() +} diff --git a/frame/support/procedural/src/dummy_part_checker.rs b/frame/support/procedural/src/dummy_part_checker.rs new file mode 100644 index 0000000000000..792b17a8f7758 --- /dev/null +++ b/frame/support/procedural/src/dummy_part_checker.rs @@ -0,0 +1,62 @@ +use crate::COUNTER; +use proc_macro::TokenStream; + +pub fn 
generate_dummy_part_checker(input: TokenStream) -> TokenStream { + if !input.is_empty() { + return syn::Error::new(proc_macro2::Span::call_site(), "No arguments expected") + .to_compile_error() + .into() + } + + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + + let no_op_macro_ident = + syn::Ident::new(&format!("__dummy_part_checker_{}", count), proc_macro2::Span::call_site()); + + quote::quote!( + #[macro_export] + #[doc(hidden)] + macro_rules! #no_op_macro_ident { + ( $( $tt:tt )* ) => {}; + } + + #[doc(hidden)] + pub mod __substrate_genesis_config_check { + #[doc(hidden)] + pub use #no_op_macro_ident as is_genesis_config_defined; + #[doc(hidden)] + pub use #no_op_macro_ident as is_std_enabled_for_genesis; + } + + #[doc(hidden)] + pub mod __substrate_event_check { + #[doc(hidden)] + pub use #no_op_macro_ident as is_event_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_inherent_check { + #[doc(hidden)] + pub use #no_op_macro_ident as is_inherent_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_validate_unsigned_check { + #[doc(hidden)] + pub use #no_op_macro_ident as is_validate_unsigned_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_call_check { + #[doc(hidden)] + pub use #no_op_macro_ident as is_call_part_defined; + } + + #[doc(hidden)] + pub mod __substrate_origin_check { + #[doc(hidden)] + pub use #no_op_macro_ident as is_origin_part_defined; + } + ) + .into() +} diff --git a/frame/support/procedural/src/key_prefix.rs b/frame/support/procedural/src/key_prefix.rs new file mode 100644 index 0000000000000..3f424e8b8b8dd --- /dev/null +++ b/frame/support/procedural/src/key_prefix.rs @@ -0,0 +1,99 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use proc_macro2::{Span, TokenStream}; +use quote::{format_ident, quote, ToTokens}; +use syn::{Ident, Result}; + +const MAX_IDENTS: usize = 18; + +pub fn impl_key_prefix_for_tuples(input: proc_macro::TokenStream) -> Result { + if !input.is_empty() { + return Err(syn::Error::new(Span::call_site(), "No arguments expected")) + } + + let mut all_trait_impls = TokenStream::new(); + + for i in 2..=MAX_IDENTS { + let current_tuple = (0..i) + .map(|n| Ident::new(&format!("Tuple{}", n), Span::call_site())) + .collect::>(); + + for prefix_count in 1..i { + let (prefixes, suffixes) = current_tuple.split_at(prefix_count); + + let hashers = current_tuple + .iter() + .map(|ident| format_ident!("Hasher{}", ident)) + .collect::>(); + let kargs = + prefixes.iter().map(|ident| format_ident!("KArg{}", ident)).collect::>(); + let partial_keygen = generate_keygen(prefixes); + let suffix_keygen = generate_keygen(suffixes); + let suffix_tuple = generate_tuple(suffixes); + + let trait_impls = quote! 
{ + impl< + #(#current_tuple: FullCodec + StaticTypeInfo,)* + #(#hashers: StorageHasher,)* + #(#kargs: EncodeLike<#prefixes>),* + > HasKeyPrefix<( #( #kargs, )* )> for ( #( Key<#hashers, #current_tuple>, )* ) { + type Suffix = #suffix_tuple; + + fn partial_key(prefix: ( #( #kargs, )* )) -> Vec<u8> { + <#partial_keygen>::final_key(prefix) + } + } + + impl< + #(#current_tuple: FullCodec + StaticTypeInfo,)* + #(#hashers: ReversibleStorageHasher,)* + #(#kargs: EncodeLike<#prefixes>),* + > HasReversibleKeyPrefix<( #( #kargs, )* )> for ( #( Key<#hashers, #current_tuple>, )* ) { + fn decode_partial_key(key_material: &[u8]) -> Result<Self::Suffix, codec::Error> { + <#suffix_keygen>::decode_final_key(key_material).map(|k| k.0) + } + } + }; + + all_trait_impls.extend(trait_impls); + } + } + + Ok(all_trait_impls) +} + +fn generate_tuple(idents: &[Ident]) -> TokenStream { + if idents.len() == 1 { + idents[0].to_token_stream() + } else { + quote!((#(#idents),*)) + } +} + +fn generate_keygen(idents: &[Ident]) -> TokenStream { + if idents.len() == 1 { + let key = &idents[0]; + let hasher = format_ident!("Hasher{}", key); + + quote!(Key<#hasher, #key>) + } else { + let hashers = idents.iter().map(|ident| format_ident!("Hasher{}", ident)); + + quote!((#(Key<#hashers, #idents>),*)) + } +} diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 009e02f3c265b..a8ac022c35c6b 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,16 +17,40 @@ //! Proc macro of Support code for the runtime. -#![recursion_limit="512"] +#![recursion_limit = "512"] -mod storage; +mod clone_no_bound; mod construct_runtime; -mod transactional; mod debug_no_bound; -mod clone_no_bound; +mod default_no_bound; +mod dummy_part_checker; +mod key_prefix; +mod pallet; mod partial_eq_no_bound; +mod storage; +mod transactional; use proc_macro::TokenStream; +use std::cell::RefCell; +pub(crate) use storage::INHERENT_INSTANCE_NAME; + +thread_local! { + /// A global counter, can be used to generate a relatively unique identifier. + static COUNTER: RefCell<Counter> = RefCell::new(Counter(0)); +} + +/// Counter to generate a relatively unique identifier for macros querying for the existence of +/// pallet parts. This is necessary because declarative macros get hoisted to the crate root, +/// which shares the namespace with other pallets containing the very same query macros. +struct Counter(u64); + +impl Counter { + fn inc(&mut self) -> u64 { + let ret = self.0; + self.0 += 1; + ret + } +}
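Because `proc-macro2` and `quote` work as ordinary dependencies outside a proc-macro crate, the helpers above can be checked directly. A small sketch mirroring `generate_tuple` (not the exported macro itself):

```rust
// Mirrors generate_tuple above: one type stays bare, several become a tuple.
use proc_macro2::{Ident, Span, TokenStream};
use quote::{quote, ToTokens};

fn generate_tuple(idents: &[Ident]) -> TokenStream {
    if idents.len() == 1 {
        idents[0].to_token_stream()
    } else {
        quote!((#(#idents),*))
    }
}

fn main() {
    let t0 = Ident::new("Tuple0", Span::call_site());
    let t1 = Ident::new("Tuple1", Span::call_site());
    println!("{}", generate_tuple(&[t0.clone()])); // Tuple0
    println!("{}", generate_tuple(&[t0, t1]));     // (Tuple0 , Tuple1)
}
```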
/// Declares strongly-typed wrappers around codec-compatible types in storage. /// /// ```nocompile /// decl_storage! { -/// trait Store for Module<T: Trait> as Example { /// Foo get(fn foo) config(): u32=12; /// Bar: map hasher(identity) u32 => u32; /// pub Zed build(|config| vec![(0, 0)]): map hasher(identity) u32 => u32; @@ -42,7 +66,7 @@ use proc_macro::TokenStream; /// } /// ``` /// -/// Declaration is set with the header `(pub) trait Store for Module<T: Trait> as Example`, +/// Declaration is set with the header `(pub) trait Store for Module<T: Config> as Example`, /// with `Store` a (pub) trait generated associating each storage item to the `Module` and /// `as Example` setting the prefix used for storage items of this module. `Example` must be unique: /// another module with the same name and the same inner storage item name will conflict. @@ -68,23 +92,24 @@ use proc_macro::TokenStream; /// ``` /// /// * Map: `Foo: map hasher($hash) type => type`: Implements the -/// [`StorageMap`](../frame_support/storage/trait.StorageMap.html) trait using the -/// [`StorageMap generator`](../frame_support/storage/generator/trait.StorageMap.html). -/// And [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). +/// [`StorageMap`](../frame_support/storage/trait.StorageMap.html) trait using the [`StorageMap +/// generator`](../frame_support/storage/generator/trait.StorageMap.html). And +/// [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). /// /// `$hash` representing a choice of hashing algorithms available in the /// [`Hashable`](../frame_support/trait.Hashable.html) trait. You will generally want to use one /// of three hashers: /// * `blake2_128_concat`: The default, safe choice. Use if you are unsure or don't care. It is -/// secure against user-tainted keys, fairly fast and memory-efficient and supports -/// iteration over its keys and values. This must be used if the keys of your map can be -/// selected *en masse* by untrusted users. +/// secure against user-tainted keys, fairly fast and memory-efficient and supports iteration +/// over its keys and values. This must be used if the keys of your map can be selected *en +/// masse* by untrusted users. /// * `twox_64_concat`: This is an insecure hasher and can only be used safely if you know that /// the preimages cannot be chosen at will by untrusted users. It is memory-efficient, extremely /// performant and supports iteration over its keys and values. You can safely use this if the /// key is: /// - A (slowly) incrementing index. -/// - Known to be the result of a cryptographic hash (though `identity` is a better choice here). +/// - Known to be the result of a cryptographic hash (though `identity` is a better choice +/// here). /// - Known to be the public key of a cryptographic key pair in existence. /// * `identity`: This is not a hasher at all, and just uses the key material directly. Since it /// does no hashing or appending, it's the fastest possible hasher, however, it's also the least @@ -108,8 +133,9 @@ use proc_macro::TokenStream;
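As a hedged illustration of the hasher guidance above, one map per hasher class in `decl_storage!` syntax (nocompile-style like the surrounding docs; the storage names are invented):

```rust
// Illustrative only: decl_storage! needs a full pallet around it to compile.
decl_storage! {
    trait Store for Module<T: Config> as Example {
        // Keys can be chosen en masse by untrusted users: default, safe hasher.
        AccountData: map hasher(blake2_128_concat) T::AccountId => u64;
        // Keys are a slowly incrementing index the user cannot choose freely.
        ByIndex: map hasher(twox_64_concat) u32 => T::AccountId;
        // Key material is already the output of a cryptographic hash.
        ByHash: map hasher(identity) T::Hash => u64;
    }
}
```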
/// /// * Double map: `Foo: double_map hasher($hash1) u32, hasher($hash2) u32 => u32`: Implements the /// [`StorageDoubleMap`](../frame_support/storage/trait.StorageDoubleMap.html) trait using the -/// [`StorageDoubleMap generator`](../frame_support/storage/generator/trait.StorageDoubleMap.html). -/// And [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). +/// [`StorageDoubleMap +/// generator`](../frame_support/storage/generator/trait.StorageDoubleMap.html). And +/// [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). /// /// `$hash1` and `$hash2` representing choices of hashing algorithms available in the /// [`Hashable`](../frame_support/trait.Hashable.html) trait. They must be chosen with care, see /// @@ -123,8 +149,8 @@ use proc_macro::TokenStream; /// /// Thus keys are stored at: /// ```nocompile -/// Twox128(module_prefix) ++ Twox128(storage_prefix) ++ Hasher1(encode(key1)) ++ Hasher2(encode(key2)) -/// ``` +/// Twox128(module_prefix) ++ Twox128(storage_prefix) ++ Hasher1(encode(key1)) ++ +/// Hasher2(encode(key2)) +/// ``` /// /// Supported hashers (ordered from least to best security): /// @@ -151,6 +177,9 @@ use proc_macro::TokenStream; /// * \[optional\] `config(#field_name)`: `field_name` is optional if get is set. /// Will include the item in `GenesisConfig`. /// * \[optional\] `build(#closure)`: Closure called with storage overlays. +/// * \[optional\] `max_values(#expr)`: `expr` is an expression returning a `u32`. It is used to +/// implement `StorageInfoTrait`. Note this attribute is not available for storage value as the +/// maximum number of values is 1. /// * `#type`: Storage type. /// * \[optional\] `#default`: Value returned when none. /// @@ -168,18 +197,18 @@ use proc_macro::TokenStream; /// /// ```nocompile /// decl_storage! { -/// trait Store for Module<T: Trait> as Example { +/// trait Store for Module<T: Config> as Example { /// /// // Your storage items /// } -/// add_extra_genesis { -/// config(genesis_field): GenesisFieldType; -/// config(genesis_field2): GenesisFieldType; -/// ... -/// build(|_: &Self| { -/// // Modification of storage -/// }) -/// } +/// add_extra_genesis { +/// config(genesis_field): GenesisFieldType; +/// config(genesis_field2): GenesisFieldType; +/// ... +/// build(|_: &Self| { +/// // Modification of storage +/// }) +/// } /// } /// ``` /// @@ -189,9 +218,9 @@ use proc_macro::TokenStream; /// construct_runtime!( /// pub enum Runtime with ... { /// ..., -/// Example: example::{Module, Storage, ..., Config}, +/// Example: example::{Pallet, Storage, ..., Config}, /// ..., -/// } +/// } /// ); /// ``` /// @@ -201,7 +230,7 @@ use proc_macro::TokenStream; /// (`DefaultInstance` type is optional): /// /// ```nocompile -/// trait Store for Module<T: Trait, I: Instance=DefaultInstance> as Example {} +/// trait Store for Module<T: Config, I: Instance=DefaultInstance> as Example {} /// ``` /// /// Accessing the structure now requires the instance as generic parameter: @@ -213,7 +242,7 @@ use proc_macro::TokenStream; /// This macro supports a where clause which will be replicated to all generated types. /// /// ```nocompile -/// trait Store for Module<T: Trait> as Example where T::AccountId: std::fmt::Display {} +/// trait Store for Module<T: Config> as Example where T::AccountId: std::fmt::Display {} /// ``` /// /// ## Limitations /// @@ -229,20 +258,29 @@ use proc_macro::TokenStream; /// add_extra_genesis { /// config(phantom): std::marker::PhantomData<T>, /// } -/// ... +/// ``` /// /// This adds a field to your `GenesisConfig` with the name `phantom` that you can initialize with /// `Default::default()`. /// +/// ## PoV information +/// +/// To implement the trait `StorageInfoTrait` for storages an additional attribute can be used +/// `generate_storage_info`: +/// ```nocompile +/// decl_storage! { generate_storage_info +/// trait Store for ... +/// } +/// ``` #[proc_macro] pub fn decl_storage(input: TokenStream) -> TokenStream { storage::decl_storage_impl(input) }
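The key layout described above can be reproduced byte-for-byte outside the runtime. A sketch assuming the `sp-core` and `parity-scale-codec` crates ("Example" and "Foo" are hypothetical module/storage prefixes):

```rust
// Recomputes: Twox128(module_prefix) ++ Twox128(storage_prefix) ++
//             Blake2_128Concat(key1) ++ Twox64Concat(key2)
use parity_scale_codec::Encode;
use sp_core::hashing::{blake2_128, twox_128, twox_64};

fn double_map_final_key(key1: &u32, key2: &u64) -> Vec<u8> {
    let mut key = Vec::new();
    key.extend_from_slice(&twox_128(b"Example")); // module prefix
    key.extend_from_slice(&twox_128(b"Foo"));     // storage prefix

    // blake2_128_concat = hash ++ original encoding (enables key iteration).
    let encoded1 = key1.encode();
    key.extend_from_slice(&blake2_128(&encoded1));
    key.extend_from_slice(&encoded1);

    // twox_64_concat for the second key.
    let encoded2 = key2.encode();
    key.extend_from_slice(&twox_64(&encoded2));
    key.extend_from_slice(&encoded2);

    key
}

fn main() {
    // 16 + 16 + (16 + 4) + (8 + 8) bytes.
    assert_eq!(double_map_final_key(&1, &2).len(), 68);
}
```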
-/// Construct a runtime, with the given name and the given modules. +/// Construct a runtime, with the given name and the given pallets. /// /// The parameters here are specific types for `Block`, `NodeBlock`, and `UncheckedExtrinsic` -/// and the modules that are used by the runtime. +/// and the pallets that are used by the runtime. /// `Block` is the block type that is used in the runtime and `NodeBlock` is the block type /// that is used in the node. For instance they can differ in the extrinsics type. /// @@ -252,58 +290,70 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// construct_runtime!( /// pub enum Runtime where /// Block = Block, -/// NodeBlock = runtime::Block, +/// NodeBlock = node::Block, /// UncheckedExtrinsic = UncheckedExtrinsic /// { -/// System: system::{Module, Call, Event<T>, Config} = 0, -/// Test: test::{Module, Call} = 1, -/// Test2: test_with_long_module::{Module, Event<T>}, +/// System: system::{Pallet, Call, Event<T>, Config} = 0, +/// Test: test::{Pallet, Call} = 1, +/// Test2: test_with_long_module::{Pallet, Event<T>}, /// -/// // Module with instances -/// Test3_Instance1: test3::<Instance1>::{Module, Call, Storage, Event<T, I>, Config<T, I>, Origin<T, I>}, -/// Test3_DefaultInstance: test3::{Module, Call, Storage, Event<T>, Config<T>, Origin<T>} = 4, +/// // Pallets with instances +/// Test3_Instance1: test3::<Instance1>::{Pallet, Call, Storage, Event<T, I>, Config<T, I>, Origin<T, I>}, +/// Test3_DefaultInstance: test3::{Pallet, Call, Storage, Event<T>, Config<T>, Origin<T>} = 4, /// } /// ) /// ``` /// /// The identifier `System` is the name of the pallet and the lower case identifier `system` is the -/// name of the Rust module/crate for this Substrate module. The identifiers between the braces are -/// the module parts provided by the pallet. It is important to list these parts here to export +/// name of the Rust module/crate for this Substrate pallet. The identifiers between the braces are +/// the pallet parts provided by the pallet. It is important to list these parts here to export /// them correctly in the metadata or to make the pallet usable in the runtime. /// /// We provide support for the following module parts in a pallet: /// -/// - `Module` -/// - `Call` -/// - `Storage` -/// - `Event` or `Event<T>` (if the event is generic) -/// - `Origin` or `Origin<T>` (if the origin is generic) -/// - `Config` or `Config<T>` (if the config is generic) -/// - `Inherent` - If the module provides/can check inherents. -/// - `ValidateUnsigned` - If the module validates unsigned extrinsics. -/// -/// `= $n` is an optional part allowing to define at which index the module variants in +/// - `Pallet` - Required for all pallets +/// - `Call` - If the pallet has callable functions +/// - `Storage` - If the pallet uses storage +/// - `Event` or `Event<T>` (if the event is generic) - If the pallet emits events +/// - `Origin` or `Origin<T>` (if the origin is generic) - If the pallet has instantiable origins +/// - `Config` or `Config<T>` (if the config is generic) - If the pallet builds the genesis storage +/// with `GenesisConfig` +/// - `Inherent` - If the pallet provides/can check inherents. +/// - `ValidateUnsigned` - If the pallet validates unsigned extrinsics. +/// +/// `= $n` is an optional part allowing to define at which index the pallet variants in +/// `OriginCaller`, `Call` and `Event` are encoded, and to define the ModuleToIndex value. /// /// if `= $n` is not given, then index is resolved same as fieldless enum in Rust /// (i.e. incrementally from previous index): /// ```nocompile -/// module1 .. = 2, -/// module2 .., // Here module2 is given index 3 -/// module3 .. = 0, -/// module4 .., // Here module4 is given index 1 +/// pallet1 .. = 2, +/// pallet2 .., // Here pallet2 is given index 3 +/// pallet3 .. = 0, +/// pallet4 .., // Here pallet4 is given index 1 /// ```
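The index-resolution rule above can be checked against an ordinary fieldless Rust enum, which is exactly the behaviour it mirrors:

```rust
// Fieldless-enum discriminants resolve the same way as pallet indices above.
enum PalletIndex {
    Pallet1 = 2,
    Pallet2, // 3: increments from the previous explicit index
    Pallet3 = 0,
    Pallet4, // 1
}

fn main() {
    assert_eq!(PalletIndex::Pallet2 as u8, 3);
    assert_eq!(PalletIndex::Pallet4 as u8, 1);
}
```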
/// /// # Note /// -/// The population of the genesis storage depends on the order of modules. So, if one of your -/// modules depends on another module, the module that is depended upon needs to come before -/// the module depending on it. +/// The population of the genesis storage depends on the order of pallets. So, if one of your +/// pallets depends on another pallet, the pallet that is depended upon needs to come before +/// the pallet depending on it. +/// +/// # Type definitions +/// +/// * The macro generates a type alias for each pallet to their `Module` (or `Pallet`). E.g. `type +/// System = frame_system::Pallet<Runtime>` #[proc_macro] pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } +/// Macro to define a pallet. Docs are at `frame_support::pallet`. +#[proc_macro_attribute] +pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { + pallet::pallet(attr, item) +} + /// Execute the annotated function in a new storage transaction. /// /// The return type of the annotated function must be `Result`. All changes to storage performed /// @@ -335,7 +385,7 @@ pub fn derive_clone_no_bound(input: TokenStream) -> TokenStream { clone_no_bound::derive_clone_no_bound(input) } -/// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DeriveNoBounds`. +/// Derive [`Debug`] but do not bound any generics. Docs are at `frame_support::DebugNoBound`. #[proc_macro_derive(DebugNoBound)] pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { debug_no_bound::derive_debug_no_bound(input) @@ -364,7 +414,8 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { } } }; - ).into() + ) + .into() } #[cfg(feature = "std")] @@ -395,10 +446,37 @@ pub fn derive_eq_no_bound(input: TokenStream) -> TokenStream { const _: () = { impl #impl_generics core::cmp::Eq for #name #ty_generics #where_clause {} }; - ).into() + ) + .into() +} + +/// Derive `Default` but do not bound any generic. Docs are at `frame_support::DefaultNoBound`. +#[proc_macro_derive(DefaultNoBound)] +pub fn derive_default_no_bound(input: TokenStream) -> TokenStream { + default_no_bound::derive_default_no_bound(input) } #[proc_macro_attribute] pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStream { - transactional::require_transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) + transactional::require_transactional(attr, input) + .unwrap_or_else(|e| e.to_compile_error().into()) +} + +/// The number of module instances supported by the runtime, starting at index 1, +/// and up to `NUMBER_OF_INSTANCE`. +pub(crate) const NUMBER_OF_INSTANCE: u8 = 16; +
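For orientation alongside the `#[pallet]` attribute registered above, here is the rough skeleton of a module it expects. This is an illustrative sketch for the frame-support of this era (it assumes `frame-support`/`frame-system` as dependencies and only compiles inside such a crate):

```rust
// Minimal illustrative #[pallet] module skeleton (era-appropriate sketch).
#[frame_support::pallet]
pub mod pallet {
    use core::marker::PhantomData;
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    #[pallet::pallet]
    pub struct Pallet<T>(PhantomData<T>);

    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

    #[pallet::call]
    impl<T: Config> Pallet<T> {}
}
```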
+/// This macro is meant to be used by frame-support only. +/// It implements the traits `HasKeyPrefix` and `HasReversibleKeyPrefix` for tuples of `Key`. +#[proc_macro] +pub fn impl_key_prefix_for_tuples(input: TokenStream) -> TokenStream { + key_prefix::impl_key_prefix_for_tuples(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +/// Internal macro used by frame_support to generate a dummy part checker for old pallet declarations +#[proc_macro] +pub fn __generate_dummy_part_checker(input: TokenStream) -> TokenStream { + dummy_part_checker::generate_dummy_part_checker(input) } diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs new file mode 100644 index 0000000000000..8f7bcdccaf22d --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -0,0 +1,288 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, COUNTER}; +use syn::spanned::Spanned; + +/// +/// * Generate enum call and implement various trait on it. +/// * Implement Callable and call_function on `Pallet` +pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { + let (span, where_clause, methods, docs) = match def.call.as_ref() { + Some(call) => { + let span = call.attr_span; + let where_clause = call.where_clause.clone(); + let methods = call.methods.clone(); + let docs = call.docs.clone(); + + (span, where_clause, methods, docs) + }, + None => (def.item.span(), None, Vec::new(), Vec::new()), + }; + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let type_impl_gen = &def.type_impl_generics(span); + let type_decl_bounded_gen = &def.type_decl_bounded_generics(span); + let type_use_gen = &def.type_use_generics(span); + let call_ident = syn::Ident::new("Call", span); + let pallet_ident = &def.pallet_struct.pallet; + + let fn_name = methods.iter().map(|method| &method.name).collect::<Vec<_>>(); + let new_call_variant_fn_name = fn_name + .iter() + .map(|fn_name| quote::format_ident!("new_call_variant_{}", fn_name)) + .collect::<Vec<_>>(); + + let new_call_variant_doc = fn_name + .iter() + .map(|fn_name| format!("Create a call with the variant `{}`.", fn_name)) + .collect::<Vec<_>>(); + + let fn_weight = methods.iter().map(|method| &method.weight); + + let fn_doc = methods.iter().map(|method| &method.docs).collect::<Vec<_>>(); + + let args_name = methods + .iter() + .map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>()) + .collect::<Vec<_>>(); + + let args_name_stripped = methods + .iter() + .map(|method| { + method + .args + .iter() + .map(|(_, name, _)| { + syn::Ident::new(&name.to_string().trim_start_matches('_'), name.span()) + }) + .collect::<Vec<_>>() + }) + .collect::<Vec<_>>(); + + let make_args_name_pattern = |ref_tok| { + args_name + .iter() + .zip(args_name_stripped.iter()) + .map(|(args_name, args_name_stripped)| { + args_name + .iter() + .zip(args_name_stripped) + .map(|(args_name, args_name_stripped)| { + if args_name == args_name_stripped { + quote::quote!( #ref_tok #args_name ) + } else { 
quote::quote!( #args_name_stripped: #ref_tok #args_name ) + } + }) + .collect::>() + }) + .collect::>() + }; + + let args_name_pattern = make_args_name_pattern(None); + let args_name_pattern_ref = make_args_name_pattern(Some(quote::quote!(ref))); + + let args_type = methods + .iter() + .map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::>()) + .collect::>(); + + let args_compact_attr = methods.iter().map(|method| { + method + .args + .iter() + .map(|(is_compact, _, type_)| { + if *is_compact { + quote::quote_spanned!(type_.span() => #[codec(compact)] ) + } else { + quote::quote!() + } + }) + .collect::>() + }); + + let default_docs = [syn::parse_quote!( + r"Contains one variant per dispatchable that can be called by an extrinsic." + )]; + let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] }; + + let maybe_compile_error = if def.call.is_none() { + quote::quote! { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::call] defined, perhaps you should remove `Call` from \ + construct_runtime?", + )); + } + } else { + proc_macro2::TokenStream::new() + }; + + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span); + + quote::quote_spanned!(span => + #[doc(hidden)] + pub mod __substrate_call_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + }; + } + + #[doc(hidden)] + pub use #macro_ident as is_call_part_defined; + } + + #( #[doc = #docs] )* + #[derive( + #frame_support::RuntimeDebugNoBound, + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::codec::Encode, + #frame_support::codec::Decode, + #frame_support::scale_info::TypeInfo, + )] + #[codec(encode_bound())] + #[codec(decode_bound())] + #[scale_info(skip_type_params(#type_use_gen), capture_docs = "always")] + #[allow(non_camel_case_types)] + pub enum #call_ident<#type_decl_bounded_gen> #where_clause { + #[doc(hidden)] + #[codec(skip)] + __Ignore( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen,)>, + #frame_support::Never, + ), + #( + #( #[doc = #fn_doc] )* + #fn_name { + #( #args_compact_attr #args_name_stripped: #args_type ),* + }, + )* + } + + impl<#type_impl_gen> #call_ident<#type_use_gen> #where_clause { + #( + #[doc = #new_call_variant_doc] + pub fn #new_call_variant_fn_name( + #( #args_name_stripped: #args_type ),* + ) -> Self { + Self::#fn_name { + #( #args_name_stripped ),* + } + } + )* + } + + impl<#type_impl_gen> #frame_support::dispatch::GetDispatchInfo + for #call_ident<#type_use_gen> + #where_clause + { + fn get_dispatch_info(&self) -> #frame_support::dispatch::DispatchInfo { + match *self { + #( + Self::#fn_name { #( #args_name_pattern_ref, )* } => { + let __pallet_base_weight = #fn_weight; + + let __pallet_weight = < + dyn #frame_support::dispatch::WeighData<( #( & #args_type, )* )> + >::weigh_data(&__pallet_base_weight, ( #( #args_name, )* )); + + let __pallet_class = < + dyn #frame_support::dispatch::ClassifyDispatch< + ( #( & #args_type, )* ) + > + >::classify_dispatch(&__pallet_base_weight, ( #( #args_name, )* )); + + let __pallet_pays_fee = < + dyn #frame_support::dispatch::PaysFee<( #( & #args_type, )* )> + >::pays_fee(&__pallet_base_weight, ( #( #args_name, )* )); + + #frame_support::dispatch::DispatchInfo { + weight: __pallet_weight, + class: __pallet_class, + pays_fee: __pallet_pays_fee, + } + }, + 
)* + Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), + } + } + } + + impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen> + #where_clause + { + fn get_call_name(&self) -> &'static str { + match *self { + #( Self::#fn_name { .. } => stringify!(#fn_name), )* + Self::__Ignore(_, _) => unreachable!("__PhantomItem cannot be used."), + } + } + + fn get_call_names() -> &'static [&'static str] { + &[ #( stringify!(#fn_name), )* ] + } + } + + impl<#type_impl_gen> #frame_support::traits::UnfilteredDispatchable + for #call_ident<#type_use_gen> + #where_clause + { + type Origin = #frame_system::pallet_prelude::OriginFor; + fn dispatch_bypass_filter( + self, + origin: Self::Origin + ) -> #frame_support::dispatch::DispatchResultWithPostInfo { + match self { + #( + Self::#fn_name { #( #args_name_pattern, )* } => { + #frame_support::sp_tracing::enter_span!( + #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) + ); + <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) + .map(Into::into).map_err(Into::into) + }, + )* + Self::__Ignore(_, _) => { + let _ = origin; // Use origin for empty Call enum + unreachable!("__PhantomItem cannot be used."); + }, + } + } + } + + impl<#type_impl_gen> #frame_support::dispatch::Callable for #pallet_ident<#type_use_gen> + #where_clause + { + type Call = #call_ident<#type_use_gen>; + } + + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { + #[doc(hidden)] + pub fn call_functions() -> #frame_support::metadata::PalletCallMetadata { + #frame_support::scale_info::meta_type::<#call_ident<#type_use_gen>>().into() + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/config.rs b/frame/support/procedural/src/pallet/expand/config.rs new file mode 100644 index 0000000000000..dad26ccad6dc1 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/config.rs @@ -0,0 +1,45 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use frame_support_procedural_tools::get_doc_literals; + +/// +/// * Generate default rust doc +pub fn expand_config(def: &mut Def) -> proc_macro2::TokenStream { + let config = &def.config; + let config_item = { + let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[config.index]; + if let syn::Item::Trait(item) = item { + item + } else { + unreachable!("Checked by config parser") + } + }; + + if get_doc_literals(&config_item.attrs).is_empty() { + config_item.attrs.push(syn::parse_quote!( + #[doc = r" + Configuration trait of this pallet. + + Implement this type for a runtime in order to customize this pallet. 
+ "] + )); + } + + Default::default() +} diff --git a/frame/support/procedural/src/pallet/expand/constants.rs b/frame/support/procedural/src/pallet/expand/constants.rs new file mode 100644 index 0000000000000..7cc245e8089df --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/constants.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +struct ConstDef { + /// Name of the associated type. + pub ident: syn::Ident, + /// The type in Get, e.g. `u32` in `type Foo: Get;`, but `Self` is replaced by `T` + pub type_: syn::Type, + /// The doc associated + pub doc: Vec, + /// default_byte implementation + pub default_byte_impl: proc_macro2::TokenStream, +} + +/// +/// * Impl fn module_constant_metadata for pallet. +pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); + let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); + let pallet_ident = &def.pallet_struct.pallet; + + let mut where_clauses = vec![&def.config.where_clause]; + where_clauses.extend(def.extra_constants.iter().map(|d| &d.where_clause)); + let completed_where_clause = super::merge_where_clauses(&where_clauses); + + let config_consts = def.config.consts_metadata.iter().map(|const_| { + let ident = &const_.ident; + let const_type = &const_.type_; + + ConstDef { + ident: const_.ident.clone(), + type_: const_.type_.clone(), + doc: const_.doc.clone(), + default_byte_impl: quote::quote!( + let value = >::get(); + #frame_support::codec::Encode::encode(&value) + ), + } + }); + + let extra_consts = def.extra_constants.iter().flat_map(|d| &d.extra_constants).map(|const_| { + let ident = &const_.ident; + + ConstDef { + ident: const_.ident.clone(), + type_: const_.type_.clone(), + doc: const_.doc.clone(), + default_byte_impl: quote::quote!( + let value = >::#ident(); + #frame_support::codec::Encode::encode(&value) + ), + } + }); + + let consts = config_consts.chain(extra_consts).map(|const_| { + let const_type = &const_.type_; + let ident = &const_.ident; + let ident_str = format!("{}", ident); + let doc = const_.doc.clone().into_iter(); + let default_byte_impl = &const_.default_byte_impl; + + quote::quote!({ + #frame_support::metadata::PalletConstantMetadata { + name: #ident_str, + ty: #frame_support::scale_info::meta_type::<#const_type>(), + value: { #default_byte_impl }, + docs: #frame_support::sp_std::vec![ #( #doc ),* ], + } + }) + }); + + quote::quote!( + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause{ + + #[doc(hidden)] + pub fn pallet_constants_metadata() + -> #frame_support::sp_std::vec::Vec<#frame_support::metadata::PalletConstantMetadata> + { + #frame_support::sp_std::vec![ #( #consts ),* ] + } + } + ) +} diff --git 
a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs new file mode 100644 index 0000000000000..7a058bb32c922 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use frame_support_procedural_tools::get_doc_literals; + +/// +/// * impl various trait on Error +pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { + let error = if let Some(error) = &def.error { error } else { return Default::default() }; + + let error_ident = &error.error; + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let type_impl_gen = &def.type_impl_generics(error.attr_span); + let type_use_gen = &def.type_use_generics(error.attr_span); + let config_where_clause = &def.config.where_clause; + + let phantom_variant: syn::Variant = syn::parse_quote!( + #[doc(hidden)] + #[codec(skip)] + __Ignore( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen)>, + #frame_support::Never, + ) + ); + + let as_u8_matches = error.variants.iter().enumerate().map( + |(i, (variant, _))| quote::quote_spanned!(error.attr_span => Self::#variant => #i as u8,), + ); + + let as_str_matches = error.variants.iter().map(|(variant, _)| { + let variant_str = format!("{}", variant); + quote::quote_spanned!(error.attr_span => Self::#variant => #variant_str,) + }); + + let error_item = { + let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[error.index]; + if let syn::Item::Enum(item) = item { + item + } else { + unreachable!("Checked by error parser") + } + }; + + error_item.variants.insert(0, phantom_variant); + // derive TypeInfo for error metadata + error_item + .attrs + .push(syn::parse_quote!( #[derive(#frame_support::scale_info::TypeInfo)] )); + error_item.attrs.push(syn::parse_quote!( + #[scale_info(skip_type_params(#type_use_gen), capture_docs = "always")] + )); + + if get_doc_literals(&error_item.attrs).is_empty() { + error_item.attrs.push(syn::parse_quote!( + #[doc = r" + Custom [dispatch errors](https://substrate.dev/docs/en/knowledgebase/runtime/errors) + of this pallet. 
+ "] + )); + } + + quote::quote_spanned!(error.attr_span => + impl<#type_impl_gen> #frame_support::sp_std::fmt::Debug for #error_ident<#type_use_gen> + #config_where_clause + { + fn fmt(&self, f: &mut #frame_support::sp_std::fmt::Formatter<'_>) + -> #frame_support::sp_std::fmt::Result + { + f.write_str(self.as_str()) + } + } + + impl<#type_impl_gen> #error_ident<#type_use_gen> #config_where_clause { + pub fn as_u8(&self) -> u8 { + match &self { + Self::__Ignore(_, _) => unreachable!("`__Ignore` can never be constructed"), + #( #as_u8_matches )* + } + } + + pub fn as_str(&self) -> &'static str { + match &self { + Self::__Ignore(_, _) => unreachable!("`__Ignore` can never be constructed"), + #( #as_str_matches )* + } + } + } + + impl<#type_impl_gen> From<#error_ident<#type_use_gen>> for &'static str + #config_where_clause + { + fn from(err: #error_ident<#type_use_gen>) -> &'static str { + err.as_str() + } + } + + impl<#type_impl_gen> From<#error_ident<#type_use_gen>> + for #frame_support::sp_runtime::DispatchError + #config_where_clause + { + fn from(err: #error_ident<#type_use_gen>) -> Self { + let index = < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::index::>() + .expect("Every active module has an index in the runtime; qed") as u8; + + #frame_support::sp_runtime::DispatchError::Module { + index, + error: err.as_u8(), + message: Some(err.as_str()), + } + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs new file mode 100644 index 0000000000000..ebd2d7aeabaff --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -0,0 +1,173 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + pallet::{parse::event::PalletEventDepositAttr, Def}, + COUNTER, +}; +use frame_support_procedural_tools::get_doc_literals; +use syn::{spanned::Spanned, Ident}; + +/// +/// * Add __Ignore variant on Event +/// * Impl various trait on Event including metadata +/// * if deposit_event is defined, implement deposit_event on module. +pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + + let (event, macro_ident) = if let Some(event) = &def.event { + let ident = Ident::new(&format!("__is_event_part_defined_{}", count), event.attr_span); + (event, ident) + } else { + let macro_ident = + Ident::new(&format!("__is_event_part_defined_{}", count), def.item.span()); + + return quote::quote! { + #[doc(hidden)] + pub mod __substrate_event_check { + #[macro_export] + #[doc(hidden)] + macro_rules! 
#macro_ident { + ($pallet_name:ident) => { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::event] defined, perhaps you should \ + remove `Event` from construct_runtime?", + )); + } + } + + #[doc(hidden)] + pub use #macro_ident as is_event_part_defined; + } + } + }; + + let event_where_clause = &event.where_clause; + + // NOTE: actually event where clause must be a subset of config where clause because of + // `type Event: From>`. But we merge either way for potential better error message + let completed_where_clause = + super::merge_where_clauses(&[&event.where_clause, &def.config.where_clause]); + + let event_ident = &event.event; + let frame_system = &def.frame_system; + let frame_support = &def.frame_support; + let event_use_gen = &event.gen_kind.type_use_gen(event.attr_span); + let event_impl_gen = &event.gen_kind.type_impl_gen(event.attr_span); + + let event_item = { + let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[event.index]; + if let syn::Item::Enum(item) = item { + item + } else { + unreachable!("Checked by event parser") + } + }; + + // Phantom data is added for generic event. + if event.gen_kind.is_generic() { + let variant = syn::parse_quote!( + #[doc(hidden)] + #[codec(skip)] + __Ignore( + #frame_support::sp_std::marker::PhantomData<(#event_use_gen)>, + #frame_support::Never, + ) + ); + + // Push ignore variant at the end. + event_item.variants.push(variant); + } + + if get_doc_literals(&event_item.attrs).is_empty() { + event_item.attrs.push(syn::parse_quote!( + #[doc = r" + The [event](https://substrate.dev/docs/en/knowledgebase/runtime/events) emitted + by this pallet. + "] + )); + } + + // derive some traits because system event require Clone, FullCodec, Eq, PartialEq and Debug + event_item.attrs.push(syn::parse_quote!( + #[derive( + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::RuntimeDebugNoBound, + #frame_support::codec::Encode, + #frame_support::codec::Decode, + #frame_support::scale_info::TypeInfo, + )] + )); + + // skip requirement for type params to implement `TypeInfo`, and require docs capture + event_item.attrs.push(syn::parse_quote!( + #[scale_info(skip_type_params(#event_use_gen), capture_docs = "always")] + )); + + let deposit_event = if let Some(deposit_event) = &event.deposit_event { + let event_use_gen = &event.gen_kind.type_use_gen(event.attr_span); + let trait_use_gen = &def.trait_use_generics(event.attr_span); + let type_impl_gen = &def.type_impl_generics(event.attr_span); + let type_use_gen = &def.type_use_generics(event.attr_span); + + let PalletEventDepositAttr { fn_vis, fn_span, .. } = deposit_event; + + quote::quote_spanned!(*fn_span => + impl<#type_impl_gen> Pallet<#type_use_gen> #completed_where_clause { + #fn_vis fn deposit_event(event: Event<#event_use_gen>) { + let event = < + ::Event as + From> + >::from(event); + + let event = < + ::Event as + Into<::Event> + >::into(event); + + <#frame_system::Pallet>::deposit_event(event) + } + } + ) + } else { + Default::default() + }; + + quote::quote_spanned!(event.attr_span => + #[doc(hidden)] + pub mod __substrate_event_check { + #[macro_export] + #[doc(hidden)] + macro_rules! 
#macro_ident { + ($pallet_name:ident) => {}; + } + + #[doc(hidden)] + pub use #macro_ident as is_event_part_defined; + } + + #deposit_event + + impl<#event_impl_gen> From<#event_ident<#event_use_gen>> for () #event_where_clause { + fn from(_: #event_ident<#event_use_gen>) {} + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/genesis_build.rs b/frame/support/procedural/src/pallet/expand/genesis_build.rs new file mode 100644 index 0000000000000..06acaf324254c --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// +/// * implement the trait `sp_runtime::BuildModuleGenesisStorage` +/// * add #[cfg(features = "std")] to GenesisBuild implementation. +pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { + let genesis_config = if let Some(genesis_config) = &def.genesis_config { + genesis_config + } else { + return Default::default() + }; + let genesis_build = def.genesis_build.as_ref().expect("Checked by def parser"); + + let frame_support = &def.frame_support; + let type_impl_gen = &def.type_impl_generics(genesis_build.attr_span); + let type_use_gen = &def.type_use_generics(genesis_build.attr_span); + let trait_use_gen = if def.config.has_instance { + quote::quote_spanned!(genesis_build.attr_span => T, I) + } else { + // `__InherentHiddenInstance` used by construct_runtime here is alias for `()` + quote::quote_spanned!(genesis_build.attr_span => T, ()) + }; + let gen_cfg_ident = &genesis_config.genesis_config; + + let gen_cfg_use_gen = genesis_config.gen_kind.type_use_gen(genesis_build.attr_span); + + let genesis_build_item = + &mut def.item.content.as_mut().expect("Checked by def parser").1[genesis_build.index]; + + let genesis_build_item_impl = if let syn::Item::Impl(impl_) = genesis_build_item { + impl_ + } else { + unreachable!("Checked by genesis_build parser") + }; + + genesis_build_item_impl.attrs.push(syn::parse_quote!( #[cfg(feature = "std")] )); + let where_clause = &genesis_build.where_clause; + + quote::quote_spanned!(genesis_build.attr_span => + #[cfg(feature = "std")] + impl<#type_impl_gen> #frame_support::sp_runtime::BuildModuleGenesisStorage<#trait_use_gen> + for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause + { + fn build_module_genesis_storage( + &self, + storage: &mut #frame_support::sp_runtime::Storage, + ) -> std::result::Result<(), std::string::String> { + #frame_support::BasicExternalities::execute_with_storage(storage, || { + >::build(self); + Ok(()) + }) + } + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs new file mode 100644 index 0000000000000..4bbba2c05908e --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -0,0 +1,147 @@ +// This file is 
part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, COUNTER}; +use frame_support_procedural_tools::get_doc_literals; +use syn::{spanned::Spanned, Ident}; + +/// +/// * add various derive trait on GenesisConfig struct. +pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + + let (genesis_config, def_macro_ident, std_macro_ident) = + if let Some(genesis_config) = &def.genesis_config { + let def_macro_ident = Ident::new( + &format!("__is_genesis_config_defined_{}", count), + genesis_config.genesis_config.span(), + ); + + let std_macro_ident = Ident::new( + &format!("__is_std_macro_defined_for_genesis_{}", count), + genesis_config.genesis_config.span(), + ); + + (genesis_config, def_macro_ident, std_macro_ident) + } else { + let def_macro_ident = + Ident::new(&format!("__is_genesis_config_defined_{}", count), def.item.span()); + + let std_macro_ident = + Ident::new(&format!("__is_std_enabled_for_genesis_{}", count), def.item.span()); + + return quote::quote! { + #[doc(hidden)] + pub mod __substrate_genesis_config_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #def_macro_ident { + ($pallet_name:ident) => { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::genesis_config] defined, perhaps you should \ + remove `Config` from construct_runtime?", + )); + } + } + + #[macro_export] + #[doc(hidden)] + macro_rules! #std_macro_ident { + ($pallet_name:ident, $pallet_path:expr) => {}; + } + + #[doc(hidden)] + pub use #def_macro_ident as is_genesis_config_defined; + #[doc(hidden)] + pub use #std_macro_ident as is_std_enabled_for_genesis; + } + } + }; + + let frame_support = &def.frame_support; + + let genesis_config_item = + &mut def.item.content.as_mut().expect("Checked by def parser").1[genesis_config.index]; + + let serde_crate = format!("{}::serde", frame_support); + + match genesis_config_item { + syn::Item::Enum(syn::ItemEnum { attrs, .. }) | + syn::Item::Struct(syn::ItemStruct { attrs, .. }) | + syn::Item::Type(syn::ItemType { attrs, .. }) => { + if get_doc_literals(&attrs).is_empty() { + attrs.push(syn::parse_quote!( + #[doc = r" + Can be used to configure the + [genesis state](https://substrate.dev/docs/en/knowledgebase/integrate/chain-spec#the-genesis-state) + of this pallet. 
+ "] + )); + } + attrs.push(syn::parse_quote!( #[cfg(feature = "std")] )); + attrs.push(syn::parse_quote!( + #[derive(#frame_support::Serialize, #frame_support::Deserialize)] + )); + attrs.push(syn::parse_quote!( #[serde(rename_all = "camelCase")] )); + attrs.push(syn::parse_quote!( #[serde(deny_unknown_fields)] )); + attrs.push(syn::parse_quote!( #[serde(bound(serialize = ""))] )); + attrs.push(syn::parse_quote!( #[serde(bound(deserialize = ""))] )); + attrs.push(syn::parse_quote!( #[serde(crate = #serde_crate)] )); + }, + _ => unreachable!("Checked by genesis_config parser"), + } + + quote::quote! { + #[doc(hidden)] + pub mod __substrate_genesis_config_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #def_macro_ident { + ($pallet_name:ident) => {}; + } + + #[cfg(not(feature = "std"))] + #[macro_export] + #[doc(hidden)] + macro_rules! #std_macro_ident { + ($pallet_name:ident, $pallet_path:expr) => { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have the std feature enabled, this will cause the `", + $pallet_path, + "::GenesisConfig` type to be undefined." + )); + }; + } + + #[cfg(feature = "std")] + #[macro_export] + #[doc(hidden)] + macro_rules! #std_macro_ident { + ($pallet_name:ident, $pallet_path:expr) => {}; + } + + #[doc(hidden)] + pub use #def_macro_ident as is_genesis_config_defined; + #[doc(hidden)] + pub use #std_macro_ident as is_std_enabled_for_genesis; + } + } +} diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs new file mode 100644 index 0000000000000..e0b7e3669da43 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -0,0 +1,195 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// +/// * implement the individual traits using the Hooks trait +pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { + let (where_clause, span, has_runtime_upgrade) = match def.hooks.as_ref() { + Some(hooks) => { + let where_clause = hooks.where_clause.clone(); + let span = hooks.attr_span; + let has_runtime_upgrade = hooks.has_runtime_upgrade; + (where_clause, span, has_runtime_upgrade) + }, + None => (None, def.pallet_struct.attr_span, false), + }; + + let frame_support = &def.frame_support; + let type_impl_gen = &def.type_impl_generics(span); + let type_use_gen = &def.type_use_generics(span); + let pallet_ident = &def.pallet_struct.pallet; + let frame_system = &def.frame_system; + + let log_runtime_upgrade = if has_runtime_upgrade { + // a migration is defined here. + quote::quote! { + #frame_support::log::info!( + target: #frame_support::LOG_TARGET, + "⚠️ {} declares internal migrations (which *might* execute). \ + On-chain `{:?}` vs current storage version `{:?}`", + pallet_name, + ::on_chain_storage_version(), + ::current_storage_version(), + ); + } + } else { + // default. 
+		quote::quote! {
+			#frame_support::log::info!(
+				target: #frame_support::LOG_TARGET,
+				"✅ no migration for {}",
+				pallet_name,
+			);
+		}
+	};
+
+	let hooks_impl = if def.hooks.is_none() {
+		let frame_system = &def.frame_system;
+		quote::quote! {
+			impl<#type_impl_gen>
+				#frame_support::traits::Hooks<<T as #frame_system::Config>::BlockNumber>
+				for Pallet<#type_use_gen> {}
+		}
+	} else {
+		proc_macro2::TokenStream::new()
+	};
+
+	quote::quote_spanned!(span =>
+		#hooks_impl
+
+		impl<#type_impl_gen>
+			#frame_support::traits::OnFinalize<<T as #frame_system::Config>::BlockNumber>
+			for #pallet_ident<#type_use_gen> #where_clause
+		{
+			fn on_finalize(n: <T as #frame_system::Config>::BlockNumber) {
+				#frame_support::sp_tracing::enter_span!(
+					#frame_support::sp_tracing::trace_span!("on_finalize")
+				);
+				<
+					Self as #frame_support::traits::Hooks<
+						<T as #frame_system::Config>::BlockNumber
+					>
+				>::on_finalize(n)
+			}
+		}
+
+		impl<#type_impl_gen>
+			#frame_support::traits::OnIdle<<T as #frame_system::Config>::BlockNumber>
+			for #pallet_ident<#type_use_gen> #where_clause
+		{
+			fn on_idle(
+				n: <T as #frame_system::Config>::BlockNumber,
+				remaining_weight: #frame_support::weights::Weight
+			) -> #frame_support::weights::Weight {
+				<
+					Self as #frame_support::traits::Hooks<
+						<T as #frame_system::Config>::BlockNumber
+					>
+				>::on_idle(n, remaining_weight)
+			}
+		}
+
+		impl<#type_impl_gen>
+			#frame_support::traits::OnInitialize<<T as #frame_system::Config>::BlockNumber>
+			for #pallet_ident<#type_use_gen> #where_clause
+		{
+			fn on_initialize(
+				n: <T as #frame_system::Config>::BlockNumber
+			) -> #frame_support::weights::Weight {
+				#frame_support::sp_tracing::enter_span!(
+					#frame_support::sp_tracing::trace_span!("on_initialize")
+				);
+				<
+					Self as #frame_support::traits::Hooks<
+						<T as #frame_system::Config>::BlockNumber
+					>
+				>::on_initialize(n)
+			}
+		}
+
+		impl<#type_impl_gen>
+			#frame_support::traits::OnRuntimeUpgrade
+			for #pallet_ident<#type_use_gen> #where_clause
+		{
+			fn on_runtime_upgrade() -> #frame_support::weights::Weight {
+				#frame_support::sp_tracing::enter_span!(
+					#frame_support::sp_tracing::trace_span!("on_runtime_update")
+				);
+
+				// log info about the upgrade.
+				let pallet_name = <
+					<T as #frame_system::Config>::PalletInfo
+					as
+					#frame_support::traits::PalletInfo
+				>::name::<Self>().unwrap_or("<unknown pallet name>");
+				#log_runtime_upgrade
+
+				<
+					Self as #frame_support::traits::Hooks<
+						<T as #frame_system::Config>::BlockNumber
+					>
+				>::on_runtime_upgrade()
+			}
+
+			#[cfg(feature = "try-runtime")]
+			fn pre_upgrade() -> Result<(), &'static str> {
+				<
+					Self
+					as
+					#frame_support::traits::Hooks<<T as #frame_system::Config>::BlockNumber>
+				>::pre_upgrade()
+			}
+
+			#[cfg(feature = "try-runtime")]
+			fn post_upgrade() -> Result<(), &'static str> {
+				<
+					Self
+					as
+					#frame_support::traits::Hooks<<T as #frame_system::Config>::BlockNumber>
+				>::post_upgrade()
+			}
+		}
+
+		impl<#type_impl_gen>
+			#frame_support::traits::OffchainWorker<<T as #frame_system::Config>::BlockNumber>
+			for #pallet_ident<#type_use_gen> #where_clause
+		{
+			fn offchain_worker(n: <T as #frame_system::Config>::BlockNumber) {
+				<
+					Self as #frame_support::traits::Hooks<
+						<T as #frame_system::Config>::BlockNumber
+					>
+				>::offchain_worker(n)
+			}
+		}
+
+		impl<#type_impl_gen>
+			#frame_support::traits::IntegrityTest
+			for #pallet_ident<#type_use_gen> #where_clause
+		{
+			fn integrity_test() {
+				<
+					Self as #frame_support::traits::Hooks<
+						<T as #frame_system::Config>::BlockNumber
+					>
+				>::integrity_test()
+			}
+		}
+	)
+}
diff --git a/frame/support/procedural/src/pallet/expand/inherent.rs b/frame/support/procedural/src/pallet/expand/inherent.rs
new file mode 100644
index 0000000000000..185211ecd4df2
--- /dev/null
+++ b/frame/support/procedural/src/pallet/expand/inherent.rs
@@ -0,0 +1,55 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, COUNTER}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{spanned::Spanned, Ident}; + +pub fn expand_inherents(def: &mut Def) -> TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = Ident::new(&format!("__is_inherent_part_defined_{}", count), def.item.span()); + + let maybe_compile_error = if def.inherent.is_none() { + quote! { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::inherent] defined, perhaps you should \ + remove `Inherent` from construct_runtime?", + )); + } + } else { + TokenStream::new() + }; + + quote! { + #[doc(hidden)] + pub mod __substrate_inherent_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + } + } + + #[doc(hidden)] + pub use #macro_ident as is_inherent_part_defined; + } + } +} diff --git a/frame/support/procedural/src/pallet/expand/instances.rs b/frame/support/procedural/src/pallet/expand/instances.rs new file mode 100644 index 0000000000000..2ecb5ec481ac4 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/instances.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, NUMBER_OF_INSTANCE}; +use proc_macro2::Span; + +/// +/// * Provide inherent instance to be used by construct_runtime +/// * Provide Instance1 ..= Instance16 for instantiable pallet +pub fn expand_instances(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let inherent_ident = syn::Ident::new(crate::INHERENT_INSTANCE_NAME, Span::call_site()); + let instances = if def.config.has_instance { + (1..=NUMBER_OF_INSTANCE) + .map(|i| syn::Ident::new(&format!("Instance{}", i), Span::call_site())) + .collect() + } else { + vec![] + }; + + quote::quote!( + /// Hidden instance generated to be internally used when module is used without + /// instance. + #[doc(hidden)] + pub type #inherent_ident = (); + + #( pub use #frame_support::instances::#instances; )* + ) +} diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs new file mode 100644 index 0000000000000..1c8883977c765 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -0,0 +1,109 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod call; +mod config; +mod constants; +mod error; +mod event; +mod genesis_build; +mod genesis_config; +mod hooks; +mod inherent; +mod instances; +mod origin; +mod pallet_struct; +mod storage; +mod store_trait; +mod type_value; +mod validate_unsigned; + +use crate::pallet::Def; +use frame_support_procedural_tools::get_doc_literals; +use quote::ToTokens; + +/// Merge where clause together, `where` token span is taken from the first not none one. +pub fn merge_where_clauses(clauses: &[&Option]) -> Option { + let mut clauses = clauses.iter().filter_map(|f| f.as_ref()); + let mut res = clauses.next()?.clone(); + for other in clauses { + res.predicates.extend(other.predicates.iter().cloned()) + } + Some(res) +} + +/// Expand definition, in particular: +/// * add some bounds and variants to type defined, +/// * create some new types, +/// * impl stuff on them. +pub fn expand(mut def: Def) -> proc_macro2::TokenStream { + let constants = constants::expand_constants(&mut def); + let pallet_struct = pallet_struct::expand_pallet_struct(&mut def); + let config = config::expand_config(&mut def); + let call = call::expand_call(&mut def); + let error = error::expand_error(&mut def); + let event = event::expand_event(&mut def); + let storages = storage::expand_storages(&mut def); + let inherents = inherent::expand_inherents(&mut def); + let instances = instances::expand_instances(&mut def); + let store_trait = store_trait::expand_store_trait(&mut def); + let hooks = hooks::expand_hooks(&mut def); + let genesis_build = genesis_build::expand_genesis_build(&mut def); + let genesis_config = genesis_config::expand_genesis_config(&mut def); + let type_values = type_value::expand_type_values(&mut def); + let origins = origin::expand_origins(&mut def); + let validate_unsigned = validate_unsigned::expand_validate_unsigned(&mut def); + + if get_doc_literals(&def.item.attrs).is_empty() { + def.item.attrs.push(syn::parse_quote!( + #[doc = r" + The module that hosts all the + [FRAME](https://substrate.dev/docs/en/knowledgebase/runtime/frame) + types needed to add this pallet to a + [runtime](https://substrate.dev/docs/en/knowledgebase/runtime/). + "] + )); + } + + let new_items = quote::quote!( + #constants + #pallet_struct + #config + #call + #error + #event + #storages + #inherents + #instances + #store_trait + #hooks + #genesis_build + #genesis_config + #type_values + #origins + #validate_unsigned + ); + + def.item + .content + .as_mut() + .expect("This is checked by parsing") + .1 + .push(syn::Item::Verbatim(new_items)); + + def.item.into_token_stream() +} diff --git a/frame/support/procedural/src/pallet/expand/origin.rs b/frame/support/procedural/src/pallet/expand/origin.rs new file mode 100644 index 0000000000000..987512f69a02b --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/origin.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, COUNTER}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{spanned::Spanned, Ident}; + +pub fn expand_origins(def: &mut Def) -> TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = Ident::new(&format!("__is_origin_part_defined_{}", count), def.item.span()); + + let maybe_compile_error = if def.origin.is_none() { + quote! { + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::origin] defined, perhaps you should \ + remove `Origin` from construct_runtime?", + )); + } + } else { + TokenStream::new() + }; + + quote! { + #[doc(hidden)] + pub mod __substrate_origin_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + } + } + + #[doc(hidden)] + pub use #macro_ident as is_origin_part_defined; + } + } +} diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs new file mode 100644 index 0000000000000..a217742fec55d --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -0,0 +1,215 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
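+// For orientation, the expansion below operates on a pallet struct declared
+// roughly as follows (illustrative sketch, not part of this file):
+//
+//     #[pallet::pallet]
+//     #[pallet::generate_store(pub(super) trait Store)]
+//     pub struct Pallet<T>(_);
+//
+// The `_` field becomes `PhantomData`, and the derives, the `Module` alias and
+// the trait implementations listed in the doc comment below are generated.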
+ +use crate::pallet::{expand::merge_where_clauses, Def}; +use frame_support_procedural_tools::get_doc_literals; + +/// +/// * Add derive trait on Pallet +/// * Implement GetStorageVersion on Pallet +/// * Implement OnGenesis on Pallet +/// * Implement `fn error_metadata` on Pallet +/// * declare Module type alias for construct_runtime +/// * replace the first field type of `struct Pallet` with `PhantomData` if it is `_` +/// * implementation of `PalletInfoAccess` information +/// * implementation of `StorageInfoTrait` on Pallet +pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let type_impl_gen = &def.type_impl_generics(def.pallet_struct.attr_span); + let type_use_gen = &def.type_use_generics(def.pallet_struct.attr_span); + let type_decl_gen = &def.type_decl_generics(def.pallet_struct.attr_span); + let pallet_ident = &def.pallet_struct.pallet; + let config_where_clause = &def.config.where_clause; + + let mut storages_where_clauses = vec![&def.config.where_clause]; + storages_where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); + let storages_where_clauses = merge_where_clauses(&storages_where_clauses); + + let pallet_item = { + let pallet_module_items = &mut def.item.content.as_mut().expect("Checked by def").1; + let item = &mut pallet_module_items[def.pallet_struct.index]; + if let syn::Item::Struct(item) = item { + item + } else { + unreachable!("Checked by pallet struct parser") + } + }; + + // If the first field type is `_` then we replace with `PhantomData` + if let Some(field) = pallet_item.fields.iter_mut().next() { + if field.ty == syn::parse_quote!(_) { + field.ty = syn::parse_quote!( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen)> + ); + } + } + + if get_doc_literals(&pallet_item.attrs).is_empty() { + pallet_item.attrs.push(syn::parse_quote!( + #[doc = r" + The [pallet](https://substrate.dev/docs/en/knowledgebase/runtime/pallets) implementing + the on-chain logic. + "] + )); + } + + pallet_item.attrs.push(syn::parse_quote!( + #[derive( + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::RuntimeDebugNoBound, + )] + )); + + let pallet_error_metadata = if let Some(error_def) = &def.error { + let error_ident = &error_def.error; + quote::quote_spanned!(def.pallet_struct.attr_span => + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #config_where_clause { + pub fn error_metadata() -> Option<#frame_support::metadata::PalletErrorMetadata> { + Some(#frame_support::metadata::PalletErrorMetadata { + ty: #frame_support::scale_info::meta_type::<#error_ident<#type_use_gen>>() + }) + } + } + ) + } else { + quote::quote_spanned!(def.pallet_struct.attr_span => + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #config_where_clause { + pub fn error_metadata() -> Option<#frame_support::metadata::PalletErrorMetadata> { + None + } + } + ) + }; + + // Depending on the flag `generate_storage_info` we use partial or full storage info from + // storage. 
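+	// Illustrative sketch: full storage info is opted into on the pallet
+	// struct itself, e.g.
+	//
+	//     #[pallet::pallet]
+	//     #[pallet::generate_storage_info]
+	//     pub struct Pallet<T>(_);
+	//
+	// in which case every storage item must implement `StorageInfoTrait`
+	// (i.e. have bounded sizes); without the attribute the expansion falls
+	// back to `PartialStorageInfoTrait` below.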
+ let (storage_info_span, storage_info_trait, storage_info_method) = + if let Some(span) = def.pallet_struct.generate_storage_info { + ( + span, + quote::quote_spanned!(span => StorageInfoTrait), + quote::quote_spanned!(span => storage_info), + ) + } else { + let span = def.pallet_struct.attr_span; + ( + span, + quote::quote_spanned!(span => PartialStorageInfoTrait), + quote::quote_spanned!(span => partial_storage_info), + ) + }; + + let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); + let storage_cfg_attrs = + &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::>(); + + let storage_info = quote::quote_spanned!(storage_info_span => + impl<#type_impl_gen> #frame_support::traits::StorageInfoTrait + for #pallet_ident<#type_use_gen> + #storages_where_clauses + { + fn storage_info() + -> #frame_support::sp_std::vec::Vec<#frame_support::traits::StorageInfo> + { + #[allow(unused_mut)] + let mut res = #frame_support::sp_std::vec![]; + + #( + #(#storage_cfg_attrs)* + { + let mut storage_info = < + #storage_names<#type_use_gen> + as #frame_support::traits::#storage_info_trait + >::#storage_info_method(); + res.append(&mut storage_info); + } + )* + + res + } + } + ); + + let storage_version = if let Some(v) = def.pallet_struct.storage_version.as_ref() { + quote::quote! { #v } + } else { + quote::quote! { #frame_support::traits::StorageVersion::default() } + }; + + quote::quote_spanned!(def.pallet_struct.attr_span => + #pallet_error_metadata + + /// Type alias to `Pallet`, to be used by `construct_runtime`. + /// + /// Generated by `pallet` attribute macro. + #[deprecated(note = "use `Pallet` instead")] + #[allow(dead_code)] + pub type Module<#type_decl_gen> = #pallet_ident<#type_use_gen>; + + // Implement `GetStorageVersion` for `Pallet` + impl<#type_impl_gen> #frame_support::traits::GetStorageVersion + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn current_storage_version() -> #frame_support::traits::StorageVersion { + #storage_version + } + + fn on_chain_storage_version() -> #frame_support::traits::StorageVersion { + #frame_support::traits::StorageVersion::get::() + } + } + + // Implement `OnGenesis` for `Pallet` + impl<#type_impl_gen> #frame_support::traits::OnGenesis + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn on_genesis() { + let storage_version = #storage_version; + storage_version.put::(); + } + } + + // Implement `PalletInfoAccess` for `Pallet` + impl<#type_impl_gen> #frame_support::traits::PalletInfoAccess + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn index() -> usize { + < + ::PalletInfo as #frame_support::traits::PalletInfo + >::index::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + + fn name() -> &'static str { + < + ::PalletInfo as #frame_support::traits::PalletInfo + >::name::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + } + + #storage_info + ) +} diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs new file mode 100644 index 0000000000000..a4f030722f1c1 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -0,0 +1,492 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::{ + parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, + Def, +}; +use std::collections::HashMap; + +/// Generate the prefix_ident related to the storage. +/// prefix_ident is used for the prefix struct to be given to storage as first generic param. +fn prefix_ident(storage: &StorageDef) -> syn::Ident { + let storage_ident = &storage.ident; + syn::Ident::new(&format!("_GeneratedPrefixForStorage{}", storage_ident), storage_ident.span()) +} + +/// Generate the counter_prefix_ident related to the storage. +/// counter_prefix_ident is used for the prefix struct to be given to counted storage map. +fn counter_prefix_ident(storage_ident: &syn::Ident) -> syn::Ident { + syn::Ident::new( + &format!("_GeneratedCounterPrefixForStorage{}", storage_ident), + storage_ident.span(), + ) +} + +/// Generate the counter_prefix related to the storage. +/// counter_prefix is used by counted storage map. +fn counter_prefix(prefix: &str) -> String { + format!("CounterFor{}", prefix) +} + +/// Check for duplicated storage prefixes. This step is necessary since users can specify an +/// alternative storage prefix using the #[pallet::storage_prefix] syntax, and we need to ensure +/// that the prefix specified by the user is not a duplicate of an existing one. +fn check_prefix_duplicates( + storage_def: &StorageDef, + // A hashmap of all already used prefix and their associated error if duplication + used_prefixes: &mut HashMap, +) -> syn::Result<()> { + let prefix = storage_def.prefix(); + let dup_err = syn::Error::new( + storage_def.prefix_span(), + format!("Duplicate storage prefixes found for `{}`", prefix), + ); + + if let Some(other_dup_err) = used_prefixes.insert(prefix.clone(), dup_err.clone()) { + let mut err = dup_err; + err.combine(other_dup_err); + return Err(err) + } + + if let Metadata::CountedMap { .. } = storage_def.metadata { + let counter_prefix = counter_prefix(&prefix); + let counter_dup_err = syn::Error::new( + storage_def.prefix_span(), + format!( + "Duplicate storage prefixes found for `{}`, used for counter associated to \ + counted storage map", + counter_prefix, + ), + ); + + if let Some(other_dup_err) = + used_prefixes.insert(counter_prefix.clone(), counter_dup_err.clone()) + { + let mut err = counter_dup_err; + err.combine(other_dup_err); + return Err(err) + } + } + + Ok(()) +} + +/// +/// * if generics are unnamed: replace the first generic `_` by the generated prefix structure +/// * if generics are named: reorder the generic, remove their name, and add the missing ones. 
+/// * Add `#[allow(type_alias_bounds)]` +pub fn process_generics(def: &mut Def) -> syn::Result<()> { + let frame_support = &def.frame_support; + + for storage_def in def.storages.iter_mut() { + let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; + + let typ_item = match item { + syn::Item::Type(t) => t, + _ => unreachable!("Checked by def"), + }; + + typ_item.attrs.push(syn::parse_quote!(#[allow(type_alias_bounds)])); + + let typ_path = match &mut *typ_item.ty { + syn::Type::Path(p) => p, + _ => unreachable!("Checked by def"), + }; + + let args = match &mut typ_path.path.segments[0].arguments { + syn::PathArguments::AngleBracketed(args) => args, + _ => unreachable!("Checked by def"), + }; + + let prefix_ident = prefix_ident(&storage_def); + let type_use_gen = if def.config.has_instance { + quote::quote_spanned!(storage_def.attr_span => T, I) + } else { + quote::quote_spanned!(storage_def.attr_span => T) + }; + + let default_query_kind: syn::Type = + syn::parse_quote!(#frame_support::storage::types::OptionQuery); + let default_on_empty: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); + let default_max_values: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); + + if let Some(named_generics) = storage_def.named_generics.clone() { + args.args.clear(); + args.args.push(syn::parse_quote!( #prefix_ident<#type_use_gen> )); + match named_generics { + StorageGenerics::Value { value, query_kind, on_empty } => { + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(query_kind)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + args.args.push(syn::GenericArgument::Type(on_empty)); + }, + StorageGenerics::Map { hasher, key, value, query_kind, on_empty, max_values } => { + args.args.push(syn::GenericArgument::Type(hasher)); + args.args.push(syn::GenericArgument::Type(key)); + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(query_kind)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + args.args.push(syn::GenericArgument::Type(on_empty)); + let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); + args.args.push(syn::GenericArgument::Type(max_values)); + }, + StorageGenerics::CountedMap { + hasher, + key, + value, + query_kind, + on_empty, + max_values, + } => { + args.args.push(syn::GenericArgument::Type(hasher)); + args.args.push(syn::GenericArgument::Type(key)); + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(query_kind)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + args.args.push(syn::GenericArgument::Type(on_empty)); + let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); + args.args.push(syn::GenericArgument::Type(max_values)); + }, + StorageGenerics::DoubleMap { + hasher1, + key1, + hasher2, + key2, + value, + query_kind, + on_empty, + max_values, + } => { + args.args.push(syn::GenericArgument::Type(hasher1)); + args.args.push(syn::GenericArgument::Type(key1)); + args.args.push(syn::GenericArgument::Type(hasher2)); + args.args.push(syn::GenericArgument::Type(key2)); + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = 
query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(query_kind)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + args.args.push(syn::GenericArgument::Type(on_empty)); + let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); + args.args.push(syn::GenericArgument::Type(max_values)); + }, + StorageGenerics::NMap { keygen, value, query_kind, on_empty, max_values } => { + args.args.push(syn::GenericArgument::Type(keygen)); + args.args.push(syn::GenericArgument::Type(value)); + let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(query_kind)); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + args.args.push(syn::GenericArgument::Type(on_empty)); + let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); + args.args.push(syn::GenericArgument::Type(max_values)); + }, + } + } else { + args.args[0] = syn::parse_quote!( #prefix_ident<#type_use_gen> ); + } + } + + Ok(()) +} + +/// +/// * generate StoragePrefix structs (e.g. for a storage `MyStorage` a struct with the name +/// `_GeneratedPrefixForStorage$NameOfStorage` is generated) and implements StorageInstance trait. +/// * if generics are unnamed: replace the first generic `_` by the generated prefix structure +/// * if generics are named: reorder the generic, remove their name, and add the missing ones. +/// * Add `#[allow(type_alias_bounds)]` on storages type alias +/// * generate metadatas +pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { + if let Err(e) = process_generics(def) { + return e.into_compile_error().into() + } + + // Check for duplicate prefixes + let mut prefix_set = HashMap::new(); + let mut errors = def + .storages + .iter() + .filter_map(|storage_def| check_prefix_duplicates(storage_def, &mut prefix_set).err()); + if let Some(mut final_error) = errors.next() { + errors.for_each(|error| final_error.combine(error)); + return final_error.into_compile_error() + } + + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let pallet_ident = &def.pallet_struct.pallet; + + let entries_builder = def.storages.iter().map(|storage| { + let docs = &storage.docs; + + let ident = &storage.ident; + let gen = &def.type_use_generics(storage.attr_span); + let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); + + let cfg_attrs = &storage.cfg_attrs; + + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* + { + <#full_ident as #frame_support::storage::StorageEntryMetadataBuilder>::build_metadata( + #frame_support::sp_std::vec![ + #( #docs, )* + ], + &mut entries, + ); + } + ) + }); + + let getters = def.storages.iter().map(|storage| { + if let Some(getter) = &storage.getter { + let completed_where_clause = + super::merge_where_clauses(&[&storage.where_clause, &def.config.where_clause]); + let docs = storage + .docs + .iter() + .map(|d| quote::quote_spanned!(storage.attr_span => #[doc = #d])); + + let ident = &storage.ident; + let gen = &def.type_use_generics(storage.attr_span); + let type_impl_gen = &def.type_impl_generics(storage.attr_span); + let type_use_gen = &def.type_use_generics(storage.attr_span); + let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); + + let cfg_attrs = &storage.cfg_attrs; + + match &storage.metadata { + Metadata::Value { value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + 
QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter() -> #query { + < + #full_ident as #frame_support::storage::StorageValue<#value> + >::get() + } + } + ) + }, + Metadata::Map { key, value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter(k: KArg) -> #query where + KArg: #frame_support::codec::EncodeLike<#key>, + { + < + #full_ident as #frame_support::storage::StorageMap<#key, #value> + >::get(k) + } + } + ) + }, + Metadata::CountedMap { key, value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter(k: KArg) -> #query where + KArg: #frame_support::codec::EncodeLike<#key>, + { + // NOTE: we can't use any trait here because CountedStorageMap + // doesn't implement any. + <#full_ident>::get(k) + } + } + ) + }, + Metadata::DoubleMap { key1, key2, value } => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter(k1: KArg1, k2: KArg2) -> #query where + KArg1: #frame_support::codec::EncodeLike<#key1>, + KArg2: #frame_support::codec::EncodeLike<#key2>, + { + < + #full_ident as + #frame_support::storage::StorageDoubleMap<#key1, #key2, #value> + >::get(k1, k2) + } + } + ) + }, + Metadata::NMap { keygen, value, .. 
} => { + let query = match storage.query_kind.as_ref().expect("Checked by def") { + QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => + Option<#value> + ), + QueryKind::ValueQuery => quote::quote!(#value), + }; + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + #( #docs )* + pub fn #getter(key: KArg) -> #query + where + KArg: #frame_support::storage::types::EncodeLikeTuple< + <#keygen as #frame_support::storage::types::KeyGenerator>::KArg + > + + #frame_support::storage::types::TupleToEncodedIter, + { + < + #full_ident as + #frame_support::storage::StorageNMap<#keygen, #value> + >::get(key) + } + } + ) + }, + } + } else { + Default::default() + } + }); + + let prefix_structs = def.storages.iter().map(|storage_def| { + let type_impl_gen = &def.type_impl_generics(storage_def.attr_span); + let type_use_gen = &def.type_use_generics(storage_def.attr_span); + let prefix_struct_ident = prefix_ident(&storage_def); + let prefix_struct_vis = &storage_def.vis; + let prefix_struct_const = storage_def.prefix(); + let config_where_clause = &def.config.where_clause; + + let cfg_attrs = &storage_def.cfg_attrs; + + let maybe_counter = if let Metadata::CountedMap { .. } = storage_def.metadata { + let counter_prefix_struct_ident = counter_prefix_ident(&storage_def.ident); + let counter_prefix_struct_const = counter_prefix(&prefix_struct_const); + + quote::quote_spanned!(storage_def.attr_span => + #(#cfg_attrs)* + #prefix_struct_vis struct #counter_prefix_struct_ident<#type_use_gen>( + core::marker::PhantomData<(#type_use_gen,)> + ); + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::traits::StorageInstance + for #counter_prefix_struct_ident<#type_use_gen> + #config_where_clause + { + fn pallet_prefix() -> &'static str { + < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::name::>() + .expect("Every active pallet has a name in the runtime; qed") + } + const STORAGE_PREFIX: &'static str = #counter_prefix_struct_const; + } + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::storage::types::CountedStorageMapInstance + for #prefix_struct_ident<#type_use_gen> + #config_where_clause + { + type CounterPrefix = #counter_prefix_struct_ident<#type_use_gen>; + } + ) + } else { + proc_macro2::TokenStream::default() + }; + + quote::quote_spanned!(storage_def.attr_span => + #maybe_counter + + #(#cfg_attrs)* + #prefix_struct_vis struct #prefix_struct_ident<#type_use_gen>( + core::marker::PhantomData<(#type_use_gen,)> + ); + #(#cfg_attrs)* + impl<#type_impl_gen> #frame_support::traits::StorageInstance + for #prefix_struct_ident<#type_use_gen> + #config_where_clause + { + fn pallet_prefix() -> &'static str { + < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::name::>() + .expect("Every active pallet has a name in the runtime; qed") + } + const STORAGE_PREFIX: &'static str = #prefix_struct_const; + } + ) + }); + + let mut where_clauses = vec![&def.config.where_clause]; + where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); + let completed_where_clause = super::merge_where_clauses(&where_clauses); + let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); + let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); + + quote::quote!( + impl<#type_impl_gen> #pallet_ident<#type_use_gen> + #completed_where_clause + { + #[doc(hidden)] + pub fn storage_metadata() -> #frame_support::metadata::PalletStorageMetadata { + 
#frame_support::metadata::PalletStorageMetadata { + prefix: < + ::PalletInfo as + #frame_support::traits::PalletInfo + >::name::<#pallet_ident<#type_use_gen>>() + .expect("Every active pallet has a name in the runtime; qed"), + entries: { + #[allow(unused_mut)] + let mut entries = #frame_support::sp_std::vec![]; + #( #entries_builder )* + entries + }, + } + } + } + + #( #getters )* + #( #prefix_structs )* + ) +} diff --git a/frame/support/procedural/src/pallet/expand/store_trait.rs b/frame/support/procedural/src/pallet/expand/store_trait.rs new file mode 100644 index 0000000000000..36cc08b732fe5 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/store_trait.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; +use syn::spanned::Spanned; + +/// If attribute `#[pallet::generate_store(..)]` is defined then: +/// * generate Store trait with all storages, +/// * implement Store trait for Pallet. +pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { + let (trait_vis, trait_store) = + if let Some(store) = &def.pallet_struct.store { store } else { return Default::default() }; + + let type_impl_gen = &def.type_impl_generics(trait_store.span()); + let type_use_gen = &def.type_use_generics(trait_store.span()); + let pallet_ident = &def.pallet_struct.pallet; + + let mut where_clauses = vec![&def.config.where_clause]; + where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); + let completed_where_clause = super::merge_where_clauses(&where_clauses); + + let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); + let storage_cfg_attrs = + &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::>(); + + quote::quote_spanned!(trait_store.span() => + #trait_vis trait #trait_store { + #( + #(#storage_cfg_attrs)* + type #storage_names; + )* + } + impl<#type_impl_gen> #trait_store for #pallet_ident<#type_use_gen> + #completed_where_clause + { + #( + #(#storage_cfg_attrs)* + type #storage_names = #storage_names<#type_use_gen>; + )* + } + ) +} diff --git a/frame/support/procedural/src/pallet/expand/type_value.rs b/frame/support/procedural/src/pallet/expand/type_value.rs new file mode 100644 index 0000000000000..535a187773807 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/type_value.rs @@ -0,0 +1,74 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::pallet::Def; + +/// +/// * Generate the struct +/// * implement the `Get<..>` on it +/// * Rename the name of the function to internal name +pub fn expand_type_values(def: &mut Def) -> proc_macro2::TokenStream { + let mut expand = quote::quote!(); + let frame_support = &def.frame_support; + + for type_value in &def.type_values { + let fn_name_str = &type_value.ident.to_string(); + let fn_name_snakecase = inflector::cases::snakecase::to_snake_case(fn_name_str); + let fn_ident_renamed = syn::Ident::new( + &format!("__type_value_for_{}", fn_name_snakecase), + type_value.ident.span(), + ); + + let type_value_item = { + let item = &mut def.item.content.as_mut().expect("Checked by def").1[type_value.index]; + if let syn::Item::Fn(item) = item { + item + } else { + unreachable!("Checked by error parser") + } + }; + + // Rename the type_value function name + type_value_item.sig.ident = fn_ident_renamed.clone(); + + let vis = &type_value.vis; + let ident = &type_value.ident; + let type_ = &type_value.type_; + let where_clause = &type_value.where_clause; + + let (struct_impl_gen, struct_use_gen) = if type_value.is_generic { + ( + def.type_impl_generics(type_value.attr_span), + def.type_use_generics(type_value.attr_span), + ) + } else { + (Default::default(), Default::default()) + }; + + expand.extend(quote::quote_spanned!(type_value.attr_span => + #vis struct #ident<#struct_use_gen>(core::marker::PhantomData<((), #struct_use_gen)>); + impl<#struct_impl_gen> #frame_support::traits::Get<#type_> for #ident<#struct_use_gen> + #where_clause + { + fn get() -> #type_ { + #fn_ident_renamed::<#struct_use_gen>() + } + } + )); + } + expand +} diff --git a/frame/support/procedural/src/pallet/expand/validate_unsigned.rs b/frame/support/procedural/src/pallet/expand/validate_unsigned.rs new file mode 100644 index 0000000000000..5f30d712e9a51 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/validate_unsigned.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, COUNTER}; +use proc_macro2::TokenStream; +use quote::quote; +use syn::{spanned::Spanned, Ident}; + +pub fn expand_validate_unsigned(def: &mut Def) -> TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let macro_ident = + Ident::new(&format!("__is_validate_unsigned_part_defined_{}", count), def.item.span()); + + let maybe_compile_error = if def.validate_unsigned.is_none() { + quote! 
{ + compile_error!(concat!( + "`", + stringify!($pallet_name), + "` does not have #[pallet::validate_unsigned] defined, perhaps you should \ + remove `ValidateUnsigned` from construct_runtime?", + )); + } + } else { + TokenStream::new() + }; + + quote! { + #[doc(hidden)] + pub mod __substrate_validate_unsigned_check { + #[macro_export] + #[doc(hidden)] + macro_rules! #macro_ident { + ($pallet_name:ident) => { + #maybe_compile_error + } + } + + #[doc(hidden)] + pub use #macro_ident as is_validate_unsigned_part_defined; + } + } +} diff --git a/frame/support/procedural/src/pallet/mod.rs b/frame/support/procedural/src/pallet/mod.rs new file mode 100644 index 0000000000000..93797906d04d9 --- /dev/null +++ b/frame/support/procedural/src/pallet/mod.rs @@ -0,0 +1,51 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation for pallet attribute macro. +//! +//! General workflow: +//! 1 - parse all pallet attributes: +//! This step remove all attributes `#[pallet::*]` from the ItemMod and build the `Def` struct +//! which holds the ItemMod without `#[pallet::*]` and information given by those attributes +//! 2 - expand from the parsed information +//! This step will modify the ItemMod by adding some derive attributes or phantom data variants +//! to user defined types. And also crate new types and implement block. + +mod expand; +mod parse; + +pub use parse::Def; +use syn::spanned::Spanned; + +pub fn pallet( + attr: proc_macro::TokenStream, + item: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + if !attr.is_empty() { + let msg = + "Invalid pallet macro call: expected no attributes, e.g. macro call must be just \ + `#[frame_support::pallet]` or `#[pallet]`"; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into() + } + + let item = syn::parse_macro_input!(item as syn::ItemMod); + match parse::Def::try_from(item) { + Ok(def) => expand::expand(def).into(), + Err(e) => e.to_compile_error().into(), + } +} diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs new file mode 100644 index 0000000000000..0563568f33311 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -0,0 +1,241 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use frame_support_procedural_tools::get_doc_literals; +use quote::ToTokens; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(Call); + syn::custom_keyword!(OriginFor); + syn::custom_keyword!(weight); + syn::custom_keyword!(compact); + syn::custom_keyword!(T); + syn::custom_keyword!(pallet); +} + +/// Definition of dispatchables typically `impl Pallet { ... }` +pub struct CallDef { + /// The where_clause used. + pub where_clause: Option, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The index of call item in pallet module. + pub index: usize, + /// Information on methods (used for expansion). + pub methods: Vec, + /// The span of the pallet::call attribute. + pub attr_span: proc_macro2::Span, + /// Docs, specified on the impl Block. + pub docs: Vec, +} + +#[derive(Clone)] +/// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` +pub struct CallVariantDef { + /// Function name. + pub name: syn::Ident, + /// Information on args: `(is_compact, name, type)` + pub args: Vec<(bool, syn::Ident, Box)>, + /// Weight formula. + pub weight: syn::Expr, + /// Docs, used for metadata. + pub docs: Vec, +} + +/// Attributes for functions in call impl block. +/// Parse for `#[pallet::weight(expr)]` +pub struct FunctionAttr { + weight: syn::Expr, +} + +impl syn::parse::Parse for FunctionAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + content.parse::()?; + + let weight_content; + syn::parenthesized!(weight_content in content); + Ok(FunctionAttr { weight: weight_content.parse::()? }) + } +} + +/// Attribute for arguments in function in call impl block. +/// Parse for `#[pallet::compact]| +pub struct ArgAttrIsCompact; + +impl syn::parse::Parse for ArgAttrIsCompact { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + content.parse::()?; + Ok(ArgAttrIsCompact) + } +} + +/// Check the syntax is `OriginFor` +pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { + pub struct CheckDispatchableFirstArg; + impl syn::parse::Parse for CheckDispatchableFirstArg { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + + Ok(Self) + } + } + + syn::parse2::(ty.to_token_stream()).map_err(|e| { + let msg = "Invalid type: expected `OriginFor`"; + let mut err = syn::Error::new(ty.span(), msg); + err.combine(e); + err + })?; + + Ok(()) +} + +impl CallDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) + }; + + let mut instances = vec![]; + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + + if let Some((_, _, for_)) = item.trait_ { + let msg = "Invalid pallet::call, expected no trait ident as in \ + `impl<..> Pallet<..> { .. 
}`"; + return Err(syn::Error::new(for_.span(), msg)) + } + + let mut methods = vec![]; + for impl_item in &mut item.items { + if let syn::ImplItem::Method(method) = impl_item { + if !matches!(method.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::call, dispatchable function must be public: \ + `pub fn`"; + + let span = match method.vis { + syn::Visibility::Inherited => method.sig.span(), + _ => method.vis.span(), + }; + + return Err(syn::Error::new(span, msg)) + } + + match method.sig.inputs.first() { + None => { + let msg = "Invalid pallet::call, must have at least origin arg"; + return Err(syn::Error::new(method.sig.span(), msg)) + }, + Some(syn::FnArg::Receiver(_)) => { + let msg = "Invalid pallet::call, first argument must be a typed argument, \ + e.g. `origin: OriginFor`"; + return Err(syn::Error::new(method.sig.span(), msg)) + }, + Some(syn::FnArg::Typed(arg)) => { + check_dispatchable_first_arg_type(&*arg.ty)?; + }, + } + + if let syn::ReturnType::Type(_, type_) = &method.sig.output { + helper::check_pallet_call_return_type(type_)?; + } else { + let msg = "Invalid pallet::call, require return type \ + DispatchResultWithPostInfo"; + return Err(syn::Error::new(method.sig.span(), msg)) + } + + let mut call_var_attrs: Vec = + helper::take_item_pallet_attrs(&mut method.attrs)?; + + if call_var_attrs.len() != 1 { + let msg = if call_var_attrs.is_empty() { + "Invalid pallet::call, requires weight attribute i.e. `#[pallet::weight($expr)]`" + } else { + "Invalid pallet::call, too many weight attributes given" + }; + return Err(syn::Error::new(method.sig.span(), msg)) + } + let weight = call_var_attrs.pop().unwrap().weight; + + let mut args = vec![]; + for arg in method.sig.inputs.iter_mut().skip(1) { + let arg = if let syn::FnArg::Typed(arg) = arg { + arg + } else { + unreachable!("Only first argument can be receiver"); + }; + + let arg_attrs: Vec = + helper::take_item_pallet_attrs(&mut arg.attrs)?; + + if arg_attrs.len() > 1 { + let msg = "Invalid pallet::call, argument has too many attributes"; + return Err(syn::Error::new(arg.span(), msg)) + } + + let arg_ident = if let syn::Pat::Ident(pat) = &*arg.pat { + pat.ident.clone() + } else { + let msg = "Invalid pallet::call, argument must be ident"; + return Err(syn::Error::new(arg.pat.span(), msg)) + }; + + args.push((!arg_attrs.is_empty(), arg_ident, arg.ty.clone())); + } + + let docs = get_doc_literals(&method.attrs); + + methods.push(CallVariantDef { name: method.sig.ident.clone(), weight, args, docs }); + } else { + let msg = "Invalid pallet::call, only method accepted"; + return Err(syn::Error::new(impl_item.span(), msg)) + } + } + + Ok(Self { + index, + attr_span, + instances, + methods, + where_clause: item.generics.where_clause.clone(), + docs: get_doc_literals(&item.attrs), + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs new file mode 100644 index 0000000000000..712c20ffc7b4c --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -0,0 +1,398 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
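+// Illustrative sketch (not part of the original sources) of a config trait the parser
+// below accepts; the constant `MaxLen` is hypothetical:
+//
+// #[pallet::config]
+// pub trait Config: frame_system::Config {
+// 	type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+// 	#[pallet::constant]
+// 	type MaxLen: Get<u32>;
+// }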
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use core::convert::TryFrom; +use frame_support_procedural_tools::get_doc_literals; +use quote::ToTokens; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(Config); + syn::custom_keyword!(From); + syn::custom_keyword!(T); + syn::custom_keyword!(I); + syn::custom_keyword!(config); + syn::custom_keyword!(IsType); + syn::custom_keyword!(Event); + syn::custom_keyword!(constant); + syn::custom_keyword!(frame_system); + syn::custom_keyword!(disable_frame_system_supertrait_check); +} + +/// Input definition for the pallet config. +pub struct ConfigDef { + /// The index of item in pallet module. + pub index: usize, + /// Whether the trait has instance (i.e. define with `Config`) + pub has_instance: bool, + /// Const associated type. + pub consts_metadata: Vec, + /// Whether the trait has the associated type `Event`, note that those bounds are checked: + /// * `IsType::Event` + /// * `From` or `From>` or `From>` + pub has_event_type: bool, + /// The where clause on trait definition but modified so `Self` is `T`. + pub where_clause: Option, + /// The span of the pallet::config attribute. + pub attr_span: proc_macro2::Span, +} + +/// Input definition for a constant in pallet config. +pub struct ConstMetadataDef { + /// Name of the associated type. + pub ident: syn::Ident, + /// The type in Get, e.g. 
`u32` in `type Foo: Get;`, but `Self` is replaced by `T` + pub type_: syn::Type, + /// The doc associated + pub doc: Vec, +} + +impl TryFrom<&syn::TraitItemType> for ConstMetadataDef { + type Error = syn::Error; + + fn try_from(trait_ty: &syn::TraitItemType) -> Result { + let err = |span, msg| { + syn::Error::new(span, format!("Invalid usage of `#[pallet::constant]`: {}", msg)) + }; + let doc = get_doc_literals(&trait_ty.attrs); + let ident = trait_ty.ident.clone(); + let bound = trait_ty + .bounds + .iter() + .find_map(|b| { + if let syn::TypeParamBound::Trait(tb) = b { + tb.path + .segments + .last() + .and_then(|s| if s.ident == "Get" { Some(s) } else { None }) + } else { + None + } + }) + .ok_or_else(|| err(trait_ty.span(), "`Get` trait bound not found"))?; + let type_arg = if let syn::PathArguments::AngleBracketed(ref ab) = bound.arguments { + if ab.args.len() == 1 { + if let syn::GenericArgument::Type(ref ty) = ab.args[0] { + Ok(ty) + } else { + Err(err(ab.args[0].span(), "Expected a type argument")) + } + } else { + Err(err(bound.span(), "Expected a single type argument")) + } + } else { + Err(err(bound.span(), "Expected trait generic args")) + }?; + let type_ = syn::parse2::(replace_self_by_t(type_arg.to_token_stream())) + .expect("Internal error: replacing `Self` by `T` should result in valid type"); + + Ok(Self { ident, type_, doc }) + } +} + +/// Parse for `#[pallet::disable_frame_system_supertrait_check]` +pub struct DisableFrameSystemSupertraitCheck; + +impl syn::parse::Parse for DisableFrameSystemSupertraitCheck { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + content.parse::()?; + Ok(Self) + } +} + +/// Parse for `#[pallet::constant]` +pub struct TypeAttrConst(proc_macro2::Span); + +impl Spanned for TypeAttrConst { + fn span(&self) -> proc_macro2::Span { + self.0 + } +} + +impl syn::parse::Parse for TypeAttrConst { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + Ok(TypeAttrConst(content.parse::()?.span())) + } +} + +/// Parse for `$ident::Config` +pub struct ConfigBoundParse(syn::Ident); + +impl syn::parse::Parse for ConfigBoundParse { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let ident = input.parse::()?; + input.parse::()?; + input.parse::()?; + + if input.peek(syn::token::Lt) { + input.parse::()?; + } + + Ok(Self(ident)) + } +} + +/// Parse for `IsType<::Event>` and retrieve `$ident` +pub struct IsTypeBoundEventParse(syn::Ident); + +impl syn::parse::Parse for IsTypeBoundEventParse { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + let ident = input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + + Ok(Self(ident)) + } +} + +/// Parse for `From` or `From>` or `From>` +pub struct FromEventParse { + is_generic: bool, + has_instance: bool, +} + +impl syn::parse::Parse for FromEventParse { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut is_generic = false; + let mut has_instance = false; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![<]) { + is_generic = true; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![,]) { + 
input.parse::<syn::Token![,]>()?;
+				input.parse::<keyword::I>()?;
+				has_instance = true;
+			}
+			input.parse::<syn::Token![>]>()?;
+		}
+		input.parse::<syn::Token![>]>()?;
+
+		Ok(Self { is_generic, has_instance })
+	}
+}
+
+/// Check if `trait_item` is `type Event`; if so, check that its bounds are those expected
+/// (the `Event` type is a reserved type).
+fn check_event_type(
+	frame_system: &syn::Ident,
+	trait_item: &syn::TraitItem,
+	trait_has_instance: bool,
+) -> syn::Result<bool> {
+	if let syn::TraitItem::Type(type_) = trait_item {
+		if type_.ident == "Event" {
+			// Check event has no generics
+			if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() {
+				let msg = "Invalid `type Event`, associated type `Event` is reserved and must have \
+					no generics nor where_clause";
+				return Err(syn::Error::new(trait_item.span(), msg))
+			}
+			// Check bound contains IsType and From
+
+			let has_is_type_bound = type_.bounds.iter().any(|s| {
+				syn::parse2::<IsTypeBoundEventParse>(s.to_token_stream())
+					.map_or(false, |b| b.0 == *frame_system)
+			});
+
+			if !has_is_type_bound {
+				let msg = format!(
+					"Invalid `type Event`, associated type `Event` is reserved and must \
+					bound: `IsType<<Self as {}::Config>::Event>`",
+					frame_system,
+				);
+				return Err(syn::Error::new(type_.span(), msg))
+			}
+
+			let from_event_bound = type_
+				.bounds
+				.iter()
+				.find_map(|s| syn::parse2::<FromEventParse>(s.to_token_stream()).ok());
+
+			let from_event_bound = if let Some(b) = from_event_bound {
+				b
+			} else {
+				let msg = "Invalid `type Event`, associated type `Event` is reserved and must \
+					bound: `From<Event>` or `From<Event<Self>>` or `From<Event<Self, I>>`";
+				return Err(syn::Error::new(type_.span(), msg))
+			};
+
+			if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance)
+			{
+				let msg = "Invalid `type Event`, associated type `Event` bounds inconsistent \
+					`From<Event..>`. Config and generic Event must be both with instance or \
+					without instance";
+				return Err(syn::Error::new(type_.span(), msg))
+			}
+
+			Ok(true)
+		} else {
+			Ok(false)
+		}
+	} else {
+		Ok(false)
+	}
+}
+
+/// Replace ident `Self` by `T`
+pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenStream {
+	input
+		.into_iter()
+		.map(|token_tree| match token_tree {
+			proc_macro2::TokenTree::Group(group) =>
+				proc_macro2::Group::new(group.delimiter(), replace_self_by_t(group.stream())).into(),
+			proc_macro2::TokenTree::Ident(ident) if ident == "Self" =>
+				proc_macro2::Ident::new("T", ident.span()).into(),
+			other => other,
+		})
+		.collect()
+}
+
+impl ConfigDef {
+	pub fn try_from(
+		frame_system: &syn::Ident,
+		attr_span: proc_macro2::Span,
+		index: usize,
+		item: &mut syn::Item,
+	) -> syn::Result<Self> {
+		let item = if let syn::Item::Trait(item) = item {
+			item
+		} else {
+			let msg = "Invalid pallet::config, expected trait definition";
+			return Err(syn::Error::new(item.span(), msg))
+		};
+
+		if !matches!(item.vis, syn::Visibility::Public(_)) {
+			let msg = "Invalid pallet::config, trait must be public";
+			return Err(syn::Error::new(item.span(), msg))
+		}
+
+		syn::parse2::<keyword::Config>(item.ident.to_token_stream())?;
+
+		let where_clause = {
+			let stream = replace_self_by_t(item.generics.where_clause.to_token_stream());
+			syn::parse2::<Option<syn::WhereClause>>(stream).expect(
+				"Internal error: replacing `Self` by `T` should result in valid where
+					clause",
+			)
+		};
+
+		if item.generics.params.len() > 1 {
+			let msg = "Invalid pallet::config, expected no more than one generic";
+			return Err(syn::Error::new(item.generics.params[1].span(), msg))
+		}
+
+		let has_instance = if item.generics.params.first().is_some() {
+			helper::check_config_def_gen(&item.generics, item.ident.span())?;
+			true
+		} else
{ + false + }; + + let mut has_event_type = false; + let mut consts_metadata = vec![]; + for trait_item in &mut item.items { + // Parse for event + has_event_type = + has_event_type || check_event_type(frame_system, trait_item, has_instance)?; + + // Parse for constant + let type_attrs_const: Vec = helper::take_item_pallet_attrs(trait_item)?; + + if type_attrs_const.len() > 1 { + let msg = "Invalid attribute in pallet::config, only one attribute is expected"; + return Err(syn::Error::new(type_attrs_const[1].span(), msg)) + } + + if type_attrs_const.len() == 1 { + match trait_item { + syn::TraitItem::Type(ref type_) => { + let constant = ConstMetadataDef::try_from(type_)?; + consts_metadata.push(constant); + }, + _ => { + let msg = + "Invalid pallet::constant in pallet::config, expected type trait \ + item"; + return Err(syn::Error::new(trait_item.span(), msg)) + }, + } + } + } + + let attr: Option = + helper::take_first_item_pallet_attr(&mut item.attrs)?; + + let disable_system_supertrait_check = attr.is_some(); + + let has_frame_system_supertrait = item.supertraits.iter().any(|s| { + syn::parse2::(s.to_token_stream()) + .map_or(false, |b| b.0 == *frame_system) + }); + + if !has_frame_system_supertrait && !disable_system_supertrait_check { + let found = if item.supertraits.is_empty() { + "none".to_string() + } else { + let mut found = item.supertraits.iter().fold(String::new(), |acc, s| { + format!("{}`{}`, ", acc, quote::quote!(#s).to_string()) + }); + found.pop(); + found.pop(); + found + }; + + let msg = format!( + "Invalid pallet::trait, expected explicit `{}::Config` as supertrait, \ + found {}. \ + (try `pub trait Config: frame_system::Config {{ ...` or \ + `pub trait Config: frame_system::Config {{ ...`). \ + To disable this check, use `#[pallet::disable_frame_system_supertrait_check]`", + frame_system, found, + ); + return Err(syn::Error::new(item.span(), msg)) + } + + Ok(Self { index, has_instance, consts_metadata, has_event_type, where_clause, attr_span }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/error.rs b/frame/support/procedural/src/pallet/parse/error.rs new file mode 100644 index 0000000000000..9c9a95105c53c --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/error.rs @@ -0,0 +1,90 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use frame_support_procedural_tools::get_doc_literals; +use quote::ToTokens; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(Error); +} + +/// This checks error declaration as a enum declaration with only variants without fields nor +/// discriminant. +pub struct ErrorDef { + /// The index of error item in pallet module. 
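+	// Illustrative sketch (not part of the original sources) of an error enum this parser
+	// accepts: a public enum with unit variants only and no discriminants.
+	//
+	// #[pallet::error]
+	// pub enum Error<T> {
+	// 	/// Returned when the value overflows.
+	// 	Overflow,
+	// }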
+ pub index: usize, + /// Variants ident and doc literals (ordered as declaration order) + pub variants: Vec<(syn::Ident, Vec)>, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The keyword error used (contains span). + pub error: keyword::Error, + /// The span of the pallet::error attribute. + pub attr_span: proc_macro2::Span, +} + +impl ErrorDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Enum(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")) + }; + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::error, `Error` must be public"; + return Err(syn::Error::new(item.span(), msg)) + } + + let mut instances = vec![]; + instances.push(helper::check_type_def_gen_no_bounds(&item.generics, item.ident.span())?); + + if item.generics.where_clause.is_some() { + let msg = "Invalid pallet::error, where clause is not allowed on pallet error item"; + return Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)) + } + + let error = syn::parse2::(item.ident.to_token_stream())?; + + let variants = item + .variants + .iter() + .map(|variant| { + if !matches!(variant.fields, syn::Fields::Unit) { + let msg = "Invalid pallet::error, unexpected fields, must be `Unit`"; + return Err(syn::Error::new(variant.fields.span(), msg)) + } + if variant.discriminant.is_some() { + let msg = "Invalid pallet::error, unexpected discriminant, discriminant \ + are not supported"; + let span = variant.discriminant.as_ref().unwrap().0.span(); + return Err(syn::Error::new(span, msg)) + } + + Ok((variant.ident.clone(), get_doc_literals(&variant.attrs))) + }) + .collect::>()?; + + Ok(ErrorDef { attr_span, index, variants, instances, error }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/event.rs b/frame/support/procedural/src/pallet/parse/event.rs new file mode 100644 index 0000000000000..33de4aca8b599 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/event.rs @@ -0,0 +1,141 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use quote::ToTokens; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(Event); + syn::custom_keyword!(pallet); + syn::custom_keyword!(generate_deposit); + syn::custom_keyword!(deposit_event); +} + +/// Definition for pallet event enum. +pub struct EventDef { + /// The index of event item in pallet module. + pub index: usize, + /// The keyword Event used (contains span). + pub event: keyword::Event, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The kind of generic the type `Event` has. 
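+	// Illustrative sketch (not part of the original sources) of an event enum this parser
+	// accepts; the variant is hypothetical:
+	//
+	// #[pallet::event]
+	// #[pallet::generate_deposit(pub(super) fn deposit_event)]
+	// pub enum Event<T: Config> {
+	// 	ValueSet(T::AccountId, u32),
+	// }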
+ pub gen_kind: super::GenericKind, + /// Whether the function `deposit_event` must be generated. + pub deposit_event: Option, + /// Where clause used in event definition. + pub where_clause: Option, + /// The span of the pallet::event attribute. + pub attr_span: proc_macro2::Span, +} + +/// Attribute for a pallet's Event. +/// +/// Syntax is: +/// * `#[pallet::generate_deposit($vis fn deposit_event)]` +pub struct PalletEventDepositAttr { + pub fn_vis: syn::Visibility, + // Span for the keyword deposit_event + pub fn_span: proc_macro2::Span, + // Span of the attribute + pub span: proc_macro2::Span, +} + +impl syn::parse::Parse for PalletEventDepositAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let span = content.parse::()?.span(); + let generate_content; + syn::parenthesized!(generate_content in content); + let fn_vis = generate_content.parse::()?; + generate_content.parse::()?; + let fn_span = generate_content.parse::()?.span(); + + Ok(PalletEventDepositAttr { fn_vis, span, fn_span }) + } +} + +struct PalletEventAttrInfo { + deposit_event: Option, +} + +impl PalletEventAttrInfo { + fn from_attrs(attrs: Vec) -> syn::Result { + let mut deposit_event = None; + for attr in attrs { + if deposit_event.is_none() { + deposit_event = Some(attr) + } else { + return Err(syn::Error::new(attr.span, "Duplicate attribute")) + } + } + + Ok(PalletEventAttrInfo { deposit_event }) + } +} + +impl EventDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Enum(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::event, expected item enum")) + }; + + let event_attrs: Vec = + helper::take_item_pallet_attrs(&mut item.attrs)?; + let attr_info = PalletEventAttrInfo::from_attrs(event_attrs)?; + let deposit_event = attr_info.deposit_event; + + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::event, `Event` must be public"; + return Err(syn::Error::new(item.span(), msg)) + } + + let where_clause = item.generics.where_clause.clone(); + + let mut instances = vec![]; + // NOTE: Event is not allowed to be only generic on I because it is not supported + // by construct_runtime. + if let Some(u) = helper::check_type_def_optional_gen(&item.generics, item.ident.span())? { + instances.push(u); + } else { + // construct_runtime only allow non generic event for non instantiable pallet. + instances.push(helper::InstanceUsage { has_instance: false, span: item.ident.span() }) + } + + let has_instance = item.generics.type_params().any(|t| t.ident == "I"); + let has_config = item.generics.type_params().any(|t| t.ident == "T"); + let gen_kind = super::GenericKind::from_gens(has_config, has_instance) + .expect("Checked by `helper::check_type_def_optional_gen` above"); + + let event = syn::parse2::(item.ident.to_token_stream())?; + + Ok(EventDef { attr_span, index, instances, deposit_event, event, gen_kind, where_clause }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/extra_constants.rs b/frame/support/procedural/src/pallet/parse/extra_constants.rs new file mode 100644 index 0000000000000..c1324df6c22f1 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
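+// Illustrative sketch (not part of the original sources) of the extra-constants block parsed
+// by this file; `deposit` is hypothetical. Methods must take no arguments and must declare a
+// return type:
+//
+// #[pallet::extra_constants]
+// impl<T: Config> Pallet<T> {
+// 	pub fn deposit() -> u32 { 1_000 }
+// }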
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use frame_support_procedural_tools::get_doc_literals; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(DispatchResultWithPostInfo); + syn::custom_keyword!(Call); + syn::custom_keyword!(OriginFor); + syn::custom_keyword!(weight); + syn::custom_keyword!(compact); + syn::custom_keyword!(T); + syn::custom_keyword!(pallet); +} + +/// Definition of extra constants typically `impl Pallet { ... }` +pub struct ExtraConstantsDef { + /// The where_clause used. + pub where_clause: Option, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The index of call item in pallet module. + pub index: usize, + /// The extra constant defined. + pub extra_constants: Vec, +} + +/// Input definition for an constant in pallet. +pub struct ExtraConstantDef { + /// Name of the function + pub ident: syn::Ident, + /// The type returned by the function + pub type_: syn::Type, + /// The doc associated + pub doc: Vec, +} + +impl ExtraConstantsDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) + }; + + let mut instances = vec![]; + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + + if let Some((_, _, for_)) = item.trait_ { + let msg = "Invalid pallet::call, expected no trait ident as in \ + `impl<..> Pallet<..> { .. 
}`"; + return Err(syn::Error::new(for_.span(), msg)) + } + + let mut extra_constants = vec![]; + for impl_item in &mut item.items { + let method = if let syn::ImplItem::Method(method) = impl_item { + method + } else { + let msg = "Invalid pallet::call, only method accepted"; + return Err(syn::Error::new(impl_item.span(), msg)) + }; + + if !method.sig.inputs.is_empty() { + let msg = "Invalid pallet::extra_constants, method must have 0 args"; + return Err(syn::Error::new(method.sig.span(), msg)) + } + + if !method.sig.generics.params.is_empty() { + let msg = "Invalid pallet::extra_constants, method must have 0 generics"; + return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)) + } + + if method.sig.generics.where_clause.is_some() { + let msg = "Invalid pallet::extra_constants, method must have no where clause"; + return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)) + } + + let type_ = match &method.sig.output { + syn::ReturnType::Default => { + let msg = "Invalid pallet::extra_constants, method must have a return type"; + return Err(syn::Error::new(method.span(), msg)) + }, + syn::ReturnType::Type(_, type_) => *type_.clone(), + }; + + extra_constants.push(ExtraConstantDef { + ident: method.sig.ident.clone(), + type_, + doc: get_doc_literals(&method.attrs), + }); + } + + Ok(Self { + index, + instances, + where_clause: item.generics.where_clause.clone(), + extra_constants, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/genesis_build.rs b/frame/support/procedural/src/pallet/parse/genesis_build.rs new file mode 100644 index 0000000000000..82e297b4e26e8 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// Definition for pallet genesis build implementation. +pub struct GenesisBuildDef { + /// The index of item in pallet module. + pub index: usize, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The where_clause used. + pub where_clause: Option, + /// The span of the pallet::genesis_build attribute. + pub attr_span: proc_macro2::Span, +} + +impl GenesisBuildDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::genesis_build, expected item impl"; + return Err(syn::Error::new(item.span(), msg)) + }; + + let item_trait = &item + .trait_ + .as_ref() + .ok_or_else(|| { + let msg = "Invalid pallet::genesis_build, expected impl<..> GenesisBuild<..> \ + for GenesisConfig<..>"; + syn::Error::new(item.span(), msg) + })? 
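+		// Illustrative sketch (not part of the original sources) of the impl expected here:
+		//
+		// #[pallet::genesis_build]
+		// impl<T: Config> GenesisBuild<T> for GenesisConfig {
+		// 	fn build(&self) { /* put initial values into storage */ }
+		// }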
+ .1; + + let mut instances = vec![]; + instances.push(helper::check_genesis_builder_usage(&item_trait)?); + + Ok(Self { attr_span, index, instances, where_clause: item.generics.where_clause.clone() }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/genesis_config.rs b/frame/support/procedural/src/pallet/parse/genesis_config.rs new file mode 100644 index 0000000000000..a0cf7de1a846b --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/genesis_config.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// Definition for pallet genesis config type. +/// +/// Either: +/// * `struct GenesisConfig` +/// * `enum GenesisConfig` +pub struct GenesisConfigDef { + /// The index of item in pallet module. + pub index: usize, + /// The kind of generic the type `GenesisConfig` has. + pub gen_kind: super::GenericKind, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The ident of genesis_config, can be used for span. + pub genesis_config: syn::Ident, +} + +impl GenesisConfigDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item_span = item.span(); + let (vis, ident, generics) = match &item { + syn::Item::Enum(item) => (&item.vis, &item.ident, &item.generics), + syn::Item::Struct(item) => (&item.vis, &item.ident, &item.generics), + _ => { + let msg = "Invalid pallet::genesis_config, expected enum or struct"; + return Err(syn::Error::new(item.span(), msg)) + }, + }; + + let mut instances = vec![]; + // NOTE: GenesisConfig is not allowed to be only generic on I because it is not supported + // by construct_runtime. + if let Some(u) = helper::check_type_def_optional_gen(&generics, ident.span())? { + instances.push(u); + } + + let has_instance = generics.type_params().any(|t| t.ident == "I"); + let has_config = generics.type_params().any(|t| t.ident == "T"); + let gen_kind = super::GenericKind::from_gens(has_config, has_instance) + .expect("Checked by `helper::check_type_def_optional_gen` above"); + + if !matches!(vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::genesis_config, GenesisConfig must be public"; + return Err(syn::Error::new(item_span, msg)) + } + + if ident != "GenesisConfig" { + let msg = "Invalid pallet::genesis_config, ident must `GenesisConfig`"; + return Err(syn::Error::new(ident.span(), msg)) + } + + Ok(GenesisConfigDef { index, genesis_config: ident.clone(), instances, gen_kind }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs new file mode 100644 index 0000000000000..2590e86b58b0e --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -0,0 +1,598 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use quote::ToTokens; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. +mod keyword { + syn::custom_keyword!(I); + syn::custom_keyword!(compact); + syn::custom_keyword!(GenesisBuild); + syn::custom_keyword!(Config); + syn::custom_keyword!(T); + syn::custom_keyword!(Pallet); + syn::custom_keyword!(origin); + syn::custom_keyword!(DispatchResult); + syn::custom_keyword!(DispatchResultWithPostInfo); +} + +/// A usage of instance, either the trait `Config` has been used with instance or without instance. +/// Used to check for consistency. +#[derive(Clone)] +pub struct InstanceUsage { + pub has_instance: bool, + pub span: proc_macro2::Span, +} + +/// Trait implemented for syn items to get mutable references on their attributes. +/// +/// NOTE: verbatim variants are not supported. +pub trait MutItemAttrs { + fn mut_item_attrs(&mut self) -> Option<&mut Vec>; +} + +/// Take the first pallet attribute (e.g. attribute like `#[pallet..]`) and decode it to `Attr` +pub fn take_first_item_pallet_attr(item: &mut impl MutItemAttrs) -> syn::Result> +where + Attr: syn::parse::Parse, +{ + let attrs = if let Some(attrs) = item.mut_item_attrs() { attrs } else { return Ok(None) }; + + if let Some(index) = attrs.iter().position(|attr| { + attr.path.segments.first().map_or(false, |segment| segment.ident == "pallet") + }) { + let pallet_attr = attrs.remove(index); + Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) + } else { + Ok(None) + } +} + +/// Take all the pallet attributes (e.g. attribute like `#[pallet..]`) and decode them to `Attr` +pub fn take_item_pallet_attrs(item: &mut impl MutItemAttrs) -> syn::Result> +where + Attr: syn::parse::Parse, +{ + let mut pallet_attrs = Vec::new(); + + while let Some(attr) = take_first_item_pallet_attr(item)? { + pallet_attrs.push(attr) + } + + Ok(pallet_attrs) +} + +/// Get all the cfg attributes (e.g. 
attribute like `#[cfg..]`) and decode them to `Attr` +pub fn get_item_cfg_attrs(attrs: &[syn::Attribute]) -> Vec { + attrs + .iter() + .filter_map(|attr| { + if attr.path.segments.first().map_or(false, |segment| segment.ident == "cfg") { + Some(attr.clone()) + } else { + None + } + }) + .collect::>() +} + +impl MutItemAttrs for syn::Item { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + match self { + Self::Const(item) => Some(item.attrs.as_mut()), + Self::Enum(item) => Some(item.attrs.as_mut()), + Self::ExternCrate(item) => Some(item.attrs.as_mut()), + Self::Fn(item) => Some(item.attrs.as_mut()), + Self::ForeignMod(item) => Some(item.attrs.as_mut()), + Self::Impl(item) => Some(item.attrs.as_mut()), + Self::Macro(item) => Some(item.attrs.as_mut()), + Self::Macro2(item) => Some(item.attrs.as_mut()), + Self::Mod(item) => Some(item.attrs.as_mut()), + Self::Static(item) => Some(item.attrs.as_mut()), + Self::Struct(item) => Some(item.attrs.as_mut()), + Self::Trait(item) => Some(item.attrs.as_mut()), + Self::TraitAlias(item) => Some(item.attrs.as_mut()), + Self::Type(item) => Some(item.attrs.as_mut()), + Self::Union(item) => Some(item.attrs.as_mut()), + Self::Use(item) => Some(item.attrs.as_mut()), + _ => None, + } + } +} + +impl MutItemAttrs for syn::TraitItem { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + match self { + Self::Const(item) => Some(item.attrs.as_mut()), + Self::Method(item) => Some(item.attrs.as_mut()), + Self::Type(item) => Some(item.attrs.as_mut()), + Self::Macro(item) => Some(item.attrs.as_mut()), + _ => None, + } + } +} + +impl MutItemAttrs for Vec { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + Some(self) + } +} + +impl MutItemAttrs for syn::ItemMod { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + Some(&mut self.attrs) + } +} + +/// Parse for `()` +struct Unit; +impl syn::parse::Parse for Unit { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let content; + syn::parenthesized!(content in input); + if !content.is_empty() { + let msg = "unexpected tokens, expected nothing inside parenthesis as `()`"; + return Err(syn::Error::new(content.span(), msg)) + } + Ok(Self) + } +} + +/// Parse for `'static` +struct StaticLifetime; +impl syn::parse::Parse for StaticLifetime { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let lifetime = input.parse::()?; + if lifetime.ident != "static" { + let msg = "unexpected tokens, expected `static`"; + return Err(syn::Error::new(lifetime.ident.span(), msg)) + } + Ok(Self) + } +} + +/// Check the syntax: `I: 'static = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. +pub fn check_config_def_gen(gen: &syn::Generics, span: proc_macro2::Span) -> syn::Result<()> { + let expected = "expected `I: 'static = ()`"; + pub struct CheckTraitDefGenerics; + impl syn::parse::Parse for CheckTraitDefGenerics { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self) + } + } + + syn::parse2::(gen.params.to_token_stream()).map_err(|e| { + let msg = format!("Invalid generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })?; + + Ok(()) +} + +/// Check the syntax: +/// * either `T` +/// * or `T, I = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. 
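+// For example (illustrative, not part of the original sources), both of these type
+// definitions satisfy the check below:
+//
+// pub enum Error<T> { .. }         // `T`
+// pub enum Error<T, I = ()> { .. } // `T, I = ()`, instance usage detected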
+pub fn check_type_def_gen_no_bounds( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result { + let expected = "expected `T` or `T, I = ()`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { has_instance: false, span: input.span() }; + + input.parse::()?; + if input.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + } + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })? + .0; + + Ok(i) +} + +/// Check the syntax: +/// * either `` (no generics +/// * or `T` +/// * or `T: Config` +/// * or `T, I = ()` +/// * or `T: Config, I: 'static = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return some instance usage if there is some generic, or none otherwise. +pub fn check_type_def_optional_gen( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result> { + let expected = "expected `` or `T` or `T: Config` or `T, I = ()` or \ + `T: Config, I: 'static = ()`"; + pub struct Checker(Option); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + if input.is_empty() { + return Ok(Self(None)) + } + + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; + + input.parse::()?; + + if input.is_empty() { + return Ok(Self(Some(instance_usage))) + } + + let lookahead = input.lookahead1(); + if lookahead.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(Some(instance_usage))) + } else if lookahead.peek(syn::Token![:]) { + input.parse::()?; + input.parse::()?; + + if input.is_empty() { + return Ok(Self(Some(instance_usage))) + } + + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(Some(instance_usage))) + } else { + Err(lookahead.error()) + } + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })? + .0 + // Span can be call_site if generic is empty. Thus we replace it. + .map(|mut i| { + i.span = span; + i + }); + + Ok(i) +} + +/// Check the syntax: +/// * either `Pallet` +/// * or `Pallet` +/// +/// return the instance if found. 
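+// For example (illustrative, not part of the original sources), both of these self types
+// satisfy the check below:
+//
+// impl<T: Config> Pallet<T> { .. }
+// impl<T: Config<I>, I: 'static> Pallet<T, I> { .. } // instance usage detected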
+pub fn check_pallet_struct_usage(type_: &Box) -> syn::Result { + let expected = "expected `Pallet` or `Pallet`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + } + input.parse::]>()?; + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(type_.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid pallet struct: {}", expected); + let mut err = syn::Error::new(type_.span(), msg); + err.combine(e); + err + })? + .0; + + Ok(i) +} + +/// Check the generic is: +/// * either `T: Config` +/// * or `T: Config, I: 'static` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return whether it contains instance. +pub fn check_impl_gen(gen: &syn::Generics, span: proc_macro2::Span) -> syn::Result { + let expected = "expected `impl` or `impl, I: 'static>`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![<]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + } + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let mut err = syn::Error::new(span, format!("Invalid generics: {}", expected)); + err.combine(e); + err + })? + .0; + + Ok(i) +} + +/// Check the syntax: +/// * or `T` +/// * or `T: Config` +/// * or `T, I = ()` +/// * or `T: Config, I: 'static = ()` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. 
+pub fn check_type_def_gen( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result { + let expected = "expected `T` or `T: Config` or `T, I = ()` or \ + `T: Config, I: 'static = ()`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; + + input.parse::()?; + + if input.is_empty() { + return Ok(Self(instance_usage)) + } + + let lookahead = input.lookahead1(); + if lookahead.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(instance_usage)) + } else if lookahead.peek(syn::Token![:]) { + input.parse::()?; + input.parse::()?; + + if input.is_empty() { + return Ok(Self(instance_usage)) + } + + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(instance_usage)) + } else { + Err(lookahead.error()) + } + } + } + + let mut i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })? + .0; + + // Span can be call_site if generic is empty. Thus we replace it. + i.span = span; + + Ok(i) +} + +/// Check the syntax: +/// * either `GenesisBuild` +/// * or `GenesisBuild` +/// +/// return the instance if found. +pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result { + let expected = "expected `GenesisBuild` or `GenesisBuild`"; + pub struct Checker(InstanceUsage); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; + + input.parse::()?; + input.parse::()?; + input.parse::()?; + if input.peek(syn::Token![,]) { + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + } + input.parse::]>()?; + + Ok(Self(instance_usage)) + } + } + + let i = syn::parse2::(type_.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid genesis builder: {}", expected); + let mut err = syn::Error::new(type_.span(), msg); + err.combine(e); + err + })? + .0; + + Ok(i) +} + +/// Check the syntax: +/// * either `` (no generics) +/// * or `T: Config` +/// * or `T: Config, I: 'static` +/// +/// `span` is used in case generics is empty (empty generics has span == call_site). +/// +/// return the instance if found. 
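+// For example (illustrative, not part of the original sources), a `#[pallet::type_value]`
+// function with these generics satisfies the check below; `DefaultValue` is hypothetical:
+//
+// #[pallet::type_value]
+// pub fn DefaultValue<T: Config>() -> u32 { 3 }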
+pub fn check_type_value_gen( + gen: &syn::Generics, + span: proc_macro2::Span, +) -> syn::Result> { + let expected = "expected `` or `T: Config` or `T: Config, I: 'static`"; + pub struct Checker(Option); + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + if input.is_empty() { + return Ok(Self(None)) + } + + input.parse::()?; + input.parse::()?; + input.parse::()?; + + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; + + if input.is_empty() { + return Ok(Self(Some(instance_usage))) + } + + instance_usage.has_instance = true; + input.parse::()?; + input.parse::()?; + input.parse::]>()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self(Some(instance_usage))) + } + } + + let i = syn::parse2::(gen.params.to_token_stream()) + .map_err(|e| { + let msg = format!("Invalid type def generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })? + .0 + // Span can be call_site if generic is empty. Thus we replace it. + .map(|mut i| { + i.span = span; + i + }); + + Ok(i) +} + +/// Check the keyword `DispatchResultWithPostInfo` or `DispatchResult`. +pub fn check_pallet_call_return_type(type_: &syn::Type) -> syn::Result<()> { + pub struct Checker; + impl syn::parse::Parse for Checker { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let lookahead = input.lookahead1(); + if lookahead.peek(keyword::DispatchResultWithPostInfo) { + input.parse::()?; + Ok(Self) + } else if lookahead.peek(keyword::DispatchResult) { + input.parse::()?; + Ok(Self) + } else { + Err(lookahead.error()) + } + } + } + + syn::parse2::(type_.to_token_stream()).map(|_| ()) +} diff --git a/frame/support/procedural/src/pallet/parse/hooks.rs b/frame/support/procedural/src/pallet/parse/hooks.rs new file mode 100644 index 0000000000000..1dd86498f22d5 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/hooks.rs @@ -0,0 +1,85 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// Implementation of the pallet hooks. +pub struct HooksDef { + /// The index of item in pallet. + pub index: usize, + /// A set of usage of instance, must be check for consistency with trait. + pub instances: Vec, + /// The where_clause used. + pub where_clause: Option, + /// The span of the pallet::hooks attribute. + pub attr_span: proc_macro2::Span, + /// Boolean flag, set to true if the `on_runtime_upgrade` method of hooks was implemented. 
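+	// Illustrative sketch (not part of the original sources) of the hooks impl this parser
+	// expects; the `Hooks<..>` generic argument is assumed to come from
+	// `frame_support::pallet_prelude`:
+	//
+	// #[pallet::hooks]
+	// impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+	// 	fn on_runtime_upgrade() -> Weight { 0 }
+	// }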
+ pub has_runtime_upgrade: bool, +} + +impl HooksDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::hooks, expected item impl"; + return Err(syn::Error::new(item.span(), msg)) + }; + + let mut instances = vec![]; + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + + let item_trait = &item + .trait_ + .as_ref() + .ok_or_else(|| { + let msg = "Invalid pallet::hooks, expected impl<..> Hooks \ + for Pallet<..>"; + syn::Error::new(item.span(), msg) + })? + .1; + + if item_trait.segments.len() != 1 || item_trait.segments[0].ident != "Hooks" { + let msg = format!( + "Invalid pallet::hooks, expected trait to be `Hooks` found `{}`\ + , you can import from `frame_support::pallet_prelude`", + quote::quote!(#item_trait) + ); + + return Err(syn::Error::new(item_trait.span(), msg)) + } + + let has_runtime_upgrade = item.items.iter().any(|i| match i { + syn::ImplItem::Method(method) => method.sig.ident == "on_runtime_upgrade", + _ => false, + }); + + Ok(Self { + attr_span, + index, + instances, + has_runtime_upgrade, + where_clause: item.generics.where_clause.clone(), + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/inherent.rs b/frame/support/procedural/src/pallet/parse/inherent.rs new file mode 100644 index 0000000000000..de5ad8f795db5 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/inherent.rs @@ -0,0 +1,59 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// The definition of the pallet inherent implementation. +pub struct InherentDef { + /// The index of inherent item in pallet module. + pub index: usize, + /// A set of usage of instance, must be check for consistency with trait. 
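+	// Illustrative sketch (not part of the original sources) of the impl this parser expects:
+	//
+	// #[pallet::inherent]
+	// impl<T: Config> ProvideInherent for Pallet<T> {
+	// 	// `Call`, `Error`, `INHERENT_IDENTIFIER` and the trait methods go here.
+	// }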
+ pub instances: Vec, +} + +impl InherentDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::inherent, expected item impl"; + return Err(syn::Error::new(item.span(), msg)) + }; + + if item.trait_.is_none() { + let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)) + } + + if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { + if last.ident != "ProvideInherent" { + let msg = "Invalid pallet::inherent, expected trait ProvideInherent"; + return Err(syn::Error::new(last.span(), msg)) + } + } else { + let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)) + } + + let mut instances = vec![]; + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + + Ok(InherentDef { index, instances }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs new file mode 100644 index 0000000000000..96d4776e805bc --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -0,0 +1,469 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Parse for pallet macro. +//! +//! Parse the module into `Def` struct through `Def::try_from` function. + +pub mod call; +pub mod config; +pub mod error; +pub mod event; +pub mod extra_constants; +pub mod genesis_build; +pub mod genesis_config; +pub mod helper; +pub mod hooks; +pub mod inherent; +pub mod origin; +pub mod pallet_struct; +pub mod storage; +pub mod type_value; +pub mod validate_unsigned; + +use frame_support_procedural_tools::generate_crate_access_2018; +use syn::spanned::Spanned; + +/// Parsed definition of a pallet. +pub struct Def { + /// The module items. + /// (their order must not be modified because they are registered in individual definitions). 
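+	// Each field below holds the parsed definition for one `#[pallet::*]` attribute:
+	// `config` and `pallet` are mandatory, `storage` and `type_value` may appear several
+	// times, and the rest are optional singletons (see the match in `try_from` below).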
+ pub item: syn::ItemMod, + pub config: config::ConfigDef, + pub pallet_struct: pallet_struct::PalletStructDef, + pub hooks: Option, + pub call: Option, + pub storages: Vec, + pub error: Option, + pub event: Option, + pub origin: Option, + pub inherent: Option, + pub genesis_config: Option, + pub genesis_build: Option, + pub validate_unsigned: Option, + pub extra_constants: Option, + pub type_values: Vec, + pub frame_system: syn::Ident, + pub frame_support: syn::Ident, +} + +impl Def { + pub fn try_from(mut item: syn::ItemMod) -> syn::Result { + let frame_system = generate_crate_access_2018("frame-system")?; + let frame_support = generate_crate_access_2018("frame-support")?; + + let item_span = item.span(); + let items = &mut item + .content + .as_mut() + .ok_or_else(|| { + let msg = "Invalid pallet definition, expected mod to be inlined."; + syn::Error::new(item_span, msg) + })? + .1; + + let mut config = None; + let mut pallet_struct = None; + let mut hooks = None; + let mut call = None; + let mut error = None; + let mut event = None; + let mut origin = None; + let mut inherent = None; + let mut genesis_config = None; + let mut genesis_build = None; + let mut validate_unsigned = None; + let mut extra_constants = None; + let mut storages = vec![]; + let mut type_values = vec![]; + + for (index, item) in items.iter_mut().enumerate() { + let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; + + match pallet_attr { + Some(PalletAttr::Config(span)) if config.is_none() => + config = Some(config::ConfigDef::try_from(&frame_system, span, index, item)?), + Some(PalletAttr::Pallet(span)) if pallet_struct.is_none() => { + let p = pallet_struct::PalletStructDef::try_from(span, index, item)?; + pallet_struct = Some(p); + }, + Some(PalletAttr::Hooks(span)) if hooks.is_none() => { + let m = hooks::HooksDef::try_from(span, index, item)?; + hooks = Some(m); + }, + Some(PalletAttr::Call(span)) if call.is_none() => + call = Some(call::CallDef::try_from(span, index, item)?), + Some(PalletAttr::Error(span)) if error.is_none() => + error = Some(error::ErrorDef::try_from(span, index, item)?), + Some(PalletAttr::Event(span)) if event.is_none() => + event = Some(event::EventDef::try_from(span, index, item)?), + Some(PalletAttr::GenesisConfig(_)) if genesis_config.is_none() => { + let g = genesis_config::GenesisConfigDef::try_from(index, item)?; + genesis_config = Some(g); + }, + Some(PalletAttr::GenesisBuild(span)) if genesis_build.is_none() => { + let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; + genesis_build = Some(g); + }, + Some(PalletAttr::Origin(_)) if origin.is_none() => + origin = Some(origin::OriginDef::try_from(index, item)?), + Some(PalletAttr::Inherent(_)) if inherent.is_none() => + inherent = Some(inherent::InherentDef::try_from(index, item)?), + Some(PalletAttr::Storage(span)) => + storages.push(storage::StorageDef::try_from(span, index, item)?), + Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { + let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; + validate_unsigned = Some(v); + }, + Some(PalletAttr::TypeValue(span)) => + type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), + Some(PalletAttr::ExtraConstants(_)) => + extra_constants = + Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), + Some(attr) => { + let msg = "Invalid duplicated attribute"; + return Err(syn::Error::new(attr.span(), msg)) + }, + None => (), + } + } + + if genesis_config.is_some() != genesis_build.is_some() 
+			let msg = format!(
+				"`#[pallet::genesis_config]` and `#[pallet::genesis_build]` attributes must be \
+				either both used or both not used, instead genesis_config is {} and genesis_build \
+				is {}",
+				genesis_config.as_ref().map_or("unused", |_| "used"),
+				genesis_build.as_ref().map_or("unused", |_| "used"),
+			);
+			return Err(syn::Error::new(item_span, msg))
+		}
+
+		let def = Def {
+			item,
+			config: config
+				.ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::config]`"))?,
+			pallet_struct: pallet_struct
+				.ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::pallet]`"))?,
+			hooks,
+			call,
+			extra_constants,
+			genesis_config,
+			genesis_build,
+			validate_unsigned,
+			error,
+			event,
+			origin,
+			inherent,
+			storages,
+			type_values,
+			frame_system,
+			frame_support,
+		};
+
+		def.check_instance_usage()?;
+		def.check_event_usage()?;
+
+		Ok(def)
+	}
+
+	/// Check that usage of the `Event` type is consistent with its definition, i.e. the
+	/// `#[pallet::event]` enum is declared iff the `Config` trait defines an associated type
+	/// `Event`.
+	fn check_event_usage(&self) -> syn::Result<()> {
+		match (self.config.has_event_type, self.event.is_some()) {
+			(true, false) => {
+				let msg = "Invalid usage of Event, `Config` contains associated type `Event`, \
+					but enum `Event` is not declared (i.e. no use of `#[pallet::event]`). \
+					Note that the associated type `Event` in the trait is reserved to work \
+					alongside the pallet event.";
+				Err(syn::Error::new(proc_macro2::Span::call_site(), msg))
+			},
+			(false, true) => {
+				let msg = "Invalid usage of Event, `Config` contains no associated type \
+					`Event`, but enum `Event` is declared (in use of `#[pallet::event]`). \
+					An `Event` associated type must be declared on trait `Config`.";
+				Err(syn::Error::new(proc_macro2::Span::call_site(), msg))
+			},
+			_ => Ok(()),
+		}
+	}
+
+	/// Check that usage of trait `Config` is consistent with its definition, i.e. generics are
+	/// used with an instance iff the trait is defined with an instance.
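+	///
+	/// A minimal sketch of what this rejects (illustrative only; `Error`, the trait
+	/// bounds and bodies are placeholders, not part of this patch): with a `Config`
+	/// trait defined without an instance, declaring a pallet item generic over an
+	/// instance is an error.
+	///
+	/// ```ignore
+	/// #[pallet::config]
+	/// pub trait Config: frame_system::Config { /* no instance generic */ }
+	///
+	/// #[pallet::error]
+	/// pub enum Error<T, I = ()> {} // rejected: generics use an instance
+	/// ```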
+	fn check_instance_usage(&self) -> syn::Result<()> {
+		let mut instances = vec![];
+		instances.extend_from_slice(&self.pallet_struct.instances[..]);
+		instances.extend(&mut self.storages.iter().flat_map(|s| s.instances.clone()));
+		if let Some(call) = &self.call {
+			instances.extend_from_slice(&call.instances[..]);
+		}
+		if let Some(hooks) = &self.hooks {
+			instances.extend_from_slice(&hooks.instances[..]);
+		}
+		if let Some(event) = &self.event {
+			instances.extend_from_slice(&event.instances[..]);
+		}
+		if let Some(error) = &self.error {
+			instances.extend_from_slice(&error.instances[..]);
+		}
+		if let Some(inherent) = &self.inherent {
+			instances.extend_from_slice(&inherent.instances[..]);
+		}
+		if let Some(origin) = &self.origin {
+			instances.extend_from_slice(&origin.instances[..]);
+		}
+		if let Some(genesis_config) = &self.genesis_config {
+			instances.extend_from_slice(&genesis_config.instances[..]);
+		}
+		if let Some(genesis_build) = &self.genesis_build {
+			instances.extend_from_slice(&genesis_build.instances[..]);
+		}
+		if let Some(extra_constants) = &self.extra_constants {
+			instances.extend_from_slice(&extra_constants.instances[..]);
+		}
+
+		let mut errors = instances.into_iter().filter_map(|instances| {
+			if instances.has_instance == self.config.has_instance {
+				return None
+			}
+			let msg = if self.config.has_instance {
+				"Invalid generic declaration, trait is defined with instance but generics use none"
+			} else {
+				"Invalid generic declaration, trait is defined without instance but generics use \
+					some"
+			};
+			Some(syn::Error::new(instances.span, msg))
+		});
+
+		if let Some(mut first_error) = errors.next() {
+			for error in errors {
+				first_error.combine(error)
+			}
+			Err(first_error)
+		} else {
+			Ok(())
+		}
+	}
+
+	/// Depending on whether the pallet is instantiable:
+	/// * either `T: Config`
+	/// * or `T: Config<I>, I: 'static`
+	pub fn type_impl_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream {
+		if self.config.has_instance {
+			quote::quote_spanned!(span => T: Config<I>, I: 'static)
+		} else {
+			quote::quote_spanned!(span => T: Config)
+		}
+	}
+
+	/// Depending on whether the pallet is instantiable:
+	/// * either `T: Config`
+	/// * or `T: Config<I>, I: 'static = ()`
+	pub fn type_decl_bounded_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream {
+		if self.config.has_instance {
+			quote::quote_spanned!(span => T: Config<I>, I: 'static = ())
+		} else {
+			quote::quote_spanned!(span => T: Config)
+		}
+	}
+
+	/// Depending on whether the pallet is instantiable:
+	/// * either `T`
+	/// * or `T, I = ()`
+	pub fn type_decl_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream {
+		if self.config.has_instance {
+			quote::quote_spanned!(span => T, I = ())
+		} else {
+			quote::quote_spanned!(span => T)
+		}
+	}
+
+	/// Depending on whether the pallet is instantiable:
+	/// * either `<T>`
+	/// * or `<T, I>`
+	/// to be used when using the pallet trait `Config`.
+	pub fn trait_use_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream {
+		if self.config.has_instance {
+			quote::quote_spanned!(span => <T, I>)
+		} else {
+			quote::quote_spanned!(span => <T>)
+		}
+	}
+
+	/// Depending on whether the pallet is instantiable:
+	/// * either `T`
+	/// * or `T, I`
+	pub fn type_use_generics(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream {
+		if self.config.has_instance {
+			quote::quote_spanned!(span => T, I)
+		} else {
+			quote::quote_spanned!(span => T)
+		}
+	}
+}
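+// A minimal sketch of how the expansion code is expected to use the generics
+// helpers above (illustrative only; `def`, `span` and the generated impl are
+// placeholders, not part of this patch):
+//
+//     let impl_gen = def.type_impl_generics(span); // `T: Config<I>, I: 'static`
+//     let use_gen = def.type_use_generics(span);   // `T, I`
+//     quote::quote!(impl<#impl_gen> Pallet<#use_gen> {});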
+/// Some generic kind for a type which can be non-generic, generic over config, or generic over
+/// config and instance, but not generic over instance alone.
+pub enum GenericKind {
+	None,
+	Config,
+	ConfigAndInstance,
+}
+
+impl GenericKind {
+	/// Returns `Err` if the type is generic over instance but not over config.
+	pub fn from_gens(has_config: bool, has_instance: bool) -> Result<Self, ()> {
+		match (has_config, has_instance) {
+			(false, false) => Ok(GenericKind::None),
+			(true, false) => Ok(GenericKind::Config),
+			(true, true) => Ok(GenericKind::ConfigAndInstance),
+			(false, true) => Err(()),
+		}
+	}
+
+	/// Return the generics to be used when using the type.
+	///
+	/// Depending on its definition it can be: ``, `T` or `T, I`
+	pub fn type_use_gen(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream {
+		match self {
+			GenericKind::None => quote::quote!(),
+			GenericKind::Config => quote::quote_spanned!(span => T),
+			GenericKind::ConfigAndInstance => quote::quote_spanned!(span => T, I),
+		}
+	}
+
+	/// Return the generics to be used in `impl<..>` when implementing on the type.
+	pub fn type_impl_gen(&self, span: proc_macro2::Span) -> proc_macro2::TokenStream {
+		match self {
+			GenericKind::None => quote::quote!(),
+			GenericKind::Config => quote::quote_spanned!(span => T: Config),
+			GenericKind::ConfigAndInstance => {
+				quote::quote_spanned!(span => T: Config<I>, I: 'static)
+			},
+		}
+	}
+
+	/// Return whether the type has any generics.
+	pub fn is_generic(&self) -> bool {
+		match self {
+			GenericKind::None => false,
+			GenericKind::Config | GenericKind::ConfigAndInstance => true,
+		}
+	}
+}
+
+/// List of additional tokens to be used for parsing.
+mod keyword {
+	syn::custom_keyword!(origin);
+	syn::custom_keyword!(call);
+	syn::custom_keyword!(event);
+	syn::custom_keyword!(config);
+	syn::custom_keyword!(hooks);
+	syn::custom_keyword!(inherent);
+	syn::custom_keyword!(error);
+	syn::custom_keyword!(storage);
+	syn::custom_keyword!(genesis_build);
+	syn::custom_keyword!(genesis_config);
+	syn::custom_keyword!(validate_unsigned);
+	syn::custom_keyword!(type_value);
+	syn::custom_keyword!(pallet);
+	syn::custom_keyword!(generate_store);
+	syn::custom_keyword!(Store);
+	syn::custom_keyword!(extra_constants);
+}
+
+/// Parse attributes for items in the pallet module;
+/// syntax must be `pallet::<keyword>` (e.g.
`#[pallet::config]`) +enum PalletAttr { + Config(proc_macro2::Span), + Pallet(proc_macro2::Span), + Hooks(proc_macro2::Span), + Call(proc_macro2::Span), + Error(proc_macro2::Span), + Event(proc_macro2::Span), + Origin(proc_macro2::Span), + Inherent(proc_macro2::Span), + Storage(proc_macro2::Span), + GenesisConfig(proc_macro2::Span), + GenesisBuild(proc_macro2::Span), + ValidateUnsigned(proc_macro2::Span), + TypeValue(proc_macro2::Span), + ExtraConstants(proc_macro2::Span), +} + +impl PalletAttr { + fn span(&self) -> proc_macro2::Span { + match self { + Self::Config(span) => *span, + Self::Pallet(span) => *span, + Self::Hooks(span) => *span, + Self::Call(span) => *span, + Self::Error(span) => *span, + Self::Event(span) => *span, + Self::Origin(span) => *span, + Self::Inherent(span) => *span, + Self::Storage(span) => *span, + Self::GenesisConfig(span) => *span, + Self::GenesisBuild(span) => *span, + Self::ValidateUnsigned(span) => *span, + Self::TypeValue(span) => *span, + Self::ExtraConstants(span) => *span, + } + } +} + +impl syn::parse::Parse for PalletAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::config) { + Ok(PalletAttr::Config(content.parse::()?.span())) + } else if lookahead.peek(keyword::pallet) { + Ok(PalletAttr::Pallet(content.parse::()?.span())) + } else if lookahead.peek(keyword::hooks) { + Ok(PalletAttr::Hooks(content.parse::()?.span())) + } else if lookahead.peek(keyword::call) { + Ok(PalletAttr::Call(content.parse::()?.span())) + } else if lookahead.peek(keyword::error) { + Ok(PalletAttr::Error(content.parse::()?.span())) + } else if lookahead.peek(keyword::event) { + Ok(PalletAttr::Event(content.parse::()?.span())) + } else if lookahead.peek(keyword::origin) { + Ok(PalletAttr::Origin(content.parse::()?.span())) + } else if lookahead.peek(keyword::inherent) { + Ok(PalletAttr::Inherent(content.parse::()?.span())) + } else if lookahead.peek(keyword::storage) { + Ok(PalletAttr::Storage(content.parse::()?.span())) + } else if lookahead.peek(keyword::genesis_config) { + Ok(PalletAttr::GenesisConfig(content.parse::()?.span())) + } else if lookahead.peek(keyword::genesis_build) { + Ok(PalletAttr::GenesisBuild(content.parse::()?.span())) + } else if lookahead.peek(keyword::validate_unsigned) { + Ok(PalletAttr::ValidateUnsigned(content.parse::()?.span())) + } else if lookahead.peek(keyword::type_value) { + Ok(PalletAttr::TypeValue(content.parse::()?.span())) + } else if lookahead.peek(keyword::extra_constants) { + Ok(PalletAttr::ExtraConstants(content.parse::()?.span())) + } else { + Err(lookahead.error()) + } + } +} diff --git a/frame/support/procedural/src/pallet/parse/origin.rs b/frame/support/procedural/src/pallet/parse/origin.rs new file mode 100644 index 0000000000000..c4e1197ac511c --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/origin.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::helper;
+use syn::spanned::Spanned;
+
+/// Definition of the pallet origin type.
+///
+/// Either:
+/// * `type Origin`
+/// * `struct Origin`
+/// * `enum Origin`
+pub struct OriginDef {
+	/// The index of the item in the pallet module.
+	pub index: usize,
+	pub has_instance: bool,
+	pub is_generic: bool,
+	/// A set of instance usages, must be checked for consistency with the trait.
+	pub instances: Vec<helper::InstanceUsage>,
+}
+
+impl OriginDef {
+	pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result<Self> {
+		let item_span = item.span();
+		let (vis, ident, generics) = match &item {
+			syn::Item::Enum(item) => (&item.vis, &item.ident, &item.generics),
+			syn::Item::Struct(item) => (&item.vis, &item.ident, &item.generics),
+			syn::Item::Type(item) => (&item.vis, &item.ident, &item.generics),
+			_ => {
+				let msg = "Invalid pallet::origin, expected enum or struct or type";
+				return Err(syn::Error::new(item.span(), msg))
+			},
+		};
+
+		let has_instance = generics.params.len() == 2;
+		let is_generic = !generics.params.is_empty();
+
+		let mut instances = vec![];
+		if let Some(u) = helper::check_type_def_optional_gen(&generics, item.span())? {
+			instances.push(u);
+		} else {
+			// construct_runtime only allows a generic origin for instantiable pallets.
+			instances.push(helper::InstanceUsage { has_instance: false, span: ident.span() })
+		}
+
+		if !matches!(vis, syn::Visibility::Public(_)) {
+			let msg = "Invalid pallet::origin, Origin must be public";
+			return Err(syn::Error::new(item_span, msg))
+		}
+
+		if ident != "Origin" {
+			let msg = "Invalid pallet::origin, ident must be `Origin`";
+			return Err(syn::Error::new(ident.span(), msg))
+		}
+
+		Ok(OriginDef { index, has_instance, is_generic, instances })
+	}
+}
diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs
new file mode 100644
index 0000000000000..278f46e13818e
--- /dev/null
+++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs
@@ -0,0 +1,171 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::helper;
+use quote::ToTokens;
+use syn::spanned::Spanned;
+
+/// List of additional tokens to be used for parsing.
+mod keyword {
+	syn::custom_keyword!(pallet);
+	syn::custom_keyword!(Pallet);
+	syn::custom_keyword!(generate_store);
+	syn::custom_keyword!(generate_storage_info);
+	syn::custom_keyword!(storage_version);
+	syn::custom_keyword!(Store);
+}
+
+/// Definition of the pallet struct (the `#[pallet::pallet]` item).
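+///
+/// A minimal sketch of the item this parses (illustrative only; the struct body
+/// and visibility are placeholders, and the attribute forms are those accepted
+/// by `PalletStructAttr` below):
+///
+/// ```ignore
+/// #[pallet::pallet]
+/// #[pallet::generate_store(pub(super) trait Store)]
+/// pub struct Pallet<T>(_);
+/// ```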
+pub struct PalletStructDef { + /// The index of item in pallet pallet. + pub index: usize, + /// A set of usage of instance, must be check for consistency with config trait. + pub instances: Vec, + /// The keyword Pallet used (contains span). + pub pallet: keyword::Pallet, + /// Whether the trait `Store` must be generated. + pub store: Option<(syn::Visibility, keyword::Store)>, + /// The span of the pallet::pallet attribute. + pub attr_span: proc_macro2::Span, + /// Whether to specify the storages max encoded len when implementing `StorageInfoTrait`. + /// Contains the span of the attribute. + pub generate_storage_info: Option, + /// The current storage version of the pallet. + pub storage_version: Option, +} + +/// Parse for one variant of: +/// * `#[pallet::generate_store($vis trait Store)]` +/// * `#[pallet::generate_storage_info]` +/// * `#[pallet::storage_version(STORAGE_VERSION)]` +pub enum PalletStructAttr { + GenerateStore { span: proc_macro2::Span, vis: syn::Visibility, keyword: keyword::Store }, + GenerateStorageInfoTrait(proc_macro2::Span), + StorageVersion { storage_version: syn::Path, span: proc_macro2::Span }, +} + +impl PalletStructAttr { + fn span(&self) -> proc_macro2::Span { + match self { + Self::GenerateStore { span, .. } => *span, + Self::GenerateStorageInfoTrait(span) => *span, + Self::StorageVersion { span, .. } => *span, + } + } +} + +impl syn::parse::Parse for PalletStructAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::generate_store) { + let span = content.parse::()?.span(); + + let generate_content; + syn::parenthesized!(generate_content in content); + let vis = generate_content.parse::()?; + generate_content.parse::()?; + let keyword = generate_content.parse::()?; + Ok(Self::GenerateStore { vis, keyword, span }) + } else if lookahead.peek(keyword::generate_storage_info) { + let span = content.parse::()?.span(); + Ok(Self::GenerateStorageInfoTrait(span)) + } else if lookahead.peek(keyword::storage_version) { + let span = content.parse::()?.span(); + + let version_content; + syn::parenthesized!(version_content in content); + let storage_version = version_content.parse::()?; + + Ok(Self::StorageVersion { storage_version, span }) + } else { + Err(lookahead.error()) + } + } +} + +impl PalletStructDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Struct(item) = item { + item + } else { + let msg = "Invalid pallet::pallet, expected struct definition"; + return Err(syn::Error::new(item.span(), msg)) + }; + + let mut store = None; + let mut generate_storage_info = None; + let mut storage_version_found = None; + + let struct_attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; + for attr in struct_attrs { + match attr { + PalletStructAttr::GenerateStore { vis, keyword, .. } if store.is_none() => { + store = Some((vis, keyword)); + }, + PalletStructAttr::GenerateStorageInfoTrait(span) + if generate_storage_info.is_none() => + { + generate_storage_info = Some(span); + } + PalletStructAttr::StorageVersion { storage_version, .. 
} + if storage_version_found.is_none() => + { + storage_version_found = Some(storage_version); + } + attr => { + let msg = "Unexpected duplicated attribute"; + return Err(syn::Error::new(attr.span(), msg)) + }, + } + } + + let pallet = syn::parse2::(item.ident.to_token_stream())?; + + if !matches!(item.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::pallet, Pallet must be public"; + return Err(syn::Error::new(item.span(), msg)) + } + + if item.generics.where_clause.is_some() { + let msg = "Invalid pallet::pallet, where clause not supported on Pallet declaration"; + return Err(syn::Error::new(item.generics.where_clause.span(), msg)) + } + + let mut instances = vec![]; + instances.push(helper::check_type_def_gen_no_bounds(&item.generics, item.ident.span())?); + + Ok(Self { + index, + instances, + pallet, + store, + attr_span, + generate_storage_info, + storage_version: storage_version_found, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs new file mode 100644 index 0000000000000..8075daacb6f44 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -0,0 +1,709 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use frame_support_procedural_tools::get_doc_literals; +use quote::ToTokens; +use std::collections::HashMap; +use syn::spanned::Spanned; + +/// List of additional token to be used for parsing. 
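+// A minimal sketch of the storage items whose attributes are parsed in this
+// module (illustrative only; `Foo`, `T` and the value type are placeholders,
+// not part of this patch):
+//
+//     #[pallet::storage]
+//     #[pallet::getter(fn foo)]
+//     #[pallet::storage_prefix = "CustomPrefix"]
+//     pub type Foo<T> = StorageValue<_, u32, ValueQuery>;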
+mod keyword { + syn::custom_keyword!(Error); + syn::custom_keyword!(pallet); + syn::custom_keyword!(getter); + syn::custom_keyword!(storage_prefix); + syn::custom_keyword!(OptionQuery); + syn::custom_keyword!(ValueQuery); +} + +/// Parse for one of the following: +/// * `#[pallet::getter(fn dummy)]` +/// * `#[pallet::storage_prefix = "CustomName"]` +pub enum PalletStorageAttr { + Getter(syn::Ident, proc_macro2::Span), + StorageName(syn::LitStr, proc_macro2::Span), +} + +impl PalletStorageAttr { + fn attr_span(&self) -> proc_macro2::Span { + match self { + Self::Getter(_, span) | Self::StorageName(_, span) => *span, + } + } +} + +impl syn::parse::Parse for PalletStorageAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let attr_span = input.span(); + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + + let lookahead = content.lookahead1(); + if lookahead.peek(keyword::getter) { + content.parse::()?; + + let generate_content; + syn::parenthesized!(generate_content in content); + generate_content.parse::()?; + Ok(Self::Getter(generate_content.parse::()?, attr_span)) + } else if lookahead.peek(keyword::storage_prefix) { + content.parse::()?; + content.parse::()?; + + let renamed_prefix = content.parse::()?; + // Ensure the renamed prefix is a proper Rust identifier + syn::parse_str::(&renamed_prefix.value()).map_err(|_| { + let msg = format!("`{}` is not a valid identifier", renamed_prefix.value()); + syn::Error::new(renamed_prefix.span(), msg) + })?; + + Ok(Self::StorageName(renamed_prefix, attr_span)) + } else { + Err(lookahead.error()) + } + } +} + +/// The value and key types used by storages. Needed to expand metadata. +pub enum Metadata { + Value { value: syn::Type }, + Map { value: syn::Type, key: syn::Type }, + CountedMap { value: syn::Type, key: syn::Type }, + DoubleMap { value: syn::Type, key1: syn::Type, key2: syn::Type }, + NMap { keys: Vec, keygen: syn::Type, value: syn::Type }, +} + +pub enum QueryKind { + OptionQuery, + ValueQuery, +} + +/// Definition of a storage, storage is a storage type like +/// `type MyStorage = StorageValue` +/// The keys and values types are parsed in order to get metadata +pub struct StorageDef { + /// The index of error item in pallet module. + pub index: usize, + /// Visibility of the storage type. + pub vis: syn::Visibility, + /// The type ident, to generate the StoragePrefix for. + pub ident: syn::Ident, + /// The keys and value metadata of the storage. + pub metadata: Metadata, + /// The doc associated to the storage. + pub docs: Vec, + /// A set of usage of instance, must be check for consistency with config. + pub instances: Vec, + /// Optional getter to generate. If some then query_kind is ensured to be some as well. + pub getter: Option, + /// Optional expression that evaluates to a type that can be used as StoragePrefix instead of + /// ident. + pub rename_as: Option, + /// Whereas the querytype of the storage is OptionQuery or ValueQuery. + /// Note that this is best effort as it can't be determined when QueryKind is generic, and + /// result can be false if user do some unexpected type alias. + pub query_kind: Option, + /// Where clause of type definition. + pub where_clause: Option, + /// The span of the pallet::storage attribute. + pub attr_span: proc_macro2::Span, + /// The `cfg` attributes. + pub cfg_attrs: Vec, + /// If generics are named (e.g. `StorageValue`) then this contains all the + /// generics of the storage. 
+ /// If generics are not named, this is none. + pub named_generics: Option, +} + +/// The parsed generic from the +#[derive(Clone)] +pub enum StorageGenerics { + DoubleMap { + hasher1: syn::Type, + key1: syn::Type, + hasher2: syn::Type, + key2: syn::Type, + value: syn::Type, + query_kind: Option, + on_empty: Option, + max_values: Option, + }, + Map { + hasher: syn::Type, + key: syn::Type, + value: syn::Type, + query_kind: Option, + on_empty: Option, + max_values: Option, + }, + CountedMap { + hasher: syn::Type, + key: syn::Type, + value: syn::Type, + query_kind: Option, + on_empty: Option, + max_values: Option, + }, + Value { + value: syn::Type, + query_kind: Option, + on_empty: Option, + }, + NMap { + keygen: syn::Type, + value: syn::Type, + query_kind: Option, + on_empty: Option, + max_values: Option, + }, +} + +impl StorageGenerics { + /// Return the metadata from the defined generics + fn metadata(&self) -> syn::Result { + let res = match self.clone() { + Self::DoubleMap { value, key1, key2, .. } => Metadata::DoubleMap { value, key1, key2 }, + Self::Map { value, key, .. } => Metadata::Map { value, key }, + Self::CountedMap { value, key, .. } => Metadata::CountedMap { value, key }, + Self::Value { value, .. } => Metadata::Value { value }, + Self::NMap { keygen, value, .. } => + Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value }, + }; + + Ok(res) + } + + /// Return the query kind from the defined generics + fn query_kind(&self) -> Option { + match &self { + Self::DoubleMap { query_kind, .. } | + Self::Map { query_kind, .. } | + Self::CountedMap { query_kind, .. } | + Self::Value { query_kind, .. } | + Self::NMap { query_kind, .. } => query_kind.clone(), + } + } +} + +enum StorageKind { + Value, + Map, + CountedMap, + DoubleMap, + NMap, +} + +/// Check the generics in the `map` contains the generics in `gen` may contains generics in +/// `optional_gen`, and doesn't contains any other. +fn check_generics( + map: &HashMap, + mandatory_generics: &[&str], + optional_generics: &[&str], + storage_type_name: &str, + args_span: proc_macro2::Span, +) -> syn::Result<()> { + let mut errors = vec![]; + + let expectation = { + let mut e = format!( + "`{}` expect generics {}and optional generics {}", + storage_type_name, + mandatory_generics + .iter() + .map(|name| format!("`{}`, ", name)) + .collect::(), + &optional_generics.iter().map(|name| format!("`{}`, ", name)).collect::(), + ); + e.pop(); + e.pop(); + e.push_str("."); + e + }; + + for (gen_name, gen_binding) in map { + if !mandatory_generics.contains(&gen_name.as_str()) && + !optional_generics.contains(&gen_name.as_str()) + { + let msg = format!( + "Invalid pallet::storage, Unexpected generic `{}` for `{}`. 
{}", + gen_name, storage_type_name, expectation, + ); + errors.push(syn::Error::new(gen_binding.span(), msg)); + } + } + + for mandatory_generic in mandatory_generics { + if !map.contains_key(&mandatory_generic.to_string()) { + let msg = format!( + "Invalid pallet::storage, cannot find `{}` generic, required for `{}`.", + mandatory_generic, storage_type_name + ); + errors.push(syn::Error::new(args_span, msg)); + } + } + + let mut errors = errors.drain(..); + if let Some(mut error) = errors.next() { + for other_error in errors { + error.combine(other_error); + } + Err(error) + } else { + Ok(()) + } +} + +/// Returns `(named generics, metadata, query kind)` +fn process_named_generics( + storage: &StorageKind, + args_span: proc_macro2::Span, + args: &[syn::Binding], +) -> syn::Result<(Option, Metadata, Option)> { + let mut parsed = HashMap::::new(); + + // Ensure no duplicate. + for arg in args { + if let Some(other) = parsed.get(&arg.ident.to_string()) { + let msg = "Invalid pallet::storage, Duplicated named generic"; + let mut err = syn::Error::new(arg.ident.span(), msg); + err.combine(syn::Error::new(other.ident.span(), msg)); + return Err(err) + } + parsed.insert(arg.ident.to_string(), arg.clone()); + } + + let generics = match storage { + StorageKind::Value => { + check_generics( + &parsed, + &["Value"], + &["QueryKind", "OnEmpty"], + "StorageValue", + args_span, + )?; + + StorageGenerics::Value { + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), + } + }, + StorageKind::Map => { + check_generics( + &parsed, + &["Hasher", "Key", "Value"], + &["QueryKind", "OnEmpty", "MaxValues"], + "StorageMap", + args_span, + )?; + + StorageGenerics::Map { + hasher: parsed + .remove("Hasher") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + key: parsed + .remove("Key") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), + max_values: parsed.remove("MaxValues").map(|binding| binding.ty), + } + }, + StorageKind::CountedMap => { + check_generics( + &parsed, + &["Hasher", "Key", "Value"], + &["QueryKind", "OnEmpty", "MaxValues"], + "CountedStorageMap", + args_span, + )?; + + StorageGenerics::CountedMap { + hasher: parsed + .remove("Hasher") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + key: parsed + .remove("Key") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), + max_values: parsed.remove("MaxValues").map(|binding| binding.ty), + } + }, + StorageKind::DoubleMap => { + check_generics( + &parsed, + &["Hasher1", "Key1", "Hasher2", "Key2", "Value"], + &["QueryKind", "OnEmpty", "MaxValues"], + "StorageDoubleMap", + args_span, + )?; + + StorageGenerics::DoubleMap { + hasher1: parsed + .remove("Hasher1") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + key1: parsed + .remove("Key1") + 
.map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + hasher2: parsed + .remove("Hasher2") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + key2: parsed + .remove("Key2") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), + max_values: parsed.remove("MaxValues").map(|binding| binding.ty), + } + }, + StorageKind::NMap => { + check_generics( + &parsed, + &["Key", "Value"], + &["QueryKind", "OnEmpty", "MaxValues"], + "StorageNMap", + args_span, + )?; + + StorageGenerics::NMap { + keygen: parsed + .remove("Key") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), + max_values: parsed.remove("MaxValues").map(|binding| binding.ty), + } + }, + }; + + let metadata = generics.metadata()?; + let query_kind = generics.query_kind(); + + Ok((Some(generics), metadata, query_kind)) +} + +/// Returns `(named generics, metadata, query kind)` +fn process_unnamed_generics( + storage: &StorageKind, + args_span: proc_macro2::Span, + args: &[syn::Type], +) -> syn::Result<(Option, Metadata, Option)> { + let retrieve_arg = |arg_pos| { + args.get(arg_pos).cloned().ok_or_else(|| { + let msg = format!( + "Invalid pallet::storage, unexpected number of generic argument, \ + expect at least {} args, found {}.", + arg_pos + 1, + args.len(), + ); + syn::Error::new(args_span, msg) + }) + }; + + let prefix_arg = retrieve_arg(0)?; + syn::parse2::(prefix_arg.to_token_stream()).map_err(|e| { + let msg = "Invalid pallet::storage, for unnamed generic arguments the type \ + first generic argument must be `_`, the argument is then replaced by macro."; + let mut err = syn::Error::new(prefix_arg.span(), msg); + err.combine(e); + err + })?; + + let res = match storage { + StorageKind::Value => + (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok()), + StorageKind::Map => ( + None, + Metadata::Map { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, + retrieve_arg(4).ok(), + ), + StorageKind::CountedMap => ( + None, + Metadata::CountedMap { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, + retrieve_arg(4).ok(), + ), + StorageKind::DoubleMap => ( + None, + Metadata::DoubleMap { + key1: retrieve_arg(2)?, + key2: retrieve_arg(4)?, + value: retrieve_arg(5)?, + }, + retrieve_arg(6).ok(), + ), + StorageKind::NMap => { + let keygen = retrieve_arg(1)?; + let keys = collect_keys(&keygen)?; + (None, Metadata::NMap { keys, keygen, value: retrieve_arg(2)? 
}, retrieve_arg(3).ok()) + }, + }; + + Ok(res) +} + +/// Returns `(named generics, metadata, query kind)` +fn process_generics( + segment: &syn::PathSegment, +) -> syn::Result<(Option, Metadata, Option)> { + let storage_kind = match &*segment.ident.to_string() { + "StorageValue" => StorageKind::Value, + "StorageMap" => StorageKind::Map, + "CountedStorageMap" => StorageKind::CountedMap, + "StorageDoubleMap" => StorageKind::DoubleMap, + "StorageNMap" => StorageKind::NMap, + found => { + let msg = format!( + "Invalid pallet::storage, expected ident: `StorageValue` or \ + `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, \ + found `{}`.", + found, + ); + return Err(syn::Error::new(segment.ident.span(), msg)) + }, + }; + + let args_span = segment.arguments.span(); + + let args = match &segment.arguments { + syn::PathArguments::AngleBracketed(args) if args.args.len() != 0 => args, + _ => { + let msg = "Invalid pallet::storage, invalid number of generic generic arguments, \ + expect more that 0 generic arguments."; + return Err(syn::Error::new(segment.span(), msg)) + }, + }; + + if args.args.iter().all(|gen| matches!(gen, syn::GenericArgument::Type(_))) { + let args = args + .args + .iter() + .map(|gen| match gen { + syn::GenericArgument::Type(gen) => gen.clone(), + _ => unreachable!("It is asserted above that all generics are types"), + }) + .collect::>(); + process_unnamed_generics(&storage_kind, args_span, &args) + } else if args.args.iter().all(|gen| matches!(gen, syn::GenericArgument::Binding(_))) { + let args = args + .args + .iter() + .map(|gen| match gen { + syn::GenericArgument::Binding(gen) => gen.clone(), + _ => unreachable!("It is asserted above that all generics are bindings"), + }) + .collect::>(); + process_named_generics(&storage_kind, args_span, &args) + } else { + let msg = "Invalid pallet::storage, invalid generic declaration for storage. Expect only \ + type generics or binding generics, e.g. `` or \ + ``."; + Err(syn::Error::new(segment.span(), msg)) + } +} + +/// Parse the 2nd type argument to `StorageNMap` and return its keys. +fn collect_keys(keygen: &syn::Type) -> syn::Result> { + if let syn::Type::Tuple(tup) = keygen { + tup.elems.iter().map(extract_key).collect::>>() + } else { + Ok(vec![extract_key(keygen)?]) + } +} + +/// In `Key`, extract K and return it. 
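+///
+/// Illustrative example (placeholder types, not from this patch): given
+/// `Key<Blake2_128Concat, u32>` this returns `u32`; `NMapKey` is accepted in
+/// place of `Key`.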
+fn extract_key(ty: &syn::Type) -> syn::Result { + let typ = if let syn::Type::Path(typ) = ty { + typ + } else { + let msg = "Invalid pallet::storage, expected type path"; + return Err(syn::Error::new(ty.span(), msg)) + }; + + let key_struct = typ.path.segments.last().ok_or_else(|| { + let msg = "Invalid pallet::storage, expected type path with at least one segment"; + syn::Error::new(typ.path.span(), msg) + })?; + if key_struct.ident != "Key" && key_struct.ident != "NMapKey" { + let msg = "Invalid pallet::storage, expected Key or NMapKey struct"; + return Err(syn::Error::new(key_struct.ident.span(), msg)) + } + + let ty_params = if let syn::PathArguments::AngleBracketed(args) = &key_struct.arguments { + args + } else { + let msg = "Invalid pallet::storage, expected angle bracketed arguments"; + return Err(syn::Error::new(key_struct.arguments.span(), msg)) + }; + + if ty_params.args.len() != 2 { + let msg = format!( + "Invalid pallet::storage, unexpected number of generic arguments \ + for Key struct, expected 2 args, found {}", + ty_params.args.len() + ); + return Err(syn::Error::new(ty_params.span(), msg)) + } + + let key = match &ty_params.args[1] { + syn::GenericArgument::Type(key_ty) => key_ty.clone(), + _ => { + let msg = "Invalid pallet::storage, expected type"; + return Err(syn::Error::new(ty_params.args[1].span(), msg)) + }, + }; + + Ok(key) +} + +impl StorageDef { + /// Return the storage prefix for this storage item + pub fn prefix(&self) -> String { + self.rename_as + .as_ref() + .map(syn::LitStr::value) + .unwrap_or(self.ident.to_string()) + } + + /// Return either the span of the ident or the span of the literal in the + /// #[storage_prefix] attribute + pub fn prefix_span(&self) -> proc_macro2::Span { + self.rename_as.as_ref().map(syn::LitStr::span).unwrap_or(self.ident.span()) + } + + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Type(item) = item { + item + } else { + return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")) + }; + + let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; + let (mut getters, mut names) = attrs + .into_iter() + .partition::, _>(|attr| matches!(attr, PalletStorageAttr::Getter(..))); + if getters.len() > 1 { + let msg = "Invalid pallet::storage, multiple argument pallet::getter found"; + return Err(syn::Error::new(getters[1].attr_span(), msg)) + } + if names.len() > 1 { + let msg = "Invalid pallet::storage, multiple argument pallet::storage_prefix found"; + return Err(syn::Error::new(names[1].attr_span(), msg)) + } + let getter = getters.pop().map(|attr| match attr { + PalletStorageAttr::Getter(ident, _) => ident, + _ => unreachable!(), + }); + let rename_as = names.pop().map(|attr| match attr { + PalletStorageAttr::StorageName(lit, _) => lit, + _ => unreachable!(), + }); + + let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); + + let mut instances = vec![]; + instances.push(helper::check_type_def_gen(&item.generics, item.ident.span())?); + + let where_clause = item.generics.where_clause.clone(); + let docs = get_doc_literals(&item.attrs); + + let typ = if let syn::Type::Path(typ) = &*item.ty { + typ + } else { + let msg = "Invalid pallet::storage, expected type path"; + return Err(syn::Error::new(item.ty.span(), msg)) + }; + + if typ.path.segments.len() != 1 { + let msg = "Invalid pallet::storage, expected type path with one segment"; + return Err(syn::Error::new(item.ty.span(), msg)) + } + + let 
(named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; + + let query_kind = query_kind + .map(|query_kind| match query_kind { + syn::Type::Path(path) + if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") => + Some(QueryKind::OptionQuery), + syn::Type::Path(path) + if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => + Some(QueryKind::ValueQuery), + _ => None, + }) + .unwrap_or(Some(QueryKind::OptionQuery)); // This value must match the default generic. + + if query_kind.is_none() && getter.is_some() { + let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ + identifiable. QueryKind must be `OptionQuery`, `ValueQuery`, or default one to be \ + identifiable."; + return Err(syn::Error::new(getter.unwrap().span(), msg)) + } + + Ok(StorageDef { + attr_span, + index, + vis: item.vis.clone(), + ident: item.ident.clone(), + instances, + metadata, + docs, + getter, + rename_as, + query_kind, + where_clause, + cfg_attrs, + named_generics, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/type_value.rs b/frame/support/procedural/src/pallet/parse/type_value.rs new file mode 100644 index 0000000000000..7b9d57472db4b --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/type_value.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// Definition of type value. Just a function which is expanded to a struct implementing `Get`. +pub struct TypeValueDef { + /// The index of error item in pallet module. + pub index: usize, + /// Visibility of the struct to generate. + pub vis: syn::Visibility, + /// Ident of the struct to generate. + pub ident: syn::Ident, + /// The type return by Get. + pub type_: Box, + /// The block returning the value to get + pub block: Box, + /// If type value is generic over `T` (or `T` and `I` for instantiable pallet) + pub is_generic: bool, + /// A set of usage of instance, must be check for consistency with config. + pub instances: Vec, + /// The where clause of the function. + pub where_clause: Option, + /// The span of the pallet::type_value attribute. 
+ pub attr_span: proc_macro2::Span, +} + +impl TypeValueDef { + pub fn try_from( + attr_span: proc_macro2::Span, + index: usize, + item: &mut syn::Item, + ) -> syn::Result { + let item = if let syn::Item::Fn(item) = item { + item + } else { + let msg = "Invalid pallet::type_value, expected item fn"; + return Err(syn::Error::new(item.span(), msg)) + }; + + if !item.attrs.is_empty() { + let msg = "Invalid pallet::type_value, unexpected attribute"; + return Err(syn::Error::new(item.attrs[0].span(), msg)) + } + + if let Some(span) = item + .sig + .constness + .as_ref() + .map(|t| t.span()) + .or_else(|| item.sig.asyncness.as_ref().map(|t| t.span())) + .or_else(|| item.sig.unsafety.as_ref().map(|t| t.span())) + .or_else(|| item.sig.abi.as_ref().map(|t| t.span())) + .or_else(|| item.sig.variadic.as_ref().map(|t| t.span())) + { + let msg = "Invalid pallet::type_value, unexpected token"; + return Err(syn::Error::new(span, msg)) + } + + if !item.sig.inputs.is_empty() { + let msg = "Invalid pallet::type_value, unexpected argument"; + return Err(syn::Error::new(item.sig.inputs[0].span(), msg)) + } + + let vis = item.vis.clone(); + let ident = item.sig.ident.clone(); + let block = item.block.clone(); + let type_ = match item.sig.output.clone() { + syn::ReturnType::Type(_, type_) => type_, + syn::ReturnType::Default => { + let msg = "Invalid pallet::type_value, expected return type"; + return Err(syn::Error::new(item.sig.span(), msg)) + }, + }; + + let mut instances = vec![]; + if let Some(usage) = helper::check_type_value_gen(&item.sig.generics, item.sig.span())? { + instances.push(usage); + } + + let is_generic = item.sig.generics.type_params().count() > 0; + let where_clause = item.sig.generics.where_clause.clone(); + + Ok(TypeValueDef { + attr_span, + index, + is_generic, + vis, + ident, + block, + type_, + instances, + where_clause, + }) + } +} diff --git a/frame/support/procedural/src/pallet/parse/validate_unsigned.rs b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs new file mode 100644 index 0000000000000..87e2a326f1862 --- /dev/null +++ b/frame/support/procedural/src/pallet/parse/validate_unsigned.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::helper; +use syn::spanned::Spanned; + +/// The definition of the pallet validate unsigned implementation. +pub struct ValidateUnsignedDef { + /// The index of validate unsigned item in pallet module. + pub index: usize, + /// A set of usage of instance, must be check for consistency with config. 
+ pub instances: Vec, +} + +impl ValidateUnsignedDef { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { + let item = if let syn::Item::Impl(item) = item { + item + } else { + let msg = "Invalid pallet::validate_unsigned, expected item impl"; + return Err(syn::Error::new(item.span(), msg)) + }; + + if item.trait_.is_none() { + let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ + Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)) + } + + if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { + if last.ident != "ValidateUnsigned" { + let msg = "Invalid pallet::validate_unsigned, expected trait ValidateUnsigned"; + return Err(syn::Error::new(last.span(), msg)) + } + } else { + let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ + Pallet<..>"; + return Err(syn::Error::new(item.span(), msg)) + } + + let mut instances = vec![]; + instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); + instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); + + Ok(ValidateUnsignedDef { index, instances }) + } +} diff --git a/frame/support/procedural/src/partial_eq_no_bound.rs b/frame/support/procedural/src/partial_eq_no_bound.rs index df8d661a2b269..3dbabf3f5d39a 100644 --- a/frame/support/procedural/src/partial_eq_no_bound.rs +++ b/frame/support/procedural/src/partial_eq_no_bound.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,41 +30,47 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: let impl_ = match input.data { syn::Data::Struct(struct_) => match struct_.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() + let fields = named + .named + .iter() .map(|i| &i.ident) .map(|i| quote::quote_spanned!(i.span() => self.#i == other.#i )); quote::quote!( true #( && #fields )* ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() + let fields = unnamed + .unnamed + .iter() + .enumerate() .map(|(i, _)| syn::Index::from(i)) .map(|i| quote::quote_spanned!(i.span() => self.#i == other.#i )); quote::quote!( true #( && #fields )* ) }, syn::Fields::Unit => { - quote::quote!( true ) - } + quote::quote!(true) + }, }, syn::Data::Enum(enum_) => { - let variants = enum_.variants.iter() - .map(|variant| { + let variants = + enum_.variants.iter().map(|variant| { let ident = &variant.ident; match &variant.fields { syn::Fields::Named(named) => { let names = named.named.iter().map(|i| &i.ident); - let other_names = names.clone() - .enumerate() - .map(|(n, ident)| - syn::Ident::new(&format!("_{}", n), ident.span()) - ); + let other_names = names.clone().enumerate().map(|(n, ident)| { + syn::Ident::new(&format!("_{}", n), ident.span()) + }); let capture = names.clone(); - let other_capture = names.clone().zip(other_names.clone()) + let other_capture = names + .clone() + .zip(other_names.clone()) .map(|(i, other_i)| quote::quote!(#i: #other_i)); - let eq = names.zip(other_names) - .map(|(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i)); + let eq = names.zip(other_names).map( + |(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i), + ); quote::quote!( ( Self::#ident { #( #capture, )* }, @@ -73,12 +79,18 @@ pub fn derive_partial_eq_no_bound(input: 
proc_macro::TokenStream) -> proc_macro: ) }, syn::Fields::Unnamed(unnamed) => { - let names = unnamed.unnamed.iter().enumerate() + let names = unnamed + .unnamed + .iter() + .enumerate() .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); - let other_names = unnamed.unnamed.iter().enumerate() - .map(|(i, f)| syn::Ident::new(&format!("_{}_other", i), f.span())); - let eq = names.clone().zip(other_names.clone()) - .map(|(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i)); + let other_names = + unnamed.unnamed.iter().enumerate().map(|(i, f)| { + syn::Ident::new(&format!("_{}_other", i), f.span()) + }); + let eq = names.clone().zip(other_names.clone()).map( + |(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i), + ); quote::quote!( ( Self::#ident ( #( #names, )* ), @@ -122,5 +134,6 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: } } }; - ).into() + ) + .into() } diff --git a/frame/support/procedural/src/storage/genesis_config/builder_def.rs b/frame/support/procedural/src/storage/genesis_config/builder_def.rs index a045794529c95..001cea0f2b788 100644 --- a/frame/support/procedural/src/storage/genesis_config/builder_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/builder_def.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ //! Builder logic definition used to build genesis storage. +use super::super::{DeclStorageDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::TokenStream; -use syn::spanned::Spanned; use quote::{quote, quote_spanned}; -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; +use syn::spanned::Spanned; /// Definition of builder blocks, each block insert some value in the storage. /// They must be called inside externalities, and with `self` being the genesis config. @@ -53,13 +53,14 @@ impl BuilderDef { is_generic |= line.is_generic; data = Some(match &line.storage_type { - StorageLineTypeDef::Simple(_) if line.is_option => + StorageLineTypeDef::Simple(_) if line.is_option => { quote_spanned!(builder.span() => // NOTE: the type of `data` is specified when used later in the code let builder: fn(&Self) -> _ = #builder; let data = builder(self); let data = Option::as_ref(&data); - ), + ) + }, _ => quote_spanned!(builder.span() => // NOTE: the type of `data` is specified when used later in the code let builder: fn(&Self) -> _ = #builder; @@ -70,8 +71,9 @@ impl BuilderDef { is_generic |= line.is_generic; data = Some(match &line.storage_type { - StorageLineTypeDef::Simple(_) if line.is_option => - quote!( let data = Some(&self.#config); ), + StorageLineTypeDef::Simple(_) if line.is_option => { + quote!( let data = Some(&self.#config); ) + }, _ => quote!( let data = &self.#config; ), }); }; @@ -79,7 +81,7 @@ impl BuilderDef { if let Some(data) = data { blocks.push(match &line.storage_type { StorageLineTypeDef::Simple(_) if line.is_option => { - quote!{{ + quote! {{ #data let v: Option<&#value_type>= data; if let Some(v) = v { @@ -88,7 +90,7 @@ impl BuilderDef { }} }, StorageLineTypeDef::Simple(_) if !line.is_option => { - quote!{{ + quote! 
{{ #data let v: &#value_type = data; <#storage_struct as #scrate::#storage_trait>::put::<&#value_type>(v); @@ -97,7 +99,7 @@ impl BuilderDef { StorageLineTypeDef::Simple(_) => unreachable!(), StorageLineTypeDef::Map(map) => { let key = &map.key; - quote!{{ + quote! {{ #data let data: &#scrate::sp_std::vec::Vec<(#key, #value_type)> = data; data.iter().for_each(|(k, v)| { @@ -110,7 +112,7 @@ impl BuilderDef { StorageLineTypeDef::DoubleMap(map) => { let key1 = &map.key1; let key2 = &map.key2; - quote!{{ + quote! {{ #data let data: &#scrate::sp_std::vec::Vec<(#key1, #key2, #value_type)> = data; data.iter().for_each(|(k1, k2, v)| { @@ -120,6 +122,17 @@ impl BuilderDef { }); }} }, + StorageLineTypeDef::NMap(map) => { + let key_tuple = map.to_key_tuple(); + let key_arg = if map.keys.len() == 1 { quote!((k,)) } else { quote!(k) }; + quote! {{ + #data + let data: &#scrate::sp_std::vec::Vec<(#key_tuple, #value_type)> = data; + data.iter().for_each(|(k, v)| { + <#storage_struct as #scrate::#storage_trait>::insert(#key_arg, v); + }); + }} + }, }); } } @@ -133,10 +146,6 @@ impl BuilderDef { }); } - - Self { - blocks, - is_generic, - } + Self { blocks, is_generic } } } diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index 6339134ea0d22..fbdaab06b4895 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,11 @@ //! Genesis config definition. +use super::super::{DeclStorageDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::TokenStream; -use syn::{spanned::Spanned, parse_quote}; use quote::quote; -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; +use syn::{parse_quote, spanned::Spanned}; pub struct GenesisConfigFieldDef { pub name: syn::Ident, @@ -33,11 +33,11 @@ pub struct GenesisConfigFieldDef { pub struct GenesisConfigDef { pub is_generic: bool, pub fields: Vec, - /// For example: `, I: Instance=DefaultInstance>`. + /// For example: `, I: Instance=DefaultInstance>`. pub genesis_struct_decl: TokenStream, /// For example: ``. pub genesis_struct: TokenStream, - /// For example: `, I: Instance>`. + /// For example: `, I: Instance>`. pub genesis_impl: TokenStream, /// The where clause to use to constrain generics if genesis config is generic. 
pub genesis_where_clause: Option, @@ -47,30 +47,28 @@ impl GenesisConfigDef { pub fn from_def(def: &DeclStorageDefExt) -> syn::Result { let fields = Self::get_genesis_config_field_defs(def)?; - let is_generic = fields.iter() + let is_generic = fields + .iter() .any(|field| ext::type_contains_ident(&field.typ, &def.module_runtime_generic)); - let ( - genesis_struct_decl, - genesis_impl, - genesis_struct, - genesis_where_clause - ) = if is_generic { - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance = &def.optional_instance; - let optional_instance_bound = &def.optional_instance_bound; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; - let where_clause = &def.where_clause; - ( - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>), - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>), - quote!(<#runtime_generic, #optional_instance>), - where_clause.clone(), - ) - } else { - (quote!(), quote!(), quote!(), None) - }; + let (genesis_struct_decl, genesis_impl, genesis_struct, genesis_where_clause) = + if is_generic { + let runtime_generic = &def.module_runtime_generic; + let runtime_trait = &def.module_runtime_trait; + let optional_instance = &def.optional_instance; + let optional_instance_bound = &def.optional_instance_bound; + let optional_instance_bound_optional_default = + &def.optional_instance_bound_optional_default; + let where_clause = &def.where_clause; + ( + quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>), + quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>), + quote!(<#runtime_generic, #optional_instance>), + where_clause.clone(), + ) + } else { + (quote!(), quote!(), quote!(), None) + }; Ok(Self { is_generic, @@ -82,14 +80,14 @@ impl GenesisConfigDef { }) } - fn get_genesis_config_field_defs(def: &DeclStorageDefExt) - -> syn::Result> - { + fn get_genesis_config_field_defs( + def: &DeclStorageDefExt, + ) -> syn::Result> { let mut config_field_defs = Vec::new(); - for (config_field, line) in def.storage_lines.iter() - .filter_map(|line| line.config.as_ref().map(|config_field| (config_field.clone(), line))) - { + for (config_field, line) in def.storage_lines.iter().filter_map(|line| { + line.config.as_ref().map(|config_field| (config_field.clone(), line)) + }) { let value_type = &line.value_type; let typ = match &line.storage_type { @@ -104,17 +102,23 @@ impl GenesisConfigDef { parse_quote!( Vec<(#key1, #key2, #value_type)> ) }, + StorageLineTypeDef::NMap(map) => { + let key_tuple = map.to_key_tuple(); + parse_quote!( Vec<(#key_tuple, #value_type)> ) + }, }; - let default = line.default_value.as_ref() - .map(|d| { - if line.is_option { - quote!( #d.unwrap_or_default() ) - } else { - quote!( #d ) - } - }) - .unwrap_or_else(|| quote!( Default::default() )); + let default = + line.default_value + .as_ref() + .map(|d| { + if line.is_option { + quote!( #d.unwrap_or_default() ) + } else { + quote!( #d ) + } + }) + .unwrap_or_else(|| quote!(Default::default())); config_field_defs.push(GenesisConfigFieldDef { name: config_field, @@ -125,22 +129,26 @@ impl GenesisConfigDef { } for line in &def.extra_genesis_config_lines { - let attrs = line.attrs.iter() + let attrs = line + .attrs + .iter() .map(|attr| { let meta = attr.parse_meta()?; if meta.path().is_ident("cfg") { return Err(syn::Error::new( meta.span(), - "extra genesis config items do not support `cfg` attribute" - 
)); + "extra genesis config items do not support `cfg` attribute", + )) } Ok(meta) }) .collect::>()?; - let default = line.default.as_ref().map(|e| quote!( #e )) - .unwrap_or_else(|| quote!( Default::default() )); - + let default = line + .default + .as_ref() + .map(|e| quote!( #e )) + .unwrap_or_else(|| quote!(Default::default())); config_field_defs.push(GenesisConfigFieldDef { name: line.name.clone(), diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index 27fbdd2cd38b5..d2d1afb017736 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,14 +18,14 @@ //! Declaration of genesis config structure and implementation of build storage trait and //! functions. -use proc_macro2::{TokenStream, Span}; -use quote::quote; use super::DeclStorageDefExt; -use genesis_config_def::GenesisConfigDef; -use builder_def::BuilderDef; +pub use builder_def::BuilderDef; +pub use genesis_config_def::GenesisConfigDef; +use proc_macro2::{Span, TokenStream}; +use quote::quote; -mod genesis_config_def; mod builder_def; +mod genesis_config_def; const DEFAULT_INSTANCE_NAME: &str = "__GeneratedInstance"; @@ -65,6 +65,7 @@ fn decl_genesis_config_and_impl_default( let genesis_struct = &genesis_config.genesis_struct; let genesis_impl = &genesis_config.genesis_impl; let genesis_where_clause = &genesis_config.genesis_where_clause; + let serde_crate = format!("{}::serde", scrate); quote!( /// Genesis config for the module, allow to build genesis storage. @@ -72,6 +73,7 @@ fn decl_genesis_config_and_impl_default( #[cfg(feature = "std")] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] + #[serde(crate = #serde_crate)] #serde_bug_bound pub struct GenesisConfig#genesis_struct_decl #genesis_where_clause { #( #config_fields )* @@ -116,19 +118,16 @@ fn impl_build_storage( let genesis_impl = &genesis_config.genesis_impl; let genesis_where_clause = &genesis_config.genesis_where_clause; - let ( - fn_generic, - fn_traitinstance, - fn_where_clause - ) = if !genesis_config.is_generic && builders.is_generic { - ( - quote!( <#runtime_generic: #runtime_trait, #optional_instance_bound> ), - quote!( #runtime_generic, #optional_instance ), - Some(&def.where_clause), - ) - } else { - (quote!(), quote!(), None) - }; + let (fn_generic, fn_traitinstance, fn_where_clause) = + if !genesis_config.is_generic && builders.is_generic { + ( + quote!( <#runtime_generic: #runtime_trait, #optional_instance_bound> ), + quote!( #runtime_generic, #optional_instance ), + Some(&def.where_clause), + ) + } else { + (quote!(), quote!(), None) + }; let builder_blocks = &builders.blocks; @@ -136,7 +135,7 @@ fn impl_build_storage( #scrate::sp_runtime::BuildModuleGenesisStorage<#runtime_generic, #inherent_instance> ); - quote!{ + quote! { #[cfg(feature = "std")] impl#genesis_impl GenesisConfig#genesis_struct #genesis_where_clause { /// Build the storage for this module. 
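For context on the `#[serde(crate = #serde_crate)]` attribute introduced above: it points the serde derive at the serde re-exported through the hidden `frame-support` crate access (`format!("{}::serde", scrate)`), so crates expanding `decl_storage!` no longer need their own serde dependency for the generated `GenesisConfig`. A minimal sketch of input that exercises this path; the `Config` trait, `Example` name, and `value` field are hypothetical, not part of this patch:

```rust
use frame_support::{decl_module, decl_storage};

pub trait Config: frame_system::Config {}

decl_module! {
	pub struct Module<T: Config> for enum Call where origin: T::Origin {}
}

decl_storage! {
	trait Store for Module<T: Config> as Example {
		// `config(value)` adds a `value: u32` field to the generated
		// `GenesisConfig`, whose serde derive now resolves through the
		// `#[serde(crate = "...::serde")]` attribute emitted above.
		Value get(fn value) config(value): u32;
	}
}
```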
@@ -175,10 +174,8 @@ fn impl_build_storage( } } -pub fn genesis_config_and_build_storage( - scrate: &TokenStream, - def: &DeclStorageDefExt, -) -> TokenStream { +pub fn genesis_config_and_build_storage(def: &DeclStorageDefExt) -> TokenStream { + let scrate = &def.hidden_crate; let builders = BuilderDef::from_def(scrate, def); if !builders.blocks.is_empty() { let genesis_config = match GenesisConfigDef::from_def(def) { @@ -189,7 +186,7 @@ pub fn genesis_config_and_build_storage( decl_genesis_config_and_impl_default(scrate, &genesis_config); let impl_build_storage = impl_build_storage(scrate, def, &genesis_config, &builders); - quote!{ + quote! { #decl_genesis_config_and_impl_default #impl_build_storage } diff --git a/frame/support/procedural/src/storage/getters.rs b/frame/support/procedural/src/storage/getters.rs index 5507db4630596..988e6fa096243 100644 --- a/frame/support/procedural/src/storage/getters.rs +++ b/frame/support/procedural/src/storage/getters.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,17 @@ //! Implementation of getters on module structure. +use super::{DeclStorageDefExt, StorageLineTypeDef}; use proc_macro2::TokenStream; use quote::quote; -use super::{DeclStorageDefExt, StorageLineTypeDef}; -pub fn impl_getters(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { +pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { + let scrate = &def.hidden_crate; let mut getters = TokenStream::new(); - for (get_fn, line) in def.storage_lines.iter() + for (get_fn, line) in def + .storage_lines + .iter() .filter_map(|line| line.getter.as_ref().map(|get_fn| (get_fn, line))) { let attrs = &line.doc_attrs; @@ -34,7 +37,7 @@ pub fn impl_getters(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStrea let getter = match &line.storage_type { StorageLineTypeDef::Simple(value) => { - quote!{ + quote! { #( #[ #attrs ] )* pub fn #get_fn() -> #value { <#storage_struct as #scrate::#storage_trait>::get() @@ -44,7 +47,7 @@ pub fn impl_getters(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStrea StorageLineTypeDef::Map(map) => { let key = &map.key; let value = &map.value; - quote!{ + quote! { #( #[ #attrs ] )* pub fn #get_fn>(key: K) -> #value { <#storage_struct as #scrate::#storage_trait>::get(key) @@ -55,7 +58,7 @@ pub fn impl_getters(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStrea let key1 = &map.key1; let key2 = &map.key2; let value = &map.value; - quote!{ + quote! { pub fn #get_fn(k1: KArg1, k2: KArg2) -> #value where KArg1: #scrate::codec::EncodeLike<#key1>, @@ -65,6 +68,21 @@ pub fn impl_getters(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStrea } } }, + StorageLineTypeDef::NMap(map) => { + let keygen = map.to_keygen_struct(&def.hidden_crate); + let value = &map.value; + quote! 
{ + pub fn #get_fn(key: KArg) -> #value + where + KArg: #scrate::storage::types::EncodeLikeTuple< + <#keygen as #scrate::storage::types::KeyGenerator>::KArg + > + + #scrate::storage::types::TupleToEncodedIter, + { + <#storage_struct as #scrate::#storage_trait>::get(key) + } + } + }, }; getters.extend(getter); } diff --git a/frame/support/procedural/src/storage/instance_trait.rs b/frame/support/procedural/src/storage/instance_trait.rs index a28c3ae622082..00a73d6fbd6e7 100644 --- a/frame/support/procedural/src/storage/instance_trait.rs +++ b/frame/support/procedural/src/storage/instance_trait.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,11 +18,11 @@ //! Implementation of the trait instance and the instance structures implementing it. //! (For not instantiable traits there is still the inherent instance implemented). -use proc_macro2::{TokenStream, Span}; -use quote::quote; use super::DeclStorageDefExt; +use crate::NUMBER_OF_INSTANCE; +use proc_macro2::{Span, TokenStream}; +use quote::quote; -const NUMBER_OF_INSTANCE: usize = 16; pub(crate) const INHERENT_INSTANCE_NAME: &str = "__InherentHiddenInstance"; // Used to generate an instance implementation. @@ -30,31 +30,34 @@ struct InstanceDef { prefix: String, instance_struct: syn::Ident, doc: TokenStream, + // Index is same as instance number. Default is 0. + index: u8, } -pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { +pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { + let scrate = &def.hidden_crate; let mut impls = TokenStream::new(); impls.extend(reexport_instance_trait(scrate, def)); // Implementation of instances. if let Some(module_instance) = &def.module_instance { - let instance_defs = (0..NUMBER_OF_INSTANCE) + let instance_defs = (1..=NUMBER_OF_INSTANCE) .map(|i| { let name = format!("Instance{}", i); InstanceDef { instance_struct: syn::Ident::new(&name, proc_macro2::Span::call_site()), prefix: name, doc: quote!(#[doc=r"Module instance"]), + index: i, } }) - .chain( - module_instance.instance_default.as_ref().map(|ident| InstanceDef { - prefix: String::new(), - instance_struct: ident.clone(), - doc: quote!(#[doc=r"Default module instance"]), - }) - ); + .chain(module_instance.instance_default.as_ref().map(|ident| InstanceDef { + prefix: String::new(), + instance_struct: ident.clone(), + doc: quote!(#[doc=r"Default module instance"]), + index: 0, + })); for instance_def in instance_defs { impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); @@ -65,8 +68,8 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre let inherent_instance = syn::Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()); // Implementation of inherent instance. - if let Some(default_instance) = def.module_instance.as_ref() - .and_then(|i| i.instance_default.as_ref()) + if let Some(default_instance) = + def.module_instance.as_ref().and_then(|i| i.instance_default.as_ref()) { impls.extend(quote! { /// Hidden instance generated to be internally used when module is used without @@ -83,6 +86,8 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre /// instance. #[doc(hidden)] ), + // This is just to make the type system happy. Not actually used. 
+ index: 0, }; impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); } @@ -90,10 +95,7 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre impls } -fn reexport_instance_trait( - scrate: &TokenStream, - def: &DeclStorageDefExt, -) -> TokenStream { +fn reexport_instance_trait(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { if let Some(i) = def.module_instance.as_ref() { let instance_trait = &i.instance_trait; quote!( @@ -116,6 +118,7 @@ fn create_and_impl_instance_struct( let instance_struct = &instance_def.instance_struct; let prefix = format!("{}{}", instance_def.prefix, def.crate_name.to_string()); let doc = &instance_def.doc; + let index = instance_def.index; quote! { // Those trait are derived because of wrong bounds for generics @@ -123,12 +126,14 @@ fn create_and_impl_instance_struct( Clone, Eq, PartialEq, #scrate::codec::Encode, #scrate::codec::Decode, + #scrate::scale_info::TypeInfo, #scrate::RuntimeDebug, )] #doc pub struct #instance_struct; impl #instance_trait for #instance_struct { const PREFIX: &'static str = #prefix; + const INDEX: u8 = #index; } } } diff --git a/frame/support/procedural/src/storage/metadata.rs b/frame/support/procedural/src/storage/metadata.rs index 065320cd018ae..a90e5051c5b2e 100644 --- a/frame/support/procedural/src/storage/metadata.rs +++ b/frame/support/procedural/src/storage/metadata.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,32 +17,29 @@ //! Implementation of `storage_metadata` on module structure, used by construct_runtime. -use frame_support_procedural_tools::clean_type_string; +use super::{DeclStorageDefExt, StorageLineDefExt, StorageLineTypeDef}; +use frame_support_procedural_tools::get_doc_literals; use proc_macro2::TokenStream; use quote::quote; -use super::{DeclStorageDefExt, StorageLineDefExt, StorageLineTypeDef}; fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> TokenStream { let value_type = &line.value_type; - let value_type = clean_type_string("e!( #value_type ).to_string()); match &line.storage_type { StorageLineTypeDef::Simple(_) => { - quote!{ + quote! { #scrate::metadata::StorageEntryType::Plain( - #scrate::metadata::DecodeDifferent::Encode(#value_type), + #scrate::scale_info::meta_type::<#value_type>() ) } }, StorageLineTypeDef::Map(map) => { let hasher = map.hasher.into_metadata(); let key = &map.key; - let key = clean_type_string("e!(#key).to_string()); - quote!{ + quote! { #scrate::metadata::StorageEntryType::Map { - hasher: #scrate::metadata::#hasher, - key: #scrate::metadata::DecodeDifferent::Encode(#key), - value: #scrate::metadata::DecodeDifferent::Encode(#value_type), - unused: false, + hashers: #scrate::sp_std::vec! 
[ #scrate::metadata::#hasher ], + key: #scrate::scale_info::meta_type::<#key>(), + value: #scrate::scale_info::meta_type::<#value_type>(), } } }, @@ -50,16 +47,32 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> let hasher1 = map.hasher1.into_metadata(); let hasher2 = map.hasher2.into_metadata(); let key1 = &map.key1; - let key1 = clean_type_string("e!(#key1).to_string()); let key2 = &map.key2; - let key2 = clean_type_string("e!(#key2).to_string()); - quote!{ - #scrate::metadata::StorageEntryType::DoubleMap { - hasher: #scrate::metadata::#hasher1, - key1: #scrate::metadata::DecodeDifferent::Encode(#key1), - key2: #scrate::metadata::DecodeDifferent::Encode(#key2), - value: #scrate::metadata::DecodeDifferent::Encode(#value_type), - key2_hasher: #scrate::metadata::#hasher2, + quote! { + #scrate::metadata::StorageEntryType::Map { + hashers: #scrate::sp_std::vec! [ + #scrate::metadata::#hasher1, + #scrate::metadata::#hasher2, + ], + key: #scrate::scale_info::meta_type::<(#key1, #key2)>(), + value: #scrate::scale_info::meta_type::<#value_type>(), + } + } + }, + StorageLineTypeDef::NMap(map) => { + let key_tuple = &map.to_key_tuple(); + let hashers = map + .hashers + .iter() + .map(|hasher| hasher.to_storage_hasher_struct()) + .collect::>(); + quote! { + #scrate::metadata::StorageEntryType::Map { + hashers: #scrate::sp_std::vec! [ + #( #scrate::metadata::StorageHasher::#hashers, )* + ], + key: #scrate::scale_info::meta_type::<#key_tuple>(), + value: #scrate::scale_info::meta_type::<#value_type>(), } } }, @@ -71,12 +84,17 @@ fn default_byte_getter( line: &StorageLineDefExt, def: &DeclStorageDefExt, ) -> (TokenStream, TokenStream) { - let default = line.default_value.as_ref().map(|d| quote!( #d )) - .unwrap_or_else(|| quote!( Default::default() )); + let default = line + .default_value + .as_ref() + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!(Default::default())); let str_name = line.name.to_string(); - let struct_name = syn::Ident::new(&("__GetByteStruct".to_string() + &str_name), line.name.span()); - let cache_name = syn::Ident::new(&("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), line.name.span()); + let struct_name = + syn::Ident::new(&("__GetByteStruct".to_string() + &str_name), line.name.span()); + let cache_name = + syn::Ident::new(&("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), line.name.span()); let runtime_generic = &def.module_runtime_generic; let runtime_trait = &def.module_runtime_trait; @@ -101,8 +119,7 @@ fn default_byte_getter( #[cfg(feature = "std")] impl<#runtime_generic: #runtime_trait, #optional_instance_bound> - #scrate::metadata::DefaultByte - for #struct_name<#runtime_generic, #optional_instance> + #struct_name<#runtime_generic, #optional_instance> #where_clause { fn default_byte(&self) -> #scrate::sp_std::vec::Vec { @@ -114,16 +131,9 @@ fn default_byte_getter( } } - unsafe impl<#runtime_generic: #runtime_trait, #optional_instance_bound> Send - for #struct_name<#runtime_generic, #optional_instance> #where_clause {} - - unsafe impl<#runtime_generic: #runtime_trait, #optional_instance_bound> Sync - for #struct_name<#runtime_generic, #optional_instance> #where_clause {} - #[cfg(not(feature = "std"))] impl<#runtime_generic: #runtime_trait, #optional_instance_bound> - #scrate::metadata::DefaultByte - for #struct_name<#runtime_generic, #optional_instance> + #struct_name<#runtime_generic, #optional_instance> #where_clause { fn default_byte(&self) -> #scrate::sp_std::vec::Vec { @@ -140,7 +150,8 @@ fn default_byte_getter( 
(struct_def, struct_instance) } -pub fn impl_metadata(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { +pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { + let scrate = &def.hidden_crate; let mut entries = TokenStream::new(); let mut default_byte_getter_struct_defs = TokenStream::new(); @@ -155,30 +166,18 @@ pub fn impl_metadata(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre let ty = storage_line_metadata_type(scrate, line); - let ( - default_byte_getter_struct_def, - default_byte_getter_struct_instance, - ) = default_byte_getter(scrate, line, def); - - let mut docs = TokenStream::new(); - for attr in line.attrs.iter().filter_map(|v| v.parse_meta().ok()) { - if let syn::Meta::NameValue(meta) = attr { - if meta.path.is_ident("doc") { - let lit = meta.lit; - docs.extend(quote!(#lit,)); - } - } - } + let (default_byte_getter_struct_def, default_byte_getter_struct_instance) = + default_byte_getter(scrate, line, def); + + let docs = get_doc_literals(&line.attrs); let entry = quote! { #scrate::metadata::StorageEntryMetadata { - name: #scrate::metadata::DecodeDifferent::Encode(#str_name), + name: #str_name, modifier: #modifier, ty: #ty, - default: #scrate::metadata::DecodeDifferent::Encode( - #scrate::metadata::DefaultByteGetter(&#default_byte_getter_struct_instance) - ), - documentation: #scrate::metadata::DecodeDifferent::Encode(&[ #docs ]), + default: #default_byte_getter_struct_instance.default_byte(), + docs: #scrate::sp_std::vec![ #( #docs ),* ], }, }; @@ -195,9 +194,9 @@ pub fn impl_metadata(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre }; let store_metadata = quote!( - #scrate::metadata::StorageMetadata { - prefix: #scrate::metadata::DecodeDifferent::Encode(#prefix), - entries: #scrate::metadata::DecodeDifferent::Encode(&[ #entries ][..]), + #scrate::metadata::PalletStorageMetadata { + prefix: #prefix, + entries: #scrate::sp_std::vec![ #entries ], } ); @@ -210,7 +209,7 @@ pub fn impl_metadata(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre impl#module_impl #module_struct #where_clause { #[doc(hidden)] - pub fn storage_metadata() -> #scrate::metadata::StorageMetadata { + pub fn storage_metadata() -> #scrate::metadata::PalletStorageMetadata { #store_metadata } } diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index 0aa0a3cad7cd1..27964d7012a28 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,21 +17,27 @@ //! `decl_storage` input definition and expansion. 
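The module list below gains `print_pallet_upgrade` and `storage_info`, and the storage kinds gain an `NMap` variant next to `Map` and `DoubleMap`. As a hedged sketch of the new `nmap` keyword (hypothetical pallet, reusing the `decl_module!` setup from the earlier example):

```rust
decl_storage! {
	trait Store for Module<T: Config> as Example {
		// One hasher per key. `NMapDef::to_keygen_struct` turns this into a
		// tuple of `Key<Hasher, KeyType>` entries, and `to_key_tuple` into
		// `(T::AccountId, u32)` for the metadata and genesis config types.
		Triple: nmap hasher(blake2_128_concat) T::AccountId, hasher(twox_64_concat) u32 => u128;
	}
}
```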
-mod storage_struct; -mod parse; -mod store_trait; +mod genesis_config; mod getters; -mod metadata; mod instance_trait; -mod genesis_config; +mod metadata; +mod parse; +mod print_pallet_upgrade; +mod storage_info; +mod storage_struct; +mod store_trait; + +pub(crate) use instance_trait::INHERENT_INSTANCE_NAME; -use quote::quote; use frame_support_procedural_tools::{ - generate_crate_access, generate_hidden_includes, syn_ext as ext + generate_crate_access, generate_hidden_includes, syn_ext as ext, }; +use quote::quote; /// All information contained in input of decl_storage pub struct DeclStorageDef { + /// Whether to generate the storage info + generate_storage_info: bool, /// Name of the module used to import hidden imports. hidden_crate: Option, /// Visibility of store trait. @@ -42,7 +48,7 @@ pub struct DeclStorageDef { module_name: syn::Ident, /// Usually `T`. module_runtime_generic: syn::Ident, - /// Usually `Trait` + /// Usually `Config` module_runtime_trait: syn::Path, /// For instantiable module: usually `I: Instance=DefaultInstance`. module_instance: Option, @@ -66,8 +72,12 @@ impl syn::parse::Parse for DeclStorageDef { /// Extended version of `DeclStorageDef` with useful precomputed value. pub struct DeclStorageDefExt { + /// Whether to generate the storage info + generate_storage_info: bool, /// Name of the module used to import hidden imports. - hidden_crate: Option, + hidden_crate: proc_macro2::TokenStream, + /// Hidden imports used by the module. + hidden_imports: proc_macro2::TokenStream, /// Visibility of store trait. visibility: syn::Visibility, /// Name of store trait: usually `Store`. @@ -77,7 +87,7 @@ pub struct DeclStorageDefExt { module_name: syn::Ident, /// Usually `T`. module_runtime_generic: syn::Ident, - /// Usually `Trait`. + /// Usually `Config`. module_runtime_trait: syn::Path, /// For instantiable module: usually `I: Instance=DefaultInstance`. module_instance: Option, @@ -93,7 +103,7 @@ pub struct DeclStorageDefExt { crate_name: syn::Ident, /// Full struct expansion: `Module`. module_struct: proc_macro2::TokenStream, - /// Impl block for module: ``. + /// Impl block for module: ``. module_impl: proc_macro2::TokenStream, /// For instantiable: `I`. 
optional_instance: Option, @@ -105,28 +115,37 @@ pub struct DeclStorageDefExt { impl From for DeclStorageDefExt { fn from(mut def: DeclStorageDef) -> Self { + let hidden_crate_name = def + .hidden_crate + .as_ref() + .map(|i| i.to_string()) + .unwrap_or_else(|| "decl_storage".to_string()); + + let hidden_crate = generate_crate_access(&hidden_crate_name, "frame-support"); + let hidden_imports = generate_hidden_includes(&hidden_crate_name, "frame-support"); + let storage_lines = def.storage_lines.drain(..).collect::>(); - let storage_lines = storage_lines.into_iter() - .map(|line| StorageLineDefExt::from_def(line, &def)) + let storage_lines = storage_lines + .into_iter() + .map(|line| StorageLineDefExt::from_def(line, &def, &hidden_crate)) .collect(); - let ( - optional_instance, - optional_instance_bound, - optional_instance_bound_optional_default, - ) = if let Some(instance) = def.module_instance.as_ref() { - let instance_generic = &instance.instance_generic; - let instance_trait= &instance.instance_trait; - let optional_equal_instance_default = instance.instance_default.as_ref() - .map(|d| quote!( = #d )); - ( - Some(quote!(#instance_generic)), - Some(quote!(#instance_generic: #instance_trait)), - Some(quote!(#instance_generic: #instance_trait #optional_equal_instance_default)), - ) - } else { - (None, None, None) - }; + let (optional_instance, optional_instance_bound, optional_instance_bound_optional_default) = + if let Some(instance) = def.module_instance.as_ref() { + let instance_generic = &instance.instance_generic; + let instance_trait = &instance.instance_trait; + let optional_equal_instance_default = + instance.instance_default.as_ref().map(|d| quote!( = #d )); + ( + Some(quote!(#instance_generic)), + Some(quote!(#instance_generic: #instance_trait)), + Some( + quote!(#instance_generic: #instance_trait #optional_equal_instance_default), + ), + ) + } else { + (None, None, None) + }; let module_runtime_generic = &def.module_runtime_generic; let module_runtime_trait = &def.module_runtime_trait; @@ -141,7 +160,9 @@ impl From for DeclStorageDefExt { ); Self { - hidden_crate: def.hidden_crate, + hidden_crate, + hidden_imports, + generate_storage_info: def.generate_storage_info, visibility: def.visibility, store_trait: def.store_trait, module_name: def.module_name, @@ -181,6 +202,8 @@ pub struct StorageLineDef { getter: Option, /// The name of the field to be used in genesis config if any. config: Option, + /// The given max values with `max_values` attribute, or a none if not specified. + max_values: Option, /// The build function of the storage if any. build: Option, /// Default value of genesis config field and also for storage when no value available. @@ -198,6 +221,8 @@ pub struct StorageLineDefExt { getter: Option, /// The name of the field to be used in genesis config if any. config: Option, + /// The given max values with `max_values` attribute, or a none if not specified. + max_values: Option, /// The build function of the storage if any. build: Option, /// Default value of genesis config field and also for storage when no value available. @@ -212,7 +237,7 @@ pub struct StorageLineDefExt { storage_struct: proc_macro2::TokenStream, /// If storage is generic over runtime then `T`. optional_storage_runtime_comma: Option, - /// If storage is generic over runtime then `T: Trait`. + /// If storage is generic over runtime then `T: Config`. optional_storage_runtime_bound_comma: Option, /// The where clause to use to constrain generics if storage is generic over runtime. 
optional_storage_where_clause: Option, @@ -227,37 +252,42 @@ pub struct StorageLineDefExt { } impl StorageLineDefExt { - fn from_def(storage_def: StorageLineDef, def: &DeclStorageDef) -> Self { + fn from_def( + storage_def: StorageLineDef, + def: &DeclStorageDef, + hidden_crate: &proc_macro2::TokenStream, + ) -> Self { let is_generic = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => { - ext::type_contains_ident(&value, &def.module_runtime_generic) - }, - StorageLineTypeDef::Map(map) => { - ext::type_contains_ident(&map.key, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } - StorageLineTypeDef::DoubleMap(map) => { - ext::type_contains_ident(&map.key1, &def.module_runtime_generic) - || ext::type_contains_ident(&map.key2, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } + StorageLineTypeDef::Simple(value) => + ext::type_contains_ident(&value, &def.module_runtime_generic), + StorageLineTypeDef::Map(map) => + ext::type_contains_ident(&map.key, &def.module_runtime_generic) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), + StorageLineTypeDef::DoubleMap(map) => + ext::type_contains_ident(&map.key1, &def.module_runtime_generic) || + ext::type_contains_ident(&map.key2, &def.module_runtime_generic) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), + StorageLineTypeDef::NMap(map) => + map.keys + .iter() + .any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), }; let query_type = match &storage_def.storage_type { StorageLineTypeDef::Simple(value) => value.clone(), StorageLineTypeDef::Map(map) => map.value.clone(), StorageLineTypeDef::DoubleMap(map) => map.value.clone(), + StorageLineTypeDef::NMap(map) => map.value.clone(), }; let is_option = ext::extract_type_option(&query_type).is_some(); - let value_type = ext::extract_type_option(&query_type).unwrap_or_else(|| query_type.clone()); + let value_type = + ext::extract_type_option(&query_type).unwrap_or_else(|| query_type.clone()); let module_runtime_generic = &def.module_runtime_generic; let module_runtime_trait = &def.module_runtime_trait; - let optional_storage_runtime_comma = if is_generic { - Some(quote!( #module_runtime_generic, )) - } else { - None - }; + let optional_storage_runtime_comma = + if is_generic { Some(quote!( #module_runtime_generic, )) } else { None }; let optional_storage_runtime_bound_comma = if is_generic { Some(quote!( #module_runtime_generic: #module_runtime_trait, )) } else { @@ -273,11 +303,8 @@ impl StorageLineDefExt { #storage_name<#optional_storage_runtime_comma #optional_instance_generic> ); - let optional_storage_where_clause = if is_generic { - def.where_clause.as_ref().map(|w| quote!( #w )) - } else { - None - }; + let optional_storage_where_clause = + if is_generic { def.where_clause.as_ref().map(|w| quote!( #w )) } else { None }; let storage_trait_truncated = match &storage_def.storage_type { StorageLineTypeDef::Simple(_) => { @@ -292,12 +319,18 @@ impl StorageLineDefExt { let key2 = &map.key2; quote!( StorageDoubleMap<#key1, #key2, #value_type> ) }, + StorageLineTypeDef::NMap(map) => { + let keygen = map.to_keygen_struct(hidden_crate); + quote!( StorageNMap<#keygen, #value_type> ) + }, }; let storage_trait = quote!( storage::#storage_trait_truncated ); let storage_generator_trait = quote!( storage::generator::#storage_trait_truncated ); - let doc_attrs 
= storage_def.attrs.iter() + let doc_attrs = storage_def + .attrs + .iter() .filter_map(|a| a.parse_meta().ok()) .filter(|m| m.path().is_ident("doc")) .collect(); @@ -308,6 +341,7 @@ impl StorageLineDefExt { name: storage_def.name, getter: storage_def.getter, config: storage_def.config, + max_values: storage_def.max_values, build: storage_def.build, default_value: storage_def.default_value, storage_type: storage_def.storage_type, @@ -329,6 +363,7 @@ impl StorageLineDefExt { pub enum StorageLineTypeDef { Map(MapDef), DoubleMap(Box), + NMap(NMapDef), Simple(syn::Type), } @@ -348,6 +383,43 @@ pub struct DoubleMapDef { pub value: syn::Type, } +pub struct NMapDef { + pub hashers: Vec, + pub keys: Vec, + pub value: syn::Type, +} + +impl NMapDef { + fn to_keygen_struct(&self, scrate: &proc_macro2::TokenStream) -> proc_macro2::TokenStream { + if self.keys.len() == 1 { + let hasher = &self.hashers[0].to_storage_hasher_struct(); + let key = &self.keys[0]; + return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) + } + + let key_hasher = self + .keys + .iter() + .zip(&self.hashers) + .map(|(key, hasher)| { + let hasher = hasher.to_storage_hasher_struct(); + quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) + }) + .collect::>(); + quote!(( #(#key_hasher,)* )) + } + + fn to_key_tuple(&self) -> proc_macro2::TokenStream { + if self.keys.len() == 1 { + let key = &self.keys[0]; + return quote!(#key) + } + + let tuple = self.keys.iter().map(|key| quote!(#key)).collect::>(); + quote!(( #(#tuple,)* )) + } +} + pub struct ExtraGenesisLineDef { attrs: Vec, name: syn::Ident, @@ -369,25 +441,25 @@ pub enum HasherKind { impl HasherKind { fn to_storage_hasher_struct(&self) -> proc_macro2::TokenStream { match self { - HasherKind::Blake2_256 => quote!( Blake2_256 ), - HasherKind::Blake2_128 => quote!( Blake2_128 ), - HasherKind::Blake2_128Concat => quote!( Blake2_128Concat ), - HasherKind::Twox256 => quote!( Twox256 ), - HasherKind::Twox128 => quote!( Twox128 ), - HasherKind::Twox64Concat => quote!( Twox64Concat ), - HasherKind::Identity => quote!( Identity ), + HasherKind::Blake2_256 => quote!(Blake2_256), + HasherKind::Blake2_128 => quote!(Blake2_128), + HasherKind::Blake2_128Concat => quote!(Blake2_128Concat), + HasherKind::Twox256 => quote!(Twox256), + HasherKind::Twox128 => quote!(Twox128), + HasherKind::Twox64Concat => quote!(Twox64Concat), + HasherKind::Identity => quote!(Identity), } } fn into_metadata(&self) -> proc_macro2::TokenStream { match self { - HasherKind::Blake2_256 => quote!( StorageHasher::Blake2_256 ), - HasherKind::Blake2_128 => quote!( StorageHasher::Blake2_128 ), - HasherKind::Blake2_128Concat => quote!( StorageHasher::Blake2_128Concat ), - HasherKind::Twox256 => quote!( StorageHasher::Twox256 ), - HasherKind::Twox128 => quote!( StorageHasher::Twox128 ), - HasherKind::Twox64Concat => quote!( StorageHasher::Twox64Concat ), - HasherKind::Identity => quote!( StorageHasher::Identity ), + HasherKind::Blake2_256 => quote!(StorageHasher::Blake2_256), + HasherKind::Blake2_128 => quote!(StorageHasher::Blake2_128), + HasherKind::Blake2_128Concat => quote!(StorageHasher::Blake2_128Concat), + HasherKind::Twox256 => quote!(StorageHasher::Twox256), + HasherKind::Twox128 => quote!(StorageHasher::Twox128), + HasherKind::Twox64Concat => quote!(StorageHasher::Twox64Concat), + HasherKind::Identity => quote!(StorageHasher::Identity), } } } @@ -397,26 +469,27 @@ pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStr let def = syn::parse_macro_input!(input as 
DeclStorageDef); let def_ext = DeclStorageDefExt::from(def); - let hidden_crate_name = def_ext.hidden_crate.as_ref().map(|i| i.to_string()) - .unwrap_or_else(|| "decl_storage".to_string()); - - let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); - let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support"); + print_pallet_upgrade::maybe_print_pallet_upgrade(&def_ext); + let scrate = &def_ext.hidden_crate; + let scrate_decl = &def_ext.hidden_imports; let store_trait = store_trait::decl_and_impl(&def_ext); - let getters = getters::impl_getters(&scrate, &def_ext); - let metadata = metadata::impl_metadata(&scrate, &def_ext); - let instance_trait = instance_trait::decl_and_impl(&scrate, &def_ext); - let genesis_config = genesis_config::genesis_config_and_build_storage(&scrate, &def_ext); - let storage_struct = storage_struct::decl_and_impl(&scrate, &def_ext); + let getters = getters::impl_getters(&def_ext); + let metadata = metadata::impl_metadata(&def_ext); + let instance_trait = instance_trait::decl_and_impl(&def_ext); + let genesis_config = genesis_config::genesis_config_and_build_storage(&def_ext); + let storage_struct = storage_struct::decl_and_impl(&def_ext); + let storage_info = storage_info::impl_storage_info(&def_ext); quote!( use #scrate::{ StorageValue as _, StorageMap as _, StorageDoubleMap as _, + StorageNMap as _, StoragePrefixedMap as _, IterableStorageMap as _, + IterableStorageNMap as _, IterableStorageDoubleMap as _, }; @@ -427,5 +500,7 @@ pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStr #instance_trait #genesis_config #storage_struct - ).into() + #storage_info + ) + .into() } diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs index 504af6d0ffcad..3a11846181a8f 100644 --- a/frame/support/procedural/src/storage/parse.rs +++ b/frame/support/procedural/src/storage/parse.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,21 @@ //! Parsing of decl_storage input. -use frame_support_procedural_tools::{ToTokens, Parse, syn_ext as ext}; -use syn::{Ident, Token, spanned::Spanned}; +use frame_support_procedural_tools::{syn_ext as ext, Parse, ToTokens}; +use syn::{spanned::Spanned, Ident, Token}; mod keyword { + syn::custom_keyword!(generate_storage_info); syn::custom_keyword!(hiddencrate); syn::custom_keyword!(add_extra_genesis); syn::custom_keyword!(extra_genesis_skip_phantom_data_field); syn::custom_keyword!(config); + syn::custom_keyword!(max_values); syn::custom_keyword!(build); syn::custom_keyword!(get); syn::custom_keyword!(map); syn::custom_keyword!(double_map); + syn::custom_keyword!(nmap); syn::custom_keyword!(opaque_blake2_256); syn::custom_keyword!(opaque_blake2_128); syn::custom_keyword!(blake2_128_concat); @@ -47,7 +50,7 @@ mod keyword { pub struct Opt
<P> { pub inner: Option<P>, } -impl<P: quote::ToTokens> syn::export::ToTokens for Opt<P> { +impl<P: quote::ToTokens> quote::ToTokens for Opt<P>
{ fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { if let Some(ref p) = self.inner { p.to_tokens(tokens); @@ -72,6 +75,7 @@ macro_rules! impl_parse_for_opt { /// Parsing usage only #[derive(Parse, ToTokens, Debug)] struct StorageDefinition { + pub generate_storage_info: Opt, pub hidden_crate: Opt, pub visibility: syn::Visibility, pub trait_token: Token![trait], @@ -96,6 +100,12 @@ struct StorageDefinition { pub extra_genesis: Opt, } +#[derive(Parse, ToTokens, Debug)] +struct GenerateStorageInfo { + pub keyword: keyword::generate_storage_info, +} +impl_parse_for_opt!(GenerateStorageInfo => keyword::generate_storage_info); + #[derive(Parse, ToTokens, Debug)] struct SpecificHiddenCrate { pub keyword: keyword::hiddencrate, @@ -159,6 +169,7 @@ struct DeclStorageLine { pub name: Ident, pub getter: Opt, pub config: Opt, + pub max_values: Opt, pub build: Opt, pub coldot_token: Token![:], pub storage_type: DeclStorageType, @@ -187,6 +198,13 @@ struct DeclStorageConfig { impl_parse_for_opt!(DeclStorageConfig => keyword::config); +#[derive(Parse, ToTokens, Debug)] +struct DeclStorageMaxValues { + pub max_values_keyword: keyword::max_values, + pub expr: ext::Parens, +} +impl_parse_for_opt!(DeclStorageMaxValues => keyword::max_values); + #[derive(Parse, ToTokens, Debug)] struct DeclStorageBuild { pub build_keyword: keyword::build, @@ -199,6 +217,7 @@ impl_parse_for_opt!(DeclStorageBuild => keyword::build); enum DeclStorageType { Map(DeclStorageMap), DoubleMap(Box), + NMap(DeclStorageNMap), Simple(syn::Type), } @@ -208,6 +227,8 @@ impl syn::parse::Parse for DeclStorageType { Ok(Self::Map(input.parse()?)) } else if input.peek(keyword::double_map) { Ok(Self::DoubleMap(input.parse()?)) + } else if input.peek(keyword::nmap) { + Ok(Self::NMap(input.parse()?)) } else { Ok(Self::Simple(input.parse()?)) } @@ -235,7 +256,21 @@ struct DeclStorageDoubleMap { pub value: syn::Type, } -#[derive(ToTokens, Debug)] +#[derive(Parse, ToTokens, Debug)] +struct DeclStorageKey { + pub hasher: Opt, + pub key: syn::Type, +} + +#[derive(Parse, ToTokens, Debug)] +struct DeclStorageNMap { + pub map_keyword: keyword::nmap, + pub storage_keys: ext::PunctuatedTrailing, + pub ass_keyword: Token![=>], + pub value: syn::Type, +} + +#[derive(Clone, ToTokens, Debug)] enum Hasher { Blake2_256(keyword::opaque_blake2_256), Blake2_128(keyword::opaque_blake2_128), @@ -291,7 +326,7 @@ impl syn::parse::Parse for Opt { } } -#[derive(Parse, ToTokens, Debug)] +#[derive(Clone, Parse, ToTokens, Debug)] struct SetHasher { pub hasher_keyword: keyword::hasher, pub inner: ext::Parens, @@ -332,48 +367,35 @@ fn get_module_instance( it is now defined at frame_support::traits::Instance. Expect `Instance` found `{}`", instantiable.as_ref().unwrap(), ); - return Err(syn::Error::new(instantiable.span(), msg)); + return Err(syn::Error::new(instantiable.span(), msg)) } match (instance, instantiable, default_instance) { - (Some(instance), Some(instantiable), default_instance) => { + (Some(instance), Some(instantiable), default_instance) => Ok(Some(super::ModuleInstanceDef { instance_generic: instance, instance_trait: instantiable, instance_default: default_instance, - })) - }, + })), (None, None, None) => Ok(None), - (Some(instance), None, _) => Err( - syn::Error::new( - instance.span(), - format!( - "Expect instantiable trait bound for instance: {}. {}", - instance, - right_syntax, - ) - ) - ), - (None, Some(instantiable), _) => Err( - syn::Error::new( - instantiable.span(), - format!( - "Expect instance generic for bound instantiable: {}. 
{}", - instantiable, - right_syntax, - ) - ) - ), - (None, _, Some(default_instance)) => Err( - syn::Error::new( - default_instance.span(), - format!( - "Expect instance generic for default instance: {}. {}", - default_instance, - right_syntax, - ) - ) - ), + (Some(instance), None, _) => Err(syn::Error::new( + instance.span(), + format!("Expect instantiable trait bound for instance: {}. {}", instance, right_syntax), + )), + (None, Some(instantiable), _) => Err(syn::Error::new( + instantiable.span(), + format!( + "Expect instance generic for bound instantiable: {}. {}", + instantiable, right_syntax, + ), + )), + (None, _, Some(default_instance)) => Err(syn::Error::new( + default_instance.span(), + format!( + "Expect instance generic for default instance: {}. {}", + default_instance, right_syntax, + ), + )), } } @@ -382,43 +404,44 @@ pub fn parse(input: syn::parse::ParseStream) -> syn::Result { - extra_genesis_config_lines.push(super::ExtraGenesisLineDef{ + extra_genesis_config_lines.push(super::ExtraGenesisLineDef { attrs: def.attrs.inner, name: def.extra_field.content, typ: def.extra_type, default: def.default_value.inner.map(|o| o.expr), }); - } + }, AddExtraGenesisLineEnum::AddExtraGenesisBuild(def) => { if extra_genesis_build.is_some() { return Err(syn::Error::new( def.span(), - "Only one build expression allowed for extra genesis" + "Only one build expression allowed for extra genesis", )) } extra_genesis_build = Some(def.expr.content); - } + }, } } let storage_lines = parse_storage_line_defs(def.content.content.inner.into_iter())?; Ok(super::DeclStorageDef { + generate_storage_info: def.generate_storage_info.inner.is_some(), hidden_crate: def.hidden_crate.inner.map(|i| i.ident.content), visibility: def.visibility, module_name: def.module_ident, @@ -460,41 +483,65 @@ fn parse_storage_line_defs( }; if let Some(ref config) = config { - storage_lines.iter().filter_map(|sl| sl.config.as_ref()).try_for_each(|other_config| { - if other_config == config { - Err(syn::Error::new( - config.span(), - "`config()`/`get()` with the same name already defined.", - )) - } else { - Ok(()) - } - })?; + storage_lines.iter().filter_map(|sl| sl.config.as_ref()).try_for_each( + |other_config| { + if other_config == config { + Err(syn::Error::new( + config.span(), + "`config()`/`get()` with the same name already defined.", + )) + } else { + Ok(()) + } + }, + )?; } + let max_values = match &line.storage_type { + DeclStorageType::Map(_) | DeclStorageType::DoubleMap(_) | DeclStorageType::NMap(_) => + line.max_values.inner.map(|i| i.expr.content), + DeclStorageType::Simple(_) => + if let Some(max_values) = line.max_values.inner { + let msg = "unexpected max_values attribute for storage value."; + let span = max_values.max_values_keyword.span(); + return Err(syn::Error::new(span, msg)) + } else { + Some(syn::parse_quote!(1u32)) + }, + }; + let span = line.storage_type.span(); - let no_hasher_error = || syn::Error::new( - span, - "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead." 
- ); + let no_hasher_error = || { + syn::Error::new( + span, + "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead.", + ) + }; let storage_type = match line.storage_type { - DeclStorageType::Map(map) => super::StorageLineTypeDef::Map( - super::MapDef { - hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), - key: map.key, - value: map.value, - } - ), - DeclStorageType::DoubleMap(map) => super::StorageLineTypeDef::DoubleMap( - Box::new(super::DoubleMapDef { + DeclStorageType::Map(map) => super::StorageLineTypeDef::Map(super::MapDef { + hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), + key: map.key, + value: map.value, + }), + DeclStorageType::DoubleMap(map) => + super::StorageLineTypeDef::DoubleMap(Box::new(super::DoubleMapDef { hasher1: map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), hasher2: map.hasher2.inner.ok_or_else(no_hasher_error)?.into(), key1: map.key1, key2: map.key2, value: map.value, - }) - ), + })), + DeclStorageType::NMap(map) => super::StorageLineTypeDef::NMap(super::NMapDef { + hashers: map + .storage_keys + .inner + .iter() + .map(|pair| Ok(pair.hasher.inner.clone().ok_or_else(no_hasher_error)?.into())) + .collect::, syn::Error>>()?, + keys: map.storage_keys.inner.iter().map(|pair| pair.key.clone()).collect(), + value: map.value, + }), DeclStorageType::Simple(expr) => super::StorageLineTypeDef::Simple(expr), }; @@ -504,6 +551,7 @@ fn parse_storage_line_defs( name: line.name, getter, config, + max_values, build: line.build.inner.map(|o| o.expr.content), default_value: line.default_value.inner.map(|o| o.expr), storage_type, diff --git a/frame/support/procedural/src/storage/print_pallet_upgrade.rs b/frame/support/procedural/src/storage/print_pallet_upgrade.rs new file mode 100644 index 0000000000000..03f09a7edb48e --- /dev/null +++ b/frame/support/procedural/src/storage/print_pallet_upgrade.rs @@ -0,0 +1,387 @@ +use super::StorageLineTypeDef; +use frame_support_procedural_tools::clean_type_string; +use quote::ToTokens; + +/// Environment variable that tells us to print pallet upgrade helper. +const PRINT_PALLET_UPGRADE: &str = "PRINT_PALLET_UPGRADE"; + +fn check_print_pallet_upgrade() -> bool { + std::env::var(PRINT_PALLET_UPGRADE).is_ok() +} + +/// Convert visibilty as now objects are defined in a module. +fn convert_vis(vis: &syn::Visibility) -> &'static str { + match vis { + syn::Visibility::Inherited => "pub(super)", + syn::Visibility::Public(_) => "pub", + _ => "/* TODO_VISIBILITY */", + } +} + +/// fn to convert to token stream then string using display and then call clean_type_string on it. +fn to_cleaned_string(t: impl quote::ToTokens) -> String { + clean_type_string(&format!("{}", t.into_token_stream())) +} + +/// Print an incomplete upgrade from decl_storage macro to new pallet attribute. 
+pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { + if !check_print_pallet_upgrade() { + return + } + + let scrate = "e::quote!(frame_support); + + let config_gen = + if def.optional_instance.is_some() { "" } else { Default::default() }; + + let impl_gen = + if def.optional_instance.is_some() { ", I: 'static>" } else { "" }; + + let decl_gen = if def.optional_instance.is_some() { "" } else { "" }; + + let full_decl_gen = if def.optional_instance.is_some() { + ", I: 'static = ()>" + } else { + "" + }; + + let use_gen = if def.optional_instance.is_some() { "" } else { "" }; + + let use_gen_tuple = if def.optional_instance.is_some() { "<(T, I)>" } else { "" }; + + let mut genesis_config = String::new(); + let mut genesis_build = String::new(); + + let genesis_config_builder_def = super::genesis_config::BuilderDef::from_def(scrate, def); + if !genesis_config_builder_def.blocks.is_empty() { + let genesis_config_def = match super::genesis_config::GenesisConfigDef::from_def(def) { + Ok(g) => g, + Err(err) => { + println!("Could not print upgrade due compile error: {:?}", err); + return + }, + }; + + let genesis_config_impl_gen = + if genesis_config_def.is_generic { impl_gen } else { Default::default() }; + + let genesis_config_use_gen = + if genesis_config_def.is_generic { use_gen } else { Default::default() }; + + let genesis_config_decl_gen = if genesis_config_def.is_generic { + if def.optional_instance.is_some() { + ", I: 'static = ()>" + } else { + "" + } + } else { + Default::default() + }; + + let mut genesis_config_decl_fields = String::new(); + let mut genesis_config_default_fields = String::new(); + for field in &genesis_config_def.fields { + genesis_config_decl_fields.push_str(&format!( + " + {attrs}pub {name}: {typ},", + attrs = field.attrs.iter().fold(String::new(), |res, attr| { + format!( + "{}#[{}] + ", + res, + attr.to_token_stream() + ) + }), + name = field.name, + typ = to_cleaned_string(&field.typ), + )); + + genesis_config_default_fields.push_str(&format!( + " + {name}: {default},", + name = field.name, + default = to_cleaned_string(&field.default), + )); + } + + genesis_config = format!( + " + #[pallet::genesis_config] + pub struct GenesisConfig{genesis_config_decl_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{{genesis_config_decl_fields} + }} + + #[cfg(feature = \"std\")] + impl{genesis_config_impl_gen} Default for GenesisConfig{genesis_config_use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + fn default() -> Self {{ + Self {{{genesis_config_default_fields} + }} + }} + }}", + genesis_config_decl_gen = genesis_config_decl_gen, + genesis_config_decl_fields = genesis_config_decl_fields, + genesis_config_impl_gen = genesis_config_impl_gen, + genesis_config_default_fields = genesis_config_default_fields, + genesis_config_use_gen = genesis_config_use_gen, + ); + + let genesis_config_build = + genesis_config_builder_def.blocks.iter().fold(String::new(), |res, block| { + format!( + "{} + {}", + res, + to_cleaned_string(block), + ) + }); + + genesis_build = format!( + " + #[pallet::genesis_build] + impl{impl_gen} GenesisBuild{use_gen} for GenesisConfig{genesis_config_use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + fn build(&self) {{{genesis_config_build} + }} + }}", + impl_gen = impl_gen, + use_gen = use_gen, + genesis_config_use_gen = genesis_config_use_gen, + genesis_config_build = genesis_config_build, + ); + } + + let mut storages = String::new(); + for line in &def.storage_lines { + let storage_vis = convert_vis(&line.visibility); + + let getter = if let Some(getter) = 
&line.getter { + format!( + " + #[pallet::getter(fn {getter})]", + getter = getter + ) + } else { + Default::default() + }; + + let value_type = &line.value_type; + + let default_value_type_value = line + .default_value + .as_ref() + .map(|default_expr| { + format!( + " + #[pallet::type_value] + {storage_vis} fn DefaultFor{name} /* TODO_MAYBE_GENERICS */ () -> {value_type} {{ + {default_expr} + }} +", + name = line.name, + storage_vis = storage_vis, + value_type = to_cleaned_string(&line.value_type), + default_expr = to_cleaned_string(&default_expr), + ) + }) + .unwrap_or_else(String::new); + + let comma_query_kind = if line.is_option { + if line.default_value.is_some() { + ", OptionQuery" + } else { + Default::default() + } + } else { + ", ValueQuery" + }; + + let comma_default_value_getter_name = line + .default_value + .as_ref() + .map(|_| format!(", DefaultFor{}", line.name)) + .unwrap_or_else(String::new); + + let typ = match &line.storage_type { + StorageLineTypeDef::Map(map) => { + format!( + "StorageMap<_, {hasher}, {key}, {value_type}{comma_query_kind}\ + {comma_default_value_getter_name}>", + hasher = &map.hasher.to_storage_hasher_struct(), + key = to_cleaned_string(&map.key), + value_type = to_cleaned_string(&value_type), + comma_query_kind = comma_query_kind, + comma_default_value_getter_name = comma_default_value_getter_name, + ) + }, + StorageLineTypeDef::DoubleMap(double_map) => { + format!( + "StorageDoubleMap<_, {hasher1}, {key1}, {hasher2}, {key2}, {value_type}\ + {comma_query_kind}{comma_default_value_getter_name}>", + hasher1 = double_map.hasher1.to_storage_hasher_struct(), + key1 = to_cleaned_string(&double_map.key1), + hasher2 = double_map.hasher2.to_storage_hasher_struct(), + key2 = to_cleaned_string(&double_map.key2), + value_type = to_cleaned_string(&value_type), + comma_query_kind = comma_query_kind, + comma_default_value_getter_name = comma_default_value_getter_name, + ) + }, + StorageLineTypeDef::NMap(map) => { + format!( + "StorageNMap<_, {keygen}, {value_type}{comma_query_kind}\ + {comma_default_value_getter_name}>", + keygen = map.to_keygen_struct(&def.hidden_crate), + value_type = to_cleaned_string(&value_type), + comma_query_kind = comma_query_kind, + comma_default_value_getter_name = comma_default_value_getter_name, + ) + }, + StorageLineTypeDef::Simple(_) => { + format!( + "StorageValue<_, {value_type}{comma_query_kind}\ + {comma_default_value_getter_name}>", + value_type = to_cleaned_string(&value_type), + comma_query_kind = comma_query_kind, + comma_default_value_getter_name = comma_default_value_getter_name, + ) + }, + }; + + let additional_comment = if line.is_option && line.default_value.is_some() { + " // TODO: This type of storage is no longer supported: `OptionQuery` cannot be used \ + alongside a not-none value on empty storage. Please use `ValueQuery` instead." 
+ } else { + "" + }; + + storages.push_str(&format!( + " +{default_value_type_value}{doc} + #[pallet::storage]{getter} + {storage_vis} type {name}{full_decl_gen} = {typ};{additional_comment}", + default_value_type_value = default_value_type_value, + getter = getter, + storage_vis = storage_vis, + name = line.name, + full_decl_gen = full_decl_gen, + typ = typ, + additional_comment = additional_comment, + doc = line.doc_attrs.iter().fold(String::new(), |mut res, attr| { + if let syn::Meta::NameValue(name_value) = attr { + if name_value.path.is_ident("doc") { + if let syn::Lit::Str(string) = &name_value.lit { + res = format!( + "{} + ///{}", + res, + string.value(), + ); + } + } + } + res + }), + )); + } + + let deprecated_instance_stuff = if def.optional_instance.is_some() { + " + /// Old name for default instance generated by decl_storage. + #[deprecated(note=\"use `()` instead\")] + pub type DefaultInstance = (); + + /// Old name for instance trait used by old macros. + #[deprecated(note=\"use `'static` instead\")] + pub trait Instance: 'static {} + impl Instance for I {}" + } else { + "" + }; + + println!( + " +// Template for pallet upgrade for {pallet_name} + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet {{ + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + #[pallet::config] + pub trait Config{config_gen}: frame_system::Config + // TODO_MAYBE_ADDITIONAL_BOUNDS_AND_WHERE_CLAUSE + {{ + // TODO_ASSOCIATED_TYPE_AND_CONSTANTS + }} + + {deprecated_instance_stuff} + + #[pallet::pallet] + #[pallet::generate_store({store_vis} trait Store)] + pub struct Pallet{decl_gen}(PhantomData{use_gen_tuple}); + + #[pallet::hooks] + impl{impl_gen} Hooks> for Pallet{use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + // TODO_ON_FINALIZE + // TODO_ON_INITIALIZE + // TODO_ON_RUNTIME_UPGRADE + // TODO_INTEGRITY_TEST + // TODO_OFFCHAIN_WORKER + }} + + #[pallet::call] + impl{impl_gen} Pallet{use_gen} + // TODO_MAYBE_WHERE_CLAUSE + {{ + // TODO_UPGRADE_DISPATCHABLES + }} + + #[pallet::inherent] + // TODO_INHERENT + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + // TODO_EVENT + + // TODO_REMOVE_IF_NO_EVENT + /// Old name generated by `decl_event`. + #[deprecated(note=\"use `Event` instead\")] + pub type RawEvent /* TODO_PUT_EVENT_GENERICS */ = Event /* TODO_PUT_EVENT_GENERICS */; + + #[pallet::error] + // TODO_ERROR + + #[pallet::origin] + // TODO_ORIGIN + + #[pallet::validate_unsigned] + // TODO_VALIDATE_UNSIGNED + + {storages} + + {genesis_config} + + {genesis_build} +}}", + config_gen = config_gen, + store_vis = convert_vis(&def.visibility), + impl_gen = impl_gen, + use_gen = use_gen, + use_gen_tuple = use_gen_tuple, + decl_gen = decl_gen, + storages = storages, + genesis_config = genesis_config, + genesis_build = genesis_build, + pallet_name = def.crate_name, + deprecated_instance_stuff = deprecated_instance_stuff, + ); +} diff --git a/frame/support/procedural/src/storage/storage_info.rs b/frame/support/procedural/src/storage/storage_info.rs new file mode 100644 index 0000000000000..844896409f851 --- /dev/null +++ b/frame/support/procedural/src/storage/storage_info.rs @@ -0,0 +1,59 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of trait `StorageInfoTrait` on module structure. + +use super::DeclStorageDefExt; +use proc_macro2::TokenStream; +use quote::quote; + +pub fn impl_storage_info(def: &DeclStorageDefExt) -> TokenStream { + let scrate = &def.hidden_crate; + + let mut res_append_storage = TokenStream::new(); + + for line in def.storage_lines.iter() { + let storage_struct = &line.storage_struct; + + let (trait_, method) = if def.generate_storage_info { + (quote!(#scrate::traits::StorageInfoTrait), quote!(storage_info)) + } else { + (quote!(#scrate::traits::PartialStorageInfoTrait), quote!(partial_storage_info)) + }; + + res_append_storage.extend(quote!( + let mut storage_info = < + #storage_struct as #trait_ + >::#method(); + res.append(&mut storage_info); + )); + } + + let module_struct = &def.module_struct; + let module_impl = &def.module_impl; + let where_clause = &def.where_clause; + + quote!( + impl#module_impl #scrate::traits::StorageInfoTrait for #module_struct #where_clause { + fn storage_info() -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> { + let mut res = #scrate::sp_std::vec![]; + #res_append_storage + res + } + } + ) +} diff --git a/frame/support/procedural/src/storage/storage_struct.rs b/frame/support/procedural/src/storage/storage_struct.rs index e89b06770a6c5..b318225681c1d 100644 --- a/frame/support/procedural/src/storage/storage_struct.rs +++ b/frame/support/procedural/src/storage/storage_struct.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,16 +17,15 @@ //! Implementation of storage structures and implementation of storage traits on them. -use proc_macro2::{TokenStream, Ident, Span}; +use super::{instance_trait::INHERENT_INSTANCE_NAME, DeclStorageDefExt, StorageLineTypeDef}; +use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use super::{ - DeclStorageDefExt, StorageLineTypeDef, - instance_trait::INHERENT_INSTANCE_NAME, -}; fn from_optional_value_to_query(is_option: bool, default: &Option) -> TokenStream { - let default = default.as_ref().map(|d| quote!( #d )) - .unwrap_or_else(|| quote!( Default::default() )); + let default = default + .as_ref() + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!(Default::default())); if !is_option { // raw type case @@ -40,18 +39,18 @@ fn from_optional_value_to_query(is_option: bool, default: &Option) -> fn from_query_to_optional_value(is_option: bool) -> TokenStream { if !is_option { // raw type case - quote!( Some(v) ) + quote!(Some(v)) } else { // Option<> type case - quote!( v ) + quote!(v) } } -pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { +pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { + let scrate = &def.hidden_crate; let mut impls = TokenStream::new(); for line in &def.storage_lines { - // Propagate doc attributes. 
let attrs = &line.doc_attrs; @@ -59,7 +58,8 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre let optional_storage_runtime_comma = &line.optional_storage_runtime_comma; let optional_storage_runtime_bound_comma = &line.optional_storage_runtime_bound_comma; let optional_storage_where_clause = &line.optional_storage_where_clause; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; + let optional_instance_bound_optional_default = + &def.optional_instance_bound_optional_default; let optional_instance_bound = &def.optional_instance_bound; let optional_instance = &def.optional_instance; let name = &line.name; @@ -86,10 +86,8 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()) }; - let storage_name_bstr = syn::LitByteStr::new( - line.name.to_string().as_ref(), - line.name.span() - ); + let storage_name_bstr = + syn::LitByteStr::new(line.name.to_string().as_ref(), line.name.span()); let storage_generator_trait = &line.storage_generator_trait; let storage_struct = &line.storage_struct; @@ -204,12 +202,361 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre } } ) + }, + StorageLineTypeDef::NMap(_) => { + quote!( + impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> + for #storage_struct #optional_storage_where_clause + { + fn module_prefix() -> &'static [u8] { + <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + #storage_name_bstr + } + } + + impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct + #optional_storage_where_clause + { + type Query = #query_type; + + fn module_prefix() -> &'static [u8] { + <#instance_or_inherent as #scrate::traits::Instance>::PREFIX.as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + #storage_name_bstr + } + + fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { + #from_optional_value_to_query + } + + fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { + #from_query_to_optional_value + } + } + ) + }, + }; + + let max_values = if let Some(max_values) = &line.max_values { + quote::quote!({ + let max_values: u32 = (|| #max_values)(); + Some(max_values) + }) + } else { + quote::quote!(None) + }; + + let storage_info_impl = if def.generate_storage_info { + match &line.storage_type { + StorageLineTypeDef::Simple(_) => { + quote!( + impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct + #optional_storage_where_clause + { + fn storage_info() + -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + use #scrate::sp_runtime::SaturatedConversion; + + let max_size = < + #value_type as #scrate::codec::MaxEncodedLen + >::max_encoded_len() + .saturated_into(); + + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct as #scrate::#storage_generator_trait + >::module_prefix().to_vec(), + storage_name: < + #storage_struct as #scrate::#storage_generator_trait + >::storage_prefix().to_vec(), + prefix: < + #storage_struct as #scrate::#storage_generator_trait + >::storage_value_final_key().to_vec(), + max_values: Some(1), + max_size: Some(max_size), + } + ] + } + } + ) + }, + StorageLineTypeDef::Map(map) => { + let key = &map.key; + quote!( + impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct + #optional_storage_where_clause + { + fn storage_info() + -> 
#scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + use #scrate::sp_runtime::SaturatedConversion; + use #scrate::StorageHasher; + + let key_max_size = < + Self as #scrate::storage::generator::StorageMap<_, _> + >::Hasher::max_len::<#key>(); + + let max_size = < + #value_type as #scrate::codec::MaxEncodedLen + >::max_encoded_len() + .saturating_add(key_max_size) + .saturated_into(); + + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::module_prefix().to_vec(), + storage_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::storage_prefix().to_vec(), + prefix: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::final_prefix().to_vec(), + max_values: #max_values, + max_size: Some(max_size), + } + ] + } + } + ) + }, + StorageLineTypeDef::DoubleMap(map) => { + let key1 = &map.key1; + let key2 = &map.key2; + quote!( + impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct + #optional_storage_where_clause + { + fn storage_info() + -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + use #scrate::sp_runtime::SaturatedConversion; + use #scrate::StorageHasher; + + let key1_max_size = < + Self as #scrate::storage::generator::StorageDoubleMap<_, _, _> + >::Hasher1::max_len::<#key1>(); + + let key2_max_size = < + Self as #scrate::storage::generator::StorageDoubleMap<_, _, _> + >::Hasher2::max_len::<#key2>(); + + let max_size = < + #value_type as #scrate::codec::MaxEncodedLen + >::max_encoded_len() + .saturating_add(key1_max_size) + .saturating_add(key2_max_size) + .saturated_into(); + + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::module_prefix().to_vec(), + storage_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::storage_prefix().to_vec(), + prefix: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::final_prefix().to_vec(), + max_values: #max_values, + max_size: Some(max_size), + } + ] + } + } + ) + }, + StorageLineTypeDef::NMap(map) => { + let key = &map.to_keygen_struct(scrate); + quote!( + impl<#impl_trait> #scrate::traits::StorageInfoTrait for #storage_struct + #optional_storage_where_clause + { + fn storage_info() + -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + use #scrate::sp_runtime::SaturatedConversion; + + let key_max_size = < + #key as #scrate::storage::types::KeyGeneratorMaxEncodedLen + >::key_max_encoded_len(); + + let max_size = < + #value_type as #scrate::codec::MaxEncodedLen + >::max_encoded_len() + .saturating_add(key_max_size) + .saturated_into(); + + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::module_prefix().to_vec(), + storage_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::storage_prefix().to_vec(), + prefix: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::final_prefix().to_vec(), + max_values: #max_values, + max_size: Some(max_size), + } + ] + } + } + ) + }, + } + } else { + // Implement `__partial_storage_info` which doesn't require MaxEncodedLen on keys and + // values. 
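// As a rough illustration (names are hypothetical, not taken from this diff):
// for a `Balances: map hasher(blake2_128_concat) T::AccountId => u64` line with
// no `max_values` annotation, the emitted `partial_storage_info` returns
// something shaped like:
//
//   StorageInfo {
//       pallet_name: b"MyPallet".to_vec(),  // module_prefix()
//       storage_name: b"Balances".to_vec(), // storage_prefix()
//       prefix: final_prefix().to_vec(),
//       max_values: None, // no `max_values` annotation was given
//       max_size: None,   // unknown: keys/values need not implement MaxEncodedLen
//   }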
+ match &line.storage_type { + StorageLineTypeDef::Simple(_) => { + quote!( + impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait + for #storage_struct + #optional_storage_where_clause + { + fn partial_storage_info() + -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct as #scrate::#storage_generator_trait + >::module_prefix().to_vec(), + storage_name: < + #storage_struct as #scrate::#storage_generator_trait + >::storage_prefix().to_vec(), + prefix: < + #storage_struct as #scrate::#storage_generator_trait + >::storage_value_final_key().to_vec(), + max_values: Some(1), + max_size: None, + } + ] + } + } + ) + }, + StorageLineTypeDef::Map(_) => { + quote!( + impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait + for #storage_struct + #optional_storage_where_clause + { + fn partial_storage_info() + -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::module_prefix().to_vec(), + storage_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::storage_prefix().to_vec(), + prefix: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::final_prefix().to_vec(), + max_values: #max_values, + max_size: None, + } + ] + } + } + ) + }, + StorageLineTypeDef::DoubleMap(_) => { + quote!( + impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait + for #storage_struct + #optional_storage_where_clause + { + fn partial_storage_info() + -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::module_prefix().to_vec(), + storage_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::storage_prefix().to_vec(), + prefix: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::final_prefix().to_vec(), + max_values: #max_values, + max_size: None, + } + ] + } + } + ) + }, + StorageLineTypeDef::NMap(_) => { + quote!( + impl<#impl_trait> #scrate::traits::PartialStorageInfoTrait + for #storage_struct + #optional_storage_where_clause + { + fn partial_storage_info() + -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> + { + #scrate::sp_std::vec![ + #scrate::traits::StorageInfo { + pallet_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::module_prefix().to_vec(), + storage_name: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::storage_prefix().to_vec(), + prefix: < + #storage_struct + as #scrate::storage::StoragePrefixedMap<#value_type> + >::final_prefix().to_vec(), + max_values: #max_values, + max_size: None, + } + ] + } + } + ) + }, } }; impls.extend(quote!( #struct_decl #struct_impl + #storage_info_impl )) } diff --git a/frame/support/procedural/src/storage/store_trait.rs b/frame/support/procedural/src/storage/store_trait.rs index 7efe65b5f3178..7dde92cf9a75d 100644 --- a/frame/support/procedural/src/storage/store_trait.rs +++ b/frame/support/procedural/src/storage/store_trait.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,26 +17,26 @@ //! Declaration of store trait and implementation on module structure. +use super::DeclStorageDefExt; use proc_macro2::TokenStream; use quote::quote; -use super::DeclStorageDefExt; pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { - let decl_store_items = def.storage_lines.iter() - .map(|sline| &sline.name) - .fold(TokenStream::new(), |mut items, name| { + let decl_store_items = def.storage_lines.iter().map(|sline| &sline.name).fold( + TokenStream::new(), + |mut items, name| { items.extend(quote!(type #name;)); items - }); + }, + ); - let impl_store_items = def.storage_lines.iter() - .fold(TokenStream::new(), |mut items, line| { - let name = &line.name; - let storage_struct = &line.storage_struct; + let impl_store_items = def.storage_lines.iter().fold(TokenStream::new(), |mut items, line| { + let name = &line.name; + let storage_struct = &line.storage_struct; - items.extend(quote!(type #name = #storage_struct;)); - items - }); + items.extend(quote!(type #name = #storage_struct;)); + items + }); let visibility = &def.visibility; let store_trait = &def.store_trait; diff --git a/frame/support/procedural/src/transactional.rs b/frame/support/procedural/src/transactional.rs index 3c2617a17e508..403f1cd02bac7 100644 --- a/frame/support/procedural/src/transactional.rs +++ b/frame/support/procedural/src/transactional.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,15 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. +use frame_support_procedural_tools::generate_crate_access_2018; use proc_macro::TokenStream; use quote::quote; use syn::{ItemFn, Result}; -use frame_support_procedural_tools::generate_crate_access_2018; pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result { let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; - let crate_ = generate_crate_access_2018()?; + let crate_ = generate_crate_access_2018("frame-support")?; let output = quote! { #(#attrs)* #vis #sig { @@ -45,7 +45,7 @@ pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result Result { let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; - let crate_ = generate_crate_access_2018()?; + let crate_ = generate_crate_access_2018("frame-support")?; let output = quote! 
{ #(#attrs)* #vis #sig {
diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml
index 2cff2473b85d4..ee59f53287efa 100644
--- a/frame/support/procedural/tools/Cargo.toml
+++ b/frame/support/procedural/tools/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "frame-support-procedural-tools"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -12,8 +12,8 @@ description = "Proc macro helpers for procedural macros"
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-frame-support-procedural-tools-derive = { version = "2.0.0", path = "./derive" }
-proc-macro2 = "1.0.6"
+frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" }
+proc-macro2 = "1.0.29"
 quote = "1.0.3"
-syn = { version = "1.0.7", features = ["full", "visit"] }
-proc-macro-crate = "0.1.4"
+syn = { version = "1.0.58", features = ["full", "visit", "extra-traits"] }
+proc-macro-crate = "1.0.0"
diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml
index b616dd790d61e..12ec6a69f3967 100644
--- a/frame/support/procedural/tools/derive/Cargo.toml
+++ b/frame/support/procedural/tools/derive/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "frame-support-procedural-tools-derive"
-version = "2.0.0"
+version = "3.0.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -15,6 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true

 [dependencies]
-proc-macro2 = "1.0.6"
+proc-macro2 = "1.0.29"
 quote = { version = "1.0.3", features = ["proc-macro"] }
-syn = { version = "1.0.7", features = ["proc-macro" ,"full", "extra-traits", "parsing"] }
+syn = { version = "1.0.58", features = ["proc-macro" ,"full", "extra-traits", "parsing"] }
diff --git a/frame/support/procedural/tools/derive/src/lib.rs b/frame/support/procedural/tools/derive/src/lib.rs
index 6e5d6c896cbf8..7922105895608 100644
--- a/frame/support/procedural/tools/derive/src/lib.rs
+++ b/frame/support/procedural/tools/derive/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,14 +23,14 @@

 use proc_macro::TokenStream;
 use proc_macro2::Span;
-use syn::parse_macro_input;
 use quote::quote;
+use syn::parse_macro_input;

 pub(crate) fn fields_idents(
 	fields: impl Iterator<Item = syn::Field>,
 ) -> impl Iterator<Item = TokenStream> {
 	fields.enumerate().map(|(ix, field)| {
-		field.ident.map(|i| quote!{#i}).unwrap_or_else(|| {
+		field.ident.map(|i| quote! {#i}).unwrap_or_else(|| {
 			let f_ix: syn::Ident = syn::Ident::new(&format!("f_{}", ix), Span::call_site());
 			quote!( #f_ix )
 		})
 	})
@@ -42,10 +42,7 @@ pub(crate) fn fields_access(
 ) -> impl Iterator<Item = TokenStream> {
 	fields.enumerate().map(|(ix, field)| {
 		field.ident.map(|i| quote!( #i )).unwrap_or_else(|| {
-			let f_ix: syn::Index = syn::Index {
-				index: ix as u32,
-				span: Span::call_site(),
-			};
+			let f_ix: syn::Index = syn::Index { index: ix as u32, span: Span::call_site() };
 			quote!( #f_ix )
 		})
 	})
@@ -64,15 +61,10 @@ pub fn derive_parse(input: TokenStream) -> TokenStream {
 }

 fn derive_parse_struct(input: syn::ItemStruct) -> TokenStream {
-	let syn::ItemStruct {
-		ident,
-		generics,
-		fields,
-		..
-	} = input;
+	let syn::ItemStruct { ident, generics, fields, ..
} = input;
 	let field_names = {
 		let name = fields_idents(fields.iter().map(Clone::clone));
-		quote!{
+		quote! {
 			#(
 				#name,
 			)*
@@ -110,12 +102,7 @@ pub fn derive_totokens(input: TokenStream) -> TokenStream {
 }

 fn derive_totokens_struct(input: syn::ItemStruct) -> TokenStream {
-	let syn::ItemStruct {
-		ident,
-		generics,
-		fields,
-		..
-	} = input;
+	let syn::ItemStruct { ident, generics, fields, .. } = input;

 	let fields = fields_access(fields.iter().map(Clone::clone));
 	let tokens = quote! {
@@ -133,12 +120,7 @@ fn derive_totokens_struct(input: syn::ItemStruct) -> TokenStream {
 }

 fn derive_totokens_enum(input: syn::ItemEnum) -> TokenStream {
-	let syn::ItemEnum {
-		ident,
-		generics,
-		variants,
-		..
-	} = input;
+	let syn::ItemEnum { ident, generics, variants, .. } = input;
 	let variants = variants.iter().map(|v| {
 		let v_ident = v.ident.clone();
 		let fields_build = if v.fields.iter().count() > 0 {
diff --git a/frame/support/procedural/tools/src/lib.rs b/frame/support/procedural/tools/src/lib.rs
index c5a27c809aff8..d7aba4c7cbf1c 100644
--- a/frame/support/procedural/tools/src/lib.rs
+++ b/frame/support/procedural/tools/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,14 +22,14 @@
 // reexport proc macros
 pub use frame_support_procedural_tools_derive::*;

-use proc_macro_crate::crate_name;
-use syn::parse::Error;
+use proc_macro_crate::{crate_name, FoundCrate};
 use quote::quote;
+use syn::parse::Error;

 pub mod syn_ext;

 // FIXME #1569, remove the following functions, which are copied from sp-api-macros
-use proc_macro2::{TokenStream, Span};
+use proc_macro2::{Span, TokenStream};
 use syn::Ident;

 fn generate_hidden_includes_mod_name(unique_id: &str) -> Ident {
@@ -39,55 +39,46 @@ fn generate_hidden_includes_mod_name(unique_id: &str) -> Ident {
 /// Generates the access to the `frame-support` crate.
 pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream {
 	if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate {
-		quote::quote!( frame_support )
+		quote::quote!(frame_support)
 	} else {
 		let mod_name = generate_hidden_includes_mod_name(unique_id);
 		quote::quote!( self::#mod_name::hidden_include )
 	}
 }

-/// Generate the crate access for the `frame-support` crate using 2018 syntax.
+/// Generate the crate access for the crate using 2018 syntax.
 ///
-/// Output will for example be `frame_support`.
-pub fn generate_crate_access_2018() -> Result<TokenStream, Error> {
-	if std::env::var("CARGO_PKG_NAME").unwrap() == "frame-support" {
-		Ok(quote::quote!( frame_support ))
-	} else {
-		match crate_name("frame-support") {
-			Ok(name) => {
-				let name = Ident::new(&name, Span::call_site());
-				Ok(quote!( #name ))
-			},
-			Err(e) => {
-				Err(Error::new(Span::call_site(), &e))
-			}
-		}
+/// For `frame-support` the output will for example be `frame_support`.
+pub fn generate_crate_access_2018(def_crate: &str) -> Result<Ident, Error> {
+	match crate_name(def_crate) {
+		Ok(FoundCrate::Itself) => {
+			let name = def_crate.to_string().replace("-", "_");
+			Ok(syn::Ident::new(&name, Span::call_site()))
+		},
+		Ok(FoundCrate::Name(name)) => Ok(Ident::new(&name, Span::call_site())),
+		Err(e) => Err(Error::new(Span::call_site(), e)),
 	}
 }

 /// Generates the hidden includes that are required to make the macro independent from its scope.
pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream { - if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { - TokenStream::new() - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - - match crate_name(def_crate) { - Ok(name) => { - let name = Ident::new(&name, Span::call_site()); - quote::quote!( - #[doc(hidden)] - mod #mod_name { - pub extern crate #name as hidden_include; - } - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - quote!( #err ) - } - } + let mod_name = generate_hidden_includes_mod_name(unique_id); + match crate_name(def_crate) { + Ok(FoundCrate::Itself) => quote!(), + Ok(FoundCrate::Name(name)) => { + let name = Ident::new(&name, Span::call_site()); + quote::quote!( + #[doc(hidden)] + mod #mod_name { + pub extern crate #name as hidden_include; + } + ) + }, + Err(e) => { + let err = Error::new(Span::call_site(), e).to_compile_error(); + quote!( #err ) + }, } } @@ -109,3 +100,21 @@ pub fn clean_type_string(input: &str) -> String { .replace("< ", "<") .replace(" >", ">") } + +/// Return all doc attributes literals found. +pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { + attrs + .iter() + .filter_map(|attr| { + if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() { + if meta.path.get_ident().map_or(false, |ident| ident == "doc") { + Some(meta.lit) + } else { + None + } + } else { + None + } + }) + .collect() +} diff --git a/frame/support/procedural/tools/src/syn_ext.rs b/frame/support/procedural/tools/src/syn_ext.rs index 2ba4cf3f28a11..a9e9ef573985f 100644 --- a/frame/support/procedural/tools/src/syn_ext.rs +++ b/frame/support/procedural/tools/src/syn_ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,15 @@ //! Extension to syn types, mainly for parsing // end::description[] -use syn::{visit::{Visit, self}, parse::{Parse, ParseStream, Result}, Ident}; +use frame_support_procedural_tools_derive::{Parse, ToTokens}; use proc_macro2::{TokenStream, TokenTree}; use quote::ToTokens; use std::iter::once; -use frame_support_procedural_tools_derive::{ToTokens, Parse}; +use syn::{ + parse::{Parse, ParseStream, Result}, + visit::{self, Visit}, + Ident, +}; /// stop parsing here getting remaining token as content /// Warn duplicate stream (part of) @@ -35,7 +39,6 @@ pub struct StopParse { // inner macro really dependant on syn naming convention, do not export macro_rules! groups_impl { ($name:ident, $tok:ident, $deli:ident, $parse:ident) => { - #[derive(Debug)] pub struct $name
<P> {
 			pub token: syn::token::$tok,
@@ -46,7 +49,7 @@ macro_rules! groups_impl {
 		fn parse(input: ParseStream) -> Result<Self> {
 			let syn::group::$name { token, content } = syn::group::$parse(input)?;
 			let content = content.parse()?;
-			Ok($name { token, content, })
+			Ok($name { token, content })
 		}
 	}
@@ -60,12 +63,12 @@ macro_rules! groups_impl {
 			}
 		}

-		impl<P: Clone> Clone for $name<P> {
+		impl<P: Clone> Clone for $name<P>
{ fn clone(&self) -> Self { Self { token: self.token.clone(), content: self.content.clone() } } } - } + }; } groups_impl!(Braces, Brace, Brace, parse_braces); @@ -73,23 +76,22 @@ groups_impl!(Brackets, Bracket, Bracket, parse_brackets); groups_impl!(Parens, Paren, Parenthesis, parse_parens); #[derive(Debug)] -pub struct PunctuatedInner { - pub inner: syn::punctuated::Punctuated, +pub struct PunctuatedInner { + pub inner: syn::punctuated::Punctuated, pub variant: V, } #[derive(Debug, Clone)] pub struct NoTrailing; - #[derive(Debug, Clone)] pub struct Trailing; -pub type Punctuated = PunctuatedInner; +pub type Punctuated = PunctuatedInner; -pub type PunctuatedTrailing = PunctuatedInner; +pub type PunctuatedTrailing = PunctuatedInner; -impl Parse for PunctuatedInner { +impl Parse for PunctuatedInner { fn parse(input: ParseStream) -> Result { Ok(PunctuatedInner { inner: syn::punctuated::Punctuated::parse_separated_nonempty(input)?, @@ -98,7 +100,7 @@ impl Parse for PunctuatedInner Parse for PunctuatedInner { +impl Parse for PunctuatedInner { fn parse(input: ParseStream) -> Result { Ok(PunctuatedInner { inner: syn::punctuated::Punctuated::parse_terminated(input)?, @@ -107,13 +109,13 @@ impl Parse for PunctuatedInner { } } -impl ToTokens for PunctuatedInner { +impl ToTokens for PunctuatedInner { fn to_tokens(&self, tokens: &mut TokenStream) { self.inner.to_tokens(tokens) } } -impl Clone for PunctuatedInner { +impl Clone for PunctuatedInner { fn clone(&self) -> Self { Self { inner: self.inner.clone(), variant: self.variant.clone() } } @@ -127,9 +129,7 @@ pub struct Meta { impl Parse for Meta { fn parse(input: ParseStream) -> Result { - Ok(Meta { - inner: syn::Meta::parse(input)?, - }) + Ok(Meta { inner: syn::Meta::parse(input)? }) } } @@ -151,9 +151,7 @@ pub struct OuterAttributes { impl Parse for OuterAttributes { fn parse(input: ParseStream) -> Result { let inner = syn::Attribute::parse_outer(input)?; - Ok(OuterAttributes { - inner, - }) + Ok(OuterAttributes { inner }) } } @@ -189,13 +187,11 @@ struct ContainsIdent<'a> { impl<'ast> ContainsIdent<'ast> { fn visit_tokenstream(&mut self, stream: TokenStream) { - stream.into_iter().for_each(|tt| - match tt { - TokenTree::Ident(id) => self.visit_ident(&id), - TokenTree::Group(ref group) => self.visit_tokenstream(group.stream()), - _ => {} - } - ) + stream.into_iter().for_each(|tt| match tt { + TokenTree::Ident(id) => self.visit_ident(&id), + TokenTree::Group(ref group) => self.visit_tokenstream(group.stream()), + _ => {}, + }) } fn visit_ident(&mut self, ident: &Ident) { @@ -218,10 +214,7 @@ impl<'ast> Visit<'ast> for ContainsIdent<'ast> { /// Check if a `Type` contains the given `Ident`. pub fn type_contains_ident(typ: &syn::Type, ident: &Ident) -> bool { - let mut visit = ContainsIdent { - result: false, - ident, - }; + let mut visit = ContainsIdent { result: false, ident }; visit::visit_type(&mut visit, typ); visit.result @@ -229,10 +222,7 @@ pub fn type_contains_ident(typ: &syn::Type, ident: &Ident) -> bool { /// Check if a `Expr` contains the given `Ident`. pub fn expr_contains_ident(expr: &syn::Expr, ident: &Ident) -> bool { - let mut visit = ContainsIdent { - result: false, - ident, - }; + let mut visit = ContainsIdent { result: false, ident }; visit::visit_expr(&mut visit, expr); visit.result diff --git a/frame/support/src/debug.rs b/frame/support/src/debug.rs deleted file mode 100644 index 04f5c529f0aff..0000000000000 --- a/frame/support/src/debug.rs +++ /dev/null @@ -1,247 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Runtime debugging and logging utilities. -//! -//! This module contains macros and functions that will allow -//! you to print logs out of the runtime code. -//! -//! First and foremost be aware that adding regular logging code to -//! your runtime will have a negative effect on the performance -//! and size of the blob. Luckily there are some ways to mitigate -//! this that are described below. -//! -//! First component to utilize debug-printing and logging is actually -//! located in `primitives` crate: `sp_core::RuntimeDebug`. -//! This custom-derive generates `core::fmt::Debug` implementation, -//! just like regular `derive(Debug)`, however it does not generate -//! any code when the code is compiled to WASM. This means that -//! you can safely sprinkle `RuntimeDebug` in your runtime codebase, -//! without affecting the size. This also allows you to print/log -//! both when the code is running natively or in WASM, but note -//! that WASM debug formatting of structs will be empty. -//! -//! ```rust,no_run -//! use frame_support::debug; -//! -//! #[derive(sp_core::RuntimeDebug)] -//! struct MyStruct { -//! a: u64, -//! } -//! -//! // First initialize the logger. -//! // -//! // This is only required when you want the logs to be printed -//! // also during non-native run. -//! // Note that enabling the logger has performance impact on -//! // WASM runtime execution and should be used sparingly. -//! debug::RuntimeLogger::init(); -//! -//! let x = MyStruct { a: 5 }; -//! // will log an info line `"My struct: MyStruct{a:5}"` when running -//! // natively, but will only print `"My struct: "` when running WASM. -//! debug::info!("My struct: {:?}", x); -//! -//! // same output here, although this will print to stdout -//! // (and without log format) -//! debug::print!("My struct: {:?}", x); -//! ``` -//! -//! If you want to avoid extra overhead in WASM, but still be able -//! to print / log when the code is executed natively you can use -//! macros coming from `native` sub-module. This module enables -//! logs conditionally and strips out logs in WASM. -//! -//! ```rust,no_run -//! use frame_support::debug::native; -//! -//! #[derive(sp_core::RuntimeDebug)] -//! struct MyStruct { -//! a: u64, -//! } -//! -//! // We don't initialize the logger, since -//! // we are not printing anything out in WASM. -//! // debug::RuntimeLogger::init(); -//! -//! let x = MyStruct { a: 5 }; -//! -//! // Displays an info log when running natively, nothing when WASM. -//! native::info!("My struct: {:?}", x); -//! -//! // same output to stdout, no overhead on WASM. -//! native::print!("My struct: {:?}", x); -//! ``` - -use sp_std::fmt::{self, Debug}; - -pub use log::{info, debug, error, trace, warn}; -pub use crate::runtime_print as print; -pub use sp_std::Writer; - -/// Native-only logging. 
-/// -/// Using any functions from this module will have any effect -/// only if the runtime is running natively (i.e. not via WASM) -#[cfg(feature = "std")] -pub mod native { - pub use super::{info, debug, error, trace, warn, print}; -} - -/// Native-only logging. -/// -/// Using any functions from this module will have any effect -/// only if the runtime is running natively (i.e. not via WASM) -#[cfg(not(feature = "std"))] -pub mod native { - #[macro_export] - macro_rules! noop { - ($($arg:tt)+) => {} - } - pub use noop as info; - pub use noop as debug; - pub use noop as error; - pub use noop as trace; - pub use noop as warn; - pub use noop as print; -} - -/// Print out a formatted message. -/// -/// # Example -/// -/// ``` -/// frame_support::runtime_print!("my value is {}", 3); -/// ``` -#[macro_export] -macro_rules! runtime_print { - ($($arg:tt)+) => { - { - use core::fmt::Write; - let mut w = $crate::sp_std::Writer::default(); - let _ = core::write!(&mut w, $($arg)+); - sp_io::misc::print_utf8(&w.inner()) - } - } -} - -/// Print out the debuggable type. -pub fn debug(data: &impl Debug) { - runtime_print!("{:?}", data); -} - -/// Runtime logger implementation - `log` crate backend. -/// -/// The logger should be initialized if you want to display -/// logs inside the runtime that is not necessarily running natively. -/// -/// When runtime is executed natively any log statements are displayed -/// even if this logger is NOT initialized. -/// -/// Note that even though the logs are not displayed in WASM, they -/// may still affect the size and performance of the generated runtime. -/// To lower the footprint make sure to only use macros from `native` -/// sub-module. -pub struct RuntimeLogger; - -impl RuntimeLogger { - /// Initialize the logger. - /// - /// This is a no-op when running natively (`std`). - #[cfg(feature = "std")] - pub fn init() {} - - /// Initialize the logger. - /// - /// This is a no-op when running natively (`std`). - #[cfg(not(feature = "std"))] - pub fn init() { - static LOGGER: RuntimeLogger = RuntimeLogger; - let _ = log::set_logger(&LOGGER); - - // Set max level to `TRACE` to ensure we propagate - // all log entries to the native side that will do the - // final filtering on what should be printed. - // - // If we don't set any level, logging is disabled - // completly. - log::set_max_level(log::LevelFilter::Trace); - } -} - -impl log::Log for RuntimeLogger { - fn enabled(&self, _metadata: &log::Metadata) -> bool { - // to avoid calling to host twice, we pass everything - // and let the host decide what to print. - // If someone is initializing the logger they should - // know what they are doing. 
- true - } - - fn log(&self, record: &log::Record) { - use fmt::Write; - let mut w = sp_std::Writer::default(); - let _ = core::write!(&mut w, "{}", record.args()); - - sp_io::logging::log( - record.level().into(), - record.target(), - w.inner(), - ); - } - - fn flush(&self) {} -} - -#[cfg(test)] -mod tests { - use substrate_test_runtime_client::{ - ExecutionStrategy, TestClientBuilderExt, DefaultTestClientBuilderExt, - TestClientBuilder, runtime::TestAPI, - }; - use sp_api::ProvideRuntimeApi; - use sp_runtime::generic::BlockId; - - #[test] - fn ensure_runtime_logger_works() { - let executable = std::env::current_exe().unwrap(); - let output = std::process::Command::new(executable) - .env("RUN_TEST", "1") - .env("RUST_LOG", "trace") - .args(&["--nocapture", "ensure_runtime_logger_works_implementation"]) - .output() - .unwrap(); - - let output = dbg!(String::from_utf8(output.stderr).unwrap()); - assert!(output.contains("Hey I'm runtime")); - } - - /// This is no actual test. It will be called by `ensure_runtime_logger_works` - /// to check that the runtime can print from the wasm side using the - /// `RuntimeLogger`. - #[test] - fn ensure_runtime_logger_works_implementation() { - if std::env::var("RUN_TEST").is_ok() { - sp_tracing::try_init_simple(); - - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(0); - runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); - } - } -} diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 057bba6b8f74e..2e6777fee2af2 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,20 +18,24 @@ //! Dispatch system. Contains a macro for defining runtime modules and //! generating values representing lazy module function calls. -pub use crate::sp_std::{result, fmt, prelude::{Vec, Clone, Eq, PartialEq}, marker}; -pub use crate::codec::{Codec, EncodeLike, Decode, Encode, Input, Output, HasCompact, EncodeAsRef}; -pub use frame_metadata::{ - FunctionMetadata, DecodeDifferent, DecodeDifferentArray, FunctionArgumentMetadata, - ModuleConstantMetadata, DefaultByte, DefaultByteGetter, ModuleErrorMetadata, ErrorMetadata -}; -pub use crate::weights::{ - GetDispatchInfo, DispatchInfo, WeighData, ClassifyDispatch, TransactionPriority, Weight, - PaysFee, PostDispatchInfo, WithPostDispatchInfo, +pub use crate::{ + codec::{Codec, Decode, Encode, EncodeAsRef, EncodeLike, HasCompact, Input, Output}, + sp_std::{ + fmt, marker, + prelude::{Clone, Eq, PartialEq, Vec}, + result, + }, + traits::{ + CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, UnfilteredDispatchable, + }, + weights::{ + ClassifyDispatch, DispatchInfo, GetDispatchInfo, PaysFee, PostDispatchInfo, + TransactionPriority, WeighData, Weight, WithPostDispatchInfo, + }, }; pub use sp_runtime::{traits::Dispatchable, DispatchError}; -pub use crate::traits::{CallMetadata, GetCallMetadata, GetCallName, UnfilteredDispatchable}; -/// The return typ of a `Dispatchable` in frame. When returned explicitly from +/// The return type of a `Dispatchable` in frame. 
When returned explicitly from /// a dispatchable function it allows overriding the default `PostDispatchInfo` /// returned from a dispatch. pub type DispatchResultWithPostInfo = @@ -59,8 +63,8 @@ pub type CallableCallFor = >::Call; /// A type that can be used as a parameter in a dispatchable function. /// /// When using `decl_module` all arguments for call functions must implement this trait. -pub trait Parameter: Codec + EncodeLike + Clone + Eq + fmt::Debug {} -impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} +pub trait Parameter: Codec + EncodeLike + Clone + Eq + fmt::Debug + scale_info::TypeInfo {} +impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + scale_info::TypeInfo {} /// Declares a `Module` struct and a `Call` enum, which implements the dispatch logic. /// @@ -70,35 +74,36 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// /// // Private functions are dispatchable, but not available to other /// // FRAME pallets. /// #[weight = 0] /// fn my_function(origin, var: u64) -> dispatch::DispatchResult { -/// // Your implementation -/// Ok(()) +/// // Your implementation +/// Ok(()) /// } /// -/// // Public functions are both dispatchable and available to other +/// // Public functions are both dispatchable and available to other /// // FRAME pallets. /// #[weight = 0] -/// pub fn my_public_function(origin) -> dispatch::DispatchResult { +/// pub fn my_public_function(origin) -> dispatch::DispatchResult { /// // Your implementation -/// Ok(()) +/// Ok(()) +/// } /// } -/// } /// } /// # fn main() {} /// ``` /// /// The declaration is set with the header where: /// -/// * `Module`: The struct generated by the macro, with type `Trait`. -/// * `Call`: The enum generated for every pallet, which implements [`Callable`](./dispatch/trait.Callable.html). -/// * `origin`: Alias of `T::Origin`, declared by the [`impl_outer_origin!`](./macro.impl_outer_origin.html) macro. +/// * `Module`: The struct generated by the macro, with type `Config`. +/// * `Call`: The enum generated for every pallet, which implements +/// [`Callable`](./dispatch/trait.Callable.html). +/// * `origin`: Alias of `T::Origin`. /// * `Result`: The expected return type from pallet functions. /// /// The first parameter of dispatchable functions must always be `origin`. @@ -112,20 +117,20 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! 
{ -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_long_function(origin) -> dispatch::DispatchResult { -/// // Your implementation +/// // Your implementation /// Ok(()) /// } /// /// #[weight = 0] /// fn my_short_function(origin) { -/// // Your implementation +/// // Your implementation +/// } /// } -/// } /// } /// # fn main() {} /// ``` @@ -147,9 +152,9 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; -/// # use frame_system::{Trait, ensure_signed}; +/// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 1_000_000] /// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { /// ensure_signed(origin).map_err(|e| e.with_weight(100_000))?; @@ -176,13 +181,13 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::transactional; -/// # use frame_system::Trait; +/// # use frame_system::Config; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// #[transactional] /// fn my_short_function(origin) { -/// // Your implementation +/// // Your implementation /// } /// } /// } @@ -197,16 +202,16 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{Trait, ensure_signed, ensure_root}; +/// # use frame_system::{Config, ensure_signed, ensure_root}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] -/// fn my_privileged_function(origin) -> dispatch::DispatchResult { +/// fn my_privileged_function(origin) -> dispatch::DispatchResult { /// ensure_root(origin)?; -/// // Your implementation +/// // Your implementation /// Ok(()) /// } -/// } +/// } /// } /// # fn main() {} /// ``` @@ -216,28 +221,30 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// Attributes on functions are supported, but must be in the order of: /// 1. Optional #\[doc\] attribute. /// 2. #\[weight\] attribute. -/// 3. Optional function attributes, for instance #\[transactional\]. Those function attributes will be written -/// only on the dispatchable functions implemented on `Module`, not on the `Call` enum variant. +/// 3. Optional function attributes, for instance #\[transactional\]. Those function attributes will +/// be written only on the dispatchable functions implemented on `Module`, not on the `Call` enum +/// variant. /// /// ## Multiple Module Instances Example /// -/// A Substrate module can be built such that multiple instances of the same module can be used within a single -/// runtime. For example, the [Balances module](../pallet_balances/index.html) can be added multiple times to your -/// runtime in order to support multiple, independent currencies for your blockchain. 
Here is an example of how -/// you would declare such a module using the `decl_module!` macro: +/// A Substrate module can be built such that multiple instances of the same module can be used +/// within a single runtime. For example, the [Balances module](../pallet_balances/index.html) can +/// be added multiple times to your runtime in order to support multiple, independent currencies for +/// your blockchain. Here is an example of how you would declare such a module using the +/// `decl_module!` macro: /// /// ``` /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; -/// # use frame_system::{self as system, ensure_signed}; +/// # use frame_system::ensure_signed; /// # pub struct DefaultInstance; -/// # pub trait Instance {} +/// # pub trait Instance: 'static {} /// # impl Instance for DefaultInstance {} -/// pub trait Trait: system::Trait {} +/// pub trait Config: frame_system::Config {} /// /// decl_module! { -/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { +/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { /// // Your implementation /// } /// } @@ -249,20 +256,20 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// /// ## Where clause /// -/// Besides the default `origin: T::Origin`, you can also pass other bounds to the module declaration. -/// This where bound will be replicated to all types generated by this macro. The chaining of multiple -/// trait bounds with `+` is not supported. If multiple bounds for one type are required, it needs to -/// be split up into multiple bounds. +/// Besides the default `origin: T::Origin`, you can also pass other bounds to the module +/// declaration. This where bound will be replicated to all types generated by this macro. The +/// chaining of multiple trait bounds with `+` is not supported. If multiple bounds for one type are +/// required, it needs to be split up into multiple bounds. /// /// ``` /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; /// # use frame_system::{self as system, ensure_signed}; -/// pub trait Trait: system::Trait where Self::AccountId: From {} +/// pub trait Config: system::Config where Self::AccountId: From {} /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { +/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { /// // Your implementation /// } /// } @@ -274,16 +281,18 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// The following are reserved function signatures: /// /// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.dev/docs/event-enum). -/// The default behavior is to call `deposit_event` from the [System module](../frame_system/index.html). -/// However, you can write your own implementation for events in your runtime. To use the default behavior, -/// add `fn deposit_event() = default;` to your `Module`. +/// The default behavior is to call `deposit_event` from the [System +/// module](../frame_system/index.html). However, you can write your own implementation for events +/// in your runtime. To use the default behavior, add `fn deposit_event() = default;` to your +/// `Module`. 
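+///
+/// A minimal sketch of that default wiring (the event variant and names here are
+/// illustrative, not taken from this change):
+///
+/// ```
+/// # #[macro_use]
+/// # extern crate frame_support;
+/// pub trait Config: frame_system::Config {
+///     type Event: From<Event> + Into<<Self as frame_system::Config>::Event>;
+/// }
+///
+/// decl_event! {
+///     pub enum Event {
+///         Dummy(u32),
+///     }
+/// }
+///
+/// decl_module! {
+///     pub struct Module<T: Config> for enum Call where origin: T::Origin {
+///         fn deposit_event() = default;
+///     }
+/// }
+/// # fn main() {}
+/// ```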
/// -/// The following reserved functions also take the block number (with type `T::BlockNumber`) as an optional input: +/// The following reserved functions also take the block number (with type `T::BlockNumber`) as an +/// optional input: /// /// * `on_runtime_upgrade`: Executes at the beginning of a block prior to on_initialize when there -/// is a runtime upgrade. This allows each module to upgrade its storage before the storage items are used. -/// As such, **calling other modules must be avoided**!! Using this function will implement the -/// [`OnRuntimeUpgrade`](../sp_runtime/traits/trait.OnRuntimeUpgrade.html) trait. +/// is a runtime upgrade. This allows each module to upgrade its storage before the storage items +/// are used. As such, **calling other modules must be avoided**!! Using this function will +/// implement the [`OnRuntimeUpgrade`](../sp_runtime/traits/trait.OnRuntimeUpgrade.html) trait. /// Function signature must be `fn on_runtime_upgrade() -> frame_support::weights::Weight`. /// /// * `on_initialize`: Executes at the beginning of a block. Using this function will @@ -292,17 +301,23 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// * `fn on_initialize(n: BlockNumber) -> frame_support::weights::Weight` or /// * `fn on_initialize() -> frame_support::weights::Weight` /// +/// * `on_idle`: Executes at the end of a block. Passes a remaining weight to provide a threshold +/// for when to execute non vital functions. Using this function will implement the +/// [`OnIdle`](./traits/trait.OnIdle.html) trait. +/// Function signature is: +/// * `fn on_idle(n: BlockNumber, remaining_weight: Weight) -> frame_support::weights::Weight` +/// /// * `on_finalize`: Executes at the end of a block. Using this function will /// implement the [`OnFinalize`](./traits/trait.OnFinalize.html) trait. /// Function signature can be either: /// * `fn on_finalize(n: BlockNumber) -> frame_support::weights::Weight` or /// * `fn on_finalize() -> frame_support::weights::Weight` /// -/// * `offchain_worker`: Executes at the beginning of a block and produces extrinsics for a future block -/// upon completion. Using this function will implement the +/// * `offchain_worker`: Executes at the beginning of a block and produces extrinsics for a future +/// block upon completion. Using this function will implement the /// [`OffchainWorker`](./traits/trait.OffchainWorker.html) trait. -/// * `integrity_test`: Executes in a test generated by `construct_runtime`, note it doesn't -/// execute in an externalities-provided environment. Implement +/// * `integrity_test`: Executes in a test generated by `construct_runtime`, note it doesn't execute +/// in an externalities-provided environment. Implement /// [`IntegrityTest`](./trait.IntegrityTest.html) trait. #[macro_export] macro_rules! decl_module { @@ -332,6 +347,8 @@ macro_rules! decl_module { {} {} {} + {} + {} [] $($t)* ); @@ -367,6 +384,8 @@ macro_rules! decl_module { {} {} {} + {} + {} [] $($t)* ); @@ -381,11 +400,13 @@ macro_rules! decl_module { {} { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $vis:vis fn deposit_event() = default; @@ -399,11 +420,13 @@ macro_rules! 
decl_module { { $vis fn deposit_event() = default; } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } { $( $error_type )* } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -416,11 +439,13 @@ macro_rules! decl_module { {} { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $vis:vis fn deposit_event @@ -441,11 +466,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )+ } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $vis:vis fn deposit_event() = default; @@ -462,11 +489,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } {} { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn on_finalize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } @@ -480,13 +509,15 @@ macro_rules! decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { fn on_finalize( $( $param_name : $param ),* ) { $( $impl )* } } { $( $offchain )* } { $( $constants )* } { $( $error_type )* } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -500,11 +531,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } {} { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* #[weight = $weight:expr] @@ -527,11 +560,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )+ } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* #[weight = $weight:expr] @@ -540,6 +575,75 @@ macro_rules! 
decl_module {
	) => {
		compile_error!("`on_finalize` can only be passed once as input.");
	};
+
+	// Add on_idle
+	(@normalize
+		$(#[$attr:meta])*
+		pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?>
+		for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident
+		{ $( $other_where_bounds:tt )* }
+		{ $( $deposit_event:tt )* }
+		{ $( $on_initialize:tt )* }
+		{ $( $on_runtime_upgrade:tt )* }
+		{}
+		{ $( $on_finalize:tt )* }
+		{ $( $offchain:tt )* }
+		{ $( $constants:tt )* }
+		{ $( $error_type:tt )* }
+		{ $( $integrity_test:tt )* }
+		{ $( $storage_version:tt )* }
+		[ $( $dispatchables:tt )* ]
+		$(#[doc = $doc_attr:tt])*
+		fn on_idle($param_name1:ident : $param1:ty, $param_name2:ident: $param2:ty $(,)? ) -> $return:ty { $( $impl:tt )* }
+		$($rest:tt)*
+	) => {
+		$crate::decl_module!(@normalize
+			$(#[$attr])*
+			pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?>
+			for enum $call_type where origin: $origin_type, system = $system
+			{ $( $other_where_bounds )* }
+			{ $( $deposit_event )* }
+			{ $( $on_initialize )* }
+			{ $( $on_runtime_upgrade )* }
+			{
+				fn on_idle( $param_name1: $param1, $param_name2: $param2 ) -> $return { $( $impl )* }
+			}
+			{ $( $on_finalize )* }
+			{ $( $offchain )* }
+			{ $( $constants )* }
+			{ $( $error_type )* }
+			{ $( $integrity_test )* }
+			{ $( $storage_version )* }
+			[ $( $dispatchables )* ]
+			$($rest)*
+		);
+	};
+	// compile_error for invalid on_idle function signature in decl_module
+	(@normalize
+		$(#[$attr:meta])*
+		pub struct $mod_type:ident<
+			$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?
+		>
+		for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident
+		{ $( $other_where_bounds:tt )* }
+		{ $( $deposit_event:tt )* }
+		{ $( $on_initialize:tt )* }
+		{ $( $on_runtime_upgrade:tt )* }
+		{ $( $on_idle:tt )* }
+		{ $( $on_finalize:tt )* }
+		{ $( $offchain:tt )* }
+		{ $( $constants:tt )* }
+		{ $( $error_type:tt )* }
+		{ $( $integrity_test:tt )* }
+		{ $( $storage_version:tt )* }
+		[ $( $dispatchables:tt )* ]
+		$(#[doc = $doc_attr:tt])*
+		$(#[weight = $weight:expr])?
+		fn on_idle
+		$($rest:tt)*
+	) => {
+		compile_error!("`on_idle` method is reserved and syntax doesn't match expected syntax.");
+	};
+
	// compile_error on_runtime_upgrade, without a given weight removed syntax.
	(@normalize
		$(#[$attr:meta])*
@@ -551,11 +655,13 @@ macro_rules! decl_module {
		{ $( $deposit_event:tt )* }
		{ $( $on_initialize:tt )* }
		{}
+		{ $( $on_idle:tt )* }
		{ $( $on_finalize:tt )* }
		{ $( $offchain:tt )* }
		{ $( $constants:tt )* }
		{ $( $error_type:tt )* }
		{ $( $integrity_test:tt )* }
+		{ $( $storage_version:tt )* }
		[ $( $dispatchables:tt )* ]
		$(#[doc = $doc_attr:tt])*
		fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* }
@@ -576,11 +682,13 @@ macro_rules! decl_module {
		{ $( $deposit_event:tt )* }
		{ $( $on_initialize:tt )* }
		{}
+		{ $( $on_idle:tt )* }
		{ $( $on_finalize:tt )* }
		{ $( $offchain:tt )* }
		{ $( $constants:tt )* }
		{ $( $error_type:tt )* }
		{ $( $integrity_test:tt )* }
+		{ $( $storage_version:tt )* }
		[ $( $dispatchables:tt )* ]
		$(#[doc = $doc_attr:tt])*
		#[weight = $weight:expr]
@@ -603,11 +711,13 @@ macro_rules!
decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } {} + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } @@ -623,11 +733,13 @@ macro_rules! decl_module { { fn on_runtime_upgrade( $( $param_name : $param ),* ) -> $return { $( $impl )* } } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } { $( $error_type )* } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -643,11 +755,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )+ } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } @@ -666,11 +780,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } {} + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn integrity_test() { $( $impl:tt )* } @@ -684,6 +800,7 @@ macro_rules! decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -692,6 +809,7 @@ macro_rules! decl_module { $(#[doc = $doc_attr])* fn integrity_test() { $( $impl)* } } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -707,11 +825,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )+ } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn integrity_test() { $( $impl:tt )* } @@ -730,11 +850,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } {} { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } @@ -755,11 +877,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } {} { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* #[weight = $weight:expr] @@ -782,11 +906,13 @@ macro_rules! 
decl_module { { $( $deposit_event:tt )* } {} { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } @@ -802,11 +928,13 @@ macro_rules! decl_module { fn on_initialize( $( $param_name : $param ),* ) -> $return { $( $impl )* } } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } { $( $error_type )* } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -822,11 +950,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )+ } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } @@ -845,11 +975,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn offchain_worker( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } @@ -865,11 +997,13 @@ macro_rules! decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { fn offchain_worker( $( $param_name : $param ),* ) { $( $impl )* } } { $( $constants )* } { $( $error_type )* } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -885,11 +1019,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )+ } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* fn offchain_worker( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } @@ -909,11 +1045,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $( #[doc = $doc_attr:tt] )* const $name:ident: $ty:ty = $value:expr; @@ -930,6 +1068,7 @@ macro_rules! decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { @@ -938,7 +1077,8 @@ macro_rules! decl_module { $name: $ty = $value; } { $( $error_type )* } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -956,11 +1096,13 @@ macro_rules! 
decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* type Error = $error_type:ty; @@ -976,11 +1118,13 @@ macro_rules! decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } { $error_type } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } [ $( $dispatchables )* ] $($rest)* ); @@ -997,11 +1141,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $($t:tt)* ] $($rest:tt)* ) => { @@ -1015,16 +1161,64 @@ macro_rules! decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } - { &'static str } - { $( $integrity_test)* } + { __NO_ERROR_DEFINED } + { $( $integrity_test )* } + { $( $storage_version )* } [ $($t)* ] $($rest)* ); }; + // Parse storage version + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: + $trait_name:ident$(, $instance:ident: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + { $( $integrity_test:tt )* } + { } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + type StorageVersion = $storage_version:path; + $($rest:tt)* + ) => { + $crate::decl_module!(@normalize + $(#[$attr])* + pub struct $mod_type< + $trait_instance: $trait_name$(, $instance: $instantiable $(= $module_default_instance)?)? + > + for enum $call_type where origin: $origin_type, system = $system + { $( $other_where_bounds )* } + { $( $deposit_event )* } + { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } + { $( $on_idle )* } + { $( $on_finalize )* } + { $( $offchain )* } + { $( $constants )* } + { $( $error_type )* } + { $( $integrity_test)* } + { $storage_version } + [ $( $dispatchables )* ] + $($rest)* + ); + }; + // This puts the function statement into the [], decreasing `$rest` and moving toward finishing the parse. (@normalize $(#[$attr:meta])* @@ -1037,11 +1231,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } - { $error_type:ty } + { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* #[weight = $weight:expr] @@ -1061,11 +1257,13 @@ macro_rules! 
decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } - { $error_type } + { $( $error_type )* } { $( $integrity_test)* } + { $( $storage_version )* } [ $( $dispatchables )* $(#[doc = $doc_attr])* @@ -1091,11 +1289,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $(#[$fn_attr:meta])* @@ -1119,11 +1319,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $(#[weight = $weight:expr])? @@ -1147,11 +1349,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $(#[weight = $weight:expr])? @@ -1175,11 +1379,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] $(#[doc = $doc_attr:tt])* $(#[weight = $weight:expr])? @@ -1204,11 +1410,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } [ $( $dispatchables:tt )* ] ) => { $crate::decl_module!(@imp @@ -1221,11 +1429,13 @@ macro_rules! decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { $( $on_runtime_upgrade )* } + { $( $on_idle )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } { $( $error_type )* } - { $( $integrity_test)* } + { $( $integrity_test )* } + { $( $storage_version )* } ); }; @@ -1255,11 +1465,11 @@ macro_rules! decl_module { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { - /// Deposits an event using `frame_system::Module::deposit_event`. + /// Deposits an event using `frame_system::Pallet::deposit_event`. $vis fn deposit_event( event: impl Into<< $trait_instance as $trait_name $(<$instance>)? >::Event> ) { - <$system::Module<$trait_instance>>::deposit_event(event.into()) + <$system::Pallet<$trait_instance>>::deposit_event(event.into()) } } }; @@ -1270,11 +1480,11 @@ macro_rules! 
decl_module { { $( $other_where_bounds:tt )* } fn on_initialize() -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_initialize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) -> $return { + fn on_initialize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) -> $return { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_initialize")); { $( $impl )* } } @@ -1287,8 +1497,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn on_initialize($param:ident : $param_ty:ty) -> $return:ty { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_initialize($param: $param_ty) -> $return { @@ -1303,13 +1513,14 @@ macro_rules! decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnInitialize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnInitialize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; (@impl_on_runtime_upgrade + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn on_runtime_upgrade() -> $return:ty { $( $impl:tt )* } @@ -1320,19 +1531,72 @@ macro_rules! decl_module { { fn on_runtime_upgrade() -> $return { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); - { $( $impl )* } + let pallet_name = << + $trait_instance + as + $system::Config + >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); + + $crate::log::info!( + target: $crate::LOG_TARGET, + "⚠️ {} declares internal migrations (which *might* execute). 
\ + On-chain `{:?}` vs current storage version `{:?}`", + pallet_name, + ::on_chain_storage_version(), + ::current_storage_version(), + ); + + (|| { $( $impl )* })() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) } } }; (@impl_on_runtime_upgrade + { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> $crate::traits::OnRuntimeUpgrade for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* - {} + { + fn on_runtime_upgrade() -> $crate::dispatch::Weight { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); + let pallet_name = << + $trait_instance + as + $system::Config + >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); + + $crate::log::info!( + target: $crate::LOG_TARGET, + "✅ no migration for {}", + pallet_name, + ); + + 0 + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) + } + } }; (@impl_integrity_test @@ -1370,11 +1634,11 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn on_finalize() { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_finalize(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { + fn on_finalize(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_finalize")); { $( $impl )* } } @@ -1387,8 +1651,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn on_finalize($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_finalize($param: $param_ty) { @@ -1403,8 +1667,37 @@ macro_rules! 
decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OnFinalize<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnFinalize<<$trait_instance as $system::Config>::BlockNumber> + for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* + { + } + }; + + (@impl_on_idle + { $system:ident } + $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; + { $( $other_where_bounds:tt )* } + fn on_idle($param1:ident : $param1_ty:ty, $param2:ident: $param2_ty:ty) -> $return:ty { $( $impl:tt )* } + ) => { + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnIdle<<$trait_instance as $system::Config>::BlockNumber> + for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* + { + fn on_idle($param1: $param1_ty, $param2: $param2_ty) -> $return { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_idle")); + { $( $impl )* } + } + } + }; + + (@impl_on_idle + { $system:ident } + $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; + { $( $other_where_bounds:tt )* } + ) => { + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OnIdle<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { } @@ -1416,11 +1709,11 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn offchain_worker() { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Trait>::BlockNumber) { $( $impl )* } + fn offchain_worker(_block_number_not_used: <$trait_instance as $system::Config>::BlockNumber) { $( $impl )* } } }; @@ -1430,8 +1723,8 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } fn offchain_worker($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn offchain_worker($param: $param_ty) { $( $impl )* } @@ -1443,8 +1736,8 @@ macro_rules! 
decl_module { $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } ) => { - impl<$trait_instance: $system::Trait + $trait_name$(, $instance: $instantiable)?> - $crate::traits::OffchainWorker<<$trait_instance as $system::Trait>::BlockNumber> + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::OffchainWorker<<$trait_instance as $system::Config>::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1453,7 +1746,6 @@ macro_rules! decl_module { (@impl_function $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; $origin_ty:ty; - $error_type:ty; $ignore:ident; $(#[$fn_attr:meta])* $vis:vis fn $name:ident ( @@ -1475,7 +1767,6 @@ macro_rules! decl_module { (@impl_function $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; $origin_ty:ty; - $error_type:ty; $ignore:ident; $(#[$fn_attr:meta])* $vis:vis fn $name:ident ( @@ -1499,7 +1790,7 @@ macro_rules! decl_module { variant $fn_name:ident; $( #[doc = $doc_attr:tt] )* #[compact] - $type:ty; + $name:ident : $type:ty; $( $rest:tt )* ) => { $crate::decl_module! { @@ -1511,7 +1802,7 @@ macro_rules! decl_module { { $( $current_params )* #[codec(compact)] - $type, + $name: $type, } variant $fn_name; $( #[doc = $doc_attr] )* @@ -1528,7 +1819,7 @@ macro_rules! decl_module { { $( $current_params:tt )* } variant $fn_name:ident; $(#[doc = $doc_attr:tt])* - $type:ty; + $name:ident : $type:ty; $( $rest:tt )* ) => { $crate::decl_module! { @@ -1539,7 +1830,7 @@ macro_rules! decl_module { { $( $generated_variants )* } { $( $current_params )* - $type, + $name: $type, } variant $fn_name; $( #[doc = $doc_attr] )* @@ -1569,9 +1860,9 @@ macro_rules! decl_module { $( $generated_variants )* #[allow(non_camel_case_types)] $(#[doc = $doc_attr])* - $fn_name ( + $fn_name { $( $current_params )* - ), + }, } {} $( @@ -1591,7 +1882,8 @@ macro_rules! decl_module { /// Dispatchable calls. /// /// Each variant of this enum maps to a dispatchable function from the associated module. - #[derive($crate::codec::Encode, $crate::codec::Decode)] + #[derive($crate::codec::Encode, $crate::codec::Decode, $crate::scale_info::TypeInfo)] + #[scale_info(skip_type_params($trait_instance $(, $instance)?), capture_docs = "always")] pub enum $call_type<$trait_instance: $trait_name$(, $instance: $instantiable $( = $module_default_instance)?)?> where $( $other_where_bounds )* { @@ -1602,6 +1894,45 @@ macro_rules! decl_module { } }; + // Implementation for `GetStorageVersion`. + (@impl_get_storage_version + $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; + { $( $other_where_bounds:tt )* } + $( $storage_version:tt )+ + ) => { + // Implement `GetStorageVersion` for `Pallet` + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::GetStorageVersion + for $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* + { + fn current_storage_version() -> $crate::traits::StorageVersion { + $( $storage_version )* + } + + fn on_chain_storage_version() -> $crate::traits::StorageVersion { + $crate::traits::StorageVersion::get::() + } + } + }; + + // Implementation for `GetStorageVersion` when no storage version is passed. 
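+ //
+ // Hedged usage sketch (illustrative only, not part of this change; the
+ // `migrations` module and `STORAGE_VERSION` constant below are hypothetical):
+ //
+ //     mod migrations {
+ //         use frame_support::traits::StorageVersion;
+ //         pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
+ //     }
+ //
+ //     decl_module! {
+ //         pub struct Module<T: Config> for enum Call where origin: T::Origin, system = system {
+ //             type StorageVersion = migrations::STORAGE_VERSION;
+ //             // ... dispatchables ...
+ //         }
+ //     }
+ //
+ // With such a declaration, the arm above makes `current_storage_version()`
+ // return the declared constant; without it, the arm below falls back to
+ // `StorageVersion::default()`. In both cases `on_chain_storage_version()`
+ // reads whatever version was last written to storage (e.g. by `OnGenesis`
+ // or a migration).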
+ (@impl_get_storage_version + $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; + { $( $other_where_bounds:tt )* } + ) => { + // Implement `GetStorageVersion` for `Pallet` + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::GetStorageVersion + for $module<$trait_instance $(, $instance)?> where $( $other_where_bounds )* + { + fn current_storage_version() -> $crate::traits::StorageVersion { + Default::default() + } + + fn on_chain_storage_version() -> $crate::traits::StorageVersion { + $crate::traits::StorageVersion::get::() + } + } + }; + // The main macro expansion that actually renders the module code. (@imp @@ -1625,11 +1956,13 @@ macro_rules! decl_module { { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } { $( $on_runtime_upgrade:tt )* } + { $( $on_idle:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } - { $error_type:ty } + { $( $error_type:tt )* } { $( $integrity_test:tt )* } + { $( $storage_version:tt )* } ) => { $crate::__check_reserved_fn_name! { $( $fn_name )* } @@ -1642,6 +1975,11 @@ macro_rules! decl_module { >($crate::sp_std::marker::PhantomData<($trait_instance, $( $instance)?)>) where $( $other_where_bounds )*; + /// Type alias to `Module`, to be used by `construct_runtime`. + #[allow(dead_code)] + pub type Pallet<$trait_instance $(, $instance $( = $module_default_instance)?)?> + = $mod_type<$trait_instance $(, $instance)?>; + $crate::decl_module! { @impl_on_initialize { $system } @@ -1652,6 +1990,7 @@ macro_rules! decl_module { $crate::decl_module! { @impl_on_runtime_upgrade + { $system } $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; { $( $other_where_bounds )* } $( $on_runtime_upgrade )* @@ -1665,6 +2004,14 @@ macro_rules! decl_module { $( $on_finalize )* } + $crate::decl_module! { + @impl_on_idle + { $system } + $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; + { $( $other_where_bounds )* } + $( $on_idle )* + } + $crate::decl_module! { @impl_offchain { $system } @@ -1672,6 +2019,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } $( $offchain )* } + $crate::decl_module! { @impl_deposit_event $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; @@ -1698,7 +2046,6 @@ macro_rules! decl_module { @impl_function $mod_type<$trait_instance: $trait_name $(, $fn_instance: $fn_instantiable)?>; $origin_type; - $error_type; $from; $(#[doc = $doc_attr])* /// @@ -1723,11 +2070,35 @@ macro_rules! decl_module { $(#[doc = $doc_attr])* $( $(#[$codec_attr])* - $param; + $param_name : $param; )* )* } + $crate::paste::paste! { + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> + $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* + { + $( + #[doc = "Create a call with the variant `" $fn_name "`."] + pub fn [< new_call_variant_ $fn_name >]( + $( $param_name: $param ),* + ) -> Self { + Self::$fn_name { + $( $param_name ),* + } + } + )* + } + } + + $crate::decl_module! { + @impl_get_storage_version + $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; + { $( $other_where_bounds )* } + $( $storage_version )* + } + // Implement weight calculation function for Call impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::GetDispatchInfo for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* @@ -1735,24 +2106,24 @@ macro_rules! 
decl_module { fn get_dispatch_info(&self) -> $crate::dispatch::DispatchInfo { match *self { $( - $call_type::$fn_name( $( ref $param_name ),* ) => { - let base_weight = $weight; - let weight = >::weigh_data( - &base_weight, + $call_type::$fn_name { $( ref $param_name ),* } => { + let __pallet_base_weight = $weight; + let __pallet_weight = >::weigh_data( + &__pallet_base_weight, ($( $param_name, )*) ); - let class = >::classify_dispatch( - &base_weight, + let __pallet_class = >::classify_dispatch( + &__pallet_base_weight, ($( $param_name, )*) ); - let pays_fee = >::pays_fee( - &base_weight, + let __pallet_pays_fee = >::pays_fee( + &__pallet_base_weight, ($( $param_name, )*) ); $crate::dispatch::DispatchInfo { - weight, - class, - pays_fee, + weight: __pallet_weight, + class: __pallet_class, + pays_fee: __pallet_pays_fee, } }, )* @@ -1761,6 +2132,27 @@ macro_rules! decl_module { } } + // Implement PalletInfoAccess for the module. + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::PalletInfoAccess + for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* + { + fn index() -> usize { + < + <$trait_instance as $system::Config>::PalletInfo as $crate::traits::PalletInfo + >::index::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + + fn name() -> &'static str { + < + <$trait_instance as $system::Config>::PalletInfo as $crate::traits::PalletInfo + >::name::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + } + // Implement GetCallName for the Call. impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::GetCallName for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* @@ -1768,7 +2160,7 @@ macro_rules! decl_module { fn get_call_name(&self) -> &'static str { match *self { $( - $call_type::$fn_name( $( ref $param_name ),* ) => { + $call_type::$fn_name { $( ref $param_name ),* } => { // Don't generate any warnings for unused variables let _ = ( $( $param_name ),* ); stringify!($fn_name) @@ -1787,6 +2179,16 @@ macro_rules! decl_module { } } + // Implement `OnGenesis` for `Module` + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::OnGenesis + for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* + { + fn on_genesis() { + let storage_version = ::current_storage_version(); + storage_version.put::(); + } + } + // manual implementation of clone/eq/partialeq because using derive erroneously requires // clone/eq/partialeq from T. impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::Clone @@ -1795,22 +2197,23 @@ macro_rules! 
decl_module { fn clone(&self) -> Self { match *self { $( - $call_type::$fn_name( $( ref $param_name ),* ) => - $call_type::$fn_name( $( (*$param_name).clone() ),* ) + $call_type::$fn_name { $( ref $param_name ),* } => + $call_type::$fn_name { $( $param_name: (*$param_name).clone() ),* } ,)* _ => unreachable!(), } } } + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::PartialEq for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { fn eq(&self, _other: &Self) -> bool { match *self { $( - $call_type::$fn_name( $( ref $param_name ),* ) => { + $call_type::$fn_name { $( ref $param_name ),* } => { let self_params = ( $( $param_name, )* ); - if let $call_type::$fn_name( $( ref $param_name ),* ) = *_other { + if let $call_type::$fn_name { $( ref $param_name ),* } = *_other { self_params == ( $( $param_name, )* ) } else { match *_other { @@ -1824,6 +2227,7 @@ macro_rules! decl_module { } } } + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::Eq for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* {} @@ -1837,7 +2241,7 @@ macro_rules! decl_module { ) -> $crate::dispatch::result::Result<(), $crate::dispatch::fmt::Error> { match *self { $( - $call_type::$fn_name( $( ref $param_name ),* ) => + $call_type::$fn_name { $( ref $param_name ),* } => write!(_f, "{}{:?}", stringify!($fn_name), ( $( $param_name.clone(), )* ) @@ -1855,7 +2259,7 @@ macro_rules! decl_module { fn dispatch_bypass_filter(self, _origin: Self::Origin) -> $crate::dispatch::DispatchResultWithPostInfo { match self { $( - $call_type::$fn_name( $( $param_name ),* ) => { + $call_type::$fn_name { $( $param_name ),* } => { $crate::decl_module!( @call $from @@ -1884,192 +2288,79 @@ macro_rules! decl_module { )* } } + $crate::__impl_error_metadata! { + $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?> + { $( $other_where_bounds )* } + $( $error_type )* + } $crate::__impl_module_constants_metadata ! { $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?> { $( $other_where_bounds )* } $( $constants )* } - impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::ModuleErrorMetadata - for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* - { - fn metadata() -> &'static [$crate::dispatch::ErrorMetadata] { - <$error_type as $crate::dispatch::ModuleErrorMetadata>::metadata() - } - } + $crate::__generate_dummy_part_checker!(); } } -/// Implement a meta-dispatch module to dispatch to other dispatchers. +/// Implement metadata for dispatch. #[macro_export] -macro_rules! impl_outer_dispatch { +#[doc(hidden)] +macro_rules! __dispatch_impl_metadata { ( - $(#[$attr:meta])* - pub enum $call_type:ident for $runtime:ident where origin: $origin:ty { - $( - $( #[codec(index = $index:tt)] )? $module:ident::$camelcase:ident, - )* - } - ) => { - $(#[$attr])* - #[derive( - Clone, PartialEq, Eq, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::RuntimeDebug, - )] - pub enum $call_type { - $( - $( #[codec(index = $index)] )? 
- $camelcase ( $crate::dispatch::CallableCallFor<$camelcase, $runtime> ) - ,)* - } - impl $crate::dispatch::GetDispatchInfo for $call_type { - fn get_dispatch_info(&self) -> $crate::dispatch::DispatchInfo { - match self { - $( $call_type::$camelcase(call) => call.get_dispatch_info(), )* - } - } - } - impl $crate::dispatch::GetCallMetadata for $call_type { - fn get_call_metadata(&self) -> $crate::dispatch::CallMetadata { - use $crate::dispatch::GetCallName; - match self { - $( $call_type::$camelcase(call) => { - let function_name = call.get_call_name(); - let pallet_name = stringify!($camelcase); - $crate::dispatch::CallMetadata { function_name, pallet_name } - }, )* - } - } - - fn get_module_names() -> &'static [&'static str] { - &[$( - stringify!($camelcase), - )*] - } - - fn get_call_names(module: &str) -> &'static [&'static str] { - use $crate::dispatch::{Callable, GetCallName}; - match module { - $( - stringify!($camelcase) => - <<$camelcase as Callable<$runtime>>::Call - as GetCallName>::get_call_names(), - )* - _ => unreachable!(), - } - } - } - impl $crate::dispatch::Dispatchable for $call_type { - type Origin = $origin; - type Trait = $call_type; - type Info = $crate::weights::DispatchInfo; - type PostInfo = $crate::weights::PostDispatchInfo; - fn dispatch( - self, - origin: $origin, - ) -> $crate::dispatch::DispatchResultWithPostInfo { - if !::filter_call(&origin, &self) { - return $crate::sp_std::result::Result::Err($crate::dispatch::DispatchError::BadOrigin.into()) - } - - $crate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) - } - } - - impl $crate::traits::UnfilteredDispatchable for $call_type { - type Origin = $origin; - fn dispatch_bypass_filter( - self, - origin: $origin, - ) -> $crate::dispatch::DispatchResultWithPostInfo { - $crate::impl_outer_dispatch! { - @DISPATCH_MATCH - self - $call_type - origin - {} - 0; - $( $camelcase ),* - } - } - } - - $( - impl $crate::traits::IsSubType<$crate::dispatch::CallableCallFor<$camelcase, $runtime>> for $call_type { - #[allow(unreachable_patterns)] - fn is_sub_type(&self) -> Option<&$crate::dispatch::CallableCallFor<$camelcase, $runtime>> { - match *self { - $call_type::$camelcase(ref r) => Some(r), - // May be unreachable - _ => None, - } - } - } - - impl From<$crate::dispatch::CallableCallFor<$camelcase, $runtime>> for $call_type { - fn from(call: $crate::dispatch::CallableCallFor<$camelcase, $runtime>) -> Self { - $call_type::$camelcase(call) - } - } - )* - }; - (@DISPATCH_MATCH - $self:ident - $call_type:ident - $origin:ident - { $( $generated:tt )* } - $index:expr; - $name:ident - $( , $rest:ident )* - ) => { - $crate::impl_outer_dispatch! 
{ - @DISPATCH_MATCH - $self - $call_type - $origin - { - $( $generated )* - $call_type::$name(call) => - $crate::traits::UnfilteredDispatchable::dispatch_bypass_filter(call, $origin), - } - $index + 1; - $( $rest ),* - } - }; - (@DISPATCH_MATCH - $self:ident + $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?> + { $( $other_where_bounds:tt )* } $call_type:ident - $origin:ident - { $( $generated:tt )* } - $index:expr; + $($rest:tt)* ) => { - match $self { - $( $generated )* + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> + where $( $other_where_bounds )* + { + #[doc(hidden)] + #[allow(dead_code)] + pub fn call_functions() -> $crate::metadata::PalletCallMetadata { + $crate::scale_info::meta_type::<$call_type<$trait_instance $(, $instance)?>>().into() + } } } } -/// Implement metadata for dispatch. +/// Implement metadata for pallet error. #[macro_export] #[doc(hidden)] -macro_rules! __dispatch_impl_metadata { +macro_rules! __impl_error_metadata { ( $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?> { $( $other_where_bounds:tt )* } - $($rest:tt)* + __NO_ERROR_DEFINED ) => { impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { #[doc(hidden)] #[allow(dead_code)] - pub fn call_functions() -> &'static [$crate::dispatch::FunctionMetadata] { - $crate::__call_to_functions!($($rest)*) + pub fn error_metadata() -> Option<$crate::metadata::PalletErrorMetadata> { + None } } - } + }; + ( + $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?> + { $( $other_where_bounds:tt )* } + $( $error_type:tt )* + ) => { + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $mod_type<$trait_instance $(, $instance)?> + where $( $other_where_bounds )* + { + #[doc(hidden)] + #[allow(dead_code)] + pub fn error_metadata() -> Option<$crate::metadata::PalletErrorMetadata> { + Some($crate::metadata::PalletErrorMetadata { + ty: $crate::scale_info::meta_type::<$( $error_type )*>() + }) + } + } + }; } /// Implement metadata for module constants. @@ -2139,7 +2430,7 @@ macro_rules! __impl_module_constants_metadata { { #[doc(hidden)] #[allow(dead_code)] - pub fn module_constants_metadata() -> &'static [$crate::dispatch::ModuleConstantMetadata] { + pub fn pallet_constants_metadata() -> $crate::sp_std::vec::Vec<$crate::metadata::PalletConstantMetadata> { // Create the `ByteGetter`s $( #[allow(non_upper_case_types)] @@ -2153,40 +2444,23 @@ macro_rules! __impl_module_constants_metadata { >); impl<$const_trait_instance: 'static + $const_trait_name $( , $const_instance: $const_instantiable)? - > $crate::dispatch::DefaultByte - for $default_byte_name <$const_trait_instance $(, $const_instance)?> + > $default_byte_name <$const_trait_instance $(, $const_instance)?> { fn default_byte(&self) -> $crate::dispatch::Vec { let value: $type = $value; $crate::dispatch::Encode::encode(&value) } } - - unsafe impl<$const_trait_instance: 'static + $const_trait_name $( - , $const_instance: $const_instantiable)? - > Send for $default_byte_name <$const_trait_instance $(, $const_instance)?> {} - - unsafe impl<$const_trait_instance: 'static + $const_trait_name $( - , $const_instance: $const_instantiable)? 
- > Sync for $default_byte_name <$const_trait_instance $(, $const_instance)?> {} )* - &[ + $crate::sp_std::vec![ $( - $crate::dispatch::ModuleConstantMetadata { - name: $crate::dispatch::DecodeDifferent::Encode(stringify!($name)), - ty: $crate::dispatch::DecodeDifferent::Encode(stringify!($type)), - value: $crate::dispatch::DecodeDifferent::Encode( - $crate::dispatch::DefaultByteGetter( - &$default_byte_name::< - $const_trait_instance $(, $const_instance)? - >( - $crate::dispatch::marker::PhantomData - ) - ) - ), - documentation: $crate::dispatch::DecodeDifferent::Encode( - &[ $( $doc_attr ),* ] - ), + $crate::metadata::PalletConstantMetadata { + name: stringify!($name), + ty: $crate::scale_info::meta_type::<$type>(), + value: $default_byte_name::<$const_trait_instance $(, $const_instance)?>( + Default::default() + ).default_byte(), + docs: $crate::sp_std::vec![ $( $doc_attr ),* ], } ),* ] @@ -2195,107 +2469,6 @@ macro_rules! __impl_module_constants_metadata { } } -/// Convert the list of calls into their JSON representation, joined by ",". -#[macro_export] -#[doc(hidden)] -macro_rules! __call_to_functions { - ( - $call_type:ident $origin_type:ty - { - $( - $(#[doc = $doc_attr:tt])* - fn $fn_name:ident($from:ident - $( - , $(#[$codec_attr:ident])* $param_name:ident : $param:ty - )* - ); - )* - } - ) => { - $crate::__functions_to_metadata!(0; $origin_type;; $( - fn $fn_name( $($(#[$codec_attr])* $param_name: $param ),* ); - $( $doc_attr ),*; - )*) - }; -} - - -/// Convert a list of functions into a list of `FunctionMetadata` items. -#[macro_export] -#[doc(hidden)] -macro_rules! __functions_to_metadata{ - ( - $fn_id:expr; - $origin_type:ty; - $( $function_metadata:expr ),*; - fn $fn_name:ident( - $( - $(#[$codec_attr:ident])* $param_name:ident : $param:ty - ),* - ); - $( $fn_doc:expr ),*; - $( $rest:tt )* - ) => { - $crate::__functions_to_metadata!( - $fn_id + 1; $origin_type; - $( $function_metadata, )* $crate::__function_to_metadata!( - fn $fn_name($( $(#[$codec_attr])* $param_name : $param ),*); $( $fn_doc ),*; $fn_id; - ); - $($rest)* - ) - }; - ( - $fn_id:expr; - $origin_type:ty; - $( $function_metadata:expr ),*; - ) => { - &[ $( $function_metadata ),* ] - } -} - -/// Convert a function into its metadata representation. -#[macro_export] -#[doc(hidden)] -macro_rules! __function_to_metadata { - ( - fn $fn_name:ident( - $( $(#[$codec_attr:ident])* $param_name:ident : $param:ty),* - ); - $( $fn_doc:expr ),*; - $fn_id:expr; - ) => { - $crate::dispatch::FunctionMetadata { - name: $crate::dispatch::DecodeDifferent::Encode(stringify!($fn_name)), - arguments: $crate::dispatch::DecodeDifferent::Encode(&[ - $( - $crate::dispatch::FunctionArgumentMetadata { - name: $crate::dispatch::DecodeDifferent::Encode(stringify!($param_name)), - ty: $crate::dispatch::DecodeDifferent::Encode( - $crate::__function_to_metadata!(@stringify_expand_attr - $(#[$codec_attr])* $param_name: $param - ) - ), - } - ),* - ]), - documentation: $crate::dispatch::DecodeDifferent::Encode(&[ $( $fn_doc ),* ]), - } - }; - - (@stringify_expand_attr #[compact] $param_name:ident : $param:ty) => { - concat!("Compact<", stringify!($param), ">") - }; - - (@stringify_expand_attr $param_name:ident : $param:ty) => { stringify!($param) }; - - (@stringify_expand_attr $(#[codec_attr:ident])* $param_name:ident : $param:ty) => { - compile_error!(concat!( - "Invalid attribute for parameter `", stringify!($param_name), - "`, the following attributes are supported: `#[compact]`" - )); - } -} - #[macro_export] #[doc(hidden)] macro_rules! 
__check_reserved_fn_name { @@ -2308,6 +2481,9 @@ macro_rules! __check_reserved_fn_name { (on_runtime_upgrade $( $rest:ident )*) => { $crate::__check_reserved_fn_name!(@compile_error on_runtime_upgrade); }; + (on_idle $( $rest:ident )*) => { + $crate::__check_reserved_fn_name!(@compile_error on_idle); + }; (on_finalize $( $rest:ident )*) => { $crate::__check_reserved_fn_name!(@compile_error on_finalize); }; @@ -2350,26 +2526,35 @@ macro_rules! __check_reserved_fn_name { #[allow(dead_code)] mod tests { use super::*; - use crate::weights::{DispatchInfo, DispatchClass, Pays}; - use crate::traits::{ - CallMetadata, GetCallMetadata, GetCallName, OnInitialize, OnFinalize, OnRuntimeUpgrade, - IntegrityTest, + use crate::{ + metadata::*, + traits::{ + Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, + PalletInfo, + }, + weights::{DispatchClass, DispatchInfo, Pays, RuntimeDbWeight}, }; - pub trait Trait: system::Trait + Sized where Self::AccountId: From { } + pub trait Config: system::Config + Sized + where + Self::AccountId: From, + { + } pub mod system { - use codec::{Encode, Decode}; + use super::*; - pub trait Trait { + pub trait Config: 'static { type AccountId; type Call; type BaseCallFilter; type Origin: crate::traits::OriginTrait; type BlockNumber: Into; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: Get; } - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, scale_info::TypeInfo)] pub enum RawOrigin { Root, Signed(AccountId), @@ -2385,11 +2570,11 @@ mod tests { } } - pub type Origin = RawOrigin<::AccountId>; + pub type Origin = RawOrigin<::AccountId>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { + pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { /// Hi, this is a comment. #[weight = 0] fn aux_0(_origin) -> DispatchResult { unreachable!() } @@ -2413,6 +2598,10 @@ mod tests { fn operational(_origin) { unreachable!() } fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } 7 } + fn on_idle(n: T::BlockNumber, remaining_weight: Weight,) -> Weight { + if n.into() == 42 || remaining_weight == 42 { panic!("on_idle") } + 7 + } fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } fn on_runtime_upgrade() -> Weight { 10 } fn offchain_worker() {} @@ -2421,111 +2610,102 @@ mod tests { } } - const EXPECTED_METADATA: &'static [FunctionMetadata] = &[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ - " Hi, this is a comment." 
- ]) - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_1"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("Compact") - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_2"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("String"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_3"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_4"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_5"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("Compact") - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("operational"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]; - + #[derive(scale_info::TypeInfo)] pub struct TraitImpl {} - impl Trait for TraitImpl { } + impl Config for TraitImpl {} type Test = Module; - impl_outer_origin!{ - pub enum OuterOrigin for TraitImpl where system = system {} + impl PalletInfo for TraitImpl { + fn index() -> Option { + let type_id = sp_std::any::TypeId::of::

<P>(); + if type_id == sp_std::any::TypeId::of::<Test>() { + return Some(0) + } + + None + } + fn name<P: 'static>() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::<P>
(); + if type_id == sp_std::any::TypeId::of::() { + return Some("Test") + } + + None + } } - impl_outer_dispatch! { - pub enum OuterCall for TraitImpl where origin: OuterOrigin { - self::Test, + pub struct OuterOrigin; + + impl crate::traits::OriginTrait for OuterOrigin { + type Call = ::Call; + type PalletsOrigin = OuterOrigin; + type AccountId = ::AccountId; + + fn add_filter(&mut self, _filter: impl Fn(&Self::Call) -> bool + 'static) { + unimplemented!("Not required in tests!") + } + + fn reset_filter(&mut self) { + unimplemented!("Not required in tests!") + } + + fn set_caller_from(&mut self, _other: impl Into) { + unimplemented!("Not required in tests!") + } + + fn filter_call(&self, _call: &Self::Call) -> bool { + unimplemented!("Not required in tests!") + } + + fn caller(&self) -> &Self::PalletsOrigin { + unimplemented!("Not required in tests!") + } + + fn try_with_caller( + self, + _f: impl FnOnce(Self::PalletsOrigin) -> Result, + ) -> Result { + unimplemented!("Not required in tests!") + } + + fn none() -> Self { + unimplemented!("Not required in tests!") + } + fn root() -> Self { + unimplemented!("Not required in tests!") + } + fn signed(_by: ::AccountId) -> Self { + unimplemented!("Not required in tests!") } } - impl system::Trait for TraitImpl { + impl system::Config for TraitImpl { type Origin = OuterOrigin; type AccountId = u32; - type Call = OuterCall; - type BaseCallFilter = (); + type Call = (); + type BaseCallFilter = frame_support::traits::Everything; type BlockNumber = u32; + type PalletInfo = Self; + type DbWeight = (); } #[test] fn module_json_metadata() { let metadata = Module::::call_functions(); - assert_eq!(EXPECTED_METADATA, metadata); + let expected_metadata = + PalletCallMetadata { ty: scale_info::meta_type::>() }; + assert_eq!(expected_metadata, metadata); } #[test] fn compact_attr() { - let call: Call = Call::aux_1(1); + let call: Call = Call::aux_1 { _data: 1 }; let encoded = call.encode(); assert_eq!(2, encoded.len()); assert_eq!(vec![1, 4], encoded); - let call: Call = Call::aux_5(1, 2); + let call: Call = Call::aux_5 { _data: 1, _data2: 2 }; let encoded = call.encode(); assert_eq!(6, encoded.len()); assert_eq!(vec![5, 1, 0, 0, 0, 8], encoded); @@ -2533,13 +2713,13 @@ mod tests { #[test] fn encode_is_correct_and_decode_works() { - let call: Call = Call::aux_0(); + let call: Call = Call::aux_0 {}; let encoded = call.encode(); assert_eq!(vec![0], encoded); let decoded = Call::::decode(&mut &encoded[..]).unwrap(); assert_eq!(decoded, call); - let call: Call = Call::aux_2(32, "hello".into()); + let call: Call = Call::aux_2 { _data: 32, _data2: "hello".into() }; let encoded = call.encode(); assert_eq!(vec![2, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); let decoded = Call::::decode(&mut &encoded[..]).unwrap(); @@ -2557,6 +2737,23 @@ mod tests { assert_eq!( as OnInitialize>::on_initialize(10), 7); } + #[test] + #[should_panic(expected = "on_idle")] + fn on_idle_should_work_1() { + as OnIdle>::on_idle(42, 9); + } + + #[test] + #[should_panic(expected = "on_idle")] + fn on_idle_should_work_2() { + as OnIdle>::on_idle(9, 42); + } + + #[test] + fn on_idle_should_work_3() { + assert_eq!( as OnIdle>::on_idle(10, 11), 7); + } + #[test] #[should_panic(expected = "on_finalize")] fn on_finalize_should_work() { @@ -2565,47 +2762,38 @@ mod tests { #[test] fn on_runtime_upgrade_should_work() { - assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 10); + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 
10) + }); } #[test] fn weight_should_attach_to_call_enum() { // operational. assert_eq!( - Call::::operational().get_dispatch_info(), + Call::::operational {}.get_dispatch_info(), DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, ); // custom basic assert_eq!( - Call::::aux_3().get_dispatch_info(), + Call::::aux_3 {}.get_dispatch_info(), DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, ); } #[test] fn call_name() { - let name = Call::::aux_3().get_call_name(); + let name = Call::::aux_3 {}.get_call_name(); assert_eq!("aux_3", name); } - #[test] - fn call_metadata() { - let call = OuterCall::Test(Call::::aux_3()); - let metadata = call.get_call_metadata(); - let expected = CallMetadata { function_name: "aux_3".into(), pallet_name: "Test".into() }; - assert_eq!(metadata, expected); - } - #[test] fn get_call_names() { let call_names = Call::::get_call_names(); - assert_eq!(["aux_0", "aux_1", "aux_2", "aux_3", "aux_4", "aux_5", "operational"], call_names); - } - - #[test] - fn get_module_names() { - let module_names = OuterCall::get_module_names(); - assert_eq!(["Test"], module_names); + assert_eq!( + ["aux_0", "aux_1", "aux_2", "aux_3", "aux_4", "aux_5", "operational"], + call_names + ); } #[test] @@ -2613,4 +2801,9 @@ mod tests { fn integrity_test_should_work() { as IntegrityTest>::integrity_test(); } + + #[test] + fn test_new_call_variant() { + Call::::new_call_variant_aux_0(); + } } diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index 79ffde539cf66..836428c6bc7db 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,7 @@ //! Macro for declaring a module error. #[doc(hidden)] -pub use sp_runtime::traits::{LookupError, BadOrigin}; -#[doc(hidden)] -pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; +pub use sp_runtime::traits::{BadOrigin, LookupError}; /// Declare an error type for a runtime module. /// @@ -39,7 +37,7 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// # /// decl_error! { /// /// Errors that can occur in my module. -/// pub enum MyError for Module { +/// pub enum MyError for Module { /// /// Hey this is an error message that indicates bla. /// MyCoolErrorMessage, /// /// You are just not cool enough for my module! @@ -47,13 +45,13 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// } /// } /// -/// # use frame_system::Trait; +/// # use frame_system::Config; /// /// // You need to register the error type in `decl_module!` as well to make the error /// // exported in the metadata. /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::Origin { /// type Error = MyError; /// /// #[weight = 0] @@ -77,7 +75,7 @@ macro_rules! decl_error { $generic:ident: $trait:path $(, $inst_generic:ident: $instance:path)? > - $( where $( $where_ty:ty: $where_bound:path )* $(,)? )? + $( where $( $where_ty:ty: $where_bound:path ),* $(,)? )? { $( $( #[doc = $doc_attr:tt] )* @@ -87,10 +85,13 @@ macro_rules! 
decl_error { } ) => { $(#[$attr])* + #[derive($crate::scale_info::TypeInfo)] + #[scale_info(skip_type_params($generic $(, $inst_generic)?), capture_docs = "always")] pub enum $error<$generic: $trait $(, $inst_generic: $instance)?> - $( where $( $where_ty: $where_bound )* )? + $( where $( $where_ty: $where_bound ),* )? { #[doc(hidden)] + #[codec(skip)] __Ignore( $crate::sp_std::marker::PhantomData<($generic, $( $inst_generic)?)>, $crate::Never, @@ -103,7 +104,7 @@ macro_rules! decl_error { impl<$generic: $trait $(, $inst_generic: $instance)?> $crate::sp_std::fmt::Debug for $error<$generic $(, $inst_generic)?> - $( where $( $where_ty: $where_bound )* )? + $( where $( $where_ty: $where_bound ),* )? { fn fmt(&self, f: &mut $crate::sp_std::fmt::Formatter<'_>) -> $crate::sp_std::fmt::Result { f.write_str(self.as_str()) @@ -111,7 +112,7 @@ macro_rules! decl_error { } impl<$generic: $trait $(, $inst_generic: $instance)?> $error<$generic $(, $inst_generic)?> - $( where $( $where_ty: $where_bound )* )? + $( where $( $where_ty: $where_bound ),* )? { fn as_u8(&self) -> u8 { $crate::decl_error! { @@ -136,7 +137,7 @@ macro_rules! decl_error { impl<$generic: $trait $(, $inst_generic: $instance)?> From<$error<$generic $(, $inst_generic)?>> for &'static str - $( where $( $where_ty: $where_bound )* )? + $( where $( $where_ty: $where_bound ),* )? { fn from(err: $error<$generic $(, $inst_generic)?>) -> &'static str { err.as_str() @@ -145,7 +146,7 @@ macro_rules! decl_error { impl<$generic: $trait $(, $inst_generic: $instance)?> From<$error<$generic $(, $inst_generic)?>> for $crate::sp_runtime::DispatchError - $( where $( $where_ty: $where_bound )* )? + $( where $( $where_ty: $where_bound ),* )? { fn from(err: $error<$generic $(, $inst_generic)?>) -> Self { let index = <$generic::PalletInfo as $crate::traits::PalletInfo> @@ -159,24 +160,6 @@ macro_rules! decl_error { } } } - - impl<$generic: $trait $(, $inst_generic: $instance)?> $crate::error::ModuleErrorMetadata - for $error<$generic $(, $inst_generic)?> - $( where $( $where_ty: $where_bound )* )? - { - fn metadata() -> &'static [$crate::error::ErrorMetadata] { - &[ - $( - $crate::error::ErrorMetadata { - name: $crate::error::DecodeDifferent::Encode(stringify!($name)), - documentation: $crate::error::DecodeDifferent::Encode(&[ - $( $doc_attr ),* - ]), - } - ),* - ] - } - } }; (@GENERATE_AS_U8 $self:ident diff --git a/frame/support/src/event.rs b/frame/support/src/event.rs index 0f889f97f40a0..3d042a3122db8 100644 --- a/frame/support/src/event.rs +++ b/frame/support/src/event.rs @@ -1,15 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Macros that define an Event types. Events can be used to easily report changes or conditions //! in your runtime to external entities like users, chain explorers, or dApps. @@ -17,8 +21,6 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnEncode}; - /// Implement the `Event` for a module. /// /// # Simple Event Example: @@ -31,13 +33,13 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// } /// ); /// -///# fn main() {} +/// # fn main() {} /// ``` /// /// # Generic Event Example: /// /// ```rust -/// trait Trait { +/// trait Config { /// type Balance; /// type Token; /// } @@ -45,7 +47,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// mod event1 { /// // Event that specifies the generic parameter explicitly (`Balance`). /// frame_support::decl_event!( -/// pub enum Event where Balance = ::Balance { +/// pub enum Event where Balance = ::Balance { /// Message(Balance), /// } /// ); @@ -56,7 +58,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// // If no name for the generic parameter is specified explicitly, /// // the name will be taken from the type name of the trait. /// frame_support::decl_event!( -/// pub enum Event where ::Balance { +/// pub enum Event where ::Balance { /// Message(Balance), /// } /// ); @@ -65,13 +67,13 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// mod event3 { /// // And we even support declaring multiple generic parameters! /// frame_support::decl_event!( -/// pub enum Event where ::Balance, ::Token { +/// pub enum Event where ::Balance, ::Token { /// Message(Balance, Token), /// } /// ); /// } /// -///# fn main() {} +/// # fn main() {} /// ``` /// /// The syntax for generic events requires the `where`. @@ -79,10 +81,10 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// # Generic Event with Instance Example: /// /// ```rust -///# struct DefaultInstance; -///# trait Instance {} -///# impl Instance for DefaultInstance {} -/// trait Trait { +/// # struct DefaultInstance; +/// # trait Instance {} +/// # impl Instance for DefaultInstance {} +/// trait Config { /// type Balance; /// type Token; /// } @@ -90,13 +92,13 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// // For module with instances, DefaultInstance is optional /// frame_support::decl_event!( /// pub enum Event where -/// ::Balance, -/// ::Token +/// ::Balance, +/// ::Token /// { /// Message(Balance, Token), /// } /// ); -///# fn main() {} +/// # fn main() {} /// ``` #[macro_export] macro_rules! decl_event { @@ -125,8 +127,10 @@ macro_rules! decl_event { Clone, PartialEq, Eq, $crate::codec::Encode, $crate::codec::Decode, + $crate::scale_info::TypeInfo, $crate::RuntimeDebug, )] + #[scale_info(capture_docs = "always")] /// Events for this module. /// $(#[$attr])* @@ -138,13 +142,6 @@ macro_rules! 
decl_event { impl From for () { fn from(_: Event) -> () { () } } - impl Event { - #[allow(dead_code)] - #[doc(hidden)] - pub fn metadata() -> &'static [ $crate::event::EventMetadata ] { - $crate::__events_to_metadata!(; $( $events )* ) - } - } } } @@ -258,18 +255,20 @@ macro_rules! __decl_generic_event { { $( $events:tt )* }; { ,$( $generic_param:ident = $generic_type:ty ),* }; ) => { - /// [`RawEvent`] specialized for the configuration [`Trait`] + /// [`RawEvent`] specialized for the configuration [`Config`] /// /// [`RawEvent`]: enum.RawEvent.html - /// [`Trait`]: trait.Trait.html + /// [`Config`]: trait.Config.html pub type Event<$event_generic_param $(, $instance $( = $event_default_instance)? )?> = RawEvent<$( $generic_type ),* $(, $instance)? >; #[derive( Clone, PartialEq, Eq, $crate::codec::Encode, $crate::codec::Decode, + $crate::scale_info::TypeInfo, $crate::RuntimeDebug, )] + #[scale_info(capture_docs = "always")] /// Events for this module. /// $(#[$attr])* @@ -286,540 +285,8 @@ macro_rules! __decl_generic_event { impl<$( $generic_param ),* $(, $instance)? > From> for () { fn from(_: RawEvent<$( $generic_param ),* $(, $instance)?>) -> () { () } } - impl<$( $generic_param ),* $(, $instance)?> RawEvent<$( $generic_param ),* $(, $instance)?> { - #[allow(dead_code)] - #[doc(hidden)] - pub fn metadata() -> &'static [$crate::event::EventMetadata] { - $crate::__events_to_metadata!(; $( $events )* ) - } - } }; (@cannot_parse $ty:ty) => { compile_error!(concat!("The type `", stringify!($ty), "` can't be parsed as an unnamed one, please name it `Name = ", stringify!($ty), "`")); } } - -#[macro_export] -#[doc(hidden)] -macro_rules! __events_to_metadata { - ( - $( $metadata:expr ),*; - $( #[doc = $doc_attr:tt] )* - $event:ident $( ( $( $param:path ),* $(,)? ) )*, - $( $rest:tt )* - ) => { - $crate::__events_to_metadata!( - $( $metadata, )* - $crate::event::EventMetadata { - name: $crate::event::DecodeDifferent::Encode(stringify!($event)), - arguments: $crate::event::DecodeDifferent::Encode(&[ - $( $( stringify!($param) ),* )* - ]), - documentation: $crate::event::DecodeDifferent::Encode(&[ - $( $doc_attr ),* - ]), - }; - $( $rest )* - ) - }; - ( - $( $metadata:expr ),*; - ) => { - &[ $( $metadata ),* ] - } -} - -/// Constructs an Event type for a runtime. This is usually called automatically by the -/// construct_runtime macro. -#[macro_export] -macro_rules! impl_outer_event { - // Macro transformations (to convert invocations with incomplete parameters to the canonical - // form) - ( - $(#[$attr:meta])* - pub enum $name:ident for $runtime:ident { - $( $rest_events:tt )* - } - ) => { - $crate::impl_outer_event!( - $( #[$attr] )*; - $name; - $runtime; - Modules { $( $rest_events )* }; - {}; - ); - }; - // Generic + Instance - ( - $(#[$attr:meta])*; - $name:ident; - $runtime:ident; - Modules { - $( #[codec(index = $index:tt)] )? $module:ident $instance:ident, - $( $rest_event_generic_instance:tt )* - }; - { $( $parsed:tt )* }; - ) => { - $crate::impl_outer_event!( - $( #[$attr] )*; - $name; - $runtime; - Modules { $( $rest_event_generic_instance )* }; - { $( $parsed )* $module::Event<$runtime>{ $instance } index { $( $index )? }, }; - ); - }; - // Instance - ( - $(#[$attr:meta])*; - $name:ident; - $runtime:ident; - Modules { - $( #[codec(index = $index:tt)] )? 
$module:ident $instance:ident, - $( $rest_event_instance:tt )* - }; - { $( $parsed:tt )* }; - ) => { - $crate::impl_outer_event!( - $( #[$attr] )*; - $name; - $runtime; - Modules { $( $rest_event_instance )* }; - { $( $parsed )* $module::Event { $instance } index { $( $index )? }, }; - ); - }; - // Generic - ( - $(#[$attr:meta])*; - $name:ident; - $runtime:ident; - Modules { - $( #[codec(index = $index:tt)] )? $module:ident, - $( $rest_event_generic:tt )* - }; - { $( $parsed:tt )* }; - ) => { - $crate::impl_outer_event!( - $( #[$attr] )*; - $name; - $runtime; - Modules { $( $rest_event_generic )* }; - { $( $parsed )* $module::Event<$runtime> index { $( $index )? }, }; - ); - }; - // No Generic and no Instance - ( - $(#[$attr:meta])*; - $name:ident; - $runtime:ident; - Modules { - $( #[codec(index = $index:tt)] )? $module:ident, - $( $rest_event_no_generic_no_instance:tt )* - }; - { $( $parsed:tt )* }; - ) => { - $crate::impl_outer_event!( - $( #[$attr] )*; - $name; - $runtime; - Modules { $( $rest_event_no_generic_no_instance )* }; - { $( $parsed )* $module::Event index { $( $index )? }, }; - ); - }; - - // The main macro expansion that actually renders the Event enum code. - ( - $(#[$attr:meta])*; - $name:ident; - $runtime:ident; - Modules {}; - { - $( - $module_name:ident::Event - $( <$generic_param:ident> )? - $( { $generic_instance:ident } )? - index { $( $index:tt )? }, - )* - }; - ) => { - $crate::paste::item! { - #[derive( - Clone, PartialEq, Eq, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::RuntimeDebug, - )] - $(#[$attr])* - #[allow(non_camel_case_types)] - pub enum $name { - $( - $( #[codec(index = $index)] )? - [< $module_name $(_ $generic_instance )? >]( - $module_name::Event < $( $generic_param )? $(, $module_name::$generic_instance )? > - ), - )* - } - $( - impl From<$module_name::Event < $( $generic_param, )? $( $module_name::$generic_instance )? >> for $name { - fn from(x: $module_name::Event < $( $generic_param, )? $( $module_name::$generic_instance )? >) -> Self { - $name::[< $module_name $(_ $generic_instance )? >](x) - } - } - impl $crate::sp_std::convert::TryInto< - $module_name::Event < $( $generic_param, )? $( $module_name::$generic_instance )? > - > for $name { - type Error = (); - - fn try_into(self) -> $crate::sp_std::result::Result< - $module_name::Event < $( $generic_param, )? $( $module_name::$generic_instance )? >, Self::Error - > { - match self { - Self::[< $module_name $(_ $generic_instance )? >](evt) => Ok(evt), - _ => Err(()), - } - } - } - )* - } - $crate::__impl_outer_event_json_metadata!( - $runtime; - $name; - $( - $module_name::Event - < $( $generic_param )? $(, $module_name::$generic_instance )? > - $( $generic_instance )?, - )*; - ); - } -} - -#[macro_export] -#[doc(hidden)] -macro_rules! __impl_outer_event_json_metadata { - ( - $runtime:ident; - $event_name:ident; - $( $module_name:ident::Event < $( $generic_params:path ),* > $( $instance:ident )?, )*; - ) => { - impl $runtime { - #[allow(dead_code)] - pub fn outer_event_metadata() -> $crate::event::OuterEventMetadata { - $crate::event::OuterEventMetadata { - name: $crate::event::DecodeDifferent::Encode(stringify!($event_name)), - events: $crate::event::DecodeDifferent::Encode(&[ - $( - ( - stringify!($module_name), - $crate::event::FnEncode( - $module_name::Event ::< $( $generic_params ),* > ::metadata - ) - ) - ),* - ]) - } - } - - $crate::__impl_outer_event_json_metadata! { - @DECL_MODULE_EVENT_FNS - $( $module_name < $( $generic_params ),* > $( $instance )? 
; )* - } - } - }; - - (@DECL_MODULE_EVENT_FNS - $( - $module_name:ident < $( $generic_params:path ),* > $( $instance:ident )? ; - )* - ) => { - $crate::paste::item! { - $( - #[allow(dead_code)] - pub fn [< __module_events_ $module_name $( _ $instance )? >] () -> - &'static [$crate::event::EventMetadata] - { - $module_name::Event ::< $( $generic_params ),* > ::metadata() - } - )* - } - } -} - -#[cfg(test)] -#[allow(dead_code)] -mod tests { - use super::*; - use serde::Serialize; - use codec::{Encode, Decode}; - - mod system { - pub trait Trait { - type Origin; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - } - - mod system_renamed { - pub trait Trait { - type Origin; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - } - - mod event_module { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event without renaming the generic parameter `Balance` and `Origin`. - pub enum Event where ::Balance, ::Origin - { - /// Hi, I am a comment. - TestEvent(Balance, Origin), - /// Dog - EventWithoutParams, - } - ); - } - - mod event_module2 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event with renamed generic parameter - pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin - { - TestEvent(BalanceRenamed), - TestOrigin(OriginRenamed), - } - ); - } - - mod event_module3 { - decl_event!( - pub enum Event { - HiEvent, - } - ); - } - - mod event_module4 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event finish formatting on an unnamed one with trailing comma - pub enum Event where - ::Balance, - ::Origin, - { - TestEvent(Balance, Origin), - } - ); - } - - mod event_module5 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event finish formatting on an named one with trailing comma - pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin, - { - TestEvent(BalanceRenamed, OriginRenamed), - TrailingCommaInArgs( - u32, - u32, - ), - } - ); - } - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] - pub struct TestRuntime; - - impl_outer_event! { - pub enum TestEvent for TestRuntime { - system, - event_module, - event_module2, - event_module3, - } - } - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] - pub struct TestRuntime2; - - impl_outer_event! 
{ - pub enum TestEventSystemRenamed for TestRuntime2 { - system_renamed, - event_module, - #[codec(index = "5")] event_module2, - event_module3, - } - } - - impl event_module::Trait for TestRuntime { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl event_module2::Trait for TestRuntime { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl system::Trait for TestRuntime { - type Origin = u32; - type BlockNumber = u32; - } - - impl event_module::Trait for TestRuntime2 { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl event_module2::Trait for TestRuntime2 { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl system_renamed::Trait for TestRuntime2 { - type Origin = u32; - type BlockNumber = u32; - } - - const EXPECTED_METADATA: OuterEventMetadata = OuterEventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - events: DecodeDifferent::Encode(&[ - ( - "system", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - } - ]) - ), - ( - "event_module", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "Balance", "Origin" ]), - documentation: DecodeDifferent::Encode(&[ " Hi, I am a comment." ]) - }, - EventMetadata { - name: DecodeDifferent::Encode("EventWithoutParams"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ " Dog" ]), - }, - ]) - ), - ( - "event_module2", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "BalanceRenamed" ]), - documentation: DecodeDifferent::Encode(&[]) - }, - EventMetadata { - name: DecodeDifferent::Encode("TestOrigin"), - arguments: DecodeDifferent::Encode(&[ "OriginRenamed" ]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]) - ), - ( - "event_module3", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("HiEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - ) - ]) - }; - - #[test] - fn outer_event_metadata() { - assert_eq!(EXPECTED_METADATA, TestRuntime::outer_event_metadata()); - } - - #[test] - fn test_codec() { - let runtime_1_event_module_2 = TestEvent::event_module2( - event_module2::Event::::TestEvent(3) - ); - assert_eq!(runtime_1_event_module_2.encode()[0], 2); - - let runtime_2_event_module_2 = TestEventSystemRenamed::event_module2( - event_module2::Event::::TestEvent(3) - ); - assert_eq!(runtime_2_event_module_2.encode()[0], 5); - - let runtime_2_event_module_3 = TestEventSystemRenamed::event_module3( - event_module3::Event::HiEvent - ); - assert_eq!(runtime_2_event_module_3.encode()[0], 3); - } -} diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index a5de205863d5c..f943bcf323090 100644 --- a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,10 @@ //! Hash utilities. 
-use codec::Codec; +use crate::metadata; +use codec::{Codec, MaxEncodedLen}; +use sp_io::hashing::{blake2_128, blake2_256, twox_128, twox_256, twox_64}; use sp_std::prelude::Vec; -use sp_io::hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256}; // This trait must be kept coherent with frame-support-procedural HasherKind usage pub trait Hashable: Sized { @@ -51,13 +52,19 @@ impl<T: Codec> Hashable for T { fn twox_64_concat(&self) -> Vec<u8> { self.using_encoded(Twox64Concat::hash) } - fn identity(&self) -> Vec<u8> { self.encode() } + fn identity(&self) -> Vec<u8> { + self.encode() + } } /// Hasher to use to hash keys to insert to storage. pub trait StorageHasher: 'static { + const METADATA: metadata::StorageHasher; type Output: AsRef<[u8]>; fn hash(x: &[u8]) -> Self::Output; + + /// The max length of the final hash, for the given key type. + fn max_len<K: MaxEncodedLen>() -> usize; } /// Hasher to use to hash keys to insert to storage. @@ -73,10 +80,14 @@ pub trait ReversibleStorageHasher: StorageHasher { /// Store the key directly. pub struct Identity; impl StorageHasher for Identity { + const METADATA: metadata::StorageHasher = metadata::StorageHasher::Identity; type Output = Vec<u8>; fn hash(x: &[u8]) -> Vec<u8> { x.to_vec() } + fn max_len<K: MaxEncodedLen>() -> usize { + K::max_encoded_len() + } } impl ReversibleStorageHasher for Identity { fn reverse(x: &[u8]) -> &[u8] { @@ -87,19 +98,19 @@ impl ReversibleStorageHasher for Identity { /// Hash storage keys with `concat(twox64(key), key)` pub struct Twox64Concat; impl StorageHasher for Twox64Concat { + const METADATA: metadata::StorageHasher = metadata::StorageHasher::Twox64Concat; type Output = Vec<u8>; fn hash(x: &[u8]) -> Vec<u8> { - twox_64(x) - .iter() - .chain(x.into_iter()) - .cloned() - .collect::<Vec<u8>>() + twox_64(x).iter().chain(x.into_iter()).cloned().collect::<Vec<u8>>() + } + fn max_len<K: MaxEncodedLen>() -> usize { + K::max_encoded_len().saturating_add(8) } } impl ReversibleStorageHasher for Twox64Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 8 { - crate::debug::error!("Invalid reverse: hash length too short"); + log::error!("Invalid reverse: hash length too short"); return &[] } &x[8..] @@ -109,19 +120,19 @@ impl ReversibleStorageHasher for Twox64Concat { /// Hash storage keys with `concat(blake2_128(key), key)` pub struct Blake2_128Concat; impl StorageHasher for Blake2_128Concat { + const METADATA: metadata::StorageHasher = metadata::StorageHasher::Blake2_128Concat; type Output = Vec<u8>; fn hash(x: &[u8]) -> Vec<u8> { - blake2_128(x) - .iter() - .chain(x.into_iter()) - .cloned() - .collect::<Vec<u8>>() + blake2_128(x).iter().chain(x.into_iter()).cloned().collect::<Vec<u8>>() + } + fn max_len<K: MaxEncodedLen>() -> usize { + K::max_encoded_len().saturating_add(16) + } } impl ReversibleStorageHasher for Blake2_128Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 16 { - crate::debug::error!("Invalid reverse: hash length too short"); + log::error!("Invalid reverse: hash length too short"); return &[] } &x[16..]
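The two `Concat` hashers above are what make storage keys transparent: the final key is `hash(encoded_key) ++ encoded_key`, so `reverse` only has to strip a fixed-length digest prefix, and the new `max_len` is the key's `max_encoded_len()` plus the digest length. A minimal sketch of the same mechanics, assuming only `sp-io` as a dependency (the helper names `transparent_hash` and `recover_key` are illustrative, not part of this diff):

```rust
use sp_io::hashing::twox_64;

// Illustrative stand-in for `Twox64Concat::hash`: prepend an 8-byte
// twox64 digest to the SCALE-encoded key.
fn transparent_hash(encoded_key: &[u8]) -> Vec<u8> {
    twox_64(encoded_key).iter().chain(encoded_key).cloned().collect()
}

// Illustrative stand-in for `Twox64Concat::reverse`: dropping the
// fixed-length digest prefix recovers the original encoded key.
fn recover_key(stored: &[u8]) -> &[u8] {
    &stored[8..]
}

fn main() {
    let encoded_key = 42u32.to_le_bytes();
    let stored = transparent_hash(&encoded_key);
    // Mirrors `Twox64Concat::max_len::<u32>()`: 4 encoded bytes + 8 digest bytes.
    assert_eq!(stored.len(), encoded_key.len() + 8);
    assert_eq!(recover_key(&stored), &encoded_key[..]);
}
```

This is also why the `reverse` implementations check the input length first: a stored key shorter than the digest prefix indicates a corrupt or foreign key, so they log an error and return an empty slice instead of panicking.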
@@ -131,37 +142,53 @@ impl ReversibleStorageHasher for Blake2_128Concat { /// Hash storage keys with blake2 128 pub struct Blake2_128; impl StorageHasher for Blake2_128 { + const METADATA: metadata::StorageHasher = metadata::StorageHasher::Blake2_128; type Output = [u8; 16]; fn hash(x: &[u8]) -> [u8; 16] { blake2_128(x) } + fn max_len() -> usize { + 16 + } } /// Hash storage keys with blake2 256 pub struct Blake2_256; impl StorageHasher for Blake2_256 { + const METADATA: metadata::StorageHasher = metadata::StorageHasher::Blake2_256; type Output = [u8; 32]; fn hash(x: &[u8]) -> [u8; 32] { blake2_256(x) } + fn max_len() -> usize { + 32 + } } /// Hash storage keys with twox 128 pub struct Twox128; impl StorageHasher for Twox128 { + const METADATA: metadata::StorageHasher = metadata::StorageHasher::Twox128; type Output = [u8; 16]; fn hash(x: &[u8]) -> [u8; 16] { twox_128(x) } + fn max_len() -> usize { + 16 + } } /// Hash storage keys with twox 256 pub struct Twox256; impl StorageHasher for Twox256 { + const METADATA: metadata::StorageHasher = metadata::StorageHasher::Twox256; type Output = [u8; 32]; fn hash(x: &[u8]) -> [u8; 32] { twox_256(x) } + fn max_len() -> usize { + 32 + } } #[cfg(test)] @@ -179,4 +206,17 @@ mod tests { let r = Blake2_128Concat::hash(b"foo"); assert_eq!(r.split_at(16), (&blake2_128(b"foo")[..], &b"foo"[..])) } + + #[test] + fn max_lengths() { + use codec::Encode; + let encoded_0u32 = &0u32.encode()[..]; + assert_eq!(Twox64Concat::hash(encoded_0u32).len(), Twox64Concat::max_len::()); + assert_eq!(Twox128::hash(encoded_0u32).len(), Twox128::max_len::()); + assert_eq!(Twox256::hash(encoded_0u32).len(), Twox256::max_len::()); + assert_eq!(Blake2_128::hash(encoded_0u32).len(), Blake2_128::max_len::()); + assert_eq!(Blake2_128Concat::hash(encoded_0u32).len(), Blake2_128Concat::max_len::()); + assert_eq!(Blake2_256::hash(encoded_0u32).len(), Blake2_256::max_len::()); + assert_eq!(Identity::hash(encoded_0u32).len(), Identity::max_len::()); + } } diff --git a/frame/support/src/inherent.rs b/frame/support/src/inherent.rs index 83a1872ab4f3d..2125f3e7f50a7 100644 --- a/frame/support/src/inherent.rs +++ b/frame/support/src/inherent.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,260 +15,70 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[doc(hidden)] -pub use crate::sp_std::vec::Vec; #[doc(hidden)] pub use crate::sp_runtime::traits::{Block as BlockT, Extrinsic}; #[doc(hidden)] -pub use sp_inherents::{InherentData, ProvideInherent, CheckInherentsResult, IsFatalError}; +pub use crate::sp_std::vec::Vec; +pub use sp_inherents::{ + CheckInherentsResult, InherentData, InherentIdentifier, IsFatalError, MakeFatalError, +}; -/// Implement the outer inherent. -/// All given modules need to implement `ProvideInherent`. -/// -/// # Example +/// A pallet that provides or verifies an inherent extrinsic. /// -/// ```nocompile -/// impl_outer_inherent! { -/// impl Inherents where Block = Block, UncheckedExtrinsic = UncheckedExtrinsic { -/// timestamp, -/// consensus, -/// aura, -/// } -/// } -/// ``` -#[macro_export] -macro_rules! 
impl_outer_inherent { - ( - impl Inherents where - Block = $block:ident, - UncheckedExtrinsic = $uncheckedextrinsic:ident - { - $( $module:ident, )* - } - ) => { - trait InherentDataExt { - fn create_extrinsics(&self) -> - $crate::inherent::Vec<<$block as $crate::inherent::BlockT>::Extrinsic>; - fn check_extrinsics(&self, block: &$block) -> $crate::inherent::CheckInherentsResult; - } - - impl InherentDataExt for $crate::inherent::InherentData { - fn create_extrinsics(&self) -> - $crate::inherent::Vec<<$block as $crate::inherent::BlockT>::Extrinsic> { - use $crate::inherent::{ProvideInherent, Extrinsic}; - - let mut inherents = Vec::new(); - - $( - if let Some(inherent) = $module::create_inherent(self) { - inherents.push($uncheckedextrinsic::new( - inherent.into(), - None, - ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return `Some`; qed")); - } - )* - - inherents - } - - fn check_extrinsics(&self, block: &$block) -> $crate::inherent::CheckInherentsResult { - use $crate::inherent::{ProvideInherent, IsFatalError}; - use $crate::traits::IsSubType; - - let mut result = $crate::inherent::CheckInherentsResult::new(); - for xt in block.extrinsics() { - if $crate::inherent::Extrinsic::is_signed(xt).unwrap_or(false) { - break - } - - $({ - if let Some(call) = IsSubType::<_>::is_sub_type(&xt.function) { - if let Err(e) = $module::check_inherent(call, self) { - result.put_error( - $module::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result - } - } - } - })* - } - - $( - match $module::is_inherent_required(self) { - Ok(Some(e)) => { - let found = block.extrinsics().iter().any(|xt| { - if $crate::inherent::Extrinsic::is_signed(xt).unwrap_or(false) { - return false - } - - let call: Option<&<$module as ProvideInherent>::Call> = - xt.function.is_sub_type(); - - call.is_some() - }); - - if !found { - result.put_error( - $module::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result - } - } - }, - Ok(None) => (), - Err(e) => { - result.put_error( - $module::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result - } - }, - } - )* - - result - } - } - }; -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::{traits, testing::{Header, self}}; - use crate::traits::IsSubType; - - #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] - enum Call { - Test(CallTest), - Test2(CallTest2), - } - - impl From for Call { - fn from(call: CallTest) -> Self { - Self::Test(call) - } - } - - impl From for Call { - fn from(call: CallTest2) -> Self { - Self::Test2(call) - } - } - - impl IsSubType for Call { - fn is_sub_type(&self) -> Option<&CallTest> { - match self { - Self::Test(test) => Some(test), - _ => None, - } - } - } - - impl IsSubType for Call { - fn is_sub_type(&self) -> Option<&CallTest2> { - match self { - Self::Test2(test) => Some(test), - _ => None, - } - } - } - - #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] - enum CallTest { - Something, - SomethingElse, - } - - #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] - enum CallTest2 { - Something, - } - - struct ModuleTest; - impl ProvideInherent for ModuleTest { - type Call = CallTest; - type Error = sp_inherents::MakeFatalError<()>; - const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"test1235"; - - fn create_inherent(_: 
&InherentData) -> Option<Self::Call> { - Some(CallTest::Something) - } - - fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { - match call { - CallTest::Something => Ok(()), - CallTest::SomethingElse => Err(().into()), - } - } - } - - struct ModuleTest2; - impl ProvideInherent for ModuleTest2 { - type Call = CallTest2; - type Error = sp_inherents::MakeFatalError<()>; - const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"test1234"; - - fn create_inherent(_: &InherentData) -> Option<Self::Call> { - Some(CallTest2::Something) - } - } - - type Block = testing::Block<Extrinsic>; - - #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] - struct Extrinsic { - function: Call, +/// The pallet may provide the inherent, verify an inherent, or both provide and verify. +pub trait ProvideInherent { + /// The call type of the pallet. + type Call; + /// The error returned by `check_inherent`. + type Error: codec::Encode + IsFatalError; + /// The inherent identifier used by this inherent. + const INHERENT_IDENTIFIER: self::InherentIdentifier; + + /// Create an inherent out of the given `InherentData`. + fn create_inherent(data: &InherentData) -> Option<Self::Call>; + + /// Determines whether this inherent is required in this block. + /// + /// - `Ok(None)` indicates that this inherent is not required in this block. The default + /// implementation returns this. + /// + /// - `Ok(Some(e))` indicates that this inherent is required in this block. `construct_runtime!` + /// will call this function from within its implementation of `fn check_extrinsics`. + /// If the inherent is not present, it will return `e`. + /// + /// - `Err(_)` indicates that this function failed and further operations should be aborted. + /// + /// NOTE: If an inherent is required, then the runtime asserts that the block contains at least + /// one inherent for which: + /// * the type is [`Self::Call`], + /// * [`Self::is_inherent`] returns true. + fn is_inherent_required(_: &InherentData) -> Result<Option<Self::Error>, Self::Error> { + Ok(None) } - impl traits::Extrinsic for Extrinsic { - type Call = Call; - type SignaturePayload = (); - - fn new(function: Call, _: Option<()>) -> Option<Self> { - Some(Self { function }) - } + /// Check whether the given inherent is valid. Checking the inherent is optional and can be + /// omitted by using the default implementation. + /// + /// When checking an inherent, the first parameter represents the inherent that is actually + /// included in the block by its author, whereas the second parameter represents the inherent + /// data that the verifying node calculates. + /// + /// NOTE: A block can contain multiple inherents. + fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + Ok(()) } - parity_util_mem::malloc_size_of_is_0!(Extrinsic); - - impl_outer_inherent!
{ - impl Inherents where Block = Block, UncheckedExtrinsic = Extrinsic { - ModuleTest, - ModuleTest2, - } - } - - #[test] - fn create_inherents_works() { - let inherents = InherentData::new().create_extrinsics(); - - let expected = vec![ - Extrinsic { function: Call::Test(CallTest::Something) }, - Extrinsic { function: Call::Test2(CallTest2::Something) }, - ]; - assert_eq!(expected, inherents); - } - - #[test] - fn check_inherents_works() { - let block = Block::new( - Header::new_from_number(1), - vec![Extrinsic { function: Call::Test(CallTest::Something) }], - ); - - assert!(InherentData::new().check_extrinsics(&block).ok()); - - let block = Block::new( - Header::new_from_number(1), - vec![Extrinsic { function: Call::Test(CallTest::SomethingElse) }], - ); - - assert!(InherentData::new().check_extrinsics(&block).fatal_error()); - } + /// Return whether the call is an inherent call. + /// + /// NOTE: Signed extrinsics are not inherents, but a signed extrinsic with the given call + /// variant can still be dispatched. + /// + /// # Warning + /// + /// In FRAME, inherents are enforced to come before other extrinsics. For this reason, + /// pallets with unsigned transactions **must ensure** that no unsigned transaction call + /// is also an inherent call, when implementing `ValidateUnsigned::validate_unsigned`. + /// Otherwise a block producer can produce invalid blocks by including them after non-inherent + /// extrinsics. + fn is_inherent(call: &Self::Call) -> bool; } diff --git a/frame/support/src/instances.rs b/frame/support/src/instances.rs new file mode 100644 index 0000000000000..9908d16076a08 --- /dev/null +++ b/frame/support/src/instances.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Some instance placeholders to be used in the [`frame_support::pallet`] attribute macro. +//! +//! The [`frame_support::pallet`] attribute macro only requires the instance generic `I` to be +//! static (contrary to the `decl_*` macros, which require the instance generic to implement +//! [`frame_support::traits::Instance`]). +//! +//! Thus frame-support provides some instance types to be used; this allows an instantiable pallet +//! to depend on a specific instance of another: +//! ``` +//! # mod another_pallet { pub trait Config<I: 'static = ()> {} } +//! pub trait Config<I: 'static = ()>: another_pallet::Config<I> {} +//! ``` +//! +//! NOTE: [`frame_support::pallet`] will re-export them inside the module, in order to make them +//! accessible to [`frame_support::construct_runtime`]. + +/// Instance1 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance1; + +/// Instance2 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance2; + +/// Instance3 to be used for an instantiable pallet defined with the `pallet` macro.
+#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance3; + +/// Instance4 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance4; + +/// Instance5 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance5; + +/// Instance6 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance6; + +/// Instance7 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance7; + +/// Instance8 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance8; + +/// Instance9 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance9; + +/// Instance10 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance10; + +/// Instance11 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance11; + +/// Instance12 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance12; + +/// Instance13 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance13; + +/// Instance14 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance14; + +/// Instance15 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance15; + +/// Instance16 to be used for an instantiable pallet defined with the `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance16; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 58fb3d031cf06..459698707366d 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,85 +22,296 @@ /// Export ourself as `frame_support` to make tests happy.
extern crate self as frame_support; -#[macro_use] -extern crate bitmask; - #[doc(hidden)] pub use sp_tracing; -#[cfg(feature = "std")] -pub use serde; -pub use sp_core::Void; -#[doc(hidden)] -pub use sp_std; #[doc(hidden)] pub use codec; +#[doc(hidden)] +pub use frame_metadata as metadata; +#[doc(hidden)] +pub use log; #[cfg(feature = "std")] #[doc(hidden)] pub use once_cell; #[doc(hidden)] pub use paste; -#[cfg(feature = "std")] #[doc(hidden)] -pub use sp_state_machine::BasicExternalities; +pub use scale_info; +#[cfg(feature = "std")] +pub use serde; +pub use sp_core::Void; #[doc(hidden)] -pub use sp_io::{storage::root as storage_root, self}; +pub use sp_io::{self, storage::root as storage_root}; #[doc(hidden)] pub use sp_runtime::RuntimeDebug; +#[cfg(feature = "std")] +#[doc(hidden)] +pub use sp_state_machine::BasicExternalities; +#[doc(hidden)] +pub use sp_std; -#[macro_use] -pub mod debug; -#[macro_use] -mod origin; #[macro_use] pub mod dispatch; -pub mod storage; mod hash; +pub mod storage; #[macro_use] pub mod event; -#[macro_use] -pub mod metadata; -#[macro_use] pub mod inherent; #[macro_use] -pub mod unsigned; -#[macro_use] pub mod error; +pub mod instances; +pub mod migrations; pub mod traits; pub mod weights; -pub use self::hash::{ - Twox256, Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, Hashable, - StorageHasher, ReversibleStorageHasher -}; -pub use self::storage::{ - StorageValue, StorageMap, StorageDoubleMap, StoragePrefixedMap, IterableStorageMap, - IterableStorageDoubleMap, migration +#[doc(hidden)] +pub mod unsigned { + #[doc(hidden)] + pub use crate::sp_runtime::traits::ValidateUnsigned; + #[doc(hidden)] + pub use crate::sp_runtime::transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, + }; +} + +pub use self::{ + dispatch::{Callable, Parameter}, + hash::{ + Blake2_128, Blake2_128Concat, Blake2_256, Hashable, Identity, ReversibleStorageHasher, + StorageHasher, Twox128, Twox256, Twox64Concat, + }, + storage::{ + bounded_vec::{BoundedSlice, BoundedVec}, + migration, + weak_bounded_vec::WeakBoundedVec, + IterableStorageDoubleMap, IterableStorageMap, IterableStorageNMap, StorageDoubleMap, + StorageMap, StorageNMap, StoragePrefixedMap, StorageValue, + }, }; -pub use self::dispatch::{Parameter, Callable}; -pub use sp_runtime::{self, ConsensusEngineId, print, traits::Printable}; +pub use sp_runtime::{self, print, traits::Printable, ConsensusEngineId}; + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::TypeId; + +/// A unified log target for support operations. +pub const LOG_TARGET: &'static str = "runtime::frame-support"; /// A type that cannot be instantiated. -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Never {} +/// A pallet identifier. These are per pallet and should be stored in a registry somewhere. +#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct PalletId(pub [u8; 8]); + +impl TypeId for PalletId { + const TYPE_ID: [u8; 4] = *b"modl"; +} + +/// Generate a new type alias for [`storage::types::StorageValue`], +/// [`storage::types::StorageMap`], [`storage::types::StorageDoubleMap`] +/// and [`storage::types::StorageNMap`]. +/// +/// Useful for creating a *storage-like* struct for test and migrations. +/// +/// ``` +/// # use frame_support::generate_storage_alias; +/// use frame_support::codec; +/// use frame_support::Twox64Concat; +/// // generate a storage value with type u32. 
+/// generate_storage_alias!(Prefix, StorageName => Value); +/// +/// // generate a double map from `(u32, u32)` (with hashers `Twox64Concat` for each key) +/// // to `Vec` +/// generate_storage_alias!( +/// OtherPrefix, OtherStorageName => DoubleMap< +/// (u32, Twox64Concat), +/// (u32, Twox64Concat), +/// Vec +/// > +/// ); +/// +/// // optionally specify the query type +/// use frame_support::pallet_prelude::{ValueQuery, OptionQuery}; +/// generate_storage_alias!(Prefix, ValueName => Value); +/// generate_storage_alias!( +/// Prefix, SomeStorageName => DoubleMap< +/// (u32, Twox64Concat), +/// (u32, Twox64Concat), +/// Vec, +/// ValueQuery +/// > +/// ); +/// +/// // generate a map from `Config::AccountId` (with hasher `Twox64Concat`) to `Vec` +/// trait Config { type AccountId: codec::FullCodec; } +/// generate_storage_alias!( +/// Prefix, GenericStorage => Map<(T::AccountId, Twox64Concat), Vec> +/// ); +/// # fn main() {} +/// ``` +#[macro_export] +macro_rules! generate_storage_alias { + // without generic for $name. + ($pallet:ident, $name:ident => Map<($key:ty, $hasher:ty), $value:ty $(, $querytype:ty)?>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageMap< + [<$name Instance>], + $hasher, + $key, + $value, + $( $querytype )? + >; + } + }; + ( + $pallet:ident, + $name:ident + => DoubleMap<($key1:ty, $hasher1:ty), ($key2:ty, $hasher2:ty), $value:ty $(, $querytype:ty)?> + ) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageDoubleMap< + [<$name Instance>], + $hasher1, + $key1, + $hasher2, + $key2, + $value, + $( $querytype )? + >; + } + }; + ( + $pallet:ident, + $name:ident + => NMap, $value:ty $(, $querytype:ty)?> + ) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageNMap< + [<$name Instance>], + ( + $( $crate::storage::types::Key<$hasher, $key>, )+ + ), + $value, + $( $querytype )? + >; + } + }; + ($pallet:ident, $name:ident => Value<$value:ty $(, $querytype:ty)?>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageValue< + [<$name Instance>], + $value, + $( $querytype )? + >; + } + }; + // with generic for $name. + ( + $pallet:ident, + $name:ident<$t:ident : $bounds:tt> + => Map<($key:ty, $hasher:ty), $value:ty $(, $querytype:ty)?> + ) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageMap< + [<$name Instance>], + $key, + $hasher, + $value, + $( $querytype )? + >; + } + }; + ( + $pallet:ident, + $name:ident<$t:ident : $bounds:tt> + => DoubleMap<($key1:ty, $hasher1:ty), ($key2:ty, $hasher2:ty), $value:ty $(, $querytype:ty)?> + ) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageDoubleMap< + [<$name Instance>], + $key1, + $hasher1, + $key2, + $hasher2, + $value, + $( $querytype )? + >; + } + }; + ( + $pallet:ident, + $name:ident<$t:ident : $bounds:tt> + => NMap<$(($key:ty, $hasher:ty),)+ $value:ty $(, $querytype:ty)?> + ) => { + $crate::paste::paste! 
{ + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageNMap< + [<$name Instance>], + ( + $( $crate::storage::types::Key<$hasher, $key>, )+ + ), + $value, + $( $querytype )? + >; + } + }; + ($pallet:ident, $name:ident<$t:ident : $bounds:tt> => Value<$value:ty $(, $querytype:ty)?>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageValue< + [<$name Instance>], + $value, + $( $querytype )? + >; + } + }; + // helper used in all arms. + (@GENERATE_INSTANCE_STRUCT $pallet:ident, $name:ident) => { + $crate::paste::paste! { + struct [<$name Instance>]; + impl $crate::traits::StorageInstance for [<$name Instance>] { + fn pallet_prefix() -> &'static str { stringify!($pallet) } + const STORAGE_PREFIX: &'static str = stringify!($name); + } + } + }; +} + /// Create new implementations of the [`Get`](crate::traits::Get) trait. /// -/// The so-called parameter type can be created in three different ways: +/// The so-called parameter type can be created in four different ways: /// -/// - Using `const` to create a parameter type that provides a `const` getter. -/// It is required that the `value` is const. +/// - Using `const` to create a parameter type that provides a `const` getter. It is required that +/// the `value` is const. /// /// - Declare the parameter type without `const` to have more freedom when creating the value. /// -/// - Using `storage` to create a storage parameter type. This type is special as it tries to -/// load the value from the storage under a fixed key. If the value could not be found in the -/// storage, the given default value will be returned. It is required that the value implements -/// [`Encode`](codec::Encode) and [`Decode`](codec::Decode). The key for looking up the value -/// in the storage is built using the following formular: +/// - Using `storage` to create a storage parameter type. This type is special as it tries to load +/// the value from the storage under a fixed key. If the value could not be found in the storage, +/// the given default value will be returned. It is required that the value implements +/// [`Encode`](codec::Encode) and [`Decode`](codec::Decode). The key for looking up the value in +/// the storage is built using the following formula: /// /// `twox_128(":" ++ NAME ++ ":")` where `NAME` is the name that is passed as type name. /// +/// - Using `static` to create a static parameter type. Its value is being provided by a static +/// variable with the equivalent name in `UPPER_SNAKE_CASE`. An additional `set` function is +/// provided in this case to alter the static variable. 
**This is intended for testing ONLY and is +/// ONLY available when `std` is enabled.** +/// /// # Examples /// /// ``` @@ -115,12 +326,14 @@ pub enum Never {} /// /// Visibility of the type is optional /// OtherArgument: u64 = non_const_expression(); /// pub storage StorageArgument: u64 = 5; +/// pub static StaticArgument: u32 = 7; /// } /// /// trait Config { /// type Parameter: Get; /// type OtherParameter: Get; /// type StorageParameter: Get; +/// type StaticParameter: Get; /// } /// /// struct Runtime; @@ -128,7 +341,10 @@ pub enum Never {} /// type Parameter = Argument; /// type OtherParameter = OtherArgument; /// type StorageParameter = StorageArgument; +/// type StaticParameter = StaticArgument; /// } +/// +/// // In testing, `StaticArgument` can be altered later: `StaticArgument::set(8)`. /// ``` /// /// # Invalid example: @@ -143,7 +359,6 @@ pub enum Never {} /// pub const Argument: u64 = non_const_expression(); /// } /// ``` - #[macro_export] macro_rules! parameter_types { ( @@ -187,7 +402,7 @@ macro_rules! parameter_types { impl> $crate::traits::Get for $name { fn get() -> I { - I::from($value) + I::from(Self::get()) } } }; @@ -201,13 +416,14 @@ macro_rules! parameter_types { impl> $crate::traits::Get for $name { fn get() -> I { - I::from($value) + I::from(Self::get()) } } }; (IMPL_STORAGE $name:ident, $type:ty, $value:expr) => { impl $name { /// Returns the key for this parameter type. + #[allow(unused)] pub fn key() -> [u8; 16] { $crate::sp_io::hashing::twox_128( concat!(":", stringify!($name), ":").as_bytes() @@ -218,6 +434,7 @@ macro_rules! parameter_types { /// /// This needs to be executed in an externalities provided /// environment. + #[allow(unused)] pub fn set(value: &$type) { $crate::storage::unhashed::put(&Self::key(), value); } @@ -226,6 +443,7 @@ macro_rules! parameter_types { /// /// This needs to be executed in an externalities provided /// environment. + #[allow(unused)] pub fn get() -> $type { $crate::storage::unhashed::get(&Self::key()).unwrap_or_else(|| $value) } @@ -236,7 +454,67 @@ macro_rules! parameter_types { I::from(Self::get()) } } - } + }; + ( + $( #[ $attr:meta ] )* + $vis:vis static $name:ident: $type:ty = $value:expr; + $( $rest:tt )* + ) => ( + $crate::parameter_types_impl_thread_local!( + $( #[ $attr ] )* + $vis static $name: $type = $value; + ); + $crate::parameter_types!( $( $rest )* ); + ); +} + +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! parameter_types_impl_thread_local { + ( $( $any:tt )* ) => { + compile_error!("static parameter types is only available in std and for testing."); + }; +} + +#[cfg(feature = "std")] +#[macro_export] +macro_rules! parameter_types_impl_thread_local { + ( + $( + $( #[ $attr:meta ] )* + $vis:vis static $name:ident: $type:ty = $value:expr; + )* + ) => { + $crate::parameter_types_impl_thread_local!( + IMPL_THREAD_LOCAL $( $vis, $name, $type, $value, )* + ); + $crate::paste::item! { + $crate::parameter_types!( + $( + $( #[ $attr ] )* + $vis $name: $type = [<$name:snake:upper>].with(|v| v.borrow().clone()); + )* + ); + $( + impl $name { + /// Set the internal value. + pub fn set(t: $type) { + [<$name:snake:upper>].with(|v| *v.borrow_mut() = t); + } + } + )* + } + }; + (IMPL_THREAD_LOCAL $( $vis:vis, $name:ident, $type:ty, $value:expr, )* ) => { + $crate::paste::item! { + thread_local! { + $( + pub static [<$name:snake:upper>]: std::cell::RefCell<$type> = + std::cell::RefCell::new($value); + )* + } + } + }; } /// Macro for easily creating a new implementation of both the `Get` and `Contains` traits. 
Use @@ -256,35 +534,65 @@ macro_rules! ord_parameter_types { ); () => (); (IMPL $name:ident , $type:ty , $value:expr) => { - impl $crate::traits::Contains<$type> for $name { + impl $crate::traits::SortedMembers<$type> for $name { fn contains(t: &$type) -> bool { &$value == t } fn sorted_members() -> $crate::sp_std::prelude::Vec<$type> { vec![$value] } fn count() -> usize { 1 } #[cfg(feature = "runtime-benchmarks")] fn add(_: &$type) {} } + impl $crate::traits::Contains<$type> for $name { + fn contains(t: &$type) -> bool { &$value == t } + } + } +} + +/// Print out a formatted message. +/// +/// # Example +/// +/// ``` +/// frame_support::runtime_print!("my value is {}", 3); +/// ``` +#[macro_export] +macro_rules! runtime_print { + ($($arg:tt)+) => { + { + use core::fmt::Write; + let mut w = $crate::sp_std::Writer::default(); + let _ = core::write!(&mut w, $($arg)+); + $crate::sp_io::misc::print_utf8(&w.inner()) + } } } +/// Print out the debuggable type. +pub fn debug(data: &impl sp_std::fmt::Debug) { + runtime_print!("{:?}", data); +} + #[doc(inline)] pub use frame_support_procedural::{ - decl_storage, construct_runtime, transactional, RuntimeDebugNoBound + construct_runtime, decl_storage, transactional, RuntimeDebugNoBound, }; +#[doc(hidden)] +pub use frame_support_procedural::__generate_dummy_part_checker; + /// Derive [`Clone`] but do not bound any generic. /// /// This is useful for type generic over runtime: /// ``` /// # use frame_support::CloneNoBound; -/// trait Trait { -/// type C: Clone; +/// trait Config { +/// type C: Clone; /// } /// /// // Foo implements [`Clone`] because `C` bounds [`Clone`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Clone`]. /// #[derive(CloneNoBound)] -/// struct Foo { -/// c: T::C, +/// struct Foo { +/// c: T::C, /// } /// ``` pub use frame_support_procedural::CloneNoBound; @@ -294,15 +602,15 @@ pub use frame_support_procedural::CloneNoBound; /// This is useful for type generic over runtime: /// ``` /// # use frame_support::{EqNoBound, PartialEqNoBound}; -/// trait Trait { -/// type C: Eq; +/// trait Config { +/// type C: Eq; /// } /// /// // Foo implements [`Eq`] because `C` bounds [`Eq`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Eq`]. /// #[derive(PartialEqNoBound, EqNoBound)] -/// struct Foo { -/// c: T::C, +/// struct Foo { +/// c: T::C, /// } /// ``` pub use frame_support_procedural::EqNoBound; @@ -312,15 +620,15 @@ pub use frame_support_procedural::EqNoBound; /// This is useful for type generic over runtime: /// ``` /// # use frame_support::PartialEqNoBound; -/// trait Trait { -/// type C: PartialEq; +/// trait Config { +/// type C: PartialEq; /// } /// /// // Foo implements [`PartialEq`] because `C` bounds [`PartialEq`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`PartialEq`]. /// #[derive(PartialEqNoBound)] -/// struct Foo { -/// c: T::C, +/// struct Foo { +/// c: T::C, /// } /// ``` pub use frame_support_procedural::PartialEqNoBound; @@ -331,19 +639,38 @@ pub use frame_support_procedural::PartialEqNoBound; /// ``` /// # use frame_support::DebugNoBound; /// # use core::fmt::Debug; -/// trait Trait { -/// type C: Debug; +/// trait Config { +/// type C: Debug; /// } /// /// // Foo implements [`Debug`] because `C` bounds [`Debug`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Debug`]. 
/// #[derive(DebugNoBound)] -/// struct Foo { -/// c: T::C, +/// struct Foo { +/// c: T::C, /// } /// ``` pub use frame_support_procedural::DebugNoBound; +/// Derive [`Default`] but do not bound any generic. +/// +/// This is useful for type generic over runtime: +/// ``` +/// # use frame_support::DefaultNoBound; +/// # use core::default::Default; +/// trait Config { +/// type C: Default; +/// } +/// +/// // Foo implements [`Default`] because `C` bounds [`Default`]. +/// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Default`]. +/// #[derive(DefaultNoBound)] +/// struct Foo { +/// c: T::C, +/// } +/// ``` +pub use frame_support_procedural::DefaultNoBound; + /// Assert the annotated function is executed within a storage transaction. /// /// The assertion is enabled for native execution and when `debug_assertions` are enabled. @@ -381,8 +708,8 @@ pub use frame_support_procedural::require_transactional; #[macro_export] macro_rules! fail { ( $y:expr ) => {{ - return Err($y.into()); - }} + return Err($y.into()) + }}; } /// Evaluate `$x:expr` and if not true return `Err($y:expr)`. @@ -394,7 +721,7 @@ macro_rules! ensure { if !$x { $crate::fail!($y); } - }} + }}; } /// Evaluate an expression, assert it returns an expected `Err` value and that @@ -402,7 +729,6 @@ macro_rules! ensure { /// /// Used as `assert_noop(expression_to_assert, expected_error_expression)`. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_noop { ( $x:expr, @@ -411,18 +737,32 @@ macro_rules! assert_noop { let h = $crate::storage_root(); $crate::assert_err!($x, $y); assert_eq!(h, $crate::storage_root()); - } + }; +} + +/// Evaluate any expression and assert that runtime storage has not been mutated +/// (i.e. expression is a storage no-operation). +/// +/// Used as `assert_storage_noop(expression_to_assert)`. +#[macro_export] +macro_rules! assert_storage_noop { + ( + $x:expr + ) => { + let h = $crate::storage_root(); + $x; + assert_eq!(h, $crate::storage_root()); + }; } /// Assert an expression returns an error specified. /// /// Used as `assert_err!(expression_to_assert, expected_error_expression)` #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err { ( $x:expr , $y:expr $(,)? ) => { assert_eq!($x, Err($y.into())); - } + }; } /// Assert an expression returns an error specified. @@ -430,16 +770,14 @@ macro_rules! assert_err { /// This can be used on`DispatchResultWithPostInfo` when the post info should /// be ignored. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err_ignore_postinfo { ( $x:expr , $y:expr $(,)? ) => { $crate::assert_err!($x.map(|_| ()).map_err(|e| e.error), $y); - } + }; } /// Assert an expression returns error with the given weight. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_err_with_weight { ($call:expr, $err:expr, $weight:expr $(,)? ) => { if let Err(dispatch_err_with_post) = $call { @@ -448,7 +786,7 @@ macro_rules! assert_err_with_weight { } else { panic!("expected Err(_), got Ok(_).") } - } + }; } /// Panic if an expression doesn't evaluate to `Ok`. @@ -456,7 +794,6 @@ macro_rules! assert_err_with_weight { /// Used as `assert_ok!(expression_to_assert, expected_ok_expression)`, /// or `assert_ok!(expression_to_assert)` which would assert against `Ok(())`. #[macro_export] -#[cfg(feature = "std")] macro_rules! assert_ok { ( $x:expr $(,)? ) => { let is = $x; @@ -467,42 +804,56 @@ macro_rules! assert_ok { }; ( $x:expr, $y:expr $(,)? 
) => { assert_eq!($x, Ok($y)); - } + }; } #[cfg(feature = "std")] #[doc(hidden)] -pub use serde::{Serialize, Deserialize}; +pub use serde::{Deserialize, Serialize}; #[cfg(test)] -mod tests { +pub mod tests { use super::*; - use codec::{Codec, EncodeLike}; - use frame_metadata::{ - DecodeDifferent, StorageEntryMetadata, StorageMetadata, StorageEntryType, - StorageEntryModifier, DefaultByteGetter, StorageHasher, + use crate::metadata::{ + PalletStorageMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + StorageHasher, }; - use sp_std::{marker::PhantomData, result}; + use codec::{Codec, EncodeLike}; use sp_io::TestExternalities; + use sp_std::result; + + /// A PalletInfo implementation which just panics. + pub struct PanicPalletInfo; - pub trait Trait { - type BlockNumber: Codec + EncodeLike + Default; + impl crate::traits::PalletInfo for PanicPalletInfo { + fn index() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + } + + pub trait Config: 'static { + type BlockNumber: Codec + EncodeLike + Default + TypeInfo; type Origin; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: crate::traits::Get; } mod module { #![allow(dead_code)] - use super::Trait; + use super::Config; decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } } use self::module::Module; decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { pub Data get(fn data) build(|_| vec![(15u32, 42u64)]): map hasher(twox_64_concat) u32 => u64; pub OptionLinkedMap: map hasher(blake2_128_concat) u32 => Option; @@ -524,9 +875,11 @@ mod tests { } struct Test; - impl Trait for Test { + impl Config for Test { type BlockNumber = u32; type Origin = u32; + type PalletInfo = PanicPalletInfo; + type DbWeight = (); } fn new_test_ext() -> TestExternalities { @@ -535,7 +888,9 @@ mod tests { type Map = Data; - trait Sorted { fn sorted(self) -> Self; } + trait Sorted { + fn sorted(self) -> Self; + } impl Sorted for Vec { fn sorted(mut self) -> Self { self.sort(); @@ -589,13 +944,15 @@ mod tests { DataDM::insert(1, 0, 2); DataDM::insert(1, 1, 3); - let get_all = || vec![ - DataDM::get(0, 1), - DataDM::get(1, 0), - DataDM::get(1, 1), - DataDM::get(2, 0), - DataDM::get(2, 1), - ]; + let get_all = || { + vec![ + DataDM::get(0, 1), + DataDM::get(1, 0), + DataDM::get(1, 1), + DataDM::get(2, 0), + DataDM::get(2, 1), + ] + }; assert_eq!(get_all(), vec![1, 2, 3, 0, 0]); // Two existing @@ -661,15 +1018,24 @@ mod tests { Map::mutate(&key, |val| { *val = 15; }); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 15)]); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 15)] + ); Map::mutate(&key, |val| { *val = 17; }); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 17)]); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 17)] + ); // remove first Map::remove(&key); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43)]); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43)] + ); // remove last from the list Map::remove(&(key - 2)); @@ -712,12 +1078,14 @@ mod tests { DoubleMap::insert(&key1, &(key2 + 1), &4u64); DoubleMap::insert(&(key1 + 1), 
&key2, &4u64); DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64); - DoubleMap::remove_prefix(&key1); + assert!(matches!( + DoubleMap::remove_prefix(&key1, None), + sp_io::KillStorageResult::AllRemoved(0), // all in overlay + )); assert_eq!(DoubleMap::get(&key1, &key2), 0u64); assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); - }); } @@ -768,10 +1136,13 @@ mod tests { assert_eq!(DoubleMap::get(&key1, key2), 1); // no-op if `Err` - assert_noop!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { - *v = Some(2); - Err("nah") - }), "nah"); + assert_noop!( + DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { + *v = Some(2); + Err("nah") + }), + "nah" + ); // removed if mutated to`None` assert_ok!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { @@ -782,134 +1153,109 @@ mod tests { }); } - const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - prefix: DecodeDifferent::Encode("Test"), - entries: DecodeDifferent::Encode( - &[ + fn expected_metadata() -> PalletStorageMetadata { + PalletStorageMetadata { + prefix: "Test", + entries: vec![ StorageEntryMetadata { - name: DecodeDifferent::Encode("Data"), + name: "Data", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Twox64Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u64"), - unused: false, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Twox64Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructData(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("OptionLinkedMap"), + name: "OptionLinkedMap", modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u32"), - unused: false, + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructOptionLinkedMap(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData"), + name: "GenericData", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Identity, - key: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - unused: false + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Identity], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData2"), + name: "GenericData2", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - unused: false + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: 
scale_info::meta_type::(), + value: scale_info::meta_type::(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("DataDM"), + name: "DataDM", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Twox64Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u64"), - key2_hasher: StorageHasher::Blake2_128Concat, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Twox64Concat, StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericDataDM"), + name: "GenericDataDM", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Identity, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Identity], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData2DM"), + name: "GenericData2DM", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Twox64Concat, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("AppendableDM"), + name: "AppendableDM", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("Vec"), - key2_hasher: StorageHasher::Blake2_128Concat, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Blake2_128Concat, + ], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], }, - ] - ), - }; + ], + } + } #[test] fn store_metadata() { let metadata = Module::::storage_metadata(); - 
pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); + pretty_assertions::assert_eq!(expected_metadata(), metadata); } parameter_types! { @@ -927,4 +1273,1150 @@ mod tests { assert_eq!(300, StorageParameter::get()); }) } + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub static Members: Vec = vec![]; + pub const Foo: Option = None; + } } + +/// Prelude to be used alongside pallet macro, for ease of use. +pub mod pallet_prelude { + #[cfg(feature = "std")] + pub use crate::traits::GenesisBuild; + pub use crate::{ + dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo, Parameter}, + ensure, + inherent::{InherentData, InherentIdentifier, ProvideInherent}, + storage, + storage::{ + bounded_vec::BoundedVec, + types::{ + CountedStorageMap, Key as NMapKey, OptionQuery, StorageDoubleMap, StorageMap, + StorageNMap, StorageValue, ValueQuery, + }, + }, + traits::{ + ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, IsType, + PalletInfoAccess, StorageInfoTrait, + }, + weights::{DispatchClass, Pays, Weight}, + Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, + PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, + }; + pub use codec::{Decode, Encode, MaxEncodedLen}; + pub use sp_runtime::{ + traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, + transaction_validity::{ + InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, + TransactionTag, TransactionValidity, TransactionValidityError, UnknownTransaction, + ValidTransaction, + }, + }; + pub use sp_std::marker::PhantomData; +} + +/// `pallet` attribute macro allows to define a pallet to be used in `construct_runtime!`. +/// +/// It is define by a module item: +/// ```ignore +/// #[pallet] +/// pub mod pallet { +/// ... +/// } +/// ``` +/// +/// Inside the module the macro will parse item with the attribute: `#[pallet::*]`, some +/// attributes are mandatory, some other optional. +/// +/// The attribute are explained with the syntax of non instantiable pallets, to see how pallet +/// with instance work see below example. +/// +/// Note various type can be automatically imported using pallet_prelude in frame_support and +/// frame_system: +/// ```ignore +/// #[pallet] +/// pub mod pallet { +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// ... +/// } +/// ``` +/// +/// # Config trait: `#[pallet::config]` mandatory +/// +/// The trait defining generics of the pallet. +/// +/// Item must be defined as +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits +/// $optional_where_clause +/// { +/// ... +/// } +/// ``` +/// I.e. a regular trait definition named `Config`, with supertrait `frame_system::Config`, +/// optionally other supertrait and where clause. +/// +/// The associated type `Event` is reserved, if defined it must bounds `From` and +/// `IsType<::Event>`, see `#[pallet::event]` for more +/// information. 
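+///
+/// As a minimal sketch (assuming the pallet also defines an `Event` enum with
+/// `#[pallet::event]`), the reserved associated type is declared with both required
+/// bounds like so:
+/// ```ignore
+/// #[pallet::config]
+/// pub trait Config: frame_system::Config {
+///     /// The overarching event type; both bounds below are required by the macro.
+///     type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
+/// }
+/// ```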
+/// +/// To put `Get` associated type into metadatas, use the attribute `#[pallet::constant]`, e.g.: +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] +/// type Foo: Get; +/// } +/// ``` +/// +/// To bypass the `frame_system::Config` supertrait check, use the attribute +/// `#[pallet::disable_frame_system_supertrait_check]`, e.g.: +/// ```ignore +/// #[pallet::config] +/// #[pallet::disable_frame_system_supertrait_check] +/// pub trait Config: pallet_timestamp::Config {} +/// ``` +/// +/// ### Macro expansion: +/// +/// The macro expand pallet constant metadata with the information given by +/// `#[pallet::constant]`. +/// +/// # Pallet struct placeholder: `#[pallet::pallet]` mandatory +/// +/// The placeholder struct, on which is implemented pallet informations. +/// +/// Item must be defined as followed: +/// ```ignore +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// ``` +/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. +/// +/// To generate a `Store` trait associating all storages, use the attribute +/// `#[pallet::generate_store($vis trait Store)]`, e.g.: +/// ```ignore +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(_); +/// ``` +/// More precisely the store trait contains an associated type for each storage. It is +/// implemented for `Pallet` allowing to access the storage from pallet struct. +/// +/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using +/// `::Foo`. +/// +/// To generate the full storage info (used for PoV calculation) use the attribute +/// `#[pallet::set_storage_max_encoded_len]`, e.g.: +/// ```ignore +/// #[pallet::pallet] +/// #[pallet::set_storage_max_encoded_len] +/// pub struct Pallet(_); +/// ``` +/// +/// This require all storage to implement the trait [`traits::StorageInfoTrait`], thus all keys +/// and value types must bound [`pallet_prelude::MaxEncodedLen`]. +/// +/// As the macro implements [`traits::GetStorageVersion`], the current storage version needs to +/// be communicated to the macro. This can be done by using the `storage_version` attribute: +/// +/// ```ignore +/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); +/// +/// #[pallet::pallet] +/// #[pallet::storage_version(STORAGE_VERSION)] +/// pub struct Pallet(_); +/// ``` +/// +/// If not present, the current storage version is set to the default value. +/// +/// ### Macro expansion: +/// +/// The macro add this attribute to the struct definition: +/// ```ignore +/// #[derive( +/// frame_support::CloneNoBound, +/// frame_support::EqNoBound, +/// frame_support::PartialEqNoBound, +/// frame_support::RuntimeDebugNoBound, +/// )] +/// ``` +/// and replace the type `_` by `PhantomData`. +/// +/// It implements on pallet: +/// * [`traits::GetStorageVersion`] +/// * [`traits::OnGenesis`]: contains some logic to write pallet version into storage. +/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. +/// +/// It declare `type Module` type alias for `Pallet`, used by [`construct_runtime`]. +/// +/// It implements [`traits::PalletInfoAccess`] on `Pallet` to ease access to pallet +/// informations given by [`frame_support::traits::PalletInfo`]. +/// (The implementation use the associated type `frame_system::Config::PalletInfo`). +/// +/// It implements [`traits::StorageInfoTrait`] on `Pallet` which give information about all +/// storages. 
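+///
+/// As a hedged illustration of what these generated implementations expose (the names
+/// follow the traits linked above; the pallet itself is assumed to be defined as shown
+/// earlier):
+/// ```ignore
+/// // Name and index assigned to the pallet by `construct_runtime!`.
+/// let name = <Pallet<T> as PalletInfoAccess>::name();
+/// let index = <Pallet<T> as PalletInfoAccess>::index();
+/// // Storage version declared via `#[pallet::storage_version(..)]`, or the default one.
+/// let version = Pallet::<T>::current_storage_version();
+/// ```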
+/// +/// If the attribute generate_store is set then the macro creates the trait `Store` and +/// implements it on `Pallet`. +/// +/// If the attribute set_storage_max_encoded_len is set then the macro call +/// [`traits::StorageInfoTrait`] for each storage in the implementation of +/// [`traits::StorageInfoTrait`] for the pallet. +/// Otherwise it implements [`traits::StorageInfoTrait`] for the pallet using the +/// [`traits::PartialStorageInfoTrait`] implementation of storages. +/// +/// # Hooks: `#[pallet::hooks]` optional +/// +/// Implementation of `Hooks` on `Pallet` allowing to define some specific pallet logic. +/// +/// Item must be defined as +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks> for Pallet $optional_where_clause { +/// } +/// ``` +/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait +/// `Hooks>` (they are defined in preludes), for the type `Pallet` +/// and with an optional where clause. +/// +/// If no `#[pallet::hooks]` exists, then a default implementation corresponding to the +/// following code is automatically generated: +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks> for Pallet {} +/// ``` +/// +/// ### Macro expansion: +/// +/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, +/// `OffchainWorker`, `IntegrityTest` using `Hooks` implementation. +/// +/// NOTE: OnRuntimeUpgrade is implemented with `Hooks::on_runtime_upgrade` and some additional +/// logic. E.g. logic to write pallet version into storage. +/// +/// NOTE: The macro also adds some tracing logic when implementing the above traits. The +/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +/// +/// # Call: `#[pallet::call]` optional +/// +/// Implementation of pallet dispatchables. +/// +/// Item must be defined as: +/// ```ignore +/// #[pallet::call] +/// impl Pallet { +/// /// $some_doc +/// #[pallet::weight($ExpressionResultingInWeight)] +/// pub fn $fn_name( +/// origin: OriginFor, +/// $some_arg: $some_type, +/// // or with compact attribute: #[pallet::compact] $some_arg: $some_type, +/// ... +/// ) -> DispatchResultWithPostInfo { // or `-> DispatchResult` +/// ... +/// } +/// ... +/// } +/// ``` +/// I.e. a regular type implementation, with generic `T: Config`, on type `Pallet`, with +/// optional where clause. +/// +/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, +/// the first argument must be `origin: OriginFor`, compact encoding for argument can be +/// used using `#[pallet::compact]`, function must return `DispatchResultWithPostInfo` or +/// `DispatchResult`. +/// +/// All arguments must implement `Debug`, `PartialEq`, `Eq`, `Decode`, `Encode`, `Clone`. For +/// ease of use, bound the trait `Member` available in frame_support::pallet_prelude. +/// +/// If no `#[pallet::call]` exists, then a default implementation corresponding to the +/// following code is automatically generated: +/// ```ignore +/// #[pallet::call] +/// impl Pallet {} +/// ``` +/// +/// **WARNING**: modifying dispatchables, changing their order, removing some must be done with +/// care. Indeed this will change the outer runtime call type (which is an enum with one +/// variant per pallet), this outer runtime call can be stored on-chain (e.g. in +/// pallet-scheduler). Thus migration might be needed. +/// +/// ### Macro expansion +/// +/// The macro create an enum `Call` with one variant per dispatchable. 
This enum implements: +/// `Clone`, `Eq`, `PartialEq`, `Debug` (with stripped implementation in `not("std")`), +/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, `UnfilteredDispatchable`. +/// +/// The macro implement on `Pallet`, the `Callable` trait and a function `call_functions` which +/// returns the dispatchable metadatas. +/// +/// # Extra constants: `#[pallet::extra_constants]` optional +/// +/// Allow to define some extra constants to put into constant metadata. +/// +/// Item must be defined as: +/// ```ignore +/// #[pallet::extra_constants] +/// impl Pallet where $optional_where_clause { +/// /// $some_doc +/// $vis fn $fn_name() -> $some_return_type { +/// ... +/// } +/// ... +/// } +/// ``` +/// I.e. a regular rust implement block with some optional where clause and functions with 0 +/// args, 0 generics, and some return type. +/// +/// ### Macro expansion +/// +/// The macro add some extra constant to pallet constant metadata. +/// +/// # Error: `#[pallet::error]` optional +/// +/// Allow to define an error type to be return from dispatchable on error. +/// This error type informations are put into metadata. +/// +/// Item must be defined as: +/// ```ignore +/// #[pallet::error] +/// pub enum Error { +/// /// $some_optional_doc +/// $SomeFieldLessVariant, +/// ... +/// } +/// ``` +/// I.e. a regular rust enum named `Error`, with generic `T` and fieldless variants. +/// The generic `T` mustn't bound anything and where clause is not allowed. But bounds and +/// where clause shouldn't be needed for any usecase. +/// +/// ### Macro expansion +/// +/// The macro implements `Debug` trait and functions `as_u8` using variant position, and +/// `as_str` using variant doc. +/// +/// The macro implements `From>` for `&'static str`. +/// The macro implements `From>` for `DispatchError`. +/// +/// # Event: `#[pallet::event]` optional +/// +/// Allow to define pallet events, pallet events are stored in the block when they deposited +/// (and removed in next block). +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::event] +/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional +/// pub enum Event<$some_generic> $optional_where_clause { +/// /// Some doc +/// $SomeName($SomeType, $YetanotherType, ...), +/// ... +/// } +/// ``` +/// I.e. an enum (with named or unnamed fields variant), named Event, with generic: none or `T` +/// or `T: Config`, and optional where clause. +/// +/// Each field must implement `Clone`, `Eq`, `PartialEq`, `Encode`, `Decode`, and `Debug` (on +/// std only). +/// For ease of use, bound the trait `Member` available in frame_support::pallet_prelude. +/// +/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generate a helper +/// function on `Pallet` to deposit event. +/// +/// NOTE: For instantiable pallet, event must be generic over T and I. +/// +/// ### Macro expansion: +/// +/// Macro will add on enum `Event` the attributes: +/// * `#[derive(frame_support::CloneNoBound)]`, +/// * `#[derive(frame_support::EqNoBound)]`, +/// * `#[derive(frame_support::PartialEqNoBound)]`, +/// * `#[derive(codec::Encode)]`, +/// * `#[derive(codec::Decode)]`, +/// * `#[derive(frame_support::RuntimeDebugNoBound)]` +/// +/// Macro implements `From>` for (). +/// +/// Macro implements metadata function on `Event` returning the `EventMetadata`. +/// +/// If `#[pallet::generate_deposit]` then macro implement `fn deposit_event` on `Pallet`. 
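+///
+/// A minimal usage sketch, assuming `#[pallet::generate_deposit(pub(super) fn
+/// deposit_event)]` was used and the `Event` enum has a `Something(u32)` variant:
+/// ```ignore
+/// #[pallet::call]
+/// impl<T: Config> Pallet<T> {
+///     #[pallet::weight(0)]
+///     pub fn do_something(origin: OriginFor<T>) -> DispatchResult {
+///         let _who = ensure_signed(origin)?;
+///         // The generated helper converts the pallet event into the runtime event
+///         // and writes it into the events of the current block.
+///         Self::deposit_event(Event::Something(42));
+///         Ok(())
+///     }
+/// }
+/// ```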
+/// +/// # Storage: `#[pallet::storage]` optional +/// +/// Allow to define some abstract storage inside runtime storage and also set its metadata. +/// This attribute can be used multiple times. +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn $getter_name)] // optional +/// $vis type $StorageName<$some_generic> $optional_where_clause +/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; +/// ``` +/// or with unnamed generic +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn $getter_name)] // optional +/// $vis type $StorageName<$some_generic> $optional_where_clause +/// = $StorageType<_, $some_generics, ...>; +/// ``` +/// I.e. it must be a type alias, with generics: `T` or `T: Config`, aliased type must be one +/// of `StorageValue`, `StorageMap` or `StorageDoubleMap` (defined in frame_support). +/// The generic arguments of the storage type can be given in two manner: named and unnamed. +/// For named generic argument: the name for each argument is the one as define on the storage +/// struct: +/// * [`pallet_prelude::StorageValue`] expect `Value` and optionally `QueryKind` and `OnEmpty`, +/// * [`pallet_prelude::StorageMap`] expect `Hasher`, `Key`, `Value` and optionally `QueryKind` +/// and `OnEmpty`, +/// * [`pallet_prelude::CountedStorageMap`] expect `Hasher`, `Key`, `Value` and optionally +/// `QueryKind` and `OnEmpty`, +/// * [`pallet_prelude::StorageDoubleMap`] expect `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` +/// and optionally `QueryKind` and `OnEmpty`. +/// +/// For unnamed generic argument: Their first generic must be `_` as it is replaced by the +/// macro and other generic must declared as a normal declaration of type generic in rust. +/// +/// The Prefix generic written by the macro is generated using +/// `PalletInfo::name::>()` and the name of the storage type. +/// E.g. if runtime names the pallet "MyExample" then the storage `type Foo = ...` use the +/// prefix: `Twox128(b"MyExample") ++ Twox128(b"Foo")`. +/// +/// For the `CountedStorageMap` variant, the Prefix also implements +/// `CountedStorageMapInstance`. It associate a `CounterPrefix`, which is implemented same as +/// above, but the storage prefix is prepend with `"CounterFor"`. +/// E.g. if runtime names the pallet "MyExample" then the storage +/// `type Foo = CountedStorageaMap<...>` will store its counter at the prefix: +/// `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. +/// +/// E.g: +/// ```ignore +/// #[pallet::storage] +/// pub(super) type MyStorage = StorageMap; +/// ``` +/// In this case the final prefix used by the map is +/// `Twox128(b"MyExample") ++ Twox128(b"OtherName")`. +/// +/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows to define a +/// getter function on `Pallet`. +/// +/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allow to define the storage +/// prefix to use, see how `Prefix` generic is implemented above. +/// +/// E.g: +/// ```ignore +/// #[pallet::storage] +/// #[pallet::storage_prefix = "foo"] +/// #[pallet::getter(fn my_storage)] +/// pub(super) type MyStorage = StorageMap; +/// ``` +/// or +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn my_storage)] +/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; +/// ``` +/// +/// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. 
+/// +/// E.g: +/// ```ignore +/// #[cfg(feature = "my-feature")] +/// #[pallet::storage] +/// pub(super) type MyStorage = StorageValue; +/// ``` +/// +/// All the `cfg` attributes are automatically copied to the items generated for the storage, +/// i.e. the getter, storage prefix, and the metadata element etc. +/// +/// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some +/// type alias then the generation of the getter might fail. In this case the getter can be +/// implemented manually. +/// +/// NOTE: The generic `Hasher` must implement the [`StorageHasher`] trait (or the type is not +/// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the +/// storage item. Thus generic hasher is supported. +/// +/// ### Macro expansion +/// +/// For each storage item the macro generates a struct named +/// `_GeneratedPrefixForStorage$NameOfStorage`, and implements +/// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It +/// then uses it as the first generic of the aliased type. +/// For `CountedStorageMap`, `CountedStorageMapInstance` is implemented, and another similar +/// struct is generated. +/// +/// For named generic, the macro will reorder the generics, and remove the names. +/// +/// The macro implements the function `storage_metadata` on `Pallet` implementing the metadata +/// for all storage items based on their kind: +/// * for a storage value, the type of the value is copied into the metadata +/// * for a storage map, the type of the values and the key's type is copied into the metadata +/// * for a storage double map, the type of the values, and the types of key1 and key2 are +/// copied into the metadata. +/// +/// # Type value: `#[pallet::type_value]` optional +/// +/// Helper to define a struct implementing `Get` trait. To ease use of storage types. +/// This attribute can be used multiple time. +/// +/// Item is defined as +/// ```ignore +/// #[pallet::type_value] +/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } +/// ``` +/// I.e.: a function definition with generics none or `T: Config` and a returned type. +/// +/// E.g.: +/// ```ignore +/// #[pallet::type_value] +/// fn MyDefault() -> T::Balance { 3.into() } +/// ``` +/// +/// NOTE: This attribute is meant to be used alongside `#[pallet::storage]` to defined some +/// specific default value in storage. +/// +/// ### Macro expansion +/// +/// Macro renames the function to some internal name, generate a struct with the original name +/// of the function and its generic, and implement `Get<$ReturnType>` by calling the user +/// defined function. +/// +/// # Genesis config: `#[pallet::genesis_config]` optional +/// +/// Allow to define the genesis configuration of the pallet. +/// +/// Item is defined as either an enum or a struct. +/// It needs to be public and implement trait GenesisBuild with `#[pallet::genesis_build]`. +/// The type generics is constrained to be either none, or `T` or `T: Config`. 
+/// +/// E.g: +/// ```ignore +/// #[pallet::genesis_config] +/// pub struct GenesisConfig { +/// _myfield: BalanceOf, +/// } +/// ``` +/// +/// ### Macro expansion +/// +/// Macro will add the following attribute on it: +/// * `#[cfg(feature = "std")]` +/// * `#[derive(Serialize, Deserialize)]` +/// * `#[serde(rename_all = "camelCase")]` +/// * `#[serde(deny_unknown_fields)]` +/// * `#[serde(bound(serialize = ""))]` +/// * `#[serde(bound(deserialize = ""))]` +/// +/// # Genesis build: `#[pallet::genesis_build]` optional +/// +/// Allow to define how genesis_configuration is built. +/// +/// Item is defined as +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig<$maybe_generics> { +/// fn build(&self) { $expr } +/// } +/// ``` +/// I.e. a rust trait implementation with generic `T: Config`, of trait `GenesisBuild` on +/// type `GenesisConfig` with generics none or `T`. +/// +/// E.g.: +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// ``` +/// +/// ### Macro expansion +/// +/// Macro will add the following attribute on it: +/// * `#[cfg(feature = "std")]` +/// +/// Macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as second generic +/// for non-instantiable pallets. +/// +/// # Inherent: `#[pallet::inherent]` optional +/// +/// Allow the pallet to provide some inherent: +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::inherent] +/// impl ProvideInherent for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type +/// `Pallet`, and some optional where clause. +/// +/// ### Macro expansion +/// +/// Macro make currently no use of this information, but it might use this information in the +/// future to give information directly to construct_runtime. +/// +/// # Validate unsigned: `#[pallet::validate_unsigned]` optional +/// +/// Allow the pallet to validate some unsigned transaction: +/// +/// Item is defined as: +/// ```ignore +/// #[pallet::validate_unsigned] +/// impl ValidateUnsigned for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type +/// `Pallet`, and some optional where clause. +/// +/// NOTE: There is also `sp_runtime::traits::SignedExtension` that can be used to add some +/// specific logic for transaction validation. +/// +/// ### Macro expansion +/// +/// Macro make currently no use of this information, but it might use this information in the +/// future to give information directly to construct_runtime. +/// +/// # Origin: `#[pallet::origin]` optional +/// +/// Allow to define some origin for the pallet. +/// +/// Item must be either a type alias or an enum or a struct. It needs to be public. +/// +/// E.g.: +/// ```ignore +/// #[pallet::origin] +/// pub struct Origin(PhantomData<(T)>); +/// ``` +/// +/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin +/// can be stored on-chain (e.g. in pallet-scheduler), thus any change must be done with care +/// as it might require some migration. +/// +/// NOTE: for instantiable pallet, origin must be generic over T and I. +/// +/// # General notes on instantiable pallet +/// +/// An instantiable pallet is one where Config is generic, i.e. `Config`. 
This allow runtime +/// to implement multiple instance of the pallet, by using different type for the generic. +/// This is the sole purpose of the generic `I`. +/// But because `PalletInfo` requires `Pallet` placeholder to be static it is important to +/// bound `'static` whenever `PalletInfo` can be used. +/// And in order to have instantiable pallet usable as a regular pallet without instance, it is +/// important to bound `= ()` on every types. +/// +/// Thus impl bound look like `impl, I: 'static>`, and types look like +/// `SomeType` or `SomeType, I: 'static = ()>`. +/// +/// # Example for pallet without instance. +/// +/// ``` +/// pub use pallet::*; // reexport in crate namespace for `construct_runtime!` +/// +/// #[frame_support::pallet] +/// // NOTE: The name of the pallet is provided by `construct_runtime` and is used as +/// // the unique identifier for the pallet's storage. It is not defined in the pallet itself. +/// pub mod pallet { +/// use frame_support::pallet_prelude::*; // Import various types used in the pallet definition +/// use frame_system::pallet_prelude::*; // Import some system helper types. +/// +/// type BalanceOf = ::Balance; +/// +/// // Define the generic parameter of the pallet +/// // The macro parses `#[pallet::constant]` attributes and uses them to generate metadata +/// // for the pallet's constants. +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] // put the constant in metadata +/// type MyGetParam: Get; +/// type Balance: Parameter + From; +/// type Event: From> + IsType<::Event>; +/// } +/// +/// // Define some additional constant to put into the constant metadata. +/// #[pallet::extra_constants] +/// impl Pallet { +/// /// Some description +/// fn exra_constant_name() -> u128 { 4u128 } +/// } +/// +/// // Define the pallet struct placeholder, various pallet function are implemented on it. +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(_); +/// +/// // Implement the pallet hooks. +/// #[pallet::hooks] +/// impl Hooks> for Pallet { +/// fn on_initialize(_n: BlockNumberFor) -> Weight { +/// unimplemented!(); +/// } +/// +/// // can implement also: on_finalize, on_runtime_upgrade, offchain_worker, ... +/// // see `Hooks` trait +/// } +/// +/// // Declare Call struct and implement dispatchables. +/// // +/// // WARNING: Each parameter used in functions must implement: Clone, Debug, Eq, PartialEq, +/// // Codec. +/// // +/// // The macro parses `#[pallet::compact]` attributes on function arguments and implements +/// // the `Call` encoding/decoding accordingly. +/// #[pallet::call] +/// impl Pallet { +/// /// Doc comment put in metadata +/// #[pallet::weight(0)] // Defines weight for call (function parameters are in scope) +/// pub fn toto( +/// origin: OriginFor, +/// #[pallet::compact] _foo: u32, +/// ) -> DispatchResultWithPostInfo { +/// let _ = origin; +/// unimplemented!(); +/// } +/// } +/// +/// // Declare the pallet `Error` enum (this is optional). +/// // The macro generates error metadata using the doc comment on each variant. +/// #[pallet::error] +/// pub enum Error { +/// /// doc comment put into metadata +/// InsufficientProposersBalance, +/// } +/// +/// // Declare pallet Event enum (this is optional). +/// // +/// // WARNING: Each type used in variants must implement: Clone, Debug, Eq, PartialEq, Codec. 
+/// // +/// // The macro generates event metadata, and derive Clone, Debug, Eq, PartialEq and Codec +/// #[pallet::event] +/// // Generate a funciton on Pallet to deposit an event. +/// #[pallet::generate_deposit(pub(super) fn deposit_event)] +/// pub enum Event { +/// /// doc comment put in metadata +/// // `::AccountId` is not defined in metadata list, the last +/// // Thus the metadata is `::AccountId`. +/// Proposed(::AccountId), +/// /// doc +/// // here metadata will be `Balance` as define in metadata list +/// Spending(BalanceOf), +/// // here metadata will be `Other` as define in metadata list +/// Something(u32), +/// } +/// +/// // Define a struct which implements `frame_support::traits::Get` (optional). +/// #[pallet::type_value] +/// pub(super) fn MyDefault() -> T::Balance { 3.into() } +/// +/// // Declare a storage item. Any amount of storage items can be declared (optional). +/// // +/// // Is expected either `StorageValue`, `StorageMap` or `StorageDoubleMap`. +/// // The macro generates the prefix type and replaces the first generic `_`. +/// // +/// // The macro expands the metadata for the storage item with the type used: +/// // * for a storage value the type of the value is copied into the metadata +/// // * for a storage map the type of the values and the type of the key is copied into the metadata +/// // * for a storage double map the types of the values and keys are copied into the +/// // metadata. +/// // +/// // NOTE: The generic `Hasher` must implement the `StorageHasher` trait (or the type is not +/// // usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the +/// // storage item. Thus generic hasher is supported. +/// #[pallet::storage] +/// pub(super) type MyStorageValue = +/// StorageValue>; +/// +/// // Another storage declaration +/// #[pallet::storage] +/// #[pallet::getter(fn my_storage)] +/// #[pallet::storage_prefix = "SomeOtherName"] +/// pub(super) type MyStorage = +/// StorageMap; +/// +/// // Declare the genesis config (optional). +/// // +/// // The macro accepts either a struct or an enum; it checks that generics are consistent. +/// // +/// // Type must implement the `Default` trait. +/// #[pallet::genesis_config] +/// #[derive(Default)] +/// pub struct GenesisConfig { +/// _myfield: u32, +/// } +/// +/// // Declare genesis builder. (This is need only if GenesisConfig is declared) +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// +/// // Declare a pallet origin (this is optional). +/// // +/// // The macro accept type alias or struct or enum, it checks generics are consistent. +/// #[pallet::origin] +/// pub struct Origin(PhantomData); +/// +/// // Declare validate_unsigned implementation (this is optional). +/// #[pallet::validate_unsigned] +/// impl ValidateUnsigned for Pallet { +/// type Call = Call; +/// fn validate_unsigned( +/// source: TransactionSource, +/// call: &Self::Call +/// ) -> TransactionValidity { +/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) +/// } +/// } +/// +/// // Declare inherent provider for pallet (this is optional). 
+/// #[pallet::inherent] +/// impl ProvideInherent for Pallet { +/// type Call = Call; +/// type Error = InherentError; +/// +/// const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; +/// +/// fn create_inherent(_data: &InherentData) -> Option { +/// unimplemented!(); +/// } +/// +/// fn is_inherent(_call: &Self::Call) -> bool { +/// unimplemented!(); +/// } +/// } +/// +/// // Regular rust code needed for implementing ProvideInherent trait +/// +/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] +/// #[cfg_attr(feature = "std", derive(codec::Decode))] +/// pub enum InherentError { +/// } +/// +/// impl sp_inherents::IsFatalError for InherentError { +/// fn is_fatal_error(&self) -> bool { +/// unimplemented!(); +/// } +/// } +/// +/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; +/// } +/// ``` +/// +/// # Example for pallet with instance. +/// +/// ``` +/// pub use pallet::*; +/// +/// #[frame_support::pallet] +/// pub mod pallet { +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// +/// type BalanceOf = >::Balance; +/// +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] +/// type MyGetParam: Get; +/// type Balance: Parameter + From; +/// type Event: From> + IsType<::Event>; +/// } +/// +/// #[pallet::extra_constants] +/// impl, I: 'static> Pallet { +/// /// Some description +/// fn exra_constant_name() -> u128 { 4u128 } +/// } +/// +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(PhantomData<(T, I)>); +/// +/// #[pallet::hooks] +/// impl, I: 'static> Hooks> for Pallet { +/// } +/// +/// #[pallet::call] +/// impl, I: 'static> Pallet { +/// /// Doc comment put in metadata +/// #[pallet::weight(0)] +/// pub fn toto(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { +/// let _ = origin; +/// unimplemented!(); +/// } +/// } +/// +/// #[pallet::error] +/// pub enum Error { +/// /// doc comment put into metadata +/// InsufficientProposersBalance, +/// } +/// +/// #[pallet::event] +/// #[pallet::generate_deposit(pub(super) fn deposit_event)] +/// pub enum Event, I: 'static = ()> { +/// /// doc comment put in metadata +/// Proposed(::AccountId), +/// /// doc +/// Spending(BalanceOf), +/// Something(u32), +/// } +/// +/// #[pallet::type_value] +/// pub(super) fn MyDefault, I: 'static>() -> T::Balance { 3.into() } +/// +/// #[pallet::storage] +/// pub(super) type MyStorageValue, I: 'static = ()> = +/// StorageValue>; +/// +/// #[pallet::storage] +/// #[pallet::getter(fn my_storage)] +/// #[pallet::storage_prefix = "SomeOtherName"] +/// pub(super) type MyStorage = +/// StorageMap; +/// +/// #[pallet::genesis_config] +/// #[derive(Default)] +/// pub struct GenesisConfig { +/// _myfield: u32, +/// } +/// +/// #[pallet::genesis_build] +/// impl, I: 'static> GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// +/// #[pallet::origin] +/// pub struct Origin(PhantomData<(T, I)>); +/// +/// #[pallet::validate_unsigned] +/// impl, I: 'static> ValidateUnsigned for Pallet { +/// type Call = Call; +/// fn validate_unsigned( +/// source: TransactionSource, +/// call: &Self::Call +/// ) -> TransactionValidity { +/// Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) +/// } +/// } +/// +/// #[pallet::inherent] +/// impl, I: 'static> ProvideInherent for Pallet { +/// type Call = Call; +/// type Error = InherentError; +/// +/// const INHERENT_IDENTIFIER: InherentIdentifier 
= INHERENT_IDENTIFIER; +/// +/// fn create_inherent(_data: &InherentData) -> Option { +/// unimplemented!(); +/// } +/// +/// fn is_inherent(_call: &Self::Call) -> bool { +/// unimplemented!(); +/// } +/// } +/// +/// // Regular rust code needed for implementing ProvideInherent trait +/// +/// #[derive(codec::Encode, sp_runtime::RuntimeDebug)] +/// #[cfg_attr(feature = "std", derive(codec::Decode))] +/// pub enum InherentError { +/// } +/// +/// impl sp_inherents::IsFatalError for InherentError { +/// fn is_fatal_error(&self) -> bool { +/// unimplemented!(); +/// } +/// } +/// +/// pub const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"testpall"; +/// } +/// ``` +/// +/// ## Upgrade guidelines: +/// +/// 1. Export the metadata of the pallet for later checks +/// - run your node with the pallet active +/// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p +/// > meta.json` +/// 2. generate the template upgrade for the pallet provided by decl_storage +/// with environment variable `PRINT_PALLET_UPGRADE`: +/// `PRINT_PALLET_UPGRADE=1 cargo check -p my_pallet` This template can be +/// used as information it contains all information for storages, genesis +/// config and genesis build. +/// 3. reorganize pallet to have trait `Config`, `decl_*` macros, `ValidateUnsigned`, +/// `ProvideInherent`, `Origin` all together in one file. Suggested order: +/// * Config, +/// * decl_module, +/// * decl_event, +/// * decl_error, +/// * decl_storage, +/// * origin, +/// * validate_unsigned, +/// * provide_inherent, +/// so far it should compile and all be correct. +/// 4. start writing the new pallet module +/// ```ignore +/// pub use pallet::*; +/// +/// #[frame_support::pallet] +/// pub mod pallet { +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// use super::*; +/// +/// #[pallet::pallet] +/// #[pallet::generate_store($visibility_of_trait_store trait Store)] +/// // NOTE: if the visibility of trait store is private but you want to make it available +/// // in super, then use `pub(super)` or `pub(crate)` to make it available in crate. +/// pub struct Pallet(_); +/// // pub struct Pallet(PhantomData); // for instantiable pallet +/// } +/// ``` +/// 5. **migrate Config**: move trait into the module with +/// * all const in decl_module to `#[pallet::constant]` +/// * add bound `IsType<::Event>` to `type Event` +/// 7. **migrate decl_module**: write: +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks for Pallet { +/// } +/// ``` +/// and write inside +/// `on_initialize`, `on_finalize`, `on_runtime_upgrade`, `offchain_worker`, `integrity_test`. +/// +/// then write: +/// ```ignore +/// #[pallet::call] +/// impl Pallet { +/// } +/// ``` +/// and write inside all the calls in decl_module with a few changes in the signature: +/// - origin must now be written completely, e.g. `origin: OriginFor` +/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you +/// might +/// need to put `Ok(().into())` at the end or the function. +/// - `#[compact]` must now be written `#[pallet::compact]` +/// - `#[weight = ..]` must now be written `#[pallet::weight(..)]` +/// +/// 7. **migrate event**: +/// rewrite as a simple enum under with the attribute `#[pallet::event]`, +/// use `#[pallet::generate_deposit($vis fn deposit_event)]` to generate deposit_event, +/// 8. **migrate error**: rewrite it with attribute `#[pallet::error]`. +/// 9. 
**migrate storage**: +/// decl_storage provide an upgrade template (see 3.). All storages, genesis config, genesis +/// build and default implementation of genesis config can be taken from it directly. +/// +/// Otherwise here is the manual process: +/// +/// first migrate the genesis logic. write: +/// ```ignore +/// #[pallet::genesis_config] +/// struct GenesisConfig { +/// // fields of add_extra_genesis +/// } +/// impl Default for GenesisConfig { +/// // type default or default provided for fields +/// } +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig { +/// // for instantiable pallet: +/// // `impl GenesisBuild for GenesisConfig { +/// fn build() { +/// // The add_extra_genesis build logic +/// } +/// } +/// ``` +/// for each storages, if it contains config(..) then add a fields, and make its default to the +/// value in `= ..;` or the type default if none, if it contains no build then also add the +/// logic to build the value. +/// for each storages if it contains build(..) then add the logic to genesis_build. +/// +/// NOTE: in decl_storage: is executed first the individual config and build and at the end the +/// add_extra_genesis build +/// +/// Once this is done you can migrate storage individually, a few notes: +/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, +/// - for storage with `get(fn ..)` use `#[pallet::getter(fn ...)]` +/// - for storage with value being `Option<$something>` make generic `Value` being +/// `$something` +/// and generic `QueryKind` being `OptionQuery` (note: this is default). Otherwise make +/// `Value` the complete value type and `QueryKind` being `ValueQuery`. +/// - for storage with default value: `= $expr;` provide some specific OnEmpty generic. To do +/// so +/// use of `#[pallet::type_value]` to generate the wanted struct to put. +/// example: `MyStorage: u32 = 3u32` would be written: +/// ```ignore +/// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 } +/// #[pallet::storage] +/// pub(super) type MyStorage = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>; +/// ``` +/// +/// NOTE: `decl_storage` also generates functions `assimilate_storage` and `build_storage` +/// directly on GenesisConfig, those are sometimes used in tests. In order not to break they +/// can be implemented manually, one can implement those functions by calling `GenesisBuild` +/// implementation. +/// +/// 10. **migrate origin**: move the origin to the pallet module under `#[pallet::origin]` +/// 11. **migrate validate_unsigned**: move the `ValidateUnsigned` implementation to the pallet +/// module under `#[pallet::validate_unsigned]` +/// 12. **migrate provide_inherent**: move the `ProvideInherent` implementation to the pallet +/// module under `#[pallet::inherent]` +/// 13. rename the usage of `Module` to `Pallet` inside the crate. +/// 14. migration is done, now double check migration with the checking migration guidelines. +/// +/// ## Checking upgrade guidelines: +/// +/// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata +/// and do a diff of the resulting json before and after migration. 
This checks for: +/// * call, names, signature, docs +/// * event names, docs +/// * error names, docs +/// * storage names, hasher, prefixes, default value +/// * error , error, constant, +/// * manually check that: +/// * `Origin` is moved inside the macro under `#[pallet::origin]` if it exists +/// * `ValidateUnsigned` is moved inside the macro under `#[pallet::validate_unsigned)]` if it +/// exists +/// * `ProvideInherent` is moved inside macro under `#[pallet::inherent)]` if it exists +/// * `on_initialize`/`on_finalize`/`on_runtime_upgrade`/`offchain_worker` are moved to +/// `Hooks` +/// implementation +/// * storages with `config(..)` are converted to `GenesisConfig` field, and their default is +/// `= $expr;` if the storage have default value +/// * storages with `build($expr)` or `config(..)` are built in `GenesisBuild::build` +/// * `add_extra_genesis` fields are converted to `GenesisConfig` field with their correct +/// default if specified +/// * `add_extra_genesis` build is written into `GenesisBuild::build` +/// * storage items defined with [`pallet`] use the name of the pallet provided by +/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used the +/// `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). +/// Thus a runtime using the pallet must be careful with this change. +/// To handle this change: +/// * either ensure that the name of the pallet given to `construct_runtime!` is the same +/// as the name the pallet was giving to `decl_storage`, +/// * or do a storage migration from the old prefix used to the new prefix used. +/// +/// NOTE: The prefixes used by storage items are in the metadata. Thus, ensuring the metadata +/// hasn't changed does ensure that the `pallet_prefix`s used by the storage items haven't +/// changed. +/// +/// # Notes when macro fails to show proper error message spans: +/// +/// Rustc loses span for some macro input. Some tips to fix it: +/// * do not use inner attribute: +/// ```ignore +/// #[pallet] +/// pub mod pallet { +/// //! This inner attribute will make span fail +/// .. +/// } +/// ``` +/// * use the newest nightly possible. +pub use frame_support_procedural::pallet; diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs deleted file mode 100644 index 9ae1d6ce663d5..0000000000000 --- a/frame/support/src/metadata.rs +++ /dev/null @@ -1,620 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub use frame_metadata::{ - DecodeDifferent, FnEncode, RuntimeMetadata, ModuleMetadata, RuntimeMetadataLastVersion, - DefaultByteGetter, RuntimeMetadataPrefixed, StorageEntryMetadata, StorageMetadata, - StorageEntryType, StorageEntryModifier, DefaultByte, StorageHasher, ModuleErrorMetadata, - ExtrinsicMetadata, -}; - -/// Implements the metadata support for the given runtime and all its modules. 
-/// -/// Example: -/// ``` -///# mod module0 { -///# pub trait Trait { -///# type Origin; -///# type BlockNumber; -///# } -///# frame_support::decl_module! { -///# pub struct Module for enum Call where origin: T::Origin {} -///# } -///# -///# frame_support::decl_storage! { -///# trait Store for Module as TestStorage {} -///# } -///# } -///# use module0 as module1; -///# use module0 as module2; -///# impl module0::Trait for Runtime { -///# type Origin = u32; -///# type BlockNumber = u32; -///# } -///# -///# type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic<(), (), (), ()>; -/// -/// struct Runtime; -/// frame_support::impl_runtime_metadata! { -/// for Runtime with modules where Extrinsic = UncheckedExtrinsic -/// module0::Module as Module0 { index 0 } with, -/// module1::Module as Module1 { index 1 } with, -/// module2::Module as Module2 { index 2 } with Storage, -/// }; -/// ``` -/// -/// In this example, just `MODULE3` implements the `Storage` trait. -#[macro_export] -macro_rules! impl_runtime_metadata { - ( - for $runtime:ident with modules where Extrinsic = $ext:ident - $( $rest:tt )* - ) => { - impl $runtime { - pub fn metadata() -> $crate::metadata::RuntimeMetadataPrefixed { - $crate::metadata::RuntimeMetadataLastVersion { - modules: $crate::__runtime_modules_to_metadata!($runtime;; $( $rest )*), - extrinsic: $crate::metadata::ExtrinsicMetadata { - version: <$ext as $crate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, - signed_extensions: < - < - $ext as $crate::sp_runtime::traits::ExtrinsicMetadata - >::SignedExtensions as $crate::sp_runtime::traits::SignedExtension - >::identifier() - .into_iter() - .map($crate::metadata::DecodeDifferent::Encode) - .collect(), - }, - }.into() - } - } - } -} - -#[macro_export] -#[doc(hidden)] -macro_rules! __runtime_modules_to_metadata { - ( - $runtime: ident; - $( $metadata:expr ),*; - $mod:ident::$module:ident $( < $instance:ident > )? as $name:ident - { index $index:tt } - $(with)+ $($kw:ident)* - , - $( $rest:tt )* - ) => { - $crate::__runtime_modules_to_metadata!( - $runtime; - $( $metadata, )* $crate::metadata::ModuleMetadata { - name: $crate::metadata::DecodeDifferent::Encode(stringify!($name)), - index: $index, - storage: $crate::__runtime_modules_to_metadata_calls_storage!( - $mod, $module $( <$instance> )?, $runtime, $(with $kw)* - ), - calls: $crate::__runtime_modules_to_metadata_calls_call!( - $mod, $module $( <$instance> )?, $runtime, $(with $kw)* - ), - event: $crate::__runtime_modules_to_metadata_calls_event!( - $mod, $module $( <$instance> )?, $runtime, $(with $kw)* - ), - constants: $crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $mod::$module::<$runtime $(, $mod::$instance )?>::module_constants_metadata - ) - ), - errors: $crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - <$mod::$module::<$runtime $(, $mod::$instance )?> as $crate::metadata::ModuleErrorMetadata>::metadata - ) - ) - }; - $( $rest )* - ) - }; - ( - $runtime:ident; - $( $metadata:expr ),*; - ) => { - $crate::metadata::DecodeDifferent::Encode(&[ $( $metadata ),* ]) - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! 
__runtime_modules_to_metadata_calls_call { - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with Call - $(with $kws:ident)* - ) => { - Some($crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $mod::$module::<$runtime $(, $mod::$instance )?>::call_functions - ) - )) - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with $_:ident - $(with $kws:ident)* - ) => { - $crate::__runtime_modules_to_metadata_calls_call! { - $mod, $module $( <$instance> )?, $runtime, $(with $kws)* - }; - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - ) => { - None - }; -} - - -#[macro_export] -#[doc(hidden)] -macro_rules! __runtime_modules_to_metadata_calls_event { - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with Event - $(with $kws:ident)* - ) => { - Some($crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $crate::paste::expr!{ - $runtime:: [< __module_events_ $mod $(_ $instance)?>] - } - ) - )) - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with $_:ident - $(with $kws:ident)* - ) => { - $crate::__runtime_modules_to_metadata_calls_event!( $mod, $module $( <$instance> )?, $runtime, $(with $kws)* ); - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - ) => { - None - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! __runtime_modules_to_metadata_calls_storage { - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with Storage - $(with $kws:ident)* - ) => { - Some($crate::metadata::DecodeDifferent::Encode( - $crate::metadata::FnEncode( - $mod::$module::<$runtime $(, $mod::$instance )?>::storage_metadata - ) - )) - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - with $_:ident - $(with $kws:ident)* - ) => { - $crate::__runtime_modules_to_metadata_calls_storage! { - $mod, $module $( <$instance> )?, $runtime, $(with $kws)* - }; - }; - ( - $mod: ident, - $module: ident $( <$instance:ident> )?, - $runtime: ident, - ) => { - None - }; -} - - -#[cfg(test)] -// Do not complain about unused `dispatch` and `dispatch_aux`. 
-#[allow(dead_code)] -mod tests { - use super::*; - use frame_metadata::{ - EventMetadata, StorageEntryModifier, StorageEntryType, FunctionMetadata, StorageEntryMetadata, - ModuleMetadata, RuntimeMetadataPrefixed, DefaultByte, ModuleConstantMetadata, DefaultByteGetter, - ErrorMetadata, ExtrinsicMetadata, - }; - use codec::{Encode, Decode}; - use crate::traits::Get; - use sp_runtime::transaction_validity::TransactionValidityError; - - #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] - struct TestExtension; - impl sp_runtime::traits::SignedExtension for TestExtension { - type AccountId = u32; - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "testextension"; - fn additional_signed(&self) -> Result { - Ok(1) - } - } - - #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] - struct TestExtension2; - impl sp_runtime::traits::SignedExtension for TestExtension2 { - type AccountId = u32; - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "testextension2"; - fn additional_signed(&self) -> Result { - Ok(1) - } - } - - struct TestExtrinsic; - - impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { - const VERSION: u8 = 1; - type SignedExtensions = (TestExtension, TestExtension2); - } - - mod system { - use super::*; - - pub trait Trait: 'static { - type BaseCallFilter; - const ASSOCIATED_CONST: u64 = 500; - type Origin: Into, Self::Origin>> - + From>; - type AccountId: From + Encode; - type BlockNumber: From + Encode; - type SomeValue: Get; - type PalletInfo: crate::traits::PalletInfo; - type Call; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Hi, I am a comment. - const BlockNumber: T::BlockNumber = 100.into(); - const GetType: T::AccountId = T::SomeValue::get().into(); - const ASSOCIATED_CONST: u64 = T::ASSOCIATED_CONST.into(); - } - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] - pub enum RawOrigin { - Root, - Signed(AccountId), - None, - } - - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } - } - - pub type Origin = RawOrigin<::AccountId>; - } - - mod event_module { - use crate::dispatch::DispatchResult; - - pub trait Trait: super::system::Trait { - type Balance; - } - - decl_event!( - pub enum Event where ::Balance - { - /// Hi, I am a comment. - TestEvent(Balance), - } - ); - - decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - #[weight = 0] - fn aux_0(_origin) -> DispatchResult { unreachable!() } - } - } - - crate::decl_error! { - pub enum Error for Module { - /// Some user input error - UserInputError, - /// Something bad happened - /// this could be due to many reasons - BadThingHappened, - } - } - } - - mod event_module2 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_event!( - pub enum Event where ::Balance - { - TestEvent(Balance), - } - ); - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - crate::decl_storage! { - trait Store for Module as TestStorage { - StorageMethod : Option; - } - add_extra_genesis { - build(|_| {}); - } - } - } - - type EventModule = event_module::Module; - type EventModule2 = event_module2::Module; - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] - pub struct TestRuntime; - - impl_outer_event! 
{ - pub enum TestEvent for TestRuntime { - system, - event_module, - event_module2, - } - } - - impl_outer_origin! { - pub enum Origin for TestRuntime where system = system {} - } - - impl_outer_dispatch! { - pub enum Call for TestRuntime where origin: Origin { - event_module::EventModule, - event_module2::EventModule2, - } - } - - impl event_module::Trait for TestRuntime { - type Balance = u32; - } - - impl event_module2::Trait for TestRuntime { - type Origin = Origin; - type Balance = u32; - type BlockNumber = u32; - } - - crate::parameter_types! { - pub const SystemValue: u32 = 600; - } - - impl system::Trait for TestRuntime { - type BaseCallFilter = (); - type Origin = Origin; - type AccountId = u32; - type BlockNumber = u32; - type SomeValue = SystemValue; - type PalletInfo = (); - type Call = Call; - } - - impl_runtime_metadata!( - for TestRuntime with modules where Extrinsic = TestExtrinsic - system::Module as System { index 0 } with Event, - event_module::Module as Module { index 1 } with Event Call, - event_module2::Module as Module2 { index 2 } with Event Storage Call, - ); - - struct ConstantBlockNumberByteGetter; - impl DefaultByte for ConstantBlockNumberByteGetter { - fn default_byte(&self) -> Vec { - 100u32.encode() - } - } - - struct ConstantGetTypeByteGetter; - impl DefaultByte for ConstantGetTypeByteGetter { - fn default_byte(&self) -> Vec { - SystemValue::get().encode() - } - } - - struct ConstantAssociatedConstByteGetter; - impl DefaultByte for ConstantAssociatedConstByteGetter { - fn default_byte(&self) -> Vec { - ::ASSOCIATED_CONST.encode() - } - } - - #[test] - fn runtime_metadata() { - let expected_metadata: RuntimeMetadataLastVersion = RuntimeMetadataLastVersion { - modules: DecodeDifferent::Encode(&[ - ModuleMetadata { - name: DecodeDifferent::Encode("System"), - index: 0, - storage: None, - calls: None, - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - constants: DecodeDifferent::Encode( - FnEncode(|| &[ - ModuleConstantMetadata { - name: DecodeDifferent::Encode("BlockNumber"), - ty: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantBlockNumberByteGetter) - ), - documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), - }, - ModuleConstantMetadata { - name: DecodeDifferent::Encode("GetType"), - ty: DecodeDifferent::Encode("T::AccountId"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantGetTypeByteGetter) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ModuleConstantMetadata { - name: DecodeDifferent::Encode("ASSOCIATED_CONST"), - ty: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantAssociatedConstByteGetter) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ]) - ), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module"), - index: 1, - storage: None, - calls: Some( - DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - } - ]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[" Hi, I 
am a comment."]) - } - ]) - )), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[ - ErrorMetadata { - name: DecodeDifferent::Encode("UserInputError"), - documentation: DecodeDifferent::Encode(&[" Some user input error"]), - }, - ErrorMetadata { - name: DecodeDifferent::Encode("BadThingHappened"), - documentation: DecodeDifferent::Encode(&[ - " Something bad happened", - " this could be due to many reasons", - ]), - }, - ])), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module2"), - index: 2, - storage: Some(DecodeDifferent::Encode( - FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("TestStorage"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("StorageMethod"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter( - &event_module2::__GetByteStructStorageMethod( - std::marker::PhantomData:: - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ] - ) - }), - )), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - }, - ]), - extrinsic: ExtrinsicMetadata { - version: 1, - signed_extensions: vec![ - DecodeDifferent::Encode("testextension"), - DecodeDifferent::Encode("testextension2"), - ], - } - }; - - let metadata_encoded = TestRuntime::metadata().encode(); - let metadata_decoded = RuntimeMetadataPrefixed::decode(&mut &metadata_encoded[..]); - let expected_metadata: RuntimeMetadataPrefixed = expected_metadata.into(); - - pretty_assertions::assert_eq!(expected_metadata, metadata_decoded.unwrap()); - } -} diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs new file mode 100644 index 0000000000000..dc3402440fdd4 --- /dev/null +++ b/frame/support/src/migrations.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + traits::{GetStorageVersion, PalletInfoAccess}, + weights::{RuntimeDbWeight, Weight}, +}; + +/// Trait used by [`migrate_from_pallet_version_to_storage_version`] to do the actual migration. 
+pub trait PalletVersionToStorageVersionHelper { + fn migrate(db_weight: &RuntimeDbWeight) -> Weight; +} + +impl PalletVersionToStorageVersionHelper for T { + fn migrate(db_weight: &RuntimeDbWeight) -> Weight { + const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; + + fn pallet_version_key(name: &str) -> [u8; 32] { + crate::storage::storage_prefix(name.as_bytes(), PALLET_VERSION_STORAGE_KEY_POSTFIX) + } + + sp_io::storage::clear(&pallet_version_key(::name())); + + let version = ::current_storage_version(); + version.put::(); + + db_weight.writes(2) + } +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl PalletVersionToStorageVersionHelper for T { + fn migrate(db_weight: &RuntimeDbWeight) -> Weight { + let mut weight: Weight = 0; + + for_tuples!( #( weight = weight.saturating_add(T::migrate(db_weight)); )* ); + + weight + } +} + +/// Migrate from the `PalletVersion` struct to the new +/// [`StorageVersion`](crate::traits::StorageVersion) struct. +/// +/// This will remove all `PalletVersion's` from the state and insert the current storage version. +pub fn migrate_from_pallet_version_to_storage_version< + AllPallets: PalletVersionToStorageVersionHelper, +>( + db_weight: &RuntimeDbWeight, +) -> Weight { + AllPallets::migrate(db_weight) +} diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs deleted file mode 100644 index b96a56c8e1d8f..0000000000000 --- a/frame/support/src/origin.rs +++ /dev/null @@ -1,533 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Macros that define an Origin type. Every function call to your runtime has an origin which -//! specifies where the extrinsic was generated from. - -/// Constructs an Origin type for a runtime. This is usually called automatically by the -/// construct_runtime macro. See also __create_decl_macro. -#[macro_export] -macro_rules! impl_outer_origin { - - // Macro transformations (to convert invocations with incomplete parameters to the canonical - // form) - ( - $(#[$attr:meta])* - pub enum $name:ident for $runtime:ident { - $( $rest_without_system:tt )* - } - ) => { - $crate::impl_outer_origin! { - $(#[$attr])* - pub enum $name for $runtime where system = frame_system { - $( $rest_without_system )* - } - } - }; - - ( - $(#[$attr:meta])* - pub enum $name:ident for $runtime:ident where - system = $system:ident - $(, system_index = $system_index:tt)? - { - $( $rest_with_system:tt )* - } - ) => { - $crate::paste::item! { - $crate::impl_outer_origin!( - $( #[$attr] )*; - $name; - [< $name Caller >]; - $runtime; - $system; - system_index { $( $system_index )? }; - Modules { $( $rest_with_system )* }; - ); - } - }; - - // Generic + Instance - ( - $(#[$attr:meta])*; - $name:ident; - $caller_name:ident; - $runtime:ident; - $system:ident; - system_index { $( $system_index:tt )? }; - Modules { - $( #[codec(index = $index:tt)] )? 
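// Usage sketch for `migrate_from_pallet_version_to_storage_version` from the
// new `migrations.rs` above (illustrative only; `AllPallets` is the tuple
// generated by `construct_runtime!` and `RocksDbWeight` is one possible
// `RuntimeDbWeight` source, both assumptions here):
//
// pub struct MigratePalletVersions;
// impl frame_support::traits::OnRuntimeUpgrade for MigratePalletVersions {
// 	fn on_runtime_upgrade() -> frame_support::weights::Weight {
// 		frame_support::migrations::migrate_from_pallet_version_to_storage_version::<
// 			AllPallets,
// 		>(&frame_support::weights::constants::RocksDbWeight::get())
// 	}
// }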
$module:ident $instance:ident - $(, $( $rest_module:tt )* )? - }; - $( $parsed:tt )* - ) => { - $crate::impl_outer_origin!( - $( #[$attr] )*; - $name; - $caller_name; - $runtime; - $system; - system_index { $( $system_index )? }; - Modules { $( $( $rest_module )* )? }; - $( $parsed )* $module <$runtime> { $instance } index { $( $index )? }, - ); - }; - - // Instance - ( - $(#[$attr:meta])*; - $name:ident; - $caller_name:ident; - $runtime:ident; - $system:ident; - system_index { $( $system_index:tt )? }; - Modules { - $( #[codec(index = $index:tt )] )? $module:ident $instance:ident - $(, $rest_module:tt )* - }; - $( $parsed:tt )* - ) => { - $crate::impl_outer_origin!( - $( #[$attr] )*; - $name; - $caller_name; - $runtime; - $system; - system_index { $( $system_index )? }; - Modules { $( $rest_module )* }; - $( $parsed )* $module { $instance } index { $( $index )? }, - ); - }; - - // Generic - ( - $(#[$attr:meta])*; - $name:ident; - $caller_name:ident; - $runtime:ident; - $system:ident; - system_index { $( $system_index:tt )? }; - Modules { - $( #[codec(index = $index:tt )] )? $module:ident - $(, $( $rest_module:tt )* )? - }; - $( $parsed:tt )* - ) => { - $crate::impl_outer_origin!( - $( #[$attr] )*; - $name; - $caller_name; - $runtime; - $system; - system_index { $( $system_index )? }; - Modules { $( $( $rest_module )* )? }; - $( $parsed )* $module <$runtime> index { $( $index )? }, - ); - }; - - // No Generic and no Instance - ( - $(#[$attr:meta])*; - $name:ident; - $caller_name:ident; - $runtime:ident; - $system:ident; - system_index { $( $system_index:tt )? }; - Modules { - $( #[codec(index = $index:tt )] )? $module:ident - $(, $( $rest_module:tt )* )? - }; - $( $parsed:tt )* - ) => { - $crate::impl_outer_origin!( - $( #[$attr] )*; - $name; - $caller_name; - $runtime; - $system; - system_index { $( $system_index )? }; - Modules { $( $( $rest_module )* )? }; - $( $parsed )* $module index { $( $index )? }, - ); - }; - - // The main macro expansion that actually renders the Origin enum code. - ( - $(#[$attr:meta])*; - $name:ident; - $caller_name:ident; - $runtime:ident; - $system:ident; - system_index { $( $system_index:tt )? }; - Modules { }; - $( - $module:ident - $( < $generic:ident > )? - $( { $generic_instance:ident } )? - index { $( $index:tt )? }, - )* - ) => { - // WARNING: All instance must hold the filter `frame_system::Trait::BaseCallFilter`, except - // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. 
- #[derive(Clone)] - pub struct $name { - caller: $caller_name, - filter: $crate::sp_std::rc::Rc::Call) -> bool>>, - } - - #[cfg(not(feature = "std"))] - impl $crate::sp_std::fmt::Debug for $name { - fn fmt( - &self, - fmt: &mut $crate::sp_std::fmt::Formatter - ) -> $crate::sp_std::result::Result<(), $crate::sp_std::fmt::Error> { - fmt.write_str("") - } - } - - #[cfg(feature = "std")] - impl $crate::sp_std::fmt::Debug for $name { - fn fmt( - &self, - fmt: &mut $crate::sp_std::fmt::Formatter - ) -> $crate::sp_std::result::Result<(), $crate::sp_std::fmt::Error> { - fmt.debug_struct(stringify!($name)) - .field("caller", &self.caller) - .field("filter", &"[function ptr]") - .finish() - } - } - - impl $crate::traits::OriginTrait for $name { - type Call = <$runtime as $system::Trait>::Call; - type PalletsOrigin = $caller_name; - type AccountId = <$runtime as $system::Trait>::AccountId; - - fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static) { - let f = self.filter.clone(); - - self.filter = $crate::sp_std::rc::Rc::new(Box::new(move |call| { - f(call) && filter(call) - })); - } - - fn reset_filter(&mut self) { - let filter = < - <$runtime as $system::Trait>::BaseCallFilter - as $crate::traits::Filter<<$runtime as $system::Trait>::Call> - >::filter; - - self.filter = $crate::sp_std::rc::Rc::new(Box::new(filter)); - } - - fn set_caller_from(&mut self, other: impl Into) { - self.caller = other.into().caller - } - - fn filter_call(&self, call: &Self::Call) -> bool { - (self.filter)(call) - } - - fn caller(&self) -> &Self::PalletsOrigin { - &self.caller - } - - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. - fn none() -> Self { - $system::RawOrigin::None.into() - } - /// Create with system root origin and no filter. - fn root() -> Self { - $system::RawOrigin::Root.into() - } - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. - fn signed(by: <$runtime as $system::Trait>::AccountId) -> Self { - $system::RawOrigin::Signed(by).into() - } - } - - $crate::paste::item! { - #[derive(Clone, PartialEq, Eq, $crate::RuntimeDebug, $crate::codec::Encode, $crate::codec::Decode)] - $(#[$attr])* - #[allow(non_camel_case_types)] - pub enum $caller_name { - $( #[codec(index = $system_index)] )? - system($system::Origin<$runtime>), - $( - $( #[codec(index = $index)] )? - [< $module $( _ $generic_instance )? >] - ($module::Origin < $( $generic, )? $( $module::$generic_instance )? > ), - )* - #[allow(dead_code)] - Void($crate::Void) - } - } - - // For backwards compatibility and ease of accessing these functions. - #[allow(dead_code)] - impl $name { - /// Create with system none origin and `frame-system::Trait::BaseCallFilter`. - pub fn none() -> Self { - <$name as $crate::traits::OriginTrait>::none() - } - /// Create with system root origin and no filter. - pub fn root() -> Self { - <$name as $crate::traits::OriginTrait>::root() - } - /// Create with system signed origin and `frame-system::Trait::BaseCallFilter`. 
- pub fn signed(by: <$runtime as $system::Trait>::AccountId) -> Self { - <$name as $crate::traits::OriginTrait>::signed(by) - } - } - - impl From<$system::Origin<$runtime>> for $caller_name { - fn from(x: $system::Origin<$runtime>) -> Self { - $caller_name::system(x) - } - } - impl From<$system::Origin<$runtime>> for $name { - /// Convert to runtime origin: - /// * root origin is built with no filter - /// * others use `frame-system::Trait::BaseCallFilter` - fn from(x: $system::Origin<$runtime>) -> Self { - let o: $caller_name = x.into(); - o.into() - } - } - - impl From<$caller_name> for $name { - fn from(x: $caller_name) -> Self { - let mut o = $name { - caller: x, - filter: $crate::sp_std::rc::Rc::new(Box::new(|_| true)), - }; - - // Root has no filter - if !matches!(o.caller, $caller_name::system($system::Origin::<$runtime>::Root)) { - $crate::traits::OriginTrait::reset_filter(&mut o); - } - - o - } - } - - impl Into<$crate::sp_std::result::Result<$system::Origin<$runtime>, $name>> for $name { - /// NOTE: converting to pallet origin loses the origin filter information. - fn into(self) -> $crate::sp_std::result::Result<$system::Origin<$runtime>, Self> { - if let $caller_name::system(l) = self.caller { - Ok(l) - } else { - Err(self) - } - } - } - impl From::AccountId>> for $name { - /// Convert to runtime origin with caller being system signed or none and use filter - /// `frame-system::Trait::BaseCallFilter`. - fn from(x: Option<<$runtime as $system::Trait>::AccountId>) -> Self { - <$system::Origin<$runtime>>::from(x).into() - } - } - - $( - $crate::paste::item! { - impl From<$module::Origin < $( $generic )? $(, $module::$generic_instance )? > > for $caller_name { - fn from(x: $module::Origin < $( $generic )? $(, $module::$generic_instance )? >) -> Self { - $caller_name::[< $module $( _ $generic_instance )? >](x) - } - } - - impl From<$module::Origin < $( $generic )? $(, $module::$generic_instance )? > > for $name { - /// Convert to runtime origin using `frame-system::Trait::BaseCallFilter`. - fn from(x: $module::Origin < $( $generic )? $(, $module::$generic_instance )? >) -> Self { - let x: $caller_name = x.into(); - x.into() - } - } - impl Into< - $crate::sp_std::result::Result< - $module::Origin < $( $generic )? $(, $module::$generic_instance )? >, - $name, - >> - for $name { - /// NOTE: converting to pallet origin loses the origin filter information. - fn into(self) -> $crate::sp_std::result::Result< - $module::Origin < $( $generic )? $(, $module::$generic_instance )? >, - Self, - > { - if let $caller_name::[< $module $( _ $generic_instance )? 
>](l) = self.caller { - Ok(l) - } else { - Err(self) - } - } - } - } - )* - } -} - -#[cfg(test)] -mod tests { - use codec::{Encode, Decode}; - use crate::traits::{Filter, OriginTrait}; - mod frame_system { - use super::*; - - pub trait Trait { - type AccountId; - type Call; - type BaseCallFilter; - } - - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] - pub enum RawOrigin { - Root, - Signed(AccountId), - None, - } - - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } - } - - pub type Origin = RawOrigin<::AccountId>; - } - - mod origin_without_generic { - use super::*; - - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] - pub struct Origin; - } - - mod origin_with_generic { - use super::*; - - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] - pub struct Origin { - t: T - } - } - - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] - pub struct TestRuntime; - - pub struct BaseCallFilter; - impl Filter for BaseCallFilter { - fn filter(c: &u32) -> bool { - *c % 2 == 0 - } - } - - impl frame_system::Trait for TestRuntime { - type AccountId = u32; - type Call = u32; - type BaseCallFilter = BaseCallFilter; - } - - impl_outer_origin!( - pub enum OriginWithoutSystem for TestRuntime { - origin_without_generic, - origin_with_generic, - } - ); - - impl_outer_origin!( - pub enum OriginWithoutSystem2 for TestRuntime { - origin_with_generic, - origin_without_generic - } - ); - - impl_outer_origin!( - pub enum OriginWithSystem for TestRuntime where system = frame_system { - origin_without_generic, - origin_with_generic - } - ); - - impl_outer_origin!( - pub enum OriginWithSystem2 for TestRuntime where system = frame_system { - origin_with_generic, - origin_without_generic, - } - ); - - impl_outer_origin!( - pub enum OriginEmpty for TestRuntime where system = frame_system {} - ); - - impl_outer_origin!( - pub enum OriginIndices for TestRuntime where system = frame_system, system_index = "11" { - origin_with_generic, - #[codec(index = "10")] origin_without_generic, - } - ); - - #[test] - fn test_default_filter() { - assert_eq!(OriginWithSystem::root().filter_call(&0), true); - assert_eq!(OriginWithSystem::root().filter_call(&1), true); - assert_eq!(OriginWithSystem::none().filter_call(&0), true); - assert_eq!(OriginWithSystem::none().filter_call(&1), false); - assert_eq!(OriginWithSystem::signed(0).filter_call(&0), true); - assert_eq!(OriginWithSystem::signed(0).filter_call(&1), false); - assert_eq!(OriginWithSystem::from(Some(0)).filter_call(&0), true); - assert_eq!(OriginWithSystem::from(Some(0)).filter_call(&1), false); - assert_eq!(OriginWithSystem::from(None).filter_call(&0), true); - assert_eq!(OriginWithSystem::from(None).filter_call(&1), false); - assert_eq!(OriginWithSystem::from(origin_without_generic::Origin).filter_call(&0), true); - assert_eq!(OriginWithSystem::from(origin_without_generic::Origin).filter_call(&1), false); - - let mut origin = OriginWithSystem::from(Some(0)); - - origin.add_filter(|c| *c % 2 == 1); - assert_eq!(origin.filter_call(&0), false); - assert_eq!(origin.filter_call(&1), false); - - origin.set_caller_from(OriginWithSystem::root()); - assert!(matches!(origin.caller, OriginWithSystemCaller::system(frame_system::RawOrigin::Root))); - assert_eq!(origin.filter_call(&0), false); - assert_eq!(origin.filter_call(&1), false); - - origin.reset_filter(); - assert_eq!(origin.filter_call(&0), true); - assert_eq!(origin.filter_call(&1), false); - } - - #[test] - 
fn test_codec() { - use codec::Encode; - assert_eq!(OriginIndices::root().caller.encode()[0], 11); - let without_generic_variant = OriginIndicesCaller::origin_without_generic( - origin_without_generic::Origin - ); - assert_eq!(without_generic_variant.encode()[0], 10); - - assert_eq!(OriginWithoutSystem::root().caller.encode()[0], 0); - let without_generic_variant = OriginWithoutSystemCaller::origin_without_generic( - origin_without_generic::Origin - ); - assert_eq!(without_generic_variant.encode()[0], 1); - } -} diff --git a/frame/support/src/storage/bounded_btree_map.rs b/frame/support/src/storage/bounded_btree_map.rs new file mode 100644 index 0000000000000..d0c0aa7c4f155 --- /dev/null +++ b/frame/support/src/storage/bounded_btree_map.rs @@ -0,0 +1,460 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support a bounded BTreeMap. + +use crate::{storage::StorageDecodeLength, traits::Get}; +use codec::{Decode, Encode, MaxEncodedLen}; +use sp_std::{ + borrow::Borrow, collections::btree_map::BTreeMap, convert::TryFrom, fmt, marker::PhantomData, + ops::Deref, +}; + +/// A bounded map based on a B-Tree. +/// +/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing +/// the amount of work performed in a search. See [`BTreeMap`] for more details. +/// +/// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the +/// map. All internal operations ensure this bound is respected. +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct BoundedBTreeMap(BTreeMap, PhantomData); + +impl Decode for BoundedBTreeMap +where + K: Decode + Ord, + V: Decode, + S: Get, +{ + fn decode(input: &mut I) -> Result { + let inner = BTreeMap::::decode(input)?; + if inner.len() > S::get() as usize { + return Err("BoundedBTreeMap exceeds its limit".into()) + } + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + BTreeMap::::skip(input) + } +} + +impl BoundedBTreeMap +where + S: Get, +{ + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } +} + +impl BoundedBTreeMap +where + K: Ord, + S: Get, +{ + /// Create a new `BoundedBTreeMap`. + /// + /// Does not allocate. + pub fn new() -> Self { + BoundedBTreeMap(BTreeMap::new(), PhantomData) + } + + /// Consume self, and return the inner `BTreeMap`. + /// + /// This is useful when a mutating API of the inner type is desired, and closure-based mutation + /// such as provided by [`try_mutate`][Self::try_mutate] is inconvenient. + pub fn into_inner(self) -> BTreeMap { + debug_assert!(self.0.len() <= Self::bound()); + self.0 + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. 
+	///
+	/// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` ->
+	/// [`Self::try_from`].
+	pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut BTreeMap<K, V>)) -> Option<Self> {
+		mutate(&mut self.0);
+		(self.0.len() <= Self::bound()).then(move || self)
+	}
+
+	/// Clears the map, removing all elements.
+	pub fn clear(&mut self) {
+		self.0.clear()
+	}
+
+	/// Return a mutable reference to the value corresponding to the key.
+	///
+	/// The key may be any borrowed form of the map's key type, but the ordering on the borrowed
+	/// form _must_ match the ordering on the key type.
+	pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
+	where
+		K: Borrow<Q>,
+		Q: Ord + ?Sized,
+	{
+		self.0.get_mut(key)
+	}
+
+	/// Exactly the same semantics as [`BTreeMap::insert`], but returns an `Err` (and is a noop) if
+	/// the new length of the map exceeds `S`.
+	///
+	/// In the `Err` case, returns the inserted pair so it can be further used without cloning.
+	pub fn try_insert(&mut self, key: K, value: V) -> Result<Option<V>, (K, V)> {
+		if self.len() < Self::bound() || self.0.contains_key(&key) {
+			Ok(self.0.insert(key, value))
+		} else {
+			Err((key, value))
+		}
+	}
+
+	/// Remove a key from the map, returning the value at the key if the key was previously in the
+	/// map.
+	///
+	/// The key may be any borrowed form of the map's key type, but the ordering on the borrowed
+	/// form _must_ match the ordering on the key type.
+	pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
+	where
+		K: Borrow<Q>,
+		Q: Ord + ?Sized,
+	{
+		self.0.remove(key)
+	}
+
+	/// Remove a key from the map, returning the value at the key if the key was previously in the
+	/// map.
+	///
+	/// The key may be any borrowed form of the map's key type, but the ordering on the borrowed
+	/// form _must_ match the ordering on the key type.
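// A minimal usage sketch of the fallible insertion APIs above (illustrative
// only; `Seven` stands for any `Get<u32>` impl returning 7, as defined in the
// tests further down):
//
// let mut map = BoundedBTreeMap::<u32, u32, Seven>::new();
// assert_eq!(map.try_insert(1, 10), Ok(None));     // new key, under the bound
// assert_eq!(map.try_insert(1, 11), Ok(Some(10))); // overwriting never grows the map
// // With 7 distinct keys present, a new key is handed back instead of stored:
// // assert_eq!(map.try_insert(8, 80), Err((8, 80)));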
+ pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.0.remove_entry(key) + } +} + +impl Default for BoundedBTreeMap +where + K: Ord, + S: Get, +{ + fn default() -> Self { + Self::new() + } +} + +impl Clone for BoundedBTreeMap +where + BTreeMap: Clone, +{ + fn clone(&self) -> Self { + BoundedBTreeMap(self.0.clone(), PhantomData) + } +} + +#[cfg(feature = "std")] +impl fmt::Debug for BoundedBTreeMap +where + BTreeMap: fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("BoundedBTreeMap").field(&self.0).field(&Self::bound()).finish() + } +} + +impl PartialEq for BoundedBTreeMap +where + BTreeMap: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Eq for BoundedBTreeMap where BTreeMap: Eq {} + +impl PartialEq> for BoundedBTreeMap +where + BTreeMap: PartialEq, +{ + fn eq(&self, other: &BTreeMap) -> bool { + self.0 == *other + } +} + +impl PartialOrd for BoundedBTreeMap +where + BTreeMap: PartialOrd, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for BoundedBTreeMap +where + BTreeMap: Ord, +{ + fn cmp(&self, other: &Self) -> sp_std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl IntoIterator for BoundedBTreeMap { + type Item = (K, V); + type IntoIter = sp_std::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl MaxEncodedLen for BoundedBTreeMap +where + K: MaxEncodedLen, + V: MaxEncodedLen, + S: Get, +{ + fn max_encoded_len() -> usize { + Self::bound() + .saturating_mul(K::max_encoded_len().saturating_add(V::max_encoded_len())) + .saturating_add(codec::Compact(S::get()).encoded_size()) + } +} + +impl Deref for BoundedBTreeMap +where + K: Ord, +{ + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef> for BoundedBTreeMap +where + K: Ord, +{ + fn as_ref(&self) -> &BTreeMap { + &self.0 + } +} + +impl From> for BTreeMap +where + K: Ord, +{ + fn from(map: BoundedBTreeMap) -> Self { + map.0 + } +} + +impl TryFrom> for BoundedBTreeMap +where + K: Ord, + S: Get, +{ + type Error = (); + + fn try_from(value: BTreeMap) -> Result { + (value.len() <= Self::bound()) + .then(move || BoundedBTreeMap(value, PhantomData)) + .ok_or(()) + } +} + +impl codec::DecodeLength for BoundedBTreeMap { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedBTreeMap` is stored just a `BTreeMap`, which is stored as a + // `Compact` with its length followed by an iteration of its items. We can just use + // the underlying implementation. + as codec::DecodeLength>::len(self_encoded) + } +} + +impl StorageDecodeLength for BoundedBTreeMap {} + +impl codec::EncodeLike> for BoundedBTreeMap where + BTreeMap: Encode +{ +} + +#[cfg(test)] +pub mod test { + use super::*; + use crate::Twox128; + use sp_io::TestExternalities; + use sp_std::convert::TryInto; + + crate::parameter_types! { + pub const Seven: u32 = 7; + pub const Four: u32 = 4; + } + + crate::generate_storage_alias! { Prefix, Foo => Value> } + crate::generate_storage_alias! { Prefix, FooMap => Map<(u32, Twox128), BoundedBTreeMap> } + crate::generate_storage_alias! 
{ + Prefix, + FooDoubleMap => DoubleMap<(u32, Twox128), (u32, Twox128), BoundedBTreeMap> + } + + fn map_from_keys(keys: &[K]) -> BTreeMap + where + K: Ord + Copy, + { + keys.iter().copied().zip(std::iter::repeat(())).collect() + } + + fn boundedmap_from_keys(keys: &[K]) -> BoundedBTreeMap + where + K: Ord + Copy, + S: Get, + { + map_from_keys(keys).try_into().unwrap() + } + + #[test] + fn decode_len_works() { + TestExternalities::default().execute_with(|| { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + Foo::put(bounded); + assert_eq!(Foo::decode_len().unwrap(), 3); + }); + + TestExternalities::default().execute_with(|| { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + FooMap::insert(1, bounded); + assert_eq!(FooMap::decode_len(1).unwrap(), 3); + assert!(FooMap::decode_len(0).is_none()); + assert!(FooMap::decode_len(2).is_none()); + }); + + TestExternalities::default().execute_with(|| { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + FooDoubleMap::insert(1, 1, bounded); + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 3); + assert!(FooDoubleMap::decode_len(2, 1).is_none()); + assert!(FooDoubleMap::decode_len(1, 2).is_none()); + assert!(FooDoubleMap::decode_len(2, 2).is_none()); + }); + } + + #[test] + fn try_insert_works() { + let mut bounded = boundedmap_from_keys::(&[1, 2, 3]); + bounded.try_insert(0, ()).unwrap(); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.try_insert(9, ()).is_err()); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + } + + #[test] + fn deref_coercion_works() { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + // these methods come from deref-ed vec. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded = boundedmap_from_keys::(&[1, 2, 3, 4, 5, 6]); + let bounded = bounded + .try_mutate(|v| { + v.insert(7, ()); + }) + .unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded + .try_mutate(|v| { + v.insert(8, ()); + }) + .is_none()); + } + + #[test] + fn btree_map_eq_works() { + let bounded = boundedmap_from_keys::(&[1, 2, 3, 4, 5, 6]); + assert_eq!(bounded, map_from_keys(&[1, 2, 3, 4, 5, 6])); + } + + #[test] + fn too_big_fail_to_decode() { + let v: Vec<(u32, u32)> = vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]; + assert_eq!( + BoundedBTreeMap::::decode(&mut &v.encode()[..]), + Err("BoundedBTreeMap exceeds its limit".into()), + ); + } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut map = BoundedBTreeMap::::new(); + + // when the set is full + + for i in 0..4 { + map.try_insert(Unequal(i, false), i).unwrap(); + } + + // can't insert a new distinct member + map.try_insert(Unequal(5, false), 5).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed, but the value is + map.try_insert(Unequal(0, true), 6).unwrap(); + assert_eq!(map.len(), 4); + let (zero_key, zero_value) = map.get_key_value(&Unequal(0, true)).unwrap(); + 
assert_eq!(zero_key.0, 0);
+		assert_eq!(zero_key.1, false);
+		assert_eq!(*zero_value, 6);
+	}
+}
diff --git a/frame/support/src/storage/bounded_btree_set.rs b/frame/support/src/storage/bounded_btree_set.rs
new file mode 100644
index 0000000000000..182884e655dd2
--- /dev/null
+++ b/frame/support/src/storage/bounded_btree_set.rs
@@ -0,0 +1,439 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits, types and structs to support a bounded `BTreeSet`.
+
+use crate::{storage::StorageDecodeLength, traits::Get};
+use codec::{Decode, Encode, MaxEncodedLen};
+use sp_std::{
+	borrow::Borrow, collections::btree_set::BTreeSet, convert::TryFrom, fmt, marker::PhantomData,
+	ops::Deref,
+};
+
+/// A bounded set based on a B-Tree.
+///
+/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
+/// the amount of work performed in a search. See [`BTreeSet`] for more details.
+///
+/// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the
+/// set. All internal operations ensure this bound is respected.
+#[derive(Encode)]
+pub struct BoundedBTreeSet<T, S>(BTreeSet<T>, PhantomData<S>);
+
+impl<T, S> Decode for BoundedBTreeSet<T, S>
+where
+	T: Decode + Ord,
+	S: Get<u32>,
+{
+	fn decode<I: codec::Input>(input: &mut I) -> Result<Self, codec::Error> {
+		let inner = BTreeSet::<T>::decode(input)?;
+		if inner.len() > S::get() as usize {
+			return Err("BoundedBTreeSet exceeds its limit".into())
+		}
+		Ok(Self(inner, PhantomData))
+	}
+
+	fn skip<I: codec::Input>(input: &mut I) -> Result<(), codec::Error> {
+		BTreeSet::<T>::skip(input)
+	}
+}
+
+impl<T, S> BoundedBTreeSet<T, S>
+where
+	S: Get<u32>,
+{
+	/// Get the bound of the type in `usize`.
+	pub fn bound() -> usize {
+		S::get() as usize
+	}
+}
+
+impl<T, S> BoundedBTreeSet<T, S>
+where
+	T: Ord,
+	S: Get<u32>,
+{
+	/// Create a new `BoundedBTreeSet`.
+	///
+	/// Does not allocate.
+	pub fn new() -> Self {
+		BoundedBTreeSet(BTreeSet::new(), PhantomData)
+	}
+
+	/// Consume self, and return the inner `BTreeSet`.
+	///
+	/// This is useful when a mutating API of the inner type is desired, and closure-based mutation
+	/// such as provided by [`try_mutate`][Self::try_mutate] is inconvenient.
+	pub fn into_inner(self) -> BTreeSet<T> {
+		debug_assert!(self.0.len() <= Self::bound());
+		self.0
+	}
+
+	/// Consumes self and mutates self via the given `mutate` function.
+	///
+	/// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is
+	/// returned.
+	///
+	/// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` ->
+	/// [`Self::try_from`].
+	pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut BTreeSet<T>)) -> Option<Self> {
+		mutate(&mut self.0);
+		(self.0.len() <= Self::bound()).then(move || self)
+	}
+
+	/// Clears the set, removing all elements.
+ pub fn clear(&mut self) { + self.0.clear() + } + + /// Exactly the same semantics as [`BTreeSet::insert`], but returns an `Err` (and is a noop) if + /// the new length of the set exceeds `S`. + /// + /// In the `Err` case, returns the inserted item so it can be further used without cloning. + pub fn try_insert(&mut self, item: T) -> Result { + if self.len() < Self::bound() || self.0.contains(&item) { + Ok(self.0.insert(item)) + } else { + Err(item) + } + } + + /// Remove an item from the set, returning whether it was previously in the set. + /// + /// The item may be any borrowed form of the set's item type, but the ordering on the borrowed + /// form _must_ match the ordering on the item type. + pub fn remove(&mut self, item: &Q) -> bool + where + T: Borrow, + Q: Ord + ?Sized, + { + self.0.remove(item) + } + + /// Removes and returns the value in the set, if any, that is equal to the given one. + /// + /// The value may be any borrowed form of the set's value type, but the ordering on the borrowed + /// form _must_ match the ordering on the value type. + pub fn take(&mut self, value: &Q) -> Option + where + T: Borrow + Ord, + Q: Ord + ?Sized, + { + self.0.take(value) + } +} + +impl Default for BoundedBTreeSet +where + T: Ord, + S: Get, +{ + fn default() -> Self { + Self::new() + } +} + +impl Clone for BoundedBTreeSet +where + BTreeSet: Clone, +{ + fn clone(&self) -> Self { + BoundedBTreeSet(self.0.clone(), PhantomData) + } +} + +#[cfg(feature = "std")] +impl fmt::Debug for BoundedBTreeSet +where + BTreeSet: fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("BoundedBTreeSet").field(&self.0).field(&Self::bound()).finish() + } +} + +impl PartialEq for BoundedBTreeSet +where + BTreeSet: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Eq for BoundedBTreeSet where BTreeSet: Eq {} + +impl PartialEq> for BoundedBTreeSet +where + BTreeSet: PartialEq, +{ + fn eq(&self, other: &BTreeSet) -> bool { + self.0 == *other + } +} + +impl PartialOrd for BoundedBTreeSet +where + BTreeSet: PartialOrd, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for BoundedBTreeSet +where + BTreeSet: Ord, +{ + fn cmp(&self, other: &Self) -> sp_std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl IntoIterator for BoundedBTreeSet { + type Item = T; + type IntoIter = sp_std::collections::btree_set::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl MaxEncodedLen for BoundedBTreeSet +where + T: MaxEncodedLen, + S: Get, +{ + fn max_encoded_len() -> usize { + Self::bound() + .saturating_mul(T::max_encoded_len()) + .saturating_add(codec::Compact(S::get()).encoded_size()) + } +} + +impl Deref for BoundedBTreeSet +where + T: Ord, +{ + type Target = BTreeSet; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef> for BoundedBTreeSet +where + T: Ord, +{ + fn as_ref(&self) -> &BTreeSet { + &self.0 + } +} + +impl From> for BTreeSet +where + T: Ord, +{ + fn from(set: BoundedBTreeSet) -> Self { + set.0 + } +} + +impl TryFrom> for BoundedBTreeSet +where + T: Ord, + S: Get, +{ + type Error = (); + + fn try_from(value: BTreeSet) -> Result { + (value.len() <= Self::bound()) + .then(move || BoundedBTreeSet(value, PhantomData)) + .ok_or(()) + } +} + +impl codec::DecodeLength for BoundedBTreeSet { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedBTreeSet` is stored just a `BTreeSet`, which is stored as a + // `Compact` with its 
length followed by an iteration of its items. We can just use + // the underlying implementation. + as codec::DecodeLength>::len(self_encoded) + } +} + +impl StorageDecodeLength for BoundedBTreeSet {} + +impl codec::EncodeLike> for BoundedBTreeSet where BTreeSet: Encode {} + +#[cfg(test)] +pub mod test { + use super::*; + use crate::Twox128; + use sp_io::TestExternalities; + use sp_std::convert::TryInto; + + crate::parameter_types! { + pub const Seven: u32 = 7; + pub const Four: u32 = 4; + } + + crate::generate_storage_alias! { Prefix, Foo => Value> } + crate::generate_storage_alias! { Prefix, FooMap => Map<(u32, Twox128), BoundedBTreeSet> } + crate::generate_storage_alias! { + Prefix, + FooDoubleMap => DoubleMap<(u32, Twox128), (u32, Twox128), BoundedBTreeSet> + } + + fn map_from_keys(keys: &[T]) -> BTreeSet + where + T: Ord + Copy, + { + keys.iter().copied().collect() + } + + fn boundedmap_from_keys(keys: &[T]) -> BoundedBTreeSet + where + T: Ord + Copy, + S: Get, + { + map_from_keys(keys).try_into().unwrap() + } + + #[test] + fn decode_len_works() { + TestExternalities::default().execute_with(|| { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + Foo::put(bounded); + assert_eq!(Foo::decode_len().unwrap(), 3); + }); + + TestExternalities::default().execute_with(|| { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + FooMap::insert(1, bounded); + assert_eq!(FooMap::decode_len(1).unwrap(), 3); + assert!(FooMap::decode_len(0).is_none()); + assert!(FooMap::decode_len(2).is_none()); + }); + + TestExternalities::default().execute_with(|| { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + FooDoubleMap::insert(1, 1, bounded); + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 3); + assert!(FooDoubleMap::decode_len(2, 1).is_none()); + assert!(FooDoubleMap::decode_len(1, 2).is_none()); + assert!(FooDoubleMap::decode_len(2, 2).is_none()); + }); + } + + #[test] + fn try_insert_works() { + let mut bounded = boundedmap_from_keys::(&[1, 2, 3]); + bounded.try_insert(0).unwrap(); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.try_insert(9).is_err()); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + } + + #[test] + fn deref_coercion_works() { + let bounded = boundedmap_from_keys::(&[1, 2, 3]); + // these methods come from deref-ed vec. 
+ assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded = boundedmap_from_keys::(&[1, 2, 3, 4, 5, 6]); + let bounded = bounded + .try_mutate(|v| { + v.insert(7); + }) + .unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded + .try_mutate(|v| { + v.insert(8); + }) + .is_none()); + } + + #[test] + fn btree_map_eq_works() { + let bounded = boundedmap_from_keys::(&[1, 2, 3, 4, 5, 6]); + assert_eq!(bounded, map_from_keys(&[1, 2, 3, 4, 5, 6])); + } + + #[test] + fn too_big_fail_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + assert_eq!( + BoundedBTreeSet::::decode(&mut &v.encode()[..]), + Err("BoundedBTreeSet exceeds its limit".into()), + ); + } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut set = BoundedBTreeSet::::new(); + + // when the set is full + + for i in 0..4 { + set.try_insert(Unequal(i, false)).unwrap(); + } + + // can't insert a new distinct member + set.try_insert(Unequal(5, false)).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed + set.try_insert(Unequal(0, true)).unwrap(); + assert_eq!(set.len(), 4); + let zero_item = set.get(&Unequal(0, true)).unwrap(); + assert_eq!(zero_item.0, 0); + assert_eq!(zero_item.1, false); + } +} diff --git a/frame/support/src/storage/bounded_vec.rs b/frame/support/src/storage/bounded_vec.rs new file mode 100644 index 0000000000000..b45c294f8d4a4 --- /dev/null +++ b/frame/support/src/storage/bounded_vec.rs @@ -0,0 +1,460 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map +//! or a double map. + +use crate::{ + storage::{StorageDecodeLength, StorageTryAppend}, + traits::Get, + WeakBoundedVec, +}; +use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use core::{ + ops::{Deref, Index, IndexMut}, + slice::SliceIndex, +}; +use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; + +/// A bounded vector. +/// +/// It has implementations for efficient append and length decoding, as with a normal `Vec<_>`, once +/// put into storage as a raw value, map or double-map. +/// +/// As the name suggests, the length of the queue is always bounded. All internal operations ensure +/// this bound is respected. 
+#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct BoundedVec(Vec, PhantomData); + +/// A bounded slice. +/// +/// Similar to a `BoundedVec`, but not owned and cannot be decoded. +#[derive(Encode)] +pub struct BoundedSlice<'a, T, S>(&'a [T], PhantomData); + +// `BoundedSlice`s encode to something which will always decode into a `BoundedVec`, +// `WeakBoundedVec`, or a `Vec`. +impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} +impl<'a, T: Encode + Decode, S: Get> EncodeLike> + for BoundedSlice<'a, T, S> +{ +} +impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} + +impl<'a, T, S: Get> TryFrom<&'a [T]> for BoundedSlice<'a, T, S> { + type Error = (); + fn try_from(t: &'a [T]) -> Result { + if t.len() < S::get() as usize { + Ok(BoundedSlice(t, PhantomData)) + } else { + Err(()) + } + } +} + +impl<'a, T, S> From> for &'a [T] { + fn from(t: BoundedSlice<'a, T, S>) -> Self { + t.0 + } +} + +impl> Decode for BoundedVec { + fn decode(input: &mut I) -> Result { + let inner = Vec::::decode(input)?; + if inner.len() > S::get() as usize { + return Err("BoundedVec exceeds its limit".into()) + } + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + Vec::::skip(input) + } +} + +// `BoundedVec`s encode to something which will always decode as a `Vec`. +impl> EncodeLike> for BoundedVec {} + +impl BoundedVec { + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: Vec) -> Self { + Self(t, Default::default()) + } + + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an + /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can + /// be used. + /// + /// This is useful for cases if you need access to an internal API of the inner `Vec<_>` which + /// is not provided by the wrapper `BoundedVec`. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Exactly the same semantics as [`Vec::remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn remove(&mut self, index: usize) -> T { + self.0.remove(index) + } + + /// Exactly the same semantics as [`Vec::swap_remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn swap_remove(&mut self, index: usize) -> T { + self.0.swap_remove(index) + } + + /// Exactly the same semantics as [`Vec::retain`]. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Exactly the same semantics as [`Vec::get_mut`]. + pub fn get_mut>( + &mut self, + index: I, + ) -> Option<&mut >::Output> { + self.0.get_mut(index) + } +} + +impl> From> for Vec { + fn from(x: BoundedVec) -> Vec { + x.0 + } +} + +impl> BoundedVec { + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut Vec)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Exactly the same semantics as [`Vec::insert`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if `index > len`. 
+ pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.insert(index, element); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::push`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds isize::MAX bytes. + pub fn try_push(&mut self, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.push(element); + Ok(()) + } else { + Err(()) + } + } +} + +impl Default for BoundedVec { + fn default() -> Self { + // the bound cannot be below 0, which is satisfied by an empty vector + Self::unchecked_from(Vec::default()) + } +} + +#[cfg(feature = "std")] +impl fmt::Debug for BoundedVec +where + T: fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("BoundedVec").field(&self.0).field(&Self::bound()).finish() + } +} + +impl Clone for BoundedVec +where + T: Clone, +{ + fn clone(&self) -> Self { + // bound is retained + Self::unchecked_from(self.0.clone()) + } +} + +impl> TryFrom> for BoundedVec { + type Error = (); + fn try_from(t: Vec) -> Result { + if t.len() <= Self::bound() { + // explicit check just above + Ok(Self::unchecked_from(t)) + } else { + Err(()) + } + } +} + +// It is okay to give a non-mutable reference of the inner vec to anyone. +impl AsRef> for BoundedVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +impl AsRef<[T]> for BoundedVec { + fn as_ref(&self) -> &[T] { + &self.0 + } +} + +impl AsMut<[T]> for BoundedVec { + fn as_mut(&mut self) -> &mut [T] { + &mut self.0 + } +} + +// will allow for immutable all operations of `Vec` on `BoundedVec`. +impl Deref for BoundedVec { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +// Allows for indexing similar to a normal `Vec`. Can panic if out of bound. +impl Index for BoundedVec +where + I: SliceIndex<[T]>, +{ + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + self.0.index(index) + } +} + +impl IndexMut for BoundedVec +where + I: SliceIndex<[T]>, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + self.0.index_mut(index) + } +} + +impl sp_std::iter::IntoIterator for BoundedVec { + type Item = T; + type IntoIter = sp_std::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl codec::DecodeLength for BoundedVec { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedVec` stored just a `Vec`, thus the length is at the beginning in + // `Compact` form, and same implementation as `Vec` can be used. + as codec::DecodeLength>::len(self_encoded) + } +} + +// NOTE: we could also implement this as: +// impl, S2: Get> PartialEq> for BoundedVec +// to allow comparison of bounded vectors with different bounds. 
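// For illustration, the alternative cross-bound comparison mentioned in the
// note above could look like this (a sketch, not part of this diff):
//
// impl<T: PartialEq, S1, S2> PartialEq<BoundedVec<T, S2>> for BoundedVec<T, S1> {
// 	fn eq(&self, rhs: &BoundedVec<T, S2>) -> bool {
// 		self.0 == rhs.0
// 	}
// }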
+impl PartialEq for BoundedVec +where + T: PartialEq, +{ + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} + +impl> PartialEq> for BoundedVec { + fn eq(&self, other: &Vec) -> bool { + &self.0 == other + } +} + +impl Eq for BoundedVec where T: Eq {} + +impl StorageDecodeLength for BoundedVec {} + +impl> StorageTryAppend for BoundedVec { + fn bound() -> usize { + S::get() as usize + } +} + +impl MaxEncodedLen for BoundedVec +where + T: MaxEncodedLen, + S: Get, + BoundedVec: Encode, +{ + fn max_encoded_len() -> usize { + // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 + // plus each item in the slice: + // https://substrate.dev/rustdocs/v3.0.0/src/parity_scale_codec/codec.rs.html#798-808 + codec::Compact(S::get()) + .encoded_size() + .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use crate::Twox128; + use sp_io::TestExternalities; + use sp_std::convert::TryInto; + + crate::parameter_types! { + pub const Seven: u32 = 7; + pub const Four: u32 = 4; + } + + crate::generate_storage_alias! { Prefix, Foo => Value> } + crate::generate_storage_alias! { Prefix, FooMap => Map<(u32, Twox128), BoundedVec> } + crate::generate_storage_alias! { + Prefix, + FooDoubleMap => DoubleMap<(u32, Twox128), (u32, Twox128), BoundedVec> + } + + #[test] + fn try_append_is_correct() { + assert_eq!(BoundedVec::::bound(), 7); + } + + #[test] + fn decode_len_works() { + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + Foo::put(bounded); + assert_eq!(Foo::decode_len().unwrap(), 3); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooMap::insert(1, bounded); + assert_eq!(FooMap::decode_len(1).unwrap(), 3); + assert!(FooMap::decode_len(0).is_none()); + assert!(FooMap::decode_len(2).is_none()); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooDoubleMap::insert(1, 1, bounded); + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 3); + assert!(FooDoubleMap::decode_len(2, 1).is_none()); + assert!(FooDoubleMap::decode_len(1, 2).is_none()); + assert!(FooDoubleMap::decode_len(2, 2).is_none()); + }); + } + + #[test] + fn try_insert_works() { + let mut bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } + + #[test] + #[should_panic(expected = "insertion index (is 9) should be <= len (is 3)")] + fn try_inert_panics_if_oob() { + let mut bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(9, 0).unwrap(); + } + + #[test] + fn try_push_works() { + let mut bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_push(0).unwrap(); + assert_eq!(*bounded, vec![1, 2, 3, 0]); + + assert!(bounded.try_push(9).is_err()); + } + + #[test] + fn deref_coercion_works() { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + // these methods come from deref-ed vec. 
+ assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded: BoundedVec = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + let bounded = bounded.try_mutate(|v| v.push(7)).unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded.try_mutate(|v| v.push(8)).is_none()); + } + + #[test] + fn slice_indexing_works() { + let bounded: BoundedVec = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(&bounded[0..=2], &[1, 2, 3]); + } + + #[test] + fn vec_eq_works() { + let bounded: BoundedVec = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(bounded, vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn too_big_vec_fail_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + assert_eq!( + BoundedVec::::decode(&mut &v.encode()[..]), + Err("BoundedVec exceeds its limit".into()), + ); + } +} diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 431b5e0930384..4b237aaa561fd 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,25 +21,24 @@ // NOTE: could replace unhashed by having only one kind of storage (top trie being the child info // of null length parent storage key). +pub use crate::sp_io::KillStorageResult; use crate::sp_std::prelude::*; -use codec::{Codec, Encode, Decode}; +use codec::{Codec, Decode, Encode}; pub use sp_core::storage::{ChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. -pub fn get( - child_info: &ChildInfo, - key: &[u8], -) -> Option { +pub fn get(child_info: &ChildInfo, key: &[u8]) -> Option { match child_info.child_type() { ChildType::ParentKeyId => { let storage_key = child_info.storage_key(); - sp_io::default_child_storage::get( - storage_key, - key, - ).and_then(|v| { + sp_io::default_child_storage::get(storage_key, key).and_then(|v| { Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); + crate::runtime_print!( + "ERROR: Corrupted state in child trie at {:?}/{:?}", + storage_key, + key, + ); None }) }) @@ -49,20 +48,13 @@ pub fn get( /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. -pub fn get_or_default( - child_info: &ChildInfo, - key: &[u8], -) -> T { +pub fn get_or_default(child_info: &ChildInfo, key: &[u8]) -> T { get(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. -pub fn get_or( - child_info: &ChildInfo, - key: &[u8], - default_value: T, -) -> T { +pub fn get_or(child_info: &ChildInfo, key: &[u8], default_value: T) -> T { get(child_info, key).unwrap_or(default_value) } @@ -77,27 +69,16 @@ pub fn get_or_else T>( } /// Put `value` in storage under `key`. 
-pub fn put( - child_info: &ChildInfo, - key: &[u8], - value: &T, -) { +pub fn put(child_info: &ChildInfo, key: &[u8], value: &T) { match child_info.child_type() { - ChildType::ParentKeyId => value.using_encoded(|slice| - sp_io::default_child_storage::set( - child_info.storage_key(), - key, - slice, - ) - ), + ChildType::ParentKeyId => value.using_encoded(|slice| { + sp_io::default_child_storage::set(child_info.storage_key(), key, slice) + }), } } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. -pub fn take( - child_info: &ChildInfo, - key: &[u8], -) -> Option { +pub fn take(child_info: &ChildInfo, key: &[u8]) -> Option { let r = get(child_info, key); if r.is_some() { kill(child_info, key); @@ -107,20 +88,13 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. -pub fn take_or_default( - child_info: &ChildInfo, - key: &[u8], -) -> T { +pub fn take_or_default(child_info: &ChildInfo, key: &[u8]) -> T { take(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. -pub fn take_or( - child_info: &ChildInfo, - key: &[u8], - default_value: T, -) -> T { +pub fn take_or(child_info: &ChildInfo, key: &[u8], default_value: T) -> T { take(child_info, key).unwrap_or(default_value) } @@ -135,79 +109,77 @@ pub fn take_or_else T>( } /// Check to see if `key` has an explicit entry in storage. -pub fn exists( - child_info: &ChildInfo, - key: &[u8], -) -> bool { +pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::read( - child_info.storage_key(), - key, &mut [0;0][..], 0, - ).is_some(), + ChildType::ParentKeyId => + sp_io::default_child_storage::read(child_info.storage_key(), key, &mut [0; 0][..], 0) + .is_some(), } } /// Remove all `storage_key` key/values -pub fn kill_storage( - child_info: &ChildInfo, -) { +/// +/// Deletes all keys from the overlay and up to `limit` keys from the backend if +/// it is set to `Some`. No limit is applied when `limit` is set to `None`. +/// +/// The limit can be used to partially delete a child trie in case it is too large +/// to delete in one go (block). +/// +/// # Note +/// +/// Please note that keys that are residing in the overlay for that child trie when +/// issuing this call are all deleted without counting towards the `limit`. Only keys +/// written during the current block are part of the overlay. Deleting with a `limit` +/// mostly makes sense with an empty overlay for that child trie. +/// +/// Calling this function multiple times per block for the same `storage_key` does +/// not make much sense because it is not cumulative when called inside the same block. +/// Use this function to distribute the deletion of a single child trie across multiple +/// blocks. +pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageResult { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( - child_info.storage_key(), - ), + ChildType::ParentKeyId => + sp_io::default_child_storage::storage_kill(child_info.storage_key(), limit), } } /// Ensure `key` has no explicit entry in storage. 
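// Usage sketch for the new `limit` parameter of `kill_storage` above
// (illustrative; `child_info` and the per-block budget of 100 keys are
// assumptions):
//
// use frame_support::storage::child::{kill_storage, KillStorageResult};
// match kill_storage(&child_info, Some(100)) {
// 	KillStorageResult::AllRemoved(n) => { /* done, `n` keys removed */ },
// 	KillStorageResult::SomeRemaining(n) => { /* call again in the next block */ },
// }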
-pub fn kill( - child_info: &ChildInfo, - key: &[u8], -) { +pub fn kill(child_info: &ChildInfo, key: &[u8]) { match child_info.child_type() { ChildType::ParentKeyId => { - sp_io::default_child_storage::clear( - child_info.storage_key(), - key, - ); + sp_io::default_child_storage::clear(child_info.storage_key(), key); }, } } /// Get a Vec of bytes from storage. -pub fn get_raw( - child_info: &ChildInfo, - key: &[u8], -) -> Option> { +pub fn get_raw(child_info: &ChildInfo, key: &[u8]) -> Option> { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::get( - child_info.storage_key(), - key, - ), + ChildType::ParentKeyId => sp_io::default_child_storage::get(child_info.storage_key(), key), } } /// Put a raw byte slice into storage. -pub fn put_raw( - child_info: &ChildInfo, - key: &[u8], - value: &[u8], -) { +pub fn put_raw(child_info: &ChildInfo, key: &[u8], value: &[u8]) { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::set( - child_info.storage_key(), - key, - value, - ), + ChildType::ParentKeyId => + sp_io::default_child_storage::set(child_info.storage_key(), key, value), } } /// Calculate current child root value. -pub fn root( - child_info: &ChildInfo, -) -> Vec { +pub fn root(child_info: &ChildInfo) -> Vec { + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::root(child_info.storage_key()), + } +} + +/// Return the length in bytes of the value without reading it. `None` if it does not exist. +pub fn len(child_info: &ChildInfo, key: &[u8]) -> Option { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::root( - child_info.storage_key(), - ), + ChildType::ParentKeyId => { + let mut buffer = [0; 0]; + sp_io::default_child_storage::read(child_info.storage_key(), key, &mut buffer, 0) + }, } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 9454ab401da28..636a10feb1ab3 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,11 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_std::prelude::*; -use sp_std::borrow::Borrow; -use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike}; -use crate::{storage::{self, unhashed, StorageAppend, PrefixIterator}, Never}; -use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; +use crate::{ + hash::{ReversibleStorageHasher, StorageHasher}, + storage::{self, storage_prefix, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, + Never, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; +use sp_std::{borrow::Borrow, prelude::*}; /// Generator for `StorageDoubleMap` used by `decl_storage`. /// @@ -60,17 +62,8 @@ pub trait StorageDoubleMap { /// The full prefix; just the hash of `module_prefix` concatenated to the hash of /// `storage_prefix`. 
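That fixed 32-byte prefix is then extended with each hashed, encoded key to form an entry's final storage key. A sketch rebuilding a double-map key by hand, matching the layout the following functions produce (pallet and storage names are hypothetical):

```rust
use codec::Encode;
use frame_support::{Blake2_128Concat, StorageHasher, Twox128, Twox64Concat};

/// Recompute, by hand, the final key of an entry in a
/// `double_map hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32`.
fn manual_double_map_key(k1: u16, k2: u32) -> Vec<u8> {
	let mut key = Vec::new();
	key.extend_from_slice(&Twox128::hash(b"MyPallet")); // module prefix
	key.extend_from_slice(&Twox128::hash(b"MyDoubleMap")); // storage prefix
	key.extend_from_slice(Blake2_128Concat::hash(&k1.encode()).as_ref());
	key.extend_from_slice(Twox64Concat::hash(&k2.encode()).as_ref());
	key
}
```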
fn prefix_hash() -> Vec { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - - let mut result = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() - ); - - result.extend_from_slice(&module_prefix_hashed[..]); - result.extend_from_slice(&storage_prefix_hashed[..]); - - result + let result = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + result.to_vec() } /// Convert an optional value retrieved from storage to the type queried. @@ -80,43 +73,36 @@ pub trait StorageDoubleMap { fn from_query_to_optional_value(v: Self::Query) -> Option; /// Generate the first part of the key used in top storage. - fn storage_double_map_final_key1(k1: KArg1) -> Vec where + fn storage_double_map_final_key1(k1: KArg1) -> Vec + where KArg1: EncodeLike, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() - ); + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key } /// Generate the full key used in top storage. - fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec where + fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec + where KArg1: EncodeLike, KArg2: EncodeLike, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key1_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); let key2_hashed = k2.borrow().using_encoded(Self::Hasher2::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() - + storage_prefix_hashed.len() - + key1_hashed.as_ref().len() - + key2_hashed.as_ref().len() + storage_prefix.len() + key1_hashed.as_ref().len() + key2_hashed.as_ref().len(), ); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key1_hashed.as_ref()); final_key.extend_from_slice(key2_hashed.as_ref()); @@ -124,7 +110,8 @@ pub trait StorageDoubleMap { } } -impl storage::StorageDoubleMap for G where +impl storage::StorageDoubleMap for G +where K1: FullEncode, K2: FullEncode, V: FullCodec, @@ -132,28 +119,40 @@ impl storage::StorageDoubleMap for G where { type Query = G::Query; - fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec where + fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec + where KArg1: EncodeLike, KArg2: EncodeLike, { Self::storage_double_map_final_key(k1, k2) } - fn contains_key(k1: KArg1, k2: KArg2) -> bool where + fn contains_key(k1: KArg1, k2: KArg2) -> bool + where KArg1: EncodeLike, KArg2: EncodeLike, { unhashed::exists(&Self::storage_double_map_final_key(k1, k2)) } - fn get(k1: KArg1, k2: KArg2) -> Self::Query where + fn get(k1: KArg1, k2: KArg2) -> Self::Query + where KArg1: EncodeLike, KArg2: EncodeLike, { 
G::from_optional_value_to_query(unhashed::get(&Self::storage_double_map_final_key(k1, k2))) } - fn take(k1: KArg1, k2: KArg2) -> Self::Query where + fn try_get(k1: KArg1, k2: KArg2) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + unhashed::get(&Self::storage_double_map_final_key(k1, k2)).ok_or(()) + } + + fn take(k1: KArg1, k2: KArg2) -> Self::Query + where KArg1: EncodeLike, KArg2: EncodeLike, { @@ -163,16 +162,12 @@ impl storage::StorageDoubleMap for G where G::from_optional_value_to_query(value) } - fn swap( - x_k1: XKArg1, - x_k2: XKArg2, - y_k1: YKArg1, - y_k2: YKArg2 - ) where + fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) + where XKArg1: EncodeLike, XKArg2: EncodeLike, YKArg1: EncodeLike, - YKArg2: EncodeLike + YKArg2: EncodeLike, { let final_x_key = Self::storage_double_map_final_key(x_k1, x_k2); let final_y_key = Self::storage_double_map_final_key(y_k1, y_k2); @@ -190,7 +185,8 @@ impl storage::StorageDoubleMap for G where } } - fn insert(k1: KArg1, k2: KArg2, val: VArg) where + fn insert(k1: KArg1, k2: KArg2, val: VArg) + where KArg1: EncodeLike, KArg2: EncodeLike, VArg: EncodeLike, @@ -198,19 +194,24 @@ impl storage::StorageDoubleMap for G where unhashed::put(&Self::storage_double_map_final_key(k1, k2), &val.borrow()) } - fn remove(k1: KArg1, k2: KArg2) where + fn remove(k1: KArg1, k2: KArg2) + where KArg1: EncodeLike, KArg2: EncodeLike, { unhashed::kill(&Self::storage_double_map_final_key(k1, k2)) } - fn remove_prefix(k1: KArg1) where KArg1: EncodeLike { - unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref()) + fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult + where + KArg1: EncodeLike, + { + unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref(), limit) } - fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator where - KArg1: ?Sized + EncodeLike + fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator + where + KArg1: ?Sized + EncodeLike, { let prefix = Self::storage_double_map_final_key1(k1); storage::PrefixIterator { @@ -218,15 +219,18 @@ impl storage::StorageDoubleMap for G where previous_key: prefix, drain: false, closure: |_raw_key, mut raw_value| V::decode(&mut raw_value), + phantom: Default::default(), } } - fn mutate(k1: KArg1, k2: KArg2, f: F) -> R where + fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where KArg1: EncodeLike, KArg2: EncodeLike, F: FnOnce(&mut Self::Query) -> R, { - Self::try_mutate(k1, k2, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + Self::try_mutate(k1, k2, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } fn mutate_exists(k1: KArg1, k2: KArg2, f: F) -> R @@ -235,10 +239,12 @@ impl storage::StorageDoubleMap for G where KArg2: EncodeLike, F: FnOnce(&mut Option) -> R, { - Self::try_mutate_exists(k1, k2, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + Self::try_mutate_exists(k1, k2, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } - fn try_mutate(k1: KArg1, k2: KArg2, f: F) -> Result where + fn try_mutate(k1: KArg1, k2: KArg2, f: F) -> Result + where KArg1: EncodeLike, KArg2: EncodeLike, F: FnOnce(&mut Self::Query) -> Result, @@ -275,11 +281,8 @@ impl storage::StorageDoubleMap for G where ret } - fn append( - k1: KArg1, - k2: KArg2, - item: EncodeLikeItem, - ) where + fn append(k1: KArg1, k2: KArg2, item: EncodeLikeItem) + where KArg1: EncodeLike, KArg2: EncodeLike, Item: Encode, @@ -295,22 +298,21 @@ impl storage::StorageDoubleMap for G where OldHasher2: StorageHasher, KeyArg1: 
EncodeLike, KeyArg2: EncodeLike, - >(key1: KeyArg1, key2: KeyArg2) -> Option { + >( + key1: KeyArg1, + key2: KeyArg2, + ) -> Option { let old_key = { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + let key1_hashed = key1.borrow().using_encoded(OldHasher1::hash); let key2_hashed = key2.borrow().using_encoded(OldHasher2::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() - + storage_prefix_hashed.len() - + key1_hashed.as_ref().len() - + key2_hashed.as_ref().len() + storage_prefix.len() + key1_hashed.as_ref().len() + key2_hashed.as_ref().len(), ); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key1_hashed.as_ref()); final_key.extend_from_slice(key2_hashed.as_ref()); @@ -323,16 +325,15 @@ impl storage::StorageDoubleMap for G where } } -impl< - K1: FullCodec, - K2: FullCodec, - V: FullCodec, - G: StorageDoubleMap, -> storage::IterableStorageDoubleMap for G where +impl> + storage::IterableStorageDoubleMap for G +where G::Hasher1: ReversibleStorageHasher, - G::Hasher2: ReversibleStorageHasher + G::Hasher2: ReversibleStorageHasher, { + type PartialKeyIterator = KeyPrefixIterator; type PrefixIterator = PrefixIterator<(K2, V)>; + type FullKeyIterator = KeyPrefixIterator<(K1, K2)>; type Iterator = PrefixIterator<(K1, K2, V)>; fn iter_prefix(k1: impl EncodeLike) -> Self::PrefixIterator { @@ -345,9 +346,41 @@ impl< let mut key_material = G::Hasher2::reverse(raw_key_without_prefix); Ok((K2::decode(&mut key_material)?, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), + } + } + + fn iter_prefix_from( + k1: impl EncodeLike, + starting_raw_key: Vec, + ) -> Self::PrefixIterator { + let mut iter = Self::iter_prefix(k1); + iter.set_last_raw_key(starting_raw_key); + iter + } + + fn iter_key_prefix(k1: impl EncodeLike) -> Self::PartialKeyIterator { + let prefix = G::storage_double_map_final_key1(k1); + Self::PartialKeyIterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: |raw_key_without_prefix| { + let mut key_material = G::Hasher2::reverse(raw_key_without_prefix); + K2::decode(&mut key_material) + }, } } + fn iter_key_prefix_from( + k1: impl EncodeLike, + starting_raw_key: Vec, + ) -> Self::PartialKeyIterator { + let mut iter = Self::iter_key_prefix(k1); + iter.set_last_raw_key(starting_raw_key); + iter + } + fn drain_prefix(k1: impl EncodeLike) -> Self::PrefixIterator { let mut iterator = Self::iter_prefix(k1); iterator.drain = true; @@ -367,26 +400,55 @@ impl< let k2 = K2::decode(&mut k2_material)?; Ok((k1, k2, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), + } + } + + fn iter_from(starting_raw_key: Vec) -> Self::Iterator { + let mut iter = Self::iter(); + iter.set_last_raw_key(starting_raw_key); + iter + } + + fn iter_keys() -> Self::FullKeyIterator { + let prefix = G::prefix_hash(); + Self::FullKeyIterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: |raw_key_without_prefix| { + let mut k1_k2_material = G::Hasher1::reverse(raw_key_without_prefix); + let k1 = K1::decode(&mut k1_k2_material)?; + let mut k2_material = G::Hasher2::reverse(k1_k2_material); + let k2 = K2::decode(&mut k2_material)?; + Ok((k1, k2)) + }, } } + fn iter_keys_from(starting_raw_key: Vec) -> 
Self::FullKeyIterator { + let mut iter = Self::iter_keys(); + iter.set_last_raw_key(starting_raw_key); + iter + } + fn drain() -> Self::Iterator { let mut iterator = Self::iter(); iterator.drain = true; iterator } - fn translate Option>(f: F) { + fn translate Option>(mut f: F) { let prefix = G::prefix_hash(); let mut previous_key = prefix.clone(); - while let Some(next) = sp_io::storage::next_key(&previous_key) - .filter(|n| n.starts_with(&prefix)) + while let Some(next) = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { previous_key = next; let value = match unhashed::get::(&previous_key) { Some(value) => value, None => { - crate::debug::error!("Invalid translate: fail to decode old value"); + log::error!("Invalid translate: fail to decode old value"); continue }, }; @@ -394,7 +456,7 @@ impl< let key1 = match K1::decode(&mut key_material) { Ok(key1) => key1, Err(_) => { - crate::debug::error!("Invalid translate: fail to decode key1"); + log::error!("Invalid translate: fail to decode key1"); continue }, }; @@ -403,7 +465,7 @@ impl< let key2 = match K2::decode(&mut key2_material) { Ok(key2) => key2, Err(_) => { - crate::debug::error!("Invalid translate: fail to decode key2"); + log::error!("Invalid translate: fail to decode key2"); continue }, }; @@ -419,26 +481,28 @@ impl< /// Test iterators for StorageDoubleMap #[cfg(test)] mod test_iterators { - use codec::{Encode, Decode}; use crate::{ hash::StorageHasher, - storage::{generator::StorageDoubleMap, IterableStorageDoubleMap, unhashed}, + storage::{generator::StorageDoubleMap, unhashed, IterableStorageDoubleMap}, }; + use codec::{Decode, Encode}; - pub trait Trait { + pub trait Config: 'static { type Origin; type BlockNumber; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: crate::traits::Get; } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); crate::decl_storage! 
{ - trait Store for Module as Test { + trait Store for Module as Test { DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32 => u64; } } @@ -457,6 +521,42 @@ mod test_iterators { prefix } + #[test] + fn double_map_iter_from() { + sp_io::TestExternalities::default().execute_with(|| { + use crate::hash::Identity; + crate::generate_storage_alias!( + MyModule, + MyDoubleMap => DoubleMap<(u64, Identity), (u64, Identity), u64> + ); + + MyDoubleMap::insert(1, 10, 100); + MyDoubleMap::insert(1, 21, 201); + MyDoubleMap::insert(1, 31, 301); + MyDoubleMap::insert(1, 41, 401); + MyDoubleMap::insert(2, 20, 200); + MyDoubleMap::insert(3, 30, 300); + MyDoubleMap::insert(4, 40, 400); + MyDoubleMap::insert(5, 50, 500); + + let starting_raw_key = MyDoubleMap::storage_double_map_final_key(1, 21); + let iter = MyDoubleMap::iter_key_prefix_from(1, starting_raw_key); + assert_eq!(iter.collect::>(), vec![31, 41]); + + let starting_raw_key = MyDoubleMap::storage_double_map_final_key(1, 31); + let iter = MyDoubleMap::iter_prefix_from(1, starting_raw_key); + assert_eq!(iter.collect::>(), vec![(41, 401)]); + + let starting_raw_key = MyDoubleMap::storage_double_map_final_key(2, 20); + let iter = MyDoubleMap::iter_keys_from(starting_raw_key); + assert_eq!(iter.collect::>(), vec![(3, 30), (4, 40), (5, 50)]); + + let starting_raw_key = MyDoubleMap::storage_double_map_final_key(3, 30); + let iter = MyDoubleMap::iter_from(starting_raw_key); + assert_eq!(iter.collect::>(), vec![(4, 40, 400), (5, 50, 500)]); + }); + } + #[test] fn double_map_reversible_reversible_iteration() { sp_io::TestExternalities::default().execute_with(|| { @@ -476,10 +576,12 @@ mod test_iterators { ); assert_eq!( - DoubleMap::iter_values().collect::>(), - vec![3, 0, 2, 1], + DoubleMap::iter_keys().collect::>(), + vec![(3, 3), (0, 0), (2, 2), (1, 1)], ); + assert_eq!(DoubleMap::iter_values().collect::>(), vec![3, 0, 2, 1]); + assert_eq!( DoubleMap::drain().collect::>(), vec![(3, 3, 3), (0, 0, 0), (2, 2, 2), (1, 1, 1)], @@ -505,10 +607,9 @@ mod test_iterators { vec![(1, 1), (2, 2), (0, 0), (3, 3)], ); - assert_eq!( - DoubleMap::iter_prefix_values(k1).collect::>(), - vec![1, 2, 0, 3], - ); + assert_eq!(DoubleMap::iter_key_prefix(k1).collect::>(), vec![1, 2, 0, 3]); + + assert_eq!(DoubleMap::iter_prefix_values(k1).collect::>(), vec![1, 2, 0, 3]); assert_eq!( DoubleMap::drain_prefix(k1).collect::>(), @@ -529,15 +630,12 @@ mod test_iterators { } // Wrong key1 - unhashed::put( - &[prefix.clone(), vec![1, 2, 3]].concat(), - &3u64.encode() - ); + unhashed::put(&[prefix.clone(), vec![1, 2, 3]].concat(), &3u64.encode()); // Wrong key2 unhashed::put( &[prefix.clone(), crate::Blake2_128Concat::hash(&1u16.encode())].concat(), - &3u64.encode() + &3u64.encode(), ); // Wrong value @@ -546,11 +644,12 @@ mod test_iterators { prefix.clone(), crate::Blake2_128Concat::hash(&1u16.encode()), crate::Twox64Concat::hash(&2u32.encode()), - ].concat(), + ] + .concat(), &vec![1], ); - DoubleMap::translate(|_k1, _k2, v: u64| Some(v*2)); + DoubleMap::translate(|_k1, _k2, v: u64| Some(v * 2)); assert_eq!( DoubleMap::iter().collect::>(), vec![(3, 3, 6), (0, 0, 0), (2, 2, 4), (1, 1, 2)], diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index 1c13de52e1640..1a4225173c4ae 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(not(feature = "std"))] -use sp_std::prelude::*; -use sp_std::borrow::Borrow; -use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike}; use crate::{ - storage::{self, unhashed, StorageAppend, PrefixIterator}, - Never, hash::{StorageHasher, Twox128, ReversibleStorageHasher}, + hash::{ReversibleStorageHasher, StorageHasher}, + storage::{self, storage_prefix, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, + Never, }; +use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; +use sp_std::borrow::Borrow; +#[cfg(not(feature = "std"))] +use sp_std::prelude::*; /// Generator for `StorageMap` used by `decl_storage`. /// @@ -51,17 +52,8 @@ pub trait StorageMap { /// The full prefix; just the hash of `module_prefix` concatenated to the hash of /// `storage_prefix`. fn prefix_hash() -> Vec { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - - let mut result = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() - ); - - result.extend_from_slice(&module_prefix_hashed[..]); - result.extend_from_slice(&storage_prefix_hashed[..]); - - result + let result = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + result.to_vec() } /// Convert an optional value retrieved from storage to the type queried. @@ -71,19 +63,16 @@ pub trait StorageMap { fn from_query_to_optional_value(v: Self::Query) -> Option; /// Generate the full key used in top storage. 
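As in the double-map generator, the hand-rolled `Twox128`-and-concatenate computation collapses into the shared `storage::storage_prefix` helper, which returns the 32-byte prefix as a fixed array. A quick equivalence check, assuming that signature:

```rust
use frame_support::{storage::storage_prefix, StorageHasher, Twox128};

fn main() {
	// The helper is exactly the two 16-byte hashes laid out back to back.
	let prefix: [u8; 32] = storage_prefix(b"MyPallet", b"MyMap");
	assert_eq!(&prefix[..16], &Twox128::hash(b"MyPallet")[..]);
	assert_eq!(&prefix[16..], &Twox128::hash(b"MyMap")[..]);
}
```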
- fn storage_map_final_key(key: KeyArg) -> Vec where + fn storage_map_final_key(key: KeyArg) -> Vec + where KeyArg: EncodeLike, { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = key.borrow().using_encoded(Self::Hasher::hash); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() - ); + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key @@ -98,11 +87,9 @@ pub struct StorageMapIterator { _phantom: ::sp_std::marker::PhantomData<(K, V, Hasher)>, } -impl< - K: Decode + Sized, - V: Decode + Sized, - Hasher: ReversibleStorageHasher -> Iterator for StorageMapIterator { +impl Iterator + for StorageMapIterator +{ type Item = (K, V); fn next(&mut self) -> Option<(K, V)> { @@ -117,29 +104,28 @@ impl< if self.drain { unhashed::kill(&self.previous_key) } - let mut key_material = Hasher::reverse(&self.previous_key[self.prefix.len()..]); + let mut key_material = + Hasher::reverse(&self.previous_key[self.prefix.len()..]); match K::decode(&mut key_material) { Ok(key) => Some((key, value)), Err(_) => continue, } - } + }, None => continue, } - } + }, None => None, } } } } -impl< - K: FullCodec, - V: FullCodec, - G: StorageMap, -> storage::IterableStorageMap for G where - G::Hasher: ReversibleStorageHasher +impl> storage::IterableStorageMap for G +where + G::Hasher: ReversibleStorageHasher, { type Iterator = PrefixIterator<(K, V)>; + type KeyIterator = KeyPrefixIterator; /// Enumerate all elements in the map. fn iter() -> Self::Iterator { @@ -152,9 +138,38 @@ impl< let mut key_material = G::Hasher::reverse(raw_key_without_prefix); Ok((K::decode(&mut key_material)?, V::decode(&mut raw_value)?)) }, + phantom: Default::default(), + } + } + + /// Enumerate all elements in the map after a given key. + fn iter_from(starting_raw_key: Vec) -> Self::Iterator { + let mut iter = Self::iter(); + iter.set_last_raw_key(starting_raw_key); + iter + } + + /// Enumerate all keys in the map. + fn iter_keys() -> Self::KeyIterator { + let prefix = G::prefix_hash(); + KeyPrefixIterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: |raw_key_without_prefix| { + let mut key_material = G::Hasher::reverse(raw_key_without_prefix); + K::decode(&mut key_material) + }, } } + /// Enumerate all keys in the map after a given key. + fn iter_keys_from(starting_raw_key: Vec) -> Self::KeyIterator { + let mut iter = Self::iter_keys(); + iter.set_last_raw_key(starting_raw_key); + iter + } + /// Enumerate all elements in the map. 
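`iter_from` and `iter_keys_from` are the building blocks for resumable, paginated iteration: persist the raw hashed key of the last processed entry and restart strictly after it, typically in a later block. A sketch mirroring the alias style used by this file's tests (pallet and storage names are illustrative):

```rust
use frame_support::{
	generate_storage_alias,
	hash::Identity,
	storage::{IterableStorageMap as _, StorageMap as _},
};

fn main() {
	sp_io::TestExternalities::default().execute_with(|| {
		generate_storage_alias!(MyPallet, Counters => Map<(u64, Identity), u64>);

		for i in 0..100u64 {
			Counters::insert(i, i);
		}

		// First page: remember the raw hashed key of the last processed entry.
		let mut last_raw_key = Vec::new();
		for (k, _v) in Counters::iter().take(10) {
			last_raw_key = Counters::hashed_key_for(k);
		}

		// Later (e.g. in the next block): resume strictly after that key.
		let second_page = Counters::iter_from(last_raw_key).take(10).count();
		assert_eq!(second_page, 10);
	})
}
```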
fn drain() -> Self::Iterator { let mut iterator = Self::iter(); @@ -162,17 +177,17 @@ impl< iterator } - fn translate Option>(f: F) { + fn translate Option>(mut f: F) { let prefix = G::prefix_hash(); let mut previous_key = prefix.clone(); - while let Some(next) = sp_io::storage::next_key(&previous_key) - .filter(|n| n.starts_with(&prefix)) + while let Some(next) = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { previous_key = next; let value = match unhashed::get::(&previous_key) { Some(value) => value, None => { - crate::debug::error!("Invalid translate: fail to decode old value"); + log::error!("Invalid translate: fail to decode old value"); continue }, }; @@ -181,7 +196,7 @@ impl< let key = match K::decode(&mut key_material) { Ok(key) => key, Err(_) => { - crate::debug::error!("Invalid translate: fail to decode key"); + log::error!("Invalid translate: fail to decode key"); continue }, }; @@ -226,6 +241,10 @@ impl> storage::StorageMap G::from_optional_value_to_query(unhashed::get(Self::storage_map_final_key(key).as_ref())) } + fn try_get>(key: KeyArg) -> Result { + unhashed::get(Self::storage_map_final_key(key).as_ref()).ok_or(()) + } + fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { unhashed::put(Self::storage_map_final_key(key).as_ref(), &val) } @@ -235,16 +254,21 @@ impl> storage::StorageMap } fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R { - Self::try_mutate(key, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + Self::try_mutate(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } - fn mutate_exists, R, F: FnOnce(&mut Option) -> R>(key: KeyArg, f: F) -> R { - Self::try_mutate_exists(key, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R { + Self::try_mutate_exists(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } fn try_mutate, R, E, F: FnOnce(&mut Self::Query) -> Result>( key: KeyArg, - f: F + f: F, ) -> Result { let final_key = Self::storage_map_final_key(key); let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); @@ -261,7 +285,7 @@ impl> storage::StorageMap fn try_mutate_exists, R, E, F: FnOnce(&mut Option) -> Result>( key: KeyArg, - f: F + f: F, ) -> Result { let final_key = Self::storage_map_final_key(key); let mut val = unhashed::get(final_key.as_ref()); @@ -295,16 +319,13 @@ impl> storage::StorageMap fn migrate_key>(key: KeyArg) -> Option { let old_key = { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); let key_hashed = key.borrow().using_encoded(OldHasher::hash); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() - ); + let mut final_key = + Vec::with_capacity(storage_prefix.len() + key_hashed.as_ref().len()); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix); final_key.extend_from_slice(key_hashed.as_ref()); final_key @@ -319,26 +340,28 @@ impl> storage::StorageMap /// Test iterators for StorageMap #[cfg(test)] mod test_iterators { - use codec::{Encode, Decode}; use crate::{ hash::StorageHasher, - storage::{generator::StorageMap, IterableStorageMap, unhashed}, + 
storage::{generator::StorageMap, unhashed, IterableStorageMap}, }; + use codec::{Decode, Encode}; - pub trait Trait { + pub trait Config: 'static { type Origin; type BlockNumber; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: crate::traits::Get; } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] struct NoDef(u32); crate::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { Map: map hasher(blake2_128_concat) u16 => u64; } } @@ -357,6 +380,28 @@ mod test_iterators { prefix } + #[test] + fn map_iter_from() { + sp_io::TestExternalities::default().execute_with(|| { + use crate::hash::Identity; + crate::generate_storage_alias!(MyModule, MyMap => Map<(u64, Identity), u64>); + + MyMap::insert(1, 10); + MyMap::insert(2, 20); + MyMap::insert(3, 30); + MyMap::insert(4, 40); + MyMap::insert(5, 50); + + let starting_raw_key = MyMap::storage_map_final_key(3); + let iter = MyMap::iter_from(starting_raw_key); + assert_eq!(iter.collect::>(), vec![(4, 40), (5, 50)]); + + let starting_raw_key = MyMap::storage_map_final_key(2); + let iter = MyMap::iter_keys_from(starting_raw_key); + assert_eq!(iter.collect::>(), vec![3, 4, 5]); + }); + } + #[test] fn map_reversible_reversible_iteration() { sp_io::TestExternalities::default().execute_with(|| { @@ -372,6 +417,8 @@ mod test_iterators { assert_eq!(Map::iter().collect::>(), vec![(3, 3), (0, 0), (2, 2), (1, 1)]); + assert_eq!(Map::iter_keys().collect::>(), vec![3, 0, 2, 1]); + assert_eq!(Map::iter_values().collect::>(), vec![3, 0, 2, 1]); assert_eq!(Map::drain().collect::>(), vec![(3, 3), (0, 0), (2, 2), (1, 1)]); @@ -398,7 +445,7 @@ mod test_iterators { &vec![1], ); - Map::translate(|_k1, v: u64| Some(v*2)); + Map::translate(|_k1, v: u64| Some(v * 2)); assert_eq!(Map::iter().collect::>(), vec![(3, 6), (0, 0), (2, 4), (1, 2)]); }) } diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 7df7dfd317399..576bada2e262c 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,39 +24,48 @@ //! //! This is internal api and is subject to change. -mod map; mod double_map; +pub(crate) mod map; +mod nmap; mod value; -pub use map::StorageMap; pub use double_map::StorageDoubleMap; +pub use map::StorageMap; +pub use nmap::StorageNMap; pub use value::StorageValue; #[cfg(test)] #[allow(dead_code)] mod tests { - use sp_io::TestExternalities; + use crate::{ + assert_noop, assert_ok, + storage::{generator::StorageValue, unhashed, IterableStorageMap}, + }; use codec::Encode; - use crate::storage::{unhashed, generator::StorageValue, IterableStorageMap}; - use crate::{assert_noop, assert_ok}; + use sp_io::TestExternalities; + + struct Runtime; - struct Runtime {} - pub trait Trait { + pub trait Config: 'static { type Origin; type BlockNumber; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: crate::traits::Get; } - impl Trait for Runtime { + impl Config for Runtime { type Origin = u32; type BlockNumber = u32; + type PalletInfo = crate::tests::PanicPalletInfo; + type DbWeight = (); } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin, system=self {} } crate::decl_storage! { - trait Store for Module as Runtime { + trait Store for Module as Runtime { Value get(fn value) config(): (u64, u64); NumberMap: map hasher(identity) u32 => u64; DoubleMap: double_map hasher(identity) u32, hasher(identity) u32 => u64; @@ -73,9 +82,10 @@ mod tests { // translate let translate_fn = |old: Option| -> Option<(u64, u64)> { - old.map(|o| (o.into(), (o*2).into())) + old.map(|o| (o.into(), (o * 2).into())) }; - let _ = Value::translate(translate_fn); + let res = Value::translate(translate_fn); + debug_assert!(res.is_ok()); // new storage should be `(1111, 1111 * 2)` assert_eq!(Value::get(), (1111, 2222)); @@ -97,11 +107,16 @@ mod tests { ); // do translation. - NumberMap::translate(|k: u32, v: u64| if k % 2 == 0 { Some((k as u64) << 32 | v) } else { None }); + NumberMap::translate( + |k: u32, v: u64| if k % 2 == 0 { Some((k as u64) << 32 | v) } else { None }, + ); assert_eq!( NumberMap::iter().collect::>(), - (0..50u32).map(|x| x * 2).map(|x| (x, (x as u64) << 32 | x as u64)).collect::>(), + (0..50u32) + .map(|x| x * 2) + .map(|x| (x, (x as u64) << 32 | x as u64)) + .collect::>(), ); }) } @@ -115,20 +130,29 @@ mod tests { assert_eq!(DoubleMap::get(0, 0), 0); // `assert_noop` ensures that the state does not change - assert_noop!(Value::try_mutate(|value| -> Result<(), &'static str> { - *value = (2, 2); - Err("don't change value") - }), "don't change value"); + assert_noop!( + Value::try_mutate(|value| -> Result<(), &'static str> { + *value = (2, 2); + Err("don't change value") + }), + "don't change value" + ); - assert_noop!(NumberMap::try_mutate(0, |value| -> Result<(), &'static str> { - *value = 4; - Err("don't change value") - }), "don't change value"); + assert_noop!( + NumberMap::try_mutate(0, |value| -> Result<(), &'static str> { + *value = 4; + Err("don't change value") + }), + "don't change value" + ); - assert_noop!(DoubleMap::try_mutate(0, 0, |value| -> Result<(), &'static str> { - *value = 6; - Err("don't change value") - }), "don't change value"); + assert_noop!( + DoubleMap::try_mutate(0, 0, |value| -> Result<(), &'static str> { + *value = 6; + Err("don't change value") + }), + "don't change value" + ); // Showing this explicitly for clarity assert_eq!(Value::get(), (0, 0)); diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs new file mode 100755 index 0000000000000..4845673d3d8c2 --- /dev/null +++ b/frame/support/src/storage/generator/nmap.rs @@ -0,0 +1,630 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Generator for `StorageNMap` used by `decl_storage` and storage types. +//! +//! By default each key value is stored at: +//! ```nocompile +//! Twox128(pallet_prefix) ++ Twox128(storage_prefix) +//! 
++ Hasher1(encode(key1)) ++ Hasher2(encode(key2)) ++ ... ++ HasherN(encode(keyN)) +//! ``` +//! +//! # Warning +//! +//! If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as +//! `blake2_256` must be used. Otherwise, other values in storage with the same prefix can +//! be compromised. + +use crate::{ + storage::{ + self, storage_prefix, + types::{ + EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, KeyGenerator, + ReversibleKeyGenerator, TupleToEncodedIter, + }, + unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend, + }, + Never, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec}; +#[cfg(not(feature = "std"))] +use sp_std::prelude::*; + +/// Generator for `StorageNMap` used by `decl_storage` and storage types. +/// +/// By default each key value is stored at: +/// ```nocompile +/// Twox128(pallet_prefix) ++ Twox128(storage_prefix) +/// ++ Hasher1(encode(key1)) ++ Hasher2(encode(key2)) ++ ... ++ HasherN(encode(keyN)) +/// ``` +/// +/// # Warning +/// +/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as +/// `blake2_256` must be used. Otherwise, other values in storage with the same prefix can +/// be compromised. +pub trait StorageNMap { + /// The type that get/take returns. + type Query; + + /// Module prefix. Used for generating final key. + fn module_prefix() -> &'static [u8]; + + /// Storage prefix. Used for generating final key. + fn storage_prefix() -> &'static [u8]; + + /// The full prefix; just the hash of `module_prefix` concatenated to the hash of + /// `storage_prefix`. + fn prefix_hash() -> Vec { + let result = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + result.to_vec() + } + + /// Convert an optional value retrieved from storage to the type queried. + fn from_optional_value_to_query(v: Option) -> Self::Query; + + /// Convert a query to an optional value into storage. + fn from_query_to_optional_value(v: Self::Query) -> Option; + + /// Generate a partial key used in top storage. + fn storage_n_map_partial_key(key: KP) -> Vec + where + K: HasKeyPrefix, + { + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + let key_hashed = >::partial_key(key); + + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len()); + + final_key.extend_from_slice(&storage_prefix); + final_key.extend_from_slice(key_hashed.as_ref()); + + final_key + } + + /// Generate the full key used in top storage. 
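The partial/final key split is the heart of the N-map design: hashing only the first k of n keys yields a prefix under which every remaining suffix lives, so the map can be queried by any leading subset of its keys. A sketch of the behaviour this buys, assuming the `StorageNMap` storage type and `StorageInstance` trait introduced alongside this generator (prefix and names are illustrative):

```rust
use frame_support::{
	storage::{
		types::{Key, StorageNMap},
		IterableStorageNMap as _, StorageNMap as _,
	},
	traits::StorageInstance,
	Blake2_128Concat, Twox64Concat,
};

// Hypothetical prefix; pallets normally get this generated for them.
struct ScoresPrefix;
impl StorageInstance for ScoresPrefix {
	fn pallet_prefix() -> &'static str {
		"MyPallet"
	}
	const STORAGE_PREFIX: &'static str = "Scores";
}

// A two-key N-map: (group: u16, member: u32) -> score: u64.
type Scores =
	StorageNMap<ScoresPrefix, (Key<Blake2_128Concat, u16>, Key<Twox64Concat, u32>), u64>;

fn main() {
	sp_io::TestExternalities::default().execute_with(|| {
		Scores::insert((1u16, 10u32), 100u64);
		Scores::insert((1u16, 11u32), 101u64);
		Scores::insert((2u16, 10u32), 200u64);

		// Querying by the leading key alone yields the remaining
		// suffix (`u32`) alongside each value under that prefix.
		let group1: Vec<(u32, u64)> = Scores::iter_prefix((1u16,)).collect();
		assert_eq!(group1.len(), 2);
	})
}
```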
+ fn storage_n_map_final_key(key: KArg) -> Vec + where + KG: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter, + { + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + let key_hashed = KG::final_key(key); + + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len()); + + final_key.extend_from_slice(&storage_prefix); + final_key.extend_from_slice(key_hashed.as_ref()); + + final_key + } +} + +impl storage::StorageNMap for G +where + K: KeyGenerator, + V: FullCodec, + G: StorageNMap, +{ + type Query = G::Query; + + fn hashed_key_for + TupleToEncodedIter>(key: KArg) -> Vec { + Self::storage_n_map_final_key::(key) + } + + fn contains_key + TupleToEncodedIter>(key: KArg) -> bool { + unhashed::exists(&Self::storage_n_map_final_key::(key)) + } + + fn get + TupleToEncodedIter>(key: KArg) -> Self::Query { + G::from_optional_value_to_query(unhashed::get(&Self::storage_n_map_final_key::(key))) + } + + fn try_get + TupleToEncodedIter>(key: KArg) -> Result { + unhashed::get(&Self::storage_n_map_final_key::(key)).ok_or(()) + } + + fn take + TupleToEncodedIter>(key: KArg) -> Self::Query { + let final_key = Self::storage_n_map_final_key::(key); + + let value = unhashed::take(&final_key); + G::from_optional_value_to_query(value) + } + + fn swap(key1: KArg1, key2: KArg2) + where + KOther: KeyGenerator, + KArg1: EncodeLikeTuple + TupleToEncodedIter, + KArg2: EncodeLikeTuple + TupleToEncodedIter, + { + let final_x_key = Self::storage_n_map_final_key::(key1); + let final_y_key = Self::storage_n_map_final_key::(key2); + + let v1 = unhashed::get_raw(&final_x_key); + if let Some(val) = unhashed::get_raw(&final_y_key) { + unhashed::put_raw(&final_x_key, &val); + } else { + unhashed::kill(&final_x_key); + } + if let Some(val) = v1 { + unhashed::put_raw(&final_y_key, &val); + } else { + unhashed::kill(&final_y_key); + } + } + + fn insert(key: KArg, val: VArg) + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + VArg: EncodeLike, + { + unhashed::put(&Self::storage_n_map_final_key::(key), &val); + } + + fn remove + TupleToEncodedIter>(key: KArg) { + unhashed::kill(&Self::storage_n_map_final_key::(key)); + } + + fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult + where + K: HasKeyPrefix, + { + unhashed::kill_prefix(&Self::storage_n_map_partial_key(partial_key), limit) + } + + fn iter_prefix_values(partial_key: KP) -> PrefixIterator + where + K: HasKeyPrefix, + { + let prefix = Self::storage_n_map_partial_key(partial_key); + PrefixIterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: |_raw_key, mut raw_value| V::decode(&mut raw_value), + phantom: Default::default(), + } + } + + fn mutate(key: KArg, f: F) -> R + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut Self::Query) -> R, + { + Self::try_mutate(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + fn try_mutate(key: KArg, f: F) -> Result + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut Self::Query) -> Result, + { + let final_key = Self::storage_n_map_final_key::(key); + let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); + + let ret = f(&mut val); + if ret.is_ok() { + match G::from_query_to_optional_value(val) { + Some(ref val) => unhashed::put(final_key.as_ref(), val), + None => unhashed::kill(final_key.as_ref()), + } + } + ret + } + + fn mutate_exists(key: KArg, f: F) -> R + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: 
FnOnce(&mut Option) -> R, + { + Self::try_mutate_exists(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + fn try_mutate_exists(key: KArg, f: F) -> Result + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut Option) -> Result, + { + let final_key = Self::storage_n_map_final_key::(key); + let mut val = unhashed::get(final_key.as_ref()); + + let ret = f(&mut val); + if ret.is_ok() { + match val { + Some(ref val) => unhashed::put(final_key.as_ref(), val), + None => unhashed::kill(final_key.as_ref()), + } + } + ret + } + + fn append(key: KArg, item: EncodeLikeItem) + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: StorageAppend, + { + let final_key = Self::storage_n_map_final_key::(key); + sp_io::storage::append(&final_key, item.encode()); + } + + fn migrate_keys(key: KArg, hash_fns: K::HArg) -> Option + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + { + let old_key = { + let storage_prefix = storage_prefix(Self::module_prefix(), Self::storage_prefix()); + let key_hashed = K::migrate_key(&key, hash_fns); + + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len()); + + final_key.extend_from_slice(&storage_prefix); + final_key.extend_from_slice(key_hashed.as_ref()); + + final_key + }; + unhashed::take(old_key.as_ref()).map(|value| { + unhashed::put(Self::storage_n_map_final_key::(key).as_ref(), &value); + value + }) + } +} + +impl> + storage::IterableStorageNMap for G +{ + type KeyIterator = KeyPrefixIterator; + type Iterator = PrefixIterator<(K::Key, V)>; + + fn iter_prefix(kp: KP) -> PrefixIterator<(>::Suffix, V)> + where + K: HasReversibleKeyPrefix, + { + let prefix = G::storage_n_map_partial_key(kp); + PrefixIterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: |raw_key_without_prefix, mut raw_value| { + let partial_key = K::decode_partial_key(raw_key_without_prefix)?; + Ok((partial_key, V::decode(&mut raw_value)?)) + }, + phantom: Default::default(), + } + } + + fn iter_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> PrefixIterator<(>::Suffix, V)> + where + K: HasReversibleKeyPrefix, + { + let mut iter = Self::iter_prefix(kp); + iter.set_last_raw_key(starting_raw_key); + iter + } + + fn iter_key_prefix(kp: KP) -> KeyPrefixIterator<>::Suffix> + where + K: HasReversibleKeyPrefix, + { + let prefix = G::storage_n_map_partial_key(kp); + KeyPrefixIterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: K::decode_partial_key, + } + } + + fn iter_key_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> KeyPrefixIterator<>::Suffix> + where + K: HasReversibleKeyPrefix, + { + let mut iter = Self::iter_key_prefix(kp); + iter.set_last_raw_key(starting_raw_key); + iter + } + + fn drain_prefix(kp: KP) -> PrefixIterator<(>::Suffix, V)> + where + K: HasReversibleKeyPrefix, + { + let mut iter = Self::iter_prefix(kp); + iter.drain = true; + iter + } + + fn iter() -> Self::Iterator { + Self::iter_from(G::prefix_hash()) + } + + fn iter_from(starting_raw_key: Vec) -> Self::Iterator { + let prefix = G::prefix_hash(); + Self::Iterator { + prefix, + previous_key: starting_raw_key, + drain: false, + closure: |raw_key_without_prefix, mut raw_value| { + let (final_key, _) = K::decode_final_key(raw_key_without_prefix)?; + Ok((final_key, V::decode(&mut raw_value)?)) + }, + phantom: Default::default(), + } + } + + fn iter_keys() -> Self::KeyIterator { + Self::iter_keys_from(G::prefix_hash()) + } + + fn 
iter_keys_from(starting_raw_key: Vec) -> Self::KeyIterator { + let prefix = G::prefix_hash(); + Self::KeyIterator { + prefix, + previous_key: starting_raw_key, + drain: false, + closure: |raw_key_without_prefix| { + let (final_key, _) = K::decode_final_key(raw_key_without_prefix)?; + Ok(final_key) + }, + } + } + + fn drain() -> Self::Iterator { + let mut iterator = Self::iter(); + iterator.drain = true; + iterator + } + + fn translate Option>(mut f: F) { + let prefix = G::prefix_hash(); + let mut previous_key = prefix.clone(); + while let Some(next) = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) + { + previous_key = next; + let value = match unhashed::get::(&previous_key) { + Some(value) => value, + None => { + log::error!("Invalid translate: fail to decode old value"); + continue + }, + }; + + let final_key = match K::decode_final_key(&previous_key[prefix.len()..]) { + Ok((final_key, _)) => final_key, + Err(_) => { + log::error!("Invalid translate: fail to decode key"); + continue + }, + }; + + match f(final_key, value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), + } + } + } +} + +/// Test iterators for StorageNMap +#[cfg(test)] +mod test_iterators { + use crate::{ + hash::StorageHasher, + storage::{generator::StorageNMap, unhashed, IterableStorageNMap}, + }; + use codec::{Decode, Encode}; + + pub trait Config: 'static { + type Origin; + type BlockNumber; + type PalletInfo: crate::traits::PalletInfo; + type DbWeight: crate::traits::Get; + } + + crate::decl_module! { + pub struct Module for enum Call where origin: T::Origin, system=self {} + } + + #[derive(PartialEq, Eq, Clone, Encode, Decode)] + struct NoDef(u32); + + crate::decl_storage! { + trait Store for Module as Test { + NMap: nmap hasher(blake2_128_concat) u16, hasher(twox_64_concat) u32 => u64; + } + } + + fn key_before_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert!(*last != 0, "mock function not implemented for this prefix"); + *last -= 1; + prefix + } + + fn key_after_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert!(*last != 255, "mock function not implemented for this prefix"); + *last += 1; + prefix + } + + #[test] + fn n_map_iter_from() { + sp_io::TestExternalities::default().execute_with(|| { + use crate::{hash::Identity, storage::Key as NMapKey}; + crate::generate_storage_alias!( + MyModule, + MyNMap => NMap, u64> + ); + + MyNMap::insert((1, 1, 1), 11); + MyNMap::insert((1, 1, 2), 21); + MyNMap::insert((1, 1, 3), 31); + MyNMap::insert((1, 2, 1), 12); + MyNMap::insert((1, 2, 2), 22); + MyNMap::insert((1, 2, 3), 32); + MyNMap::insert((1, 3, 1), 13); + MyNMap::insert((1, 3, 2), 23); + MyNMap::insert((1, 3, 3), 33); + MyNMap::insert((2, 0, 0), 200); + + type Key = (NMapKey, NMapKey, NMapKey); + + let starting_raw_key = MyNMap::storage_n_map_final_key::((1, 2, 2)); + let iter = MyNMap::iter_key_prefix_from((1,), starting_raw_key); + assert_eq!(iter.collect::>(), vec![(2, 3), (3, 1), (3, 2), (3, 3)]); + + let starting_raw_key = MyNMap::storage_n_map_final_key::((1, 3, 1)); + let iter = MyNMap::iter_prefix_from((1, 3), starting_raw_key); + assert_eq!(iter.collect::>(), vec![(2, 23), (3, 33)]); + + let starting_raw_key = MyNMap::storage_n_map_final_key::((1, 3, 2)); + let iter = MyNMap::iter_keys_from(starting_raw_key); + assert_eq!(iter.collect::>(), vec![(1, 3, 3), (2, 0, 0)]); + + let starting_raw_key = MyNMap::storage_n_map_final_key::((1, 3, 3)); + let iter = 
MyNMap::iter_from(starting_raw_key); + assert_eq!(iter.collect::>(), vec![((2, 0, 0), 200)]); + }); + } + + #[test] + fn n_map_double_map_identical_key() { + sp_io::TestExternalities::default().execute_with(|| { + NMap::insert((1, 2), 50); + let key_hash = NMap::hashed_key_for((1, 2)); + + { + crate::generate_storage_alias!(Test, NMap => DoubleMap< + (u16, crate::Blake2_128Concat), + (u32, crate::Twox64Concat), + u64 + >); + + let value = NMap::get(1, 2).unwrap(); + assert_eq!(value, 50); + assert_eq!(NMap::hashed_key_for(1, 2), key_hash); + } + }); + } + + #[test] + fn n_map_reversible_reversible_iteration() { + sp_io::TestExternalities::default().execute_with(|| { + // All map iterator + let prefix = NMap::prefix_hash(); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + + for i in 0..4 { + NMap::insert((i as u16, i as u32), i as u64); + } + + assert_eq!( + NMap::iter().collect::>(), + vec![((3, 3), 3), ((0, 0), 0), ((2, 2), 2), ((1, 1), 1)], + ); + + assert_eq!(NMap::iter_keys().collect::>(), vec![(3, 3), (0, 0), (2, 2), (1, 1)]); + + assert_eq!(NMap::iter_values().collect::>(), vec![3, 0, 2, 1]); + + assert_eq!( + NMap::drain().collect::>(), + vec![((3, 3), 3), ((0, 0), 0), ((2, 2), 2), ((1, 1), 1)], + ); + + assert_eq!(NMap::iter().collect::>(), vec![]); + assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); + assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); + + // Prefix iterator + let k1 = 3 << 8; + let prefix = NMap::storage_n_map_partial_key((k1,)); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + + for i in 0..4 { + NMap::insert((k1, i as u32), i as u64); + } + + assert_eq!( + NMap::iter_prefix((k1,)).collect::>(), + vec![(1, 1), (2, 2), (0, 0), (3, 3)], + ); + + assert_eq!(NMap::iter_key_prefix((k1,)).collect::>(), vec![1, 2, 0, 3]); + + assert_eq!(NMap::iter_prefix_values((k1,)).collect::>(), vec![1, 2, 0, 3]); + + assert_eq!( + NMap::drain_prefix((k1,)).collect::>(), + vec![(1, 1), (2, 2), (0, 0), (3, 3)], + ); + + assert_eq!(NMap::iter_prefix((k1,)).collect::>(), vec![]); + assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); + assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); + + // Translate + let prefix = NMap::prefix_hash(); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + for i in 0..4 { + NMap::insert((i as u16, i as u32), i as u64); + } + + // Wrong key1 + unhashed::put(&[prefix.clone(), vec![1, 2, 3]].concat(), &3u64.encode()); + + // Wrong key2 + unhashed::put( + &[prefix.clone(), crate::Blake2_128Concat::hash(&1u16.encode())].concat(), + &3u64.encode(), + ); + + // Wrong value + unhashed::put( + &[ + prefix.clone(), + crate::Blake2_128Concat::hash(&1u16.encode()), + crate::Twox64Concat::hash(&2u32.encode()), + ] + .concat(), + &vec![1], + ); + + NMap::translate(|(_k1, _k2), v: u64| Some(v * 2)); + assert_eq!( + NMap::iter().collect::>(), + vec![((3, 3), 6), ((0, 0), 0), ((2, 2), 4), ((1, 1), 2)], + ); + }) + } +} diff --git a/frame/support/src/storage/generator/value.rs b/frame/support/src/storage/generator/value.rs index 2da3d91718438..3486eaa005c06 100644 --- a/frame/support/src/storage/generator/value.rs +++ b/frame/support/src/storage/generator/value.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{FullCodec, Encode, EncodeLike, Decode}; use crate::{ - Never, storage::{self, unhashed, StorageAppend}, - hash::{Twox128, StorageHasher}, + Never, }; +use codec::{Decode, Encode, EncodeLike, FullCodec}; /// Generator for `StorageValue` used by `decl_storage`. /// @@ -46,10 +45,7 @@ pub trait StorageValue { /// Generate the full key used in top storage. fn storage_value_final_key() -> [u8; 32] { - let mut final_key = [0u8; 32]; - final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix())); - final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix())); - final_key + crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix()) } } @@ -77,10 +73,9 @@ impl> storage::StorageValue for G { let key = Self::storage_value_final_key(); // attempt to get the length directly. - let maybe_old = match unhashed::get_raw(&key) { - Some(old_data) => Some(O::decode(&mut &old_data[..]).map_err(|_| ())?), - None => None, - }; + let maybe_old = unhashed::get_raw(&key) + .map(|old_data| O::decode(&mut &old_data[..]).map_err(|_| ())) + .transpose()?; let maybe_new = f(maybe_old); if let Some(new) = maybe_new.as_ref() { new.using_encoded(|d| unhashed::put_raw(&key, d)); diff --git a/frame/support/src/storage/hashed.rs b/frame/support/src/storage/hashed.rs index 96a487111a2af..241caff809b3d 100644 --- a/frame/support/src/storage/hashed.rs +++ b/frame/support/src/storage/hashed.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +18,8 @@ //! Operation on runtime storage using hashed keys. use super::unhashed; +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(hash: &HashFn, key: &[u8]) -> Option diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index 75f90ba7b06cc..59422a282aab5 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,15 @@ //! Some utilities for helping access storage with arbitrary key types. +use crate::{ + hash::ReversibleStorageHasher, + storage::{storage_prefix, unhashed}, + StorageHasher, Twox128, +}; +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; -use crate::{StorageHasher, Twox128}; -use crate::hash::ReversibleStorageHasher; + +use super::PrefixIterator; /// Utility to iterate through raw items in storage. pub struct StorageIterator { @@ -32,15 +37,22 @@ pub struct StorageIterator { impl StorageIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. 
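In the `StorageValue::translate` hunk above, the rewritten decode step is the standard `Option`/`Result` inversion: `map` yields an `Option<Result<O, ()>>` and `transpose` flips it into a `Result<Option<O>, ()>`, letting `?` propagate a decode failure while a missing value stays `Ok(None)`. The same idiom in isolation:

```rust
use codec::Decode;

/// Decode raw bytes if present; fail only when bytes exist but are invalid.
fn decode_existing<O: Decode>(raw: Option<Vec<u8>>) -> Result<Option<O>, ()> {
	raw.map(|data| O::decode(&mut &data[..]).map_err(|_| ())).transpose()
}

fn main() {
	// No stored value is not an error: it stays `Ok(None)`.
	assert_eq!(decode_existing::<u32>(None), Ok(None));
	// A valid SCALE-encoded `u32` (little endian) decodes through.
	assert_eq!(decode_existing::<u32>(Some(vec![7, 0, 0, 0])), Ok(Some(7)));
	// Truncated input surfaces as a decode error.
	assert_eq!(decode_existing::<u32>(Some(vec![1])), Err(()));
}
```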
+ #[deprecated( + note = "Please use the storage_iter or storage_iter_with_suffix functions instead" + )] pub fn new(module: &[u8], item: &[u8]) -> Self { + #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. + #[deprecated( + note = "Please use the storage_iter or storage_iter_with_suffix functions instead" + )] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + prefix.extend_from_slice(&storage_prefix); prefix.extend_from_slice(suffix); let previous_key = prefix.clone(); Self { prefix, previous_key, drain: false, _phantom: Default::default() } @@ -70,10 +82,10 @@ impl Iterator for StorageIterator { frame_support::storage::unhashed::kill(&next); } Some((self.previous_key[self.prefix.len()..].to_vec(), value)) - } + }, None => continue, } - } + }, None => None, } } @@ -90,15 +102,22 @@ pub struct StorageKeyIterator { impl StorageKeyIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. + #[deprecated( + note = "Please use the storage_key_iter or storage_key_iter_with_suffix functions instead" + )] pub fn new(module: &[u8], item: &[u8]) -> Self { + #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. + #[deprecated( + note = "Please use the storage_key_iter or storage_key_iter_with_suffix functions instead" + )] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + prefix.extend_from_slice(&storage_prefix); prefix.extend_from_slice(suffix); let previous_key = prefix.clone(); Self { prefix, previous_key, drain: false, _phantom: Default::default() } @@ -133,19 +152,76 @@ impl Iterator frame_support::storage::unhashed::kill(&next); } Some((key, value)) - } + }, None => continue, } - } + }, Err(_) => continue, } - } + }, None => None, } } } } +/// Construct iterator to iterate over map items in `module` for the map called `item`. +pub fn storage_iter(module: &[u8], item: &[u8]) -> PrefixIterator<(Vec, T)> { + storage_iter_with_suffix(module, item, &[][..]) +} + +/// Construct iterator to iterate over map items in `module` for the map called `item`. +pub fn storage_iter_with_suffix( + module: &[u8], + item: &[u8], + suffix: &[u8], +) -> PrefixIterator<(Vec, T)> { + let mut prefix = Vec::new(); + let storage_prefix = storage_prefix(module, item); + prefix.extend_from_slice(&storage_prefix); + prefix.extend_from_slice(suffix); + let previous_key = prefix.clone(); + let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { + let value = T::decode(&mut &raw_value[..])?; + Ok((raw_key_without_prefix.to_vec(), value)) + }; + + PrefixIterator { prefix, previous_key, drain: false, closure, phantom: Default::default() } +} + +/// Construct iterator to iterate over map items in `module` for the map called `item`. +pub fn storage_key_iter( + module: &[u8], + item: &[u8], +) -> PrefixIterator<(K, T)> { + storage_key_iter_with_suffix::(module, item, &[][..]) +} + +/// Construct iterator to iterate over map items in `module` for the map called `item`. 
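These free functions are the intended replacement for the deprecated `StorageIterator`/`StorageKeyIterator`: they address a map purely by pallet and item name, which is exactly what migrations over renamed or legacy storage need. A hedged usage sketch (pallet, item, and key/value types are illustrative):

```rust
use frame_support::{storage::migration::storage_key_iter, Blake2_128Concat};

fn main() {
	sp_io::TestExternalities::default().execute_with(|| {
		// Walk a `map hasher(blake2_128_concat) u32 => u64` stored under the
		// (illustrative) pallet `OldPallet`, item `Balances`, with no static
		// type for the map in scope.
		let entries: Vec<(u32, u64)> =
			storage_key_iter::<u32, u64, Blake2_128Concat>(b"OldPallet", b"Balances").collect();

		// An empty state yields nothing; a real migration would re-insert
		// each decoded entry under its new representation.
		assert!(entries.is_empty());
	})
}
```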
+pub fn storage_key_iter_with_suffix< + K: Decode + Sized, + T: Decode + Sized, + H: ReversibleStorageHasher, +>( + module: &[u8], + item: &[u8], + suffix: &[u8], +) -> PrefixIterator<(K, T)> { + let mut prefix = Vec::new(); + let storage_prefix = storage_prefix(module, item); + + prefix.extend_from_slice(&storage_prefix); + prefix.extend_from_slice(suffix); + let previous_key = prefix.clone(); + let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { + let mut key_material = H::reverse(raw_key_without_prefix); + let key = K::decode(&mut key_material)?; + let value = T::decode(&mut &raw_value[..])?; + Ok((key, value)) + }; + PrefixIterator { prefix, previous_key, drain: false, closure, phantom: Default::default() } +} + /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn have_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> bool { get_storage_value::<()>(module, item, hash).is_some() @@ -154,8 +230,8 @@ pub fn have_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> bool { /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn get_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> Option { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); frame_support::storage::unhashed::get::(&key) } @@ -163,8 +239,8 @@ pub fn get_storage_value(module: &[u8], item: &[u8], hash: &[ /// Take a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn take_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> Option { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); frame_support::storage::unhashed::take::(&key) } @@ -172,22 +248,23 @@ pub fn take_storage_value(module: &[u8], item: &[u8], hash: & /// Put a particular value into storage by the `module`, the map's `item` name and the key `hash`. pub fn put_storage_value(module: &[u8], item: &[u8], hash: &[u8], value: T) { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); frame_support::storage::unhashed::put(&key, &value); } -/// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. +/// Remove all items under a storage prefix by the `module`, the map's `item` name and the key +/// `hash`. pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) { let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); + let storage_prefix = storage_prefix(module, item); + key[0..32].copy_from_slice(&storage_prefix); key[32..].copy_from_slice(hash); - frame_support::storage::unhashed::kill_prefix(&key) + frame_support::storage::unhashed::kill_prefix(&key, None); } -/// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. 
+/// Take a particular item in storage by the `module`, the map's `item` name and the key `hash`.
 pub fn take_storage_item<K: Encode + Sized, T: Decode + Sized, H: StorageHasher>(
 	module: &[u8],
 	item: &[u8],
@@ -195,3 +272,219 @@ pub fn take_storage_item
 ) -> Option<T> {
 	take_storage_value(module, item, key.using_encoded(H::hash).as_ref())
 }
+
+/// Move a storage item from one pallet prefix to another pallet prefix.
+///
+/// Keys used in pallet storages always start with:
+/// `concat(twox_128(pallet_name), twox_128(storage_name))`.
+///
+/// This function will remove all values for which the key starts with
+/// `concat(twox_128(old_pallet_name), twox_128(storage_name))` and insert them at the key with
+/// the start replaced by `concat(twox_128(new_pallet_name), twox_128(storage_name))`.
+///
+/// # Example
+///
+/// If a pallet named "my_example" has 2 storage items named "Foo" and "Bar" and the pallet is
+/// renamed "my_new_example_name", a migration can be:
+/// ```
+/// # use frame_support::storage::migration::move_storage_from_pallet;
+/// # sp_io::TestExternalities::new_empty().execute_with(|| {
+/// move_storage_from_pallet(b"Foo", b"my_example", b"my_new_example_name");
+/// move_storage_from_pallet(b"Bar", b"my_example", b"my_new_example_name");
+/// # })
+/// ```
+pub fn move_storage_from_pallet(
+	storage_name: &[u8],
+	old_pallet_name: &[u8],
+	new_pallet_name: &[u8],
+) {
+	let new_prefix = storage_prefix(new_pallet_name, storage_name);
+	let old_prefix = storage_prefix(old_pallet_name, storage_name);
+
+	move_prefix(&old_prefix, &new_prefix);
+
+	if let Some(value) = unhashed::get_raw(&old_prefix) {
+		unhashed::put_raw(&new_prefix, &value);
+		unhashed::kill(&old_prefix);
+	}
+}
+
+/// Move all storage items from a pallet prefix to another pallet prefix.
+///
+/// Keys used in pallet storages always start with:
+/// `concat(twox_128(pallet_name), twox_128(storage_name))`.
+///
+/// This function will remove all values for which the key starts with
+/// `twox_128(old_pallet_name)` and insert them at the key with the start replaced by
+/// `twox_128(new_pallet_name)`.
+///
+/// NOTE: The value at the key `twox_128(old_pallet_name)` is not moved.
+///
+/// # Example
+///
+/// If a pallet named "my_example" has some storage items and the pallet is renamed
+/// "my_new_example_name", a migration can be:
+/// ```
+/// # use frame_support::storage::migration::move_pallet;
+/// # sp_io::TestExternalities::new_empty().execute_with(|| {
+/// move_pallet(b"my_example", b"my_new_example_name");
+/// # })
+/// ```
+pub fn move_pallet(old_pallet_name: &[u8], new_pallet_name: &[u8]) {
+	move_prefix(&Twox128::hash(old_pallet_name), &Twox128::hash(new_pallet_name))
+}
+
+/// Move all `(key, value)` pairs after some prefix to another prefix.
+///
+/// This function will remove all values for which the key starts with `from_prefix`
+/// and insert them at the key with the start replaced by `to_prefix`.
+///
+/// NOTE: The value at the key `from_prefix` is not moved.
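+///
+/// # Example
+///
+/// A minimal sketch mirroring what [`move_pallet`] does internally; the pallet names are
+/// purely illustrative:
+/// ```
+/// # use frame_support::storage::migration::move_prefix;
+/// # use frame_support::{StorageHasher, Twox128};
+/// # sp_io::TestExternalities::new_empty().execute_with(|| {
+/// move_prefix(&Twox128::hash(b"OldPalletName"), &Twox128::hash(b"NewPalletName"));
+/// # })
+/// ```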
+pub fn move_prefix(from_prefix: &[u8], to_prefix: &[u8]) { + if from_prefix == to_prefix { + return + } + + let iter = PrefixIterator::<_> { + prefix: from_prefix.to_vec(), + previous_key: from_prefix.to_vec(), + drain: true, + closure: |key, value| Ok((key.to_vec(), value.to_vec())), + phantom: Default::default(), + }; + + for (key, value) in iter { + let full_key = [to_prefix, &key].concat(); + unhashed::put_raw(&full_key, &value); + } +} + +#[cfg(test)] +mod tests { + use super::{ + move_pallet, move_prefix, move_storage_from_pallet, storage_iter, storage_key_iter, + }; + use crate::{ + hash::StorageHasher, + pallet_prelude::{StorageMap, StorageValue, Twox128, Twox64Concat}, + }; + use sp_io::TestExternalities; + + struct OldPalletStorageValuePrefix; + impl frame_support::traits::StorageInstance for OldPalletStorageValuePrefix { + const STORAGE_PREFIX: &'static str = "foo_value"; + fn pallet_prefix() -> &'static str { + "my_old_pallet" + } + } + type OldStorageValue = StorageValue; + + struct OldPalletStorageMapPrefix; + impl frame_support::traits::StorageInstance for OldPalletStorageMapPrefix { + const STORAGE_PREFIX: &'static str = "foo_map"; + fn pallet_prefix() -> &'static str { + "my_old_pallet" + } + } + type OldStorageMap = StorageMap; + + struct NewPalletStorageValuePrefix; + impl frame_support::traits::StorageInstance for NewPalletStorageValuePrefix { + const STORAGE_PREFIX: &'static str = "foo_value"; + fn pallet_prefix() -> &'static str { + "my_new_pallet" + } + } + type NewStorageValue = StorageValue; + + struct NewPalletStorageMapPrefix; + impl frame_support::traits::StorageInstance for NewPalletStorageMapPrefix { + const STORAGE_PREFIX: &'static str = "foo_map"; + fn pallet_prefix() -> &'static str { + "my_new_pallet" + } + } + type NewStorageMap = StorageMap; + + #[test] + fn test_move_prefix() { + TestExternalities::new_empty().execute_with(|| { + OldStorageValue::put(3); + OldStorageMap::insert(1, 2); + OldStorageMap::insert(3, 4); + + move_prefix(&Twox128::hash(b"my_old_pallet"), &Twox128::hash(b"my_new_pallet")); + + assert_eq!(OldStorageValue::get(), None); + assert_eq!(OldStorageMap::iter().collect::>(), vec![]); + assert_eq!(NewStorageValue::get(), Some(3)); + assert_eq!(NewStorageMap::iter().collect::>(), vec![(1, 2), (3, 4)]); + }) + } + + #[test] + fn test_move_storage() { + TestExternalities::new_empty().execute_with(|| { + OldStorageValue::put(3); + OldStorageMap::insert(1, 2); + OldStorageMap::insert(3, 4); + + move_storage_from_pallet(b"foo_map", b"my_old_pallet", b"my_new_pallet"); + + assert_eq!(OldStorageValue::get(), Some(3)); + assert_eq!(OldStorageMap::iter().collect::>(), vec![]); + assert_eq!(NewStorageValue::get(), None); + assert_eq!(NewStorageMap::iter().collect::>(), vec![(1, 2), (3, 4)]); + + move_storage_from_pallet(b"foo_value", b"my_old_pallet", b"my_new_pallet"); + + assert_eq!(OldStorageValue::get(), None); + assert_eq!(OldStorageMap::iter().collect::>(), vec![]); + assert_eq!(NewStorageValue::get(), Some(3)); + assert_eq!(NewStorageMap::iter().collect::>(), vec![(1, 2), (3, 4)]); + }) + } + + #[test] + fn test_move_pallet() { + TestExternalities::new_empty().execute_with(|| { + OldStorageValue::put(3); + OldStorageMap::insert(1, 2); + OldStorageMap::insert(3, 4); + + move_pallet(b"my_old_pallet", b"my_new_pallet"); + + assert_eq!(OldStorageValue::get(), None); + assert_eq!(OldStorageMap::iter().collect::>(), vec![]); + assert_eq!(NewStorageValue::get(), Some(3)); + assert_eq!(NewStorageMap::iter().collect::>(), vec![(1, 2), (3, 4)]); + }) 
+ } + + #[test] + fn test_storage_iter() { + TestExternalities::new_empty().execute_with(|| { + OldStorageValue::put(3); + OldStorageMap::insert(1, 2); + OldStorageMap::insert(3, 4); + + assert_eq!( + storage_key_iter::(b"my_old_pallet", b"foo_map") + .collect::>(), + vec![(1, 2), (3, 4)], + ); + + assert_eq!( + storage_iter(b"my_old_pallet", b"foo_map") + .drain() + .map(|t| t.1) + .collect::>(), + vec![2, 4], + ); + assert_eq!(OldStorageMap::iter().collect::>(), vec![]); + + // Empty because storage iterator skips over the entry under the first key + assert_eq!(storage_iter::(b"my_old_pallet", b"foo_value").drain().next(), None); + assert_eq!(OldStorageValue::get(), Some(3)); + }); + } +} diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 97c1eabe6d39d..35552e08fef1e 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,32 @@ //! Stuff to do with the runtime's storage. -use sp_std::prelude::*; -use codec::{FullCodec, FullEncode, Encode, EncodeLike, Decode}; -use crate::hash::{Twox128, StorageHasher}; +pub use self::types::StorageEntryMetadataBuilder; +use crate::{ + hash::{ReversibleStorageHasher, StorageHasher}, + storage::types::{ + EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, KeyGenerator, + ReversibleKeyGenerator, TupleToEncodedIter, + }, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; +use sp_core::storage::ChildInfo; use sp_runtime::generic::{Digest, DigestItem}; pub use sp_runtime::TransactionOutcome; +use sp_std::prelude::*; +pub use types::Key; -pub mod unhashed; -pub mod hashed; +pub mod bounded_btree_map; +pub mod bounded_btree_set; +pub mod bounded_vec; pub mod child; #[doc(hidden)] pub mod generator; +pub mod hashed; pub mod migration; +pub mod types; +pub mod unhashed; +pub mod weak_bounded_vec; #[cfg(all(feature = "std", any(test, debug_assertions)))] mod debug_helper { @@ -61,7 +75,7 @@ mod debug_helper { let mut val = v.borrow_mut(); *val += 1; if *val > 10 { - crate::debug::warn!( + log::warn!( "Detected with_transaction with nest level {}. Nested usage of with_transaction is not recommended.", *val ); @@ -88,9 +102,7 @@ pub fn require_transaction() { /// /// Transactions can be nested to any depth. Commits happen to the parent transaction. pub fn with_transaction(f: impl FnOnce() -> TransactionOutcome) -> R { - use sp_io::storage::{ - start_transaction, commit_transaction, rollback_transaction, - }; + use sp_io::storage::{commit_transaction, rollback_transaction, start_transaction}; use TransactionOutcome::*; start_transaction(); @@ -99,15 +111,20 @@ pub fn with_transaction(f: impl FnOnce() -> TransactionOutcome) -> R { let _guard = debug_helper::inc_transaction_level(); match f() { - Commit(res) => { commit_transaction(); res }, - Rollback(res) => { rollback_transaction(); res }, + Commit(res) => { + commit_transaction(); + res + }, + Rollback(res) => { + rollback_transaction(); + res + }, } } /// A trait for working with macro-generated storage values under the substrate storage API. /// -/// Details on implementation can be found at -/// [`generator::StorageValue`] +/// Details on implementation can be found at [`generator::StorageValue`]. 
 pub trait StorageValue<T: FullCodec> {
 	/// The type that get/take returns.
 	type Query;
@@ -121,8 +138,9 @@ pub trait StorageValue {
 	/// Load the value from the provided storage instance.
 	fn get() -> Self::Query;
 
-	/// Try to get the underlying value from the provided storage instance; `Ok` if it exists,
-	/// `Err` if not.
+	/// Try to get the underlying value from the provided storage instance.
+	///
+	/// Returns `Ok` if it exists, `Err` if not.
 	fn try_get() -> Result<T, ()>;
 
 	/// Translate a value from some previous type (`O`) to the current type.
@@ -143,8 +161,8 @@ pub trait StorageValue {
 	/// # Usage
 	///
 	/// This would typically be called inside the module implementation of on_runtime_upgrade, while
-	/// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More
-	/// precisely prior initialized modules doesn't make use of this storage).
+	/// ensuring **no usage of this storage is made before the call to `on_runtime_upgrade`**.
+	/// (More precisely, modules initialized earlier don't make use of this storage.)
 	fn translate<O: Decode, F: FnOnce(Option<O>) -> Option<T>>(f: F) -> Result<Option<T>, ()>;
 
 	/// Store a value under this key into the provided storage instance.
@@ -192,15 +210,17 @@ pub trait StorageValue {
 	///
 	/// `None` does not mean that `get()` does not return a value. The default value is completely
 	/// ignored by this function.
-	fn decode_len() -> Option<usize> where T: StorageDecodeLength {
+	fn decode_len() -> Option<usize>
+	where
+		T: StorageDecodeLength,
+	{
 		T::decode_len(&Self::hashed_key())
 	}
 }
 
 /// A strongly-typed map in storage.
 ///
-/// Details on implementation can be found at
-/// [`generator::StorageMap`]
+/// Details on implementation can be found at [`generator::StorageMap`].
 pub trait StorageMap<K: FullEncode, V: FullCodec> {
 	/// The type that get/take returns.
 	type Query;
@@ -214,6 +234,11 @@ pub trait StorageMap {
 	/// Load the value associated with the given key from the map.
 	fn get<KeyArg: EncodeLike<K>>(key: KeyArg) -> Self::Query;
 
+	/// Try to get the value for the given key from the map.
+	///
+	/// Returns `Ok` if it exists, `Err` if not.
+	fn try_get<KeyArg: EncodeLike<K>>(key: KeyArg) -> Result<V, ()>;
+
 	/// Swap the values of two keys.
 	fn swap<KeyArg1: EncodeLike<K>, KeyArg2: EncodeLike<K>>(key1: KeyArg1, key2: KeyArg2);
 
@@ -232,8 +257,13 @@ pub trait StorageMap {
 		f: F,
 	) -> Result<R, E>;
 
-	/// Mutate the value under a key. Deletes the item if mutated to a `None`.
-	fn mutate_exists<KeyArg: EncodeLike<K>, R, F: FnOnce(&mut Option<V>) -> R>(key: KeyArg, f: F) -> R;
+	/// Mutate the value under a key.
+	///
+	/// Deletes the item if mutated to a `None`.
+	fn mutate_exists<KeyArg: EncodeLike<K>, R, F: FnOnce(&mut Option<V>) -> R>(
+		key: KeyArg,
+		f: F,
+	) -> R;
 
 	/// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`.
 	fn try_mutate_exists<KeyArg: EncodeLike<K>, R, E, F: FnOnce(&mut Option<V>) -> Result<R, E>>(
@@ -273,7 +303,8 @@ pub trait StorageMap {
 	/// `None` does not mean that `get()` does not return a value. The default value is completely
 	/// ignored by this function.
 	fn decode_len<KeyArg: EncodeLike<K>>(key: KeyArg) -> Option<usize>
-		where V: StorageDecodeLength,
+	where
+		V: StorageDecodeLength,
 	{
 		V::decode_len(&Self::hashed_key_for(key))
 	}
@@ -295,57 +326,191 @@ pub trait StorageMap {
 pub trait IterableStorageMap<K: FullEncode, V: FullCodec>: StorageMap<K, V> {
 	/// The type that iterates over all `(key, value)`.
 	type Iterator: Iterator<Item = (K, V)>;
+	/// The type that iterates over all `key`s.
+	type KeyIterator: Iterator<Item = K>;
 
-	/// Enumerate all elements in the map in no particular order. If you alter the map while doing
-	/// this, you'll get undefined results.
+	/// Enumerate all elements in the map in lexicographical order of the encoded key. If you
+	/// alter the map while doing this, you'll get undefined results.
 	fn iter() -> Self::Iterator;
 
-	/// Remove all elements from the map and iterate through them in no particular order. If you
-	/// add elements to the map while doing this, you'll get undefined results.
+	/// Enumerate all elements in the map after a specified `starting_raw_key` in lexicographical
+	/// order of the encoded key. If you alter the map while doing this, you'll get undefined
+	/// results.
+	fn iter_from(starting_raw_key: Vec<u8>) -> Self::Iterator;
+
+	/// Enumerate all keys in the map in lexicographical order of the encoded key, skipping over
+	/// the elements. If you alter the map while doing this, you'll get undefined results.
+	fn iter_keys() -> Self::KeyIterator;
+
+	/// Enumerate all keys in the map after a specified `starting_raw_key` in lexicographical order
+	/// of the encoded key. If you alter the map while doing this, you'll get undefined results.
+	fn iter_keys_from(starting_raw_key: Vec<u8>) -> Self::KeyIterator;
+
+	/// Remove all elements from the map and iterate through them in lexicographical order of the
+	/// encoded key. If you add elements to the map while doing this, you'll get undefined results.
 	fn drain() -> Self::Iterator;
 
-	/// Translate the values of all elements by a function `f`, in the map in no particular order.
+	/// Translate the values of all elements by a function `f`, in the map in lexicographical order
+	/// of the encoded key.
 	/// By returning `None` from `f` for an element, you'll remove it from the map.
 	///
 	/// NOTE: If a value fails to decode because storage is corrupted then it is skipped.
-	fn translate<O: Decode, F: Fn(K, O) -> Option<V>>(f: F);
+	fn translate<O: Decode, F: FnMut(K, O) -> Option<V>>(f: F);
 }
 
 /// A strongly-typed double map in storage whose secondary keys and values can be iterated over.
-pub trait IterableStorageDoubleMap<
-	K1: FullCodec,
-	K2: FullCodec,
-	V: FullCodec
->: StorageDoubleMap<K1, K2, V> {
+pub trait IterableStorageDoubleMap<K1: FullCodec, K2: FullCodec, V: FullCodec>:
+	StorageDoubleMap<K1, K2, V>
+{
+	/// The type that iterates over all `key2`.
+	type PartialKeyIterator: Iterator<Item = K2>;
+
+	/// The type that iterates over all `(key2, value)`.
 	type PrefixIterator: Iterator<Item = (K2, V)>;
 
+	/// The type that iterates over all `(key1, key2)`.
+	type FullKeyIterator: Iterator<Item = (K1, K2)>;
+
+	/// The type that iterates over all `(key1, key2, value)`.
 	type Iterator: Iterator<Item = (K1, K2, V)>;
 
-	/// Enumerate all elements in the map with first key `k1` in no particular order. If you add or
-	/// remove values whose first key is `k1` to the map while doing this, you'll get undefined
-	/// results.
+	/// Enumerate all elements in the map with first key `k1` in lexicographical order of the
+	/// encoded key. If you add or remove values whose first key is `k1` to the map while doing
+	/// this, you'll get undefined results.
 	fn iter_prefix(k1: impl EncodeLike<K1>) -> Self::PrefixIterator;
 
-	/// Remove all elements from the map with first key `k1` and iterate through them in no
-	/// particular order. If you add elements with first key `k1` to the map while doing this,
-	/// you'll get undefined results.
+	/// Enumerate all elements in the map with first key `k1` after a specified `starting_raw_key`
+	/// in lexicographical order of the encoded key. If you add or remove values whose first key is
+	/// `k1` to the map while doing this, you'll get undefined results.
+	fn iter_prefix_from(k1: impl EncodeLike<K1>, starting_raw_key: Vec<u8>)
+		-> Self::PrefixIterator;
+
+	/// Enumerate all second keys `k2` in the map with the same first key `k1` in lexicographical
+	/// order of the encoded key. If you add or remove values whose first key is `k1` to the map
+	/// while doing this, you'll get undefined results.
+	fn iter_key_prefix(k1: impl EncodeLike<K1>) -> Self::PartialKeyIterator;
+
+	/// Enumerate all second keys `k2` in the map with the same first key `k1` after a specified
+	/// `starting_raw_key` in lexicographical order of the encoded key. If you add or remove values
+	/// whose first key is `k1` to the map while doing this, you'll get undefined results.
+	fn iter_key_prefix_from(
+		k1: impl EncodeLike<K1>,
+		starting_raw_key: Vec<u8>,
+	) -> Self::PartialKeyIterator;
+
+	/// Remove all elements from the map with first key `k1` and iterate through them in
+	/// lexicographical order of the encoded key. If you add elements with first key `k1` to the
+	/// map while doing this, you'll get undefined results.
 	fn drain_prefix(k1: impl EncodeLike<K1>) -> Self::PrefixIterator;
 
-	/// Enumerate all elements in the map in no particular order. If you add or remove values to
-	/// the map while doing this, you'll get undefined results.
+	/// Enumerate all elements in the map in lexicographical order of the encoded key. If you add
+	/// or remove values to the map while doing this, you'll get undefined results.
 	fn iter() -> Self::Iterator;
 
-	/// Remove all elements from the map and iterate through them in no particular order. If you
-	/// add elements to the map while doing this, you'll get undefined results.
+	/// Enumerate all elements in the map after a specified `starting_raw_key` in lexicographical
+	/// order of the encoded key. If you add or remove values to the map while doing this, you'll
+	/// get undefined results.
+	fn iter_from(starting_raw_key: Vec<u8>) -> Self::Iterator;
+
+	/// Enumerate all keys `k1` and `k2` in the map in lexicographical order of the encoded key. If
+	/// you add or remove values to the map while doing this, you'll get undefined results.
+	fn iter_keys() -> Self::FullKeyIterator;
+
+	/// Enumerate all keys `k1` and `k2` in the map after a specified `starting_raw_key` in
+	/// lexicographical order of the encoded key. If you add or remove values to the map while
+	/// doing this, you'll get undefined results.
+	fn iter_keys_from(starting_raw_key: Vec<u8>) -> Self::FullKeyIterator;
+
+	/// Remove all elements from the map and iterate through them in lexicographical order of the
+	/// encoded key. If you add elements to the map while doing this, you'll get undefined results.
 	fn drain() -> Self::Iterator;
 
-	/// Translate the values of all elements by a function `f`, in the map in no particular order.
+	/// Translate the values of all elements by a function `f`, in the map in lexicographical order
+	/// of the encoded key.
 	/// By returning `None` from `f` for an element, you'll remove it from the map.
 	///
 	/// NOTE: If a value fails to decode because storage is corrupted then it is skipped.
-	fn translate<O: Decode, F: Fn(K1, K2, O) -> Option<V>>(f: F);
+	fn translate<O: Decode, F: FnMut(K1, K2, O) -> Option<V>>(f: F);
+}
+
+/// A strongly-typed map with arbitrary number of keys in storage whose keys and values can be
+/// iterated over.
+pub trait IterableStorageNMap<K: ReversibleKeyGenerator, V: FullCodec>: StorageNMap<K, V> {
+	/// The type that iterates over all `(key1, key2, key3, ... keyN)` tuples.
+	type KeyIterator: Iterator<Item = K::Key>;
+
+	/// The type that iterates over all `((key1, key2, key3, ... keyN), value)` tuples.
+	type Iterator: Iterator<Item = (K::Key, V)>;
+
+	/// Enumerate all elements in the map with prefix key `kp` in lexicographical order of the
+	/// encoded key. If you add or remove values whose prefix is `kp` to the map while doing this,
+	/// you'll get undefined results.
+ fn iter_prefix(kp: KP) -> PrefixIterator<(>::Suffix, V)> + where + K: HasReversibleKeyPrefix; + + /// Enumerate all elements in the map with prefix key `kp` after a specified `starting_raw_key` + /// in lexicographical order of the encoded key. If you add or remove values whose prefix is + /// `kp` to the map while doing this, you'll get undefined results. + fn iter_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> PrefixIterator<(>::Suffix, V)> + where + K: HasReversibleKeyPrefix; + + /// Enumerate all suffix keys in the map with prefix key `kp` in lexicographical order of the + /// encoded key. If you add or remove values whose prefix is `kp` to the map while doing this, + /// you'll get undefined results. + fn iter_key_prefix(kp: KP) -> KeyPrefixIterator<>::Suffix> + where + K: HasReversibleKeyPrefix; + + /// Enumerate all suffix keys in the map with prefix key `kp` after a specified + /// `starting_raw_key` in lexicographical order of the encoded key. If you add or remove values + /// whose prefix is `kp` to the map while doing this, you'll get undefined results. + fn iter_key_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> KeyPrefixIterator<>::Suffix> + where + K: HasReversibleKeyPrefix; + + /// Remove all elements from the map with prefix key `kp` and iterate through them in + /// lexicographical order of the encoded key. If you add elements with prefix key `kp` to the + /// map while doing this, you'll get undefined results. + fn drain_prefix(kp: KP) -> PrefixIterator<(>::Suffix, V)> + where + K: HasReversibleKeyPrefix; + + /// Enumerate all elements in the map in lexicographical order of the encoded key. If you add + /// or remove values to the map while doing this, you'll get undefined results. + fn iter() -> Self::Iterator; + + /// Enumerate all elements in the map after a specified `starting_raw_key` in lexicographical + /// order of the encoded key. If you add or remove values to the map while doing this, you'll + /// get undefined results. + fn iter_from(starting_raw_key: Vec) -> Self::Iterator; + + /// Enumerate all keys in the map in lexicographical order of the encoded key. If you add or + /// remove values to the map while doing this, you'll get undefined results. + fn iter_keys() -> Self::KeyIterator; + + /// Enumerate all keys in the map after `starting_raw_key` in lexicographical order of the + /// encoded key. If you add or remove values to the map while doing this, you'll get undefined + /// results. + fn iter_keys_from(starting_raw_key: Vec) -> Self::KeyIterator; + + /// Remove all elements from the map and iterate through them in lexicographical order of the + /// encoded key. If you add elements to the map while doing this, you'll get undefined results. + fn drain() -> Self::Iterator; + + /// Translate the values of all elements by a function `f`, in the map in lexicographical order + /// of the encoded key. + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + fn translate Option>(f: F); } /// An implementation of a map with a two keys. @@ -353,8 +518,7 @@ pub trait IterableStorageDoubleMap< /// It provides an important ability to efficiently remove all entries /// that have a common first key. /// -/// Details on implementation can be found at -/// [`generator::StorageDoubleMap`] +/// Details on implementation can be found at [`generator::StorageDoubleMap`]. pub trait StorageDoubleMap { /// The type that get/take returns. 
 	type Query;
@@ -377,6 +541,14 @@ pub trait StorageDoubleMap {
 		KArg1: EncodeLike<K1>,
 		KArg2: EncodeLike<K2>;
 
+	/// Try to get the value for the given key from the double map.
+	///
+	/// Returns `Ok` if it exists, `Err` if not.
+	fn try_get<KArg1, KArg2>(k1: KArg1, k2: KArg2) -> Result<V, ()>
+	where
+		KArg1: EncodeLike<K1>,
+		KArg2: EncodeLike<K2>;
+
 	/// Take a value from storage, removing it afterwards.
 	fn take<KArg1, KArg2>(k1: KArg1, k2: KArg2) -> Self::Query
 	where
@@ -405,11 +577,14 @@ pub trait StorageDoubleMap {
 		KArg2: EncodeLike<K2>;
 
 	/// Remove all values under the first key.
-	fn remove_prefix<KArg1>(k1: KArg1) where KArg1: ?Sized + EncodeLike<K1>;
+	fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
+	where
+		KArg1: ?Sized + EncodeLike<K1>;
 
 	/// Iterate over values that share the first key.
 	fn iter_prefix_values<KArg1>(k1: KArg1) -> PrefixIterator<V>
-		where KArg1: ?Sized + EncodeLike<K1>;
+	where
+		KArg1: ?Sized + EncodeLike<K1>;
 
 	/// Mutate the value under the given keys.
 	fn mutate<KArg1, KArg2, R, F>(k1: KArg1, k2: KArg2, f: F) -> R
 	where
@@ -448,11 +623,8 @@ pub trait StorageDoubleMap {
 	/// If the storage item is not encoded properly, the storage will be overwritten
 	/// and set to `[item]`. Any default value set for the storage item will be ignored
 	/// on overwrite.
-	fn append<Item, EncodeLikeItem, KArg1, KArg2>(
-		k1: KArg1,
-		k2: KArg2,
-		item: EncodeLikeItem,
-	) where
+	fn append<Item, EncodeLikeItem, KArg1, KArg2>(k1: KArg1, k2: KArg2, item: EncodeLikeItem)
+	where
 		KArg1: EncodeLike<K1>,
 		KArg2: EncodeLike<K2>,
 		Item: Encode,
@@ -472,10 +644,10 @@ pub trait StorageDoubleMap {
 	/// `None` does not mean that `get()` does not return a value. The default value is completely
 	/// ignored by this function.
 	fn decode_len<KArg1, KArg2>(key1: KArg1, key2: KArg2) -> Option<usize>
-		where
-			KArg1: EncodeLike<K1>,
-			KArg2: EncodeLike<K2>,
-			V: StorageDecodeLength,
+	where
+		KArg1: EncodeLike<K1>,
+		KArg2: EncodeLike<K2>,
+		V: StorageDecodeLength,
 	{
 		V::decode_len(&Self::hashed_key_for(key1, key2))
 	}
@@ -489,13 +661,137 @@ pub trait StorageDoubleMap {
 		OldHasher1: StorageHasher,
 		OldHasher2: StorageHasher,
 		KeyArg1: EncodeLike<K1>,
 		KeyArg2: EncodeLike<K2>,
-	>(key1: KeyArg1, key2: KeyArg2) -> Option<V>;
+	>(
+		key1: KeyArg1,
+		key2: KeyArg2,
+	) -> Option<V>;
 }
 
-/// Iterate over a prefix and decode raw_key and raw_value into `T`.
+/// An implementation of a map with an arbitrary number of keys.
+///
+/// Details of implementation can be found at [`generator::StorageNMap`].
+pub trait StorageNMap<K: KeyGenerator, V: FullCodec> {
+	/// The type that get/take returns.
+	type Query;
+
+	/// Get the storage key used to fetch a value corresponding to a specific key.
+	fn hashed_key_for<KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter>(key: KArg) -> Vec<u8>;
+
+	/// Does the value (explicitly) exist in storage?
+	fn contains_key<KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter>(key: KArg) -> bool;
+
+	/// Load the value associated with the given key from the map.
+	fn get<KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter>(key: KArg) -> Self::Query;
+
+	/// Try to get the value for the given key from the map.
+	///
+	/// Returns `Ok` if it exists, `Err` if not.
+	fn try_get<KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter>(key: KArg) -> Result<V, ()>;
+
+	/// Swap the values of two keys.
+	fn swap<KOther, KArg1, KArg2>(key1: KArg1, key2: KArg2)
+	where
+		KOther: KeyGenerator,
+		KArg1: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
+		KArg2: EncodeLikeTuple<KOther::KArg> + TupleToEncodedIter;
+
+	/// Store a value to be associated with the given key from the map.
+	fn insert<KArg, VArg>(key: KArg, val: VArg)
+	where
+		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
+		VArg: EncodeLike<V>;
+
+	/// Remove the value under a key.
+	fn remove<KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter>(key: KArg);
+
+	/// Remove all values under the partial prefix key.
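+	///
+	/// `limit` bounds how many keys are removed in one call; `None` removes everything under
+	/// the prefix. The returned [`sp_io::KillStorageResult`] reports whether keys remain (this
+	/// reading of the semantics is inferred from the `clear_prefix` host function backing it).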
+	fn remove_prefix<KP>(partial_key: KP, limit: Option<u32>) -> sp_io::KillStorageResult
+	where
+		K: HasKeyPrefix<KP>;
+
+	/// Iterate over values that share the partial prefix key.
+	fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<V>
+	where
+		K: HasKeyPrefix<KP>;
+
+	/// Mutate the value under a key.
+	fn mutate<KArg, R, F>(key: KArg, f: F) -> R
+	where
+		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
+		F: FnOnce(&mut Self::Query) -> R;
+
+	/// Mutate the item, only if an `Ok` value is returned.
+	fn try_mutate<KArg, R, E, F>(key: KArg, f: F) -> Result<R, E>
+	where
+		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
+		F: FnOnce(&mut Self::Query) -> Result<R, E>;
+
+	/// Mutate the value under a key.
+	///
+	/// Deletes the item if mutated to a `None`.
+	fn mutate_exists<KArg, R, F>(key: KArg, f: F) -> R
+	where
+		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
+		F: FnOnce(&mut Option<V>) -> R;
+
+	/// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`.
+	fn try_mutate_exists<KArg, R, E, F>(key: KArg, f: F) -> Result<R, E>
+	where
+		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
+		F: FnOnce(&mut Option<V>) -> Result<R, E>;
+
+	/// Take the value under a key.
+	fn take<KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter>(key: KArg) -> Self::Query;
+
+	/// Append the given items to the value in the storage.
+	///
+	/// `V` is required to implement `codec::EncodeAppend`.
+	///
+	/// # Warning
+	///
+	/// If the storage item is not encoded properly, the storage will be overwritten
+	/// and set to `[item]`. Any default value set for the storage item will be ignored
+	/// on overwrite.
+	fn append<Item, EncodeLikeItem, KArg>(key: KArg, item: EncodeLikeItem)
+	where
+		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter,
+		Item: Encode,
+		EncodeLikeItem: EncodeLike<Item>,
+		V: StorageAppend<Item>;
+
+	/// Read the length of the storage value without decoding the entire value under the
+	/// given `key`.
+	///
+	/// `V` is required to implement [`StorageDecodeLength`].
+	///
+	/// If the value does not exist or it fails to decode the length, `None` is returned.
+	/// Otherwise `Some(len)` is returned.
+	///
+	/// # Warning
+	///
+	/// `None` does not mean that `get()` does not return a value. The default value is completely
+	/// ignored by this function.
+	fn decode_len<KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter>(key: KArg) -> Option<usize>
+	where
+		V: StorageDecodeLength,
+	{
+		V::decode_len(&Self::hashed_key_for(key))
+	}
+
+	/// Migrate an item with the given `key` from defunct `hash_fns` to the current hashers.
+	///
+	/// If the key doesn't exist, then it's a no-op. If it does, then it returns its value.
+	fn migrate_keys<KArg>(key: KArg, hash_fns: K::HArg) -> Option<V>
+	where
+		KArg: EncodeLikeTuple<K::KArg> + TupleToEncodedIter;
+}
+
+/// Iterate or drain over a prefix and decode raw_key and raw_value into `T`.
 ///
 /// If any decoding fails it skips it and continues to the next key.
-pub struct PrefixIterator<T> {
+///
+/// If draining, then the hook `OnRemoval::on_removal` is called after each removal.
+pub struct PrefixIterator<T, OnRemoval = ()> {
 	prefix: Vec<u8>,
 	previous_key: Vec<u8>,
 	/// If true then values are removed while iterating
@@ -503,9 +799,66 @@ pub struct PrefixIterator {
 	/// Function that takes `(raw_key_without_prefix, raw_value)` and decodes `T`.
 	/// `raw_key_without_prefix` is the raw storage key without the prefix iterated on.
 	closure: fn(&[u8], &[u8]) -> Result<T, codec::Error>,
+	phantom: core::marker::PhantomData<OnRemoval>,
+}
+
+/// Trait for specialising on removal logic of [`PrefixIterator`].
+pub trait PrefixIteratorOnRemoval {
+	/// This function is called whenever a key/value is removed.
+	fn on_removal(key: &[u8], value: &[u8]);
+}
+
+/// No-op implementation.
+impl PrefixIteratorOnRemoval for () {
+	fn on_removal(_key: &[u8], _value: &[u8]) {}
+}
+
+impl<T, OnRemoval> PrefixIterator<T, OnRemoval> {
+	/// Creates a new `PrefixIterator`, iterating after `previous_key` and filtering out keys that
+	/// are not prefixed with `prefix`.
+	///
+	/// A `decode_fn` function must also be supplied, and it takes in two `&[u8]` parameters,
+	/// returning a `Result` containing the decoded type `T` if successful, and a `codec::Error` on
+	/// failure. The first `&[u8]` argument represents the raw, undecoded key without the prefix of
+	/// the current item, while the second `&[u8]` argument denotes the corresponding raw,
+	/// undecoded value.
+	pub fn new(
+		prefix: Vec<u8>,
+		previous_key: Vec<u8>,
+		decode_fn: fn(&[u8], &[u8]) -> Result<T, codec::Error>,
+	) -> Self {
+		PrefixIterator {
+			prefix,
+			previous_key,
+			drain: false,
+			closure: decode_fn,
+			phantom: Default::default(),
+		}
+	}
+
+	/// Get the last key that has been iterated upon and return it.
+	pub fn last_raw_key(&self) -> &[u8] {
+		&self.previous_key
+	}
+
+	/// Get the prefix that is being iterated upon for this iterator and return it.
+	pub fn prefix(&self) -> &[u8] {
+		&self.prefix
+	}
+
+	/// Set the key that the iterator should start iterating after.
+	pub fn set_last_raw_key(&mut self, previous_key: Vec<u8>) {
+		self.previous_key = previous_key;
+	}
+
+	/// Mutate this iterator into a draining iterator; items iterated are removed from storage.
+	pub fn drain(mut self) -> Self {
+		self.drain = true;
+		self
+	}
}

-impl<T> Iterator for PrefixIterator<T> {
+impl<T, OnRemoval: PrefixIteratorOnRemoval> Iterator for PrefixIterator<T, OnRemoval> {
 	type Item = T;
 
 	fn next(&mut self) -> Option<Self::Item> {
@@ -518,30 +871,244 @@ impl Iterator for PrefixIterator {
 				let raw_value = match unhashed::get_raw(&self.previous_key) {
 					Some(raw_value) => raw_value,
 					None => {
-						crate::debug::error!(
+						log::error!(
 							"next_key returned a key with no value at {:?}",
-							self.previous_key
+							self.previous_key,
 						);
 						continue
-					}
+					},
 				};
 				if self.drain {
-					unhashed::kill(&self.previous_key)
+					unhashed::kill(&self.previous_key);
+					OnRemoval::on_removal(&self.previous_key, &raw_value);
 				}
 				let raw_key_without_prefix = &self.previous_key[self.prefix.len()..];
 				let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) {
 					Ok(item) => item,
 					Err(e) => {
-						crate::debug::error!(
+						log::error!(
 							"(key, value) failed to decode at {:?}: {:?}",
-							self.previous_key, e
+							self.previous_key,
+							e,
 						);
 						continue
-					}
+					},
 				};
 
 				Some(item)
+			},
+			None => None,
+		}
+	}
+}
+
+/// Iterate over a prefix and decode raw_key into `T`.
+///
+/// If any decoding fails it skips it and continues to the next key.
+pub struct KeyPrefixIterator<T> {
+	prefix: Vec<u8>,
+	previous_key: Vec<u8>,
+	/// If true then values are removed while iterating
+	drain: bool,
+	/// Function that takes `raw_key_without_prefix` and decodes `T`.
+	/// `raw_key_without_prefix` is the raw storage key without the prefix iterated on.
+	closure: fn(&[u8]) -> Result<T, codec::Error>,
+}
+
+impl<T> KeyPrefixIterator<T> {
+	/// Creates a new `KeyPrefixIterator`, iterating after `previous_key` and filtering out keys
+	/// that are not prefixed with `prefix`.
+	///
+	/// A `decode_fn` function must also be supplied, and it takes in a `&[u8]` parameter, returning
+	/// a `Result` containing the decoded key type `T` if successful, and a `codec::Error` on
+	/// failure. The `&[u8]` argument represents the raw, undecoded key without the prefix of the
+	/// current item.
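+	///
+	/// # Example
+	///
+	/// A sketch with an assumed raw prefix, decoding every key suffix as a `u32` (this assumes
+	/// `KeyPrefixIterator` is reachable under `frame_support::storage`):
+	/// ```
+	/// # use frame_support::storage::KeyPrefixIterator;
+	/// # use codec::Decode;
+	/// let prefix = vec![0u8; 32];
+	/// let iter = KeyPrefixIterator::<u32>::new(prefix.clone(), prefix, |mut key: &[u8]| {
+	/// 	u32::decode(&mut key)
+	/// });
+	/// # drop(iter);
+	/// ```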
+	pub fn new(
+		prefix: Vec<u8>,
+		previous_key: Vec<u8>,
+		decode_fn: fn(&[u8]) -> Result<T, codec::Error>,
+	) -> Self {
+		KeyPrefixIterator { prefix, previous_key, drain: false, closure: decode_fn }
+	}
+
+	/// Get the last key that has been iterated upon and return it.
+	pub fn last_raw_key(&self) -> &[u8] {
+		&self.previous_key
+	}
+
+	/// Get the prefix that is being iterated upon for this iterator and return it.
+	pub fn prefix(&self) -> &[u8] {
+		&self.prefix
+	}
+
+	/// Set the key that the iterator should start iterating after.
+	pub fn set_last_raw_key(&mut self, previous_key: Vec<u8>) {
+		self.previous_key = previous_key;
+	}
+
+	/// Mutate this iterator into a draining iterator; items iterated are removed from storage.
+	pub fn drain(mut self) -> Self {
+		self.drain = true;
+		self
+	}
+}
+
+impl<T> Iterator for KeyPrefixIterator<T> {
+	type Item = T;
+
+	fn next(&mut self) -> Option<Self::Item> {
+		loop {
+			let maybe_next = sp_io::storage::next_key(&self.previous_key)
+				.filter(|n| n.starts_with(&self.prefix));
+
+			if let Some(next) = maybe_next {
+				self.previous_key = next;
+				if self.drain {
+					unhashed::kill(&self.previous_key);
+				}
+				let raw_key_without_prefix = &self.previous_key[self.prefix.len()..];
+
+				match (self.closure)(raw_key_without_prefix) {
+					Ok(item) => return Some(item),
+					Err(e) => {
+						log::error!("key failed to decode at {:?}: {:?}", self.previous_key, e);
+						continue
+					},
+				}
+			}
+
+			return None
+		}
+	}
+}
+
+/// Iterate over a prefix of a child trie and decode raw_key and raw_value into `T`.
+///
+/// If any decoding fails it skips the key and continues to the next one.
+pub struct ChildTriePrefixIterator<T> {
+	/// The prefix iterated on
+	prefix: Vec<u8>,
+	/// child info for child trie
+	child_info: ChildInfo,
+	/// The last key iterated on
+	previous_key: Vec<u8>,
+	/// If true then values are removed while iterating
+	drain: bool,
+	/// Whether or not we should fetch the previous key
+	fetch_previous_key: bool,
+	/// Function that takes `(raw_key_without_prefix, raw_value)` and decodes `T`.
+	/// `raw_key_without_prefix` is the raw storage key without the prefix iterated on.
+	closure: fn(&[u8], &[u8]) -> Result<T, codec::Error>,
+}
+
+impl<T> ChildTriePrefixIterator<T> {
+	/// Mutate this iterator into a draining iterator; items iterated are removed from storage.
+	pub fn drain(mut self) -> Self {
+		self.drain = true;
+		self
+	}
+}
+
+impl<T: Decode + Sized> ChildTriePrefixIterator<(Vec<u8>, T)> {
+	/// Construct iterator to iterate over child trie items in `child_info` with the prefix
+	/// `prefix`.
+	///
+	/// NOTE: Iterator with [`Self::drain`] will remove any value that fails to decode.
+	pub fn with_prefix(child_info: &ChildInfo, prefix: &[u8]) -> Self {
+		let prefix = prefix.to_vec();
+		let previous_key = prefix.clone();
+		let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| {
+			let value = T::decode(&mut &raw_value[..])?;
+			Ok((raw_key_without_prefix.to_vec(), value))
+		};
+
+		Self {
+			prefix,
+			child_info: child_info.clone(),
+			previous_key,
+			drain: false,
+			fetch_previous_key: true,
+			closure,
+		}
+	}
+}
+
+impl<K: Decode + Sized, T: Decode + Sized> ChildTriePrefixIterator<(K, T)> {
+	/// Construct iterator to iterate over child trie items in `child_info` with the prefix
+	/// `prefix`.
+	///
+	/// NOTE: Iterator with [`Self::drain`] will remove any key or value that fails to decode.
+	pub fn with_prefix_over_key<H: ReversibleStorageHasher>(
+		child_info: &ChildInfo,
+		prefix: &[u8],
+	) -> Self {
+		let prefix = prefix.to_vec();
+		let previous_key = prefix.clone();
+		let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| {
+			let mut key_material = H::reverse(raw_key_without_prefix);
+			let key = K::decode(&mut key_material)?;
+			let value = T::decode(&mut &raw_value[..])?;
+			Ok((key, value))
+		};
+
+		Self {
+			prefix,
+			child_info: child_info.clone(),
+			previous_key,
+			drain: false,
+			fetch_previous_key: true,
+			closure,
+		}
+	}
+}
+
+impl<T> Iterator for ChildTriePrefixIterator<T> {
+	type Item = T;
+
+	fn next(&mut self) -> Option<Self::Item> {
+		loop {
+			let maybe_next = if self.fetch_previous_key {
+				self.fetch_previous_key = false;
+				Some(self.previous_key.clone())
+			} else {
+				sp_io::default_child_storage::next_key(
+					&self.child_info.storage_key(),
+					&self.previous_key,
+				)
+				.filter(|n| n.starts_with(&self.prefix))
+			};
+			break match maybe_next {
+				Some(next) => {
+					self.previous_key = next;
+					let raw_value = match child::get_raw(&self.child_info, &self.previous_key) {
+						Some(raw_value) => raw_value,
+						None => {
+							log::error!(
+								"next_key returned a key with no value at {:?}",
+								self.previous_key,
+							);
+							continue
+						},
+					};
+					if self.drain {
+						child::kill(&self.child_info, &self.previous_key)
+					}
+					let raw_key_without_prefix = &self.previous_key[self.prefix.len()..];
+					let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) {
+						Ok(item) => item,
+						Err(e) => {
+							log::error!(
								"(key, value) failed to decode at {:?}: {:?}",
+								self.previous_key,
+								e,
+							);
+							continue
+						},
+					};
+
+					Some(item)
+				},
+				None => None,
+			}
+		}
+	}
+}
@@ -563,20 +1130,17 @@ pub trait StoragePrefixedMap {
 	/// Final full prefix that prefixes all keys.
 	fn final_prefix() -> [u8; 32] {
-		let mut final_key = [0u8; 32];
-		final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix()));
-		final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix()));
-		final_key
+		crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix())
 	}
 
 	/// Remove all values of the storage.
-	fn remove_all() {
-		sp_io::storage::clear_prefix(&Self::final_prefix())
+	fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
+		sp_io::storage::clear_prefix(&Self::final_prefix(), limit)
 	}
 
 	/// Iterate over all values of the storage.
 	///
-	/// NOTE: If a value failed to decode becaues storage is corrupted then it is skipped.
+	/// NOTE: If a value failed to decode because storage is corrupted then it is skipped.
 	fn iter_values() -> PrefixIterator<Value> {
 		let prefix = Self::final_prefix();
 		PrefixIterator {
@@ -584,6 +1148,7 @@ pub trait StoragePrefixedMap {
 			previous_key: prefix.to_vec(),
 			drain: false,
 			closure: |_raw_key, mut raw_value| Value::decode(&mut raw_value),
+			phantom: Default::default(),
 		}
 	}
 
@@ -600,11 +1165,11 @@ pub trait StoragePrefixedMap {
 	/// # Usage
 	///
 	/// This would typically be called inside the module implementation of on_runtime_upgrade.
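+	///
+	/// # Example
+	///
+	/// A sketch (the concrete map type is assumed): widening every stored `u32` into a `u64` is
+	/// just `Self::translate_values(|old: u32| Some(old as u64))`; returning `None` instead
+	/// would delete the entry.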
-	fn translate_values<OldValue: Decode, F: Fn(OldValue) -> Option<Value>>(f: F) {
+	fn translate_values<OldValue: Decode, F: FnMut(OldValue) -> Option<Value>>(mut f: F) {
 		let prefix = Self::final_prefix();
 		let mut previous_key = prefix.clone().to_vec();
-		while let Some(next) = sp_io::storage::next_key(&previous_key)
-			.filter(|n| n.starts_with(&prefix))
+		while let Some(next) =
+			sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix))
 		{
 			previous_key = next;
 			let maybe_value = unhashed::get::<OldValue>(&previous_key);
@@ -614,10 +1179,7 @@ pub trait StoragePrefixedMap {
 					None => unhashed::kill(&previous_key),
 				},
 				None => {
-					crate::debug::error!(
-						"old key failed to decode at {:?}",
-						previous_key
-					);
+					log::error!("old key failed to decode at {:?}", previous_key);
 					continue
 				},
 			}
@@ -652,30 +1214,197 @@ pub trait StorageDecodeLength: private::Sealed + codec::DecodeLength {
 }
 
 /// Provides `Sealed` trait to prevent implementing trait `StorageAppend` & `StorageDecodeLength`
-/// outside of this crate.
+/// & `EncodeLikeTuple` outside of this crate.
 mod private {
 	use super::*;
+	use bounded_vec::BoundedVec;
+	use weak_bounded_vec::WeakBoundedVec;
 
 	pub trait Sealed {}
 
 	impl<T: Encode> Sealed for Vec<T> {}
 	impl<Hash: Encode> Sealed for Digest<Hash> {}
+	impl<T, S> Sealed for BoundedVec<T, S> {}
+	impl<T, S> Sealed for WeakBoundedVec<T, S> {}
+	impl<K, V, S> Sealed for bounded_btree_map::BoundedBTreeMap<K, V, S> {}
+	impl<T, S> Sealed for bounded_btree_set::BoundedBTreeSet<T, S> {}
+
+	macro_rules! impl_sealed_for_tuple {
+		($($elem:ident),+) => {
+			paste::paste! {
+				impl<$($elem: Encode,)+> Sealed for ($($elem,)+) {}
+				impl<$($elem: Encode,)+> Sealed for &($($elem,)+) {}
+			}
+		};
+	}
+
+	impl_sealed_for_tuple!(A);
+	impl_sealed_for_tuple!(A, B);
+	impl_sealed_for_tuple!(A, B, C);
+	impl_sealed_for_tuple!(A, B, C, D);
+	impl_sealed_for_tuple!(A, B, C, D, E);
+	impl_sealed_for_tuple!(A, B, C, D, E, F);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J, K);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, O);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P, Q);
+	impl_sealed_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P, Q, R);
 }
 
 impl<T: Encode> StorageAppend<T> for Vec<T> {}
 impl<T: Encode> StorageDecodeLength for Vec<T> {}
 
-/// We abuse the fact that SCALE does not put any marker into the encoding, i.e.
-/// we only encode the internal vec and we can append to this vec. We have a test that ensures
-/// that if the `Digest` format ever changes, we need to remove this here.
+/// We abuse the fact that SCALE does not put any marker into the encoding, i.e. we only encode the
+/// internal vec and we can append to this vec. We have a test that ensures that if the `Digest`
+/// format ever changes, we need to remove this here.
 impl<Hash: Encode> StorageAppend<DigestItem<Hash>> for Digest<Hash> {}
 
+/// Marker trait that is implemented for types that support the `storage::append` api with a limit
+/// on the number of elements.
+///
+/// This trait is sealed.
+pub trait StorageTryAppend<I>: StorageDecodeLength + private::Sealed {
+	fn bound() -> usize;
+}
+
+/// Storage value that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend).
+pub trait TryAppendValue<T: StorageTryAppend<I>, I: Encode> {
+	/// Try and append the `item` into the storage item.
+	///
+	/// This might fail if bounds are not respected.
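+	///
+	/// (For a concrete usage sketch, see the `try_append_works` test at the bottom of this
+	/// file, which appends to a bounded storage value until its bound is exceeded.)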
+ fn try_append>(item: LikeI) -> Result<(), ()>; +} + +impl TryAppendValue for StorageValueT +where + I: Encode, + T: FullCodec + StorageTryAppend, + StorageValueT: generator::StorageValue, +{ + fn try_append>(item: LikeI) -> Result<(), ()> { + let bound = T::bound(); + let current = Self::decode_len().unwrap_or_default(); + if current < bound { + // NOTE: we cannot reuse the implementation for `Vec` here because we never want to + // mark `BoundedVec` as `StorageAppend`. + let key = Self::storage_value_final_key(); + sp_io::storage::append(&key, item.encode()); + Ok(()) + } else { + Err(()) + } + } +} + +/// Storage map that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +pub trait TryAppendMap, I: Encode> { + /// Try and append the `item` into the storage map at the given `key`. + /// + /// This might fail if bounds are not respected. + fn try_append + Clone, LikeI: EncodeLike>( + key: LikeK, + item: LikeI, + ) -> Result<(), ()>; +} + +impl TryAppendMap for StorageMapT +where + K: FullCodec, + T: FullCodec + StorageTryAppend, + I: Encode, + StorageMapT: generator::StorageMap, +{ + fn try_append + Clone, LikeI: EncodeLike>( + key: LikeK, + item: LikeI, + ) -> Result<(), ()> { + let bound = T::bound(); + let current = Self::decode_len(key.clone()).unwrap_or_default(); + if current < bound { + let key = Self::storage_map_final_key(key); + sp_io::storage::append(&key, item.encode()); + Ok(()) + } else { + Err(()) + } + } +} + +/// Storage double map that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +pub trait TryAppendDoubleMap, I: Encode> { + /// Try and append the `item` into the storage double map at the given `key`. + /// + /// This might fail if bounds are not respected. + fn try_append< + LikeK1: EncodeLike + Clone, + LikeK2: EncodeLike + Clone, + LikeI: EncodeLike, + >( + key1: LikeK1, + key2: LikeK2, + item: LikeI, + ) -> Result<(), ()>; +} + +impl TryAppendDoubleMap for StorageDoubleMapT +where + K1: FullCodec, + K2: FullCodec, + T: FullCodec + StorageTryAppend, + I: Encode, + StorageDoubleMapT: generator::StorageDoubleMap, +{ + fn try_append< + LikeK1: EncodeLike + Clone, + LikeK2: EncodeLike + Clone, + LikeI: EncodeLike, + >( + key1: LikeK1, + key2: LikeK2, + item: LikeI, + ) -> Result<(), ()> { + let bound = T::bound(); + let current = Self::decode_len(key1.clone(), key2.clone()).unwrap_or_default(); + if current < bound { + let double_map_key = Self::storage_double_map_final_key(key1, key2); + sp_io::storage::append(&double_map_key, item.encode()); + Ok(()) + } else { + Err(()) + } + } +} + +/// Returns the storage prefix for a specific pallet name and storage name. +/// +/// The storage prefix is `concat(twox_128(pallet_name), twox_128(storage_name))`. 
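+///
+/// # Example
+///
+/// A small sketch with illustrative names; the result is always 32 bytes, 16 per hash:
+/// ```
+/// # use frame_support::storage::storage_prefix;
+/// let prefix = storage_prefix(b"MyPallet", b"MyStorage");
+/// assert_eq!(prefix.len(), 32);
+/// ```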
+pub fn storage_prefix(pallet_name: &[u8], storage_name: &[u8]) -> [u8; 32] { + let pallet_hash = sp_io::hashing::twox_128(pallet_name); + let storage_hash = sp_io::hashing::twox_128(storage_name); + + let mut final_key = [0u8; 32]; + final_key[..16].copy_from_slice(&pallet_hash); + final_key[16..].copy_from_slice(&storage_hash); + + final_key +} + #[cfg(test)] mod test { use super::*; + use crate::{assert_ok, hash::Identity, Twox128}; + use bounded_vec::BoundedVec; + use core::convert::{TryFrom, TryInto}; + use generator::StorageValue as _; use sp_core::hashing::twox_128; use sp_io::TestExternalities; - use generator::StorageValue as _; + use weak_bounded_vec::WeakBoundedVec; #[test] fn prefixed_map_works() { @@ -721,7 +1450,7 @@ mod test { assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3, 4]); // test removal - MyStorage::remove_all(); + MyStorage::remove_all(None); assert!(MyStorage::iter_values().collect::>().is_empty()); // test migration @@ -731,7 +1460,7 @@ mod test { assert!(MyStorage::iter_values().collect::>().is_empty()); MyStorage::translate_values(|v: u32| Some(v as u64)); assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2]); - MyStorage::remove_all(); + MyStorage::remove_all(None); // test migration 2 unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u128); @@ -743,7 +1472,7 @@ mod test { assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); MyStorage::translate_values(|v: u128| Some(v as u64)); assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); - MyStorage::remove_all(); + MyStorage::remove_all(None); // test that other values are not modified. assert_eq!(unhashed::get(&key_before[..]), Some(32u64)); @@ -810,4 +1539,254 @@ mod test { }); }); } + + #[test] + fn key_prefix_iterator_works() { + TestExternalities::default().execute_with(|| { + use crate::{hash::Twox64Concat, storage::generator::StorageMap}; + struct MyStorageMap; + impl StorageMap for MyStorageMap { + type Query = u64; + type Hasher = Twox64Concat; + + fn module_prefix() -> &'static [u8] { + b"MyModule" + } + + fn storage_prefix() -> &'static [u8] { + b"MyStorageMap" + } + + fn from_optional_value_to_query(v: Option) -> Self::Query { + v.unwrap_or_default() + } + + fn from_query_to_optional_value(v: Self::Query) -> Option { + Some(v) + } + } + + let k = [twox_128(b"MyModule"), twox_128(b"MyStorageMap")].concat(); + assert_eq!(MyStorageMap::prefix_hash().to_vec(), k); + + // empty to start + assert!(MyStorageMap::iter_keys().collect::>().is_empty()); + + MyStorageMap::insert(1, 10); + MyStorageMap::insert(2, 20); + MyStorageMap::insert(3, 30); + MyStorageMap::insert(4, 40); + + // just looking + let mut keys = MyStorageMap::iter_keys().collect::>(); + keys.sort(); + assert_eq!(keys, vec![1, 2, 3, 4]); + + // draining the keys and values + let mut drained_keys = MyStorageMap::iter_keys().drain().collect::>(); + drained_keys.sort(); + assert_eq!(drained_keys, vec![1, 2, 3, 4]); + + // empty again + assert!(MyStorageMap::iter_keys().collect::>().is_empty()); + }); + } + + #[test] + fn prefix_iterator_pagination_works() { + TestExternalities::default().execute_with(|| { + use crate::{hash::Identity, storage::generator::map::StorageMap}; + crate::generate_storage_alias! 
{ + MyModule, + MyStorageMap => Map<(u64, Identity), u64> + } + + MyStorageMap::insert(1, 10); + MyStorageMap::insert(2, 20); + MyStorageMap::insert(3, 30); + MyStorageMap::insert(4, 40); + MyStorageMap::insert(5, 50); + MyStorageMap::insert(6, 60); + MyStorageMap::insert(7, 70); + MyStorageMap::insert(8, 80); + MyStorageMap::insert(9, 90); + MyStorageMap::insert(10, 100); + + let op = |(_, v)| v / 10; + let mut final_vec = vec![]; + let mut iter = MyStorageMap::iter(); + + let elem = iter.next().unwrap(); + assert_eq!(elem, (1, 10)); + final_vec.push(op(elem)); + + let elem = iter.next().unwrap(); + assert_eq!(elem, (2, 20)); + final_vec.push(op(elem)); + + let stored_key = iter.last_raw_key().to_owned(); + assert_eq!(stored_key, MyStorageMap::storage_map_final_key(2)); + + let mut iter = MyStorageMap::iter_from(stored_key.clone()); + + final_vec.push(op(iter.next().unwrap())); + final_vec.push(op(iter.next().unwrap())); + final_vec.push(op(iter.next().unwrap())); + + assert_eq!(final_vec, vec![1, 2, 3, 4, 5]); + + let mut iter = PrefixIterator::<_>::new( + iter.prefix().to_vec(), + stored_key, + |mut raw_key_without_prefix, mut raw_value| { + let key = u64::decode(&mut raw_key_without_prefix)?; + Ok((key, u64::decode(&mut raw_value)?)) + }, + ); + let previous_key = MyStorageMap::storage_map_final_key(5); + iter.set_last_raw_key(previous_key); + + let remaining = iter.map(op).collect::>(); + assert_eq!(remaining.len(), 5); + assert_eq!(remaining, vec![6, 7, 8, 9, 10]); + + final_vec.extend_from_slice(&remaining); + + assert_eq!(final_vec, vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + }); + } + + #[test] + fn child_trie_prefixed_map_works() { + TestExternalities::default().execute_with(|| { + let child_info_a = child::ChildInfo::new_default(b"a"); + child::put(&child_info_a, &[1, 2, 3], &8u16); + child::put(&child_info_a, &[2], &8u16); + child::put(&child_info_a, &[2, 1, 3], &8u8); + child::put(&child_info_a, &[2, 2, 3], &8u16); + child::put(&child_info_a, &[3], &8u16); + + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[2]) + .collect::, u16)>>(), + vec![(vec![], 8), (vec![2, 3], 8),], + ); + + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[2]) + .drain() + .collect::, u16)>>(), + vec![(vec![], 8), (vec![2, 3], 8),], + ); + + // The only remaining is the ones outside prefix + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[]) + .collect::, u8)>>(), + vec![(vec![1, 2, 3], 8), (vec![3], 8),], + ); + + child::put(&child_info_a, &[1, 2, 3], &8u16); + child::put(&child_info_a, &[2], &8u16); + child::put(&child_info_a, &[2, 1, 3], &8u8); + child::put(&child_info_a, &[2, 2, 3], &8u16); + child::put(&child_info_a, &[3], &8u16); + + assert_eq!( + ChildTriePrefixIterator::with_prefix_over_key::(&child_info_a, &[2]) + .collect::>(), + vec![(u16::decode(&mut &[2, 3][..]).unwrap(), 8),], + ); + + assert_eq!( + ChildTriePrefixIterator::with_prefix_over_key::(&child_info_a, &[2]) + .drain() + .collect::>(), + vec![(u16::decode(&mut &[2, 3][..]).unwrap(), 8),], + ); + + // The only remaining is the ones outside prefix + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[]) + .collect::, u8)>>(), + vec![(vec![1, 2, 3], 8), (vec![3], 8),], + ); + }); + } + + crate::parameter_types! { + pub const Seven: u32 = 7; + pub const Four: u32 = 4; + } + + crate::generate_storage_alias! { Prefix, Foo => Value> } + crate::generate_storage_alias! { Prefix, FooMap => Map<(u32, Twox128), BoundedVec> } + crate::generate_storage_alias! 
{ + Prefix, + FooDoubleMap => DoubleMap<(u32, Twox128), (u32, Twox128), BoundedVec> + } + + #[test] + fn try_append_works() { + TestExternalities::default().execute_with(|| { + let bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + Foo::put(bounded); + assert_ok!(Foo::try_append(4)); + assert_ok!(Foo::try_append(5)); + assert_ok!(Foo::try_append(6)); + assert_ok!(Foo::try_append(7)); + assert_eq!(Foo::decode_len().unwrap(), 7); + assert!(Foo::try_append(8).is_err()); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooMap::insert(1, bounded); + + assert_ok!(FooMap::try_append(1, 4)); + assert_ok!(FooMap::try_append(1, 5)); + assert_ok!(FooMap::try_append(1, 6)); + assert_ok!(FooMap::try_append(1, 7)); + assert_eq!(FooMap::decode_len(1).unwrap(), 7); + assert!(FooMap::try_append(1, 8).is_err()); + + // append to a non-existing + assert!(FooMap::get(2).is_none()); + assert_ok!(FooMap::try_append(2, 4)); + assert_eq!( + FooMap::get(2).unwrap(), + BoundedVec::::try_from(vec![4]).unwrap(), + ); + assert_ok!(FooMap::try_append(2, 5)); + assert_eq!( + FooMap::get(2).unwrap(), + BoundedVec::::try_from(vec![4, 5]).unwrap(), + ); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooDoubleMap::insert(1, 1, bounded); + + assert_ok!(FooDoubleMap::try_append(1, 1, 4)); + assert_ok!(FooDoubleMap::try_append(1, 1, 5)); + assert_ok!(FooDoubleMap::try_append(1, 1, 6)); + assert_ok!(FooDoubleMap::try_append(1, 1, 7)); + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 7); + assert!(FooDoubleMap::try_append(1, 1, 8).is_err()); + + // append to a non-existing + assert!(FooDoubleMap::get(2, 1).is_none()); + assert_ok!(FooDoubleMap::try_append(2, 1, 4)); + assert_eq!( + FooDoubleMap::get(2, 1).unwrap(), + BoundedVec::::try_from(vec![4]).unwrap(), + ); + assert_ok!(FooDoubleMap::try_append(2, 1, 5)); + assert_eq!( + FooDoubleMap::get(2, 1).unwrap(), + BoundedVec::::try_from(vec![4, 5]).unwrap(), + ); + }); + } } diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs new file mode 100644 index 0000000000000..0860a4ed541c6 --- /dev/null +++ b/frame/support/src/storage/types/counted_map.rs @@ -0,0 +1,1040 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage counted map type. 
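+//!
+//! As a rough sketch (the `Prefix` instance here is illustrative; in practice it is
+//! generated by the pallet macros), a counted map is a regular map plus a `u32` counter value:
+//! ```nocompile
+//! type MyMap = CountedStorageMap<Prefix, Twox64Concat, u16, u32>;
+//!
+//! MyMap::insert(1, 10);          // the first insert bumps the counter to 1
+//! assert_eq!(MyMap::count(), 1); // read the length without iterating the map
+//! ```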
+ +use crate::{ + metadata::StorageEntryMetadata, + storage::{ + generator::StorageMap as _, + types::{ + OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder, StorageMap, StorageValue, + ValueQuery, + }, + StorageAppend, StorageDecodeLength, StorageTryAppend, + }, + traits::{Get, GetDefault, StorageInfo, StorageInfoTrait, StorageInstance}, + Never, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref}; +use sp_runtime::traits::Saturating; +use sp_std::prelude::*; + +/// A wrapper around a `StorageMap` and a `StorageValue` to keep track of how many items +/// are in a map, without needing to iterate all the values. +/// +/// This storage item has additional storage read and write overhead when manipulating values +/// compared to a regular storage map. +/// +/// For functions where we only add or remove a value, a single storage read is needed to check if +/// that value already exists. For mutate functions, two storage reads are used to check if the +/// value existed before and after the mutation. +/// +/// Whenever the counter needs to be updated, an additional read and write occurs to update that +/// counter. +pub struct CountedStorageMap< + Prefix, + Hasher, + Key, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>(core::marker::PhantomData<(Prefix, Hasher, Key, Value, QueryKind, OnEmpty, MaxValues)>); + +/// The requirement for an instance of [`CountedStorageMap`]. +pub trait CountedStorageMapInstance: StorageInstance { + /// The prefix to use for the counter storage value. + type CounterPrefix: StorageInstance; +} + +// Private helper trait to access map from counted storage map. +trait MapWrapper { + type Map; +} + +impl MapWrapper + for CountedStorageMap +{ + type Map = StorageMap; +} + +type CounterFor

<P> = StorageValue<<P as CountedStorageMapInstance>
::CounterPrefix, u32, ValueQuery>; + +/// On removal logic for updating counter while draining upon some prefix with +/// [`crate::storage::PrefixIterator`]. +pub struct OnRemovalCounterUpdate(core::marker::PhantomData); + +impl crate::storage::PrefixIteratorOnRemoval + for OnRemovalCounterUpdate +{ + fn on_removal(_key: &[u8], _value: &[u8]) { + CounterFor::::mutate(|value| value.saturating_dec()); + } +} + +impl + CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Get the storage key used to fetch a value corresponding to a specific key. + pub fn hashed_key_for>(key: KeyArg) -> Vec { + ::Map::hashed_key_for(key) + } + + /// Does the value (explicitly) exist in storage? + pub fn contains_key>(key: KeyArg) -> bool { + ::Map::contains_key(key) + } + + /// Load the value associated with the given key from the map. + pub fn get>(key: KeyArg) -> QueryKind::Query { + ::Map::get(key) + } + + /// Try to get the value for the given key from the map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get>(key: KeyArg) -> Result { + ::Map::try_get(key) + } + + /// Swap the values of two keys. + pub fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2) { + ::Map::swap(key1, key2) + } + + /// Store a value to be associated with the given key from the map. + pub fn insert + Clone, ValArg: EncodeLike>( + key: KeyArg, + val: ValArg, + ) { + if !::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_inc()); + } + ::Map::insert(key, val) + } + + /// Remove the value under a key. + pub fn remove + Clone>(key: KeyArg) { + if ::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_dec()); + } + ::Map::remove(key) + } + + /// Mutate the value under a key. + pub fn mutate + Clone, R, F: FnOnce(&mut QueryKind::Query) -> R>( + key: KeyArg, + f: F, + ) -> R { + Self::try_mutate(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + /// Mutate the item, only if an `Ok` value is returned. + pub fn try_mutate(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike + Clone, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + Self::try_mutate_exists(key, |option_value_ref| { + let option_value = core::mem::replace(option_value_ref, None); + let mut query = ::Map::from_optional_value_to_query(option_value); + let res = f(&mut query); + let option_value = ::Map::from_query_to_optional_value(query); + let _ = core::mem::replace(option_value_ref, option_value); + res + }) + } + + /// Mutate the value under a key. Deletes the item if mutated to a `None`. + pub fn mutate_exists + Clone, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R { + Self::try_mutate_exists(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. 
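+ ///
+ /// A minimal sketch of the counter bookkeeping (key and error types are illustrative):
+ /// ```nocompile
+ /// let _ = CountedMap::try_mutate_exists(42u32, |maybe_value| {
+ ///     // Writing `None` over an existing value deletes it and decrements the counter;
+ ///     // writing `Some` over `None` increments it. On `Err` nothing is changed.
+ ///     *maybe_value = None;
+ ///     Ok::<(), ()>(())
+ /// });
+ /// ```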
+ pub fn try_mutate_exists(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike + Clone, + F: FnOnce(&mut Option) -> Result, + { + ::Map::try_mutate_exists(key, |option_value| { + let existed = option_value.is_some(); + let res = f(option_value); + let exist = option_value.is_some(); + + if res.is_ok() { + if existed && !exist { + // Value was deleted + CounterFor::::mutate(|value| value.saturating_dec()); + } else if !existed && exist { + // Value was added + CounterFor::::mutate(|value| value.saturating_inc()); + } + } + res + }) + } + + /// Take the value under a key. + pub fn take + Clone>(key: KeyArg) -> QueryKind::Query { + let removed_value = + ::Map::mutate_exists(key, |value| core::mem::replace(value, None)); + if removed_value.is_some() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + ::Map::from_optional_value_to_query(removed_value) + } + + /// Append the given items to the value in the storage. + /// + /// `Value` is required to implement `codec::EncodeAppend`. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten and set to + /// `[item]`. Any default value set for the storage item will be ignored on overwrite. + pub fn append(key: EncodeLikeKey, item: EncodeLikeItem) + where + EncodeLikeKey: EncodeLike + Clone, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + if !::Map::contains_key(Ref::from(&key)) { + CounterFor::::mutate(|value| value.saturating_inc()); + } + ::Map::append(key, item) + } + + /// Read the length of the storage value without decoding the entire value under the given + /// `key`. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. Otherwise + /// `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len>(key: KeyArg) -> Option + where + Value: StorageDecodeLength, + { + ::Map::decode_len(key) + } + + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + pub fn migrate_key>( + key: KeyArg, + ) -> Option { + ::Map::migrate_key::(key) + } + + /// Remove all value of the storage. + pub fn remove_all() { + CounterFor::::set(0u32); + ::Map::remove_all(None); + } + + /// Iter over all value of the storage. + /// + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. + pub fn iter_values() -> crate::storage::PrefixIterator> { + let map_iterator = ::Map::iter_values(); + crate::storage::PrefixIterator { + prefix: map_iterator.prefix, + previous_key: map_iterator.previous_key, + drain: map_iterator.drain, + closure: map_iterator.closure, + phantom: Default::default(), + } + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. 
+ /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. + pub fn translate_values Option>(mut f: F) { + ::Map::translate_values(|old_value| { + let res = f(old_value); + if res.is_none() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + res + }) + } + + /// Try and append the given item to the value in the storage. + /// + /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. + pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> + where + KArg: EncodeLike + Clone, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageTryAppend, + { + let bound = Value::bound(); + let current = ::Map::decode_len(Ref::from(&key)).unwrap_or_default(); + if current < bound { + CounterFor::::mutate(|value| value.saturating_inc()); + let key = ::Map::hashed_key_for(key); + sp_io::storage::append(&key, item.encode()); + Ok(()) + } else { + Err(()) + } + } + + /// Initialize the counter with the actual number of items in the map. + /// + /// This function iterates through all the items in the map and sets the counter. This operation + /// can be very heavy, so use with caution. + /// + /// Returns the number of items in the map which is used to set the counter. + pub fn initialize_counter() -> u32 { + let count = Self::iter_values().count() as u32; + CounterFor::::set(count); + count + } + + /// Return the count. + pub fn count() -> u32 { + CounterFor::::get() + } +} + +impl + CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Enumerate all elements in the map in no particular order. + /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter() -> crate::storage::PrefixIterator<(Key, Value), OnRemovalCounterUpdate> { + let map_iterator = ::Map::iter(); + crate::storage::PrefixIterator { + prefix: map_iterator.prefix, + previous_key: map_iterator.previous_key, + drain: map_iterator.drain, + closure: map_iterator.closure, + phantom: Default::default(), + } + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain() -> crate::storage::PrefixIterator<(Key, Value), OnRemovalCounterUpdate> { + let map_iterator = ::Map::drain(); + crate::storage::PrefixIterator { + prefix: map_iterator.prefix, + previous_key: map_iterator.previous_key, + drain: map_iterator.drain, + closure: map_iterator.closure, + phantom: Default::default(), + } + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. 
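+ ///
+ /// For example (key and value types purely illustrative):
+ /// ```nocompile
+ /// // Drop the entry at key 1 and double the rest; removals also decrement the counter.
+ /// CountedMap::translate::<u32, _>(|key, old: u32| {
+ ///     if key == 1 { None } else { Some(old * 2) }
+ /// });
+ /// ```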
+ pub fn translate Option>(mut f: F) { + ::Map::translate(|key, old_value| { + let res = f(key, old_value); + if res.is_none() { + CounterFor::::mutate(|value| value.saturating_dec()); + } + res + }) + } +} + +impl StorageEntryMetadataBuilder + for CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec + scale_info::StaticTypeInfo, + Value: FullCodec + scale_info::StaticTypeInfo, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + ::Map::build_metadata(docs, entries); + CounterFor::::build_metadata( + vec![&"Counter for the related counted storage map"], + entries, + ); + } +} + +impl crate::traits::StorageInfoTrait + for CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec + MaxEncodedLen, + Value: FullCodec + MaxEncodedLen, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn storage_info() -> Vec { + [::Map::storage_info(), CounterFor::::storage_info()].concat() + } +} + +/// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. +impl + crate::traits::PartialStorageInfoTrait + for CountedStorageMap +where + Prefix: CountedStorageMapInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn partial_storage_info() -> Vec { + [::Map::partial_storage_info(), CounterFor::::storage_info()] + .concat() + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + hash::*, + metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, + storage::{bounded_vec::BoundedVec, types::ValueQuery}, + traits::ConstU32, + }; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct CounterPrefix; + impl StorageInstance for CounterPrefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "counter_for_foo"; + } + impl CountedStorageMapInstance for Prefix { + type CounterPrefix = CounterPrefix; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test_value_query() { + type A = CountedStorageMap; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.twox_64_concat()); + assert_eq!(A::hashed_key_for(3).to_vec(), k); + + assert_eq!(A::contains_key(3), false); + assert_eq!(A::get(3), ADefault::get()); + assert_eq!(A::try_get(3), Err(())); + assert_eq!(A::count(), 0); + + // Insert non-existing. + A::insert(3, 10); + + assert_eq!(A::contains_key(3), true); + assert_eq!(A::get(3), 10); + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(A::count(), 1); + + // Swap non-existing with existing. + A::swap(4, 3); + + assert_eq!(A::contains_key(3), false); + assert_eq!(A::get(3), ADefault::get()); + assert_eq!(A::try_get(3), Err(())); + assert_eq!(A::contains_key(4), true); + assert_eq!(A::get(4), 10); + assert_eq!(A::try_get(4), Ok(10)); + assert_eq!(A::count(), 1); + + // Swap existing with non-existing. 
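+ // (The entry simply moves from key 4 back to key 3: nothing is created or removed,
+ // so the count stays at 1.)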
+ A::swap(4, 3); + + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(A::contains_key(4), false); + assert_eq!(A::get(4), ADefault::get()); + assert_eq!(A::try_get(4), Err(())); + assert_eq!(A::count(), 1); + + A::insert(4, 11); + + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(A::try_get(4), Ok(11)); + assert_eq!(A::count(), 2); + + // Swap 2 existing. + A::swap(3, 4); + + assert_eq!(A::try_get(3), Ok(11)); + assert_eq!(A::try_get(4), Ok(10)); + assert_eq!(A::count(), 2); + + // Insert an existing key, shouldn't increment counted values. + A::insert(3, 11); + + assert_eq!(A::count(), 2); + + // Remove non-existing. + A::remove(2); + + assert_eq!(A::contains_key(2), false); + assert_eq!(A::count(), 2); + + // Remove existing. + A::remove(3); + + assert_eq!(A::try_get(3), Err(())); + assert_eq!(A::count(), 1); + + // Mutate non-existing to existing. + A::mutate(3, |query| { + assert_eq!(*query, ADefault::get()); + *query = 40; + }); + + assert_eq!(A::try_get(3), Ok(40)); + assert_eq!(A::count(), 2); + + // Mutate existing to existing. + A::mutate(3, |query| { + assert_eq!(*query, 40); + *query = 40; + }); + + assert_eq!(A::try_get(3), Ok(40)); + assert_eq!(A::count(), 2); + + // Try fail mutate non-existing to existing. + A::try_mutate(2, |query| { + assert_eq!(*query, ADefault::get()); + *query = 4; + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(A::try_get(2), Err(())); + assert_eq!(A::count(), 2); + + // Try succeed mutate non-existing to existing. + A::try_mutate(2, |query| { + assert_eq!(*query, ADefault::get()); + *query = 41; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(2), Ok(41)); + assert_eq!(A::count(), 3); + + // Try succeed mutate existing to existing. + A::try_mutate(2, |query| { + assert_eq!(*query, 41); + *query = 41; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(2), Ok(41)); + assert_eq!(A::count(), 3); + + // Try fail mutate non-existing to existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(4); + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(A::try_get(1), Err(())); + assert_eq!(A::count(), 3); + + // Try succeed mutate non-existing to existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(1), Ok(43)); + assert_eq!(A::count(), 4); + + // Try succeed mutate existing to existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(1), Ok(43)); + assert_eq!(A::count(), 4); + + // Try succeed mutate existing to non-existing. + A::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = None; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(A::try_get(1), Err(())); + assert_eq!(A::count(), 3); + + // Take exsisting. + assert_eq!(A::take(4), 10); + + assert_eq!(A::try_get(4), Err(())); + assert_eq!(A::count(), 2); + + // Take non-exsisting. + assert_eq!(A::take(4), ADefault::get()); + + assert_eq!(A::try_get(4), Err(())); + assert_eq!(A::count(), 2); + + // Remove all. + A::remove_all(); + + assert_eq!(A::count(), 0); + assert_eq!(A::initialize_counter(), 0); + + A::insert(1, 1); + A::insert(2, 2); + + // Iter values. + assert_eq!(A::iter_values().collect::>(), vec![2, 1]); + + // Iter drain values. 
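+ // (Each value yielded by `drain()` is removed on the fly and, via
+ // `OnRemovalCounterUpdate`, decrements the counter down to 0.)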
+ assert_eq!(A::iter_values().drain().collect::>(), vec![2, 1]); + assert_eq!(A::count(), 0); + + A::insert(1, 1); + A::insert(2, 2); + + // Test initialize_counter. + assert_eq!(A::initialize_counter(), 2); + }) + } + + #[test] + fn test_option_query() { + type B = CountedStorageMap; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.twox_64_concat()); + assert_eq!(B::hashed_key_for(3).to_vec(), k); + + assert_eq!(B::contains_key(3), false); + assert_eq!(B::get(3), None); + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::count(), 0); + + // Insert non-existing. + B::insert(3, 10); + + assert_eq!(B::contains_key(3), true); + assert_eq!(B::get(3), Some(10)); + assert_eq!(B::try_get(3), Ok(10)); + assert_eq!(B::count(), 1); + + // Swap non-existing with existing. + B::swap(4, 3); + + assert_eq!(B::contains_key(3), false); + assert_eq!(B::get(3), None); + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::contains_key(4), true); + assert_eq!(B::get(4), Some(10)); + assert_eq!(B::try_get(4), Ok(10)); + assert_eq!(B::count(), 1); + + // Swap existing with non-existing. + B::swap(4, 3); + + assert_eq!(B::try_get(3), Ok(10)); + assert_eq!(B::contains_key(4), false); + assert_eq!(B::get(4), None); + assert_eq!(B::try_get(4), Err(())); + assert_eq!(B::count(), 1); + + B::insert(4, 11); + + assert_eq!(B::try_get(3), Ok(10)); + assert_eq!(B::try_get(4), Ok(11)); + assert_eq!(B::count(), 2); + + // Swap 2 existing. + B::swap(3, 4); + + assert_eq!(B::try_get(3), Ok(11)); + assert_eq!(B::try_get(4), Ok(10)); + assert_eq!(B::count(), 2); + + // Insert an existing key, shouldn't increment counted values. + B::insert(3, 11); + + assert_eq!(B::count(), 2); + + // Remove non-existing. + B::remove(2); + + assert_eq!(B::contains_key(2), false); + assert_eq!(B::count(), 2); + + // Remove existing. + B::remove(3); + + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::count(), 1); + + // Mutate non-existing to existing. + B::mutate(3, |query| { + assert_eq!(*query, None); + *query = Some(40) + }); + + assert_eq!(B::try_get(3), Ok(40)); + assert_eq!(B::count(), 2); + + // Mutate existing to existing. + B::mutate(3, |query| { + assert_eq!(*query, Some(40)); + *query = Some(40) + }); + + assert_eq!(B::try_get(3), Ok(40)); + assert_eq!(B::count(), 2); + + // Mutate existing to non-existing. + B::mutate(3, |query| { + assert_eq!(*query, Some(40)); + *query = None + }); + + assert_eq!(B::try_get(3), Err(())); + assert_eq!(B::count(), 1); + + B::insert(3, 40); + + // Try fail mutate non-existing to existing. + B::try_mutate(2, |query| { + assert_eq!(*query, None); + *query = Some(4); + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(B::try_get(2), Err(())); + assert_eq!(B::count(), 2); + + // Try succeed mutate non-existing to existing. + B::try_mutate(2, |query| { + assert_eq!(*query, None); + *query = Some(41); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(2), Ok(41)); + assert_eq!(B::count(), 3); + + // Try succeed mutate existing to existing. + B::try_mutate(2, |query| { + assert_eq!(*query, Some(41)); + *query = Some(41); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(2), Ok(41)); + assert_eq!(B::count(), 3); + + // Try succeed mutate existing to non-existing. 
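+ // (With `OptionQuery`, writing `None` inside `try_mutate` deletes the entry on `Ok`,
+ // so the count drops by one.)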
+ B::try_mutate(2, |query| { + assert_eq!(*query, Some(41)); + *query = None; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(2), Err(())); + assert_eq!(B::count(), 2); + + B::insert(2, 41); + + // Try fail mutate non-existing to existing. + B::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(4); + Result::<(), ()>::Err(()) + }) + .err() + .unwrap(); + + assert_eq!(B::try_get(1), Err(())); + assert_eq!(B::count(), 3); + + // Try succeed mutate non-existing to existing. + B::try_mutate_exists(1, |query| { + assert_eq!(*query, None); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(1), Ok(43)); + assert_eq!(B::count(), 4); + + // Try succeed mutate existing to existing. + B::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = Some(43); + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(1), Ok(43)); + assert_eq!(B::count(), 4); + + // Try succeed mutate existing to non-existing. + B::try_mutate_exists(1, |query| { + assert_eq!(*query, Some(43)); + *query = None; + Result::<(), ()>::Ok(()) + }) + .unwrap(); + + assert_eq!(B::try_get(1), Err(())); + assert_eq!(B::count(), 3); + + // Take exsisting. + assert_eq!(B::take(4), Some(10)); + + assert_eq!(B::try_get(4), Err(())); + assert_eq!(B::count(), 2); + + // Take non-exsisting. + assert_eq!(B::take(4), None); + + assert_eq!(B::try_get(4), Err(())); + assert_eq!(B::count(), 2); + + // Remove all. + B::remove_all(); + + assert_eq!(B::count(), 0); + assert_eq!(B::initialize_counter(), 0); + + B::insert(1, 1); + B::insert(2, 2); + + // Iter values. + assert_eq!(B::iter_values().collect::>(), vec![2, 1]); + + // Iter drain values. + assert_eq!(B::iter_values().drain().collect::>(), vec![2, 1]); + assert_eq!(B::count(), 0); + + B::insert(1, 1); + B::insert(2, 2); + + // Test initialize_counter. 
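+ // (`initialize_counter` recounts by iterating every value, so it is O(n); use it
+ // sparingly, e.g. in a migration.)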
+ assert_eq!(B::initialize_counter(), 2); + }) + } + + #[test] + fn append_decode_len_works() { + type B = CountedStorageMap>; + + TestExternalities::default().execute_with(|| { + assert_eq!(B::decode_len(0), None); + B::append(0, 3); + assert_eq!(B::decode_len(0), Some(1)); + B::append(0, 3); + assert_eq!(B::decode_len(0), Some(2)); + B::append(0, 3); + assert_eq!(B::decode_len(0), Some(3)); + }) + } + + #[test] + fn try_append_decode_len_works() { + type B = CountedStorageMap>>; + + TestExternalities::default().execute_with(|| { + assert_eq!(B::decode_len(0), None); + B::try_append(0, 3).unwrap(); + assert_eq!(B::decode_len(0), Some(1)); + B::try_append(0, 3).unwrap(); + assert_eq!(B::decode_len(0), Some(2)); + B::try_append(0, 3).unwrap(); + assert_eq!(B::decode_len(0), Some(3)); + B::try_append(0, 3).err().unwrap(); + assert_eq!(B::decode_len(0), Some(3)); + }) + } + + #[test] + fn migrate_keys_works() { + type A = CountedStorageMap; + type B = CountedStorageMap; + TestExternalities::default().execute_with(|| { + A::insert(1, 1); + assert_eq!(B::migrate_key::(1), Some(1)); + assert_eq!(B::get(1), Some(1)); + }) + } + + #[test] + fn translate_values() { + type A = CountedStorageMap; + TestExternalities::default().execute_with(|| { + A::insert(1, 1); + A::insert(2, 2); + A::translate_values::(|old_value| if old_value == 1 { None } else { Some(1) }); + assert_eq!(A::count(), 1); + assert_eq!(A::get(2), Some(1)); + }) + } + + #[test] + fn test_iter_drain_translate() { + type A = CountedStorageMap; + TestExternalities::default().execute_with(|| { + A::insert(1, 1); + A::insert(2, 2); + + assert_eq!(A::iter().collect::>(), vec![(2, 2), (1, 1)]); + + assert_eq!(A::count(), 2); + + A::translate::( + |key, value| if key == 1 { None } else { Some(key as u32 * value) }, + ); + + assert_eq!(A::count(), 1); + + assert_eq!(A::drain().collect::>(), vec![(2, 4)]); + + assert_eq!(A::count(), 0); + }) + } + + #[test] + fn test_metadata() { + type A = CountedStorageMap; + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Twox64Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: 97u32.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "counter_for_foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec!["Counter for the related counted storage map"], + }, + ] + ); + } +} diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs new file mode 100644 index 0000000000000..b9af4a621b92a --- /dev/null +++ b/frame/support/src/storage/types/double_map.rs @@ -0,0 +1,830 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage map type. Implements StorageDoubleMap, StorageIterableDoubleMap, +//! StoragePrefixedDoubleMap traits and their methods directly. + +use crate::{ + metadata::{StorageEntryMetadata, StorageEntryType}, + storage::{ + types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, + StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, + }, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use sp_arithmetic::traits::SaturatedConversion; +use sp_std::prelude::*; + +/// A type that allow to store values for `(key1, key2)` couple. Similar to `StorageMap` but allow +/// to iterate and remove value associated to first key. +/// +/// Each value is stored at: +/// ```nocompile +/// Twox128(Prefix::pallet_prefix()) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key1)) +/// ++ Hasher2(encode(key2)) +/// ``` +/// +/// # Warning +/// +/// If the key1s (or key2s) are not trusted (e.g. can be set by a user), a cryptographic `hasher` +/// such as `blake2_128_concat` must be used for Hasher1 (resp. Hasher2). Otherwise, other values +/// in storage can be compromised. +pub struct StorageDoubleMap< + Prefix, + Hasher1, + Key1, + Hasher2, + Key2, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>( + core::marker::PhantomData<( + Prefix, + Hasher1, + Key1, + Hasher2, + Key2, + Value, + QueryKind, + OnEmpty, + MaxValues, + )>, +); + +impl + crate::storage::generator::StorageDoubleMap + for StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + type Query = QueryKind::Query; + type Hasher1 = Hasher1; + type Hasher2 = Hasher2; + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } + fn from_optional_value_to_query(v: Option) -> Self::Query { + QueryKind::from_optional_value_to_query(v) + } + fn from_query_to_optional_value(v: Self::Query) -> Option { + QueryKind::from_query_to_optional_value(v) + } +} + +impl + StoragePrefixedMap + for StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn module_prefix() -> &'static [u8] { + >::module_prefix() + } + fn storage_prefix() -> &'static [u8] { + >::storage_prefix() + } +} + +impl + StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Get the storage key used to fetch a value corresponding to a specific key. + pub fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::hashed_key_for(k1, k2) + } + + /// Does the value (explicitly) exist in storage? 
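+ ///
+ /// Note: a `ValueQuery` default alone does not make a key "contained"; only an explicit
+ /// write (e.g. `insert`) does.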
+ pub fn contains_key(k1: KArg1, k2: KArg2) -> bool + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::contains_key(k1, k2) + } + + /// Load the value associated with the given key from the double map. + pub fn get(k1: KArg1, k2: KArg2) -> QueryKind::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::get(k1, k2) + } + + /// Try to get the value for the given key from the double map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get(k1: KArg1, k2: KArg2) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::try_get(k1, k2) + } + + /// Take a value from storage, removing it afterwards. + pub fn take(k1: KArg1, k2: KArg2) -> QueryKind::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::take(k1, k2) + } + + /// Swap the values of two key-pairs. + pub fn swap( + x_k1: XKArg1, + x_k2: XKArg2, + y_k1: YKArg1, + y_k2: YKArg2, + ) where + XKArg1: EncodeLike, + XKArg2: EncodeLike, + YKArg1: EncodeLike, + YKArg2: EncodeLike, + { + >::swap(x_k1, x_k2, y_k1, y_k2) + } + + /// Store a value to be associated with the given keys from the double map. + pub fn insert(k1: KArg1, k2: KArg2, val: VArg) + where + KArg1: EncodeLike, + KArg2: EncodeLike, + VArg: EncodeLike, + { + >::insert(k1, k2, val) + } + + /// Remove the value under the given keys. + pub fn remove(k1: KArg1, k2: KArg2) + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + >::remove(k1, k2) + } + + /// Remove all values under the first key. + pub fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult + where + KArg1: ?Sized + EncodeLike, + { + >::remove_prefix(k1, limit) + } + + /// Iterate over values that share the first key. + pub fn iter_prefix_values(k1: KArg1) -> crate::storage::PrefixIterator + where + KArg1: ?Sized + EncodeLike, + { + >::iter_prefix_values(k1) + } + + /// Mutate the value under the given keys. + pub fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut QueryKind::Query) -> R, + { + >::mutate(k1, k2, f) + } + + /// Mutate the value under the given keys when the closure returns `Ok`. + pub fn try_mutate(k1: KArg1, k2: KArg2, f: F) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + >::try_mutate(k1, k2, f) + } + + /// Mutate the value under the given keys. Deletes the item if mutated to a `None`. + pub fn mutate_exists(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Option) -> R, + { + >::mutate_exists(k1, k2, f) + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + pub fn try_mutate_exists(k1: KArg1, k2: KArg2, f: F) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Option) -> Result, + { + >::try_mutate_exists(k1, k2, f) + } + + /// Append the given item to the value in the storage. + /// + /// `Value` is required to implement [`StorageAppend`]. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten + /// and set to `[item]`. Any default value set for the storage item will be ignored + /// on overwrite. + pub fn append(k1: KArg1, k2: KArg2, item: EncodeLikeItem) + where + KArg1: EncodeLike, + KArg2: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + >::append(k1, k2, item) + } + + /// Read the length of the storage value without decoding the entire value under the + /// given `key1` and `key2`. 
+ /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len(key1: KArg1, key2: KArg2) -> Option + where + KArg1: EncodeLike, + KArg2: EncodeLike, + Value: StorageDecodeLength, + { + >::decode_len(key1, key2) + } + + /// Migrate an item with the given `key1` and `key2` from defunct `OldHasher1` and + /// `OldHasher2` to the current hashers. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + pub fn migrate_keys< + OldHasher1: crate::StorageHasher, + OldHasher2: crate::StorageHasher, + KeyArg1: EncodeLike, + KeyArg2: EncodeLike, + >( + key1: KeyArg1, + key2: KeyArg2, + ) -> Option { + >::migrate_keys::< + OldHasher1, + OldHasher2, + _, + _, + >(key1, key2) + } + + /// Remove all value of the storage. + pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + >::remove_all(limit) + } + + /// Iter over all value of the storage. + /// + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. + pub fn iter_values() -> crate::storage::PrefixIterator { + >::iter_values() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. + pub fn translate_values Option>(f: F) { + >::translate_values(f) + } + + /// Try and append the given item to the value in the storage. + /// + /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. + pub fn try_append( + key1: KArg1, + key2: KArg2, + item: EncodeLikeItem, + ) -> Result<(), ()> + where + KArg1: EncodeLike + Clone, + KArg2: EncodeLike + Clone, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageTryAppend, + { + >::try_append( + key1, key2, item, + ) + } +} + +impl + StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Hasher2: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Enumerate all elements in the map with first key `k1` in no particular order. + /// + /// If you add or remove values whose first key is `k1` to the map while doing this, you'll get + /// undefined results. + pub fn iter_prefix(k1: impl EncodeLike) -> crate::storage::PrefixIterator<(Key2, Value)> { + >::iter_prefix(k1) + } + + /// Enumerate all elements in the map with first key `k1` after a specified `starting_raw_key` + /// in no particular order. + /// + /// If you add or remove values whose first key is `k1` to the map while doing this, you'll get + /// undefined results. 
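+ ///
+ /// A pagination sketch (names illustrative): resume from where a previous iterator
+ /// stopped by feeding back its last raw key:
+ /// ```nocompile
+ /// let mut iter = DoubleMap::iter_prefix(k1);
+ /// let first = iter.next();
+ /// let resume = iter.last_raw_key().to_vec();
+ /// let rest = DoubleMap::iter_prefix_from(k1, resume);
+ /// ```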
+ pub fn iter_prefix_from( + k1: impl EncodeLike, + starting_raw_key: Vec, + ) -> crate::storage::PrefixIterator<(Key2, Value)> { + >::iter_prefix_from( + k1, + starting_raw_key, + ) + } + + /// Enumerate all second keys `k2` in the map with the same first key `k1` in no particular + /// order. + /// + /// If you add or remove values whose first key is `k1` to the map while doing this, you'll get + /// undefined results. + pub fn iter_key_prefix(k1: impl EncodeLike) -> crate::storage::KeyPrefixIterator { + >::iter_key_prefix(k1) + } + + /// Enumerate all second keys `k2` in the map with the same first key `k1` after a specified + /// `starting_raw_key` in no particular order. + /// + /// If you add or remove values whose first key is `k1` to the map while doing this, you'll get + /// undefined results. + pub fn iter_key_prefix_from( + k1: impl EncodeLike, + starting_raw_key: Vec, + ) -> crate::storage::KeyPrefixIterator { + >::iter_key_prefix_from( + k1, + starting_raw_key, + ) + } + + /// Remove all elements from the map with first key `k1` and iterate through them in no + /// particular order. + /// + /// If you add elements with first key `k1` to the map while doing this, you'll get undefined + /// results. + pub fn drain_prefix( + k1: impl EncodeLike, + ) -> crate::storage::PrefixIterator<(Key2, Value)> { + >::drain_prefix(k1) + } + + /// Enumerate all elements in the map in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter() -> crate::storage::PrefixIterator<(Key1, Key2, Value)> { + >::iter() + } + + /// Enumerate all elements in the map after a specified `starting_raw_key` in no particular + /// order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_from( + starting_raw_key: Vec, + ) -> crate::storage::PrefixIterator<(Key1, Key2, Value)> { + >::iter_from( + starting_raw_key, + ) + } + + /// Enumerate all keys `k1` and `k2` in the map in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_keys() -> crate::storage::KeyPrefixIterator<(Key1, Key2)> { + >::iter_keys() + } + + /// Enumerate all keys `k1` and `k2` in the map after a specified `starting_raw_key` in no + /// particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_keys_from( + starting_raw_key: Vec, + ) -> crate::storage::KeyPrefixIterator<(Key1, Key2)> { + >::iter_keys_from( + starting_raw_key, + ) + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain() -> crate::storage::PrefixIterator<(Key1, Key2, Value)> { + >::drain() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. 
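+ ///
+ /// For example (types illustrative), rescale every value while dropping one first key:
+ /// ```nocompile
+ /// DoubleMap::translate::<u8, _>(|k1, _k2, old| {
+ ///     if k1 == 3 { None } else { Some(old as u32 * 2) }
+ /// });
+ /// ```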
+ pub fn translate Option>(f: F) { + >::translate(f) + } +} + +impl + StorageEntryMetadataBuilder + for StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec + scale_info::StaticTypeInfo, + Key2: FullCodec + scale_info::StaticTypeInfo, + Value: FullCodec + scale_info::StaticTypeInfo, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Map { + hashers: vec![Hasher1::METADATA, Hasher2::METADATA], + key: scale_info::meta_type::<(Key1, Key2)>(), + value: scale_info::meta_type::(), + }, + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); + } +} + +impl + crate::traits::StorageInfoTrait + for StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec + MaxEncodedLen, + Key2: FullCodec + MaxEncodedLen, + Value: FullCodec + MaxEncodedLen, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: Some( + Hasher1::max_len::() + .saturating_add(Hasher2::max_len::()) + .saturating_add(Value::max_encoded_len()) + .saturated_into(), + ), + }] + } +} + +/// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. +impl + crate::traits::PartialStorageInfoTrait + for StorageDoubleMap +where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn partial_storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: None, + }] + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + hash::*, + metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, + storage::types::ValueQuery, + }; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test() { + type A = + StorageDoubleMap; + type AValueQueryWithAnOnEmpty = StorageDoubleMap< + Prefix, + Blake2_128Concat, + u16, + Twox64Concat, + u8, + u32, + ValueQuery, + ADefault, + >; + type B = StorageDoubleMap; + type C = StorageDoubleMap; + type WithLen = StorageDoubleMap>; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.blake2_128_concat()); + k.extend(&30u8.twox_64_concat()); + assert_eq!(A::hashed_key_for(3, 30).to_vec(), k); + + assert_eq!(A::contains_key(3, 30), false); + assert_eq!(A::get(3, 30), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(3, 30), 97); + + A::insert(3, 30, 10); + assert_eq!(A::contains_key(3, 30), true); + 
assert_eq!(A::get(3, 30), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(3, 30), 10); + + A::swap(3, 30, 2, 20); + assert_eq!(A::contains_key(3, 30), false); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(3, 30), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(3, 30), 97); + assert_eq!(A::get(2, 20), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(2, 20), 10); + + A::remove(2, 20); + assert_eq!(A::contains_key(2, 20), false); + assert_eq!(A::get(2, 20), None); + + AValueQueryWithAnOnEmpty::mutate(2, 20, |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate(2, 20, |v| *v = *v * 2); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(97 * 4)); + + A::remove(2, 20); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { + *v = *v * 2; + Ok(()) + }); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { + *v = *v * 2; + Ok(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(97 * 4)); + + A::remove(2, 20); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key(2, 20), false); + + A::remove(2, 20); + AValueQueryWithAnOnEmpty::mutate_exists(2, 20, |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists(2, 20, |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(100)); + + A::remove(2, 20); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, 20, |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, 20, |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(100)); + assert_eq!(A::try_get(2, 20), Ok(100)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, 20, |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(100)); + + A::insert(2, 20, 10); + assert_eq!(A::take(2, 20), Some(10)); + assert_eq!(A::contains_key(2, 20), false); + assert_eq!(AValueQueryWithAnOnEmpty::take(2, 20), 97); + assert_eq!(A::contains_key(2, 20), false); + assert_eq!(A::try_get(2, 20), Err(())); + + B::insert(2, 20, 10); + assert_eq!(A::migrate_keys::(2, 20), Some(10)); + assert_eq!(A::contains_key(2, 20), true); + assert_eq!(A::get(2, 20), Some(10)); + + A::insert(3, 30, 10); + A::insert(4, 40, 10); + A::remove_all(None); + assert_eq!(A::contains_key(3, 30), false); + assert_eq!(A::contains_key(4, 40), false); + + A::insert(3, 30, 10); + A::insert(4, 40, 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + + C::insert(3, 30, 10); + C::insert(4, 40, 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 40, 20), (3, 30, 20)]); + + A::insert(3, 30, 10); + A::insert(4, 40, 10); + assert_eq!(A::iter().collect::>(), vec![(4, 40, 10), (3, 30, 10)]); + assert_eq!(A::drain().collect::>(), vec![(4, 40, 10), (3, 30, 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + + C::insert(3, 30, 10); + C::insert(4, 40, 10); + A::translate::(|k1, k2, v| Some((k1 * k2 as u16 * v as u16).into())); + assert_eq!(A::iter().collect::>(), 
vec![(4, 40, 1600), (3, 30, 900)]); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: 97u32.encode(), + docs: vec![], + } + ] + ); + + WithLen::remove_all(None); + assert_eq!(WithLen::decode_len(3, 30), None); + WithLen::append(0, 100, 10); + assert_eq!(WithLen::decode_len(0, 100), Some(1)); + + A::insert(3, 30, 11); + A::insert(3, 31, 12); + A::insert(4, 40, 13); + A::insert(4, 41, 14); + assert_eq!(A::iter_prefix_values(3).collect::>(), vec![12, 11]); + assert_eq!(A::iter_prefix(3).collect::>(), vec![(31, 12), (30, 11)]); + assert_eq!(A::iter_prefix_values(4).collect::>(), vec![13, 14]); + assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); + + A::remove_prefix(3, None); + assert_eq!(A::iter_prefix(3).collect::>(), vec![]); + assert_eq!(A::iter_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); + + assert_eq!(A::drain_prefix(4).collect::>(), vec![(40, 13), (41, 14)]); + assert_eq!(A::iter_prefix(4).collect::>(), vec![]); + assert_eq!(A::drain_prefix(4).collect::>(), vec![]); + }) + } +} diff --git a/frame/support/src/storage/types/key.rs b/frame/support/src/storage/types/key.rs new file mode 100755 index 0000000000000..da265fd6e6c87 --- /dev/null +++ b/frame/support/src/storage/types/key.rs @@ -0,0 +1,268 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage key type. + +use crate::hash::{ReversibleStorageHasher, StorageHasher}; +use codec::{Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use paste::paste; +use scale_info::StaticTypeInfo; +use sp_std::prelude::*; + +/// A type used exclusively by storage maps as their key type. +/// +/// The final key generated has the following form: +/// ```nocompile +/// Hasher1(encode(key1)) +/// ++ Hasher2(encode(key2)) +/// ++ ... +/// ++ HasherN(encode(keyN)) +/// ``` +pub struct Key(core::marker::PhantomData<(Hasher, KeyType)>); + +/// A trait that contains the current key as an associated type. 
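+///
+/// For instance (illustrative), an n-map key built from two hashed parts:
+/// ```nocompile
+/// type TwoKeys = (Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>);
+/// // `<TwoKeys as KeyGenerator>::final_key((1u16, 2u8))` concatenates both hashed parts.
+/// ```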
+pub trait KeyGenerator { + type Key: EncodeLike + StaticTypeInfo; + type KArg: Encode; + type HashFn: FnOnce(&[u8]) -> Vec; + type HArg; + + const HASHER_METADATA: &'static [crate::metadata::StorageHasher]; + + /// Given a `key` tuple, calculate the final key by encoding each element individually and + /// hashing them using the corresponding hasher in the `KeyGenerator`. + fn final_key + TupleToEncodedIter>(key: KArg) -> Vec; + /// Given a `key` tuple, migrate the keys from using the old hashers as given by `hash_fns` + /// to using the newer hashers as specified by this `KeyGenerator`. + fn migrate_key + TupleToEncodedIter>( + key: &KArg, + hash_fns: Self::HArg, + ) -> Vec; +} + +/// The maximum length used by the key in storage. +pub trait KeyGeneratorMaxEncodedLen: KeyGenerator { + fn key_max_encoded_len() -> usize; +} + +/// A trait containing methods that are only implemented on the Key struct instead of the entire +/// tuple. +pub trait KeyGeneratorInner: KeyGenerator { + type Hasher: StorageHasher; + + /// Hash a given `encoded` byte slice using the `KeyGenerator`'s associated `StorageHasher`. + fn final_hash(encoded: &[u8]) -> Vec; +} + +impl KeyGenerator for Key { + type Key = K; + type KArg = (K,); + type HashFn = Box Vec>; + type HArg = (Self::HashFn,); + + const HASHER_METADATA: &'static [crate::metadata::StorageHasher] = &[H::METADATA]; + + fn final_key + TupleToEncodedIter>(key: KArg) -> Vec { + H::hash(&key.to_encoded_iter().next().expect("should have at least one element!")) + .as_ref() + .to_vec() + } + + fn migrate_key + TupleToEncodedIter>( + key: &KArg, + hash_fns: Self::HArg, + ) -> Vec { + (hash_fns.0)(&key.to_encoded_iter().next().expect("should have at least one element!")) + } +} + +impl KeyGeneratorMaxEncodedLen + for Key +{ + fn key_max_encoded_len() -> usize { + H::max_len::() + } +} + +impl KeyGeneratorInner for Key { + type Hasher = H; + + fn final_hash(encoded: &[u8]) -> Vec { + H::hash(encoded).as_ref().to_vec() + } +} + +#[impl_trait_for_tuples::impl_for_tuples(1, 18)] +#[tuple_types_custom_trait_bound(KeyGeneratorInner)] +impl KeyGenerator for Tuple { + for_tuples!( type Key = ( #(Tuple::Key),* ); ); + for_tuples!( type KArg = ( #(Tuple::Key),* ); ); + for_tuples!( type HArg = ( #(Tuple::HashFn),* ); ); + type HashFn = Box Vec>; + + const HASHER_METADATA: &'static [crate::metadata::StorageHasher] = + &[for_tuples!( #(Tuple::Hasher::METADATA),* )]; + + fn final_key + TupleToEncodedIter>(key: KArg) -> Vec { + let mut final_key = Vec::new(); + let mut iter = key.to_encoded_iter(); + for_tuples!( + #( + let next_encoded = iter.next().expect("KArg number should be equal to Key number"); + final_key.extend_from_slice(&Tuple::final_hash(&next_encoded)); + )* + ); + final_key + } + + fn migrate_key + TupleToEncodedIter>( + key: &KArg, + hash_fns: Self::HArg, + ) -> Vec { + let mut migrated_key = Vec::new(); + let mut iter = key.to_encoded_iter(); + for_tuples!( + #( + let next_encoded = iter.next().expect("KArg number should be equal to Key number"); + migrated_key.extend_from_slice(&(hash_fns.Tuple)(&next_encoded)); + )* + ); + migrated_key + } +} + +#[impl_trait_for_tuples::impl_for_tuples(1, 18)] +#[tuple_types_custom_trait_bound(KeyGeneratorInner + KeyGeneratorMaxEncodedLen)] +impl KeyGeneratorMaxEncodedLen for Tuple { + fn key_max_encoded_len() -> usize { + let mut len = 0usize; + for_tuples!( + #( + len = len.saturating_add(Tuple::key_max_encoded_len()); + )* + ); + len + } +} + +/// Marker trait to indicate that each element in the tuple encodes like the 
corresponding element +/// in another tuple. +/// +/// This trait is sealed. +pub trait EncodeLikeTuple: crate::storage::private::Sealed {} + +macro_rules! impl_encode_like_tuples { + ($($elem:ident),+) => { + paste! { + impl<$($elem: Encode,)+ $([<$elem $elem>]: Encode + EncodeLike<$elem>,)+> + EncodeLikeTuple<($($elem,)+)> for + ($([<$elem $elem>],)+) {} + impl<$($elem: Encode,)+ $([<$elem $elem>]: Encode + EncodeLike<$elem>,)+> + EncodeLikeTuple<($($elem,)+)> for + &($([<$elem $elem>],)+) {} + } + }; +} + +impl_encode_like_tuples!(A); +impl_encode_like_tuples!(A, B); +impl_encode_like_tuples!(A, B, C); +impl_encode_like_tuples!(A, B, C, D); +impl_encode_like_tuples!(A, B, C, D, E); +impl_encode_like_tuples!(A, B, C, D, E, F); +impl_encode_like_tuples!(A, B, C, D, E, F, G); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M, O); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P, Q); +impl_encode_like_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, M, O, P, Q, R); + +/// Trait to indicate that a tuple can be converted into an iterator of a vector of encoded bytes. +pub trait TupleToEncodedIter { + fn to_encoded_iter(&self) -> sp_std::vec::IntoIter>; +} + +#[impl_trait_for_tuples::impl_for_tuples(1, 18)] +#[tuple_types_custom_trait_bound(Encode)] +impl TupleToEncodedIter for Tuple { + fn to_encoded_iter(&self) -> sp_std::vec::IntoIter> { + [for_tuples!( #(self.Tuple.encode()),* )].to_vec().into_iter() + } +} + +impl TupleToEncodedIter for &T { + fn to_encoded_iter(&self) -> sp_std::vec::IntoIter> { + (*self).to_encoded_iter() + } +} + +/// A trait that indicates the hashers for the keys generated are all reversible. +pub trait ReversibleKeyGenerator: KeyGenerator { + type ReversibleHasher; + fn decode_final_key(key_material: &[u8]) -> Result<(Self::Key, &[u8]), codec::Error>; +} + +impl ReversibleKeyGenerator + for Key +{ + type ReversibleHasher = H; + + fn decode_final_key(key_material: &[u8]) -> Result<(Self::Key, &[u8]), codec::Error> { + let mut current_key_material = Self::ReversibleHasher::reverse(key_material); + let key = K::decode(&mut current_key_material)?; + Ok((key, current_key_material)) + } +} + +#[impl_trait_for_tuples::impl_for_tuples(2, 18)] +#[tuple_types_custom_trait_bound(ReversibleKeyGenerator + KeyGeneratorInner)] +impl ReversibleKeyGenerator for Tuple { + for_tuples!( type ReversibleHasher = ( #(Tuple::ReversibleHasher),* ); ); + + fn decode_final_key(key_material: &[u8]) -> Result<(Self::Key, &[u8]), codec::Error> { + let mut current_key_material = key_material; + Ok(( + (for_tuples! { + #({ + let (key, material) = Tuple::decode_final_key(current_key_material)?; + current_key_material = material; + key + }),* + }), + current_key_material, + )) + } +} + +/// Trait indicating whether a KeyGenerator has the prefix P. +pub trait HasKeyPrefix

<P>: KeyGenerator { + type Suffix; + + fn partial_key(prefix: P) -> Vec<u8>; +} + +/// Trait indicating whether a ReversibleKeyGenerator has the prefix P. +pub trait HasReversibleKeyPrefix<P>: ReversibleKeyGenerator + HasKeyPrefix<P>

{ + fn decode_partial_key(key_material: &[u8]) -> Result; +} + +frame_support_procedural::impl_key_prefix_for_tuples!(); diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs new file mode 100644 index 0000000000000..45340f9015eaa --- /dev/null +++ b/frame/support/src/storage/types/map.rs @@ -0,0 +1,613 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage map type. Implements StorageMap, StorageIterableMap, StoragePrefixedMap traits and their +//! methods directly. + +use crate::{ + metadata::{StorageEntryMetadata, StorageEntryType}, + storage::{ + types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, + StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, + }, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use sp_arithmetic::traits::SaturatedConversion; +use sp_std::prelude::*; + +/// A type that allow to store value for given key. Allowing to insert/remove/iterate on values. +/// +/// Each value is stored at: +/// ```nocompile +/// Twox128(Prefix::pallet_prefix()) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key)) +/// ``` +/// +/// # Warning +/// +/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as +/// `blake2_128_concat` must be used. Otherwise, other values in storage can be compromised. 
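[Editor's note: before the `StorageMap` struct itself, it may help to see the key scheme documented above spelled out. The following is a minimal sketch, not part of the diff, that recomputes the final key for a hypothetical map keyed by `u16` with `Blake2_128Concat`; the pallet and storage names are invented, and only `sp-io` and `parity-scale-codec` are assumed as dependencies.]

```rust
use codec::Encode;
use sp_io::hashing::{blake2_128, twox_128};

/// Recompute the final storage key for a hypothetical map `Example::Accounts`
/// keyed by `u16` with the `Blake2_128Concat` hasher.
fn final_key_for(key: u16) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend_from_slice(&twox_128(b"Example"));  // Twox128(Prefix::pallet_prefix())
    out.extend_from_slice(&twox_128(b"Accounts")); // Twox128(Prefix::STORAGE_PREFIX)
    let encoded = key.encode();                    // SCALE encoding of the key
    out.extend_from_slice(&blake2_128(&encoded));  // Blake2_128Concat is the 16-byte hash
    out.extend_from_slice(&encoded);               // ...followed by the raw encoding
    out
}
```

The trailing `encoded` bytes are what make `Blake2_128Concat` reversible, which the key-decoding and iteration APIs in this file rely on.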
+pub struct StorageMap< + Prefix, + Hasher, + Key, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>(core::marker::PhantomData<(Prefix, Hasher, Key, Value, QueryKind, OnEmpty, MaxValues)>); + +impl + crate::storage::generator::StorageMap + for StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + type Query = QueryKind::Query; + type Hasher = Hasher; + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } + fn from_optional_value_to_query(v: Option) -> Self::Query { + QueryKind::from_optional_value_to_query(v) + } + fn from_query_to_optional_value(v: Self::Query) -> Option { + QueryKind::from_query_to_optional_value(v) + } +} + +impl StoragePrefixedMap + for StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn module_prefix() -> &'static [u8] { + >::module_prefix() + } + fn storage_prefix() -> &'static [u8] { + >::storage_prefix() + } +} + +impl + StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Get the storage key used to fetch a value corresponding to a specific key. + pub fn hashed_key_for>(key: KeyArg) -> Vec { + >::hashed_key_for(key) + } + + /// Does the value (explicitly) exist in storage? + pub fn contains_key>(key: KeyArg) -> bool { + >::contains_key(key) + } + + /// Load the value associated with the given key from the map. + pub fn get>(key: KeyArg) -> QueryKind::Query { + >::get(key) + } + + /// Try to get the value for the given key from the map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get>(key: KeyArg) -> Result { + >::try_get(key) + } + + /// Swap the values of two keys. + pub fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2) { + >::swap(key1, key2) + } + + /// Store a value to be associated with the given key from the map. + pub fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { + >::insert(key, val) + } + + /// Remove the value under a key. + pub fn remove>(key: KeyArg) { + >::remove(key) + } + + /// Mutate the value under a key. + pub fn mutate, R, F: FnOnce(&mut QueryKind::Query) -> R>( + key: KeyArg, + f: F, + ) -> R { + >::mutate(key, f) + } + + /// Mutate the item, only if an `Ok` value is returned. + pub fn try_mutate(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + >::try_mutate(key, f) + } + + /// Mutate the value under a key. Deletes the item if mutated to a `None`. + pub fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R { + >::mutate_exists(key, f) + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + pub fn try_mutate_exists(key: KeyArg, f: F) -> Result + where + KeyArg: EncodeLike, + F: FnOnce(&mut Option) -> Result, + { + >::try_mutate_exists(key, f) + } + + /// Take the value under a key. + pub fn take>(key: KeyArg) -> QueryKind::Query { + >::take(key) + } + + /// Append the given items to the value in the storage. + /// + /// `Value` is required to implement `codec::EncodeAppend`. 
+ /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten + /// and set to `[item]`. Any default value set for the storage item will be ignored + /// on overwrite. + pub fn append(key: EncodeLikeKey, item: EncodeLikeItem) + where + EncodeLikeKey: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + >::append(key, item) + } + + /// Read the length of the storage value without decoding the entire value under the + /// given `key`. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len>(key: KeyArg) -> Option + where + Value: StorageDecodeLength, + { + >::decode_len(key) + } + + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + pub fn migrate_key>( + key: KeyArg, + ) -> Option { + >::migrate_key::(key) + } + + /// Remove all value of the storage. + pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + >::remove_all(limit) + } + + /// Iter over all value of the storage. + /// + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. + pub fn iter_values() -> crate::storage::PrefixIterator { + >::iter_values() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. + pub fn translate_values Option>(f: F) { + >::translate_values(f) + } + + /// Try and append the given item to the value in the storage. + /// + /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. + pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> + where + KArg: EncodeLike + Clone, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageTryAppend, + { + >::try_append(key, item) + } +} + +impl + StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher + crate::ReversibleStorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Enumerate all elements in the map in no particular order. + /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter() -> crate::storage::PrefixIterator<(Key, Value)> { + >::iter() + } + + /// Enumerate all elements in the map after a specified `starting_raw_key` in no + /// particular order. + /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter_from(starting_raw_key: Vec) -> crate::storage::PrefixIterator<(Key, Value)> { + >::iter_from(starting_raw_key) + } + + /// Enumerate all keys in the map in no particular order. 
+ /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter_keys() -> crate::storage::KeyPrefixIterator { + >::iter_keys() + } + + /// Enumerate all keys in the map after a specified `starting_raw_key` in no particular + /// order. + /// + /// If you alter the map while doing this, you'll get undefined results. + pub fn iter_keys_from(starting_raw_key: Vec) -> crate::storage::KeyPrefixIterator { + >::iter_keys_from(starting_raw_key) + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain() -> crate::storage::PrefixIterator<(Key, Value)> { + >::drain() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + pub fn translate Option>(f: F) { + >::translate(f) + } +} + +impl StorageEntryMetadataBuilder + for StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec + scale_info::StaticTypeInfo, + Value: FullCodec + scale_info::StaticTypeInfo, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Map { + hashers: vec![Hasher::METADATA], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); + } +} + +impl crate::traits::StorageInfoTrait + for StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec + MaxEncodedLen, + Value: FullCodec + MaxEncodedLen, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: Some( + Hasher::max_len::() + .saturating_add(Value::max_encoded_len()) + .saturated_into(), + ), + }] + } +} + +/// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. 
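[Editor's note: before the `PartialStorageInfoTrait` implementation below, a short usage sketch of the `translate` helper defined above, applied to a value-type migration. This is illustrative only: the `MapPrefix`/`MyMap` names are invented, and it assumes the map uses a reversible hasher such as `Twox64Concat`.]

```rust
use frame_support::{storage::types::StorageMap, traits::StorageInstance, Twox64Concat};

struct MapPrefix;
impl StorageInstance for MapPrefix {
    fn pallet_prefix() -> &'static str {
        "MyPallet"
    }
    const STORAGE_PREFIX: &'static str = "MyMap";
}

// The map as declared *after* the upgrade: values are now `u64`.
type MyMap = StorageMap<MapPrefix, Twox64Concat, u32, u64>;

fn migrate_values() {
    // Old values were stored as `u32`; decode them as such and widen.
    // Returning `None` from the closure would remove the entry instead.
    MyMap::translate::<u32, _>(|_key, old: u32| Some(old as u64));
}
```

As the doc comments above stress, nothing else should read the map between the runtime upgrade and this call, since `get` would fail to decode the old encoding.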
+impl + crate::traits::PartialStorageInfoTrait + for StorageMap +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn partial_storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: None, + }] + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + hash::*, + metadata::{StorageEntryModifier, StorageEntryType, StorageHasher}, + storage::types::ValueQuery, + }; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test() { + type A = StorageMap; + type AValueQueryWithAnOnEmpty = + StorageMap; + type B = StorageMap; + type C = StorageMap; + type WithLen = StorageMap>; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"foo")); + k.extend(&3u16.blake2_128_concat()); + assert_eq!(A::hashed_key_for(3).to_vec(), k); + + assert_eq!(A::contains_key(3), false); + assert_eq!(A::get(3), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(3), 97); + + A::insert(3, 10); + assert_eq!(A::contains_key(3), true); + assert_eq!(A::get(3), Some(10)); + assert_eq!(A::try_get(3), Ok(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(3), 10); + + A::swap(3, 2); + assert_eq!(A::contains_key(3), false); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(3), None); + assert_eq!(A::try_get(3), Err(())); + assert_eq!(AValueQueryWithAnOnEmpty::get(3), 97); + assert_eq!(A::get(2), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get(2), 10); + + A::remove(2); + assert_eq!(A::contains_key(2), false); + assert_eq!(A::get(2), None); + + AValueQueryWithAnOnEmpty::mutate(2, |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate(2, |v| *v = *v * 2); + assert_eq!(AValueQueryWithAnOnEmpty::contains_key(2), true); + assert_eq!(AValueQueryWithAnOnEmpty::get(2), 97 * 4); + + A::remove(2); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { + *v = *v * 2; + Ok(()) + }); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { + *v = *v * 2; + Ok(()) + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(97 * 4)); + + A::remove(2); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key(2), false); + + A::remove(2); + AValueQueryWithAnOnEmpty::mutate_exists(2, |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists(2, |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(100)); + + A::remove(2); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key(2), true); + 
assert_eq!(A::get(2), Some(100)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists(2, |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(100)); + + A::insert(2, 10); + assert_eq!(A::take(2), Some(10)); + assert_eq!(A::contains_key(2), false); + assert_eq!(AValueQueryWithAnOnEmpty::take(2), 97); + assert_eq!(A::contains_key(2), false); + + B::insert(2, 10); + assert_eq!(A::migrate_key::(2), Some(10)); + assert_eq!(A::contains_key(2), true); + assert_eq!(A::get(2), Some(10)); + + A::insert(3, 10); + A::insert(4, 10); + A::remove_all(None); + assert_eq!(A::contains_key(3), false); + assert_eq!(A::contains_key(4), false); + + A::insert(3, 10); + A::insert(4, 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + + C::insert(3, 10); + C::insert(4, 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 20), (3, 20)]); + + A::insert(3, 10); + A::insert(4, 10); + assert_eq!(A::iter().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::drain().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + + C::insert(3, 10); + C::insert(4, 10); + A::translate::(|k, v| Some((k * v as u16).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: 97u32.encode(), + docs: vec![], + } + ] + ); + + WithLen::remove_all(None); + assert_eq!(WithLen::decode_len(3), None); + WithLen::append(0, 10); + assert_eq!(WithLen::decode_len(0), Some(1)); + }) + } +} diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs new file mode 100644 index 0000000000000..bcab996f68323 --- /dev/null +++ b/frame/support/src/storage/types/mod.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage types to build abstraction on storage, they implements storage traits such as +//! StorageMap and others. 
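[Editor's note: the difference between the two query kinds defined in this module is easiest to see side by side. A minimal sketch under stated assumptions: the `Prefix` instance and type aliases are invented, and `sp-io`'s `TestExternalities` is assumed as the storage backend. Both aliases point at the same underlying key and differ only in how an absent value is reported.]

```rust
use frame_support::{
    storage::types::{OptionQuery, StorageValue, ValueQuery},
    traits::StorageInstance,
};

struct Prefix;
impl StorageInstance for Prefix {
    fn pallet_prefix() -> &'static str {
        "Example"
    }
    const STORAGE_PREFIX: &'static str = "Number";
}

// Two views of the same storage slot.
type AsOption = StorageValue<Prefix, u32, OptionQuery>;
type AsValue = StorageValue<Prefix, u32, ValueQuery>;

fn demo() {
    sp_io::TestExternalities::default().execute_with(|| {
        // Nothing written yet: OptionQuery reports absence, while ValueQuery
        // falls back to `OnEmpty` (`GetDefault`, i.e. 0 for u32).
        assert_eq!(AsOption::get(), None);
        assert_eq!(AsValue::get(), 0);
    });
}
```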
+ +use crate::metadata::{StorageEntryMetadata, StorageEntryModifier}; +use codec::FullCodec; +use sp_std::prelude::*; + +mod counted_map; +mod double_map; +mod key; +mod map; +mod nmap; +mod value; + +pub use counted_map::{CountedStorageMap, CountedStorageMapInstance}; +pub use double_map::StorageDoubleMap; +pub use key::{ + EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, Key, KeyGenerator, + KeyGeneratorMaxEncodedLen, ReversibleKeyGenerator, TupleToEncodedIter, +}; +pub use map::StorageMap; +pub use nmap::StorageNMap; +pub use value::StorageValue; + +/// Trait implementing how the storage optional value is converted into the queried type. +/// +/// It is implemented by: +/// * `OptionQuery` which convert an optional value to an optional value, user when querying storage +/// will get an optional value. +/// * `ValueQuery` which convert an optional value to a value, user when querying storage will get a +/// value. +pub trait QueryKindTrait { + /// Metadata for the storage kind. + const METADATA: StorageEntryModifier; + + /// Type returned on query + type Query: FullCodec + 'static; + + /// Convert an optional value (i.e. some if trie contains the value or none otherwise) to the + /// query. + fn from_optional_value_to_query(v: Option) -> Self::Query; + + /// Convert a query to an optional value. + fn from_query_to_optional_value(v: Self::Query) -> Option; +} + +/// Implement QueryKindTrait with query being `Option` +/// +/// NOTE: it doesn't support a generic `OnEmpty`. This means only `None` can be +/// returned when no value is found. To use another `OnEmpty` implementation, `ValueQuery` can be +/// used instead. +pub struct OptionQuery; +impl QueryKindTrait for OptionQuery +where + Value: FullCodec + 'static, +{ + const METADATA: StorageEntryModifier = StorageEntryModifier::Optional; + + type Query = Option; + + fn from_optional_value_to_query(v: Option) -> Self::Query { + // NOTE: OnEmpty is fixed to GetDefault, thus it returns `None` on no value. + v + } + + fn from_query_to_optional_value(v: Self::Query) -> Option { + v + } +} + +/// Implement QueryKindTrait with query being `Value` +pub struct ValueQuery; +impl QueryKindTrait for ValueQuery +where + Value: FullCodec + 'static, + OnEmpty: crate::traits::Get, +{ + const METADATA: StorageEntryModifier = StorageEntryModifier::Default; + + type Query = Value; + + fn from_optional_value_to_query(v: Option) -> Self::Query { + v.unwrap_or_else(|| OnEmpty::get()) + } + + fn from_query_to_optional_value(v: Self::Query) -> Option { + Some(v) + } +} + +/// Build the metadata of a storage. +/// +/// Implemented by each of the storage types: value, map, countedmap, doublemap and nmap. +pub trait StorageEntryMetadataBuilder { + /// Build into `entries` the storage metadata entries of a storage given some `docs`. + fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); +} diff --git a/frame/support/src/storage/types/nmap.rs b/frame/support/src/storage/types/nmap.rs new file mode 100755 index 0000000000000..96d6f383ae117 --- /dev/null +++ b/frame/support/src/storage/types/nmap.rs @@ -0,0 +1,1156 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage map type. Implements StorageDoubleMap, StorageIterableDoubleMap, +//! StoragePrefixedDoubleMap traits and their methods directly. + +use crate::{ + metadata::{StorageEntryMetadata, StorageEntryType}, + storage::{ + types::{ + EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, OptionQuery, QueryKindTrait, + StorageEntryMetadataBuilder, TupleToEncodedIter, + }, + KeyGenerator, PrefixIterator, StorageAppend, StorageDecodeLength, StoragePrefixedMap, + }, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use sp_runtime::SaturatedConversion; +use sp_std::prelude::*; + +/// A type that allow to store values for an arbitrary number of keys in the form of +/// `(Key, Key, ..., Key)`. +/// +/// Each value is stored at: +/// ```nocompile +/// Twox128(Prefix::pallet_prefix()) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key1)) +/// ++ Hasher2(encode(key2)) +/// ++ ... +/// ++ HasherN(encode(keyN)) +/// ``` +/// +/// # Warning +/// +/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` +/// such as `blake2_128_concat` must be used for the key hashers. Otherwise, other values +/// in storage can be compromised. +pub struct StorageNMap< + Prefix, + Key, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>(core::marker::PhantomData<(Prefix, Key, Value, QueryKind, OnEmpty, MaxValues)>); + +impl + crate::storage::generator::StorageNMap + for StorageNMap +where + Prefix: StorageInstance, + Key: super::key::KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + type Query = QueryKind::Query; + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } + fn from_optional_value_to_query(v: Option) -> Self::Query { + QueryKind::from_optional_value_to_query(v) + } + fn from_query_to_optional_value(v: Self::Query) -> Option { + QueryKind::from_query_to_optional_value(v) + } +} + +impl crate::storage::StoragePrefixedMap + for StorageNMap +where + Prefix: StorageInstance, + Key: super::key::KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn module_prefix() -> &'static [u8] { + >::module_prefix() + } + fn storage_prefix() -> &'static [u8] { + >::storage_prefix() + } +} + +impl + StorageNMap +where + Prefix: StorageInstance, + Key: super::key::KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Get the storage key used to fetch a value corresponding to a specific key. + pub fn hashed_key_for + TupleToEncodedIter>( + key: KArg, + ) -> Vec { + >::hashed_key_for(key) + } + + /// Does the value (explicitly) exist in storage? + pub fn contains_key + TupleToEncodedIter>(key: KArg) -> bool { + >::contains_key(key) + } + + /// Load the value associated with the given key from the map. 
+ pub fn get + TupleToEncodedIter>( + key: KArg, + ) -> QueryKind::Query { + >::get(key) + } + + /// Try to get the value for the given key from the map. + /// + /// Returns `Ok` if it exists, `Err` if not. + pub fn try_get + TupleToEncodedIter>( + key: KArg, + ) -> Result { + >::try_get(key) + } + + /// Take a value from storage, removing it afterwards. + pub fn take + TupleToEncodedIter>( + key: KArg, + ) -> QueryKind::Query { + >::take(key) + } + + /// Swap the values of two key-pairs. + pub fn swap(key1: KArg1, key2: KArg2) + where + KOther: KeyGenerator, + KArg1: EncodeLikeTuple + TupleToEncodedIter, + KArg2: EncodeLikeTuple + TupleToEncodedIter, + { + >::swap::(key1, key2) + } + + /// Store a value to be associated with the given keys from the map. + pub fn insert(key: KArg, val: VArg) + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + VArg: EncodeLike, + { + >::insert(key, val) + } + + /// Remove the value under the given keys. + pub fn remove + TupleToEncodedIter>(key: KArg) { + >::remove(key) + } + + /// Remove all values under the first key. + pub fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult + where + Key: HasKeyPrefix, + { + >::remove_prefix(partial_key, limit) + } + + /// Iterate over values that share the first key. + pub fn iter_prefix_values(partial_key: KP) -> PrefixIterator + where + Key: HasKeyPrefix, + { + >::iter_prefix_values(partial_key) + } + + /// Mutate the value under the given keys. + pub fn mutate(key: KArg, f: F) -> R + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut QueryKind::Query) -> R, + { + >::mutate(key, f) + } + + /// Mutate the value under the given keys when the closure returns `Ok`. + pub fn try_mutate(key: KArg, f: F) -> Result + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut QueryKind::Query) -> Result, + { + >::try_mutate(key, f) + } + + /// Mutate the value under the given keys. Deletes the item if mutated to a `None`. + pub fn mutate_exists(key: KArg, f: F) -> R + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut Option) -> R, + { + >::mutate_exists(key, f) + } + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + pub fn try_mutate_exists(key: KArg, f: F) -> Result + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + F: FnOnce(&mut Option) -> Result, + { + >::try_mutate_exists(key, f) + } + + /// Append the given item to the value in the storage. + /// + /// `Value` is required to implement [`StorageAppend`]. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage will be overwritten + /// and set to `[item]`. Any default value set for the storage item will be ignored + /// on overwrite. + pub fn append(key: KArg, item: EncodeLikeItem) + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + >::append(key, item) + } + + /// Read the length of the storage value without decoding the entire value under the + /// given `key1` and `key2`. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. 
+ pub fn decode_len + TupleToEncodedIter>( + key: KArg, + ) -> Option + where + Value: StorageDecodeLength, + { + >::decode_len(key) + } + + /// Migrate an item with the given `key` from defunct `hash_fns` to the current hashers. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + pub fn migrate_keys(key: KArg, hash_fns: Key::HArg) -> Option + where + KArg: EncodeLikeTuple + TupleToEncodedIter, + { + >::migrate_keys::<_>(key, hash_fns) + } + + /// Remove all value of the storage. + pub fn remove_all(limit: Option) -> sp_io::KillStorageResult { + >::remove_all(limit) + } + + /// Iter over all value of the storage. + /// + /// NOTE: If a value failed to decode because storage is corrupted then it is skipped. + pub fn iter_values() -> crate::storage::PrefixIterator { + >::iter_values() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade. + pub fn translate_values Option>(f: F) { + >::translate_values(f) + } +} + +impl + StorageNMap +where + Prefix: StorageInstance, + Key: super::key::ReversibleKeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + /// Enumerate all elements in the map with prefix key `kp` in no particular order. + /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_prefix( + kp: KP, + ) -> crate::storage::PrefixIterator<(>::Suffix, Value)> + where + Key: HasReversibleKeyPrefix, + { + >::iter_prefix(kp) + } + + /// Enumerate all elements in the map with prefix key `kp` after a specified `starting_raw_key` + /// in no particular order. + /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> crate::storage::PrefixIterator<(>::Suffix, Value)> + where + Key: HasReversibleKeyPrefix, + { + >::iter_prefix_from( + kp, + starting_raw_key, + ) + } + + /// Enumerate all suffix keys in the map with prefix key `kp` in no particular order. + /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_key_prefix( + kp: KP, + ) -> crate::storage::KeyPrefixIterator<>::Suffix> + where + Key: HasReversibleKeyPrefix, + { + >::iter_key_prefix(kp) + } + + /// Enumerate all suffix keys in the map with prefix key `kp` after a specified + /// `starting_raw_key` in no particular order. + /// + /// If you add or remove values whose prefix key is `kp` to the map while doing this, you'll get + /// undefined results. + pub fn iter_key_prefix_from( + kp: KP, + starting_raw_key: Vec, + ) -> crate::storage::KeyPrefixIterator<>::Suffix> + where + Key: HasReversibleKeyPrefix, + { + >::iter_key_prefix_from( + kp, + starting_raw_key, + ) + } + + /// Remove all elements from the map with prefix key `kp` and iterate through them in no + /// particular order. 
+ /// + /// If you add elements with prefix key `k1` to the map while doing this, you'll get undefined + /// results. + pub fn drain_prefix( + kp: KP, + ) -> crate::storage::PrefixIterator<(>::Suffix, Value)> + where + Key: HasReversibleKeyPrefix, + { + >::drain_prefix(kp) + } + + /// Enumerate all elements in the map in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter() -> crate::storage::PrefixIterator<(Key::Key, Value)> { + >::iter() + } + + /// Enumerate all elements in the map after a specified `starting_key` in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_from( + starting_raw_key: Vec, + ) -> crate::storage::PrefixIterator<(Key::Key, Value)> { + >::iter_from(starting_raw_key) + } + + /// Enumerate all keys in the map in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_keys() -> crate::storage::KeyPrefixIterator { + >::iter_keys() + } + + /// Enumerate all keys in the map after a specified `starting_raw_key` in no particular order. + /// + /// If you add or remove values to the map while doing this, you'll get undefined results. + pub fn iter_keys_from( + starting_raw_key: Vec, + ) -> crate::storage::KeyPrefixIterator { + >::iter_keys_from(starting_raw_key) + } + + /// Remove all elements from the map and iterate through them in no particular order. + /// + /// If you add elements to the map while doing this, you'll get undefined results. + pub fn drain() -> crate::storage::PrefixIterator<(Key::Key, Value)> { + >::drain() + } + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// + /// By returning `None` from `f` for an element, you'll remove it from the map. + /// + /// NOTE: If a value fail to decode because storage is corrupted then it is skipped. + pub fn translate Option>(f: F) { + >::translate(f) + } +} + +impl StorageEntryMetadataBuilder + for StorageNMap +where + Prefix: StorageInstance, + Key: super::key::KeyGenerator, + Value: FullCodec + scale_info::StaticTypeInfo, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + hashers: Key::HASHER_METADATA.iter().cloned().collect(), + value: scale_info::meta_type::(), + }, + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); + } +} + +impl crate::traits::StorageInfoTrait + for StorageNMap +where + Prefix: StorageInstance, + Key: super::key::KeyGenerator + super::key::KeyGeneratorMaxEncodedLen, + Value: FullCodec + MaxEncodedLen, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: Some( + Key::key_max_encoded_len() + .saturating_add(Value::max_encoded_len()) + .saturated_into(), + ), + }] + } +} + +/// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. 
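[Editor's note: before the `PartialStorageInfoTrait` implementation for `StorageNMap` below, a sketch of how the N-key API above is used in practice. The `NMapPrefix`/`Scores` names are invented; `sp-io` is assumed for the test backend.]

```rust
use frame_support::{
    storage::types::{Key, StorageNMap},
    traits::StorageInstance,
    Blake2_128Concat, Twox64Concat,
};

struct NMapPrefix;
impl StorageInstance for NMapPrefix {
    fn pallet_prefix() -> &'static str {
        "Example"
    }
    const STORAGE_PREFIX: &'static str = "Scores";
}

// Keyed by `(group: u16, member: u8)`.
type Scores =
    StorageNMap<NMapPrefix, (Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>), u32>;

fn demo() {
    sp_io::TestExternalities::default().execute_with(|| {
        Scores::insert((1u16, 10u8), 100u32);
        Scores::insert((1u16, 11u8), 200u32);
        Scores::insert((2u16, 10u8), 300u32);

        // Enumerate everything under group 1; iteration order is undefined,
        // so sort before asserting.
        let mut under_one: Vec<(u8, u32)> = Scores::iter_prefix((1u16,)).collect();
        under_one.sort();
        assert_eq!(under_one, vec![(10, 100), (11, 200)]);
    });
}
```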
+impl crate::traits::PartialStorageInfoTrait + for StorageNMap +where + Prefix: StorageInstance, + Key: super::key::KeyGenerator, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: Get + 'static, + MaxValues: Get>, +{ + fn partial_storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: None, + }] + } +} +#[cfg(test)] +mod test { + use super::*; + use crate::{ + hash::{StorageHasher as _, *}, + metadata::{StorageEntryModifier, StorageHasher}, + storage::types::{Key, ValueQuery}, + }; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "Foo"; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 98 + } + } + + #[test] + fn test_1_key() { + type A = StorageNMap, u32, OptionQuery>; + type AValueQueryWithAnOnEmpty = + StorageNMap, u32, ValueQuery, ADefault>; + type B = StorageNMap, u32, ValueQuery>; + type C = StorageNMap, u8, ValueQuery>; + type WithLen = StorageNMap, Vec>; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"Foo")); + k.extend(&3u16.blake2_128_concat()); + assert_eq!(A::hashed_key_for((&3,)).to_vec(), k); + + assert_eq!(A::contains_key((3,)), false); + assert_eq!(A::get((3,)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((3,)), 98); + + A::insert((3,), 10); + assert_eq!(A::contains_key((3,)), true); + assert_eq!(A::get((3,)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((3,)), 10); + + { + crate::generate_storage_alias!(test, Foo => NMap< + Key<(u16, Blake2_128Concat)>, + u32 + >); + + assert_eq!(Foo::contains_key((3,)), true); + assert_eq!(Foo::get((3,)), Some(10)); + } + + A::swap::, _, _>((3,), (2,)); + assert_eq!(A::contains_key((3,)), false); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((3,)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((3,)), 98); + assert_eq!(A::get((2,)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((2,)), 10); + + A::remove((2,)); + assert_eq!(A::contains_key((2,)), false); + assert_eq!(A::get((2,)), None); + + AValueQueryWithAnOnEmpty::mutate((2,), |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate((2,), |v| *v = *v * 2); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(98 * 4)); + + A::remove((2,)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2,), |v| { + *v = *v * 2; + Ok(()) + }); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2,), |v| { + *v = *v * 2; + Ok(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(98 * 4)); + + A::remove((2,)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2,), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2,)), false); + + A::remove((2,)); + AValueQueryWithAnOnEmpty::mutate_exists((2,), |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists((2,), |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(100)); + + A::remove((2,)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2,), |v| { + 
assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2,), |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(100)); + assert_eq!(A::try_get((2,)), Ok(100)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2,), |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(100)); + + A::insert((2,), 10); + assert_eq!(A::take((2,)), Some(10)); + assert_eq!(A::contains_key((2,)), false); + assert_eq!(AValueQueryWithAnOnEmpty::take((2,)), 98); + assert_eq!(A::contains_key((2,)), false); + assert_eq!(A::try_get((2,)), Err(())); + + B::insert((2,), 10); + assert_eq!( + A::migrate_keys((2,), (Box::new(|key| Blake2_256::hash(key).to_vec()),),), + Some(10) + ); + assert_eq!(A::contains_key((2,)), true); + assert_eq!(A::get((2,)), Some(10)); + + A::insert((3,), 10); + A::insert((4,), 10); + A::remove_all(None); + assert_eq!(A::contains_key((3,)), false); + assert_eq!(A::contains_key((4,)), false); + + A::insert((3,), 10); + A::insert((4,), 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + + C::insert((3,), 10); + C::insert((4,), 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 20), (3, 20)]); + + A::insert((3,), 10); + A::insert((4,), 10); + assert_eq!(A::iter().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::drain().collect::>(), vec![(4, 10), (3, 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + + C::insert((3,), 10); + C::insert((4,), 10); + A::translate::(|k1, v| Some((k1 as u16 * v as u16).into())); + assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + } + ] + ); + + WithLen::remove_all(None); + assert_eq!(WithLen::decode_len((3,)), None); + WithLen::append((0,), 10); + assert_eq!(WithLen::decode_len((0,)), Some(1)); + }); + } + + #[test] + fn test_2_keys() { + type A = StorageNMap< + Prefix, + (Key, Key), + u32, + OptionQuery, + >; + type AValueQueryWithAnOnEmpty = StorageNMap< + Prefix, + (Key, Key), + u32, + ValueQuery, + ADefault, + >; + type B = StorageNMap, Key), u32, ValueQuery>; + type C = StorageNMap< + Prefix, + (Key, Key), + u8, + ValueQuery, + >; + type WithLen = + StorageNMap, Key), Vec>; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"Foo")); + k.extend(&3u16.blake2_128_concat()); + k.extend(&30u8.twox_64_concat()); + assert_eq!(A::hashed_key_for((3, 30)).to_vec(), k); + + assert_eq!(A::contains_key((3, 30)), false); + assert_eq!(A::get((3, 30)), None); + 
assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 98); + + A::insert((3, 30), 10); + assert_eq!(A::contains_key((3, 30)), true); + assert_eq!(A::get((3, 30)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 10); + + A::swap::<(Key, Key), _, _>((3, 30), (2, 20)); + assert_eq!(A::contains_key((3, 30)), false); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((3, 30)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 98); + assert_eq!(A::get((2, 20)), Some(10)); + assert_eq!(AValueQueryWithAnOnEmpty::get((2, 20)), 10); + + A::remove((2, 20)); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(A::get((2, 20)), None); + + AValueQueryWithAnOnEmpty::mutate((2, 20), |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate((2, 20), |v| *v = *v * 2); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(98 * 4)); + + A::remove((2, 20)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2, 20), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2, 20)), false); + + A::remove((2, 20)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate((2, 20), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2, 20)), false); + + A::remove((2, 20)); + AValueQueryWithAnOnEmpty::mutate_exists((2, 20), |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists((2, 20), |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(100)); + + A::remove((2, 20)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20), |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(10)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20), |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(100)); + assert_eq!(A::try_get((2, 20)), Ok(100)); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20), |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(100)); + + A::insert((2, 20), 10); + assert_eq!(A::take((2, 20)), Some(10)); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(AValueQueryWithAnOnEmpty::take((2, 20)), 98); + assert_eq!(A::contains_key((2, 20)), false); + assert_eq!(A::try_get((2, 20)), Err(())); + + B::insert((2, 20), 10); + assert_eq!( + A::migrate_keys( + (2, 20), + ( + Box::new(|key| Blake2_256::hash(key).to_vec()), + Box::new(|key| Twox128::hash(key).to_vec()), + ), + ), + Some(10) + ); + assert_eq!(A::contains_key((2, 20)), true); + assert_eq!(A::get((2, 20)), Some(10)); + + A::insert((3, 30), 10); + A::insert((4, 40), 10); + A::remove_all(None); + assert_eq!(A::contains_key((3, 30)), false); + assert_eq!(A::contains_key((4, 40)), false); + + A::insert((3, 30), 10); + A::insert((4, 40), 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + + C::insert((3, 30), 10); + C::insert((4, 40), 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 20), ((3, 30), 20)]); + + A::insert((3, 30), 10); + A::insert((4, 40), 10); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 10), ((3, 30), 10)]); + 
assert_eq!(A::drain().collect::>(), vec![((4, 40), 10), ((3, 30), 10)]); + assert_eq!(A::iter().collect::>(), vec![]); + + C::insert((3, 30), 10); + C::insert((4, 40), 10); + A::translate::(|(k1, k2), v| Some((k1 * k2 as u16 * v as u16).into())); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 1600), ((3, 30), 900)]); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u8)>(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + } + ] + ); + + WithLen::remove_all(None); + assert_eq!(WithLen::decode_len((3, 30)), None); + WithLen::append((0, 100), 10); + assert_eq!(WithLen::decode_len((0, 100)), Some(1)); + + A::insert((3, 30), 11); + A::insert((3, 31), 12); + A::insert((4, 40), 13); + A::insert((4, 41), 14); + assert_eq!(A::iter_prefix_values((3,)).collect::>(), vec![12, 11]); + assert_eq!(A::iter_prefix_values((4,)).collect::>(), vec![13, 14]); + }); + } + + #[test] + fn test_3_keys() { + type A = StorageNMap< + Prefix, + (Key, Key, Key), + u32, + OptionQuery, + >; + type AValueQueryWithAnOnEmpty = StorageNMap< + Prefix, + (Key, Key, Key), + u32, + ValueQuery, + ADefault, + >; + type B = StorageNMap< + Prefix, + (Key, Key, Key), + u32, + ValueQuery, + >; + type C = StorageNMap< + Prefix, + (Key, Key, Key), + u8, + ValueQuery, + >; + type WithLen = StorageNMap< + Prefix, + (Key, Key, Key), + Vec, + >; + + TestExternalities::default().execute_with(|| { + let mut k: Vec = vec![]; + k.extend(&twox_128(b"test")); + k.extend(&twox_128(b"Foo")); + k.extend(&1u16.blake2_128_concat()); + k.extend(&10u16.blake2_128_concat()); + k.extend(&100u16.twox_64_concat()); + assert_eq!(A::hashed_key_for((1, 10, 100)).to_vec(), k); + + assert_eq!(A::contains_key((1, 10, 100)), false); + assert_eq!(A::get((1, 10, 100)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 98); + + A::insert((1, 10, 100), 30); + assert_eq!(A::contains_key((1, 10, 100)), true); + assert_eq!(A::get((1, 10, 100)), Some(30)); + assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 30); + + A::swap::< + (Key, Key, Key), + _, + _, + >((1, 10, 100), (2, 20, 200)); + assert_eq!(A::contains_key((1, 10, 100)), false); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((1, 10, 100)), None); + assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 98); + assert_eq!(A::get((2, 20, 200)), Some(30)); + assert_eq!(AValueQueryWithAnOnEmpty::get((2, 20, 200)), 30); + + A::remove((2, 20, 200)); + assert_eq!(A::contains_key((2, 20, 200)), false); + assert_eq!(A::get((2, 20, 200)), None); + + AValueQueryWithAnOnEmpty::mutate((2, 20, 200), |v| *v = *v * 2); + AValueQueryWithAnOnEmpty::mutate((2, 20, 200), |v| *v = *v * 2); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(98 * 4)); + + A::remove((2, 20, 200)); + let _: Result<(), ()> = 
AValueQueryWithAnOnEmpty::try_mutate((2, 20, 200), |v| { + *v = *v * 2; + Err(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), false); + + A::remove((2, 20, 200)); + AValueQueryWithAnOnEmpty::mutate_exists((2, 20, 200), |v| { + assert!(v.is_none()); + *v = Some(10); + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(10)); + AValueQueryWithAnOnEmpty::mutate_exists((2, 20, 200), |v| { + *v = Some(v.unwrap() * 10); + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(100)); + + A::remove((2, 20, 200)); + let _: Result<(), ()> = + AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20, 200), |v| { + assert!(v.is_none()); + *v = Some(10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(10)); + let _: Result<(), ()> = + AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20, 200), |v| { + *v = Some(v.unwrap() * 10); + Ok(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(100)); + assert_eq!(A::try_get((2, 20, 200)), Ok(100)); + let _: Result<(), ()> = + AValueQueryWithAnOnEmpty::try_mutate_exists((2, 20, 200), |v| { + *v = Some(v.unwrap() * 10); + Err(()) + }); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(100)); + + A::insert((2, 20, 200), 10); + assert_eq!(A::take((2, 20, 200)), Some(10)); + assert_eq!(A::contains_key((2, 20, 200)), false); + assert_eq!(AValueQueryWithAnOnEmpty::take((2, 20, 200)), 98); + assert_eq!(A::contains_key((2, 20, 200)), false); + assert_eq!(A::try_get((2, 20, 200)), Err(())); + + B::insert((2, 20, 200), 10); + assert_eq!( + A::migrate_keys( + (2, 20, 200), + ( + Box::new(|key| Blake2_256::hash(key).to_vec()), + Box::new(|key| Blake2_256::hash(key).to_vec()), + Box::new(|key| Twox128::hash(key).to_vec()), + ), + ), + Some(10) + ); + assert_eq!(A::contains_key((2, 20, 200)), true); + assert_eq!(A::get((2, 20, 200)), Some(10)); + + A::insert((3, 30, 300), 10); + A::insert((4, 40, 400), 10); + A::remove_all(None); + assert_eq!(A::contains_key((3, 30, 300)), false); + assert_eq!(A::contains_key((4, 40, 400)), false); + + A::insert((3, 30, 300), 10); + A::insert((4, 40, 400), 10); + assert_eq!(A::iter_values().collect::>(), vec![10, 10]); + + C::insert((3, 30, 300), 10); + C::insert((4, 40, 400), 10); + A::translate_values::(|v| Some((v * 2).into())); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 20), ((3, 30, 300), 20)]); + + A::insert((3, 30, 300), 10); + A::insert((4, 40, 400), 10); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 10), ((3, 30, 300), 10)]); + assert_eq!( + A::drain().collect::>(), + vec![((4, 40, 400), 10), ((3, 30, 300), 10)] + ); + assert_eq!(A::iter().collect::>(), vec![]); + + C::insert((3, 30, 300), 10); + C::insert((4, 40, 400), 10); + A::translate::(|(k1, k2, k3), v| { + Some((k1 * k2 as u16 * v as u16 / k3 as u16).into()) + }); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 4), ((3, 30, 300), 3)]); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u16, u16)>(), + value: 
scale_info::meta_type::(), + }, + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "Foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat + ], + key: scale_info::meta_type::<(u16, u16, u16)>(), + value: scale_info::meta_type::(), + }, + default: 98u32.encode(), + docs: vec![], + } + ] + ); + + WithLen::remove_all(None); + assert_eq!(WithLen::decode_len((3, 30, 300)), None); + WithLen::append((0, 100, 1000), 10); + assert_eq!(WithLen::decode_len((0, 100, 1000)), Some(1)); + + A::insert((3, 30, 300), 11); + A::insert((3, 30, 301), 12); + A::insert((4, 40, 400), 13); + A::insert((4, 40, 401), 14); + assert_eq!(A::iter_prefix_values((3,)).collect::>(), vec![11, 12]); + assert_eq!(A::iter_prefix_values((4,)).collect::>(), vec![14, 13]); + assert_eq!(A::iter_prefix_values((3, 30)).collect::>(), vec![11, 12]); + assert_eq!(A::iter_prefix_values((4, 40)).collect::>(), vec![14, 13]); + }); + } +} diff --git a/frame/support/src/storage/types/value.rs b/frame/support/src/storage/types/value.rs new file mode 100644 index 0000000000000..c5e7173bd0af7 --- /dev/null +++ b/frame/support/src/storage/types/value.rs @@ -0,0 +1,375 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage value type. Implements StorageValue trait and its method directly. + +use crate::{ + metadata::{StorageEntryMetadata, StorageEntryType}, + storage::{ + generator::StorageValue as StorageValueT, + types::{OptionQuery, QueryKindTrait, StorageEntryMetadataBuilder}, + StorageAppend, StorageDecodeLength, StorageTryAppend, + }, + traits::{GetDefault, StorageInfo, StorageInstance}, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; +use sp_arithmetic::traits::SaturatedConversion; +use sp_std::prelude::*; + +/// A type that allow to store a value. 
+/// +/// Each value is stored at: +/// ```nocompile +/// Twox128(Prefix::pallet_prefix()) ++ Twox128(Prefix::STORAGE_PREFIX) +/// ``` +pub struct StorageValue( + core::marker::PhantomData<(Prefix, Value, QueryKind, OnEmpty)>, +); + +impl crate::storage::generator::StorageValue + for StorageValue +where + Prefix: StorageInstance, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + type Query = QueryKind::Query; + fn module_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } + fn from_optional_value_to_query(v: Option) -> Self::Query { + QueryKind::from_optional_value_to_query(v) + } + fn from_query_to_optional_value(v: Self::Query) -> Option { + QueryKind::from_query_to_optional_value(v) + } +} + +impl StorageValue +where + Prefix: StorageInstance, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + /// Get the storage key. + pub fn hashed_key() -> [u8; 32] { + >::hashed_key() + } + + /// Does the value (explicitly) exist in storage? + pub fn exists() -> bool { + >::exists() + } + + /// Load the value from the provided storage instance. + pub fn get() -> QueryKind::Query { + >::get() + } + + /// Try to get the underlying value from the provided storage instance; `Ok` if it exists, + /// `Err` if not. + pub fn try_get() -> Result { + >::try_get() + } + + /// Translate a value from some previous type (`O`) to the current type. + /// + /// `f: F` is the translation function. + /// + /// Returns `Err` if the storage item could not be interpreted as the old type, and Ok, along + /// with the new value if it could. + /// + /// NOTE: This operates from and to `Option<_>` types; no effort is made to respect the default + /// value of the original type. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade, + /// while ensuring **no usage of this storage are made before the call to + /// `on_runtime_upgrade`**. (More precisely prior initialized modules doesn't make use of this + /// storage). + pub fn translate) -> Option>( + f: F, + ) -> Result, ()> { + >::translate(f) + } + + /// Store a value under this key into the provided storage instance. + pub fn put>(val: Arg) { + >::put(val) + } + + /// Store a value under this key into the provided storage instance. + /// + /// this uses the query type rather than the underlying value. + pub fn set(val: QueryKind::Query) { + >::set(val) + } + + /// Mutate the value + pub fn mutate R>(f: F) -> R { + >::mutate(f) + } + + /// Mutate the value if closure returns `Ok` + pub fn try_mutate Result>( + f: F, + ) -> Result { + >::try_mutate(f) + } + + /// Clear the storage value. + pub fn kill() { + >::kill() + } + + /// Take a value from storage, removing it afterwards. + pub fn take() -> QueryKind::Query { + >::take() + } + + /// Append the given item to the value in the storage. + /// + /// `Value` is required to implement [`StorageAppend`]. + /// + /// # Warning + /// + /// If the storage item is not encoded properly, the storage item will be overwritten + /// and set to `[item]`. Any default value set for the storage item will be ignored + /// on overwrite. 
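The `Twox128(pallet_prefix) ++ Twox128(STORAGE_PREFIX)` scheme quoted at the top of this type's docs is straightforward to reproduce by hand. A minimal sketch, assuming `sp_core` is available as a dependency; the `"test"`/`"foo"` prefixes mirror the test module at the end of this file:

```rust
// Sketch only: recompute the flat key a StorageValue lives under.
use sp_core::hashing::twox_128;

fn storage_value_key(pallet_prefix: &str, storage_prefix: &str) -> [u8; 32] {
    let mut key = [0u8; 32];
    key[..16].copy_from_slice(&twox_128(pallet_prefix.as_bytes()));
    key[16..].copy_from_slice(&twox_128(storage_prefix.as_bytes()));
    key
}

fn main() {
    // Matches what `A::hashed_key()` asserts in the tests below.
    let key = storage_value_key("test", "foo");
    assert_eq!(&key[..16], &twox_128(b"test")[..]);
    assert_eq!(&key[16..], &twox_128(b"foo")[..]);
}
```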
+ pub fn append(item: EncodeLikeItem) + where + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageAppend, + { + >::append(item) + } + + /// Read the length of the storage value without decoding the entire value. + /// + /// `Value` is required to implement [`StorageDecodeLength`]. + /// + /// If the value does not exists or it fails to decode the length, `None` is returned. + /// Otherwise `Some(len)` is returned. + /// + /// # Warning + /// + /// `None` does not mean that `get()` does not return a value. The default value is completly + /// ignored by this function. + pub fn decode_len() -> Option + where + Value: StorageDecodeLength, + { + >::decode_len() + } + + /// Try and append the given item to the value in the storage. + /// + /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. + pub fn try_append(item: EncodeLikeItem) -> Result<(), ()> + where + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageTryAppend, + { + >::try_append(item) + } +} + +impl StorageEntryMetadataBuilder + for StorageValue +where + Prefix: StorageInstance, + Value: FullCodec + scale_info::StaticTypeInfo, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let entry = StorageEntryMetadata { + name: Prefix::STORAGE_PREFIX, + modifier: QueryKind::METADATA, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: OnEmpty::get().encode(), + docs, + }; + + entries.push(entry); + } +} + +impl crate::traits::StorageInfoTrait + for StorageValue +where + Prefix: StorageInstance, + Value: FullCodec + MaxEncodedLen, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + fn storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::hashed_key().to_vec(), + max_values: Some(1), + max_size: Some(Value::max_encoded_len().saturated_into()), + }] + } +} + +/// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. 
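The `max_size` that `storage_info` reports above comes straight from `MaxEncodedLen`, i.e. it is a worst-case SCALE size, not a typical one. A small illustration using `parity-scale-codec` directly (the concrete numbers are just the fixed-width encodings):

```rust
use codec::{Compact, Encode, MaxEncodedLen};

fn main() {
    // Fixed-width types have an exact bound: u32 always encodes to 4 bytes.
    assert_eq!(u32::max_encoded_len(), 4);
    assert_eq!(<[u8; 32]>::max_encoded_len(), 32);
    // Compact values are bounded by their widest form, but small values
    // encode shorter; this is why `max_size` is a ceiling.
    assert_eq!(Compact(7u32).encoded_size(), 1);
}
```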
+impl crate::traits::PartialStorageInfoTrait + for StorageValue +where + Prefix: StorageInstance, + Value: FullCodec, + QueryKind: QueryKindTrait, + OnEmpty: crate::traits::Get + 'static, +{ + fn partial_storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::hashed_key().to_vec(), + max_values: Some(1), + max_size: None, + }] + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{metadata::StorageEntryModifier, storage::types::ValueQuery}; + use sp_io::{hashing::twox_128, TestExternalities}; + + struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + const STORAGE_PREFIX: &'static str = "foo"; + } + + struct ADefault; + impl crate::traits::Get for ADefault { + fn get() -> u32 { + 97 + } + } + + #[test] + fn test() { + type A = StorageValue; + type AValueQueryWithAnOnEmpty = StorageValue; + type B = StorageValue; + type WithLen = StorageValue>; + + TestExternalities::default().execute_with(|| { + assert_eq!(A::hashed_key().to_vec(), [twox_128(b"test"), twox_128(b"foo")].concat()); + assert_eq!(A::exists(), false); + assert_eq!(A::get(), None); + assert_eq!(AValueQueryWithAnOnEmpty::get(), 97); + assert_eq!(A::try_get(), Err(())); + + A::put(2); + assert_eq!(A::exists(), true); + assert_eq!(A::get(), Some(2)); + assert_eq!(AValueQueryWithAnOnEmpty::get(), 2); + assert_eq!(A::try_get(), Ok(2)); + assert_eq!(A::try_get(), Ok(2)); + + B::put(4); + A::translate::(|v| v.map(Into::into)).unwrap(); + assert_eq!(A::try_get(), Ok(4)); + + A::set(None); + assert_eq!(A::try_get(), Err(())); + + A::set(Some(2)); + assert_eq!(A::try_get(), Ok(2)); + + A::mutate(|v| *v = Some(v.unwrap() * 2)); + assert_eq!(A::try_get(), Ok(4)); + + A::set(Some(4)); + let _: Result<(), ()> = A::try_mutate(|v| { + *v = Some(v.unwrap() * 2); + Ok(()) + }); + assert_eq!(A::try_get(), Ok(8)); + + let _: Result<(), ()> = A::try_mutate(|v| { + *v = Some(v.unwrap() * 2); + Err(()) + }); + assert_eq!(A::try_get(), Ok(8)); + + A::kill(); + AValueQueryWithAnOnEmpty::mutate(|v| *v = *v * 2); + assert_eq!(AValueQueryWithAnOnEmpty::try_get(), Ok(97 * 2)); + + AValueQueryWithAnOnEmpty::kill(); + let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(|v| { + *v = *v * 2; + Ok(()) + }); + assert_eq!(AValueQueryWithAnOnEmpty::try_get(), Ok(97 * 2)); + + A::kill(); + assert_eq!(A::try_get(), Err(())); + + let mut entries = vec![]; + A::build_metadata(vec![], &mut entries); + AValueQueryWithAnOnEmpty::build_metadata(vec![], &mut entries); + assert_eq!( + entries, + vec![ + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: Option::::None.encode(), + docs: vec![], + }, + StorageEntryMetadata { + name: "foo", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: 97u32.encode(), + docs: vec![], + } + ] + ); + + WithLen::kill(); + assert_eq!(WithLen::decode_len(), None); + WithLen::append(3); + assert_eq!(WithLen::decode_len(), Some(1)); + }); + } +} diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index 34b146b86f6ba..f700771b2d5cc 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,15 +17,15 @@ //! Operation on unhashed runtime storage. +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(key: &[u8]) -> Option { sp_io::storage::get(key).and_then(|val| { Decode::decode(&mut &val[..]).map(Some).unwrap_or_else(|_| { // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state at {:?}", key); + crate::runtime_print!("ERROR: Corrupted state at {:?}", key); None }) }) @@ -83,7 +83,7 @@ pub fn take_or_else T>(key: &[u8], default_val /// Check to see if `key` has an explicit entry in storage. pub fn exists(key: &[u8]) -> bool { - sp_io::storage::read(key, &mut [0;0][..], 0).is_some() + sp_io::storage::exists(key) } /// Ensure `key` has no explicit entry in storage. @@ -92,8 +92,8 @@ pub fn kill(key: &[u8]) { } /// Ensure keys with the given `prefix` have no entries in storage. -pub fn kill_prefix(prefix: &[u8]) { - sp_io::storage::clear_prefix(prefix); +pub fn kill_prefix(prefix: &[u8], limit: Option) -> sp_io::KillStorageResult { + sp_io::storage::clear_prefix(prefix, limit) } /// Get a Vec of bytes from storage. diff --git a/frame/support/src/storage/weak_bounded_vec.rs b/frame/support/src/storage/weak_bounded_vec.rs new file mode 100644 index 0000000000000..9c30c45c3e2e1 --- /dev/null +++ b/frame/support/src/storage/weak_bounded_vec.rs @@ -0,0 +1,428 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map +//! or a double map. + +use crate::{ + storage::{StorageDecodeLength, StorageTryAppend}, + traits::Get, +}; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::{ + ops::{Deref, Index, IndexMut}, + slice::SliceIndex, +}; +use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; + +/// A weakly bounded vector. +/// +/// It has implementations for efficient append and length decoding, as with a normal `Vec<_>`, once +/// put into storage as a raw value, map or double-map. +/// +/// The length of the vec is not strictly bounded. Decoding a vec with more element that the bound +/// is accepted, and some method allow to bypass the restriction with warnings. +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct WeakBoundedVec(Vec, PhantomData); + +impl> Decode for WeakBoundedVec { + fn decode(input: &mut I) -> Result { + let inner = Vec::::decode(input)?; + Ok(Self::force_from(inner, Some("decode"))) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + Vec::::skip(input) + } +} + +impl WeakBoundedVec { + /// Create `Self` from `t` without any checks. 
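One note on the `unhashed::kill_prefix` change above before continuing with `WeakBoundedVec`: the new `limit` parameter bounds how many keys a single call may remove, and the `sp_io::KillStorageResult` return value tells the caller whether anything was left behind. A hedged sketch of the calling pattern (the prefix and limit are illustrative):

```rust
use frame_support::storage::unhashed;
use sp_io::KillStorageResult;

fn clear_some(prefix: &[u8]) {
    match unhashed::kill_prefix(prefix, Some(100)) {
        // Everything under the prefix is gone; `n` keys were removed.
        KillStorageResult::AllRemoved(n) => log::info!("fully cleared, {} keys", n),
        // The limit was hit; schedule another pass, e.g. in a later block.
        KillStorageResult::SomeRemaining(n) => log::info!("partial clear, {} keys", n),
    }
}
```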
+ fn unchecked_from(t: Vec) -> Self { + Self(t, Default::default()) + } + + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an + /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can + /// be used. + /// + /// This is useful for cases if you need access to an internal API of the inner `Vec<_>` which + /// is not provided by the wrapper `WeakBoundedVec`. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Exactly the same semantics as [`Vec::remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn remove(&mut self, index: usize) -> T { + self.0.remove(index) + } + + /// Exactly the same semantics as [`Vec::swap_remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn swap_remove(&mut self, index: usize) -> T { + self.0.swap_remove(index) + } + + /// Exactly the same semantics as [`Vec::retain`]. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Exactly the same semantics as [`Vec::get_mut`]. + pub fn get_mut>( + &mut self, + index: I, + ) -> Option<&mut >::Output> { + self.0.get_mut(index) + } +} + +impl> WeakBoundedVec { + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } + + /// Create `Self` from `t` without any checks. Logs warnings if the bound is not being + /// respected. The additional scope can be used to indicate where a potential overflow is + /// happening. + pub fn force_from(t: Vec, scope: Option<&'static str>) -> Self { + if t.len() > Self::bound() { + log::warn!( + target: crate::LOG_TARGET, + "length of a bounded vector in scope {} is not respected.", + scope.unwrap_or("UNKNOWN"), + ); + } + + Self::unchecked_from(t) + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut Vec)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Exactly the same semantics as [`Vec::insert`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if `index > len`. + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.insert(index, element); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::push`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds isize::MAX bytes. 
+ pub fn try_push(&mut self, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.push(element); + Ok(()) + } else { + Err(()) + } + } +} + +impl Default for WeakBoundedVec { + fn default() -> Self { + // the bound cannot be below 0, which is satisfied by an empty vector + Self::unchecked_from(Vec::default()) + } +} + +#[cfg(feature = "std")] +impl fmt::Debug for WeakBoundedVec +where + T: fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("WeakBoundedVec").field(&self.0).field(&Self::bound()).finish() + } +} + +impl Clone for WeakBoundedVec +where + T: Clone, +{ + fn clone(&self) -> Self { + // bound is retained + Self::unchecked_from(self.0.clone()) + } +} + +impl> TryFrom> for WeakBoundedVec { + type Error = (); + fn try_from(t: Vec) -> Result { + if t.len() <= Self::bound() { + // explicit check just above + Ok(Self::unchecked_from(t)) + } else { + Err(()) + } + } +} + +// It is okay to give a non-mutable reference of the inner vec to anyone. +impl AsRef> for WeakBoundedVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +impl AsRef<[T]> for WeakBoundedVec { + fn as_ref(&self) -> &[T] { + &self.0 + } +} + +impl AsMut<[T]> for WeakBoundedVec { + fn as_mut(&mut self) -> &mut [T] { + &mut self.0 + } +} + +// will allow for immutable all operations of `Vec` on `WeakBoundedVec`. +impl Deref for WeakBoundedVec { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +// Allows for indexing similar to a normal `Vec`. Can panic if out of bound. +impl Index for WeakBoundedVec +where + I: SliceIndex<[T]>, +{ + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + self.0.index(index) + } +} + +impl IndexMut for WeakBoundedVec +where + I: SliceIndex<[T]>, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + self.0.index_mut(index) + } +} + +impl sp_std::iter::IntoIterator for WeakBoundedVec { + type Item = T; + type IntoIter = sp_std::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl codec::DecodeLength for WeakBoundedVec { + fn len(self_encoded: &[u8]) -> Result { + // `WeakBoundedVec` stored just a `Vec`, thus the length is at the beginning in + // `Compact` form, and same implementation as `Vec` can be used. + as codec::DecodeLength>::len(self_encoded) + } +} + +// NOTE: we could also implement this as: +// impl, S2: Get> PartialEq> for WeakBoundedVec to allow comparison of bounded vectors with different bounds. 
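Taken together, the pieces above give the type its "weak" character: `TryFrom`, `try_push`, and `try_insert` enforce the bound, while `Decode` and `force_from` merely warn when it is exceeded. A short usage sketch; the module path is assumed from this file's location:

```rust
use frame_support::{parameter_types, storage::weak_bounded_vec::WeakBoundedVec};
use sp_std::convert::TryFrom;

parameter_types! {
    pub const Bound: u32 = 3;
}

fn demo() {
    // Checked construction and pushes respect the bound.
    let mut v = WeakBoundedVec::<u8, Bound>::try_from(vec![1, 2]).unwrap();
    assert!(v.try_push(3).is_ok());
    assert!(v.try_push(4).is_err()); // bound reached, vector unchanged

    // `force_from` does not truncate; it only logs a warning.
    let w = WeakBoundedVec::<u8, Bound>::force_from(vec![0; 10], Some("demo"));
    assert_eq!(w.len(), 10);
}
```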
+impl PartialEq for WeakBoundedVec +where + T: PartialEq, +{ + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} + +impl> PartialEq> for WeakBoundedVec { + fn eq(&self, other: &Vec) -> bool { + &self.0 == other + } +} + +impl Eq for WeakBoundedVec where T: Eq {} + +impl StorageDecodeLength for WeakBoundedVec {} + +impl> StorageTryAppend for WeakBoundedVec { + fn bound() -> usize { + S::get() as usize + } +} + +impl MaxEncodedLen for WeakBoundedVec +where + T: MaxEncodedLen, + S: Get, + WeakBoundedVec: Encode, +{ + fn max_encoded_len() -> usize { + // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 + // plus each item in the slice: + // https://substrate.dev/rustdocs/v3.0.0/src/parity_scale_codec/codec.rs.html#798-808 + codec::Compact(S::get()) + .encoded_size() + .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use crate::Twox128; + use sp_io::TestExternalities; + use sp_std::convert::TryInto; + + crate::parameter_types! { + pub const Seven: u32 = 7; + pub const Four: u32 = 4; + } + + crate::generate_storage_alias! { Prefix, Foo => Value> } + crate::generate_storage_alias! { Prefix, FooMap => Map<(u32, Twox128), WeakBoundedVec> } + crate::generate_storage_alias! { + Prefix, + FooDoubleMap => DoubleMap<(u32, Twox128), (u32, Twox128), WeakBoundedVec> + } + + #[test] + fn try_append_is_correct() { + assert_eq!(WeakBoundedVec::::bound(), 7); + } + + #[test] + fn decode_len_works() { + TestExternalities::default().execute_with(|| { + let bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + Foo::put(bounded); + assert_eq!(Foo::decode_len().unwrap(), 3); + }); + + TestExternalities::default().execute_with(|| { + let bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooMap::insert(1, bounded); + assert_eq!(FooMap::decode_len(1).unwrap(), 3); + assert!(FooMap::decode_len(0).is_none()); + assert!(FooMap::decode_len(2).is_none()); + }); + + TestExternalities::default().execute_with(|| { + let bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooDoubleMap::insert(1, 1, bounded); + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 3); + assert!(FooDoubleMap::decode_len(2, 1).is_none()); + assert!(FooDoubleMap::decode_len(1, 2).is_none()); + assert!(FooDoubleMap::decode_len(2, 2).is_none()); + }); + } + + #[test] + fn try_insert_works() { + let mut bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } + + #[test] + #[should_panic(expected = "insertion index (is 9) should be <= len (is 3)")] + fn try_inert_panics_if_oob() { + let mut bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(9, 0).unwrap(); + } + + #[test] + fn try_push_works() { + let mut bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_push(0).unwrap(); + assert_eq!(*bounded, vec![1, 2, 3, 0]); + + assert!(bounded.try_push(9).is_err()); + } + + #[test] + fn deref_coercion_works() { + let bounded: WeakBoundedVec = vec![1, 2, 3].try_into().unwrap(); + // these methods come from deref-ed vec. 
+ assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded: WeakBoundedVec = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + let bounded = bounded.try_mutate(|v| v.push(7)).unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded.try_mutate(|v| v.push(8)).is_none()); + } + + #[test] + fn slice_indexing_works() { + let bounded: WeakBoundedVec = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(&bounded[0..=2], &[1, 2, 3]); + } + + #[test] + fn vec_eq_works() { + let bounded: WeakBoundedVec = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(bounded, vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn too_big_succeed_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + let w = WeakBoundedVec::::decode(&mut &v.encode()[..]).unwrap(); + assert_eq!(v, *w); + } +} diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 377bfaa56a551..efb5559ed0622 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,1836 +15,74 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Traits for FRAME. +//! Traits and associated utilities for use in the FRAME environment. //! //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. -use sp_std::{prelude::*, result, marker::PhantomData, ops::Div, fmt::Debug}; -use codec::{FullCodec, Codec, Encode, Decode, EncodeLike}; -use sp_core::u32_trait::Value as U32; -use sp_runtime::{ - RuntimeDebug, ConsensusEngineId, DispatchResult, DispatchError, - traits::{ - MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput, Bounded, Zero, - BadOrigin, AtLeast32BitUnsigned, UniqueSaturatedFrom, UniqueSaturatedInto, - SaturatedConversion, +pub mod tokens; +pub use tokens::{ + currency::{ + Currency, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, + VestingSchedule, }, + fungible, fungibles, + imbalance::{Imbalance, OnUnbalanced, SignedImbalance}, + BalanceStatus, ExistenceRequirement, WithdrawReasons, }; -use crate::dispatch::Parameter; -use crate::storage::StorageMap; -use crate::weights::Weight; -use impl_trait_for_tuples::impl_for_tuples; -/// Re-expected for the macro. -#[doc(hidden)] -pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; - -/// Simple trait for providing a filter over a reference to some type. -pub trait Filter { - /// Determine if a given value should be allowed through the filter (returns `true`) or not. - fn filter(_: &T) -> bool; -} - -impl Filter for () { - fn filter(_: &T) -> bool { true } -} - -/// Trait to add a constraint onto the filter. -pub trait FilterStack: Filter { - /// The type used to archive the stack. - type Stack; - - /// Add a new `constraint` onto the filter. - fn push(constraint: impl Fn(&T) -> bool + 'static); - - /// Removes the most recently pushed, and not-yet-popped, constraint from the filter. - fn pop(); - - /// Clear the filter, returning a value that may be used later to `restore` it. - fn take() -> Self::Stack; - - /// Restore the filter from a previous `take` operation. 
- fn restore(taken: Self::Stack); -} - -/// Guard type for pushing a constraint to a `FilterStack` and popping when dropped. -pub struct FilterStackGuard, T>(PhantomData<(F, T)>); - -/// Guard type for clearing all pushed constraints from a `FilterStack` and reinstating them when -/// dropped. -pub struct ClearFilterGuard, T>(Option, PhantomData); - -impl, T> FilterStackGuard { - /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when - /// this instance is dropped. - pub fn new(constraint: impl Fn(&T) -> bool + 'static) -> Self { - F::push(constraint); - Self(PhantomData) - } -} - -impl, T> Drop for FilterStackGuard { - fn drop(&mut self) { - F::pop(); - } -} - -impl, T> ClearFilterGuard { - /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when - /// this instance is dropped. - pub fn new() -> Self { - Self(Some(F::take()), PhantomData) - } -} - -impl, T> Drop for ClearFilterGuard { - fn drop(&mut self) { - if let Some(taken) = self.0.take() { - F::restore(taken); - } - } -} - -/// Simple trait for providing a filter over a reference to some type, given an instance of itself. -pub trait InstanceFilter: Sized + Send + Sync { - /// Determine if a given value should be allowed through the filter (returns `true`) or not. - fn filter(&self, _: &T) -> bool; - - /// Determines whether `self` matches at least everything that `_o` does. - fn is_superset(&self, _o: &Self) -> bool { false } -} - -impl InstanceFilter for () { - fn filter(&self, _: &T) -> bool { true } - fn is_superset(&self, _o: &Self) -> bool { true } -} - -#[macro_export] -macro_rules! impl_filter_stack { - ($target:ty, $base:ty, $call:ty, $module:ident) => { - #[cfg(feature = "std")] - mod $module { - #[allow(unused_imports)] - use super::*; - use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; - - thread_local! { - static FILTER: RefCell bool + 'static>>> = RefCell::new(Vec::new()); - } - - impl Filter<$call> for $target { - fn filter(call: &$call) -> bool { - <$base>::filter(call) && - FILTER.with(|filter| filter.borrow().iter().all(|f| f(call))) - } - } - - impl FilterStack<$call> for $target { - type Stack = Vec bool + 'static>>; - fn push(f: impl Fn(&$call) -> bool + 'static) { - FILTER.with(|filter| filter.borrow_mut().push(Box::new(f))); - } - fn pop() { - FILTER.with(|filter| filter.borrow_mut().pop()); - } - fn take() -> Self::Stack { - FILTER.with(|filter| take(filter.borrow_mut().as_mut())) - } - fn restore(mut s: Self::Stack) { - FILTER.with(|filter| swap(filter.borrow_mut().as_mut(), &mut s)); - } - } - } - - #[cfg(not(feature = "std"))] - mod $module { - #[allow(unused_imports)] - use super::*; - use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; - - struct ThisFilter(RefCell bool + 'static>>>); - // NOTE: Safe only in wasm (guarded above) because there's only one thread. 
- unsafe impl Send for ThisFilter {} - unsafe impl Sync for ThisFilter {} - - static FILTER: ThisFilter = ThisFilter(RefCell::new(Vec::new())); - - impl Filter<$call> for $target { - fn filter(call: &$call) -> bool { - <$base>::filter(call) && FILTER.0.borrow().iter().all(|f| f(call)) - } - } - - impl FilterStack<$call> for $target { - type Stack = Vec bool + 'static>>; - fn push(f: impl Fn(&$call) -> bool + 'static) { - FILTER.0.borrow_mut().push(Box::new(f)); - } - fn pop() { - FILTER.0.borrow_mut().pop(); - } - fn take() -> Self::Stack { - take(FILTER.0.borrow_mut().as_mut()) - } - fn restore(mut s: Self::Stack) { - swap(FILTER.0.borrow_mut().as_mut(), &mut s); - } - } - } - } -} - -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. -#[impl_for_tuples(30)] -pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - -#[cfg(test)] -mod test_impl_filter_stack { - use super::*; - - pub struct IsCallable; - pub struct BaseFilter; - impl Filter for BaseFilter { - fn filter(x: &u32) -> bool { x % 2 == 0 } - } - impl_filter_stack!( - crate::traits::test_impl_filter_stack::IsCallable, - crate::traits::test_impl_filter_stack::BaseFilter, - u32, - is_callable - ); - - #[test] - fn impl_filter_stack_should_work() { - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - - IsCallable::push(|x| *x < 42); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - IsCallable::push(|x| *x % 3 == 0); - assert!(IsCallable::filter(&36)); - assert!(!IsCallable::filter(&40)); - - IsCallable::pop(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - let saved = IsCallable::take(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - - IsCallable::restore(saved); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - IsCallable::pop(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } - - #[test] - fn guards_should_work() { - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - { - let _guard_1 = FilterStackGuard::::new(|x| *x < 42); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - { - let _guard_2 = FilterStackGuard::::new(|x| *x % 3 == 0); - assert!(IsCallable::filter(&36)); - assert!(!IsCallable::filter(&40)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - { - let _guard_2 = ClearFilterGuard::::new(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } -} - -/// An abstraction of a value stored within storage, but possibly as part of a 
larger composite -/// item. -pub trait StoredMap { - /// Get the item, or its default if it doesn't yet exist; we make no distinction between the - /// two. - fn get(k: &K) -> T; - /// Get whether the item takes up any storage. If this is `false`, then `get` will certainly - /// return the `T::default()`. If `true`, then there is no implication for `get` (i.e. it - /// may return any value, including the default). - /// - /// NOTE: This may still be `true`, even after `remove` is called. This is the case where - /// a single storage entry is shared between multiple `StoredMap` items single, without - /// additional logic to enforce it, deletion of any one them doesn't automatically imply - /// deletion of them all. - fn is_explicit(k: &K) -> bool; - /// Mutate the item. - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R; - /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. - fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R; - /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is - /// returned. It is removed or reset to default value if it has been mutated to `None` - fn try_mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> Result) -> Result; - /// Set the item to something new. - fn insert(k: &K, t: T) { Self::mutate(k, |i| *i = t); } - /// Remove the item or otherwise replace it with its default value; we don't care which. - fn remove(k: &K); -} - -/// A simple, generic one-parameter event notifier/handler. -pub trait Happened { - /// The thing happened. - fn happened(t: &T); -} - -impl Happened for () { - fn happened(_: &T) {} -} - -/// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this -/// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this -/// would break the ability to have custom impls of `StoredValue`. The other workaround is to -/// implement it directly in the macro. -/// -/// This form has the advantage that two additional types are provides, `Created` and `Removed`, -/// which are both generic events that can be tied to handlers to do something in the case of being -/// about to create an account where one didn't previously exist (at all; not just where it used to -/// be the default value), or where the account is being removed or reset back to the default value -/// where previously it did exist (though may have been in a default state). This works well with -/// system module's `CallOnCreatedAccount` and `CallKillAccount`. 
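Since the shim's `Created`/`Removed` slots are plain `Happened` implementations, side effects are one short impl per event. A toy sketch; `LogCreated` is illustrative and not from this diff:

```rust
use frame_support::traits::Happened;

pub struct LogCreated;
impl<AccountId: core::fmt::Debug> Happened<AccountId> for LogCreated {
    fn happened(who: &AccountId) {
        // A real handler might bump a counter or emit an event instead.
        log::info!("account created: {:?}", who);
    }
}
```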
-pub struct StorageMapShim< - S, - Created, - Removed, - K, - T ->(sp_std::marker::PhantomData<(S, Created, Removed, K, T)>); -impl< - S: StorageMap, - Created: Happened, - Removed: Happened, - K: FullCodec, - T: FullCodec, -> StoredMap for StorageMapShim { - fn get(k: &K) -> T { S::get(k) } - fn is_explicit(k: &K) -> bool { S::contains_key(k) } - fn insert(k: &K, t: T) { - let existed = S::contains_key(&k); - S::insert(k, t); - if !existed { - Created::happened(k); - } - } - fn remove(k: &K) { - let existed = S::contains_key(&k); - S::remove(k); - if existed { - Removed::happened(&k); - } - } - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R { - let existed = S::contains_key(&k); - let r = S::mutate(k, f); - if !existed { - Created::happened(k); - } - r - } - fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R { - let (existed, exists, r) = S::mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let r = f(maybe_value); - (existed, maybe_value.is_some(), r) - }); - if !existed && exists { - Created::happened(k); - } else if existed && !exists { - Removed::happened(k); - } - r - } - fn try_mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> Result) -> Result { - S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - f(maybe_value).map(|v| (existed, maybe_value.is_some(), v)) - }).map(|(existed, exists, v)| { - if !existed && exists { - Created::happened(k); - } else if existed && !exists { - Removed::happened(k); - } - v - }) - } -} - -/// Something that can estimate at which block the next session rotation will happen. This should -/// be the same logical unit that dictates `ShouldEndSession` to the session module. No Assumptions -/// are made about the scheduling of the sessions. -pub trait EstimateNextSessionRotation { - /// Return the block number at which the next session rotation is estimated to happen. - /// - /// None should be returned if the estimation fails to come to an answer - fn estimate_next_session_rotation(now: BlockNumber) -> Option; - - /// Return the weight of calling `estimate_next_session_rotation` - fn weight(now: BlockNumber) -> Weight; -} - -impl EstimateNextSessionRotation for () { - fn estimate_next_session_rotation(_: BlockNumber) -> Option { - Default::default() - } - - fn weight(_: BlockNumber) -> Weight { - 0 - } -} - -/// Something that can estimate at which block the next `new_session` will be triggered. This must -/// always be implemented by the session module. -pub trait EstimateNextNewSession { - /// Return the block number at which the next new session is estimated to happen. - fn estimate_next_new_session(now: BlockNumber) -> Option; - - /// Return the weight of calling `estimate_next_new_session` - fn weight(now: BlockNumber) -> Weight; -} - -impl EstimateNextNewSession for () { - fn estimate_next_new_session(_: BlockNumber) -> Option { - Default::default() - } - - fn weight(_: BlockNumber) -> Weight { - 0 - } -} - -/// Anything that can have a `::len()` method. -pub trait Len { - /// Return the length of data type. - fn len(&self) -> usize; -} - -impl Len for T where ::IntoIter: ExactSizeIterator { - fn len(&self) -> usize { - self.clone().into_iter().len() - } -} - -/// A trait for querying a single value from a type. -/// -/// It is not required that the value is constant. -pub trait Get { - /// Return the current value. - fn get() -> T; -} - -impl Get for () { - fn get() -> T { T::default() } -} - -/// A trait for querying whether a type can be said to "contain" a value. 
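As the module doc at the top of this file notes, `parameter_types!` now lives in the top-level module, and it remains the idiomatic way to satisfy `Get<T>` rather than hand-writing the impl. A minimal sketch:

```rust
use frame_support::{parameter_types, traits::Get};

parameter_types! {
    pub const MaxLocks: u32 = 50;
}

fn read_bound<G: Get<u32>>() -> u32 {
    G::get()
}

fn main() {
    assert_eq!(read_bound::<MaxLocks>(), 50);
}
```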
-pub trait Contains { - /// Return `true` if this "contains" the given value `t`. - fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() } - - /// Get a vector of all members in the set, ordered. - fn sorted_members() -> Vec; - - /// Get the number of items in the set. - fn count() -> usize { Self::sorted_members().len() } - - /// Add an item that would satisfy `contains`. It does not make sure any other - /// state is correctly maintained or generated. - /// - /// **Should be used for benchmarking only!!!** - #[cfg(feature = "runtime-benchmarks")] - fn add(_t: &T) { unimplemented!() } -} - -/// A trait for querying bound for the length of an implementation of `Contains` -pub trait ContainsLengthBound { - /// Minimum number of elements contained - fn min_len() -> usize; - /// Maximum number of elements contained - fn max_len() -> usize; -} - -/// Determiner to say whether a given account is unused. -pub trait IsDeadAccount { - /// Is the given account dead? - fn is_dead_account(who: &AccountId) -> bool; -} - -impl IsDeadAccount for () { - fn is_dead_account(_who: &AccountId) -> bool { - true - } -} - -/// Handler for when a new account has been created. -#[impl_for_tuples(30)] -pub trait OnNewAccount { - /// A new account `who` has been registered. - fn on_new_account(who: &AccountId); -} - -/// The account with the given id was reaped. -#[impl_for_tuples(30)] -pub trait OnKilledAccount { - /// The account with the given id was reaped. - fn on_killed_account(who: &AccountId); -} - -/// A trait for finding the author of a block header based on the `PreRuntime` digests contained -/// within it. -pub trait FindAuthor { - /// Find the author of a block based on the pre-runtime digests. - fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator; -} - -impl FindAuthor for () { - fn find_author<'a, I>(_: I) -> Option - where I: 'a + IntoIterator - { - None - } -} - -/// A trait for verifying the seal of a header and returning the author. -pub trait VerifySeal { - /// Verify a header and return the author, if any. - fn verify_seal(header: &Header) -> Result, &'static str>; -} - -/// Something which can compute and check proofs of -/// a historical key owner and return full identification data of that -/// key owner. -pub trait KeyOwnerProofSystem { - /// The proof of membership itself. - type Proof: Codec; - /// The full identification of a key owner and the stash account. - type IdentificationTuple: Codec; - - /// Prove membership of a key owner in the current block-state. - /// - /// This should typically only be called off-chain, since it may be - /// computationally heavy. - /// - /// Returns `Some` iff the key owner referred to by the given `key` is a - /// member of the current set. - fn prove(key: Key) -> Option; - - /// Check a proof of membership on-chain. Return `Some` iff the proof is - /// valid and recent enough to check. - fn check_proof(key: Key, proof: Self::Proof) -> Option; -} - -impl KeyOwnerProofSystem for () { - // The proof and identification tuples is any bottom type to guarantee that the methods of this - // implementation can never be called or return anything other than `None`. - type Proof = crate::Void; - type IdentificationTuple = crate::Void; - - fn prove(_key: Key) -> Option { - None - } - - fn check_proof(_key: Key, _proof: Self::Proof) -> Option { - None - } -} - -/// Handler for when some currency "account" decreased in balance for -/// some reason. 
-/// -/// The only reason at present for an increase would be for validator rewards, but -/// there may be other reasons in the future or for other chains. -/// -/// Reasons for decreases include: -/// -/// - Someone got slashed. -/// - Someone paid for a transaction to be included. -pub trait OnUnbalanced { - /// Handler for some imbalances. The different imbalances might have different origins or - /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all - /// of them. Infallible. - fn on_unbalanceds(amounts: impl Iterator) where Imbalance: crate::traits::Imbalance { - Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) - } - - /// Handler for some imbalance. Infallible. - fn on_unbalanced(amount: Imbalance) { - amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced) - } - - /// Actually handle a non-zero imbalance. You probably want to implement this rather than - /// `on_unbalanced`. - fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); } -} - -impl OnUnbalanced for () {} - -/// Simple boolean for whether an account needs to be kept in existence. -#[derive(Copy, Clone, Eq, PartialEq)] -pub enum ExistenceRequirement { - /// Operation must not result in the account going out of existence. - /// - /// Note this implies that if the account never existed in the first place, then the operation - /// may legitimately leave the account unchanged and still non-existent. - KeepAlive, - /// Operation may result in account going out of existence. - AllowDeath, -} - -/// A type for which some values make sense to be able to drop without further consideration. -pub trait TryDrop: Sized { - /// Drop an instance cleanly. Only works if its value represents "no-operation". - fn try_drop(self) -> Result<(), Self>; -} - -/// A trait for a not-quite Linear Type that tracks an imbalance. -/// -/// Functions that alter account balances return an object of this trait to -/// express how much account balances have been altered in aggregate. If -/// dropped, the currency system will take some default steps to deal with -/// the imbalance (`balances` module simply reduces or increases its -/// total issuance). Your module should generally handle it in some way, -/// good practice is to do so in a configurable manner using an -/// `OnUnbalanced` type for each situation in which your module needs to -/// handle an imbalance. -/// -/// Imbalances can either be Positive (funds were added somewhere without -/// being subtracted elsewhere - e.g. a reward) or Negative (funds deducted -/// somewhere without an equal and opposite addition - e.g. a slash or -/// system fee payment). -/// -/// Since they are unsigned, the actual type is always Positive or Negative. -/// The trait makes no distinction except to define the `Opposite` type. -/// -/// New instances of zero value can be created (`zero`) and destroyed -/// (`drop_zero`). -/// -/// Existing instances can be `split` and merged either consuming `self` with -/// `merge` or mutating `self` with `subsume`. If the target is an `Option`, -/// then `maybe_merge` and `maybe_subsume` might work better. Instances can -/// also be `offset` with an `Opposite` that is less than or equal to in value. -/// -/// You can always retrieve the raw balance value using `peek`. -#[must_use] -pub trait Imbalance: Sized + TryDrop { - /// The oppositely imbalanced type. They come in pairs. - type Opposite: Imbalance; - - /// The zero imbalance. Can be destroyed with `drop_zero`. 
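The `OnUnbalanced`/`Imbalance` pair above (now housed under `tokens::imbalance` but still re-exported from `frame_support::traits`, per the `pub use` at the top of this diff) is easiest to see in its usual role: a fee handler that splits a negative imbalance instead of letting the default drop fold everything back into total issuance. A hedged sketch; the concrete `u128` balance and the 4:1 ratio are illustrative:

```rust
use frame_support::traits::{Imbalance, OnUnbalanced};

pub struct DealWithFees;

impl<I: Imbalance<u128>> OnUnbalanced<I> for DealWithFees {
    fn on_nonzero_unbalanced(amount: I) {
        // Split 4:1 by value; `ration` saturates rather than overflowing.
        let (burn, tip) = amount.ration(4, 1);
        drop(burn); // default drop handling adjusts total issuance
        drop(tip);  // a real handler would resolve this into an account
    }
}
```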
- fn zero() -> Self; - - /// Drop an instance cleanly. Only works if its `self.value()` is zero. - fn drop_zero(self) -> Result<(), Self>; - - /// Consume `self` and return two independent instances; the first - /// is guaranteed to be at most `amount` and the second will be the remainder. - fn split(self, amount: Balance) -> (Self, Self); - - /// Consume `self` and return two independent instances; the amounts returned will be in - /// approximately the same ratio as `first`:`second`. - /// - /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should - /// fit into a `u32`. Overflow will safely saturate in both cases. - fn ration(self, first: u32, second: u32) -> (Self, Self) - where Balance: From + Saturating + Div - { - let total: u32 = first.saturating_add(second); - let amount1 = self.peek().saturating_mul(first.into()) / total.into(); - self.split(amount1) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) { - let (a, b) = self.split(amount); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise into two pre-existing Imbalance refs. - /// - /// A convenient replacement for `split` and `subsume`. - fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) { - let (a, b) = self.split(amount); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - fn merge(self, other: Self) -> Self; - - /// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with - /// reversed arguments. - fn merge_into(self, other: &mut Self) { - other.subsume(self) - } - - /// Consume `self` and maybe an `other` to return a new instance that combines - /// both. - fn maybe_merge(self, other: Option) -> Self { - if let Some(o) = other { - self.merge(o) - } else { - self - } - } - - /// Consume an `other` to mutate `self` into a new instance that combines - /// both. - fn subsume(&mut self, other: Self); - - /// Maybe consume an `other` to mutate `self` into a new instance that combines - /// both. - fn maybe_subsume(&mut self, other: Option) { - if let Some(o) = other { - self.subsume(o) - } - } - - /// Consume self and along with an opposite counterpart to return - /// a combined result. 
- /// - /// Returns `Ok` along with a new instance of `Self` if this instance has a - /// greater value than the `other`. Otherwise returns `Err` with an instance of - /// the `Opposite`. In both cases the value represents the combination of `self` - /// and `other`. - fn offset(self, other: Self::Opposite) -> Result; - - /// The raw value of self. - fn peek(&self) -> Balance; -} - -/// Either a positive or a negative imbalance. -pub enum SignedImbalance>{ - /// A positive imbalance (funds have been created but none destroyed). - Positive(P), - /// A negative imbalance (funds have been destroyed but none created). - Negative(P::Opposite), -} - -impl< - P: Imbalance, - N: Imbalance, - B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, -> SignedImbalance { - pub fn zero() -> Self { - SignedImbalance::Positive(P::zero()) - } - - pub fn drop_zero(self) -> Result<(), Self> { - match self { - SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), - SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), - } - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - pub fn merge(self, other: Self) -> Self { - match (self, other) { - (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => - SignedImbalance::Positive(one.merge(other)), - (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => - SignedImbalance::Negative(one.merge(other)), - (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => - if one.peek() > other.peek() { - SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero)) - } else { - SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero)) - }, - (one, other) => other.merge(one), - } - } -} - -/// Split an unbalanced amount two ways between a common divisor. -pub struct SplitTwoWays< - Balance, - Imbalance, - Part1, - Target1, - Part2, - Target2, ->(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>); - -impl< - Balance: From + Saturating + Div, - I: Imbalance, - Part1: U32, - Target1: OnUnbalanced, - Part2: U32, - Target2: OnUnbalanced, -> OnUnbalanced for SplitTwoWays -{ - fn on_nonzero_unbalanced(amount: I) { - let total: u32 = Part1::VALUE + Part2::VALUE; - let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into(); - let (imb1, imb2) = amount.split(amount1); - Target1::on_unbalanced(imb1); - Target2::on_unbalanced(imb2); - } -} - -/// Abstraction over a fungible assets system. -pub trait Currency { - /// The balance of an account. - type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + - Default; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type PositiveImbalance: Imbalance; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type NegativeImbalance: Imbalance; - - // PUBLIC IMMUTABLES - - /// The combined balance of `who`. - fn total_balance(who: &AccountId) -> Self::Balance; - - /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no - /// balance changes in the meantime and only the reserved balance is not taken into account. - fn can_slash(who: &AccountId, value: Self::Balance) -> bool; - - /// The total amount of issuance in the system. 
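`Currency` is a wide trait, but downstream pallet code usually exercises only a sliver of it. A minimal hedged sketch of the most common call shape (the helper is illustrative; in a real pallet `C` would be the `T::Currency` type from the pallet's `Config`):

```rust
use frame_support::{
    dispatch::DispatchResult,
    traits::{Currency, ExistenceRequirement},
};

/// Move `amount` from `from` to `to`, refusing to reap the sender.
fn pay<AccountId, C: Currency<AccountId>>(
    from: &AccountId,
    to: &AccountId,
    amount: C::Balance,
) -> DispatchResult {
    C::transfer(from, to, amount, ExistenceRequirement::KeepAlive)
}
```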
- fn total_issuance() -> Self::Balance; - - /// The minimum balance any single account may have. This is equivalent to the `Balances` module's - /// `ExistentialDeposit`. - fn minimum_balance() -> Self::Balance; - - /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will - /// typically be used to reduce an account by the same amount with e.g. `settle`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example - /// in the case of underflow. - fn burn(amount: Self::Balance) -> Self::PositiveImbalance; - - /// Increase the total issuance by `amount` and return the according imbalance. The imbalance - /// will typically be used to increase an account by the same amount with e.g. - /// `resolve_into_existing` or `resolve_creating`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example - /// in the case of overflow. - fn issue(amount: Self::Balance) -> Self::NegativeImbalance; - - /// Produce a pair of imbalances that cancel each other out exactly. - /// - /// This is just the same as burning and issuing the same amount and has no effect on the - /// total issuance. - fn pair(amount: Self::Balance) -> (Self::PositiveImbalance, Self::NegativeImbalance) { - (Self::burn(amount.clone()), Self::issue(amount)) - } - - /// The 'free' balance of a given account. - /// - /// This is the only balance that matters in terms of most operations on tokens. It alone - /// is used to determine the balance when in the contract execution environment. When this - /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is - /// deleted: specifically `FreeBalance`. - /// - /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn free_balance(who: &AccountId) -> Self::Balance; - - /// Returns `Ok` iff the account is able to make a withdrawal of the given amount - /// for the given reason. Basically, it's just a dry-run of `withdraw`. - /// - /// `Err(...)` with the reason why not otherwise. - fn ensure_can_withdraw( - who: &AccountId, - _amount: Self::Balance, - reasons: WithdrawReasons, - new_balance: Self::Balance, - ) -> DispatchResult; - - // PUBLIC MUTABLES (DANGEROUS) - - /// Transfer some liquid free balance to another staker. - /// - /// This is a very high-level function. It will ensure all appropriate fees are paid - /// and no imbalance in the system remains. - fn transfer( - source: &AccountId, - dest: &AccountId, - value: Self::Balance, - existence_requirement: ExistenceRequirement, - ) -> DispatchResult; - - /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the - /// free balance. This function cannot fail. - /// - /// The resulting imbalance is the first item of the tuple returned. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then a non-zero second item will be returned. - fn slash( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// Mints `value` to the free balance of `who`. - /// - /// If `who` doesn't exist, nothing is done and an Err returned. - fn deposit_into_existing( - who: &AccountId, - value: Self::Balance - ) -> result::Result; - - /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on - /// success. 
-	fn resolve_into_existing(
-		who: &AccountId,
-		value: Self::NegativeImbalance,
-	) -> result::Result<(), Self::NegativeImbalance> {
-		let v = value.peek();
-		match Self::deposit_into_existing(who, v) {
-			Ok(opposite) => Ok(drop(value.offset(opposite))),
-			_ => Err(value),
-		}
-	}
-
-	/// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created.
-	///
-	/// Infallible.
-	fn deposit_creating(
-		who: &AccountId,
-		value: Self::Balance,
-	) -> Self::PositiveImbalance;
-
-	/// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on
-	/// success.
-	fn resolve_creating(
-		who: &AccountId,
-		value: Self::NegativeImbalance,
-	) {
-		let v = value.peek();
-		drop(value.offset(Self::deposit_creating(who, v)));
-	}
-
-	/// Removes some free balance from `who` account for `reason` if possible. If `liveness` is
-	/// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining.
-	///
-	/// This checks any locks, vesting, and liquidity requirements. If the removal is not possible,
-	/// then it returns `Err`.
-	///
-	/// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value
-	/// is `value`.
-	fn withdraw(
-		who: &AccountId,
-		value: Self::Balance,
-		reasons: WithdrawReasons,
-		liveness: ExistenceRequirement,
-	) -> result::Result<Self::NegativeImbalance, DispatchError>;
-
-	/// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success.
-	fn settle(
-		who: &AccountId,
-		value: Self::PositiveImbalance,
-		reasons: WithdrawReasons,
-		liveness: ExistenceRequirement,
-	) -> result::Result<(), Self::PositiveImbalance> {
-		let v = value.peek();
-		match Self::withdraw(who, v, reasons, liveness) {
-			Ok(opposite) => Ok(drop(value.offset(opposite))),
-			_ => Err(value),
-		}
-	}
-
-	/// Ensure an account's free balance equals some value; this will create the account
-	/// if needed.
-	///
-	/// Returns a signed imbalance and status to indicate if the account was successfully updated or update
-	/// has led to killing of the account.
-	fn make_free_balance_be(
-		who: &AccountId,
-		balance: Self::Balance,
-	) -> SignedImbalance<Self::Balance, Self::PositiveImbalance>;
-}
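An illustrative aside (not part of the patch): the `resolve_*` and `settle` default methods above all follow the same offset pattern, where an imbalance of one sign is cancelled against a freshly produced imbalance of the opposite sign. A standalone sketch of that bookkeeping with toy types; `Pos`, `Neg` and `offset` are assumptions for the example, not FRAME types:

// Toy model: positive/negative imbalances are counters that must cancel out.
struct Pos(u64);
struct Neg(u64);

// Analogue of `Imbalance::offset`: equal amounts annihilate; the surplus survives.
fn offset(p: Pos, n: Neg) -> Result<Pos, Neg> {
	if p.0 >= n.0 { Ok(Pos(p.0 - n.0)) } else { Err(Neg(n.0 - p.0)) }
}

fn main() {
	// `resolve_creating`-style usage: a deposit (Pos) cancels a withdrawal (Neg).
	assert!(matches!(offset(Pos(10), Neg(10)), Ok(Pos(0))));
	assert!(matches!(offset(Pos(7), Neg(10)), Err(Neg(3))));
}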
-
-/// Status of funds.
-#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)]
-pub enum BalanceStatus {
-	/// Funds are free, as corresponding to `free` item in Balances.
-	Free,
-	/// Funds are reserved, as corresponding to `reserved` item in Balances.
-	Reserved,
-}
-
-/// A currency where funds can be reserved from the user.
-pub trait ReservableCurrency<AccountId>: Currency<AccountId> {
-	/// Same result as `reserve(who, value)` (but without the side-effects) assuming there
-	/// are no balance changes in the meantime.
-	fn can_reserve(who: &AccountId, value: Self::Balance) -> bool;
-
-	/// Deducts up to `value` from reserved balance of `who`. This function cannot fail.
-	///
-	/// As much funds up to `value` will be deducted as possible. If the reserve balance of `who`
-	/// is less than `value`, then a non-zero second item will be returned.
-	fn slash_reserved(
-		who: &AccountId,
-		value: Self::Balance
-	) -> (Self::NegativeImbalance, Self::Balance);
-
-	/// The amount of the balance of a given account that is externally reserved; this can still get
-	/// slashed, but gets slashed last of all.
-	///
-	/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
-	/// that are still 'owned' by the account holder, but which are suspendable.
-	///
-	/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
-	/// is deleted: specifically, `ReservedBalance`.
-	///
-	/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
-	/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.
-	fn reserved_balance(who: &AccountId) -> Self::Balance;
-
-	/// Moves `value` from balance to reserved balance.
-	///
-	/// If the free balance is lower than `value`, then no funds will be moved and an `Err` will
-	/// be returned to notify of this. This is different behavior than `unreserve`.
-	fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult;
-
-	/// Moves up to `value` from reserved balance to free balance. This function cannot fail.
-	///
-	/// As much funds up to `value` will be moved as possible. If the reserve balance of `who`
-	/// is less than `value`, then the remaining amount will be returned.
-	///
-	/// # NOTES
-	///
-	/// - This is different from `reserve`.
-	/// - If the remaining reserved balance is less than `ExistentialDeposit`, it will
-	/// invoke `on_reserved_too_low` and could reap the account.
-	fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance;
-
-	/// Moves up to `value` from reserved balance of account `slashed` to balance of account
-	/// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be
-	/// returned. Funds will be placed in either the `free` balance or the `reserved` balance,
-	/// depending on the `status`.
-	///
-	/// As much funds up to `value` will be deducted as possible. If this is less than `value`,
-	/// then `Ok(non_zero)` will be returned.
-	fn repatriate_reserved(
-		slashed: &AccountId,
-		beneficiary: &AccountId,
-		value: Self::Balance,
-		status: BalanceStatus,
-	) -> result::Result<Self::Balance, DispatchError>;
-}
-
-/// An identifier for a lock. Used for disambiguating different locks so that
-/// they can be individually replaced or removed.
-pub type LockIdentifier = [u8; 8];
-
-/// A currency whose accounts can have liquidity restrictions.
-pub trait LockableCurrency<AccountId>: Currency<AccountId> {
-	/// The quantity used to denote time; usually just a `BlockNumber`.
-	type Moment;
-
-	/// The maximum number of locks a user should have on their account.
-	type MaxLocks: Get<u32>;
-
-	/// Create a new balance lock on account `who`.
-	///
-	/// If the new lock is valid (i.e. not already expired), it will push the struct to
-	/// the `Locks` vec in storage. Note that you can lock more funds than a user has.
-	///
-	/// If the lock `id` already exists, this will update it.
-	fn set_lock(
-		id: LockIdentifier,
-		who: &AccountId,
-		amount: Self::Balance,
-		reasons: WithdrawReasons,
-	);
-
-	/// Changes a balance lock (selected by `id`) so that it becomes less liquid in all
-	/// parameters or creates a new one if it does not exist.
-	///
-	/// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it
-	/// applies the most severe constraints of the two, while `set_lock` replaces the lock
-	/// with the new parameters. As in, `extend_lock` will set:
-	/// - maximum `amount`
-	/// - bitwise mask of all `reasons`
-	fn extend_lock(
-		id: LockIdentifier,
-		who: &AccountId,
-		amount: Self::Balance,
-		reasons: WithdrawReasons,
-	);
-
-	/// Remove an existing lock.
-	fn remove_lock(
-		id: LockIdentifier,
-		who: &AccountId,
-	);
-}
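An illustrative aside (not part of the patch): the "most severe constraints" rule that `extend_lock` documents reduces to a `max` over amounts and a bitwise union over reasons. A sketch with plain integers, where `Lock` and `extend` are names invented for the example:

#[derive(Clone, Copy, PartialEq, Debug)]
struct Lock { amount: u64, reasons: u8 }

// `set_lock` replaces a lock wholesale; `extend_lock` keeps the most severe of old and new.
fn extend(old: Lock, new: Lock) -> Lock {
	Lock { amount: old.amount.max(new.amount), reasons: old.reasons | new.reasons }
}

fn main() {
	let old = Lock { amount: 100, reasons: 0b0001 };
	let new = Lock { amount: 60, reasons: 0b0100 };
	assert_eq!(extend(old, new), Lock { amount: 100, reasons: 0b0101 });
}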
-
-/// A vesting schedule over a currency. This allows a particular currency to have vesting limits
-/// applied to it.
-pub trait VestingSchedule<AccountId> {
-	/// The quantity used to denote time; usually just a `BlockNumber`.
-	type Moment;
-
-	/// The currency that this schedule applies to.
-	type Currency: Currency<AccountId>;
-
-	/// Get the amount that is currently being vested and cannot be transferred out of this account.
-	/// Returns `None` if the account has no vesting schedule.
-	fn vesting_balance(who: &AccountId) -> Option<<Self::Currency as Currency<AccountId>>::Balance>;
-
-	/// Adds a vesting schedule to a given account.
-	///
-	/// If there already exists a vesting schedule for the given account, an `Err` is returned
-	/// and nothing is updated.
-	///
-	/// Is a no-op if the amount to be vested is zero.
-	///
-	/// NOTE: This doesn't alter the free balance of the account.
-	fn add_vesting_schedule(
-		who: &AccountId,
-		locked: <Self::Currency as Currency<AccountId>>::Balance,
-		per_block: <Self::Currency as Currency<AccountId>>::Balance,
-		starting_block: Self::Moment,
-	) -> DispatchResult;
-
-	/// Remove a vesting schedule for a given account.
-	///
-	/// NOTE: This doesn't alter the free balance of the account.
-	fn remove_vesting_schedule(who: &AccountId);
-}
-
-bitmask! {
-	/// Reasons for moving funds out of an account.
-	#[derive(Encode, Decode)]
-	pub mask WithdrawReasons: i8 where
-
-	/// Reason for moving funds out of an account.
-	#[derive(Encode, Decode)]
-	flags WithdrawReason {
-		/// In order to pay for (system) transaction costs.
-		TransactionPayment = 0b00000001,
-		/// In order to transfer ownership.
-		Transfer = 0b00000010,
-		/// In order to reserve some funds for a later return or repatriation.
-		Reserve = 0b00000100,
-		/// In order to pay some other (higher-level) fees.
-		Fee = 0b00001000,
-		/// In order to tip a validator for transaction inclusion.
-		Tip = 0b00010000,
-	}
-}
-
-pub trait Time {
-	type Moment: AtLeast32Bit + Parameter + Default + Copy;
-
-	fn now() -> Self::Moment;
-}
-
-/// Trait to deal with unix time.
-pub trait UnixTime {
-	/// Return duration since `SystemTime::UNIX_EPOCH`.
-	fn now() -> core::time::Duration;
-}
-
-impl WithdrawReasons {
-	/// Choose all variants except for `one`.
-	///
-	/// ```rust
-	/// # use frame_support::traits::{WithdrawReason, WithdrawReasons};
-	/// # fn main() {
-	/// assert_eq!(
-	///	WithdrawReason::Fee | WithdrawReason::Transfer | WithdrawReason::Reserve | WithdrawReason::Tip,
-	///	WithdrawReasons::except(WithdrawReason::TransactionPayment),
-	/// );
-	/// # }
-	/// ```
-	pub fn except(one: WithdrawReason) -> WithdrawReasons {
-		let mut mask = Self::all();
-		mask.toggle(one);
-		mask
-	}
-}
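An illustrative aside (not part of the patch): `WithdrawReasons::except` is just "all flags with one toggled off", i.e. an XOR against the full mask. The same arithmetic on a bare `i8`, using the flag values declared in the bitmask above:

fn main() {
	const TRANSACTION_PAYMENT: i8 = 0b00000001;
	const ALL: i8 = 0b00011111; // union of the five flags declared above
	// Equivalent of `WithdrawReasons::except(WithdrawReason::TransactionPayment)`.
	let except = ALL ^ TRANSACTION_PAYMENT;
	assert_eq!(except, 0b00011110); // Transfer | Reserve | Fee | Tip
}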
-
-/// Trait for type that can handle incremental changes to a set of account IDs.
-pub trait ChangeMembers<AccountId: Clone + Ord> {
-	/// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The
-	/// new set is given by `new`, and need not be sorted.
-	///
-	/// This resets any previous value of prime.
-	fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec<AccountId>) {
-		new.sort();
-		Self::change_members_sorted(incoming, outgoing, &new[..]);
-	}
-
-	/// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The
-	/// new set is thus given by `sorted_new` and **must be sorted**.
-	///
-	/// NOTE: This is the only function that needs to be implemented in `ChangeMembers`.
-	///
-	/// This resets any previous value of prime.
-	fn change_members_sorted(
-		incoming: &[AccountId],
-		outgoing: &[AccountId],
-		sorted_new: &[AccountId],
-	);
-
-	/// Set the new members; they **must already be sorted**. This will compute the diff and use it to
-	/// call `change_members_sorted`.
-	///
-	/// This resets any previous value of prime.
-	fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) {
-		let (incoming, outgoing) = Self::compute_members_diff(new_members, old_members);
-		Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members);
-	}
-
-	/// Set the new members; they **must already be sorted**. This will compute the diff and use it to
-	/// call `change_members_sorted`.
-	fn compute_members_diff(
-		new_members: &[AccountId],
-		old_members: &[AccountId]
-	) -> (Vec<AccountId>, Vec<AccountId>) {
-		let mut old_iter = old_members.iter();
-		let mut new_iter = new_members.iter();
-		let mut incoming = Vec::new();
-		let mut outgoing = Vec::new();
-		let mut old_i = old_iter.next();
-		let mut new_i = new_iter.next();
-		loop {
-			match (old_i, new_i) {
-				(None, None) => break,
-				(Some(old), Some(new)) if old == new => {
-					old_i = old_iter.next();
-					new_i = new_iter.next();
-				}
-				(Some(old), Some(new)) if old < new => {
-					outgoing.push(old.clone());
-					old_i = old_iter.next();
-				}
-				(Some(old), None) => {
-					outgoing.push(old.clone());
-					old_i = old_iter.next();
-				}
-				(_, Some(new)) => {
-					incoming.push(new.clone());
-					new_i = new_iter.next();
-				}
-			}
-		}
-		(incoming, outgoing)
-	}
-
-	/// Set the prime member.
-	fn set_prime(_prime: Option<AccountId>) {}
-}
-
-impl<T: Clone + Ord> ChangeMembers<T> for () {
-	fn change_members(_: &[T], _: &[T], _: Vec<T>) {}
-	fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {}
-	fn set_members_sorted(_: &[T], _: &[T]) {}
-	fn set_prime(_: Option<T>) {}
-}
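An illustrative aside (not part of the patch): the default `compute_members_diff` above is a two-pointer merge walk over two sorted slices. The same algorithm, self-contained over `u32` (the function name `diff` is an assumption for the example):

// Two-pointer walk over sorted slices, mirroring `compute_members_diff`.
fn diff(new_members: &[u32], old_members: &[u32]) -> (Vec<u32>, Vec<u32>) {
	let (mut incoming, mut outgoing) = (Vec::new(), Vec::new());
	let (mut old_iter, mut new_iter) = (old_members.iter(), new_members.iter());
	let (mut old_i, mut new_i) = (old_iter.next(), new_iter.next());
	loop {
		match (old_i, new_i) {
			(None, None) => break,
			// Present in both: not part of the diff.
			(Some(old), Some(new)) if old == new => {
				old_i = old_iter.next();
				new_i = new_iter.next();
			}
			// Only in the old set: an outgoing member.
			(Some(old), Some(new)) if old < new => {
				outgoing.push(*old);
				old_i = old_iter.next();
			}
			(Some(old), None) => {
				outgoing.push(*old);
				old_i = old_iter.next();
			}
			// Only in the new set: an incoming member.
			(_, Some(new)) => {
				incoming.push(*new);
				new_i = new_iter.next();
			}
		}
	}
	(incoming, outgoing)
}

fn main() {
	// 2 left the set, 3 joined it; 1 and 4 are unchanged.
	assert_eq!(diff(&[1, 3, 4], &[1, 2, 4]), (vec![3], vec![2]));
}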
-
-/// Trait for type that can handle the initialization of account IDs at genesis.
-pub trait InitializeMembers<AccountId> {
-	/// Initialize the members to the given `members`.
-	fn initialize_members(members: &[AccountId]);
-}
-
-impl<T> InitializeMembers<T> for () {
-	fn initialize_members(_: &[T]) {}
-}
-
-// A trait that is able to provide randomness.
-pub trait Randomness<Output> {
-	/// Get a "random" value
-	///
-	/// Being a deterministic blockchain, real randomness is difficult to come by. This gives you
-	/// something that approximates it. At best, this will be randomness which was
-	/// hard to predict a long time ago, but that has become easy to predict recently.
-	///
-	/// `subject` is a context identifier and allows you to get a
-	/// different result to other callers of this function; use it like
-	/// `random(&b"my context"[..])`.
-	fn random(subject: &[u8]) -> Output;
-
-	/// Get the basic random seed.
-	///
-	/// In general you won't want to use this, but rather `Self::random` which allows you to give a
-	/// subject for the random result and whose value will be independently low-influence random
-	/// from any other such seeds.
-	fn random_seed() -> Output {
-		Self::random(&[][..])
-	}
-}
-
-/// Provides an implementation of [`Randomness`] that should only be used in tests!
-pub struct TestRandomness;
-
-impl<Output: Decode + Default> Randomness<Output> for TestRandomness {
-	fn random(subject: &[u8]) -> Output {
-		Output::decode(&mut TrailingZeroInput::new(subject)).unwrap_or_default()
-	}
-}
-
-/// Trait to be used by block producing consensus engine modules to determine
-/// how late the current block is (e.g. in a slot-based proposal mechanism how
-/// many slots were skipped since the previous block).
-pub trait Lateness<N> {
-	/// Returns a generic measure of how late the current block is compared to
-	/// its parent.
-	fn lateness(&self) -> N;
-}
-
-impl<N: Zero> Lateness<N> for () {
-	fn lateness(&self) -> N {
-		Zero::zero()
-	}
-}
-
-/// Implementors of this trait provide information about whether or not some validator has
-/// been registered with them. The [Session module](../../pallet_session/index.html) is an
-/// implementor.
-pub trait ValidatorRegistration<ValidatorId> {
-	/// Returns true if the provided validator ID has been registered with the implementing runtime
-	/// module
-	fn is_registered(id: &ValidatorId) -> bool;
-}
-
-/// Provides information about the pallet setup in the runtime.
-///
-/// An implementor should be able to provide information about each pallet that
-/// is configured in `construct_runtime!`.
-pub trait PalletInfo {
-	/// Convert the given pallet `P` into its index as configured in the runtime.
-	fn index<P: 'static>() -> Option<usize>;
-	/// Convert the given pallet `P` into its name as configured in the runtime.
-	fn name<P: 'static>() -> Option<&'static str>;
-}
-
-impl PalletInfo for () {
-	fn index<P: 'static>() -> Option<usize> { Some(0) }
-	fn name<P: 'static>() -> Option<&'static str> { Some("test") }
-}
-
-/// The function and pallet name of the Call.
-#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)]
-pub struct CallMetadata {
-	/// Name of the function.
-	pub function_name: &'static str,
-	/// Name of the pallet to which the function belongs.
-	pub pallet_name: &'static str,
-}
-
-/// Gets the function name of the Call.
-pub trait GetCallName {
-	/// Return all function names.
-	fn get_call_names() -> &'static [&'static str];
-	/// Return the function name of the Call.
-	fn get_call_name(&self) -> &'static str;
-}
-
-/// Gets the metadata for the Call - function name and pallet name.
-pub trait GetCallMetadata {
-	/// Return all module names.
-	fn get_module_names() -> &'static [&'static str];
-	/// Return all function names for the given `module`.
-	fn get_call_names(module: &str) -> &'static [&'static str];
-	/// Return a [`CallMetadata`], containing function and pallet name of the Call.
-	fn get_call_metadata(&self) -> CallMetadata;
-}
-
-/// The block finalization trait. Implementing this lets you express what should happen
-/// for your module when the block is ending.
-#[impl_for_tuples(30)]
-pub trait OnFinalize<BlockNumber> {
-	/// The block is being finalized. Implement to have something happen.
-	fn on_finalize(_n: BlockNumber) {}
-}
-
-/// The block initialization trait. Implementing this lets you express what should happen
-/// for your module when the block is beginning (right before the first extrinsic is executed).
-pub trait OnInitialize<BlockNumber> {
-	/// The block is being initialized. Implement to have something happen.
-	///
-	/// Return the non-negotiable weight consumed in the block.
-	fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 }
-}
-
-#[impl_for_tuples(30)]
-impl<BlockNumber: Clone> OnInitialize<BlockNumber> for Tuple {
-	fn on_initialize(_n: BlockNumber) -> crate::weights::Weight {
-		let mut weight = 0;
-		for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(_n.clone())); )* );
-		weight
-	}
-}
-
-/// The runtime upgrade trait.
-///
-/// Implementing this lets you express what should happen when the runtime upgrades,
-/// and changes may need to occur to your module.
-pub trait OnRuntimeUpgrade {
-	/// Perform a module upgrade.
-	///
-	/// # Warning
-	///
-	/// This function will be called before we initialized any runtime state, aka `on_initialize`
-	/// wasn't called yet. So, information like the block number and any other
-	/// block local data are not accessible.
-	///
-	/// Return the non-negotiable weight consumed for runtime upgrade.
-	fn on_runtime_upgrade() -> crate::weights::Weight { 0 }
-}
-
-#[impl_for_tuples(30)]
-impl OnRuntimeUpgrade for Tuple {
-	fn on_runtime_upgrade() -> crate::weights::Weight {
-		let mut weight = 0;
-		for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* );
-		weight
-	}
-}
-
-/// Off-chain computation trait.
-///
-/// Implementing this trait on a module allows you to perform long-running tasks
-/// that make (by default) validators generate transactions that feed results
-/// of those long-running computations back on chain.
-///
-/// NOTE: This function runs off-chain, so it can access the block state,
-/// but cannot preform any alterations. More specifically alterations are
-/// not forbidden, but they are not persisted in any way after the worker
-/// has finished.
-#[impl_for_tuples(30)]
-pub trait OffchainWorker<BlockNumber> {
-	/// This function is being called after every block import (when fully synced).
-	///
-	/// Implement this and use any of the `Offchain` `sp_io` set of APIs
-	/// to perform off-chain computations, calls and submit transactions
-	/// with results to trigger any on-chain changes.
-	/// Any state alterations are lost and are not persisted.
-	fn offchain_worker(_n: BlockNumber) {}
-}
-
-pub mod schedule {
-	use super::*;
-
-	/// Information relating to the period of a scheduled task. First item is the length of the
-	/// period and the second is the number of times it should be executed in total before the task
-	/// is considered finished and removed.
-	pub type Period<BlockNumber> = (BlockNumber, u32);
-
-	/// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning
-	/// higher priority.
-	pub type Priority = u8;
-
-	/// The dispatch time of a scheduled task.
-	#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)]
-	pub enum DispatchTime<BlockNumber> {
-		/// At specified block.
-		At(BlockNumber),
-		/// After specified number of blocks.
-		After(BlockNumber),
-	}
-
-	/// The highest priority. We invert the value so that normal sorting will place the highest
-	/// priority at the beginning of the list.
-	pub const HIGHEST_PRIORITY: Priority = 0;
-	/// Anything of this value or lower will definitely be scheduled on the block that they ask for, even
-	/// if it breaches the `MaximumWeight` limitation.
-	pub const HARD_DEADLINE: Priority = 63;
-	/// The lowest priority. Most stuff should be around here.
-	pub const LOWEST_PRIORITY: Priority = 255;
-
-	/// A type that can be used as a scheduler.
-	pub trait Anon<BlockNumber, Call, Origin> {
-		/// An address which can be used for removing a scheduled task.
-		type Address: Codec + Clone + Eq + EncodeLike + Debug;
-
-		/// Schedule a dispatch to happen at the beginning of some block in the future.
-		///
-		/// This is not named.
-		fn schedule(
-			when: DispatchTime<BlockNumber>,
-			maybe_periodic: Option<Period<BlockNumber>>,
-			priority: Priority,
-			origin: Origin,
-			call: Call
-		) -> Result<Self::Address, DispatchError>;
-
-		/// Cancel a scheduled task. If periodic, then it will cancel all further instances of that,
-		/// also.
-		///
-		/// Will return an error if the `address` is invalid.
-		///
-		/// NOTE: This guaranteed to work only *before* the point that it is due to be executed.
-		/// If it ends up being delayed beyond the point of execution, then it cannot be cancelled.
-		///
-		/// NOTE2: This will not work to cancel periodic tasks after their initial execution. For
-		/// that, you must name the task explicitly using the `Named` trait.
-		fn cancel(address: Self::Address) -> Result<(), ()>;
-
-		/// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed
-		/// only if it is executed *before* the currently scheduled block. For periodic tasks,
-		/// this dispatch is guaranteed to succeed only before the *initial* execution; for
-		/// others, use `reschedule_named`.
-		///
-		/// Will return an error if the `address` is invalid.
-		fn reschedule(
-			address: Self::Address,
-			when: DispatchTime<BlockNumber>,
-		) -> Result<Self::Address, DispatchError>;
-
-		/// Return the next dispatch time for a given task.
-		///
-		/// Will return an error if the `address` is invalid.
-		fn next_dispatch_time(address: Self::Address) -> Result<BlockNumber, ()>;
-	}
-
-	/// A type that can be used as a scheduler.
-	pub trait Named<BlockNumber, Call, Origin> {
-		/// An address which can be used for removing a scheduled task.
-		type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug;
-
-		/// Schedule a dispatch to happen at the beginning of some block in the future.
-		///
-		/// - `id`: The identity of the task. This must be unique and will return an error if not.
-		fn schedule_named(
-			id: Vec<u8>,
-			when: DispatchTime<BlockNumber>,
-			maybe_periodic: Option<Period<BlockNumber>>,
-			priority: Priority,
-			origin: Origin,
-			call: Call
-		) -> Result<Self::Address, ()>;
-
-		/// Cancel a scheduled, named task. If periodic, then it will cancel all further instances
-		/// of that, also.
-		///
-		/// Will return an error if the `id` is invalid.
-		///
-		/// NOTE: This guaranteed to work only *before* the point that it is due to be executed.
-		/// If it ends up being delayed beyond the point of execution, then it cannot be cancelled.
-		fn cancel_named(id: Vec<u8>) -> Result<(), ()>;
-
-		/// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed
-		/// only if it is executed *before* the currently scheduled block.
-		fn reschedule_named(
-			id: Vec<u8>,
-			when: DispatchTime<BlockNumber>,
-		) -> Result<Self::Address, DispatchError>;
-
-		/// Return the next dispatch time for a given task.
-		///
-		/// Will return an error if the `id` is invalid.
-		fn next_dispatch_time(id: Vec<u8>) -> Result<BlockNumber, ()>;
-	}
-}
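An illustrative aside (not part of the patch): the two `DispatchTime` variants differ only in how they resolve against the current block. How a scheduler implementation would typically interpret them is an assumption here, not something mandated by the traits above:

enum DispatchTime { At(u32), After(u32) }

// Illustrative resolution of a `DispatchTime` against the current block number.
fn resolve(when: DispatchTime, now: u32) -> u32 {
	match when {
		DispatchTime::At(block) => block,          // absolute
		DispatchTime::After(delay) => now.saturating_add(delay), // relative
	}
}

fn main() {
	assert_eq!(resolve(DispatchTime::At(100), 42), 100);
	assert_eq!(resolve(DispatchTime::After(100), 42), 142);
}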
-
-/// Some sort of check on the origin is performed by this object.
-pub trait EnsureOrigin<OuterOrigin> {
-	/// A return type.
-	type Success;
-	/// Perform the origin check.
-	fn ensure_origin(o: OuterOrigin) -> result::Result<Self::Success, BadOrigin> {
-		Self::try_origin(o).map_err(|_| BadOrigin)
-	}
-	/// Perform the origin check.
-	fn try_origin(o: OuterOrigin) -> result::Result<Self::Success, OuterOrigin>;
-
-	/// Returns an outer origin capable of passing `try_origin` check.
-	///
-	/// ** Should be used for benchmarking only!!! **
-	#[cfg(feature = "runtime-benchmarks")]
-	fn successful_origin() -> OuterOrigin;
-}
-
-/// Type that can be dispatched with an origin but without checking the origin filter.
-///
-/// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by
-/// `construct_runtime` and `impl_outer_dispatch`.
-pub trait UnfilteredDispatchable {
-	/// The origin type of the runtime, (i.e. `frame_system::Trait::Origin`).
-	type Origin;
-
-	/// Dispatch this call but do not check the filter in origin.
-	fn dispatch_bypass_filter(self, origin: Self::Origin) -> crate::dispatch::DispatchResultWithPostInfo;
-}
-
-/// Methods available on `frame_system::Trait::Origin`.
-pub trait OriginTrait: Sized {
-	/// Runtime call type, as in `frame_system::Trait::Call`
-	type Call;
-
-	/// The caller origin, overarching type of all pallets origins.
-	type PalletsOrigin;
-
-	/// The AccountId used across the system.
-	type AccountId;
-
-	/// Add a filter to the origin.
-	fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static);
-
-	/// Reset origin filters to default one, i.e `frame_system::Trait::BaseCallFilter`.
-	fn reset_filter(&mut self);
-
-	/// Replace the caller with caller from the other origin
-	fn set_caller_from(&mut self, other: impl Into<Self>);
-
-	/// Filter the call, if false then call is filtered out.
-	fn filter_call(&self, call: &Self::Call) -> bool;
-
-	/// Get the caller.
-	fn caller(&self) -> &Self::PalletsOrigin;
-
-	/// Create with system none origin and `frame-system::Trait::BaseCallFilter`.
-	fn none() -> Self;
-
-	/// Create with system root origin and no filter.
-	fn root() -> Self;
-
-	/// Create with system signed origin and `frame-system::Trait::BaseCallFilter`.
-	fn signed(by: Self::AccountId) -> Self;
-}
-
-/// Trait to be used when types are exactly same.
-///
-/// This allow to convert back and forth from type, a reference and a mutable reference.
-pub trait IsType<T>: Into<T> + From<T> {
-	/// Cast reference.
-	fn from_ref(t: &T) -> &Self;
-
-	/// Cast reference.
-	fn into_ref(&self) -> &T;
-
-	/// Cast mutable reference.
-	fn from_mut(t: &mut T) -> &mut Self;
-
-	/// Cast mutable reference.
-	fn into_mut(&mut self) -> &mut T;
-}
-
-impl<T> IsType<T> for T {
-	fn from_ref(t: &T) -> &Self { t }
-	fn into_ref(&self) -> &T { self }
-	fn from_mut(t: &mut T) -> &mut Self { t }
-	fn into_mut(&mut self) -> &mut T { self }
-}
-
-/// An instance of a pallet in the storage.
-///
-/// It is required that these instances are unique, to support multiple instances per pallet in the same runtime!
-///
-/// E.g. for module MyModule default instance will have prefix "MyModule" and other instances
-/// "InstanceNMyModule".
-pub trait Instance: 'static {
-	/// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule"
-	const PREFIX: &'static str ;
-}
-
-/// A trait similar to `Convert` to convert values from `B` an abstract balance type
-/// into u64 and back from u128. (This conversion is used in election and other places where complex
-/// calculation over balance type is needed)
-///
-/// Total issuance of the currency is passed in, but an implementation of this trait may or may not
-/// use it.
-///
-/// # WARNING
-///
-/// the total issuance being passed in implies that the implementation must be aware of the fact
-/// that its values can affect the outcome. This implies that if the vote value is dependent on the
-/// total issuance, it should never ber written to storage for later re-use.
-pub trait CurrencyToVote<B> {
-	/// Convert balance to u64.
-	fn to_vote(value: B, issuance: B) -> u64;
-
-	/// Convert u128 to balance.
-	fn to_currency(value: u128, issuance: B) -> B;
-}
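An illustrative aside (not part of the patch): the u128-to-u64 downscaling described in the next block is driven by a single factor, `max(total_issuance / u64::MAX, 1)`. The same arithmetic sketched numerically; `min` stands in for the saturating conversion of the real code:

fn factor(issuance: u128) -> u128 {
	(issuance / u64::MAX as u128).max(1)
}

fn to_vote(value: u128, issuance: u128) -> u64 {
	// The real impl uses a saturating conversion; `min` emulates it here.
	(value / factor(issuance)).min(u64::MAX as u128) as u64
}

fn main() {
	// Small chains: factor is 1, the conversion is lossless.
	assert_eq!(to_vote(1_000, 1_000_000), 1_000);
	// Huge issuance: balances are scaled down so the largest still fits in u64.
	let issuance = u64::MAX as u128 * 4;
	assert_eq!(to_vote(issuance, issuance), u64::MAX);
}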
-
-/// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128.
-///
-/// The factor is the `(total_issuance / u64::max()).max(1)`, represented as u64. Let's look at the
-/// important cases:
-///
-/// If the chain's total issuance is less than u64::max(), this will always be 1, which means that
-/// the factor will not have any effect. In this case, any account's balance is also less. Thus,
-/// both of the conversions are basically an `as`; Any balance can fit in u64.
-///
-/// If the chain's total issuance is more than 2*u64::max(), then a factor might be multiplied and
-/// divided upon conversion.
-pub struct U128CurrencyToVote;
-
-impl U128CurrencyToVote {
-	fn factor(issuance: u128) -> u128 {
-		(issuance / u64::max_value() as u128).max(1)
-	}
-}
-
-impl CurrencyToVote<u128> for U128CurrencyToVote {
-	fn to_vote(value: u128, issuance: u128) -> u64 {
-		(value / Self::factor(issuance)).saturated_into()
-	}
+mod members;
+#[allow(deprecated)]
+pub use members::{AllowAll, DenyAll, Filter};
+pub use members::{
+	AsContains, ChangeMembers, Contains, ContainsLengthBound, Everything, InitializeMembers,
+	IsInVec, Nothing, SortedMembers,
+};
-	fn to_currency(value: u128, issuance: u128) -> u128 {
-		value.saturating_mul(Self::factor(issuance))
-	}
-}
+mod validation;
+pub use validation::{
+	DisabledValidators, EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor,
+	KeyOwnerProofSystem, Lateness, OneSessionHandler, ValidatorRegistration, ValidatorSet,
+	ValidatorSetWithIdentification, VerifySeal,
+};
+mod filter;
+pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter, IntegrityTest};
-/// A naive implementation of `CurrencyConvert` that simply saturates all conversions.
-///
-/// # Warning
-///
-/// This is designed to be used mostly for testing. Use with care, and think about the consequences.
-pub struct SaturatingCurrencyToVote;
+mod misc;
+pub use misc::{
+	Backing, ConstU32, EnsureInherentsAreFirst, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get,
+	GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker,
+	OnKilledAccount, OnNewAccount, SameOrOther, Time, TryDrop, UnixTime,
+};
-impl<B: UniqueSaturatedInto<u64> + UniqueSaturatedFrom<u128>> CurrencyToVote<B> for SaturatingCurrencyToVote {
-	fn to_vote(value: B, _: B) -> u64 {
-		value.unique_saturated_into()
-	}
+mod stored_map;
+pub use stored_map::{StorageMapShim, StoredMap};
+mod randomness;
+pub use randomness::Randomness;
-	fn to_currency(value: u128, _: B) -> B {
-		B::unique_saturated_from(value)
-	}
-}
+mod metadata;
+pub use metadata::{
+	CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, PalletInfo, PalletInfoAccess,
+	StorageVersion, STORAGE_VERSION_STORAGE_KEY_POSTFIX,
+};
-/// Something that can be checked to be a of sub type `T`.
-///
-/// This is useful for enums where each variant encapsulates a different sub type, and
-/// you need access to these sub types.
-///
-/// For example, in FRAME, this trait is implemented for the runtime `Call` enum. Pallets use this
-/// to check if a certain call is an instance of the local pallet's `Call` enum.
-///
-/// # Example
-///
-/// ```
-/// # use frame_support::traits::IsSubType;
-///
-/// enum Test {
-///	String(String),
-///	U32(u32),
-/// }
-///
-/// impl IsSubType<String> for Test {
-///	fn is_sub_type(&self) -> Option<&String> {
-///		match self {
-///			Self::String(ref r) => Some(r),
-///			_ => None,
-///		}
-///	}
-/// }
-///
-/// impl IsSubType<u32> for Test {
-///	fn is_sub_type(&self) -> Option<&u32> {
-///		match self {
-///			Self::U32(ref r) => Some(r),
-///			_ => None,
-///		}
-///	}
-/// }
-///
-/// fn main() {
-///	let data = Test::String("test".into());
-///
-///	assert_eq!("test", IsSubType::<String>::is_sub_type(&data).unwrap().as_str());
-/// }
-/// ```
-pub trait IsSubType<T> {
-	/// Returns `Some(_)` if `self` is an instance of sub type `T`.
-	fn is_sub_type(&self) -> Option<&T>;
-}
+mod hooks;
+#[cfg(feature = "std")]
+pub use hooks::GenesisBuild;
+pub use hooks::{
+	Hooks, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet,
+};
+#[cfg(feature = "try-runtime")]
+pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX};
-#[cfg(test)]
-mod tests {
-	use super::*;
+pub mod schedule;
+mod storage;
+pub use storage::{
+	Instance, PartialStorageInfoTrait, StorageInfo, StorageInfoTrait, StorageInstance,
+};
-	#[test]
-	fn on_initialize_and_on_runtime_upgrade_weight_merge_works() {
-		struct Test;
-		impl OnInitialize<u8> for Test {
-			fn on_initialize(_n: u8) -> crate::weights::Weight {
-				10
-			}
-		}
-		impl OnRuntimeUpgrade for Test {
-			fn on_runtime_upgrade() -> crate::weights::Weight {
-				20
-			}
-		}
+mod dispatch;
+pub use dispatch::{EnsureOrigin, OriginTrait, UnfilteredDispatchable};
-		assert_eq!(<(Test, Test)>::on_initialize(0), 20);
-		assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40);
-	}
-}
+mod voting;
+pub use voting::{CurrencyToVote, SaturatingCurrencyToVote, U128CurrencyToVote};
diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs
new file mode 100644
index 0000000000000..f82628ede18cc
--- /dev/null
+++ b/frame/support/src/traits/dispatch.rs
@@ -0,0 +1,93 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for dealing with dispatching calls and the origin from which they are dispatched.
+
+use crate::dispatch::DispatchResultWithPostInfo;
+use sp_runtime::traits::BadOrigin;
+
+/// Some sort of check on the origin is performed by this object.
+pub trait EnsureOrigin<OuterOrigin> {
+	/// A return type.
+	type Success;
+	/// Perform the origin check.
+	fn ensure_origin(o: OuterOrigin) -> Result<Self::Success, BadOrigin> {
+		Self::try_origin(o).map_err(|_| BadOrigin)
+	}
+	/// Perform the origin check.
+	fn try_origin(o: OuterOrigin) -> Result<Self::Success, OuterOrigin>;
+
+	/// Returns an outer origin capable of passing `try_origin` check.
+	///
+	/// ** Should be used for benchmarking only!!! **
+	#[cfg(feature = "runtime-benchmarks")]
+	fn successful_origin() -> OuterOrigin;
+}
+
+/// Type that can be dispatched with an origin but without checking the origin filter.
+///
+/// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by
+/// `construct_runtime`.
+pub trait UnfilteredDispatchable {
+	/// The origin type of the runtime, (i.e. `frame_system::Config::Origin`).
+	type Origin;
+
+	/// Dispatch this call but do not check the filter in origin.
+	fn dispatch_bypass_filter(self, origin: Self::Origin) -> DispatchResultWithPostInfo;
+}
+
+/// Methods available on `frame_system::Config::Origin`.
+pub trait OriginTrait: Sized {
+	/// Runtime call type, as in `frame_system::Config::Call`
+	type Call;
+
+	/// The caller origin, overarching type of all pallets origins.
+	type PalletsOrigin;
+
+	/// The AccountId used across the system.
+	type AccountId;
+
+	/// Add a filter to the origin.
+	fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static);
+
+	/// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`.
+	fn reset_filter(&mut self);
+
+	/// Replace the caller with caller from the other origin
+	fn set_caller_from(&mut self, other: impl Into<Self>);
+
+	/// Filter the call, if false then call is filtered out.
+	fn filter_call(&self, call: &Self::Call) -> bool;
+
+	/// Get the caller.
+	fn caller(&self) -> &Self::PalletsOrigin;
+
+	/// Do something with the caller, consuming self but returning it if the caller was unused.
+	fn try_with_caller<R>(
+		self,
+		f: impl FnOnce(Self::PalletsOrigin) -> Result<R, Self::PalletsOrigin>,
+	) -> Result<R, Self>;
+
+	/// Create with system none origin and `frame-system::Config::BaseCallFilter`.
+	fn none() -> Self;
+
+	/// Create with system root origin and no filter.
+	fn root() -> Self;
+
+	/// Create with system signed origin and `frame-system::Config::BaseCallFilter`.
+	fn signed(by: Self::AccountId) -> Self;
+}
diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs
new file mode 100644
index 0000000000000..c67ffc3c3a11e
--- /dev/null
+++ b/frame/support/src/traits/filter.rs
@@ -0,0 +1,286 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits and associated utilities for dealing with abstract constraint filters.
+
+pub use super::members::Contains;
+use sp_std::marker::PhantomData;
+
+/// Trait to add a constraint onto the filter.
+pub trait FilterStack<T>: Contains<T> {
+	/// The type used to archive the stack.
+	type Stack;
+
+	/// Add a new `constraint` onto the filter.
+	fn push(constraint: impl Fn(&T) -> bool + 'static);
+
+	/// Removes the most recently pushed, and not-yet-popped, constraint from the filter.
+	fn pop();
+
+	/// Clear the filter, returning a value that may be used later to `restore` it.
+	fn take() -> Self::Stack;
+
+	/// Restore the filter from a previous `take` operation.
+	fn restore(taken: Self::Stack);
+}
+
+/// Guard type for pushing a constraint to a `FilterStack` and popping when dropped.
+pub struct FilterStackGuard<F: FilterStack<T>, T>(PhantomData<(F, T)>);
+
+/// Guard type for clearing all pushed constraints from a `FilterStack` and reinstating them when
+/// dropped.
+pub struct ClearFilterGuard<F: FilterStack<T>, T>(Option<F::Stack>, PhantomData<T>);
+
+impl<F: FilterStack<T>, T> FilterStackGuard<F, T> {
+	/// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when
+	/// this instance is dropped.
+	pub fn new(constraint: impl Fn(&T) -> bool + 'static) -> Self {
+		F::push(constraint);
+		Self(PhantomData)
+	}
+}
+
+impl<F: FilterStack<T>, T> Drop for FilterStackGuard<F, T> {
+	fn drop(&mut self) {
+		F::pop();
+	}
+}
+
+impl<F: FilterStack<T>, T> ClearFilterGuard<F, T> {
+	/// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when
+	/// this instance is dropped.
+	pub fn new() -> Self {
+		Self(Some(F::take()), PhantomData)
+	}
+}
+
+impl<F: FilterStack<T>, T> Drop for ClearFilterGuard<F, T> {
+	fn drop(&mut self) {
+		if let Some(taken) = self.0.take() {
+			F::restore(taken);
+		}
+	}
+}
+
+/// Simple trait for providing a filter over a reference to some type, given an instance of itself.
+pub trait InstanceFilter<T>: Sized + Send + Sync {
+	/// Determine if a given value should be allowed through the filter (returns `true`) or not.
+	fn filter(&self, _: &T) -> bool;
+
+	/// Determines whether `self` matches at least everything that `_o` does.
+	fn is_superset(&self, _o: &Self) -> bool {
+		false
+	}
+}
+
+impl<T> InstanceFilter<T> for () {
+	fn filter(&self, _: &T) -> bool {
+		true
+	}
+	fn is_superset(&self, _o: &Self) -> bool {
+		true
+	}
+}
+
+/// Re-exported for the macro.
+#[doc(hidden)]
+pub use sp_std::{
+	boxed::Box,
+	cell::RefCell,
+	mem::{swap, take},
+	vec::Vec,
+};
+
+#[macro_export]
+macro_rules! impl_filter_stack {
+	($target:ty, $base:ty, $call:ty, $module:ident) => {
+		#[cfg(feature = "std")]
+		mod $module {
+			#[allow(unused_imports)]
+			use super::*;
+			use $crate::traits::filter::{swap, take, RefCell, Vec, Box, Contains, FilterStack};
+
+			thread_local! {
+				static FILTER: RefCell<Vec<Box<dyn Fn(&$call) -> bool + 'static>>> = RefCell::new(Vec::new());
+			}
+
+			impl Contains<$call> for $target {
+				fn contains(call: &$call) -> bool {
+					<$base>::contains(call) &&
+					FILTER.with(|filter| filter.borrow().iter().all(|f| f(call)))
+				}
+			}
+
+			impl FilterStack<$call> for $target {
+				type Stack = Vec<Box<dyn Fn(&$call) -> bool + 'static>>;
+				fn push(f: impl Fn(&$call) -> bool + 'static) {
+					FILTER.with(|filter| filter.borrow_mut().push(Box::new(f)));
+				}
+				fn pop() {
+					FILTER.with(|filter| filter.borrow_mut().pop());
+				}
+				fn take() -> Self::Stack {
+					FILTER.with(|filter| take(filter.borrow_mut().as_mut()))
+				}
+				fn restore(mut s: Self::Stack) {
+					FILTER.with(|filter| swap(filter.borrow_mut().as_mut(), &mut s));
+				}
+			}
+		}
+
+		#[cfg(not(feature = "std"))]
+		mod $module {
+			#[allow(unused_imports)]
+			use super::*;
+			use $crate::traits::{swap, take, RefCell, Vec, Box, Contains, FilterStack};
+
+			struct ThisFilter(RefCell<Vec<Box<dyn Fn(&$call) -> bool + 'static>>>);
+			// NOTE: Safe only in wasm (guarded above) because there's only one thread.
+			unsafe impl Send for ThisFilter {}
+			unsafe impl Sync for ThisFilter {}
+
+			static FILTER: ThisFilter = ThisFilter(RefCell::new(Vec::new()));
+
+			impl Contains<$call> for $target {
+				fn contains(call: &$call) -> bool {
+					<$base>::contains(call) && FILTER.0.borrow().iter().all(|f| f(call))
+				}
+			}
+
+			impl FilterStack<$call> for $target {
+				type Stack = Vec<Box<dyn Fn(&$call) -> bool + 'static>>;
+				fn push(f: impl Fn(&$call) -> bool + 'static) {
+					FILTER.0.borrow_mut().push(Box::new(f));
+				}
+				fn pop() {
+					FILTER.0.borrow_mut().pop();
+				}
+				fn take() -> Self::Stack {
+					take(FILTER.0.borrow_mut().as_mut())
+				}
+				fn restore(mut s: Self::Stack) {
+					swap(FILTER.0.borrow_mut().as_mut(), &mut s);
+				}
+			}
+		}
+	}
+}
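An illustrative aside (not part of the patch): stripped of the `thread_local!`/`static` plumbing, `impl_filter_stack!` boils down to a base predicate combined with a stack of boxed closures via `all`. A self-contained sketch of that core; the names `Predicate` and `contains` are assumptions for the example:

type Predicate = Box<dyn Fn(&u32) -> bool>;

// Analogue of the generated `Contains::contains`: base filter AND every pushed constraint.
fn contains(stack: &[Predicate], base: impl Fn(&u32) -> bool, x: &u32) -> bool {
	base(x) && stack.iter().all(|f| f(x))
}

fn main() {
	let mut stack: Vec<Predicate> = Vec::new();
	let base = |x: &u32| x % 2 == 0;
	assert!(contains(&stack, base, &42));
	stack.push(Box::new(|x| *x < 42)); // like `FilterStack::push`
	assert!(!contains(&stack, base, &42));
	stack.pop(); // like `FilterStack::pop`
	assert!(contains(&stack, base, &42));
}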
+
+/// Type that provide some integrity tests.
+///
+/// This implemented for modules by `decl_module`.
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+pub trait IntegrityTest {
+	/// Run integrity test.
+	///
+	/// The test is not executed in an externalities provided environment.
+	fn integrity_test() {}
+}
+
+#[cfg(test)]
+pub mod test_impl_filter_stack {
+	use super::*;
+
+	pub struct IsCallable;
+	pub struct BaseFilter;
+	impl Contains<u32> for BaseFilter {
+		fn contains(x: &u32) -> bool {
+			x % 2 == 0
+		}
+	}
+	impl_filter_stack!(
+		crate::traits::filter::test_impl_filter_stack::IsCallable,
+		crate::traits::filter::test_impl_filter_stack::BaseFilter,
+		u32,
+		is_callable
+	);
+
+	#[test]
+	fn impl_filter_stack_should_work() {
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(IsCallable::contains(&42));
+		assert!(!IsCallable::contains(&43));
+
+		IsCallable::push(|x| *x < 42);
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(!IsCallable::contains(&42));
+
+		IsCallable::push(|x| *x % 3 == 0);
+		assert!(IsCallable::contains(&36));
+		assert!(!IsCallable::contains(&40));
+
+		IsCallable::pop();
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(!IsCallable::contains(&42));
+
+		let saved = IsCallable::take();
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(IsCallable::contains(&42));
+		assert!(!IsCallable::contains(&43));
+
+		IsCallable::restore(saved);
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(!IsCallable::contains(&42));
+
+		IsCallable::pop();
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(IsCallable::contains(&42));
+		assert!(!IsCallable::contains(&43));
+	}
+
+	#[test]
+	fn guards_should_work() {
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(IsCallable::contains(&42));
+		assert!(!IsCallable::contains(&43));
+		{
+			let _guard_1 = FilterStackGuard::<IsCallable, u32>::new(|x| *x < 42);
+			assert!(IsCallable::contains(&36));
+			assert!(IsCallable::contains(&40));
+			assert!(!IsCallable::contains(&42));
+			{
+				let _guard_2 = FilterStackGuard::<IsCallable, u32>::new(|x| *x % 3 == 0);
+				assert!(IsCallable::contains(&36));
+				assert!(!IsCallable::contains(&40));
+			}
+			assert!(IsCallable::contains(&36));
+			assert!(IsCallable::contains(&40));
+			assert!(!IsCallable::contains(&42));
+			{
+				let _guard_2 = ClearFilterGuard::<IsCallable, u32>::new();
+				assert!(IsCallable::contains(&36));
+				assert!(IsCallable::contains(&40));
+				assert!(IsCallable::contains(&42));
+				assert!(!IsCallable::contains(&43));
+			}
+			assert!(IsCallable::contains(&36));
+			assert!(IsCallable::contains(&40));
+			assert!(!IsCallable::contains(&42));
+		}
+		assert!(IsCallable::contains(&36));
+		assert!(IsCallable::contains(&40));
+		assert!(IsCallable::contains(&42));
+		assert!(!IsCallable::contains(&43));
+	}
+}
diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs
new file mode 100644
index 0000000000000..adba88e5acbf3
--- /dev/null
+++ b/frame/support/src/traits/hooks.rs
@@ -0,0 +1,402 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for hooking tasks to events in a blockchain's lifecycle.
+
+use impl_trait_for_tuples::impl_for_tuples;
+use sp_arithmetic::traits::Saturating;
+use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize};
+
+/// The block initialization trait.
+///
+/// Implementing this lets you express what should happen for your pallet when the block is
+/// beginning (right before the first extrinsic is executed).
+pub trait OnInitialize<BlockNumber> {
+	/// The block is being initialized. Implement to have something happen.
+	///
+	/// Return the non-negotiable weight consumed in the block.
+	///
+	/// NOTE: This function is called BEFORE ANY extrinsic in a block is applied,
+	/// including inherent extrinsics. Hence for instance, if your runtime includes
+	/// `pallet_timestamp`, the `timestamp` is not yet up to date at this point.
+	fn on_initialize(_n: BlockNumber) -> crate::weights::Weight {
+		0
+	}
+}
+
+#[impl_for_tuples(30)]
+impl<BlockNumber: Clone> OnInitialize<BlockNumber> for Tuple {
+	fn on_initialize(n: BlockNumber) -> crate::weights::Weight {
+		let mut weight = 0;
+		for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* );
+		weight
+	}
+}
+
+/// The block finalization trait.
+///
+/// Implementing this lets you express what should happen for your pallet when the block is ending.
+#[impl_for_tuples(30)]
+pub trait OnFinalize<BlockNumber> {
+	/// The block is being finalized. Implement to have something happen.
+	///
+	/// NOTE: This function is called AFTER ALL extrinsics in a block are applied,
+	/// including inherent extrinsics.
+	fn on_finalize(_n: BlockNumber) {}
+}
+
+/// The block's on idle trait.
+///
+/// Implementing this lets you express what should happen for your pallet before
+/// block finalization (see `on_finalize` hook) in case any remaining weight is left.
+pub trait OnIdle<BlockNumber> {
+	/// The block is being finalized.
+	/// Implement to have something happen in case there is leftover weight.
+	/// Check the passed `remaining_weight` to make sure it is high enough to allow for
+	/// your pallet's extra computation.
+	///
+	/// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics -
+	/// in a block are applied but before `on_finalize` is executed.
+	fn on_idle(
+		_n: BlockNumber,
+		_remaining_weight: crate::weights::Weight,
+	) -> crate::weights::Weight {
+		0
+	}
+}
+
+#[impl_for_tuples(30)]
+impl<BlockNumber: Copy + AtLeast32BitUnsigned> OnIdle<BlockNumber> for Tuple {
+	fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight {
+		let on_idle_functions: &[fn(
+			BlockNumber,
+			crate::weights::Weight,
+		) -> crate::weights::Weight] = &[for_tuples!( #( Tuple::on_idle ),* )];
+		let mut weight = 0;
+		let len = on_idle_functions.len();
+		let start_index = n % (len as u32).into();
+		let start_index = start_index.try_into().ok().expect(
+			"`start_index % len` always fits into `usize`, because `len` can be in maximum `usize::MAX`; qed"
+		);
+		for on_idle in on_idle_functions.iter().cycle().skip(start_index).take(len) {
+			let adjusted_remaining_weight = remaining_weight.saturating_sub(weight);
+			weight = weight.saturating_add(on_idle(n, adjusted_remaining_weight));
+		}
+		weight
+	}
+}
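An illustrative aside (not part of the patch): the rotation in the `OnIdle` tuple implementation is plain modular arithmetic, so for three pallets the starting pallet cycles with the block number. A sketch of just that index math (`rotation` is a name invented for the example; the real behaviour is exercised by the `on_idle_round_robin_works` test further down):

fn rotation(n: u32, len: usize) -> Vec<usize> {
	let start = (n as usize) % len; // like `start_index = n % len`
	(0..len).map(|i| (start + i) % len).collect()
}

fn main() {
	assert_eq!(rotation(0, 3), vec![0, 1, 2]);
	assert_eq!(rotation(1, 3), vec![1, 2, 0]);
	assert_eq!(rotation(4, 3), vec![1, 2, 0]);
}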
+
+/// A trait that will be called at genesis.
+///
+/// Implementing this trait for a pallet lets you express operations that should
+/// happen at genesis. It will be called in an externalities provided environment and
+/// will see the genesis state after all pallets have written their genesis state.
+#[impl_for_tuples(30)]
+pub trait OnGenesis {
+	/// Something that should happen at genesis.
+	fn on_genesis() {}
+}
+
+/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`].
+#[cfg(feature = "try-runtime")]
+pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__";
+
+/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing.
+#[cfg(feature = "try-runtime")]
+pub trait OnRuntimeUpgradeHelpersExt {
+	/// Generate a storage key unique to this runtime upgrade.
+	///
+	/// This can be used to communicate data from pre-upgrade to post-upgrade state and check
+	/// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`].
+	#[cfg(feature = "try-runtime")]
+	fn storage_key(ident: &str) -> [u8; 32] {
+		crate::storage::storage_prefix(ON_RUNTIME_UPGRADE_PREFIX, ident.as_bytes())
+	}
+
+	/// Get temporary storage data written by [`Self::set_temp_storage`].
+	///
+	/// Returns `None` if either the data is unavailable or un-decodable.
+	///
+	/// A `at` storage identifier must be provided to indicate where the storage is being read from.
+	#[cfg(feature = "try-runtime")]
+	fn get_temp_storage<T: codec::Decode>(at: &str) -> Option<T> {
+		sp_io::storage::get(&Self::storage_key(at))
+			.and_then(|bytes| codec::Decode::decode(&mut &*bytes).ok())
+	}
+
+	/// Write some temporary data to a specific storage that can be read (potentially in
+	/// post-upgrade hook) via [`Self::get_temp_storage`].
+	///
+	/// A `at` storage identifier must be provided to indicate where the storage is being written
+	/// to.
+	#[cfg(feature = "try-runtime")]
+	fn set_temp_storage<T: codec::Encode>(data: T, at: &str) {
+		sp_io::storage::set(&Self::storage_key(at), &data.encode());
+	}
+}
+
+#[cfg(feature = "try-runtime")]
+impl<U: OnRuntimeUpgrade> OnRuntimeUpgradeHelpersExt for U {}
+
+/// The runtime upgrade trait.
+///
+/// Implementing this lets you express what should happen when the runtime upgrades,
+/// and changes may need to occur to your module.
+pub trait OnRuntimeUpgrade {
+	/// Perform a module upgrade.
+	///
+	/// # Warning
+	///
+	/// This function will be called before we initialized any runtime state, aka `on_initialize`
+	/// wasn't called yet. So, information like the block number and any other
+	/// block local data are not accessible.
+	///
+	/// Return the non-negotiable weight consumed for runtime upgrade.
+	fn on_runtime_upgrade() -> crate::weights::Weight {
+		0
+	}
+
+	/// Execute some pre-checks prior to a runtime upgrade.
+	///
+	/// This hook is never meant to be executed on-chain but is meant to be used by testing tools.
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<(), &'static str> {
+		Ok(())
+	}
+
+	/// Execute some post-checks after a runtime upgrade.
+	///
+	/// This hook is never meant to be executed on-chain but is meant to be used by testing tools.
+	#[cfg(feature = "try-runtime")]
+	fn post_upgrade() -> Result<(), &'static str> {
+		Ok(())
+	}
+}
+
+#[impl_for_tuples(30)]
+impl OnRuntimeUpgrade for Tuple {
+	fn on_runtime_upgrade() -> crate::weights::Weight {
+		let mut weight = 0;
+		for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* );
+		weight
+	}
+
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<(), &'static str> {
+		let mut result = Ok(());
+		for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* );
+		result
+	}
+
+	#[cfg(feature = "try-runtime")]
+	fn post_upgrade() -> Result<(), &'static str> {
+		let mut result = Ok(());
+		for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* );
+		result
+	}
+}
+
+/// The pallet hooks trait. Implementing this lets you express some logic to execute.
+pub trait Hooks<BlockNumber> {
+	/// The block is being finalized. Implement to have something happen.
+	fn on_finalize(_n: BlockNumber) {}
+
+	/// This will be run when the block is being finalized (before `on_finalize`).
+	/// Implement to have something happen using the remaining weight.
+	/// Will not fire if the remaining weight is 0.
+	/// Return the weight used, the hook will subtract it from current weight used
+	/// and pass the result to the next `on_idle` hook if it exists.
+	fn on_idle(
+		_n: BlockNumber,
+		_remaining_weight: crate::weights::Weight,
+	) -> crate::weights::Weight {
+		0
+	}
+
+	/// The block is being initialized. Implement to have something happen.
+	///
+	/// Return the non-negotiable weight consumed in the block.
+	fn on_initialize(_n: BlockNumber) -> crate::weights::Weight {
+		0
+	}
+
+	/// Perform a module upgrade.
+	///
+	/// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it
+	/// doesn't include the write of the pallet version in storage. The final complete logic
+	/// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by
+	/// `Pallet`.
+	///
+	/// # Warning
+	///
+	/// This function will be called before we initialized any runtime state, aka `on_initialize`
+	/// wasn't called yet. So, information like the block number and any other
+	/// block local data are not accessible.
+	///
+	/// Return the non-negotiable weight consumed for runtime upgrade.
+	fn on_runtime_upgrade() -> crate::weights::Weight {
+		0
+	}
+
+	/// Execute some pre-checks prior to a runtime upgrade.
+	///
+	/// This hook is never meant to be executed on-chain but is meant to be used by testing tools.
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<(), &'static str> {
+		Ok(())
+	}
+
+	/// Execute some post-checks after a runtime upgrade.
+	///
+	/// This hook is never meant to be executed on-chain but is meant to be used by testing tools.
+	#[cfg(feature = "try-runtime")]
+	fn post_upgrade() -> Result<(), &'static str> {
+		Ok(())
+	}
+
+	/// Implementing this function on a module allows you to perform long-running tasks
+	/// that make (by default) validators generate transactions that feed results
+	/// of those long-running computations back on chain.
+	///
+	/// NOTE: This function runs off-chain, so it can access the block state,
+	/// but cannot perform any alterations. More specifically alterations are
+	/// not forbidden, but they are not persisted in any way after the worker
+	/// has finished.
+	///
+	/// This function is being called after every block import (when fully synced).
+	///
+	/// Implement this and use any of the `Offchain` `sp_io` set of APIs
+	/// to perform off-chain computations, calls and submit transactions
+	/// with results to trigger any on-chain changes.
+	/// Any state alterations are lost and are not persisted.
+	fn offchain_worker(_n: BlockNumber) {}
+
+	/// Run integrity test.
+	///
+	/// The test is not executed in an externalities provided environment.
+	fn integrity_test() {}
+}
+
+/// A trait to define the build function of a genesis config, T and I are placeholder for pallet
+/// trait and pallet instance.
+#[cfg(feature = "std")]
+pub trait GenesisBuild<T, I = ()>: Default + MaybeSerializeDeserialize {
+	/// The build function is called within an externalities allowing storage APIs.
+	/// Thus one can write to storage using regular pallet storages.
+	fn build(&self);
+
+	/// Build the storage using `build` inside default storage.
+	fn build_storage(&self) -> Result<sp_runtime::Storage, String> {
+		let mut storage = Default::default();
+		self.assimilate_storage(&mut storage)?;
+		Ok(storage)
+	}
+
+	/// Assimilate the storage for this module into pre-existing overlays.
+	fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> {
+		sp_state_machine::BasicExternalities::execute_with_storage(storage, || {
+			self.build();
+			Ok(())
+		})
+	}
+}
+
+/// A trait which is called when the timestamp is set in the runtime.
+#[impl_for_tuples(30)]
+pub trait OnTimestampSet<Moment> {
+	/// Called when the timestamp is set.
+	fn on_timestamp_set(moment: Moment);
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn on_initialize_and_on_runtime_upgrade_weight_merge_works() {
+		struct Test;
+		impl OnInitialize<u8> for Test {
+			fn on_initialize(_n: u8) -> crate::weights::Weight {
+				10
+			}
+		}
+		impl OnRuntimeUpgrade for Test {
+			fn on_runtime_upgrade() -> crate::weights::Weight {
+				20
+			}
+		}
+
+		assert_eq!(<(Test, Test)>::on_initialize(0), 20);
+		assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40);
+	}
+
+	#[test]
+	fn on_idle_round_robin_works() {
+		static mut ON_IDLE_INVOCATION_ORDER: sp_std::vec::Vec<&str> = sp_std::vec::Vec::new();
+
+		struct Test1;
+		struct Test2;
+		struct Test3;
+		type TestTuple = (Test1, Test2, Test3);
+		impl OnIdle<u32> for Test1 {
+			fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight {
+				unsafe {
+					ON_IDLE_INVOCATION_ORDER.push("Test1");
+				}
+				0
+			}
+		}
+		impl OnIdle<u32> for Test2 {
+			fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight {
+				unsafe {
+					ON_IDLE_INVOCATION_ORDER.push("Test2");
+				}
+				0
+			}
+		}
+		impl OnIdle<u32> for Test3 {
+			fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight {
+				unsafe {
+					ON_IDLE_INVOCATION_ORDER.push("Test3");
+				}
+				0
+			}
+		}
+
+		unsafe {
+			TestTuple::on_idle(0, 0);
+			assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec());
+			ON_IDLE_INVOCATION_ORDER.clear();
+
+			TestTuple::on_idle(1, 0);
+			assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec());
+			ON_IDLE_INVOCATION_ORDER.clear();
+
+			TestTuple::on_idle(2, 0);
+			assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test3", "Test1", "Test2"].to_vec());
+			ON_IDLE_INVOCATION_ORDER.clear();
+
+			TestTuple::on_idle(3, 0);
+			assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec());
+			ON_IDLE_INVOCATION_ORDER.clear();
+
+			TestTuple::on_idle(4, 0);
+			assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec());
+			ON_IDLE_INVOCATION_ORDER.clear();
+		}
+	}
+}
diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs
new file mode 100644
index 0000000000000..a59869c2fc9a3
--- /dev/null
+++ b/frame/support/src/traits/members.rs
@@ -0,0 +1,248 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for dealing with the idea of membership.
+
+use crate::traits::misc::Get;
+use sp_std::{marker::PhantomData, prelude::*};
+
+/// A trait for querying whether a type can be said to "contain" a value.
+pub trait Contains<T> {
+	/// Return `true` if this "contains" the given value `t`.
+	fn contains(t: &T) -> bool;
+}
+
+/// A [`Contains`] implementation that contains every value.
+pub enum Everything {}
+impl<T> Contains<T> for Everything {
+	fn contains(_: &T) -> bool {
+		true
+	}
+}
+
+/// A [`Contains`] implementation that contains no value.
+pub enum Nothing {}
+impl<T> Contains<T> for Nothing {
+	fn contains(_: &T) -> bool {
+		false
+	}
+}
+
+#[deprecated = "Use `Everything` instead"]
+pub type AllowAll = Everything;
+#[deprecated = "Use `Nothing` instead"]
+pub type DenyAll = Nothing;
+#[deprecated = "Use `Contains` instead"]
+pub trait Filter<T> {
+	fn filter(t: &T) -> bool;
+}
+#[allow(deprecated)]
+impl<T, C: Contains<T>> Filter<T> for C {
+	fn filter(t: &T) -> bool {
+		Self::contains(t)
+	}
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(1, 30)]
+impl<T> Contains<T> for Tuple {
+	fn contains(t: &T) -> bool {
+		for_tuples!( #(
+			if Tuple::contains(t) { return true }
+		)* );
+		false
+	}
+}
+
+/// Create a type which implements the `Contains` trait for a particular type with syntax similar
+/// to `matches!`.
+#[macro_export]
+macro_rules! match_type {
+	( pub type $n:ident: impl Contains<$t:ty> = { $phead:pat $( | $ptail:pat )* } ; ) => {
+		pub struct $n;
+		impl $crate::traits::Contains<$t> for $n {
+			fn contains(l: &$t) -> bool {
+				matches!(l, $phead $( | $ptail )* )
+			}
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	match_type! {
+		pub type OneOrTenToTwenty: impl Contains<u8> = { 1 | 10..=20 };
+	}
+
+	#[test]
+	fn match_type_works() {
+		for i in 0..=255 {
+			assert_eq!(OneOrTenToTwenty::contains(&i), i == 1 || i >= 10 && i <= 20);
+		}
+	}
+}
+
+/// A trait for a set which can enumerate its members in order.
+pub trait SortedMembers<T: Ord> {
+	/// Get a vector of all members in the set, ordered.
+	fn sorted_members() -> Vec<T>;
+
+	/// Return `true` if this "contains" the given value `t`.
+	fn contains(t: &T) -> bool {
+		Self::sorted_members().binary_search(t).is_ok()
+	}
+
+	/// Get the number of items in the set.
+	fn count() -> usize {
+		Self::sorted_members().len()
+	}
+
+	/// Add an item that would satisfy `contains`. It does not make sure any other
+	/// state is correctly maintained or generated.
+	///
+	/// **Should be used for benchmarking only!**
+	#[cfg(feature = "runtime-benchmarks")]
+	fn add(_t: &T) {
+		unimplemented!()
+	}
+}
+
+/// Adapter struct for turning a [`SortedMembers`] impl into a [`Contains`] impl.
+pub struct AsContains<OM>(PhantomData<(OM,)>);
+impl<T: Ord, OM: SortedMembers<T>> Contains<T> for AsContains<OM> {
+	fn contains(t: &T) -> bool {
+		OM::contains(t)
+	}
+}
+
+/// Trivial utility for implementing `Contains`/`SortedMembers` with a `Vec`.
+pub struct IsInVec<T>(PhantomData<T>);
+impl<X: PartialEq, T: Get<Vec<X>>> Contains<X> for IsInVec<T> {
+	fn contains(t: &X) -> bool {
+		T::get().contains(t)
+	}
+}
+impl<X: Ord + PartialEq, T: Get<Vec<X>>> SortedMembers<X> for IsInVec<T> {
+	fn sorted_members() -> Vec<X> {
+		let mut r = T::get();
+		r.sort();
+		r
+	}
+}
+
+/// A trait for querying the bounds on the number of elements in an implementation of `Contains`.
+pub trait ContainsLengthBound {
+	/// Minimum number of elements contained.
+	fn min_len() -> usize;
+	/// Maximum number of elements contained.
+	fn max_len() -> usize;
+}
+
+/// Trait for a type that can handle the initialization of account IDs at genesis.
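+///
+/// The unit type is the provided no-op implementation, which can stand in when nothing needs to
+/// happen at genesis; a small illustration:
+/// ```
+/// # use frame_support::traits::InitializeMembers;
+/// // `()` implements `InitializeMembers` for any member type and does nothing:
+/// <() as InitializeMembers<u64>>::initialize_members(&[1, 2, 3]);
+/// ```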
+pub trait InitializeMembers<AccountId> {
+	/// Initialize the members to the given `members`.
+	fn initialize_members(members: &[AccountId]);
+}
+
+impl<T> InitializeMembers<T> for () {
+	fn initialize_members(_: &[T]) {}
+}
+
+/// Trait for a type that can handle incremental changes to a set of account IDs.
+pub trait ChangeMembers<AccountId: Clone + Ord> {
+	/// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The
+	/// new set is given by `new`, and need not be sorted.
+	///
+	/// This resets any previous value of prime.
+	fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec<AccountId>) {
+		new.sort();
+		Self::change_members_sorted(incoming, outgoing, &new[..]);
+	}
+
+	/// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones.
+	/// The new set is thus given by `sorted_new` and **must be sorted**.
+	///
+	/// NOTE: This is the only function that needs to be implemented in `ChangeMembers`.
+	///
+	/// This resets any previous value of prime.
+	fn change_members_sorted(
+		incoming: &[AccountId],
+		outgoing: &[AccountId],
+		sorted_new: &[AccountId],
+	);
+
+	/// Set the new members; they **must already be sorted**. This will compute the diff and use
+	/// it to call `change_members_sorted`.
+	///
+	/// This resets any previous value of prime.
+	fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) {
+		let (incoming, outgoing) = Self::compute_members_diff_sorted(new_members, old_members);
+		Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members);
+	}
+
+	/// Compute the diff between new and old members; they **must already be sorted**.
+	///
+	/// Returns incoming and outgoing members.
+	fn compute_members_diff_sorted(
+		new_members: &[AccountId],
+		old_members: &[AccountId],
+	) -> (Vec<AccountId>, Vec<AccountId>) {
+		let mut old_iter = old_members.iter();
+		let mut new_iter = new_members.iter();
+		let mut incoming = Vec::new();
+		let mut outgoing = Vec::new();
+		let mut old_i = old_iter.next();
+		let mut new_i = new_iter.next();
+		loop {
+			match (old_i, new_i) {
+				(None, None) => break,
+				(Some(old), Some(new)) if old == new => {
+					old_i = old_iter.next();
+					new_i = new_iter.next();
+				},
+				(Some(old), Some(new)) if old < new => {
+					outgoing.push(old.clone());
+					old_i = old_iter.next();
+				},
+				(Some(old), None) => {
+					outgoing.push(old.clone());
+					old_i = old_iter.next();
+				},
+				(_, Some(new)) => {
+					incoming.push(new.clone());
+					new_i = new_iter.next();
+				},
+			}
+		}
+		(incoming, outgoing)
+	}
+
+	/// Set the prime member.
+	fn set_prime(_prime: Option<AccountId>) {}
+
+	/// Get the current prime.
+	fn get_prime() -> Option<AccountId> {
+		None
+	}
+}
+
+impl<T: Clone + Ord> ChangeMembers<T> for () {
+	fn change_members(_: &[T], _: &[T], _: Vec<T>) {}
+	fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {}
+	fn set_members_sorted(_: &[T], _: &[T]) {}
+	fn set_prime(_: Option<T>) {}
+}
diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs
new file mode 100644
index 0000000000000..e877f29e0a137
--- /dev/null
+++ b/frame/support/src/traits/metadata.rs
@@ -0,0 +1,181 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for managing information attached to pallets and their constituents.
+
+use codec::{Decode, Encode};
+use sp_runtime::RuntimeDebug;
+
+/// Provides information about the pallet setup in the runtime.
+///
+/// An implementor should be able to provide information about each pallet that
+/// is configured in `construct_runtime!`.
+pub trait PalletInfo {
+	/// Convert the given pallet `P` into its index as configured in the runtime.
+	fn index<P: 'static>() -> Option<usize>;
+	/// Convert the given pallet `P` into its name as configured in the runtime.
+	fn name<P: 'static>() -> Option<&'static str>;
+}
+
+/// Provides information about the pallet setup in the runtime.
+///
+/// Access the information provided by [`PalletInfo`] for a specific pallet.
+pub trait PalletInfoAccess {
+	/// Index of the pallet as configured in the runtime.
+	fn index() -> usize;
+	/// Name of the pallet as configured in the runtime.
+	fn name() -> &'static str;
+}
+
+/// The function and pallet name of the Call.
+#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)]
+pub struct CallMetadata {
+	/// Name of the function.
+	pub function_name: &'static str,
+	/// Name of the pallet to which the function belongs.
+	pub pallet_name: &'static str,
+}
+
+/// Gets the function name of the Call.
+pub trait GetCallName {
+	/// Return all function names.
+	fn get_call_names() -> &'static [&'static str];
+	/// Return the function name of the Call.
+	fn get_call_name(&self) -> &'static str;
+}
+
+/// Gets the metadata for the Call - function name and pallet name.
+pub trait GetCallMetadata {
+	/// Return all module names.
+	fn get_module_names() -> &'static [&'static str];
+	/// Return all function names for the given `module`.
+	fn get_call_names(module: &str) -> &'static [&'static str];
+	/// Return a [`CallMetadata`], containing function and pallet name of the Call.
+	fn get_call_metadata(&self) -> CallMetadata;
+}
+
+/// The storage key postfix that is used to store the [`StorageVersion`] per pallet.
+///
+/// The full storage key is built by using:
+/// Twox128([`PalletInfo::name`]) ++ Twox128([`STORAGE_VERSION_STORAGE_KEY_POSTFIX`])
+pub const STORAGE_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__STORAGE_VERSION__:";
+
+/// The storage version of a pallet.
+///
+/// Each storage version of a pallet is stored in the state under a fixed key. See
+/// [`STORAGE_VERSION_STORAGE_KEY_POSTFIX`] for how this key is built.
+#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy, PartialOrd, Default)]
+pub struct StorageVersion(u16);
+
+impl StorageVersion {
+	/// Creates a new instance of `Self`.
+	pub const fn new(version: u16) -> Self {
+		Self(version)
+	}
+
+	/// Returns the storage key for a storage version.
+	///
+	/// See [`STORAGE_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built.
+	pub fn storage_key<P: PalletInfoAccess>() -> [u8; 32] {
+		let pallet_name = P::name();
+		crate::storage::storage_prefix(pallet_name.as_bytes(), STORAGE_VERSION_STORAGE_KEY_POSTFIX)
+	}
+
+	/// Put this storage version for the given pallet into the storage.
+	///
+	/// It will use the storage key that is associated with the given `Pallet`.
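+	///
+	/// A sketch of typical migration code (`Pallet` stands for whatever pallet type the runtime
+	/// provides; the names here are illustrative):
+	/// ```ignore
+	/// // After running the migration to the new layout, record the new version:
+	/// StorageVersion::new(2).put::<Pallet>();
+	/// assert_eq!(StorageVersion::get::<Pallet>(), 2);
+	/// ```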
+	///
+	/// # Panics
+	///
+	/// This function will panic iff `Pallet` can not be found by `PalletInfo`.
+	/// In a runtime that is put together using
+	/// [`construct_runtime!`](crate::construct_runtime) this should never happen.
+	///
+	/// It will also panic if this function isn't executed in an externalities
+	/// provided environment.
+	pub fn put<P: PalletInfoAccess>(&self) {
+		let key = Self::storage_key::<P>();
+
+		crate::storage::unhashed::put(&key, self);
+	}
+
+	/// Get the storage version of the given pallet from the storage.
+	///
+	/// It will use the storage key that is associated with the given `Pallet`.
+	///
+	/// # Panics
+	///
+	/// This function will panic iff `Pallet` can not be found by `PalletInfo`.
+	/// In a runtime that is put together using
+	/// [`construct_runtime!`](crate::construct_runtime) this should never happen.
+	///
+	/// It will also panic if this function isn't executed in an externalities
+	/// provided environment.
+	pub fn get<P: PalletInfoAccess>() -> Self {
+		let key = Self::storage_key::<P>();
+
+		crate::storage::unhashed::get_or_default(&key)
+	}
+}
+
+impl PartialEq<u16> for StorageVersion {
+	fn eq(&self, other: &u16) -> bool {
+		self.0 == *other
+	}
+}
+
+impl PartialOrd<u16> for StorageVersion {
+	fn partial_cmp(&self, other: &u16) -> Option<core::cmp::Ordering> {
+		Some(self.0.cmp(other))
+	}
+}
+
+/// Provides information about the storage version of a pallet.
+///
+/// It differentiates between the current and the on-chain storage version. The two should only be
+/// out of sync when a new runtime upgrade was applied and the runtime migrations have not yet
+/// executed. Otherwise the pallet works with an unsupported storage version, and unforeseen
+/// consequences can follow.
+///
+/// The current storage version is the version of the pallet as supported at runtime. The on-chain
+/// storage version is the version of the pallet as recorded in the storage.
+///
+/// It is required to update the on-chain storage version manually when a migration was applied.
pub trait GetStorageVersion {
+	/// Returns the current storage version as supported by the pallet.
+	fn current_storage_version() -> StorageVersion;
+	/// Returns the on-chain storage version of the pallet as stored in the storage.
+	fn on_chain_storage_version() -> StorageVersion;
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn check_storage_version_ordering() {
+		let version = StorageVersion::new(1);
+		assert!(version == StorageVersion::new(1));
+		assert!(version < StorageVersion::new(2));
+		assert!(version < StorageVersion::new(3));
+
+		let version = StorageVersion::new(2);
+		assert!(version < StorageVersion::new(3));
+		assert!(version > StorageVersion::new(1));
+		assert!(version < StorageVersion::new(5));
+	}
+}
diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs
new file mode 100644
index 0000000000000..1776e1ba320ea
--- /dev/null
+++ b/frame/support/src/traits/misc.rs
@@ -0,0 +1,379 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Smaller traits used in FRAME which don't need their own file.
+
+use crate::dispatch::Parameter;
+use sp_runtime::{traits::Block as BlockT, DispatchError};
+
+/// Anything that can have a `::len()` method.
+pub trait Len {
+	/// Return the length of data type.
+	fn len(&self) -> usize;
+}
+
+impl<T: Clone + IntoIterator> Len for T
+where
+	<T as IntoIterator>::IntoIter: ExactSizeIterator,
+{
+	fn len(&self) -> usize {
+		self.clone().into_iter().len()
+	}
+}
+
+/// A trait for querying a single value from a type.
+///
+/// It is not required that the value is constant.
+pub trait Get<T> {
+	/// Return the current value.
+	fn get() -> T;
+}
+
+impl<T: Default> Get<T> for () {
+	fn get() -> T {
+		T::default()
+	}
+}
+
+/// Implement `Get` by returning `Default` for any type that implements `Default`.
+pub struct GetDefault;
+impl<T: Default> Get<T> for GetDefault {
+	fn get() -> T {
+		T::default()
+	}
+}
+
+/// Implement `Get<u32>` and `Get<Option<u32>>` using the given const.
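+///
+/// For example (a small doc-test sketch, assuming `ConstU32` is re-exported from
+/// `frame_support::traits`):
+/// ```
+/// # use frame_support::traits::{Get, ConstU32};
+/// assert_eq!(<ConstU32<7> as Get<u32>>::get(), 7);
+/// assert_eq!(<ConstU32<7> as Get<Option<u32>>>::get(), Some(7));
+/// ```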
+pub struct ConstU32<const T: u32>;
+
+impl<const T: u32> Get<u32> for ConstU32<T> {
+	fn get() -> u32 {
+		T
+	}
+}
+
+impl<const T: u32> Get<Option<u32>> for ConstU32<T> {
+	fn get() -> Option<u32> {
+		Some(T)
+	}
+}
+
+/// A type for which some values make sense to be able to drop without further consideration.
+pub trait TryDrop: Sized {
+	/// Drop an instance cleanly. Only works if its value represents "no-operation".
+	fn try_drop(self) -> Result<(), Self>;
+}
+
+/// Return type used when we need to return one of two items, each of the opposite direction or
+/// sign, with one (`Same`) being of the same type as the `self` or primary argument of the
+/// function that returned it.
+pub enum SameOrOther<A, B> {
+	/// No item.
+	None,
+	/// An item of the same type as the `Self` on which the return function was called.
+	Same(A),
+	/// An item of the opposite type to the `Self` on which the return function was called.
+	Other(B),
+}
+
+impl<A, B> TryDrop for SameOrOther<A, B> {
+	fn try_drop(self) -> Result<(), Self> {
+		if let SameOrOther::None = self {
+			Ok(())
+		} else {
+			Err(self)
+		}
+	}
+}
+
+impl<A, B> SameOrOther<A, B> {
+	/// Returns `Ok` with the inner value of `Same` if `self` is that, otherwise returns `Err`
+	/// with `self`.
+	pub fn try_same(self) -> Result<A, Self> {
+		match self {
+			SameOrOther::Same(a) => Ok(a),
+			x => Err(x),
+		}
+	}
+
+	/// Returns `Ok` with the inner value of `Other` if `self` is that, otherwise returns `Err`
+	/// with `self`.
+	pub fn try_other(self) -> Result<B, Self> {
+		match self {
+			SameOrOther::Other(b) => Ok(b),
+			x => Err(x),
+		}
+	}
+
+	/// Returns `Ok` if `self` is `None`, otherwise returns `Err` with `self`.
+	pub fn try_none(self) -> Result<(), Self> {
+		match self {
+			SameOrOther::None => Ok(()),
+			x => Err(x),
+		}
+	}
+
+	/// Returns the `Same` value (or the default for `A` if `self` is `None`), otherwise returns
+	/// `Err` with the `Other` value.
+	pub fn same(self) -> Result<A, B>
+	where
+		A: Default,
+	{
+		match self {
+			SameOrOther::Same(a) => Ok(a),
+			SameOrOther::None => Ok(A::default()),
+			SameOrOther::Other(b) => Err(b),
+		}
+	}
+
+	/// Returns the `Other` value (or the default for `B` if `self` is `None`), otherwise returns
+	/// `Err` with the `Same` value.
+	pub fn other(self) -> Result<B, A>
+	where
+		B: Default,
+	{
+		match self {
+			SameOrOther::Same(a) => Err(a),
+			SameOrOther::None => Ok(B::default()),
+			SameOrOther::Other(b) => Ok(b),
+		}
+	}
+}
+
+/// Handler for when a new account has been created.
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+pub trait OnNewAccount<AccountId> {
+	/// A new account `who` has been registered.
+	fn on_new_account(who: &AccountId);
+}
+
+/// The account with the given id was reaped.
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+pub trait OnKilledAccount<AccountId> {
+	/// The account with the given id was reaped.
+	fn on_killed_account(who: &AccountId);
+}
+
+/// A simple, generic one-parameter event notifier/handler.
+pub trait HandleLifetime<T> {
+	/// An account was created.
+	fn created(_t: &T) -> Result<(), DispatchError> {
+		Ok(())
+	}
+
+	/// An account was killed.
+	fn killed(_t: &T) -> Result<(), DispatchError> {
+		Ok(())
+	}
+}
+
+impl<T> HandleLifetime<T> for () {}
+
+pub trait Time {
+	type Moment: sp_arithmetic::traits::AtLeast32Bit + Parameter + Default + Copy;
+
+	fn now() -> Self::Moment;
+}
+
+/// Trait to deal with unix time.
+pub trait UnixTime {
+	/// Return duration since `SystemTime::UNIX_EPOCH`.
+	fn now() -> core::time::Duration;
+}
+
+/// Trait to be used when types are exactly the same.
+///
+/// This allows converting back and forth between a type, a reference to it and a mutable
+/// reference to it.
+pub trait IsType<T>: Into<T> + From<T> {
+	/// Cast reference.
+	fn from_ref(t: &T) -> &Self;
+
+	/// Cast reference.
+	fn into_ref(&self) -> &T;
+
+	/// Cast mutable reference.
+	fn from_mut(t: &mut T) -> &mut Self;
+
+	/// Cast mutable reference.
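+	///
+	/// For example, since every type trivially satisfies `IsType<Self>` (a quick sketch):
+	/// ```
+	/// # use frame_support::traits::IsType;
+	/// let mut x = 5u32;
+	/// *<u32 as IsType<u32>>::into_mut(&mut x) += 1;
+	/// assert_eq!(x, 6);
+	/// ```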
+	fn into_mut(&mut self) -> &mut T;
+}
+
+impl<T> IsType<T> for T {
+	fn from_ref(t: &T) -> &Self {
+		t
+	}
+	fn into_ref(&self) -> &T {
+		self
+	}
+	fn from_mut(t: &mut T) -> &mut Self {
+		t
+	}
+	fn into_mut(&mut self) -> &mut T {
+		self
+	}
+}
+
+/// Something that can be checked to be of sub type `T`.
+///
+/// This is useful for enums where each variant encapsulates a different sub type, and
+/// you need access to these sub types.
+///
+/// For example, in FRAME, this trait is implemented for the runtime `Call` enum. Pallets use this
+/// to check if a certain call is an instance of the local pallet's `Call` enum.
+///
+/// # Example
+///
+/// ```
+/// # use frame_support::traits::IsSubType;
+///
+/// enum Test {
+///     String(String),
+///     U32(u32),
+/// }
+///
+/// impl IsSubType<String> for Test {
+///     fn is_sub_type(&self) -> Option<&String> {
+///         match self {
+///             Self::String(ref r) => Some(r),
+///             _ => None,
+///         }
+///     }
+/// }
+///
+/// impl IsSubType<u32> for Test {
+///     fn is_sub_type(&self) -> Option<&u32> {
+///         match self {
+///             Self::U32(ref r) => Some(r),
+///             _ => None,
+///         }
+///     }
+/// }
+///
+/// fn main() {
+///     let data = Test::String("test".into());
+///
+///     assert_eq!("test", IsSubType::<String>::is_sub_type(&data).unwrap().as_str());
+/// }
+/// ```
+pub trait IsSubType<T> {
+	/// Returns `Some(_)` if `self` is an instance of sub type `T`.
+	fn is_sub_type(&self) -> Option<&T>;
+}
+
+/// Something that can execute a given block.
+///
+/// Executing a block means that all extrinsics in a given block will be executed and the
+/// resulting header will be checked against the header of the given block.
+pub trait ExecuteBlock<Block: BlockT> {
+	/// Execute the given `block`.
+	///
+	/// This will execute all extrinsics in the block and check that the resulting header is
+	/// correct.
+	///
+	/// # Panic
+	///
+	/// Panics when an extrinsic panics or the resulting header doesn't match the expected header.
+	fn execute_block(block: Block);
+}
+
+/// Off-chain computation trait.
+///
+/// Implementing this trait on a module allows you to perform long-running tasks
+/// that make (by default) validators generate transactions that feed results
+/// of those long-running computations back on chain.
+///
+/// NOTE: This function runs off-chain, so it can access the block state,
+/// but cannot perform any alterations. More specifically, alterations are
+/// not forbidden, but they are not persisted in any way after the worker
+/// has finished.
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+pub trait OffchainWorker<BlockNumber> {
+	/// This function is being called after every block import (when fully synced).
+	///
+	/// Implement this and use any of the `Offchain` `sp_io` set of APIs
+	/// to perform off-chain computations, calls and submit transactions
+	/// with results to trigger any on-chain changes.
+	/// Any state alterations are lost and are not persisted.
+	fn offchain_worker(_n: BlockNumber) {}
+}
+
+/// Some amount of backing from a group. The precise definition of what it means to "back"
+/// something is left flexible.
+pub struct Backing {
+	/// The number of members of the group that back some motion.
+	pub approvals: u32,
+	/// The total count of group members.
+	pub eligible: u32,
+}
+
+/// Retrieve the backing from an object's ref.
+pub trait GetBacking {
+	/// Returns `Some` `Backing` if `self` represents a fractional/groupwise backing of some
+	/// implicit motion. `None` if it does not.
+	fn get_backing(&self) -> Option<Backing>;
+}
+
+/// A trait to ensure the inherents are before non-inherents in a block.
+///
+/// This is typically implemented on the runtime, through `construct_runtime!`.
+pub trait EnsureInherentsAreFirst<Block> {
+	/// Ensure the position of the inherents is correct, i.e. they are before non-inherents.
+	///
+	/// On error return the index of the inherent with invalid position (counting from 0).
+	fn ensure_inherents_are_first(block: &Block) -> Result<(), u32>;
+}
+
+/// An extrinsic on which we can get access to the call.
+pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic {
+	/// Get the call of the extrinsic.
+	fn call(&self) -> &Self::Call;
+}
+
+#[cfg(feature = "std")]
+impl<Call, Extra> ExtrinsicCall for sp_runtime::testing::TestXt<Call, Extra>
+where
+	Call: codec::Codec + Sync + Send,
+{
+	fn call(&self) -> &Self::Call {
+		&self.call
+	}
+}
+
+impl<Address, Call, Signature, Extra> ExtrinsicCall
+	for sp_runtime::generic::UncheckedExtrinsic<Address, Call, Signature, Extra>
+where
+	Extra: sp_runtime::traits::SignedExtension,
+{
+	fn call(&self) -> &Self::Call {
+		&self.function
+	}
+}
+
+/// Something that can estimate the fee of a (frame-based) call.
+///
+/// Typically, the same pallet that will charge transaction fees will implement this.
+pub trait EstimateCallFee<Call, Balance> {
+	/// Estimate the fee of this call.
+	///
+	/// The dispatch info and the length is deduced from the call. The post info can optionally be
+	/// provided.
+	fn estimate_call_fee(call: &Call, post_info: crate::weights::PostDispatchInfo) -> Balance;
+}
+
+// Useful for building mocks.
+#[cfg(feature = "std")]
+impl<Call, Balance: From<u32>, const T: u32> EstimateCallFee<Call, Balance> for ConstU32<T> {
+	fn estimate_call_fee(_: &Call, _: crate::weights::PostDispatchInfo) -> Balance {
+		T.into()
+	}
+}
diff --git a/frame/support/src/traits/randomness.rs b/frame/support/src/traits/randomness.rs
new file mode 100644
index 0000000000000..865893f99b393
--- /dev/null
+++ b/frame/support/src/traits/randomness.rs
@@ -0,0 +1,54 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for dealing with on-chain randomness.
+
+/// A trait that is able to provide randomness.
+///
+/// Being a deterministic blockchain, real randomness is difficult to come by; different
+/// implementations of this trait will provide different security guarantees. At best,
+/// this will be randomness which was hard to predict a long time ago, but that has become
+/// easy to predict recently.
+pub trait Randomness<Output, BlockNumber> {
+	/// Get the most recently determined random seed, along with the time in the past
+	/// since when it was determinable by chain observers.
+	///
+	/// `subject` is a context identifier and allows you to get a different result to
+	/// other callers of this function; use it like `random(&b"my context"[..])`.
+	///
+	/// NOTE: The returned seed should only be used to distinguish commitments made before
+	/// the returned block number. If the block number is too early (i.e. commitments were
+	/// made afterwards), then ensure no further commitments may be made and repeatedly
+	/// call this on later blocks until the block number returned is later than the latest
+	/// commitment.
+	fn random(subject: &[u8]) -> (Output, BlockNumber);
+
+	/// Get the basic random seed.
+	///
+	/// In general you won't want to use this, but rather `Self::random` which allows
+	/// you to give a subject for the random result and whose value will be
+	/// independently low-influence random from any other such seeds.
+	///
+	/// NOTE: The returned seed should only be used to distinguish commitments made before
+	/// the returned block number. If the block number is too early (i.e. commitments were
+	/// made afterwards), then ensure no further commitments may be made and repeatedly
+	/// call this on later blocks until the block number returned is later than the latest
+	/// commitment.
+	fn random_seed() -> (Output, BlockNumber) {
+		Self::random(&[][..])
+	}
+}
diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs
new file mode 100644
index 0000000000000..a4a4f9c03ab12
--- /dev/null
+++ b/frame/support/src/traits/schedule.rs
@@ -0,0 +1,133 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits and associated utilities for scheduling dispatchables in FRAME.
+
+use codec::{Codec, Decode, Encode, EncodeLike};
+use sp_runtime::{DispatchError, RuntimeDebug};
+use sp_std::{fmt::Debug, prelude::*};
+
+/// Information relating to the period of a scheduled task. First item is the length of the
+/// period and the second is the number of times it should be executed in total before the task
+/// is considered finished and removed.
+pub type Period<BlockNumber> = (BlockNumber, u32);
+
+/// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning
+/// higher priority.
+pub type Priority = u8;
+
+/// The dispatch time of a scheduled task.
+#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)]
+pub enum DispatchTime<BlockNumber> {
+	/// At specified block.
+	At(BlockNumber),
+	/// After specified number of blocks.
+	After(BlockNumber),
+}
+
+/// The highest priority. We invert the value so that normal sorting will place the highest
+/// priority at the beginning of the list.
+pub const HIGHEST_PRIORITY: Priority = 0;
+/// Anything of this value or lower will definitely be scheduled on the block that they ask for,
+/// even if it breaches the `MaximumWeight` limitation.
+pub const HARD_DEADLINE: Priority = 63;
+/// The lowest priority. Most stuff should be around here.
+pub const LOWEST_PRIORITY: Priority = 255;
+
+/// A type that can be used as a scheduler.
+pub trait Anon<BlockNumber, Call, Origin> {
+	/// An address which can be used for removing a scheduled task.
+	type Address: Codec + Clone + Eq + EncodeLike + Debug;
+
+	/// Schedule a dispatch to happen at the beginning of some block in the future.
+	///
+	/// This is not named.
+	fn schedule(
+		when: DispatchTime<BlockNumber>,
+		maybe_periodic: Option<Period<BlockNumber>>,
+		priority: Priority,
+		origin: Origin,
+		call: Call,
+	) -> Result<Self::Address, DispatchError>;
+
+	/// Cancel a scheduled task. If periodic, then it will cancel all further instances of that,
+	/// also.
+	///
+	/// Will return an error if the `address` is invalid.
+	///
+	/// NOTE: This is guaranteed to work only *before* the point that it is due to be executed.
+	/// If it ends up being delayed beyond the point of execution, then it cannot be cancelled.
+	///
+	/// NOTE2: This will not work to cancel periodic tasks after their initial execution. For
+	/// that, you must name the task explicitly using the `Named` trait.
+	fn cancel(address: Self::Address) -> Result<(), ()>;
+
+	/// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed
+	/// only if it is executed *before* the currently scheduled block. For periodic tasks,
+	/// this dispatch is guaranteed to succeed only before the *initial* execution; for
+	/// others, use `reschedule_named`.
+	///
+	/// Will return an error if the `address` is invalid.
+	fn reschedule(
+		address: Self::Address,
+		when: DispatchTime<BlockNumber>,
+	) -> Result<Self::Address, DispatchError>;
+
+	/// Return the next dispatch time for a given task.
+	///
+	/// Will return an error if the `address` is invalid.
+	fn next_dispatch_time(address: Self::Address) -> Result<BlockNumber, ()>;
+}
+
+/// A type that can be used as a scheduler.
+pub trait Named<BlockNumber, Call, Origin> {
+	/// An address which can be used for removing a scheduled task.
+	type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug;
+
+	/// Schedule a dispatch to happen at the beginning of some block in the future.
+	///
+	/// - `id`: The identity of the task. This must be unique and will return an error if not.
+	fn schedule_named(
+		id: Vec<u8>,
+		when: DispatchTime<BlockNumber>,
+		maybe_periodic: Option<Period<BlockNumber>>,
+		priority: Priority,
+		origin: Origin,
+		call: Call,
+	) -> Result<Self::Address, ()>;
+
+	/// Cancel a scheduled, named task. If periodic, then it will cancel all further instances
+	/// of that, also.
+	///
+	/// Will return an error if the `id` is invalid.
+	///
+	/// NOTE: This is guaranteed to work only *before* the point that it is due to be executed.
+	/// If it ends up being delayed beyond the point of execution, then it cannot be cancelled.
+	fn cancel_named(id: Vec<u8>) -> Result<(), ()>;
+
+	/// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed
+	/// only if it is executed *before* the currently scheduled block.
+	fn reschedule_named(
+		id: Vec<u8>,
+		when: DispatchTime<BlockNumber>,
+	) -> Result<Self::Address, DispatchError>;
+
+	/// Return the next dispatch time for a given task.
+	///
+	/// Will return an error if the `id` is invalid.
+	fn next_dispatch_time(id: Vec<u8>) -> Result<BlockNumber, ()>;
+}
diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs
new file mode 100644
index 0000000000000..9a88a3ed44046
--- /dev/null
+++ b/frame/support/src/traits/storage.rs
@@ -0,0 +1,89 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for encoding data related to a pallet's storage items.
+
+use sp_std::prelude::*;
+
+/// An instance of a pallet in the storage.
+///
+/// It is required that these instances are unique, to support multiple instances per pallet in
+/// the same runtime!
+///
+/// E.g. for the module `MyModule` the default instance will have the prefix "MyModule" and other
+/// instances "InstanceNMyModule".
+pub trait Instance: 'static {
+	/// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule"
+	const PREFIX: &'static str;
+	/// Unique numerical identifier for an instance.
+	const INDEX: u8;
+}
+
+/// An instance of a storage in a pallet.
+///
+/// Define an instance for an individual storage inside a pallet.
+/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is
+/// used to isolate storages inside a pallet.
+///
+/// NOTE: This information can be used to define storages in a pallet such as a `StorageMap`
+/// which can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)`.
+pub trait StorageInstance {
+	/// Prefix of a pallet to isolate it from other pallets.
+	fn pallet_prefix() -> &'static str;
+
+	/// Prefix given to a storage to isolate from other storages in the pallet.
+	const STORAGE_PREFIX: &'static str;
+}
+
+/// Metadata about storage from the runtime.
+#[derive(codec::Encode, codec::Decode, crate::RuntimeDebug, Eq, PartialEq, Clone)]
+pub struct StorageInfo {
+	/// Encoded string of pallet name.
+	pub pallet_name: Vec<u8>,
+	/// Encoded string of storage name.
+	pub storage_name: Vec<u8>,
+	/// The prefix of the storage. All keys after the prefix are considered part of this storage.
+	pub prefix: Vec<u8>,
+	/// The maximum number of values in the storage, or none if no maximum specified.
+	pub max_values: Option<u32>,
+	/// The maximum size of key/values in the storage, or none if no maximum specified.
+	pub max_size: Option<u32>,
+}
+
+/// A trait to give information about storage.
+///
+/// It can be used to calculate the PoV worst case size.
+pub trait StorageInfoTrait {
+	fn storage_info() -> Vec<StorageInfo>;
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+impl StorageInfoTrait for Tuple {
+	fn storage_info() -> Vec<StorageInfo> {
+		let mut res = vec![];
+		for_tuples!( #( res.extend_from_slice(&Tuple::storage_info()); )* );
+		res
+	}
+}
+
+/// Similar to [`StorageInfoTrait`], a trait to give partial information about storage.
+///
+/// This is useful when a type can give some partial information even though its generic
+/// parameter doesn't implement some bounds.
+pub trait PartialStorageInfoTrait {
+	fn partial_storage_info() -> Vec<StorageInfo>;
+}
diff --git a/frame/support/src/traits/stored_map.rs b/frame/support/src/traits/stored_map.rs
new file mode 100644
index 0000000000000..715a5211be430
--- /dev/null
+++ b/frame/support/src/traits/stored_map.rs
@@ -0,0 +1,144 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits and associated datatypes for managing abstract stored values.
+
+use crate::{storage::StorageMap, traits::misc::HandleLifetime};
+use codec::FullCodec;
+use sp_runtime::DispatchError;
+
+/// An abstraction of a value stored within storage, but possibly as part of a larger composite
+/// item.
+pub trait StoredMap<K, T: Default> {
+	/// Get the item, or its default if it doesn't yet exist; we make no distinction between the
+	/// two.
+	fn get(k: &K) -> T;
+
+	/// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err`
+	/// is returned. It is removed or reset to the default value if it has been mutated to `None`.
+	fn try_mutate_exists<R, E: From<DispatchError>>(
+		k: &K,
+		f: impl FnOnce(&mut Option<T>) -> Result<R, E>,
+	) -> Result<R, E>;
+
+	// Everything past here has a default implementation.
+
+	/// Mutate the item.
+	fn mutate<R>(k: &K, f: impl FnOnce(&mut T) -> R) -> Result<R, DispatchError> {
+		Self::mutate_exists(k, |maybe_account| match maybe_account {
+			Some(ref mut account) => f(account),
+			x @ None => {
+				let mut account = Default::default();
+				let r = f(&mut account);
+				*x = Some(account);
+				r
+			},
+		})
+	}
+
+	/// Mutate the item, removing or resetting to the default value if it has been mutated to
+	/// `None`.
+	///
+	/// This is infallible as long as the value does not get destroyed.
+	fn mutate_exists<R>(
+		k: &K,
+		f: impl FnOnce(&mut Option<T>) -> R,
+	) -> Result<R, DispatchError> {
+		Self::try_mutate_exists(k, |x| -> Result<R, DispatchError> { Ok(f(x)) })
+	}
+
+	/// Set the item to something new.
+	fn insert(k: &K, t: T) -> Result<(), DispatchError> {
+		Self::mutate(k, |i| *i = t)
+	}
+
+	/// Remove the item or otherwise replace it with its default value; we don't care which.
+	fn remove(k: &K) -> Result<(), DispatchError> {
+		Self::mutate_exists(k, |x| *x = None)
+	}
+}
+
+/// A shim for placing around a storage item in order to use it as a `StoredMap`. Ideally this
+/// wouldn't be needed as `StorageMap`s should blanket implement `StoredMap`s, however this
+/// would break the ability to have custom impls of `StoredMap`. The other workaround is to
+/// implement it directly in the macro.
+///
+/// This form has the advantage that a lifetime handler (`L`) is hooked in, which is notified
+/// when an account is about to be created where one didn't previously exist (at all; not just
+/// where it used to be the default value), or where the account is being removed or reset back
+/// to the default value where previously it did exist (though may have been in a default state).
+/// This works well with system module's `CallOnCreatedAccount` and `CallKillAccount`.
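+///
+/// A sketch of how this is typically wired up (the concrete types shown are illustrative only,
+/// based on how a `pallet_balances` configuration commonly stores account data in `frame_system`):
+/// ```ignore
+/// type AccountStore = StorageMapShim<
+/// 	pallet_balances::Account<Runtime>,
+/// 	frame_system::Provider<Runtime>,
+/// 	AccountId,
+/// 	pallet_balances::AccountData<Balance>,
+/// >;
+/// ```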
+pub struct StorageMapShim<S, L, K, T>(sp_std::marker::PhantomData<(S, L, K, T)>);
+impl<
+		S: StorageMap<K, T, Query = T>,
+		L: HandleLifetime<K>,
+		K: FullCodec,
+		T: FullCodec + Default,
+	> StoredMap<K, T> for StorageMapShim<S, L, K, T>
+{
+	fn get(k: &K) -> T {
+		S::get(k)
+	}
+	fn insert(k: &K, t: T) -> Result<(), DispatchError> {
+		if !S::contains_key(&k) {
+			L::created(k)?;
+		}
+		S::insert(k, t);
+		Ok(())
+	}
+	fn remove(k: &K) -> Result<(), DispatchError> {
+		if S::contains_key(&k) {
+			L::killed(&k)?;
+			S::remove(k);
+		}
+		Ok(())
+	}
+	fn mutate<R>(k: &K, f: impl FnOnce(&mut T) -> R) -> Result<R, DispatchError> {
+		if !S::contains_key(&k) {
+			L::created(k)?;
+		}
+		Ok(S::mutate(k, f))
+	}
+	fn mutate_exists<R>(k: &K, f: impl FnOnce(&mut Option<T>) -> R) -> Result<R, DispatchError> {
+		S::try_mutate_exists(k, |maybe_value| {
+			let existed = maybe_value.is_some();
+			let r = f(maybe_value);
+			let exists = maybe_value.is_some();
+
+			if !existed && exists {
+				L::created(k)?;
+			} else if existed && !exists {
+				L::killed(k)?;
+			}
+			Ok(r)
+		})
+	}
+	fn try_mutate_exists<R, E: From<DispatchError>>(
+		k: &K,
+		f: impl FnOnce(&mut Option<T>) -> Result<R, E>,
+	) -> Result<R, E> {
+		S::try_mutate_exists(k, |maybe_value| {
+			let existed = maybe_value.is_some();
+			let r = f(maybe_value)?;
+			let exists = maybe_value.is_some();
+
+			if !existed && exists {
+				L::created(k).map_err(E::from)?;
+			} else if existed && !exists {
+				L::killed(k).map_err(E::from)?;
+			}
+			Ok(r)
+		})
+	}
+}
diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs
new file mode 100644
index 0000000000000..aca62bcad65c7
--- /dev/null
+++ b/frame/support/src/traits/tokens.rs
@@ -0,0 +1,31 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for working with tokens and their associated datastructures.
+
+pub mod currency;
+pub mod fungible;
+pub mod fungibles;
+pub mod imbalance;
+mod misc;
+pub mod nonfungible;
+pub mod nonfungibles;
+pub use imbalance::Imbalance;
+pub use misc::{
+	BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement,
+	WithdrawConsequence, WithdrawReasons,
+};
diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs
new file mode 100644
index 0000000000000..bf078658477f5
--- /dev/null
+++ b/frame/support/src/traits/tokens/currency.rs
@@ -0,0 +1,201 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The Currency trait and associated types.
+
+use super::{
+	imbalance::{Imbalance, SignedImbalance},
+	misc::{Balance, ExistenceRequirement, WithdrawReasons},
+};
+use crate::dispatch::{DispatchError, DispatchResult};
+use codec::MaxEncodedLen;
+use sp_runtime::traits::MaybeSerializeDeserialize;
+use sp_std::fmt::Debug;
+
+mod reservable;
+pub use reservable::{NamedReservableCurrency, ReservableCurrency};
+mod lockable;
+pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule};
+
+/// Abstraction over a fungible assets system.
+pub trait Currency<AccountId> {
+	/// The balance of an account.
+	type Balance: Balance + MaybeSerializeDeserialize + Debug + MaxEncodedLen;
+
+	/// The opaque token type for an imbalance. This is returned by unbalanced operations
+	/// and must be dealt with. It may be dropped but cannot be cloned.
+	type PositiveImbalance: Imbalance<Self::Balance, Opposite = Self::NegativeImbalance>;
+
+	/// The opaque token type for an imbalance. This is returned by unbalanced operations
+	/// and must be dealt with. It may be dropped but cannot be cloned.
+	type NegativeImbalance: Imbalance<Self::Balance, Opposite = Self::PositiveImbalance>;
+
+	// PUBLIC IMMUTABLES
+
+	/// The combined balance of `who`.
+	fn total_balance(who: &AccountId) -> Self::Balance;
+
+	/// Same result as `slash(who, value)` (but without the side-effects) assuming there are no
+	/// balance changes in the meantime and only the reserved balance is not taken into account.
+	fn can_slash(who: &AccountId, value: Self::Balance) -> bool;
+
+	/// The total amount of issuance in the system.
+	fn total_issuance() -> Self::Balance;
+
+	/// The minimum balance any single account may have. This is equivalent to the `Balances`
+	/// module's `ExistentialDeposit`.
+	fn minimum_balance() -> Self::Balance;
+
+	/// Reduce the total issuance by `amount` and return the corresponding imbalance. The
+	/// imbalance will typically be used to reduce an account by the same amount with e.g.
+	/// `settle`.
+	///
+	/// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example
+	/// in the case of underflow.
+	fn burn(amount: Self::Balance) -> Self::PositiveImbalance;
+
+	/// Increase the total issuance by `amount` and return the corresponding imbalance. The
+	/// imbalance will typically be used to increase an account by the same amount with e.g.
+	/// `resolve_into_existing` or `resolve_creating`.
+	///
+	/// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example
+	/// in the case of overflow.
+	fn issue(amount: Self::Balance) -> Self::NegativeImbalance;
+
+	/// Produce a pair of imbalances that cancel each other out exactly.
+	///
+	/// This is just the same as burning and issuing the same amount and has no effect on the
+	/// total issuance.
+	fn pair(amount: Self::Balance) -> (Self::PositiveImbalance, Self::NegativeImbalance) {
+		(Self::burn(amount.clone()), Self::issue(amount))
+	}
+
+	/// The 'free' balance of a given account.
+	///
+	/// This is the only balance that matters in terms of most operations on tokens. It alone
+	/// is used to determine the balance when in the contract execution environment. When this
+	/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
+	/// deleted: specifically `FreeBalance`.
+	///
+	/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
+	/// collapsed to zero if it ever becomes less than `ExistentialDeposit`).
+	fn free_balance(who: &AccountId) -> Self::Balance;
+
+	/// Returns `Ok` iff the account is able to make a withdrawal of the given amount
+	/// for the given reason. Basically, it's just a dry-run of `withdraw`.
+	///
+	/// `Err(...)` with the reason why not otherwise.
+	fn ensure_can_withdraw(
+		who: &AccountId,
+		_amount: Self::Balance,
+		reasons: WithdrawReasons,
+		new_balance: Self::Balance,
+	) -> DispatchResult;
+
+	// PUBLIC MUTABLES (DANGEROUS)
+
+	/// Transfer some liquid free balance to another staker.
+	///
+	/// This is a very high-level function. It will ensure all appropriate fees are paid
+	/// and no imbalance in the system remains.
+	fn transfer(
+		source: &AccountId,
+		dest: &AccountId,
+		value: Self::Balance,
+		existence_requirement: ExistenceRequirement,
+	) -> DispatchResult;
+
+	/// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the
+	/// free balance. This function cannot fail.
+	///
+	/// The resulting imbalance is the first item of the tuple returned.
+	///
+	/// As much funds up to `value` will be deducted as possible. If this is less than `value`,
+	/// then a non-zero second item will be returned.
+	fn slash(who: &AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance);
+
+	/// Mints `value` to the free balance of `who`.
+	///
+	/// If `who` doesn't exist, nothing is done and an `Err` is returned.
+	fn deposit_into_existing(
+		who: &AccountId,
+		value: Self::Balance,
+	) -> Result<Self::PositiveImbalance, DispatchError>;
+
+	/// Similar to `deposit_creating`, only accepts a `NegativeImbalance` and returns nothing on
+	/// success.
+	fn resolve_into_existing(
+		who: &AccountId,
+		value: Self::NegativeImbalance,
+	) -> Result<(), Self::NegativeImbalance> {
+		let v = value.peek();
+		match Self::deposit_into_existing(who, v) {
+			Ok(opposite) => Ok(drop(value.offset(opposite))),
+			_ => Err(value),
+		}
+	}
+
+	/// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created.
+	///
+	/// Infallible.
+	fn deposit_creating(who: &AccountId, value: Self::Balance) -> Self::PositiveImbalance;
+
+	/// Similar to `deposit_creating`, only accepts a `NegativeImbalance` and returns nothing on
+	/// success.
+	fn resolve_creating(who: &AccountId, value: Self::NegativeImbalance) {
+		let v = value.peek();
+		drop(value.offset(Self::deposit_creating(who, v)));
+	}
+
+	/// Removes some free balance from the `who` account for `reason` if possible. If `liveness`
+	/// is `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining.
+	///
+	/// This checks any locks, vesting, and liquidity requirements. If the removal is not
+	/// possible, then it returns `Err`.
+	///
+	/// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose
+	/// value is `value`.
+	fn withdraw(
+		who: &AccountId,
+		value: Self::Balance,
+		reasons: WithdrawReasons,
+		liveness: ExistenceRequirement,
+	) -> Result<Self::NegativeImbalance, DispatchError>;
+
+	/// Similar to `withdraw`, only accepts a `PositiveImbalance` and returns nothing on success.
+	fn settle(
+		who: &AccountId,
+		value: Self::PositiveImbalance,
+		reasons: WithdrawReasons,
+		liveness: ExistenceRequirement,
+	) -> Result<(), Self::PositiveImbalance> {
+		let v = value.peek();
+		match Self::withdraw(who, v, reasons, liveness) {
+			Ok(opposite) => Ok(drop(value.offset(opposite))),
+			_ => Err(value),
+		}
+	}
+
+	/// Ensure an account's free balance equals some value; this will create the account
+	/// if needed.
+	///
+	/// Returns a signed imbalance and status to indicate if the account was successfully updated
+	/// or update has led to killing of the account.
+	fn make_free_balance_be(
+		who: &AccountId,
+		balance: Self::Balance,
+	) -> SignedImbalance<Self::Balance, Self::PositiveImbalance>;
+}
diff --git a/frame/support/src/traits/tokens/currency/lockable.rs b/frame/support/src/traits/tokens/currency/lockable.rs
new file mode 100644
index 0000000000000..26463864a6471
--- /dev/null
+++ b/frame/support/src/traits/tokens/currency/lockable.rs
@@ -0,0 +1,108 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The lockable currency trait and some associated types.
+
+use super::{super::misc::WithdrawReasons, Currency};
+use crate::{dispatch::DispatchResult, traits::misc::Get};
+
+/// An identifier for a lock. Used for disambiguating different locks so that
+/// they can be individually replaced or removed.
+pub type LockIdentifier = [u8; 8];
+
+/// A currency whose accounts can have liquidity restrictions.
+pub trait LockableCurrency<AccountId>: Currency<AccountId> {
+	/// The quantity used to denote time; usually just a `BlockNumber`.
+	type Moment;
+
+	/// The maximum number of locks a user should have on their account.
+	type MaxLocks: Get<u32>;
+
+	/// Create a new balance lock on account `who`.
+	///
+	/// If the new lock is valid (i.e. not already expired), it will push the struct to
+	/// the `Locks` vec in storage. Note that you can lock more funds than a user has.
+	///
+	/// If the lock `id` already exists, this will update it.
+	fn set_lock(
+		id: LockIdentifier,
+		who: &AccountId,
+		amount: Self::Balance,
+		reasons: WithdrawReasons,
+	);
+
+	/// Changes a balance lock (selected by `id`) so that it becomes less liquid in all
+	/// parameters or creates a new one if it does not exist.
+	///
+	/// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it
+	/// applies the most severe constraints of the two, while `set_lock` replaces the lock
+	/// with the new parameters. As in, `extend_lock` will set:
+	/// - maximum `amount`
+	/// - bitwise mask of all `reasons`
+	fn extend_lock(
+		id: LockIdentifier,
+		who: &AccountId,
+		amount: Self::Balance,
+		reasons: WithdrawReasons,
+	);
+
+	/// Remove an existing lock.
+	fn remove_lock(id: LockIdentifier, who: &AccountId);
+}
+
+/// A vesting schedule over a currency. This allows a particular currency to have vesting limits
+/// applied to it.
+pub trait VestingSchedule<AccountId> {
+	/// The quantity used to denote time; usually just a `BlockNumber`.
+	type Moment;
+
+	/// The currency that this schedule applies to.
+	type Currency: Currency<AccountId>;
+
+	/// Get the amount that is currently being vested and cannot be transferred out of this
+	/// account. Returns `None` if the account has no vesting schedule.
+	fn vesting_balance(who: &AccountId)
+		-> Option<<Self::Currency as Currency<AccountId>>::Balance>;
+
+	/// Adds a vesting schedule to a given account.
+	///
+	/// If the account has `MaxVestingSchedules`, an Error is returned and nothing
+	/// is updated.
+	///
+	/// Is a no-op if the amount to be vested is zero.
+	///
+	/// NOTE: This doesn't alter the free balance of the account.
+	fn add_vesting_schedule(
+		who: &AccountId,
+		locked: <Self::Currency as Currency<AccountId>>::Balance,
+		per_block: <Self::Currency as Currency<AccountId>>::Balance,
+		starting_block: Self::Moment,
+	) -> DispatchResult;
+
+	/// Checks if `add_vesting_schedule` would work against `who`.
+	fn can_add_vesting_schedule(
+		who: &AccountId,
+		locked: <Self::Currency as Currency<AccountId>>::Balance,
+		per_block: <Self::Currency as Currency<AccountId>>::Balance,
+		starting_block: Self::Moment,
+	) -> DispatchResult;
+
+	/// Remove a vesting schedule for a given account.
+	///
+	/// NOTE: This doesn't alter the free balance of the account.
+	fn remove_vesting_schedule(who: &AccountId, schedule_index: u32) -> DispatchResult;
+}
diff --git a/frame/support/src/traits/tokens/currency/reservable.rs b/frame/support/src/traits/tokens/currency/reservable.rs
new file mode 100644
index 0000000000000..0ca7a93dc7f69
--- /dev/null
+++ b/frame/support/src/traits/tokens/currency/reservable.rs
@@ -0,0 +1,211 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The reservable currency trait.
+
+use super::{super::misc::BalanceStatus, Currency};
+use crate::dispatch::{DispatchError, DispatchResult};
+
+/// A currency where funds can be reserved from the user.
+pub trait ReservableCurrency<AccountId>: Currency<AccountId> {
+	/// Same result as `reserve(who, value)` (but without the side-effects) assuming there
+	/// are no balance changes in the meantime.
+	fn can_reserve(who: &AccountId, value: Self::Balance) -> bool;
+
+	/// Deducts up to `value` from reserved balance of `who`. This function cannot fail.
+	///
+	/// As much funds up to `value` will be deducted as possible. If the reserve balance of `who`
+	/// is less than `value`, then a non-zero second item will be returned.
+	fn slash_reserved(
+		who: &AccountId,
+		value: Self::Balance,
+	) -> (Self::NegativeImbalance, Self::Balance);
+
+	/// The amount of the balance of a given account that is externally reserved; this can still
+	/// get slashed, but gets slashed last of all.
+	///
+	/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
+	/// that are still 'owned' by the account holder, but which are suspendable.
+	///
+	/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve
+	/// account' is deleted: specifically, `ReservedBalance`.
+	///
+	/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
+	/// collapsed to zero if it ever becomes less than `ExistentialDeposit`).
+	fn reserved_balance(who: &AccountId) -> Self::Balance;
+
+	/// Moves `value` from balance to reserved balance.
+	///
+	/// If the free balance is lower than `value`, then no funds will be moved and an `Err` will
+	/// be returned to notify of this. This is different behavior than `unreserve`.
+	fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult;
+
+	/// Moves up to `value` from reserved balance to free balance. This function cannot fail.
+	///
+	/// As much funds up to `value` will be moved as possible. If the reserve balance of `who`
+	/// is less than `value`, then the remaining amount will be returned.
+	///
+	/// # NOTES
+	///
+	/// - This is different from `reserve`.
+	/// - If the remaining reserved balance is less than `ExistentialDeposit`, it will
+	///   invoke `on_reserved_too_low` and could reap the account.
+	fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance;
+
+	/// Moves up to `value` from reserved balance of account `slashed` to balance of account
+	/// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will
+	/// be returned. Funds will be placed in either the `free` balance or the `reserved` balance,
+	/// depending on the `status`.
+	///
+	/// As much funds up to `value` will be deducted as possible. If this is less than `value`,
+	/// then `Ok(non_zero)` will be returned.
+	fn repatriate_reserved(
+		slashed: &AccountId,
+		beneficiary: &AccountId,
+		value: Self::Balance,
+		status: BalanceStatus,
+	) -> Result<Self::Balance, DispatchError>;
+}
+
+pub trait NamedReservableCurrency<AccountId>: ReservableCurrency<AccountId> {
+	/// An identifier for a reserve. Used for disambiguating different reserves so that
+	/// they can be individually replaced or removed.
+	type ReserveIdentifier;
+
+	/// Deducts up to `value` from reserved balance of `who`. This function cannot fail.
+	///
+	/// As much funds up to `value` will be deducted as possible. If the reserve balance of `who`
+	/// is less than `value`, then a non-zero second item will be returned.
+	fn slash_reserved_named(
+		id: &Self::ReserveIdentifier,
+		who: &AccountId,
+		value: Self::Balance,
+	) -> (Self::NegativeImbalance, Self::Balance);
+
+	/// The amount of the balance of a given account that is externally reserved; this can still
+	/// get slashed, but gets slashed last of all.
+	///
+	/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
+	/// that are still 'owned' by the account holder, but which are suspendable.
+	///
+	/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve
+	/// account' is deleted: specifically, `ReservedBalance`.
+	///
+	/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
+	/// collapsed to zero if it ever becomes less than `ExistentialDeposit`).
+	fn reserved_balance_named(id: &Self::ReserveIdentifier, who: &AccountId) -> Self::Balance;
+
+	/// Moves `value` from balance to reserved balance.
+	///
+	/// If the free balance is lower than `value`, then no funds will be moved and an `Err` will
+	/// be returned to notify of this. This is different behavior than `unreserve`.
+	fn reserve_named(
+		id: &Self::ReserveIdentifier,
+		who: &AccountId,
+		value: Self::Balance,
+	) -> DispatchResult;
+
+	/// Moves up to `value` from reserved balance to free balance. This function cannot fail.
+	///
+	/// As much funds up to `value` will be moved as possible. If the reserve balance of `who`
+	/// is less than `value`, then the remaining amount will be returned.
+	///
+	/// # NOTES
+	///
+	/// - This is different from `reserve`.
+	/// - If the remaining reserved balance is less than `ExistentialDeposit`, it will
+	///   invoke `on_reserved_too_low` and could reap the account.
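+	///
+	/// A small usage sketch (`Balances` is a hypothetical implementor, and `who`/`amount` are
+	/// illustrative):
+	/// ```ignore
+	/// let id: [u8; 8] = *b"deposit!";
+	/// // `remaining` is the portion that could not be unreserved (zero on full success):
+	/// let remaining = Balances::unreserve_named(&id, &who, amount);
+	/// ```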
+ fn unreserve_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + value: Self::Balance, + ) -> Self::Balance; + + /// Moves up to `value` from reserved balance of account `slashed` to balance of account + /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be + /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, + /// depending on the `status`. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then `Ok(non_zero)` will be returned. + fn repatriate_reserved_named( + id: &Self::ReserveIdentifier, + slashed: &AccountId, + beneficiary: &AccountId, + value: Self::Balance, + status: BalanceStatus, + ) -> Result<Self::Balance, DispatchError>; + + /// Ensure the reserved balance is equal to `value`. + /// + /// This will reserve the extra amount if the current reserved balance is less than `value`, + /// and unreserve the excess if the current reserved balance is greater than `value`. + fn ensure_reserved_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + value: Self::Balance, + ) -> DispatchResult { + let current = Self::reserved_balance_named(id, who); + if current > value { + // we always have enough balance to unreserve here + Self::unreserve_named(id, who, current - value); + Ok(()) + } else if value > current { + // we checked value > current + Self::reserve_named(id, who, value - current) + } else { + // current == value + Ok(()) + } + } + + /// Unreserve all the named reserved balances, returning the unreserved amount. + /// + /// Is a no-op if the value to be unreserved is zero. + fn unreserve_all_named(id: &Self::ReserveIdentifier, who: &AccountId) -> Self::Balance { + let value = Self::reserved_balance_named(id, who); + Self::unreserve_named(id, who, value); + value + } + + /// Slash all the reserved balance, returning the negative imbalance created. + /// + /// Is a no-op if the value to be slashed is zero. + fn slash_all_reserved_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + ) -> Self::NegativeImbalance { + let value = Self::reserved_balance_named(id, who); + Self::slash_reserved_named(id, who, value).0 + } + + /// Move all the named reserved balance of one account into the balance of another, according to + /// `status`. If `status` is `Reserved`, the balance will be reserved with given `id`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id is equal to `beneficiary` and the `status` is `Reserved`. + fn repatriate_all_reserved_named( + id: &Self::ReserveIdentifier, + slashed: &AccountId, + beneficiary: &AccountId, + status: BalanceStatus, + ) -> DispatchResult { + let value = Self::reserved_balance_named(id, slashed); + Self::repatriate_reserved_named(id, slashed, beneficiary, value, status).map(|_| ()) + } +} diff --git a/frame/support/src/traits/tokens/fungible.rs b/frame/support/src/traits/tokens/fungible.rs new file mode 100644 index 0000000000000..b033236d447bb --- /dev/null +++ b/frame/support/src/traits/tokens/fungible.rs @@ -0,0 +1,335 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for dealing with a single fungible token class and any associated types. + +use super::{ + misc::{Balance, DepositConsequence, WithdrawConsequence}, + *, +}; +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::misc::Get, +}; +use sp_runtime::traits::Saturating; + +mod balanced; +mod imbalance; +pub use balanced::{Balanced, Unbalanced}; +pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; + +/// Trait for providing balance-inspection access to a fungible asset. +pub trait Inspect<AccountId> { + /// Scalar type for representing balance of an account. + type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The minimum balance any single account may have. + fn minimum_balance() -> Self::Balance; + + /// Get the balance of `who`. + fn balance(who: &AccountId) -> Self::Balance; + + /// Get the maximum amount that `who` can withdraw/transfer successfully. + fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance; + + /// Returns `true` if the balance of `who` may be increased by `amount`. + fn can_deposit(who: &AccountId, amount: Self::Balance) -> DepositConsequence; + + /// Returns `Failed` if the balance of `who` may not be decreased by `amount`, otherwise + /// the consequence. + fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence<Self::Balance>; +} + +/// Trait for providing an ERC-20 style fungible asset. +pub trait Mutate<AccountId>: Inspect<AccountId> { + /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't + /// possible then an `Err` is returned and nothing is changed. + fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Decrease the balance of `who` by at least `amount`, possibly slightly more in the case of + /// minimum_balance requirements, burning the tokens. If that isn't possible then an `Err` is + /// returned and nothing is changed. If successful, the amount of tokens reduced is returned. + fn burn_from(who: &AccountId, amount: Self::Balance) -> Result<Self::Balance, DispatchError>; + + /// Attempt to reduce the balance of `who` by as much as possible up to `amount`, and possibly + /// slightly more due to minimum_balance requirements. If no decrease is possible then an `Err` + /// is returned and nothing is changed. If successful, the amount of tokens reduced is returned. + /// + /// The default implementation just uses `burn_from` along with `reducible_balance` to ensure + /// that it doesn't fail. + fn slash(who: &AccountId, amount: Self::Balance) -> Result<Self::Balance, DispatchError> { + Self::burn_from(who, Self::reducible_balance(who, false).min(amount)) + } + + /// Transfer funds from one account into another. The default implementation uses `mint_into` + /// and `burn_from` and may generate unwanted events.
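+ /// + /// A minimal sketch of intended use (`F` stands for any implementation of this trait; the + /// accounts and amounts are illustrative): + /// ```ignore + /// F::mint_into(&source, 100u32.into())?; + /// // Burn from `source` and mint into `dest`; the returned value is the amount actually + /// // moved, which may slightly exceed 40 if dust rules sweep up a remainder. + /// let moved = F::teleport(&source, &dest, 40u32.into())?; + /// ```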
+ fn teleport( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + ) -> Result { + let extra = Self::can_withdraw(&source, amount).into_result()?; + Self::can_deposit(&dest, amount.saturating_add(extra)).into_result()?; + let actual = Self::burn_from(source, amount)?; + debug_assert!( + actual == amount.saturating_add(extra), + "can_withdraw must agree with withdraw; qed" + ); + match Self::mint_into(dest, actual) { + Ok(_) => Ok(actual), + Err(err) => { + debug_assert!(false, "can_deposit returned true previously; qed"); + // attempt to return the funds back to source + let revert = Self::mint_into(source, actual); + debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); + Err(err) + }, + } + } +} + +/// Trait for providing a fungible asset which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer funds from one account into another. + fn transfer( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result; +} + +/// Trait for inspecting a fungible asset which can be reserved. +pub trait InspectHold: Inspect { + /// Amount of funds held in reserve by `who`. + fn balance_on_hold(who: &AccountId) -> Self::Balance; + + /// Check to see if some `amount` of funds of `who` may be placed on hold. + fn can_hold(who: &AccountId, amount: Self::Balance) -> bool; +} + +/// Trait for mutating a fungible asset which can be reserved. +pub trait MutateHold: InspectHold + Transfer { + /// Hold some funds in an account. + fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Release up to `amount` held funds in an account. + /// + /// The actual amount released is returned with `Ok`. + /// + /// If `best_effort` is `true`, then the amount actually unreserved and returned as the inner + /// value of `Ok` may be smaller than the `amount` passed. + fn release( + who: &AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result; + + /// Transfer held funds into a destination account. + /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without + /// error. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_held( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_held: bool, + ) -> Result; +} + +/// Trait for slashing a fungible asset which can be reserved. +pub trait BalancedHold: Balanced + MutateHold { + /// Reduce the balance of some funds on hold in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds that are on hold up to `amount` will be deducted as possible. If this is less + /// than `amount`, then a non-zero second item will be returned. 
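+ /// + /// For instance (an illustrative sketch, not a guarantee of any implementation): slashing 100 + /// from an account with only 60 on hold releases and slashes those 60, returning a credit of + /// 60 alongside a shortfall of 40.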
+ fn slash_held( + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); +} + +impl + MutateHold> BalancedHold for T { + fn slash_held( + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance) { + let actual = match Self::release(who, amount, true) { + Ok(x) => x, + Err(_) => return (Imbalance::default(), amount), + }; + >::slash(who, actual) + } +} + +/// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying +/// a single item. +pub struct ItemOf< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, +>(sp_std::marker::PhantomData<(F, A, AccountId)>); + +impl< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, + > Inspect for ItemOf +{ + type Balance = >::Balance; + fn total_issuance() -> Self::Balance { + >::total_issuance(A::get()) + } + fn minimum_balance() -> Self::Balance { + >::minimum_balance(A::get()) + } + fn balance(who: &AccountId) -> Self::Balance { + >::balance(A::get(), who) + } + fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance { + >::reducible_balance(A::get(), who, keep_alive) + } + fn can_deposit(who: &AccountId, amount: Self::Balance) -> DepositConsequence { + >::can_deposit(A::get(), who, amount) + } + fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence { + >::can_withdraw(A::get(), who, amount) + } +} + +impl< + F: fungibles::Mutate, + A: Get<>::AssetId>, + AccountId, + > Mutate for ItemOf +{ + fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::mint_into(A::get(), who, amount) + } + fn burn_from(who: &AccountId, amount: Self::Balance) -> Result { + >::burn_from(A::get(), who, amount) + } +} + +impl< + F: fungibles::Transfer, + A: Get<>::AssetId>, + AccountId, + > Transfer for ItemOf +{ + fn transfer( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result { + >::transfer(A::get(), source, dest, amount, keep_alive) + } +} + +impl< + F: fungibles::InspectHold, + A: Get<>::AssetId>, + AccountId, + > InspectHold for ItemOf +{ + fn balance_on_hold(who: &AccountId) -> Self::Balance { + >::balance_on_hold(A::get(), who) + } + fn can_hold(who: &AccountId, amount: Self::Balance) -> bool { + >::can_hold(A::get(), who, amount) + } +} + +impl< + F: fungibles::MutateHold, + A: Get<>::AssetId>, + AccountId, + > MutateHold for ItemOf +{ + fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::hold(A::get(), who, amount) + } + fn release( + who: &AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result { + >::release(A::get(), who, amount, best_effort) + } + fn transfer_held( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result { + >::transfer_held( + A::get(), + source, + dest, + amount, + best_effort, + on_hold, + ) + } +} + +impl< + F: fungibles::Unbalanced, + A: Get<>::AssetId>, + AccountId, + > Unbalanced for ItemOf +{ + fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::set_balance(A::get(), who, amount) + } + fn set_total_issuance(amount: Self::Balance) -> () { + >::set_total_issuance(A::get(), amount) + } + fn decrease_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result { + >::decrease_balance(A::get(), who, amount) + } + fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + >::decrease_balance_at_most(A::get(), who, amount) + } + fn increase_balance( + who: &AccountId, + amount: 
Self::Balance, + ) -> Result { + >::increase_balance(A::get(), who, amount) + } + fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + >::increase_balance_at_most(A::get(), who, amount) + } +} diff --git a/frame/support/src/traits/tokens/fungible/balanced.rs b/frame/support/src/traits/tokens/fungible/balanced.rs new file mode 100644 index 0000000000000..7b33a595a1b55 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/balanced.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The trait and associated types for sets of fungible tokens that manage total issuance without +//! requiring atomic balanced operations. + +use super::{super::Imbalance as ImbalanceT, *}; +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::misc::{SameOrOther, TryDrop}, +}; +use sp_runtime::{ + traits::{CheckedAdd, Zero}, + ArithmeticError, TokenError, +}; +use sp_std::marker::PhantomData; + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. +/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used. + type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(amount: Self::Balance) -> DebtOf; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(amount: Self::Balance) -> CreditOf; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(amount: Self::Balance) -> (DebtOf, CreditOf) { + (Self::rescind(amount), Self::issue(amount)) + } + + /// Deducts up to `value` from the combined balance of `who`. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. 
If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash(who: &AccountId, amount: Self::Balance) -> (CreditOf<AccountId, Self>, Self::Balance); + + /// Mints exactly `value` into the account of `who`. + /// + /// If `who` doesn't exist, nothing is done and an `Err` is returned. This could happen because + /// the account doesn't yet exist and it isn't possible to create it under the current + /// circumstances and with `value` in it. + fn deposit( + who: &AccountId, + value: Self::Balance, + ) -> Result<DebtOf<AccountId, Self>, DispatchError>; + + /// Removes `value` balance from the account of `who` if possible. + /// + /// If the removal is not possible, then it returns `Err` and nothing is changed. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is no less than `value`. It may be more in the case that removing it reduced it below + /// `Self::minimum_balance()`. + fn withdraw( + who: &AccountId, + value: Self::Balance, + // TODO: liveness: ExistenceRequirement, + ) -> Result<CreditOf<AccountId, Self>, DispatchError>; + + /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. + /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: CreditOf<AccountId, Self>, + ) -> Result<(), CreditOf<AccountId, Self>> { + let v = credit.peek(); + let debt = match Self::deposit(who, v) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + let result = credit.offset(debt).try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: DebtOf<AccountId, Self>, + // TODO: liveness: ExistenceRequirement, + ) -> Result<CreditOf<AccountId, Self>, DebtOf<AccountId, Self>> { + let amount = debt.peek(); + let credit = match Self::withdraw(who, amount) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + match credit.offset(debt) { + SameOrOther::None => Ok(CreditOf::<AccountId, Self>::zero()), + SameOrOther::Same(dust) => Ok(dust), + SameOrOther::Other(rest) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + }, + } + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental inflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced<AccountId>: Inspect<AccountId> { + /// Set the balance of `who` to `amount`. If this cannot be done for some reason (e.g. + /// because the account cannot be created or an overflow) then an `Err` is returned. + fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Set the total issuance to `amount`. + fn set_total_issuance(amount: Self::Balance); + + /// Reduce the balance of `who` by `amount`. If it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If Ok, return the imbalance.
+ /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + fn decrease_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result { + let old_balance = Self::balance(who); + let (mut new_balance, mut amount) = if old_balance < amount { + Err(TokenError::NoFunds)? + } else { + (old_balance - amount, amount) + }; + if new_balance < Self::minimum_balance() { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + // Defensive only - this should not fail now. + Self::set_balance(who, new_balance)?; + Ok(amount) + } + + /// Reduce the balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + /// + /// Return the imbalance by which the account was reduced. + fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + let old_balance = Self::balance(who); + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) + } else { + (old_balance - amount, amount) + }; + let minimum_balance = Self::minimum_balance(); + if new_balance < minimum_balance { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + let mut r = Self::set_balance(who, new_balance); + if r.is_err() { + // Some error, probably because we tried to destroy an account which cannot be + // destroyed. + if new_balance.is_zero() && amount >= minimum_balance { + new_balance = minimum_balance; + amount -= minimum_balance; + r = Self::set_balance(who, new_balance); + } + if r.is_err() { + // Still an error. Apparently it's not possible to reduce at all. + amount = Zero::zero(); + } + } + amount + } + + /// Increase the balance of `who` by `amount`. If it cannot be increased by that amount + /// for some reason, return `Err` and don't increase it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the account of `who` is zero. + fn increase_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result { + let old_balance = Self::balance(who); + let new_balance = old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; + if new_balance < Self::minimum_balance() { + Err(TokenError::BelowMinimum)? + } + if old_balance != new_balance { + Self::set_balance(who, new_balance)?; + } + Ok(amount) + } + + /// Increase the balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance will be zero in the case that + /// `amount < Self::minimum_balance()`. + /// + /// Return the imbalance by which the account was increased. + fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + let old_balance = Self::balance(who); + let mut new_balance = old_balance.saturating_add(amount); + let mut amount = new_balance - old_balance; + if new_balance < Self::minimum_balance() { + new_balance = Zero::zero(); + amount = Zero::zero(); + } + if old_balance == new_balance || Self::set_balance(who, new_balance).is_ok() { + amount + } else { + Zero::zero() + } + } +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. 
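+/// (A worked sketch: if a `Debt` of 10 is dropped unhandled, accounts would hold 10 more than the +/// recorded issuance, so this handler adds 10 to the total issuance to make the books balance +/// again.)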
+pub struct IncreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for IncreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_add(amount)) + } +} + +/// Simple handler for an imbalance drop which decreases the total issuance of the system by the +/// imbalance amount. Used for leftover credit. +pub struct DecreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for DecreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_sub(amount)) + } +} + +/// An imbalance type which uses `DecreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that funds in someone's account have been removed and not yet placed anywhere +/// else. If it gets dropped, then those funds will be assumed to be "burned" and the total supply +/// will be accordingly decreased to ensure it equals the sum of the balances of all accounts. +type Credit = Imbalance< + >::Balance, + DecreaseIssuance, + IncreaseIssuance, +>; + +/// An imbalance type which uses `IncreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that there are funds in someone's account whose origin is as yet unaccounted +/// for. If it gets dropped, then those funds will be assumed to be "minted" and the total supply +/// will be accordingly increased to ensure it equals the sum of the balances of all accounts. +type Debt = Imbalance< + >::Balance, + IncreaseIssuance, + DecreaseIssuance, +>; + +/// Create some `Credit` item. Only for internal use. +fn credit>(amount: U::Balance) -> Credit { + Imbalance::new(amount) +} + +/// Create some `Debt` item. Only for internal use. +fn debt>(amount: U::Balance) -> Debt { + Imbalance::new(amount) +} + +impl> Balanced for U { + type OnDropCredit = DecreaseIssuance; + type OnDropDebt = IncreaseIssuance; + fn rescind(amount: Self::Balance) -> Debt { + let old = U::total_issuance(); + let new = old.saturating_sub(amount); + U::set_total_issuance(new); + debt(old - new) + } + fn issue(amount: Self::Balance) -> Credit { + let old = U::total_issuance(); + let new = old.saturating_add(amount); + U::set_total_issuance(new); + credit(new - old) + } + fn slash(who: &AccountId, amount: Self::Balance) -> (Credit, Self::Balance) { + let slashed = U::decrease_balance_at_most(who, amount); + // `slashed` could be less than, greater than or equal to `amount`. + // If slashed == amount, it means the account had at least amount in it and it could all be + // removed without a problem. + // If slashed > amount, it means the account had more than amount in it, but not enough more + // to push it over minimum_balance. + // If slashed < amount, it means the account didn't have enough in it to be reduced by + // `amount` without being destroyed. 
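+ // A numeric sketch (illustrative, assuming minimum_balance() == 10): slashing 50 from a + // balance of 55 dusts the trailing 5, so slashed == 55 and the shortfall is 0; slashing 50 + // from a balance of 30 yields slashed == 30 and a shortfall of 20.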
+ (credit(slashed), amount.saturating_sub(slashed)) + } + fn deposit( + who: &AccountId, + amount: Self::Balance, + ) -> Result<Debt<AccountId, Self>, DispatchError> { + let increase = U::increase_balance(who, amount)?; + Ok(debt(increase)) + } + fn withdraw( + who: &AccountId, + amount: Self::Balance, + // TODO: liveness: ExistenceRequirement, + ) -> Result<Credit<AccountId, Self>, DispatchError> { + let decrease = U::decrease_balance(who, amount)?; + Ok(credit(decrease)) + } +} diff --git a/frame/support/src/traits/tokens/fungible/imbalance.rs b/frame/support/src/traits/tokens/fungible/imbalance.rs new file mode 100644 index 0000000000000..362e0c126d996 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -0,0 +1,152 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The imbalance type and its associates, which keep everything adding up properly with +//! unbalanced operations. + +use super::{super::Imbalance as ImbalanceT, balanced::Balanced, misc::Balance, *}; +use crate::traits::misc::{SameOrOther, TryDrop}; +use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_std::marker::PhantomData; + +/// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or +/// debt (positive) imbalance. +pub trait HandleImbalanceDrop<Balance> { + /// Do something with the imbalance's value which is being dropped. + fn handle(amount: Balance); +} + +/// An imbalance in the system, representing a divergence of recorded token supply from the sum of +/// the balances of all accounts. This is `must_use` in order to ensure it gets handled (placing +/// into an account, settling from an account or altering the supply). +/// +/// Importantly, it has a special `Drop` impl, and cannot be created outside of this module. +#[must_use] +#[derive(RuntimeDebug, Eq, PartialEq)] +pub struct Imbalance< + B: Balance, + OnDrop: HandleImbalanceDrop<B>, + OppositeOnDrop: HandleImbalanceDrop<B>, +> { + amount: B, + _phantom: PhantomData<(OnDrop, OppositeOnDrop)>, +} + +impl<B: Balance, OnDrop: HandleImbalanceDrop<B>, OppositeOnDrop: HandleImbalanceDrop<B>> Drop + for Imbalance<B, OnDrop, OppositeOnDrop> +{ + fn drop(&mut self) { + if !self.amount.is_zero() { + OnDrop::handle(self.amount) + } + } +} + +impl<B: Balance, OnDrop: HandleImbalanceDrop<B>, OppositeOnDrop: HandleImbalanceDrop<B>> TryDrop + for Imbalance<B, OnDrop, OppositeOnDrop> +{ + /// Drop an instance cleanly. Only works if its value represents "no-operation".
+ fn try_drop(self) -> Result<(), Self> { + self.drop_zero() + } +} + +impl, OppositeOnDrop: HandleImbalanceDrop> Default + for Imbalance +{ + fn default() -> Self { + Self::zero() + } +} + +impl, OppositeOnDrop: HandleImbalanceDrop> + Imbalance +{ + pub(crate) fn new(amount: B) -> Self { + Self { amount, _phantom: PhantomData } + } +} + +impl, OppositeOnDrop: HandleImbalanceDrop> + ImbalanceT for Imbalance +{ + type Opposite = Imbalance; + + fn zero() -> Self { + Self { amount: Zero::zero(), _phantom: PhantomData } + } + + fn drop_zero(self) -> Result<(), Self> { + if self.amount.is_zero() { + sp_std::mem::forget(self); + Ok(()) + } else { + Err(self) + } + } + + fn split(self, amount: B) -> (Self, Self) { + let first = self.amount.min(amount); + let second = self.amount - first; + sp_std::mem::forget(self); + (Imbalance::new(first), Imbalance::new(second)) + } + fn merge(mut self, other: Self) -> Self { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + self + } + fn subsume(&mut self, other: Self) { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + } + fn offset( + self, + other: Imbalance, + ) -> SameOrOther> { + let (a, b) = (self.amount, other.amount); + sp_std::mem::forget((self, other)); + + if a == b { + SameOrOther::None + } else if a > b { + SameOrOther::Same(Imbalance::new(a - b)) + } else { + SameOrOther::Other(Imbalance::::new(b - a)) + } + } + fn peek(&self) -> B { + self.amount + } +} + +/// Imbalance implying that the total_issuance value is less than the sum of all account balances. +pub type DebtOf = Imbalance< + >::Balance, + // This will generally be implemented by increasing the total_issuance value. + >::OnDropDebt, + >::OnDropCredit, +>; + +/// Imbalance implying that the total_issuance value is greater than the sum of all account +/// balances. +pub type CreditOf = Imbalance< + >::Balance, + // This will generally be implemented by decreasing the total_issuance value. + >::OnDropCredit, + >::OnDropDebt, +>; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs new file mode 100644 index 0000000000000..3f5a1c75860c2 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -0,0 +1,229 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for sets of fungible tokens and any associated types. + +use super::{ + misc::{AssetId, Balance}, + *, +}; +use crate::dispatch::{DispatchError, DispatchResult}; +use sp_runtime::traits::Saturating; + +mod balanced; +pub use balanced::{Balanced, Unbalanced}; +mod imbalance; +pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; + +/// Trait for providing balance-inspection access to a set of named fungible assets. +pub trait Inspect { + /// Means of identifying one asset class from another. 
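+ /// In practice this is often a compact numeric type such as `u32` (an illustrative choice; + /// the concrete type is entirely up to the implementation).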
+ type AssetId: AssetId; + + /// Scalar type for representing balance of an account. + type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance(asset: Self::AssetId) -> Self::Balance; + + /// The minimum balance any single account may have. + fn minimum_balance(asset: Self::AssetId) -> Self::Balance; + + /// Get the `asset` balance of `who`. + fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Get the maximum amount of `asset` that `who` can withdraw/transfer successfully. + fn reducible_balance(asset: Self::AssetId, who: &AccountId, keep_alive: bool) -> Self::Balance; + + /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. + fn can_deposit( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> DepositConsequence; + + /// Returns `Failed` if the `asset` balance of `who` may not be decreased by `amount`, otherwise + /// the consequence. + fn can_withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence<Self::Balance>; +} + +/// Trait for providing a set of named fungible assets which can be created and destroyed. +pub trait Mutate<AccountId>: Inspect<AccountId> { + /// Attempt to increase the `asset` balance of `who` by `amount`. + /// + /// If not possible then don't do anything. Possible reasons for failure include: + /// - Minimum balance not met. + /// - Account cannot be created (e.g. because there is no provider reference and/or the asset + /// isn't considered worth anything). + /// + /// Since this is an operation which should be possible to take alone, if successful it will + /// increase the overall supply of the underlying token. + fn mint_into(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Attempt to reduce the `asset` balance of `who` by `amount`. + /// + /// If not possible then don't do anything. Possible reasons for failure include: + /// - Less funds in the account than `amount` + /// - Liquidity requirements (locks, reservations) prevent the funds from being removed + /// - Operation would require destroying the account and it is required to stay alive (e.g. + /// because it's providing a needed provider reference). + /// + /// Since this is an operation which should be possible to take alone, if successful it will + /// reduce the overall supply of the underlying token. + /// + /// Due to minimum balance requirements, it's possible that the amount withdrawn could be up to + /// `Self::minimum_balance() - 1` more than the `amount`. The total amount withdrawn is returned + /// in an `Ok` result. This may be safely ignored if you don't mind the overall supply reducing. + fn burn_from( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result<Self::Balance, DispatchError>; + + /// Attempt to reduce the `asset` balance of `who` by as much as possible up to `amount`, and + /// possibly slightly more due to minimum_balance requirements. If no decrease is possible then + /// an `Err` is returned and nothing is changed. If successful, the amount of tokens reduced is + /// returned. + /// + /// The default implementation just uses `burn_from` along with `reducible_balance` to ensure + /// that it doesn't fail. + fn slash( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result<Self::Balance, DispatchError> { + Self::burn_from(asset, who, Self::reducible_balance(asset, who, false).min(amount)) + } + + /// Transfer funds from one account into another.
The default implementation uses `mint_into` + /// and `burn_from` and may generate unwanted events. + fn teleport( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + ) -> Result { + let extra = Self::can_withdraw(asset, &source, amount).into_result()?; + Self::can_deposit(asset, &dest, amount.saturating_add(extra)).into_result()?; + let actual = Self::burn_from(asset, source, amount)?; + debug_assert!( + actual == amount.saturating_add(extra), + "can_withdraw must agree with withdraw; qed" + ); + match Self::mint_into(asset, dest, actual) { + Ok(_) => Ok(actual), + Err(err) => { + debug_assert!(false, "can_deposit returned true previously; qed"); + // attempt to return the funds back to source + let revert = Self::mint_into(asset, source, actual); + debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); + Err(err) + }, + } + } +} + +/// Trait for providing a set of named fungible assets which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer funds from one account into another. + fn transfer( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result; +} + +/// Trait for inspecting a set of named fungible assets which can be placed on hold. +pub trait InspectHold: Inspect { + /// Amount of funds held in hold. + fn balance_on_hold(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Check to see if some `amount` of `asset` may be held on the account of `who`. + fn can_hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> bool; +} + +/// Trait for mutating a set of named fungible assets which can be placed on hold. +pub trait MutateHold: InspectHold + Transfer { + /// Hold some funds in an account. + fn hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Release some funds in an account from being on hold. + /// + /// If `best_effort` is `true`, then the amount actually released and returned as the inner + /// value of `Ok` may be smaller than the `amount` passed. + fn release( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result; + + /// Transfer held funds into a destination account. + /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without + /// error. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_held( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result; +} + +/// Trait for mutating one of several types of fungible assets which can be held. +pub trait BalancedHold: Balanced + MutateHold { + /// Release and slash some funds in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `amount` will be deducted as possible. If this is less than `amount`, + /// then a non-zero second item will be returned. 
+ fn slash_held( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); +} + +impl + MutateHold> BalancedHold for T { + fn slash_held( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance) { + let actual = match Self::release(asset, who, amount, true) { + Ok(x) => x, + Err(_) => return (Imbalance::zero(asset), amount), + }; + >::slash(asset, who, actual) + } +} diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs new file mode 100644 index 0000000000000..40a65305b87da --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -0,0 +1,393 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The trait and associated types for sets of fungible tokens that manage total issuance without +//! requiring atomic balanced operations. + +use super::*; +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::misc::{SameOrOther, TryDrop}, +}; +use sp_arithmetic::traits::Saturating; +use sp_runtime::{ + traits::{CheckedAdd, Zero}, + ArithmeticError, TokenError, +}; +use sp_std::marker::PhantomData; + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. +/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used. + type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> DebtOf; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(asset: Self::AssetId, amount: Self::Balance) -> CreditOf; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. 
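+ /// + /// For example, under this sketch `pair(asset, 100)` yields a debt of 100 and a credit of 100 + /// for `asset`, which exactly offset one another.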
+ fn pair( + asset: Self::AssetId, + amount: Self::Balance, + ) -> (DebtOf, CreditOf) { + (Self::rescind(asset, amount), Self::issue(asset, amount)) + } + + /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); + + /// Mints exactly `value` into the `asset` account of `who`. + /// + /// If `who` doesn't exist, nothing is done and an `Err` returned. This could happen because it + /// the account doesn't yet exist and it isn't possible to create it under the current + /// circumstances and with `value` in it. + fn deposit( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + ) -> Result, DispatchError>; + + /// Removes `value` free `asset` balance from `who` account if possible. + /// + /// If the removal is not possible, then it returns `Err` and nothing is changed. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is no less than `value`. It may be more in the case that removing it reduced it below + /// `Self::minimum_balance()`. + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + // TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError>; + + /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. + /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: CreditOf, + ) -> Result<(), CreditOf> { + let v = credit.peek(); + let debt = match Self::deposit(credit.asset(), who, v) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + if let Ok(result) = credit.offset(debt) { + let result = result.try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + } else { + debug_assert!(false, "debt.asset is credit.asset; qed"); + } + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: DebtOf, + // TODO: liveness: ExistenceRequirement, + ) -> Result, DebtOf> { + let amount = debt.peek(); + let asset = debt.asset(); + let credit = match Self::withdraw(asset, who, amount) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + match credit.offset(debt) { + Ok(SameOrOther::None) => Ok(CreditOf::::zero(asset)), + Ok(SameOrOther::Same(dust)) => Ok(dust), + Ok(SameOrOther::Other(rest)) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + }, + Err(_) => { + debug_assert!(false, "debt.asset is credit.asset; qed"); + Ok(CreditOf::::zero(asset)) + }, + } + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. 
It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental inflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced<AccountId>: Inspect<AccountId> { + /// Set the `asset` balance of `who` to `amount`. If this cannot be done for some reason (e.g. + /// because the account cannot be created or an overflow) then an `Err` is returned. + fn set_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Set the total issuance of `asset` to `amount`. + fn set_total_issuance(asset: Self::AssetId, amount: Self::Balance); + + /// Reduce the `asset` balance of `who` by `amount`. If it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + fn decrease_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result<Self::Balance, DispatchError> { + let old_balance = Self::balance(asset, who); + let (mut new_balance, mut amount) = if old_balance < amount { + Err(TokenError::NoFunds)? + } else { + (old_balance - amount, amount) + }; + if new_balance < Self::minimum_balance(asset) { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + // Defensive only - this should not fail now. + Self::set_balance(asset, who, new_balance)?; + Ok(amount) + } + + /// Reduce the `asset` balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + /// + /// Return the imbalance by which the account was reduced. + fn decrease_balance_at_most( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Self::Balance { + let old_balance = Self::balance(asset, who); + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) + } else { + (old_balance - amount, amount) + }; + let minimum_balance = Self::minimum_balance(asset); + if new_balance < minimum_balance { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + let mut r = Self::set_balance(asset, who, new_balance); + if r.is_err() { + // Some error, probably because we tried to destroy an account which cannot be + // destroyed. + if new_balance.is_zero() && amount >= minimum_balance { + new_balance = minimum_balance; + amount -= minimum_balance; + r = Self::set_balance(asset, who, new_balance); + } + if r.is_err() { + // Still an error. Apparently it's not possible to reduce at all. + amount = Zero::zero(); + } + } + amount + } + + /// Increase the `asset` balance of `who` by `amount`. If it cannot be increased by that amount + /// for some reason, return `Err` and don't increase it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the balance of `who` is zero. + fn increase_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result<Self::Balance, DispatchError> { + let old_balance = Self::balance(asset, who); + let new_balance = old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; + if new_balance < Self::minimum_balance(asset) { + Err(TokenError::BelowMinimum)?
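+ // Illustrative only: with minimum_balance(asset) == 10, increasing a zero balance by 5 + // takes this early return with `TokenError::BelowMinimum`.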
+ } + if old_balance != new_balance { + Self::set_balance(asset, who, new_balance)?; + } + Ok(amount) + } + + /// Increase the `asset` balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance will be zero in the case that + /// `amount < Self::minimum_balance()`. + /// + /// Return the imbalance by which the account was increased. + fn increase_balance_at_most( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Self::Balance { + let old_balance = Self::balance(asset, who); + let mut new_balance = old_balance.saturating_add(amount); + let mut amount = new_balance - old_balance; + if new_balance < Self::minimum_balance(asset) { + new_balance = Zero::zero(); + amount = Zero::zero(); + } + if old_balance == new_balance || Self::set_balance(asset, who, new_balance).is_ok() { + amount + } else { + Zero::zero() + } + } +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. +pub struct IncreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for IncreaseIssuance +{ + fn handle(asset: U::AssetId, amount: U::Balance) { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount)) + } +} + +/// Simple handler for an imbalance drop which decreases the total issuance of the system by the +/// imbalance amount. Used for leftover credit. +pub struct DecreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for DecreaseIssuance +{ + fn handle(asset: U::AssetId, amount: U::Balance) { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount)) + } +} + +/// An imbalance type which uses `DecreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that funds in someone's account have been removed and not yet placed anywhere +/// else. If it gets dropped, then those funds will be assumed to be "burned" and the total supply +/// will be accordingly decreased to ensure it equals the sum of the balances of all accounts. +type Credit = Imbalance< + >::AssetId, + >::Balance, + DecreaseIssuance, + IncreaseIssuance, +>; + +/// An imbalance type which uses `IncreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that there are funds in someone's account whose origin is as yet unaccounted +/// for. If it gets dropped, then those funds will be assumed to be "minted" and the total supply +/// will be accordingly increased to ensure it equals the sum of the balances of all accounts. +type Debt = Imbalance< + >::AssetId, + >::Balance, + IncreaseIssuance, + DecreaseIssuance, +>; + +/// Create some `Credit` item. Only for internal use. +fn credit>( + asset: U::AssetId, + amount: U::Balance, +) -> Credit { + Imbalance::new(asset, amount) +} + +/// Create some `Debt` item. Only for internal use. 
+fn debt>( + asset: U::AssetId, + amount: U::Balance, +) -> Debt { + Imbalance::new(asset, amount) +} + +impl> Balanced for U { + type OnDropCredit = DecreaseIssuance; + type OnDropDebt = IncreaseIssuance; + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> Debt { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount)); + debt(asset, amount) + } + fn issue(asset: Self::AssetId, amount: Self::Balance) -> Credit { + U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount)); + credit(asset, amount) + } + fn slash( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (Credit, Self::Balance) { + let slashed = U::decrease_balance_at_most(asset, who, amount); + // `slashed` could be less than, greater than or equal to `amount`. + // If slashed == amount, it means the account had at least amount in it and it could all be + // removed without a problem. + // If slashed > amount, it means the account had more than amount in it, but not enough more + // to push it over minimum_balance. + // If slashed < amount, it means the account didn't have enough in it to be reduced by + // `amount` without being destroyed. + (credit(asset, slashed), amount.saturating_sub(slashed)) + } + fn deposit( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result, DispatchError> { + let increase = U::increase_balance(asset, who, amount)?; + Ok(debt(asset, increase)) + } + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + // TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError> { + let decrease = U::decrease_balance(asset, who, amount)?; + Ok(credit(asset, decrease)) + } +} diff --git a/frame/support/src/traits/tokens/fungibles/imbalance.rs b/frame/support/src/traits/tokens/fungibles/imbalance.rs new file mode 100644 index 0000000000000..c44c471646485 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -0,0 +1,179 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The imbalance type and its associates, which handles keeps everything adding up properly with +//! unbalanced operations. + +use super::{ + balanced::Balanced, + fungibles::{AssetId, Balance}, + *, +}; +use crate::traits::misc::{SameOrOther, TryDrop}; +use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_std::marker::PhantomData; + +/// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or +/// debt (positive) imbalance. +pub trait HandleImbalanceDrop { + fn handle(asset: AssetId, amount: Balance); +} + +/// An imbalance in the system, representing a divergence of recorded token supply from the sum of +/// the balances of all accounts. This is `must_use` in order to ensure it gets handled (placing +/// into an account, settling from an account or altering the supply). 
+/// +/// Importantly, it has a special `Drop` impl, and cannot be created outside of this module. +#[must_use] +#[derive(RuntimeDebug, Eq, PartialEq)] +pub struct Imbalance< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> { + asset: A, + amount: B, + _phantom: PhantomData<(OnDrop, OppositeOnDrop)>, +} + +impl< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, + > Drop for Imbalance +{ + fn drop(&mut self) { + if !self.amount.is_zero() { + OnDrop::handle(self.asset, self.amount) + } + } +} + +impl< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, + > TryDrop for Imbalance +{ + /// Drop an instance cleanly. Only works if its value represents "no-operation". + fn try_drop(self) -> Result<(), Self> { + self.drop_zero() + } +} + +impl< + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, + > Imbalance +{ + pub fn zero(asset: A) -> Self { + Self { asset, amount: Zero::zero(), _phantom: PhantomData } + } + + pub(crate) fn new(asset: A, amount: B) -> Self { + Self { asset, amount, _phantom: PhantomData } + } + + pub fn drop_zero(self) -> Result<(), Self> { + if self.amount.is_zero() { + sp_std::mem::forget(self); + Ok(()) + } else { + Err(self) + } + } + + pub fn split(self, amount: B) -> (Self, Self) { + let first = self.amount.min(amount); + let second = self.amount - first; + let asset = self.asset; + sp_std::mem::forget(self); + (Imbalance::new(asset, first), Imbalance::new(asset, second)) + } + pub fn merge(mut self, other: Self) -> Result { + if self.asset == other.asset { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + Ok(self) + } else { + Err((self, other)) + } + } + pub fn subsume(&mut self, other: Self) -> Result<(), Self> { + if self.asset == other.asset { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + Ok(()) + } else { + Err(other) + } + } + pub fn offset( + self, + other: Imbalance, + ) -> Result< + SameOrOther>, + (Self, Imbalance), + > { + if self.asset == other.asset { + let (a, b) = (self.amount, other.amount); + let asset = self.asset; + sp_std::mem::forget((self, other)); + + if a == b { + Ok(SameOrOther::None) + } else if a > b { + Ok(SameOrOther::Same(Imbalance::new(asset, a - b))) + } else { + Ok(SameOrOther::Other(Imbalance::::new(asset, b - a))) + } + } else { + Err((self, other)) + } + } + pub fn peek(&self) -> B { + self.amount + } + + pub fn asset(&self) -> A { + self.asset + } +} + +/// Imbalance implying that the total_issuance value is less than the sum of all account balances. +pub type DebtOf = Imbalance< + >::AssetId, + >::Balance, + // This will generally be implemented by increasing the total_issuance value. + >::OnDropDebt, + >::OnDropCredit, +>; + +/// Imbalance implying that the total_issuance value is greater than the sum of all account +/// balances. +pub type CreditOf = Imbalance< + >::AssetId, + >::Balance, + // This will generally be implemented by decreasing the total_issuance value. + >::OnDropCredit, + >::OnDropDebt, +>; diff --git a/frame/support/src/traits/tokens/imbalance.rs b/frame/support/src/traits/tokens/imbalance.rs new file mode 100644 index 0000000000000..0f7b38a65efc8 --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance.rs @@ -0,0 +1,179 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The imbalance trait type and its associates, which handle keeping everything adding up
+//! properly with unbalanced operations.
+
+use crate::traits::misc::{SameOrOther, TryDrop};
+use sp_runtime::traits::Saturating;
+use sp_std::ops::Div;
+
+mod on_unbalanced;
+mod signed_imbalance;
+mod split_two_ways;
+pub use on_unbalanced::OnUnbalanced;
+pub use signed_imbalance::SignedImbalance;
+pub use split_two_ways::SplitTwoWays;
+
+/// A trait for a not-quite Linear Type that tracks an imbalance.
+///
+/// Functions that alter account balances return an object of this trait to
+/// express how much account balances have been altered in aggregate. If
+/// dropped, the currency system will take some default steps to deal with
+/// the imbalance (the `balances` module simply reduces or increases its
+/// total issuance). Your module should generally handle it in some way;
+/// good practice is to do so in a configurable manner using an
+/// `OnUnbalanced` type for each situation in which your module needs to
+/// handle an imbalance.
+///
+/// Imbalances can either be Positive (funds were added somewhere without
+/// being subtracted elsewhere - e.g. a reward) or Negative (funds deducted
+/// somewhere without an equal and opposite addition - e.g. a slash or
+/// system fee payment).
+///
+/// Since they are unsigned, the actual type is always Positive or Negative.
+/// The trait makes no distinction except to define the `Opposite` type.
+///
+/// New instances of zero value can be created (`zero`) and destroyed
+/// (`drop_zero`).
+///
+/// Existing instances can be `split` and merged either consuming `self` with
+/// `merge` or mutating `self` with `subsume`. If the target is an `Option`,
+/// then `maybe_merge` and `maybe_subsume` might work better. Instances can
+/// also be `offset` with an `Opposite` that is less than or equal to in value.
+///
+/// You can always retrieve the raw balance value using `peek`.
+#[must_use]
+pub trait Imbalance<Balance>: Sized + TryDrop + Default {
+	/// The oppositely imbalanced type. They come in pairs.
+	type Opposite: Imbalance<Balance>;
+
+	/// The zero imbalance. Can be destroyed with `drop_zero`.
+	fn zero() -> Self;
+
+	/// Drop an instance cleanly. Only works if its `self.value()` is zero.
+	fn drop_zero(self) -> Result<(), Self>;
+
+	/// Consume `self` and return two independent instances; the first
+	/// is guaranteed to be at most `amount` and the second will be the remainder.
+	fn split(self, amount: Balance) -> (Self, Self);
+
+	/// Consume `self` and return two independent instances; the amounts returned will be in
+	/// approximately the same ratio as `first`:`second`.
+	///
+	/// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should
+	/// fit into a `u32`. Overflow will safely saturate in both cases.
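+	///
+	/// For example, `ration(1, 3)` on an instance holding 100 units yields parts of
+	/// approximately 25 and 75.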
+ fn ration(self, first: u32, second: u32) -> (Self, Self) + where + Balance: From + Saturating + Div, + { + let total: u32 = first.saturating_add(second); + if total == 0 { + return (Self::zero(), Self::zero()) + } + let amount1 = self.peek().saturating_mul(first.into()) / total.into(); + self.split(amount1) + } + + /// Consume self and add its two components, defined by the first component's balance, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. + fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) { + let (a, b) = self.split(amount); + (a.merge(others.0), b.merge(others.1)) + } + + /// Consume self and add its two components, defined by the ratio `first`:`second`, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. + fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) + where + Balance: From + Saturating + Div, + { + let (a, b) = self.ration(first, second); + (a.merge(others.0), b.merge(others.1)) + } + + /// Consume self and add its two components, defined by the first component's balance, + /// element-wise into two pre-existing Imbalance refs. + /// + /// A convenient replacement for `split` and `subsume`. + fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) { + let (a, b) = self.split(amount); + others.0.subsume(a); + others.1.subsume(b); + } + + /// Consume self and add its two components, defined by the ratio `first`:`second`, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. + fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) + where + Balance: From + Saturating + Div, + { + let (a, b) = self.ration(first, second); + others.0.subsume(a); + others.1.subsume(b); + } + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + fn merge(self, other: Self) -> Self; + + /// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with + /// reversed arguments. + fn merge_into(self, other: &mut Self) { + other.subsume(self) + } + + /// Consume `self` and maybe an `other` to return a new instance that combines + /// both. + fn maybe_merge(self, other: Option) -> Self { + if let Some(o) = other { + self.merge(o) + } else { + self + } + } + + /// Consume an `other` to mutate `self` into a new instance that combines + /// both. + fn subsume(&mut self, other: Self); + + /// Maybe consume an `other` to mutate `self` into a new instance that combines + /// both. + fn maybe_subsume(&mut self, other: Option) { + if let Some(o) = other { + self.subsume(o) + } + } + + /// Consume self and along with an opposite counterpart to return + /// a combined result. + /// + /// Returns `Ok` along with a new instance of `Self` if this instance has a + /// greater value than the `other`. Otherwise returns `Err` with an instance of + /// the `Opposite`. In both cases the value represents the combination of `self` + /// and `other`. + fn offset(self, other: Self::Opposite) -> SameOrOther; + + /// The raw value of self. + fn peek(&self) -> Balance; +} diff --git a/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs b/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs new file mode 100644 index 0000000000000..bc7df0e2acf33 --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Trait for handling imbalances. + +use crate::traits::misc::TryDrop; + +/// Handler for when some currency "account" decreased in balance for +/// some reason. +/// +/// The only reason at present for an increase would be for validator rewards, but +/// there may be other reasons in the future or for other chains. +/// +/// Reasons for decreases include: +/// +/// - Someone got slashed. +/// - Someone paid for a transaction to be included. +pub trait OnUnbalanced { + /// Handler for some imbalances. The different imbalances might have different origins or + /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all + /// of them. Infallible. + fn on_unbalanceds(amounts: impl Iterator) + where + Imbalance: crate::traits::Imbalance, + { + Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) + } + + /// Handler for some imbalance. Infallible. + fn on_unbalanced(amount: Imbalance) { + amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced) + } + + /// Actually handle a non-zero imbalance. You probably want to implement this rather than + /// `on_unbalanced`. + fn on_nonzero_unbalanced(amount: Imbalance) { + drop(amount); + } +} + +impl OnUnbalanced for () {} diff --git a/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs new file mode 100644 index 0000000000000..3e76d069f50e7 --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs @@ -0,0 +1,71 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Convenience type for managing an imbalance whose sign is unknown. + +use super::super::imbalance::Imbalance; +use crate::traits::misc::SameOrOther; +use codec::FullCodec; +use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize}; +use sp_std::fmt::Debug; + +/// Either a positive or a negative imbalance. +pub enum SignedImbalance> { + /// A positive imbalance (funds have been created but none destroyed). + Positive(PositiveImbalance), + /// A negative imbalance (funds have been destroyed but none created). 
+	Negative(PositiveImbalance::Opposite),
+}
+
+impl<
+		P: Imbalance<B, Opposite = N>,
+		N: Imbalance<B, Opposite = P>,
+		B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default,
+	> SignedImbalance<B, P>
+{
+	/// Create a `Positive` instance of `Self` whose value is zero.
+	pub fn zero() -> Self {
+		SignedImbalance::Positive(P::zero())
+	}
+
+	/// Drop `Self` if and only if it is equal to zero. Return `Err` with `Self` if not.
+	pub fn drop_zero(self) -> Result<(), Self> {
+		match self {
+			SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive),
+			SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative),
+		}
+	}
+
+	/// Consume `self` and an `other` to return a new instance that combines
+	/// both.
+	pub fn merge(self, other: Self) -> Self {
+		match (self, other) {
+			(SignedImbalance::Positive(one), SignedImbalance::Positive(other)) =>
+				SignedImbalance::Positive(one.merge(other)),
+			(SignedImbalance::Negative(one), SignedImbalance::Negative(other)) =>
+				SignedImbalance::Negative(one.merge(other)),
+			(SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => {
+				match one.offset(other) {
+					SameOrOther::Same(positive) => SignedImbalance::Positive(positive),
+					SameOrOther::Other(negative) => SignedImbalance::Negative(negative),
+					SameOrOther::None => SignedImbalance::Positive(P::zero()),
+				}
+			},
+			(one, other) => other.merge(one),
+		}
+	}
+}
diff --git a/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/frame/support/src/traits/tokens/imbalance/split_two_ways.rs
new file mode 100644
index 0000000000000..882b43c2e914c
--- /dev/null
+++ b/frame/support/src/traits/tokens/imbalance/split_two_ways.rs
@@ -0,0 +1,46 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Means for splitting an imbalance into two and handling them differently.
+
+use super::super::imbalance::{Imbalance, OnUnbalanced};
+use sp_core::u32_trait::Value as U32;
+use sp_runtime::traits::Saturating;
+use sp_std::{marker::PhantomData, ops::Div};
+
+/// Split an unbalanced amount two ways between two targets, in the ratio `Part1`:`Part2`.
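+///
+/// For instance, a runtime might route 80% of fees to a treasury and 20% to the block author.
+/// A sketch of such a configuration, assuming `Balance`, `NegativeImbalance`, `Treasury` and
+/// `Author` are suitable runtime types and `OnUnbalanced` handlers:
+///
+/// ```ignore
+/// use sp_core::u32_trait::{_1, _4};
+/// pub type DealWithFees = SplitTwoWays<Balance, NegativeImbalance, _4, Treasury, _1, Author>;
+/// ```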
+pub struct SplitTwoWays<Balance, Imbalance, Part1, Target1, Part2, Target2>(
+	PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>,
+);
+
+impl<
+		Balance: From<u32> + Saturating + Div<Output = Balance>,
+		I: Imbalance<Balance>,
+		Part1: U32,
+		Target1: OnUnbalanced<I>,
+		Part2: U32,
+		Target2: OnUnbalanced<I>,
+	> OnUnbalanced<I> for SplitTwoWays<Balance, I, Part1, Target1, Part2, Target2>
+{
+	fn on_nonzero_unbalanced(amount: I) {
+		let total: u32 = Part1::VALUE + Part2::VALUE;
+		let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into();
+		let (imb1, imb2) = amount.split(amount1);
+		Target1::on_unbalanced(imb1);
+		Target2::on_unbalanced(imb2);
+	}
+}
diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs
new file mode 100644
index 0000000000000..214c28708a196
--- /dev/null
+++ b/frame/support/src/traits/tokens/misc.rs
@@ -0,0 +1,181 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Miscellaneous types.
+
+use codec::{Decode, Encode, FullCodec};
+use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero};
+use sp_core::RuntimeDebug;
+use sp_runtime::{ArithmeticError, DispatchError, TokenError};
+use sp_std::fmt::Debug;
+
+/// One of a number of consequences of withdrawing a fungible from an account.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub enum WithdrawConsequence<Balance> {
+	/// Withdraw could not happen since the amount to be withdrawn is greater than the total funds
+	/// in the account.
+	NoFunds,
+	/// The withdraw would mean the account dying when it needs to exist (usually because it is a
+	/// provider and there are consumer references on it).
+	WouldDie,
+	/// The asset is unknown. Usually because an `AssetId` has been presented which doesn't exist
+	/// on the system.
+	UnknownAsset,
+	/// There has been an underflow in the system. This is indicative of a corrupt state and
+	/// likely unrecoverable.
+	Underflow,
+	/// There has been an overflow in the system. This is indicative of a corrupt state and
+	/// likely unrecoverable.
+	Overflow,
+	/// Not enough of the funds in the account are available for withdrawal; the remainder is
+	/// frozen.
+	Frozen,
+	/// Account balance would reduce to zero, potentially destroying it. The parameter is the
+	/// amount of balance which is destroyed.
+	ReducedToZero(Balance),
+	/// Account continued in existence.
+	Success,
+}
+
+impl<Balance: Zero> WithdrawConsequence<Balance> {
+	/// Convert the type into a `Result`, with `DispatchError` as the error type and the additional
+	/// `Balance` by which the account will be reduced as the success value.
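+	///
+	/// For example, `ReducedToZero(dust).into_result()` yields `Ok(dust)`, `Success` yields
+	/// `Ok(Zero::zero())`, and `Frozen` yields `Err(TokenError::Frozen.into())`.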
+	pub fn into_result(self) -> Result<Balance, DispatchError> {
+		use WithdrawConsequence::*;
+		match self {
+			NoFunds => Err(TokenError::NoFunds.into()),
+			WouldDie => Err(TokenError::WouldDie.into()),
+			UnknownAsset => Err(TokenError::UnknownAsset.into()),
+			Underflow => Err(ArithmeticError::Underflow.into()),
+			Overflow => Err(ArithmeticError::Overflow.into()),
+			Frozen => Err(TokenError::Frozen.into()),
+			ReducedToZero(result) => Ok(result),
+			Success => Ok(Zero::zero()),
+		}
+	}
+}
+
+/// One of a number of consequences of depositing a fungible into an account.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub enum DepositConsequence {
+	/// Deposit couldn't happen due to the amount being too low. This is usually because the
+	/// account doesn't yet exist and the deposit wouldn't bring it to at least the minimum needed
+	/// for existence.
+	BelowMinimum,
+	/// Deposit cannot happen since the account cannot be created (usually because it's a consumer
+	/// and there exists no provider reference).
+	CannotCreate,
+	/// The asset is unknown. Usually because an `AssetId` has been presented which doesn't exist
+	/// on the system.
+	UnknownAsset,
+	/// An overflow would occur. This is practically unexpected, but could happen in test systems
+	/// with extremely small balance types or balances that approach the max value of the balance
+	/// type.
+	Overflow,
+	/// Account continued in existence.
+	Success,
+}
+
+impl DepositConsequence {
+	/// Convert the type into a `Result` with `DispatchError` as the error.
+	pub fn into_result(self) -> Result<(), DispatchError> {
+		use DepositConsequence::*;
+		Err(match self {
+			BelowMinimum => TokenError::BelowMinimum.into(),
+			CannotCreate => TokenError::CannotCreate.into(),
+			UnknownAsset => TokenError::UnknownAsset.into(),
+			Overflow => ArithmeticError::Overflow.into(),
+			Success => return Ok(()),
+		})
+	}
+}
+
+/// Simple boolean for whether an account needs to be kept in existence.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub enum ExistenceRequirement {
+	/// Operation must not result in the account going out of existence.
+	///
+	/// Note this implies that if the account never existed in the first place, then the operation
+	/// may legitimately leave the account unchanged and still non-existent.
+	KeepAlive,
+	/// Operation may result in account going out of existence.
+	AllowDeath,
+}
+
+/// Status of funds.
+#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)]
+pub enum BalanceStatus {
+	/// Funds are free, as corresponding to `free` item in Balances.
+	Free,
+	/// Funds are reserved, as corresponding to `reserved` item in Balances.
+	Reserved,
+}
+
+bitflags::bitflags! {
+	/// Reasons for moving funds out of an account.
+	#[derive(Encode, Decode)]
+	pub struct WithdrawReasons: u8 {
+		/// In order to pay for (system) transaction costs.
+		const TRANSACTION_PAYMENT = 0b00000001;
+		/// In order to transfer ownership.
+		const TRANSFER = 0b00000010;
+		/// In order to reserve some funds for a later return or repatriation.
+		const RESERVE = 0b00000100;
+		/// In order to pay some other (higher-level) fees.
+		const FEE = 0b00001000;
+		/// In order to tip a validator for transaction inclusion.
+		const TIP = 0b00010000;
+	}
+}
+
+impl WithdrawReasons {
+	/// Choose all variants except for `one`.
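+	///
+	/// The complement is computed by taking the full set of flags and toggling `one` off, as the
+	/// example below shows.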
+	///
+	/// ```rust
+	/// # use frame_support::traits::WithdrawReasons;
+	/// # fn main() {
+	/// assert_eq!(
+	/// 	WithdrawReasons::FEE | WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE | WithdrawReasons::TIP,
+	/// 	WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT),
+	/// );
+	/// # }
+	/// ```
+	pub fn except(one: WithdrawReasons) -> WithdrawReasons {
+		let mut flags = Self::all();
+		flags.toggle(one);
+		flags
+	}
+}
+
+/// Simple amalgamation trait to collect together properties for an AssetId under one roof.
+pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug {}
+impl<T: FullCodec + Copy + Eq + PartialEq + Debug> AssetId for T {}
+
+/// Simple amalgamation trait to collect together properties for a Balance under one roof.
+pub trait Balance:
+	AtLeast32BitUnsigned + FullCodec + Copy + Default + Debug + scale_info::TypeInfo
+{
+}
+impl<T: AtLeast32BitUnsigned + FullCodec + Copy + Default + Debug + scale_info::TypeInfo> Balance
+	for T
+{
+}
+
+/// Converts a balance value into an asset balance.
+pub trait BalanceConversion<InBalance, AssetId, OutBalance> {
+	type Error;
+	fn to_asset_balance(balance: InBalance, asset_id: AssetId) -> Result<OutBalance, Self::Error>;
+}
diff --git a/frame/support/src/traits/tokens/nonfungible.rs b/frame/support/src/traits/tokens/nonfungible.rs
new file mode 100644
index 0000000000000..821884f6e3905
--- /dev/null
+++ b/frame/support/src/traits/tokens/nonfungible.rs
@@ -0,0 +1,193 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for dealing with a single non-fungible asset class.
+//!
+//! This assumes a single-level namespace identified by `Inspect::InstanceId`, and could
+//! reasonably be implemented by pallets which want to expose a single collection of NFT-like
+//! objects.
+//!
+//! For an NFT API which has dual-level namespacing, the traits in `nonfungibles` are better to
+//! use.
+
+use super::nonfungibles;
+use crate::{dispatch::DispatchResult, traits::Get};
+use codec::{Decode, Encode};
+use sp_runtime::TokenError;
+use sp_std::prelude::*;
+
+/// Trait for providing an interface to a read-only NFT-like set of asset instances.
+pub trait Inspect<AccountId> {
+	/// Type for identifying an asset instance.
+	type InstanceId;
+
+	/// Returns the owner of asset `instance`, or `None` if the asset doesn't exist or has no
+	/// owner.
+	fn owner(instance: &Self::InstanceId) -> Option<AccountId>;
+
+	/// Returns the attribute value of `instance` corresponding to `key`.
+	///
+	/// By default this is `None`; no attributes are defined.
+	fn attribute(_instance: &Self::InstanceId, _key: &[u8]) -> Option<Vec<u8>> {
+		None
+	}
+
+	/// Returns the strongly-typed attribute value of `instance` corresponding to `key`.
+	///
+	/// By default this just attempts to use `attribute`.
+	fn typed_attribute<K: Encode, V: Decode>(instance: &Self::InstanceId, key: &K) -> Option<V> {
+		key.using_encoded(|d| Self::attribute(instance, d))
+			.and_then(|v| V::decode(&mut &v[..]).ok())
+	}
+
+	/// Returns `true` if the asset `instance` may be transferred.
+	///
+	/// Default implementation is that all assets are transferable.
+	fn can_transfer(_instance: &Self::InstanceId) -> bool {
+		true
+	}
+}
+
+/// Interface for enumerating assets in existence or owned by a given account over a collection
+/// of NFTs.
+pub trait InspectEnumerable<AccountId>: Inspect<AccountId> {
+	/// Returns an iterator of the instances of an asset `class` in existence.
+	fn instances() -> Box<dyn Iterator<Item = Self::InstanceId>>;
+
+	/// Returns an iterator of the asset instances of all classes owned by `who`.
+	fn owned(who: &AccountId) -> Box<dyn Iterator<Item = Self::InstanceId>>;
+}
+
+/// Trait for providing an interface for NFT-like assets which may be minted, burned and/or have
+/// attributes set on them.
+pub trait Mutate<AccountId>: Inspect<AccountId> {
+	/// Mint some asset `instance` to be owned by `who`.
+	///
+	/// By default, this is not a supported operation.
+	fn mint_into(_instance: &Self::InstanceId, _who: &AccountId) -> DispatchResult {
+		Err(TokenError::Unsupported.into())
+	}
+
+	/// Burn some asset `instance`.
+	///
+	/// By default, this is not a supported operation.
+	fn burn_from(_instance: &Self::InstanceId) -> DispatchResult {
+		Err(TokenError::Unsupported.into())
+	}
+
+	/// Set attribute `value` of asset `instance`'s `key`.
+	///
+	/// By default, this is not a supported operation.
+	fn set_attribute(_instance: &Self::InstanceId, _key: &[u8], _value: &[u8]) -> DispatchResult {
+		Err(TokenError::Unsupported.into())
+	}
+
+	/// Attempt to set the strongly-typed attribute `value` of `instance`'s `key`.
+	///
+	/// By default this just attempts to use `set_attribute`.
+	fn set_typed_attribute<K: Encode, V: Encode>(
+		instance: &Self::InstanceId,
+		key: &K,
+		value: &V,
+	) -> DispatchResult {
+		key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(instance, k, v)))
+	}
+}
+
+/// Trait for providing a non-fungible set of assets which can only be transferred.
+pub trait Transfer<AccountId>: Inspect<AccountId> {
+	/// Transfer asset `instance` into `destination` account.
+	fn transfer(instance: &Self::InstanceId, destination: &AccountId) -> DispatchResult;
+}
+
+/// Convert a `nonfungibles` trait implementation into a `nonfungible` trait implementation by
+/// identifying a single item.
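+///
+/// A sketch of its use, assuming a `Uniques`-style pallet implementing the `nonfungibles`
+/// traits and a `parameter_types!`-declared class identifier (the names here are hypothetical):
+///
+/// ```ignore
+/// parameter_types! {
+/// 	pub const ArtClass: u32 = 0;
+/// }
+/// // A `nonfungible` view over a single class of a `nonfungibles` implementation.
+/// type ArtItems = ItemOf<Uniques, ArtClass, AccountId>;
+/// ```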
+pub struct ItemOf< + F: nonfungibles::Inspect, + A: Get<>::ClassId>, + AccountId, +>(sp_std::marker::PhantomData<(F, A, AccountId)>); + +impl< + F: nonfungibles::Inspect, + A: Get<>::ClassId>, + AccountId, + > Inspect for ItemOf +{ + type InstanceId = >::InstanceId; + fn owner(instance: &Self::InstanceId) -> Option { + >::owner(&A::get(), instance) + } + fn attribute(instance: &Self::InstanceId, key: &[u8]) -> Option> { + >::attribute(&A::get(), instance, key) + } + fn typed_attribute(instance: &Self::InstanceId, key: &K) -> Option { + >::typed_attribute(&A::get(), instance, key) + } + fn can_transfer(instance: &Self::InstanceId) -> bool { + >::can_transfer(&A::get(), instance) + } +} + +impl< + F: nonfungibles::InspectEnumerable, + A: Get<>::ClassId>, + AccountId, + > InspectEnumerable for ItemOf +{ + fn instances() -> Box> { + >::instances(&A::get()) + } + fn owned(who: &AccountId) -> Box> { + >::owned_in_class(&A::get(), who) + } +} + +impl< + F: nonfungibles::Mutate, + A: Get<>::ClassId>, + AccountId, + > Mutate for ItemOf +{ + fn mint_into(instance: &Self::InstanceId, who: &AccountId) -> DispatchResult { + >::mint_into(&A::get(), instance, who) + } + fn burn_from(instance: &Self::InstanceId) -> DispatchResult { + >::burn_from(&A::get(), instance) + } + fn set_attribute(instance: &Self::InstanceId, key: &[u8], value: &[u8]) -> DispatchResult { + >::set_attribute(&A::get(), instance, key, value) + } + fn set_typed_attribute( + instance: &Self::InstanceId, + key: &K, + value: &V, + ) -> DispatchResult { + >::set_typed_attribute(&A::get(), instance, key, value) + } +} + +impl< + F: nonfungibles::Transfer, + A: Get<>::ClassId>, + AccountId, + > Transfer for ItemOf +{ + fn transfer(instance: &Self::InstanceId, destination: &AccountId) -> DispatchResult { + >::transfer(&A::get(), instance, destination) + } +} diff --git a/frame/support/src/traits/tokens/nonfungibles.rs b/frame/support/src/traits/tokens/nonfungibles.rs new file mode 100644 index 0000000000000..452ee2212d62a --- /dev/null +++ b/frame/support/src/traits/tokens/nonfungibles.rs @@ -0,0 +1,198 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with multiple collections of non-fungible assets. +//! +//! This assumes a dual-level namespace identified by `Inspect::InstanceId`, and could +//! reasonably be implemented by pallets which want to expose multiple independent collections of +//! NFT-like objects. +//! +//! For an NFT API which has single-level namespacing, the traits in `nonfungible` are better to +//! use. +//! +//! Implementations of these traits may be converted to implementations of corresponding +//! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. 
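+//!
+//! A minimal sketch of the dual-level namespace (types and values here are hypothetical): an
+//! asset is addressed by a `(class, instance)` pair.
+//!
+//! ```ignore
+//! let class: ClassId = 0;
+//! let instance: InstanceId = 42;
+//! let owner = <Pallet as Inspect<AccountId>>::owner(&class, &instance);
+//! ```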
+ +use crate::dispatch::DispatchResult; +use codec::{Decode, Encode}; +use sp_runtime::TokenError; +use sp_std::prelude::*; + +/// Trait for providing an interface to many read-only NFT-like sets of asset instances. +pub trait Inspect { + /// Type for identifying an asset instance. + type InstanceId; + + /// Type for identifying an asset class (an identifier for an independent collection of asset + /// instances). + type ClassId; + + /// Returns the owner of asset `instance` of `class`, or `None` if the asset doesn't exist (or + /// somehow has no owner). + fn owner(class: &Self::ClassId, instance: &Self::InstanceId) -> Option; + + /// Returns the owner of the asset `class`, if there is one. For many NFTs this may not make + /// any sense, so users of this API should not be surprised to find an asset class results in + /// `None` here. + fn class_owner(_class: &Self::ClassId) -> Option { + None + } + + /// Returns the attribute value of `instance` of `class` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn attribute( + _class: &Self::ClassId, + _instance: &Self::InstanceId, + _key: &[u8], + ) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `instance` of `class` corresponding to `key`. + /// + /// By default this just attempts to use `attribute`. + fn typed_attribute( + class: &Self::ClassId, + instance: &Self::InstanceId, + key: &K, + ) -> Option { + key.using_encoded(|d| Self::attribute(class, instance, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns the attribute value of `class` corresponding to `key`. + /// + /// By default this is `None`; no attributes are defined. + fn class_attribute(_class: &Self::ClassId, _key: &[u8]) -> Option> { + None + } + + /// Returns the strongly-typed attribute value of `class` corresponding to `key`. + /// + /// By default this just attempts to use `class_attribute`. + fn typed_class_attribute(class: &Self::ClassId, key: &K) -> Option { + key.using_encoded(|d| Self::class_attribute(class, d)) + .and_then(|v| V::decode(&mut &v[..]).ok()) + } + + /// Returns `true` if the asset `instance` of `class` may be transferred. + /// + /// Default implementation is that all assets are transferable. + fn can_transfer(_class: &Self::ClassId, _instance: &Self::InstanceId) -> bool { + true + } +} + +/// Interface for enumerating assets in existence or owned by a given account over many collections +/// of NFTs. +pub trait InspectEnumerable: Inspect { + /// Returns an iterator of the asset classes in existence. + fn classes() -> Box>; + + /// Returns an iterator of the instances of an asset `class` in existence. + fn instances(class: &Self::ClassId) -> Box>; + + /// Returns an iterator of the asset instances of all classes owned by `who`. + fn owned(who: &AccountId) -> Box>; + + /// Returns an iterator of the asset instances of `class` owned by `who`. + fn owned_in_class( + class: &Self::ClassId, + who: &AccountId, + ) -> Box>; +} + +/// Trait for providing the ability to create classes of nonfungible assets. +pub trait Create: Inspect { + /// Create a `class` of nonfungible assets to be owned by `who` and managed by `admin`. + fn create_class(class: &Self::ClassId, who: &AccountId, admin: &AccountId) -> DispatchResult; +} + +/// Trait for providing an interface for multiple classes of NFT-like assets which may be minted, +/// burned and/or have attributes set on them. +pub trait Mutate: Inspect { + /// Mint some asset `instance` of `class` to be owned by `who`. 
+	///
+	/// By default, this is not a supported operation.
+	fn mint_into(
+		_class: &Self::ClassId,
+		_instance: &Self::InstanceId,
+		_who: &AccountId,
+	) -> DispatchResult {
+		Err(TokenError::Unsupported.into())
+	}
+
+	/// Burn some asset `instance` of `class`.
+	///
+	/// By default, this is not a supported operation.
+	fn burn_from(_class: &Self::ClassId, _instance: &Self::InstanceId) -> DispatchResult {
+		Err(TokenError::Unsupported.into())
+	}
+
+	/// Set attribute `value` of asset `instance` of `class`'s `key`.
+	///
+	/// By default, this is not a supported operation.
+	fn set_attribute(
+		_class: &Self::ClassId,
+		_instance: &Self::InstanceId,
+		_key: &[u8],
+		_value: &[u8],
+	) -> DispatchResult {
+		Err(TokenError::Unsupported.into())
+	}
+
+	/// Attempt to set the strongly-typed attribute `value` of `instance` of `class`'s `key`.
+	///
+	/// By default this just attempts to use `set_attribute`.
+	fn set_typed_attribute<K: Encode, V: Encode>(
+		class: &Self::ClassId,
+		instance: &Self::InstanceId,
+		key: &K,
+		value: &V,
+	) -> DispatchResult {
+		key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(class, instance, k, v)))
+	}
+
+	/// Set attribute `value` of asset `class`'s `key`.
+	///
+	/// By default, this is not a supported operation.
+	fn set_class_attribute(_class: &Self::ClassId, _key: &[u8], _value: &[u8]) -> DispatchResult {
+		Err(TokenError::Unsupported.into())
+	}
+
+	/// Attempt to set the strongly-typed attribute `value` of `class`'s `key`.
+	///
+	/// By default this just attempts to use `set_class_attribute`.
+	fn set_typed_class_attribute<K: Encode, V: Encode>(
+		class: &Self::ClassId,
+		key: &K,
+		value: &V,
+	) -> DispatchResult {
+		key.using_encoded(|k| value.using_encoded(|v| Self::set_class_attribute(class, k, v)))
+	}
+}
+
+/// Trait for providing non-fungible sets of assets which can only be transferred.
+pub trait Transfer<AccountId>: Inspect<AccountId> {
+	/// Transfer asset `instance` of `class` into `destination` account.
+	fn transfer(
+		class: &Self::ClassId,
+		instance: &Self::InstanceId,
+		destination: &AccountId,
+	) -> DispatchResult;
+}
diff --git a/frame/support/src/traits/validation.rs b/frame/support/src/traits/validation.rs
new file mode 100644
index 0000000000000..f4107ef6e2b02
--- /dev/null
+++ b/frame/support/src/traits/validation.rs
@@ -0,0 +1,260 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for dealing with validation and validators.
+
+use crate::{dispatch::Parameter, weights::Weight};
+use codec::{Codec, Decode};
+use sp_runtime::{
+	traits::{Convert, Zero},
+	BoundToRuntimeAppPublic, ConsensusEngineId, Permill, RuntimeAppPublic,
+};
+use sp_staking::SessionIndex;
+use sp_std::prelude::*;
+
+/// A trait for online node inspection in a session.
+///
+/// Something that can give information about the current validator set.
+pub trait ValidatorSet<AccountId> {
+	/// Type for representing validator id in a session.
+	type ValidatorId: Parameter;
+	/// A type for converting `AccountId` to `ValidatorId`.
+	type ValidatorIdOf: Convert<AccountId, Option<Self::ValidatorId>>;
+
+	/// Returns current session index.
+	fn session_index() -> SessionIndex;
+
+	/// Returns the active set of validators.
+	fn validators() -> Vec<Self::ValidatorId>;
+}
+
+/// [`ValidatorSet`] combined with an identification.
+pub trait ValidatorSetWithIdentification<AccountId>: ValidatorSet<AccountId> {
+	/// Full identification of `ValidatorId`.
+	type Identification: Parameter;
+	/// A type for converting `ValidatorId` to `Identification`.
+	type IdentificationOf: Convert<Self::ValidatorId, Option<Self::Identification>>;
+}
+
+/// A trait for finding the author of a block header based on the `PreRuntime` digests contained
+/// within it.
+pub trait FindAuthor<Author> {
+	/// Find the author of a block based on the pre-runtime digests.
+	fn find_author<'a, I>(digests: I) -> Option<Author>
+	where
+		I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>;
+}
+
+impl<A> FindAuthor<A> for () {
+	fn find_author<'a, I>(_: I) -> Option<A>
+	where
+		I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
+	{
+		None
+	}
+}
+
+/// A trait for verifying the seal of a header and returning the author.
+pub trait VerifySeal<Header, Author> {
+	/// Verify a header and return the author, if any.
+	fn verify_seal(header: &Header) -> Result<Option<Author>, &'static str>;
+}
+
+/// A session handler for specific key type.
+pub trait OneSessionHandler<ValidatorId>: BoundToRuntimeAppPublic {
+	/// The key type expected.
+	type Key: Decode + Default + RuntimeAppPublic;
+
+	/// The given validator set will be used for the genesis session.
+	/// It is guaranteed that the given validator set will also be used
+	/// for the second session, therefore the first call to `on_new_session`
+	/// should provide the same validator set.
+	fn on_genesis_session<'a, I: 'a>(validators: I)
+	where
+		I: Iterator<Item = (&'a ValidatorId, Self::Key)>,
+		ValidatorId: 'a;
+
+	/// Session set has changed; act appropriately. Note that this can be called
+	/// before initialization of your module.
+	///
+	/// `changed` is true when at least one of the session keys
+	/// or the underlying economic identities/distribution behind one of the
+	/// session keys has changed, false otherwise.
+	///
+	/// The `validators` are the validators of the incoming session, and `queued_validators`
+	/// will follow.
+	fn on_new_session<'a, I: 'a>(changed: bool, validators: I, queued_validators: I)
+	where
+		I: Iterator<Item = (&'a ValidatorId, Self::Key)>,
+		ValidatorId: 'a;
+
+	/// A notification for end of the session.
+	///
+	/// Note it is triggered before any `SessionManager::end_session` handlers,
+	/// so we can still affect the validator set.
+	fn on_before_session_ending() {}
+
+	/// A validator got disabled. Act accordingly until a new session begins.
+	fn on_disabled(_validator_index: usize);
+}
+
+/// Something that can estimate at which block the next session rotation will happen (i.e. a new
+/// session starts).
+///
+/// The accuracy of the estimates is dependent on the specific implementation, but in order to get
+/// the best estimate possible these methods should be called throughout the duration of the
+/// session (rather than calling once and storing the result).
+///
+/// This should be the same logical unit that dictates `ShouldEndSession` to the session module. No
+/// assumptions are made about the scheduling of the sessions.
+pub trait EstimateNextSessionRotation<BlockNumber> {
+	/// Return the average length of a session.
+	///
+	/// This may or may not be accurate.
+	fn average_session_length() -> BlockNumber;
+
+	/// Return an estimate of the current session progress.
+	///
+	/// `None` should be returned if the estimation fails to come to an answer.
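+	///
+	/// For example, an implementation with fixed-length sessions of `period` blocks might report
+	/// the fraction of the session elapsed at block `now` as a `Permill`.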
+ fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight); + + /// Return the block number at which the next session rotation is estimated to happen. + /// + /// None should be returned if the estimation fails to come to an answer. + fn estimate_next_session_rotation(now: BlockNumber) -> (Option, Weight); +} + +impl EstimateNextSessionRotation for () { + fn average_session_length() -> BlockNumber { + Zero::zero() + } + + fn estimate_current_session_progress(_: BlockNumber) -> (Option, Weight) { + (None, Zero::zero()) + } + + fn estimate_next_session_rotation(_: BlockNumber) -> (Option, Weight) { + (None, Zero::zero()) + } +} + +/// Something that can estimate at which block scheduling of the next session will happen (i.e when +/// we will try to fetch new validators). +/// +/// This only refers to the point when we fetch the next session details and not when we enact them +/// (for enactment there's `EstimateNextSessionRotation`). With `pallet-session` this should be +/// triggered whenever `SessionManager::new_session` is called. +/// +/// For example, if we are using a staking module this would be the block when the session module +/// would ask staking what the next validator set will be, as such this must always be implemented +/// by the session module. +pub trait EstimateNextNewSession { + /// Return the average length of a session. + /// + /// This may or may not be accurate. + fn average_session_length() -> BlockNumber; + + /// Return the block number at which the next new session is estimated to happen. + /// + /// None should be returned if the estimation fails to come to an answer. + fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight); +} + +impl EstimateNextNewSession for () { + fn average_session_length() -> BlockNumber { + Zero::zero() + } + + fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight) { + (None, Zero::zero()) + } +} + +/// Something which can compute and check proofs of +/// a historical key owner and return full identification data of that +/// key owner. +pub trait KeyOwnerProofSystem { + /// The proof of membership itself. + type Proof: Codec; + /// The full identification of a key owner and the stash account. + type IdentificationTuple: Codec; + + /// Prove membership of a key owner in the current block-state. + /// + /// This should typically only be called off-chain, since it may be + /// computationally heavy. + /// + /// Returns `Some` iff the key owner referred to by the given `key` is a + /// member of the current set. + fn prove(key: Key) -> Option; + + /// Check a proof of membership on-chain. Return `Some` iff the proof is + /// valid and recent enough to check. + fn check_proof(key: Key, proof: Self::Proof) -> Option; +} + +impl KeyOwnerProofSystem for () { + // The proof and identification tuples is any bottom type to guarantee that the methods of this + // implementation can never be called or return anything other than `None`. + type Proof = crate::Void; + type IdentificationTuple = crate::Void; + + fn prove(_key: Key) -> Option { + None + } + + fn check_proof(_key: Key, _proof: Self::Proof) -> Option { + None + } +} + +/// Trait to be used by block producing consensus engine modules to determine +/// how late the current block is (e.g. in a slot-based proposal mechanism how +/// many slots were skipped since the previous block). +pub trait Lateness { + /// Returns a generic measure of how late the current block is compared to + /// its parent. 
+	fn lateness(&self) -> N;
+}
+
+impl<N: Zero> Lateness<N> for () {
+	fn lateness(&self) -> N {
+		Zero::zero()
+	}
+}
+
+/// Implementors of this trait provide information about whether or not some validator has
+/// been registered with them. The [Session module](../../pallet_session/index.html) is an
+/// implementor.
+pub trait ValidatorRegistration<ValidatorId> {
+	/// Returns true if the provided validator ID has been registered with the implementing
+	/// runtime module.
+	fn is_registered(id: &ValidatorId) -> bool;
+}
+
+/// Trait used to check whether a given validator is currently disabled and should not be
+/// participating in consensus (e.g. because they equivocated).
+pub trait DisabledValidators {
+	/// Returns true if the given validator is disabled.
+	fn is_disabled(index: u32) -> bool;
+}
+
+impl DisabledValidators for () {
+	fn is_disabled(_index: u32) -> bool {
+		false
+	}
+}
diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs
new file mode 100644
index 0000000000000..62c6217ad59bc
--- /dev/null
+++ b/frame/support/src/traits/voting.rs
@@ -0,0 +1,89 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits and associated data structures concerned with voting, and moving between tokens and
+//! votes.
+
+use sp_arithmetic::traits::{SaturatedConversion, UniqueSaturatedFrom, UniqueSaturatedInto};
+
+/// A trait similar to `Convert` to convert values from an abstract balance type `B` into `u64`
+/// and back from `u128`. (This conversion is used in elections and other places where complex
+/// calculation over the balance type is needed.)
+///
+/// Total issuance of the currency is passed in, but an implementation of this trait may or may
+/// not use it.
+///
+/// # WARNING
+///
+/// The total issuance being passed in implies that the implementation must be aware of the fact
+/// that its values can affect the outcome. This implies that if the vote value is dependent on
+/// the total issuance, it should never be written to storage for later re-use.
+pub trait CurrencyToVote<B> {
+	/// Convert balance to u64.
+	fn to_vote(value: B, issuance: B) -> u64;
+
+	/// Convert u128 to balance.
+	fn to_currency(value: u128, issuance: B) -> B;
+}
+
+/// An implementation of `CurrencyToVote` tailored for chains that have a balance type of u128.
+///
+/// The factor is the `(total_issuance / u64::MAX).max(1)`, represented as u64. Let's look at the
+/// important cases:
+///
+/// If the chain's total issuance is less than u64::MAX, this will always be 1, which means that
+/// the factor will not have any effect. In this case, any account's balance is also less. Thus,
+/// both of the conversions are basically an `as`; any balance can fit in u64.
+///
+/// If the chain's total issuance is more than 2*u64::MAX, then a factor might be multiplied and
+/// divided upon conversion.
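+///
+/// A worked example of the factor at play (the numbers here are illustrative):
+///
+/// ```
+/// let issuance: u128 = 8 * (u64::MAX as u128);
+/// let factor = (issuance / u64::MAX as u128).max(1); // == 8
+/// let value: u128 = 1 << 66;
+/// let vote = (value / factor) as u64; // what `to_vote` computes
+/// assert_eq!(vote as u128 * factor, value); // `to_currency` scales back up; exact here
+/// ```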
+pub struct U128CurrencyToVote;
+
+impl U128CurrencyToVote {
+	fn factor(issuance: u128) -> u128 {
+		(issuance / u64::MAX as u128).max(1)
+	}
+}
+
+impl CurrencyToVote<u128> for U128CurrencyToVote {
+	fn to_vote(value: u128, issuance: u128) -> u64 {
+		(value / Self::factor(issuance)).saturated_into()
+	}
+
+	fn to_currency(value: u128, issuance: u128) -> u128 {
+		value.saturating_mul(Self::factor(issuance))
+	}
+}
+
+/// A naive implementation of `CurrencyToVote` that simply saturates all conversions.
+///
+/// # Warning
+///
+/// This is designed to be used mostly for testing. Use with care, and think about the
+/// consequences.
+pub struct SaturatingCurrencyToVote;
+
+impl<B: UniqueSaturatedInto<u64> + UniqueSaturatedFrom<u128>> CurrencyToVote<B>
+	for SaturatingCurrencyToVote
+{
+	fn to_vote(value: B, _: B) -> u64 {
+		value.unique_saturated_into()
+	}
+
+	fn to_currency(value: u128, _: B) -> B {
+		B::unique_saturated_from(value)
+	}
+}
diff --git a/frame/support/src/unsigned.rs b/frame/support/src/unsigned.rs
deleted file mode 100644
index 16c434fe638bc..0000000000000
--- a/frame/support/src/unsigned.rs
+++ /dev/null
@@ -1,172 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#[doc(hidden)]
-pub use crate::sp_runtime::traits::ValidateUnsigned;
-#[doc(hidden)]
-pub use crate::sp_runtime::transaction_validity::{
-	TransactionValidity, UnknownTransaction, TransactionValidityError, TransactionSource,
-};
-
-
-/// Implement `ValidateUnsigned` for `Runtime`.
-/// All given modules need to implement `ValidateUnsigned`.
-///
-/// # Example
-///
-/// ```
-/// # mod timestamp {
-/// # 	pub struct Module;
-/// #
-/// # 	impl frame_support::unsigned::ValidateUnsigned for Module {
-/// # 		type Call = Call;
-/// #
-/// # 		fn validate_unsigned(_source: frame_support::unsigned::TransactionSource, _call: &Self::Call)
-/// 			-> frame_support::unsigned::TransactionValidity {
-/// # 			unimplemented!();
-/// # 		}
-/// # 	}
-/// #
-/// # 	pub enum Call {
-/// # 	}
-/// # }
-/// #
-/// # pub type Timestamp = timestamp::Module;
-/// #
-/// #
-/// # pub enum Call {
-/// # 	Timestamp(timestamp::Call),
-/// # }
-/// # #[allow(unused)]
-/// pub struct Runtime;
-///
-/// frame_support::impl_outer_validate_unsigned! {
-/// 	impl ValidateUnsigned for Runtime {
-/// 		Timestamp
-/// 	}
-/// }
-/// ```
-#[macro_export]
-macro_rules! impl_outer_validate_unsigned {
-	(
-		impl ValidateUnsigned for $runtime:ident {
-			$( $module:ident )*
-		}
-	) => {
-		impl $crate::unsigned::ValidateUnsigned for $runtime {
-			type Call = Call;
-
-			fn pre_dispatch(call: &Self::Call) -> Result<(), $crate::unsigned::TransactionValidityError> {
-				#[allow(unreachable_patterns)]
-				match call {
-					$( Call::$module(inner_call) => $module::pre_dispatch(inner_call), )*
-					// pre-dispatch should not stop inherent extrinsics, validation should prevent
-					// including arbitrary (non-inherent) extrinsics to blocks.
- _ => Ok(()), - } - } - - fn validate_unsigned( - #[allow(unused_variables)] - source: $crate::unsigned::TransactionSource, - call: &Self::Call, - ) -> $crate::unsigned::TransactionValidity { - #[allow(unreachable_patterns)] - match call { - $( Call::$module(inner_call) => $module::validate_unsigned(source, inner_call), )* - _ => $crate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), - } - } - } - }; -} - -#[cfg(test)] -mod test_empty_call { - pub enum Call {} - - #[allow(unused)] - pub struct Runtime; - - impl_outer_validate_unsigned! { - impl ValidateUnsigned for Runtime { - } - } -} - -#[cfg(test)] -mod test_partial_and_full_call { - pub mod timestamp { - pub struct Module; - - impl super::super::ValidateUnsigned for Module { - type Call = Call; - - fn validate_unsigned( - _source: super::super::TransactionSource, - _call: &Self::Call - ) -> super::super::TransactionValidity { - unimplemented!(); - } - } - - pub enum Call { - Foo, - } - } - - mod test_full_unsigned { - pub type Timestamp = super::timestamp::Module; - - pub enum Call { - Timestamp(super::timestamp::Call), - } - - pub struct Runtime; - - impl_outer_validate_unsigned! { - impl ValidateUnsigned for Runtime { - Timestamp - } - } - - #[test] - fn used() { - let _ = Call::Timestamp(super::timestamp::Call::Foo); - let _ = Runtime; - } - } - - mod test_not_full_unsigned { - pub enum Call { - Timestamp(super::timestamp::Call), - } - - pub struct Runtime; - - impl_outer_validate_unsigned! { - impl ValidateUnsigned for Runtime { - } - } - - #[test] - fn used() { - let _ = Call::Timestamp(super::timestamp::Call::Foo); - let _ = Runtime; - } - } -} diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 1d19eeef70d79..115470a9bf034 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,9 +22,9 @@ //! //! - [`WeighData`]: the weight amount. //! - [`ClassifyDispatch`]: class of the dispatch. -//! - [`PaysFee`]: weather this weight should be translated to fee and deducted upon dispatch. +//! - [`PaysFee`]: whether this weight should be translated to fee and deducted upon dispatch. //! -//! Substrate then bundles then output information of the two traits into [`DispatchInfo`] struct +//! Substrate then bundles the output information of the three traits into [`DispatchInfo`] struct //! and provides it by implementing the [`GetDispatchInfo`] for all `Call` both inner and outer call //! types. //! @@ -39,9 +39,9 @@ //! `Yes`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 1000] //! fn dispatching(origin) { unimplemented!() } //! } @@ -52,10 +52,10 @@ //! 2.1 Define weight and class, **in which case `PaysFee` would be `Yes`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::DispatchClass; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, DispatchClass::Operational)] //! fn dispatching(origin) { unimplemented!() } //! 
} @@ -66,10 +66,10 @@ //! 2.2 Define weight and `PaysFee`, **in which case `ClassifyDispatch` would be `Normal`**. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::Pays; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, Pays::No)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -80,10 +80,10 @@ //! 3. Define all 3 parameters. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::{DispatchClass, Pays}; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = (1000, DispatchClass::Operational, Pays::No)] //! fn dispatching(origin) { unimplemented!() } //! } @@ -91,19 +91,20 @@ //! # fn main() {} //! ``` //! -//! ### 2. Define weights as a function of input arguments using `FunctionOf` tuple struct. This struct works -//! in a similar manner as above. 3 items must be provided and each can be either a fixed value or a -//! function/closure with the same parameters list as the dispatchable function itself, wrapper in a -//! tuple. +//! ### 2. Define weights as a function of input arguments using `FunctionOf` tuple struct. +//! +//! This struct works in a similar manner as above. 3 items must be provided and each can be either +//! a fixed value or a function/closure with the same parameters list as the dispatchable function +//! itself, wrapper in a tuple. //! //! Using this only makes sense if you want to use a function for at least one of the elements. If //! all 3 are static values, providing a raw tuple is easier. //! //! ``` -//! # use frame_system::Trait; +//! # use frame_system::Config; //! # use frame_support::weights::{DispatchClass, FunctionOf, Pays}; //! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = FunctionOf( //! // weight, function. //! |args: (&u32, &u64)| *args.0 as u64 + args.1, @@ -126,18 +127,21 @@ //! - Ubuntu 19.10 (GNU/Linux 5.3.0-18-generic x86_64) //! 
- rustc 1.42.0 (b8cedc004 2020-03-09) +use crate::dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; +use serde::{Deserialize, Serialize}; +use smallvec::{smallvec, SmallVec}; +use sp_arithmetic::{ + traits::{BaseArithmetic, Saturating, Unsigned}, + Perbill, +}; use sp_runtime::{ - RuntimeDebug, - traits::SignedExtension, generic::{CheckedExtrinsic, UncheckedExtrinsic}, + traits::{SaturatedConversion, SignedExtension}, + RuntimeDebug, }; -use crate::dispatch::{DispatchErrorWithPostInfo, DispatchResultWithPostInfo, DispatchError}; -use sp_runtime::traits::SaturatedConversion; -use sp_arithmetic::{Perbill, traits::{BaseArithmetic, Saturating, Unsigned}}; -use smallvec::{smallvec, SmallVec}; /// Re-export priority as type pub use sp_runtime::transaction_validity::TransactionPriority; @@ -154,7 +158,7 @@ pub mod constants { pub const WEIGHT_PER_SECOND: Weight = 1_000_000_000_000; pub const WEIGHT_PER_MILLIS: Weight = WEIGHT_PER_SECOND / 1000; // 1_000_000_000 pub const WEIGHT_PER_MICROS: Weight = WEIGHT_PER_MILLIS / 1000; // 1_000_000 - pub const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000 + pub const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000 parameter_types! { /// Importing a block with 0 txs takes ~5 ms @@ -198,7 +202,7 @@ pub trait PaysFee { } /// Explicit enum to denote if a transaction pays fee or not. -#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode)] +#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, TypeInfo)] pub enum Pays { /// Transactor will pay related fees. Yes, @@ -213,9 +217,12 @@ impl Default for Pays { } /// A generalized group of dispatch types. +/// +/// NOTE whenever upgrading the enum make sure to also update +/// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum DispatchClass { /// A normal dispatch. Normal, @@ -223,8 +230,9 @@ pub enum DispatchClass { Operational, /// A mandatory dispatch. These kinds of dispatch are always included regardless of their /// weight, therefore it is critical that they are separately validated to ensure that a - /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just means - /// ensuring that the extrinsic can only be included once and that it is always very light. + /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just + /// means ensuring that the extrinsic can only be included once and that it is always very + /// light. /// /// Do *NOT* use it for extrinsics that can be heavy. /// @@ -242,9 +250,46 @@ impl Default for DispatchClass { } } +impl DispatchClass { + /// Returns an array containing all dispatch classes. + pub fn all() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational, DispatchClass::Mandatory] + } + + /// Returns an array of all dispatch classes except `Mandatory`. + pub fn non_mandatory() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational] + } +} + +/// A trait that represents one or many values of given type. 
+/// +/// Useful to accept as parameter type to let the caller pass either a single value directly +/// or an iterator. +pub trait OneOrMany { + /// The iterator type. + type Iter: Iterator; + /// Convert this item into an iterator. + fn into_iter(self) -> Self::Iter; +} + +impl OneOrMany for DispatchClass { + type Iter = sp_std::iter::Once; + fn into_iter(self) -> Self::Iter { + sp_std::iter::once(self) + } +} + +impl<'a> OneOrMany for &'a [DispatchClass] { + type Iter = sp_std::iter::Cloned>; + fn into_iter(self) -> Self::Iter { + self.iter().cloned() + } +} + /// Primitives related to priority management of Frame. pub mod priority { - /// The starting point of all Operational transactions. 3/4 of u64::max_value(). + /// The starting point of all Operational transactions. 3/4 of u64::MAX. pub const LIMIT: u64 = 13_835_058_055_282_163_711_u64; /// Wrapper for priority of different dispatch classes. @@ -267,7 +312,7 @@ pub mod priority { } /// A bundle of static information collected from the `#[weight = $x]` attributes. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct DispatchInfo { /// Weight of this transaction. pub weight: Weight, @@ -286,9 +331,15 @@ pub trait GetDispatchInfo { fn get_dispatch_info(&self) -> DispatchInfo; } +impl GetDispatchInfo for () { + fn get_dispatch_info(&self) -> DispatchInfo { + DispatchInfo::default() + } +} + /// Weight information that is only available post dispatch. /// NOTE: This can only be used to reduce the weight or fee, not increase it. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct PostDispatchInfo { /// Actual weight consumed by a call or `None` which stands for the worst case static weight. pub actual_weight: Option, @@ -331,43 +382,32 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc match result { Ok(post_info) => &post_info, Err(err) => &err.post_info, - }.calc_actual_weight(info) + } + .calc_actual_weight(info) } impl From<(Option, Pays)> for PostDispatchInfo { fn from(post_weight_info: (Option, Pays)) -> Self { let (actual_weight, pays_fee) = post_weight_info; - Self { - actual_weight, - pays_fee, - } + Self { actual_weight, pays_fee } } } impl From for PostDispatchInfo { fn from(pays_fee: Pays) -> Self { - Self { - actual_weight: None, - pays_fee, - } + Self { actual_weight: None, pays_fee } } } impl From> for PostDispatchInfo { fn from(actual_weight: Option) -> Self { - Self { - actual_weight, - pays_fee: Default::default(), - } + Self { actual_weight, pays_fee: Default::default() } } } impl From<()> for PostDispatchInfo { fn from(_: ()) -> Self { - Self { - actual_weight: None, - pays_fee: Default::default(), - } + Self { actual_weight: None, pays_fee: Default::default() } } } @@ -400,8 +440,9 @@ pub trait WithPostDispatchInfo { fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; } -impl WithPostDispatchInfo for T where - T: Into +impl WithPostDispatchInfo for T +where + T: Into, { fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { DispatchErrorWithPostInfo { @@ -489,12 +530,12 @@ impl PaysFee for (Weight, Pays) { /// A struct to represent a weight which is a function of the input arguments. 
The given items have /// the following types: /// -/// - `WD`: a raw `Weight` value or a closure that returns a `Weight` with the same +/// - `WD`: a raw `Weight` value or a closure that returns a `Weight` with the same argument list as +/// the dispatched, wrapped in a tuple. +/// - `CD`: a raw `DispatchClass` value or a closure that returns a `DispatchClass` with the same /// argument list as the dispatched, wrapped in a tuple. -/// - `CD`: a raw `DispatchClass` value or a closure that returns a `DispatchClass` -/// with the same argument list as the dispatched, wrapped in a tuple. -/// - `PF`: a `Pays` variant for whether this dispatch pays fee or not or a closure that -/// returns a `Pays` variant with the same argument list as the dispatched, wrapped in a tuple. +/// - `PF`: a `Pays` variant for whether this dispatch pays fee or not or a closure that returns a +/// `Pays` variant with the same argument list as the dispatched, wrapped in a tuple. #[deprecated = "Function arguments are available directly inside the annotation now."] pub struct FunctionOf(pub WD, pub CD, pub PF); @@ -508,8 +549,9 @@ impl WeighData for FunctionOf { // `WeighData` as a closure #[allow(deprecated)] -impl WeighData for FunctionOf where - WD : Fn(Args) -> Weight +impl WeighData for FunctionOf +where + WD: Fn(Args) -> Weight, { fn weigh_data(&self, args: Args) -> Weight { (self.0)(args) @@ -526,8 +568,9 @@ impl ClassifyDispatch for FunctionOf // `ClassifyDispatch` as a raw value #[allow(deprecated)] -impl ClassifyDispatch for FunctionOf where - CD : Fn(Args) -> DispatchClass +impl ClassifyDispatch for FunctionOf +where + CD: Fn(Args) -> DispatchClass, { fn classify_dispatch(&self, args: Args) -> DispatchClass { (self.1)(args) @@ -544,8 +587,9 @@ impl PaysFee for FunctionOf { // `PaysFee` as a closure #[allow(deprecated)] -impl PaysFee for FunctionOf where - PF : Fn(Args) -> Pays +impl PaysFee for FunctionOf +where + PF: Fn(Args) -> Pays, { fn pays_fee(&self, args: Args) -> Pays { (self.2)(args) @@ -565,8 +609,7 @@ where } /// Implementation for checked extrinsic. -impl GetDispatchInfo - for CheckedExtrinsic +impl GetDispatchInfo for CheckedExtrinsic where Call: GetDispatchInfo, { @@ -580,16 +623,12 @@ where impl GetDispatchInfo for sp_runtime::testing::TestXt { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. - DispatchInfo { - weight: self.encode().len() as _, - pays_fee: Pays::Yes, - ..Default::default() - } + DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::Yes, ..Default::default() } } } /// The weight of database operations that the runtime can invoke. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct RuntimeDbWeight { pub read: Weight, pub write: Weight, @@ -621,7 +660,7 @@ impl RuntimeDbWeight { /// /// The `negative` value encodes whether the term is added or substracted from the /// overall polynomial result. -#[derive(Clone, Encode, Decode)] +#[derive(Clone, Encode, Decode, TypeInfo)] pub struct WeightToFeeCoefficient { /// The integral part of the coefficient. pub coeff_integer: Balance, @@ -656,32 +695,35 @@ pub trait WeightToFeePolynomial { /// This should not be overriden in most circumstances. Calculation is done in the /// `Balance` type and never overflows. All evaluation is saturating. 
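// A worked instance of the evaluation described above (illustrative; the `Poly`
// polynomial is the one defined in the tests further down): each
// `WeightToFeeCoefficient` contributes `(coeff_integer + coeff_frac) * w^degree`,
// added or subtracted according to its `negative` flag. For
// `0.5x^3 + (2 + 1/3)x^2 + 7x - 10_000` evaluated at `w = 100` this yields
// `500_000 + 23_333 + 700 - 10_000 = 514_033`, exactly what `polynomial_works` asserts.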
fn calc(weight: &Weight) -> Self::Balance { - Self::polynomial().iter().fold(Self::Balance::saturated_from(0u32), |mut acc, args| { - let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); - - // The sum could get negative. Therefore we only sum with the accumulator. - // The Perbill Mul implementation is non overflowing. - let frac = args.coeff_frac * w; - let integer = args.coeff_integer.saturating_mul(w); - - if args.negative { - acc = acc.saturating_sub(frac); - acc = acc.saturating_sub(integer); - } else { - acc = acc.saturating_add(frac); - acc = acc.saturating_add(integer); - } + Self::polynomial() + .iter() + .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { + let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); - acc - }) + // The sum could get negative. Therefore we only sum with the accumulator. + // The Perbill Mul implementation is non overflowing. + let frac = args.coeff_frac * w; + let integer = args.coeff_integer.saturating_mul(w); + + if args.negative { + acc = acc.saturating_sub(frac); + acc = acc.saturating_sub(integer); + } else { + acc = acc.saturating_add(frac); + acc = acc.saturating_add(integer); + } + + acc + }) } } /// Implementor of `WeightToFeePolynomial` that maps one unit of weight to one unit of fee. pub struct IdentityFee(sp_std::marker::PhantomData); -impl WeightToFeePolynomial for IdentityFee where - T: BaseArithmetic + From + Copy + Unsigned +impl WeightToFeePolynomial for IdentityFee +where + T: BaseArithmetic + From + Copy + Unsigned, { type Balance = T; @@ -695,17 +737,99 @@ impl WeightToFeePolynomial for IdentityFee where } } +/// A struct holding value for each `DispatchClass`. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct PerDispatchClass { + /// Value for `Normal` extrinsics. + normal: T, + /// Value for `Operational` extrinsics. + operational: T, + /// Value for `Mandatory` extrinsics. + mandatory: T, +} + +impl PerDispatchClass { + /// Create new `PerDispatchClass` with the same value for every class. + pub fn new(val: impl Fn(DispatchClass) -> T) -> Self { + Self { + normal: val(DispatchClass::Normal), + operational: val(DispatchClass::Operational), + mandatory: val(DispatchClass::Mandatory), + } + } + + /// Get a mutable reference to current value of given class. + pub fn get_mut(&mut self, class: DispatchClass) -> &mut T { + match class { + DispatchClass::Operational => &mut self.operational, + DispatchClass::Normal => &mut self.normal, + DispatchClass::Mandatory => &mut self.mandatory, + } + } + + /// Get current value for given class. + pub fn get(&self, class: DispatchClass) -> &T { + match class { + DispatchClass::Normal => &self.normal, + DispatchClass::Operational => &self.operational, + DispatchClass::Mandatory => &self.mandatory, + } + } +} + +impl PerDispatchClass { + /// Set the value of given class. + pub fn set(&mut self, new: T, class: impl OneOrMany) { + for class in class.into_iter() { + *self.get_mut(class) = new.clone(); + } + } +} + +impl PerDispatchClass { + /// Returns the total weight consumed by all extrinsics in the block. + pub fn total(&self) -> Weight { + let mut sum = 0; + for class in DispatchClass::all() { + sum = sum.saturating_add(*self.get(*class)); + } + sum + } + + /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. 
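// An illustrative usage sketch of `PerDispatchClass` (assuming `Weight` is the
// unsigned integer alias used throughout this module):
//
//     let mut consumed = PerDispatchClass::<Weight>::default();
//     consumed.add(100, DispatchClass::Normal);
//     consumed.add(50, DispatchClass::Operational);
//     assert_eq!(consumed.total(), 150);
//     consumed.sub(30, DispatchClass::Normal);
//     assert_eq!(*consumed.get(DispatchClass::Normal), 70);
//     // `set` accepts a single class or a slice via the `OneOrMany` trait above:
//     consumed.set(0, DispatchClass::non_mandatory());
//     assert_eq!(consumed.total(), 0);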
+ pub fn add(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_add(weight); + } + + /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would + /// occur. + pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { + let value = self.get_mut(class); + *value = value.checked_add(weight).ok_or(())?; + Ok(()) + } + + /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of + /// `Weight`. + pub fn sub(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_sub(weight); + } +} + #[cfg(test)] #[allow(dead_code)] mod tests { - use crate::{decl_module, parameter_types, traits::Get}; use super::*; + use crate::{decl_module, parameter_types, traits::Get}; - pub trait Trait { + pub trait Config: 'static { type Origin; type Balance; type BlockNumber; type DbWeight: Get; + type PalletInfo: crate::traits::PalletInfo; } pub struct TraitImpl {} @@ -717,15 +841,16 @@ mod tests { }; } - impl Trait for TraitImpl { + impl Config for TraitImpl { type Origin = u32; type BlockNumber = u32; type Balance = u32; type DbWeight = DbWeight; + type PalletInfo = crate::tests::PanicPalletInfo; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin, system=self { // no arguments, fixed weight #[weight = 1000] fn f00(_origin) { unimplemented!(); } @@ -747,7 +872,7 @@ mod tests { fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] - fn f2(_origin) { unimplemented!(); } + fn f20(_origin) { unimplemented!(); } #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] fn f21(_origin) { unimplemented!(); } @@ -758,58 +883,65 @@ mod tests { #[test] fn weights_are_correct() { // #[weight = 1000] - let info = Call::::f00().get_dispatch_info(); + let info = Call::::f00 {}.get_dispatch_info(); assert_eq!(info.weight, 1000); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (1000, DispatchClass::Mandatory)] - let info = Call::::f01().get_dispatch_info(); + let info = Call::::f01 {}.get_dispatch_info(); assert_eq!(info.weight, 1000); assert_eq!(info.class, DispatchClass::Mandatory); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (1000, Pays::No)] - let info = Call::::f02().get_dispatch_info(); + let info = Call::::f02 {}.get_dispatch_info(); assert_eq!(info.weight, 1000); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::No); // #[weight = (1000, DispatchClass::Operational, Pays::No)] - let info = Call::::f03().get_dispatch_info(); + let info = Call::::f03 {}.get_dispatch_info(); assert_eq!(info.weight, 1000); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::No); - assert_eq!(Call::::f11(10, 20).get_dispatch_info().weight, 120); - assert_eq!(Call::::f11(10, 20).get_dispatch_info().class, DispatchClass::Normal); - assert_eq!(Call::::f12(10, 20).get_dispatch_info().weight, 0); - assert_eq!(Call::::f12(10, 20).get_dispatch_info().class, DispatchClass::Operational); - assert_eq!(Call::::f2().get_dispatch_info().weight, 12300); - assert_eq!(Call::::f21().get_dispatch_info().weight, 45600); - assert_eq!(Call::::f2().get_dispatch_info().class, DispatchClass::Normal); + // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, 
Pays::Yes)] + let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); + assert_eq!(info.weight, 150); // 13*10 + 20 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = (0, DispatchClass::Operational, Pays::Yes)] + let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); + assert_eq!(info.weight, 0); + assert_eq!(info.class, DispatchClass::Operational); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + let info = Call::::f20 {}.get_dispatch_info(); + assert_eq!(info.weight, 12300); // 100*3 + 1000*2 + 10_000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + let info = Call::::f21 {}.get_dispatch_info(); + assert_eq!(info.weight, 45600); // 100*6 + 1000*5 + 40_000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); } #[test] fn extract_actual_weight_works() { - let pre = DispatchInfo { - weight: 1000, - .. Default::default() - }; + let pre = DispatchInfo { weight: 1000, ..Default::default() }; assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), 7); assert_eq!(extract_actual_weight(&Ok(Some(1000).into()), &pre), 1000); - assert_eq!( - extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), - 9 - ); + assert_eq!(extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), 9); } #[test] fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { - weight: 1000, - .. Default::default() - }; + let pre = DispatchInfo { weight: 1000, ..Default::default() }; assert_eq!(extract_actual_weight(&Ok(Some(1250).into()), &pre), 1000); assert_eq!( extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(1300)), &pre), @@ -819,7 +951,7 @@ mod tests { type Balance = u64; - // 0.5x^3 + 2.333x2 + 7x - 10_000 + // 0.5x^3 + 2.333x^2 + 7x - 10_000 struct Poly; impl WeightToFeePolynomial for Poly { type Balance = Balance; @@ -828,13 +960,13 @@ smallvec![ WeightToFeeCoefficient { coeff_integer: 0, - coeff_frac: Perbill::from_fraction(0.5), + coeff_frac: Perbill::from_float(0.5), negative: false, degree: 3 }, WeightToFeeCoefficient { coeff_integer: 2, - coeff_frac: Perbill::from_rational_approximation(1u32, 3u32), + coeff_frac: Perbill::from_rational(1u32, 3u32), negative: false, degree: 2 }, @@ -856,13 +988,16 @@ #[test] fn polynomial_works() { + // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 assert_eq!(Poly::calc(&100), 514033); + // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 assert_eq!(Poly::calc(&10_123), 518917034928); } #[test] fn polynomial_does_not_underflow() { assert_eq!(Poly::calc(&0), 0); + assert_eq!(Poly::calc(&10), 0); } #[test] diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index ee8ace5c983c5..e12880871e5c2 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-test" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,30 +12,39 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-io =
{ version = "2.0.0", path = "../../../primitives/io", default-features = false } -sp-state-machine = { version = "0.8.0", optional = true, path = "../../../primitives/state-machine" } -frame-support = { version = "2.0.0", default-features = false, path = "../" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -trybuild = "1.0.33" +serde = { version = "1.0.126", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/arithmetic" } +sp-io = { version = "4.0.0-dev", path = "../../../primitives/io", default-features = false } +sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../../../primitives/state-machine" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } +sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/version" } +trybuild = "1.0.43" pretty_assertions = "0.6.1" rustversion = "1.0.0" -frame-metadata = { version = "12.0.0", default-features = false, path = "../../metadata" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +# The "std" feature for this pallet is never activated on purpose, in order to test the construct_runtime error message. +test-pallet = { package = "frame-support-test-pallet", default-features = false, path = "pallet" } [features] default = ["std"] std = [ "serde/std", "codec/std", + "scale-info/std", "sp-io/std", "frame-support/std", - "sp-inherents/std", + "frame-system/std", "sp-core/std", "sp-std/std", "sp-runtime/std", "sp-state-machine", ] +try-runtime = ["frame-support/try-runtime"] +# WARNING: CI only executes the pallet tests with this feature enabled; +# if the feature is intended to be used outside, CI and this message need to be updated.
+conditional-storage = [] diff --git a/frame/support/test/pallet/Cargo.toml b/frame/support/test/pallet/Cargo.toml new file mode 100644 index 0000000000000..35eb4f34acae1 --- /dev/null +++ b/frame/support/test/pallet/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "frame-support-test-pallet" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../system" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "frame-support/std", + "frame-system/std", +] diff --git a/bin/node/runtime/src/weights/pallet_utility.rs b/frame/support/test/pallet/src/lib.rs similarity index 51% rename from bin/node/runtime/src/weights/pallet_utility.rs rename to frame/support/test/pallet/src/lib.rs index af48267d9b5de..f9f94b06a0a5a 100644 --- a/bin/node/runtime/src/weights/pallet_utility.rs +++ b/frame/support/test/pallet/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,22 +14,33 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +pub use pallet::*; -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 +#[frame_support::pallet] +pub mod pallet { + #[allow(unused_imports)] + use frame_support::pallet_prelude::*; + #[allow(unused_imports)] + use frame_system::pallet_prelude::*; -#![allow(unused_parens)] -#![allow(unused_imports)] + #[pallet::pallet] + pub struct Pallet(_); -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; + #[pallet::config] + pub trait Config: frame_system::Config {} -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - fn batch(c: u32, ) -> Weight { - (16461000 as Weight) - .saturating_add((1982000 as Weight).saturating_mul(c as Weight)) + #[pallet::genesis_config] + pub struct GenesisConfig {} + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self {} + } } - fn as_derivative() -> Weight { - (4086000 as Weight) + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) {} } } diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index d5f49299880ca..52c0a6270d47f 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,14 +23,49 @@ #![deny(warnings)] /// The configuration trait -pub trait Trait { +pub trait Config: 'static { /// The runtime origin type. - type Origin; + type Origin: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; /// The block number type. - type BlockNumber; + type BlockNumber: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; + /// The information about the pallet setup in the runtime. + type PalletInfo: frame_support::traits::PalletInfo; + /// The db weights. + type DbWeight: frame_support::traits::Get; } frame_support::decl_module! { /// Some test module - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=self {} +} + +/// A PalletInfo implementation which just panics. +pub struct PanicPalletInfo; + +impl frame_support::traits::PalletInfo for PanicPalletInfo { + fn index() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } +} + +/// Provides an implementation of [`frame_support::traits::Randomness`] that should only be used in +/// tests! +pub struct TestRandomness(sp_std::marker::PhantomData); + +impl frame_support::traits::Randomness + for TestRandomness +where + T: frame_system::Config, +{ + fn random(subject: &[u8]) -> (Output, T::BlockNumber) { + use sp_runtime::traits::TrailingZeroInput; + + ( + Output::decode(&mut TrailingZeroInput::new(subject)).unwrap_or_default(), + frame_system::Pallet::::block_number(), + ) + } } diff --git a/frame/utility/src/default_weights.rs b/frame/support/test/src/pallet_version.rs similarity index 56% rename from frame/utility/src/default_weights.rs rename to frame/support/test/src/pallet_version.rs index d63f010612ec1..bdea3859d65c6 100644 --- a/frame/utility/src/default_weights.rs +++ b/frame/support/test/src/pallet_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,19 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 +use frame_support::{crate_to_pallet_version, traits::PalletVersion}; -#![allow(unused_parens)] -#![allow(unused_imports)] +#[test] +fn ensure_that_current_pallet_version_is_correct() { + let expected = PalletVersion { + major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(), + minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(), + patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(), + }; -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn batch(c: u32, ) -> Weight { - (16461000 as Weight) - .saturating_add((1982000 as Weight).saturating_mul(c as Weight)) - } - fn as_derivative() -> Weight { - (4086000 as Weight) - } + assert_eq!(expected, crate_to_pallet_version!()) } diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 9a17e44dbef4c..062993fe10fbb 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,12 +19,17 @@ //! * error declareed with decl_error works //! * integrity test is generated -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_runtime::{generic, traits::{BlakeTwo256, Block as _, Verify}, DispatchError}; -use sp_core::{H256, sr25519}; -use sp_std::cell::RefCell; use frame_support::traits::PalletInfo as _; +use scale_info::TypeInfo; +use sp_core::{sr25519, H256}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Verify}, + DispatchError, +}; +use sp_std::cell::RefCell; mod system; @@ -37,11 +42,11 @@ thread_local! { mod module1 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: ::Origin, system=system + pub struct Module, I: Instance = DefaultInstance> for enum Call + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -50,36 +55,36 @@ mod module1 { } } - #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] - pub struct Origin(pub core::marker::PhantomData::<(T, I)>); + #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode, TypeInfo)] + pub struct Origin(pub core::marker::PhantomData<(T, I)>); frame_support::decl_event! { pub enum Event where - ::AccountId + ::AccountId { A(AccountId), } } frame_support::decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Module, I: Instance> { Something } } frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module {} + trait Store for Module, I: Instance=DefaultInstance> as Module {} } } mod module2 { use super::*; - pub trait Trait: system::Trait {} + pub trait Config: system::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call - where origin: ::Origin, system=system + pub struct Module for enum Call + where origin: ::Origin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -92,7 +97,7 @@ mod module2 { } } - #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] + #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode, TypeInfo)] pub struct Origin; frame_support::decl_event! { @@ -102,26 +107,130 @@ mod module2 { } frame_support::decl_error! { - pub enum Error for Module { + pub enum Error for Module { Something } } frame_support::decl_storage! { - trait Store for Module as Module {} + trait Store for Module as Module {} + } +} + +mod nested { + use super::*; + + pub mod module3 { + use super::*; + + pub trait Config: system::Config {} + + frame_support::decl_module! { + pub struct Module for enum Call + where origin: ::Origin, system=system + { + #[weight = 0] + pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { + Err(Error::::Something.into()) + } + + fn integrity_test() { + INTEGRITY_TEST_EXEC.with(|i| *i.borrow_mut() += 1); + } + } + } + + #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode, TypeInfo)] + pub struct Origin; + + frame_support::decl_event! { + pub enum Event { + A, + } + } + + frame_support::decl_error! { + pub enum Error for Module { + Something + } + } + + frame_support::decl_storage! { + trait Store for Module as Module {} + add_extra_genesis { + build(|_config| {}) + } + } } } -impl module1::Trait for Runtime {} -impl module2::Trait for Runtime {} +pub mod module3 { + use super::*; + + pub trait Config: system::Config {} + + frame_support::decl_module! { + pub struct Module for enum Call + where origin: ::Origin, system=system + { + #[weight = 0] + pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { + Err(Error::::Something.into()) + } + #[weight = 0] + pub fn aux_1(_origin, #[compact] _data: u32) -> frame_support::dispatch::DispatchResult { + unreachable!() + } + #[weight = 0] + pub fn aux_2(_origin, _data: i32, #[compact] _data2: u32) -> frame_support::dispatch::DispatchResult { + unreachable!() + } + #[weight = 0] + fn aux_3(_origin, _data: i32, _data2: String) -> frame_support::dispatch::DispatchResult { + unreachable!() + } + #[weight = 3] + fn aux_4(_origin) -> frame_support::dispatch::DispatchResult { unreachable!() } + #[weight = (5, frame_support::weights::DispatchClass::Operational)] + fn operational(_origin) { unreachable!() } + } + } + + #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode, TypeInfo)] + pub struct Origin(pub core::marker::PhantomData); + + frame_support::decl_event! { + pub enum Event { + A, + } + } + + frame_support::decl_error! { + pub enum Error for Module { + Something + } + } + + frame_support::decl_storage! 
{ + trait Store for Module as Module {} + add_extra_genesis { + build(|_config| {}) + } + } +} + +impl module1::Config for Runtime {} +impl module2::Config for Runtime {} +impl nested::module3::Config for Runtime {} +impl module3::Config for Runtime {} pub type Signature = sr25519::Signature; pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { - type BaseCallFilter = (); +impl system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; type Origin = Origin; type BlockNumber = BlockNumber; @@ -129,6 +238,7 @@ impl system::Trait for Runtime { type Event = Event; type PalletInfo = PalletInfo; type Call = Call; + type DbWeight = (); } frame_support::construct_runtime!( @@ -137,17 +247,19 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event, Origin} = 30, - Module1_1: module1::::{Module, Call, Storage, Event, Origin}, - Module2: module2::{Module, Call, Storage, Event, Origin}, - Module1_2: module1::::{Module, Call, Storage, Event, Origin}, - Module1_3: module1::::{Module, Storage} = 6, - Module1_4: module1::::{Module, Call} = 3, - Module1_5: module1::::{Module, Event}, - Module1_6: module1::::{Module, Call, Storage, Event, Origin} = 1, - Module1_7: module1::::{Module, Call, Storage, Event, Origin}, - Module1_8: module1::::{Module, Call, Storage, Event, Origin} = 12, - Module1_9: module1::::{Module, Call, Storage, Event, Origin}, + System: system::{Pallet, Call, Event, Origin} = 30, + Module1_1: module1::::{Pallet, Call, Storage, Event, Origin}, + Module2: module2::{Pallet, Call, Storage, Event, Origin}, + Module1_2: module1::::{Pallet, Call, Storage, Event, Origin}, + NestedModule3: nested::module3::{Pallet, Call, Config, Storage, Event, Origin}, + Module3: self::module3::{Pallet, Call, Config, Storage, Event, Origin}, + Module1_3: module1::::{Pallet, Storage} = 6, + Module1_4: module1::::{Pallet, Call} = 3, + Module1_5: module1::::{Pallet, Event}, + Module1_6: module1::::{Pallet, Call, Storage, Event, Origin} = 1, + Module1_7: module1::::{Pallet, Call, Storage, Event, Origin}, + Module1_8: module1::::{Pallet, Call, Storage, Event, Origin} = 12, + Module1_9: module1::::{Pallet, Call, Storage, Event, Origin}, } ); @@ -155,6 +267,82 @@ pub type Header = generic::Header; pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +mod origin_test { + use super::{module3, nested, system, Block, UncheckedExtrinsic}; + use frame_support::traits::{Contains, OriginTrait}; + + impl nested::module3::Config for RuntimeOriginTest {} + impl module3::Config for RuntimeOriginTest {} + + pub struct BaseCallFilter; + impl Contains for BaseCallFilter { + fn contains(c: &Call) -> bool { + match c { + Call::NestedModule3(_) => true, + _ => false, + } + } + } + + impl system::Config for RuntimeOriginTest { + type BaseCallFilter = BaseCallFilter; + type Hash = super::H256; + type Origin = Origin; + type BlockNumber = super::BlockNumber; + type AccountId = u32; + type Event = Event; + type PalletInfo = PalletInfo; + type Call = Call; + type DbWeight = (); + } + + frame_support::construct_runtime!( + pub enum RuntimeOriginTest where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Event, Origin}, + NestedModule3: nested::module3::{Pallet, Origin, Call}, + Module3: module3::{Pallet, Origin, Call}, + } + ); + + #[test] + fn 
origin_default_filter() { + let accepted_call = nested::module3::Call::fail {}.into(); + let rejected_call = module3::Call::fail {}.into(); + + assert_eq!(Origin::root().filter_call(&accepted_call), true); + assert_eq!(Origin::root().filter_call(&rejected_call), true); + assert_eq!(Origin::none().filter_call(&accepted_call), true); + assert_eq!(Origin::none().filter_call(&rejected_call), false); + assert_eq!(Origin::signed(0).filter_call(&accepted_call), true); + assert_eq!(Origin::signed(0).filter_call(&rejected_call), false); + assert_eq!(Origin::from(Some(0)).filter_call(&accepted_call), true); + assert_eq!(Origin::from(Some(0)).filter_call(&rejected_call), false); + assert_eq!(Origin::from(None).filter_call(&accepted_call), true); + assert_eq!(Origin::from(None).filter_call(&rejected_call), false); + assert_eq!(Origin::from(super::nested::module3::Origin).filter_call(&accepted_call), true); + assert_eq!(Origin::from(super::nested::module3::Origin).filter_call(&rejected_call), false); + + let mut origin = Origin::from(Some(0)); + + origin.add_filter(|c| matches!(c, Call::Module3(_))); + assert_eq!(origin.filter_call(&accepted_call), false); + assert_eq!(origin.filter_call(&rejected_call), false); + + origin.set_caller_from(Origin::root()); + assert!(matches!(origin.caller, OriginCaller::system(super::system::RawOrigin::Root))); + assert_eq!(origin.filter_call(&accepted_call), false); + assert_eq!(origin.filter_call(&rejected_call), false); + + origin.reset_filter(); + assert_eq!(origin.filter_call(&accepted_call), true); + assert_eq!(origin.filter_call(&rejected_call), false); + } +} + #[test] fn check_modules_error_type() { assert_eq!( @@ -169,6 +357,10 @@ fn check_modules_error_type() { Module1_2::fail(system::Origin::::Root.into()), Err(DispatchError::Module { index: 33, error: 0, message: Some("Something") }), ); + assert_eq!( + NestedModule3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { index: 34, error: 0, message: Some("Something") }), + ); assert_eq!( Module1_3::fail(system::Origin::::Root.into()), Err(DispatchError::Module { index: 6, error: 0, message: Some("Something") }), @@ -202,7 +394,7 @@ fn check_modules_error_type() { #[test] fn integrity_test_works() { __construct_runtime_integrity_test::runtime_integrity_tests(); - assert_eq!(INTEGRITY_TEST_EXEC.with(|i| *i.borrow()), 1); + assert_eq!(INTEGRITY_TEST_EXEC.with(|i| *i.borrow()), 2); } #[test] @@ -212,25 +404,31 @@ fn origin_codec() { let origin = OriginCaller::system(system::RawOrigin::None); assert_eq!(origin.encode()[0], 30); - let origin = OriginCaller::module1_Instance1(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_1(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 31); - let origin = OriginCaller::module2(module2::Origin); + let origin = OriginCaller::Module2(module2::Origin); assert_eq!(origin.encode()[0], 32); - let origin = OriginCaller::module1_Instance2(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_2(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 33); - let origin = OriginCaller::module1_Instance6(module1::Origin(Default::default())); + let origin = OriginCaller::NestedModule3(nested::module3::Origin); + assert_eq!(origin.encode()[0], 34); + + let origin = OriginCaller::Module3(module3::Origin(Default::default())); + assert_eq!(origin.encode()[0], 35); + + let origin = OriginCaller::Module1_6(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 1); - let origin = 
OriginCaller::module1_Instance7(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_7(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 2); - let origin = OriginCaller::module1_Instance8(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_8(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 12); - let origin = OriginCaller::module1_Instance9(module1::Origin(Default::default())); + let origin = OriginCaller::Module1_9(module1::Origin(Default::default())); assert_eq!(origin.encode()[0], 13); } @@ -250,6 +448,12 @@ fn event_codec() { let event = module1::Event::::A(Default::default()); assert_eq!(Event::from(event).encode()[0], 33); + let event = nested::module3::Event::A; + assert_eq!(Event::from(event).encode()[0], 34); + + let event = module3::Event::A; + assert_eq!(Event::from(event).encode()[0], 35); + let event = module1::Event::::A(Default::default()); assert_eq!(Event::from(event).encode()[0], 4); @@ -269,242 +473,266 @@ fn event_codec() { #[test] fn call_codec() { use codec::Encode; - assert_eq!(Call::System(system::Call::noop()).encode()[0], 30); - assert_eq!(Call::Module1_1(module1::Call::fail()).encode()[0], 31); - assert_eq!(Call::Module2(module2::Call::fail()).encode()[0], 32); - assert_eq!(Call::Module1_2(module1::Call::fail()).encode()[0], 33); - assert_eq!(Call::Module1_4(module1::Call::fail()).encode()[0], 3); - assert_eq!(Call::Module1_6(module1::Call::fail()).encode()[0], 1); - assert_eq!(Call::Module1_7(module1::Call::fail()).encode()[0], 2); - assert_eq!(Call::Module1_8(module1::Call::fail()).encode()[0], 12); - assert_eq!(Call::Module1_9(module1::Call::fail()).encode()[0], 13); + assert_eq!(Call::System(system::Call::noop {}).encode()[0], 30); + assert_eq!(Call::Module1_1(module1::Call::fail {}).encode()[0], 31); + assert_eq!(Call::Module2(module2::Call::fail {}).encode()[0], 32); + assert_eq!(Call::Module1_2(module1::Call::fail {}).encode()[0], 33); + assert_eq!(Call::NestedModule3(nested::module3::Call::fail {}).encode()[0], 34); + assert_eq!(Call::Module3(module3::Call::fail {}).encode()[0], 35); + assert_eq!(Call::Module1_4(module1::Call::fail {}).encode()[0], 3); + assert_eq!(Call::Module1_6(module1::Call::fail {}).encode()[0], 1); + assert_eq!(Call::Module1_7(module1::Call::fail {}).encode()[0], 2); + assert_eq!(Call::Module1_8(module1::Call::fail {}).encode()[0], 12); + assert_eq!(Call::Module1_9(module1::Call::fail {}).encode()[0], 13); +} + +#[test] +fn call_compact_attr() { + use codec::Encode; + let call: module3::Call = module3::Call::aux_1 { _data: 1 }; + let encoded = call.encode(); + assert_eq!(2, encoded.len()); + assert_eq!(vec![1, 4], encoded); + + let call: module3::Call = module3::Call::aux_2 { _data: 1, _data2: 2 }; + let encoded = call.encode(); + assert_eq!(6, encoded.len()); + assert_eq!(vec![2, 1, 0, 0, 0, 8], encoded); +} + +#[test] +fn call_encode_is_correct_and_decode_works() { + use codec::{Decode, Encode}; + let call: module3::Call = module3::Call::fail {}; + let encoded = call.encode(); + assert_eq!(vec![0], encoded); + let decoded = module3::Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); + + let call: module3::Call = module3::Call::aux_3 { _data: 32, _data2: "hello".into() }; + let encoded = call.encode(); + assert_eq!(vec![3, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); + let decoded = module3::Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); +} + +#[test] +fn 
call_weight_should_attach_to_call_enum() { + use frame_support::{ + dispatch::{DispatchInfo, GetDispatchInfo}, + weights::{DispatchClass, Pays}, + }; + // operational. + assert_eq!( + module3::Call::::operational {}.get_dispatch_info(), + DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, + ); + // custom basic + assert_eq!( + module3::Call::::aux_4 {}.get_dispatch_info(), + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, + ); +} + +#[test] +fn call_name() { + use frame_support::dispatch::GetCallName; + let name = module3::Call::::aux_4 {}.get_call_name(); + assert_eq!("aux_4", name); +} + +#[test] +fn call_metadata() { + use frame_support::dispatch::{CallMetadata, GetCallMetadata}; + let call = Call::Module3(module3::Call::::aux_4 {}); + let metadata = call.get_call_metadata(); + let expected = CallMetadata { function_name: "aux_4".into(), pallet_name: "Module3".into() }; + assert_eq!(metadata, expected); +} + +#[test] +fn get_call_names() { + use frame_support::dispatch::GetCallName; + let call_names = module3::Call::::get_call_names(); + assert_eq!(["fail", "aux_1", "aux_2", "aux_3", "aux_4", "operational"], call_names); +} + +#[test] +fn get_module_names() { + use frame_support::dispatch::GetCallMetadata; + let module_names = Call::get_module_names(); + assert_eq!( + [ + "System", + "Module1_1", + "Module2", + "Module1_2", + "NestedModule3", + "Module3", + "Module1_4", + "Module1_6", + "Module1_7", + "Module1_8", + "Module1_9", + ], + module_names + ); +} + +#[test] +fn call_subtype_conversion() { + use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; + let call = Call::Module3(module3::Call::::fail {}); + let subcall: Option<&CallableCallFor> = call.is_sub_type(); + let subcall_none: Option<&CallableCallFor> = call.is_sub_type(); + assert_eq!(Some(&module3::Call::::fail {}), subcall); + assert_eq!(None, subcall_none); + + let from = Call::from(subcall.unwrap().clone()); + assert_eq!(from, call); } #[test] fn test_metadata() { - use frame_metadata::*; - let expected_metadata: RuntimeMetadataLastVersion = RuntimeMetadataLastVersion { - modules: DecodeDifferent::Encode(&[ - ModuleMetadata { - name: DecodeDifferent::Encode("System"), - storage: None, - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("noop"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("ExtrinsicSuccess"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - EventMetadata { - name: DecodeDifferent::Encode("ExtrinsicFailed"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - EventMetadata { - name: DecodeDifferent::Encode("Ignore"), - arguments: DecodeDifferent::Encode(&["BlockNumber"]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 30, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_1"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Instance1Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: 
DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 31, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module2"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 32, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_2"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Instance2Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 33, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_3"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Instance3Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: None, - event: None, - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 6, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_4"), - storage: None, - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: None, - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 3, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_5"), - storage: None, - calls: None, - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 4, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_6"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Instance6Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: 
Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 1, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_7"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Instance7Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 2, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_8"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Instance8Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 12, - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module1_9"), - storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("Instance9Module"), - entries: DecodeDifferent::Encode(&[]), - }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - index: 13, - }, - ]), - extrinsic: ExtrinsicMetadata { - version: 4, - signed_extensions: vec![DecodeDifferent::Encode("UnitSignedExtension")], + use frame_support::metadata::*; + use scale_info::meta_type; + + let pallets = vec![ + PalletMetadata { + name: "System", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: None, + index: 30, }, + PalletMetadata { + name: "Module1_1", + storage: Some(PalletStorageMetadata { prefix: "Instance1Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: None, 
+ index: 31, + }, + PalletMetadata { + name: "Module2", + storage: Some(PalletStorageMetadata { prefix: "Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::().into()), + constants: vec![], + error: None, + index: 32, + }, + PalletMetadata { + name: "Module1_2", + storage: Some(PalletStorageMetadata { prefix: "Instance2Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: None, + index: 33, + }, + PalletMetadata { + name: "NestedModule3", + storage: Some(PalletStorageMetadata { prefix: "Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::().into()), + constants: vec![], + error: None, + index: 34, + }, + PalletMetadata { + name: "Module3", + storage: Some(PalletStorageMetadata { prefix: "Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::().into()), + constants: vec![], + error: None, + index: 35, + }, + PalletMetadata { + name: "Module1_3", + storage: Some(PalletStorageMetadata { prefix: "Instance3Module", entries: vec![] }), + calls: None, + event: None, + constants: vec![], + error: None, + index: 6, + }, + PalletMetadata { + name: "Module1_4", + storage: None, + calls: Some(meta_type::>().into()), + event: None, + constants: vec![], + error: None, + index: 3, + }, + PalletMetadata { + name: "Module1_5", + storage: None, + calls: None, + event: Some(meta_type::>().into()), + constants: vec![], + error: None, + index: 4, + }, + PalletMetadata { + name: "Module1_6", + storage: Some(PalletStorageMetadata { prefix: "Instance6Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: None, + index: 1, + }, + PalletMetadata { + name: "Module1_7", + storage: Some(PalletStorageMetadata { prefix: "Instance7Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(PalletEventMetadata { + ty: meta_type::>(), + }), + constants: vec![], + error: None, + index: 2, + }, + PalletMetadata { + name: "Module1_8", + storage: Some(PalletStorageMetadata { prefix: "Instance8Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: None, + index: 12, + }, + PalletMetadata { + name: "Module1_9", + storage: Some(PalletStorageMetadata { prefix: "Instance9Module", entries: vec![] }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![], + error: None, + index: 13, + }, + ]; + + let extrinsic = ExtrinsicMetadata { + ty: meta_type::(), + version: 4, + signed_extensions: vec![SignedExtensionMetadata { + identifier: "UnitSignedExtension", + ty: meta_type::<()>(), + additional_signed: meta_type::<()>(), + }], }; - pretty_assertions::assert_eq!(Runtime::metadata().1, RuntimeMetadata::V12(expected_metadata)); + + let expected_metadata: RuntimeMetadataPrefixed = + RuntimeMetadataLastVersion::new(pallets, extrinsic, meta_type::()).into(); + let actual_metadata = Runtime::metadata(); + pretty_assertions::assert_eq!(actual_metadata, expected_metadata); } #[test] @@ -521,6 +749,12 @@ fn pallet_in_runtime_is_correct() { assert_eq!(PalletInfo::index::().unwrap(), 33); assert_eq!(PalletInfo::name::().unwrap(), "Module1_2"); + assert_eq!(PalletInfo::index::().unwrap(), 34); + assert_eq!(PalletInfo::name::().unwrap(), "NestedModule3"); + + assert_eq!(PalletInfo::index::().unwrap(), 35); + 
assert_eq!(PalletInfo::name::().unwrap(), "Module3"); + assert_eq!(PalletInfo::index::().unwrap(), 6); assert_eq!(PalletInfo::name::().unwrap(), "Module1_3"); diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index e1624c76830ae..a55e800628582 100644 --- a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/construct_runtime_ui/*.rs"); diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr index 65368666c88fe..2e2028fd1b862 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index.stderr @@ -1,10 +1,10 @@ -error: Module indices are conflicting: Both modules System and Pallet1 are at index 0 +error: Pallet indices are conflicting: Both pallets System and Pallet1 are at index 0 --> $DIR/conflicting_index.rs:9:3 | 9 | System: system::{}, | ^^^^^^ -error: Module indices are conflicting: Both modules System and Pallet1 are at index 0 +error: Pallet indices are conflicting: Both pallets System and Pallet1 are at index 0 --> $DIR/conflicting_index.rs:10:3 | 10 | Pallet1: pallet1::{} = 0, diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr index b792ff5d2a541..bfa3706a456a4 100644 --- a/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_index_2.stderr @@ -1,10 +1,10 @@ -error: Module indices are conflicting: Both modules System and Pallet3 are at index 5 +error: Pallet indices are conflicting: Both pallets System and Pallet3 are at index 5 --> $DIR/conflicting_index_2.rs:9:3 | 9 | System: system::{} = 5, | ^^^^^^ -error: Module indices are conflicting: Both modules System and Pallet3 are at index 5 +error: Pallet indices are conflicting: Both pallets System and Pallet3 are at index 5 --> $DIR/conflicting_index_2.rs:12:3 | 12 | Pallet3: pallet3::{}, diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs new file mode 100644 index 0000000000000..7cc6cbd6bd6e2 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet}, + Balance: balances::{Pallet}, + Balance: balances::{Pallet}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr new file mode 100644 index 0000000000000..27c5644e0d736 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/conflicting_module_name.stderr @@ -0,0 +1,11 @@ +error: Two pallets with the same name! + --> $DIR/conflicting_module_name.rs:10:3 + | +10 | Balance: balances::{Pallet}, + | ^^^^^^^ + +error: Two pallets with the same name! + --> $DIR/conflicting_module_name.rs:11:3 + | +11 | Balance: balances::{Pallet}, + | ^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs b/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs index ec37456e58e79..836af597851d8 100644 --- a/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs +++ b/frame/support/test/tests/construct_runtime_ui/double_module_parts.rs @@ -6,7 +6,7 @@ construct_runtime! { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::{Config, Call, Config, Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.rs b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.rs new file mode 100644 index 0000000000000..bc6abfa82b9cd --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.rs @@ -0,0 +1,13 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + system: , + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.stderr b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.stderr new file mode 100644 index 0000000000000..7102076e5acb0 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/empty_pallet_path.stderr @@ -0,0 +1,5 @@ +error: expected one of: `crate`, `self`, `super`, identifier + --> $DIR/empty_pallet_path.rs:9:11 + | +9 | system: , + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs index b79d73ff5c022..b3f0d340d671f 100644 --- a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs +++ b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.rs @@ -6,7 +6,7 @@ construct_runtime! { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::::{Call, Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr index fe880549211bc..06caa036b91ff 100644 --- a/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/generics_in_invalid_module.stderr @@ -1,4 +1,4 @@ -error: `Call` is not allowed to have generics. Only the following modules are allowed to have generics: `Event`, `Origin`, `Config`. +error: `Call` is not allowed to have generics. 
Only the following pallets are allowed to have generics: `Event`, `Origin`, `Config`. --> $DIR/generics_in_invalid_module.rs:10:36 | 10 | Balance: balances::::{Call, Origin}, diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr index 559a4637d67ff..50505b9130cbe 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr @@ -1,4 +1,4 @@ -error: expected curly braces +error: expected one of: identifier, curly braces, `<` --> $DIR/invalid_module_details.rs:9:19 | 9 | system: System::(), diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr index 66c9fc95cb546..29df6e4bd8cb5 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details_keyword.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Module`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` --> $DIR/invalid_module_details_keyword.rs:9:20 | 9 | system: System::{enum}, diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs index 3754d41d6e81c..e7d32559a6cc6 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.rs @@ -6,7 +6,7 @@ construct_runtime! { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::{Error}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr index 7442c6be3a9a3..bd3e672dc8b40 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_entry.stderr @@ -1,4 +1,4 @@ -error: expected one of: `Module`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` +error: expected one of: `Pallet`, `Call`, `Storage`, `Event`, `Config`, `Origin`, `Inherent`, `ValidateUnsigned` --> $DIR/invalid_module_entry.rs:10:23 | 10 | Balance: balances::{Error}, diff --git a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs index 5eb7df5d18c20..f748e643aa18a 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs +++ b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.rs @@ -6,7 +6,7 @@ construct_runtime! 
{ NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::::{Event}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr index f80b4bd66abdd..b1aa9b86cd0d6 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_event_generic_on_module_with_instance.stderr @@ -1,4 +1,4 @@ -error: Instantiable module with no generic `Event` cannot be constructed: module `Balance` must have generic `Event` +error: Instantiable pallet with no generic `Event` cannot be constructed: pallet `Balance` must have generic `Event` --> $DIR/missing_event_generic_on_module_with_instance.rs:10:3 | 10 | Balance: balances::::{Event}, diff --git a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs index 5e44ae84d87c6..7053acc185900 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs +++ b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.rs @@ -6,7 +6,7 @@ construct_runtime! { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module}, + System: system::{Pallet}, Balance: balances::::{Origin}, } } diff --git a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr index 0f7d36aafb863..63bb7442a8576 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_origin_generic_on_module_with_instance.stderr @@ -1,4 +1,4 @@ -error: Instantiable module with no generic `Origin` cannot be constructed: module `Balance` must have generic `Origin` +error: Instantiable pallet with no generic `Origin` cannot be constructed: pallet `Balance` must have generic `Origin` --> $DIR/missing_origin_generic_on_module_with_instance.rs:10:3 | 10 | Balance: balances::::{Origin}, diff --git a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr index 2ebe0721eb381..7648f5c1bfb33 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr @@ -1,7 +1,6 @@ -error: `System` module declaration is missing. Please add this line: `System: frame_system::{Module, Call, Storage, Config, Event},` +error: `System` pallet declaration is missing. 
Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},` --> $DIR/missing_system_module.rs:8:2 | -8 | { - | _____^ +8 | / { 9 | | } | |_____^ diff --git a/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr index c0ef5c8e60b9e..2e055f5d3726a 100644 --- a/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr +++ b/frame/support/test/tests/construct_runtime_ui/more_than_256_modules.stderr @@ -1,4 +1,4 @@ -error: Module index doesn't fit into u8, index is 256 +error: Pallet index doesn't fit into u8, index is 256 --> $DIR/more_than_256_modules.rs:10:3 | 10 | Pallet256: pallet256::{}, diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs new file mode 100644 index 0000000000000..89774eb8a7702 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs @@ -0,0 +1,24 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl test_pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: test_pallet::{Pallet, Config}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr new file mode 100644 index 0000000000000..5bc831f58988b --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -0,0 +1,88 @@ +error: `Pallet` does not have the std feature enabled, this will cause the `test_pallet::GenesisConfig` type to be undefined. + --> $DIR/no_std_genesis_config.rs:13:1 + | +13 | / construct_runtime! { +14 | | pub enum Runtime where +15 | | Block = Block, +16 | | NodeBlock = Block, +... | +21 | | } +22 | | } + | |_^ + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/no_std_genesis_config.rs:19:11 + | +19 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/no_std_genesis_config.rs:13:1 + | +13 | / construct_runtime! { +14 | | pub enum Runtime where +15 | | Block = Block, +16 | | NodeBlock = Block, +... | +21 | | } +22 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/no_std_genesis_config.rs:13:1 + | +13 | / construct_runtime! { +14 | | pub enum Runtime where +15 | | Block = Block, +16 | | NodeBlock = Block, +... 
| +21 | | } +22 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use frame_support_test::Pallet; + | +1 | use frame_system::Pallet; + | +1 | use test_pallet::Pallet; + | + +error[E0412]: cannot find type `GenesisConfig` in crate `test_pallet` + --> $DIR/no_std_genesis_config.rs:13:1 + | +13 | / construct_runtime! { +14 | | pub enum Runtime where +15 | | Block = Block, +16 | | NodeBlock = Block, +... | +21 | | } +22 | | } + | |_^ not found in `test_pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this struct + | +1 | use frame_system::GenesisConfig; + | + +error[E0277]: the trait bound `Runtime: frame_system::pallet::Config` is not satisfied + --> $DIR/no_std_genesis_config.rs:11:6 + | +11 | impl test_pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^^^^^^ the trait `frame_system::pallet::Config` is not implemented for `Runtime` + | + ::: $WORKSPACE/frame/support/test/pallet/src/lib.rs + | + | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `Config` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs new file mode 100644 index 0000000000000..c5b9fcca1f318 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr new file mode 100644 index 0000000000000..8781fe0df201a --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -0,0 +1,73 @@ +error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove `Call` from construct_runtime? + --> $DIR/undefined_call_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_call_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_call_part.rs:22:1 + | +22 | / construct_runtime! 
{ +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_call_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::pallet::Pallet; + | +1 | use frame_support_test::Pallet; + | +1 | use frame_system::Pallet; + | +1 | use test_pallet::Pallet; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_call_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs new file mode 100644 index 0000000000000..6aec45f240c90 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Event}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr new file mode 100644 index 0000000000000..fa837698aa642 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -0,0 +1,111 @@ +error: `Pallet` does not have #[pallet::event] defined, perhaps you should remove `Event` from construct_runtime? + --> $DIR/undefined_event_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... 
| +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_event_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0412]: cannot find type `Event` in module `pallet` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::Event; + | + +error[E0412]: cannot find type `Event` in module `pallet` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::Event; + | +1 | use frame_system::Event; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_event_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::pallet::Pallet; + | +1 | use frame_support_test::Pallet; + | +1 | use frame_system::Pallet; + | +1 | use test_pallet::Pallet; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_event_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... 
+20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs new file mode 100644 index 0000000000000..5e08fd96fa1ad --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Config}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr new file mode 100644 index 0000000000000..699f66a414ed2 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -0,0 +1,91 @@ +error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you should remove `Config` from construct_runtime? + --> $DIR/undefined_genesis_config_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_genesis_config_part.rs:28:17 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_genesis_config_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_genesis_config_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... 
| +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::pallet::Pallet; + | +1 | use frame_support_test::Pallet; + | +1 | use frame_system::Pallet; + | +1 | use test_pallet::Pallet; + | + +error[E0412]: cannot find type `GenesisConfig` in module `pallet` + --> $DIR/undefined_genesis_config_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this struct + | +1 | use frame_system::GenesisConfig; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_genesis_config_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs new file mode 100644 index 0000000000000..06c36a30f5506 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Inherent}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr new file mode 100644 index 0000000000000..88ff9ee910937 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -0,0 +1,73 @@ +error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should remove `Inherent` from construct_runtime? + --> $DIR/undefined_inherent_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_inherent_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_inherent_part.rs:22:1 + | +22 | / construct_runtime! 
{ +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_inherent_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::pallet::Pallet; + | +1 | use frame_support_test::Pallet; + | +1 | use frame_system::Pallet; + | +1 | use test_pallet::Pallet; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_inherent_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs new file mode 100644 index 0000000000000..bec5c27ec0346 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, Origin}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr new file mode 100644 index 0000000000000..3b3aa75c1ea08 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -0,0 +1,111 @@ +error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remove `Origin` from construct_runtime? + --> $DIR/undefined_origin_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... 
| +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_origin_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_origin_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0412]: cannot find type `Origin` in module `pallet` + --> $DIR/undefined_origin_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this type alias + | +1 | use frame_system::Origin; + | + +error[E0412]: cannot find type `Origin` in module `pallet` + --> $DIR/undefined_origin_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `pallet` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::Origin; + | +1 | use frame_system::Origin; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_origin_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::pallet::Pallet; + | +1 | use frame_support_test::Pallet; + | +1 | use frame_system::Pallet; + | +1 | use test_pallet::Pallet; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_origin_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... 
+20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs new file mode 100644 index 0000000000000..816f52b91cccb --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet::{Pallet, ValidateUnsigned}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr new file mode 100644 index 0000000000000..ac12c56d5c279 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -0,0 +1,73 @@ +error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you should remove `ValidateUnsigned` from construct_runtime? + --> $DIR/undefined_validate_unsigned_part.rs:5:1 + | +5 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ +... +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_- in this macro invocation + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_validate_unsigned_part.rs:28:11 + | +28 | System: system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ use of undeclared crate or module `system` + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_validate_unsigned_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... | +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing this enum + | +1 | use frame_system::RawOrigin; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `system` + --> $DIR/undefined_validate_unsigned_part.rs:22:1 + | +22 | / construct_runtime! { +23 | | pub enum Runtime where +24 | | Block = Block, +25 | | NodeBlock = Block, +... 
| +30 | | } +31 | | } + | |_^ not found in `system` + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider importing one of these items + | +1 | use crate::pallet::Pallet; + | +1 | use frame_support_test::Pallet; + | +1 | use frame_system::Pallet; + | +1 | use test_pallet::Pallet; + | + +error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied + --> $DIR/undefined_validate_unsigned_part.rs:20:6 + | +8 | pub trait Config: frame_system::Config {} + | -------------------- required by this bound in `pallet::Config` +... +20 | impl pallet::Config for Runtime {} + | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` diff --git a/frame/support/test/tests/decl_module_ui.rs b/frame/support/test/tests/decl_module_ui.rs index 7df64bc52f412..2c097bb6e1332 100644 --- a/frame/support/test/tests/decl_module_ui.rs +++ b/frame/support/test/tests/decl_module_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn decl_module_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/decl_module_ui/*.rs"); diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs index 56eff29c5dc1b..cc7c1ff219d8b 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn integrity_test() {} fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 25f3b891d9b47..3bf5f58b43a39 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -2,7 +2,7 @@ error: `integrity_test` can only be passed once as input. --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! 
{ -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index 3e1bc25c8d59c..ddde7c72c1cc5 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { fn on_initialize() -> Weight { 0 } diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 34c5ff3f941a1..2911d7ded8a23 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -2,7 +2,7 @@ error: `on_initialize` can only be passed once as input. --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | @@ -16,7 +16,7 @@ error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::Origin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index 800ce459fed35..347a3130daa79 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,20 +21,22 @@ mod tests { use frame_support::metadata::*; use sp_io::TestExternalities; - use std::marker::PhantomData; - use codec::{Encode, Decode, EncodeLike}; frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } - pub trait Trait { - type Origin: Encode + Decode + EncodeLike + std::default::Default; - type BlockNumber; + pub trait Config: frame_support_test::Config { + type Origin2: codec::Codec + + codec::EncodeLike + + Default + + codec::MaxEncodedLen + + scale_info::TypeInfo; } frame_support::decl_storage! { - trait Store for Module as TestStorage { + generate_storage_info + trait Store for Module as TestStorage { // non-getters: pub / $default /// Hello, this is doc! @@ -45,7 +47,7 @@ mod tests { // getters: pub / $default // we need at least one type which uses T, otherwise GenesisConfig will complain. 
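The `generate_storage_info` flag added to `decl_storage!` above is what backs the `storage_info()` test later in this file: each entry must then report bounded `max_values`/`max_size`. The prefixes that test checks depend only on the pallet prefix and the entry name. A small sketch of that derivation, assuming the `twox-hash` crate stands in for `sp_core`'s hashing (same construction: two seeded xxHash64 runs, little-endian, concatenated):

```rust
use std::hash::Hasher;
use twox_hash::XxHash64;

/// xxHash64 of `data` with the given seed, as little-endian bytes.
fn twox64(data: &[u8], seed: u64) -> [u8; 8] {
    let mut hasher = XxHash64::with_seed(seed);
    hasher.write(data);
    hasher.finish().to_le_bytes()
}

/// Equivalent of `twox_128`: seeds 0 and 1, concatenated.
fn twox128(data: &[u8]) -> [u8; 16] {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&twox64(data, 0));
    out[8..].copy_from_slice(&twox64(data, 1));
    out
}

/// Shape of `frame_support::storage::storage_prefix`:
/// twox128(pallet prefix) ++ twox128(entry name), 32 bytes total.
fn storage_prefix(pallet: &[u8], entry: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    out[..16].copy_from_slice(&twox128(pallet));
    out[16..].copy_from_slice(&twox128(entry));
    out
}

fn main() {
    // The prefix the `storage_info` test expects for `TestStorage::U32`.
    println!("{:02x?}", storage_prefix(b"TestStorage", b"U32"));
}
```

Every entry's keys hang off this 32-byte prefix, which is why the test can predict each `prefix` field from the pallet and entry names alone.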
- GETU32 get(fn u32_getter): T::Origin; + GETU32 get(fn u32_getter): T::Origin2; pub PUBGETU32 get(fn pub_u32_getter): u32; GETU32WITHCONFIG get(fn u32_getter_with_config) config(): u32; pub PUBGETU32WITHCONFIG get(fn pub_u32_getter_with_config) config(): u32; @@ -60,23 +62,29 @@ mod tests { GetOptU32WithBuilderNone get(fn opt_u32_with_builder_none) build(|_| None): Option; // map non-getters: pub / $default - MAPU32: map hasher(blake2_128_concat) u32 => Option; - pub PUBMAPU32: map hasher(blake2_128_concat) u32 => Option; - MAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; - pub PUBMAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; + MAPU32 max_values(3): map hasher(blake2_128_concat) u32 => Option<[u8; 4]>; + pub PUBMAPU32: map hasher(blake2_128_concat) u32 => Option<[u8; 4]>; // map getters: pub / $default - GETMAPU32 get(fn map_u32_getter): map hasher(blake2_128_concat) u32 => String; - pub PUBGETMAPU32 get(fn pub_map_u32_getter): map hasher(blake2_128_concat) u32 => String; - + GETMAPU32 get(fn map_u32_getter): map hasher(blake2_128_concat) u32 => [u8; 4]; + pub PUBGETMAPU32 get(fn pub_map_u32_getter): map hasher(blake2_128_concat) u32 => [u8; 4]; GETMAPU32MYDEF get(fn map_u32_getter_mydef): - map hasher(blake2_128_concat) u32 => String = "map".into(); + map hasher(blake2_128_concat) u32 => [u8; 4] = *b"mapd"; pub PUBGETMAPU32MYDEF get(fn pub_map_u32_getter_mydef): - map hasher(blake2_128_concat) u32 => String = "pubmap".into(); + map hasher(blake2_128_concat) u32 => [u8; 4] = *b"pubm"; + + DOUBLEMAP max_values(3): double_map + hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Option<[u8; 4]>; + + DOUBLEMAP2: double_map + hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Option<[u8; 4]>; - COMPLEXTYPE1: ::std::vec::Vec<::Origin>; - COMPLEXTYPE2: (Vec)>>, u32); + COMPLEXTYPE1: (::std::option::Option,); + COMPLEXTYPE2: ([[(u16, Option<()>); 32]; 12], u32); COMPLEXTYPE3: [u32; 25]; + + NMAP: nmap hasher(blake2_128_concat) u32, hasher(twox_64_concat) u16 => u8; + NMAP2: nmap hasher(blake2_128_concat) u32 => u8; } add_extra_genesis { build(|_| {}); @@ -85,306 +93,498 @@ mod tests { struct TraitImpl {} - impl Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; + type PalletInfo = frame_support_test::PanicPalletInfo; + type DbWeight = (); } - const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - prefix: DecodeDifferent::Encode("TestStorage"), - entries: DecodeDifferent::Encode( - &[ + impl Config for TraitImpl { + type Origin2 = u32; + } + + fn expected_metadata() -> PalletStorageMetadata { + PalletStorageMetadata { + prefix: "TestStorage", + entries: vec![ StorageEntryMetadata { - name: DecodeDifferent::Encode("U32"), + name: "U32", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[ " Hello, this is doc!" 
]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![" Hello, this is doc!"], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBU32"), + name: "PUBU32", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("U32MYDEF"), + name: "U32MYDEF", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBU32MYDEF"), + name: "PUBU32MYDEF", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32"), + name: "GETU32", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Origin")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32"), + name: "PUBGETU32", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIG"), + name: "GETU32WITHCONFIG", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIG(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIG"), + name: "PUBGETU32WITHCONFIG", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIG(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32MYDEF"), + name: "GETU32MYDEF", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - 
DefaultByteGetter(&__GetByteStructGETU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32MYDEF"), + name: "PUBGETU32MYDEF", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![3, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIGMYDEF"), + name: "GETU32WITHCONFIGMYDEF", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![2, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEF"), + name: "PUBGETU32WITHCONFIGMYDEF", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![1, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEFOPT"), + name: "PUBGETU32WITHCONFIGMYDEFOPT", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEFOPT(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GetU32WithBuilder"), + name: "GetU32WithBuilder", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetU32WithBuilder(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GetOptU32WithBuilderSome"), + name: "GetOptU32WithBuilderSome", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetOptU32WithBuilderSome(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GetOptU32WithBuilderNone"), + name: "GetOptU32WithBuilderNone", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetOptU32WithBuilderNone(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: 
StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("MAPU32"), + name: "MAPU32", modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBMAPU32"), + name: "PUBMAPU32", modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("MAPU32MYDEF"), - modifier: StorageEntryModifier::Optional, + name: "GETMAPU32", + modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBMAPU32MYDEF"), - modifier: StorageEntryModifier::Optional, + name: "PUBGETMAPU32", + modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0, 0, 0, 0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GETMAPU32"), + name: "GETMAPU32MYDEF", modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![109, 97, 112, 100], // "map" + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32"), + name: "PUBGETMAPU32MYDEF", modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: 
StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![StorageHasher::Blake2_128Concat], + key: scale_info::meta_type::(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![112, 117, 98, 109], // "pubmap" + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("GETMAPU32MYDEF"), - modifier: StorageEntryModifier::Default, + name: "DOUBLEMAP", + modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Blake2_128Concat, + ], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), - modifier: StorageEntryModifier::Default, + name: "DOUBLEMAP2", + modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Blake2_128Concat, + ], + key: scale_info::meta_type::<(u32, u32)>(), + value: scale_info::meta_type::<[u8; 4]>(), }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "COMPLEXTYPE1", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(scale_info::meta_type::<(Option,)>()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "COMPLEXTYPE2", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(scale_info::meta_type::<( + [[(u16, Option<()>); 32]; 12], + u32, + )>()), + default: [0u8; 1156].to_vec(), + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE1"), + name: "COMPLEXTYPE3", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("::std::vec::Vec<::Origin>")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE1(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Plain(scale_info::meta_type::<[u32; 25]>()), + default: [0u8; 100].to_vec(), + docs: vec![], }, StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE2"), + name: "NMAP", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("(Vec)>>, u32)")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Map { + key: scale_info::meta_type::<(u32, u16)>(), + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![], }, StorageEntryMetadata { - name: 
DecodeDifferent::Encode("COMPLEXTYPE3"), + name: "NMAP2", modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("[u32; 25]")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE3(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + ], + } + } + + #[test] + fn storage_info() { + use frame_support::{ + storage::storage_prefix as prefix, + traits::{StorageInfo, StorageInfoTrait}, + }; + + pretty_assertions::assert_eq!( + >::storage_info(), + vec![ + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"U32".to_vec(), + prefix: prefix(b"TestStorage", b"U32").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBU32".to_vec(), + prefix: prefix(b"TestStorage", b"PUBU32").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"U32MYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"U32MYDEF").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBU32MYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"PUBU32MYDEF").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GETU32".to_vec(), + prefix: prefix(b"TestStorage", b"GETU32").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBGETU32".to_vec(), + prefix: prefix(b"TestStorage", b"PUBGETU32").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GETU32WITHCONFIG".to_vec(), + prefix: prefix(b"TestStorage", b"GETU32WITHCONFIG").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBGETU32WITHCONFIG".to_vec(), + prefix: prefix(b"TestStorage", b"PUBGETU32WITHCONFIG").to_vec(), + max_values: Some(1), + max_size: Some(4), }, - ] - ), - }; + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GETU32MYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"GETU32MYDEF").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBGETU32MYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"PUBGETU32MYDEF").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GETU32WITHCONFIGMYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"GETU32WITHCONFIGMYDEF").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBGETU32WITHCONFIGMYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"PUBGETU32WITHCONFIGMYDEF").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBGETU32WITHCONFIGMYDEFOPT".to_vec(), + prefix: prefix(b"TestStorage", b"PUBGETU32WITHCONFIGMYDEFOPT").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: 
b"GetU32WithBuilder".to_vec(), + prefix: prefix(b"TestStorage", b"GetU32WithBuilder").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GetOptU32WithBuilderSome".to_vec(), + prefix: prefix(b"TestStorage", b"GetOptU32WithBuilderSome").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GetOptU32WithBuilderNone".to_vec(), + prefix: prefix(b"TestStorage", b"GetOptU32WithBuilderNone").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"MAPU32".to_vec(), + prefix: prefix(b"TestStorage", b"MAPU32").to_vec(), + max_values: Some(3), + max_size: Some(8 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBMAPU32".to_vec(), + prefix: prefix(b"TestStorage", b"PUBMAPU32").to_vec(), + max_values: None, + max_size: Some(8 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GETMAPU32".to_vec(), + prefix: prefix(b"TestStorage", b"GETMAPU32").to_vec(), + max_values: None, + max_size: Some(8 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBGETMAPU32".to_vec(), + prefix: prefix(b"TestStorage", b"PUBGETMAPU32").to_vec(), + max_values: None, + max_size: Some(8 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"GETMAPU32MYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"GETMAPU32MYDEF").to_vec(), + max_values: None, + max_size: Some(8 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PUBGETMAPU32MYDEF".to_vec(), + prefix: prefix(b"TestStorage", b"PUBGETMAPU32MYDEF").to_vec(), + max_values: None, + max_size: Some(8 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"DOUBLEMAP".to_vec(), + prefix: prefix(b"TestStorage", b"DOUBLEMAP").to_vec(), + max_values: Some(3), + max_size: Some(12 + 16 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"DOUBLEMAP2".to_vec(), + prefix: prefix(b"TestStorage", b"DOUBLEMAP2").to_vec(), + max_values: None, + max_size: Some(12 + 16 + 16), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"COMPLEXTYPE1".to_vec(), + prefix: prefix(b"TestStorage", b"COMPLEXTYPE1").to_vec(), + max_values: Some(1), + max_size: Some(5), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"COMPLEXTYPE2".to_vec(), + prefix: prefix(b"TestStorage", b"COMPLEXTYPE2").to_vec(), + max_values: Some(1), + max_size: Some(1156), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"COMPLEXTYPE3".to_vec(), + prefix: prefix(b"TestStorage", b"COMPLEXTYPE3").to_vec(), + max_values: Some(1), + max_size: Some(100), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"NMAP".to_vec(), + prefix: prefix(b"TestStorage", b"NMAP").to_vec(), + max_values: None, + max_size: Some(16 + 4 + 8 + 2 + 1), + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"NMAP2".to_vec(), + prefix: prefix(b"TestStorage", b"NMAP2").to_vec(), + max_values: None, + max_size: Some(16 + 4 + 1), + }, + ], + ); + } #[test] fn store_metadata() { let metadata = Module::::storage_metadata(); - pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); + pretty_assertions::assert_eq!(expected_metadata(), metadata); } #[test] @@ -414,19 +614,16 @@ 
mod tests { #[cfg(test)] #[allow(dead_code)] mod test2 { - pub trait Trait { - type Origin; - type BlockNumber; - } + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } type PairOf = (T, T); frame_support::decl_storage! { - trait Store for Module as TestStorage { + trait Store for Module as TestStorage { SingleDef : u32; PairDef : PairOf; Single : Option; @@ -441,24 +638,67 @@ mod test2 { struct TraitImpl {} - impl Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; + type PalletInfo = frame_support_test::PanicPalletInfo; + type DbWeight = (); + } + + impl Config for TraitImpl {} + + #[test] + fn storage_info() { + use frame_support::{ + storage::storage_prefix as prefix, + traits::{StorageInfo, StorageInfoTrait}, + }; + pretty_assertions::assert_eq!( + >::storage_info(), + vec![ + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"SingleDef".to_vec(), + prefix: prefix(b"TestStorage", b"SingleDef").to_vec(), + max_values: Some(1), + max_size: None, + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"PairDef".to_vec(), + prefix: prefix(b"TestStorage", b"PairDef").to_vec(), + max_values: Some(1), + max_size: None, + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"Single".to_vec(), + prefix: prefix(b"TestStorage", b"Single").to_vec(), + max_values: Some(1), + max_size: None, + }, + StorageInfo { + pallet_name: b"TestStorage".to_vec(), + storage_name: b"Pair".to_vec(), + prefix: prefix(b"TestStorage", b"Pair").to_vec(), + max_values: Some(1), + max_size: None, + }, + ], + ); } } #[cfg(test)] #[allow(dead_code)] mod test3 { - pub trait Trait { - type Origin; - type BlockNumber; - } + pub trait Config: frame_support_test::Config {} + frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { Foo get(fn foo) config(initial_foo): u32; } } @@ -467,32 +707,33 @@ mod test3 { struct TraitImpl {} - impl Trait for TraitImpl { + impl frame_support_test::Config for TraitImpl { type Origin = u32; type BlockNumber = u32; + type PalletInfo = frame_support_test::PanicPalletInfo; + type DbWeight = (); } + + impl Config for TraitImpl {} } #[cfg(test)] #[allow(dead_code)] mod test_append_and_len { + use codec::{Decode, Encode}; use sp_io::TestExternalities; - use codec::{Encode, Decode}; - pub trait Trait { - type Origin; - type BlockNumber; - } + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } - #[derive(PartialEq, Eq, Clone, Encode, Decode)] + #[derive(PartialEq, Eq, Clone, Encode, Decode, scale_info::TypeInfo)] struct NoDef(u32); frame_support::decl_storage! 
{ - trait Store for Module as Test { + trait Store for Module as Test { NoDefault: Option; JustVec: Vec; @@ -511,11 +752,15 @@ mod test_append_and_len { struct Test {} - impl Trait for Test { + impl frame_support_test::Config for Test { type Origin = u32; type BlockNumber = u32; + type PalletInfo = frame_support_test::PanicPalletInfo; + type DbWeight = (); } + impl Config for Test {} + #[test] fn default_for_option() { TestExternalities::default().execute_with(|| { diff --git a/frame/support/test/tests/decl_storage_ui.rs b/frame/support/test/tests/decl_storage_ui.rs index 56529d62c28ff..99d2da87aca28 100644 --- a/frame/support/test/tests/decl_storage_ui.rs +++ b/frame/support/test/tests/decl_storage_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn decl_storage_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/decl_storage_ui/*.rs"); diff --git a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs index f4f4ad7d48a97..17f80c8c84755 100644 --- a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,17 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait { - type Origin; - type BlockNumber: codec::Codec + codec::EncodeLike + Default + Clone; -} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + trait Store for Module as FinalKeysNone { pub Value config(value): u32; pub Value2 config(value): u32; } diff --git a/frame/support/test/tests/decl_storage_ui/config_duplicate.stderr b/frame/support/test/tests/decl_storage_ui/config_duplicate.stderr index 61f7c0bbe64a5..f6303f277b56b 100644 --- a/frame/support/test/tests/decl_storage_ui/config_duplicate.stderr +++ b/frame/support/test/tests/decl_storage_ui/config_duplicate.stderr @@ -1,5 +1,5 @@ error: `config()`/`get()` with the same name already defined. - --> $DIR/config_duplicate.rs:30:21 + --> $DIR/config_duplicate.rs:27:21 | -30 | pub Value2 config(value): u32; +27 | pub Value2 config(value): u32; | ^^^^^ diff --git a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs index 3caa2d9c33608..fec6aeb64cec4 100644 --- a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,17 +15,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub trait Trait {
-	type Origin;
-	type BlockNumber: codec::Codec + codec::EncodeLike + Default + Clone;
-}
+pub trait Config: frame_support_test::Config {}

 frame_support::decl_module! {
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin, system=self {}
+	pub struct Module<T: Config> for enum Call where origin: T::Origin, system=frame_support_test {}
 }

 frame_support::decl_storage!{
-	trait Store for Module<T: Trait> as FinalKeysNone {
+	trait Store for Module<T: Config> as FinalKeysNone {
 		pub Value get(fn value) config(): u32;
 		pub Value2 config(value): u32;
 	}
diff --git a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.stderr b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.stderr
index 02e7d41080339..9377b718c0660 100644
--- a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.stderr
+++ b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.stderr
@@ -1,5 +1,5 @@
 error: `config()`/`get()` with the same name already defined.
-  --> $DIR/config_get_duplicate.rs:30:21
+  --> $DIR/config_get_duplicate.rs:27:21
    |
-30 |		pub Value2 config(value): u32;
+27 |		pub Value2 config(value): u32;
    |		           ^^^^^
diff --git a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs
index 1c24b3bf28eec..13c57a638bb18 100644
--- a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs
+++ b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,17 +15,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-pub trait Trait {
-	type Origin;
-	type BlockNumber: codec::Codec + codec::EncodeLike + Default + Clone;
-}
+pub trait Config: frame_support_test::Config {}

 frame_support::decl_module! {
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin, system=self {}
+	pub struct Module<T: Config> for enum Call where origin: T::Origin, system=frame_support_test {}
 }

 frame_support::decl_storage!{
-	trait Store for Module<T: Trait> as FinalKeysNone {
+	trait Store for Module<T: Config> as FinalKeysNone {
 		pub Value get(fn value) config(): u32;
 		pub Value2 get(fn value) config(): u32;
 	}
diff --git a/frame/support/test/tests/decl_storage_ui/get_duplicate.stderr b/frame/support/test/tests/decl_storage_ui/get_duplicate.stderr
index d9ce420a6f214..0039b10fb43b6 100644
--- a/frame/support/test/tests/decl_storage_ui/get_duplicate.stderr
+++ b/frame/support/test/tests/decl_storage_ui/get_duplicate.stderr
@@ -1,5 +1,5 @@
 error: `config()`/`get()` with the same name already defined.
-  --> $DIR/get_duplicate.rs:30:21
+  --> $DIR/get_duplicate.rs:27:21
    |
-30 |		pub Value2 get(fn value) config(): u32;
+27 |		pub Value2 get(fn value) config(): u32;
    |		           ^^^^^
diff --git a/frame/support/test/tests/derive_no_bound.rs b/frame/support/test/tests/derive_no_bound.rs
index 29f813c6498bb..1827844664fa7 100644
--- a/frame/support/test/tests/derive_no_bound.rs
+++ b/frame/support/test/tests/derive_no_bound.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tests for DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, and RuntimeDebugNoBound +//! Tests for DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound, and +//! RuntimeDebugNoBound -use frame_support::{DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; +use frame_support::{ + CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, +}; #[derive(RuntimeDebugNoBound)] struct Unnamed(u64); @@ -28,19 +31,19 @@ fn runtime_debug_no_bound_display_correctly() { assert_eq!(format!("{:?}", Unnamed(1)), "Unnamed(1)"); } -trait Trait { - type C: std::fmt::Debug + Clone + Eq + PartialEq; +trait Config { + type C: std::fmt::Debug + Clone + Eq + PartialEq + Default; } struct Runtime; struct ImplNone; -impl Trait for Runtime { +impl Config for Runtime { type C = u32; } -#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -struct StructNamed { +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] +struct StructNamed { a: u32, b: u64, c: T::C, @@ -56,6 +59,12 @@ fn test_struct_named() { phantom: Default::default(), }; + let a_default: StructNamed = Default::default(); + assert_eq!(a_default.a, 0); + assert_eq!(a_default.b, 0); + assert_eq!(a_default.c, 0); + assert_eq!(a_default.phantom, Default::default()); + let a_2 = a_1.clone(); assert_eq!(a_2.a, 1); assert_eq!(a_2.b, 2); @@ -76,54 +85,60 @@ fn test_struct_named() { assert!(b != a_1); } -#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>); +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] +struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>); #[test] fn test_struct_unnamed() { - let a_1 = StructUnnamed::( - 1, - 2, - 3, - Default::default(), - ); + let a_1 = StructUnnamed::(1, 2, 3, Default::default()); + + let a_default: StructUnnamed = Default::default(); + assert_eq!(a_default.0, 0); + assert_eq!(a_default.1, 0); + assert_eq!(a_default.2, 0); + assert_eq!(a_default.3, Default::default()); let a_2 = a_1.clone(); assert_eq!(a_2.0, 1); assert_eq!(a_2.1, 2); assert_eq!(a_2.2, 3); assert_eq!(a_2, a_1); - assert_eq!( - format!("{:?}", a_1), - String::from("StructUnnamed(1, 2, 3, PhantomData)") - ); + assert_eq!(format!("{:?}", a_1), String::from("StructUnnamed(1, 2, 3, PhantomData)")); - let b = StructUnnamed::( - 1, - 2, - 4, - Default::default(), - ); + let b = StructUnnamed::(1, 2, 4, Default::default()); assert!(b != a_1); } -#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] -enum Enum { +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] +enum Enum { VariantUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>), - VariantNamed { - a: u32, - b: u64, - c: T::C, - phantom: core::marker::PhantomData<(U, V)>, - }, + VariantNamed { a: u32, b: u64, c: T::C, phantom: core::marker::PhantomData<(U, V)> }, + VariantUnit, + VariantUnit2, +} + +// enum that will have a named default. 
+#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] +enum Enum2 { + VariantNamed { a: u32, b: u64, c: T::C }, + VariantUnnamed(u32, u64, T::C), VariantUnit, VariantUnit2, } +// enum that will have a unit default. +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] +enum Enum3 { + VariantUnit, + VariantNamed { a: u32, b: u64, c: T::C }, + VariantUnnamed(u32, u64, T::C), + VariantUnit2, +} + #[test] fn test_enum() { - type TestEnum = Enum::; + type TestEnum = Enum; let variant_0 = TestEnum::VariantUnnamed(1, 2, 3, Default::default()); let variant_0_bis = TestEnum::VariantUnnamed(1, 2, 4, Default::default()); let variant_1 = TestEnum::VariantNamed { a: 1, b: 2, c: 3, phantom: Default::default() }; @@ -131,6 +146,16 @@ fn test_enum() { let variant_2 = TestEnum::VariantUnit; let variant_3 = TestEnum::VariantUnit2; + let default: TestEnum = Default::default(); + assert_eq!( + default, + // first variant is default. + TestEnum::VariantUnnamed(0, 0, 0, Default::default()) + ); + + assert_eq!(Enum2::::default(), Enum2::::VariantNamed { a: 0, b: 0, c: 0 }); + assert_eq!(Enum3::::default(), Enum3::::VariantUnit); + assert!(variant_0 != variant_0_bis); assert!(variant_1 != variant_1_bis); assert!(variant_0 != variant_1); @@ -159,12 +184,6 @@ fn test_enum() { format!("{:?}", variant_1), String::from("Enum::VariantNamed { a: 1, b: 2, c: 3, phantom: PhantomData }"), ); - assert_eq!( - format!("{:?}", variant_2), - String::from("Enum::VariantUnit"), - ); - assert_eq!( - format!("{:?}", variant_3), - String::from("Enum::VariantUnit2"), - ); + assert_eq!(format!("{:?}", variant_2), String::from("Enum::VariantUnit")); + assert_eq!(format!("{:?}", variant_3), String::from("Enum::VariantUnit2")); } diff --git a/frame/support/test/tests/derive_no_bound_ui.rs b/frame/support/test/tests/derive_no_bound_ui.rs index da276018f7f8e..434671e19b105 100644 --- a/frame/support/test/tests/derive_no_bound_ui.rs +++ b/frame/support/test/tests/derive_no_bound_ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn derive_no_bound_ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
- std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/derive_no_bound_ui/*.rs"); diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.rs b/frame/support/test/tests/derive_no_bound_ui/clone.rs index 6b80dcedc3880..2bc1cc492d171 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.rs +++ b/frame/support/test/tests/derive_no_bound_ui/clone.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::CloneNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.stderr b/frame/support/test/tests/derive_no_bound_ui/clone.stderr index 4b9cccf0b0fa1..4b253ad12451b 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/clone.stderr @@ -1,7 +1,7 @@ -error[E0277]: the trait bound `::C: std::clone::Clone` is not satisfied +error[E0277]: the trait bound `::C: Clone` is not satisfied --> $DIR/clone.rs:7:2 | 7 | c: T::C, - | ^ the trait `std::clone::Clone` is not implemented for `::C` + | ^ the trait `Clone` is not implemented for `::C` | - = note: required by `std::clone::Clone::clone` + = note: required by `clone` diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.rs b/frame/support/test/tests/derive_no_bound_ui/debug.rs index f2411da4b41bc..6016c3e6d98b8 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.rs +++ b/frame/support/test/tests/derive_no_bound_ui/debug.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::DebugNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/frame/support/test/tests/derive_no_bound_ui/debug.stderr index 838bd7f68a65f..7580cab2ea0b3 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/debug.stderr @@ -1,8 +1,8 @@ -error[E0277]: `::C` doesn't implement `std::fmt::Debug` +error[E0277]: `::C` doesn't implement `std::fmt::Debug` --> $DIR/debug.rs:7:2 | 7 | c: T::C, - | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::C` + = help: the trait `std::fmt::Debug` is not implemented for `::C` = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/derive_no_bound_ui/default.rs b/frame/support/test/tests/derive_no_bound_ui/default.rs new file mode 100644 index 0000000000000..0780a88e6753d --- /dev/null +++ b/frame/support/test/tests/derive_no_bound_ui/default.rs @@ -0,0 +1,10 @@ +trait Config { + type C; +} + +#[derive(frame_support::DefaultNoBound)] +struct Foo { + c: T::C, +} + +fn main() {} diff --git a/frame/support/test/tests/derive_no_bound_ui/default.stderr b/frame/support/test/tests/derive_no_bound_ui/default.stderr new file mode 100644 index 0000000000000..d58b5e9185268 --- /dev/null +++ b/frame/support/test/tests/derive_no_bound_ui/default.stderr @@ -0,0 +1,7 @@ +error[E0277]: the trait bound `::C: std::default::Default` is not satisfied + --> $DIR/default.rs:7:2 + | +7 | c: T::C, + | ^ the trait `std::default::Default` is not implemented for `::C` + | + = note: required by `std::default::Default::default` diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.rs 
b/frame/support/test/tests/derive_no_bound_ui/eq.rs index 9e4026734fbeb..a48452626368c 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.rs +++ b/frame/support/test/tests/derive_no_bound_ui/eq.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::EqNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index 08341c4d65ab5..fce13d6f17f06 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr @@ -1,7 +1,12 @@ error[E0277]: can't compare `Foo` with `Foo` --> $DIR/eq.rs:6:8 | -6 | struct Foo { +6 | struct Foo { | ^^^ no implementation for `Foo == Foo` | - = help: the trait `std::cmp::PartialEq` is not implemented for `Foo` + ::: $RUST/core/src/cmp.rs + | + | pub trait Eq: PartialEq { + | --------------- required by this bound in `std::cmp::Eq` + | + = help: the trait `PartialEq` is not implemented for `Foo` diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs b/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs index 1720776a40029..7bd6b7ef6a2e3 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.rs @@ -1,9 +1,9 @@ -trait Trait { +trait Config { type C; } #[derive(frame_support::PartialEqNoBound)] -struct Foo { +struct Foo { c: T::C, } diff --git a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr index d85757c520aa1..64f844e547be0 100644 --- a/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/partial_eq.stderr @@ -1,7 +1,7 @@ -error[E0369]: binary operation `==` cannot be applied to type `::C` +error[E0369]: binary operation `==` cannot be applied to type `::C` --> $DIR/partial_eq.rs:7:2 | 7 | c: T::C, | ^ | - = note: the trait `std::cmp::PartialEq` is not implemented for `::C` + = note: the trait `std::cmp::PartialEq` is not implemented for `::C` diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index a9f0cdc8f184b..e89f961d893f5 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,25 +15,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::storage::unhashed; use codec::Encode; -use frame_support::{StorageDoubleMap, StorageMap, StorageValue, StoragePrefixedMap}; -use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; +use frame_support::{ + storage::unhashed, StorageDoubleMap, StorageMap, StoragePrefixedMap, StorageValue, +}; +use sp_io::{ + hashing::{blake2_128, twox_128, twox_64}, + TestExternalities, +}; mod no_instance { - use codec::{Encode, Decode, EncodeLike}; - - pub trait Trait { - type Origin; - type BlockNumber: Encode + Decode + EncodeLike + Default + Clone; - } + pub trait Config: frame_support_test::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } - frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { + frame_support::decl_storage! { + trait Store for Module as FinalKeysNone { pub Value config(value): u32; pub Map: map hasher(blake2_128_concat) u32 => u32; @@ -50,17 +49,15 @@ mod no_instance { } mod instance { - use super::no_instance; - - pub trait Trait: super::no_instance::Trait {} + pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module, I: Instance = DefaultInstance> - for enum Call where origin: T::Origin, system=no_instance {} + pub struct Module, I: Instance = DefaultInstance> + for enum Call where origin: T::Origin, system=frame_support_test {} } - frame_support::decl_storage!{ - trait Store for Module, I: Instance = DefaultInstance> + frame_support::decl_storage! { + trait Store for Module, I: Instance = DefaultInstance> as FinalKeysSome { pub Value config(value): u32; diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index af8b393800cf9..d488e8bfbfaff 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,31 +15,30 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub trait Trait { - type BlockNumber: codec::Codec + codec::EncodeLike + Default; - type Origin; -} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } frame_support::decl_storage! { - trait Store for Module as Test { + trait Store for Module as Test { pub AppendableDM config(t): double_map hasher(identity) u32, hasher(identity) T::BlockNumber => Vec; } } struct Test; -impl Trait for Test { +impl frame_support_test::Config for Test { type BlockNumber = u32; type Origin = (); + type PalletInfo = frame_support_test::PanicPalletInfo; + type DbWeight = (); } +impl Config for Test {} + #[test] fn init_genesis_config() { - GenesisConfig:: { - t: Default::default(), - }; + GenesisConfig:: { t: Default::default() }; } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index e1766082dd806..809edae14f80c 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,20 +15,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
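For orientation on what final_keys.rs above actually asserts: FRAME derives
every final storage key from hashed pallet and storage prefixes. A minimal
standalone sketch (assuming the sp-io and parity-scale-codec crates already
used in this diff; the concrete prefixes mirror the FinalKeysNone storage
above):

use codec::Encode;
use sp_io::hashing::{blake2_128, twox_128};

fn main() {
	// Plain value: twox128(module_prefix) ++ twox128(storage_prefix).
	let mut value_key = twox_128(b"FinalKeysNone").to_vec();
	value_key.extend_from_slice(&twox_128(b"Value"));
	assert_eq!(value_key.len(), 32);

	// blake2_128_concat map: ... ++ blake2_128(encoded_key) ++ encoded_key,
	// which is why the map key stays recoverable from the final storage key.
	let encoded = 1u32.encode();
	let mut map_key = twox_128(b"FinalKeysNone").to_vec();
	map_key.extend_from_slice(&twox_128(b"Map"));
	map_key.extend_from_slice(&blake2_128(&encoded));
	map_key.extend_from_slice(&encoded);
	assert_eq!(map_key.len(), 16 + 16 + 16 + 4);
}

The 16-byte hash output appended per map dimension is also where figures like
`max_size: Some(8 + 16)` in the StorageInfo expectations above come from:
encoded key plus encoded value plus one 16-byte hash per Blake2_128Concat
hasher.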
-#![recursion_limit="128"] +#![recursion_limit = "128"] -use codec::{Codec, EncodeLike, Encode, Decode}; -use sp_runtime::{generic, BuildStorage, traits::{BlakeTwo256, Block as _, Verify}}; +use codec::{Codec, Decode, Encode, EncodeLike}; use frame_support::{ - Parameter, traits::Get, parameter_types, + inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, metadata::{ - DecodeDifferent, StorageMetadata, StorageEntryModifier, StorageEntryType, DefaultByteGetter, - StorageEntryMetadata, StorageHasher, + PalletStorageMetadata, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + StorageHasher, }, - StorageValue, StorageMap, StorageDoubleMap, + parameter_types, + traits::Get, + Parameter, StorageDoubleMap, StorageMap, StorageValue, +}; +use scale_info::TypeInfo; +use sp_core::{sr25519, H256}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Verify}, + BuildStorage, }; -use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier, MakeFatalError}; -use sp_core::{H256, sr25519}; mod system; @@ -39,17 +45,21 @@ pub trait Currency {} // * Origin, Inherent, Event mod module1 { use super::*; + use sp_std::ops::Add; - pub trait Trait: system::Trait where ::BlockNumber: From { - type Event: From> + Into<::Event>; + pub trait Config: system::Config + where + ::BlockNumber: From, + { + type Event: From> + Into<::Event>; type Origin: From>; type SomeParameter: Get; - type GenericType: Default + Clone + Codec + EncodeLike; + type GenericType: Default + Clone + Codec + EncodeLike + TypeInfo; } frame_support::decl_module! { - pub struct Module, I: Instance> for enum Call where - origin: ::Origin, + pub struct Module, I: Instance> for enum Call where + origin: ::Origin, system = system, T::BlockNumber: From { @@ -66,7 +76,7 @@ mod module1 { } frame_support::decl_storage! { - trait Store for Module, I: Instance> as Module1 where + trait Store for Module, I: Instance> as Module1 where T::BlockNumber: From + std::fmt::Display { pub Value config(value): T::GenericType; @@ -82,7 +92,11 @@ mod module1 { } frame_support::decl_error! 
{ - pub enum Error for Module, I: Instance> where T::BlockNumber: From { + pub enum Error for Module, I: Instance> where + T::BlockNumber: From, + T::BlockNumber: Add, + T::AccountId: AsRef<[u8]>, + { /// Test Test, } @@ -95,26 +109,37 @@ mod module1 { } } - #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I> where T::BlockNumber: From { + #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode, TypeInfo)] + pub enum Origin, I> + where + T::BlockNumber: From, + { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: Instance> ProvideInherent for Module where - T::BlockNumber: From + impl, I: Instance> ProvideInherent for Module + where + T::BlockNumber: From, { type Call = Call; - type Error = MakeFatalError; + type Error = MakeFatalError<()>; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(_data: &InherentData) -> Option { unimplemented!(); } - fn check_inherent(_: &Self::Call, _: &InherentData) -> std::result::Result<(), Self::Error> { + fn check_inherent( + _: &Self::Call, + _: &InherentData, + ) -> std::result::Result<(), Self::Error> { + unimplemented!(); + } + + fn is_inherent(_call: &Self::Call) -> bool { unimplemented!(); } } @@ -126,17 +151,17 @@ mod module1 { mod module2 { use super::*; - pub trait Trait: system::Trait { + pub trait Config: system::Config { type Amount: Parameter + Default; - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; type Origin: From>; } - impl, I: Instance> Currency for Module {} + impl, I: Instance> Currency for Module {} frame_support::decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin, + pub struct Module, I: Instance=DefaultInstance> for enum Call where + origin: ::Origin, system = system { fn deposit_event() = default; @@ -144,7 +169,7 @@ mod module2 { } frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module2 { + trait Store for Module, I: Instance=DefaultInstance> as Module2 { pub Value config(value): T::Amount; pub Map config(map): map hasher(identity) u64 => u64; pub DoubleMap config(double_map): double_map hasher(identity) u64, hasher(identity) u64 => u64; @@ -152,29 +177,36 @@ mod module2 { } frame_support::decl_event! 
{ - pub enum Event where Amount = >::Amount { + pub enum Event where Amount = >::Amount { Variant(Amount), } } - #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I=DefaultInstance> { + #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode, TypeInfo)] + pub enum Origin, I = DefaultInstance> { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: Instance> ProvideInherent for Module { + impl, I: Instance> ProvideInherent for Module { type Call = Call; - type Error = MakeFatalError; + type Error = MakeFatalError<()>; const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(_data: &InherentData) -> Option { unimplemented!(); } - fn check_inherent(_call: &Self::Call, _data: &InherentData) -> std::result::Result<(), Self::Error> { + fn check_inherent( + _call: &Self::Call, + _data: &InherentData, + ) -> std::result::Result<(), Self::Error> { + unimplemented!(); + } + + fn is_inherent(_call: &Self::Call) -> bool { unimplemented!(); } } @@ -185,13 +217,15 @@ mod module2 { mod module3 { use super::*; - pub trait Trait: module2::Trait + module2::Trait + system::Trait { + pub trait Config: + module2::Config + module2::Config + system::Config + { type Currency: Currency; type Currency2: Currency; } frame_support::decl_module! { - pub struct Module for enum Call where origin: ::Origin, system=system {} + pub struct Module for enum Call where origin: ::Origin, system=system {} } } @@ -199,39 +233,39 @@ parameter_types! { pub const SomeValue: u32 = 100; } -impl module1::Trait for Runtime { +impl module1::Config for Runtime { type Event = Event; type Origin = Origin; type SomeParameter = SomeValue; type GenericType = u32; } -impl module1::Trait for Runtime { +impl module1::Config for Runtime { type Event = Event; type Origin = Origin; type SomeParameter = SomeValue; type GenericType = u32; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u16; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u32; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u32; type Event = Event; type Origin = Origin; } -impl module2::Trait for Runtime { +impl module2::Config for Runtime { type Amount = u64; type Event = Event; type Origin = Origin; } -impl module3::Trait for Runtime { +impl module3::Config for Runtime { type Currency = Module2_2; type Currency2 = Module2_3; } @@ -241,15 +275,16 @@ pub type AccountId = ::Signer; pub type BlockNumber = u64; pub type Index = u64; -impl system::Trait for Runtime { - type BaseCallFilter= (); +impl system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; type Event = Event; - type PalletInfo = (); + type PalletInfo = PalletInfo; type Call = Call; + type DbWeight = (); } frame_support::construct_runtime!( @@ -258,24 +293,24 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, + System: system::{Pallet, Call, Event}, Module1_1: module1::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, Module1_2: module1::::{ - Module, Call, Storage, Event, 
Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, - Module2: module2::{Module, Call, Storage, Event, Config, Origin, Inherent}, + Module2: module2::{Pallet, Call, Storage, Event, Config, Origin, Inherent}, Module2_1: module2::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, Module2_2: module2::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, Module2_3: module2::::{ - Module, Call, Storage, Event, Config, Origin, Inherent + Pallet, Call, Storage, Event, Config, Origin, Inherent }, - Module3: module3::{Module, Call}, + Module3: module3::{Pallet, Call}, } ); @@ -284,35 +319,32 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig{ - module1_Instance1: Some(module1::GenesisConfig { - value: 3, - test: 2, - }), - module1_Instance2: Some(module1::GenesisConfig { - value: 4, - test: 5, - }), - module2: Some(module2::GenesisConfig { + GenesisConfig { + module_1_1: module1::GenesisConfig { value: 3, test: 2 }, + module_1_2: module1::GenesisConfig { value: 4, test: 5 }, + module_2: module2::GenesisConfig { value: 4, map: vec![(0, 0)], double_map: vec![(0, 0, 0)], - }), - module2_Instance1: Some(module2::GenesisConfig { + }, + module_2_1: module2::GenesisConfig { value: 4, map: vec![(0, 0)], double_map: vec![(0, 0, 0)], - }), - module2_Instance2: None, - module2_Instance3: None, - }.build_storage().unwrap().into() + }, + module_2_2: Default::default(), + module_2_3: Default::default(), + } + .build_storage() + .unwrap() + .into() } #[test] fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), - children_default: std::collections::HashMap::new() + children_default: std::collections::HashMap::new(), }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); @@ -345,7 +377,7 @@ fn storage_with_instance_basic_operation() { assert_eq!(Value::get(), 1); assert_eq!(Value::take(), 1); assert_eq!(Value::get(), 0); - Value::mutate(|a| *a=2); + Value::mutate(|a| *a = 2); assert_eq!(Value::get(), 2); Value::kill(); assert_eq!(Value::exists(), false); @@ -358,7 +390,7 @@ fn storage_with_instance_basic_operation() { assert_eq!(Map::get(key), 1); assert_eq!(Map::take(key), 1); assert_eq!(Map::get(key), 0); - Map::mutate(key, |a| *a=2); + Map::mutate(key, |a| *a = 2); assert_eq!(Map::get(key), 2); Map::remove(key); assert_eq!(Map::contains_key(key), false); @@ -372,73 +404,52 @@ fn storage_with_instance_basic_operation() { assert_eq!(DoubleMap::get(&key1, &key2), 1); assert_eq!(DoubleMap::take(&key1, &key2), 1); assert_eq!(DoubleMap::get(&key1, &key2), 0); - DoubleMap::mutate(&key1, &key2, |a| *a=2); + DoubleMap::mutate(&key1, &key2, |a| *a = 2); assert_eq!(DoubleMap::get(&key1, &key2), 2); DoubleMap::remove(&key1, &key2); assert_eq!(DoubleMap::get(&key1, &key2), 0); }); } -const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - prefix: DecodeDifferent::Encode("Instance2Module2"), - entries: DecodeDifferent::Encode( - &[ +fn expected_metadata() -> PalletStorageMetadata { + PalletStorageMetadata { + prefix: "Instance2Module2", + entries: vec![ StorageEntryMetadata { - name: DecodeDifferent::Encode("Value"), + name: "Value", modifier: StorageEntryModifier::Default, - ty: 
StorageEntryType::Plain(DecodeDifferent::Encode("T::Amount")),
-			default: DecodeDifferent::Encode(
-				DefaultByteGetter(
-					&module2::__GetByteStructValue(
-						std::marker::PhantomData::<(Runtime, module2::Instance2)>
-					)
-				)
-			),
-			documentation: DecodeDifferent::Encode(&[]),
+			ty: StorageEntryType::Plain(scale_info::meta_type::<u32>()),
+			default: vec![0, 0, 0, 0],
+			docs: vec![],
 		},
 		StorageEntryMetadata {
-			name: DecodeDifferent::Encode("Map"),
+			name: "Map",
 			modifier: StorageEntryModifier::Default,
 			ty: StorageEntryType::Map {
-				hasher: StorageHasher::Identity,
-				key: DecodeDifferent::Encode("u64"),
-				value: DecodeDifferent::Encode("u64"),
-				unused: false,
+				hashers: vec![StorageHasher::Identity],
+				key: scale_info::meta_type::<u64>(),
+				value: scale_info::meta_type::<u64>(),
 			},
-			default: DecodeDifferent::Encode(
-				DefaultByteGetter(
-					&module2::__GetByteStructMap(
-						std::marker::PhantomData::<(Runtime, module2::Instance2)>
-					)
-				)
-			),
-			documentation: DecodeDifferent::Encode(&[]),
+			default: [0u8; 8].to_vec(),
+			docs: vec![],
 		},
 		StorageEntryMetadata {
-			name: DecodeDifferent::Encode("DoubleMap"),
+			name: "DoubleMap",
 			modifier: StorageEntryModifier::Default,
-			ty: StorageEntryType::DoubleMap {
-				hasher: StorageHasher::Identity,
-				key2_hasher: StorageHasher::Identity,
-				key1: DecodeDifferent::Encode("u64"),
-				key2: DecodeDifferent::Encode("u64"),
-				value: DecodeDifferent::Encode("u64"),
+			ty: StorageEntryType::Map {
+				hashers: vec![StorageHasher::Identity, StorageHasher::Identity],
+				key: scale_info::meta_type::<(u64, u64)>(),
+				value: scale_info::meta_type::<u64>(),
 			},
-			default: DecodeDifferent::Encode(
-				DefaultByteGetter(
-					&module2::__GetByteStructDoubleMap(
-						std::marker::PhantomData::<(Runtime, module2::Instance2)>
-					)
-				)
-			),
-			documentation: DecodeDifferent::Encode(&[]),
-		}
-	]
-)
-};
+			default: [0u8; 8].to_vec(),
+			docs: vec![],
+		},
+	],
+	}
+}

 #[test]
 fn test_instance_storage_metadata() {
 	let metadata = Module2_2::storage_metadata();
-	pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata);
+	pretty_assertions::assert_eq!(expected_metadata(), metadata);
 }
diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs
index 34310c2f5876f..68ad2a50a21bc 100644
--- a/frame/support/test/tests/issue2219.rs
+++ b/frame/support/test/tests/issue2219.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,31 +15,33 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
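One detail worth calling out in the expected_metadata() helpers above: the new
`default` field holds the SCALE encoding of the entry's default value, where
the old `DefaultByteGetter` computed those bytes lazily. A minimal sketch
(assuming only the parity-scale-codec crate already used throughout this
diff):

use codec::Encode;

fn main() {
	// Plain u32 entries encode little-endian, hence `vec![0, 0, 0, 0]`.
	assert_eq!(0u32.encode(), vec![0, 0, 0, 0]);
	// A non-zero default such as 3u32 becomes `vec![3, 0, 0, 0]`.
	assert_eq!(3u32.encode(), vec![3, 0, 0, 0]);
	// u64 map values default to eight zero bytes, i.e. `[0u8; 8].to_vec()`.
	assert_eq!(0u64.encode(), [0u8; 8].to_vec());
	// Optional entries default to None, which encodes as a single 0 byte,
	// matching the `default: vec![0]` expectations earlier in this diff.
	assert_eq!(Option::<u32>::None.encode(), vec![0]);
}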
-use frame_support::sp_runtime::generic; -use frame_support::sp_runtime::traits::{BlakeTwo256, Block as _, Verify}; -use frame_support::codec::{Encode, Decode}; -use sp_core::{H256, sr25519}; -use serde::{Serialize, Deserialize}; +use frame_support::{ + codec::{Decode, Encode}, + scale_info::TypeInfo, + sp_runtime::{ + generic, + traits::{BlakeTwo256, Verify}, + }, +}; +use serde::{Deserialize, Serialize}; +use sp_core::{sr25519, H256}; mod system; mod module { use super::*; - pub type Request = ( - ::AccountId, - Role, - ::BlockNumber, - ); + pub type Request = + (::AccountId, Role, ::BlockNumber); pub type Requests = Vec>; - #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] + #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug, TypeInfo)] pub enum Role { Storage, } - #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] - pub struct RoleParameters { + #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug, TypeInfo)] + pub struct RoleParameters { // minimum actors to maintain - if role is unstaking // and remaining actors would be less that this value - prevent or punish for unstaking pub min_actors: u32, @@ -65,7 +67,7 @@ mod module { pub startup_grace_period: T::BlockNumber, } - impl Default for RoleParameters { + impl Default for RoleParameters { fn default() -> Self { Self { max_actors: 10, @@ -81,27 +83,25 @@ mod module { } } - pub trait Trait: system::Trait {} + pub trait Config: system::Config + TypeInfo {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::Origin, system=system {} } #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] - pub struct Data { - pub data: T::BlockNumber, + pub struct Data { + pub data: T::BlockNumber, } - impl Default for Data { + impl Default for Data { fn default() -> Self { - Self { - data: T::BlockNumber::default(), - } + Self { data: T::BlockNumber::default() } } } frame_support::decl_storage! 
{ - trait Store for Module as Actors { + trait Store for Module as Actors { /// requirements to enter and maintain status in roles pub Parameters get(fn parameters) build(|config: &GenesisConfig| { if config.enable_storage_role { @@ -157,18 +157,19 @@ pub type Header = generic::Header; pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -impl system::Trait for Runtime { - type BaseCallFilter = (); +impl system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; type Event = Event; - type PalletInfo = (); + type PalletInfo = PalletInfo; type Call = Call; + type DbWeight = (); } -impl module::Trait for Runtime {} +impl module::Config for Runtime {} frame_support::construct_runtime!( pub enum Runtime where @@ -176,17 +177,14 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Module: module::{Module, Call, Storage, Config}, + System: system::{Pallet, Call, Event}, + Module: module::{Pallet, Call, Storage, Config}, } ); #[test] fn create_genesis_config() { GenesisConfig { - module: Some(module::GenesisConfig { - request_life_time: 0, - enable_storage_role: true, - }) + module: module::GenesisConfig { request_life_time: 0, enable_storage_role: true }, }; } diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs new file mode 100644 index 0000000000000..6a9a18ea48d4b --- /dev/null +++ b/frame/support/test/tests/pallet.rs @@ -0,0 +1,1677 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
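A note on the pattern recurring in issue2219.rs above and in the new pallet.rs
below: any type that ends up in storage or in the metadata now needs a
TypeInfo implementation, which is why `scale_info::TypeInfo` joins the codec
derives throughout this diff. A minimal sketch (the `Role` mirror below is
illustrative, not the test's actual module):

use codec::{Decode, Encode};
use scale_info::TypeInfo;

#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug, TypeInfo)]
pub enum Role {
	Storage,
}

fn main() {
	// This is what the `scale_info::meta_type::<T>()` calls in the metadata
	// expectations resolve to: a handle to the derived type information,
	// registered in the runtime's type registry when metadata is built.
	let _ty = scale_info::meta_type::<Role>();
}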
+ +use frame_support::{ + dispatch::{Parameter, UnfilteredDispatchable}, + storage::unhashed, + traits::{ + GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, + PalletInfoAccess, StorageVersion, + }, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, RuntimeDbWeight}, +}; +use scale_info::{meta_type, TypeInfo}; +use sp_io::{ + hashing::{blake2_128, twox_128, twox_64}, + TestExternalities, +}; +use sp_runtime::DispatchError; + +pub struct SomeType1; +impl From for u64 { + fn from(_t: SomeType1) -> Self { + 0u64 + } +} + +pub struct SomeType2; +impl From for u64 { + fn from(_t: SomeType2) -> Self { + 100u64 + } +} + +pub struct SomeType3; +impl From for u64 { + fn from(_t: SomeType3) -> Self { + 0u64 + } +} + +pub struct SomeType4; +impl From for u64 { + fn from(_t: SomeType4) -> Self { + 0u64 + } +} + +pub struct SomeType5; +impl From for u64 { + fn from(_t: SomeType5) -> Self { + 0u64 + } +} + +pub struct SomeType6; +impl From for u64 { + fn from(_t: SomeType6) -> Self { + 0u64 + } +} + +pub struct SomeType7; +impl From for u64 { + fn from(_t: SomeType7) -> Self { + 0u64 + } +} + +pub trait SomeAssociation1 { + type _1: Parameter + codec::MaxEncodedLen + TypeInfo; +} +impl SomeAssociation1 for u64 { + type _1 = u64; +} + +pub trait SomeAssociation2 { + type _2: Parameter + codec::MaxEncodedLen + TypeInfo; +} +impl SomeAssociation2 for u64 { + type _2 = u64; +} + +#[frame_support::pallet] +pub mod pallet { + use super::{ + SomeAssociation1, SomeAssociation2, SomeType1, SomeType2, SomeType3, SomeType4, SomeType5, + SomeType6, SomeType7, StorageVersion, + }; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use scale_info::TypeInfo; + + type BalanceOf = ::Balance; + + pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(10); + + #[pallet::config] + pub trait Config: frame_system::Config + where + ::AccountId: From + SomeAssociation1, + { + /// Some comment + /// Some comment + #[pallet::constant] + type MyGetParam: Get; + + /// Some comment + /// Some comment + #[pallet::constant] + type MyGetParam2: Get; + + #[pallet::constant] + type MyGetParam3: Get<::_1>; + + type Balance: Parameter + Default + TypeInfo; + + type Event: From> + IsType<::Event>; + } + + #[pallet::extra_constants] + impl Pallet + where + T::AccountId: From + SomeAssociation1 + From, + { + /// Some doc + /// Some doc + fn some_extra() -> T::AccountId { + SomeType2.into() + } + + /// Some doc + fn some_extra_extra() -> T::AccountId { + SomeType1.into() + } + } + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + #[pallet::generate_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet + where + T::AccountId: From + From + SomeAssociation1, + { + fn on_initialize(_: BlockNumberFor) -> Weight { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType2); // Test for where clause + Self::deposit_event(Event::Something(10)); + 10 + } + fn on_finalize(_: BlockNumberFor) { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType2); // Test for where clause + Self::deposit_event(Event::Something(20)); + } + fn on_runtime_upgrade() -> Weight { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType2); // Test for where clause + Self::deposit_event(Event::Something(30)); + 30 + } + fn integrity_test() { + T::AccountId::from(SomeType1); // Test for 
where clause + T::AccountId::from(SomeType2); // Test for where clause + } + } + + #[pallet::call] + impl Pallet + where + T::AccountId: From + From + SomeAssociation1, + { + /// Doc comment put in metadata + #[pallet::weight(Weight::from(*_foo))] + pub fn foo( + origin: OriginFor, + #[pallet::compact] _foo: u32, + _bar: u32, + ) -> DispatchResultWithPostInfo { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType3); // Test for where clause + let _ = origin; + Self::deposit_event(Event::Something(3)); + Ok(().into()) + } + + /// Doc comment put in metadata + #[pallet::weight(1)] + #[frame_support::transactional] + pub fn foo_transactional( + _origin: OriginFor, + #[pallet::compact] foo: u32, + ) -> DispatchResultWithPostInfo { + Self::deposit_event(Event::Something(0)); + if foo == 0 { + Err(Error::::InsufficientProposersBalance)?; + } + + Ok(().into()) + } + + // Test for DispatchResult return type + #[pallet::weight(1)] + pub fn foo_no_post_info(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + } + + #[pallet::error] + pub enum Error { + /// doc comment put into metadata + InsufficientProposersBalance, + } + + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event + where + T::AccountId: SomeAssociation1 + From, + { + /// doc comment put in metadata + Proposed(::AccountId), + /// doc + Spending(BalanceOf), + Something(u32), + SomethingElse(::_1), + } + + #[pallet::storage] + pub type ValueWhereClause + where + T::AccountId: SomeAssociation2, + = StorageValue<_, ::_2>; + + #[pallet::storage] + pub type Value = StorageValue; + + #[pallet::storage] + #[pallet::storage_prefix = "Value2"] + pub type RenamedValue = StorageValue; + + #[pallet::type_value] + pub fn MyDefault() -> u16 + where + T::AccountId: From + From + SomeAssociation1, + { + T::AccountId::from(SomeType7); // Test where clause works + 4u16 + } + + #[pallet::storage] + pub type Map + where + T::AccountId: From, + = StorageMap<_, Blake2_128Concat, u8, u16, ValueQuery, MyDefault>; + + #[pallet::storage] + pub type Map2 = + StorageMap>; + + #[pallet::storage] + pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; + + #[pallet::storage] + pub type DoubleMap2 = StorageDoubleMap< + Hasher1 = Twox64Concat, + Key1 = u16, + Hasher2 = Blake2_128Concat, + Key2 = u32, + Value = u64, + MaxValues = ConstU32<5>, + >; + + #[pallet::storage] + #[pallet::getter(fn nmap)] + pub type NMap = StorageNMap<_, storage::Key, u32>; + + #[pallet::storage] + #[pallet::getter(fn nmap2)] + pub type NMap2 = StorageNMap< + Key = (NMapKey, NMapKey), + Value = u64, + MaxValues = ConstU32<11>, + >; + + #[pallet::storage] + #[pallet::getter(fn conditional_value)] + #[cfg(feature = "conditional-storage")] + pub type ConditionalValue = StorageValue<_, u32>; + + #[cfg(feature = "conditional-storage")] + #[pallet::storage] + #[pallet::getter(fn conditional_map)] + pub type ConditionalMap = + StorageMap<_, Twox64Concat, u16, u32, OptionQuery, GetDefault, ConstU32<12>>; + + #[cfg(feature = "conditional-storage")] + #[pallet::storage] + #[pallet::getter(fn conditional_double_map)] + pub type ConditionalDoubleMap = + StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; + + #[cfg(feature = "conditional-storage")] + #[pallet::storage] + #[pallet::getter(fn conditional_nmap)] + pub type ConditionalNMap = + StorageNMap<_, (storage::Key, storage::Key), u32>; + + #[pallet::storage] + #[pallet::storage_prefix = "RenamedCountedMap"] + #[pallet::getter(fn 
counted_storage_map)] + pub type SomeCountedStorageMap = + CountedStorageMap; + + #[pallet::genesis_config] + #[derive(Default)] + pub struct GenesisConfig { + _myfield: u32, + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig + where + T::AccountId: From + SomeAssociation1 + From, + { + fn build(&self) { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType4); // Test for where clause + } + } + + #[pallet::origin] + #[derive( + EqNoBound, RuntimeDebugNoBound, CloneNoBound, PartialEqNoBound, Encode, Decode, TypeInfo, + )] + pub struct Origin(PhantomData); + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet + where + T::AccountId: From + SomeAssociation1 + From + From, + { + type Call = Call; + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType5); // Test for where clause + if matches!(call, Call::foo_transactional { .. }) { + return Ok(ValidTransaction::default()) + } + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet + where + T::AccountId: From + SomeAssociation1 + From + From, + { + type Call = Call; + type Error = InherentError; + + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &InherentData) -> Option { + T::AccountId::from(SomeType1); // Test for where clause + T::AccountId::from(SomeType6); // Test for where clause + Some(Call::foo_no_post_info {}) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::foo_no_post_info {} | Call::foo { .. }) + } + + fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + match call { + Call::foo_no_post_info {} => Ok(()), + Call::foo { foo: 0, bar: 0 } => Err(InherentError::Fatal), + Call::foo { .. } => Ok(()), + _ => unreachable!("other calls are not inherents"), + } + } + + fn is_inherent_required(d: &InherentData) -> Result, Self::Error> { + match d.get_data::(b"required") { + Ok(Some(true)) => Ok(Some(InherentError::Fatal)), + Ok(Some(false)) | Ok(None) => Ok(None), + Err(_) => unreachable!("should not happen in tests"), + } + } + } + + #[derive(codec::Encode, sp_runtime::RuntimeDebug)] + #[cfg_attr(feature = "std", derive(codec::Decode))] + pub enum InherentError { + Fatal, + } + + impl frame_support::inherent::IsFatalError for InherentError { + fn is_fatal_error(&self) -> bool { + matches!(self, InherentError::Fatal) + } + } + + pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"testpall"; +} + +// Test that a pallet with non generic event and generic genesis_config is correctly handled +// and that a pallet without the attribute generate_storage_info is correctly handled. 
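// A minimal sketch (editor's illustration, not part of the patch) of what
// `#[pallet::generate_storage_info]` buys the pallet above and what `pallet2`
// below gives up by omitting it, assuming the `Runtime` assembled later in
// this file:
//
//     use frame_support::traits::StorageInfoTrait;
//     // With the attribute: bounded entries report `max_size: Some(..)`.
//     let bounded = pallet::Pallet::<Runtime>::storage_info();
//     // Without it: sizes come back as `None`, as `test_storage_info`
//     // asserts for `Example2` at the end of this file.
//     let unbounded = pallet2::Pallet::<Runtime>::storage_info();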
+#[frame_support::pallet] +pub mod pallet2 { + use super::{SomeAssociation1, SomeType1}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config + where + ::AccountId: From + SomeAssociation1, + { + type Event: From + IsType<::Event>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet where + T::AccountId: From + SomeAssociation1 + { + } + + #[pallet::call] + impl Pallet where T::AccountId: From + SomeAssociation1 {} + + #[pallet::storage] + pub type SomeValue = StorageValue<_, Vec>; + + #[pallet::storage] + pub type SomeCountedStorageMap = + CountedStorageMap; + + #[pallet::event] + pub enum Event { + /// Something + Something(u32), + } + + #[pallet::genesis_config] + pub struct GenesisConfig + where + T::AccountId: From + SomeAssociation1, + { + phantom: PhantomData, + } + + impl Default for GenesisConfig + where + T::AccountId: From + SomeAssociation1, + { + fn default() -> Self { + GenesisConfig { phantom: Default::default() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig + where + T::AccountId: From + SomeAssociation1, + { + fn build(&self) {} + } +} + +/// Test that the supertrait check works when we pass some parameter to the `frame_system::Config`. +#[frame_support::pallet] +pub mod pallet3 { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +frame_support::parameter_types!( + pub const MyGetParam: u32 = 10; + pub const MyGetParam2: u32 = 11; + pub const MyGetParam3: u32 = 12; + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type Origin = Origin; + type Index = u64; + type BlockNumber = u32; + type Call = Call; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type MyGetParam = MyGetParam; + type MyGetParam2 = MyGetParam2; + type MyGetParam3 = MyGetParam3; + type Balance = u64; +} + +impl pallet2::Config for Runtime { + type Event = Event; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Pallet, Call, Event}, + Example: pallet::{Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, + Example2: pallet2::{Pallet, Call, Event, Config, Storage}, + } +); + +#[test] +fn transactional_works() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + + pallet::Call::::foo_transactional { foo: 0 } + .dispatch_bypass_filter(None.into()) + .err() + .unwrap(); + assert!(frame_system::Pallet::::events().is_empty()); + + 
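// `foo_transactional` deposits `Event::Something(0)` before failing on
// `foo == 0`, yet `events()` is empty above: `#[frame_support::transactional]`
// rolls the whole dispatch back, deposited event included. The successful
// dispatch below keeps its event.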
pallet::Call::::foo_transactional { foo: 1 } + .dispatch_bypass_filter(None.into()) + .unwrap(); + assert_eq!( + frame_system::Pallet::::events() + .iter() + .map(|e| &e.event) + .collect::>(), + vec![&Event::Example(pallet::Event::Something(0))], + ); + }) +} + +#[test] +fn call_expand() { + let call_foo = pallet::Call::::foo { foo: 3, bar: 0 }; + assert_eq!( + call_foo.get_dispatch_info(), + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + ); + assert_eq!(call_foo.get_call_name(), "foo"); + assert_eq!( + pallet::Call::::get_call_names(), + &["foo", "foo_transactional", "foo_no_post_info"], + ); +} + +#[test] +fn error_expand() { + assert_eq!( + format!("{:?}", pallet::Error::::InsufficientProposersBalance), + String::from("InsufficientProposersBalance"), + ); + assert_eq!( + <&'static str>::from(pallet::Error::::InsufficientProposersBalance), + "InsufficientProposersBalance", + ); + assert_eq!( + DispatchError::from(pallet::Error::::InsufficientProposersBalance), + DispatchError::Module { index: 1, error: 0, message: Some("InsufficientProposersBalance") }, + ); +} + +#[test] +fn instance_expand() { + // Assert same type. + let _: pallet::__InherentHiddenInstance = (); +} + +#[test] +fn inherent_expand() { + use frame_support::{ + inherent::{BlockT, InherentData}, + traits::EnsureInherentsAreFirst, + }; + use sp_core::Hasher; + use sp_runtime::{ + traits::{BlakeTwo256, Header}, + Digest, + }; + + let inherents = InherentData::new().create_extrinsics(); + + let expected = vec![UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info {}), + signature: None, + }]; + assert_eq!(expected, inherents); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info {}), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo { foo: 1, bar: 0 }), + signature: None, + }, + ], + ); + + assert!(InherentData::new().check_extrinsics(&block).ok()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info {}), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo { foo: 0, bar: 0 }), + signature: None, + }, + ], + ); + + assert!(InherentData::new().check_extrinsics(&block).fatal_error()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_transactional { foo: 0 }), + signature: None, + }], + ); + + let mut inherent = InherentData::new(); + inherent.put_data(*b"required", &true).unwrap(); + assert!(inherent.check_extrinsics(&block).fatal_error()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info {}), + signature: Some((1, (), ())), + }], + ); + + let mut inherent = InherentData::new(); + inherent.put_data(*b"required", &true).unwrap(); + assert!(inherent.check_extrinsics(&block).fatal_error()); + + let block = Block::new( + 
Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_transactional { foo: 0 }), + signature: None, + }, + ], + ); + + assert!(Runtime::ensure_inherents_are_first(&block).is_ok()); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_transactional { foo: 0 }), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info {}), + signature: None, + }, + ], + ); + + assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); + + let block = Block::new( + Header::new( + 1, + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + BlakeTwo256::hash(b"test"), + Digest::default(), + ), + vec![ + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo { foo: 1, bar: 0 }), + signature: Some((1, (), ())), + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info {}), + signature: None, + }, + ], + ); + + assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); +} + +#[test] +fn validate_unsigned_expand() { + use frame_support::pallet_prelude::{ + InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, + ValidateUnsigned, + }; + let call = pallet::Call::::foo_no_post_info {}; + + let validity = pallet::Pallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(); + assert_eq!(validity, TransactionValidityError::Invalid(InvalidTransaction::Call)); + + let call = pallet::Call::::foo_transactional { foo: 0 }; + + let validity = pallet::Pallet::validate_unsigned(TransactionSource::External, &call).unwrap(); + assert_eq!(validity, ValidTransaction::default()); +} + +#[test] +fn trait_store_expand() { + TestExternalities::default().execute_with(|| { + as pallet::Store>::Value::get(); + as pallet::Store>::Map::get(1); + as pallet::Store>::DoubleMap::get(1, 2); + }) +} + +#[test] +fn pallet_expand_deposit_event() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + pallet::Call::::foo { foo: 3, bar: 0 } + .dispatch_bypass_filter(None.into()) + .unwrap(); + assert_eq!( + frame_system::Pallet::::events()[0].event, + Event::Example(pallet::Event::Something(3)), + ); + }) +} + +#[test] +fn pallet_new_call_variant() { + Call::Example(pallet::Call::new_call_variant_foo(3, 4)); +} + +#[test] +fn storage_expand() { + use frame_support::{pallet_prelude::*, storage::StoragePrefixedMap}; + + fn twox_64_concat(d: &[u8]) -> Vec { + let mut v = twox_64(d).to_vec(); + v.extend_from_slice(d); + v + } + + fn blake2_128_concat(d: &[u8]) -> Vec { + let mut v = blake2_128(d).to_vec(); + v.extend_from_slice(d); + v + } + + TestExternalities::default().execute_with(|| { + pallet::Value::::put(1); + let k = [twox_128(b"Example"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + pallet::RenamedValue::::put(2); + let k = 
[twox_128(b"Example"), twox_128(b"Value2")].concat(); + assert_eq!(unhashed::get::(&k), Some(2)); + + pallet::Map::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u16)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::Map2::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::DoubleMap::::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::DoubleMap2::::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::NMap::::insert((&1,), &3); + let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + pallet::NMap2::::insert((&1, &2), &3); + let mut k = [twox_128(b"Example"), twox_128(b"NMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + + #[cfg(feature = "conditional-storage")] + { + pallet::ConditionalValue::::put(1); + pallet::ConditionalMap::::insert(1, 2); + pallet::ConditionalDoubleMap::::insert(1, 2, 3); + pallet::ConditionalNMap::::insert((1, 2), 3); + } + + pallet::SomeCountedStorageMap::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"RenamedCountedMap")].concat(); + k.extend(1u8.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + let k = [twox_128(b"Example"), twox_128(b"CounterForRenamedCountedMap")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + }) +} + +#[test] +fn pallet_hooks_expand() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + + assert_eq!(AllPallets::on_initialize(1), 10); + AllPallets::on_finalize(1); + + assert_eq!(AllPallets::on_runtime_upgrade(), 30); + + assert_eq!( + frame_system::Pallet::::events()[0].event, + Event::Example(pallet::Event::Something(10)), + ); + assert_eq!( + frame_system::Pallet::::events()[1].event, + Event::Example(pallet::Event::Something(20)), + ); + assert_eq!( + frame_system::Pallet::::events()[2].event, + Event::Example(pallet::Event::Something(30)), + ); + }) +} + +#[test] +fn pallet_on_genesis() { + TestExternalities::default().execute_with(|| { + assert_eq!(pallet::Pallet::::on_chain_storage_version(), StorageVersion::new(0)); + pallet::Pallet::::on_genesis(); + assert_eq!( + pallet::Pallet::::current_storage_version(), + pallet::Pallet::::on_chain_storage_version(), + ); + }) +} + +#[test] +fn migrate_from_pallet_version_to_storage_version() { + const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; + + fn pallet_version_key(name: &str) -> [u8; 32] { + frame_support::storage::storage_prefix(name.as_bytes(), 
PALLET_VERSION_STORAGE_KEY_POSTFIX) + } + + TestExternalities::default().execute_with(|| { + // Insert some fake pallet versions + sp_io::storage::set(&pallet_version_key(Example::name()), &[1, 2, 3]); + sp_io::storage::set(&pallet_version_key(Example2::name()), &[1, 2, 3]); + sp_io::storage::set(&pallet_version_key(System::name()), &[1, 2, 3]); + + // Check that everyone currently is at version 0 + assert_eq!(Example::on_chain_storage_version(), StorageVersion::new(0)); + assert_eq!(Example2::on_chain_storage_version(), StorageVersion::new(0)); + assert_eq!(System::on_chain_storage_version(), StorageVersion::new(0)); + + let db_weight = RuntimeDbWeight { read: 0, write: 5 }; + let weight = frame_support::migrations::migrate_from_pallet_version_to_storage_version::< + AllPalletsWithSystem, + >(&db_weight); + + // 3 pallets, 2 writes and every write costs 5 weight. + assert_eq!(3 * 2 * 5, weight); + + // All pallet versions should be removed + assert!(sp_io::storage::get(&pallet_version_key(Example::name())).is_none()); + assert!(sp_io::storage::get(&pallet_version_key(Example2::name())).is_none()); + assert!(sp_io::storage::get(&pallet_version_key(System::name())).is_none()); + + assert_eq!(Example::on_chain_storage_version(), pallet::STORAGE_VERSION); + assert_eq!(Example2::on_chain_storage_version(), StorageVersion::new(0)); + assert_eq!(System::on_chain_storage_version(), StorageVersion::new(0)); + }); +} + +#[test] +fn metadata() { + use frame_support::metadata::*; + + let pallets = vec![ + PalletMetadata { + index: 0, + name: "System", + storage: None, + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![ + PalletConstantMetadata { + name: "BlockWeights", + ty: meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "BlockLength", + ty: meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "BlockHashCount", + ty: meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "DbWeight", + ty: meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "Version", + ty: meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "SS58Prefix", + ty: meta_type::(), + value: vec![], + docs: vec![], + }, + ], + error: Some(meta_type::>().into()), + }, + PalletMetadata { + index: 1, + name: "Example", + storage: Some(PalletStorageMetadata { + prefix: "Example", + entries: vec![ + StorageEntryMetadata { + name: "ValueWhereClause", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Value", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Value2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Map", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + }, + default: vec![4, 0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Map2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Twox64Concat], + }, + default: vec![0], + docs: vec![], + }, + 
StorageEntryMetadata { + name: "DoubleMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + key: meta_type::<(u8, u16)>(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "DoubleMap2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + key: meta_type::<(u16, u32)>(), + hashers: vec![ + StorageHasher::Twox64Concat, + StorageHasher::Blake2_128Concat, + ], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "NMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "NMap2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u16, u32)>(), + hashers: vec![ + StorageHasher::Twox64Concat, + StorageHasher::Blake2_128Concat, + ], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalValue", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Twox64Concat], + }, + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalDoubleMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + }, + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalNMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "RenamedCountedMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Twox64Concat], + key: meta_type::(), + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "CounterForRenamedCountedMap", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec!["Counter for the related counted storage map"], + }, + ], + }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![ + PalletConstantMetadata { + name: "MyGetParam", + ty: meta_type::(), + value: vec![10, 0, 0, 0], + docs: vec![" Some comment", " Some comment"], + }, + PalletConstantMetadata { + name: "MyGetParam2", + ty: meta_type::(), + value: vec![11, 0, 0, 0], + docs: vec![" Some comment", " Some comment"], + }, + PalletConstantMetadata { + name: "MyGetParam3", + ty: meta_type::(), + value: vec![12, 0, 0, 0, 0, 0, 0, 0], + docs: vec![], + }, + PalletConstantMetadata { + name: "some_extra", + ty: 
meta_type::(), + value: vec![100, 0, 0, 0, 0, 0, 0, 0], + docs: vec![" Some doc", " Some doc"], + }, + PalletConstantMetadata { + name: "some_extra_extra", + ty: meta_type::(), + value: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![" Some doc"], + }, + ], + error: Some(PalletErrorMetadata { ty: meta_type::>() }), + }, + PalletMetadata { + index: 1, + name: "Example", + storage: Some(PalletStorageMetadata { + prefix: "Example", + entries: vec![ + StorageEntryMetadata { + name: "ValueWhereClause", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Value", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Value2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Map", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + }, + default: vec![4, 0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Map2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Twox64Concat], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "DoubleMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "DoubleMap2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + key: meta_type::<(u16, u32)>(), + hashers: vec![ + StorageHasher::Twox64Concat, + StorageHasher::Blake2_128Concat, + ], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "NMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "NMap2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u16, u32)>(), + hashers: vec![ + StorageHasher::Twox64Concat, + StorageHasher::Blake2_128Concat, + ], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalValue", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Twox64Concat], + }, + default: vec![0], + docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalDoubleMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + }, + default: vec![0], + 
docs: vec![], + }, + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { + name: "ConditionalNMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "RenamedCountedMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hashers: vec![StorageHasher::Twox64Concat], + key: meta_type::(), + value: meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "CounterForRenamedCountedMap", + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(meta_type::()), + default: vec![0, 0, 0, 0], + docs: vec!["Counter for the related counted storage map"], + }, + ], + }), + calls: Some(meta_type::>().into()), + event: Some(meta_type::>().into()), + constants: vec![ + PalletConstantMetadata { + name: "MyGetParam", + ty: meta_type::(), + value: vec![10, 0, 0, 0], + docs: vec![" Some comment", " Some comment"], + }, + PalletConstantMetadata { + name: "MyGetParam2", + ty: meta_type::(), + value: vec![11, 0, 0, 0], + docs: vec![" Some comment", " Some comment"], + }, + PalletConstantMetadata { + name: "MyGetParam3", + ty: meta_type::(), + value: vec![12, 0, 0, 0, 0, 0, 0, 0], + docs: vec![], + }, + PalletConstantMetadata { + name: "some_extra", + ty: meta_type::(), + value: vec![100, 0, 0, 0, 0, 0, 0, 0], + docs: vec![" Some doc", " Some doc"], + }, + PalletConstantMetadata { + name: "some_extra_extra", + ty: meta_type::(), + value: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![" Some doc"], + }, + ], + error: Some(PalletErrorMetadata { ty: meta_type::>() }), + }, + PalletMetadata { + index: 2, + name: "Example2", + storage: Some(PalletStorageMetadata { + prefix: "Example2", + entries: vec![StorageEntryMetadata { + name: "SomeValue", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::>()), + default: vec![0], + docs: vec![], + }], + }), + calls: Some(meta_type::>().into()), + event: Some(PalletEventMetadata { ty: meta_type::() }), + constants: vec![], + error: None, + }, + ]; + + let extrinsic = ExtrinsicMetadata { + ty: meta_type::(), + version: 4, + signed_extensions: vec![SignedExtensionMetadata { + identifier: "UnitSignedExtension", + ty: meta_type::<()>(), + additional_signed: meta_type::<()>(), + }], + }; + + let expected_metadata: RuntimeMetadataPrefixed = + RuntimeMetadataLastVersion::new(pallets, extrinsic, meta_type::()).into(); + let expected_metadata = match expected_metadata.1 { + RuntimeMetadata::V14(metadata) => metadata, + _ => panic!("metadata has been bumped, test needs to be updated"), + }; + + let actual_metadata = match Runtime::metadata().1 { + RuntimeMetadata::V14(metadata) => metadata, + _ => panic!("metadata has been bumped, test needs to be updated"), + }; + + pretty_assertions::assert_eq!(actual_metadata.pallets[1], expected_metadata.pallets[1]); +} + +#[test] +fn test_pallet_info_access() { + assert_eq!(::name(), "System"); + assert_eq!(::name(), "Example"); + assert_eq!(::name(), "Example2"); + + assert_eq!(::index(), 0); + assert_eq!(::index(), 1); + assert_eq!(::index(), 2); +} + +#[test] +fn test_storage_info() { + use frame_support::{ + storage::storage_prefix as prefix, + traits::{StorageInfo, StorageInfoTrait}, + }; + + assert_eq!( + Example::storage_info(), + vec![ + StorageInfo { + pallet_name: 
b"Example".to_vec(), + storage_name: b"ValueWhereClause".to_vec(), + prefix: prefix(b"Example", b"ValueWhereClause").to_vec(), + max_values: Some(1), + max_size: Some(8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Value".to_vec(), + prefix: prefix(b"Example", b"Value").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Value2".to_vec(), + prefix: prefix(b"Example", b"Value2").to_vec(), + max_values: Some(1), + max_size: Some(8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Map".to_vec(), + prefix: prefix(b"Example", b"Map").to_vec(), + max_values: None, + max_size: Some(3 + 16), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Map2".to_vec(), + prefix: prefix(b"Example", b"Map2").to_vec(), + max_values: Some(3), + max_size: Some(6 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"DoubleMap".to_vec(), + prefix: prefix(b"Example", b"DoubleMap").to_vec(), + max_values: None, + max_size: Some(7 + 16 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"DoubleMap2".to_vec(), + prefix: prefix(b"Example", b"DoubleMap2").to_vec(), + max_values: Some(5), + max_size: Some(14 + 8 + 16), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"NMap".to_vec(), + prefix: prefix(b"Example", b"NMap").to_vec(), + max_values: None, + max_size: Some(5 + 16), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"NMap2".to_vec(), + prefix: prefix(b"Example", b"NMap2").to_vec(), + max_values: Some(11), + max_size: Some(14 + 8 + 16), + }, + #[cfg(feature = "conditional-storage")] + { + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"ConditionalValue".to_vec(), + prefix: prefix(b"Example", b"ConditionalValue").to_vec(), + max_values: Some(1), + max_size: Some(4), + } + }, + #[cfg(feature = "conditional-storage")] + { + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"ConditionalMap".to_vec(), + prefix: prefix(b"Example", b"ConditionalMap").to_vec(), + max_values: Some(12), + max_size: Some(6 + 8), + } + }, + #[cfg(feature = "conditional-storage")] + { + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"ConditionalDoubleMap".to_vec(), + prefix: prefix(b"Example", b"ConditionalDoubleMap").to_vec(), + max_values: None, + max_size: Some(7 + 16 + 8), + } + }, + #[cfg(feature = "conditional-storage")] + { + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"ConditionalNMap".to_vec(), + prefix: prefix(b"Example", b"ConditionalNMap").to_vec(), + max_values: None, + max_size: Some(7 + 16 + 8), + } + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"RenamedCountedMap".to_vec(), + prefix: prefix(b"Example", b"RenamedCountedMap").to_vec(), + max_values: None, + max_size: Some(1 + 4 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"CounterForRenamedCountedMap".to_vec(), + prefix: prefix(b"Example", b"CounterForRenamedCountedMap").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + ], + ); + + assert_eq!( + Example2::storage_info(), + vec![ + StorageInfo { + pallet_name: b"Example2".to_vec(), + storage_name: b"SomeValue".to_vec(), + prefix: prefix(b"Example2", b"SomeValue").to_vec(), + max_values: Some(1), + max_size: None, + }, + StorageInfo { + pallet_name: b"Example2".to_vec(), + storage_name: b"SomeCountedStorageMap".to_vec(), + 
prefix: prefix(b"Example2", b"SomeCountedStorageMap").to_vec(), + max_values: None, + max_size: None, + }, + StorageInfo { + pallet_name: b"Example2".to_vec(), + storage_name: b"CounterForSomeCountedStorageMap".to_vec(), + prefix: prefix(b"Example2", b"CounterForSomeCountedStorageMap").to_vec(), + max_values: Some(1), + max_size: Some(4), + }, + ], + ); +} diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs new file mode 100644 index 0000000000000..4523063252ab9 --- /dev/null +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -0,0 +1,367 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub trait SomeAssociation { + type A: frame_support::dispatch::Parameter + Default; +} +impl SomeAssociation for u64 { + type A = u64; +} + +mod pallet_old { + use super::SomeAssociation; + use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, + }; + use frame_system::ensure_root; + + pub trait Config: frame_system::Config { + type SomeConst: Get; + type Balance: Parameter + + codec::HasCompact + + From + + Into + + Default + + SomeAssociation; + type Event: From> + Into<::Event>; + } + + decl_storage! { + trait Store for Module as Example { + /// Some documentation + Dummy get(fn dummy) config(): Option; + Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; + Foo get(fn foo) config(): T::Balance = 3.into(); + Double get(fn double): double_map + hasher(blake2_128_concat) u32, + hasher(twox_64_concat) u64 + => ::A; + } + } + + decl_event!( + pub enum Event + where + Balance = ::Balance, + { + /// Dummy event, just here so there's a generic type that's used. + Dummy(Balance), + } + ); + + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + fn deposit_event() = default; + const SomeConst: T::Balance = T::SomeConst::get(); + + #[weight = >::into(new_value.clone())] + fn set_dummy(origin, #[compact] new_value: T::Balance) { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(RawEvent::Dummy(new_value)); + } + + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + } + + decl_error! 
{ + pub enum Error for Module { + /// Some wrong behavior + Wrong, + } + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::SomeAssociation; + use frame_support::{pallet_prelude::*, scale_info}; + use frame_system::{ensure_root, pallet_prelude::*}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Balance: Parameter + + codec::HasCompact + + From + + Into + + Default + + MaybeSerializeDeserialize + + SomeAssociation + + scale_info::StaticTypeInfo; + #[pallet::constant] + type SomeConst: Get; + type Event: From> + IsType<::Event>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks for Pallet { + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + + #[pallet::call] + impl Pallet { + #[pallet::weight(>::into(new_value.clone()))] + pub fn set_dummy( + origin: OriginFor, + #[pallet::compact] new_value: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(Event::Dummy(new_value)); + + Ok(().into()) + } + } + + #[pallet::error] + pub enum Error { + /// Some wrong behavior + Wrong, + } + + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event { + /// Dummy event, just here so there's a generic type that's used. + Dummy(T::Balance), + } + + #[pallet::storage] + /// Some documentation + type Dummy = StorageValue<_, T::Balance, OptionQuery>; + + #[pallet::storage] + type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; + + #[pallet::type_value] + pub fn OnFooEmpty() -> T::Balance { + 3.into() + } + #[pallet::storage] + type Foo = StorageValue<_, T::Balance, ValueQuery, OnFooEmpty>; + + #[pallet::storage] + type Double = StorageDoubleMap< + _, + Blake2_128Concat, + u32, + Twox64Concat, + u64, + ::A, + ValueQuery, + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + dummy: Option, + bar: Vec<(T::AccountId, T::Balance)>, + foo: T::Balance, + } + + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + dummy: Default::default(), + bar: Default::default(), + foo: OnFooEmpty::::get(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + if let Some(dummy) = self.dummy.as_ref() { + >::put(dummy); + } + for (k, v) in &self.bar { + >::insert(k, v); + } + >::put(&self.foo); + } + } +} + +frame_support::parameter_types!( + pub const SomeConst: u64 = 10; + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type Origin = Origin; + type Index = u64; + type BlockNumber = u32; + type Call = Call; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type 
Balance = u64; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Pallet, Call, Event}, + // NOTE: name Example here is needed in order to have same module prefix + Example: pallet::{Pallet, Call, Event, Config, Storage}, + PalletOld: pallet_old::{Pallet, Call, Event, Config, Storage}, + } +); + +#[cfg(test)] +mod test { + use super::{pallet, pallet_old, Runtime}; + use codec::{Decode, Encode}; + use scale_info::{form::PortableForm, Variant}; + + #[test] + fn metadata() { + let metadata = Runtime::metadata(); + let (pallets, types) = match metadata.1 { + frame_support::metadata::RuntimeMetadata::V14(metadata) => + (metadata.pallets, metadata.types), + _ => unreachable!(), + }; + + let assert_meta_types = |ty_id1, ty_id2| { + let ty1 = types.resolve(ty_id1).map(|ty| ty.type_def()); + let ty2 = types.resolve(ty_id2).map(|ty| ty.type_def()); + pretty_assertions::assert_eq!(ty1, ty2); + }; + + let get_enum_variants = |ty_id| match types.resolve(ty_id).map(|ty| ty.type_def()) { + Some(ty) => match ty { + scale_info::TypeDef::Variant(var) => var.variants(), + _ => panic!("Expected variant type"), + }, + _ => panic!("No type found"), + }; + + let assert_enum_variants = |vs1: &[Variant], + vs2: &[Variant]| { + assert_eq!(vs1.len(), vs2.len()); + for i in 0..vs1.len() { + let v1 = &vs2[i]; + let v2 = &vs2[i]; + assert_eq!(v1.fields().len(), v2.fields().len()); + for f in 0..v1.fields().len() { + let f1 = &v1.fields()[f]; + let f2 = &v2.fields()[f]; + pretty_assertions::assert_eq!(f1.name(), f2.name()); + pretty_assertions::assert_eq!(f1.ty(), f2.ty()); + } + } + }; + + pretty_assertions::assert_eq!(pallets[1].storage, pallets[2].storage); + + let calls1 = pallets[1].calls.as_ref().unwrap(); + let calls2 = pallets[2].calls.as_ref().unwrap(); + assert_meta_types(calls1.ty.id(), calls2.ty.id()); + + // event: check variants and fields but ignore the type name which will be different + let event1_variants = get_enum_variants(pallets[1].event.as_ref().unwrap().ty.id()); + let event2_variants = get_enum_variants(pallets[2].event.as_ref().unwrap().ty.id()); + assert_enum_variants(event1_variants, event2_variants); + + let err1 = get_enum_variants(pallets[1].error.as_ref().unwrap().ty.id()) + .iter() + .filter(|v| v.name() == "__Ignore") + .cloned() + .collect::>(); + let err2 = get_enum_variants(pallets[2].error.as_ref().unwrap().ty.id()) + .iter() + .filter(|v| v.name() == "__Ignore") + .cloned() + .collect::>(); + assert_enum_variants(&err1, &err2); + + pretty_assertions::assert_eq!(pallets[1].constants, pallets[2].constants); + } + + #[test] + fn types() { + assert_eq!( + pallet_old::Event::::decode( + &mut &pallet::Event::::Dummy(10).encode()[..] + ) + .unwrap(), + pallet_old::Event::::Dummy(10), + ); + + assert_eq!( + pallet_old::Call::::decode( + &mut &pallet::Call::::set_dummy { new_value: 10 }.encode()[..] + ) + .unwrap(), + pallet_old::Call::::set_dummy { new_value: 10 }, + ); + } +} diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs new file mode 100644 index 0000000000000..768b9f28d35f3 --- /dev/null +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -0,0 +1,366 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod pallet_old { + use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, + }; + use frame_system::ensure_root; + + pub trait Config: frame_system::Config { + type SomeConst: Get; + type Balance: Parameter + codec::HasCompact + From + Into + Default; + type Event: From> + Into<::Event>; + } + + decl_storage! { + trait Store for Module, I: Instance = DefaultInstance> as Example { + /// Some documentation + Dummy get(fn dummy) config(): Option; + Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; + Foo get(fn foo) config(): T::Balance = 3.into(); + Double get(fn double): + double_map hasher(blake2_128_concat) u32, hasher(twox_64_concat) u64 => u16; + } + } + + decl_event!( + pub enum Event + where + Balance = >::Balance, + { + /// Dummy event, just here so there's a generic type that's used. + Dummy(Balance), + } + ); + + decl_module! { + pub struct Module, I: Instance = DefaultInstance> for enum Call + where origin: T::Origin + { + type Error = Error; + fn deposit_event() = default; + const SomeConst: T::Balance = T::SomeConst::get(); + + #[weight = >::into(new_value.clone())] + fn set_dummy(origin, #[compact] new_value: T::Balance) { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(RawEvent::Dummy(new_value)); + } + + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + } + + decl_error! 
{ + pub enum Error for Module, I: Instance> { + /// Some wrong behavior + Wrong, + } + } +} + +#[frame_support::pallet] +pub mod pallet { + use frame_support::{pallet_prelude::*, scale_info}; + use frame_system::{ensure_root, pallet_prelude::*}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Balance: Parameter + + codec::HasCompact + + From + + Into + + Default + + MaybeSerializeDeserialize + + scale_info::StaticTypeInfo; + #[pallet::constant] + type SomeConst: Get; + type Event: From> + IsType<::Event>; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks for Pallet { + fn on_initialize(_n: T::BlockNumber) -> Weight { + >::put(T::Balance::from(10)); + 10 + } + + fn on_finalize(_n: T::BlockNumber) { + >::put(T::Balance::from(11)); + } + } + + #[pallet::call] + impl, I: 'static> Pallet { + #[pallet::weight(>::into(new_value.clone()))] + pub fn set_dummy( + origin: OriginFor, + #[pallet::compact] new_value: T::Balance, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + >::put(&new_value); + Self::deposit_event(Event::Dummy(new_value)); + + Ok(().into()) + } + } + + #[pallet::error] + pub enum Error { + /// Some wrong behavior + Wrong, + } + + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// Dummy event, just here so there's a generic type that's used. + Dummy(T::Balance), + } + + #[pallet::storage] + /// Some documentation + type Dummy, I: 'static = ()> = StorageValue<_, T::Balance, OptionQuery>; + + #[pallet::storage] + type Bar, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; + + #[pallet::storage] + type Foo, I: 'static = ()> = + StorageValue<_, T::Balance, ValueQuery, OnFooEmpty>; + #[pallet::type_value] + pub fn OnFooEmpty, I: 'static>() -> T::Balance { + 3.into() + } + + #[pallet::storage] + type Double = + StorageDoubleMap<_, Blake2_128Concat, u32, Twox64Concat, u64, u16, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + dummy: Option, + bar: Vec<(T::AccountId, T::Balance)>, + foo: T::Balance, + } + + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + dummy: Default::default(), + bar: Default::default(), + foo: OnFooEmpty::::get(), + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + if let Some(dummy) = self.dummy.as_ref() { + >::put(dummy); + } + for (k, v) in &self.bar { + >::insert(k, v); + } + >::put(&self.foo); + } + } +} + +frame_support::parameter_types!( + pub const SomeConst: u64 = 10; + pub const BlockHashCount: u32 = 250; +); + +impl frame_system::Config for Runtime { + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type BaseCallFilter = frame_support::traits::Everything; + type Origin = Origin; + type Index = u64; + type BlockNumber = u32; + type Call = Call; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + 
type Balance = u64; +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} +impl pallet_old::Config for Runtime { + type Event = Event; + type SomeConst = SomeConst; + type Balance = u64; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Pallet, Call, Event}, + Example: pallet::{Pallet, Call, Event, Config, Storage}, + PalletOld: pallet_old::{Pallet, Call, Event, Config, Storage}, + Instance2Example: pallet::::{Pallet, Call, Event, Config, Storage}, + PalletOld2: pallet_old::::{Pallet, Call, Event, Config, Storage}, + Instance3Example: pallet::::{Pallet, Call, Event, Config, Storage}, + PalletOld3: pallet_old::::{Pallet, Call, Event, Config, Storage}, + } +); + +#[cfg(test)] +mod test { + use super::{pallet, pallet_old, Runtime}; + use codec::{Decode, Encode}; + use scale_info::{form::PortableForm, Variant}; + + #[test] + fn metadata() { + let metadata = Runtime::metadata(); + let (pallets, types) = match metadata.1 { + frame_support::metadata::RuntimeMetadata::V14(metadata) => + (metadata.pallets, metadata.types), + _ => unreachable!(), + }; + + let get_enum_variants = |ty_id| match types.resolve(ty_id).map(|ty| ty.type_def()) { + Some(ty) => match ty { + scale_info::TypeDef::Variant(var) => var.variants(), + _ => panic!("Expected variant type"), + }, + _ => panic!("No type found"), + }; + + let assert_enum_variants = |vs1: &[Variant], + vs2: &[Variant]| { + assert_eq!(vs1.len(), vs2.len()); + for i in 0..vs1.len() { + let v1 = &vs2[i]; + let v2 = &vs2[i]; + assert_eq!(v1.fields().len(), v2.fields().len()); + for f in 0..v1.fields().len() { + let f1 = &v1.fields()[f]; + let f2 = &v2.fields()[f]; + pretty_assertions::assert_eq!(f1.name(), f2.name()); + pretty_assertions::assert_eq!(f1.ty(), f2.ty()); + } + } + }; + + for i in vec![1, 3, 5].into_iter() { + pretty_assertions::assert_eq!(pallets[i].storage, pallets[i + 1].storage); + + let call1_variants = get_enum_variants(pallets[i].calls.as_ref().unwrap().ty.id()); + let call2_variants = get_enum_variants(pallets[i + 1].calls.as_ref().unwrap().ty.id()); + assert_enum_variants(call1_variants, call2_variants); + + // event: check variants and fields but ignore the type name which will be different + let event1_variants = get_enum_variants(pallets[i].event.as_ref().unwrap().ty.id()); + let event2_variants = get_enum_variants(pallets[i + 1].event.as_ref().unwrap().ty.id()); + assert_enum_variants(event1_variants, event2_variants); + + let err1 = get_enum_variants(pallets[i].error.as_ref().unwrap().ty.id()) + .iter() + .filter(|v| v.name() == "__Ignore") + .cloned() + .collect::>(); + let err2 = get_enum_variants(pallets[i + 1].error.as_ref().unwrap().ty.id()) + .iter() + .filter(|v| v.name() == "__Ignore") + .cloned() + .collect::>(); + assert_enum_variants(&err1, &err2); + + pretty_assertions::assert_eq!(pallets[i].constants, pallets[i + 1].constants); + } + 
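// `construct_runtime!` above registers System (0) and then alternates
// attribute-macro and `decl_*` pallets: Example (1) / PalletOld (2),
// Instance2Example (3) / PalletOld2 (4), Instance3Example (5) / PalletOld3 (6).
// Hence the loop above compares each `i` in {1, 3, 5} against its legacy twin
// at `i + 1`.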
+
+	#[test]
+	fn types() {
+		assert_eq!(
+			pallet_old::Event::<Runtime>::decode(
+				&mut &pallet::Event::<Runtime>::Dummy(10).encode()[..]
+			)
+			.unwrap(),
+			pallet_old::Event::<Runtime>::Dummy(10),
+		);
+
+		assert_eq!(
+			pallet_old::Call::<Runtime>::decode(
+				&mut &pallet::Call::<Runtime>::set_dummy { new_value: 10 }.encode()[..]
+			)
+			.unwrap(),
+			pallet_old::Call::<Runtime>::set_dummy { new_value: 10 },
+		);
+	}
+}
diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs
new file mode 100644
index 0000000000000..34586e8414216
--- /dev/null
+++ b/frame/support/test/tests/pallet_instance.rs
@@ -0,0 +1,779 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use frame_support::{
+	dispatch::UnfilteredDispatchable,
+	storage::unhashed,
+	traits::{GetCallName, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade},
+	weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays},
+};
+use sp_io::{
+	hashing::{blake2_128, twox_128, twox_64},
+	TestExternalities,
+};
+use sp_runtime::DispatchError;
+
+#[frame_support::pallet]
+pub mod pallet {
+	use frame_support::{pallet_prelude::*, scale_info};
+	use frame_system::pallet_prelude::*;
+	use sp_std::any::TypeId;
+
+	type BalanceOf<T, I> = <T as Config<I>>::Balance;
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		#[pallet::constant]
+		type MyGetParam: Get<u32>;
+		type Balance: Parameter + Default + scale_info::StaticTypeInfo;
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;
+	}
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(crate) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::hooks]
+	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
+		fn on_initialize(_: BlockNumberFor<T>) -> Weight {
+			// `TypeId` lets a hook tell the default instance apart from the others.
+			if TypeId::of::<I>() == TypeId::of::<()>() {
+				Self::deposit_event(Event::Something(10));
+				10
+			} else {
+				Self::deposit_event(Event::Something(11));
+				11
+			}
+		}
+		fn on_finalize(_: BlockNumberFor<T>) {
+			if TypeId::of::<I>() == TypeId::of::<()>() {
+				Self::deposit_event(Event::Something(20));
+			} else {
+				Self::deposit_event(Event::Something(21));
+			}
+		}
+		fn on_runtime_upgrade() -> Weight {
+			if TypeId::of::<I>() == TypeId::of::<()>() {
+				Self::deposit_event(Event::Something(30));
+				30
+			} else {
+				Self::deposit_event(Event::Something(31));
+				31
+			}
+		}
+		fn integrity_test() {}
+	}
+
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
+		/// Doc comment put in metadata
+		#[pallet::weight(Weight::from(*_foo))]
+		pub fn foo(
+			origin: OriginFor<T>,
+			#[pallet::compact] _foo: u32,
+		) -> DispatchResultWithPostInfo {
+			let _ = origin;
+			Self::deposit_event(Event::Something(3));
+			Ok(().into())
+		}
+
+		/// Doc comment put in metadata
+		#[pallet::weight(1)]
+		#[frame_support::transactional]
+		pub fn foo_transactional(
+			origin: OriginFor<T>,
+			#[pallet::compact] _foo: u32,
+		) -> DispatchResultWithPostInfo {
+			let _ = origin;
+			Ok(().into())
+		}
+	}
+
+	#[pallet::error]
+	pub enum Error<T, I = ()> {
+		/// doc comment put into metadata
+		InsufficientProposersBalance,
+	}
+
+	#[pallet::event]
+	#[pallet::generate_deposit(fn deposit_event)]
+	pub enum Event<T: Config<I>, I: 'static = ()> {
+		/// doc comment put in metadata
+		Proposed(<T as frame_system::Config>::AccountId),
+		/// doc
+		Spending(BalanceOf<T, I>),
+		Something(u32),
+	}
+
+	#[pallet::storage]
+	pub type Value<T, I = ()> = StorageValue<_, u32>;
+
+	#[pallet::storage]
+	pub type Map<T, I = ()> = StorageMap<_, Blake2_128Concat, u8, u16>;
+
+	#[pallet::storage]
+	pub type Map2<T, I = ()> = StorageMap<_, Twox64Concat, u16, u32>;
+
+	#[pallet::storage]
+	pub type DoubleMap<T, I = ()> =
+		StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>;
+
+	#[pallet::storage]
+	pub type DoubleMap2<T, I = ()> =
+		StorageDoubleMap<_, Twox64Concat, u16, Blake2_128Concat, u32, u64>;
+
+	#[pallet::storage]
+	#[pallet::getter(fn nmap)]
+	pub type NMap<T, I = ()> = StorageNMap<_, storage::Key<Blake2_128Concat, u8>, u32>;
+
+	#[pallet::storage]
+	#[pallet::getter(fn nmap2)]
+	pub type NMap2<T, I = ()> =
+		StorageNMap<_, (storage::Key<Twox64Concat, u16>, storage::Key<Blake2_128Concat, u32>), u64>;
+
+	#[pallet::genesis_config]
+	#[derive(Default)]
+	pub struct GenesisConfig {
+		_myfield: u32,
+	}
+
+	#[pallet::genesis_build]
+	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig {
+		fn build(&self) {}
+	}
+
+	#[pallet::origin]
+	#[derive(
+		EqNoBound,
+		RuntimeDebugNoBound,
+		CloneNoBound,
+		PartialEqNoBound,
+		Encode,
+		Decode,
+		scale_info::TypeInfo,
+	)]
+	#[scale_info(skip_type_params(T, I))]
+	pub struct Origin<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::validate_unsigned]
+	impl<T: Config<I>, I: 'static> ValidateUnsigned for Pallet<T, I> {
+		type Call = Call<T, I>;
+		fn validate_unsigned(
+			_source: TransactionSource,
+			_call: &Self::Call,
+		) -> TransactionValidity {
+			Err(TransactionValidityError::Invalid(InvalidTransaction::Call))
+		}
+	}
+
+	#[pallet::inherent]
+	impl<T: Config<I>, I: 'static> ProvideInherent for Pallet<T, I> {
+		type Call = Call<T, I>;
+		type Error = InherentError;
+
+		const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
+
+		fn create_inherent(_data: &InherentData) -> Option<Self::Call> {
+			unimplemented!();
+		}
+
+		fn is_inherent(_call: &Self::Call) -> bool {
+			unimplemented!();
+		}
+	}
+
+	#[derive(codec::Encode, sp_runtime::RuntimeDebug)]
+	#[cfg_attr(feature = "std", derive(codec::Decode))]
+	pub enum InherentError {}
+
+	impl frame_support::inherent::IsFatalError for InherentError {
+		fn is_fatal_error(&self) -> bool {
+			unimplemented!();
+		}
+	}
+
+	pub const INHERENT_IDENTIFIER: frame_support::inherent::InherentIdentifier = *b"testpall";
+}
+
+// Test that an instantiable pallet with a generic genesis_config is correctly handled
+#[frame_support::pallet]
+pub mod pallet2 {
+	use frame_support::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;
+	}
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(crate) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::event]
+	pub enum Event<T: Config<I>, I: 'static = ()> {
+		/// Something
+		Something(u32),
+	}
+
+	#[pallet::genesis_config]
+	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
+		phantom: PhantomData<(T, I)>,
+	}
+
+	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
+		fn default() -> Self {
+			GenesisConfig { phantom: Default::default() }
+		}
+	}
+
+	#[pallet::genesis_build]
+	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig<T, I> {
+		fn build(&self) {}
+	}
+}
+
+frame_support::parameter_types!(
+	pub const MyGetParam: u32 = 10;
+	pub const BlockHashCount: u32 = 250;
+);
+
+impl frame_system::Config for Runtime {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type Origin = Origin;
+	type Index = u64;
+	type BlockNumber = u32;
+	type Call = Call;
+	type Hash = sp_runtime::testing::H256;
+ type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} +impl pallet::Config for Runtime { + type Event = Event; + type MyGetParam = MyGetParam; + type Balance = u64; +} +impl pallet::Config for Runtime { + type Event = Event; + type MyGetParam = MyGetParam; + type Balance = u64; +} +impl pallet2::Config for Runtime { + type Event = Event; +} +impl pallet2::Config for Runtime { + type Event = Event; +} + +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Pallet, Call, Event}, + Example: pallet::{Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, + Instance1Example: pallet::::{ + Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned + }, + Example2: pallet2::{Pallet, Event, Config, Storage}, + Instance1Example2: pallet2::::{Pallet, Event, Config, Storage}, + } +); + +#[test] +fn call_expand() { + let call_foo = pallet::Call::::foo { foo: 3 }; + assert_eq!( + call_foo.get_dispatch_info(), + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + ); + assert_eq!(call_foo.get_call_name(), "foo"); + assert_eq!(pallet::Call::::get_call_names(), &["foo", "foo_transactional"]); + + let call_foo = pallet::Call::::foo { foo: 3 }; + assert_eq!( + call_foo.get_dispatch_info(), + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + ); + assert_eq!(call_foo.get_call_name(), "foo"); + assert_eq!( + pallet::Call::::get_call_names(), + &["foo", "foo_transactional"], + ); +} + +#[test] +fn error_expand() { + assert_eq!( + format!("{:?}", pallet::Error::::InsufficientProposersBalance), + String::from("InsufficientProposersBalance"), + ); + assert_eq!( + <&'static str>::from(pallet::Error::::InsufficientProposersBalance), + "InsufficientProposersBalance", + ); + assert_eq!( + DispatchError::from(pallet::Error::::InsufficientProposersBalance), + DispatchError::Module { index: 1, error: 0, message: Some("InsufficientProposersBalance") }, + ); + + assert_eq!( + format!("{:?}", pallet::Error::::InsufficientProposersBalance), + String::from("InsufficientProposersBalance"), + ); + assert_eq!( + <&'static str>::from( + pallet::Error::::InsufficientProposersBalance + ), + "InsufficientProposersBalance", + ); + assert_eq!( + DispatchError::from( + pallet::Error::::InsufficientProposersBalance + ), + DispatchError::Module { index: 2, error: 0, message: Some("InsufficientProposersBalance") }, + ); +} + +#[test] +fn instance_expand() { + // assert same type + let _: pallet::__InherentHiddenInstance = (); +} + +#[test] +fn pallet_expand_deposit_event() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + pallet::Call::::foo { foo: 3 } + .dispatch_bypass_filter(None.into()) + .unwrap(); + assert_eq!( + frame_system::Pallet::::events()[0].event, + 
Event::Example(pallet::Event::Something(3)), + ); + }); + + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + pallet::Call::::foo { foo: 3 } + .dispatch_bypass_filter(None.into()) + .unwrap(); + assert_eq!( + frame_system::Pallet::::events()[0].event, + Event::Instance1Example(pallet::Event::Something(3)), + ); + }); +} + +#[test] +fn storage_expand() { + use frame_support::{pallet_prelude::*, storage::StoragePrefixedMap}; + + fn twox_64_concat(d: &[u8]) -> Vec { + let mut v = twox_64(d).to_vec(); + v.extend_from_slice(d); + v + } + + fn blake2_128_concat(d: &[u8]) -> Vec { + let mut v = blake2_128(d).to_vec(); + v.extend_from_slice(d); + v + } + + TestExternalities::default().execute_with(|| { + >::put(1); + let k = [twox_128(b"Example"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + >::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u16)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1,), &3); + let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1, &2), &3); + let mut k = [twox_128(b"Example"), twox_128(b"NMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + }); + + TestExternalities::default().execute_with(|| { + >::put(1); + let k = [twox_128(b"Instance1Example"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + >::insert(1, 2); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"Map")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u16)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(1, 2); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"Map2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + 
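+		// At this point `k` is twox128(pallet prefix) ++ twox128(storage prefix) ++
+		// twox64(SCALE(1u16)) ++ SCALE(1u16), i.e. 32 + 8 + 2 bytes. Illustrative
+		// check, not part of the original assertions.
+		assert_eq!(k.len(), 32 + 8 + 2);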
k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1,), &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1, &2), &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap2")].concat(); + k.extend(1u16.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u64)); + assert_eq!(&k[..32], &>::final_prefix()); + }); +} + +#[test] +fn pallet_hooks_expand() { + TestExternalities::default().execute_with(|| { + frame_system::Pallet::::set_block_number(1); + + assert_eq!(AllPallets::on_initialize(1), 21); + AllPallets::on_finalize(1); + + assert_eq!(AllPallets::on_runtime_upgrade(), 61); + + // The order is indeed reversed due to https://github.com/paritytech/substrate/issues/6280 + assert_eq!( + frame_system::Pallet::::events()[0].event, + Event::Instance1Example(pallet::Event::Something(11)), + ); + assert_eq!( + frame_system::Pallet::::events()[1].event, + Event::Example(pallet::Event::Something(10)), + ); + assert_eq!( + frame_system::Pallet::::events()[2].event, + Event::Instance1Example(pallet::Event::Something(21)), + ); + assert_eq!( + frame_system::Pallet::::events()[3].event, + Event::Example(pallet::Event::Something(20)), + ); + assert_eq!( + frame_system::Pallet::::events()[4].event, + Event::Instance1Example(pallet::Event::Something(31)), + ); + assert_eq!( + frame_system::Pallet::::events()[5].event, + Event::Example(pallet::Event::Something(30)), + ); + }) +} + +#[test] +fn pallet_on_genesis() { + TestExternalities::default().execute_with(|| { + pallet::Pallet::::on_genesis(); + + pallet::Pallet::::on_genesis(); + }) +} + +#[test] +fn metadata() { + use frame_support::metadata::*; + + let system_pallet_metadata = PalletMetadata { + index: 0, + name: "System", + storage: None, + calls: Some(scale_info::meta_type::>().into()), + event: Some(PalletEventMetadata { + ty: scale_info::meta_type::>(), + }), + constants: vec![ + PalletConstantMetadata { + name: "BlockWeights", + ty: scale_info::meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "BlockLength", + ty: scale_info::meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "BlockHashCount", + ty: scale_info::meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "DbWeight", + ty: scale_info::meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "Version", + ty: scale_info::meta_type::(), + value: vec![], + docs: vec![], + }, + PalletConstantMetadata { + name: "SS58Prefix", + ty: scale_info::meta_type::(), + value: vec![], + docs: vec![], + }, + ], + error: Some(PalletErrorMetadata { + ty: scale_info::meta_type::>(), + }), + }; + + let example_pallet_metadata = PalletMetadata { + index: 1, + name: "Example", + storage: Some(PalletStorageMetadata { + prefix: "Example", + entries: vec![ + StorageEntryMetadata { + name: "Value", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(scale_info::meta_type::()), + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Map", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + value: 
scale_info::meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "Map2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + hashers: vec![StorageHasher::Twox64Concat], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "DoubleMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: scale_info::meta_type::(), + key: scale_info::meta_type::<(u8, u16)>(), + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "DoubleMap2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: scale_info::meta_type::(), + key: scale_info::meta_type::<(u16, u32)>(), + hashers: vec![StorageHasher::Twox64Concat, StorageHasher::Blake2_128Concat], + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "NMap", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + StorageEntryMetadata { + name: "NMap2", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::<(u16, u32)>(), + hashers: vec![StorageHasher::Twox64Concat, StorageHasher::Blake2_128Concat], + value: scale_info::meta_type::(), + }, + default: vec![0], + docs: vec![], + }, + ], + }), + calls: Some(scale_info::meta_type::>().into()), + event: Some(PalletEventMetadata { ty: scale_info::meta_type::>() }), + constants: vec![PalletConstantMetadata { + name: "MyGetParam", + ty: scale_info::meta_type::(), + value: vec![10, 0, 0, 0], + docs: vec![], + }], + error: Some(PalletErrorMetadata { ty: scale_info::meta_type::>() }), + }; + + let mut example_pallet_instance1_metadata = example_pallet_metadata.clone(); + example_pallet_instance1_metadata.name = "Instance1Example"; + example_pallet_instance1_metadata.index = 2; + match example_pallet_instance1_metadata.calls { + Some(ref mut calls_meta) => { + calls_meta.ty = scale_info::meta_type::>(); + }, + _ => unreachable!(), + } + match example_pallet_instance1_metadata.event { + Some(ref mut event_meta) => { + event_meta.ty = scale_info::meta_type::>(); + }, + _ => unreachable!(), + } + match example_pallet_instance1_metadata.error { + Some(ref mut error_meta) => { + error_meta.ty = scale_info::meta_type::>(); + }, + _ => unreachable!(), + } + match example_pallet_instance1_metadata.storage { + Some(ref mut storage_meta) => { + storage_meta.prefix = "Instance1Example"; + }, + _ => unreachable!(), + } + + let pallets = + vec![system_pallet_metadata, example_pallet_metadata, example_pallet_instance1_metadata]; + + let extrinsic = ExtrinsicMetadata { + ty: scale_info::meta_type::(), + version: 4, + signed_extensions: vec![SignedExtensionMetadata { + identifier: "UnitSignedExtension", + ty: scale_info::meta_type::<()>(), + additional_signed: scale_info::meta_type::<()>(), + }], + }; + + let expected_metadata: RuntimeMetadataPrefixed = + RuntimeMetadataLastVersion::new(pallets, extrinsic, scale_info::meta_type::()) + .into(); + let expected_metadata = match expected_metadata.1 { + RuntimeMetadata::V14(metadata) => metadata, + _ => panic!("metadata has been bumped, test needs to be updated"), + }; + 
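+	// Illustrative shape check (not in the original test): the expected metadata
+	// built above covers exactly the three pallets compared below.
+	assert_eq!(expected_metadata.pallets.len(), 3);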
+ let actual_metadata = match Runtime::metadata().1 { + RuntimeMetadata::V14(metadata) => metadata, + _ => panic!("metadata has been bumped, test needs to be updated"), + }; + + pretty_assertions::assert_eq!(actual_metadata.pallets[1], expected_metadata.pallets[1]); + pretty_assertions::assert_eq!(actual_metadata.pallets[2], expected_metadata.pallets[2]); +} + +#[test] +fn test_pallet_info_access() { + assert_eq!(::name(), "System"); + assert_eq!(::name(), "Example"); + assert_eq!( + ::name(), + "Instance1Example" + ); + assert_eq!(::name(), "Example2"); + assert_eq!( + ::name(), + "Instance1Example2" + ); + + assert_eq!(::index(), 0); + assert_eq!(::index(), 1); + assert_eq!(::index(), 2); + assert_eq!(::index(), 3); + assert_eq!(::index(), 4); +} diff --git a/frame/support/test/tests/pallet_ui.rs b/frame/support/test/tests/pallet_ui.rs new file mode 100644 index 0000000000000..e5f4a54dfb000 --- /dev/null +++ b/frame/support/test/tests/pallet_ui.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[rustversion::attr(not(stable), ignore)] +#[test] +fn pallet_ui() { + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("SKIP_WASM_BUILD", "1"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/pallet_ui/*.rs"); + t.pass("tests/pallet_ui/pass/*.rs"); +} diff --git a/frame/support/test/tests/pallet_ui/attr_non_empty.rs b/frame/support/test/tests/pallet_ui/attr_non_empty.rs new file mode 100644 index 0000000000000..5173d983bbd8e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/attr_non_empty.rs @@ -0,0 +1,6 @@ +#[frame_support::pallet [foo]] +mod foo { +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/attr_non_empty.stderr b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr new file mode 100644 index 0000000000000..144af5a17ea5c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet macro call: expected no attributes, e.g. 
macro call must be just `#[frame_support::pallet]` or `#[pallet]` + --> $DIR/attr_non_empty.rs:1:26 + | +1 | #[frame_support::pallet [foo]] + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs new file mode 100644 index 0000000000000..ee9d692eba9b3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: codec::Codec + scale_info::TypeInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr new file mode 100644 index 0000000000000..d1b040c16091f --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -0,0 +1,28 @@ +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` + --> $DIR/call_argument_invalid_bound.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` + +error[E0277]: the trait bound `::Bar: Clone` is not satisfied + --> $DIR/call_argument_invalid_bound.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `::Bar` + | + = note: required by `clone` + +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` + --> $DIR/call_argument_invalid_bound.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ + | +help: consider further restricting this bound + | +17 | #[pallet::call + std::cmp::PartialEq] + | ^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs new file mode 100644 index 0000000000000..d981b55c48620 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: scale_info::TypeInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr 
b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr new file mode 100644 index 0000000000000..84d4863672957 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -0,0 +1,54 @@ +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` + --> $DIR/call_argument_invalid_bound_2.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` + +error[E0277]: the trait bound `::Bar: Clone` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `::Bar` + | + = note: required by `clone` + +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` + --> $DIR/call_argument_invalid_bound_2.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ + | +help: consider further restricting this bound + | +17 | #[pallet::call + std::cmp::PartialEq] + | ^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` + | + ::: $CARGO/parity-scale-codec-2.2.0/src/codec.rs + | + | fn encode_to(&self, dest: &mut T) { + | ------ required by this bound in `encode_to` + | + = note: required because of the requirements on the impl of `Encode` for `::Bar` + +error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied + --> $DIR/call_argument_invalid_bound_2.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` + | + ::: $CARGO/parity-scale-codec-2.2.0/src/codec.rs + | + | fn decode(input: &mut I) -> Result; + | ----- required by this bound in `pallet::_::_parity_scale_codec::Decode::decode` + | + = note: required because of the requirements on the impl of `Decode` for `::Bar` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs new file mode 100644 index 0000000000000..e7f99d7ca4f2d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.rs @@ -0,0 +1,29 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + use codec::{Encode, Decode}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[derive(Encode, Decode, scale_info::TypeInfo)] + struct Bar; + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr 
b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr new file mode 100644 index 0000000000000..73513907e85f3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -0,0 +1,26 @@ +error[E0277]: `Bar` doesn't implement `std::fmt::Debug` + --> $DIR/call_argument_invalid_bound_3.rs:22:36 + | +22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ `Bar` cannot be formatted using `{:?}` + | + = help: the trait `std::fmt::Debug` is not implemented for `Bar` + = note: add `#[derive(Debug)]` or manually implement `std::fmt::Debug` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` + +error[E0277]: the trait bound `Bar: Clone` is not satisfied + --> $DIR/call_argument_invalid_bound_3.rs:22:36 + | +22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `Bar` + | + = note: required by `clone` + +error[E0369]: binary operation `==` cannot be applied to type `&Bar` + --> $DIR/call_argument_invalid_bound_3.rs:22:36 + | +22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ + | + = note: an implementation of `std::cmp::PartialEq` might be missing for `&Bar` diff --git a/frame/support/test/tests/pallet_ui/call_invalid_const.rs b/frame/support/test/tests/pallet_ui/call_invalid_const.rs new file mode 100644 index 0000000000000..1a28bc32e65c6 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_const.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + const Foo: u8 = 3u8; + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_const.stderr b/frame/support/test/tests/pallet_ui/call_invalid_const.stderr new file mode 100644 index 0000000000000..0acb3e864a512 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_const.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, only method accepted + --> $DIR/call_invalid_const.rs:17:3 + | +17 | const Foo: u8 = 3u8; + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs new file mode 100644 index 0000000000000..2502506fa6aa4 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + pub fn foo(origin: u8) {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr new file mode 100644 index 0000000000000..f17cd9016a6e4 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -0,0 +1,11 @@ +error: Invalid type: expected `OriginFor` + --> 
$DIR/call_invalid_origin_type.rs:17:22 + | +17 | pub fn foo(origin: u8) {} + | ^^ + +error: expected `OriginFor` + --> $DIR/call_invalid_origin_type.rs:17:22 + | +17 | pub fn foo(origin: u8) {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_return.rs b/frame/support/test/tests/pallet_ui/call_invalid_return.rs new file mode 100644 index 0000000000000..1ccdff5d07374 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_return.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + pub fn foo(origin: OriginFor) -> ::DispatchResult { todo!() } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_return.stderr b/frame/support/test/tests/pallet_ui/call_invalid_return.stderr new file mode 100644 index 0000000000000..6a851ed3fc283 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_return.stderr @@ -0,0 +1,5 @@ +error: expected `DispatchResultWithPostInfo` or `DispatchResult` + --> $DIR/call_invalid_return.rs:17:39 + | +17 | pub fn foo(origin: OriginFor) -> ::DispatchResult { todo!() } + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis.rs b/frame/support/test/tests/pallet_ui/call_invalid_vis.rs new file mode 100644 index 0000000000000..fe1c5aee453d4 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: codec::Codec; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis.stderr b/frame/support/test/tests/pallet_ui/call_invalid_vis.stderr new file mode 100644 index 0000000000000..321828a1ae28e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, dispatchable function must be public: `pub fn` + --> $DIR/call_invalid_vis.rs:20:3 + | +20 | fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis_2.rs b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.rs new file mode 100644 index 0000000000000..fb25e9876dc8d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar: codec::Codec; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub(crate) fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + Ok(().into()) + } + } +} + 
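+// Even `pub(crate)` is rejected: dispatchables are exposed through the generated
+// `Call` enum, so the macro requires them to be declared exactly `pub fn` (see the
+// expected error in the matching .stderr file).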
+fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_invalid_vis_2.stderr b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.stderr new file mode 100644 index 0000000000000..7d3113474af73 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_invalid_vis_2.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, dispatchable function must be public: `pub fn` + --> $DIR/call_invalid_vis_2.rs:20:3 + | +20 | pub(crate) fn foo(origin: OriginFor) -> DispatchResultWithPostInfo { + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/call_missing_weight.rs b/frame/support/test/tests/pallet_ui/call_missing_weight.rs new file mode 100644 index 0000000000000..4cdb85502b57f --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_missing_weight.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + pub fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_missing_weight.stderr b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr new file mode 100644 index 0000000000000..ec45d478870c1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_missing_weight.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, requires weight attribute i.e. `#[pallet::weight($expr)]` + --> $DIR/call_missing_weight.rs:17:7 + | +17 | pub fn foo(origin: OriginFor) -> DispatchResultWithPostInfo {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_no_origin.rs b/frame/support/test/tests/pallet_ui/call_no_origin.rs new file mode 100644 index 0000000000000..231c75f43f4ad --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_origin.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + pub fn foo() {} + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/call_no_origin.stderr b/frame/support/test/tests/pallet_ui/call_no_origin.stderr new file mode 100644 index 0000000000000..97574ea1b644c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_origin.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, must have at least origin arg + --> $DIR/call_no_origin.rs:17:7 + | +17 | pub fn foo() {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/call_no_return.rs b/frame/support/test/tests/pallet_ui/call_no_return.rs new file mode 100644 index 0000000000000..68a883c52c072 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_return.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + pub fn foo(origin: OriginFor) {} + } +} + +fn main() { +} diff 
--git a/frame/support/test/tests/pallet_ui/call_no_return.stderr b/frame/support/test/tests/pallet_ui/call_no_return.stderr new file mode 100644 index 0000000000000..18ebbaff76d9d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/call_no_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, require return type DispatchResultWithPostInfo + --> $DIR/call_no_return.rs:17:7 + | +17 | pub fn foo(origin: OriginFor) {} + | ^^ diff --git a/frame/support/test/tests/pallet_ui/duplicate_call_attr.rs b/frame/support/test/tests/pallet_ui/duplicate_call_attr.rs new file mode 100644 index 0000000000000..b8a32a0bd9f69 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_call_attr.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + use frame_support::pallet_prelude::StorageValue; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(trait Store)] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue<_, u8>; + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/duplicate_call_attr.stderr b/frame/support/test/tests/pallet_ui/duplicate_call_attr.stderr new file mode 100644 index 0000000000000..c2956717bb2bb --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_call_attr.stderr @@ -0,0 +1,5 @@ +error: Invalid duplicated attribute + --> $DIR/duplicate_call_attr.rs:23:12 + | +23 | #[pallet::call] + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs new file mode 100644 index 0000000000000..5e99c84050c95 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(trait Store)] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::storage] + type Foo = StorageValue<_, u8>; + + #[pallet::storage] + #[pallet::storage_prefix = "Foo"] + type NotFoo = StorageValue<_, u16>; + + #[pallet::storage] + type CounterForBar = StorageValue<_, u16>; + + #[pallet::storage] + type Bar = CountedStorageMap<_, Twox64Concat, u16, u16>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr new file mode 100644 index 0000000000000..716888c9d8b65 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_storage_prefix.stderr @@ -0,0 +1,47 @@ +error: Duplicate storage prefixes found for `Foo` + --> $DIR/duplicate_storage_prefix.rs:16:29 + | +16 | #[pallet::storage_prefix = "Foo"] + | ^^^^^ + +error: Duplicate storage prefixes found for `Foo` + --> $DIR/duplicate_storage_prefix.rs:13:7 + | +13 | type Foo = StorageValue<_, u8>; + | ^^^ + +error: Duplicate storage prefixes found for `CounterForBar`, used for counter associated to counted storage map + --> $DIR/duplicate_storage_prefix.rs:23:7 + | +23 | type Bar = CountedStorageMap<_, Twox64Concat, u16, u16>; + | ^^^ + +error: Duplicate storage prefixes found for `CounterForBar` + --> $DIR/duplicate_storage_prefix.rs:20:7 + | +20 | type CounterForBar = 
StorageValue<_, u16>; + | ^^^^^^^^^^^^^ + +error[E0412]: cannot find type `_GeneratedPrefixForStorageFoo` in this scope + --> $DIR/duplicate_storage_prefix.rs:13:7 + | +13 | type Foo = StorageValue<_, u8>; + | ^^^ not found in this scope + +error[E0412]: cannot find type `_GeneratedPrefixForStorageNotFoo` in this scope + --> $DIR/duplicate_storage_prefix.rs:17:7 + | +17 | type NotFoo = StorageValue<_, u16>; + | ^^^^^^ not found in this scope + +error[E0412]: cannot find type `_GeneratedPrefixForStorageCounterForBar` in this scope + --> $DIR/duplicate_storage_prefix.rs:20:7 + | +20 | type CounterForBar = StorageValue<_, u16>; + | ^^^^^^^^^^^^^ not found in this scope + +error[E0412]: cannot find type `_GeneratedPrefixForStorageBar` in this scope + --> $DIR/duplicate_storage_prefix.rs:23:7 + | +23 | type Bar = CountedStorageMap<_, Twox64Concat, u16, u16>; + | ^^^ not found in this scope diff --git a/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs b/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs new file mode 100644 index 0000000000000..d675ddefe985b --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_store_attr.rs @@ -0,0 +1,26 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + use frame_support::pallet_prelude::StorageValue; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(trait Store)] + #[pallet::generate_store(trait Store)] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue<_, u8>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr b/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr new file mode 100644 index 0000000000000..232144b8deaca --- /dev/null +++ b/frame/support/test/tests/pallet_ui/duplicate_store_attr.stderr @@ -0,0 +1,5 @@ +error: Unexpected duplicated attribute + --> $DIR/duplicate_store_attr.rs:12:12 + | +12 | #[pallet::generate_store(trait Store)] + | ^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/error_no_fieldless.rs b/frame/support/test/tests/pallet_ui/error_no_fieldless.rs new file mode 100644 index 0000000000000..c9d444d6f90dd --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_no_fieldless.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub enum Error { + U8(u8), + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_no_fieldless.stderr b/frame/support/test/tests/pallet_ui/error_no_fieldless.stderr new file mode 100644 index 0000000000000..1d69fbeff9aac --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_no_fieldless.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::error, unexpected fields, must be `Unit` + --> $DIR/error_no_fieldless.rs:20:5 + | +20 | U8(u8), + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/error_where_clause.rs b/frame/support/test/tests/pallet_ui/error_where_clause.rs new file mode 100644 index 0000000000000..29d7435bc4bc8 --- /dev/null +++ 
b/frame/support/test/tests/pallet_ui/error_where_clause.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub enum Error where u32: From {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_where_clause.stderr b/frame/support/test/tests/pallet_ui/error_where_clause.stderr new file mode 100644 index 0000000000000..8e9d0e60692d8 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_where_clause.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::error, where clause is not allowed on pallet error item + --> $DIR/error_where_clause.rs:19:20 + | +19 | pub enum Error where u32: From {} + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item.rs b/frame/support/test/tests/pallet_ui/error_wrong_item.rs new file mode 100644 index 0000000000000..50e66dc8c0dce --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub struct Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item.stderr b/frame/support/test/tests/pallet_ui/error_wrong_item.stderr new file mode 100644 index 0000000000000..8c0496782fb16 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::error, expected item enum + --> $DIR/error_wrong_item.rs:19:2 + | +19 | pub struct Foo; + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item_name.rs b/frame/support/test/tests/pallet_ui/error_wrong_item_name.rs new file mode 100644 index 0000000000000..14107fafb06ea --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item_name.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::error] + pub enum Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/error_wrong_item_name.stderr b/frame/support/test/tests/pallet_ui/error_wrong_item_name.stderr new file mode 100644 index 0000000000000..d7e54ad8a7516 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/error_wrong_item_name.stderr @@ -0,0 +1,5 @@ +error: expected `Error` + --> $DIR/error_wrong_item_name.rs:19:11 + | +19 | pub enum Foo {} + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.rs b/frame/support/test/tests/pallet_ui/event_field_not_member.rs new file mode 100644 index 0000000000000..0ecde4c130878 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, 
IsType}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + type Event: IsType<::Event> + From>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr new file mode 100644 index 0000000000000..d48012a6c952d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -0,0 +1,28 @@ +error[E0277]: the trait bound `::Bar: Clone` is not satisfied + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ the trait `Clone` is not implemented for `::Bar` + | + = note: required by `clone` + +error[E0369]: binary operation `==` cannot be applied to type `&::Bar` + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ + | +help: consider further restricting this bound + | +22 | pub enum Event { + | ^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` + | + = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for the cast to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.rs b/frame/support/test/tests/pallet_ui/event_not_in_trait.rs new file mode 100644 index 0000000000000..94151ba4c3d9d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr new file mode 100644 index 0000000000000..dd96c700ce7e5 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr @@ -0,0 +1,7 @@ +error: Invalid usage of Event, `Config` contains no associated type `Event`, but enum `Event` is declared (in use of `#[pallet::event]`). An Event associated type must be declare on trait `Config`. 
+ --> $DIR/event_not_in_trait.rs:1:1 + | +1 | #[frame_support::pallet] + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs new file mode 100644 index 0000000000000..fa3bf04d3530d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + type Event; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr new file mode 100644 index 0000000000000..1f58a37576d0d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr @@ -0,0 +1,5 @@ +error: Invalid `type Event`, associated type `Event` is reserved and must bound: `IsType<::Event>` + --> $DIR/event_type_invalid_bound.rs:9:3 + | +9 | type Event; + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs new file mode 100644 index 0000000000000..564a539b89f57 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs @@ -0,0 +1,28 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, IsType}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + type Bar; + type Event: IsType<::Event>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Event { + B { b: T::Bar }, + } +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr new file mode 100644 index 0000000000000..8b8946f3b25eb --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr @@ -0,0 +1,5 @@ +error: Invalid `type Event`, associated type `Event` is reserved and must bound: `From` or `From>` or `From>` + --> $DIR/event_type_invalid_bound_2.rs:9:3 + | +9 | type Event: IsType<::Event>; + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item.rs b/frame/support/test/tests/pallet_ui/event_wrong_item.rs new file mode 100644 index 0000000000000..d6690557c39d8 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub struct Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item.stderr 
b/frame/support/test/tests/pallet_ui/event_wrong_item.stderr new file mode 100644 index 0000000000000..21eb0ed35e936 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::event, expected item enum + --> $DIR/event_wrong_item.rs:19:2 + | +19 | pub struct Foo; + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item_name.rs b/frame/support/test/tests/pallet_ui/event_wrong_item_name.rs new file mode 100644 index 0000000000000..d828965c5173c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item_name.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::event] + pub enum Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/event_wrong_item_name.stderr b/frame/support/test/tests/pallet_ui/event_wrong_item_name.stderr new file mode 100644 index 0000000000000..14e8615c56199 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/event_wrong_item_name.stderr @@ -0,0 +1,5 @@ +error: expected `Event` + --> $DIR/event_wrong_item_name.rs:19:11 + | +19 | pub enum Foo {} + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs new file mode 100644 index 0000000000000..da5e8d0c4da52 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.rs @@ -0,0 +1,26 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, GenesisBuild}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::genesis_config] + pub struct GenesisConfig; + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr new file mode 100644 index 0000000000000..4bc3cfdcbf9b7 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -0,0 +1,10 @@ +error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is not satisfied + --> $DIR/genesis_default_not_satisfied.rs:22:18 + | +22 | impl GenesisBuild for GenesisConfig {} + | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` + | + ::: $WORKSPACE/frame/support/src/traits/hooks.rs + | + | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + | ------- required by this bound in `GenesisBuild` diff --git a/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.rs b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.rs new file mode 100644 index 0000000000000..9ae851005acb3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + 
pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::genesis_build]
+ impl<T: Config> GenesisBuild<T> for GenesisConfig {}
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.stderr b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.stderr
new file mode 100644
index 0000000000000..9afc1037a48ae
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/genesis_inconsistent_build_config.stderr
@@ -0,0 +1,5 @@
+error: `#[pallet::genesis_config]` and `#[pallet::genesis_build]` attributes must be either both used or both not used, instead genesis_config is unused and genesis_build is used
+ --> $DIR/genesis_inconsistent_build_config.rs:2:1
+ |
+2 | mod pallet {
+ | ^^^
diff --git a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.rs b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.rs
new file mode 100644
index 0000000000000..f1eae16f49600
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::genesis_build]
+ impl GenesisBuild for GenesisConfig {}
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr
new file mode 100644
index 0000000000000..f451f7b16aee5
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr
@@ -0,0 +1,13 @@
+error: Invalid genesis builder: expected `GenesisBuild<T>` or `GenesisBuild<T, I>`
+ --> $DIR/genesis_invalid_generic.rs:19:7
+ |
+19 | impl GenesisBuild for GenesisConfig {}
+ | ^^^^^^^^^^^^
+
+error: expected `<`
+ --> $DIR/genesis_invalid_generic.rs:1:1
+ |
+1 | #[frame_support::pallet]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/frame/support/test/tests/pallet_ui/genesis_wrong_name.rs b/frame/support/test/tests/pallet_ui/genesis_wrong_name.rs
new file mode 100644
index 0000000000000..5e8b297ba4ccf
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/genesis_wrong_name.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::genesis_build]
+ impl Foo {}
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/genesis_wrong_name.stderr b/frame/support/test/tests/pallet_ui/genesis_wrong_name.stderr
new file mode 100644
index 0000000000000..dd2e65588f56b
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/genesis_wrong_name.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::genesis_build, expected impl<..> GenesisBuild<..> for GenesisConfig<..>
+ --> $DIR/genesis_wrong_name.rs:19:2
+ |
+19 | impl Foo {}
+ | ^^^^
diff --git
a/frame/support/test/tests/pallet_ui/hooks_invalid_item.rs b/frame/support/test/tests/pallet_ui/hooks_invalid_item.rs new file mode 100644 index 0000000000000..7c66b3e6cecc1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.rs @@ -0,0 +1,19 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr new file mode 100644 index 0000000000000..3d7303fafdcf5 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -0,0 +1,15 @@ +error[E0107]: missing generics for trait `Hooks` + --> $DIR/hooks_invalid_item.rs:12:18 + | +12 | impl Hooks for Pallet {} + | ^^^^^ expected 1 type argument + | +note: trait defined here, with 1 type parameter: `BlockNumber` + --> $DIR/hooks.rs:214:11 + | +214 | pub trait Hooks { + | ^^^^^ ----------- +help: use angle brackets to add missing type argument + | +12 | impl Hooks for Pallet {} + | ^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_1.rs b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.rs new file mode 100644 index 0000000000000..00b57a01235c3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.rs @@ -0,0 +1,20 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_1.stderr b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.stderr new file mode 100644 index 0000000000000..06c7941a0bcb9 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_1.stderr @@ -0,0 +1,29 @@ +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:10:20 + | +10 | pub struct Pallet(core::marker::PhantomData); + | ^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:16:7 + | +16 | impl Pallet {} + | ^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:16:18 + | +16 | impl Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:13:47 + | +13 | impl Hooks> for Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined with instance but generic use none + --> $DIR/inconsistent_instance_1.rs:13:7 + | +13 | impl Hooks> for Pallet {} + | ^ diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_2.rs b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.rs new file mode 100644 index 0000000000000..e7b51cb5ebef5 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.rs @@ -0,0 +1,20 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use 
frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet {} + + #[pallet::call] + impl, I: 'static> Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inconsistent_instance_2.stderr b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.stderr new file mode 100644 index 0000000000000..9d61f2976b75a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inconsistent_instance_2.stderr @@ -0,0 +1,29 @@ +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:10:20 + | +10 | pub struct Pallet(core::marker::PhantomData<(T, I)>); + | ^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:16:7 + | +16 | impl, I: 'static> Pallet {} + | ^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:16:33 + | +16 | impl, I: 'static> Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:13:62 + | +13 | impl, I: 'static> Hooks> for Pallet {} + | ^^^^^^ + +error: Invalid generic declaration, trait is defined without instance but generic use some + --> $DIR/inconsistent_instance_2.rs:13:7 + | +13 | impl, I: 'static> Hooks> for Pallet {} + | ^ diff --git a/frame/support/test/tests/pallet_ui/inherent_check_inner_span.rs b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.rs new file mode 100644 index 0000000000000..9704a7e1a442e --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, ProvideInherent}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::inherent] + impl ProvideInherent for Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr new file mode 100644 index 0000000000000..bc34c55241a76 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr @@ -0,0 +1,11 @@ +error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent`, `is_inherent` + --> $DIR/inherent_check_inner_span.rs:19:2 + | +19 | impl ProvideInherent for Pallet {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent`, `is_inherent` in implementation + | + = help: implement the missing item: `type Call = Type;` + = help: implement the missing item: `type Error = Type;` + = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = value;` + = help: implement the missing item: `fn create_inherent(_: &InherentData) -> std::option::Option<::Call> { todo!() }` + = help: implement the missing item: `fn is_inherent(_: &::Call) -> bool { todo!() }` diff --git a/frame/support/test/tests/pallet_ui/inherent_invalid_item.rs 
b/frame/support/test/tests/pallet_ui/inherent_invalid_item.rs new file mode 100644 index 0000000000000..97eda44721307 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inherent_invalid_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::inherent] + impl Foo {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/inherent_invalid_item.stderr b/frame/support/test/tests/pallet_ui/inherent_invalid_item.stderr new file mode 100644 index 0000000000000..b62b1234bdeb0 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/inherent_invalid_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..> + --> $DIR/inherent_invalid_item.rs:19:2 + | +19 | impl Foo {} + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/mod_not_inlined.rs b/frame/support/test/tests/pallet_ui/mod_not_inlined.rs new file mode 100644 index 0000000000000..c74c7f5ef2a2b --- /dev/null +++ b/frame/support/test/tests/pallet_ui/mod_not_inlined.rs @@ -0,0 +1,5 @@ +#[frame_support::pallet] +mod foo; + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/mod_not_inlined.stderr b/frame/support/test/tests/pallet_ui/mod_not_inlined.stderr new file mode 100644 index 0000000000000..9ad93939d8c00 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/mod_not_inlined.stderr @@ -0,0 +1,13 @@ +error[E0658]: non-inline modules in proc macro input are unstable + --> $DIR/mod_not_inlined.rs:2:1 + | +2 | mod foo; + | ^^^^^^^^ + | + = note: see issue #54727 for more information + +error: Invalid pallet definition, expected mod to be inlined. 
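[Editor's aside, not part of the diff: attribute proc-macros only receive the tokens of the item they annotate, so an out-of-line `mod foo;` hands `#[frame_support::pallet]` nothing to expand. The module body has to be inline, roughly:

#[frame_support::pallet]
pub mod pallet {
    // pallet items go here; `#[frame_support::pallet] mod pallet;` pointing
    // at another file cannot work, as the note above explains.
}
]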
+ --> $DIR/mod_not_inlined.rs:2:1 + | +2 | mod foo; + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/pass/trait_constant_valid_bounds.rs b/frame/support/test/tests/pallet_ui/pass/trait_constant_valid_bounds.rs new file mode 100644 index 0000000000000..71eb4f2992b39 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/trait_constant_valid_bounds.rs @@ -0,0 +1,29 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + type U: Get; + + #[pallet::constant] + type V: Get + From; + + #[pallet::constant] + type W: From + Get; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs new file mode 100644 index 0000000000000..30b6d651f3b89 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, StorageValue}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + struct Bar; + + #[pallet::storage] + type Foo = StorageValue; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr new file mode 100644 index 0000000000000..239de4dba949b --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -0,0 +1,77 @@ +error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the 
impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `partial_storage_info` + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `partial_storage_info` + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `partial_storage_info` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs new file mode 100644 index 0000000000000..ddb19121660da --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, StorageValue}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + 
#[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + struct Bar; + + #[pallet::storage] + type Foo = StorageValue<_, Bar>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr new file mode 100644 index 0000000000000..a5bf32a0ef2d2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -0,0 +1,77 @@ +error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `build_metadata` + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `partial_storage_info` + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `partial_storage_info` + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `partial_storage_info` diff --git a/frame/support/test/tests/pallet_ui/storage_incomplete_item.rs b/frame/support/test/tests/pallet_ui/storage_incomplete_item.rs new file mode 100644 index 0000000000000..e451df8c78a02 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_incomplete_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_incomplete_item.stderr b/frame/support/test/tests/pallet_ui/storage_incomplete_item.stderr new file mode 100644 index 0000000000000..57f3ab78a5382 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_incomplete_item.stderr @@ -0,0 +1,13 @@ +error: free type alias without body + --> $DIR/storage_incomplete_item.rs:19:2 + | +19 | type Foo; + | ^^^^^^^^- + | | + | help: provide a definition for the type: `= ;` + +error[E0433]: failed to resolve: use of undeclared crate or module `pallet` + --> $DIR/storage_incomplete_item.rs:18:4 + | +18 | #[pallet::storage] + | ^^^^^^ use of undeclared crate or module `pallet` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.rs b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.rs new file mode 100644 index 0000000000000..76e3566100640 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, StorageValue}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_storage_info] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + 
+ #[derive(codec::Encode, codec::Decode, scale_info::TypeInfo)] + struct Bar; + + #[pallet::storage] + type Foo = StorageValue<_, Bar>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr new file mode 100644 index 0000000000000..ad415911bc933 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -0,0 +1,8 @@ +error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied + --> $DIR/storage_info_unsatisfied.rs:10:12 + | +10 | #[pallet::generate_storage_info] + | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required by `storage_info` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs new file mode 100644 index 0000000000000..c5d773d716116 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.rs @@ -0,0 +1,27 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, StorageNMap, Twox64Concat, NMapKey}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_storage_info] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[derive(codec::Encode, codec::Decode, scale_info::TypeInfo)] + struct Bar; + + #[pallet::storage] + type Foo = StorageNMap<_, NMapKey, u32>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr new file mode 100644 index 0000000000000..6c92423c6a7fe --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -0,0 +1,9 @@ +error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied + --> $DIR/storage_info_unsatisfied_nmap.rs:10:12 + | +10 | #[pallet::generate_storage_info] + | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` + = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` + = note: required by `storage_info` diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.rs b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.rs new file mode 100644 index 0000000000000..c6a88c083135d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + #[pallet::generate_store(pub trait Store)] + type Foo = StorageValue; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr new 
file mode 100644 index 0000000000000..bf93d99cf56bd --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -0,0 +1,5 @@ +error: expected `getter` or `storage_prefix` + --> $DIR/storage_invalid_attribute.rs:16:12 + | +16 | #[pallet::generate_store(pub trait Store)] + | ^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.rs b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.rs new file mode 100644 index 0000000000000..c8df93c9b323d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::storage] + type Foo = StorageValue; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.stderr new file mode 100644 index 0000000000000..b37f7e57f3552 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_first_generic.stderr @@ -0,0 +1,11 @@ +error: Invalid pallet::storage, for unnamed generic arguments the type first generic argument must be `_`, the argument is then replaced by macro. + --> $DIR/storage_invalid_first_generic.rs:19:29 + | +19 | type Foo = StorageValue; + | ^^ + +error: expected `_` + --> $DIR/storage_invalid_first_generic.rs:19:29 + | +19 | type Foo = StorageValue; + | ^^ diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.rs b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.rs new file mode 100644 index 0000000000000..c3a08e05e2ac7 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.rs @@ -0,0 +1,18 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::storage] + #[pallet::storage_prefix = "pub"] + type Foo = StorageValue<_, u8>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.stderr new file mode 100644 index 0000000000000..513970f98a4f7 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_invalid_rename_value.stderr @@ -0,0 +1,5 @@ +error: `pub` is not a valid identifier + --> $DIR/storage_invalid_rename_value.rs:13:29 + | +13 | #[pallet::storage_prefix = "pub"] + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_getters.rs b/frame/support/test/tests/pallet_ui/storage_multiple_getters.rs new file mode 100644 index 0000000000000..309b9b24136fa --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_multiple_getters.rs @@ -0,0 +1,25 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + 
+ #[pallet::storage]
+ #[pallet::getter(fn get_foo)]
+ #[pallet::getter(fn foo_error)]
+ type Foo<T> = StorageValue<_, u8>;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr
new file mode 100644
index 0000000000000..188eed3cb0d17
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::storage, multiple argument pallet::getter found
+ --> $DIR/storage_multiple_getters.rs:20:3
+ |
+20 | #[pallet::getter(fn foo_error)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_renames.rs b/frame/support/test/tests/pallet_ui/storage_multiple_renames.rs
new file mode 100644
index 0000000000000..f3caef80a7ee2
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_multiple_renames.rs
@@ -0,0 +1,25 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ #[pallet::storage_prefix = "Bar"]
+ #[pallet::storage_prefix = "Baz"]
+ type Foo<T> = StorageValue<_, u8>;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr
new file mode 100644
index 0000000000000..9288d131d95af
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::storage, multiple argument pallet::storage_prefix found
+ --> $DIR/storage_multiple_renames.rs:20:3
+ |
+20 | #[pallet::storage_prefix = "Baz"]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.rs b/frame/support/test/tests/pallet_ui/storage_not_storage_type.rs
new file mode 100644
index 0000000000000..03eee6fc8ec7d
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ type Foo<T> = u8;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr
new file mode 100644
index 0000000000000..4fd59183282d0
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`.
+ --> $DIR/storage_not_storage_type.rs:19:16
+ |
+19 | type Foo<T> = u8;
+ | ^^
diff --git a/frame/support/test/tests/pallet_ui/storage_value_duplicate_named_generic.rs b/frame/support/test/tests/pallet_ui/storage_value_duplicate_named_generic.rs
new file mode 100644
index 0000000000000..1f076b1ecbfc6
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_duplicate_named_generic.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::{Hooks, StorageValue};
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ type Foo<T> = StorageValue<Value = u32, Value = u32>;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_value_duplicate_named_generic.stderr b/frame/support/test/tests/pallet_ui/storage_value_duplicate_named_generic.stderr
new file mode 100644
index 0000000000000..3def9061fec8a
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_duplicate_named_generic.stderr
@@ -0,0 +1,11 @@
+error: Invalid pallet::storage, Duplicated named generic
+ --> $DIR/storage_value_duplicate_named_generic.rs:19:42
+ |
+19 | type Foo<T> = StorageValue<Value = u32, Value = u32>;
+ | ^^^^^
+
+error: Invalid pallet::storage, Duplicated named generic
+ --> $DIR/storage_value_duplicate_named_generic.rs:19:29
+ |
+19 | type Foo<T> = StorageValue<Value = u32, Value = u32>;
+ | ^^^^^
diff --git a/frame/support/test/tests/pallet_ui/storage_value_generic_named_and_unnamed.rs b/frame/support/test/tests/pallet_ui/storage_value_generic_named_and_unnamed.rs
new file mode 100644
index 0000000000000..fd0ea4794bc43
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_generic_named_and_unnamed.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::{Hooks, StorageValue, OptionQuery};
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ type Foo<T> = StorageValue<Value = u32, OptionQuery>;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_value_generic_named_and_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_value_generic_named_and_unnamed.stderr
new file mode 100644
index 0000000000000..61c01943cc3f5
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_generic_named_and_unnamed.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::storage, invalid generic declaration for storage. Expect only type generics or binding generics, e.g. `<Name1 = Gen1, Name2 = Gen2, ..>` or `<Gen1, Gen2, ..>`.
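[Editor's aside, not part of the diff: the two declaration shapes the message accepts, sketched on `StorageValue`; mixing a named binding with a positional argument, as the test above does, is rejected:

// positional generics: the first (prefix) slot must be `_`, filled in by the macro
type Foo<T> = StorageValue<_, u32, OptionQuery>;
// named (binding) generics
type Bar<T> = StorageValue<Value = u32, QueryKind = OptionQuery>;
]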
+ --> $DIR/storage_value_generic_named_and_unnamed.rs:19:16
+ |
+19 | type Foo<T> = StorageValue<Value = u32, OptionQuery>;
+ | ^^^^^^^^^^^^
diff --git a/frame/support/test/tests/pallet_ui/storage_value_no_generic.rs b/frame/support/test/tests/pallet_ui/storage_value_no_generic.rs
new file mode 100644
index 0000000000000..e62bdafaa2643
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_no_generic.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ type Foo<T> = StorageValue;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_value_no_generic.stderr b/frame/support/test/tests/pallet_ui/storage_value_no_generic.stderr
new file mode 100644
index 0000000000000..f7449c5ffda7d
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_no_generic.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::storage, invalid number of generic generic arguments, expect more that 0 generic arguments.
+ --> $DIR/storage_value_no_generic.rs:19:16
+ |
+19 | type Foo<T> = StorageValue;
+ | ^^^^^^^^^^^^
diff --git a/frame/support/test/tests/pallet_ui/storage_value_unexpected_named_generic.rs b/frame/support/test/tests/pallet_ui/storage_value_unexpected_named_generic.rs
new file mode 100644
index 0000000000000..a3e54448e42ad
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_unexpected_named_generic.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::{Hooks, StorageValue};
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ type Foo<T> = StorageValue<P = u8>;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_value_unexpected_named_generic.stderr b/frame/support/test/tests/pallet_ui/storage_value_unexpected_named_generic.stderr
new file mode 100644
index 0000000000000..f03b71ff5eb6e
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_value_unexpected_named_generic.stderr
@@ -0,0 +1,11 @@
+error: Invalid pallet::storage, Unexpected generic `P` for `StorageValue`. `StorageValue` expect generics `Value`, and optional generics `QueryKind`, `OnEmpty`.
+ --> $DIR/storage_value_unexpected_named_generic.rs:19:29
+ |
+19 | type Foo<T> = StorageValue<P = u8>;
+ | ^
+
+error: Invalid pallet::storage, cannot find `Value` generic, required for `StorageValue`.
+ --> $DIR/storage_value_unexpected_named_generic.rs:19:28
+ |
+19 | type Foo<T> = StorageValue<P = u8>;
+ | ^
diff --git a/frame/support/test/tests/pallet_ui/storage_wrong_item.rs b/frame/support/test/tests/pallet_ui/storage_wrong_item.rs
new file mode 100644
index 0000000000000..56c4b86f2b35a
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_wrong_item.rs
@@ -0,0 +1,23 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ impl Foo {}
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/storage_wrong_item.stderr b/frame/support/test/tests/pallet_ui/storage_wrong_item.stderr
new file mode 100644
index 0000000000000..d875d8acec66f
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/storage_wrong_item.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::storage, expect item type.
+ --> $DIR/storage_wrong_item.rs:19:2
+ |
+19 | impl Foo {}
+ | ^^^^
diff --git a/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs b/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs
new file mode 100644
index 0000000000000..3ebd1cb9fa608
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/store_trait_leak_private.rs
@@ -0,0 +1,25 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+ use frame_support::pallet_prelude::StorageValue;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ #[pallet::generate_store(pub trait Store)]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::storage]
+ type Foo<T> = StorageValue<_, u8>;
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr b/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr
new file mode 100644
index 0000000000000..d8c62faa303ee
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/store_trait_leak_private.stderr
@@ -0,0 +1,8 @@
+error[E0446]: private type `_GeneratedPrefixForStorageFoo` in public interface
+ --> $DIR/store_trait_leak_private.rs:11:37
+ |
+11 | #[pallet::generate_store(pub trait Store)]
+ | ^^^^^ can't leak private type
+...
+20 | #[pallet::storage] + | ------- `_GeneratedPrefixForStorageFoo` declared as private diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.rs b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.rs new file mode 100644 index 0000000000000..ce599d5a31e71 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + type U; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr new file mode 100644 index 0000000000000..057ec6ffb2c75 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound.stderr @@ -0,0 +1,5 @@ +error: Invalid usage of `#[pallet::constant]`: `Get` trait bound not found + --> $DIR/trait_constant_invalid_bound.rs:9:3 + | +9 | type U; + | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.rs b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.rs new file mode 100644 index 0000000000000..47303f2b20a02 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + type U: Get<'static>; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.stderr b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.stderr new file mode 100644 index 0000000000000..8d830fed8f392 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_constant_invalid_bound_lifetime.stderr @@ -0,0 +1,5 @@ +error: Invalid usage of `#[pallet::constant]`: Expected a type argument + --> $DIR/trait_constant_invalid_bound_lifetime.rs:9:15 + | +9 | type U: Get<'static>; + | ^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/trait_invalid_item.rs b/frame/support/test/tests/pallet_ui/trait_invalid_item.rs new file mode 100644 index 0000000000000..8537659dcd037 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_invalid_item.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::Hooks; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + const U: u8 = 3; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr b/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr new file mode 100644 index 0000000000000..72495d94b3079 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/trait_invalid_item.stderr @@ -0,0 +1,5 
@@
+error: Invalid pallet::constant in pallet::config, expected type trait item
+ --> $DIR/trait_invalid_item.rs:9:3
+ |
+9 | const U: u8 = 3;
+ | ^^^^^
diff --git a/frame/support/test/tests/pallet_ui/trait_no_supertrait.rs b/frame/support/test/tests/pallet_ui/trait_no_supertrait.rs
new file mode 100644
index 0000000000000..0fc987f7bbdd7
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/trait_no_supertrait.rs
@@ -0,0 +1,21 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config {
+ }
+
+ #[pallet::pallet]
+ pub struct Pallet<T>(core::marker::PhantomData<T>);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/trait_no_supertrait.stderr b/frame/support/test/tests/pallet_ui/trait_no_supertrait.stderr
new file mode 100644
index 0000000000000..c38f43d28eb33
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/trait_no_supertrait.stderr
@@ -0,0 +1,5 @@
+error: Invalid pallet::trait, expected explicit `frame_system::Config` as supertrait, found none. (try `pub trait Config: frame_system::Config { ...` or `pub trait Config<I: 'static>: frame_system::Config { ...`). To disable this check, use `#[pallet::disable_frame_system_supertrait_check]`
+ --> $DIR/trait_no_supertrait.rs:7:2
+ |
+7 | pub trait Config {
+ | ^^^
diff --git a/frame/support/test/tests/pallet_ui/type_value_error_in_block.rs b/frame/support/test/tests/pallet_ui/type_value_error_in_block.rs
new file mode 100644
index 0000000000000..a13e1c7c5c2d2
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/type_value_error_in_block.rs
@@ -0,0 +1,25 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {}
+
+ #[pallet::pallet]
+ pub struct Pallet(_);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {}
+
+ #[pallet::type_value] fn Foo<T: Config>() -> u32 {
+ // Just wrong code to see span
+ u32::new()
+ }
+}
+
+fn main() {
+}
diff --git a/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr b/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr
new file mode 100644
index 0000000000000..f46b89a067b06
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr
@@ -0,0 +1,5 @@
+error[E0599]: no function or associated item named `new` found for type `u32` in the current scope
+ --> $DIR/type_value_error_in_block.rs:20:8
+ |
+20 | u32::new()
+ | ^^^ function or associated item not found in `u32`
diff --git a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.rs b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.rs
new file mode 100644
index 0000000000000..b04d8b894676d
--- /dev/null
+++ b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.rs
@@ -0,0 +1,28 @@
+#[frame_support::pallet]
+mod pallet {
+ use frame_support::pallet_prelude::Hooks;
+ use frame_system::pallet_prelude::BlockNumberFor;
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config
+ where <Self as frame_system::Config>::AccountId: From<u32>
+ {}
+
+ #[pallet::pallet]
+ pub struct Pallet(_);
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T>
+ where <T as frame_system::Config>::AccountId: From<u32>
+ {}
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T>
+ where <T as frame_system::Config>::AccountId: From<u32>
+ {}
+
+ #[pallet::type_value] fn Foo<T: Config>() -> u32 { 3u32 }
+}
+
+fn main() {
+}
diff --git
a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr new file mode 100644 index 0000000000000..85d7342b253d4 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr @@ -0,0 +1,47 @@ +error[E0277]: the trait bound `::AccountId: From` is not satisfied + --> $DIR/type_value_forgotten_where_clause.rs:24:34 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | --------- required by this bound in `pallet::Config` +... +24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } + | ^^^^^^ the trait `From` is not implemented for `::AccountId` + | +help: consider further restricting the associated type + | +24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::AccountId: From` is not satisfied + --> $DIR/type_value_forgotten_where_clause.rs:24:12 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | --------- required by this bound in `pallet::Config` +... +24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } + | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` + | +help: consider further restricting the associated type + | +24 | #[pallet::type_value where ::AccountId: From] fn Foo() -> u32 { 3u32 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `::AccountId: From` is not satisfied + --> $DIR/type_value_forgotten_where_clause.rs:24:12 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | --------- required by this bound in `pallet::Config` +... 
+24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } + | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` + | +help: consider further restricting the associated type + | +24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/type_value_invalid_item.rs b/frame/support/test/tests/pallet_ui/type_value_invalid_item.rs new file mode 100644 index 0000000000000..1b6c975b09ed1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_invalid_item.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, PhantomData}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::type_value] struct Foo; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/type_value_invalid_item.stderr b/frame/support/test/tests/pallet_ui/type_value_invalid_item.stderr new file mode 100644 index 0000000000000..5ae618df8837c --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_invalid_item.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::type_value, expected item fn + --> $DIR/type_value_invalid_item.rs:18:24 + | +18 | #[pallet::type_value] struct Foo; + | ^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/type_value_no_return.rs b/frame/support/test/tests/pallet_ui/type_value_no_return.rs new file mode 100644 index 0000000000000..82eb3b17d0393 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_no_return.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::{Hooks, PhantomData}; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + #[pallet::type_value] fn Foo() {} +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/type_value_no_return.stderr b/frame/support/test/tests/pallet_ui/type_value_no_return.stderr new file mode 100644 index 0000000000000..65ac0243f9f64 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/type_value_no_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::type_value, expected return type + --> $DIR/type_value_no_return.rs:18:24 + | +18 | #[pallet::type_value] fn Foo() {} + | ^^ diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs new file mode 100644 index 0000000000000..1c47d13a619f2 --- /dev/null +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -0,0 +1,161 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
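Taken together, the `type_value` UI tests above pin down the attribute's contract: it must sit on a `fn` item, the function needs an explicit return type, and any `where` clause on the config trait has to be repeated on the function. A minimal sketch of the valid form under those rules; it assumes the usual `frame_support`/`frame_system` dev-dependencies of these tests, and the `DefaultFoo`/`Foo` names are illustrative rather than taken from the diff:

```rust
#[frame_support::pallet]
mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

	#[pallet::call]
	impl<T: Config> Pallet<T> {}

	// An item fn with an explicit return type, as the macro requires.
	#[pallet::type_value]
	pub fn DefaultFoo<T: Config>() -> u32 { 3u32 }

	// Typical use: the default for a storage item.
	#[pallet::storage]
	pub type Foo<T> = StorageValue<_, u32, ValueQuery, DefaultFoo<T>>;
}
```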
+// See the License for the specific language governing permissions and +// limitations under the License. + +pub trait Trait: frame_system::Config { + type Balance: frame_support::dispatch::Parameter; + /// The overarching event type. + type Event: From> + Into<::Event>; +} + +frame_support::decl_storage! { + trait Store for Module as Example { + Dummy get(fn dummy) config(): Option; + } +} + +frame_support::decl_event!( + pub enum Event + where + B = ::Balance, + { + Dummy(B), + } +); + +frame_support::decl_error!( + pub enum Error for Module { + Dummy, + } +); + +frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + type Error = Error; + const Foo: u32 = u32::MAX; + + #[weight = 0] + fn accumulate_dummy(_origin, _increase_by: T::Balance) { + unimplemented!(); + } + + fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { + 0 + } + } +} + +impl sp_runtime::traits::ValidateUnsigned for Module { + type Call = Call; + + fn validate_unsigned( + _source: sp_runtime::transaction_validity::TransactionSource, + _call: &Self::Call, + ) -> sp_runtime::transaction_validity::TransactionValidity { + unimplemented!(); + } +} + +pub const INHERENT_IDENTIFIER: frame_support::inherent::InherentIdentifier = *b"12345678"; + +impl frame_support::inherent::ProvideInherent for Module { + type Call = Call; + type Error = frame_support::inherent::MakeFatalError<()>; + const INHERENT_IDENTIFIER: frame_support::inherent::InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &frame_support::inherent::InherentData) -> Option { + unimplemented!(); + } + + fn check_inherent( + _: &Self::Call, + _: &frame_support::inherent::InherentData, + ) -> std::result::Result<(), Self::Error> { + unimplemented!(); + } + + fn is_inherent(_call: &Self::Call) -> bool { + unimplemented!(); + } +} + +#[cfg(test)] +mod tests { + use crate as pallet_test; + + use frame_support::parameter_types; + + type SignedExtra = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + ); + type TestBlock = sp_runtime::generic::Block; + type TestHeader = sp_runtime::generic::Header; + type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + ::AccountId, + ::Call, + (), + SignedExtra, + >; + + frame_support::construct_runtime!( + pub enum Runtime where + Block = TestBlock, + NodeBlock = TestBlock, + UncheckedExtrinsic = TestUncheckedExtrinsic + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + PalletTest: pallet_test::{Pallet, Call, Storage, Event, Config, ValidateUnsigned, Inherent}, + } + ); + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + } + + impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = sp_core::H256; + type Call = Call; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Header = TestHeader; + type Event = (); + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + } + + impl pallet_test::Trait for Runtime { + type Balance = u32; + type Event = (); + } +} diff --git a/frame/support/test/tests/reserved_keyword.rs b/frame/support/test/tests/reserved_keyword.rs index 382b2e498741f..d29b0477c3836 100644 --- a/frame/support/test/tests/reserved_keyword.rs +++ b/frame/support/test/tests/reserved_keyword.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ #[test] fn reserved_keyword() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/reserved_keyword/*.rs"); diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.rs b/frame/support/test/tests/reserved_keyword/on_initialize.rs index db71fe9a1e26a..72d53abfb1034 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.rs +++ b/frame/support/test/tests/reserved_keyword/on_initialize.rs @@ -4,10 +4,7 @@ macro_rules! reserved { mod $reserved { pub use frame_support::dispatch; - pub trait Trait { - type Origin; - type BlockNumber: Into; - } + pub trait Config: frame_support_test::Config {} pub mod system { use frame_support::dispatch; @@ -18,7 +15,7 @@ macro_rules! reserved { } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { #[weight = 0] fn $reserved(_origin) -> dispatch::DispatchResult { unreachable!() } } diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.stderr b/frame/support/test/tests/reserved_keyword/on_initialize.stderr index dbe07195e89dd..3df392dee9005 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.stderr +++ b/frame/support/test/tests/reserved_keyword/on_initialize.stderr @@ -1,39 +1,39 @@ error: Invalid call fn name: `on_finalize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. 
- --> $DIR/on_initialize.rs:31:1 + --> $DIR/on_initialize.rs:28:1 | -31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `on_initialize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. - --> $DIR/on_initialize.rs:31:1 + --> $DIR/on_initialize.rs:28:1 | -31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `on_runtime_upgrade`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. - --> $DIR/on_initialize.rs:31:1 + --> $DIR/on_initialize.rs:28:1 | -31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `offchain_worker`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. - --> $DIR/on_initialize.rs:31:1 + --> $DIR/on_initialize.rs:28:1 | -31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `deposit_event`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. - --> $DIR/on_initialize.rs:31:1 + --> $DIR/on_initialize.rs:28:1 | -31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index be8b678c6dfd0..4e97a87377b17 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
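For context on the `storage_transaction` tests that the next hunks rewrite: `with_transaction` runs a closure inside a storage layer and commits or discards its writes according to the returned `TransactionOutcome`, and the `#[transactional]` attribute wraps a dispatchable in the same primitive, rolling back whenever it returns `Err`. A minimal sketch, assuming the `with_transaction` signature of this era (`FnOnce() -> TransactionOutcome<R>` returning `R`) and meant to run inside `TestExternalities` like the tests do:

```rust
use frame_support::storage::{with_transaction, TransactionOutcome::*};

fn sketch(succeed: bool) -> Result<u32, &'static str> {
	with_transaction(|| {
		// ...storage writes would go here; they only persist on `Commit`...
		if succeed {
			Commit(Ok(1))
		} else {
			// Writes are discarded, but the inner value still reaches the caller.
			Rollback(Err("nah"))
		}
	})
}
```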
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,23 +15,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Encode, Decode, EncodeLike}; use frame_support::{ - assert_ok, assert_noop, transactional, - StorageMap, StorageValue, + assert_noop, assert_ok, dispatch::{DispatchError, DispatchResult}, storage::{with_transaction, TransactionOutcome::*}, + transactional, StorageMap, StorageValue, }; use sp_io::TestExternalities; use sp_std::result; -pub trait Trait { - type Origin; - type BlockNumber: Encode + Decode + EncodeLike + Default + Clone; -} +pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { #[weight = 0] #[transactional] fn value_commits(_origin, v: u32) { @@ -47,23 +43,27 @@ frame_support::decl_module! { } } -frame_support::decl_storage!{ - trait Store for Module as StorageTransactions { +frame_support::decl_storage! { + trait Store for Module as StorageTransactions { pub Value: u32; pub Map: map hasher(twox_64_concat) String => u32; } } struct Runtime; -impl Trait for Runtime { + +impl frame_support_test::Config for Runtime { type Origin = u32; type BlockNumber = u32; + type PalletInfo = frame_support_test::PanicPalletInfo; + type DbWeight = (); } +impl Config for Runtime {} + #[test] fn storage_transaction_basic_commit() { TestExternalities::default().execute_with(|| { - assert_eq!(Value::get(), 0); assert!(!Map::contains_key("val0")); @@ -83,7 +83,6 @@ fn storage_transaction_basic_commit() { #[test] fn storage_transaction_basic_rollback() { TestExternalities::default().execute_with(|| { - assert_eq!(Value::get(), 0); assert_eq!(Map::get("val0"), 0); @@ -196,7 +195,8 @@ fn transactional_annotation() { #[transactional] fn value_rollbacks(v: u32) -> result::Result { set_value(v)?; - Err("nah") + Err("nah")?; + Ok(v) } TestExternalities::default().execute_with(|| { diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index a7d4d43c341a9..4acc248d25f20 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,34 +15,42 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::codec::{Encode, Decode, EncodeLike}; +use frame_support::{ + codec::{Decode, Encode, EncodeLike}, + traits::Get, + weights::RuntimeDbWeight, +}; -pub trait Trait: 'static + Eq + Clone { +pub trait Config: 'static + Eq + Clone { type Origin: Into, Self::Origin>> + From>; - type BaseCallFilter: frame_support::traits::Filter; - type BlockNumber: Decode + Encode + EncodeLike + Clone + Default; + type BaseCallFilter: frame_support::traits::Contains; + type BlockNumber: Decode + Encode + EncodeLike + Clone + Default + scale_info::TypeInfo; type Hash; - type AccountId: Encode + EncodeLike + Decode; + type AccountId: Encode + EncodeLike + Decode + scale_info::TypeInfo; type Call; type Event: From>; type PalletInfo: frame_support::traits::PalletInfo; + type DbWeight: Get; } frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::Origin, system=self { #[weight = 0] - fn noop(origin) {} + fn noop(_origin) {} } } -impl Module { +impl Module { pub fn deposit_event(_event: impl Into) {} } frame_support::decl_event!( - pub enum Event where BlockNumber = ::BlockNumber { + pub enum Event + where + BlockNumber = ::BlockNumber, + { ExtrinsicSuccess, ExtrinsicFailed, Ignore(BlockNumber), @@ -50,7 +58,7 @@ frame_support::decl_event!( ); frame_support::decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Test error documentation TestError, /// Error documentation @@ -60,7 +68,7 @@ frame_support::decl_error! { } /// Origin for the system module. -#[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] +#[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode, scale_info::TypeInfo)] pub enum RawOrigin { Root, Signed(AccountId), @@ -76,11 +84,12 @@ impl From> for RawOrigin { } } -pub type Origin = RawOrigin<::AccountId>; +pub type Origin = RawOrigin<::AccountId>; #[allow(dead_code)] pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { o.into().map(|_| ()).map_err(|_| "bad origin: expected to be a root origin") } diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index cebf761a907c7..389730107b439 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,19 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -impl-trait-for-tuples = "0.1.3" +serde = { version = "1.0.126", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-version = { version = "4.0.0-dev", default-features = false, path = "../../primitives/version" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] criterion = "0.3.3" -sp-externalities = { version = "0.8.0", path = 
"../../primitives/externalities" } +sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] @@ -33,17 +34,20 @@ default = ["std"] std = [ "serde", "codec/std", + "scale-info/std", "sp-core/std", "sp-std/std", "sp-io/std", "frame-support/std", "sp-runtime/std", "sp-version/std", + "log/std", ] runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] [[bench]] name = "bench" diff --git a/frame/system/README.md b/frame/system/README.md index adfa7aa35ddda..6766c3d73f4de 100644 --- a/frame/system/README.md +++ b/frame/system/README.md @@ -3,12 +3,12 @@ The System module provides low-level access to core types and cross-cutting utilities. It acts as the base layer for other pallets to interact with the Substrate framework components. -- [`system::Trait`](https://docs.rs/frame-system/latest/frame_system/trait.Trait.html) +- [`system::Config`](https://docs.rs/frame-system/latest/frame_system/pallet/trait.Config.html) ## Overview The System module defines the core data types used in a Substrate runtime. -It also provides several utility functions (see [`Module`](https://docs.rs/frame-system/latest/frame_system/struct.Module.html)) for other FRAME pallets. +It also provides several utility functions (see [`Pallet`](https://docs.rs/frame-system/latest/frame_system/pallet/struct.Pallet.html)) for other FRAME pallets. In addition, it manages the storage items for extrinsics data, indexes, event records, and digest items, among other things that support the execution of the current block. @@ -24,7 +24,7 @@ The System module does not implement any dispatchable functions. ### Public Functions -See the [`Module`](https://docs.rs/frame-system/latest/frame_system/struct.Module.html) struct for details of publicly available functions. +See the [`Pallet`](https://docs.rs/frame-system/latest/frame_system/pallet/struct.Pallet.html) struct for details of publicly available functions. ### Signed Extensions @@ -57,19 +57,19 @@ Import the System module and derive your module's configuration trait from the s use frame_support::{decl_module, dispatch}; use frame_system::{self as system, ensure_signed}; -pub trait Trait: system::Trait {} +pub trait Config: system::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn system_module_example(origin) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; - let _extrinsic_count = >::extrinsic_count(); - let _parent_hash = >::parent_hash(); + let _extrinsic_count = >::extrinsic_count(); + let _parent_hash = >::parent_hash(); Ok(()) } } } ``` -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 00c965136c0d0..97c19c5e8159a 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,21 +15,25 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use criterion::{Criterion, criterion_group, criterion_main, black_box}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use frame_support::{decl_event, decl_module}; use frame_system as system; -use frame_support::{decl_module, decl_event, impl_outer_origin, impl_outer_event, weights::Weight}; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; mod module { use super::*; - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; + pub trait Config: system::Config { + type Event: From + Into<::Event>; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { pub fn deposit_event() = default; } } @@ -41,31 +45,40 @@ mod module { ); } -impl_outer_origin!{ - pub enum Origin for Runtime {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl_outer_event! { - pub enum Event for Runtime { - system, - module, +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Module: module::{Pallet, Call, Event}, } -} +); frame_support::parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + 4 * 1024 * 1024, Perbill::from_percent(75), + ); + pub BlockLength: frame_system::limits::BlockLength = + frame_system::limits::BlockLength::max_with_normal_ratio( + 4 * 1024 * 1024, Perbill::from_percent(75), + ); } -#[derive(Clone, Eq, PartialEq)] -pub struct Runtime; -impl system::Trait for Runtime { - type BaseCallFilter = (); +impl system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = BlockLength; + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; @@ -73,22 +86,17 @@ impl system::Trait for Runtime { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl module::Trait for Runtime { +impl module::Config for Runtime { type Event = Event; } @@ -100,17 +108,18 @@ fn deposit_events(n: usize) { let mut t = new_test_ext(); t.execute_with(|| { for _ in 0..n { - module::Module::::deposit_event( - module::Event::Complex(vec![1, 2, 3], 2, 3, 899) - ); + module::Module::::deposit_event(module::Event::Complex( + vec![1, 2, 3], + 2, + 3, + 899, + )); } }); } fn sr_system_benchmark(c: &mut 
Criterion) { - c.bench_function("deposit 100 events", |b| { - b.iter(|| deposit_events(black_box(100))) - }); + c.bench_function("deposit 100 events", |b| b.iter(|| deposit_events(black_box(100)))); } criterion_group!(benches, sr_system_benchmark); diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 26b9bd9230e00..29bcccfd7d830 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-benchmarking" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,22 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../benchmarking" } -frame-system = { version = "2.0.0", default-features = false, path = "../../system" } -frame-support = { version = "2.0.0", default-features = false, path = "../../support" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } [dev-dependencies] -serde = { version = "1.0.101" } -sp-io ={ version = "2.0.0", path = "../../../primitives/io" } +sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "sp-runtime/std", "sp-std/std", "frame-benchmarking/std", diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index 9b630520e65d7..beb61829bce37 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
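The bench rewrite above replaces the old `impl_outer_origin!`/`impl_outer_event!` pair with `construct_runtime!`, which generates `Origin`, `Call`, `Event` and `PalletInfo` from a single declaration; the `frame_system::mocking` aliases supply the block and extrinsic types. Reduced to its skeleton (with the pallet list trimmed to `System` alone), the mock-runtime pattern used in the benches and mocks of this diff is:

```rust
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;

frame_support::construct_runtime!(
	pub enum Runtime where
		Block = Block,
		NodeBlock = Block,
		UncheckedExtrinsic = UncheckedExtrinsic,
	{
		// Each entry lists the pallet parts exposed to the runtime.
		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
	}
);
```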
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,25 +20,27 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; -use sp_std::vec; -use sp_std::prelude::*; -use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::{storage, traits::Get, weights::DispatchClass}; +use frame_system::{Call, DigestItemOf, Pallet as System, RawOrigin}; +use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_runtime::traits::Hash; -use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_support::traits::Get; -use frame_support::storage::{self, StorageMap}; -use frame_system::{Module as System, Call, RawOrigin, DigestItemOf, AccountInfo}; +use sp_std::{prelude::*, vec}; mod mock; -pub struct Module(System); -pub trait Trait: frame_system::Trait {} +pub struct Pallet(System); +pub trait Config: frame_system::Config {} benchmarks! { - _ { } - remark { - let b in 0 .. T::MaximumBlockLength::get(); + let b in 0 .. *T::BlockLength::get().max.get(DispatchClass::Normal) as u32; + let remark_message = vec![1; b as usize]; + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), remark_message) + + remark_with_event { + let b in 0 .. *T::BlockLength::get().max.get(DispatchClass::Normal) as u32; let remark_message = vec![1; b as usize]; let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), remark_message) @@ -77,6 +79,7 @@ benchmarks! { assert_eq!(System::::digest().logs.len(), (d + 1) as usize) } + #[skip_meta] set_storage { let i in 1 .. 1000; @@ -93,6 +96,7 @@ benchmarks! { assert_eq!(value, last_hash.as_ref().to_vec()); } + #[skip_meta] kill_storage { let i in 1 .. 1000; @@ -114,6 +118,7 @@ benchmarks! { assert_eq!(storage::unhashed::get_raw(last_hash.as_ref()), None); } + #[skip_meta] kill_prefix { let p in 1 .. 1000; @@ -135,41 +140,6 @@ benchmarks! { verify { assert_eq!(storage::unhashed::get_raw(&last_key), None); } - - suicide { - let caller: T::AccountId = whitelisted_caller(); - let account_info = AccountInfo:: { - nonce: 1337.into(), - refcount: 0, - data: T::AccountData::default() - }; - frame_system::Account::::insert(&caller, account_info); - let new_account_info = System::::account(caller.clone()); - assert_eq!(new_account_info.nonce, 1337.into()); - }: _(RawOrigin::Signed(caller.clone())) - verify { - let account_info = System::::account(&caller); - assert_eq!(account_info.nonce, 0.into()); - } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_remark::()); - assert_ok!(test_benchmark_set_heap_pages::()); - assert_ok!(test_benchmark_set_code_without_checks::()); - assert_ok!(test_benchmark_set_changes_trie_config::()); - assert_ok!(test_benchmark_set_storage::()); - assert_ok!(test_benchmark_kill_storage::()); - assert_ok!(test_benchmark_kill_prefix::()); - assert_ok!(test_benchmark_suicide::()); - }); - } -} +impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 33255d7b50e19..d828fb22ff5ff 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
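The benchmarking rewrite above follows the same pattern as the pallets: `Trait` becomes `Config`, `Module` becomes `Pallet`, and the hand-written `mod tests` is replaced by `impl_benchmark_test_suite!`. Condensed from the `lib.rs` hunks above (an excerpt of the diff's own code, not a standalone crate):

```rust
use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller};
use frame_support::{traits::Get, weights::DispatchClass};
use frame_system::RawOrigin;

benchmarks! {
	// No more `_ { }` common block: components (`b`) are declared inline,
	// and attributes such as `#[skip_meta]` attach to individual benchmarks.
	remark {
		let b in 0 .. *T::BlockLength::get().max.get(DispatchClass::Normal) as u32;
		let remark_message = vec![1; b as usize];
		let caller = whitelisted_caller();
	}: _(RawOrigin::Signed(caller), remark_message)
}

// Generates the `#[test]` functions that used to be written by hand.
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
```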
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,38 +20,29 @@ #![cfg(test)] use sp_runtime::traits::IdentityLookup; -use frame_support::{ - impl_outer_origin, - dispatch::{Dispatchable, DispatchInfo, PostDispatchInfo}, -}; type AccountId = u64; type AccountIndex = u32; type BlockNumber = u64; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - -#[derive(Debug, codec::Encode, codec::Decode)] -pub struct Call; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; -impl Dispatchable for Call { - type Origin = (); - type Trait = (); - type Info = DispatchInfo; - type PostInfo = PostDispatchInfo; - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, } -} +); -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; - -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; @@ -61,24 +52,19 @@ impl frame_system::Trait for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = (); + type Event = Event; type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl crate::Trait for Test {} +impl crate::Config for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index d00094364e3e8..fce29612b4d8c 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-rpc-runtime-api" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,8 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } [features] default = ["std"] diff --git a/frame/system/rpc/runtime-api/src/lib.rs b/frame/system/rpc/runtime-api/src/lib.rs index 0ead94aabe016..319883c36d748 100644 --- 
a/frame/system/rpc/runtime-api/src/lib.rs +++ b/frame/system/rpc/runtime-api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/frame/system/src/default_weights.rs b/frame/system/src/default_weights.rs deleted file mode 100644 index 8b0c17a285157..0000000000000 --- a/frame/system/src/default_weights.rs +++ /dev/null @@ -1,55 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 - -#![allow(unused_parens)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn remark(_b: u32) -> Weight { - (1305000 as Weight) - } - fn set_heap_pages() -> Weight { - (2023000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn set_changes_trie_config() -> Weight { - (10026000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn set_storage(i: u32, ) -> Weight { - (0 as Weight) - .saturating_add((656000 as Weight).saturating_mul(i as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) - } - fn kill_storage(i: u32, ) -> Weight { - (4327000 as Weight) - .saturating_add((478000 as Weight).saturating_mul(i as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) - } - fn kill_prefix(p: u32, ) -> Weight { - (8349000 as Weight) - .saturating_add((838000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) - } - fn suicide() -> Weight { - (29247000 as Weight) - } -} diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index d0a346519ca23..6f409d5d3d4ad 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,18 +15,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Encode, Decode}; -use crate::{Trait, Module}; +use crate::{Config, Pallet}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; use sp_runtime::{ traits::{SignedExtension, Zero}, transaction_validity::TransactionValidityError, }; /// Genesis hash check to provide replay protection between different networks. 
-#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckGenesis(sp_std::marker::PhantomData); +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct CheckGenesis(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckGenesis { +impl sp_std::fmt::Debug for CheckGenesis { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckGenesis") @@ -38,21 +40,21 @@ impl sp_std::fmt::Debug for CheckGenesis { } } -impl CheckGenesis { +impl CheckGenesis { /// Creates new `SignedExtension` to check genesis hash. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckGenesis { +impl SignedExtension for CheckGenesis { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = T::Hash; type Pre = (); const IDENTIFIER: &'static str = "CheckGenesis"; fn additional_signed(&self) -> Result { - Ok(>::block_hash(T::BlockNumber::zero())) + Ok(>::block_hash(T::BlockNumber::zero())) } } diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 7e3f65d0324d7..69cca765efea9 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,29 +15,30 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Encode, Decode}; -use crate::{Trait, Module, BlockHash}; -use frame_support::StorageMap; +use crate::{BlockHash, Config, Pallet}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; use sp_runtime::{ generic::Era, - traits::{SignedExtension, DispatchInfoOf, SaturatedConversion}, + traits::{DispatchInfoOf, SaturatedConversion, SignedExtension}, transaction_validity::{ - ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, }; /// Check for transaction mortality. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckMortality(Era, sp_std::marker::PhantomData); +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct CheckMortality(Era, sp_std::marker::PhantomData); -impl CheckMortality { +impl CheckMortality { /// utility constructor. Used only in client/factory code. 
pub fn from(era: Era) -> Self { Self(era, sp_std::marker::PhantomData) } } -impl sp_std::fmt::Debug for CheckMortality { +impl sp_std::fmt::Debug for CheckMortality { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckMortality({:?})", self.0) @@ -49,7 +50,7 @@ impl sp_std::fmt::Debug for CheckMortality { } } -impl SignedExtension for CheckMortality { +impl SignedExtension for CheckMortality { type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = T::Hash; @@ -63,7 +64,7 @@ impl SignedExtension for CheckMortality { _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); + let current_u64 = >::block_number().saturated_into::(); let valid_till = self.0.death(current_u64); Ok(ValidTransaction { longevity: valid_till.saturating_sub(current_u64), @@ -72,12 +73,12 @@ impl SignedExtension for CheckMortality { } fn additional_signed(&self) -> Result { - let current_u64 = >::block_number().saturated_into::(); + let current_u64 = >::block_number().saturated_into::(); let n = self.0.birth(current_u64).saturated_into::(); if !>::contains_key(n) { Err(InvalidTransaction::AncientBirthBlock.into()) } else { - Ok(>::block_hash(n)) + Ok(>::block_hash(n)) } } } @@ -85,7 +86,7 @@ impl SignedExtension for CheckMortality { #[cfg(test)] mod tests { use super::*; - use crate::mock::{Test, new_test_ext, System, CALL}; + use crate::mock::{new_test_ext, System, Test, CALL}; use frame_support::weights::{DispatchClass, DispatchInfo, Pays}; use sp_core::H256; @@ -94,7 +95,10 @@ mod tests { new_test_ext().execute_with(|| { // future assert_eq!( - CheckMortality::::from(Era::mortal(4, 2)).additional_signed().err().unwrap(), + CheckMortality::::from(Era::mortal(4, 2)) + .additional_signed() + .err() + .unwrap(), InvalidTransaction::AncientBirthBlock.into(), ); @@ -108,10 +112,11 @@ mod tests { #[test] fn signed_ext_check_era_should_change_longevity() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; let len = 0_usize; let ext = ( - crate::CheckWeight::::default(), + crate::CheckWeight::::new(), CheckMortality::::from(Era::mortal(16, 256)), ); System::set_block_number(17); diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index e7316457aaffc..081a0efa3db71 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,17 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
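Every extension in this family (`CheckGenesis`, `CheckMortality`, `CheckNonce`, `CheckSpecVersion`, `CheckTxVersion`, `CheckWeight`) receives the same two-line treatment for the new metadata system: derive `TypeInfo`, and skip the runtime parameter `T` in the generated type information, since `T` only ever appears behind `PhantomData`. The recurring shape, with an illustrative struct name:

```rust
use codec::{Decode, Encode};
use scale_info::TypeInfo;

#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
#[scale_info(skip_type_params(T))] // `T` is phantom, so keep it out of the metadata
pub struct CheckSomething<T>(core::marker::PhantomData<T>);
```

The other recurring element is `additional_signed`: whatever it returns is appended to the signing payload without being shipped in the transaction, which is how `CheckGenesis` and `CheckSpecVersion` invalidate transactions across networks and runtime upgrades without storing anything extra on chain.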
-use codec::{Encode, Decode}; -use crate::Trait; -use frame_support::{ - weights::DispatchInfo, - StorageMap, -}; +use crate::Config; +use codec::{Decode, Encode}; +use frame_support::weights::DispatchInfo; +use scale_info::TypeInfo; use sp_runtime::{ - traits::{SignedExtension, DispatchInfoOf, Dispatchable, One}, + traits::{DispatchInfoOf, Dispatchable, One, SignedExtension}, transaction_validity::{ - ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, - TransactionLongevity, + InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError, + ValidTransaction, }, }; use sp_std::vec; @@ -34,17 +32,18 @@ use sp_std::vec; /// /// Note that this does not set any priority by default. Make sure that AT LEAST one of the signed /// extension sets some kind of priority upon validating transactions. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckNonce(#[codec(compact)] T::Index); +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct CheckNonce(#[codec(compact)] T::Index); -impl CheckNonce { +impl CheckNonce { /// utility constructor. Used only in client/factory code. pub fn from(nonce: T::Index) -> Self { Self(nonce) } } -impl sp_std::fmt::Debug for CheckNonce { +impl sp_std::fmt::Debug for CheckNonce { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckNonce({})", self.0) @@ -56,8 +55,9 @@ impl sp_std::fmt::Debug for CheckNonce { } } -impl SignedExtension for CheckNonce where - T::Call: Dispatchable +impl SignedExtension for CheckNonce +where + T::Call: Dispatchable, { type AccountId = T::AccountId; type Call = T::Call; @@ -65,7 +65,9 @@ impl SignedExtension for CheckNonce where type Pre = (); const IDENTIFIER: &'static str = "CheckNonce"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } fn pre_dispatch( self, @@ -76,13 +78,12 @@ impl SignedExtension for CheckNonce where ) -> Result<(), TransactionValidityError> { let mut account = crate::Account::::get(who); if self.0 != account.nonce { - return Err( - if self.0 < account.nonce { - InvalidTransaction::Stale - } else { - InvalidTransaction::Future - }.into() - ) + return Err(if self.0 < account.nonce { + InvalidTransaction::Stale + } else { + InvalidTransaction::Future + } + .into()) } account.nonce += T::Index::one(); crate::Account::::insert(who, account); @@ -122,27 +123,42 @@ impl SignedExtension for CheckNonce where #[cfg(test)] mod tests { use super::*; - use crate::mock::{Test, new_test_ext, CALL}; + use crate::mock::{new_test_ext, Test, CALL}; + use frame_support::{assert_noop, assert_ok}; #[test] fn signed_ext_check_nonce_works() { new_test_ext().execute_with(|| { - crate::Account::::insert(1, crate::AccountInfo { - nonce: 1, - refcount: 0, - data: 0, - }); + crate::Account::::insert( + 1, + crate::AccountInfo { + nonce: 1, + consumers: 0, + providers: 0, + sufficients: 0, + data: 0, + }, + ); let info = DispatchInfo::default(); let len = 0_usize; // stale - assert!(CheckNonce::(0).validate(&1, CALL, &info, len).is_err()); - assert!(CheckNonce::(0).pre_dispatch(&1, CALL, &info, len).is_err()); + assert_noop!( + CheckNonce::(0).validate(&1, CALL, &info, len), + InvalidTransaction::Stale + ); + assert_noop!( + CheckNonce::(0).pre_dispatch(&1, CALL, &info, len), + InvalidTransaction::Stale + ); // correct - 
assert!(CheckNonce::(1).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len).is_ok()); + assert_ok!(CheckNonce::(1).validate(&1, CALL, &info, len)); + assert_ok!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len)); // future - assert!(CheckNonce::(5).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(5).pre_dispatch(&1, CALL, &info, len).is_err()); + assert_ok!(CheckNonce::(5).validate(&1, CALL, &info, len)); + assert_noop!( + CheckNonce::(5).pre_dispatch(&1, CALL, &info, len), + InvalidTransaction::Future + ); }) } } diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs index 8dc4d8d9ceddc..0217aefae6b9d 100644 --- a/frame/system/src/extensions/check_spec_version.rs +++ b/frame/system/src/extensions/check_spec_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,18 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Trait, Module}; -use codec::{Encode, Decode}; -use sp_runtime::{ - traits::SignedExtension, - transaction_validity::TransactionValidityError, -}; +use crate::{Config, Pallet}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionValidityError}; /// Ensure the runtime version registered in the transaction is the same as at present. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct CheckSpecVersion(sp_std::marker::PhantomData); +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct CheckSpecVersion(sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for CheckSpecVersion { +impl sp_std::fmt::Debug for CheckSpecVersion { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "CheckSpecVersion") @@ -38,21 +37,21 @@ impl sp_std::fmt::Debug for CheckSpecVersion { } } -impl CheckSpecVersion { +impl CheckSpecVersion { /// Create new `SignedExtension` to check runtime version. pub fn new() -> Self { Self(sp_std::marker::PhantomData) } } -impl SignedExtension for CheckSpecVersion { +impl SignedExtension for CheckSpecVersion { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckSpecVersion"; fn additional_signed(&self) -> Result { - Ok(>::runtime_version().spec_version) + Ok(>::runtime_version().spec_version) } } diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs index ee6f3349365b9..9418d3ff5d937 100644 --- a/frame/system/src/extensions/check_tx_version.rs +++ b/frame/system/src/extensions/check_tx_version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,18 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
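The reworked `CheckNonce` tests above encode its two-level contract: `validate` (pool time) accepts a nonce that is too high, so the transaction can wait for its missing predecessors via the `requires`/`provides` tags, while `pre_dispatch` (inclusion time) demands an exact match. The decision rule, extracted into plain integers as a sketch (the real code compares `T::Index` values):

```rust
use sp_runtime::transaction_validity::InvalidTransaction;

// `strict` is true at pre_dispatch, false at pool validation.
fn check_nonce(tx: u64, account: u64, strict: bool) -> Result<(), InvalidTransaction> {
	if tx < account {
		Err(InvalidTransaction::Stale) // already used
	} else if tx > account && strict {
		Err(InvalidTransaction::Future) // a gap: only the pool may hold it
	} else {
		Ok(())
	}
}
```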
-use crate::{Trait, Module};
-use codec::{Encode, Decode};
-use sp_runtime::{
-	traits::SignedExtension,
-	transaction_validity::TransactionValidityError,
-};
+use crate::{Config, Pallet};
+use codec::{Decode, Encode};
+use scale_info::TypeInfo;
+use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionValidityError};
 
 /// Ensure the transaction version registered in the transaction is the same as at present.
-#[derive(Encode, Decode, Clone, Eq, PartialEq)]
-pub struct CheckTxVersion<T: Trait + Send + Sync>(sp_std::marker::PhantomData<T>);
+#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct CheckTxVersion<T: Config + Send + Sync>(sp_std::marker::PhantomData<T>);
 
-impl<T: Trait + Send + Sync> sp_std::fmt::Debug for CheckTxVersion<T> {
+impl<T: Config + Send + Sync> sp_std::fmt::Debug for CheckTxVersion<T> {
 	#[cfg(feature = "std")]
 	fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
 		write!(f, "CheckTxVersion")
@@ -38,21 +37,21 @@
 	}
 }
 
-impl<T: Trait + Send + Sync> CheckTxVersion<T> {
+impl<T: Config + Send + Sync> CheckTxVersion<T> {
 	/// Create new `SignedExtension` to check transaction version.
 	pub fn new() -> Self {
 		Self(sp_std::marker::PhantomData)
 	}
 }
 
-impl<T: Trait + Send + Sync> SignedExtension for CheckTxVersion<T> {
+impl<T: Config + Send + Sync> SignedExtension for CheckTxVersion<T> {
 	type AccountId = T::AccountId;
-	type Call = <T as Trait>::Call;
+	type Call = <T as Config>::Call;
 	type AdditionalSigned = u32;
 	type Pre = ();
 	const IDENTIFIER: &'static str = "CheckTxVersion";
 
 	fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
-		Ok(<Module<T>>::runtime_version().transaction_version)
+		Ok(<Pallet<T>>::runtime_version().transaction_version)
 	}
 }
diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs
index 30052468fe253..92dc7382fa2d5 100644
--- a/frame/system/src/extensions/check_weight.rs
+++ b/frame/system/src/extensions/check_weight.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,73 +15,40 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::{Trait, Module};
-use codec::{Encode, Decode};
+use crate::{limits::BlockWeights, Config, Pallet};
+use codec::{Decode, Encode};
+use frame_support::{
+	traits::Get,
+	weights::{priority::FrameTransactionPriority, DispatchClass, DispatchInfo, PostDispatchInfo},
+};
+use scale_info::TypeInfo;
 use sp_runtime::{
-	traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Printable},
+	traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension},
 	transaction_validity::{
-		ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity,
-		TransactionPriority,
+		InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError,
+		ValidTransaction,
 	},
-	Perbill, DispatchResult,
-};
-use frame_support::{
-	traits::{Get},
-	weights::{PostDispatchInfo, DispatchInfo, DispatchClass, priority::FrameTransactionPriority},
-	StorageValue,
+	DispatchResult,
 };
 
 /// Block resource (weight) limit check.
-#[derive(Encode, Decode, Clone, Eq, PartialEq, Default)] -pub struct CheckWeight(sp_std::marker::PhantomData); +#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct CheckWeight(sp_std::marker::PhantomData); -impl CheckWeight where - T::Call: Dispatchable +impl CheckWeight +where + T::Call: Dispatchable, { - /// Get the quota ratio of each dispatch class type. This indicates that all operational and mandatory - /// dispatches can use the full capacity of any resource, while user-triggered ones can consume - /// a portion. - fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { - match class { - DispatchClass::Operational | DispatchClass::Mandatory - => ::one(), - DispatchClass::Normal => T::AvailableBlockRatio::get(), - } - } - - /// Checks if the current extrinsic does not exceed `MaximumExtrinsicWeight` limit. + /// Checks if the current extrinsic does not exceed the maximum weight a single extrinsic + /// with given `DispatchClass` can have. fn check_extrinsic_weight( info: &DispatchInfoOf, ) -> Result<(), TransactionValidityError> { - match info.class { - // Mandatory transactions are included in a block unconditionally, so - // we don't verify weight. - DispatchClass::Mandatory => Ok(()), - // Normal transactions must not exceed `MaximumExtrinsicWeight`. - DispatchClass::Normal => { - let maximum_weight = T::MaximumExtrinsicWeight::get(); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > maximum_weight { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } - }, - // For operational transactions we make sure it doesn't exceed - // the space alloted for `Operational` class. - DispatchClass::Operational => { - let maximum_weight = T::MaximumBlockWeight::get(); - let operational_limit = - Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight; - let operational_limit = - operational_limit.saturating_sub(T::BlockExecutionWeight::get()); - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - if extrinsic_weight > operational_limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(()) - } - }, + let max = T::BlockWeights::get().get(info.class).max_extrinsic; + match max { + Some(max) if info.weight > max => Err(InvalidTransaction::ExhaustsResources.into()), + _ => Ok(()), } } @@ -90,51 +57,10 @@ impl CheckWeight where /// Upon successes, it returns the new block weight as a `Result`. fn check_block_weight( info: &DispatchInfoOf, - ) -> Result { - let maximum_weight = T::MaximumBlockWeight::get(); - let mut all_weight = Module::::block_weight(); - match info.class { - // If we have a dispatch that must be included in the block, it ignores all the limits. - DispatchClass::Mandatory => { - let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get()); - all_weight.add(extrinsic_weight, DispatchClass::Mandatory); - Ok(all_weight) - }, - // If we have a normal dispatch, we follow all the normal rules and limits. 
@@ -90,51 +57,10 @@ impl<T: Trait + Send + Sync> CheckWeight<T> where
 	/// Upon successes, it returns the new block weight as a `Result`.
 	fn check_block_weight(
 		info: &DispatchInfoOf<T::Call>,
-	) -> Result<crate::weights::ExtrinsicsWeight, TransactionValidityError> {
-		let maximum_weight = T::MaximumBlockWeight::get();
-		let mut all_weight = Module::<T>::block_weight();
-		match info.class {
-			// If we have a dispatch that must be included in the block, it ignores all the limits.
-			DispatchClass::Mandatory => {
-				let extrinsic_weight = info.weight.saturating_add(T::ExtrinsicBaseWeight::get());
-				all_weight.add(extrinsic_weight, DispatchClass::Mandatory);
-				Ok(all_weight)
-			},
-			// If we have a normal dispatch, we follow all the normal rules and limits.
-			DispatchClass::Normal => {
-				let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight;
-				let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get())
-					.ok_or(InvalidTransaction::ExhaustsResources)?;
-				all_weight.checked_add(extrinsic_weight, DispatchClass::Normal)
-					.map_err(|_| InvalidTransaction::ExhaustsResources)?;
-				if all_weight.get(DispatchClass::Normal) > normal_limit {
-					Err(InvalidTransaction::ExhaustsResources.into())
-				} else {
-					Ok(all_weight)
-				}
-			},
-			// If we have an operational dispatch, allow it if we have not used our full
-			// "operational space" (independent of existing fullness).
-			DispatchClass::Operational => {
-				let operational_limit = Self::get_dispatch_limit_ratio(DispatchClass::Operational) * maximum_weight;
-				let normal_limit = Self::get_dispatch_limit_ratio(DispatchClass::Normal) * maximum_weight;
-				let operational_space = operational_limit.saturating_sub(normal_limit);
-
-				let extrinsic_weight = info.weight.checked_add(T::ExtrinsicBaseWeight::get())
-					.ok_or(InvalidTransaction::ExhaustsResources)?;
-				all_weight.checked_add(extrinsic_weight, DispatchClass::Operational)
-					.map_err(|_| InvalidTransaction::ExhaustsResources)?;
-
-				// If it would fit in normally, its okay
-				if all_weight.total() <= maximum_weight ||
-					// If we have not used our operational space
-					all_weight.get(DispatchClass::Operational) <= operational_space {
-					Ok(all_weight)
-				} else {
-					Err(InvalidTransaction::ExhaustsResources.into())
-				}
-			}
-		}
+	) -> Result<crate::ConsumedWeight, TransactionValidityError> {
+		let maximum_weight = T::BlockWeights::get();
+		let all_weight = Pallet::<T>::block_weight();
+		calculate_consumed_weight::<T::Call>(maximum_weight, all_weight, info)
 	}
 
 	/// Checks if the current extrinsic can fit into the block with respect to block length limits.
@@ -144,27 +70,25 @@ impl<T: Trait + Send + Sync> CheckWeight<T> where
 		info: &DispatchInfoOf<T::Call>,
 		len: usize,
 	) -> Result<u32, TransactionValidityError> {
-		let current_len = Module::<T>::all_extrinsics_len();
-		let maximum_len = T::MaximumBlockLength::get();
-		let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_len;
+		let length_limit = T::BlockLength::get();
+		let current_len = Pallet::<T>::all_extrinsics_len();
 		let added_len = len as u32;
 		let next_len = current_len.saturating_add(added_len);
-		if next_len > limit {
+		if next_len > *length_limit.max.get(info.class) {
 			Err(InvalidTransaction::ExhaustsResources.into())
 		} else {
 			Ok(next_len)
 		}
 	}
 
-	/// get the priority of an extrinsic denoted by `info`.
+	/// Get the priority of an extrinsic denoted by `info`.
 	///
 	/// Operational transaction will be given a fixed initial amount to be fairly distinguished from
 	/// the normal ones.
 	fn get_priority(info: &DispatchInfoOf<T::Call>) -> TransactionPriority {
 		match info.class {
 			// Normal transaction.
-			DispatchClass::Normal =>
-				FrameTransactionPriority::Normal(info.weight.into()).into(),
+			DispatchClass::Normal => FrameTransactionPriority::Normal(info.weight.into()).into(),
 			// Don't use up the whole priority space, to allow things like `tip` to be taken into
 			// account as well.
 			DispatchClass::Operational =>
@@ -190,18 +114,15 @@ impl<T: Trait + Send + Sync> CheckWeight<T> where
 		let next_weight = Self::check_block_weight(info)?;
 		Self::check_extrinsic_weight(info)?;
 
-		crate::AllExtrinsicsLen::put(next_len);
-		crate::BlockWeight::put(next_weight);
+		crate::AllExtrinsicsLen::<T>::put(next_len);
+		crate::BlockWeight::<T>::put(next_weight);
 
 		Ok(())
 	}
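`do_pre_dispatch` above runs all of its checks before writing either counter back, so a failing extrinsic leaves `AllExtrinsicsLen` and `BlockWeight` untouched. A dependency-free sketch of that commit-last shape, with hypothetical plain types:

struct Block { len: u32, weight: u64 }

// Every check must pass before either total is persisted; a failing call
// must not mutate the running block state.
fn pre_dispatch(
    block: &mut Block,
    tx_len: u32,
    tx_weight: u64,
    max_len: u32,
    max_weight: u64,
) -> Result<(), &'static str> {
    let next_len = block.len.saturating_add(tx_len);
    if next_len > max_len {
        return Err("ExhaustsResources");
    }
    let next_weight = block.weight.saturating_add(tx_weight);
    if next_weight > max_weight {
        return Err("ExhaustsResources");
    }
    // Only now commit, mirroring the `AllExtrinsicsLen`/`BlockWeight` puts.
    block.len = next_len;
    block.weight = next_weight;
    Ok(())
}

fn main() {
    let mut block = Block { len: 0, weight: 0 };
    assert!(pre_dispatch(&mut block, 10, 100, 1024, 1_000_000).is_ok());
    assert!(pre_dispatch(&mut block, 2000, 1, 1024, 1_000_000).is_err());
    assert_eq!(block.len, 10); // the failed call left the totals unchanged
}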
 	/// Do the validate checks. This can be applied to both signed and unsigned.
 	///
 	/// It only checks that the block weight and length limit will not exceed.
-	pub fn do_validate(
-		info: &DispatchInfoOf<T::Call>,
-		len: usize,
-	) -> TransactionValidity {
+	pub fn do_validate(info: &DispatchInfoOf<T::Call>, len: usize) -> TransactionValidity {
 		// ignore the next length. If they return `Ok`, then it is below the limit.
 		let _ = Self::check_block_length(info, len)?;
 		// during validation we skip block limit check. Since the `validate_transaction`
@@ -213,8 +134,56 @@ impl<T: Trait + Send + Sync> CheckWeight<T> where
 	}
 }
 
-impl<T: Trait + Send + Sync> SignedExtension for CheckWeight<T> where
-	T::Call: Dispatchable<Info=DispatchInfo, PostInfo=PostDispatchInfo>
+pub fn calculate_consumed_weight<Call>(
+	maximum_weight: BlockWeights,
+	mut all_weight: crate::ConsumedWeight,
+	info: &DispatchInfoOf<Call>,
+) -> Result<crate::ConsumedWeight, TransactionValidityError>
+where
+	Call: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	let extrinsic_weight =
+		info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic);
+	let limit_per_class = maximum_weight.get(info.class);
+
+	// add the weight. If class is unlimited, use saturating add instead of checked one.
+	if limit_per_class.max_total.is_none() && limit_per_class.reserved.is_none() {
+		all_weight.add(extrinsic_weight, info.class)
+	} else {
+		all_weight
+			.checked_add(extrinsic_weight, info.class)
+			.map_err(|_| InvalidTransaction::ExhaustsResources)?;
+	}
+
+	let per_class = *all_weight.get(info.class);
+
+	// Check if we don't exceed per-class allowance
+	match limit_per_class.max_total {
+		Some(max) if per_class > max => return Err(InvalidTransaction::ExhaustsResources.into()),
+		// There is no `max_total` limit (`None`),
+		// or we are below the limit.
+		_ => {},
+	}
+
+	// In cases total block weight is exceeded, we need to fall back
+	// to `reserved` pool if there is any.
+	if all_weight.total() > maximum_weight.max_block {
+		match limit_per_class.reserved {
+			// We are over the limit in reserved pool.
+			Some(reserved) if per_class > reserved =>
+				return Err(InvalidTransaction::ExhaustsResources.into()),
+			// There is either no limit in reserved pool (`None`),
+			// or we are below the limit.
+			_ => {},
+		}
+	}
+
+	Ok(all_weight)
+}
+
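A dependency-free sketch of the three-step rule in `calculate_consumed_weight`, using plain `u64` weights and hypothetical names: add the weight, enforce the per-class `max_total`, then fall back to the `reserved` pool once the whole block is full (`None` meaning unlimited in both fields):

fn add_weight(
    class_used: u64,
    block_used: u64,
    added: u64,
    max_total: Option<u64>,
    reserved: Option<u64>,
    max_block: u64,
) -> Result<(u64, u64), &'static str> {
    let class_used = class_used.saturating_add(added);
    let block_used = block_used.saturating_add(added);
    // Per-class allowance first; `None` means the class has no own cap.
    if matches!(max_total, Some(max) if class_used > max) {
        return Err("ExhaustsResources");
    }
    // Block is full: the class may only continue inside its reserved pool.
    if block_used > max_block {
        if matches!(reserved, Some(r) if class_used > r) {
            return Err("ExhaustsResources");
        }
    }
    Ok((class_used, block_used))
}

fn main() {
    // Fully unlimited class: only saturates, never errors.
    assert!(add_weight(0, 0, u64::MAX, None, None, 100).is_ok());
    // Class cap enforced even when the block has room.
    assert!(add_weight(95, 95, 10, Some(100), None, 1000).is_err());
    // Block full, but the class still fits its reserved pool.
    assert!(add_weight(0, 1000, 5, None, Some(5), 1000).is_ok());
    // Block full and reserved pool exhausted.
    assert!(add_weight(5, 1000, 1, None, Some(5), 1000).is_err());
}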
+impl<T: Config + Send + Sync> SignedExtension for CheckWeight<T>
+where
+	T::Call: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
 {
 	type AccountId = T::AccountId;
 	type Call = T::Call;
@@ -222,7 +191,9 @@ impl<T: Trait + Send + Sync> SignedExtension for CheckWeight<T> where
 	type Pre = ();
 	const IDENTIFIER: &'static str = "CheckWeight";
 
-	fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) }
+	fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> {
+		Ok(())
+	}
 
 	fn pre_dispatch(
 		self,
@@ -277,15 +248,13 @@ impl<T: Trait + Send + Sync> SignedExtension for CheckWeight<T> where
 		// to them actually being useful. Block producers are thus not allowed to include mandatory
 		// extrinsics that result in error.
 		if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) {
-			"Bad mandantory".print();
-			e.print();
-
+			log::error!(target: "runtime::system", "Bad mandatory: {:?}", e);
 			Err(InvalidTransaction::BadMandatory)?
 		}
 
 		let unspent = post_info.calc_unspent(info);
 		if unspent > 0 {
-			crate::BlockWeight::mutate(|current_weight| {
+			crate::BlockWeight::<T>::mutate(|current_weight| {
 				current_weight.sub(unspent, info.class);
 			})
 		}
@@ -294,7 +263,7 @@ impl<T: Trait + Send + Sync> SignedExtension for CheckWeight<T> where
 	}
 }
 
-impl<T: Trait + Send + Sync> sp_std::fmt::Debug for CheckWeight<T> {
+impl<T: Config + Send + Sync> sp_std::fmt::Debug for CheckWeight<T> {
 	#[cfg(feature = "std")]
 	fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
 		write!(f, "CheckWeight")
@@ -309,18 +278,33 @@ impl<T: Trait + Send + Sync> sp_std::fmt::Debug for CheckWeight<T> {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::{BlockWeight, AllExtrinsicsLen};
-	use crate::mock::{Test, CALL, new_test_ext, System};
+	use crate::{
+		mock::{new_test_ext, System, Test, CALL},
+		AllExtrinsicsLen, BlockWeight,
+	};
+	use frame_support::{
+		assert_err, assert_ok,
+		weights::{Pays, Weight},
+	};
 	use sp_std::marker::PhantomData;
-	use frame_support::{assert_ok, assert_noop};
-	use frame_support::weights::{Weight, Pays};
+
+	fn block_weights() -> crate::limits::BlockWeights {
+		<Test as crate::Config>::BlockWeights::get()
+	}
 
 	fn normal_weight_limit() -> Weight {
-		<Test as Trait>::AvailableBlockRatio::get() * <Test as Trait>::MaximumBlockWeight::get()
+		block_weights()
+			.get(DispatchClass::Normal)
+			.max_total
+			.unwrap_or_else(|| block_weights().max_block)
+	}
+
+	fn block_weight_limit() -> Weight {
+		block_weights().max_block
 	}
 
 	fn normal_length_limit() -> u32 {
-		<Test as Trait>::AvailableBlockRatio::get() * <Test as Trait>::MaximumBlockLength::get()
+		*<Test as Config>::BlockLength::get().max.get(DispatchClass::Normal)
 	}
 
 	#[test]
@@ -341,7 +325,7 @@ mod tests {
 		check(|max, len| {
 			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(max, len));
 			assert_eq!(System::block_weight().total(), Weight::max_value());
-			assert!(System::block_weight().total() > <Test as Trait>::MaximumBlockWeight::get());
+			assert!(System::block_weight().total() > block_weight_limit());
 		});
 		check(|max, len| {
 			assert_ok!(CheckWeight::<Test>::do_validate(max, len));
@@ -352,13 +336,12 @@ mod tests {
 	fn normal_extrinsic_limited_by_maximum_extrinsic_weight() {
 		new_test_ext().execute_with(|| {
 			let max = DispatchInfo {
-				weight: <Test as Trait>::MaximumExtrinsicWeight::get() + 1,
+				weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + 1,
 				class: DispatchClass::Normal,
 				..Default::default()
 			};
 			let len = 0_usize;
-
-			assert_noop!(
+			assert_err!(
 				CheckWeight::<Test>::do_validate(&max, len),
 				InvalidTransaction::ExhaustsResources
 			);
 
 	#[test]
 	fn operational_extrinsic_limited_by_operational_space_limit() {
 		new_test_ext().execute_with(|| {
-			let operational_limit = CheckWeight::<Test>::get_dispatch_limit_ratio(
-				DispatchClass::Operational
-			) * <Test as Trait>::MaximumBlockWeight::get();
-			let base_weight = <Test as Trait>::ExtrinsicBaseWeight::get();
-			let block_base = <Test as Trait>::BlockExecutionWeight::get();
-
-			let weight = operational_limit - base_weight - block_base;
-			let okay = DispatchInfo {
-				weight,
-				class: DispatchClass::Operational,
-				..Default::default()
-			};
+			let weights = block_weights();
+			let operational_limit = weights
+				.get(DispatchClass::Operational)
+				.max_total
+				.unwrap_or_else(|| weights.max_block);
+			let base_weight = weights.get(DispatchClass::Normal).base_extrinsic;
+
+			let weight = operational_limit - base_weight;
+			let okay =
+				DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() };
 			let max = DispatchInfo {
 				weight: weight + 1,
 				class: DispatchClass::Operational,
 				..Default::default()
 			};
 			let len = 0_usize;
 
@@ -394,7 +375,7 @@ mod tests {
 					..Default::default()
 				})
 			);
-			assert_noop!(
+			assert_err!(
 				CheckWeight::<Test>::do_validate(&max, len),
 				InvalidTransaction::ExhaustsResources
 			);
 		});
 	}
 
@@ -406,7 +387,7 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal);
 			assert_eq!(System::block_weight().total(), Weight::max_value());
-			assert!(System::block_weight().total() > <Test as Trait>::MaximumBlockWeight::get());
+			assert!(System::block_weight().total() > block_weight_limit());
 		});
 	}
 
@@ -419,15 +400,19 @@ mod tests {
 			// So normal extrinsic can be 758 weight (-5 for base extrinsic weight)
 			// And Operational can be 256 to produce a full block (-5 for base)
 			let max_normal = DispatchInfo { weight: 753, ..Default::default() };
-			let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() };
+			let rest_operational = DispatchInfo {
+				weight: 251,
+				class: DispatchClass::Operational,
+				..Default::default()
+			};
 
 			let len = 0_usize;
 
 			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(&max_normal, len));
 			assert_eq!(System::block_weight().total(), 768);
 			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(&rest_operational, len));
-			assert_eq!(<Test as Trait>::MaximumBlockWeight::get(), 1024);
-			assert_eq!(System::block_weight().total(), <Test as Trait>::MaximumBlockWeight::get());
+			assert_eq!(block_weight_limit(), 1024);
+			assert_eq!(System::block_weight().total(), block_weight_limit());
 			// Checking single extrinsic should not take current block weight into account.
 			assert_eq!(CheckWeight::<Test>::check_extrinsic_weight(&rest_operational), Ok(()));
 		});
 	}
 
@@ -438,7 +423,11 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			// We switch the order of `full_block_with_normal_and_operational`
 			let max_normal = DispatchInfo { weight: 753, ..Default::default() };
-			let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() };
+			let rest_operational = DispatchInfo {
+				weight: 251,
+				class: DispatchClass::Operational,
+				..Default::default()
+			};
 
 			let len = 0_usize;
 
@@ -446,8 +435,8 @@ mod tests {
 			// Extra 15 here from block execution + base extrinsic weight
 			assert_eq!(System::block_weight().total(), 266);
 			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(&max_normal, len));
-			assert_eq!(<Test as Trait>::MaximumBlockWeight::get(), 1024);
-			assert_eq!(System::block_weight().total(), <Test as Trait>::MaximumBlockWeight::get());
+			assert_eq!(block_weight_limit(), 1024);
+			assert_eq!(System::block_weight().total(), block_weight_limit());
 		});
 	}
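The arithmetic these two tests rely on, spelled out under the mock's apparent constants (a base extrinsic weight of 5 and a block execution weight of 10, consistent with the "Extra 15" comment above):

fn main() {
    let (base_block, base_extrinsic, max_block) = (10u64, 5u64, 1024u64);
    let normal = 753 + base_extrinsic; // 758 consumed by the normal dispatch
    assert_eq!(base_block + normal, 768); // the 75% normal allowance is exactly full
    let operational = 251 + base_extrinsic; // 256
    assert_eq!(base_block + normal + operational, max_block); // the block is full
}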
@@ -456,18 +445,24 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			// An on_initialize takes up the whole block! (Every time!)
 			System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Mandatory);
-			let dispatch_normal = DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() };
-			let dispatch_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() };
+			let dispatch_normal =
+				DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() };
+			let dispatch_operational = DispatchInfo {
+				weight: 251,
+				class: DispatchClass::Operational,
+				..Default::default()
+			};
 			let len = 0_usize;
 
-			assert_noop!(
+			assert_err!(
 				CheckWeight::<Test>::do_pre_dispatch(&dispatch_normal, len),
 				InvalidTransaction::ExhaustsResources
 			);
-			// Thank goodness we can still do an operational transaction to possibly save the blockchain.
+			// Thank goodness we can still do an operational transaction to possibly save the
+			// blockchain.
 			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(&dispatch_operational, len));
 			// Not too much though
-			assert_noop!(
+			assert_err!(
 				CheckWeight::<Test>::do_pre_dispatch(&dispatch_operational, len),
 				InvalidTransaction::ExhaustsResources
 			);
@@ -480,32 +475,47 @@ mod tests {
 	fn signed_ext_check_weight_works_operational_tx() {
 		new_test_ext().execute_with(|| {
 			let normal = DispatchInfo { weight: 100, ..Default::default() };
-			let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes };
+			let op = DispatchInfo {
+				weight: 100,
+				class: DispatchClass::Operational,
+				pays_fee: Pays::Yes,
+			};
 			let len = 0_usize;
 			let normal_limit = normal_weight_limit();
 
 			// given almost full block
-			BlockWeight::mutate(|current_weight| {
-				current_weight.put(normal_limit, DispatchClass::Normal)
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(normal_limit, DispatchClass::Normal)
 			});
 			// will not fit.
-			assert!(CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err());
+			assert_err!(
				CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &normal, len),
+				InvalidTransaction::ExhaustsResources
+			);
 			// will fit.
-			assert!(CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok());
+			assert_ok!(CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &op, len));
 
 			// likewise for length limit.
 			let len = 100_usize;
-			AllExtrinsicsLen::put(normal_length_limit());
-			assert!(CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err());
-			assert!(CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok());
+			AllExtrinsicsLen::<Test>::put(normal_length_limit());
+			assert_err!(
+				CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &normal, len),
+				InvalidTransaction::ExhaustsResources
+			);
+			assert_ok!(CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &op, len));
 		})
 	}
 
 	#[test]
 	fn signed_ext_check_weight_works() {
 		new_test_ext().execute_with(|| {
-			let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes };
-			let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes };
+			let normal =
+				DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes };
+			let op = DispatchInfo {
+				weight: 100,
+				class: DispatchClass::Operational,
+				pays_fee: Pays::Yes,
+			};
 			let len = 0_usize;
 
 			let priority = CheckWeight::<Test>(PhantomData)
 				.validate(&1, CALL, &normal, len)
 				.unwrap()
 				.priority;
 			assert_eq!(priority, 100);
 
-			let priority = CheckWeight::<Test>(PhantomData)
-				.validate(&1, CALL, &op, len)
-				.unwrap()
-				.priority;
+			let priority =
+				CheckWeight::<Test>(PhantomData).validate(&1, CALL, &op, len).unwrap().priority;
 			assert_eq!(priority, frame_support::weights::priority::LIMIT + 100);
 		})
 	}
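The priority split exercised above keeps normal and operational transactions in disjoint bands. A sketch with a hypothetical `LIMIT` standing in for `frame_support::weights::priority::LIMIT` (the illustrative value here is an assumption, not the library's actual constant):

const LIMIT: u64 = u64::MAX / 2;

// Normal priority scales with weight; operational priority starts above the
// whole normal band, so any operational transaction outranks any normal one.
fn priority(weight: u64, operational: bool) -> u64 {
    if operational { LIMIT + weight } else { weight }
}

fn main() {
    assert_eq!(priority(100, false), 100);
    assert_eq!(priority(100, true), LIMIT + 100);
    assert!(priority(1, true) > priority(u32::MAX as u64, false));
}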
@@ -528,9 +536,13 @@ mod tests {
 			let normal = DispatchInfo::default();
 			let normal_limit = normal_weight_limit() as usize;
 			let reset_check_weight = |tx, s, f| {
-				AllExtrinsicsLen::put(0);
+				AllExtrinsicsLen::<Test>::put(0);
 				let r = CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, tx, s);
-				if f { assert!(r.is_err()) } else { assert!(r.is_ok()) }
+				if f {
+					assert!(r.is_err())
+				} else {
+					assert!(r.is_ok())
+				}
 			};
 
 			reset_check_weight(&normal, normal_limit - 1, false);
 			reset_check_weight(&normal, normal_limit + 1, true);
 
 			// Operational ones don't have this limit.
-			let op = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes };
+			let op =
+				DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes };
 			reset_check_weight(&op, normal_limit, false);
 			reset_check_weight(&op, normal_limit + 100, false);
 			reset_check_weight(&op, 1024, false);
@@ -546,28 +559,28 @@ mod tests {
 		})
 	}
 
-
 	#[test]
 	fn signed_ext_check_weight_works_normal_tx() {
 		new_test_ext().execute_with(|| {
 			let normal_limit = normal_weight_limit();
 			let small = DispatchInfo { weight: 100, ..Default::default() };
-			let medium = DispatchInfo {
-				weight: normal_limit - <Test as Trait>::ExtrinsicBaseWeight::get(),
-				..Default::default()
-			};
-			let big = DispatchInfo {
-				weight: normal_limit - <Test as Trait>::ExtrinsicBaseWeight::get() + 1,
-				..Default::default()
-			};
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+			let medium =
+				DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() };
+			let big =
+				DispatchInfo { weight: normal_limit - base_extrinsic + 1, ..Default::default() };
 			let len = 0_usize;
 
 			let reset_check_weight = |i, f, s| {
-				BlockWeight::mutate(|current_weight| {
-					current_weight.put(s, DispatchClass::Normal)
+				BlockWeight::<Test>::mutate(|current_weight| {
+					current_weight.set(s, DispatchClass::Normal)
 				});
 				let r = CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, i, len);
-				if f { assert!(r.is_err()) } else { assert!(r.is_ok()) }
+				if f {
+					assert!(r.is_err())
+				} else {
+					assert!(r.is_ok())
+				}
 			};
 
 			reset_check_weight(&small, false, 0);
@@ -581,28 +594,22 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			// This is half of the max block weight
 			let info = DispatchInfo { weight: 512, ..Default::default() };
-			let post_info = PostDispatchInfo {
-				actual_weight: Some(128),
-				pays_fee: Default::default(),
-			};
+			let post_info =
+				PostDispatchInfo { actual_weight: Some(128), pays_fee: Default::default() };
 			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
 
 			// We allow 75% for normal transaction, so we put 25% - extrinsic base weight
-			BlockWeight::mutate(|current_weight| {
-				current_weight.put(256 - <Test as Trait>::ExtrinsicBaseWeight::get(), DispatchClass::Normal)
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(0, DispatchClass::Mandatory);
+				current_weight.set(256 - base_extrinsic, DispatchClass::Normal);
 			});
 
 			let pre = CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap();
-			assert_eq!(BlockWeight::get().total(), info.weight + 256);
+			assert_eq!(BlockWeight::<Test>::get().total(), info.weight + 256);
 
-			assert!(
-				CheckWeight::<Test>::post_dispatch(pre, &info, &post_info, len, &Ok(()))
-					.is_ok()
-			);
-			assert_eq!(
-				BlockWeight::get().total(),
-				post_info.actual_weight.unwrap() + 256,
-			);
+			assert_ok!(CheckWeight::<Test>::post_dispatch(pre, &info, &post_info, len, &Ok(())));
+			assert_eq!(BlockWeight::<Test>::get().total(), post_info.actual_weight.unwrap() + 256);
 		})
 	}
 
@@ -610,29 +617,25 @@ mod tests {
 	fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() {
 		new_test_ext().execute_with(|| {
 			let info = DispatchInfo { weight: 512, ..Default::default() };
-			let post_info = PostDispatchInfo {
-				actual_weight: Some(700),
-				pays_fee: Default::default(),
-			};
+			let post_info =
+				PostDispatchInfo { actual_weight: Some(700), pays_fee: Default::default() };
 			let len = 0_usize;
 
-			BlockWeight::mutate(|current_weight| {
-				current_weight.put(128, DispatchClass::Normal)
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(0, DispatchClass::Mandatory);
+				current_weight.set(128, DispatchClass::Normal);
 			});
 
 			let pre = CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap();
 			assert_eq!(
-				BlockWeight::get().total(),
-				info.weight + 128 + <Test as Trait>::ExtrinsicBaseWeight::get(),
+				BlockWeight::<Test>::get().total(),
+				info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic,
 			);
 
-			assert!(
-				CheckWeight::<Test>::post_dispatch(pre, &info, &post_info, len, &Ok(()))
-					.is_ok()
-			);
+			assert_ok!(CheckWeight::<Test>::post_dispatch(pre, &info, &post_info, len, &Ok(())));
 			assert_eq!(
-				BlockWeight::get().total(),
-				info.weight + 128 + <Test as Trait>::ExtrinsicBaseWeight::get(),
+				BlockWeight::<Test>::get().total(),
+				info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic,
 			);
 		})
 	}
 
@@ -640,17 +643,86 @@ mod tests {
 	#[test]
 	fn zero_weight_extrinsic_still_has_base_weight() {
 		new_test_ext().execute_with(|| {
+			let weights = block_weights();
 			let free = DispatchInfo { weight: 0, ..Default::default() };
 			let len = 0_usize;
 
-			// Initial weight from `BlockExecutionWeight`
-			assert_eq!(System::block_weight().total(), <Test as Trait>::BlockExecutionWeight::get());
-			let r = CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &free, len);
-			assert!(r.is_ok());
+			// Initial weight from `weights.base_block`
+			assert_eq!(System::block_weight().total(), weights.base_block);
+			assert_ok!(CheckWeight::<Test>(PhantomData).pre_dispatch(&1, CALL, &free, len));
 			assert_eq!(
 				System::block_weight().total(),
-				<Test as Trait>::ExtrinsicBaseWeight::get() + <Test as Trait>::BlockExecutionWeight::get()
+				weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block
 			);
 		})
 	}
+
+	#[test]
+	fn normal_and_mandatory_tracked_separately() {
+		new_test_ext().execute_with(|| {
+			// Max block is 1024
+			// Max normal is 768 (75%)
+			// Max mandatory is unlimited
+			let max_normal = DispatchInfo { weight: 753, ..Default::default() };
+			let mandatory = DispatchInfo {
+				weight: 1019,
+				class: DispatchClass::Mandatory,
+				..Default::default()
+			};
+
+			let len = 0_usize;
+
+			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(&max_normal, len));
+			assert_eq!(System::block_weight().total(), 768);
+			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(&mandatory, len));
+			assert_eq!(block_weight_limit(), 1024);
+			assert_eq!(System::block_weight().total(), 1024 + 768);
+			assert_eq!(CheckWeight::<Test>::check_extrinsic_weight(&mandatory), Ok(()));
+		});
+	}
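The test below assembles `BlockWeights` by hand via the builder; a production runtime would more likely use a provided constructor. A hedged sketch, assuming `with_sensible_defaults` and `WEIGHT_PER_SECOND` are available as in runtimes of this era (the 2-second budget and 75% ratio are example values, not requirements):

use frame_support::weights::constants::WEIGHT_PER_SECOND;
use sp_runtime::Perbill;

// Reserve 25% of the block for operational and mandatory dispatches.
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);

fn runtime_block_weights() -> frame_system::limits::BlockWeights {
    frame_system::limits::BlockWeights::with_sensible_defaults(
        2 * WEIGHT_PER_SECOND,
        NORMAL_DISPATCH_RATIO,
    )
}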
+
+	#[test]
+	fn no_max_total_should_still_be_limited_by_max_block() {
+		// given
+		let maximum_weight = BlockWeights::builder()
+			.base_block(0)
+			.for_class(DispatchClass::non_mandatory(), |w| {
+				w.base_extrinsic = 0;
+				w.max_total = Some(20);
+			})
+			.for_class(DispatchClass::Mandatory, |w| {
+				w.base_extrinsic = 0;
+				w.reserved = Some(5);
+				w.max_total = None;
+			})
+			.build_or_panic();
+		let all_weight = crate::ConsumedWeight::new(|class| match class {
+			DispatchClass::Normal => 10,
+			DispatchClass::Operational => 10,
+			DispatchClass::Mandatory => 0,
+		});
+		assert_eq!(maximum_weight.max_block, all_weight.total());
+
+		// fits into reserved
+		let mandatory1 =
+			DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() };
+		// does not fit into reserved and the block is full.
+		let mandatory2 =
+			DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() };
+
+		// when
+		assert_ok!(calculate_consumed_weight::<<Test as Config>::Call>(
+			maximum_weight.clone(),
+			all_weight.clone(),
+			&mandatory1
+		));
+		assert_err!(
+			calculate_consumed_weight::<<Test as Config>::Call>(
+				maximum_weight,
+				all_weight,
+				&mandatory2
+			),
+			InvalidTransaction::ExhaustsResources
+		);
+	}
 }
diff --git a/frame/system/src/extensions/mod.rs b/frame/system/src/extensions/mod.rs
index ff61353e2d176..0af9722e475d1 100644
--- a/frame/system/src/extensions/mod.rs
+++ b/frame/system/src/extensions/mod.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,4 +21,3 @@ pub mod check_nonce;
 pub mod check_spec_version;
 pub mod check_tx_version;
 pub mod check_weight;
-
diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs
index e9b7a6d9f710f..2e7f26eef16f4 100644
--- a/frame/system/src/lib.rs
+++ b/frame/system/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,20 +15,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! # System Module
+//! # System Pallet
 //!
-//! The System module provides low-level access to core types and cross-cutting utilities.
+//! The System pallet provides low-level access to core types and cross-cutting utilities.
 //! It acts as the base layer for other pallets to interact with the Substrate framework components.
 //!
-//! - [`system::Trait`](./trait.Trait.html)
+//! - [`Config`]
 //!
 //! ## Overview
 //!
-//! The System module defines the core data types used in a Substrate runtime.
-//! It also provides several utility functions (see [`Module`](./struct.Module.html)) for other FRAME pallets.
+//! The System pallet defines the core data types used in a Substrate runtime.
+//! It also provides several utility functions (see [`Pallet`]) for other FRAME pallets.
 //!
-//! In addition, it manages the storage items for extrinsics data, indexes, event records, and digest items,
-//! among other things that support the execution of the current block.
+//! In addition, it manages the storage items for extrinsics data, indexes, event records, and
+//! digest items, among other things that support the execution of the current block.
 //!
 //! It also handles low-level tasks like depositing logs, basic set up and take down of
 //! temporary storage entries, and access to previous block hashes.
@@ -37,15 +37,15 @@
 //!
 //! ### Dispatchable Functions
 //!
-//! The System module does not implement any dispatchable functions.
+//! The System pallet does not implement any dispatchable functions.
 //!
 //! ### Public Functions
 //!
-//! See the [`Module`](./struct.Module.html) struct for details of publicly available functions.
+//! See the [`Pallet`] struct for details of publicly available functions.
 //!
 //! ### Signed Extensions
 //!
-//! The System module defines the following extensions:
+//! The System pallet defines the following extensions:
 //!
 //!   - [`CheckWeight`]: Checks the weight and length of the block and ensure that it does not
 //!     exceed the limits.
@@ -54,100 +54,74 @@
 //!   - [`CheckEra`]: Checks the era of the transaction. Contains a single payload of type `Era`.
 //!   - [`CheckGenesis`]: Checks the provided genesis hash of the transaction. Must be a part of the
 //!     signed payload of the transaction.
-//!   - [`CheckSpecVersion`]: Checks that the runtime version is the same as the one used to sign the
-//!     transaction.
-//!   - [`CheckTxVersion`]: Checks that the transaction version is the same as the one used to sign the
-//!     transaction.
+//!   - [`CheckSpecVersion`]: Checks that the runtime version is the same as the one used to sign
+//!     the transaction.
+//!   - [`CheckTxVersion`]: Checks that the transaction version is the same as the one used to sign
+//!     the transaction.
 //!
 //! Lookup the runtime aggregator file (e.g. `node/runtime`) to see the full list of signed
 //! extensions included in a chain.
-//!
-//! ## Usage
-//!
-//! ### Prerequisites
-//!
-//! Import the System module and derive your module's configuration trait from the system trait.
-//!
-//! ### Example - Get extrinsic count and parent hash for the current block
-//!
-//! ```
-//! use frame_support::{decl_module, dispatch};
-//! use frame_system::{self as system, ensure_signed};
-//!
-//! pub trait Trait: system::Trait {}
-//!
-//! decl_module! {
-//! 	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
-//! 		#[weight = 0]
-//! 		pub fn system_module_example(origin) -> dispatch::DispatchResult {
-//! 			let _sender = ensure_signed(origin)?;
-//! 			let _extrinsic_count = <system::Module<T>>::extrinsic_count();
-//! 			let _parent_hash = <system::Module<T>>::parent_hash();
-//! 			Ok(())
-//! 		}
-//! 	}
-//! }
-//! # fn main() { }
-//! ```
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
 #[cfg(feature = "std")]
 use serde::Serialize;
-use sp_std::prelude::*;
-#[cfg(any(feature = "std", test))]
-use sp_std::map;
-use sp_std::convert::Infallible;
-use sp_std::marker::PhantomData;
-use sp_std::fmt::Debug;
-use sp_version::RuntimeVersion;
 use sp_runtime::{
-	RuntimeDebug, Perbill, DispatchError, Either, generic,
+	generic,
 	traits::{
-		self, CheckEqual, AtLeast32Bit, Zero, Lookup, LookupError,
-		SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin,
-		MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded,
-		Dispatchable, AtLeast32BitUnsigned
+		self, AtLeast32Bit, AtLeast32BitUnsigned, BadOrigin, BlockNumberProvider, Bounded,
+		CheckEqual, Dispatchable, Hash, Lookup, LookupError, MaybeDisplay, MaybeMallocSizeOf,
+		MaybeSerializeDeserialize, Member, One, Saturating, SimpleBitOps, StaticLookup, Zero,
 	},
-	offchain::storage_lock::BlockNumberProvider,
+	DispatchError, Either, Perbill, RuntimeDebug,
 };
+#[cfg(any(feature = "std", test))]
+use sp_std::map;
+use sp_std::{fmt::Debug, marker::PhantomData, prelude::*};
+use sp_version::RuntimeVersion;
 
-use sp_core::{ChangesTrieConfiguration, storage::well_known_keys};
+use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen};
 use frame_support::{
-	decl_module, decl_event, decl_storage, decl_error, Parameter, ensure, debug,
+	dispatch::{DispatchResult, DispatchResultWithPostInfo},
 	storage,
 	traits::{
-		Contains, Get, PalletInfo, OnNewAccount, OnKilledAccount, IsDeadAccount, Happened,
-		StoredMap, EnsureOrigin, OriginTrait, Filter,
+		Contains, EnsureOrigin, Get, HandleLifetime, OnKilledAccount, OnNewAccount, OriginTrait,
+		PalletInfo, SortedMembers, StoredMap,
 	},
 	weights::{
-		Weight, RuntimeDbWeight, DispatchInfo, DispatchClass,
-		extract_actual_weight,
+		extract_actual_weight, DispatchClass, DispatchInfo, PerDispatchClass, RuntimeDbWeight,
+		Weight,
 	},
-	dispatch::DispatchResultWithPostInfo,
+	Parameter,
 };
-use codec::{Encode, Decode, FullCodec, EncodeLike};
+use scale_info::TypeInfo;
+use sp_core::{storage::well_known_keys, ChangesTrieConfiguration};
 
+#[cfg(feature = "std")]
+use frame_support::traits::GenesisBuild;
 #[cfg(any(feature = "std", test))]
 use sp_io::TestExternalities;
 
-pub mod offchain;
+pub mod limits;
 #[cfg(test)]
 pub(crate) mod mock;
+pub mod offchain;
 
 mod extensions;
-mod weights;
+#[cfg(feature = "std")]
+pub mod mocking;
 #[cfg(test)]
 mod tests;
-mod default_weights;
+pub mod weights;
 
 pub use extensions::{
-	check_mortality::CheckMortality, check_genesis::CheckGenesis, check_nonce::CheckNonce,
+	check_genesis::CheckGenesis, check_mortality::CheckMortality, check_nonce::CheckNonce,
 	check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion,
 	check_weight::CheckWeight,
 };
 // Backward compatible re-export.
 pub use extensions::check_mortality::CheckMortality as CheckEra;
+pub use weights::WeightInfo;
 
 /// Compute the trie root of a list of extrinsics.
 pub fn extrinsics_root<H: Hash, E: codec::Encode>(extrinsics: &[E]) -> H::Output {
@@ -159,420 +133,220 @@ pub fn extrinsics_data_root<H: Hash>(xts: Vec<Vec<u8>>) -> H::Output {
 	H::ordered_trie_root(xts)
 }
 
-pub trait WeightInfo {
-	fn remark(b: u32) -> Weight;
-	fn set_heap_pages() -> Weight;
-	fn set_changes_trie_config() -> Weight;
-	fn set_storage(i: u32, ) -> Weight;
-	fn kill_storage(i: u32, ) -> Weight;
-	fn kill_prefix(p: u32, ) -> Weight;
-	fn suicide() -> Weight;
-}
-
-pub trait Trait: 'static + Eq + Clone {
-	/// The basic call filter to use in Origin. All origins are built with this filter as base,
-	/// except Root.
-	type BaseCallFilter: Filter<Self::Call>;
-
-	/// The `Origin` type used by dispatchable calls.
-	type Origin:
-		Into<Result<RawOrigin<Self::AccountId>, Self::Origin>>
-		+ From<RawOrigin<Self::AccountId>>
-		+ Clone
-		+ OriginTrait<Call = Self::Call>;
-
-	/// The aggregated `Call` type.
-	type Call: Dispatchable + Debug;
-
-	/// Account index (aka nonce) type. This stores the number of previous transactions associated
-	/// with a sender account.
-	type Index:
-		Parameter + Member + MaybeSerialize + Debug + Default + MaybeDisplay + AtLeast32Bit
-		+ Copy;
-
-	/// The block number type used by the runtime.
-	type BlockNumber:
-		Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay +
-		AtLeast32BitUnsigned + Default + Bounded + Copy + sp_std::hash::Hash +
-		sp_std::str::FromStr + MaybeMallocSizeOf;
-
-	/// The output of the `Hashing` function.
-	type Hash:
-		Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + SimpleBitOps + Ord
-		+ Default + Copy + CheckEqual + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + MaybeMallocSizeOf;
-
-	/// The hashing system (algorithm) being used in the runtime (e.g. Blake2).
-	type Hashing: Hash<Output = Self::Hash>;
-
-	/// The user account identifier type for the runtime.
-	type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord
-		+ Default;
-
-	/// Converting trait to take a source type and convert to `AccountId`.
-	///
-	/// Used to define the type and conversion mechanism for referencing accounts in transactions.
-	/// It's perfectly reasonable for this to be an identity conversion (with the source type being
-	/// `AccountId`), but other modules (e.g. Indices module) may provide more functional/efficient
-	/// alternatives.
-	type Lookup: StaticLookup<Target = Self::AccountId>;
-
-	/// The block header.
-	type Header: Parameter + traits::Header<
-		Number = Self::BlockNumber,
-		Hash = Self::Hash,
-	>;
-
-	/// The aggregated event type of the runtime.
-	type Event: Parameter + Member + From<Event<Self>> + Debug;
-
-	/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
-	type BlockHashCount: Get<Self::BlockNumber>;
-
-	/// The maximum weight of a block.
-	type MaximumBlockWeight: Get<Weight>;
-
-	/// The weight of runtime database operations the runtime can invoke.
-	type DbWeight: Get<RuntimeDbWeight>;
-
-	/// The base weight of executing a block, independent of the transactions in the block.
-	type BlockExecutionWeight: Get<Weight>;
-
-	/// The base weight of an Extrinsic in the block, independent of the of extrinsic being executed.
-	type ExtrinsicBaseWeight: Get<Weight>;
-
-	/// The maximal weight of a single Extrinsic. This should be set to at most
-	/// `MaximumBlockWeight - AverageOnInitializeWeight`. The limit only applies to extrinsics
-	/// containing `Normal` dispatch class calls.
-	type MaximumExtrinsicWeight: Get<Weight>;
-
-	/// The maximum length of a block (in bytes).
-	type MaximumBlockLength: Get<u32>;
-
-	/// The portion of the block that is available to normal transaction. The rest can only be used
-	/// by operational transactions. This can be applied to any resource limit managed by the system
-	/// module, including weight and length.
-	type AvailableBlockRatio: Get<Perbill>;
-
-	/// Get the chain's current version.
-	type Version: Get<RuntimeVersion>;
-
-	/// Provides information about the pallet setup in the runtime.
-	///
-	/// Expects the `PalletInfo` type that is being generated by `construct_runtime!` in the
-	/// runtime.
-	///
-	/// For tests it is okay to use `()` as type, however it will provide "useless" data.
-	type PalletInfo: PalletInfo;
-
-	/// Data to be associated with an account (other than nonce/transaction counter, which this
-	/// module does regardless).
-	type AccountData: Member + FullCodec + Clone + Default;
-
-	/// Handler for when a new account has just been created.
-	type OnNewAccount: OnNewAccount<Self::AccountId>;
-
-	/// A function that is invoked when an account has been determined to be dead.
-	///
-	/// All resources should be cleaned up associated with the given account.
-	type OnKilledAccount: OnKilledAccount<Self::AccountId>;
-
-	type SystemWeightInfo: WeightInfo;
-}
-
-pub type DigestOf<T> = generic::Digest<<T as Trait>::Hash>;
-pub type DigestItemOf<T> = generic::DigestItem<<T as Trait>::Hash>;
-
-pub type Key = Vec<u8>;
-pub type KeyValue = (Vec<u8>, Vec<u8>);
-
-/// A phase of a block's execution.
-#[derive(Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))]
-pub enum Phase {
-	/// Applying an extrinsic.
-	ApplyExtrinsic(u32),
-	/// Finalizing the block.
-	Finalization,
-	/// Initializing the block.
-	Initialization,
-}
-
-impl Default for Phase {
-	fn default() -> Self {
-		Self::Initialization
-	}
-}
-
-/// Record of an event happening.
-#[derive(Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))]
-pub struct EventRecord<E: Parameter + Member, T> {
-	/// The phase of the block it happened in.
-	pub phase: Phase,
-	/// The event itself.
-	pub event: E,
-	/// The list of the topics this event has.
-	pub topics: Vec<T>,
-}
-
-/// Origin for the System module.
-#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode)]
-pub enum RawOrigin<AccountId> {
-	/// The system itself ordained this dispatch to happen: this is the highest privilege level.
-	Root,
-	/// It is signed by some public key and we provide the `AccountId`.
-	Signed(AccountId),
-	/// It is signed by nobody, can be either:
-	/// * included and agreed upon by the validators anyway,
-	/// * or unsigned transaction validated by a module.
-	None,
-}
-
-impl<AccountId> From<Option<AccountId>> for RawOrigin<AccountId> {
-	fn from(s: Option<AccountId>) -> RawOrigin<AccountId> {
-		match s {
-			Some(who) => RawOrigin::Signed(who),
-			None => RawOrigin::None,
-		}
-	}
-}
-
-/// Exposed trait-generic origin type.
-pub type Origin<T> = RawOrigin<<T as Trait>::AccountId>;
-
-// Create a Hash with 69 for each byte,
-// only used to build genesis config.
-#[cfg(feature = "std")]
-fn hash69<T: AsMut<[u8]> + Default>() -> T {
-	let mut h = T::default();
-	h.as_mut().iter_mut().for_each(|byte| *byte = 69);
-	h
-}
-
-/// This type alias represents an index of an event.
-///
-/// We use `u32` here because this index is used as index for `Events`
-/// which can't contain more than `u32::max_value()` items.
-type EventIndex = u32;
+/// An object to track the currently used extrinsic weight in a block.
+pub type ConsumedWeight = PerDispatchClass<Weight>;
 
-/// Type used to encode the number of references an account has.
-pub type RefCount = u32;
+pub use pallet::*;
 
-/// Information of an account.
-#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)]
-pub struct AccountInfo<Index, AccountData> {
-	/// The number of transactions this account has sent.
-	pub nonce: Index,
-	/// The number of other modules that currently depend on this account's existence. The account
-	/// cannot be reaped until this is zero.
-	pub refcount: RefCount,
-	/// The additional data that belongs to this account. Used to store the balance(s) in a lot of
-	/// chains.
-	pub data: AccountData,
+/// Do something when we should be setting the code.
+pub trait SetCode<T: Config> {
+	/// Set the code to the given blob.
+	fn set_code(code: Vec<u8>) -> DispatchResult;
 }
 
-/// Stores the `spec_version` and `spec_name` of when the last runtime upgrade
-/// happened.
-#[derive(sp_runtime::RuntimeDebug, Encode, Decode)]
-#[cfg_attr(feature = "std", derive(PartialEq))]
-pub struct LastRuntimeUpgradeInfo {
-	pub spec_version: codec::Compact<u32>,
-	pub spec_name: sp_runtime::RuntimeString,
-}
-
-impl LastRuntimeUpgradeInfo {
-	/// Returns if the runtime was upgraded in comparison of `self` and `current`.
-	///
-	/// Checks if either the `spec_version` increased or the `spec_name` changed.
-	pub fn was_upgraded(&self, current: &sp_version::RuntimeVersion) -> bool {
-		current.spec_version > self.spec_version.0 || current.spec_name != self.spec_name
-	}
-}
-
-impl From<sp_version::RuntimeVersion> for LastRuntimeUpgradeInfo {
-	fn from(version: sp_version::RuntimeVersion) -> Self {
-		Self {
-			spec_version: version.spec_version.into(),
-			spec_name: version.spec_name,
-		}
+impl<T: Config> SetCode<T> for () {
+	fn set_code(code: Vec<u8>) -> DispatchResult {
+		<Pallet<T>>::update_code_in_storage(&code)?;
+		Ok(())
 	}
 }
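`OnSetCode` makes the final `:code` write pluggable; the unit impl above writes immediately via `update_code_in_storage`. A hedged sketch of an alternative that merely stashes the blob for a later hook, roughly what a parachain might do (the storage key and type name are hypothetical):

pub struct DeferredSetCode;

impl<T: frame_system::Config> frame_system::SetCode<T> for DeferredSetCode {
	fn set_code(code: sp_std::vec::Vec<u8>) -> frame_support::dispatch::DispatchResult {
		// Stash under a custom key; a later hook (not shown) would move the
		// blob into `:code` at the appropriate moment.
		sp_io::storage::set(b":pending_code", &code);
		Ok(())
	}
}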
-decl_storage! {
-	trait Store for Module<T: Trait> as System {
-		/// The full account information for a particular account ID.
-		pub Account get(fn account):
-			map hasher(blake2_128_concat) T::AccountId => AccountInfo<T::Index, T::AccountData>;
-
-		/// Total extrinsics count for the current block.
-		ExtrinsicCount: Option<u32>;
-
-		/// The current weight for the block.
-		BlockWeight get(fn block_weight): weights::ExtrinsicsWeight;
+#[frame_support::pallet]
+pub mod pallet {
+	use crate::{self as frame_system, pallet_prelude::*, *};
+	use frame_support::pallet_prelude::*;
 
-		/// Total length (in bytes) for all extrinsics put together, for the current block.
-		AllExtrinsicsLen: Option<u32>;
+	/// System configuration trait. Implemented by runtime.
+	#[pallet::config]
+	#[pallet::disable_frame_system_supertrait_check]
+	pub trait Config: 'static + Eq + Clone {
+		/// The basic call filter to use in Origin. All origins are built with this filter as base,
+		/// except Root.
+		type BaseCallFilter: Contains<Self::Call>;
 
-		/// Map of block numbers to block hashes.
-		pub BlockHash get(fn block_hash) build(|_| vec![(T::BlockNumber::zero(), hash69())]):
-			map hasher(twox_64_concat) T::BlockNumber => T::Hash;
+		/// Block & extrinsics weights: base values and limits.
+		#[pallet::constant]
+		type BlockWeights: Get<limits::BlockWeights>;
 
-		/// Extrinsics data for the current block (maps an extrinsic's index to its data).
-		ExtrinsicData get(fn extrinsic_data): map hasher(twox_64_concat) u32 => Vec<u8>;
-
-		/// The current block number being processed. Set by `execute_block`.
-		Number get(fn block_number): T::BlockNumber;
-
-		/// Hash of the previous block.
-		ParentHash get(fn parent_hash) build(|_| hash69()): T::Hash;
-
-		/// Extrinsics root of the current block, also part of the block header.
-		ExtrinsicsRoot get(fn extrinsics_root): T::Hash;
-
-		/// Digest of the current block, also part of the block header.
-		Digest get(fn digest): DigestOf<T>;
-
-		/// Events deposited for the current block.
-		Events get(fn events): Vec<EventRecord<T::Event, T::Hash>>;
+		/// The maximum length of a block (in bytes).
+		#[pallet::constant]
+		type BlockLength: Get<limits::BlockLength>;
+
+		/// The `Origin` type used by dispatchable calls.
+		type Origin: Into<Result<RawOrigin<Self::AccountId>, Self::Origin>>
+			+ From<RawOrigin<Self::AccountId>>
+			+ Clone
+			+ OriginTrait<Call = Self::Call>;
+
+		/// The aggregated `Call` type.
+		type Call: Dispatchable + Debug;
+
+		/// Account index (aka nonce) type. This stores the number of previous transactions
+		/// associated with a sender account.
+		type Index: Parameter
+			+ Member
+			+ MaybeSerializeDeserialize
+			+ Debug
+			+ Default
+			+ MaybeDisplay
+			+ AtLeast32Bit
+			+ Copy;
+
+		/// The block number type used by the runtime.
+		type BlockNumber: Parameter
+			+ Member
+			+ MaybeSerializeDeserialize
+			+ Debug
+			+ MaybeDisplay
+			+ AtLeast32BitUnsigned
+			+ Default
+			+ Bounded
+			+ Copy
+			+ sp_std::hash::Hash
+			+ sp_std::str::FromStr
+			+ MaybeMallocSizeOf
+			+ MaxEncodedLen
+			+ TypeInfo;
+
+		/// The output of the `Hashing` function.
+		type Hash: Parameter
+			+ Member
+			+ MaybeSerializeDeserialize
+			+ Debug
+			+ MaybeDisplay
+			+ SimpleBitOps
+			+ Ord
+			+ Default
+			+ Copy
+			+ CheckEqual
+			+ sp_std::hash::Hash
+			+ AsRef<[u8]>
+			+ AsMut<[u8]>
+			+ MaybeMallocSizeOf
+			+ MaxEncodedLen;
+
+		/// The hashing system (algorithm) being used in the runtime (e.g. Blake2).
+		type Hashing: Hash<Output = Self::Hash> + TypeInfo;
+
+		/// The user account identifier type for the runtime.
+		type AccountId: Parameter
+			+ Member
+			+ MaybeSerializeDeserialize
+			+ Debug
+			+ MaybeDisplay
+			+ Ord
+			+ Default
+			+ MaxEncodedLen;
+
+		/// Converting trait to take a source type and convert to `AccountId`.
+		///
+		/// Used to define the type and conversion mechanism for referencing accounts in
+		/// transactions. It's perfectly reasonable for this to be an identity conversion (with the
+		/// source type being `AccountId`), but other pallets (e.g. Indices pallet) may provide more
+		/// functional/efficient alternatives.
+		type Lookup: StaticLookup<Target = Self::AccountId>;
+
+		/// The block header.
+		type Header: Parameter + traits::Header<Number = Self::BlockNumber, Hash = Self::Hash>;
+
+		/// The aggregated event type of the runtime.
+		type Event: Parameter
+			+ Member
+			+ From<Event<Self>>
+			+ Debug
+			+ IsType<<Self as frame_system::Config>::Event>;
+
+		/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
+		#[pallet::constant]
+		type BlockHashCount: Get<Self::BlockNumber>;
 
-		/// The number of events in the `Events` list.
-		EventCount get(fn event_count): EventIndex;
 		/// The weight of runtime database operations the runtime can invoke.
+		#[pallet::constant]
+		type DbWeight: Get<RuntimeDbWeight>;
 
-		// TODO: https://github.com/paritytech/substrate/issues/2553
-		// Possibly, we can improve it by using something like:
-		// `Option<(BlockNumber, Vec<EventIndex>)>`, however in this case we won't be able to use
-		// `EventTopics::append`.
+		/// Get the chain's current version.
+		#[pallet::constant]
+		type Version: Get<RuntimeVersion>;
 
-		/// Mapping between a topic (represented by T::Hash) and a vector of indexes
-		/// of events in the `<Events<T>>` list.
+		/// Provides information about the pallet setup in the runtime.
 		///
-		/// All topic vectors have deterministic storage locations depending on the topic. This
-		/// allows light-clients to leverage the changes trie storage tracking mechanism and
-		/// in case of changes fetch the list of events of interest.
+		/// Expects the `PalletInfo` type that is being generated by `construct_runtime!` in the
+		/// runtime.
 		///
-		/// The value has the type `(T::BlockNumber, EventIndex)` because if we used only just
-		/// the `EventIndex` then in case if the topic has the same contents on the next block
-		/// no notification will be triggered thus the event might be lost.
-		EventTopics get(fn event_topics): map hasher(blake2_128_concat) T::Hash => Vec<(T::BlockNumber, EventIndex)>;
-
-		/// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened.
-		pub LastRuntimeUpgrade build(|_| Some(LastRuntimeUpgradeInfo::from(T::Version::get()))): Option<LastRuntimeUpgradeInfo>;
+		/// For tests it is okay to use `()` as type, however it will provide "useless" data.
+		type PalletInfo: PalletInfo;
 
-		/// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not.
-		UpgradedToU32RefCount build(|_| true): bool;
-
-		/// The execution phase of the block.
-		ExecutionPhase: Option<Phase>;
-	}
-	add_extra_genesis {
-		config(changes_trie_config): Option<ChangesTrieConfiguration>;
-		#[serde(with = "sp_core::bytes")]
-		config(code): Vec<u8>;
+		/// Data to be associated with an account (other than nonce/transaction counter, which this
+		/// pallet does regardless).
+		type AccountData: Member + FullCodec + Clone + Default + TypeInfo;
 
-		build(|config: &GenesisConfig| {
-			use codec::Encode;
+		/// Handler for when a new account has just been created.
+		type OnNewAccount: OnNewAccount<Self::AccountId>;
 
-			sp_io::storage::set(well_known_keys::CODE, &config.code);
-			sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode());
+		/// A function that is invoked when an account has been determined to be dead.
+		///
+		/// All resources should be cleaned up associated with the given account.
+		type OnKilledAccount: OnKilledAccount<Self::AccountId>;
 
-			if let Some(ref changes_trie_config) = config.changes_trie_config {
-				sp_io::storage::set(
-					well_known_keys::CHANGES_TRIE_CONFIG,
-					&changes_trie_config.encode(),
-				);
-			}
-		});
-	}
-}
+		type SystemWeightInfo: WeightInfo;
 
-decl_event!(
-	/// Event for the System module.
-	pub enum Event<T> where AccountId = <T as Trait>::AccountId {
-		/// An extrinsic completed successfully. \[info\]
-		ExtrinsicSuccess(DispatchInfo),
-		/// An extrinsic failed. \[error, info\]
-		ExtrinsicFailed(DispatchError, DispatchInfo),
-		/// `:code` was updated.
-		CodeUpdated,
-		/// A new \[account\] was created.
-		NewAccount(AccountId),
-		/// An \[account\] was reaped.
-		KilledAccount(AccountId),
-	}
-);
+		/// The designated SS58 prefix of this chain.
+		///
+		/// This replaces the "ss58Format" property declared in the chain spec. Reason is
+		/// that the runtime should know about the prefix in order to make use of it as
+		/// an identifier of the chain.
+		#[pallet::constant]
+		type SS58Prefix: Get<u8>;
 
-decl_error! {
-	/// Error for the System module
-	pub enum Error for Module<T: Trait> {
-		/// The name of specification does not match between the current runtime
-		/// and the new runtime.
-		InvalidSpecName,
-		/// The specification version is not allowed to decrease between the current runtime
-		/// and the new runtime.
-		SpecVersionNeedsToIncrease,
-		/// Failed to extract the runtime version from the new runtime.
+		/// What to do if the runtime wants to change the code to something new.
 		///
-		/// Either calling `Core_version` or decoding `RuntimeVersion` failed.
-		FailedToExtractRuntimeVersion,
-		/// Suicide called when the account has non-default composite data.
-		NonDefaultComposite,
-		/// There is a non-zero reference count preventing the account from being purged.
-		NonZeroRefCount,
+		/// The default (`()`) implementation is responsible for setting the correct storage
+		/// entry and emitting corresponding event and log item. (see [`update_code_in_storage`]).
+		/// It's unlikely that this needs to be customized, unless you are writing a parachain using
+		/// `Cumulus`, where the actual code change is deferred.
+		type OnSetCode: SetCode<Self>;
 	}
-}
-
-decl_module! {
-	pub struct Module<T: Trait> for enum Call where origin: T::Origin, system=self {
-		type Error = Error<T>;
-
-		/// The maximum number of blocks to allow in mortal eras.
-		const BlockHashCount: T::BlockNumber = T::BlockHashCount::get();
-
-		/// The maximum weight of a block.
-		const MaximumBlockWeight: Weight = T::MaximumBlockWeight::get();
-
-		/// The weight of runtime database operations the runtime can invoke.
-		const DbWeight: RuntimeDbWeight = T::DbWeight::get();
-
-		/// The base weight of executing a block, independent of the transactions in the block.
-		const BlockExecutionWeight: Weight = T::BlockExecutionWeight::get();
-
-		/// The base weight of an Extrinsic in the block, independent of the of extrinsic being executed.
-		const ExtrinsicBaseWeight: Weight = T::ExtrinsicBaseWeight::get();
-
-		/// The maximum length of a block (in bytes).
-		const MaximumBlockLength: u32 = T::MaximumBlockLength::get();
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T>(_);
 
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		fn on_runtime_upgrade() -> frame_support::weights::Weight {
-			if !UpgradedToU32RefCount::get() {
-				Account::<T>::translate::<(T::Index, u8, T::AccountData), _>(|_key, (nonce, rc, data)|
-					Some(AccountInfo { nonce, refcount: rc as RefCount, data })
-				);
-				UpgradedToU32RefCount::put(true);
-				T::MaximumBlockWeight::get()
+			if !UpgradedToTripleRefCount::<T>::get() {
+				UpgradedToTripleRefCount::<T>::put(true);
+				migrations::migrate_to_triple_ref_count::<T>()
 			} else {
 				0
 			}
 		}
 
+		fn integrity_test() {
+			T::BlockWeights::get().validate().expect("The weights are invalid.");
+		}
+	}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {
 		/// A dispatch that will fill the block weight up to the given ratio.
 		// TODO: This should only be available for testing, rather than in general usage, but
-		// that's not possible at present (since it's within the decl_module macro).
-		#[weight = *_ratio * T::MaximumBlockWeight::get()]
-		fn fill_block(origin, _ratio: Perbill) {
+		// that's not possible at present (since it's within the pallet macro).
+		#[pallet::weight(*_ratio * T::BlockWeights::get().max_block)]
+		pub fn fill_block(origin: OriginFor<T>, _ratio: Perbill) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
+			Ok(().into())
 		}
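The `on_runtime_upgrade` hook above is a one-shot migration gated by a storage flag: the body runs once, flips the flag, and reports a full block of weight; every later upgrade is a cheap no-op. A dependency-free sketch of that shape, with hypothetical plain types:

struct Flag(bool);

fn on_runtime_upgrade(flag: &mut Flag, max_block: u64) -> u64 {
    if !flag.0 {
        flag.0 = true;
        // ... perform the migration, e.g. translating stored `Account` records ...
        max_block // charge a full block for the real work
    } else {
        0
    }
}

fn main() {
    let mut upgraded = Flag(false);
    assert_eq!(on_runtime_upgrade(&mut upgraded, 1024), 1024); // first run migrates
    assert_eq!(on_runtime_upgrade(&mut upgraded, 1024), 0); // later runs are no-ops
}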
 		/// Make some on-chain remark.
 		///
 		/// #
 		/// - `O(1)`
 		/// - Base Weight: 0.665 µs, independent of remark length.
 		/// - No DB operations.
 		/// #
-		#[weight = T::SystemWeightInfo::remark(_remark.len() as u32)]
-		fn remark(origin, _remark: Vec<u8>) {
+		#[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))]
+		pub fn remark(origin: OriginFor<T>, _remark: Vec<u8>) -> DispatchResultWithPostInfo {
 			ensure_signed(origin)?;
+			Ok(().into())
 		}
 
 		/// Set the number of pages in the WebAssembly environment's heap.
@@ -582,30 +356,34 @@ decl_module! {
 		/// - 1 storage write.
 		/// - Base Weight: 1.405 µs
 		/// - 1 write to HEAP_PAGES
+		/// - 1 digest item
 		/// #
-		#[weight = (T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational)]
-		fn set_heap_pages(origin, pages: u64) {
+		#[pallet::weight((T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational))]
+		pub fn set_heap_pages(origin: OriginFor<T>, pages: u64) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
 			storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode());
+			Self::deposit_log(generic::DigestItem::RuntimeEnvironmentUpdated);
+			Ok(().into())
 		}
 
 		/// Set the new runtime code.
 		///
 		/// #
 		/// - `O(C + S)` where `C` length of `code` and `S` complexity of `can_set_code`
+		/// - 1 call to `can_set_code`: `O(S)` (calls `sp_io::misc::runtime_version` which is
+		///   expensive).
 		/// - 1 storage write (codec `O(C)`).
-		/// - 1 call to `can_set_code`: `O(S)` (calls `sp_io::misc::runtime_version` which is expensive).
+		/// - 1 digest item.
 		/// - 1 event.
-		/// The weight of this function is dependent on the runtime, but generally this is very expensive.
-		/// We will treat this as a full block.
+		/// The weight of this function is dependent on the runtime, but generally this is very
+		/// expensive. We will treat this as a full block.
 		/// #
-		#[weight = (T::MaximumBlockWeight::get(), DispatchClass::Operational)]
-		pub fn set_code(origin, code: Vec<u8>) {
+		#[pallet::weight((T::BlockWeights::get().max_block, DispatchClass::Operational))]
+		pub fn set_code(origin: OriginFor<T>, code: Vec<u8>) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
 			Self::can_set_code(&code)?;
-
-			storage::unhashed::put_raw(well_known_keys::CODE, &code);
-			Self::deposit_event(RawEvent::CodeUpdated);
+			T::OnSetCode::set_code(code)?;
+			Ok(().into())
 		}
 
 		/// Set the new runtime code without doing any checks of the given `code`.
@@ -613,14 +391,18 @@ decl_module! {
 		/// #
 		/// - `O(C)` where `C` length of `code`
 		/// - 1 storage write (codec `O(C)`).
+		/// - 1 digest item.
 		/// - 1 event.
-		/// The weight of this function is dependent on the runtime. We will treat this as a full block.
-		/// #
-		#[weight = (T::MaximumBlockWeight::get(), DispatchClass::Operational)]
-		pub fn set_code_without_checks(origin, code: Vec<u8>) {
+		/// The weight of this function is dependent on the runtime. We will treat this as a full
+		/// block.
+		/// #
+		#[pallet::weight((T::BlockWeights::get().max_block, DispatchClass::Operational))]
+		pub fn set_code_without_checks(
+			origin: OriginFor<T>,
+			code: Vec<u8>,
+		) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
-			storage::unhashed::put_raw(well_known_keys::CODE, &code);
-			Self::deposit_event(RawEvent::CodeUpdated);
+			T::OnSetCode::set_code(code)?;
+			Ok(().into())
 		}
 
 		/// Set the new changes trie configuration.
@@ -633,8 +415,11 @@ decl_module! {
 		/// - DB Weight:
 		///     - Writes: Changes Trie, System Digest
 		/// #
-		#[weight = (T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational)]
-		pub fn set_changes_trie_config(origin, changes_trie_config: Option<ChangesTrieConfiguration>) {
+		#[pallet::weight((T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational))]
+		pub fn set_changes_trie_config(
+			origin: OriginFor<T>,
+			changes_trie_config: Option<ChangesTrieConfiguration>,
+		) -> DispatchResultWithPostInfo {
 			ensure_root(origin)?;
 			match changes_trie_config.clone() {
 				Some(changes_trie_config) => storage::unhashed::put_raw(
@@ -644,96 +429,457 @@ decl_module! {
 				None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG),
 			}
 
-			let log = generic::DigestItem::ChangesTrieSignal(
-				generic::ChangesTrieSignal::NewConfiguration(changes_trie_config),
-			);
-			Self::deposit_log(log.into());
-		}
+			let log = generic::DigestItem::ChangesTrieSignal(
+				generic::ChangesTrieSignal::NewConfiguration(changes_trie_config),
+			);
+			Self::deposit_log(log.into());
+			Ok(().into())
+		}
+
+		/// Set some items of storage.
+		///
+		/// #
+		/// - `O(I)` where `I` length of `items`
+		/// - `I` storage writes (`O(1)`).
+		/// - Base Weight: 0.568 * i µs
+		/// - Writes: Number of items
+		/// #
+		#[pallet::weight((
+			T::SystemWeightInfo::set_storage(items.len() as u32),
+			DispatchClass::Operational,
+		))]
+		pub fn set_storage(
+			origin: OriginFor<T>,
+			items: Vec<KeyValue>,
+		) -> DispatchResultWithPostInfo {
+			ensure_root(origin)?;
+			for i in &items {
+				storage::unhashed::put_raw(&i.0, &i.1);
+			}
+			Ok(().into())
+		}
+
+		/// Kill some items from storage.
+		///
+		/// #
+		/// - `O(IK)` where `I` length of `keys` and `K` length of one key
+		/// - `I` storage deletions.
+		/// - Base Weight: .378 * i µs
+		/// - Writes: Number of items
+		/// #
+		#[pallet::weight((
+			T::SystemWeightInfo::kill_storage(keys.len() as u32),
+			DispatchClass::Operational,
+		))]
+		pub fn kill_storage(origin: OriginFor<T>, keys: Vec<Key>) -> DispatchResultWithPostInfo {
+			ensure_root(origin)?;
+			for key in &keys {
+				storage::unhashed::kill(&key);
+			}
+			Ok(().into())
+		}
+
+		/// Kill all storage items with a key that starts with the given prefix.
+		///
+		/// **NOTE:** We rely on the Root origin to provide us the number of subkeys under
+		/// the prefix we are removing to accurately calculate the weight of this function.
+		///
+		/// #
+		/// - `O(P)` where `P` amount of keys with prefix `prefix`
+		/// - `P` storage deletions.
+		/// - Base Weight: 0.834 * P µs
+		/// - Writes: Number of subkeys + 1
+		/// #
+		#[pallet::weight((
+			T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)),
+			DispatchClass::Operational,
+		))]
+		pub fn kill_prefix(
+			origin: OriginFor<T>,
+			prefix: Key,
+			_subkeys: u32,
+		) -> DispatchResultWithPostInfo {
+			ensure_root(origin)?;
+			storage::unhashed::kill_prefix(&prefix, None);
+			Ok(().into())
+		}
+
+		/// Make some on-chain remark and emit event.
+		///
+		/// #
+		/// - `O(b)` where b is the length of the remark.
+		/// - 1 event.
+		/// #
+		#[pallet::weight(T::SystemWeightInfo::remark_with_event(remark.len() as u32))]
+		pub fn remark_with_event(
+			origin: OriginFor<T>,
+			remark: Vec<u8>,
+		) -> DispatchResultWithPostInfo {
+			let who = ensure_signed(origin)?;
+			let hash = T::Hashing::hash(&remark[..]);
+			Self::deposit_event(Event::Remarked(who, hash));
+			Ok(().into())
+		}
+	}
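`remark_with_event` persists nothing; it only emits the hash of the remark so observers can match a known payload. A sketch of reproducing the digest off-chain, assuming the runtime's `Hashing` is BLAKE2-256 (a common choice, not guaranteed by this diff):

use sp_core::hashing::blake2_256;

fn main() {
    let remark = b"hello, chain";
    // If `T::Hashing` is BlakeTwo256, this matches the hash in `Remarked`.
    let digest = blake2_256(remark);
    println!("remark hash: {:02x?}", digest);
}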
\[error, info\] + ExtrinsicFailed(DispatchError, DispatchInfo), + /// `:code` was updated. + CodeUpdated, + /// A new \[account\] was created. + NewAccount(T::AccountId), + /// An \[account\] was reaped. + KilledAccount(T::AccountId), + /// An on-chain remark happened. \[origin, remark_hash\] + Remarked(T::AccountId, T::Hash), + } + + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent&lt;T&gt; = Event&lt;T&gt;; + + /// Error for the System pallet + #[pallet::error] + pub enum Error&lt;T&gt; { + /// The name of specification does not match between the current runtime + /// and the new runtime. + InvalidSpecName, + /// The specification version is not allowed to decrease between the current runtime + /// and the new runtime. + SpecVersionNeedsToIncrease, + /// Failed to extract the runtime version from the new runtime. + /// + /// Either calling `Core_version` or decoding `RuntimeVersion` failed. + FailedToExtractRuntimeVersion, + /// Suicide called when the account has non-default composite data. + NonDefaultComposite, + /// There is a non-zero reference count preventing the account from being purged. + NonZeroRefCount, + } + + /// Exposed trait-generic origin type. + #[pallet::origin] + pub type Origin&lt;T&gt; = RawOrigin&lt;&lt;T as Config&gt;::AccountId&gt;; + + /// The full account information for a particular account ID. + #[pallet::storage] + #[pallet::getter(fn account)] + pub type Account&lt;T: Config&gt; = StorageMap&lt; + _, + Blake2_128Concat, + T::AccountId, + AccountInfo&lt;T::Index, T::AccountData&gt;, + ValueQuery, + &gt;; + + /// Total extrinsics count for the current block. + #[pallet::storage] + pub(super) type ExtrinsicCount&lt;T: Config&gt; = StorageValue&lt;_, u32&gt;; + + /// The current weight for the block. + #[pallet::storage] + #[pallet::getter(fn block_weight)] + pub(super) type BlockWeight&lt;T: Config&gt; = StorageValue&lt;_, ConsumedWeight, ValueQuery&gt;; + + /// Total length (in bytes) for all extrinsics put together, for the current block. + #[pallet::storage] + pub(super) type AllExtrinsicsLen&lt;T: Config&gt; = StorageValue&lt;_, u32&gt;; + + /// Map of block numbers to block hashes. + #[pallet::storage] + #[pallet::getter(fn block_hash)] + pub type BlockHash&lt;T: Config&gt; = + StorageMap&lt;_, Twox64Concat, T::BlockNumber, T::Hash, ValueQuery&gt;; + + /// Extrinsics data for the current block (maps an extrinsic's index to its data). + #[pallet::storage] + #[pallet::getter(fn extrinsic_data)] + pub(super) type ExtrinsicData&lt;T: Config&gt; = + StorageMap&lt;_, Twox64Concat, u32, Vec&lt;u8&gt;, ValueQuery&gt;; + + /// The current block number being processed. Set by `execute_block`. + #[pallet::storage] + #[pallet::getter(fn block_number)] + pub(super) type Number&lt;T: Config&gt; = StorageValue&lt;_, T::BlockNumber, ValueQuery&gt;; + + /// Hash of the previous block. + #[pallet::storage] + #[pallet::getter(fn parent_hash)] + pub(super) type ParentHash&lt;T: Config&gt; = StorageValue&lt;_, T::Hash, ValueQuery&gt;; + + /// Digest of the current block, also part of the block header. + #[pallet::storage] + #[pallet::getter(fn digest)] + pub(super) type Digest&lt;T: Config&gt; = StorageValue&lt;_, DigestOf&lt;T&gt;, ValueQuery&gt;; + + /// Events deposited for the current block. + /// + /// NOTE: This storage item is explicitly unbounded since it is never intended to be read + /// from within the runtime. + #[pallet::storage] + pub(super) type Events&lt;T: Config&gt; = + StorageValue&lt;_, Vec&lt;EventRecord&lt;T::Event, T::Hash&gt;&gt;, ValueQuery&gt;; + + /// The number of events in the `Events&lt;T&gt;` list. + #[pallet::storage] + #[pallet::getter(fn event_count)] + pub(super) type EventCount&lt;T: Config&gt; = StorageValue&lt;_, EventIndex, ValueQuery&gt;; + + /// Mapping between a topic (represented by T::Hash) and a vector of indexes + /// of events in the `&lt;Events&lt;T&gt;&gt;` list.
+ /// + /// All topic vectors have deterministic storage locations depending on the topic. This + /// allows light-clients to leverage the changes trie storage tracking mechanism and + /// in case of changes fetch the list of events of interest. + /// + /// The value has the type `(T::BlockNumber, EventIndex)` because if we used only the + /// `EventIndex` then in case the topic has the same contents on the next block, + /// no notification would be triggered and the event might be lost. + #[pallet::storage] + #[pallet::getter(fn event_topics)] + pub(super) type EventTopics&lt;T: Config&gt; = + StorageMap&lt;_, Blake2_128Concat, T::Hash, Vec&lt;(T::BlockNumber, EventIndex)&gt;, ValueQuery&gt;; + + /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. + #[pallet::storage] + pub type LastRuntimeUpgrade&lt;T: Config&gt; = StorageValue&lt;_, LastRuntimeUpgradeInfo&gt;; + + /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. + #[pallet::storage] + pub(super) type UpgradedToU32RefCount&lt;T: Config&gt; = StorageValue&lt;_, bool, ValueQuery&gt;; + + /// True if we have upgraded so that AccountInfo contains three types of `RefCount`. False + /// (default) if not. + #[pallet::storage] + pub(super) type UpgradedToTripleRefCount&lt;T: Config&gt; = StorageValue&lt;_, bool, ValueQuery&gt;; + + /// The execution phase of the block. + #[pallet::storage] + pub(super) type ExecutionPhase&lt;T: Config&gt; = StorageValue&lt;_, Phase&gt;; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub changes_trie_config: Option&lt;ChangesTrieConfiguration&gt;, + #[serde(with = "sp_core::bytes")] + pub code: Vec&lt;u8&gt;, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { changes_trie_config: Default::default(), code: Default::default() } + } + } + + #[pallet::genesis_build] + impl&lt;T: Config&gt; GenesisBuild&lt;T&gt; for GenesisConfig { + fn build(&self) { + &lt;BlockHash&lt;T&gt;&gt;::insert::&lt;_, T::Hash&gt;(T::BlockNumber::zero(), hash69()); + &lt;ParentHash&lt;T&gt;&gt;::put::&lt;T::Hash&gt;(hash69()); + &lt;LastRuntimeUpgrade&lt;T&gt;&gt;::put(LastRuntimeUpgradeInfo::from(T::Version::get())); + &lt;UpgradedToU32RefCount&lt;T&gt;&gt;::put(true); + &lt;UpgradedToTripleRefCount&lt;T&gt;&gt;::put(true); + + sp_io::storage::set(well_known_keys::CODE, &self.code); + sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); + if let Some(ref changes_trie_config) = self.changes_trie_config { + sp_io::storage::set( + well_known_keys::CHANGES_TRIE_CONFIG, + &changes_trie_config.encode(), + ); + } + } + } +} + +pub mod migrations { + use super::*; + + #[allow(dead_code)] + /// Migrate from unique `u8` reference counting to triple `u32` reference counting. + pub fn migrate_all&lt;T: Config&gt;() -> frame_support::weights::Weight { + Account::&lt;T&gt;::translate::&lt;(T::Index, u8, T::AccountData), _&gt;(|_key, (nonce, rc, data)| { + Some(AccountInfo { + nonce, + consumers: rc as RefCount, + providers: 1, + sufficients: 0, + data, + }) + }); + T::BlockWeights::get().max_block + } + + #[allow(dead_code)] + /// Migrate from unique `u32` reference counting to triple `u32` reference counting. + pub fn migrate_to_dual_ref_count&lt;T: Config&gt;() -> frame_support::weights::Weight { + Account::&lt;T&gt;::translate::&lt;(T::Index, RefCount, T::AccountData), _&gt;( + |_key, (nonce, consumers, data)| { + Some(AccountInfo { nonce, consumers, providers: 1, sufficients: 0, data }) + }, + ); + T::BlockWeights::get().max_block + } + + /// Migrate from dual `u32` reference counting to triple `u32` reference counting.
+ pub fn migrate_to_triple_ref_count() -> frame_support::weights::Weight { + Account::::translate::<(T::Index, RefCount, RefCount, T::AccountData), _>( + |_key, (nonce, consumers, providers, data)| { + Some(AccountInfo { nonce, consumers, providers, sufficients: 0, data }) + }, + ); + T::BlockWeights::get().max_block + } +} + +#[cfg(feature = "std")] +impl GenesisConfig { + /// Direct implementation of `GenesisBuild::build_storage`. + /// + /// Kept in order not to break dependency. + pub fn build_storage(&self) -> Result { + >::build_storage(self) + } + + /// Direct implementation of `GenesisBuild::assimilate_storage`. + /// + /// Kept in order not to break dependency. + pub fn assimilate_storage( + &self, + storage: &mut sp_runtime::Storage, + ) -> Result<(), String> { + >::assimilate_storage(self, storage) + } +} + +pub type DigestOf = generic::Digest<::Hash>; +pub type DigestItemOf = generic::DigestItem<::Hash>; + +pub type Key = Vec; +pub type KeyValue = (Vec, Vec); + +/// A phase of a block's execution. +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] +pub enum Phase { + /// Applying an extrinsic. + ApplyExtrinsic(u32), + /// Finalizing the block. + Finalization, + /// Initializing the block. + Initialization, +} + +impl Default for Phase { + fn default() -> Self { + Self::Initialization + } +} + +/// Record of an event happening. +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] +pub struct EventRecord { + /// The phase of the block it happened in. + pub phase: Phase, + /// The event itself. + pub event: E, + /// The list of the topics this event has. + pub topics: Vec, +} + +/// Origin for the System pallet. +#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo)] +pub enum RawOrigin { + /// The system itself ordained this dispatch to happen: this is the highest privilege level. + Root, + /// It is signed by some public key and we provide the `AccountId`. + Signed(AccountId), + /// It is signed by nobody, can be either: + /// * included and agreed upon by the validators anyway, + /// * or unsigned transaction validated by a pallet. + None, +} + +impl From> for RawOrigin { + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::None, + } + } +} + +// Create a Hash with 69 for each byte, +// only used to build genesis config. +#[cfg(feature = "std")] +fn hash69 + Default>() -> T { + let mut h = T::default(); + h.as_mut().iter_mut().for_each(|byte| *byte = 69); + h +} + +/// This type alias represents an index of an event. +/// +/// We use `u32` here because this index is used as index for `Events` +/// which can't contain more than `u32::MAX` items. +type EventIndex = u32; - /// Set some items of storage. - /// - /// # - /// - `O(I)` where `I` length of `items` - /// - `I` storage writes (`O(1)`). - /// - Base Weight: 0.568 * i µs - /// - Writes: Number of items - /// # - #[weight = ( - T::SystemWeightInfo::set_storage(items.len() as u32), - DispatchClass::Operational, - )] - fn set_storage(origin, items: Vec) { - ensure_root(origin)?; - for i in &items { - storage::unhashed::put_raw(&i.0, &i.1); - } - } +/// Type used to encode the number of references an account has. +pub type RefCount = u32; - /// Kill some items from storage. - /// - /// # - /// - `O(IK)` where `I` length of `keys` and `K` length of one key - /// - `I` storage deletions. 
- /// - Base Weight: .378 * i µs - /// - Writes: Number of items - /// # - #[weight = ( - T::SystemWeightInfo::kill_storage(keys.len() as u32), - DispatchClass::Operational, - )] - fn kill_storage(origin, keys: Vec) { - ensure_root(origin)?; - for key in &keys { - storage::unhashed::kill(&key); - } - } +/// Information of an account. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct AccountInfo { + /// The number of transactions this account has sent. + pub nonce: Index, + /// The number of other modules that currently depend on this account's existence. The account + /// cannot be reaped until this is zero. + pub consumers: RefCount, + /// The number of other modules that allow this account to exist. The account may not be reaped + /// until this and `sufficients` are both zero. + pub providers: RefCount, + /// The number of modules that allow this account to exist for their own purposes only. The + /// account may not be reaped until this and `providers` are both zero. + pub sufficients: RefCount, + /// The additional data that belongs to this account. Used to store the balance(s) in a lot of + /// chains. + pub data: AccountData, +} - /// Kill all storage items with a key that starts with the given prefix. - /// - /// **NOTE:** We rely on the Root origin to provide us the number of subkeys under - /// the prefix we are removing to accurately calculate the weight of this function. - /// - /// # - /// - `O(P)` where `P` amount of keys with prefix `prefix` - /// - `P` storage deletions. - /// - Base Weight: 0.834 * P µs - /// - Writes: Number of subkeys + 1 - /// # - #[weight = ( - T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), - DispatchClass::Operational, - )] - fn kill_prefix(origin, prefix: Key, _subkeys: u32) { - ensure_root(origin)?; - storage::unhashed::kill_prefix(&prefix); - } +/// Stores the `spec_version` and `spec_name` of when the last runtime upgrade +/// happened. +#[derive(sp_runtime::RuntimeDebug, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub struct LastRuntimeUpgradeInfo { + pub spec_version: codec::Compact, + pub spec_name: sp_runtime::RuntimeString, +} - /// Kill the sending account, assuming there are no references outstanding and the composite - /// data is equal to its default value. - /// - /// # - /// - `O(1)` - /// - 1 storage read and deletion. - /// -------------------- - /// Base Weight: 8.626 µs - /// No DB Read or Write operations because caller is already in overlay - /// # - #[weight = (T::SystemWeightInfo::suicide(), DispatchClass::Operational)] - pub fn suicide(origin) { - let who = ensure_signed(origin)?; - let account = Account::::get(&who); - ensure!(account.refcount == 0, Error::::NonZeroRefCount); - ensure!(account.data == T::AccountData::default(), Error::::NonDefaultComposite); - Self::kill_account(&who); - } +impl LastRuntimeUpgradeInfo { + /// Returns if the runtime was upgraded in comparison of `self` and `current`. + /// + /// Checks if either the `spec_version` increased or the `spec_name` changed. 
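Before the implementation continues, a quick illustration of how this check behaves. The version numbers below are invented, and the `RuntimeVersion` literal mirrors the one constructed in the test mock further down in this diff; this is a sketch, not part of the patch:

```rust
use frame_system::LastRuntimeUpgradeInfo;

// Sketch only: detect an upgrade by comparing the stored info against the
// current `RuntimeVersion`. All version values here are made up.
fn upgrade_detected() -> bool {
    let last = LastRuntimeUpgradeInfo {
        spec_version: 1u32.into(), // stored as `codec::Compact<u32>`
        spec_name: sp_version::create_runtime_str!("test"),
    };
    let current = sp_version::RuntimeVersion {
        spec_name: sp_version::create_runtime_str!("test"),
        impl_name: sp_version::create_runtime_str!("system-test"),
        authoring_version: 1,
        spec_version: 2, // bumped, so `was_upgraded` returns true
        impl_version: 1,
        apis: sp_version::create_apis_vec!([]),
        transaction_version: 1,
    };
    last.was_upgraded(&current)
}
```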
+ pub fn was_upgraded(&self, current: &sp_version::RuntimeVersion) -> bool { + current.spec_version > self.spec_version.0 || current.spec_name != self.spec_name + } +} + +impl From for LastRuntimeUpgradeInfo { + fn from(version: sp_version::RuntimeVersion) -> Self { + Self { spec_version: version.spec_version.into(), spec_name: version.spec_name } } } pub struct EnsureRoot(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId, -> EnsureOrigin for EnsureRoot { +impl, O>> + From>, AccountId> + EnsureOrigin for EnsureRoot +{ type Success = (); fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -749,10 +895,9 @@ impl< } pub struct EnsureSigned(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId: Default, -> EnsureOrigin for EnsureSigned { +impl, O>> + From>, AccountId: Default> + EnsureOrigin for EnsureSigned +{ type Success = AccountId; fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -769,10 +914,11 @@ impl< pub struct EnsureSignedBy(sp_std::marker::PhantomData<(Who, AccountId)>); impl< - O: Into, O>> + From>, - Who: Contains, - AccountId: PartialEq + Clone + Ord + Default, -> EnsureOrigin for EnsureSignedBy { + O: Into, O>> + From>, + Who: SortedMembers, + AccountId: PartialEq + Clone + Ord + Default, + > EnsureOrigin for EnsureSignedBy +{ type Success = AccountId; fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -793,10 +939,9 @@ impl< } pub struct EnsureNone(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId, -> EnsureOrigin for EnsureNone { +impl, O>> + From>, AccountId> + EnsureOrigin for EnsureNone +{ type Success = (); fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -829,17 +974,16 @@ impl EnsureOrigin for EnsureNever { /// Origin check will pass if `L` or `R` origin check passes. `L` is tested first. pub struct EnsureOneOf(sp_std::marker::PhantomData<(AccountId, L, R)>); impl< - AccountId, - O: Into, O>> + From>, - L: EnsureOrigin, - R: EnsureOrigin, -> EnsureOrigin for EnsureOneOf { + AccountId, + O: Into, O>> + From>, + L: EnsureOrigin, + R: EnsureOrigin, + > EnsureOrigin for EnsureOneOf +{ type Success = Either; fn try_origin(o: O) -> Result { - L::try_origin(o).map_or_else( - |o| R::try_origin(o).map(|o| Either::Right(o)), - |o| Ok(Either::Left(o)), - ) + L::try_origin(o) + .map_or_else(|o| R::try_origin(o).map(|o| Either::Right(o)), |o| Ok(Either::Left(o))) } #[cfg(feature = "runtime-benchmarks")] @@ -851,7 +995,8 @@ impl< /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). /// Returns `Ok` with the account that signed the extrinsic or an `Err` otherwise. pub fn ensure_signed(o: OuterOrigin) -> Result - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { match o.into() { Ok(RawOrigin::Signed(t)) => Ok(t), @@ -861,7 +1006,8 @@ pub fn ensure_signed(o: OuterOrigin) -> Result(o: OuterOrigin) -> Result<(), BadOrigin> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { match o.into() { Ok(RawOrigin::Root) => Ok(()), @@ -871,7 +1017,8 @@ pub fn ensure_root(o: OuterOrigin) -> Result<(), BadOrig /// Ensure that the origin `o` represents an unsigned extrinsic. Returns `Ok` or an `Err` otherwise. 
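All of the `Ensure*` checkers above ultimately pattern-match the same `RawOrigin` cases. As a rough sketch (not part of the diff), a combined Root-or-signed check can be written directly against `RawOrigin`, which gives roughly the same acceptance rule as chaining `EnsureRoot` and `EnsureSigned` via `EnsureOneOf`:

```rust
use frame_system::RawOrigin;
use sp_runtime::traits::BadOrigin;

/// Accept Root or a signed caller, rejecting unsigned extrinsics.
/// Returns the signer when there is one.
fn root_or_signed<AccountId>(o: RawOrigin<AccountId>) -> Result<Option<AccountId>, BadOrigin> {
    match o {
        RawOrigin::Root => Ok(None),
        RawOrigin::Signed(who) => Ok(Some(who)),
        RawOrigin::None => Err(BadOrigin),
    }
}
```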
pub fn ensure_none(o: OuterOrigin) -> Result<(), BadOrigin> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { match o.into() { Ok(RawOrigin::None) => Ok(()), @@ -901,36 +1048,248 @@ impl Default for InitKind { } /// Reference status; can be either referenced or unreferenced. +#[derive(RuntimeDebug)] pub enum RefStatus { Referenced, Unreferenced, } -impl Module { - /// Deposits an event into this block's event record. - pub fn deposit_event(event: impl Into) { - Self::deposit_event_indexed(&[], event.into()); +/// Some resultant status relevant to incrementing a provider/self-sufficient reference. +#[derive(Eq, PartialEq, RuntimeDebug)] +pub enum IncRefStatus { + /// Account was created. + Created, + /// Account already existed. + Existed, +} + +/// Some resultant status relevant to decrementing a provider/self-sufficient reference. +#[derive(Eq, PartialEq, RuntimeDebug)] +pub enum DecRefStatus { + /// Account was destroyed. + Reaped, + /// Account still exists. + Exists, +} + +impl Pallet { + pub fn account_exists(who: &T::AccountId) -> bool { + Account::::contains_key(who) + } + + /// Write code to the storage and emit related events and digest items. + /// + /// Note this function almost never should be used directly. It is exposed + /// for `OnSetCode` implementations that defer actual code being written to + /// the storage (for instance in case of parachains). + pub fn update_code_in_storage(code: &[u8]) -> DispatchResult { + storage::unhashed::put_raw(well_known_keys::CODE, code); + Self::deposit_log(generic::DigestItem::RuntimeEnvironmentUpdated); + Self::deposit_event(Event::CodeUpdated); + Ok(()) } /// Increment the reference counter on an account. + #[deprecated = "Use `inc_consumers` instead"] pub fn inc_ref(who: &T::AccountId) { - Account::::mutate(who, |a| a.refcount = a.refcount.saturating_add(1)); + let _ = Self::inc_consumers(who); } /// Decrement the reference counter on an account. This *MUST* only be done once for every time - /// you called `inc_ref` on `who`. + /// you called `inc_consumers` on `who`. + #[deprecated = "Use `dec_consumers` instead"] pub fn dec_ref(who: &T::AccountId) { - Account::::mutate(who, |a| a.refcount = a.refcount.saturating_sub(1)); + let _ = Self::dec_consumers(who); } /// The number of outstanding references for the account `who`. + #[deprecated = "Use `consumers` instead"] pub fn refs(who: &T::AccountId) -> RefCount { - Account::::get(who).refcount + Self::consumers(who) } /// True if the account has no outstanding references. + #[deprecated = "Use `!is_provider_required` instead"] pub fn allow_death(who: &T::AccountId) -> bool { - Account::::get(who).refcount == 0 + !Self::is_provider_required(who) + } + + /// Increment the provider reference counter on an account. + pub fn inc_providers(who: &T::AccountId) -> IncRefStatus { + Account::::mutate(who, |a| { + if a.providers == 0 && a.sufficients == 0 { + // Account is being created. + a.providers = 1; + Self::on_created_account(who.clone(), a); + IncRefStatus::Created + } else { + a.providers = a.providers.saturating_add(1); + IncRefStatus::Existed + } + }) + } + + /// Decrement the provider reference counter on an account. + /// + /// This *MUST* only be done once for every time you called `inc_providers` on `who`. + pub fn dec_providers(who: &T::AccountId) -> Result { + Account::::try_mutate_exists(who, |maybe_account| { + if let Some(mut account) = maybe_account.take() { + if account.providers == 0 { + // Logic error - cannot decrement beyond zero. 
+ log::error!( + target: "runtime::system", + "Logic error: Unexpected underflow in reducing provider", + ); + account.providers = 1; + } + match (account.providers, account.consumers, account.sufficients) { + (1, 0, 0) => { + // No providers left (and no consumers) and no sufficients. Account dead. + + Pallet::::on_killed_account(who.clone()); + Ok(DecRefStatus::Reaped) + }, + (1, c, _) if c > 0 => { + // Cannot remove last provider if there are consumers. + Err(DispatchError::ConsumerRemaining) + }, + (x, _, _) => { + // Account will continue to exist as there is either > 1 provider or + // > 0 sufficients. + account.providers = x - 1; + *maybe_account = Some(account); + Ok(DecRefStatus::Exists) + }, + } + } else { + log::error!( + target: "runtime::system", + "Logic error: Account already dead when reducing provider", + ); + Ok(DecRefStatus::Reaped) + } + }) + } + + /// Increment the self-sufficient reference counter on an account. + pub fn inc_sufficients(who: &T::AccountId) -> IncRefStatus { + Account::::mutate(who, |a| { + if a.providers + a.sufficients == 0 { + // Account is being created. + a.sufficients = 1; + Self::on_created_account(who.clone(), a); + IncRefStatus::Created + } else { + a.sufficients = a.sufficients.saturating_add(1); + IncRefStatus::Existed + } + }) + } + + /// Decrement the sufficients reference counter on an account. + /// + /// This *MUST* only be done once for every time you called `inc_sufficients` on `who`. + pub fn dec_sufficients(who: &T::AccountId) -> DecRefStatus { + Account::::mutate_exists(who, |maybe_account| { + if let Some(mut account) = maybe_account.take() { + if account.sufficients == 0 { + // Logic error - cannot decrement beyond zero. + log::error!( + target: "runtime::system", + "Logic error: Unexpected underflow in reducing sufficients", + ); + } + match (account.sufficients, account.providers) { + (0, 0) | (1, 0) => { + Pallet::::on_killed_account(who.clone()); + DecRefStatus::Reaped + }, + (x, _) => { + account.sufficients = x - 1; + *maybe_account = Some(account); + DecRefStatus::Exists + }, + } + } else { + log::error!( + target: "runtime::system", + "Logic error: Account already dead when reducing provider", + ); + DecRefStatus::Reaped + } + }) + } + + /// The number of outstanding provider references for the account `who`. + pub fn providers(who: &T::AccountId) -> RefCount { + Account::::get(who).providers + } + + /// The number of outstanding sufficient references for the account `who`. + pub fn sufficients(who: &T::AccountId) -> RefCount { + Account::::get(who).sufficients + } + + /// The number of outstanding provider and sufficient references for the account `who`. + pub fn reference_count(who: &T::AccountId) -> RefCount { + let a = Account::::get(who); + a.providers + a.sufficients + } + + /// Increment the reference counter on an account. + /// + /// The account `who`'s `providers` must be non-zero or this will return an error. + pub fn inc_consumers(who: &T::AccountId) -> Result<(), DispatchError> { + Account::::try_mutate(who, |a| { + if a.providers > 0 { + a.consumers = a.consumers.saturating_add(1); + Ok(()) + } else { + Err(DispatchError::NoProviders) + } + }) + } + + /// Decrement the reference counter on an account. This *MUST* only be done once for every time + /// you called `inc_consumers` on `who`. 
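Taken together, these counters drive the account lifecycle. A minimal sketch of the intended call sequence follows (illustrative only; in practice the calls are spread across pallets such as balances and whatever logic holds locks on the account):

```rust
use sp_runtime::DispatchError;

// Illustrative lifecycle of the triple reference counters, for any runtime
// `T: frame_system::Config`.
fn refcount_lifecycle<T: frame_system::Config>(who: &T::AccountId) -> Result<(), DispatchError> {
    // An endowing pallet registers the first provider, creating the account.
    frame_system::Pallet::<T>::inc_providers(who);
    // A pallet depending on the account (e.g. one holding a lock on it)
    // registers a consumer; this fails if there is no provider.
    frame_system::Pallet::<T>::inc_consumers(who)?;
    // While consumers are outstanding, the last provider cannot be dropped.
    assert!(!frame_system::Pallet::<T>::can_dec_provider(who));
    // Release in reverse order; the final `dec_providers` reaps the account.
    frame_system::Pallet::<T>::dec_consumers(who);
    frame_system::Pallet::<T>::dec_providers(who)?;
    Ok(())
}
```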
+ pub fn dec_consumers(who: &T::AccountId) { + Account::&lt;T&gt;::mutate(who, |a| { + if a.consumers > 0 { + a.consumers -= 1; + } else { + log::error!( + target: "runtime::system", + "Logic error: Unexpected underflow in reducing consumer", + ); + } + }) + } + + /// The number of outstanding references for the account `who`. + pub fn consumers(who: &T::AccountId) -> RefCount { + Account::&lt;T&gt;::get(who).consumers + } + + /// True if the account has some outstanding consumer references. + pub fn is_provider_required(who: &T::AccountId) -> bool { + Account::&lt;T&gt;::get(who).consumers != 0 + } + + /// True if the account has no outstanding consumer references or more than one provider. + pub fn can_dec_provider(who: &T::AccountId) -> bool { + let a = Account::&lt;T&gt;::get(who); + a.consumers == 0 || a.providers > 1 + } + + /// True if the account has at least one provider reference. + pub fn can_inc_consumer(who: &T::AccountId) -> bool { + Account::&lt;T&gt;::get(who).providers > 0 + } + + /// Deposits an event into this block's event record. + pub fn deposit_event(event: impl Into&lt;Event&lt;T&gt;&gt;) { + Self::deposit_event_indexed(&[], event.into()); } /// Deposits an event into this block's event record adding this event @@ -941,25 +1300,24 @@ impl Module { pub fn deposit_event_indexed(topics: &[T::Hash], event: T::Event) { let block_number = Self::block_number(); // Don't populate events on genesis. - if block_number.is_zero() { return } + if block_number.is_zero() { + return + } - let phase = ExecutionPhase::get().unwrap_or_default(); - let event = EventRecord { - phase, - event, - topics: topics.iter().cloned().collect::&lt;Vec&lt;_&gt;&gt;(), - }; + let phase = ExecutionPhase::&lt;T&gt;::get().unwrap_or_default(); + let event = + EventRecord { phase, event, topics: topics.iter().cloned().collect::&lt;Vec&lt;_&gt;&gt;() }; // Index of the to be added event. let event_idx = { - let old_event_count = EventCount::get(); + let old_event_count = EventCount::&lt;T&gt;::get(); let new_event_count = match old_event_count.checked_add(1) { // We've reached the maximum number of events at this block, just // don't do anything and leave the event_count unaltered. None => return, Some(nc) => nc, }; - EventCount::put(new_event_count); + EventCount::&lt;T&gt;::put(new_event_count); old_event_count }; @@ -977,17 +1335,17 @@ impl Module { /// Gets extrinsics count. pub fn extrinsic_count() -> u32 { - ExtrinsicCount::get().unwrap_or_default() + ExtrinsicCount::&lt;T&gt;::get().unwrap_or_default() } pub fn all_extrinsics_len() -> u32 { - AllExtrinsicsLen::get().unwrap_or_default() + AllExtrinsicsLen::&lt;T&gt;::get().unwrap_or_default() } - /// Inform the system module of some additional weight that should be accounted for, in the + /// Inform the system pallet of some additional weight that should be accounted for, in the /// current block. /// - /// NOTE: use with extra care; this function is made public only be used for certain modules + /// NOTE: use with extra care; this function is made public only to be used for certain pallets /// that need it. A runtime that does not have dynamic calls should never need this and should /// stick to static weights. A typical use case for this is inner calls or smart contract calls. /// Furthermore, it only makes sense to use this when it is presumably _cheap_ to provide the @@ -1000,7 +1358,7 @@ impl Module { /// /// Another potential use-case could be for the `on_initialize` and `on_finalize` hooks.
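For instance, a hook that only learns its true cost at runtime might charge it as in the following sketch (the `extra` value would come from the pallet's own accounting; the signature matches the function defined next):

```rust
use frame_support::weights::{DispatchClass, Weight};

/// Sketch: charge weight discovered during `on_initialize` that the static
/// weight annotation could not have known about.
fn charge_dynamic_weight<T: frame_system::Config>(extra: Weight) {
    frame_system::Pallet::<T>::register_extra_weight_unchecked(extra, DispatchClass::Mandatory);
}
```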
pub fn register_extra_weight_unchecked(weight: Weight, class: DispatchClass) { - BlockWeight::mutate(|current_weight| { + BlockWeight::::mutate(|current_weight| { current_weight.add(weight, class); }); } @@ -1009,50 +1367,60 @@ impl Module { pub fn initialize( number: &T::BlockNumber, parent_hash: &T::Hash, - txs_root: &T::Hash, digest: &DigestOf, kind: InitKind, ) { // populate environment - ExecutionPhase::put(Phase::Initialization); + ExecutionPhase::::put(Phase::Initialization); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); >::put(number); >::put(digest); >::put(parent_hash); >::insert(*number - One::one(), parent_hash); - >::put(txs_root); // Remove previous block data from storage - BlockWeight::kill(); + BlockWeight::::kill(); // Kill inspectable storage entries in state when `InitKind::Full`. if let InitKind::Full = kind { >::kill(); - EventCount::kill(); - >::remove_all(); + EventCount::::kill(); + >::remove_all(None); } } - /// Remove temporary "environment" entries in storage. + /// Remove temporary "environment" entries in storage, compute the storage root and return the + /// resulting header for this block. pub fn finalize() -> T::Header { - ExecutionPhase::kill(); - ExtrinsicCount::kill(); - AllExtrinsicsLen::kill(); + ExecutionPhase::::kill(); + AllExtrinsicsLen::::kill(); + + // The following fields + // + // - > + // - > + // - > + // - > + // - > + // - > + // + // stay to be inspected by the client and will be cleared by `Self::initialize`. + let number = >::get(); + let parent_hash = >::get(); + let mut digest = >::get(); - let number = >::take(); - let parent_hash = >::take(); - let mut digest = >::take(); - let extrinsics_root = >::take(); + let extrinsics = (0..ExtrinsicCount::::take().unwrap_or_default()) + .map(ExtrinsicData::::take) + .collect(); + let extrinsics_root = extrinsics_data_root::(extrinsics); // move block hash pruning window by one block - let block_hash_count = ::get(); - if number > block_hash_count { - let to_remove = number - block_hash_count - One::one(); + let block_hash_count = T::BlockHashCount::get(); + let to_remove = number.saturating_sub(block_hash_count).saturating_sub(One::one()); - // keep genesis hash - if to_remove != Zero::zero() { - >::remove(to_remove); - } + // keep genesis hash + if !to_remove.is_zero() { + >::remove(to_remove); } let storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..]) @@ -1064,20 +1432,18 @@ impl Module { if let Some(storage_changes_root) = storage_changes_root { let item = generic::DigestItem::ChangesTrieRoot( T::Hash::decode(&mut &storage_changes_root[..]) - .expect("Node is configured to use the same hash; qed") + .expect("Node is configured to use the same hash; qed"), ); digest.push(item); } - // The following fields - // - // - > - // - > - // - > - // - // stay to be inspected by the client and will be cleared by `Self::initialize`. - - ::new(number, extrinsics_root, storage_root, parent_hash, digest) + ::new( + number, + extrinsics_root, + storage_root, + parent_hash, + digest, + ) } /// Deposits a log and ensures it matches the block's log data. @@ -1090,7 +1456,7 @@ impl Module { >::append(item); } - /// Get the basic externalities for this module, useful for tests. + /// Get the basic externalities for this pallet, useful for tests. #[cfg(any(feature = "std", test))] pub fn externalities() -> TestExternalities { TestExternalities::new(sp_core::storage::Storage { @@ -1103,6 +1469,24 @@ impl Module { }) } + /// Get the current events deposited by the runtime. 
+ /// + /// NOTE: This should only be used in tests. Reading events from the runtime can have a large + /// impact on the PoV size of a block. Users should use alternative and well bounded storage + /// items for any behavior like this. + #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] + pub fn events() -> Vec&lt;EventRecord&lt;T::Event, T::Hash&gt;&gt; { + Self::read_events_no_consensus() + } + + /// Get the current events deposited by the runtime. + /// + /// Should only be called if you know what you are doing and outside of the runtime block + /// execution else it can have a large impact on the PoV size of a block. + pub fn read_events_no_consensus() -> Vec&lt;EventRecord&lt;T::Event, T::Hash&gt;&gt; { + Events::&lt;T&gt;::get() + } + /// Set the block number to something in particular. Can be used as an alternative to /// `initialize` for tests that don't need to bother with the other environment entries. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] @@ -1125,11 +1509,11 @@ impl Module { /// Set the current block weight. This should only be used in some integration tests. #[cfg(any(feature = "std", test))] - pub fn set_block_limits(weight: Weight, len: usize) { - BlockWeight::mutate(|current_weight| { - current_weight.put(weight, DispatchClass::Normal) + pub fn set_block_consumed_resources(weight: Weight, len: usize) { + BlockWeight::&lt;T&gt;::mutate(|current_weight| { + current_weight.set(weight, DispatchClass::Normal) }); - AllExtrinsicsLen::put(len as u32); + AllExtrinsicsLen::&lt;T&gt;::put(len as u32); } /// Reset events. Can be used as an alternative to @@ -1137,12 +1521,26 @@ impl Module { #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] pub fn reset_events() { &lt;Events&lt;T&gt;&gt;::kill(); - EventCount::kill(); - &lt;EventTopics&lt;T&gt;&gt;::remove_all(); + EventCount::&lt;T&gt;::kill(); + &lt;EventTopics&lt;T&gt;&gt;::remove_all(None); + } + + /// Assert the given `event` exists. + #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] + pub fn assert_has_event(event: T::Event) { + assert!(Self::events().iter().any(|record| record.event == event)) + } + + /// Assert the last event equal to the given `event`. + #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] + pub fn assert_last_event(event: T::Event) { + assert_eq!(Self::events().last().expect("events expected").event, event); } /// Return the chain's current runtime version. - pub fn runtime_version() -> RuntimeVersion { T::Version::get() } + pub fn runtime_version() -> RuntimeVersion { + T::Version::get() + } /// Retrieve the account transaction counter from storage. pub fn account_nonce(who: impl EncodeLike&lt;T::AccountId&gt;) -> T::Index { @@ -1154,86 +1552,61 @@ impl Module { Account::&lt;T&gt;::mutate(who, |a| a.nonce += T::Index::one()); } - /// Note what the extrinsic data of the current extrinsic index is. If this - /// is called, then ensure `derive_extrinsics` is also called before - /// block-building is completed. + /// Note what the extrinsic data of the current extrinsic index is. /// - /// NOTE: This function is called only when the block is being constructed locally. - /// `execute_block` doesn't note any extrinsics. + /// This is required to be called before applying an extrinsic. The data will be used + /// in [`Self::finalize`] to calculate the correct extrinsics root. pub fn note_extrinsic(encoded_xt: Vec&lt;u8&gt;) { - ExtrinsicData::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt); + ExtrinsicData::&lt;T&gt;::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt); } /// To be called immediately after an extrinsic has been applied.
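Given the assertion helpers added above, a unit test might read as follows. This is a sketch written against the `Test` mock runtime and `new_test_ext` defined in mock.rs later in this diff; the block-number setup reflects the "don't populate events on genesis" rule:

```rust
use sp_runtime::traits::Hash;

// Sketch of a test exercising `remark_with_event` and the event assertions.
#[test]
fn remark_with_event_deposits_remarked() {
    new_test_ext().execute_with(|| {
        // Events are skipped at the genesis block, so move past it first.
        System::set_block_number(1);
        let remark = b"hello".to_vec();
        let hash = <Test as frame_system::Config>::Hashing::hash(&remark[..]);
        assert!(System::remark_with_event(Origin::signed(1), remark).is_ok());
        System::assert_last_event(SysEvent::Remarked(1, hash).into());
    });
}
```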
pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { info.weight = extract_actual_weight(r, &info); - Self::deposit_event( - match r { - Ok(_) => RawEvent::ExtrinsicSuccess(info), - Err(err) => { - sp_runtime::print(err); - RawEvent::ExtrinsicFailed(err.error, info) - }, - } - ); + Self::deposit_event(match r { + Ok(_) => Event::ExtrinsicSuccess(info), + Err(err) => { + log::trace!( + target: "runtime::system", + "Extrinsic failed at block({:?}): {:?}", + Self::block_number(), + err, + ); + Event::ExtrinsicFailed(err.error, info) + }, + }); let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32; storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index); - ExecutionPhase::put(Phase::ApplyExtrinsic(next_extrinsic_index)); + ExecutionPhase::::put(Phase::ApplyExtrinsic(next_extrinsic_index)); } /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block /// has been called. pub fn note_finished_extrinsics() { - let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX) - .unwrap_or_default(); - ExtrinsicCount::put(extrinsic_index); - ExecutionPhase::put(Phase::Finalization); + let extrinsic_index: u32 = + storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default(); + ExtrinsicCount::::put(extrinsic_index); + ExecutionPhase::::put(Phase::Finalization); } /// To be called immediately after finishing the initialization of the block - /// (e.g., called `on_initialize` for all modules). + /// (e.g., called `on_initialize` for all pallets). pub fn note_finished_initialize() { - ExecutionPhase::put(Phase::ApplyExtrinsic(0)) - } - - /// Remove all extrinsic data and save the extrinsics trie root. - pub fn derive_extrinsics() { - let extrinsics = (0..ExtrinsicCount::get().unwrap_or_default()) - .map(ExtrinsicData::take).collect(); - let xts_root = extrinsics_data_root::(extrinsics); - >::put(xts_root); + ExecutionPhase::::put(Phase::ApplyExtrinsic(0)) } /// An account is being created. - pub fn on_created_account(who: T::AccountId) { + pub fn on_created_account(who: T::AccountId, _a: &mut AccountInfo) { T::OnNewAccount::on_new_account(&who); - Self::deposit_event(RawEvent::NewAccount(who)); + Self::deposit_event(Event::NewAccount(who)); } /// Do anything that needs to be done after an account has been killed. fn on_killed_account(who: T::AccountId) { T::OnKilledAccount::on_killed_account(&who); - Self::deposit_event(RawEvent::KilledAccount(who)); - } - - /// Remove an account from storage. This should only be done when its refs are zero or you'll - /// get storage leaks in other modules. Nonetheless we assume that the calling logic knows best. - /// - /// This is a no-op if the account doesn't already exist. If it does then it will ensure - /// cleanups (those in `on_killed_account`) take place. - fn kill_account(who: &T::AccountId) { - if Account::::contains_key(who) { - let account = Account::::take(who); - if account.refcount > 0 { - debug::debug!( - target: "system", - "WARNING: Referenced account deleted. This is probably a bug." - ); - } - } - Module::::on_killed_account(who.clone()); + Self::deposit_event(Event::KilledAccount(who)); } /// Determine whether or not it is possible to update the code. @@ -1259,115 +1632,113 @@ impl Module { } } -/// Event handler which calls on_created_account when it happens. 
-pub struct CallOnCreatedAccount(PhantomData); -impl Happened for CallOnCreatedAccount { - fn happened(who: &T::AccountId) { - Module::::on_created_account(who.clone()); +/// Event handler which registers a provider when created. +pub struct Provider(PhantomData); +impl HandleLifetime for Provider { + fn created(t: &T::AccountId) -> Result<(), DispatchError> { + Pallet::::inc_providers(t); + Ok(()) + } + fn killed(t: &T::AccountId) -> Result<(), DispatchError> { + Pallet::::dec_providers(t).map(|_| ()) + } +} + +/// Event handler which registers a self-sufficient when created. +pub struct SelfSufficient(PhantomData); +impl HandleLifetime for SelfSufficient { + fn created(t: &T::AccountId) -> Result<(), DispatchError> { + Pallet::::inc_sufficients(t); + Ok(()) + } + fn killed(t: &T::AccountId) -> Result<(), DispatchError> { + Pallet::::dec_sufficients(t); + Ok(()) } } -/// Event handler which calls kill_account when it happens. -pub struct CallKillAccount(PhantomData); -impl Happened for CallKillAccount { - fn happened(who: &T::AccountId) { - Module::::kill_account(who) +/// Event handler which registers a consumer when created. +pub struct Consumer(PhantomData); +impl HandleLifetime for Consumer { + fn created(t: &T::AccountId) -> Result<(), DispatchError> { + Pallet::::inc_consumers(t) + } + fn killed(t: &T::AccountId) -> Result<(), DispatchError> { + Pallet::::dec_consumers(t); + Ok(()) } } -impl BlockNumberProvider for Module -{ - type BlockNumber = ::BlockNumber; +impl BlockNumberProvider for Pallet { + type BlockNumber = ::BlockNumber; fn current_block_number() -> Self::BlockNumber { - Module::::block_number() + Pallet::::block_number() } } -// Implement StoredMap for a simple single-item, kill-account-on-remove system. This works fine for -// storing a single item which is required to not be empty/default for the account to exist. -// Anything more complex will need more sophisticated logic. -impl StoredMap for Module { +fn is_providing(d: &T) -> bool { + d != &T::default() +} + +/// Implement StoredMap for a simple single-item, provide-when-not-default system. This works fine +/// for storing a single item which allows the account to continue existing as long as it's not +/// empty/default. +/// +/// Anything more complex will need more sophisticated logic. 
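A sketch of driving these lifetime handlers directly (the `created`/`killed` signatures are as defined above; the `frame_support::traits` path for `HandleLifetime` is an assumption):

```rust
use frame_support::traits::HandleLifetime;
use sp_runtime::DispatchError;

/// Sketch: tie an account's existence to a provider reference via the
/// `Provider` lifetime handler.
fn create_then_reap<T: frame_system::Config>(who: &T::AccountId) -> Result<(), DispatchError> {
    // Adds a provider reference, creating the account if needed.
    frame_system::Provider::<T>::created(who)?;
    // Removes it again; with no other references left, the account is reaped.
    frame_system::Provider::<T>::killed(who)?;
    Ok(())
}
```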
+impl StoredMap for Pallet { fn get(k: &T::AccountId) -> T::AccountData { Account::::get(k).data } - fn is_explicit(k: &T::AccountId) -> bool { - Account::::contains_key(k) - } - fn insert(k: &T::AccountId, data: T::AccountData) { - let existed = Account::::contains_key(k); - Account::::mutate(k, |a| a.data = data); - if !existed { - Self::on_created_account(k.clone()); - } - } - fn remove(k: &T::AccountId) { - Self::kill_account(k) - } - fn mutate(k: &T::AccountId, f: impl FnOnce(&mut T::AccountData) -> R) -> R { - let existed = Account::::contains_key(k); - let r = Account::::mutate(k, |a| f(&mut a.data)); - if !existed { - Self::on_created_account(k.clone()); - } - r - } - fn mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> R) -> R { - Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }).expect("Infallible; qed") - } - fn try_mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> Result) -> Result { - Account::::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let (maybe_prefix, mut maybe_data) = split_inner( - maybe_value.take(), - |account| ((account.nonce, account.refcount), account.data) - ); - f(&mut maybe_data).map(|result| { - *maybe_value = maybe_data.map(|data| { - let (nonce, refcount) = maybe_prefix.unwrap_or_default(); - AccountInfo { nonce, refcount, data } - }); - (existed, maybe_value.is_some(), result) - }) - }).map(|(existed, exists, v)| { - if !existed && exists { - Self::on_created_account(k.clone()); - } else if existed && !exists { - Self::on_killed_account(k.clone()); + + fn try_mutate_exists>( + k: &T::AccountId, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + let account = Account::::get(k); + let was_providing = is_providing(&account.data); + let mut some_data = if was_providing { Some(account.data) } else { None }; + let result = f(&mut some_data)?; + let is_providing = some_data.is_some(); + if !was_providing && is_providing { + Self::inc_providers(k); + } else if was_providing && !is_providing { + match Self::dec_providers(k)? { + DecRefStatus::Reaped => return Ok(result), + DecRefStatus::Exists => { + // Update value as normal... + }, } - v - }) + } else if !was_providing && !is_providing { + return Ok(result) + } + Account::::mutate(k, |a| a.data = some_data.unwrap_or_default()); + Ok(result) } } /// Split an `option` into two constituent options, as defined by a `splitter` function. -pub fn split_inner(option: Option, splitter: impl FnOnce(T) -> (R, S)) - -> (Option, Option) -{ +pub fn split_inner( + option: Option, + splitter: impl FnOnce(T) -> (R, S), +) -> (Option, Option) { match option { Some(inner) => { let (r, s) = splitter(inner); (Some(r), Some(s)) - } + }, None => (None, None), } } - -impl IsDeadAccount for Module { - fn is_dead_account(who: &T::AccountId) -> bool { - !Account::::contains_key(who) - } -} - -pub struct ChainContext(sp_std::marker::PhantomData); +pub struct ChainContext(PhantomData); impl Default for ChainContext { fn default() -> Self { - ChainContext(sp_std::marker::PhantomData) + ChainContext(PhantomData) } } -impl Lookup for ChainContext { +impl Lookup for ChainContext { type Source = ::Source; type Target = ::Target; @@ -1375,3 +1746,14 @@ impl Lookup for ChainContext { ::lookup(s) } } + +/// Prelude to be used alongside pallet macro, for ease of use. +pub mod pallet_prelude { + pub use crate::{ensure_none, ensure_root, ensure_signed}; + + /// Type alias for the `Origin` associated type of system config. 
+ pub type OriginFor&lt;T&gt; = &lt;T as crate::Config&gt;::Origin; + + /// Type alias for the `BlockNumber` associated type of system config. + pub type BlockNumberFor&lt;T&gt; = &lt;T as crate::Config&gt;::BlockNumber; +} diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs new file mode 100644 index 0000000000000..687fb6f3dd367 --- /dev/null +++ b/frame/system/src/limits.rs @@ -0,0 +1,440 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Block resource limits configuration structures. +//! +//! FRAME defines two resources that are limited within a block: +//! - Weight (execution cost/time) +//! - Length (block size) +//! +//! `frame_system` tracks consumption of each of these resources separately for each +//! `DispatchClass`. This module contains the configuration objects for both resources, +//! which should be passed to the `frame_system` configuration when the runtime is being set up. + +use frame_support::weights::{constants, DispatchClass, OneOrMany, PerDispatchClass, Weight}; +use scale_info::TypeInfo; +use sp_runtime::{Perbill, RuntimeDebug}; + +/// Block length limit configuration. +#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] +pub struct BlockLength { + /// Maximal total length in bytes for each extrinsic class. + /// + /// In the worst case, the total block length is going to be: + /// `MAX(max)` + pub max: PerDispatchClass&lt;u32&gt;, +} + +impl Default for BlockLength { + fn default() -> Self { + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, DEFAULT_NORMAL_RATIO) + } +} + +impl BlockLength { + /// Create a new `BlockLength` with `max` for every class. + pub fn max(max: u32) -> Self { + Self { max: PerDispatchClass::new(|_| max) } + } + + /// Create a new `BlockLength` with `max` for `Operational` & `Mandatory` + /// and `normal * max` for `Normal`. + pub fn max_with_normal_ratio(max: u32, normal: Perbill) -> Self { + Self { + max: PerDispatchClass::new(|class| { + if class == DispatchClass::Normal { + normal * max + } else { + max + } + }), + } + } +} + +#[derive(Default, RuntimeDebug)] +pub struct ValidationErrors { + pub has_errors: bool, + #[cfg(feature = "std")] + pub errors: Vec&lt;String&gt;, +} + +macro_rules! error_assert { + ($cond : expr, $err : expr, $format : expr $(, $params: expr )*$(,)*) => { + if !$cond { + $err.has_errors = true; + #[cfg(feature = "std")] + { $err.errors.push(format!($format $(, &$params )*)); } + } + } } + +/// A result of validating `BlockWeights` correctness. +pub type ValidationResult = Result&lt;BlockWeights, ValidationErrors&gt;; + +/// A ratio of `Normal` dispatch class within a block, used as the default value for +/// `BlockWeight` and `BlockLength`. The `Default` impls are provided mostly for convenience +/// to use in tests. +const DEFAULT_NORMAL_RATIO: Perbill = Perbill::from_percent(75); + +/// `DispatchClass`-specific weight configuration.
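To make the ratio arithmetic concrete, here is a sketch of what the `Default` values above work out to (5 MiB cap, 75% `Normal` ratio); it relies on the same `PerDispatchClass::get` accessor that `BlockWeights::get` uses below:

```rust
use frame_support::weights::DispatchClass;
use frame_system::limits::BlockLength;
use sp_runtime::Perbill;

// Sketch: the per-class limits produced by `max_with_normal_ratio`.
fn default_length_limits() {
    let len = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, Perbill::from_percent(75));
    // `Operational` (and `Mandatory`) get the full 5 MiB...
    assert_eq!(*len.max.get(DispatchClass::Operational), 5 * 1024 * 1024);
    // ...while `Normal` extrinsics are capped at 75% of it: 3_932_160 bytes.
    assert_eq!(*len.max.get(DispatchClass::Normal), 3_932_160);
}
```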
+#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] +pub struct WeightsPerClass { + /// Base weight of a single extrinsic of a given class. + pub base_extrinsic: Weight, + /// Maximal weight of a single extrinsic. Should NOT include `base_extrinsic` cost. + /// + /// `None` indicates that this class of extrinsics doesn't have a limit. + pub max_extrinsic: Option&lt;Weight&gt;, + /// Block maximal total weight for all extrinsics of a given class. + /// + /// `None` indicates that weight sum of this class of extrinsics is not + /// restricted. Use this value carefully, since it might produce heavily oversized + /// blocks. + /// + /// In the worst case, the total weight consumed by the class is going to be: + /// `MAX(max_total) + MAX(reserved)`. + pub max_total: Option&lt;Weight&gt;, + /// Block reserved allowance for all extrinsics of a particular class. + /// + /// Setting to `None` indicates that extrinsics of that class are allowed + /// to go over total block weight (but at most `max_total` for that class). + /// Setting to `Some(x)` guarantees that at least `x` weight of particular class + /// is processed in every block. + pub reserved: Option&lt;Weight&gt;, +} + +/// Block weight limits & base values configuration. +/// +/// This object is responsible for defining weight limits and base weight values tracked +/// during extrinsic execution. +/// +/// Each block starts with `base_block` weight being consumed right away. Next up the +/// `on_initialize` pallet callbacks are invoked and their cost is added before any extrinsic +/// is executed. This cost is tracked as `Mandatory` dispatch class. +/// +/// ```text,ignore +/// | | `max_block` | | +/// | | | | +/// | | | | +/// | | | | +/// | | | #| `on_initialize` +/// | #| `base_block` | #| +/// |NOM| |NOM| +/// ||\_ Mandatory +/// |\__ Operational +/// \___ Normal +/// ``` +/// +/// The remaining capacity can be used to dispatch extrinsics. Note that each dispatch class +/// is being tracked separately, but the sum can't exceed `max_block` (except for `reserved`). +/// Below you can see a picture representing a full block with 3 extrinsics (two `Operational` and +/// one `Normal`). Each class has its own limit `max_total`, but also the sum cannot exceed +/// `max_block` value. +/// +/// ```text,ignore +/// -- `Mandatory` limit (unlimited) +/// | # | | | +/// | # | `Ext3` | - - `Operational` limit +/// |# | `Ext2` |- - `Normal` limit +/// | # | `Ext1` | # | +/// | #| `on_initialize` | ##| +/// | #| `base_block` |###| +/// |NOM| |NOM| +/// ``` +/// +/// It should be obvious now that it's possible for one class to reach its limit (say `Normal`), +/// while the block still has capacity to process more transactions (`max_block` not reached, +/// `Operational` transactions can still go in). Setting `max_total` to `None` disables the +/// per-class limit. This is generally highly recommended for `Mandatory` dispatch class, while it +/// can be dangerous for `Normal` class and should only be done with extra care and consideration. +/// +/// Often it's desirable for some class of transactions to be added to the block despite it being +/// full. For instance one might want to prevent high-priority `Normal` transactions from pushing +/// out lower-priority `Operational` transactions. In such cases you might add a `reserved` capacity +/// for a given class.
+/// +/// ```text,ignore +/// _ +/// # \ +/// # `Ext8` - `reserved` +/// # _/ +/// | # | `Ext7` | - - `Operational` limit +/// |# | `Ext6` | | +/// |# | `Ext5` |-# - `Normal` limit +/// |# | `Ext4` |## | +/// | #| `on_initialize` |###| +/// | #| `base_block` |###| +/// |NOM| |NOM| +/// ``` +/// +/// In the above example, `Ext4-6` fill up the block almost up to `max_block`. `Ext7` would not fit +/// if there wasn't the extra `reserved` space for `Operational` transactions. Note that `max_total` +/// limit applies to `reserved` space as well (i.e. the sum of weights of `Ext7` & `Ext8` mustn't +/// exceed it). Setting `reserved` to `None` allows the extrinsics to always get into the block up +/// to their `max_total` limit. If `max_total` is set to `None` as well, all extrinsics with +/// dispatchables of a given class will always end up in the block (recommended for `Mandatory` +/// dispatch class). +/// +/// As a consequence of `reserved` space, total consumed block weight might exceed `max_block` +/// value, so this parameter should rather be thought of as "target block weight" than a hard limit. +#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] +pub struct BlockWeights { + /// Base weight of block execution. + pub base_block: Weight, + /// Maximal total weight consumed by all kinds of extrinsics (without `reserved` space). + pub max_block: Weight, + /// Weight limits for extrinsics of a given dispatch class. + pub per_class: PerDispatchClass&lt;WeightsPerClass&gt;, +} + +impl Default for BlockWeights { + fn default() -> Self { + Self::with_sensible_defaults(1 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + } +} + +impl BlockWeights { + /// Get per-class weight settings. + pub fn get(&self, class: DispatchClass) -> &WeightsPerClass { + self.per_class.get(class) + } + + /// Verifies correctness of this `BlockWeights` object. + pub fn validate(self) -> ValidationResult { + fn or_max(w: Option&lt;Weight&gt;) -> Weight { + w.unwrap_or_else(|| Weight::max_value()) + } + let mut error = ValidationErrors::default(); + + for class in DispatchClass::all() { + let weights = self.per_class.get(*class); + let max_for_class = or_max(weights.max_total); + let base_for_class = weights.base_extrinsic; + let reserved = or_max(weights.reserved); + // Make sure that if total is set it's greater than base_block && + // base_for_class + error_assert!( + (max_for_class > self.base_block && max_for_class > base_for_class) + || max_for_class == 0, + &mut error, + "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", + class, max_for_class, self.base_block, base_for_class, + ); + // Max extrinsic can't be greater than max_for_class. + error_assert!( + weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), + &mut error, + "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", + class, + weights.max_extrinsic, + max_for_class.saturating_sub(base_for_class), + ); + // Max extrinsic should not be 0 + error_assert!( + weights.max_extrinsic.unwrap_or_else(|| Weight::max_value()) > 0, + &mut error, + "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", + class, weights.max_extrinsic, + ); + // Make sure that if reserved is set it's greater than base_for_class.
+ error_assert!( + reserved > base_for_class || reserved == 0, + &mut error, + "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", + class, + reserved, + base_for_class, + ); + // Make sure max block is greater than max_total if it's set. + error_assert!( + self.max_block >= weights.max_total.unwrap_or(0), + &mut error, + "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", + class, + self.max_block, + weights.max_total, + ); + // Make sure we can fit at least one extrinsic. + error_assert!( + self.max_block > base_for_class + self.base_block, + &mut error, + "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", + class, + self.max_block, + base_for_class + self.base_block, + ); + } + + if error.has_errors { + Err(error) + } else { + Ok(self) + } + } + + /// Create new weights definition, with both `Normal` and `Operational` + /// classes limited to given weight. + /// + /// Note there is no reservation for `Operational` class, so this constructor + /// is not suitable for production deployments. + pub fn simple_max(block_weight: Weight) -> Self { + Self::builder() + .base_block(0) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = 0; + }) + .for_class(DispatchClass::non_mandatory(), |weights| { + weights.max_total = block_weight.into(); + }) + .build() + .expect("We only specify max_total and leave base values as defaults; qed") + } + + /// Create a sensible default weights system given only expected maximal block weight and the + /// ratio that `Normal` extrinsics should occupy. + /// + /// Assumptions: + /// - Average block initialization is assumed to be `10%`. + /// - `Operational` transactions have reserved allowance (`1.0 - normal_ratio`) + pub fn with_sensible_defaults(expected_block_weight: Weight, normal_ratio: Perbill) -> Self { + let normal_weight = normal_ratio * expected_block_weight; + Self::builder() + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = normal_weight.into(); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = expected_block_weight.into(); + weights.reserved = (expected_block_weight - normal_weight).into(); + }) + .avg_block_initialization(Perbill::from_percent(10)) + .build() + .expect("Sensible defaults are tested to be valid; qed") + } + + /// Start constructing new `BlockWeights` object. + /// + /// By default all kinds except of `Mandatory` extrinsics are disallowed. + pub fn builder() -> BlockWeightsBuilder { + BlockWeightsBuilder { + weights: BlockWeights { + base_block: constants::BlockExecutionWeight::get(), + max_block: 0, + per_class: PerDispatchClass::new(|class| { + let initial = if class == DispatchClass::Mandatory { None } else { Some(0) }; + WeightsPerClass { + base_extrinsic: constants::ExtrinsicBaseWeight::get(), + max_extrinsic: None, + max_total: initial, + reserved: initial, + } + }), + }, + init_cost: None, + } + } +} + +/// An opinionated builder for `Weights` object. +pub struct BlockWeightsBuilder { + weights: BlockWeights, + init_cost: Option, +} + +impl BlockWeightsBuilder { + /// Set base block weight. + pub fn base_block(mut self, base_block: Weight) -> Self { + self.weights.base_block = base_block; + self + } + + /// Average block initialization weight cost. + /// + /// This value is used to derive maximal allowed extrinsic weight for each + /// class, based on the allowance. 
+ /// + /// This is to make sure that extrinsics don't stay forever in the pool, + /// because they could seemingly fit the block (since they are below `max_block`), + /// but the cost of calling `on_initialize` always prevents them from being included. + pub fn avg_block_initialization(mut self, init_cost: Perbill) -> Self { + self.init_cost = Some(init_cost); + self + } + + /// Set parameters for a particular class. + /// + /// Note: `None` values of `max_extrinsic` will be overwritten in `build` in case + /// `avg_block_initialization` rate is set to a non-zero value. + pub fn for_class( + mut self, + class: impl OneOrMany&lt;DispatchClass&gt;, + action: impl Fn(&mut WeightsPerClass), + ) -> Self { + for class in class.into_iter() { + action(self.weights.per_class.get_mut(class)); + } + self + } + + /// Construct the `BlockWeights` object. + pub fn build(self) -> ValidationResult { + // compute max extrinsic size + let Self { mut weights, init_cost } = self; + + // compute max block size. + for class in DispatchClass::all() { + weights.max_block = match weights.per_class.get(*class).max_total { + Some(max) if max > weights.max_block => max, + _ => weights.max_block, + }; + } + // compute max size of single extrinsic + if let Some(init_weight) = init_cost.map(|rate| rate * weights.max_block) { + for class in DispatchClass::all() { + let per_class = weights.per_class.get_mut(*class); + if per_class.max_extrinsic.is_none() && init_cost.is_some() { + per_class.max_extrinsic = per_class + .max_total + .map(|x| x.saturating_sub(init_weight)) + .map(|x| x.saturating_sub(per_class.base_extrinsic)); + } + } + } + + // Validate the result + weights.validate() + } + + /// Construct the `BlockWeights` object or panic if it's invalid. + /// + /// This is a convenience method to be called whenever you construct a runtime. + pub fn build_or_panic(self) -> BlockWeights { + self.build().expect( + "Builder finished with `build_or_panic`; The panic is expected if runtime weights are not correct" + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn default_weights_are_valid() { + BlockWeights::default().validate().unwrap(); + } +} diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index cd67a74114073..9dd35691cab84 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,31 +15,34 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::*; -use sp_std::cell::RefCell; +use crate::{self as frame_system, *}; +use frame_support::parameter_types; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -use frame_support::{ - impl_outer_origin, parameter_types, - weights::PostDispatchInfo, -}; +use sp_std::cell::RefCell; -impl_outer_origin!
-	pub enum Origin for Test where system = super {}
-}
+type UncheckedExtrinsic = mocking::MockUncheckedExtrinsic<Test>;
+type Block = mocking::MockBlock<Test>;

-#[derive(Clone, Eq, PartialEq, Debug, Default)]
-pub struct Test;
+frame_support::construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+	}
+);
+
+const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
+const MAX_BLOCK_WEIGHT: Weight = 1024;

 parameter_types! {
 	pub const BlockHashCount: u64 = 10;
-	pub const MaximumBlockWeight: Weight = 1024;
-	pub const MaximumExtrinsicWeight: Weight = 768;
-	pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
-	pub const MaximumBlockLength: u32 = 1024;
 	pub Version: RuntimeVersion = RuntimeVersion {
 		spec_name: sp_version::create_runtime_str!("test"),
 		impl_name: sp_version::create_runtime_str!("system-test"),
@@ -49,39 +52,45 @@ parameter_types! {
 		apis: sp_version::create_apis_vec!([]),
 		transaction_version: 1,
 	};
-	pub const BlockExecutionWeight: Weight = 10;
-	pub const ExtrinsicBaseWeight: Weight = 5;
 	pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight {
 		read: 10,
 		write: 100,
 	};
+	pub RuntimeBlockWeights: limits::BlockWeights = limits::BlockWeights::builder()
+		.base_block(10)
+		.for_class(DispatchClass::all(), |weights| {
+			weights.base_extrinsic = 5;
+		})
+		.for_class(DispatchClass::Normal, |weights| {
+			weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT);
+		})
+		.for_class(DispatchClass::Operational, |weights| {
+			weights.max_total = Some(MAX_BLOCK_WEIGHT);
+			weights.reserved = Some(
+				MAX_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT
+			);
+		})
+		.avg_block_initialization(Perbill::from_percent(0))
+		.build_or_panic();
+	pub RuntimeBlockLength: limits::BlockLength =
+		limits::BlockLength::max_with_normal_ratio(1024, NORMAL_DISPATCH_RATIO);
 }

-thread_local!{
+thread_local! {
 	pub static KILLED: RefCell<Vec<u64>> = RefCell::new(vec![]);
 }

 pub struct RecordKilled;
 impl OnKilledAccount<u64> for RecordKilled {
-	fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) }
-}
-
-#[derive(Debug, codec::Encode, codec::Decode)]
-pub struct Call;
-
-impl Dispatchable for Call {
-	type Origin = Origin;
-	type Trait = ();
-	type Info = DispatchInfo;
-	type PostInfo = PostDispatchInfo;
-	fn dispatch(self, _origin: Self::Origin)
-		-> sp_runtime::DispatchResultWithInfo<Self::PostInfo> {
-			panic!("Do not use dummy implementation for dispatch.");
+	fn on_killed_account(who: &u64) {
+		KILLED.with(|r| r.borrow_mut().push(*who))
 	}
 }

-impl Trait for Test {
-	type BaseCallFilter = ();
+impl Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = RuntimeBlockWeights;
+	type BlockLength = RuntimeBlockLength;
 	type Origin = Origin;
 	type Call = Call;
 	type Index = u64;
@@ -91,35 +100,35 @@ impl Trait for Test {
 	type AccountId = u64;
 	type Lookup = IdentityLookup<Self::AccountId>;
 	type Header = Header;
-	type Event = Event<Self>;
+	type Event = Event;
 	type BlockHashCount = BlockHashCount;
-	type MaximumBlockWeight = MaximumBlockWeight;
 	type DbWeight = DbWeight;
-	type BlockExecutionWeight = BlockExecutionWeight;
-	type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
-	type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
-	type AvailableBlockRatio = AvailableBlockRatio;
-	type MaximumBlockLength = MaximumBlockLength;
 	type Version = Version;
-	type PalletInfo = ();
+	type PalletInfo = PalletInfo;
 	type AccountData = u32;
 	type OnNewAccount = ();
 	type OnKilledAccount = RecordKilled;
 	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
 }

-pub type System = Module<Test>;
-pub type SysEvent = <Test as Trait>::Event;
+pub type SysEvent = frame_system::Event<Test>;

-pub const CALL: &<Test as Trait>::Call = &Call;
+/// A simple call; which one doesn't matter.
+pub const CALL: &<Test as Config>::Call =
+	&Call::System(frame_system::Call::set_heap_pages { pages: 0u64 });

 /// Create new externalities for `System` module tests.
 pub fn new_test_ext() -> sp_io::TestExternalities {
-	let mut ext: sp_io::TestExternalities = GenesisConfig::default().build_storage::<Test>().unwrap().into();
+	let mut ext: sp_io::TestExternalities =
+		GenesisConfig::default().build_storage().unwrap().into();
 	// Add to each test the initial weight of a block
-	ext.execute_with(|| System::register_extra_weight_unchecked(
-		<Test as Trait>::BlockExecutionWeight::get(),
-		DispatchClass::Mandatory
-	));
+	ext.execute_with(|| {
+		System::register_extra_weight_unchecked(
+			<Test as Config>::BlockWeights::get().base_block,
+			DispatchClass::Mandatory,
+		)
+	});
 	ext
 }
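How this mock is exercised is easiest to see from a test. The snippet below condenses `event_util_functions_should_work` from `frame/system/src/tests.rs`, later in this diff; it introduces no new API:

```rust
// Condensed restatement of a test from `frame/system/src/tests.rs` (below),
// showing the usual pattern: build externalities, then run pallet calls inside.
#[test]
fn example_mock_usage() {
    new_test_ext().execute_with(|| {
        System::set_block_number(1);
        System::deposit_event(SysEvent::CodeUpdated);
        System::assert_last_event(SysEvent::CodeUpdated.into());
    });
}
```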
diff --git a/frame/system/src/mocking.rs b/frame/system/src/mocking.rs
new file mode 100644
index 0000000000000..7e6026b726186
--- /dev/null
+++ b/frame/system/src/mocking.rs
@@ -0,0 +1,34 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provide types to help define a mock environment when testing pallets.
+
+use sp_runtime::generic;
+
+/// An unchecked extrinsic type to be used in tests.
+pub type MockUncheckedExtrinsic<T, Signature = (), Extra = ()> = generic::UncheckedExtrinsic<
+	<T as frame_system::Config>::AccountId,
+	<T as frame_system::Config>::Call,
+	Signature,
+	Extra,
+>;
+
+/// An implementation of `sp_runtime::traits::Block` to be used in tests.
+pub type MockBlock<T> = generic::Block<
+	generic::Header<<T as frame_system::Config>::BlockNumber, sp_runtime::traits::BlakeTwo256>,
+	MockUncheckedExtrinsic<T>,
+>;
diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs
index 6e6284b57fdc3..ed758a2556b77 100644
--- a/frame/system/src/offchain.rs
+++ b/frame/system/src/offchain.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -38,10 +38,10 @@
 //!
 //! To be able to use signing, the following traits should be implemented:
 //!
-//! - [`AppCrypto`](./trait.AppCrypto.html): where an application-specific key
-//!   is defined and can be used by this module's helpers for signing.
-//! - [`CreateSignedTransaction`](./trait.CreateSignedTransaction.html): where
-//!   the manner in which the transaction is constructed is defined.
+//! - [`AppCrypto`](./trait.AppCrypto.html): where an application-specific key is defined and can be
+//!   used by this module's helpers for signing.
+//! - [`CreateSignedTransaction`](./trait.CreateSignedTransaction.html): where the manner in which
+//!   the transaction is constructed is defined.
 //!
 //! #### Submit an unsigned transaction with a signed payload
 //!
@@ -53,17 +53,20 @@
 //! #### Submit a signed transaction
 //!
 //! [`Signer`](./struct.Signer.html) can be used to sign/verify payloads
-//!

 #![warn(missing_docs)]

 use codec::Encode;
-use sp_std::collections::btree_set::BTreeSet;
-use sp_std::convert::{TryInto, TryFrom};
-use sp_std::prelude::{Box, Vec};
-use sp_runtime::app_crypto::RuntimeAppPublic;
-use sp_runtime::traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One};
-use frame_support::{debug, storage::StorageMap, RuntimeDebug};
+use frame_support::RuntimeDebug;
+use sp_runtime::{
+	app_crypto::RuntimeAppPublic,
+	traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One},
+};
+use sp_std::{
+	collections::btree_set::BTreeSet,
+	convert::{TryFrom, TryInto},
+	prelude::{Box, Vec},
+};

 /// Marker struct used to flag using all supported keys to sign a payload.
 pub struct ForAll {}
@@ -77,7 +80,7 @@ pub struct ForAny {}
 /// utility function can be used. However, this struct is used by `Signer`
 /// to submit signed transactions providing the signature along with the call.
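Before the implementation details, a usage sketch may help. The snippet below is lifted near-verbatim from the tests at the end of this file (`TestRuntime`, `UintAuthorityId`, `SimplePayload`, and `CALL` are all defined there); it shows `Signer` driving `SubmitTransaction` for an unsigned transaction with a signed payload:

```rust
// Near-verbatim extract from this file's tests (see below); assumes the
// test types `TestRuntime`, `UintAuthorityId`, `SimplePayload`, and `CALL`.
let result = Signer::<TestRuntime, UintAuthorityId>::any_account()
    .send_unsigned_transaction(
        // Build the payload that the selected account signs.
        |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() },
        // Turn the signed payload and signature into the actual runtime call.
        |_payload, _signature| CALL.clone(),
    );
```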
pub struct SubmitTransaction, OverarchingCall> { - _phantom: sp_std::marker::PhantomData<(T, OverarchingCall)> + _phantom: sp_std::marker::PhantomData<(T, OverarchingCall)>, } impl SubmitTransaction @@ -121,10 +124,7 @@ pub struct Signer, X = Fo impl, X> Default for Signer { fn default() -> Self { - Self { - accounts: Default::default(), - _phantom: Default::default(), - } + Self { accounts: Default::default(), _phantom: Default::default() } } } @@ -162,72 +162,73 @@ impl, X> Signer let keystore_accounts = self.keystore_accounts(); match self.accounts { None => Box::new(keystore_accounts), - Some(ref keys) => { - let keystore_lookup: BTreeSet<::Public> = keystore_accounts - .map(|account| account.public).collect(); - - Box::new(keys.into_iter() - .enumerate() - .map(|(index, key)| { - let account_id = key.clone().into_account(); - Account::new(index, account_id, key.clone()) - }) - .filter(move |account| keystore_lookup.contains(&account.public))) - } + Some(ref keys) => { + let keystore_lookup: BTreeSet<::Public> = + keystore_accounts.map(|account| account.public).collect(); + + Box::new( + keys.into_iter() + .enumerate() + .map(|(index, key)| { + let account_id = key.clone().into_account(); + Account::new(index, account_id, key.clone()) + }) + .filter(move |account| keystore_lookup.contains(&account.public)), + ) + }, } } fn keystore_accounts(&self) -> impl Iterator> { - C::RuntimeAppPublic::all() - .into_iter() - .enumerate() - .map(|(index, key)| { - let generic_public = C::GenericPublic::from(key); - let public = generic_public.into(); - let account_id = public.clone().into_account(); - Account::new(index, account_id, public) - }) + C::RuntimeAppPublic::all().into_iter().enumerate().map(|(index, key)| { + let generic_public = C::GenericPublic::from(key); + let public: T::Public = generic_public.into(); + let account_id = public.clone().into_account(); + Account::new(index, account_id, public) + }) } } - impl> Signer { - fn for_all(&self, f: F) -> Vec<(Account, R)> where + fn for_all(&self, f: F) -> Vec<(Account, R)> + where F: Fn(&Account) -> Option, { let accounts = self.accounts_from_keys(); accounts .into_iter() - .filter_map(|account| { - f(&account).map(|res| (account, res)) - }) + .filter_map(|account| f(&account).map(|res| (account, res))) .collect() } } impl> Signer { - fn for_any(&self, f: F) -> Option<(Account, R)> where + fn for_any(&self, f: F) -> Option<(Account, R)> + where F: Fn(&Account) -> Option, { let accounts = self.accounts_from_keys(); for account in accounts.into_iter() { let res = f(&account); if let Some(res) = res { - return Some((account, res)); + return Some((account, res)) } } None } } -impl> SignMessage for Signer { +impl> SignMessage + for Signer +{ type SignatureData = Vec<(Account, T::Signature)>; fn sign_message(&self, message: &[u8]) -> Self::SignatureData { self.for_all(|account| C::sign(message, account.public.clone())) } - fn sign(&self, f: F) -> Self::SignatureData where + fn sign(&self, f: F) -> Self::SignatureData + where F: Fn(&Account) -> TPayload, TPayload: SignedPayload, { @@ -235,14 +236,17 @@ impl> SignMessage for } } -impl> SignMessage for Signer { +impl> SignMessage + for Signer +{ type SignatureData = Option<(Account, T::Signature)>; fn sign_message(&self, message: &[u8]) -> Self::SignatureData { self.for_any(|account| C::sign(message, account.public.clone())) } - fn sign(&self, f: F) -> Self::SignatureData where + fn sign(&self, f: F) -> Self::SignatureData + where F: Fn(&Account) -> TPayload, TPayload: SignedPayload, { @@ -251,16 
+255,14 @@ impl> SignMessage for } impl< - T: CreateSignedTransaction + SigningTypes, - C: AppCrypto, - LocalCall, -> SendSignedTransaction for Signer { + T: CreateSignedTransaction + SigningTypes, + C: AppCrypto, + LocalCall, + > SendSignedTransaction for Signer +{ type Result = Option<(Account, Result<(), ()>)>; - fn send_signed_transaction( - &self, - f: impl Fn(&Account) -> LocalCall, - ) -> Self::Result { + fn send_signed_transaction(&self, f: impl Fn(&Account) -> LocalCall) -> Self::Result { self.for_any(|account| { let call = f(account); self.send_single_signed_transaction(account, call) @@ -269,16 +271,14 @@ impl< } impl< - T: SigningTypes + CreateSignedTransaction, - C: AppCrypto, - LocalCall, -> SendSignedTransaction for Signer { + T: SigningTypes + CreateSignedTransaction, + C: AppCrypto, + LocalCall, + > SendSignedTransaction for Signer +{ type Result = Vec<(Account, Result<(), ()>)>; - fn send_signed_transaction( - &self, - f: impl Fn(&Account) -> LocalCall, - ) -> Self::Result { + fn send_signed_transaction(&self, f: impl Fn(&Account) -> LocalCall) -> Self::Result { self.for_all(|account| { let call = f(account); self.send_single_signed_transaction(account, call) @@ -287,10 +287,11 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, - C: AppCrypto, - LocalCall, -> SendUnsignedTransaction for Signer { + T: SigningTypes + SendTransactionTypes, + C: AppCrypto, + LocalCall, + > SendUnsignedTransaction for Signer +{ type Result = Option<(Account, Result<(), ()>)>; fn send_unsigned_transaction( @@ -304,7 +305,7 @@ impl< { self.for_any(|account| { let payload = f(account); - let signature= payload.sign::()?; + let signature = payload.sign::()?; let call = f2(payload, signature); self.submit_unsigned_transaction(call) }) @@ -312,10 +313,11 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, - C: AppCrypto, - LocalCall, -> SendUnsignedTransaction for Signer { + T: SigningTypes + SendTransactionTypes, + C: AppCrypto, + LocalCall, + > SendUnsignedTransaction for Signer +{ type Result = Vec<(Account, Result<(), ()>)>; fn send_unsigned_transaction( @@ -325,7 +327,8 @@ impl< ) -> Self::Result where F: Fn(&Account) -> TPayload, - TPayload: SignedPayload { + TPayload: SignedPayload, + { self.for_all(|account| { let payload = f(account); let signature = payload.sign::()?; @@ -353,16 +356,13 @@ impl Account { } } -impl Clone for Account where +impl Clone for Account +where T::AccountId: Clone, T::Public: Clone, { fn clone(&self) -> Self { - Self { - index: self.index, - id: self.id.clone(), - public: self.public.clone(), - } + Self { index: self.index, id: self.id.clone(), public: self.public.clone() } } } @@ -376,12 +376,9 @@ impl Clone for Account where /// The point of this trait is to be able to easily convert between `RuntimeAppPublic`, the wrapped /// (generic = non application-specific) crypto types and the `Public` type required by the runtime. /// -/// TODO [#5662] Potentially use `IsWrappedBy` types, or find some other way to make it easy to -/// obtain unwrapped crypto (and wrap it back). 
-/// -/// Example (pseudo-)implementation: +/// Example (pseudo-)implementation: /// ```ignore -/// // im-online specific crypto +/// // im-online specific crypto /// type RuntimeAppPublic = ImOnline(sr25519::Public); /// /// // wrapped "raw" crypto @@ -392,20 +389,20 @@ impl Clone for Account where /// type Public = MultiSigner: From; /// type Signature = MulitSignature: From; /// ``` +// TODO [#5662] Potentially use `IsWrappedBy` types, or find some other way to make it easy to +// obtain unwrapped crypto (and wrap it back). pub trait AppCrypto { /// A application-specific crypto. type RuntimeAppPublic: RuntimeAppPublic; /// A raw crypto public key wrapped by `RuntimeAppPublic`. - type GenericPublic: - From + type GenericPublic: From + Into + TryFrom + Into; /// A matching raw crypto `Signature` type. - type GenericSignature: - From<::Signature> + type GenericSignature: From<::Signature> + Into<::Signature> + TryFrom + Into; @@ -426,16 +423,15 @@ pub trait AppCrypto { fn verify(payload: &[u8], public: Public, signature: Signature) -> bool { let p: Self::GenericPublic = match public.try_into() { Ok(a) => a, - _ => return false + _ => return false, }; let x = Into::::into(p); let signature: Self::GenericSignature = match signature.try_into() { Ok(a) => a, - _ => return false + _ => return false, }; - let signature = Into::<< - Self::RuntimeAppPublic as RuntimeAppPublic - >::Signature>::into(signature); + let signature = + Into::<::Signature>::into(signature); x.verify(&payload, &signature) } @@ -445,11 +441,10 @@ pub trait AppCrypto { /// /// This trait adds extra bounds to `Public` and `Signature` types of the runtime /// that are necessary to use these types for signing. -/// -/// TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? -/// Seems that this may cause issues with bounds resolution. -pub trait SigningTypes: crate::Trait { - /// A public key that is capable of identifing `AccountId`s. +// TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? +// Seems that this may cause issues with bounds resolution. +pub trait SigningTypes: crate::Config { + /// A public key that is capable of identifying `AccountId`s. /// /// Usually that's either a raw crypto public key (e.g. `sr25519::Public`) or /// an aggregate type for multiple crypto public keys, like `MulitSigner`. @@ -458,23 +453,21 @@ pub trait SigningTypes: crate::Trait { + IdentifyAccount + core::fmt::Debug + codec::Codec - + Ord; + + Ord + + scale_info::TypeInfo; /// A matching `Signature` type. - type Signature: Clone - + PartialEq - + core::fmt::Debug - + codec::Codec; + type Signature: Clone + PartialEq + core::fmt::Debug + codec::Codec + scale_info::TypeInfo; } /// A definition of types required to submit transactions from within the runtime. pub trait SendTransactionTypes { /// The extrinsic type expected by the runtime. - type Extrinsic: ExtrinsicT + codec::Encode; + type Extrinsic: ExtrinsicT + codec::Encode; /// The runtime's call type. /// /// This has additional bound to be able to be created from pallet-local `Call` types. - type OverarchingCall: From; + type OverarchingCall: From + codec::Encode; } /// Create signed transaction. @@ -484,7 +477,9 @@ pub trait SendTransactionTypes { /// This will most likely include creation of `SignedExtra` (a set of `SignedExtensions`). /// Note that the result can be altered by inspecting the `Call` (for instance adjusting /// fees, or mortality depending on the `pallet` being called). 
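A sketch of what an implementation can look like, modelled on the mock used in this file's tests. Treat the method signature and the `(nonce, ())` signature payload as assumptions to be checked against the trait definition below, not as prescribed API:

```rust
// Hedged sketch (assumed signature, modelled on the test mock): the runtime
// assembles the extrinsic's signature payload itself. `TestRuntime` and the
// `(nonce, ())` extra are mock-specific assumptions, not part of the trait.
impl CreateSignedTransaction<Call> for TestRuntime {
    fn create_transaction<C: AppCrypto<Self::Public, Self::Signature>>(
        call: Call,
        _public: Self::Public,
        _account: Self::AccountId,
        nonce: Self::Index,
    ) -> Option<(Call, <Self::Extrinsic as ExtrinsicT>::SignaturePayload)> {
        // A real runtime would build `SignedExtra` and sign here; the mock
        // simply threads the nonce through with an empty extra.
        Some((call, (nonce, ())))
    }
}
```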
-pub trait CreateSignedTransaction: SendTransactionTypes + SigningTypes { +pub trait CreateSignedTransaction: + SendTransactionTypes + SigningTypes +{ /// Attempt to create signed extrinsic data that encodes call from given account. /// /// Runtime implementation is free to construct the payload to sign and the signature @@ -516,18 +511,19 @@ pub trait SignMessage { /// /// This method expects `f` to return a `SignedPayload` /// object which is then used for signing. - fn sign(&self, f: F) -> Self::SignatureData where + fn sign(&self, f: F) -> Self::SignatureData + where F: Fn(&Account) -> TPayload, - TPayload: SignedPayload, - ; + TPayload: SignedPayload; } /// Submit a signed transaction to the transaction pool. pub trait SendSignedTransaction< T: SigningTypes + CreateSignedTransaction, C: AppCrypto, - LocalCall -> { + LocalCall, +> +{ /// A submission result. /// /// This should contain an indication of success and the account that was used for signing. @@ -539,10 +535,7 @@ pub trait SendSignedTransaction< /// to be returned. /// The call is then wrapped into a transaction (see `#CreateSignedTransaction`), signed and /// submitted to the pool. - fn send_signed_transaction( - &self, - f: impl Fn(&Account) -> LocalCall, - ) -> Self::Result; + fn send_signed_transaction(&self, f: impl Fn(&Account) -> LocalCall) -> Self::Result; /// Wraps the call into transaction, signs using given account and submits to the pool. fn send_single_signed_transaction( @@ -551,8 +544,8 @@ pub trait SendSignedTransaction< call: LocalCall, ) -> Option> { let mut account_data = crate::Account::::get(&account.id); - debug::native::debug!( - target: "offchain", + log::debug!( + target: "runtime::offchain", "Creating signed transaction from account: {:?} (nonce: {:?})", account.id, account_data.nonce, @@ -561,10 +554,9 @@ pub trait SendSignedTransaction< call.into(), account.public.clone(), account.id.clone(), - account_data.nonce + account_data.nonce, )?; - let res = SubmitTransaction:: - ::submit_transaction(call, Some(signature)); + let res = SubmitTransaction::::submit_transaction(call, Some(signature)); if res.is_ok() { // increment the nonce. This is fine, since the code should always @@ -578,10 +570,7 @@ pub trait SendSignedTransaction< } /// Submit an unsigned transaction onchain with a signed payload -pub trait SendUnsignedTransaction< - T: SigningTypes + SendTransactionTypes, - LocalCall, -> { +pub trait SendUnsignedTransaction, LocalCall> { /// A submission result. /// /// Should contain the submission result and the account(s) that signed the payload. @@ -603,12 +592,8 @@ pub trait SendUnsignedTransaction< TPayload: SignedPayload; /// Submits an unsigned call to the transaction pool. 
- fn submit_unsigned_transaction( - &self, - call: LocalCall - ) -> Option> { - Some(SubmitTransaction:: - ::submit_unsigned_transaction(call.into())) + fn submit_unsigned_transaction(&self, call: LocalCall) -> Option> { + Some(SubmitTransaction::::submit_unsigned_transaction(call.into())) } } @@ -633,14 +618,13 @@ pub trait SignedPayload: Encode { } } - #[cfg(test)] mod tests { use super::*; + use crate::mock::{Call, Test as TestRuntime, CALL}; use codec::Decode; - use crate::mock::{Test as TestRuntime, Call}; use sp_core::offchain::{testing, TransactionPoolExt}; - use sp_runtime::testing::{UintAuthorityId, TestSignature, TestXt}; + use sp_runtime::testing::{TestSignature, TestXt, UintAuthorityId}; impl SigningTypes for TestRuntime { type Public = UintAuthorityId; @@ -677,16 +661,8 @@ mod tests { type GenericSignature = TestSignature; } - fn assert_account( - next: Option<(Account, Result<(), ()>)>, - index: usize, - id: u64, - ) { - assert_eq!(next, Some((Account { - index, - id, - public: id.into(), - }, Ok(())))); + fn assert_account(next: Option<(Account, Result<(), ()>)>, index: usize, id: u64) { + assert_eq!(next, Some((Account { index, id, public: id.into() }, Ok(())))); } #[test] @@ -701,16 +677,10 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::all_accounts() + let result = Signer::::all_accounts() .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - Call - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -742,16 +712,10 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::any_account() + let result = Signer::::any_account() .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - Call - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -779,17 +743,11 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::all_accounts() + let result = Signer::::all_accounts() .with_filter(vec![0xf2.into(), 0xf1.into()]) .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - Call - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -819,17 +777,11 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::any_account() + let result = Signer::::any_account() .with_filter(vec![0xf2.into(), 0xf1.into()]) .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - Call - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -844,5 +796,4 @@ mod tests { assert_eq!(tx1.signature, None); }); } - } diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 55286d951cc27..a4dd3403f2c3a 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,10 +16,15 @@ // limitations under the License. use crate::*; -use mock::{*, Origin}; +use frame_support::{ + assert_noop, assert_ok, dispatch::PostDispatchInfo, weights::WithPostDispatchInfo, +}; +use mock::{Origin, *}; use sp_core::H256; -use sp_runtime::DispatchError; -use frame_support::weights::WithPostDispatchInfo; +use sp_runtime::{ + traits::{BlakeTwo256, Header}, + DispatchError, DispatchErrorWithPostInfo, +}; #[test] fn origin_works() { @@ -31,63 +36,143 @@ fn origin_works() { #[test] fn stored_map_works() { new_test_ext().execute_with(|| { - System::insert(&0, 42); - assert!(System::allow_death(&0)); + assert_ok!(System::insert(&0, 42)); + assert!(!System::is_provider_required(&0)); - System::inc_ref(&0); - assert!(!System::allow_death(&0)); + assert_eq!( + Account::::get(0), + AccountInfo { nonce: 0, providers: 1, consumers: 0, sufficients: 0, data: 42 } + ); + + assert_ok!(System::inc_consumers(&0)); + assert!(System::is_provider_required(&0)); - System::insert(&0, 69); - assert!(!System::allow_death(&0)); + assert_ok!(System::insert(&0, 69)); + assert!(System::is_provider_required(&0)); - System::dec_ref(&0); - assert!(System::allow_death(&0)); + System::dec_consumers(&0); + assert!(!System::is_provider_required(&0)); assert!(KILLED.with(|r| r.borrow().is_empty())); - System::kill_account(&0); + assert_ok!(System::remove(&0)); assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); }); } +#[test] +fn provider_ref_handover_to_self_sufficient_ref_works() { + new_test_ext().execute_with(|| { + assert_eq!(System::inc_providers(&0), IncRefStatus::Created); + System::inc_account_nonce(&0); + assert_eq!(System::account_nonce(&0), 1); + + // a second reference coming and going doesn't change anything. + assert_eq!(System::inc_sufficients(&0), IncRefStatus::Existed); + assert_eq!(System::dec_sufficients(&0), DecRefStatus::Exists); + assert_eq!(System::account_nonce(&0), 1); + + // a provider reference coming and going doesn't change anything. + assert_eq!(System::inc_providers(&0), IncRefStatus::Existed); + assert_eq!(System::dec_providers(&0).unwrap(), DecRefStatus::Exists); + assert_eq!(System::account_nonce(&0), 1); + + // decreasing the providers with a self-sufficient present should not delete the account + assert_eq!(System::inc_sufficients(&0), IncRefStatus::Existed); + assert_eq!(System::dec_providers(&0).unwrap(), DecRefStatus::Exists); + assert_eq!(System::account_nonce(&0), 1); + + // decreasing the sufficients should delete the account + assert_eq!(System::dec_sufficients(&0), DecRefStatus::Reaped); + assert_eq!(System::account_nonce(&0), 0); + }); +} + +#[test] +fn self_sufficient_ref_handover_to_provider_ref_works() { + new_test_ext().execute_with(|| { + assert_eq!(System::inc_sufficients(&0), IncRefStatus::Created); + System::inc_account_nonce(&0); + assert_eq!(System::account_nonce(&0), 1); + + // a second reference coming and going doesn't change anything. + assert_eq!(System::inc_providers(&0), IncRefStatus::Existed); + assert_eq!(System::dec_providers(&0).unwrap(), DecRefStatus::Exists); + assert_eq!(System::account_nonce(&0), 1); + + // a sufficient reference coming and going doesn't change anything. 
+ assert_eq!(System::inc_sufficients(&0), IncRefStatus::Existed); + assert_eq!(System::dec_sufficients(&0), DecRefStatus::Exists); + assert_eq!(System::account_nonce(&0), 1); + + // decreasing the sufficients with a provider present should not delete the account + assert_eq!(System::inc_providers(&0), IncRefStatus::Existed); + assert_eq!(System::dec_sufficients(&0), DecRefStatus::Exists); + assert_eq!(System::account_nonce(&0), 1); + + // decreasing the providers should delete the account + assert_eq!(System::dec_providers(&0).unwrap(), DecRefStatus::Reaped); + assert_eq!(System::account_nonce(&0), 0); + }); +} + +#[test] +fn sufficient_cannot_support_consumer() { + new_test_ext().execute_with(|| { + assert_eq!(System::inc_sufficients(&0), IncRefStatus::Created); + System::inc_account_nonce(&0); + assert_eq!(System::account_nonce(&0), 1); + assert_noop!(System::inc_consumers(&0), DispatchError::NoProviders); + + assert_eq!(System::inc_providers(&0), IncRefStatus::Existed); + assert_ok!(System::inc_consumers(&0)); + assert_noop!(System::dec_providers(&0), DispatchError::ConsumerRemaining); + }); +} + +#[test] +fn provider_required_to_support_consumer() { + new_test_ext().execute_with(|| { + assert_noop!(System::inc_consumers(&0), DispatchError::NoProviders); + + assert_eq!(System::inc_providers(&0), IncRefStatus::Created); + System::inc_account_nonce(&0); + assert_eq!(System::account_nonce(&0), 1); + + assert_eq!(System::inc_providers(&0), IncRefStatus::Existed); + assert_eq!(System::dec_providers(&0).unwrap(), DecRefStatus::Exists); + assert_eq!(System::account_nonce(&0), 1); + + assert_ok!(System::inc_consumers(&0)); + assert_noop!(System::dec_providers(&0), DispatchError::ConsumerRemaining); + + System::dec_consumers(&0); + assert_eq!(System::dec_providers(&0).unwrap(), DecRefStatus::Reaped); + assert_eq!(System::account_nonce(&0), 0); + }); +} + #[test] fn deposit_event_should_work() { new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&1, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::note_finished_extrinsics(); System::deposit_event(SysEvent::CodeUpdated); System::finalize(); assert_eq!( System::events(), - vec![ - EventRecord { - phase: Phase::Finalization, - event: SysEvent::CodeUpdated, - topics: vec![], - } - ] + vec![EventRecord { + phase: Phase::Finalization, + event: SysEvent::CodeUpdated.into(), + topics: vec![], + }] ); - System::initialize( - &2, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&2, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::deposit_event(SysEvent::NewAccount(32)); System::note_finished_initialize(); System::deposit_event(SysEvent::KilledAccount(42)); System::note_applied_extrinsic(&Ok(().into()), Default::default()); - System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.into()), - Default::default() - ); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.into()), Default::default()); System::note_finished_extrinsics(); System::deposit_event(SysEvent::NewAccount(3)); System::finalize(); @@ -96,17 +181,17 @@ fn deposit_event_should_work() { vec![ EventRecord { phase: Phase::Initialization, - event: SysEvent::NewAccount(32), + event: SysEvent::NewAccount(32).into(), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::KilledAccount(42), + event: SysEvent::KilledAccount(42).into(), topics: 
vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess(Default::default()), + event: SysEvent::ExtrinsicSuccess(Default::default()).into(), topics: vec![] }, EventRecord { @@ -114,12 +199,13 @@ fn deposit_event_should_work() { event: SysEvent::ExtrinsicFailed( DispatchError::BadOrigin.into(), Default::default() - ), + ) + .into(), topics: vec![] }, EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(3), + event: SysEvent::NewAccount(3).into(), topics: vec![] }, ] @@ -130,79 +216,56 @@ fn deposit_event_should_work() { #[test] fn deposit_event_uses_actual_weight() { new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&1, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::note_finished_initialize(); - let pre_info = DispatchInfo { - weight: 1000, - .. Default::default() - }; - System::note_applied_extrinsic( - &Ok(Some(300).into()), - pre_info, - ); - System::note_applied_extrinsic( - &Ok(Some(1000).into()), - pre_info, - ); + let pre_info = DispatchInfo { weight: 1000, ..Default::default() }; + System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); + System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); System::note_applied_extrinsic( // values over the pre info should be capped at pre dispatch value &Ok(Some(1200).into()), pre_info, ); - System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.with_weight(999)), - pre_info, - ); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.with_weight(999)), pre_info); assert_eq!( System::events(), vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 300, - .. Default::default() - }, - ), + event: SysEvent::ExtrinsicSuccess(DispatchInfo { + weight: 300, + ..Default::default() + },) + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 1000, - .. Default::default() - }, - ), + event: SysEvent::ExtrinsicSuccess(DispatchInfo { + weight: 1000, + ..Default::default() + },) + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 1000, - .. Default::default() - }, - ), + event: SysEvent::ExtrinsicSuccess(DispatchInfo { + weight: 1000, + ..Default::default() + },) + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(3), event: SysEvent::ExtrinsicFailed( DispatchError::BadOrigin.into(), - DispatchInfo { - weight: 999, - .. Default::default() - }, - ), + DispatchInfo { weight: 999, ..Default::default() }, + ) + .into(), topics: vec![] }, ] @@ -215,25 +278,15 @@ fn deposit_event_topics() { new_test_ext().execute_with(|| { const BLOCK_NUMBER: u64 = 1; - System::initialize( - &BLOCK_NUMBER, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&BLOCK_NUMBER, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::note_finished_extrinsics(); - let topics = vec![ - H256::repeat_byte(1), - H256::repeat_byte(2), - H256::repeat_byte(3), - ]; + let topics = vec![H256::repeat_byte(1), H256::repeat_byte(2), H256::repeat_byte(3)]; // We deposit a few events with different sets of topics. 
- System::deposit_event_indexed(&topics[0..3], SysEvent::NewAccount(1)); - System::deposit_event_indexed(&topics[0..1], SysEvent::NewAccount(2)); - System::deposit_event_indexed(&topics[1..2], SysEvent::NewAccount(3)); + System::deposit_event_indexed(&topics[0..3], SysEvent::NewAccount(1).into()); + System::deposit_event_indexed(&topics[0..1], SysEvent::NewAccount(2).into()); + System::deposit_event_indexed(&topics[1..2], SysEvent::NewAccount(3).into()); System::finalize(); @@ -243,17 +296,17 @@ fn deposit_event_topics() { vec![ EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(1), + event: SysEvent::NewAccount(1).into(), topics: topics[0..3].to_vec(), }, EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(2), + event: SysEvent::NewAccount(2).into(), topics: topics[0..1].to_vec(), }, EventRecord { phase: Phase::Finalization, - event: SysEvent::NewAccount(3), + event: SysEvent::NewAccount(3).into(), topics: topics[1..2].to_vec(), } ] @@ -261,18 +314,20 @@ fn deposit_event_topics() { // Check that the topic-events mapping reflects the deposited topics. // Note that these are indexes of the events. - assert_eq!( - System::event_topics(&topics[0]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)], - ); - assert_eq!( - System::event_topics(&topics[1]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)], - ); - assert_eq!( - System::event_topics(&topics[2]), - vec![(BLOCK_NUMBER, 0)], - ); + assert_eq!(System::event_topics(&topics[0]), vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)]); + assert_eq!(System::event_topics(&topics[1]), vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)]); + assert_eq!(System::event_topics(&topics[2]), vec![(BLOCK_NUMBER, 0)]); + }); +} + +#[test] +fn event_util_functions_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + System::deposit_event(SysEvent::CodeUpdated); + + System::assert_has_event(SysEvent::CodeUpdated.into()); + System::assert_last_event(SysEvent::CodeUpdated.into()); }); } @@ -281,48 +336,32 @@ fn prunes_block_hash_mappings() { new_test_ext().execute_with(|| { // simulate import of 15 blocks for n in 1..=15 { - System::initialize( - &n, - &[n as u8 - 1; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&n, &[n as u8 - 1; 32].into(), &Default::default(), InitKind::Full); System::finalize(); } // first 5 block hashes are pruned for n in 0..5 { - assert_eq!( - System::block_hash(n), - H256::zero(), - ); + assert_eq!(System::block_hash(n), H256::zero()); } // the remaining 10 are kept for n in 5..15 { - assert_eq!( - System::block_hash(n), - [n as u8; 32].into(), - ); + assert_eq!(System::block_hash(n), [n as u8; 32].into()); } }) } #[test] fn set_code_checks_works() { - struct CallInWasm(Vec); + struct ReadRuntimeVersion(Vec); - impl sp_core::traits::CallInWasm for CallInWasm { - fn call_in_wasm( + impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion { + fn read_runtime_version( &self, - _: &[u8], - _: Option>, - _: &str, - _: &[u8], - _: &mut dyn sp_externalities::Externalities, - _: sp_core::traits::MissingHostFunctions, + _wasm_code: &[u8], + _ext: &mut dyn sp_externalities::Externalities, ) -> Result, String> { Ok(self.0.clone()) } @@ -332,7 +371,7 @@ fn set_code_checks_works() { ("test", 1, 2, Err(Error::::SpecVersionNeedsToIncrease)), ("test", 1, 1, Err(Error::::SpecVersionNeedsToIncrease)), ("test2", 1, 1, Err(Error::::InvalidSpecName)), - ("test", 2, 1, Ok(())), + ("test", 2, 1, Ok(PostDispatchInfo::default())), ("test", 0, 1, 
Err(Error::::SpecVersionNeedsToIncrease)), ("test", 1, 0, Err(Error::::SpecVersionNeedsToIncrease)), ]; @@ -344,38 +383,49 @@ fn set_code_checks_works() { impl_version, ..Default::default() }; - let call_in_wasm = CallInWasm(version.encode()); + let read_runtime_version = ReadRuntimeVersion(version.encode()); let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(call_in_wasm)); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(read_runtime_version)); ext.execute_with(|| { - let res = System::set_code( - RawOrigin::Root.into(), - vec![1, 2, 3, 4], - ); + let res = System::set_code(RawOrigin::Root.into(), vec![1, 2, 3, 4]); - assert_eq!(expected.map_err(DispatchError::from), res); + assert_runtime_updated_digest(if res.is_ok() { 1 } else { 0 }); + assert_eq!(expected.map_err(DispatchErrorWithPostInfo::from), res); }); } } +fn assert_runtime_updated_digest(num: usize) { + assert_eq!( + System::digest() + .logs + .into_iter() + .filter(|item| *item == generic::DigestItem::RuntimeEnvironmentUpdated) + .count(), + num, + "Incorrect number of Runtime Updated digest items", + ); +} + #[test] fn set_code_with_real_wasm_blob() { let executor = substrate_test_runtime_client::new_native_executor(); let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { System::set_block_number(1); System::set_code( RawOrigin::Root.into(), substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(), - ).unwrap(); + ) + .unwrap(); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, - event: SysEvent::CodeUpdated, + event: SysEvent::CodeUpdated.into(), topics: vec![], }], ); @@ -386,15 +436,16 @@ fn set_code_with_real_wasm_blob() { fn runtime_upgraded_with_set_storage() { let executor = substrate_test_runtime_client::new_native_executor(); let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { System::set_storage( RawOrigin::Root.into(), vec![( well_known_keys::CODE.to_vec(), - substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec() + substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(), )], - ).unwrap(); + ) + .unwrap(); }); } @@ -403,11 +454,12 @@ fn events_not_emitted_during_genesis() { new_test_ext().execute_with(|| { // Block Number is zero at genesis assert!(System::block_number().is_zero()); - System::on_created_account(Default::default()); + let mut account_data = AccountInfo::default(); + System::on_created_account(Default::default(), &mut account_data); assert!(System::events().is_empty()); // Events will be emitted starting on block 1 System::set_block_number(1); - System::on_created_account(Default::default()); + System::on_created_account(Default::default(), &mut account_data); assert!(System::events().len() == 1); }); } @@ -420,5 +472,31 @@ fn ensure_one_of_works() { assert_eq!(ensure_root_or_signed(RawOrigin::Root).unwrap(), Either::Left(())); assert_eq!(ensure_root_or_signed(RawOrigin::Signed(0)).unwrap(), Either::Right(0)); - assert!(ensure_root_or_signed(RawOrigin::None).is_err()) + assert!(ensure_root_or_signed(RawOrigin::None).is_err()); +} + +#[test] +fn extrinsics_root_is_calculated_correctly() { + new_test_ext().execute_with(|| { + System::initialize(&1, &[0u8; 32].into(), 
&Default::default(), InitKind::Full); + System::note_finished_initialize(); + System::note_extrinsic(vec![1]); + System::note_applied_extrinsic(&Ok(().into()), Default::default()); + System::note_extrinsic(vec![2]); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.into()), Default::default()); + System::note_finished_extrinsics(); + let header = System::finalize(); + + let ext_root = extrinsics_data_root::(vec![vec![1], vec![2]]); + assert_eq!(ext_root, *header.extrinsics_root()); + }); +} + +#[test] +fn runtime_updated_digest_emitted_when_heap_pages_changed() { + new_test_ext().execute_with(|| { + System::initialize(&1, &[0u8; 32].into(), &Default::default(), InitKind::Full); + System::set_heap_pages(RawOrigin::Root.into(), 5).unwrap(); + assert_runtime_updated_digest(1); + }); } diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 93295093c4fb8..281d26375c81b 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -1,13 +1,13 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,62 +15,136 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Encode, Decode}; -use frame_support::weights::{Weight, DispatchClass}; -use sp_runtime::RuntimeDebug; +//! Autogenerated weights for frame_system +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 -/// An object to track the currently used extrinsic weight in a block. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] -pub struct ExtrinsicsWeight { - normal: Weight, - operational: Weight, +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/system/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for frame_system. +pub trait WeightInfo { + fn remark(b: u32, ) -> Weight; + fn remark_with_event(b: u32, ) -> Weight; + fn set_heap_pages() -> Weight; + fn set_changes_trie_config() -> Weight; + fn set_storage(i: u32, ) -> Weight; + fn kill_storage(i: u32, ) -> Weight; + fn kill_prefix(p: u32, ) -> Weight; } -impl ExtrinsicsWeight { - /// Returns the total weight consumed by all extrinsics in the block. - pub fn total(&self) -> Weight { - self.normal.saturating_add(self.operational) +/// Weights for frame_system using the Substrate node and recommended hardware. 
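The generated table reads as affine formulas in the benchmark parameters. For example, `remark` above costs a 574,000 base plus 1,000 per byte; a tiny self-contained check, using a hypothetical 1 KiB input:

```rust
// Evaluates the benchmarked `remark` weight formula from the table above
// (574_000 base + 1_000 per byte); the 1 KiB length is a made-up example.
fn main() {
    fn remark_weight(b: u64) -> u64 {
        574_000 + 1_000 * b
    }
    assert_eq!(remark_weight(1024), 1_598_000);
    println!("remark(1024 bytes) = {} weight units", remark_weight(1024));
}
```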
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn remark(b: u32, ) -> Weight { + (574_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) } - - /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn add(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_add(weight); + fn remark_with_event(b: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) } - - /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would - /// occur. - pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { - let value = self.get_mut(class); - *value = value.checked_add(weight).ok_or(())?; - Ok(()) + // Storage: unknown [0x3a686561707061676573] (r:0 w:1) + fn set_heap_pages() -> Weight { + (1_891_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - - /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of - /// `Weight`. - pub fn sub(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_sub(weight); + // Storage: System Digest (r:1 w:1) + // Storage: unknown [0x3a6368616e6765735f74726965] (r:0 w:1) + fn set_changes_trie_config() -> Weight { + (7_370_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - - /// Get the current weight of a specific dispatch class. - pub fn get(&self, class: DispatchClass) -> Weight { - match class { - DispatchClass::Operational => self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => self.normal, - } + // Storage: Skipped Metadata (r:0 w:0) + fn set_storage(i: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((848_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } - - /// Get a mutable reference to the current weight of a specific dispatch class. - fn get_mut(&mut self, class: DispatchClass) -> &mut Weight { - match class { - DispatchClass::Operational => &mut self.operational, - DispatchClass::Normal | DispatchClass::Mandatory => &mut self.normal, - } + // Storage: Skipped Metadata (r:0 w:0) + fn kill_storage(i: u32, ) -> Weight { + (308_000 as Weight) + // Standard Error: 0 + .saturating_add((559_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } + // Storage: Skipped Metadata (r:0 w:0) + fn kill_prefix(p: u32, ) -> Weight { + (7_616_000 as Weight) + // Standard Error: 1_000 + .saturating_add((783_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } +} - /// Set the weight of a specific dispatch class. 
- pub fn put(&mut self, new: Weight, class: DispatchClass) { - *self.get_mut(class) = new; +// For backwards compatibility and tests +impl WeightInfo for () { + fn remark(b: u32, ) -> Weight { + (574_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) + } + fn remark_with_event(b: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + } + // Storage: unknown [0x3a686561707061676573] (r:0 w:1) + fn set_heap_pages() -> Weight { + (1_891_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Digest (r:1 w:1) + // Storage: unknown [0x3a6368616e6765735f74726965] (r:0 w:1) + fn set_changes_trie_config() -> Weight { + (7_370_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Skipped Metadata (r:0 w:0) + fn set_storage(i: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((848_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + // Storage: Skipped Metadata (r:0 w:0) + fn kill_storage(i: u32, ) -> Weight { + (308_000 as Weight) + // Standard Error: 0 + .saturating_add((559_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + // Storage: Skipped Metadata (r:0 w:0) + fn kill_prefix(p: u32, ) -> Weight { + (7_616_000 as Weight) + // Standard Error: 1_000 + .saturating_add((783_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } } diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 5a99c5d02c5af..1c95c4782b5c4 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-timestamp" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,33 +15,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } -impl-trait-for-tuples = "0.1.3" +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = 
"../../primitives/io", optional = true } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../primitives/timestamp" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ version = "4.0.0-dev", path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } [features] default = ["std"] std = [ "sp-inherents/std", "codec/std", + "scale-info/std", "sp-std/std", "sp-runtime/std", "frame-benchmarking/std", "frame-support/std", - "serde", "frame-system/std", - "sp-timestamp/std" + "sp-timestamp/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking", "sp-io"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md index 5610caca4da51..5f8388b04f829 100644 --- a/frame/timestamp/README.md +++ b/frame/timestamp/README.md @@ -2,9 +2,9 @@ The Timestamp module provides functionality to get and set the on-chain time. -- [`timestamp::Trait`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/enum.Call.html) -- [`Module`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/struct.Module.html) +- [`timestamp::Config`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/pallet/trait.Config.html) +- [`Call`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/pallet/enum.Call.html) +- [`Pallet`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/pallet/struct.Pallet.html) ## Overview @@ -29,7 +29,7 @@ because of cumulative calculation errors and hence should be avoided. * `get` - Gets the current time for the current block. If this function is called prior to setting the timestamp, it will return the timestamp of the previous block. -### Trait Getters +### Config Getters * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. @@ -48,10 +48,10 @@ trait from the timestamp trait. use frame_support::{decl_module, dispatch}; use frame_system::ensure_signed; -pub trait Trait: timestamp::Trait {} +pub trait Config: timestamp::Config {} decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { #[weight = 0] pub fn get_time(origin) -> dispatch::DispatchResult { let _sender = ensure_signed(origin)?; @@ -69,6 +69,6 @@ the Timestamp module for session management. 
## Related Modules -* [Session](https://docs.rs/pallet-timestamppallet-session/latest/pallet_session/) +* [Session](https://docs.rs/pallet-session/latest/pallet_session/) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index a0700179a9336..97ddd4cddd63f 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,26 +20,24 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use sp_std::prelude::*; -use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, TrackedStorageKey}; use frame_support::{ensure, traits::OnFinalize}; -use frame_benchmarking::{benchmarks, TrackedStorageKey}; +use frame_system::RawOrigin; -use crate::Module as Timestamp; +use crate::Pallet as Timestamp; const MAX_TIME: u32 = 100; benchmarks! { - _ { } - set { let t = MAX_TIME; // Ignore write to `DidUpdate` since it transient. - let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + let did_update_key = crate::DidUpdate::::hashed_key().to_vec(); frame_benchmarking::benchmarking::add_to_whitelist(TrackedStorageKey { key: did_update_key, - has_been_read: false, - has_been_written: true, + reads: 0, + writes: 1, + whitelisted: false, }); }: _(RawOrigin::None, t.into()) verify { @@ -49,27 +47,14 @@ benchmarks! { on_finalize { let t = MAX_TIME; Timestamp::::set(RawOrigin::None.into(), t.into())?; - ensure!(DidUpdate::exists(), "Time was not set."); + ensure!(DidUpdate::::exists(), "Time was not set."); // Ignore read/write to `DidUpdate` since it is transient. - let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + let did_update_key = crate::DidUpdate::::hashed_key().to_vec(); frame_benchmarking::benchmarking::add_to_whitelist(did_update_key.into()); }: { Timestamp::::on_finalize(t.into()); } verify { - ensure!(!DidUpdate::exists(), "Time was not removed."); + ensure!(!DidUpdate::::exists(), "Time was not removed."); } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set::()); - assert_ok!(test_benchmark_on_finalize::()); - }); - } -} +impl_benchmark_test_suite!(Timestamp, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index f2a74d36e0231..153606bedbacf 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,25 +15,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Timestamp Module +//! # Timestamp Pallet //! -//! The Timestamp module provides functionality to get and set the on-chain time. +//! The Timestamp pallet provides functionality to get and set the on-chain time. //! -//! - [`timestamp::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) -//! 
- [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! ## Overview //! -//! The Timestamp module allows the validators to set and validate a timestamp with each block. +//! The Timestamp pallet allows the validators to set and validate a timestamp with each block. //! -//! It uses inherents for timestamp data, which is provided by the block author and validated/verified -//! by other validators. The timestamp can be set only once per block and must be set each block. -//! There could be a constraint on how much time must pass before setting the new timestamp. +//! It uses inherents for timestamp data, which is provided by the block author and +//! validated/verified by other validators. The timestamp can be set only once per block and must be +//! set each block. There could be a constraint on how much time must pass before setting the new +//! timestamp. //! -//! **NOTE:** The Timestamp module is the recommended way to query the on-chain time instead of using -//! an approach based on block numbers. The block number based time measurement can cause issues -//! because of cumulative calculation errors and hence should be avoided. +//! **NOTE:** The Timestamp pallet is the recommended way to query the on-chain time instead of +//! using an approach based on block numbers. The block number based time measurement can cause +//! issues because of cumulative calculation errors and hence should be avoided. //! //! ## Interface //! @@ -46,17 +47,18 @@ //! * `get` - Gets the current time for the current block. If this function is called prior to //! setting the timestamp, it will return the timestamp of the previous block. //! -//! ### Trait Getters +//! ### Config Getters //! //! * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. //! //! ## Usage //! -//! The following example shows how to use the Timestamp module in your custom module to query the current timestamp. +//! The following example shows how to use the Timestamp pallet in your custom pallet to query the +//! current timestamp. //! //! ### Prerequisites //! -//! Import the Timestamp module into your custom module and derive the module configuration +//! Import the Timestamp pallet into your custom pallet and derive the pallet configuration //! trait from the timestamp trait. //! //! ### Get current timestamp @@ -66,10 +68,10 @@ //! # use pallet_timestamp as timestamp; //! use frame_system::ensure_signed; //! -//! pub trait Trait: timestamp::Trait {} +//! pub trait Config: timestamp::Config {} //! //! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! pub struct Module for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn get_time(origin) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; @@ -83,71 +85,92 @@ //! //! ### Example from the FRAME //! -//! The [Session module](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses -//! the Timestamp module for session management. +//! The [Session pallet](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses +//! the Timestamp pallet for session management. //! -//! ## Related Modules +//! ## Related Pallets //! //! 
* [Session](../pallet_session/index.html) #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; -mod default_weights; - -use sp_std::{result, cmp}; -use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier}; -#[cfg(feature = "std")] -use frame_support::debug; -use frame_support::{ - Parameter, decl_storage, decl_module, - traits::{Time, UnixTime, Get}, - weights::{DispatchClass, Weight}, -}; -use sp_runtime::{ - RuntimeString, - traits::{ - AtLeast32Bit, Zero, SaturatedConversion, Scale, +pub mod weights; + +use frame_support::traits::{OnTimestampSet, Time, UnixTime}; +use sp_runtime::traits::{AtLeast32Bit, SaturatedConversion, Scale, Zero}; +use sp_std::{cmp, result}; +use sp_timestamp::{InherentError, InherentType, INHERENT_IDENTIFIER}; +pub use weights::WeightInfo; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The pallet configuration trait + #[pallet::config] + pub trait Config: frame_system::Config { + /// Type used for expressing timestamp. + type Moment: Parameter + + Default + + AtLeast32Bit + + Scale + + Copy + + MaxEncodedLen + + scale_info::StaticTypeInfo; + + /// Something which can be notified when the timestamp is set. Set this to `()` if not + /// needed. + type OnTimestampSet: OnTimestampSet; + + /// The minimum period between blocks. Beware that this is different to the *expected* + /// period that the block production apparatus provides. Your chosen consensus system will + /// generally work with this to determine a sensible block time. e.g. For Aura, it will be + /// double this period on default settings. + #[pallet::constant] + type MinimumPeriod: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } -}; -use frame_system::ensure_none; -use sp_timestamp::{ - InherentError, INHERENT_IDENTIFIER, InherentType, - OnTimestampSet, -}; - -pub trait WeightInfo { - fn set() -> Weight; - fn on_finalize() -> Weight; -} -/// The module configuration trait -pub trait Trait: frame_system::Trait { - /// Type used for expressing timestamp. - type Moment: Parameter + Default + AtLeast32Bit - + Scale + Copy; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(PhantomData); - /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. - type OnTimestampSet: OnTimestampSet; + /// Current time for the current block. + #[pallet::storage] + #[pallet::getter(fn now)] + pub type Now = StorageValue<_, T::Moment, ValueQuery>; - /// The minimum period between blocks. Beware that this is different to the *expected* period - /// that the block production apparatus provides. Your chosen consensus system will generally - /// work with this to determine a sensible block time. e.g. For Aura, it will be double this - /// period on default settings. - type MinimumPeriod: Get; + /// Did the timestamp get updated in this block? + #[pallet::storage] + pub(super) type DidUpdate = StorageValue<_, bool, ValueQuery>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + #[pallet::hooks] + impl Hooks> for Pallet { + /// dummy `on_initialize` to return the weight used in `on_finalize`. + fn on_initialize(_n: BlockNumberFor) -> Weight { + // weight of `on_finalize` + T::WeightInfo::on_finalize() + } -decl_module! 
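// [Editorial sketch] `Config::OnTimestampSet` above is the hook through which
// consensus code learns the new time. A minimal implementor, assuming a
// hypothetical `SlotTracker` type (a production runtime would typically plug
// Aura or Babe in here):
pub struct SlotTracker;

impl<Moment> frame_support::traits::OnTimestampSet<Moment> for SlotTracker {
    fn on_timestamp_set(_moment: Moment) {
        // React to the timestamp committed by `set`; this must stay O(1),
        // as the weight notes on the dispatchable require.
    }
}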
{ - pub struct Module for enum Call where origin: T::Origin { - /// The minimum period between blocks. Beware that this is different to the *expected* period - /// that the block production apparatus provides. Your chosen consensus system will generally - /// work with this to determine a sensible block time. e.g. For Aura, it will be double this - /// period on default settings. - const MinimumPeriod: T::Moment = T::MinimumPeriod::get(); + /// # + /// - `O(1)` + /// - 1 storage deletion (codec `O(1)`). + /// # + fn on_finalize(_n: BlockNumberFor) { + assert!(DidUpdate::::take(), "Timestamp must be updated once in the block"); + } + } + #[pallet::call] + impl Pallet { /// Set the current time. /// /// This call should be invoked exactly once per block. It will panic at the finalization @@ -160,54 +183,82 @@ decl_module! { /// /// # /// - `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`) - /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in `on_finalize`) + /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in + /// `on_finalize`) /// - 1 event handler `on_timestamp_set`. Must be `O(1)`. /// # - #[weight = ( + #[pallet::weight(( T::WeightInfo::set(), DispatchClass::Mandatory - )] - fn set(origin, #[compact] now: T::Moment) { + ))] + pub fn set(origin: OriginFor, #[pallet::compact] now: T::Moment) -> DispatchResult { ensure_none(origin)?; - assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); + assert!(!DidUpdate::::exists(), "Timestamp must be updated only once in the block"); let prev = Self::now(); assert!( prev.is_zero() || now >= prev + T::MinimumPeriod::get(), "Timestamp must increment by at least between sequential blocks" ); - ::Now::put(now); - ::DidUpdate::put(true); + Now::::put(now); + DidUpdate::::put(true); >::on_timestamp_set(now); - } - /// dummy `on_initialize` to return the weight used in `on_finalize`. - fn on_initialize() -> Weight { - // weight of `on_finalize` - T::WeightInfo::on_finalize() + Ok(()) } + } - /// # - /// - `O(1)` - /// - 1 storage deletion (codec `O(1)`). - /// # - fn on_finalize() { - assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let inherent_data = data + .get_data::(&INHERENT_IDENTIFIER) + .expect("Timestamp inherent data not correctly encoded") + .expect("Timestamp inherent data must be provided"); + let data = (*inherent_data).saturated_into::(); + + let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); + Some(Call::set { now: next_time.into() }) } - } -} -decl_storage! { - trait Store for Module as Timestamp { - /// Current time for the current block. 
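// [Editorial sketch] `create_inherent` above clamps the block author's wall
// clock so the proposed timestamp can never violate `MinimumPeriod`; the same
// arithmetic stated standalone (u64 milliseconds assumed):
fn next_timestamp(local_clock: u64, prev_now: u64, minimum_period: u64) -> u64 {
    core::cmp::max(local_clock, prev_now + minimum_period)
}
// e.g. next_timestamp(1_002, 1_000, 5) == 1_005: a lagging local clock is
// pushed forward to the earliest value other validators would accept.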
- pub Now get(fn now) build(|_| 0.into()): T::Moment; + fn check_inherent( + call: &Self::Call, + data: &InherentData, + ) -> result::Result<(), Self::Error> { + const MAX_TIMESTAMP_DRIFT_MILLIS: sp_timestamp::Timestamp = + sp_timestamp::Timestamp::new(30 * 1000); + + let t: u64 = match call { + Call::set { ref now } => now.clone().saturated_into::(), + _ => return Ok(()), + }; + + let data = data + .get_data::(&INHERENT_IDENTIFIER) + .expect("Timestamp inherent data not correctly encoded") + .expect("Timestamp inherent data must be provided"); + + let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); + if t > *(data + MAX_TIMESTAMP_DRIFT_MILLIS) { + Err(InherentError::TooFarInFuture) + } else if t < minimum { + Err(InherentError::ValidAtTimestamp(minimum.into())) + } else { + Ok(()) + } + } - /// Did the timestamp get updated in this block? - DidUpdate: bool; + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::set { .. }) + } } } -impl Module { +impl Pallet { /// Get the current time for the current block. /// /// NOTE: if this function is called prior to setting the timestamp, @@ -217,54 +268,13 @@ impl Module { } /// Set the timestamp to something in particular. Only used for tests. - #[cfg(feature = "std")] + #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] pub fn set_timestamp(now: T::Moment) { - ::Now::put(now); + Now::::put(now); } } -fn extract_inherent_data(data: &InherentData) -> Result { - data.get_data::(&INHERENT_IDENTIFIER) - .map_err(|_| RuntimeString::from("Invalid timestamp inherent data encoding."))? - .ok_or_else(|| "Timestamp inherent data is not provided.".into()) -} - -impl ProvideInherent for Module { - type Call = Call; - type Error = InherentError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let data: T::Moment = extract_inherent_data(data) - .expect("Gets and decodes timestamp inherent data") - .saturated_into(); - - let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); - Some(Call::set(next_time.into())) - } - - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - const MAX_TIMESTAMP_DRIFT_MILLIS: u64 = 30 * 1000; - - let t: u64 = match call { - Call::set(ref t) => t.clone().saturated_into::(), - _ => return Ok(()), - }; - - let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; - - let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); - if t > data + MAX_TIMESTAMP_DRIFT_MILLIS { - Err(InherentError::Other("Timestamp too far in future to accept".into())) - } else if t < minimum { - Err(InherentError::ValidAtTimestamp(minimum)) - } else { - Ok(()) - } - } -} - -impl Time for Module { +impl Time for Pallet { type Moment = T::Moment; /// Before the first set of now with inherent the value returned is zero. @@ -276,15 +286,16 @@ impl Time for Module { /// Before the timestamp inherent is applied, it returns the time of previous block. /// /// On genesis the time returned is not valid. -impl UnixTime for Module { +impl UnixTime for Pallet { fn now() -> core::time::Duration { // now is duration since unix epoch in millisecond as documented in // `sp_timestamp::InherentDataProvider`. let now = Self::now(); sp_std::if_std! 
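// [Editorial sketch] `check_inherent` above therefore accepts a proposed
// timestamp `t` only inside a bounded window; both bounds stated standalone
// (u64 milliseconds assumed):
fn accepts(t: u64, prev_now: u64, minimum_period: u64, local_clock: u64) -> bool {
    const MAX_DRIFT: u64 = 30 * 1000; // mirrors MAX_TIMESTAMP_DRIFT_MILLIS above
    // A too-small `t` yields `ValidAtTimestamp` (the block becomes valid once
    // local time catches up) rather than a hard error, but it fails now either way.
    t >= prev_now + minimum_period && t <= local_clock + MAX_DRIFT
}
// e.g. with prev_now = 1_000_000, minimum_period = 3_000 and local_clock =
// 1_010_000, exactly the values 1_003_000..=1_040_000 pass.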
{ if now == T::Moment::zero() { - debug::error!( - "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0" + log::error!( + target: "runtime::timestamp", + "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0", ); } } @@ -295,66 +306,74 @@ impl UnixTime for Module { #[cfg(test)] mod tests { use super::*; + use crate as pallet_timestamp; - use frame_support::{impl_outer_origin, assert_ok, parameter_types, weights::Weight}; - use sp_io::TestExternalities; + use frame_support::{assert_ok, parameter_types}; use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + use sp_io::TestExternalities; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + }; pub fn new_test_ext() -> TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); TestExternalities::new(t) } - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + } + ); - #[derive(Clone, Eq, PartialEq)] - pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { - type BaseCallFilter = (); + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const MinimumPeriod: u64 = 5; } - impl Trait for Test { + impl Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } - type Timestamp = Module; #[test] fn timestamp_works() { @@ -376,7 +395,9 @@ mod tests { } #[test] - #[should_panic(expected = "Timestamp must increment by at least between sequential blocks")] + #[should_panic( + expected = "Timestamp must increment by at least between sequential blocks" + )] fn block_period_minimum_enforced() { new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs new file mode 100644 index 0000000000000..b4e7370ee7616 --- /dev/null +++ b/frame/timestamp/src/weights.rs @@ -0,0 +1,79 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_timestamp +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_timestamp +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/timestamp/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_timestamp. +pub trait WeightInfo { + fn set() -> Weight; + fn on_finalize() -> Weight; +} + +/// Weights for pallet_timestamp using the Substrate node and recommended hardware. 
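// [Editorial sketch, kept in comments] Wiring the benchmarked weights below
// into a runtime; `Runtime` and the 6-second block time are hypothetical:
//
//     parameter_types! {
//         // Half the slot duration: for Aura the slot is double
//         // `MinimumPeriod`, per the `Config` docs above.
//         pub const MinimumPeriod: u64 = 3_000;
//     }
//
//     impl pallet_timestamp::Config for Runtime {
//         type Moment = u64;
//         type OnTimestampSet = (); // e.g. Aura or Babe on a production chain
//         type MinimumPeriod = MinimumPeriod;
//         type WeightInfo = pallet_timestamp::weights::SubstrateWeight<Runtime>;
//     }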
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Timestamp Now (r:1 w:1) + // Storage: Babe CurrentSlot (r:1 w:0) + fn set() -> Weight { + (10_391_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn on_finalize() -> Weight { + (4_843_000 as Weight) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Timestamp Now (r:1 w:1) + // Storage: Babe CurrentSlot (r:1 w:0) + fn set() -> Weight { + (10_391_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn on_finalize() -> Weight { + (4_843_000 as Weight) + } +} diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml new file mode 100644 index 0000000000000..8ca395e1c5416 --- /dev/null +++ b/frame/tips/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "pallet-tips" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage tips" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +log = { version = "0.4.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.126", features = ["derive"], optional = true } + +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../treasury" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } + +[features] +default = ["std"] +std = [ + "codec/std", + "log/std", + "scale-info/std", + "serde", + + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "frame-support/std", + "frame-system/std", + "pallet-treasury/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/tips/README.md b/frame/tips/README.md new file mode 100644 index 0000000000000..d885ce770f795 --- /dev/null +++ b/frame/tips/README.md @@ -0,0 +1,33 @@ +# Tipping Pallet ( pallet-tips ) + +**Note :: This pallet is tightly coupled to pallet-treasury** + +A subsystem to allow for an agile "tipping" process, whereby a reward may be given without first +having a pre-determined stakeholder group come to consensus on how much should be paid. + +A group of `Tippers` is determined through the config `Config`. 
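(Editorial sketch) `Tippers` is any type implementing `SortedMembers<AccountId>`
together with `ContainsLengthBound`; a fixed, council-like set could look as
follows (`TipperSet` and the member IDs are hypothetical):

```rust
use frame_support::traits::{ContainsLengthBound, SortedMembers};

pub struct TipperSet;

impl SortedMembers<u64> for TipperSet {
    fn sorted_members() -> Vec<u64> {
        vec![1, 2, 3] // must stay sorted by account ID
    }
}

impl ContainsLengthBound for TipperSet {
    fn min_len() -> usize {
        0
    }
    // Must be cost free (no storage access), per the `Config` docs below.
    fn max_len() -> usize {
        3
    }
}
```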
After half of these have declared +some amount that they believe a particular reported reason deserves, then a countdown period is +entered where any remaining members can declare their tip amounts also. After the close of the +countdown period, the median of all declared tips is paid to the reported beneficiary, along with +any finders fee, in case of a public (and bonded) original report. + +### Terminology + +- **Tipping:** The process of gathering declarations of amounts to tip and taking the median amount + to be transferred from the treasury to a beneficiary account. +- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a particular + individual (identified by an account ID) is worthy of a recognition by the treasury. +- **Finder:** The original public reporter of some reason for tipping. +- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, + rather than the main beneficiary. + +## Interface + +### Dispatchable Functions + +- `report_awesome` - Report something worthy of a tip and register for a finders fee. +- `retract_tip` - Retract a previous (finders fee registered) report. +- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +- `tip` - Declare or redeclare an amount to tip for a particular reason. +- `close_tip` - Close and pay out a tip. +- `slash_tip` - Remove and slash an already-open tip. \ No newline at end of file diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs new file mode 100644 index 0000000000000..5e08121855210 --- /dev/null +++ b/frame/tips/src/benchmarking.rs @@ -0,0 +1,195 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Treasury tips benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::ensure; +use frame_system::RawOrigin; +use sp_runtime::traits::Saturating; + +use super::*; +use crate::Pallet as TipsMod; + +const SEED: u32 = 0; + +// Create the pre-requisite information needed to create a `report_awesome`. +fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { + let caller = whitelisted_caller(); + let value = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); + let _ = T::Currency::make_free_balance_be(&caller, value); + let reason = vec![0; length as usize]; + let awesome_person = account("awesome", 0, SEED); + (caller, reason, awesome_person) +} + +// Create the pre-requisite information needed to call `tip_new`. 
+fn setup_tip( + r: u32, + t: u32, +) -> Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> { + let tippers_count = T::Tippers::count(); + + for i in 0..t { + let member = account("member", i, SEED); + T::Tippers::add(&member); + ensure!(T::Tippers::contains(&member), "failed to add tipper"); + } + + ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); + let caller = account("member", t - 1, SEED); + let reason = vec![0; r as usize]; + let beneficiary = account("beneficiary", t, SEED); + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + Ok((caller, reason, beneficiary, value)) +} + +// Create `t` new tips for the tip proposal with `hash`. +// This function automatically makes the tip able to close. +fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> Result<(), &'static str> { + for i in 0..t { + let caller = account("member", i, SEED); + ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); + TipsMod::::tip(RawOrigin::Signed(caller).into(), hash, value)?; + } + Tips::::mutate(hash, |maybe_tip| { + if let Some(open_tip) = maybe_tip { + open_tip.closes = Some(T::BlockNumber::zero()); + } + }); + Ok(()) +} + +fn setup_pot_account() { + let pot_account = TipsMod::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); +} + +const MAX_BYTES: u32 = 16384; +const MAX_TIPPERS: u32 = 100; + +benchmarks! { + report_awesome { + let r in 0 .. MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), reason, awesome_person) + + retract_tip { + let r = MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + TipsMod::::report_awesome( + RawOrigin::Signed(caller.clone()).into(), + reason.clone(), + awesome_person.clone() + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash) + + tip_new { + let r in 0 .. MAX_BYTES; + let t in 1 .. MAX_TIPPERS; + + let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), reason, beneficiary, value) + + tip { + let t in 1 .. MAX_TIPPERS; + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t - 1, hash.clone(), value)?; + let caller = account("member", t - 1, SEED); + // Whitelist caller account from further DB operations. 
+ let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash, value) + + close_tip { + let t in 1 .. MAX_TIPPERS; + + // Make sure pot is funded + setup_pot_account::(); + + // Set up a new tip proposal + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + + // Create a bunch of tips + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + + create_tips::(t, hash.clone(), value)?; + + let caller = account("caller", t, SEED); + // Whitelist caller account from further DB operations. + let caller_key = frame_system::Account::::hashed_key_for(&caller); + frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + }: _(RawOrigin::Signed(caller), hash) + + slash_tip { + let t in 1 .. MAX_TIPPERS; + + // Make sure pot is funded + setup_pot_account::(); + + // Set up a new tip proposal + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); + TipsMod::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + }: _(RawOrigin::Root, hash) +} + +impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs new file mode 100644 index 0000000000000..f4a4edb7b3999 --- /dev/null +++ b/frame/tips/src/lib.rs @@ -0,0 +1,599 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Tipping Pallet ( pallet-tips ) +//! +//! > NOTE: This pallet is tightly coupled with pallet-treasury. +//! +//! A subsystem to allow for an agile "tipping" process, whereby a reward may be given without first +//! having a pre-determined stakeholder group come to consensus on how much should be paid. +//! +//! A group of `Tippers` is determined through the config `Config`. After half of these have +//! declared some amount that they believe a particular reported reason deserves, then a countdown +//! period is entered where any remaining members can declare their tip amounts also. After the +//! close of the countdown period, the median of all declared tips is paid to the reported +//! beneficiary, along with any finders fee, in case of a public (and bonded) original report. +//! +//! +//! ### Terminology +//! +//! Tipping protocol: +//! 
- **Tipping:** The process of gathering declarations of amounts to tip and taking the median +//! amount to be transferred from the treasury to a beneficiary account. +//! - **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a +//! particular individual (identified by an account ID) is worthy of a recognition by the +//! treasury. +//! - **Finder:** The original public reporter of some reason for tipping. +//! - **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, +//! rather than the main beneficiary. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! Tipping protocol: +//! - `report_awesome` - Report something worthy of a tip and register for a finders fee. +//! - `retract_tip` - Retract a previous (finders fee registered) report. +//! - `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. +//! - `tip` - Declare or redeclare an amount to tip for a particular reason. +//! - `close_tip` - Close and pay out a tip. + +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +mod tests; + +pub mod migrations; +pub mod weights; + +use sp_runtime::{ + traits::{AccountIdConversion, BadOrigin, Hash, Zero}, + Percent, RuntimeDebug, +}; +use sp_std::prelude::*; + +use codec::{Decode, Encode}; +use frame_support::{ + traits::{ + ContainsLengthBound, Currency, EnsureOrigin, ExistenceRequirement::KeepAlive, Get, + OnUnbalanced, ReservableCurrency, SortedMembers, StorageVersion, + }, + Parameter, +}; + +pub use pallet::*; +pub use weights::WeightInfo; + +pub type BalanceOf = pallet_treasury::BalanceOf; +pub type NegativeImbalanceOf = pallet_treasury::NegativeImbalanceOf; + +/// An open tipping "motion". Retains all details of a tip including information on the finder +/// and the members who have voted. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] +pub struct OpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, +> { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded + /// string. A URL would be sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip. + finder: AccountId, + /// The amount held on deposit for this tip. + deposit: Balance, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + /// Whether this tip should result in the finder taking a fee. + finders_fee: bool, +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_treasury::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// Maximum acceptable reason length. + #[pallet::constant] + type MaximumReasonLength: Get; + + /// The amount held on deposit per byte within the tip report reason or bounty description. 
+ #[pallet::constant] + type DataDepositPerByte: Get>; + + /// The period for which a tip remains open after is has achieved threshold tippers. + #[pallet::constant] + type TipCountdown: Get; + + /// The percent of the final tip which goes to the original reporter of the tip. + #[pallet::constant] + type TipFindersFee: Get; + + /// The amount held on deposit for placing a tip report. + #[pallet::constant] + type TipReportDepositBase: Get>; + + /// Origin from which tippers must come. + /// + /// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy + /// operation). + type Tippers: SortedMembers + ContainsLengthBound; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + /// TipsMap that are not yet completed. Keyed by the hash of `(reason, who)` from the value. + /// This has the insecure enumerable hash function since the key itself is already + /// guaranteed to be a secure hash. + #[pallet::storage] + #[pallet::getter(fn tips)] + pub type Tips = StorageMap< + _, + Twox64Concat, + T::Hash, + OpenTip, T::BlockNumber, T::Hash>, + OptionQuery, + >; + + /// Simple preimage lookup from the reason's hash to the original data. Again, has an + /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. + #[pallet::storage] + #[pallet::getter(fn reasons)] + pub type Reasons = StorageMap<_, Identity, T::Hash, Vec, OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A new tip suggestion has been opened. \[tip_hash\] + NewTip(T::Hash), + /// A tip suggestion has reached threshold and is closing. \[tip_hash\] + TipClosing(T::Hash), + /// A tip suggestion has been closed. \[tip_hash, who, payout\] + TipClosed(T::Hash, T::AccountId, BalanceOf), + /// A tip suggestion has been retracted. \[tip_hash\] + TipRetracted(T::Hash), + /// A tip suggestion has been slashed. \[tip_hash, finder, deposit\] + TipSlashed(T::Hash, T::AccountId, BalanceOf), + } + + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { + /// The reason given is just too big. + ReasonTooBig, + /// The tip was already found/started. + AlreadyKnown, + /// The tip hash is unknown. + UnknownTip, + /// The account attempting to retract the tip is not the finder of the tip. + NotFinder, + /// The tip cannot be claimed/closed because there are not enough tippers yet. + StillOpen, + /// The tip cannot be claimed/closed because it's still in the countdown period. + Premature, + } + + #[pallet::call] + impl Pallet { + /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as + /// `DataDepositPerByte` for each byte in `reason`. + /// + /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be + /// a UTF-8-encoded URL. + /// - `who`: The account which should be credited for the tip. + /// + /// Emits `NewTip` if successful. + /// + /// # + /// - Complexity: `O(R)` where `R` length of `reason`. 
+ /// - encoding and hashing of 'reason' + /// - DbReads: `Reasons`, `Tips` + /// - DbWrites: `Reasons`, `Tips` + /// # + #[pallet::weight(::WeightInfo::report_awesome(reason.len() as u32))] + pub fn report_awesome( + origin: OriginFor, + reason: Vec, + who: T::AccountId, + ) -> DispatchResult { + let finder = ensure_signed(origin)?; + + ensure!( + reason.len() <= T::MaximumReasonLength::get() as usize, + Error::::ReasonTooBig + ); + + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); + + let deposit = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * (reason.len() as u32).into(); + T::Currency::reserve(&finder, deposit)?; + + Reasons::::insert(&reason_hash, &reason); + let tip = OpenTip { + reason: reason_hash, + who, + finder, + deposit, + closes: None, + tips: vec![], + finders_fee: true, + }; + Tips::::insert(&hash, tip); + Self::deposit_event(Event::NewTip(hash)); + Ok(()) + } + + /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. + /// + /// If successful, the original deposit will be unreserved. + /// + /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` + /// must have been reported by the signing account through `report_awesome` (and not + /// through `tip_new`). + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// Emits `TipRetracted` if successful. + /// + /// # + /// - Complexity: `O(1)` + /// - Depends on the length of `T::Hash` which is fixed. + /// - DbReads: `Tips`, `origin account` + /// - DbWrites: `Reasons`, `Tips`, `origin account` + /// # + #[pallet::weight(::WeightInfo::retract_tip())] + pub fn retract_tip(origin: OriginFor, hash: T::Hash) -> DispatchResult { + let who = ensure_signed(origin)?; + let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; + ensure!(tip.finder == who, Error::::NotFinder); + + Reasons::::remove(&tip.reason); + Tips::::remove(&hash); + if !tip.deposit.is_zero() { + let err_amount = T::Currency::unreserve(&who, tip.deposit); + debug_assert!(err_amount.is_zero()); + } + Self::deposit_event(Event::TipRetracted(hash)); + Ok(()) + } + + /// Give a tip for something new; no finder's fee will be taken. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be + /// a UTF-8-encoded URL. + /// - `who`: The account which should be credited for the tip. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `NewTip` if successful. + /// + /// # + /// - Complexity: `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. + /// - `O(T)`: decoding `Tipper` vec of length `T`. `T` is charged as upper bound given by + /// `ContainsLengthBound`. The actual cost depends on the implementation of + /// `T::Tippers`. 
+ /// - `O(R)`: hashing and encoding of reason of length `R` + /// - DbReads: `Tippers`, `Reasons` + /// - DbWrites: `Reasons`, `Tips` + /// # + #[pallet::weight(::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32))] + pub fn tip_new( + origin: OriginFor, + reason: Vec, + who: T::AccountId, + #[pallet::compact] tip_value: BalanceOf, + ) -> DispatchResult { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + + Reasons::::insert(&reason_hash, &reason); + Self::deposit_event(Event::NewTip(hash.clone())); + let tips = vec![(tipper.clone(), tip_value)]; + let tip = OpenTip { + reason: reason_hash, + who, + finder: tipper, + deposit: Zero::zero(), + closes: None, + tips, + finders_fee: false, + }; + Tips::::insert(&hash, tip); + Ok(()) + } + + /// Declare a tip value for an already-open tip. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary + /// account ID. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period + /// has started. + /// + /// # + /// - Complexity: `O(T)` where `T` is the number of tippers. decoding `Tipper` vec of length + /// `T`, insert tip and check closing, `T` is charged as upper bound given by + /// `ContainsLengthBound`. The actual cost depends on the implementation of `T::Tippers`. + /// + /// Actually weight could be lower as it depends on how many tips are in `OpenTip` but it + /// is weighted as if almost full i.e of length `T-1`. + /// - DbReads: `Tippers`, `Tips` + /// - DbWrites: `Tips` + /// # + #[pallet::weight(::WeightInfo::tip(T::Tippers::max_len() as u32))] + pub fn tip( + origin: OriginFor, + hash: T::Hash, + #[pallet::compact] tip_value: BalanceOf, + ) -> DispatchResult { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + + let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { + Self::deposit_event(Event::TipClosing(hash.clone())); + } + Tips::::insert(&hash, tip); + Ok(()) + } + + /// Close and payout a tip. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// The tip identified by `hash` must have finished its countdown period. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// # + /// - Complexity: `O(T)` where `T` is the number of tippers. decoding `Tipper` vec of length + /// `T`. `T` is charged as upper bound given by `ContainsLengthBound`. The actual cost + /// depends on the implementation of `T::Tippers`. 
+ /// - DbReads: `Tips`, `Tippers`, `tip finder` + /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` + /// # + #[pallet::weight(::WeightInfo::close_tip(T::Tippers::max_len() as u32))] + pub fn close_tip(origin: OriginFor, hash: T::Hash) -> DispatchResult { + ensure_signed(origin)?; + + let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; + ensure!(frame_system::Pallet::::block_number() >= *n, Error::::Premature); + // closed. + Reasons::::remove(&tip.reason); + Tips::::remove(hash); + Self::payout_tip(hash, tip); + Ok(()) + } + + /// Remove and slash an already-open tip. + /// + /// May only be called from `T::RejectOrigin`. + /// + /// As a result, the finder is slashed and the deposits are lost. + /// + /// Emits `TipSlashed` if successful. + /// + /// # + /// `T` is charged as upper bound given by `ContainsLengthBound`. + /// The actual cost depends on the implementation of `T::Tippers`. + /// # + #[pallet::weight(::WeightInfo::slash_tip(T::Tippers::max_len() as u32))] + pub fn slash_tip(origin: OriginFor, hash: T::Hash) -> DispatchResult { + T::RejectOrigin::ensure_origin(origin)?; + + let tip = Tips::::take(hash).ok_or(Error::::UnknownTip)?; + + if !tip.deposit.is_zero() { + let imbalance = T::Currency::slash_reserved(&tip.finder, tip.deposit).0; + T::OnSlash::on_unbalanced(imbalance); + } + Reasons::::remove(&tip.reason); + Self::deposit_event(Event::TipSlashed(hash, tip.finder, tip.deposit)); + Ok(()) + } + } +} + +impl Pallet { + // Add public immutables and private mutables. + + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + T::PalletId::get().into_account() + } + + /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it + /// closes, if so, then deposit the relevant event and set closing accordingly. + /// + /// `O(T)` and one storage access. + fn insert_tip_and_check_closing( + tip: &mut OpenTip, T::BlockNumber, T::Hash>, + tipper: T::AccountId, + tip_value: BalanceOf, + ) -> bool { + match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { + Ok(pos) => tip.tips[pos] = (tipper, tip_value), + Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), + } + Self::retain_active_tips(&mut tip.tips); + let threshold = (T::Tippers::count() + 1) / 2; + if tip.tips.len() >= threshold && tip.closes.is_none() { + tip.closes = Some(frame_system::Pallet::::block_number() + T::TipCountdown::get()); + true + } else { + false + } + } + + /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. + fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { + let members = T::Tippers::sorted_members(); + let mut members_iter = members.iter(); + let mut member = members_iter.next(); + tips.retain(|(ref a, _)| loop { + match member { + None => break false, + Some(m) if m > a => break false, + Some(m) => { + member = members_iter.next(); + if m < a { + continue + } else { + break true + } + }, + } + }); + } + + /// Execute the payout of a tip. + /// + /// Up to three balance operations. + /// Plus `O(T)` (`T` is Tippers length). 
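// [Editorial sketch] `payout_tip` below sorts the surviving tips by declared
// value and pays out the middle one, so a single outlier cannot skew the
// result; the selection stated standalone:
fn median_tip(mut declared: Vec<u64>) -> u64 {
    declared.sort();
    declared[declared.len() / 2] // upper-middle element for even counts
}
// e.g. median_tip(vec![1, 50, 3]) == 3: one over-generous (or hostile) tipper
// moves the payout no further than the next-closest declaration.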
+ fn payout_tip( + hash: T::Hash, + tip: OpenTip, T::BlockNumber, T::Hash>, + ) { + let mut tips = tip.tips; + Self::retain_active_tips(&mut tips); + tips.sort_by_key(|i| i.1); + + let treasury = Self::account_id(); + let max_payout = pallet_treasury::Pallet::::pot(); + + let mut payout = tips[tips.len() / 2].1.min(max_payout); + if !tip.deposit.is_zero() { + let err_amount = T::Currency::unreserve(&tip.finder, tip.deposit); + debug_assert!(err_amount.is_zero()); + } + + if tip.finders_fee && tip.finder != tip.who { + // pay out the finder's fee. + let finders_fee = T::TipFindersFee::get() * payout; + payout -= finders_fee; + // this should go through given we checked it's at most the free balance, but still + // we only make a best-effort. + let res = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); + debug_assert!(res.is_ok()); + } + + // same as above: best-effort only. + let res = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); + debug_assert!(res.is_ok()); + Self::deposit_event(Event::TipClosed(hash, tip.who, payout)); + } + + pub fn migrate_retract_tip_for_tip_new(module: &[u8], item: &[u8]) { + /// An open tipping "motion". Retains all details of a tip including information on the + /// finder and the members who have voted. + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 + /// encoded string. A URL would be sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing + /// is scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + } + + use frame_support::{migration::storage_key_iter, Twox64Concat}; + + for (hash, old_tip) in storage_key_iter::< + T::Hash, + OldOpenTip, T::BlockNumber, T::Hash>, + Twox64Concat, + >(module, item) + .drain() + { + let (finder, deposit, finders_fee) = match old_tip.finder { + Some((finder, deposit)) => (finder, deposit, true), + None => (T::AccountId::default(), Zero::zero(), false), + }; + let new_tip = OpenTip { + reason: old_tip.reason, + who: old_tip.who, + finder, + deposit, + closes: old_tip.closes, + tips: old_tip.tips, + finders_fee, + }; + Tips::::insert(hash, new_tip) + } + } +} diff --git a/frame/tips/src/migrations/mod.rs b/frame/tips/src/migrations/mod.rs new file mode 100644 index 0000000000000..81139120da1c8 --- /dev/null +++ b/frame/tips/src/migrations/mod.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Version 4. 
+/// +/// For backward compatibility reasons, pallet-tips uses `Treasury` as the storage module prefix +/// before calling this migration. After calling this migration, it is replaced with +/// the pallet's own storage identifier. +pub mod v4; diff --git a/frame/tips/src/migrations/v4.rs b/frame/tips/src/migrations/v4.rs new file mode 100644 index 0000000000000..69df1d08d2c8a --- /dev/null +++ b/frame/tips/src/migrations/v4.rs @@ -0,0 +1,195 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_io::hashing::twox_128; +use sp_std::str; + +use frame_support::{ + storage::StoragePrefixedMap, + traits::{ + Get, GetStorageVersion, PalletInfoAccess, StorageVersion, + STORAGE_VERSION_STORAGE_KEY_POSTFIX, + }, + weights::Weight, +}; + +use crate as pallet_tips; + +/// Migrate the entire storage of this pallet to a new prefix. +/// +/// This new prefix must be the same as the one set in construct_runtime. +/// For safety, use `PalletInfo` to get it, as: +/// `<Runtime as frame_system::Config>::PalletInfo::name::<TipsPallet>`. +/// +/// The migration will look into the storage version in order not to trigger a migration on an up +/// to date storage. Thus the on chain storage version must be less than 4 in order to trigger the +/// migration. +pub fn migrate<T: pallet_tips::Config, P: GetStorageVersion + PalletInfoAccess, N: AsRef<str>>( + old_pallet_name: N, +) -> Weight { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = <P as PalletInfoAccess>::name();

+ + if new_pallet_name == old_pallet_name { + log::info!( + target: "runtime::tips", + "New pallet name is equal to the old prefix. No migration needs to be done.", + ); + return 0 + } + + let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();

+ log::info!( + target: "runtime::tips", + "Running migration to v4 for tips with storage version {:?}", + on_chain_storage_version, + ); + + if on_chain_storage_version < 4 { + let storage_prefix = pallet_tips::Tips::<T>::storage_prefix(); + frame_support::storage::migration::move_storage_from_pallet( + storage_prefix, + old_pallet_name.as_bytes(), + new_pallet_name.as_bytes(), + ); + log_migration("migration", storage_prefix, old_pallet_name, new_pallet_name); + + let storage_prefix = pallet_tips::Reasons::<T>::storage_prefix(); + frame_support::storage::migration::move_storage_from_pallet( + storage_prefix, + old_pallet_name.as_bytes(), + new_pallet_name.as_bytes(), + ); + log_migration("migration", storage_prefix, old_pallet_name, new_pallet_name); + + StorageVersion::new(4).put::<P>();

+ <T as frame_system::Config>::BlockWeights::get().max_block + } else { + log::warn!( + target: "runtime::tips", + "Attempted to apply migration to v4 but failed because storage version is {:?}", + on_chain_storage_version, + ); + 0 + } +}
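// [Editorial sketch] Hooking the migration above into a runtime upgrade;
// `Runtime` and `Tips` (the instance name from `construct_runtime!`) are
// hypothetical, and "Treasury" is the old prefix named in the module docs:
pub struct MigrateTipsPalletPrefix;

impl frame_support::traits::OnRuntimeUpgrade for MigrateTipsPalletPrefix {
    fn on_runtime_upgrade() -> frame_support::weights::Weight {
        pallet_tips::migrations::v4::migrate::<Runtime, Tips, _>("Treasury")
    }
}
// Under `try-runtime`, `pre_migrate` / `post_migrate` below can back the
// corresponding `pre_upgrade` / `post_upgrade` hooks.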

+ +/// Some checks prior to migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn pre_migrate< + T: pallet_tips::Config, + P: GetStorageVersion + PalletInfoAccess, + N: AsRef<str>, +>( + old_pallet_name: N, +) { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name = <P as PalletInfoAccess>::name(); + + let storage_prefix_tips = pallet_tips::Tips::<T>::storage_prefix(); + let storage_prefix_reasons = pallet_tips::Reasons::<T>::storage_prefix(); + + log_migration("pre-migration", storage_prefix_tips, old_pallet_name, new_pallet_name); + log_migration("pre-migration", storage_prefix_reasons, old_pallet_name, new_pallet_name); + + if new_pallet_name == old_pallet_name { + return + } + + let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); + let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX); + + let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new( + new_pallet_prefix.to_vec(), + new_pallet_prefix.to_vec(), + |key| Ok(key.to_vec()), + ); + + // Ensure nothing except the storage_version_key is stored in the new prefix. + assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key)); + + assert!(<P as GetStorageVersion>::on_chain_storage_version() < 4);
<P as GetStorageVersion>
::on_chain_storage_version() < 4); +} + +/// Some checks for after migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn post_migrate< + T: pallet_tips::Config, + P: GetStorageVersion + PalletInfoAccess, + N: AsRef, +>( + old_pallet_name: N, +) { + let old_pallet_name = old_pallet_name.as_ref(); + let new_pallet_name =
<P as PalletInfoAccess>
::name(); + + let storage_prefix_tips = pallet_tips::Tips::::storage_prefix(); + let storage_prefix_reasons = pallet_tips::Reasons::::storage_prefix(); + + log_migration("post-migration", storage_prefix_tips, old_pallet_name, new_pallet_name); + log_migration("post-migration", storage_prefix_reasons, old_pallet_name, new_pallet_name); + + if new_pallet_name == old_pallet_name { + return + } + + // Assert that no `Tips` and `Reasons` storages remains at the old prefix. + let old_pallet_prefix = twox_128(old_pallet_name.as_bytes()); + let old_tips_key = [&old_pallet_prefix, &twox_128(storage_prefix_tips)[..]].concat(); + let old_tips_key_iter = frame_support::storage::KeyPrefixIterator::new( + old_tips_key.to_vec(), + old_tips_key.to_vec(), + |_| Ok(()), + ); + assert_eq!(old_tips_key_iter.count(), 0); + + let old_reasons_key = [&old_pallet_prefix, &twox_128(storage_prefix_reasons)[..]].concat(); + let old_reasons_key_iter = frame_support::storage::KeyPrefixIterator::new( + old_reasons_key.to_vec(), + old_reasons_key.to_vec(), + |_| Ok(()), + ); + assert_eq!(old_reasons_key_iter.count(), 0); + + // Assert that the `Tips` and `Reasons` storages (if they exist) have been moved to the new + // prefix. + // NOTE: storage_version_key is already in the new prefix. + let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); + let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new( + new_pallet_prefix.to_vec(), + new_pallet_prefix.to_vec(), + |_| Ok(()), + ); + assert!(new_pallet_prefix_iter.count() >= 1); + + assert_eq!(
<P as GetStorageVersion>
::on_chain_storage_version(), 4); +} + +fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) { + log::info!( + target: "runtime::tips", + "{} prefix of storage '{}': '{}' ==> '{}'", + stage, + str::from_utf8(storage_prefix).unwrap_or(""), + old_pallet_name, + new_pallet_name, + ); +} diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs new file mode 100644 index 0000000000000..7ea80d78c5532 --- /dev/null +++ b/frame/tips/src/tests.rs @@ -0,0 +1,547 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Treasury pallet tests. + +#![cfg(test)] + +use std::cell::RefCell; + +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, Permill, +}; +use sp_storage::Storage; + +use frame_support::{ + assert_noop, assert_ok, pallet_prelude::GenesisBuild, parameter_types, + storage::StoragePrefixedMap, traits::SortedMembers, weights::Weight, PalletId, +}; + +use super::*; +use crate::{self as pallet_tips, Event as TipEvent}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, + Tips: pallet_tips::{Pallet, Call, Storage, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} +parameter_types! 
{ + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} +pub struct TenToFourteen; +impl SortedMembers for TenToFourteen { + fn sorted_members() -> Vec { + TEN_TO_FOURTEEN.with(|v| v.borrow().clone()) + } + #[cfg(feature = "runtime-benchmarks")] + fn add(new: &u128) { + TEN_TO_FOURTEEN.with(|v| { + let mut members = v.borrow_mut(); + members.push(*new); + members.sort(); + }) + } +} +impl ContainsLengthBound for TenToFourteen { + fn max_len() -> usize { + TEN_TO_FOURTEEN.with(|v| v.borrow().len()) + } + fn min_len() -> usize { + 0 + } +} +parameter_types! { + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: u64 = 1; + pub const SpendPeriod: u64 = 2; + pub const Burn: Permill = Permill::from_percent(50); + pub const DataDepositPerByte: u64 = 1; + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); + pub const MaximumReasonLength: u32 = 16384; + pub const MaxApprovals: u32 = 100; +} +impl pallet_treasury::Config for Test { + type PalletId = TreasuryPalletId; + type Currency = pallet_balances::Pallet; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Event = Event; + type OnSlash = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); // Just gets burned. + type WeightInfo = (); + type SpendFunds = (); + type MaxApprovals = MaxApprovals; +} +parameter_types! { + pub const TipCountdown: u64 = 1; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: u64 = 1; +} +impl Config for Test { + type MaximumReasonLength = MaximumReasonLength; + type Tippers = TenToFourteen; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type DataDepositPerByte = DataDepositPerByte; + type Event = Event; + type WeightInfo = (); +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + // Total issuance will be 200 with treasury account initialized at ED. 
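+	// A rough sketch of that arithmetic (the mock's `ExistentialDeposit` is 1): the endowed
+	// accounts below contribute 100 + 98 + 1, and the treasury account is created with the
+	// 1-unit existential deposit, giving the stated total issuance of 200.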
+ balances: vec![(0, 100), (1, 98), (2, 1)], + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); + t.into() +} + +fn last_event() -> TipEvent { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let Event::Tips(inner) = e { Some(inner) } else { None }) + .last() + .unwrap() +} + +#[test] +fn genesis_config_works() { + new_test_ext().execute_with(|| { + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); +} + +fn tip_hash() -> H256 { + BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128)) +} + +#[test] +fn tip_new_cannot_be_used_twice() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_noop!( + Tips::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), + Error::::AlreadyKnown + ); + }); +} + +#[test] +fn report_awesome_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + + // other reports don't count. + assert_noop!( + Tips::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), + Error::::AlreadyKnown + ); + + let h = tip_hash(); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Tips::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + System::set_block_number(2); + assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 102); + assert_eq!(Balances::free_balance(3), 8); + }); +} + +#[test] +fn report_awesome_from_beneficiary_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 110); + }); +} + +#[test] +fn close_tip_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + + let h = tip_hash(); + + assert_eq!(last_event(), TipEvent::NewTip(h)); + + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + + assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); + + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + + assert_eq!(last_event(), TipEvent::TipClosing(h)); + + assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::Premature); + + System::set_block_number(2); + 
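+		// `TipCountdown` is 1 block in this mock, so a tip that reached its tipping
+		// threshold at block 1 becomes closable here at block 2; closing with an unsigned
+		// origin must still fail, while a signed origin may close.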
assert_noop!(Tips::close_tip(Origin::none(), h.into()), BadOrigin); + assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + + assert_eq!(last_event(), TipEvent::TipClosed(h, 3, 10)); + + assert_noop!(Tips::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn slash_tip_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 100); + + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + + let h = tip_hash(); + assert_eq!(last_event(), TipEvent::NewTip(h)); + + // can't remove from any origin + assert_noop!(Tips::slash_tip(Origin::signed(0), h.clone()), BadOrigin); + + // can remove from root. + assert_ok!(Tips::slash_tip(Origin::root(), h.clone())); + assert_eq!(last_event(), TipEvent::TipSlashed(h, 0, 12)); + + // tipper slashed + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 88); + }); +} + +#[test] +fn retract_tip_works() { + new_test_ext().execute_with(|| { + // with report awesome + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + let h = tip_hash(); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Tips::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(0), h.clone())); + System::set_block_number(2); + assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + + // with tip new + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + let h = tip_hash(); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Tips::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(10), h.clone())); + System::set_block_number(2); + assert_noop!(Tips::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn tip_median_calculation_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); + let h = tip_hash(); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000000)); + System::set_block_number(2); + assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn tip_changing_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); + let h = tip_hash(); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10000)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10000)); + assert_ok!(Tips::tip(Origin::signed(13), h.clone(), 0)); + assert_ok!(Tips::tip(Origin::signed(14), h.clone(), 
0)); + assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000)); + assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 100)); + assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn test_last_reward_migration() { + let mut s = Storage::default(); + + #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] + pub struct OldOpenTip< + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, + > { + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded + /// string. A URL would be sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, + } + + let reason1 = BlakeTwo256::hash(b"reason1"); + let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); + + let old_tip_finder = OldOpenTip:: { + reason: reason1, + who: 10, + finder: Some((20, 30)), + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + }; + + let reason2 = BlakeTwo256::hash(b"reason2"); + let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); + + let old_tip_no_finder = OldOpenTip:: { + reason: reason2, + who: 20, + finder: None, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + }; + + let data = vec![ + (pallet_tips::Tips::::hashed_key_for(hash1), old_tip_finder.encode().to_vec()), + (pallet_tips::Tips::::hashed_key_for(hash2), old_tip_no_finder.encode().to_vec()), + ]; + + s.top = data.into_iter().collect(); + + sp_io::TestExternalities::new(s).execute_with(|| { + let module = pallet_tips::Tips::::module_prefix(); + let item = pallet_tips::Tips::::storage_prefix(); + Tips::migrate_retract_tip_for_tip_new(module, item); + + // Test w/ finder + assert_eq!( + pallet_tips::Tips::::get(hash1), + Some(OpenTip { + reason: reason1, + who: 10, + finder: 20, + deposit: 30, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: true, + }) + ); + + // Test w/o finder + assert_eq!( + pallet_tips::Tips::::get(hash2), + Some(OpenTip { + reason: reason2, + who: 20, + finder: Default::default(), + deposit: 0, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: false, + }) + ); + }); +} + +#[test] +fn test_migration_v4() { + let reason1 = BlakeTwo256::hash(b"reason1"); + let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); + + let tip = OpenTip:: { + reason: reason1, + who: 10, + finder: 20, + deposit: 30, + closes: Some(13), + tips: vec![(40, 50), (60, 70)], + finders_fee: true, + }; + + let data = vec![ + (pallet_tips::Reasons::::hashed_key_for(hash1), reason1.encode().to_vec()), + (pallet_tips::Tips::::hashed_key_for(hash1), tip.encode().to_vec()), + ]; + + let mut s = Storage::default(); + s.top = data.into_iter().collect(); + + sp_io::TestExternalities::new(s).execute_with(|| { + use frame_support::traits::PalletInfoAccess; + + let old_pallet = "Treasury"; + let new_pallet = ::name(); + frame_support::storage::migration::move_pallet( + new_pallet.as_bytes(), + old_pallet.as_bytes(), + ); + StorageVersion::new(0).put::(); + + crate::migrations::v4::pre_migrate::(old_pallet); + crate::migrations::v4::migrate::(old_pallet); + 
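+		// Outside of tests, the `migrate` call above would normally be issued from an
+		// `OnRuntimeUpgrade` implementation in the runtime. A minimal sketch, assuming the
+		// runtime type is named `Runtime` and the pallet instance `Tips` (names here are
+		// illustrative only):
+		//
+		//   pub struct MigrateTipsPalletPrefix;
+		//   impl frame_support::traits::OnRuntimeUpgrade for MigrateTipsPalletPrefix {
+		//       fn on_runtime_upgrade() -> frame_support::weights::Weight {
+		//           pallet_tips::migrations::v4::migrate::<Runtime, Tips, _>("Treasury")
+		//       }
+		//   }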
crate::migrations::v4::post_migrate::(old_pallet); + }); + + sp_io::TestExternalities::new(Storage::default()).execute_with(|| { + use frame_support::traits::PalletInfoAccess; + + let old_pallet = "Treasury"; + let new_pallet = ::name(); + frame_support::storage::migration::move_pallet( + new_pallet.as_bytes(), + old_pallet.as_bytes(), + ); + StorageVersion::new(0).put::(); + + crate::migrations::v4::pre_migrate::(old_pallet); + crate::migrations::v4::migrate::(old_pallet); + crate::migrations::v4::post_migrate::(old_pallet); + }); +} + +#[test] +fn genesis_funding_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let initial_funding = 100; + pallet_balances::GenesisConfig:: { + // Total issuance will be 200 with treasury account initialized with 100. + balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), initial_funding); + assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance()); + }); +} diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs new file mode 100644 index 0000000000000..3376afb066170 --- /dev/null +++ b/frame/tips/src/weights.rs @@ -0,0 +1,177 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_tips +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_tips +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/tips/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_tips. +pub trait WeightInfo { + fn report_awesome(r: u32, ) -> Weight; + fn retract_tip() -> Weight; + fn tip_new(r: u32, t: u32, ) -> Weight; + fn tip(t: u32, ) -> Weight; + fn close_tip(t: u32, ) -> Weight; + fn slash_tip(t: u32, ) -> Weight; +} + +/// Weights for pallet_tips using the Substrate node and recommended hardware. 
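+///
+/// A runtime would typically select these weights in its `pallet_tips::Config` implementation,
+/// e.g. `type WeightInfo = pallet_tips::weights::SubstrateWeight<Runtime>;` (the `Runtime` name
+/// is illustrative). The `()` implementation further below uses `RocksDbWeight` constants
+/// instead and exists for backwards compatibility and tests.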
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Treasury Reasons (r:1 w:1) + // Storage: Treasury Tips (r:1 w:1) + fn report_awesome(r: u32, ) -> Weight { + (50_921_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Tips (r:1 w:1) + // Storage: Treasury Reasons (r:0 w:1) + fn retract_tip() -> Weight { + (46_352_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Members (r:1 w:0) + // Storage: Treasury Reasons (r:1 w:1) + // Storage: Treasury Tips (r:0 w:1) + fn tip_new(r: u32, t: u32, ) -> Weight { + (33_338_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((115_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Members (r:1 w:0) + // Storage: Treasury Tips (r:1 w:1) + fn tip(t: u32, ) -> Weight { + (22_702_000 as Weight) + // Standard Error: 0 + .saturating_add((538_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Treasury Tips (r:1 w:1) + // Storage: Elections Members (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Treasury Reasons (r:0 w:1) + fn close_tip(t: u32, ) -> Weight { + (84_094_000 as Weight) + // Standard Error: 0 + .saturating_add((283_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Treasury Tips (r:1 w:1) + // Storage: Treasury Reasons (r:0 w:1) + fn slash_tip(t: u32, ) -> Weight { + (24_891_000 as Weight) + // Standard Error: 0 + .saturating_add((6_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Treasury Reasons (r:1 w:1) + // Storage: Treasury Tips (r:1 w:1) + fn report_awesome(r: u32, ) -> Weight { + (50_921_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Tips (r:1 w:1) + // Storage: Treasury Reasons (r:0 w:1) + fn retract_tip() -> Weight { + (46_352_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Members (r:1 w:0) + // Storage: Treasury Reasons (r:1 w:1) + // Storage: Treasury Tips (r:0 w:1) + fn tip_new(r: u32, t: u32, ) -> Weight { + (33_338_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((115_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Members (r:1 w:0) + // Storage: Treasury Tips (r:1 
w:1) + fn tip(t: u32, ) -> Weight { + (22_702_000 as Weight) + // Standard Error: 0 + .saturating_add((538_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Treasury Tips (r:1 w:1) + // Storage: Elections Members (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Treasury Reasons (r:0 w:1) + fn close_tip(t: u32, ) -> Weight { + (84_094_000 as Weight) + // Standard Error: 0 + .saturating_add((283_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Treasury Tips (r:1 w:1) + // Storage: Treasury Reasons (r:0 w:1) + fn slash_tip(t: u32, ) -> Weight { + (24_891_000 as Weight) + // Standard Error: 0 + .saturating_add((6_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 1fa4521900421..546939692bbaf 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,31 +13,36 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "./rpc/runtime-api" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.126", optional = true } smallvec = "1.4.1" -sp-io = { version = "2.0.0", path = "../../primitives/io", default-features = false } -sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } + +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +serde_json = "1.0.68" +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ "serde", "codec/std", - "sp-std/std", + "scale-info/std", + 
"sp-core/std", + "sp-io/std", "sp-runtime/std", + "sp-std/std", "frame-support/std", "frame-system/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "sp-io/std", - "sp-core/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-payment/README.md b/frame/transaction-payment/README.md index 10ad9579e92b7..bf114246e60fa 100644 --- a/frame/transaction-payment/README.md +++ b/frame/transaction-payment/README.md @@ -1,16 +1,16 @@ -# Transaction Payment Module +# Transaction Payment Pallet -This module provides the basic logic needed to pay the absolute minimum amount needed for a +This pallet provides the basic logic needed to pay the absolute minimum amount needed for a transaction to be included. This includes: - _weight fee_: A fee proportional to amount of weight a transaction consumes. - _length fee_: A fee proportional to the encoded length of the transaction. - _tip_: An optional tip. Tip increases the priority of the transaction, giving it a higher chance to be included by the transaction queue. -Additionally, this module allows one to configure: - - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. +Additionally, this pallet allows one to configure: + - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. - A means of updating the fee for the next block, via defining a multiplier, based on the final state of the chain at the end of the previous block. This can be configured via - [`Trait::FeeMultiplierUpdate`] + [`Config::FeeMultiplierUpdate`] -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 26f073e60237f..3858c41a38763 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -1,26 +1,26 @@ [package] name = "pallet-transaction-payment-rpc" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "RPC interface for the transaction payment module." +description = "RPC interface for the transaction payment pallet." 
readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } -serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", path = "./runtime-api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" + +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-rpc = { version = "4.0.0-dev", path = "../../../primitives/rpc" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } diff --git a/frame/transaction-payment/rpc/README.md b/frame/transaction-payment/rpc/README.md index 21a8a7d37cae0..bf2ada1ff0ab3 100644 --- a/frame/transaction-payment/rpc/README.md +++ b/frame/transaction-payment/rpc/README.md @@ -1,3 +1,3 @@ -RPC interface for the transaction payment module. +RPC interface for the transaction payment pallet. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 881c4330eb9a4..2f78f2439c604 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,23 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../support" } - -[dev-dependencies] -serde_json = "1.0.41" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/runtime" } +pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../transaction-payment" } [features] default = ["std"] std = [ - "serde", - "sp-api/std", "codec/std", - "sp-std/std", + "sp-api/std", "sp-runtime/std", - 
"frame-support/std", + "pallet-transaction-payment/std", ] diff --git a/frame/transaction-payment/rpc/runtime-api/README.md b/frame/transaction-payment/rpc/runtime-api/README.md index e453d9a3b7c8a..0d81abdb1eeb3 100644 --- a/frame/transaction-payment/rpc/runtime-api/README.md +++ b/frame/transaction-payment/rpc/runtime-api/README.md @@ -1,3 +1,3 @@ -Runtime API definition for transaction payment module. +Runtime API definition for transaction payment pallet. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 5575f8f7d0950..696550d3ef040 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,89 +15,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Runtime API definition for transaction payment module. +//! Runtime API definition for transaction payment pallet. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use frame_support::weights::{Weight, DispatchClass}; -use codec::{Encode, Codec, Decode}; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize, Serializer, Deserializer}; -use sp_runtime::traits::{MaybeDisplay, MaybeFromStr}; +use codec::Codec; +use sp_runtime::traits::MaybeDisplay; -/// Information related to a dispatchable's class, weight, and fee that can be queried from the runtime. -#[derive(Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -pub struct RuntimeDispatchInfo { - /// Weight of this dispatch. - pub weight: Weight, - /// Class of this dispatch. - pub class: DispatchClass, - /// The inclusion fee of this dispatch. This does not include a tip or anything else that - /// depends on the signature (i.e. depends on a `SignedExtension`). - #[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] - #[cfg_attr(feature = "std", serde(serialize_with = "serialize_as_string"))] - #[cfg_attr(feature = "std", serde(bound(deserialize = "Balance: std::str::FromStr")))] - #[cfg_attr(feature = "std", serde(deserialize_with = "deserialize_from_string"))] - pub partial_fee: Balance, -} - -#[cfg(feature = "std")] -fn serialize_as_string(t: &T, serializer: S) -> Result { - serializer.serialize_str(&t.to_string()) -} - -#[cfg(feature = "std")] -fn deserialize_from_string<'de, D: Deserializer<'de>, T: std::str::FromStr>(deserializer: D) -> Result { - let s = String::deserialize(deserializer)?; - s.parse::().map_err(|_| serde::de::Error::custom("Parse from string failed")) -} +pub use pallet_transaction_payment::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; sp_api::decl_runtime_apis! 
{ pub trait TransactionPaymentApi where - Balance: Codec + MaybeDisplay + MaybeFromStr, + Balance: Codec + MaybeDisplay, { fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn should_serialize_and_deserialize_properly_with_string() { - let info = RuntimeDispatchInfo { - weight: 5, - class: DispatchClass::Normal, - partial_fee: 1_000_000_u64, - }; - - let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; - - assert_eq!(serde_json::to_string(&info).unwrap(), json_str); - assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); - - // should not panic - serde_json::to_value(&info).unwrap(); - } - - #[test] - fn should_serialize_and_deserialize_properly_large_value() { - let info = RuntimeDispatchInfo { - weight: 5, - class: DispatchClass::Normal, - partial_fee: u128::max_value(), - }; - - let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; - - assert_eq!(serde_json::to_string(&info).unwrap(), json_str); - assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); - - // should not panic - serde_json::to_value(&info).unwrap(); + fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } } diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 5043f0257fc36..945156d12a6a4 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,28 +15,34 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! RPC interface for the transaction payment module. +//! RPC interface for the transaction payment pallet. -use std::sync::Arc; +pub use self::gen_client::Client as TransactionPaymentClient; use codec::{Codec, Decode}; -use sp_blockchain::HeaderBackend; use jsonrpc_core::{Error as RpcError, ErrorCode, Result}; use jsonrpc_derive::rpc; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, MaybeDisplay, MaybeFromStr}}; +pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; +use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; -pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; -pub use self::gen_client::Client as TransactionPaymentClient; +use sp_rpc::number::NumberOrHex; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, MaybeDisplay}, +}; +use std::{convert::TryInto, sync::Arc}; #[rpc] pub trait TransactionPaymentApi { #[rpc(name = "payment_queryInfo")] - fn query_info( + fn query_info(&self, encoded_xt: Bytes, at: Option) -> Result; + #[rpc(name = "payment_queryFeeDetails")] + fn query_fee_details( &self, encoded_xt: Bytes, - at: Option - ) -> Result; + at: Option, + ) -> Result>; } /// A struct that implements the [`TransactionPaymentApi`]. 
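// A usage sketch (illustrative; the dev-node address and the placeholder extrinsic bytes are
// assumptions, the method name comes from the `#[rpc(name = "payment_queryFeeDetails")]`
// attribute above): once this RPC extension is registered, the new method can be exercised
// over JSON-RPC, e.g.
//
//   curl -H 'Content-Type: application/json' http://localhost:9933 \
//     -d '{"jsonrpc":"2.0","id":1,"method":"payment_queryFeeDetails","params":["0x<scale-encoded extrinsic>"]}'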
@@ -48,7 +54,7 @@ pub struct TransactionPayment { impl TransactionPayment { /// Create new `TransactionPayment` with the given reference to the client. pub fn new(client: Arc) -> Self { - TransactionPayment { client, _marker: Default::default() } + Self { client, _marker: Default::default() } } } @@ -73,20 +79,19 @@ impl TransactionPaymentApi<::Hash, RuntimeDi for TransactionPayment where Block: BlockT, - C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + C: 'static + ProvideRuntimeApi + HeaderBackend, C::Api: TransactionPaymentRuntimeApi, - Balance: Codec + MaybeDisplay + MaybeFromStr, + Balance: Codec + MaybeDisplay + Copy + TryInto, { fn query_info( &self, encoded_xt: Bytes, - at: Option<::Hash> + at: Option<::Hash>, ) -> Result> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. - self.client.info().best_hash - )); + self.client.info().best_hash)); let encoded_len = encoded_xt.len() as u32; @@ -101,4 +106,49 @@ where data: Some(format!("{:?}", e).into()), }) } + + fn query_fee_details( + &self, + encoded_xt: Bytes, + at: Option<::Hash>, + ) -> Result> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash)); + + let encoded_len = encoded_xt.len() as u32; + + let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::DecodeError.into()), + message: "Unable to query fee details.".into(), + data: Some(format!("{:?}", e).into()), + })?; + let fee_details = api.query_fee_details(&at, uxt, encoded_len).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to query fee details.".into(), + data: Some(format!("{:?}", e).into()), + })?; + + let try_into_rpc_balance = |value: Balance| { + value.try_into().map_err(|_| RpcError { + code: ErrorCode::InvalidParams, + message: format!("{} doesn't fit in NumberOrHex representation", value), + data: None, + }) + }; + + Ok(FeeDetails { + inclusion_fee: if let Some(inclusion_fee) = fee_details.inclusion_fee { + Some(InclusionFee { + base_fee: try_into_rpc_balance(inclusion_fee.base_fee)?, + len_fee: try_into_rpc_balance(inclusion_fee.len_fee)?, + adjusted_weight_fee: try_into_rpc_balance(inclusion_fee.adjusted_weight_fee)?, + }) + } else { + None + }, + tip: Default::default(), + }) + } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 09caae54cf348..e3a3bccc3d39a 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,69 +15,88 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Transaction Payment Module +//! # Transaction Payment Pallet //! -//! This module provides the basic logic needed to pay the absolute minimum amount needed for a +//! This pallet provides the basic logic needed to pay the absolute minimum amount needed for a //! transaction to be included. This includes: +//! - _base fee_: This is the minimum amount a user pays for a transaction. It is declared +//! 
as a base _weight_ in the runtime and converted to a fee using `WeightToFee`. //! - _weight fee_: A fee proportional to amount of weight a transaction consumes. //! - _length fee_: A fee proportional to the encoded length of the transaction. //! - _tip_: An optional tip. Tip increases the priority of the transaction, giving it a higher //! chance to be included by the transaction queue. //! -//! Additionally, this module allows one to configure: -//! - The mapping between one unit of weight to one unit of fee via [`Trait::WeightToFee`]. +//! The base fee and adjusted weight and length fees constitute the _inclusion fee_, which is +//! the minimum fee for a transaction to be included in a block. +//! +//! The formula of final fee: +//! ```ignore +//! inclusion_fee = base_fee + length_fee + [targeted_fee_adjustment * weight_fee]; +//! final_fee = inclusion_fee + tip; +//! ``` +//! +//! - `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on +//! the congestion of the network. +//! +//! Additionally, this pallet allows one to configure: +//! - The mapping between one unit of weight to one unit of fee via [`Config::WeightToFee`]. //! - A means of updating the fee for the next block, via defining a multiplier, based on the //! final state of the chain at the end of the previous block. This can be configured via -//! [`Trait::FeeMultiplierUpdate`] +//! [`Config::FeeMultiplierUpdate`] +//! - How the fees are paid via [`Config::OnChargeTransaction`]. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use frame_support::{ - decl_storage, decl_module, - traits::{Currency, Get, OnUnbalanced, ExistenceRequirement, WithdrawReason, Imbalance}, - weights::{ - Weight, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Pays, WeightToFeePolynomial, - WeightToFeeCoefficient, - }, - dispatch::DispatchResult, -}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; + use sp_runtime::{ - FixedU128, FixedPointNumber, FixedPointOperand, Perquintill, RuntimeDebug, + traits::{ + Convert, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SaturatedConversion, Saturating, + SignedExtension, Zero, + }, transaction_validity::{ - TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError, - TransactionValidity, + TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction, }, - traits::{ - Zero, Saturating, SignedExtension, SaturatedConversion, Convert, Dispatchable, - DispatchInfoOf, PostDispatchInfoOf, + FixedPointNumber, FixedPointOperand, FixedU128, Perquintill, RuntimeDebug, +}; +use sp_std::prelude::*; + +use frame_support::{ + dispatch::DispatchResult, + traits::{EstimateCallFee, Get}, + weights::{ + DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, Weight, + WeightToFeeCoefficient, WeightToFeePolynomial, }, }; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; + +mod payment; +mod types; + +pub use pallet::*; +pub use payment::*; +pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; /// Fee multiplier. pub type Multiplier = FixedU128; -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = <::OnChargeTransaction as OnChargeTransaction>::Balance; /// A struct to update the weight multiplier per block. It implements `Convert`, meaning that it can convert the previous multiplier to the next one. 
This should /// be called on `on_finalize` of a block, prior to potentially cleaning the weight data from the -/// system module. +/// system pallet. /// /// given: /// s = previous block weight /// s'= ideal block weight /// m = maximum block weight -/// diff = (s - s')/m -/// v = 0.00001 -/// t1 = (v * diff) -/// t2 = (v * diff)^2 / 2 -/// then: +/// diff = (s - s')/m +/// v = 0.00001 +/// t1 = (v * diff) +/// t2 = (v * diff)^2 / 2 +/// then: /// next_multiplier = prev_multiplier * (1 + t1 + t2) /// /// Where `(s', v)` must be given as the `Get` implementation of the `T` generic type. Moreover, `M` @@ -99,8 +118,8 @@ type NegativeImbalanceOf = /// - in a fully congested chain: `p >= v * k * (1 - s')`. /// - in an empty chain: `p >= v * k * (-s')`. /// -/// For example, when all blocks are full and there are 28800 blocks per day (default in `substrate-node`) -/// and v == 0.00001, s' == 0.1875, we'd have: +/// For example, when all blocks are full and there are 28800 blocks per day (default in +/// `substrate-node`) and v == 0.00001, s' == 0.1875, we'd have: /// /// p >= 0.00001 * 28800 * 0.8125 /// p >= 0.234 @@ -108,7 +127,7 @@ type NegativeImbalanceOf = /// Meaning that fees can change by around ~23% per day, given extreme congestion. /// /// More info can be found at: -/// https://w3f-research.readthedocs.io/en/latest/polkadot/Token%20Economics.html +/// pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M)>); /// Something that can convert the current multiplier to the next one. @@ -134,7 +153,11 @@ impl MultiplierUpdate for () { } impl MultiplierUpdate for TargetedFeeAdjustment - where T: frame_system::Trait, S: Get, V: Get, M: Get, +where + T: frame_system::Config, + S: Get, + V: Get, + M: Get, { fn min() -> Multiplier { M::get() @@ -148,7 +171,11 @@ impl MultiplierUpdate for TargetedFeeAdjustment } impl Convert for TargetedFeeAdjustment - where T: frame_system::Trait, S: Get, V: Get, M: Get, +where + T: frame_system::Config, + S: Get, + V: Get, + M: Get, { fn convert(previous: Multiplier) -> Multiplier { // Defensive only. The multiplier in storage should always be at most positive. Nonetheless @@ -157,14 +184,15 @@ impl Convert for TargetedFeeAdjustment::AvailableBlockRatio::get() * - ::MaximumBlockWeight::get(); + let normal_max_weight = weights + .get(DispatchClass::Normal) + .max_total + .unwrap_or_else(|| weights.max_block); + let current_block_weight = >::block_weight(); let normal_block_weight = - >::block_weight() - .get(frame_support::weights::DispatchClass::Normal) - .min(normal_max_weight); + *current_block_weight.get(DispatchClass::Normal).min(&normal_max_weight); let s = S::get(); let v = V::get(); @@ -197,10 +225,10 @@ impl Convert for TargetedFeeAdjustment + Send + Sync; - - /// Handler for the unbalanced reduction when taking transaction fees. This is either one or - /// two separate imbalances, the first is the transaction fee paid, the second is the tip paid, - /// if any. - type OnTransactionPayment: OnUnbalanced>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Handler for withdrawing, refunding and depositing the transaction fee. + /// Transaction fees are withdrawn before the transaction is executed. 
+ /// After the transaction was executed the transaction weight can be + /// adjusted, depending on the used resources by the transaction. If the + /// transaction weight is lower than expected, parts of the transaction fee + /// might be refunded. In the end the fees can be deposited. + type OnChargeTransaction: OnChargeTransaction; - /// The fee to be paid for making a transaction; the per-byte portion. - type TransactionByteFee: Get>; + /// The fee to be paid for making a transaction; the per-byte portion. + #[pallet::constant] + type TransactionByteFee: Get>; - /// Convert a weight value into a deductible fee based on the currency type. - type WeightToFee: WeightToFeePolynomial>; + /// Convert a weight value into a deductible fee based on the currency type. + type WeightToFee: WeightToFeePolynomial>; - /// Update the multiplier of the next block, based on the previous block's weight. - type FeeMultiplierUpdate: MultiplierUpdate; -} + /// Update the multiplier of the next block, based on the previous block's weight. + type FeeMultiplierUpdate: MultiplierUpdate; + } -decl_storage! { - trait Store for Module as TransactionPayment { - pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::saturating_from_integer(1); + #[pallet::extra_constants] + impl Pallet { + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + #[allow(non_snake_case)] + /// The polynomial that is applied in order to derive fee from weight. + fn WeightToFee() -> Vec>> { + T::WeightToFee::polynomial().to_vec() + } + } - StorageVersion build(|_: &GenesisConfig| Releases::V2): Releases; + #[pallet::type_value] + pub fn NextFeeMultiplierOnEmpty() -> Multiplier { + Multiplier::saturating_from_integer(1) } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// The fee to be paid for making a transaction; the per-byte portion. - const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); + #[pallet::storage] + #[pallet::getter(fn next_fee_multiplier)] + pub type NextFeeMultiplier = + StorageValue<_, Multiplier, ValueQuery, NextFeeMultiplierOnEmpty>; - /// The polynomial that is applied in order to derive fee from weight. - const WeightToFee: Vec>> = - T::WeightToFee::polynomial().to_vec(); + #[pallet::storage] + pub(super) type StorageVersion = StorageValue<_, Releases, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig; + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + StorageVersion::::put(Releases::V2); + } + } - fn on_finalize() { - NextFeeMultiplier::mutate(|fm| { + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_finalize(_: T::BlockNumber) { + >::mutate(|fm| { *fm = T::FeeMultiplierUpdate::convert(*fm); }); } fn integrity_test() { // given weight == u64, we build multipliers from `diff` of two weight values, which can - // at most be MaximumBlockWeight. Make sure that this can fit in a multiplier without + // at most be maximum block weight. Make sure that this can fit in a multiplier without // loss. use sp_std::convert::TryInto; assert!( ::max_value() >= - Multiplier::checked_from_integer( - ::MaximumBlockWeight::get().try_into().unwrap() - ).unwrap(), + Multiplier::checked_from_integer( + T::BlockWeights::get().max_block.try_into().unwrap() + ) + .unwrap(), ); // This is the minimum value of the multiplier. 
Make sure that if we collapse to this @@ -271,36 +337,41 @@ decl_module! { // that if we collapse to minimum, the trend will be positive with a weight value // which is 1% more than the target. let min_value = T::FeeMultiplierUpdate::min(); - let mut target = - T::FeeMultiplierUpdate::target() * - (T::AvailableBlockRatio::get() * T::MaximumBlockWeight::get()); - + let mut target = T::FeeMultiplierUpdate::target() * + T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( + "Setting `max_total` for `Normal` dispatch class is not compatible with \ + `transaction-payment` pallet.", + ); // add 1 percent; let addition = target / 100; if addition == 0 { // this is most likely because in a test setup we set everything to (). - return; + return } target += addition; + #[cfg(any(feature = "std", test))] sp_io::TestExternalities::new_empty().execute_with(|| { - >::set_block_limits(target, 0); + >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); - assert!(next > min_value, "The minimum bound of the multiplier is too low. When \ + assert!( + next > min_value, + "The minimum bound of the multiplier is too low. When \ block saturation is more than target by 1% and multiplier is minimal then \ the multiplier doesn't increase." ); - }) + }); } } } -impl Module where - BalanceOf: FixedPointOperand +impl Pallet +where + BalanceOf: FixedPointOperand, { /// Query the data that we know about the fee of a given `call`. /// - /// This module is not and cannot be aware of the internals of a signed extension, for example + /// This pallet is not and cannot be aware of the internals of a signed extension, for example /// a tip. It only interprets the extrinsic as some encoded value and accounts for its weight /// and length, the runtime's extrinsic base weight, and the current fee multiplier. /// @@ -311,9 +382,7 @@ impl Module where len: u32, ) -> RuntimeDispatchInfo> where - T: Send + Sync, - BalanceOf: Send + Sync, - T::Call: Dispatchable, + T::Call: Dispatchable, { // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some // hassle for sure. We have to make it aware of the index of `ChargeTransactionPayment` in @@ -328,35 +397,36 @@ impl Module where RuntimeDispatchInfo { weight, class, partial_fee } } + /// Query the detailed fee of a given `call`. + pub fn query_fee_details( + unchecked_extrinsic: Extrinsic, + len: u32, + ) -> FeeDetails> + where + T::Call: Dispatchable, + { + let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); + Self::compute_fee_details(len, &dispatch_info, 0u32.into()) + } + /// Compute the final fee value for a particular transaction. - /// - /// The final fee is composed of: - /// - `base_fee`: This is the minimum amount a user pays for a transaction. It is declared - /// as a base _weight_ in the runtime and converted to a fee using `WeightToFee`. - /// - `len_fee`: The length fee, the amount paid for the encoded length (in bytes) of the - /// transaction. - /// - `weight_fee`: This amount is computed based on the weight of the transaction. Weight - /// accounts for the execution time of a transaction. - /// - `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on - /// the congestion of the network. - /// - (Optional) `tip`: If included in the transaction, the tip will be added on top. Only - /// signed transactions can have a tip. 
- /// - /// The base fee and adjusted weight and length fees constitute the _inclusion fee,_ which is - /// the minimum fee for a transaction to be included in a block. - /// - /// ```ignore - /// inclusion_fee = base_fee + len_fee + [targeted_fee_adjustment * weight_fee]; - /// final_fee = inclusion_fee + tip; - /// ``` - pub fn compute_fee( + pub fn compute_fee(len: u32, info: &DispatchInfoOf, tip: BalanceOf) -> BalanceOf + where + T::Call: Dispatchable, + { + Self::compute_fee_details(len, info, tip).final_fee() + } + + /// Compute the fee details for a particular transaction. + pub fn compute_fee_details( len: u32, info: &DispatchInfoOf, tip: BalanceOf, - ) -> BalanceOf where - T::Call: Dispatchable, + ) -> FeeDetails> + where + T::Call: Dispatchable, { - Self::compute_fee_raw(len, info.weight, tip, info.pays_fee) + Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) } /// Compute the actual post dispatch fee for a particular transaction. @@ -368,10 +438,30 @@ impl Module where info: &DispatchInfoOf, post_info: &PostDispatchInfoOf, tip: BalanceOf, - ) -> BalanceOf where - T::Call: Dispatchable, + ) -> BalanceOf + where + T::Call: Dispatchable, + { + Self::compute_actual_fee_details(len, info, post_info, tip).final_fee() + } + + /// Compute the actual post dispatch fee details for a particular transaction. + pub fn compute_actual_fee_details( + len: u32, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + tip: BalanceOf, + ) -> FeeDetails> + where + T::Call: Dispatchable, { - Self::compute_fee_raw(len, post_info.calc_actual_weight(info), tip, post_info.pays_fee(info)) + Self::compute_fee_raw( + len, + post_info.calc_actual_weight(info), + tip, + post_info.pays_fee(info), + info.class, + ) } fn compute_fee_raw( @@ -379,7 +469,8 @@ impl Module where weight: Weight, tip: BalanceOf, pays_fee: Pays, - ) -> BalanceOf { + class: DispatchClass, + ) -> FeeDetails> { if pays_fee == Pays::Yes { let len = >::from(len); let per_byte = T::TransactionByteFee::get(); @@ -393,26 +484,31 @@ impl Module where // final adjusted weight fee. let adjusted_weight_fee = multiplier.saturating_mul_int(unadjusted_weight_fee); - let base_fee = Self::weight_to_fee(T::ExtrinsicBaseWeight::get()); - base_fee - .saturating_add(fixed_len_fee) - .saturating_add(adjusted_weight_fee) - .saturating_add(tip) + let base_fee = Self::weight_to_fee(T::BlockWeights::get().get(class).base_extrinsic); + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee, + len_fee: fixed_len_fee, + adjusted_weight_fee, + }), + tip, + } } else { - tip + FeeDetails { inclusion_fee: None, tip } } } fn weight_to_fee(weight: Weight) -> BalanceOf { // cap the weight to the maximum defined in runtime, otherwise it will be the // `Bounded` maximum of its data type, which is not desired. - let capped_weight = weight.min(::MaximumBlockWeight::get()); + let capped_weight = weight.min(T::BlockWeights::get().max_block); T::WeightToFee::calc(&capped_weight) } } -impl Convert> for Module where - T: Trait, +impl Convert> for Pallet +where + T: Config, BalanceOf: FixedPointOperand, { /// Compute the fee for the specified weight. @@ -421,17 +517,19 @@ impl Convert> for Module where /// share that the weight contributes to the overall fee of a transaction. It is mainly /// for informational purposes and not used in the actual fee calculation. 
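/// /// As an illustration (hypothetical numbers, not taken from any concrete runtime): if /// `Self::weight_to_fee(weight)` evaluates to `100` and the stored `NextFeeMultiplier` is `3/2`, /// the value returned here is `150`.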
fn convert(weight: Weight) -> BalanceOf { - NextFeeMultiplier::get().saturating_mul_int(Self::weight_to_fee(weight)) + >::get().saturating_mul_int(Self::weight_to_fee(weight)) } } /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); -impl ChargeTransactionPayment where - T::Call: Dispatchable, +impl ChargeTransactionPayment +where + T::Call: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { /// Utility constructor. Used only in client/factory code. @@ -439,33 +537,31 @@ impl ChargeTransactionPayment where Self(fee) } + /// Returns the tip as chosen by the transaction sender. + pub fn tip(&self) -> BalanceOf { + self.0 + } + fn withdraw_fee( &self, who: &T::AccountId, + call: &T::Call, info: &DispatchInfoOf, len: usize, - ) -> Result<(BalanceOf, Option>), TransactionValidityError> { + ) -> Result< + ( + BalanceOf, + <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + ), + TransactionValidityError, + > { let tip = self.0; - let fee = Module::::compute_fee(len as u32, info, tip); + let fee = Pallet::::compute_fee(len as u32, info, tip); - // Only mess with balances if fee is not zero. - if fee.is_zero() { - return Ok((fee, None)); - } - - match T::Currency::withdraw( - who, - fee, - if tip.is_zero() { - WithdrawReason::TransactionPayment.into() - } else { - WithdrawReason::TransactionPayment | WithdrawReason::Tip - }, - ExistenceRequirement::KeepAlive, - ) { - Ok(imbalance) => Ok((fee, Some(imbalance))), - Err(_) => Err(InvalidTransaction::Payment.into()), - } + <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee( + who, call, info, fee, tip, + ) + .map(|i| (fee, i)) } /// Get an appropriate priority for a transaction with the given length and info. @@ -478,15 +574,21 @@ impl ChargeTransactionPayment where /// and the entire block weight `(1/1)`, its priority is `fee * min(1, 4) = fee * 1`. This means /// that the transaction which consumes more resources (either length or weight) with the same /// `fee` ends up having lower priority.
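/// /// A further worked example (hypothetical limits): with a maximum block weight of `1024` and a /// maximum block length of `2048` bytes, a transaction of weight `256` and length `64` bytes /// saturates to `min(1024 / 256, 2048 / 64) = min(4, 32) = 4`, giving it a priority of `fee * 4`.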
- fn get_priority(len: usize, info: &DispatchInfoOf, final_fee: BalanceOf) -> TransactionPriority { - let weight_saturation = T::MaximumBlockWeight::get() / info.weight.max(1); - let len_saturation = T::MaximumBlockLength::get() as u64 / (len as u64).max(1); - let coefficient: BalanceOf = weight_saturation.min(len_saturation).saturated_into::>(); + fn get_priority( + len: usize, + info: &DispatchInfoOf, + final_fee: BalanceOf, + ) -> TransactionPriority { + let weight_saturation = T::BlockWeights::get().max_block / info.weight.max(1); + let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Normal); + let len_saturation = max_block_length as u64 / (len as u64).max(1); + let coefficient: BalanceOf = + weight_saturation.min(len_saturation).saturated_into::>(); final_fee.saturating_mul(coefficient).saturated_into::() } } -impl sp_std::fmt::Debug for ChargeTransactionPayment { +impl sp_std::fmt::Debug for ChargeTransactionPayment { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "ChargeTransactionPayment<{:?}>", self.0) @@ -497,40 +599,47 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment } } -impl SignedExtension for ChargeTransactionPayment where +impl SignedExtension for ChargeTransactionPayment +where BalanceOf: Send + Sync + From + FixedPointOperand, - T::Call: Dispatchable, + T::Call: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = (); - type Pre = (BalanceOf, Self::AccountId, Option>, BalanceOf); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + type Pre = ( + // tip + BalanceOf, + // who paid the fee + Self::AccountId, + // imbalance resulting from withdrawing the fee + <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + ); + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } fn validate( &self, who: &Self::AccountId, - _call: &Self::Call, + call: &Self::Call, info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { - let (fee, _) = self.withdraw_fee(who, info, len)?; - Ok(ValidTransaction { - priority: Self::get_priority(len, info, fee), - ..Default::default() - }) + let (fee, _) = self.withdraw_fee(who, call, info, len)?; + Ok(ValidTransaction { priority: Self::get_priority(len, info, fee), ..Default::default() }) } fn pre_dispatch( self, who: &Self::AccountId, - _call: &Self::Call, + call: &Self::Call, info: &DispatchInfoOf, - len: usize + len: usize, ) -> Result { - let (fee, imbalance) = self.withdraw_fee(who, info, len)?; - Ok((self.0, who.clone(), imbalance, fee)) + let (_fee, imbalance) = self.withdraw_fee(who, call, info, len)?; + Ok((self.0, who.clone(), imbalance)) } fn post_dispatch( @@ -540,101 +649,105 @@ impl SignedExtension for ChargeTransactionPayment whe len: usize, _result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - let (tip, who, imbalance, fee) = pre; - if let Some(payed) = imbalance { - let actual_fee = Module::::compute_actual_fee( - len as u32, - info, - post_info, - tip, - ); - let refund = fee.saturating_sub(actual_fee); - let actual_payment = match T::Currency::deposit_into_existing(&who, refund) { - Ok(refund_imbalance) => { - // The refund cannot be larger than the up front payed max weight. - // `PostDispatchInfo::calc_unspent` guards against such a case. 
- match payed.offset(refund_imbalance) { - Ok(actual_payment) => actual_payment, - Err(_) => return Err(InvalidTransaction::Payment.into()), - } - } - // We do not recreate the account using the refund. The up front payment - // is gone in that case. - Err(_) => payed, - }; - let imbalances = actual_payment.split(tip); - T::OnTransactionPayment::on_unbalanceds(Some(imbalances.0).into_iter() - .chain(Some(imbalances.1))); - } + let (tip, who, imbalance) = pre; + let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, info, post_info, actual_fee, tip, imbalance, + )?; Ok(()) } } +impl EstimateCallFee> + for Pallet +where + BalanceOf: FixedPointOperand, + T::Call: Dispatchable, +{ + fn estimate_call_fee(call: &AnyCall, post_info: PostDispatchInfo) -> BalanceOf { + let len = call.encoded_size() as u32; + let info = call.get_dispatch_info(); + Self::compute_actual_fee(len, &info, &post_info, Zero::zero()) + } +} + #[cfg(test)] mod tests { use super::*; + use crate as pallet_transaction_payment; + + use std::cell::RefCell; + use codec::Encode; - use frame_support::{ - impl_outer_dispatch, impl_outer_origin, impl_outer_event, parameter_types, - weights::{ - DispatchClass, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Weight, - WeightToFeePolynomial, WeightToFeeCoefficients, WeightToFeeCoefficient, - }, - }; - use pallet_balances::Call as BalancesCall; - use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; + use smallvec::smallvec; + use sp_core::H256; use sp_runtime::{ testing::{Header, TestXt}, - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, IdentityLookup, One}, + transaction_validity::InvalidTransaction, Perbill, }; - use std::cell::RefCell; - use smallvec::smallvec; - const CALL: &::Call = - &Call::Balances(BalancesCall::transfer(2, 69)); + use frame_support::{ + assert_noop, assert_ok, parameter_types, + traits::{Currency, Imbalance, OnUnbalanced}, + weights::{ + DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo, Weight, + WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, + }, + }; + use frame_system as system; + use pallet_balances::Call as BalancesCall; - impl_outer_dispatch! { - pub enum Call for Runtime where origin: Origin { - pallet_balances::Balances, - frame_system::System, - } - } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; - impl_outer_event! { - pub enum Event for Runtime { - system, - pallet_balances, + frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, } - } - - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct Runtime; + ); - use frame_system as system; - impl_outer_origin!{ - pub enum Origin for Runtime {} - } + const CALL: &::Call = + &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); thread_local! 
{ static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); } - pub struct ExtrinsicBaseWeight; - impl Get for ExtrinsicBaseWeight { - fn get() -> u64 { EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()) } + pub struct BlockWeights; + impl Get for BlockWeights { + fn get() -> frame_system::limits::BlockWeights { + frame_system::limits::BlockWeights::builder() + .base_block(0) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); + }) + .for_class(DispatchClass::non_mandatory(), |weights| { + weights.max_total = 1024.into(); + }) + .build_or_panic() + } } parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub static TransactionByteFee: u64 = 1; + pub static WeightToFee: u64 = 1; } - impl frame_system::Trait for Runtime { - type BaseCallFilter = (); + impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -646,45 +759,32 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Runtime { + impl pallet_balances::Config for Runtime { type Balance = u64; type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } - thread_local! { - static TRANSACTION_BYTE_FEE: RefCell = RefCell::new(1); - static WEIGHT_TO_FEE: RefCell = RefCell::new(1); - } - - pub struct TransactionByteFee; - impl Get for TransactionByteFee { - fn get() -> u64 { TRANSACTION_BYTE_FEE.with(|v| *v.borrow()) } - } - pub struct WeightToFee; impl WeightToFeePolynomial for WeightToFee { type Balance = u64; @@ -698,33 +798,42 @@ mod tests { } } - impl Trait for Runtime { - type Currency = pallet_balances::Module; - type OnTransactionPayment = (); + thread_local! 
{ + static TIP_UNBALANCED_AMOUNT: RefCell = RefCell::new(0); + static FEE_UNBALANCED_AMOUNT: RefCell = RefCell::new(0); + } + + pub struct DealWithFees; + impl OnUnbalanced> for DealWithFees { + fn on_unbalanceds( + mut fees_then_tips: impl Iterator>, + ) { + if let Some(fees) = fees_then_tips.next() { + FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += fees.peek()); + if let Some(tips) = fees_then_tips.next() { + TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += tips.peek()); + } + } + } + } + + impl Config for Runtime { + type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = (); } - type Balances = pallet_balances::Module; - type System = frame_system::Module; - type TransactionPayment = Module; - pub struct ExtBuilder { balance_factor: u64, base_weight: u64, byte_fee: u64, - weight_to_fee: u64 + weight_to_fee: u64, } impl Default for ExtBuilder { fn default() -> Self { - Self { - balance_factor: 1, - base_weight: 0, - byte_fee: 1, - weight_to_fee: 1, - } + Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 } } } @@ -761,12 +870,14 @@ mod tests { (3, 30 * self.balance_factor), (4, 40 * self.balance_factor), (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) + (6, 60 * self.balance_factor), ] } else { vec![] }, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } } @@ -778,24 +889,15 @@ mod tests { } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { - PostDispatchInfo { - actual_weight: Some(w), - pays_fee: Default::default(), - } + PostDispatchInfo { actual_weight: Some(w), pays_fee: Default::default() } } fn post_info_from_pays(p: Pays) -> PostDispatchInfo { - PostDispatchInfo { - actual_weight: None, - pays_fee: p, - } + PostDispatchInfo { actual_weight: None, pays_fee: p } } fn default_post_info() -> PostDispatchInfo { - PostDispatchInfo { - actual_weight: None, - pays_fee: Default::default(), - } + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() } } #[test] @@ -804,33 +906,42 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - let len = 10; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(5), len) - .unwrap(); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(5), &default_post_info(), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - }); + .execute_with(|| { + let len = 10; + let pre = ChargeTransactionPayment::::from(0) + .pre_dispatch(&1, CALL, &info_from_weight(5), len) + .unwrap(); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(5), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 5 + 10); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 0); + 
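+ // Reset the accumulated fee so that the assertions below only observe the second, + // tipped transaction.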
+ FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0); + + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); + assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 10 + 50); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5); + }); } #[test] @@ -839,45 +950,42 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - let len = 10; - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); - - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() - ); - // 75 (3/2 of the returned 50 units of weight) is refunded - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); - }); + .execute_with(|| { + let len = 10; + >::put(Multiplier::saturating_from_rational(3, 2)); + + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + )); + // 75 (3/2 of the returned 50 units of weight) is refunded + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); + }); } #[test] fn signed_extension_transaction_payment_is_bounded() { - ExtBuilder::default() - .balance_factor(1000) - .byte_fee(0) - .build() - .execute_with(|| - { + ExtBuilder::default().balance_factor(1000).byte_fee(0).build().execute_with(|| { // maximum weight possible - assert!( - ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::max_value()), 10) - .is_ok() - ); + assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( + &1, + CALL, + &info_from_weight(Weight::max_value()), + 10 + )); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), - (10000 - ::MaximumBlockWeight::get()) as u64 + (10000 - ::BlockWeights::get().max_block) as u64 ); }); } @@ -888,37 +996,38 @@ mod tests { .base_weight(100) .balance_factor(0) .build() - .execute_with(|| - { - // 1 ain't have a penny. - assert_eq!(Balances::free_balance(1), 0); - - let len = 100; - - // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
- let operational_transaction = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::No, - }; - assert!( - ChargeTransactionPayment::::from(0) - .validate(&1, CALL, &operational_transaction , len) - .is_ok() - ); - - // like a InsecureFreeNormal - let free_transaction = DispatchInfo { - weight: 0, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }; - assert!( - ChargeTransactionPayment::::from(0) - .validate(&1, CALL, &free_transaction , len) - .is_err() - ); - }); + .execute_with(|| { + // 1 ain't have a penny. + assert_eq!(Balances::free_balance(1), 0); + + let len = 100; + + // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. + let operational_transaction = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::No, + }; + assert_ok!(ChargeTransactionPayment::::from(0).validate( + &1, + CALL, + &operational_transaction, + len + )); + + // like a InsecureFreeNormal + let free_transaction = + DispatchInfo { weight: 0, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + assert_noop!( + ChargeTransactionPayment::::from(0).validate( + &1, + CALL, + &free_transaction, + len + ), + TransactionValidityError::Invalid(InvalidTransaction::Payment), + ); + }); } #[test] @@ -927,58 +1036,47 @@ mod tests { .base_weight(5) .balance_factor(10) .build() - .execute_with(|| - { - // all fees should be x1.5 - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); - let len = 10; - - assert!( - ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(3), len) - .is_ok() - ); - assert_eq!( - Balances::free_balance(1), - 100 // original + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); + let len = 10; + + assert_ok!(ChargeTransactionPayment::::from(10) // tipped + .pre_dispatch(&1, CALL, &info_from_weight(3), len)); + assert_eq!( + Balances::free_balance(1), + 100 // original - 10 // tip - 5 // base - 10 // len - (3 * 3 / 2) // adjusted weight - ); - }) + ); + }) } #[test] fn query_info_works() { - let call = Call::Balances(BalancesCall::transfer(2, 69)); + let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); let origin = 111111; let extra = (); let xt = TestXt::new(call, Some((origin, extra))); - let info = xt.get_dispatch_info(); + let info = xt.get_dispatch_info(); let ext = xt.encode(); let len = ext.len() as u32; - ExtBuilder::default() - .base_weight(5) - .weight_fee(2) - .build() - .execute_with(|| - { + ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { // all fees should be x1.5 - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); + >::put(Multiplier::saturating_from_rational(3, 2)); assert_eq!( TransactionPayment::query_info(xt, len), RuntimeDispatchInfo { weight: info.weight, class: info.class, - partial_fee: - 5 * 2 /* base * weight_fee */ + partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ - + info.weight.min(MaximumBlockWeight::get()) as u64 * 2 * 3 / 2 /* weight */ + + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ }, ); - }); } @@ -989,37 +1087,36 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Next fee multiplier is zero - assert_eq!(NextFeeMultiplier::get(), Multiplier::one()); - - // Tip only, no fees works - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::No, - }; - 
assert_eq!(Module::::compute_fee(0, &dispatch_info, 10), 10); - // No tip, only base fee works - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); - // Tip + base fee works - assert_eq!(Module::::compute_fee(0, &dispatch_info, 69), 169); - // Len (byte fee) + base fee works - assert_eq!(Module::::compute_fee(42, &dispatch_info, 0), 520); - // Weight fee + base fee works - let dispatch_info = DispatchInfo { - weight: 1000, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 1100); - }); + .execute_with(|| { + // Next fee multiplier is zero + assert_eq!(>::get(), Multiplier::one()); + + // Tip only, no fees works + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::No, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); + // No tip, only base fee works + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); + // Tip + base fee works + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 69), 169); + // Len (byte fee) + base fee works + assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); + // Weight fee + base fee works + let dispatch_info = DispatchInfo { + weight: 1000, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 1100); + }); } #[test] @@ -1029,30 +1126,29 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Add a next fee multiplier. Fees will be x3/2. - NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); - // Base fee is unaffected by multiplier - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); - - // Everything works together :) - let dispatch_info = DispatchInfo { - weight: 123, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - // 123 weight, 456 length, 100 base - assert_eq!( - Module::::compute_fee(456, &dispatch_info, 789), - 100 + (3 * 123 / 2) + 4560 + 789, - ); - }); + .execute_with(|| { + // Add a next fee multiplier. Fees will be x3/2. + >::put(Multiplier::saturating_from_rational(3, 2)); + // Base fee is unaffected by multiplier + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); + + // Everything works together :) + let dispatch_info = DispatchInfo { + weight: 123, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // 123 weight, 456 length, 100 base + assert_eq!( + Pallet::::compute_fee(456, &dispatch_info, 789), + 100 + (3 * 123 / 2) + 4560 + 789, + ); + }); } #[test] @@ -1062,31 +1158,30 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Add a next fee multiplier. All fees will be x1/2. - NextFeeMultiplier::put(Multiplier::saturating_from_rational(1, 2)); - - // Base fee is unaffected by multiplier. - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); - - // Everything works together. 
- let dispatch_info = DispatchInfo { - weight: 123, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - // 123 weight, 456 length, 100 base - assert_eq!( - Module::::compute_fee(456, &dispatch_info, 789), - 100 + (123 / 2) + 4560 + 789, - ); - }); + .execute_with(|| { + // Add a next fee multiplier. All fees will be x1/2. + >::put(Multiplier::saturating_from_rational(1, 2)); + + // Base fee is unaffected by multiplier. + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); + + // Everything works together. + let dispatch_info = DispatchInfo { + weight: 123, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // 123 weight, 456 length, 100 base + assert_eq!( + Pallet::::compute_fee(456, &dispatch_info, 789), + 100 + (123 / 2) + 4560 + 789, + ); + }); } #[test] @@ -1096,23 +1191,18 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Overflow is handled - let dispatch_info = DispatchInfo { - weight: Weight::max_value(), - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!( - Module::::compute_fee( - ::max_value(), - &dispatch_info, - ::max_value() - ), - ::max_value() - ); - }); + .execute_with(|| { + // Overflow is handled + let dispatch_info = DispatchInfo { + weight: Weight::max_value(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!( + Pallet::::compute_fee(u32::MAX, &dispatch_info, u64::MAX), + u64::MAX + ); + }); } #[test] @@ -1121,35 +1211,34 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - // So events are emitted - System::set_block_number(10); - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - // kill the account between pre and post dispatch - assert!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2)).is_ok()); - assert_eq!(Balances::free_balance(2), 0); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(2), 0); - // Transfer Event - assert!(System::events().iter().any(|event| { - event.event == Event::pallet_balances(pallet_balances::RawEvent::Transfer(2, 3, 80)) - })); - // Killed Event - assert!(System::events().iter().any(|event| { - event.event == Event::system(system::RawEvent::KilledAccount(2)) - })); - }); + .execute_with(|| { + // So events are emitted + System::set_block_number(10); + let len = 10; + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + // kill the account between pre and post dispatch + assert_ok!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2))); + assert_eq!(Balances::free_balance(2), 0); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(2), 0); + // Transfer Event + System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer( + 2, 3, 80, + ))); + // Killed Event + System::assert_has_event(Event::System(system::Event::KilledAccount(2))); + }); } #[test] @@ -1158,21 +1247,22 @@ 
mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(101), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - }); + .execute_with(|| { + let len = 10; + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(101), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + }); } #[test] @@ -1181,30 +1271,28 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - // So events are emitted - System::set_block_number(10); - let len = 10; - let dispatch_info = DispatchInfo { - weight: 100, - pays_fee: Pays::No, - class: DispatchClass::Normal, - }; - let user = 69; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&user, CALL, &dispatch_info, len) - .unwrap(); - assert_eq!(Balances::total_balance(&user), 0); - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &dispatch_info, &default_post_info(), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::total_balance(&user), 0); - // No events for such a scenario - assert_eq!(System::events().len(), 0); - }); + .execute_with(|| { + // So events are emitted + System::set_block_number(10); + let len = 10; + let dispatch_info = + DispatchInfo { weight: 100, pays_fee: Pays::No, class: DispatchClass::Normal }; + let user = 69; + let pre = ChargeTransactionPayment::::from(0) + .pre_dispatch(&user, CALL, &dispatch_info, len) + .unwrap(); + assert_eq!(Balances::total_balance(&user), 0); + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &dispatch_info, + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Balances::total_balance(&user), 0); + // No events for such a scenario + assert_eq!(System::events().len(), 0); + }); } #[test] @@ -1213,32 +1301,36 @@ mod tests { .balance_factor(10) .base_weight(7) .build() - .execute_with(|| - { - let info = info_from_weight(100); - let post_info = post_info_from_weight(33); - let prev_balance = Balances::free_balance(2); - let len = 10; - let tip = 5; - - NextFeeMultiplier::put(Multiplier::saturating_from_rational(5, 4)); - - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + .execute_with(|| { + let info = info_from_weight(100); + let post_info = post_info_from_weight(33); + let prev_balance = Balances::free_balance(2); + let len = 10; + let tip = 5; + + >::put(Multiplier::saturating_from_rational(5, 4)); + + let pre = ChargeTransactionPayment::::from(tip) + .pre_dispatch(&2, CALL, &info, len) + .unwrap(); + + ChargeTransactionPayment::::post_dispatch( + pre, + &info, + &post_info, + len, + &Ok(()), + ) .unwrap(); - ChargeTransactionPayment:: - ::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .unwrap(); - - let refund_based_fee = prev_balance - Balances::free_balance(2); - let actual_fee = Module:: - ::compute_actual_fee(len as u32, &info, &post_info, tip); + let refund_based_fee = prev_balance - Balances::free_balance(2); + let 
actual_fee = + Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); - // 33 weight, 10 length, 7 base, 5 tip - assert_eq!(actual_fee, 7 + 10 + (33 * 5 / 4) + 5); - assert_eq!(refund_based_fee, actual_fee); - }); + // 33 weight, 10 length, 7 base, 5 tip + assert_eq!(actual_fee, 7 + 10 + (33 * 5 / 4) + 5); + assert_eq!(refund_based_fee, actual_fee); + }); } #[test] @@ -1247,31 +1339,35 @@ mod tests { .balance_factor(10) .base_weight(7) .build() - .execute_with(|| - { - let info = info_from_weight(100); - let post_info = post_info_from_pays(Pays::No); - let prev_balance = Balances::free_balance(2); - let len = 10; - let tip = 5; - - NextFeeMultiplier::put(Multiplier::saturating_from_rational(5, 4)); - - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + .execute_with(|| { + let info = info_from_weight(100); + let post_info = post_info_from_pays(Pays::No); + let prev_balance = Balances::free_balance(2); + let len = 10; + let tip = 5; + + >::put(Multiplier::saturating_from_rational(5, 4)); + + let pre = ChargeTransactionPayment::::from(tip) + .pre_dispatch(&2, CALL, &info, len) + .unwrap(); + + ChargeTransactionPayment::::post_dispatch( + pre, + &info, + &post_info, + len, + &Ok(()), + ) .unwrap(); - ChargeTransactionPayment:: - ::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .unwrap(); + let refund_based_fee = prev_balance - Balances::free_balance(2); + let actual_fee = + Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); - let refund_based_fee = prev_balance - Balances::free_balance(2); - let actual_fee = Module:: - ::compute_actual_fee(len as u32, &info, &post_info, tip); - - // Only 5 tip is paid - assert_eq!(actual_fee, 5); - assert_eq!(refund_based_fee, actual_fee); - }); + // Only 5 tip is paid + assert_eq!(actual_fee, 5); + assert_eq!(refund_based_fee, actual_fee); + }); } } diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs new file mode 100644 index 0000000000000..58e6ef63109a3 --- /dev/null +++ b/frame/transaction-payment/src/payment.rs @@ -0,0 +1,149 @@ +//! Traits and default implementation for paying transaction fees. +use crate::Config; + +use codec::FullCodec; +use sp_runtime::{ + traits::{ + AtLeast32BitUnsigned, DispatchInfoOf, MaybeSerializeDeserialize, PostDispatchInfoOf, + Saturating, Zero, + }, + transaction_validity::InvalidTransaction, +}; +use sp_std::{fmt::Debug, marker::PhantomData}; + +use frame_support::{ + traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, WithdrawReasons}, + unsigned::TransactionValidityError, +}; + +type NegativeImbalanceOf = + ::AccountId>>::NegativeImbalance; + +/// Handle withdrawing, refunding and depositing of transaction fees. +pub trait OnChargeTransaction { + /// The underlying integer type in which fees are calculated. + type Balance: AtLeast32BitUnsigned + + FullCodec + + Copy + + MaybeSerializeDeserialize + + Debug + + Default + + scale_info::TypeInfo; + /// The information carried over from `withdraw_fee` to `correct_and_deposit_fee`, e.g. the + /// withdrawn imbalance. + type LiquidityInfo: Default; + + /// Before the transaction is executed, the payment of the transaction fees + /// needs to be secured. + /// + /// Note: The `fee` already includes the `tip`. + fn withdraw_fee( + who: &T::AccountId, + call: &T::Call, + dispatch_info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result; + + /// After the transaction was executed, the actual fee can be calculated. + /// This function should refund any overpaid fees and optionally deposit + /// the corrected amount.
+ /// + /// Note: The `fee` already includes the `tip`. + fn correct_and_deposit_fee( + who: &T::AccountId, + dispatch_info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + corrected_fee: Self::Balance, + tip: Self::Balance, + already_withdrawn: Self::LiquidityInfo, + ) -> Result<(), TransactionValidityError>; +} + +/// Implements the transaction payment for a pallet implementing the `Currency` +/// trait (e.g. the pallet_balances) using an unbalance handler (implementing +/// `OnUnbalanced`). +/// +/// The unbalance handler is given 2 unbalanceds in [`OnUnbalanced::on_unbalanceds`]: fee and +/// then tip. +pub struct CurrencyAdapter(PhantomData<(C, OU)>); + +/// Default implementation for a Currency and an OnUnbalanced handler. +/// +/// The unbalance handler is given 2 unbalanceds in [`OnUnbalanced::on_unbalanceds`]: fee and +/// then tip. +impl OnChargeTransaction for CurrencyAdapter +where + T: Config, + T::TransactionByteFee: Get<::AccountId>>::Balance>, + C: Currency<::AccountId>, + C::PositiveImbalance: Imbalance< + ::AccountId>>::Balance, + Opposite = C::NegativeImbalance, + >, + C::NegativeImbalance: Imbalance< + ::AccountId>>::Balance, + Opposite = C::PositiveImbalance, + >, + OU: OnUnbalanced>, +{ + type LiquidityInfo = Option>; + type Balance = ::AccountId>>::Balance; + + /// Withdraw the predicted fee from the transaction origin. + /// + /// Note: The `fee` already includes the `tip`. + fn withdraw_fee( + who: &T::AccountId, + _call: &T::Call, + _info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result { + if fee.is_zero() { + return Ok(None) + } + + let withdraw_reason = if tip.is_zero() { + WithdrawReasons::TRANSACTION_PAYMENT + } else { + WithdrawReasons::TRANSACTION_PAYMENT | WithdrawReasons::TIP + }; + + match C::withdraw(who, fee, withdraw_reason, ExistenceRequirement::KeepAlive) { + Ok(imbalance) => Ok(Some(imbalance)), + Err(_) => Err(InvalidTransaction::Payment.into()), + } + } + + /// Hand the fee and the tip over to the [`OnUnbalanced`] implementation. + /// Since the predicted fee might have been too high, parts of the fee may + /// be refunded. + /// + /// Note: The `corrected_fee` already includes the `tip`. + fn correct_and_deposit_fee( + who: &T::AccountId, + _dispatch_info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + corrected_fee: Self::Balance, + tip: Self::Balance, + already_withdrawn: Self::LiquidityInfo, + ) -> Result<(), TransactionValidityError> { + if let Some(paid) = already_withdrawn { + // Calculate how much refund we should return. + let refund_amount = paid.peek().saturating_sub(corrected_fee); + // Refund to the account that paid the fees. If this fails, the + // account might have dropped below the existential balance. In + // that case we don't refund anything. + let refund_imbalance = C::deposit_into_existing(&who, refund_amount) + .unwrap_or_else(|_| C::PositiveImbalance::zero()); + // Merge the imbalance caused by paying the fees and refunding parts of it again.
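+ // `offset` nets the withdrawn fee (a negative imbalance) against the refund (a positive + // imbalance); `same()` then recovers the remaining negative imbalance and fails if the + // refund were somehow larger than the amount originally paid.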
+ let adjusted_paid = paid + .offset(refund_imbalance) + .same() + .map_err(|_| TransactionValidityError::Invalid(InvalidTransaction::Payment))?; + // Call someone else to handle the imbalance (fee and tip separately) + let (tip, fee) = adjusted_paid.split(tip); + OU::on_unbalanceds(Some(fee).into_iter().chain(Some(tip))); + } + Ok(()) + } +} diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs new file mode 100644 index 0000000000000..3ce5bcf890bd1 --- /dev/null +++ b/frame/transaction-payment/src/types.rs @@ -0,0 +1,168 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for transaction-payment RPC. + +use codec::{Decode, Encode}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + +use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; +use sp_std::prelude::*; + +use frame_support::weights::{DispatchClass, Weight}; + +/// The base fee and adjusted weight and length fees constitute the _inclusion fee_. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct InclusionFee { + /// This is the minimum amount a user pays for a transaction. It is declared + /// as a base _weight_ in the runtime and converted to a fee using `WeightToFee`. + pub base_fee: Balance, + /// The length fee, the amount paid for the encoded length (in bytes) of the transaction. + pub len_fee: Balance, + /// + /// - `targeted_fee_adjustment`: This is a multiplier that can tune the final fee based on the + /// congestion of the network. + /// - `weight_fee`: This amount is computed based on the weight of the transaction. Weight + /// accounts for the execution time of a transaction. + /// + /// adjusted_weight_fee = targeted_fee_adjustment * weight_fee + pub adjusted_weight_fee: Balance, +} + +impl InclusionFee { + /// Returns the total of inclusion fee. + /// + /// ```ignore + /// inclusion_fee = base_fee + len_fee + adjusted_weight_fee + /// ``` + pub fn inclusion_fee(&self) -> Balance { + self.base_fee + .saturating_add(self.len_fee) + .saturating_add(self.adjusted_weight_fee) + } +} + +/// The `FeeDetails` is composed of: +/// - (Optional) `inclusion_fee`: Only the `Pays::Yes` transaction can have the inclusion fee. +/// - `tip`: If included in the transaction, the tip will be added on top. Only signed +/// transactions can have a tip. +#[derive(Encode, Decode, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct FeeDetails { + /// The minimum fee for a transaction to be included in a block. + pub inclusion_fee: Option>, + // Do not serialize and deserialize `tip` as we actually can not pass any tip to the RPC. 
+ #[cfg_attr(feature = "std", serde(skip))] + pub tip: Balance, +} + +impl FeeDetails { + /// Returns the final fee. + /// + /// ```ignore + /// final_fee = inclusion_fee + tip; + /// ``` + pub fn final_fee(&self) -> Balance { + self.inclusion_fee + .as_ref() + .map(|i| i.inclusion_fee()) + .unwrap_or_else(|| Zero::zero()) + .saturating_add(self.tip) + } +} + +/// Information related to a dispatchable's class, weight, and fee that can be queried from the +/// runtime. +#[derive(Eq, PartialEq, Encode, Decode, Default)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] +#[cfg_attr(feature = "std", serde(bound(deserialize = "Balance: std::str::FromStr")))] +pub struct RuntimeDispatchInfo { + /// Weight of this dispatch. + pub weight: Weight, + /// Class of this dispatch. + pub class: DispatchClass, + /// The inclusion fee of this dispatch. + /// + /// This does not include a tip or anything else that + /// depends on the signature (i.e. depends on a `SignedExtension`). + #[cfg_attr(feature = "std", serde(with = "serde_balance"))] + pub partial_fee: Balance, +} + +#[cfg(feature = "std")] +mod serde_balance { + use serde::{Deserialize, Deserializer, Serializer}; + + pub fn serialize( + t: &T, + serializer: S, + ) -> Result { + serializer.serialize_str(&t.to_string()) + } + + pub fn deserialize<'de, D: Deserializer<'de>, T: std::str::FromStr>( + deserializer: D, + ) -> Result { + let s = String::deserialize(deserializer)?; + s.parse::().map_err(|_| serde::de::Error::custom("Parse from string failed")) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_serialize_and_deserialize_properly_with_string() { + let info = RuntimeDispatchInfo { + weight: 5, + class: DispatchClass::Normal, + partial_fee: 1_000_000_u64, + }; + + let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; + + assert_eq!(serde_json::to_string(&info).unwrap(), json_str); + assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); + + // should not panic + serde_json::to_value(&info).unwrap(); + } + + #[test] + fn should_serialize_and_deserialize_properly_large_value() { + let info = RuntimeDispatchInfo { + weight: 5, + class: DispatchClass::Normal, + partial_fee: u128::max_value(), + }; + + let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + + assert_eq!(serde_json::to_string(&info).unwrap(), json_str); + assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); + + // should not panic + serde_json::to_value(&info).unwrap(); + } +} diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml new file mode 100644 index 0000000000000..a4ebd5cfbc876 --- /dev/null +++ b/frame/transaction-storage/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "pallet-transaction-storage" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Storage chain pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.126", optional = true } +hex-literal = { version = "0.3.1", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", 
default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } +sp-transaction-storage-proof = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-storage-proof" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] +sp-transaction-storage-proof = { version = "4.0.0-dev", default-features = true, path = "../../primitives/transaction-storage-proof" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } + +[features] +default = ["std"] +runtime-benchmarks = ["frame-benchmarking", "hex-literal"] +std = [ + "serde", + "codec/std", + "scale-info/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "sp-io/std", + "sp-std/std", + "sp-inherents/std", +] diff --git a/frame/transaction-storage/README.md b/frame/transaction-storage/README.md new file mode 100644 index 0000000000000..0ed3ba279c2a5 --- /dev/null +++ b/frame/transaction-storage/README.md @@ -0,0 +1,82 @@ +# Transaction Storage Pallet + +Indexes transactions and manages storage proofs. + +Allows storing arbitrary data on the chain. Data is automatically removed after `StoragePeriod` blocks, unless the storage is renewed. +Validators must submit proof of storing a random chunk of data for block `N - StoragePeriod` when producing block `N`. + +# Running a chain + +The following describes how to set up a new storage chain. + +Start with generating a chain spec. + +```bash +cargo run --release -- build-spec --chain=local > sc_init.json +``` + +Edit the JSON chain spec file to customise the chain. The storage chain genesis parameters are configured in the `transactionStorage` section. +Note that `storagePeriod` is specified in blocks and changing it also requires code changes at the moment. + +Build a raw spec from the init spec. + +```bash +cargo run --release build-spec --chain=sc_init.json --raw > sc.json +``` + +Run a few validator nodes. + +```bash +cargo run --release -- --chain=sc.json -d /tmp/alice --storage-chain --keep-blocks=100800 --ipfs-server --validator --alice +cargo run --release -- --chain=sc.json -d /tmp/bob --storage-chain --keep-blocks=100800 --ipfs-server --validator --bob +``` + +`--storage-chain` enables transaction indexing. +`--keep-blocks=100800` enables block pruning. The value here should be greater than or equal to the storage period. +`--ipfs-server` enables serving stored content over IPFS. + +Once the network is started, any other joining nodes need to sync with `--sync=fast`. Regular sync will fail because block pruning removes old blocks. The chain does not keep full block history.
+ +```bash +cargo run --release -- --chain=sc.json -d /tmp/charlie --storage-chain --keep-blocks=100800 --ipfs-server --validator --charlie --sync=fast +``` + +# Making transactions + +To store data, use the `transactionStorage.store` extrinsic. An IPFS CID can be generated from the Blake2-256 hash of the data. + +```js +const util_crypto = require('@polkadot/util-crypto'); +const keyring_api = require('@polkadot/keyring'); +const polkadot_api = require('@polkadot/api'); +const fs = require('fs'); +const multihash = require('multihashes'); +const CID = require('cids'); + +const wsProvider = new polkadot_api.WsProvider(); +const api = await polkadot_api.ApiPromise.create({ provider: wsProvider }); + +const keyring = new keyring_api.Keyring({ type: "sr25519" }); +const alice = keyring.addFromUri("//Alice"); + +const file = fs.readFileSync('cute_kitten.jpeg'); +const hash = util_crypto.blake2AsU8a(file); +const encoded_hash = multihash.encode(hash, 'blake2b-256'); + +const cid = new CID(1, 'blake2b-256', encoded_hash); +console.log(cid.toString()); + +const txHash = await api.tx.transactionStorage.store('0x' + file.toString('hex')).signAndSend(alice); +``` +Data can then be queried over IPFS: + +```bash +ipfs swarm connect +ipfs block get /ipfs/ > kitten.jpeg +``` + +To renew data and prevent it from being disposed of after the storage period, use `transactionStorage.renew(block, index)`, +where `block` is the block number of the previous store or renew transaction, and `index` is the index of that transaction in the block. + + +License: Apache-2.0 diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs new file mode 100644 index 0000000000000..d5da6a42b46f0 --- /dev/null +++ b/frame/transaction-storage/src/benchmarking.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for transaction-storage Pallet + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::traits::{Currency, OnFinalize, OnInitialize}; +use frame_system::{EventRecord, Pallet as System, RawOrigin}; +use sp_runtime::traits::{Bounded, One, Zero}; +use sp_std::*; +use sp_transaction_storage_proof::TransactionStorageProof; + +use crate::Pallet as TransactionStorage; + +const PROOF: &[u8] = &hex_literal::hex!( + " + 0104000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 000000000000000000000000000000014cd0780ffff80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe8 + 7d12a3662c4c0080e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb + 13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2 + f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f + 1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f + 3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a47 + 8e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cf + f93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e31 + 6a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f + 53cff93f3ca2f3dfe87d12a3662c4c80e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4c8 + 0e316a478e2f1fcb13cf22fd0b2dbb54a6f53cff93f3ca2f3dfe87d12a3662c4cbd05807777809a5d7a720ce5f9d9a012 + fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bcbf8a3dc2f6b9e957d129e610c06d411e11743062dc1cf + 3ac289390ae4c8008592aa2d915f52941036afbe72bac4ebe7ce186c4ddc53f118e0ddd4decd8cc809a5d7a720ce5f9d9 + a012fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bcbf8a3dc2f6b9e957d129e610c06d411e11743062d + c1cf3ac289390ae4c00809a5d7a720ce5f9d9a012fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bcbf8a + 3dc2f6b9e957d129e610c06d411e11743062dc1cf3ac289390ae4c8008592aa2d915f52941036afbe72bac4ebe7ce186c + 4ddc53f118e0ddd4decd8cc809a5d7a720ce5f9d9a012fbf25e92c30e732dadba8f312b05e02976313ea64d9f807d43bc + bf8a3dc2f6b9e957d129e610c06d411e11743062dc1cf3ac289390ae4c8008592aa2d915f52941036afbe72bac4ebe7ce + 186c4ddc53f118e0ddd4decd8cccd0780ffff8081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb0 + 3bdb31008081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253 + 515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa139 + 8e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5 + f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3a + a1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2b + a8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f32 + 
2d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa + 9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f0 + 2f322d3aa1398e0cb03bdb318081b825bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb318081b82 + 5bfa9b2ba8f5f253515e7db09eb1ad3d4f02f322d3aa1398e0cb03bdb31cd0780ffff80b4f23ac50c8e67d9b280f2b31a + 5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd1885 + 44c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2 + b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd + 188544c5f9b0080b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9 + b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84 + d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e + 67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977aca + ac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac5 + 0c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b89297 + 7acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b104401 + 0000 +" +); + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +fn assert_last_event(generic_event: ::Event) { + let events = System::::events(); + let system_event: ::Event = generic_event.into(); + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +pub fn run_to_block(n: T::BlockNumber) { + while frame_system::Pallet::::block_number() < n { + crate::Pallet::::on_finalize(frame_system::Pallet::::block_number()); + frame_system::Pallet::::on_finalize(frame_system::Pallet::::block_number()); + frame_system::Pallet::::set_block_number( + frame_system::Pallet::::block_number() + One::one(), + ); + frame_system::Pallet::::on_initialize(frame_system::Pallet::::block_number()); + crate::Pallet::::on_initialize(frame_system::Pallet::::block_number()); + } +} + +benchmarks! { + store { + let l in 1 .. MaxTransactionSize::::get(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]) + verify { + assert!(!BlockTransactions::::get().is_empty()); + assert_last_event::(Event::Stored(0).into()); + } + + renew { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + TransactionStorage::::store( + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; MaxTransactionSize::::get() as usize], + )?; + run_to_block::(1u32.into()); + }: _(RawOrigin::Signed(caller.clone()), T::BlockNumber::zero(), 0) + verify { + assert_last_event::(Event::Renewed(0).into()); + } + + check_proof_max { + run_to_block::(1u32.into()); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for _ in 0 .. 
MaxBlockTransactions::::get() {
+			TransactionStorage::::store(
+				RawOrigin::Signed(caller.clone()).into(),
+				vec![0u8; MaxTransactionSize::::get() as usize],
+			)?;
+		}
+		run_to_block::(StoragePeriod::::get() + T::BlockNumber::one());
+		let random_hash = [0u8];
+		let mut encoded_proof = PROOF;
+		let proof = TransactionStorageProof::decode(&mut encoded_proof).unwrap();
+	}: check_proof(RawOrigin::None, proof)
+	verify {
+		assert_last_event::(Event::ProofChecked.into());
+	}
+}
+
+impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test);
diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs
new file mode 100644
index 0000000000000..2fe3c04e0229f
--- /dev/null
+++ b/frame/transaction-storage/src/lib.rs
@@ -0,0 +1,437 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Transaction storage pallet. Indexes transactions and manages storage proofs.
+
+// Ensure we're `no_std` when compiling for Wasm.
+#![cfg_attr(not(feature = "std"), no_std)]
+
+mod benchmarking;
+pub mod weights;
+
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+
+use codec::{Decode, Encode};
+use frame_support::{
+	dispatch::{Dispatchable, GetDispatchInfo},
+	traits::{Currency, OnUnbalanced, ReservableCurrency},
+};
+use sp_runtime::traits::{BlakeTwo256, Hash, One, Saturating, Zero};
+use sp_std::{prelude::*, result};
+use sp_transaction_storage_proof::{
+	encode_index, random_chunk, InherentError, TransactionStorageProof, CHUNK_SIZE,
+	DEFAULT_STORAGE_PERIOD, INHERENT_IDENTIFIER,
+};
+
+/// A type alias for the balance type from this pallet's point of view.
+type BalanceOf =
+	<::Currency as Currency<::AccountId>>::Balance;
+type NegativeImbalanceOf = <::Currency as Currency<
+	::AccountId,
+>>::NegativeImbalance;
+
+// Re-export pallet items so that they can be accessed from the crate namespace.
+pub use pallet::*;
+pub use weights::WeightInfo;
+
+/// Maximum bytes that can be stored in one transaction.
+// Setting a higher limit also requires raising the allocator limit.
+pub const DEFAULT_MAX_TRANSACTION_SIZE: u32 = 8 * 1024 * 1024;
+pub const DEFAULT_MAX_BLOCK_TRANSACTIONS: u32 = 512;
+
+/// State data for a stored transaction.
+#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, Eq, scale_info::TypeInfo)]
+pub struct TransactionInfo {
+	/// Chunk trie root.
+	chunk_root: ::Output,
+	/// Plain hash of indexed data.
+	content_hash: ::Output,
+	/// Size of indexed data in bytes.
+	size: u32,
+	/// Total number of chunks added in the block with this transaction. This
+	/// is used to find transaction info by block chunk index using binary search.
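+	/// For example (illustrative figures only): if a block stores three transactions
+	/// of 4, 2 and 3 chunks, the recorded `block_chunks` values are `[4, 6, 9]`, and a
+	/// randomly selected block chunk index of 5 resolves to the second transaction:
+	///
+	/// ```ignore
+	/// // Sketch of the cumulative-count lookup performed in `check_proof`.
+	/// let cumulative = [4u32, 6, 9];
+	/// let tx = match cumulative.binary_search(&5) {
+	/// 	Ok(i) | Err(i) => i,
+	/// };
+	/// assert_eq!(tx, 1); // chunk 5 falls in the second transaction's range 4..6
+	/// ```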
+	block_chunks: u32,
+}
+
+fn num_chunks(bytes: u32) -> u32 {
+	((bytes as u64 + CHUNK_SIZE as u64 - 1) / CHUNK_SIZE as u64) as u32
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching event type.
+		type Event: From> + IsType<::Event>;
+		/// A dispatchable call.
+		type Call: Parameter
+			+ Dispatchable
+			+ GetDispatchInfo
+			+ From>;
+		/// The currency trait.
+		type Currency: ReservableCurrency;
+		/// Handler for the unbalanced decrease when fees are burned.
+		type FeeDestination: OnUnbalanced>;
+		/// Weight information for extrinsics in this pallet.
+		type WeightInfo: WeightInfo;
+	}
+
+	#[pallet::error]
+	pub enum Error {
+		/// Insufficient account balance.
+		InsufficientFunds,
+		/// Invalid configuration.
+		NotConfigured,
+		/// Renewed extrinsic is not found.
+		RenewedNotFound,
+		/// Attempting to store an empty transaction.
+		EmptyTransaction,
+		/// Proof was not expected in this block.
+		UnexpectedProof,
+		/// Proof failed verification.
+		InvalidProof,
+		/// Missing storage proof.
+		MissingProof,
+		/// Unable to verify proof because state data is missing.
+		MissingStateData,
+		/// Double proof check in the block.
+		DoubleCheck,
+		/// Storage proof was not checked in the block.
+		ProofNotChecked,
+		/// Transaction is too large.
+		TransactionTooLarge,
+		/// Too many transactions in the block.
+		TooManyTransactions,
+		/// Attempted to call `store` outside of block execution.
+		BadContext,
+	}
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet(_);
+
+	#[pallet::hooks]
+	impl Hooks> for Pallet {
+		fn on_initialize(n: T::BlockNumber) -> Weight {
+			// Drop obsolete roots. The proof for `obsolete` will be checked later
+			// in this block, so we drop `obsolete` - 1.
+			let period = >::get();
+			let obsolete = n.saturating_sub(period.saturating_add(One::one()));
+			if obsolete > Zero::zero() {
+				>::remove(obsolete);
+				>::remove(obsolete);
+			}
+			// 2 writes in `on_initialize` and 2 writes + 2 reads in `on_finalize`
+			T::DbWeight::get().reads_writes(2, 4)
+		}
+
+		fn on_finalize(n: T::BlockNumber) {
+			assert!(
+				>::take() || {
+					// Proof is not required for early or empty blocks.
+					let number = >::block_number();
+					let period = >::get();
+					let target_number = number.saturating_sub(period);
+					target_number.is_zero() || >::get(target_number) == 0
+				},
+				"Storage proof must be checked once in the block"
+			);
+			// Insert new transactions
+			let transactions = >::take();
+			let total_chunks = transactions.last().map_or(0, |t| t.block_chunks);
+			if total_chunks != 0 {
+				>::insert(n, total_chunks);
+				>::insert(n, transactions);
+			}
+		}
+	}
+
+	#[pallet::call]
+	impl Pallet {
+		/// Index and store data on chain. Minimum data size is 1 byte, maximum is
+		/// `MaxTransactionSize`. Data will be removed after `STORAGE_PERIOD` blocks, unless `renew`
+		/// is called.
+		/// #
+		/// - n*log(n) of data size, as all data is pushed to an in-memory trie.
+		/// Additionally contains a DB write.
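+		/// As a rough illustration (assuming the default 256-byte `CHUNK_SIZE`), the
+		/// maximum 8 MiB payload is split into 8 * 1024 * 1024 / 256 = 32768 chunks
+		/// before the chunk trie root is computed:
+		///
+		/// ```ignore
+		/// // Sketch of the chunk arithmetic used by `num_chunks` (illustrative values).
+		/// let bytes: u32 = 8 * 1024 * 1024;
+		/// let chunk_size: u32 = 256; // assumed default CHUNK_SIZE
+		/// assert_eq!((bytes + chunk_size - 1) / chunk_size, 32768);
+		/// ```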
+		/// #
+		#[pallet::weight(T::WeightInfo::store(data.len() as u32))]
+		pub fn store(origin: OriginFor, data: Vec) -> DispatchResult {
+			ensure!(data.len() > 0, Error::::EmptyTransaction);
+			ensure!(
+				data.len() <= MaxTransactionSize::::get() as usize,
+				Error::::TransactionTooLarge
+			);
+			let sender = ensure_signed(origin)?;
+			Self::apply_fee(sender, data.len() as u32)?;
+
+			// Chunk data and compute storage root
+			let chunk_count = num_chunks(data.len() as u32);
+			let chunks = data.chunks(CHUNK_SIZE).map(|c| c.to_vec()).collect();
+			let root = sp_io::trie::blake2_256_ordered_root(chunks);
+
+			let content_hash = sp_io::hashing::blake2_256(&data);
+			let extrinsic_index = >::extrinsic_index()
+				.ok_or_else(|| Error::::BadContext)?;
+			sp_io::transaction_index::index(extrinsic_index, data.len() as u32, content_hash);
+
+			let mut index = 0;
+			>::mutate(|transactions| {
+				if transactions.len() + 1 > MaxBlockTransactions::::get() as usize {
+					return Err(Error::::TooManyTransactions)
+				}
+				let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunk_count;
+				index = transactions.len() as u32;
+				transactions.push(TransactionInfo {
+					chunk_root: root,
+					size: data.len() as u32,
+					content_hash: content_hash.into(),
+					block_chunks: total_chunks,
+				});
+				Ok(())
+			})?;
+			Self::deposit_event(Event::Stored(index));
+			Ok(())
+		}
+
+		/// Renew previously stored data. Parameters are the block number that contains
+		/// the previous `store` or `renew` call and the transaction index within that block.
+		/// The transaction index is emitted in the `Stored` or `Renewed` event.
+		/// Applies the same fees as `store`.
+		/// #
+		/// - Constant.
+		/// #
+		#[pallet::weight(T::WeightInfo::renew())]
+		pub fn renew(
+			origin: OriginFor,
+			block: T::BlockNumber,
+			index: u32,
+		) -> DispatchResultWithPostInfo {
+			let sender = ensure_signed(origin)?;
+			let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?;
+			let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?;
+			Self::apply_fee(sender, info.size)?;
+
+			let extrinsic_index = >::extrinsic_index().unwrap();
+			sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into());
+
+			let mut index = 0;
+			>::mutate(|transactions| {
+				if transactions.len() + 1 > MaxBlockTransactions::::get() as usize {
+					return Err(Error::::TooManyTransactions)
+				}
+				let chunks = num_chunks(info.size);
+				let total_chunks = transactions.last().map_or(0, |t| t.block_chunks) + chunks;
+				index = transactions.len() as u32;
+				transactions.push(TransactionInfo {
+					chunk_root: info.chunk_root,
+					size: info.size,
+					content_hash: info.content_hash,
+					block_chunks: total_chunks,
+				});
+				Ok(())
+			})?;
+			Self::deposit_event(Event::Renewed(index));
+			Ok(().into())
+		}
+
+		/// Check storage proof for block number `block_number() - StoragePeriod`.
+		/// If such a block does not exist, the proof is expected to be `None`.
+		/// #
+		/// - Linear w.r.t. the number of indexed transactions in the proved block for random
+		///   probing.
+		/// There's a DB read for each transaction.
+		/// Here we assume a maximum of 100 probed transactions.
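+		/// The probed chunk is derived deterministically from the parent hash, roughly
+		/// as follows (a simplified sketch of `random_chunk`, not its exact definition):
+		///
+		/// ```ignore
+		/// // Assumes total_chunks > 0, which the caller checks before probing.
+		/// fn select_chunk(parent_hash: &[u8], total_chunks: u32) -> u32 {
+		/// 	let mut buf = [0u8; 8];
+		/// 	buf.copy_from_slice(&parent_hash[0..8]);
+		/// 	(u64::from_be_bytes(buf) % total_chunks as u64) as u32
+		/// }
+		/// ```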
+ /// # + #[pallet::weight((T::WeightInfo::check_proof_max(), DispatchClass::Mandatory))] + pub fn check_proof( + origin: OriginFor, + proof: TransactionStorageProof, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + ensure!(!ProofChecked::::get(), Error::::DoubleCheck); + let number = >::block_number(); + let period = >::get(); + let target_number = number.saturating_sub(period); + ensure!(!target_number.is_zero(), Error::::UnexpectedProof); + let total_chunks = >::get(target_number); + ensure!(total_chunks != 0, Error::::UnexpectedProof); + let parent_hash = >::parent_hash(); + let selected_chunk_index = random_chunk(parent_hash.as_ref(), total_chunks); + let (info, chunk_index) = match >::get(target_number) { + Some(infos) => { + let index = match infos + .binary_search_by_key(&selected_chunk_index, |info| info.block_chunks) + { + Ok(index) => index, + Err(index) => index, + }; + let info = + infos.get(index).ok_or_else(|| Error::::MissingStateData)?.clone(); + let chunks = num_chunks(info.size); + let prev_chunks = info.block_chunks - chunks; + (info, selected_chunk_index - prev_chunks) + }, + None => Err(Error::::MissingStateData)?, + }; + ensure!( + sp_io::trie::blake2_256_verify_proof( + info.chunk_root, + &proof.proof, + &encode_index(chunk_index), + &proof.chunk, + ), + Error::::InvalidProof + ); + ProofChecked::::put(true); + Self::deposit_event(Event::ProofChecked); + Ok(().into()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Stored data under specified index. + Stored(u32), + /// Renewed data under specified index. + Renewed(u32), + /// Storage proof was successfully checked. + ProofChecked, + } + + /// Collection of transaction metadata by block number. + #[pallet::storage] + #[pallet::getter(fn transaction_roots)] + pub(super) type Transactions = + StorageMap<_, Blake2_128Concat, T::BlockNumber, Vec, OptionQuery>; + + /// Count indexed chunks for each block. + #[pallet::storage] + pub(super) type ChunkCount = + StorageMap<_, Blake2_128Concat, T::BlockNumber, u32, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn byte_fee)] + /// Storage fee per byte. + pub(super) type ByteFee = StorageValue<_, BalanceOf>; + + #[pallet::storage] + #[pallet::getter(fn entry_fee)] + /// Storage fee per transaction. + pub(super) type EntryFee = StorageValue<_, BalanceOf>; + + #[pallet::storage] + #[pallet::getter(fn max_transaction_size)] + /// Maximum data set in a single transaction in bytes. + pub(super) type MaxTransactionSize = StorageValue<_, u32, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn max_block_transactions)] + /// Maximum number of indexed transactions in the block. + pub(super) type MaxBlockTransactions = StorageValue<_, u32, ValueQuery>; + + /// Storage period for data in blocks. Should match `sp_storage_proof::DEFAULT_STORAGE_PERIOD` + /// for block authoring. + #[pallet::storage] + pub(super) type StoragePeriod = StorageValue<_, T::BlockNumber, ValueQuery>; + + // Intermediates + #[pallet::storage] + pub(super) type BlockTransactions = + StorageValue<_, Vec, ValueQuery>; + + /// Was the proof checked in this block? 
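+	/// Set to `true` by a successful `check_proof` and taken (reset) by `on_finalize`,
+	/// which panics if a required proof was never checked in the block.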
+ #[pallet::storage] + pub(super) type ProofChecked = StorageValue<_, bool, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub byte_fee: BalanceOf, + pub entry_fee: BalanceOf, + pub storage_period: T::BlockNumber, + pub max_block_transactions: u32, + pub max_transaction_size: u32, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + byte_fee: 10u32.into(), + entry_fee: 1000u32.into(), + storage_period: DEFAULT_STORAGE_PERIOD.into(), + max_block_transactions: DEFAULT_MAX_BLOCK_TRANSACTIONS, + max_transaction_size: DEFAULT_MAX_TRANSACTION_SIZE, + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.byte_fee); + >::put(&self.entry_fee); + >::put(&self.max_transaction_size); + >::put(&self.max_block_transactions); + >::put(&self.storage_period); + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let proof = data + .get_data::(&Self::INHERENT_IDENTIFIER) + .unwrap_or(None); + proof.map(|proof| Call::check_proof { proof }) + } + + fn check_inherent( + _call: &Self::Call, + _data: &InherentData, + ) -> result::Result<(), Self::Error> { + Ok(()) + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::check_proof { .. }) + } + } + + impl Pallet { + fn apply_fee(sender: T::AccountId, size: u32) -> DispatchResult { + let byte_fee = ByteFee::::get().ok_or(Error::::NotConfigured)?; + let entry_fee = EntryFee::::get().ok_or(Error::::NotConfigured)?; + let fee = byte_fee.saturating_mul(size.into()).saturating_add(entry_fee); + ensure!(T::Currency::can_slash(&sender, fee), Error::::InsufficientFunds); + let (credit, _) = T::Currency::slash(&sender, fee); + T::FeeDestination::on_unbalanced(credit); + Ok(()) + } + } +} diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs new file mode 100644 index 0000000000000..38d14129d76e2 --- /dev/null +++ b/frame/transaction-storage/src/mock.rs @@ -0,0 +1,136 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for transaction-storage pallet. + +use crate as pallet_transaction_storage; +use crate::TransactionStorageProof; +use frame_support::{ + parameter_types, + traits::{OnFinalize, OnInitialize}, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +pub type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. 
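+// The genesis config below (`new_test_ext`) sets `byte_fee = 2` and `entry_fee = 200`,
+// so storing 2000 bytes burns 2000 * 2 + 200 = 4200 units, matching the balance
+// assertions in the tests.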
+frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, + TransactionStorage: pallet_transaction_storage::{ + Pallet, Call, Storage, Config, Inherent, Event + }, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = (); +} + +impl pallet_transaction_storage::Config for Test { + type Event = Event; + type Call = Call; + type Currency = Balances; + type FeeDestination = (); + type WeightInfo = (); +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = GenesisConfig { + system: Default::default(), + balances: pallet_balances::GenesisConfig:: { + balances: vec![(1, 1000000000), (2, 100), (3, 100), (4, 100)], + }, + transaction_storage: pallet_transaction_storage::GenesisConfig:: { + storage_period: 10, + byte_fee: 2, + entry_fee: 200, + max_block_transactions: crate::DEFAULT_MAX_BLOCK_TRANSACTIONS, + max_transaction_size: crate::DEFAULT_MAX_TRANSACTION_SIZE, + }, + } + .build_storage() + .unwrap(); + t.into() +} + +pub fn run_to_block(n: u64, f: impl Fn() -> Option) { + while System::block_number() < n { + if let Some(proof) = f() { + TransactionStorage::check_proof(Origin::none(), proof).unwrap(); + } + TransactionStorage::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + TransactionStorage::on_initialize(System::block_number()); + } +} diff --git a/frame/transaction-storage/src/tests.rs b/frame/transaction-storage/src/tests.rs new file mode 100644 index 0000000000000..c443f51ffb50f --- /dev/null +++ b/frame/transaction-storage/src/tests.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Tests for transaction-storage pallet.
+
+use super::{Pallet as TransactionStorage, *};
+use crate::mock::*;
+use frame_support::{assert_noop, assert_ok};
+use frame_system::RawOrigin;
+use sp_transaction_storage_proof::registration::build_proof;
+
+const MAX_DATA_SIZE: u32 = DEFAULT_MAX_TRANSACTION_SIZE;
+
+#[test]
+fn discards_data() {
+	new_test_ext().execute_with(|| {
+		run_to_block(1, || None);
+		let caller = 1;
+		assert_ok!(TransactionStorage::::store(
+			RawOrigin::Signed(caller.clone()).into(),
+			vec![0u8; 2000 as usize]
+		));
+		assert_ok!(TransactionStorage::::store(
+			RawOrigin::Signed(caller.clone()).into(),
+			vec![0u8; 2000 as usize]
+		));
+		let proof_provider = || {
+			let block_num = >::block_number();
+			if block_num == 11 {
+				let parent_hash = >::parent_hash();
+				Some(
+					build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000], vec![0u8; 2000]])
+						.unwrap(),
+				)
+			} else {
+				None
+			}
+		};
+		run_to_block(11, proof_provider);
+		assert!(Transactions::::get(1).is_some());
+		let transactions = Transactions::::get(1).unwrap();
+		assert_eq!(transactions.len(), 2);
+		assert_eq!(ChunkCount::::get(1), 16);
+		run_to_block(12, proof_provider);
+		assert!(Transactions::::get(1).is_none());
+		assert_eq!(ChunkCount::::get(1), 0);
+	});
+}
+
+#[test]
+fn burns_fee() {
+	new_test_ext().execute_with(|| {
+		run_to_block(1, || None);
+		let caller = 1;
+		assert_noop!(
+			TransactionStorage::::store(
+				RawOrigin::Signed(5).into(),
+				vec![0u8; 2000 as usize]
+			),
+			Error::::InsufficientFunds,
+		);
+		assert_ok!(TransactionStorage::::store(
+			RawOrigin::Signed(caller.clone()).into(),
+			vec![0u8; 2000 as usize]
+		));
+		assert_eq!(Balances::free_balance(1), 1_000_000_000 - 2000 * 2 - 200);
+	});
+}
+
+#[test]
+fn checks_proof() {
+	new_test_ext().execute_with(|| {
+		run_to_block(1, || None);
+		let caller = 1;
+		assert_ok!(TransactionStorage::::store(
+			RawOrigin::Signed(caller.clone()).into(),
+			vec![0u8; MAX_DATA_SIZE as usize]
+		));
+		run_to_block(10, || None);
+		let parent_hash = >::parent_hash();
+		let proof =
+			build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap();
+		assert_noop!(
+			TransactionStorage::::check_proof(Origin::none(), proof,),
+			Error::::UnexpectedProof,
+		);
+		run_to_block(11, || None);
+		let parent_hash = >::parent_hash();
+
+		let invalid_proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; 1000]]).unwrap();
+		assert_noop!(
+			TransactionStorage::::check_proof(Origin::none(), invalid_proof,),
+			Error::::InvalidProof,
+		);
+
+		let proof =
+			build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap();
+		assert_ok!(TransactionStorage::::check_proof(Origin::none(), proof));
+	});
+}
+
+#[test]
+fn renews_data() {
+	new_test_ext().execute_with(|| {
+		run_to_block(1, || None);
+		let caller = 1;
+		assert_ok!(TransactionStorage::::store(
+			RawOrigin::Signed(caller.clone()).into(),
+			vec![0u8; 2000]
+		));
+		let info = BlockTransactions::::get().last().unwrap().clone();
+		run_to_block(6, || None);
+		assert_ok!(TransactionStorage::::renew(
+			RawOrigin::Signed(caller.clone()).into(),
+			1, // block
+			0, // transaction
+		));
+		assert_eq!(Balances::free_balance(1), 1_000_000_000 - 4000 * 2 - 200 * 2);
+		let proof_provider = || {
+			let block_num = >::block_number();
+			if block_num == 11 || block_num == 16 {
+				let parent_hash = >::parent_hash();
+				Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000]]).unwrap())
+			} else {
+				None
+			}
+ }; + run_to_block(16, proof_provider); + assert!(Transactions::::get(1).is_none()); + assert_eq!(Transactions::::get(6).unwrap().get(0), Some(info).as_ref()); + run_to_block(17, proof_provider); + assert!(Transactions::::get(6).is_none()); + }); +} diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs new file mode 100644 index 0000000000000..104b18d3f92ce --- /dev/null +++ b/frame/transaction-storage/src/weights.rs @@ -0,0 +1,128 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_transaction_storage +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_transaction_storage +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/transaction-storage/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_transaction_storage. +pub trait WeightInfo { + fn store(l: u32, ) -> Weight; + fn renew() -> Weight; + fn check_proof_max() -> Weight; +} + +/// Weights for pallet_transaction_storage using the Substrate node and recommended hardware. 
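+/// For example, with these benchmarked values `store` costs roughly
+/// `8_000 * data_len` plus 6 DB reads and 1 DB write:
+///
+/// ```ignore
+/// // Illustrative only: per-byte weight for storing 1 KiB, ignoring DB constants.
+/// let per_byte: u64 = 8_000;
+/// assert_eq!(per_byte * 1024, 8_192_000);
+/// ```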
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: TransactionStorage MaxTransactionSize (r:1 w:0) + // Storage: TransactionStorage ByteFee (r:1 w:0) + // Storage: TransactionStorage EntryFee (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + // Storage: TransactionStorage BlockTransactions (r:1 w:1) + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) + fn store(l: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((8_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: TransactionStorage Transactions (r:1 w:0) + // Storage: TransactionStorage ByteFee (r:1 w:0) + // Storage: TransactionStorage EntryFee (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + // Storage: TransactionStorage BlockTransactions (r:1 w:1) + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) + fn renew() -> Weight { + (67_532_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: TransactionStorage ProofChecked (r:1 w:1) + // Storage: TransactionStorage StoragePeriod (r:1 w:0) + // Storage: TransactionStorage ChunkCount (r:1 w:0) + // Storage: System ParentHash (r:1 w:0) + // Storage: TransactionStorage Transactions (r:1 w:0) + fn check_proof_max() -> Weight { + (182_886_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: TransactionStorage MaxTransactionSize (r:1 w:0) + // Storage: TransactionStorage ByteFee (r:1 w:0) + // Storage: TransactionStorage EntryFee (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + // Storage: TransactionStorage BlockTransactions (r:1 w:1) + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) + fn store(l: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((8_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: TransactionStorage Transactions (r:1 w:0) + // Storage: TransactionStorage ByteFee (r:1 w:0) + // Storage: TransactionStorage EntryFee (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + // Storage: TransactionStorage BlockTransactions (r:1 w:1) + // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) + fn renew() -> Weight { + (67_532_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: TransactionStorage ProofChecked (r:1 w:1) + // Storage: TransactionStorage StoragePeriod (r:1 w:0) + // Storage: TransactionStorage ChunkCount (r:1 w:0) + // Storage: System ParentHash (r:1 w:0) + // Storage: TransactionStorage Transactions (r:1 w:0) + fn check_proof_max() -> Weight { + (182_886_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index fd2d103e9f335..b2991f3febcad 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"pallet-treasury" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,26 +13,33 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = [ + "derive", + "max-encoded-len", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.126", features = ["derive"], optional = true } +impl-trait-for-tuples = "0.2.1" + +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", + "serde", "sp-std/std", "sp-runtime/std", "frame-support/std", @@ -44,3 +51,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/treasury/README.md b/frame/treasury/README.md index 424b8e0eedf99..4945d79d14296 100644 --- a/frame/treasury/README.md +++ b/frame/treasury/README.md @@ -1,84 +1,25 @@ -# Treasury Module +# Treasury Pallet -The Treasury module provides a "pot" of funds that can be managed by stakeholders in the -system and a structure for making spending proposals from this pot. - -- [`treasury::Trait`](https://docs.rs/pallet-treasury/latest/pallet_treasury/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-treasury/latest/pallet_treasury/enum.Call.html) +The Treasury pallet provides a "pot" of funds that can be managed by stakeholders in the system and +a structure for making spending proposals from this pot. ## Overview -The Treasury Module itself provides the pot to store funds, and a means for stakeholders to -propose, approve, and deny expenditures. The chain will need to provide a method (e.g. -inflation, fees) for collecting funds. 
-
-By way of example, the Council could vote to fund the Treasury with a portion of the block
-reward and use the funds to pay developers.
-
-### Tipping
-
-A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be
-given without first having a pre-determined stakeholder group come to consensus on how much
-should be paid.
-
-A group of `Tippers` is determined through the config `Trait`. After half of these have declared
-some amount that they believe a particular reported reason deserves, then a countdown period is
-entered where any remaining members can declare their tip amounts also. After the close of the
-countdown period, the median of all declared tips is paid to the reported beneficiary, along
-with any finders fee, in case of a public (and bonded) original report.
-
-### Bounty
-
-A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that
-needs to be executed for a predefined Treasury amount to be paid out. A curator is assigned after
-the bounty is approved and funded by Council, to be delegated
-with the responsibility of assigning a payout address once the specified set of objectives is completed.
-
-After the Council has activated a bounty, it delegates the work that requires expertise to a curator
-in exchange of a deposit. Once the curator accepts the bounty, they
-get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout
-address, the curator fee and the return of the curator deposit. The
-delay allows for intervention through regular democracy. The Council gets to unassign the curator,
-resulting in a new curator election. The Council also gets to cancel
-the bounty if deemed necessary before assigning a curator or once the bounty is active or payout
-is pending, resulting in the slash of the curator's deposit.
+The Treasury Pallet itself provides the pot to store funds, and a means for stakeholders to propose,
+approve, and deny expenditures. The chain will need to provide a method (e.g. inflation, fees) for
+collecting funds.
+
+By way of example, the Council could vote to fund the Treasury with a portion of the block reward
+and use the funds to pay developers.
 
 ### Terminology
 
 - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary.
-- **Beneficiary:** An account who will receive the funds from a proposal iff
-the proposal is approved.
-- **Deposit:** Funds that a proposer must lock when making a proposal. The
-deposit will be returned or slashed if the proposal is approved or rejected
-respectively.
-- **Pot:** Unspent funds accumulated by the treasury module.
-
-Tipping protocol:
-- **Tipping:** The process of gathering declarations of amounts to tip and taking the median
- amount to be transferred from the treasury to a beneficiary account.
-- **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a
- particular individual (identified by an account ID) is worthy of a recognition by the
- treasury.
-- **Finder:** The original public reporter of some reason for tipping.
-- **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip,
- rather than the main beneficiary.
-
-Bounty:
-- **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by
-the Treasury.
-- **Proposer:** An account proposing a bounty spending.
-- **Curator:** An account managing the bounty and assigning a payout address receiving the reward
-for the completion of work.
-- **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on -deposit per byte within the bounty description. -- **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit -is returned when/if the bounty is completed. -- **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is -rewarded. -- **Payout address:** The account to which the total or part of the bounty is assigned to. -- **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. -- **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. +- **Beneficiary:** An account who will receive the funds from a proposal if the proposal is + approved. +- **Deposit:** Funds that a proposer must lock when making a proposal. The deposit will be returned + or slashed if the proposal is approved or rejected respectively. +- **Pot:** Unspent funds accumulated by the treasury pallet. ## Interface @@ -86,33 +27,5 @@ rewarded. General spending/proposal protocol: - `propose_spend` - Make a spending proposal and stake the required deposit. -- `set_pot` - Set the spendable balance of funds. -- `configure` - Configure the module's proposal requirements. - `reject_proposal` - Reject a proposal, slashing the deposit. - `approve_proposal` - Accept the proposal, returning the deposit. - -Tipping protocol: -- `report_awesome` - Report something worthy of a tip and register for a finders fee. -- `retract_tip` - Retract a previous (finders fee registered) report. -- `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. -- `tip` - Declare or redeclare an amount to tip for a particular reason. -- `close_tip` - Close and pay out a tip. - -Bounty protocol: -- `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of -tasks and stake the required deposit. -- `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. -- `propose_curator` - Assign an account to a bounty as candidate curator. -- `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. -- `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. -- `award_bounty` - Close and pay out the specified amount for the completed work. -- `claim_bounty` - Claim a specific bounty amount from the Payout Address. -- `unassign_curator` - Unassign an accepted curator from a specific earmark. -- `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. - - -## GenesisConfig - -The Treasury module depends on the [`GenesisConfig`](https://docs.rs/pallet-treasury/latest/pallet_treasury/struct.GenesisConfig.html). - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 1d6d7c6afceb7..2fe0bad704f2b 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,154 +19,45 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; +use super::{Pallet as Treasury, *}; +use frame_benchmarking::{account, benchmarks_instance_pallet, impl_benchmark_test_suite}; +use frame_support::{ensure, traits::OnInitialize}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller}; -use frame_support::traits::OnInitialize; - -use crate::Module as Treasury; const SEED: u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. -fn setup_proposal, I: Instance>(u: u32) -> ( - T::AccountId, - BalanceOf, - ::Source, -) { +fn setup_proposal, I: 'static>( + u: u32, +) -> (T::AccountId, BalanceOf, ::Source) { let caller = account("caller", u, SEED); - let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100.into()); + let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100u32.into()); let _ = T::Currency::make_free_balance_be(&caller, value); let beneficiary = account("beneficiary", u, SEED); let beneficiary_lookup = T::Lookup::unlookup(beneficiary); (caller, value, beneficiary_lookup) } -// Create the pre-requisite information needed to create a `report_awesome`. -fn setup_awesome, I: Instance>(length: u32) -> (T::AccountId, Vec, T::AccountId) { - let caller = whitelisted_caller(); - let value = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * length.into() - + T::Currency::minimum_balance(); - let _ = T::Currency::make_free_balance_be(&caller, value); - let reason = vec![0; length as usize]; - let awesome_person = account("awesome", 0, SEED); - (caller, reason, awesome_person) -} - -// Create the pre-requisite information needed to call `tip_new`. -fn setup_tip, I: Instance>(r: u32, t: u32) -> - Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> -{ - let tippers_count = T::Tippers::count(); - - for i in 0 .. t { - let member = account("member", i, SEED); - T::Tippers::add(&member); - ensure!(T::Tippers::contains(&member), "failed to add tipper"); - } - - ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); - let caller = account("member", t - 1, SEED); - let reason = vec![0; r as usize]; - let beneficiary = account("beneficiary", t, SEED); - let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Ok((caller, reason, beneficiary, value)) -} - -// Create `t` new tips for the tip proposal with `hash`. -// This function automatically makes the tip able to close. -fn create_tips, I: Instance>(t: u32, hash: T::Hash, value: BalanceOf) -> - Result<(), &'static str> -{ - for i in 0 .. t { - let caller = account("member", i, SEED); - ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); - Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; - } - Tips::::mutate(hash, |maybe_tip| { - if let Some(open_tip) = maybe_tip { - open_tip.closes = Some(T::BlockNumber::zero()); - } - }); - Ok(()) -} - // Create proposals that are approved for use in `on_initialize`. -fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &'static str> { - for i in 0 .. 
n { +fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'static str> { + for i in 0..n { let (caller, value, lookup) = setup_proposal::(i); - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - lookup - )?; - let proposal_id = >::get() - 1; + Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; + let proposal_id = >::get() - 1; Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; } - ensure!(>::get().len() == n as usize, "Not all approved"); + ensure!(>::get().len() == n as usize, "Not all approved"); Ok(()) } -// Create bounties that are approved for use in `on_initialize`. -fn create_approved_bounties, I: Instance>(n: u32) -> Result<(), &'static str> { - for i in 0 .. n { - let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - } - ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); - Ok(()) -} - -// Create the pre-requisite information needed to create a treasury `propose_bounty`. -fn setup_bounty, I: Instance>(u: u32, d: u32) -> ( - T::AccountId, - T::AccountId, - BalanceOf, - BalanceOf, - Vec, -) { - let caller = account("caller", u, SEED); - let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100.into()); - let fee = value / 2.into(); - let deposit = T::BountyDepositBase::get() + T::DataDepositPerByte::get() * MAX_BYTES.into(); - let _ = T::Currency::make_free_balance_be(&caller, deposit); - let curator = account("curator", u, SEED); - let _ = T::Currency::make_free_balance_be(&curator, fee / 2.into()); - let reason = vec![0; d as usize]; - (caller, curator, fee, value, reason) -} - -fn create_bounty, I: Instance>() -> Result<( - ::Source, - BountyIndex, -), &'static str> { - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup.clone(), fee)?; - Treasury::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; - Ok((curator_lookup, bounty_id)) -} - -fn setup_pod_account, I: Instance>() { +fn setup_pot_account, I: 'static>() { let pot_account = Treasury::::account_id(); - let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into()); let _ = T::Currency::make_free_balance_be(&pot_account, value); } -const MAX_BYTES: u32 = 16384; -const MAX_TIPPERS: u32 = 100; - -benchmarks_instance! { - _ { } - +benchmarks_instance_pallet! { propose_spend { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); // Whitelist caller account from further DB operations. @@ -185,6 +76,8 @@ benchmarks_instance! { }: _(RawOrigin::Root, proposal_id) approve_proposal { + let p in 0 .. T::MaxApprovals::get() - 1; + create_approved_proposals::(p)?; let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); Treasury::::propose_spend( RawOrigin::Signed(caller).into(), @@ -194,224 +87,13 @@ benchmarks_instance! 
{ let proposal_id = Treasury::::proposal_count() - 1; }: _(RawOrigin::Root, proposal_id) - report_awesome { - let r in 0 .. MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, awesome_person) - - retract_tip { - let r = MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - Treasury::::report_awesome( - RawOrigin::Signed(caller.clone()).into(), - reason.clone(), - awesome_person.clone() - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash) - - tip_new { - let r in 0 .. MAX_BYTES; - let t in 1 .. MAX_TIPPERS; - - let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, beneficiary, value) - - tip { - let t in 1 .. MAX_TIPPERS; - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t - 1, hash.clone(), value)?; - let caller = account("member", t - 1, SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash, value) - - close_tip { - let t in 1 .. MAX_TIPPERS; - - // Make sure pot is funded - setup_pod_account::(); - - // Set up a new tip proposal - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - - // Create a bunch of tips - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t, hash.clone(), value)?; - - let caller = account("caller", t, SEED); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), hash) - - propose_bounty { - let d in 0 .. 
MAX_BYTES; - - let (caller, curator, fee, value, description) = setup_bounty::(0, d); - }: _(RawOrigin::Signed(caller), value, description) - - approve_bounty { - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - }: _(RawOrigin::Root, bounty_id) - - propose_curator { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) - - // Worst case when curator is inactive and any sender unassigns the curator. - unassign_curator { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; - frame_system::Module::::set_block_number(T::BountyUpdatePeriod::get() + 1.into()); - let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), bounty_id) - - accept_curator { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); - let curator_lookup = T::Lookup::unlookup(curator.clone()); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - Treasury::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; - Treasury::::on_initialize(T::BlockNumber::zero()); - Treasury::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; - }: _(RawOrigin::Signed(curator), bounty_id) - - award_bounty { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); - }: _(RawOrigin::Signed(curator), bounty_id, beneficiary) - - claim_bounty { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - - let beneficiary = T::Lookup::unlookup(account("beneficiary", 0, SEED)); - Treasury::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; - - frame_system::Module::::set_block_number(T::BountyDepositPayoutDelay::get()); - - }: _(RawOrigin::Signed(curator), bounty_id) - - close_bounty_proposed { - setup_pod_account::(); - let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); - Treasury::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; - let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) - - close_bounty_active { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) - - extend_bounty_expiry { - setup_pod_account::(); - let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(T::BlockNumber::zero()); - - let bounty_id = 
BountyCount::::get() - 1; - let curator = T::Lookup::lookup(curator_lookup)?; - }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) - on_initialize_proposals { - let p in 0 .. 100; - setup_pod_account::(); + let p in 0 .. T::MaxApprovals::get(); + setup_pot_account::(); create_approved_proposals::(p)?; }: { Treasury::::on_initialize(T::BlockNumber::zero()); } - - on_initialize_bounties { - let b in 0 .. 100; - setup_pod_account::(); - create_approved_bounties::(b)?; - }: { - Treasury::::on_initialize(T::BlockNumber::zero()); - } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_spend::()); - assert_ok!(test_benchmark_reject_proposal::()); - assert_ok!(test_benchmark_approve_proposal::()); - assert_ok!(test_benchmark_report_awesome::()); - assert_ok!(test_benchmark_retract_tip::()); - assert_ok!(test_benchmark_tip_new::()); - assert_ok!(test_benchmark_tip::()); - assert_ok!(test_benchmark_close_tip::()); - assert_ok!(test_benchmark_propose_bounty::()); - assert_ok!(test_benchmark_approve_bounty::()); - assert_ok!(test_benchmark_propose_curator::()); - assert_ok!(test_benchmark_unassign_curator::()); - assert_ok!(test_benchmark_accept_curator::()); - assert_ok!(test_benchmark_award_bounty::()); - assert_ok!(test_benchmark_claim_bounty::()); - assert_ok!(test_benchmark_close_bounty_proposed::()); - assert_ok!(test_benchmark_close_bounty_active::()); - assert_ok!(test_benchmark_extend_bounty_expiry::()); - assert_ok!(test_benchmark_on_initialize_proposals::()); - assert_ok!(test_benchmark_on_initialize_bounties::()); - }); - } -} +impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/treasury/src/default_weights.rs b/frame/treasury/src/default_weights.rs deleted file mode 100644 index bf4f5fb789a54..0000000000000 --- a/frame/treasury/src/default_weights.rs +++ /dev/null @@ -1,138 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn propose_spend() -> Weight { - (79604000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn reject_proposal() -> Weight { - (61001000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn approve_proposal() -> Weight { - (17835000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn report_awesome(r: u32, ) -> Weight { - (101602000 as Weight) - .saturating_add((2000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn retract_tip() -> Weight { - (82970000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn tip_new(r: u32, t: u32, ) -> Weight { - (63995000 as Weight) - .saturating_add((2000 as Weight).saturating_mul(r as Weight)) - .saturating_add((153000 as Weight).saturating_mul(t as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn tip(t: u32, ) -> Weight { - (46765000 as Weight) - .saturating_add((711000 as Weight).saturating_mul(t as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn close_tip(t: u32, ) -> Weight { - (160874000 as Weight) - .saturating_add((379000 as Weight).saturating_mul(t as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn propose_bounty(d: u32, ) -> Weight { - (86198000 as Weight) - .saturating_add((1000 as Weight).saturating_mul(d as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - } - fn approve_bounty() -> Weight { - (23063000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn propose_curator() -> Weight { - (18890000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn unassign_curator() -> Weight { - (66768000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn accept_curator() -> Weight { - (69131000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn award_bounty() -> Weight { - (48184000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn claim_bounty() -> Weight { - (243104000 as Weight) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(5 as Weight)) - } - fn close_bounty_proposed() -> Weight { - (65917000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn close_bounty_active() -> Weight { - (157232000 as Weight) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(4 
as Weight)) - } - fn extend_bounty_expiry() -> Weight { - (46216000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn on_initialize_proposals(p: u32, ) -> Weight { - (119765000 as Weight) - .saturating_add((108368000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(DbWeight::get().writes(2 as Weight)) - .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) - } - fn on_initialize_bounties(b: u32, ) -> Weight { - (112536000 as Weight) - .saturating_add((107132000 as Weight).saturating_mul(b as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(DbWeight::get().writes(2 as Weight)) - .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) - } -} diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 7173f7c524fc4..646baa99b99b0 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,87 +15,32 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Treasury Module +//! # Treasury Pallet //! -//! The Treasury module provides a "pot" of funds that can be managed by stakeholders in the -//! system and a structure for making spending proposals from this pot. +//! The Treasury pallet provides a "pot" of funds that can be managed by stakeholders in the system +//! and a structure for making spending proposals from this pot. //! -//! - [`treasury::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! The Treasury Module itself provides the pot to store funds, and a means for stakeholders to +//! The Treasury Pallet itself provides the pot to store funds, and a means for stakeholders to //! propose, approve, and deny expenditures. The chain will need to provide a method (e.g. //! inflation, fees) for collecting funds. //! //! By way of example, the Council could vote to fund the Treasury with a portion of the block //! reward and use the funds to pay developers. //! -//! ### Tipping -//! -//! A separate subsystem exists to allow for an agile "tipping" process, whereby a reward may be -//! given without first having a pre-determined stakeholder group come to consensus on how much -//! should be paid. -//! -//! A group of `Tippers` is determined through the config `Trait`. After half of these have declared -//! some amount that they believe a particular reported reason deserves, then a countdown period is -//! entered where any remaining members can declare their tip amounts also. After the close of the -//! countdown period, the median of all declared tips is paid to the reported beneficiary, along -//! with any finders fee, in case of a public (and bonded) original report. -//! -//! ### Bounty -//! -//! A Bounty Spending is a reward for a specified body of work - or specified set of objectives - that -//! needs to be executed for a predefined Treasury amount to be paid out. 
A curator is assigned after -//! the bounty is approved and funded by Council, to be delegated -//! with the responsibility of assigning a payout address once the specified set of objectives is completed. -//! -//! After the Council has activated a bounty, it delegates the work that requires expertise to a curator -//! in exchange of a deposit. Once the curator accepts the bounty, they -//! get to close the Active bounty. Closing the Active bounty enacts a delayed payout to the payout -//! address, the curator fee and the return of the curator deposit. The -//! delay allows for intervention through regular democracy. The Council gets to unassign the curator, -//! resulting in a new curator election. The Council also gets to cancel -//! the bounty if deemed necessary before assigning a curator or once the bounty is active or payout -//! is pending, resulting in the slash of the curator's deposit. -//! //! //! ### Terminology //! //! - **Proposal:** A suggestion to allocate funds from the pot to a beneficiary. -//! - **Beneficiary:** An account who will receive the funds from a proposal iff -//! the proposal is approved. -//! - **Deposit:** Funds that a proposer must lock when making a proposal. The -//! deposit will be returned or slashed if the proposal is approved or rejected -//! respectively. -//! - **Pot:** Unspent funds accumulated by the treasury module. -//! -//! Tipping protocol: -//! - **Tipping:** The process of gathering declarations of amounts to tip and taking the median -//! amount to be transferred from the treasury to a beneficiary account. -//! - **Tip Reason:** The reason for a tip; generally a URL which embodies or explains why a -//! particular individual (identified by an account ID) is worthy of a recognition by the -//! treasury. -//! - **Finder:** The original public reporter of some reason for tipping. -//! - **Finders Fee:** Some proportion of the tip amount that is paid to the reporter of the tip, -//! rather than the main beneficiary. -//! -//! Bounty: -//! - **Bounty spending proposal:** A proposal to reward a predefined body of work upon completion by -//! the Treasury. -//! - **Proposer:** An account proposing a bounty spending. -//! - **Curator:** An account managing the bounty and assigning a payout address receiving the reward -//! for the completion of work. -//! - **Deposit:** The amount held on deposit for placing a bounty proposal plus the amount held on -//! deposit per byte within the bounty description. -//! - **Curator deposit:** The payment from a candidate willing to curate an approved bounty. The deposit -//! is returned when/if the bounty is completed. -//! - **Bounty value:** The total amount that should be paid to the Payout Address if the bounty is -//! rewarded. -//! - **Payout address:** The account to which the total or part of the bounty is assigned to. -//! - **Payout Delay:** The delay period for which a bounty beneficiary needs to wait before claiming. -//! - **Curator fee:** The reserved upfront payment for a curator for work related to the bounty. +//! - **Beneficiary:** An account who will receive the funds from a proposal iff the proposal is +//! approved. +//! - **Deposit:** Funds that a proposer must lock when making a proposal. The deposit will be +//! returned or slashed if the proposal is approved or rejected respectively. +//! - **Pot:** Unspent funds accumulated by the treasury pallet. //! //! ## Interface //! @@ -103,168 +48,78 @@ //! //! General spending/proposal protocol: //! 
- `propose_spend` - Make a spending proposal and stake the required deposit. -//! - `set_pot` - Set the spendable balance of funds. -//! - `configure` - Configure the module's proposal requirements. //! - `reject_proposal` - Reject a proposal, slashing the deposit. //! - `approve_proposal` - Accept the proposal, returning the deposit. //! -//! Tipping protocol: -//! - `report_awesome` - Report something worthy of a tip and register for a finders fee. -//! - `retract_tip` - Retract a previous (finders fee registered) report. -//! - `tip_new` - Report an item worthy of a tip and declare a specific amount to tip. -//! - `tip` - Declare or redeclare an amount to tip for a particular reason. -//! - `close_tip` - Close and pay out a tip. -//! -//! Bounty protocol: -//! - `propose_bounty` - Propose a specific treasury amount to be earmarked for a predefined set of -//! tasks and stake the required deposit. -//! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of work. -//! - `propose_curator` - Assign an account to a bounty as candidate curator. -//! - `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. -//! - `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. -//! - `award_bounty` - Close and pay out the specified amount for the completed work. -//! - `claim_bounty` - Claim a specific bounty amount from the Payout Address. -//! - `unassign_curator` - Unassign an accepted curator from a specific earmark. -//! - `close_bounty` - Cancel the earmark for a specific treasury amount and close the bounty. -//! -//! //! ## GenesisConfig //! -//! The Treasury module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +//! The Treasury pallet depends on the [`GenesisConfig`]. 
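The overview above notes that the chain itself must supply the pot's inflow (e.g. inflation or fees). A common wiring, sketched here with assumed runtime names (`Runtime`, `Treasury`, `pallet_balances`) and relying on the treasury's `OnUnbalanced` implementation, which lives in this crate (outside this hunk) and deposits the imbalance into the pot while emitting a `Deposit` event:

    use frame_support::traits::OnUnbalanced;

    /// Route the fee side of transaction payments into the treasury pot.
    pub struct DealWithFees;
    impl OnUnbalanced<pallet_balances::NegativeImbalance<Runtime>> for DealWithFees {
        fn on_nonzero_unbalanced(fees: pallet_balances::NegativeImbalance<Runtime>) {
            // `pallet_treasury::Pallet` itself implements `OnUnbalanced`,
            // so this simply forwards the withdrawn fees into the pot.
            Treasury::on_unbalanced(fees);
        }
    }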
#![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error, Parameter}; -use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{KeepAlive, AllowDeath}, - ReservableCurrency, WithdrawReason -}; -use sp_runtime::{Permill, ModuleId, Percent, RuntimeDebug, DispatchResult, traits::{ - Zero, StaticLookup, AccountIdConversion, Saturating, Hash, BadOrigin -}}; -use frame_support::dispatch::DispatchResultWithPostInfo; -use frame_support::weights::{Weight, DispatchClass}; -use frame_support::traits::{Contains, ContainsLengthBound, EnsureOrigin}; -use codec::{Encode, Decode}; -use frame_system::{self as system, ensure_signed}; - -mod tests; mod benchmarking; -mod default_weights; - -type BalanceOf = - <>::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = - <>::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = - <>::Currency as Currency<::AccountId>>::NegativeImbalance; - -pub trait WeightInfo { - fn propose_spend() -> Weight; - fn reject_proposal() -> Weight; - fn approve_proposal() -> Weight; - fn report_awesome(r: u32, ) -> Weight; - fn retract_tip() -> Weight; - fn tip_new(r: u32, t: u32, ) -> Weight; - fn tip(t: u32, ) -> Weight; - fn close_tip(t: u32, ) -> Weight; - fn propose_bounty(r: u32, ) -> Weight; - fn approve_bounty() -> Weight; - fn propose_curator() -> Weight; - fn unassign_curator() -> Weight; - fn accept_curator() -> Weight; - fn award_bounty() -> Weight; - fn claim_bounty() -> Weight; - fn close_bounty_proposed() -> Weight; - fn close_bounty_active() -> Weight; - fn extend_bounty_expiry() -> Weight; - fn on_initialize_proposals(p: u32, ) -> Weight; - fn on_initialize_bounties(b: u32, ) -> Weight; -} - -pub trait Trait: frame_system::Trait { - /// The treasury's module id, used for deriving its sovereign account ID. - type ModuleId: Get; - - /// The staking balance. - type Currency: Currency + ReservableCurrency; - - /// Origin from which approvals must come. - type ApproveOrigin: EnsureOrigin; - - /// Origin from which rejections must come. - type RejectOrigin: EnsureOrigin; - - /// Origin from which tippers must come. - /// - /// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy operation). - type Tippers: Contains + ContainsLengthBound; - - /// The period for which a tip remains open after is has achieved threshold tippers. - type TipCountdown: Get; - - /// The percent of the final tip which goes to the original reporter of the tip. - type TipFindersFee: Get; - - /// The amount held on deposit for placing a tip report. - type TipReportDepositBase: Get>; - - /// The amount held on deposit per byte within the tip report reason or bounty description. - type DataDepositPerByte: Get>; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. - type OnSlash: OnUnbalanced>; - - /// Fraction of a proposal's value that should be bonded in order to place the proposal. - /// An accepted proposal gets these back. A rejected proposal does not. - type ProposalBond: Get; - - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - type ProposalBondMinimum: Get>; - - /// Period between successive spends. 
- type SpendPeriod: Get; - - /// Percentage of spare funds (if any) that are burnt per spend period. - type Burn: Get; - - /// The amount held on deposit for placing a bounty proposal. - type BountyDepositBase: Get>; - - /// The delay period for which a bounty beneficiary need to wait before claim the payout. - type BountyDepositPayoutDelay: Get; - - /// Bounty duration in blocks. - type BountyUpdatePeriod: Get; - - /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. - type BountyCuratorDeposit: Get; +#[cfg(test)] +mod tests; +pub mod weights; - /// Minimum value for a bounty. - type BountyValueMinimum: Get>; +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; - /// Maximum acceptable reason length. - type MaximumReasonLength: Get; +use sp_runtime::{ + traits::{AccountIdConversion, Saturating, StaticLookup, Zero}, + Permill, RuntimeDebug, +}; +use sp_std::prelude::*; - /// Handler for the unbalanced decrease when treasury funds are burned. - type BurnDestination: OnUnbalanced>; +use frame_support::{ + print, + traits::{ + Currency, ExistenceRequirement::KeepAlive, Get, Imbalance, OnUnbalanced, + ReservableCurrency, WithdrawReasons, + }, + weights::Weight, + PalletId, +}; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; +pub use pallet::*; +pub use weights::WeightInfo; + +pub type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +pub type PositiveImbalanceOf = <>::Currency as Currency< + ::AccountId, +>>::PositiveImbalance; +pub type NegativeImbalanceOf = <>::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; + +/// A trait to allow the Treasury Pallet to spend its funds for other purposes. +/// There is an expectation that the implementer of this trait will correctly manage +/// the mutable variables passed to it: +/// * `budget_remaining`: The amount of funds available to be spent by the treasury. As funds are +/// spent, you must correctly deduct from this value. +/// * `imbalance`: Any imbalances that you create should be subsumed in here to maximize efficiency +/// of updating the total issuance. (i.e. `deposit_creating`) +/// * `total_weight`: Track any weight that your `spend_funds` implementation uses by updating this +/// value. +/// * `missed_any`: If there were items that you want to spend on, but there were not enough funds, +/// mark this value as `true`. This will prevent the treasury from burning the excess funds. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait SpendFunds, I: 'static = ()> { + fn spend_funds( + budget_remaining: &mut BalanceOf, + imbalance: &mut PositiveImbalanceOf, + total_weight: &mut Weight, + missed_any: &mut bool, + ); } /// An index of a proposal. Just a `u32`. pub type ProposalIndex = u32; /// A spending proposal. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] +#[derive(Encode, Decode, Clone, PartialEq, Eq, MaxEncodedLen, RuntimeDebug, TypeInfo)] pub struct Proposal { /// The account proposing it. proposer: AccountId, @@ -276,269 +131,183 @@ pub struct Proposal { bond: Balance, } -/// An open tipping "motion". Retains all details of a tip including information on the finder -/// and the members who have voted. 
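Since `SpendFunds` above is the new extension point that replaces the treasury's built-in bounty funding loop (the extracted bounties pallet implements it), a minimal sketch of an implementer may help. The `Stipend` struct, the flat `100` amount, and the constant weight are all invented for illustration:

    use core::marker::PhantomData;
    use frame_support::{traits::{Currency, Get, Imbalance}, weights::Weight};
    use pallet_treasury::{BalanceOf, Config, PositiveImbalanceOf, SpendFunds};

    /// Pays a fixed stipend to `Payee` each spend period (illustrative only).
    pub struct Stipend<Payee>(PhantomData<Payee>);

    impl<T: Config<I>, I: 'static, Payee: Get<T::AccountId>> SpendFunds<T, I> for Stipend<Payee> {
        fn spend_funds(
            budget_remaining: &mut BalanceOf<T, I>,
            imbalance: &mut PositiveImbalanceOf<T, I>,
            total_weight: &mut Weight,
            missed_any: &mut bool,
        ) {
            let stipend: BalanceOf<T, I> = 100u32.into();
            if stipend <= *budget_remaining {
                // Deduct from the budget and subsume the minted imbalance so
                // the treasury settles total issuance once for all spenders.
                *budget_remaining -= stipend;
                imbalance.subsume(T::Currency::deposit_creating(&Payee::get(), stipend));
            } else {
                // Not enough left in the pot: ask the treasury not to burn the surplus.
                *missed_any = true;
            }
            // Track whatever weight this hook consumed; a constant for brevity.
            *total_weight = total_weight.saturating_add(10_000);
        }
    }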
-#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] -pub struct OpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, -> { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip. - finder: AccountId, - /// The amount held on deposit for this tip. - deposit: Balance, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - /// Whether this tip should result in the finder taking a fee. - finders_fee: bool, -} +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; -/// An index of a bounty. Just a `u32`. -pub type BountyIndex = u32; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] + pub struct Pallet(PhantomData<(T, I)>); -/// A bounty proposal. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct Bounty { - /// The account proposing it. - proposer: AccountId, - /// The (total) amount that should be paid if the bounty is rewarded. - value: Balance, - /// The curator fee. Included in value. - fee: Balance, - /// The deposit of curator. - curator_deposit: Balance, - /// The amount held on deposit (reserved) for making this proposal. - bond: Balance, - /// The status of this bounty. - status: BountyStatus, -} + #[pallet::config] + pub trait Config: frame_system::Config { + /// The staking balance. + type Currency: Currency + ReservableCurrency; -/// The status of a bounty proposal. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub enum BountyStatus { - /// The bounty is proposed and waiting for approval. - Proposed, - /// The bounty is approved and waiting to become active at next spend period. - Approved, - /// The bounty is funded and waiting for curator assignment. - Funded, - /// A curator has been proposed by the `ApproveOrigin`. Waiting for acceptance from the curator. - CuratorProposed { - /// The assigned curator of this bounty. - curator: AccountId, - }, - /// The bounty is active and waiting to be awarded. - Active { - /// The curator of this bounty. - curator: AccountId, - /// An update from the curator is due by this block, else they are considered inactive. - update_due: BlockNumber, - }, - /// The bounty is awarded and waiting to released after a delay. - PendingPayout { - /// The curator of this bounty. - curator: AccountId, - /// The beneficiary of the bounty. - beneficiary: AccountId, - /// When the bounty can be claimed. - unlock_at: BlockNumber, - }, -} + /// Origin from which approvals must come. + type ApproveOrigin: EnsureOrigin; -decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Treasury { - /// Number of proposals that have been made. - ProposalCount get(fn proposal_count): ProposalIndex; + /// Origin from which rejections must come. + type RejectOrigin: EnsureOrigin; - /// Proposals that have been made. - Proposals get(fn proposals): - map hasher(twox_64_concat) ProposalIndex - => Option>>; + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// Proposal indices that have been approved but not yet awarded. 
- Approvals get(fn approvals): Vec; + /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. + type OnSlash: OnUnbalanced>; - /// Tips that are not yet completed. Keyed by the hash of `(reason, who)` from the value. - /// This has the insecure enumerable hash function since the key itself is already - /// guaranteed to be a secure hash. - pub Tips get(fn tips): - map hasher(twox_64_concat) T::Hash - => Option, T::BlockNumber, T::Hash>>; + /// Fraction of a proposal's value that should be bonded in order to place the proposal. + /// An accepted proposal gets these back. A rejected proposal does not. + #[pallet::constant] + type ProposalBond: Get; - /// Simple preimage lookup from the reason's hash to the original data. Again, has an - /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. - pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; + /// Minimum amount of funds that should be placed in a deposit for making a proposal. + #[pallet::constant] + type ProposalBondMinimum: Get>; - /// Number of bounty proposals that have been made. - pub BountyCount get(fn bounty_count): BountyIndex; + /// Period between successive spends. + #[pallet::constant] + type SpendPeriod: Get; - /// Bounties that have been made. - pub Bounties get(fn bounties): - map hasher(twox_64_concat) BountyIndex - => Option, T::BlockNumber>>; + /// Percentage of spare funds (if any) that are burnt per spend period. + #[pallet::constant] + type Burn: Get; - /// The description of each bounty. - pub BountyDescriptions get(fn bounty_descriptions): map hasher(twox_64_concat) BountyIndex => Option>; + /// The treasury's pallet id, used for deriving its sovereign account ID. + #[pallet::constant] + type PalletId: Get; - /// Bounty indices that have been approved but not yet funded. - pub BountyApprovals get(fn bounty_approvals): Vec; + /// Handler for the unbalanced decrease when treasury funds are burned. + type BurnDestination: OnUnbalanced>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// Runtime hooks that allow an external pallet to spend treasury funds. + type SpendFunds: SpendFunds; + + /// The maximum number of approvals that can wait in the spending queue. + #[pallet::constant] + type MaxApprovals: Get; + } + + /// Number of proposals that have been made. + #[pallet::storage] + #[pallet::getter(fn proposal_count)] + pub(crate) type ProposalCount = StorageValue<_, ProposalIndex, ValueQuery>; + + /// Proposals that have been made. + #[pallet::storage] + #[pallet::getter(fn proposals)] + pub type Proposals, I: 'static = ()> = StorageMap< + _, + Twox64Concat, + ProposalIndex, + Proposal>, + OptionQuery, + >; + + /// Proposal indices that have been approved but not yet awarded. + #[pallet::storage] + #[pallet::getter(fn approvals)] + pub type Approvals, I: 'static = ()> = + StorageValue<_, BoundedVec, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig; + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self + } + } + + #[cfg(feature = "std")] + impl GenesisConfig { + /// Direct implementation of `GenesisBuild::assimilate_storage`. 
+ #[deprecated( + note = "use ` as GenesisBuild>::assimilate_storage` instead" + )] + pub fn assimilate_storage, I: 'static>( + &self, + storage: &mut sp_runtime::Storage, + ) -> Result<(), String> { + >::assimilate_storage(self, storage) + } } - add_extra_genesis { - build(|_config| { + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { // Create Treasury account - let account_id = >::account_id(); + let account_id = >::account_id(); let min = T::Currency::minimum_balance(); if T::Currency::free_balance(&account_id) < min { - let _ = T::Currency::make_free_balance_be( - &account_id, - min, - ); + let _ = T::Currency::make_free_balance_be(&account_id, min); } - }); + } } -} -decl_event!( - pub enum Event - where - Balance = BalanceOf, - ::AccountId, - ::Hash, - { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { /// New proposal. \[proposal_index\] Proposed(ProposalIndex), /// We have ended a spend period and will now allocate funds. \[budget_remaining\] - Spending(Balance), + Spending(BalanceOf), /// Some funds have been allocated. \[proposal_index, award, beneficiary\] - Awarded(ProposalIndex, Balance, AccountId), + Awarded(ProposalIndex, BalanceOf, T::AccountId), /// A proposal was rejected; funds were slashed. \[proposal_index, slashed\] - Rejected(ProposalIndex, Balance), + Rejected(ProposalIndex, BalanceOf), /// Some of our funds have been burnt. \[burn\] - Burnt(Balance), + Burnt(BalanceOf), /// Spending has finished; this is the amount that rolls over until next spend. /// \[budget_remaining\] - Rollover(Balance), + Rollover(BalanceOf), /// Some funds have been deposited. \[deposit\] - Deposit(Balance), - /// A new tip suggestion has been opened. \[tip_hash\] - NewTip(Hash), - /// A tip suggestion has reached threshold and is closing. \[tip_hash\] - TipClosing(Hash), - /// A tip suggestion has been closed. \[tip_hash, who, payout\] - TipClosed(Hash, AccountId, Balance), - /// A tip suggestion has been retracted. \[tip_hash\] - TipRetracted(Hash), - /// New bounty proposal. [index] - BountyProposed(BountyIndex), - /// A bounty proposal was rejected; funds were slashed. [index, bond] - BountyRejected(BountyIndex, Balance), - /// A bounty proposal is funded and became active. [index] - BountyBecameActive(BountyIndex), - /// A bounty is awarded to a beneficiary. [index, beneficiary] - BountyAwarded(BountyIndex, AccountId), - /// A bounty is claimed by beneficiary. [index, payout, beneficiary] - BountyClaimed(BountyIndex, Balance, AccountId), - /// A bounty is cancelled. [index] - BountyCanceled(BountyIndex), - /// A bounty expiry is extended. [index] - BountyExtended(BountyIndex), + Deposit(BalanceOf), } -); -decl_error! { - /// Error for the treasury module. - pub enum Error for Module, I: Instance> { + /// Old name generated by `decl_event`. + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + /// Error for the treasury pallet. + #[pallet::error] + pub enum Error { /// Proposer's balance is too low. InsufficientProposersBalance, /// No proposal or bounty at that index. InvalidIndex, - /// The reason given is just too big. - ReasonTooBig, - /// The tip was already found/started. - AlreadyKnown, - /// The tip hash is unknown. - UnknownTip, - /// The account attempting to retract the tip is not the finder of the tip. - NotFinder, - /// The tip cannot be claimed/closed because there are not enough tippers yet. 
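With the full `Config` surface above (including the new `PalletId`, `SpendFunds`, and `MaxApprovals` items), a runtime wires the pallet up roughly as follows. Everything concrete here is an assumption for illustration: the names `Runtime`, `Balances`, and `AccountId`, the constant values, and the `SubstrateWeight` path for the generated weights:

    use frame_support::{parameter_types, PalletId};
    use sp_runtime::Permill;

    parameter_types! {
        pub const ProposalBond: Permill = Permill::from_percent(5);
        pub const ProposalBondMinimum: u128 = 100; // assumes Balance = u128
        pub const SpendPeriod: u32 = 24 * 600;     // assumes BlockNumber = u32
        pub const Burn: Permill = Permill::from_percent(1);
        pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry");
        pub const MaxApprovals: u32 = 100;
    }

    impl pallet_treasury::Config for Runtime {
        type Currency = Balances;
        type ApproveOrigin = frame_system::EnsureRoot<AccountId>;
        type RejectOrigin = frame_system::EnsureRoot<AccountId>;
        type Event = Event;
        type OnSlash = (); // drop slashed deposits, reducing total issuance
        type ProposalBond = ProposalBond;
        type ProposalBondMinimum = ProposalBondMinimum;
        type SpendPeriod = SpendPeriod;
        type Burn = Burn;
        type PalletId = TreasuryPalletId;
        type BurnDestination = (); // likewise, burnt funds are simply dropped
        type WeightInfo = pallet_treasury::weights::SubstrateWeight<Runtime>;
        type SpendFunds = (); // the tuple impl: no external spenders wired in
        type MaxApprovals = MaxApprovals;
    }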
- StillOpen, - /// The tip cannot be claimed/closed because it's still in the countdown period. - Premature, - /// The bounty status is unexpected. - UnexpectedStatus, - /// Require bounty curator. - RequireCurator, - /// Invalid bounty value. - InvalidValue, - /// Invalid bounty fee. - InvalidFee, - /// A bounty payout is pending. - /// To cancel the bounty, you must unassign and slash the curator. - PendingPayout, + /// Too many approvals in the queue. + TooManyApprovals, } -} - -decl_module! { - pub struct Module, I: Instance=DefaultInstance> - for enum Call - where origin: T::Origin - { - /// Fraction of a proposal's value that should be bonded in order to place the proposal. - /// An accepted proposal gets these back. A rejected proposal does not. - const ProposalBond: Permill = T::ProposalBond::get(); - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - const ProposalBondMinimum: BalanceOf = T::ProposalBondMinimum::get(); - - /// Period between successive spends. - const SpendPeriod: T::BlockNumber = T::SpendPeriod::get(); - - /// Percentage of spare funds (if any) that are burnt per spend period. - const Burn: Permill = T::Burn::get(); - - /// The period for which a tip remains open after is has achieved threshold tippers. - const TipCountdown: T::BlockNumber = T::TipCountdown::get(); - - /// The amount of the final tip which goes to the original reporter of the tip. - const TipFindersFee: Percent = T::TipFindersFee::get(); - - /// The amount held on deposit for placing a tip report. - const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); - - /// The amount held on deposit per byte within the tip report reason or bounty description. - const DataDepositPerByte: BalanceOf = T::DataDepositPerByte::get(); - - /// The treasury's module id, used for deriving its sovereign account ID. - const ModuleId: ModuleId = T::ModuleId::get(); - - /// The amount held on deposit for placing a bounty proposal. - const BountyDepositBase: BalanceOf = T::BountyDepositBase::get(); - - /// The delay period for which a bounty beneficiary need to wait before claim the payout. - const BountyDepositPayoutDelay: T::BlockNumber = T::BountyDepositPayoutDelay::get(); - - /// Percentage of the curator fee that will be reserved upfront as deposit for bounty curator. - const BountyCuratorDeposit: Permill = T::BountyCuratorDeposit::get(); - - const BountyValueMinimum: BalanceOf = T::BountyValueMinimum::get(); - - /// Maximum acceptable reason length. - const MaximumReasonLength: u32 = T::MaximumReasonLength::get(); - - type Error = Error; - - fn deposit_event() = default; + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + /// # + /// - Complexity: `O(A)` where `A` is the number of approvals + /// - Db reads and writes: `Approvals`, `pot account data` + /// - Db reads and writes per approval: `Proposals`, `proposer account data`, `beneficiary + /// account data` + /// - The weight is overestimated if some approvals got missed. + /// # + fn on_initialize(n: T::BlockNumber) -> Weight { + // Check to see if we should spend some funds! + if (n % T::SpendPeriod::get()).is_zero() { + Self::spend_funds() + } else { + 0 + } + } + } + #[pallet::call] + impl, I: 'static> Pallet { /// Put forward a suggestion for spending. A deposit proportional to the value /// is reserved and slashed if the proposal is rejected. It is returned once the /// proposal is awarded. @@ -548,12 +317,12 @@ decl_module! 
{ /// - DbReads: `ProposalCount`, `origin account` /// - DbWrites: `ProposalCount`, `Proposals`, `origin account` /// # - #[weight = T::WeightInfo::propose_spend()] - fn propose_spend( - origin, - #[compact] value: BalanceOf, - beneficiary: ::Source - ) { + #[pallet::weight(T::WeightInfo::propose_spend())] + pub fn propose_spend( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + beneficiary: ::Source, + ) -> DispatchResult { let proposer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -562,10 +331,11 @@ decl_module! { .map_err(|_| Error::::InsufficientProposersBalance)?; let c = Self::proposal_count(); - >::put(c + 1); + >::put(c + 1); >::insert(c, Proposal { proposer, value, beneficiary, bond }); - Self::deposit_event(RawEvent::Proposed(c)); + Self::deposit_event(Event::Proposed(c)); + Ok(()) } /// Reject a proposed spend. The original deposit will be slashed. @@ -577,16 +347,21 @@ decl_module! { /// - DbReads: `Proposals`, `rejected proposer account` /// - DbWrites: `Proposals`, `rejected proposer account` /// # - #[weight = (T::WeightInfo::reject_proposal(), DispatchClass::Operational)] - fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { + #[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))] + pub fn reject_proposal( + origin: OriginFor, + #[pallet::compact] proposal_id: ProposalIndex, + ) -> DispatchResult { T::RejectOrigin::ensure_origin(origin)?; - let proposal = >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; + let proposal = + >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; let value = proposal.bond; let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; T::OnSlash::on_unbalanced(imbalance); Self::deposit_event(Event::::Rejected(proposal_id, value)); + Ok(()) } /// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary @@ -599,591 +374,22 @@ decl_module! { /// - DbReads: `Proposals`, `Approvals` /// - DbWrite: `Approvals` /// # - #[weight = (T::WeightInfo::approve_proposal(), DispatchClass::Operational)] - fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { + #[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))] + pub fn approve_proposal( + origin: OriginFor, + #[pallet::compact] proposal_id: ProposalIndex, + ) -> DispatchResult { T::ApproveOrigin::ensure_origin(origin)?; ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); - Approvals::::append(proposal_id); - } - - /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `DataDepositPerByte` for each byte in `reason`. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - Complexity: `O(R)` where `R` length of `reason`. 
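Together, `propose_spend`, `reject_proposal`, and `approve_proposal` above are the whole dispatchable surface of the reworked pallet, with the payout itself driven by `on_initialize`. A sketch of the happy path against the pallet's mock runtime, assuming the usual test scaffolding (`new_test_ext`, Root as approve/reject origin, a `SpendPeriod` of two blocks, `u64` block numbers):

    use frame_support::{assert_ok, traits::{Currency, OnInitialize}};

    #[test]
    fn spend_lifecycle_sketch() {
        new_test_ext().execute_with(|| {
            // Fund the pot; `pot()` is the free balance minus the existential deposit.
            Balances::make_free_balance_be(&Treasury::account_id(), 101);

            // Anyone bonds a deposit and proposes paying 100 to account 3.
            assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3));

            // `ApproveOrigin` queues it. `try_append` on the bounded `Approvals`
            // vector fails with `TooManyApprovals` once `MaxApprovals` is reached.
            assert_ok!(Treasury::approve_proposal(Origin::root(), 0));

            // The payout happens in `on_initialize` on the spend-period boundary.
            <Treasury as OnInitialize<u64>>::on_initialize(2);
            assert_eq!(Balances::free_balance(3), 100);
        });
    }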
- /// - encoding and hashing of 'reason' - /// - DbReads: `Reasons`, `Tips` - /// - DbWrites: `Reasons`, `Tips` - /// # - #[weight = T::WeightInfo::report_awesome(reason.len() as u32)] - fn report_awesome(origin, reason: Vec, who: T::AccountId) { - let finder = ensure_signed(origin)?; - - ensure!(reason.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); - - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); - - let deposit = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * (reason.len() as u32).into(); - T::Currency::reserve(&finder, deposit)?; - - Reasons::::insert(&reason_hash, &reason); - let tip = OpenTip { - reason: reason_hash, - who, - finder, - deposit, - closes: None, - tips: vec![], - finders_fee: true - }; - Tips::::insert(&hash, tip); - Self::deposit_event(RawEvent::NewTip(hash)); - } - - /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. - /// - /// If successful, the original deposit will be unreserved. - /// - /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` - /// must have been reported by the signing account through `report_awesome` (and not - /// through `tip_new`). - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// Emits `TipRetracted` if successful. - /// - /// # - /// - Complexity: `O(1)` - /// - Depends on the length of `T::Hash` which is fixed. - /// - DbReads: `Tips`, `origin account` - /// - DbWrites: `Reasons`, `Tips`, `origin account` - /// # - #[weight = T::WeightInfo::retract_tip()] - fn retract_tip(origin, hash: T::Hash) { - let who = ensure_signed(origin)?; - let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; - ensure!(tip.finder == who, Error::::NotFinder); - - Reasons::::remove(&tip.reason); - Tips::::remove(&hash); - if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&who, tip.deposit); - } - Self::deposit_event(RawEvent::TipRetracted(hash)); - } - - /// Give a tip for something new; no finder's fee will be taken. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - Complexity: `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. - /// - `O(T)`: decoding `Tipper` vec of length `T` - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. 
- /// - `O(R)`: hashing and encoding of reason of length `R` - /// - DbReads: `Tippers`, `Reasons` - /// - DbWrites: `Reasons`, `Tips` - /// # - #[weight = T::WeightInfo::tip_new(reason.len() as u32, T::Tippers::max_len() as u32)] - fn tip_new(origin, reason: Vec, who: T::AccountId, #[compact] tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - - Reasons::::insert(&reason_hash, &reason); - Self::deposit_event(RawEvent::NewTip(hash.clone())); - let tips = vec![(tipper.clone(), tip_value)]; - let tip = OpenTip { - reason: reason_hash, - who, - finder: tipper, - deposit: Zero::zero(), - closes: None, - tips, - finders_fee: false, - }; - Tips::::insert(&hash, tip); - } - - /// Declare a tip value for an already-open tip. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary - /// account ID. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period - /// has started. - /// - /// # - /// - Complexity: `O(T)` where `T` is the number of tippers. - /// decoding `Tipper` vec of length `T`, insert tip and check closing, - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. - /// - /// Actually weight could be lower as it depends on how many tips are in `OpenTip` but it - /// is weighted as if almost full i.e of length `T-1`. - /// - DbReads: `Tippers`, `Tips` - /// - DbWrites: `Tips` - /// # - #[weight = T::WeightInfo::tip(T::Tippers::max_len() as u32)] - fn tip(origin, hash: T::Hash, #[compact] tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - - let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { - Self::deposit_event(RawEvent::TipClosing(hash.clone())); - } - Tips::::insert(&hash, tip); - } - - /// Close and payout a tip. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// The tip identified by `hash` must have finished its countdown period. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// # - /// - Complexity: `O(T)` where `T` is the number of tippers. - /// decoding `Tipper` vec of length `T`. - /// `T` is charged as upper bound given by `ContainsLengthBound`. - /// The actual cost depends on the implementation of `T::Tippers`. 
- /// - DbReads: `Tips`, `Tippers`, `tip finder` - /// - DbWrites: `Reasons`, `Tips`, `Tippers`, `tip finder` - /// # - #[weight = T::WeightInfo::close_tip(T::Tippers::max_len() as u32)] - fn close_tip(origin, hash: T::Hash) { - ensure_signed(origin)?; - - let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; - ensure!(system::Module::::block_number() >= *n, Error::::Premature); - // closed. - Reasons::::remove(&tip.reason); - Tips::::remove(hash); - Self::payout_tip(hash, tip); - } - - /// Propose a new bounty. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `DataDepositPerByte` for each byte in `reason`. It will be unreserved upon approval, - /// or slashed when rejected. - /// - /// - `curator`: The curator account whom will manage this bounty. - /// - `fee`: The curator fee. - /// - `value`: The total payment amount of this bounty, curator fee included. - /// - `description`: The description of this bounty. - #[weight = T::WeightInfo::propose_bounty(description.len() as u32)] - fn propose_bounty( - origin, - #[compact] value: BalanceOf, - description: Vec, - ) { - let proposer = ensure_signed(origin)?; - Self::create_bounty(proposer, description, value)?; - } - - /// Approve a bounty proposal. At a later time, the bounty will be funded and become active - /// and the original deposit will be returned. - /// - /// May only be called from `T::ApproveOrigin`. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::approve_bounty()] - fn approve_bounty(origin, #[compact] bounty_id: ProposalIndex) { - T::ApproveOrigin::ensure_origin(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); - - bounty.status = BountyStatus::Approved; - - BountyApprovals::::append(bounty_id); - - Ok(()) - })?; - } - - /// Assign a curator to a funded bounty. - /// - /// May only be called from `T::ApproveOrigin`. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::propose_curator()] - fn propose_curator( - origin, - #[compact] bounty_id: ProposalIndex, - curator: ::Source, - #[compact] fee: BalanceOf, - ) { - T::ApproveOrigin::ensure_origin(origin)?; - - let curator = T::Lookup::lookup(curator)?; - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - match bounty.status { - BountyStatus::Funded | BountyStatus::CuratorProposed { .. } => {}, - _ => return Err(Error::::UnexpectedStatus.into()), - }; - - ensure!(fee < bounty.value, Error::::InvalidFee); - - bounty.status = BountyStatus::CuratorProposed { curator }; - bounty.fee = fee; - - Ok(()) - })?; - } - - /// Unassign curator from a bounty. - /// - /// This function can only be called by the `RejectOrigin` a signed origin. - /// - /// If this function is called by the `RejectOrigin`, we assume that the curator is malicious - /// or inactive. As a result, we will slash the curator when possible. - /// - /// If the origin is the curator, we take this as a sign they are unable to do their job and - /// they willingly give up. 
We could slash them, but for now we allow them to recover their - /// deposit and exit without issue. (We may want to change this if it is abused.) - /// - /// Finally, the origin can be anyone if and only if the curator is "inactive". This allows - /// anyone in the community to call out that a curator is not doing their due diligence, and - /// we should pick a new curator. In this case the curator should also be slashed. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = T::WeightInfo::unassign_curator()] - fn unassign_curator( - origin, - #[compact] bounty_id: ProposalIndex, - ) { - let maybe_sender = ensure_signed(origin.clone()) - .map(Some) - .or_else(|_| T::RejectOrigin::ensure_origin(origin).map(|_| None))?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - let slash_curator = |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { - let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; - T::OnSlash::on_unbalanced(imbalance); - *curator_deposit = Zero::zero(); - }; - - match bounty.status { - BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { - // No curator to unassign at this point. - return Err(Error::::UnexpectedStatus.into()) - } - BountyStatus::CuratorProposed { ref curator } => { - // A curator has been proposed, but not accepted yet. - // Either `RejectOrigin` or the proposed curator can unassign the curator. - ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin); - }, - BountyStatus::Active { ref curator, ref update_due } => { - // The bounty is active. - match maybe_sender { - // If the `RejectOrigin` is calling this function, slash the curator. - None => { - slash_curator(curator, &mut bounty.curator_deposit); - // Continue to change bounty status below... - }, - Some(sender) => { - // If the sender is not the curator, and the curator is inactive, - // slash the curator. - if sender != *curator { - let block_number = system::Module::::block_number(); - if *update_due < block_number { - slash_curator(curator, &mut bounty.curator_deposit); - // Continue to change bounty status below... - } else { - // Curator has more time to give an update. - return Err(Error::::Premature.into()) - } - } else { - // Else this is the curator, willingly giving up their role. - // Give back their deposit. - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - // Continue to change bounty status below... - } - }, - } - }, - BountyStatus::PendingPayout { ref curator, .. } => { - // The bounty is pending payout, so only council can unassign a curator. - // By doing so, they are claiming the curator is acting maliciously, so - // we slash the curator. - ensure!(maybe_sender.is_none(), BadOrigin); - slash_curator(curator, &mut bounty.curator_deposit); - // Continue to change bounty status below... - } - }; - - bounty.status = BountyStatus::Funded; - Ok(()) - })?; - } - - /// Accept the curator role for a bounty. - /// A deposit will be reserved from curator and refund upon successful payout. - /// - /// May only be called from the curator. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. 
- /// # - #[weight = T::WeightInfo::accept_curator()] - fn accept_curator(origin, #[compact] bounty_id: ProposalIndex) { - let signer = ensure_signed(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - match bounty.status { - BountyStatus::CuratorProposed { ref curator } => { - ensure!(signer == *curator, Error::::RequireCurator); - - let deposit = T::BountyCuratorDeposit::get() * bounty.fee; - T::Currency::reserve(curator, deposit)?; - bounty.curator_deposit = deposit; - - let update_due = system::Module::::block_number() + T::BountyUpdatePeriod::get(); - bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; - - Ok(()) - }, - _ => Err(Error::::UnexpectedStatus.into()), - } - })?; - } - - /// Award bounty to a beneficiary account. The beneficiary will be able to claim the funds after a delay. - /// - /// The dispatch origin for this call must be the curator of this bounty. - /// - /// - `bounty_id`: Bounty ID to award. - /// - `beneficiary`: The beneficiary account whom will receive the payout. - #[weight = T::WeightInfo::award_bounty()] - fn award_bounty(origin, #[compact] bounty_id: ProposalIndex, beneficiary: ::Source) { - let signer = ensure_signed(origin)?; - let beneficiary = T::Lookup::lookup(beneficiary)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let mut bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - match &bounty.status { - BountyStatus::Active { - curator, - .. - } => { - ensure!(signer == *curator, Error::::RequireCurator); - }, - _ => return Err(Error::::UnexpectedStatus.into()), - } - bounty.status = BountyStatus::PendingPayout { - curator: signer, - beneficiary: beneficiary.clone(), - unlock_at: system::Module::::block_number() + T::BountyDepositPayoutDelay::get(), - }; - - Ok(()) - })?; - - Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); - } - - /// Claim the payout from an awarded bounty after payout delay. - /// - /// The dispatch origin for this call must be the beneficiary of this bounty. - /// - /// - `bounty_id`: Bounty ID to claim. - #[weight = T::WeightInfo::claim_bounty()] - fn claim_bounty(origin, #[compact] bounty_id: BountyIndex) { - let _ = ensure_signed(origin)?; // anyone can trigger claim - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let bounty = maybe_bounty.take().ok_or(Error::::InvalidIndex)?; - if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { - ensure!(system::Module::::block_number() >= unlock_at, Error::::Premature); - let bounty_account = Self::bounty_account_id(bounty_id); - let balance = T::Currency::free_balance(&bounty_account); - let fee = bounty.fee.min(balance); // just to be safe - let payout = balance.saturating_sub(fee); - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - let _ = T::Currency::transfer(&bounty_account, &curator, fee, AllowDeath); // should not fail - let _ = T::Currency::transfer(&bounty_account, &beneficiary, payout, AllowDeath); // should not fail - *maybe_bounty = None; - - BountyDescriptions::::remove(bounty_id); - - Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); - Ok(()) - } else { - Err(Error::::UnexpectedStatus.into()) - } - })?; - } - - /// Cancel a proposed or active bounty. All the funds will be sent to treasury and - /// the curator deposit will be unreserved if possible. 
- /// - /// Only `T::RejectOrigin` is able to cancel a bounty. - /// - /// - `bounty_id`: Bounty ID to cancel. - #[weight = T::WeightInfo::close_bounty_proposed().max(T::WeightInfo::close_bounty_active())] - fn close_bounty(origin, #[compact] bounty_id: BountyIndex) -> DispatchResultWithPostInfo { - T::RejectOrigin::ensure_origin(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResultWithPostInfo { - let bounty = maybe_bounty.as_ref().ok_or(Error::::InvalidIndex)?; - - match &bounty.status { - BountyStatus::Proposed => { - // The reject origin would like to cancel a proposed bounty. - BountyDescriptions::::remove(bounty_id); - let value = bounty.bond; - let imbalance = T::Currency::slash_reserved(&bounty.proposer, value).0; - T::OnSlash::on_unbalanced(imbalance); - *maybe_bounty = None; - - Self::deposit_event(Event::::BountyRejected(bounty_id, value)); - // Return early, nothing else to do. - return Ok(Some(T::WeightInfo::close_bounty_proposed()).into()) - }, - BountyStatus::Approved => { - // For weight reasons, we don't allow a council to cancel in this phase. - // We ask for them to wait until it is funded before they can cancel. - return Err(Error::::UnexpectedStatus.into()) - }, - BountyStatus::Funded | - BountyStatus::CuratorProposed { .. } => { - // Nothing extra to do besides the removal of the bounty below. - }, - BountyStatus::Active { curator, .. } => { - // Cancelled by council, refund deposit of the working curator. - let _ = T::Currency::unreserve(&curator, bounty.curator_deposit); - // Then execute removal of the bounty below. - }, - BountyStatus::PendingPayout { .. } => { - // Bounty is already pending payout. If council wants to cancel - // this bounty, it should mean the curator was acting maliciously. - // So the council should first unassign the curator, slashing their - // deposit. - return Err(Error::::PendingPayout.into()) - } - } - - let bounty_account = Self::bounty_account_id(bounty_id); - - BountyDescriptions::::remove(bounty_id); - - let balance = T::Currency::free_balance(&bounty_account); - let _ = T::Currency::transfer(&bounty_account, &Self::account_id(), balance, AllowDeath); // should not fail - *maybe_bounty = None; - - Self::deposit_event(Event::::BountyCanceled(bounty_id)); - Ok(Some(T::WeightInfo::close_bounty_active()).into()) - }) - } - - /// Extend the expiry time of an active bounty. - /// - /// The dispatch origin for this call must be the curator of this bounty. - /// - /// - `bounty_id`: Bounty ID to extend. - /// - `remark`: additional information. 
- #[weight = T::WeightInfo::extend_bounty_expiry()] - fn extend_bounty_expiry(origin, #[compact] bounty_id: BountyIndex, _remark: Vec) { - let signer = ensure_signed(origin)?; - - Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { - let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - - match bounty.status { - BountyStatus::Active { ref curator, ref mut update_due } => { - ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (system::Module::::block_number() + T::BountyUpdatePeriod::get()).max(*update_due); - }, - _ => return Err(Error::::UnexpectedStatus.into()), - } - - Ok(()) - })?; - - Self::deposit_event(Event::::BountyExtended(bounty_id)); - } - - /// # - /// - Complexity: `O(A)` where `A` is the number of approvals - /// - Db reads and writes: `Approvals`, `pot account data` - /// - Db reads and writes per approval: - /// `Proposals`, `proposer account data`, `beneficiary account data` - /// - The weight is overestimated if some approvals got missed. - /// # - fn on_initialize(n: T::BlockNumber) -> Weight { - // Check to see if we should spend some funds! - if (n % T::SpendPeriod::get()).is_zero() { - Self::spend_funds() - } else { - 0 - } + Approvals::::try_append(proposal_id) + .map_err(|_| Error::::TooManyApprovals)?; + Ok(()) } } } -impl, I: Instance> Module { +impl, I: 'static> Pallet { // Add public immutables and private mutables. /// The account ID of the treasury pot. @@ -1191,14 +397,7 @@ impl, I: Instance> Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. pub fn account_id() -> T::AccountId { - T::ModuleId::get().into_account() - } - - /// The account ID of a bounty account - pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { - // only use two byte prefix to support 16 byte account id (used by test) - // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index - T::ModuleId::get().into_sub_account(("bt", id)) + T::PalletId::get().into_account() } /// The needed bond for a proposal whose spend is `value`. @@ -1206,90 +405,17 @@ impl, I: Instance> Module { T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value) } - /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it - /// closes, if so, then deposit the relevant event and set closing accordingly. - /// - /// `O(T)` and one storage access. - fn insert_tip_and_check_closing( - tip: &mut OpenTip, T::BlockNumber, T::Hash>, - tipper: T::AccountId, - tip_value: BalanceOf, - ) -> bool { - match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { - Ok(pos) => tip.tips[pos] = (tipper, tip_value), - Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), - } - Self::retain_active_tips(&mut tip.tips); - let threshold = (T::Tippers::count() + 1) / 2; - if tip.tips.len() >= threshold && tip.closes.is_none() { - tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); - true - } else { - false - } - } - - /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. 
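The doc comment on `account_id` above warns that the ID is recomputed from the `PalletId` on every call, so heavy users should derive it once and reuse it. A hypothetical helper (its name, shape, and purpose are invented for illustration):

    use frame_support::traits::{Currency, ExistenceRequirement::KeepAlive};
    use pallet_treasury::{BalanceOf, Config, Pallet};

    fn payout_all<T: Config<I>, I: 'static>(
        beneficiaries: &[T::AccountId],
        amount: BalanceOf<T, I>,
    ) {
        // Derive the sovereign account once, outside the loop.
        let pot = Pallet::<T, I>::account_id();
        for who in beneficiaries {
            // Best-effort transfers, in the pallet's own style.
            let _ = T::Currency::transfer(&pot, who, amount, KeepAlive);
        }
    }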
- fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { - let members = T::Tippers::sorted_members(); - let mut members_iter = members.iter(); - let mut member = members_iter.next(); - tips.retain(|(ref a, _)| loop { - match member { - None => break false, - Some(m) if m > a => break false, - Some(m) => { - member = members_iter.next(); - if m < a { - continue - } else { - break true; - } - } - } - }); - } - - /// Execute the payout of a tip. - /// - /// Up to three balance operations. - /// Plus `O(T)` (`T` is Tippers length). - fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { - let mut tips = tip.tips; - Self::retain_active_tips(&mut tips); - tips.sort_by_key(|i| i.1); - let treasury = Self::account_id(); - let max_payout = Self::pot(); - let mut payout = tips[tips.len() / 2].1.min(max_payout); - if !tip.deposit.is_zero() { - let _ = T::Currency::unreserve(&tip.finder, tip.deposit); - } - if tip.finders_fee { - if tip.finder != tip.who { - // pay out the finder's fee. - let finders_fee = T::TipFindersFee::get() * payout; - payout -= finders_fee; - // this should go through given we checked it's at most the free balance, but still - // we only make a best-effort. - let _ = T::Currency::transfer(&treasury, &tip.finder, finders_fee, KeepAlive); - } - } - // same as above: best-effort only. - let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); - Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); - } - /// Spend some money! returns number of approvals before spend. - fn spend_funds() -> Weight { + pub fn spend_funds() -> Weight { let mut total_weight: Weight = Zero::zero(); let mut budget_remaining = Self::pot(); - Self::deposit_event(RawEvent::Spending(budget_remaining)); + Self::deposit_event(Event::Spending(budget_remaining)); let account_id = Self::account_id(); let mut missed_any = false; let mut imbalance = >::zero(); - let proposals_len = Approvals::::mutate(|v| { + let proposals_len = Approvals::::mutate(|v| { let proposals_approvals_len = v.len() as u32; v.retain(|&index| { // Should always be true, but shouldn't panic if false or we're screwed. @@ -1299,12 +425,13 @@ impl, I: Instance> Module { >::remove(index); // return their deposit. - let _ = T::Currency::unreserve(&p.proposer, p.bond); + let err_amount = T::Currency::unreserve(&p.proposer, p.bond); + debug_assert!(err_amount.is_zero()); // provide the allocation. imbalance.subsume(T::Currency::deposit_creating(&p.beneficiary, p.value)); - Self::deposit_event(RawEvent::Awarded(index, p.value, p.beneficiary)); + Self::deposit_event(Event::Awarded(index, p.value, p.beneficiary)); false } else { missed_any = true; @@ -1319,38 +446,13 @@ impl, I: Instance> Module { total_weight += T::WeightInfo::on_initialize_proposals(proposals_len); - let bounties_len = BountyApprovals::::mutate(|v| { - let bounties_approval_len = v.len() as u32; - v.retain(|&index| { - Bounties::::mutate(index, |bounty| { - // Should always be true, but shouldn't panic if false or we're screwed. - if let Some(bounty) = bounty { - if bounty.value <= budget_remaining { - budget_remaining -= bounty.value; - - bounty.status = BountyStatus::Funded; - - // return their deposit. 
- let _ = T::Currency::unreserve(&bounty.proposer, bounty.bond); - - // fund the bounty account - imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); - - Self::deposit_event(RawEvent::BountyBecameActive(index)); - false - } else { - missed_any = true; - true - } - } else { - false - } - }) - }); - bounties_approval_len - }); - - total_weight += T::WeightInfo::on_initialize_bounties(bounties_len); + // Call the runtime hooks of external pallets that use the treasury (e.g. bounties), letting them spend from the remaining budget. + T::SpendFunds::spend_funds( + &mut budget_remaining, + &mut imbalance, + &mut total_weight, + &mut missed_any, + ); @@ -1360,124 +462,42 @@ impl, I: Instance> Module { let (debit, credit) = T::Currency::pair(burn); imbalance.subsume(debit); T::BurnDestination::on_unbalanced(credit); - Self::deposit_event(RawEvent::Burnt(burn)) + Self::deposit_event(Event::Burnt(burn)) } // Must never be an error, but better to be safe. // proof: budget_remaining is account free balance minus ED; // Thus we can't spend more than account free balance minus ED; // Thus account is kept alive; qed; - if let Err(problem) = T::Currency::settle( - &account_id, - imbalance, - WithdrawReason::Transfer.into(), - KeepAlive - ) { + if let Err(problem) = + T::Currency::settle(&account_id, imbalance, WithdrawReasons::TRANSFER, KeepAlive) + { print("Inconsistent state - couldn't settle imbalance for funds spent by treasury"); // Nothing else to do here. drop(problem); } - Self::deposit_event(RawEvent::Rollover(budget_remaining)); + Self::deposit_event(Event::Rollover(budget_remaining)); total_weight } /// Return the amount of money in the pot. // The existential deposit is not part of the pot so treasury account never gets deleted. - fn pot() -> BalanceOf { + pub fn pot() -> BalanceOf { T::Currency::free_balance(&Self::account_id()) // Must never be less than 0 but better be safe. .saturating_sub(T::Currency::minimum_balance()) } - - fn create_bounty( - proposer: T::AccountId, - description: Vec, - value: BalanceOf, - ) -> DispatchResult { - ensure!(description.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); - ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); - - let index = Self::bounty_count(); - - // reserve deposit for new bounty - let bond = T::BountyDepositBase::get() - + T::DataDepositPerByte::get() * (description.len() as u32).into(); - T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; - - BountyCount::::put(index + 1); - - let bounty = Bounty { - proposer, value, fee: 0.into(), curator_deposit: 0.into(), bond, status: BountyStatus::Proposed, - }; - - Bounties::::insert(index, &bounty); - BountyDescriptions::::insert(index, description); - - Self::deposit_event(RawEvent::BountyProposed(index)); - - Ok(()) - } - - pub fn migrate_retract_tip_for_tip_new() { - /// An open tipping "motion". Retains all details of a tip including information on the finder - /// and the members who have voted. - #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] - pub struct OldOpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, - > { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped.
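The `T::SpendFunds::spend_funds(...)` call above replaces the inlined bounty-funding loop: pallets that used to live here now participate in the spend cycle through a runtime hook. A rough sketch of the hook's shape, inferred from the four `&mut` arguments at the call site; the trait below is illustrative only, with `u64` standing in for the real balance, imbalance, and weight types:

```rust
// Illustrative only: the real trait is defined by the treasury pallet and
// is generic over the runtime's types; u64 stands in for all of them here.
pub trait SpendFunds {
    fn spend_funds(
        budget_remaining: &mut u64, // pot left over for this spend period
        imbalance: &mut u64,        // accumulator for newly created funds
        total_weight: &mut u64,     // weight the hook itself consumed
        missed_any: &mut bool,      // set when a payout had to be deferred
    );
}

// A no-op implementation for the unit type mirrors why the test mock
// further down can simply set `type SpendFunds = ();`.
impl SpendFunds for () {
    fn spend_funds(_: &mut u64, _: &mut u64, _: &mut u64, _: &mut bool) {}
}
```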
- who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - } - - use frame_support::{Twox64Concat, migration::StorageKeyIterator}; - - for (hash, old_tip) in StorageKeyIterator::< - T::Hash, - OldOpenTip, T::BlockNumber, T::Hash>, - Twox64Concat, - >::new(I::PREFIX.as_bytes(), b"Tips").drain() - { - let (finder, deposit, finders_fee) = match old_tip.finder { - Some((finder, deposit)) => (finder, deposit, true), - None => (T::AccountId::default(), Zero::zero(), false), - }; - let new_tip = OpenTip { - reason: old_tip.reason, - who: old_tip.who, - finder, - deposit, - closes: old_tip.closes, - tips: old_tip.tips, - finders_fee - }; - Tips::::insert(hash, new_tip) - } - } } -impl, I: Instance> OnUnbalanced> for Module { +impl, I: 'static> OnUnbalanced> for Pallet { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { let numeric_amount = amount.peek(); // Must resolve into existing but better to be safe. let _ = T::Currency::resolve_creating(&Self::account_id(), amount); - Self::deposit_event(RawEvent::Deposit(numeric_amount)); + Self::deposit_event(Event::Deposit(numeric_amount)); } } diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 88c4f23b91ae2..534661b2773bb 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,52 +19,51 @@ #![cfg(test)] -use super::*; use std::cell::RefCell; -use frame_support::{ - assert_noop, assert_ok, impl_outer_origin, impl_outer_event, parameter_types, weights::Weight, - traits::{Contains, OnInitialize} -}; + use sp_core::H256; use sp_runtime::{ - Perbill, ModuleId, testing::Header, - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + traits::{BlakeTwo256, IdentityLookup}, }; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} - - -mod treasury { - // Re-export needed for `impl_outer_event!`. - pub use super::super::*; -} +use frame_support::{ + assert_noop, assert_ok, pallet_prelude::GenesisBuild, parameter_types, traits::OnInitialize, + PalletId, +}; -impl_outer_event! { - pub enum Event for Test { - system, - pallet_balances, - treasury, +use super::*; +use crate as treasury; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Treasury: treasury::{Pallet, Call, Storage, Config, Event}, } -} - +); -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account @@ -72,25 +71,22 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type Event = Event; type DustRemoval = (); @@ -101,93 +97,46 @@ impl pallet_balances::Trait for Test { thread_local! { static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } -pub struct TenToFourteen; -impl Contains for TenToFourteen { - fn sorted_members() -> Vec { - TEN_TO_FOURTEEN.with(|v| { - v.borrow().clone() - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn add(new: &u128) { - TEN_TO_FOURTEEN.with(|v| { - let mut members = v.borrow_mut(); - members.push(*new); - members.sort(); - }) - } -} -impl ContainsLengthBound for TenToFourteen { - fn max_len() -> usize { - TEN_TO_FOURTEEN.with(|v| v.borrow().len()) - } - fn min_len() -> usize { 0 } -} parameter_types! 
{ pub const ProposalBond: Permill = Permill::from_percent(5); pub const ProposalBondMinimum: u64 = 1; pub const SpendPeriod: u64 = 2; pub const Burn: Permill = Permill::from_percent(50); - pub const TipCountdown: u64 = 1; - pub const TipFindersFee: Percent = Percent::from_percent(20); - pub const TipReportDepositBase: u64 = 1; - pub const DataDepositPerByte: u64 = 1; - pub const BountyDepositBase: u64 = 80; - pub const BountyDepositPayoutDelay: u64 = 3; - pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const BountyUpdatePeriod: u32 = 20; - pub const MaximumReasonLength: u32 = 16384; pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); pub const BountyValueMinimum: u64 = 1; + pub const MaxApprovals: u32 = 100; } -impl Trait for Test { - type ModuleId = TreasuryModuleId; - type Currency = pallet_balances::Module; +impl Config for Test { + type PalletId = TreasuryPalletId; + type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Tippers = TenToFourteen; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type DataDepositPerByte = DataDepositPerByte; type Event = Event; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; - type BountyDepositBase = BountyDepositBase; - type BountyDepositPayoutDelay = BountyDepositPayoutDelay; - type BountyUpdatePeriod = BountyUpdatePeriod; - type BountyCuratorDeposit = BountyCuratorDeposit; - type BountyValueMinimum = BountyValueMinimum; - type MaximumReasonLength = MaximumReasonLength; - type BurnDestination = (); // Just gets burned. + type BurnDestination = (); // Just gets burned. type WeightInfo = (); + type SpendFunds = (); + type MaxApprovals = MaxApprovals; } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Treasury = Module; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized at ED. 
balances: vec![(0, 100), (1, 98), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); t.into() } -fn last_event() -> RawEvent { - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap() -} - #[test] fn genesis_config_works() { new_test_ext().execute_with(|| { @@ -196,163 +145,6 @@ fn genesis_config_works() { }); } -fn tip_hash() -> H256 { - BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u128)) -} - -#[test] -fn tip_new_cannot_be_used_twice() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - assert_noop!( - Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), - Error::::AlreadyKnown - ); - }); -} - -#[test] -fn report_awesome_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - - // other reports don't count. - assert_noop!( - Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), - Error::::AlreadyKnown - ); - - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 102); - assert_eq!(Balances::free_balance(3), 8); - }); -} - -#[test] -fn report_awesome_from_beneficiary_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 110); - }); -} - -#[test] -fn close_tip_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - - let h = tip_hash(); - - assert_eq!(last_event(), RawEvent::NewTip(h)); - - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); - - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - - assert_eq!(last_event(), RawEvent::TipClosing(h)); - - 
assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); - - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::none(), h.into()), BadOrigin); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - - assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10)); - - assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); - }); -} - -#[test] -fn retract_tip_works() { - new_test_ext().execute_with(|| { - // with report awesome - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); - assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); - - // with tip new - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); - assert_ok!(Treasury::retract_tip(Origin::signed(10), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); - }); -} - -#[test] -fn tip_median_calculation_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); -} - -#[test] -fn tip_changing_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); -} - #[test] fn minting_works() { new_test_ext().execute_with(|| { @@ -530,9 +322,9 @@ fn treasury_account_doesnt_get_deleted() { #[test] fn inexistent_account_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(0, 100), (1, 99), (2, 1)], - }.assimilate_storage(&mut 
t).unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(0, 100), (1, 99), (2, 1)] } + .assimilate_storage(&mut t) + .unwrap(); // Treasury genesis config is not build thus treasury account does not exist let mut t: sp_io::TestExternalities = t.into(); @@ -559,605 +351,17 @@ fn inexistent_account_works() { }); } -#[test] -fn propose_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); - - assert_eq!(last_event(), RawEvent::BountyProposed(0)); - - let deposit: u64 = 85 + 5; - assert_eq!(Balances::reserved_balance(0), deposit); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 10, - bond: deposit, - status: BountyStatus::Proposed, - }); - - assert_eq!(Treasury::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); - - assert_eq!(Treasury::bounty_count(), 1); - }); -} - -#[test] -fn propose_bounty_validation_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), - Error::::ReasonTooBig - ); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), - Error::::InsufficientProposersBalance - ); - - assert_noop!( - Treasury::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), - Error::::InvalidValue - ); - }); -} - -#[test] -fn close_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); - - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - let deposit: u64 = 80 + 5; - - assert_eq!(last_event(), RawEvent::BountyRejected(0, deposit)); - - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - assert_eq!(Treasury::bounties(0), None); - assert!(!Bounties::::contains_key(0)); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn approve_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Treasury::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - let deposit: u64 = 80 + 5; - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - value: 50, - curator_deposit: 0, - bond: deposit, - status: BountyStatus::Approved, - }); - assert_eq!(Treasury::bounty_approvals(), vec![0]); - - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); - - // deposit not returned yet - assert_eq!(Balances::reserved_balance(0), deposit); - assert_eq!(Balances::free_balance(0), 100 - deposit); - - >::on_initialize(2); - - // return deposit - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 100); - - 
assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: deposit, - status: BountyStatus::Funded, - }); - assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25 - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 50); - }); -} - -#[test] -fn assign_curator_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex); - - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_noop!(Treasury::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::CuratorProposed { - curator: 4, - }, - }); - - assert_noop!(Treasury::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); - assert_noop!(Treasury::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance); - - Balances::make_free_balance_be(&4, 10); - - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::Active { - curator: 4, - update_due: 22, - }, - }); - - assert_eq!(Balances::free_balance(&4), 8); - assert_eq!(Balances::reserved_balance(&4), 2); - }); -} - -#[test] -fn unassign_curator_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - assert_noop!(Treasury::unassign_curator(Origin::signed(1), 0), BadOrigin); - - assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - - Balances::make_free_balance_be(&4, 10); - - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(&4), 8); - assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2 - }); -} - -#[test] -fn award_and_claim_bounty_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 10); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 4)); - 
assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit - - assert_noop!(Treasury::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator); - - assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::PendingPayout { - curator: 4, - beneficiary: 3, - unlock_at: 5 - }, - }); - - assert_noop!(Treasury::claim_bounty(Origin::signed(1), 0), Error::::Premature); - - System::set_block_number(5); - >::on_initialize(5); - - assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); - - assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); - - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 56, 3)); - - assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 - assert_eq!(Balances::free_balance(3), 56); - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn claim_handles_high_fee() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 30); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 49)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_ok!(Treasury::award_bounty(Origin::signed(4), 0, 3)); - - System::set_block_number(5); - >::on_initialize(5); - - // make fee > balance - let _ = Balances::slash(&Treasury::bounty_account_id(0), 10); - - assert_ok!(Treasury::claim_bounty(Origin::signed(1), 0)); - - assert_eq!(last_event(), RawEvent::BountyClaimed(0, 0, 3)); - - assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn cancel_and_refund() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Balances::transfer(Origin::signed(0), Treasury::bounty_account_id(0), 10)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 60); - - assert_noop!(Treasury::close_bounty(Origin::signed(0), 0), BadOrigin); - - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - assert_eq!(Treasury::pot(), 85); // - 25 + 10 - }); -} - -#[test] -fn award_and_cancel() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - 
System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 0, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(0), 0)); - - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - - assert_ok!(Treasury::award_bounty(Origin::signed(0), 0, 3)); - - // Cannot close bounty directly when payout is happening... - assert_noop!(Treasury::close_bounty(Origin::root(), 0), Error::::PendingPayout); - - // Instead unassign the curator to slash them and then close. - assert_ok!(Treasury::unassign_curator(Origin::root(), 0)); - assert_ok!(Treasury::close_bounty(Origin::root(), 0)); - - assert_eq!(last_event(), RawEvent::BountyCanceled(0)); - - assert_eq!(Balances::free_balance(Treasury::bounty_account_id(0)), 0); - // Slashed. - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 0); - - assert_eq!(Treasury::bounties(0), None); - assert_eq!(Treasury::bounty_descriptions(0), None); - }); -} - -#[test] -fn expire_and_unassign() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 1, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(1), 0)); - - assert_eq!(Balances::free_balance(1), 93); - assert_eq!(Balances::reserved_balance(1), 5); - - System::set_block_number(22); - >::on_initialize(22); - - assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::::Premature); - - System::set_block_number(23); - >::on_initialize(23); - - assert_ok!(Treasury::unassign_curator(Origin::signed(0), 0)); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); - - assert_eq!(Balances::free_balance(1), 93); - assert_eq!(Balances::reserved_balance(1), 0); // slashed - - }); -} - -#[test] -fn extend_expiry() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 10); - assert_ok!(Treasury::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - - assert_ok!(Treasury::approve_bounty(Origin::root(), 0)); - - assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus); - - System::set_block_number(2); - >::on_initialize(2); - - assert_ok!(Treasury::propose_curator(Origin::root(), 0, 4, 10)); - assert_ok!(Treasury::accept_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 5); - assert_eq!(Balances::reserved_balance(4), 5); - - System::set_block_number(10); - >::on_initialize(10); - - assert_noop!(Treasury::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::::RequireCurator); - assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, - bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, - }); - - assert_ok!(Treasury::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - - assert_eq!(Treasury::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, 
- bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same - }); - - System::set_block_number(25); - >::on_initialize(25); - - assert_noop!(Treasury::unassign_curator(Origin::signed(0), 0), Error::::Premature); - assert_ok!(Treasury::unassign_curator(Origin::signed(4), 0)); - - assert_eq!(Balances::free_balance(4), 10); // not slashed - assert_eq!(Balances::reserved_balance(4), 0); - }); -} - -#[test] -fn test_last_reward_migration() { - use sp_storage::Storage; - - let mut s = Storage::default(); - - #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] - pub struct OldOpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, - > { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, - } - - let reason1 = BlakeTwo256::hash(b"reason1"); - let hash1 = BlakeTwo256::hash_of(&(reason1, 10u64)); - - let old_tip_finder = OldOpenTip:: { - reason: reason1, - who: 10, - finder: Some((20, 30)), - closes: Some(13), - tips: vec![(40, 50), (60, 70)] - }; - - let reason2 = BlakeTwo256::hash(b"reason2"); - let hash2 = BlakeTwo256::hash_of(&(reason2, 20u64)); - - let old_tip_no_finder = OldOpenTip:: { - reason: reason2, - who: 20, - finder: None, - closes: Some(13), - tips: vec![(40, 50), (60, 70)] - }; - - let data = vec![ - ( - Tips::::hashed_key_for(hash1), - old_tip_finder.encode().to_vec() - ), - ( - Tips::::hashed_key_for(hash2), - old_tip_no_finder.encode().to_vec() - ), - ]; - - s.top = data.into_iter().collect(); - sp_io::TestExternalities::new(s).execute_with(|| { - Treasury::migrate_retract_tip_for_tip_new(); - - // Test w/ finder - assert_eq!( - Tips::::get(hash1), - Some(OpenTip { - reason: reason1, - who: 10, - finder: 20, - deposit: 30, - closes: Some(13), - tips: vec![(40, 50), (60, 70)], - finders_fee: true, - }) - ); - - // Test w/o finder - assert_eq!( - Tips::::get(hash2), - Some(OpenTip { - reason: reason2, - who: 20, - finder: Default::default(), - deposit: 0, - closes: Some(13), - tips: vec![(40, 50), (60, 70)], - finders_fee: false, - }) - ); - }); -} - #[test] fn genesis_funding_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let initial_funding = 100; - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized with 100. 
balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); let mut t: sp_io::TestExternalities = t.into(); t.execute_with(|| { @@ -1165,3 +369,23 @@ fn genesis_funding_works() { assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance()); }); } + +#[test] +fn max_approvals_limited() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), u64::MAX); + Balances::make_free_balance_be(&0, u64::MAX); + + for _ in 0..MaxApprovals::get() { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + } + + // One too many will fail + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_noop!( + Treasury::approve_proposal(Origin::root(), 0), + Error::::TooManyApprovals + ); + }); +} diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs new file mode 100644 index 0000000000000..126c8a1766268 --- /dev/null +++ b/frame/treasury/src/weights.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_treasury +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_treasury +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/treasury/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_treasury. +pub trait WeightInfo { + fn propose_spend() -> Weight; + fn reject_proposal() -> Weight; + fn approve_proposal(p: u32, ) -> Weight; + fn on_initialize_proposals(p: u32, ) -> Weight; +} + +/// Weights for pallet_treasury using the Substrate node and recommended hardware. 
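The generated weight functions that follow all have the same shape: a constant base term, plus a per-item slope, plus database access costs. A worked example for `approve_proposal(p)`, where the base and slope are copied from this file and the read/write constants are assumptions matching Substrate's usual RocksDbWeight defaults (roughly 25µs per read and 100µs per write, expressed in picosecond weight units):

```rust
// Worked example; DB constants are assumed defaults, not read from chain.
const READ: u64 = 25_000_000; // ~25µs per storage read, in weight units
const WRITE: u64 = 100_000_000; // ~100µs per storage write

/// Mirrors `approve_proposal(p)` below: base + slope * p + 2 reads + 1 write.
fn approve_proposal_weight(p: u64) -> u64 {
    13_543_000 + 55_000 * p + 2 * READ + WRITE
}

fn main() {
    // Even with 100 queued approvals, the I/O terms dominate the slope:
    // 13_543_000 + 5_500_000 + 150_000_000.
    assert_eq!(approve_proposal_weight(100), 169_043_000);
    println!("{}", approve_proposal_weight(100));
}
```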
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Treasury ProposalCount (r:1 w:1) + // Storage: Treasury Proposals (r:0 w:1) + fn propose_spend() -> Weight { + (41_567_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Proposals (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn reject_proposal() -> Weight { + (38_993_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Proposals (r:1 w:0) + // Storage: Treasury Approvals (r:1 w:1) + fn approve_proposal(p: u32, ) -> Weight { + (13_543_000 as Weight) + // Standard Error: 1_000 + .saturating_add((55_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Treasury Approvals (r:1 w:1) + // Storage: Treasury BountyApprovals (r:1 w:1) + // Storage: Treasury Proposals (r:2 w:2) + // Storage: System Account (r:4 w:4) + fn on_initialize_proposals(p: u32, ) -> Weight { + (51_708_000 as Weight) + // Standard Error: 21_000 + .saturating_add((57_926_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Treasury ProposalCount (r:1 w:1) + // Storage: Treasury Proposals (r:0 w:1) + fn propose_spend() -> Weight { + (41_567_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Proposals (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn reject_proposal() -> Weight { + (38_993_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Treasury Proposals (r:1 w:0) + // Storage: Treasury Approvals (r:1 w:1) + fn approve_proposal(p: u32, ) -> Weight { + (13_543_000 as Weight) + // Standard Error: 1_000 + .saturating_add((55_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Treasury Approvals (r:1 w:1) + // Storage: Treasury BountyApprovals (r:1 w:1) + // Storage: Treasury Proposals (r:2 w:2) + // Storage: System Account (r:4 w:4) + fn on_initialize_proposals(p: u32, ) -> Weight { + (51_708_000 as Weight) + // Standard Error: 21_000 + .saturating_add((57_926_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + } +} diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml new file mode 100644 index 0000000000000..0ff534767607d --- /dev/null +++ b/frame/try-runtime/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "frame-try-runtime" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +edition 
= "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for democracy" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-api = { version = "4.0.0-dev", path = "../../primitives/api", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../../primitives/std" , default-features = false } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" , default-features = false } + +frame-support = { version = "4.0.0-dev", path = "../support", default-features = false } + +[features] +default = [ "std" ] +std = [ + "sp-api/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", +] diff --git a/frame/try-runtime/src/lib.rs b/frame/try-runtime/src/lib.rs new file mode 100644 index 0000000000000..b2dfdfac6429e --- /dev/null +++ b/frame/try-runtime/src/lib.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Supporting types for try-runtime, testing and dry-running commands. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::weights::Weight; +use sp_std::prelude::*; + +sp_api::decl_runtime_apis! { + /// Runtime api for testing the execution of a runtime upgrade. + pub trait TryRuntime { + /// dry-run runtime upgrades, returning the total weight consumed. + /// + /// This should do EXACTLY the same operations as the runtime would have done in the case of + /// a runtime upgrade (e.g. pallet ordering must be the same) + /// + /// Returns the consumed weight of the migration in case of a successful one, combined with + /// the total allowed block weight of the runtime. 
+		fn on_runtime_upgrade() -> Result<(Weight, Weight), sp_runtime::RuntimeString>;
+	}
+}
diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml
new file mode 100644
index 0000000000000..4f664ecc2b6a9
--- /dev/null
+++ b/frame/uniques/Cargo.toml
@@ -0,0 +1,46 @@
+[package]
+name = "pallet-uniques"
+version = "4.0.0-dev"
+authors = ["Parity Technologies "]
+edition = "2018"
+license = "Apache-2.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "FRAME NFT asset management pallet"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
+frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true }
+
+[dev-dependencies]
+sp-std = { version = "4.0.0-dev", path = "../../primitives/std" }
+sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
+sp-io = { version = "4.0.0-dev", path = "../../primitives/io" }
+pallet-balances = { version = "4.0.0-dev", path = "../balances" }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"scale-info/std",
+	"sp-std/std",
+	"sp-runtime/std",
+	"frame-support/std",
+	"frame-system/std",
+	"frame-benchmarking/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking",
+	"sp-runtime/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+]
+try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/uniques/README.md b/frame/uniques/README.md
new file mode 100644
index 0000000000000..b924e338452ff
--- /dev/null
+++ b/frame/uniques/README.md
@@ -0,0 +1,78 @@
+# Uniques Module
+
+A simple, secure module for dealing with non-fungible assets.
+
+## Overview
+
+The Uniques module provides functionality for asset management of non-fungible asset classes, including:
+
+* Asset Issuance
+* Asset Transfer
+* Asset Destruction
+
+To use it in your runtime, you need to implement [`uniques::Config`](https://docs.rs/pallet-uniques/latest/pallet_uniques/pallet/trait.Config.html).
+
+The supported dispatchable functions are documented in the [`uniques::Call`](https://docs.rs/pallet-uniques/latest/pallet_uniques/pallet/enum.Call.html) enum.
+
+### Terminology
+
+* **Asset issuance:** The creation of a new asset instance.
+* **Asset transfer:** The action of transferring an asset instance from one account to another.
+* **Asset burning:** The destruction of an asset instance.
+* **Non-fungible asset:** An asset for which each unit has unique characteristics. There is exactly
+  one instance of such an asset in existence and there is exactly one owning account.
+
+### Goals
+
+The Uniques pallet in Substrate is designed to make the following possible:
+
+* Allow accounts to permissionlessly create asset classes (collections of asset instances).
+* Allow a named (permissioned) account to mint and burn unique assets within a class.
+* Move asset instances between accounts permissionlessly.
+* Allow a named (permissioned) account to freeze and unfreeze unique assets within a
+  class or the entire class.
+* Allow the owner of an asset instance to delegate the ability to transfer the asset to some
+  named third-party.
+
+## Interface
+
+### Permissionless dispatchables
+* `create`: Create a new asset class by placing a deposit.
+* `transfer`: Transfer an asset instance to a new owner.
+* `redeposit`: Update the deposit amount of an asset instance, potentially freeing funds.
+* `approve_transfer`: Name a delegate who may authorise a transfer.
+* `cancel_approval`: Revert the effects of a previous `approve_transfer`.
+
+### Permissioned dispatchables
+* `destroy`: Destroy an asset class.
+* `mint`: Mint a new asset instance within an asset class.
+* `burn`: Burn an asset instance within an asset class.
+* `freeze`: Prevent an individual asset from being transferred.
+* `thaw`: Revert the effects of a previous `freeze`.
+* `freeze_class`: Prevent all assets within a class from being transferred.
+* `thaw_class`: Revert the effects of a previous `freeze_class`.
+* `transfer_ownership`: Alter the owner of an asset class, moving all associated deposits.
+* `set_team`: Alter the permissioned accounts of an asset class.
+
+### Metadata (permissioned) dispatchables
+* `set_attribute`: Set a metadata attribute of an asset instance or class.
+* `clear_attribute`: Remove a metadata attribute of an asset instance or class.
+* `set_metadata`: Set general metadata of an asset instance.
+* `clear_metadata`: Remove general metadata of an asset instance.
+* `set_class_metadata`: Set general metadata of an asset class.
+* `clear_class_metadata`: Remove general metadata of an asset class.
+
+### Force (i.e. governance) dispatchables
+* `force_create`: Create a new asset class.
+* `force_asset_status`: Alter the underlying characteristics of an asset class.
+
+Please refer to the [`Call`](https://docs.rs/pallet-uniques/latest/pallet_uniques/pallet/enum.Call.html) enum
+and its associated variants for documentation on each function.
+
+## Related Modules
+
+* [`System`](https://docs.rs/frame-system/latest/frame_system/)
+* [`Support`](https://docs.rs/frame-support/latest/frame_support/)
+* [`Assets`](https://docs.rs/pallet-assets/latest/pallet_assets/)
+
+License: Apache-2.0
diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs
new file mode 100644
index 0000000000000..5c777dc961e9e
--- /dev/null
+++ b/frame/uniques/src/benchmarking.rs
@@ -0,0 +1,384 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Uniques pallet benchmarking.
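Before the benchmark helpers below, a hedged, test-style sketch of the permissionless flow the README describes (create, then mint, then transfer). The mock-runtime pieces (`new_test_ext`, the `Uniques` alias, `Origin`, numeric account ids) are assumptions rather than code from this PR; the call signatures mirror the ones the benchmarks use:

```rust
// Sketch only: assumes a uniques mock runtime with u64 account ids, where
// lookups are the account ids themselves.
#[test]
fn create_mint_transfer_flow_sketch() {
    new_test_ext().execute_with(|| {
        // 1. Anyone can create class 0 with account 1 as admin, by placing
        //    a deposit (permissionless).
        assert_ok!(Uniques::create(Origin::signed(1), 0, 1));
        // 2. The issuer mints instance 42 of class 0 to their own account.
        assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1));
        // 3. The owner transfers the instance to account 2, permissionlessly.
        assert_ok!(Uniques::transfer(Origin::signed(1), 0, 42, 2));
    });
}
```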
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_benchmarking::{ + account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, + whitelisted_caller, +}; +use frame_support::{ + dispatch::UnfilteredDispatchable, + traits::{EnsureOrigin, Get}, + BoundedVec, +}; +use frame_system::RawOrigin as SystemOrigin; +use sp_runtime::traits::Bounded; +use sp_std::{convert::TryInto, prelude::*}; + +use crate::Pallet as Uniques; + +const SEED: u32 = 0; + +fn create_class, I: 'static>( +) -> (T::ClassId, T::AccountId, ::Source) { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let class = Default::default(); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + assert!(Uniques::::create( + SystemOrigin::Signed(caller.clone()).into(), + class, + caller_lookup.clone(), + ) + .is_ok()); + (class, caller, caller_lookup) +} + +fn add_class_metadata, I: 'static>( +) -> (T::AccountId, ::Source) { + let caller = Class::::get(T::ClassId::default()).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert!(Uniques::::set_class_metadata( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + vec![0; T::StringLimit::get() as usize].try_into().unwrap(), + false, + ) + .is_ok()); + (caller, caller_lookup) +} + +fn mint_instance, I: 'static>( + index: u16, +) -> (T::InstanceId, T::AccountId, ::Source) { + let caller = Class::::get(T::ClassId::default()).unwrap().admin; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let instance = index.into(); + assert!(Uniques::::mint( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + instance, + caller_lookup.clone(), + ) + .is_ok()); + (instance, caller, caller_lookup) +} + +fn add_instance_metadata, I: 'static>( + instance: T::InstanceId, +) -> (T::AccountId, ::Source) { + let caller = Class::::get(T::ClassId::default()).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert!(Uniques::::set_metadata( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + instance, + vec![0; T::StringLimit::get() as usize].try_into().unwrap(), + false, + ) + .is_ok()); + (caller, caller_lookup) +} + +fn add_instance_attribute, I: 'static>( + instance: T::InstanceId, +) -> (BoundedVec, T::AccountId, ::Source) { + let caller = Class::::get(T::ClassId::default()).unwrap().owner; + if caller != whitelisted_caller() { + whitelist_account!(caller); + } + let caller_lookup = T::Lookup::unlookup(caller.clone()); + let key: BoundedVec<_, _> = vec![0; T::KeyLimit::get() as usize].try_into().unwrap(); + assert!(Uniques::::set_attribute( + SystemOrigin::Signed(caller.clone()).into(), + Default::default(), + Some(instance), + key.clone(), + vec![0; T::ValueLimit::get() as usize].try_into().unwrap(), + ) + .is_ok()); + (key, caller, caller_lookup) +} + +fn assert_last_event, I: 'static>(generic_event: >::Event) { + let events = frame_system::Pallet::::events(); + let system_event: ::Event = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +benchmarks_instance_pallet! 
{ + create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) + verify { + assert_last_event::(Event::Created(Default::default(), caller.clone(), caller).into()); + } + + force_create { + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + }: _(SystemOrigin::Root, Default::default(), caller_lookup, true) + verify { + assert_last_event::(Event::ForceCreated(Default::default(), caller).into()); + } + + destroy { + let n in 0 .. 1_000; + let m in 0 .. 1_000; + let a in 0 .. 1_000; + + let (class, caller, caller_lookup) = create_class::(); + add_class_metadata::(); + for i in 0..n { + mint_instance::(i as u16); + } + for i in 0..m { + add_instance_metadata::((i as u16).into()); + } + for i in 0..a { + add_instance_attribute::((i as u16).into()); + } + let witness = Class::::get(class).unwrap().destroy_witness(); + }: _(SystemOrigin::Signed(caller), class, witness) + verify { + assert_last_event::(Event::Destroyed(class).into()); + } + + mint { + let (class, caller, caller_lookup) = create_class::(); + let instance = Default::default(); + }: _(SystemOrigin::Signed(caller.clone()), class, instance, caller_lookup) + verify { + assert_last_event::(Event::Issued(class, instance, caller).into()); + } + + burn { + let (class, caller, caller_lookup) = create_class::(); + let (instance, ..) = mint_instance::(0); + }: _(SystemOrigin::Signed(caller.clone()), class, instance, Some(caller_lookup)) + verify { + assert_last_event::(Event::Burned(class, instance, caller).into()); + } + + transfer { + let (class, caller, caller_lookup) = create_class::(); + let (instance, ..) = mint_instance::(Default::default()); + + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + }: _(SystemOrigin::Signed(caller.clone()), class, instance, target_lookup) + verify { + assert_last_event::(Event::Transferred(class, instance, caller, target).into()); + } + + redeposit { + let i in 0 .. 5_000; + let (class, caller, caller_lookup) = create_class::(); + let instances = (0..i).map(|x| mint_instance::(x as u16).0).collect::>(); + Uniques::::force_asset_status( + SystemOrigin::Root.into(), + class, + caller_lookup.clone(), + caller_lookup.clone(), + caller_lookup.clone(), + caller_lookup.clone(), + true, + false, + )?; + }: _(SystemOrigin::Signed(caller.clone()), class, instances.clone()) + verify { + assert_last_event::(Event::Redeposited(class, instances).into()); + } + + freeze { + let (class, caller, caller_lookup) = create_class::(); + let (instance, ..) = mint_instance::(Default::default()); + }: _(SystemOrigin::Signed(caller.clone()), Default::default(), Default::default()) + verify { + assert_last_event::(Event::Frozen(Default::default(), Default::default()).into()); + } + + thaw { + let (class, caller, caller_lookup) = create_class::(); + let (instance, ..) 
= mint_instance::(Default::default()); + Uniques::::freeze( + SystemOrigin::Signed(caller.clone()).into(), + class, + instance, + )?; + }: _(SystemOrigin::Signed(caller.clone()), class, instance) + verify { + assert_last_event::(Event::Thawed(class, instance).into()); + } + + freeze_class { + let (class, caller, caller_lookup) = create_class::(); + }: _(SystemOrigin::Signed(caller.clone()), class) + verify { + assert_last_event::(Event::ClassFrozen(class).into()); + } + + thaw_class { + let (class, caller, caller_lookup) = create_class::(); + let origin = SystemOrigin::Signed(caller.clone()).into(); + Uniques::::freeze_class(origin, class)?; + }: _(SystemOrigin::Signed(caller.clone()), class) + verify { + assert_last_event::(Event::ClassThawed(class).into()); + } + + transfer_ownership { + let (class, caller, _) = create_class::(); + let target: T::AccountId = account("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); + T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); + }: _(SystemOrigin::Signed(caller), class, target_lookup) + verify { + assert_last_event::(Event::OwnerChanged(class, target).into()); + } + + set_team { + let (class, caller, _) = create_class::(); + let target0 = T::Lookup::unlookup(account("target", 0, SEED)); + let target1 = T::Lookup::unlookup(account("target", 1, SEED)); + let target2 = T::Lookup::unlookup(account("target", 2, SEED)); + }: _(SystemOrigin::Signed(caller), Default::default(), target0.clone(), target1.clone(), target2.clone()) + verify { + assert_last_event::(Event::TeamChanged( + class, + account("target", 0, SEED), + account("target", 1, SEED), + account("target", 2, SEED), + ).into()); + } + + force_asset_status { + let (class, caller, caller_lookup) = create_class::(); + let origin = T::ForceOrigin::successful_origin(); + let call = Call::::force_asset_status { + class, + owner: caller_lookup.clone(), + issuer: caller_lookup.clone(), + admin: caller_lookup.clone(), + freezer: caller_lookup.clone(), + free_holding: true, + is_frozen: false, + }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert_last_event::(Event::AssetStatusChanged(class).into()); + } + + set_attribute { + let key: BoundedVec<_, _> = vec![0u8; T::KeyLimit::get() as usize].try_into().unwrap(); + let value: BoundedVec<_, _> = vec![0u8; T::ValueLimit::get() as usize].try_into().unwrap(); + + let (class, caller, _) = create_class::(); + let (instance, ..) = mint_instance::(0); + add_instance_metadata::(instance); + }: _(SystemOrigin::Signed(caller), class, Some(instance), key.clone(), value.clone()) + verify { + assert_last_event::(Event::AttributeSet(class, Some(instance), key, value).into()); + } + + clear_attribute { + let (class, caller, _) = create_class::(); + let (instance, ..) = mint_instance::(0); + add_instance_metadata::(instance); + let (key, ..) = add_instance_attribute::(instance); + }: _(SystemOrigin::Signed(caller), class, Some(instance), key.clone()) + verify { + assert_last_event::(Event::AttributeCleared(class, Some(instance), key).into()); + } + + set_metadata { + let data: BoundedVec<_, _> = vec![0u8; T::StringLimit::get() as usize].try_into().unwrap(); + + let (class, caller, _) = create_class::(); + let (instance, ..) = mint_instance::(0); + }: _(SystemOrigin::Signed(caller), class, instance, data.clone(), false) + verify { + assert_last_event::(Event::MetadataSet(class, instance, data, false).into()); + } + + clear_metadata { + let (class, caller, _) = create_class::(); + let (instance, ..) 
= mint_instance::(0); + add_instance_metadata::(instance); + }: _(SystemOrigin::Signed(caller), class, instance) + verify { + assert_last_event::(Event::MetadataCleared(class, instance).into()); + } + + set_class_metadata { + let data: BoundedVec<_, _> = vec![0u8; T::StringLimit::get() as usize].try_into().unwrap(); + + let (class, caller, _) = create_class::(); + }: _(SystemOrigin::Signed(caller), class, data.clone(), false) + verify { + assert_last_event::(Event::ClassMetadataSet(class, data, false).into()); + } + + clear_class_metadata { + let (class, caller, _) = create_class::(); + add_class_metadata::(); + }: _(SystemOrigin::Signed(caller), class) + verify { + assert_last_event::(Event::ClassMetadataCleared(class).into()); + } + + approve_transfer { + let (class, caller, _) = create_class::(); + let (instance, ..) = mint_instance::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + }: _(SystemOrigin::Signed(caller.clone()), class, instance, delegate_lookup) + verify { + assert_last_event::(Event::ApprovedTransfer(class, instance, caller, delegate).into()); + } + + cancel_approval { + let (class, caller, _) = create_class::(); + let (instance, ..) = mint_instance::(0); + let delegate: T::AccountId = account("delegate", 0, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); + let origin = SystemOrigin::Signed(caller.clone()).into(); + Uniques::::approve_transfer(origin, class, instance, delegate_lookup.clone())?; + }: _(SystemOrigin::Signed(caller.clone()), class, instance, Some(delegate_lookup)) + verify { + assert_last_event::(Event::ApprovalCancelled(class, instance, caller, delegate).into()); + } +} + +impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs new file mode 100644 index 0000000000000..a878a4910f769 --- /dev/null +++ b/frame/uniques/src/functions.rs @@ -0,0 +1,145 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various pieces of common functionality. 
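+//!
+//! Most helpers below take a `with_details` closure so that the dispatchables in
+//! `lib.rs` can inject their own permission checks before any state is mutated.
+//! A minimal sketch of the pattern (the `origin` binding is hypothetical, for
+//! illustration only):
+//!
+//! ```ignore
+//! Self::do_transfer(class, instance, dest, |class_details, details| {
+//! 	// Fail unless the signer is the class admin or the current owner.
+//! 	let permitted = class_details.admin == origin || details.owner == origin;
+//! 	ensure!(permitted, Error::<T, I>::NoPermission);
+//! 	Ok(())
+//! })
+//! ```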
+
+use super::*;
+use frame_support::{ensure, traits::Get};
+use sp_runtime::{DispatchError, DispatchResult};
+
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
+	pub(crate) fn do_transfer(
+		class: T::ClassId,
+		instance: T::InstanceId,
+		dest: T::AccountId,
+		with_details: impl FnOnce(
+			&ClassDetailsFor<T, I>,
+			&mut InstanceDetailsFor<T, I>,
+		) -> DispatchResult,
+	) -> DispatchResult {
+		let class_details = Class::<T, I>::get(&class).ok_or(Error::<T, I>::Unknown)?;
+		ensure!(!class_details.is_frozen, Error::<T, I>::Frozen);
+
+		let mut details = Asset::<T, I>::get(&class, &instance).ok_or(Error::<T, I>::Unknown)?;
+		ensure!(!details.is_frozen, Error::<T, I>::Frozen);
+		with_details(&class_details, &mut details)?;
+
+		Account::<T, I>::remove((&details.owner, &class, &instance));
+		Account::<T, I>::insert((&dest, &class, &instance), ());
+		let origin = details.owner;
+		details.owner = dest;
+		Asset::<T, I>::insert(&class, &instance, &details);
+
+		Self::deposit_event(Event::Transferred(class, instance, origin, details.owner));
+		Ok(())
+	}
+
+	pub(super) fn do_create_class(
+		class: T::ClassId,
+		owner: T::AccountId,
+		admin: T::AccountId,
+		deposit: DepositBalanceOf<T, I>,
+		free_holding: bool,
+		event: Event<T, I>,
+	) -> DispatchResult {
+		ensure!(!Class::<T, I>::contains_key(class), Error::<T, I>::InUse);
+
+		T::Currency::reserve(&owner, deposit)?;
+
+		Class::<T, I>::insert(
+			class,
+			ClassDetails {
+				owner: owner.clone(),
+				issuer: admin.clone(),
+				admin: admin.clone(),
+				freezer: admin.clone(),
+				total_deposit: deposit,
+				free_holding,
+				instances: 0,
+				instance_metadatas: 0,
+				attributes: 0,
+				is_frozen: false,
+			},
+		);
+
+		Self::deposit_event(event);
+		Ok(())
+	}
+
+	pub(super) fn do_mint(
+		class: T::ClassId,
+		instance: T::InstanceId,
+		owner: T::AccountId,
+		with_details: impl FnOnce(&ClassDetailsFor<T, I>) -> DispatchResult,
+	) -> DispatchResult {
+		ensure!(!Asset::<T, I>::contains_key(class, instance), Error::<T, I>::AlreadyExists);
+
+		Class::<T, I>::try_mutate(&class, |maybe_class_details| -> DispatchResult {
+			let class_details = maybe_class_details.as_mut().ok_or(Error::<T, I>::Unknown)?;
+
+			with_details(&class_details)?;
+
+			let instances =
+				class_details.instances.checked_add(1).ok_or(ArithmeticError::Overflow)?;
+			class_details.instances = instances;
+
+			let deposit = match class_details.free_holding {
+				true => Zero::zero(),
+				false => T::InstanceDeposit::get(),
+			};
+			T::Currency::reserve(&class_details.owner, deposit)?;
+			class_details.total_deposit += deposit;
+
+			let owner = owner.clone();
+			Account::<T, I>::insert((&owner, &class, &instance), ());
+			let details = InstanceDetails { owner, approved: None, is_frozen: false, deposit };
+			Asset::<T, I>::insert(&class, &instance, details);
+			Ok(())
+		})?;
+
+		Self::deposit_event(Event::Issued(class, instance, owner));
+		Ok(())
+	}
+
+	pub(super) fn do_burn(
+		class: T::ClassId,
+		instance: T::InstanceId,
+		with_details: impl FnOnce(&ClassDetailsFor<T, I>, &InstanceDetailsFor<T, I>) -> DispatchResult,
+	) -> DispatchResult {
+		let owner = Class::<T, I>::try_mutate(
+			&class,
+			|maybe_class_details| -> Result<T::AccountId, DispatchError> {
+				let class_details = maybe_class_details.as_mut().ok_or(Error::<T, I>::Unknown)?;
+				let details =
+					Asset::<T, I>::get(&class, &instance).ok_or(Error::<T, I>::Unknown)?;
+				with_details(&class_details, &details)?;
+
+				// Return the deposit.
+ T::Currency::unreserve(&class_details.owner, details.deposit); + class_details.total_deposit.saturating_reduce(details.deposit); + class_details.instances.saturating_dec(); + Ok(details.owner) + }, + )?; + + Asset::::remove(&class, &instance); + Account::::remove((&owner, &class, &instance)); + + Self::deposit_event(Event::Burned(class, instance, owner)); + Ok(()) + } +} diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs new file mode 100644 index 0000000000000..c5d5c6089f865 --- /dev/null +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -0,0 +1,164 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for `nonfungibles` traits. + +use super::*; +use frame_support::{ + traits::{ + tokens::nonfungibles::{Create, Inspect, InspectEnumerable, Mutate, Transfer}, + Get, + }, + BoundedSlice, +}; +use sp_runtime::DispatchResult; +use sp_std::convert::TryFrom; + +impl, I: 'static> Inspect<::AccountId> for Pallet { + type InstanceId = T::InstanceId; + type ClassId = T::ClassId; + + fn owner( + class: &Self::ClassId, + instance: &Self::InstanceId, + ) -> Option<::AccountId> { + Asset::::get(class, instance).map(|a| a.owner) + } + + fn class_owner(class: &Self::ClassId) -> Option<::AccountId> { + Class::::get(class).map(|a| a.owner) + } + + /// Returns the attribute value of `instance` of `class` corresponding to `key`. + /// + /// When `key` is empty, we return the instance metadata value. + /// + /// By default this is `None`; no attributes are defined. + fn attribute( + class: &Self::ClassId, + instance: &Self::InstanceId, + key: &[u8], + ) -> Option> { + if key.is_empty() { + // We make the empty key map to the instance metadata value. + InstanceMetadataOf::::get(class, instance).map(|m| m.data.into()) + } else { + let key = BoundedSlice::<_, _>::try_from(key).ok()?; + Attribute::::get((class, Some(instance), key)).map(|a| a.0.into()) + } + } + + /// Returns the attribute value of `instance` of `class` corresponding to `key`. + /// + /// When `key` is empty, we return the instance metadata value. + /// + /// By default this is `None`; no attributes are defined. + fn class_attribute(class: &Self::ClassId, key: &[u8]) -> Option> { + if key.is_empty() { + // We make the empty key map to the instance metadata value. + ClassMetadataOf::::get(class).map(|m| m.data.into()) + } else { + let key = BoundedSlice::<_, _>::try_from(key).ok()?; + Attribute::::get((class, Option::::None, key)).map(|a| a.0.into()) + } + } + + /// Returns `true` if the asset `instance` of `class` may be transferred. + /// + /// Default implementation is that all assets are transferable. 
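+	/// Here that means an instance is transferable only while neither the instance
+	/// itself nor its class is frozen.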
+ fn can_transfer(class: &Self::ClassId, instance: &Self::InstanceId) -> bool { + match (Class::::get(class), Asset::::get(class, instance)) { + (Some(cd), Some(id)) if !cd.is_frozen && !id.is_frozen => true, + _ => false, + } + } +} + +impl, I: 'static> Create<::AccountId> for Pallet { + /// Create a `class` of nonfungible assets to be owned by `who` and managed by `admin`. + fn create_class( + class: &Self::ClassId, + who: &T::AccountId, + admin: &T::AccountId, + ) -> DispatchResult { + Self::do_create_class( + class.clone(), + who.clone(), + admin.clone(), + T::ClassDeposit::get(), + false, + Event::Created(class.clone(), who.clone(), admin.clone()), + ) + } +} + +impl, I: 'static> Mutate<::AccountId> for Pallet { + fn mint_into( + class: &Self::ClassId, + instance: &Self::InstanceId, + who: &T::AccountId, + ) -> DispatchResult { + Self::do_mint(class.clone(), instance.clone(), who.clone(), |_| Ok(())) + } + + fn burn_from(class: &Self::ClassId, instance: &Self::InstanceId) -> DispatchResult { + Self::do_burn(class.clone(), instance.clone(), |_, _| Ok(())) + } +} + +impl, I: 'static> Transfer for Pallet { + fn transfer( + class: &Self::ClassId, + instance: &Self::InstanceId, + destination: &T::AccountId, + ) -> DispatchResult { + Self::do_transfer(class.clone(), instance.clone(), destination.clone(), |_, _| Ok(())) + } +} + +impl, I: 'static> InspectEnumerable for Pallet { + /// Returns an iterator of the asset classes in existence. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn classes() -> Box> { + Box::new(ClassMetadataOf::::iter_keys()) + } + + /// Returns an iterator of the instances of an asset `class` in existence. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn instances(class: &Self::ClassId) -> Box> { + Box::new(InstanceMetadataOf::::iter_key_prefix(class)) + } + + /// Returns an iterator of the asset instances of all classes owned by `who`. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn owned(who: &T::AccountId) -> Box> { + Box::new(Account::::iter_key_prefix((who,))) + } + + /// Returns an iterator of the asset instances of `class` owned by `who`. + /// + /// NOTE: iterating this list invokes a storage read per item. + fn owned_in_class( + class: &Self::ClassId, + who: &T::AccountId, + ) -> Box> { + Box::new(Account::::iter_key_prefix((who, class))) + } +} diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs new file mode 100644 index 0000000000000..8c716694051b5 --- /dev/null +++ b/frame/uniques/src/lib.rs @@ -0,0 +1,1221 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Unique (Assets) Module +//! +//! A simple, secure module for dealing with non-fungible assets. +//! +//! ## Related Modules +//! +//! * [`System`](../frame_system/index.html) +//! 
* [`Support`](../frame_support/index.html) + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +pub mod mock; +#[cfg(test)] +mod tests; +pub mod weights; + +mod functions; +mod impl_nonfungibles; +mod types; +pub use types::*; + +use codec::{Decode, Encode, HasCompact}; +use frame_support::traits::{BalanceStatus::Reserved, Currency, ReservableCurrency}; +use frame_system::Config as SystemConfig; +use sp_runtime::{ + traits::{Saturating, StaticLookup, Zero}, + ArithmeticError, RuntimeDebug, +}; +use sp_std::prelude::*; + +pub use pallet::*; +pub use weights::WeightInfo; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + /// The module configuration trait. + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// Identifier for the class of asset. + type ClassId: Member + Parameter + Default + Copy + HasCompact; + + /// The type used to identify a unique asset within an asset class. + type InstanceId: Member + Parameter + Default + Copy + HasCompact + From; + + /// The currency mechanism, used for paying for reserves. + type Currency: ReservableCurrency; + + /// The origin which may forcibly create or destroy an asset or otherwise alter privileged + /// attributes. + type ForceOrigin: EnsureOrigin; + + /// The basic amount of funds that must be reserved for an asset class. + #[pallet::constant] + type ClassDeposit: Get>; + + /// The basic amount of funds that must be reserved for an asset instance. + #[pallet::constant] + type InstanceDeposit: Get>; + + /// The basic amount of funds that must be reserved when adding metadata to your asset. + #[pallet::constant] + type MetadataDepositBase: Get>; + + /// The basic amount of funds that must be reserved when adding an attribute to an asset. + #[pallet::constant] + type AttributeDepositBase: Get>; + + /// The additional funds that must be reserved for the number of bytes store in metadata, + /// either "normal" metadata or attribute metadata. + #[pallet::constant] + type DepositPerByte: Get>; + + /// The maximum length of data stored on-chain. + #[pallet::constant] + type StringLimit: Get; + + /// The maximum length of an attribute key. + #[pallet::constant] + type KeyLimit: Get; + + /// The maximum length of an attribute value. + #[pallet::constant] + type ValueLimit: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::storage] + /// Details of an asset class. + pub(super) type Class, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::ClassId, + ClassDetails>, + >; + + #[pallet::storage] + /// The assets held by any given account; set out this way so that assets owned by a single + /// account can be enumerated. + pub(super) type Account, I: 'static = ()> = StorageNMap< + _, + ( + NMapKey, // owner + NMapKey, + NMapKey, + ), + (), + OptionQuery, + >; + + #[pallet::storage] + /// The assets in existence and their ownership details. + pub(super) type Asset, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::ClassId, + Blake2_128Concat, + T::InstanceId, + InstanceDetails>, + OptionQuery, + >; + + #[pallet::storage] + /// Metadata of an asset class. 
+ pub(super) type ClassMetadataOf, I: 'static = ()> = StorageMap< + _, + Blake2_128Concat, + T::ClassId, + ClassMetadata, T::StringLimit>, + OptionQuery, + >; + + #[pallet::storage] + /// Metadata of an asset instance. + pub(super) type InstanceMetadataOf, I: 'static = ()> = StorageDoubleMap< + _, + Blake2_128Concat, + T::ClassId, + Blake2_128Concat, + T::InstanceId, + InstanceMetadata, T::StringLimit>, + OptionQuery, + >; + + #[pallet::storage] + /// Metadata of an asset class. + pub(super) type Attribute, I: 'static = ()> = StorageNMap< + _, + ( + NMapKey, + NMapKey>, + NMapKey>, + ), + (BoundedVec, DepositBalanceOf), + OptionQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// An asset class was created. \[ class, creator, owner \] + Created(T::ClassId, T::AccountId, T::AccountId), + /// An asset class was force-created. \[ class, owner \] + ForceCreated(T::ClassId, T::AccountId), + /// An asset `class` was destroyed. \[ class \] + Destroyed(T::ClassId), + /// An asset `instance` was issued. \[ class, instance, owner \] + Issued(T::ClassId, T::InstanceId, T::AccountId), + /// An asset `instance` was transferred. \[ class, instance, from, to \] + Transferred(T::ClassId, T::InstanceId, T::AccountId, T::AccountId), + /// An asset `instance` was destroyed. \[ class, instance, owner \] + Burned(T::ClassId, T::InstanceId, T::AccountId), + /// Some asset `instance` was frozen. \[ class, instance \] + Frozen(T::ClassId, T::InstanceId), + /// Some asset `instance` was thawed. \[ class, instance \] + Thawed(T::ClassId, T::InstanceId), + /// Some asset `class` was frozen. \[ class \] + ClassFrozen(T::ClassId), + /// Some asset `class` was thawed. \[ class \] + ClassThawed(T::ClassId), + /// The owner changed \[ class, new_owner \] + OwnerChanged(T::ClassId, T::AccountId), + /// The management team changed \[ class, issuer, admin, freezer \] + TeamChanged(T::ClassId, T::AccountId, T::AccountId, T::AccountId), + /// An `instance` of an asset `class` has been approved by the `owner` for transfer by a + /// `delegate`. + /// \[ class, instance, owner, delegate \] + ApprovedTransfer(T::ClassId, T::InstanceId, T::AccountId, T::AccountId), + /// An approval for a `delegate` account to transfer the `instance` of an asset `class` was + /// cancelled by its `owner`. + /// \[ class, instance, owner, delegate \] + ApprovalCancelled(T::ClassId, T::InstanceId, T::AccountId, T::AccountId), + /// An asset `class` has had its attributes changed by the `Force` origin. + /// \[ class \] + AssetStatusChanged(T::ClassId), + /// New metadata has been set for an asset class. \[ class, data, is_frozen \] + ClassMetadataSet(T::ClassId, BoundedVec, bool), + /// Metadata has been cleared for an asset class. \[ class \] + ClassMetadataCleared(T::ClassId), + /// New metadata has been set for an asset instance. + /// \[ class, instance, data, is_frozen \] + MetadataSet(T::ClassId, T::InstanceId, BoundedVec, bool), + /// Metadata has been cleared for an asset instance. \[ class, instance \] + MetadataCleared(T::ClassId, T::InstanceId), + /// Metadata has been cleared for an asset instance. \[ class, successful_instances \] + Redeposited(T::ClassId, Vec), + /// New attribute metadata has been set for an asset class or instance. + /// \[ class, maybe_instance, key, value \] + AttributeSet( + T::ClassId, + Option, + BoundedVec, + BoundedVec, + ), + /// Attribute metadata has been cleared for an asset class or instance. 
+ /// \[ class, maybe_instance, key, maybe_value \] + AttributeCleared(T::ClassId, Option, BoundedVec), + } + + #[pallet::error] + pub enum Error { + /// The signing account has no permission to do the operation. + NoPermission, + /// The given asset ID is unknown. + Unknown, + /// The asset instance ID has already been used for an asset. + AlreadyExists, + /// The owner turned out to be different to what was expected. + WrongOwner, + /// Invalid witness data given. + BadWitness, + /// The asset ID is already taken. + InUse, + /// The asset instance or class is frozen. + Frozen, + /// The delegate turned out to be different to what was expected. + WrongDelegate, + /// There is no delegate approved. + NoDelegate, + /// No approval exists that would allow the transfer. + Unapproved, + } + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet {} + + impl, I: 'static> Pallet { + /// Get the owner of the asset instance, if the asset exists. + pub fn owner(class: T::ClassId, instance: T::InstanceId) -> Option { + Asset::::get(class, instance).map(|i| i.owner) + } + } + + #[pallet::call] + impl, I: 'static> Pallet { + /// Issue a new class of non-fungible assets from a public origin. + /// + /// This new asset class has no assets initially and its owner is the origin. + /// + /// The origin must be Signed and the sender must have sufficient funds free. + /// + /// `AssetDeposit` funds of sender are reserved. + /// + /// Parameters: + /// - `class`: The identifier of the new asset class. This must not be currently in use. + /// - `admin`: The admin of this class of assets. The admin is the initial address of each + /// member of the asset class's admin team. + /// + /// Emits `Created` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::create())] + pub fn create( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + admin: ::Source, + ) -> DispatchResult { + let owner = ensure_signed(origin)?; + let admin = T::Lookup::lookup(admin)?; + + Self::do_create_class( + class, + owner.clone(), + admin.clone(), + T::ClassDeposit::get(), + false, + Event::Created(class, owner, admin), + ) + } + + /// Issue a new class of non-fungible assets from a privileged origin. + /// + /// This new asset class has no assets initially. + /// + /// The origin must conform to `ForceOrigin`. + /// + /// Unlike `create`, no funds are reserved. + /// + /// - `class`: The identifier of the new asset. This must not be currently in use. + /// - `owner`: The owner of this class of assets. The owner has full superuser permissions + /// over this asset, but may later change and configure the permissions using + /// `transfer_ownership` and `set_team`. + /// + /// Emits `ForceCreated` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_create())] + pub fn force_create( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + owner: ::Source, + free_holding: bool, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Self::do_create_class( + class, + owner.clone(), + owner.clone(), + Zero::zero(), + free_holding, + Event::ForceCreated(class, owner), + ) + } + + /// Destroy a class of fungible assets. + /// + /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the + /// owner of the asset `class`. + /// + /// - `class`: The identifier of the asset class to be destroyed. + /// - `witness`: Information on the instances minted in the asset class. 
This must be + /// correct. + /// + /// Emits `Destroyed` event when successful. + /// + /// Weight: `O(n + m)` where: + /// - `n = witness.instances` + /// - `m = witness.instance_metadatas` + /// - `a = witness.attributes` + #[pallet::weight(T::WeightInfo::destroy( + witness.instances, + witness.instance_metadatas, + witness.attributes, + ))] + pub fn destroy( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + witness: DestroyWitness, + ) -> DispatchResult { + let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { + Ok(_) => None, + Err(origin) => Some(ensure_signed(origin)?), + }; + Class::::try_mutate_exists(class, |maybe_details| { + let class_details = maybe_details.take().ok_or(Error::::Unknown)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(class_details.owner == check_owner, Error::::NoPermission); + } + ensure!(class_details.instances == witness.instances, Error::::BadWitness); + ensure!( + class_details.instance_metadatas == witness.instance_metadatas, + Error::::BadWitness + ); + ensure!(class_details.attributes == witness.attributes, Error::::BadWitness); + + for (instance, details) in Asset::::drain_prefix(&class) { + Account::::remove((&details.owner, &class, &instance)); + } + InstanceMetadataOf::::remove_prefix(&class, None); + ClassMetadataOf::::remove(&class); + Attribute::::remove_prefix((&class,), None); + T::Currency::unreserve(&class_details.owner, class_details.total_deposit); + + Self::deposit_event(Event::Destroyed(class)); + + // NOTE: could use postinfo to reflect the actual number of + // accounts/sufficient/approvals + Ok(()) + }) + } + + /// Mint an asset instance of a particular class. + /// + /// The origin must be Signed and the sender must be the Issuer of the asset `class`. + /// + /// - `class`: The class of the asset to be minted. + /// - `instance`: The instance value of the asset to be minted. + /// - `beneficiary`: The initial owner of the minted asset. + /// + /// Emits `Issued` event when successful. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::mint())] + pub fn mint( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + #[pallet::compact] instance: T::InstanceId, + owner: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Self::do_mint(class, instance, owner, |class_details| { + ensure!(class_details.issuer == origin, Error::::NoPermission); + Ok(()) + }) + } + + /// Destroy a single asset instance. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `class`. + /// + /// - `class`: The class of the asset to be burned. + /// - `instance`: The instance of the asset to be burned. + /// - `check_owner`: If `Some` then the operation will fail with `WrongOwner` unless the + /// asset is owned by this value. + /// + /// Emits `Burned` with the actual amount burned. + /// + /// Weight: `O(1)` + /// Modes: `check_owner.is_some()`. 
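+		///
+		/// For example (illustrative values only): `burn(origin, class, instance,
+		/// Some(owner))` fails with `WrongOwner` if `instance` is not currently owned
+		/// by the account behind `owner`, even when the signer would otherwise be
+		/// permitted as the class Admin.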
+ #[pallet::weight(T::WeightInfo::burn())] + pub fn burn( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + #[pallet::compact] instance: T::InstanceId, + check_owner: Option<::Source>, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let check_owner = check_owner.map(T::Lookup::lookup).transpose()?; + + Self::do_burn(class, instance, |class_details, details| { + let is_permitted = class_details.admin == origin || details.owner == origin; + ensure!(is_permitted, Error::::NoPermission); + ensure!( + check_owner.map_or(true, |o| o == details.owner), + Error::::WrongOwner + ); + Ok(()) + }) + } + + /// Move an asset from the sender account to another. + /// + /// Origin must be Signed and the signing account must be either: + /// - the Admin of the asset `class`; + /// - the Owner of the asset `instance`; + /// - the approved delegate for the asset `instance` (in this case, the approval is reset). + /// + /// Arguments: + /// - `class`: The class of the asset to be transferred. + /// - `instance`: The instance of the asset to be transferred. + /// - `dest`: The account to receive ownership of the asset. + /// + /// Emits `Transferred`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::transfer())] + pub fn transfer( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + #[pallet::compact] instance: T::InstanceId, + dest: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + + Self::do_transfer(class, instance, dest, |class_details, details| { + if details.owner != origin && class_details.admin != origin { + let approved = details.approved.take().map_or(false, |i| i == origin); + ensure!(approved, Error::::NoPermission); + } + Ok(()) + }) + } + + /// Reevaluate the deposits on some assets. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `class`. + /// + /// - `class`: The class of the asset to be frozen. + /// - `instances`: The instances of the asset class whose deposits will be reevaluated. + /// + /// NOTE: This exists as a best-effort function. Any asset instances which are unknown or + /// in the case that the owner account does not have reservable funds to pay for a + /// deposit increase are ignored. Generally the owner isn't going to call this on instances + /// whose existing deposit is less than the refreshed deposit as it would only cost them, + /// so it's of little consequence. + /// + /// It will still return an error in the case that the class is unknown of the signer is + /// not permitted to call it. 
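+		///
+		/// A typical use (illustrative): after `force_asset_status` has switched the
+		/// class to `free_holding = true`, the owner calls `redeposit` so that the
+		/// instance deposits, now calculated as zero, are refunded.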
+ /// + /// Weight: `O(instances.len())` + #[pallet::weight(T::WeightInfo::redeposit(instances.len() as u32))] + pub fn redeposit( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + instances: Vec, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let mut class_details = Class::::get(&class).ok_or(Error::::Unknown)?; + ensure!(class_details.owner == origin, Error::::NoPermission); + let deposit = match class_details.free_holding { + true => Zero::zero(), + false => T::InstanceDeposit::get(), + }; + + let mut successful = Vec::with_capacity(instances.len()); + for instance in instances.into_iter() { + let mut details = match Asset::::get(&class, &instance) { + Some(x) => x, + None => continue, + }; + let old = details.deposit; + if old > deposit { + T::Currency::unreserve(&class_details.owner, old - deposit); + } else if deposit > old { + if T::Currency::reserve(&class_details.owner, deposit - old).is_err() { + // NOTE: No alterations made to class_details in this iteration so far, so + // this is OK to do. + continue + } + } else { + continue + } + class_details.total_deposit.saturating_accrue(deposit); + class_details.total_deposit.saturating_reduce(old); + details.deposit = deposit; + Asset::::insert(&class, &instance, &details); + successful.push(instance); + } + Class::::insert(&class, &class_details); + + Self::deposit_event(Event::::Redeposited(class, successful)); + + Ok(()) + } + + /// Disallow further unprivileged transfer of an asset instance. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `class`. + /// + /// - `class`: The class of the asset to be frozen. + /// - `instance`: The instance of the asset to be frozen. + /// + /// Emits `Frozen`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze())] + pub fn freeze( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + #[pallet::compact] instance: T::InstanceId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; + let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; + ensure!(class_details.freezer == origin, Error::::NoPermission); + + details.is_frozen = true; + Asset::::insert(&class, &instance, &details); + + Self::deposit_event(Event::::Frozen(class, instance)); + Ok(()) + } + + /// Re-allow unprivileged transfer of an asset instance. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `class`. + /// + /// - `class`: The class of the asset to be thawed. + /// - `instance`: The instance of the asset to be thawed. + /// + /// Emits `Thawed`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw())] + pub fn thaw( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + #[pallet::compact] instance: T::InstanceId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; + let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; + ensure!(class_details.admin == origin, Error::::NoPermission); + + details.is_frozen = false; + Asset::::insert(&class, &instance, &details); + + Self::deposit_event(Event::::Thawed(class, instance)); + Ok(()) + } + + /// Disallow further unprivileged transfers for a whole asset class. + /// + /// Origin must be Signed and the sender should be the Freezer of the asset `class`. + /// + /// - `class`: The asset class to be frozen. + /// + /// Emits `ClassFrozen`. 
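+		///
+		/// While the class is frozen, `transfer` fails with `Frozen` for every instance
+		/// in it; `thaw_class` (callable by the Admin) lifts the restriction.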
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::freeze_class())] + pub fn freeze_class( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Class::::try_mutate(class, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.freezer, Error::::NoPermission); + + details.is_frozen = true; + + Self::deposit_event(Event::::ClassFrozen(class)); + Ok(()) + }) + } + + /// Re-allow unprivileged transfers for a whole asset class. + /// + /// Origin must be Signed and the sender should be the Admin of the asset `class`. + /// + /// - `class`: The class to be thawed. + /// + /// Emits `ClassThawed`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::thaw_class())] + pub fn thaw_class( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Class::::try_mutate(class, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.admin, Error::::NoPermission); + + details.is_frozen = false; + + Self::deposit_event(Event::::ClassThawed(class)); + Ok(()) + }) + } + + /// Change the Owner of an asset class. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `class`. + /// + /// - `class`: The asset class whose owner should be changed. + /// - `owner`: The new Owner of this asset class. + /// + /// Emits `OwnerChanged`. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::transfer_ownership())] + pub fn transfer_ownership( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + owner: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; + + Class::::try_mutate(class, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + if details.owner == owner { + return Ok(()) + } + + // Move the deposit to the new owner. + T::Currency::repatriate_reserved( + &details.owner, + &owner, + details.total_deposit, + Reserved, + )?; + details.owner = owner.clone(); + + Self::deposit_event(Event::OwnerChanged(class, owner)); + Ok(()) + }) + } + + /// Change the Issuer, Admin and Freezer of an asset class. + /// + /// Origin must be Signed and the sender should be the Owner of the asset `class`. + /// + /// - `class`: The asset class whose team should be changed. + /// - `issuer`: The new Issuer of this asset class. + /// - `admin`: The new Admin of this asset class. + /// - `freezer`: The new Freezer of this asset class. + /// + /// Emits `TeamChanged`. 
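+		///
+		/// The three roles are checked independently elsewhere: the Issuer may `mint`,
+		/// the Admin may `burn`, `thaw` and approve transfers, and the Freezer may
+		/// `freeze` an instance or the whole class.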
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::set_team())] + pub fn set_team( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let issuer = T::Lookup::lookup(issuer)?; + let admin = T::Lookup::lookup(admin)?; + let freezer = T::Lookup::lookup(freezer)?; + + Class::::try_mutate(class, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + + details.issuer = issuer.clone(); + details.admin = admin.clone(); + details.freezer = freezer.clone(); + + Self::deposit_event(Event::TeamChanged(class, issuer, admin, freezer)); + Ok(()) + }) + } + + /// Approve an instance to be transferred by a delegated third-party account. + /// + /// Origin must be Signed and must be the owner of the asset `instance`. + /// + /// - `class`: The class of the asset to be approved for delegated transfer. + /// - `instance`: The instance of the asset to be approved for delegated transfer. + /// - `delegate`: The account to delegate permission to transfer the asset. + /// + /// Emits `ApprovedTransfer` on success. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::approve_transfer())] + pub fn approve_transfer( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + #[pallet::compact] instance: T::InstanceId, + delegate: ::Source, + ) -> DispatchResult { + let maybe_check: Option = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + + let delegate = T::Lookup::lookup(delegate)?; + + let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; + + if let Some(check) = maybe_check { + let permitted = &check == &class_details.admin || &check == &details.owner; + ensure!(permitted, Error::::NoPermission); + } + + details.approved = Some(delegate); + Asset::::insert(&class, &instance, &details); + + let delegate = details.approved.expect("set as Some above; qed"); + Self::deposit_event(Event::ApprovedTransfer(class, instance, details.owner, delegate)); + + Ok(()) + } + + /// Cancel the prior approval for the transfer of an asset by a delegate. + /// + /// Origin must be either: + /// - the `Force` origin; + /// - `Signed` with the signer being the Admin of the asset `class`; + /// - `Signed` with the signer being the Owner of the asset `instance`; + /// + /// Arguments: + /// - `class`: The class of the asset of whose approval will be cancelled. + /// - `instance`: The instance of the asset of whose approval will be cancelled. + /// - `maybe_check_delegate`: If `Some` will ensure that the given account is the one to + /// which permission of transfer is delegated. + /// + /// Emits `ApprovalCancelled` on success. 
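+		///
+		/// Note that an approval is also consumed implicitly when the delegate executes
+		/// `transfer`, so cancelling is only needed while the approval is outstanding.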
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::cancel_approval())] + pub fn cancel_approval( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + #[pallet::compact] instance: T::InstanceId, + maybe_check_delegate: Option<::Source>, + ) -> DispatchResult { + let maybe_check: Option = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; + + let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; + if let Some(check) = maybe_check { + let permitted = &check == &class_details.admin || &check == &details.owner; + ensure!(permitted, Error::::NoPermission); + } + let maybe_check_delegate = maybe_check_delegate.map(T::Lookup::lookup).transpose()?; + let old = details.approved.take().ok_or(Error::::NoDelegate)?; + if let Some(check_delegate) = maybe_check_delegate { + ensure!(check_delegate == old, Error::::WrongDelegate); + } + + Asset::::insert(&class, &instance, &details); + Self::deposit_event(Event::ApprovalCancelled(class, instance, details.owner, old)); + + Ok(()) + } + + /// Alter the attributes of a given asset. + /// + /// Origin must be `ForceOrigin`. + /// + /// - `class`: The identifier of the asset. + /// - `owner`: The new Owner of this asset. + /// - `issuer`: The new Issuer of this asset. + /// - `admin`: The new Admin of this asset. + /// - `freezer`: The new Freezer of this asset. + /// - `free_holding`: Whether a deposit is taken for holding an instance of this asset + /// class. + /// - `is_frozen`: Whether this asset class is frozen except for permissioned/admin + /// instructions. + /// + /// Emits `AssetStatusChanged` with the identity of the asset. + /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::force_asset_status())] + pub fn force_asset_status( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + owner: ::Source, + issuer: ::Source, + admin: ::Source, + freezer: ::Source, + free_holding: bool, + is_frozen: bool, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + + Class::::try_mutate(class, |maybe_asset| { + let mut asset = maybe_asset.take().ok_or(Error::::Unknown)?; + asset.owner = T::Lookup::lookup(owner)?; + asset.issuer = T::Lookup::lookup(issuer)?; + asset.admin = T::Lookup::lookup(admin)?; + asset.freezer = T::Lookup::lookup(freezer)?; + asset.free_holding = free_holding; + asset.is_frozen = is_frozen; + *maybe_asset = Some(asset); + + Self::deposit_event(Event::AssetStatusChanged(class)); + Ok(()) + }) + } + + /// Set an attribute for an asset class or instance. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// asset `class`. + /// + /// If the origin is Signed, then funds of signer are reserved according to the formula: + /// `MetadataDepositBase + DepositPerByte * (key.len + value.len)` taking into + /// account any already reserved funds. + /// + /// - `class`: The identifier of the asset class whose instance's metadata to set. + /// - `maybe_instance`: The identifier of the asset instance whose metadata to set. + /// - `key`: The key of the attribute. + /// - `value`: The value to which to set the attribute. + /// + /// Emits `AttributeSet`. 
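+		///
+		/// Worked example (with the mock runtime's illustrative constants, where
+		/// `AttributeDepositBase = 1` and `DepositPerByte = 1`): setting a 2-byte key
+		/// to a 3-byte value reserves `1 + 1 * (2 + 3) = 6` units from the class owner;
+		/// the base amount is drawn from `AttributeDepositBase`.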
+		///
+		/// Weight: `O(1)`
+		#[pallet::weight(T::WeightInfo::set_attribute())]
+		pub fn set_attribute(
+			origin: OriginFor<T>,
+			#[pallet::compact] class: T::ClassId,
+			maybe_instance: Option<T::InstanceId>,
+			key: BoundedVec<u8, T::KeyLimit>,
+			value: BoundedVec<u8, T::ValueLimit>,
+		) -> DispatchResult {
+			let maybe_check_owner = T::ForceOrigin::try_origin(origin)
+				.map(|_| None)
+				.or_else(|origin| ensure_signed(origin).map(Some))?;
+
+			let mut class_details = Class::<T, I>::get(&class).ok_or(Error::<T, I>::Unknown)?;
+			if let Some(check_owner) = &maybe_check_owner {
+				ensure!(check_owner == &class_details.owner, Error::<T, I>::NoPermission);
+			}
+			let maybe_is_frozen = match maybe_instance {
+				None => ClassMetadataOf::<T, I>::get(class).map(|v| v.is_frozen),
+				Some(instance) =>
+					InstanceMetadataOf::<T, I>::get(class, instance).map(|v| v.is_frozen),
+			};
+			ensure!(!maybe_is_frozen.unwrap_or(false), Error::<T, I>::Frozen);
+
+			let attribute = Attribute::<T, I>::get((class, maybe_instance, &key));
+			if attribute.is_none() {
+				class_details.attributes.saturating_inc();
+			}
+			let old_deposit = attribute.map_or(Zero::zero(), |m| m.1);
+			class_details.total_deposit.saturating_reduce(old_deposit);
+			let mut deposit = Zero::zero();
+			if !class_details.free_holding && maybe_check_owner.is_some() {
+				deposit = T::DepositPerByte::get()
+					.saturating_mul(((key.len() + value.len()) as u32).into())
+					.saturating_add(T::AttributeDepositBase::get());
+			}
+			class_details.total_deposit.saturating_accrue(deposit);
+			if deposit > old_deposit {
+				T::Currency::reserve(&class_details.owner, deposit - old_deposit)?;
+			} else if deposit < old_deposit {
+				T::Currency::unreserve(&class_details.owner, old_deposit - deposit);
+			}
+
+			Attribute::<T, I>::insert((&class, maybe_instance, &key), (&value, deposit));
+			Class::<T, I>::insert(class, &class_details);
+			Self::deposit_event(Event::AttributeSet(class, maybe_instance, key, value));
+			Ok(())
+		}
+
+		/// Clear an attribute for an asset class or instance.
+		///
+		/// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the
+		/// asset `class`.
+		///
+		/// Any deposit is freed for the asset class owner.
+		///
+		/// - `class`: The identifier of the asset class whose instance's attribute to clear.
+		/// - `maybe_instance`: The identifier of the asset instance whose attribute to clear.
+		/// - `key`: The key of the attribute.
+		///
+		/// Emits `AttributeCleared`.
+ /// + /// Weight: `O(1)` + #[pallet::weight(T::WeightInfo::clear_attribute())] + pub fn clear_attribute( + origin: OriginFor, + #[pallet::compact] class: T::ClassId, + maybe_instance: Option, + key: BoundedVec, + ) -> DispatchResult { + let maybe_check_owner = T::ForceOrigin::try_origin(origin) + .map(|_| None) + .or_else(|origin| ensure_signed(origin).map(Some))?; + + let mut class_details = Class::::get(&class).ok_or(Error::::Unknown)?; + if let Some(check_owner) = &maybe_check_owner { + ensure!(check_owner == &class_details.owner, Error::::NoPermission); + } + let maybe_is_frozen = match maybe_instance { + None => ClassMetadataOf::::get(class).map(|v| v.is_frozen), + Some(instance) => + InstanceMetadataOf::::get(class, instance).map(|v| v.is_frozen), + }; + ensure!(!maybe_is_frozen.unwrap_or(false), Error::::Frozen); + + if let Some((_, deposit)) = Attribute::::take((class, maybe_instance, &key)) { + class_details.attributes.saturating_dec(); + class_details.total_deposit.saturating_reduce(deposit); + T::Currency::unreserve(&class_details.owner, deposit); + Class::::insert(class, &class_details); + Self::deposit_event(Event::AttributeCleared(class, maybe_instance, key)); + } + Ok(()) + } + + /// Set the metadata for an asset instance. + /// + /// Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the + /// asset `class`. + /// + /// If the origin is Signed, then funds of signer are reserved according to the formula: + /// `MetadataDepositBase + DepositPerByte * data.len` taking into + /// account any already reserved funds. + /// + /// - `class`: The identifier of the asset class whose instance's metadata to set. + /// - `instance`: The identifier of the asset instance whose metadata to set. + /// - `data`: The general information of this asset. Limited in length by `StringLimit`. + /// - `is_frozen`: Whether the metadata should be frozen against further changes. + /// + /// Emits `MetadataSet`. 
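+		///
+		/// Worked example (with the mock runtime's `MetadataDepositBase = 1` and
+		/// `DepositPerByte = 1`): setting 10 bytes of data reserves `1 + 1 * 10 = 11`
+		/// units; replacing it with shorter data unreserves the difference.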
+		///
+		/// Weight: `O(1)`
+		#[pallet::weight(T::WeightInfo::set_metadata())]
+		pub fn set_metadata(
+			origin: OriginFor<T>,
+			#[pallet::compact] class: T::ClassId,
+			#[pallet::compact] instance: T::InstanceId,
+			data: BoundedVec<u8, T::StringLimit>,
+			is_frozen: bool,
+		) -> DispatchResult {
+			let maybe_check_owner = T::ForceOrigin::try_origin(origin)
+				.map(|_| None)
+				.or_else(|origin| ensure_signed(origin).map(Some))?;
+
+			let mut class_details = Class::<T, I>::get(&class).ok_or(Error::<T, I>::Unknown)?;
+
+			if let Some(check_owner) = &maybe_check_owner {
+				ensure!(check_owner == &class_details.owner, Error::<T, I>::NoPermission);
+			}
+
+			InstanceMetadataOf::<T, I>::try_mutate_exists(class, instance, |metadata| {
+				let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen);
+				ensure!(maybe_check_owner.is_none() || !was_frozen, Error::<T, I>::Frozen);
+
+				if metadata.is_none() {
+					class_details.instance_metadatas.saturating_inc();
+				}
+				let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit);
+				class_details.total_deposit.saturating_reduce(old_deposit);
+				let mut deposit = Zero::zero();
+				if !class_details.free_holding && maybe_check_owner.is_some() {
+					deposit = T::DepositPerByte::get()
+						.saturating_mul(((data.len()) as u32).into())
+						.saturating_add(T::MetadataDepositBase::get());
+				}
+				if deposit > old_deposit {
+					T::Currency::reserve(&class_details.owner, deposit - old_deposit)?;
+				} else if deposit < old_deposit {
+					T::Currency::unreserve(&class_details.owner, old_deposit - deposit);
+				}
+				class_details.total_deposit.saturating_accrue(deposit);
+
+				*metadata = Some(InstanceMetadata { deposit, data: data.clone(), is_frozen });
+
+				Class::<T, I>::insert(&class, &class_details);
+				Self::deposit_event(Event::MetadataSet(class, instance, data, is_frozen));
+				Ok(())
+			})
+		}
+
+		/// Clear the metadata for an asset instance.
+		///
+		/// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner
+		/// of the asset `instance`.
+		///
+		/// Any deposit is freed for the asset class owner.
+		///
+		/// - `class`: The identifier of the asset class whose instance's metadata to clear.
+		/// - `instance`: The identifier of the asset instance whose metadata to clear.
+		///
+		/// Emits `MetadataCleared`.
+		///
+		/// Weight: `O(1)`
+		#[pallet::weight(T::WeightInfo::clear_metadata())]
+		pub fn clear_metadata(
+			origin: OriginFor<T>,
+			#[pallet::compact] class: T::ClassId,
+			#[pallet::compact] instance: T::InstanceId,
+		) -> DispatchResult {
+			let maybe_check_owner = T::ForceOrigin::try_origin(origin)
+				.map(|_| None)
+				.or_else(|origin| ensure_signed(origin).map(Some))?;
+
+			let mut class_details = Class::<T, I>::get(&class).ok_or(Error::<T, I>::Unknown)?;
+			if let Some(check_owner) = &maybe_check_owner {
+				ensure!(check_owner == &class_details.owner, Error::<T, I>::NoPermission);
+			}
+
+			InstanceMetadataOf::<T, I>::try_mutate_exists(class, instance, |metadata| {
+				let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen);
+				ensure!(maybe_check_owner.is_none() || !was_frozen, Error::<T, I>::Frozen);
+
+				if metadata.is_some() {
+					class_details.instance_metadatas.saturating_dec();
+				}
+				let deposit = metadata.take().ok_or(Error::<T, I>::Unknown)?.deposit;
+				T::Currency::unreserve(&class_details.owner, deposit);
+				class_details.total_deposit.saturating_reduce(deposit);
+
+				Class::<T, I>::insert(&class, &class_details);
+				Self::deposit_event(Event::MetadataCleared(class, instance));
+				Ok(())
+			})
+		}
+
+		/// Set the metadata for an asset class.
+		///
+		/// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner
+		/// of the asset `class`.
+		///
+		/// If the origin is `Signed`, then funds of the signer are reserved according to the
+		/// formula: `MetadataDepositBase + DepositPerByte * data.len`, taking into account any
+		/// already reserved funds.
+		///
+		/// - `class`: The identifier of the asset whose metadata to update.
+		/// - `data`: The general information of this asset. Limited in length by `StringLimit`.
+		/// - `is_frozen`: Whether the metadata should be frozen against further changes.
+		///
+		/// Emits `ClassMetadataSet`.
+		///
+		/// Weight: `O(1)`
+		#[pallet::weight(T::WeightInfo::set_class_metadata())]
+		pub fn set_class_metadata(
+			origin: OriginFor<T>,
+			#[pallet::compact] class: T::ClassId,
+			data: BoundedVec<u8, T::StringLimit>,
+			is_frozen: bool,
+		) -> DispatchResult {
+			let maybe_check_owner = T::ForceOrigin::try_origin(origin)
+				.map(|_| None)
+				.or_else(|origin| ensure_signed(origin).map(Some))?;
+
+			let mut details = Class::<T, I>::get(&class).ok_or(Error::<T, I>::Unknown)?;
+			if let Some(check_owner) = &maybe_check_owner {
+				ensure!(check_owner == &details.owner, Error::<T, I>::NoPermission);
+			}
+
+			ClassMetadataOf::<T, I>::try_mutate_exists(class, |metadata| {
+				let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen);
+				ensure!(maybe_check_owner.is_none() || !was_frozen, Error::<T, I>::Frozen);
+
+				let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit);
+				details.total_deposit.saturating_reduce(old_deposit);
+				let mut deposit = Zero::zero();
+				if maybe_check_owner.is_some() && !details.free_holding {
+					deposit = T::DepositPerByte::get()
+						.saturating_mul(((data.len()) as u32).into())
+						.saturating_add(T::MetadataDepositBase::get());
+				}
+				if deposit > old_deposit {
+					T::Currency::reserve(&details.owner, deposit - old_deposit)?;
+				} else if deposit < old_deposit {
+					T::Currency::unreserve(&details.owner, old_deposit - deposit);
+				}
+				details.total_deposit.saturating_accrue(deposit);
+
+				Class::<T, I>::insert(&class, details);
+
+				*metadata = Some(ClassMetadata { deposit, data: data.clone(), is_frozen });
+
+				Self::deposit_event(Event::ClassMetadataSet(class, data, is_frozen));
+				Ok(())
+			})
+		}
+
+		/// Clear the metadata for an asset class.
+		///
+		/// Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner
+		/// of the asset `class`.
+		///
+		/// Any deposit is freed for the asset class owner.
+		///
+		/// - `class`: The identifier of the asset class whose metadata to clear.
+		///
+		/// Emits `ClassMetadataCleared`.
+		///
+		/// Weight: `O(1)`
+		#[pallet::weight(T::WeightInfo::clear_class_metadata())]
+		pub fn clear_class_metadata(
+			origin: OriginFor<T>,
+			#[pallet::compact] class: T::ClassId,
+		) -> DispatchResult {
+			let maybe_check_owner = T::ForceOrigin::try_origin(origin)
+				.map(|_| None)
+				.or_else(|origin| ensure_signed(origin).map(Some))?;
+
+			let details = Class::<T, I>::get(&class).ok_or(Error::<T, I>::Unknown)?;
+			if let Some(check_owner) = &maybe_check_owner {
+				ensure!(check_owner == &details.owner, Error::<T, I>::NoPermission);
+			}
+
+			ClassMetadataOf::<T, I>::try_mutate_exists(class, |metadata| {
+				let was_frozen = metadata.as_ref().map_or(false, |m| m.is_frozen);
+				ensure!(maybe_check_owner.is_none() || !was_frozen, Error::<T, I>::Frozen);
+
+				let deposit = metadata.take().ok_or(Error::<T, I>::Unknown)?.deposit;
+				T::Currency::unreserve(&details.owner, deposit);
+				Self::deposit_event(Event::ClassMetadataCleared(class));
+				Ok(())
+			})
+		}
+	}
+}
diff --git a/frame/uniques/src/mock.rs b/frame/uniques/src/mock.rs
new file mode 100644
index 0000000000000..658e82a5143e2
--- /dev/null
+++ b/frame/uniques/src/mock.rs
@@ -0,0 +1,125 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test environment for Uniques pallet.
+
+use super::*;
+use crate as pallet_uniques;
+
+use frame_support::{construct_runtime, parameter_types};
+use sp_core::H256;
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
+};
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
+
+construct_runtime!(
+	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+		Uniques: pallet_uniques::{Pallet, Call, Storage, Event<T>},
+	}
+);
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+}
+impl frame_system::Config for Test {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = ();
+	type BlockLength = ();
+	type Origin = Origin;
+	type Call = Call;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Hash = H256;
+	type Hashing = BlakeTwo256;
+	type AccountId = u64;
+	type Lookup = IdentityLookup<u64>;
+	type Header = Header;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type DbWeight = ();
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = pallet_balances::AccountData<u64>;
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+
+parameter_types! {
+	pub const ExistentialDeposit: u64 = 1;
+	pub const MaxReserves: u32 = 50;
+}
+
+impl pallet_balances::Config for Test {
+	type Balance = u64;
+	type DustRemoval = ();
+	type Event = Event;
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+	type WeightInfo = ();
+	type MaxLocks = ();
+	type MaxReserves = MaxReserves;
+	type ReserveIdentifier = [u8; 8];
+}
+
+parameter_types! {
+	pub const ClassDeposit: u64 = 2;
+	pub const InstanceDeposit: u64 = 1;
+	pub const KeyLimit: u32 = 50;
+	pub const ValueLimit: u32 = 50;
+	pub const StringLimit: u32 = 50;
+	pub const MetadataDepositBase: u64 = 1;
+	pub const AttributeDepositBase: u64 = 1;
+	pub const MetadataDepositPerByte: u64 = 1;
+}
+
+impl Config for Test {
+	type Event = Event;
+	type ClassId = u32;
+	type InstanceId = u32;
+	type Currency = Balances;
+	type ForceOrigin = frame_system::EnsureRoot<u64>;
+	type ClassDeposit = ClassDeposit;
+	type InstanceDeposit = InstanceDeposit;
+	type MetadataDepositBase = MetadataDepositBase;
+	type AttributeDepositBase = AttributeDepositBase;
+	type DepositPerByte = MetadataDepositPerByte;
+	type StringLimit = StringLimit;
+	type KeyLimit = KeyLimit;
+	type ValueLimit = ValueLimit;
+	type WeightInfo = ();
+}
+
+pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
+	let t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+
+	let mut ext = sp_io::TestExternalities::new(t);
+	ext.execute_with(|| System::set_block_number(1));
+	ext
+}
diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs
new file mode 100644
index 0000000000000..8a4f978b7f4f5
--- /dev/null
+++ b/frame/uniques/src/tests.rs
@@ -0,0 +1,583 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Tests for Uniques pallet.
+
+use super::*;
+use crate::mock::*;
+use frame_support::{assert_noop, assert_ok, traits::Currency};
+use pallet_balances::Error as BalancesError;
+use sp_std::convert::TryInto;
+
+fn assets() -> Vec<(u64, u32, u32)> {
+	let mut r: Vec<_> = Account::<Test>::iter().map(|x| x.0).collect();
+	r.sort();
+	let mut s: Vec<_> = Asset::<Test>::iter().map(|x| (x.2.owner, x.0, x.1)).collect();
+	s.sort();
+	assert_eq!(r, s);
+	for class in Asset::<Test>::iter()
+		.map(|x| x.0)
+		.scan(None, |s, item| {
+			if s.map_or(false, |last| last == item) {
+				*s = Some(item);
+				Some(None)
+			} else {
+				Some(Some(item))
+			}
+		})
+		.filter_map(|item| item)
+	{
+		let details = Class::<Test>::get(class).unwrap();
+		let instances = Asset::<Test>::iter_prefix(class).count() as u32;
+		assert_eq!(details.instances, instances);
+	}
+	r
+}
+
+macro_rules!
bvec { + ($( $x:tt )*) => { + vec![$( $x )*].try_into().unwrap() + } +} + +fn attributes(class: u32) -> Vec<(Option, Vec, Vec)> { + let mut s: Vec<_> = Attribute::::iter_prefix((class,)) + .map(|(k, v)| (k.0, k.1.into(), v.0.into())) + .collect(); + s.sort(); + s +} + +#[test] +fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(assets(), vec![]); + }); +} + +#[test] +fn basic_minting_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_eq!(assets(), vec![(1, 0, 42)]); + + assert_ok!(Uniques::force_create(Origin::root(), 1, 2, true)); + assert_ok!(Uniques::mint(Origin::signed(2), 1, 69, 1)); + assert_eq!(assets(), vec![(1, 0, 42), (1, 1, 69)]); + }); +} + +#[test] +fn lifecycle_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_eq!(Balances::reserved_balance(&1), 2); + + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0, 0], false)); + assert_eq!(Balances::reserved_balance(&1), 5); + assert!(ClassMetadataOf::::contains_key(0)); + + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 10)); + assert_eq!(Balances::reserved_balance(&1), 6); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 20)); + assert_eq!(Balances::reserved_balance(&1), 7); + assert_eq!(assets(), vec![(10, 0, 42), (20, 0, 69)]); + assert_eq!(Class::::get(0).unwrap().instances, 2); + assert_eq!(Class::::get(0).unwrap().instance_metadatas, 0); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![42, 42], false)); + assert_eq!(Balances::reserved_balance(&1), 10); + assert!(InstanceMetadataOf::::contains_key(0, 42)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![69, 69], false)); + assert_eq!(Balances::reserved_balance(&1), 13); + assert!(InstanceMetadataOf::::contains_key(0, 69)); + + let w = Class::::get(0).unwrap().destroy_witness(); + assert_eq!(w.instances, 2); + assert_eq!(w.instance_metadatas, 2); + assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_eq!(Balances::reserved_balance(&1), 0); + + assert!(!Class::::contains_key(0)); + assert!(!Asset::::contains_key(0, 42)); + assert!(!Asset::::contains_key(0, 69)); + assert!(!ClassMetadataOf::::contains_key(0)); + assert!(!InstanceMetadataOf::::contains_key(0, 42)); + assert!(!InstanceMetadataOf::::contains_key(0, 69)); + assert_eq!(assets(), vec![]); + }); +} + +#[test] +fn destroy_with_bad_witness_should_not_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + + let w = Class::::get(0).unwrap().destroy_witness(); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_noop!(Uniques::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + }); +} + +#[test] +fn mint_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_eq!(Uniques::owner(0, 42).unwrap(), 1); + assert_eq!(assets(), vec![(1, 0, 42)]); + }); +} + +#[test] +fn transfer_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 3)); + assert_eq!(assets(), vec![(3, 0, 42)]); + 
assert_noop!(Uniques::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); + + assert_ok!(Uniques::approve_transfer(Origin::signed(3), 0, 42, 2)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 4)); + }); +} + +#[test] +fn freezing_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::freeze(Origin::signed(1), 0, 42)); + assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + + assert_ok!(Uniques::thaw(Origin::signed(1), 0, 42)); + assert_ok!(Uniques::freeze_class(Origin::signed(1), 0)); + assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + + assert_ok!(Uniques::thaw_class(Origin::signed(1), 0)); + assert_ok!(Uniques::transfer(Origin::signed(1), 0, 42, 2)); + }); +} + +#[test] +fn origin_guards_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_noop!( + Uniques::transfer_ownership(Origin::signed(2), 0, 2), + Error::::NoPermission + ); + assert_noop!(Uniques::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); + assert_noop!(Uniques::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Uniques::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Uniques::mint(Origin::signed(2), 0, 69, 2), Error::::NoPermission); + assert_noop!(Uniques::burn(Origin::signed(2), 0, 42, None), Error::::NoPermission); + let w = Class::::get(0).unwrap().destroy_witness(); + assert_noop!(Uniques::destroy(Origin::signed(2), 0, w), Error::::NoPermission); + }); +} + +#[test] +fn transfer_owner_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 100); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Uniques::transfer_ownership(Origin::signed(1), 0, 2)); + assert_eq!(Balances::total_balance(&1), 98); + assert_eq!(Balances::total_balance(&2), 102); + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::reserved_balance(&2), 2); + + assert_noop!( + Uniques::transfer_ownership(Origin::signed(1), 0, 1), + Error::::NoPermission + ); + + // Mint and set metadata now and make sure that deposit gets transferred back. 
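
The `45` reserved from the new owner in the assertions just below can be reconstructed from the mock constants (ClassDeposit = 2, InstanceDeposit = 1, MetadataDepositBase = 1, MetadataDepositPerByte = 1); a worked sketch:

```rust
fn ownership_transfer_deposit_sketch() {
	let metadata_deposit = |len: u64| 1 + 1 * len; // base + per-byte
	// Class deposit + one minted instance + 20-byte class metadata
	// + 20-byte instance metadata, all repatriated to the new owner:
	let total = 2 + 1 + metadata_deposit(20) + metadata_deposit(20);
	assert_eq!(total, 45); // matches `reserved_balance(&3)` below
}
```
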
+ assert_ok!(Uniques::set_class_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Uniques::transfer_ownership(Origin::signed(2), 0, 3)); + assert_eq!(Balances::total_balance(&2), 57); + assert_eq!(Balances::total_balance(&3), 145); + assert_eq!(Balances::reserved_balance(&2), 0); + assert_eq!(Balances::reserved_balance(&3), 45); + }); +} + +#[test] +fn set_team_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 2)); + assert_ok!(Uniques::freeze(Origin::signed(4), 0, 42)); + assert_ok!(Uniques::thaw(Origin::signed(3), 0, 42)); + assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 3)); + assert_ok!(Uniques::burn(Origin::signed(3), 0, 42, None)); + }); +} + +#[test] +fn set_class_metadata_should_work() { + new_test_ext().execute_with(|| { + // Cannot add metadata to unknown asset + assert_noop!( + Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), + Error::::Unknown, + ); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + // Cannot add metadata to unowned asset + assert_noop!( + Uniques::set_class_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), + Error::::NoPermission, + ); + + // Successfully add metadata and take deposit + Balances::make_free_balance_be(&1, 30); + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0u8; 20], false)); + assert_eq!(Balances::free_balance(&1), 9); + assert!(ClassMetadataOf::::contains_key(0)); + + // Force origin works, too. 
+ assert_ok!(Uniques::set_class_metadata(Origin::root(), 0, bvec![0u8; 18], false)); + + // Update deposit + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0u8; 15], false)); + assert_eq!(Balances::free_balance(&1), 14); + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0u8; 25], false)); + assert_eq!(Balances::free_balance(&1), 4); + + // Cannot over-reserve + assert_noop!( + Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0u8; 40], false), + BalancesError::::InsufficientBalance, + ); + + // Can't set or clear metadata once frozen + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0u8; 15], true)); + assert_noop!( + Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0u8; 15], false), + Error::::Frozen, + ); + assert_noop!(Uniques::clear_class_metadata(Origin::signed(1), 0), Error::::Frozen); + + // Clear Metadata + assert_ok!(Uniques::set_class_metadata(Origin::root(), 0, bvec![0u8; 15], false)); + assert_noop!( + Uniques::clear_class_metadata(Origin::signed(2), 0), + Error::::NoPermission + ); + assert_noop!(Uniques::clear_class_metadata(Origin::signed(1), 1), Error::::Unknown); + assert_ok!(Uniques::clear_class_metadata(Origin::signed(1), 0)); + assert!(!ClassMetadataOf::::contains_key(0)); + }); +} + +#[test] +fn set_instance_metadata_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 30); + + // Cannot add metadata to unknown asset + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + // Cannot add metadata to unowned asset + assert_noop!( + Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false), + Error::::NoPermission, + ); + + // Successfully add metadata and take deposit + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 20], false)); + assert_eq!(Balances::free_balance(&1), 8); + assert!(InstanceMetadataOf::::contains_key(0, 42)); + + // Force origin works, too. 
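
Root succeeds in these "force origin" steps because every metadata extrinsic shares one origin check: `ForceOrigin` resolves to `None` and skips both the owner comparison and the deposit, while a signed origin must match the class owner. A condensed, hedged sketch of that pattern, using generic names like those in the pallet code above:

```rust
use frame_support::traits::EnsureOrigin;
use frame_system::ensure_signed;
use sp_runtime::traits::BadOrigin;

// `None` means "force origin: skip ownership checks"; `Some(who)` is later
// compared against the stored class owner.
fn force_or_signed<T, F>(origin: T::Origin) -> Result<Option<T::AccountId>, BadOrigin>
where
	T: frame_system::Config,
	F: EnsureOrigin<T::Origin>,
{
	F::try_origin(origin)
		.map(|_| None)
		.or_else(|origin| ensure_signed(origin).map(Some))
}
```
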
+ assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 18], false)); + + // Update deposit + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false)); + assert_eq!(Balances::free_balance(&1), 13); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 25], false)); + assert_eq!(Balances::free_balance(&1), 3); + + // Cannot over-reserve + assert_noop!( + Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 40], false), + BalancesError::::InsufficientBalance, + ); + + // Can't set or clear metadata once frozen + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], true)); + assert_noop!( + Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false), + Error::::Frozen, + ); + assert_noop!(Uniques::clear_metadata(Origin::signed(1), 0, 42), Error::::Frozen); + + // Clear Metadata + assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); + assert_noop!( + Uniques::clear_metadata(Origin::signed(2), 0, 42), + Error::::NoPermission + ); + assert_noop!(Uniques::clear_metadata(Origin::signed(1), 1, 42), Error::::Unknown); + assert_ok!(Uniques::clear_metadata(Origin::signed(1), 0, 42)); + assert!(!InstanceMetadataOf::::contains_key(0, 42)); + }); +} + +#[test] +fn set_attribute_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0]), + (Some(0), bvec![0], bvec![0]), + (Some(0), bvec![1], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 9); + + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0; 10]), + (Some(0), bvec![0], bvec![0]), + (Some(0), bvec![1], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 18); + + assert_ok!(Uniques::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); + assert_eq!( + attributes(0), + vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] + ); + assert_eq!(Balances::reserved_balance(1), 15); + + let w = Class::::get(0).unwrap().destroy_witness(); + assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_eq!(attributes(0), vec![]); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn set_attribute_should_respect_freeze() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0]), + (Some(0), bvec![0], bvec![0]), + (Some(1), bvec![0], bvec![0]), + ] + ); + assert_eq!(Balances::reserved_balance(1), 9); + + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![], true)); + let e = Error::::Frozen; + assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0]), e); + 
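
The reserved balances asserted in `set_attribute_should_work` above (9, then 18, then 15) follow the attribute deposit formula with the mock constants (AttributeDepositBase = 1, MetadataDepositPerByte = 1); a worked sketch:

```rust
// Deposit per attribute: base + per_byte * (key.len() + value.len()).
fn attribute_deposit(key_len: u64, value_len: u64) -> u64 {
	1 + 1 * (key_len + value_len)
}

fn attribute_deposit_sketch() {
	// Three attributes with 1-byte keys and 1-byte values: 3 * 3 = 9.
	assert_eq!(3 * attribute_deposit(1, 1), 9);
	// Re-setting one value to 10 bytes: 2 * 3 + (1 + 1 + 10) = 18.
	assert_eq!(2 * attribute_deposit(1, 1) + attribute_deposit(1, 10), 18);
	// Clearing one 1-byte/1-byte attribute frees 3: back to 15.
	assert_eq!(18 - attribute_deposit(1, 1), 15);
}
```
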
assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1])); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 0, bvec![], true)); + let e = Error::::Frozen; + assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1]), e); + assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![1])); + }); +} + +#[test] +fn force_asset_status_should_work() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 2)); + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 65); + + // force asset status to be free holding + assert_ok!(Uniques::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 142, 1)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 169, 2)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 142, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 169, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 65); + + assert_ok!(Uniques::redeposit(Origin::signed(1), 0, bvec![0, 42, 50, 69, 100])); + assert_eq!(Balances::reserved_balance(1), 63); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 42); + + assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 21); + + assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn burn_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 100); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); + + assert_noop!(Uniques::burn(Origin::signed(5), 0, 42, Some(5)), Error::::Unknown); + + assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 5)); + assert_ok!(Uniques::mint(Origin::signed(2), 0, 69, 5)); + assert_eq!(Balances::reserved_balance(1), 2); + + assert_noop!(Uniques::burn(Origin::signed(0), 0, 42, None), Error::::NoPermission); + assert_noop!(Uniques::burn(Origin::signed(5), 0, 42, Some(6)), Error::::WrongOwner); + + assert_ok!(Uniques::burn(Origin::signed(5), 0, 42, Some(5))); + assert_ok!(Uniques::burn(Origin::signed(3), 0, 69, Some(5))); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn approval_lifecycle_works() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 4)); + assert_noop!(Uniques::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); + assert!(Asset::::get(0, 42).unwrap().approved.is_none()); + + assert_ok!(Uniques::approve_transfer(Origin::signed(4), 0, 42, 2)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 2)); + }); +} + +#[test] +fn cancel_approval_works() { + 
new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 1, 42, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 43, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(3), 0, 42, None), + Error::::NoPermission + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(4)), + Error::::WrongDelegate + ); + + assert_ok!(Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(3))); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 42, None), + Error::::NoDelegate + ); + }); +} + +#[test] +fn cancel_approval_works_with_admin() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 1, 42, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 43, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(4)), + Error::::WrongDelegate + ); + + assert_ok!(Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(3))); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 42, None), + Error::::NoDelegate + ); + }); +} + +#[test] +fn cancel_approval_works_with_force() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_noop!(Uniques::cancel_approval(Origin::root(), 1, 42, None), Error::::Unknown); + assert_noop!(Uniques::cancel_approval(Origin::root(), 0, 43, None), Error::::Unknown); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 0, 42, Some(4)), + Error::::WrongDelegate + ); + + assert_ok!(Uniques::cancel_approval(Origin::root(), 0, 42, Some(3))); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 0, 42, None), + Error::::NoDelegate + ); + }); +} diff --git a/frame/uniques/src/types.rs b/frame/uniques/src/types.rs new file mode 100644 index 0000000000000..1e4405aa09c84 --- /dev/null +++ b/frame/uniques/src/types.rs @@ -0,0 +1,122 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various basic types for use in the assets pallet. 
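
Before the type definitions, a brief usage sketch of `DestroyWitness` (declared below), mirroring how the tests above obtain and spend it; all names come from the mock and tests, nothing new is introduced:

```rust
fn destroy_witness_sketch() {
	// Snapshot the live counters; `destroy` uses them to bound its weight.
	let w = Class::<Test>::get(0).unwrap().destroy_witness();
	// Minting after the snapshot makes the counts stale, and `destroy` then
	// fails with `Error::<Test>::BadWitness`, as the witness test shows.
	assert_ok!(Uniques::destroy(Origin::signed(1), 0, w));
}
```
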
+
+use super::*;
+use frame_support::{traits::Get, BoundedVec};
+use scale_info::TypeInfo;
+
+pub(super) type DepositBalanceOf<T, I = ()> =
+	<<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+pub(super) type ClassDetailsFor<T, I> =
+	ClassDetails<<T as frame_system::Config>::AccountId, DepositBalanceOf<T, I>>;
+pub(super) type InstanceDetailsFor<T, I> =
+	InstanceDetails<<T as frame_system::Config>::AccountId, DepositBalanceOf<T, I>>;
+
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)]
+pub struct ClassDetails<AccountId, DepositBalance> {
+	/// Can change `owner`, `issuer`, `freezer` and `admin` accounts.
+	pub(super) owner: AccountId,
+	/// Can mint tokens.
+	pub(super) issuer: AccountId,
+	/// Can thaw tokens, force transfers and burn tokens from any account.
+	pub(super) admin: AccountId,
+	/// Can freeze tokens.
+	pub(super) freezer: AccountId,
+	/// The total balance deposited for all the storage associated with this asset class.
+	/// Used by `destroy`.
+	pub(super) total_deposit: DepositBalance,
+	/// If `true`, then no deposit is needed to hold instances of this class.
+	pub(super) free_holding: bool,
+	/// The total number of outstanding instances of this asset class.
+	pub(super) instances: u32,
+	/// The total number of outstanding instance metadata of this asset class.
+	pub(super) instance_metadatas: u32,
+	/// The total number of attributes for this asset class.
+	pub(super) attributes: u32,
+	/// Whether the asset is frozen for non-admin transfers.
+	pub(super) is_frozen: bool,
+}
+
+/// Witness data for the destroy transactions.
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)]
+pub struct DestroyWitness {
+	/// The total number of outstanding instances of this asset class.
+	#[codec(compact)]
+	pub(super) instances: u32,
+	/// The total number of outstanding instance metadata of this asset class.
+	#[codec(compact)]
+	pub(super) instance_metadatas: u32,
+	/// The total number of attributes for this asset class.
+	#[codec(compact)]
+	pub(super) attributes: u32,
+}
+
+impl<AccountId, DepositBalance> ClassDetails<AccountId, DepositBalance> {
+	pub fn destroy_witness(&self) -> DestroyWitness {
+		DestroyWitness {
+			instances: self.instances,
+			instance_metadatas: self.instance_metadatas,
+			attributes: self.attributes,
+		}
+	}
+}
+
+/// Information concerning the ownership of a single unique asset.
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo)]
+pub struct InstanceDetails<AccountId, DepositBalance> {
+	/// The owner of this asset.
+	pub(super) owner: AccountId,
+	/// The approved transferrer of this asset, if one is set.
+	pub(super) approved: Option<AccountId>,
+	/// Whether the asset can be transferred or not.
+	pub(super) is_frozen: bool,
+	/// The amount held in the pallet's default account for this asset. Free-hold assets will
+	/// have this as zero.
+	pub(super) deposit: DepositBalance,
+}
+
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo)]
+#[scale_info(skip_type_params(StringLimit))]
+pub struct ClassMetadata<DepositBalance, StringLimit: Get<u32>> {
+	/// The balance deposited for this metadata.
+	///
+	/// This pays for the data stored in this struct.
+	pub(super) deposit: DepositBalance,
+	/// General information concerning this asset. Limited in length by `StringLimit`. This will
+	/// generally be either a JSON dump or the hash of some JSON which can be found on a
+	/// hash-addressable global publication system such as IPFS.
+	pub(super) data: BoundedVec<u8, StringLimit>,
+	/// Whether the asset metadata may be changed by a non-Force origin.
+	pub(super) is_frozen: bool,
+}
+
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo)]
+#[scale_info(skip_type_params(StringLimit))]
+pub struct InstanceMetadata<DepositBalance, StringLimit: Get<u32>> {
+	/// The balance deposited for this metadata.
+	///
+	/// This pays for the data stored in this struct.
+	pub(super) deposit: DepositBalance,
+	/// General information concerning this asset. Limited in length by `StringLimit`. This will
+	/// generally be either a JSON dump or the hash of some JSON which can be found on a
+	/// hash-addressable global publication system such as IPFS.
+	pub(super) data: BoundedVec<u8, StringLimit>,
+	/// Whether the asset metadata may be changed by a non-Force origin.
+	pub(super) is_frozen: bool,
+}
diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs
new file mode 100644
index 0000000000000..40d1ddfdc5566
--- /dev/null
+++ b/frame/uniques/src/weights.rs
@@ -0,0 +1,421 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_uniques
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_uniques
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/uniques/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_uniques.
+pub trait WeightInfo {
+	fn create() -> Weight;
+	fn force_create() -> Weight;
+	fn destroy(n: u32, m: u32, a: u32, ) -> Weight;
+	fn mint() -> Weight;
+	fn burn() -> Weight;
+	fn transfer() -> Weight;
+	fn redeposit(i: u32, ) -> Weight;
+	fn freeze() -> Weight;
+	fn thaw() -> Weight;
+	fn freeze_class() -> Weight;
+	fn thaw_class() -> Weight;
+	fn transfer_ownership() -> Weight;
+	fn set_team() -> Weight;
+	fn force_asset_status() -> Weight;
+	fn set_attribute() -> Weight;
+	fn clear_attribute() -> Weight;
+	fn set_metadata() -> Weight;
+	fn clear_metadata() -> Weight;
+	fn set_class_metadata() -> Weight;
+	fn clear_class_metadata() -> Weight;
+	fn approve_transfer() -> Weight;
+	fn cancel_approval() -> Weight;
+}
+
+/// Weights for pallet_uniques using the Substrate node and recommended hardware.
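
For orientation, the base figures below combine with the configured DB weights at dispatch time. A hedged arithmetic sketch for `mint` (2 reads, 3 writes), assuming the standard `RocksDbWeight` constants of 25_000_000 per read and 100_000_000 per write:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn mint_weight_sketch() -> Weight {
	// 55_359_000 + 2 * 25_000_000 + 3 * 100_000_000 = 405_359_000
	(55_359_000 as Weight)
		.saturating_add(RocksDbWeight::get().reads(2 as Weight))
		.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
```
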
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Uniques Class (r:1 w:1) + fn create() -> Weight { + (42_138_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn force_create() -> Weight { + (22_238_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques Attribute (r:0 w:1000) + // Storage: Uniques ClassMetadataOf (r:0 w:1) + // Storage: Uniques InstanceMetadataOf (r:0 w:1000) + // Storage: Uniques Account (r:0 w:20) + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 12_000 + .saturating_add((16_171_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 12_000 + .saturating_add((1_058_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 12_000 + .saturating_add((953_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Account (r:0 w:1) + fn mint() -> Weight { + (55_359_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:1) + fn burn() -> Weight { + (58_254_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:2) + fn transfer() -> Weight { + (42_906_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:100 w:100) + fn redeposit(i: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 9_000 + .saturating_add((25_237_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn freeze() -> Weight { + (30_153_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn thaw() -> Weight { + (31_212_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn freeze_class() -> Weight { + (22_689_000 as 
Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn thaw_class() -> Weight { + (22_647_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn transfer_ownership() -> Weight { + (50_902_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn set_team() -> Weight { + (23_632_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn force_asset_status() -> Weight { + (22_508_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn set_attribute() -> Weight { + (69_942_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn clear_attribute() -> Weight { + (62_314_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn set_metadata() -> Weight { + (52_647_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn clear_metadata() -> Weight { + (50_391_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn set_class_metadata() -> Weight { + (50_928_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn clear_class_metadata() -> Weight { + (46_667_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn approve_transfer() -> Weight { + (32_111_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn cancel_approval() -> Weight { + (32_627_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Uniques Class (r:1 w:1) + fn create() -> Weight { + (42_138_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn 
force_create() -> Weight { + (22_238_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:0) + // Storage: Uniques Attribute (r:0 w:1000) + // Storage: Uniques ClassMetadataOf (r:0 w:1) + // Storage: Uniques InstanceMetadataOf (r:0 w:1000) + // Storage: Uniques Account (r:0 w:20) + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 12_000 + .saturating_add((16_171_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 12_000 + .saturating_add((1_058_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 12_000 + .saturating_add((953_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Account (r:0 w:1) + fn mint() -> Weight { + (55_359_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:1) + fn burn() -> Weight { + (58_254_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Account (r:0 w:2) + fn transfer() -> Weight { + (42_906_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques Asset (r:100 w:100) + fn redeposit(i: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 9_000 + .saturating_add((25_237_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn freeze() -> Weight { + (30_153_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Asset (r:1 w:1) + // Storage: Uniques Class (r:1 w:0) + fn thaw() -> Weight { + (31_212_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn freeze_class() -> Weight { + (22_689_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn thaw_class() -> Weight { + (22_647_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn transfer_ownership() -> Weight { + (50_902_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn set_team() -> Weight { + (23_632_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + fn force_asset_status() -> Weight { + (22_508_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn set_attribute() -> Weight { + (69_942_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:0) + // Storage: Uniques Attribute (r:1 w:1) + fn clear_attribute() -> Weight { + (62_314_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn set_metadata() -> Weight { + (52_647_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques InstanceMetadataOf (r:1 w:1) + fn clear_metadata() -> Weight { + (50_391_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:1) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn set_class_metadata() -> Weight { + (50_928_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques ClassMetadataOf (r:1 w:1) + fn clear_class_metadata() -> Weight { + (46_667_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn approve_transfer() -> Weight { + (32_111_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Uniques Class (r:1 w:0) + // Storage: Uniques Asset (r:1 w:1) + fn cancel_approval() -> Weight { + (32_627_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 098730aa30083..b5b8eab9cdbf3 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-utility" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,33 +13,35 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", 
default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-runtime/std", "frame-support/std", "frame-system/std", "sp-io/std", - "sp-std/std" + "sp-std/std", ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/utility/README.md b/frame/utility/README.md index 3963969291180..1beeb66733dd4 100644 --- a/frame/utility/README.md +++ b/frame/utility/README.md @@ -1,7 +1,7 @@ # Utility Module A stateless module with helpers for dispatch management which does no re-authentication. -- [`utility::Trait`](https://docs.rs/pallet-utility/latest/pallet_utility/trait.Trait.html) +- [`utility::Config`](https://docs.rs/pallet-utility/latest/pallet_utility/trait.Config.html) - [`Call`](https://docs.rs/pallet-utility/latest/pallet_utility/enum.Call.html) ## Overview @@ -33,6 +33,6 @@ filtered by any proxy. * `as_derivative` - Dispatch a call from a derivative signed origin. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 1c1b3f5815005..210a6156499cf 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,27 +20,21 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_system::{RawOrigin, EventRecord}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_system::RawOrigin; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { - let events = frame_system::Module::::events(); - let system_event: ::Event = generic_event.into(); - // compare to the last event record - let EventRecord { event, .. } = &events[events.len() - 1]; - assert_eq!(event, &system_event); +fn assert_last_event(generic_event: ::Event) { + frame_system::Pallet::::assert_last_event(generic_event.into()); } benchmarks! { - _ { } - batch { let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::Call> = Vec::new(); for i in 0 .. c { - let call = frame_system::Call::remark(vec![]).into(); + let call = frame_system::Call::remark { remark: vec![] }.into(); calls.push(call); } let caller = whitelisted_caller(); @@ -51,24 +45,24 @@ benchmarks! { as_derivative { let caller = account("caller", SEED, SEED); - let call = Box::new(frame_system::Call::remark(vec![]).into()); + let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), SEED as u16, call) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_batch::()); - assert_ok!(test_benchmark_as_derivative::()); - }); + batch_all { + let c in 0 .. 1000; + let mut calls: Vec<::Call> = Vec::new(); + for i in 0 .. c { + let call = frame_system::Call::remark { remark: vec![] }.into(); + calls.push(call); + } + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), calls) + verify { + assert_last_event::(Event::BatchCompleted.into()) } } + +impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index d0bb99d917455..54de87c4740c8 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,15 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Utility Module -//! A stateless module with helpers for dispatch management which does no re-authentication. +//! # Utility Pallet +//! A stateless pallet with helpers for dispatch management which does no re-authentication. //! -//! - [`utility::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! This module contains two basic pieces of functionality: +//! This pallet contains two basic pieces of functionality: //! - Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a //! single dispatch. 
This can be useful to amalgamate proposals, combining `set_code` with //! corresponding `set_storage`s, for efficient multiple payouts with just a single signature @@ -32,11 +32,11 @@ //! an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative //! account IDs) and these can be stacked. This can be useful as a key management tool, where you //! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where -//! it's perfectly fine to have each of them controlled by the same underlying keypair. -//! Derivative accounts are, for the purposes of proxy filtering considered exactly the same as -//! the oigin and are thus hampered with the origin's filters. +//! it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative +//! accounts are, for the purposes of proxy filtering considered exactly the same as the origin +//! and are thus hampered with the origin's filters. //! -//! Since proxy filters are respected in all dispatches of this module, it should never need to be +//! Since proxy filters are respected in all dispatches of this pallet, it should never need to be //! filtered by any proxy. //! //! ## Interface @@ -48,89 +48,103 @@ //! //! #### For pseudonymal dispatch //! * `as_derivative` - Dispatch a call from a derivative signed origin. -//! -//! [`Call`]: ./enum.Call.html -//! [`Trait`]: ./trait.Trait.html // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_core::TypeId; -use sp_io::hashing::blake2_256; -use frame_support::{decl_module, decl_event, decl_storage, Parameter}; +mod benchmarking; +mod tests; +pub mod weights; + +use codec::{Decode, Encode}; use frame_support::{ - traits::{OriginTrait, UnfilteredDispatchable, Get}, - weights::{Weight, GetDispatchInfo, DispatchClass}, dispatch::PostDispatchInfo, + dispatch::PostDispatchInfo, + traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, + transactional, + weights::{extract_actual_weight, GetDispatchInfo}, }; -use frame_system::{ensure_signed, ensure_root}; -use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; +use sp_core::TypeId; +use sp_io::hashing::blake2_256; +use sp_runtime::traits::Dispatchable; +use sp_std::prelude::*; +pub use weights::WeightInfo; -mod tests; -mod benchmarking; -mod default_weights; +pub use pallet::*; -pub trait WeightInfo { - fn batch(c: u32, ) -> Weight; - fn as_derivative() -> Weight; -} +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; -/// Configuration trait. -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From + Into<::Event>; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// The overarching call type. - type Call: Parameter + Dispatchable - + GetDispatchInfo + From> - + UnfilteredDispatchable; + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From + IsType<::Event>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// The overarching call type. + type Call: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + UnfilteredDispatchable + + IsSubType> + + IsType<::Call>; -decl_storage! 
{ - trait Store for Module as Utility {} -} + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } -decl_event! { - /// Events type. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as /// well as the error. \[index, error\] BatchInterrupted(u32, DispatchError), /// Batch of dispatches completed fully with no error. BatchCompleted, + /// A single item within a batch of dispatches has completed with no error. + ItemCompleted, } -} -/// A module identifier. These are per module and should be stored in a registry somewhere. -#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] -struct IndexedUtilityModuleId(u16); + #[pallet::extra_constants] + impl Pallet { + /// The limit on the number of batched calls. + fn batched_calls_limit() -> u32 { + let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; + let call_size = core::mem::size_of::<::Call>() as u32; + // The margin to take into account the `Vec` doubling its capacity. + let margin_factor = 3; -impl TypeId for IndexedUtilityModuleId { - const TYPE_ID: [u8; 4] = *b"suba"; -} + allocator_limit / margin_factor / call_size + } + } -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; + #[pallet::error] + pub enum Error { + /// Too many calls batched. + TooManyCalls, + } + #[pallet::call] + impl Pallet { /// Send a batch of dispatch calls. /// /// May be called from any origin. /// - /// - `calls`: The calls to be dispatched from the same origin. + /// - `calls`: The calls to be dispatched from the same origin. The number of calls must not + /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking the origin filter. (This includes - /// bypassing `frame_system::Trait::BaseCallFilter`). + /// bypassing `frame_system::Config::BaseCallFilter`). /// /// # - /// - Base weight: 14.39 + .987 * c µs - /// - Plus the sum of the weights of the `calls`. - /// - Plus one additional event. (repeat read/write) + /// - Complexity: O(C) where C is the number of calls to be batched. /// # /// /// This will return `Ok` in all circumstances. To determine the success of the batch, an @@ -138,36 +152,56 @@ decl_module! { /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. If all were successful, then the `BatchCompleted` /// event is deposited.
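The `batched_calls_limit` bound above exists so a decoded `Vec<Call>` can never exceed the Wasm allocator's single-allocation cap. A rough worked sketch of that arithmetic, assuming the 32 MiB `sp_core::MAX_POSSIBLE_ALLOCATION` of this era and a hypothetical 400-byte `Call` enum (about what the mock runtime in the tests below ends up with, since `example::big_variant` carries a `[u8; 400]` argument):

```rust
// Standalone sketch; both constants are assumptions, not values taken from this diff.
const MAX_POSSIBLE_ALLOCATION: u32 = 32 * 1024 * 1024; // sp_core's cap, 32 MiB
const CALL_SIZE: u32 = 400; // hypothetical size_of::<Call>() for some runtime

fn batched_calls_limit() -> u32 {
    let margin_factor = 3; // headroom for `Vec` doubling its capacity
    MAX_POSSIBLE_ALLOCATION / margin_factor / CALL_SIZE
}

fn main() {
    // 33_554_432 / 3 / 400 == 27_962, which is why the 40_000-call batches in
    // the `batch_limit` test further down are rejected with `Error::TooManyCalls`.
    assert_eq!(batched_calls_limit(), 27_962);
}
```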
- #[weight = ( - calls.iter() - .map(|call| call.get_dispatch_info().weight) + #[pallet::weight({ + let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); + let dispatch_weight = dispatch_infos.iter() + .map(|di| di.weight) .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) - .saturating_add(T::WeightInfo::batch(calls.len() as u32)), - { - let all_operational = calls.iter() - .map(|call| call.get_dispatch_info().class) + .saturating_add(T::WeightInfo::batch(calls.len() as u32)); + let dispatch_class = { + let all_operational = dispatch_infos.iter() + .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } - }, - )] - fn batch(origin, calls: Vec<::Call>) { + }; + (dispatch_weight, dispatch_class) + })] + pub fn batch( + origin: OriginFor, + calls: Vec<::Call>, + ) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); + let calls_len = calls.len(); + ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); + + // Track the actual weight of each of the batch calls. + let mut weight: Weight = 0; for (index, call) in calls.into_iter().enumerate() { + let info = call.get_dispatch_info(); + // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; + // Add the weight of this call. + weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { Self::deposit_event(Event::BatchInterrupted(index as u32, e.error)); - return Ok(()); + // Take the weight of this function itself into account. + let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); + // Return the actual used weight + base_weight of this call. + return Ok(Some(base_weight + weight).into()) } + Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); + let base_weight = T::WeightInfo::batch(calls_len as u32); + Ok(Some(base_weight + weight).into()) } /// Send a call through an indexed pseudonym of the sender. @@ -183,24 +217,124 @@ decl_module! { /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. - #[weight = ( - T::WeightInfo::as_derivative() - .saturating_add(call.get_dispatch_info().weight) - // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), - call.get_dispatch_info().class, - )] - fn as_derivative(origin, index: u16, call: Box<::Call>) -> DispatchResult { + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::as_derivative() + .saturating_add(dispatch_info.weight) + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + dispatch_info.class, + ) + })] + pub fn as_derivative( + origin: OriginFor, + index: u16, + call: Box<::Call>, + ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym)); - call.dispatch(origin).map(|_| ()).map_err(|e| e.error) + let info = call.get_dispatch_info(); + let result = call.dispatch(origin); + // Always take into account the base weight of this call. 
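+ // (`extract_actual_weight` reads the weight reported in the call's `PostDispatchInfo`,
+ // capped at the pre-dispatch estimate, and falls back to that estimate when the call
+ // reports nothing, so the value accumulated here can only ever refund fees, never add to them.)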
+ let mut weight = T::WeightInfo::as_derivative() + .saturating_add(T::DbWeight::get().reads_writes(1, 1)); + // Add the real weight of the dispatch. + weight = weight.saturating_add(extract_actual_weight(&result, &info)); + result + .map_err(|mut err| { + err.post_info = Some(weight).into(); + err + }) + .map(|_| Some(weight).into()) + } + + /// Send a batch of dispatch calls and atomically execute them. + /// The whole transaction will roll back and fail if any of the calls fail. + /// + /// May be called from any origin. + /// + /// - `calls`: The calls to be dispatched from the same origin. The number of calls must not + /// exceed the constant: `batched_calls_limit` (available in constant metadata). + /// + /// If origin is root then the calls are dispatched without checking the origin filter. (This includes + /// bypassing `frame_system::Config::BaseCallFilter`). + /// + /// # + /// - Complexity: O(C) where C is the number of calls to be batched. + /// # + #[pallet::weight({ + let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); + let dispatch_weight = dispatch_infos.iter() + .map(|di| di.weight) + .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) + .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); + let dispatch_class = { + let all_operational = dispatch_infos.iter() + .map(|di| di.class) + .all(|class| class == DispatchClass::Operational); + if all_operational { + DispatchClass::Operational + } else { + DispatchClass::Normal + } + }; + (dispatch_weight, dispatch_class) + })] + #[transactional] + pub fn batch_all( + origin: OriginFor, + calls: Vec<::Call>, + ) -> DispatchResultWithPostInfo { + let is_root = ensure_root(origin.clone()).is_ok(); + let calls_len = calls.len(); + ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); + + // Track the actual weight of each of the batch calls. + let mut weight: Weight = 0; + for (index, call) in calls.into_iter().enumerate() { + let info = call.get_dispatch_info(); + // If origin is root, bypass any dispatch filter; root can call anything. + let result = if is_root { + call.dispatch_bypass_filter(origin.clone()) + } else { + let mut filtered_origin = origin.clone(); + // Don't allow users to nest `batch_all` calls. + filtered_origin.add_filter(move |c: &::Call| { + let c = ::Call::from_ref(c); + !matches!(c.is_sub_type(), Some(Call::batch_all { .. })) + }); + call.dispatch(filtered_origin) + }; + // Add the weight of this call. + weight = weight.saturating_add(extract_actual_weight(&result, &info)); + result.map_err(|mut err| { + // Take the weight of this function itself into account. + let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32); + // Return the actual used weight + base_weight of this call. + err.post_info = Some(base_weight + weight).into(); + err + })?; + Self::deposit_event(Event::ItemCompleted); + } + Self::deposit_event(Event::BatchCompleted); + let base_weight = T::WeightInfo::batch_all(calls_len as u32); + Ok(Some(base_weight + weight).into()) + } } } -impl Module { +/// A pallet identifier. These are per pallet and should be stored in a registry somewhere. +#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] +struct IndexedUtilityPalletId(u16); + +impl TypeId for IndexedUtilityPalletId { + const TYPE_ID: [u8; 4] = *b"suba"; +} + +impl Pallet { /// Derive a derivative account ID from the owner account and the sub-account index.
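+ ///
+ /// The entropy is the blake2-256 hash of the constant prefix `b"modlpy/utilisuba"`
+ /// followed by the SCALE encoding of `(who, index)`; that hash is then decoded back
+ /// into an `AccountId`, so for 32-byte account IDs the pseudonym is simply the hash itself.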
pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 8e693b234a939..bbfbb417e23d1 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,45 +21,93 @@ use super::*; +use crate as utility; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event, dispatch::DispatchError, traits::Filter, storage, + assert_err_ignore_postinfo, assert_noop, assert_ok, + dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable}, + parameter_types, storage, + traits::Contains, + weights::{Pays, Weight}, }; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use crate as utility; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} -impl_outer_event! { - pub enum TestEvent for Test { - frame_system, - pallet_balances, - utility, +// example module to test behaviors. +#[frame_support::pallet] +pub mod example { + use super::*; + use frame_support::{dispatch::WithPostDispatchInfo, pallet_prelude::*}; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(*_weight)] + pub fn noop(_origin: OriginFor, _weight: Weight) -> DispatchResult { + Ok(()) + } + + #[pallet::weight(*_start_weight)] + pub fn foobar( + origin: OriginFor, + err: bool, + _start_weight: Weight, + end_weight: Option, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + if err { + let error: DispatchError = "The cake is a lie.".into(); + if let Some(weight) = end_weight { + Err(error.with_weight(weight)) + } else { + Err(error)? + } + } else { + Ok(end_weight.into()) + } + } + + #[pallet::weight(0)] + pub fn big_variant(_origin: OriginFor, _arg: [u8; 400]) -> DispatchResult { + Ok(()) + } } } -impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - frame_system::System, - pallet_balances::Balances, - utility::Utility, + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Utility: utility::{Pallet, Call, Event}, + Example: example::{Pallet, Call}, } -} +); -// For testing the pallet, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of pallets we want to use. -#[derive(Clone, Eq, PartialEq)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = Weight::max_value(); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::max_value()); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -69,30 +117,27 @@ impl frame_system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = TestEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type Event = TestEvent; + type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -102,46 +147,54 @@ parameter_types! { pub const MultisigDepositFactor: u64 = 1; pub const MaxSignatories: u16 = 3; } + +impl example::Config for Test {} + pub struct TestBaseCallFilter; -impl Filter for TestBaseCallFilter { - fn filter(c: &Call) -> bool { +impl Contains for TestBaseCallFilter { + fn contains(c: &Call) -> bool { match *c { - Call::Balances(_) => true, + // Transfer works. Use `transfer_keep_alive` for a call that doesn't pass the filter. + Call::Balances(pallet_balances::Call::transfer { .. }) => true, + Call::Utility(_) => true, // For benchmarking, this acts as a noop call - Call::System(frame_system::Call::remark(..)) => true, + Call::System(frame_system::Call::remark { .. 
}) => true, + // For tests + Call::Example(_) => true, _ => false, } } } -impl Trait for Test { - type Event = TestEvent; +impl Config for Test { + type Event = Event; type Call = Call; type WeightInfo = (); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Utility = Module; + +type ExampleCall = example::Call; +type UtilityCall = crate::Call; use frame_system::Call as SystemCall; -use pallet_balances::Call as BalancesCall; -use pallet_balances::Error as BalancesError; +use pallet_balances::{Call as BalancesCall, Error as BalancesError}; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } -fn last_event() -> TestEvent { - frame_system::Module::::events().pop().map(|e| e.event).expect("Event expected") +fn call_transfer(dest: u64, value: u64) -> Call { + Call::Balances(BalancesCall::transfer { dest, value }) } -fn expect_event>(e: E) { - assert_eq!(last_event(), e.into()); +fn call_foobar(err: bool, start_weight: u64, end_weight: Option) -> Call { + Call::Example(ExampleCall::foobar { err, start_weight, end_weight }) } #[test] @@ -149,29 +202,94 @@ fn as_derivative_works() { new_test_ext().execute_with(|| { let sub_1_0 = Utility::derivative_account_id(1, 0); assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); - assert_noop!(Utility::as_derivative( - Origin::signed(1), - 1, - Box::new(Call::Balances(BalancesCall::transfer(6, 3))), - ), BalancesError::::InsufficientBalance); - assert_ok!(Utility::as_derivative( - Origin::signed(1), - 0, - Box::new(Call::Balances(BalancesCall::transfer(2, 3))), - )); + assert_err_ignore_postinfo!( + Utility::as_derivative(Origin::signed(1), 1, Box::new(call_transfer(6, 3)),), + BalancesError::::InsufficientBalance + ); + assert_ok!(Utility::as_derivative(Origin::signed(1), 0, Box::new(call_transfer(2, 3)),)); assert_eq!(Balances::free_balance(sub_1_0), 2); assert_eq!(Balances::free_balance(2), 13); }); } +#[test] +fn as_derivative_handles_weight_refund() { + new_test_ext().execute_with(|| { + let start_weight = 100; + let end_weight = 75; + let diff = start_weight - end_weight; + + // Full weight when ok + let inner_call = call_foobar(false, start_weight, None); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when ok + let inner_call = call_foobar(false, start_weight, Some(end_weight)); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + // Diff is refunded + assert_eq!(extract_actual_weight(&result, &info), info.weight - diff); + + // Full weight when err + let inner_call = call_foobar(true, start_weight, None); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_noop!( + result, + DispatchErrorWithPostInfo { + 
post_info: PostDispatchInfo { + // No weight is refunded + actual_weight: Some(info.weight), + pays_fee: Pays::Yes, + }, + error: DispatchError::Other("The cake is a lie."), + } + ); + + // Refund weight when err + let inner_call = call_foobar(true, start_weight, Some(end_weight)); + let call = + Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_noop!( + result, + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + // Diff is refunded + actual_weight: Some(info.weight - diff), + pays_fee: Pays::Yes, + }, + error: DispatchError::Other("The cake is a lie."), + } + ); + }); +} + #[test] fn as_derivative_filters() { new_test_ext().execute_with(|| { - assert_noop!(Utility::as_derivative( - Origin::signed(1), - 1, - Box::new(Call::System(frame_system::Call::suicide())), - ), DispatchError::BadOrigin); + assert_err_ignore_postinfo!( + Utility::as_derivative( + Origin::signed(1), + 1, + Box::new(Call::Balances(pallet_balances::Call::transfer_keep_alive { + dest: 2, + value: 1 + })), + ), + DispatchError::BadOrigin + ); }); } @@ -179,15 +297,19 @@ fn as_derivative_filters() { fn batch_with_root_works() { new_test_ext().execute_with(|| { let k = b"a".to_vec(); - let call = Call::System(frame_system::Call::set_storage(vec![(k.clone(), k.clone())])); - assert!(!TestBaseCallFilter::filter(&call)); + let call = + Call::System(frame_system::Call::set_storage { items: vec![(k.clone(), k.clone())] }); + assert!(!TestBaseCallFilter::contains(&call)); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - assert_ok!(Utility::batch(Origin::root(), vec![ - Call::Balances(BalancesCall::force_transfer(1, 2, 5)), - Call::Balances(BalancesCall::force_transfer(1, 2, 5)), - call, // Check filters are correctly bypassed - ])); + assert_ok!(Utility::batch( + Origin::root(), + vec![ + Call::Balances(BalancesCall::force_transfer { source: 1, dest: 2, value: 5 }), + Call::Balances(BalancesCall::force_transfer { source: 1, dest: 2, value: 5 }), + call, // Check filters are correctly bypassed + ] + )); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::free_balance(2), 20); assert_eq!(storage::unhashed::get_raw(&k), Some(k)); @@ -199,12 +321,10 @@ fn batch_with_signed_works() { new_test_ext().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::Balances(BalancesCall::transfer(2, 5)), - Call::Balances(BalancesCall::transfer(2, 5)) - ]), - ); + assert_ok!(Utility::batch( + Origin::signed(1), + vec![call_transfer(2, 5), call_transfer(2, 5)] + ),); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::free_balance(2), 20); }); @@ -213,12 +333,13 @@ fn batch_with_signed_works() { #[test] fn batch_with_signed_filters() { new_test_ext().execute_with(|| { - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::System(frame_system::Call::suicide()) - ]), + assert_ok!(Utility::batch( + Origin::signed(1), + vec![Call::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 })] + ),); + System::assert_last_event( + utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into(), ); - expect_event(Event::BatchInterrupted(0, DispatchError::BadOrigin)); }); } @@ -227,13 +348,10 @@ fn batch_early_exit_works() { new_test_ext().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); 
assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::Balances(BalancesCall::transfer(2, 5)), - Call::Balances(BalancesCall::transfer(2, 10)), - Call::Balances(BalancesCall::transfer(2, 5)), - ]), - ); + assert_ok!(Utility::batch( + Origin::signed(1), + vec![call_transfer(2, 5), call_transfer(2, 10), call_transfer(2, 5),] + ),); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::free_balance(2), 15); }); @@ -241,17 +359,244 @@ fn batch_early_exit_works() { #[test] fn batch_weight_calculation_doesnt_overflow() { + use sp_runtime::Perbill; new_test_ext().execute_with(|| { - let big_call = Call::System(SystemCall::fill_block(Perbill::from_percent(50))); + let big_call = Call::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) }); assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2); // 3 * 50% saturates to 100% - let batch_call = Call::Utility(crate::Call::batch(vec![ - big_call.clone(), - big_call.clone(), - big_call.clone(), - ])); + let batch_call = Call::Utility(crate::Call::batch { + calls: vec![big_call.clone(), big_call.clone(), big_call.clone()], + }); assert_eq!(batch_call.get_dispatch_info().weight, Weight::max_value()); }); } + +#[test] +fn batch_handles_weight_refund() { + new_test_ext().execute_with(|| { + let start_weight = 100; + let end_weight = 75; + let diff = start_weight - end_weight; + let batch_len: Weight = 4; + + // Full weight when ok + let inner_call = call_foobar(false, start_weight, None); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when ok + let inner_call = call_foobar(false, start_weight, Some(end_weight)); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + // Diff is refunded + assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + + // Full weight when err + let good_call = call_foobar(false, start_weight, None); + let bad_call = call_foobar(true, start_weight, None); + let batch_calls = vec![good_call, bad_call]; + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + System::assert_last_event( + utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + ); + // No weight is refunded + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when err + let good_call = call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, bad_call]; + let batch_len = batch_calls.len() as Weight; + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + System::assert_last_event( + utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + ); + assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + + // Partial batch completion + let good_call = 
call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, bad_call.clone(), bad_call]; + let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + System::assert_last_event( + utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + ); + assert_eq!( + extract_actual_weight(&result, &info), + // Real weight is 2 calls at end_weight + ::WeightInfo::batch(2) + end_weight * 2, + ); + }); +} + +#[test] +fn batch_all_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch_all( + Origin::signed(1), + vec![call_transfer(2, 5), call_transfer(2, 5)] + ),); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + }); +} + +#[test] +fn batch_all_revert() { + new_test_ext().execute_with(|| { + let call = call_transfer(2, 5); + let info = call.get_dispatch_info(); + + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_noop!( + Utility::batch_all( + Origin::signed(1), + vec![call_transfer(2, 5), call_transfer(2, 10), call_transfer(2, 5),] + ), + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some( + ::WeightInfo::batch_all(2) + info.weight * 2 + ), + pays_fee: Pays::Yes + }, + error: pallet_balances::Error::::InsufficientBalance.into() + } + ); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + }); +} + +#[test] +fn batch_all_handles_weight_refund() { + new_test_ext().execute_with(|| { + let start_weight = 100; + let end_weight = 75; + let diff = start_weight - end_weight; + let batch_len: Weight = 4; + + // Full weight when ok + let inner_call = call_foobar(false, start_weight, None); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when ok + let inner_call = call_foobar(false, start_weight, Some(end_weight)); + let batch_calls = vec![inner_call; batch_len as usize]; + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_ok!(result); + // Diff is refunded + assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + + // Full weight when err + let good_call = call_foobar(false, start_weight, None); + let bad_call = call_foobar(true, start_weight, None); + let batch_calls = vec![good_call, bad_call]; + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_err_ignore_postinfo!(result, "The cake is a lie."); + // No weight is refunded + assert_eq!(extract_actual_weight(&result, &info), info.weight); + + // Refund weight when err + let good_call = call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, bad_call]; + let batch_len = batch_calls.len() as Weight; + let call = 
Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_err_ignore_postinfo!(result, "The cake is a lie."); + assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + + // Partial batch completion + let good_call = call_foobar(false, start_weight, Some(end_weight)); + let bad_call = call_foobar(true, start_weight, Some(end_weight)); + let batch_calls = vec![good_call, bad_call.clone(), bad_call]; + let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(1)); + assert_err_ignore_postinfo!(result, "The cake is a lie."); + assert_eq!( + extract_actual_weight(&result, &info), + // Real weight is 2 calls at end_weight + ::WeightInfo::batch_all(2) + end_weight * 2, + ); + }); +} + +#[test] +fn batch_all_does_not_nest() { + new_test_ext().execute_with(|| { + let batch_all = Call::Utility(UtilityCall::batch_all { + calls: vec![call_transfer(2, 1), call_transfer(2, 1), call_transfer(2, 1)], + }); + + let info = batch_all.get_dispatch_info(); + + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + // A nested batch_all call will not pass the filter, and fail with `BadOrigin`. + assert_noop!( + Utility::batch_all(Origin::signed(1), vec![batch_all.clone()]), + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(::WeightInfo::batch_all(1) + info.weight), + pays_fee: Pays::Yes + }, + error: DispatchError::BadOrigin, + } + ); + + // And for those who want to get a little fancy, we check that the filter persists across + // other kinds of dispatch wrapping functions... in this case + // `batch_all(batch(batch_all(..)))` + let batch_nested = Call::Utility(UtilityCall::batch { calls: vec![batch_all] }); + // Batch will end with `Ok`, but does not actually execute as we can see from the event + // and balances. + assert_ok!(Utility::batch_all(Origin::signed(1), vec![batch_nested])); + System::assert_has_event( + utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into(), + ); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + }); +} + +#[test] +fn batch_limit() { + new_test_ext().execute_with(|| { + let calls = vec![Call::System(SystemCall::remark { remark: vec![] }); 40_000]; + assert_noop!(Utility::batch(Origin::signed(1), calls.clone()), Error::::TooManyCalls); + assert_noop!(Utility::batch_all(Origin::signed(1), calls), Error::::TooManyCalls); + }); +} diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs new file mode 100644 index 0000000000000..6ac23419e3ef7 --- /dev/null +++ b/frame/utility/src/weights.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Autogenerated weights for pallet_utility +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_utility +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/utility/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_utility. +pub trait WeightInfo { + fn batch(c: u32, ) -> Weight; + fn as_derivative() -> Weight; + fn batch_all(c: u32, ) -> Weight; +} + +/// Weights for pallet_utility using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn batch(c: u32, ) -> Weight { + (30_319_000 as Weight) + // Standard Error: 3_000 + .saturating_add((6_759_000 as Weight).saturating_mul(c as Weight)) + } + fn as_derivative() -> Weight { + (4_030_000 as Weight) + } + fn batch_all(c: u32, ) -> Weight { + (26_621_000 as Weight) + // Standard Error: 3_000 + .saturating_add((7_251_000 as Weight).saturating_mul(c as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn batch(c: u32, ) -> Weight { + (30_319_000 as Weight) + // Standard Error: 3_000 + .saturating_add((6_759_000 as Weight).saturating_mul(c as Weight)) + } + fn as_derivative() -> Weight { + (4_030_000 as Weight) + } + fn batch_all(c: u32, ) -> Weight { + (26_621_000 as Weight) + // Standard Error: 3_000 + .saturating_add((7_251_000 as Weight).saturating_mul(c as Weight)) + } +} diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index bea64c2b4f94d..806e0e6036862 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-vesting" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,30 +13,31 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = 
"4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.0", default-features = false } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } -hex-literal = "0.3.1" +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "sp-std/std", "sp-runtime/std", "frame-support/std", "frame-system/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/vesting/README.md b/frame/vesting/README.md index 921fa94a1a2a9..c3800eb994d4d 100644 --- a/frame/vesting/README.md +++ b/frame/vesting/README.md @@ -1,6 +1,6 @@ # Vesting Module -- [`vesting::Trait`](https://docs.rs/pallet-vesting/latest/pallet_vesting/trait.Trait.html) +- [`vesting::Config`](https://docs.rs/pallet-vesting/latest/pallet_vesting/trait.Config.html) - [`Call`](https://docs.rs/pallet-vesting/latest/pallet_vesting/enum.Call.html) ## Overview @@ -26,6 +26,6 @@ This module implements the `VestingSchedule` trait. "vested" so far. [`Call`]: ./enum.Call.html -[`Trait`]: ./trait.Trait.html +[`Config`]: ./trait.Config.html -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 7c5478472f8ab..5cdc14c8fdaca 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,59 +19,79 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; - -use frame_system::{RawOrigin, Module as System}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; -use sp_runtime::traits::Bounded; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::assert_ok; +use frame_system::{Pallet as System, RawOrigin}; +use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; -use crate::Module as Vesting; +use super::*; +use crate::Pallet as Vesting; const SEED: u32 = 0; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; -fn add_locks(who: &T::AccountId, n: u8) { +fn add_locks(who: &T::AccountId, n: u8) { for id in 0..n { let lock_id = [id; 8]; - let locked = 100; - let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; + let locked = 256u32; + let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(lock_id, who, locked.into(), reasons); } } -fn add_vesting_schedule(who: &T::AccountId) -> Result<(), &'static str> { - let locked = 100; - let per_block = 10; - let starting_block = 1; - - System::::set_block_number(0.into()); - - // Add schedule to avoid `NotVesting` error. - Vesting::::add_vesting_schedule( - &who, - locked.into(), - per_block.into(), - starting_block.into(), - )?; - Ok(()) +fn add_vesting_schedules( + target: ::Source, + n: u32, +) -> Result, &'static str> { + let min_transfer = T::MinVestedTransfer::get(); + let locked = min_transfer.checked_mul(&20u32.into()).unwrap(); + // Schedule has a duration of 20. + let per_block = min_transfer; + let starting_block = 1u32; + + let source: T::AccountId = account("source", 0, SEED); + let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + + System::::set_block_number(T::BlockNumber::zero()); + + let mut total_locked: BalanceOf = Zero::zero(); + for _ in 0..n { + total_locked += locked; + + let schedule = VestingInfo::new(locked, per_block, starting_block.into()); + assert_ok!(Vesting::::do_vested_transfer( + source_lookup.clone(), + target.clone(), + schedule + )); + + // Top up to guarantee we can always transfer another schedule. + T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + } + + Ok(total_locked.into()) } benchmarks! { - _ { } - vest_locked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. T::MAX_VESTING_SCHEDULES; + + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); - let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); - add_vesting_schedule::(&caller)?; + let expected_balance = add_vesting_schedules::(caller_lookup, s)?; + // At block zero, everything is vested. - System::::set_block_number(T::BlockNumber::zero()); + assert_eq!(System::::block_number(), T::BlockNumber::zero()); assert_eq!( Vesting::::vesting_balance(&caller), - Some(100.into()), + Some(expected_balance.into()), "Vesting schedule not added", ); }: vest(RawOrigin::Signed(caller.clone())) @@ -79,20 +99,24 @@ benchmarks! 
{ // Nothing happened since everything is still vested. assert_eq!( Vesting::::vesting_balance(&caller), - Some(100.into()), + Some(expected_balance.into()), "Vesting schedule was removed", ); } vest_unlocked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. T::MAX_VESTING_SCHEDULES; + + let caller: T::AccountId = whitelisted_caller(); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); - let caller = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); add_locks::(&caller, l as u8); - add_vesting_schedule::(&caller)?; - // At block 20, everything is unvested. - System::::set_block_number(20.into()); + add_vesting_schedules::(caller_lookup, s)?; + + // At block 21, everything is unlocked. + System::::set_block_number(21u32.into()); assert_eq!( Vesting::::vesting_balance(&caller), Some(BalanceOf::::zero()), @@ -109,18 +133,20 @@ benchmarks! { } vest_other_locked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); - T::Currency::make_free_balance_be(&other, BalanceOf::::max_value()); + add_locks::(&other, l as u8); - add_vesting_schedule::(&other)?; + let expected_balance = add_vesting_schedules::(other_lookup.clone(), s)?; + // At block zero, everything is vested. - System::::set_block_number(T::BlockNumber::zero()); + assert_eq!(System::::block_number(), T::BlockNumber::zero()); assert_eq!( Vesting::::vesting_balance(&other), - Some(100.into()), + Some(expected_balance), "Vesting schedule not added", ); @@ -130,21 +156,23 @@ benchmarks! { // Nothing happened since everything is still vested. assert_eq!( Vesting::::vesting_balance(&other), - Some(100.into()), + Some(expected_balance.into()), "Vesting schedule was removed", ); } vest_other_unlocked { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 1 .. T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); - T::Currency::make_free_balance_be(&other, BalanceOf::::max_value()); + add_locks::(&other, l as u8); - add_vesting_schedule::(&other)?; - // At block 20, everything is unvested. - System::::set_block_number(20.into()); + add_vesting_schedules::(other_lookup.clone(), s)?; + // At block 21, everything is unlocked. + System::::set_block_number(21u32.into()); + assert_eq!( Vesting::::vesting_balance(&other), Some(BalanceOf::::zero()), @@ -154,7 +182,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) verify { - // Vesting schedule is removed! + // Vesting schedule is removed. assert_eq!( Vesting::::vesting_balance(&other), None, @@ -163,84 +191,193 @@ benchmarks! { } vested_transfer { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 0 .. T::MAX_VESTING_SCHEDULES - 1; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let target: T::AccountId = account("target", 0, SEED); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); + // Add `s` vesting schedules.
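+ // (`s` only goes up to `MAX_VESTING_SCHEDULES - 1`, so the vested transfer being
+ // benchmarked always has room to add one more schedule.)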
+ let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; let transfer_amount = T::MinVestedTransfer::get(); + let per_block = transfer_amount.checked_div(&20u32.into()).unwrap(); + expected_balance += transfer_amount; - let vesting_schedule = VestingInfo { - locked: transfer_amount, - per_block: 10.into(), - starting_block: 1.into(), - }; + let vesting_schedule = VestingInfo::new( + transfer_amount, + per_block, + 1u32.into(), + ); }: _(RawOrigin::Signed(caller), target_lookup, vesting_schedule) verify { assert_eq!( - T::MinVestedTransfer::get(), + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); assert_eq!( Vesting::::vesting_balance(&target), - Some(T::MinVestedTransfer::get()), - "Lock not created", + Some(expected_balance), + "Lock not correctly updated", ); } force_vested_transfer { - let l in 0 .. MaxLocksOf::::get(); + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 0 .. T::MAX_VESTING_SCHEDULES - 1; let source: T::AccountId = account("source", 0, SEED); let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); + let target: T::AccountId = account("target", 0, SEED); let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); + // Add one less than the max vesting schedules. + let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; let transfer_amount = T::MinVestedTransfer::get(); + let per_block = transfer_amount.checked_div(&20u32.into()).unwrap(); + expected_balance += transfer_amount; - let vesting_schedule = VestingInfo { - locked: transfer_amount, - per_block: 10.into(), - starting_block: 1.into(), - }; + let vesting_schedule = VestingInfo::new( + transfer_amount, + per_block, + 1u32.into(), + ); }: _(RawOrigin::Root, source_lookup, target_lookup, vesting_schedule) verify { assert_eq!( - T::MinVestedTransfer::get(), + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); assert_eq!( Vesting::::vesting_balance(&target), - Some(T::MinVestedTransfer::get()), - "Lock not created", + Some(expected_balance.into()), + "Lock not correctly updated", + ); + } + + not_unlocking_merge_schedules { + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 2 .. T::MAX_VESTING_SCHEDULES; + + let caller: T::AccountId = account("caller", 0, SEED); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + // Give the caller existing locks. + add_locks::(&caller, l as u8); + // Add `s` vesting schedules. + let expected_balance = add_vesting_schedules::(caller_lookup.clone(), s)?; + + // Schedules are not vesting at block 0.
+ assert_eq!(System::::block_number(), T::BlockNumber::zero()); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should equal sum locked of all schedules", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + s as usize, + "There should be exactly max vesting schedules" + ); + }: merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1) + verify { + let expected_schedule = VestingInfo::new( + T::MinVestedTransfer::get() * 20u32.into() * 2u32.into(), + T::MinVestedTransfer::get() * 2u32.into(), + 1u32.into(), + ); + let expected_index = (s - 2) as usize; + assert_eq!( + Vesting::::vesting(&caller).unwrap()[expected_index], + expected_schedule + ); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should equal total locked of all schedules", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + (s - 1) as usize, + "Schedule count should reduce by 1" + ); } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - ExtBuilder::default().existential_deposit(256).build().execute_with(|| { - assert_ok!(test_benchmark_vest_locked::()); - assert_ok!(test_benchmark_vest_unlocked::()); - assert_ok!(test_benchmark_vest_other_locked::()); - assert_ok!(test_benchmark_vest_other_unlocked::()); - assert_ok!(test_benchmark_vested_transfer::()); - assert_ok!(test_benchmark_force_vested_transfer::()); - }); + unlocking_merge_schedules { + let l in 0 .. MaxLocksOf::::get() - 1; + let s in 2 .. T::MAX_VESTING_SCHEDULES; + + // Destination used just for currency transfers in asserts. + let test_dest: T::AccountId = account("test_dest", 0, SEED); + + let caller: T::AccountId = account("caller", 0, SEED); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + // Give the caller other locks. + add_locks::(&caller, l as u8); + // Add `s` vesting schedules. + let total_transferred = add_vesting_schedules::(caller_lookup.clone(), s)?; + + // Go to about halfway through all the schedules' duration. (They all start at 1 and have a duration of 20 or 21.) + System::::set_block_number(11u32.into()); + // We expect half the original locked balance (+ any remainder that vests on the last block). + let expected_balance = total_transferred / 2u32.into(); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should reflect that we are half way through all schedules duration", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + s as usize, + "There should be exactly max vesting schedules" + ); + // The balance is not actually transferable because it has not been unlocked.
+ assert!(T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath).is_err()); + }: merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1) + verify { + let expected_schedule = VestingInfo::new( + T::MinVestedTransfer::get() * 2u32.into() * 10u32.into(), + T::MinVestedTransfer::get() * 2u32.into(), + 11u32.into(), + ); + let expected_index = (s - 2) as usize; + assert_eq!( + Vesting::::vesting(&caller).unwrap()[expected_index], + expected_schedule, + "New schedule is properly created and placed" + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap()[expected_index], + expected_schedule + ); + assert_eq!( + Vesting::::vesting_balance(&caller), + Some(expected_balance), + "Vesting balance should equal half total locked of all schedules", + ); + assert_eq!( + Vesting::::vesting(&caller).unwrap().len(), + (s - 1) as usize, + "Schedule count should reduce by 1" + ); + // Since merge unlocks all schedules we can now transfer the balance. + assert_ok!( + T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath) + ); } } + +impl_benchmark_test_suite!( + Vesting, + crate::mock::ExtBuilder::default().existential_deposit(256).build(), + crate::mock::Test, +); diff --git a/frame/vesting/src/default_weights.rs b/frame/vesting/src/default_weights.rs deleted file mode 100644 index dac9224d69ab0..0000000000000 --- a/frame/vesting/src/default_weights.rs +++ /dev/null @@ -1,62 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn vest_locked(l: u32, ) -> Weight { - (82109000 as Weight) - .saturating_add((332000 as Weight).saturating_mul(l as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn vest_unlocked(l: u32, ) -> Weight { - (88419000 as Weight) - .saturating_add((3000 as Weight).saturating_mul(l as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn vest_other_locked(l: u32, ) -> Weight { - (81277000 as Weight) - .saturating_add((321000 as Weight).saturating_mul(l as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn vest_other_unlocked(l: u32, ) -> Weight { - (87584000 as Weight) - .saturating_add((19000 as Weight).saturating_mul(l as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn vested_transfer(l: u32, ) -> Weight { - (185916000 as Weight) - .saturating_add((625000 as Weight).saturating_mul(l as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn force_vested_transfer(l: u32, ) -> Weight { - (185916000 as Weight) - .saturating_add((625000 as Weight).saturating_mul(l as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - } -} diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 1583b06d69f83..27862a5ca4b72 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,15 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Vesting Module +//! # Vesting Pallet //! -//! - [`vesting::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! A simple module providing a means of placing a linear curve on an account's locked balance. This -//! module ensures that there is a lock in place preventing the balance to drop below the *unvested* +//! A simple pallet providing a means of placing a linear curve on an account's locked balance. This +//! pallet ensures that there is a lock in place preventing the balance from dropping below the *unvested* //! amount for any reason other than transaction fee payment. //! //! As the amount vested increases over time, the amount unvested reduces. However, locks remain in @@ -34,172 +34,275 @@ //! //! ## Interface //! -//! This module implements the `VestingSchedule` trait. +//! This pallet implements the `VestingSchedule` trait. //! //! ### Dispatchable Functions //! //! - `vest` - Update the lock, reducing it in line with the amount "vested" so far. //! - `vest_other` - Update the lock of another account, reducing it in line with the amount //! "vested" so far. -//! -//! [`Call`]: ./enum.Call.html -//!
[`Trait`]: ./trait.Trait.html #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_std::fmt::Debug; -use codec::{Encode, Decode}; -use sp_runtime::{DispatchResult, RuntimeDebug, traits::{ - StaticLookup, Zero, AtLeast32BitUnsigned, MaybeSerializeDeserialize, Convert -}}; -use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure, weights::Weight}; -use frame_support::traits::{ - Currency, LockableCurrency, VestingSchedule, WithdrawReason, LockIdentifier, - ExistenceRequirement, Get, +mod benchmarking; +mod migrations; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +mod vesting_info; + +pub mod weights; + +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{ + ensure, + pallet_prelude::*, + traits::{ + Currency, ExistenceRequirement, Get, LockIdentifier, LockableCurrency, VestingSchedule, + WithdrawReasons, + }, +}; +use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; +pub use pallet::*; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{ + AtLeast32BitUnsigned, Bounded, Convert, MaybeSerializeDeserialize, One, Saturating, + StaticLookup, Zero, + }, + RuntimeDebug, }; -use frame_system::{ensure_signed, ensure_root}; +use sp_std::{convert::TryInto, fmt::Debug, prelude::*}; +pub use vesting_info::*; +pub use weights::WeightInfo; -mod benchmarking; -mod default_weights; - -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; - -pub trait WeightInfo { - fn vest_locked(l: u32, ) -> Weight; - fn vest_unlocked(l: u32, ) -> Weight; - fn vest_other_locked(l: u32, ) -> Weight; - fn vest_other_unlocked(l: u32, ) -> Weight; - fn vested_transfer(l: u32, ) -> Weight; - fn force_vested_transfer(l: u32, ) -> Weight; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type MaxLocksOf = + <::Currency as LockableCurrency<::AccountId>>::MaxLocks; + +const VESTING_ID: LockIdentifier = *b"vesting "; + +// A value placed in storage that represents the current version of the Vesting storage. +// This value is used by `on_runtime_upgrade` to determine whether we run storage migration logic. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +enum Releases { + V0, + V1, } -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +impl Default for Releases { + fn default() -> Self { + Releases::V0 + } +} - /// The currency trait. - type Currency: LockableCurrency; +/// Actions to take against a user's `Vesting` storage entry. +#[derive(Clone, Copy)] +enum VestingAction { + /// Do not actively remove any schedules. + Passive, + /// Remove the schedule specified by the index. + Remove(usize), + /// Remove the two schedules, specified by index, so they can be merged. + Merge(usize, usize), +} - /// Convert the block number into a balance. - type BlockNumberToBalance: Convert>; +impl VestingAction { + /// Whether or not the filter says the schedule index should be removed. + fn should_remove(&self, index: usize) -> bool { + match self { + Self::Passive => false, + Self::Remove(index1) => *index1 == index, + Self::Merge(index1, index2) => *index1 == index || *index2 == index, + } + } - /// The minimum amount transferred to call `vested_transfer`. - type MinVestedTransfer: Get>; + /// Pick the schedules that this action dictates should continue vesting undisturbed. 
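As a quick orientation to the `VestingAction` filter introduced above, here is a minimal, standalone sketch of the same index rule (plain strings stand in for `VestingInfo` entries, and `Action` is a local stand-in rather than the pallet's type):

#[derive(Clone, Copy)]
enum Action {
    Passive,
    Remove(usize),
    Merge(usize, usize),
}

impl Action {
    // Mirrors `VestingAction::should_remove`: an index is dropped when the
    // action singles it out.
    fn should_remove(&self, index: usize) -> bool {
        match self {
            Self::Passive => false,
            Self::Remove(i) => *i == index,
            Self::Merge(i, j) => *i == index || *j == index,
        }
    }
}

fn main() {
    let schedules = vec!["a", "b", "c", "d"];
    // Keep everything the action does not remove, as `pick_schedules` does.
    let action = Action::Merge(0, 2);
    let kept: Vec<_> = schedules
        .into_iter()
        .enumerate()
        .filter_map(|(i, s)| if action.should_remove(i) { None } else { Some(s) })
        .collect();
    assert_eq!(kept, vec!["b", "d"]);
}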
+ fn pick_schedules<'a, T: Config>( + &'a self, + schedules: Vec, T::BlockNumber>>, + ) -> impl Iterator, T::BlockNumber>> + 'a { + schedules.into_iter().enumerate().filter_map(move |(index, schedule)| { + if self.should_remove(index) { + None + } else { + Some(schedule) + } + }) + } +} - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; +// Wrapper for `T::MAX_VESTING_SCHEDULES` to satisfy `trait Get`. +pub struct MaxVestingSchedulesGet(PhantomData); +impl Get for MaxVestingSchedulesGet { + fn get() -> u32 { + T::MAX_VESTING_SCHEDULES + } } -const VESTING_ID: LockIdentifier = *b"vesting "; +#[frame_support::pallet] +pub mod pallet { + use super::*; -/// Struct to encode the vesting schedule of an individual account. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct VestingInfo { - /// Locked amount at genesis. - pub locked: Balance, - /// Amount that gets unlocked every block after `starting_block`. - pub per_block: Balance, - /// Starting block for unlocking(vesting). - pub starting_block: BlockNumber, -} + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; -impl< - Balance: AtLeast32BitUnsigned + Copy, - BlockNumber: AtLeast32BitUnsigned + Copy, -> VestingInfo { - /// Amount locked at block `n`. - pub fn locked_at< - BlockNumberToBalance: Convert - >(&self, n: BlockNumber) -> Balance { - // Number of blocks that count toward vesting - // Saturating to 0 when n < starting_block - let vested_block_count = n.saturating_sub(self.starting_block); - let vested_block_count = BlockNumberToBalance::convert(vested_block_count); - // Return amount that is still locked in vesting - let maybe_balance = vested_block_count.checked_mul(&self.per_block); - if let Some(balance) = maybe_balance { - self.locked.saturating_sub(balance) - } else { - Zero::zero() + /// The currency trait. + type Currency: LockableCurrency; + + /// Convert the block number into a balance. + type BlockNumberToBalance: Convert>; + + /// The minimum amount transferred to call `vested_transfer`. + #[pallet::constant] + type MinVestedTransfer: Get>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// Maximum number of vesting schedules an account may have at a given moment. + const MAX_VESTING_SCHEDULES: u32; + } + + #[pallet::extra_constants] + impl Pallet { + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + #[allow(non_snake_case)] + fn MaxVestingSchedules() -> u32 { + T::MAX_VESTING_SCHEDULES } } -} -decl_storage! { - trait Store for Module as Vesting { - /// Information regarding the vesting of a given account. 
-		pub Vesting get(fn vesting):
-			map hasher(blake2_128_concat) T::AccountId
-			=> Option<VestingInfo<BalanceOf<T>, T::BlockNumber>>;
+    #[pallet::hooks]
+    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+        #[cfg(feature = "try-runtime")]
+        fn pre_upgrade() -> Result<(), &'static str> {
+            migrations::v1::pre_migrate::<T>()
+        }
+
+        fn on_runtime_upgrade() -> Weight {
+            if StorageVersion::<T>::get() == Releases::V0 {
+                StorageVersion::<T>::put(Releases::V1);
+                migrations::v1::migrate::<T>().saturating_add(T::DbWeight::get().reads_writes(1, 1))
+            } else {
+                T::DbWeight::get().reads(1)
+            }
+        }
+
+        #[cfg(feature = "try-runtime")]
+        fn post_upgrade() -> Result<(), &'static str> {
+            migrations::v1::post_migrate::<T>()
+        }
+
+        fn integrity_test() {
+            assert!(T::MAX_VESTING_SCHEDULES > 0, "`MaxVestingSchedules` must be greater than 0");
+        }
 	}
-	add_extra_genesis {
-		config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf<T>)>;
-		build(|config: &GenesisConfig<T>| {
+
+    /// Information regarding the vesting of a given account.
+    #[pallet::storage]
+    #[pallet::getter(fn vesting)]
+    pub type Vesting<T: Config> = StorageMap<
+        _,
+        Blake2_128Concat,
+        T::AccountId,
+        BoundedVec<VestingInfo<BalanceOf<T>, T::BlockNumber>, MaxVestingSchedulesGet<T>>,
+    >;
+
+    /// Storage version of the pallet.
+    ///
+    /// New networks start with latest version, as determined by the genesis build.
+    #[pallet::storage]
+    pub(crate) type StorageVersion<T: Config> = StorageValue<_, Releases, ValueQuery>;
+
+    #[pallet::pallet]
+    #[pallet::generate_store(pub(super) trait Store)]
+    #[pallet::generate_storage_info]
+    pub struct Pallet<T>(_);
+
+    #[pallet::genesis_config]
+    pub struct GenesisConfig<T: Config> {
+        pub vesting: Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf<T>)>,
+    }
+
+    #[cfg(feature = "std")]
+    impl<T: Config> Default for GenesisConfig<T> {
+        fn default() -> Self {
+            GenesisConfig { vesting: Default::default() }
+        }
+    }
+
+    #[pallet::genesis_build]
+    impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
+        fn build(&self) {
 			use sp_runtime::traits::Saturating;
+
+            // Genesis uses the latest storage version.
+            StorageVersion::<T>::put(Releases::V1);
+
 			// Generate initial vesting configuration
 			// * who - Account which we are generating vesting configuration for
 			// * begin - Block when the account will start to vest
 			// * length - Number of blocks from `begin` until fully vested
 			// * liquid - Number of units which can be spent before vesting begins
-			for &(ref who, begin, length, liquid) in config.vesting.iter() {
+			for &(ref who, begin, length, liquid) in self.vesting.iter() {
 				let balance = T::Currency::free_balance(who);
 				assert!(!balance.is_zero(), "Currencies must be init'd before vesting");
 				// Total genesis `balance` minus `liquid` equals funds locked for vesting
 				let locked = balance.saturating_sub(liquid);
 				let length_as_balance = T::BlockNumberToBalance::convert(length);
 				let per_block = locked / length_as_balance.max(sp_runtime::traits::One::one());
+				let vesting_info = VestingInfo::new(locked, per_block, begin);
+				if !vesting_info.is_valid() {
+					panic!("Invalid VestingInfo params at genesis")
+				};
+
+				Vesting::<T>::try_append(who, vesting_info)
+					.expect("Too many vesting schedules at genesis.");
-
-				Vesting::<T>::insert(who, VestingInfo {
-					locked: locked,
-					per_block: per_block,
-					starting_block: begin
-				});
-				let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve;
+				let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE;
 				T::Currency::set_lock(VESTING_ID, who, locked, reasons);
 			}
-		})
+		}
 	}
-
-decl_event!(
-	pub enum Event<T> where AccountId = <T as frame_system::Trait>::AccountId, Balance = BalanceOf<T> {
-		/// The amount vested has been updated.
This could indicate more funds are available. The - /// balance given is the amount which is left unvested (and thus locked). + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The amount vested has been updated. This could indicate a change in funds available. + /// The balance given is the amount which is left unvested (and thus locked). /// \[account, unvested\] - VestingUpdated(AccountId, Balance), - /// An \[account\] has become fully vested. No further vesting can happen. - VestingCompleted(AccountId), + VestingUpdated(T::AccountId, BalanceOf), + /// An \[account\] has become fully vested. + VestingCompleted(T::AccountId), } -); -decl_error! { - /// Error for the vesting module. - pub enum Error for Module { + /// Error for the vesting pallet. + #[pallet::error] + pub enum Error { /// The account given is not vesting. NotVesting, - /// An existing vesting schedule already exists for this account that cannot be clobbered. - ExistingVestingSchedule, + /// The account already has `MaxVestingSchedules` count of schedules and thus + /// cannot add another one. Consider merging existing schedules in order to add another. + AtMaxVestingSchedules, /// Amount being transferred is too low to create a vesting schedule. AmountLow, + /// An index was out of bounds of the vesting schedules. + ScheduleIndexOutOfBounds, + /// Failed to create a new schedule because some parameter was invalid. + InvalidScheduleParams, } -} - -decl_module! { - /// Vesting module declaration. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum amount to be transferred to create a new vesting schedule. - const MinVestedTransfer: BalanceOf = T::MinVestedTransfer::get(); - - fn deposit_event() = default; + #[pallet::call] + impl Pallet { /// Unlock any vested funds of the sender account. /// /// The dispatch origin for this call must be _Signed_ and the sender must have funds still - /// locked under this module. + /// locked under this pallet. /// /// Emits either `VestingCompleted` or `VestingUpdated`. /// @@ -209,12 +312,12 @@ decl_module! { /// - Reads: Vesting Storage, Balances Locks, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, [Sender Account] /// # - #[weight = T::WeightInfo::vest_locked(MaxLocksOf::::get()) - .max(T::WeightInfo::vest_unlocked(MaxLocksOf::::get())) - ] - fn vest(origin) -> DispatchResult { + #[pallet::weight(T::WeightInfo::vest_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + .max(T::WeightInfo::vest_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) + )] + pub fn vest(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; - Self::update_lock(who) + Self::do_vest(who) } /// Unlock any vested funds of a `target` account. @@ -222,7 +325,7 @@ decl_module! { /// The dispatch origin for this call must be _Signed_. /// /// - `target`: The account whose vested funds should be unlocked. Must have funds still - /// locked under this module. + /// locked under this pallet. /// /// Emits either `VestingCompleted` or `VestingUpdated`. /// @@ -232,48 +335,46 @@ decl_module! 
{ /// - Reads: Vesting Storage, Balances Locks, Target Account /// - Writes: Vesting Storage, Balances Locks, Target Account /// # - #[weight = T::WeightInfo::vest_other_locked(MaxLocksOf::::get()) - .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get())) - ] - fn vest_other(origin, target: ::Source) -> DispatchResult { + #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) + )] + pub fn vest_other( + origin: OriginFor, + target: ::Source, + ) -> DispatchResult { ensure_signed(origin)?; - Self::update_lock(T::Lookup::lookup(target)?) + let who = T::Lookup::lookup(target)?; + Self::do_vest(who) } /// Create a vested transfer. /// /// The dispatch origin for this call must be _Signed_. /// - /// - `target`: The account that should be transferred the vested funds. - /// - `amount`: The amount of funds to transfer and will be vested. + /// - `target`: The account receiving the vested funds. /// - `schedule`: The vesting schedule attached to the transfer. /// /// Emits `VestingCreated`. /// + /// NOTE: This will unlock all schedules through the current block. + /// /// # /// - `O(1)`. /// - DbWeight: 3 Reads, 3 Writes /// - Reads: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// - Writes: Vesting Storage, Balances Locks, Target Account, [Sender Account] /// # - #[weight = T::WeightInfo::vested_transfer(MaxLocksOf::::get())] + #[pallet::weight( + T::WeightInfo::vested_transfer(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) + )] pub fn vested_transfer( - origin, + origin: OriginFor, target: ::Source, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { let transactor = ensure_signed(origin)?; - ensure!(schedule.locked >= T::MinVestedTransfer::get(), Error::::AmountLow); - - let who = T::Lookup::lookup(target)?; - ensure!(!Vesting::::contains_key(&who), Error::::ExistingVestingSchedule); - - T::Currency::transfer(&transactor, &who, schedule.locked, ExistenceRequirement::AllowDeath)?; - - Self::add_vesting_schedule(&who, schedule.locked, schedule.per_block, schedule.starting_block) - .expect("user does not have an existing vesting schedule; q.e.d."); - - Ok(()) + let transactor = ::unlookup(transactor); + Self::do_vested_transfer(transactor, target, schedule) } /// Force a vested transfer. @@ -282,74 +383,309 @@ decl_module! { /// /// - `source`: The account whose funds should be transferred. /// - `target`: The account that should be transferred the vested funds. - /// - `amount`: The amount of funds to transfer and will be vested. /// - `schedule`: The vesting schedule attached to the transfer. /// /// Emits `VestingCreated`. /// + /// NOTE: This will unlock all schedules through the current block. + /// /// # /// - `O(1)`. 
 		/// - DbWeight: 4 Reads, 4 Writes
 		///     - Reads: Vesting Storage, Balances Locks, Target Account, Source Account
 		///     - Writes: Vesting Storage, Balances Locks, Target Account, Source Account
 		/// # </weight>
-		#[weight = T::WeightInfo::force_vested_transfer(MaxLocksOf::<T>::get())]
+        #[pallet::weight(
+            T::WeightInfo::force_vested_transfer(MaxLocksOf::<T>::get(), T::MAX_VESTING_SCHEDULES)
+        )]
 		pub fn force_vested_transfer(
-			origin,
+            origin: OriginFor<T>,
 			source: <T::Lookup as StaticLookup>::Source,
 			target: <T::Lookup as StaticLookup>::Source,
 			schedule: VestingInfo<BalanceOf<T>, T::BlockNumber>,
 		) -> DispatchResult {
 			ensure_root(origin)?;
-			ensure!(schedule.locked >= T::MinVestedTransfer::get(), Error::<T>::AmountLow);
+            Self::do_vested_transfer(source, target, schedule)
+        }
+
+        /// Merge two vesting schedules together, creating a new vesting schedule that unlocks over
+        /// the highest possible start and end blocks. If both schedules have already started the
+        /// current block will be used as the schedule start; with the caveat that if one schedule
+        /// is finished by the current block, the other will be treated as the new merged schedule,
+        /// unmodified.
+        ///
+        /// NOTE: If `schedule1_index == schedule2_index` this is a no-op.
+        /// NOTE: This will unlock all schedules through the current block prior to merging.
+        /// NOTE: If both schedules have ended by the current block, no new schedule will be created
+        /// and both will be removed.
+        ///
+        /// Merged schedule attributes:
+        /// - `starting_block`: `MAX(schedule1.starting_block, schedule2.starting_block,
+        ///   current_block)`.
+        /// - `ending_block`: `MAX(schedule1.ending_block, schedule2.ending_block)`.
+        /// - `locked`: `schedule1.locked_at(current_block) + schedule2.locked_at(current_block)`.
+        ///
+        /// The dispatch origin for this call must be _Signed_.
+        ///
+        /// - `schedule1_index`: index of the first schedule to merge.
+        /// - `schedule2_index`: index of the second schedule to merge.
+        #[pallet::weight(
+            T::WeightInfo::not_unlocking_merge_schedules(MaxLocksOf::<T>::get(), T::MAX_VESTING_SCHEDULES)
+            .max(T::WeightInfo::unlocking_merge_schedules(MaxLocksOf::<T>::get(), T::MAX_VESTING_SCHEDULES))
+        )]
+        pub fn merge_schedules(
+            origin: OriginFor<T>,
+            schedule1_index: u32,
+            schedule2_index: u32,
+        ) -> DispatchResult {
+            let who = ensure_signed(origin)?;
+            if schedule1_index == schedule2_index {
+                return Ok(())
+            };
+            let schedule1_index = schedule1_index as usize;
+            let schedule2_index = schedule2_index as usize;
-
-			let target = T::Lookup::lookup(target)?;
-			let source = T::Lookup::lookup(source)?;
-			ensure!(!Vesting::<T>::contains_key(&target), Error::<T>::ExistingVestingSchedule);
+            let schedules = Self::vesting(&who).ok_or(Error::<T>::NotVesting)?;
+            let merge_action = VestingAction::Merge(schedule1_index, schedule2_index);
-
-			T::Currency::transfer(&source, &target, schedule.locked, ExistenceRequirement::AllowDeath)?;
+            let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), merge_action)?;
-
-			Self::add_vesting_schedule(&target, schedule.locked, schedule.per_block, schedule.starting_block)
-				.expect("user does not have an existing vesting schedule; q.e.d.");
+            Self::write_vesting(&who, schedules)?;
+            Self::write_lock(&who, locked_now);
 
 			Ok(())
 		}
 	}
 }
-
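To make the merged-schedule attributes listed in `merge_schedules`'s documentation concrete, here is a standalone, hedged sketch using plain `u64` balances and block numbers (the helper names are illustrative, not the pallet's internals; the ending-block helper assumes any per-block remainder rounds the duration up):

// Locked amount of a (locked, per_block, starting_block) schedule at `now`.
fn locked_at(now: u64, (locked, per_block, start): (u64, u64, u64)) -> u64 {
    locked.saturating_sub(now.saturating_sub(start).saturating_mul(per_block))
}

// Block at which the schedule has fully unlocked (duration rounded up).
fn ending_block((locked, per_block, start): (u64, u64, u64)) -> u64 {
    start + (locked + per_block - 1) / per_block
}

// Apply the merge rules from the documentation above.
fn merged(now: u64, s1: (u64, u64, u64), s2: (u64, u64, u64)) -> (u64, u64, u64) {
    let locked = locked_at(now, s1) + locked_at(now, s2);
    let starting_block = now.max(s1.2).max(s2.2);
    let end = ending_block(s1).max(ending_block(s2));
    let per_block = (locked / (end - starting_block).max(1)).max(1);
    (locked, per_block, starting_block)
}

fn main() {
    // Schedule 1: 300 locked, 10 per block, starting at 10 (ends at block 40).
    // Schedule 2: 200 locked, 20 per block, starting at 20 (ends at block 30).
    // At block 25 they still hold 150 and 100 locked, respectively, so the
    // merged schedule locks 250 over blocks 25..40 at 250 / 15 = 16 per block.
    assert_eq!(merged(25, (300, 10, 10), (200, 20, 20)), (250, 16, 25));
}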
-impl<T: Trait> Module<T> {
-	/// (Re)set or remove the module's currency lock on `who`'s account in accordance with their
-	/// current unvested amount.
-	fn update_lock(who: T::AccountId) -> DispatchResult {
-		let vesting = Self::vesting(&who).ok_or(Error::<T>::NotVesting)?;
-		let now = <frame_system::Module<T>>::block_number();
-		let locked_now = vesting.locked_at::<T::BlockNumberToBalance>(now);
+impl<T: Config> Pallet<T> {
+    // Create a new `VestingInfo`, based off of two other `VestingInfo`s.
+    // NOTE: We assume both schedules have had funds unlocked up through the current block.
+    fn merge_vesting_info(
+        now: T::BlockNumber,
+        schedule1: VestingInfo<BalanceOf<T>, T::BlockNumber>,
+        schedule2: VestingInfo<BalanceOf<T>, T::BlockNumber>,
+    ) -> Option<VestingInfo<BalanceOf<T>, T::BlockNumber>> {
+        let schedule1_ending_block = schedule1.ending_block_as_balance::<T::BlockNumberToBalance>();
+        let schedule2_ending_block = schedule2.ending_block_as_balance::<T::BlockNumberToBalance>();
+        let now_as_balance = T::BlockNumberToBalance::convert(now);
+
+        // Check if one or both schedules have ended.
+        match (schedule1_ending_block <= now_as_balance, schedule2_ending_block <= now_as_balance) {
+            // If both schedules have ended, we don't merge and exit early.
+            (true, true) => return None,
+            // If one schedule has ended, we treat the one that has not ended as the new
+            // merged schedule.
+            (true, false) => return Some(schedule2),
+            (false, true) => return Some(schedule1),
+            // If neither schedule has ended don't exit early.
+            _ => {},
+        }
+
+        let locked = schedule1
+            .locked_at::<T::BlockNumberToBalance>(now)
+            .saturating_add(schedule2.locked_at::<T::BlockNumberToBalance>(now));
+        // This shouldn't happen because we know at least one ending block is greater than now,
+        // thus at least one schedule has some locked balance.
+        debug_assert!(
+            !locked.is_zero(),
+            "merge_vesting_info validation checks failed to catch a locked of 0"
+        );
+
+        let ending_block = schedule1_ending_block.max(schedule2_ending_block);
+        let starting_block = now.max(schedule1.starting_block()).max(schedule2.starting_block());
+
+        let per_block = {
+            let duration = ending_block
+                .saturating_sub(T::BlockNumberToBalance::convert(starting_block))
+                .max(One::one());
+            (locked / duration).max(One::one())
+        };
+
+        let schedule = VestingInfo::new(locked, per_block, starting_block);
+        debug_assert!(schedule.is_valid(), "merge_vesting_info schedule validation check failed");
+
+        Some(schedule)
+    }
+
+    // Execute a vested transfer from `source` to `target` with the given `schedule`.
+    fn do_vested_transfer(
+        source: <T::Lookup as StaticLookup>::Source,
+        target: <T::Lookup as StaticLookup>::Source,
+        schedule: VestingInfo<BalanceOf<T>, T::BlockNumber>,
+    ) -> DispatchResult {
+        // Validate user inputs.
+        ensure!(schedule.locked() >= T::MinVestedTransfer::get(), Error::<T>::AmountLow);
+        if !schedule.is_valid() {
+            return Err(Error::<T>::InvalidScheduleParams.into())
+        };
+        let target = T::Lookup::lookup(target)?;
+        let source = T::Lookup::lookup(source)?;
+
+        // Check we can add to this account prior to any storage writes.
+        Self::can_add_vesting_schedule(
+            &target,
+            schedule.locked(),
+            schedule.per_block(),
+            schedule.starting_block(),
+        )?;
+
+        T::Currency::transfer(
+            &source,
+            &target,
+            schedule.locked(),
+            ExistenceRequirement::AllowDeath,
+        )?;
+
+        // We can't let this fail because the currency transfer has already happened.
+        let res = Self::add_vesting_schedule(
+            &target,
+            schedule.locked(),
+            schedule.per_block(),
+            schedule.starting_block(),
+        );
+        debug_assert!(res.is_ok(), "Failed to add a schedule when we had to succeed.");
+
+        Ok(())
+    }
+
+    /// Iterate through the schedules to track the current locked amount and
+    /// filter out completed and specified schedules.
+    ///
+    /// Returns a tuple that consists of:
+    /// - Vec of vesting schedules, where completed schedules and those specified
+    ///   by filter are removed. (Note the vec is not checked for respecting
+    ///   bounded length.)
+    /// - The amount locked at the current block number based on the given schedules.
+    ///
+    /// NOTE: the amount locked does not include any schedules that are filtered out via `action`.
+    fn report_schedule_updates(
+        schedules: Vec<VestingInfo<BalanceOf<T>, T::BlockNumber>>,
+        action: VestingAction,
+    ) -> (Vec<VestingInfo<BalanceOf<T>, T::BlockNumber>>, BalanceOf<T>) {
+        let now = <frame_system::Pallet<T>>::block_number();
+
+        let mut total_locked_now: BalanceOf<T> = Zero::zero();
+        let filtered_schedules = action
+            .pick_schedules::<T>(schedules)
+            .filter_map(|schedule| {
+                let locked_now = schedule.locked_at::<T::BlockNumberToBalance>(now);
+                if locked_now.is_zero() {
+                    None
+                } else {
+                    total_locked_now = total_locked_now.saturating_add(locked_now);
+                    Some(schedule)
+                }
+            })
+            .collect::<Vec<_>>();
+
+        (filtered_schedules, total_locked_now)
+    }
+
+    /// Write an account's updated vesting lock to storage.
+    fn write_lock(who: &T::AccountId, total_locked_now: BalanceOf<T>) {
+        if total_locked_now.is_zero() {
+            T::Currency::remove_lock(VESTING_ID, who);
+            Self::deposit_event(Event::<T>::VestingCompleted(who.clone()));
+        } else {
+            let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE;
+            T::Currency::set_lock(VESTING_ID, who, total_locked_now, reasons);
+            Self::deposit_event(Event::<T>::VestingUpdated(who.clone(), total_locked_now));
+        };
+    }
-		if locked_now.is_zero() {
-			T::Currency::remove_lock(VESTING_ID, &who);
+    /// Write an account's updated vesting schedules to storage.
+    fn write_vesting(
+        who: &T::AccountId,
+        schedules: Vec<VestingInfo<BalanceOf<T>, T::BlockNumber>>,
+    ) -> Result<(), DispatchError> {
+        let schedules: BoundedVec<
+            VestingInfo<BalanceOf<T>, T::BlockNumber>,
+            MaxVestingSchedulesGet<T>,
+        > = schedules.try_into().map_err(|_| Error::<T>::AtMaxVestingSchedules)?;
+
+        if schedules.len() == 0 {
 			Vesting::<T>::remove(&who);
-			Self::deposit_event(RawEvent::VestingCompleted(who));
 		} else {
-			let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve;
-			T::Currency::set_lock(VESTING_ID, &who, locked_now, reasons);
-			Self::deposit_event(RawEvent::VestingUpdated(who, locked_now));
+			Vesting::<T>::insert(who, schedules)
 		}
+
+        Ok(())
+    }
+
+    /// Unlock any vested funds of `who`.
+    fn do_vest(who: T::AccountId) -> DispatchResult {
+        let schedules = Self::vesting(&who).ok_or(Error::<T>::NotVesting)?;
+
+        let (schedules, locked_now) =
+            Self::exec_action(schedules.to_vec(), VestingAction::Passive)?;
+
+        Self::write_vesting(&who, schedules)?;
+        Self::write_lock(&who, locked_now);
 		Ok(())
 	}
+
+    /// Execute a `VestingAction` against the given `schedules`. Returns the updated schedules
+    /// and locked amount.
+    fn exec_action(
+        schedules: Vec<VestingInfo<BalanceOf<T>, T::BlockNumber>>,
+        action: VestingAction,
+    ) -> Result<(Vec<VestingInfo<BalanceOf<T>, T::BlockNumber>>, BalanceOf<T>), DispatchError> {
+        let (schedules, locked_now) = match action {
+            VestingAction::Merge(idx1, idx2) => {
+                // The schedule index is based off of the schedule ordering prior to filtering out
+                // any schedules that may be ending at this block.
+                let schedule1 = *schedules.get(idx1).ok_or(Error::<T>::ScheduleIndexOutOfBounds)?;
+                let schedule2 = *schedules.get(idx2).ok_or(Error::<T>::ScheduleIndexOutOfBounds)?;
+
+                // The length of `schedules` decreases by 2 here since we filter out 2 schedules.
+                // Thus we know below that we can push the new merged schedule without error
+                // (assuming initial state was valid).
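// Illustrative trace (editorial sketch with hypothetical values, not part of
// the upstream diff): with schedules [A, B, C] and `VestingAction::Merge(0, 2)`,
// the call below filters out A and C (plus any schedule that has fully
// vested), returning [B] and B's still-locked amount. If `merge_vesting_info`
// later yields a merged schedule M from A and C, M is pushed to give [B, M]
// and `locked_now` is increased by M's locked balance at `now`.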
+ let (mut schedules, mut locked_now) = + Self::report_schedule_updates(schedules.to_vec(), action); + + let now = >::block_number(); + if let Some(new_schedule) = Self::merge_vesting_info(now, schedule1, schedule2) { + // Merging created a new schedule so we: + // 1) need to add it to the accounts vesting schedule collection, + schedules.push(new_schedule); + // (we use `locked_at` in case this is a schedule that started in the past) + let new_schedule_locked = + new_schedule.locked_at::(now); + // and 2) update the locked amount to reflect the schedule we just added. + locked_now = locked_now.saturating_add(new_schedule_locked); + } // In the None case there was no new schedule to account for. + + (schedules, locked_now) + }, + _ => Self::report_schedule_updates(schedules.to_vec(), action), + }; + + debug_assert!( + locked_now > Zero::zero() && schedules.len() > 0 || + locked_now == Zero::zero() && schedules.len() == 0 + ); + + Ok((schedules, locked_now)) + } } -impl VestingSchedule for Module where - BalanceOf: MaybeSerializeDeserialize + Debug +impl VestingSchedule for Pallet +where + BalanceOf: MaybeSerializeDeserialize + Debug, { - type Moment = T::BlockNumber; type Currency = T::Currency; + type Moment = T::BlockNumber; /// Get the amount that is currently being vested and cannot be transferred out of this account. fn vesting_balance(who: &T::AccountId) -> Option> { if let Some(v) = Self::vesting(who) { - let now = >::block_number(); - let locked_now = v.locked_at::(now); - Some(T::Currency::free_balance(who).min(locked_now)) + let now = >::block_number(); + let total_locked_now = v.iter().fold(Zero::zero(), |total, schedule| { + schedule.locked_at::(now).saturating_add(total) + }); + Some(T::Currency::free_balance(who).min(total_locked_now)) } else { None } @@ -357,506 +693,77 @@ impl VestingSchedule for Module where /// Adds a vesting schedule to a given account. /// - /// If there already exists a vesting schedule for the given account, an `Err` is returned - /// and nothing is updated. + /// If the account has `MaxVestingSchedules`, an Error is returned and nothing + /// is updated. /// /// On success, a linearly reducing amount of funds will be locked. In order to realise any /// reduction of the lock over time as it diminishes, the account owner must use `vest` or /// `vest_other`. /// /// Is a no-op if the amount to be vested is zero. + /// + /// NOTE: This doesn't alter the free balance of the account. fn add_vesting_schedule( who: &T::AccountId, locked: BalanceOf, per_block: BalanceOf, - starting_block: T::BlockNumber + starting_block: T::BlockNumber, ) -> DispatchResult { - if locked.is_zero() { return Ok(()) } - if Vesting::::contains_key(who) { - Err(Error::::ExistingVestingSchedule)? + if locked.is_zero() { + return Ok(()) } - let vesting_schedule = VestingInfo { - locked, - per_block, - starting_block + + let vesting_schedule = VestingInfo::new(locked, per_block, starting_block); + // Check for `per_block` or `locked` of 0. + if !vesting_schedule.is_valid() { + return Err(Error::::InvalidScheduleParams.into()) }; - Vesting::::insert(who, vesting_schedule); - // it can't fail, but even if somehow it did, we don't really care. - let _ = Self::update_lock(who.clone()); - Ok(()) - } - /// Remove a vesting schedule for a given account. - fn remove_vesting_schedule(who: &T::AccountId) { - Vesting::::remove(who); - // it can't fail, but even if somehow it did, we don't really care. 
- let _ = Self::update_lock(who.clone()); - } -} + let mut schedules = Self::vesting(who).unwrap_or_default(); -#[cfg(test)] -mod tests { - use super::*; + // NOTE: we must push the new schedule so that `exec_action` + // will give the correct new locked amount. + ensure!(schedules.try_push(vesting_schedule).is_ok(), Error::::AtMaxVestingSchedules); - use std::cell::RefCell; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - traits::Get - }; - use sp_core::H256; - use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup, Identity, BadOrigin}, - }; - use frame_system::RawOrigin; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } + let (schedules, locked_now) = + Self::exec_action(schedules.to_vec(), VestingAction::Passive)?; - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type PalletInfo = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - } - parameter_types! { - pub const MaxLocks: u32 = 10; - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type MaxLocks = MaxLocks; - type WeightInfo = (); - } - parameter_types! { - pub const MinVestedTransfer: u64 = 256 * 2; - } - impl Trait for Test { - type Event = (); - type Currency = Balances; - type BlockNumberToBalance = Identity; - type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = (); - } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Vesting = Module; + Self::write_vesting(&who, schedules)?; + Self::write_lock(who, locked_now); - thread_local! 
{ - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - } - pub struct ExistentialDeposit; - impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } + Ok(()) } - pub struct ExtBuilder { - existential_deposit: u64, - } - impl Default for ExtBuilder { - fn default() -> Self { - Self { - existential_deposit: 1, - } - } - } - impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn build(self) -> sp_io::TestExternalities { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10 * self.existential_deposit), - (2, 20 * self.existential_deposit), - (3, 30 * self.existential_deposit), - (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) - ], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig:: { - vesting: vec![ - (1, 0, 10, 5 * self.existential_deposit), - (2, 10, 20, 0), - (12, 10, 20, 5 * self.existential_deposit) - ], - }.assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext + // Ensure we can call `add_vesting_schedule` without error. This should always + // be called prior to `add_vesting_schedule`. + fn can_add_vesting_schedule( + who: &T::AccountId, + locked: BalanceOf, + per_block: BalanceOf, + starting_block: T::BlockNumber, + ) -> DispatchResult { + // Check for `per_block` or `locked` of 0. + if !VestingInfo::new(locked, per_block, starting_block).is_valid() { + return Err(Error::::InvalidScheduleParams.into()) } - } - - #[test] - fn check_vesting_status() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - let user2_free_balance = Balances::free_balance(&2); - let user12_free_balance = Balances::free_balance(&12); - assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance - assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance - assert_eq!(user12_free_balance, 256 * 10); // Account 12 has free balance - let user1_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 128, // Vesting over 10 blocks - starting_block: 0, - }; - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule - - // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 - assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); - // Account 2 has their full balance locked - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has only their illiquid funds locked - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(10); - assert_eq!(System::block_number(), 10); - - // Account 1 has fully vested by block 10 - 
assert_eq!(Vesting::vesting_balance(&1), Some(0)); - // Account 2 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative - assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 - assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 - - }); - } - - #[test] - fn unvested_balance_should_not_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_noop!( - Balances::transfer(Some(1).into(), 2, 56), - pallet_balances::Error::::LiquidityRestrictions, - ); // Account 1 cannot send more than vested amount - }); - } - - #[test] - fn vested_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); - } - - #[test] - fn vested_balance_should_transfer_using_vest_other() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest_other(Some(2).into(), 1)); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); - } - - #[test] - fn extra_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); - assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); - - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal - - let user2_free_balance = Balances::free_balance(&2); - assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal - - // Account 1 has only 5 units vested at block 1 (plus 150 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained - - // Account 2 has no units vested at block 1, but gained 100 - assert_eq!(Vesting::vesting_balance(&2), Some(200)); - assert_ok!(Vesting::vest(Some(2).into())); - assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained - }); - } - #[test] - fn liquid_funds_should_transfer_with_delayed_vesting() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user12_free_balance = 
Balances::free_balance(&12); - - assert_eq!(user12_free_balance, 2560); // Account 12 has free balance - // Account 12 has liquid funds - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - // Account 12 has delayed vesting - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); - - // Account 12 can still send liquid funds - assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); - }); - } - - #[test] - fn vested_transfer_works() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user3_free_balance = Balances::free_balance(&3); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); - // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); - // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); - // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); - // Ensure the transfer happened correctly. - let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); - let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); - - System::set_block_number(20); - assert_eq!(System::block_number(), 20); - - // Account 4 has 5 * 64 units vested by block 20. - assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - // Account 4 has fully vested. - assert_eq!(Vesting::vesting_balance(&4), Some(0)); - }); - } - - #[test] - fn vested_transfer_correctly_fails() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user2_free_balance = Balances::free_balance(&2); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - // Account 2 should already have a vesting schedule. - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); + ensure!( + (Vesting::::decode_len(who).unwrap_or_default() as u32) < T::MAX_VESTING_SCHEDULES, + Error::::AtMaxVestingSchedules + ); - // The vesting schedule we will try to create, fails due to pre-existence of schedule. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!( - Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule), - Error::::ExistingVestingSchedule, - ); - - // Fails due to too low transfer amount. - let new_vesting_schedule_too_low = VestingInfo { - locked: 256 * 1, - per_block: 64, - starting_block: 10, - }; - assert_noop!( - Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), - Error::::AmountLow, - ); - - // Verify no currency transfer happened. 
- assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - }); + Ok(()) } - #[test] - fn force_vested_transfer_works() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user3_free_balance = Balances::free_balance(&3); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); - // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); - // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!(Vesting::force_vested_transfer(Some(4).into(), 3, 4, new_vesting_schedule), BadOrigin); - assert_ok!(Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule)); - // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); - // Ensure the transfer happened correctly. - let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); - let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); - - System::set_block_number(20); - assert_eq!(System::block_number(), 20); - - // Account 4 has 5 * 64 units vested by block 20. - assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - // Account 4 has fully vested. - assert_eq!(Vesting::vesting_balance(&4), Some(0)); - }); - } + /// Remove a vesting schedule for a given account. + fn remove_vesting_schedule(who: &T::AccountId, schedule_index: u32) -> DispatchResult { + let schedules = Self::vesting(who).ok_or(Error::::NotVesting)?; + let remove_action = VestingAction::Remove(schedule_index as usize); - #[test] - fn force_vested_transfer_correctly_fails() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user2_free_balance = Balances::free_balance(&2); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - // Account 2 should already have a vesting schedule. - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); + let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), remove_action)?; - // The vesting schedule we will try to create, fails due to pre-existence of schedule. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!( - Vesting::force_vested_transfer(RawOrigin::Root.into(), 4, 2, new_vesting_schedule), - Error::::ExistingVestingSchedule, - ); - - // Fails due to too low transfer amount. - let new_vesting_schedule_too_low = VestingInfo { - locked: 256 * 1, - per_block: 64, - starting_block: 10, - }; - assert_noop!( - Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule_too_low), - Error::::AmountLow, - ); - - // Verify no currency transfer happened. 
-			assert_eq!(user2_free_balance, 256 * 20);
-			assert_eq!(user4_free_balance, 256 * 40);
-		});
+        Self::write_vesting(&who, schedules)?;
+        Self::write_lock(who, locked_now);
+        Ok(())
+    }
 }
diff --git a/frame/vesting/src/migrations.rs b/frame/vesting/src/migrations.rs
new file mode 100644
index 0000000000000..086257d285ea0
--- /dev/null
+++ b/frame/vesting/src/migrations.rs
@@ -0,0 +1,95 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Storage migrations for the vesting pallet.
+
+use super::*;
+
+// Migration from single schedule to multiple schedules.
+pub(crate) mod v1 {
+    use super::*;
+
+    #[cfg(feature = "try-runtime")]
+    pub(crate) fn pre_migrate<T: Config>() -> Result<(), &'static str> {
+        assert!(StorageVersion::<T>::get() == Releases::V0, "Storage version too high.");
+
+        log::debug!(
+            target: "runtime::vesting",
+            "migration: Vesting storage version v1 PRE migration checks successful!"
+        );
+
+        Ok(())
+    }
+
+    /// Migrate from single schedule to multi schedule storage.
+    /// WARNING: This migration will delete schedules if `MaxVestingSchedules < 1`.
+    pub(crate) fn migrate<T: Config>() -> Weight {
+        let mut reads_writes = 0;
+
+        Vesting::<T>::translate::<VestingInfo<BalanceOf<T>, T::BlockNumber>, _>(
+            |_key, vesting_info| {
+                reads_writes += 1;
+                let v: Option<
+                    BoundedVec<
+                        VestingInfo<BalanceOf<T>, T::BlockNumber>,
+                        MaxVestingSchedulesGet<T>,
+                    >,
+                > = vec![vesting_info].try_into().ok();
+
+                if v.is_none() {
+                    log::warn!(
+                        target: "runtime::vesting",
+                        "migration: Failed to move a vesting schedule into a BoundedVec"
+                    );
+                }
+
+                v
+            },
+        );
+
+        T::DbWeight::get().reads_writes(reads_writes, reads_writes)
+    }
+
+    #[cfg(feature = "try-runtime")]
+    pub(crate) fn post_migrate<T: Config>() -> Result<(), &'static str> {
+        assert_eq!(StorageVersion::<T>::get(), Releases::V1);
+
+        for (_key, schedules) in Vesting::<T>::iter() {
+            assert!(
+                schedules.len() == 1,
+                "A bounded vec with incorrect count of items was created."
+            );
+
+            for s in schedules {
+                // It is ok if this does not pass, but ideally pre-existing schedules would pass
+                // this validation logic so we can be more confident about edge cases.
+                if !s.is_valid() {
+                    log::warn!(
+                        target: "runtime::vesting",
+                        "migration: A schedule does not pass new validation logic.",
+                    )
+                }
+            }
+        }
+
+        log::debug!(
+            target: "runtime::vesting",
+            "migration: Vesting storage version v1 POST migration checks successful!"
+        );
+        Ok(())
+    }
+}
diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs
new file mode 100644
index 0000000000000..cb8961150003b
--- /dev/null
+++ b/frame/vesting/src/mock.rs
@@ -0,0 +1,155 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::parameter_types; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, Identity, IdentityLookup}, +}; + +use super::*; +use crate as pallet_vesting; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} +impl frame_system::Config for Test { + type AccountData = pallet_balances::AccountData; + type AccountId = u64; + type BaseCallFilter = frame_support::traits::Everything; + type BlockHashCount = BlockHashCount; + type BlockLength = (); + type BlockNumber = u64; + type BlockWeights = (); + type Call = Call; + type DbWeight = (); + type Event = Event; + type Hash = H256; + type Hashing = BlakeTwo256; + type Header = Header; + type Index = u64; + type Lookup = IdentityLookup; + type OnKilledAccount = (); + type OnNewAccount = (); + type OnSetCode = (); + type Origin = Origin; + type PalletInfo = PalletInfo; + type SS58Prefix = (); + type SystemWeightInfo = (); + type Version = (); +} +parameter_types! { + pub const MaxLocks: u32 = 10; +} +impl pallet_balances::Config for Test { + type AccountStore = System; + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type MaxLocks = MaxLocks; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type WeightInfo = (); +} +parameter_types! 
{ + pub const MinVestedTransfer: u64 = 256 * 2; + pub static ExistentialDeposit: u64 = 0; +} +impl Config for Test { + type BlockNumberToBalance = Identity; + type Currency = Balances; + type Event = Event; + const MAX_VESTING_SCHEDULES: u32 = 3; + type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); +} + +pub struct ExtBuilder { + existential_deposit: u64, + vesting_genesis_config: Option>, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { existential_deposit: 1, vesting_genesis_config: None } + } +} + +impl ExtBuilder { + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + + pub fn vesting_genesis_config(mut self, config: Vec<(u64, u64, u64, u64)>) -> Self { + self.vesting_genesis_config = Some(config); + self + } + + pub fn build(self) -> sp_io::TestExternalities { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 10 * self.existential_deposit), + (2, 20 * self.existential_deposit), + (3, 30 * self.existential_deposit), + (4, 40 * self.existential_deposit), + (12, 10 * self.existential_deposit), + (13, 9999 * self.existential_deposit), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let vesting = if let Some(vesting_config) = self.vesting_genesis_config { + vesting_config + } else { + vec![ + (1, 0, 10, 5 * self.existential_deposit), + (2, 10, 20, 0), + (12, 10, 20, 5 * self.existential_deposit), + ] + }; + + pallet_vesting::GenesisConfig:: { vesting } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} diff --git a/frame/vesting/src/tests.rs b/frame/vesting/src/tests.rs new file mode 100644 index 0000000000000..2a6dd0520c3b0 --- /dev/null +++ b/frame/vesting/src/tests.rs @@ -0,0 +1,1157 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{assert_noop, assert_ok, assert_storage_noop, dispatch::EncodeLike}; +use frame_system::RawOrigin; +use sp_runtime::traits::{BadOrigin, Identity}; + +use super::{Vesting as VestingStorage, *}; +use crate::mock::{Balances, ExtBuilder, System, Test, Vesting}; + +/// A default existential deposit. +const ED: u64 = 256; + +/// Calls vest, and asserts that there is no entry for `account` +/// in the `Vesting` storage item. +fn vest_and_assert_no_vesting(account: u64) +where + u64: EncodeLike<::AccountId>, + T: pallet::Config, +{ + // Its ok for this to fail because the user may already have no schedules. 
+ let _result = Vesting::vest(Some(account).into()); + assert!(!>::contains_key(account)); +} + +#[test] +fn check_vesting_status() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + let user2_free_balance = Balances::free_balance(&2); + let user12_free_balance = Balances::free_balance(&12); + assert_eq!(user1_free_balance, ED * 10); // Account 1 has free balance + assert_eq!(user2_free_balance, ED * 20); // Account 2 has free balance + assert_eq!(user12_free_balance, ED * 10); // Account 12 has free balance + let user1_vesting_schedule = VestingInfo::new( + ED * 5, + 128, // Vesting over 10 blocks + 0, + ); + let user2_vesting_schedule = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + let user12_vesting_schedule = VestingInfo::new( + ED * 5, + 64, // Vesting over 20 blocks + 10, + ); + assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_vesting_schedule]); // Account 1 has a vesting schedule + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); // Account 2 has a vesting schedule + assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); // Account 12 has a vesting schedule + + // Account 1 has only 128 units vested from their illiquid ED * 5 units at block 1 + assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); + // Account 2 has their full balance locked + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has only their illiquid funds locked + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - ED * 5)); + + System::set_block_number(10); + assert_eq!(System::block_number(), 10); + + // Account 1 has fully vested by block 10 + assert_eq!(Vesting::vesting_balance(&1), Some(0)); + // Account 2 has started vesting by block 10 + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has started vesting by block 10 + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - ED * 5)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative + assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 + assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 + + // Once we unlock the funds, they are removed from storage. + vest_and_assert_no_vesting::(1); + vest_and_assert_no_vesting::(2); + vest_and_assert_no_vesting::(12); + }); +} + +#[test] +fn check_vesting_status_for_multi_schedule_account() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert_eq!(System::block_number(), 1); + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + // Account 2 already has a vesting schedule. + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + // Account 2's free balance is from sched0. + let free_balance = Balances::free_balance(&2); + assert_eq!(free_balance, ED * (20)); + assert_eq!(Vesting::vesting_balance(&2), Some(free_balance)); + + // Add a 2nd schedule that is already unlocking by block #1. + let sched1 = VestingInfo::new( + ED * 10, + ED, // Vesting over 10 blocks + 0, + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); + // Free balance is equal to the two existing schedules total amount. 
+		let free_balance = Balances::free_balance(&2);
+		assert_eq!(free_balance, ED * (10 + 20));
+		// The most recently added schedule exists.
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]);
+		// sched1 has free funds at block #1, but nothing else.
+		assert_eq!(Vesting::vesting_balance(&2), Some(free_balance - sched1.per_block()));
+
+		// Add a 3rd schedule.
+		let sched2 = VestingInfo::new(
+			ED * 30,
+			ED, // Vesting over 30 blocks
+			5,
+		);
+		assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched2));
+
+		System::set_block_number(9);
+		// Free balance is equal to the 3 existing schedules' total amount.
+		let free_balance = Balances::free_balance(&2);
+		assert_eq!(free_balance, ED * (10 + 20 + 30));
+		// sched1 and sched2 are freeing funds at block #9.
+		assert_eq!(
+			Vesting::vesting_balance(&2),
+			Some(free_balance - sched1.per_block() * 9 - sched2.per_block() * 4)
+		);
+
+		System::set_block_number(20);
+		// At block #20 sched1 is fully unlocked while sched2 and sched0 are partially unlocked.
+		assert_eq!(
+			Vesting::vesting_balance(&2),
+			Some(
+				free_balance - sched1.locked() - sched2.per_block() * 15 - sched0.per_block() * 10
+			)
+		);
+
+		System::set_block_number(30);
+		// At block #30 sched0 and sched1 are fully unlocked while sched2 is partially unlocked.
+		assert_eq!(
+			Vesting::vesting_balance(&2),
+			Some(free_balance - sched1.locked() - sched2.per_block() * 25 - sched0.locked())
+		);
+
+		// At block #35 sched2 fully unlocks and thus all schedules' funds are unlocked.
+		System::set_block_number(35);
+		assert_eq!(Vesting::vesting_balance(&2), Some(0));
+		// Since we have not called any extrinsics that would unlock funds, the schedules
+		// are still in storage,
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]);
+		// but once we unlock the funds, they are removed from storage.
+		vest_and_assert_no_vesting::<Test>(2);
+	});
+}
+
+#[test]
+fn unvested_balance_should_not_transfer() {
+	ExtBuilder::default().existential_deposit(10).build().execute_with(|| {
+		let user1_free_balance = Balances::free_balance(&1);
+		assert_eq!(user1_free_balance, 100); // Account 1 has free balance
+		// Account 1 has only 5 units vested at block 1 (plus 50 unvested)
+		assert_eq!(Vesting::vesting_balance(&1), Some(45));
+		assert_noop!(
+			Balances::transfer(Some(1).into(), 2, 56),
+			pallet_balances::Error::<Test, _>::LiquidityRestrictions,
+		); // Account 1 cannot send more than vested amount
+	});
+}
+
+#[test]
+fn vested_balance_should_transfer() {
+	ExtBuilder::default().existential_deposit(10).build().execute_with(|| {
+		let user1_free_balance = Balances::free_balance(&1);
+		assert_eq!(user1_free_balance, 100); // Account 1 has free balance
+		// Account 1 has only 5 units vested at block 1 (plus 50 unvested)
+		assert_eq!(Vesting::vesting_balance(&1), Some(45));
+		assert_ok!(Vesting::vest(Some(1).into()));
+		assert_ok!(Balances::transfer(Some(1).into(), 2, 55));
+	});
+}
+
+#[test]
+fn vested_balance_should_transfer_with_multi_sched() {
+	ExtBuilder::default().existential_deposit(ED).build().execute_with(|| {
+		let sched0 = VestingInfo::new(5 * ED, 128, 0);
+		assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0));
+		// Total 10*ED locked for all the schedules.
+		assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]);
+
+		let user1_free_balance = Balances::free_balance(&1);
+		assert_eq!(user1_free_balance, 3840); // Account 1 has free balance
+
+		// Account 1 has only 256 units unlocking at block 1 (plus 1280 already free).
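+		// (Arithmetic: the two 5 * ED schedules each unlock 128 per block from
+		// block 0, so at block 1 the still-locked total is 2 * (1280 - 128) = 2304.)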
+ assert_eq!(Vesting::vesting_balance(&1), Some(2304)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 2, 1536)); + }); +} + +#[test] +fn non_vested_cannot_vest() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert!(!>::contains_key(4)); + assert_noop!(Vesting::vest(Some(4).into()), Error::::NotVesting); + }); +} + +#[test] +fn vested_balance_should_transfer_using_vest_other() { + ExtBuilder::default().existential_deposit(10).build().execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest_other(Some(2).into(), 1)); + assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + }); +} + +#[test] +fn vested_balance_should_transfer_using_vest_other_with_multi_sched() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let sched0 = VestingInfo::new(5 * ED, 128, 0); + assert_ok!(Vesting::vested_transfer(Some(13).into(), 1, sched0)); + // Total of 10*ED of locked for all the schedules. + assert_eq!(Vesting::vesting(&1).unwrap(), vec![sched0, sched0]); + + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 3840); // Account 1 has free balance + + // Account 1 has only 256 units unlocking at block 1 (plus 1280 already free). + assert_eq!(Vesting::vesting_balance(&1), Some(2304)); + assert_ok!(Vesting::vest_other(Some(2).into(), 1)); + assert_ok!(Balances::transfer(Some(1).into(), 2, 1536)); + }); +} + +#[test] +fn non_vested_cannot_vest_other() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert!(!>::contains_key(4)); + assert_noop!(Vesting::vest_other(Some(3).into(), 4), Error::::NotVesting); + }); +} + +#[test] +fn extra_balance_should_transfer() { + ExtBuilder::default().existential_deposit(10).build().execute_with(|| { + assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); + assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); + + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal + + let user2_free_balance = Balances::free_balance(&2); + assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal + + // Account 1 has only 5 units vested at block 1 (plus 150 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained + + // Account 2 has no units vested at block 1, but gained 100 + assert_eq!(Vesting::vesting_balance(&2), Some(200)); + assert_ok!(Vesting::vest(Some(2).into())); + assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained + }); +} + +#[test] +fn liquid_funds_should_transfer_with_delayed_vesting() { + ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + let user12_free_balance = Balances::free_balance(&12); + + assert_eq!(user12_free_balance, 2560); // Account 12 has free balance + // Account 12 has liquid funds + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + + // Account 12 has delayed vesting + let user12_vesting_schedule = VestingInfo::new( + 256 * 5, + 64, // Vesting over 20 blocks + 10, + ); + 
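+		// (I.e. 1280 of the 2560 total stays locked and only starts unlocking at
+		// block 10, at 64 per block, finishing at block 30.)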
assert_eq!(Vesting::vesting(&12).unwrap(), vec![user12_vesting_schedule]); + + // Account 12 can still send liquid funds + assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); + }); +} + +#[test] +fn vested_transfer_works() { + ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, 256 * 30); + assert_eq!(user4_free_balance, 256 * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo::new( + 256 * 5, + 64, // Vesting over 20 blocks + 10, + ); + assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4).unwrap(), vec![new_vesting_schedule]); + // Ensure the transfer happened correctly. + let user3_free_balance_updated = Balances::free_balance(&3); + assert_eq!(user3_free_balance_updated, 256 * 25); + let user4_free_balance_updated = Balances::free_balance(&4); + assert_eq!(user4_free_balance_updated, 256 * 45); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + + System::set_block_number(20); + assert_eq!(System::block_number(), 20); + + // Account 4 has 5 * 64 units vested by block 20. + assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + // Account 4 has fully vested, + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. + vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn vested_transfer_correctly_fails() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let user2_free_balance = Balances::free_balance(&2); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user2_free_balance, ED * 20); + assert_eq!(user4_free_balance, ED * 40); + + // Account 2 should already have a vesting schedule. + let user2_vesting_schedule = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); + + // Fails due to too low transfer amount. + let new_vesting_schedule_too_low = + VestingInfo::new(::MinVestedTransfer::get() - 1, 64, 10); + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), + Error::::AmountLow, + ); + + // `per_block` is 0, which would result in a schedule with infinite duration. + let schedule_per_block_0 = + VestingInfo::new(::MinVestedTransfer::get(), 0, 10); + assert_noop!( + Vesting::vested_transfer(Some(13).into(), 4, schedule_per_block_0), + Error::::InvalidScheduleParams, + ); + + // `locked` is 0. + let schedule_locked_0 = VestingInfo::new(0, 1, 10); + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, schedule_locked_0), + Error::::AmountLow, + ); + + // Free balance has not changed. + assert_eq!(user2_free_balance, Balances::free_balance(&2)); + assert_eq!(user4_free_balance, Balances::free_balance(&4)); + // Account 4 has no schedules. 
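+		// (Schedule validity, per `VestingInfo::is_valid`, reduces to
+		//
+		//     !locked.is_zero() && !raw_per_block().is_zero()
+		//
+		// while `MinVestedTransfer` is enforced separately by the transfer
+		// extrinsics, which is why the failures above surface as `AmountLow` and
+		// `InvalidScheduleParams`.)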
+ vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn vested_transfer_allows_max_schedules() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let mut user_4_free_balance = Balances::free_balance(&4); + let max_schedules = ::MAX_VESTING_SCHEDULES; + let sched = VestingInfo::new( + ::MinVestedTransfer::get(), + 1, // Vest over 2 * 256 blocks. + 10, + ); + + // Add max amount schedules to user 4. + for _ in 0..max_schedules { + assert_ok!(Vesting::vested_transfer(Some(13).into(), 4, sched)); + } + + // The schedules count towards vesting balance + let transferred_amount = ::MinVestedTransfer::get() * max_schedules as u64; + assert_eq!(Vesting::vesting_balance(&4), Some(transferred_amount)); + // and free balance. + user_4_free_balance += transferred_amount; + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Cannot insert a 4th vesting schedule when `MaxVestingSchedules` === 3, + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, sched), + Error::::AtMaxVestingSchedules, + ); + // so the free balance does not change. + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Account 4 has fully vested when all the schedules end, + System::set_block_number( + ::MinVestedTransfer::get() + sched.starting_block(), + ); + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. + vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn force_vested_transfer_works() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, ED * 30); + assert_eq!(user4_free_balance, ED * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo::new( + ED * 5, + 64, // Vesting over 20 blocks + 10, + ); + + assert_noop!( + Vesting::force_vested_transfer(Some(4).into(), 3, 4, new_vesting_schedule), + BadOrigin + ); + assert_ok!(Vesting::force_vested_transfer( + RawOrigin::Root.into(), + 3, + 4, + new_vesting_schedule + )); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4).unwrap()[0], new_vesting_schedule); + assert_eq!(Vesting::vesting(&4).unwrap().len(), 1); + // Ensure the transfer happened correctly. + let user3_free_balance_updated = Balances::free_balance(&3); + assert_eq!(user3_free_balance_updated, ED * 25); + let user4_free_balance_updated = Balances::free_balance(&4); + assert_eq!(user4_free_balance_updated, ED * 45); + // Account 4 has 5 * ED locked. + assert_eq!(Vesting::vesting_balance(&4), Some(ED * 5)); + + System::set_block_number(20); + assert_eq!(System::block_number(), 20); + + // Account 4 has 5 * 64 units vested by block 20. + assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + // Account 4 has fully vested, + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. 
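+		// (Unlike `vested_transfer`, the force variant requires a Root origin and
+		// names the source account explicitly:
+		//
+		//     Vesting::force_vested_transfer(RawOrigin::Root.into(), source, target, schedule)
+		//
+		// A signed origin is rejected with `BadOrigin`, as asserted above.)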
+ vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn force_vested_transfer_correctly_fails() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let user2_free_balance = Balances::free_balance(&2); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user2_free_balance, ED * 20); + assert_eq!(user4_free_balance, ED * 40); + // Account 2 should already have a vesting schedule. + let user2_vesting_schedule = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_vesting_schedule]); + + // Too low transfer amount. + let new_vesting_schedule_too_low = + VestingInfo::new(::MinVestedTransfer::get() - 1, 64, 10); + assert_noop!( + Vesting::force_vested_transfer( + RawOrigin::Root.into(), + 3, + 4, + new_vesting_schedule_too_low + ), + Error::::AmountLow, + ); + + // `per_block` is 0. + let schedule_per_block_0 = + VestingInfo::new(::MinVestedTransfer::get(), 0, 10); + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 13, 4, schedule_per_block_0), + Error::::InvalidScheduleParams, + ); + + // `locked` is 0. + let schedule_locked_0 = VestingInfo::new(0, 1, 10); + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, schedule_locked_0), + Error::::AmountLow, + ); + + // Verify no currency transfer happened. + assert_eq!(user2_free_balance, Balances::free_balance(&2)); + assert_eq!(user4_free_balance, Balances::free_balance(&4)); + // Account 4 has no schedules. + vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn force_vested_transfer_allows_max_schedules() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let mut user_4_free_balance = Balances::free_balance(&4); + let max_schedules = ::MAX_VESTING_SCHEDULES; + let sched = VestingInfo::new( + ::MinVestedTransfer::get(), + 1, // Vest over 2 * 256 blocks. + 10, + ); + + // Add max amount schedules to user 4. + for _ in 0..max_schedules { + assert_ok!(Vesting::force_vested_transfer(RawOrigin::Root.into(), 13, 4, sched)); + } + + // The schedules count towards vesting balance. + let transferred_amount = ::MinVestedTransfer::get() * max_schedules as u64; + assert_eq!(Vesting::vesting_balance(&4), Some(transferred_amount)); + // and free balance. + user_4_free_balance += transferred_amount; + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Cannot insert a 4th vesting schedule when `MaxVestingSchedules` === 3 + assert_noop!( + Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, sched), + Error::::AtMaxVestingSchedules, + ); + // so the free balance does not change. + assert_eq!(Balances::free_balance(&4), user_4_free_balance); + + // Account 4 has fully vested when all the schedules end, + System::set_block_number(::MinVestedTransfer::get() + 10); + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + // and after unlocking its schedules are removed from storage. + vest_and_assert_no_vesting::(4); + }); +} + +#[test] +fn merge_schedules_that_have_not_started() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vest over 20 blocks. + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + assert_eq!(Balances::usable_balance(&2), 0); + + // Add a schedule that is identical to the one that already exists. 
+		assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched0));
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]);
+		assert_eq!(Balances::usable_balance(&2), 0);
+		assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1));
+
+		// Since we merged identical schedules, the new schedule finishes at the same
+		// time as the original, just with double the amount.
+		let sched1 = VestingInfo::new(
+			sched0.locked() * 2,
+			sched0.per_block() * 2,
+			10, // Starts at the block the schedules are merged.
+		);
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched1]);
+
+		assert_eq!(Balances::usable_balance(&2), 0);
+	});
+}
+
+#[test]
+fn merge_ongoing_schedules() {
+	// Merging two schedules that have started will vest both before merging.
+	ExtBuilder::default().existential_deposit(ED).build().execute_with(|| {
+		// Account 2 should already have a vesting schedule.
+		let sched0 = VestingInfo::new(
+			ED * 20,
+			ED, // Vest over 20 blocks.
+			10,
+		);
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]);
+
+		let sched1 = VestingInfo::new(
+			ED * 10,
+			ED, // Vest over 10 blocks.
+			sched0.starting_block() + 5, // Start at block 15.
+		);
+		assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1));
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]);
+
+		// Go to halfway through the second schedule, where both schedules are actively vesting.
+		let cur_block = 20;
+		System::set_block_number(cur_block);
+
+		// Account 2 has no usable balances prior to the merge because they have not unlocked
+		// with `vest` yet.
+		assert_eq!(Balances::usable_balance(&2), 0);
+
+		assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1));
+
+		// Merging schedules vests (unlocks) all pre-existing schedules prior to merging,
+		// which is reflected in account 2's updated usable balance.
+		let sched0_vested_now = sched0.per_block() * (cur_block - sched0.starting_block());
+		let sched1_vested_now = sched1.per_block() * (cur_block - sched1.starting_block());
+		assert_eq!(Balances::usable_balance(&2), sched0_vested_now + sched1_vested_now);
+
+		// The locked amount is the sum of what both schedules have locked at the current block.
+		let sched2_locked = sched1
+			.locked_at::<Identity>(cur_block)
+			.saturating_add(sched0.locked_at::<Identity>(cur_block));
+		// End block of the new schedule is the greater of either merged schedule.
+		let sched2_end = sched1
+			.ending_block_as_balance::<Identity>()
+			.max(sched0.ending_block_as_balance::<Identity>());
+		let sched2_duration = sched2_end - cur_block;
+		// Based off the new schedule's total locked and its duration, we can calculate the
+		// amount to unlock per block.
+		let sched2_per_block = sched2_locked / sched2_duration;
+
+		let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, cur_block);
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]);
+
+		// And just to double check, we assert the new merged schedule will be cleaned up as expected.
+		System::set_block_number(30);
+		vest_and_assert_no_vesting::<Test>(2);
+	});
+}
+
+#[test]
+fn merging_shifts_other_schedules_index() {
+	// Schedules being merged are filtered out, schedules to the right of any merged
+	// schedule shift left, and the merged schedule is always last.
+	ExtBuilder::default().existential_deposit(ED).build().execute_with(|| {
+		let sched0 = VestingInfo::new(
+			ED * 10,
+			ED, // Vesting over 10 blocks.
+			10,
+		);
+		let sched1 = VestingInfo::new(
+			ED * 11,
+			ED, // Vesting over 11 blocks.
+			11,
+		);
+		let sched2 = VestingInfo::new(
+			ED * 12,
+			ED, // Vesting over 12 blocks.
+ 12, + ); + + // Account 3 starts out with no schedules, + assert_eq!(Vesting::vesting(&3), None); + // and some usable balance. + let usable_balance = Balances::usable_balance(&3); + assert_eq!(usable_balance, 30 * ED); + + let cur_block = 1; + assert_eq!(System::block_number(), cur_block); + + // Transfer the above 3 schedules to account 3. + assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched0)); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched1)); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 3, sched2)); + + // With no schedules vested or merged they are in the order they are created + assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched0, sched1, sched2]); + // and the usable balance has not changed. + assert_eq!(usable_balance, Balances::usable_balance(&3)); + + assert_ok!(Vesting::merge_schedules(Some(3).into(), 0, 2)); + + // Create the merged schedule of sched0 & sched2. + // The merged schedule will have the max possible starting block, + let sched3_start = sched1.starting_block().max(sched2.starting_block()); + // `locked` equal to the sum of the two schedules locked through the current block, + let sched3_locked = + sched2.locked_at::(cur_block) + sched0.locked_at::(cur_block); + // and will end at the max possible block. + let sched3_end = sched2 + .ending_block_as_balance::() + .max(sched0.ending_block_as_balance::()); + let sched3_duration = sched3_end - sched3_start; + let sched3_per_block = sched3_locked / sched3_duration; + let sched3 = VestingInfo::new(sched3_locked, sched3_per_block, sched3_start); + + // The not touched schedule moves left and the new merged schedule is appended. + assert_eq!(Vesting::vesting(&3).unwrap(), vec![sched1, sched3]); + // The usable balance hasn't changed since none of the schedules have started. + assert_eq!(Balances::usable_balance(&3), usable_balance); + }); +} + +#[test] +fn merge_ongoing_and_yet_to_be_started_schedules() { + // Merge an ongoing schedule that has had `vest` called and a schedule that has not already + // started. + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + // Fast forward to half way through the life of sched1. + let mut cur_block = + (sched0.starting_block() + sched0.ending_block_as_balance::()) / 2; + assert_eq!(cur_block, 20); + System::set_block_number(cur_block); + + // Prior to vesting there is no usable balance. + let mut usable_balance = 0; + assert_eq!(Balances::usable_balance(&2), usable_balance); + // Vest the current schedules (which is just sched0 now). + Vesting::vest(Some(2).into()).unwrap(); + + // After vesting the usable balance increases by the unlocked amount. + let sched0_vested_now = sched0.locked() - sched0.locked_at::(cur_block); + usable_balance += sched0_vested_now; + assert_eq!(Balances::usable_balance(&2), usable_balance); + + // Go forward a block. + cur_block += 1; + System::set_block_number(cur_block); + + // And add a schedule that starts after this block, but before sched0 finishes. + let sched1 = VestingInfo::new( + ED * 10, + 1, // Vesting over 256 * 10 (2560) blocks + cur_block + 1, + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); + + // Merge the schedules before sched1 starts. 
+ assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + // After merging, the usable balance only changes by the amount sched0 vested since we + // last called `vest` (which is just 1 block). The usable balance is not affected by + // sched1 because it has not started yet. + usable_balance += sched0.per_block(); + assert_eq!(Balances::usable_balance(&2), usable_balance); + + // The resulting schedule will have the later starting block of the two, + let sched2_start = sched1.starting_block(); + // `locked` equal to the sum of the two schedules locked through the current block, + let sched2_locked = + sched0.locked_at::(cur_block) + sched1.locked_at::(cur_block); + // and will end at the max possible block. + let sched2_end = sched0 + .ending_block_as_balance::() + .max(sched1.ending_block_as_balance::()); + let sched2_duration = sched2_end - sched2_start; + let sched2_per_block = sched2_locked / sched2_duration; + + let sched2 = VestingInfo::new(sched2_locked, sched2_per_block, sched2_start); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2]); + }); +} + +#[test] +fn merge_finished_and_ongoing_schedules() { + // If a schedule finishes by the current block we treat the ongoing schedule, + // without any alterations, as the merged one. + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + // Account 2 should already have a vesting schedule. + let sched0 = VestingInfo::new( + ED * 20, + ED, // Vesting over 20 blocks. + 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + let sched1 = VestingInfo::new( + ED * 40, + ED, // Vesting over 40 blocks. + 10, + ); + assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); + + // Transfer a 3rd schedule, so we can demonstrate how schedule indices change. + // (We are not merging this schedule.) + let sched2 = VestingInfo::new( + ED * 30, + ED, // Vesting over 30 blocks. + 10, + ); + assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched2)); + + // The schedules are in expected order prior to merging. + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]); + + // Fast forward to sched0's end block. + let cur_block = sched0.ending_block_as_balance::(); + System::set_block_number(cur_block); + assert_eq!(System::block_number(), 30); + + // Prior to `merge_schedules` and with no vest/vest_other called the user has no usable + // balance. + assert_eq!(Balances::usable_balance(&2), 0); + assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1)); + + // sched2 is now the first, since sched0 & sched1 get filtered out while "merging". + // sched1 gets treated like the new merged schedule by getting pushed onto back + // of the vesting schedules vec. Note: sched0 finished at the current block. + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]); + + // sched0 has finished, so its funds are fully unlocked. + let sched0_unlocked_now = sched0.locked(); + // The remaining schedules are ongoing, so their funds are partially unlocked. + let sched1_unlocked_now = sched1.locked() - sched1.locked_at::(cur_block); + let sched2_unlocked_now = sched2.locked() - sched2.locked_at::(cur_block); + + // Since merging also vests all the schedules, the users usable balance after merging + // includes all pre-existing schedules unlocked through the current block, including + // schedules not merged. 
+		assert_eq!(
+			Balances::usable_balance(&2),
+			sched0_unlocked_now + sched1_unlocked_now + sched2_unlocked_now
+		);
+	});
+}
+
+#[test]
+fn merge_finishing_schedules_does_not_create_a_new_one() {
+	// If both schedules finish by the current block we don't create a new one.
+	ExtBuilder::default().existential_deposit(ED).build().execute_with(|| {
+		// Account 2 should already have a vesting schedule.
+		let sched0 = VestingInfo::new(
+			ED * 20,
+			ED, // 20 block duration.
+			10,
+		);
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]);
+
+		// Create sched1 and transfer it to account 2.
+		let sched1 = VestingInfo::new(
+			ED * 30,
+			ED, // 30 block duration.
+			10,
+		);
+		assert_ok!(Vesting::vested_transfer(Some(3).into(), 2, sched1));
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]);
+
+		let all_scheds_end = sched0
+			.ending_block_as_balance::<Identity>()
+			.max(sched1.ending_block_as_balance::<Identity>());
+
+		assert_eq!(all_scheds_end, 40);
+		System::set_block_number(all_scheds_end);
+
+		// Prior to merge_schedules and with no vest/vest_other called the user has no usable
+		// balance.
+		assert_eq!(Balances::usable_balance(&2), 0);
+
+		// Merge schedule 0 and 1.
+		assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1));
+		// The user no longer has any more vesting schedules because they both ended at the
+		// block they were merged,
+		assert!(!<VestingStorage<Test>>::contains_key(&2));
+		// and their usable balance has increased by the total amount locked in the merged
+		// schedules.
+		assert_eq!(Balances::usable_balance(&2), sched0.locked() + sched1.locked());
+	});
+}
+
+#[test]
+fn merge_finished_and_yet_to_be_started_schedules() {
+	ExtBuilder::default().existential_deposit(ED).build().execute_with(|| {
+		// Account 2 should already have a vesting schedule.
+		let sched0 = VestingInfo::new(
+			ED * 20,
+			ED, // 20 block duration.
+			10, // Ends at block 30.
+		);
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]);
+
+		let sched1 = VestingInfo::new(
+			ED * 30,
+			ED * 2, // 15 block duration.
+			35,
+		);
+		assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched1));
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1]);
+
+		let sched2 = VestingInfo::new(
+			ED * 40,
+			ED, // 40 block duration.
+			30,
+		);
+		// Add a 3rd schedule to demonstrate how sched1 shifts.
+		assert_ok!(Vesting::vested_transfer(Some(13).into(), 2, sched2));
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched1, sched2]);
+
+		System::set_block_number(30);
+
+		// At block 30, sched0 has finished unlocking while sched1 and sched2 are still fully
+		// locked,
+		assert_eq!(Vesting::vesting_balance(&2), Some(sched1.locked() + sched2.locked()));
+		// but since we have not vested, usable balance is still 0.
+		assert_eq!(Balances::usable_balance(&2), 0);
+
+		// Merge schedule 0 and 1.
+		assert_ok!(Vesting::merge_schedules(Some(2).into(), 0, 1));
+
+		// sched0 is removed since it finished, and sched1 is removed and then pushed on the back
+		// because it is treated as the merged schedule.
+		assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched2, sched1]);
+
+		// The usable balance is updated because merging fully unlocked sched0.
+		assert_eq!(Balances::usable_balance(&2), sched0.locked());
+	});
+}
+
+#[test]
+fn merge_schedules_throws_proper_errors() {
+	ExtBuilder::default().existential_deposit(ED).build().execute_with(|| {
+		// Account 2 should already have a vesting schedule.
+		let sched0 = VestingInfo::new(
+			ED * 20,
+			ED, // 20 block duration.
+ 10, + ); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0]); + + // Account 2 only has 1 vesting schedule. + assert_noop!( + Vesting::merge_schedules(Some(2).into(), 0, 1), + Error::::ScheduleIndexOutOfBounds + ); + + // Account 4 has 0 vesting schedules. + assert_eq!(Vesting::vesting(&4), None); + assert_noop!(Vesting::merge_schedules(Some(4).into(), 0, 1), Error::::NotVesting); + + // There are enough schedules to merge but an index is non-existent. + Vesting::vested_transfer(Some(3).into(), 2, sched0).unwrap(); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![sched0, sched0]); + assert_noop!( + Vesting::merge_schedules(Some(2).into(), 0, 2), + Error::::ScheduleIndexOutOfBounds + ); + + // It is a storage noop with no errors if the indexes are the same. + assert_storage_noop!(Vesting::merge_schedules(Some(2).into(), 0, 0).unwrap()); + }); +} + +#[test] +fn generates_multiple_schedules_from_genesis_config() { + let vesting_config = vec![ + // 5 * existential deposit locked. + (1, 0, 10, 5 * ED), + // 1 * existential deposit locked. + (2, 10, 20, 19 * ED), + // 2 * existential deposit locked. + (2, 10, 20, 18 * ED), + // 1 * existential deposit locked. + (12, 10, 20, 9 * ED), + // 2 * existential deposit locked. + (12, 10, 20, 8 * ED), + // 3 * existential deposit locked. + (12, 10, 20, 7 * ED), + ]; + ExtBuilder::default() + .existential_deposit(ED) + .vesting_genesis_config(vesting_config) + .build() + .execute_with(|| { + let user1_sched1 = VestingInfo::new(5 * ED, 128, 0u64); + assert_eq!(Vesting::vesting(&1).unwrap(), vec![user1_sched1]); + + let user2_sched1 = VestingInfo::new(1 * ED, 12, 10u64); + let user2_sched2 = VestingInfo::new(2 * ED, 25, 10u64); + assert_eq!(Vesting::vesting(&2).unwrap(), vec![user2_sched1, user2_sched2]); + + let user12_sched1 = VestingInfo::new(1 * ED, 12, 10u64); + let user12_sched2 = VestingInfo::new(2 * ED, 25, 10u64); + let user12_sched3 = VestingInfo::new(3 * ED, 38, 10u64); + assert_eq!( + Vesting::vesting(&12).unwrap(), + vec![user12_sched1, user12_sched2, user12_sched3] + ); + }); +} + +#[test] +#[should_panic] +fn multiple_schedules_from_genesis_config_errors() { + // MaxVestingSchedules is 3, but this config has 4 for account 12 so we panic when building + // from genesis. + let vesting_config = + vec![(12, 10, 20, ED), (12, 10, 20, ED), (12, 10, 20, ED), (12, 10, 20, ED)]; + ExtBuilder::default() + .existential_deposit(ED) + .vesting_genesis_config(vesting_config) + .build(); +} + +#[test] +fn build_genesis_has_storage_version_v1() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert_eq!(StorageVersion::::get(), Releases::V1); + }); +} + +#[test] +fn merge_vesting_handles_per_block_0() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let sched0 = VestingInfo::new( + ED, 0, // Vesting over 256 blocks. + 1, + ); + assert_eq!(sched0.ending_block_as_balance::(), 257); + let sched1 = VestingInfo::new( + ED * 2, + 0, // Vesting over 512 blocks. + 10, + ); + assert_eq!(sched1.ending_block_as_balance::(), 512u64 + 10); + + let merged = VestingInfo::new(764, 1, 10); + assert_eq!(Vesting::merge_vesting_info(5, sched0, sched1), Some(merged)); + }); +} + +#[test] +fn vesting_info_validate_works() { + let min_transfer = ::MinVestedTransfer::get(); + // Does not check for min transfer. + assert_eq!(VestingInfo::new(min_transfer - 1, 1u64, 10u64).is_valid(), true); + + // `locked` cannot be 0. + assert_eq!(VestingInfo::new(0, 1u64, 10u64).is_valid(), false); + + // `per_block` cannot be 0. 
+	assert_eq!(VestingInfo::new(min_transfer + 1, 0u64, 10u64).is_valid(), false);
+
+	// With valid inputs it does not error.
+	assert_eq!(VestingInfo::new(min_transfer, 1u64, 10u64).is_valid(), true);
+}
+
+#[test]
+fn vesting_info_ending_block_as_balance_works() {
+	// Treats `per_block` 0 as 1.
+	let per_block_0 = VestingInfo::new(256u32, 0u32, 10u32);
+	assert_eq!(per_block_0.ending_block_as_balance::<Identity>(), 256 + 10);
+
+	// `per_block >= locked` always results in a schedule ending the block after it starts.
+	let per_block_gt_locked = VestingInfo::new(256u32, 256 * 2u32, 10u32);
+	assert_eq!(
+		per_block_gt_locked.ending_block_as_balance::<Identity>(),
+		1 + per_block_gt_locked.starting_block()
+	);
+	let per_block_eq_locked = VestingInfo::new(256u32, 256u32, 10u32);
+	assert_eq!(
+		per_block_gt_locked.ending_block_as_balance::<Identity>(),
+		per_block_eq_locked.ending_block_as_balance::<Identity>()
+	);
+
+	// Correctly calculates the end if `locked % per_block != 0`. (We need a block to unlock the remainder.)
+	let imperfect_per_block = VestingInfo::new(256u32, 250u32, 10u32);
+	assert_eq!(
+		imperfect_per_block.ending_block_as_balance::<Identity>(),
+		imperfect_per_block.starting_block() + 2u32,
+	);
+	assert_eq!(
+		imperfect_per_block
+			.locked_at::<Identity>(imperfect_per_block.ending_block_as_balance::<Identity>()),
+		0
+	);
+}
+
+#[test]
+fn per_block_works() {
+	let per_block_0 = VestingInfo::new(256u32, 0u32, 10u32);
+	assert_eq!(per_block_0.per_block(), 1u32);
+	assert_eq!(per_block_0.raw_per_block(), 0u32);
+
+	let per_block_1 = VestingInfo::new(256u32, 1u32, 10u32);
+	assert_eq!(per_block_1.per_block(), 1u32);
+	assert_eq!(per_block_1.raw_per_block(), 1u32);
+}
+
+// When an account's free balance + schedule.locked is less than ED, the vested transfer will fail.
+#[test]
+fn vested_transfer_less_than_existential_deposit_fails() {
+	ExtBuilder::default().existential_deposit(4 * ED).build().execute_with(|| {
+		// MinVestedTransfer is less than the ED.
+		assert!(
+			<Test as Config>::Currency::minimum_balance() >
+				<Test as Config>::MinVestedTransfer::get()
+		);
+
+		let sched =
+			VestingInfo::new(<Test as Config>::MinVestedTransfer::get() as u64, 1u64, 10u64);
+		// The new account balance with the schedule's locked amount would be less than ED.
+		assert!(
+			Balances::free_balance(&99) + sched.locked() <
+				<Test as Config>::Currency::minimum_balance()
+		);
+
+		// vested_transfer fails.
+		assert_noop!(
+			Vesting::vested_transfer(Some(3).into(), 99, sched),
+			pallet_balances::Error::<Test, _>::ExistentialDeposit,
+		);
+		// force_vested_transfer fails.
+		assert_noop!(
+			Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 99, sched),
+			pallet_balances::Error::<Test, _>::ExistentialDeposit,
+		);
+	});
+}
diff --git a/frame/vesting/src/vesting_info.rs b/frame/vesting/src/vesting_info.rs
new file mode 100644
index 0000000000000..81bffa199fd72
--- /dev/null
+++ b/frame/vesting/src/vesting_info.rs
@@ -0,0 +1,114 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//!
Module to enforce private fields on `VestingInfo`. + +use super::*; + +/// Struct to encode the vesting schedule of an individual account. +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] +pub struct VestingInfo { + /// Locked amount at genesis. + locked: Balance, + /// Amount that gets unlocked every block after `starting_block`. + per_block: Balance, + /// Starting block for unlocking(vesting). + starting_block: BlockNumber, +} + +impl VestingInfo +where + Balance: AtLeast32BitUnsigned + Copy, + BlockNumber: AtLeast32BitUnsigned + Copy + Bounded, +{ + /// Instantiate a new `VestingInfo`. + pub fn new( + locked: Balance, + per_block: Balance, + starting_block: BlockNumber, + ) -> VestingInfo { + VestingInfo { locked, per_block, starting_block } + } + + /// Validate parameters for `VestingInfo`. Note that this does not check + /// against `MinVestedTransfer`. + pub fn is_valid(&self) -> bool { + !self.locked.is_zero() && !self.raw_per_block().is_zero() + } + + /// Locked amount at schedule creation. + pub fn locked(&self) -> Balance { + self.locked + } + + /// Amount that gets unlocked every block after `starting_block`. Corrects for `per_block` of 0. + /// We don't let `per_block` be less than 1, or else the vesting will never end. + /// This should be used whenever accessing `per_block` unless explicitly checking for 0 values. + pub fn per_block(&self) -> Balance { + self.per_block.max(One::one()) + } + + /// Get the unmodified `per_block`. Generally should not be used, but is useful for + /// validating `per_block`. + pub(crate) fn raw_per_block(&self) -> Balance { + self.per_block + } + + /// Starting block for unlocking(vesting). + pub fn starting_block(&self) -> BlockNumber { + self.starting_block + } + + /// Amount locked at block `n`. + pub fn locked_at>( + &self, + n: BlockNumber, + ) -> Balance { + // Number of blocks that count toward vesting; + // saturating to 0 when n < starting_block. + let vested_block_count = n.saturating_sub(self.starting_block); + let vested_block_count = BlockNumberToBalance::convert(vested_block_count); + // Return amount that is still locked in vesting. + vested_block_count + .checked_mul(&self.per_block()) // `per_block` accessor guarantees at least 1. + .map(|to_unlock| self.locked.saturating_sub(to_unlock)) + .unwrap_or(Zero::zero()) + } + + /// Block number at which the schedule ends (as type `Balance`). + pub fn ending_block_as_balance>( + &self, + ) -> Balance { + let starting_block = BlockNumberToBalance::convert(self.starting_block); + let duration = if self.per_block() >= self.locked { + // If `per_block` is bigger than `locked`, the schedule will end + // the block after starting. + One::one() + } else { + self.locked / self.per_block() + + if (self.locked % self.per_block()).is_zero() { + Zero::zero() + } else { + // `per_block` does not perfectly divide `locked`, so we need an extra block to + // unlock some amount less than `per_block`. + One::one() + } + }; + + starting_block.saturating_add(duration) + } +} diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs new file mode 100644 index 0000000000000..3ccc1a5bda362 --- /dev/null +++ b/frame/vesting/src/weights.rs @@ -0,0 +1,253 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_vesting +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-10, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_vesting +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/vesting/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_vesting. +pub trait WeightInfo { + fn vest_locked(l: u32, s: u32, ) -> Weight; + fn vest_unlocked(l: u32, s: u32, ) -> Weight; + fn vest_other_locked(l: u32, s: u32, ) -> Weight; + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight; + fn vested_transfer(l: u32, s: u32, ) -> Weight; + fn force_vested_transfer(l: u32, s: u32, ) -> Weight; + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight; + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight; +} + +/// Weights for pallet_vesting using the Substrate node and recommended hardware. 
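+///
+/// A runtime typically wires these weights in through its pallet `Config`
+/// (illustrative sketch, assuming a `Runtime` type):
+///
+/// ```ignore
+/// impl pallet_vesting::Config for Runtime {
+///     // ...other associated types...
+///     type WeightInfo = pallet_vesting::weights::SubstrateWeight<Runtime>;
+/// }
+/// ```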
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vest_locked(l: u32, s: u32, ) -> Weight { + (50_642_000 as Weight) + // Standard Error: 1_000 + .saturating_add((144_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((177_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vest_unlocked(l: u32, s: u32, ) -> Weight { + (50_830_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((112_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn vest_other_locked(l: u32, s: u32, ) -> Weight { + (52_151_000 as Weight) + // Standard Error: 1_000 + .saturating_add((130_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((162_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { + (51_009_000 as Weight) + // Standard Error: 4_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 9_000 + .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vested_transfer(l: u32, s: u32, ) -> Weight { + (89_517_000 as Weight) + // Standard Error: 5_000 + .saturating_add((114_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 10_000 + .saturating_add((23_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:2 w:2) + // Storage: Balances Locks (r:1 w:1) + fn force_vested_transfer(l: u32, s: u32, ) -> Weight { + (87_903_000 as Weight) + // Standard Error: 6_000 + .saturating_add((121_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 12_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (54_463_000 as Weight) + // Standard Error: 2_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 5_000 + .saturating_add((149_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting 
Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (53_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((137_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 4_000 + .saturating_add((152_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vest_locked(l: u32, s: u32, ) -> Weight { + (50_642_000 as Weight) + // Standard Error: 1_000 + .saturating_add((144_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((177_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vest_unlocked(l: u32, s: u32, ) -> Weight { + (50_830_000 as Weight) + // Standard Error: 1_000 + .saturating_add((115_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((112_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn vest_other_locked(l: u32, s: u32, ) -> Weight { + (52_151_000 as Weight) + // Standard Error: 1_000 + .saturating_add((130_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 3_000 + .saturating_add((162_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { + (51_009_000 as Weight) + // Standard Error: 4_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 9_000 + .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vested_transfer(l: u32, s: u32, ) -> Weight { + (89_517_000 as Weight) + // Standard Error: 5_000 + .saturating_add((114_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 10_000 + .saturating_add((23_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: System Account (r:2 w:2) + // Storage: Balances Locks (r:1 w:1) + fn force_vested_transfer(l: u32, s: u32, ) -> Weight { + (87_903_000 as Weight) + // Standard Error: 6_000 + .saturating_add((121_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 12_000 + .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Vesting Vesting (r:1 
w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (54_463_000 as Weight) + // Standard Error: 2_000 + .saturating_add((123_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 5_000 + .saturating_add((149_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Vesting Vesting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + (53_674_000 as Weight) + // Standard Error: 1_000 + .saturating_add((137_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 4_000 + .saturating_add((152_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } +} diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml deleted file mode 100644 index 93991a4aeb2ab..0000000000000 --- a/primitives/allocator/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "sp-allocator" -version = "2.0.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Collection of allocator implementations." -documentation = "https://docs.rs/sp-allocator" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -sp-std = { version = "2.0.0", path = "../std", default-features = false } -sp-core = { version = "2.0.0", path = "../core", default-features = false } -sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } -log = { version = "0.4.8", optional = true } -derive_more = { version = "0.99.2", optional = true } - -[features] -default = [ "std" ] -std = [ - "sp-std/std", - "sp-core/std", - "sp-wasm-interface/std", - "log", - "derive_more", -] diff --git a/primitives/allocator/README.md b/primitives/allocator/README.md deleted file mode 100644 index 361feaae591f9..0000000000000 --- a/primitives/allocator/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Collection of allocator implementations. 
- -This crate provides the following allocator implementations: -- A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) - -License: Apache-2.0 \ No newline at end of file diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index a3c480e92135f..7e751232acb50 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,20 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-api-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-version = { version = "2.0.0", default-features = false, path = "../version" } -sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-api-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +sp-version = { version = "4.0.0-dev", default-features = false, path = "../version" } +sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../state-machine" } hash-db = { version = "0.15.2", optional = true } +thiserror = { version = "1.0.21", optional = true } + +log = { version = "0.4.14", default-features = false } [dev-dependencies] sp-test-primitives = { version = "2.0.0", path = "../test-primitives" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "sp-core/std", @@ -35,4 +38,14 @@ std = [ "sp-state-machine", "sp-version/std", "hash-db", + "thiserror", + "log/std", ] +# Special feature to disable logging completly. +# +# By default `sp-api` initializes the `RuntimeLogger` for each runtime api function. However, +# logging functionality increases the code size. It is recommended to enable this feature when +# building a runtime for registering it on chain. +# +# This sets the max logging level to `off` for `log`. +disable-logging = ["log/max_level_off"] diff --git a/primitives/api/README.md b/primitives/api/README.md index 551de2f82e365..1cf9437373c77 100644 --- a/primitives/api/README.md +++ b/primitives/api/README.md @@ -3,8 +3,8 @@ Substrate runtime api The Substrate runtime api is the crucial interface between the node and the runtime. Every call that goes into the runtime is done with a runtime api. The runtime apis are not fixed. Every Substrate user can define its own apis with -[`decl_runtime_apis`](macro.decl_runtime_apis.html) and implement them in -the runtime with [`impl_runtime_apis`](macro.impl_runtime_apis.html). +[`decl_runtime_apis`](https://docs.rs/sp-api/latest/sp_api/macro.decl_runtime_apis.html) and implement them in +the runtime with [`impl_runtime_apis`](https://docs.rs/sp-api/latest/sp_api/macro.impl_runtime_apis.html). Every Substrate runtime needs to implement the [`Core`] runtime api. 
This api provides the basic functionality that every runtime needs to export. diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 9b1661cf5ef60..d5909967ac5a4 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-proc-macro" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,16 +12,15 @@ documentation = "https://docs.rs/sp-api-proc-macro" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - [lib] proc-macro = true [dependencies] quote = "1.0.3" -syn = { version = "1.0.8", features = ["full", "fold", "extra-traits", "visit"] } -proc-macro2 = "1.0.6" +syn = { version = "1.0.58", features = ["full", "fold", "extra-traits", "visit"] } +proc-macro2 = "1.0.29" blake2-rfc = { version = "0.2.18", default-features = false } -proc-macro-crate = "0.1.4" +proc-macro-crate = "1.0.0" # Required for the doc tests [features] diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 8294c8bfbd684..510a2eeaa530a 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,21 +16,25 @@ // limitations under the License. use crate::utils::{ - generate_crate_access, generate_hidden_includes, generate_runtime_mod_name_for_trait, - fold_fn_decl_for_client_side, extract_parameter_names_types_and_borrows, - generate_native_call_generator_fn_name, return_type_extract_type, - generate_method_runtime_api_impl_name, generate_call_api_at_fn_name, prefix_function_with_trait, - replace_wild_card_parameter_names, AllowSelfRefInParameters, + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, + generate_call_api_at_fn_name, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, + generate_runtime_mod_name_for_trait, prefix_function_with_trait, + replace_wild_card_parameter_names, return_type_extract_type, AllowSelfRefInParameters, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, parse::{Parse, ParseStream, Result, Error}, ReturnType, - fold::{self, Fold}, parse_quote, ItemTrait, Generics, GenericParam, Attribute, FnArg, Type, - visit::{Visit, self}, TraitBound, Meta, NestedMeta, Lit, TraitItem, Ident, TraitItemMethod, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + visit::{self, Visit}, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, ReturnType, + TraitBound, TraitItem, TraitItemMethod, Type, }; use std::collections::HashMap; @@ -58,22 +62,9 @@ const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; /// /// Is used when a trait method was renamed. const RENAMED_ATTRIBUTE: &str = "renamed"; -/// The `skip_initialize_block` attribute. -/// -/// Is used when a trait method does not require that the block is initialized -/// before being called. 
-const SKIP_INITIALIZE_BLOCK_ATTRIBUTE: &str = "skip_initialize_block"; -/// The `initialize_block` attribute. -/// -/// A trait method tagged with this attribute, initializes the runtime at -/// certain block. -const INITIALIZE_BLOCK_ATTRIBUTE: &str = "initialize_block"; /// All attributes that we support in the declaration of a runtime api trait. -const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = &[ - CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, - RENAMED_ATTRIBUTE, SKIP_INITIALIZE_BLOCK_ATTRIBUTE, - INITIALIZE_BLOCK_ATTRIBUTE, -]; +const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = + &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { @@ -106,14 +97,12 @@ fn extend_generics_with_block(generics: &mut Generics) { /// attribute body as `TokenStream`. fn remove_supported_attributes(attrs: &mut Vec) -> HashMap<&'static str, Attribute> { let mut result = HashMap::new(); - attrs.retain(|v| { - match SUPPORTED_ATTRIBUTE_NAMES.iter().find(|a| v.path.is_ident(a)) { - Some(attribute) => { - result.insert(*attribute, v.clone()); - false - }, - None => true, - } + attrs.retain(|v| match SUPPORTED_ATTRIBUTE_NAMES.iter().find(|a| v.path.is_ident(a)) { + Some(attribute) => { + result.insert(*attribute, v.clone()); + false + }, + None => true, }); result @@ -187,14 +176,15 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { result.push(quote!( #[cfg(any(feature = "std", test))] fn convert_between_block_types - ( - input: &I, error_desc: &'static str, - ) -> std::result::Result + #crate_::ApiError>( + input: &I, + map_error: F, + ) -> std::result::Result { ::decode_with_depth_limit( #crate_::MAX_EXTRINSIC_DEPTH, - &mut &#crate_::Encode::encode(input)[..], - ).map_err(|e| format!("{} {}", error_desc, e.what())) + &#crate_::Encode::encode(input)[..], + ).map_err(map_error) } )); @@ -202,19 +192,26 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { for fn_ in fns { let params = extract_parameter_names_types_and_borrows(&fn_, AllowSelfRefInParameters::No)?; let trait_fn_name = &fn_.ident; + let function_name_str = fn_.ident.to_string(); let fn_name = generate_native_call_generator_fn_name(&fn_.ident); let output = return_type_replace_block_with_node_block(fn_.output.clone()); let output_ty = return_type_extract_type(&output); - let output = quote!( std::result::Result<#output_ty, String> ); + let output = quote!( std::result::Result<#output_ty, #crate_::ApiError> ); // Every type that is using the `Block` generic parameter, we need to encode/decode, // to make it compatible between the runtime/node. let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { - let name_str = format!( - "Could not convert parameter `{}` between node and runtime:", quote!(#n) - ); + let param_name = quote!(#n).to_string(); + quote!( - let #n: #t = convert_between_block_types(&#n, #name_str)?; + let #n: #t = convert_between_block_types( + &#n, + |e| #crate_::ApiError::FailedToConvertParameter { + function: #function_name_str, + parameter: #param_name, + error: e, + }, + )?; ) }); // Same as for the input types, we need to check if we also need to convert the output, @@ -223,20 +220,24 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { quote!( convert_between_block_types( &res, - "Could not convert return value from runtime to node!" 
+ |e| #crate_::ApiError::FailedToConvertReturnValue { + function: #function_name_str, + error: e, + }, ) ) } else { - quote!( Ok(res) ) + quote!(Ok(res)) }; let input_names = params.iter().map(|v| &v.0); // If the type is using the block generic type, we will encode/decode it to make it // compatible. To ensure that we forward it by ref/value, we use the value given by the // the user. Otherwise if it is not using the block, we don't need to add anything. - let input_borrows = params - .iter() - .map(|v| if type_is_using_block(&v.1) { v.2.clone() } else { None }); + let input_borrows = + params + .iter() + .map(|v| if type_is_using_block(&v.1) { v.2.clone() } else { None }); // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect // all the function inputs. @@ -305,28 +306,23 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { ); match meta { - Meta::List(list) => { + Meta::List(list) => if list.nested.len() > 2 && list.nested.is_empty() { err } else { let mut itr = list.nested.iter(); let old_name = match itr.next() { - Some(NestedMeta::Lit(Lit::Str(i))) => { - i.value() - }, + Some(NestedMeta::Lit(Lit::Str(i))) => i.value(), _ => return err, }; let version = match itr.next() { - Some(NestedMeta::Lit(Lit::Int(i))) => { - i.base10_parse()? - }, + Some(NestedMeta::Lit(Lit::Int(i))) => i.base10_parse()?, _ => return err, }; Ok((old_name, version)) - } - }, + }, _ => err, } } @@ -354,32 +350,19 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { fn_.span(), format!( "`{}` and `{}` are not supported at once.", - RENAMED_ATTRIBUTE, - CHANGED_IN_ATTRIBUTE - ) - )); + RENAMED_ATTRIBUTE, CHANGED_IN_ATTRIBUTE + ), + )) } // We do not need to generate this function for a method that signature was changed. if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - continue; + continue } - let skip_initialize_block = attrs.contains_key(SKIP_INITIALIZE_BLOCK_ATTRIBUTE); - let update_initialized_block = if attrs.contains_key(INITIALIZE_BLOCK_ATTRIBUTE) { - quote!( - || *initialized_block.borrow_mut() = Some(*at) - ) - } else { - quote!(|| ()) - }; - // Parse the renamed attributes. 
let mut renames = Vec::new(); - if let Some((_, a)) = attrs - .iter() - .find(|a| a.0 == &RENAMED_ATTRIBUTE) - { + if let Some((_, a)) = attrs.iter().find(|a| a.0 == &RENAMED_ATTRIBUTE) { let (old_name, version) = parse_renamed_attribute(a)?; renames.push((version, prefix_function_with_trait(&trait_name, &old_name))); } @@ -391,86 +374,66 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { versions.push(version); old_names.push(old_name); (versions, old_names) - } + }, ); // Generate the generator function result.push(quote!( #[cfg(any(feature = "std", test))] + #[allow(clippy::too_many_arguments)] pub fn #fn_name< R: #crate_::Encode + #crate_::Decode + PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, + NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, Block: #crate_::BlockT, T: #crate_::CallApiAt, - C: #crate_::Core, >( call_runtime_at: &T, - core_api: &C, at: &#crate_::BlockId, args: Vec, changes: &std::cell::RefCell<#crate_::OverlayedChanges>, - offchain_changes: &std::cell::RefCell<#crate_::OffchainOverlayedChanges>, storage_transaction_cache: &std::cell::RefCell< #crate_::StorageTransactionCache >, - initialized_block: &std::cell::RefCell>>, native_call: Option, context: #crate_::ExecutionContext, recorder: &Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { + ) -> std::result::Result<#crate_::NativeOrEncoded, #crate_::ApiError> { let version = call_runtime_at.runtime_version_at(at)?; - use #crate_::InitializeBlock; - let initialize_block = if #skip_initialize_block { - InitializeBlock::Skip - } else { - InitializeBlock::Do(&initialized_block) - }; - let update_initialized_block = #update_initialized_block; #( // Check if we need to call the function by an old name. if version.apis.iter().any(|(s, v)| { s == &ID && *v < #versions }) { - let params = #crate_::CallApiAtParams::<_, _, fn() -> _, _> { - core_api, + let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { at, function: #old_names, native_call: None, arguments: args, overlayed_changes: changes, - offchain_changes, storage_transaction_cache, - initialize_block, context, recorder, }; let ret = call_runtime_at.call_api_at(params)?; - update_initialized_block(); return Ok(ret) } )* let params = #crate_::CallApiAtParams { - core_api, at, function: #trait_fn_name, native_call, arguments: args, overlayed_changes: changes, - offchain_changes, storage_transaction_cache, - initialize_block, context, recorder, }; - let ret = call_runtime_at.call_api_at(params)?; - - update_initialized_block(); - Ok(ret) + call_runtime_at.call_api_at(params) } )); } @@ -487,27 +450,32 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { extend_generics_with_block(&mut decl.generics); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); - let api_version = get_api_version(&found_attributes).map(|v| { - generate_runtime_api_version(v as u32) - })?; + let api_version = + get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); let call_api_at_calls = generate_call_api_at_calls(&decl)?; // Remove methods that have the `changed_in` attribute as they are not required for the // runtime anymore. 
- decl.items = decl.items.iter_mut().filter_map(|i| match i { - TraitItem::Method(ref mut method) => { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - None - } else { - // Make sure we replace all the wild card parameter names. - replace_wild_card_parameter_names(&mut method.sig); - Some(TraitItem::Method(method.clone())) - } - } - r => Some(r.clone()), - }).collect(); + decl.items = decl + .items + .iter_mut() + .filter_map(|i| match i { + TraitItem::Method(ref mut method) => { + if remove_supported_attributes(&mut method.attrs) + .contains_key(CHANGED_IN_ATTRIBUTE) + { + None + } else { + // Make sure we replace all the wild card parameter names. + replace_wild_card_parameter_names(&mut method.sig); + Some(TraitItem::Method(method.clone())) + } + }, + r => Some(r.clone()), + }) + .collect(); let native_call_generators = generate_native_call_generators(&decl)?; @@ -564,8 +532,10 @@ impl<'a> ToClientSideDecl<'a> { result } - fn fold_trait_item_method(&mut self, method: TraitItemMethod) - -> (TraitItemMethod, Option, TraitItemMethod) { + fn fold_trait_item_method( + &mut self, + method: TraitItemMethod, + ) -> (TraitItemMethod, Option, TraitItemMethod) { let crate_ = self.crate_; let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); let fn_impl = self.create_method_runtime_api_impl(method.clone()); @@ -578,8 +548,9 @@ impl<'a> ToClientSideDecl<'a> { fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { let crate_ = self.crate_; let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!( context )); - fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); + fn_decl_ctx.sig.ident = + Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); fn_decl_ctx.sig.inputs.insert(2, context_arg); fn_decl_ctx @@ -587,9 +558,12 @@ impl<'a> ToClientSideDecl<'a> { /// Takes the given method and creates a `method_runtime_api_impl` method that will be /// implemented in the runtime for the client side. - fn create_method_runtime_api_impl(&mut self, mut method: TraitItemMethod) -> Option { + fn create_method_runtime_api_impl( + &mut self, + mut method: TraitItemMethod, + ) -> Option { if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - return None; + return None } let fn_sig = &method.sig; @@ -597,36 +571,35 @@ impl<'a> ToClientSideDecl<'a> { // Get types and if the value is borrowed from all parameters. // If there is an error, we push it as the block to the user. 
- let param_types = match extract_parameter_names_types_and_borrows( - fn_sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => res.into_iter().map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }).collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - } - }; + let param_types = + match extract_parameter_names_types_and_borrows(fn_sig, AllowSelfRefInParameters::No) { + Ok(res) => res + .into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + Err(e) => { + self.errors.push(e.to_compile_error()); + Vec::new() + }, + }; let name = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); let block_id = self.block_id; let crate_ = self.crate_; - Some( - parse_quote!{ - #[doc(hidden)] - fn #name( - &self, - at: &#block_id, - context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error>; - } - ) + Some(parse_quote! { + #[doc(hidden)] + fn #name( + &self, + at: &#block_id, + context: #crate_::ExecutionContext, + params: Option<( #( #param_types ),* )>, + params_encoded: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; + }) } /// Takes the method declared by the user and creates the declaration we require for the runtime @@ -645,12 +618,12 @@ impl<'a> ToClientSideDecl<'a> { Err(e) => { self.errors.push(e.to_compile_error()); Vec::new() - } + }, }; let params2 = params.clone(); let ret_type = return_type_extract_type(&method.sig.output); - fold_fn_decl_for_client_side(&mut method.sig, &self.block_id); + fold_fn_decl_for_client_side(&mut method.sig, &self.block_id, &self.crate_); let name_impl = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); let crate_ = self.crate_; @@ -666,7 +639,8 @@ impl<'a> ToClientSideDecl<'a> { Error::new( method.span(), "`changed_in` version can not be greater than the `api_version`", - ).to_compile_error() + ) + .to_compile_error(), ); } @@ -677,50 +651,48 @@ impl<'a> ToClientSideDecl<'a> { method.sig.ident = ident; method.attrs.push(parse_quote!( #[deprecated] )); - let panic = format!("Calling `{}` should not return a native value!", method.sig.ident); - (quote!( panic!(#panic) ), quote!( None )) + let panic = + format!("Calling `{}` should not return a native value!", method.sig.ident); + (quote!(panic!(#panic)), quote!(None)) }, - Ok(None) => (quote!( Ok(n) ), quote!( Some(( #( #params2 ),* )) )), + Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), Err(e) => { self.errors.push(e.to_compile_error()); - (quote!( unimplemented!() ), quote!( None )) - } + (quote!(unimplemented!()), quote!(None)) + }, }; let function_name = method.sig.ident.to_string(); // Generate the default implementation that calls the `method_runtime_api_impl` method. - method.default = Some( - parse_quote! { - { - let runtime_api_impl_params_encoded = - #crate_::Encode::encode(&( #( &#params ),* )); - - self.#name_impl( - __runtime_api_at_param__, - #context, - #param_tuple, - runtime_api_impl_params_encoded, - ).and_then(|r| - match r { - #crate_::NativeOrEncoded::Native(n) => { - #native_handling - }, - #crate_::NativeOrEncoded::Encoded(r) => { - <#ret_type as #crate_::Decode>::decode(&mut &r[..]) - .map_err(|err| - format!( - "Failed to decode result of `{}`: {}", - #function_name, - err.what(), - ).into() - ) - } + method.default = Some(parse_quote! 
{ + { + let runtime_api_impl_params_encoded = + #crate_::Encode::encode(&( #( &#params ),* )); + + self.#name_impl( + __runtime_api_at_param__, + #context, + #param_tuple, + runtime_api_impl_params_encoded, + ).and_then(|r| + match r { + #crate_::NativeOrEncoded::Native(n) => { + #native_handling + }, + #crate_::NativeOrEncoded::Encoded(r) => { + <#ret_type as #crate_::Decode>::decode(&mut &r[..]) + .map_err(|err| + #crate_::ApiError::FailedToDecodeReturnValue { + function: #function_name, + error: err, + } + ) } - ) - } + } + ) } - ); + }); method } @@ -737,13 +709,7 @@ impl<'a> Fold for ToClientSideDecl<'a> { if is_core_trait { // Add all the supertraits we want to have for `Core`. - let crate_ = &self.crate_; - input.supertraits = parse_quote!( - 'static - + Send - + Sync - + #crate_::ApiErrorExt - ); + input.supertraits = parse_quote!('static + Send + Sync); } else { // Add the `Core` runtime api as super trait. let crate_ = &self.crate_; @@ -763,24 +729,22 @@ fn parse_runtime_api_version(version: &Attribute) -> Result { let meta = version.parse_meta()?; let err = Err(Error::new( - meta.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ) - ) - ); + meta.span(), + &format!( + "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", + api_version = API_VERSION_ATTRIBUTE + ), + )); match meta { - Meta::List(list) => { + Meta::List(list) => if list.nested.len() != 1 { err } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { i.base10_parse() } else { err - } - }, + }, _ => err, } } @@ -812,12 +776,12 @@ fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { let bounds = &t.bounds; quote! { #ident #colon_token #bounds } - }).chain(std::iter::once(quote! { __Sr_Api_Error__ })); + }); let ty_generics = trait_.generics.type_params().map(|t| { let ident = &t.ident; quote! { #ident } - }).chain(std::iter::once(quote! { Error = __Sr_Api_Error__ })); + }); quote!( #[cfg(any(feature = "std", test))] @@ -832,14 +796,18 @@ fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { /// Get changed in version from the user given attribute or `Ok(None)`, if no attribute was given. fn get_changed_in(found_attributes: &HashMap<&'static str, Attribute>) -> Result> { - found_attributes.get(&CHANGED_IN_ATTRIBUTE) + found_attributes + .get(&CHANGED_IN_ATTRIBUTE) .map(|v| parse_runtime_api_version(v).map(Some)) .unwrap_or(Ok(None)) } /// Get the api version from the user given attribute or `Ok(1)`, if no attribute was given. fn get_api_version(found_attributes: &HashMap<&'static str, Attribute>) -> Result { - found_attributes.get(&API_VERSION_ATTRIBUTE).map(parse_runtime_api_version).unwrap_or(Ok(1)) + found_attributes + .get(&API_VERSION_ATTRIBUTE) + .map(parse_runtime_api_version) + .unwrap_or(Ok(1)) } /// Generate the declaration of the trait for the client side. @@ -897,7 +865,10 @@ impl CheckTraitDecl { /// Check that the given method declarations are correct. /// /// Any error is stored in `self.errors`. 
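Note how the conversion and decoding failures above now map onto structured `ApiError` variants (`FailedToConvertParameter`, `FailedToConvertReturnValue`, `FailedToDecodeReturnValue`) instead of formatted `String`s. A hedged sketch of how node-side code might report exactly these variants; `log_api_error` is a hypothetical helper, and the enum (defined outside this hunk) may carry further variants:

```rust
fn log_api_error(err: &sp_api::ApiError) {
    match err {
        sp_api::ApiError::FailedToConvertParameter { function, parameter, error } =>
            log::error!("`{}`: cannot convert parameter `{}`: {}", function, parameter, error),
        sp_api::ApiError::FailedToConvertReturnValue { function, error } =>
            log::error!("`{}`: cannot convert return value: {}", function, error),
        sp_api::ApiError::FailedToDecodeReturnValue { function, error } =>
            log::error!("`{}`: cannot decode return value: {}", function, error),
        // Any other variant of the error type.
        other => log::error!("runtime api error: {}", other),
    }
}
```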
- fn check_method_declarations<'a>(&mut self, methods: impl Iterator) { + fn check_method_declarations<'a>( + &mut self, + methods: impl Iterator, + ) { let mut method_to_signature_changed = HashMap::>>::new(); methods.into_iter().for_each(|method| { @@ -905,13 +876,23 @@ impl CheckTraitDecl { let changed_in = match get_changed_in(&attributes) { Ok(r) => r, - Err(e) => { self.errors.push(e); return; }, + Err(e) => { + self.errors.push(e); + return + }, }; method_to_signature_changed .entry(method.sig.ident.clone()) .or_default() .push(changed_in); + + if method.default.is_some() { + self.errors.push(Error::new( + method.default.span(), + "A runtime API function cannot have a default implementation!", + )); + } }); method_to_signature_changed.into_iter().for_each(|(f, changed)| { @@ -939,16 +920,13 @@ impl<'ast> Visit<'ast> for CheckTraitDecl { fn visit_generic_param(&mut self, input: &'ast GenericParam) { match input { - GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ - `decl_runtime_apis!` macro!" - ) - ) - }, - _ => {} + GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ + `decl_runtime_apis!` macro!", + )), + _ => {}, } visit::visit_generic_param(self, input); @@ -957,14 +935,12 @@ impl<'ast> Visit<'ast> for CheckTraitDecl { fn visit_trait_bound(&mut self, input: &'ast TraitBound) { if let Some(last_ident) = input.path.segments.last().map(|v| &v.ident) { if last_ident == "BlockT" || last_ident == BLOCK_GENERIC_IDENT { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ `decl_runtime_apis!` macro! If you try to use a different trait than the \ - substrate `Block` trait, please rename it locally." - ) - ) + substrate `Block` trait, please rename it locally.", + )) } } @@ -992,7 +968,9 @@ pub fn decl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::Tok // Parse all trait declarations let RuntimeApiDecls { decls: api_decls } = parse_macro_input!(input as RuntimeApiDecls); - decl_runtime_apis_impl_inner(&api_decls).unwrap_or_else(|e| e.to_compile_error()).into() + decl_runtime_apis_impl_inner(&api_decls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result { @@ -1002,13 +980,11 @@ fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result let runtime_decls = generate_runtime_decls(api_decls)?; let client_side_decls = generate_client_side_decls(api_decls)?; - Ok( - quote!( - #hidden_includes + Ok(quote!( + #hidden_includes - #runtime_decls + #runtime_decls - #client_side_decls - ) - ) + #client_side_decls + )) } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 85f5a1797b1e3..bc0f027e1efaa 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,12 +16,12 @@ // limitations under the License. use crate::utils::{ - generate_crate_access, generate_hidden_includes, - generate_runtime_mod_name_for_trait, generate_method_runtime_api_impl_name, - extract_parameter_names_types_and_borrows, generate_native_call_generator_fn_name, - return_type_extract_type, generate_call_api_at_fn_name, prefix_function_with_trait, extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - AllowSelfRefInParameters, RequireQualifiedTraitPath, + extract_parameter_names_types_and_borrows, generate_call_api_at_fn_name, generate_crate_access, + generate_hidden_includes, generate_method_runtime_api_impl_name, + generate_native_call_generator_fn_name, generate_runtime_mod_name_for_trait, + prefix_function_with_trait, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -29,9 +29,12 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, Path, Signature, Attribute, - ImplItem, parse::{Parse, ParseStream, Result, Error}, PathArguments, GenericArgument, TypePath, - fold::{self, Fold}, parse_quote, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + Attribute, GenericArgument, Ident, ImplItem, ItemImpl, Path, PathArguments, Signature, Type, + TypePath, }; use std::collections::HashSet; @@ -66,9 +69,10 @@ fn generate_impl_call( signature: &Signature, runtime: &Type, input: &Ident, - impl_trait: &Path + impl_trait: &Path, ) -> Result { - let params = extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; + let params = + extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; let c = generate_crate_access(HIDDEN_INCLUDES_ID); let fn_name = &signature.ident; @@ -78,27 +82,25 @@ fn generate_impl_call( let ptypes = params.iter().map(|v| &v.1); let pborrow = params.iter().map(|v| &v.2); - Ok( - quote!( - let (#( #pnames ),*) : ( #( #ptypes ),* ) = - match #c::DecodeLimit::decode_all_with_depth_limit( - #c::MAX_EXTRINSIC_DEPTH, - &#input, - ) { - Ok(res) => res, - Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e.what()), - }; - - #[allow(deprecated)] - <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*) - ) - ) + Ok(quote!( + let (#( #pnames ),*) : ( #( #ptypes ),* ) = + match #c::DecodeLimit::decode_all_with_depth_limit( + #c::MAX_EXTRINSIC_DEPTH, + &#input, + ) { + Ok(res) => res, + Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e), + }; + + #[allow(deprecated)] + <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*) + )) } /// Generate all the implementation calls for the given functions. 
fn generate_impl_calls( impls: &[ItemImpl], - input: &Ident + input: &Ident, ) -> Result)>> { let mut impl_calls = Vec::new(); @@ -113,18 +115,14 @@ fn generate_impl_calls( for item in &impl_.items { if let ImplItem::Method(method) = item { - let impl_call = generate_impl_call( - &method.sig, - &impl_.self_ty, - input, - &impl_trait - )?; + let impl_call = + generate_impl_call(&method.sig, &impl_.self_ty, input, &impl_trait)?; impl_calls.push(( impl_trait_ident.clone(), - method.sig.ident.clone(), - impl_call, - filter_cfg_attrs(&impl_.attrs), + method.sig.ident.clone(), + impl_call, + filter_cfg_attrs(&impl_.attrs), )); } } @@ -137,15 +135,16 @@ fn generate_impl_calls( fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { let data = Ident::new("__sp_api__input_data", Span::call_site()); let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let impl_calls = generate_impl_calls(impls, &data)? - .into_iter() - .map(|(trait_, fn_name, impl_, attrs)| { - let name = prefix_function_with_trait(&trait_, &fn_name); - quote!( - #( #attrs )* - #name => Some(#c::Encode::encode(&{ #impl_ })), - ) - }); + let impl_calls = + generate_impl_calls(impls, &data)? + .into_iter() + .map(|(trait_, fn_name, impl_, attrs)| { + let name = prefix_function_with_trait(&trait_, &fn_name); + quote!( + #( #attrs )* + #name => Some(#c::Encode::encode(&{ #impl_ })), + ) + }); Ok(quote!( #[cfg(feature = "std")] @@ -162,32 +161,34 @@ fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { let input = Ident::new("input", Span::call_site()); let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let impl_calls = generate_impl_calls(impls, &input)? - .into_iter() - .map(|(trait_, fn_name, impl_, attrs)| { - let fn_name = Ident::new( - &prefix_function_with_trait(&trait_, &fn_name), - Span::call_site() - ); - - quote!( - #( #attrs )* - #[cfg(not(feature = "std"))] - #[no_mangle] - pub unsafe fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { - let mut #input = if input_len == 0 { - &[0u8; 0] - } else { - unsafe { - #c::slice::from_raw_parts(input_data, input_len) - } - }; - let output = { #impl_ }; - #c::to_substrate_wasm_fn_return_value(&output) - } - ) - }); + let impl_calls = + generate_impl_calls(impls, &input)? + .into_iter() + .map(|(trait_, fn_name, impl_, attrs)| { + let fn_name = + Ident::new(&prefix_function_with_trait(&trait_, &fn_name), Span::call_site()); + + quote!( + #( #attrs )* + #[cfg(not(feature = "std"))] + #[no_mangle] + pub unsafe fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { + let mut #input = if input_len == 0 { + &[0u8; 0] + } else { + unsafe { + #c::slice::from_raw_parts(input_data, input_len) + } + }; + + #c::init_runtime_logger(); + + let output = (move || { #impl_ })(); + #c::to_substrate_wasm_fn_return_value(&output) + } + ) + }); Ok(quote!( #( #impl_calls )* )) } @@ -199,16 +200,10 @@ fn generate_runtime_api_base_structures() -> Result { pub struct RuntimeApi {} /// Implements all runtime apis for the client side. 
#[cfg(any(feature = "std", test))] - pub struct RuntimeApiImpl + 'static> - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { + pub struct RuntimeApiImpl + 'static> { call: &'static C, commit_on_success: std::cell::RefCell, - initialized_block: std::cell::RefCell>>, changes: std::cell::RefCell<#crate_::OverlayedChanges>, - offchain_changes: std::cell::RefCell<#crate_::OffchainOverlayedChanges>, storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache >, @@ -221,35 +216,16 @@ fn generate_runtime_api_base_structures() -> Result { #[cfg(any(feature = "std", test))] unsafe impl> Send for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, {} #[cfg(any(feature = "std", test))] unsafe impl> Sync for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, {} - #[cfg(any(feature = "std", test))] - impl> #crate_::ApiErrorExt - for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { - type Error = C::Error; - } - #[cfg(any(feature = "std", test))] impl> #crate_::ApiExt for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, { type StateBackend = C::StateBackend; @@ -270,32 +246,43 @@ fn generate_runtime_api_base_structures() -> Result { fn has_api( &self, at: &#crate_::BlockId, - ) -> std::result::Result where Self: Sized { - self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) + ) -> std::result::Result where Self: Sized { + self.call + .runtime_version_at(at) + .map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) } fn has_api_with bool>( &self, at: &#crate_::BlockId, pred: P, - ) -> std::result::Result where Self: Sized { - self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) + ) -> std::result::Result where Self: Sized { + self.call + .runtime_version_at(at) + .map(|v| v.has_api_with(&A::ID, pred)) + } + + fn api_version( + &self, + at: &#crate_::BlockId, + ) -> std::result::Result, #crate_::ApiError> where Self: Sized { + self.call + .runtime_version_at(at) + .map(|v| v.api_version(&A::ID)) } fn record_proof(&mut self) { self.recorder = Some(Default::default()); } + fn proof_recorder(&self) -> Option<#crate_::ProofRecorder> { + self.recorder.clone() + } + fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() - .map(|recorder| { - let trie_nodes = recorder.read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - #crate_::StorageProof::new(trie_nodes) - }) + .map(|recorder| recorder.to_storage_proof()) } fn into_storage_changes( @@ -307,10 +294,9 @@ fn generate_runtime_api_base_structures() -> Result { >>, parent_hash: Block::Hash, ) -> std::result::Result< - #crate_::StorageChanges, + #crate_::StorageChanges, String > where Self: Sized { - self.initialized_block.borrow_mut().take(); self.changes.replace(Default::default()).into_storage_changes( backend, changes_trie_state, @@ -325,8 +311,6 @@ fn generate_runtime_api_base_structures() -> Result { for RuntimeApi where C: #crate_::CallApiAt + 'static, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: 
#crate_::StateBackend<#crate_::HashFor>, { type RuntimeApi = RuntimeApiImpl; @@ -336,9 +320,7 @@ fn generate_runtime_api_base_structures() -> Result { RuntimeApiImpl { call: unsafe { std::mem::transmute(call) }, commit_on_success: true.into(), - initialized_block: None.into(), changes: Default::default(), - offchain_changes: Default::default(), recorder: Default::default(), storage_transaction_cache: Default::default(), }.into() @@ -346,20 +328,13 @@ fn generate_runtime_api_base_structures() -> Result { } #[cfg(any(feature = "std", test))] - impl> RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { + impl> RuntimeApiImpl { fn call_api_at< R: #crate_::Encode + #crate_::Decode + PartialEq, F: FnOnce( &C, - &Self, &std::cell::RefCell<#crate_::OverlayedChanges>, - &std::cell::RefCell<#crate_::OffchainOverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, - &std::cell::RefCell>>, &Option<#crate_::ProofRecorder>, ) -> std::result::Result<#crate_::NativeOrEncoded, E>, E, @@ -372,11 +347,8 @@ fn generate_runtime_api_base_structures() -> Result { } let res = call_api_at( &self.call, - self, &self.changes, - &self.offchain_changes, &self.storage_transaction_cache, - &self.initialized_block, &self.recorder, ); @@ -440,7 +412,6 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { Ok(quote!( #( #impls_prepared )* )) } - /// Auxiliary data structure that is used to convert `impl Api for Runtime` to /// `impl Api for RuntimeApi`. /// This requires us to replace the runtime `Block` with the node `Block`, @@ -456,11 +427,8 @@ struct ApiRuntimeImplToApiRuntimeApiImpl<'a> { impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { fn fold_type_path(&mut self, input: TypePath) -> TypePath { - let new_ty_path = if input == *self.runtime_block { - parse_quote!( __SR_API_BLOCK__ ) - } else { - input - }; + let new_ty_path = + if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; fold::fold_type_path(self, new_ty_path) } @@ -477,12 +445,18 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { // Generate the access to the native parameters let param_tuple_access = if input.sig.inputs.len() == 1 { - vec![ quote!( p ) ] + vec![quote!(p)] } else { - input.sig.inputs.iter().enumerate().map(|(i, _)| { - let i = syn::Index::from(i); - quote!( p.#i ) - }).collect::>() + input + .sig + .inputs + .iter() + .enumerate() + .map(|(i, _)| { + let i = syn::Index::from(i); + quote!( p.#i ) + }) + .collect::>() }; let (param_types, error) = match extract_parameter_names_types_and_borrows( @@ -490,12 +464,14 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { AllowSelfRefInParameters::No, ) { Ok(res) => ( - res.into_iter().map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }).collect::>(), - None + res.into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + None, ), Err(e) => (Vec::new(), Some(e.to_compile_error())), }; @@ -509,15 +485,13 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { params_encoded: Vec, }; - input.sig.ident = generate_method_runtime_api_impl_name( - &self.impl_trait, - &input.sig.ident, - ); + input.sig.ident = + generate_method_runtime_api_impl_name(&self.impl_trait, &input.sig.ident); let ret_type = return_type_extract_type(&input.sig.output); // Generate the correct return type. 
input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, RuntimeApiImplCall::Error> + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> ); // Generate the new method implementation that calls into the runtime. @@ -529,22 +503,16 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { self.call_api_at( | call_runtime_at, - core_api, changes, - offchain_changes, storage_transaction_cache, - initialized_block, recorder | { #runtime_mod_path #call_api_at_call( call_runtime_at, - core_api, at, params_encoded, changes, - offchain_changes, storage_transaction_cache, - initialized_block, params.map(|p| { #runtime_mod_path #native_call_generator_ident :: <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( @@ -560,7 +528,7 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { ) }; - let mut input = fold::fold_impl_item_method(self, input); + let mut input = fold::fold_impl_item_method(self, input); // We need to set the block, after we modified the rest of the ast, otherwise we would // modify our generated block as well. input.block = block; @@ -576,43 +544,34 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); // Implement the trait for the `RuntimeApiImpl` - input.self_ty = Box::new( - parse_quote!( RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall> ) - ); + input.self_ty = + Box::new(parse_quote!( RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall> )); + input.generics.params.push(parse_quote!( + __SR_API_BLOCK__: #crate_::BlockT + std::panic::UnwindSafe + + std::panic::RefUnwindSafe + )); input.generics.params.push( - parse_quote!( - __SR_API_BLOCK__: #crate_::BlockT + std::panic::UnwindSafe + - std::panic::RefUnwindSafe - ) - ); - input.generics.params.push( - parse_quote!( RuntimeApiImplCall: #crate_::CallApiAt<__SR_API_BLOCK__> + 'static ) + parse_quote!( RuntimeApiImplCall: #crate_::CallApiAt<__SR_API_BLOCK__> + 'static ), ); let where_clause = input.generics.make_where_clause(); - where_clause.predicates.push( - parse_quote! { - RuntimeApiImplCall::StateBackend: - #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> - } - ); + where_clause.predicates.push(parse_quote! { + RuntimeApiImplCall::StateBackend: + #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> + }); // Require that all types used in the function signatures are unwind safe. extract_all_signature_types(&input.items).iter().for_each(|i| { - where_clause.predicates.push( - parse_quote! { - #i: std::panic::UnwindSafe + std::panic::RefUnwindSafe - } - ); + where_clause.predicates.push(parse_quote! { + #i: std::panic::UnwindSafe + std::panic::RefUnwindSafe + }); }); - where_clause.predicates.push( - parse_quote! { - __SR_API_BLOCK__::Header: std::panic::UnwindSafe + std::panic::RefUnwindSafe - } - ); + where_clause.predicates.push(parse_quote! { + __SR_API_BLOCK__::Header: std::panic::UnwindSafe + std::panic::RefUnwindSafe + }); input.attrs = filter_cfg_attrs(&input.attrs); @@ -663,8 +622,11 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result /// runtime apis. 
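The `ApiExt` surface above loses the associated `Error` type (everything now returns the concrete `ApiError`) and gains `api_version` and `proof_recorder`. A hedged sketch of client-side usage of the touched methods, assuming the usual `ProvideRuntimeApi` setup; nothing here is code from this PR:

```rust
use sp_api::{ApiExt, Core, ProvideRuntimeApi};
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

fn inspect_runtime<Block: BlockT, C>(
    client: &C,
    at: &BlockId<Block>,
) -> Result<(), sp_api::ApiError>
where
    C: ProvideRuntimeApi<Block>,
    C::Api: Core<Block>,
{
    let mut api = client.runtime_api();

    // New in this diff: the concrete version of a registered api, if any.
    if let Some(version) = api.api_version::<dyn Core<Block>>(at)? {
        log::info!("Core api version: {}", version);
    }

    // Record a proof of everything the following call reads from storage,
    // then extract it (now built via `recorder.to_storage_proof()`).
    api.record_proof();
    let _runtime_version = api.version(at)?;
    let _proof = api.extract_proof();

    Ok(())
}
```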
fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { let mut result = Vec::with_capacity(impls.len()); + let mut sections = Vec::with_capacity(impls.len()); let mut processed_traits = HashSet::new(); + let c = generate_crate_access(HIDDEN_INCLUDES_ID); + for impl_ in impls { let mut path = extend_with_runtime_decl_path( extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(), @@ -679,14 +641,12 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { let span = trait_.span(); if !processed_traits.insert(trait_) { - return Err( - Error::new( - span, - "Two traits with the same name detected! \ + return Err(Error::new( + span, + "Two traits with the same name detected! \ The trait name is used to generate its ID. \ - Please rename one trait at the declaration!" - ) - ) + Please rename one trait at the declaration!", + )) } let id: Path = parse_quote!( #path ID ); @@ -697,12 +657,22 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { #( #attrs )* (#id, #version) )); - } - let c = generate_crate_access(HIDDEN_INCLUDES_ID); + sections.push(quote!( + #( #attrs )* + const _: () = { + // All sections with the same name are going to be merged by concatenation. + #[cfg(not(feature = "std"))] + #[link_section = "runtime_apis"] + static SECTION_CONTENTS: [u8; 12] = #c::serialize_runtime_api_info(#id, #version); + }; + )); + } Ok(quote!( const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); + + #( #sections )* )) } @@ -711,7 +681,9 @@ pub fn impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::Tok // Parse all impl blocks let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); - impl_runtime_apis_impl_inner(&api_impls).unwrap_or_else(|e| e.to_compile_error()).into() + impl_runtime_apis_impl_inner(&api_impls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { @@ -723,27 +695,25 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { let wasm_interface = generate_wasm_interface(api_impls)?; let api_impls_for_runtime_api = generate_api_impl_for_runtime_api(api_impls)?; - Ok( - quote!( - #hidden_includes + Ok(quote!( + #hidden_includes - #base_runtime_api + #base_runtime_api - #api_impls_for_runtime + #api_impls_for_runtime - #api_impls_for_runtime_api + #api_impls_for_runtime_api - #runtime_api_versions + #runtime_api_versions - pub mod api { - use super::*; + pub mod api { + use super::*; - #dispatch_impl + #dispatch_impl - #wasm_interface - } - ) - ) + #wasm_interface + } + )) } // Filters all attributes except the cfg ones. diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index 4dd48094683d9..b8731d70ca3cf 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,9 +21,9 @@ use proc_macro::TokenStream; +mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; -mod decl_runtime_apis; mod utils; #[proc_macro] diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 0e8f18e3e6f14..77f8a07f85c48 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,24 +16,33 @@ // limitations under the License. use crate::utils::{ - generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, extract_parameter_names_types_and_borrows, - return_type_extract_type, extract_block_type_from_trait_path, extract_impl_trait, - AllowSelfRefInParameters, RequireQualifiedTraitPath, + extract_block_type_from_trait_path, extract_impl_trait, + extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; -use quote::quote; +use quote::{quote, quote_spanned}; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, ImplItem, TypePath, parse_quote, - parse::{Parse, ParseStream, Result, Error}, fold::{self, Fold}, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + Attribute, Ident, ItemImpl, Pat, Type, TypePath, }; /// Unique identifier used to make the hidden includes unique for this macro. const HIDDEN_INCLUDES_ID: &str = "MOCK_IMPL_RUNTIME_APIS"; +/// The `advanced` attribute. +/// +/// If this attribute is given to a function, the function gets access to the `BlockId` as first +/// parameter and needs to return a `Result` with the appropriate error type. +const ADVANCED_ATTRIBUTE: &str = "advanced"; + /// The structure used for parsing the runtime api implementations. struct RuntimeApiImpls { impls: Vec<ItemImpl>, } @@ -55,21 +64,11 @@ impl Parse for RuntimeApiImpls { } } -/// Implement the `ApiExt` trait, `ApiErrorExt` trait and the `Core` runtime api. -fn implement_common_api_traits( - error_type: Option<Type>, - block_type: TypePath, - self_ty: Type, -) -> Result<TokenStream> { +/// Implement the `ApiExt` trait and the `Core` runtime api.
+fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let error_type = error_type.map(|e| quote!(#e)).unwrap_or_else(|| quote!(String)); - Ok(quote!( - impl #crate_::ApiErrorExt for #self_ty { - type Error = #error_type; - } - impl #crate_::ApiExt<#block_type> for #self_ty { type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; @@ -83,18 +82,25 @@ fn implement_common_api_traits( fn has_api( &self, _: &#crate_::BlockId<#block_type>, - ) -> std::result::Result where Self: Sized { + ) -> std::result::Result where Self: Sized { Ok(true) } fn has_api_with bool>( &self, - at: &#crate_::BlockId<#block_type>, + _: &#crate_::BlockId<#block_type>, pred: P, - ) -> std::result::Result where Self: Sized { + ) -> std::result::Result where Self: Sized { Ok(pred(A::VERSION)) } + fn api_version( + &self, + _: &#crate_::BlockId<#block_type>, + ) -> std::result::Result, #crate_::ApiError> where Self: Sized { + Ok(Some(A::VERSION)) + } + fn record_proof(&mut self) { unimplemented!("`record_proof` not implemented for runtime api mocks") } @@ -103,6 +109,10 @@ fn implement_common_api_traits( unimplemented!("`extract_proof` not implemented for runtime api mocks") } + fn proof_recorder(&self) -> Option<#crate_::ProofRecorder<#block_type>> { + unimplemented!("`proof_recorder` not implemented for runtime api mocks") + } + fn into_storage_changes( &self, _: &Self::StateBackend, @@ -126,7 +136,7 @@ fn implement_common_api_traits( _: #crate_::ExecutionContext, _: Option<()>, _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #error_type> { + ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #crate_::ApiError> { unimplemented!("Not required for testing!") } @@ -136,7 +146,7 @@ fn implement_common_api_traits( _: #crate_::ExecutionContext, _: Option<#block_type>, _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { unimplemented!("Not required for testing!") } @@ -146,13 +156,70 @@ fn implement_common_api_traits( _: #crate_::ExecutionContext, _: Option<&<#block_type as #crate_::BlockT>::Header>, _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { unimplemented!("Not required for testing!") } } )) } +/// Returns if the advanced attribute is present in the given `attributes`. +/// +/// If the attribute was found, it will be automatically removed from the vec. +fn has_advanced_attribute(attributes: &mut Vec) -> bool { + let mut found = false; + attributes.retain(|attr| { + if attr.path.is_ident(ADVANCED_ATTRIBUTE) { + found = true; + false + } else { + true + } + }); + + found +} + +/// Get the name and type of the `at` parameter that is passed to a runtime api function. +/// +/// If `is_advanced` is `false`, the name is `_`. 
+fn get_at_param_name( + is_advanced: bool, + param_names: &mut Vec, + param_types_and_borrows: &mut Vec<(TokenStream, bool)>, + function_span: Span, + default_block_id_type: &TokenStream, +) -> Result<(TokenStream, TokenStream)> { + if is_advanced { + if param_names.is_empty() { + return Err(Error::new( + function_span, + format!( + "If using the `{}` attribute, it is required that the function \ + takes at least one argument, the `BlockId`.", + ADVANCED_ATTRIBUTE, + ), + )) + } + + // `param_names` and `param_types` have the same length, so if `param_names` is not empty + // `param_types` can not be empty as well. + let ptype_and_borrows = param_types_and_borrows.remove(0); + let span = ptype_and_borrows.1.span(); + if !ptype_and_borrows.1 { + return Err(Error::new( + span, + "`BlockId` needs to be taken by reference and not by value!", + )) + } + + let name = param_names.remove(0); + Ok((quote!( #name ), ptype_and_borrows.0)) + } else { + Ok((quote!(_), default_block_id_type.clone())) + } +} + /// Auxialiry structure to fold a runtime api trait implementation into the expected format. /// /// This renames the methods, changes the method parameters and extracts the error type. @@ -161,68 +228,100 @@ struct FoldRuntimeApiImpl<'a> { block_type: &'a TypePath, /// The identifier of the trait being implemented. impl_trait: &'a Ident, - /// Stores the error type that is being found in the trait implementation as associated type - /// with the name `Error`. - error_type: &'a mut Option, } impl<'a> Fold for FoldRuntimeApiImpl<'a> { fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { let block = { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + let is_advanced = has_advanced_attribute(&mut input.attrs); + let mut errors = Vec::new(); + + let (mut param_names, mut param_types_and_borrows) = + match extract_parameter_names_types_and_borrows( + &input.sig, + AllowSelfRefInParameters::YesButIgnore, + ) { + Ok(res) => ( + res.iter().map(|v| v.0.clone()).collect::>(), + res.iter() + .map(|v| { + let ty = &v.1; + let borrow = &v.2; + (quote_spanned!(ty.span() => #borrow #ty ), v.2.is_some()) + }) + .collect::>(), + ), + Err(e) => { + errors.push(e.to_compile_error()); + + (Default::default(), Default::default()) + }, + }; - let (param_names, param_types, error) = match extract_parameter_names_types_and_borrows( - &input.sig, - AllowSelfRefInParameters::YesButIgnore, + let block_type = &self.block_type; + let block_id_type = quote!( &#crate_::BlockId<#block_type> ); + + let (at_param_name, block_id_type) = match get_at_param_name( + is_advanced, + &mut param_names, + &mut param_types_and_borrows, + input.span(), + &block_id_type, ) { - Ok(res) => ( - res.iter().map(|v| v.0.clone()).collect::>(), - res.iter().map(|v| { - let ty = &v.1; - let borrow = &v.2; - quote!( #borrow #ty ) - }).collect::>(), - None - ), - Err(e) => (Vec::new(), Vec::new(), Some(e.to_compile_error())), + Ok(res) => res, + Err(e) => { + errors.push(e.to_compile_error()); + (quote!(_), block_id_type) + }, }; - let block_type = &self.block_type; - + let param_types = param_types_and_borrows.iter().map(|v| &v.0); // Rewrite the input parameters. input.sig.inputs = parse_quote! 
{ &self, - _: &#crate_::BlockId<#block_type>, + #at_param_name: #block_id_type, _: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, + ___params___sp___api___: Option<( #( #param_types ),* )>, _: Vec, }; - input.sig.ident = generate_method_runtime_api_impl_name( - &self.impl_trait, - &input.sig.ident, - ); - let ret_type = return_type_extract_type(&input.sig.output); + input.sig.ident = + generate_method_runtime_api_impl_name(&self.impl_trait, &input.sig.ident); + + // When using advanced, the user needs to declare the correct return type on its own, + // otherwise do it for the user. + if !is_advanced { + let ret_type = return_type_extract_type(&input.sig.output); - // Generate the correct return type. - input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error> - ); + // Generate the correct return type. + input.sig.output = parse_quote!( + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> + ); + } let orig_block = input.block.clone(); + let construct_return_value = if is_advanced { + quote!( (move || #orig_block)() ) + } else { + quote! { + let __fn_implementation__ = move || #orig_block; + + Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) + } + }; + // Generate the new method implementation that calls into the runtime. parse_quote!( { // Get the error to the user (if we have one). - #error + #( #errors )* - let (#( #param_names ),*) = params + let (#( #param_names ),*) = ___params___sp___api___ .expect("Mocked runtime apis don't support calling deprecated api versions"); - let __fn_implementation__ = move || #orig_block; - - Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) + #construct_return_value } ) }; @@ -233,44 +332,12 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { input.block = block; input } - - fn fold_impl_item(&mut self, input: ImplItem) -> ImplItem { - match input { - ImplItem::Type(ty) => { - if ty.ident == "Error" { - if let Some(error_type) = self.error_type { - if *error_type != ty.ty { - let error = Error::new( - ty.span(), - "Error type can not change between runtime apis", - ); - ImplItem::Verbatim(error.to_compile_error()) - } else { - ImplItem::Verbatim(Default::default()) - } - } else { - *self.error_type = Some(ty.ty); - ImplItem::Verbatim(Default::default()) - } - } else { - let error = Error::new( - ty.span(), - "Only associated type with name `Error` is allowed", - ); - ImplItem::Verbatim(error.to_compile_error()) - } - }, - o => fold::fold_impl_item(self, o), - } - } } /// Result of [`generate_runtime_api_impls`]. struct GeneratedRuntimeApiImpls { /// All the runtime api implementations. impls: TokenStream, - /// The error type that should be used by the runtime apis. - error_type: Option, /// The block type that is being used by the runtime apis. block_type: TypePath, /// The type the traits are implemented for. @@ -283,7 +350,6 @@ struct GeneratedRuntimeApiImpls { /// extracts the error type, self type and the block type. 
fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result { let mut result = Vec::with_capacity(impls.len()); - let mut error_type = None; let mut global_block_type: Option = None; let mut self_ty: Option> = None; @@ -297,28 +363,24 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result { + Some(self_ty) => if self_ty == impl_.self_ty { Some(self_ty) } else { - let mut error =Error::new( + let mut error = Error::new( impl_.self_ty.span(), "Self type should not change between runtime apis", ); - error.combine(Error::new( - self_ty.span(), - "First self type found here", - )); + error.combine(Error::new(self_ty.span(), "First self type found here")); return Err(error) - } - }, + }, None => Some(impl_.self_ty.clone()), }; global_block_type = match global_block_type.take() { - Some(global_block_type) => { + Some(global_block_type) => if global_block_type == *block_type { Some(global_block_type) } else { @@ -333,23 +395,17 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Some(block_type.clone()), }; - let mut visitor = FoldRuntimeApiImpl { - block_type, - impl_trait: &impl_trait.ident, - error_type: &mut error_type, - }; + let mut visitor = FoldRuntimeApiImpl { block_type, impl_trait: &impl_trait.ident }; result.push(visitor.fold_item_impl(impl_.clone())); } Ok(GeneratedRuntimeApiImpls { impls: quote!( #( #result )* ), - error_type, block_type: global_block_type.expect("There is a least one runtime api; qed"), self_ty: *self_ty.expect("There is at least one runtime api; qed"), }) @@ -360,14 +416,16 @@ pub fn mock_impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro // Parse all impl blocks let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); - mock_impl_runtime_apis_impl_inner(&api_impls).unwrap_or_else(|e| e.to_compile_error()).into() + mock_impl_runtime_apis_impl_inner(&api_impls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn mock_impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); - let GeneratedRuntimeApiImpls { impls, error_type, block_type, self_ty } = + let GeneratedRuntimeApiImpls { impls, block_type, self_ty } = generate_runtime_api_impls(api_impls)?; - let api_traits = implement_common_api_traits(error_type, block_type, self_ty)?; + let api_traits = implement_common_api_traits(block_type, self_ty)?; Ok(quote!( #hidden_includes diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 534ddcfddd96e..a3f21638751e9 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,18 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
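The mock macro rework above centres on the new `advanced` attribute: a mocked method tagged with it receives the `BlockId` by reference as its first parameter and must build the `Result<NativeOrEncoded<_>, ApiError>` itself. A hedged sketch, with `HelloApi`, `MockApi` and `Block` as illustrative names and assuming the trait declares both methods:

```rust
use sp_api::{ApiError, NativeOrEncoded};
use sp_runtime::generic::BlockId;

sp_api::mock_impl_runtime_apis! {
    impl HelloApi<Block> for MockApi {
        // Plain mock: parameters and return type exactly as declared.
        fn hello(&self) -> u32 {
            42
        }

        // `advanced` mock: the `BlockId` must be taken by reference (this is
        // enforced by `get_at_param_name` above) and the full result is
        // constructed by the implementation itself.
        #[advanced]
        fn hello_at(&self, at: &BlockId<Block>) -> Result<NativeOrEncoded<u32>, ApiError> {
            log::debug!("mock called at {:?}", at);
            Ok(42.into())
        }
    }
}
```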
-use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::{ - Result, Ident, Signature, parse_quote, Type, Pat, spanned::Spanned, FnArg, Error, token::And, - ImplItem, ReturnType, PathArguments, Path, GenericArgument, TypePath, ItemImpl, + parse_quote, spanned::Spanned, token::And, Error, FnArg, GenericArgument, Ident, ImplItem, + ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; use quote::quote; use std::env; -use proc_macro_crate::crate_name; +use proc_macro_crate::{crate_name, FoundCrate}; fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident { Ident::new(&format!("sp_api_hidden_includes_{}", unique_id), Span::call_site()) @@ -34,37 +34,34 @@ fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident { /// Generates the hidden includes that are required to make the macro independent from its scope. pub fn generate_hidden_includes(unique_id: &'static str) -> TokenStream { - if env::var("CARGO_PKG_NAME").unwrap() == "sp-api" { - TokenStream::new() - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - match crate_name("sp-api") { - Ok(client_name) => { - let client_name = Ident::new(&client_name, Span::call_site()); - quote!( - #[doc(hidden)] - mod #mod_name { - pub extern crate #client_name as sp_api; - } - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - quote!( #err ) - } - } - - }.into() + let mod_name = generate_hidden_includes_mod_name(unique_id); + match crate_name("sp-api") { + Ok(FoundCrate::Itself) => quote!(), + Ok(FoundCrate::Name(client_name)) => { + let client_name = Ident::new(&client_name, Span::call_site()); + quote!( + #[doc(hidden)] + mod #mod_name { + pub extern crate #client_name as sp_api; + } + ) + }, + Err(e) => { + let err = Error::new(Span::call_site(), e).to_compile_error(); + quote!( #err ) + }, + } } /// Generates the access to the `sc_client` crate. pub fn generate_crate_access(unique_id: &'static str) -> TokenStream { if env::var("CARGO_PKG_NAME").unwrap() == "sp-api" { - quote!( sp_api ) + quote!(sp_api) } else { let mod_name = generate_hidden_includes_mod_name(unique_id); quote!( self::#mod_name::sp_api ) - }.into() + } + .into() } /// Generates the name of the module that contains the trait declaration for the runtime. @@ -80,7 +77,7 @@ pub fn generate_method_runtime_api_impl_name(trait_: &Ident, method: &Ident) -> /// Get the type of a `syn::ReturnType`. pub fn return_type_extract_type(rt: &ReturnType) -> Type { match rt { - ReturnType::Default => parse_quote!( () ), + ReturnType::Default => parse_quote!(()), ReturnType::Type(_, ref ty) => *ty.clone(), } } @@ -88,10 +85,13 @@ pub fn return_type_extract_type(rt: &ReturnType) -> Type { /// Replace the `_` (wild card) parameter names in the given signature with unique identifiers. 
pub fn replace_wild_card_parameter_names(input: &mut Signature) { let mut generated_pattern_counter = 0; - input.inputs.iter_mut().for_each(|arg| if let FnArg::Typed(arg) = arg { - arg.pat = Box::new( - generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter), - ); + input.inputs.iter_mut().for_each(|arg| { + if let FnArg::Typed(arg) = arg { + arg.pat = Box::new(generate_unique_pattern( + (*arg.pat).clone(), + &mut generated_pattern_counter, + )); + } }); } @@ -99,17 +99,18 @@ pub fn replace_wild_card_parameter_names(input: &mut Signature) { pub fn fold_fn_decl_for_client_side( input: &mut Signature, block_id: &TokenStream, + crate_: &TokenStream, ) { replace_wild_card_parameter_names(input); // Add `&self, at: &BlockId` as parameters to each function at the beginning. input.inputs.insert(0, parse_quote!( __runtime_api_at_param__: &#block_id )); - input.inputs.insert(0, parse_quote!( &self )); + input.inputs.insert(0, parse_quote!(&self)); // Wrap the output in a `Result` input.output = { let ty = return_type_extract_type(&input.output); - parse_quote!( -> std::result::Result<#ty, Self::Error> ) + parse_quote!( -> std::result::Result<#ty, #crate_::ApiError> ) }; } @@ -117,10 +118,8 @@ pub fn fold_fn_decl_for_client_side( pub fn generate_unique_pattern(pat: Pat, counter: &mut u32) -> Pat { match pat { Pat::Wild(_) => { - let generated_name = Ident::new( - &format!("__runtime_api_generated_name_{}__", counter), - pat.span(), - ); + let generated_name = + Ident::new(&format!("__runtime_api_generated_name_{}__", counter), pat.span()); *counter += 1; parse_quote!( #generated_name ) @@ -148,26 +147,20 @@ pub fn extract_parameter_names_types_and_borrows( match input { FnArg::Typed(arg) => { let (ty, borrow) = match &*arg.ty { - Type::Reference(t) => { - ((*t.elem).clone(), Some(t.and_token)) - }, - t => { (t.clone(), None) }, + Type::Reference(t) => ((*t.elem).clone(), Some(t.and_token)), + t => (t.clone(), None), }; - let name = generate_unique_pattern( - (*arg.pat).clone(), - &mut generated_pattern_counter, - ); + let name = + generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter); result.push((name, ty, borrow)); }, - FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => { - return Err(Error::new(input.span(), "`self` parameter not supported!")) - }, - FnArg::Receiver(recv) => { + FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => + return Err(Error::new(input.span(), "`self` parameter not supported!")), + FnArg::Receiver(recv) => if recv.mutability.is_some() || recv.reference.is_none() { return Err(Error::new(recv.span(), "Only `&self` is supported!")) - } - }, + }, } } @@ -193,26 +186,30 @@ pub fn prefix_function_with_trait(trait_: &Ident, function: &F) -> /// /// If a type is a reference, the inner type is extracted (without the reference).
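The effect of `fold_fn_decl_for_client_side` above is easiest to see on a concrete method. A sketch with invented names, assuming a declared api method `fn get_balance(who: u64) -> u64`:

```rust
use sp_api::ApiError;
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

// Illustrative only: after folding, the client-side method carries `&self`,
// the leading `at` parameter, and a `Result<_, ApiError>` return type.
trait BalanceClientSide<Block: BlockT> {
    fn get_balance(
        &self,
        __runtime_api_at_param__: &BlockId<Block>,
        who: u64,
    ) -> std::result::Result<u64, ApiError>;
}
```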
pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { - items.iter() + items + .iter() .filter_map(|i| match i { ImplItem::Method(method) => Some(&method.sig), _ => None, }) - .map(|sig| { + .flat_map(|sig| { let ret_ty = match &sig.output { ReturnType::Default => None, ReturnType::Type(_, ty) => Some((**ty).clone()), }; - sig.inputs.iter().filter_map(|i| match i { - FnArg::Typed(arg) => Some(&arg.ty), - _ => None, - }).map(|ty| match &**ty { - Type::Reference(t) => (*t.elem).clone(), - _ => (**ty).clone(), - }).chain(ret_ty) + sig.inputs + .iter() + .filter_map(|i| match i { + FnArg::Typed(arg) => Some(&arg.ty), + _ => None, + }) + .map(|ty| match &**ty { + Type::Reference(t) => (*t.elem).clone(), + _ => (**ty).clone(), + }) + .chain(ret_ty) }) - .flatten() .collect() } @@ -227,19 +224,20 @@ pub fn extract_block_type_from_trait_path(trait_: &Path) -> Result<&TypePath> { .ok_or_else(|| Error::new(span, "Empty path not supported"))?; match &generics.arguments { - PathArguments::AngleBracketed(ref args) => { - args.args.first().and_then(|v| match v { + PathArguments::AngleBracketed(ref args) => args + .args + .first() + .and_then(|v| match v { GenericArgument::Type(Type::Path(ref block)) => Some(block), - _ => None - }).ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")) - }, + _ => None, + }) + .ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")), PathArguments::None => { let span = trait_.segments.last().as_ref().unwrap().span(); Err(Error::new(span, "Missing `Block` generic parameter.")) }, - PathArguments::Parenthesized(_) => { - Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) - }, + PathArguments::Parenthesized(_) => + Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")), } } @@ -256,19 +254,20 @@ pub fn extract_impl_trait<'a>( impl_: &'a ItemImpl, require: RequireQualifiedTraitPath, ) -> Result<&'a Path> { - impl_.trait_.as_ref().map(|v| &v.1).ok_or_else( - || Error::new(impl_.span(), "Only implementation of traits are supported!") - ).and_then(|p| { - if p.segments.len() > 1 || matches!(require, RequireQualifiedTraitPath::No) { - Ok(p) - } else { - Err( - Error::new( + impl_ + .trait_ + .as_ref() + .map(|v| &v.1) + .ok_or_else(|| Error::new(impl_.span(), "Only implementation of traits are supported!")) + .and_then(|p| { + if p.segments.len() > 1 || matches!(require, RequireQualifiedTraitPath::No) { + Ok(p) + } else { + Err(Error::new( p.span(), "The implemented trait has to be referenced with a path, \ - e.g. `impl client::Core for Runtime`." - ) - ) - } - }) + e.g. `impl client::Core for Runtime`.", + )) + } + }) } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index bad6c03058322..82954d193e605 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,19 +17,53 @@ //! Substrate runtime api //! -//! The Substrate runtime api is the crucial interface between the node and the runtime. -//! Every call that goes into the runtime is done with a runtime api. The runtime apis are not fixed. -//! Every Substrate user can define its own apis with -//! [`decl_runtime_apis`](macro.decl_runtime_apis.html) and implement them in -//! 
the runtime with [`impl_runtime_apis`](macro.impl_runtime_apis.html). +//! The Substrate runtime api is the interface between the node and the runtime. There isn't a fixed +//! set of runtime apis; instead, it is up to the user to declare and implement these runtime apis. +//! The declaration of a runtime api is normally done outside of a runtime, while the implementation +//! of it has to be done in the runtime. We provide the [`decl_runtime_apis!`] macro for declaring +//! a runtime api and the [`impl_runtime_apis!`] macro for implementing them. The macro docs provide more +//! information on how to use them and what kind of attributes we support. //! -//! Every Substrate runtime needs to implement the [`Core`] runtime api. This api provides the basic -//! functionality that every runtime needs to export. +//! It is required that each runtime implements at least the [`Core`] runtime api. This runtime api +//! provides all the core functions that Substrate expects from a runtime. //! -//! Besides the macros and the [`Core`] runtime api, this crates provides the [`Metadata`] runtime -//! api, the [`ApiExt`] trait, the [`CallApiAt`] trait and the [`ConstructRuntimeApi`] trait. +//! # Versioning //! -//! On a meta level this implies, the client calls the generated API from the client perspective. +//! Runtime apis support versioning. Each runtime api itself has a version attached. It is also +//! possible to change function signatures or names in a non-breaking way. For more information on +//! versioning, check the [`decl_runtime_apis!`] macro. +//! +//! All runtime apis and their versions are returned as part of the [`RuntimeVersion`]. This can be +//! used to check which runtime api version is currently provided by the on-chain runtime. +//! +//! # Testing +//! +//! For testing, we provide the [`mock_impl_runtime_apis!`] macro that lets you implement a runtime +//! api for a mocked object to use it in tests. +//! +//! # Logging +//! +//! Substrate supports logging from the runtime in native and in wasm. For that purpose it provides +//! the [`RuntimeLogger`](sp_runtime::runtime_logger::RuntimeLogger). This runtime logger is +//! automatically enabled for each call into the runtime through the runtime api. As logging +//! introduces extra code that isn't actually required for the logic of your runtime and also +//! increases the final wasm blob size, it is recommended to disable the logging for on-chain +//! wasm blobs. This can be done by enabling the `disable-logging` feature of this crate. Be aware +//! that this feature instructs `log` and `tracing` to disable logging at compile time by setting +//! the `max_level_off` feature for these crates. So, you should not enable this feature for a +//! native build as otherwise the node will not output any log messages. +//! +//! # How does it work? +//! +//! Each runtime api is declared as a trait with functions. When compiled to WASM, each implemented +//! runtime api function is exported as a function with the following naming scheme +//! `${TRAIT_NAME}_${FUNCTION_NAME}`. Such a function has the following signature +//! `(ptr: *u8, length: u32) -> u64`. It takes a pointer to a `u8` array and its length as +//! arguments. This `u8` array is expected to be the SCALE encoded parameters of the function as +//! defined in the trait. The return value is a `u64` that represents `length << 32 | pointer` of +//! a `u8` array. This returned `u8` array contains the SCALE encoded return value as defined +//! by the trait function.
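To make the `length << 32 | pointer` convention above concrete, a minimal standalone sketch (not part of the crate):

```rust
// Pack and unpack the u64 return value of a runtime api export:
// the upper 32 bits carry the length, the lower 32 bits the pointer.
fn pack_return_value(ptr: u32, len: u32) -> u64 {
    (len as u64) << 32 | ptr as u64
}

fn unpack_return_value(packed: u64) -> (u32, u32) {
    let ptr = packed as u32; // lower 32 bits: pointer into wasm memory
    let len = (packed >> 32) as u32; // upper 32 bits: length of the array
    (ptr, len)
}

fn main() {
    let packed = pack_return_value(0x1000, 64);
    assert_eq!(unpack_return_value(packed), (0x1000, 64));
}
```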
The macros take care to encode the parameters and to decode the return +//! value. #![cfg_attr(not(feature = "std"), no_std)] @@ -37,42 +71,42 @@ extern crate self as sp_api; #[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_state_machine::{ - OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, InMemoryBackend, -}; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_core::NativeOrEncoded; +pub use codec::{self, Decode, DecodeLimit, Encode}; #[doc(hidden)] #[cfg(feature = "std")] pub use hash_db::Hasher; -#[cfg(feature = "std")] -pub use sp_core::offchain::storage::OffchainOverlayedChanges; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; #[doc(hidden)] +#[cfg(feature = "std")] +pub use sp_core::NativeOrEncoded; +use sp_core::OpaqueMetadata; +#[doc(hidden)] +pub use sp_core::{offchain, ExecutionContext}; +#[doc(hidden)] pub use sp_runtime::{ + generic::BlockId, traits::{ - Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, HashFor, NumberFor, - Header as HeaderT, Hash as HashT, + Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, Hash as HashT, HashFor, + Header as HeaderT, NumberFor, }, - generic::BlockId, transaction_validity::TransactionValidity, RuntimeString, TransactionOutcome, + transaction_validity::TransactionValidity, + RuntimeString, TransactionOutcome, }; #[doc(hidden)] -pub use sp_core::{offchain, ExecutionContext}; -#[doc(hidden)] -pub use sp_version::{ApiId, RuntimeVersion, ApisVec, create_apis_vec}; -#[doc(hidden)] -pub use sp_std::{slice, mem}; +#[cfg(feature = "std")] +pub use sp_state_machine::{ + Backend as StateBackend, ChangesTrieState, InMemoryBackend, OverlayedChanges, StorageProof, +}; #[cfg(feature = "std")] use sp_std::result; #[doc(hidden)] -pub use codec::{Encode, Decode, DecodeLimit}; -use sp_core::OpaqueMetadata; +pub use sp_std::{mem, slice}; +#[doc(hidden)] +pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; #[cfg(feature = "std")] -use std::{panic::UnwindSafe, cell::RefCell}; +use std::{cell::RefCell, panic::UnwindSafe}; /// Maximum nesting level for extrinsics. pub const MAX_EXTRINSIC_DEPTH: u32 = 256; @@ -83,11 +117,12 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// on the runtime side. The declaration for the runtime side is hidden in its own module. /// The client side declaration gets two extra parameters per function, /// `&self` and `at: &BlockId`. The runtime side declaration will match the given trait -/// declaration. Besides one exception, the macro adds an extra generic parameter `Block: BlockT` -/// to the client side and the runtime side. This generic parameter is usable by the user. +/// declaration. Besides one exception, the macro adds an extra generic parameter `Block: +/// BlockT` to the client side and the runtime side. This generic parameter is usable by the +/// user. /// /// For implementing these macros you should use the -/// [`impl_runtime_apis!`](macro.impl_runtime_apis.html) macro. +/// [`impl_runtime_apis!`] macro. /// /// # Example /// @@ -116,14 +151,14 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// # Runtime api trait versioning /// /// To support versioning of the traits, the macro supports the attribute `#[api_version(1)]`. -/// The attribute supports any `u32` as version. By default, each trait is at version `1`, if no -/// version is provided. We also support changing the signature of a method. This signature -/// change is highlighted with the `#[changed_in(2)]` attribute above a method. 
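Spelled out, the versioning attributes described here combine roughly as follows (a sketch; trait and method names are invented):

```rust
sp_api::decl_runtime_apis! {
    #[api_version(2)]
    pub trait Balance {
        /// Current (version 2) signature; the mandatory "default" method.
        fn get_balance(who: u64) -> u64;

        /// Pre-version-2 signature, callable as `get_balance_before_version_2`
        /// and only into wasm.
        #[changed_in(2)]
        fn get_balance() -> u64;
    }
}
```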
A method that is -/// tagged with this attribute is callable by the name `METHOD_before_version_VERSION`. This -/// method will only support calling into wasm, trying to call into native will fail (change the -/// spec version!). Such a method also does not need to be implemented in the runtime. It is -/// required that there exist the "default" of the method without the `#[changed_in(_)]` attribute, -/// this method will be used to call the current default implementation. +/// The attribute supports any `u32` as version. By default, each trait is at version `1` if +/// no version is provided. We also support changing the signature of a method. This signature +/// change is highlighted with the `#[changed_in(2)]` attribute above a method. A method that +/// is tagged with this attribute is callable by the name `METHOD_before_version_VERSION`. This +/// method will only support calling into wasm; trying to call into native will fail (change +/// the spec version!). Such a method also does not need to be implemented in the runtime. It +/// is required that the "default" version of the method exists without the `#[changed_in(_)]` +/// attribute; this method will be used to call the current default implementation. /// /// ```rust /// sp_api::decl_runtime_apis! { /// @@ -148,22 +183,23 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// ``` /// /// To check if a given runtime implements a runtime api trait, the `RuntimeVersion` has the -/// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` to -/// check if the runtime at the given block id implements the requested runtime api trait. +/// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` +/// to check if the runtime at the given block id implements the requested runtime api trait. pub use sp_api_proc_macro::decl_runtime_apis; /// Tags given trait implementations as runtime apis. /// /// All traits given to this macro need to be declared with the /// [`decl_runtime_apis!`](macro.decl_runtime_apis.html) macro. The implementation of the trait -/// should follow the declaration given to the [`decl_runtime_apis!`](macro.decl_runtime_apis.html) -/// macro, besides the `Block` type that is required as first generic parameter for each runtime -/// api trait. When implementing a runtime api trait, it is required that the trait is referenced -/// by a path, e.g. `impl my_trait::MyTrait for Runtime`. The macro will use this path to access -/// the declaration of the trait for the runtime side. +/// should follow the declaration given to the +/// [`decl_runtime_apis!`](macro.decl_runtime_apis.html) macro, besides the `Block` type that +/// is required as first generic parameter for each runtime api trait. When implementing a +/// runtime api trait, it is required that the trait is referenced by a path, e.g. `impl +/// my_trait::MyTrait for Runtime`. The macro will use this path to access the declaration of +/// the trait for the runtime side. /// -/// The macro also generates the api implementations for the client side and provides it through -/// the `RuntimeApi` type. The `RuntimeApi` is hidden behind a `feature` called `std`. +/// The macro also generates the api implementations for the client side and provides it +/// through the `RuntimeApi` type. The `RuntimeApi` is hidden behind a `feature` called `std`. /// /// To expose version information about all implemented api traits, the constant /// `RUNTIME_API_VERSIONS` is generated.
This constant should be used to instantiate the `apis` @@ -241,20 +277,18 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// Mocks given trait implementations as runtime apis. /// -/// Accepts similar syntax as [`impl_runtime_apis!`](macro.impl_runtime_apis.html) and generates -/// simplified mock implementations of the given runtime apis. The difference in syntax is that the -/// trait does not need to be referenced by a qualified path, methods accept the `&self` parameter -/// and the error type can be specified as associated type. If no error type is specified `String` -/// is used as error type. +/// Accepts similar syntax as [`impl_runtime_apis!`] and generates +/// simplified mock implementations of the given runtime apis. The difference in syntax is that +/// the trait does not need to be referenced by a qualified path, methods accept the `&self` +/// parameter and the error type can be specified as associated type. If no error type is +/// specified, [`String`] is used as error type. /// -/// Besides implementing the given traits, the [`Core`], [`ApiExt`] and [`ApiErrorExt`] are -/// implemented automatically. +/// Besides implementing the given traits, the [`Core`](sp_api::Core) and +/// [`ApiExt`](sp_api::ApiExt) are implemented automatically. /// /// # Example /// /// ```rust -/// use sp_version::create_runtime_str; -/// # /// # use sp_runtime::traits::Block as BlockT; /// # use sp_test_primitives::Block; /// # @@ -270,7 +304,6 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// # fn build_block() -> Block; /// # } /// # } -/// /// struct MockApi { /// balance: u64, /// } @@ -288,11 +321,6 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// } /// /// impl BlockBuilder for MockApi { -/// /// Sets the error type that is being used by the mock implementation. -/// /// The error type is used by all runtime apis. It is only required to -/// /// be specified in one trait implementation. -/// type Error = String; -/// /// fn build_block() -> Block { /// unimplemented!("Not Required in tests") /// } @@ -301,26 +329,80 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// /// # fn main() {} /// ``` +/// +/// # `advanced` attribute +/// +/// This attribute can be placed above individual functions in the mock implementation to +/// request more control over the function declaration. From the client side, each runtime api +/// function is called with the `at` parameter that is a [`BlockId`](sp_api::BlockId). When +/// using the `advanced` attribute, the macro expects that the first parameter of the function +/// is this `at` parameter. Besides that, the macro also doesn't do the automatic return value +/// rewrite, which means that the full return value must be specified. The full return value is +/// constructed like [`Result`]`<`[`NativeOrEncoded`](sp_api::NativeOrEncoded)`<ReturnValue>, +/// Error>`, with `ReturnValue` being the return value that is specified in the trait +/// declaration. +/// +/// ## Example +/// ```rust +/// # use sp_runtime::{traits::Block as BlockT, generic::BlockId}; +/// # use sp_test_primitives::Block; +/// # use sp_core::NativeOrEncoded; +/// # use codec; +/// # +/// # sp_api::decl_runtime_apis! { +/// # /// Declare the api trait. +/// # pub trait Balance { +/// # /// Get the balance. +/// # fn get_balance() -> u64; +/// # /// Set the balance. +/// # fn set_balance(val: u64); +/// # } +/// # } +/// struct MockApi { +/// balance: u64, +/// } +/// +/// sp_api::mock_impl_runtime_apis!
{ +/// impl Balance for MockApi { +/// #[advanced] +/// fn get_balance(&self, at: &BlockId) -> Result, sp_api::ApiError> { +/// println!("Being called at: {}", at); +/// +/// Ok(self.balance.into()) +/// } +/// #[advanced] +/// fn set_balance(at: &BlockId, val: u64) -> Result, sp_api::ApiError> { +/// if let BlockId::Number(1) = at { +/// println!("Being called to set balance to: {}", val); +/// } +/// +/// Ok(().into()) +/// } +/// } +/// } +/// +/// # fn main() {} +/// ``` pub use sp_api_proc_macro::mock_impl_runtime_apis; /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] -pub type ProofRecorder = sp_state_machine::ProofRecorder>; +pub type ProofRecorder = sp_state_machine::ProofRecorder<::Hash>; /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] -pub type StorageTransactionCache = - sp_state_machine::StorageTransactionCache< - >>::Transaction, HashFor, NumberFor - >; +pub type StorageTransactionCache = sp_state_machine::StorageTransactionCache< + >>::Transaction, + HashFor, + NumberFor, +>; #[cfg(feature = "std")] -pub type StorageChanges = - sp_state_machine::StorageChanges< - >>::Transaction, - HashFor, - NumberFor - >; +pub type StorageChanges = sp_state_machine::StorageChanges< + >>::Transaction, + HashFor, + NumberFor, +>; /// Extract the state backend type for a type that implements `ProvideRuntimeApi`. #[cfg(feature = "std")] @@ -342,17 +424,42 @@ pub trait ConstructRuntimeApi> { fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; } -/// Extends the runtime api traits with an associated error type. This trait is given as super -/// trait to every runtime api trait. +/// Init the [`RuntimeLogger`](sp_runtime::runtime_logger::RuntimeLogger). +pub fn init_runtime_logger() { + #[cfg(not(feature = "disable-logging"))] + sp_runtime::runtime_logger::RuntimeLogger::init(); +} + +/// An error describing which API call failed. #[cfg(feature = "std")] -pub trait ApiErrorExt { - /// Error type used by the runtime apis. - type Error: std::fmt::Debug + From; +#[derive(Debug, thiserror::Error)] +pub enum ApiError { + #[error("Failed to decode return value of {function}")] + FailedToDecodeReturnValue { + function: &'static str, + #[source] + error: codec::Error, + }, + #[error("Failed to convert return value from runtime to node of {function}")] + FailedToConvertReturnValue { + function: &'static str, + #[source] + error: codec::Error, + }, + #[error("Failed to convert parameter `{parameter}` from node to runtime of {function}")] + FailedToConvertParameter { + function: &'static str, + parameter: &'static str, + #[source] + error: codec::Error, + }, + #[error(transparent)] + Application(#[from] Box), } /// Extends the runtime api implementation with some common functionality. #[cfg(feature = "std")] -pub trait ApiExt: ApiErrorExt { +pub trait ApiExt { /// The state backend that is used to store the block states. type StateBackend: StateBackend>; @@ -361,23 +468,31 @@ pub trait ApiExt: ApiErrorExt { /// Depending on the outcome of the closure, the transaction is committed or rolled-back. /// /// The internal result of the closure is returned afterwards. - fn execute_in_transaction TransactionOutcome, R>( - &self, - call: F, - ) -> R where Self: Sized; + fn execute_in_transaction TransactionOutcome, R>(&self, call: F) -> R + where + Self: Sized; /// Checks if the given api is implemented and versions match. 
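Since `ApiError` is a plain enum, callers can branch on the failure reason. A hypothetical helper, assuming exactly the variants declared above:

```rust
// Hypothetical: turn an `ApiError` into a human-readable summary.
fn describe_api_error(err: &sp_api::ApiError) -> String {
    use sp_api::ApiError::*;
    match err {
        FailedToDecodeReturnValue { function, error } =>
            format!("decoding the return value of `{}` failed: {}", function, error),
        FailedToConvertReturnValue { function, error } =>
            format!("converting the return value of `{}` failed: {}", function, error),
        FailedToConvertParameter { function, parameter, error } =>
            format!("converting parameter `{}` of `{}` failed: {}", parameter, function, error),
        Application(inner) => format!("application error: {}", inner),
    }
}
```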
- fn has_api( - &self, - at: &BlockId, - ) -> Result where Self: Sized; + fn has_api(&self, at: &BlockId) -> Result + where + Self: Sized; /// Check if the given api is implemented and the version passes a predicate. fn has_api_with bool>( &self, at: &BlockId, pred: P, - ) -> Result where Self: Sized; + ) -> Result + where + Self: Sized; + + /// Returns the version of the given api. + fn api_version( + &self, + at: &BlockId, + ) -> Result, ApiError> + where + Self: Sized; /// Start recording all accessed trie nodes for generating proofs. fn record_proof(&mut self); @@ -389,6 +504,9 @@ pub trait ApiExt: ApiErrorExt { /// If `record_proof` was not called before, this will return `None`. fn extract_proof(&mut self) -> Option; + /// Returns the current active proof recorder. + fn proof_recorder(&self) -> Option>; + /// Convert the api object into the storage changes that were done while executing runtime /// api functions. /// @@ -398,34 +516,14 @@ pub trait ApiExt: ApiErrorExt { backend: &Self::StateBackend, changes_trie_state: Option<&ChangesTrieState, NumberFor>>, parent_hash: Block::Hash, - ) -> Result, String> where Self: Sized; -} - -/// Before calling any runtime api function, the runtime need to be initialized -/// at the requested block. However, some functions like `execute_block` or -/// `initialize_block` itself don't require to have the runtime initialized -/// at the requested block. -/// -/// `call_api_at` is instructed by this enum to do the initialization or to skip -/// it. -#[cfg(feature = "std")] -#[derive(Clone, Copy)] -pub enum InitializeBlock<'a, Block: BlockT> { - /// Skip initializing the runtime for a given block. - /// - /// This is used by functions who do the initialization by themselves or don't require it. - Skip, - /// Initialize the runtime for a given block. - /// - /// If the stored `BlockId` is `Some(_)`, the runtime is currently initialized at this block. - Do(&'a RefCell>>), + ) -> Result, String> + where + Self: Sized; } /// Parameters for [`CallApiAt::call_api_at`]. #[cfg(feature = "std")] -pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>> { - /// A reference to something that implements the [`Core`] api. - pub core_api: &'a C, +pub struct CallApiAtParams<'a, Block: BlockT, NC, Backend: StateBackend>> { /// The block id that determines the state that should be setup when calling the function. pub at: &'a BlockId, /// The name of the function that should be called. @@ -439,13 +537,8 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend, /// The overlayed changes that are on top of the state. pub overlayed_changes: &'a RefCell, - /// The overlayed changes to be applied to the offchain worker database. - pub offchain_changes: &'a RefCell, /// The cache for storage transactions. pub storage_transaction_cache: &'a RefCell>, - /// Determines if the function requires that `initialize_block` should be called before calling - /// the actual function. - pub initialize_block: InitializeBlock<'a, Block>, /// The context this function is executed in. pub context: ExecutionContext, /// The optional proof recorder for recording storage accesses. @@ -455,9 +548,6 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend { - /// Error type used by the implementation. - type Error: std::fmt::Debug + From; - /// The state backend that is used to store the block states. 
type StateBackend: StateBackend>; @@ -466,15 +556,14 @@ pub trait CallApiAt { fn call_api_at< 'a, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - C: Core, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - params: CallApiAtParams<'a, Block, C, NC, Self::StateBackend>, - ) -> Result, Self::Error>; + params: CallApiAtParams<'a, Block, NC, Self::StateBackend>, + ) -> Result, ApiError>; /// Returns the runtime version at the given block. - fn runtime_version_at(&self, at: &BlockId) -> Result; + fn runtime_version_at(&self, at: &BlockId) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. @@ -527,9 +616,48 @@ pub trait RuntimeApiInfo { const VERSION: u32; } -/// Extracts the `Api::Error` for a type that provides a runtime api. -#[cfg(feature = "std")] -pub type ApiErrorFor = <>::Api as ApiErrorExt>::Error; +/// The number of bytes required to encode a [`RuntimeApiInfo`]. +/// +/// 8 bytes for `ID` and 4 bytes for a version. +pub const RUNTIME_API_INFO_SIZE: usize = 12; + +/// Crude and simple way to serialize the `RuntimeApiInfo` into a bunch of bytes. +pub const fn serialize_runtime_api_info(id: [u8; 8], version: u32) -> [u8; RUNTIME_API_INFO_SIZE] { + let version = version.to_le_bytes(); + + let mut r = [0; RUNTIME_API_INFO_SIZE]; + r[0] = id[0]; + r[1] = id[1]; + r[2] = id[2]; + r[3] = id[3]; + r[4] = id[4]; + r[5] = id[5]; + r[6] = id[6]; + r[7] = id[7]; + + r[8] = version[0]; + r[9] = version[1]; + r[10] = version[2]; + r[11] = version[3]; + r +} + +/// Deserialize the runtime API info serialized by [`serialize_runtime_api_info`]. +pub fn deserialize_runtime_api_info(bytes: [u8; RUNTIME_API_INFO_SIZE]) -> ([u8; 8], u32) { + use sp_std::convert::TryInto; + + let id: [u8; 8] = bytes[0..8] + .try_into() + .expect("the source slice size is equal to the dest array length; qed"); + + let version = u32::from_le_bytes( + bytes[8..12] + .try_into() + .expect("the source slice size is equal to the array length; qed"), + ); + + (id, version) +} #[derive(codec::Encode, codec::Decode)] pub struct OldRuntimeVersion { @@ -579,12 +707,9 @@ decl_runtime_apis! { #[changed_in(3)] fn version() -> OldRuntimeVersion; /// Execute the given block. - #[skip_initialize_block] fn execute_block(block: Block); /// Initialize a block with the given header. 
#[renamed("initialise_block", 2)] - #[skip_initialize_block] - #[initialize_block] fn initialize_block(header: &::Header); } diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 867cdd6e57e48..b78c9abb80dc6 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-test" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,22 +12,23 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", path = "../" } +sp-api = { version = "4.0.0-dev", path = "../" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-version = { version = "2.0.0", path = "../../version" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-blockchain = { version = "2.0.0", path = "../../blockchain" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -trybuild = "1.0.17" +sp-version = { version = "4.0.0-dev", path = "../../version" } +sp-tracing = { version = "4.0.0-dev", path = "../../tracing" } +sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } +sp-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } +sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } +trybuild = "1.0.43" rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-core = { version = "2.0.0", path = "../../core" } +futures = "0.3.9" +log = "0.4.14" +sp-core = { version = "4.0.0-dev", path = "../../core" } [[bench]] name = "bench" diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 280b707902873..b3d96a2db6a56 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
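Stepping back to the `serialize_runtime_api_info`/`deserialize_runtime_api_info` helpers added to `lib.rs` above, a quick round-trip sketch (the id bytes are arbitrary):

```rust
// 8 id bytes followed by a little-endian u32 version, 12 bytes in total.
fn main() {
    let id = *b"12345678"; // hypothetical 8-byte api id
    let bytes = sp_api::serialize_runtime_api_info(id, 3);
    assert_eq!(bytes.len(), sp_api::RUNTIME_API_INFO_SIZE);
    assert_eq!(sp_api::deserialize_runtime_api_info(bytes), (id, 3));
}
```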
-use criterion::{Criterion, criterion_group, criterion_main}; -use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, TestClientBuilder, - TestClientBuilderExt, runtime::TestAPI, -}; +use criterion::{criterion_group, criterion_main, Criterion}; +use sp_api::ProvideRuntimeApi; use sp_runtime::generic::BlockId; use sp_state_machine::ExecutionStrategy; -use sp_api::ProvideRuntimeApi; +use substrate_test_runtime_client::{ + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, +}; fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { @@ -58,13 +57,17 @@ fn sp_api_benchmark(c: &mut Criterion) { }); c.bench_function("calling function by function pointer in wasm", |b| { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_indirect_call(&block_id).unwrap()) }); c.bench_function("calling function in wasm", |b| { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_direct_call(&block_id).unwrap()) }); diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index f16f0bbe71c56..8d1b04a37a9fa 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,14 +16,14 @@ // limitations under the License. use sp_api::{ - RuntimeApiInfo, decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, - ApiExt, + decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiError, ApiExt, RuntimeApiInfo, +}; +use sp_core::NativeOrEncoded; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, GetNodeBlockType}, }; - -use sp_runtime::{traits::{GetNodeBlockType, Block as BlockT}, generic::BlockId}; - use substrate_test_runtime_client::runtime::Block; -use sp_blockchain::Result; /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` /// trait are done by the `construct_runtime!` macro in a real runtime. @@ -103,9 +103,30 @@ mock_impl_runtime_apis! { unimplemented!() } - fn same_name() {} + #[advanced] + fn same_name(_: &BlockId) -> + Result< + NativeOrEncoded<()>, + ApiError + > + { + Ok(().into()) + } - fn wild_card(_: u32) {} + #[advanced] + fn wild_card(at: &BlockId, _: u32) -> + Result< + NativeOrEncoded<()>, + ApiError + > + { + if let BlockId::Number(1337) = at { + // yeah + Ok(().into()) + } else { + Err((Box::from("Test error") as Box).into()) + } + } } impl ApiWithCustomVersion for MockApi { @@ -115,40 +136,46 @@ mock_impl_runtime_apis! 
{ type TestClient = substrate_test_runtime_client::client::Client< substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, + substrate_test_runtime_client::ExecutorDispatch, Block, RuntimeApi, >; #[test] fn test_client_side_function_signature() { - let _test: fn(&RuntimeApiImpl, &BlockId, u64) -> Result<()> = - RuntimeApiImpl::::test; - let _something_with_block: - fn(&RuntimeApiImpl, &BlockId, Block) -> Result = - RuntimeApiImpl::::something_with_block; + let _test: fn( + &RuntimeApiImpl, + &BlockId, + u64, + ) -> Result<(), ApiError> = RuntimeApiImpl::::test; + let _something_with_block: fn( + &RuntimeApiImpl, + &BlockId, + Block, + ) -> Result = RuntimeApiImpl::::something_with_block; #[allow(deprecated)] - let _same_name_before_version_2: - fn(&RuntimeApiImpl, &BlockId) -> Result = - RuntimeApiImpl::::same_name_before_version_2; + let _same_name_before_version_2: fn( + &RuntimeApiImpl, + &BlockId, + ) -> Result = RuntimeApiImpl::::same_name_before_version_2; } #[test] fn check_runtime_api_info() { - assert_eq!(&Api::::ID, &runtime_decl_for_Api::ID); - assert_eq!(Api::::VERSION, runtime_decl_for_Api::VERSION); - assert_eq!(Api::::VERSION, 1); + assert_eq!(&>::ID, &runtime_decl_for_Api::ID); + assert_eq!(>::VERSION, runtime_decl_for_Api::VERSION); + assert_eq!(>::VERSION, 1); assert_eq!( - ApiWithCustomVersion::::VERSION, + >::VERSION, runtime_decl_for_ApiWithCustomVersion::VERSION, ); assert_eq!( - &ApiWithCustomVersion::::ID, + &>::ID, &runtime_decl_for_ApiWithCustomVersion::ID, ); - assert_eq!(ApiWithCustomVersion::::VERSION, 2); + assert_eq!(>::VERSION, 2); } fn check_runtime_api_versions_contains() { @@ -157,19 +184,17 @@ fn check_runtime_api_versions_contains() { #[test] fn check_runtime_api_versions() { - check_runtime_api_versions_contains::>(); - check_runtime_api_versions_contains::>(); - check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); } #[test] fn mock_runtime_api_has_api() { let mock = MockApi { block: None }; - assert!( - mock.has_api::>(&BlockId::Number(0)).unwrap(), - ); - assert!(mock.has_api::>(&BlockId::Number(0)).unwrap()); + assert!(mock.has_api::>(&BlockId::Number(0)).unwrap()); + assert!(mock.has_api::>(&BlockId::Number(0)).unwrap()); } #[test] @@ -180,3 +205,15 @@ fn mock_runtime_api_panics_on_calling_old_version() { #[allow(deprecated)] let _ = mock.same_name_before_version_2(&BlockId::Number(0)); } + +#[test] +fn mock_runtime_api_works_with_advanced() { + let mock = MockApi { block: None }; + + Api::::same_name(&mock, &BlockId::Number(0)).unwrap(); + mock.wild_card(&BlockId::Number(1337), 1).unwrap(); + assert_eq!( + "Test error".to_string(), + mock.wild_card(&BlockId::Number(1336), 1).unwrap_err().to_string(), + ); +} diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index d72872959cefa..101f92fd6c7d7 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,21 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
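Several of the tests that follow call runtime apis at a block id; the transactional variant from `ApiExt` is driven the same way. A sketch under the assumption that `use_trie` (one of the test runtime apis exercised below) returns `u64`:

```rust
use sp_api::{ApiError, ApiExt, TransactionOutcome};
use sp_runtime::generic::BlockId;
use substrate_test_runtime_client::runtime::{Block, TestAPI};

// Commit or roll back the closure's changes based on the call outcome.
fn call_use_trie_transactionally<A>(
    api: &A,
    at: &BlockId<Block>,
) -> Result<u64, ApiError>
where
    A: TestAPI<Block> + ApiExt<Block>,
{
    api.execute_in_transaction(|api| match api.use_trie(at) {
        Ok(val) => TransactionOutcome::Commit(Ok(val)),
        Err(e) => TransactionOutcome::Rollback(Err(e)),
    })
}
```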
-use sp_api::ProvideRuntimeApi; +use sp_api::{Core, ProvideRuntimeApi}; +use sp_runtime::{ + generic::BlockId, + traits::{HashFor, Header as HeaderT}, +}; +use sp_state_machine::{ + create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionStrategy, +}; use substrate_test_runtime_client::{ prelude::*, + runtime::{Block, DecodeFails, Header, TestAPI, Transfer}, DefaultTestClientBuilderExt, TestClientBuilder, - runtime::{TestAPI, DecodeFails, Transfer, Block}, -}; -use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, HashFor}}; -use sp_state_machine::{ - ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, }; -use sp_consensus::SelectChain; use codec::Encode; use sc_block_builder::BlockBuilderProvider; +use sp_consensus::SelectChain; fn calling_function_with_strat(strat: ExecutionStrategy) { let client = TestClientBuilder::new().set_execution_strategy(strat).build(); @@ -50,21 +52,22 @@ fn calling_wasm_runtime_function() { } #[test] -#[should_panic( - expected = - "Could not convert parameter `param` between node and runtime: DecodeFails always fails" -)] +#[should_panic(expected = "FailedToConvertParameter { function: \"fail_convert_parameter\"")] fn calling_native_runtime_function_with_non_decodable_parameter() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); runtime_api.fail_convert_parameter(&block_id, DecodeFails::new()).unwrap(); } #[test] -#[should_panic(expected = "Could not convert return value from runtime to node!")] +#[should_panic(expected = "FailedToConvertReturnValue { function: \"fail_convert_return_value\"")] fn calling_native_runtime_function_with_non_decodable_return_value() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); runtime_api.fail_convert_return_value(&block_id).unwrap(); @@ -72,7 +75,9 @@ fn calling_native_runtime_function_with_non_decodable_return_value() { #[test] fn calling_native_runtime_signature_changed_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -81,7 +86,9 @@ fn calling_native_runtime_signature_changed_function() { #[test] fn calling_wasm_runtime_signature_changed_old_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -106,10 +113,11 @@ fn calling_with_both_strategy_and_fail_on_native_should_work() { assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); } - #[test] fn calling_with_native_else_wasm_and_fail_on_wasm_should_work() { - let client = 
TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeElseWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeElseWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); assert_eq!(runtime_api.fail_on_wasm(&block_id).unwrap(), 1); @@ -117,7 +125,9 @@ fn calling_with_native_else_wasm_and_fail_on_wasm_should_work() { #[test] fn calling_with_native_else_wasm_and_fail_on_native_should_work() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeElseWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeElseWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); @@ -125,7 +135,9 @@ fn calling_with_native_else_wasm_and_fail_on_native_should_work() { #[test] fn use_trie_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); assert_eq!(runtime_api.use_trie(&block_id).unwrap(), 2); @@ -136,26 +148,21 @@ fn initialize_block_works() { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); + runtime_api + .initialize_block( + &block_id, + &Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap(); assert_eq!(runtime_api.get_block_number(&block_id).unwrap(), 1); } -#[test] -fn initialize_block_is_called_only_once() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), Some(1)); - assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), None); -} - -#[test] -fn initialize_block_is_skipped() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert!(runtime_api.without_initialize_block(&block_id).unwrap()); -} - #[test] fn record_proof_works() { let (client, longest_chain) = TestClientBuilder::new() @@ -163,10 +170,15 @@ fn record_proof_works() { .build_with_longest_chain(); let block_id = BlockId::Number(client.chain_info().best_number); - let storage_root = longest_chain.best_chain().unwrap().state_root().clone(); + let storage_root = futures::executor::block_on(longest_chain.best_chain()) + .unwrap() + .state_root() + .clone(); let runtime_code = sp_core::traits::RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(client.code_at(&block_id).unwrap().into()), + code_fetcher: &sp_core::traits::WrappedRuntimeCode( + client.code_at(&block_id).unwrap().into(), + ), hash: vec![1], heap_pages: None, }; @@ -176,7 +188,8 @@ fn record_proof_works() { nonce: 0, from: AccountKeyring::Alice.into(), to: Default::default(), - }.into_signed_tx(); + } + .into_signed_tx(); // Build the block and 
record proof let mut builder = client @@ -188,11 +201,12 @@ fn record_proof_works() { let backend = create_proof_check_backend::>( storage_root, proof.expect("Proof was generated"), - ).expect("Creates proof backend."); + ) + .expect("Creates proof backend."); // Use the proof backend to execute `execute_block`. let mut overlay = Default::default(); - let executor = NativeExecutor::::new( + let executor = NativeElseWasmExecutor::::new( WasmExecutionMethod::Interpreted, None, 8, @@ -205,7 +219,8 @@ fn record_proof_works() { "Core_execute_block", &block.encode(), &runtime_code, - ).expect("Executes block while using the proof backend"); + ) + .expect("Executes block while using the proof backend"); } #[test] @@ -214,7 +229,39 @@ fn call_runtime_api_with_multiple_arguments() { let data = vec![1, 2, 4, 5, 6, 7, 8, 8, 10, 12]; let block_id = BlockId::Number(client.chain_info().best_number); - client.runtime_api() + client + .runtime_api() .test_multiple_arguments(&block_id, data.clone(), data.clone(), data.len() as u32) .unwrap(); } + +#[test] +fn disable_logging_works() { + if std::env::var("RUN_TEST").is_ok() { + sp_tracing::try_init_simple(); + + let mut builder = + TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm); + builder.genesis_init_mut().set_wasm_code( + substrate_test_runtime_client::runtime::wasm_binary_logging_disabled_unwrap().to_vec(), + ); + + let client = builder.build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(0); + runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); + log::error!("Logging from native works"); + } else { + let executable = std::env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("RUN_TEST", "1") + .env("RUST_LOG", "info") + .args(&["--nocapture", "disable_logging_works"]) + .output() + .unwrap(); + + let output = dbg!(String::from_utf8(output.stderr).unwrap()); + assert!(!output.contains("Hey I'm runtime")); + assert!(output.contains("Logging from native works")); + } +} diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index 2f7fd6d06bcd3..5a6025f463af0 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 851d2b8a4b652..9dd84c24b6781 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -5,7 +5,10 @@ error[E0053]: method `test` has an incompatible type for trait | --- type in trait ... 
19 | fn test(data: String) {} - | ^^^^^^ expected `u64`, found struct `std::string::String` + | ^^^^^^ + | | + | expected `u64`, found struct `std::string::String` + | help: change the parameter type to match the trait: `u64` | = note: expected fn pointer `fn(u64)` found fn pointer `fn(std::string::String)` @@ -21,10 +24,20 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | |_- type in trait 16 | 17 | sp_api::impl_runtime_apis! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found struct `std::string::String` + | -^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found struct `std::string::String` + | | +18 | | impl self::Api for Runtime { +19 | | fn test(data: String) {} +20 | | } +... | +32 | | } +33 | | } + | |_- help: change the parameter type to match the trait: `std::option::Option` | - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.rs b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs similarity index 51% rename from primitives/api/test/tests/ui/mock_only_one_error_type.rs rename to primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs index 1c3f13dbb9bf1..fd654ffdc63d6 100644 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.rs +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs @@ -1,12 +1,9 @@ use substrate_test_runtime_client::runtime::Block; +use sp_api::ApiError; sp_api::decl_runtime_apis! { pub trait Api { - fn test(data: u64); - } - - pub trait Api2 { - fn test(data: u64); + fn test(); } } @@ -14,15 +11,10 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { - type Error = u32; - - fn test(data: u64) {} - } - - impl Api2 for MockApi { - type Error = u64; - - fn test(data: u64) {} + #[advanced] + fn test(&self, _: BlockId) -> Result, ApiError> { + Ok(().into()) + } } } diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr new file mode 100644 index 0000000000000..47cd9e01d910f --- /dev/null +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -0,0 +1,13 @@ +error: `BlockId` needs to be taken by reference and not by value! + --> $DIR/mock_advanced_block_id_by_value.rs:12:1 + | +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | #[advanced] +15 | | fn test(&self, _: BlockId) -> Result, ApiError> { +... 
| +18 | | } +19 | | } + | |_^ + | + = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/mock_only_error_associated_type.rs b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs similarity index 59% rename from primitives/api/test/tests/ui/mock_only_error_associated_type.rs rename to primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs index bbd3c71c94017..a15ef133fa6c4 100644 --- a/primitives/api/test/tests/ui/mock_only_error_associated_type.rs +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs @@ -1,8 +1,9 @@ use substrate_test_runtime_client::runtime::Block; +use sp_api::ApiError; sp_api::decl_runtime_apis! { pub trait Api { - fn test(data: u64); + fn test(); } } @@ -10,9 +11,10 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { - type OtherData = u32; - - fn test(data: u64) {} + #[advanced] + fn test(&self) -> Result, ApiError> { + Ok(().into()) + } } } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr new file mode 100644 index 0000000000000..87d3660316b1e --- /dev/null +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr @@ -0,0 +1,5 @@ +error: If using the `advanced` attribute, it is required that the function takes at least one argument, the `BlockId`. + --> $DIR/mock_advanced_missing_blockid.rs:15:3 + | +15 | fn test(&self) -> Result, ApiError> { + | ^^ diff --git a/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr b/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr deleted file mode 100644 index beced70413bb0..0000000000000 --- a/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Only associated type with name `Error` is allowed - --> $DIR/mock_only_error_associated_type.rs:13:3 - | -13 | type OtherData = u32; - | ^^^^ diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr deleted file mode 100644 index 65d05e83a7f69..0000000000000 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr +++ /dev/null @@ -1,30 +0,0 @@ -error: Error type can not change between runtime apis - --> $DIR/mock_only_one_error_type.rs:23:3 - | -23 | type Error = u64; - | ^^^^ - -error[E0277]: the trait bound `u32: std::convert::From` is not satisfied - --> $DIR/mock_only_one_error_type.rs:15:1 - | -15 | / sp_api::mock_impl_runtime_apis! { -16 | | impl Api for MockApi { -17 | | type Error = u32; -18 | | -... 
| -26 | | } -27 | | } - | |_^ the trait `std::convert::From` is not implemented for `u32` - | - ::: $WORKSPACE/primitives/api/src/lib.rs:350:35 - | -350 | type Error: std::fmt::Debug + From; - | ------------ required by this bound in `sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ApiErrorExt` - | - = help: the following implementations were found: - > - > - > - > - and 18 others - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index ed5b64144a6f6..7385fe4745989 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -22,10 +22,20 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | |_- type in trait ... 12 | sp_api::mock_impl_runtime_apis! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_- help: change the parameter type to match the trait: `Option` | - = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait @@ -40,8 +50,18 @@ error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for t | |_- type in trait ... 12 | sp_api::mock_impl_runtime_apis! 
{ - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `()` + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_- help: change the parameter type to match the trait: `Option<u64>` | - = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<sp_runtime::generic::Block<sp_runtime::generic::Header<u64, sp_runtime::traits::BlakeTwo256>, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<u64>, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<sp_runtime::generic::Block<sp_runtime::generic::Header<u64, sp_runtime::traits::BlakeTwo256>, substrate_test_runtime_client::substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&MockApi, &BlockId<Block<Header<u64, BlakeTwo256>, Extrinsic>>, ExecutionContext, Option<u64>, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&MockApi, &BlockId<Block<Header<u64, BlakeTwo256>, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/no_default_implementation.rs b/primitives/api/test/tests/ui/no_default_implementation.rs new file mode 100644 index 0000000000000..6af93d6b86539 --- /dev/null +++ b/primitives/api/test/tests/ui/no_default_implementation.rs @@ -0,0 +1,9 @@ +sp_api::decl_runtime_apis! { + pub trait Api { + fn test() { + println!("Hey, I'm a default implementation!"); + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/no_default_implementation.stderr b/primitives/api/test/tests/ui/no_default_implementation.stderr new file mode 100644 index 0000000000000..0ccece1441916 --- /dev/null +++ b/primitives/api/test/tests/ui/no_default_implementation.stderr @@ -0,0 +1,8 @@ +error: A runtime API function cannot have a default implementation! + --> $DIR/no_default_implementation.rs:3:13 + | +3 | fn test() { + | ___________________^ +4 | | println!("Hey, I'm a default implementation!"); +5 | | } + | |_________^ diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index c3e4850036090..a0a16c4a493db 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -5,7 +5,10 @@ error[E0053]: method `test` has an incompatible type for trait | --- type in trait ... 19 | fn test(data: &u64) { - | ^^^^ expected `u64`, found `&u64` + | ^^^^ + | | + | expected `u64`, found `&u64` + | help: change the parameter type to match the trait: `u64` | = note: expected fn pointer `fn(u64)` found fn pointer `fn(&u64)` @@ -21,10 +24,20 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | |_- type in trait 16 | 17 | sp_api::impl_runtime_apis! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found `&u64` + | -^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `&u64` + | | +18 | | impl self::Api for Runtime { +19 | | fn test(data: &u64) { +20 | | unimplemented!() +...
| +34 | | } +35 | | } + | |_- help: change the parameter type to match the trait: `std::option::Option<u64>` | - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<u64>, std::vec::Vec<_>) -> std::result::Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId<__SR_API_BLOCK__>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<&u64>, std::vec::Vec<_>) -> std::result::Result<_, _>` + = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<u64>, Vec<_>) -> Result<_, _>` + found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 2ab6823759572..6849dc25f8561 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." @@ -15,15 +15,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.126", optional = true, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../io" } [features] default = [ "std" ] -std = [ "full_crypto", "sp-core/std", "codec/std", "serde", "sp-std/std", "sp-io/std" ] +std = [ + "full_crypto", + "sp-core/std", + "codec/std", + "scale-info/std", + "serde", + "sp-std/std", + "sp-io/std", +] # This feature enables all crypto primitives for `no_std` builds like microcontrollers # or Intel SGX. diff --git a/primitives/application-crypto/src/ecdsa.rs b/primitives/application-crypto/src/ecdsa.rs index 287ac8f3afcff..915e16ba3b1a2 100644 --- a/primitives/application-crypto/src/ecdsa.rs +++ b/primitives/application-crypto/src/ecdsa.rs @@ -1,22 +1,23 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate.
-// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Ecdsa crypto types. -use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; @@ -32,9 +33,9 @@ mod app { } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/primitives/application-crypto/src/ed25519.rs b/primitives/application-crypto/src/ed25519.rs index e761745cf5425..09ce48fcb274c 100644 --- a/primitives/application-crypto/src/ed25519.rs +++ b/primitives/application-crypto/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! Ed25519 crypto types. -use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; @@ -33,9 +33,9 @@ mod app { } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 12e11d690541a..baa6560667059 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,31 +18,32 @@ //! Traits and macros for constructing application specific strongly typed crypto wrappers.
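For orientation (not part of the patch): the `app_crypto!` macro re-exported from this lib.rs is the crate's entry point for declaring application-specific key types. A minimal, hedged usage sketch follows; the `DEMO` key type id is a hypothetical stand-in, mirroring the crate's own `b"fuba"` doc example below.

```rust
// Minimal sketch, assuming the `sp-application-crypto` crate as patched here.
use sp_application_crypto::{app_crypto, sr25519, KeyTypeId};

// Hypothetical 4-byte identifier for an imaginary "demo" component.
pub const DEMO: KeyTypeId = KeyTypeId(*b"demo");

// Expands to `Public`, `Signature` (and, with `full_crypto`, `Pair`) wrapper
// types that are statically bound to the `DEMO` key type.
app_crypto!(sr25519, DEMO);
```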
#![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] -#[doc(hidden)] -pub use sp_core::{self, crypto::{CryptoType, CryptoTypePublicPair, Public, Derive, IsWrappedBy, Wraps}, RuntimeDebug}; +pub use sp_core::crypto::{key_types, CryptoTypeId, KeyTypeId}; #[doc(hidden)] #[cfg(feature = "full_crypto")] -pub use sp_core::crypto::{SecretStringError, DeriveJunction, Ss58Codec, Pair}; -pub use sp_core::crypto::{KeyTypeId, CryptoTypeId, key_types}; +pub use sp_core::crypto::{DeriveJunction, Pair, SecretStringError, Ss58Codec}; +#[doc(hidden)] +pub use sp_core::{ + self, + crypto::{CryptoType, CryptoTypePublicPair, Derive, IsWrappedBy, Public, Wraps}, + RuntimeDebug, +}; #[doc(hidden)] pub use codec; #[doc(hidden)] +pub use scale_info; +#[doc(hidden)] #[cfg(feature = "std")] pub use serde; #[doc(hidden)] -pub use sp_std::{ - convert::TryFrom, - ops::Deref, - vec::Vec, -}; +pub use sp_std::{convert::TryFrom, ops::Deref, vec::Vec}; +pub mod ecdsa; pub mod ed25519; pub mod sr25519; -pub mod ecdsa; mod traits; pub use traits::*; @@ -51,7 +52,7 @@ pub use traits::*; /// Application-specific types whose identifier is `$key_type`. /// /// ```rust -///# use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; +/// # use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; /// // Declare a new set of crypto types using Ed25519 logic that identifies as `KeyTypeId` /// // of value `b"fuba"`. /// app_crypto!(ed25519, KeyTypeId(*b"fuba")); @@ -61,8 +62,17 @@ pub use traits::*; macro_rules! app_crypto { ($module:ident, $key_type:expr) => { $crate::app_crypto_public_full_crypto!($module::Public, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_signature_full_crypto!($module::Signature, $key_type, $module::CRYPTO_ID); + $crate::app_crypto_public_common!( + $module::Public, + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); + $crate::app_crypto_signature_full_crypto!( + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); $crate::app_crypto_signature_common!($module::Signature, $key_type); $crate::app_crypto_pair!($module::Pair, $key_type, $module::CRYPTO_ID); }; @@ -72,7 +82,7 @@ macro_rules! app_crypto { /// Application-specific types whose identifier is `$key_type`. /// /// ```rust -///# use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; +/// # use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; /// // Declare a new set of crypto types using Ed25519 logic that identifies as `KeyTypeId` /// // of value `b"fuba"`. /// app_crypto!(ed25519, KeyTypeId(*b"fuba")); @@ -82,8 +92,17 @@ macro_rules! app_crypto { macro_rules! app_crypto { ($module:ident, $key_type:expr) => { $crate::app_crypto_public_not_full_crypto!($module::Public, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_signature_not_full_crypto!($module::Signature, $key_type, $module::CRYPTO_ID); + $crate::app_crypto_public_common!( + $module::Public, + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); + $crate::app_crypto_signature_not_full_crypto!( + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); $crate::app_crypto_signature_common!($module::Signature, $key_type); }; } @@ -93,7 +112,7 @@ macro_rules! app_crypto { #[macro_export] macro_rules! app_crypto_pair { ($pair:ty, $key_type:expr, $crypto_type:expr) => { - $crate::wrap!{ + $crate::wrap!
{ /// A generic `AppPublic` wrapper type over $pair crypto; this has no specific App. #[derive(Clone)] pub struct Pair($pair); } @@ -111,12 +130,16 @@ macro_rules! app_crypto_pair { $crate::app_crypto_pair_functions_if_std!($pair); - fn derive< - Iter: Iterator<Item=$crate::DeriveJunction> - >(&self, path: Iter, seed: Option<Self::Seed>) -> Result<(Self, Option<Self::Seed>), Self::DeriveError> { + fn derive<Iter: Iterator<Item = $crate::DeriveJunction>>( + &self, + path: Iter, + seed: Option<Self::Seed>, + ) -> Result<(Self, Option<Self::Seed>), Self::DeriveError> { self.0.derive(path, seed).map(|x| (Self(x.0), x.1)) } - fn from_seed(seed: &Self::Seed) -> Self { Self(<$pair>::from_seed(seed)) } + fn from_seed(seed: &Self::Seed) -> Self { + Self(<$pair>::from_seed(seed)) + } fn from_seed_slice(seed: &[u8]) -> Result<Self, $crate::SecretStringError> { <$pair>::from_seed_slice(seed).map(Self) } @@ -137,8 +160,12 @@ macro_rules! app_crypto_pair { ) -> bool { <$pair>::verify_weak(sig, message, pubkey) } - fn public(&self) -> Self::Public { Public(self.0.public()) } - fn to_raw_vec(&self) -> $crate::Vec<u8> { self.0.to_raw_vec() } + fn public(&self) -> Self::Public { + Public(self.0.public()) + } + fn to_raw_vec(&self) -> $crate::Vec<u8> { + self.0.to_raw_vec() + } } impl $crate::AppKey for Pair { @@ -167,22 +194,22 @@ macro_rules! app_crypto_pair_functions_if_std { (Self(r.0), r.1, r.2) } - fn from_phrase(phrase: &str, password: Option<&str>) - -> Result<(Self, Self::Seed), $crate::SecretStringError> - { + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), $crate::SecretStringError> { <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) } - } + }; } #[doc(hidden)] #[cfg(not(feature = "std"))] #[macro_export] macro_rules! app_crypto_pair_functions_if_std { - ($pair:ty) => {} + ($pair:ty) => {}; } - /// Declares Public type which is functionally equivalent to `$public`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature @@ -191,15 +218,17 @@ macro_rules! app_crypto_pair_functions_if_std { #[macro_export] macro_rules! app_crypto_public_full_crypto { ($public:ty, $key_type:expr, $crypto_type:expr) => { - $crate::wrap!{ + $crate::wrap! { /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. #[derive( - Clone, Default, Eq, PartialEq, Ord, PartialOrd, + Clone, Default, Eq, Hash, PartialEq, PartialOrd, Ord, $crate::codec::Encode, $crate::codec::Decode, $crate::RuntimeDebug, + $crate::codec::MaxEncodedLen, + $crate::scale_info::TypeInfo, )] - #[derive(Hash)] + #[codec(crate = $crate::codec)] pub struct Public($public); } @@ -215,7 +244,7 @@ macro_rules! app_crypto_public_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Public type which is functionally equivalent to `$public`, but is new @@ -226,13 +255,15 @@ macro_rules! app_crypto_public_full_crypto { #[macro_export] macro_rules! app_crypto_public_not_full_crypto { ($public:ty, $key_type:expr, $crypto_type:expr) => { - $crate::wrap!{ + $crate::wrap! { /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. #[derive( Clone, Default, Eq, PartialEq, Ord, PartialOrd, $crate::codec::Encode, $crate::codec::Decode, $crate::RuntimeDebug, + $crate::codec::MaxEncodedLen, + $crate::scale_info::TypeInfo, )] pub struct Public($public); } @@ -246,7 +277,7 @@ macro_rules!
 app_crypto_public_not_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Public type which is functionally equivalent to `$public`, but is new @@ -259,15 +290,21 @@ macro_rules! app_crypto_public_common { $crate::app_crypto_public_common_if_std!(); impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { self.0.as_ref() } + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } } impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { self.0.as_mut() } + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } } impl $crate::Public for Public { - fn from_slice(x: &[u8]) -> Self { Self(<$public>::from_slice(x)) } + fn from_slice(x: &[u8]) -> Self { + Self(<$public>::from_slice(x)) + } fn to_public_crypto_pair(&self) -> $crate::CryptoTypePublicPair { $crate::CryptoTypePublicPair($crypto_type, self.to_raw_vec()) @@ -278,14 +315,20 @@ macro_rules! app_crypto_public_common { type Generic = $public; } - impl $crate::RuntimeAppPublic for Public where $public: $crate::RuntimePublic<Signature=$sig> { + impl $crate::RuntimeAppPublic for Public + where + $public: $crate::RuntimePublic<Signature = $sig>, + { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; type Signature = Signature; fn all() -> $crate::Vec<Self> { - <$public as $crate::RuntimePublic>::all($key_type).into_iter().map(Self).collect() + <$public as $crate::RuntimePublic>::all($key_type) + .into_iter() + .map(Self) + .collect() } fn generate_pair(seed: Option<$crate::Vec<u8>>) -> Self { @@ -293,11 +336,8 @@ macro_rules! app_crypto_public_common { } fn sign<M: AsRef<[u8]>>(&self, msg: &M) -> Option<Self::Signature> { - <$public as $crate::RuntimePublic>::sign( - self.as_ref(), - $key_type, - msg, - ).map(Signature) + <$public as $crate::RuntimePublic>::sign(self.as_ref(), $key_type, msg) + .map(Signature) } fn verify<M: AsRef<[u8]>>(&self, msg: &M, signature: &Self::Signature) -> bool { @@ -317,10 +357,7 @@ macro_rules! app_crypto_public_common { impl From<&Public> for $crate::CryptoTypePublicPair { fn from(key: &Public) -> Self { - $crate::CryptoTypePublicPair( - $crypto_type, - $crate::Public::to_raw_vec(key), - ) + $crate::CryptoTypePublicPair($crypto_type, $crate::Public::to_raw_vec(key)) } } @@ -331,7 +368,7 @@ macro_rules! app_crypto_public_common { <$public>::try_from(data).map(Into::into) } } - } + }; } /// Implements traits for the public key type if `feature = "std"` is enabled. @@ -341,8 +378,9 @@ macro_rules! app_crypto_public_common { macro_rules! app_crypto_public_common_if_std { () => { impl $crate::Derive for Public { - fn derive<Iter: Iterator<Item = $crate::DeriveJunction>>(&self, - path: Iter + fn derive<Iter: Iterator<Item = $crate::DeriveJunction>>( + &self, + path: Iter, ) -> Option<Self> { self.0.derive(path).map(Self) } @@ -356,8 +394,9 @@ macro_rules! app_crypto_public_common_if_std { } impl $crate::serde::Serialize for Public { - fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where - S: $crate::serde::Serializer + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: $crate::serde::Serializer, { use $crate::Ss58Codec; serializer.serialize_str(&self.to_ss58check()) @@ -365,15 +404,16 @@ macro_rules! app_crypto_public_common_if_std { } impl<'de> $crate::serde::Deserialize<'de> for Public { - fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where - D: $crate::serde::Deserializer<'de> + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: $crate::serde::Deserializer<'de>, { use $crate::Ss58Codec; Public::from_ss58check(&String::deserialize(deserializer)?)
.map_err(|e| $crate::serde::de::Error::custom(format!("{:?}", e))) } } - } + }; } #[cfg(not(feature = "std"))] @@ -382,10 +422,9 @@ macro_rules! app_crypto_public_common_if_std { () => { impl $crate::Derive for Public {} - } + }; } - /// Declares Signature type which is functionally equivalent to `$sig`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature @@ -400,6 +439,7 @@ macro_rules! app_crypto_signature_full_crypto { $crate::codec::Encode, $crate::codec::Decode, $crate::RuntimeDebug, + $crate::scale_info::TypeInfo, )] #[derive(Hash)] pub struct Signature($sig); @@ -417,7 +457,7 @@ macro_rules! app_crypto_signature_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Signature type which is functionally equivalent to `$sig`, but is new @@ -433,6 +473,7 @@ macro_rules! app_crypto_signature_not_full_crypto { #[derive(Clone, Default, Eq, PartialEq, $crate::codec::Encode, $crate::codec::Decode, + $crate::scale_info::TypeInfo, $crate::RuntimeDebug, )] pub struct Signature($sig); @@ -447,7 +488,7 @@ macro_rules! app_crypto_signature_not_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Signature type which is functionally equivalent to `$sig`, but is new @@ -460,11 +501,15 @@ macro_rules! app_crypto_signature_common { impl $crate::Deref for Signature { type Target = [u8]; - fn deref(&self) -> &Self::Target { self.0.as_ref() } + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } } impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { self.0.as_ref() } + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } } impl $crate::AppSignature for Signature { @@ -478,7 +523,7 @@ macro_rules! app_crypto_signature_common { Ok(<$sig>::try_from(data.as_slice())?.into()) } } - } + }; } /// Implement bidirectional `From` and one-way `AsRef`/`AsMut` for two types, `$inner` and `$outer`. @@ -546,10 +591,9 @@ macro_rules! with_pair { } } - #[doc(hidden)] #[macro_export] #[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] macro_rules! with_pair { - ( $( $def:tt )* ) => {} + ( $( $def:tt )* ) => {}; } diff --git a/primitives/application-crypto/src/sr25519.rs b/primitives/application-crypto/src/sr25519.rs index 4700e0f756717..f51236f2ab384 100644 --- a/primitives/application-crypto/src/sr25519.rs +++ b/primitives/application-crypto/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! Sr25519 crypto types.
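Before the sr25519-specific hunks, a hedged sketch of how the wrapper types generated by the macros above are exercised in a std build (only names this crate re-exports; the function is illustrative, not part of the patch):

```rust
// The generated app types implement `sp_core::Pair`, so the usual
// generate/sign/verify flow works through the wrappers.
use sp_application_crypto::sr25519::{AppPair, AppPublic};
use sp_core::crypto::Pair as _;

fn sign_and_verify_demo() {
    // `generate` is the std-only convenience constructor on the `Pair` trait.
    let (pair, _seed) = AppPair::generate();
    let public: AppPublic = pair.public();
    let message = b"hello substrate";
    let signature = pair.sign(&message[..]);
    // Verification goes through the same application-specific types.
    assert!(AppPair::verify(&signature, &message[..], &public));
}
```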
-use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; @@ -33,9 +33,9 @@ mod app { } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/primitives/application-crypto/src/traits.rs b/primitives/application-crypto/src/traits.rs index f06e194aefddf..376d12f0c7a3e 100644 --- a/primitives/application-crypto/src/traits.rs +++ b/primitives/application-crypto/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,7 @@ use sp_core::crypto::Pair; use codec::Codec; -use sp_core::crypto::{KeyTypeId, CryptoType, CryptoTypeId, IsWrappedBy, Public}; +use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Public}; use sp_std::{fmt::Debug, vec::Vec}; /// An application-specific key. @@ -57,7 +57,7 @@ impl<T> MaybeHash for T {} /// Type which implements Debug and Hash in std, not when no-std (no-std variant with crypto). #[cfg(all(not(feature = "std"), feature = "full_crypto"))] -pub trait MaybeDebugHash: sp_std::hash::Hash {} +pub trait MaybeDebugHash: sp_std::hash::Hash {} #[cfg(all(not(feature = "std"), feature = "full_crypto"))] impl<T: sp_std::hash::Hash> MaybeDebugHash for T {} @@ -66,15 +66,23 @@ pub trait AppPublic: AppKey + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec { /// The wrapped type which is just a plain instance of `Public`. - type Generic: - IsWrappedBy<Self> + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec; + type Generic: IsWrappedBy<Self> + + Public + + Ord + + PartialOrd + + Eq + + PartialEq + + Debug + + MaybeHash + + codec::Codec; } /// An application's key pair. #[cfg(feature = "full_crypto")] -pub trait AppPair: AppKey + Pair<Public=<Self as AppKey>::Public> { +pub trait AppPair: AppKey + Pair<Public = <Self as AppKey>::Public> { /// The wrapped type which is just a plain instance of `Pair`. - type Generic: IsWrappedBy<Self> + Pair<Public=<<Self as AppKey>::Public as AppPublic>::Generic>; + type Generic: IsWrappedBy<Self> + + Pair<Public = <<Self as AppKey>::Public as AppPublic>::Generic>; } /// An application's signature. @@ -122,7 +130,7 @@ pub trait RuntimeAppPublic: Sized { const CRYPTO_ID: CryptoTypeId; /// The signature that will be generated when signing with the corresponding private key. - type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone; + type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone + scale_info::TypeInfo; /// Returns all public keys for this application in the keystore.
fn all() -> crate::Vec<Self>; diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 0e7fdc7559ca1..468bfee3cc010 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -13,9 +13,9 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -sp-keystore = { version = "0.8.0", path = "../../keystore" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../core" } +sp-keystore = { version = "0.10.0-dev", path = "../../keystore", default-features = false } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-api = { version = "2.0.0", path = "../../api" } -sp-application-crypto = { version = "2.0.0", path = "../" } +sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } +sp-api = { version = "4.0.0-dev", path = "../../api" } +sp-application-crypto = { version = "4.0.0-dev", path = "../" } diff --git a/primitives/application-crypto/test/src/ecdsa.rs b/primitives/application-crypto/test/src/ecdsa.rs index 89def7cd68770..c4aa6a2afbd61 100644 --- a/primitives/application-crypto/test/src/ecdsa.rs +++ b/primitives/application-crypto/test/src/ecdsa.rs @@ -1,42 +1,37 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //!
Integration tests for ecdsa -use std::sync::Arc; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::ecdsa::{AppPair, AppPublic}; +use sp_core::{crypto::Pair, testing::ECDSA}; +use sp_keystore::{testing::KeyStore, SyncCryptoStore}; use sp_runtime::generic::BlockId; -use sp_core::{ - crypto::Pair, - testing::ECDSA, -}; -use sp_keystore::{ - SyncCryptoStore, - testing::KeyStore, -}; +use std::sync::Arc; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::ecdsa::{AppPair, AppPublic}; #[test] fn ecdsa_works_in_runtime() { let keystore = Arc::new(KeyStore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() + let (signature, public) = test_client + .runtime_api() .test_ecdsa_crypto(&BlockId::Number(0)) .expect("Tests `ecdsa` crypto."); diff --git a/primitives/application-crypto/test/src/ed25519.rs b/primitives/application-crypto/test/src/ed25519.rs index 9df198dc4f9d2..7cfd801388c78 100644 --- a/primitives/application-crypto/test/src/ed25519.rs +++ b/primitives/application-crypto/test/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,28 +17,22 @@ //! Integration tests for ed25519 -use std::sync::Arc; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::ed25519::{AppPair, AppPublic}; +use sp_core::{crypto::Pair, testing::ED25519}; +use sp_keystore::{testing::KeyStore, SyncCryptoStore}; use sp_runtime::generic::BlockId; -use sp_core::{ - crypto::Pair, - testing::ED25519, -}; -use sp_keystore::{ - SyncCryptoStore, - testing::KeyStore, -}; +use std::sync::Arc; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::ed25519::{AppPair, AppPublic}; #[test] fn ed25519_works_in_runtime() { let keystore = Arc::new(KeyStore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() + let (signature, public) = test_client + .runtime_api() .test_ed25519_crypto(&BlockId::Number(0)) .expect("Tests `ed25519` crypto."); diff --git a/primitives/application-crypto/test/src/lib.rs b/primitives/application-crypto/test/src/lib.rs index b78539239691a..6b7734764e793 100644 --- a/primitives/application-crypto/test/src/lib.rs +++ b/primitives/application-crypto/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ //! 
 Integration tests for application crypto +#[cfg(test)] +mod ecdsa; #[cfg(test)] mod ed25519; #[cfg(test)] mod sr25519; -#[cfg(test)] -mod ecdsa; diff --git a/primitives/application-crypto/test/src/sr25519.rs b/primitives/application-crypto/test/src/sr25519.rs index f96d7b7ef0006..12dfbc609fb01 100644 --- a/primitives/application-crypto/test/src/sr25519.rs +++ b/primitives/application-crypto/test/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,28 +17,22 @@ //! Integration tests for sr25519 -use std::sync::Arc; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::sr25519::{AppPair, AppPublic}; +use sp_core::{crypto::Pair, testing::SR25519}; +use sp_keystore::{testing::KeyStore, SyncCryptoStore}; use sp_runtime::generic::BlockId; -use sp_core::{ - crypto::Pair, - testing::SR25519, -}; -use sp_keystore::{ - SyncCryptoStore, - testing::KeyStore, -}; +use std::sync::Arc; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::sr25519::{AppPair, AppPublic}; #[test] fn sr25519_works_in_runtime() { let keystore = Arc::new(KeyStore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() + let (signature, public) = test_client + .runtime_api() .test_sr25519_crypto(&BlockId::Number(0)) .expect("Tests `sr25519` crypto."); diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index b8e482491a7d4..abdbd4e60d041 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -15,23 +15,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" +static_assertions = "1.1.0" num-traits = { version = "0.2.8", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-debug-derive = { version = "2.0.0", default-features = false, path = "../../primitives/debug-derive" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +serde = { version = "1.0.126", optional = true, features = ["derive"] } +sp-debug-derive = { version = "3.0.0", default-features = false, path = "../debug-derive" } [dev-dependencies] rand = "0.7.2" criterion = "0.3" -serde_json = "1.0" -primitive-types = "0.7.0" +primitive-types = "0.10.1" [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "num-traits/std", "sp-std/std", "serde", diff --git a/primitives/arithmetic/benches/bench.rs b/primitives/arithmetic/benches/bench.rs index 7a576c8af144b..02db00aa0bf82 100644 ---
a/primitives/arithmetic/benches/bench.rs +++ b/primitives/arithmetic/benches/bench.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -use criterion::{Criterion, Throughput, BenchmarkId, criterion_group, criterion_main}; -use sp_arithmetic::biguint::{BigUint, Single}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; use rand::Rng; +use sp_arithmetic::biguint::{BigUint, Single}; fn random_big_uint(size: usize) -> BigUint { let mut rng = rand::thread_rng(); @@ -73,7 +73,7 @@ fn bench_division(c: &mut Criterion) { } } -criterion_group!{ +criterion_group! { name = benches; config = Criterion::default(); targets = bench_addition, bench_subtraction, bench_multiplication, bench_division diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 6a28142f9e825..d10eccfc7c74a 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -14,11 +14,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-arithmetic = { version = "2.0.0", path = ".." } +sp-arithmetic = { version = "4.0.0-dev", path = ".." } honggfuzz = "0.5.49" -primitive-types = "0.7.0" +primitive-types = "0.10.1" num-bigint = "0.2" -num-traits = "0.2" [[bin]] name = "biguint" diff --git a/primitives/arithmetic/fuzzer/src/biguint.rs b/primitives/arithmetic/fuzzer/src/biguint.rs index 481ac5561dda2..ca5b8379afff5 100644 --- a/primitives/arithmetic/fuzzer/src/biguint.rs +++ b/primitives/arithmetic/fuzzer/src/biguint.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,8 +60,13 @@ fn main() { let expected = ue.unwrap() + ve.unwrap(); let t = u.clone().add(&v); assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} + {:?} ===> {:?} != {:?}", u, v, t, expected, + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} + {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, ); } @@ -74,8 +79,13 @@ fn main() { let t = t.unwrap(); let expected = expected.unwrap(); assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} - {:?} ===> {:?} != {:?}", u, v, t, expected, + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} - {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, ); } } @@ -84,31 +94,51 @@ fn main() { let expected = ue.unwrap() * ve.unwrap(); let t = u.clone().mul(&v); assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} * {:?} ===> {:?} != {:?}", u, v, t, expected, + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} * {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, ); } if check_digit_lengths(&u, &v, 4) { let (ue, ve) = (ue.unwrap(), ve.unwrap()); if ve == 0 { - return; + return } let (q, r) = (ue / ve, ue % ve); if let Some((qq, rr)) = u.clone().div(&v, true) { assert_eq!( - u128::try_from(qq.clone()).unwrap(), q, - "{:?} / {:?} ===> {:?} != {:?}", u, v, qq, q, + u128::try_from(qq.clone()).unwrap(), + q, + "{:?} / {:?} ===> {:?} != {:?}", + u, + v, + qq, + q, ); assert_eq!( - u128::try_from(rr.clone()).unwrap(), r, - "{:?} % {:?} ===> {:?} != {:?}", u, v, rr, r, + u128::try_from(rr.clone()).unwrap(), + r, + "{:?} % {:?} ===> {:?} != {:?}", + u, + v, + rr, + r, ); } else if v.len() == 1 { let qq = u.clone().div_unit(ve as Single); assert_eq!( - u128::try_from(qq.clone()).unwrap(), q, - "[single] {:?} / {:?} ===> {:?} != {:?}", u, v, qq, q, + u128::try_from(qq.clone()).unwrap(), + q, + "[single] {:?} / {:?} ===> {:?} != {:?}", + u, + v, + qq, + q, ); } else if v.msb() != 0 && u.msb() != 0 && u.len() > v.len() { panic!("div returned none for an unexpected reason"); @@ -175,7 +205,7 @@ fn assert_biguints_eq(a: &BigUint, b: &num_bigint::BigUint) { // `num_bigint::BigUint` doesn't expose it's internals, so we need to convert into that to // compare. - let limbs = (0 .. a.len()).map(|i| a.get(i)).collect(); + let limbs = (0..a.len()).map(|i| a.get(i)).collect(); let num_a = num_bigint::BigUint::new(limbs); assert!(&num_a == b, "\narithmetic: {:?}\nnum-bigint: {:?}", a, b); diff --git a/primitives/arithmetic/fuzzer/src/fixed_point.rs b/primitives/arithmetic/fuzzer/src/fixed_point.rs index 9a88197ac32ad..d8f058ae51e2c 100644 --- a/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,7 +28,7 @@ //! [here](https://docs.rs/honggfuzz/). use honggfuzz::fuzz; -use sp_arithmetic::{FixedPointNumber, FixedI64, traits::Saturating}; +use sp_arithmetic::{traits::Saturating, FixedI64, FixedPointNumber}; fn main() { loop { @@ -38,7 +38,8 @@ fn main() { // Check `from_rational` and division are consistent. 
 if y != 0 { - let f1 = FixedI64::saturating_from_integer(x) / FixedI64::saturating_from_integer(y); + let f1 = + FixedI64::saturating_from_integer(x) / FixedI64::saturating_from_integer(y); let f2 = FixedI64::saturating_from_rational(x, y); assert_eq!(f1.into_inner(), f2.into_inner()); } @@ -75,7 +76,8 @@ fn main() { let a = FixedI64::saturating_from_rational(2, 5); let b = a.saturating_mul_acc_int(x); let xx = FixedI64::saturating_from_integer(x); - let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 / FixedI64::accuracy() as i128; + let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 / + FixedI64::accuracy() as i128; assert_eq!(b, d); }); } diff --git a/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs b/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs index 5d06df3f1f8a2..3089d4b092183 100644 --- a/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs +++ b/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +16,8 @@ // limitations under the License. //! # Running -//! Running this fuzzer can be done with `cargo hfuzz run multiply_by_rational`. `honggfuzz` CLI options can -//! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! Running this fuzzer can be done with `cargo hfuzz run multiply_by_rational`. `honggfuzz` CLI +//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. //! //! # Debugging a panic //! Once a panic is found, it can be debugged with @@ -60,7 +60,7 @@ fn main() { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; if a.is_zero() { - return Zero::zero(); + return Zero::zero() } let c = c.max(1); @@ -70,7 +70,7 @@ fn mul_div(a: u128, b: u128, c: u128) -> u128 { let ce: U256 = c.into(); let r = ae * be / ce; - if r > u128::max_value().into() { + if r > u128::MAX.into() { a } else { r.as_u128() diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs index 3c1759d568523..2662565106e66 100644 --- a/primitives/arithmetic/fuzzer/src/normalize.rs +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. - //! # Running //! Running this fuzzer can be done with `cargo hfuzz run normalize`. `honggfuzz` CLI options can //! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. @@ -37,7 +36,9 @@ fn main() { loop { fuzz!(|data: (Vec<Ty>, Ty)| { let (data, norm) = data; - if data.len() == 0 { return; } + if data.len() == 0 { + return + } let pre_sum: u128 = data.iter().map(|x| *x as u128).sum(); let normalized = data.normalize(norm); @@ -50,13 +51,7 @@ fn main() { let sum: u128 = normalized.iter().map(|x| *x as u128).sum(); // if this function returns Ok(), then it will ALWAYS be accurate.
- assert_eq!( - sum, - norm as u128, - "sums don't match {:?}, {}", - normalized, - norm, - ); + assert_eq!(sum, norm as u128, "sums don't match {:?}, {}", normalized, norm); } else { panic!("Should have returned Ok for input = {:?}, target = {:?}", data, norm); } diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index 8ddbd0c6d59d9..7b90faa94069f 100644 --- a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,100 +16,94 @@ // limitations under the License. //! # Running -//! Running this fuzzer can be done with `cargo hfuzz run per_thing_rational`. `honggfuzz` CLI options can -//! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! Running this fuzzer can be done with `cargo hfuzz run per_thing_rational`. `honggfuzz` CLI +//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. //! //! # Debugging a panic //! Once a panic is found, it can be debugged with //! `cargo hfuzz run-debug per_thing_rational hfuzz_workspace/per_thing_rational/*.fuzz`. use honggfuzz::fuzz; -use sp_arithmetic::{ - PerThing, PerU16, Percent, Perbill, Perquintill, traits::SaturatedConversion, -}; +use sp_arithmetic::{traits::SaturatedConversion, PerThing, PerU16, Perbill, Percent, Perquintill}; fn main() { loop { - fuzz!(| - data: ((u16, u16), (u32, u32), (u64, u64)) - | { - + fuzz!(|data: ((u16, u16), (u32, u32), (u64, u64))| { let (u16_pair, u32_pair, u64_pair) = data; // peru16 let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); + let ratio = PerU16::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + PerU16::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); + let ratio = PerU16::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + PerU16::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); + let ratio = PerU16::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + PerU16::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); // percent let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); + let ratio = Percent::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + Percent::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); + let ratio = Percent::from_rational(smaller, 
 bigger); assert_per_thing_equal_error( ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + Percent::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); + let ratio = Percent::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + Percent::from_float(smaller as f64 / bigger.max(1) as f64), 1, ); // perbill let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = Perbill::from_rational_approximation(smaller, bigger); + let ratio = Perbill::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + Perbill::from_float(smaller as f64 / bigger.max(1) as f64), 100, ); let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Perbill::from_rational_approximation(smaller, bigger); + let ratio = Perbill::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + Perbill::from_float(smaller as f64 / bigger.max(1) as f64), 100, ); // perquintillion let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Perquintill::from_rational_approximation(smaller, bigger); + let ratio = Perquintill::from_rational(smaller, bigger); assert_per_thing_equal_error( ratio, - Perquintill::from_fraction(smaller as f64 / bigger.max(1) as f64), + Perquintill::from_float(smaller as f64 / bigger.max(1) as f64), 1000, ); - }) } } diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index 03f2bb1e55f6f..17ed323dc0ce4 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,9 @@ //! Infinite precision unsigned integer for substrate runtime. -use num_traits::{Zero, One}; -use sp_std::{cmp::Ordering, ops, prelude::*, vec, cell::RefCell, convert::TryFrom}; +use codec::{Decode, Encode}; +use num_traits::{One, Zero}; +use sp_std::{cell::RefCell, cmp::Ordering, convert::TryFrom, ops, prelude::*, vec}; // A sensible value for this would be half of the dword size of the host machine. Since the // runtime is compiled to 32bit webassembly, using 32 and 64 for single and double respectively @@ -33,6 +34,10 @@ const SHIFT: usize = 32; /// short form of _Base_. Analogous to the value 10 in base-10 decimal numbers. const B: Double = Single::max_value() as Double + 1; +static_assertions::const_assert!( + sp_std::mem::size_of::<Double>() - sp_std::mem::size_of::<Single>() == SHIFT / 8 +); + /// Splits a [`Double`] limb number into a tuple of two [`Single`] limb numbers. pub fn split(a: Double) -> (Single, Single) { let al = a as Single; @@ -74,7 +79,7 @@ fn div_single(a: Double, b: Single) -> (Double, Single) { } /// Simple wrapper around an infinitely large integer, represented as limbs of [`Single`]. -#[derive(Clone, Default)] +#[derive(Encode, Decode, Clone, Default)] pub struct BigUint { /// digits (limbs) of this number (sorted as msb -> lsb).
pub(crate) digits: Vec<Single>, @@ -100,7 +105,9 @@ impl BigUint { } /// Number of limbs. - pub fn len(&self) -> usize { self.digits.len() } + pub fn len(&self) -> usize { + self.digits.len() + } /// A naive getter for limb at `index`. Note that the order is lsb -> msb. /// @@ -151,7 +158,9 @@ impl BigUint { // by definition, a big-int number should never have leading zero limbs. This function // has the ability to cause this. There is nothing to do if the number already has 1 // limb only. call it a day and return. - if self.len().is_zero() { return; } + if self.len().is_zero() { + return + } let index = self.digits.iter().position(|&elem| elem != 0).unwrap_or(self.len() - 1); if index > 0 { @@ -163,7 +172,9 @@ impl BigUint { /// is already bigger than `size` limbs. pub fn lpad(&mut self, size: usize) { let n = self.len(); - if n >= size { return; } + if n >= size { + return + } let pad = size - n; let mut new_digits = (0..pad).map(|_| 0).collect::<Vec<Single>>(); new_digits.extend(self.digits.iter()); @@ -187,6 +198,7 @@ impl BigUint { let u = Double::from(self.checked_get(j).unwrap_or(0)); let v = Double::from(other.checked_get(j).unwrap_or(0)); let s = u + v + k; + // proof: any number % B will fit into `Single`. w.set(j, (s % B) as Single); k = s / B; } @@ -209,28 +221,24 @@ impl BigUint { let s = { let u = Double::from(self.checked_get(j).unwrap_or(0)); let v = Double::from(other.checked_get(j).unwrap_or(0)); - let mut needs_borrow = false; - let mut t = 0; - if let Some(v) = u.checked_sub(v) { - if let Some(v2) = v.checked_sub(k) { - t = v2 % B; - k = 0; - } else { - needs_borrow = true; - } + if let Some(v2) = u.checked_sub(v).and_then(|v1| v1.checked_sub(k)) { + // no borrow is needed. u - v - k can be computed as-is + let t = v2; + k = 0; + + t } else { - needs_borrow = true; - } - if needs_borrow { - t = u + B - v - k; + // borrow is needed. Add a `B` to u, before subtracting. + // PROOF: addition: `u + B < 2*B`, thus can fit in double. + // PROOF: subtraction: if `u - v - k < 0`, then `u + B - v - k < B`. + // NOTE: the order of operations is critical to ensure underflow won't happen. + let t = u + B - v - k; k = 1; + + t } - t }; - // PROOF: t either comes from `v2 % B`, or from `u + B - v - k`. The former is - // trivial. The latter will not overflow this branch will only happen if the sum of - // `u - v - k` part has been negative, hence `u + B - v - k < b`. w.set(j, s as Single); } @@ -258,16 +266,15 @@ impl BigUint { if self.get(j) == 0 { // Note: `with_capacity` allocates with 0. Explicitly set j + m to zero if // otherwise. - continue; + continue } let mut k = 0; for i in 0..m { // PROOF: (B−1) × (B−1) + (B−1) + (B−1) = B^2 −1 < B^2. addition is safe. - let t = - mul_single(self.get(j), other.get(i)) - + Double::from(w.get(i + j)) - + Double::from(k); + let t = mul_single(self.get(j), other.get(i)) + + Double::from(w.get(i + j)) + + Double::from(k); w.set(i + j, (t % B) as Single); // PROOF: (B^2 - 1) / B < B. conversion is safe.
k = (t / B) as Single; @@ -287,9 +294,9 @@ impl BigUint { let mut out = Self::with_capacity(n); let mut r: Single = 0; // PROOF: (B-1) * B + (B-1) still fits in double - let with_r = |x: Double, r: Single| { Double::from(r) * B + x }; + let with_r = |x: Single, r: Single| Double::from(r) * B + Double::from(x); for d in (0..n).rev() { - let (q, rr) = div_single(with_r(self.get(d).into(), r), other) ; + let (q, rr) = div_single(with_r(self.get(d), r), other); out.set(d, q as Single); r = rr; } @@ -310,11 +317,7 @@ impl BigUint { /// /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. pub fn div(self, other: &Self, rem: bool) -> Option<(Self, Self)> { - if other.len() <= 1 - || other.msb() == 0 - || self.msb() == 0 - || self.len() <= other.len() - { + if other.len() <= 1 || other.msb() == 0 || self.msb() == 0 || self.len() <= other.len() { return None } let n = other.len(); @@ -326,7 +329,7 @@ impl BigUint { // PROOF: 0 <= normalizer_bits < SHIFT 0 <= normalizer < B. all conversions are // safe. let normalizer_bits = other.msb().leading_zeros() as Single; - let normalizer = (2 as Single).pow(normalizer_bits as u32) as Single; + let normalizer = 2_u32.pow(normalizer_bits as u32) as Single; // step D1. let mut self_norm = self.mul(&Self::from(normalizer)); @@ -341,11 +344,9 @@ impl BigUint { // step D3.0 Find an estimate of q[j], named qhat. let (qhat, rhat) = { // PROOF: this always fits into `Double`. In the context of Single = u8, and - // Double = u16, think of 255 * 256 + 255 which is just u16::max_value(). + // Double = u16, think of 255 * 256 + 255 which is just u16::MAX. let dividend = - Double::from(self_norm.get(j + n)) - * B - + Double::from(self_norm.get(j + n - 1)); + Double::from(self_norm.get(j + n)) * B + Double::from(self_norm.get(j + n - 1)); let divisor = other_norm.get(n - 1); div_single(dividend, divisor) }; @@ -376,23 +377,30 @@ impl BigUint { test(); while (*rhat.borrow() as Double) < B { - if !test() { break; } + if !test() { + break + } } let qhat = qhat.into_inner(); // we don't need rhat anymore. just let it go out of scope when it does. // step D4 - let lhs = Self { digits: (j..=j+n).rev().map(|d| self_norm.get(d)).collect() }; + let lhs = Self { digits: (j..=j + n).rev().map(|d| self_norm.get(d)).collect() }; let rhs = other_norm.clone().mul(&Self::from(qhat)); let maybe_sub = lhs.sub(&rhs); let mut negative = false; let sub = match maybe_sub { Ok(t) => t, - Err(t) => { negative = true; t } + Err(t) => { + negative = true; + t + }, }; - (j..=j+n).for_each(|d| { self_norm.set(d, sub.get(d - j)); }); + (j..=j + n).for_each(|d| { + self_norm.set(d, sub.get(d - j)); + }); // step D5 // PROOF: the `test()` specifically decreases qhat until it is below `B`. conversion @@ -402,9 +410,11 @@ impl BigUint { // step D6: add back if negative happened. 
if negative { q.set(j, q.get(j) - 1); - let u = Self { digits: (j..=j+n).rev().map(|d| self_norm.get(d)).collect() }; + let u = Self { digits: (j..=j + n).rev().map(|d| self_norm.get(d)).collect() }; let r = other_norm.clone().add(&u); - (j..=j+n).rev().for_each(|d| { self_norm.set(d, r.get(d - j)); }) + (j..=j + n).rev().for_each(|d| { + self_norm.set(d, r.get(d - j)); + }) } } @@ -414,9 +424,8 @@ impl BigUint { if normalizer_bits > 0 { let s = SHIFT as u32; let nb = normalizer_bits; - for d in 0..n-1 { - let v = self_norm.get(d) >> nb - | self_norm.get(d + 1).overflowing_shl(s - nb).0; + for d in 0..n - 1 { + let v = self_norm.get(d) >> nb | self_norm.get(d + 1).overflowing_shl(s - nb).0; r.set(d, v); } r.set(n - 1, self_norm.get(n - 1) >> normalizer_bits); @@ -444,7 +453,6 @@ impl sp_std::fmt::Debug for BigUint { fn fmt(&self, _: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { Ok(()) } - } impl PartialEq for BigUint { @@ -474,7 +482,7 @@ impl Ord for BigUint { Ordering::Equal => lhs.cmp(rhs), _ => len_cmp, } - } + }, } } } @@ -585,9 +593,14 @@ pub mod tests { let a = SHIFT / 2; let b = SHIFT * 3 / 2; let num: Double = 1 << a | 1 << b; - // example when `Single = u8` - // assert_eq!(num, 0b_0001_0000_0001_0000) + assert_eq!(num, 0x_0001_0000_0001_0000); assert_eq!(split(num), (1 << a, 1 << a)); + + let a = SHIFT / 2 + 4; + let b = SHIFT / 2 - 4; + let num: Double = 1 << (SHIFT + a) | 1 << b; + assert_eq!(num, 0x_0010_0000_0000_1000); + assert_eq!(split(num), (1 << a, 1 << b)); } #[test] @@ -626,18 +639,9 @@ pub mod tests { #[test] fn equality_works() { - assert_eq!( - BigUint { digits: vec![1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - true, - ); - assert_eq!( - BigUint { digits: vec![3, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - false, - ); - assert_eq!( - BigUint { digits: vec![0, 1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - true, - ); + assert_eq!(BigUint { digits: vec![1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, true); + assert_eq!(BigUint { digits: vec![3, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, false); + assert_eq!(BigUint { digits: vec![0, 1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, true); } #[test] @@ -662,15 +666,9 @@ pub mod tests { fn can_try_build_numbers_from_types() { use sp_std::convert::TryFrom; assert_eq!(u64::try_from(with_limbs(1)).unwrap(), 1); - assert_eq!(u64::try_from(with_limbs(2)).unwrap(), u32::max_value() as u64 + 2); - assert_eq!( - u64::try_from(with_limbs(3)).unwrap_err(), - "cannot fit a number into u64", - ); - assert_eq!( - u128::try_from(with_limbs(3)).unwrap(), - u32::max_value() as u128 + u64::max_value() as u128 + 3 - ); + assert_eq!(u64::try_from(with_limbs(2)).unwrap(), u32::MAX as u64 + 2); + assert_eq!(u64::try_from(with_limbs(3)).unwrap_err(), "cannot fit a number into u64"); + assert_eq!(u128::try_from(with_limbs(3)).unwrap(), u32::MAX as u128 + u64::MAX as u128 + 3); } #[test] @@ -721,12 +719,14 @@ pub mod tests { let c = BigUint { digits: vec![1, 1, 2] }; let d = BigUint { digits: vec![0, 2] }; let e = BigUint { digits: vec![0, 1, 1, 2] }; + let f = BigUint { digits: vec![7, 8] }; assert!(a.clone().div(&b, true).is_none()); assert!(c.clone().div(&a, true).is_none()); assert!(c.clone().div(&d, true).is_none()); assert!(e.clone().div(&a, true).is_none()); + assert!(f.clone().div(&b, true).is_none()); assert!(c.clone().div(&b, true).is_some()); } @@ -734,6 +734,7 @@ pub mod tests { fn div_unit_works() { let a = BigUint { digits: vec![100] }; let b = BigUint { digits: vec![1, 100] }; + let c = BigUint { 
digits: vec![14, 28, 100] }; assert_eq!(a.clone().div_unit(1), a); assert_eq!(a.clone().div_unit(0), a); @@ -745,5 +746,9 @@ assert_eq!(b.clone().div_unit(2), BigUint::from(((B + 100) / 2) as Single)); assert_eq!(b.clone().div_unit(7), BigUint::from(((B + 100) / 7) as Single)); + assert_eq!(c.clone().div_unit(1), c); + assert_eq!(c.clone().div_unit(0), c); + assert_eq!(c.clone().div_unit(2), BigUint { digits: vec![7, 14, 50] }); + assert_eq!(c.clone().div_unit(7), BigUint { digits: vec![2, 4, 14] }); } } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index 970a24156027d..7a81f222c4926 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -1,37 +1,54 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Decimal Fixed Point implementations for Substrate runtime. -use sp_std::{ops::{self, Add, Sub, Mul, Div}, fmt::Debug, prelude::*, convert::{TryInto, TryFrom}}; -use codec::{Encode, Decode, CompactAs}; use crate::{ - helpers_128bit::multiply_by_rational, PerThing, + helpers_128bit::multiply_by_rational, traits::{ - SaturatedConversion, CheckedSub, CheckedAdd, CheckedMul, CheckedDiv, CheckedNeg, - Bounded, Saturating, UniqueSaturatedInto, Zero, One + Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedSub, One, + SaturatedConversion, Saturating, UniqueSaturatedInto, Zero, }, + PerThing, +}; +use codec::{CompactAs, Decode, Encode}; +use sp_std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, + ops::{self, Add, Div, Mul, Sub}, + prelude::*, }; #[cfg(feature = "std")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; /// Integer types that can be used to interact with `FixedPointNumber` implementations. -pub trait FixedPointOperand: Copy + Clone + Bounded + Zero + Saturating - + PartialOrd + UniqueSaturatedInto<u128> + TryFrom<u128> + CheckedNeg {} +pub trait FixedPointOperand: + Copy + + Clone + + Bounded + + Zero + + Saturating + + PartialOrd + + UniqueSaturatedInto<u128> + + TryFrom<u128> + + CheckedNeg +{ +} impl FixedPointOperand for i128 {} impl FixedPointOperand for u128 {} @@ -52,11 +69,26 @@ impl FixedPointOperand for u8 {} /// to `Self::Inner::max_value() / Self::DIV`.
/// This is also referred to as the _accuracy_ of the type in the documentation. pub trait FixedPointNumber: - Sized + Copy + Default + Debug - + Saturating + Bounded - + Eq + PartialEq + Ord + PartialOrd - + CheckedSub + CheckedAdd + CheckedMul + CheckedDiv - + Add + Sub + Div + Mul + Sized + + Copy + + Default + + Debug + + Saturating + + Bounded + + Eq + + PartialEq + + Ord + + PartialOrd + + CheckedSub + + CheckedAdd + + CheckedMul + + CheckedDiv + + Add + + Sub + + Div + + Mul + + Zero + + One { /// The underlying data type used for this fixed point number. type Inner: Debug + One + CheckedMul + CheckedDiv + FixedPointOperand; @@ -91,7 +123,7 @@ pub trait FixedPointNumber: /// /// Returns `None` if `int` exceeds accuracy. fn checked_from_integer(int: Self::Inner) -> Option<Self> { - int.checked_mul(&Self::DIV).map(|inner| Self::from_inner(inner)) + int.checked_mul(&Self::DIV).map(Self::from_inner) } /// Creates `self` from a rational number. Equal to `n / d`. @@ -107,7 +139,10 @@ pub trait FixedPointNumber: /// Creates `self` from a rational number. Equal to `n / d`. /// /// Returns `None` if `d == 0` or `n / d` exceeds accuracy. - fn checked_from_rational<N: FixedPointOperand, D: FixedPointOperand>(n: N, d: D) -> Option<Self> { + fn checked_from_rational<N: FixedPointOperand, D: FixedPointOperand>( + n: N, + d: D, + ) -> Option<Self> { if d == D::zero() { return None } @@ -116,9 +151,10 @@ pub trait FixedPointNumber: let d: I129 = d.into(); let negative = n.negative != d.negative; - multiply_by_rational(n.value, Self::DIV.unique_saturated_into(), d.value).ok() + multiply_by_rational(n.value, Self::DIV.unique_saturated_into(), d.value) + .ok() .and_then(|value| from_i129(I129 { value, negative })) - .map(|inner| Self::from_inner(inner)) + .map(Self::from_inner) } /// Checked multiplication for integer type `N`. Equal to `self * n`. @@ -129,7 +165,8 @@ pub trait FixedPointNumber: let rhs: I129 = n.into(); let negative = lhs.negative != rhs.negative; - multiply_by_rational(lhs.value, rhs.value, Self::DIV.unique_saturated_into()).ok() + multiply_by_rational(lhs.value, rhs.value, Self::DIV.unique_saturated_into()) + .ok() .and_then(|value| from_i129(I129 { value, negative })) } @@ -148,7 +185,8 @@ pub trait FixedPointNumber: let rhs: I129 = d.into(); let negative = lhs.negative != rhs.negative; - lhs.value.checked_div(rhs.value) + lhs.value + .checked_div(rhs.value) .and_then(|n| n.checked_div(Self::DIV.unique_saturated_into())) .and_then(|value| from_i129(I129 { value, negative })) } @@ -183,7 +221,7 @@ pub trait FixedPointNumber: if inner >= Self::Inner::zero() { self } else { - Self::from_inner(inner.checked_neg().unwrap_or_else(|| Self::Inner::max_value())) + Self::from_inner(inner.checked_neg().unwrap_or_else(Self::Inner::max_value)) } } @@ -194,21 +232,6 @@ pub trait FixedPointNumber: Self::one().checked_div(&self) } - /// Returns zero. - fn zero() -> Self { - Self::from_inner(Self::Inner::zero()) - } - - /// Checks if the number is zero. - fn is_zero(&self) -> bool { - self.into_inner() == Self::Inner::zero() - } - - /// Returns one. - fn one() -> Self { - Self::from_inner(Self::DIV) - } - /// Checks if the number is one. fn is_one(&self) -> bool { self.into_inner() == Self::Inner::one() @@ -226,10 +249,11 @@ pub trait FixedPointNumber: /// Returns the integer part.
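/// A sketch of the intended semantics, assuming a signed implementation such
/// as `FixedI64`: `saturating_from_rational(5, 2).trunc()` is `2`, and
/// `saturating_from_rational(-5, 2).trunc()` is `-2`, i.e. truncation always
/// moves toward zero, which is what the integer division below provides.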
fn trunc(self) -> Self { - self.into_inner().checked_div(&Self::DIV) + self.into_inner() + .checked_div(&Self::DIV) .expect("panics only if DIV is zero, DIV is not zero; qed") .checked_mul(&Self::DIV) - .map(|inner| Self::from_inner(inner)) + .map(Self::from_inner) .expect("can not overflow since fixed number is >= integer part") } @@ -253,12 +277,10 @@ pub trait FixedPointNumber: fn ceil(self) -> Self { if self.is_negative() { self.trunc() + } else if self.frac() == Self::zero() { + self } else { - if self.frac() == Self::zero() { - self - } else { - self.saturating_add(Self::one()).trunc() - } + self.saturating_add(Self::one()).trunc() } } @@ -280,12 +302,10 @@ pub trait FixedPointNumber: let n = self.frac().saturating_mul(Self::saturating_from_integer(10)); if n < Self::saturating_from_integer(5) { self.trunc() + } else if self.is_positive() { + self.saturating_add(Self::one()).trunc() } else { - if self.is_positive() { - self.saturating_add(Self::one()).trunc() - } else { - self.saturating_sub(Self::one()).trunc() - } + self.saturating_sub(Self::one()).trunc() } } } @@ -299,7 +319,8 @@ struct I129 { impl From for I129 { fn from(n: N) -> I129 { if n < N::zero() { - let value: u128 = n.checked_neg() + let value: u128 = n + .checked_neg() .map(|n| n.unique_saturated_into()) .unwrap_or_else(|| N::max_value().unique_saturated_into().saturating_add(1)); I129 { value, negative: true } @@ -340,9 +361,20 @@ macro_rules! implement_fixed { $title:expr $(,)? ) => { /// A fixed point number representation in the range. - /// #[doc = $title] - #[derive(Encode, Decode, CompactAs, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] + #[derive( + Encode, + Decode, + CompactAs, + Default, + Copy, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + scale_info::TypeInfo, + )] pub struct $name($inner_type); impl From<$inner_type> for $name { @@ -379,12 +411,12 @@ macro_rules! implement_fixed { } #[cfg(any(feature = "std", test))] - pub fn from_fraction(x: f64) -> Self { + pub fn from_float(x: f64) -> Self { Self((x * (::DIV as f64)) as $inner_type) } #[cfg(any(feature = "std", test))] - pub fn to_fraction(self) -> f64 { + pub fn to_float(self) -> f64 { self.0 as f64 / ::DIV as f64 } } @@ -404,7 +436,7 @@ macro_rules! implement_fixed { fn saturating_pow(self, exp: usize) -> Self { if exp == 0 { - return Self::saturating_from_integer(1); + return Self::saturating_from_integer(1) } let exp = exp as u32; @@ -489,7 +521,8 @@ macro_rules! implement_fixed { let rhs: I129 = other.0.into(); let negative = lhs.negative != rhs.negative; - multiply_by_rational(lhs.value, Self::DIV as u128, rhs.value).ok() + multiply_by_rational(lhs.value, Self::DIV as u128, rhs.value) + .ok() .and_then(|value| from_i129(I129 { value, negative })) .map(Self) } @@ -501,7 +534,8 @@ macro_rules! implement_fixed { let rhs: I129 = other.0.into(); let negative = lhs.negative != rhs.negative; - multiply_by_rational(lhs.value, rhs.value, Self::DIV as u128).ok() + multiply_by_rational(lhs.value, rhs.value, Self::DIV as u128) + .ok() .and_then(|value| from_i129(I129 { value, negative })) .map(Self) } @@ -517,6 +551,22 @@ macro_rules! 
implement_fixed { } } + impl Zero for $name { + fn zero() -> Self { + Self::from_inner(::Inner::zero()) + } + + fn is_zero(&self) -> bool { + self.into_inner() == ::Inner::zero() + } + } + + impl One for $name { + fn one() -> Self { + Self::from_inner(Self::DIV) + } + } + impl sp_std::fmt::Debug for $name { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { @@ -526,7 +576,11 @@ macro_rules! implement_fixed { format!("{}{}", signum_for_zero, int) }; let precision = (Self::accuracy() as f64).log10() as usize; - let fractional = format!("{:0>weight$}", ((self.0 % Self::accuracy()) as i128).abs(), weight=precision); + let fractional = format!( + "{:0>weight$}", + ((self.0 % Self::accuracy()) as i128).abs(), + weight = precision + ); write!(f, "{}({}.{})", stringify!($name), integral, fractional) } @@ -536,10 +590,13 @@ macro_rules! implement_fixed { } } - impl From
<P> for $name { + impl<P: PerThing> From<P>
for $name + where + P::Inner: FixedPointOperand, + { fn from(p: P) -> Self { - let accuracy = P::ACCURACY.saturated_into(); - let value = p.deconstruct().saturated_into(); + let accuracy = P::ACCURACY; + let value = p.deconstruct(); $name::saturating_from_rational(value, accuracy) } } @@ -556,8 +613,8 @@ macro_rules! implement_fixed { type Err = &'static str; fn from_str(s: &str) -> Result { - let inner: ::Inner = s.parse() - .map_err(|_| "invalid string input for fixed point number")?; + let inner: ::Inner = + s.parse().map_err(|_| "invalid string input for fixed point number")?; Ok(Self::from_inner(inner)) } } @@ -584,7 +641,7 @@ macro_rules! implement_fixed { { use sp_std::str::FromStr; let s = String::deserialize(deserializer)?; - $name::from_str(&s).map_err(|err_str| de::Error::custom(err_str)) + $name::from_str(&s).map_err(de::Error::custom) } } @@ -612,53 +669,35 @@ macro_rules! implement_fixed { #[test] fn from_i129_works() { - let a = I129 { - value: 1, - negative: true, - }; + let a = I129 { value: 1, negative: true }; // Can't convert negative number to unsigned. assert_eq!(from_i129::(a), None); - let a = I129 { - value: u128::max_value() - 1, - negative: false, - }; + let a = I129 { value: u128::MAX - 1, negative: false }; // Max - 1 value fits. - assert_eq!(from_i129::(a), Some(u128::max_value() - 1)); + assert_eq!(from_i129::(a), Some(u128::MAX - 1)); - let a = I129 { - value: u128::max_value(), - negative: false, - }; + let a = I129 { value: u128::MAX, negative: false }; // Max value fits. - assert_eq!(from_i129::(a), Some(u128::max_value())); + assert_eq!(from_i129::(a), Some(u128::MAX)); - let a = I129 { - value: i128::max_value() as u128 + 1, - negative: true, - }; + let a = I129 { value: i128::MAX as u128 + 1, negative: true }; // Min value fits. - assert_eq!(from_i129::(a), Some(i128::min_value())); + assert_eq!(from_i129::(a), Some(i128::MIN)); - let a = I129 { - value: i128::max_value() as u128 + 1, - negative: false, - }; + let a = I129 { value: i128::MAX as u128 + 1, negative: false }; // Max + 1 does not fit. assert_eq!(from_i129::(a), None); - let a = I129 { - value: i128::max_value() as u128, - negative: false, - }; + let a = I129 { value: i128::MAX as u128, negative: false }; // Max value fits. - assert_eq!(from_i129::(a), Some(i128::max_value())); + assert_eq!(from_i129::(a), Some(i128::MAX)); } #[test] @@ -667,25 +706,25 @@ macro_rules! implement_fixed { let b = 1i32; // Pos + Pos => Max. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::max_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MAX); let a = -1i32; let b = -1i32; // Neg + Neg => Max. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::max_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MAX); let a = 1i32; let b = -1i32; // Pos + Neg => Min. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::min_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MIN); let a = -1i32; let b = 1i32; // Neg + Pos => Min. - assert_eq!(to_bound::<_, _, i32>(a, b), i32::min_value()); + assert_eq!(to_bound::<_, _, i32>(a, b), i32::MIN); let a = 1i32; let b = -1i32; @@ -726,7 +765,6 @@ macro_rules! implement_fixed { // Min. assert_eq!($name::max_value(), b); - } } @@ -851,8 +889,7 @@ macro_rules! implement_fixed { let accuracy = $name::accuracy(); // Case where integer fits. - let a = $name::checked_from_integer(42) - .expect("42 * accuracy <= inner_max; qed"); + let a = $name::checked_from_integer(42).expect("42 * accuracy <= inner_max; qed"); assert_eq!(a.into_inner(), 42 * accuracy); // Max integer that fit. 
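// A usage sketch for the `From<P: PerThing>` impl above (illustrative only;
// assumes `Perbill`, whose inner type is `u32` and whose `ACCURACY` is
// 1_000_000_000):
//
//     let f = FixedU128::from(Perbill::from_percent(25));
//     assert_eq!(f, FixedU128::saturating_from_rational(1u32, 4u32));
//
// The per-thing's raw parts become the numerator and its accuracy the
// denominator of `saturating_from_rational`.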
@@ -930,7 +967,7 @@ macro_rules! implement_fixed { if $name::SIGNED { // Negative case: -2.5 let a = $name::saturating_from_rational(-5, 2); - assert_eq!(a.into_inner(), 0 - 25 * accuracy / 10); + assert_eq!(a.into_inner(), 0 - 25 * accuracy / 10); // Other negative case: -2.5 let a = $name::saturating_from_rational(5, -2); @@ -1050,7 +1087,10 @@ macro_rules! implement_fixed { if $name::SIGNED { // Min - 1 => Underflow => None. - let a = $name::checked_from_rational(inner_max as u128 + 2, 0.saturating_sub(accuracy)); + let a = $name::checked_from_rational( + inner_max as u128 + 2, + 0.saturating_sub(accuracy), + ); assert_eq!(a, None); let a = $name::checked_from_rational(inner_max, 0 - 3 * accuracy).unwrap(); @@ -1086,74 +1126,74 @@ macro_rules! implement_fixed { fn checked_mul_int_works() { let a = $name::saturating_from_integer(2); // Max - 1. - assert_eq!(a.checked_mul_int((i128::max_value() - 1) / 2), Some(i128::max_value() - 1)); + assert_eq!(a.checked_mul_int((i128::MAX - 1) / 2), Some(i128::MAX - 1)); // Max. - assert_eq!(a.checked_mul_int(i128::max_value() / 2), Some(i128::max_value() - 1)); + assert_eq!(a.checked_mul_int(i128::MAX / 2), Some(i128::MAX - 1)); // Max + 1 => None. - assert_eq!(a.checked_mul_int(i128::max_value() / 2 + 1), None); + assert_eq!(a.checked_mul_int(i128::MAX / 2 + 1), None); if $name::SIGNED { // Min - 1. - assert_eq!(a.checked_mul_int((i128::min_value() + 1) / 2), Some(i128::min_value() + 2)); + assert_eq!(a.checked_mul_int((i128::MIN + 1) / 2), Some(i128::MIN + 2)); // Min. - assert_eq!(a.checked_mul_int(i128::min_value() / 2), Some(i128::min_value())); + assert_eq!(a.checked_mul_int(i128::MIN / 2), Some(i128::MIN)); // Min + 1 => None. - assert_eq!(a.checked_mul_int(i128::min_value() / 2 - 1), None); + assert_eq!(a.checked_mul_int(i128::MIN / 2 - 1), None); let b = $name::saturating_from_rational(1, -2); assert_eq!(b.checked_mul_int(42i128), Some(-21)); - assert_eq!(b.checked_mul_int(u128::max_value()), None); - assert_eq!(b.checked_mul_int(i128::max_value()), Some(i128::max_value() / -2)); - assert_eq!(b.checked_mul_int(i128::min_value()), Some(i128::min_value() / -2)); + assert_eq!(b.checked_mul_int(u128::MAX), None); + assert_eq!(b.checked_mul_int(i128::MAX), Some(i128::MAX / -2)); + assert_eq!(b.checked_mul_int(i128::MIN), Some(i128::MIN / -2)); } let a = $name::saturating_from_rational(1, 2); assert_eq!(a.checked_mul_int(42i128), Some(21)); - assert_eq!(a.checked_mul_int(i128::max_value()), Some(i128::max_value() / 2)); - assert_eq!(a.checked_mul_int(i128::min_value()), Some(i128::min_value() / 2)); + assert_eq!(a.checked_mul_int(i128::MAX), Some(i128::MAX / 2)); + assert_eq!(a.checked_mul_int(i128::MIN), Some(i128::MIN / 2)); let c = $name::saturating_from_integer(255); assert_eq!(c.checked_mul_int(2i8), None); assert_eq!(c.checked_mul_int(2i128), Some(510)); - assert_eq!(c.checked_mul_int(i128::max_value()), None); - assert_eq!(c.checked_mul_int(i128::min_value()), None); + assert_eq!(c.checked_mul_int(i128::MAX), None); + assert_eq!(c.checked_mul_int(i128::MIN), None); } #[test] fn saturating_mul_int_works() { let a = $name::saturating_from_integer(2); // Max - 1. - assert_eq!(a.saturating_mul_int((i128::max_value() - 1) / 2), i128::max_value() - 1); + assert_eq!(a.saturating_mul_int((i128::MAX - 1) / 2), i128::MAX - 1); // Max. - assert_eq!(a.saturating_mul_int(i128::max_value() / 2), i128::max_value() - 1); + assert_eq!(a.saturating_mul_int(i128::MAX / 2), i128::MAX - 1); // Max + 1 => saturates to max. 
- assert_eq!(a.saturating_mul_int(i128::max_value() / 2 + 1), i128::max_value()); + assert_eq!(a.saturating_mul_int(i128::MAX / 2 + 1), i128::MAX); // Min - 1. - assert_eq!(a.saturating_mul_int((i128::min_value() + 1) / 2), i128::min_value() + 2); + assert_eq!(a.saturating_mul_int((i128::MIN + 1) / 2), i128::MIN + 2); // Min. - assert_eq!(a.saturating_mul_int(i128::min_value() / 2), i128::min_value()); + assert_eq!(a.saturating_mul_int(i128::MIN / 2), i128::MIN); // Min + 1 => saturates to min. - assert_eq!(a.saturating_mul_int(i128::min_value() / 2 - 1), i128::min_value()); + assert_eq!(a.saturating_mul_int(i128::MIN / 2 - 1), i128::MIN); if $name::SIGNED { let b = $name::saturating_from_rational(1, -2); assert_eq!(b.saturating_mul_int(42i32), -21); - assert_eq!(b.saturating_mul_int(i128::max_value()), i128::max_value() / -2); - assert_eq!(b.saturating_mul_int(i128::min_value()), i128::min_value() / -2); - assert_eq!(b.saturating_mul_int(u128::max_value()), u128::min_value()); + assert_eq!(b.saturating_mul_int(i128::MAX), i128::MAX / -2); + assert_eq!(b.saturating_mul_int(i128::MIN), i128::MIN / -2); + assert_eq!(b.saturating_mul_int(u128::MAX), u128::MIN); } let a = $name::saturating_from_rational(1, 2); assert_eq!(a.saturating_mul_int(42i32), 21); - assert_eq!(a.saturating_mul_int(i128::max_value()), i128::max_value() / 2); - assert_eq!(a.saturating_mul_int(i128::min_value()), i128::min_value() / 2); + assert_eq!(a.saturating_mul_int(i128::MAX), i128::MAX / 2); + assert_eq!(a.saturating_mul_int(i128::MIN), i128::MIN / 2); let c = $name::saturating_from_integer(255); - assert_eq!(c.saturating_mul_int(2i8), i8::max_value()); - assert_eq!(c.saturating_mul_int(-2i8), i8::min_value()); - assert_eq!(c.saturating_mul_int(i128::max_value()), i128::max_value()); - assert_eq!(c.saturating_mul_int(i128::min_value()), i128::min_value()); + assert_eq!(c.saturating_mul_int(2i8), i8::MAX); + assert_eq!(c.saturating_mul_int(-2i8), i8::MIN); + assert_eq!(c.saturating_mul_int(i128::MAX), i128::MAX); + assert_eq!(c.saturating_mul_int(i128::MIN), i128::MIN); } #[test] @@ -1165,15 +1205,15 @@ macro_rules! implement_fixed { // Max - 1. let b = $name::from_inner(inner_max - 1); - assert_eq!(a.checked_mul(&(b/2.into())), Some(b)); + assert_eq!(a.checked_mul(&(b / 2.into())), Some(b)); // Max. let c = $name::from_inner(inner_max); - assert_eq!(a.checked_mul(&(c/2.into())), Some(b)); + assert_eq!(a.checked_mul(&(c / 2.into())), Some(b)); // Max + 1 => None. let e = $name::from_inner(1); - assert_eq!(a.checked_mul(&(c/2.into()+e)), None); + assert_eq!(a.checked_mul(&(c / 2.into() + e)), None); if $name::SIGNED { // Min + 1. @@ -1194,8 +1234,14 @@ macro_rules! implement_fixed { let b = $name::saturating_from_rational(1, -2); assert_eq!(b.checked_mul(&42.into()), Some(0.saturating_sub(21).into())); - assert_eq!(b.checked_mul(&$name::max_value()), $name::max_value().checked_div(&0.saturating_sub(2).into())); - assert_eq!(b.checked_mul(&$name::min_value()), $name::min_value().checked_div(&0.saturating_sub(2).into())); + assert_eq!( + b.checked_mul(&$name::max_value()), + $name::max_value().checked_div(&0.saturating_sub(2).into()) + ); + assert_eq!( + b.checked_mul(&$name::min_value()), + $name::min_value().checked_div(&0.saturating_sub(2).into()) + ); assert_eq!(c.checked_mul(&$name::min_value()), None); } @@ -1205,8 +1251,14 @@ macro_rules! 
implement_fixed { assert_eq!(a.checked_mul(&42.into()), Some(21.into())); assert_eq!(c.checked_mul(&2.into()), Some(510.into())); assert_eq!(c.checked_mul(&$name::max_value()), None); - assert_eq!(a.checked_mul(&$name::max_value()), $name::max_value().checked_div(&2.into())); - assert_eq!(a.checked_mul(&$name::min_value()), $name::min_value().checked_div(&2.into())); + assert_eq!( + a.checked_mul(&$name::max_value()), + $name::max_value().checked_div(&2.into()) + ); + assert_eq!( + a.checked_mul(&$name::min_value()), + $name::min_value().checked_div(&2.into()) + ); } #[test] @@ -1225,32 +1277,44 @@ macro_rules! implement_fixed { assert_eq!(e.checked_div_int(2.into()), Some(3)); assert_eq!(f.checked_div_int(2.into()), Some(2)); - assert_eq!(a.checked_div_int(i128::max_value()), Some(0)); + assert_eq!(a.checked_div_int(i128::MAX), Some(0)); assert_eq!(a.checked_div_int(2), Some(inner_max / (2 * accuracy))); assert_eq!(a.checked_div_int(inner_max / accuracy), Some(1)); assert_eq!(a.checked_div_int(1i8), None); if b < c { // Not executed by unsigned inners. - assert_eq!(a.checked_div_int(0.saturating_sub(2)), Some(0.saturating_sub(inner_max / (2 * accuracy)))); - assert_eq!(a.checked_div_int(0.saturating_sub(inner_max / accuracy)), Some(0.saturating_sub(1))); - assert_eq!(b.checked_div_int(i128::min_value()), Some(0)); + assert_eq!( + a.checked_div_int(0.saturating_sub(2)), + Some(0.saturating_sub(inner_max / (2 * accuracy))) + ); + assert_eq!( + a.checked_div_int(0.saturating_sub(inner_max / accuracy)), + Some(0.saturating_sub(1)) + ); + assert_eq!(b.checked_div_int(i128::MIN), Some(0)); assert_eq!(b.checked_div_int(inner_min / accuracy), Some(1)); assert_eq!(b.checked_div_int(1i8), None); - assert_eq!(b.checked_div_int(0.saturating_sub(2)), Some(0.saturating_sub(inner_min / (2 * accuracy)))); - assert_eq!(b.checked_div_int(0.saturating_sub(inner_min / accuracy)), Some(0.saturating_sub(1))); - assert_eq!(c.checked_div_int(i128::min_value()), Some(0)); - assert_eq!(d.checked_div_int(i32::min_value()), Some(0)); + assert_eq!( + b.checked_div_int(0.saturating_sub(2)), + Some(0.saturating_sub(inner_min / (2 * accuracy))) + ); + assert_eq!( + b.checked_div_int(0.saturating_sub(inner_min / accuracy)), + Some(0.saturating_sub(1)) + ); + assert_eq!(c.checked_div_int(i128::MIN), Some(0)); + assert_eq!(d.checked_div_int(i32::MIN), Some(0)); } assert_eq!(b.checked_div_int(2), Some(inner_min / (2 * accuracy))); assert_eq!(c.checked_div_int(1), Some(0)); - assert_eq!(c.checked_div_int(i128::max_value()), Some(0)); + assert_eq!(c.checked_div_int(i128::MAX), Some(0)); assert_eq!(c.checked_div_int(1i8), Some(0)); assert_eq!(d.checked_div_int(1), Some(1)); - assert_eq!(d.checked_div_int(i32::max_value()), Some(0)); + assert_eq!(d.checked_div_int(i32::MAX), Some(0)); assert_eq!(d.checked_div_int(1i8), Some(1)); assert_eq!(a.checked_div_int(0), None); @@ -1296,7 +1360,10 @@ macro_rules! implement_fixed { if $name::SIGNED { assert_eq!($name::from_inner(inner_min).saturating_abs(), $name::max_value()); - assert_eq!($name::saturating_from_rational(-1, 2).saturating_abs(), (1, 2).into()); + assert_eq!( + $name::saturating_from_rational(-1, 2).saturating_abs(), + (1, 2).into() + ); } } @@ -1305,47 +1372,88 @@ macro_rules! 
implement_fixed { assert_eq!($name::zero().saturating_mul_acc_int(42i8), 42i8); assert_eq!($name::one().saturating_mul_acc_int(42i8), 2 * 42i8); - assert_eq!($name::one().saturating_mul_acc_int(i128::max_value()), i128::max_value()); - assert_eq!($name::one().saturating_mul_acc_int(i128::min_value()), i128::min_value()); + assert_eq!($name::one().saturating_mul_acc_int(i128::MAX), i128::MAX); + assert_eq!($name::one().saturating_mul_acc_int(i128::MIN), i128::MIN); - assert_eq!($name::one().saturating_mul_acc_int(u128::max_value() / 2), u128::max_value() - 1); - assert_eq!($name::one().saturating_mul_acc_int(u128::min_value()), u128::min_value()); + assert_eq!($name::one().saturating_mul_acc_int(u128::MAX / 2), u128::MAX - 1); + assert_eq!($name::one().saturating_mul_acc_int(u128::MIN), u128::MIN); if $name::SIGNED { let a = $name::saturating_from_rational(-1, 2); assert_eq!(a.saturating_mul_acc_int(42i8), 21i8); assert_eq!(a.saturating_mul_acc_int(42u8), 21u8); - assert_eq!(a.saturating_mul_acc_int(u128::max_value() - 1), u128::max_value() / 2); + assert_eq!(a.saturating_mul_acc_int(u128::MAX - 1), u128::MAX / 2); } } #[test] fn saturating_pow_should_work() { - assert_eq!($name::saturating_from_integer(2).saturating_pow(0), $name::saturating_from_integer(1)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(1), $name::saturating_from_integer(2)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(2), $name::saturating_from_integer(4)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(3), $name::saturating_from_integer(8)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(50), - $name::saturating_from_integer(1125899906842624i64)); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(0), + $name::saturating_from_integer(1) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(1), + $name::saturating_from_integer(2) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(2), + $name::saturating_from_integer(4) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(3), + $name::saturating_from_integer(8) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(50), + $name::saturating_from_integer(1125899906842624i64) + ); assert_eq!($name::saturating_from_integer(1).saturating_pow(1000), (1).into()); - assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::max_value()), (1).into()); + assert_eq!( + $name::saturating_from_integer(1).saturating_pow(usize::MAX), + (1).into() + ); if $name::SIGNED { // Saturating. 
- assert_eq!($name::saturating_from_integer(2).saturating_pow(68), $name::max_value()); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(68), + $name::max_value() + ); assert_eq!($name::saturating_from_integer(-1).saturating_pow(1000), (1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(1001), 0.saturating_sub(1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::max_value()), 0.saturating_sub(1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::max_value() - 1), (1).into()); + assert_eq!( + $name::saturating_from_integer(-1).saturating_pow(1001), + 0.saturating_sub(1).into() + ); + assert_eq!( + $name::saturating_from_integer(-1).saturating_pow(usize::MAX), + 0.saturating_sub(1).into() + ); + assert_eq!( + $name::saturating_from_integer(-1).saturating_pow(usize::MAX - 1), + (1).into() + ); } - assert_eq!($name::saturating_from_integer(114209).saturating_pow(5), $name::max_value()); - - assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::max_value()), (1).into()); - assert_eq!($name::saturating_from_integer(0).saturating_pow(usize::max_value()), (0).into()); - assert_eq!($name::saturating_from_integer(2).saturating_pow(usize::max_value()), $name::max_value()); + assert_eq!( + $name::saturating_from_integer(114209).saturating_pow(5), + $name::max_value() + ); + + assert_eq!( + $name::saturating_from_integer(1).saturating_pow(usize::MAX), + (1).into() + ); + assert_eq!( + $name::saturating_from_integer(0).saturating_pow(usize::MAX), + (0).into() + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(usize::MAX), + $name::max_value() + ); } #[test] @@ -1370,9 +1478,18 @@ macro_rules! implement_fixed { if b < c { // Not executed by unsigned inners. - assert_eq!(a.checked_div(&0.saturating_sub(2).into()), Some($name::from_inner(0.saturating_sub(inner_max / 2)))); - assert_eq!(a.checked_div(&-$name::max_value()), Some(0.saturating_sub(1).into())); - assert_eq!(b.checked_div(&0.saturating_sub(2).into()), Some($name::from_inner(0.saturating_sub(inner_min / 2)))); + assert_eq!( + a.checked_div(&0.saturating_sub(2).into()), + Some($name::from_inner(0.saturating_sub(inner_max / 2))) + ); + assert_eq!( + a.checked_div(&-$name::max_value()), + Some(0.saturating_sub(1).into()) + ); + assert_eq!( + b.checked_div(&0.saturating_sub(2).into()), + Some($name::from_inner(0.saturating_sub(inner_min / 2))) + ); assert_eq!(c.checked_div(&$name::max_value()), Some(0.into())); assert_eq!(b.checked_div(&b), Some($name::one())); } @@ -1429,14 +1546,10 @@ macro_rules! implement_fixed { assert_eq!(n, i + f); - let n = $name::saturating_from_rational(5, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(5, 2).frac().saturating_mul(10.into()); assert_eq!(n, 5.into()); - let n = $name::saturating_from_rational(1, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(1, 2).frac().saturating_mul(10.into()); assert_eq!(n, 5.into()); if $name::SIGNED { @@ -1446,14 +1559,10 @@ macro_rules! implement_fixed { assert_eq!(n, i - f); // The sign is attached to the integer part unless it is zero. 
- let n = $name::saturating_from_rational(-5, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(-5, 2).frac().saturating_mul(10.into()); assert_eq!(n, 5.into()); - let n = $name::saturating_from_rational(-1, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(-1, 2).frac().saturating_mul(10.into()); assert_eq!(n, 0.saturating_sub(5).into()); } } @@ -1566,30 +1675,51 @@ macro_rules! implement_fixed { #[test] fn fmt_should_work() { let zero = $name::zero(); - assert_eq!(format!("{:?}", zero), format!("{}(0.{:0>weight$})", stringify!($name), 0, weight=precision())); + assert_eq!( + format!("{:?}", zero), + format!("{}(0.{:0>weight$})", stringify!($name), 0, weight = precision()) + ); let one = $name::one(); - assert_eq!(format!("{:?}", one), format!("{}(1.{:0>weight$})", stringify!($name), 0, weight=precision())); + assert_eq!( + format!("{:?}", one), + format!("{}(1.{:0>weight$})", stringify!($name), 0, weight = precision()) + ); let frac = $name::saturating_from_rational(1, 2); - assert_eq!(format!("{:?}", frac), format!("{}(0.{:0weight$})", stringify!($name), 0, weight=precision())); + assert_eq!( + format!("{:?}", neg), + format!("{}(-1.{:0>weight$})", stringify!($name), 0, weight = precision()) + ); let frac = $name::saturating_from_rational(-314, 100); - assert_eq!(format!("{:?}", frac), format!("{}(-3.{:0 u128 { @@ -58,12 +62,14 @@ pub fn to_big_uint(x: u128) -> biguint::BigUint { /// Safely and accurately compute `a * b / c`. The approach is: /// - Simply try `a * b / c`. -/// - Else, convert them both into big numbers and re-try. `Err` is returned if the result -/// cannot be safely casted back to u128. +/// - Else, convert them both into big numbers and re-try. `Err` is returned if the result cannot +/// be safely casted back to u128. /// /// Invariant: c must be greater than or equal to 1. pub fn multiply_by_rational(mut a: u128, mut b: u128, mut c: u128) -> Result { - if a.is_zero() || b.is_zero() { return Ok(Zero::zero()); } + if a.is_zero() || b.is_zero() { + return Ok(Zero::zero()) + } c = c.max(1); // a and b are interchangeable by definition in this function. It always helps to assume the @@ -102,9 +108,10 @@ pub fn multiply_by_rational(mut a: u128, mut b: u128, mut c: u128) -> Result (c / 2) { q = q.add(&to_big_uint(1)); } + let r: u128 = r.try_into().expect("reminder of div by c is always less than c; qed"); + if r > (c / 2) { + q = q.add(&to_big_uint(1)); + } q }; q.lstrip(); diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index f6521988c91a5..8671ceb0396e7 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,18 +34,18 @@ macro_rules! 
assert_eq_error_rate { } pub mod biguint; +pub mod fixed_point; pub mod helpers_128bit; -pub mod traits; pub mod per_things; -pub mod fixed_point; pub mod rational; +pub mod traits; -pub use fixed_point::{FixedPointNumber, FixedPointOperand, FixedI64, FixedI128, FixedU128}; -pub use per_things::{PerThing, InnerOf, UpperOf, Percent, PerU16, Permill, Perbill, Perquintill}; +pub use fixed_point::{FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, FixedU128}; +pub use per_things::{InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, UpperOf}; pub use rational::{Rational128, RationalInfinite}; -use sp_std::{prelude::*, cmp::Ordering, fmt::Debug, convert::TryInto}; -use traits::{BaseArithmetic, One, Zero, SaturatedConversion, Unsigned}; +use sp_std::{cmp::Ordering, convert::TryInto, fmt::Debug, prelude::*}; +use traits::{BaseArithmetic, One, SaturatedConversion, Unsigned, Zero}; /// Trait for comparing two numbers with an threshold. /// @@ -82,7 +82,6 @@ where _ => Ordering::Equal, } } - } } @@ -114,19 +113,18 @@ impl_normalize_for_numeric!(u8, u16, u32, u64, u128); impl Normalizable
<P> for Vec<P>
{ fn normalize(&self, targeted_sum: P) -> Result<Vec<P>, &'static str> { - let inners = self + let uppers = self .iter() - .map(|p| p.clone().deconstruct().into()) + .map(|p| <UpperOf<P>>::from(p.clone().deconstruct())) .collect::<Vec<_>>(); - let normalized = normalize(inners.as_ref(), targeted_sum.deconstruct().into())?; + let normalized = + normalize(uppers.as_ref(), <UpperOf<P>>::from(targeted_sum.deconstruct()))?; - Ok( - normalized - .into_iter() - .map(|i: UpperOf<P>| P::from_parts(i.saturated_into())) - .collect() - ) + Ok(normalized + .into_iter() + .map(|i: UpperOf<P>| P::from_parts(i.saturated_into::<P::Inner>())) + .collect()) } } @@ -160,7 +158,8 @@ impl<P: PerThing> Normalizable<P> for Vec<P>
{ /// /// * This proof is used in the implementation as well. pub fn normalize<T>(input: &[T], targeted_sum: T) -> Result<Vec<T>, &'static str> - where T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug, +where + T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug, { // compute sum and return error if failed. let mut sum = T::zero(); @@ -174,12 +173,12 @@ pub fn normalize<T>(input: &[T], targeted_sum: T) -> Result<Vec<T>, &'static st // Nothing to do here. if count.is_zero() { - return Ok(Vec::<T>::new()); + return Ok(Vec::<T>::new()) } let diff = targeted_sum.max(sum) - targeted_sum.min(sum); if diff.is_zero() { - return Ok(input.to_vec()); + return Ok(input.to_vec()) } let needs_bump = targeted_sum > sum; @@ -201,24 +200,26 @@ pub fn normalize<T>(input: &[T], targeted_sum: T) -> Result<Vec<T>, &'static st if !per_round.is_zero() { for _ in 0..count { - output_with_idx[min_index].1 = output_with_idx[min_index].1 + output_with_idx[min_index].1 = output_with_idx[min_index] + .1 .checked_add(&per_round) .expect("Proof provided in the module doc; qed."); if output_with_idx[min_index].1 >= threshold { min_index += 1; - min_index = min_index % count; + min_index %= count; } } } // continue with the previous min_index while !leftover.is_zero() { - output_with_idx[min_index].1 = output_with_idx[min_index].1 + output_with_idx[min_index].1 = output_with_idx[min_index] + .1 .checked_add(&T::one()) .expect("Proof provided in the module doc; qed."); if output_with_idx[min_index].1 >= threshold { min_index += 1; - min_index = min_index % count; + min_index %= count; } leftover -= One::one() } @@ -235,9 +236,8 @@ pub fn normalize<T>(input: &[T], targeted_sum: T) -> Result<Vec<T>, &'static st if !per_round.is_zero() { for _ in 0..count { - output_with_idx[max_index].1 = output_with_idx[max_index].1 - .checked_sub(&per_round) - .unwrap_or_else(|| { + output_with_idx[max_index].1 = + output_with_idx[max_index].1.checked_sub(&per_round).unwrap_or_else(|| { let remainder = per_round - output_with_idx[max_index].1; leftover += remainder; output_with_idx[max_index].1.saturating_sub(per_round) @@ -287,7 +287,7 @@ mod normalize_tests { normalize(vec![8 as $type, 9, 7, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10], ); - } + }; } // it should work for all types as long as the length of vector can be converted to T.
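// A worked pass of the bump branch above, using the first test vector below
// and assuming the move-on threshold is targeted_sum / count: for
// input = [8, 9, 7, 10] and targeted_sum = 40, sum = 34, so diff = 6,
// needs_bump is true, per_round = 6 / 4 = 1 and leftover = 6 % 4 = 2. The
// `count` per_round additions pour into the current minimum (advancing
// min_index whenever an element reaches 10), and the two leftover units are
// added the same way, ending at [10, 10, 10, 10].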
test_for!(u128); @@ -300,22 +300,13 @@ mod normalize_tests { #[test] fn fails_on_if_input_sum_large() { assert!(normalize(vec![1u8; 255].as_ref(), 10).is_ok()); - assert_eq!( - normalize(vec![1u8; 256].as_ref(), 10), - Err("sum of input cannot fit in `T`"), - ); + assert_eq!(normalize(vec![1u8; 256].as_ref(), 10), Err("sum of input cannot fit in `T`")); } #[test] fn does_not_fail_on_subtraction_overflow() { - assert_eq!( - normalize(vec![1u8, 100, 100].as_ref(), 10).unwrap(), - vec![1, 9, 0], - ); - assert_eq!( - normalize(vec![1u8, 8, 9].as_ref(), 1).unwrap(), - vec![0, 1, 0], - ); + assert_eq!(normalize(vec![1u8, 100, 100].as_ref(), 10).unwrap(), vec![1, 9, 0]); + assert_eq!(normalize(vec![1u8, 8, 9].as_ref(), 1).unwrap(), vec![0, 1, 0]); } #[test] @@ -326,11 +317,9 @@ mod normalize_tests { #[test] fn works_for_per_thing() { assert_eq!( - vec![ - Perbill::from_percent(33), - Perbill::from_percent(33), - Perbill::from_percent(33) - ].normalize(Perbill::one()).unwrap(), + vec![Perbill::from_percent(33), Perbill::from_percent(33), Perbill::from_percent(33)] + .normalize(Perbill::one()) + .unwrap(), vec![ Perbill::from_parts(333333334), Perbill::from_parts(333333333), @@ -339,11 +328,9 @@ mod normalize_tests { ); assert_eq!( - vec![ - Perbill::from_percent(20), - Perbill::from_percent(15), - Perbill::from_percent(30) - ].normalize(Perbill::one()).unwrap(), + vec![Perbill::from_percent(20), Perbill::from_percent(15), Perbill::from_percent(30)] + .normalize(Perbill::one()) + .unwrap(), vec![ Perbill::from_parts(316666668), Perbill::from_parts(383333332), @@ -358,11 +345,9 @@ mod normalize_tests { // could have a situation where the sum cannot be calculated in the inner type. Calculating // using the upper type of the per_thing should assure this to be okay. 
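// Concretely, assuming `PerU16`'s inner type is u16 and `UpperOf<PerU16>`
// is u32: from_percent(40) deconstructs to 26214 parts, and
// 3 * 26214 = 78642 > u16::MAX = 65535, so summing the raw parts in u16
// would overflow; widening each term to u32 first keeps the sum exact.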
assert_eq!( - vec![ - PerU16::from_percent(40), - PerU16::from_percent(40), - PerU16::from_percent(40), - ].normalize(PerU16::one()).unwrap(), + vec![PerU16::from_percent(40), PerU16::from_percent(40), PerU16::from_percent(40),] + .normalize(PerU16::one()) + .unwrap(), vec![ PerU16::from_parts(21845), // 33% PerU16::from_parts(21845), // 33% @@ -373,82 +358,40 @@ mod normalize_tests { #[test] fn normalize_works_all_le() { - assert_eq!( - normalize(vec![8u32, 9, 7, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![8u32, 9, 7, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10]); - assert_eq!( - normalize(vec![7u32, 7, 7, 7].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![7u32, 7, 7, 7].as_ref(), 40).unwrap(), vec![10, 10, 10, 10]); - assert_eq!( - normalize(vec![7u32, 7, 7, 10].as_ref(), 40).unwrap(), - vec![11, 11, 8, 10], - ); + assert_eq!(normalize(vec![7u32, 7, 7, 10].as_ref(), 40).unwrap(), vec![11, 11, 8, 10]); - assert_eq!( - normalize(vec![7u32, 8, 7, 10].as_ref(), 40).unwrap(), - vec![11, 8, 11, 10], - ); + assert_eq!(normalize(vec![7u32, 8, 7, 10].as_ref(), 40).unwrap(), vec![11, 8, 11, 10]); - assert_eq!( - normalize(vec![7u32, 7, 8, 10].as_ref(), 40).unwrap(), - vec![11, 11, 8, 10], - ); + assert_eq!(normalize(vec![7u32, 7, 8, 10].as_ref(), 40).unwrap(), vec![11, 11, 8, 10]); } #[test] fn normalize_works_some_ge() { - assert_eq!( - normalize(vec![8u32, 11, 9, 10].as_ref(), 40).unwrap(), - vec![10, 11, 9, 10], - ); + assert_eq!(normalize(vec![8u32, 11, 9, 10].as_ref(), 40).unwrap(), vec![10, 11, 9, 10]); } #[test] fn always_inc_min() { - assert_eq!( - normalize(vec![10u32, 7, 10, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); - assert_eq!( - normalize(vec![10u32, 10, 7, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); - assert_eq!( - normalize(vec![10u32, 10, 10, 7].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![10u32, 7, 10, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10]); + assert_eq!(normalize(vec![10u32, 10, 7, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10]); + assert_eq!(normalize(vec![10u32, 10, 10, 7].as_ref(), 40).unwrap(), vec![10, 10, 10, 10]); } #[test] fn normalize_works_all_ge() { - assert_eq!( - normalize(vec![12u32, 11, 13, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![12u32, 11, 13, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10]); - assert_eq!( - normalize(vec![13u32, 13, 13, 13].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![13u32, 13, 13, 13].as_ref(), 40).unwrap(), vec![10, 10, 10, 10]); - assert_eq!( - normalize(vec![13u32, 13, 13, 10].as_ref(), 40).unwrap(), - vec![12, 9, 9, 10], - ); + assert_eq!(normalize(vec![13u32, 13, 13, 10].as_ref(), 40).unwrap(), vec![12, 9, 9, 10]); - assert_eq!( - normalize(vec![13u32, 12, 13, 10].as_ref(), 40).unwrap(), - vec![9, 12, 9, 10], - ); + assert_eq!(normalize(vec![13u32, 12, 13, 10].as_ref(), 40).unwrap(), vec![9, 12, 9, 10]); - assert_eq!( - normalize(vec![13u32, 13, 12, 10].as_ref(), 40).unwrap(), - vec![9, 9, 12, 10], - ); + assert_eq!(normalize(vec![13u32, 13, 12, 10].as_ref(), 40).unwrap(), vec![9, 9, 12, 10]); } } @@ -497,21 +440,21 @@ mod threshold_compare_tests { fn peru16_rational_does_not_overflow() { // A historical example that will panic only for per_thing type that are created with // maximum capacity of their type, e.g. PerU16. 
- let _ = PerU16::from_rational_approximation(17424870u32, 17424870); + let _ = PerU16::from_rational(17424870u32, 17424870); } #[test] fn saturating_mul_works() { - assert_eq!(Saturating::saturating_mul(2, i32::min_value()), i32::min_value()); - assert_eq!(Saturating::saturating_mul(2, i32::max_value()), i32::max_value()); + assert_eq!(Saturating::saturating_mul(2, i32::MIN), i32::MIN); + assert_eq!(Saturating::saturating_mul(2, i32::MAX), i32::MAX); } #[test] fn saturating_pow_works() { - assert_eq!(Saturating::saturating_pow(i32::min_value(), 0), 1); - assert_eq!(Saturating::saturating_pow(i32::max_value(), 0), 1); - assert_eq!(Saturating::saturating_pow(i32::min_value(), 3), i32::min_value()); - assert_eq!(Saturating::saturating_pow(i32::min_value(), 2), i32::max_value()); - assert_eq!(Saturating::saturating_pow(i32::max_value(), 2), i32::max_value()); + assert_eq!(Saturating::saturating_pow(i32::MIN, 0), 1); + assert_eq!(Saturating::saturating_pow(i32::MAX, 0), 1); + assert_eq!(Saturating::saturating_pow(i32::MIN, 3), i32::MIN); + assert_eq!(Saturating::saturating_pow(i32::MIN, 2), i32::MAX); + assert_eq!(Saturating::saturating_pow(i32::MAX, 2), i32::MAX); } } diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 035a704ba3009..f388c19de6b43 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,14 +16,21 @@ // limitations under the License. #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use sp_std::{ops, fmt, prelude::*, convert::TryInto}; -use codec::{Encode, CompactAs}; use crate::traits::{ - SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, Unsigned, + BaseArithmetic, Bounded, CheckedAdd, CheckedMul, CheckedSub, One, SaturatedConversion, + Saturating, UniqueSaturatedInto, Unsigned, Zero, }; +use codec::{CompactAs, Encode}; +use num_traits::{Pow, SaturatingAdd, SaturatingSub}; use sp_debug_derive::RuntimeDebug; +use sp_std::{ + convert::{TryFrom, TryInto}, + fmt, ops, + ops::{Add, Sub}, + prelude::*, +}; /// Get the inner type of a `PerThing`. pub type InnerOf
<P> = <P as PerThing>::Inner; @@ -34,47 +41,74 @@ pub type UpperOf<P> = <P as PerThing>
::Upper; /// Something that implements a fixed point ration with an arbitrary granularity `X`, as _parts per /// `X`_. pub trait PerThing: - Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug + Sized + + Saturating + + Copy + + Default + + Eq + + PartialEq + + Ord + + PartialOrd + + Bounded + + fmt::Debug + + ops::Div + + ops::Mul + + Pow { /// The data type used to build this per-thingy. - type Inner: BaseArithmetic + Unsigned + Copy + fmt::Debug; + type Inner: BaseArithmetic + Unsigned + Copy + Into + fmt::Debug; /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. /// It must be able to compute `ACCURACY^2`. - type Upper: - BaseArithmetic + Copy + From + TryInto + - UniqueSaturatedInto + Unsigned + fmt::Debug; + type Upper: BaseArithmetic + + Copy + + From + + TryInto + + UniqueSaturatedInto + + Unsigned + + fmt::Debug; /// The accuracy of this type. const ACCURACY: Self::Inner; /// Equivalent to `Self::from_parts(0)`. - fn zero() -> Self { Self::from_parts(Self::Inner::zero()) } + fn zero() -> Self { + Self::from_parts(Self::Inner::zero()) + } /// Return `true` if this is nothing. - fn is_zero(&self) -> bool { self.deconstruct() == Self::Inner::zero() } + fn is_zero(&self) -> bool { + self.deconstruct() == Self::Inner::zero() + } /// Equivalent to `Self::from_parts(Self::ACCURACY)`. - fn one() -> Self { Self::from_parts(Self::ACCURACY) } + fn one() -> Self { + Self::from_parts(Self::ACCURACY) + } /// Return `true` if this is one. - fn is_one(&self) -> bool { self.deconstruct() == Self::ACCURACY } + fn is_one(&self) -> bool { + self.deconstruct() == Self::ACCURACY + } /// Build this type from a percent. Equivalent to `Self::from_parts(x * Self::ACCURACY / 100)` - /// but more accurate. + /// but more accurate and can cope with potential type overflows. fn from_percent(x: Self::Inner) -> Self { - let a = x.min(100.into()); - let b = Self::ACCURACY; - // if Self::ACCURACY % 100 > 0 then we need the correction for accuracy - let c = rational_mul_correction::(b, a, 100.into(), Rounding::Nearest); - Self::from_parts(a / 100.into() * b + c) + let a: Self::Inner = x.min(100.into()); + let b: Self::Inner = 100.into(); + Self::from_rational::(a, b) } /// Return the product of multiplication of this value by itself. fn square(self) -> Self { let p = Self::Upper::from(self.deconstruct()); let q = Self::Upper::from(Self::ACCURACY); - Self::from_rational_approximation(p * p, q * q) + Self::from_rational::(p * p, q * q) + } + + /// Return the part left when `self` is saturating-subtracted from `Self::one()`. + fn left_from_one(self) -> Self { + Self::one().saturating_sub(self) } /// Multiplication that always rounds down to a whole number. 
The standard `Mul` rounds to the @@ -93,8 +127,15 @@ pub trait PerThing: /// # } /// ``` fn mul_floor(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Unsigned + where + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Unsigned, + Self::Inner: Into, { overflow_prune_mul::(b, self.deconstruct(), Rounding::Down) } @@ -115,8 +156,15 @@ pub trait PerThing: /// # } /// ``` fn mul_ceil(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Unsigned + where + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Unsigned, + Self::Inner: Into, { overflow_prune_mul::(b, self.deconstruct(), Rounding::Up) } @@ -131,9 +179,16 @@ pub trait PerThing: /// # } /// ``` fn saturating_reciprocal_mul(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned + where + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating + + Unsigned, + Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Nearest) } @@ -151,9 +206,16 @@ pub trait PerThing: /// # } /// ``` fn saturating_reciprocal_mul_floor(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned + where + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating + + Unsigned, + Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Down) } @@ -171,9 +233,16 @@ pub trait PerThing: /// # } /// ``` fn saturating_reciprocal_mul_ceil(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned + where + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating + + Unsigned, + Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Up) } @@ -186,7 +255,14 @@ pub trait PerThing: /// Converts a fraction into `Self`. #[cfg(feature = "std")] - fn from_fraction(x: f64) -> Self; + fn from_float(x: f64) -> Self; + + /// Same as `Self::from_float`. + #[deprecated = "Use from_float instead"] + #[cfg(feature = "std")] + fn from_fraction(x: f64) -> Self { + Self::from_float(x) + } /// Approximate the fraction `p/q` into a per-thing fraction. This will never overflow. /// @@ -201,14 +277,41 @@ pub trait PerThing: /// # fn main () { /// // 989/100 is technically closer to 99%. /// assert_eq!( - /// Percent::from_rational_approximation(989u64, 1000), - /// Percent::from_parts(98), - /// ); + /// Percent::from_rational(989u64, 1000), + /// Percent::from_parts(98), + /// ); /// # } /// ``` + fn from_rational(p: N, q: N) -> Self + where + N: Clone + + Ord + + TryInto + + TryInto + + ops::Div + + ops::Rem + + ops::Add + + Unsigned, + Self::Inner: Into; + + /// Same as `Self::from_rational`. + #[deprecated = "Use from_rational instead"] fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From + TryInto + TryInto + - ops::Div + ops::Rem + ops::Add + Unsigned; + where + N: Clone + + Ord + + TryInto + + TryInto + + ops::Div + + ops::Rem + + ops::Add + + Unsigned + + Zero + + One, + Self::Inner: Into, + { + Self::from_rational(p, q) + } } /// The rounding method to use. 
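// The correction helpers in the next hunk rest on one integer identity (a
// worked sketch using the same names as `rational_mul_correction`):
// x * numer / denom == (x / denom) * numer + (x % denom) * numer / denom.
// For x = 7, numer = 3, denom = 5 the exact value is 21 / 5 = 4.2;
// (7 / 5) * 3 = 3, and the correction term (7 % 5) * 3 / 5 = 6 / 5
// contributes 1 when rounding down (total 4), 2 when rounding up (total 5),
// and 1 when rounding to nearest (total 4).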
@@ -223,45 +326,40 @@ enum Rounding { /// Saturating reciprocal multiplication. Compute `x / self`, saturating at the numeric /// bounds instead of overflowing. -fn saturating_reciprocal_mul( - x: N, - part: P::Inner, - rounding: Rounding, -) -> N +fn saturating_reciprocal_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Saturating + Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem + + Saturating + + Unsigned, P: PerThing, + P::Inner: Into, { let maximum: N = P::ACCURACY.into(); - let c = rational_mul_correction::( - x.clone(), - P::ACCURACY, - part, - rounding, - ); + let c = rational_mul_correction::(x.clone(), P::ACCURACY, part, rounding); (x / part.into()).saturating_mul(maximum).saturating_add(c) } /// Overflow-prune multiplication. Accurately multiply a value by `self` without overflowing. -fn overflow_prune_mul( - x: N, - part: P::Inner, - rounding: Rounding, -) -> N +fn overflow_prune_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem + + Unsigned, P: PerThing, + P::Inner: Into, { let maximum: N = P::ACCURACY.into(); let part_n: N = part.into(); - let c = rational_mul_correction::( - x.clone(), - part, - P::ACCURACY, - rounding, - ); + let c = rational_mul_correction::(x.clone(), part, P::ACCURACY, rounding); (x / maximum) * part_n + c } @@ -269,19 +367,19 @@ where /// /// Take the remainder of `x / denom` and multiply by `numer / denom`. The result can be added /// to `x / denom * numer` for an accurate result. -fn rational_mul_correction( - x: N, - numer: P::Inner, - denom: P::Inner, - rounding: Rounding, -) -> N +fn rational_mul_correction(x: N, numer: P::Inner, denom: P::Inner, rounding: Rounding) -> N where - N: From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Unsigned, + N: UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem + + Unsigned, P: PerThing, + P::Inner: Into, { let numer_upper = P::Upper::from(numer); - let denom_n = N::from(denom); + let denom_n: N = denom.into(); let denom_upper = P::Upper::from(denom); let rem = x.rem(denom_n); // `rem` is less than `denom`, which fits in `P::Inner`. @@ -295,15 +393,19 @@ where // Already rounded down Rounding::Down => {}, // Round up if the fractional part of the result is non-zero. - Rounding::Up => if rem_mul_upper % denom_upper > 0.into() { - // `rem * numer / denom` is less than `numer`, so this will not overflow. - rem_mul_div_inner = rem_mul_div_inner + 1.into(); + Rounding::Up => { + if rem_mul_upper % denom_upper > 0.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner += 1.into(); + } }, // Round up if the fractional part of the result is greater than a half. An exact half is // rounded down. - Rounding::Nearest => if rem_mul_upper % denom_upper > denom_upper / 2.into() { - // `rem * numer / denom` is less than `numer`, so this will not overflow. - rem_mul_div_inner = rem_mul_div_inner + 1.into(); + Rounding::Nearest => { + if rem_mul_upper % denom_upper > denom_upper / 2.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner += 1.into(); + } }, } rem_mul_div_inner.into() @@ -323,7 +425,7 @@ macro_rules! 
implement_per_thing { /// #[doc = $title] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] - #[derive(Encode, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug)] + #[derive(Encode, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, scale_info::TypeInfo)] pub struct $name($type); /// Implementation makes any compact encoding of `PerThing::Inner` valid, @@ -333,9 +435,9 @@ macro_rules! implement_per_thing { fn encode_as(&self) -> &Self::As { &self.0 } - fn decode_from(x: Self::As) -> Self { - // Saturates if `x` is more than `$max` internally. - Self::from_parts(x) + fn decode_from(x: Self::As) -> Result { + // Saturates if `x` is more than `$max` internally. + Ok(Self::from_parts(x)) } } @@ -359,19 +461,22 @@ macro_rules! implement_per_thing { /// NOTE: saturate to 0 or 1 if x is beyond `[0, 1]` #[cfg(feature = "std")] - fn from_fraction(x: f64) -> Self { + fn from_float(x: f64) -> Self { Self::from_parts((x.max(0.).min(1.) * $max as f64) as Self::Inner) } - fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From + TryInto + TryInto - + ops::Div + ops::Rem + ops::Add + Unsigned + fn from_rational(p: N, q: N) -> Self + where + N: Clone + Ord + TryInto + TryInto + + ops::Div + ops::Rem + ops::Add + Unsigned + + Zero + One, + Self::Inner: Into, { let div_ceil = |x: N, f: N| -> N { let mut o = x.clone() / f.clone(); let r = x.rem(f.clone()); - if r > N::from(0) { - o = o + N::from(1); + if r > N::zero() { + o = o + N::one(); } o }; @@ -458,62 +563,85 @@ macro_rules! implement_per_thing { PerThing::square(self) } - /// See [`PerThing::from_fraction`]. + /// See [`PerThing::from_float`]. #[cfg(feature = "std")] - pub fn from_fraction(x: f64) -> Self { - ::from_fraction(x) + pub fn from_float(x: f64) -> Self { + ::from_float(x) } - /// See [`PerThing::from_rational_approximation`]. + /// See [`PerThing::from_rational`]. + #[deprecated = "Use `PerThing::from_rational` instead"] pub fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From<$type> + TryInto<$type> + + where N: Clone + Ord + TryInto<$type> + + TryInto<$upper_type> + ops::Div + ops::Rem + + ops::Add + Unsigned, + $type: Into, + { + ::from_rational(p, q) + } + + /// See [`PerThing::from_rational`]. + pub fn from_rational(p: N, q: N) -> Self + where N: Clone + Ord + TryInto<$type> + TryInto<$upper_type> + ops::Div + ops::Rem + - ops::Add + Unsigned + ops::Add + Unsigned, + $type: Into, { - ::from_rational_approximation(p, q) + ::from_rational(p, q) } /// See [`PerThing::mul_floor`]. pub fn mul_floor(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + - ops::Rem + ops::Div + ops::Mul + - ops::Add + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + + ops::Rem + ops::Div + ops::Mul + + ops::Add + Unsigned, + $type: Into, + { PerThing::mul_floor(self, b) } /// See [`PerThing::mul_ceil`]. pub fn mul_ceil(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + - ops::Rem + ops::Div + ops::Mul + - ops::Add + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + + ops::Rem + ops::Div + ops::Mul + + ops::Add + Unsigned, + $type: Into, { PerThing::mul_ceil(self, b) } /// See [`PerThing::saturating_reciprocal_mul`]. 
pub fn saturating_reciprocal_mul(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + - ops::Div + ops::Mul + ops::Add + - Saturating + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating + Unsigned, + $type: Into, { PerThing::saturating_reciprocal_mul(self, b) } /// See [`PerThing::saturating_reciprocal_mul_floor`]. pub fn saturating_reciprocal_mul_floor(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + - ops::Div + ops::Mul + ops::Add + - Saturating + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating + Unsigned, + $type: Into, { PerThing::saturating_reciprocal_mul_floor(self, b) } /// See [`PerThing::saturating_reciprocal_mul_ceil`]. pub fn saturating_reciprocal_mul_ceil(self, b: N) -> N - where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + - ops::Div + ops::Mul + ops::Add + - Saturating + Unsigned + where + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating + Unsigned, + $type: Into, { PerThing::saturating_reciprocal_mul_ceil(self, b) } @@ -536,37 +664,13 @@ macro_rules! implement_per_thing { /// Saturating multiply. Compute `self * rhs`, saturating at the numeric bounds instead of /// overflowing. This operation is lossy. fn saturating_mul(self, rhs: Self) -> Self { - let a = self.0 as $upper_type; - let b = rhs.0 as $upper_type; - let m = <$upper_type>::from($max); - let parts = a * b / m; - // This will always fit into $type. - Self::from_parts(parts as $type) + self * rhs } /// Saturating exponentiation. Computes `self.pow(exp)`, saturating at the numeric /// bounds instead of overflowing. This operation is lossy. fn saturating_pow(self, exp: usize) -> Self { - if self.is_zero() || self.is_one() { - self - } else { - let p = <$name as PerThing>::Upper::from(self.deconstruct()); - let q = <$name as PerThing>::Upper::from(Self::ACCURACY); - let mut s = Self::one(); - for _ in 0..exp { - if s.is_zero() { - break; - } else { - // x^2 always fits in Self::Upper if x fits in Self::Inner. - // Verified by a test. - s = Self::from_rational_approximation( - <$name as PerThing>::Upper::from(s.deconstruct()) * p, - q * q, - ); - } - } - s - } + self.pow(exp) } } @@ -582,7 +686,7 @@ macro_rules! implement_per_thing { } } - impl crate::traits::Bounded for $name { + impl Bounded for $name { fn min_value() -> Self { ::zero() } @@ -592,13 +696,48 @@ macro_rules! implement_per_thing { } } + impl ops::Mul for $name { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + let a = self.0 as $upper_type; + let b = rhs.0 as $upper_type; + let m = <$upper_type>::from($max); + let parts = a * b / m; + // This will always fit into $type. + Self::from_parts(parts as $type) + } + } + + impl Pow for $name { + type Output = Self; + + fn pow(mut self, exp: usize) -> Self::Output { + if exp == 0 || self.is_one() { + return Self::one() + } + + let mut result = self; + let mut exp = exp - 1; + while exp > 0 && !result.is_zero() { + if exp % 2 != 0 { + result = result * self; + exp -= 1; + } + self = self.square(); + exp /= 2; + } + result + } + } + impl ops::Div for $name { type Output = Self; fn div(self, rhs: Self) -> Self::Output { let p = self.0; let q = rhs.0; - Self::from_rational_approximation(p, q) + Self::from_rational(p, q) } } @@ -613,8 +752,9 @@ macro_rules! 
implement_per_thing { /// This is tailored to be used with a balance type. impl ops::Mul for $name where - N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + N: Clone + UniqueSaturatedInto<$type> + ops::Rem + ops::Div + ops::Mul + ops::Add + Unsigned, + $type: Into, { type Output = N; fn mul(self, b: N) -> Self::Output { @@ -622,6 +762,78 @@ macro_rules! implement_per_thing { } } + impl ops::Div for $name where $type: TryFrom { + type Output = Self; + fn div(self, b: N) -> Self::Output { + <$type>::try_from(b).map_or(Self::zero(), |d| Self::from_parts(self.0 / d)) + } + } + + impl Add for $name { + type Output = $name; + + // For PerU16, $max == u16::MAX, so we need this `allow`. + #[allow(unused_comparisons)] + #[inline] + fn add(self, rhs: Self) -> Self::Output { + let inner = self.deconstruct().add(rhs.deconstruct()); + debug_assert!(inner <= $max); + $name::from_parts(inner) + } + } + + impl CheckedAdd for $name { + // For PerU16, $max == u16::MAX, so we need this `allow`. + #[allow(unused_comparisons)] + #[inline] + fn checked_add(&self, rhs: &Self) -> Option { + self.deconstruct() + .checked_add(rhs.deconstruct()) + .map(|inner| if inner > $max { None } else { Some($name::from_parts(inner)) }) + .flatten() + } + } + + impl Sub for $name { + type Output = $name; + + #[inline] + fn sub(self, rhs: Self) -> Self::Output { + $name::from_parts(self.deconstruct().sub(rhs.deconstruct())) + } + } + + impl CheckedSub for $name { + #[inline] + fn checked_sub(&self, v: &Self) -> Option { + self.deconstruct().checked_sub(v.deconstruct()).map($name::from_parts) + } + } + + impl SaturatingAdd for $name { + #[inline] + fn saturating_add(&self, v: &Self) -> Self { + $name::from_parts(self.deconstruct().saturating_add(v.deconstruct())) + } + } + + impl SaturatingSub for $name { + #[inline] + fn saturating_sub(&self, v: &Self) -> Self { + $name::from_parts(self.deconstruct().saturating_sub(v.deconstruct())) + } + } + + /// # Note + /// CheckedMul will never fail for PerThings. + impl CheckedMul for $name { + #[inline] + fn checked_mul(&self, rhs: &Self) -> Option { + Some(*self * *rhs) + } + } + + #[cfg(test)] mod $test_mod { use codec::{Encode, Decode}; @@ -631,13 +843,13 @@ macro_rules! implement_per_thing { #[test] fn macro_expanded_correctly() { // needed for the `from_percent` to work. UPDATE: this is no longer needed; yet note - // that tests that use percentage or fractions such as $name::from_fraction(0.2) to + // that tests that use percentage or fractions such as $name::from_float(0.2) to // create values will most likely be inaccurate when used with per_things that are // not multiples of 100. // assert!($max >= 100); // assert!($max % 100 == 0); - // needed for `from_rational_approximation` + // needed for `from_rational` assert!(2 * ($max as $upper_type) < <$upper_type>::max_value()); assert!(<$upper_type>::from($max) < <$upper_type>::max_value()); @@ -707,14 +919,24 @@ macro_rules! 
implement_per_thing { assert_eq!($name::from_percent(0), $name::from_parts(Zero::zero())); assert_eq!($name::from_percent(10), $name::from_parts($max / 10)); + assert_eq!($name::from_percent(50), $name::from_parts($max / 2)); assert_eq!($name::from_percent(100), $name::from_parts($max)); assert_eq!($name::from_percent(200), $name::from_parts($max)); - assert_eq!($name::from_fraction(0.0), $name::from_parts(Zero::zero())); - assert_eq!($name::from_fraction(0.1), $name::from_parts($max / 10)); - assert_eq!($name::from_fraction(1.0), $name::from_parts($max)); - assert_eq!($name::from_fraction(2.0), $name::from_parts($max)); - assert_eq!($name::from_fraction(-1.0), $name::from_parts(Zero::zero())); + assert_eq!($name::from_float(0.0), $name::from_parts(Zero::zero())); + assert_eq!($name::from_float(0.1), $name::from_parts($max / 10)); + assert_eq!($name::from_float(1.0), $name::from_parts($max)); + assert_eq!($name::from_float(2.0), $name::from_parts($max)); + assert_eq!($name::from_float(-1.0), $name::from_parts(Zero::zero())); + } + + #[test] + fn percent_trait_impl_works() { + assert_eq!(<$name as PerThing>::from_percent(0), $name::from_parts(Zero::zero())); + assert_eq!(<$name as PerThing>::from_percent(10), $name::from_parts($max / 10)); + assert_eq!(<$name as PerThing>::from_percent(50), $name::from_parts($max / 2)); + assert_eq!(<$name as PerThing>::from_percent(100), $name::from_parts($max)); + assert_eq!(<$name as PerThing>::from_percent(200), $name::from_parts($max)); } macro_rules! u256ify { @@ -727,7 +949,7 @@ macro_rules! implement_per_thing { ($num_type:tt) => { // multiplication from all sort of from_percent assert_eq!( - $name::from_fraction(1.0) * $num_type::max_value(), + $name::from_float(1.0) * $num_type::max_value(), $num_type::max_value() ); if $max % 100 == 0 { @@ -737,7 +959,7 @@ macro_rules! implement_per_thing { 1, ); assert_eq!( - $name::from_fraction(0.5) * $num_type::max_value(), + $name::from_float(0.5) * $num_type::max_value(), $num_type::max_value() / 2, ); assert_eq_error_rate!( @@ -747,30 +969,30 @@ macro_rules! implement_per_thing { ); } else { assert_eq!( - $name::from_fraction(0.99) * <$num_type>::max_value(), + $name::from_float(0.99) * <$num_type>::max_value(), ( ( - u256ify!($name::from_fraction(0.99).0) * + u256ify!($name::from_float(0.99).0) * u256ify!(<$num_type>::max_value()) / u256ify!($max) ).as_u128() ) as $num_type, ); assert_eq!( - $name::from_fraction(0.50) * <$num_type>::max_value(), + $name::from_float(0.50) * <$num_type>::max_value(), ( ( - u256ify!($name::from_fraction(0.50).0) * + u256ify!($name::from_float(0.50).0) * u256ify!(<$num_type>::max_value()) / u256ify!($max) ).as_u128() ) as $num_type, ); assert_eq!( - $name::from_fraction(0.01) * <$num_type>::max_value(), + $name::from_float(0.01) * <$num_type>::max_value(), ( ( - u256ify!($name::from_fraction(0.01).0) * + u256ify!($name::from_float(0.01).0) * u256ify!(<$num_type>::max_value()) / u256ify!($max) ).as_u128() @@ -778,7 +1000,7 @@ macro_rules! implement_per_thing { ); } - assert_eq!($name::from_fraction(0.0) * $num_type::max_value(), 0); + assert_eq!($name::from_float(0.0) * $num_type::max_value(), 0); // // multiplication with bounds assert_eq!($name::one() * $num_type::max_value(), $num_type::max_value()); @@ -792,7 +1014,7 @@ macro_rules! implement_per_thing { // accuracy test assert_eq!( - $name::from_rational_approximation(1 as $type, 3) * 30 as $type, + $name::from_rational(1 as $type, 3) * 30 as $type, 10, ); @@ -801,10 +1023,10 @@ macro_rules! 
implement_per_thing { #[test] fn per_thing_mul_rounds_to_nearest_number() { - assert_eq!($name::from_fraction(0.33) * 10u64, 3); - assert_eq!($name::from_fraction(0.34) * 10u64, 3); - assert_eq!($name::from_fraction(0.35) * 10u64, 3); - assert_eq!($name::from_fraction(0.36) * 10u64, 4); + assert_eq!($name::from_float(0.33) * 10u64, 3); + assert_eq!($name::from_float(0.34) * 10u64, 3); + assert_eq!($name::from_float(0.35) * 10u64, 3); + assert_eq!($name::from_float(0.36) * 10u64, 4); } #[test] @@ -822,33 +1044,33 @@ macro_rules! implement_per_thing { ($num_type:tt) => { // within accuracy boundary assert_eq!( - $name::from_rational_approximation(1 as $num_type, 0), + $name::from_rational(1 as $num_type, 0), $name::one(), ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 1), + $name::from_rational(1 as $num_type, 1), $name::one(), ); assert_eq_error_rate!( - $name::from_rational_approximation(1 as $num_type, 3).0, + $name::from_rational(1 as $num_type, 3).0, $name::from_parts($max / 3).0, 2 ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 10), - $name::from_fraction(0.10), + $name::from_rational(1 as $num_type, 10), + $name::from_float(0.10), ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 4), - $name::from_fraction(0.25), + $name::from_rational(1 as $num_type, 4), + $name::from_float(0.25), ); assert_eq!( - $name::from_rational_approximation(1 as $num_type, 4), - $name::from_rational_approximation(2 as $num_type, 8), + $name::from_rational(1 as $num_type, 4), + $name::from_rational(2 as $num_type, 8), ); // no accurate anymore but won't overflow. assert_eq_error_rate!( - $name::from_rational_approximation( + $name::from_rational( $num_type::max_value() - 1, $num_type::max_value() ).0 as $upper_type, @@ -856,7 +1078,7 @@ macro_rules! implement_per_thing { 2, ); assert_eq_error_rate!( - $name::from_rational_approximation( + $name::from_rational( $num_type::max_value() / 3, $num_type::max_value() ).0 as $upper_type, @@ -864,7 +1086,7 @@ macro_rules! implement_per_thing { 2, ); assert_eq!( - $name::from_rational_approximation(1, $num_type::max_value()), + $name::from_rational(1, $num_type::max_value()), $name::zero(), ); }; @@ -878,28 +1100,28 @@ macro_rules! implement_per_thing { // almost at the edge assert_eq!( - $name::from_rational_approximation(max_value - 1, max_value + 1), + $name::from_rational(max_value - 1, max_value + 1), $name::from_parts($max - 2), ); assert_eq!( - $name::from_rational_approximation(1, $max - 1), + $name::from_rational(1, $max - 1), $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(1, $max), + $name::from_rational(1, $max), $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(2, 2 * max_value - 1), + $name::from_rational(2, 2 * max_value - 1), $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(1, max_value + 1), + $name::from_rational(1, max_value + 1), $name::zero(), ); assert_eq!( - $name::from_rational_approximation(3 * max_value / 2, 3 * max_value), - $name::from_fraction(0.5), + $name::from_rational(3 * max_value / 2, 3 * max_value), + $name::from_float(0.5), ); $(per_thing_from_rationale_approx_test!($test_units);)* @@ -907,66 +1129,66 @@ macro_rules! 
implement_per_thing { #[test] fn per_things_mul_operates_in_output_type() { - // assert_eq!($name::from_fraction(0.5) * 100u32, 50u32); - assert_eq!($name::from_fraction(0.5) * 100u64, 50u64); - assert_eq!($name::from_fraction(0.5) * 100u128, 50u128); + // assert_eq!($name::from_float(0.5) * 100u32, 50u32); + assert_eq!($name::from_float(0.5) * 100u64, 50u64); + assert_eq!($name::from_float(0.5) * 100u128, 50u128); } #[test] fn per_thing_saturating_op_works() { assert_eq_error_rate!( - $name::from_fraction(0.5).saturating_add($name::from_fraction(0.4)).0 as $upper_type, - $name::from_fraction(0.9).0 as $upper_type, + $name::from_float(0.5).saturating_add($name::from_float(0.4)).0 as $upper_type, + $name::from_float(0.9).0 as $upper_type, 2, ); assert_eq_error_rate!( - $name::from_fraction(0.5).saturating_add($name::from_fraction(0.5)).0 as $upper_type, + $name::from_float(0.5).saturating_add($name::from_float(0.5)).0 as $upper_type, $name::one().0 as $upper_type, 2, ); assert_eq!( - $name::from_fraction(0.6).saturating_add($name::from_fraction(0.5)), + $name::from_float(0.6).saturating_add($name::from_float(0.5)), $name::one(), ); assert_eq_error_rate!( - $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.5)).0 as $upper_type, - $name::from_fraction(0.1).0 as $upper_type, + $name::from_float(0.6).saturating_sub($name::from_float(0.5)).0 as $upper_type, + $name::from_float(0.1).0 as $upper_type, 2, ); assert_eq!( - $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.6)), - $name::from_fraction(0.0), + $name::from_float(0.6).saturating_sub($name::from_float(0.6)), + $name::from_float(0.0), ); assert_eq!( - $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.7)), - $name::from_fraction(0.0), + $name::from_float(0.6).saturating_sub($name::from_float(0.7)), + $name::from_float(0.0), ); assert_eq_error_rate!( - $name::from_fraction(0.5).saturating_mul($name::from_fraction(0.5)).0 as $upper_type, - $name::from_fraction(0.25).0 as $upper_type, + $name::from_float(0.5).saturating_mul($name::from_float(0.5)).0 as $upper_type, + $name::from_float(0.25).0 as $upper_type, 2, ); assert_eq_error_rate!( - $name::from_fraction(0.2).saturating_mul($name::from_fraction(0.2)).0 as $upper_type, - $name::from_fraction(0.04).0 as $upper_type, + $name::from_float(0.2).saturating_mul($name::from_float(0.2)).0 as $upper_type, + $name::from_float(0.04).0 as $upper_type, 2, ); assert_eq_error_rate!( - $name::from_fraction(0.1).saturating_mul($name::from_fraction(0.1)).0 as $upper_type, - $name::from_fraction(0.01).0 as $upper_type, + $name::from_float(0.1).saturating_mul($name::from_float(0.1)).0 as $upper_type, + $name::from_float(0.01).0 as $upper_type, 1, ); } #[test] fn per_thing_square_works() { - assert_eq!($name::from_fraction(1.0).square(), $name::from_fraction(1.0)); - assert_eq!($name::from_fraction(0.5).square(), $name::from_fraction(0.25)); - assert_eq!($name::from_fraction(0.1).square(), $name::from_fraction(0.01)); + assert_eq!($name::from_float(1.0).square(), $name::from_float(1.0)); + assert_eq!($name::from_float(0.5).square(), $name::from_float(0.25)); + assert_eq!($name::from_float(0.1).square(), $name::from_float(0.01)); assert_eq!( - $name::from_fraction(0.02).square(), + $name::from_float(0.02).square(), $name::from_parts((4 * <$upper_type>::from($max) / 100 / 100) as $type) ); } @@ -975,30 +1197,30 @@ macro_rules! 
implement_per_thing { fn per_things_div_works() { // normal assert_eq_error_rate!( - ($name::from_fraction(0.1) / $name::from_fraction(0.20)).0 as $upper_type, - $name::from_fraction(0.50).0 as $upper_type, + ($name::from_float(0.1) / $name::from_float(0.20)).0 as $upper_type, + $name::from_float(0.50).0 as $upper_type, 2, ); assert_eq_error_rate!( - ($name::from_fraction(0.1) / $name::from_fraction(0.10)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(0.1) / $name::from_float(0.10)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); assert_eq_error_rate!( - ($name::from_fraction(0.1) / $name::from_fraction(0.0)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(0.1) / $name::from_float(0.0)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); // will not overflow assert_eq_error_rate!( - ($name::from_fraction(0.10) / $name::from_fraction(0.05)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(0.10) / $name::from_float(0.05)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); assert_eq_error_rate!( - ($name::from_fraction(1.0) / $name::from_fraction(0.5)).0 as $upper_type, - $name::from_fraction(1.0).0 as $upper_type, + ($name::from_float(1.0) / $name::from_float(0.5)).0 as $upper_type, + $name::from_float(1.0).0 as $upper_type, 2, ); } @@ -1023,11 +1245,13 @@ macro_rules! implement_per_thing { $name::from_parts($max / 2).square(), ); - // x^3 - assert_eq!( - $name::from_parts($max / 2).saturating_pow(3), - $name::from_parts($max / 8), - ); + // x^2 .. x^16 + for n in 1..=16 { + assert_eq!( + $name::from_parts($max / 2).saturating_pow(n), + $name::from_parts(($max as u128 / 2u128.pow(n as u32)) as $type), + ); + } // 0^n == 0 assert_eq!( @@ -1196,6 +1420,106 @@ macro_rules! 
implement_per_thing { assert_eq!((p.0).0, $max); assert_eq!($name::from(p), $name::max_value()); } + + #[allow(unused_imports)] + use super::*; + + #[test] + fn test_add_basic() { + assert_eq!($name::from_parts(1) + $name::from_parts(1), $name::from_parts(2)); + assert_eq!($name::from_parts(10) + $name::from_parts(10), $name::from_parts(20)); + } + + #[test] + fn test_basic_checked_add() { + assert_eq!( + $name::from_parts(1).checked_add(&$name::from_parts(1)), + Some($name::from_parts(2)) + ); + assert_eq!( + $name::from_parts(10).checked_add(&$name::from_parts(10)), + Some($name::from_parts(20)) + ); + assert_eq!( + $name::from_parts(<$type>::MAX).checked_add(&$name::from_parts(<$type>::MAX)), + None + ); + assert_eq!( + $name::from_parts($max).checked_add(&$name::from_parts(1)), + None + ); + } + + #[test] + fn test_basic_saturating_add() { + assert_eq!( + $name::from_parts(1).saturating_add($name::from_parts(1)), + $name::from_parts(2) + ); + assert_eq!( + $name::from_parts(10).saturating_add($name::from_parts(10)), + $name::from_parts(20) + ); + assert_eq!( + $name::from_parts(<$type>::MAX).saturating_add($name::from_parts(<$type>::MAX)), + $name::from_parts(<$type>::MAX) + ); + } + + #[test] + fn test_basic_sub() { + assert_eq!($name::from_parts(2) - $name::from_parts(1), $name::from_parts(1)); + assert_eq!($name::from_parts(20) - $name::from_parts(10), $name::from_parts(10)); + } + + #[test] + fn test_basic_checked_sub() { + assert_eq!( + $name::from_parts(2).checked_sub(&$name::from_parts(1)), + Some($name::from_parts(1)) + ); + assert_eq!( + $name::from_parts(20).checked_sub(&$name::from_parts(10)), + Some($name::from_parts(10)) + ); + assert_eq!($name::from_parts(0).checked_sub(&$name::from_parts(1)), None); + } + + #[test] + fn test_basic_saturating_sub() { + assert_eq!( + $name::from_parts(2).saturating_sub($name::from_parts(1)), + $name::from_parts(1) + ); + assert_eq!( + $name::from_parts(20).saturating_sub($name::from_parts(10)), + $name::from_parts(10) + ); + assert_eq!( + $name::from_parts(0).saturating_sub($name::from_parts(1)), + $name::from_parts(0) + ); + } + + #[test] + fn test_basic_checked_mul() { + assert_eq!( + $name::from_parts($max).checked_mul(&$name::from_parts($max)), + Some($name::from_percent(100)) + ); + assert_eq!( + $name::from_percent(100).checked_mul(&$name::from_percent(100)), + Some($name::from_percent(100)) + ); + assert_eq!( + $name::from_percent(50).checked_mul(&$name::from_percent(26)), + Some($name::from_percent(13)) + ); + assert_eq!( + $name::from_percent(0).checked_mul(&$name::from_percent(0)), + Some($name::from_percent(0)) + ); + } } }; } @@ -1245,15 +1569,7 @@ macro_rules! implement_per_thing_with_perthousand { } } -implement_per_thing!( - Percent, - test_per_cent, - [u32, u64, u128], - 100u8, - u8, - u16, - "_Percent_", -); +implement_per_thing!(Percent, test_per_cent, [u32, u64, u128], 100u8, u8, u16, "_Percent_",); implement_per_thing_with_perthousand!( PerU16, test_peru16, diff --git a/primitives/arithmetic/src/rational.rs b/primitives/arithmetic/src/rational.rs index 07556bc0e2d71..225e1d9521827 100644 --- a/primitives/arithmetic/src/rational.rs +++ b/primitives/arithmetic/src/rational.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
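The per-thing hunks above split the old monolithic `saturating_mul`/`saturating_pow` into standalone `Mul` and `Pow` impls, with `Pow` using square-and-multiply, and add `CheckedAdd`/`CheckedSub` that refuse to leave `[0%, 100%]`. A sketch of both, with the exponentiation loop transplanted onto a plain `u128` so it runs standalone (values are illustrative):

```rust
use sp_arithmetic::{traits::{CheckedAdd, CheckedSub}, Percent};

// Same square-and-multiply shape as the new `Pow` impl: `result` picks up a
// factor of the current `base` whenever the remaining exponent is odd.
fn pow(mut base: u128, exp: usize) -> u128 {
    if exp == 0 || base == 1 {
        return 1
    }
    let mut result = base;
    let mut exp = exp - 1;
    while exp > 0 && result != 0 {
        if exp % 2 != 0 {
            result *= base;
            exp -= 1;
        }
        base *= base;
        exp /= 2;
    }
    result
}

fn main() {
    assert_eq!(pow(3, 5), 243);

    // The checked operators return `None` instead of wrapping or saturating.
    let a = Percent::from_percent(90);
    let b = Percent::from_percent(20);
    assert_eq!(a.checked_add(&b), None); // 110% does not fit
    assert_eq!(a.checked_sub(&b), Some(Percent::from_percent(70)));
    assert_eq!(b.checked_sub(&a), None); // would underflow
}
```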
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,10 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::{biguint::BigUint, helpers_128bit}; +use num_traits::{Bounded, One, Zero}; use sp_std::{cmp::Ordering, prelude::*}; -use crate::helpers_128bit; -use num_traits::{Zero, One, Bounded}; -use crate::biguint::BigUint; /// A wrapper for any rational number with infinitely large numerator and denominator. /// @@ -160,9 +159,11 @@ impl Rational128 { /// accurately calculated. pub fn lcm(&self, other: &Self) -> Result { // this should be tested better: two large numbers that are almost the same. - if self.1 == other.1 { return Ok(self.1) } + if self.1 == other.1 { + return Ok(self.1) + } let g = helpers_128bit::gcd(self.1, other.1); - helpers_128bit::multiply_by_rational(self.1 , other.1, g) + helpers_128bit::multiply_by_rational(self.1, other.1, g) } /// A saturating add that assumes `self` and `other` have the same denominator. @@ -170,7 +171,7 @@ impl Rational128 { if other.is_zero() { self } else { - Self(self.0.saturating_add(other.0) ,self.1) + Self(self.0.saturating_add(other.0), self.1) } } @@ -179,7 +180,7 @@ impl Rational128 { if other.is_zero() { self } else { - Self(self.0.saturating_sub(other.0) ,self.1) + Self(self.0.saturating_sub(other.0), self.1) } } @@ -190,7 +191,9 @@ impl Rational128 { let lcm = self.lcm(&other).map_err(|_| "failed to scale to denominator")?; let self_scaled = self.to_den(lcm).map_err(|_| "failed to scale to denominator")?; let other_scaled = other.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - let n = self_scaled.0.checked_add(other_scaled.0) + let n = self_scaled + .0 + .checked_add(other_scaled.0) .ok_or("overflow while adding numerators")?; Ok(Self(n, self_scaled.1)) } @@ -203,7 +206,9 @@ impl Rational128 { let self_scaled = self.to_den(lcm).map_err(|_| "failed to scale to denominator")?; let other_scaled = other.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - let n = self_scaled.0.checked_sub(other_scaled.0) + let n = self_scaled + .0 + .checked_sub(other_scaled.0) .ok_or("overflow while subtracting numerators")?; Ok(Self(n, self_scaled.1)) } @@ -243,7 +248,8 @@ impl Ord for Rational128 { } else { // Don't even compute gcd. 
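The `Rational128` code above leans on two tricks: an `lcm` computed as `a * b / gcd(a, b)`, which can fail to fit in `u128` (hence the `Result`), and comparison by cross-multiplying numerators against denominators in a wider big-uint type so the products cannot overflow. A minimal self-contained sketch of both ideas:

```rust
// Plain Euclid's algorithm, the same idea as `helpers_128bit::gcd`.
fn gcd(mut a: u128, mut b: u128) -> u128 {
    while b != 0 {
        let r = a % b;
        a = b;
        b = r;
    }
    a
}

// lcm(a, b) = a / gcd(a, b) * b. Dividing first keeps the intermediate small,
// but the final product can still overflow; the real code reports that as Err.
fn lcm(a: u128, b: u128) -> Option<u128> {
    (a / gcd(a, b)).checked_mul(b)
}

fn main() {
    assert_eq!(gcd(12, 18), 6);
    assert_eq!(lcm(4, 6), Some(12));

    // a/b < c/d  <=>  a*d < c*b. `Ord for Rational128` does these products in
    // a big-uint helper; this sketch just assumes small operands.
    let (a, b, c, d) = (1u128, 3u128, 1u128, 2u128);
    assert!(a * d < c * b); // 1/3 < 1/2
}
```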
let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); - let other_n = helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); + let other_n = + helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); self_n.cmp(&other_n) } } @@ -256,7 +262,8 @@ impl PartialEq for Rational128 { self.0.eq(&other.0) } else { let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); - let other_n = helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); + let other_n = + helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); self_n.eq(&other_n) } } @@ -264,12 +271,11 @@ impl PartialEq for Rational128 { #[cfg(test)] mod tests { - use super::*; - use super::helpers_128bit::*; + use super::{helpers_128bit::*, *}; - const MAX128: u128 = u128::max_value(); - const MAX64: u128 = u64::max_value() as u128; - const MAX64_2: u128 = 2 * u64::max_value() as u128; + const MAX128: u128 = u128::MAX; + const MAX64: u128 = u64::MAX as u128; + const MAX64_2: u128 = 2 * u64::MAX as u128; fn r(p: u128, q: u128) -> Rational128 { Rational128(p, q) @@ -277,7 +283,9 @@ mod tests { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; - if a.is_zero() { return Zero::zero(); } + if a.is_zero() { + return Zero::zero() + } let c = c.max(1); // e for extended @@ -295,14 +303,8 @@ mod tests { #[test] fn truth_value_function_works() { - assert_eq!( - mul_div(2u128.pow(100), 8, 4), - 2u128.pow(101) - ); - assert_eq!( - mul_div(2u128.pow(100), 4, 8), - 2u128.pow(99) - ); + assert_eq!(mul_div(2u128.pow(100), 8, 4), 2u128.pow(101)); + assert_eq!(mul_div(2u128.pow(100), 4, 8), 2u128.pow(99)); // and it returns a if result cannot fit assert_eq!(mul_div(MAX128 - 10, 2, 1), MAX128 - 10); @@ -319,13 +321,10 @@ mod tests { assert_eq!(r(MAX128 / 2, MAX128).to_den(10), Ok(r(5, 10))); // large to perbill. This is very well needed for npos-elections. 
- assert_eq!( - r(MAX128 / 2, MAX128).to_den(1000_000_000), - Ok(r(500_000_000, 1000_000_000)) - ); + assert_eq!(r(MAX128 / 2, MAX128).to_den(1000_000_000), Ok(r(500_000_000, 1000_000_000))); // large to large - assert_eq!(r(MAX128 / 2, MAX128).to_den(MAX128/2), Ok(r(MAX128/4, MAX128/2))); + assert_eq!(r(MAX128 / 2, MAX128).to_den(MAX128 / 2), Ok(r(MAX128 / 4, MAX128 / 2))); } #[test] @@ -343,11 +342,11 @@ mod tests { // large numbers assert_eq!( - r(1_000_000_000, MAX128).lcm(&r(7_000_000_000, MAX128-1)), + r(1_000_000_000, MAX128).lcm(&r(7_000_000_000, MAX128 - 1)), Err("result cannot fit in u128"), ); assert_eq!( - r(1_000_000_000, MAX64).lcm(&r(7_000_000_000, MAX64-1)), + r(1_000_000_000, MAX64).lcm(&r(7_000_000_000, MAX64 - 1)), Ok(340282366920938463408034375210639556610), ); assert!(340282366920938463408034375210639556610 < MAX128); @@ -362,7 +361,7 @@ mod tests { // errors assert_eq!( - r(1, MAX128).checked_add(r(1, MAX128-1)), + r(1, MAX128).checked_add(r(1, MAX128 - 1)), Err("failed to scale to denominator"), ); assert_eq!( @@ -383,17 +382,14 @@ mod tests { // errors assert_eq!( - r(2, MAX128).checked_sub(r(1, MAX128-1)), + r(2, MAX128).checked_sub(r(1, MAX128 - 1)), Err("failed to scale to denominator"), ); assert_eq!( r(7, MAX128).checked_sub(r(MAX128, MAX128)), Err("overflow while subtracting numerators"), ); - assert_eq!( - r(1, 10).checked_sub(r(2,10)), - Err("overflow while subtracting numerators"), - ); + assert_eq!(r(1, 10).checked_sub(r(2, 10)), Err("overflow while subtracting numerators")); } #[test] @@ -428,7 +424,7 @@ mod tests { ); assert_eq!( // MAX128 % 7 == 3 - multiply_by_rational(MAX128, 11 , 13).unwrap(), + multiply_by_rational(MAX128, 11, 13).unwrap(), (MAX128 / 13 * 11) + (8 * 11 / 13), ); assert_eq!( @@ -437,14 +433,8 @@ mod tests { (MAX128 / 1000 * 555) + (455 * 555 / 1000), ); - assert_eq!( - multiply_by_rational(2 * MAX64 - 1, MAX64, MAX64).unwrap(), - 2 * MAX64 - 1, - ); - assert_eq!( - multiply_by_rational(2 * MAX64 - 1, MAX64 - 1, MAX64).unwrap(), - 2 * MAX64 - 3, - ); + assert_eq!(multiply_by_rational(2 * MAX64 - 1, MAX64, MAX64).unwrap(), 2 * MAX64 - 1); + assert_eq!(multiply_by_rational(2 * MAX64 - 1, MAX64 - 1, MAX64).unwrap(), 2 * MAX64 - 3); assert_eq!( multiply_by_rational(MAX64 + 100, MAX64_2, MAX64_2 / 2).unwrap(), @@ -459,31 +449,23 @@ mod tests { multiply_by_rational(2u128.pow(66) - 1, 2u128.pow(65) - 1, 2u128.pow(65)).unwrap(), 73786976294838206461, ); - assert_eq!( - multiply_by_rational(1_000_000_000, MAX128 / 8, MAX128 / 2).unwrap(), - 250000000, - ); + assert_eq!(multiply_by_rational(1_000_000_000, MAX128 / 8, MAX128 / 2).unwrap(), 250000000); assert_eq!( multiply_by_rational( 29459999999999999988000u128, 1000000000000000000u128, 10000000000000000000u128 - ).unwrap(), + ) + .unwrap(), 2945999999999999998800u128 ); } #[test] fn multiply_by_rational_a_b_are_interchangeable() { - assert_eq!( - multiply_by_rational(10, MAX128, MAX128 / 2), - Ok(20), - ); - assert_eq!( - multiply_by_rational(MAX128, 10, MAX128 / 2), - Ok(20), - ); + assert_eq!(multiply_by_rational(10, MAX128, MAX128 / 2), Ok(20)); + assert_eq!(multiply_by_rational(MAX128, 10, MAX128 / 2), Ok(20)); } #[test] diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index ce645cfe65d94..53341117b1fee 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,58 +17,131 @@ //! Primitive traits for the runtime arithmetic. -use sp_std::{self, convert::{TryFrom, TryInto}}; use codec::HasCompact; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ - Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, CheckedNeg, - CheckedShl, CheckedShr, checked_pow, Signed, Unsigned, + checked_pow, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, + CheckedShr, CheckedSub, One, Signed, Unsigned, Zero, }; -use sp_std::ops::{ - Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, - RemAssign, Shl, Shr +use sp_std::{ + self, + convert::{TryFrom, TryInto}, + ops::{ + Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, RemAssign, Shl, Shr, Sub, SubAssign, + }, }; /// A meta trait for arithmetic type operations, regardless of any limitation on size. pub trait BaseArithmetic: - From + - Zero + One + IntegerSquareRoot + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + CheckedShr + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Saturating + - PartialOrd + Ord + Bounded + HasCompact + Sized + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto -{} - -impl + - Zero + One + IntegerSquareRoot + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + CheckedShr + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Saturating + - PartialOrd + Ord + Bounded + HasCompact + Sized + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto -> BaseArithmetic for T {} + From + + Zero + + One + + IntegerSquareRoot + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + CheckedRem + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact + + Sized + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto +{ +} + +impl< + T: From + + Zero + + One + + IntegerSquareRoot + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + CheckedRem + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact + + Sized + + TryFrom + + TryInto + + TryFrom + + TryInto + + 
TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto, + > BaseArithmetic for T +{ +} /// A meta trait for arithmetic. /// @@ -127,9 +200,51 @@ pub trait Saturating { /// Saturating exponentiation. Compute `self.pow(exp)`, saturating at the numeric bounds /// instead of overflowing. fn saturating_pow(self, exp: usize) -> Self; + + /// Increment self by one, saturating. + fn saturating_inc(&mut self) + where + Self: One, + { + let mut o = Self::one(); + sp_std::mem::swap(&mut o, self); + *self = o.saturating_add(One::one()); + } + + /// Decrement self by one, saturating at zero. + fn saturating_dec(&mut self) + where + Self: One, + { + let mut o = Self::one(); + sp_std::mem::swap(&mut o, self); + *self = o.saturating_sub(One::one()); + } + + /// Increment self by some `amount`, saturating. + fn saturating_accrue(&mut self, amount: Self) + where + Self: One, + { + let mut o = Self::one(); + sp_std::mem::swap(&mut o, self); + *self = o.saturating_add(amount); + } + + /// Decrement self by some `amount`, saturating at zero. + fn saturating_reduce(&mut self, amount: Self) + where + Self: One, + { + let mut o = Self::one(); + sp_std::mem::swap(&mut o, self); + *self = o.saturating_sub(amount); + } } -impl Saturating for T { +impl Saturating + for T +{ fn saturating_add(self, o: Self) -> Self { ::saturating_add(self, o) } @@ -139,26 +254,24 @@ impl Self { - self.checked_mul(&o) - .unwrap_or_else(|| - if (self < T::zero()) != (o < T::zero()) { - Bounded::min_value() - } else { - Bounded::max_value() - } - ) + self.checked_mul(&o).unwrap_or_else(|| { + if (self < T::zero()) != (o < T::zero()) { + Bounded::min_value() + } else { + Bounded::max_value() + } + }) } fn saturating_pow(self, exp: usize) -> Self { let neg = self < T::zero() && exp % 2 != 0; - checked_pow(self, exp) - .unwrap_or_else(|| - if neg { - Bounded::min_value() - } else { - Bounded::max_value() - } - ) + checked_pow(self, exp).unwrap_or_else(|| { + if neg { + Bounded::min_value() + } else { + Bounded::max_value() + } + }) } } @@ -171,7 +284,10 @@ pub trait SaturatedConversion { /// This just uses `UniqueSaturatedFrom` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. - fn saturated_from(t: T) -> Self where Self: UniqueSaturatedFrom { + fn saturated_from(t: T) -> Self + where + Self: UniqueSaturatedFrom, + { >::unique_saturated_from(t) } @@ -180,7 +296,10 @@ pub trait SaturatedConversion { /// This just uses `UniqueSaturatedInto` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. 
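The four new in-place `Saturating` helpers above (`saturating_inc`, `saturating_dec`, `saturating_accrue`, `saturating_reduce`) are thin wrappers over `saturating_add`/`saturating_sub`; the `mem::swap` exists only to move the value out from behind the `&mut`. A quick semantics sketch on a primitive, assuming the blanket impl shown in the diff:

```rust
use sp_arithmetic::traits::Saturating;

fn main() {
    let mut x: u8 = 254;
    x.saturating_inc();       // 255
    x.saturating_inc();       // still 255: clamped at the top
    assert_eq!(x, 255);
    x.saturating_reduce(10);  // 245
    x.saturating_accrue(200); // clamps back to 255
    assert_eq!(x, 255);
}
```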
- fn saturated_into(self) -> T where Self: UniqueSaturatedInto { + fn saturated_into(self) -> T + where + Self: UniqueSaturatedInto, + { >::unique_saturated_into(self) } } diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index ae373f1866ff6..6638e478b4cd7 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authority-discovery" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" @@ -13,17 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", default-features = false, version = "2.0.0" } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } [features] default = ["std"] std = [ "sp-application-crypto/std", "codec/std", + "scale-info/std", "sp-std/std", "sp-api/std", "sp-runtime/std" diff --git a/primitives/authority-discovery/src/lib.rs b/primitives/authority-discovery/src/lib.rs index 0ae47c9758ee6..871a35e6bf487 100644 --- a/primitives/authority-discovery/src/lib.rs +++ b/primitives/authority-discovery/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
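Stepping back to `SaturatedConversion` in `traits.rs` above: `saturated_from` and `saturated_into` are turbofish-friendly fronts for `UniqueSaturatedFrom`/`UniqueSaturatedInto`, useful when inference alone cannot pin the destination type. A usage sketch:

```rust
use sp_arithmetic::traits::SaturatedConversion;

fn main() {
    let big: u128 = u128::MAX;
    // Clamp instead of truncating or panicking; the turbofish names the
    // destination type explicitly.
    assert_eq!(big.saturated_into::<u8>(), u8::MAX);
    assert_eq!(u32::saturated_from(big), u32::MAX);
}
```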
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,11 +22,7 @@ use sp_std::vec::Vec; mod app { - use sp_application_crypto::{ - key_types::AUTHORITY_DISCOVERY, - app_crypto, - sr25519, - }; + use sp_application_crypto::{app_crypto, key_types::AUTHORITY_DISCOVERY, sr25519}; app_crypto!(sr25519, AUTHORITY_DISCOVERY); } diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index b6f463029077f..15e4dc57ff5ab 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authorship" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" @@ -13,10 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +async-trait = { version = "0.1.50", optional = true } [features] default = [ "std" ] @@ -25,4 +26,5 @@ std = [ "sp-std/std", "sp-inherents/std", "sp-runtime/std", + "async-trait", ] diff --git a/primitives/authorship/src/lib.rs b/primitives/authorship/src/lib.rs index a760c546a25d7..ac4b5fd315dc2 100644 --- a/primitives/authorship/src/lib.rs +++ b/primitives/authorship/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{result::Result, prelude::*}; +use sp_std::{prelude::*, result::Result}; -use codec::{Encode, Decode}; -use sp_inherents::{Error, InherentIdentifier, InherentData, IsFatalError}; -use sp_runtime::RuntimeString; +#[cfg(feature = "std")] +use codec::Decode; +use codec::Encode; +use sp_inherents::{Error, InherentData, InherentIdentifier, IsFatalError}; +use sp_runtime::{traits::Header as HeaderT, RuntimeString}; /// The identifier for the `uncles` inherent. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"uncles00"; @@ -44,12 +46,12 @@ impl IsFatalError for InherentError { } /// Auxiliary trait to extract uncles inherent data. -pub trait UnclesInherentData { +pub trait UnclesInherentData { /// Get uncles. fn uncles(&self) -> Result, Error>; } -impl UnclesInherentData for InherentData { +impl UnclesInherentData for InherentData { fn uncles(&self) -> Result, Error> { Ok(self.get_data(&INHERENT_IDENTIFIER)?.unwrap_or_default()) } @@ -57,36 +59,43 @@ impl UnclesInherentData for InherentData { /// Provider for inherent data. 
#[cfg(feature = "std")] -pub struct InherentDataProvider { - inner: F, - _marker: std::marker::PhantomData, +pub struct InherentDataProvider { + uncles: Vec, } #[cfg(feature = "std")] -impl InherentDataProvider { - pub fn new(uncles_oracle: F) -> Self { - InherentDataProvider { inner: uncles_oracle, _marker: Default::default() } +impl InherentDataProvider { + /// Create a new inherent data provider with the given `uncles`. + pub fn new(uncles: Vec) -> Self { + InherentDataProvider { uncles } + } + + /// Create a new instance that is usable for checking inherents. + /// + /// This will always return an empty vec of uncles. + pub fn check_inherents() -> Self { + Self { uncles: Vec::new() } } } #[cfg(feature = "std")] -impl sp_inherents::ProvideInherentData for InherentDataProvider -where F: Fn() -> Vec -{ - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + inherent_data.put_data(INHERENT_IDENTIFIER, &self.uncles) } - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - let uncles = (self.inner)(); - if !uncles.is_empty() { - inherent_data.put_data(INHERENT_IDENTIFIER, &uncles) - } else { - Ok(()) + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + if *identifier != INHERENT_IDENTIFIER { + return None } - } - fn error_to_string(&self, _error: &[u8]) -> Option { - Some(format!("no further information")) + let error = InherentError::decode(&mut &error[..]).ok()?; + + Some(Err(Error::Application(Box::from(format!("{:?}", error))))) } } diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index 767307c2a842a..d7fa0f2ef85cf 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-block-builder" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } [features] default = [ "std" ] diff --git a/primitives/block-builder/src/lib.rs b/primitives/block-builder/src/lib.rs index 6367a18afa615..3741b19200647 100644 --- a/primitives/block-builder/src/lib.rs +++ b/primitives/block-builder/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,29 +19,29 @@ #![cfg_attr(not(feature = "std"), no_std)] +use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; -use sp_inherents::{InherentData, CheckInherentsResult}; - sp_api::decl_runtime_apis! { /// The `BlockBuilder` api trait that provides the required functionality for building a block. - #[api_version(4)] + #[api_version(5)] pub trait BlockBuilder { /// Apply the given extrinsic. /// /// Returns an inclusion outcome which specifies if this extrinsic is included in /// this block or not. fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult; + /// Finish the current block. #[renamed("finalise_block", 3)] fn finalize_block() -> ::Header; + /// Generate inherent extrinsics. The inherent data will vary from chain to chain. fn inherent_extrinsics( inherent: InherentData, ) -> sp_std::vec::Vec<::Extrinsic>; + /// Check that the inherents are valid. The inherent data will vary from chain to chain. fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult; - /// Generate a random seed. - fn random_seed() -> ::Hash; } } diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 79c0b56616fac..66d9152c230df 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-blockchain" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,13 +14,14 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4.8" -lru = "0.4.0" -parking_lot = "0.10.0" -derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0", path = "../consensus/common" } -sp-runtime = { version = "2.0.0", path = "../runtime" } -sp-block-builder = { version = "2.0.0", path = "../block-builder" } -sp-state-machine = { version = "0.8.0", path = "../state-machine" } -sp-database = { version = "2.0.0", path = "../database" } +log = "0.4.11" +lru = "0.6.6" +parking_lot = "0.11.1" +thiserror = "1.0.21" +futures = "0.3.9" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-consensus = { version = "0.10.0-dev", path = "../consensus/common" } +sp-runtime = { version = "4.0.0-dev", path = "../runtime" } +sp-state-machine = { version = "0.10.0-dev", path = "../state-machine" } +sp-database = { version = "4.0.0-dev", path = "../database" } +sp-api = { version = "4.0.0-dev", path = "../api" } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 1328dfb5752fc..bb34a0449b5f7 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
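For context on the `#[api_version(5)]` bump above: versioned runtime APIs are declared through `decl_runtime_apis!`, and the version is raised whenever methods are added or removed, as with `random_seed` being dropped from `BlockBuilder`. An illustrative declaration (hypothetical trait, not part of this diff):

```rust
sp_api::decl_runtime_apis! {
    // Bumping the number tells clients that the method set changed between
    // runtime releases.
    #[api_version(2)]
    pub trait ExampleApi {
        fn example_value() -> u32;
    }
}
```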
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,13 @@ use std::sync::Arc; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::generic::BlockId; -use sp_runtime::Justification; use log::warn; use parking_lot::RwLock; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + Justifications, +}; use crate::header_metadata::HeaderMetadata; @@ -38,7 +40,10 @@ pub trait HeaderBackend: Send + Sync { /// Get block status. fn status(&self, id: BlockId) -> Result; /// Get block number by hash. Returns `None` if the header is not in the chain. - fn number(&self, hash: Block::Hash) -> Result::Header as HeaderT>::Number>>; + fn number( + &self, + hash: Block::Hash, + ) -> Result::Header as HeaderT>::Number>>; /// Get block hash by number. Returns `None` if the header is not in the chain. fn hash(&self, number: NumberFor) -> Result>; @@ -53,39 +58,42 @@ pub trait HeaderBackend: Send + Sync { /// Convert an arbitrary block ID into a block hash. fn block_number_from_id(&self, id: &BlockId) -> Result>> { match *id { - BlockId::Hash(_) => Ok(self.header(*id)?.map(|h| h.number().clone())), + BlockId::Hash(h) => self.number(h), BlockId::Number(n) => Ok(Some(n)), } } /// Get block header. Returns `UnknownBlock` error if block is not found. fn expect_header(&self, id: BlockId) -> Result { - self.header(id)?.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) + self.header(id)? + .ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) } - /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is not found. + /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is + /// not found. fn expect_block_number_from_id(&self, id: &BlockId) -> Result> { - self.block_number_from_id(id) - .and_then(|n| n.ok_or_else(|| - Error::UnknownBlock(format!("Expect block number from id: {}", id)) - )) + self.block_number_from_id(id).and_then(|n| { + n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id))) + }) } - /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is not found. + /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is + /// not found. fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { - self.block_hash_from_id(id) - .and_then(|n| n.ok_or_else(|| - Error::UnknownBlock(format!("Expect block hash from id: {}", id)) - )) + self.block_hash_from_id(id).and_then(|n| { + n.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id))) + }) } } /// Blockchain database backend. Does not perform any validation. -pub trait Backend: HeaderBackend + HeaderMetadata { +pub trait Backend: + HeaderBackend + HeaderMetadata +{ /// Get block body. Returns `None` if block is not found. fn body(&self, id: BlockId) -> Result::Extrinsic>>>; - /// Get block justification. Returns `None` if justification does not exist. - fn justification(&self, id: BlockId) -> Result>; + /// Get block justifications. Returns `None` if no justification exists. + fn justifications(&self, id: BlockId) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; /// Returns data cache reference, if it is enabled on this backend. 
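A small sketch of consuming the `HeaderBackend` helpers above from client-side code; any type implementing the trait works, and the `expect_header` variant converts a missing header into `Error::UnknownBlock` instead of handing back an `Option`:

```rust
use sp_blockchain::{HeaderBackend, Result};
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

// Resolve the chain head and fetch its header in one step.
fn best_header<Block: BlockT, C: HeaderBackend<Block>>(client: &C) -> Result<Block::Header> {
    let best_hash = client.info().best_hash;
    client.expect_header(BlockId::Hash(best_hash))
}
```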
@@ -120,14 +128,14 @@ pub trait Backend: HeaderBackend + HeaderMetadata x, // target not in blockchain - None => { return Ok(None); }, + None => return Ok(None), } }; if let Some(max_number) = maybe_max_number { // target outside search range if target_header.number() > &max_number { - return Ok(None); + return Ok(None) } } @@ -148,12 +156,12 @@ pub trait Backend: HeaderBackend + HeaderMetadata= *target_header.number() { // header is on a dead fork. - return Ok(None); + return Ok(None) } self.leaves()? @@ -171,12 +179,13 @@ pub trait Backend: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata Result>>; + + /// Check if indexed transaction exists. + fn has_indexed_transaction(&self, hash: &Block::Hash) -> Result { + Ok(self.indexed_transaction(hash)?.is_some()) + } + + fn block_indexed_body(&self, id: BlockId) -> Result>>>; } /// Provides access to the optional cache. @@ -239,7 +259,9 @@ pub trait Cache: Send + Sync { &self, key: &well_known_cache_keys::Id, block: &BlockId, - ) -> Result, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>>; + ) -> Result< + Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>, + >; } /// Blockchain info @@ -255,12 +277,14 @@ pub struct Info { pub finalized_hash: Block::Hash, /// Last finalized block number. pub finalized_number: <::Header as HeaderT>::Number, + /// Last finalized state. + pub finalized_state: Option<(Block::Hash, <::Header as HeaderT>::Number)>, /// Number of concurrent leave forks. - pub number_leaves: usize + pub number_leaves: usize, } /// Block status. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BlockStatus { /// Already in the blockchain. InChain, @@ -271,7 +295,7 @@ pub enum BlockStatus { /// A list of all well known keys in the blockchain cache. pub mod well_known_cache_keys { /// The type representing cache keys. - pub type Id = sp_consensus::import_queue::CacheKeyId; + pub type Id = sp_consensus::CacheKeyId; /// A list of authorities. pub const AUTHORITIES: Id = *b"auth"; diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index bc412e8358c8d..ef3afa5bce942 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,139 +17,180 @@ //! Substrate client possible errors. -use std::{self, error, result}; -use sp_state_machine; -use sp_runtime::transaction_validity::TransactionValidityError; -use sp_consensus; -use derive_more::{Display, From}; use codec::Error as CodecError; +use sp_api::ApiError; +use sp_consensus; +use sp_runtime::transaction_validity::TransactionValidityError; +use sp_state_machine; +use std::{self, result}; /// Client Result type alias pub type Result = result::Result; /// Error when the runtime failed to apply an extrinsic. -#[derive(Debug, Display)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] pub enum ApplyExtrinsicFailed { /// The transaction cannot be included into the current block. /// /// This doesn't necessary mean that the transaction itself is invalid, but it might be just /// unappliable onto the current block. 
- #[display(fmt = "Extrinsic is not valid: {:?}", _0)] - Validity(TransactionValidityError), - /// This is used for miscellaneous errors that can be represented by string and not handleable. - /// - /// This will become obsolete with complete migration to v4 APIs. - #[display(fmt = "Extrinsic failed: {:?}", _0)] - Msg(String), + #[error("Extrinsic is not valid: {0:?}")] + Validity(#[from] TransactionValidityError), + + #[error("Application specific error")] + Application(#[source] Box), } /// Substrate Client error -#[derive(Debug, Display, From)] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +#[non_exhaustive] pub enum Error { - /// Consensus Error - #[display(fmt = "Consensus: {}", _0)] - Consensus(sp_consensus::Error), - /// Backend error. - #[display(fmt = "Backend error: {}", _0)] - #[from(ignore)] + #[error("Cancelled oneshot channel {0}")] + OneShotCancelled(#[from] futures::channel::oneshot::Canceled), + + #[error(transparent)] + Consensus(#[from] sp_consensus::Error), + + #[error("Backend error: {0}")] Backend(String), - /// Unknown block. - #[display(fmt = "UnknownBlock: {}", _0)] - #[from(ignore)] + + #[error("UnknownBlock: {0}")] UnknownBlock(String), - /// The `apply_extrinsic` is not valid due to the given `TransactionValidityError`. - #[display(fmt = "{:?}", _0)] - ApplyExtrinsicFailed(ApplyExtrinsicFailed), - /// Execution error. - #[display(fmt = "Execution: {}", _0)] + + #[error(transparent)] + ApplyExtrinsicFailed(#[from] ApplyExtrinsicFailed), + + #[error("Child type is invalid")] + InvalidChildType, + + #[error("RemoteBodyRequest: invalid extrinsics root expected: {expected} but got {received}")] + ExtrinsicRootInvalid { received: String, expected: String }, + + // `inner` cannot be made member, since it lacks `std::error::Error` trait bounds. + #[error("Execution failed: {0:?}")] Execution(Box), - /// Blockchain error. - #[display(fmt = "Blockchain: {}", _0)] - Blockchain(Box), - /// Invalid authorities set received from the runtime. - #[display(fmt = "Current state of blockchain has invalid authorities set")] + + #[error("Blockchain")] + Blockchain(#[source] Box), + + /// A error used by various storage subsystems. + /// + /// Eventually this will be replaced. + #[error("{0}")] + StorageChanges(sp_state_machine::DefaultError), + + #[error("Invalid child storage key")] + InvalidChildStorageKey, + + #[error("Current state of blockchain has invalid authorities set")] InvalidAuthoritiesSet, - /// Could not get runtime version. - #[display(fmt = "Failed to get runtime version: {}", _0)] - #[from(ignore)] + + #[error("Failed to get runtime version: {0}")] VersionInvalid(String), - /// Genesis config is invalid. - #[display(fmt = "Genesis config provided is invalid")] - GenesisInvalid, - /// Error decoding header justification. - #[display(fmt = "error decoding justification for header")] + + #[error("Provided state is invalid")] + InvalidState, + + #[error("error decoding justification for header")] JustificationDecode, - /// Justification for header is correctly encoded, but invalid. - #[display(fmt = "bad justification for header: {}", _0)] - #[from(ignore)] + + #[error("bad justification for header: {0}")] BadJustification(String), - /// Not available on light client. - #[display(fmt = "This method is not currently available when running in light client mode")] + + #[error("This method is not currently available when running in light client mode")] NotAvailableOnLightClient, - /// Invalid remote CHT-based proof. 
- #[display(fmt = "Remote node has responded with invalid header proof")] + + #[error("Remote node has responded with invalid header proof")] InvalidCHTProof, - /// Remote fetch has been cancelled. - #[display(fmt = "Remote data fetch has been cancelled")] + + #[error("Remote data fetch has been cancelled")] RemoteFetchCancelled, - /// Remote fetch has been failed. - #[display(fmt = "Remote data fetch has been failed")] + + #[error("Remote data fetch has been failed")] RemoteFetchFailed, - /// Error decoding call result. - #[display(fmt = "Error decoding call result of {}: {}", _0, _1)] - CallResultDecode(&'static str, CodecError), - /// Error converting a parameter between runtime and node. - #[display(fmt = "Error converting `{}` between runtime and node", _0)] - #[from(ignore)] - RuntimeParamConversion(String), - /// Changes tries are not supported. - #[display(fmt = "Changes tries are not supported by the runtime")] + + #[error("Error decoding call result of {0}")] + CallResultDecode(&'static str, #[source] CodecError), + + #[error("Error at calling runtime api: {0}")] + RuntimeApiError(#[from] ApiError), + + #[error("Runtime :code missing in storage")] + RuntimeCodeMissing, + + #[error("Changes tries are not supported by the runtime")] ChangesTriesNotSupported, - /// Error reading changes tries configuration. - #[display(fmt = "Error reading changes tries configuration")] + + #[error("Error reading changes tries configuration")] ErrorReadingChangesTriesConfig, - /// Key changes query has failed. - #[display(fmt = "Failed to check changes proof: {}", _0)] - #[from(ignore)] + + #[error("Failed to check changes proof: {0}")] ChangesTrieAccessFailed(String), - /// Last finalized block not parent of current. - #[display(fmt = "Did not finalize blocks in sequential order.")] - #[from(ignore)] + + #[error("Did not finalize blocks in sequential order.")] NonSequentialFinalization(String), - /// Safety violation: new best block not descendent of last finalized. - #[display(fmt = "Potential long-range attack: block not in finalized chain.")] + + #[error("Potential long-range attack: block not in finalized chain.")] NotInFinalizedChain, - /// Hash that is required for building CHT is missing. - #[display(fmt = "Failed to get hash of block for building CHT")] + + #[error("Failed to get hash of block for building CHT")] MissingHashRequiredForCHT, - /// Invalid calculated state root on block import. - #[display(fmt = "Calculated state root does not match.")] + + #[error("Calculated state root does not match.")] InvalidStateRoot, - /// Incomplete block import pipeline. - #[display(fmt = "Incomplete block import pipeline.")] + + #[error("Incomplete block import pipeline.")] IncompletePipeline, - #[display(fmt = "Transaction pool not ready for block production.")] + + #[error("Transaction pool not ready for block production.")] TransactionPoolNotReady, - #[display(fmt = "Database: {}", _0)] - DatabaseError(sp_database::error::DatabaseError), - /// A convenience variant for String - #[display(fmt = "{}", _0)] - Msg(String), + + #[error("Database")] + DatabaseError(#[from] sp_database::error::DatabaseError), + + #[error("Failed to get header for hash {0}")] + MissingHeader(String), + + #[error("State Database error: {0}")] + StateDatabase(String), + + #[error("Failed to set the chain head to a block that's too old.")] + SetHeadTooOld, + + #[error(transparent)] + Application(#[from] Box), + + // Should be removed/improved once + // the storage `fn`s returns typed errors. 
+ #[error("Runtime code error: {0}")] + RuntimeCode(&'static str), + + // Should be removed/improved once + // the storage `fn`s returns typed errors. + #[error("Storage error: {0}")] + Storage(String), } -impl error::Error for Error { - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - Error::Consensus(e) => Some(e), - Error::Blockchain(e) => Some(e), - _ => None, - } +impl From> for Error { + fn from(e: Box) -> Self { + Self::from_state(e) } } -impl<'a> From<&'a str> for Error { - fn from(s: &'a str) -> Self { - Error::Msg(s.into()) +impl From> for Error { + fn from(e: Box) -> Self { + Self::from_state(e) + } +} + +impl From for ApiError { + fn from(err: Error) -> ApiError { + match err { + Error::RuntimeApiError(err) => err, + e => ApiError::Application(Box::new(e)), + } } } @@ -163,4 +204,14 @@ impl Error { pub fn from_state(e: Box) -> Self { Error::Execution(e) } + + /// Construct from a state db error. + // Can not be done directly, since that would make cargo run out of stack if + // `sc-state-db` is lib is added as dependency. + pub fn from_state_db(e: E) -> Self + where + E: std::fmt::Debug, + { + Error::StateDatabase(format!("{:?}", e)) + } } diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index b8d9c5c934581..928409963bcd4 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,9 @@ //! Implements tree backend, cached header metadata and algorithms //! to compute routes efficiently over the tree of headers. -use sp_runtime::traits::{Block as BlockT, NumberFor, Header}; -use parking_lot::RwLock; use lru::LruCache; +use parking_lot::RwLock; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; /// Set to the expected max difference between `best` and `finalized` blocks at sync. const LRU_CACHE_SIZE: usize = 5_000; @@ -86,10 +86,7 @@ pub fn lowest_common_ancestor + ?Sized>( backend.insert_header_metadata(orig_header_two.hash, orig_header_two); } - Ok(HashAndNumber { - hash: header_one.hash, - number: header_one.number, - }) + Ok(HashAndNumber { hash: header_one.hash, number: header_one.number }) } /// Compute a tree-route between two blocks. See tree-route docs for more details. @@ -105,51 +102,33 @@ pub fn tree_route>( let mut to_branch = Vec::new(); while to.number > from.number { - to_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); + to_branch.push(HashAndNumber { number: to.number, hash: to.hash }); to = backend.header_metadata(to.parent)?; } while from.number > to.number { - from_branch.push(HashAndNumber { - number: from.number, - hash: from.hash, - }); + from_branch.push(HashAndNumber { number: from.number, hash: from.hash }); from = backend.header_metadata(from.parent)?; } // numbers are equal now. 
walk backwards until the block is the same while to.hash != from.hash { - to_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); + to_branch.push(HashAndNumber { number: to.number, hash: to.hash }); to = backend.header_metadata(to.parent)?; - from_branch.push(HashAndNumber { - number: from.number, - hash: from.hash, - }); + from_branch.push(HashAndNumber { number: from.number, hash: from.hash }); from = backend.header_metadata(from.parent)?; } // add the pivot block. and append the reversed to-branch // (note that it's reverse order originals) let pivot = from_branch.len(); - from_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); + from_branch.push(HashAndNumber { number: to.number, hash: to.hash }); from_branch.extend(to_branch.into_iter().rev()); - Ok(TreeRoute { - route: from_branch, - pivot, - }) + Ok(TreeRoute { route: from_branch, pivot }) } /// Hash and number of a block. @@ -204,14 +183,16 @@ impl TreeRoute { /// Get the common ancestor block. This might be one of the two blocks of the /// route. pub fn common_block(&self) -> &HashAndNumber { - self.route.get(self.pivot).expect("tree-routes are computed between blocks; \ + self.route.get(self.pivot).expect( + "tree-routes are computed between blocks; \ which are included in the route; \ - thus it is never empty; qed") + thus it is never empty; qed", + ) } /// Get a slice of enacted blocks (descendents of the common ancestor) pub fn enacted(&self) -> &[HashAndNumber] { - &self.route[self.pivot + 1 ..] + &self.route[self.pivot + 1..] } } @@ -240,17 +221,13 @@ pub struct HeaderMetadataCache { impl HeaderMetadataCache { /// Creates a new LRU header metadata cache with `capacity`. pub fn new(capacity: usize) -> Self { - HeaderMetadataCache { - cache: RwLock::new(LruCache::new(capacity)), - } + HeaderMetadataCache { cache: RwLock::new(LruCache::new(capacity)) } } } impl Default for HeaderMetadataCache { fn default() -> Self { - HeaderMetadataCache { - cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)), - } + HeaderMetadataCache { cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)) } } } diff --git a/primitives/blockchain/src/lib.rs b/primitives/blockchain/src/lib.rs index 27b9c3585e9ca..cd36cabe15517 100644 --- a/primitives/blockchain/src/lib.rs +++ b/primitives/blockchain/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,9 @@ //! Substrate blockchain traits and primitives. mod backend; -mod header_metadata; mod error; +mod header_metadata; -pub use error::*; pub use backend::*; +pub use error::*; pub use header_metadata::*; diff --git a/primitives/chain-spec/Cargo.toml b/primitives/chain-spec/Cargo.toml deleted file mode 100644 index a94bd8ad0139e..0000000000000 --- a/primitives/chain-spec/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "sp-chain-spec" -version = "2.0.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Substrate chain configurations types." 
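The `tree_route` body reflowed above implements the classic lowest-common-ancestor walk: bring the deeper of the two blocks up to the height of the shallower one, then step both back in lock-step until the hashes coincide. A self-contained sketch with stand-in types (`u64` hashes, a `HashMap` as the metadata backend, every parent assumed present):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy)]
struct Meta {
    hash: u64,   // stand-in for Block::Hash
    number: u64, // block height
    parent: u64, // parent hash
}

/// Walk both blocks back to their lowest common ancestor. Panics if a parent
/// is missing, i.e. the map must contain a rooted chain for both inputs.
fn lowest_common_ancestor(chain: &HashMap<u64, Meta>, a: u64, b: u64) -> u64 {
    let (mut a, mut b) = (chain[&a], chain[&b]);
    while a.number > b.number {
        a = chain[&a.parent];
    }
    while b.number > a.number {
        b = chain[&b.parent];
    }
    // Heights are equal now; walk backwards until the blocks coincide.
    while a.hash != b.hash {
        a = chain[&a.parent];
        b = chain[&b.parent];
    }
    a.hash
}
```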
-readme = "README.md" - -[dependencies] -serde = { version = "1.0.101", features = ["derive"] } -serde_json = "1.0.41" diff --git a/primitives/chain-spec/README.md b/primitives/chain-spec/README.md deleted file mode 100644 index 375f14a441ab6..0000000000000 --- a/primitives/chain-spec/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Types and traits related to chain specifications. - -License: Apache-2.0 \ No newline at end of file diff --git a/primitives/chain-spec/src/lib.rs b/primitives/chain-spec/src/lib.rs deleted file mode 100644 index 869fae8236b76..0000000000000 --- a/primitives/chain-spec/src/lib.rs +++ /dev/null @@ -1,43 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Types and traits related to chain specifications. - -/// The type of a chain. -/// -/// This can be used by tools to determine the type of a chain for displaying -/// additional information or enabling additional features. -#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)] -pub enum ChainType { - /// A development chain that runs mainly on one node. - Development, - /// A local chain that runs locally on multiple nodes for testing purposes. - Local, - /// A live chain. - Live, - /// Some custom chain type. 
- Custom(String), -} - -impl Default for ChainType { - fn default() -> Self { - Self::Live - } -} - -/// Arbitrary properties defined in chain spec as a JSON object -pub type Properties = serde_json::map::Map<String, serde_json::Value>;
diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 7ef5f67350baa..c228b88fd6570 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-aura" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies <admin@parity.io>"] description = "Primitives for Aura consensus" edition = "2018" @@ -13,22 +13,30 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../timestamp" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../std" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../runtime" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } +sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../timestamp" } +sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } +sp-consensus = { version = "0.10.0-dev", path = "../common", optional = true } +async-trait = { version = "0.1.50", optional = true } [features] default = ["std"] std = [ "sp-application-crypto/std", "codec/std", + "scale-info/std", "sp-std/std", "sp-api/std", "sp-runtime/std", "sp-inherents/std", "sp-timestamp/std", + "sp-consensus-slots/std", + "sp-consensus", + "async-trait", ]
diff --git a/client/consensus/aura/src/digests.rs b/primitives/consensus/aura/src/digests.rs similarity index 55% rename from client/consensus/aura/src/digests.rs rename to primitives/consensus/aura/src/digests.rs index 3332e4c6a6dff..eaa29036d98a1 100644 --- a/client/consensus/aura/src/digests.rs +++ b/primitives/consensus/aura/src/digests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -21,47 +21,45 @@ //! This implements the digests for AuRa, to allow the private //! `CompatibleDigestItem` trait to appear in public interfaces. -use sp_core::Pair; -use sp_consensus_aura::AURA_ENGINE_ID; -use sp_runtime::generic::{DigestItem, OpaqueDigestItemId}; -use codec::{Encode, Codec}; -use std::fmt::Debug; - -type Signature<P> = <P as Pair>::Signature; +use crate::AURA_ENGINE_ID; +use codec::{Codec, Encode}; +use sp_consensus_slots::Slot; +use sp_runtime::generic::DigestItem; +use sp_std::fmt::Debug; /// A digest item which is usable with aura consensus. -pub trait CompatibleDigestItem<P: Pair>: Sized { +pub trait CompatibleDigestItem<Signature>: Sized { /// Construct a digest item which contains a signature on the hash. - fn aura_seal(signature: Signature<P>) -> Self; + fn aura_seal(signature: Signature) -> Self; /// If this item is an Aura seal, return the signature. - fn as_aura_seal(&self) -> Option<Signature<P>>; + fn as_aura_seal(&self) -> Option<Signature>; /// Construct a digest item which contains the slot number - fn aura_pre_digest(slot_num: u64) -> Self; + fn aura_pre_digest(slot: Slot) -> Self; /// If this item is an AuRa pre-digest, return the slot number - fn as_aura_pre_digest(&self) -> Option<u64>; + fn as_aura_pre_digest(&self) -> Option<Slot>; } -impl<P, Hash> CompatibleDigestItem<P> for DigestItem<Hash> where - P: Pair, - Signature<P>: Codec, - Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static +impl<Signature, Hash> CompatibleDigestItem<Signature> for DigestItem<Hash> +where + Signature: Codec, + Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static, { - fn aura_seal(signature: Signature<P>) -> Self { + fn aura_seal(signature: Signature) -> Self { DigestItem::Seal(AURA_ENGINE_ID, signature.encode()) } - fn as_aura_seal(&self) -> Option<Signature<P>> { - self.try_to(OpaqueDigestItemId::Seal(&AURA_ENGINE_ID)) + fn as_aura_seal(&self) -> Option<Signature> { + self.seal_try_to(&AURA_ENGINE_ID) } - fn aura_pre_digest(slot_num: u64) -> Self { - DigestItem::PreRuntime(AURA_ENGINE_ID, slot_num.encode()) + fn aura_pre_digest(slot: Slot) -> Self { + DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode()) } - fn as_aura_pre_digest(&self) -> Option<u64> { - self.try_to(OpaqueDigestItemId::PreRuntime(&AURA_ENGINE_ID)) + fn as_aura_pre_digest(&self) -> Option<Slot> { + self.pre_runtime_try_to(&AURA_ENGINE_ID) } }
diff --git a/primitives/consensus/aura/src/inherents.rs b/primitives/consensus/aura/src/inherents.rs index a18bd33703061..2a797b5d3f393 100644 --- a/primitives/consensus/aura/src/inherents.rs +++ b/primitives/consensus/aura/src/inherents.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,30 +16,25 @@ // limitations under the License. /// Contains the inherents for the AURA module - -use sp_inherents::{InherentIdentifier, InherentData, Error}; - -#[cfg(feature = "std")] -use sp_inherents::{InherentDataProviders, ProvideInherentData}; +use sp_inherents::{Error, InherentData, InherentIdentifier}; /// The Aura inherent identifier. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"auraslot"; /// The type of the Aura inherent. -pub type InherentType = u64; +pub type InherentType = sp_consensus_slots::Slot; /// Auxiliary trait to extract Aura inherent data. pub trait AuraInherentData { /// Get aura inherent data. - fn aura_inherent_data(&self) -> Result<InherentType, Error>; + fn aura_inherent_data(&self) -> Result<Option<InherentType>, Error>; /// Replace aura inherent data. fn aura_replace_inherent_data(&mut self, new: InherentType); } impl AuraInherentData for InherentData { - fn aura_inherent_data(&self) -> Result<InherentType, Error> { + fn aura_inherent_data(&self) -> Result<Option<InherentType>, Error> { self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Aura inherent data not found".into())) } fn aura_replace_inherent_data(&mut self, new: InherentType) { @@ -48,52 +43,54 @@ impl AuraInherentData for InherentData { } /// Provides the slot duration inherent data for `Aura`. +// TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: u64, + slot: InherentType, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(slot_duration: u64) -> Self { - Self { - slot_duration - } + /// Create a new instance with the given slot. + pub fn new(slot: InherentType) -> Self { + Self { slot } } -} -#[cfg(feature = "std")] -impl ProvideInherentData for InherentDataProvider { - fn on_register( - &self, - providers: &InherentDataProviders, - ) -> Result<(), Error> { - if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - // Add the timestamp inherent data provider, as we require it. - providers.register_provider(sp_timestamp::InherentDataProvider) - } else { - Ok(()) - } - } + /// Creates the inherent data provider by calculating the slot from the given + /// `timestamp` and `duration`.
+ pub fn from_timestamp_and_duration( + timestamp: sp_timestamp::Timestamp, + duration: std::time::Duration, + ) -> Self { + let slot = + InherentType::from((timestamp.as_duration().as_millis() / duration.as_millis()) as u64); - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER + Self { slot } } +} - fn provide_inherent_data( - &self, - inherent_data: &mut InherentData, - ) ->Result<(), Error> { - use sp_timestamp::TimestampInherentData; +#[cfg(feature = "std")] +impl sp_std::ops::Deref for InherentDataProvider { + type Target = InherentType; - let timestamp = inherent_data.timestamp_inherent_data()?; - let slot_num = timestamp / self.slot_duration; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot_num) + fn deref(&self) -> &Self::Target { + &self.slot } +} - fn error_to_string(&self, error: &[u8]) -> Option { - use codec::Decode; +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) + } - sp_inherents::Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + // There is no error anymore + None } } diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index cf0bcf2218a06..e6a319c1d1590 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,10 +19,11 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode, Codec}; -use sp_std::vec::Vec; +use codec::{Codec, Decode, Encode}; use sp_runtime::ConsensusEngineId; +use sp_std::vec::Vec; +pub mod digests; pub mod inherents; pub mod sr25519 { @@ -45,7 +46,7 @@ pub mod sr25519 { pub mod ed25519 { mod app_ed25519 { - use sp_application_crypto::{app_crypto, key_types::AURA, ed25519}; + use sp_application_crypto::{app_crypto, ed25519, key_types::AURA}; app_crypto!(ed25519, AURA); } @@ -61,6 +62,8 @@ pub mod ed25519 { pub type AuthorityId = app_ed25519::Public; } +pub use sp_consensus_slots::Slot; + /// The `ConsensusEngineId` of AuRa. pub const AURA_ENGINE_ID: ConsensusEngineId = [b'a', b'u', b'r', b'a']; @@ -71,24 +74,49 @@ pub type AuthorityIndex = u32; #[derive(Decode, Encode)] pub enum ConsensusLog { /// The authorities have changed. - #[codec(index = "1")] + #[codec(index = 1)] AuthoritiesChange(Vec), /// Disable the authority with given index. - #[codec(index = "2")] + #[codec(index = 2)] OnDisabled(AuthorityIndex), } sp_api::decl_runtime_apis! { /// API necessary for block authorship with aura. pub trait AuraApi { - /// Return the slot duration in seconds for Aura. - /// Currently, only the value provided by this type at genesis - /// will be used. + /// Returns the slot duration for Aura. /// - /// Dynamic slot duration may be supported in the future. - fn slot_duration() -> u64; + /// Currently, only the value provided by this type at genesis will be used. + fn slot_duration() -> SlotDuration; // Return the current set of authorities. fn authorities() -> Vec; } } + +/// Aura slot duration. +/// +/// Internally stored as milliseconds. 
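`from_timestamp_and_duration` derives the slot as integer division of the Unix-epoch timestamp by the slot duration, both taken in milliseconds. A standalone check of that arithmetic (the helper name here is illustrative, not part of the crate):

```rust
use std::time::Duration;

/// Hypothetical helper mirroring the slot calculation above: the slot index
/// is the number of whole slot durations elapsed since the Unix epoch.
fn slot_from_timestamp(timestamp_millis: u64, slot_duration: Duration) -> u64 {
    (timestamp_millis as u128 / slot_duration.as_millis()) as u64
}

fn main() {
    // With 6-second slots, the 60th second after the epoch falls in slot 10.
    assert_eq!(slot_from_timestamp(60_000, Duration::from_millis(6_000)), 10);
}
```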
+#[derive(sp_runtime::RuntimeDebug, Encode, Decode, PartialEq, Clone, Copy)] +pub struct SlotDuration(u64); + +impl SlotDuration { + /// Initialize from the given milliseconds. + pub fn from_millis(val: u64) -> Self { + Self(val) + } + + /// Returns the slot duration in milli seconds. + pub fn get(&self) -> u64 { + self.0 + } +} + +#[cfg(feature = "std")] +impl sp_consensus::SlotData for SlotDuration { + fn slot_duration(&self) -> std::time::Duration { + std::time::Duration::from_millis(self.0) + } + + const SLOT_KEY: &'static [u8] = b"aura_slot_duration"; +} diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 4a22e3f77be4c..5f6bfec219739 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-babe" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" @@ -13,25 +13,29 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } merlin = { version = "2.0", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8.0", optional = true, path = "../common" } -sp-consensus-slots = { version = "0.8.0", default-features = false, path = "../slots" } -sp-consensus-vrf = { version = "0.8.0", path = "../vrf", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } -sp-keystore = { version = "0.8.0", default-features = false, path = "../../keystore", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../timestamp" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../std" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } +sp-consensus = { version = "0.10.0-dev", optional = true, path = "../common" } +sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } +sp-consensus-vrf = { version = "0.10.0-dev", path = "../vrf", default-features = false } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../core" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../inherents" } +sp-keystore = { version = "0.10.0-dev", default-features = false, path = "../../keystore", optional = true } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../runtime" } +sp-timestamp = { version = "4.0.0-dev", path = "../../timestamp", optional = true } +serde = { version = "1.0.126", features = ["derive"], optional = true } +async-trait = { version = "0.1.50", optional = true } [features] default = ["std"] std = [ "sp-application-crypto/std", 
"codec/std", + "scale-info/std", "merlin/std", "sp-std/std", "sp-api/std", @@ -42,5 +46,7 @@ std = [ "sp-inherents/std", "sp-keystore", "sp-runtime/std", - "sp-timestamp/std", + "serde", + "sp-timestamp", + "async-trait", ] diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index f7ae560afff34..470a028021ca1 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,11 @@ use super::{ AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, - BabeEpochConfiguration, SlotNumber, BABE_ENGINE_ID, + BabeEpochConfiguration, Slot, BABE_ENGINE_ID, }; use codec::{Codec, Decode, Encode}; +use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; -use sp_runtime::{generic::OpaqueDigestItemId, DigestItem, RuntimeDebug}; use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; @@ -32,8 +32,8 @@ use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; pub struct PrimaryPreDigest { /// Authority index pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, + /// Slot + pub slot: Slot, /// VRF output pub vrf_output: VRFOutput, /// VRF proof @@ -50,8 +50,8 @@ pub struct SecondaryPlainPreDigest { /// it makes things easier for higher-level users of the chain data to /// be aware of the author of a secondary-slot block. pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, + /// Slot + pub slot: Slot, } /// BABE secondary deterministic slot assignment with VRF outputs. @@ -59,8 +59,8 @@ pub struct SecondaryPlainPreDigest { pub struct SecondaryVRFPreDigest { /// Authority index pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, + /// Slot + pub slot: Slot, /// VRF output pub vrf_output: VRFOutput, /// VRF proof @@ -73,13 +73,13 @@ pub struct SecondaryVRFPreDigest { #[derive(Clone, RuntimeDebug, Encode, Decode)] pub enum PreDigest { /// A primary VRF-based slot assignment. - #[codec(index = "1")] + #[codec(index = 1)] Primary(PrimaryPreDigest), /// A secondary deterministic slot assignment. - #[codec(index = "2")] + #[codec(index = 2)] SecondaryPlain(SecondaryPlainPreDigest), /// A secondary deterministic slot assignment with VRF outputs. - #[codec(index = "3")] + #[codec(index = 3)] SecondaryVRF(SecondaryVRFPreDigest), } @@ -93,12 +93,12 @@ impl PreDigest { } } - /// Returns the slot number of the pre digest. - pub fn slot_number(&self) -> SlotNumber { + /// Returns the slot of the pre digest. + pub fn slot(&self) -> Slot { match self { - PreDigest::Primary(primary) => primary.slot_number, - PreDigest::SecondaryPlain(secondary) => secondary.slot_number, - PreDigest::SecondaryVRF(secondary) => secondary.slot_number, + PreDigest::Primary(primary) => primary.slot, + PreDigest::SecondaryPlain(secondary) => secondary.slot, + PreDigest::SecondaryVRF(secondary) => secondary.slot, } } @@ -134,23 +134,22 @@ pub struct NextEpochDescriptor { /// Information about the next epoch config, if changed. This is broadcast in the first /// block of the epoch, and applies using the same rules as `NextEpochDescriptor`. 
-#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug, scale_info::TypeInfo)] pub enum NextConfigDescriptor { /// Version 1. - #[codec(index = "1")] + #[codec(index = 1)] V1 { /// Value of `c` in `BabeEpochConfiguration`. c: (u64, u64), /// Value of `allowed_slots` in `BabeEpochConfiguration`. allowed_slots: AllowedSlots, - } + }, } impl From for BabeEpochConfiguration { fn from(desc: NextConfigDescriptor) -> Self { match desc { - NextConfigDescriptor::V1 { c, allowed_slots } => - Self { c, allowed_slots }, + NextConfigDescriptor::V1 { c, allowed_slots } => Self { c, allowed_slots }, } } } @@ -176,15 +175,16 @@ pub trait CompatibleDigestItem: Sized { fn as_next_config_descriptor(&self) -> Option; } -impl CompatibleDigestItem for DigestItem where - Hash: Send + Sync + Eq + Clone + Codec + 'static +impl CompatibleDigestItem for DigestItem +where + Hash: Send + Sync + Eq + Clone + Codec + 'static, { fn babe_pre_digest(digest: PreDigest) -> Self { DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) } fn as_babe_pre_digest(&self) -> Option { - self.try_to(OpaqueDigestItemId::PreRuntime(&BABE_ENGINE_ID)) + self.pre_runtime_try_to(&BABE_ENGINE_ID) } fn babe_seal(signature: AuthoritySignature) -> Self { @@ -192,11 +192,11 @@ impl CompatibleDigestItem for DigestItem where } fn as_babe_seal(&self) -> Option { - self.try_to(OpaqueDigestItemId::Seal(&BABE_ENGINE_ID)) + self.seal_try_to(&BABE_ENGINE_ID) } fn as_next_epoch_descriptor(&self) -> Option { - self.try_to(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)) + self.consensus_try_to(&BABE_ENGINE_ID) .and_then(|x: super::ConsensusLog| match x { super::ConsensusLog::NextEpochData(n) => Some(n), _ => None, @@ -204,7 +204,7 @@ impl CompatibleDigestItem for DigestItem where } fn as_next_config_descriptor(&self) -> Option { - self.try_to(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)) + self.consensus_try_to(&BABE_ENGINE_ID) .and_then(|x: super::ConsensusLog| match x { super::ConsensusLog::NextConfigData(n) => Some(n), _ => None, diff --git a/primitives/consensus/babe/src/inherents.rs b/primitives/consensus/babe/src/inherents.rs index 5384183f9e678..cecd61998a4db 100644 --- a/primitives/consensus/babe/src/inherents.rs +++ b/primitives/consensus/babe/src/inherents.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,32 +18,25 @@ //! Inherents for BABE use sp_inherents::{Error, InherentData, InherentIdentifier}; -#[cfg(feature = "std")] -use sp_inherents::{InherentDataProviders, ProvideInherentData}; -#[cfg(feature = "std")] -use sp_timestamp::TimestampInherentData; -#[cfg(feature = "std")] -use codec::Decode; use sp_std::result::Result; /// The BABE inherent identifier. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"babeslot"; /// The type of the BABE inherent. -pub type InherentType = u64; +pub type InherentType = sp_consensus_slots::Slot; /// Auxiliary trait to extract BABE inherent data. pub trait BabeInherentData { /// Get BABE inherent data. - fn babe_inherent_data(&self) -> Result; + fn babe_inherent_data(&self) -> Result, Error>; /// Replace BABE inherent data. 
fn babe_replace_inherent_data(&mut self, new: InherentType); } impl BabeInherentData for InherentData { - fn babe_inherent_data(&self) -> Result { + fn babe_inherent_data(&self) -> Result, Error> { self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "BABE inherent data not found".into())) } fn babe_replace_inherent_data(&mut self, new: InherentType) { @@ -52,41 +45,59 @@ impl BabeInherentData for InherentData { } /// Provides the slot duration inherent data for BABE. +// TODO: Remove in the future. https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: u64, + slot: InherentType, } #[cfg(feature = "std")] impl InherentDataProvider { - /// Constructs `Self` - pub fn new(slot_duration: u64) -> Self { - Self { slot_duration } + /// Create new inherent data provider from the given `slot`. + pub fn new(slot: InherentType) -> Self { + Self { slot } + } + + /// Creates the inherent data provider by calculating the slot from the given + /// `timestamp` and `duration`. + pub fn from_timestamp_and_duration( + timestamp: sp_timestamp::Timestamp, + duration: std::time::Duration, + ) -> Self { + let slot = + InherentType::from((timestamp.as_duration().as_millis() / duration.as_millis()) as u64); + + Self { slot } + } + + /// Returns the `slot` of this inherent data provider. + pub fn slot(&self) -> InherentType { + self.slot } } #[cfg(feature = "std")] -impl ProvideInherentData for InherentDataProvider { - fn on_register(&self, providers: &InherentDataProviders) -> Result<(), Error> { - if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - // Add the timestamp inherent data provider, as we require it. - providers.register_provider(sp_timestamp::InherentDataProvider) - } else { - Ok(()) - } - } +impl sp_std::ops::Deref for InherentDataProvider { + type Target = InherentType; - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER + fn deref(&self) -> &Self::Target { + &self.slot } +} +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - let timestamp = inherent_data.timestamp_inherent_data()?; - let slot_number = timestamp / self.slot_duration; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot_number) + inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) } - fn error_to_string(&self, error: &[u8]) -> Option { - Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + // There is no error anymore + None } } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 74f2659e6e8b2..4417670f4144b 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,6 +29,9 @@ pub use sp_consensus_vrf::schnorrkel::{ }; use codec::{Decode, Encode}; +use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; use sp_runtime::{traits::Header, ConsensusEngineId, RuntimeDebug}; @@ -76,8 +79,7 @@ pub const MEDIAN_ALGORITHM_CARDINALITY: usize = 1200; // arbitrary suggestion by /// The index of an authority. pub type AuthorityIndex = u32; -/// A slot number. -pub use sp_consensus_slots::SlotNumber; +pub use sp_consensus_slots::Slot; /// An equivocation proof for multiple block authorships on the same slot (i.e. double vote). pub type EquivocationProof = sp_consensus_slots::EquivocationProof; @@ -87,17 +89,17 @@ pub type EquivocationProof = sp_consensus_slots::EquivocationProof Transcript { +pub fn make_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { let mut transcript = Transcript::new(&BABE_ENGINE_ID); - transcript.append_u64(b"slot number", slot_number); + transcript.append_u64(b"slot number", *slot); transcript.append_u64(b"current epoch", epoch); transcript.append_message(b"chain randomness", &randomness[..]); transcript @@ -105,18 +107,14 @@ pub fn make_transcript( /// Make a VRF transcript data container #[cfg(feature = "std")] -pub fn make_transcript_data( - randomness: &Randomness, - slot_number: u64, - epoch: u64, -) -> VRFTranscriptData { +pub fn make_transcript_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VRFTranscriptData { VRFTranscriptData { label: &BABE_ENGINE_ID, items: vec![ - ("slot number", VRFTranscriptValue::U64(slot_number)), + ("slot number", VRFTranscriptValue::U64(*slot)), ("current epoch", VRFTranscriptValue::U64(epoch)), ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), - ] + ], } } @@ -126,14 +124,14 @@ pub enum ConsensusLog { /// The epoch has changed. This provides information about the _next_ /// epoch - information about the _current_ epoch (i.e. the one we've just /// entered) should already be available earlier in the chain. - #[codec(index = "1")] + #[codec(index = 1)] NextEpochData(NextEpochDescriptor), /// Disable the authority with given index. - #[codec(index = "2")] + #[codec(index = 2)] OnDisabled(AuthorityIndex), /// The epoch has changed, and the epoch after the current one will /// enact different epoch configurations. - #[codec(index = "3")] + #[codec(index = 3)] NextConfigData(NextConfigDescriptor), } @@ -147,7 +145,7 @@ pub struct BabeGenesisConfigurationV1 { pub slot_duration: u64, /// The duration of epochs in slots. - pub epoch_length: SlotNumber, + pub epoch_length: u64, /// A constant value that is used in the threshold calculation formula. /// Expressed as a rational where the first member of the tuple is the @@ -195,7 +193,7 @@ pub struct BabeGenesisConfiguration { pub slot_duration: u64, /// The duration of epochs in slots. - pub epoch_length: SlotNumber, + pub epoch_length: u64, /// A constant value that is used in the threshold calculation formula. /// Expressed as a rational where the first member of the tuple is the @@ -216,7 +214,8 @@ pub struct BabeGenesisConfiguration { } /// Types of allowed slots. 
-#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum AllowedSlots { /// Only allow primary slots. PrimarySlots, @@ -240,15 +239,16 @@ impl AllowedSlots { #[cfg(feature = "std")] impl sp_consensus::SlotData for BabeGenesisConfiguration { - fn slot_duration(&self) -> u64 { - self.slot_duration + fn slot_duration(&self) -> std::time::Duration { + std::time::Duration::from_millis(self.slot_duration) } const SLOT_KEY: &'static [u8] = b"babe_configuration"; } /// Configuration data used by the BABE consensus engine. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BabeEpochConfiguration { /// A constant value that is used in the threshold calculation formula. /// Expressed as a rational where the first member of the tuple is the @@ -273,20 +273,15 @@ where use digests::*; use sp_application_crypto::RuntimeAppPublic; - let find_pre_digest = |header: &H| { - header - .digest() - .logs() - .iter() - .find_map(|log| log.as_babe_pre_digest()) - }; + let find_pre_digest = + |header: &H| header.digest().logs().iter().find_map(|log| log.as_babe_pre_digest()); let verify_seal_signature = |mut header: H, offender: &AuthorityId| { let seal = header.digest_mut().pop()?.as_babe_seal()?; let pre_hash = header.hash(); if !offender.verify(&pre_hash.as_ref(), &seal) { - return None; + return None } Some(()) @@ -295,7 +290,7 @@ where let verify_proof = || { // we must have different headers for the equivocation to be valid if proof.first_header.hash() == proof.second_header.hash() { - return None; + return None } let first_pre_digest = find_pre_digest(&proof.first_header)?; @@ -303,15 +298,15 @@ where // both headers must be targetting the same slot and it must // be the same as the one in the proof. - if proof.slot_number != first_pre_digest.slot_number() || - first_pre_digest.slot_number() != second_pre_digest.slot_number() + if proof.slot != first_pre_digest.slot() || + first_pre_digest.slot() != second_pre_digest.slot() { - return None; + return None } // both headers must have been authored by the same authority if first_pre_digest.authority_index() != second_pre_digest.authority_index() { - return None; + return None } // we finally verify that the expected authority has signed both headers and @@ -350,6 +345,23 @@ impl OpaqueKeyOwnershipProof { } } +/// BABE epoch information +#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// The duration of this epoch. + pub duration: u64, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: [u8; VRF_OUTPUT_LENGTH], + /// Configuration of the epoch. + pub config: BabeEpochConfiguration, +} + sp_api::decl_runtime_apis! { /// API necessary for block authorship with BABE. #[api_version(2)] @@ -361,22 +373,29 @@ sp_api::decl_runtime_apis! { #[changed_in(2)] fn configuration() -> BabeGenesisConfigurationV1; - /// Returns the slot number that started the current epoch. - fn current_epoch_start() -> SlotNumber; + /// Returns the slot that started the current epoch. 
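`check_equivocation_proof` in the BABE primitives accepts a proof only if the two headers are distinct, both target exactly the slot named in the proof, and both carry the same authority index, on top of the seal-signature checks. A toy condensation of those structural checks with a stand-in header type (signature verification elided):

```rust
// Stand-in for a sealed header; not the real sp-runtime Header trait.
struct Header {
    hash: u64,
    slot: u64,
    authority_index: u32,
}

fn is_structural_equivocation(proof_slot: u64, first: &Header, second: &Header) -> bool {
    first.hash != second.hash &&            // two different blocks...
        proof_slot == first.slot &&         // ...for the slot the proof claims...
        first.slot == second.slot &&        // ...both in that same slot...
        first.authority_index == second.authority_index // ...by one author
}
```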
+ fn current_epoch_start() -> Slot; + + /// Returns information regarding the current epoch. + fn current_epoch() -> Epoch; + + /// Returns information regarding the next epoch (which was already + /// previously announced). + fn next_epoch() -> Epoch; /// Generates a proof of key ownership for the given authority in the /// current epoch. An example usage of this module is coupled with the /// session historical module to prove that a given authority key is /// tied to a given staking identity during a specific session. Proofs /// of key ownership are necessary for submitting equivocation reports. - /// NOTE: even though the API takes a `slot_number` as parameter the current + /// NOTE: even though the API takes a `slot` as parameter the current /// implementations ignores this parameter and instead relies on this /// method being called at the correct block height, i.e. any point at /// which the epoch for the given slot is live on-chain. Future /// implementations will instead use indexed data through an offchain /// worker, not requiring older states to be available. fn generate_key_ownership_proof( - slot_number: SlotNumber, + slot: Slot, authority_id: AuthorityId, ) -> Option; diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index e8eaa06ee005f..ecfc1c1b31826 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,30 +13,24 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - [dependencies] -derive_more = "0.99.2" -libp2p = { version = "0.28.1", default-features = false } -log = "0.4.8" -sp-core = { path= "../../core", version = "2.0.0"} -sp-inherents = { version = "2.0.0", path = "../../inherents" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +async-trait = "0.1.42" +codec = { package = "parity-scale-codec", version = "2.0.0", features = [ + "derive", +] } futures = { version = "0.3.1", features = ["thread-pool"] } +log = "0.4.8" +sp-core = { path = "../../core", version = "4.0.0-dev" } +sp-inherents = { version = "4.0.0-dev", path = "../../inherents" } +sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } futures-timer = "3.0.1" -sp-std = { version = "2.0.0", path = "../../std" } -sp-version = { version = "2.0.0", path = "../../version" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-utils = { version = "2.0.0", path = "../../utils" } -sp-trie = { version = "2.0.0", path = "../../trie" } -sp-api = { version = "2.0.0", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } -parking_lot = "0.10.0" -serde = { version = "1.0", features = ["derive"] } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.8.0"} -wasm-timer = "0.2.4" +sp-std = { version = "4.0.0-dev", path = "../../std" } +sp-version = { version = "4.0.0-dev", path = "../../version" } +sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } +thiserror = "1.0.21" [dev-dependencies] -futures = "0.3.4" +futures = "0.3.9" sp-test-primitives = { version = "2.0.0", path = "../../test-primitives" } [features] diff --git a/primitives/consensus/common/src/block_validation.rs b/primitives/consensus/common/src/block_validation.rs index 
f8255130e6416..54a70a402b060 100644 --- a/primitives/consensus/common/src/block_validation.rs +++ b/primitives/consensus/common/src/block_validation.rs @@ -1,25 +1,26 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// http://www.apache.org/licenses/LICENSE-2.0 // -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Block announcement validation. use crate::BlockStatus; +use futures::FutureExt as _; use sp_runtime::{generic::BlockId, traits::Block}; use std::{error::Error, future::Future, pin::Pin, sync::Arc}; -use futures::FutureExt as _; /// A type which provides access to chain information. pub trait Chain { @@ -42,7 +43,12 @@ pub enum Validation { is_new_best: bool, }, /// Invalid block announcement. - Failure, + Failure { + /// Should we disconnect from this peer? + /// + /// This should be used if the peer for example send junk to spam us. + disconnect: bool, + }, } /// Type which checks incoming block announcements. @@ -53,6 +59,10 @@ pub trait BlockAnnounceValidator { /// /// Returning [`Validation::Failure`] will lead to a decrease of the /// peers reputation as it sent us invalid data. + /// + /// The returned future should only resolve to an error iff there was an internal error + /// validating the block announcement. If the block announcement itself is invalid, this should + /// *always* return [`Validation::Failure`]. fn validate( &mut self, header: &B::Header, @@ -68,8 +78,21 @@ impl BlockAnnounceValidator for DefaultBlockAnnounceValidator { fn validate( &mut self, _: &B::Header, - _: &[u8], + data: &[u8], ) -> Pin>> + Send>> { - async { Ok(Validation::Success { is_new_best: false }) }.boxed() + let is_empty = data.is_empty(); + + async move { + if !is_empty { + log::debug!( + target: "sync", + "Received unknown data alongside the block announcement.", + ); + Ok(Validation::Failure { disconnect: true }) + } else { + Ok(Validation::Success { is_new_best: false }) + } + } + .boxed() } } diff --git a/primitives/consensus/common/src/error.rs b/primitives/consensus/common/src/error.rs index 0da749589013d..546f30d3e8202 100644 --- a/primitives/consensus/common/src/error.rs +++ b/primitives/consensus/common/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,81 +16,84 @@ // limitations under the License. //! Error types in Consensus -use sp_version::RuntimeVersion; use sp_core::ed25519::Public; +use sp_version::RuntimeVersion; use std::error; /// Result type alias. pub type Result = std::result::Result; /// Error type. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] pub enum Error { /// Missing state at block with given descriptor. - #[display(fmt="State unavailable at block {}", _0)] + #[error("State unavailable at block {0}")] StateUnavailable(String), /// I/O terminated unexpectedly - #[display(fmt="I/O terminated unexpectedly.")] + #[error("I/O terminated unexpectedly.")] IoTerminated, /// Intermediate missing. - #[display(fmt="Missing intermediate.")] + #[error("Missing intermediate.")] NoIntermediate, /// Intermediate is of wrong type. - #[display(fmt="Invalid intermediate.")] + #[error("Invalid intermediate.")] InvalidIntermediate, /// Unable to schedule wake-up. - #[display(fmt="Timer error: {}", _0)] - FaultyTimer(std::io::Error), + #[error("Timer error: {0}")] + FaultyTimer(#[from] std::io::Error), /// Error while working with inherent data. - #[display(fmt="InherentData error: {}", _0)] - InherentData(sp_inherents::Error), + #[error("InherentData error: {0}")] + InherentData(#[from] sp_inherents::Error), /// Unable to propose a block. - #[display(fmt="Unable to create block proposal.")] + #[error("Unable to create block proposal.")] CannotPropose, /// Error checking signature - #[display(fmt="Message signature {:?} by {:?} is invalid.", _0, _1)] + #[error("Message signature {0:?} by {1:?} is invalid.")] InvalidSignature(Vec, Vec), /// Invalid authorities set received from the runtime. - #[display(fmt="Current state of blockchain has invalid authorities set")] + #[error("Current state of blockchain has invalid authorities set")] InvalidAuthoritiesSet, /// Account is not an authority. - #[display(fmt="Message sender {:?} is not a valid authority.", _0)] + #[error("Message sender {0:?} is not a valid authority")] InvalidAuthority(Public), /// Authoring interface does not match the runtime. - #[display(fmt="Authoring for current \ - runtime is not supported. Native ({}) cannot author for on-chain ({}).", native, on_chain)] + #[error( + "Authoring for current \ + runtime is not supported. Native ({native}) cannot author for on-chain ({on_chain})." + )] IncompatibleAuthoringRuntime { native: RuntimeVersion, on_chain: RuntimeVersion }, /// Authoring interface does not match the runtime. - #[display(fmt="Authoring for current runtime is not supported since it has no version.")] + #[error("Authoring for current runtime is not supported since it has no version.")] RuntimeVersionMissing, /// Authoring interface does not match the runtime. - #[display(fmt="Authoring in current build is not supported since it has no runtime.")] + #[error("Authoring in current build is not supported since it has no runtime.")] NativeRuntimeMissing, /// Justification requirements not met. - #[display(fmt="Invalid justification.")] + #[error("Invalid justification.")] InvalidJustification, /// Some other error. 
- #[display(fmt="Other error: {}", _0)] - Other(Box), + #[error(transparent)] + Other(#[from] Box), /// Error from the client while importing - #[display(fmt="Import failed: {}", _0)] - #[from(ignore)] + #[error("Import failed: {0}")] ClientImport(String), /// Error from the client while importing - #[display(fmt="Chain lookup failed: {}", _0)] - #[from(ignore)] + #[error("Chain lookup failed: {0}")] ChainLookup(String), /// Signing failed - #[display(fmt="Failed to sign using key: {:?}. Reason: {}", _0, _1)] - CannotSign(Vec, String) + #[error("Failed to sign using key: {0:?}. Reason: {1}")] + CannotSign(Vec, String), +} + +impl core::convert::From for Error { + fn from(p: Public) -> Self { + Self::InvalidAuthority(p) + } } -impl error::Error for Error { - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - Error::FaultyTimer(ref err) => Some(err), - Error::Other(ref err) => Some(&**err), - _ => None, - } +impl core::convert::From for Error { + fn from(s: String) -> Self { + Self::StateUnavailable(s) } } diff --git a/primitives/consensus/common/src/evaluation.rs b/primitives/consensus/common/src/evaluation.rs index 76fcd5310b06a..19be5e5526349 100644 --- a/primitives/consensus/common/src/evaluation.rs +++ b/primitives/consensus/common/src/evaluation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,8 @@ //! Block evaluation and evaluation errors. -use super::MAX_BLOCK_SIZE; - use codec::Encode; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, One, CheckedConversion}; +use sp_runtime::traits::{Block as BlockT, CheckedConversion, Header as HeaderT, One}; // This is just a best effort to encode the number. None indicated that it's too big to encode // in a u128. @@ -30,27 +28,19 @@ type BlockNumber = Option; pub type Result = std::result::Result; /// Error type. -#[derive(Debug, derive_more::Display)] +#[derive(Debug, thiserror::Error)] pub enum Error { /// Proposal provided not a block. - #[display(fmt="Proposal provided not a block: decoding error: {}", _0)] - BadProposalFormat(codec::Error), + #[error("Proposal provided not a block: decoding error: {0}")] + BadProposalFormat(#[from] codec::Error), /// Proposal had wrong parent hash. - #[display(fmt="Proposal had wrong parent hash. Expected {:?}, got {:?}", expected, got)] + #[error("Proposal had wrong parent hash. Expected {expected:?}, got {got:?}")] WrongParentHash { expected: String, got: String }, /// Proposal had wrong number. - #[display(fmt="Proposal had wrong number. Expected {:?}, got {:?}", expected, got)] + #[error("Proposal had wrong number. Expected {expected:?}, got {got:?}")] WrongNumber { expected: BlockNumber, got: BlockNumber }, - /// Proposal exceeded the maximum size. - #[display( - fmt="Proposal exceeded the maximum size of {} by {} bytes.", - "MAX_BLOCK_SIZE", "_0.saturating_sub(MAX_BLOCK_SIZE)" - )] - ProposalTooLarge(usize), } -impl std::error::Error for Error {} - /// Attempt to evaluate a substrate block as a node block, returning error /// upon any initial validity checks failing. 
diff --git a/primitives/consensus/common/src/evaluation.rs b/primitives/consensus/common/src/evaluation.rs
index 76fcd5310b06a..19be5e5526349 100644
--- a/primitives/consensus/common/src/evaluation.rs
+++ b/primitives/consensus/common/src/evaluation.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,10 +17,8 @@

 //! Block evaluation and evaluation errors.

-use super::MAX_BLOCK_SIZE;
-
 use codec::Encode;
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, One, CheckedConversion};
+use sp_runtime::traits::{Block as BlockT, CheckedConversion, Header as HeaderT, One};

 // This is just a best effort to encode the number. None indicates that it's too big to encode
 // in a u128.
 type BlockNumber = Option<u128>;

 /// Result type alias.
 pub type Result<T> = std::result::Result<T, Error>;

 /// Error type.
-#[derive(Debug, derive_more::Display)]
+#[derive(Debug, thiserror::Error)]
 pub enum Error {
 	/// Proposal provided not a block.
-	#[display(fmt="Proposal provided not a block: decoding error: {}", _0)]
-	BadProposalFormat(codec::Error),
+	#[error("Proposal provided not a block: decoding error: {0}")]
+	BadProposalFormat(#[from] codec::Error),
 	/// Proposal had wrong parent hash.
-	#[display(fmt="Proposal had wrong parent hash. Expected {:?}, got {:?}", expected, got)]
+	#[error("Proposal had wrong parent hash. Expected {expected:?}, got {got:?}")]
 	WrongParentHash { expected: String, got: String },
 	/// Proposal had wrong number.
-	#[display(fmt="Proposal had wrong number. Expected {:?}, got {:?}", expected, got)]
+	#[error("Proposal had wrong number. Expected {expected:?}, got {got:?}")]
 	WrongNumber { expected: BlockNumber, got: BlockNumber },
-	/// Proposal exceeded the maximum size.
-	#[display(
-		fmt="Proposal exceeded the maximum size of {} by {} bytes.",
-		"MAX_BLOCK_SIZE", "_0.saturating_sub(MAX_BLOCK_SIZE)"
-	)]
-	ProposalTooLarge(usize),
 }

-impl std::error::Error for Error {}
-
 /// Attempt to evaluate a substrate block as a node block, returning error
 /// upon any initial validity checks failing.
 pub fn evaluate_initial<Block: BlockT>(
@@ -58,27 +48,21 @@ pub fn evaluate_initial<Block: BlockT>(
 	proposal: &Block,
 	parent_hash: &<Block as BlockT>::Hash,
 	parent_number: <<Block as BlockT>::Header as HeaderT>::Number,
 ) -> Result<()> {
 	let encoded = Encode::encode(proposal);
-	let proposal = Block::decode(&mut &encoded[..])
-		.map_err(|e| Error::BadProposalFormat(e))?;
-
-	if encoded.len() > MAX_BLOCK_SIZE {
-		return Err(Error::ProposalTooLarge(encoded.len()))
-	}
+	let proposal = Block::decode(&mut &encoded[..]).map_err(|e| Error::BadProposalFormat(e))?;

 	if *parent_hash != *proposal.header().parent_hash() {
 		return Err(Error::WrongParentHash {
 			expected: format!("{:?}", *parent_hash),
-			got: format!("{:?}", proposal.header().parent_hash())
-		});
+			got: format!("{:?}", proposal.header().parent_hash()),
+		})
 	}

 	if parent_number + One::one() != *proposal.header().number() {
 		return Err(Error::WrongNumber {
 			expected: parent_number.checked_into::<u128>().map(|x| x + 1),
 			got: (*proposal.header().number()).checked_into::<u128>(),
-		});
+		})
 	}

 	Ok(())
diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs
deleted file mode 100644
index 92bd9966d75ec..0000000000000
--- a/primitives/consensus/common/src/import_queue.rs
+++ /dev/null
@@ -1,296 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Import Queue primitive: something which can verify and import blocks.
-//!
-//! This serves as an intermediate and abstracted step between synchronization
-//! and import. Each mode of consensus will have its own requirements for block
-//! verification. Some algorithms can verify in parallel, while others only
-//! sequentially.
-//!
-//! The `ImportQueue` trait allows such verification strategies to be
-//! instantiated. The `BasicQueue` and `BasicVerifier` traits allow serial
-//! queues to be instantiated simply.
-
-use std::collections::HashMap;
-
-use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor}};
-
-use crate::{
-	error::Error as ConsensusError,
-	block_import::{
-		BlockImport, BlockOrigin, BlockImportParams, ImportedAux, JustificationImport, ImportResult,
-		BlockCheckParams, FinalityProofImport,
-	},
-	metrics::Metrics,
-};
-pub use basic_queue::BasicQueue;
-
-/// A commonly-used Import Queue type.
-///
-/// This defines the transaction type of the `BasicQueue` to be the transaction type for a client.
-pub type DefaultImportQueue<Transaction, Block> = BasicQueue<Block, Transaction>;
-
-mod basic_queue;
-pub mod buffered_link;
-
-/// Shared block import struct used by the queue.
-pub type BoxBlockImport<B, Transaction> = Box<
-	dyn BlockImport<B, Error = ConsensusError, Transaction = Transaction> + Send + Sync
->;
-
-/// Shared justification import struct used by the queue.
-pub type BoxJustificationImport<B> = Box<dyn JustificationImport<B, Error = ConsensusError> + Send + Sync>;
-
-/// Shared finality proof import struct used by the queue.
-pub type BoxFinalityProofImport<B> = Box<
-	dyn FinalityProofImport<B, Error = ConsensusError> + Send + Sync
->;
-
-/// Maps to the Origin used by the network.
-pub type Origin = libp2p::PeerId; - -/// Block data used by the queue. -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct IncomingBlock { - /// Block header hash. - pub hash: ::Hash, - /// Block header if requested. - pub header: Option<::Header>, - /// Block body if requested. - pub body: Option::Extrinsic>>, - /// Justification if requested. - pub justification: Option, - /// The peer, we received this from - pub origin: Option, - /// Allow importing the block skipping state verification if parent state is missing. - pub allow_missing_state: bool, - /// Re-validate existing block. - pub import_existing: bool, -} - -/// Type of keys in the blockchain cache that consensus module could use for its needs. -pub type CacheKeyId = [u8; 4]; - -/// Verify a justification of a block -pub trait Verifier: Send + Sync { - /// Verify the given data and return the BlockImportParams and an optional - /// new set of validators to import. If not, err with an Error-Message - /// presented to the User in the logs. - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String>; -} - -/// Blocks import queue API. -/// -/// The `import_*` methods can be called in order to send elements for the import queue to verify. -/// Afterwards, call `poll_actions` to determine how to respond to these elements. -pub trait ImportQueue: Send { - /// Import bunch of blocks. - fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); - /// Import a block justification. - fn import_justification( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - justification: Justification - ); - /// Import block finality proof. - fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ); - - /// Polls for actions to perform on the network. - /// - /// This method should behave in a way similar to `Future::poll`. It can register the current - /// task and notify later when more actions are ready to be polled. To continue the comparison, - /// it is as if this method always returned `Poll::Pending`. - fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link); -} - -/// Hooks that the verification queue can use to influence the synchronization -/// algorithm. -pub trait Link: Send { - /// Batch of blocks imported, with or without error. - fn blocks_processed( - &mut self, - _imported: usize, - _count: usize, - _results: Vec<(Result>, BlockImportError>, B::Hash)> - ) {} - /// Justification import result. - fn justification_imported(&mut self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} - /// Request a justification for the given block. - fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} - /// Finality proof import result. - /// - /// Even though we have asked for finality proof of block A, provider could return proof of - /// some earlier block B, if the proof for A was too large. The sync module should continue - /// asking for proof of A in this case. - fn finality_proof_imported( - &mut self, - _who: Origin, - _request_block: (B::Hash, NumberFor), - _finalization_result: Result<(B::Hash, NumberFor), ()>, - ) {} - /// Request a finality proof for the given block. - fn request_finality_proof(&mut self, _hash: &B::Hash, _number: NumberFor) {} -} - -/// Block import successful result. -#[derive(Debug, PartialEq)] -pub enum BlockImportResult { - /// Imported known block. 
- ImportedKnown(N), - /// Imported unknown block. - ImportedUnknown(N, ImportedAux, Option), -} - -/// Block import error. -#[derive(Debug)] -pub enum BlockImportError { - /// Block missed header, can't be imported - IncompleteHeader(Option), - /// Block verification failed, can't be imported - VerificationFailed(Option, String), - /// Block is known to be Bad - BadBlock(Option), - /// Parent state is missing. - MissingState, - /// Block has an unknown parent - UnknownParent, - /// Block import has been cancelled. This can happen if the parent block fails to be imported. - Cancelled, - /// Other error. - Other(ConsensusError), -} - -/// Single block import function. -pub fn import_single_block, Transaction>( - import_handle: &mut dyn BlockImport, - block_origin: BlockOrigin, - block: IncomingBlock, - verifier: &mut V, -) -> Result>, BlockImportError> { - import_single_block_metered(import_handle, block_origin, block, verifier, None) -} - -/// Single block import function with metering. -pub(crate) fn import_single_block_metered, Transaction>( - import_handle: &mut dyn BlockImport, - block_origin: BlockOrigin, - block: IncomingBlock, - verifier: &mut V, - metrics: Option, -) -> Result>, BlockImportError> { - let peer = block.origin; - - let (header, justification) = match (block.header, block.justification) { - (Some(header), justification) => (header, justification), - (None, _) => { - if let Some(ref peer) = peer { - debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); - } else { - debug!(target: "sync", "Header {} was not provided ", block.hash); - } - return Err(BlockImportError::IncompleteHeader(peer)) - }, - }; - - trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); - - let number = header.number().clone(); - let hash = header.hash(); - let parent_hash = header.parent_hash().clone(); - - let import_handler = |import| { - match import { - Ok(ImportResult::AlreadyInChain) => { - trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedKnown(number)) - }, - Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())), - Ok(ImportResult::MissingState) => { - debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::MissingState) - }, - Ok(ImportResult::UnknownParent) => { - debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::UnknownParent) - }, - Ok(ImportResult::KnownBad) => { - debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); - Err(BlockImportError::BadBlock(peer.clone())) - }, - Err(e) => { - debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); - Err(BlockImportError::Other(e)) - } - } - }; - - match import_handler(import_handle.check_block(BlockCheckParams { - hash, - number, - parent_hash, - allow_missing_state: block.allow_missing_state, - import_existing: block.import_existing, - }))? { - BlockImportResult::ImportedUnknown { .. } => (), - r => return Ok(r), // Any other successful result means that the block is already imported. 
- } - - let started = wasm_timer::Instant::now(); - let (mut import_block, maybe_keys) = verifier.verify(block_origin, header, justification, block.body) - .map_err(|msg| { - if let Some(ref peer) = peer { - trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); - } else { - trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); - } - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(false, started.elapsed()); - } - BlockImportError::VerificationFailed(peer.clone(), msg) - })?; - - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(true, started.elapsed()); - } - - let mut cache = HashMap::new(); - if let Some(keys) = maybe_keys { - cache.extend(keys.into_iter()); - } - import_block.allow_missing_state = block.allow_missing_state; - - let imported = import_handle.import_block(import_block.convert_transaction(), cache); - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification_and_import(started.elapsed()); - } - import_handler(imported) -} diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs deleted file mode 100644 index ea0ca2cf3ee88..0000000000000 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ /dev/null @@ -1,686 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::{mem, pin::Pin, time::Duration, marker::PhantomData}; -use futures::{prelude::*, task::Context, task::Poll}; -use futures_timer::Delay; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; -use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded}; -use prometheus_endpoint::Registry; - -use crate::{ - block_import::BlockOrigin, - import_queue::{ - BlockImportResult, BlockImportError, Verifier, BoxBlockImport, BoxFinalityProofImport, - BoxJustificationImport, ImportQueue, Link, Origin, - IncomingBlock, import_single_block_metered, - buffered_link::{self, BufferedLinkSender, BufferedLinkReceiver}, - }, - metrics::Metrics, -}; - -/// Interface to a basic block import queue that is importing blocks sequentially in a separate -/// task, with plugable verification. -pub struct BasicQueue { - /// Channel to send finality work messages to the background task. - finality_sender: TracingUnboundedSender>, - /// Channel to send block import messages to the background task. - block_import_sender: TracingUnboundedSender>, - /// Results coming from the worker task. - result_port: BufferedLinkReceiver, - _phantom: PhantomData, -} - -impl Drop for BasicQueue { - fn drop(&mut self) { - // Flush the queue and close the receiver to terminate the future. 
- self.finality_sender.close_channel(); - self.block_import_sender.close_channel(); - self.result_port.close(); - } -} - -impl BasicQueue { - /// Instantiate a new basic queue, with given verifier. - /// - /// This creates a background task, and calls `on_start` on the justification importer and - /// finality proof importer. - pub fn new>( - verifier: V, - block_import: BoxBlockImport, - justification_import: Option>, - finality_proof_import: Option>, - spawner: &impl sp_core::traits::SpawnNamed, - prometheus_registry: Option<&Registry>, - ) -> Self { - let (result_sender, result_port) = buffered_link::buffered_link(); - - let metrics = prometheus_registry.and_then(|r| { - Metrics::register(r) - .map_err(|err| { - log::warn!("Failed to register Prometheus metrics: {}", err); - }) - .ok() - }); - - let (future, finality_sender, block_import_sender) = BlockImportWorker::new( - result_sender, - verifier, - block_import, - justification_import, - finality_proof_import, - metrics, - ); - - spawner.spawn_blocking("basic-block-import-worker", future.boxed()); - - Self { - finality_sender, - block_import_sender, - result_port, - _phantom: PhantomData, - } - } -} - -impl ImportQueue for BasicQueue { - fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { - if blocks.is_empty() { - return; - } - - trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); - let res = - self.block_import_sender.unbounded_send(worker_messages::ImportBlocks(origin, blocks)); - - if res.is_err() { - log::error!( - target: "sync", - "import_blocks: Background import task is no longer alive" - ); - } - } - - fn import_justification( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - justification: Justification, - ) { - let res = self.finality_sender.unbounded_send( - worker_messages::Finality::ImportJustification(who, hash, number, justification), - ); - - if res.is_err() { - log::error!( - target: "sync", - "import_justification: Background import task is no longer alive" - ); - } - } - - fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - ) { - trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash); - let res = self.finality_sender.unbounded_send( - worker_messages::Finality::ImportFinalityProof(who, hash, number, finality_proof), - ); - - if res.is_err() { - log::error!( - target: "sync", - "import_finality_proof: Background import task is no longer alive" - ); - } - } - - fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { - if self.result_port.poll_actions(cx, link).is_err() { - log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); - } - } -} - -/// Messages destinated to the background worker. 
-mod worker_messages { - use super::*; - - pub struct ImportBlocks(pub BlockOrigin, pub Vec>); - - pub enum Finality { - ImportJustification(Origin, B::Hash, NumberFor, Justification), - ImportFinalityProof(Origin, B::Hash, NumberFor, Vec), - } -} - -struct BlockImportWorker { - result_sender: BufferedLinkSender, - justification_import: Option>, - finality_proof_import: Option>, - delay_between_blocks: Duration, - metrics: Option, - _phantom: PhantomData, -} - -impl BlockImportWorker { - fn new>( - result_sender: BufferedLinkSender, - verifier: V, - block_import: BoxBlockImport, - justification_import: Option>, - finality_proof_import: Option>, - metrics: Option, - ) -> ( - impl Future + Send, - TracingUnboundedSender>, - TracingUnboundedSender>, - ) { - use worker_messages::*; - - let (finality_sender, mut finality_port) = - tracing_unbounded("mpsc_import_queue_worker_finality"); - - let (block_import_sender, mut block_import_port) = - tracing_unbounded("mpsc_import_queue_worker_blocks"); - - let mut worker = BlockImportWorker { - result_sender, - justification_import, - finality_proof_import, - delay_between_blocks: Duration::new(0, 0), - metrics, - _phantom: PhantomData, - }; - - // Let's initialize `justification_import` and `finality_proof_import`. - if let Some(justification_import) = worker.justification_import.as_mut() { - for (hash, number) in justification_import.on_start() { - worker.result_sender.request_justification(&hash, number); - } - } - if let Some(finality_proof_import) = worker.finality_proof_import.as_mut() { - for (hash, number) in finality_proof_import.on_start() { - worker.result_sender.request_finality_proof(&hash, number); - } - } - - // The future below has two possible states: - // - // - Currently importing many blocks, in which case `importing` is `Some` and contains a - // `Future`, and `block_import` is `None`. - // - Something else, in which case `block_import` is `Some` and `importing` is None. - // - // Additionally, the task will prioritize processing of finality work messages over - // block import messages, hence why two distinct channels are used. - let mut block_import_verifier = Some((block_import, verifier)); - let mut importing = None; - - let future = futures::future::poll_fn(move |cx| { - loop { - // If the results sender is closed, that means that the import queue is shutting - // down and we should end this future. - if worker.result_sender.is_closed() { - return Poll::Ready(()) - } - - // Grab the next finality action request sent to the import queue. - let finality_work = match Stream::poll_next(Pin::new(&mut finality_port), cx) { - Poll::Ready(Some(msg)) => Some(msg), - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => None, - }; - - match finality_work { - Some(Finality::ImportFinalityProof(who, hash, number, proof)) => { - let (_, verif) = block_import_verifier - .as_mut() - .expect("block_import_verifier is always Some; qed"); - - worker.import_finality_proof(verif, who, hash, number, proof); - continue; - } - Some(Finality::ImportJustification(who, hash, number, justification)) => { - worker.import_justification(who, hash, number, justification); - continue; - } - None => {} - } - - // If we are in the process of importing a bunch of blocks, let's resume this - // process before doing anything more. 
- if let Some(imp_fut) = importing.as_mut() { - match Future::poll(Pin::new(imp_fut), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready((bi, verif)) => { - block_import_verifier = Some((bi, verif)); - importing = None; - }, - } - } - - debug_assert!(importing.is_none()); - debug_assert!(block_import_verifier.is_some()); - - // Grab the next block import request sent to the import queue. - let ImportBlocks(origin, blocks) = - match Stream::poll_next(Pin::new(&mut block_import_port), cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => return Poll::Pending, - }; - - // On blocks import request, we merely *start* the process and store - // a `Future` into `importing`. - let (block_import, verifier) = block_import_verifier - .take() - .expect("block_import_verifier is always Some; qed"); - - importing = Some(worker.import_batch(block_import, verifier, origin, blocks)); - } - }); - - (future, finality_sender, block_import_sender) - } - - /// Returns a `Future` that imports the given blocks and sends the results on - /// `self.result_sender`. - /// - /// For lifetime reasons, the `BlockImport` implementation must be passed by value, and is - /// yielded back in the output once the import is finished. - fn import_batch>( - &mut self, - block_import: BoxBlockImport, - verifier: V, - origin: BlockOrigin, - blocks: Vec>, - ) -> impl Future, V)> { - let mut result_sender = self.result_sender.clone(); - let metrics = self.metrics.clone(); - - import_many_blocks(block_import, origin, blocks, verifier, self.delay_between_blocks, metrics) - .then(move |(imported, count, results, block_import, verifier)| { - result_sender.blocks_processed(imported, count, results); - future::ready((block_import, verifier)) - }) - } - - fn import_finality_proof>( - &mut self, - verifier: &mut V, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ) { - let started = wasm_timer::Instant::now(); - let result = self.finality_proof_import.as_mut().map(|finality_proof_import| { - finality_proof_import.import_finality_proof(hash, number, finality_proof, verifier) - .map_err(|e| { - debug!( - "Finality proof import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", - e, - hash, - number, - who, - ); - }) - }).unwrap_or(Err(())); - - if let Some(metrics) = self.metrics.as_ref() { - metrics.finality_proof_import_time.observe(started.elapsed().as_secs_f64()); - } - - trace!(target: "sync", "Imported finality proof for {}/{}", number, hash); - self.result_sender.finality_proof_imported(who, (hash, number), result); - } - - fn import_justification( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - justification: Justification - ) { - let started = wasm_timer::Instant::now(); - let success = self.justification_import.as_mut().map(|justification_import| { - justification_import.import_justification(hash, number, justification) - .map_err(|e| { - debug!( - target: "sync", - "Justification import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", - e, - hash, - number, - who, - ); - e - }).is_ok() - }).unwrap_or(false); - - if let Some(metrics) = self.metrics.as_ref() { - metrics.justification_import_time.observe(started.elapsed().as_secs_f64()); - } - - self.result_sender.justification_imported(who, &hash, number, success); - } -} - -/// Import several blocks at once, returning import result for each block. 
-/// -/// For lifetime reasons, the `BlockImport` implementation must be passed by value, and is yielded -/// back in the output once the import is finished. -/// -/// The returned `Future` yields at every imported block, which makes the execution more -/// fine-grained and making it possible to interrupt the process. -fn import_many_blocks, Transaction>( - import_handle: BoxBlockImport, - blocks_origin: BlockOrigin, - blocks: Vec>, - verifier: V, - delay_between_blocks: Duration, - metrics: Option, -) -> impl Future< - Output = ( - usize, - usize, - Vec<(Result>, BlockImportError>, B::Hash)>, - BoxBlockImport, - V, - ), -> { - let count = blocks.len(); - - let blocks_range = match ( - blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), - blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - - trace!(target: "sync", "Starting import of {} blocks {}", count, blocks_range); - - let mut imported = 0; - let mut results = vec![]; - let mut has_error = false; - let mut blocks = blocks.into_iter(); - let mut import_handle = Some(import_handle); - let mut waiting = None; - let mut verifier = Some(verifier); - - // Blocks in the response/drain should be in ascending order. - - future::poll_fn(move |cx| { - // Handle the optional timer that makes us wait before the next import. - if let Some(waiting) = &mut waiting { - match Future::poll(Pin::new(waiting), cx) { - Poll::Ready(_) => {}, - Poll::Pending => return Poll::Pending, - } - } - waiting = None; - - // Is there any block left to import? - let block = match blocks.next() { - Some(b) => b, - None => { - // No block left to import, success! - let import_handle = import_handle.take() - .expect("Future polled again after it has finished (import handle is None)"); - let verifier = verifier.take() - .expect("Future polled again after it has finished (verifier handle is None)"); - let results = mem::replace(&mut results, Vec::new()); - return Poll::Ready((imported, count, results, import_handle, verifier)); - }, - }; - - // We extract the content of `import_handle` and `verifier` only when the future ends, - // therefore `import_handle` and `verifier` are always `Some` here. It is illegal to poll - // a `Future` again after it has ended. - let import_handle = import_handle.as_mut() - .expect("Future polled again after it has finished (import handle is None)"); - let verifier = verifier.as_mut() - .expect("Future polled again after it has finished (verifier handle is None)"); - - let block_number = block.header.as_ref().map(|h| h.number().clone()); - let block_hash = block.hash; - let import_result = if has_error { - Err(BlockImportError::Cancelled) - } else { - // The actual import. - import_single_block_metered( - &mut **import_handle, - blocks_origin.clone(), - block, - verifier, - metrics.clone(), - ) - }; - - if let Some(metrics) = metrics.as_ref() { - metrics.report_import::(&import_result); - } - - if import_result.is_ok() { - trace!(target: "sync", "Block imported successfully {:?} ({})", block_number, block_hash); - imported += 1; - } else { - has_error = true; - } - - results.push((import_result, block_hash)); - - // Notifies the current task again so that we re-execute this closure again for the next - // block. 
- if delay_between_blocks != Duration::new(0, 0) { - waiting = Some(Delay::new(delay_between_blocks)); - } - cx.waker().wake_by_ref(); - Poll::Pending - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - import_queue::{CacheKeyId, Verifier}, - BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, - }; - use futures::{executor::block_on, Future}; - use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header}; - use std::collections::HashMap; - - impl Verifier for () { - fn verify( - &mut self, - origin: BlockOrigin, - header: Header, - _justification: Option, - _body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - Ok((BlockImportParams::new(origin, header), None)) - } - } - - impl BlockImport for () { - type Error = crate::Error; - type Transaction = Extrinsic; - - fn check_block( - &mut self, - _block: BlockCheckParams, - ) -> Result { - Ok(ImportResult::imported(false)) - } - - fn import_block( - &mut self, - _block: BlockImportParams, - _cache: HashMap>, - ) -> Result { - Ok(ImportResult::imported(true)) - } - } - - impl JustificationImport for () { - type Error = crate::Error; - - fn import_justification( - &mut self, - _hash: Hash, - _number: BlockNumber, - _justification: Justification, - ) -> Result<(), Self::Error> { - Ok(()) - } - } - - #[derive(Debug, PartialEq)] - enum Event { - JustificationImported(Hash), - BlockImported(Hash), - } - - #[derive(Default)] - struct TestLink { - events: Vec, - } - - impl Link for TestLink { - fn blocks_processed( - &mut self, - _imported: usize, - _count: usize, - results: Vec<(Result, BlockImportError>, Hash)>, - ) { - if let Some(hash) = results.into_iter().find_map(|(r, h)| r.ok().map(|_| h)) { - self.events.push(Event::BlockImported(hash)); - } - } - - fn justification_imported( - &mut self, - _who: Origin, - hash: &Hash, - _number: BlockNumber, - _success: bool, - ) { - self.events.push(Event::JustificationImported(hash.clone())) - } - } - - #[test] - fn prioritizes_finality_work_over_block_import() { - let (result_sender, mut result_port) = buffered_link::buffered_link(); - - let (mut worker, mut finality_sender, mut block_import_sender) = - BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None, None); - - let mut import_block = |n| { - let header = Header { - parent_hash: Hash::random(), - number: n, - extrinsics_root: Hash::random(), - state_root: Default::default(), - digest: Default::default(), - }; - - let hash = header.hash(); - - block_on(block_import_sender.send(worker_messages::ImportBlocks( - BlockOrigin::Own, - vec![IncomingBlock { - hash, - header: Some(header), - body: None, - justification: None, - origin: None, - allow_missing_state: false, - import_existing: false, - }], - ))) - .unwrap(); - - hash - }; - - let mut import_justification = || { - let hash = Hash::random(); - - block_on(finality_sender.send(worker_messages::Finality::ImportJustification( - libp2p::PeerId::random(), - hash, - 1, - Vec::new(), - ))) - .unwrap(); - - hash - }; - - let mut link = TestLink::default(); - - // we send a bunch of tasks to the worker - let block1 = import_block(1); - let block2 = import_block(2); - let block3 = import_block(3); - let justification1 = import_justification(); - let justification2 = import_justification(); - let block4 = import_block(4); - let block5 = import_block(5); - let block6 = import_block(6); - let justification3 = import_justification(); - - // we poll the worker until we have processed 9 events - 
block_on(futures::future::poll_fn(|cx| { - while link.events.len() < 9 { - match Future::poll(Pin::new(&mut worker), cx) { - Poll::Pending => {} - Poll::Ready(()) => panic!("import queue worker should not conclude."), - } - - result_port.poll_actions(cx, &mut link).unwrap(); - } - - Poll::Ready(()) - })); - - // all justification tasks must be done before any block import work - assert_eq!( - link.events, - vec![ - Event::JustificationImported(justification1), - Event::JustificationImported(justification2), - Event::JustificationImported(justification3), - Event::BlockImported(block1), - Event::BlockImported(block2), - Event::BlockImported(block3), - Event::BlockImported(block4), - Event::BlockImported(block5), - Event::BlockImported(block6), - ] - ); - } -} diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index fa4f233c680fa..d7979baf47c11 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -1,18 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate Consensus Common. - -// Substrate Demo is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate Consensus Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate Consensus Common. If not, see . +// This file is part of Substrate. + +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Common utilities for building and using consensus engines in substrate. //! @@ -20,43 +21,27 @@ //! change. Implementors of traits should not rely on the interfaces to remain //! the same. -// This provides "unused" building blocks to other crates -#![allow(dead_code)] - -// our error-chain could potentially blow up otherwise -#![recursion_limit="128"] - -#[macro_use] extern crate log; - -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; +use futures::prelude::*; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, DigestFor, NumberFor, HashFor}, + generic::BlockId, + traits::{Block as BlockT, DigestFor, HashFor, NumberFor}, }; -use futures::prelude::*; -pub use sp_inherents::InherentData; +use sp_state_machine::StorageProof; pub mod block_validation; -pub mod offline_tracker; pub mod error; -pub mod block_import; -mod select_chain; -pub mod import_queue; pub mod evaluation; -mod metrics; - -// block size limit. 
-const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512;
+mod select_chain;

 pub use self::error::Error;
-pub use block_import::{
-	BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, BlockCheckParams,
-	ImportResult, JustificationImport, FinalityProofImport,
-};
 pub use select_chain::SelectChain;
+pub use sp_inherents::InherentData;
 pub use sp_state_machine::Backend as StateBackend;
-pub use import_queue::DefaultImportQueue;
+
+/// Type of keys in the blockchain cache that consensus module could use for its needs.
+pub type CacheKeyId = [u8; 4];

 /// Block status.
 #[derive(Debug, PartialEq, Eq)]
@@ -73,6 +58,33 @@ pub enum BlockStatus {
 	Unknown,
 }

+/// Block data origin.
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum BlockOrigin {
+	/// Genesis block built into the client.
+	Genesis,
+	/// Block is part of the initial sync with the network.
+	NetworkInitialSync,
+	/// Block was broadcasted on the network.
+	NetworkBroadcast,
+	/// Block that was received from the network and validated in the consensus process.
+	ConsensusBroadcast,
+	/// Block that was collated by this node.
+	Own,
+	/// Block was imported from a file.
+	File,
+}
+
+impl From<BlockOrigin> for sp_core::ExecutionContext {
+	fn from(origin: BlockOrigin) -> Self {
+		if origin == BlockOrigin::NetworkInitialSync {
+			sp_core::ExecutionContext::Syncing
+		} else {
+			sp_core::ExecutionContext::Importing
+		}
+	}
+}
+
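Editor's aside: a small illustration of the `From<BlockOrigin>` conversion added above, assuming the `sp-consensus`/`sp-core` crate paths from this diff:

```rust
use sp_consensus::BlockOrigin;
use sp_core::ExecutionContext;

fn context_for(origin: BlockOrigin) -> ExecutionContext {
	// Only initial network sync maps to `Syncing`; every other origin is
	// treated as a normal import.
	origin.into()
}

fn main() {
	assert!(matches!(
		context_for(BlockOrigin::NetworkInitialSync),
		ExecutionContext::Syncing
	));
	assert!(matches!(context_for(BlockOrigin::Own), ExecutionContext::Importing));
}
```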
 /// Environment for a Consensus instance.
 ///
 /// Creates proposer instance.
@@ -81,7 +93,9 @@ pub trait Environment<B: BlockT> {
 	type Proposer: Proposer<B> + Send + 'static;
 	/// A future that resolves to the proposer.
 	type CreateProposer: Future<Output = Result<Self::Proposer, Self::Error>>
-		+ Send + Unpin + 'static;
+		+ Send
+		+ Unpin
+		+ 'static;
 	/// Error which can occur upon creation.
 	type Error: From<Error> + std::fmt::Debug + 'static;
@@ -91,48 +105,84 @@
 }

 /// A proposal that is created by a [`Proposer`].
-pub struct Proposal<Block: BlockT, Transaction> {
+pub struct Proposal<Block: BlockT, Transaction, Proof> {
 	/// The block that was built.
 	pub block: Block,
-	/// Optional proof that was recorded while building the block.
-	pub proof: Option<sp_state_machine::StorageProof>,
+	/// Proof that was recorded while building the block.
+	pub proof: Proof,
 	/// The storage changes while building this block.
-	pub storage_changes: sp_state_machine::StorageChanges<Transaction, HashFor<Block>, NumberFor<Block>>,
+	pub storage_changes:
+		sp_state_machine::StorageChanges<Transaction, HashFor<Block>, NumberFor<Block>>,
 }

-/// Used as parameter to [`Proposer`] to tell the requirement on recording a proof.
+/// Error that is returned when [`ProofRecording`] requested to record a proof,
+/// but no proof was recorded.
+#[derive(Debug, thiserror::Error)]
+#[error("Proof should be recorded, but no proof was provided.")]
+pub struct NoProofRecorded;
+
+/// A trait to express the state of proof recording on type system level.
+///
+/// This is used by [`Proposer`] to signal if proof recording is enabled. This can be used by
+/// downstream users of the [`Proposer`] trait to enforce that proof recording is activated when
+/// required. The only two implementations of this trait are [`DisableProofRecording`] and
+/// [`EnableProofRecording`].
 ///
-/// When `RecordProof::Yes` is given, all accessed trie nodes should be saved. These recorded
-/// trie nodes can be used by a third party to proof this proposal without having access to the
-/// full storage.
-#[derive(Copy, Clone, PartialEq)]
-pub enum RecordProof {
-	/// `Yes`, record a proof.
-	Yes,
-	/// `No`, don't record any proof.
-	No,
+/// This trait is sealed and can not be implemented outside of this crate!
+pub trait ProofRecording: Send + Sync + private::Sealed + 'static {
+	/// The proof type that will be used internally.
+	type Proof: Send + Sync + 'static;
+	/// Is proof recording enabled?
+	const ENABLED: bool;
+	/// Convert the given `storage_proof` into [`Self::Proof`].
+	///
+	/// Internally Substrate uses `Option<StorageProof>` to express both states of proof
+	/// recording (for now) and as [`Self::Proof`] is some different type, we need to provide a
+	/// function to convert this value.
+	///
+	/// If the proof recording was requested, but `None` is given, this will return
+	/// `Err(NoProofRecorded)`.
+	fn into_proof(storage_proof: Option<StorageProof>) -> Result<Self::Proof, NoProofRecorded>;
 }

-impl RecordProof {
-	/// Returns if `Self` == `Yes`.
-	pub fn yes(&self) -> bool {
-		match self {
-			Self::Yes => true,
-			Self::No => false,
-		}
+/// Express that proof recording is disabled.
+///
+/// For more information see [`ProofRecording`].
+pub struct DisableProofRecording;
+
+impl ProofRecording for DisableProofRecording {
+	type Proof = ();
+	const ENABLED: bool = false;
+
+	fn into_proof(_: Option<StorageProof>) -> Result<Self::Proof, NoProofRecorded> {
+		Ok(())
 	}
 }

-impl From<bool> for RecordProof {
-	fn from(val: bool) -> Self {
-		if val {
-			Self::Yes
-		} else {
-			Self::No
-		}
+/// Express that proof recording is enabled.
+///
+/// For more information see [`ProofRecording`].
+pub struct EnableProofRecording;
+
+impl ProofRecording for EnableProofRecording {
+	type Proof = sp_state_machine::StorageProof;
+	const ENABLED: bool = true;
+
+	fn into_proof(proof: Option<StorageProof>) -> Result<Self::Proof, NoProofRecorded> {
+		proof.ok_or_else(|| NoProofRecorded)
 	}
 }

+/// Provides `Sealed` trait to prevent implementing trait [`ProofRecording`] outside of this crate.
+mod private {
+	/// Special trait that prevents the implementation of [`super::ProofRecording`] outside of this
+	/// crate.
+	pub trait Sealed {}
+
+	impl Sealed for super::DisableProofRecording {}
+	impl Sealed for super::EnableProofRecording {}
+}
+
 /// Logic for a proposer.
 ///
 /// This will encapsulate creation and evaluation of proposals at a specific
@@ -145,8 +195,15 @@ pub trait Proposer<B: BlockT> {
 	/// The transaction type used by the backend.
 	type Transaction: Default + Send + 'static;
 	/// Future that resolves to a committed proposal with an optional proof.
-	type Proposal: Future<Output = Result<Proposal<B, Self::Transaction>, Self::Error>> +
-		Send + Unpin + 'static;
+	type Proposal: Future<Output = Result<Proposal<B, Self::Transaction, Self::Proof>, Self::Error>>
+		+ Send
+		+ Unpin
+		+ 'static;
+	/// The supported proof recording by the implementor of this trait. See [`ProofRecording`]
+	/// for more information.
+	type ProofRecording: self::ProofRecording<Proof = Self::Proof> + Send + Sync + 'static;
+	/// The proof type used by [`Self::ProofRecording`].
+	type Proof: Send + Sync + 'static;

 	/// Create a proposal.
 	///
@@ -154,6 +211,13 @@
 	/// Gets the `inherent_data` and `inherent_digests` as input for the proposal. Additionally
 	/// a maximum duration for building this proposal is given. If building the proposal takes
 	/// longer than this maximum, the proposal will be very likely discarded.
 	///
+	/// If `block_size_limit` is given, the proposer should push transactions until the block size
+	/// limit is hit. Depending on the `finalize_block` implementation of the runtime, it probably
+	/// incorporates other operations (that are happening after the block limit is hit). So,
+	/// when the block size estimation also includes a proof that is recorded alongside the block
+	/// production, the proof can still grow. This means that the `block_size_limit` should not be
+	/// the hard limit of what is actually allowed.
+	///
 	/// # Return
 	///
 	/// Returns a future that resolves to a [`Proposal`] or to [`Error`].
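Editor's aside: a sketch of how downstream code can consume the type-level switch introduced above, assuming the `sp-consensus` and `sp-state-machine` crates from this diff; `finish_block` is an invented helper:

```rust
use sp_consensus::{DisableProofRecording, EnableProofRecording, NoProofRecorded, ProofRecording};
use sp_state_machine::StorageProof;

/// Convert whatever the backend recorded into the statically chosen proof
/// type. `PR::ENABLED` is also available to decide up front whether to attach
/// a recorder at all.
fn finish_block<PR: ProofRecording>(
	recorded: Option<StorageProof>,
) -> Result<PR::Proof, NoProofRecorded> {
	PR::into_proof(recorded)
}

fn main() {
	// Disabled mode ignores the argument and yields `()`.
	assert!(finish_block::<DisableProofRecording>(None).is_ok());
	// Enabled mode fails loudly if no proof was actually recorded.
	assert!(finish_block::<EnableProofRecording>(None).is_err());
}
```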
@@ -162,7 +226,7 @@
 		inherent_data: InherentData,
 		inherent_digests: DigestFor<B>,
 		max_duration: Duration,
-		record_proof: RecordProof,
+		block_size_limit: Option<usize>,
 	) -> Self::Proposal;
 }

@@ -184,11 +248,19 @@ pub trait SyncOracle {
 pub struct NoNetwork;

 impl SyncOracle for NoNetwork {
-	fn is_major_syncing(&mut self) -> bool { false }
-	fn is_offline(&mut self) -> bool { false }
+	fn is_major_syncing(&mut self) -> bool {
+		false
+	}
+	fn is_offline(&mut self) -> bool {
+		false
+	}
 }

-impl<T> SyncOracle for Arc<T> where T: ?Sized, for<'r> &'r T: SyncOracle {
+impl<T> SyncOracle for Arc<T>
+where
+	T: ?Sized,
+	for<'r> &'r T: SyncOracle,
+{
 	fn is_major_syncing(&mut self) -> bool {
 		<&T>::is_major_syncing(&mut &**self)
 	}
@@ -222,19 +294,16 @@ impl<T> CanAuthorWithNativeVersion<T> {
 	}
 }

-impl<T: sp_version::GetRuntimeVersion<Block> + sp_version::GetNativeVersion, Block: BlockT> CanAuthorWith<Block>
-	for CanAuthorWithNativeVersion<T>
+impl<T: sp_version::GetRuntimeVersion<Block> + sp_version::GetNativeVersion, Block: BlockT>
+	CanAuthorWith<Block> for CanAuthorWithNativeVersion<T>
 {
 	fn can_author_with(&self, at: &BlockId<Block>) -> Result<(), String> {
 		match self.0.runtime_version(at) {
 			Ok(version) => self.0.native_version().can_author_with(&version),
-			Err(e) => {
-				Err(format!(
-					"Failed to get runtime version at `{}` and will disable authoring. Error: {}",
-					at,
-					e,
-				))
-			}
+			Err(e) => Err(format!(
+				"Failed to get runtime version at `{}` and will disable authoring. Error: {}",
+				at, e,
+			)),
 		}
 	}
 }
@@ -262,16 +331,8 @@ impl<Block: BlockT> CanAuthorWith<Block> for NeverCanAuthor {

 /// A type from which a slot duration can be obtained.
 pub trait SlotData {
 	/// Gets the slot duration.
-	fn slot_duration(&self) -> u64;
+	fn slot_duration(&self) -> sp_std::time::Duration;

 	/// The static slot key
 	const SLOT_KEY: &'static [u8];
 }
-
-impl SlotData for u64 {
-	fn slot_duration(&self) -> u64 {
-		*self
-	}
-
-	const SLOT_KEY: &'static [u8] = b"aura_slot_duration";
-}
diff --git a/primitives/consensus/common/src/offline_tracker.rs b/primitives/consensus/common/src/offline_tracker.rs
deleted file mode 100644
index b96498041f25d..0000000000000
--- a/primitives/consensus/common/src/offline_tracker.rs
+++ /dev/null
@@ -1,137 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Tracks offline validators.
-
-use std::collections::HashMap;
-use std::time::Duration;
-use wasm_timer::Instant;
-
-// time before we report a validator.
-const REPORT_TIME: Duration = Duration::from_secs(60 * 5); - -struct Observed { - last_round_end: Instant, - offline_since: Instant, -} - -impl Observed { - fn new() -> Observed { - let now = Instant::now(); - Observed { - last_round_end: now, - offline_since: now, - } - } - - fn note_round_end(&mut self, was_online: bool) { - let now = Instant::now(); - - self.last_round_end = now; - if was_online { - self.offline_since = now; - } - } - - fn is_active(&self) -> bool { - // can happen if clocks are not monotonic - if self.offline_since > self.last_round_end { return true } - self.last_round_end.duration_since(self.offline_since) < REPORT_TIME - } -} - -/// Tracks offline validators and can issue a report for those offline. -pub struct OfflineTracker { - observed: HashMap, -} - -impl OfflineTracker { - /// Create a new tracker. - pub fn new() -> Self { - OfflineTracker { observed: HashMap::new() } - } - - /// Note new consensus is starting with the given set of validators. - pub fn note_new_block(&mut self, validators: &[AuthorityId]) { - use std::collections::HashSet; - - let set: HashSet<_> = validators.iter().cloned().collect(); - self.observed.retain(|k, _| set.contains(k)); - } - - /// Note that a round has ended. - pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) { - self.observed.entry(validator) - .or_insert_with(Observed::new) - .note_round_end(was_online); - } - - /// Generate a vector of indices for offline account IDs. - pub fn reports(&self, validators: &[AuthorityId]) -> Vec { - validators.iter() - .enumerate() - .filter_map(|(i, v)| if self.is_online(v) { - None - } else { - Some(i as u32) - }) - .collect() - } - - /// Whether reports on a validator set are consistent with our view of things. - pub fn check_consistency(&self, validators: &[AuthorityId], reports: &[u32]) -> bool { - reports.iter().cloned().all(|r| { - let v = match validators.get(r as usize) { - Some(v) => v, - None => return false, - }; - - // we must think all validators reported externally are offline. - let thinks_online = self.is_online(v); - !thinks_online - }) - } - - fn is_online(&self, v: &AuthorityId) -> bool { - self.observed.get(v).map(Observed::is_active).unwrap_or(true) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn validator_offline() { - let mut tracker = OfflineTracker::::new(); - let v1 = 1; - let v2 = 2; - let v3 = 3; - tracker.note_round_end(v1, true); - tracker.note_round_end(v2, true); - tracker.note_round_end(v3, true); - - let slash_time = REPORT_TIME + Duration::from_secs(5); - tracker.observed.get_mut(&v1).unwrap().offline_since -= slash_time; - tracker.observed.get_mut(&v2).unwrap().offline_since -= slash_time; - - assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0, 1]); - - tracker.note_new_block(&[v1, v3]); - assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0]); - } -} diff --git a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index fe0d3972043b9..5408fc86b7bd4 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -1,23 +1,23 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate Consensus Common. - -// Substrate Demo is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-
-// Substrate Consensus Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate Consensus Common. If not, see <http://www.gnu.org/licenses/>.
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

 use crate::error::Error;
 use sp_runtime::traits::{Block as BlockT, NumberFor};

-
 /// The SelectChain trait defines the strategy upon which the head is chosen
 /// if multiple forks are present for an opaque definition of "best" in the
 /// specific chain build.
@@ -32,23 +32,24 @@ use sp_runtime::traits::{Block as BlockT, NumberFor};
 /// some implementations.
 ///
 /// Non-deterministically finalizing chains may only use the `_authoring` functions.
+#[async_trait::async_trait]
 pub trait SelectChain<Block: BlockT>: Sync + Send + Clone {
-
-	/// Get all leaves of the chain: block hashes that have no children currently.
+	/// Get all leaves of the chain, i.e. block hashes that have no children currently.
 	/// Leaves that can never be finalized will not be returned.
-	fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, Error>;
+	async fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, Error>;

 	/// Among those `leaves` deterministically pick one chain as the generally
-	/// best chain to author new blocks upon and probably finalize.
-	fn best_chain(&self) -> Result<<Block as BlockT>::Header, Error>;
+	/// best chain to author new blocks upon and probably (but not necessarily)
+	/// finalize.
+	async fn best_chain(&self) -> Result<<Block as BlockT>::Header, Error>;

 	/// Get the best descendent of `target_hash` that we should attempt to
 	/// finalize next, if any. It is valid to return the given `target_hash`
 	/// itself if no better descendent exists.
-	fn finality_target(
+	async fn finality_target(
 		&self,
 		target_hash: <Block as BlockT>::Hash,
-		_maybe_max_number: Option<NumberFor<Block>>
+		_maybe_max_number: Option<NumberFor<Block>>,
 	) -> Result<Option<<Block as BlockT>::Hash>, Error> {
 		Ok(Some(target_hash))
 	}
 }
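Editor's aside: with `SelectChain` now annotated with `#[async_trait]`, implementations write `async fn` directly. A minimal sketch under that assumption; `DummySelectChain` is invented and is not the real `sc-consensus` implementation:

```rust
use sp_consensus::{Error, SelectChain};
use sp_runtime::traits::Block as BlockT;

#[derive(Clone)]
struct DummySelectChain;

#[async_trait::async_trait]
impl<B: BlockT> SelectChain<B> for DummySelectChain {
	async fn leaves(&self) -> Result<Vec<B::Hash>, Error> {
		// A real implementation would query the blockchain backend here.
		Ok(Vec::new())
	}

	async fn best_chain(&self) -> Result<B::Header, Error> {
		Err(Error::ChainLookup("empty dummy chain".into()))
	}

	// `finality_target` keeps its default body from the trait.
}
```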
diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml
index cbcea886a7095..f93eeca2fb24e 100644
--- a/primitives/consensus/pow/Cargo.toml
+++ b/primitives/consensus/pow/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-consensus-pow"
-version = "0.8.0"
+version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Primitives for Aura consensus"
 edition = "2018"
@@ -13,11 +13,11 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-sp-api = { version = "2.0.0", default-features = false, path = "../../api" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../std" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" }
-sp-core = { version = "2.0.0", default-features = false, path = "../../core" }
-codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] }
+sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../std" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../runtime" }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../core" }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }

 [features]
 default = ["std"]
diff --git a/primitives/consensus/pow/src/lib.rs b/primitives/consensus/pow/src/lib.rs
index 79c9b6f16c3bd..ac8bc589c136f 100644
--- a/primitives/consensus/pow/src/lib.rs
+++ b/primitives/consensus/pow/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,9 +19,9 @@

 #![cfg_attr(not(feature = "std"), no_std)]

-use sp_std::vec::Vec;
-use sp_runtime::ConsensusEngineId;
 use codec::Decode;
+use sp_runtime::ConsensusEngineId;
+use sp_std::vec::Vec;

 /// The `ConsensusEngineId` of PoW.
 pub const POW_ENGINE_ID: ConsensusEngineId = [b'p', b'o', b'w', b'_'];
diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml
index e605d585b7229..3ad204f973961 100644
--- a/primitives/consensus/slots/Cargo.toml
+++ b/primitives/consensus/slots/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-consensus-slots"
-version = "0.8.0"
+version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Primitives for slots-based consensus"
 edition = "2018"
@@ -13,12 +13,16 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../runtime" }
+sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../arithmetic" }

 [features]
 default = ["std"]
 std = [
 	"codec/std",
+	"scale-info/std",
 	"sp-runtime/std",
+	"sp-arithmetic/std",
 ]
diff --git a/primitives/consensus/slots/src/lib.rs b/primitives/consensus/slots/src/lib.rs
index f898cf9da6e2a..89b57dca83082 100644
--- a/primitives/consensus/slots/src/lib.rs
+++ b/primitives/consensus/slots/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,21 +19,90 @@

 #![cfg_attr(not(feature = "std"), no_std)]

-use codec::{Decode, Encode};
+use codec::{Decode, Encode, MaxEncodedLen};
+use scale_info::TypeInfo;

-/// A slot number.
-pub type SlotNumber = u64;
+/// Unit type wrapper that represents a slot.
+#[derive(Debug, Encode, MaxEncodedLen, Decode, Eq, Clone, Copy, Default, Ord, TypeInfo)]
+pub struct Slot(u64);
+
+impl core::ops::Deref for Slot {
+	type Target = u64;
+
+	fn deref(&self) -> &u64 {
+		&self.0
+	}
+}
+
+impl core::ops::Add for Slot {
+	type Output = Self;
+
+	fn add(self, other: Self) -> Self {
+		Self(self.0 + other.0)
+	}
+}
+
+impl core::ops::Add<u64> for Slot {
+	type Output = Self;
+
+	fn add(self, other: u64) -> Self {
+		Self(self.0 + other)
+	}
+}
+
+impl<T: Into<u64> + Copy> core::cmp::PartialEq<T> for Slot {
+	fn eq(&self, eq: &T) -> bool {
+		self.0 == (*eq).into()
+	}
+}
+
+impl<T: Into<u64> + Copy> core::cmp::PartialOrd<T> for Slot {
+	fn partial_cmp(&self, other: &T) -> Option<core::cmp::Ordering> {
+		self.0.partial_cmp(&(*other).into())
+	}
+}
+
+impl Slot {
+	/// Saturating addition.
+	pub fn saturating_add<T: Into<u64>>(self, rhs: T) -> Self {
+		Self(self.0.saturating_add(rhs.into()))
+	}
+
+	/// Saturating subtraction.
+	pub fn saturating_sub<T: Into<u64>>(self, rhs: T) -> Self {
+		Self(self.0.saturating_sub(rhs.into()))
+	}
+}
+
+#[cfg(feature = "std")]
+impl std::fmt::Display for Slot {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		write!(f, "{}", self.0)
+	}
+}
+
+impl From<u64> for Slot {
+	fn from(slot: u64) -> Slot {
+		Slot(slot)
+	}
+}
+
+impl From<Slot> for u64 {
+	fn from(slot: Slot) -> u64 {
+		slot.0
+	}
+}
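Editor's aside: the raw `SlotNumber` alias becomes a unit wrapper, and the impls above give it ergonomic arithmetic and comparisons. A quick usage sketch against the `sp-consensus-slots` crate as shown:

```rust
use sp_consensus_slots::Slot;

fn main() {
	let slot: Slot = 41u64.into(); // From<u64>
	let next = slot + 1u64; // Add<u64> yields another Slot

	assert_eq!(next, 42u64); // PartialEq against anything Into<u64>
	assert!(next > slot); // PartialOrd works the same way
	assert_eq!(*next, 42); // Deref exposes the inner u64

	let raw: u64 = next.into(); // and back out again
	assert_eq!(raw, 42);

	// The saturating helpers avoid underflow/overflow panics.
	assert_eq!(next.saturating_sub(100u64), 0u64);
}
```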
The proof of equivocation /// are the given distinct headers that were signed by the validator and which /// include the slot number. -#[derive(Clone, Debug, Decode, Encode, PartialEq)] +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct EquivocationProof { /// Returns the authority id of the equivocator. pub offender: Id, - /// The slot number at which the equivocation happened. - pub slot_number: SlotNumber, + /// The slot at which the equivocation happened. + pub slot: Slot, /// The first header involved in the equivocation. pub first_header: Header, /// The second header involved in the equivocation. diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml index d0b7d2e2f7aa8..124cbf423f068 100644 --- a/primitives/consensus/vrf/Cargo.toml +++ b/primitives/consensus/vrf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-vrf" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Primitives for VRF based consensus" edition = "2018" @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "1.0.0", package = "parity-scale-codec", default-features = false } +codec = { version = "2.0.0", package = "parity-scale-codec", default-features = false } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } -sp-std = { version = "2.0.0", path = "../../std", default-features = false } -sp-core = { version = "2.0.0", path = "../../core", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "4.0.0-dev", path = "../../std", default-features = false } +sp-core = { version = "4.0.0-dev", path = "../../core", default-features = false } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../runtime" } [features] default = ["std"] diff --git a/primitives/consensus/vrf/src/lib.rs b/primitives/consensus/vrf/src/lib.rs index 430e11974bcd4..19391c6c1c84f 100644 --- a/primitives/consensus/vrf/src/lib.rs +++ b/primitives/consensus/vrf/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs index 65e68375865d0..687e0bd231820 100644 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,19 @@ //! Schnorrkel-based VRF. 
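The `EquivocationProof` change earlier in this hunk renames `slot_number: SlotNumber` to `slot: Slot` but keeps the shape of the proof. A toy sketch of the condition it captures, with `Header` and the numeric `offender`/`slot` fields as simplified stand-ins for the real generic parameters:

```rust
// Same author, same slot, two distinct headers: an equivocation.
#[derive(Debug, PartialEq)]
struct Header(&'static str);

#[allow(dead_code)]
struct EquivocationProof {
    offender: u64,
    slot: u64,
    first_header: Header,
    second_header: Header,
}

fn check(offender: u64, slot: u64, a: Header, b: Header) -> Option<EquivocationProof> {
    if a != b {
        Some(EquivocationProof { offender, slot, first_header: a, second_header: b })
    } else {
        None
    }
}

fn main() {
    assert!(check(1, 42, Header("block A"), Header("block B")).is_some());
    assert!(check(1, 42, Header("block A"), Header("block A")).is_none());
}
```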
-use codec::{Encode, Decode, EncodeLike}; -use sp_std::{convert::TryFrom, prelude::*}; -use sp_core::U512; -use sp_std::ops::{Deref, DerefMut}; +use codec::{Decode, Encode, EncodeLike}; use schnorrkel::errors::MultiSignatureStage; +use sp_core::U512; +use sp_std::{ + convert::TryFrom, + ops::{Deref, DerefMut}, + prelude::*, +}; -pub use schnorrkel::{SignatureError, PublicKey, vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}}; +pub use schnorrkel::{ + vrf::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, + PublicKey, SignatureError, +}; /// The length of the Randomness. pub const RANDOMNESS_LENGTH: usize = VRF_OUTPUT_LENGTH; @@ -34,11 +40,15 @@ pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); impl Deref for VRFOutput { type Target = schnorrkel::vrf::VRFOutput; - fn deref(&self) -> &Self::Target { &self.0 } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl DerefMut for VRFOutput { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } impl Encode for VRFOutput { @@ -47,7 +57,7 @@ impl Encode for VRFOutput { } } -impl EncodeLike for VRFOutput { } +impl EncodeLike for VRFOutput {} impl Decode for VRFOutput { fn decode(i: &mut R) -> Result { @@ -82,11 +92,15 @@ impl Ord for VRFProof { impl Deref for VRFProof { type Target = schnorrkel::vrf::VRFProof; - fn deref(&self) -> &Self::Target { &self.0 } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl DerefMut for VRFProof { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } impl Encode for VRFProof { @@ -95,7 +109,7 @@ impl Encode for VRFProof { } } -impl EncodeLike for VRFProof { } +impl EncodeLike for VRFProof {} impl Decode for VRFProof { fn decode(i: &mut R) -> Result { @@ -113,8 +127,8 @@ impl TryFrom<[u8; VRF_PROOF_LENGTH]> for VRFProof { } fn convert_error(e: SignatureError) -> codec::Error { - use SignatureError::*; use MultiSignatureStage::*; + use SignatureError::*; match e { EquationFalse => "Signature error: `EquationFalse`".into(), PointDecompressionError => "Signature error: `PointDecompressionError`".into(), diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 8b71bd7bbb0d8..73c3d454ed584 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,54 +13,68 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -log = { version = "0.4.8", default-features = false } -serde = { version = "1.0.101", optional = true, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = [ + "derive", + "max-encoded-len", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +log = { version = "0.4.11", default-features = false } +serde = { version = "1.0.126", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } -primitive-types = { version = "0.7.0", default-features = false, features = ["codec"] } +primitive-types = { version = "0.10.1", 
default-features = false, features = [ + "codec", + "scale-info" +] } impl-serde = { version = "0.3.0", optional = true } -wasmi = { version = "0.6.2", optional = true } +wasmi = { version = "0.9.0", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.1.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } substrate-bip39 = { version = "0.4.2", optional = true } -tiny-bip39 = { version = "0.7", optional = true } -regex = { version = "1.3.1", optional = true } +tiny-bip39 = { version = "0.8", optional = true } +regex = { version = "1.4.2", optional = true } num-traits = { version = "0.2.8", default-features = false } -zeroize = { version = "1.0.0", default-features = false } -secrecy = { version = "0.6.0", default-features = false } +zeroize = { version = "1.4.1", default-features = false } +secrecy = { version = "0.7.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } -parking_lot = { version = "0.10.0", optional = true } -sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +parking_lot = { version = "0.11.1", optional = true } +sp-debug-derive = { version = "3.0.0", path = "../debug-derive" } +sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } +sp-storage = { version = "4.0.0-dev", default-features = false, path = "../storage" } +parity-util-mem = { version = "0.10.0", default-features = false, features = [ + "primitive-types", +] } futures = { version = "0.3.1", optional = true } dyn-clonable = { version = "0.9.0", optional = true } +thiserror = { version = "1.0.21", optional = true } # full crypto -ed25519-dalek = { version = "1.0.0-pre.4", default-features = false, features = ["u64_backend", "alloc"], optional = true } +ed25519-dalek = { version = "1.0.1", default-features = false, features = [ + "u64_backend", + "alloc", +], optional = true } blake2-rfc = { version = "0.2.18", default-features = false, optional = true } tiny-keccak = { version = "2.0.1", features = ["keccak"], optional = true } -schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false, optional = true } -sha2 = { version = "0.8.0", default-features = false, optional = true } +schnorrkel = { version = "0.9.1", features = [ + "preaudit_deprecated", + "u64_backend", +], default-features = false, optional = true } +sha2 = { version = "0.9.2", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true } twox-hash = { version = "1.5.0", default-features = false, optional = true } -libsecp256k1 = { version = "0.3.2", default-features = false, features = ["hmac"], optional = true } +libsecp256k1 = { version = "0.6", default-features = false, features = ["hmac", "static-context"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } +sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } [dev-dependencies] 
-sp-serializer = { version = "2.0.0", path = "../serializer" } -pretty_assertions = "0.6.1" +sp-serializer = { version = "3.0.0", path = "../serializer" } hex-literal = "0.3.1" rand = "0.7.2" criterion = "0.3.3" serde_json = "1.0" -rand_chacha = "0.2.2" [[bench]] name = "bench" @@ -74,6 +88,7 @@ default = ["std"] std = [ "full_crypto", "log/std", + "thiserror", "wasmi", "lazy_static", "parking_lot", @@ -83,6 +98,7 @@ std = [ "primitive-types/rustc-hex", "impl-serde", "codec/std", + "scale-info/std", "hash256-std-hasher/std", "hash-db/std", "sp-std/std", @@ -94,7 +110,6 @@ std = [ "base58", "substrate-bip39", "tiny-bip39", - "serde", "byteorder/std", "rand", "sha2/std", diff --git a/primitives/core/benches/bench.rs b/primitives/core/benches/bench.rs index dc57af459daa0..44bcd657ba3f0 100644 --- a/primitives/core/benches/bench.rs +++ b/primitives/core/benches/bench.rs @@ -12,28 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. - - #[macro_use] extern crate criterion; -use criterion::{Criterion, black_box, Bencher, Fun}; -use std::time::Duration; -use sp_core::crypto::Pair as _; -use sp_core::hashing::{twox_128, blake2_128}; +use criterion::{black_box, Bencher, BenchmarkId, Criterion}; +use sp_core::{ + crypto::Pair as _, + hashing::{blake2_128, twox_128}, +}; const MAX_KEY_SIZE: u32 = 32; fn get_key(key_size: u32) -> Vec { - use rand::SeedableRng; - use rand::Rng; + use rand::{Rng, SeedableRng}; let rnd: [u8; 32] = rand::rngs::StdRng::seed_from_u64(12).gen(); let mut rnd = rnd.iter().cycle(); - (0..key_size) - .map(|_| rnd.next().unwrap().clone()) - .collect() + (0..key_size).map(|_| *rnd.next().unwrap()).collect() } fn bench_blake2_128(b: &mut Bencher, key: &Vec) { @@ -49,87 +45,115 @@ fn bench_twox_128(b: &mut Bencher, key: &Vec) { } fn bench_hash_128_fix_size(c: &mut Criterion) { + let mut group = c.benchmark_group("fix size hashing"); + let key = get_key(MAX_KEY_SIZE); - let blake_fn = Fun::new("blake2_128", bench_blake2_128); - let twox_fn = Fun::new("twox_128", bench_twox_128); - let fns = vec![blake_fn, twox_fn]; - c.bench_functions("fixed size hashing", fns, key); + group.bench_with_input("blake2_128", &key, bench_blake2_128); + group.bench_with_input("twox_128", &key, bench_twox_128); + + group.finish(); } fn bench_hash_128_dyn_size(c: &mut Criterion) { - let mut keys = Vec::new(); + let mut group = c.benchmark_group("dyn size hashing"); + for i in (2..MAX_KEY_SIZE).step_by(4) { - keys.push(get_key(i).clone()) + let key = get_key(i); + + group.bench_with_input( + BenchmarkId::new("blake2_128", format!("{}", i)), + &key, + bench_blake2_128, + ); + group.bench_with_input( + BenchmarkId::new("twox_128", format!("{}", i)), + &key, + bench_twox_128, + ); } - c.bench_function_over_inputs("dyn size hashing - blake2", |b, key| bench_blake2_128(b, &key), keys.clone()); - c.bench_function_over_inputs("dyn size hashing - twox", |b, key| bench_twox_128(b, &key), keys); + group.finish(); } fn bench_ed25519(c: &mut Criterion) { - c.bench_function_over_inputs("signing - ed25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let mut group = c.benchmark_group("ed25519"); + + for msg_size in vec![32, 1024, 1024 * 1024] { + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ed25519::Pair::generate().0; - b.iter(|| key.sign(&msg)) - }, vec![32, 1024, 1024 * 1024]); + group.bench_function(BenchmarkId::new("signing", format!("{}", msg_size)), |b| { + b.iter(|| 
key.sign(&msg)) + }); + } - c.bench_function_over_inputs("verifying - ed25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + for msg_size in vec![32, 1024, 1024 * 1024] { + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ed25519::Pair::generate().0; let sig = key.sign(&msg); let public = key.public(); - b.iter(|| sp_core::ed25519::Pair::verify(&sig, &msg, &public)) - }, vec![32, 1024, 1024 * 1024]); + group.bench_function(BenchmarkId::new("verifying", format!("{}", msg_size)), |b| { + b.iter(|| sp_core::ed25519::Pair::verify(&sig, &msg, &public)) + }); + } + + group.finish(); } fn bench_sr25519(c: &mut Criterion) { - c.bench_function_over_inputs("signing - sr25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let mut group = c.benchmark_group("sr25519"); + + for msg_size in vec![32, 1024, 1024 * 1024] { + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::sr25519::Pair::generate().0; - b.iter(|| key.sign(&msg)) - }, vec![32, 1024, 1024 * 1024]); + group.bench_function(BenchmarkId::new("signing", format!("{}", msg_size)), |b| { + b.iter(|| key.sign(&msg)) + }); + } - c.bench_function_over_inputs("verifying - sr25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + for msg_size in vec![32, 1024, 1024 * 1024] { + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::sr25519::Pair::generate().0; let sig = key.sign(&msg); let public = key.public(); - b.iter(|| sp_core::sr25519::Pair::verify(&sig, &msg, &public)) - }, vec![32, 1024, 1024 * 1024]); + group.bench_function(BenchmarkId::new("verifying", format!("{}", msg_size)), |b| { + b.iter(|| sp_core::sr25519::Pair::verify(&sig, &msg, &public)) + }); + } + + group.finish(); } fn bench_ecdsa(c: &mut Criterion) { - c.bench_function_over_inputs("signing - ecdsa", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let mut group = c.benchmark_group("ecdsa"); + + for msg_size in vec![32, 1024, 1024 * 1024] { + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ecdsa::Pair::generate().0; - b.iter(|| key.sign(&msg)) - }, vec![32, 1024, 1024 * 1024]); + group.bench_function(BenchmarkId::new("signing", format!("{}", msg_size)), |b| { + b.iter(|| key.sign(&msg)) + }); + } - c.bench_function_over_inputs("verifying - ecdsa", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + for msg_size in vec![32, 1024, 1024 * 1024] { + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ecdsa::Pair::generate().0; let sig = key.sign(&msg); let public = key.public(); - b.iter(|| sp_core::ecdsa::Pair::verify(&sig, &msg, &public)) - }, vec![32, 1024, 1024 * 1024]); -} + group.bench_function(BenchmarkId::new("verifying", format!("{}", msg_size)), |b| { + b.iter(|| sp_core::ecdsa::Pair::verify(&sig, &msg, &public)) + }); + } -criterion_group!{ - name = benches; - config = Criterion::default().warm_up_time(Duration::from_millis(500)).without_plots(); - targets = bench_hash_128_fix_size, bench_hash_128_dyn_size, bench_ed25519, bench_sr25519, bench_ecdsa + group.finish(); } + +criterion_group!( + benches, + bench_hash_128_fix_size, + bench_hash_128_dyn_size, + bench_ed25519, + bench_sr25519, + bench_ecdsa, +); criterion_main!(benches); diff --git a/primitives/core/src/changes_trie.rs 
b/primitives/core/src/changes_trie.rs index 1d88242e43d69..f4ce83dc2c877 100644 --- a/primitives/core/src/changes_trie.rs +++ b/primitives/core/src/changes_trie.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,17 @@ //! Substrate changes trie configuration. -#[cfg(any(feature = "std", test))] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use num_traits::Zero; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; /// Substrate changes trie configuration. -#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] -#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf) +)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode, scale_info::TypeInfo)] pub struct ChangesTrieConfiguration { /// Interval (in blocks) at which level1-digests are created. Digests are not /// created when this is less or equal to 1. @@ -62,60 +65,62 @@ impl ChangesTrieConfiguration { } /// Do we need to build digest at given block? - pub fn is_digest_build_required_at_block( - &self, - zero: Number, - block: Number, - ) -> bool - where - Number: From + PartialEq + - ::sp_std::ops::Rem + ::sp_std::ops::Sub + - ::sp_std::cmp::PartialOrd + Zero, + pub fn is_digest_build_required_at_block(&self, zero: Number, block: Number) -> bool + where + Number: From + + PartialEq + + ::sp_std::ops::Rem + + ::sp_std::ops::Sub + + ::sp_std::cmp::PartialOrd + + Zero, { - block > zero - && self.is_digest_build_enabled() - && ((block - zero) % self.digest_interval.into()).is_zero() + block > zero && + self.is_digest_build_enabled() && + ((block - zero) % self.digest_interval.into()).is_zero() } /// Returns max digest interval. One if digests are not created at all. pub fn max_digest_interval(&self) -> u32 { if !self.is_digest_build_enabled() { - return 1; + return 1 } // we'll get >1 loop iteration only when bad configuration parameters are selected let mut current_level = self.digest_levels; loop { if let Some(max_digest_interval) = self.digest_interval.checked_pow(current_level) { - return max_digest_interval; + return max_digest_interval } - current_level = current_level - 1; + current_level -= 1; } } /// Returns max level digest block number that has been created at block <= passed block number. /// /// Returns None if digests are not created at all. 
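The `max_digest_interval` logic reformatted above is easy to check in isolation: the answer is `digest_interval ^ digest_levels`, reduced one level at a time if the power overflows `u32`. A self-contained sketch, assuming (as the surrounding code suggests) that digests are enabled only when the interval is greater than 1 and the level count is non-zero:

```rust
fn max_digest_interval(digest_interval: u32, digest_levels: u32) -> u32 {
    // Mirrors the `is_digest_build_enabled` guard: no digests, interval of 1.
    if digest_interval <= 1 || digest_levels == 0 {
        return 1
    }
    // More than one iteration only happens for bad configuration parameters.
    let mut current_level = digest_levels;
    loop {
        if let Some(max) = digest_interval.checked_pow(current_level) {
            return max
        }
        current_level -= 1;
    }
}

fn main() {
    assert_eq!(max_digest_interval(8, 4), 4096);
    // 2^32 overflows u32, so the level is reduced until the power fits.
    assert_eq!(max_digest_interval(2, 32), 2u32.pow(31));
}
```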
- pub fn prev_max_level_digest_block( - &self, - zero: Number, - block: Number, - ) -> Option - where - Number: Clone + From + PartialOrd + PartialEq + - ::sp_std::ops::Add + ::sp_std::ops::Sub + - ::sp_std::ops::Div + ::sp_std::ops::Mul + Zero, + pub fn prev_max_level_digest_block(&self, zero: Number, block: Number) -> Option + where + Number: Clone + + From + + PartialOrd + + PartialEq + + ::sp_std::ops::Add + + ::sp_std::ops::Sub + + ::sp_std::ops::Div + + ::sp_std::ops::Mul + + Zero, { if block <= zero { - return None; + return None } - let (next_begin, next_end) = self.next_max_level_digest_range(zero.clone(), block.clone())?; + let (next_begin, next_end) = + self.next_max_level_digest_range(zero.clone(), block.clone())?; // if 'next' digest includes our block, then it is a also a previous digest if next_end == block { - return Some(block); + return Some(block) } // if previous digest ends at zero block, then there are no previous digest @@ -136,13 +141,18 @@ impl ChangesTrieConfiguration { zero: Number, mut block: Number, ) -> Option<(Number, Number)> - where - Number: Clone + From + PartialOrd + PartialEq + - ::sp_std::ops::Add + ::sp_std::ops::Sub + - ::sp_std::ops::Div + ::sp_std::ops::Mul, + where + Number: Clone + + From + + PartialOrd + + PartialEq + + ::sp_std::ops::Add + + ::sp_std::ops::Sub + + ::sp_std::ops::Div + + ::sp_std::ops::Mul, { if !self.is_digest_build_enabled() { - return None; + return None } if block <= zero { @@ -152,7 +162,7 @@ impl ChangesTrieConfiguration { let max_digest_interval: Number = self.max_digest_interval().into(); let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); if max_digests_since_zero == 0.into() { - return Some((zero.clone() + 1.into(), zero + max_digest_interval)); + return Some((zero.clone() + 1.into(), zero + max_digest_interval)) } let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); Some(if block == last_max_digest_block { @@ -169,14 +179,22 @@ impl ChangesTrieConfiguration { /// digest interval (in blocks) /// step between blocks we're interested in when digest is built /// ) - pub fn digest_level_at_block(&self, zero: Number, block: Number) -> Option<(u32, u32, u32)> - where - Number: Clone + From + PartialEq + - ::sp_std::ops::Rem + ::sp_std::ops::Sub + - ::sp_std::cmp::PartialOrd + Zero, + pub fn digest_level_at_block( + &self, + zero: Number, + block: Number, + ) -> Option<(u32, u32, u32)> + where + Number: Clone + + From + + PartialEq + + ::sp_std::ops::Rem + + ::sp_std::ops::Sub + + ::sp_std::cmp::PartialOrd + + Zero, { if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { - return None; + return None } let relative_block = block - zero; @@ -185,21 +203,18 @@ impl ChangesTrieConfiguration { let mut digest_step = 1u32; while current_level < self.digest_levels { let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { - Some(new_digest_interval) if (relative_block.clone() % new_digest_interval.into()).is_zero() - => new_digest_interval, + Some(new_digest_interval) + if (relative_block.clone() % new_digest_interval.into()).is_zero() => + new_digest_interval, _ => break, }; digest_step = digest_interval; digest_interval = new_digest_interval; - current_level = current_level + 1; + current_level += 1; } - Some(( - current_level, - digest_interval, - digest_step, - )) + Some((current_level, digest_interval, digest_step)) } } @@ -208,10 +223,7 @@ mod tests { use super::ChangesTrieConfiguration; fn config(interval: 
u32, levels: u32) -> ChangesTrieConfiguration { - ChangesTrieConfiguration { - digest_interval: interval, - digest_levels: levels, - } + ChangesTrieConfiguration { digest_interval: interval, digest_levels: levels } } #[test] @@ -226,7 +238,7 @@ mod tests { #[test] fn is_digest_build_required_at_block_works() { fn test_with_zero(zero: u64) { - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 0u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero)); assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 1u64)); assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 2u64)); assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4u64)); @@ -249,13 +261,16 @@ mod tests { #[test] fn digest_level_at_block_works() { fn test_with_zero(zero: u64) { - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 0u64), None); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero), None); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 7u64), None); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 63u64), None); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 8u64), Some((1, 8, 1))); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 64u64), Some((2, 64, 8))); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 512u64), Some((3, 512, 64))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4096u64), Some((4, 4096, 512))); + assert_eq!( + config(8, 4).digest_level_at_block(zero, zero + 4096u64), + Some((4, 4096, 512)) + ); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4112u64), Some((1, 8, 1))); } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index b685c28c67fd3..4764a0cac1b14 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,37 +19,36 @@ //! Cryptographic utilities. // end::description[] -use crate::{sr25519, ed25519}; -use sp_std::hash::Hash; -use sp_std::vec::Vec; -use sp_std::str; #[cfg(feature = "std")] -use sp_std::convert::TryInto; -use sp_std::convert::TryFrom; +use crate::hexdisplay::HexDisplay; +use crate::{ed25519, sr25519}; +#[cfg(feature = "std")] +use base58::{FromBase58, ToBase58}; +use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "std")] use parking_lot::Mutex; #[cfg(feature = "std")] -use rand::{RngCore, rngs::OsRng}; -use codec::{Encode, Decode}; +use rand::{rngs::OsRng, RngCore}; #[cfg(feature = "std")] use regex::Regex; +use scale_info::TypeInfo; +/// Trait for accessing reference to `SecretString`. +pub use secrecy::ExposeSecret; +/// A store for sensitive data. #[cfg(feature = "std")] -use base58::{FromBase58, ToBase58}; +pub use secrecy::SecretString; +use sp_runtime_interface::pass_by::PassByInner; #[cfg(feature = "std")] -use crate::hexdisplay::HexDisplay; +use sp_std::convert::TryInto; #[doc(hidden)] pub use sp_std::ops::Deref; -use sp_runtime_interface::pass_by::PassByInner; +use sp_std::{convert::TryFrom, hash::Hash, str, vec::Vec}; /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; -/// Trait for accessing reference to `SecretString`. -pub use secrecy::ExposeSecret; -/// A store for sensitive data. 
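The crypto module now re-exports `ExposeSecret` and `SecretString` from the secrecy crate. A minimal usage sketch, assuming secrecy 0.7 (the version pinned in the sp-core Cargo.toml hunk earlier):

```rust
use secrecy::{ExposeSecret, SecretString};

fn main() {
    let secret = SecretString::new("correct horse battery staple".to_string());
    // Debug formatting redacts the inner value...
    println!("{:?}", secret);
    // ...reading it back requires an explicit `expose_secret` call.
    assert_eq!(secret.expose_secret().len(), 28);
}
```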
-#[cfg(feature = "std")] -pub use secrecy::SecretString; /// The root phrase for our publicly known keys. -pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; +pub const DEV_PHRASE: &str = + "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; /// The address of the associated root phrase for our publicly known keys. pub const DEV_ADDRESS: &str = "5DfhGyQdFobKM8NsWvEeAKk5EQQgYe9AydgJ7rMB6E1EqRzV"; @@ -118,22 +117,28 @@ pub enum DeriveJunction { #[cfg(feature = "full_crypto")] impl DeriveJunction { /// Consume self to return a soft derive junction with the same chain code. - pub fn soften(self) -> Self { DeriveJunction::Soft(self.unwrap_inner()) } + pub fn soften(self) -> Self { + DeriveJunction::Soft(self.unwrap_inner()) + } /// Consume self to return a hard derive junction with the same chain code. - pub fn harden(self) -> Self { DeriveJunction::Hard(self.unwrap_inner()) } + pub fn harden(self) -> Self { + DeriveJunction::Hard(self.unwrap_inner()) + } /// Create a new soft (vanilla) DeriveJunction from a given, encodable, value. /// /// If you need a hard junction, use `hard()`. pub fn soft(index: T) -> Self { let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); - index.using_encoded(|data| if data.len() > JUNCTION_ID_LEN { - let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); - let hash = hash_result.as_bytes(); - cc.copy_from_slice(hash); - } else { - cc[0..data.len()].copy_from_slice(data); + index.using_encoded(|data| { + if data.len() > JUNCTION_ID_LEN { + let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); + let hash = hash_result.as_bytes(); + cc.copy_from_slice(hash); + } else { + cc[0..data.len()].copy_from_slice(data); + } }); DeriveJunction::Soft(cc) } @@ -161,18 +166,12 @@ impl DeriveJunction { /// Return `true` if the junction is soft. pub fn is_soft(&self) -> bool { - match *self { - DeriveJunction::Soft(_) => true, - _ => false, - } + matches!(*self, DeriveJunction::Soft(_)) } /// Return `true` if the junction is hard. pub fn is_hard(&self) -> bool { - match *self { - DeriveJunction::Hard(_) => true, - _ => false, - } + matches!(*self, DeriveJunction::Hard(_)) } } @@ -180,11 +179,8 @@ impl DeriveJunction { impl> From for DeriveJunction { fn from(j: T) -> DeriveJunction { let j = j.as_ref(); - let (code, hard) = if j.starts_with('/') { - (&j[1..], true) - } else { - (j, false) - }; + let (code, hard) = + if let Some(stripped) = j.strip_prefix('/') { (stripped, true) } else { (j, false) }; let res = if let Ok(n) = str::parse::(code) { // number @@ -210,7 +206,7 @@ pub enum PublicError { BadBase58, /// Bad length. BadLength, - /// Unknown version. + /// Unknown identifier for the encoding. UnknownVersion, /// Invalid checksum. InvalidChecksum, @@ -218,56 +214,105 @@ pub enum PublicError { InvalidFormat, /// Invalid derivation path. InvalidPath, + /// Disallowed SS58 Address Format for this datatype. + FormatNotAllowed, } /// Key that can be encoded to/from SS58. +/// +/// See +/// for information on the codec. #[cfg(feature = "full_crypto")] pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { + /// A format filterer, can be used to ensure that `from_ss58check` family only decode for + /// allowed identifiers. By default just refuses the two reserved identifiers. 
+ fn format_is_allowed(f: Ss58AddressFormat) -> bool { + !matches!(f, Ss58AddressFormat::Reserved46 | Ss58AddressFormat::Reserved47) + } + /// Some if the string is a properly encoded SS58Check address. #[cfg(feature = "std")] fn from_ss58check(s: &str) -> Result { - Self::from_ss58check_with_version(s) - .and_then(|(r, v)| match v { - v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), - _ => Err(PublicError::UnknownVersion), - }) + Self::from_ss58check_with_version(s).and_then(|(r, v)| match v { + v if !v.is_custom() => Ok(r), + v if v == *DEFAULT_VERSION.lock() => Ok(r), + _ => Err(PublicError::UnknownVersion), + }) } + /// Some if the string is a properly encoded SS58Check address. #[cfg(feature = "std")] fn from_ss58check_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { + const CHECKSUM_LEN: usize = 2; let mut res = Self::default(); - let len = res.as_mut().len(); - let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. - if d.len() != len + 3 { - // Invalid length. - return Err(PublicError::BadLength); + + // Must decode to our type. + let body_len = res.as_mut().len(); + + let data = s.from_base58().map_err(|_| PublicError::BadBase58)?; + if data.len() < 2 { + return Err(PublicError::BadLength) + } + let (prefix_len, ident) = match data[0] { + 0..=63 => (1, data[0] as u16), + 64..=127 => { + // weird bit manipulation owing to the combination of LE encoding and missing two + // bits from the left. + // d[0] d[1] are: 01aaaaaa bbcccccc + // they make the LE-encoded 16-bit value: aaaaaabb 00cccccc + // so the lower byte is formed of aaaaaabb and the higher byte is 00cccccc + let lower = (data[0] << 2) | (data[1] >> 6); + let upper = data[1] & 0b00111111; + (2, (lower as u16) | ((upper as u16) << 8)) + }, + _ => return Err(PublicError::UnknownVersion), + }; + if data.len() != prefix_len + body_len + CHECKSUM_LEN { + return Err(PublicError::BadLength) + } + let format = ident.try_into().map_err(|_: ()| PublicError::UnknownVersion)?; + if !Self::format_is_allowed(format) { + return Err(PublicError::FormatNotAllowed) } - let ver = d[0].try_into().map_err(|_: ()| PublicError::UnknownVersion)?; - if d[len + 1..len + 3] != ss58hash(&d[0..len + 1]).as_bytes()[0..2] { + let hash = ss58hash(&data[0..body_len + prefix_len]); + let checksum = &hash.as_bytes()[0..CHECKSUM_LEN]; + if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { // Invalid checksum. - return Err(PublicError::InvalidChecksum); + return Err(PublicError::InvalidChecksum) } - res.as_mut().copy_from_slice(&d[1..len + 1]); - Ok((res, ver)) + res.as_mut().copy_from_slice(&data[prefix_len..body_len + prefix_len]); + Ok((res, format)) } + /// Some if the string is a properly encoded SS58Check address, optionally with /// a derivation path following. #[cfg(feature = "std")] fn from_string(s: &str) -> Result { - Self::from_string_with_version(s) - .and_then(|(r, v)| match v { - v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), - _ => Err(PublicError::UnknownVersion), - }) + Self::from_string_with_version(s).and_then(|(r, v)| match v { + v if !v.is_custom() => Ok(r), + v if v == *DEFAULT_VERSION.lock() => Ok(r), + _ => Err(PublicError::UnknownVersion), + }) } /// Return the ss58-check string for this key. 
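The bit manipulation in `from_ss58check_with_version` above (and its mirror image in `to_ss58check_with_version` below) implements the widened SS58 prefix: identifiers 0..=63 take one byte, identifiers 64..=16383 take two bytes whose first byte is tagged `0b01`. A self-contained round-trip sketch covering only the prefix bytes, not the base58 payload or checksum:

```rust
fn encode_prefix(ident: u16) -> Vec<u8> {
    let ident = ident & 0b0011_1111_1111_1111; // only 14 bits are available
    match ident {
        0..=63 => vec![ident as u8],
        64..=16_383 => {
            // Upper six bits of the lower byte, tagged 0b01 for the two-byte form.
            let first = (((ident & 0b1111_1100) as u8) >> 2) | 0b0100_0000;
            // Lower two bits of the lower byte moved high, upper byte bits low.
            let second = ((ident >> 8) as u8) | (((ident & 0b11) as u8) << 6);
            vec![first, second]
        },
        _ => unreachable!("masked to 14 bits above; qed"),
    }
}

fn decode_prefix(data: &[u8]) -> Option<(usize, u16)> {
    match data.first()? {
        0..=63 => Some((1, data[0] as u16)),
        64..=127 => {
            let second = *data.get(1)?;
            // Invert the bit shuffling performed by the encoder.
            let lower = (data[0] << 2) | (second >> 6);
            let upper = second & 0b0011_1111;
            Some((2, (lower as u16) | ((upper as u16) << 8)))
        },
        _ => None, // first bytes starting 0b10/0b11 are reserved
    }
}

fn main() {
    for ident in [0u16, 42, 63, 64, 255, 10_922, 16_383] {
        let bytes = encode_prefix(ident);
        assert_eq!(decode_prefix(&bytes), Some((bytes.len(), ident)));
    }
    println!("round-trip ok");
}
```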
#[cfg(feature = "std")] fn to_ss58check_with_version(&self, version: Ss58AddressFormat) -> String { - let mut v = vec![version.into()]; + // We mask out the upper two bits of the ident - SS58 Prefix currently only supports 14-bits + let ident: u16 = u16::from(version) & 0b0011_1111_1111_1111; + let mut v = match ident { + 0..=63 => vec![ident as u8], + 64..=16_383 => { + // upper six bits of the lower byte(!) + let first = ((ident & 0b0000_0000_1111_1100) as u8) >> 2; + // lower two bits of the lower byte in the high pos, + // lower bits of the upper byte in the low pos + let second = ((ident >> 8) as u8) | ((ident & 0b0000_0000_0000_0011) as u8) << 6; + vec![first | 0b01000000, second] + }, + _ => unreachable!("masked out the upper two bits; qed"), + }; v.extend(self.as_ref()); let r = ss58hash(&v); v.extend(&r.as_bytes()[0..2]); @@ -276,7 +321,9 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { /// Return the ss58-check string for this key. #[cfg(feature = "std")] - fn to_ss58check(&self) -> String { self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) } + fn to_ss58check(&self) -> String { + self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) + } /// Some if the string is a properly encoded SS58Check address, optionally with /// a derivation path following. @@ -292,7 +339,7 @@ pub trait Derive: Sized { /// /// Will be `None` for public keys if there are any hard junctions in there. #[cfg(feature = "std")] - fn derive>(&self, _path: Iter) -> Option { + fn derive>(&self, _path: Iter) -> Option { None } } @@ -321,8 +368,8 @@ macro_rules! ss58_address_format { #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] pub enum Ss58AddressFormat { $(#[doc = $desc] $identifier),*, - /// Use a manually provided numeric value. - Custom(u8), + /// Use a manually provided numeric value as a standard identifier + Custom(u16), } #[cfg(feature = "std")] @@ -356,15 +403,20 @@ macro_rules! ss58_address_format { /// Whether the address is custom. pub fn is_custom(&self) -> bool { - match self { - Self::Custom(_) => true, - _ => false, - } + matches!(self, Self::Custom(_)) + } + } + + impl TryFrom for Ss58AddressFormat { + type Error = (); + + fn try_from(x: u8) -> Result { + Ss58AddressFormat::try_from(x as u16) } } - impl From for u8 { - fn from(x: Ss58AddressFormat) -> u8 { + impl From for u16 { + fn from(x: Ss58AddressFormat) -> u16 { match x { $(Ss58AddressFormat::$identifier => $number),*, Ss58AddressFormat::Custom(n) => n, @@ -372,22 +424,13 @@ macro_rules! ss58_address_format { } } - impl TryFrom for Ss58AddressFormat { + impl TryFrom for Ss58AddressFormat { type Error = (); - fn try_from(x: u8) -> Result { + fn try_from(x: u16) -> Result { match x { $($number => Ok(Ss58AddressFormat::$identifier)),*, - _ => { - #[cfg(feature = "std")] - match Ss58AddressFormat::default() { - Ss58AddressFormat::Custom(n) if n == x => Ok(Ss58AddressFormat::Custom(x)), - _ => Err(()), - } - - #[cfg(not(feature = "std"))] - Err(()) - }, + _ => Ok(Ss58AddressFormat::Custom(x)), } } } @@ -403,7 +446,7 @@ macro_rules! ss58_address_format { fn try_from(x: &'a str) -> Result { match x { $($name => Ok(Ss58AddressFormat::$identifier)),*, - a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ParseError), + a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ParseError), } } } @@ -444,12 +487,12 @@ macro_rules! 
ss58_address_format { ss58_address_format!( PolkadotAccount => (0, "polkadot", "Polkadot Relay-chain, standard account (*25519).") - Reserved1 => - (1, "reserved1", "Reserved for future use (1).") + BareSr25519 => + (1, "sr25519", "Bare 32-bit Schnorr/Ristretto 25519 (S/R 25519) key.") KusamaAccount => (2, "kusama", "Kusama Relay-chain, standard account (*25519).") - Reserved3 => - (3, "reserved3", "Reserved for future use (3).") + BareEd25519 => + (3, "ed25519", "Bare 32-bit Edwards Ed25519 key.") KatalChainAccount => (4, "katalchain", "Katal Chain, standard account (*25519).") PlasmAccount => @@ -470,6 +513,10 @@ ss58_address_format!( (12, "polymath", "Polymath network, standard account (*25519).") SubstraTeeAccount => (13, "substratee", "Any SubstraTEE off-chain network private account (*25519).") + TotemAccount => + (14, "totem", "Any Totem Live Accounting network standard account (*25519).") + SynesthesiaAccount => + (15, "synesthesia", "Synesthesia mainnet, standard account (*25519).") KulupuAccount => (16, "kulupu", "Kulupu mainnet, standard account (*25519).") DarkAccount => @@ -490,29 +537,88 @@ ss58_address_format!( (24, "zero", "ZERO mainnet, standard account (*25519).") AlphavilleAccount => (25, "alphaville", "ZERO testnet, standard account (*25519).") + JupiterAccount => + (26, "jupiter", "Jupiter testnet, standard account (*25519).") SubsocialAccount => (28, "subsocial", "Subsocial network, standard account (*25519).") + DhiwayAccount => + (29, "cord", "Dhiway CORD network, standard account (*25519).") PhalaAccount => (30, "phala", "Phala Network, standard account (*25519).") + LitentryAccount => + (31, "litentry", "Litentry Network, standard account (*25519).") RobonomicsAccount => (32, "robonomics", "Any Robonomics network standard account (*25519).") DataHighwayAccount => (33, "datahighway", "DataHighway mainnet, standard account (*25519).") + AresAccount => + (34, "ares", "Ares Protocol, standard account (*25519).") + ValiuAccount => + (35, "vln", "Valiu Liquidity Network mainnet, standard account (*25519).") CentrifugeAccount => (36, "centrifuge", "Centrifuge Chain mainnet, standard account (*25519).") NodleAccount => (37, "nodle", "Nodle Chain mainnet, standard account (*25519).") + KiltAccount => + (38, "kilt", "KILT Chain mainnet, standard account (*25519).") + PolimecAccount => + (41, "poli", "Polimec Chain mainnet, standard account (*25519).") SubstrateAccount => (42, "substrate", "Any Substrate network, standard account (*25519).") - Reserved43 => - (43, "reserved43", "Reserved for future use (43).") + BareSecp256k1 => + (43, "secp256k1", "Bare ECDSA SECP256k1 key.") ChainXAccount => (44, "chainx", "ChainX mainnet, standard account (*25519).") + UniartsAccount => + (45, "uniarts", "UniArts Chain mainnet, standard account (*25519).") Reserved46 => (46, "reserved46", "Reserved for future use (46).") Reserved47 => (47, "reserved47", "Reserved for future use (47).") - // Note: 48 and above are reserved. 
+ NeatcoinAccount => + (48, "neatcoin", "Neatcoin mainnet, standard account (*25519).") + PicassoAccount => + (49, "picasso", "Composable Canary Network, standard account (*25519).") + ComposableAccount => + (50, "composable", "Composable mainnet, standard account (*25519).") + HydraDXAccount => + (63, "hydradx", "HydraDX standard account (*25519).") + AventusAccount => + (65, "aventus", "Aventus Chain mainnet, standard account (*25519).") + CrustAccount => + (66, "crust", "Crust Network, standard account (*25519).") + EquilibriumAccount => + (67, "equilibrium", "Equilibrium Network, standard account (*25519).") + SoraAccount => + (69, "sora", "SORA Network, standard account (*25519).") + ZeitgeistAccount => + (73, "zeitgeist", "Zeitgeist network, standard account (*25519).") + MantaAccount => + (77, "manta", "Manta Network, standard account (*25519).") + CalamariAccount => + (78, "calamari", "Manta Canary Network, standard account (*25519).") + PolkaSmith => + (98, "polkasmith", "PolkaSmith Canary Network, standard account (*25519).") + PolkaFoundry => + (99, "polkafoundry", "PolkaFoundry Network, standard account (*25519).") + OriginTrailAccount => + (101, "origintrail-parachain", "OriginTrail Parachain, ethereumm account (ECDSA).") + HeikoAccount => + (110, "heiko", "Heiko, session key (*25519).") + CloverAccount => + (128, "clover", "Clover Finance, standard account (*25519).") + ParallelAccount => + (172, "parallel", "Parallel, session key (*25519).") + SocialAccount => + (252, "social-network", "Social Network, standard account (*25519).") + Moonbeam => + (1284, "moonbeam", "Moonbeam, session key (*25519).") + Moonriver => + (1285, "moonriver", "Moonriver, session key (*25519).") + BasiliskAccount => + (10041, "basilisk", "Basilisk standard account (*25519).") + + // Note: 16384 and above are reserved. ); /// Set the default "version" (actually, this is a bit of a misnomer and the version byte is @@ -525,25 +631,29 @@ pub fn set_default_ss58_version(version: Ss58AddressFormat) { *DEFAULT_VERSION.lock() = version } +#[cfg(feature = "std")] +lazy_static::lazy_static! { + static ref SS58_REGEX: Regex = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") + .expect("constructed from known-good static value; qed"); + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + static ref JUNCTION_REGEX: Regex = Regex::new(r"/(/?[^/]+)") + .expect("constructed from known-good static value; qed"); +} + #[cfg(feature = "std")] impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { - let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let s = cap.name("ss58") - .map(|r| r.as_str()) - .unwrap_or(DEV_ADDRESS); - let addr = if s.starts_with("0x") { - let d = hex::decode(&s[2..]).map_err(|_| PublicError::InvalidFormat)?; + let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; + let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); + let addr = if let Some(stripped) = s.strip_prefix("0x") { + let d = hex::decode(stripped).map_err(|_| PublicError::InvalidFormat)?; let mut r = Self::default(); if d.len() == r.as_ref().len() { r.as_mut().copy_from_slice(&d); r } else { - Err(PublicError::BadLength)? 
+ return Err(PublicError::BadLength) } } else { Self::from_ss58check(s)? @@ -551,32 +661,23 @@ impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { if cap["path"].is_empty() { Ok(addr) } else { - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - addr.derive(path) - .ok_or(PublicError::InvalidPath) + let path = + JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); + addr.derive(path).ok_or(PublicError::InvalidPath) } } fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); + let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; let (addr, v) = Self::from_ss58check_with_version( - cap.name("ss58") - .map(|r| r.as_str()) - .unwrap_or(DEV_ADDRESS) + cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS), )?; if cap["path"].is_empty() { Ok((addr, v)) } else { - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - addr.derive(path) - .ok_or(PublicError::InvalidPath) - .map(|a| (a, v)) + let path = + JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); + addr.derive(path).ok_or(PublicError::InvalidPath).map(|a| (a, v)) } } } @@ -602,19 +703,35 @@ pub trait Public: fn from_slice(data: &[u8]) -> Self; /// Return a `Vec` filled with raw data. - fn to_raw_vec(&self) -> Vec { self.as_slice().to_vec() } + fn to_raw_vec(&self) -> Vec { + self.as_slice().to_vec() + } /// Return a slice filled with raw data. - fn as_slice(&self) -> &[u8] { self.as_ref() } + fn as_slice(&self) -> &[u8] { + self.as_ref() + } /// Return `CryptoTypePublicPair` from public key. fn to_public_crypto_pair(&self) -> CryptoTypePublicPair; } /// An opaque 32-byte cryptographic identifier. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Default, Encode, Decode)] +#[derive( + Clone, Eq, PartialEq, Ord, PartialOrd, Default, Encode, Decode, MaxEncodedLen, TypeInfo, +)] #[cfg_attr(feature = "std", derive(Hash))] pub struct AccountId32([u8; 32]); +impl AccountId32 { + /// Create a new instance from its raw inner byte value. + /// + /// Equivalent to this types `From<[u8; 32]>` implementation. For the lack of const + /// support in traits we have this constructor. 
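The point of the `const fn new` constructor added here is that trait-based conversions such as `From<[u8; 32]>` cannot be called in const context, while an inherent `const fn` can. A sketch with a stand-in type (the real `AccountId32` lives in sp-core):

```rust
#[derive(Debug, PartialEq, Eq)]
pub struct AccountId32([u8; 32]);

impl AccountId32 {
    pub const fn new(inner: [u8; 32]) -> Self {
        Self(inner)
    }
}

// Legal only because `new` is `const`; `[0u8; 32].into()` would not compile here.
const ZERO_ACCOUNT: AccountId32 = AccountId32::new([0u8; 32]);

fn main() {
    assert_eq!(ZERO_ACCOUNT, AccountId32::new([0u8; 32]));
}
```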
+ pub const fn new(inner: [u8; 32]) -> Self { + Self(inner) + } +} + impl UncheckedFrom for AccountId32 { fn unchecked_from(h: crate::hash::H256) -> Self { AccountId32(h.into()) @@ -649,8 +766,8 @@ impl AsMut<[u8; 32]> for AccountId32 { } impl From<[u8; 32]> for AccountId32 { - fn from(x: [u8; 32]) -> AccountId32 { - AccountId32(x) + fn from(x: [u8; 32]) -> Self { + Self::new(x) } } @@ -707,14 +824,20 @@ impl sp_std::fmt::Debug for AccountId32 { #[cfg(feature = "std")] impl serde::Serialize for AccountId32 { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { serializer.serialize_str(&self.to_ss58check()) } } #[cfg(feature = "std")] impl<'de> serde::Deserialize<'de> for AccountId32 { - fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { Ss58Codec::from_ss58check(&String::deserialize(deserializer)?) .map_err(|e| serde::de::Error::custom(format!("{:?}", e))) } @@ -749,11 +872,13 @@ mod dummy { pub struct Dummy; impl AsRef<[u8]> for Dummy { - fn as_ref(&self) -> &[u8] { &b""[..] } + fn as_ref(&self) -> &[u8] { + &b""[..] + } } impl AsMut<[u8]> for Dummy { - fn as_mut(&mut self) -> &mut[u8] { + fn as_mut(&mut self) -> &mut [u8] { unsafe { #[allow(mutable_transmutes)] sp_std::mem::transmute::<_, &'static mut [u8]>(&b""[..]) @@ -776,14 +901,18 @@ mod dummy { impl Derive for Dummy {} impl Public for Dummy { - fn from_slice(_: &[u8]) -> Self { Self } + fn from_slice(_: &[u8]) -> Self { + Self + } #[cfg(feature = "std")] - fn to_raw_vec(&self) -> Vec { vec![] } - fn as_slice(&self) -> &[u8] { b"" } + fn to_raw_vec(&self) -> Vec { + vec![] + } + fn as_slice(&self) -> &[u8] { + b"" + } fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair( - CryptoTypeId(*b"dumm"), Public::to_raw_vec(self) - ) + CryptoTypePublicPair(CryptoTypeId(*b"dumm"), Public::to_raw_vec(self)) } } @@ -793,23 +922,41 @@ mod dummy { type Signature = Dummy; type DeriveError = (); #[cfg(feature = "std")] - fn generate_with_phrase(_: Option<&str>) -> (Self, String, Self::Seed) { Default::default() } + fn generate_with_phrase(_: Option<&str>) -> (Self, String, Self::Seed) { + Default::default() + } #[cfg(feature = "std")] - fn from_phrase(_: &str, _: Option<&str>) - -> Result<(Self, Self::Seed), SecretStringError> - { + fn from_phrase(_: &str, _: Option<&str>) -> Result<(Self, Self::Seed), SecretStringError> { Ok(Default::default()) } - fn derive< - Iter: Iterator, - >(&self, _: Iter, _: Option) -> Result<(Self, Option), Self::DeriveError> { Ok((Self, None)) } - fn from_seed(_: &Self::Seed) -> Self { Self } - fn from_seed_slice(_: &[u8]) -> Result { Ok(Self) } - fn sign(&self, _: &[u8]) -> Self::Signature { Self } - fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } - fn verify_weak, M: AsRef<[u8]>>(_: &[u8], _: M, _: P) -> bool { true } - fn public(&self) -> Self::Public { Self } - fn to_raw_vec(&self) -> Vec { vec![] } + fn derive>( + &self, + _: Iter, + _: Option, + ) -> Result<(Self, Option), Self::DeriveError> { + Ok((Self, None)) + } + fn from_seed(_: &Self::Seed) -> Self { + Self + } + fn from_seed_slice(_: &[u8]) -> Result { + Ok(Self) + } + fn sign(&self, _: &[u8]) -> Self::Signature { + Self + } + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { + true + } + fn verify_weak, M: AsRef<[u8]>>(_: &[u8], _: M, _: P) -> bool { + true + } + fn 
public(&self) -> Self::Public { + Self + } + fn to_raw_vec(&self) -> Vec { + vec![] + } } } @@ -854,10 +1001,14 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Self, Self::Seed), SecretStringError>; + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), SecretStringError>; /// Derive a child key from a series of given junctions. - fn derive>(&self, + fn derive>( + &self, path: Iter, seed: Option, ) -> Result<(Self, Option), Self::DeriveError>; @@ -887,9 +1038,9 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// Get the public key. fn public(&self) -> Self::Public; - /// Interprets the string `s` in order to generate a key Pair. Returns both the pair and an optional seed, in the - /// case that the pair can be expressed as a direct derivation from a seed (some cases, such as Sr25519 derivations - /// with path components, cannot). + /// Interprets the string `s` in order to generate a key Pair. Returns both the pair and an + /// optional seed, in the case that the pair can be expressed as a direct derivation from a seed + /// (some cases, such as Sr25519 derivations with path components, cannot). /// /// This takes a helper function to do the key generation from a phrase, password and /// junction iterator. @@ -901,7 +1052,8 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// - the phrase may be followed by one or more items delimited by `/` characters. /// - the path may be followed by `///`, in which case everything after the `///` is treated /// as a password. - /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` and + /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` + /// and /// interpreted as above. /// /// In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as @@ -911,28 +1063,25 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// There is no correspondence mapping between SURI strings and the keys they represent. /// Two different non-identical strings can actually lead to the same secret being derived. /// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. - /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will generally - /// be equivalent to no password at all. + /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will + /// generally be equivalent to no password at all. /// /// `None` is returned if no matches are found. 
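The SURI grammar described above (phrase, then `/soft` or `//hard` junctions, then an optional `///password`) can be illustrated with a small std-only splitter. This is not Substrate's regex-based parser, just a sketch of the shape it recognizes:

```rust
fn split_suri(s: &str) -> (&str, Vec<(bool, &str)>, Option<&str>) {
    // Everything after the first `///` is the password.
    let (body, password) = match s.find("///") {
        Some(i) => (&s[..i], Some(&s[i + 3..])),
        None => (s, None),
    };
    // The phrase runs up to the first `/`.
    let first_junction = body.find('/').unwrap_or(body.len());
    let (phrase, mut rest) = body.split_at(first_junction);
    let mut junctions = Vec::new();
    while let Some(stripped) = rest.strip_prefix('/') {
        // A second leading `/` marks the junction as hard.
        let hard = stripped.starts_with('/');
        let stripped = if hard { &stripped[1..] } else { stripped };
        let end = stripped.find('/').unwrap_or(stripped.len());
        junctions.push((hard, &stripped[..end]));
        rest = &stripped[end..];
    }
    (phrase, junctions, password)
}

fn main() {
    let (phrase, path, pw) = split_suri("hello world//1/DOT///pw");
    assert_eq!(phrase, "hello world");
    assert_eq!(path, vec![(true, "1"), (false, "DOT")]);
    assert_eq!(pw, Some("pw"));
}
```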
#[cfg(feature = "std")] - fn from_string_with_seed(s: &str, password_override: Option<&str>) - -> Result<(Self, Option), SecretStringError> - { - let re = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(SecretStringError::InvalidFormat)?; + fn from_string_with_seed( + s: &str, + password_override: Option<&str>, + ) -> Result<(Self, Option), SecretStringError> { + let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); + let path = JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); let password = password_override.or_else(|| cap.name("password").map(|m| m.as_str())); - let (root, seed) = if phrase.starts_with("0x") { - hex::decode(&phrase[2..]).ok() + let (root, seed) = if let Some(stripped) = phrase.strip_prefix("0x") { + hex::decode(stripped) + .ok() .and_then(|seed_vec| { let mut seed = Self::Seed::default(); if seed.as_ref().len() == seed_vec.len() { @@ -944,8 +1093,7 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { }) .ok_or(SecretStringError::InvalidSeed)? } else { - Self::from_phrase(phrase, password) - .map_err(|_| SecretStringError::InvalidPhrase)? + Self::from_phrase(phrase, password).map_err(|_| SecretStringError::InvalidPhrase)? }; root.derive(path, Some(seed)).map_err(|_| SecretStringError::InvalidPath) } @@ -976,19 +1124,25 @@ pub trait Wraps: Sized { type Inner: IsWrappedBy; } -impl IsWrappedBy for T where +impl IsWrappedBy for T +where Outer: AsRef + AsMut + From, T: From, { /// Get a reference to the inner from the outer. - fn from_ref(outer: &Outer) -> &Self { outer.as_ref() } + fn from_ref(outer: &Outer) -> &Self { + outer.as_ref() + } /// Get a mutable reference to the inner from the outer. - fn from_mut(outer: &mut Outer) -> &mut Self { outer.as_mut() } + fn from_mut(outer: &mut Outer) -> &mut Self { + outer.as_mut() + } } -impl UncheckedFrom for Outer where - Outer: Wraps, +impl UncheckedFrom for Outer +where + Outer: Wraps, Inner: IsWrappedBy + UncheckedFrom, { fn unchecked_from(t: T) -> Self { @@ -1012,9 +1166,21 @@ pub trait CryptoType { /// Values whose first character is `_` are reserved for private use and won't conflict with any /// public modules. 
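The four-byte `KeyTypeId` defined next can also be built from a string, as the `TryFrom<&'a str>` impl later in this hunk shows: exactly four bytes, anything else is rejected. A stand-alone sketch of that conversion and its use:

```rust
use std::convert::TryFrom;

#[derive(Debug, Default, PartialEq)]
pub struct KeyTypeId(pub [u8; 4]);

impl<'a> TryFrom<&'a str> for KeyTypeId {
    type Error = ();

    fn try_from(x: &'a str) -> Result<Self, ()> {
        let b = x.as_bytes();
        // Only exactly four bytes form a valid key type id.
        if b.len() != 4 {
            return Err(())
        }
        let mut res = KeyTypeId::default();
        res.0.copy_from_slice(&b[0..4]);
        Ok(res)
    }
}

fn main() {
    assert_eq!(KeyTypeId::try_from("aura"), Ok(KeyTypeId(*b"aura")));
    assert!(KeyTypeId::try_from("toolong").is_err());
}
```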
#[derive( - Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode, PassByInner, - crate::RuntimeDebug + Copy, + Clone, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Encode, + Decode, + PassByInner, + crate::RuntimeDebug, + TypeInfo, )] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct KeyTypeId(pub [u8; 4]); impl From for KeyTypeId { @@ -1031,10 +1197,11 @@ impl From for u32 { impl<'a> TryFrom<&'a str> for KeyTypeId { type Error = (); + fn try_from(x: &'a str) -> Result { let b = x.as_bytes(); if b.len() != 4 { - return Err(()); + return Err(()) } let mut res = KeyTypeId::default(); res.0.copy_from_slice(&b[0..4]); @@ -1044,10 +1211,12 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { /// An identifier for a specific cryptographic algorithm used by a key pair #[derive(Debug, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct CryptoTypeId(pub [u8; 4]); /// A type alias of CryptoTypeId & a public key #[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct CryptoTypePublicPair(pub CryptoTypeId, pub Vec); #[cfg(feature = "std")] @@ -1057,7 +1226,7 @@ impl sp_std::fmt::Display for CryptoTypePublicPair { Ok(id) => id.to_string(), Err(_) => { format!("{:#?}", self.0) - } + }, }; write!(f, "{}-{}", id, HexDisplay::from(&self.1)) } @@ -1093,16 +1262,16 @@ pub mod key_types { #[cfg(test)] mod tests { + use super::*; use crate::DeriveJunction; use hex_literal::hex; - use super::*; #[derive(Clone, Eq, PartialEq, Debug)] enum TestPair { Generated, GeneratedWithPhrase, - GeneratedFromPhrase{phrase: String, password: Option}, - Standard{phrase: String, password: Option, path: Vec}, + GeneratedFromPhrase { phrase: String, password: Option }, + Standard { phrase: String, password: Option, path: Vec }, Seed(Vec), } impl Default for TestPair { @@ -1148,9 +1317,7 @@ mod tests { vec![] } fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair( - CryptoTypeId(*b"dumm"), self.to_raw_vec(), - ) + CryptoTypePublicPair(CryptoTypeId(*b"dumm"), self.to_raw_vec()) } } impl Pair for TestPair { @@ -1159,41 +1326,68 @@ mod tests { type Signature = [u8; 0]; type DeriveError = (); - fn generate() -> (Self, ::Seed) { (TestPair::Generated, [0u8; 8]) } + fn generate() -> (Self, ::Seed) { + (TestPair::Generated, [0u8; 8]) + } fn generate_with_phrase(_password: Option<&str>) -> (Self, String, ::Seed) { (TestPair::GeneratedWithPhrase, "".into(), [0u8; 8]) } - fn from_phrase(phrase: &str, password: Option<&str>) - -> Result<(Self, ::Seed), SecretStringError> - { - Ok((TestPair::GeneratedFromPhrase { - phrase: phrase.to_owned(), - password: password.map(Into::into) - }, [0u8; 8])) + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, ::Seed), SecretStringError> { + Ok(( + TestPair::GeneratedFromPhrase { + phrase: phrase.to_owned(), + password: password.map(Into::into), + }, + [0u8; 8], + )) + } + fn derive>( + &self, + path_iter: Iter, + _: Option<[u8; 8]>, + ) -> Result<(Self, Option<[u8; 8]>), Self::DeriveError> { + Ok(( + match self.clone() { + TestPair::Standard { phrase, password, path } => TestPair::Standard { + phrase, + password, + path: path.into_iter().chain(path_iter).collect(), + }, + TestPair::GeneratedFromPhrase { phrase, password } => + TestPair::Standard { phrase, 
password, path: path_iter.collect() }, + x => + if path_iter.count() == 0 { + x + } else { + return Err(()) + }, + }, + None, + )) + } + fn from_seed(_seed: &::Seed) -> Self { + TestPair::Seed(_seed.as_ref().to_owned()) + } + fn sign(&self, _message: &[u8]) -> Self::Signature { + [] } - fn derive>(&self, path_iter: Iter, _: Option<[u8; 8]>) - -> Result<(Self, Option<[u8; 8]>), Self::DeriveError> - { - Ok((match self.clone() { - TestPair::Standard {phrase, password, path} => - TestPair::Standard { phrase, password, path: path.into_iter().chain(path_iter).collect() }, - TestPair::GeneratedFromPhrase {phrase, password} => - TestPair::Standard { phrase, password, path: path_iter.collect() }, - x => if path_iter.count() == 0 { x } else { return Err(()) }, - }, None)) + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { + true } - fn from_seed(_seed: &::Seed) -> Self { TestPair::Seed(_seed.as_ref().to_owned()) } - fn sign(&self, _message: &[u8]) -> Self::Signature { [] } - fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } fn verify_weak, M: AsRef<[u8]>>( _sig: &[u8], _message: M, - _pubkey: P - ) -> bool { true } - fn public(&self) -> Self::Public { TestPublic } - fn from_seed_slice(seed: &[u8]) - -> Result - { + _pubkey: P, + ) -> bool { + true + } + fn public(&self) -> Self::Public { + TestPublic + } + fn from_seed_slice(seed: &[u8]) -> Result { Ok(TestPair::Seed(seed.to_owned())) } fn to_raw_vec(&self) -> Vec { @@ -1225,43 +1419,83 @@ mod tests { fn interpret_std_secret_string_should_work() { assert_eq!( TestPair::from_string("hello world", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![] + }) ); assert_eq!( TestPair::from_string("hello world/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft(1)]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft(1)] + }) ); assert_eq!( TestPair::from_string("hello world/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world//1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1)]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1)] + }) ); assert_eq!( TestPair::from_string("hello world//DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world//1/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world//DOT/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT"), 
DeriveJunction::soft(1)]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT"), DeriveJunction::soft(1)] + }) ); assert_eq!( TestPair::from_string("hello world///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![] + }) ); assert_eq!( TestPair::from_string("hello world//1/DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world/1//DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")] + }) ); } @@ -1269,25 +1503,40 @@ mod tests { fn accountid_32_from_str_works() { use std::str::FromStr; assert!(AccountId32::from_str("5G9VdMwXvzza9pS8qE8ZHJk3CheHW9uucBn9ngW4C1gmmzpv").is_ok()); - assert!(AccountId32::from_str("5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").is_ok()); - assert!(AccountId32::from_str("0x5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").is_ok()); + assert!(AccountId32::from_str( + "5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .is_ok()); + assert!(AccountId32::from_str( + "0x5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .is_ok()); assert_eq!( AccountId32::from_str("99G9VdMwXvzza9pS8qE8ZHJk3CheHW9uucBn9ngW4C1gmmzpv").unwrap_err(), "invalid ss58 address.", ); assert_eq!( - AccountId32::from_str("gc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").unwrap_err(), + AccountId32::from_str( + "gc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .unwrap_err(), "invalid hex address.", ); assert_eq!( - AccountId32::from_str("0xgc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").unwrap_err(), + AccountId32::from_str( + "0xgc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .unwrap_err(), "invalid hex address.", ); // valid hex but invalid length will be treated as ss58. assert_eq!( - AccountId32::from_str("55c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").unwrap_err(), + AccountId32::from_str( + "55c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .unwrap_err(), "invalid ss58 address.", ); } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index a836eb0e4c22f..11e9b9d71d80e 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,28 +19,31 @@ //! Simple ECDSA API. 
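The `AccountId32::from_str` assertions just above pin down a simple dispatch rule: exactly 64 hex digits (with or without `0x`) take the hex path, anything else is treated as ss58. A hypothetical `dispatch` helper illustrating that rule only; the real implementation actually decodes both formats:

```rust
// Illustrative dispatch only: real code hex-decodes and ss58-decodes.
fn dispatch(s: &str) -> &'static str {
    let maybe_hex = s.strip_prefix("0x").unwrap_or(s);
    if maybe_hex.len() == 64 { "hex" } else { "ss58" }
}

fn main() {
    assert_eq!(dispatch("0x5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d"), "hex");
    assert_eq!(dispatch("5G9VdMwXvzza9pS8qE8ZHJk3CheHW9uucBn9ngW4C1gmmzpv"), "ss58");
    // 65 hex digits: right alphabet, wrong length, so it falls through to ss58 —
    // which is why the last test above expects "invalid ss58 address.".
    assert_eq!(dispatch("55c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d"), "ss58");
}
```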
// end::description[] -#[cfg(feature = "full_crypto")] -use sp_std::vec::Vec; - +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_runtime_interface::pass_by::PassByInner; use sp_std::cmp::Ordering; -use codec::{Encode, Decode}; -#[cfg(feature = "full_crypto")] -use core::convert::{TryFrom, TryInto}; #[cfg(feature = "std")] -use substrate_bip39::seed_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; +use crate::crypto::Ss58Codec; +use crate::crypto::{ + CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, UncheckedFrom, +}; #[cfg(feature = "full_crypto")] -use crate::{hashing::blake2_256, crypto::{Pair as TraitPair, DeriveJunction, SecretStringError}}; +use crate::{ + crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}, + hashing::blake2_256, +}; #[cfg(feature = "std")] -use crate::crypto::Ss58Codec; +use bip39::{Language, Mnemonic, MnemonicType}; +#[cfg(feature = "full_crypto")] +use core::convert::{TryFrom, TryInto}; +#[cfg(feature = "full_crypto")] +use libsecp256k1::{PublicKey, SecretKey}; #[cfg(feature = "std")] -use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; -use sp_runtime_interface::pass_by::PassByInner; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "full_crypto")] -use secp256k1::{PublicKey, SecretKey}; +use sp_std::vec::Vec; /// An identifier used to match public keys against ecdsa keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds"); @@ -52,7 +55,7 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds"); type Seed = [u8; 32]; /// The ECDSA compressed public key. -#[derive(Clone, Encode, Decode, PassByInner)] +#[derive(Clone, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)] pub struct Public(pub [u8; 33]); impl PartialOrd for Public { @@ -103,7 +106,7 @@ impl Public { /// This will convert the full public key into the compressed format. #[cfg(feature = "std")] pub fn from_full(full: &[u8]) -> Result { - secp256k1::PublicKey::parse_slice(full, None) + libsecp256k1::PublicKey::parse_slice(full, None) .map(|k| k.serialize_compressed()) .map(Self) .map_err(|_| ()) @@ -165,7 +168,6 @@ impl sp_std::convert::TryFrom<&[u8]> for Public { if data.len() == 33 { Ok(Self::from_slice(data)) } else { - Err(()) } } @@ -206,14 +208,20 @@ impl sp_std::fmt::Debug for Public { #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&self.to_ss58check()) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { Public::from_ss58check(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e))) } @@ -227,7 +235,7 @@ impl sp_std::hash::Hash for Public { } /// A signature (a 512-bit value, plus 8 bits for recovery ID). 
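`Public` stores the 33-byte SEC1 compressed form, and `from_full` above converts from the uncompressed encoding via the renamed `libsecp256k1` crate. A sketch of that conversion; the fixed secret is for illustration only:

```rust
use libsecp256k1::{PublicKey, SecretKey};

fn main() {
    let secret = SecretKey::parse(&[0x42u8; 32]).expect("0x42…42 is below the curve order");
    let public = PublicKey::from_secret_key(&secret);
    let full = public.serialize(); // 65 bytes: 0x04 prefix + x + y
    // Same call chain as `Public::from_full`:
    let compressed = PublicKey::parse_slice(&full, None)
        .map(|k| k.serialize_compressed())
        .expect("a key we just serialized parses back");
    assert_eq!(compressed.len(), 33);
    assert!(compressed[0] == 0x02 || compressed[0] == 0x03); // encodes the parity of y
}
```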
-#[derive(Encode, Decode, PassByInner)] +#[derive(Encode, Decode, PassByInner, TypeInfo)] pub struct Signature(pub [u8; 65]); impl sp_std::convert::TryFrom<&[u8]> for Signature { @@ -246,18 +254,24 @@ impl sp_std::convert::TryFrom<&[u8]> for Signature { #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&hex::encode(self)) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { let signature_hex = hex::decode(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Ok(Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?) + Signature::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e))) } } @@ -348,17 +362,29 @@ impl Signature { /// Recover the public key from this signature and a message. #[cfg(feature = "full_crypto")] pub fn recover>(&self, message: M) -> Option { - let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); + let message = libsecp256k1::Message::parse(&blake2_256(message.as_ref())); let sig: (_, _) = self.try_into().ok()?; - secp256k1::recover(&message, &sig.0, &sig.1) + libsecp256k1::recover(&message, &sig.0, &sig.1) .ok() .map(|recovered| Public(recovered.serialize_compressed())) } + + /// Recover the public key from this signature and a pre-hashed message. + #[cfg(feature = "full_crypto")] + pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { + let message = libsecp256k1::Message::parse(message); + + let sig: (_, _) = self.try_into().ok()?; + + libsecp256k1::recover(&message, &sig.0, &sig.1) + .ok() + .map(|key| Public(key.serialize_compressed())) + } } #[cfg(feature = "full_crypto")] -impl From<(secp256k1::Signature, secp256k1::RecoveryId)> for Signature { - fn from(x: (secp256k1::Signature, secp256k1::RecoveryId)) -> Signature { +impl From<(libsecp256k1::Signature, libsecp256k1::RecoveryId)> for Signature { + fn from(x: (libsecp256k1::Signature, libsecp256k1::RecoveryId)) -> Signature { let mut r = Self::default(); r.0[0..64].copy_from_slice(&x.0.serialize()[..]); r.0[64] = x.1.serialize(); @@ -367,13 +393,12 @@ impl From<(secp256k1::Signature, secp256k1::RecoveryId)> for Signature { } #[cfg(feature = "full_crypto")] -impl<'a> TryFrom<&'a Signature> for (secp256k1::Signature, secp256k1::RecoveryId) { +impl<'a> TryFrom<&'a Signature> for (libsecp256k1::Signature, libsecp256k1::RecoveryId) { type Error = (); - fn try_from(x: &'a Signature) -> Result<(secp256k1::Signature, secp256k1::RecoveryId), Self::Error> { - Ok(( - secp256k1::Signature::parse_slice(&x.0[0..64]).expect("hardcoded to 64 bytes; qed"), - secp256k1::RecoveryId::parse(x.0[64]).map_err(|_| ())?, - )) + fn try_from( + x: &'a Signature, + ) -> Result<(libsecp256k1::Signature, libsecp256k1::RecoveryId), Self::Error> { + parse_signature_standard(&x.0).map_err(|_| ()) } } @@ -418,21 +443,22 @@ impl TraitPair for Pair { let phrase = mnemonic.phrase(); let (pair, seed) = Self::from_phrase(phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) + (pair, phrase.to_owned(), seed) } /// Generate key pair from given recovery phrase and password. 
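The 65-byte `Signature` layout mirrors the `From<(libsecp256k1::Signature, libsecp256k1::RecoveryId)>` impl above: bytes `0..64` hold the serialized r‖s pair and byte 64 the recovery id. A sketch, assuming the `libsecp256k1` crate:

```rust
use libsecp256k1::{sign, Message, SecretKey};

fn main() {
    let secret = SecretKey::parse(&[0x42u8; 32]).unwrap();
    let (sig, rec_id) = sign(&Message::parse(&[7u8; 32]), &secret);
    // Pack exactly as the From impl does.
    let mut raw = [0u8; 65];
    raw[0..64].copy_from_slice(&sig.serialize());
    raw[64] = rec_id.serialize();
    assert!(raw[64] <= 3); // a recovery id is always 0..=3
}
```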
#[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { - let big_seed = seed_from_entropy( + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { + let big_seed = substrate_bip39::seed_from_entropy( Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)?.entropy(), + .map_err(|_| SecretStringError::InvalidPhrase)? + .entropy(), password.unwrap_or(""), - ).map_err(|_| SecretStringError::InvalidSeed)?; + ) + .map_err(|_| SecretStringError::InvalidSeed)?; let mut seed = Seed::default(); seed.copy_from_slice(&big_seed[0..32]); Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) @@ -450,16 +476,17 @@ impl TraitPair for Pair { /// /// You should never need to use this; generate(), generate_with_phrase fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = SecretKey::parse_slice(seed_slice) - .map_err(|_| SecretStringError::InvalidSeedLength)?; + let secret = + SecretKey::parse_slice(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; let public = PublicKey::from_secret_key(&secret); - Ok(Pair{ secret, public }) + Ok(Pair { public, secret }) } /// Derive a child key from a series of given junctions. - fn derive>(&self, + fn derive>( + &self, path: Iter, - _seed: Option + _seed: Option, ) -> Result<(Pair, Option), DeriveError> { let mut acc = self.secret.serialize(); for j in path { @@ -478,16 +505,19 @@ impl TraitPair for Pair { /// Sign a message. fn sign(&self, message: &[u8]) -> Signature { - let message = secp256k1::Message::parse(&blake2_256(message)); - secp256k1::sign(&message, &self.secret).into() + let message = libsecp256k1::Message::parse(&blake2_256(message)); + libsecp256k1::sign(&message, &self.secret).into() } /// Verify a signature on a message. Returns true if the signature is good. fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); - let sig: (_, _) = match sig.try_into() { Ok(x) => x, _ => return false }; - match secp256k1::recover(&message, &sig.0, &sig.1) { - Ok(actual) => &pubkey.0[..] == &actual.serialize_compressed()[..], + let message = libsecp256k1::Message::parse(&blake2_256(message.as_ref())); + let sig: (_, _) = match sig.try_into() { + Ok(x) => x, + _ => return false, + }; + match libsecp256k1::recover(&message, &sig.0, &sig.1) { + Ok(actual) => pubkey.0[..] == actual.serialize_compressed()[..], _ => false, } } @@ -497,11 +527,15 @@ impl TraitPair for Pair { /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct /// size. Use it only if you're coming from byte buffers and need the speed. 
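`from_phrase` stretches the mnemonic's *entropy* with substrate-bip39 and keeps the first 32 bytes as the seed. A sketch of that pipeline, assuming the same `bip39` (tiny-bip39) and `substrate-bip39` crates the diff imports:

```rust
use bip39::{Language, Mnemonic, MnemonicType};

fn main() {
    let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English);
    // Note: the entropy is stretched, not the standard BIP39 seed, so the
    // result intentionally differs from e.g. BIP32-style wallets.
    let big_seed = substrate_bip39::seed_from_entropy(mnemonic.entropy(), "pw")
        .expect("entropy generated by Mnemonic has a valid length");
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&big_seed[0..32]);
}
```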
fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); - if sig.len() != 65 { return false } - let ri = match secp256k1::RecoveryId::parse(sig[64]) { Ok(x) => x, _ => return false }; - let sig = match secp256k1::Signature::parse_slice(&sig[0..64]) { Ok(x) => x, _ => return false }; - match secp256k1::recover(&message, &sig, &ri) { + let message = libsecp256k1::Message::parse(&blake2_256(message.as_ref())); + if sig.len() != 65 { + return false + } + let (sig, ri) = match parse_signature_standard(&sig) { + Ok(sigri) => sigri, + _ => return false, + }; + match libsecp256k1::recover(&message, &sig, &ri) { Ok(actual) => pubkey.as_ref() == &actual.serialize()[1..], _ => false, } @@ -525,25 +559,79 @@ impl Pair { #[cfg(feature = "std")] pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { Self::from_string(s, password_override).unwrap_or_else(|_| { - let mut padded_seed: Seed = [' ' as u8; 32]; + let mut padded_seed: Seed = [b' '; 32]; let len = s.len().min(32); padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); Self::from_seed(&padded_seed) }) } + + /// Sign a pre-hashed message + pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature { + let message = libsecp256k1::Message::parse(message); + libsecp256k1::sign(&message, &self.secret).into() + } + + /// Verify a signature on a pre-hashed message. Return `true` if the signature is valid + /// and thus matches the given `public` key. + pub fn verify_prehashed(sig: &Signature, message: &[u8; 32], public: &Public) -> bool { + let message = libsecp256k1::Message::parse(message); + + let sig: (_, _) = match sig.try_into() { + Ok(x) => x, + _ => return false, + }; + + match libsecp256k1::recover(&message, &sig.0, &sig.1) { + Ok(actual) => public.0[..] == actual.serialize_compressed()[..], + _ => false, + } + } + + /// Verify a signature on a message. Returns true if the signature is good. + /// Parses Signature using parse_overflowing_slice + pub fn verify_deprecated>(sig: &Signature, message: M, pubkey: &Public) -> bool { + let message = libsecp256k1::Message::parse(&blake2_256(message.as_ref())); + let (sig, ri) = match parse_signature_overflowing(&sig.0) { + Ok(sigri) => sigri, + _ => return false, + }; + match libsecp256k1::recover(&message, &sig, &ri) { + Ok(actual) => pubkey.0[..] 
== actual.serialize_compressed()[..], + _ => false, + } + } +} + +#[cfg(feature = "full_crypto")] +fn parse_signature_standard( + x: &[u8], +) -> Result<(libsecp256k1::Signature, libsecp256k1::RecoveryId), libsecp256k1::Error> { + let sig = libsecp256k1::Signature::parse_standard_slice(&x[..64])?; + let ri = libsecp256k1::RecoveryId::parse(x[64])?; + Ok((sig, ri)) +} + +#[cfg(feature = "full_crypto")] +fn parse_signature_overflowing( + x: &[u8], +) -> Result<(libsecp256k1::Signature, libsecp256k1::RecoveryId), libsecp256k1::Error> { + let sig = libsecp256k1::Signature::parse_overflowing_slice(&x[..64])?; + let ri = libsecp256k1::RecoveryId::parse(x[64])?; + Ok((sig, ri)) } impl CryptoType for Public { - #[cfg(feature="full_crypto")] + #[cfg(feature = "full_crypto")] type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature="full_crypto")] + #[cfg(feature = "full_crypto")] type Pair = Pair; } -#[cfg(feature="full_crypto")] +#[cfg(feature = "full_crypto")] impl CryptoType for Pair { type Pair = Pair; } @@ -551,15 +639,20 @@ impl CryptoType for Pair { #[cfg(test)] mod test { use super::*; + use crate::{ + crypto::{set_default_ss58_version, PublicError, DEV_PHRASE}, + keccak_256, + }; use hex_literal::hex; - use crate::crypto::{DEV_PHRASE, set_default_ss58_version}; use serde_json; #[test] fn default_phrase_should_be_used() { assert_eq!( Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), ); } @@ -578,9 +671,9 @@ mod test { #[test] fn test_vector_should_work() { - let pair = Pair::from_seed( - &hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60") - ); + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); let public = pair.public(); assert_eq!( public, @@ -591,7 +684,7 @@ mod test { let message = b""; let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); + assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); } @@ -599,8 +692,9 @@ mod test { fn test_vector_by_string_should_work() { let pair = Pair::from_string( "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - None - ).unwrap(); + None, + ) + .unwrap(); let public = pair.public(); assert_eq!( public, @@ -611,7 +705,7 @@ mod test { let message = b""; let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); + assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); } @@ -677,19 +771,65 @@ mod test { } #[test] - fn ss58check_custom_format_works() { + fn ss58check_format_check_works() { use crate::crypto::Ss58AddressFormat; - // temp save default format version - let default_format = Ss58AddressFormat::default(); - // set current ss58 version is custom "200" `Ss58AddressFormat::Custom(200)` - set_default_ss58_version(Ss58AddressFormat::Custom(200)); - // custom addr encoded by version 200 - let addr = "2X64kMNEWAW5KLZMSKcGKEc96MyuaRsRUku7vomuYxKgqjVCRj"; 
- Public::from_ss58check(&addr).unwrap(); - set_default_ss58_version(default_format); - // set current ss58 version to default version - let addr = "KWAfgC2aRG5UVD6CpbPQXCx4YZZUhvWqqAJE6qcYc9Rtr6g5C"; - Public::from_ss58check(&addr).unwrap(); + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let format = Ss58AddressFormat::Reserved46; + let s = public.to_ss58check_with_version(format); + assert_eq!(Public::from_ss58check_with_version(&s), Err(PublicError::FormatNotAllowed)); + } + + #[test] + fn ss58check_full_roundtrip_works() { + use crate::crypto::Ss58AddressFormat; + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let format = Ss58AddressFormat::PolkadotAccount; + let s = public.to_ss58check_with_version(format); + let (k, f) = Public::from_ss58check_with_version(&s).unwrap(); + assert_eq!(k, public); + assert_eq!(f, format); + + let format = Ss58AddressFormat::Custom(64); + let s = public.to_ss58check_with_version(format); + let (k, f) = Public::from_ss58check_with_version(&s).unwrap(); + assert_eq!(k, public); + assert_eq!(f, format); + } + + #[test] + fn ss58check_custom_format_works() { + // We need to run this test in its own process to not interfere with other tests running in + // parallel and also relying on the ss58 version. + if std::env::var("RUN_CUSTOM_FORMAT_TEST") == Ok("1".into()) { + use crate::crypto::Ss58AddressFormat; + // temp save default format version + let default_format = Ss58AddressFormat::default(); + // set current ss58 version is custom "200" `Ss58AddressFormat::Custom(200)` + + set_default_ss58_version(Ss58AddressFormat::Custom(200)); + // custom addr encoded by version 200 + let addr = "4pbsSkWcBaYoFHrKJZp5fDVUKbqSYD9dhZZGvpp3vQ5ysVs5ybV"; + Public::from_ss58check(&addr).unwrap(); + + set_default_ss58_version(default_format); + // set current ss58 version to default version + let addr = "KWAfgC2aRG5UVD6CpbPQXCx4YZZUhvWqqAJE6qcYc9Rtr6g5C"; + Public::from_ss58check(&addr).unwrap(); + + println!("CUSTOM_FORMAT_SUCCESSFUL"); + } else { + let executable = std::env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("RUN_CUSTOM_FORMAT_TEST", "1") + .args(&["--nocapture", "ss58check_custom_format_works"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stdout).unwrap(); + assert!(output.contains("CUSTOM_FORMAT_SUCCESSFUL")); + } } #[test] @@ -707,11 +847,70 @@ mod test { #[test] fn signature_serialization_doesnt_panic() { fn deserialize_signature(text: &str) -> Result { - Ok(serde_json::from_str(text)?) 
+ serde_json::from_str(text) } assert!(deserialize_signature("Not valid json.").is_err()); assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); // Poorly-sized assert!(deserialize_signature("\"abc123\"").is_err()); } + + #[test] + fn sign_prehashed_works() { + let (pair, _, _) = Pair::generate_with_phrase(Some("password")); + + // `msg` shouldn't be mangled + let msg = [0u8; 32]; + let sig1 = pair.sign_prehashed(&msg); + let sig2: Signature = + libsecp256k1::sign(&libsecp256k1::Message::parse(&msg), &pair.secret).into(); + + assert_eq!(sig1, sig2); + + // signature is actually different + let sig2 = pair.sign(&msg); + + assert_ne!(sig1, sig2); + + // using pre-hashed `msg` works + let msg = keccak_256(b"this should be hashed"); + let sig1 = pair.sign_prehashed(&msg); + let sig2: Signature = + libsecp256k1::sign(&libsecp256k1::Message::parse(&msg), &pair.secret).into(); + + assert_eq!(sig1, sig2); + } + + #[test] + fn verify_prehashed_works() { + let (pair, _, _) = Pair::generate_with_phrase(Some("password")); + + // `msg` and `sig` match + let msg = keccak_256(b"this should be hashed"); + let sig = pair.sign_prehashed(&msg); + assert!(Pair::verify_prehashed(&sig, &msg, &pair.public())); + + // `msg` and `sig` don't match + let msg = keccak_256(b"this is a different message"); + assert!(!Pair::verify_prehashed(&sig, &msg, &pair.public())); + } + + #[test] + fn recover_prehashed_works() { + let (pair, _, _) = Pair::generate_with_phrase(Some("password")); + + // recovered key matches signing key + let msg = keccak_256(b"this should be hashed"); + let sig = pair.sign_prehashed(&msg); + let key = sig.recover_prehashed(&msg).unwrap(); + assert_eq!(pair.public(), key); + + // recovered key is useable + assert!(Pair::verify_prehashed(&sig, &msg, &key)); + + // recovered key and signing key don't match + let msg = keccak_256(b"this is a different message"); + let key = sig.recover_prehashed(&msg).unwrap(); + assert_ne!(pair.public(), key); + } } diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index fcc84c5c2edcf..d786ee9d255ff 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
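The prehashed tests just above check that `sign_prehashed` is an ordinary ECDSA sign over a caller-supplied 32-byte digest. A sketch of the same equivalence, assuming the `libsecp256k1` and `tiny-keccak` crates:

```rust
use libsecp256k1::{sign, Message, SecretKey};
use tiny_keccak::{Hasher, Keccak};

// Same shape as the keccak_256 helper in primitives/core/src/hashing.rs.
fn keccak_256(data: &[u8]) -> [u8; 32] {
    let mut keccak = Keccak::v256();
    keccak.update(data);
    let mut output = [0u8; 32];
    keccak.finalize(&mut output);
    output
}

fn main() {
    let secret = SecretKey::parse(&[0x42u8; 32]).unwrap();
    let digest = keccak_256(b"this should be hashed");
    // `sign_prehashed` boils down to an ordinary sign over the given digest.
    let (sig, _rec_id) = sign(&Message::parse(&digest), &secret);
    assert_eq!(sig.serialize().len(), 64);
}
```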
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,28 +22,29 @@ #[cfg(feature = "full_crypto")] use sp_std::vec::Vec; -use crate::{hash::H256, hash::H512}; -use codec::{Encode, Decode}; +use crate::hash::{H256, H512}; +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; +use crate::crypto::{ + CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, UncheckedFrom, +}; #[cfg(feature = "full_crypto")] -use blake2_rfc; +use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; +#[cfg(feature = "std")] +use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] use core::convert::TryFrom; #[cfg(feature = "full_crypto")] use ed25519_dalek::{Signer as _, Verifier as _}; #[cfg(feature = "std")] -use substrate_bip39::seed_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "full_crypto")] -use crate::crypto::{Pair as TraitPair, DeriveJunction, SecretStringError}; -#[cfg(feature = "std")] -use crate::crypto::Ss58Codec; -#[cfg(feature = "std")] -use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; use sp_std::ops::Deref; +#[cfg(feature = "std")] +use substrate_bip39::seed_from_entropy; /// An identifier used to match public keys against ed25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); @@ -56,7 +57,20 @@ type Seed = [u8; 32]; /// A public key. #[cfg_attr(feature = "full_crypto", derive(Hash))] -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Encode, Decode, Default, PassByInner)] +#[derive( + PartialEq, + Eq, + PartialOrd, + Ord, + Clone, + Copy, + Encode, + Decode, + Default, + PassByInner, + MaxEncodedLen, + TypeInfo, +)] pub struct Public(pub [u8; 32]); /// A key pair. @@ -67,9 +81,9 @@ pub struct Pair(ed25519_dalek::Keypair); impl Clone for Pair { fn clone(&self) -> Self { Pair(ed25519_dalek::Keypair { - public: self.0.public.clone(), + public: self.0.public, secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) - .expect("key is always the correct size; qed") + .expect("key is always the correct size; qed"), }) } } @@ -176,21 +190,27 @@ impl sp_std::fmt::Debug for Public { #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&self.to_ss58check()) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { Public::from_ss58check(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e))) } } /// A signature (a 512-bit value). 
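The manual `Clone` impl above exists because `ed25519_dalek::Keypair` implements no `Clone` of its own; the secret half is rebuilt from its byte representation. A standalone sketch of the same trick (the fixed secret is illustrative only):

```rust
use ed25519_dalek::Keypair;

fn clone_keypair(kp: &Keypair) -> Keypair {
    Keypair {
        public: kp.public, // `PublicKey` is `Copy`
        secret: ed25519_dalek::SecretKey::from_bytes(kp.secret.as_bytes())
            .expect("key is always the correct size; qed"),
    }
}

fn main() {
    let secret = ed25519_dalek::SecretKey::from_bytes(&[7u8; 32]).unwrap();
    let public = ed25519_dalek::PublicKey::from(&secret);
    let pair = Keypair { public, secret };
    assert_eq!(clone_keypair(&pair).public, pair.public);
}
```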
-#[derive(Encode, Decode, PassByInner)] +#[derive(Encode, Decode, PassByInner, TypeInfo)] pub struct Signature(pub [u8; 64]); impl sp_std::convert::TryFrom<&[u8]> for Signature { @@ -209,18 +229,24 @@ impl sp_std::convert::TryFrom<&[u8]> for Signature { #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&hex::encode(self)) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { let signature_hex = hex::decode(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Ok(Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?) + Signature::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e))) } } @@ -335,15 +361,19 @@ pub struct LocalizedSignature { /// An error type for SS58 decoding. #[cfg(feature = "std")] -#[derive(Clone, Copy, Eq, PartialEq, Debug)] +#[derive(Clone, Copy, Eq, PartialEq, Debug, thiserror::Error)] pub enum PublicError { /// Bad alphabet. + #[error("Base 58 requirement is violated")] BadBase58, /// Bad length. + #[error("Length is bad")] BadLength, /// Unknown version. + #[error("Unknown version")] UnknownVersion, /// Invalid checksum. + #[error("Invalid checksum")] InvalidChecksum, } @@ -433,21 +463,22 @@ impl TraitPair for Pair { let phrase = mnemonic.phrase(); let (pair, seed) = Self::from_phrase(phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) + (pair, phrase.to_owned(), seed) } /// Generate key pair from given recovery phrase and password. #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { let big_seed = seed_from_entropy( Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)?.entropy(), + .map_err(|_| SecretStringError::InvalidPhrase)? + .entropy(), password.unwrap_or(""), - ).map_err(|_| SecretStringError::InvalidSeed)?; + ) + .map_err(|_| SecretStringError::InvalidSeed)?; let mut seed = Seed::default(); seed.copy_from_slice(&big_seed[0..32]); Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) @@ -472,7 +503,8 @@ impl TraitPair for Pair { } /// Derive a child key from a series of given junctions. - fn derive>(&self, + fn derive>( + &self, path: Iter, _seed: Option, ) -> Result<(Pair, Option), DeriveError> { @@ -517,13 +549,10 @@ impl TraitPair for Pair { let sig = match ed25519_dalek::Signature::try_from(sig) { Ok(s) => s, - Err(_) => return false + Err(_) => return false, }; - match public_key.verify(message.as_ref(), &sig) { - Ok(_) => true, - _ => false, - } + public_key.verify(message.as_ref(), &sig).is_ok() } /// Return a vec filled with raw data. 
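With the new `thiserror::Error` derive, every `PublicError` variant carries a `Display` message, so ss58 failures can be rendered directly. A trimmed, standalone sketch (only two of the variants shown):

```rust
#[derive(Clone, Copy, Eq, PartialEq, Debug, thiserror::Error)]
pub enum PublicError {
    #[error("Base 58 requirement is violated")]
    BadBase58,
    #[error("Length is bad")]
    BadLength,
}

fn main() {
    // `to_string` goes through the generated `Display` impl.
    assert_eq!(PublicError::BadLength.to_string(), "Length is bad");
}
```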
@@ -544,7 +573,7 @@ impl Pair { #[cfg(feature = "std")] pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { Self::from_string(s, password_override).unwrap_or_else(|_| { - let mut padded_seed: Seed = [' ' as u8; 32]; + let mut padded_seed: Seed = [b' '; 32]; let len = s.len().min(32); padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); Self::from_seed(&padded_seed) @@ -570,15 +599,17 @@ impl CryptoType for Pair { #[cfg(test)] mod test { use super::*; - use hex_literal::hex; use crate::crypto::DEV_PHRASE; + use hex_literal::hex; use serde_json; #[test] fn default_phrase_should_be_used() { assert_eq!( Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), ); } @@ -597,17 +628,20 @@ mod test { #[test] fn test_vector_should_work() { - let pair = Pair::from_seed( - &hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60") - ); - let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a") + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); let message = b""; let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); + assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); } @@ -615,16 +649,20 @@ mod test { fn test_vector_by_string_should_work() { let pair = Pair::from_string( "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - None - ).unwrap(); + None, + ) + .unwrap(); let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a") - )); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); let message = b""; let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); + assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); } @@ -642,9 +680,12 @@ mod test { fn seeded_pair_should_work() { let pair = Pair::from_seed(b"12345678901234567890123456789012"); let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee") - )); + assert_eq!( + public, + Public::from_raw(hex!( + "2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee" + )) + ); let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); println!("Correct signature: {:?}", signature); @@ -701,7 +742,7 @@ mod test { #[test] fn signature_serialization_doesnt_panic() { fn deserialize_signature(text: &str) -> 
Result { - Ok(serde_json::from_str(text)?) + serde_json::from_str(text) } assert!(deserialize_signature("Not valid json.").is_err()); assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); diff --git a/primitives/core/src/hash.rs b/primitives/core/src/hash.rs index 20a6788c32070..55a9664c9dad4 100644 --- a/primitives/core/src/hash.rs +++ b/primitives/core/src/hash.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -43,7 +43,7 @@ mod tests { (H160::from_low_u64_be(16), "0x0000000000000000000000000000000000000010"), (H160::from_low_u64_be(1_000), "0x00000000000000000000000000000000000003e8"), (H160::from_low_u64_be(100_000), "0x00000000000000000000000000000000000186a0"), - (H160::from_low_u64_be(u64::max_value()), "0x000000000000000000000000ffffffffffffffff"), + (H160::from_low_u64_be(u64::MAX), "0x000000000000000000000000ffffffffffffffff"), ]; for (number, expected) in tests { @@ -55,13 +55,34 @@ mod tests { #[test] fn test_h256() { let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000000000000000000000000000"), - (H256::from_low_u64_be(2), "0x0000000000000000000000000000000000000000000000000000000000000002"), - (H256::from_low_u64_be(15), "0x000000000000000000000000000000000000000000000000000000000000000f"), - (H256::from_low_u64_be(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), - (H256::from_low_u64_be(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), - (H256::from_low_u64_be(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from_low_u64_be(u64::max_value()), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), + ( + Default::default(), + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ( + H256::from_low_u64_be(2), + "0x0000000000000000000000000000000000000000000000000000000000000002", + ), + ( + H256::from_low_u64_be(15), + "0x000000000000000000000000000000000000000000000000000000000000000f", + ), + ( + H256::from_low_u64_be(16), + "0x0000000000000000000000000000000000000000000000000000000000000010", + ), + ( + H256::from_low_u64_be(1_000), + "0x00000000000000000000000000000000000000000000000000000000000003e8", + ), + ( + H256::from_low_u64_be(100_000), + "0x00000000000000000000000000000000000000000000000000000000000186a0", + ), + ( + H256::from_low_u64_be(u64::MAX), + "0x000000000000000000000000000000000000000000000000ffffffffffffffff", + ), ]; for (number, expected) in tests { @@ -72,9 +93,21 @@ mod tests { #[test] fn test_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000g\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x00000000000000000000000000000000000000000000000000000000000000000\"" 
+ ) + .unwrap_err() + .is_data()); assert!(ser::from_str::("\"\"").unwrap_err().is_data()); assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); diff --git a/primitives/core/src/hasher.rs b/primitives/core/src/hasher.rs index 8ccaa4d90a78d..01680de083762 100644 --- a/primitives/core/src/hasher.rs +++ b/primitives/core/src/hasher.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,9 @@ //! Substrate Blake2b Hasher implementation pub mod blake2 { - use hash_db::Hasher; - use hash256_std_hasher::Hash256StdHasher; use crate::hash::H256; + use hash256_std_hasher::Hash256StdHasher; + use hash_db::Hasher; /// Concrete implementation of Hasher using Blake2b 256-bit hashes #[derive(Debug)] @@ -38,9 +38,9 @@ pub mod blake2 { } pub mod keccak { - use hash_db::Hasher; - use hash256_std_hasher::Hash256StdHasher; use crate::hash::H256; + use hash256_std_hasher::Hash256StdHasher; + use hash_db::Hasher; /// Concrete implementation of Hasher using Keccak 256-bit hashes #[derive(Debug)] diff --git a/primitives/core/src/hashing.rs b/primitives/core/src/hashing.rs index f61700a5a43cd..4c719f7c69832 100644 --- a/primitives/core/src/hashing.rs +++ b/primitives/core/src/hashing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,11 +16,14 @@ // limitations under the License. //! Hashing functions. +//! +//! This module is gated by `full-crypto` feature. If you intend to use any of the functions +//! defined here within your runtime, you should most likely rather use `sp_io::hashing` instead, +//! unless you know what you're doing. Using `sp_io` will be more performant, since instead of +//! computing the hash in WASM it delegates that computation to the host client. -use blake2_rfc; use sha2::{Digest, Sha256}; use tiny_keccak::{Hasher, Keccak}; -use twox_hash; /// Do a Blake2 512-bit hash and place result in `dest`. pub fn blake2_512_into(data: &[u8], dest: &mut [u8; 64]) { @@ -72,7 +75,7 @@ pub fn blake2_64(data: &[u8]) -> [u8; 8] { /// Do a XX 64-bit hash and place result in `dest`. pub fn twox_64_into(data: &[u8], dest: &mut [u8; 8]) { - use ::core::hash::Hasher; + use core::hash::Hasher; let mut h0 = twox_hash::XxHash::with_seed(0); h0.write(data); let r0 = h0.finish(); @@ -89,7 +92,7 @@ pub fn twox_64(data: &[u8]) -> [u8; 8] { /// Do a XX 128-bit hash and place result in `dest`. pub fn twox_128_into(data: &[u8], dest: &mut [u8; 16]) { - use ::core::hash::Hasher; + use core::hash::Hasher; let mut h0 = twox_hash::XxHash::with_seed(0); let mut h1 = twox_hash::XxHash::with_seed(1); h0.write(data); @@ -146,11 +149,20 @@ pub fn keccak_256(data: &[u8]) -> [u8; 32] { output } +/// Do a keccak 512-bit hash and return result. +pub fn keccak_512(data: &[u8]) -> [u8; 64] { + let mut keccak = Keccak::v512(); + keccak.update(data); + let mut output = [0u8; 64]; + keccak.finalize(&mut output); + output +} + /// Do a sha2 256-bit hash and return result. 
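All of the reflowed `test_h256` vectors above follow from one rule: `from_low_u64_be` writes the `u64` big-endian into the low (rightmost) bytes, leaving the rest zero. A quick check, assuming the `primitive-types` crate that `sp-core` re-exports `H256` from:

```rust
use primitive_types::H256;

fn main() {
    let h = H256::from_low_u64_be(u64::MAX);
    assert_eq!(&h.0[..24], &[0u8; 24][..]); // 24 high bytes stay zero
    assert_eq!(&h.0[24..], &[0xffu8; 8][..]); // value sits in the low 8 bytes
    assert_eq!(H256::from_low_u64_be(2).0[31], 2);
}
```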
pub fn sha2_256(data: &[u8]) -> [u8; 32] { let mut hasher = Sha256::new(); - hasher.input(data); + hasher.update(data); let mut output = [0u8; 32]; - output.copy_from_slice(&hasher.result()); + output.copy_from_slice(&hasher.finalize()); output } diff --git a/primitives/core/src/hexdisplay.rs b/primitives/core/src/hexdisplay.rs index 9d2b7a12d032e..4d91db1567920 100644 --- a/primitives/core/src/hexdisplay.rs +++ b/primitives/core/src/hexdisplay.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,9 @@ pub struct HexDisplay<'a>(&'a [u8]); impl<'a> HexDisplay<'a> { /// Create new instance that will display `d` as a hex string when displayed. - pub fn from(d: &'a R) -> Self { HexDisplay(d.as_bytes_ref()) } + pub fn from(d: &'a R) -> Self { + HexDisplay(d.as_bytes_ref()) + } } impl<'a> sp_std::fmt::Display for HexDisplay<'a> { @@ -60,15 +62,27 @@ pub trait AsBytesRef { } impl AsBytesRef for &[u8] { - fn as_bytes_ref(&self) -> &[u8] { self } + fn as_bytes_ref(&self) -> &[u8] { + self + } } impl AsBytesRef for [u8] { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } impl AsBytesRef for sp_std::vec::Vec { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } +} + +impl AsBytesRef for sp_storage::StorageKey { + fn as_bytes_ref(&self) -> &[u8] { + self.as_ref() + } } macro_rules! impl_non_endians { @@ -79,9 +93,11 @@ macro_rules! impl_non_endians { )* } } -impl_non_endians!([u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], - [u8; 10], [u8; 12], [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], - [u8; 48], [u8; 56], [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128]); +impl_non_endians!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 10], [u8; 12], + [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], [u8; 48], [u8; 56], + [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128] +); /// Format into ASCII + # + hex, suitable for storage key preimages. #[cfg(feature = "std")] @@ -97,7 +113,7 @@ pub fn ascii_format(asciish: &[u8]) -> String { latch = true; } r.push_str(&format!("{:02x}", *c)); - } + }, } } r diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index bef033df6c9ba..a6229fe43a1a5 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,6 @@ //! Shareable Substrate types. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] /// Initialize a key-value collection from array. @@ -32,17 +31,17 @@ macro_rules! 
map { ); } -use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; -use sp_std::prelude::*; -use sp_std::ops::Deref; +#[doc(hidden)] +pub use codec::{Decode, Encode}; +use scale_info::TypeInfo; #[cfg(feature = "std")] -use std::borrow::Cow; +pub use serde; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; +use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; +use sp_std::{ops::Deref, prelude::*}; #[cfg(feature = "std")] -pub use serde; -#[doc(hidden)] -pub use codec::{Encode, Decode}; +use std::borrow::Cow; pub use sp_debug_derive::RuntimeDebug; @@ -53,37 +52,39 @@ pub use impl_serde::serialize as bytes; pub mod hashing; #[cfg(feature = "full_crypto")] -pub use hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256, keccak_256}; -pub mod hexdisplay; +pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64}; pub mod crypto; +pub mod hexdisplay; pub mod u32_trait; -pub mod ed25519; -pub mod sr25519; +mod changes_trie; pub mod ecdsa; +pub mod ed25519; pub mod hash; #[cfg(feature = "std")] mod hasher; pub mod offchain; pub mod sandbox; -pub mod uint; -mod changes_trie; +pub mod sr25519; +pub mod testing; #[cfg(feature = "std")] pub mod traits; -pub mod testing; +pub mod uint; -pub use self::hash::{H160, H256, H512, convert_hash}; -pub use self::uint::{U256, U512}; +pub use self::{ + hash::{convert_hash, H160, H256, H512}, + uint::{U256, U512}, +}; pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; -pub use hash_db::Hasher; #[cfg(feature = "std")] pub use self::hasher::blake2::Blake2Hasher; #[cfg(feature = "std")] pub use self::hasher::keccak::KeccakHasher; +pub use hash_db::Hasher; pub use sp_storage as storage; @@ -117,13 +118,15 @@ impl ExecutionContext { use ExecutionContext::*; match self { - Importing | Syncing | BlockConstruction => - offchain::Capabilities::none(), - // Enable keystore and transaction pool by default for offchain calls. + Importing | Syncing | BlockConstruction => offchain::Capabilities::none(), + // Enable keystore, transaction pool and Offchain DB reads by default for offchain + // calls. OffchainCall(None) => [ offchain::Capability::Keystore, + offchain::Capability::OffchainDbRead, offchain::Capability::TransactionPool, - ][..].into(), + ][..] + .into(), OffchainCall(Some((_, capabilities))) => *capabilities, } } @@ -132,19 +135,31 @@ impl ExecutionContext { /// Hex-serialized shim for `Vec`. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))] -pub struct Bytes(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Bytes(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl From> for Bytes { - fn from(s: Vec) -> Self { Bytes(s) } + fn from(s: Vec) -> Self { + Bytes(s) + } } impl From for Bytes { - fn from(s: OpaqueMetadata) -> Self { Bytes(s.0) } + fn from(s: OpaqueMetadata) -> Self { + Bytes(s.0) + } } impl Deref for Bytes { type Target = [u8]; - fn deref(&self) -> &[u8] { &self.0[..] } + fn deref(&self) -> &[u8] { + &self.0[..] + } +} + +impl codec::WrapperTypeEncode for Bytes {} + +impl codec::WrapperTypeDecode for Bytes { + type Wrapped = Vec; } #[cfg(feature = "std")] @@ -176,7 +191,19 @@ impl sp_std::ops::Deref for OpaqueMetadata { } /// Simple blob to hold a `PeerId` without committing to its format. 
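The new `WrapperTypeEncode`/`WrapperTypeDecode` marker impls make `Bytes` SCALE-encode exactly like the `Vec<u8>` it wraps. A standalone sketch of the mechanism with `parity-scale-codec` (imported as `codec`):

```rust
use codec::{Decode, Encode};
use core::ops::Deref;

#[derive(PartialEq, Debug)]
pub struct Bytes(pub Vec<u8>);

impl From<Vec<u8>> for Bytes {
    fn from(v: Vec<u8>) -> Self {
        Bytes(v)
    }
}

impl Deref for Bytes {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.0[..]
    }
}

// Marker impls: codec's blanket impls delegate to the wrapped `Vec<u8>`.
impl codec::WrapperTypeEncode for Bytes {}
impl codec::WrapperTypeDecode for Bytes {
    type Wrapped = Vec<u8>;
}

fn main() {
    let bytes = Bytes(vec![1, 2, 3]);
    let encoded = bytes.encode();
    assert_eq!(encoded, vec![1u8, 2, 3].encode()); // identical wire format
    assert_eq!(Bytes::decode(&mut &encoded[..]).unwrap(), bytes);
}
```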
-#[derive(Default, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, PassByInner)] +#[derive( + Default, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + PassByInner, + TypeInfo, +)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct OpaquePeerId(pub Vec); @@ -193,7 +220,14 @@ pub enum NativeOrEncoded { /// The native representation. Native(R), /// The encoded representation. - Encoded(Vec) + Encoded(Vec), +} + +#[cfg(feature = "std")] +impl From for NativeOrEncoded { + fn from(val: R) -> Self { + Self::Native(val) + } } #[cfg(feature = "std")] @@ -267,19 +301,19 @@ pub trait TypeId { /// A log level matching the one from `log` crate. /// -/// Used internally by `sp_io::log` method. +/// Used internally by `sp_io::logging::log` method. #[derive(Encode, Decode, PassByEnum, Copy, Clone)] pub enum LogLevel { /// `Error` log level. - Error = 1, + Error = 1_isize, /// `Warn` log level. - Warn = 2, + Warn = 2_isize, /// `Info` log level. - Info = 3, + Info = 3_isize, /// `Debug` log level. - Debug = 4, + Debug = 4_isize, /// `Trace` log level. - Trace = 5, + Trace = 5_isize, } impl From for LogLevel { @@ -320,6 +354,53 @@ impl From for log::Level { } } +/// Log level filter that expresses which log levels should be filtered. +/// +/// This enum matches the [`log::LevelFilter`] enum. +#[derive(Encode, Decode, PassByEnum, Copy, Clone)] +pub enum LogLevelFilter { + /// `Off` log level filter. + Off = 0_isize, + /// `Error` log level filter. + Error = 1_isize, + /// `Warn` log level filter. + Warn = 2_isize, + /// `Info` log level filter. + Info = 3_isize, + /// `Debug` log level filter. + Debug = 4_isize, + /// `Trace` log level filter. + Trace = 5_isize, +} + +impl From for log::LevelFilter { + fn from(l: LogLevelFilter) -> Self { + use self::LogLevelFilter::*; + match l { + Off => Self::Off, + Error => Self::Error, + Warn => Self::Warn, + Info => Self::Info, + Debug => Self::Debug, + Trace => Self::Trace, + } + } +} + +impl From for LogLevelFilter { + fn from(l: log::LevelFilter) -> Self { + use log::LevelFilter::*; + match l { + Off => Self::Off, + Error => Self::Error, + Warn => Self::Warn, + Info => Self::Info, + Debug => Self::Debug, + Trace => Self::Trace, + } + } +} + /// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`. /// /// When Substrate calls into Wasm it expects a fixed signature for functions exported @@ -344,7 +425,7 @@ pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 { /// The void type - it cannot exist. // Oh rust, you crack me up... -#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug)] +#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum Void {} /// Macro for creating `Maybe*` marker traits. @@ -386,3 +467,8 @@ macro_rules! impl_maybe_marker { )+ } } + +/// The maximum number of bytes that can be allocated at one time. +// The maximum possible allocation size was chosen rather arbitrary, 32 MiB should be enough for +// everybody. +pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index d94fe1d830274..c0df1ac6c637f 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
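A quick sanity check on the `MAX_POSSIBLE_ALLOCATION` constant introduced above: 33554432 is 2^25 bytes, i.e. the 32 MiB the comment claims:

```rust
fn main() {
    const MAX_POSSIBLE_ALLOCATION: u32 = 33554432;
    assert_eq!(MAX_POSSIBLE_ALLOCATION, 1 << 25); // 2^25 bytes
    assert_eq!(MAX_POSSIBLE_ALLOCATION, 32 * 1024 * 1024); // = 32 MiB
}
```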
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,14 @@ //! Offchain workers types -use codec::{Encode, Decode}; -use sp_std::{prelude::{Vec, Box}, convert::TryFrom}; use crate::{OpaquePeerId, RuntimeDebug}; -use sp_runtime_interface::pass_by::{PassByCodec, PassByInner, PassByEnum}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime_interface::pass_by::{PassByCodec, PassByEnum, PassByInner}; +use sp_std::{ + convert::TryFrom, + prelude::{Box, Vec}, +}; pub use crate::crypto::KeyTypeId; @@ -29,10 +33,10 @@ pub mod storage; #[cfg(feature = "std")] pub mod testing; -/// Local storage prefix used by the Offchain Worker API to -pub const STORAGE_PREFIX : &'static [u8] = b"storage"; +/// Persistent storage prefix used by the Offchain Worker API when creating a DB key. +pub const STORAGE_PREFIX: &[u8] = b"storage"; -/// Offchain workers local storage. +/// Offchain DB persistent (non-fork-aware) storage. pub trait OffchainStorage: Clone + Send + Sync { /// Persist a value in storage under given key and prefix. fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]); @@ -66,12 +70,12 @@ pub enum StorageKind { /// that is re-run at block `N(hash2)`. /// This storage can be used by offchain workers to handle forks /// and coordinate offchain workers running on different forks. - PERSISTENT = 1, + PERSISTENT = 1_isize, /// Local storage is revertible and fork-aware. It means that any value /// set by the offchain worker triggered at block `N(hash1)` is reverted /// if that block is reverted as non-canonical and is NOT available for the worker /// that is re-run at block `N(hash2)`. - LOCAL = 2, + LOCAL = 2_isize, } impl TryFrom for StorageKind { @@ -93,7 +97,9 @@ impl From for u32 { } /// Opaque type for offchain http requests. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, Encode, Decode, PassByInner)] +#[derive( + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, Encode, Decode, PassByInner, +)] #[cfg_attr(feature = "std", derive(Hash))] pub struct HttpRequestId(pub u16); @@ -108,11 +114,11 @@ impl From for u32 { #[repr(C)] pub enum HttpError { /// The requested action couldn't been completed within a deadline. - DeadlineReached = 1, + DeadlineReached = 1_isize, /// There was an IO Error while processing the request. - IoError = 2, + IoError = 2_isize, /// The ID of the request is invalid in this context. - Invalid = 3, + Invalid = 3_isize, } impl TryFrom for HttpError { @@ -123,7 +129,7 @@ impl TryFrom for HttpError { e if e == HttpError::DeadlineReached as u8 as u32 => Ok(HttpError::DeadlineReached), e if e == HttpError::IoError as u8 as u32 => Ok(HttpError::IoError), e if e == HttpError::Invalid as u8 as u32 => Ok(HttpError::Invalid), - _ => Err(()) + _ => Err(()), } } } @@ -181,7 +187,7 @@ impl TryFrom for HttpRequestStatus { /// A blob to hold information about the local node's network state /// without committing to its format. -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByCodec)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByCodec, TypeInfo)] #[cfg_attr(feature = "std", derive(Default))] pub struct OpaqueNetworkState { /// PeerId of the local node in SCALE encoded. @@ -191,7 +197,7 @@ pub struct OpaqueNetworkState { } /// Simple blob to hold a `Multiaddr` without committing to its format. 
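`HttpError` (like `StorageKind`) converts from `u32` by comparing against each pinned discriminant and rejecting everything else. A standalone sketch of that guard pattern; the plain discriminants here stand in for the `_isize`-suffixed ones the diff uses alongside `PassByEnum`:

```rust
use core::convert::TryFrom;

#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(C)]
enum HttpError {
    DeadlineReached = 1,
    IoError = 2,
    Invalid = 3,
}

impl TryFrom<u32> for HttpError {
    type Error = ();

    fn try_from(e: u32) -> Result<Self, Self::Error> {
        match e {
            e if e == HttpError::DeadlineReached as u8 as u32 => Ok(HttpError::DeadlineReached),
            e if e == HttpError::IoError as u8 as u32 => Ok(HttpError::IoError),
            e if e == HttpError::Invalid as u8 as u32 => Ok(HttpError::Invalid),
            _ => Err(()),
        }
    }
}

fn main() {
    assert_eq!(HttpError::try_from(2), Ok(HttpError::IoError));
    assert_eq!(HttpError::try_from(9), Err(()));
}
```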
-#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByInner)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByInner, TypeInfo)] pub struct OpaqueMultiaddr(pub Vec); impl OpaqueMultiaddr { @@ -202,11 +208,15 @@ impl OpaqueMultiaddr { } /// Opaque timestamp type -#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode)] +#[derive( + Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode, +)] pub struct Timestamp(u64); /// Duration type -#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode)] +#[derive( + Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode, +)] pub struct Duration(u64); impl Duration { @@ -404,9 +414,9 @@ pub enum Capability { /// Access to opaque network state. NetworkState = 16, /// Access to offchain worker DB (read only). - OffchainWorkerDbRead = 32, + OffchainDbRead = 32, /// Access to offchain worker DB (writes). - OffchainWorkerDbWrite = 64, + OffchainDbWrite = 64, /// Manage the authorized nodes NodeAuthorization = 128, /// Access to an IPFS node. @@ -425,7 +435,7 @@ impl Capabilities { /// Return an object representing all capabilities enabled. pub fn all() -> Self { - Self(u8::max_value()) + Self(u8::MAX) } /// Return capabilities for rich offchain calls. @@ -433,11 +443,7 @@ impl Capabilities { /// Those calls should be allowed to sign and submit transactions /// and access offchain workers database (but read only!). pub fn rich_offchain_call() -> Self { - [ - Capability::TransactionPool, - Capability::Keystore, - Capability::OffchainWorkerDbRead, - ][..].into() + [Capability::TransactionPool, Capability::Keystore, Capability::OffchainDbRead][..].into() } /// Check if particular capability is enabled. @@ -480,56 +486,19 @@ pub trait Externalities: Send { /// Obviously fine in the off-chain worker context. fn random_seed(&mut self) -> [u8; 32]; - /// Sets a value in the local storage. - /// - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It _is_ persisted between runs. - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]); - - /// Removes a value in the local storage. - /// - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It _is_ persisted between runs. - fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]); - - /// Sets a value in the local storage if it matches current value. - /// - /// Since multiple offchain workers may be running concurrently, to prevent - /// data races use CAS to coordinate between them. - /// - /// Returns `true` if the value has been set, `false` otherwise. - /// - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It _is_ persisted between runs. - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool; - - /// Gets a value from the local storage. - /// - /// If the value does not exist in the storage `None` will be returned. - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It _is_ persisted between runs. 
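Capability sets are built from slices of `Capability`, as `rich_offchain_call` above shows. A small sketch of checking the renamed DB flags, assuming `Capabilities::has` keeps its existing signature:

use sp_core::offchain::{Capabilities, Capability};

fn db_access_of_rich_calls() {
    let caps = Capabilities::rich_offchain_call();
    // Rich offchain calls may read the offchain DB but not write it.
    assert!(caps.has(Capability::OffchainDbRead));
    assert!(!caps.has(Capability::OffchainDbWrite));
}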
- fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option>; - /// Initiates a http request given HTTP verb and the URL. /// - /// Meta is a future-reserved field containing additional, parity-scale-codec encoded parameters. - /// Returns the id of newly started request. + /// Meta is a future-reserved field containing additional, parity-scale-codec encoded + /// parameters. Returns the id of newly started request. /// /// Returns an error if: /// - No new request identifier could be allocated. /// - The method or URI contain invalid characters. - /// fn http_request_start( &mut self, method: &str, uri: &str, - meta: &[u8] + meta: &[u8], ) -> Result; /// Append header to the request. @@ -544,12 +513,11 @@ pub trait Externalities: Send { /// /// An error doesn't poison the request, and you can continue as if the call had never been /// made. - /// fn http_request_add_header( &mut self, request_id: HttpRequestId, name: &str, - value: &str + value: &str, ) -> Result<(), ()>; /// Write a chunk of request body. @@ -564,14 +532,13 @@ pub trait Externalities: Send { /// - The request identifier is invalid. /// - `http_response_wait` has already been called on this request. /// - The deadline is reached. - /// - An I/O error has happened, for example the remote has closed our - /// request. The request is then considered invalid. - /// + /// - An I/O error has happened, for example the remote has closed our request. The request is + /// then considered invalid. fn http_request_write_body( &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError>; /// Block and wait for the responses for given requests. @@ -587,7 +554,7 @@ pub trait Externalities: Send { fn http_response_wait( &mut self, ids: &[HttpRequestId], - deadline: Option + deadline: Option, ) -> Vec; /// Read all response headers. @@ -599,10 +566,7 @@ pub trait Externalities: Send { /// /// Returns an empty list if the identifier is unknown/invalid, hasn't /// received a response, or has finished. - fn http_response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)>; + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)>; /// Read a chunk of body response to given buffer. /// @@ -620,14 +584,13 @@ pub trait Externalities: Send { /// Returns an error if: /// - The request identifier is invalid. /// - The deadline is reached. - /// - An I/O error has happened, for example the remote has closed our - /// request. The request is then considered invalid. - /// + /// - An I/O error has happened, for example the remote has closed our request. The request is + /// then considered invalid. fn http_response_read_body( &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result; /// Initiates an IPFS request. 
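The raw `http_request_*` primitives above are normally driven through the higher-level wrapper in `sp_runtime::offchain::http`; a sketch of the full start/wait/read cycle under that assumption:

use sp_runtime::offchain::http;

fn fetch(url: &str) -> Result<Vec<u8>, http::Error> {
    // Start the request, then block until the response (or an error).
    let pending = http::Request::get(url).send().map_err(|_| http::Error::IoError)?;
    let response = pending.wait()?;
    if response.code != 200 {
        return Err(http::Error::Unknown);
    }
    Ok(response.body().collect::<Vec<u8>>())
}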
@@ -655,11 +618,11 @@ pub trait Externalities: Send { impl Externalities for Box { fn is_validator(&self) -> bool { - (& **self).is_validator() + (&**self).is_validator() } fn network_state(&self) -> Result { - (& **self).network_state() + (&**self).network_state() } fn timestamp(&mut self) -> Timestamp { @@ -674,33 +637,21 @@ impl Externalities for Box { (&mut **self).random_seed() } - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - (&mut **self).local_storage_set(kind, key, value) - } - - fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - (&mut **self).local_storage_clear(kind, key) - } - - fn local_storage_compare_and_set( + fn http_request_start( &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - (&mut **self).local_storage_compare_and_set(kind, key, old_value, new_value) - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - (&mut **self).local_storage_get(kind, key) - } - - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { (&mut **self).http_request_start(method, uri, meta) } - fn http_request_add_header(&mut self, request_id: HttpRequestId, name: &str, value: &str) -> Result<(), ()> { + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { (&mut **self).http_request_add_header(request_id, name, value) } @@ -708,12 +659,16 @@ impl Externalities for Box { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { (&mut **self).http_request_write_body(request_id, chunk, deadline) } - fn http_response_wait(&mut self, ids: &[HttpRequestId], deadline: Option) -> Vec { + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { (&mut **self).http_response_wait(ids, deadline) } @@ -725,7 +680,7 @@ impl Externalities for Box { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { (&mut **self).http_response_read_body(request_id, buffer, deadline) } @@ -743,7 +698,7 @@ impl Externalities for Box { } } -/// An `OffchainExternalities` implementation with limited capabilities. +/// An `*Externalities` implementation with limited capabilities. pub struct LimitedExternalities { capabilities: Capabilities, externalities: T, @@ -752,10 +707,7 @@ pub struct LimitedExternalities { impl LimitedExternalities { /// Create new externalities limited to given `capabilities`. pub fn new(capabilities: Capabilities, externalities: T) -> Self { - Self { - capabilities, - externalities, - } + Self { capabilities, externalities } } /// Check if given capability is allowed. 
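`LimitedExternalities` wraps any implementation and rejects calls outside the granted capability set via `check`. A sketch that confines a worker to HTTP only, using the slice conversion shown above:

use sp_core::offchain::{Capabilities, Capability, LimitedExternalities};

fn http_only<T>(inner: T) -> LimitedExternalities<T> {
    // Any non-HTTP call on the wrapper will panic in `check`.
    let caps: Capabilities = [Capability::Http][..].into();
    LimitedExternalities::new(caps, inner)
}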
@@ -794,38 +746,22 @@ impl Externalities for LimitedExternalities { self.externalities.random_seed() } - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - self.check(Capability::OffchainWorkerDbWrite, "local_storage_set"); - self.externalities.local_storage_set(kind, key, value) - } - - fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - self.check(Capability::OffchainWorkerDbWrite, "local_storage_clear"); - self.externalities.local_storage_clear(kind, key) - } - - fn local_storage_compare_and_set( + fn http_request_start( &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - self.check(Capability::OffchainWorkerDbWrite, "local_storage_compare_and_set"); - self.externalities.local_storage_compare_and_set(kind, key, old_value, new_value) - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - self.check(Capability::OffchainWorkerDbRead, "local_storage_get"); - self.externalities.local_storage_get(kind, key) - } - - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { self.check(Capability::Http, "http_request_start"); self.externalities.http_request_start(method, uri, meta) } - fn http_request_add_header(&mut self, request_id: HttpRequestId, name: &str, value: &str) -> Result<(), ()> { + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { self.check(Capability::Http, "http_request_add_header"); self.externalities.http_request_add_header(request_id, name, value) } @@ -834,13 +770,17 @@ impl Externalities for LimitedExternalities { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { self.check(Capability::Http, "http_request_write_body"); self.externalities.http_request_write_body(request_id, chunk, deadline) } - fn http_response_wait(&mut self, ids: &[HttpRequestId], deadline: Option) -> Vec { + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { self.check(Capability::Http, "http_response_wait"); self.externalities.http_response_wait(ids, deadline) } @@ -854,7 +794,7 @@ impl Externalities for LimitedExternalities { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { self.check(Capability::Http, "http_response_read_body"); self.externalities.http_response_read_body(request_id, buffer, deadline) @@ -878,18 +818,124 @@ impl Externalities for LimitedExternalities { #[cfg(feature = "std")] sp_externalities::decl_extension! { - /// The offchain extension that will be registered at the Substrate externalities. - pub struct OffchainExt(Box); + /// The offchain worker extension that will be registered at the Substrate externalities. + pub struct OffchainWorkerExt(Box); } #[cfg(feature = "std")] -impl OffchainExt { +impl OffchainWorkerExt { /// Create a new instance of `Self`. pub fn new(offchain: O) -> Self { Self(Box::new(offchain)) } } +/// A externalities extension for accessing the Offchain DB. +pub trait DbExternalities: Send { + /// Sets a value in the local storage. + /// + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It _is_ persisted between runs. 
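Callers that used to register `OffchainExt` now register the renamed extension. A test-side sketch, assuming `sp-io`'s `TestExternalities` and the testing helpers below:

use sp_core::offchain::{testing::TestOffchainExt, OffchainWorkerExt};

fn test_externalities() -> sp_io::TestExternalities {
    let (offchain, _state) = TestOffchainExt::new();
    let mut t = sp_io::TestExternalities::default();
    t.register_extension(OffchainWorkerExt::new(offchain));
    t
}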
+ fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]); + + /// Removes a value in the local storage. + /// + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It _is_ persisted between runs. + fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]); + + /// Sets a value in the local storage if it matches current value. + /// + /// Since multiple offchain workers may be running concurrently, to prevent + /// data races use CAS to coordinate between them. + /// + /// Returns `true` if the value has been set, `false` otherwise. + /// + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It _is_ persisted between runs. + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool; + + /// Gets a value from the local storage. + /// + /// If the value does not exist in the storage `None` will be returned. + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It _is_ persisted between runs. + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option>; +} + +impl DbExternalities for Box { + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + (&mut **self).local_storage_set(kind, key, value) + } + + fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { + (&mut **self).local_storage_clear(kind, key) + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + (&mut **self).local_storage_compare_and_set(kind, key, old_value, new_value) + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + (&mut **self).local_storage_get(kind, key) + } +} + +impl DbExternalities for LimitedExternalities { + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + self.check(Capability::OffchainDbWrite, "local_storage_set"); + self.externalities.local_storage_set(kind, key, value) + } + + fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { + self.check(Capability::OffchainDbWrite, "local_storage_clear"); + self.externalities.local_storage_clear(kind, key) + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + self.check(Capability::OffchainDbWrite, "local_storage_compare_and_set"); + self.externalities + .local_storage_compare_and_set(kind, key, old_value, new_value) + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + self.check(Capability::OffchainDbRead, "local_storage_get"); + self.externalities.local_storage_get(kind, key) + } +} + +#[cfg(feature = "std")] +sp_externalities::decl_extension! { + /// The offchain database extension that will be registered at the Substrate externalities. + pub struct OffchainDbExt(Box); +} + +#[cfg(feature = "std")] +impl OffchainDbExt { + /// Create a new instance of `OffchainDbExt`. + pub fn new(offchain: O) -> Self { + Self(Box::new(offchain)) + } +} + /// Abstraction over transaction pool. 
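`local_storage_compare_and_set` is the coordination primitive the docs above describe. A sketch of a lock-free counter written against any `DbExternalities`; the key name is made up:

use sp_core::offchain::{DbExternalities, StorageKind};
use std::convert::TryInto;

fn bump_counter(db: &mut dyn DbExternalities) -> u64 {
    loop {
        let old = db.local_storage_get(StorageKind::PERSISTENT, b"example::counter");
        let current = old
            .as_deref()
            .and_then(|v| v.try_into().ok().map(u64::from_le_bytes))
            .unwrap_or(0);
        let next = current + 1;
        // Retry if another worker raced us between the get and the set.
        if db.local_storage_compare_and_set(
            StorageKind::PERSISTENT,
            b"example::counter",
            old.as_deref(),
            &next.to_le_bytes(),
        ) {
            return next;
        }
    }
}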
/// /// This trait is currently used within the `ExternalitiesExtension` @@ -917,6 +963,14 @@ impl TransactionPoolExt { } } +/// Change to be applied to the offchain worker db in regards to a key. +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub enum OffchainOverlayedChange { + /// Remove the data associated with the key + Remove, + /// Overwrite the value of an associated key + SetValue(Vec), +} #[cfg(test)] mod tests { diff --git a/primitives/core/src/offchain/storage.rs b/primitives/core/src/offchain/storage.rs index 7d7c711ed95f0..ff72006cffd60 100644 --- a/primitives/core/src/offchain/storage.rs +++ b/primitives/core/src/offchain/storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,11 @@ //! In-memory implementation of offchain workers database. -use std::collections::hash_map::{HashMap, Entry}; use crate::offchain::OffchainStorage; -use std::iter::Iterator; +use std::{ + collections::hash_map::{Entry, HashMap}, + iter::Iterator, +}; /// In-memory storage for offchain workers. #[derive(Debug, Clone, Default)] @@ -29,19 +31,19 @@ pub struct InMemOffchainStorage { impl InMemOffchainStorage { /// Consume the offchain storage and iterate over all key value pairs. - pub fn into_iter(self) -> impl Iterator,Vec)> { + pub fn into_iter(self) -> impl Iterator, Vec)> { self.storage.into_iter() } /// Iterate over all key value pairs by reference. - pub fn iter<'a>(&'a self) -> impl Iterator,&'a Vec)> { + pub fn iter(&self) -> impl Iterator, &Vec)> { self.storage.iter() } /// Remove a key and its associated value from the offchain database. pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { let key: Vec = prefix.iter().chain(key).cloned().collect(); - let _ = self.storage.remove(&key); + self.storage.remove(&key); } } @@ -71,10 +73,13 @@ impl OffchainStorage for InMemOffchainStorage { let key = prefix.iter().chain(key).cloned().collect(); match self.storage.entry(key) { - Entry::Vacant(entry) => if old_value.is_none() { - entry.insert(new_value.to_vec()); - true - } else { false }, + Entry::Vacant(entry) => + if old_value.is_none() { + entry.insert(new_value.to_vec()); + true + } else { + false + }, Entry::Occupied(ref mut entry) if Some(entry.get().as_slice()) == old_value => { entry.insert(new_value.to_vec()); true @@ -83,215 +88,3 @@ impl OffchainStorage for InMemOffchainStorage { } } } - - - - -/// Change to be applied to the offchain worker db in regards to a key. -#[derive(Debug,Clone,Hash,Eq,PartialEq)] -pub enum OffchainOverlayedChange { - /// Remove the data associated with the key - Remove, - /// Overwrite the value of an associated key - SetValue(Vec), -} - -/// In-memory storage for offchain workers recoding changes for the actual offchain storage implementation. -#[derive(Debug, Clone)] -pub enum OffchainOverlayedChanges { - /// Writing overlay changes to the offchain worker database is disabled by configuration. - Disabled, - /// Overlay changes can be recorded using the inner collection of this variant, - /// where the identifier is the tuple of `(prefix, key)`. - Enabled(HashMap<(Vec, Vec), OffchainOverlayedChange>), -} - -impl Default for OffchainOverlayedChanges { - fn default() -> Self { - Self::Disabled - } -} - -impl OffchainOverlayedChanges { - /// Create the disabled variant. 
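With `OffchainOverlayedChange` now living in `offchain/mod.rs`, draining recorded changes into a concrete store reduces to a small loop. A sketch over any `OffchainStorage`, assuming the trait exposes a `remove` alongside `set`:

use sp_core::offchain::{OffchainOverlayedChange, OffchainStorage};

fn apply_changes<S: OffchainStorage>(
    store: &mut S,
    changes: impl Iterator<Item = ((Vec<u8>, Vec<u8>), OffchainOverlayedChange)>,
) {
    for ((prefix, key), change) in changes {
        match change {
            OffchainOverlayedChange::SetValue(value) => store.set(&prefix, &key, &value),
            OffchainOverlayedChange::Remove => store.remove(&prefix, &key),
        }
    }
}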
- pub fn disabled() -> Self { - Self::Disabled - } - - /// Create the enabled variant. - pub fn enabled() -> Self { - Self::Enabled(HashMap::new()) - } - - /// Consume the offchain storage and iterate over all key value pairs. - pub fn into_iter(self) -> OffchainOverlayedChangesIntoIter { - OffchainOverlayedChangesIntoIter::new(self) - } - - /// Iterate over all key value pairs by reference. - pub fn iter<'a>(&'a self) -> OffchainOverlayedChangesIter { - OffchainOverlayedChangesIter::new(&self) - } - - /// Drain all elements of changeset. - pub fn drain<'a, 'd>(&'a mut self) -> OffchainOverlayedChangesDrain<'d> where 'a: 'd { - OffchainOverlayedChangesDrain::new(self) - } - - /// Remove a key and its associated value from the offchain database. - pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { - if let Self::Enabled(ref mut storage) = self { - let _ = storage.insert((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::Remove); - } - } - - /// Set the value associated with a key under a prefix to the value provided. - pub fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - if let Self::Enabled(ref mut storage) = self { - let _ = storage.insert((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::SetValue(value.to_vec())); - } - } - - /// Obtain a associated value to the given key in storage with prefix. - pub fn get(&self, prefix: &[u8], key: &[u8]) -> Option { - if let Self::Enabled(ref storage) = self { - let key = (prefix.to_vec(), key.to_vec()); - storage.get(&key).cloned() - } else { - None - } - } -} - -use std::collections::hash_map; - -/// Iterate by reference over the prepared offchain worker storage changes. -pub struct OffchainOverlayedChangesIter<'i> { - inner: Option, Vec), OffchainOverlayedChange>>, -} - -impl<'i> Iterator for OffchainOverlayedChangesIter<'i> { - type Item = (&'i (Vec, Vec), &'i OffchainOverlayedChange); - fn next(&mut self) -> Option { - if let Some(ref mut iter) = self.inner { - iter.next() - } else { - None - } - } -} - -impl<'i> OffchainOverlayedChangesIter<'i> { - /// Create a new iterator based on a refernce to the parent container. - pub fn new(container: &'i OffchainOverlayedChanges) -> Self { - match container { - OffchainOverlayedChanges::Enabled(inner) => Self { - inner: Some(inner.iter()) - }, - OffchainOverlayedChanges::Disabled => Self { inner: None, }, - } - } -} - - -/// Iterate by value over the prepared offchain worker storage changes. -pub struct OffchainOverlayedChangesIntoIter { - inner: Option,Vec),OffchainOverlayedChange>>, -} - -impl Iterator for OffchainOverlayedChangesIntoIter { - type Item = ((Vec, Vec), OffchainOverlayedChange); - fn next(&mut self) -> Option { - if let Some(ref mut iter) = self.inner { - iter.next() - } else { - None - } - } -} - -impl OffchainOverlayedChangesIntoIter { - /// Create a new iterator by consuming the collection. - pub fn new(container: OffchainOverlayedChanges) -> Self { - match container { - OffchainOverlayedChanges::Enabled(inner) => Self { - inner: Some(inner.into_iter()) - }, - OffchainOverlayedChanges::Disabled => Self { inner: None, }, - } - } -} - -/// Iterate over all items while draining them from the collection. 
-pub struct OffchainOverlayedChangesDrain<'d> { - inner: Option, Vec), OffchainOverlayedChange>>, -} - -impl<'d> Iterator for OffchainOverlayedChangesDrain<'d> { - type Item = ((Vec, Vec), OffchainOverlayedChange); - fn next(&mut self) -> Option { - if let Some(ref mut iter) = self.inner { - iter.next() - } else { - None - } - } -} - -impl<'d> OffchainOverlayedChangesDrain<'d> { - /// Create a new iterator by taking a mut reference to the collection, - /// for the lifetime of the created drain iterator. - pub fn new(container: &'d mut OffchainOverlayedChanges) -> Self { - match container { - OffchainOverlayedChanges::Enabled(ref mut inner) => Self { - inner: Some(inner.drain()) - }, - OffchainOverlayedChanges::Disabled => Self { inner: None, }, - } - } -} - - -#[cfg(test)] -mod test { - use super::*; - use super::super::STORAGE_PREFIX; - - #[test] - fn test_drain() { - let mut ooc = OffchainOverlayedChanges::enabled(); - ooc.set(STORAGE_PREFIX,b"kkk", b"vvv"); - let drained = ooc.drain().count(); - assert_eq!(drained, 1); - let leftover = ooc.iter().count(); - assert_eq!(leftover, 0); - - ooc.set(STORAGE_PREFIX, b"a", b"v"); - ooc.set(STORAGE_PREFIX, b"b", b"v"); - ooc.set(STORAGE_PREFIX, b"c", b"v"); - ooc.set(STORAGE_PREFIX, b"d", b"v"); - ooc.set(STORAGE_PREFIX, b"e", b"v"); - assert_eq!(ooc.iter().count(), 5); - } - - #[test] - fn test_accumulated_set_remove_set() { - let mut ooc = OffchainOverlayedChanges::enabled(); - ooc.set(STORAGE_PREFIX, b"ppp", b"qqq"); - ooc.remove(STORAGE_PREFIX, b"ppp"); - // keys are equiv, so it will overwrite the value and the overlay will contain - // one item - assert_eq!(ooc.iter().count(), 1); - - ooc.set(STORAGE_PREFIX, b"ppp", b"rrr"); - let mut iter = ooc.into_iter(); - assert_eq!( - iter.next(), - Some( - ((STORAGE_PREFIX.to_vec(), b"ppp".to_vec()), - OffchainOverlayedChange::SetValue(b"rrr".to_vec())) - ) - ); - assert_eq!(iter.next(), None); - } -} diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index 5d268e4b601c3..d6b3f661b27cb 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,27 +20,30 @@ //! Namely all ExecutionExtensions that allow mocking //! the extra APIs. 
+use crate::{ + offchain::{ + self, + storage::InMemOffchainStorage, + HttpError, + HttpRequestId as RequestId, + HttpRequestStatus as RequestStatus, + OffchainOverlayedChange, + OffchainStorage, + OpaqueNetworkState, + StorageKind, + Timestamp, + TransactionPool, + IpfsRequest, + IpfsRequestId, + IpfsRequestStatus, + IpfsResponse, + }, + OpaquePeerId, +}; use std::{ collections::{BTreeMap, VecDeque}, sync::Arc, }; -use crate::OpaquePeerId; -use crate::offchain::{ - self, - storage::{InMemOffchainStorage, OffchainOverlayedChange, OffchainOverlayedChanges}, - HttpError, - HttpRequestId as RequestId, - HttpRequestStatus as RequestStatus, - IpfsRequest, - IpfsRequestId, - IpfsRequestStatus, - IpfsResponse, - Timestamp, - StorageKind, - OpaqueNetworkState, - TransactionPool, - OffchainStorage, -}; use parking_lot::RwLock; @@ -74,23 +77,32 @@ pub struct TestPersistentOffchainDB { } impl TestPersistentOffchainDB { + const PREFIX: &'static [u8] = b""; + /// Create a new and empty offchain storage db for persistent items pub fn new() -> Self { - Self { - persistent: Arc::new(RwLock::new(InMemOffchainStorage::default())) - } + Self { persistent: Arc::new(RwLock::new(InMemOffchainStorage::default())) } } /// Apply a set of off-chain changes directly to the test backend - pub fn apply_offchain_changes(&mut self, changes: &mut OffchainOverlayedChanges) { + pub fn apply_offchain_changes( + &mut self, + changes: impl Iterator, Vec), OffchainOverlayedChange)>, + ) { let mut me = self.persistent.write(); - for ((_prefix, key), value_operation) in changes.drain() { + for ((_prefix, key), value_operation) in changes { match value_operation { - OffchainOverlayedChange::SetValue(val) => me.set(b"", key.as_slice(), val.as_slice()), - OffchainOverlayedChange::Remove => me.remove(b"", key.as_slice()), + OffchainOverlayedChange::SetValue(val) => + me.set(Self::PREFIX, key.as_slice(), val.as_slice()), + OffchainOverlayedChange::Remove => me.remove(Self::PREFIX, key.as_slice()), } } } + + /// Retrieve a key from the test backend. + pub fn get(&self, key: &[u8]) -> Option> { + OffchainStorage::get(self, Self::PREFIX, key) + } } impl OffchainStorage for TestPersistentOffchainDB { @@ -153,20 +165,17 @@ impl OffchainState { id: u16, expected: PendingRequest, response: impl Into>, - response_headers: impl IntoIterator, + response_headers: impl IntoIterator, ) { match self.requests.get_mut(&RequestId(id)) { None => { panic!("Missing pending request: {:?}.\n\nAll: {:?}", id, self.requests); - } + }, Some(req) => { - assert_eq!( - *req, - expected, - ); + assert_eq!(*req, expected); req.response = Some(response.into()); req.response_headers = response_headers.into_iter().collect(); - } + }, } } @@ -181,8 +190,8 @@ impl OffchainState { /// Add expected HTTP request. /// /// This method can be used to initialize expected HTTP requests and their responses - /// before running the actual code that utilizes them (for instance before calling into runtime). - /// Expected request has to be fulfilled before this struct is dropped, + /// before running the actual code that utilizes them (for instance before calling into + /// runtime). Expected request has to be fulfilled before this struct is dropped, /// the `response` and `response_headers` fields will be used to return results to the callers. /// Requests are expected to be performed in the insertion order. 
pub fn expect_request(&mut self, expected: PendingRequest) { @@ -215,7 +224,9 @@ impl TestOffchainExt { } /// Create new `TestOffchainExt` and a reference to the internal state. - pub fn with_offchain_db(offchain_db: TestPersistentOffchainDB) -> (Self, Arc>) { + pub fn with_offchain_db( + offchain_db: TestPersistentOffchainDB, + ) -> (Self, Arc>) { let (ext, state) = Self::new(); ext.0.write().persistent_storage = offchain_db; (ext, state) @@ -228,10 +239,7 @@ impl offchain::Externalities for TestOffchainExt { } fn network_state(&self) -> Result { - Ok(OpaqueNetworkState { - peer_id: Default::default(), - external_addresses: vec![], - }) + Ok(OpaqueNetworkState { peer_id: Default::default(), external_addresses: vec![] }) } fn timestamp(&mut self) -> Timestamp { @@ -246,53 +254,23 @@ impl offchain::Externalities for TestOffchainExt { self.0.read().seed } - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - let mut state = self.0.write(); - match kind { - StorageKind::LOCAL => state.local_storage.set(b"", key, value), - StorageKind::PERSISTENT => state.persistent_storage.set(b"", key, value), - }; - } - - fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - let mut state = self.0.write(); - match kind { - StorageKind::LOCAL => state.local_storage.remove(b"", key), - StorageKind::PERSISTENT => state.persistent_storage.remove(b"", key), - }; - } - - fn local_storage_compare_and_set( + fn http_request_start( &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8] - ) -> bool { - let mut state = self.0.write(); - match kind { - StorageKind::LOCAL => state.local_storage.compare_and_set(b"", key, old_value, new_value), - StorageKind::PERSISTENT => state.persistent_storage.compare_and_set(b"", key, old_value, new_value), - } - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - let state = self.0.read(); - match kind { - StorageKind::LOCAL => state.local_storage.get(b"", key), - StorageKind::PERSISTENT => state.persistent_storage.get(b"", key), - } - } - - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { let mut state = self.0.write(); let id = RequestId(state.requests.len() as u16); - state.requests.insert(id.clone(), PendingRequest { - method: method.into(), - uri: uri.into(), - meta: meta.into(), - ..Default::default() - }); + state.requests.insert( + id, + PendingRequest { + method: method.into(), + uri: uri.into(), + meta: meta.into(), + ..Default::default() + }, + ); Ok(id) } @@ -315,7 +293,7 @@ impl offchain::Externalities for TestOffchainExt { &mut self, request_id: RequestId, chunk: &[u8], - _deadline: Option + _deadline: Option, ) -> Result<(), HttpError> { let mut state = self.0.write(); @@ -342,12 +320,15 @@ impl offchain::Externalities for TestOffchainExt { ) -> Vec { let state = self.0.read(); - ids.iter().map(|id| match state.requests.get(id) { - Some(req) if req.response.is_none() => - panic!("No `response` provided for request with id: {:?}", id), - None => RequestStatus::Invalid, - _ => RequestStatus::Finished(200), - }).collect() + ids.iter() + .map(|id| match state.requests.get(id) { + Some(req) if req.response.is_none() => { + panic!("No `response` provided for request with id: {:?}", id) + }, + None => RequestStatus::Invalid, + _ => RequestStatus::Finished(200), + }) + .collect() } fn http_response_headers(&mut self, request_id: RequestId) -> Vec<(Vec, Vec)> { @@ -367,11 
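A sketch of seeding one expected request, following the `expect_request` flow documented above; the URI and response body are made up, and the field set assumes the current shape of `PendingRequest`:

use sp_core::offchain::testing::{PendingRequest, TestOffchainExt};

fn seed_expected_request() {
    let (_offchain, state) = TestOffchainExt::new();
    state.write().expect_request(PendingRequest {
        method: "GET".into(),
        uri: "https://example.com/price".into(), // hypothetical endpoint
        response: Some(b"{\"usd\":1}".to_vec()),
        sent: true,
        ..Default::default()
    });
}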
+348,12 @@ impl offchain::Externalities for TestOffchainExt { &mut self, request_id: RequestId, buffer: &mut [u8], - _deadline: Option + _deadline: Option, ) -> Result { let mut state = self.0.write(); if let Some(req) = state.requests.get_mut(&request_id) { - let response = req.response + let response = req + .response .as_mut() .unwrap_or_else(|| panic!("No response provided for request: {:?}", request_id)); @@ -415,6 +397,48 @@ impl offchain::Externalities for TestOffchainExt { } } +impl offchain::DbExternalities for TestOffchainExt { + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + let mut state = self.0.write(); + match kind { + StorageKind::LOCAL => state.local_storage.set(b"", key, value), + StorageKind::PERSISTENT => state.persistent_storage.set(b"", key, value), + }; + } + + fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { + let mut state = self.0.write(); + match kind { + StorageKind::LOCAL => state.local_storage.remove(b"", key), + StorageKind::PERSISTENT => state.persistent_storage.remove(b"", key), + }; + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + let mut state = self.0.write(); + match kind { + StorageKind::LOCAL => + state.local_storage.compare_and_set(b"", key, old_value, new_value), + StorageKind::PERSISTENT => + state.persistent_storage.compare_and_set(b"", key, old_value, new_value), + } + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + let state = self.0.read(); + match kind { + StorageKind::LOCAL => state.local_storage.get(TestPersistentOffchainDB::PREFIX, key), + StorageKind::PERSISTENT => state.persistent_storage.get(key), + } + } +} + /// The internal state of the fake transaction pool. #[derive(Default)] pub struct PoolState { diff --git a/primitives/core/src/sandbox.rs b/primitives/core/src/sandbox.rs index 4cb5bd41d5826..acc3fda5e9b17 100644 --- a/primitives/core/src/sandbox.rs +++ b/primitives/core/src/sandbox.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,26 +17,24 @@ //! Definition of a sandbox environment. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::vec::Vec; /// Error error that can be returned from host function. -#[derive(Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Encode, Decode, crate::RuntimeDebug)] pub struct HostError; /// Describes an entity to define or import into the environment. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub enum ExternEntity { /// Function that is specified by an index in a default table of /// a module that creates the sandbox. - #[codec(index = "1")] + #[codec(index = 1)] Function(u32), /// Linear memory that is specified by some identifier returned by sandbox /// module upon creation new sandboxed memory. - #[codec(index = "2")] + #[codec(index = 2)] Memory(u32), } @@ -44,8 +42,7 @@ pub enum ExternEntity { /// /// Each entry has a two-level name and description of an entity /// being defined. 
-#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub struct Entry { /// Module name of which corresponding entity being defined. pub module_name: Vec, @@ -56,8 +53,7 @@ pub struct Entry { } /// Definition of runtime that could be used by sandboxed code. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub struct EnvironmentDefinition { /// Vector of all entries in the environment definition. pub entries: Vec, @@ -91,8 +87,8 @@ pub const ERR_EXECUTION: u32 = -3i32 as u32; #[cfg(test)] mod tests { use super::*; - use std::fmt; use codec::Codec; + use std::fmt; fn roundtrip(s: S) { let encoded = s.encode(); @@ -101,28 +97,22 @@ mod tests { #[test] fn env_def_roundtrip() { - roundtrip(EnvironmentDefinition { - entries: vec![], - }); + roundtrip(EnvironmentDefinition { entries: vec![] }); roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"kernel"[..].into(), - field_name: b"memory"[..].into(), - entity: ExternEntity::Memory(1337), - }, - ], + entries: vec![Entry { + module_name: b"kernel"[..].into(), + field_name: b"memory"[..].into(), + entity: ExternEntity::Memory(1337), + }], }); roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"env"[..].into(), - field_name: b"abort"[..].into(), - entity: ExternEntity::Function(228), - }, - ], + entries: vec![Entry { + module_name: b"env"[..].into(), + field_name: b"abort"[..].into(), + entity: ExternEntity::Function(228), + }], }); } } diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 9a757c8900542..4787c2d9d13ee 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,34 +21,39 @@ //! Note: `CHAIN_CODE_LENGTH` must be equal to `crate::crypto::JUNCTION_ID_LEN` //! for this to work. 
// end::description[] +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; #[cfg(feature = "full_crypto")] -use sp_std::vec::Vec; +use crate::crypto::{DeriveJunction, Infallible, Pair as TraitPair, SecretStringError}; +#[cfg(feature = "std")] +use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] -use schnorrkel::{signing_context, ExpansionMode, Keypair, SecretKey, MiniSecretKey, PublicKey, - derive::{Derivation, ChainCode, CHAIN_CODE_LENGTH} +use schnorrkel::{ + derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH}, + signing_context, ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey, }; +#[cfg(feature = "full_crypto")] +use sp_std::vec::Vec; #[cfg(feature = "std")] use std::convert::TryFrom; #[cfg(feature = "std")] use substrate_bip39::mini_secret_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "full_crypto")] -use crate::crypto::{ - Pair as TraitPair, DeriveJunction, Infallible, SecretStringError -}; -#[cfg(feature = "std")] -use crate::crypto::Ss58Codec; -use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; -use crate::hash::{H256, H512}; -use codec::{Encode, Decode}; +use crate::{ + crypto::{ + CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, + UncheckedFrom, + }, + hash::{H256, H512}, +}; +use codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; use sp_std::ops::Deref; -#[cfg(feature = "std")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "full_crypto")] use schnorrkel::keys::{MINI_SECRET_KEY_LENGTH, SECRET_KEY_LENGTH}; +#[cfg(feature = "std")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; // signing context @@ -60,7 +65,20 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25"); /// An Schnorrkel/Ristretto x25519 ("sr25519") public key. #[cfg_attr(feature = "full_crypto", derive(Hash))] -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Encode, Decode, Default, PassByInner)] +#[derive( + PartialEq, + Eq, + PartialOrd, + Ord, + Clone, + Copy, + Encode, + Decode, + Default, + PassByInner, + MaxEncodedLen, + TypeInfo, +)] pub struct Public(pub [u8; 32]); /// An Schnorrkel/Ristretto x25519 ("sr25519") key pair. @@ -73,7 +91,7 @@ impl Clone for Pair { Pair(schnorrkel::Keypair { public: self.0.public, secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) - .expect("key is always the correct size; qed") + .expect("key is always the correct size; qed"), }) } } @@ -173,14 +191,20 @@ impl sp_std::fmt::Debug for Public { #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&self.to_ss58check()) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { Public::from_ss58check(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e))) } @@ -189,7 +213,7 @@ impl<'de> Deserialize<'de> for Public { /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. 
/// /// Instead of importing it for the local module, alias it to be available as a public type -#[derive(Encode, Decode, PassByInner)] +#[derive(Encode, Decode, PassByInner, TypeInfo)] pub struct Signature(pub [u8; 64]); impl sp_std::convert::TryFrom<&[u8]> for Signature { @@ -208,18 +232,24 @@ impl sp_std::convert::TryFrom<&[u8]> for Signature { #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&hex::encode(self)) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { let signature_hex = hex::decode(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Ok(Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?) + Signature::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e))) } } @@ -347,7 +377,7 @@ impl Derive for Public { /// /// `None` if there are any hard junctions in there. #[cfg(feature = "std")] - fn derive>(&self, path: Iter) -> Option { + fn derive>(&self, path: Iter) -> Option { let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; for j in path { match j { @@ -448,7 +478,7 @@ impl AsRef for Pair { /// Derive a single hard junction. #[cfg(feature = "full_crypto")] fn derive_hard_junction(secret: &SecretKey, cc: &[u8; CHAIN_CODE_LENGTH]) -> MiniSecretKey { - secret.hard_derive_mini_secret_key(Some(ChainCode(cc.clone())), b"").0 + secret.hard_derive_mini_secret_key(Some(ChainCode(*cc)), b"").0 } /// The raw secret seed, which can be used to recreate the `Pair`. @@ -468,8 +498,7 @@ impl TraitPair for Pair { /// /// A MiniSecretKey is literally what Ed25519 calls a SecretKey, which is just 32 random bytes. fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]) - .expect("32 bytes can always build a key; qed") + Self::from_seed_slice(&seed[..]).expect("32 bytes can always build a key; qed") } /// Get the public key. @@ -485,21 +514,17 @@ impl TraitPair for Pair { /// You should never need to use this; generate(), generate_with_phrase(), from_phrase() fn from_seed_slice(seed: &[u8]) -> Result { match seed.len() { - MINI_SECRET_KEY_LENGTH => { - Ok(Pair( - MiniSecretKey::from_bytes(seed) - .map_err(|_| SecretStringError::InvalidSeed)? - .expand_to_keypair(ExpansionMode::Ed25519) - )) - } - SECRET_KEY_LENGTH => { - Ok(Pair( - SecretKey::from_bytes(seed) - .map_err(|_| SecretStringError::InvalidSeed)? - .to_keypair() - )) - } - _ => Err(SecretStringError::InvalidSeedLength) + MINI_SECRET_KEY_LENGTH => Ok(Pair( + MiniSecretKey::from_bytes(seed) + .map_err(|_| SecretStringError::InvalidSeed)? + .expand_to_keypair(ExpansionMode::Ed25519), + )), + SECRET_KEY_LENGTH => Ok(Pair( + SecretKey::from_bytes(seed) + .map_err(|_| SecretStringError::InvalidSeed)? 
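For orientation, the basic sign/verify round trip over the `Public`, `Signature` and `Pair` types above; a sketch assuming the `full_crypto` and `std` features:

use sp_core::{sr25519, Pair};

fn sign_and_verify() {
    let (pair, _seed) = sr25519::Pair::generate();
    let message = b"payload";
    let signature = pair.sign(message);
    assert!(sr25519::Pair::verify(&signature, message, &pair.public()));
}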
+ .to_keypair(), + )), + _ => Err(SecretStringError::InvalidSeedLength), } } #[cfg(feature = "std")] @@ -508,20 +533,20 @@ impl TraitPair for Pair { let phrase = mnemonic.phrase(); let (pair, seed) = Self::from_phrase(phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) + (pair, phrase.to_owned(), seed) } #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { Mnemonic::from_phrase(phrase, Language::English) .map_err(|_| SecretStringError::InvalidPhrase) .map(|m| Self::from_entropy(m.entropy(), password)) } - fn derive>(&self, + fn derive>( + &self, path: Iter, seed: Option, ) -> Result<(Pair, Option), Self::DeriveError> { @@ -529,17 +554,22 @@ impl TraitPair for Pair { if let Ok(msk) = MiniSecretKey::from_bytes(&s) { if msk.expand(ExpansionMode::Ed25519) == self.0.secret { Some(msk) - } else { None } - } else { None } - } else { None }; + } else { + None + } + } else { + None + } + } else { + None + }; let init = self.0.secret.clone(); let (result, seed) = path.fold((init, seed), |(acc, acc_seed), j| match (j, acc_seed) { - (DeriveJunction::Soft(cc), _) => - (acc.derived_key_simple(ChainCode(cc), &[]).0, None), + (DeriveJunction::Soft(cc), _) => (acc.derived_key_simple(ChainCode(cc), &[]).0, None), (DeriveJunction::Hard(cc), maybe_seed) => { let seed = derive_hard_junction(&acc, &cc); (seed.expand(ExpansionMode::Ed25519), maybe_seed.map(|_| seed)) - } + }, }); Ok((Self(result.into()), seed.map(|s| MiniSecretKey::to_bytes(&s)))) } @@ -593,9 +623,9 @@ impl Pair { // Match both schnorrkel 0.1.1 and 0.8.0+ signatures, supporting both wallets // that have not been upgraded and those that have. match PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pk) => pk.verify_simple_preaudit_deprecated( - SIGNING_CTX, message.as_ref(), &sig.0[..], - ).is_ok(), + Ok(pk) => pk + .verify_simple_preaudit_deprecated(SIGNING_CTX, message.as_ref(), &sig.0[..]) + .is_ok(), Err(_) => false, } } @@ -639,20 +669,16 @@ pub fn verify_batch( for signature in signatures { match schnorrkel::Signature::from_bytes(signature.as_ref()) { Ok(s) => sr_signatures.push(s), - Err(_) => return false + Err(_) => return false, }; } - let mut messages: Vec = messages.into_iter().map( - |msg| signing_context(SIGNING_CTX).bytes(msg) - ).collect(); + let mut messages: Vec = messages + .into_iter() + .map(|msg| signing_context(SIGNING_CTX).bytes(msg)) + .collect(); - schnorrkel::verify_batch( - &mut messages, - &sr_signatures, - &sr_pub_keys, - true, - ).is_ok() + schnorrkel::verify_batch(&mut messages, &sr_signatures, &sr_pub_keys, true).is_ok() } #[cfg(test)] @@ -682,7 +708,9 @@ mod compatibility_test { #[test] fn verify_known_old_message_should_work() { - let public = Public::from_raw(hex!("b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918")); + let public = Public::from_raw(hex!( + "b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918" + )); // signature generated by the 1.1 version with the same ^^ public key. 
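The derive logic above is what the `//hard` and `/soft` path syntax resolves to; a sketch using the std-only string notation:

use sp_core::{sr25519, Pair};

fn derivation_demo() {
    let root = sr25519::Pair::from_string("//Alice", None).unwrap();
    let soft = sr25519::Pair::from_string("//Alice/0", None).unwrap();
    // A soft-derived public key is also derivable from the parent's
    // public key alone, which is what `Public::derive` implements.
    assert_ne!(root.public(), soft.public());
}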
let signature = Signature::from_raw(hex!( "5a9755f069939f45d96aaf125cf5ce7ba1db998686f87f2fb3cbdea922078741a73891ba265f70c31436e18a9acd14d189d73c12317ab6c313285cd938453202" @@ -696,7 +724,7 @@ mod compatibility_test { #[cfg(test)] mod test { use super::*; - use crate::crypto::{Ss58Codec, DEV_PHRASE, DEV_ADDRESS}; + use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; use hex_literal::hex; use serde_json; @@ -704,10 +732,14 @@ mod test { fn default_phrase_should_be_used() { assert_eq!( Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), ); assert_eq!( - Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).as_ref().map(Pair::public), + Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None) + .as_ref() + .map(Pair::public), Pair::from_string("/Alice", None).as_ref().map(Pair::public) ); } @@ -762,7 +794,7 @@ mod test { "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); let path = Some(DeriveJunction::soft(1)); - let pair_1 = pair.derive(path.clone().into_iter(), None).unwrap().0; + let pair_1 = pair.derive(path.into_iter(), None).unwrap().0; let public_1 = pair.public().derive(path.into_iter()).unwrap(); assert_eq!(pair_1.public(), public_1); } @@ -850,12 +882,13 @@ mod test { #[test] fn verify_from_old_wasm_works() { - // The values in this test case are compared to the output of `node-test.js` in schnorrkel-js. + // The values in this test case are compared to the output of `node-test.js` in + // schnorrkel-js. // // This is to make sure that the wasm library is compatible. - let pk = Pair::from_seed( - &hex!("0000000000000000000000000000000000000000000000000000000000000000") - ); + let pk = Pair::from_seed(&hex!( + "0000000000000000000000000000000000000000000000000000000000000000" + )); let public = pk.public(); let js_signature = Signature::from_raw(hex!( "28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00" @@ -879,7 +912,7 @@ mod test { #[test] fn signature_serialization_doesnt_panic() { fn deserialize_signature(text: &str) -> Result { - Ok(serde_json::from_str(text)?) + serde_json::from_str(text) } assert!(deserialize_signature("Not valid json.").is_err()); assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index cee8bec22aa08..a7fff0def83f2 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,7 @@ use crate::crypto::KeyTypeId; pub const ED25519: KeyTypeId = KeyTypeId(*b"ed25"); /// Key type for generic Sr 25519 key. pub const SR25519: KeyTypeId = KeyTypeId(*b"sr25"); -/// Key type for generic Sr 25519 key. +/// Key type for generic ECDSA key. 
pub const ECDSA: KeyTypeId = KeyTypeId(*b"ecds"); /// Macro for exporting functions from wasm in with the expected signature for using it with the @@ -143,6 +143,13 @@ impl TaskExecutor { } } +#[cfg(feature = "std")] +impl Default for TaskExecutor { + fn default() -> Self { + Self::new() + } +} + #[cfg(feature = "std")] impl crate::traits::SpawnNamed for TaskExecutor { fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { @@ -152,3 +159,17 @@ impl crate::traits::SpawnNamed for TaskExecutor { self.0.spawn_ok(future); } } + +#[cfg(feature = "std")] +impl crate::traits::SpawnEssentialNamed for TaskExecutor { + fn spawn_essential_blocking( + &self, + _: &'static str, + future: futures::future::BoxFuture<'static, ()>, + ) { + self.0.spawn_ok(future); + } + fn spawn_essential(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { + self.0.spawn_ok(future); + } +} diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 406dba533899b..47639f9d87ba6 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,15 +26,15 @@ use std::{ pub use sp_externalities::{Externalities, ExternalitiesExt}; /// Code execution engine. -pub trait CodeExecutor: Sized + Send + Sync + CallInWasm + Clone + 'static { +pub trait CodeExecutor: Sized + Send + Sync + ReadRuntimeVersion + Clone + 'static { /// Externalities error type. - type Error: Display + Debug + Send + 'static; + type Error: Display + Debug + Send + Sync + 'static; /// Call a given method in the runtime. Returns a tuple of the result (either the output data /// or an execution error) together with a `bool`, which is true if native execution was used. fn call< R: codec::Codec + PartialEq, - NC: FnOnce() -> Result + UnwindSafe, + NC: FnOnce() -> Result> + UnwindSafe, >( &self, ext: &mut dyn Externalities, @@ -51,14 +51,14 @@ pub trait FetchRuntimeCode { /// Fetch the runtime `:code`. /// /// If the `:code` could not be found/not available, `None` should be returned. - fn fetch_runtime_code<'a>(&'a self) -> Option>; + fn fetch_runtime_code(&self) -> Option>; } /// Wrapper to use a `u8` slice or `Vec` as [`FetchRuntimeCode`]. pub struct WrappedRuntimeCode<'a>(pub std::borrow::Cow<'a, [u8]>); impl<'a> FetchRuntimeCode for WrappedRuntimeCode<'a> { - fn fetch_runtime_code<'b>(&'b self) -> Option> { + fn fetch_runtime_code(&self) -> Option> { Some(self.0.as_ref().into()) } } @@ -67,7 +67,7 @@ impl<'a> FetchRuntimeCode for WrappedRuntimeCode<'a> { pub struct NoneFetchRuntimeCode; impl FetchRuntimeCode for NoneFetchRuntimeCode { - fn fetch_runtime_code<'a>(&'a self) -> Option> { + fn fetch_runtime_code(&self) -> Option> { None } } @@ -99,16 +99,12 @@ impl<'a> RuntimeCode<'a> { /// /// This is only useful for tests that don't want to execute any code. 
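`TaskExecutor` gains a `Default` impl and now also implements `SpawnEssentialNamed`; a sketch of spawning a named task in tests:

use sp_core::{testing::TaskExecutor, traits::SpawnNamed};

fn spawn_demo() {
    let executor = TaskExecutor::default();
    // The name only labels the task for tracing; the future runs on
    // the executor's internal futures thread pool.
    executor.spawn("test-task", Box::pin(async {}));
}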
pub fn empty() -> Self { - Self { - code_fetcher: &NoneFetchRuntimeCode, - hash: Vec::new(), - heap_pages: None, - } + Self { code_fetcher: &NoneFetchRuntimeCode, hash: Vec::new(), heap_pages: None } } } impl<'a> FetchRuntimeCode for RuntimeCode<'a> { - fn fetch_runtime_code<'b>(&'b self) -> Option> { + fn fetch_runtime_code(&self) -> Option> { self.code_fetcher.fetch_runtime_code() } } @@ -123,53 +119,42 @@ impl std::fmt::Display for CodeNotFound { } } -/// `Allow` or `Disallow` missing host functions when instantiating a WASM blob. -#[derive(Clone, Copy, Debug)] -pub enum MissingHostFunctions { - /// Any missing host function will be replaced by a stub that returns an error when - /// being called. - Allow, - /// Any missing host function will result in an error while instantiating the WASM blob, - Disallow, -} - -impl MissingHostFunctions { - /// Are missing host functions allowed? - pub fn allowed(self) -> bool { - matches!(self, Self::Allow) - } -} - -/// Something that can call a method in a WASM blob. -pub trait CallInWasm: Send + Sync { - /// Call the given `method` in the given `wasm_blob` using `call_data` (SCALE encoded arguments) - /// to decode the arguments for the method. +/// A trait that allows reading version information from the binary. +pub trait ReadRuntimeVersion: Send + Sync { + /// Reads the runtime version information from the given wasm code. + /// + /// The version information may be embedded into the wasm binary itself. If it is not present, + /// then this function may fallback to the legacy way of reading the version. + /// + /// The legacy mechanism involves instantiating the passed wasm runtime and calling + /// `Core_version` on it. This is a very expensive operation. /// - /// Returns the SCALE encoded return value of the method. + /// `ext` is only needed in case the calling into runtime happens. Otherwise it is ignored. /// - /// # Note + /// Compressed wasm blobs are supported and will be decompressed if needed. If uncompression + /// fails, the error is returned. /// - /// If `code_hash` is `Some(_)` the `wasm_code` module and instance will be cached internally, - /// otherwise it is thrown away after the call. - fn call_in_wasm( + /// # Errors + /// + /// If the version information present in binary, but is corrupted - returns an error. + /// + /// Otherwise, if there is no version information present, and calling into the runtime takes + /// place, then an error would be returned if `Core_version` is not provided. + fn read_runtime_version( &self, wasm_code: &[u8], - code_hash: Option>, - method: &str, - call_data: &[u8], ext: &mut dyn Externalities, - missing_host_functions: MissingHostFunctions, ) -> Result, String>; } sp_externalities::decl_extension! { - /// The call-in-wasm extension to register/retrieve from the externalities. - pub struct CallInWasmExt(Box); + /// An extension that provides functionality to read version information from a given wasm blob. + pub struct ReadRuntimeVersionExt(Box); } -impl CallInWasmExt { - /// Creates a new instance of `Self`. - pub fn new(inner: T) -> Self { +impl ReadRuntimeVersionExt { + /// Creates a new instance of the extension given a version determinator instance. + pub fn new(inner: T) -> Self { Self(Box::new(inner)) } } @@ -205,7 +190,7 @@ sp_externalities::decl_extension! { pub struct RuntimeSpawnExt(Box); } -/// Something that can spawn futures (blocking and non-blocking) with an assigned name. +/// Something that can spawn tasks (blocking and non-blocking) with an assigned name. 
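To make the new trait's shape concrete, a stub that never finds a version; a real implementation decodes the version embedded in the wasm binary or falls back to instantiating the blob and calling `Core_version`, as the doc comments above describe:

use sp_core::traits::{Externalities, ReadRuntimeVersion};

struct NoVersion;

impl ReadRuntimeVersion for NoVersion {
    fn read_runtime_version(
        &self,
        _wasm_code: &[u8],
        _ext: &mut dyn Externalities,
    ) -> Result<Vec<u8>, String> {
        // A real implementation returns the SCALE-encoded `RuntimeVersion`.
        Err("no version information available".into())
    }
}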
#[dyn_clonable::clonable] pub trait SpawnNamed: Clone + Send + Sync { /// Spawn the given blocking future. @@ -227,3 +212,36 @@ impl SpawnNamed for Box { (**self).spawn(name, future) } } + +/// Something that can spawn essential tasks (blocking and non-blocking) with an assigned name. +/// +/// Essential tasks are special tasks that should take down the node when they end. +#[dyn_clonable::clonable] +pub trait SpawnEssentialNamed: Clone + Send + Sync { + /// Spawn the given blocking future. + /// + /// The given `name` is used to identify the future in tracing. + fn spawn_essential_blocking( + &self, + name: &'static str, + future: futures::future::BoxFuture<'static, ()>, + ); + /// Spawn the given non-blocking future. + /// + /// The given `name` is used to identify the future in tracing. + fn spawn_essential(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>); +} + +impl SpawnEssentialNamed for Box { + fn spawn_essential_blocking( + &self, + name: &'static str, + future: futures::future::BoxFuture<'static, ()>, + ) { + (**self).spawn_essential_blocking(name, future) + } + + fn spawn_essential(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) { + (**self).spawn_essential(name, future) + } +} diff --git a/primitives/core/src/u32_trait.rs b/primitives/core/src/u32_trait.rs index 6f73e1f6ba719..37837e7c0548c 100644 --- a/primitives/core/src/u32_trait.rs +++ b/primitives/core/src/u32_trait.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,221 +24,547 @@ pub trait Value { } /// Type representing the value 0 for the `Value` trait. -pub struct _0; impl Value for _0 { const VALUE: u32 = 0; } +pub struct _0; +impl Value for _0 { + const VALUE: u32 = 0; +} /// Type representing the value 1 for the `Value` trait. -pub struct _1; impl Value for _1 { const VALUE: u32 = 1; } +pub struct _1; +impl Value for _1 { + const VALUE: u32 = 1; +} /// Type representing the value 2 for the `Value` trait. -pub struct _2; impl Value for _2 { const VALUE: u32 = 2; } +pub struct _2; +impl Value for _2 { + const VALUE: u32 = 2; +} /// Type representing the value 3 for the `Value` trait. -pub struct _3; impl Value for _3 { const VALUE: u32 = 3; } +pub struct _3; +impl Value for _3 { + const VALUE: u32 = 3; +} /// Type representing the value 4 for the `Value` trait. -pub struct _4; impl Value for _4 { const VALUE: u32 = 4; } +pub struct _4; +impl Value for _4 { + const VALUE: u32 = 4; +} /// Type representing the value 5 for the `Value` trait. -pub struct _5; impl Value for _5 { const VALUE: u32 = 5; } +pub struct _5; +impl Value for _5 { + const VALUE: u32 = 5; +} /// Type representing the value 6 for the `Value` trait. -pub struct _6; impl Value for _6 { const VALUE: u32 = 6; } +pub struct _6; +impl Value for _6 { + const VALUE: u32 = 6; +} /// Type representing the value 7 for the `Value` trait. -pub struct _7; impl Value for _7 { const VALUE: u32 = 7; } +pub struct _7; +impl Value for _7 { + const VALUE: u32 = 7; +} /// Type representing the value 8 for the `Value` trait. -pub struct _8; impl Value for _8 { const VALUE: u32 = 8; } +pub struct _8; +impl Value for _8 { + const VALUE: u32 = 8; +} /// Type representing the value 9 for the `Value` trait. 
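// Editorial sketch: `SpawnEssentialNamed` is object safe and, as shown above, implemented
// for `Box<dyn SpawnEssentialNamed>`, so a service can hold a type-erased essential
// spawner. `EssentialTasks` and the task name are invented for illustration.
struct EssentialTasks {
	spawner: Box<dyn sp_core::traits::SpawnEssentialNamed>,
}

impl EssentialTasks {
	fn start(&self) {
		// If this future ever completes, the node hosting it is expected to shut down.
		self.spawner
			.spawn_essential("network-worker", Box::pin(futures::future::pending::<()>()));
	}
}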
-pub struct _9; impl Value for _9 { const VALUE: u32 = 9; } +pub struct _9; +impl Value for _9 { + const VALUE: u32 = 9; +} /// Type representing the value 10 for the `Value` trait. -pub struct _10; impl Value for _10 { const VALUE: u32 = 10; } +pub struct _10; +impl Value for _10 { + const VALUE: u32 = 10; +} /// Type representing the value 11 for the `Value` trait. -pub struct _11; impl Value for _11 { const VALUE: u32 = 11; } +pub struct _11; +impl Value for _11 { + const VALUE: u32 = 11; +} /// Type representing the value 12 for the `Value` trait. -pub struct _12; impl Value for _12 { const VALUE: u32 = 12; } +pub struct _12; +impl Value for _12 { + const VALUE: u32 = 12; +} /// Type representing the value 13 for the `Value` trait. -pub struct _13; impl Value for _13 { const VALUE: u32 = 13; } +pub struct _13; +impl Value for _13 { + const VALUE: u32 = 13; +} /// Type representing the value 14 for the `Value` trait. -pub struct _14; impl Value for _14 { const VALUE: u32 = 14; } +pub struct _14; +impl Value for _14 { + const VALUE: u32 = 14; +} /// Type representing the value 15 for the `Value` trait. -pub struct _15; impl Value for _15 { const VALUE: u32 = 15; } +pub struct _15; +impl Value for _15 { + const VALUE: u32 = 15; +} /// Type representing the value 16 for the `Value` trait. -pub struct _16; impl Value for _16 { const VALUE: u32 = 16; } +pub struct _16; +impl Value for _16 { + const VALUE: u32 = 16; +} /// Type representing the value 17 for the `Value` trait. -pub struct _17; impl Value for _17 { const VALUE: u32 = 17; } +pub struct _17; +impl Value for _17 { + const VALUE: u32 = 17; +} /// Type representing the value 18 for the `Value` trait. -pub struct _18; impl Value for _18 { const VALUE: u32 = 18; } +pub struct _18; +impl Value for _18 { + const VALUE: u32 = 18; +} /// Type representing the value 19 for the `Value` trait. -pub struct _19; impl Value for _19 { const VALUE: u32 = 19; } +pub struct _19; +impl Value for _19 { + const VALUE: u32 = 19; +} /// Type representing the value 20 for the `Value` trait. -pub struct _20; impl Value for _20 { const VALUE: u32 = 20; } +pub struct _20; +impl Value for _20 { + const VALUE: u32 = 20; +} /// Type representing the value 21 for the `Value` trait. -pub struct _21; impl Value for _21 { const VALUE: u32 = 21; } +pub struct _21; +impl Value for _21 { + const VALUE: u32 = 21; +} /// Type representing the value 22 for the `Value` trait. -pub struct _22; impl Value for _22 { const VALUE: u32 = 22; } +pub struct _22; +impl Value for _22 { + const VALUE: u32 = 22; +} /// Type representing the value 23 for the `Value` trait. -pub struct _23; impl Value for _23 { const VALUE: u32 = 23; } +pub struct _23; +impl Value for _23 { + const VALUE: u32 = 23; +} /// Type representing the value 24 for the `Value` trait. -pub struct _24; impl Value for _24 { const VALUE: u32 = 24; } +pub struct _24; +impl Value for _24 { + const VALUE: u32 = 24; +} /// Type representing the value 25 for the `Value` trait. -pub struct _25; impl Value for _25 { const VALUE: u32 = 25; } +pub struct _25; +impl Value for _25 { + const VALUE: u32 = 25; +} /// Type representing the value 26 for the `Value` trait. -pub struct _26; impl Value for _26 { const VALUE: u32 = 26; } +pub struct _26; +impl Value for _26 { + const VALUE: u32 = 26; +} /// Type representing the value 27 for the `Value` trait. 
-pub struct _27; impl Value for _27 { const VALUE: u32 = 27; } +pub struct _27; +impl Value for _27 { + const VALUE: u32 = 27; +} /// Type representing the value 28 for the `Value` trait. -pub struct _28; impl Value for _28 { const VALUE: u32 = 28; } +pub struct _28; +impl Value for _28 { + const VALUE: u32 = 28; +} /// Type representing the value 29 for the `Value` trait. -pub struct _29; impl Value for _29 { const VALUE: u32 = 29; } +pub struct _29; +impl Value for _29 { + const VALUE: u32 = 29; +} /// Type representing the value 30 for the `Value` trait. -pub struct _30; impl Value for _30 { const VALUE: u32 = 30; } +pub struct _30; +impl Value for _30 { + const VALUE: u32 = 30; +} /// Type representing the value 31 for the `Value` trait. -pub struct _31; impl Value for _31 { const VALUE: u32 = 31; } +pub struct _31; +impl Value for _31 { + const VALUE: u32 = 31; +} /// Type representing the value 32 for the `Value` trait. -pub struct _32; impl Value for _32 { const VALUE: u32 = 32; } +pub struct _32; +impl Value for _32 { + const VALUE: u32 = 32; +} /// Type representing the value 33 for the `Value` trait. -pub struct _33; impl Value for _33 { const VALUE: u32 = 33; } +pub struct _33; +impl Value for _33 { + const VALUE: u32 = 33; +} /// Type representing the value 34 for the `Value` trait. -pub struct _34; impl Value for _34 { const VALUE: u32 = 34; } +pub struct _34; +impl Value for _34 { + const VALUE: u32 = 34; +} /// Type representing the value 35 for the `Value` trait. -pub struct _35; impl Value for _35 { const VALUE: u32 = 35; } +pub struct _35; +impl Value for _35 { + const VALUE: u32 = 35; +} /// Type representing the value 36 for the `Value` trait. -pub struct _36; impl Value for _36 { const VALUE: u32 = 36; } +pub struct _36; +impl Value for _36 { + const VALUE: u32 = 36; +} /// Type representing the value 37 for the `Value` trait. -pub struct _37; impl Value for _37 { const VALUE: u32 = 37; } +pub struct _37; +impl Value for _37 { + const VALUE: u32 = 37; +} /// Type representing the value 38 for the `Value` trait. -pub struct _38; impl Value for _38 { const VALUE: u32 = 38; } +pub struct _38; +impl Value for _38 { + const VALUE: u32 = 38; +} /// Type representing the value 39 for the `Value` trait. -pub struct _39; impl Value for _39 { const VALUE: u32 = 39; } +pub struct _39; +impl Value for _39 { + const VALUE: u32 = 39; +} /// Type representing the value 40 for the `Value` trait. -pub struct _40; impl Value for _40 { const VALUE: u32 = 40; } +pub struct _40; +impl Value for _40 { + const VALUE: u32 = 40; +} /// Type representing the value 41 for the `Value` trait. -pub struct _41; impl Value for _41 { const VALUE: u32 = 41; } +pub struct _41; +impl Value for _41 { + const VALUE: u32 = 41; +} /// Type representing the value 42 for the `Value` trait. -pub struct _42; impl Value for _42 { const VALUE: u32 = 42; } +pub struct _42; +impl Value for _42 { + const VALUE: u32 = 42; +} /// Type representing the value 43 for the `Value` trait. -pub struct _43; impl Value for _43 { const VALUE: u32 = 43; } +pub struct _43; +impl Value for _43 { + const VALUE: u32 = 43; +} /// Type representing the value 44 for the `Value` trait. -pub struct _44; impl Value for _44 { const VALUE: u32 = 44; } +pub struct _44; +impl Value for _44 { + const VALUE: u32 = 44; +} /// Type representing the value 45 for the `Value` trait. 
-pub struct _45; impl Value for _45 { const VALUE: u32 = 45; } +pub struct _45; +impl Value for _45 { + const VALUE: u32 = 45; +} /// Type representing the value 46 for the `Value` trait. -pub struct _46; impl Value for _46 { const VALUE: u32 = 46; } +pub struct _46; +impl Value for _46 { + const VALUE: u32 = 46; +} /// Type representing the value 47 for the `Value` trait. -pub struct _47; impl Value for _47 { const VALUE: u32 = 47; } +pub struct _47; +impl Value for _47 { + const VALUE: u32 = 47; +} /// Type representing the value 48 for the `Value` trait. -pub struct _48; impl Value for _48 { const VALUE: u32 = 48; } +pub struct _48; +impl Value for _48 { + const VALUE: u32 = 48; +} /// Type representing the value 49 for the `Value` trait. -pub struct _49; impl Value for _49 { const VALUE: u32 = 49; } +pub struct _49; +impl Value for _49 { + const VALUE: u32 = 49; +} /// Type representing the value 50 for the `Value` trait. -pub struct _50; impl Value for _50 { const VALUE: u32 = 50; } +pub struct _50; +impl Value for _50 { + const VALUE: u32 = 50; +} /// Type representing the value 51 for the `Value` trait. -pub struct _51; impl Value for _51 { const VALUE: u32 = 51; } +pub struct _51; +impl Value for _51 { + const VALUE: u32 = 51; +} /// Type representing the value 52 for the `Value` trait. -pub struct _52; impl Value for _52 { const VALUE: u32 = 52; } +pub struct _52; +impl Value for _52 { + const VALUE: u32 = 52; +} /// Type representing the value 53 for the `Value` trait. -pub struct _53; impl Value for _53 { const VALUE: u32 = 53; } +pub struct _53; +impl Value for _53 { + const VALUE: u32 = 53; +} /// Type representing the value 54 for the `Value` trait. -pub struct _54; impl Value for _54 { const VALUE: u32 = 54; } +pub struct _54; +impl Value for _54 { + const VALUE: u32 = 54; +} /// Type representing the value 55 for the `Value` trait. -pub struct _55; impl Value for _55 { const VALUE: u32 = 55; } +pub struct _55; +impl Value for _55 { + const VALUE: u32 = 55; +} /// Type representing the value 56 for the `Value` trait. -pub struct _56; impl Value for _56 { const VALUE: u32 = 56; } +pub struct _56; +impl Value for _56 { + const VALUE: u32 = 56; +} /// Type representing the value 57 for the `Value` trait. -pub struct _57; impl Value for _57 { const VALUE: u32 = 57; } +pub struct _57; +impl Value for _57 { + const VALUE: u32 = 57; +} /// Type representing the value 58 for the `Value` trait. -pub struct _58; impl Value for _58 { const VALUE: u32 = 58; } +pub struct _58; +impl Value for _58 { + const VALUE: u32 = 58; +} /// Type representing the value 59 for the `Value` trait. -pub struct _59; impl Value for _59 { const VALUE: u32 = 59; } +pub struct _59; +impl Value for _59 { + const VALUE: u32 = 59; +} /// Type representing the value 60 for the `Value` trait. -pub struct _60; impl Value for _60 { const VALUE: u32 = 60; } +pub struct _60; +impl Value for _60 { + const VALUE: u32 = 60; +} /// Type representing the value 61 for the `Value` trait. -pub struct _61; impl Value for _61 { const VALUE: u32 = 61; } +pub struct _61; +impl Value for _61 { + const VALUE: u32 = 61; +} /// Type representing the value 62 for the `Value` trait. -pub struct _62; impl Value for _62 { const VALUE: u32 = 62; } +pub struct _62; +impl Value for _62 { + const VALUE: u32 = 62; +} /// Type representing the value 63 for the `Value` trait. 
-pub struct _63; impl Value for _63 { const VALUE: u32 = 63; } +pub struct _63; +impl Value for _63 { + const VALUE: u32 = 63; +} /// Type representing the value 64 for the `Value` trait. -pub struct _64; impl Value for _64 { const VALUE: u32 = 64; } +pub struct _64; +impl Value for _64 { + const VALUE: u32 = 64; +} /// Type representing the value 65 for the `Value` trait. -pub struct _65; impl Value for _65 { const VALUE: u32 = 65; } +pub struct _65; +impl Value for _65 { + const VALUE: u32 = 65; +} /// Type representing the value 66 for the `Value` trait. -pub struct _66; impl Value for _66 { const VALUE: u32 = 66; } +pub struct _66; +impl Value for _66 { + const VALUE: u32 = 66; +} /// Type representing the value 67 for the `Value` trait. -pub struct _67; impl Value for _67 { const VALUE: u32 = 67; } +pub struct _67; +impl Value for _67 { + const VALUE: u32 = 67; +} /// Type representing the value 68 for the `Value` trait. -pub struct _68; impl Value for _68 { const VALUE: u32 = 68; } +pub struct _68; +impl Value for _68 { + const VALUE: u32 = 68; +} /// Type representing the value 69 for the `Value` trait. -pub struct _69; impl Value for _69 { const VALUE: u32 = 69; } +pub struct _69; +impl Value for _69 { + const VALUE: u32 = 69; +} /// Type representing the value 70 for the `Value` trait. -pub struct _70; impl Value for _70 { const VALUE: u32 = 70; } +pub struct _70; +impl Value for _70 { + const VALUE: u32 = 70; +} /// Type representing the value 71 for the `Value` trait. -pub struct _71; impl Value for _71 { const VALUE: u32 = 71; } +pub struct _71; +impl Value for _71 { + const VALUE: u32 = 71; +} /// Type representing the value 72 for the `Value` trait. -pub struct _72; impl Value for _72 { const VALUE: u32 = 72; } +pub struct _72; +impl Value for _72 { + const VALUE: u32 = 72; +} /// Type representing the value 73 for the `Value` trait. -pub struct _73; impl Value for _73 { const VALUE: u32 = 73; } +pub struct _73; +impl Value for _73 { + const VALUE: u32 = 73; +} /// Type representing the value 74 for the `Value` trait. -pub struct _74; impl Value for _74 { const VALUE: u32 = 74; } +pub struct _74; +impl Value for _74 { + const VALUE: u32 = 74; +} /// Type representing the value 75 for the `Value` trait. -pub struct _75; impl Value for _75 { const VALUE: u32 = 75; } +pub struct _75; +impl Value for _75 { + const VALUE: u32 = 75; +} /// Type representing the value 76 for the `Value` trait. -pub struct _76; impl Value for _76 { const VALUE: u32 = 76; } +pub struct _76; +impl Value for _76 { + const VALUE: u32 = 76; +} /// Type representing the value 77 for the `Value` trait. -pub struct _77; impl Value for _77 { const VALUE: u32 = 77; } +pub struct _77; +impl Value for _77 { + const VALUE: u32 = 77; +} /// Type representing the value 78 for the `Value` trait. -pub struct _78; impl Value for _78 { const VALUE: u32 = 78; } +pub struct _78; +impl Value for _78 { + const VALUE: u32 = 78; +} /// Type representing the value 79 for the `Value` trait. -pub struct _79; impl Value for _79 { const VALUE: u32 = 79; } +pub struct _79; +impl Value for _79 { + const VALUE: u32 = 79; +} /// Type representing the value 80 for the `Value` trait. -pub struct _80; impl Value for _80 { const VALUE: u32 = 80; } +pub struct _80; +impl Value for _80 { + const VALUE: u32 = 80; +} /// Type representing the value 81 for the `Value` trait. 
-pub struct _81; impl Value for _81 { const VALUE: u32 = 81; } +pub struct _81; +impl Value for _81 { + const VALUE: u32 = 81; +} /// Type representing the value 82 for the `Value` trait. -pub struct _82; impl Value for _82 { const VALUE: u32 = 82; } +pub struct _82; +impl Value for _82 { + const VALUE: u32 = 82; +} /// Type representing the value 83 for the `Value` trait. -pub struct _83; impl Value for _83 { const VALUE: u32 = 83; } +pub struct _83; +impl Value for _83 { + const VALUE: u32 = 83; +} /// Type representing the value 84 for the `Value` trait. -pub struct _84; impl Value for _84 { const VALUE: u32 = 84; } +pub struct _84; +impl Value for _84 { + const VALUE: u32 = 84; +} /// Type representing the value 85 for the `Value` trait. -pub struct _85; impl Value for _85 { const VALUE: u32 = 85; } +pub struct _85; +impl Value for _85 { + const VALUE: u32 = 85; +} /// Type representing the value 86 for the `Value` trait. -pub struct _86; impl Value for _86 { const VALUE: u32 = 86; } +pub struct _86; +impl Value for _86 { + const VALUE: u32 = 86; +} /// Type representing the value 87 for the `Value` trait. -pub struct _87; impl Value for _87 { const VALUE: u32 = 87; } +pub struct _87; +impl Value for _87 { + const VALUE: u32 = 87; +} /// Type representing the value 88 for the `Value` trait. -pub struct _88; impl Value for _88 { const VALUE: u32 = 88; } +pub struct _88; +impl Value for _88 { + const VALUE: u32 = 88; +} /// Type representing the value 89 for the `Value` trait. -pub struct _89; impl Value for _89 { const VALUE: u32 = 89; } +pub struct _89; +impl Value for _89 { + const VALUE: u32 = 89; +} /// Type representing the value 90 for the `Value` trait. -pub struct _90; impl Value for _90 { const VALUE: u32 = 90; } +pub struct _90; +impl Value for _90 { + const VALUE: u32 = 90; +} /// Type representing the value 91 for the `Value` trait. -pub struct _91; impl Value for _91 { const VALUE: u32 = 91; } +pub struct _91; +impl Value for _91 { + const VALUE: u32 = 91; +} /// Type representing the value 92 for the `Value` trait. -pub struct _92; impl Value for _92 { const VALUE: u32 = 92; } +pub struct _92; +impl Value for _92 { + const VALUE: u32 = 92; +} /// Type representing the value 93 for the `Value` trait. -pub struct _93; impl Value for _93 { const VALUE: u32 = 93; } +pub struct _93; +impl Value for _93 { + const VALUE: u32 = 93; +} /// Type representing the value 94 for the `Value` trait. -pub struct _94; impl Value for _94 { const VALUE: u32 = 94; } +pub struct _94; +impl Value for _94 { + const VALUE: u32 = 94; +} /// Type representing the value 95 for the `Value` trait. -pub struct _95; impl Value for _95 { const VALUE: u32 = 95; } +pub struct _95; +impl Value for _95 { + const VALUE: u32 = 95; +} /// Type representing the value 96 for the `Value` trait. -pub struct _96; impl Value for _96 { const VALUE: u32 = 96; } +pub struct _96; +impl Value for _96 { + const VALUE: u32 = 96; +} /// Type representing the value 97 for the `Value` trait. -pub struct _97; impl Value for _97 { const VALUE: u32 = 97; } +pub struct _97; +impl Value for _97 { + const VALUE: u32 = 97; +} /// Type representing the value 98 for the `Value` trait. -pub struct _98; impl Value for _98 { const VALUE: u32 = 98; } +pub struct _98; +impl Value for _98 { + const VALUE: u32 = 98; +} /// Type representing the value 99 for the `Value` trait. 
-pub struct _99; impl Value for _99 { const VALUE: u32 = 99; } +pub struct _99; +impl Value for _99 { + const VALUE: u32 = 99; +} /// Type representing the value 100 for the `Value` trait. -pub struct _100; impl Value for _100 { const VALUE: u32 = 100; } +pub struct _100; +impl Value for _100 { + const VALUE: u32 = 100; +} /// Type representing the value 112 for the `Value` trait. -pub struct _112; impl Value for _112 { const VALUE: u32 = 112; } +pub struct _112; +impl Value for _112 { + const VALUE: u32 = 112; +} /// Type representing the value 128 for the `Value` trait. -pub struct _128; impl Value for _128 { const VALUE: u32 = 128; } +pub struct _128; +impl Value for _128 { + const VALUE: u32 = 128; +} /// Type representing the value 160 for the `Value` trait. -pub struct _160; impl Value for _160 { const VALUE: u32 = 160; } +pub struct _160; +impl Value for _160 { + const VALUE: u32 = 160; +} /// Type representing the value 192 for the `Value` trait. -pub struct _192; impl Value for _192 { const VALUE: u32 = 192; } +pub struct _192; +impl Value for _192 { + const VALUE: u32 = 192; +} /// Type representing the value 224 for the `Value` trait. -pub struct _224; impl Value for _224 { const VALUE: u32 = 224; } +pub struct _224; +impl Value for _224 { + const VALUE: u32 = 224; +} /// Type representing the value 256 for the `Value` trait. -pub struct _256; impl Value for _256 { const VALUE: u32 = 256; } +pub struct _256; +impl Value for _256 { + const VALUE: u32 = 256; +} /// Type representing the value 384 for the `Value` trait. -pub struct _384; impl Value for _384 { const VALUE: u32 = 384; } +pub struct _384; +impl Value for _384 { + const VALUE: u32 = 384; +} /// Type representing the value 512 for the `Value` trait. -pub struct _512; impl Value for _512 { const VALUE: u32 = 512; } - +pub struct _512; +impl Value for _512 { + const VALUE: u32 = 512; +} diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index ef1adc4a0e0ee..a74980332ad28 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ pub use primitive_types::{U256, U512}; #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode}; + use codec::{Decode, Encode}; use sp_serializer as ser; macro_rules! 
test { @@ -39,8 +39,8 @@ mod tests { ($name::from(16), "0x10"), ($name::from(1_000), "0x3e8"), ($name::from(100_000), "0x186a0"), - ($name::from(u64::max_value()), "0xffffffffffffffff"), - ($name::from(u64::max_value()) + $name::from(1), "0x10000000000000000"), + ($name::from(u64::MAX), "0xffffffffffffffff"), + ($name::from(u64::MAX) + $name::from(1), "0x10000000000000000"), ]; for (number, expected) in tests { @@ -55,34 +55,27 @@ mod tests { assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } - } + }; } test!(U256, test_u256); #[test] fn test_u256_codec() { - let res1 = vec![120, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0]; - let res2 = vec![0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]; + let res1 = vec![ + 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + ]; + let res2 = vec![ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + ]; - assert_eq!( - U256::from(120).encode(), - res1); - assert_eq!( - U256::max_value().encode(), - res2); - assert_eq!( - U256::decode(&mut &res1[..]), - Ok(U256::from(120))); - assert_eq!( - U256::decode(&mut &res2[..]), - Ok(U256::max_value())); + assert_eq!(U256::from(120).encode(), res1); + assert_eq!(U256::max_value().encode(), res2); + assert_eq!(U256::decode(&mut &res1[..]), Ok(U256::from(120))); + assert_eq!(U256::decode(&mut &res2[..]), Ok(U256::max_value())); } #[test] @@ -91,10 +84,10 @@ mod tests { ser::to_string_pretty(&!U256::zero()), "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"") - .unwrap_err() - .is_data() - ); + assert!(ser::from_str::( + "\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap_err() + .is_data()); } } diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index cc3fe7cd1b474..c99651d4ef042 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-database" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,5 +11,6 @@ documentation = "https://docs.rs/sp-database" readme = "README.md" [dependencies] -parking_lot = "0.10.0" -kvdb = "0.7.0" +parking_lot = "0.11.1" +kvdb = "0.10.0" + diff --git a/primitives/database/src/error.rs b/primitives/database/src/error.rs index 2e5d4557a9791..4bf5a20aff401 100644 --- a/primitives/database/src/error.rs +++ b/primitives/database/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ /// The error type for database operations. 
diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index cc3fe7cd1b474..c99651d4ef042 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-database" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -11,5 +11,6 @@ documentation = "https://docs.rs/sp-database" readme = "README.md" [dependencies] -parking_lot = "0.10.0" -kvdb = "0.7.0" +parking_lot = "0.11.1" +kvdb = "0.10.0" + diff --git a/primitives/database/src/error.rs b/primitives/database/src/error.rs index 2e5d4557a9791..4bf5a20aff401 100644 --- a/primitives/database/src/error.rs +++ b/primitives/database/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ /// The error type for database operations. #[derive(Debug)] -pub struct DatabaseError(pub Box<dyn std::error::Error>); +pub struct DatabaseError(pub Box<dyn std::error::Error + Send + Sync>); impl std::fmt::Display for DatabaseError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index f436979aaf4c1..1a2b0513dc28a 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,35 +16,93 @@ // limitations under the License. /// A wrapper around `kvdb::Database` that implements `sp_database::Database` trait - use ::kvdb::{DBTransaction, KeyValueDB}; -use crate::{Database, Change, ColumnId, Transaction, error}; +use crate::{error, Change, ColumnId, Database, Transaction}; struct DbAdapter<D: KeyValueDB>(D); fn handle_err<T>(result: std::io::Result<T>) -> T { match result { Ok(r) => r, - Err(e) => { - panic!("Critical database eror: {:?}", e); - } + Err(e) => { + panic!("Critical database error: {:?}", e); + }, } } /// Wrap RocksDb database into a trait object that implements `sp_database::Database` -pub fn as_database<D: KeyValueDB + 'static, H: Clone>(db: D) -> std::sync::Arc<dyn Database<H>> { +pub fn as_database<D, H>(db: D) -> std::sync::Arc<dyn Database<H>> +where + D: KeyValueDB + 'static, + H: Clone + AsRef<[u8]>, +{ std::sync::Arc::new(DbAdapter(db)) } -impl<H: Clone, D: KeyValueDB> Database<H> for DbAdapter<D> { +impl<D: KeyValueDB> DbAdapter<D> { + // Returns counter key and counter value if it exists. + fn read_counter(&self, col: ColumnId, key: &[u8]) -> error::Result<(Vec<u8>, Option<u32>)> { + // Add a key suffix for the counter + let mut counter_key = key.to_vec(); + counter_key.push(0); + Ok(match self.0.get(col, &counter_key).map_err(|e| error::DatabaseError(Box::new(e)))? { + Some(data) => { + let mut counter_data = [0; 4]; + if data.len() != 4 { + return Err(error::DatabaseError(Box::new(std::io::Error::new( + std::io::ErrorKind::Other, + format!("Unexpected counter len {}", data.len()), + )))) + } + counter_data.copy_from_slice(&data); + let counter = u32::from_le_bytes(counter_data); + (counter_key, Some(counter)) + }, + None => (counter_key, None), + }) + } +} + +impl<D: KeyValueDB, H: Clone + AsRef<[u8]>> Database<H> for DbAdapter<D> { fn commit(&self, transaction: Transaction<H>) -> error::Result<()> { let mut tx = DBTransaction::new(); for change in transaction.0.into_iter() { match change { Change::Set(col, key, value) => tx.put_vec(col, &key, value), Change::Remove(col, key) => tx.delete(col, &key), - _ => unimplemented!(), + Change::Store(col, key, value) => match self.read_counter(col, key.as_ref())? { + (counter_key, Some(mut counter)) => { + counter += 1; + tx.put(col, &counter_key, &counter.to_le_bytes()); + }, + (counter_key, None) => { + let d = 1u32.to_le_bytes(); + tx.put(col, &counter_key, &d); + tx.put_vec(col, key.as_ref(), value); + }, + }, + Change::Reference(col, key) => { + if let (counter_key, Some(mut counter)) = + self.read_counter(col, key.as_ref())? + { + counter += 1; + tx.put(col, &counter_key, &counter.to_le_bytes()); + } + }, + Change::Release(col, key) => { + if let (counter_key, Some(mut counter)) = + self.read_counter(col, key.as_ref())?
+ { + counter -= 1; + if counter == 0 { + tx.delete(col, &counter_key); + tx.delete(col, key.as_ref()); + } else { + tx.put(col, &counter_key, &counter.to_le_bytes()); + } + } + }, } } self.0.write(tx).map_err(|e| error::DatabaseError(Box::new(e))) @@ -54,7 +112,7 @@ impl Database for DbAdapter { handle_err(self.0.get(col, key)) } - fn lookup(&self, _hash: &H) -> Option> { - unimplemented!(); + fn contains(&self, col: ColumnId, key: &[u8]) -> bool { + handle_err(self.0.has_key(col, key)) } } diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs index 1908eb49bb6c6..d30c7eb3323e9 100644 --- a/primitives/database/src/lib.rs +++ b/primitives/database/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,11 +18,11 @@ //! The main database trait, allowing Substrate to store data persistently. pub mod error; -mod mem; mod kvdb; +mod mem; -pub use mem::MemDb; pub use crate::kvdb::as_database; +pub use mem::MemDb; /// An identifier for a column. pub type ColumnId = u32; @@ -32,16 +32,9 @@ pub type ColumnId = u32; pub enum Change { Set(ColumnId, Vec, Vec), Remove(ColumnId, Vec), - Store(H, Vec), - Release(H), -} - -/// An alteration to the database that references the data. -pub enum ChangeRef<'a, H> { - Set(ColumnId, &'a [u8], &'a [u8]), - Remove(ColumnId, &'a [u8]), - Store(H, &'a [u8]), - Release(H), + Store(ColumnId, H, Vec), + Reference(ColumnId, H), + Release(ColumnId, H), } /// A series of changes to the database that can be committed atomically. They do not take effect @@ -67,54 +60,42 @@ impl Transaction { self.0.push(Change::Remove(col, key.to_vec())) } /// Store the `preimage` of `hash` into the database, so that it may be looked up later with - /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent + /// `Database::get`. This may be called multiple times, but subsequent /// calls will ignore `preimage` and simply increase the number of references on `hash`. - pub fn store(&mut self, hash: H, preimage: &[u8]) { - self.0.push(Change::Store(hash, preimage.to_vec())) + pub fn store(&mut self, col: ColumnId, hash: H, preimage: Vec) { + self.0.push(Change::Store(col, hash, preimage)) + } + /// Increase the number of references for `hash` in the database. + pub fn reference(&mut self, col: ColumnId, hash: H) { + self.0.push(Change::Reference(col, hash)) } /// Release the preimage of `hash` from the database. An equal number of these to the number of - /// corresponding `store`s must have been given before it is legal for `Database::lookup` to + /// corresponding `store`s must have been given before it is legal for `Database::get` to /// be unable to provide the preimage. - pub fn release(&mut self, hash: H) { - self.0.push(Change::Release(hash)) + pub fn release(&mut self, col: ColumnId, hash: H) { + self.0.push(Change::Release(col, hash)) } } -pub trait Database: Send + Sync { - /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` - /// will reflect the new state. 
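// Editorial sketch of the reference-counting layout used by `DbAdapter` above: every
// `Store`d value gets a companion entry whose key is the value key with one zero byte
// appended, holding the reference count as a little-endian `u32` (see `read_counter`).
fn counter_key(key: &[u8]) -> Vec<u8> {
	let mut counter_key = key.to_vec();
	counter_key.push(0); // the same suffix `read_counter` appends
	counter_key
}

fn counter_layout_demo() {
	assert_eq!(counter_key(b"abc"), b"abc\0".to_vec());
	// A fresh `Store` writes a count of 1 in little-endian bytes.
	assert_eq!(1u32.to_le_bytes(), [1, 0, 0, 0]);
}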
- fn commit(&self, transaction: Transaction) -> error::Result<()> { - for change in transaction.0.into_iter() { - match change { - Change::Set(col, key, value) => self.set(col, &key, &value), - Change::Remove(col, key) => self.remove(col, &key), - Change::Store(hash, preimage) => self.store(&hash, &preimage), - Change::Release(hash) => self.release(&hash), - }?; - } - - Ok(()) - } - +pub trait Database>: Send + Sync { /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` /// will reflect the new state. - fn commit_ref<'a>(&self, transaction: &mut dyn Iterator>) -> error::Result<()> { - let mut tx = Transaction::new(); - for change in transaction { - match change { - ChangeRef::Set(col, key, value) => tx.set(col, key, value), - ChangeRef::Remove(col, key) => tx.remove(col, key), - ChangeRef::Store(hash, preimage) => tx.store(hash, preimage), - ChangeRef::Release(hash) => tx.release(hash), - } - } - self.commit(tx) - } + fn commit(&self, transaction: Transaction) -> error::Result<()>; /// Retrieve the value previously stored against `key` or `None` if /// `key` is not currently in the database. fn get(&self, col: ColumnId, key: &[u8]) -> Option>; + /// Check if the value exists in the database without retrieving it. + fn contains(&self, col: ColumnId, key: &[u8]) -> bool { + self.get(col, key).is_some() + } + + /// Check value size in the database possibly without retrieving it. + fn value_size(&self, col: ColumnId, key: &[u8]) -> Option { + self.get(col, key).map(|v| v.len()) + } + /// Call `f` with the value previously stored against `key`. /// /// This may be faster than `get` since it doesn't allocate. @@ -123,48 +104,11 @@ pub trait Database: Send + Sync { self.get(col, key).map(|v| f(&v)); } - /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. - fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) -> error::Result<()> { - let mut t = Transaction::new(); - t.set(col, key, value); - self.commit(t) - } - /// Remove the value of `key` in `col`. - fn remove(&self, col: ColumnId, key: &[u8]) -> error::Result<()> { - let mut t = Transaction::new(); - t.remove(col, key); - self.commit(t) - } - - /// Retrieve the first preimage previously `store`d for `hash` or `None` if no preimage is - /// currently stored. - fn lookup(&self, hash: &H) -> Option>; - - /// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage - /// is currently stored. + /// Check if database supports internal ref counting for state data. /// - /// This may be faster than `lookup` since it doesn't allocate. - /// Use `with_lookup` helper function if you need `f` to return a value from `f` - fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { - self.lookup(hash).map(|v| f(&v)); - } - - /// Store the `preimage` of `hash` into the database, so that it may be looked up later with - /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent - /// calls will ignore `preimage` and simply increase the number of references on `hash`. - fn store(&self, hash: &H, preimage: &[u8]) -> error::Result<()> { - let mut t = Transaction::new(); - t.store(hash.clone(), preimage); - self.commit(t) - } - - /// Release the preimage of `hash` from the database. An equal number of these to the number of - /// corresponding `store`s must have been given before it is legal for `Database::lookup` to - /// be unable to provide the preimage. 
- fn release(&self, hash: &H) -> error::Result<()> { - let mut t = Transaction::new(); - t.release(hash.clone()); - self.commit(t) + /// For backwards compatibility returns `false` by default. + fn supports_ref_counting(&self) -> bool { + false } } @@ -178,20 +122,16 @@ impl<H> std::fmt::Debug for dyn Database<H> { /// `key` is not currently in the database. /// /// This may be faster than `get` since it doesn't allocate. -pub fn with_get<R, H: Clone>(db: &dyn Database<H>, col: ColumnId, key: &[u8], mut f: impl FnMut(&[u8]) -> R) -> Option<R> { +pub fn with_get<R, H: Clone + AsRef<[u8]>>( + db: &dyn Database<H>, + col: ColumnId, + key: &[u8], + mut f: impl FnMut(&[u8]) -> R, +) -> Option<R> { let mut result: Option<R> = None; - let mut adapter = |k: &_| { result = Some(f(k)); }; + let mut adapter = |k: &_| { + result = Some(f(k)); + }; db.with_get(col, key, &mut adapter); result } - -/// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage -/// is currently stored. -/// -/// This may be faster than `lookup` since it doesn't allocate. -pub fn with_lookup<R, H: Clone>(db: &dyn Database<H>, hash: &H, mut f: impl FnMut(&[u8]) -> R) -> Option<R> { - let mut result: Option<R> = None; - let mut adapter = |k: &_| { result = Some(f(k)); }; - db.with_lookup(hash, &mut adapter); - result -}
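// Editorial sketch: exercising the reworked, column-aware `sp-database` API end to end
// against the in-memory backend defined in `mem.rs` below. Column `0` and the byte
// values are arbitrary example data.
fn database_demo() {
	use sp_database::{with_get, Database, MemDb, Transaction};

	let mem = MemDb::new();
	let db: &dyn Database<Vec<u8>> = &mem;
	let hash = vec![1u8, 2, 3]; // any `Clone + AsRef<[u8]>` hash type works

	let mut tx = Transaction::new();
	tx.store(0, hash.clone(), b"preimage".to_vec());
	tx.reference(0, hash.clone()); // reference count is now 2
	db.commit(tx).unwrap();

	// One `release` keeps the value alive; a second one would delete it.
	let mut tx = Transaction::new();
	tx.release(0, hash.clone());
	db.commit(tx).unwrap();
	assert!(db.contains(0, &hash));

	// `with_get` reads without handing out an owned `Vec<u8>`.
	let len = with_get(db, 0, &hash, |bytes| bytes.len());
	assert_eq!(len, Some(b"preimage".len()));
}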
diff --git a/primitives/database/src/mem.rs b/primitives/database/src/mem.rs index 51cb854334d50..d1b1861e98fdd 100644 --- a/primitives/database/src/mem.rs +++ b/primitives/database/src/mem.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,26 +17,52 @@ //! In-memory implementation of `Database` -use std::collections::HashMap; -use crate::{Database, Change, ColumnId, Transaction, error}; +use crate::{error, Change, ColumnId, Database, Transaction}; use parking_lot::RwLock; +use std::collections::{hash_map::Entry, HashMap}; #[derive(Default)] /// This implements `Database` as an in-memory hash map. `commit` is not atomic. -pub struct MemDb<H> - (RwLock<(HashMap<ColumnId, HashMap<Vec<u8>, Vec<u8>>>, HashMap<H, Vec<u8>>)>); +pub struct MemDb(RwLock<HashMap<ColumnId, HashMap<Vec<u8>, (u32, Vec<u8>)>>>); -impl<H> Database<H> for MemDb<H> - where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash +impl<H> Database<H> for MemDb +where + H: Clone + AsRef<[u8]>, { fn commit(&self, transaction: Transaction<H>) -> error::Result<()> { let mut s = self.0.write(); for change in transaction.0.into_iter() { match change { - Change::Set(col, key, value) => { s.0.entry(col).or_default().insert(key, value); }, - Change::Remove(col, key) => { s.0.entry(col).or_default().remove(&key); }, - Change::Store(hash, preimage) => { s.1.insert(hash, preimage); }, - Change::Release(hash) => { s.1.remove(&hash); }, + Change::Set(col, key, value) => { + s.entry(col).or_default().insert(key, (1, value)); + }, + Change::Remove(col, key) => { + s.entry(col).or_default().remove(&key); + }, + Change::Store(col, hash, value) => { + s.entry(col) + .or_default() + .entry(hash.as_ref().to_vec()) + .and_modify(|(c, _)| *c += 1) + .or_insert_with(|| (1, value)); + }, + Change::Reference(col, hash) => { + if let Entry::Occupied(mut entry) = + s.entry(col).or_default().entry(hash.as_ref().to_vec()) + { + entry.get_mut().0 += 1; + } + }, + Change::Release(col, hash) => { + if let Entry::Occupied(mut entry) = + s.entry(col).or_default().entry(hash.as_ref().to_vec()) + { + entry.get_mut().0 -= 1; + if entry.get().0 == 0 { + entry.remove(); + } + } + }, } } @@ -45,18 +71,11 @@ impl<H> Database<H> for MemDb fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> { let s = self.0.read(); - s.0.get(&col).and_then(|c| c.get(key).cloned()) - } - - fn lookup(&self, hash: &H) -> Option<Vec<u8>> { - let s = self.0.read(); - s.1.get(hash).cloned() + s.get(&col).and_then(|c| c.get(key).map(|(_, v)| v.clone())) } } -impl<H> MemDb<H> - where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash -{ +impl MemDb { /// Create a new instance pub fn new() -> Self { MemDb::default() } @@ -65,7 +84,6 @@ impl MemDb /// Count number of values in a column pub fn count(&self, col: ColumnId) -> usize { let s = self.0.read(); - s.0.get(&col).map(|c| c.len()).unwrap_or(0) + s.get(&col).map(|c| c.len()).unwrap_or(0) } } - diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 10164553f857c..0d3ba805100c4 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-debug-derive" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.3" -syn = "1.0.7" +syn = "1.0.58" proc-macro2 = "1.0" [features] diff --git a/primitives/debug-derive/src/impls.rs b/primitives/debug-derive/src/impls.rs index 1757b294d9d49..4d79ee9880160 100644 --- a/primitives/debug-derive/src/impls.rs +++ b/primitives/debug-derive/src/impls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License.
-use quote::quote; use proc_macro2::TokenStream; -use syn::{Data, DeriveInput, parse_quote}; +use quote::quote; +use syn::{parse_quote, Data, DeriveInput}; pub fn debug_derive(ast: DeriveInput) -> proc_macro::TokenStream { let name_str = ast.ident.to_string(); @@ -28,11 +28,11 @@ pub fn debug_derive(ast: DeriveInput) -> proc_macro::TokenStream { let wh = generics.make_where_clause(); for t in ast.generics.type_params() { let name = &t.ident; - wh.predicates.push(parse_quote!{ #name : core::fmt::Debug }); + wh.predicates.push(parse_quote! { #name : core::fmt::Debug }); } generics.split_for_impl() }; - let gen = quote!{ + let gen = quote! { impl #impl_generics core::fmt::Debug for #name #ty_generics #where_clause { fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { #implementation @@ -62,32 +62,26 @@ mod implementation { mod implementation { use super::*; use proc_macro2::Span; - use syn::{Ident, Index, token::SelfValue}; + use syn::{token::SelfValue, Ident, Index}; /// Derive the inner implementation of `Debug::fmt` function. pub fn derive(name_str: &str, data: &Data) -> TokenStream { match *data { Data::Struct(ref s) => derive_struct(&name_str, &s.fields), - Data::Union(ref u) => derive_fields(&name_str, Fields::new(u.fields.named.iter(), None)), + Data::Union(ref u) => + derive_fields(&name_str, Fields::new(u.fields.named.iter(), None)), Data::Enum(ref e) => derive_enum(&name_str, &e), } } enum Fields { - Indexed { - indices: Vec, - }, - Unnamed { - vars: Vec, - }, - Named { - names: Vec, - this: Option, - }, + Indexed { indices: Vec }, + Unnamed { vars: Vec }, + Named { names: Vec, this: Option }, } impl Fields { - fn new<'a>(fields: impl Iterator, this: Option) -> Self { + fn new<'a>(fields: impl Iterator, this: Option) -> Self { let mut indices = vec![]; let mut names = vec![]; @@ -100,27 +94,17 @@ mod implementation { } if names.is_empty() { - Self::Indexed { - indices, - } + Self::Indexed { indices } } else { - Self::Named { - names, - this, - } + Self::Named { names, this } } } } - fn derive_fields<'a>( - name_str: &str, - fields: Fields, - ) -> TokenStream { + fn derive_fields<'a>(name_str: &str, fields: Fields) -> TokenStream { match fields { Fields::Named { names, this } => { - let names_str: Vec<_> = names.iter() - .map(|x| x.to_string()) - .collect(); + let names_str: Vec<_> = names.iter().map(|x| x.to_string()).collect(); let fields = match this { None => quote! { #( .field(#names_str, #names) )* }, @@ -132,16 +116,15 @@ mod implementation { #fields .finish() } - }, - Fields::Indexed { indices } => { + Fields::Indexed { indices } => { quote! { fmt.debug_tuple(#name_str) #( .field(&self.#indices) )* .finish() } }, - Fields::Unnamed { vars } => { + Fields::Unnamed { vars } => { quote! 
{ fmt.debug_tuple(#name_str) #( .field(#vars) )* @@ -151,38 +134,33 @@ mod implementation { } } - fn derive_enum( - name: &str, - e: &syn::DataEnum, - ) -> TokenStream { - let v = e.variants - .iter() - .map(|v| { - let name = format!("{}::{}", name, v.ident); - let ident = &v.ident; - match v.fields { - syn::Fields::Named(ref f) => { - let names: Vec<_> = f.named.iter().flat_map(|f| f.ident.clone()).collect(); - let fields_impl = derive_fields(&name, Fields::Named { - names: names.clone(), - this: None, - }); - (ident, (quote!{ { #( ref #names ),* } }, fields_impl)) - }, - syn::Fields::Unnamed(ref f) => { - let names = f.unnamed.iter() - .enumerate() - .map(|(id, _)| Ident::new(&format!("a{}", id), Span::call_site())) - .collect::>(); - let fields_impl = derive_fields(&name, Fields::Unnamed { vars: names.clone() }); - (ident, (quote! { ( #( ref #names ),* ) }, fields_impl)) - }, - syn::Fields::Unit => { - let fields_impl = derive_fields(&name, Fields::Indexed { indices: vec![] }); - (ident, (quote! { }, fields_impl)) - }, - } - }); + fn derive_enum(name: &str, e: &syn::DataEnum) -> TokenStream { + let v = e.variants.iter().map(|v| { + let name = format!("{}::{}", name, v.ident); + let ident = &v.ident; + match v.fields { + syn::Fields::Named(ref f) => { + let names: Vec<_> = f.named.iter().flat_map(|f| f.ident.clone()).collect(); + let fields_impl = + derive_fields(&name, Fields::Named { names: names.clone(), this: None }); + (ident, (quote! { { #( ref #names ),* } }, fields_impl)) + }, + syn::Fields::Unnamed(ref f) => { + let names = f + .unnamed + .iter() + .enumerate() + .map(|(id, _)| Ident::new(&format!("a{}", id), Span::call_site())) + .collect::>(); + let fields_impl = derive_fields(&name, Fields::Unnamed { vars: names.clone() }); + (ident, (quote! { ( #( ref #names ),* ) }, fields_impl)) + }, + syn::Fields::Unit => { + let fields_impl = derive_fields(&name, Fields::Indexed { indices: vec![] }); + (ident, (quote! {}, fields_impl)) + }, + } + }); type Vecs = (Vec, Vec); let (variants, others): Vecs<_, _> = v.unzip(); @@ -196,23 +174,15 @@ mod implementation { } } - fn derive_struct( - name_str: &str, - fields: &syn::Fields, - ) -> TokenStream { + fn derive_struct(name_str: &str, fields: &syn::Fields) -> TokenStream { match *fields { syn::Fields::Named(ref f) => derive_fields( name_str, Fields::new(f.named.iter(), Some(syn::Token!(self)(Span::call_site()))), ), - syn::Fields::Unnamed(ref f) => derive_fields( - name_str, - Fields::new(f.unnamed.iter(), None), - ), - syn::Fields::Unit => derive_fields( - name_str, - Fields::Indexed { indices: vec![] }, - ), + syn::Fields::Unnamed(ref f) => + derive_fields(name_str, Fields::new(f.unnamed.iter(), None)), + syn::Fields::Unit => derive_fields(name_str, Fields::Indexed { indices: vec![] }), } } } diff --git a/primitives/debug-derive/src/lib.rs b/primitives/debug-derive/src/lib.rs index db370f890810d..7eaa3a0020e93 100644 --- a/primitives/debug-derive/src/lib.rs +++ b/primitives/debug-derive/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,9 +27,9 @@ //! //! ```rust //! #[derive(sp_debug_derive::RuntimeDebug)] -//! struct MyStruct; +//! struct MyStruct; //! -//! assert_eq!(format!("{:?}", MyStruct), "MyStruct"); +//! assert_eq!(format!("{:?}", MyStruct), "MyStruct"); //! 
``` mod impls; @@ -38,6 +38,5 @@ use proc_macro::TokenStream; #[proc_macro_derive(RuntimeDebug)] pub fn debug_derive(input: TokenStream) -> TokenStream { - impls::debug_derive(syn::parse_macro_input!(input)) + impls::debug_derive(syn::parse_macro_input!(input)) } - diff --git a/primitives/debug-derive/tests/tests.rs b/primitives/debug-derive/tests/tests.rs index 6a03762b1c655..4f4c7f4caabc2 100644 --- a/primitives/debug-derive/tests/tests.rs +++ b/primitives/debug-derive/tests/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,33 +30,17 @@ struct Named { enum EnumLongName { A, B(A, String), - VariantLongName { - a: A, - b: String, - }, + VariantLongName { a: A, b: String }, } - #[test] fn should_display_proper_debug() { use self::EnumLongName as Enum; - assert_eq!( - format!("{:?}", Unnamed(1, "abc".into())), - "Unnamed(1, \"abc\")" - ); - assert_eq!( - format!("{:?}", Named { a: 1, b: "abc".into() }), - "Named { a: 1, b: \"abc\" }" - ); - assert_eq!( - format!("{:?}", Enum::::A), - "EnumLongName::A" - ); - assert_eq!( - format!("{:?}", Enum::B(1, "abc".into())), - "EnumLongName::B(1, \"abc\")" - ); + assert_eq!(format!("{:?}", Unnamed(1, "abc".into())), "Unnamed(1, \"abc\")"); + assert_eq!(format!("{:?}", Named { a: 1, b: "abc".into() }), "Named { a: 1, b: \"abc\" }"); + assert_eq!(format!("{:?}", Enum::::A), "EnumLongName::A"); + assert_eq!(format!("{:?}", Enum::B(1, "abc".into())), "EnumLongName::B(1, \"abc\")"); assert_eq!( format!("{:?}", Enum::VariantLongName { a: 1, b: "abc".into() }), "EnumLongName::VariantLongName { a: 1, b: \"abc\" }" diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 9000dde058cd4..52a6300688cd9 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" -version = "0.8.0" +version = "0.10.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -14,10 +14,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-storage = { version = "2.0.0", path = "../storage", default-features = false } -sp-std = { version = "2.0.0", path = "../std", default-features = false } -environmental = { version = "1.1.2", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-storage = { version = "4.0.0-dev", path = "../storage", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../std", default-features = false } +environmental = { version = "1.1.3", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } [features] default = ["std"] diff --git a/primitives/externalities/src/extensions.rs b/primitives/externalities/src/extensions.rs index a7f5ee8bc739e..37086a707b648 100644 --- a/primitives/externalities/src/extensions.rs +++ b/primitives/externalities/src/extensions.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,12 +22,16 @@ //! //! 
It is required that each extension implements the [`Extension`] trait. +use crate::Error; use sp_std::{ - collections::btree_map::{BTreeMap, Entry}, any::{Any, TypeId}, ops::DerefMut, boxed::Box, + any::{Any, TypeId}, + boxed::Box, + collections::btree_map::{BTreeMap, Entry}, + ops::DerefMut, }; -use crate::Error; -/// Marker trait for types that should be registered as [`Externalities`](crate::Externalities) extension. +/// Marker trait for types that should be registered as [`Externalities`](crate::Externalities) +/// extension. /// /// As extensions are stored as `Box`, this trait should give more confidence that the correct /// type is registered and requested. @@ -92,16 +96,21 @@ macro_rules! decl_extension { /// /// This is a super trait of the [`Externalities`](crate::Externalities). pub trait ExtensionStore { - /// Tries to find a registered extension by the given `type_id` and returns it as a `&mut dyn Any`. + /// Tries to find a registered extension by the given `type_id` and returns it as a `&mut dyn + /// Any`. /// /// It is advised to use [`ExternalitiesExt::extension`](crate::ExternalitiesExt::extension) /// instead of this function to get type system support and automatic type downcasting. fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any>; - /// Register extension `extension` with speciifed `type_id`. + /// Register extension `extension` with specified `type_id`. /// /// It should return error if extension is already registered. - fn register_extension_with_type_id(&mut self, type_id: TypeId, extension: Box) -> Result<(), Error>; + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), Error>; /// Deregister extension with speicifed 'type_id' and drop it. /// @@ -129,10 +138,7 @@ impl Extensions { } /// Register the given extension. - pub fn register( - &mut self, - ext: E, - ) { + pub fn register(&mut self, ext: E) { let type_id = ext.type_id(); self.extensions.insert(type_id, Box::new(ext)); } @@ -154,7 +160,10 @@ impl Extensions { /// Return a mutable reference to the requested extension. pub fn get_mut(&mut self, ext_type_id: TypeId) -> Option<&mut dyn Any> { - self.extensions.get_mut(&ext_type_id).map(DerefMut::deref_mut).map(Extension::as_mut_any) + self.extensions + .get_mut(&ext_type_id) + .map(DerefMut::deref_mut) + .map(Extension::as_mut_any) } /// Deregister extension for the given `type_id`. @@ -165,7 +174,9 @@ impl Extensions { } /// Returns a mutable iterator over all extensions. - pub fn iter_mut<'a>(&'a mut self) -> impl Iterator)> { + pub fn iter_mut<'a>( + &'a mut self, + ) -> impl Iterator)> { self.extensions.iter_mut() } } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 388482964f18c..e6a8f8caa8d33 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,12 +25,16 @@ //! //! This crate exposes the main [`Externalities`] trait. 
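// Editorial sketch: declaring a custom extension with `decl_extension!` and registering
// it in the `Extensions` registry shown above. The extension name and payload are
// invented for illustration.
sp_externalities::decl_extension! {
	/// An example extension carrying a simple counter.
	pub struct CounterExt(u64);
}

fn extensions_demo() {
	use std::any::TypeId;

	let mut extensions = sp_externalities::Extensions::new();
	extensions.register(CounterExt(7));
	// Extensions are looked up by `TypeId`, exactly as `ExtensionStore` implementations do.
	assert!(extensions.get_mut(TypeId::of::<CounterExt>()).is_some());
}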
-use sp_std::{any::{Any, TypeId}, vec::Vec, boxed::Box}; +use sp_std::{ + any::{Any, TypeId}, + boxed::Box, + vec::Vec, +}; use sp_storage::{ChildInfo, TrackedStorageKey}; +pub use extensions::{Extension, ExtensionStore, Extensions}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; -pub use extensions::{Extension, Extensions, ExtensionStore}; mod extensions; mod scope_limited; @@ -68,20 +72,12 @@ pub trait Externalities: ExtensionStore { /// This may be optimized for large values. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option>; + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Read child runtime storage. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option>; + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Set storage entry `key` of current contract being called (effective immediately). fn set_storage(&mut self, key: Vec, value: Vec) { @@ -89,12 +85,7 @@ pub trait Externalities: ExtensionStore { } /// Set child storage entry `key` of current contract being called (effective immediately). - fn set_child_storage( - &mut self, - child_info: &ChildInfo, - key: Vec, - value: Vec, - ) { + fn set_child_storage(&mut self, child_info: &ChildInfo, key: Vec, value: Vec) { self.place_child_storage(child_info, key, Some(value)) } @@ -103,12 +94,9 @@ pub trait Externalities: ExtensionStore { self.place_storage(key.to_vec(), None); } - /// Clear a child storage entry (`key`) of current contract being called (effective immediately). - fn clear_child_storage( - &mut self, - child_info: &ChildInfo, - key: &[u8], - ) { + /// Clear a child storage entry (`key`) of current contract being called (effective + /// immediately). + fn clear_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) { self.place_child_storage(child_info, key.to_vec(), None) } @@ -118,11 +106,7 @@ pub trait Externalities: ExtensionStore { } /// Whether a child storage entry exists. - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> bool { + fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { self.child_storage(child_info, key).is_some() } @@ -130,38 +114,43 @@ pub trait Externalities: ExtensionStore { fn next_storage_key(&self, key: &[u8]) -> Option>; /// Returns the key immediately following the given key, if it exists, in child storage. - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option>; + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, child_info: &ChildInfo); + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend. No + /// limit is applied if `limit` is `None`. Returned boolean is `true` if the child trie was + /// removed completely and `false` if there are remaining keys after the function + /// returns. Returned `u32` is the number of keys that was removed at the end of the + /// operation. + /// + /// # Note + /// + /// An implementation is free to delete more keys than the specified limit as long as + /// it is able to do that in constant time. + fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> (bool, u32); /// Clear storage entries which keys are start with the given prefix. 
- fn clear_prefix(&mut self, prefix: &[u8]); + /// + /// `limit` and the result work as for `kill_child_storage`. + fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> (bool, u32); /// Clear child storage entries whose keys start with the given prefix. + /// + /// `limit` and the result work as for `kill_child_storage`. fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - ); + limit: Option, + ) -> (bool, u32); - /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). + /// Set or clear a storage entry (`key`) of current contract being called (effective + /// immediately). fn place_storage(&mut self, key: Vec, value: Option>); /// Set or clear a child storage entry. - fn place_child_storage( - &mut self, - child_info: &ChildInfo, - key: Vec, - value: Option>, - ); - - /// Get the identity of the chain. - fn chain_id(&self) -> u64; + fn place_child_storage(&mut self, child_info: &ChildInfo, key: Vec, value: Option>); /// Get the trie root of the current storage map. /// @@ -176,19 +165,13 @@ pub trait Externalities: ExtensionStore { /// /// If the storage root equals the default hash as defined by the trie, the key in the top-level /// storage map will be removed. - fn child_storage_root( - &mut self, - child_info: &ChildInfo, - ) -> Vec; + fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec; /// Append storage item. /// - /// This assumes specific format of the storage item. Also there is no way to undo this operation. - fn storage_append( - &mut self, - key: Vec, - value: Vec, - ); + /// This assumes a specific format of the storage item. Also there is no way to undo this + /// operation. + fn storage_append(&mut self, key: Vec, value: Vec); /// Get the changes trie root of the current storage overlay at a block with given `parent`. /// @@ -220,6 +203,16 @@ pub trait Externalities: ExtensionStore { /// no transaction is open that can be closed. fn storage_commit_transaction(&mut self) -> Result<(), ()>; + /// Index specified transaction slice and store it. + fn storage_index_transaction(&mut self, _index: u32, _hash: &[u8], _size: u32) { + unimplemented!("storage_index_transaction"); + } + + /// Renew existing piece of transaction storage. + fn storage_renew_transaction_index(&mut self, _index: u32, _hash: &[u8]) { + unimplemented!("storage_renew_transaction_index"); + } + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -263,6 +256,23 @@ pub trait Externalities: ExtensionStore { /// /// Adds new storage keys to the DB tracking whitelist. fn set_whitelist(&mut self, new: Vec); + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Returns estimated proof size for the state queries so far. + /// Proof is reset on commit and wipe. + fn proof_size(&self) -> Option { + None + } + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Get all the keys that have been read or written to during the benchmark.
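A short sketch (not part of the diff) of how a caller might use the new limit-aware `kill_child_storage`; the child trie name is hypothetical, and deletion is spread across blocks rather than retried in a loop within one block, since the limit is not cumulative inside a single block:

```rust
use sp_externalities::Externalities;
use sp_storage::ChildInfo;

/// Remove at most `limit` backend keys of a large child trie in this block.
///
/// Returns `true` once the child trie is completely gone; callers would
/// invoke this again in subsequent blocks until that happens.
fn prune_child_trie_step(ext: &mut dyn Externalities, limit: u32) -> bool {
    let child_info = ChildInfo::new_default(b"example-child-trie");
    let (all_removed, _num_removed) = ext.kill_child_storage(&child_info, Some(limit));
    all_removed
}
```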
+ fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)>; } /// Extension for the [`Externalities`] trait. @@ -284,7 +294,7 @@ pub trait ExternalitiesExt { impl ExternalitiesExt for &mut dyn Externalities { fn extension(&mut self) -> Option<&mut T> { - self.extension_by_type_id(TypeId::of::()).and_then(Any::downcast_mut) + self.extension_by_type_id(TypeId::of::()).and_then(::downcast_mut) } fn register_extension(&mut self, ext: T) -> Result<(), Error> { diff --git a/primitives/externalities/src/scope_limited.rs b/primitives/externalities/src/scope_limited.rs index 1f70276f02d36..15a670a9abeee 100644 --- a/primitives/externalities/src/scope_limited.rs +++ b/primitives/externalities/src/scope_limited.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,11 +21,12 @@ use crate::Externalities; environmental::environmental!(ext: trait Externalities); -/// Set the given externalities while executing the given closure. To get access to the externalities -/// while executing the given closure [`with_externalities`] grants access to them. The externalities -/// are only set for the same thread this function was called from. +/// Set the given externalities while executing the given closure. To get access to the +/// externalities while executing the given closure [`with_externalities`] grants access to them. +/// The externalities are only set for the same thread this function was called from. pub fn set_and_run_with_externalities(ext: &mut dyn Externalities, f: F) -> R - where F: FnOnce() -> R +where + F: FnOnce() -> R, { ext::using(ext, f) } diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 88098139ceec0..c0c2a654270f7 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-grandpa" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,16 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -grandpa = { package = "finality-grandpa", version = "0.12.3", default-features = false, features = ["derive-codec"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +grandpa = { package = "finality-grandpa", version = "0.14.1", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } -serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-keystore = { version = "0.8.0", default-features = false, path = "../keystore", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +serde = { version = "1.0.126", optional = true, features = ["derive"] } +sp-api = { version = 
"4.0.0-dev", default-features = false, path = "../api" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../application-crypto" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-keystore = { version = "0.10.0-dev", default-features = false, path = "../keystore", optional = true } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } [features] default = ["std"] @@ -32,6 +33,7 @@ std = [ "log", "serde", "codec/std", + "scale-info/std", "grandpa/std", "sp-api/std", "sp-application-crypto/std", diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index 2c569fafda4ce..d99a4c1882222 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,12 +25,12 @@ extern crate alloc; #[cfg(feature = "std")] use serde::Serialize; -use codec::{Encode, Decode, Input, Codec}; -use sp_runtime::{ConsensusEngineId, RuntimeDebug, traits::NumberFor}; -use sp_std::borrow::Cow; -use sp_std::vec::Vec; +use codec::{Codec, Decode, Encode, Input}; +use scale_info::TypeInfo; #[cfg(feature = "std")] -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{traits::NumberFor, ConsensusEngineId, RuntimeDebug}; +use sp_std::{borrow::Cow, vec::Vec}; #[cfg(feature = "std")] use log::debug; @@ -39,7 +39,7 @@ use log::debug; pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::GRANDPA; mod app { - use sp_application_crypto::{app_crypto, key_types::GRANDPA, ed25519}; + use sp_application_crypto::{app_crypto, ed25519, key_types::GRANDPA}; app_crypto!(ed25519, GRANDPA); } @@ -102,7 +102,7 @@ pub enum ConsensusLog { /// This should be a pure function: i.e. as long as the runtime can interpret /// the digest type it should return the same result regardless of the current /// state. - #[codec(index = "1")] + #[codec(index = 1)] ScheduledChange(ScheduledChange), /// Force an authority set change. /// @@ -118,18 +118,18 @@ pub enum ConsensusLog { /// This should be a pure function: i.e. as long as the runtime can interpret /// the digest type it should return the same result regardless of the current /// state. - #[codec(index = "2")] + #[codec(index = 2)] ForcedChange(N, ScheduledChange), /// Note that the authority with given index is disabled until the next change. - #[codec(index = "3")] + #[codec(index = 3)] OnDisabled(AuthorityIndex), /// A signal to pause the current authority set after the given delay. /// After finalizing the block at _delay_ the authorities should stop voting. - #[codec(index = "4")] + #[codec(index = 4)] Pause(N), /// A signal to resume the current authority set after the given delay. /// After authoring the block at _delay_ the authorities should resume voting. - #[codec(index = "5")] + #[codec(index = 5)] Resume(N), } @@ -171,7 +171,7 @@ impl ConsensusLog { /// GRANDPA happens when a voter votes on the same round (either at prevote or /// precommit stage) for different blocks. Proving is achieved by collecting the /// signed messages of conflicting votes. 
-#[derive(Clone, Debug, Decode, Encode, PartialEq)] +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct EquivocationProof { set_id: SetId, equivocation: Equivocation, @@ -181,10 +181,7 @@ impl EquivocationProof { /// Create a new `EquivocationProof` for the given set id and using the /// given equivocation as proof. pub fn new(set_id: SetId, equivocation: Equivocation) -> Self { - EquivocationProof { - set_id, - equivocation, - } + EquivocationProof { set_id, equivocation } } /// Returns the set id at which the equivocation occurred. @@ -208,7 +205,7 @@ impl EquivocationProof { /// Wrapper object for GRANDPA equivocation proofs, useful for unifying prevote /// and precommit equivocations under a common type. -#[derive(Clone, Debug, Decode, Encode, PartialEq)] +#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub enum Equivocation { /// Proof of equivocation at prevote stage. Prevote(grandpa::Equivocation, AuthoritySignature>), @@ -252,6 +249,14 @@ impl Equivocation { Equivocation::Precommit(ref equivocation) => &equivocation.identity, } } + + /// Returns the round number when the equivocation happened. + pub fn round_number(&self) -> RoundNumber { + match self { + Equivocation::Prevote(ref equivocation) => equivocation.round_number, + Equivocation::Precommit(ref equivocation) => equivocation.round_number, + } + } } /// Verifies the equivocation proof by making sure that both votes target @@ -269,7 +274,7 @@ where if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash && $equivocation.first.0.target_number == $equivocation.second.0.target_number { - return false; + return false } // check signatures on both votes are valid @@ -289,17 +294,17 @@ where report.set_id, ); - return valid_first && valid_second; + return valid_first && valid_second }; } match report.equivocation { Equivocation::Prevote(equivocation) => { check!(equivocation, grandpa::Message::Prevote); - } + }, Equivocation::Precommit(equivocation) => { check!(equivocation, grandpa::Message::Precommit); - } + }, } } @@ -382,8 +387,8 @@ where H: Encode, N: Encode, { - use sp_core::crypto::Public; use sp_application_crypto::AppKey; + use sp_core::crypto::Public; use sp_std::convert::TryInto; let encoded = localized_payload(round, set_id, &message); @@ -392,13 +397,13 @@ where AuthorityId::ID, &public.to_public_crypto_pair(), &encoded[..], - ).ok()?.try_into().ok()?; + ) + .ok() + .flatten()? + .try_into() + .ok()?; - Some(grandpa::SignedMessage { - message, - signature, - id: public, - }) + Some(grandpa::SignedMessage { message, signature, id: public }) } /// WASM function call to check for pending changes. @@ -449,7 +454,7 @@ impl<'a> Decode for VersionedAuthorityList<'a> { fn decode(value: &mut I) -> Result { let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()); + return Err("unknown Grandpa authorities version".into()) } Ok(authorities.into()) } @@ -488,7 +493,7 @@ sp_api::decl_runtime_apis! { /// applied in the runtime after those N blocks have passed. /// /// The consensus protocol will coordinate the handoff externally. - #[api_version(2)] + #[api_version(3)] pub trait GrandpaApi { /// Get the current GRANDPA authorities and weights. This should not change except /// for when changes are scheduled and the corresponding delay has passed. @@ -526,5 +531,8 @@ sp_api::decl_runtime_apis! 
{ set_id: SetId, authority_id: AuthorityId, ) -> Option; + + /// Get current GRANDPA authority set id. + fn current_set_id() -> SetId; } } diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 10c66b73aec1a..23558750b5cf8 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-inherents" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,18 +15,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = { version = "0.10.0", optional = true } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -derive_more = { version = "0.99.2", optional = true } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-runtime = { version = "4.0.0-dev", path = "../runtime", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +thiserror = { version = "1.0.21", optional = true } +impl-trait-for-tuples = "0.2.0" +async-trait = { version = "0.1.50", optional = true } + +[dev-dependencies] +futures = "0.3.9" [features] default = [ "std" ] std = [ - "parking_lot", "sp-std/std", "codec/std", "sp-core/std", - "derive_more", + "thiserror", + "sp-runtime", + "async-trait", ] diff --git a/primitives/inherents/src/client_side.rs b/primitives/inherents/src/client_side.rs new file mode 100644 index 0000000000000..18877cae5f343 --- /dev/null +++ b/primitives/inherents/src/client_side.rs @@ -0,0 +1,127 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Error, InherentData, InherentIdentifier}; +use sp_runtime::traits::Block as BlockT; + +/// Something that can create inherent data providers. +/// +/// It is possible for the caller to provide custom arguments to the callee by setting the +/// `ExtraArgs` generic parameter. +/// +/// The crate already provides some convenience implementations of this trait for +/// `Box` and closures. So, it should not be required to implement +/// this trait manually. +#[async_trait::async_trait] +pub trait CreateInherentDataProviders: Send + Sync { + /// The inherent data providers that will be created. + type InherentDataProviders: InherentDataProvider; + + /// Create the inherent data providers at the given `parent` block using the given `extra_args`.
+ async fn create_inherent_data_providers( + &self, + parent: Block::Hash, + extra_args: ExtraArgs, + ) -> Result>; +} + +#[async_trait::async_trait] +impl CreateInherentDataProviders for F +where + Block: BlockT, + F: Fn(Block::Hash, ExtraArgs) -> Fut + Sync + Send, + Fut: std::future::Future>> + + Send + + 'static, + IDP: InherentDataProvider + 'static, + ExtraArgs: Send + 'static, +{ + type InherentDataProviders = IDP; + + async fn create_inherent_data_providers( + &self, + parent: Block::Hash, + extra_args: ExtraArgs, + ) -> Result> { + (*self)(parent, extra_args).await + } +} + +#[async_trait::async_trait] +impl + CreateInherentDataProviders + for Box> +{ + type InherentDataProviders = IDPS; + + async fn create_inherent_data_providers( + &self, + parent: Block::Hash, + extra_args: ExtraArgs, + ) -> Result> { + (**self).create_inherent_data_providers(parent, extra_args).await + } +} + +/// Something that provides inherent data. +#[async_trait::async_trait] +pub trait InherentDataProvider: Send + Sync { + /// Convenience function for creating [`InherentData`]. + /// + /// Basically maps around [`Self::provide_inherent_data`]. + fn create_inherent_data(&self) -> Result { + let mut inherent_data = InherentData::new(); + self.provide_inherent_data(&mut inherent_data)?; + Ok(inherent_data) + } + + /// Provide inherent data that should be included in a block. + /// + /// The data should be stored in the given `InherentData` structure. + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error>; + + /// Convert the given encoded error to a string. + /// + /// If the given error could not be decoded, `None` should be returned. + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option>; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +#[async_trait::async_trait] +impl InherentDataProvider for Tuple { + for_tuples!( where #( Tuple: Send + Sync )* ); + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + for_tuples!( #( Tuple.provide_inherent_data(inherent_data)?; )* ); + Ok(()) + } + + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + for_tuples!( #( + if let Some(r) = Tuple.try_handle_error(identifier, error).await { return Some(r) } + )* ); + + None + } +} diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index 9894296953528..90f4e455a42d3 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,65 +15,189 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Provides types and traits for creating and checking inherents. +//! Substrate inherent extrinsics //! -//! Each inherent is added to a produced block. Each runtime decides on which inherents it -//! wants to attach to its blocks. All data that is required for the runtime to create the inherents -//! is stored in the `InherentData`. This `InherentData` is constructed by the node and given to -//! the runtime. +//! Inherent extrinsics are extrinsics that are inherently added to each block. However, it is up to +//! 
the runtime implementation to require an inherent for each block or to make it optional. Inherents +//! are mainly used to pass data from the block producer to the runtime. So, inherents require some +//! part that is running on the client side and some part that is running on the runtime side. Any +//! data that is required by an inherent is passed as [`InherentData`] from the client to the +//! runtime when the inherents are constructed. //! -//! Types that provide data for inherents, should implement `InherentDataProvider` and need to be -//! registered at `InherentDataProviders`. +//! The process of constructing and applying inherents is the following: //! -//! In the runtime, modules need to implement `ProvideInherent` when they can create and/or check -//! inherents. By implementing `ProvideInherent`, a module is not enforced to create an inherent. -//! A module can also just check given inherents. For using a module as inherent provider, it needs -//! to be registered by the `construct_runtime!` macro. The macro documentation gives more -//! information on how that is done. +//! 1. The block producer first creates the [`InherentData`] by using the inherent data providers +//! that are created by [`CreateInherentDataProviders`]. +//! +//! 2. The [`InherentData`] is passed to the `inherent_extrinsics` function of the `BlockBuilder` +//! runtime api. This will call the runtime which will create all the inherents that should be +//! applied to the block. +//! +//! 3. Apply each inherent to the block like any normal extrinsic. +//! +//! On block import the inherents in the block are checked by calling the `check_inherents` runtime +//! API. This will also pass an instance of [`InherentData`] which the runtime can use to validate +//! all inherents. If some inherent data isn't required for validating an inherent, it can be +//! omitted when providing the inherent data providers for block import. +//! +//! # Providing inherent data +//! +//! To provide inherent data from the client side, [`InherentDataProvider`] should be implemented. +//! +//! ``` +//! use codec::Decode; +//! use sp_inherents::{InherentIdentifier, InherentData}; +//! +//! // This needs to be unique for the runtime. +//! const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; +//! +//! /// Some custom inherent data provider +//! struct InherentDataProvider; +//! +//! #[async_trait::async_trait] +//! impl sp_inherents::InherentDataProvider for InherentDataProvider { +//! fn provide_inherent_data( +//! &self, +//! inherent_data: &mut InherentData, +//! ) -> Result<(), sp_inherents::Error> { +//! // We can insert any data that implements [`codec::Encode`]. +//! inherent_data.put_data(INHERENT_IDENTIFIER, &"hello") +//! } +//! +//! /// When validating the inherents, the runtime implementation can throw errors. We support +//! /// two error modes, fatal and non-fatal errors. A fatal error means that the block is invalid +//! /// and this function here should return `Err(_)` to not import the block. Non-fatal errors +//! /// are allowed to be handled here in this function and the function should return `Ok(())` +//! /// if it could be handled. A non-fatal error is, for example, a block that is in the future +//! /// from the point of view of the local node. In such a case the block import +//! /// should be delayed until the block is valid. +//! /// +//! /// If this function returns `None`, it means that it is not responsible for this error or +//! /// that the error could not be interpreted. +//!
async fn try_handle_error( +//! &self, +//! identifier: &InherentIdentifier, +//! mut error: &[u8], +//! ) -> Option> { +//! // Check if this error belongs to us. +//! if *identifier != INHERENT_IDENTIFIER { +//! return None; +//! } +//! +//! // For demonstration purposes we are using a `String` as error type. In real +//! // implementations it is advised to not use `String`. +//! Some(Err( +//! sp_inherents::Error::Application(Box::from(String::decode(&mut error).ok()?)) +//! )) +//! } +//! } +//! ``` +//! +//! In the service the relevant inherent data providers need to be passed to the block production +//! and the block import. As already highlighted above, the providers can be different between +//! import and production. +//! +//! ``` +//! # use sp_runtime::testing::ExtrinsicWrapper; +//! # use sp_inherents::{InherentIdentifier, InherentData}; +//! # use futures::FutureExt; +//! # type Block = sp_runtime::testing::Block>; +//! # const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; +//! # struct InherentDataProvider; +//! # #[async_trait::async_trait] +//! # impl sp_inherents::InherentDataProvider for InherentDataProvider { +//! # fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), sp_inherents::Error> { +//! # inherent_data.put_data(INHERENT_IDENTIFIER, &"hello") +//! # } +//! # async fn try_handle_error( +//! # &self, +//! # _: &InherentIdentifier, +//! # _: &[u8], +//! # ) -> Option> { +//! # None +//! # } +//! # } +//! +//! async fn cool_consensus_block_production( +//! // The second parameter to the trait is the extra arguments that the caller +//! // can provide. +//! _: impl sp_inherents::CreateInherentDataProviders, +//! ) { +//! // do cool stuff +//! } +//! +//! async fn cool_consensus_block_import( +//! _: impl sp_inherents::CreateInherentDataProviders, +//! ) { +//! // do cool stuff +//! } +//! +//! async fn build_service(is_validator: bool) { +//! // For block import we don't pass any inherent data provider, because our runtime +//! // does not need any inherent data to validate the inherents. +//! let block_import = cool_consensus_block_import(|_parent, ()| async { Ok(()) }); +//! +//! let block_production = if is_validator { +//! // For block production we want to provide our inherent data provider +//! cool_consensus_block_production(|_parent, ()| async { +//! Ok(InherentDataProvider) +//! }).boxed() +//! } else { +//! futures::future::pending().boxed() +//! }; +//! +//! futures::pin_mut!(block_import); +//! +//! futures::future::select(block_import, block_production).await; +//! } +//! ``` +//! +//! # Creating the inherent +//! +//! As the inherents are created by the runtime, how to create them depends on the runtime +//! implementation. As already described above the client side passes the [`InherentData`] +//! and expects the runtime to construct the inherents out of it. When validating the inherents, +//! [`CheckInherentsResult`] is used to communicate the result to the client side. #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -use codec::{Encode, Decode}; - -use sp_std::{collections::btree_map::{BTreeMap, IntoIter, Entry}, vec::Vec}; - -#[cfg(feature = "std")] -use parking_lot::RwLock; - -#[cfg(feature = "std")] -use std::{sync::Arc, format}; +use codec::{Decode, Encode}; -/// An error that can occur within the inherent data system.
-#[cfg(feature = "std")] -#[derive(Debug, Encode, Decode, derive_more::Display)] -pub struct Error(String); +use sp_std::{ + collections::btree_map::{BTreeMap, Entry, IntoIter}, + vec::Vec, +}; #[cfg(feature = "std")] -impl> From for Error { - fn from(data: T) -> Error { - Self(data.into()) - } -} +mod client_side; #[cfg(feature = "std")] -impl Error { - /// Convert this error into a `String`. - pub fn into_string(self) -> String { - self.0 - } -} - -/// An error that can occur within the inherent data system. -#[derive(Encode, sp_core::RuntimeDebug)] -#[cfg(not(feature = "std"))] -pub struct Error(&'static str); - -#[cfg(not(feature = "std"))] -impl From<&'static str> for Error { - fn from(data: &'static str) -> Error { - Self(data) - } +pub use client_side::*; + +/// Errors that occur in context of inherents. +#[derive(Debug)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[allow(missing_docs)] +pub enum Error { + #[cfg_attr( + feature = "std", + error("Inherent data already exists for identifier: {}", "String::from_utf8_lossy(_0)") + )] + InherentDataExists(InherentIdentifier), + #[cfg_attr( + feature = "std", + error("Failed to decode inherent data for identifier: {}", "String::from_utf8_lossy(_1)") + )] + DecodingFailed(#[cfg_attr(feature = "std", source)] codec::Error, InherentIdentifier), + #[cfg_attr( + feature = "std", + error("There was already a fatal error reported and no other errors are allowed") + )] + FatalErrorReported, + #[cfg(feature = "std")] + #[error(transparent)] + Application(#[from] Box), } /// An identifier for an inherent. @@ -83,7 +207,7 @@ pub type InherentIdentifier = [u8; 8]; #[derive(Clone, Default, Encode, Decode)] pub struct InherentData { /// All inherent data encoded with parity-scale-codec and an identifier. - data: BTreeMap> + data: BTreeMap>, } impl InherentData { @@ -110,20 +234,14 @@ impl InherentData { entry.insert(inherent.encode()); Ok(()) }, - Entry::Occupied(_) => { - Err("Inherent with same identifier already exists!".into()) - } + Entry::Occupied(_) => Err(Error::InherentDataExists(identifier)), } } /// Replace the data for an inherent. /// /// If it does not exist, the data is just inserted. 
- pub fn replace_data( - &mut self, - identifier: InherentIdentifier, - inherent: &I, - ) { + pub fn replace_data(&mut self, identifier: InherentIdentifier, inherent: &I) { self.data.insert(identifier, inherent.encode()); } @@ -139,13 +257,10 @@ impl InherentData { identifier: &InherentIdentifier, ) -> Result, Error> { match self.data.get(identifier) { - Some(inherent) => - I::decode(&mut &inherent[..]) - .map_err(|_| { - "Could not decode requested inherent type!".into() - }) - .map(Some), - None => Ok(None) + Some(inherent) => I::decode(&mut &inherent[..]) + .map_err(|e| Error::DecodingFailed(e, *identifier)) + .map(Some), + None => Ok(None), } } @@ -173,11 +288,7 @@ pub struct CheckInherentsResult { impl Default for CheckInherentsResult { fn default() -> Self { - Self { - okay: true, - errors: InherentData::new(), - fatal_error: false, - } + Self { okay: true, errors: InherentData::new(), fatal_error: false } } } @@ -202,7 +313,7 @@ impl CheckInherentsResult { ) -> Result<(), Error> { // Don't accept any other error if self.fatal_error { - return Err("No other errors are accepted after an hard error!".into()) + return Err(Error::FatalErrorReported) } if error.is_fatal_error() { @@ -251,123 +362,11 @@ impl CheckInherentsResult { impl PartialEq for CheckInherentsResult { fn eq(&self, other: &Self) -> bool { self.fatal_error == other.fatal_error && - self.okay == other.okay && - self.errors.data == other.errors.data + self.okay == other.okay && + self.errors.data == other.errors.data } } -/// All `InherentData` providers. -#[cfg(feature = "std")] -#[derive(Clone, Default)] -pub struct InherentDataProviders { - providers: Arc>>>, -} - -#[cfg(feature = "std")] -impl InherentDataProviders { - /// Create a new instance. - pub fn new() -> Self { - Self::default() - } - - /// Register an `InherentData` provider. - /// - /// The registration order is preserved and this order will also be used when creating the - /// inherent data. - /// - /// # Result - /// - /// Will return an error, if a provider with the same identifier already exists. - pub fn register_provider( - &self, - provider: P, - ) -> Result<(), Error> { - if self.has_provider(&provider.inherent_identifier()) { - Err( - format!( - "Inherent data provider with identifier {:?} already exists!", - &provider.inherent_identifier() - ).into() - ) - } else { - provider.on_register(self)?; - self.providers.write().push(Box::new(provider)); - Ok(()) - } - } - - /// Returns if a provider for the given identifier exists. - pub fn has_provider(&self, identifier: &InherentIdentifier) -> bool { - self.providers.read().iter().any(|p| p.inherent_identifier() == identifier) - } - - /// Create inherent data. - pub fn create_inherent_data(&self) -> Result { - let mut data = InherentData::new(); - self.providers.read().iter().try_for_each(|p| { - p.provide_inherent_data(&mut data) - .map_err(|e| format!("Error for `{:?}`: {:?}", p.inherent_identifier(), e)) - })?; - Ok(data) - } - - /// Converts a given encoded error into a `String`. - /// - /// Useful if the implementation encounters an error for an identifier it does not know. 
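The `put_data`/`replace_data`/`get_data` trio above defines the whole life cycle of a value inside `InherentData`. A minimal sketch of it in isolation (the identifier is hypothetical):

```rust
use sp_inherents::{InherentData, InherentIdentifier};

const ID: InherentIdentifier = *b"example0";

fn main() -> Result<(), sp_inherents::Error> {
    let mut data = InherentData::new();
    data.put_data(ID, &42u32)?;
    // A second `put_data` under the same identifier fails with
    // `Error::InherentDataExists`; `replace_data` overwrites instead.
    assert!(data.put_data(ID, &43u32).is_err());
    data.replace_data(ID, &43u32);
    // Values are decoded back on request; `Ok(None)` would mean "not present".
    assert_eq!(data.get_data::<u32>(&ID)?, Some(43));
    Ok(())
}
```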
- pub fn error_to_string(&self, identifier: &InherentIdentifier, error: &[u8]) -> String { - let res = self.providers.read().iter().filter_map(|p| - if p.inherent_identifier() == identifier { - Some( - p.error_to_string(error) - .unwrap_or_else(|| error_to_string_fallback(identifier)) - ) - } else { - None - } - ).next(); - - match res { - Some(res) => res, - None => format!( - "Error while checking inherent of type \"{}\", but this inherent type is unknown.", - String::from_utf8_lossy(identifier) - ) - } - } -} - -/// Something that provides inherent data. -#[cfg(feature = "std")] -pub trait ProvideInherentData { - /// Is called when this inherent data provider is registered at the given - /// `InherentDataProviders`. - fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { - Ok(()) - } - - /// The identifier of the inherent for that data will be provided. - fn inherent_identifier(&self) -> &'static InherentIdentifier; - - /// Provide inherent data that should be included in a block. - /// - /// The data should be stored in the given `InherentData` structure. - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error>; - - /// Convert the given encoded error to a string. - /// - /// If the given error could not be decoded, `None` should be returned. - fn error_to_string(&self, error: &[u8]) -> Option; -} - -/// A fallback function, if the decoding of an error fails. -#[cfg(feature = "std")] -fn error_to_string_fallback(identifier: &InherentIdentifier) -> String { - format!( - "Error while checking inherent of type \"{}\", but error could not be decoded.", - String::from_utf8_lossy(identifier) - ) -} - /// Did we encounter a fatal error while checking an inherent? /// /// A fatal error is everything that fails while checking an inherent error, e.g. the inherent @@ -381,9 +380,9 @@ pub trait IsFatalError { fn is_fatal_error(&self) -> bool; } -/// Auxiliary to make any given error resolve to `is_fatal_error() == true`. -#[derive(Encode)] -pub struct MakeFatalError(E); +/// Auxiliary to make any given error resolve to `is_fatal_error() == true` for [`IsFatalError`]. +#[derive(codec::Encode)] +pub struct MakeFatalError(E); impl From for MakeFatalError { fn from(err: E) -> Self { @@ -397,34 +396,10 @@ impl IsFatalError for MakeFatalError { } } -/// A module that provides an inherent and may also verifies it. -pub trait ProvideInherent { - /// The call type of the module. - type Call; - /// The error returned by `check_inherent`. - type Error: codec::Encode + IsFatalError; - /// The inherent identifier used by this inherent. - const INHERENT_IDENTIFIER: self::InherentIdentifier; - - /// Create an inherent out of the given `InherentData`. - fn create_inherent(data: &InherentData) -> Option; - - /// If `Some`, indicates that an inherent is required. Check will return the inner error if no - /// inherent is found. If `Err`, indicates that the check failed and further operations should - /// be aborted. - fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { Ok(None) } - - /// Check the given inherent if it is valid. - /// Checking the inherent is optional and can be omitted. 
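To illustrate the fatal/non-fatal split that `IsFatalError` (and the `MakeFatalError` shortcut above) encodes, here is a sketch of a custom inherent error; the enum and its variants are hypothetical:

```rust
use codec::Encode;
use sp_inherents::IsFatalError;

#[derive(Encode)]
enum ExampleInherentError {
    /// The block looks like it is from the future; import may be retried.
    BlockInFuture,
    /// The inherent value is inconsistent; the block is invalid.
    Invalid,
}

impl IsFatalError for ExampleInherentError {
    fn is_fatal_error(&self) -> bool {
        // Only `Invalid` must abort checking and reject the block.
        matches!(self, Self::Invalid)
    }
}
```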
- fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { - Ok(()) - } -} - #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode}; + use codec::{Decode, Encode}; const TEST_INHERENT_0: InherentIdentifier = *b"testinh0"; const TEST_INHERENT_1: InherentIdentifier = *b"testinh1"; @@ -462,94 +437,32 @@ mod tests { } #[derive(Clone)] - struct TestInherentDataProvider { - registered: Arc>, - } - - impl TestInherentDataProvider { - fn new() -> Self { - let inst = Self { - registered: Default::default(), - }; - - // just make sure - assert!(!inst.is_registered()); - - inst - } - - fn is_registered(&self) -> bool { - *self.registered.read() - } - } + struct TestInherentDataProvider; const ERROR_TO_STRING: &str = "Found error!"; - impl ProvideInherentData for TestInherentDataProvider { - fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { - *self.registered.write() = true; - Ok(()) - } - - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &TEST_INHERENT_0 - } - + #[async_trait::async_trait] + impl InherentDataProvider for TestInherentDataProvider { fn provide_inherent_data(&self, data: &mut InherentData) -> Result<(), Error> { data.put_data(TEST_INHERENT_0, &42) } - fn error_to_string(&self, _: &[u8]) -> Option { - Some(ERROR_TO_STRING.into()) + async fn try_handle_error( + &self, + _: &InherentIdentifier, + _: &[u8], + ) -> Option> { + Some(Err(Error::Application(Box::from(ERROR_TO_STRING)))) } } #[test] - fn registering_inherent_provider() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - assert!(providers.has_provider(provider.inherent_identifier())); - - // Second time should fail - assert!(providers.register_provider(provider.clone()).is_err()); - } - - #[test] - fn create_inherent_data_from_all_providers() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); + fn create_inherent_data() { + let provider = TestInherentDataProvider; - let inherent_data = providers.create_inherent_data().unwrap(); + let inherent_data = provider.create_inherent_data().unwrap(); - assert_eq!( - inherent_data.get_data::(provider.inherent_identifier()).unwrap().unwrap(), - 42u32 - ); - } - - #[test] - fn encoded_error_to_string() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - - assert_eq!( - &providers.error_to_string(&TEST_INHERENT_0, &[1, 2]), ERROR_TO_STRING - ); - - assert!( - providers - .error_to_string(&TEST_INHERENT_1, &[1, 2]) - .contains("inherent type is unknown") - ); + assert_eq!(inherent_data.get_data::(&TEST_INHERENT_0).unwrap().unwrap(), 42u32); } #[test] diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index e8483b2ef68c5..d3a2b56705926 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-io" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,22 +15,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +codec = { package = "parity-scale-codec", version = 
"2.0.0", default-features = false } hash-db = { version = "0.15.2", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-keystore = { version = "0.8.0", default-features = false, optional = true, path = "../keystore" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "0.8.0", optional = true, path = "../../primitives/state-machine" } -sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface", default-features = false } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "2.0.0", optional = true, path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-keystore = { version = "0.10.0-dev", default-features = false, optional = true, path = "../keystore" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +libsecp256k1 = { version = "0.6", optional = true } +sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../state-machine" } +sp-wasm-interface = { version = "4.0.0-dev", path = "../wasm-interface", default-features = false } +sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } +sp-maybe-compressed-blob = { version = "4.0.0-dev", optional = true, path = "../maybe-compressed-blob" } +sp-trie = { version = "4.0.0-dev", optional = true, path = "../trie" } +sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } +sp-tracing = { version = "4.0.0-dev", default-features = false, path = "../tracing" } log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } -parking_lot = { version = "0.10.0", optional = true } -tracing = { version = "0.1.19", default-features = false } +parking_lot = { version = "0.11.1", optional = true } +tracing = { version = "0.1.25", default-features = false } tracing-core = { version = "0.1.17", default-features = false} [features] @@ -47,6 +48,7 @@ std = [ "sp-runtime-interface/std", "sp-externalities", "sp-wasm-interface/std", + "sp-maybe-compressed-blob", "sp-tracing/std", "tracing/std", "tracing-core/std", diff --git a/primitives/io/src/batch_verifier.rs b/primitives/io/src/batch_verifier.rs index 39229b1200b91..b6da1d85907bd 100644 --- a/primitives/io/src/batch_verifier.rs +++ b/primitives/io/src/batch_verifier.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,12 @@ //! Batch/parallel verification. 
-use sp_core::{ed25519, sr25519, ecdsa, crypto::Pair, traits::SpawnNamed}; -use std::sync::{Arc, atomic::{AtomicBool, Ordering as AtomicOrdering}}; -use futures::{future::FutureExt, channel::oneshot}; +use futures::{channel::oneshot, future::FutureExt}; +use sp_core::{crypto::Pair, ecdsa, ed25519, sr25519, traits::SpawnNamed}; +use std::sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, +}; #[derive(Debug, Clone)] struct Sr25519BatchItem { @@ -61,7 +64,9 @@ impl BatchVerifier { name: &'static str, ) -> bool { // there is already invalid transaction encountered - if self.invalid.load(AtomicOrdering::Relaxed) { return false; } + if self.invalid.load(AtomicOrdering::Relaxed) { + return false + } let invalid_clone = self.invalid.clone(); let (sender, receiver) = oneshot::channel(); @@ -78,7 +83,8 @@ impl BatchVerifier { log::warn!("Verification halted while result was pending"); invalid_clone.store(true, AtomicOrdering::Relaxed); } - }.boxed(), + } + .boxed(), ); true @@ -110,7 +116,9 @@ impl BatchVerifier { pub_key: sr25519::Public, message: Vec, ) -> bool { - if self.invalid.load(AtomicOrdering::Relaxed) { return false; } + if self.invalid.load(AtomicOrdering::Relaxed) { + return false + } self.sr25519_items.push(Sr25519BatchItem { signature, pub_key, message }); if self.sr25519_items.len() >= 128 { @@ -163,7 +171,7 @@ impl BatchVerifier { ); if !Self::verify_sr25519_batch(std::mem::take(&mut self.sr25519_items)) { - return false; + return false } if pending.len() > 0 { @@ -172,10 +180,12 @@ impl BatchVerifier { "substrate_batch_verify_join", async move { futures::future::join_all(pending).await; - sender.send(()) - .expect("Channel never panics if receiver is live. \ - Receiver is always live until received this data; qed. "); - }.boxed(), + sender.send(()).expect( + "Channel never panics if receiver is live. \ + Receiver is always live until received this data; qed. ", + ); + } + .boxed(), ); if receiver.recv().is_err() { @@ -184,7 +194,7 @@ impl BatchVerifier { "Haven't received async result from verification task. Returning false.", ); - return false; + return false } } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 0cdd6edfd8599..6f6b26d76a960 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,52 +18,55 @@ //! I/O host interface for substrate runtime. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc_error_handler))] - -#![cfg_attr(feature = "std", - doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), - doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." 
+)] use sp_std::vec::Vec; -#[cfg(feature = "std")] -use sp_std::ops::Deref; - #[cfg(feature = "std")] use tracing; #[cfg(feature = "std")] use sp_core::{ crypto::Pair, - traits::{CallInWasmExt, TaskExecutorExt, RuntimeSpawnExt}, - offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, + offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, storage::ChildInfo, + traits::{RuntimeSpawnExt, TaskExecutorExt}, }; #[cfg(feature = "std")] use sp_keystore::{KeystoreExt, SyncCryptoStore}; use sp_core::{ - OpaquePeerId, crypto::KeyTypeId, ed25519, sr25519, ecdsa, H256, LogLevel, + crypto::KeyTypeId, + ecdsa, ed25519, offchain::{ Timestamp, HttpRequestId, HttpRequestStatus, HttpError, IpfsRequest, IpfsRequestId, IpfsRequestStatus, StorageKind, OpaqueNetworkState, }, + sr25519, LogLevel, LogLevelFilter, OpaquePeerId, H256, }; #[cfg(feature = "std")] -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; -use sp_runtime_interface::{runtime_interface, Pointer}; -use sp_runtime_interface::pass_by::PassBy; +use sp_runtime_interface::{ + pass_by::{PassBy, PassByCodec}, + runtime_interface, Pointer, +}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use sp_externalities::{ExternalitiesExt, Externalities}; +use sp_externalities::{Externalities, ExternalitiesExt}; #[cfg(feature = "std")] mod batch_verifier; @@ -71,6 +74,8 @@ mod batch_verifier; #[cfg(feature = "std")] use batch_verifier::BatchVerifier; +const LOG_TARGET: &str = "runtime::io"; + /// Error verifying ECDSA signature #[derive(Encode, Decode)] pub enum EcdsaVerifyError { @@ -82,6 +87,16 @@ pub enum EcdsaVerifyError { BadSignature, } +/// The outcome of calling `storage_kill`. Returned value is the number of storage items +/// removed from the trie by making the `storage_kill` call. +#[derive(PassByCodec, Encode, Decode)] +pub enum KillStorageResult { + /// No key remains in the child trie. + AllRemoved(u32), + /// At least one key still resides in the child trie due to the supplied limit. + SomeRemaining(u32), +} + /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -122,7 +137,41 @@ pub trait Storage { /// Clear the storage of each key-value pair where the key starts with the given `prefix`. fn clear_prefix(&mut self, prefix: &[u8]) { - Externalities::clear_prefix(*self, prefix) + let _ = Externalities::clear_prefix(*self, prefix, None); + } + + /// Clear the storage of each key-value pair where the key starts with the given `prefix`. + /// + /// # Limit + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend if + /// it is set to `Some`. No limit is applied when `limit` is set to `None`. + /// + /// The limit can be used to partially delete a prefix storage in case it is too large + /// to delete in one go (block). + /// + /// It returns a boolean `false` iff some keys are remaining in + /// the prefix after the function returns. Also returns a `u32` with + /// the number of keys removed in the process. + /// + /// # Note + /// + /// Please note that keys that are residing in the overlay for that prefix when + /// issuing this call are all deleted without counting towards the `limit`. Only keys + /// written during the current block are part of the overlay. Deleting with a `limit` + /// mostly makes sense with an empty overlay for that prefix.
+ /// + /// Calling this function multiple times per block for the same `prefix` does + /// not make much sense because it is not cumulative when called inside the same block. + /// Use this function to distribute the deletion of a single child trie across multiple + /// blocks. + #[version(2)] + fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> KillStorageResult { + let (all_removed, num_removed) = Externalities::clear_prefix(*self, prefix, limit); + match all_removed { + true => KillStorageResult::AllRemoved(num_removed), + false => KillStorageResult::SomeRemaining(num_removed), + } } /// Append the encoded `value` to the storage item at `key`. @@ -210,13 +259,9 @@ pub trait Storage { pub trait DefaultChildStorage { /// Get a default child storage value for a given key. /// - /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the parent trie. - /// Result is `None` if the value for `key` in the child storage can not be found. - fn get( - &self, - storage_key: &[u8], - key: &[u8], - ) -> Option> { + /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the + /// parent trie. Result is `None` if the value for `key` in the child storage can not be found. + fn get(&self, storage_key: &[u8], key: &[u8]) -> Option> { let child_info = ChildInfo::new_default(storage_key); self.child_storage(&child_info, key).map(|s| s.to_vec()) } @@ -236,25 +281,19 @@ pub trait DefaultChildStorage { value_offset: u32, ) -> Option { let child_info = ChildInfo::new_default(storage_key); - self.child_storage(&child_info, key) - .map(|value| { - let value_offset = value_offset as usize; - let data = &value[value_offset.min(value.len())..]; - let written = std::cmp::min(data.len(), value_out.len()); - value_out[..written].copy_from_slice(&data[..written]); - data.len() as u32 - }) + self.child_storage(&child_info, key).map(|value| { + let value_offset = value_offset as usize; + let data = &value[value_offset.min(value.len())..]; + let written = std::cmp::min(data.len(), value_out.len()); + value_out[..written].copy_from_slice(&data[..written]); + data.len() as u32 + }) } /// Set a child storage value. /// /// Set `key` to `value` in the child storage denoted by `storage_key`. - fn set( - &mut self, - storage_key: &[u8], - key: &[u8], - value: &[u8], - ) { + fn set(&mut self, storage_key: &[u8], key: &[u8], value: &[u8]) { let child_info = ChildInfo::new_default(storage_key); self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } @@ -262,11 +301,7 @@ pub trait DefaultChildStorage { /// Clear a child storage key. /// /// For the default child storage at `storage_key`, clear value at `key`. - fn clear( - &mut self, - storage_key: &[u8], - key: &[u8], - ) { + fn clear(&mut self, storage_key: &[u8], key: &[u8]) { let child_info = ChildInfo::new_default(storage_key); self.clear_child_storage(&child_info, key); } @@ -275,22 +310,38 @@ pub trait DefaultChildStorage { /// /// If it exists, the child storage for `storage_key` /// is removed. - fn storage_kill( - &mut self, - storage_key: &[u8], - ) { + fn storage_kill(&mut self, storage_key: &[u8]) { let child_info = ChildInfo::new_default(storage_key); - self.kill_child_storage(&child_info); + self.kill_child_storage(&child_info, None); + } + + /// Clear a child storage key. + /// + /// See `Storage` module `clear_prefix` documentation for `limit` usage. 
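A sketch of how runtime code might drive the limited deletion from `sp_io` (the prefix and limit are arbitrary; it assumes the versioned `clear_prefix` resolves to the limit-taking variant when building against this crate):

```rust
use sp_io::{storage, KillStorageResult};

/// Delete up to 512 backend keys under `prefix`, reporting progress.
fn prune_prefix_step(prefix: &[u8]) -> (bool, u32) {
    match storage::clear_prefix(prefix, Some(512)) {
        // Everything under the prefix is gone.
        KillStorageResult::AllRemoved(n) => (true, n),
        // Backend keys remain; schedule another step in a later block.
        KillStorageResult::SomeRemaining(n) => (false, n),
    }
}
```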
+ #[version(2)] + fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> bool { + let child_info = ChildInfo::new_default(storage_key); + let (all_removed, _num_removed) = self.kill_child_storage(&child_info, limit); + all_removed + } + + /// Clear a child storage key. + /// + /// See `Storage` module `clear_prefix` documentation for `limit` usage. + #[version(3)] + fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> KillStorageResult { + let child_info = ChildInfo::new_default(storage_key); + let (all_removed, num_removed) = self.kill_child_storage(&child_info, limit); + match all_removed { + true => KillStorageResult::AllRemoved(num_removed), + false => KillStorageResult::SomeRemaining(num_removed), + } } /// Check a child storage key. /// /// Check whether the given `key` exists in default child defined at `storage_key`. - fn exists( - &self, - storage_key: &[u8], - key: &[u8], - ) -> bool { + fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool { let child_info = ChildInfo::new_default(storage_key); self.exists_child_storage(&child_info, key) } @@ -298,13 +349,27 @@ pub trait DefaultChildStorage { /// Clear child default key by prefix. /// /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. + fn clear_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) { + let child_info = ChildInfo::new_default(storage_key); + let _ = self.clear_child_prefix(&child_info, prefix, None); + } + + /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. + /// + /// See `Storage` module `clear_prefix` documentation for `limit` usage. + #[version(2)] fn clear_prefix( &mut self, storage_key: &[u8], prefix: &[u8], - ) { + limit: Option, + ) -> KillStorageResult { let child_info = ChildInfo::new_default(storage_key); - self.clear_child_prefix(&child_info, prefix); + let (all_removed, num_removed) = self.clear_child_prefix(&child_info, prefix, limit); + match all_removed { + true => KillStorageResult::AllRemoved(num_removed), + false => KillStorageResult::SomeRemaining(num_removed), + } } /// Default child root calculation. @@ -313,10 +378,7 @@ pub trait DefaultChildStorage { /// The hashing algorithm is defined by the `Block`. /// /// Returns a `Vec` that holds the SCALE encoded hash. - fn root( - &mut self, - storage_key: &[u8], - ) -> Vec { + fn root(&mut self, storage_key: &[u8]) -> Vec { let child_info = ChildInfo::new_default(storage_key); self.child_storage_root(&child_info) } @@ -324,11 +386,7 @@ pub trait DefaultChildStorage { /// Child storage key iteration. /// /// Get the next key in storage after the given one in lexicographic order in child storage. 
- fn next_key( - &mut self, - storage_key: &[u8], - key: &[u8], - ) -> Option> { + fn next_key(&mut self, storage_key: &[u8], key: &[u8]) -> Option> { let child_info = ChildInfo::new_default(storage_key); self.next_child_storage_key(&child_info, key) } @@ -356,15 +414,34 @@ pub trait Trie { fn keccak_256_ordered_root(input: Vec>) -> H256 { Layout::::ordered_trie_root(input) } + + /// Verify trie proof + fn blake2_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { + sp_trie::verify_trie_proof::, _, _, _>( + &root, + proof, + &[(key, Some(value))], + ) + .is_ok() + } + + /// Verify trie proof + fn keccak_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { + sp_trie::verify_trie_proof::, _, _, _>( + &root, + proof, + &[(key, Some(value))], + ) + .is_ok() + } } -/// Interface that provides miscellaneous functions for communicating between the runtime and the node. +/// Interface that provides miscellaneous functions for communicating between the runtime and the +/// node. #[runtime_interface] pub trait Misc { - /// The current relay chain identifier. - fn chain_id(&self) -> u64 { - sp_externalities::Externalities::chain_id(*self) - } + // NOTE: We use the target 'runtime' for messages produced by general printing functions, + // instead of LOG_TARGET. /// Print a number. fn print_num(val: u64) { @@ -390,28 +467,34 @@ pub trait Misc { /// /// # Performance /// - /// Calling this function is very expensive and should only be done very occasionally. - /// For getting the runtime version, it requires instantiating the wasm blob and calling a - /// function in this blob. + /// This function may be very expensive to call depending on the wasm binary. It may be + /// relatively cheap if the wasm binary contains version information. In that case, + /// decompression of the wasm blob is the dominating factor. + /// + /// If the wasm binary does not have the version information attached, then a legacy mechanism + /// may be involved. This means that a runtime call will be performed to query the version. + /// + /// Calling into the runtime may be incredibly expensive and should be approached with care. fn runtime_version(&mut self, wasm: &[u8]) -> Option> { - // Create some dummy externalities, `Core_version` should not write data anyway. + use sp_core::traits::ReadRuntimeVersionExt; + let mut ext = sp_state_machine::BasicExternalities::default(); - self.extension::() - .expect("No `CallInWasmExt` associated for the current context!") - .call_in_wasm( - wasm, - None, - "Core_version", - &[], - &mut ext, - // If a runtime upgrade introduces new host functions that are not provided by - // the node, we should not fail at instantiation. Otherwise nodes that are - // updated could run this successfully and it could lead to a storage root - // mismatch when importing this block. - sp_core::traits::MissingHostFunctions::Allow, - ) - .ok() + match self + .extension::() + .expect("No `ReadRuntimeVersionExt` associated for the current context!") + .read_runtime_version(wasm, &mut ext) + { + Ok(v) => Some(v), + Err(err) => { + log::debug!( + target: LOG_TARGET, + "cannot read version from the given runtime: {}", + err, + ); + None + }, + } } } @@ -420,7 +503,8 @@ pub trait Misc { pub trait Crypto { /// Returns all `ed25519` public keys for the given key id from the keystore.
fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::ed25519_public_keys(keystore, id) } @@ -433,7 +517,8 @@ pub trait Crypto { /// Returns the public key. fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> ed25519::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::ed25519_generate_new(keystore, id, seed) .expect("`ed25519_generate` failed") @@ -449,21 +534,19 @@ pub trait Crypto { pub_key: &ed25519::Public, msg: &[u8], ) -> Option { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) - .map(|sig| ed25519::Signature::from_slice(sig.as_slice())) .ok() + .flatten() + .map(|sig| ed25519::Signature::from_slice(sig.as_slice())) } /// Verify `ed25519` signature. /// /// Returns `true` when the verification was successful. - fn ed25519_verify( - sig: &ed25519::Signature, - msg: &[u8], - pub_key: &ed25519::Public, - ) -> bool { + fn ed25519_verify(sig: &ed25519::Signature, msg: &[u8], pub_key: &ed25519::Public) -> bool { ed25519::Pair::verify(sig, msg, pub_key) } @@ -481,20 +564,16 @@ pub trait Crypto { msg: &[u8], pub_key: &ed25519::Public, ) -> bool { - self.extension::().map( - |extension| extension.push_ed25519(sig.clone(), pub_key.clone(), msg.to_vec()) - ).unwrap_or_else(|| ed25519_verify(sig, msg, pub_key)) + self.extension::() + .map(|extension| extension.push_ed25519(sig.clone(), pub_key.clone(), msg.to_vec())) + .unwrap_or_else(|| ed25519_verify(sig, msg, pub_key)) } /// Verify `sr25519` signature. /// /// Returns `true` when the verification was successful. #[version(2)] - fn sr25519_verify( - sig: &sr25519::Signature, - msg: &[u8], - pub_key: &sr25519::Public, - ) -> bool { + fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pub_key: &sr25519::Public) -> bool { sr25519::Pair::verify(sig, msg, pub_key) } @@ -512,14 +591,15 @@ pub trait Crypto { msg: &[u8], pub_key: &sr25519::Public, ) -> bool { - self.extension::().map( - |extension| extension.push_sr25519(sig.clone(), pub_key.clone(), msg.to_vec()) - ).unwrap_or_else(|| sr25519_verify(sig, msg, pub_key)) + self.extension::() + .map(|extension| extension.push_sr25519(sig.clone(), pub_key.clone(), msg.to_vec())) + .unwrap_or_else(|| sr25519_verify(sig, msg, pub_key)) } /// Start verification extension. fn start_batch_verify(&mut self) { - let scheduler = self.extension::() + let scheduler = self + .extension::() .expect("No task executor associated with the current context!") .clone(); @@ -534,7 +614,8 @@ pub trait Crypto { /// /// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called). fn finish_batch_verify(&mut self) -> bool { - let result = self.extension::() + let result = self + .extension::() .expect("`finish_batch_verify` should only be called after `start_batch_verify`") .verify_and_clear(); @@ -546,7 +627,8 @@ pub trait Crypto { /// Returns all `sr25519` public keys for the given key id from the keystore. 
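The `start_batch_verify`/`finish_batch_verify` pair above brackets a set of deferred signature checks. A sketch of the calling pattern from runtime code, mirroring the tests later in this diff; signatures and keys are caller-supplied placeholders:

```rust
use sp_core::{ed25519, sr25519};

// Sketch: queue two checks, then resolve them in one call.
fn verify_pair_of_sigs(
    msg: &[u8],
    sr_sig: &sr25519::Signature,
    sr_pub: &sr25519::Public,
    ed_sig: &ed25519::Signature,
    ed_pub: &ed25519::Public,
) -> bool {
    sp_io::crypto::start_batch_verify();
    sp_io::crypto::sr25519_batch_verify(sr_sig, msg, sr_pub);
    sp_io::crypto::ed25519_batch_verify(ed_sig, msg, ed_pub);
    // `true` only if every queued verification succeeded.
    sp_io::crypto::finish_batch_verify()
}
```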
fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &*** self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sr25519_public_keys(keystore, id) } @@ -559,7 +641,8 @@ pub trait Crypto { /// Returns the public key. fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> sr25519::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sr25519_generate_new(keystore, id, seed) .expect("`sr25519_generate` failed") @@ -575,11 +658,13 @@ pub trait Crypto { pub_key: &sr25519::Public, msg: &[u8], ) -> Option { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) - .map(|sig| sr25519::Signature::from_slice(sig.as_slice())) .ok() + .flatten() + .map(|sig| sr25519::Signature::from_slice(sig.as_slice())) } /// Verify an `sr25519` signature. @@ -592,7 +677,8 @@ pub trait Crypto { /// Returns all `ecdsa` public keys for the given key id from the keystore. fn ecdsa_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::ecdsa_public_keys(keystore, id) } @@ -605,10 +691,10 @@ pub trait Crypto { /// Returns the public key. fn ecdsa_generate(&mut self, id: KeyTypeId, seed: Option>) -> ecdsa::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::ecdsa_generate_new(keystore, id, seed) - .expect("`ecdsa_generate` failed") + SyncCryptoStore::ecdsa_generate_new(keystore, id, seed).expect("`ecdsa_generate` failed") } /// Sign the given `msg` with the `ecdsa` key that corresponds to the given public key and @@ -621,21 +707,27 @@ pub trait Crypto { pub_key: &ecdsa::Public, msg: &[u8], ) -> Option { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) - .map(|sig| ecdsa::Signature::from_slice(sig.as_slice())) .ok() + .flatten() + .map(|sig| ecdsa::Signature::from_slice(sig.as_slice())) } /// Verify `ecdsa` signature. /// /// Returns `true` when the verification was successful. - fn ecdsa_verify( - sig: &ecdsa::Signature, - msg: &[u8], - pub_key: &ecdsa::Public, - ) -> bool { + fn ecdsa_verify(sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public) -> bool { + ecdsa::Pair::verify_deprecated(sig, msg, pub_key) + } + + /// Verify `ecdsa` signature. + /// + /// Returns `true` when the verification was successful. 
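A sketch tying together the keystore-backed generate/sign/verify functions shown in this hunk; it assumes a registered keystore extension, and the key type is a hypothetical value, not one from this diff:

```rust
use sp_core::{crypto::KeyTypeId, sr25519};

// Hypothetical key type used for illustration only.
const DEMO_KEY_TYPE: KeyTypeId = KeyTypeId(*b"demo");

fn sign_and_check(msg: &[u8]) -> bool {
    // Creates a fresh key in the keystore backing the extension.
    let public: sr25519::Public = sp_io::crypto::sr25519_generate(DEMO_KEY_TYPE, None);
    match sp_io::crypto::sr25519_sign(DEMO_KEY_TYPE, &public, msg) {
        Some(sig) => sp_io::crypto::sr25519_verify(&sig, msg, &public),
        None => false, // key not found or the keystore refused to sign
    }
}
```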
+ #[version(2)] + fn ecdsa_verify(sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public) -> bool { ecdsa::Pair::verify(sig, msg, pub_key) } @@ -653,9 +745,9 @@ pub trait Crypto { msg: &[u8], pub_key: &ecdsa::Public, ) -> bool { - self.extension::().map( - |extension| extension.push_ecdsa(sig.clone(), pub_key.clone(), msg.to_vec()) - ).unwrap_or_else(|| ecdsa_verify(sig, msg, pub_key)) + self.extension::() + .map(|extension| extension.push_ecdsa(sig.clone(), pub_key.clone(), msg.to_vec())) + .unwrap_or_else(|| ecdsa_verify(sig, msg, pub_key)) } /// Verify and recover a SECP256k1 ECDSA signature. @@ -669,32 +761,83 @@ pub trait Crypto { sig: &[u8; 65], msg: &[u8; 32], ) -> Result<[u8; 64], EcdsaVerifyError> { - let rs = secp256k1::Signature::parse_slice(&sig[0..64]) + let rs = libsecp256k1::Signature::parse_overflowing_slice(&sig[0..64]) .map_err(|_| EcdsaVerifyError::BadRS)?; - let v = secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) - .map_err(|_| EcdsaVerifyError::BadV)?; - let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) + let v = libsecp256k1::RecoveryId::parse( + if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8 + ) + .map_err(|_| EcdsaVerifyError::BadV)?; + let pubkey = libsecp256k1::recover(&libsecp256k1::Message::parse(msg), &rs, &v) .map_err(|_| EcdsaVerifyError::BadSignature)?; let mut res = [0u8; 64]; res.copy_from_slice(&pubkey.serialize()[1..65]); Ok(res) } + /// Verify and recover a SECP256k1 ECDSA signature. + /// + /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. + /// - `msg` is the blake2-256 hash of the message. + /// + /// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey + /// (doesn't include the 0x04 prefix). + #[version(2)] + fn secp256k1_ecdsa_recover( + sig: &[u8; 65], + msg: &[u8; 32], + ) -> Result<[u8; 64], EcdsaVerifyError> { + let rs = libsecp256k1::Signature::parse_standard_slice(&sig[0..64]) + .map_err(|_| EcdsaVerifyError::BadRS)?; + let v = libsecp256k1::RecoveryId::parse( + if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8 + ) + .map_err(|_| EcdsaVerifyError::BadV)?; + let pubkey = libsecp256k1::recover(&libsecp256k1::Message::parse(msg), &rs, &v) + .map_err(|_| EcdsaVerifyError::BadSignature)?; + let mut res = [0u8; 64]; + res.copy_from_slice(&pubkey.serialize()[1..65]); + Ok(res) + } + + /// Verify and recover a SECP256k1 ECDSA signature. + /// + /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. + /// - `msg` is the blake2-256 hash of the message. + /// + /// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey. + fn secp256k1_ecdsa_recover_compressed( + sig: &[u8; 65], + msg: &[u8; 32], + ) -> Result<[u8; 33], EcdsaVerifyError> { + let rs = libsecp256k1::Signature::parse_overflowing_slice(&sig[0..64]) + .map_err(|_| EcdsaVerifyError::BadRS)?; + let v = libsecp256k1::RecoveryId::parse( + if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8 + ) + .map_err(|_| EcdsaVerifyError::BadV)?; + let pubkey = libsecp256k1::recover(&libsecp256k1::Message::parse(msg), &rs, &v) + .map_err(|_| EcdsaVerifyError::BadSignature)?; + Ok(pubkey.serialize_compressed()) + } + /// Verify and recover a SECP256k1 ECDSA signature. /// /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. /// - `msg` is the blake2-256 hash of the message. /// /// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey. 
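The versioned recovery functions above differ only in signature parsing: version 2 uses `parse_standard_slice` and so rejects non-canonical (overflowing) r/s values. A hedged caller-side sketch; the signature and hash are supplied by the caller:

```rust
// Sketch: recover the uncompressed 64-byte public key from an RSV signature
// over a 32-byte message hash.
fn recover_pubkey(sig: &[u8; 65], msg_hash: &[u8; 32]) -> Option<[u8; 64]> {
    match sp_io::crypto::secp256k1_ecdsa_recover(sig, msg_hash) {
        Ok(pubkey) => Some(pubkey), // 64 bytes, the 0x04 prefix already stripped
        Err(sp_io::EcdsaVerifyError::BadV) => None, // v was not 0/1 or 27/28
        Err(_) => None, // malformed r/s or an invalid signature
    }
}
```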
+ #[version(2)] fn secp256k1_ecdsa_recover_compressed( sig: &[u8; 65], msg: &[u8; 32], ) -> Result<[u8; 33], EcdsaVerifyError> { - let rs = secp256k1::Signature::parse_slice(&sig[0..64]) + let rs = libsecp256k1::Signature::parse_standard_slice(&sig[0..64]) .map_err(|_| EcdsaVerifyError::BadRS)?; - let v = secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) - .map_err(|_| EcdsaVerifyError::BadV)?; - let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) + let v = libsecp256k1::RecoveryId::parse( + if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8 + ) + .map_err(|_| EcdsaVerifyError::BadV)?; + let pubkey = libsecp256k1::recover(&libsecp256k1::Message::parse(msg), &rs, &v) .map_err(|_| EcdsaVerifyError::BadSignature)?; Ok(pubkey.serialize_compressed()) } @@ -708,6 +851,11 @@ pub trait Hashing { sp_core::hashing::keccak_256(data) } + /// Conduct a 512-bit Keccak hash. + fn keccak_512(data: &[u8]) -> [u8; 64] { + sp_core::hashing::keccak_512(data) + } + /// Conduct a 256-bit Sha2 hash. fn sha2_256(data: &[u8]) -> [u8; 32] { sp_core::hashing::sha2_256(data) @@ -739,6 +887,20 @@ pub trait Hashing { } } +/// Interface that provides transaction indexing API. +#[runtime_interface] +pub trait TransactionIndex { + /// Add transaction index. The content is indexed under the given `context_hash`. + fn index(&mut self, extrinsic: u32, size: u32, context_hash: [u8; 32]) { + self.storage_index_transaction(extrinsic, &context_hash, size); + } + + /// Renew existing transaction index for previously indexed content. + fn renew(&mut self, extrinsic: u32, context_hash: [u8; 32]) { + self.storage_renew_transaction_index(extrinsic, &context_hash); + } +} + /// Interface that provides functions to access the Offchain DB. #[runtime_interface] pub trait OffchainIndex { @@ -769,7 +931,7 @@ pub trait Offchain { /// Even if this function returns `true`, it does not mean that any keys are configured /// and that the validator is registered in the chain. fn is_validator(&mut self) -> bool { - self.extension::<OffchainExt>() + self.extension::<OffchainWorkerExt>() .expect("is_validator can be called only in the offchain worker context") .is_validator() } @@ -779,28 +941,30 @@ pub trait Offchain { /// The transaction will end up in the pool. fn submit_transaction(&mut self, data: Vec<u8>) -> Result<(), ()> { self.extension::<TransactionPoolExt>() - .expect("submit_transaction can be called only in the offchain call context with - TransactionPool capabilities enabled") + .expect( + "submit_transaction can be called only in the offchain call context with + TransactionPool capabilities enabled", + ) .submit_transaction(data) } /// Returns information about the local node's network state. fn network_state(&mut self) -> Result<OpaqueNetworkState, ()> { - self.extension::<OffchainExt>() + self.extension::<OffchainWorkerExt>() .expect("network_state can be called only in the offchain worker context") .network_state() } /// Returns current UNIX timestamp (in millis) fn timestamp(&mut self) -> Timestamp { - self.extension::<OffchainExt>() + self.extension::<OffchainWorkerExt>() .expect("timestamp can be called only in the offchain worker context") .timestamp() } /// Pause the execution until `deadline` is reached. fn sleep_until(&mut self, deadline: Timestamp) { - self.extension::<OffchainExt>() + self.extension::<OffchainWorkerExt>() .expect("sleep_until can be called only in the offchain worker context") .sleep_until(deadline) } @@ -810,7 +974,7 @@ pub trait Offchain { /// This is a truly random, non-deterministic seed generated by host environment. /// Obviously fine in the off-chain worker context.
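For the `TransactionIndex` interface introduced above, a hedged usage sketch; the extrinsic index and payload are illustrative values chosen for this example:

```rust
// Sketch: index an extrinsic's payload so it can be served from transaction
// storage later.
fn index_payload(extrinsic_index: u32, payload: &[u8]) {
    let content_hash = sp_io::hashing::blake2_256(payload);
    sp_io::transaction_index::index(extrinsic_index, payload.len() as u32, content_hash);
    // A later block can renew the same content instead of re-submitting it:
    // sp_io::transaction_index::renew(new_extrinsic_index, content_hash);
}
```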
fn random_seed(&mut self) -> [u8; 32] { - self.extension::() + self.extension::() .expect("random_seed can be called only in the offchain worker context") .random_seed() } @@ -820,8 +984,11 @@ pub trait Offchain { /// Note this storage is not part of the consensus, it's only accessible by /// offchain worker tasks running on the same machine. It IS persisted between runs. fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - self.extension::() - .expect("local_storage_set can be called only in the offchain worker context") + self.extension::() + .expect( + "local_storage_set can be called only in the offchain call context with + OffchainDb extension", + ) .local_storage_set(kind, key, value) } @@ -830,8 +997,11 @@ pub trait Offchain { /// Note this storage is not part of the consensus, it's only accessible by /// offchain worker tasks running on the same machine. It IS persisted between runs. fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - self.extension::() - .expect("local_storage_clear can be called only in the offchain worker context") + self.extension::() + .expect( + "local_storage_clear can be called only in the offchain call context with + OffchainDb extension", + ) .local_storage_clear(kind, key) } @@ -851,9 +1021,12 @@ pub trait Offchain { old_value: Option>, new_value: &[u8], ) -> bool { - self.extension::() - .expect("local_storage_compare_and_set can be called only in the offchain worker context") - .local_storage_compare_and_set(kind, key, old_value.as_ref().map(|v| v.deref()), new_value) + self.extension::() + .expect( + "local_storage_compare_and_set can be called only in the offchain call context + with OffchainDb extension", + ) + .local_storage_compare_and_set(kind, key, old_value.as_deref(), new_value) } /// Gets a value from the local storage. @@ -862,22 +1035,25 @@ pub trait Offchain { /// Note this storage is not part of the consensus, it's only accessible by /// offchain worker tasks running on the same machine. It IS persisted between runs. fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - self.extension::() - .expect("local_storage_get can be called only in the offchain worker context") + self.extension::() + .expect( + "local_storage_get can be called only in the offchain call context with + OffchainDb extension", + ) .local_storage_get(kind, key) } /// Initiates a http request given HTTP verb and the URL. /// - /// Meta is a future-reserved field containing additional, parity-scale-codec encoded parameters. - /// Returns the id of newly started request. + /// Meta is a future-reserved field containing additional, parity-scale-codec encoded + /// parameters. Returns the id of newly started request. 
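The `OffchainDbExt` expectation messages above reflect that the local-storage calls now work in any offchain call context, not just the worker. A classic pattern built on `local_storage_compare_and_set` is a one-shot lock; this is a sketch and the key name is illustrative:

```rust
use sp_core::offchain::StorageKind;

// Sketch: a one-shot offchain lock; the key name is hypothetical.
fn try_acquire_lock() -> bool {
    sp_io::offchain::local_storage_compare_and_set(
        StorageKind::PERSISTENT,
        b"my-pallet::lock",
        None,      // succeed only if the key is currently unset
        b"locked", // value to store on success
    )
}
```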
fn http_request_start( &mut self, method: &str, uri: &str, meta: &[u8], ) -> Result { - self.extension::() + self.extension::() .expect("http_request_start can be called only in the offchain worker context") .http_request_start(method, uri, meta) } @@ -889,7 +1065,7 @@ pub trait Offchain { name: &str, value: &str, ) -> Result<(), ()> { - self.extension::() + self.extension::() .expect("http_request_add_header can be called only in the offchain worker context") .http_request_add_header(request_id, name, value) } @@ -906,7 +1082,7 @@ pub trait Offchain { chunk: &[u8], deadline: Option, ) -> Result<(), HttpError> { - self.extension::() + self.extension::() .expect("http_request_write_body can be called only in the offchain worker context") .http_request_write_body(request_id, chunk, deadline) } @@ -923,7 +1099,7 @@ pub trait Offchain { ids: &[HttpRequestId], deadline: Option, ) -> Vec { - self.extension::() + self.extension::() .expect("http_response_wait can be called only in the offchain worker context") .http_response_wait(ids, deadline) } @@ -933,7 +1109,7 @@ pub trait Offchain { /// Returns a vector of pairs `(HeaderKey, HeaderValue)`. /// NOTE response headers have to be read before response body. fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { - self.extension::() + self.extension::() .expect("http_response_headers can be called only in the offchain worker context") .http_response_headers(request_id) } @@ -952,7 +1128,7 @@ pub trait Offchain { buffer: &mut [u8], deadline: Option, ) -> Result { - self.extension::() + self.extension::() .expect("http_response_read_body can be called only in the offchain worker context") .http_response_read_body(request_id, buffer, deadline) .map(|r| r as u32) @@ -960,7 +1136,7 @@ pub trait Offchain { /// Initiates an IPFS request fn ipfs_request_start(&mut self, request: IpfsRequest) -> Result { - self.extension::() + self.extension::() .expect("ipfs_request_start can be called only in the offchain worker context") .ipfs_request_start(request) } @@ -971,14 +1147,14 @@ pub trait Offchain { ids: &[IpfsRequestId], deadline: Option, ) -> Vec { - self.extension::() + self.extension::() .expect("ipfs_response_wait can be called only in the offchain worker context") .ipfs_response_wait(ids, deadline) } /// Set the authorized nodes and authorized_only flag. fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { - self.extension::() + self.extension::() .expect("set_authorized_nodes can be called only in the offchain worker context") .set_authorized_nodes(nodes, authorized_only) } @@ -986,7 +1162,7 @@ pub trait Offchain { /// Wasm only interface that provides functions for calling into the allocator. #[runtime_interface(wasm_only)] -trait Allocator { +pub trait Allocator { /// Malloc the given number of bytes and return the pointer to the allocated memory location. fn malloc(&mut self, size: u32) -> Pointer { self.allocate_memory(size).expect("Failed to allocate memory") @@ -1009,14 +1185,14 @@ pub trait Logging { /// Instead of using directly, prefer setting up `RuntimeLogger` and using `log` macros. fn log(level: LogLevel, target: &str, message: &[u8]) { if let Ok(message) = std::str::from_utf8(message) { - log::log!( - target: target, - log::Level::from(level), - "{}", - message, - ) + log::log!(target: target, log::Level::from(level), "{}", message) } } + + /// Returns the max log level used by the host. 
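The raw HTTP host functions in this hunk are usually driven via the higher-level `sp_runtime::offchain::http` wrapper, but a direct sketch shows the call order; the URL is a placeholder and real code should pass deadlines:

```rust
use sp_core::offchain::HttpRequestStatus;

// Sketch: start a GET request, finalize the (empty) body, wait for a status.
fn fetch_status() -> Result<HttpRequestStatus, ()> {
    let id = sp_io::offchain::http_request_start("GET", "https://example.com", &[])?;
    // Writing an empty chunk finalizes the request body.
    sp_io::offchain::http_request_write_body(id, &[], None).map_err(|_| ())?;
    Ok(sp_io::offchain::http_response_wait(&[id], None)[0])
}
```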
+ fn max_level() -> LogLevelFilter { + log::max_level().into() + } } #[derive(Encode, Decode)] @@ -1029,7 +1205,6 @@ impl<T: Encode + Decode> PassBy for Crossing<T> { } impl<T: Encode + Decode> Crossing<T> { - /// Convert into the inner type pub fn into_inner(self) -> T { self.0 @@ -1038,12 +1213,12 @@ impl<T: Encode + Decode> Crossing<T> { // useful for testing impl<T> core::default::Default for Crossing<T> - where T: core::default::Default + Encode + Decode +where + T: core::default::Default + Encode + Decode, { fn default() -> Self { Self(Default::default()) } - } /// Interface to provide tracing facilities for wasm. Modelled after tokio's `tracing` crate @@ -1051,18 +1226,17 @@ impl<T> core::default::Default for Crossing<T> #[runtime_interface(wasm_only, no_tracing)] pub trait WasmTracing { /// Whether the span described in `WasmMetadata` should be traced wasm-side - /// On the host converts into a static Metadata and checks against the global `tracing` dispatcher. + /// On the host converts into a static Metadata and checks against the global `tracing` + /// dispatcher. /// /// When returning false the calling code should skip any tracing-related execution. In general /// within the same block execution this is not expected to change and it doesn't have to be /// checked more than once per metadata. This exists for optimisation purposes but is still not - /// cheap as it will jump the wasm-native-barrier every time it is called. So an implementation might - /// chose to cache the result for the execution of the entire block. + /// cheap as it will jump the wasm-native-barrier every time it is called. So an implementation + /// might choose to cache the result for the execution of the entire block. fn enabled(&mut self, metadata: Crossing<WasmMetadata>) -> bool { let metadata: &tracing_core::metadata::Metadata<'static> = (&metadata.into_inner()).into(); - tracing::dispatcher::get_default(|d| { - d.enabled(metadata) - }) + tracing::dispatcher::get_default(|d| d.enabled(metadata)) } /// Open a new span with the given attributes. Return the u64 Id of the span. @@ -1081,9 +1255,7 @@ pub trait WasmTracing { d.enter(&final_id); final_id.into_u64() }), - _ => { - 0 - } + _ => 0, } } @@ -1102,18 +1274,17 @@ pub trait WasmTracing { } } -#[cfg(all(not(feature="std"), feature="with-tracing"))] +#[cfg(all(not(feature = "std"), feature = "with-tracing"))] mod tracing_setup { + use super::{wasm_tracing, Crossing}; use core::sync::atomic::{AtomicBool, Ordering}; use tracing_core::{ - dispatcher::{Dispatch, set_global_default}, - span::{Id, Record, Attributes}, - Metadata, Event, + dispatcher::{set_global_default, Dispatch}, + span::{Attributes, Id, Record}, + Event, Metadata, }; - use super::{wasm_tracing, Crossing}; - - const TRACING_SET : AtomicBool = AtomicBool::new(false); + static TRACING_SET: AtomicBool = AtomicBool::new(false); /// The PassingTracingSubscriber implements `tracing_core::Subscriber` /// and pushes the information across the runtime interface to the host @@ -1132,12 +1303,12 @@ mod tracing_setup { /// Not implemented! We do not support recording values later /// Will panic when used. fn record(&self, span: &Id, values: &Record<'_>) { - unimplemented!{} // this usage is not supported + unimplemented! {} // this usage is not supported } /// Not implemented! We do not support recording values later /// Will panic when used. fn record_follows_from(&self, span: &Id, follows: &Id) { - unimplemented!{ } // this usage is not supported + unimplemented!
{} // this usage is not supported } fn event(&self, event: &Event<'_>) { wasm_tracing::event(Crossing(event.into())) @@ -1147,7 +1318,6 @@ mod tracing_setup { } } - /// Initialize tracing of sp_tracing on wasm with `with-tracing` enabled. /// Can be called multiple times from within the same process and will only /// set the global bridging subscriber once. @@ -1160,11 +1330,11 @@ mod tracing_setup { } } -#[cfg(not(all(not(feature="std"), feature="with-tracing")))] +#[cfg(not(all(not(feature = "std"), feature = "with-tracing")))] mod tracing_setup { /// Initialize tracing of sp_tracing not necessary – noop. To enable build /// without std and with the `with-tracing`-feature. - pub fn init_tracing() { } + pub fn init_tracing() {} } pub use tracing_setup::init_tracing; @@ -1195,14 +1365,16 @@ pub trait Sandbox { return_val_len: u32, state_ptr: Pointer, ) -> u32 { - self.sandbox().invoke( - instance_idx, - &function, - &args, - return_val_ptr, - return_val_len, - state_ptr.into(), - ).expect("Failed to invoke function with sandbox") + self.sandbox() + .invoke( + instance_idx, + &function, + &args, + return_val_ptr, + return_val_len, + state_ptr.into(), + ) + .expect("Failed to invoke function with sandbox") } /// Create a new memory instance with the given `initial` and `maximum` size. @@ -1240,20 +1412,30 @@ pub trait Sandbox { /// Teardown the memory instance with the given `memory_idx`. fn memory_teardown(&mut self, memory_idx: u32) { - self.sandbox().memory_teardown(memory_idx).expect("Failed to teardown memory with sandbox") + self.sandbox() + .memory_teardown(memory_idx) + .expect("Failed to teardown memory with sandbox") } /// Teardown the sandbox instance with the given `instance_idx`. fn instance_teardown(&mut self, instance_idx: u32) { - self.sandbox().instance_teardown(instance_idx).expect("Failed to teardown sandbox instance") + self.sandbox() + .instance_teardown(instance_idx) + .expect("Failed to teardown sandbox instance") } /// Get the value from a global with the given `name`. The sandbox is determined by the given /// `instance_idx`. /// /// Returns `Some(_)` when the requested global variable could be found. - fn get_global_val(&mut self, instance_idx: u32, name: &str) -> Option { - self.sandbox().get_global_val(instance_idx, name).expect("Failed to get global from sandbox") + fn get_global_val( + &mut self, + instance_idx: u32, + name: &str, + ) -> Option { + self.sandbox() + .get_global_val(instance_idx, name) + .expect("Failed to get global from sandbox") } } @@ -1266,11 +1448,13 @@ pub trait RuntimeTasks { /// /// This should not be used directly. Use `sp_tasks::spawn` instead. fn spawn(dispatcher_ref: u32, entry: u32, payload: Vec) -> u64 { - sp_externalities::with_externalities(|mut ext|{ - let runtime_spawn = ext.extension::() + sp_externalities::with_externalities(|mut ext| { + let runtime_spawn = ext + .extension::() .expect("Cannot spawn without dynamic runtime dispatcher (RuntimeSpawnExt)"); runtime_spawn.spawn_call(dispatcher_ref, entry, payload) - }).expect("`RuntimeTasks::spawn`: called outside of externalities context") + }) + .expect("`RuntimeTasks::spawn`: called outside of externalities context") } /// Wasm host function for joining a task. @@ -1278,12 +1462,14 @@ pub trait RuntimeTasks { /// This should not be used directly. Use `join` of `sp_tasks::spawn` result instead. 
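As the doc comments note, the `RuntimeTasks` host functions back the `sp_tasks` wrapper rather than being called directly. A hedged sketch of that wrapper's use; the worker function here is a toy placeholder:

```rust
// Sketch: offload a pure function and join its result.
// `sp_tasks::spawn` expects a `fn(Vec<u8>) -> Vec<u8>` entry point.
fn double_all(input: Vec<u8>) -> Vec<u8> {
    input.into_iter().map(|b| b.wrapping_mul(2)).collect()
}

fn run_in_parallel(payload: Vec<u8>) -> Vec<u8> {
    let handle = sp_tasks::spawn(double_all, payload);
    handle.join()
}
```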
fn join(handle: u64) -> Vec { sp_externalities::with_externalities(|mut ext| { - let runtime_spawn = ext.extension::() + let runtime_spawn = ext + .extension::() .expect("Cannot join without dynamic runtime dispatcher (RuntimeSpawnExt)"); runtime_spawn.join(handle) - }).expect("`RuntimeTasks::join`: called outside of externalities context") + }) + .expect("`RuntimeTasks::join`: called outside of externalities context") } - } +} /// Allocator used by Substrate when executing the Wasm runtime. #[cfg(not(feature = "std"))] @@ -1353,15 +1539,14 @@ pub type SubstrateHostFunctions = ( crate::trie::HostFunctions, offchain_index::HostFunctions, runtime_tasks::HostFunctions, + transaction_index::HostFunctions, ); #[cfg(test)] mod tests { use super::*; + use sp_core::{map, storage::Storage, testing::TaskExecutor, traits::TaskExecutorExt}; use sp_state_machine::BasicExternalities; - use sp_core::{ - storage::Storage, map, traits::TaskExecutorExt, testing::TaskExecutor, - }; use std::any::TypeId; #[test] @@ -1417,7 +1602,10 @@ mod tests { }); t.execute_with(|| { - storage::clear_prefix(b":abc"); + assert!(matches!( + storage::clear_prefix(b":abc", None), + KillStorageResult::AllRemoved(2) + )); assert!(storage::get(b":a").is_some()); assert!(storage::get(b":abdd").is_some()); @@ -1458,11 +1646,7 @@ mod tests { } // push invlaid - crypto::sr25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::sr25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); crypto::start_batch_verify(); @@ -1482,11 +1666,7 @@ mod tests { ext.execute_with(|| { // invalid ed25519 signature crypto::start_batch_verify(); - crypto::ed25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::ed25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); // 2 valid ed25519 signatures @@ -1512,11 +1692,7 @@ mod tests { let signature = pair.sign(msg); crypto::ed25519_batch_verify(&signature, msg, &pair.public()); - crypto::ed25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::ed25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); @@ -1548,11 +1724,7 @@ mod tests { let signature = pair.sign(msg); crypto::sr25519_batch_verify(&signature, msg, &pair.public()); - crypto::sr25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::sr25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); }); diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index be4db5834458e..a14e98d3d8059 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keyring" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", path = "../core" } -sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-core = { version = "4.0.0-dev", path = "../core" } +sp-runtime = { version = "4.0.0-dev", path = "../runtime" } lazy_static = "1.4.0" -strum = { version = "0.16.0", features = ["derive"] } +strum = { version = "0.20.0", features = ["derive"] } diff --git a/primitives/keyring/src/ed25519.rs b/primitives/keyring/src/ed25519.rs index 
17882027387c5..65341a360579b 100644 --- a/primitives/keyring/src/ed25519.rs +++ b/primitives/keyring/src/ed25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,14 @@ //! Support code for the runtime. A set of test accounts. -use std::{collections::HashMap, ops::Deref}; use lazy_static::lazy_static; -use sp_core::{ed25519::{Pair, Public, Signature}, Pair as PairT, Public as PublicT, H256}; pub use sp_core::ed25519; +use sp_core::{ + ed25519::{Pair, Public, Signature}, + Pair as PairT, Public as PublicT, H256, +}; use sp_runtime::AccountId32; +use std::{collections::HashMap, ops::Deref}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] @@ -79,7 +82,7 @@ impl Keyring { } /// Returns an iterator over all test accounts. - pub fn iter() -> impl Iterator { + pub fn iter() -> impl Iterator { ::iter() } @@ -114,13 +117,10 @@ impl From for sp_runtime::MultiSigner { } lazy_static! { - static ref PRIVATE_KEYS: HashMap = { - Keyring::iter().map(|i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = + Keyring::iter().map(|i| (i, i.pair())).collect(); + static ref PUBLIC_KEYS: HashMap = + PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect(); } impl From for Public { @@ -185,26 +185,20 @@ mod tests { #[test] fn should_work() { - assert!( - Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Bob!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Bob.public(), - ) - ); + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Bob.public(), + )); } } diff --git a/primitives/keyring/src/lib.rs b/primitives/keyring/src/lib.rs index 55ed14d294f1d..d7fb7c4fd2f2b 100644 --- a/primitives/keyring/src/lib.rs +++ b/primitives/keyring/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/keyring/src/sr25519.rs b/primitives/keyring/src/sr25519.rs index 80397f0de9fc1..6a7aa3635a43a 100644 --- a/primitives/keyring/src/sr25519.rs +++ b/primitives/keyring/src/sr25519.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,14 @@ //! Support code for the runtime. A set of test accounts. 
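The keyring crates touched here provide deterministic development accounts for tests. A sketch of typical usage, assuming the usual `sp-keyring` re-exports:

```rust
use sp_core::{sr25519, Pair};
use sp_keyring::AccountKeyring;

// Sketch: sign with a well-known dev account and verify the result.
fn alice_roundtrip() -> bool {
    let msg = b"I am Alice!";
    let sig = AccountKeyring::Alice.sign(msg);
    sr25519::Pair::verify(&sig, msg, &AccountKeyring::Alice.public())
}
```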
-use std::collections::HashMap; -use std::ops::Deref; use lazy_static::lazy_static; -use sp_core::{sr25519::{Pair, Public, Signature}, Pair as PairT, Public as PublicT, H256}; pub use sp_core::sr25519; +use sp_core::{ + sr25519::{Pair, Public, Signature}, + Pair as PairT, Public as PublicT, H256, +}; use sp_runtime::AccountId32; +use std::{collections::HashMap, ops::Deref}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] @@ -80,7 +82,7 @@ impl Keyring { } /// Returns an iterator over all test accounts. - pub fn iter() -> impl Iterator { + pub fn iter() -> impl Iterator { ::iter() } @@ -135,19 +137,16 @@ impl std::str::FromStr for Keyring { "ferdie" => Ok(Keyring::Ferdie), "one" => Ok(Keyring::One), "two" => Ok(Keyring::Two), - _ => Err(ParseKeyringError) + _ => Err(ParseKeyringError), } } } lazy_static! { - static ref PRIVATE_KEYS: HashMap = { - Keyring::iter().map(|i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = + Keyring::iter().map(|i| (i, i.pair())).collect(); + static ref PUBLIC_KEYS: HashMap = + PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect(); } impl From for AccountId32 { @@ -212,26 +211,20 @@ mod tests { #[test] fn should_work() { - assert!( - Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Bob!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Bob.public(), - ) - ); + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Bob.public(), + )); } } diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index d53d1ebd533c8..35c66ef93f7aa 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keystore" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,17 +13,25 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.30" +async-trait = "0.1.50" derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } futures = { version = "0.3.1" } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } -parking_lot = { version = "0.10.0", default-features = false } - -sp-core = { version = "2.0.0", path = "../core" } -sp-externalities = { version = "0.8.0", path = "../externalities", default-features = false } +parking_lot = { version = "0.11.1", default-features = false } +serde = { version = "1.0", optional = true} +sp-core = { version = "4.0.0-dev", path = "../core" } +sp-externalities = { version = "0.10.0-dev", path = "../externalities", default-features = false } 
[dev-dependencies] rand = "0.7.2" rand_chacha = "0.2.2" + + +[features] +default = ["std"] +std = [ + "serde", + "schnorrkel/std", +] diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 068c174aecdfa..c45e8a6f5d2be 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,33 +19,30 @@ pub mod testing; pub mod vrf; -use std::sync::Arc; +use crate::vrf::{VRFSignature, VRFTranscriptData}; use async_trait::async_trait; use futures::{executor::block_on, future::join_all}; use sp_core::{ - crypto::{KeyTypeId, CryptoTypePublicPair}, - ed25519, sr25519, ecdsa, + crypto::{CryptoTypePublicPair, KeyTypeId}, + ecdsa, ed25519, sr25519, }; -use crate::vrf::{VRFTranscriptData, VRFSignature}; +use std::sync::Arc; /// CryptoStore error #[derive(Debug, derive_more::Display)] pub enum Error { /// Public key type is not supported - #[display(fmt="Key not supported: {:?}", _0)] + #[display(fmt = "Key not supported: {:?}", _0)] KeyNotSupported(KeyTypeId), - /// Pair not found for public key and KeyTypeId - #[display(fmt="Pair was not found: {}", _0)] - PairNotFound(String), /// Validation error - #[display(fmt="Validation error: {}", _0)] + #[display(fmt = "Validation error: {}", _0)] ValidationError(String), /// Keystore unavailable - #[display(fmt="Keystore unavailable")] + #[display(fmt = "Keystore unavailable")] Unavailable, /// Programming errors - #[display(fmt="An unknown keystore error occurred: {}", _0)] - Other(String) + #[display(fmt = "An unknown keystore error occurred: {}", _0)] + Other(String), } /// Something that generates, stores and provides access to keys. @@ -94,12 +91,7 @@ pub trait CryptoStore: Send + Sync { /// Places it into the file system store. /// /// `Err` if there's some sort of weird filesystem error, but should generally be `Ok`. - async fn insert_unknown( - &self, - _key_type: KeyTypeId, - _suri: &str, - _public: &[u8] - ) -> Result<(), ()>; + async fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()>; /// Find intersection between provided keys and supported keys /// @@ -108,7 +100,7 @@ pub trait CryptoStore: Send + Sync { async fn supported_keys( &self, id: KeyTypeId, - keys: Vec + keys: Vec, ) -> Result, Error>; /// List all supported keys /// @@ -125,37 +117,39 @@ pub trait CryptoStore: Send + Sync { /// Signs a message with the private key that matches /// the public key passed. /// - /// Returns the SCALE encoded signature if key is found & supported, - /// an error otherwise. + /// Returns the SCALE encoded signature if key is found and supported, `None` if the key doesn't + /// exist or an error when something failed. async fn sign_with( &self, id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error>; + ) -> Result>, Error>; /// Sign with any key /// /// Given a list of public keys, find the first supported key and /// sign the provided message with that key. /// - /// Returns a tuple of the used key and the SCALE encoded signature. + /// Returns a tuple of the used key and the SCALE encoded signature or `None` if no key could + /// be found to sign. 
async fn sign_with_any( &self, id: KeyTypeId, keys: Vec<CryptoTypePublicPair>, - msg: &[u8] - ) -> Result<(CryptoTypePublicPair, Vec<u8>), Error> { + msg: &[u8], + ) -> Result<Option<(CryptoTypePublicPair, Vec<u8>)>, Error> { if keys.len() == 1 { - return self.sign_with(id, &keys[0], msg).await.map(|s| (keys[0].clone(), s)); + return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))) } else { for k in self.supported_keys(id, keys).await? { - if let Ok(sign) = self.sign_with(id, &k, msg).await { - return Ok((k, sign)); + if let Ok(Some(sign)) = self.sign_with(id, &k, msg).await { + return Ok(Some((k, sign))) } } } - Err(Error::KeyNotSupported(id)) + + Ok(None) } /// Sign with all keys /// /// Given a list of public keys, sign a message with /// each key given that the key is supported. /// /// Returns a list of `Result`s each representing the SCALE encoded - /// signature of each key or a Error for non-supported keys. + /// signature of each key, `None` if the key doesn't exist or an error when something failed. async fn sign_with_all( &self, id: KeyTypeId, keys: Vec<CryptoTypePublicPair>, msg: &[u8], - ) -> Result<Vec<Result<Vec<u8>, Error>>, ()> { - let futs = keys.iter() - .map(|k| self.sign_with(id, k, msg)); + ) -> Result<Vec<Result<Option<Vec<u8>>, Error>>, ()> { + let futs = keys.iter().map(|k| self.sign_with(id, k, msg)); Ok(join_all(futs).await) } @@ -187,16 +180,33 @@ pub trait CryptoStore: Send + Sync { /// Namely, VRFOutput and VRFProof which are returned /// inside the `VRFSignature` container struct. /// - /// This function will return an error in the cases where - /// the public key and key type provided do not match a private - /// key in the keystore. Or, in the context of remote signing - /// an error could be a network one. + /// This function will return `None` if the given `key_type` and `public` combination + /// doesn't exist in the keystore or an `Err` when something failed. async fn sr25519_vrf_sign( &self, key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result<VRFSignature, Error>; + ) -> Result<Option<VRFSignature>, Error>; + + /// Generate an ECDSA signature for a given pre-hashed message. + /// + /// Receives [`KeyTypeId`] and an [`ecdsa::Public`] key to be able to map + /// them to a private key that exists in the keystore. This private key is, + /// in turn, used for signing the provided pre-hashed message. + /// + /// The `msg` argument provided should be a hashed message for which an + /// ECDSA signature should be generated. + /// + /// Returns an [`ecdsa::Signature`] or `None` in case the given `id` and + /// `public` combination doesn't exist in the keystore. An `Err` will be + /// returned if generating the signature itself failed. + async fn ecdsa_sign_prehashed( + &self, + id: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8; 32], + ) -> Result<Option<ecdsa::Signature>, Error>; } /// Sync version of the CryptoStore @@ -244,11 +254,8 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// If the given seed is `Some(_)`, the key pair will only be stored in memory. /// /// Returns the public key of the generated key pair. - fn ecdsa_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result<ecdsa::Public, Error>; + fn ecdsa_generate_new(&self, id: KeyTypeId, seed: Option<&str>) + -> Result<ecdsa::Public, Error>; /// Insert a new key. This doesn't require any knowledge of the crypto; but a public key must be /// manually provided.
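With `PairNotFound` removed, the keystore traits now report a missing key as `Ok(None)` rather than an error. A caller-side sketch of handling the new shape; mapping `None` to `Error::Unavailable` is this example's own choice, not prescribed by the crate:

```rust
use sp_core::crypto::{CryptoTypePublicPair, KeyTypeId};
use sp_keystore::{Error, SyncCryptoStore};

// Sketch: distinguish a missing key from a keystore failure under the new API.
fn sign_or_fail(
    store: &dyn SyncCryptoStore,
    id: KeyTypeId,
    key: &CryptoTypePublicPair,
    msg: &[u8],
) -> Result<Vec<u8>, Error> {
    match SyncCryptoStore::sign_with(store, id, key, msg)? {
        Some(sig) => Ok(sig),            // SCALE-encoded signature bytes
        None => Err(Error::Unavailable), // the key is not in this keystore
    }
}
```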
@@ -265,7 +272,7 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { fn supported_keys( &self, id: KeyTypeId, - keys: Vec + keys: Vec, ) -> Result, Error>; /// List all supported keys @@ -285,37 +292,41 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// Signs a message with the private key that matches /// the public key passed. /// - /// Returns the SCALE encoded signature if key is found & supported, - /// an error otherwise. + /// Returns the SCALE encoded signature if key is found and supported, `None` if the key doesn't + /// exist or an error when something failed. fn sign_with( &self, id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error>; + ) -> Result>, Error>; /// Sign with any key /// /// Given a list of public keys, find the first supported key and /// sign the provided message with that key. /// - /// Returns a tuple of the used key and the SCALE encoded signature. + /// Returns a tuple of the used key and the SCALE encoded signature or `None` if no key could + /// be found to sign. fn sign_with_any( &self, id: KeyTypeId, keys: Vec, - msg: &[u8] - ) -> Result<(CryptoTypePublicPair, Vec), Error> { + msg: &[u8], + ) -> Result)>, Error> { if keys.len() == 1 { - return SyncCryptoStore::sign_with(self, id, &keys[0], msg).map(|s| (keys[0].clone(), s)); + return Ok( + SyncCryptoStore::sign_with(self, id, &keys[0], msg)?.map(|s| (keys[0].clone(), s)) + ) } else { for k in SyncCryptoStore::supported_keys(self, id, keys)? { - if let Ok(sign) = SyncCryptoStore::sign_with(self, id, &k, msg) { - return Ok((k, sign)); + if let Ok(Some(sign)) = SyncCryptoStore::sign_with(self, id, &k, msg) { + return Ok(Some((k, sign))) } } } - Err(Error::KeyNotSupported(id)) + + Ok(None) } /// Sign with all keys @@ -324,13 +335,13 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// each key given that the key is supported. /// /// Returns a list of `Result`s each representing the SCALE encoded - /// signature of each key or a Error for non-supported keys. + /// signature of each key, `None` if the key doesn't exist or an error when something failed. fn sign_with_all( &self, id: KeyTypeId, keys: Vec, msg: &[u8], - ) -> Result, Error>>, ()>{ + ) -> Result>, Error>>, ()> { Ok(keys.iter().map(|k| SyncCryptoStore::sign_with(self, id, k, msg)).collect()) } @@ -344,16 +355,33 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// Namely, VRFOutput and VRFProof which are returned /// inside the `VRFSignature` container struct. /// - /// This function will return an error in the cases where - /// the public key and key type provided do not match a private - /// key in the keystore. Or, in the context of remote signing - /// an error could be a network one. + /// This function will return `None` if the given `key_type` and `public` combination + /// doesn't exist in the keystore or an `Err` when something failed. fn sr25519_vrf_sign( &self, key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result; + ) -> Result, Error>; + + /// Generate an ECDSA signature for a given pre-hashed message. + /// + /// Receives [`KeyTypeId`] and an [`ecdsa::Public`] key to be able to map + /// them to a private key that exists in the keystore. This private key is, + /// in turn, used for signing the provided pre-hashed message. + /// + /// The `msg` argument provided should be a hashed message for which an + /// ECDSA signature should be generated. 
+ /// + /// Returns an [`ecdsa::Signature`] or `None` in case the given `id` and + /// `public` combination doesn't exist in the keystore. An `Err` will be + /// returned if generating the signature itself failed. + fn ecdsa_sign_prehashed( + &self, + id: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8; 32], + ) -> Result, Error>; } /// A pointer to a keystore. diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index a5e460951493b..718ba798dc0f3 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,21 @@ //! Types that should only be used for testing! -use sp_core::crypto::KeyTypeId; use sp_core::{ - crypto::{Pair, Public, CryptoTypePublicPair}, - ed25519, sr25519, ecdsa, + crypto::{CryptoTypePublicPair, KeyTypeId, Pair, Public}, + ecdsa, ed25519, sr25519, }; + use crate::{ - {CryptoStore, SyncCryptoStorePtr, Error, SyncCryptoStore}, - vrf::{VRFTranscriptData, VRFSignature, make_transcript}, + vrf::{make_transcript, VRFSignature, VRFTranscriptData}, + CryptoStore, Error, SyncCryptoStore, SyncCryptoStorePtr, }; -use std::{collections::{HashMap, HashSet}, sync::Arc}; -use parking_lot::RwLock; use async_trait::async_trait; +use parking_lot::RwLock; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; /// A keystore implementation usable in tests. #[derive(Default)] @@ -44,29 +47,28 @@ impl KeyStore { } fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { - self.keys.read().get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) - ) + self.keys.read().get(&id).and_then(|inner| { + inner.get(pub_key.as_slice()).map(|s| { + sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") + }) + }) } fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { - self.keys.read().get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) - ) + self.keys.read().get(&id).and_then(|inner| { + inner.get(pub_key.as_slice()).map(|s| { + ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") + }) + }) } fn ecdsa_key_pair(&self, id: KeyTypeId, pub_key: &ecdsa::Public) -> Option { - self.keys.read().get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid")) - ) + self.keys.read().get(&id).and_then(|inner| { + inner + .get(pub_key.as_slice()) + .map(|s| ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid")) + }) } - } #[async_trait] @@ -132,7 +134,7 @@ impl CryptoStore for KeyStore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error> { + ) -> Result>, Error> { SyncCryptoStore::sign_with(self, id, key, msg) } @@ -141,35 +143,48 @@ impl CryptoStore for KeyStore { key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result { + ) -> Result, Error> { SyncCryptoStore::sr25519_vrf_sign(self, key_type, public, transcript_data) } + + async fn ecdsa_sign_prehashed( + &self, + id: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8; 32], 
+ ) -> Result, Error> { + SyncCryptoStore::ecdsa_sign_prehashed(self, id, public, msg) + } } impl SyncCryptoStore for KeyStore { fn keys(&self, id: KeyTypeId) -> Result, Error> { - self.keys.read() + self.keys + .read() .get(&id) .map(|map| { - Ok(map.keys() - .fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k.clone())); - v - })) + Ok(map.keys().fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k.clone())); + v + })) }) .unwrap_or_else(|| Ok(vec![])) } fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.read().get(&id) - .map(|keys| + self.keys + .read() + .get(&id) + .map(|keys| { keys.values() - .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) + .map(|s| { + sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") + }) .map(|p| p.public()) .collect() - ) + }) .unwrap_or_default() } @@ -180,27 +195,40 @@ impl SyncCryptoStore for KeyStore { ) -> Result { match seed { Some(seed) => { - let pair = sr25519::Pair::from_string(seed, None) - .map_err(|_| Error::ValidationError("Generates an `sr25519` pair.".to_owned()))?; - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); + let pair = sr25519::Pair::from_string(seed, None).map_err(|_| { + Error::ValidationError("Generates an `sr25519` pair.".to_owned()) + })?; + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, None => { let (pair, phrase, _) = sr25519::Pair::generate_with_phrase(None); - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); Ok(pair.public()) - } + }, } } fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.read().get(&id) - .map(|keys| + self.keys + .read() + .get(&id) + .map(|keys| { keys.values() - .map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) + .map(|s| { + ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") + }) .map(|p| p.public()) .collect() - ) + }) .unwrap_or_default() } @@ -211,27 +239,40 @@ impl SyncCryptoStore for KeyStore { ) -> Result { match seed { Some(seed) => { - let pair = ed25519::Pair::from_string(seed, None) - .map_err(|_| Error::ValidationError("Generates an `ed25519` pair.".to_owned()))?; - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); + let pair = ed25519::Pair::from_string(seed, None).map_err(|_| { + Error::ValidationError("Generates an `ed25519` pair.".to_owned()) + })?; + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, None => { let (pair, phrase, _) = ed25519::Pair::generate_with_phrase(None); - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); Ok(pair.public()) - } + }, } } fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.read().get(&id) - .map(|keys| + self.keys + .read() + .get(&id) + .map(|keys| { keys.values() - .map(|s| 
ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid")) + .map(|s| { + ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid") + }) .map(|p| p.public()) .collect() - ) + }) .unwrap_or_default() } @@ -244,24 +285,38 @@ impl SyncCryptoStore for KeyStore { Some(seed) => { let pair = ecdsa::Pair::from_string(seed, None) .map_err(|_| Error::ValidationError("Generates an `ecdsa` pair.".to_owned()))?; - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, None => { let (pair, phrase, _) = ecdsa::Pair::generate_with_phrase(None); - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); Ok(pair.public()) - } + }, } } fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { - self.keys.write().entry(id).or_default().insert(public.to_owned(), suri.to_string()); + self.keys + .write() + .entry(id) + .or_default() + .insert(public.to_owned(), suri.to_string()); Ok(()) } fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys.iter().all(|(k, t)| self.keys.read().get(&t).and_then(|s| s.get(k)).is_some()) + public_keys + .iter() + .all(|(k, t)| self.keys.read().get(&t).and_then(|s| s.get(k)).is_some()) } fn supported_keys( @@ -280,29 +335,29 @@ impl SyncCryptoStore for KeyStore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error> { + ) -> Result>, Error> { use codec::Encode; match key.0 { ed25519::CRYPTO_ID => { - let key_pair: ed25519::Pair = self - .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())) - .ok_or_else(|| Error::PairNotFound("ed25519".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); - } + let key_pair = + self.ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())); + + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() + }, sr25519::CRYPTO_ID => { - let key_pair: sr25519::Pair = self - .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())) - .ok_or_else(|| Error::PairNotFound("sr25519".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); - } + let key_pair = + self.sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())); + + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() + }, ecdsa::CRYPTO_ID => { - let key_pair: ecdsa::Pair = self - .ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice())) - .ok_or_else(|| Error::PairNotFound("ecdsa".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); - } - _ => Err(Error::KeyNotSupported(id)) + let key_pair = + self.ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice())); + + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() + }, + _ => Err(Error::KeyNotSupported(id)), } } @@ -311,15 +366,23 @@ impl SyncCryptoStore for KeyStore { key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result { + ) -> Result, Error> { let transcript = make_transcript(transcript_data); - let pair = self.sr25519_key_pair(key_type, public) - .ok_or_else(|| Error::PairNotFound("Not found".to_owned()))?; + let pair = + if let Some(k) = self.sr25519_key_pair(key_type, public) { k } else { return Ok(None) }; + let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(VRFSignature { - output: inout.to_output(), - proof, - }) + 
Ok(Some(VRFSignature { output: inout.to_output(), proof })) + } + + fn ecdsa_sign_prehashed( + &self, + id: KeyTypeId, + public: &ecdsa::Public, + msg: &[u8; 32], + ) -> Result, Error> { + let pair = self.ecdsa_key_pair(id, public); + pair.map(|k| k.sign_prehashed(msg)).map(Ok).transpose() } } @@ -338,15 +401,18 @@ impl Into> for KeyStore { #[cfg(test)] mod tests { use super::*; - use sp_core::{sr25519, testing::{ED25519, SR25519}}; - use crate::{SyncCryptoStore, vrf::VRFTranscriptValue}; + use crate::{vrf::VRFTranscriptValue, SyncCryptoStore}; + use sp_core::{ + sr25519, + testing::{ECDSA, ED25519, SR25519}, + }; #[test] fn store_key_and_extract() { let store = KeyStore::new(); - let public = SyncCryptoStore::ed25519_generate_new(&store, ED25519, None) - .expect("Generates key"); + let public = + SyncCryptoStore::ed25519_generate_new(&store, ED25519, None).expect("Generates key"); let public_keys = SyncCryptoStore::keys(&store, ED25519).unwrap(); @@ -360,12 +426,8 @@ mod tests { let secret_uri = "//Alice"; let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); - SyncCryptoStore::insert_unknown( - &store, - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); + SyncCryptoStore::insert_unknown(&store, SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); let public_keys = SyncCryptoStore::keys(&store, SR25519).unwrap(); @@ -385,7 +447,7 @@ mod tests { ("one", VRFTranscriptValue::U64(1)), ("two", VRFTranscriptValue::U64(2)), ("three", VRFTranscriptValue::Bytes("test".as_bytes().to_vec())), - ] + ], }; let result = SyncCryptoStore::sr25519_vrf_sign( @@ -394,22 +456,38 @@ mod tests { &key_pair.public(), transcript_data.clone(), ); - assert!(result.is_err()); + assert!(result.unwrap().is_none()); - SyncCryptoStore::insert_unknown( - &store, - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); + SyncCryptoStore::insert_unknown(&store, SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); - let result = SyncCryptoStore::sr25519_vrf_sign( - &store, - SR25519, - &key_pair.public(), - transcript_data, - ); + let result = + SyncCryptoStore::sr25519_vrf_sign(&store, SR25519, &key_pair.public(), transcript_data); + + assert!(result.unwrap().is_some()); + } + + #[test] + fn ecdsa_sign_prehashed_works() { + let store = KeyStore::new(); + + let suri = "//Alice"; + let pair = ecdsa::Pair::from_string(suri, None).unwrap(); + + let msg = sp_core::keccak_256(b"this should be a hashed message"); + + // no key in key store + let res = + SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); + assert!(res.is_none()); + + // insert key, sign again + let res = + SyncCryptoStore::insert_unknown(&store, ECDSA, suri, pair.public().as_ref()).unwrap(); + assert_eq!((), res); - assert!(result.is_ok()); + let res = + SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); + assert!(res.is_some()); } } diff --git a/primitives/keystore/src/vrf.rs b/primitives/keystore/src/vrf.rs index 750ca0eac6be7..383abb77e17c7 100644 --- a/primitives/keystore/src/vrf.rs +++ b/primitives/keystore/src/vrf.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
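One behavioral change is easy to miss amid the mechanical reformatting above: the signing entry points (`sign_with`, `sr25519_vrf_sign`, and the new `ecdsa_sign_prehashed`) now return `Ok(None)` when the requested key is absent from the store, where they previously returned a `PairNotFound` error. A minimal sketch of a call site under the new contract, mirroring the tests in this file (the helper function and its fallback behavior are illustrative, not part of the diff; paths assume the usual `sp_keystore` layout):

```rust
use sp_core::{ecdsa, testing::ECDSA};
use sp_keystore::{testing::KeyStore, Error, SyncCryptoStore};

// Illustrative helper: sign a 32-byte prehashed message if the key is known.
fn try_sign(
    store: &KeyStore,
    public: &ecdsa::Public,
    msg: &[u8; 32],
) -> Result<Option<ecdsa::Signature>, Error> {
    // `Ok(None)` now means "key not in store"; `Err(_)` is reserved for real failures.
    let maybe_sig = SyncCryptoStore::ecdsa_sign_prehashed(store, ECDSA, public, msg)?;
    if maybe_sig.is_none() {
        // Callers can react by inserting the key (e.g. via `insert_unknown`)
        // and retrying, or by skipping this signer entirely.
    }
    Ok(maybe_sig)
}
```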
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,9 +20,11 @@
 use codec::Encode;
 use merlin::Transcript;
 use schnorrkel::vrf::{VRFOutput, VRFProof};
+
 /// An enum whose variants represent possible
 /// accepted values to construct the VRF transcript
 #[derive(Clone, Encode)]
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
 pub enum VRFTranscriptValue {
 	/// Value is an array of bytes
 	Bytes(Vec<u8>),
@@ -57,22 +59,17 @@ pub fn make_transcript(data: VRFTranscriptData) -> Transcript {
 			},
 			VRFTranscriptValue::U64(val) => {
 				transcript.append_u64(label.as_bytes(), val);
-			}
+			},
 		}
 	}
 	transcript
 }
-
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::vrf::VRFTranscriptValue;
 	use rand::RngCore;
-	use rand_chacha::{
-		rand_core::SeedableRng,
-		ChaChaRng,
-	};
+	use rand_chacha::{rand_core::SeedableRng, ChaChaRng};
 
 	#[test]
 	fn transcript_creation_matches() {
@@ -89,9 +86,7 @@ mod tests {
 		});
 		let test = |t: Transcript| -> [u8; 16] {
 			let mut b = [0u8; 16];
-			t.build_rng()
-				.finalize(&mut ChaChaRng::from_seed([0u8;32]))
-				.fill_bytes(&mut b);
+			t.build_rng().finalize(&mut ChaChaRng::from_seed([0u8; 32])).fill_bytes(&mut b);
 			b
 		};
 		debug_assert!(test(orig_transcript) == test(new_transcript));
diff --git a/primitives/maybe-compressed-blob/Cargo.toml b/primitives/maybe-compressed-blob/Cargo.toml
new file mode 100644
index 0000000000000..8d47c89ea8ebe
--- /dev/null
+++ b/primitives/maybe-compressed-blob/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "sp-maybe-compressed-blob"
+version = "4.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "Apache-2.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "Handling of blobs, usually Wasm code, which may be compressed"
+documentation = "https://docs.rs/sp-maybe-compressed-blob"
+readme = "README.md"
+
+[dependencies]
+zstd = { version = "0.6.0", default-features = false }
diff --git a/primitives/maybe-compressed-blob/README.md b/primitives/maybe-compressed-blob/README.md
new file mode 100644
index 0000000000000..b5bb869c30e4f
--- /dev/null
+++ b/primitives/maybe-compressed-blob/README.md
@@ -0,0 +1,3 @@
+Handling of blobs, typically validation code, which may be compressed.
+
+License: Apache-2.0
diff --git a/primitives/maybe-compressed-blob/src/lib.rs b/primitives/maybe-compressed-blob/src/lib.rs
new file mode 100644
index 0000000000000..e8a7e42b4eace
--- /dev/null
+++ b/primitives/maybe-compressed-blob/src/lib.rs
@@ -0,0 +1,154 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Handling of blobs that may be compressed, based on an 8-byte magic identifier
+//! at the head.
+
+use std::{borrow::Cow, io::Read};
+
+// An arbitrary prefix indicating that a blob beginning with it should be decompressed with
+// Zstd compression.
+//
+// This differs from the WASM magic bytes, so real WASM blobs will not have this prefix.
+const ZSTD_PREFIX: [u8; 8] = [82, 188, 83, 118, 70, 219, 142, 5];
+
+/// A recommendation for the bomb limit for code blobs.
+///
+/// This may be adjusted upwards in the future, but is set much higher than the
+/// expected maximum code size. When adjusting upwards, nodes should be updated
+/// before performing a runtime upgrade to a blob with larger compressed size.
+pub const CODE_BLOB_BOMB_LIMIT: usize = 50 * 1024 * 1024;
+
+/// A possible bomb was encountered.
+#[derive(Debug, Clone, PartialEq)]
+pub enum Error {
+	/// Decoded size was too large, and the code payload may be a bomb.
+	PossibleBomb,
+	/// The compressed value had an invalid format.
+	Invalid,
+}
+
+impl std::fmt::Display for Error {
+	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+		match *self {
+			Error::PossibleBomb => write!(f, "Possible compression bomb encountered"),
+			Error::Invalid => write!(f, "Blob had invalid format"),
+		}
+	}
+}
+
+impl std::error::Error for Error {}
+
+fn read_from_decoder(
+	decoder: impl Read,
+	blob_len: usize,
+	bomb_limit: usize,
+) -> Result<Vec<u8>, Error> {
+	let mut decoder = decoder.take((bomb_limit + 1) as u64);
+
+	let mut buf = Vec::with_capacity(blob_len);
+	decoder.read_to_end(&mut buf).map_err(|_| Error::Invalid)?;
+
+	if buf.len() <= bomb_limit {
+		Ok(buf)
+	} else {
+		Err(Error::PossibleBomb)
+	}
+}
+
+fn decompress_zstd(blob: &[u8], bomb_limit: usize) -> Result<Vec<u8>, Error> {
+	let decoder = zstd::Decoder::new(blob).map_err(|_| Error::Invalid)?;
+
+	read_from_decoder(decoder, blob.len(), bomb_limit)
+}
+
+/// Decode a blob, if it indicates that it is compressed. Provide a `bomb_limit`, which
+/// is the limit of bytes which should be decompressed from the blob.
+pub fn decompress(blob: &[u8], bomb_limit: usize) -> Result<Cow<[u8]>, Error> {
+	if blob.starts_with(&ZSTD_PREFIX) {
+		decompress_zstd(&blob[ZSTD_PREFIX.len()..], bomb_limit).map(Into::into)
+	} else {
+		Ok(blob.into())
+	}
+}
+
+/// Encode a blob as compressed. If the blob's size is over the bomb limit,
+/// this will not compress the blob, as the decoder will not be able to
+/// differentiate it from a compression bomb.
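The crate's public surface is just `decompress` above and the `compress` function defined next; a small round-trip sketch of the intended usage (assuming the crate is imported under its package name, `sp_maybe_compressed_blob`):

```rust
use sp_maybe_compressed_blob::{compress, decompress, CODE_BLOB_BOMB_LIMIT};

fn roundtrip(code: &[u8]) {
    // `compress` refuses blobs larger than the bomb limit and returns `None`.
    let compressed =
        compress(code, CODE_BLOB_BOMB_LIMIT).expect("blob is under the bomb limit");

    // `decompress` only acts on blobs carrying the zstd magic prefix, so it is
    // safe to call on both compressed and uncompressed input.
    let decompressed =
        decompress(&compressed, CODE_BLOB_BOMB_LIMIT).expect("fresh compression round-trips");

    assert_eq!(&*decompressed, code);
}
```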
+pub fn compress(blob: &[u8], bomb_limit: usize) -> Option<Vec<u8>> {
+	use std::io::Write;
+
+	if blob.len() > bomb_limit {
+		return None
+	}
+
+	let mut buf = ZSTD_PREFIX.to_vec();
+
+	{
+		let mut v = zstd::Encoder::new(&mut buf, 3).ok()?.auto_finish();
+		v.write_all(blob).ok()?;
+	}
+
+	Some(buf)
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use std::io::Write;
+
+	const BOMB_LIMIT: usize = 10;
+
+	#[test]
+	fn refuse_to_encode_over_limit() {
+		let mut v = vec![0; BOMB_LIMIT + 1];
+		assert!(compress(&v, BOMB_LIMIT).is_none());
+
+		let _ = v.pop();
+		assert!(compress(&v, BOMB_LIMIT).is_some());
+	}
+
+	#[test]
+	fn compress_and_decompress() {
+		let v = vec![0; BOMB_LIMIT];
+
+		let compressed = compress(&v, BOMB_LIMIT).unwrap();
+
+		assert!(compressed.starts_with(&ZSTD_PREFIX));
+		assert_eq!(&decompress(&compressed, BOMB_LIMIT).unwrap()[..], &v[..])
+	}
+
+	#[test]
+	fn decompresses_only_when_magic() {
+		let v = vec![0; BOMB_LIMIT + 1];
+
+		assert_eq!(&decompress(&v, BOMB_LIMIT).unwrap()[..], &v[..]);
+	}
+
+	#[test]
+	fn possible_bomb_fails() {
+		let encoded_bigger_than_bomb = vec![0; BOMB_LIMIT + 1];
+		let mut buf = ZSTD_PREFIX.to_vec();
+
+		{
+			let mut v = zstd::Encoder::new(&mut buf, 3).unwrap().auto_finish();
+			v.write_all(&encoded_bigger_than_bomb[..]).unwrap();
+		}
+
+		assert_eq!(decompress(&buf[..], BOMB_LIMIT).err(), Some(Error::PossibleBomb));
+	}
+}
diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml
index 4a66743028d19..b277df8f58f12 100644
--- a/primitives/npos-elections/Cargo.toml
+++ b/primitives/npos-elections/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-npos-elections"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,23 +13,28 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.101", optional = true, features = ["derive"] }
-sp-std = { version = "2.0.0", default-features = false, path = "../std" }
-sp-npos-elections-compact = { version = "2.0.0", path = "./compact" }
-sp-arithmetic = { version = "2.0.0", default-features = false, path = "../arithmetic" }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0.126", optional = true, features = ["derive"] }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" }
+sp-npos-elections-solution-type = { version = "4.0.0-dev", path = "./solution-type" }
+sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../arithmetic" }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" }
+sp-runtime = { version = "4.0.0-dev", path = "../runtime", default-features = false }
 
 [dev-dependencies]
-substrate-test-utils = { version = "2.0.0", path = "../../test-utils" }
+substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" }
 rand = "0.7.3"
-sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" }
 
 [features]
 default = ["std"]
 bench = []
 std = [
 	"codec/std",
+	"scale-info/std",
 	"serde",
 	"sp-std/std",
 	"sp-arithmetic/std",
+	"sp-core/std",
+	"sp-runtime/std",
 ]
diff --git a/primitives/npos-elections/README.md b/primitives/npos-elections/README.md
index a98351a6d89a7..b518e63615fa6 100644
--- a/primitives/npos-elections/README.md
+++ b/primitives/npos-elections/README.md
@@ -1,11 +1,58 @@
 A set of election algorithms to be used with a substrate runtime, typically within the staking
-sub-system. Notable implementation include
+sub-system. Notable implementations include:
 
 - [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast
   election method that ensures PJR, but does not provide a constant factor approximation of
   the maximin problem.
-- [`balance_solution`]: Implements the star balancing algorithm. This iterative process can
-  increase a solutions score, as described in [`evaluate_support`].
+- [`phragmms`]: Implements a hybrid approach inspired by Phragmén which is executed faster but
+  can achieve a constant factor approximation of the maximin problem, similar to that of the
+  MMS algorithm.
+- [`balance_solution`]: Implements the star balancing algorithm. This iterative process can push
+  a solution toward being more balanced, which in turn can increase its score.
+
+### Terminology
+
+This crate uses context-independent words, not to be confused with staking. This is because the
+election algorithms of this crate, while designed for staking, can be used in other contexts as
+well.
+
+`Voter`: The entity casting some votes to a number of `Targets`. This is the same as `Nominator`
+in the context of staking. `Target`: The entities eligible to be voted upon. This is the same as
+`Validator` in the context of staking. `Edge`: A mapping from a `Voter` to a `Target`.
+
+The goal of an election algorithm is to provide an `ElectionResult`, a data structure composed of:
+- `winners`: A flat list of identifiers belonging to those who have won the election, usually
+  ordered in some meaningful way. They are zipped with their total backing stake.
+- `assignment`: A mapping from each voter to their winner-only targets, zipped with a ratio
+  denoting the amount of support given to that particular target.
+
+```rust
+// the winners.
+let winners = vec![(1, 100), (2, 50)];
+let assignments = vec![
+    // A voter, giving equal backing to both 1 and 2.
+    Assignment {
+        who: 10,
+        distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))],
+    },
+    // A voter, only backing 1.
+    Assignment { who: 20, distribution: vec![(1, Perbill::from_percent(100))] },
+];
+
+// the combination of the two makes the election result.
+let election_result = ElectionResult { winners, assignments };
+```
+
+The `Assignment` field of the election result is voter-major, i.e. it is from the perspective of
+the voter. The struct that represents the opposite is called a `Support`. This struct is usually
+accessed in a map-like manner, i.e. keyed by voters, therefore it is stored as a mapping called
+`SupportMap`.
+
+Moreover, the support is built from absolute backing values, not ratios like the example above.
+A struct similar to `Assignment` that has stake values instead of ratios is called a
+`StakedAssignment`.
+
 More information can be found at: https://arxiv.org/abs/2004.12990
diff --git a/primitives/npos-elections/benches/phragmen.rs b/primitives/npos-elections/benches/phragmen.rs
deleted file mode 100644
index ce4e0196ab4f7..0000000000000
--- a/primitives/npos-elections/benches/phragmen.rs
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2019-2020 Parity Technologies
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
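To connect the README's last two paragraphs: converting the ratio-based `Assignment`s above into absolute `StakedAssignment`s is done with `assignment_ratio_to_staked`, as used by the benchmarks being deleted below. A sketch under that assumption, with an arbitrary stand-in stake function:

```rust
use sp_npos_elections::{assignment_ratio_to_staked, Assignment, VoteWeight};
use sp_runtime::Perbill;

fn ratio_to_staked() {
    let assignments = vec![Assignment {
        who: 10u64,
        distribution: vec![(1u64, Perbill::from_percent(50)), (2, Perbill::from_percent(50))],
    }];
    // Stand-in: every voter backs with a total weight of 100.
    let stake_of = |_who: &u64| -> VoteWeight { 100 };
    // Each voter's ratios are scaled by their stake, yielding absolute backings
    // (`ExtendedBalance` values) instead of `Perbill` ratios.
    let _staked = assignment_ratio_to_staked(assignments, stake_of);
}
```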
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - - -//! Benchmarks of the phragmen election algorithm. -//! Note that execution times will not be accurate in an absolute scale, since -//! - Everything is executed in the context of `TestExternalities` -//! - Everything is executed in native environment. - -#![cfg(feature = "bench")] -#![feature(test)] - -extern crate test; -use test::Bencher; - -use rand::{self, Rng}; -use sp_npos_elections::{ElectionResult, VoteWeight}; - -use std::collections::BTreeMap; -use sp_runtime::{Perbill, PerThing, traits::Zero}; -use sp_npos_elections::{ - balance_solution, assignment_ratio_to_staked, build_support_map, to_without_backing, VoteWeight, - ExtendedBalance, Assignment, StakedAssignment, IdentifierT, assignment_ratio_to_staked, - seq_phragmen, -}; - -// default params. Each will be scaled by the benchmarks individually. -const VALIDATORS: u64 = 100; -const NOMINATORS: u64 = 1_000; -const EDGES: u64 = 2; -const TO_ELECT: usize = 10; -const STAKE: VoteWeight = 1000; - -const PREFIX: AccountId = 1000_000; - -type AccountId = u64; - -mod bench_closure_and_slice { - use super::*; - - fn random_assignment() -> Assignment { - let mut rng = rand::thread_rng(); - let who = rng.next_u32(); - let distribution = (0..5) - .map(|x| (x + rng.next_u32(), Perbill::from_percent(rng.next_u32() % 100))) - .collect::>(); - Assignment { who, distribution } - } - - /// Converts a vector of ratio assignments into ones with absolute budget value. 
- pub fn assignment_ratio_to_staked_slice( - ratio: Vec>, - stakes: &[VoteWeight], - ) -> Vec> - where - T: sp_std::ops::Mul, - ExtendedBalance: From<::Inner>, - { - ratio - .into_iter() - .zip(stakes.into_iter().map(|x| *x as ExtendedBalance)) - .map(|(a, stake)| { - a.into_staked(stake.into(), true) - }) - .collect() - } - - #[bench] - fn closure(b: &mut Bencher) { - let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); - let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; - - // each have one clone of assignments - b.iter(|| assignment_ratio_to_staked(assignments.clone(), stake_of)); - } - - #[bench] - fn slice(b: &mut Bencher) { - let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); - let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; - - b.iter(|| { - let local = assignments.clone(); - let stakes = local.iter().map(|x| stake_of(&x.who)).collect::>(); - assignment_ratio_to_staked_slice(local, stakes.as_ref()); - }); - } -} - -fn do_phragmen( - b: &mut Bencher, - num_validators: u64, - num_nominators: u64, - to_elect: usize, - edge_per_voter: u64, - eq_iters: usize, - eq_tolerance: u128, -) { - assert!(num_validators > edge_per_voter); - let rr = |a, b| rand::thread_rng().gen_range(a as usize, b as usize) as VoteWeight; - - let mut candidates = Vec::with_capacity(num_validators as usize); - let mut stake_of_tree: BTreeMap = BTreeMap::new(); - - (1 ..= num_validators).for_each(|acc| { - candidates.push(acc); - stake_of_tree.insert(acc, STAKE + rr(10, 1000)); - }); - - let mut voters = Vec::with_capacity(num_nominators as usize); - (PREFIX ..= (PREFIX + num_nominators)).for_each(|acc| { - // all possible targets - let mut all_targets = candidates.clone(); - // we remove and pop into `targets` `edge_per_voter` times. - let targets = (0 .. edge_per_voter).map(|_| { - all_targets.remove(rr(0, all_targets.len()) as usize) - }) - .collect::>(); - - let stake = STAKE + rr(10, 1000); - stake_of_tree.insert(acc, stake); - voters.push((acc, stake, targets)); - }); - - b.iter(|| { - let ElectionResult { winners, assignments } = seq_phragmen::( - to_elect, - Zero::zero(), - candidates.clone(), - voters.clone(), - ).unwrap(); - - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; - - // Do the benchmarking with balancing. - if eq_iters > 0 { - let staked = assignment_ratio_to_staked(assignments, &stake_of); - let winners = to_without_backing(winners); - let mut support = build_support_map( - winners.as_ref(), - staked.as_ref(), - ).unwrap(); - - balance_solution( - staked.into_iter().map(|a| (a.clone(), stake_of(&a.who))).collect(), - &mut support, - eq_tolerance, - eq_iters, - ); - } - }) -} - -macro_rules! phragmen_benches { - ($($name:ident: $tup:expr,)*) => { - $( - #[bench] - fn $name(b: &mut Bencher) { - let (v, n, t, e, eq_iter, eq_tol) = $tup; - println!("----------------------"); - println!( - "++ Benchmark: {} Validators // {} Nominators // {} Edges-per-nominator // {} \ - total edges // electing {} // Equalize: {} iterations -- {} tolerance", - v, n, e, e * n, t, eq_iter, eq_tol, - ); - do_phragmen(b, v, n, t, e, eq_iter, eq_tol); - } - )* - } -} - -phragmen_benches! 
{ - bench_1_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_2: (VALIDATORS * 2, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_3: (VALIDATORS * 4, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_4: (VALIDATORS * 8, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_1_2_eq: (VALIDATORS * 2, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_1_3_eq: (VALIDATORS * 4, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_1_4_eq: (VALIDATORS * 8, NOMINATORS, TO_ELECT, EDGES, 2, 0), - - bench_0_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_0_2: (VALIDATORS, NOMINATORS, TO_ELECT * 4, EDGES, 0, 0), - bench_0_3: (VALIDATORS, NOMINATORS, TO_ELECT * 8, EDGES, 0, 0), - bench_0_4: (VALIDATORS, NOMINATORS, TO_ELECT * 16, EDGES , 0, 0), - bench_0_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_0_2_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 4, EDGES, 2, 0), - bench_0_3_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 8, EDGES, 2, 0), - bench_0_4_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 16, EDGES , 2, 0), - - bench_2_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_2_2: (VALIDATORS, NOMINATORS * 2, TO_ELECT, EDGES, 0, 0), - bench_2_3: (VALIDATORS, NOMINATORS * 4, TO_ELECT, EDGES, 0, 0), - bench_2_4: (VALIDATORS, NOMINATORS * 8, TO_ELECT, EDGES, 0, 0), - bench_2_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_2_2_eq: (VALIDATORS, NOMINATORS * 2, TO_ELECT, EDGES, 2, 0), - bench_2_3_eq: (VALIDATORS, NOMINATORS * 4, TO_ELECT, EDGES, 2, 0), - bench_2_4_eq: (VALIDATORS, NOMINATORS * 8, TO_ELECT, EDGES, 2, 0), - - bench_3_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0 ), - bench_3_2: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 2, 0, 0), - bench_3_3: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 4, 0, 0), - bench_3_4: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 8, 0, 0), - bench_3_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_3_2_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 2, 2, 0), - bench_3_3_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 4, 2, 0), - bench_3_4_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 8, 2, 0), -} diff --git a/primitives/npos-elections/compact/src/assignment.rs b/primitives/npos-elections/compact/src/assignment.rs deleted file mode 100644 index 8b61076521d7c..0000000000000 --- a/primitives/npos-elections/compact/src/assignment.rs +++ /dev/null @@ -1,205 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Code generation for the ratio assignment type' compact representation. 
- -use crate::field_name_for; -use proc_macro2::TokenStream as TokenStream2; -use quote::quote; - -fn from_impl(count: usize) -> TokenStream2 { - let from_impl_single = { - let name = field_name_for(1); - quote!(1 => compact.#name.push( - ( - index_of_voter(&who).or_invalid_index()?, - index_of_target(&distribution[0].0).or_invalid_index()?, - ) - ),) - }; - - let from_impl_double = { - let name = field_name_for(2); - quote!(2 => compact.#name.push( - ( - index_of_voter(&who).or_invalid_index()?, - ( - index_of_target(&distribution[0].0).or_invalid_index()?, - distribution[0].1, - ), - index_of_target(&distribution[1].0).or_invalid_index()?, - ) - ),) - }; - - let from_impl_rest = (3..=count).map(|c| { - let inner = (0..c-1).map(|i| - quote!((index_of_target(&distribution[#i].0).or_invalid_index()?, distribution[#i].1),) - ).collect::(); - - let field_name = field_name_for(c); - let last_index = c - 1; - let last = quote!(index_of_target(&distribution[#last_index].0).or_invalid_index()?); - - quote!( - #c => compact.#field_name.push( - ( - index_of_voter(&who).or_invalid_index()?, - [#inner], - #last, - ) - ), - ) - }).collect::(); - - quote!( - #from_impl_single - #from_impl_double - #from_impl_rest - ) -} - -fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { - let into_impl_single = { - let name = field_name_for(1); - quote!( - for (voter_index, target_index) in self.#name { - assignments.push(_npos::Assignment { - who: voter_at(voter_index).or_invalid_index()?, - distribution: vec![ - (target_at(target_index).or_invalid_index()?, #per_thing::one()) - ], - }) - } - ) - }; - - let into_impl_double = { - let name = field_name_for(2); - quote!( - for (voter_index, (t1_idx, p1), t2_idx) in self.#name { - if p1 >= #per_thing::one() { - return Err(_npos::Error::CompactStakeOverflow); - } - - // defensive only. Since Percent doesn't have `Sub`. - let p2 = _npos::sp_arithmetic::traits::Saturating::saturating_sub( - #per_thing::one(), - p1, - ); - - assignments.push( _npos::Assignment { - who: voter_at(voter_index).or_invalid_index()?, - distribution: vec![ - (target_at(t1_idx).or_invalid_index()?, p1), - (target_at(t2_idx).or_invalid_index()?, p2), - ] - }); - } - ) - }; - - let into_impl_rest = (3..=count).map(|c| { - let name = field_name_for(c); - quote!( - for (voter_index, inners, t_last_idx) in self.#name { - let mut sum = #per_thing::zero(); - let mut inners_parsed = inners - .iter() - .map(|(ref t_idx, p)| { - sum = _npos::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); - let target = target_at(*t_idx).or_invalid_index()?; - Ok((target, *p)) - }) - .collect::, _npos::Error>>()?; - - if sum >= #per_thing::one() { - return Err(_npos::Error::CompactStakeOverflow); - } - - // defensive only. Since Percent doesn't have `Sub`. 
- let p_last = _npos::sp_arithmetic::traits::Saturating::saturating_sub( - #per_thing::one(), - sum, - ); - - inners_parsed.push((target_at(t_last_idx).or_invalid_index()?, p_last)); - - assignments.push(_npos::Assignment { - who: voter_at(voter_index).or_invalid_index()?, - distribution: inners_parsed, - }); - } - ) - }).collect::(); - - quote!( - #into_impl_single - #into_impl_double - #into_impl_rest - ) -} - -pub(crate) fn assignment( - ident: syn::Ident, - voter_type: syn::Type, - target_type: syn::Type, - weight_type: syn::Type, - count: usize, -) -> TokenStream2 { - let from_impl = from_impl(count); - let into_impl = into_impl(count, weight_type.clone()); - - quote!( - use _npos::__OrInvalidIndex; - impl #ident { - pub fn from_assignment( - assignments: Vec<_npos::Assignment>, - index_of_voter: FV, - index_of_target: FT, - ) -> Result - where - A: _npos::IdentifierT, - for<'r> FV: Fn(&'r A) -> Option<#voter_type>, - for<'r> FT: Fn(&'r A) -> Option<#target_type>, - { - let mut compact: #ident = Default::default(); - - for _npos::Assignment { who, distribution } in assignments { - match distribution.len() { - 0 => continue, - #from_impl - _ => { - return Err(_npos::Error::CompactTargetOverflow); - } - } - }; - Ok(compact) - } - - pub fn into_assignment( - self, - voter_at: impl Fn(#voter_type) -> Option, - target_at: impl Fn(#target_type) -> Option, - ) -> Result>, _npos::Error> { - let mut assignments: Vec<_npos::Assignment> = Default::default(); - #into_impl - Ok(assignments) - } - } - ) -} diff --git a/primitives/npos-elections/compact/src/codec.rs b/primitives/npos-elections/compact/src/codec.rs deleted file mode 100644 index 6c5a3bc2134d3..0000000000000 --- a/primitives/npos-elections/compact/src/codec.rs +++ /dev/null @@ -1,203 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Code generation for the ratio assignment type' encode/decode impl. - -use crate::field_name_for; -use proc_macro2::TokenStream as TokenStream2; -use quote::quote; - -pub(crate) fn codec_impl( - ident: syn::Ident, - voter_type: syn::Type, - target_type: syn::Type, - weight_type: syn::Type, - count: usize, -) -> TokenStream2 { - let encode = encode_impl(ident.clone(), count); - let decode = decode_impl(ident, voter_type, target_type, weight_type, count); - - quote! { - #encode - #decode - } -} - -fn decode_impl( - ident: syn::Ident, - voter_type: syn::Type, - target_type: syn::Type, - weight_type: syn::Type, - count: usize, -) -> TokenStream2 { - let decode_impl_single = { - let name = field_name_for(1); - quote! { - let #name = - < - Vec<(_npos::codec::Compact<#voter_type>, _npos::codec::Compact<#target_type>)> - as - _npos::codec::Decode - >::decode(value)?; - let #name = #name - .into_iter() - .map(|(v, t)| (v.0, t.0)) - .collect::>(); - } - }; - - let decode_impl_double = { - let name = field_name_for(2); - quote! 
{ - let #name = - < - Vec<( - _npos::codec::Compact<#voter_type>, - (_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>), - _npos::codec::Compact<#target_type>, - )> - as - _npos::codec::Decode - >::decode(value)?; - let #name = #name - .into_iter() - .map(|(v, (t1, w), t2)| (v.0, (t1.0, w.0), t2.0)) - .collect::>(); - } - }; - - let decode_impl_rest = (3..=count).map(|c| { - let name = field_name_for(c); - - let inner_impl = (0..c-1).map(|i| - quote! { ( (inner[#i].0).0, (inner[#i].1).0 ), } - ).collect::(); - - quote! { - let #name = - < - Vec<( - _npos::codec::Compact<#voter_type>, - [(_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>); #c-1], - _npos::codec::Compact<#target_type>, - )> - as _npos::codec::Decode - >::decode(value)?; - let #name = #name - .into_iter() - .map(|(v, inner, t_last)| ( - v.0, - [ #inner_impl ], - t_last.0, - )) - .collect::>(); - } - }).collect::(); - - - let all_field_names = (1..=count).map(|c| { - let name = field_name_for(c); - quote! { #name, } - }).collect::(); - - quote!( - impl _npos::codec::Decode for #ident { - fn decode(value: &mut I) -> Result { - #decode_impl_single - #decode_impl_double - #decode_impl_rest - - // The above code generates variables with the decoded value with the same name as - // filed names of the struct, i.e. `let votes4 = decode_value_of_votes4`. All we - // have to do is collect them into the main struct now. - Ok(#ident { #all_field_names }) - } - } - ) -} - -// General attitude is that we will convert inner values to `Compact` and then use the normal -// `Encode` implementation. -fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { - let encode_impl_single = { - let name = field_name_for(1); - quote! { - let #name = self.#name - .iter() - .map(|(v, t)| ( - _npos::codec::Compact(v.clone()), - _npos::codec::Compact(t.clone()), - )) - .collect::>(); - #name.encode_to(&mut r); - } - }; - - let encode_impl_double = { - let name = field_name_for(2); - quote! { - let #name = self.#name - .iter() - .map(|(v, (t1, w), t2)| ( - _npos::codec::Compact(v.clone()), - ( - _npos::codec::Compact(t1.clone()), - _npos::codec::Compact(w.clone()) - ), - _npos::codec::Compact(t2.clone()), - )) - .collect::>(); - #name.encode_to(&mut r); - } - }; - - let encode_impl_rest = (3..=count).map(|c| { - let name = field_name_for(c); - - // we use the knowledge of the length to avoid copy_from_slice. - let inners_compact_array = (0..c-1).map(|i| - quote!{( - _npos::codec::Compact(inner[#i].0.clone()), - _npos::codec::Compact(inner[#i].1.clone()), - ),} - ).collect::(); - - quote! { - let #name = self.#name - .iter() - .map(|(v, inner, t_last)| ( - _npos::codec::Compact(v.clone()), - [ #inners_compact_array ], - _npos::codec::Compact(t_last.clone()), - )) - .collect::>(); - #name.encode_to(&mut r); - } - }).collect::(); - - quote!( - impl _npos::codec::Encode for #ident { - fn encode(&self) -> Vec { - let mut r = vec![]; - #encode_impl_single - #encode_impl_double - #encode_impl_rest - r - } - } - ) -} diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs deleted file mode 100644 index b35c407c40cd5..0000000000000 --- a/primitives/npos-elections/compact/src/lib.rs +++ /dev/null @@ -1,429 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Proc macro for a npos compact assignment. - -use proc_macro::TokenStream; -use proc_macro2::{TokenStream as TokenStream2, Span, Ident}; -use proc_macro_crate::crate_name; -use quote::quote; -use syn::{parse::{Parse, ParseStream, Result}}; - -mod assignment; -mod codec; - -// prefix used for struct fields in compact. -const PREFIX: &'static str = "votes"; - -pub(crate) fn syn_err(message: &'static str) -> syn::Error { - syn::Error::new(Span::call_site(), message) -} - -/// Generates a struct to store the election result in a small way. This can encode a structure -/// which is the equivalent of a `sp_npos_elections::Assignment<_>`. -/// -/// The following data types can be configured by the macro. -/// -/// - The identifier of the voter. This can be any type that supports `parity-scale-codec`'s compact -/// encoding. -/// - The identifier of the target. This can be any type that supports `parity-scale-codec`'s -/// compact encoding. -/// - The accuracy of the ratios. This must be one of the `PerThing` types defined in -/// `sp-arithmetic`. -/// -/// Moreover, the maximum number of edges per voter (distribution per assignment) also need to be -/// specified. Attempting to convert from/to an assignment with more distributions will fail. -/// -/// -/// For example, the following generates a public struct with name `TestSolution` with `u16` voter -/// type, `u8` target type and `Perbill` accuracy with maximum of 8 edges per voter. -/// -/// ```ignore -/// generate_solution_type!(pub struct TestSolution::(8)) -/// ``` -/// -/// The given struct provides function to convert from/to Assignment: -/// -/// - [`from_assignment()`]. -/// - [`fn into_assignment()`]. -/// -/// The generated struct is by default deriving both `Encode` and `Decode`. This is okay but could -/// lead to many 0s in the solution. If prefixed with `#[compact]`, then a custom compact encoding -/// for numbers will be used, similar to how `parity-scale-codec`'s `Compact` works. 
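Context for this deletion: the macro lives on in the new `sp-npos-elections-solution-type` crate (note the dependency rename in the Cargo.toml change earlier in this diff), and its invocation now names the type parameters explicitly, as the updated `compact` fuzzer further down shows. A sketch of the new form, with an arbitrary struct name:

```rust
use sp_npos_elections::{generate_solution_type, sp_arithmetic::Percent};

generate_solution_type!(#[compact] pub struct ExampleSolution::<
    VoterIndex = u32,
    TargetIndex = u32,
    Accuracy = Percent,
>(16));
```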
-/// -/// ```ignore -/// generate_solution_type!( -/// #[compact] -/// pub struct TestSolutionCompact::(8) -/// ) -/// ``` -#[proc_macro] -pub fn generate_solution_type(item: TokenStream) -> TokenStream { - let SolutionDef { - vis, - ident, - count, - voter_type, - target_type, - weight_type, - compact_encoding, - } = syn::parse_macro_input!(item as SolutionDef); - - let imports = imports().unwrap_or_else(|e| e.to_compile_error()); - - let solution_struct = struct_def( - vis, - ident.clone(), - count, - voter_type.clone(), - target_type.clone(), - weight_type.clone(), - compact_encoding, - ).unwrap_or_else(|e| e.to_compile_error()); - - let assignment_impls = assignment::assignment( - ident.clone(), - voter_type.clone(), - target_type.clone(), - weight_type.clone(), - count, - ); - - quote!( - #imports - #solution_struct - #assignment_impls - ).into() -} - -fn struct_def( - vis: syn::Visibility, - ident: syn::Ident, - count: usize, - voter_type: syn::Type, - target_type: syn::Type, - weight_type: syn::Type, - compact_encoding: bool, -) -> Result { - if count <= 2 { - Err(syn_err("cannot build compact solution struct with capacity less than 3."))? - } - - let singles = { - let name = field_name_for(1); - quote!( - #name: Vec<(#voter_type, #target_type)>, - ) - }; - - let doubles = { - let name = field_name_for(2); - quote!( - #name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>, - ) - }; - - let rest = (3..=count).map(|c| { - let field_name = field_name_for(c); - let array_len = c - 1; - quote!( - #field_name: Vec<( - #voter_type, - [(#target_type, #weight_type); #array_len], - #target_type - )>, - ) - }).collect::(); - - let len_impl = len_impl(count); - let edge_count_impl = edge_count_impl(count); - let unique_targets_impl = unique_targets_impl(count); - let remove_voter_impl = remove_voter_impl(count); - - let derives_and_maybe_compact_encoding = if compact_encoding { - // custom compact encoding. - let compact_impl = codec::codec_impl( - ident.clone(), - voter_type.clone(), - target_type.clone(), - weight_type.clone(), - count, - ); - quote!{ - #compact_impl - #[derive(Default, PartialEq, Eq, Clone, Debug)] - } - } else { - // automatically derived. - quote!(#[derive(Default, PartialEq, Eq, Clone, Debug, _npos::codec::Encode, _npos::codec::Decode)]) - }; - - Ok(quote! ( - /// A struct to encode a election assignment in a compact way. - #derives_and_maybe_compact_encoding - #vis struct #ident { #singles #doubles #rest } - - impl _npos::VotingLimit for #ident { - const LIMIT: usize = #count; - } - - impl #ident { - /// Get the length of all the assignments that this type is encoding. This is basically - /// the same as the number of assignments, or the number of voters in total. - pub fn len(&self) -> usize { - let mut all_len = 0usize; - #len_impl - all_len - } - - /// Get the total count of edges. - pub fn edge_count(&self) -> usize { - let mut all_edges = 0usize; - #edge_count_impl - all_edges - } - - /// Get the number of unique targets in the whole struct. - /// - /// Once presented with a list of winners, this set and the set of winners must be - /// equal. - /// - /// The resulting indices are sorted. 
- pub fn unique_targets(&self) -> Vec<#target_type> { - let mut all_targets: Vec<#target_type> = Vec::with_capacity(self.average_edge_count()); - let mut maybe_insert_target = |t: #target_type| { - match all_targets.binary_search(&t) { - Ok(_) => (), - Err(pos) => all_targets.insert(pos, t) - } - }; - - #unique_targets_impl - - all_targets - } - - /// Get the average edge count. - pub fn average_edge_count(&self) -> usize { - self.edge_count().checked_div(self.len()).unwrap_or(0) - } - - /// Remove a certain voter. - /// - /// This will only search until the first instance of `to_remove`, and return true. If - /// no instance is found (no-op), then it returns false. - /// - /// In other words, if this return true, exactly one element must have been removed from - /// `self.len()`. - pub fn remove_voter(&mut self, to_remove: #voter_type) -> bool { - #remove_voter_impl - return false - } - } - )) -} - -fn remove_voter_impl(count: usize) -> TokenStream2 { - let field_name = field_name_for(1); - let single = quote! { - if let Some(idx) = self.#field_name.iter().position(|(x, _)| *x == to_remove) { - self.#field_name.remove(idx); - return true - } - }; - - let field_name = field_name_for(2); - let double = quote! { - if let Some(idx) = self.#field_name.iter().position(|(x, _, _)| *x == to_remove) { - self.#field_name.remove(idx); - return true - } - }; - - let rest = (3..=count) - .map(|c| { - let field_name = field_name_for(c); - quote! { - if let Some(idx) = self.#field_name.iter().position(|(x, _, _)| *x == to_remove) { - self.#field_name.remove(idx); - return true - } - } - }) - .collect::(); - - quote! { - #single - #double - #rest - } -} - -fn len_impl(count: usize) -> TokenStream2 { - (1..=count).map(|c| { - let field_name = field_name_for(c); - quote!( - all_len = all_len.saturating_add(self.#field_name.len()); - ) - }).collect::() -} - -fn edge_count_impl(count: usize) -> TokenStream2 { - (1..=count).map(|c| { - let field_name = field_name_for(c); - quote!( - all_edges = all_edges.saturating_add( - self.#field_name.len().saturating_mul(#c as usize) - ); - ) - }).collect::() -} - -fn unique_targets_impl(count: usize) -> TokenStream2 { - let unique_targets_impl_single = { - let field_name = field_name_for(1); - quote! { - self.#field_name.iter().for_each(|(_, t)| { - maybe_insert_target(*t); - }); - } - }; - - let unique_targets_impl_double = { - let field_name = field_name_for(2); - quote! { - self.#field_name.iter().for_each(|(_, (t1, _), t2)| { - maybe_insert_target(*t1); - maybe_insert_target(*t2); - }); - } - }; - - let unique_targets_impl_rest = (3..=count).map(|c| { - let field_name = field_name_for(c); - quote! { - self.#field_name.iter().for_each(|(_, inners, t_last)| { - inners.iter().for_each(|(t, _)| { - maybe_insert_target(*t); - }); - maybe_insert_target(*t_last); - }); - } - }).collect::(); - - quote! { - #unique_targets_impl_single - #unique_targets_impl_double - #unique_targets_impl_rest - } -} - -fn imports() -> Result { - if std::env::var("CARGO_PKG_NAME").unwrap() == "sp-npos-elections" { - Ok(quote! 
{ - use crate as _npos; - }) - } else { - match crate_name("sp-npos-elections") { - Ok(sp_npos_elections) => { - let ident = syn::Ident::new(&sp_npos_elections, Span::call_site()); - Ok(quote!( extern crate #ident as _npos; )) - }, - Err(e) => Err(syn::Error::new(Span::call_site(), &e)), - } - } -} - -struct SolutionDef { - vis: syn::Visibility, - ident: syn::Ident, - voter_type: syn::Type, - target_type: syn::Type, - weight_type: syn::Type, - count: usize, - compact_encoding: bool, -} - -fn check_compact_attr(input: ParseStream) -> Result { - let mut attrs = input.call(syn::Attribute::parse_outer).unwrap_or_default(); - if attrs.len() == 1 { - let attr = attrs.pop().expect("Vec with len 1 can be popped."); - if attr.path.segments.len() == 1 { - let segment = attr.path.segments.first().expect("Vec with len 1 can be popped."); - if segment.ident == Ident::new("compact", Span::call_site()) { - Ok(true) - } else { - Err(syn_err("generate_solution_type macro can only accept #[compact] attribute.")) - } - } else { - Err(syn_err("generate_solution_type macro can only accept #[compact] attribute.")) - } - } else { - Ok(false) - } -} - -/// #[compact] pub struct CompactName::() -impl Parse for SolutionDef { - fn parse(input: ParseStream) -> syn::Result { - // optional #[compact] - let compact_encoding = check_compact_attr(input)?; - - // struct - let vis: syn::Visibility = input.parse()?; - let _ = ::parse(input)?; - let ident: syn::Ident = input.parse()?; - - // :: - let _ = ::parse(input)?; - let generics: syn::AngleBracketedGenericArguments = input.parse()?; - - if generics.args.len() != 3 { - return Err(syn_err("Must provide 3 generic args.")) - } - - let mut types: Vec = generics.args.iter().map(|t| - match t { - syn::GenericArgument::Type(ty) => Ok(ty.clone()), - _ => Err(syn_err("Wrong type of generic provided. Must be a `type`.")), - } - ).collect::>()?; - - let weight_type = types.pop().expect("Vector of length 3 can be popped; qed"); - let target_type = types.pop().expect("Vector of length 2 can be popped; qed"); - let voter_type = types.pop().expect("Vector of length 1 can be popped; qed"); - - // () - let count_expr: syn::ExprParen = input.parse()?; - let expr = count_expr.expr; - let expr_lit = match *expr { - syn::Expr::Lit(count_lit) => count_lit.lit, - _ => return Err(syn_err("Count must be literal.")) - }; - let int_lit = match expr_lit { - syn::Lit::Int(int_lit) => int_lit, - _ => return Err(syn_err("Count must be int literal.")) - }; - let count = int_lit.base10_parse::()?; - - Ok(Self { vis, ident, voter_type, target_type, weight_type, count, compact_encoding } ) - } -} - -fn field_name_for(n: usize) -> Ident { - Ident::new(&format!("{}{}", PREFIX, n), Span::call_site()) -} diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 49740b2cf3cae..d6fcc09c8b586 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,12 +14,13 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-npos-elections = { version = "2.0.0", path = ".." 
} -sp-std = { version = "2.0.0", path = "../../std" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.7.3", features = ["std", "small_rng"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +sp-npos-elections = { version = "4.0.0-dev", path = ".." } +sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } +structopt = "0.3.21" [[bin]] name = "reduce" @@ -36,3 +37,7 @@ path = "src/phragmms_balancing.rs" [[bin]] name = "compact" path = "src/compact.rs" + +[[bin]] +name = "phragmen_pjr" +path = "src/phragmen_pjr.rs" diff --git a/primitives/npos-elections/fuzzer/src/common.rs b/primitives/npos-elections/fuzzer/src/common.rs index a5099098f5a86..e97f7f7df8b11 100644 --- a/primitives/npos-elections/fuzzer/src/common.rs +++ b/primitives/npos-elections/fuzzer/src/common.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,10 +20,10 @@ // Each function will be used based on which fuzzer binary is being used. #![allow(dead_code)] -use sp_npos_elections::{ElectionResult, VoteWeight, phragmms, seq_phragmen}; -use sp_std::collections::btree_map::BTreeMap; +use rand::{self, seq::SliceRandom, Rng, RngCore}; +use sp_npos_elections::{phragmms, seq_phragmen, ElectionResult, VoteWeight}; use sp_runtime::Perbill; -use rand::{self, Rng, RngCore}; +use std::collections::{BTreeMap, HashSet}; /// converts x into the range [a, b] in a pseudo-fair way. pub fn to_range(x: usize, a: usize, b: usize) -> usize { @@ -39,11 +39,77 @@ pub fn to_range(x: usize, a: usize, b: usize) -> usize { pub enum ElectionType { Phragmen(Option<(usize, u128)>), - Phragmms(Option<(usize, u128)>) + Phragmms(Option<(usize, u128)>), } pub type AccountId = u64; +/// Generate a set of inputs suitable for fuzzing an election algorithm +/// +/// Given parameters governing how many candidates and voters should exist, generates a voting +/// scenario suitable for fuzz-testing an election algorithm. +/// +/// The returned candidate list is sorted. This sorting property should not affect the result of the +/// calculation. +/// +/// The returned voters list is sorted. This enables binary searching for a particular voter by +/// account id. This sorting property should not affect the results of the calculation. +/// +/// Each voter's selection of candidates to vote for is sorted. +/// +/// Note that this does not generate balancing parameters. 
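As a quick illustration, the generator defined next can be driven with a seeded RNG, just as the PJR fuzzer later in this diff does (the counts here are arbitrary; `small_rng` is enabled in the fuzzer's Cargo.toml above):

```rust
use rand::SeedableRng;

fn sample_inputs() {
    let rng = rand::rngs::SmallRng::seed_from_u64(0);
    // 100 candidates and 500 voters; `rounds` is the desired number of winners,
    // always chosen strictly between 0 and the candidate count.
    let (rounds, candidates, voters) = generate_random_npos_inputs(100, 500, rng);
    assert!(0 < rounds && rounds < candidates.len());
    assert_eq!(voters.len(), 500);
}
```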
+pub fn generate_random_npos_inputs( + candidate_count: usize, + voter_count: usize, + mut rng: impl Rng, +) -> (usize, Vec, Vec<(AccountId, VoteWeight, Vec)>) { + // cache for fast generation of unique candidate and voter ids + let mut used_ids = HashSet::with_capacity(candidate_count + voter_count); + + // always generate a sensible desired number of candidates: elections are uninteresting if we + // desire 0 candidates, or a number of candidates >= the actual number of candidates present + let rounds = rng.gen_range(1, candidate_count); + + // candidates are easy: just a completely random set of IDs + let mut candidates: Vec = Vec::with_capacity(candidate_count); + for _ in 0..candidate_count { + let mut id = rng.gen(); + // insert returns `false` when the value was already present + while !used_ids.insert(id) { + id = rng.gen(); + } + candidates.push(id); + } + candidates.sort_unstable(); + candidates.dedup(); + assert_eq!(candidates.len(), candidate_count); + + let mut voters = Vec::with_capacity(voter_count); + for _ in 0..voter_count { + let mut id = rng.gen(); + // insert returns `false` when the value was already present + while !used_ids.insert(id) { + id = rng.gen(); + } + + let vote_weight = rng.gen(); + + // it's not interesting if a voter chooses 0 or all candidates, so rule those cases out. + let n_candidates_chosen = rng.gen_range(1, candidates.len()); + + let mut chosen_candidates = Vec::with_capacity(n_candidates_chosen); + chosen_candidates.extend(candidates.choose_multiple(&mut rng, n_candidates_chosen)); + chosen_candidates.sort(); + voters.push((id, vote_weight, chosen_candidates)); + } + + voters.sort_unstable(); + voters.dedup_by_key(|(id, _weight, _chosen_candidates)| *id); + assert_eq!(voters.len(), voter_count); + + (rounds, candidates, voters) +} + pub fn generate_random_npos_result( voter_count: u64, target_count: u64, @@ -71,19 +137,20 @@ pub fn generate_random_npos_result( }); let mut voters = Vec::with_capacity(voter_count as usize); - (prefix ..= (prefix + voter_count)).for_each(|acc| { + (prefix..=(prefix + voter_count)).for_each(|acc| { let edge_per_this_voter = rng.gen_range(1, candidates.len()); // all possible targets let mut all_targets = candidates.clone(); // we remove and pop into `targets` `edge_per_this_voter` times. 
- let targets = (0..edge_per_this_voter).map(|_| { - let upper = all_targets.len() - 1; - let idx = rng.gen_range(0, upper); - all_targets.remove(idx) - }) - .collect::>(); - - let stake_var = rng.gen_range(ed, 100 * ed) ; + let targets = (0..edge_per_this_voter) + .map(|_| { + let upper = all_targets.len() - 1; + let idx = rng.gen_range(0, upper); + all_targets.remove(idx) + }) + .collect::>(); + + let stake_var = rng.gen_range(ed, 100 * ed); let stake = base_stake + stake_var; stake_of.insert(acc, stake); voters.push((acc, stake, targets)); @@ -91,20 +158,20 @@ pub fn generate_random_npos_result( ( match election_type { - ElectionType::Phragmen(conf) => - seq_phragmen::( - to_elect, - candidates.clone(), - voters.clone(), - conf, - ).unwrap(), - ElectionType::Phragmms(conf) => - phragmms::( - to_elect, - candidates.clone(), - voters.clone(), - conf, - ).unwrap(), + ElectionType::Phragmen(conf) => seq_phragmen::( + to_elect, + candidates.clone(), + voters.clone(), + conf, + ) + .unwrap(), + ElectionType::Phragmms(conf) => phragmms::( + to_elect, + candidates.clone(), + voters.clone(), + conf, + ) + .unwrap(), }, candidates, voters, diff --git a/primitives/npos-elections/fuzzer/src/compact.rs b/primitives/npos-elections/fuzzer/src/compact.rs index 91f734bb5b7cb..4e78c94b82572 100644 --- a/primitives/npos-elections/fuzzer/src/compact.rs +++ b/primitives/npos-elections/fuzzer/src/compact.rs @@ -1,30 +1,34 @@ use honggfuzz::fuzz; -use sp_npos_elections::generate_solution_type; -use sp_npos_elections::sp_arithmetic::Percent; +use sp_npos_elections::{generate_solution_type, sp_arithmetic::Percent}; use sp_runtime::codec::{Encode, Error}; fn main() { - generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = Percent, + >(16)); loop { fuzz!(|fuzzer_data: &[u8]| { let result_decoded: Result = ::decode(&mut &fuzzer_data[..]); - // Ignore errors as not every random sequence of bytes can be decoded as InnerTestSolutionCompact + // Ignore errors as not every random sequence of bytes can be decoded as + // InnerTestSolutionCompact if let Ok(decoded) = result_decoded { // Decoding works, let's re-encode it and compare results. let reencoded: std::vec::Vec = decoded.encode(); - // The reencoded value may or may not be equal to the original fuzzer output. However, the - // original decoder should be optimal (in the sense that there is no shorter encoding of - // the same object). So let's see if the fuzzer can find something shorter: + // The reencoded value may or may not be equal to the original fuzzer output. + // However, the original decoder should be optimal (in the sense that there is no + // shorter encoding of the same object). 
So let's see if the fuzzer can find + // something shorter: if fuzzer_data.len() < reencoded.len() { panic!("fuzzer_data.len() < reencoded.len()"); } - // The reencoded value should definitely be decodable (if unwrap() fails that is a valid - // panic/finding for the fuzzer): + // The reencoded value should definitely be decodable (if unwrap() fails that is a + // valid panic/finding for the fuzzer): let decoded2: InnerTestSolutionCompact = - ::decode( - &mut reencoded.as_slice(), - ).unwrap(); + ::decode(&mut reencoded.as_slice()) + .unwrap(); // And it should be equal to the original decoded object (resulting from directly // decoding fuzzer_data): assert_eq!(decoded, decoded2); diff --git a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs index 67cc7ba3c9a9a..0c140a8ce6fad 100644 --- a/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,23 +21,17 @@ mod common; use common::*; use honggfuzz::fuzz; +use rand::{self, SeedableRng}; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, build_support_map, to_without_backing, VoteWeight, - evaluate_support, is_score_better, seq_phragmen, + assignment_ratio_to_staked_normalized, is_score_better, seq_phragmen, to_supports, + EvaluateSupport, VoteWeight, }; use sp_runtime::Perbill; -use rand::{self, SeedableRng}; fn main() { loop { fuzz!(|data: (usize, usize, usize, usize, u64)| { - let ( - mut target_count, - mut voter_count, - mut iterations, - mut to_elect, - seed, - ) = data; + let (mut target_count, mut voter_count, mut iterations, mut to_elect, seed) = data; let rng = rand::rngs::SmallRng::seed_from_u64(seed); target_count = to_range(target_count, 100, 200); voter_count = to_range(voter_count, 100, 200); @@ -48,12 +42,7 @@ fn main() { "++ [voter_count: {} / target_count:{} / to_elect:{} / iterations:{}]", voter_count, target_count, to_elect, iterations, ); - let ( - unbalanced, - candidates, - voters, - stake_of_tree, - ) = generate_random_npos_result( + let (unbalanced, candidates, voters, stake_of_tree) = generate_random_npos_result( voter_count as u64, target_count as u64, to_elect, @@ -61,19 +50,19 @@ fn main() { ElectionType::Phragmen(None), ); - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; + let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; let unbalanced_score = { - let staked = assignment_ratio_to_staked_normalized(unbalanced.assignments.clone(), &stake_of).unwrap(); - let winners = to_without_backing(unbalanced.winners.clone()); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); + let staked = assignment_ratio_to_staked_normalized( + unbalanced.assignments.clone(), + &stake_of, + ) + .unwrap(); + let score = to_supports(staked.as_ref()).evaluate(); - let score = evaluate_support(&support); if score[0] == 0 { // such cases cannot be improved by balancing. 
- return; + return } score }; @@ -84,32 +73,31 @@ fn main() { candidates, voters, Some((iterations, 0)), - ).unwrap(); + ) + .unwrap(); let balanced_score = { - let staked = assignment_ratio_to_staked_normalized(balanced.assignments.clone(), &stake_of).unwrap(); - let winners = to_without_backing(balanced.winners); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); - - evaluate_support(&support) + let staked = assignment_ratio_to_staked_normalized( + balanced.assignments.clone(), + &stake_of, + ) + .unwrap(); + to_supports(staked.as_ref()).evaluate() }; let enhance = is_score_better(balanced_score, unbalanced_score, Perbill::zero()); println!( "iter = {} // {:?} -> {:?} [{}]", - iterations, - unbalanced_score, - balanced_score, - enhance, + iterations, unbalanced_score, balanced_score, enhance, ); - // The only guarantee of balancing is such that the first and third element of the score - // cannot decrease. + // The only guarantee of balancing is such that the first and third element of the + // score cannot decrease. assert!( balanced_score[0] >= unbalanced_score[0] && - balanced_score[1] == unbalanced_score[1] && - balanced_score[2] <= unbalanced_score[2] + balanced_score[1] == unbalanced_score[1] && + balanced_score[2] <= unbalanced_score[2] ); } }); diff --git a/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs new file mode 100644 index 0000000000000..f1110da8ef8b0 --- /dev/null +++ b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs @@ -0,0 +1,117 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Fuzzing which ensures that running unbalanced sequential phragmen always produces a result +//! which satisfies our PJR checker. +//! +//! ## Running a single iteration +//! +//! Honggfuzz shuts down each individual loop iteration after a configurable time limit. +//! It can be helpful to run a single iteration on your hardware to help benchmark how long that +//! time limit should reasonably be. Simply run the program without the `fuzzing` configuration to +//! run a single iteration: `cargo run --bin phragmen_pjr`. +//! +//! ## Running +//! +//! Run with `HFUZZ_RUN_ARGS="-t 10" cargo hfuzz run phragmen_pjr`. +//! +//! Note the environment variable: by default, `cargo hfuzz` shuts down each iteration after 1 +//! second of runtime. We significantly increase that to ensure that the fuzzing gets a chance to +//! complete. Running a single iteration can help determine an appropriate value for this parameter. +//! +//! ## Debugging a panic +//! +//! Once a panic is found, it can be debugged with +//! `HFUZZ_RUN_ARGS="-t 10" cargo hfuzz run-debug phragmen_pjr hfuzz_workspace/phragmen_pjr/*.fuzz`. 
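+//!
+//! ## The invariant, in short
+//!
+//! For orientation, a hedged sketch of the property this fuzzer exercises, written against
+//! this crate's public API; the candidates, voters, and stakes are toy values invented for
+//! illustration:
+//!
+//! ```ignore
+//! use sp_npos_elections::{pjr_check_core, seq_phragmen_core, setup_inputs, standard_threshold};
+//!
+//! // Two candidates, three voters, equal toy stakes.
+//! let candidates = vec![10u64, 20];
+//! let voters = vec![(1u64, 100u64, vec![10u64, 20]), (2, 100, vec![10]), (3, 100, vec![20])];
+//!
+//! let (candidates, voters) = setup_inputs(candidates, voters);
+//! let (candidates, voters) =
+//! 	seq_phragmen_core::<u64>(2, candidates, voters).expect("valid inputs");
+//!
+//! // Whatever unbalanced seq-phragmen elects must satisfy PJR at the standard threshold.
+//! let threshold = standard_threshold(2, voters.iter().map(|voter| voter.budget()));
+//! assert!(pjr_check_core(&candidates, &voters, threshold).is_ok());
+//! ```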
+ +#[cfg(fuzzing)] +use honggfuzz::fuzz; + +#[cfg(not(fuzzing))] +use structopt::StructOpt; + +mod common; +use common::{generate_random_npos_inputs, to_range}; +use rand::{self, SeedableRng}; +use sp_npos_elections::{pjr_check_core, seq_phragmen_core, setup_inputs, standard_threshold}; + +type AccountId = u64; + +const MIN_CANDIDATES: usize = 250; +const MAX_CANDIDATES: usize = 1000; +const MIN_VOTERS: usize = 500; +const MAX_VOTERS: usize = 2500; + +#[cfg(fuzzing)] +fn main() { + loop { + fuzz!(|data: (usize, usize, u64)| { + let (candidate_count, voter_count, seed) = data; + iteration(candidate_count, voter_count, seed); + }); + } +} + +#[cfg(not(fuzzing))] +#[derive(Debug, StructOpt)] +struct Opt { + /// How many candidates participate in this election + #[structopt(short, long)] + candidates: Option, + + /// How many voters participate in this election + #[structopt(short, long)] + voters: Option, + + /// Random seed to use in this election + #[structopt(long)] + seed: Option, +} + +#[cfg(not(fuzzing))] +fn main() { + let opt = Opt::from_args(); + // candidates and voters by default use the maxima, which turn out to be one less than + // the constant. + iteration( + opt.candidates.unwrap_or(MAX_CANDIDATES - 1), + opt.voters.unwrap_or(MAX_VOTERS - 1), + opt.seed.unwrap_or_default(), + ); +} + +fn iteration(mut candidate_count: usize, mut voter_count: usize, seed: u64) { + let rng = rand::rngs::SmallRng::seed_from_u64(seed); + candidate_count = to_range(candidate_count, MIN_CANDIDATES, MAX_CANDIDATES); + voter_count = to_range(voter_count, MIN_VOTERS, MAX_VOTERS); + + let (rounds, candidates, voters) = + generate_random_npos_inputs(candidate_count, voter_count, rng); + + let (candidates, voters) = setup_inputs(candidates, voters); + + // Run seq-phragmen + let (candidates, voters) = seq_phragmen_core::(rounds, candidates, voters) + .expect("seq_phragmen must succeed"); + + let threshold = standard_threshold(rounds, voters.iter().map(|voter| voter.budget())); + + assert!( + pjr_check_core(&candidates, &voters, threshold).is_ok(), + "unbalanced sequential phragmen must satisfy PJR", + ); +} diff --git a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs index 0aada6a5624dd..7b2aacfa85882 100644 --- a/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs +++ b/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,23 +21,17 @@ mod common; use common::*; use honggfuzz::fuzz; +use rand::{self, SeedableRng}; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, build_support_map, to_without_backing, VoteWeight, - evaluate_support, is_score_better, phragmms, + assignment_ratio_to_staked_normalized, is_score_better, phragmms, to_supports, EvaluateSupport, + VoteWeight, }; use sp_runtime::Perbill; -use rand::{self, SeedableRng}; fn main() { loop { fuzz!(|data: (usize, usize, usize, usize, u64)| { - let ( - mut target_count, - mut voter_count, - mut iterations, - mut to_elect, - seed, - ) = data; + let (mut target_count, mut voter_count, mut iterations, mut to_elect, seed) = data; let rng = rand::rngs::SmallRng::seed_from_u64(seed); target_count = to_range(target_count, 100, 200); voter_count = to_range(voter_count, 100, 200); @@ -48,12 +42,7 @@ fn main() { "++ [voter_count: {} / target_count:{} / to_elect:{} / iterations:{}]", voter_count, target_count, to_elect, iterations, ); - let ( - unbalanced, - candidates, - voters, - stake_of_tree, - ) = generate_random_npos_result( + let (unbalanced, candidates, voters, stake_of_tree) = generate_random_npos_result( voter_count as u64, target_count as u64, to_elect, @@ -61,19 +50,19 @@ fn main() { ElectionType::Phragmms(None), ); - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; + let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; let unbalanced_score = { - let staked = assignment_ratio_to_staked_normalized(unbalanced.assignments.clone(), &stake_of).unwrap(); - let winners = to_without_backing(unbalanced.winners.clone()); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); + let staked = assignment_ratio_to_staked_normalized( + unbalanced.assignments.clone(), + &stake_of, + ) + .unwrap(); + let score = to_supports(&staked).evaluate(); - let score = evaluate_support(&support); if score[0] == 0 { // such cases cannot be improved by balancing. - return; + return } score }; @@ -83,32 +72,29 @@ fn main() { candidates, voters, Some((iterations, 0)), - ).unwrap(); + ) + .unwrap(); let balanced_score = { - let staked = assignment_ratio_to_staked_normalized(balanced.assignments.clone(), &stake_of).unwrap(); - let winners = to_without_backing(balanced.winners); - let support = build_support_map(winners.as_ref(), staked.as_ref()).unwrap(); - - evaluate_support(&support) + let staked = + assignment_ratio_to_staked_normalized(balanced.assignments.clone(), &stake_of) + .unwrap(); + to_supports(staked.as_ref()).evaluate() }; let enhance = is_score_better(balanced_score, unbalanced_score, Perbill::zero()); println!( "iter = {} // {:?} -> {:?} [{}]", - iterations, - unbalanced_score, - balanced_score, - enhance, + iterations, unbalanced_score, balanced_score, enhance, ); // The only guarantee of balancing is such that the first and third element of the score // cannot decrease. 
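 // In score terms: the triplet is [minimal winner backing, total backing, sum of squared
 // backings]; balancing spreads each voter's stake more evenly across its elected edges, so
 // the minimum can only rise, the total is conserved, and the squared sum can only fall.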
assert!( balanced_score[0] >= unbalanced_score[0] && - balanced_score[1] == unbalanced_score[1] && - balanced_score[2] <= unbalanced_score[2] + balanced_score[1] == unbalanced_score[1] && + balanced_score[2] <= unbalanced_score[2] ); }); } diff --git a/primitives/npos-elections/fuzzer/src/reduce.rs b/primitives/npos-elections/fuzzer/src/reduce.rs index 0f0d9893e048e..5f8a4f0e13844 100644 --- a/primitives/npos-elections/fuzzer/src/reduce.rs +++ b/primitives/npos-elections/fuzzer/src/reduce.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,8 +34,8 @@ use honggfuzz::fuzz; mod common; use common::to_range; -use sp_npos_elections::{StakedAssignment, ExtendedBalance, build_support_map, reduce}; -use rand::{self, Rng, SeedableRng, RngCore}; +use rand::{self, Rng, RngCore, SeedableRng}; +use sp_npos_elections::{reduce, to_support_map, ExtendedBalance, StakedAssignment}; type Balance = u128; type AccountId = u64; @@ -50,13 +50,8 @@ fn main() { let rng = rand::rngs::SmallRng::seed_from_u64(seed); target_count = to_range(target_count, 100, 1000); voter_count = to_range(voter_count, 100, 2000); - let (assignments, winners) = generate_random_phragmen_assignment( - voter_count, - target_count, - 8, - 8, - rng - ); + let (assignments, winners) = + generate_random_phragmen_assignment(voter_count, target_count, 8, 8, rng); reduce_and_compare(&assignments, &winners); }); } @@ -82,46 +77,44 @@ fn generate_random_phragmen_assignment( (1..=voter_count).for_each(|acc| { let mut targets_to_chose_from = all_targets.clone(); - let targets_to_chose = if edge_per_voter_var > 0 { rng.gen_range( - avg_edge_per_voter - edge_per_voter_var, - avg_edge_per_voter + edge_per_voter_var, - ) } else { avg_edge_per_voter }; - - let distribution = (0..targets_to_chose).map(|_| { - let target = targets_to_chose_from.remove(rng.gen_range(0, targets_to_chose_from.len())); - if winners.iter().find(|w| **w == target).is_none() { - winners.push(target.clone()); - } - (target, rng.gen_range(1 * KSM, 100 * KSM)) - }).collect::>(); - - assignments.push(StakedAssignment { - who: (acc as AccountId), - distribution, - }); + let targets_to_chose = if edge_per_voter_var > 0 { + rng.gen_range( + avg_edge_per_voter - edge_per_voter_var, + avg_edge_per_voter + edge_per_voter_var, + ) + } else { + avg_edge_per_voter + }; + + let distribution = (0..targets_to_chose) + .map(|_| { + let target = + targets_to_chose_from.remove(rng.gen_range(0, targets_to_chose_from.len())); + if winners.iter().find(|w| **w == target).is_none() { + winners.push(target.clone()); + } + (target, rng.gen_range(1 * KSM, 100 * KSM)) + }) + .collect::>(); + + assignments.push(StakedAssignment { who: (acc as AccountId), distribution }); }); (assignments, winners) } fn assert_assignments_equal( - winners: &Vec, ass1: &Vec>, ass2: &Vec>, ) { - - let support_1 = build_support_map::(winners, ass1).unwrap(); - let support_2 = build_support_map::(winners, ass2).unwrap(); - + let support_1 = to_support_map::(ass1); + let support_2 = to_support_map::(ass2); for (who, support) in support_1.iter() { assert_eq!(support.total, support_2.get(who).unwrap().total); } } -fn reduce_and_compare( - assignment: &Vec>, - winners: &Vec, -) { +fn reduce_and_compare(assignment: &Vec>, winners: &Vec) { let mut altered_assignment = assignment.clone(); let n = assignment.len() 
as u32; let m = winners.len() as u32; @@ -139,15 +132,13 @@ fn reduce_and_compare( num_changed, ); - assert_assignments_equal( - winners, - &assignment, - &altered_assignment, - ); + assert_assignments_equal(&assignment, &altered_assignment); } fn assignment_len(assignments: &[StakedAssignment]) -> u32 { let mut counter = 0; - assignments.iter().for_each(|x| x.distribution.iter().for_each(|_| counter += 1)); + assignments + .iter() + .for_each(|x| x.distribution.iter().for_each(|_| counter += 1)); counter } diff --git a/primitives/npos-elections/solution-type/Cargo.toml b/primitives/npos-elections/solution-type/Cargo.toml new file mode 100644 index 0000000000000..cbe6750266f01 --- /dev/null +++ b/primitives/npos-elections/solution-type/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "sp-npos-elections-solution-type" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "NPoS Solution Type" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "1.0.58", features = ["full", "visit"] } +quote = "1.0" +proc-macro2 = "1.0.29" +proc-macro-crate = "1.0.0" + +[dev-dependencies] +parity-scale-codec = "2.0.1" +scale-info = "1.0" +sp-arithmetic = { path = "../../arithmetic", version = "4.0.0-dev" } +# used by generate_solution_type: +sp-npos-elections = { path = "..", version = "4.0.0-dev" } +trybuild = "1.0.43" diff --git a/primitives/npos-elections/solution-type/src/codec.rs b/primitives/npos-elections/solution-type/src/codec.rs new file mode 100644 index 0000000000000..2dac076fcde42 --- /dev/null +++ b/primitives/npos-elections/solution-type/src/codec.rs @@ -0,0 +1,242 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Code generation for the ratio assignment type' encode/decode/info impl. + +use crate::vote_field; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; + +pub(crate) fn codec_and_info_impl( + ident: syn::Ident, + voter_type: syn::Type, + target_type: syn::Type, + weight_type: syn::Type, + count: usize, +) -> TokenStream2 { + let encode = encode_impl(&ident, count); + let decode = decode_impl(&ident, &voter_type, &target_type, &weight_type, count); + let scale_info = scale_info_impl(&ident, &voter_type, &target_type, &weight_type, count); + + quote! { + #encode + #decode + #scale_info + } +} + +fn decode_impl( + ident: &syn::Ident, + voter_type: &syn::Type, + target_type: &syn::Type, + weight_type: &syn::Type, + count: usize, +) -> TokenStream2 { + let decode_impl_single = { + let name = vote_field(1); + quote! 
{ + let #name = + < + _npos::sp_std::prelude::Vec<(_npos::codec::Compact<#voter_type>, _npos::codec::Compact<#target_type>)> + as + _npos::codec::Decode + >::decode(value)?; + let #name = #name + .into_iter() + .map(|(v, t)| (v.0, t.0)) + .collect::<_npos::sp_std::prelude::Vec<_>>(); + } + }; + + let decode_impl_rest = (2..=count) + .map(|c| { + let name = vote_field(c); + + let inner_impl = (0..c - 1) + .map(|i| quote! { ( (inner[#i].0).0, (inner[#i].1).0 ), }) + .collect::(); + + quote! { + let #name = + < + _npos::sp_std::prelude::Vec<( + _npos::codec::Compact<#voter_type>, + [(_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>); #c-1], + _npos::codec::Compact<#target_type>, + )> + as _npos::codec::Decode + >::decode(value)?; + let #name = #name + .into_iter() + .map(|(v, inner, t_last)| ( + v.0, + [ #inner_impl ], + t_last.0, + )) + .collect::<_npos::sp_std::prelude::Vec<_>>(); + } + }) + .collect::(); + + let all_field_names = (1..=count) + .map(|c| { + let name = vote_field(c); + quote! { #name, } + }) + .collect::(); + + quote!( + impl _npos::codec::Decode for #ident { + fn decode(value: &mut I) -> Result { + #decode_impl_single + #decode_impl_rest + + // The above code generates variables with the decoded value with the same name as + // filed names of the struct, i.e. `let votes4 = decode_value_of_votes4`. All we + // have to do is collect them into the main struct now. + Ok(#ident { #all_field_names }) + } + } + ) +} + +// General attitude is that we will convert inner values to `Compact` and then use the normal +// `Encode` implementation. +fn encode_impl(ident: &syn::Ident, count: usize) -> TokenStream2 { + let encode_impl_single = { + let name = vote_field(1); + quote! { + let #name = self.#name + .iter() + .map(|(v, t)| ( + _npos::codec::Compact(v.clone()), + _npos::codec::Compact(t.clone()), + )) + .collect::<_npos::sp_std::prelude::Vec<_>>(); + #name.encode_to(&mut r); + } + }; + + let encode_impl_rest = (2..=count) + .map(|c| { + let name = vote_field(c); + + // we use the knowledge of the length to avoid copy_from_slice. + let inners_solution_array = (0..c - 1) + .map(|i| { + quote! {( + _npos::codec::Compact(inner[#i].0.clone()), + _npos::codec::Compact(inner[#i].1.clone()), + ),} + }) + .collect::(); + + quote! { + let #name = self.#name + .iter() + .map(|(v, inner, t_last)| ( + _npos::codec::Compact(v.clone()), + [ #inners_solution_array ], + _npos::codec::Compact(t_last.clone()), + )) + .collect::<_npos::sp_std::prelude::Vec<_>>(); + #name.encode_to(&mut r); + } + }) + .collect::(); + + quote!( + impl _npos::codec::Encode for #ident { + fn encode(&self) -> _npos::sp_std::prelude::Vec { + let mut r = vec![]; + #encode_impl_single + #encode_impl_rest + r + } + } + ) +} + +fn scale_info_impl( + ident: &syn::Ident, + voter_type: &syn::Type, + target_type: &syn::Type, + weight_type: &syn::Type, + count: usize, +) -> TokenStream2 { + let scale_info_impl_single = { + let name = format!("{}", vote_field(1)); + quote! { + .field(|f| + f.ty::<_npos::sp_std::prelude::Vec< + (_npos::codec::Compact<#voter_type>, _npos::codec::Compact<#target_type>) + >>() + .name(#name) + ) + } + }; + + let scale_info_impl_double = { + let name = format!("{}", vote_field(2)); + quote! 
{ + .field(|f| + f.ty::<_npos::sp_std::prelude::Vec<( + _npos::codec::Compact<#voter_type>, + (_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>), + _npos::codec::Compact<#target_type> + )>>() + .name(#name) + ) + } + }; + + let scale_info_impl_rest = (3..=count) + .map(|c| { + let name = format!("{}", vote_field(c)); + quote! { + .field(|f| + f.ty::<_npos::sp_std::prelude::Vec<( + _npos::codec::Compact<#voter_type>, + [ + (_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>); + #c - 1 + ], + _npos::codec::Compact<#target_type> + )>>() + .name(#name) + ) + } + }) + .collect::(); + + quote!( + impl _npos::scale_info::TypeInfo for #ident { + type Identity = Self; + + fn type_info() -> _npos::scale_info::Type<_npos::scale_info::form::MetaForm> { + _npos::scale_info::Type::builder() + .path(_npos::scale_info::Path::new(stringify!(#ident), module_path!())) + .composite( + _npos::scale_info::build::Fields::named() + #scale_info_impl_single + #scale_info_impl_double + #scale_info_impl_rest + ) + } + } + ) +} diff --git a/primitives/npos-elections/solution-type/src/from_assignment_helpers.rs b/primitives/npos-elections/solution-type/src/from_assignment_helpers.rs new file mode 100644 index 0000000000000..dc194baa6d9ea --- /dev/null +++ b/primitives/npos-elections/solution-type/src/from_assignment_helpers.rs @@ -0,0 +1,56 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Helpers to generate the push code for `from_assignment` implementations. This can be shared +//! between both single_page and double_page, thus extracted here. +//! +//! All of the code in this helper module assumes some variable names, namely `who` and +//! `distribution`. + +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; + +pub(crate) fn from_impl_single_push_code() -> TokenStream2 { + quote!(push(( + voter_index(&who).or_invalid_index()?, + target_index(&distribution[0].0).or_invalid_index()?, + ))) +} + +pub(crate) fn from_impl_rest_push_code(count: usize) -> TokenStream2 { + let inner = (0..count - 1).map(|i| { + quote!( + ( + target_index(&distribution[#i].0).or_invalid_index()?, + distribution[#i].1 + ) + ) + }); + + let last_index = count - 1; + let last = quote!(target_index(&distribution[#last_index].0).or_invalid_index()?); + + quote!( + push( + ( + voter_index(&who).or_invalid_index()?, + [ #( #inner ),* ], + #last, + ) + ) + ) +} diff --git a/primitives/npos-elections/solution-type/src/index_assignment.rs b/primitives/npos-elections/solution-type/src/index_assignment.rs new file mode 100644 index 0000000000000..d38dc3ec309d9 --- /dev/null +++ b/primitives/npos-elections/solution-type/src/index_assignment.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Code generation for getting the solution representation from the `IndexAssignment` type. + +use crate::vote_field; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; + +pub(crate) fn from_impl(struct_name: &syn::Ident, count: usize) -> TokenStream2 { + let from_impl_single = { + let name = vote_field(1); + quote!(1 => #struct_name.#name.push( + ( + *who, + distribution[0].0, + ) + ),) + }; + + let from_impl_rest = (2..=count) + .map(|c| { + let inner = (0..c - 1) + .map(|i| quote!((distribution[#i].0, distribution[#i].1),)) + .collect::(); + + let field_name = vote_field(c); + let last_index = c - 1; + let last = quote!(distribution[#last_index].0); + + quote!( + #c => #struct_name.#field_name.push( + ( + *who, + [#inner], + #last, + ) + ), + ) + }) + .collect::(); + + quote!( + #from_impl_single + #from_impl_rest + ) +} diff --git a/primitives/npos-elections/solution-type/src/lib.rs b/primitives/npos-elections/solution-type/src/lib.rs new file mode 100644 index 0000000000000..9b0ec56fc74de --- /dev/null +++ b/primitives/npos-elections/solution-type/src/lib.rs @@ -0,0 +1,246 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Proc macro for a npos solution type. + +use proc_macro::TokenStream; +use proc_macro2::{Ident, Span, TokenStream as TokenStream2}; +use proc_macro_crate::{crate_name, FoundCrate}; +use quote::quote; +use syn::parse::{Parse, ParseStream, Result}; + +mod codec; +mod from_assignment_helpers; +mod index_assignment; +mod single_page; + +/// Get the name of a filed based on voter count. +pub(crate) fn vote_field(n: usize) -> Ident { + quote::format_ident!("votes{}", n) +} + +/// Generate a `syn::Error`. +pub(crate) fn syn_err(message: &'static str) -> syn::Error { + syn::Error::new(Span::call_site(), message) +} + +/// Generates a struct to store the election result in a small/compact way. This can encode a +/// structure which is the equivalent of a `sp_npos_elections::Assignment<_>`. +/// +/// The following data types can be configured by the macro. +/// +/// - The identifier of the voter. This can be any type that supports `parity-scale-codec`'s compact +/// encoding. +/// - The identifier of the target. This can be any type that supports `parity-scale-codec`'s +/// compact encoding. 
+/// - The accuracy of the ratios. This must be one of the `PerThing` types defined in +/// `sp-arithmetic`. +/// +/// Moreover, the maximum number of edges per voter (distribution per assignment) also need to be +/// specified. Attempting to convert from/to an assignment with more distributions will fail. +/// +/// For example, the following generates a public struct with name `TestSolution` with `u16` voter +/// type, `u8` target type and `Perbill` accuracy with maximum of 4 edges per voter. +/// +/// ``` +/// # use sp_npos_elections_solution_type::generate_solution_type; +/// # use sp_arithmetic::per_things::Perbill; +/// generate_solution_type!(pub struct TestSolution::< +/// VoterIndex = u16, +/// TargetIndex = u8, +/// Accuracy = Perbill, +/// >(4)); +/// ``` +/// +/// The output of this macro will roughly look like: +/// +/// ```ignore +/// struct TestSolution { +/// voters1: vec![(u16 /* voter */, u8 /* target */)] +/// voters2: vec![ +/// (u16 /* voter */, [u8 /* first target*/, Perbill /* proportion for first target */], u8 /* last target */) +/// ] +/// voters3: vec![ +/// (u16 /* voter */, [ +/// (u8 /* first target*/, Perbill /* proportion for first target */ ), +/// (u8 /* second target */, Perbill /* proportion for second target*/) +/// ], u8 /* last target */) +/// ], +/// voters4: ..., +/// } +/// +/// impl NposSolution for TestSolution {}; +/// impl Solution for TestSolution {}; +/// ``` +/// +/// The given struct provides function to convert from/to `Assignment` as part of +/// [`sp_npos_elections::Solution`] trait: +/// +/// - `fn from_assignment<..>(..)` +/// - `fn into_assignment<..>(..)` +/// +/// ## Compact Encoding +/// +/// The generated struct is by default deriving both `Encode` and `Decode`. This is okay but could +/// lead to many `0`s in the solution. If prefixed with `#[compact]`, then a custom compact encoding +/// for numbers will be used, similar to how `parity-scale-codec`'s `Compact` works. 
+/// +/// ``` +/// # use sp_npos_elections_solution_type::generate_solution_type; +/// # use sp_npos_elections::NposSolution; +/// # use sp_arithmetic::per_things::Perbill; +/// generate_solution_type!( +/// #[compact] +/// pub struct TestSolutionCompact::(8) +/// ); +/// ``` +#[proc_macro] +pub fn generate_solution_type(item: TokenStream) -> TokenStream { + let solution_def = syn::parse_macro_input!(item as SolutionDef); + + let imports = imports().unwrap_or_else(|e| e.to_compile_error()); + + let def = single_page::generate(solution_def).unwrap_or_else(|e| e.to_compile_error()); + + quote!( + #imports + #def + ) + .into() +} + +struct SolutionDef { + vis: syn::Visibility, + ident: syn::Ident, + voter_type: syn::Type, + target_type: syn::Type, + weight_type: syn::Type, + count: usize, + compact_encoding: bool, +} + +fn check_attributes(input: ParseStream) -> syn::Result { + let mut attrs = input.call(syn::Attribute::parse_outer).unwrap_or_default(); + if attrs.len() > 1 { + let extra_attr = attrs.pop().expect("attributes vec with len > 1 can be popped"); + return Err(syn::Error::new_spanned( + extra_attr.clone(), + "compact solution can accept only #[compact]", + )) + } + if attrs.is_empty() { + return Ok(false) + } + let attr = attrs.pop().expect("attributes vec with len 1 can be popped."); + if attr.path.is_ident("compact") { + Ok(true) + } else { + Err(syn::Error::new_spanned(attr.clone(), "compact solution can accept only #[compact]")) + } +} + +impl Parse for SolutionDef { + fn parse(input: ParseStream) -> syn::Result { + // optional #[compact] + let compact_encoding = check_attributes(input)?; + + // struct + let vis: syn::Visibility = input.parse()?; + let _ = ::parse(input)?; + let ident: syn::Ident = input.parse()?; + + // :: + let _ = ::parse(input)?; + let generics: syn::AngleBracketedGenericArguments = input.parse()?; + + if generics.args.len() != 3 { + return Err(syn_err("Must provide 3 generic args.")) + } + + let expected_types = ["VoterIndex", "TargetIndex", "Accuracy"]; + + let mut types: Vec = generics + .args + .iter() + .zip(expected_types.iter()) + .map(|(t, expected)| match t { + syn::GenericArgument::Type(ty) => { + // this is now an error + Err(syn::Error::new_spanned( + ty, + format!("Expected binding: `{} = ...`", expected), + )) + }, + syn::GenericArgument::Binding(syn::Binding { ident, ty, .. }) => { + // check that we have the right keyword for this position in the argument list + if ident == expected { + Ok(ty.clone()) + } else { + Err(syn::Error::new_spanned(ident, format!("Expected `{}`", expected))) + } + }, + _ => Err(syn_err("Wrong type of generic provided. 
Must be a `type`.")), + }) + .collect::>()?; + + let weight_type = types.pop().expect("Vector of length 3 can be popped; qed"); + let target_type = types.pop().expect("Vector of length 2 can be popped; qed"); + let voter_type = types.pop().expect("Vector of length 1 can be popped; qed"); + + // () + let count_expr: syn::ExprParen = input.parse()?; + let count = parse_parenthesized_number::(count_expr)?; + + Ok(Self { vis, ident, voter_type, target_type, weight_type, count, compact_encoding }) + } +} + +fn parse_parenthesized_number(input_expr: syn::ExprParen) -> syn::Result +where + ::Err: std::fmt::Display, +{ + let expr = input_expr.expr; + let expr_lit = match *expr { + syn::Expr::Lit(count_lit) => count_lit.lit, + _ => return Err(syn_err("Count must be literal.")), + }; + let int_lit = match expr_lit { + syn::Lit::Int(int_lit) => int_lit, + _ => return Err(syn_err("Count must be int literal.")), + }; + int_lit.base10_parse::() +} + +fn imports() -> Result { + match crate_name("sp-npos-elections") { + Ok(FoundCrate::Itself) => Ok(quote! { use crate as _npos; }), + Ok(FoundCrate::Name(sp_npos_elections)) => { + let ident = syn::Ident::new(&sp_npos_elections, Span::call_site()); + Ok(quote!( extern crate #ident as _npos; )) + }, + Err(e) => Err(syn::Error::new(Span::call_site(), e)), + } +} + +#[cfg(test)] +mod tests { + #[test] + fn ui_fail() { + let cases = trybuild::TestCases::new(); + cases.compile_fail("tests/ui/fail/*.rs"); + } +} diff --git a/primitives/npos-elections/solution-type/src/single_page.rs b/primitives/npos-elections/solution-type/src/single_page.rs new file mode 100644 index 0000000000000..33017d558331a --- /dev/null +++ b/primitives/npos-elections/solution-type/src/single_page.rs @@ -0,0 +1,363 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{from_assignment_helpers::*, syn_err, vote_field}; +use proc_macro2::TokenStream as TokenStream2; +use quote::quote; +use syn::parse::Result; + +pub(crate) fn generate(def: crate::SolutionDef) -> Result { + let crate::SolutionDef { + vis, + ident, + count, + voter_type, + target_type, + weight_type, + compact_encoding, + } = def; + + if count <= 2 { + Err(syn_err("cannot build solution struct with capacity less than 3."))? + } + + let single = { + let name = vote_field(1); + // NOTE: we use the visibility of the struct for the fields as well.. could be made better. 
+ quote!( + #vis #name: _npos::sp_std::prelude::Vec<(#voter_type, #target_type)>, + ) + }; + + let rest = (2..=count) + .map(|c| { + let field_name = vote_field(c); + let array_len = c - 1; + quote!( + #vis #field_name: _npos::sp_std::prelude::Vec<( + #voter_type, + [(#target_type, #weight_type); #array_len], + #target_type + )>, + ) + }) + .collect::(); + + let len_impl = len_impl(count); + let edge_count_impl = edge_count_impl(count); + let unique_targets_impl = unique_targets_impl(count); + let remove_voter_impl = remove_voter_impl(count); + + let derives_and_maybe_compact_encoding = if compact_encoding { + // custom compact encoding. + let compact_impl = crate::codec::codec_and_info_impl( + ident.clone(), + voter_type.clone(), + target_type.clone(), + weight_type.clone(), + count, + ); + quote! { + #compact_impl + #[derive(Default, PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] + } + } else { + // automatically derived. + quote!(#[derive( + Default, + PartialEq, + Eq, + Clone, + Debug, + _npos::codec::Encode, + _npos::codec::Decode, + _npos::scale_info::TypeInfo, + )]) + }; + + let struct_name = syn::Ident::new("solution", proc_macro2::Span::call_site()); + let assignment_name = syn::Ident::new("all_assignments", proc_macro2::Span::call_site()); + + let from_impl = from_impl(&struct_name, count); + let into_impl = into_impl(&assignment_name, count, weight_type.clone()); + let from_index_impl = crate::index_assignment::from_impl(&struct_name, count); + + Ok(quote! ( + /// A struct to encode a election assignment in a compact way. + #derives_and_maybe_compact_encoding + #vis struct #ident { #single #rest } + + use _npos::__OrInvalidIndex; + impl _npos::NposSolution for #ident { + const LIMIT: usize = #count; + type VoterIndex = #voter_type; + type TargetIndex = #target_type; + type Accuracy = #weight_type; + + fn remove_voter(&mut self, to_remove: Self::VoterIndex) -> bool { + #remove_voter_impl + return false + } + + fn from_assignment( + assignments: &[_npos::Assignment], + voter_index: FV, + target_index: FT, + ) -> Result + where + A: _npos::IdentifierT, + for<'r> FV: Fn(&'r A) -> Option, + for<'r> FT: Fn(&'r A) -> Option, + { + let mut #struct_name: #ident = Default::default(); + for _npos::Assignment { who, distribution } in assignments { + match distribution.len() { + 0 => continue, + #from_impl + _ => { + return Err(_npos::Error::SolutionTargetOverflow); + } + } + }; + Ok(#struct_name) + } + + fn into_assignment( + self, + voter_at: impl Fn(Self::VoterIndex) -> Option, + target_at: impl Fn(Self::TargetIndex) -> Option, + ) -> Result<_npos::sp_std::prelude::Vec<_npos::Assignment>, _npos::Error> { + let mut #assignment_name: _npos::sp_std::prelude::Vec<_npos::Assignment> = Default::default(); + #into_impl + Ok(#assignment_name) + } + + fn voter_count(&self) -> usize { + let mut all_len = 0usize; + #len_impl + all_len + } + + fn edge_count(&self) -> usize { + let mut all_edges = 0usize; + #edge_count_impl + all_edges + } + + fn unique_targets(&self) -> _npos::sp_std::prelude::Vec { + // NOTE: this implementation returns the targets sorted, but we don't use it yet per + // se, nor is the API enforcing it. 
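+ // For clarity: the `BTreeSet` below both de-duplicates the collected target indices and yields them in ascending order when drained.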
+ use _npos::sp_std::collections::btree_set::BTreeSet; + let mut all_targets: BTreeSet = BTreeSet::new(); + let mut maybe_insert_target = |t: Self::TargetIndex| { + all_targets.insert(t); + }; + + #unique_targets_impl + + all_targets.into_iter().collect() + } + } + + type __IndexAssignment = _npos::IndexAssignment< + <#ident as _npos::NposSolution>::VoterIndex, + <#ident as _npos::NposSolution>::TargetIndex, + <#ident as _npos::NposSolution>::Accuracy, + >; + impl<'a> _npos::sp_std::convert::TryFrom<&'a [__IndexAssignment]> for #ident { + type Error = _npos::Error; + fn try_from(index_assignments: &'a [__IndexAssignment]) -> Result { + let mut #struct_name = #ident::default(); + + for _npos::IndexAssignment { who, distribution } in index_assignments { + match distribution.len() { + 0 => {} + #from_index_impl + _ => { + return Err(_npos::Error::SolutionTargetOverflow); + } + } + }; + + Ok(#struct_name) + } + } + )) +} + +fn remove_voter_impl(count: usize) -> TokenStream2 { + let field_name = vote_field(1); + let single = quote! { + if let Some(idx) = self.#field_name.iter().position(|(x, _)| *x == to_remove) { + self.#field_name.remove(idx); + return true + } + }; + + let rest = (2..=count) + .map(|c| { + let field_name = vote_field(c); + quote! { + if let Some(idx) = self.#field_name.iter().position(|(x, _, _)| *x == to_remove) { + self.#field_name.remove(idx); + return true + } + } + }) + .collect::(); + + quote! { + #single + #rest + } +} + +fn len_impl(count: usize) -> TokenStream2 { + (1..=count) + .map(|c| { + let field_name = vote_field(c); + quote!( + all_len = all_len.saturating_add(self.#field_name.len()); + ) + }) + .collect::() +} + +fn edge_count_impl(count: usize) -> TokenStream2 { + (1..=count) + .map(|c| { + let field_name = vote_field(c); + quote!( + all_edges = all_edges.saturating_add( + self.#field_name.len().saturating_mul(#c as usize) + ); + ) + }) + .collect::() +} + +fn unique_targets_impl(count: usize) -> TokenStream2 { + let unique_targets_impl_single = { + let field_name = vote_field(1); + quote! { + self.#field_name.iter().for_each(|(_, t)| { + maybe_insert_target(*t); + }); + } + }; + + let unique_targets_impl_rest = (2..=count) + .map(|c| { + let field_name = vote_field(c); + quote! { + self.#field_name.iter().for_each(|(_, inners, t_last)| { + inners.iter().for_each(|(t, _)| { + maybe_insert_target(*t); + }); + maybe_insert_target(*t_last); + }); + } + }) + .collect::(); + + quote! 
{ + #unique_targets_impl_single + #unique_targets_impl_rest + } +} + +pub(crate) fn from_impl(struct_name: &syn::Ident, count: usize) -> TokenStream2 { + let from_impl_single = { + let field = vote_field(1); + let push_code = from_impl_single_push_code(); + quote!(1 => #struct_name.#field.#push_code,) + }; + + let from_impl_rest = (2..=count) + .map(|c| { + let field = vote_field(c); + let push_code = from_impl_rest_push_code(c); + quote!(#c => #struct_name.#field.#push_code,) + }) + .collect::(); + + quote!( + #from_impl_single + #from_impl_rest + ) +} + +pub(crate) fn into_impl( + assignments: &syn::Ident, + count: usize, + per_thing: syn::Type, +) -> TokenStream2 { + let into_impl_single = { + let name = vote_field(1); + quote!( + for (voter_index, target_index) in self.#name { + #assignments.push(_npos::Assignment { + who: voter_at(voter_index).or_invalid_index()?, + distribution: vec![ + (target_at(target_index).or_invalid_index()?, #per_thing::one()) + ], + }) + } + ) + }; + + let into_impl_rest = (2..=count) + .map(|c| { + let name = vote_field(c); + quote!( + for (voter_index, inners, t_last_idx) in self.#name { + let mut sum = #per_thing::zero(); + let mut inners_parsed = inners + .iter() + .map(|(ref t_idx, p)| { + sum = _npos::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); + let target = target_at(*t_idx).or_invalid_index()?; + Ok((target, *p)) + }) + .collect::, _npos::Error>>()?; + + if sum >= #per_thing::one() { + return Err(_npos::Error::SolutionWeightOverflow); + } + + // defensive only. Since Percent doesn't have `Sub`. + let p_last = _npos::sp_arithmetic::traits::Saturating::saturating_sub( + #per_thing::one(), + sum, + ); + + inners_parsed.push((target_at(t_last_idx).or_invalid_index()?, p_last)); + + #assignments.push(_npos::Assignment { + who: voter_at(voter_index).or_invalid_index()?, + distribution: inners_parsed, + }); + } + ) + }) + .collect::(); + + quote!( + #into_impl_single + #into_impl_rest + ) +} diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/missing_accuracy.rs b/primitives/npos-elections/solution-type/tests/ui/fail/missing_accuracy.rs new file mode 100644 index 0000000000000..b74b857e45815 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/missing_accuracy.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_solution_type::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + VoterIndex = u16, + TargetIndex = u8, + Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/missing_accuracy.stderr b/primitives/npos-elections/solution-type/tests/ui/fail/missing_accuracy.stderr new file mode 100644 index 0000000000000..b6bb8f39ede61 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/missing_accuracy.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `Accuracy = ...` + --> $DIR/missing_accuracy.rs:6:2 + | +6 | Perbill, + | ^^^^^^^ diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/missing_target.rs b/primitives/npos-elections/solution-type/tests/ui/fail/missing_target.rs new file mode 100644 index 0000000000000..4c9cd51a32096 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/missing_target.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_solution_type::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + VoterIndex = u16, + u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/missing_target.stderr 
b/primitives/npos-elections/solution-type/tests/ui/fail/missing_target.stderr new file mode 100644 index 0000000000000..d0c92c5bbd8e9 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/missing_target.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `TargetIndex = ...` + --> $DIR/missing_target.rs:5:2 + | +5 | u8, + | ^^ diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/missing_voter.rs b/primitives/npos-elections/solution-type/tests/ui/fail/missing_voter.rs new file mode 100644 index 0000000000000..b87037f77f1e3 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/missing_voter.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_solution_type::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + u16, + TargetIndex = u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/missing_voter.stderr b/primitives/npos-elections/solution-type/tests/ui/fail/missing_voter.stderr new file mode 100644 index 0000000000000..a825d460c2fa8 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/missing_voter.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `VoterIndex = ...` + --> $DIR/missing_voter.rs:4:2 + | +4 | u16, + | ^^^ diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/no_annotations.rs b/primitives/npos-elections/solution-type/tests/ui/fail/no_annotations.rs new file mode 100644 index 0000000000000..cfca2841db633 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/no_annotations.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_solution_type::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + u16, + u8, + Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/no_annotations.stderr b/primitives/npos-elections/solution-type/tests/ui/fail/no_annotations.stderr new file mode 100644 index 0000000000000..28f1c2091546f --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/no_annotations.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `VoterIndex = ...` + --> $DIR/no_annotations.rs:4:2 + | +4 | u16, + | ^^^ diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/swap_voter_target.rs b/primitives/npos-elections/solution-type/tests/ui/fail/swap_voter_target.rs new file mode 100644 index 0000000000000..443202d11b39b --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/swap_voter_target.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_solution_type::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + TargetIndex = u16, + VoterIndex = u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/swap_voter_target.stderr b/primitives/npos-elections/solution-type/tests/ui/fail/swap_voter_target.stderr new file mode 100644 index 0000000000000..5759fee7472fa --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/swap_voter_target.stderr @@ -0,0 +1,5 @@ +error: Expected `VoterIndex` + --> $DIR/swap_voter_target.rs:4:2 + | +4 | TargetIndex = u16, + | ^^^^^^^^^^^ diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/wrong_attribute.rs b/primitives/npos-elections/solution-type/tests/ui/fail/wrong_attribute.rs new file mode 100644 index 0000000000000..3008277e36b74 --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/wrong_attribute.rs @@ -0,0 +1,11 @@ +use 
sp_npos_elections_solution_type::generate_solution_type; + +generate_solution_type!( + #[pages(1)] pub struct TestSolution::< + VoterIndex = u8, + TargetIndex = u16, + Accuracy = Perbill, + >(8) +); + +fn main() {} diff --git a/primitives/npos-elections/solution-type/tests/ui/fail/wrong_attribute.stderr b/primitives/npos-elections/solution-type/tests/ui/fail/wrong_attribute.stderr new file mode 100644 index 0000000000000..ab700a3f2afcb --- /dev/null +++ b/primitives/npos-elections/solution-type/tests/ui/fail/wrong_attribute.stderr @@ -0,0 +1,5 @@ +error: compact solution can accept only #[compact] + --> $DIR/wrong_attribute.rs:4:2 + | +4 | #[pages(1)] pub struct TestSolution::< + | ^^^^^^^^^^^ diff --git a/primitives/npos-elections/src/assignments.rs b/primitives/npos-elections/src/assignments.rs new file mode 100644 index 0000000000000..bdd1e2cd281bb --- /dev/null +++ b/primitives/npos-elections/src/assignments.rs @@ -0,0 +1,209 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Structs and helpers for distributing a voter's stake among various winners. + +use crate::{Error, ExtendedBalance, IdentifierT, PerThing128, __OrInvalidIndex}; +#[cfg(feature = "std")] +use codec::{Decode, Encode}; +use sp_arithmetic::{ + traits::{Bounded, Zero}, + Normalizable, PerThing, +}; +use sp_core::RuntimeDebug; +use sp_std::vec::Vec; + +/// A voter's stake assignment among a set of targets, represented as ratios. +#[derive(RuntimeDebug, Clone, Default)] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] +pub struct Assignment<AccountId, P: PerThing> { + /// Voter's identifier. + pub who: AccountId, + /// The distribution of the voter's stake. + pub distribution: Vec<(AccountId, P)>, +} + +impl<AccountId: IdentifierT, P: PerThing128> Assignment<AccountId, P> { + /// Convert from a ratio assignment into one with absolute values, aka [`StakedAssignment`]. + /// + /// It needs `stake`, which is the total budget of the voter. + /// + /// Note that this might create _un-normalized_ assignments, due to accuracy loss of `P`. The + /// call site might compensate by calling `try_normalize()` on the returned `StakedAssignment` + /// as a post-processing. + /// + /// If an edge ratio is [`Bounded::min_value()`], it is dropped. This edge can never mean + /// anything useful. + pub fn into_staked(self, stake: ExtendedBalance) -> StakedAssignment<AccountId> { + let distribution = self + .distribution + .into_iter() + .filter_map(|(target, p)| { + // if this ratio is zero, then skip it. + if p.is_zero() { + None + } else { + // NOTE: this mul impl will always round to the nearest number, so we might both + // overflow and underflow. + let distribution_stake = p * stake; + Some((target, distribution_stake)) + } + }) + .collect::<Vec<(AccountId, ExtendedBalance)>>(); + + StakedAssignment { who: self.who, distribution } + } + + /// Try and normalize this assignment. + /// + /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to 100%.
+ /// + /// ### Errors + /// + /// This will return only if the internal `normalize` fails. This can happen if sum of + /// `self.distribution.map(|p| p.deconstruct())` fails to fit inside `UpperOf
<P>
`. A user of + /// this crate may statically assert that this can never happen and safely `expect` this to + /// return `Ok`. + pub fn try_normalize(&mut self) -> Result<(), &'static str> { + self.distribution + .iter() + .map(|(_, p)| *p) + .collect::>() + .normalize(P::one()) + .map(|normalized_ratios| { + self.distribution.iter_mut().zip(normalized_ratios).for_each( + |((_, old), corrected)| { + *old = corrected; + }, + ) + }) + } +} + +/// A voter's stake assignment among a set of targets, represented as absolute values in the scale +/// of [`ExtendedBalance`]. +#[derive(RuntimeDebug, Clone, Default)] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] +pub struct StakedAssignment { + /// Voter's identifier + pub who: AccountId, + /// The distribution of the voter's stake. + pub distribution: Vec<(AccountId, ExtendedBalance)>, +} + +impl StakedAssignment { + /// Converts self into the normal [`Assignment`] type. + /// + /// NOTE: This will always round down, and thus the results might be less than a full 100% `P`. + /// Use a normalization post-processing to fix this. The data type returned here will + /// potentially get used to create a compact type; a compact type requires sum of ratios to be + /// less than 100% upon un-compacting. + /// + /// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge + /// can never be re-created and does not mean anything useful anymore. + pub fn into_assignment(self) -> Assignment + where + AccountId: IdentifierT, + { + let stake = self.total(); + let distribution = self + .distribution + .into_iter() + .filter_map(|(target, w)| { + let per_thing = P::from_rational(w, stake); + if per_thing == Bounded::min_value() { + None + } else { + Some((target, per_thing)) + } + }) + .collect::>(); + + Assignment { who: self.who, distribution } + } + + /// Try and normalize this assignment. + /// + /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to + /// `stake`. + /// + /// NOTE: current implementation of `.normalize` is almost safe to `expect()` upon. The only + /// error case is when the input cannot fit in `T`, or the sum of input cannot fit in `T`. + /// Sadly, both of these are dependent upon the implementation of `VoteLimit`, i.e. the limit of + /// edges per voter which is enforced from upstream. Hence, at this crate, we prefer returning a + /// result and a use the name prefix `try_`. + pub fn try_normalize(&mut self, stake: ExtendedBalance) -> Result<(), &'static str> { + self.distribution + .iter() + .map(|(_, ref weight)| *weight) + .collect::>() + .normalize(stake) + .map(|normalized_weights| { + self.distribution.iter_mut().zip(normalized_weights.into_iter()).for_each( + |((_, weight), corrected)| { + *weight = corrected; + }, + ) + }) + } + + /// Get the total stake of this assignment (aka voter budget). + pub fn total(&self) -> ExtendedBalance { + self.distribution.iter().fold(Zero::zero(), |a, b| a.saturating_add(b.1)) + } +} +/// The [`IndexAssignment`] type is an intermediate between the assignments list +/// ([`&[Assignment]`][Assignment]) and `SolutionOf`. +/// +/// The voter and target identifiers have already been replaced with appropriate indices, +/// making it fast to repeatedly encode into a `SolutionOf`. This property turns out +/// to be important when trimming for solution length. 
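+///
+/// A hedged usage sketch; the toy ids, index tables, and `u32`/`u16` index types below are
+/// invented for illustration, while the type and its `new` constructor are the ones defined
+/// in this file:
+///
+/// ```ignore
+/// use sp_npos_elections::{Assignment, IndexAssignment};
+/// use sp_runtime::Perbill;
+///
+/// // Canonical lists; a position in the list doubles as the index.
+/// let voters = vec![100u64, 101];
+/// let targets = vec![7u64, 8, 9];
+///
+/// let assignment = Assignment {
+/// 	who: 101u64,
+/// 	distribution: vec![(7u64, Perbill::from_percent(60)), (9, Perbill::from_percent(40))],
+/// };
+///
+/// // Resolve identifiers to indices once...
+/// let voter_index = |who: &u64| voters.iter().position(|v| v == who).map(|i| i as u32);
+/// let target_index = |t: &u64| targets.iter().position(|x| x == t).map(|i| i as u16);
+/// let index_assignment: IndexAssignment<u32, u16, Perbill> =
+/// 	IndexAssignment::new(&assignment, &voter_index, &target_index).expect("ids are known");
+///
+/// // ...and the cheap, index-based form is what gets encoded, possibly many times.
+/// assert_eq!(index_assignment.who, 1u32);
+/// ```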
+#[derive(RuntimeDebug, Clone, Default)] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] +pub struct IndexAssignment { + /// Index of the voter among the voters list. + pub who: VoterIndex, + /// The distribution of the voter's stake among winning targets. + /// + /// Targets are identified by their index in the canonical list. + pub distribution: Vec<(TargetIndex, P)>, +} + +impl IndexAssignment { + pub fn new( + assignment: &Assignment, + voter_index: impl Fn(&AccountId) -> Option, + target_index: impl Fn(&AccountId) -> Option, + ) -> Result { + Ok(Self { + who: voter_index(&assignment.who).or_invalid_index()?, + distribution: assignment + .distribution + .iter() + .map(|(target, proportion)| Some((target_index(target)?, proportion.clone()))) + .collect::>>() + .or_invalid_index()?, + }) + } +} + +/// A type alias for [`IndexAssignment`] made from [`crate::Solution`]. +pub type IndexAssignmentOf = IndexAssignment< + ::VoterIndex, + ::TargetIndex, + ::Accuracy, +>; diff --git a/primitives/npos-elections/src/balancing.rs b/primitives/npos-elections/src/balancing.rs index 04083cc9b0d43..63164049e5269 100644 --- a/primitives/npos-elections/src/balancing.rs +++ b/primitives/npos-elections/src/balancing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ //! //! See [`balance`] for more information. -use crate::{IdentifierT, Voter, ExtendedBalance, Edge}; +use crate::{Edge, ExtendedBalance, IdentifierT, Voter}; use sp_arithmetic::traits::Zero; use sp_std::prelude::*; @@ -36,7 +36,7 @@ use sp_std::prelude::*; /// change has been made (`difference = 0`). /// /// In almost all cases, a balanced solution will have a better score than an unbalanced solution, -/// yet this is not 100% guaranteed because the first element of a [`ElectionScore`] does not +/// yet this is not 100% guaranteed because the first element of a [`crate::ElectionScore`] does not /// directly related to balancing. /// /// Note that some reference implementation adopt an approach in which voters are balanced randomly @@ -57,19 +57,23 @@ pub fn balance( iterations: usize, tolerance: ExtendedBalance, ) -> usize { - if iterations == 0 { return 0; } + if iterations == 0 { + return 0 + } let mut iter = 0; loop { let mut max_diff = 0; for voter in voters.iter_mut() { let diff = balance_voter(voter, tolerance); - if diff > max_diff { max_diff = diff; } + if diff > max_diff { + max_diff = diff; + } } iter += 1; if max_diff <= tolerance || iter >= iterations { - break iter; + break iter } } } @@ -80,7 +84,8 @@ pub(crate) fn balance_voter( tolerance: ExtendedBalance, ) -> ExtendedBalance { // create a shallow copy of the elected ones. The original one will not be used henceforth. - let mut elected_edges = voter.edges + let mut elected_edges = voter + .edges .iter_mut() .filter(|e| e.candidate.borrow().elected) .collect::>>(); @@ -91,9 +96,8 @@ pub(crate) fn balance_voter( } // amount of stake from this voter that is used in edges. - let stake_used = elected_edges - .iter() - .fold(0, |a: ExtendedBalance, e| a.saturating_add(e.weight)); + let stake_used = + elected_edges.iter().fold(0, |a: ExtendedBalance, e| a.saturating_add(e.weight)); // backed stake of each of the elected edges. 
let backed_stakes = elected_edges @@ -104,13 +108,7 @@ pub(crate) fn balance_voter( // backed stake of all the edges for whom we've spent some stake. let backing_backed_stake = elected_edges .iter() - .filter_map(|e| - if e.weight > 0 { - Some(e.candidate.borrow().backed_stake) - } else { - None - } - ) + .filter_map(|e| if e.weight > 0 { Some(e.candidate.borrow().backed_stake) } else { None }) .collect::>(); let difference = if backing_backed_stake.len() > 0 { @@ -125,7 +123,7 @@ pub(crate) fn balance_voter( let mut difference = max_stake.saturating_sub(*min_stake); difference = difference.saturating_add(voter.budget.saturating_sub(stake_used)); if difference < tolerance { - return difference; + return difference } difference } else { @@ -156,12 +154,18 @@ pub(crate) fn balance_voter( cumulative_backed_stake = cumulative_backed_stake.saturating_add(backed_stake); } - let last_stake = elected_edges.get(last_index).expect( - "length of elected_edges is greater than or equal 2; last_index index is at \ - the minimum elected_edges.len() - 1; index is within range; qed" - ).candidate.borrow().backed_stake; + let last_stake = elected_edges + .get(last_index) + .expect( + "length of elected_edges is greater than or equal 2; last_index index is at the \ + minimum elected_edges.len() - 1; index is within range; qed", + ) + .candidate + .borrow() + .backed_stake; let ways_to_split = last_index + 1; - let excess = voter.budget + let excess = voter + .budget .saturating_add(cumulative_backed_stake) .saturating_sub(last_stake.saturating_mul(ways_to_split as ExtendedBalance)); diff --git a/primitives/npos-elections/src/helpers.rs b/primitives/npos-elections/src/helpers.rs index bfde63676c6e8..ca97aeb996e48 100644 --- a/primitives/npos-elections/src/helpers.rs +++ b/primitives/npos-elections/src/helpers.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,19 @@ //! Helper methods for npos-elections. -use crate::{ - Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf, Error, -}; -use sp_arithmetic::{PerThing, InnerOf}; +use crate::{Assignment, Error, IdentifierT, PerThing128, StakedAssignment, VoteWeight}; +use sp_arithmetic::PerThing; use sp_std::prelude::*; /// Converts a vector of ratio assignments into ones with absolute budget value. /// /// Note that this will NOT attempt at normalizing the result. -pub fn assignment_ratio_to_staked( +pub fn assignment_ratio_to_staked( ratios: Vec>, stake_of: FS, ) -> Vec> where for<'r> FS: Fn(&'r A) -> VoteWeight, - P: sp_std::ops::Mul, - ExtendedBalance: From>, { ratios .into_iter() @@ -45,19 +41,21 @@ where } /// Same as [`assignment_ratio_to_staked`] and try and do normalization. 
-pub fn assignment_ratio_to_staked_normalized( +pub fn assignment_ratio_to_staked_normalized( ratio: Vec>, stake_of: FS, ) -> Result>, Error> where for<'r> FS: Fn(&'r A) -> VoteWeight, - P: sp_std::ops::Mul, - ExtendedBalance: From>, { let mut staked = assignment_ratio_to_staked(ratio, &stake_of); - staked.iter_mut().map(|a| - a.try_normalize(stake_of(&a.who).into()).map_err(|err| Error::ArithmeticError(err)) - ).collect::>()?; + staked + .iter_mut() + .map(|a| { + a.try_normalize(stake_of(&a.who).into()) + .map_err(|err| Error::ArithmeticError(err)) + }) + .collect::>()?; Ok(staked) } @@ -66,32 +64,21 @@ where /// Note that this will NOT attempt at normalizing the result. pub fn assignment_staked_to_ratio( staked: Vec>, -) -> Vec> -where - ExtendedBalance: From>, -{ +) -> Vec> { staked.into_iter().map(|a| a.into_assignment()).collect() } /// Same as [`assignment_staked_to_ratio`] and try and do normalization. -pub fn assignment_staked_to_ratio_normalized( +pub fn assignment_staked_to_ratio_normalized( staked: Vec>, -) -> Result>, Error> -where - ExtendedBalance: From>, -{ +) -> Result>, Error> { let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::>(); - ratio.iter_mut().map(|a| - a.try_normalize().map_err(|err| Error::ArithmeticError(err)) - ).collect::>()?; + for assignment in ratio.iter_mut() { + assignment.try_normalize().map_err(|err| Error::ArithmeticError(err))?; + } Ok(ratio) } -/// consumes a vector of winners with backing stake to just winners. -pub fn to_without_backing(winners: Vec>) -> Vec { - winners.into_iter().map(|(who, _)| who).collect::>() -} - #[cfg(test)] mod tests { use super::*; @@ -103,15 +90,15 @@ mod tests { Assignment { who: 1u32, distribution: vec![ - (10u32, Perbill::from_fraction(0.5)), - (20, Perbill::from_fraction(0.5)), + (10u32, Perbill::from_float(0.5)), + (20, Perbill::from_float(0.5)), ], }, Assignment { who: 2u32, distribution: vec![ - (10, Perbill::from_fraction(0.33)), - (20, Perbill::from_fraction(0.67)), + (10, Perbill::from_float(0.33)), + (20, Perbill::from_float(0.67)), ], }, ]; @@ -122,14 +109,8 @@ mod tests { assert_eq!( staked, vec![ - StakedAssignment { - who: 1u32, - distribution: vec![(10u32, 50), (20, 50),] - }, - StakedAssignment { - who: 2u32, - distribution: vec![(10u32, 33), (20, 67),] - } + StakedAssignment { who: 1u32, distribution: vec![(10u32, 50), (20, 50),] }, + StakedAssignment { who: 2u32, distribution: vec![(10u32, 33), (20, 67),] } ] ); } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 11951d2065989..afe85ef53b3a7 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at @@ -18,11 +18,12 @@ //! - [`seq_phragmen`]: Implements the Phragmén Sequential Method. An un-ranked, relatively fast //! election method that ensures PJR, but does not provide a constant factor approximation of the //! maximin problem. -//! - [`phragmms`]: Implements a hybrid approach inspired by Phragmén which is executed faster but -//! it can achieve a constant factor approximation of the maximin problem, similar to that of the -//! MMS algorithm. 
-//! - [`balance_solution`]: Implements the star balancing algorithm. This iterative process can push -//! a solution toward being more `balances`, which in turn can increase its score. +//! - [`phragmms`](phragmms::phragmms): Implements a hybrid approach inspired by Phragmén which is +//! executed faster, but can achieve a constant factor approximation of the maximin problem, +//! similar to that of the MMS algorithm. +//! - [`balance`](balancing::balance): Implements the star balancing algorithm. This iterative +//! process can push a solution toward being more "balanced", which in turn can increase its +//! score. //! //! ### Terminology //! @@ -57,12 +58,11 @@ //! //! // the combination of the two makes the election result. //! let election_result = ElectionResult { winners, assignments }; -//! //! ``` //! //! The `Assignment` field of the election result is voter-major, i.e. it is from the perspective of //! the voter. The struct that represents the opposite is called a `Support`. This struct is usually -//! accessed in a map-like manner, i.e. keyed vy voters, therefor it is stored as a mapping called +//! accessed in a map-like manner, i.e. keyed by targets, therefore it is stored as a mapping called //! `SupportMap`. //! //! Moreover, the support is built from absolute backing values, not ratios like the example above. @@ -70,87 +70,71 @@ //! `StakedAssignment`. //! //! -//! More information can be found at: https://arxiv.org/abs/2004.12990 +//! More information can be found at: <https://arxiv.org/abs/2004.12990> #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{ - prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, rc::Rc, cell::RefCell, -}; -use sp_arithmetic::{ - PerThing, Rational128, ThresholdOrd, InnerOf, Normalizable, - traits::{Zero, Bounded}, -}; +use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd}; +use sp_core::RuntimeDebug; +use sp_std::{cell::RefCell, cmp::Ordering, collections::btree_map::BTreeMap, prelude::*, rc::Rc}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -#[cfg(feature = "std")] -use codec::{Encode, Decode}; +use serde::{Deserialize, Serialize}; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -mod phragmen; -mod balancing; -mod phragmms; -mod node; -mod reduce; -mod helpers; - -pub use reduce::reduce; +mod assignments; +pub mod balancing; +pub mod helpers; +pub mod node; +pub mod phragmen; +pub mod phragmms; +pub mod pjr; +pub mod reduce; +pub mod traits; + +pub use assignments::{Assignment, IndexAssignment, IndexAssignmentOf, StakedAssignment}; +pub use balancing::*; pub use helpers::*; pub use phragmen::*; pub use phragmms::*; -pub use balancing::*; +pub use pjr::*; +pub use reduce::reduce; +pub use traits::{IdentifierT, NposSolution, PerThing128, __OrInvalidIndex}; -// re-export the compact macro, with the dependencies of the macro. +// re-export for the solution macro, with the dependencies of the macro. #[doc(hidden)] pub use codec; #[doc(hidden)] +pub use scale_info; +#[doc(hidden)] pub use sp_arithmetic; - -/// Simple Extension trait to easily convert `None` from index closures to `Err`. -/// -/// This is only generated and re-exported for the compact solution code to use. #[doc(hidden)] -pub trait __OrInvalidIndex<T> { - fn or_invalid_index(self) -> Result<T, Error>; -} +pub use sp_std; -impl<T> __OrInvalidIndex<T> for Option<T> { - fn or_invalid_index(self) -> Result<T, Error> { - self.ok_or(Error::CompactInvalidIndex) - } -} - -// re-export the compact solution type.
-pub use sp_npos_elections_compact::generate_solution_type; +// re-export the solution type macro. +pub use sp_npos_elections_solution_type::generate_solution_type; -/// A trait to limit the number of votes per voter. The generated compact type will implement this. -pub trait VotingLimit { - const LIMIT: usize; -} - -/// an aggregator trait for a generic type of a voter/target identifier. This usually maps to -/// substrate's account id. -pub trait IdentifierT: Clone + Eq + Default + Ord + Debug + codec::Codec {} - -impl IdentifierT for T {} - -/// The errors that might occur in the this crate and compact. -#[derive(Debug, Eq, PartialEq)] +/// The errors that might occur in this crate and the solution type. +#[derive(Eq, PartialEq, RuntimeDebug)] pub enum Error { - /// While going from compact to staked, the stake of all the edges has gone above the total and - /// the last stake cannot be assigned. - CompactStakeOverflow, - /// The compact type has a voter who's number of targets is out of bound. - CompactTargetOverflow, + /// While going from solution indices to ratio, the weight of all the edges has gone above the + /// total. + SolutionWeightOverflow, + /// The solution type has a voter whose number of targets is out of bounds. + SolutionTargetOverflow, /// One of the index functions returned none. - CompactInvalidIndex, + SolutionInvalidIndex, + /// One of the page indices was invalid. + SolutionInvalidPageIndex, /// An error occurred in some arithmetic operation. ArithmeticError(&'static str), + /// The data provided to create the support map was invalid. + InvalidSupportEdge, } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the @@ -160,17 +144,15 @@ pub type VoteWeight = u64; /// A type in which performing operations on vote weights are safe. pub type ExtendedBalance = u128; -/// The score of an assignment. This can be computed from the support map via [`evaluate_support`]. +/// The score of an assignment. This can be computed from the support map via +/// [`EvaluateSupport::evaluate`]. pub type ElectionScore = [ExtendedBalance; 3]; -/// A winner, with their respective approval stake. -pub type WithApprovalOf = (A, ExtendedBalance); - /// A pointer to a candidate struct with interior mutability. pub type CandidatePtr = Rc>>; /// A candidate entity for the election. -#[derive(Debug, Clone, Default)] +#[derive(RuntimeDebug, Clone, Default)] pub struct Candidate { /// Identifier. who: AccountId, @@ -189,6 +171,12 @@ pub struct Candidate { round: usize, } +impl Candidate { + pub fn to_ptr(self) -> CandidatePtr { + Rc::new(RefCell::new(self)) + } +} + /// A vote being casted by a [`Voter`] to a [`Candidate`] is an `Edge`. #[derive(Clone, Default)] pub struct Edge { @@ -233,22 +221,39 @@ impl std::fmt::Debug for Voter { } impl Voter { + /// Create a new `Voter`. + pub fn new(who: AccountId) -> Self { + Self { who, ..Default::default() } + } + + /// Returns `true` if `self` votes for `target`. + /// + /// Note that this does not take into account if `target` is elected (i.e. is *active*) or not. + pub fn votes_for(&self, target: &AccountId) -> bool { + self.edges.iter().any(|e| &e.who == target) + } + /// Returns none if this voter does not have any non-zero distributions. /// /// Note that this might create _un-normalized_ assignments, due to accuracy loss of `P`. Call /// site might compensate by calling `normalize()` on the returned `Assignment` as a /// post-precessing.
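As an aside on the `CandidatePtr` alias touched above: because the crate's `Candidate` fields are private, the following standalone sketch (not part of the diff) uses only `std` types and a hypothetical `DemoCandidate` to illustrate the `Rc<RefCell<_>>` sharing pattern that the election loop relies on.

```rust
use std::{cell::RefCell, rc::Rc};

#[derive(Default)]
struct DemoCandidate {
	backed_stake: u128,
	elected: bool,
}

fn main() {
	// One shared, mutable candidate, analogous to `CandidatePtr`.
	let candidate = Rc::new(RefCell::new(DemoCandidate::default()));

	// Two "edges" hold clones of the same pointer.
	let edge_a = Rc::clone(&candidate);
	let edge_b = Rc::clone(&candidate);

	// Each edge can update the shared running totals in place.
	edge_a.borrow_mut().backed_stake += 50;
	edge_b.borrow_mut().backed_stake += 70;
	candidate.borrow_mut().elected = true;

	// All handles observe the same state.
	assert_eq!(candidate.borrow().backed_stake, 120);
	assert!(edge_b.borrow().elected);
}
```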
- pub fn into_assignment(self) -> Option> - where - ExtendedBalance: From>, - { + pub fn into_assignment(self) -> Option> { let who = self.who; let budget = self.budget; - let distribution = self.edges.into_iter().filter_map(|e| { - let per_thing = P::from_rational_approximation(e.weight, budget); - // trim zero edges. - if per_thing.is_zero() { None } else { Some((e.who, per_thing)) } - }).collect::>(); + let distribution = self + .edges + .into_iter() + .filter_map(|e| { + let per_thing = P::from_rational(e.weight, budget); + // trim zero edges. + if per_thing.is_zero() { + None + } else { + Some((e.who, per_thing)) + } + }) + .collect::>(); if distribution.len() > 0 { Some(Assignment { who, distribution }) @@ -283,7 +288,7 @@ impl Voter { }) } - /// Same as [`try_normalize`] but the normalization is only limited between elected edges. + /// Same as [`Self::try_normalize`] but the normalization is only limited between elected edges. pub fn try_normalize_elected(&mut self) -> Result<(), &'static str> { let elected_edge_weights = self .edges @@ -308,170 +313,25 @@ impl Voter { } }) } + + /// This voter's budget + #[inline] + pub fn budget(&self) -> ExtendedBalance { + self.budget + } } /// Final result of the election. -#[derive(Debug)] +#[derive(RuntimeDebug)] pub struct ElectionResult { /// Just winners zipped with their approval stake. Note that the approval stake is merely the /// sub of their received stake and could be used for very basic sorting and approval voting. - pub winners: Vec>, + pub winners: Vec<(AccountId, ExtendedBalance)>, /// Individual assignments. for each tuple, the first elements is a voter and the second is the /// list of candidates that it supports. pub assignments: Vec>, } -/// A voter's stake assignment among a set of targets, represented as ratios. -#[derive(Debug, Clone, Default)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] -pub struct Assignment { - /// Voter's identifier. - pub who: AccountId, - /// The distribution of the voter's stake. - pub distribution: Vec<(AccountId, P)>, -} - -impl Assignment -where - ExtendedBalance: From>, -{ - /// Convert from a ratio assignment into one with absolute values aka. [`StakedAssignment`]. - /// - /// It needs `stake` which is the total budget of the voter. If `fill` is set to true, it - /// _tries_ to ensure that all the potential rounding errors are compensated and the - /// distribution's sum is exactly equal to the total budget, by adding or subtracting the - /// remainder from the last distribution. - /// - /// If an edge ratio is [`Bounded::min_value()`], it is dropped. This edge can never mean - /// anything useful. - pub fn into_staked(self, stake: ExtendedBalance) -> StakedAssignment - where - P: sp_std::ops::Mul, - { - let distribution = self.distribution - .into_iter() - .filter_map(|(target, p)| { - // if this ratio is zero, then skip it. - if p.is_zero() { - None - } else { - // NOTE: this mul impl will always round to the nearest number, so we might both - // overflow and underflow. - let distribution_stake = p * stake; - Some((target, distribution_stake)) - } - }) - .collect::>(); - - StakedAssignment { - who: self.who, - distribution, - } - } - - /// Try and normalize this assignment. - /// - /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to 100%. - /// - /// ### Errors - /// - /// This will return only if the internal `normalize` fails. 
This can happen if sum of - /// `self.distribution.map(|p| p.deconstruct())` fails to fit inside `UpperOf<P>
`. A user of - /// this crate may statically assert that this can never happen and safely `expect` this to - /// return `Ok`. - pub fn try_normalize(&mut self) -> Result<(), &'static str> { - self.distribution - .iter() - .map(|(_, p)| *p) - .collect::>() - .normalize(P::one()) - .map(|normalized_ratios| - self.distribution - .iter_mut() - .zip(normalized_ratios) - .for_each(|((_, old), corrected)| { *old = corrected; }) - ) - } -} - -/// A voter's stake assignment among a set of targets, represented as absolute values in the scale -/// of [`ExtendedBalance`]. -#[derive(Debug, Clone, Default)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] -pub struct StakedAssignment { - /// Voter's identifier - pub who: AccountId, - /// The distribution of the voter's stake. - pub distribution: Vec<(AccountId, ExtendedBalance)>, -} - -impl StakedAssignment { - /// Converts self into the normal [`Assignment`] type. - /// - /// If `fill` is set to true, it _tries_ to ensure that all the potential rounding errors are - /// compensated and the distribution's sum is exactly equal to 100%, by adding or subtracting - /// the remainder from the last distribution. - /// - /// NOTE: it is quite critical that this attempt always works. The data type returned here will - /// potentially get used to create a compact type; a compact type requires sum of ratios to be - /// less than 100% upon un-compacting. - /// - /// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge - /// can never be re-created and does not mean anything useful anymore. - pub fn into_assignment(self) -> Assignment - where - ExtendedBalance: From>, - AccountId: IdentifierT, - { - let stake = self.total(); - let distribution = self.distribution - .into_iter() - .filter_map(|(target, w)| { - let per_thing = P::from_rational_approximation(w, stake); - if per_thing == Bounded::min_value() { - None - } else { - Some((target, per_thing)) - } - }) - .collect::>(); - - Assignment { - who: self.who, - distribution, - } - } - - /// Try and normalize this assignment. - /// - /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to - /// `stake`. - /// - /// NOTE: current implementation of `.normalize` is almost safe to `expect()` upon. The only - /// error case is when the input cannot fit in `T`, or the sum of input cannot fit in `T`. - /// Sadly, both of these are dependent upon the implementation of `VoteLimit`, i.e. the limit of - /// edges per voter which is enforced from upstream. Hence, at this crate, we prefer returning a - /// result and a use the name prefix `try_`. - pub fn try_normalize(&mut self, stake: ExtendedBalance) -> Result<(), &'static str> { - self.distribution - .iter() - .map(|(_, ref weight)| *weight) - .collect::>() - .normalize(stake) - .map(|normalized_weights| - self.distribution - .iter_mut() - .zip(normalized_weights.into_iter()) - .for_each(|((_, weight), corrected)| { *weight = corrected; }) - ) - } - - /// Get the total stake of this assignment (aka voter budget). - pub fn total(&self) -> ExtendedBalance { - self.distribution.iter().fold(Zero::zero(), |a, b| a.saturating_add(b.1)) - } -} - /// A structure to demonstrate the election result from the perspective of the candidate, i.e. how /// much support each candidate is receiving. /// @@ -479,8 +339,8 @@ impl StakedAssignment { /// /// This, at the current version, resembles the `Exposure` defined in the Staking pallet, yet they /// do not necessarily have to be the same. 
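To illustrate the target-major view just described, here is a small editorial sketch (not part of the diff) that folds staked assignments into `Supports` and evaluates the election score; it assumes the `to_supports` helper and `EvaluateSupport` trait introduced in the hunks just below.

```rust
// Hedged sketch of the target-major flow, mirroring the semantics of the
// `to_support_map`/`to_supports`/`EvaluateSupport` items added in this diff.
use sp_npos_elections::{to_supports, EvaluateSupport, StakedAssignment};

fn main() {
	let staked = vec![
		StakedAssignment { who: 1u64, distribution: vec![(10u64, 50), (20, 50)] },
		StakedAssignment { who: 2u64, distribution: vec![(10, 100)] },
	];

	// Target-major view: target 10 is backed by voters 1 and 2 (total 150),
	// target 20 only by voter 1 (total 50).
	let supports = to_supports(&staked);
	let score = supports.evaluate();

	// [minimal support, sum of supports, sum of supports squared].
	assert_eq!(score, [50, 200, 25_000]);
}
```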
-#[derive(Default, Debug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Eq, PartialEq))] +#[derive(Default, RuntimeDebug, Encode, Decode, Clone, Eq, PartialEq, scale_info::TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct Support<AccountId> { /// Total support. pub total: ExtendedBalance, @@ -488,90 +348,71 @@ pub struct Support<AccountId> { pub voters: Vec<(AccountId, ExtendedBalance)>, } -/// A linkage from a candidate and its [`Support`]. -pub type SupportMap<A> = BTreeMap<A, Support<A>>; - -/// Build the support map from the given election result. It maps a flat structure like +/// A target-major representation of the election outcome. /// -/// ```nocompile -/// assignments: vec![ -/// voter1, vec![(candidate1, w11), (candidate2, w12)], -/// voter2, vec![(candidate1, w21), (candidate2, w22)] -/// ] -/// ``` +/// Essentially a flat variant of [`SupportMap`]. /// -/// into a mapping of candidates and their respective support: -/// -/// ```nocompile -/// SupportMap { -/// candidate1: Support { -/// own:0, -/// total: w11 + w21, -/// others: vec![(candidate1, w11), (candidate2, w21)] -/// }, -/// candidate2: Support { -/// own:0, -/// total: w12 + w22, -/// others: vec![(candidate1, w12), (candidate2, w22)] -/// }, -/// } -/// ``` -/// -/// The second returned flag indicates the number of edges who didn't corresponded to an actual -/// winner from the given winner set. A value in this place larger than 0 indicates a potentially -/// faulty assignment. +/// The main advantage of this is that it is encodable. +pub type Supports<A> = Vec<(A, Support<A>)>; + +/// Linkage from a winner to their [`Support`]. /// -/// `O(E)` where `E` is the total number of edges. -pub fn build_support_map( - winners: &[AccountId], +/// This is more helpful than a normal [`Supports`] as it allows faster error checking. +pub type SupportMap<A> = BTreeMap<A, Support<A>>; + +/// Build the support map from the assignments. +pub fn to_support_map( assignments: &[StakedAssignment], -) -> Result, AccountId> where - AccountId: IdentifierT, -{ - // Initialize the support of each candidate. - let mut supports = >::new(); - winners - .iter() - .for_each(|e| { supports.insert(e.clone(), Default::default()); }); +) -> SupportMap { + let mut supports = >>::new(); // build support struct. - for StakedAssignment { who, distribution } in assignments.iter() { - for (c, weight_extended) in distribution.iter() { - if let Some(support) = supports.get_mut(c) { - support.total = support.total.saturating_add(*weight_extended); - support.voters.push((who.clone(), *weight_extended)); - } else { - return Err(c.clone()) - } + for StakedAssignment { who, distribution } in assignments.into_iter() { + for (c, weight_extended) in distribution.into_iter() { + let mut support = supports.entry(c.clone()).or_default(); + support.total = support.total.saturating_add(*weight_extended); + support.voters.push((who.clone(), *weight_extended)); + } } - Ok(supports) + + supports } -/// Evaluate a support map. The returned tuple contains: -/// -/// - Minimum support. This value must be **maximized**. -/// - Sum of all supports. This value must be **maximized**. -/// - Sum of all supports squared. This value must be **minimized**. -/// -/// `O(E)` where `E` is the total number of edges.
-pub fn evaluate_support( - support: &SupportMap, -) -> ElectionScore { - let mut min_support = ExtendedBalance::max_value(); - let mut sum: ExtendedBalance = Zero::zero(); - // NOTE: The third element might saturate but fine for now since this will run on-chain and need - // to be fast. - let mut sum_squared: ExtendedBalance = Zero::zero(); - for (_, support) in support.iter() { - sum = sum.saturating_add(support.total); - let squared = support.total.saturating_mul(support.total); - sum_squared = sum_squared.saturating_add(squared); - if support.total < min_support { - min_support = support.total; +/// Same as [`to_support_map`] except it returns a +/// flat vector. +pub fn to_supports( + assignments: &[StakedAssignment], +) -> Supports { + to_support_map(assignments).into_iter().collect() +} + +/// Extension trait for evaluating a support map or vector. +pub trait EvaluateSupport { + /// Evaluate a support map. The returned tuple contains: + /// + /// - Minimum support. This value must be **maximized**. + /// - Sum of all supports. This value must be **maximized**. + /// - Sum of all supports squared. This value must be **minimized**. + fn evaluate(&self) -> ElectionScore; +} + +impl EvaluateSupport for Supports { + fn evaluate(&self) -> ElectionScore { + let mut min_support = ExtendedBalance::max_value(); + let mut sum: ExtendedBalance = Zero::zero(); + // NOTE: The third element might saturate but fine for now since this will run on-chain and + // need to be fast. + let mut sum_squared: ExtendedBalance = Zero::zero(); + for (_, support) in self { + sum = sum.saturating_add(support.total); + let squared = support.total.saturating_mul(support.total); + sum_squared = sum_squared.saturating_add(squared); + if support.total < min_support { + min_support = support.total; + } } + [min_support, sum, sum_squared] } - [min_support, sum, sum_squared] } /// Compares two sets of election scores based on desirability and returns true if `this` is better @@ -581,16 +422,11 @@ pub fn evaluate_support( /// greater or less than `that`. /// /// Note that the third component should be minimized. -pub fn is_score_better(this: ElectionScore, that: ElectionScore, epsilon: P) -> bool - where ExtendedBalance: From> -{ +pub fn is_score_better(this: ElectionScore, that: ElectionScore, epsilon: P) -> bool { match this .iter() - .enumerate() - .map(|(i, e)| ( - e.ge(&that[i]), - e.tcmp(&that[i], epsilon.mul_ceil(that[i])), - )) + .zip(that.iter()) + .map(|(thi, tha)| (thi.ge(&tha), thi.tcmp(&tha, epsilon.mul_ceil(*tha)))) .collect::>() .as_slice() { @@ -613,7 +449,7 @@ pub fn is_score_better(this: ElectionScore, that: ElectionScore, ep /// This will perform some cleanup that are most often important: /// - It drops any votes that are pointing to non-candidates. /// - It drops duplicate targets within a voter. -pub(crate) fn setup_inputs( +pub fn setup_inputs( initial_candidates: Vec, initial_voters: Vec<(AccountId, VoteWeight, Vec)>, ) -> (Vec>, Vec>) { @@ -625,38 +461,38 @@ pub(crate) fn setup_inputs( .enumerate() .map(|(idx, who)| { c_idx_cache.insert(who.clone(), idx); - Rc::new(RefCell::new(Candidate { who, ..Default::default() })) + Candidate { who, ..Default::default() }.to_ptr() }) .collect::>>(); - let voters = initial_voters.into_iter().map(|(who, voter_stake, votes)| { - let mut edges: Vec> = Vec::with_capacity(votes.len()); - for v in votes { - if edges.iter().any(|e| e.who == v) { - // duplicate edge. 
- continue; - } - if let Some(idx) = c_idx_cache.get(&v) { - // This candidate is valid + already cached. - let mut candidate = candidates[*idx].borrow_mut(); - candidate.approval_stake = - candidate.approval_stake.saturating_add(voter_stake.into()); - edges.push( - Edge { + let voters = initial_voters + .into_iter() + .filter_map(|(who, voter_stake, votes)| { + let mut edges: Vec> = Vec::with_capacity(votes.len()); + for v in votes { + if edges.iter().any(|e| e.who == v) { + // duplicate edge. + continue + } + if let Some(idx) = c_idx_cache.get(&v) { + // This candidate is valid + already cached. + let mut candidate = candidates[*idx].borrow_mut(); + candidate.approval_stake = + candidate.approval_stake.saturating_add(voter_stake.into()); + edges.push(Edge { who: v.clone(), candidate: Rc::clone(&candidates[*idx]), ..Default::default() - } - ); - } // else {} would be wrong votes. We don't really care about it. - } - Voter { - who, - edges: edges, - budget: voter_stake.into(), - load: Rational128::zero(), - } - }).collect::>(); + }); + } // else {} would be wrong votes. We don't really care about it. + } + if edges.is_empty() { + None + } else { + Some(Voter { who, edges, budget: voter_stake.into(), load: Rational128::zero() }) + } + }) + .collect::>(); - (candidates, voters,) + (candidates, voters) } diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index 32c9d1223862a..36fd78b5757ee 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,10 +19,44 @@ #![cfg(test)] -use crate::{seq_phragmen, ElectionResult, Assignment, VoteWeight, ExtendedBalance}; -use sp_arithmetic::{PerThing, InnerOf, traits::{SaturatedConversion, Zero, One}}; -use sp_std::collections::btree_map::BTreeMap; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, + hash::Hash, +}; + +use rand::{self, seq::SliceRandom, Rng}; +use sp_arithmetic::{ + traits::{One, SaturatedConversion, Zero}, + PerThing, +}; use sp_runtime::assert_eq_error_rate; +use sp_std::collections::btree_map::BTreeMap; + +use crate::{seq_phragmen, Assignment, ElectionResult, ExtendedBalance, PerThing128, VoteWeight}; + +pub type AccountId = u64; + +/// The candidate mask allows easy disambiguation between voters and candidates: accounts +/// for which this bit is set are candidates, and without it, are voters. +pub const CANDIDATE_MASK: AccountId = 1 << ((std::mem::size_of::() * 8) - 1); + +pub type TestAccuracy = sp_runtime::Perbill; + +crate::generate_solution_type! 
{ + pub struct TestSolution::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = TestAccuracy, + >(16) +} + +pub fn p(p: u8) -> TestAccuracy { + TestAccuracy::from_percent(p.into()) +} + +pub type MockAssignment = crate::Assignment; +pub type Voter = (AccountId, VoteWeight, Vec); #[derive(Default, Debug)] pub(crate) struct _Candidate { @@ -57,26 +91,24 @@ pub(crate) struct _Support { pub(crate) type _Assignment = (A, f64); pub(crate) type _SupportMap = BTreeMap>; -pub(crate) type AccountId = u64; - #[derive(Debug, Clone)] pub(crate) struct _ElectionResult { pub winners: Vec<(A, ExtendedBalance)>, - pub assignments: Vec<(A, Vec<_Assignment>)> + pub assignments: Vec<(A, Vec<_Assignment>)>, } pub(crate) fn auto_generate_self_voters(candidates: &[A]) -> Vec<(A, Vec)> { candidates.iter().map(|c| (c.clone(), vec![c.clone()])).collect() } -pub(crate) fn elect_float( +pub(crate) fn elect_float( candidate_count: usize, initial_candidates: Vec, initial_voters: Vec<(A, Vec)>, - stake_of: FS, -) -> Option<_ElectionResult> where + stake_of: impl Fn(&A) -> VoteWeight, +) -> Option<_ElectionResult> +where A: Default + Ord + Copy, - for<'r> FS: Fn(&'r A) -> VoteWeight, { let mut elected_candidates: Vec<(A, ExtendedBalance)>; let mut assigned: Vec<(A, Vec<_Assignment>)>; @@ -99,17 +131,10 @@ pub(crate) fn elect_float( for v in votes { if let Some(idx) = c_idx_cache.get(&v) { candidates[*idx].approval_stake = candidates[*idx].approval_stake + voter_stake; - edges.push( - _Edge { who: v.clone(), candidate_index: *idx, ..Default::default() } - ); + edges.push(_Edge { who: v.clone(), candidate_index: *idx, ..Default::default() }); } } - _Voter { - who, - edges: edges, - budget: voter_stake, - load: 0f64, - } + _Voter { who, edges, budget: voter_stake, load: 0f64 } })); let to_elect = candidate_count.min(candidates.len()); @@ -155,7 +180,9 @@ pub(crate) fn elect_float( for n in &mut voters { let mut assignment = (n.who.clone(), vec![]); for e in &mut n.edges { - if let Some(c) = elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) { + if let Some(c) = + elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) + { if c != n.who { let ratio = e.load / n.load; assignment.1.push((e.who.clone(), ratio)); @@ -167,10 +194,7 @@ pub(crate) fn elect_float( } } - Some(_ElectionResult { - winners: elected_candidates, - assignments: assigned, - }) + Some(_ElectionResult { winners: elected_candidates, assignments: assigned }) } pub(crate) fn equalize_float( @@ -187,18 +211,14 @@ pub(crate) fn equalize_float( let mut max_diff = 0.0; for (voter, assignment) in assignments.iter_mut() { let voter_budget = stake_of(&voter); - let diff = do_equalize_float( - voter, - voter_budget, - assignment, - supports, - tolerance, - ); - if diff > max_diff { max_diff = diff; } + let diff = do_equalize_float(voter, voter_budget, assignment, supports, tolerance); + if diff > max_diff { + max_diff = diff; + } } if max_diff < tolerance { - break; + break } } } @@ -208,21 +228,20 @@ pub(crate) fn do_equalize_float( budget_balance: VoteWeight, elected_edges: &mut Vec<_Assignment>, support_map: &mut _SupportMap, - tolerance: f64 -) -> f64 where + tolerance: f64, +) -> f64 +where A: Ord + Clone, { let budget = budget_balance as f64; - if elected_edges.is_empty() { return 0.0; } + if elected_edges.is_empty() { + return 0.0 + } - let stake_used = elected_edges - .iter() - .fold(0.0, |s, e| s + e.1); + let stake_used = elected_edges.iter().fold(0.0, |s, e| s + e.1); - let backed_stakes_iter = elected_edges - 
.iter() - .filter_map(|e| support_map.get(&e.0)) - .map(|e| e.total); + let backed_stakes_iter = + elected_edges.iter().filter_map(|e| support_map.get(&e.0)).map(|e| e.total); let backing_backed_stake = elected_edges .iter() @@ -244,7 +263,7 @@ pub(crate) fn do_equalize_float( difference = max_stake - min_stake; difference = difference + budget - stake_used; if difference < tolerance { - return difference; + return difference } } else { difference = budget; @@ -259,11 +278,12 @@ pub(crate) fn do_equalize_float( e.1 = 0.0; }); - elected_edges.sort_by(|x, y| - support_map.get(&x.0) + elected_edges.sort_by(|x, y| { + support_map + .get(&x.0) .and_then(|x| support_map.get(&y.0).and_then(|y| x.total.partial_cmp(&y.total))) .unwrap_or(sp_std::cmp::Ordering::Equal) - ); + }); let mut cumulative_stake = 0.0; let mut last_index = elected_edges.len() - 1; @@ -294,66 +314,70 @@ pub(crate) fn do_equalize_float( difference } - -pub(crate) fn create_stake_of(stakes: &[(AccountId, VoteWeight)]) - -> Box VoteWeight> -{ +pub(crate) fn create_stake_of( + stakes: &[(AccountId, VoteWeight)], +) -> impl Fn(&AccountId) -> VoteWeight { let mut storage = BTreeMap::::new(); - stakes.iter().for_each(|s| { storage.insert(s.0, s.1); }); - let stake_of = move |who: &AccountId| -> VoteWeight { storage.get(who).unwrap().to_owned() }; - Box::new(stake_of) + stakes.iter().for_each(|s| { + storage.insert(s.0, s.1); + }); + move |who: &AccountId| -> VoteWeight { storage.get(who).unwrap().to_owned() } } - -pub fn check_assignments_sum(assignments: Vec>) { +pub fn check_assignments_sum(assignments: &[Assignment]) { for Assignment { distribution, .. } in assignments { let mut sum: u128 = Zero::zero(); - distribution.iter().for_each(|(_, p)| sum += p.deconstruct().saturated_into()); + distribution + .iter() + .for_each(|(_, p)| sum += p.deconstruct().saturated_into::()); assert_eq!(sum, T::ACCURACY.saturated_into(), "Assignment ratio sum is not 100%"); } } -pub(crate) fn run_and_compare( +pub(crate) fn run_and_compare( candidates: Vec, voters: Vec<(AccountId, Vec)>, - stake_of: &Box VoteWeight>, + stake_of: FS, to_elect: usize, ) where - ExtendedBalance: From>, - Output: sp_std::ops::Mul, + Output: PerThing128, + FS: Fn(&AccountId) -> VoteWeight, { // run fixed point code. let ElectionResult { winners, assignments } = seq_phragmen::<_, Output>( to_elect, candidates.clone(), - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - None - ).unwrap(); + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + None, + ) + .unwrap(); // run float poc code. 
- let truth_value = elect_float( - to_elect, - candidates, - voters, - &stake_of, - ).unwrap(); + let truth_value = elect_float(to_elect, candidates, voters, &stake_of).unwrap(); - assert_eq!(winners.iter().map(|(x, _)| x).collect::>(), truth_value.winners.iter().map(|(x, _)| x).collect::>()); + assert_eq!( + winners.iter().map(|(x, _)| x).collect::>(), + truth_value.winners.iter().map(|(x, _)| x).collect::>() + ); - for Assignment { who, distribution } in assignments.clone() { - if let Some(float_assignments) = truth_value.assignments.iter().find(|x| x.0 == who) { + for Assignment { who, distribution } in assignments.iter() { + if let Some(float_assignments) = truth_value.assignments.iter().find(|x| x.0 == *who) { for (candidate, per_thingy) in distribution { - if let Some(float_assignment) = float_assignments.1.iter().find(|x| x.0 == candidate ) { + if let Some(float_assignment) = + float_assignments.1.iter().find(|x| x.0 == *candidate) + { assert_eq_error_rate!( - Output::from_fraction(float_assignment.1).deconstruct(), + Output::from_float(float_assignment.1).deconstruct(), per_thingy.deconstruct(), Output::Inner::one(), ); } else { panic!( "candidate mismatch. This should never happen. could not find ({:?}, {:?})", - candidate, - per_thingy, + candidate, per_thingy, ) } } @@ -362,23 +386,18 @@ pub(crate) fn run_and_compare( } } - check_assignments_sum(assignments); + check_assignments_sum(&assignments); } -pub(crate) fn build_support_map_float( +pub(crate) fn build_support_map_float( result: &mut _ElectionResult, - stake_of: FS, -) -> _SupportMap - where for<'r> FS: Fn(&'r AccountId) -> VoteWeight -{ + stake_of: impl Fn(&AccountId) -> VoteWeight, +) -> _SupportMap { let mut supports = <_SupportMap>::new(); - result.winners - .iter() - .map(|(e, _)| (e, stake_of(e) as f64)) - .for_each(|(e, s)| { - let item = _Support { own: s, total: s, ..Default::default() }; - supports.insert(e.clone(), item); - }); + result.winners.iter().map(|(e, _)| (e, stake_of(e) as f64)).for_each(|(e, s)| { + let item = _Support { own: s, total: s, ..Default::default() }; + supports.insert(e.clone(), item); + }); for (n, assignment) in result.assignments.iter_mut() { for (c, r) in assignment.iter_mut() { @@ -393,3 +412,136 @@ pub(crate) fn build_support_map_float( } supports } + +/// Generate voter and assignment lists. Makes no attempt to be realistic about winner or assignment +/// fairness. +/// +/// Maintains these invariants: +/// +/// - candidate ids have `CANDIDATE_MASK` bit set +/// - voter ids do not have `CANDIDATE_MASK` bit set +/// - assignments have the same ordering as voters +/// - `assignments.distribution.iter().map(|(_, frac)| frac).sum() == One::one()` +/// - a coherent set of winners is chosen. +/// - the winner set is a subset of the candidate set. 
+/// - `assignments.distribution.iter().all(|(who, _)| winners.contains(who))` +pub fn generate_random_votes( + candidate_count: usize, + voter_count: usize, + mut rng: impl Rng, +) -> (Vec, Vec, Vec) { + // cache for fast generation of unique candidate and voter ids + let mut used_ids = HashSet::with_capacity(candidate_count + voter_count); + + // candidates are easy: just a completely random set of IDs + let mut candidates: Vec = Vec::with_capacity(candidate_count); + while candidates.len() < candidate_count { + let mut new = || rng.gen::() | CANDIDATE_MASK; + let mut id = new(); + // insert returns `false` when the value was already present + while !used_ids.insert(id) { + id = new(); + } + candidates.push(id); + } + + // voters are random ids, random weights, random selection from the candidates + let mut voters = Vec::with_capacity(voter_count); + while voters.len() < voter_count { + let mut new = || rng.gen::() & !CANDIDATE_MASK; + let mut id = new(); + // insert returns `false` when the value was already present + while !used_ids.insert(id) { + id = new(); + } + + let vote_weight = rng.gen(); + + // it's not interesting if a voter chooses 0 or all candidates, so rule those cases out. + // also, let's not generate any cases which result in a compact overflow. + let n_candidates_chosen = + rng.gen_range(1, candidates.len().min(::LIMIT)); + + let mut chosen_candidates = Vec::with_capacity(n_candidates_chosen); + chosen_candidates.extend(candidates.choose_multiple(&mut rng, n_candidates_chosen)); + voters.push((id, vote_weight, chosen_candidates)); + } + + // always generate a sensible number of winners: elections are uninteresting if nobody wins, + // or everybody wins + let num_winners = rng.gen_range(1, candidate_count); + let mut winners: HashSet = HashSet::with_capacity(num_winners); + winners.extend(candidates.choose_multiple(&mut rng, num_winners)); + assert_eq!(winners.len(), num_winners); + + let mut assignments = Vec::with_capacity(voters.len()); + for (voter_id, _, votes) in voters.iter() { + let chosen_winners = votes.iter().filter(|vote| winners.contains(vote)).cloned(); + let num_chosen_winners = chosen_winners.clone().count(); + + // distribute the available stake randomly + let stake_distribution = if num_chosen_winners == 0 { + continue + } else { + let mut available_stake = 1000; + let mut stake_distribution = Vec::with_capacity(num_chosen_winners); + for _ in 0..num_chosen_winners - 1 { + let stake = rng.gen_range(0, available_stake).min(1); + stake_distribution.push(TestAccuracy::from_perthousand(stake)); + available_stake -= stake; + } + stake_distribution.push(TestAccuracy::from_perthousand(available_stake)); + stake_distribution.shuffle(&mut rng); + stake_distribution + }; + + assignments.push(MockAssignment { + who: *voter_id, + distribution: chosen_winners.zip(stake_distribution).collect(), + }); + } + + (voters, assignments, candidates) +} + +fn generate_cache(voters: Voters) -> HashMap +where + Voters: Iterator, + Item: Hash + Eq + Copy, +{ + let mut cache = HashMap::new(); + for (idx, voter_id) in voters.enumerate() { + cache.insert(voter_id, idx); + } + cache +} + +/// Create a function that returns the index of a voter in the voters list. 
+pub fn make_voter_fn(voters: &[Voter]) -> impl Fn(&AccountId) -> Option +where + usize: TryInto, +{ + let cache = generate_cache(voters.iter().map(|(id, _, _)| *id)); + move |who| { + if cache.get(who).is_none() { + println!("WARNING: voter {} will raise InvalidIndex", who); + } + cache.get(who).cloned().and_then(|i| i.try_into().ok()) + } +} + +/// Create a function that returns the index of a candidate in the candidates list. +pub fn make_target_fn( + candidates: &[AccountId], +) -> impl Fn(&AccountId) -> Option +where + usize: TryInto, +{ + let cache = generate_cache(candidates.iter().cloned()); + move |who| { + if cache.get(who).is_none() { + println!("WARNING: target {} will raise InvalidIndex", who); + } + cache.get(who).cloned().and_then(|i| i.try_into().ok()) + } +} diff --git a/primitives/npos-elections/src/node.rs b/primitives/npos-elections/src/node.rs index d18c0e9016b64..62b728d52258b 100644 --- a/primitives/npos-elections/src/node.rs +++ b/primitives/npos-elections/src/node.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -55,11 +55,7 @@ impl sp_std::fmt::Debug for NodeId { f, "Node({:?}, {:?})", self.who, - if self.role == NodeRole::Voter { - "V" - } else { - "T" - } + if self.role == NodeRole::Voter { "V" } else { "T" } ) } } @@ -84,12 +80,7 @@ impl Eq for Node {} #[cfg(feature = "std")] impl fmt::Debug for Node { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "({:?} --> {:?})", - self.id, - self.parent.as_ref().map(|p| p.borrow().id.clone()) - ) + write!(f, "({:?} --> {:?})", self.id, self.parent.as_ref().map(|p| p.borrow().id.clone())) } } @@ -102,7 +93,7 @@ impl Node { /// Returns true if `other` is the parent of `who`. 
pub fn is_parent_of(who: &NodeRef, other: &NodeRef) -> bool { if who.borrow().parent.is_none() { - return false; + return false } who.borrow().parent.as_ref() == Some(other) } @@ -136,7 +127,7 @@ impl Node { while let Some(ref next_parent) = current.clone().borrow().parent { if visited.contains(next_parent) { - break; + break } parent_path.push(next_parent.clone()); current = next_parent.clone(); @@ -164,16 +155,7 @@ mod tests { #[test] fn basic_create_works() { let node = Node::new(id(10)); - assert_eq!( - node, - Node { - id: NodeId { - who: 10, - role: NodeRole::Target - }, - parent: None - } - ); + assert_eq!(node, Node { id: NodeId { who: 10, role: NodeRole::Target }, parent: None }); } #[test] @@ -194,9 +176,9 @@ mod tests { #[test] fn get_root_works() { - // D <-- A <-- B <-- C - // \ - // <-- E + // D <-- A <-- B <-- C + // \ + // <-- E let a = Node::new(id(1)).into_ref(); let b = Node::new(id(2)).into_ref(); let c = Node::new(id(3)).into_ref(); @@ -209,29 +191,20 @@ mod tests { Node::set_parent_of(&e, &a); Node::set_parent_of(&a, &d); - assert_eq!( - Node::root(&e), - (d.clone(), vec![e.clone(), a.clone(), d.clone()]), - ); + assert_eq!(Node::root(&e), (d.clone(), vec![e.clone(), a.clone(), d.clone()])); - assert_eq!(Node::root(&a), (d.clone(), vec![a.clone(), d.clone()]),); + assert_eq!(Node::root(&a), (d.clone(), vec![a.clone(), d.clone()])); - assert_eq!( - Node::root(&c), - (d.clone(), vec![c.clone(), b.clone(), a.clone(), d.clone()]), - ); + assert_eq!(Node::root(&c), (d.clone(), vec![c.clone(), b.clone(), a.clone(), d.clone()])); - // D A <-- B <-- C - // F <-- / \ - // <-- E + // D A <-- B <-- C + // F <-- / \ + // <-- E Node::set_parent_of(&a, &f); - assert_eq!(Node::root(&a), (f.clone(), vec![a.clone(), f.clone()]),); + assert_eq!(Node::root(&a), (f.clone(), vec![a.clone(), f.clone()])); - assert_eq!( - Node::root(&c), - (f.clone(), vec![c.clone(), b.clone(), a.clone(), f.clone()]), - ); + assert_eq!(Node::root(&c), (f.clone(), vec![c.clone(), b.clone(), a.clone(), f.clone()])); } #[test] diff --git a/primitives/npos-elections/src/phragmen.rs b/primitives/npos-elections/src/phragmen.rs index cfbeed1cdd3fb..5ed472284351a 100644 --- a/primitives/npos-elections/src/phragmen.rs +++ b/primitives/npos-elections/src/phragmen.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,19 +21,19 @@ //! to the Maximin problem. use crate::{ - IdentifierT, VoteWeight, Voter, CandidatePtr, ExtendedBalance, setup_inputs, ElectionResult, + balancing, setup_inputs, CandidatePtr, ElectionResult, ExtendedBalance, IdentifierT, + PerThing128, VoteWeight, Voter, }; -use sp_std::prelude::*; use sp_arithmetic::{ - PerThing, InnerOf, Rational128, helpers_128bit::multiply_by_rational, - traits::{Zero, Bounded}, + traits::{Bounded, Zero}, + Rational128, }; -use crate::balancing; +use sp_std::prelude::*; /// The denominator used for loads. Since votes are collected as u64, the smallest ratio that we /// might collect is `1/approval_stake` where approval stake is the sum of votes. Hence, some number -/// bigger than u64::max_value() is needed. For maximum accuracy we simply use u128; +/// bigger than u64::MAX is needed. 
For maximum accuracy we simply use u128; const DEN: ExtendedBalance = ExtendedBalance::max_value(); /// Execute sequential phragmen with potentially some rounds of `balancing`. The return type is list @@ -63,21 +63,21 @@ const DEN: ExtendedBalance = ExtendedBalance::max_value(); /// `expect` this to return `Ok`. /// /// This can only fail if the normalization fails. -pub fn seq_phragmen( - rounds: usize, - initial_candidates: Vec, - initial_voters: Vec<(AccountId, VoteWeight, Vec)>, - balance: Option<(usize, ExtendedBalance)>, -) -> Result, &'static str> where ExtendedBalance: From> { - let (candidates, voters) = setup_inputs(initial_candidates, initial_voters); - - let (candidates, mut voters) = seq_phragmen_core::( - rounds, - candidates, - voters, - )?; - - if let Some((iterations, tolerance)) = balance { +/// +/// Note that rounding errors can potentially cause the output of this function to fail a t-PJR +/// check where t is the standard threshold. The underlying algorithm is sound, but the conversions +/// between numeric types can be lossy. +pub fn seq_phragmen( + to_elect: usize, + candidates: Vec, + voters: Vec<(AccountId, VoteWeight, Vec)>, + balancing: Option<(usize, ExtendedBalance)>, +) -> Result, crate::Error> { + let (candidates, voters) = setup_inputs(candidates, voters); + + let (candidates, mut voters) = seq_phragmen_core::(to_elect, candidates, voters)?; + + if let Some((iterations, tolerance)) = balancing { // NOTE: might create zero-edges, but we will strip them again when we convert voter into // assignment. let _iters = balancing::balance::(&mut voters, iterations, tolerance); @@ -87,17 +87,22 @@ pub fn seq_phragmen( .into_iter() .filter(|c_ptr| c_ptr.borrow().elected) // defensive only: seq-phragmen-core returns only up to rounds. - .take(rounds) + .take(to_elect) .collect::>(); // sort winners based on desirability. winners.sort_by_key(|c_ptr| c_ptr.borrow().round); - let mut assignments = voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); - let _ = assignments.iter_mut().map(|a| a.try_normalize()).collect::>()?; - let winners = winners.into_iter().map(|w_ptr| - (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake) - ).collect(); + let mut assignments = + voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); + let _ = assignments + .iter_mut() + .map(|a| a.try_normalize().map_err(|e| crate::Error::ArithmeticError(e))) + .collect::>()?; + let winners = winners + .into_iter() + .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) + .collect(); Ok(ElectionResult { winners, assignments }) } @@ -108,16 +113,15 @@ pub fn seq_phragmen( /// `seq_phragmen` for more information. This function is left public in case a crate needs to use /// the implementation in a custom way. /// -/// To create th inputs needed for this function, see [`crate::setup_inputs`]. -/// /// This can only fail if the normalization fails. +// To create the inputs needed for this function, see [`crate::setup_inputs`]. pub fn seq_phragmen_core( - rounds: usize, + to_elect: usize, candidates: Vec>, mut voters: Vec>, -) -> Result<(Vec>, Vec>), &'static str> { +) -> Result<(Vec>, Vec>), crate::Error> { // we have already checked that we have more candidates than minimum_candidate_count. 
- let to_elect = rounds.min(candidates.len()); + let to_elect = to_elect.min(candidates.len()); // main election loop for round in 0..to_elect { @@ -144,7 +148,8 @@ pub fn seq_phragmen_core( voter.load.n(), voter.budget, candidate.approval_stake, - ).unwrap_or(Bounded::max_value()); + ) + .unwrap_or(Bounded::max_value()); let temp_d = voter.load.d(); let temp = Rational128::from(temp_n, temp_d); candidate.score = candidate.score.lazy_saturating_add(temp); @@ -180,13 +185,9 @@ pub fn seq_phragmen_core( for edge in &mut voter.edges { if edge.candidate.borrow().elected { // update internal state. - edge.weight = multiply_by_rational( - voter.budget, - edge.load.n(), - voter.load.n(), - ) - // If result cannot fit in u128. Not much we can do about it. - .unwrap_or(Bounded::max_value()); + edge.weight = multiply_by_rational(voter.budget, edge.load.n(), voter.load.n()) + // If result cannot fit in u128. Not much we can do about it. + .unwrap_or(Bounded::max_value()); } else { edge.weight = 0 } @@ -199,7 +200,7 @@ pub fn seq_phragmen_core( // edge of all candidates that eventually have a non-zero weight must be elected. debug_assert!(voter.edges.iter().all(|e| e.candidate.borrow().elected)); // inc budget to sum the budget. - voter.try_normalize_elected()?; + voter.try_normalize_elected().map_err(|e| crate::Error::ArithmeticError(e))?; } Ok((candidates, voters)) diff --git a/primitives/npos-elections/src/phragmms.rs b/primitives/npos-elections/src/phragmms.rs index 9b59e22c249b6..e9135a13190c6 100644 --- a/primitives/npos-elections/src/phragmms.rs +++ b/primitives/npos-elections/src/phragmms.rs @@ -1,6 +1,6 @@ - // This file is part of Substrate. +// This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,10 +22,10 @@ //! MMS algorithm. use crate::{ - IdentifierT, ElectionResult, ExtendedBalance, setup_inputs, VoteWeight, Voter, CandidatePtr, - balance, + balance, setup_inputs, CandidatePtr, ElectionResult, ExtendedBalance, IdentifierT, PerThing128, + VoteWeight, Voter, }; -use sp_arithmetic::{PerThing, InnerOf, Rational128, traits::Bounded}; +use sp_arithmetic::{traits::Bounded, PerThing, Rational128}; use sp_std::{prelude::*, rc::Rc}; /// Execute the phragmms method. @@ -41,15 +41,13 @@ use sp_std::{prelude::*, rc::Rc}; /// assignments, `assignment.distribution.map(|p| p.deconstruct()).sum()` fails to fit inside /// `UpperOf
<P>
`. A user of this crate may statically assert that this can never happen and safely /// `expect` this to return `Ok`. -pub fn phragmms( +pub fn phragmms( to_elect: usize, - initial_candidates: Vec, - initial_voters: Vec<(AccountId, VoteWeight, Vec)>, - balancing_config: Option<(usize, ExtendedBalance)>, -) -> Result, &'static str> - where ExtendedBalance: From> -{ - let (candidates, mut voters) = setup_inputs(initial_candidates, initial_voters); + candidates: Vec, + voters: Vec<(AccountId, VoteWeight, Vec)>, + balancing: Option<(usize, ExtendedBalance)>, +) -> Result, crate::Error> { + let (candidates, mut voters) = setup_inputs(candidates, voters); let mut winners = vec![]; for round in 0..to_elect { @@ -60,19 +58,25 @@ pub fn phragmms( round_winner.borrow_mut().elected = true; winners.push(round_winner); - if let Some((iterations, tolerance)) = balancing_config { + if let Some((iterations, tolerance)) = balancing { balance(&mut voters, iterations, tolerance); } } else { - break; + break } } - let mut assignments = voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); - let _ = assignments.iter_mut().map(|a| a.try_normalize()).collect::>()?; - let winners = winners.into_iter().map(|w_ptr| - (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake) - ).collect(); + let mut assignments = + voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); + let _ = assignments + .iter_mut() + .map(|a| a.try_normalize()) + .collect::>() + .map_err(|e| crate::Error::ArithmeticError(e))?; + let winners = winners + .into_iter() + .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) + .collect(); Ok(ElectionResult { winners, assignments }) } @@ -88,7 +92,7 @@ pub fn phragmms( pub(crate) fn calculate_max_score( candidates: &[CandidatePtr], voters: &[Voter], -) -> Option> where ExtendedBalance: From> { +) -> Option> { for c_ptr in candidates.iter() { let mut candidate = c_ptr.borrow_mut(); if !candidate.elected { @@ -103,10 +107,8 @@ pub(crate) fn calculate_max_score( for edge in voter.edges.iter() { let edge_candidate = edge.candidate.borrow(); if edge_candidate.elected { - let edge_contribution: ExtendedBalance = P::from_rational_approximation( - edge.weight, - edge_candidate.backed_stake, - ).deconstruct().into(); + let edge_contribution: ExtendedBalance = + P::from_rational(edge.weight, edge_candidate.backed_stake).deconstruct().into(); denominator_contribution += edge_contribution; } } @@ -127,7 +129,7 @@ pub(crate) fn calculate_max_score( for c_ptr in candidates.iter() { let mut candidate = c_ptr.borrow_mut(); - if candidate.approval_stake > 0 { + if candidate.approval_stake > 0 { // finalise the score value. let score_d = candidate.score.d(); let one: ExtendedBalance = P::ACCURACY.into(); @@ -155,7 +157,10 @@ pub(crate) fn calculate_max_score( // `RationalInfinite` as the score type does not introduce significant overhead. Then we // can switch the score type to `RationalInfinite` and ensure compatibility with any // crazy token scale. - let score_n = candidate.approval_stake.checked_mul(one).unwrap_or_else(|| Bounded::max_value()); + let score_n = candidate + .approval_stake + .checked_mul(one) + .unwrap_or_else(|| Bounded::max_value()); candidate.score = Rational128::from(score_n, score_d); // check if we have a new winner. 
@@ -182,8 +187,11 @@ pub(crate) fn apply_elected( elected_ptr: CandidatePtr, ) { let elected_who = elected_ptr.borrow().who.clone(); - let cutoff = elected_ptr.borrow().score.to_den(1) - .expect("(n / d) < u128::max() and (n' / 1) == (n / d), thus n' < u128::max()'; qed.") + let cutoff = elected_ptr + .borrow() + .score + .to_den(1) + .expect("(n / d) < u128::MAX and (n' / 1) == (n / d), thus n' < u128::MAX'; qed.") .n(); let mut elected_backed_stake = elected_ptr.borrow().backed_stake; @@ -195,18 +203,19 @@ pub(crate) fn apply_elected( elected_backed_stake = elected_backed_stake.saturating_add(new_edge_weight); // Iterate over all other edges. - for (_, edge) in voter.edges - .iter_mut() - .enumerate() - .filter(|(edge_index, edge_inner)| *edge_index != new_edge_index && edge_inner.weight > 0) - { + for (_, edge) in + voter.edges.iter_mut().enumerate().filter(|(edge_index, edge_inner)| { + *edge_index != new_edge_index && edge_inner.weight > 0 + }) { let mut edge_candidate = edge.candidate.borrow_mut(); if edge_candidate.backed_stake > cutoff { - let stake_to_take = edge.weight.saturating_mul(cutoff) / edge_candidate.backed_stake.max(1); + let stake_to_take = + edge.weight.saturating_mul(cutoff) / edge_candidate.backed_stake.max(1); // subtract this amount from this edge. edge.weight = edge.weight.saturating_sub(stake_to_take); - edge_candidate.backed_stake = edge_candidate.backed_stake.saturating_sub(stake_to_take); + edge_candidate.backed_stake = + edge_candidate.backed_stake.saturating_sub(stake_to_take); // inject it into the outer loop's edge. elected_backed_stake = elected_backed_stake.saturating_add(stake_to_take); @@ -225,41 +234,41 @@ pub(crate) fn apply_elected( #[cfg(test)] mod tests { use super::*; - use crate::{ElectionResult, Assignment}; + use crate::{Assignment, ElectionResult}; use sp_runtime::{Perbill, Percent}; use sp_std::rc::Rc; #[test] fn basic_election_manual_works() { //! Manually run the internal steps of phragmms. In each round we select a new winner by - //! `max_score`, then apply this change by `apply_elected`, and finally do a `balance` round. + //! `max_score`, then apply this change by `apply_elected`, and finally do a `balance` + //! round. 
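Before the manual walk-through below, a compact editorial sketch (not part of the diff) of the end-to-end entry point may help; it mirrors the `basic_election_works` test further down and assumes `sp-npos-elections` and `sp-runtime` as dependencies.

```rust
// Hedged sketch: same toy election as the tests in this diff.
use sp_npos_elections::{phragmms, ElectionResult};
use sp_runtime::Perbill;

fn main() {
	// 3 candidates, 3 voters, elect 2 winners, with 2 balancing rounds at
	// tolerance 0.
	let candidates = vec![1u64, 2, 3];
	let voters = vec![(10u64, 10u64, vec![1u64, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])];

	let ElectionResult { winners, assignments } =
		phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap();

	// Candidates 3 and 2 win, each backed by 30 units of stake, and every
	// voter ends up with one assignment.
	assert_eq!(winners, vec![(3, 30), (2, 30)]);
	assert_eq!(assignments.len(), 3);
}
```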
let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 10, vec![1, 2]), - (20, 20, vec![1, 3]), - (30, 30, vec![2, 3]), - ]; + let voters = vec![(10, 10, vec![1, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])]; let (candidates, mut voters) = setup_inputs(candidates, voters); // Round 1 - let winner = calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); + let winner = + calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); assert_eq!(winner.borrow().who, 3); assert_eq!(winner.borrow().score, 50u32.into()); apply_elected(&mut voters, Rc::clone(&winner)); assert_eq!( - voters.iter().find(|x| x.who == 30).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 30) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (30, vec![(2, 0), (3, 30)]), ); assert_eq!( - voters.iter().find(|x| x.who == 20).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 20) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (20, vec![(1, 0), (3, 20)]), ); @@ -272,30 +281,34 @@ mod tests { balance(&mut voters, 10, 0); // round 2 - let winner = calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); + let winner = + calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); assert_eq!(winner.borrow().who, 2); assert_eq!(winner.borrow().score, 25u32.into()); apply_elected(&mut voters, Rc::clone(&winner)); assert_eq!( - voters.iter().find(|x| x.who == 30).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 30) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (30, vec![(2, 15), (3, 15)]), ); assert_eq!( - voters.iter().find(|x| x.who == 20).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 20) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (20, vec![(1, 0), (3, 20)]), ); assert_eq!( - voters.iter().find(|x| x.who == 10).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 10) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (10, vec![(1, 0), (2, 10)]), ); @@ -308,24 +321,27 @@ mod tests { balance(&mut voters, 10, 0); assert_eq!( - voters.iter().find(|x| x.who == 30).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 30) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (30, vec![(2, 20), (3, 10)]), ); assert_eq!( - voters.iter().find(|x| x.who == 20).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 20) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (20, vec![(1, 0), (3, 20)]), ); assert_eq!( - voters.iter().find(|x| x.who == 10).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 10) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (10, vec![(1, 0), (2, 10)]), ); } @@ -333,25 +349,16 @@ mod tests { #[test] fn 
basic_election_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 10, vec![1, 2]), - (20, 20, vec![1, 3]), - (30, 30, vec![2, 3]), - ]; + let voters = vec![(10, 10, vec![1, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])]; - let ElectionResult { winners, assignments } = phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); + let ElectionResult { winners, assignments } = + phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); assert_eq!(winners, vec![(3, 30), (2, 30)]); assert_eq!( assignments, vec![ - Assignment { - who: 10u64, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 20, - distribution: vec![(3, Perbill::one())], - }, + Assignment { who: 10u64, distribution: vec![(2, Perbill::one())] }, + Assignment { who: 20, distribution: vec![(3, Perbill::one())] }, Assignment { who: 30, distribution: vec![ @@ -376,24 +383,21 @@ mod tests { (130, 1000, vec![61, 71]), ]; - let ElectionResult { winners, assignments: _ } = phragmms::<_, Perbill>(4, candidates, voters, Some((2, 0))).unwrap(); - assert_eq!(winners, vec![ - (11, 3000), - (31, 2000), - (51, 1500), - (61, 1500), - ]); + let ElectionResult { winners, assignments: _ } = + phragmms::<_, Perbill>(4, candidates, voters, Some((2, 0))).unwrap(); + assert_eq!(winners, vec![(11, 3000), (31, 2000), (51, 1500), (61, 1500),]); } #[test] fn large_balance_wont_overflow() { let candidates = vec![1u32, 2, 3]; - let mut voters = (0..1000).map(|i| (10 + i, u64::max_value(), vec![1, 2, 3])).collect::>(); + let mut voters = (0..1000).map(|i| (10 + i, u64::MAX, vec![1, 2, 3])).collect::>(); // give a bit more to 1 and 3. - voters.push((2, u64::max_value(), vec![1, 3])); + voters.push((2, u64::MAX, vec![1, 3])); - let ElectionResult { winners, assignments: _ } = phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); + let ElectionResult { winners, assignments: _ } = + phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); assert_eq!(winners.into_iter().map(|(w, _)| w).collect::>(), vec![1u32, 3]); } } diff --git a/primitives/npos-elections/src/pjr.rs b/primitives/npos-elections/src/pjr.rs new file mode 100644 index 0000000000000..e27acf1408f96 --- /dev/null +++ b/primitives/npos-elections/src/pjr.rs @@ -0,0 +1,603 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implements functions and interfaces to check solutions for being t-PJR. +//! +//! PJR stands for proportional justified representation. PJR is an absolute measure to make +//! sure an NPoS solution adheres to a minimum standard. +//! +//! See [`pjr_check`] which is the main entry point of the module. 
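Before the implementation, a small sketch of the standard-threshold arithmetic the module is built around: `t` is simply the total voter stake divided by the committee size. The helper below is a plain-`u128` restatement for intuition, not the crate's API; the numbers are taken from the `can_convert_data_from_external_api` test in this file.

```rust
// A plain-u128 restatement of `standard_threshold` below, for intuition only.
fn standard_threshold_sketch(
    committee_size: usize,
    stakes: impl IntoIterator<Item = u128>,
) -> u128 {
    // Saturating sum of stakes, then integer division by the committee size
    // (clamped to at least 1).
    let total: u128 = stakes.into_iter().fold(0u128, |acc, s| acc.saturating_add(s));
    total / committee_size.max(1) as u128
}

fn main() {
    // Voters with stakes 10, 20 and 30 electing a committee of 2:
    // the standard PJR threshold is 60 / 2 = 30.
    assert_eq!(standard_threshold_sketch(2, vec![10, 20, 30]), 30);
}
```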
+
+use crate::{
+	Candidate, CandidatePtr, Edge, ExtendedBalance, IdentifierT, Support, SupportMap, Supports,
+	VoteWeight, Voter,
+};
+use sp_arithmetic::{traits::Zero, Perbill};
+use sp_std::{collections::btree_map::BTreeMap, rc::Rc, vec::Vec};
+
+/// The type used as the threshold.
+///
+/// Just some reading sugar; must always be the same as [`ExtendedBalance`].
+type Threshold = ExtendedBalance;
+
+/// Compute the threshold corresponding to the standard PJR property.
+///
+/// `t-PJR` checks can check PJR according to an arbitrary threshold. The threshold can be any
+/// value, but the property gets stronger as the threshold gets smaller. The strongest possible
+/// `t-PJR` property corresponds to `t == 0`.
+///
+/// However, standard PJR is less stringent than that. This function returns the threshold whose
+/// strength corresponds to the standard PJR property.
+///
+/// - `committee_size` is the number of winners of the election.
+/// - `weights` is an iterator of voter stakes. If the sum of stakes is already known,
+///   `std::iter::once(sum_of_stakes)` is appropriate here.
+pub fn standard_threshold(
+	committee_size: usize,
+	weights: impl IntoIterator<Item = ExtendedBalance>,
+) -> Threshold {
+	weights
+		.into_iter()
+		.fold(Threshold::zero(), |acc, elem| acc.saturating_add(elem)) /
+		committee_size.max(1) as Threshold
+}
+
+/// Check a solution to be PJR.
+///
+/// The PJR property is true if `t-PJR` is true when `t == sum(stake) / committee_size`.
+pub fn pjr_check<AccountId: IdentifierT>(
+	supports: &Supports<AccountId>,
+	all_candidates: Vec<AccountId>,
+	all_voters: Vec<(AccountId, VoteWeight, Vec<AccountId>)>,
+) -> Result<(), AccountId> {
+	let t = standard_threshold(
+		supports.len(),
+		all_voters.iter().map(|voter| voter.1 as ExtendedBalance),
+	);
+	t_pjr_check(supports, all_candidates, all_voters, t)
+}
+
+/// Check a solution to be t-PJR.
+///
+/// ### Semantics
+///
+/// The t-PJR property is defined in the paper ["Validator Election in Nominated
+/// Proof-of-Stake"][NPoS], section 5, definition 1.
+///
+/// In plain language, the t-PJR condition is: if there is a group of `N` voters
+/// who have `r` common candidates and can afford to support each of them with backing stake `t`
+/// (i.e `sum(stake(v) for v in voters) == r * t`), then this committee needs to be represented by
+/// at least `r` elected candidates.
+///
+/// Section 5 of the NPoS paper shows that this property can be tested by: for a feasible solution,
+/// if `Max {score(c)} < t` where c is every unelected candidate, then this solution is t-PJR. There
+/// may exist edge cases which satisfy the formal definition of t-PJR but do not pass this test, but
+/// those should be rare enough that we can discount them.
+///
+/// ### Interface
+///
+/// In addition to data that can be computed from the [`Supports`] struct, a PJR check also
+/// needs to inspect un-elected candidates and edges, thus `all_candidates` and `all_voters`.
+///
+/// [NPoS]: https://arxiv.org/pdf/2004.12990v1.pdf
+// ### Implementation Notes
+//
+// The paper uses mathematical notation, which prioritizes single-symbol names. For programmer
+// ease, we map these to more descriptive names as follows:
+//
+// C => all_candidates
+// N => all_voters
+// (A, w) => (candidates, voters)
+//
+// Note that while the names don't explicitly say so, `candidates` are the winning candidates, and
+// `voters` is the set of weighted edges from nominators to winning validators.
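Before the implementation below, note that the testable condition above (`Max {score(c)} < t` over unelected `c`) collapses to a one-liner once pre-scores are known. A condensed, hypothetical restatement, where `pre_scores` is assumed to be a precomputed list of `(candidate, pre_score)` pairs for the unelected candidates only:

```rust
// Condensed restatement of the check implemented by `pjr_check_core` below.
// `pre_scores` is a hypothetical precomputed input, not a crate type.
fn is_t_pjr(pre_scores: &[(u64, u128)], t: u128) -> Result<(), u64> {
    match pre_scores.iter().copied().max_by_key(|&(_, score)| score) {
        // Some unelected candidate could still be given `t` backing: not t-PJR.
        Some((who, score)) if score >= t => Err(who),
        // No unelected candidate reaches `t` (or none exist): t-PJR holds.
        _ => Ok(()),
    }
}
```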
+pub fn t_pjr_check<AccountId: IdentifierT>(
+	supports: &Supports<AccountId>,
+	all_candidates: Vec<AccountId>,
+	all_voters: Vec<(AccountId, VoteWeight, Vec<AccountId>)>,
+	t: Threshold,
+) -> Result<(), AccountId> {
+	// First order of business: derive `(candidates, voters)` from `supports`.
+	let (candidates, voters) = prepare_pjr_input(supports, all_candidates, all_voters);
+	// compute with threshold t.
+	pjr_check_core(candidates.as_ref(), voters.as_ref(), t)
+}
+
+/// The internal implementation of the PJR check after having the data converted.
+///
+/// [`pjr_check`] or [`t_pjr_check`] are typically easier to work with.
+///
+/// This function returns an `AccountId` in the `Err` case. This is the counter_example: the ID of
+/// the unelected candidate with the highest prescore, such that `pre_score(counter_example) >= t`.
+pub fn pjr_check_core<AccountId: IdentifierT>(
+	candidates: &[CandidatePtr<AccountId>],
+	voters: &[Voter<AccountId>],
+	t: Threshold,
+) -> Result<(), AccountId> {
+	let unelected = candidates.iter().filter(|c| !c.borrow().elected);
+	let maybe_max_pre_score = unelected
+		.map(|c| (pre_score(Rc::clone(c), voters, t), c.borrow().who.clone()))
+		.max();
+	// if unelected is empty then the solution is indeed PJR.
+	match maybe_max_pre_score {
+		Some((max_pre_score, counter_example)) if max_pre_score >= t => Err(counter_example),
+		_ => Ok(()),
+	}
+}
+
+/// Validate a challenge to an election result.
+///
+/// A challenge to an election result is valid if there exists some counter_example for which
+/// `pre_score(counter_example) >= threshold`. Validating an existing counter_example is
+/// computationally cheaper than re-running the PJR check.
+///
+/// This function uses the standard threshold.
+///
+/// Returns `true` if the challenge is valid: the proposed solution does not satisfy PJR.
+/// Returns `false` if the challenge is invalid: the proposed solution does in fact satisfy PJR.
+pub fn validate_pjr_challenge<AccountId: IdentifierT>(
+	counter_example: AccountId,
+	supports: &Supports<AccountId>,
+	all_candidates: Vec<AccountId>,
+	all_voters: Vec<(AccountId, VoteWeight, Vec<AccountId>)>,
+) -> bool {
+	let threshold = standard_threshold(
+		supports.len(),
+		all_voters.iter().map(|voter| voter.1 as ExtendedBalance),
+	);
+	validate_t_pjr_challenge(counter_example, supports, all_candidates, all_voters, threshold)
+}
+
+/// Validate a challenge to an election result.
+///
+/// A challenge to an election result is valid if there exists some counter_example for which
+/// `pre_score(counter_example) >= threshold`. Validating an existing counter_example is
+/// computationally cheaper than re-running the PJR check.
+///
+/// This function uses a supplied threshold.
+///
+/// Returns `true` if the challenge is valid: the proposed solution does not satisfy PJR.
+/// Returns `false` if the challenge is invalid: the proposed solution does in fact satisfy PJR.
+pub fn validate_t_pjr_challenge<AccountId: IdentifierT>(
+	counter_example: AccountId,
+	supports: &Supports<AccountId>,
+	all_candidates: Vec<AccountId>,
+	all_voters: Vec<(AccountId, VoteWeight, Vec<AccountId>)>,
+	threshold: Threshold,
+) -> bool {
+	let (candidates, voters) = prepare_pjr_input(supports, all_candidates, all_voters);
+	validate_pjr_challenge_core(counter_example, &candidates, &voters, threshold)
+}
+
+/// Validate a challenge to an election result.
+///
+/// A challenge to an election result is valid if there exists some counter_example for which
+/// `pre_score(counter_example) >= threshold`. Validating an existing counter_example is
+/// computationally cheaper than re-running the PJR check.
+///
+/// Returns `true` if the challenge is valid: the proposed solution does not satisfy PJR.
+/// Returns `false` if the challenge is invalid: the proposed solution does in fact satisfy PJR. +fn validate_pjr_challenge_core( + counter_example: AccountId, + candidates: &[CandidatePtr], + voters: &[Voter], + threshold: Threshold, +) -> bool { + // Performing a linear search of the candidate list is not great, for obvious reasons. However, + // the alternatives are worse: + // + // - we could pre-sort the candidates list in `prepare_pjr_input` (n log n) which would let us + // binary search for the appropriate one here (log n). Overall runtime is `n log n` which is + // worse than the current runtime of `n`. + // + // - we could probably pre-sort the candidates list in `n` in `prepare_pjr_input` using some + // unsafe code leveraging the existing `candidates_index`: allocate an uninitialized vector of + // appropriate length, then copy in all the elements. We'd really prefer to avoid unsafe code + // in the runtime, though. + let candidate = + match candidates.iter().find(|candidate| candidate.borrow().who == counter_example) { + None => return false, + Some(candidate) => candidate.clone(), + }; + pre_score(candidate, &voters, threshold) >= threshold +} + +/// Convert the data types that the user runtime has into ones that can be used by this module. +/// +/// It is expected that this function's interface might change over time, or multiple variants of it +/// can be provided for different use cases. +/// +/// The ultimate goal, in any case, is to convert the election data into [`Candidate`] and [`Voter`] +/// types defined by this crate, whilst setting correct value for some of their fields, namely: +/// 1. Candidate [`backing_stake`](Candidate::backing_stake) and [`elected`](Candidate::elected) if +/// they are a winner. 2. Voter edge [`weight`](Edge::weight) if they are backing a winner. +/// 3. Voter [`budget`](Voter::budget). +/// +/// None of the `load` or `score` values are used and can be ignored. This is similar to +/// [`setup_inputs`] function of this crate. +/// +/// ### Performance (Weight) Notes +/// +/// Note that the current function is rather unfortunately inefficient. The most significant +/// slowdown is the fact that a typical solution that need to be checked for PJR only contains a +/// subset of the entire NPoS edge graph, encoded as `supports`. This only encodes the +/// edges that actually contribute to a winner's backing stake and ignores the rest to save space. +/// To check PJR, we need the entire voter set, including those edges that point to non-winners. +/// This could cause the caller runtime to have to read the entire list of voters, which is assumed +/// to be expensive. +/// +/// A sensible user of this module should make sure that the PJR check is executed and checked as +/// little as possible, and take sufficient economical measures to ensure that this function cannot +/// be abused. +fn prepare_pjr_input( + supports: &Supports, + all_candidates: Vec, + all_voters: Vec<(AccountId, VoteWeight, Vec)>, +) -> (Vec>, Vec>) { + let mut candidates_index: BTreeMap = BTreeMap::new(); + + // dump the staked assignments in a voter-major map for faster access down the road. + let mut assignment_map: BTreeMap> = + BTreeMap::new(); + for (winner_id, Support { voters, .. }) in supports.iter() { + for (voter_id, support) in voters.iter() { + assignment_map + .entry(voter_id.clone()) + .or_default() + .push((winner_id.clone(), *support)); + } + } + + // Convert Suppports into a SupportMap + // + // As a flat list, we're limited to linear search. 
That gives the production of `candidates`, + // below, a complexity of `O(s*c)`, where `s == supports.len()` and `c == all_candidates.len()`. + // For large lists, that's pretty bad. + // + // A `SupportMap`, as a `BTreeMap`, has access timing of `O(lg n)`. This means that constructing + // the map and then indexing from it gives us timing of `O((s + c) * lg(s))`. If in the future + // we get access to a deterministic `HashMap`, we can further improve that to `O(s+c)`. + // + // However, it does mean allocating sufficient space to store all the data again. + let supports: SupportMap = supports.iter().cloned().collect(); + + // collect all candidates and winners into a unified `Vec`. + let candidates = all_candidates + .into_iter() + .enumerate() + .map(|(i, c)| { + candidates_index.insert(c.clone(), i); + + // set the backing value and elected flag if the candidate is among the winners. + let who = c; + let maybe_support = supports.get(&who); + let elected = maybe_support.is_some(); + let backed_stake = maybe_support.map(|support| support.total).unwrap_or_default(); + + Candidate { who, elected, backed_stake, ..Default::default() }.to_ptr() + }) + .collect::>(); + + // collect all voters into a unified Vec. + let voters = all_voters + .into_iter() + .map(|(v, w, ts)| { + let mut edges: Vec> = Vec::with_capacity(ts.len()); + for t in ts { + if edges.iter().any(|e| e.who == t) { + // duplicate edge. + continue + } + + if let Some(idx) = candidates_index.get(&t) { + // if this edge is among the assignments, set the weight as well. + let weight = assignment_map + .get(&v) + .and_then(|d| { + d.iter().find_map(|(x, y)| if x == &t { Some(y) } else { None }) + }) + .cloned() + .unwrap_or_default(); + edges.push(Edge { + who: t, + candidate: Rc::clone(&candidates[*idx]), + weight, + ..Default::default() + }); + } + } + + let who = v; + let budget: ExtendedBalance = w.into(); + Voter { who, budget, edges, ..Default::default() } + }) + .collect::>(); + + (candidates, voters) +} + +/// The pre-score of an unelected candidate. +/// +/// This is the amount of stake that *all voter* can spare to devote to this candidate without +/// allowing the backing stake of any other elected candidate to fall below `t`. +/// +/// In essence, it is the sum(slack(n, t)) for all `n` who vote for `unelected`. +fn pre_score( + unelected: CandidatePtr, + voters: &[Voter], + t: Threshold, +) -> ExtendedBalance { + debug_assert!(!unelected.borrow().elected); + voters + .iter() + .filter(|ref v| v.votes_for(&unelected.borrow().who)) + .fold(Zero::zero(), |acc: ExtendedBalance, voter| acc.saturating_add(slack(voter, t))) +} + +/// The slack of a voter at a given state. +/// +/// The slack of each voter, with threshold `t` is the total amount of stake that this voter can +/// spare to a new potential member, whilst not dropping the backing stake of any of its currently +/// active members below `t`. In essence, for each of the current active candidates `c`, we assume +/// that we reduce the edge weight of `voter` to `c` from `w` to `w * min(1 / (t / support(c)))`. +/// +/// More accurately: +/// +/// 1. If `c` exactly has `t` backing or less, then we don't generate any slack. +/// 2. If `c` has more than `t`, then we reduce it to `t`. 
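Numerically, the two rules above mean each elected edge contributes `w * min(1, t / support(c))` to the voter's committed stake, and slack is the budget minus that total. A standalone sketch with plain integers, mirroring the `slack_works` test below (a voter with budget 30 backing two elected candidates at stakes 10 and 20); integer division stands in for `Perbill::from_rational`:

```rust
// Standalone restatement of `slack`, for intuition only. Each pair in
// `elected_edges` is (edge_weight, candidate_backed_stake).
fn slack_sketch(budget: u128, elected_edges: &[(u128, u128)], t: u128) -> u128 {
    let committed: u128 = elected_edges
        .iter()
        .map(|&(w, backed)| {
            if backed <= t {
                // rule 1: a candidate at or below `t` keeps its full edge weight.
                w
            } else {
                // rule 2: scale the edge down as if the candidate were reduced to `t`.
                w * t / backed
            }
        })
        .sum();
    budget.saturating_sub(committed)
}

fn main() {
    // Mirrors `slack_works`: budget 30, elected edges (10, 10) and (20, 20).
    assert_eq!(slack_sketch(30, &[(10, 10), (20, 20)], 15), 5);
    assert_eq!(slack_sketch(30, &[(10, 10), (20, 20)], 5), 20);
}
```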
+fn slack(voter: &Voter, t: Threshold) -> ExtendedBalance { + let budget = voter.budget; + let leftover = voter.edges.iter().fold(Zero::zero(), |acc: ExtendedBalance, edge| { + let candidate = edge.candidate.borrow(); + if candidate.elected { + let extra = + Perbill::one().min(Perbill::from_rational(t, candidate.backed_stake)) * edge.weight; + acc.saturating_add(extra) + } else { + // No slack generated here. + acc + } + }); + + // NOTE: candidate for saturating_log_sub(). Defensive-only. + budget.saturating_sub(leftover) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn setup_voter(who: u32, votes: Vec<(u32, u128, bool)>) -> Voter { + let mut voter = Voter::new(who); + let mut budget = 0u128; + let candidates = votes + .into_iter() + .map(|(t, w, e)| { + budget += w; + Candidate { who: t, elected: e, backed_stake: w, ..Default::default() } + }) + .collect::>(); + let edges = candidates + .into_iter() + .map(|c| Edge { + who: c.who, + weight: c.backed_stake, + candidate: c.to_ptr(), + ..Default::default() + }) + .collect::>(); + voter.edges = edges; + voter.budget = budget; + voter + } + + fn assert_core_failure( + candidates: &[CandidatePtr], + voters: &[Voter], + t: Threshold, + ) { + let counter_example = pjr_check_core(candidates, voters, t).unwrap_err(); + assert!(validate_pjr_challenge_core(counter_example, candidates, voters, t)); + } + + #[test] + fn slack_works() { + let voter = setup_voter(10, vec![(1, 10, true), (2, 20, true)]); + + assert_eq!(slack(&voter, 15), 5); + assert_eq!(slack(&voter, 17), 3); + assert_eq!(slack(&voter, 10), 10); + assert_eq!(slack(&voter, 5), 20); + } + + #[test] + fn pre_score_works() { + // will give 5 slack + let v1 = setup_voter(10, vec![(1, 10, true), (2, 20, true), (3, 0, false)]); + // will give no slack + let v2 = setup_voter(20, vec![(1, 5, true), (2, 5, true)]); + // will give 10 slack. + let v3 = setup_voter(30, vec![(1, 20, true), (2, 20, true), (3, 0, false)]); + + let unelected = Candidate { who: 3u32, elected: false, ..Default::default() }.to_ptr(); + let score = pre_score(unelected, &vec![v1, v2, v3], 15); + + assert_eq!(score, 15); + } + + #[test] + fn can_convert_data_from_external_api() { + let all_candidates = vec![10, 20, 30, 40]; + let all_voters = vec![ + (1, 10, vec![10, 20, 30, 40]), + (2, 20, vec![10, 20, 30, 40]), + (3, 30, vec![10, 30]), + ]; + // tuples in voters vector are (AccountId, Balance) + let supports: Supports = vec![ + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + ]; + + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); + + // elected flag and backing must be set correctly + assert_eq!( + candidates + .iter() + .map(|c| (c.borrow().who.clone(), c.borrow().elected, c.borrow().backed_stake)) + .collect::>(), + vec![(10, false, 0), (20, true, 15), (30, false, 0), (40, true, 15)], + ); + + // edge weight must be set correctly + assert_eq!( + voters + .iter() + .map(|v| ( + v.who, + v.budget, + v.edges.iter().map(|e| (e.who, e.weight)).collect::>(), + )) + .collect::>(), + vec![ + (1, 10, vec![(10, 0), (20, 5), (30, 0), (40, 5)]), + (2, 20, vec![(10, 0), (20, 10), (30, 0), (40, 10)]), + (3, 30, vec![(10, 0), (30, 0)]), + ], + ); + + // fyi. this is not PJR, obviously because the votes of 3 can bump the stake a lot but they + // are being ignored. 
+ assert_core_failure(&candidates, &voters, 1); + assert_core_failure(&candidates, &voters, 10); + assert_core_failure(&candidates, &voters, 20); + } + + // These next tests ensure that the threshold phase change property holds for us, but that's not + // their real purpose. They were written to help develop an intuition about what the threshold + // value actually means in layman's terms. + // + // The results tend to support the intuition that the threshold is the voting power at and below + // which a voter's preferences can simply be ignored. + #[test] + fn find_upper_bound_for_threshold_scenario_1() { + let all_candidates = vec![10, 20, 30, 40]; + let all_voters = vec![ + (1, 10, vec![10, 20, 30, 40]), + (2, 20, vec![10, 20, 30, 40]), + (3, 30, vec![10, 30]), + ]; + // tuples in voters vector are (AccountId, Balance) + let supports: Supports = vec![ + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + ]; + + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); + + find_threshold_phase_change_for_scenario(candidates, voters); + } + + #[test] + fn find_upper_bound_for_threshold_scenario_2() { + let all_candidates = vec![10, 20, 30, 40]; + let all_voters = vec![ + (1, 10, vec![10, 20, 30, 40]), + (2, 20, vec![10, 20, 30, 40]), + (3, 25, vec![10, 30]), + ]; + // tuples in voters vector are (AccountId, Balance) + let supports: Supports = vec![ + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + ]; + + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); + + find_threshold_phase_change_for_scenario(candidates, voters); + } + + #[test] + fn find_upper_bound_for_threshold_scenario_3() { + let all_candidates = vec![10, 20, 30, 40]; + let all_voters = vec![ + (1, 10, vec![10, 20, 30, 40]), + (2, 20, vec![10, 20, 30, 40]), + (3, 35, vec![10, 30]), + ]; + // tuples in voters vector are (AccountId, Balance) + let supports: Supports = vec![ + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + ]; + + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); + + find_threshold_phase_change_for_scenario(candidates, voters); + } + + fn find_threshold_phase_change_for_scenario( + candidates: Vec>, + voters: Vec>, + ) -> Threshold { + let mut threshold = 1; + let mut prev_threshold = 0; + + // find the binary range containing the threshold beyond which the PJR check succeeds + while pjr_check_core(&candidates, &voters, threshold).is_err() { + prev_threshold = threshold; + threshold = threshold + .checked_mul(2) + .expect("pjr check must fail before we run out of capacity in u128"); + } + + // now binary search within that range to find the phase threshold + let mut high_bound = threshold; + let mut low_bound = prev_threshold; + + while high_bound - low_bound > 1 { + // maintain the invariant that low_bound fails and high_bound passes + let test = low_bound + ((high_bound - low_bound) / 2); + if pjr_check_core(&candidates, &voters, test).is_ok() { + high_bound = test; + } else { + low_bound = test; + } + } + + println!("highest failing check: {}", low_bound); + println!("lowest succeeding check: {}", high_bound); + + // for a value to be a threshold, it must be the boundary between two conditions + let mut unexpected_failures = Vec::new(); + let mut unexpected_successes = Vec::new(); + for t in 
0..=low_bound { + if pjr_check_core(&candidates, &voters, t).is_ok() { + unexpected_successes.push(t); + } + } + for t in high_bound..(high_bound * 2) { + if pjr_check_core(&candidates, &voters, t).is_err() { + unexpected_failures.push(t); + } + } + dbg!(&unexpected_successes, &unexpected_failures); + assert!(unexpected_failures.is_empty() && unexpected_successes.is_empty()); + + high_bound + } +} diff --git a/primitives/npos-elections/src/reduce.rs b/primitives/npos-elections/src/reduce.rs index 17d7dd1290f7d..8b90796af85ca 100644 --- a/primitives/npos-elections/src/reduce.rs +++ b/primitives/npos-elections/src/reduce.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,15 +45,17 @@ //! //! ### Resources: //! -//! 1. https://hackmd.io/JOn9x98iS0e0DPWQ87zGWg?view +//! 1. -use crate::node::{Node, NodeId, NodeRef, NodeRole}; -use crate::{ExtendedBalance, IdentifierT, StakedAssignment}; +use crate::{ + node::{Node, NodeId, NodeRef, NodeRole}, + ExtendedBalance, IdentifierT, StakedAssignment, +}; use sp_arithmetic::traits::{Bounded, Zero}; use sp_std::{ collections::btree_map::{BTreeMap, Entry::*}, - vec, prelude::*, + vec, }; /// Map type used for reduce_4. Can be easily swapped with HashMap. @@ -63,7 +65,7 @@ type Map = BTreeMap<(A, A), A>; fn combinations_2(input: &[T]) -> Vec<(T, T)> { let n = input.len(); if n < 2 { - return Default::default(); + return Default::default() } let mut comb = Vec::with_capacity(n * (n - 1) / 2); @@ -126,7 +128,7 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { match combination_map.entry((v1.clone(), v2.clone())) { Vacant(entry) => { entry.insert(who.clone()); - } + }, Occupied(mut entry) => { let other_who = entry.get_mut(); @@ -141,29 +143,30 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { .filter(|(t, _)| *t == v1 || *t == v2) .count() != 2 { - continue; + continue } // check if other_who voted for the same pair v1, v2. let maybe_other_assignments = assignments.iter().find(|a| a.who == *other_who); if maybe_other_assignments.is_none() { - continue; + continue } let other_assignment = maybe_other_assignments.expect("value is checked to be 'Some'"); // Collect potential cycle votes - let mut other_cycle_votes = other_assignment - .distribution - .iter() - .filter_map(|(t, w)| { - if *t == v1 || *t == v2 { - Some((t.clone(), *w)) - } else { - None - } - }) - .collect::>(); + let mut other_cycle_votes = + other_assignment + .distribution + .iter() + .filter_map(|(t, w)| { + if *t == v1 || *t == v2 { + Some((t.clone(), *w)) + } else { + None + } + }) + .collect::>(); let other_votes_count = other_cycle_votes.len(); @@ -175,21 +178,18 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { if other_votes_count < 2 { // This is not a cycle. Replace and continue. *other_who = who.clone(); - continue; + continue } else if other_votes_count == 2 { // This is a cycle. let mut who_cycle_votes: Vec<(A, ExtendedBalance)> = Vec::with_capacity(2); - assignments[assignment_index] - .distribution - .iter() - .for_each(|(t, w)| { - if *t == v1 || *t == v2 { - who_cycle_votes.push((t.clone(), *w)); - } - }); + assignments[assignment_index].distribution.iter().for_each(|(t, w)| { + if *t == v1 || *t == v2 { + who_cycle_votes.push((t.clone(), *w)); + } + }); if who_cycle_votes.len() != 2 { - continue; + continue } // Align the targets similarly. 
This helps with the circulation below. @@ -240,53 +240,39 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { // apply changes let mut remove_indices: Vec = Vec::with_capacity(1); increase_indices.into_iter().for_each(|i| { - let voter = if i < 2 { - who.clone() - } else { - other_who.clone() - }; + let voter = if i < 2 { who.clone() } else { other_who.clone() }; // Note: so this is pretty ambiguous. We should only look for one // assignment that meets this criteria and if we find multiple then that // is a corrupt input. Same goes for the next block. - assignments - .iter_mut() - .filter(|a| a.who == voter) - .for_each(|ass| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == cycle[i].0) - .map(|idx| { - let next_value = - ass.distribution[idx].1.saturating_add(min_value); - ass.distribution[idx].1 = next_value; - }); - }); + assignments.iter_mut().filter(|a| a.who == voter).for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_add(min_value); + ass.distribution[idx].1 = next_value; + }); + }); }); decrease_indices.into_iter().for_each(|i| { - let voter = if i < 2 { - who.clone() - } else { - other_who.clone() - }; - assignments - .iter_mut() - .filter(|a| a.who == voter) - .for_each(|ass| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == cycle[i].0) - .map(|idx| { - let next_value = - ass.distribution[idx].1.saturating_sub(min_value); - if next_value.is_zero() { - ass.distribution.remove(idx); - remove_indices.push(i); - num_changed += 1; - } else { - ass.distribution[idx].1 = next_value; - } - }); - }); + let voter = if i < 2 { who.clone() } else { other_who.clone() }; + assignments.iter_mut().filter(|a| a.who == voter).for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_sub(min_value); + if next_value.is_zero() { + ass.distribution.remove(idx); + remove_indices.push(i); + num_changed += 1; + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); }); // remove either one of them. @@ -297,21 +283,21 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { match (who_removed, other_removed) { (false, true) => { *other_who = who.clone(); - } + }, (true, false) => { // nothing, other_who can stay there. - } + }, (true, true) => { // remove and don't replace entry.remove(); - } + }, (false, false) => { // Neither of the edges was removed? impossible. panic!("Duplicate voter (or other corrupt input)."); - } + }, } } - } + }, } } } @@ -350,7 +336,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let maybe_dist = assignments[assignment_index].distribution.get(dist_index); if maybe_dist.is_none() { // The rest of this loop is moot. 
- break; + break } let (target, _) = maybe_dist.expect("Value checked to be some").clone(); @@ -377,19 +363,19 @@ fn reduce_all(assignments: &mut Vec>) -> u32 (false, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue; - } + continue + }, (false, true) => { Node::set_parent_of(&voter_node, &target_node); dist_index += 1; - continue; - } + continue + }, (true, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue; - } - (true, true) => { /* don't continue and execute the rest */ } + continue + }, + (true, true) => { /* don't continue and execute the rest */ }, }; let (voter_root, voter_root_path) = Node::root(&voter_node); @@ -405,10 +391,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 // because roots are the same. #[cfg(feature = "std")] - debug_assert_eq!( - target_root_path.last().unwrap(), - voter_root_path.last().unwrap() - ); + debug_assert_eq!(target_root_path.last().unwrap(), voter_root_path.last().unwrap()); debug_assert!(common_count > 0); // cycle part of each path will be `path[path.len() - common_count - 1 : 0]` @@ -523,8 +506,8 @@ fn reduce_all(assignments: &mut Vec>) -> u32 }; if next_value.is_zero() { - // if the removed edge is from the current assignment, dis_index - // should NOT be increased. + // if the removed edge is from the current assignment, + // index should NOT be increased. if target_ass_index == assignment_index { should_inc_counter = false } @@ -568,8 +551,8 @@ fn reduce_all(assignments: &mut Vec>) -> u32 }; if next_value.is_zero() { - // if the removed edge is from the current assignment, dis_index - // should NOT be increased. + // if the removed edge is from the current assignment, + // index should NOT be increased. if target_ass_index == assignment_index { should_inc_counter = false } @@ -602,7 +585,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = voter_root_path[i].clone().borrow().id.who.clone(); let next = voter_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break; + break } Node::set_parent_of(&voter_root_path[i + 1], &voter_root_path[i]); } @@ -613,7 +596,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = target_root_path[i].clone().borrow().id.who.clone(); let next = target_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break; + break } Node::set_parent_of(&target_root_path[i + 1], &target_root_path[i]); } @@ -663,9 +646,9 @@ mod tests { #[test] fn merging_works() { - // D <-- A <-- B <-- C + // D <-- A <-- B <-- C // - // F <-- E + // F <-- E let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); @@ -682,17 +665,17 @@ mod tests { let path2 = vec![e.clone(), f.clone()]; merge(path1, path2); - // D <-- A <-- B <-- C - // | - // F --> E --> --> + // D <-- A <-- B <-- C + // | + // F --> E --> --> assert_eq!(e.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c } #[test] fn merge_with_len_one() { - // D <-- A <-- B <-- C + // D <-- A <-- B <-- C // - // F <-- E + // F <-- E let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); @@ -707,9 +690,9 @@ mod tests { let path2 = vec![f.clone()]; merge(path1, path2); - // D <-- A <-- B <-- C - // | - // F 
--> --> + // D <-- A <-- B <-- C + // | + // F --> --> assert_eq!(f.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c } @@ -718,14 +701,8 @@ mod tests { use super::*; let assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 25), (20, 75)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 50), (20, 50)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 25), (20, 75)] }, + StakedAssignment { who: 2, distribution: vec![(10, 50), (20, 50)] }, ]; let mut new_assignments = assignments.clone(); @@ -735,14 +712,8 @@ mod tests { assert_eq!( new_assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(20, 100),], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 75), (20, 25),], - }, + StakedAssignment { who: 1, distribution: vec![(20, 100),] }, + StakedAssignment { who: 2, distribution: vec![(10, 75), (20, 25),] }, ], ); } @@ -750,26 +721,11 @@ mod tests { #[test] fn basic_reduce_all_cycles_works() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10)] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 3, distribution: vec![(20, 15), (40, 15)] }, + StakedAssignment { who: 4, distribution: vec![(20, 10), (30, 10), (40, 20)] }, + StakedAssignment { who: 5, distribution: vec![(20, 20), (30, 10), (40, 20)] }, ]; assert_eq!(3, reduce_all(&mut assignments)); @@ -777,26 +733,11 @@ mod tests { assert_eq!( assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10),] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5),] }, + StakedAssignment { who: 3, distribution: vec![(20, 30),] }, + StakedAssignment { who: 4, distribution: vec![(40, 40),] }, + StakedAssignment { who: 5, distribution: vec![(20, 15), (30, 20), (40, 15),] }, ], ) } @@ -804,26 +745,11 @@ mod tests { #[test] fn basic_reduce_works() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10)] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 3, distribution: vec![(20, 15), (40, 15)] }, + StakedAssignment { who: 4, distribution: vec![(20, 10), (30, 10), (40, 20)] }, + StakedAssignment { who: 5, distribution: vec![(20, 20), (30, 10), (40, 20)] }, ]; assert_eq!(3, reduce(&mut assignments)); @@ -831,26 +757,11 @@ mod tests { 
assert_eq!( assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10),] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5),] }, + StakedAssignment { who: 3, distribution: vec![(20, 30),] }, + StakedAssignment { who: 4, distribution: vec![(40, 40),] }, + StakedAssignment { who: 5, distribution: vec![(20, 15), (30, 20), (40, 15),] }, ], ) } @@ -858,35 +769,14 @@ mod tests { #[test] fn should_deal_with_self_vote() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10)] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 3, distribution: vec![(20, 15), (40, 15)] }, + StakedAssignment { who: 4, distribution: vec![(20, 10), (30, 10), (40, 20)] }, + StakedAssignment { who: 5, distribution: vec![(20, 20), (30, 10), (40, 20)] }, // self vote from 10 and 20 to itself. - StakedAssignment { - who: 10, - distribution: vec![(10, 100)], - }, - StakedAssignment { - who: 20, - distribution: vec![(20, 200)], - }, + StakedAssignment { who: 10, distribution: vec![(10, 100)] }, + StakedAssignment { who: 20, distribution: vec![(20, 200)] }, ]; assert_eq!(3, reduce(&mut assignments)); @@ -894,35 +784,14 @@ mod tests { assert_eq!( assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10),] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5),] }, + StakedAssignment { who: 3, distribution: vec![(20, 30),] }, + StakedAssignment { who: 4, distribution: vec![(40, 40),] }, + StakedAssignment { who: 5, distribution: vec![(20, 15), (30, 20), (40, 15),] }, // should stay untouched. 
- StakedAssignment { - who: 10, - distribution: vec![(10, 100)] - }, - StakedAssignment { - who: 20, - distribution: vec![(20, 200)] - }, + StakedAssignment { who: 10, distribution: vec![(10, 100)] }, + StakedAssignment { who: 20, distribution: vec![(20, 200)] }, ], ) } @@ -930,55 +799,23 @@ mod tests { #[test] fn reduce_3_common_votes_same_weight() { let mut assignments = vec![ - StakedAssignment { - who: 4, - distribution: vec![ - ( - 1000000, - 100, - ), - ( - 1000002, - 100, - ), - ( - 1000004, - 100, - ), - ], - }, - StakedAssignment { - who: 5, - distribution: vec![ - ( - 1000000, - 100, - ), - ( - 1000002, - 100, - ), - ( - 1000004, - 100, - ), - ], - }, - ]; + StakedAssignment { + who: 4, + distribution: vec![(1000000, 100), (1000002, 100), (1000004, 100)], + }, + StakedAssignment { + who: 5, + distribution: vec![(1000000, 100), (1000002, 100), (1000004, 100)], + }, + ]; reduce_4(&mut assignments); assert_eq!( assignments, vec![ - StakedAssignment { - who: 4, - distribution: vec![(1000000, 200,), (1000004, 100,),], - }, - StakedAssignment { - who: 5, - distribution: vec![(1000002, 200,), (1000004, 100,),], - }, + StakedAssignment { who: 4, distribution: vec![(1000000, 200,), (1000004, 100,),] }, + StakedAssignment { who: 5, distribution: vec![(1000002, 200,), (1000004, 100,),] }, ], ) } @@ -987,18 +824,9 @@ mod tests { #[should_panic] fn reduce_panics_on_duplicate_voter() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10), (20, 10)], - }, - StakedAssignment { - who: 1, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 15)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10), (20, 10)] }, + StakedAssignment { who: 1, distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 15)] }, ]; reduce(&mut assignments); @@ -1007,10 +835,7 @@ mod tests { #[test] fn should_deal_with_duplicates_target() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 15), (20, 5)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 15), (20, 5)] }, StakedAssignment { who: 2, distribution: vec![ @@ -1029,10 +854,7 @@ mod tests { assert_eq!( assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 20),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 20),] }, StakedAssignment { who: 2, distribution: vec![ diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index dc7a1a5fdfb97..bf9ca57677efa 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,20 @@ //! Tests for npos-elections. 
-use crate::mock::*; use crate::{ - seq_phragmen, balancing, build_support_map, is_score_better, helpers::*, - Support, StakedAssignment, Assignment, ElectionResult, ExtendedBalance, setup_inputs, - seq_phragmen_core, Voter, + balancing, helpers::*, is_score_better, mock::*, seq_phragmen, seq_phragmen_core, setup_inputs, + to_support_map, Assignment, ElectionResult, ExtendedBalance, IndexAssignment, NposSolution, + StakedAssignment, Support, Voter, }; +use rand::{self, SeedableRng}; +use sp_arithmetic::{PerU16, Perbill, Percent, Permill}; +use std::convert::TryInto; use substrate_test_utils::assert_eq_uvec; -use sp_arithmetic::{Perbill, Permill, Percent, PerU16}; #[test] fn float_phragmen_poc_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30), (1, 0), (2, 0), (3, 0)]); let mut phragmen_result = elect_float(2, candidates, voters, &stake_of).unwrap(); let winners = phragmen_result.clone().winners; @@ -42,44 +39,69 @@ fn float_phragmen_poc_works() { assert_eq_uvec!(winners, vec![(2, 40), (3, 50)]); assert_eq_uvec!( assignments, - vec![ - (10, vec![(2, 1.0)]), - (20, vec![(3, 1.0)]), - (30, vec![(2, 0.5), (3, 0.5)]), - ] + vec![(10, vec![(2, 1.0)]), (20, vec![(3, 1.0)]), (30, vec![(2, 0.5), (3, 0.5)]),] ); let mut support_map = build_support_map_float(&mut phragmen_result, &stake_of); assert_eq!( support_map.get(&2).unwrap(), - &_Support { own: 0.0, total: 25.0, others: vec![(10u64, 10.0), (30u64, 15.0)]} + &_Support { own: 0.0, total: 25.0, others: vec![(10u64, 10.0), (30u64, 15.0)] } ); assert_eq!( support_map.get(&3).unwrap(), - &_Support { own: 0.0, total: 35.0, others: vec![(20u64, 20.0), (30u64, 15.0)]} + &_Support { own: 0.0, total: 35.0, others: vec![(20u64, 20.0), (30u64, 15.0)] } ); equalize_float(phragmen_result.assignments, &mut support_map, 0.0, 2, stake_of); assert_eq!( support_map.get(&2).unwrap(), - &_Support { own: 0.0, total: 30.0, others: vec![(10u64, 10.0), (30u64, 20.0)]} + &_Support { own: 0.0, total: 30.0, others: vec![(10u64, 10.0), (30u64, 20.0)] } ); assert_eq!( support_map.get(&3).unwrap(), - &_Support { own: 0.0, total: 30.0, others: vec![(20u64, 20.0), (30u64, 10.0)]} + &_Support { own: 0.0, total: 30.0, others: vec![(20u64, 20.0), (30u64, 10.0)] } + ); +} + +#[test] +fn phragmen_core_test_without_edges() { + let candidates = vec![1, 2, 3]; + let voters = vec![(10, 10, vec![]), (20, 20, vec![]), (30, 30, vec![])]; + + let (candidates, voters) = setup_inputs(candidates, voters); + + assert_eq!( + voters + .iter() + .map(|v| ( + v.who, + v.budget, + (v.edges.iter().map(|e| (e.who, e.weight)).collect::>()), + )) + .collect::>(), + vec![] + ); + + assert_eq!( + candidates + .iter() + .map(|c_ptr| ( + c_ptr.borrow().who, + c_ptr.borrow().elected, + c_ptr.borrow().round, + c_ptr.borrow().backed_stake, + )) + .collect::>(), + vec![(1, false, 0, 0), (2, false, 0, 0), (3, false, 0, 0),] ); } #[test] fn phragmen_core_poc_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 10, vec![1, 2]), - (20, 20, vec![1, 3]), - (30, 30, vec![2, 3]), - ]; + let voters = vec![(10, 10, vec![1, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])]; let (candidates, voters) = setup_inputs(candidates, voters); let (candidates, voters) = seq_phragmen_core(2, candidates, voters).unwrap(); @@ -93,11 +115,7 @@ fn phragmen_core_poc_works() { (v.edges.iter().map(|e| (e.who, 
e.weight)).collect::>()), )) .collect::>(), - vec![ - (10, 10, vec![(2, 10)]), - (20, 20, vec![(3, 20)]), - (30, 30, vec![(2, 15), (3, 15)]), - ] + vec![(10, 10, vec![(2, 10)]), (20, 20, vec![(3, 20)]), (30, 30, vec![(2, 15), (3, 15)]),] ); assert_eq!( @@ -108,12 +126,9 @@ fn phragmen_core_poc_works() { c_ptr.borrow().elected, c_ptr.borrow().round, c_ptr.borrow().backed_stake, - )).collect::>(), - vec![ - (1, false, 0, 0), - (2, true, 1, 25), - (3, true, 0, 35), - ] + )) + .collect::>(), + vec![(1, false, 0, 0), (2, true, 1, 25), (3, true, 0, 35),] ); } @@ -162,7 +177,8 @@ fn balancing_core_works() { c_ptr.borrow().elected, c_ptr.borrow().round, c_ptr.borrow().backed_stake, - )).collect::>(), + )) + .collect::>(), vec![ (1, true, 1, 37), (2, true, 2, 38), @@ -179,40 +195,30 @@ fn voter_normalize_ops_works() { use sp_std::{cell::RefCell, rc::Rc}; // normalize { - let c1 = Candidate { who: 10, elected: false ,..Default::default() }; - let c2 = Candidate { who: 20, elected: false ,..Default::default() }; - let c3 = Candidate { who: 30, elected: false ,..Default::default() }; + let c1 = Candidate { who: 10, elected: false, ..Default::default() }; + let c2 = Candidate { who: 20, elected: false, ..Default::default() }; + let c3 = Candidate { who: 30, elected: false, ..Default::default() }; let e1 = Edge { candidate: Rc::new(RefCell::new(c1)), weight: 30, ..Default::default() }; let e2 = Edge { candidate: Rc::new(RefCell::new(c2)), weight: 33, ..Default::default() }; let e3 = Edge { candidate: Rc::new(RefCell::new(c3)), weight: 30, ..Default::default() }; - let mut v = Voter { - who: 1, - budget: 100, - edges: vec![e1, e2, e3], - ..Default::default() - }; + let mut v = Voter { who: 1, budget: 100, edges: vec![e1, e2, e3], ..Default::default() }; v.try_normalize().unwrap(); assert_eq!(v.edges.iter().map(|e| e.weight).collect::>(), vec![34, 33, 33]); } // // normalize_elected { - let c1 = Candidate { who: 10, elected: false ,..Default::default() }; - let c2 = Candidate { who: 20, elected: true ,..Default::default() }; - let c3 = Candidate { who: 30, elected: true ,..Default::default() }; + let c1 = Candidate { who: 10, elected: false, ..Default::default() }; + let c2 = Candidate { who: 20, elected: true, ..Default::default() }; + let c3 = Candidate { who: 30, elected: true, ..Default::default() }; let e1 = Edge { candidate: Rc::new(RefCell::new(c1)), weight: 30, ..Default::default() }; let e2 = Edge { candidate: Rc::new(RefCell::new(c2)), weight: 33, ..Default::default() }; let e3 = Edge { candidate: Rc::new(RefCell::new(c3)), weight: 30, ..Default::default() }; - let mut v = Voter { - who: 1, - budget: 100, - edges: vec![e1, e2, e3], - ..Default::default() - }; + let mut v = Voter { who: 1, budget: 100, edges: vec![e1, e2, e3], ..Default::default() }; v.try_normalize_elected().unwrap(); assert_eq!(v.edges.iter().map(|e| e.weight).collect::>(), vec![30, 34, 66]); @@ -222,64 +228,45 @@ fn voter_normalize_ops_works() { #[test] fn phragmen_poc_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + 
) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 25), (3, 35)]); assert_eq_uvec!( assignments, vec![ - Assignment { - who: 10u64, - distribution: vec![(2, Perbill::from_percent(100))], - }, - Assignment { - who: 20, - distribution: vec![(3, Perbill::from_percent(100))], - }, + Assignment { who: 10u64, distribution: vec![(2, Perbill::from_percent(100))] }, + Assignment { who: 20, distribution: vec![(3, Perbill::from_percent(100))] }, Assignment { who: 30, distribution: vec![ - (2, Perbill::from_percent(100/2)), - (3, Perbill::from_percent(100/2)), + (2, Perbill::from_percent(100 / 2)), + (3, Perbill::from_percent(100 / 2)), ], }, ] ); let staked = assignment_ratio_to_staked(assignments, &stake_of); - let winners = to_without_backing(winners); - let support_map = build_support_map::(&winners, &staked).unwrap(); + let support_map = to_support_map::(&staked); assert_eq_uvec!( staked, vec![ - StakedAssignment { - who: 10u64, - distribution: vec![(2, 10)], - }, - StakedAssignment { - who: 20, - distribution: vec![(3, 20)], - }, - StakedAssignment { - who: 30, - distribution: vec![ - (2, 15), - (3, 15), - ], - }, + StakedAssignment { who: 10u64, distribution: vec![(2, 10)] }, + StakedAssignment { who: 20, distribution: vec![(3, 20)] }, + StakedAssignment { who: 30, distribution: vec![(2, 15), (3, 15),] }, ] ); @@ -296,32 +283,26 @@ fn phragmen_poc_works() { #[test] fn phragmen_poc_works_with_balancing() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), Some((4, 0)), - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 30), (3, 30)]); assert_eq_uvec!( assignments, vec![ - Assignment { - who: 10u64, - distribution: vec![(2, Perbill::from_percent(100))], - }, - Assignment { - who: 20, - distribution: vec![(3, Perbill::from_percent(100))], - }, + Assignment { who: 10u64, distribution: vec![(2, Perbill::from_percent(100))] }, + Assignment { who: 20, distribution: vec![(3, Perbill::from_percent(100))] }, Assignment { who: 30, distribution: vec![ @@ -333,27 +314,14 @@ fn phragmen_poc_works_with_balancing() { ); let staked = assignment_ratio_to_staked(assignments, &stake_of); - let winners = to_without_backing(winners); - let support_map = build_support_map::(&winners, &staked).unwrap(); + let support_map = to_support_map::(&staked); assert_eq_uvec!( staked, vec![ - StakedAssignment { - who: 10u64, - distribution: vec![(2, 10)], - }, - StakedAssignment { - who: 20, - distribution: vec![(3, 20)], - }, - StakedAssignment { - who: 30, - distribution: vec![ - (2, 20), - (3, 10), - ], - }, + StakedAssignment { who: 10u64, distribution: vec![(2, 10)] }, + StakedAssignment { who: 20, distribution: vec![(3, 20)] }, + StakedAssignment { who: 30, distribution: vec![(2, 20), (3, 10),] }, ] ); @@ -367,48 +335,29 @@ fn phragmen_poc_works_with_balancing() { ); } - #[test] fn phragmen_poc_2_works() { let candidates = vec![10, 20, 30]; - let voters = vec![ - (2, vec![10, 20, 30]), - (4, vec![10, 20, 40]), - ]; - let stake_of = create_stake_of(&[ - (10, 1000), - (20, 1000), - (30, 1000), - (40, 1000), - (2, 500), - (4, 500), - ]); 
- - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); - run_and_compare::(candidates, voters, &stake_of, 2); + let voters = vec![(2, vec![10, 20, 30]), (4, vec![10, 20, 40])]; + let stake_of = + create_stake_of(&[(10, 1000), (20, 1000), (30, 1000), (40, 1000), (2, 500), (4, 500)]); + + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); + run_and_compare::(candidates, voters, &stake_of, 2); } #[test] fn phragmen_poc_3_works() { let candidates = vec![10, 20, 30]; - let voters = vec![ - (2, vec![10, 20, 30]), - (4, vec![10, 20, 40]), - ]; - let stake_of = create_stake_of(&[ - (10, 1000), - (20, 1000), - (30, 1000), - (2, 50), - (4, 1000), - ]); + let voters = vec![(2, vec![10, 20, 30]), (4, vec![10, 20, 40])]; + let stake_of = create_stake_of(&[(10, 1000), (20, 1000), (30, 1000), (2, 50), (4, 1000)]); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); - run_and_compare::(candidates, voters, &stake_of, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); + run_and_compare::(candidates, voters, &stake_of, 2); } #[test] @@ -417,11 +366,11 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { // candidate can have the maximum amount of tokens, and also supported by the maximum. 
let candidates = vec![1, 2, 3, 4, 5]; let stake_of = create_stake_of(&[ - (1, (u64::max_value() - 1).into()), - (2, (u64::max_value() - 4).into()), - (3, (u64::max_value() - 5).into()), - (4, (u64::max_value() - 3).into()), - (5, (u64::max_value() - 2).into()), + (1, (u64::MAX - 1).into()), + (2, (u64::MAX - 4).into()), + (3, (u64::MAX - 5).into()), + (4, (u64::MAX - 3).into()), + (5, (u64::MAX - 2).into()), ]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( @@ -432,96 +381,80 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(1, 18446744073709551614u128), (5, 18446744073709551613u128)]); assert_eq!(assignments.len(), 2); - check_assignments_sum(assignments); + check_assignments_sum(&assignments); } #[test] fn phragmen_accuracy_on_large_scale_voters_and_candidates() { let candidates = vec![1, 2, 3, 4, 5]; - let mut voters = vec![ - (13, vec![1, 3, 5]), - (14, vec![2, 4]), - ]; + let mut voters = vec![(13, vec![1, 3, 5]), (14, vec![2, 4])]; voters.extend(auto_generate_self_voters(&candidates)); let stake_of = create_stake_of(&[ - (1, (u64::max_value() - 1).into()), - (2, (u64::max_value() - 4).into()), - (3, (u64::max_value() - 5).into()), - (4, (u64::max_value() - 3).into()), - (5, (u64::max_value() - 2).into()), - (13, (u64::max_value() - 10).into()), - (14, u64::max_value().into()), + (1, (u64::MAX - 1).into()), + (2, (u64::MAX - 4).into()), + (3, (u64::MAX - 5).into()), + (4, (u64::MAX - 3).into()), + (5, (u64::MAX - 2).into()), + (13, (u64::MAX - 10).into()), + (14, u64::MAX.into()), ]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 36893488147419103226u128), (1, 36893488147419103219u128)]); assert_eq!( assignments, vec![ - Assignment { - who: 13u64, - distribution: vec![(1, Perbill::one())], - }, - Assignment { - who: 14, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 1, - distribution: vec![(1, Perbill::one())], - }, - Assignment { - who: 2, - distribution: vec![(2, Perbill::one())], - }, + Assignment { who: 13u64, distribution: vec![(1, Perbill::one())] }, + Assignment { who: 14, distribution: vec![(2, Perbill::one())] }, + Assignment { who: 1, distribution: vec![(1, Perbill::one())] }, + Assignment { who: 2, distribution: vec![(2, Perbill::one())] }, ] ); - check_assignments_sum(assignments); + check_assignments_sum(&assignments); } #[test] fn phragmen_accuracy_on_small_scale_self_vote() { let candidates = vec![40, 10, 20, 30]; let voters = auto_generate_self_voters(&candidates); - let stake_of = create_stake_of(&[ - (40, 0), - (10, 1), - (20, 2), - (30, 1), - ]); + let stake_of = create_stake_of(&[(40, 0), (10, 1), (20, 2), (30, 1)]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 3, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); - check_assignments_sum(assignments); + check_assignments_sum(&assignments); } 
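// Annotation (not part of the diff): the tests above all share one calling shape,
// sketched here with made-up numbers. Voters are `(who, budget, votes)` tuples and
// the trailing `None` opts out of the optional balancing post-processing.
fn _seq_phragmen_shape_sketch() {
    let candidates = vec![1u64, 2, 3];
    let voters = vec![(10u64, 100u64, vec![1u64, 2]), (20, 100, vec![2, 3])];
    let ElectionResult { winners, assignments } =
        seq_phragmen::<_, Perbill>(2, candidates, voters, None).unwrap();
    // Two winners are elected, and both voters back at least one winner.
    assert_eq!(winners.len(), 2);
    assert_eq!(assignments.len(), 2);
}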
#[test] fn phragmen_accuracy_on_small_scale_no_self_vote() { let candidates = vec![40, 10, 20, 30]; - let voters = vec![ - (1, vec![10]), - (2, vec![20]), - (3, vec![30]), - (4, vec![40]), - ]; + let voters = vec![(1, vec![10]), (2, vec![20]), (3, vec![30]), (4, vec![40])]; let stake_of = create_stake_of(&[ (40, 1000), // don't care (10, 1000), // don't care @@ -536,27 +469,28 @@ fn phragmen_accuracy_on_small_scale_no_self_vote() { let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 3, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); - check_assignments_sum(assignments); - + check_assignments_sum(&assignments); } #[test] fn phragmen_large_scale_test() { - let candidates = vec![2, 4, 6, 8, 10, 12, 14, 16 ,18, 20, 22, 24]; - let mut voters = vec![ - (50, vec![2, 4, 6, 8, 10, 12, 14, 16 ,18, 20, 22, 24]), - ]; + let candidates = vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]; + let mut voters = vec![(50, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24])]; voters.extend(auto_generate_self_voters(&candidates)); let stake_of = create_stake_of(&[ - (2, 1), - (4, 100), - (6, 1000000), - (8, 100000000001000), + (2, 1), + (4, 100), + (6, 1000000), + (8, 100000000001000), (10, 100000000002000), (12, 100000000003000), (14, 400000000000000), @@ -571,12 +505,16 @@ fn phragmen_large_scale_test() { let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); - assert_eq_uvec!(to_without_backing(winners.clone()), vec![24, 22]); - check_assignments_sum(assignments); + assert_eq_uvec!(winners.iter().map(|(x, _)| *x).collect::>(), vec![24, 22]); + check_assignments_sum(&assignments); } #[test] @@ -588,18 +526,19 @@ fn phragmen_large_scale_test_2() { let mut voters = vec![(50, vec![2, 4])]; voters.extend(auto_generate_self_voters(&candidates)); - let stake_of = create_stake_of(&[ - (2, c_budget.into()), - (4, c_budget.into()), - (50, nom_budget.into()), - ]); + let stake_of = + create_stake_of(&[(2, c_budget.into()), (4, c_budget.into()), (50, nom_budget.into())]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 500000000005000000u128), (4, 500000000003000000)]); @@ -613,18 +552,12 @@ fn phragmen_large_scale_test_2() { (4, Perbill::from_parts(500000000)), ], }, - Assignment { - who: 2, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 4, - distribution: vec![(4, Perbill::one())], - }, + Assignment { who: 2, distribution: vec![(2, Perbill::one())] }, + Assignment { who: 4, distribution: vec![(4, Perbill::one())] }, ], ); - check_assignments_sum(assignments); + check_assignments_sum(&assignments); } #[test] @@ -647,7 +580,6 @@ fn phragmen_linear_equalize() { (51, 1000), (61, 1000), (71, 1000), - (2, 2000), (4, 1000), (6, 1000), @@ -657,64 +589,54 @@ fn phragmen_linear_equalize() { 
(130, 1000), ]); - run_and_compare::(candidates, voters, &stake_of, 2); + run_and_compare::(candidates, voters, &stake_of, 2); } #[test] fn elect_has_no_entry_barrier() { let candidates = vec![10, 20, 30]; - let voters = vec![ - (1, vec![10]), - (2, vec![20]), - ]; - let stake_of = create_stake_of(&[ - (1, 10), - (2, 10), - ]); + let voters = vec![(1, vec![10]), (2, vec![20])]; + let stake_of = create_stake_of(&[(1, 10), (2, 10)]); let ElectionResult { winners, assignments: _ } = seq_phragmen::<_, Perbill>( 3, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); // 30 is elected with stake 0. The caller is responsible for stripping this. - assert_eq_uvec!(winners, vec![ - (10, 10), - (20, 10), - (30, 0), - ]); + assert_eq_uvec!(winners, vec![(10, 10), (20, 10), (30, 0),]); } #[test] fn phragmen_self_votes_should_be_kept() { let candidates = vec![5, 10, 20, 30]; - let voters = vec![ - (5, vec![5]), - (10, vec![10]), - (20, vec![20]), - (1, vec![10, 20]) - ]; - let stake_of = create_stake_of(&[ - (5, 5), - (10, 10), - (20, 20), - (1, 8), - ]); + let voters = vec![(5, vec![5]), (10, vec![10]), (20, vec![20]), (1, vec![10, 20])]; + let stake_of = create_stake_of(&[(5, 5), (10, 10), (20, 20), (1, 8)]); let result = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq!(result.winners, vec![(20, 24), (10, 14)]); assert_eq_uvec!( result.assignments, vec![ - Assignment { who: 1, distribution: vec![ + Assignment { + who: 1, + distribution: vec![ (10, Perbill::from_percent(50)), (20, Perbill::from_percent(50)), ] @@ -725,8 +647,7 @@ fn phragmen_self_votes_should_be_kept() { ); let staked_assignments = assignment_ratio_to_staked(result.assignments, &stake_of); - let winners = to_without_backing(result.winners); - let supports = build_support_map::(&winners, &staked_assignments).unwrap(); + let supports = to_support_map::(&staked_assignments); assert_eq!(supports.get(&5u64), None); assert_eq!( @@ -742,60 +663,36 @@ fn phragmen_self_votes_should_be_kept() { #[test] fn duplicate_target_is_ignored() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 100, vec![1, 1, 2, 3]), - (20, 100, vec![2, 3]), - (30, 50, vec![1, 1, 2]), - ]; + let voters = vec![(10, 100, vec![1, 1, 2, 3]), (20, 100, vec![2, 3]), (30, 50, vec![1, 1, 2])]; - let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( - 2, - candidates, - voters, - None, - ).unwrap(); - let winners = to_without_backing(winners); + let ElectionResult { winners, assignments } = + seq_phragmen::<_, Perbill>(2, candidates, voters, None).unwrap(); - assert_eq!(winners, vec![(2), (3)]); + assert_eq!(winners, vec![(2, 140), (3, 110)]); assert_eq!( assignments .into_iter() .map(|x| (x.who, x.distribution.into_iter().map(|(w, _)| w).collect::>())) .collect::>(), - vec![ - (10, vec![2, 3]), - (20, vec![2, 3]), - (30, vec![2]), - ], + vec![(10, vec![2, 3]), (20, vec![2, 3]), (30, vec![2]),], ); } #[test] fn duplicate_target_is_ignored_when_winner() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 100, vec![1, 1, 2, 3]), - (20, 100, vec![1, 2]), - ]; + let voters = vec![(10, 100, vec![1, 1, 2, 3]), (20, 100, vec![1, 2])]; - 
let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( - 2, - candidates, - voters, - None, - ).unwrap(); - let winners = to_without_backing(winners); + let ElectionResult { winners, assignments } = + seq_phragmen::<_, Perbill>(2, candidates, voters, None).unwrap(); - assert_eq!(winners, vec![1, 2]); + assert_eq!(winners, vec![(1, 100), (2, 100)]); assert_eq!( assignments .into_iter() .map(|x| (x.who, x.distribution.into_iter().map(|(w, _)| w).collect::<Vec<_>>())) .collect::<Vec<_>>(), - vec![ - (10, vec![1, 2]), - (20, vec![1, 2]), - ], + vec![(10, vec![1, 2]), (20, vec![1, 2]),], ); } @@ -805,10 +702,7 @@ mod assignment_convert_normalize { fn assignment_convert_works() { let staked = StakedAssignment { who: 1 as AccountId, - distribution: vec![ - (20, 100 as ExtendedBalance), - (30, 25), - ], + distribution: vec![(20, 100 as ExtendedBalance), (30, 25)], }; let assignment = staked.clone().into_assignment(); @@ -823,10 +717,7 @@ mod assignment_convert_normalize { } ); - assert_eq!( - assignment.into_staked(125), - staked, - ); + assert_eq!(assignment.into_staked(125), staked); } #[test] @@ -834,11 +725,9 @@ mod assignment_convert_normalize { assert_eq!( Assignment { who: 1, - distribution: vec![ - (2, Perbill::from_percent(33)), - (3, Perbill::from_percent(66)), - ] - }.into_staked(100), + distribution: vec![(2, Perbill::from_percent(33)), (3, Perbill::from_percent(66)),] + } + .into_staked(100), StakedAssignment { who: 1, distribution: vec![ @@ -857,7 +746,8 @@ mod assignment_convert_normalize { (3, 333_333_333_333_333), (4, 666_666_666_666_333), ], - }.into_assignment(), + } + .into_assignment(), Assignment { who: 1, distribution: vec![ @@ -878,7 +768,7 @@ mod assignment_convert_normalize { (2, Perbill::from_parts(330000000)), (3, Perbill::from_parts(660000000)), // sum is not 100%! - ] + ], }; a.try_normalize().unwrap(); assert_eq!( @@ -895,24 +785,9 @@ mod assignment_convert_normalize { #[test] fn staked_assignment_can_normalize() { - let mut a = StakedAssignment { - who: 1, - distribution: vec![ - (2, 33), - (3, 66), - ] - }; + let mut a = StakedAssignment { who: 1, distribution: vec![(2, 33), (3, 66)] }; a.try_normalize(100).unwrap(); - assert_eq!( - a, - StakedAssignment { - who: 1, - distribution: vec![ - (2, 34), - (3, 66), - ] - }, - ); + assert_eq!(a, StakedAssignment { who: 1, distribution: vec![(2, 34), (3, 66),] }); } } @@ -922,28 +797,16 @@ mod score { fn score_comparison_is_lexicographical_no_epsilon() { let epsilon = Perbill::zero(); // only better in the first parameter, worse in the other two ✅ - assert_eq!( - is_score_better([12, 10, 35], [10, 20, 30], epsilon), - true, - ); + assert_eq!(is_score_better([12, 10, 35], [10, 20, 30], epsilon), true); // worse in the first, better in the other two ❌ - assert_eq!( - is_score_better([9, 30, 10], [10, 20, 30], epsilon), - false, - ); + assert_eq!(is_score_better([9, 30, 10], [10, 20, 30], epsilon), false); // equal in the first, the second one dictates. - assert_eq!( - is_score_better([10, 25, 40], [10, 20, 30], epsilon), - true, - ); + assert_eq!(is_score_better([10, 25, 40], [10, 20, 30], epsilon), true); // equal in the first two, the last one dictates. - assert_eq!( - is_score_better([10, 20, 40], [10, 20, 30], epsilon), - false, - ); + assert_eq!(is_score_better([10, 20, 40], [10, 20, 30], epsilon), false); } #[test] @@ -952,178 +815,127 @@ mod score { { // no more than 1 percent (10) better in the first param.
- assert_eq!( - is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), false); // now equal, still not better. - assert_eq!( - is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), false); // now it is. - assert_eq!( - is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), - true, - ); + assert_eq!(is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), true); } { // The second score is epsilon better, but the first score is no longer `ge`. Then this is // still not a good solution. - assert_eq!( - is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), false); } { // first score is equal or better, but not epsilon. Then second one is the determinant. - assert_eq!( - is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), false); - assert_eq!( - is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), false); - assert_eq!( - is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), - true, - ); + assert_eq!(is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), true); } { // first score and second are equal or less than epsilon more, third is determinant. - assert_eq!( - is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), false); - assert_eq!( - is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), false); - assert_eq!( - is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), - true, - ); + assert_eq!(is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), true); } } #[test] fn score_comparison_large_value() { // some random value taken from eras in kusama. - let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; + let initial = + [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; // this claim is 0.04090% better in the third component. It should be accepted as better if // epsilon is smaller than 5/10_000 - let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; + let claim = + [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(1u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(1u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(2u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(2u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(3u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(3u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(4u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(4u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(5u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(5u32, 10_000),), false, ); } } mod solution_type { + use super::*; use codec::{Decode, Encode}; - use super::AccountId; // these need to come from the same dev-dependency `sp-npos-elections`, not from the crate. - use crate::{ - generate_solution_type, Assignment, - Error as PhragmenError, - }; + use crate::{generate_solution_type, Assignment, Error as NposError, NposSolution}; use sp_std::{convert::TryInto, fmt::Debug}; - use sp_arithmetic::Percent; - - type TestAccuracy = Percent; - - generate_solution_type!(pub struct TestSolutionCompact::(16)); #[allow(dead_code)] mod __private { - // This is just to make sure that the solution can be generated in a scope without any + // This is just to make sure that the solution can be generated in a scope without any // imports. use crate::generate_solution_type; - use sp_arithmetic::Percent; generate_solution_type!( #[compact] - struct InnerTestSolutionCompact::(12) + struct InnerTestSolutionIsolated::(12) ); - } #[test] fn solution_struct_works_with_and_without_compact() { // we use u32 size to make sure compact is smaller.
let without_compact = { - generate_solution_type!(pub struct InnerTestSolution::(16)); - let compact = InnerTestSolution { + generate_solution_type!( + pub struct InnerTestSolution::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = TestAccuracy, + >(16) + ); + let solution = InnerTestSolution { votes1: vec![(2, 20), (4, 40)], - votes2: vec![ - (1, (10, TestAccuracy::from_percent(80)), 11), - (5, (50, TestAccuracy::from_percent(85)), 51), - ], + votes2: vec![(1, [(10, p(80))], 11), (5, [(50, p(85))], 51)], ..Default::default() }; - compact.encode().len() + solution.encode().len() }; let with_compact = { - generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + generate_solution_type!( + #[compact] + pub struct InnerTestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = TestAccuracy, + >(16) + ); let compact = InnerTestSolutionCompact { votes1: vec![(2, 20), (4, 40)], - votes2: vec![ - (1, (10, TestAccuracy::from_percent(80)), 11), - (5, (50, TestAccuracy::from_percent(85)), 51), - ], + votes2: vec![(1, [(10, p(80))], 11), (5, [(50, p(85))], 51)], ..Default::default() }; @@ -1135,98 +947,65 @@ mod solution_type { #[test] fn solution_struct_is_codec() { - let compact = TestSolutionCompact { + let solution = TestSolution { votes1: vec![(2, 20), (4, 40)], - votes2: vec![ - (1, (10, TestAccuracy::from_percent(80)), 11), - (5, (50, TestAccuracy::from_percent(85)), 51), - ], + votes2: vec![(1, [(10, p(80))], 11), (5, [(50, p(85))], 51)], ..Default::default() }; - let encoded = compact.encode(); + let encoded = solution.encode(); - assert_eq!( - compact, - Decode::decode(&mut &encoded[..]).unwrap(), - ); - assert_eq!(compact.len(), 4); - assert_eq!(compact.edge_count(), 2 + 4); - assert_eq!(compact.unique_targets(), vec![10, 11, 20, 40, 50, 51]); + assert_eq!(solution, Decode::decode(&mut &encoded[..]).unwrap()); + assert_eq!(solution.voter_count(), 4); + assert_eq!(solution.edge_count(), 2 + 4); + assert_eq!(solution.unique_targets(), vec![10, 11, 20, 40, 50, 51]); } #[test] fn remove_voter_works() { - let mut compact = TestSolutionCompact { + let mut solution = TestSolution { votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (2, (0, TestAccuracy::from_percent(80)), 1), - (3, (7, TestAccuracy::from_percent(85)), 8), - ], - votes3: vec![ - ( - 4, - [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], - 5, - ), - ], + votes2: vec![(2, [(0, p(80))], 1), (3, [(7, p(85))], 8)], + votes3: vec![(4, [(3, p(50)), (4, p(25))], 5)], ..Default::default() }; - assert!(!compact.remove_voter(11)); - assert!(compact.remove_voter(2)); + assert!(!solution.remove_voter(11)); + assert!(solution.remove_voter(2)); assert_eq!( - compact, - TestSolutionCompact { + solution, + TestSolution { votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (3, (7, TestAccuracy::from_percent(85)), 8), - ], - votes3: vec![ - ( - 4, - [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], - 5, - ), - ], + votes2: vec![(3, [(7, p(85))], 8)], + votes3: vec![(4, [(3, p(50)), (4, p(25))], 5,)], ..Default::default() }, ); - assert!(compact.remove_voter(4)); + assert!(solution.remove_voter(4)); assert_eq!( - compact, - TestSolutionCompact { + solution, + TestSolution { votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (3, (7, TestAccuracy::from_percent(85)), 8), - ], + votes2: vec![(3, [(7, p(85))], 8)], ..Default::default() }, ); - assert!(compact.remove_voter(1)); + assert!(solution.remove_voter(1)); assert_eq!( - compact, - TestSolutionCompact { + 
solution, + TestSolution { votes1: vec![(0, 2)], - votes2: vec![ - (3, (7, TestAccuracy::from_percent(85)), 8), - ], + votes2: vec![(3, [(7, p(85))], 8),], ..Default::default() }, ); } #[test] - fn basic_from_and_into_compact_works_assignments() { - let voters = vec![ - 2 as AccountId, - 4, - 1, - 5, - 3, - ]; + fn from_and_into_assignment_works() { + let voters = vec![2 as AccountId, 4, 1, 5, 3]; let targets = vec![ 10 as AccountId, 11, @@ -1240,211 +1019,144 @@ mod solution_type { ]; let assignments = vec![ - Assignment { - who: 2 as AccountId, - distribution: vec![(20u64, TestAccuracy::from_percent(100))] - }, - Assignment { - who: 4, - distribution: vec![(40, TestAccuracy::from_percent(100))], - }, - Assignment { - who: 1, - distribution: vec![ - (10, TestAccuracy::from_percent(80)), - (11, TestAccuracy::from_percent(20)) - ], - }, - Assignment { - who: 5, - distribution: vec![ - (50, TestAccuracy::from_percent(85)), - (51, TestAccuracy::from_percent(15)), - ] - }, - Assignment { - who: 3, - distribution: vec![ - (30, TestAccuracy::from_percent(50)), - (31, TestAccuracy::from_percent(25)), - (32, TestAccuracy::from_percent(25)), - ], - }, + Assignment { who: 2 as AccountId, distribution: vec![(20u64, p(100))] }, + Assignment { who: 4, distribution: vec![(40, p(100))] }, + Assignment { who: 1, distribution: vec![(10, p(80)), (11, p(20))] }, + Assignment { who: 5, distribution: vec![(50, p(85)), (51, p(15))] }, + Assignment { who: 3, distribution: vec![(30, p(50)), (31, p(25)), (32, p(25))] }, ]; let voter_index = |a: &AccountId| -> Option { voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let target_index = |a: &AccountId| -> Option { + let target_index = |a: &AccountId| -> Option { targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let compacted = TestSolutionCompact::from_assignment( - assignments.clone(), - voter_index, - target_index, - ).unwrap(); + let solution = + TestSolution::from_assignment(&assignments, voter_index, target_index).unwrap(); // basically number of assignments that it is encoding. 
- assert_eq!(compacted.len(), assignments.len()); + assert_eq!(solution.voter_count(), assignments.len()); assert_eq!( - compacted.edge_count(), + solution.edge_count(), assignments.iter().fold(0, |a, b| a + b.distribution.len()), ); assert_eq!( - compacted, - TestSolutionCompact { + solution, + TestSolution { votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (2, (0, TestAccuracy::from_percent(80)), 1), - (3, (7, TestAccuracy::from_percent(85)), 8), - ], - votes3: vec![ - ( - 4, - [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], - 5, - ), - ], + votes2: vec![(2, [(0, p(80))], 1), (3, [(7, p(85))], 8)], + votes3: vec![(4, [(3, p(50)), (4, p(25))], 5)], ..Default::default() } ); - assert_eq!( - compacted.unique_targets(), - vec![0, 1, 2, 3, 4, 5, 6, 7, 8], - ); + assert_eq!(solution.unique_targets(), vec![0, 1, 2, 3, 4, 5, 6, 7, 8]); let voter_at = |a: u32| -> Option { voters.get(>::try_into(a).unwrap()).cloned() }; - let target_at = |a: u8| -> Option { - targets.get(>::try_into(a).unwrap()).cloned() + let target_at = |a: u16| -> Option { + targets.get(>::try_into(a).unwrap()).cloned() }; - assert_eq!( - compacted.into_assignment(voter_at, target_at).unwrap(), - assignments, - ); + assert_eq!(solution.into_assignment(voter_at, target_at).unwrap(), assignments); } #[test] fn unique_targets_len_edge_count_works() { - const ACC: TestAccuracy = TestAccuracy::from_percent(10); - // we don't really care about voters here so all duplicates. This is not invalid per se. - let compact = TestSolutionCompact { + let solution = TestSolution { votes1: vec![(99, 1), (99, 2)], - votes2: vec![ - (99, (3, ACC.clone()), 7), - (99, (4, ACC.clone()), 8), - ], - votes3: vec![ - (99, [(11, ACC.clone()), (12, ACC.clone())], 13), - ], + votes2: vec![(99, [(3, p(10))], 7), (99, [(4, p(10))], 8)], + votes3: vec![(99, [(11, p(10)), (12, p(10))], 13)], // ensure the last one is also counted. - votes16: vec![ - ( - 99, - [ - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - ], - 67, - ) - ], + votes16: vec![( + 99, + [ + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + (66, p(10)), + ], + 67, + )], ..Default::default() }; - assert_eq!(compact.unique_targets(), vec![1, 2, 3, 4, 7, 8, 11, 12, 13, 66, 67]); - assert_eq!(compact.edge_count(), 2 + (2 * 2) + 3 + 16); - assert_eq!(compact.len(), 6); + assert_eq!(solution.unique_targets(), vec![1, 2, 3, 4, 7, 8, 11, 12, 13, 66, 67]); + assert_eq!(solution.edge_count(), 2 + (2 * 2) + 3 + 16); + assert_eq!(solution.voter_count(), 6); // this one has some duplicates. 
- let compact = TestSolutionCompact { + let solution = TestSolution { votes1: vec![(99, 1), (99, 1)], - votes2: vec![ - (99, (3, ACC.clone()), 7), - (99, (4, ACC.clone()), 8), - ], - votes3: vec![ - (99, [(11, ACC.clone()), (11, ACC.clone())], 13), - ], + votes2: vec![(99, [(3, p(10))], 7), (99, [(4, p(10))], 8)], + votes3: vec![(99, [(11, p(10)), (11, p(10))], 13)], ..Default::default() }; - assert_eq!(compact.unique_targets(), vec![1, 3, 4, 7, 8, 11, 13]); - assert_eq!(compact.edge_count(), 2 + (2 * 2) + 3); - assert_eq!(compact.len(), 5); + assert_eq!(solution.unique_targets(), vec![1, 3, 4, 7, 8, 11, 13]); + assert_eq!(solution.edge_count(), 2 + (2 * 2) + 3); + assert_eq!(solution.voter_count(), 5); } #[test] - fn compact_into_assignment_must_report_overflow() { + fn solution_into_assignment_must_report_overflow() { // in votes2 - let compact = TestSolutionCompact { + let solution = TestSolution { votes1: Default::default(), - votes2: vec![(0, (1, TestAccuracy::from_percent(100)), 2)], + votes2: vec![(0, [(1, p(100))], 2)], ..Default::default() }; let voter_at = |a: u32| -> Option { Some(a as AccountId) }; - let target_at = |a: u8| -> Option { Some(a as AccountId) }; - + let target_at = |a: u16| -> Option { Some(a as AccountId) }; assert_eq!( - compact.into_assignment(&voter_at, &target_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, + solution.into_assignment(&voter_at, &target_at).unwrap_err(), + NposError::SolutionWeightOverflow, ); // in votes3 onwards - let compact = TestSolutionCompact { + let solution = TestSolution { votes1: Default::default(), votes2: Default::default(), - votes3: vec![(0, [(1, TestAccuracy::from_percent(70)), (2, TestAccuracy::from_percent(80))], 3)], + votes3: vec![(0, [(1, p(70)), (2, p(80))], 3)], ..Default::default() }; assert_eq!( - compact.into_assignment(&voter_at, &target_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, + solution.into_assignment(&voter_at, &target_at).unwrap_err(), + NposError::SolutionWeightOverflow, ); } #[test] fn target_count_overflow_is_detected() { let voter_index = |a: &AccountId| -> Option { Some(*a as u32) }; - let target_index = |a: &AccountId| -> Option { Some(*a as u8) }; + let target_index = |a: &AccountId| -> Option { Some(*a as u16) }; - let assignments = vec![ - Assignment { - who: 1 as AccountId, - distribution: - (10..27) - .map(|i| (i as AccountId, Percent::from_parts(i as u8))) - .collect::>(), - }, - ]; + let assignments = vec![Assignment { + who: 1 as AccountId, + distribution: (10..27).map(|i| (i as AccountId, p(i as u8))).collect::>(), + }]; - let compacted = TestSolutionCompact::from_assignment( - assignments.clone(), - voter_index, - target_index, - ); - assert_eq!(compacted.unwrap_err(), PhragmenError::CompactTargetOverflow); + let solution = TestSolution::from_assignment(&assignments, voter_index, target_index); + assert_eq!(solution.unwrap_err(), NposError::SolutionTargetOverflow); } #[test] @@ -1453,36 +1165,49 @@ mod solution_type { let targets = vec![10 as AccountId, 11]; let assignments = vec![ - Assignment { - who: 1 as AccountId, - distribution: vec![(10, Percent::from_percent(50)), (11, Percent::from_percent(50))], - }, - Assignment { - who: 2, - distribution: vec![], - }, + Assignment { who: 1 as AccountId, distribution: vec![(10, p(50)), (11, p(50))] }, + Assignment { who: 2, distribution: vec![] }, ]; let voter_index = |a: &AccountId| -> Option { voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let target_index = |a: &AccountId| -> Option { + let 
target_index = |a: &AccountId| -> Option<u16> { targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let compacted = TestSolutionCompact::from_assignment( - assignments.clone(), - voter_index, - target_index, - ).unwrap(); + let solution = + TestSolution::from_assignment(&assignments, voter_index, target_index).unwrap(); assert_eq!( - compacted, - TestSolutionCompact { + solution, + TestSolution { votes1: Default::default(), - votes2: vec![(0, (0, Percent::from_percent(50)), 1)], + votes2: vec![(0, [(0, p(50))], 1)], ..Default::default() } ); } } + +#[test] +fn index_assignments_generate_same_solution_as_plain_assignments() { + let rng = rand::rngs::SmallRng::seed_from_u64(0); + + let (voters, assignments, candidates) = generate_random_votes(1000, 2500, rng); + let voter_index = make_voter_fn(&voters); + let target_index = make_target_fn(&candidates); + + let solution = + TestSolution::from_assignment(&assignments, &voter_index, &target_index).unwrap(); + + let index_assignments = assignments + .into_iter() + .map(|assignment| IndexAssignment::new(&assignment, &voter_index, &target_index)) + .collect::<Result<Vec<_>, _>>() + .unwrap(); + + let index_compact = index_assignments.as_slice().try_into().unwrap(); + + assert_eq!(solution, index_compact); +} diff --git a/primitives/npos-elections/src/traits.rs b/primitives/npos-elections/src/traits.rs new file mode 100644 index 0000000000000..45b6fa368ae2a --- /dev/null +++ b/primitives/npos-elections/src/traits.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for the npos-election operations. + +use crate::{ + Assignment, ElectionScore, Error, EvaluateSupport, ExtendedBalance, IndexAssignmentOf, + VoteWeight, +}; +use codec::Encode; +use sp_arithmetic::{ + traits::{Bounded, UniqueSaturatedInto}, + PerThing, +}; +use sp_std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, + ops::Mul, + prelude::*, +}; + +/// An aggregator trait for a generic type of a voter/target identifier. This usually maps to +/// Substrate's account id. +pub trait IdentifierT: Clone + Eq + Default + Ord + Debug + codec::Codec {} +impl<T: Clone + Eq + Default + Ord + Debug + codec::Codec> IdentifierT for T {} + +/// Aggregator trait for a PerThing that can be multiplied by u128 (ExtendedBalance). +pub trait PerThing128: PerThing + Mul<ExtendedBalance, Output = ExtendedBalance> {} +impl<T: PerThing + Mul<ExtendedBalance, Output = ExtendedBalance>> PerThing128 for T {} + +/// Simple extension trait to easily convert `None` from index closures to `Err`. +/// +/// This is only generated and re-exported for the solution code to use. +#[doc(hidden)] +pub trait __OrInvalidIndex<T> { + fn or_invalid_index(self) -> Result<T, Error>; +} + +impl<T> __OrInvalidIndex<T> for Option<T> { + fn or_invalid_index(self) -> Result<T, Error> { + self.ok_or(Error::SolutionInvalidIndex) + } +} + +/// An opaque index-based, NPoS solution type.
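///
/// (Editorial sketch, not part of the diff: an implementor, e.g. a type generated
/// by `generate_solution_type!`, round-trips assignments through the compact,
/// index-based form. `TestSolution` and the index closures are the ones used in
/// this crate's tests.)
///
/// ```ignore
/// let solution = TestSolution::from_assignment(&assignments, &voter_index, &target_index)?;
/// assert_eq!(solution.voter_count(), assignments.len());
/// let round_tripped = solution.into_assignment(voter_at, target_at)?;
/// assert_eq!(round_tripped, assignments);
/// ```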
+pub trait NposSolution +where + Self: Sized + for<'a> sp_std::convert::TryFrom<&'a [IndexAssignmentOf<Self>], Error = Error>, +{ + /// The maximum number of votes that are allowed. + const LIMIT: usize; + + /// The voter type. Needs to be an index (convert to usize). + type VoterIndex: UniqueSaturatedInto<usize> + + TryInto<usize> + + TryFrom<usize> + + Debug + + Copy + + Clone + + Bounded + + Encode; + + /// The target type. Needs to be an index (convert to usize). + type TargetIndex: UniqueSaturatedInto<usize> + + TryInto<usize> + + TryFrom<usize> + + Debug + + Copy + + Clone + + Bounded + + Encode; + + /// The weight/accuracy type of each vote. + type Accuracy: PerThing128; + + /// Get the length of all the voters that this type is encoding. + /// + /// This is basically the same as the number of assignments, or number of active voters. + fn voter_count(&self) -> usize; + + /// Get the total count of edges. + /// + /// This is effectively in the range of {[`Self::voter_count`], [`Self::voter_count`] * + /// [`Self::LIMIT`]}. + fn edge_count(&self) -> usize; + + /// Get the number of unique targets in the whole struct. + /// + /// Once presented with a list of winners, this set and the set of winners must be + /// equal. + fn unique_targets(&self) -> Vec<Self::TargetIndex>; + + /// Get the average edge count. + fn average_edge_count(&self) -> usize { + self.edge_count().checked_div(self.voter_count()).unwrap_or(0) + } + + /// Compute the score of this solution type. + fn score<A, FS>( + self, + stake_of: FS, + voter_at: impl Fn(Self::VoterIndex) -> Option<A>, + target_at: impl Fn(Self::TargetIndex) -> Option<A>, + ) -> Result<ElectionScore, Error> + where + for<'r> FS: Fn(&'r A) -> VoteWeight, + A: IdentifierT, + { + let ratio = self.into_assignment(voter_at, target_at)?; + let staked = crate::helpers::assignment_ratio_to_staked_normalized(ratio, stake_of)?; + let supports = crate::to_supports(&staked); + Ok(supports.evaluate()) + } + + /// Remove a certain voter. + /// + /// This will only search until the first instance of `to_remove`, and returns true. If + /// no instance is found (no-op), then it returns false. + /// + /// In other words, if this returns true, exactly **one** element must have been removed from + /// self. + fn remove_voter(&mut self, to_remove: Self::VoterIndex) -> bool; + + /// Build self from a list of assignments.
+ fn from_assignment( + assignments: &[Assignment], + voter_index: FV, + target_index: FT, + ) -> Result + where + A: IdentifierT, + for<'r> FV: Fn(&'r A) -> Option, + for<'r> FT: Fn(&'r A) -> Option; + + /// Convert self into a `Vec>` + fn into_assignment( + self, + voter_at: impl Fn(Self::VoterIndex) -> Option, + target_at: impl Fn(Self::TargetIndex) -> Option, + ) -> Result>, Error>; +} diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index 02041d5c678ef..dd54147b6c629 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers primitives" name = "sp-offchain" -version = "2.0.0" +version = "4.0.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -13,17 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } - -[dev-dependencies] -sp-state-machine = { version = "0.8.0", default-features = false, path = "../state-machine" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } [features] default = ["std"] -std = [ - "sp-core/std", - "sp-api/std", - "sp-runtime/std" -] +std = ["sp-core/std", "sp-api/std", "sp-runtime/std"] diff --git a/primitives/offchain/src/lib.rs b/primitives/offchain/src/lib.rs index fa5ab808df8a1..72ceca80cfbf8 100644 --- a/primitives/offchain/src/lib.rs +++ b/primitives/offchain/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,19 +21,17 @@ #![warn(missing_docs)] /// Re-export of parent module scope storage prefix. -pub use sp_core::offchain::STORAGE_PREFIX as STORAGE_PREFIX; +pub use sp_core::offchain::STORAGE_PREFIX; sp_api::decl_runtime_apis! { /// The offchain worker api. #[api_version(2)] pub trait OffchainWorkerApi { /// Starts the off-chain task for given block number. - #[skip_initialize_block] #[changed_in(2)] fn offchain_worker(number: sp_runtime::traits::NumberFor); /// Starts the off-chain task for given block header. - #[skip_initialize_block] fn offchain_worker(header: &Block::Header); } } diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index acf454b960a7c..ad03baca24ebb 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-panic-handler" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,4 +15,3 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] backtrace = "0.3.38" -log = "0.4.8" diff --git a/primitives/panic-handler/src/lib.rs b/primitives/panic-handler/src/lib.rs index 2ac30dd636914..75b057cebf3e4 100644 --- a/primitives/panic-handler/src/lib.rs +++ b/primitives/panic-handler/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,11 +25,13 @@ //! temporarily be disabled by using an [`AbortGuard`]. use backtrace::Backtrace; -use std::io::{self, Write}; -use std::marker::PhantomData; -use std::panic::{self, PanicInfo}; -use std::cell::Cell; -use std::thread; +use std::{ + cell::Cell, + io::{self, Write}, + marker::PhantomData, + panic::{self, PanicInfo}, + thread, +}; thread_local! { static ON_PANIC: Cell = Cell::new(OnPanic::Abort); @@ -56,18 +58,19 @@ pub fn set(bug_url: &str, version: &str) { panic::set_hook(Box::new({ let version = version.to_string(); let bug_url = bug_url.to_string(); - move |c| { - panic_hook(c, &bug_url, &version) - } + move |c| panic_hook(c, &bug_url, &version) })); } macro_rules! ABOUT_PANIC { - () => (" + () => { + " This is a bug. Please report it at: {} -")} +" + }; +} /// Set aborting flag. Returns previous value of the flag. fn set_abort(on_panic: OnPanic) -> OnPanic { @@ -92,35 +95,27 @@ pub struct AbortGuard { /// Value that was in `ABORT` before we created this guard. previous_val: OnPanic, /// Marker so that `AbortGuard` doesn't implement `Send`. - _not_send: PhantomData> + _not_send: PhantomData>, } impl AbortGuard { /// Create a new guard. While the guard is alive, panics that happen in the current thread will /// unwind the stack (unless another guard is created afterwards). pub fn force_unwind() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::Unwind), - _not_send: PhantomData - } + AbortGuard { previous_val: set_abort(OnPanic::Unwind), _not_send: PhantomData } } /// Create a new guard. While the guard is alive, panics that happen in the current thread will /// abort the process (unless another guard is created afterwards). pub fn force_abort() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::Abort), - _not_send: PhantomData - } + AbortGuard { previous_val: set_abort(OnPanic::Abort), _not_send: PhantomData } } /// Create a new guard. While the guard is alive, panics that happen in the current thread will - /// **never** abort the process (even if `AbortGuard::force_abort()` guard will be created afterwards). + /// **never** abort the process (even if `AbortGuard::force_abort()` guard will be created + /// afterwards). 
pub fn never_abort() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::NeverAbort), - _not_send: PhantomData - } + AbortGuard { previous_val: set_abort(OnPanic::NeverAbort), _not_send: PhantomData } } } @@ -141,7 +136,7 @@ fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { None => match info.payload().downcast_ref::() { Some(s) => &s[..], None => "Box", - } + }, }; let thread = thread::current(); @@ -158,11 +153,7 @@ fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { let _ = writeln!(stderr, ""); let _ = writeln!(stderr, "{:?}", backtrace); let _ = writeln!(stderr, ""); - let _ = writeln!( - stderr, - "Thread '{}' panicked at '{}', {}:{}", - name, msg, file, line - ); + let _ = writeln!(stderr, "Thread '{}' panicked at '{}', {}:{}", name, msg, file, line); let _ = writeln!(stderr, ABOUT_PANIC!(), report_url); ON_PANIC.with(|val| { diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 0c9fe8ebd6667..8e1b91a9acb21 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-rpc" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,8 +13,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", features = ["derive"] } -sp-core = { version = "2.0.0", path = "../core" } +serde = { version = "1.0.126", features = ["derive"] } +sp-core = { version = "4.0.0-dev", path = "../core" } +rustc-hash = "1.1.0" [dev-dependencies] -serde_json = "1.0.41" +serde_json = "1.0.68" diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index c479f0df8b60e..0d716d5a07c18 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,21 +19,16 @@ #![warn(missing_docs)] -pub mod number; pub mod list; +pub mod number; +pub mod tracing; /// A util function to assert the result of serialization and deserialization is the same. #[cfg(test)] -pub(crate) fn assert_deser(s: &str, expected: T) where - T: std::fmt::Debug + serde::ser::Serialize + serde::de::DeserializeOwned + PartialEq +pub(crate) fn assert_deser(s: &str, expected: T) +where + T: std::fmt::Debug + serde::ser::Serialize + serde::de::DeserializeOwned + PartialEq, { - assert_eq!( - serde_json::from_str::(s).unwrap(), - expected - ); - assert_eq!( - serde_json::to_string(&expected).unwrap(), - s - ); + assert_eq!(serde_json::from_str::(s).unwrap(), expected); + assert_eq!(serde_json::to_string(&expected).unwrap(), s); } - diff --git a/primitives/rpc/src/list.rs b/primitives/rpc/src/list.rs index a80d5a22272c8..b3d0a4f546e94 100644 --- a/primitives/rpc/src/list.rs +++ b/primitives/rpc/src/list.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,7 @@ //! RPC a lenient list or value type. -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// RPC list or value wrapper. 
/// diff --git a/primitives/rpc/src/number.rs b/primitives/rpc/src/number.rs index 3d7e74753526c..916f2c3d83266 100644 --- a/primitives/rpc/src/number.rs +++ b/primitives/rpc/src/number.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,12 @@ //! A number type that can be serialized both as a number or a string that encodes a number in a //! string. -use std::{convert::TryFrom, fmt::Debug}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sp_core::U256; +use std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, +}; /// A number type that can be serialized both as a number or a string that encodes a number in a /// string. @@ -39,6 +42,12 @@ pub enum NumberOrHex { Hex(U256), } +impl Default for NumberOrHex { + fn default() -> Self { + Self::Number(Default::default()) + } +} + impl NumberOrHex { /// Converts this number into an U256. pub fn into_u256(self) -> U256 { @@ -49,12 +58,24 @@ impl NumberOrHex { } } +impl From for NumberOrHex { + fn from(n: u32) -> Self { + NumberOrHex::Number(n.into()) + } +} + impl From for NumberOrHex { fn from(n: u64) -> Self { NumberOrHex::Number(n) } } +impl From for NumberOrHex { + fn from(n: u128) -> Self { + NumberOrHex::Hex(n.into()) + } +} + impl From for NumberOrHex { fn from(n: U256) -> Self { NumberOrHex::Hex(n) @@ -66,25 +87,28 @@ pub struct TryFromIntError(pub(crate) ()); impl TryFrom for u32 { type Error = TryFromIntError; - fn try_from(num_or_hex: NumberOrHex) -> Result { - let num_or_hex = num_or_hex.into_u256(); - if num_or_hex > U256::from(u32::max_value()) { - return Err(TryFromIntError(())); - } else { - Ok(num_or_hex.as_u32()) - } + fn try_from(num_or_hex: NumberOrHex) -> Result { + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) } } impl TryFrom for u64 { type Error = TryFromIntError; - fn try_from(num_or_hex: NumberOrHex) -> Result { - let num_or_hex = num_or_hex.into_u256(); - if num_or_hex > U256::from(u64::max_value()) { - return Err(TryFromIntError(())); - } else { - Ok(num_or_hex.as_u64()) - } + fn try_from(num_or_hex: NumberOrHex) -> Result { + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) + } +} + +impl TryFrom for u128 { + type Error = TryFromIntError; + fn try_from(num_or_hex: NumberOrHex) -> Result { + num_or_hex.into_u256().try_into().map_err(|_| TryFromIntError(())) + } +} + +impl From for U256 { + fn from(num_or_hex: NumberOrHex) -> U256 { + num_or_hex.into_u256() } } diff --git a/primitives/rpc/src/tracing.rs b/primitives/rpc/src/tracing.rs new file mode 100644 index 0000000000000..737ace241037c --- /dev/null +++ b/primitives/rpc/src/tracing.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for working with tracing data + +use serde::{Deserialize, Serialize}; + +use rustc_hash::FxHashMap; + +/// Container for all related spans and events for the block being traced. +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct BlockTrace { + /// Hash of the block being traced + pub block_hash: String, + /// Parent hash + pub parent_hash: String, + /// Module targets that were recorded by the tracing subscriber. + /// Empty string means record all targets. + pub tracing_targets: String, + /// Storage key targets used to filter out events that do not have one of the storage keys. + /// Empty string means do not filter out any events. + pub storage_keys: String, + /// Method targets used to filter out events that do not have one of the event methods. + /// Empty string means do not filter out any events. + pub methods: String, + /// Vec of tracing spans + pub spans: Vec<Span>, + /// Vec of tracing events + pub events: Vec<Event>, +} + +/// Represents a tracing event, complete with recorded data. +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct Event { + /// Event target + pub target: String, + /// Associated data + pub data: Data, + /// Parent id, if it exists + pub parent_id: Option<u64>, +} + +/// Represents a single instance of a tracing span. +/// +/// Exiting a span does not imply that the span will not be re-entered. +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct Span { + /// id for this span + pub id: u64, + /// id of the parent span, if any + pub parent_id: Option<u64>, + /// Name of this span + pub name: String, + /// Target, typically module + pub target: String, + /// Indicates if the span is from wasm + pub wasm: bool, +} + +/// Holds associated values for a tracing span. +#[derive(Serialize, Deserialize, Default, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct Data { + /// HashMap of `String` values recorded while tracing + pub string_values: FxHashMap<String, String>, +} + +/// Error response for the `state_traceBlock` RPC. +#[derive(Serialize, Deserialize, Default, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TraceError { + /// Error message + pub error: String, +} + +/// Response for the `state_traceBlock` RPC.
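///
/// (Editorial note, not part of the diff: with serde's default externally-tagged
/// enum representation and the `camelCase` rename below, this serializes as either
/// `{"blockTrace": { ... }}` or `{"traceError": {"error": "..."}}`.)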
+#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub enum TraceBlockResponse { + /// Error block tracing response + TraceError(TraceError), + /// Successful block tracing response + BlockTrace(BlockTrace), +} diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index bc36098f05a54..dd1b84eabfe93 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,23 +14,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" } -sp-runtime-interface-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-wasm-interface = { version = "4.0.0-dev", path = "../wasm-interface", default-features = false } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-tracing = { version = "4.0.0-dev", default-features = false, path = "../tracing" } +sp-runtime-interface-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } +sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } static_assertions = "1.0.0" -primitive-types = { version = "0.7.0", default-features = false } -sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } +primitive-types = { version = "0.10.1", default-features = false } +sp-storage = { version = "4.0.0-dev", default-features = false, path = "../storage" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0", path = "../core" } -sp-io = { version = "2.0.0", path = "../io" } +sp-state-machine = { version = "0.10.0-dev", path = "../state-machine" } +sp-core = { version = "4.0.0-dev", path = "../core" } +sp-io = { version = "4.0.0-dev", path = "../io" } rustversion = "1.0.0" -trybuild = "1.0.23" +trybuild = "1.0.43" [features] default = [ "std" ] diff --git a/primitives/runtime-interface/README.md b/primitives/runtime-interface/README.md index 666bfe4d5a861..49e13f1b2e743 100644 --- a/primitives/runtime-interface/README.md +++ b/primitives/runtime-interface/README.md @@ -7,18 +7,19 @@ maps to an external function call. These external functions are exported by the and they map to the same implementation as the native calls. # Using a type in a runtime interface - + Any type that should be used in a runtime interface as argument or return value needs to -implement [`RIType`]. The associated type [`FFIType`](RIType::FFIType) is the type that is used -in the FFI function to represent the actual type. For example `[T]` is represented by an `u64`. -The slice pointer and the length will be mapped to an `u64` value. For more information see -this [table](#ffi-type-and-conversion). 
diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index bc36098f05a54..dd1b84eabfe93 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -14,23 +14,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-tracing = { version = "2.0.0", default-features = false, path = "../tracing" } -sp-runtime-interface-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-wasm-interface = { version = "4.0.0-dev", path = "../wasm-interface", default-features = false } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-tracing = { version = "4.0.0-dev", default-features = false, path = "../tracing" } +sp-runtime-interface-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } +sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } static_assertions = "1.0.0" -primitive-types = { version = "0.7.0", default-features = false } -sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } +primitive-types = { version = "0.10.1", default-features = false } +sp-storage = { version = "4.0.0-dev", default-features = false, path = "../storage" } +impl-trait-for-tuples = "0.2.1" [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0", path = "../core" } -sp-io = { version = "2.0.0", path = "../io" } +sp-state-machine = { version = "0.10.0-dev", path = "../state-machine" } +sp-core = { version = "4.0.0-dev", path = "../core" } +sp-io = { version = "4.0.0-dev", path = "../io" } rustversion = "1.0.0" -trybuild = "1.0.23" +trybuild = "1.0.43" [features] default = [ "std" ] diff --git a/primitives/runtime-interface/README.md b/primitives/runtime-interface/README.md index 666bfe4d5a861..49e13f1b2e743 100644 --- a/primitives/runtime-interface/README.md +++ b/primitives/runtime-interface/README.md @@ -7,18 +7,19 @@ maps to an external function call. These external functions are exported by the and they map to the same implementation as the native calls. # Using a type in a runtime interface - + Any type that should be used in a runtime interface as argument or return value needs to -implement [`RIType`]. The associated type [`FFIType`](RIType::FFIType) is the type that is used -in the FFI function to represent the actual type. For example `[T]` is represented by an `u64`. -The slice pointer and the length will be mapped to an `u64` value. For more information see -this [table](#ffi-type-and-conversion). The FFI function definition is used when calling from -the wasm runtime into the node. - -Traits are used to convert from a type to the corresponding [`RIType::FFIType`]. +implement [`RIType`]. The associated type [`FFIType`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/trait.RIType.html#associatedtype.FFIType) +is the type that is used in the FFI function to represent the actual type. For example `[T]` is +represented by an `u64`. The slice pointer and the length will be mapped to an `u64` value. +For more information see this [table](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/#ffi-type-and-conversion). +The FFI function definition is used when calling from the wasm runtime into the node. + +Traits are used to convert from a type to the corresponding +[`RIType::FFIType`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/trait.RIType.html#associatedtype.FFIType). Depending on where and how a type should be used in a function signature, a combination of the following traits need to be implemented: - + 1. Pass as function argument: [`wasm::IntoFFIValue`] and [`host::FromFFIValue`] 2. As function return value: [`wasm::FromFFIValue`] and [`host::IntoFFIValue`] 3. Pass as mutable function argument: [`host::IntoPreallocatedFFIValue`] @@ -26,7 +27,7 @@ following traits need to be implemented: The traits are implemented for most of the common types like `[T]`, `Vec<T>`, arrays and primitive types. -For custom types, we provide the [`PassBy`](pass_by::PassBy) trait and strategies that define +For custom types, we provide the [`PassBy`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/pass_by#PassBy) trait and strategies that define how a type is passed between the wasm runtime and the node. Each strategy also provides a derive macro to simplify the implementation. @@ -52,7 +53,7 @@ trait RuntimeInterface { ``` For more information on declaring a runtime interface, see -[`#[runtime_interface]`](attr.runtime_interface.html). +[`#[runtime_interface]`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/attr.runtime_interface.html). # FFI type and conversion @@ -80,9 +81,9 @@ the host side and how they are converted into the corresponding type. | `[u8; N]` | `u32` | `v.as_ptr()` | | `*const T` | `u32` | `Identity` | | `Option<T>` | `u64` | `let e = v.encode();`<br><br>e.len() 32bit << 32 | e.as_ptr() 32bit | -| [`T where T: PassBy<PassBy=Inner>`](pass_by::Inner) | Depends on inner | Depends on inner | -| [`T where T: PassBy<PassBy=Codec>`](pass_by::Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | +| [`T where T: PassBy<PassBy=Inner>`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/pass_by#Inner) | Depends on inner | Depends on inner | +| [`T where T: PassBy<PassBy=Codec>`](https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/pass_by#Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | `Identity` means that the value is converted directly into the corresponding FFI type. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 8358d2170575f..1eb3bdd9039d9 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.5", features = ["full", "visit", "fold", "extra-traits"] } +syn = { version = "1.0.58", features = ["full", "visit", "fold", "extra-traits"] } quote = "1.0.3" -proc-macro2 = "1.0.3" +proc-macro2 = "1.0.29" Inflector = "0.11.4" -proc-macro-crate = "0.1.4" +proc-macro-crate = "1.0.0" diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index df43551398a12..6b0669a298e1c 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,12 +22,15 @@ //! //! 1. The [`#[runtime_interface]`](attr.runtime_interface.html) attribute macro for generating the //! runtime interfaces. -//! 2. The [`PassByCodec`](derive.PassByCodec.html) derive macro for implementing `PassBy` with `Codec`. -//! 3. The [`PassByEnum`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Enum`. -//! 4. The [`PassByInner`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Inner`. +//! 2. The [`PassByCodec`](derive.PassByCodec.html) derive macro for implementing `PassBy` with +//! `Codec`. 3. The [`PassByEnum`](derive.PassByEnum.html) derive macro for implementing `PassBy` +//! with `Enum`. 4. The [`PassByInner`](derive.PassByInner.html) derive macro for implementing +//! `PassBy` with `Inner`.
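Taken together, the attribute macro and the derives listed above compose roughly like this. A minimal sketch (the `Balance`/`Oracle` names are illustrative, not part of this patch; assumes `sp-runtime-interface` and `parity-scale-codec` as dependencies):

```rust
use codec::{Decode, Encode};
use sp_runtime_interface::{pass_by::PassByCodec, runtime_interface};

/// Passed between wasm and host by SCALE-encoding on one side
/// and decoding on the other (the `Codec` strategy).
#[derive(PassByCodec, Encode, Decode)]
pub struct Balance {
    free: u64,
    reserved: u64,
}

#[runtime_interface]
trait Oracle {
    /// `Balance` can now appear as argument or return value.
    fn total(of: &[u8]) -> Balance {
        Balance { free: of.len() as u64, reserved: 0 }
    }
}
```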
-use syn::{parse_macro_input, ItemTrait, DeriveInput, Result, Token}; -use syn::parse::{Parse, ParseStream}; +use syn::{ + parse::{Parse, ParseStream}, + parse_macro_input, DeriveInput, ItemTrait, Result, Token, +}; mod pass_by; mod runtime_interface; @@ -35,7 +38,7 @@ mod utils; struct Options { wasm_only: bool, - tracing: bool + tracing: bool, } impl Options { @@ -86,17 +89,21 @@ pub fn runtime_interface( #[proc_macro_derive(PassByCodec)] pub fn pass_by_codec(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); - pass_by::codec_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() + pass_by::codec_derive_impl(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(PassByInner)] pub fn pass_by_inner(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); - pass_by::inner_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() + pass_by::inner_derive_impl(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(PassByEnum)] pub fn pass_by_enum(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); pass_by::enum_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() -} \ No newline at end of file +} diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs b/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs index 5e51440938456..2be455d17a47b 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Generics, parse_quote}; +use syn::{parse_quote, DeriveInput, Generics, Result}; use quote::quote; @@ -53,7 +53,7 @@ pub fn derive_impl(mut input: DeriveInput) -> Result<TokenStream> { fn add_trait_bounds(generics: &mut Generics) { let crate_ = generate_crate_access(); - generics.type_params_mut() + generics + .type_params_mut() .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::codec::Codec))); } - diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs index 35ed9c0cb802f..f614e4d9f294d 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,11 +21,11 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Data, Fields, Error, Ident}; +use syn::{Data, DeriveInput, Error, Fields, Ident, Result}; use quote::quote; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; /// The derive implementation for `PassBy` with `Enum`.
pub fn derive_impl(input: DeriveInput) -> Result<TokenStream> { @@ -81,22 +81,21 @@ pub fn derive_impl(input: DeriveInput) -> Result<TokenStream> { /// enum or a variant is not an unit. fn get_enum_field_idents<'a>(data: &'a Data) -> Result<impl Iterator<Item = Result<&'a Ident>> + 'a> { match data { - Data::Enum(d) => { + Data::Enum(d) => if d.variants.len() <= 256 { - Ok( - d.variants.iter().map(|v| if let Fields::Unit = v.fields { + Ok(d.variants.iter().map(|v| { + if let Fields::Unit = v.fields { Ok(&v.ident) } else { Err(Error::new( Span::call_site(), "`PassByEnum` only supports unit variants.", )) - }) - ) + } + })) } else { Err(Error::new(Span::call_site(), "`PassByEnum` only supports `256` variants.")) - } - }, + }, - _ => Err(Error::new(Span::call_site(), "`PassByEnum` only supports enums as input type.")) + _ => Err(Error::new(Span::call_site(), "`PassByEnum` only supports enums as input type.")), } } diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs index cf3bb965d0743..6eaa689d6293a 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,11 +22,11 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Generics, parse_quote, Type, Data, Error, Fields, Ident}; +use syn::{parse_quote, Data, DeriveInput, Error, Fields, Generics, Ident, Result, Type}; use quote::quote; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; /// The derive implementation for `PassBy` with `Inner` and `PassByInner`. pub fn derive_impl(mut input: DeriveInput) -> Result<TokenStream> { @@ -80,7 +80,8 @@ pub fn derive_impl(mut input: DeriveInput) -> Result<TokenStream> { fn add_trait_bounds(generics: &mut Generics) { let crate_ = generate_crate_access(); - generics.type_params_mut() + generics + .type_params_mut() .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::RIType))); } @@ -97,15 +98,13 @@ fn extract_inner_ty_and_name(data: &Data) -> Result<(Type, Option<Ident>)> { Fields::Unnamed(ref unnamed) if unnamed.unnamed.len() == 1 => { let field = &unnamed.unnamed[0]; return Ok((field.ty.clone(), field.ident.clone())) - } + }, _ => {}, } } - Err( - Error::new( - Span::call_site(), - "Only newtype/one field structs are supported by `PassByInner`!", - ) - ) + Err(Error::new( + Span::call_site(), + "Only newtype/one field structs are supported by `PassByInner`!", + )) }
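The `extract_inner_ty_and_name` check above is what restricts `PassByInner` to single-field structs. A short sketch of the accepted shape (the type name is illustrative, not from the patch; assumes `sp-runtime-interface` as a dependency):

```rust
use sp_runtime_interface::pass_by::PassByInner;

// Exactly one field, so the derive can forward everything to the inner `[u8; 32]`.
#[derive(PassByInner)]
struct CryptoKey([u8; 32]);

// Adding a second field would trigger the error constructed above:
// "Only newtype/one field structs are supported by `PassByInner`!"
```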
diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs b/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs index ff5ea4849af77..80ac3396759fb 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index 2725bd2c89ce5..c951dedb67713 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,15 +30,16 @@ //! are feature-gated, so that one is compiled for the native and the other for the wasm side. use crate::utils::{ - generate_crate_access, create_exchangeable_host_function_ident, get_function_arguments, - get_function_argument_names, get_runtime_interface, create_function_ident_with_version, + create_exchangeable_host_function_ident, create_function_ident_with_version, + generate_crate_access, get_function_argument_names, get_function_arguments, + get_runtime_interface, }; use syn::{ - Ident, ItemTrait, TraitItemMethod, FnArg, Signature, Result, spanned::Spanned, parse_quote, + parse_quote, spanned::Spanned, FnArg, Ident, ItemTrait, Result, Signature, TraitItemMethod, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::{quote, quote_spanned}; @@ -51,21 +52,22 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool, tracing: bool) -> Res let runtime_interface = get_runtime_interface(trait_def)?; // latest version dispatch - let token_stream: Result<TokenStream> = runtime_interface.latest_versions() - .try_fold( - TokenStream::new(), - |mut t, (latest_version, method)| { - t.extend(function_for_method(method, latest_version, is_wasm_only)?); - Ok(t) - } - ); + let token_stream: Result<TokenStream> = runtime_interface.latest_versions().try_fold( + TokenStream::new(), + |mut t, (latest_version, method)| { + t.extend(function_for_method(method, latest_version, is_wasm_only)?); + Ok(t) + }, + ); // earlier versions compatibility dispatch (only std variant) - let result: Result<TokenStream> = runtime_interface.all_versions().try_fold(token_stream?, |mut t, (version, method)| - { - t.extend(function_std_impl(trait_name, method, version, is_wasm_only, tracing)?); - Ok(t) - }); + let result: Result<TokenStream> = + runtime_interface + .all_versions() + .try_fold(token_stream?, |mut t, (version, method)| { + t.extend(function_std_impl(trait_name, method, version, is_wasm_only, tracing)?); + Ok(t) + }); result } @@ -76,21 +78,16 @@ fn function_for_method( latest_version: u32, is_wasm_only: bool, ) -> Result<TokenStream> { - let std_impl = if !is_wasm_only { - function_std_latest_impl(method, latest_version)? - } else { - quote!() - }; + let std_impl = + if !is_wasm_only { function_std_latest_impl(method, latest_version)? } else { quote!() }; let no_std_impl = function_no_std_impl(method)?; - Ok( - quote! { - #std_impl + Ok(quote! { + #std_impl - #no_std_impl - } - ) + #no_std_impl + }) } /// Generates the bare function implementation for `cfg(not(feature = "std"))`. @@ -102,31 +99,27 @@ fn function_no_std_impl(method: &TraitItemMethod) -> Result<TokenStream> { let return_value = &method.sig.output; let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); - Ok( - quote!
{ - #[cfg(not(feature = "std"))] - #( #attrs )* - pub fn #function_name( #( #args, )* ) #return_value { - // Call the host function - #host_function_name.get()( #( #arg_names, )* ) - } + Ok(quote! { + #[cfg(not(feature = "std"))] + #( #attrs )* + pub fn #function_name( #( #args, )* ) #return_value { + // Call the host function + #host_function_name.get()( #( #arg_names, )* ) } - ) + }) } /// Generate call to latest function version for `cfg(feature = "std")` /// /// This should generate simple `fn func(..) { func_version_(..) }`. -fn function_std_latest_impl( - method: &TraitItemMethod, - latest_version: u32, -) -> Result<TokenStream> { +fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Result<TokenStream> { let function_name = &method.sig.ident; let args = get_function_arguments(&method.sig).map(FnArg::Typed); let arg_names = get_function_argument_names(&method.sig).collect::<Vec<_>>(); let return_value = &method.sig.output; let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); - let latest_function_name = create_function_ident_with_version(&method.sig.ident, latest_version); + let latest_function_name = + create_function_ident_with_version(&method.sig.ident, latest_version); Ok(quote_spanned! { method.span() => #[cfg(feature = "std")] @@ -153,17 +146,16 @@ fn function_std_impl( let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig).map(FnArg::Typed).chain( // Add the function context as last parameter when this is a wasm only interface. - iter::from_fn(|| + iter::from_fn(|| { if is_wasm_only { - Some( - parse_quote!( - mut __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext - ) - ) + Some(parse_quote!( + mut __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext + )) } else { None } - ).take(1), + }) + .take(1), ); let return_value = &method.sig.output; let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); @@ -179,15 +171,13 @@ fn function_std_impl( ) }; - Ok( - quote_spanned! { method.span() => - #[cfg(feature = "std")] - #( #attrs )* - fn #function_name( #( #args, )* ) #return_value { - #call_to_trait - } + Ok(quote_spanned! { method.span() => + #[cfg(feature = "std")] + #( #attrs )* + fn #function_name( #( #args, )* ) #return_value { + #call_to_trait } - ) + }) } /// Generate the call to the interface trait. @@ -199,10 +189,8 @@ fn generate_call_to_trait( ) -> TokenStream { let crate_ = generate_crate_access(); let method_name = create_function_ident_with_version(&method.sig.ident, version); - let expect_msg = format!( - "`{}` called outside of an Externalities-provided environment.", - method_name, - ); + let expect_msg = + format!("`{}` called outside of an Externalities-provided environment.", method_name); let arg_names = get_function_argument_names(&method.sig); if takes_self_argument(&method.sig) { @@ -239,8 +227,5 @@ fn generate_call_to_trait( /// Returns if the given `Signature` takes a `self` argument. fn takes_self_argument(sig: &Signature) -> bool { - match sig.inputs.first() { - Some(FnArg::Receiver(_)) => true, - _ => false, - } + matches!(sig.inputs.first(), Some(FnArg::Receiver(_))) }
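For one method, the dispatch this module emits has roughly the following shape. This is a hand-written approximation, not the macro's literal output (the real generated code also converts every argument through its FFI representation, and the wasm-side body calls through the exchangeable host function instead):

```rust
// Approximate generated shape for `fn hash(data: &[u8]) -> u64` with one version.

#[cfg(feature = "std")]
pub fn hash(data: &[u8]) -> u64 {
    // Latest-version dispatch: the bare function forwards to the newest impl.
    hash_version_1(data)
}

#[cfg(feature = "std")]
fn hash_version_1(data: &[u8]) -> u64 {
    // Stand-in body; the generated code calls the trait impl
    // on `&mut dyn Externalities` here.
    data.len() as u64
}

// On wasm (`not(feature = "std")`) the bare function instead reads roughly:
// pub fn hash(data: &[u8]) -> u64 { host_hash.get()(data) }
```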
diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 7a4dbc5773a28..75498c09c18c7 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,35 +22,36 @@ //! executor. These implementations call the bare function interface. use crate::utils::{ - generate_crate_access, create_host_function_ident, get_function_argument_names, - get_function_argument_types_without_ref, get_function_argument_types_ref_and_mut, - get_function_argument_names_and_types_without_ref, get_function_arguments, - get_function_argument_types, create_exchangeable_host_function_ident, get_runtime_interface, - create_function_ident_with_version, + create_exchangeable_host_function_ident, create_function_ident_with_version, + create_host_function_ident, generate_crate_access, get_function_argument_names, + get_function_argument_names_and_types_without_ref, get_function_argument_types, + get_function_argument_types_ref_and_mut, get_function_argument_types_without_ref, + get_function_arguments, get_runtime_interface, }; use syn::{ - ItemTrait, TraitItemMethod, Result, ReturnType, Ident, Pat, Error, Signature, spanned::Spanned, + spanned::Spanned, Error, Ident, ItemTrait, Pat, Result, ReturnType, Signature, TraitItemMethod, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::{quote, ToTokens}; use inflector::Inflector; -use std::iter::{Iterator, self}; +use std::iter::{self, Iterator}; /// Generate the extern host functions for wasm and the `HostFunctions` struct that provides the /// implementations for the host functions on the host. pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result<TokenStream> { let trait_name = &trait_def.ident; - let extern_host_function_impls = get_runtime_interface(trait_def)? - .latest_versions() - .try_fold(TokenStream::new(), |mut t, (version, method)| { + let extern_host_function_impls = get_runtime_interface(trait_def)?.latest_versions().try_fold( + TokenStream::new(), + |mut t, (version, method)| { t.extend(generate_extern_host_function(method, version, trait_name)?); Ok::<_, Error>(t) - })?; + }, + )?; let exchangeable_host_functions = get_runtime_interface(trait_def)? .latest_versions() .try_fold(TokenStream::new(), |mut t, (_, m)| { @@ -59,27 +60,29 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result<TokenStrea - Ok( - quote! { - /// The implementations of the extern host functions. This special implementation module - /// is required to change the extern host functions signature to - /// `unsafe fn name(args) -> ret` to make the function implementations exchangeable. - #[cfg(not(feature = "std"))] - mod extern_host_function_impls { - use super::*; - - #extern_host_function_impls - } - - #exchangeable_host_functions + Ok(quote! { + /// The implementations of the extern host functions. This special implementation module + /// is required to change the extern host functions signature to + /// `unsafe fn name(args) -> ret` to make the function implementations exchangeable.
+ #[cfg(not(feature = "std"))] + mod extern_host_function_impls { + use super::*; - #host_functions_struct + #extern_host_function_impls } - ) + + #exchangeable_host_functions + + #host_functions_struct + }) } /// Generate the extern host function for the given method. -fn generate_extern_host_function(method: &TraitItemMethod, version: u32, trait_name: &Ident) -> Result<TokenStream> { +fn generate_extern_host_function( + method: &TraitItemMethod, + version: u32, + trait_name: &Ident, +) -> Result<TokenStream> { let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig); let arg_types = get_function_argument_types_without_ref(&method.sig); @@ -106,33 +109,31 @@ fn generate_extern_host_function(method: &TraitItemMethod, version: u32, trait_n ReturnType::Default => quote!(), ReturnType::Type(_, ref ty) => quote! { <#ty as #crate_::wasm::FromFFIValue>::from_ffi_value(result) - } + }, }; - Ok( - quote! { - #[doc = #doc_string] - pub fn #function ( #( #args ),* ) #return_value { - extern "C" { - /// The extern function. - pub fn #ext_function ( - #( #arg_names: <#arg_types as #crate_::RIType>::FFIType ),* - ) #ffi_return_value; - } + Ok(quote! { + #[doc = #doc_string] + pub fn #function ( #( #args ),* ) #return_value { + extern "C" { + /// The extern function. + pub fn #ext_function ( + #( #arg_names: <#arg_types as #crate_::RIType>::FFIType ),* + ) #ffi_return_value; + } - // Generate all wrapped ffi values. - #( - let #arg_names2 = <#arg_types2 as #crate_::wasm::IntoFFIValue>::into_ffi_value( - &#arg_names2, - ); - )* + // Generate all wrapped ffi values. + #( + let #arg_names2 = <#arg_types2 as #crate_::wasm::IntoFFIValue>::into_ffi_value( + &#arg_names2, + ); + )* - let result = unsafe { #ext_function( #( #arg_names3.get() ),* ) }; + let result = unsafe { #ext_function( #( #arg_names3.get() ),* ) }; - #convert_return_value - } + #convert_return_value } - ) + }) } /// Generate the host exchangeable function for the given method. @@ -144,48 +145,47 @@ fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result<Toke - Ok( - quote! { - #[cfg(not(feature = "std"))] - #[allow(non_upper_case_globals)] - #[doc = #doc_string] - pub static #exchangeable_function : #crate_::wasm::ExchangeableFunction< - fn ( #( #arg_types ),* ) #output - > = #crate_::wasm::ExchangeableFunction::new(extern_host_function_impls::#function); - } - ) + Ok(quote! { + #[cfg(not(feature = "std"))] + #[allow(non_upper_case_globals)] + #[doc = #doc_string] + pub static #exchangeable_function : #crate_::wasm::ExchangeableFunction< + fn ( #( #arg_types ),* ) #output + > = #crate_::wasm::ExchangeableFunction::new(extern_host_function_impls::#function); + }) } /// Generate the `HostFunctions` struct that implements `wasm-interface::HostFunctions` to provide /// implementations for the extern host functions. -fn generate_host_functions_struct(trait_def: &ItemTrait, is_wasm_only: bool) -> Result<TokenStream> { +fn generate_host_functions_struct( + trait_def: &ItemTrait, + is_wasm_only: bool, +) -> Result<TokenStream> { let crate_ = generate_crate_access(); let host_functions = get_runtime_interface(trait_def)? .all_versions() - .map(|(version, method)| + .map(|(version, method)| { generate_host_function_implementation(&trait_def.ident, method, version, is_wasm_only) - ) + }) .collect::<Result<Vec<_>>>()?; - Ok( - quote! { - /// Provides implementations for the extern host functions. - #[cfg(feature = "std")] - pub struct HostFunctions; - - #[cfg(feature = "std")] - impl #crate_::sp_wasm_interface::HostFunctions for HostFunctions { - fn host_functions() -> Vec<&'static dyn #crate_::sp_wasm_interface::Function> { - vec![ #( #host_functions ),* ] - } + Ok(quote! { + /// Provides implementations for the extern host functions.
+ #[cfg(feature = "std")] + pub struct HostFunctions; + + #[cfg(feature = "std")] + impl #crate_::sp_wasm_interface::HostFunctions for HostFunctions { + fn host_functions() -> Vec<&'static dyn #crate_::sp_wasm_interface::Function> { + vec![ #( #host_functions ),* ] } } - ) + }) } -/// Generates the host function struct that implements `wasm_interface::Function` and returns a static -/// reference to this struct. +/// Generates the host function struct that implements `wasm_interface::Function` and returns a +/// static reference to this struct. /// /// When calling from wasm into the host, we will call the `execute` function that calls the native /// implementation of the function. @@ -199,71 +199,65 @@ fn generate_host_function_implementation( let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); let crate_ = generate_crate_access(); let signature = generate_wasm_interface_signature_for_host_function(&method.sig)?; - let wasm_to_ffi_values = generate_wasm_to_ffi_values( - &method.sig, - trait_name, - ).collect::<Result<Vec<_>>>()?; + let wasm_to_ffi_values = + generate_wasm_to_ffi_values(&method.sig, trait_name).collect::<Result<Vec<_>>>()?; let ffi_to_host_values = generate_ffi_to_host_value(&method.sig).collect::<Result<Vec<_>>>()?; let host_function_call = generate_host_function_call(&method.sig, version, is_wasm_only); let into_preallocated_ffi_value = generate_into_preallocated_ffi_value(&method.sig)?; let convert_return_value = generate_return_value_into_wasm_value(&method.sig); - Ok( - quote! { - { - struct #struct_name; - - impl #crate_::sp_wasm_interface::Function for #struct_name { - fn name(&self) -> &str { - #name - } - - fn signature(&self) -> #crate_::sp_wasm_interface::Signature { - #signature - } - - fn execute( - &self, - __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext, - args: &mut dyn Iterator<Item = #crate_::sp_wasm_interface::Value>, - ) -> std::result::Result<Option<#crate_::sp_wasm_interface::Value>, String> { - #( #wasm_to_ffi_values )* - #( #ffi_to_host_values )* - #host_function_call - #into_preallocated_ffi_value - #convert_return_value - } + Ok(quote! { + { + struct #struct_name; + + impl #crate_::sp_wasm_interface::Function for #struct_name { + fn name(&self) -> &str { + #name + } + + fn signature(&self) -> #crate_::sp_wasm_interface::Signature { + #signature } - &#struct_name as &dyn #crate_::sp_wasm_interface::Function + fn execute( + &self, + __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext, + args: &mut dyn Iterator<Item = #crate_::sp_wasm_interface::Value>, + ) -> std::result::Result<Option<#crate_::sp_wasm_interface::Value>, String> { + #( #wasm_to_ffi_values )* + #( #ffi_to_host_values )* + #host_function_call + #into_preallocated_ffi_value + #convert_return_value + } } + + &#struct_name as &dyn #crate_::sp_wasm_interface::Function } - ) + }) } /// Generate the `wasm_interface::Signature` for the given host function `sig`. fn generate_wasm_interface_signature_for_host_function(sig: &Signature) -> Result<TokenStream> { let crate_ = generate_crate_access(); let return_value = match &sig.output { - ReturnType::Type(_, ty) => - quote! { - Some( <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE ) - }, - ReturnType::Default => quote!( None ), + ReturnType::Type(_, ty) => quote! { + Some( <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE ) + }, + ReturnType::Default => quote!(None), }; - let arg_types = get_function_argument_types_without_ref(sig) - .map(|ty| quote! { + let arg_types = get_function_argument_types_without_ref(sig).map(|ty| { + quote!
{ <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE - }); + } + }); - Ok( - quote! { - #crate_::sp_wasm_interface::Signature { - args: std::borrow::Cow::Borrowed(&[ #( #arg_types ),* ][..]), - return_value: #return_value, - } + Ok(quote! { + #crate_::sp_wasm_interface::Signature { + args: std::borrow::Cow::Borrowed(&[ #( #arg_types ),* ][..]), + return_value: #return_value, } - ) + }) } /// Generate the code that converts the wasm values given to `HostFunctions::execute` into the FFI @@ -279,24 +273,23 @@ fn generate_wasm_to_ffi_values<'a>( function_name, ); - get_function_argument_names_and_types_without_ref(sig) - .map(move |(name, ty)| { - let try_from_error = format!( - "Could not instantiate `{}` from wasm value while executing `{}` from interface `{}`!", - name.to_token_stream(), - function_name, - trait_name, - ); + get_function_argument_names_and_types_without_ref(sig).map(move |(name, ty)| { + let try_from_error = format!( + "Could not instantiate `{}` from wasm value while executing `{}` from interface `{}`!", + name.to_token_stream(), + function_name, + trait_name, + ); - let var_name = generate_ffi_value_var_name(&name)?; + let var_name = generate_ffi_value_var_name(&name)?; - Ok(quote! { - let val = args.next().ok_or_else(|| #error_message)?; - let #var_name = < - <#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::TryFromValue - >::try_from_value(val).ok_or_else(|| #try_from_error)?; - }) + Ok(quote! { + let val = args.next().ok_or_else(|| #error_message)?; + let #var_name = < + <#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::TryFromValue + >::try_from_value(val).ok_or_else(|| #try_from_error)?; }) + }) } /// Generate the code to convert the ffi values on the host to the host values using `FromFFIValue`. @@ -311,14 +304,12 @@ fn generate_ffi_to_host_value<'a>( .map(move |((name, ty), mut_access)| { let ffi_value_var_name = generate_ffi_value_var_name(&name)?; - Ok( - quote! { - let #mut_access #name = <#ty as #crate_::host::FromFFIValue>::from_ffi_value( - __function_context__, - #ffi_value_var_name, - )?; - } - ) + Ok(quote! { + let #mut_access #name = <#ty as #crate_::host::FromFFIValue>::from_ffi_value( + __function_context__, + #ffi_value_var_name, + )?; + }) }) } @@ -326,19 +317,17 @@ fn generate_ffi_to_host_value<'a>( fn generate_host_function_call(sig: &Signature, version: u32, is_wasm_only: bool) -> TokenStream { let host_function_name = create_function_ident_with_version(&sig.ident, version); let result_var_name = generate_host_function_result_var_name(&sig.ident); - let ref_and_mut = get_function_argument_types_ref_and_mut(sig).map(|ram| - ram.map(|(vr, vm)| quote!(#vr #vm)) - ); + let ref_and_mut = + get_function_argument_types_ref_and_mut(sig).map(|ram| ram.map(|(vr, vm)| quote!(#vr #vm))); let names = get_function_argument_names(sig); - let var_access = names.zip(ref_and_mut) - .map(|(n, ref_and_mut)| { - quote!( #ref_and_mut #n ) - }) + let var_access = names + .zip(ref_and_mut) + .map(|(n, ref_and_mut)| quote!( #ref_and_mut #n )) // If this is a wasm only interface, we add the function context as last parameter. .chain( iter::from_fn(|| if is_wasm_only { Some(quote!(__function_context__)) } else { None }) - .take(1) + .take(1), ); quote! { @@ -354,16 +343,15 @@ fn generate_host_function_result_var_name(name: &Ident) -> Ident { /// Generate the variable name that stores the FFI value. 
fn generate_ffi_value_var_name(pat: &Pat) -> Result<Ident> { match pat { - Pat::Ident(pat_ident) => { + Pat::Ident(pat_ident) => if let Some(by_ref) = pat_ident.by_ref { Err(Error::new(by_ref.span(), "`ref` not supported!")) } else if let Some(sub_pattern) = &pat_ident.subpat { Err(Error::new(sub_pattern.0.span(), "Not supported!")) } else { Ok(Ident::new(&format!("{}_ffi_value", pat_ident.ident), Span::call_site())) - } - } - _ => Err(Error::new(pat.span(), "Not supported as variable name!")) + }, + _ => Err(Error::new(pat.span(), "Not supported as variable name!")), } } @@ -373,25 +361,23 @@ fn generate_ffi_value_var_name(pat: &Pat) -> Result<Ident> { /// that the type implements `IntoPreAllocatedFFIValue`. fn generate_into_preallocated_ffi_value(sig: &Signature) -> Result<TokenStream> { let crate_ = generate_crate_access(); - let ref_and_mut = get_function_argument_types_ref_and_mut(sig).map(|ram| - ram.and_then(|(vr, vm)| vm.map(|v| (vr, v))) - ); + let ref_and_mut = get_function_argument_types_ref_and_mut(sig) + .map(|ram| ram.and_then(|(vr, vm)| vm.map(|v| (vr, v)))); let names_and_types = get_function_argument_names_and_types_without_ref(sig); - ref_and_mut.zip(names_and_types) + ref_and_mut + .zip(names_and_types) .filter_map(|(ram, (name, ty))| ram.map(|_| (name, ty))) .map(|(name, ty)| { let ffi_var_name = generate_ffi_value_var_name(&name)?; - Ok( - quote! { - <#ty as #crate_::host::IntoPreallocatedFFIValue>::into_preallocated_ffi_value( - #name, - __function_context__, - #ffi_var_name, - )?; - } - ) + Ok(quote! { + <#ty as #crate_::host::IntoPreallocatedFFIValue>::into_preallocated_ffi_value( + #name, + __function_context__, + #ffi_var_name, + )?; + }) }) .collect() } @@ -401,7 +387,7 @@ fn generate_return_value_into_wasm_value(sig: &Signature) -> TokenStream { let crate_ = generate_crate_access(); match &sig.output { - ReturnType::Default => quote!( Ok(None) ), + ReturnType::Default => quote!(Ok(None)), ReturnType::Type(_, ty) => { let result_var_name = generate_host_function_result_var_name(&sig.ident); @@ -411,6 +397,6 @@ fn generate_return_value_into_wasm_value(sig: &Signature) -> TokenStream { __function_context__, ).map(#crate_::sp_wasm_interface::IntoValue::into_value).map(Some) } - } + }, } } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 02c291975738c..78feda663850c 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index 70015d02426d4..c62e3ba87ccd3 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,15 +19,14 @@ //!
default implementations and implements the trait for `&mut dyn Externalities`. use crate::utils::{ - generate_crate_access, - get_function_argument_types_without_ref, - get_runtime_interface, - create_function_ident_with_version, + create_function_ident_with_version, generate_crate_access, + get_function_argument_types_without_ref, get_runtime_interface, }; use syn::{ - ItemTrait, TraitItemMethod, Result, Error, fold::{self, Fold}, spanned::Spanned, - Visibility, Receiver, Type, Generics, + fold::{self, Fold}, + spanned::Spanned, + Error, Generics, ItemTrait, Receiver, Result, TraitItemMethod, Type, Visibility, }; use proc_macro2::TokenStream; @@ -40,13 +39,11 @@ pub fn process(trait_def: &ItemTrait, is_wasm_only: bool) -> Result<TokenStream> { let impl_trait = impl_trait_for_externalities(trait_def, is_wasm_only)?; let essential_trait_def = declare_essential_trait(trait_def)?; - Ok( - quote! { - #impl_trait + Ok(quote! { + #impl_trait - #essential_trait_def - } - ) + #essential_trait_def + }) } /// Converts the given trait definition into the essential trait definition without method @@ -66,12 +63,10 @@ impl ToEssentialTraitDef { let mut errors = self.errors; let methods = self.methods; if let Some(first_error) = errors.pop() { - Err( - errors.into_iter().fold(first_error, |mut o, n| { - o.combine(n); - o - }) - ) + Err(errors.into_iter().fold(first_error, |mut o, n| { + o.combine(n); + o + })) } else { Ok(methods) } @@ -101,12 +96,12 @@ impl Fold for ToEssentialTraitDef { } let arg_types = get_function_argument_types_without_ref(&method.sig); - arg_types.filter_map(|ty| - match *ty { + arg_types + .filter_map(|ty| match *ty { Type::ImplTrait(impl_trait) => Some(impl_trait), - _ => None - } - ).for_each(|invalid| self.push_error(&invalid, "`impl Trait` syntax not supported.")); + _ => None, + }) + .for_each(|invalid| self.push_error(&invalid, "`impl Trait` syntax not supported.")); self.error_on_generic_parameters(&method.sig.generics); @@ -145,13 +140,11 @@ fn declare_essential_trait(trait_def: &ItemTrait) -> Result<TokenStream> { } let methods = folder.into_methods()?; - Ok( - quote! { - trait #trait_ { - #( #methods )* - } + Ok(quote! { + trait #trait_ { + #( #methods )* } - ) + }) } /// Implements the given trait definition for `dyn Externalities`. @@ -172,12 +165,10 @@ fn impl_trait_for_externalities(trait_def: &ItemTrait, is_wasm_only: bool) -> Re quote!( &mut dyn #crate_::Externalities ) }; - Ok( - quote! { - #[cfg(feature = "std")] - impl #trait_ for #impl_type { - #( #methods )* - } + Ok(quote! { + #[cfg(feature = "std")] + impl #trait_ for #impl_type { + #( #methods )* } - ) + }) } diff --git a/primitives/runtime-interface/proc-macro/src/utils.rs b/primitives/runtime-interface/proc-macro/src/utils.rs index 45f66e3bf6525..42ce09c573932 100644 --- a/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/primitives/runtime-interface/proc-macro/src/utils.rs @@ -1,32 +1,35 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details.
- -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Util function used by this crate. -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::{ - Ident, Error, Signature, Pat, PatType, FnArg, Type, token, TraitItemMethod, ItemTrait, - TraitItem, parse_quote, spanned::Spanned, Result, Meta, NestedMeta, Lit, Attribute, + parse_quote, spanned::Spanned, token, Attribute, Error, FnArg, Ident, ItemTrait, Lit, Meta, + NestedMeta, Pat, PatType, Result, Signature, TraitItem, TraitItemMethod, Type, }; -use proc_macro_crate::crate_name; +use proc_macro_crate::{crate_name, FoundCrate}; -use std::env; -use std::collections::{BTreeMap, btree_map::Entry}; +use std::{ + collections::{btree_map::Entry, BTreeMap}, + env, }; use quote::quote; @@ -53,8 +56,9 @@ impl<'a> RuntimeInterfaceFunction<'a> { pub fn latest_version(&self) -> (u32, &TraitItemMethod) { ( self.latest_version, - self.versions.get(&self.latest_version) - .expect("If latest_version has a value, the key with this value is in the versions; qed") + self.versions.get(&self.latest_version).expect( + "If latest_version has a value, the key with this value is in the versions; qed", + ), ) } } @@ -70,37 +74,37 @@ impl<'a> RuntimeInterface<'a> { } pub fn all_versions(&self) -> impl Iterator<Item = (u32, &TraitItemMethod)> { - self.items.iter().flat_map(|(_, item)| item.versions.iter()).map(|(v, i)| (*v, *i)) + self.items + .iter() + .flat_map(|(_, item)| item.versions.iter()) + .map(|(v, i)| (*v, *i)) } - } +} /// Generates the include for the runtime-interface crate. pub fn generate_runtime_interface_include() -> TokenStream { - if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { - TokenStream::new() - } else { - match crate_name("sp-runtime-interface") { - Ok(crate_name) => { - let crate_name = Ident::new(&crate_name, Span::call_site()); - quote!( - #[doc(hidden)] - extern crate #crate_name as proc_macro_runtime_interface; - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - quote!( #err ) - } - } + match crate_name("sp-runtime-interface") { + Ok(FoundCrate::Itself) => quote!(), + Ok(FoundCrate::Name(crate_name)) => { + let crate_name = Ident::new(&crate_name, Span::call_site()); + quote!( + #[doc(hidden)] + extern crate #crate_name as proc_macro_runtime_interface; + ) + }, + Err(e) => { + let err = Error::new(Span::call_site(), e).to_compile_error(); + quote!( #err ) + }, } } /// Generates the access to the `sp-runtime-interface` crate.
pub fn generate_crate_access() -> TokenStream { if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { - quote!( sp_runtime_interface ) + quote!(sp_runtime_interface) } else { - quote!( proc_macro_runtime_interface ) + quote!(proc_macro_runtime_interface) } } @@ -112,26 +116,14 @@ pub fn create_exchangeable_host_function_ident(name: &Ident) -> Ident { /// Create the host function identifier for the given function name. pub fn create_host_function_ident(name: &Ident, version: u32, trait_name: &Ident) -> Ident { Ident::new( - &format!( - "ext_{}_{}_version_{}", - trait_name.to_string().to_snake_case(), - name, - version, - ), + &format!("ext_{}_{}_version_{}", trait_name.to_string().to_snake_case(), name, version), Span::call_site(), ) } /// Create the host function identifier for the given function name. pub fn create_function_ident_with_version(name: &Ident, version: u32) -> Ident { - Ident::new( - &format!( - "{}_version_{}", - name, - version, - ), - Span::call_site(), - ) + Ident::new(&format!("{}_version_{}", name, version), Span::call_site()) } /// Returns the function arguments of the given `Signature`, minus any `self` arguments. @@ -146,10 +138,8 @@ pub fn get_function_arguments<'a>(sig: &'a Signature) -> impl Iterator<Item = P pub fn get_function_argument_types<'a>(sig: &'a Signature) -> impl Iterator<Item = Box<Type>> + 'a { get_function_arguments(sig).map(|pt| pt.ty) } pub fn get_function_argument_types_without_ref<'a>( sig: &'a Signature, ) -> impl Iterator<Item = Box<Type>> + 'a { - get_function_arguments(sig) - .map(|pt| pt.ty) - .map(|ty| match *ty { - Type::Reference(type_ref) => type_ref.elem, - _ => ty, - }) + get_function_arguments(sig).map(|pt| pt.ty).map(|ty| match *ty { + Type::Reference(type_ref) => type_ref.elem, + _ => ty, + }) } /// Returns the function argument names and types, minus any `self`. If any of the arguments @@ -186,11 +174,10 @@ pub fn get_function_argument_types_without_ref<'a>( pub fn get_function_argument_names_and_types_without_ref<'a>( sig: &'a Signature, ) -> impl Iterator<Item = (Box<Pat>, Box<Type>)> + 'a { - get_function_arguments(sig) - .map(|pt| match *pt.ty { - Type::Reference(type_ref) => (pt.pat, type_ref.elem), - _ => (pt.pat, pt.ty), - }) + get_function_arguments(sig).map(|pt| match *pt.ty { + Type::Reference(type_ref) => (pt.pat, type_ref.elem), + _ => (pt.pat, pt.ty), + }) } /// Returns the `&`/`&mut` for all function argument types, minus the `self` arg. If a function @@ -198,23 +185,18 @@ pub fn get_function_argument_names_and_types_without_ref<'a>( pub fn get_function_argument_types_ref_and_mut<'a>( sig: &'a Signature, ) -> impl Iterator<Item = Option<(token::And, Option<token::Mut>)>> + 'a { - get_function_arguments(sig) - .map(|pt| pt.ty) - .map(|ty| match *ty { - Type::Reference(type_ref) => Some((type_ref.and_token, type_ref.mutability)), - _ => None, - }) + get_function_arguments(sig).map(|pt| pt.ty).map(|ty| match *ty { + Type::Reference(type_ref) => Some((type_ref.and_token, type_ref.mutability)), + _ => None, + }) } /// Returns an iterator over all trait methods for the given trait definition. fn get_trait_methods<'a>(trait_def: &'a ItemTrait) -> impl Iterator<Item = &'a TraitItemMethod> { - trait_def - .items - .iter() - .filter_map(|i| match i { - TraitItem::Method(ref method) => Some(method), - _ => None, - }) + trait_def.items.iter().filter_map(|i| match i { + TraitItem::Method(ref method) => Some(method), + _ => None, + }) } /// Parse version attribute. @@ -224,36 +206,34 @@ fn parse_version_attribute(version: &Attribute) -> Result<u32> { let meta = version.parse_meta()?; let err = Err(Error::new( - meta.span(), - "Unexpected `version` attribute. The supported format is `#[version(1)]`", - ) - ); + meta.span(), + "Unexpected `version` attribute.
The supported format is `#[version(1)]`", + )); match meta { - Meta::List(list) => { + Meta::List(list) => if list.nested.len() != 1 { err } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { i.base10_parse() } else { err - } - }, + }, _ => err, } } /// Return item version (`#[version(X)]`) attribute, if present. fn get_item_version(item: &TraitItemMethod) -> Result<Option<u32>> { - item.attrs.iter().find(|attr| attr.path.is_ident("version")) + item.attrs + .iter() + .find(|attr| attr.path.is_ident("version")) .map(|attr| parse_version_attribute(attr)) .transpose() } /// Returns all runtime interface members, with versions. -pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) - -> Result<RuntimeInterface<'a>> -{ +pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) -> Result<RuntimeInterface<'a>> { let mut functions: BTreeMap<syn::Ident, RuntimeInterfaceFunction<'a>> = BTreeMap::new(); for item in get_trait_methods(trait_def) { @@ -261,25 +241,26 @@ pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) let version = get_item_version(item)?.unwrap_or(1); match functions.entry(name.clone()) { - Entry::Vacant(entry) => { entry.insert(RuntimeInterfaceFunction::new(version, item)); }, + Entry::Vacant(entry) => { + entry.insert(RuntimeInterfaceFunction::new(version, item)); + }, Entry::Occupied(mut entry) => { if let Some(existing_item) = entry.get().versions.get(&version) { - let mut err = Error::new( - item.span(), - "Duplicated version attribute", - ); + let mut err = Error::new(item.span(), "Duplicated version attribute"); err.combine(Error::new( existing_item.span(), "Previous version with the same number defined here", )); - return Err(err); + return Err(err) } let interface_item = entry.get_mut(); - if interface_item.latest_version < version { interface_item.latest_version = version; } + if interface_item.latest_version < version { + interface_item.latest_version = version; + } interface_item.versions.insert(version, item); - } + }, } } @@ -289,12 +270,15 @@ pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) if next_expected != *version { return Err(Error::new( item.span(), - format!("Unexpected version attribute: missing version '{}' for this function", next_expected), - )); + format!( + "Unexpected version attribute: missing version '{}' for this function", + next_expected + ), + )) } next_expected += 1; } } Ok(RuntimeInterface { items: functions }) -} \ No newline at end of file +}
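The versioning rules enforced above are easiest to see in use. A sketch of the scheme `get_runtime_interface` accepts, built from the `#[version(1)]` format named in the error message (trait and bodies are illustrative; it compiles only with `sp-runtime-interface` as a dependency):

```rust
use sp_runtime_interface::runtime_interface;

#[runtime_interface]
trait Storage {
    /// Implicitly version 1 (no attribute).
    fn get(key: &[u8]) -> Option<Vec<u8>> {
        let _ = key;
        None
    }

    /// A revised implementation: bare `storage::get` calls dispatch here,
    /// while `storage::get_version_1` stays available for older runtimes.
    #[version(2)]
    fn get(key: &[u8]) -> Option<Vec<u8>> {
        let _ = key;
        None
    }

    // Jumping to `#[version(4)]` next would trip the check above:
    // "Unexpected version attribute: missing version '3' for this function".
}
```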
diff --git a/primitives/runtime-interface/src/host.rs b/primitives/runtime-interface/src/host.rs index 4a01291e68455..a6ea96af90043 100644 --- a/primitives/runtime-interface/src/host.rs +++ b/primitives/runtime-interface/src/host.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index da57cf086beef..40f8e90479f95 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,15 @@ //! Provides implementations for the runtime interface traits. -use crate::{ - RIType, Pointer, pass_by::{PassBy, Codec, Inner, PassByInner, Enum}, - util::{unpack_ptr_and_len, pack_ptr_and_len}, -}; #[cfg(feature = "std")] use crate::host::*; #[cfg(not(feature = "std"))] use crate::wasm::*; +use crate::{ + pass_by::{Codec, Enum, Inner, PassBy, PassByInner}, + util::{pack_ptr_and_len, unpack_ptr_and_len}, + Pointer, RIType, +}; #[cfg(all(not(feature = "std"), not(feature = "disable_target_static_assertions")))] use static_assertions::assert_eq_size; @@ -32,7 +33,7 @@ use static_assertions::assert_eq_size; #[cfg(feature = "std")] use sp_wasm_interface::{FunctionContext, Result}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::{any::TypeId, mem, vec::Vec}; @@ -195,7 +196,7 @@ impl<T: 'static + Decode> FromFFIValue for Vec<T> { let len = len as usize; if len == 0 { - return Vec::new(); + return Vec::new() } let data = unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }; @@ -230,7 +231,8 @@ impl<T: 'static + Decode> FromFFIValue for [T] { if TypeId::of::<T>() == TypeId::of::<u8>() { Ok(unsafe { mem::transmute(vec) }) } else { - Ok(Vec::<T>::decode(&mut &vec[..]).expect("Wasm to host values are encoded correctly; qed")) + Ok(Vec::<T>::decode(&mut &vec[..]) + .expect("Wasm to host values are encoded correctly; qed")) } } } @@ -247,13 +249,11 @@ impl IntoPreallocatedFFIValue for [u8] { let (ptr, len) = unpack_ptr_and_len(allocated); if (len as usize) < self_instance.len() { - Err( - format!( - "Preallocated buffer is not big enough (given {} vs needed {})!", - len, - self_instance.len() - ) - ) + Err(format!( + "Preallocated buffer is not big enough (given {} vs needed {})!", + len, + self_instance.len() + )) } else { context.write_memory(Pointer::new(ptr), &self_instance) } @@ -365,7 +365,12 @@ impl<T: codec::Codec> PassBy for Option<T> { type PassBy = Codec<Self>; } -impl PassBy for (u32, u32, u32, u32) { +#[impl_trait_for_tuples::impl_for_tuples(30)] +#[tuple_types_no_default_trait_bound] +impl PassBy for Tuple +where + Self: codec::Codec, +{ type PassBy = Codec<Self>; } @@ -509,7 +514,8 @@ macro_rules! for_u128_i128 { type SelfInstance = $type; fn from_ffi_value(context: &mut dyn FunctionContext, arg: u32) -> Result<$type> { - let data = context.read_memory(Pointer::new(arg), mem::size_of::<$type>() as u32)?; + let data = + context.read_memory(Pointer::new(arg), mem::size_of::<$type>() as u32)?; let mut res = [0u8; mem::size_of::<$type>()]; res.copy_from_slice(&data); Ok(<$type>::from_le_bytes(res)) @@ -524,7 +530,7 @@ macro_rules! for_u128_i128 { Ok(addr.into()) } } - } + }; } for_u128_i128!(u128);
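These impls lean on `pack_ptr_and_len`/`unpack_ptr_and_len` from the crate's `util` module. A minimal local re-implementation of that packing scheme for illustration, matching the "v.len() 32bit << 32 | v.as_ptr() 32bit" layout in the FFI tables of this patch:

```rust
// The 32-bit length goes into the upper half of the u64,
// the 32-bit pointer into the lower half.
fn pack_ptr_and_len(ptr: u32, len: u32) -> u64 {
    (u64::from(len) << 32) | u64::from(ptr)
}

fn unpack_ptr_and_len(val: u64) -> (u32, u32) {
    ((val & 0xFFFF_FFFF) as u32, (val >> 32) as u32)
}

fn main() {
    let packed = pack_ptr_and_len(0x1000, 5);
    assert_eq!(unpack_ptr_and_len(packed), (0x1000, 5));
}
```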
The FFI function definition is used when calling from -//! the wasm runtime into the node. +//! implement [`RIType`]. The associated type +//! [`FFIType`](./trait.RIType.html#associatedtype.FFIType) is the type that is used in the FFI +//! function to represent the actual type. For example `[T]` is represented by an `u64`. The slice +//! pointer and the length will be mapped to an `u64` value. For more information see this +//! [table](#ffi-type-and-conversion). The FFI function definition is used when calling from the +//! wasm runtime into the node. //! -//! Traits are used to convert from a type to the corresponding [`RIType::FFIType`]. +//! Traits are used to convert from a type to the corresponding +//! [`RIType::FFIType`](./trait.RIType.html#associatedtype.FFIType). //! Depending on where and how a type should be used in a function signature, a combination of the //! following traits need to be implemented: -//! +//! //! 1. Pass as function argument: [`wasm::IntoFFIValue`] and [`host::FromFFIValue`] //! 2. As function return value: [`wasm::FromFFIValue`] and [`host::IntoFFIValue`] //! 3. Pass as mutable function argument: [`host::IntoPreallocatedFFIValue`] @@ -43,7 +45,7 @@ //! The traits are implemented for most of the common types like `[T]`, `Vec`, arrays and //! primitive types. //! -//! For custom types, we provide the [`PassBy`](pass_by::PassBy) trait and strategies that define +//! For custom types, we provide the [`PassBy`](./pass_by#PassBy) trait and strategies that define //! how a type is passed between the wasm runtime and the node. Each strategy also provides a derive //! macro to simplify the implementation. //! @@ -69,7 +71,7 @@ //! ``` //! //! For more information on declaring a runtime interface, see -//! [`#[runtime_interface]`](attr.runtime_interface.html). +//! [`#[runtime_interface]`](./attr.runtime_interface.html). //! //! # FFI type and conversion //! @@ -92,13 +94,14 @@ //! | `&str` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | //! | `&[u8]` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | //! | `Vec` | `u64` | v.len() 32bit << 32 | v.as_ptr() 32bit | -//! | `Vec where T: Encode` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | -//! | `&[T] where T: Encode` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | -//! | `[u8; N]` | `u32` | `v.as_ptr()` | -//! | `*const T` | `u32` | `Identity` | -//! | `Option` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | -//! | [`T where T: PassBy`](pass_by::Inner) | Depends on inner | Depends on inner | -//! | [`T where T: PassBy`](pass_by::Codec) | `u64`| v.len() 32bit << 32 | v.as_ptr() 32bit | +//! | `Vec where T: Encode` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 +//! | e.as_ptr() 32bit | | `&[T] where T: Encode` | `u64` | `let e = +//! v.encode();`

e.len() 32bit << 32 | e.as_ptr() 32bit | | `[u8; N]` | +//! `u32` | `v.as_ptr()` | | `*const T` | `u32` | `Identity` | +//! | `Option` | `u64` | `let e = v.encode();`

e.len() 32bit << 32 | e.as_ptr() +//! 32bit | | [`T where T: PassBy`](./pass_by#Inner) | Depends on inner | +//! Depends on inner | | [`T where T: PassBy`](./pass_by#Codec)|`u64`|v.len() +//! 32bit << 32 |v.as_ptr() 32bit| //! //! `Identity` means that the value is converted directly into the corresponding FFI type. @@ -118,10 +121,10 @@ pub use sp_std; /// Attribute macro for transforming a trait declaration into a runtime interface. /// -/// A runtime interface is a fixed interface between a Substrate compatible runtime and the native -/// node. This interface is callable from a native and a wasm runtime. The macro will generate the -/// corresponding code for the native implementation and the code for calling from the wasm -/// side to the native implementation. +/// A runtime interface is a fixed interface between a Substrate compatible runtime and the +/// native node. This interface is callable from a native and a wasm runtime. The macro will +/// generate the corresponding code for the native implementation and the code for calling from +/// the wasm side to the native implementation. /// /// The macro expects the runtime interface declaration as trait declaration: /// @@ -272,47 +275,47 @@ pub use sp_std; /// The macro supports any kind of argument type, as long as it implements [`RIType`] and the /// required `FromFFIValue`/`IntoFFIValue`. The macro will convert each /// argument to the corresponding FFI representation and will call into the host using this FFI -/// representation. On the host each argument is converted back to the native representation and -/// the native implementation is called. Any return value is handled in the same way. +/// representation. On the host each argument is converted back to the native representation +/// and the native implementation is called. Any return value is handled in the same way. /// /// # Wasm only interfaces /// -/// Some interfaces are only required from within the wasm runtime e.g. the allocator interface. -/// To support this, the macro can be called like `#[runtime_interface(wasm_only)]`. This instructs -/// the macro to make two significant changes to the generated code: +/// Some interfaces are only required from within the wasm runtime e.g. the allocator +/// interface. To support this, the macro can be called like `#[runtime_interface(wasm_only)]`. +/// This instructs the macro to make two significant changes to the generated code: /// /// 1. The generated functions are not callable from the native side. -/// 2. The trait as shown above is not implemented for `Externalities` and is instead implemented -/// for `FunctionExecutor` (from `sp-wasm-interface`). +/// 2. The trait as shown above is not implemented for `Externalities` and is instead +/// implemented for `FunctionExecutor` (from `sp-wasm-interface`). /// /// # Disable tracing /// By addding `no_tracing` to the list of options you can prevent the wasm-side interface from -/// generating the default `sp-tracing`-calls. Note that this is rarely needed but only meant for -/// the case when that would create a circular dependency. You usually _do not_ want to add this -/// flag, as tracing doesn't cost you anything by default anyways (it is added as a no-op) but is -/// super useful for debugging later. -/// +/// generating the default `sp-tracing`-calls. Note that this is rarely needed but only meant +/// for the case when that would create a circular dependency. 
pub use sp_runtime_interface_proc_macro::runtime_interface;

#[doc(hidden)]
#[cfg(feature = "std")]
pub use sp_externalities::{
-	set_and_run_with_externalities, with_externalities, Externalities, ExternalitiesExt, ExtensionStore,
+	set_and_run_with_externalities, with_externalities, ExtensionStore, Externalities,
+	ExternalitiesExt,
};

#[doc(hidden)]
pub use codec;

-pub(crate) mod impls;
#[cfg(feature = "std")]
pub mod host;
+pub(crate) mod impls;
+pub mod pass_by;
#[cfg(any(not(feature = "std"), doc))]
pub mod wasm;
-pub mod pass_by;
mod util;

-pub use util::{unpack_ptr_and_len, pack_ptr_and_len};
+pub use util::{pack_ptr_and_len, unpack_ptr_and_len};

/// Something that can be used by the runtime interface as type to communicate between wasm and the
/// host.
diff --git a/primitives/runtime-interface/src/pass_by.rs b/primitives/runtime-interface/src/pass_by.rs
index 5ccb3a5e96ee1..7324e9363804b 100644
--- a/primitives/runtime-interface/src/pass_by.rs
+++ b/primitives/runtime-interface/src/pass_by.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,7 +20,10 @@
//!
//! [`Codec`], [`Inner`] and [`Enum`] are the provided strategy implementations.

-use crate::{RIType, util::{unpack_ptr_and_len, pack_ptr_and_len}};
+use crate::{
+	util::{pack_ptr_and_len, unpack_ptr_and_len},
+	RIType,
+};

#[cfg(feature = "std")]
use crate::host::*;
@@ -30,15 +33,15 @@ use crate::wasm::*;
#[cfg(feature = "std")]
use sp_wasm_interface::{FunctionContext, Pointer, Result};

-use sp_std::{marker::PhantomData, convert::TryFrom};
+use sp_std::{convert::TryFrom, marker::PhantomData};

#[cfg(not(feature = "std"))]
use sp_std::vec::Vec;

/// Derive macro for implementing [`PassBy`] with the [`Codec`] strategy.
///
-/// This requires that the type implements [`Encode`](codec::Encode) and [`Decode`](codec::Decode)
-/// from `parity-scale-codec`.
+/// This requires that the type implements [`Encode`](codec::Encode) and
+/// [`Decode`](codec::Decode) from `parity-scale-codec`.
///
/// # Example
///
@@ -55,11 +58,12 @@ pub use sp_runtime_interface_proc_macro::PassByCodec;

/// Derive macro for implementing [`PassBy`] with the [`Inner`] strategy.
///
-/// Besides implementing [`PassBy`], this derive also implements the helper trait [`PassByInner`].
+/// Besides implementing [`PassBy`], this derive also implements the helper trait
+/// [`PassByInner`].
///
/// The type is required to be a struct with just one field. The field type needs to implement
-/// the required traits to pass it between the wasm and the native side. (See the runtime interface
-/// crate for more information about these traits.)
+/// the required traits to pass it between the wasm and the native side. (See the runtime
+/// interface crate for more information about these traits.)
///
/// # Example
///
@@ -83,8 +87,8 @@ pub use sp_runtime_interface_proc_macro::PassByInner;

/// Besides implementing [`PassBy`], this derive also implements `TryFrom<u8>` and
/// `From<Self> for u8` for the type.
///
-/// The type is required to be an enum with only unit variants and at maximum `256` variants. Also
-/// it is required that the type implements `Copy`.
+/// The type is required to be an enum with only unit variants and at maximum `256` variants.
+/// Also it is required that the type implements `Copy`.
///
/// # Example
///
@@ -119,18 +123,12 @@ pub trait PassByImpl<T>: RIType {
	/// Convert the given instance to the ffi value.
	///
	/// For more information see: [`crate::host::IntoFFIValue::into_ffi_value`]
-	fn into_ffi_value(
-		instance: T,
-		context: &mut dyn FunctionContext,
-	) -> Result<Self::FFIType>;
+	fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result<Self::FFIType>;

	/// Create `T` from the given ffi value.
	///
	/// For more information see: [`crate::host::FromFFIValue::from_ffi_value`]
-	fn from_ffi_value(
-		context: &mut dyn FunctionContext,
-		arg: Self::FFIType,
-	) -> Result<T>;
+	fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result<T>;
}

/// Something that provides a strategy for passing a type between wasm and the host.
@@ -220,10 +218,7 @@ pub struct Codec<T: codec::Codec>(PhantomData<T>);

#[cfg(feature = "std")]
impl<T: codec::Codec> PassByImpl<T> for Codec<T> {
-	fn into_ffi_value(
-		instance: T,
-		context: &mut dyn FunctionContext,
-	) -> Result<Self::FFIType> {
+	fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result<Self::FFIType> {
		let vec = instance.encode();
		let ptr = context.allocate_memory(vec.len() as u32)?;
		context.write_memory(ptr, &vec)?;
@@ -231,14 +226,10 @@ impl<T: codec::Codec> PassByImpl<T> for Codec<T> {
		Ok(pack_ptr_and_len(ptr.into(), vec.len() as u32))
	}

-	fn from_ffi_value(
-		context: &mut dyn FunctionContext,
-		arg: Self::FFIType,
-	) -> Result<T> {
+	fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result<T> {
		let (ptr, len) = unpack_ptr_and_len(arg);
		let vec = context.read_memory(Pointer::new(ptr), len)?;
-		T::decode(&mut &vec[..])
-			.map_err(|e| format!("Could not decode value from wasm: {}", e.what()))
+		T::decode(&mut &vec[..]).map_err(|e| format!("Could not decode value from wasm: {}", e))
	}
}
@@ -330,35 +321,31 @@ pub struct Inner<T: PassByInner<Inner = I>, I: RIType>(PhantomData<(T, I)>);

#[cfg(feature = "std")]
impl<T: PassByInner<Inner = I>, I: RIType> PassByImpl<T> for Inner<T, I>
-	where I: IntoFFIValue + FromFFIValue<SelfInstance = I>
+where
+	I: IntoFFIValue + FromFFIValue<SelfInstance = I>,
{
-	fn into_ffi_value(
-		instance: T,
-		context: &mut dyn FunctionContext,
-	) -> Result<Self::FFIType> {
+	fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result<Self::FFIType> {
		instance.into_inner().into_ffi_value(context)
	}

-	fn from_ffi_value(
-		context: &mut dyn FunctionContext,
-		arg: Self::FFIType,
-	) -> Result<T> {
+	fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result<T> {
		I::from_ffi_value(context, arg).map(T::from_inner)
	}
}

#[cfg(not(feature = "std"))]
impl<T: PassByInner<Inner = I>, I: RIType> PassByImpl<T> for Inner<T, I>
-	where I: IntoFFIValue + FromFFIValue
+where
+	I: IntoFFIValue + FromFFIValue,
{
	type Owned = I::Owned;

	fn into_ffi_value(instance: &T) -> WrappedFFIValue<Self::FFIType, Self::Owned> {
-		instance.inner().into_ffi_value()
+		instance.inner().into_ffi_value()
	}

	fn from_ffi_value(arg: Self::FFIType) -> T {
-		T::from_inner(I::from_ffi_value(arg))
+		T::from_inner(I::from_ffi_value(arg))
	}
}
@@ -415,17 +402,11 @@ pub struct Enum<T: Copy + Into<u8> + TryFrom<u8>>(PhantomData<T>);

#[cfg(feature = "std")]
impl<T: Copy + Into<u8> + TryFrom<u8>> PassByImpl<T> for Enum<T> {
-	fn into_ffi_value(
-		instance: T,
-		_: &mut dyn FunctionContext,
-	) -> Result<Self::FFIType> {
+	fn into_ffi_value(instance: T, _: &mut dyn FunctionContext) -> Result<Self::FFIType> {
		Ok(instance.into())
	}

-	fn from_ffi_value(
-		_: &mut dyn FunctionContext,
-		arg: Self::FFIType,
-	) -> Result<T> {
+	fn from_ffi_value(_: &mut dyn FunctionContext, arg: Self::FFIType) -> Result<T> {
		T::try_from(arg).map_err(|_| format!("Invalid enum discriminant: {}", arg))
	}
}
diff --git
a/primitives/runtime-interface/src/util.rs b/primitives/runtime-interface/src/util.rs index 604e37e8be397..31045c83c9dcd 100644 --- a/primitives/runtime-interface/src/util.rs +++ b/primitives/runtime-interface/src/util.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,8 +29,8 @@ pub fn pack_ptr_and_len(ptr: u32, len: u32) -> u64 { /// Unpacks an `u64` into the pointer and length. /// /// Runtime API functions return a 64-bit value which encodes a pointer in the least-significant -/// 32-bits and a length in the most-significant 32 bits. This interprets the returned value as a pointer, -/// length tuple. +/// 32-bits and a length in the most-significant 32 bits. This interprets the returned value as a +/// pointer, length tuple. pub fn unpack_ptr_and_len(val: u64) -> (u32, u32) { // The static assertions from above are changed into a runtime check. #[cfg(all(not(feature = "std"), feature = "disable_target_static_assertions"))] diff --git a/primitives/runtime-interface/src/wasm.rs b/primitives/runtime-interface/src/wasm.rs index 5511f60e30d21..28613f81a68b2 100644 --- a/primitives/runtime-interface/src/wasm.rs +++ b/primitives/runtime-interface/src/wasm.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -108,7 +108,7 @@ impl ExchangeableFunction { /// # Returns /// /// Returns the original implementation wrapped in [`RestoreImplementation`]. 
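Stepping back to the `util.rs` hunk above: a standalone sketch of the pointer/length packing it describes (length in the most-significant 32 bits, pointer in the least-significant 32 bits). The names mirror the real helpers, but this is an illustration, not the crate's code:

```rust
// Length goes in the high 32 bits, the pointer in the low 32 bits.
fn pack_ptr_and_len(ptr: u32, len: u32) -> u64 {
    (u64::from(len) << 32) | u64::from(ptr)
}

// The reverse: mask out the pointer, shift down the length.
fn unpack_ptr_and_len(val: u64) -> (u32, u32) {
    ((val & u64::from(u32::MAX)) as u32, (val >> 32) as u32)
}

fn main() {
    let packed = pack_ptr_and_len(0x1000, 16);
    assert_eq!(unpack_ptr_and_len(packed), (0x1000, 16));
}
```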
- pub fn replace_implementation(&'static self, new_impl: T) -> RestoreImplementation { + pub fn replace_implementation(&'static self, new_impl: T) -> RestoreImplementation { if let ExchangeableFunctionState::Replaced = self.0.get().1 { panic!("Trying to replace an already replaced implementation!") } @@ -139,6 +139,7 @@ pub struct RestoreImplementation(&'static ExchangeableFunctio impl Drop for RestoreImplementation { fn drop(&mut self) { - self.0.restore_orig_implementation(self.1.take().expect("Value is only taken on drop; qed")); + self.0 + .restore_orig_implementation(self.1.take().expect("Value is only taken on drop; qed")); } } diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index 59790eb172eb3..3ae5d78b0ef95 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -13,13 +13,13 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../" } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../io" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../core" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/test-wasm-deprecated/build.rs b/primitives/runtime-interface/test-wasm-deprecated/build.rs index 4f111bc993007..a1c4b2d892cfe 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/build.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 174cdb8cdf85a..4a59e4fe8aa5f 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,11 +26,13 @@ use sp_runtime_interface::runtime_interface; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ - supported with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only supported with the flag \ + disabled.", + ) } /// This function is not used, but we require it for the compiler to include `sp-io`. diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index 39c8df976a5ba..7c7d3e10b2d0c 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -13,13 +13,13 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../" } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../io" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../core" } [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/test-wasm/build.rs b/primitives/runtime-interface/test-wasm/build.rs index 4f111bc993007..a1c4b2d892cfe 100644 --- a/primitives/runtime-interface/test-wasm/build.rs +++ b/primitives/runtime-interface/test-wasm/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../../utils/wasm-builder") .export_heap_base() .import_memory() .build() diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 28895df2214d1..72acdd4ff8d6e 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,7 +22,7 @@
use sp_runtime_interface::runtime_interface;

#[cfg(not(feature = "std"))]
-use sp_std::{prelude::*, mem, convert::TryFrom};
+use sp_std::{convert::TryFrom, mem, prelude::*};

use sp_core::{sr25519::Public, wasm_export_functions};

@@ -30,11 +30,13 @@ use sp_core::{sr25519::Public, wasm_export_functions};
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));

+/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics.
#[cfg(feature = "std")]
-/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics.
pub fn wasm_binary_unwrap() -> &'static [u8] {
-	WASM_BINARY.expect("Development wasm binary is not available. Testing is only \
-		supported with the flag disabled.")
+	WASM_BINARY.expect(
+		"Development wasm binary is not available. Testing is only \
+		supported with the flag disabled.",
+	)
}

/// Used in the `test_array_as_mutable_reference` test.
@@ -120,6 +122,16 @@ pub trait TestApi {
	fn test_versionning(&self, data: u32) -> bool {
		data == 42
	}
+
+	/// Returns the input values as tuple.
+	fn return_input_as_tuple(
+		a: Vec<u8>,
+		b: u32,
+		c: Option<Vec<u32>>,
+		d: u8,
+	) -> (Vec<u8>, u32, Option<Vec<u32>>, u8) {
+		(a, b, c, d)
+	}
}

/// This function is not used, but we require it for the compiler to include `sp-io`.
@@ -216,11 +228,11 @@ wasm_export_functions! {
	}

	fn test_u128_i128_as_parameter_and_return_value() {
-		for val in &[u128::max_value(), 1u128, 5000u128, u64::max_value() as u128] {
+		for val in &[u128::MAX, 1u128, 5000u128, u64::MAX as u128] {
			assert_eq!(*val, test_api::get_and_return_u128(*val));
		}

-		for val in &[i128::max_value(), i128::min_value(), 1i128, 5000i128, u64::max_value() as i128] {
+		for val in &[i128::MAX, i128::MIN, 1i128, 5000i128, u64::MAX as i128] {
			assert_eq!(*val, test_api::get_and_return_i128(*val));
		}
	}
@@ -258,4 +270,18 @@ wasm_export_functions!
{ assert!(!test_api::test_versionning(50)); assert!(!test_api::test_versionning(102)); } + + fn test_return_input_as_tuple() { + let a = vec![1, 3, 4, 5]; + let b = 10000; + let c = Some(vec![2, 3]); + let d = 5; + + let res = test_api::return_input_as_tuple(a.clone(), b, c.clone(), d); + + assert_eq!(a, res.0); + assert_eq!(b, res.1); + assert_eq!(c, res.2); + assert_eq!(d, res.3); + } } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index d802f9cb6b39a..377729521fcfe 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -12,13 +12,13 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-runtime-interface = { version = "2.0.0", path = "../" } -sc-executor = { version = "0.8.0", path = "../../../client/executor" } +sp-runtime-interface = { version = "4.0.0-dev", path = "../" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sc-executor-common = { version = "0.10.0-dev", path = "../../../client/executor/common" } sp-runtime-interface-test-wasm = { version = "2.0.0", path = "../test-wasm" } sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0", path = "../test-wasm-deprecated" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-core = { version = "2.0.0", path = "../../core" } -sp-io = { version = "2.0.0", path = "../../io" } -tracing = "0.1.19" +sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } +sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } +sp-io = { version = "4.0.0-dev", path = "../../io" } +tracing = "0.1.25" tracing-core = "0.1.17" diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index c66609daa2f29..82c50fffeb8d7 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,13 +20,16 @@ use sp_runtime_interface::*; -use sp_runtime_interface_test_wasm::{wasm_binary_unwrap, test_api::HostFunctions}; +use sp_runtime_interface_test_wasm::{test_api::HostFunctions, wasm_binary_unwrap}; use sp_runtime_interface_test_wasm_deprecated::wasm_binary_unwrap as wasm_binary_deprecated_unwrap; +use sc_executor_common::runtime_blob::RuntimeBlob; use sp_wasm_interface::HostFunctions as HostFunctionsT; -use sc_executor::CallInWasm; -use std::{collections::HashSet, sync::{Arc, Mutex}}; +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; type TestExternalities = sp_state_machine::TestExternalities; @@ -44,15 +47,17 @@ fn call_wasm_method_with_result( Some(8), host_functions, 8, - ); - executor.call_in_wasm( - binary, None, - method, - &[], - &mut ext_ext, - sp_core::traits::MissingHostFunctions::Disallow, - ).map_err(|e| format!("Failed to execute `{}`: {}", method, e))?; + ); + executor + .uncached_call( + RuntimeBlob::uncompress_if_needed(binary).expect("Failed to parse binary"), + &mut ext_ext, + false, + method, + &[], + ) + .map_err(|e| format!("Failed to execute `{}`: {}", method, e))?; Ok(ext) } @@ -80,7 +85,10 @@ fn test_set_storage() { #[test] fn test_return_value_into_mutable_reference() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_return_value_into_mutable_reference"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_return_value_into_mutable_reference", + ); } #[test] @@ -100,7 +108,8 @@ fn test_return_input_public_key() { #[test] fn host_function_not_found() { - let err = call_wasm_method_with_result::<()>(&wasm_binary_unwrap()[..], "test_return_data").unwrap_err(); + let err = call_wasm_method_with_result::<()>(&wasm_binary_unwrap()[..], "test_return_data") + .unwrap_err(); assert!(err.contains("Instantiation: Export ")); assert!(err.contains(" not found")); @@ -109,41 +118,56 @@ fn host_function_not_found() { #[test] #[should_panic(expected = "Invalid utf8 data provided")] fn test_invalid_utf8_data_should_return_an_error() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_invalid_utf8_data_should_return_an_error"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_invalid_utf8_data_should_return_an_error", + ); } #[test] fn test_overwrite_native_function_implementation() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_overwrite_native_function_implementation"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_overwrite_native_function_implementation", + ); } #[test] fn test_u128_i128_as_parameter_and_return_value() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_u128_i128_as_parameter_and_return_value"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_u128_i128_as_parameter_and_return_value", + ); } #[test] fn test_vec_return_value_memory_is_freed() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_vec_return_value_memory_is_freed"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_vec_return_value_memory_is_freed", + ); } #[test] fn test_encoded_return_value_memory_is_freed() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_encoded_return_value_memory_is_freed"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_encoded_return_value_memory_is_freed", + ); } #[test] fn test_array_return_value_memory_is_freed() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_array_return_value_memory_is_freed"); + call_wasm_method::( + 
&wasm_binary_unwrap()[..], + "test_array_return_value_memory_is_freed", + ); } #[test] fn test_versionining_with_new_host_works() { // We call to the new wasm binary with new host function. - call_wasm_method::( - &wasm_binary_unwrap()[..], - "test_versionning_works", - ); + call_wasm_method::(&wasm_binary_unwrap()[..], "test_versionning_works"); // we call to the old wasm binary with a new host functions // old versions of host functions should be called and test should be ok! @@ -156,7 +180,7 @@ fn test_versionining_with_new_host_works() { #[test] fn test_tracing() { use std::fmt; - use tracing::{span::Id as SpanId}; + use tracing::span::Id as SpanId; use tracing_core::field::{Field, Visit}; #[derive(Clone)] @@ -164,9 +188,8 @@ fn test_tracing() { struct FieldConsumer(&'static str, Option); impl Visit for FieldConsumer { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - if field.name() == self.0 { + if field.name() == self.0 { self.1 = Some(format!("{:?}", value)) } } @@ -178,14 +201,16 @@ fn test_tracing() { } impl tracing::subscriber::Subscriber for TracingSubscriber { - fn enabled(&self, _: &tracing::Metadata) -> bool { true } + fn enabled(&self, _: &tracing::Metadata) -> bool { + true + } fn new_span(&self, span: &tracing::span::Attributes) -> tracing::Id { let mut inner = self.0.lock().unwrap(); let id = SpanId::from_u64((inner.spans.len() + 1) as _); let mut f = FieldConsumer("name", None); span.record(&mut f); - inner.spans.insert(f.1.unwrap_or_else(||span.metadata().name().to_owned())); + inner.spans.insert(f.1.unwrap_or_else(|| span.metadata().name().to_owned())); id } @@ -208,4 +233,9 @@ fn test_tracing() { let inner = subscriber.0.lock().unwrap(); assert!(inner.spans.contains("return_input_version_1")); -} \ No newline at end of file +} + +#[test] +fn test_return_input_as_tuple() { + call_wasm_method::(&wasm_binary_unwrap()[..], "test_return_input_as_tuple"); +} diff --git a/primitives/runtime-interface/tests/ui.rs b/primitives/runtime-interface/tests/ui.rs index 2f7fd6d06bcd3..5a6025f463af0 100644 --- a/primitives/runtime-interface/tests/ui.rs +++ b/primitives/runtime-interface/tests/ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ use std::env; #[test] fn ui() { // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
- env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 6579a17c77fec..5ac5bcf1963e0 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,26 +15,29 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "2.0.0", default-features = false, path = "../arithmetic" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../io" } -log = { version = "0.4.8", optional = true } -paste = "0.1.6" +serde = { version = "1.0.126", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../arithmetic" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../io" } +log = { version = "0.4.14", default-features = false } +paste = "1.0" rand = { version = "0.7.2", optional = true } -impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +impl-trait-for-tuples = "0.2.1" +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } [dev-dependencies] -serde_json = "1.0.41" +serde_json = "1.0.68" rand = "0.7.2" -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.10.0-dev", path = "../state-machine" } +sp-api = { version = "4.0.0-dev", path = "../api" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } [features] bench = [] @@ -44,13 +47,13 @@ std = [ "sp-application-crypto/std", "sp-arithmetic/std", "codec/std", - "log", + "scale-info/std", + "log/std", "sp-core/std", "rand", "sp-std/std", "sp-io/std", "serde", - "sp-inherents/std", "parity-util-mem/std", "hash256-std-hasher/std", "either/use_std", diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index 27eb89a76947e..d6bd94c2bff70 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -1,6 +1,6 @@ // 
This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,14 @@ //! Provides some utilities to define a piecewise linear function. -use crate::{Perbill, traits::{AtLeast32BitUnsigned, SaturatedConversion}}; +use crate::{ + traits::{AtLeast32BitUnsigned, SaturatedConversion}, + Perbill, +}; use core::ops::Sub; /// Piecewise Linear function in [0, 1] -> [0, 1]. -#[derive(PartialEq, Eq, sp_core::RuntimeDebug)] +#[derive(PartialEq, Eq, sp_core::RuntimeDebug, scale_info::TypeInfo)] pub struct PiecewiseLinear<'a> { /// Array of points. Must be in order from the lowest abscissas to the highest. pub points: &'a [(Perbill, Perbill)], @@ -29,23 +32,23 @@ pub struct PiecewiseLinear<'a> { pub maximum: Perbill, } -fn abs_sub + Clone>(a: N, b: N) -> N where { +fn abs_sub + Clone>(a: N, b: N) -> N where { a.clone().max(b.clone()) - a.min(b) } impl<'a> PiecewiseLinear<'a> { /// Compute `f(n/d)*d` with `n <= d`. This is useful to avoid loss of precision. - pub fn calculate_for_fraction_times_denominator(&self, n: N, d: N) -> N where - N: AtLeast32BitUnsigned + Clone + pub fn calculate_for_fraction_times_denominator(&self, n: N, d: N) -> N + where + N: AtLeast32BitUnsigned + Clone, { let n = n.min(d.clone()); - if self.points.len() == 0 { + if self.points.is_empty() { return N::zero() } - let next_point_index = self.points.iter() - .position(|p| n < p.0 * d.clone()); + let next_point_index = self.points.iter().position(|p| n < p.0 * d.clone()); let (prev, next) = if let Some(next_point_index) = next_point_index { if let Some(previous_point_index) = next_point_index.checked_sub(1) { @@ -80,7 +83,8 @@ impl<'a> PiecewiseLinear<'a> { // This is guaranteed not to overflow on whatever values nor lose precision. // `q` must be superior to zero. 
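Before the definition that follows, a simplified `u64`-only sketch of the overflow-avoiding split it uses: `value * p / q` is computed as `(value / q) * p` plus `(value % q) * p / q`, where the remainder term always fits in `u64` because both factors fit in `u32`. This is an illustration, not the generic implementation from the diff:

```rust
// Saturating `value * p / q` without widening to u128.
fn mul_rational_saturating(value: u64, p: u32, q: u32) -> u64 {
    let q = u64::from(q.max(1));
    // Can saturate when p > q.
    let div_part = (value / q).saturating_mul(u64::from(p));
    // (value % q) < q <= u32::MAX, so this product cannot overflow u64.
    let rem_part = (value % q) * u64::from(p) / q;
    div_part.saturating_add(rem_part)
}

fn main() {
    // 10 * 3 / 4 = 7 (rounded down).
    assert_eq!(mul_rational_saturating(10, 3, 4), 7);
}
```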
fn multiply_by_rational_saturating(value: N, p: u32, q: u32) -> N - where N: AtLeast32BitUnsigned + Clone +where + N: AtLeast32BitUnsigned + Clone, { let q = q.max(1); @@ -112,17 +116,14 @@ fn test_multiply_by_rational_saturating() { for value in 0..=div { for p in 0..=div { for q in 1..=div { - let value: u64 = (value as u128 * u64::max_value() as u128 / div as u128) - .try_into().unwrap(); - let p = (p as u64 * u32::max_value() as u64 / div as u64) - .try_into().unwrap(); - let q = (q as u64 * u32::max_value() as u64 / div as u64) - .try_into().unwrap(); + let value: u64 = + (value as u128 * u64::MAX as u128 / div as u128).try_into().unwrap(); + let p = (p as u64 * u32::MAX as u64 / div as u64).try_into().unwrap(); + let q = (q as u64 * u32::MAX as u64 / div as u64).try_into().unwrap(); assert_eq!( multiply_by_rational_saturating(value, p, q), - (value as u128 * p as u128 / q as u128) - .try_into().unwrap_or(u64::max_value()) + (value as u128 * p as u128 / q as u128).try_into().unwrap_or(u64::MAX) ); } } @@ -153,10 +154,8 @@ fn test_calculate_for_fraction_times_denominator() { let div = 100u32; for d in 0..=div { for n in 0..=d { - let d: u64 = (d as u128 * u64::max_value() as u128 / div as u128) - .try_into().unwrap(); - let n: u64 = (n as u128 * u64::max_value() as u128 / div as u128) - .try_into().unwrap(); + let d: u64 = (d as u128 * u64::MAX as u128 / div as u128).try_into().unwrap(); + let n: u64 = (n as u128 * u64::MAX as u128 / div as u128).try_into().unwrap(); let res = curve.calculate_for_fraction_times_denominator(n, d); let expected = formal_calculate_for_fraction_times_denominator(n, d); diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index 4a758b7416dec..21a01933bc691 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,14 +23,16 @@ use std::fmt; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use sp_std::prelude::*; -use sp_core::RuntimeDebug; -use crate::codec::{Codec, Encode, Decode}; -use crate::traits::{ - self, Member, Block as BlockT, Header as HeaderT, MaybeSerialize, MaybeMallocSizeOf, - NumberFor, +use crate::{ + codec::{Codec, Decode, Encode}, + traits::{ + self, Block as BlockT, Header as HeaderT, MaybeMallocSizeOf, MaybeSerialize, Member, + NumberFor, + }, + Justifications, }; -use crate::Justification; +use sp_core::RuntimeDebug; +use sp_std::prelude::*; /// Something to identify a block. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] @@ -54,6 +56,19 @@ impl BlockId { pub fn number(number: NumberFor) -> Self { BlockId::Number(number) } + + /// Check if this block ID refers to the pre-genesis state. + pub fn is_pre_genesis(&self) -> bool { + match self { + BlockId::Hash(hash) => hash == &Default::default(), + BlockId::Number(_) => false, + } + } + + /// Create a block ID for a pre-genesis state. + pub fn pre_genesis() -> Self { + BlockId::Hash(Default::default()) + } } impl Copy for BlockId {} @@ -112,5 +127,5 @@ pub struct SignedBlock { /// Full block. pub block: Block, /// Block justification. 
- pub justification: Option, + pub justifications: Option, } diff --git a/primitives/runtime/src/generic/checked_extrinsic.rs b/primitives/runtime/src/generic/checked_extrinsic.rs index f355308a59f97..b2044a6cf74fd 100644 --- a/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/primitives/runtime/src/generic/checked_extrinsic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,11 +18,13 @@ //! Generic implementation of an extrinsic that has passed the verification //! stage. -use crate::traits::{ - self, Member, MaybeDisplay, SignedExtension, Dispatchable, DispatchInfoOf, PostDispatchInfoOf, - ValidateUnsigned, +use crate::{ + traits::{ + self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, + SignedExtension, ValidateUnsigned, + }, + transaction_validity::{TransactionSource, TransactionValidity}, }; -use crate::transaction_validity::{TransactionValidity, TransactionSource}; /// Definition of something that the external world might want to say; its /// existence implies that it has been checked and is good, particularly with @@ -37,12 +39,11 @@ pub struct CheckedExtrinsic { pub function: Call, } -impl traits::Applyable for - CheckedExtrinsic +impl traits::Applyable for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, - Extra: SignedExtension, + Call: Member + Dispatchable, + Extra: SignedExtension, Origin: From>, { type Call = Call; @@ -64,7 +65,7 @@ where } } - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index ec0963e5ba002..87af9bc77a5fa 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,12 +22,18 @@ use serde::{Deserialize, Serialize}; use sp_std::prelude::*; -use crate::ConsensusEngineId; -use crate::codec::{Decode, Encode, Input, Error}; +use crate::{ + codec::{Decode, Encode, Error, Input}, + scale_info::{ + build::{Fields, Variants}, + meta_type, Path, Type, TypeInfo, TypeParameter, + }, + ConsensusEngineId, +}; use sp_core::{ChangesTrieConfiguration, RuntimeDebug}; /// Generic header digest. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] pub struct Digest { /// A list of logs in the digest. @@ -40,7 +46,7 @@ pub struct Digest { impl Default for Digest { fn default() -> Self { - Digest { logs: Vec::new(), } + Self { logs: Vec::new() } } } @@ -61,21 +67,22 @@ impl Digest { } /// Get reference to the first digest item that matches the passed predicate. 
- pub fn log) -> Option<&T>>(&self, predicate: F) -> Option<&T> { - self.logs().iter() - .filter_map(predicate) - .next() + pub fn log) -> Option<&T>>( + &self, + predicate: F, + ) -> Option<&T> { + self.logs().iter().find_map(predicate) } /// Get a conversion of the first digest item that successfully converts using the function. - pub fn convert_first) -> Option>(&self, predicate: F) -> Option { - self.logs().iter() - .filter_map(predicate) - .next() + pub fn convert_first) -> Option>( + &self, + predicate: F, + ) -> Option { + self.logs().iter().find_map(predicate) } } - /// Digest item that is able to encode/decode 'system' digest items and /// provide opaque access to other items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] @@ -115,38 +122,47 @@ pub enum DigestItem { /// Some other thing. Unsupported and experimental. Other(Vec), + + /// An indication for the light clients that the runtime execution + /// environment is updated. + /// + /// Currently this is triggered when: + /// 1. Runtime code blob is changed or + /// 2. `heap_pages` value is changed. + RuntimeEnvironmentUpdated, } /// Available changes trie signals. -#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(Debug, parity_util_mem::MallocSizeOf))] pub enum ChangesTrieSignal { /// New changes trie configuration is enacted, starting from **next block**. /// /// The block that emits this signal will contain changes trie (CT) that covers /// blocks range [BEGIN; current block], where BEGIN is (order matters): - /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created - /// using current configuration AND the last top level digest CT has been created - /// at block LAST_TOP_LEVEL_DIGEST_BLOCK; - /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change - /// before and the last configuration change happened at block - /// LAST_CONFIGURATION_CHANGE_BLOCK; + /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created using current + /// configuration AND the last top level digest CT has been created at block + /// LAST_TOP_LEVEL_DIGEST_BLOCK; + /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change before and + /// the last configuration change happened at block LAST_CONFIGURATION_CHANGE_BLOCK; /// - 1 otherwise. 
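A hedged usage sketch of the `log` helper refactored above, mirroring the crate's own tests (which use `u32` as the `Hash` type); the construction of `digest` here is illustrative:

```rust
use sp_runtime::generic::{Digest, DigestItem};

fn main() {
    let digest = Digest::<u32> {
        logs: vec![DigestItem::ChangesTrieRoot(4), DigestItem::Other(vec![1, 2, 3])],
    };
    // `log` now uses `find_map`: it returns the first log the predicate maps to `Some`.
    assert_eq!(digest.log(DigestItem::as_changes_trie_root), Some(&4));
}
```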
NewConfiguration(Option), } #[cfg(feature = "std")] impl serde::Serialize for DigestItem { - fn serialize(&self, seq: S) -> Result where S: serde::Serializer { - self.using_encoded(|bytes| { - sp_core::bytes::serialize(bytes, seq) - }) + fn serialize(&self, seq: S) -> Result + where + S: serde::Serializer, + { + self.using_encoded(|bytes| sp_core::bytes::serialize(bytes, seq)) } } #[cfg(feature = "std")] impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { - fn deserialize(de: D) -> Result where + fn deserialize(de: D) -> Result + where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; @@ -155,6 +171,69 @@ impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { } } +impl TypeInfo for DigestItem +where + Hash: TypeInfo + 'static, +{ + type Identity = Self; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("DigestItem", module_path!())) + .type_params(vec![TypeParameter::new("Hash", Some(meta_type::()))]) + .variant( + Variants::new() + .variant("ChangesTrieRoot", |v| { + v.index(DigestItemType::ChangesTrieRoot as u8) + .fields(Fields::unnamed().field(|f| f.ty::().type_name("Hash"))) + }) + .variant("PreRuntime", |v| { + v.index(DigestItemType::PreRuntime as u8).fields( + Fields::unnamed() + .field(|f| { + f.ty::().type_name("ConsensusEngineId") + }) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Consensus", |v| { + v.index(DigestItemType::Consensus as u8).fields( + Fields::unnamed() + .field(|f| { + f.ty::().type_name("ConsensusEngineId") + }) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Seal", |v| { + v.index(DigestItemType::Seal as u8).fields( + Fields::unnamed() + .field(|f| { + f.ty::().type_name("ConsensusEngineId") + }) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("ChangesTrieSignal", |v| { + v.index(DigestItemType::ChangesTrieSignal as u8).fields( + Fields::unnamed().field(|f| { + f.ty::().type_name("ChangesTrieSignal") + }), + ) + }) + .variant("Other", |v| { + v.index(DigestItemType::Other as u8).fields( + Fields::unnamed().field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("RuntimeEnvironmentUpdated", |v| { + v.index(DigestItemType::RuntimeEnvironmentUpdated as u8) + .fields(Fields::unit()) + }), + ) + } +} + /// A 'referencing view' for digest item. Does not own its contents. Used by /// final runtime implementations for encoding/decoding its log items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] @@ -180,6 +259,8 @@ pub enum DigestItemRef<'a, Hash: 'a> { ChangesTrieSignal(&'a ChangesTrieSignal), /// Any 'non-system' digest item, opaque to the native code. Other(&'a Vec), + /// Runtime code or heap pages updated. + RuntimeEnvironmentUpdated, } /// Type of the digest item. Used to gain explicit control over `DigestItem` encoding @@ -195,6 +276,7 @@ pub enum DigestItemType { Seal = 5, PreRuntime = 6, ChangesTrieSignal = 7, + RuntimeEnvironmentUpdated = 8, } /// Type of a digest item that contains raw data; this also names the consensus engine ID where @@ -213,14 +295,15 @@ pub enum OpaqueDigestItemId<'a> { impl DigestItem { /// Returns a 'referencing view' for this digest item. 
- pub fn dref<'a>(&'a self) -> DigestItemRef<'a, Hash> { + pub fn dref(&self) -> DigestItemRef { match *self { - DigestItem::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), - DigestItem::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), - DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), - DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), - DigestItem::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), - DigestItem::Other(ref v) => DigestItemRef::Other(v), + Self::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), + Self::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), + Self::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), + Self::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), + Self::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), + Self::Other(ref v) => DigestItemRef::Other(v), + Self::RuntimeEnvironmentUpdated => DigestItemRef::RuntimeEnvironmentUpdated, } } @@ -251,10 +334,7 @@ impl DigestItem { /// Returns Some if `self` is a `DigestItem::Other`. pub fn as_other(&self) -> Option<&[u8]> { - match *self { - DigestItem::Other(ref v) => Some(&v[..]), - _ => None, - } + self.dref().as_other() } /// Returns the opaque data contained in the item if `Some` if this entry has the id given. @@ -267,6 +347,29 @@ impl DigestItem { pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { self.dref().try_to::(id) } + + /// Try to match this to a `Self::Seal`, check `id` matches and decode it. + /// + /// Returns `None` if this isn't a seal item, the `id` doesn't match or when the decoding fails. + pub fn seal_try_to(&self, id: &ConsensusEngineId) -> Option { + self.dref().seal_try_to(id) + } + + /// Try to match this to a `Self::Consensus`, check `id` matches and decode it. + /// + /// Returns `None` if this isn't a consensus item, the `id` doesn't match or + /// when the decoding fails. + pub fn consensus_try_to(&self, id: &ConsensusEngineId) -> Option { + self.dref().consensus_try_to(id) + } + + /// Try to match this to a `Self::PreRuntime`, check `id` matches and decode it. + /// + /// Returns `None` if this isn't a pre-runtime item, the `id` doesn't match or + /// when the decoding fails. 
+ pub fn pre_runtime_try_to(&self, id: &ConsensusEngineId) -> Option { + self.dref().pre_runtime_try_to(id) + } } impl Encode for DigestItem { @@ -282,27 +385,23 @@ impl Decode for DigestItem { fn decode(input: &mut I) -> Result { let item_type: DigestItemType = Decode::decode(input)?; match item_type { - DigestItemType::ChangesTrieRoot => Ok(DigestItem::ChangesTrieRoot( - Decode::decode(input)?, - )), + DigestItemType::ChangesTrieRoot => Ok(Self::ChangesTrieRoot(Decode::decode(input)?)), DigestItemType::PreRuntime => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::PreRuntime(vals.0, vals.1)) + Ok(Self::PreRuntime(vals.0, vals.1)) }, DigestItemType::Consensus => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::Consensus(vals.0, vals.1)) - } + Ok(Self::Consensus(vals.0, vals.1)) + }, DigestItemType::Seal => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::Seal(vals.0, vals.1)) + Ok(Self::Seal(vals.0, vals.1)) }, - DigestItemType::ChangesTrieSignal => Ok(DigestItem::ChangesTrieSignal( - Decode::decode(input)?, - )), - DigestItemType::Other => Ok(DigestItem::Other( - Decode::decode(input)?, - )), + DigestItemType::ChangesTrieSignal => + Ok(Self::ChangesTrieSignal(Decode::decode(input)?)), + DigestItemType::Other => Ok(Self::Other(Decode::decode(input)?)), + DigestItemType::RuntimeEnvironmentUpdated => Ok(Self::RuntimeEnvironmentUpdated), } } } @@ -311,7 +410,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `ChangesTrieRoot`. pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { match *self { - DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), + Self::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), _ => None, } } @@ -319,7 +418,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `PreRuntime` pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { - DigestItemRef::PreRuntime(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), + Self::PreRuntime(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), _ => None, } } @@ -327,7 +426,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `Consensus` pub fn as_consensus(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { - DigestItemRef::Consensus(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), + Self::Consensus(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), _ => None, } } @@ -335,7 +434,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `Seal` pub fn as_seal(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { - DigestItemRef::Seal(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), + Self::Seal(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), _ => None, } } @@ -343,7 +442,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `ChangesTrieSignal`. 
pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { match *self { - DigestItemRef::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), + Self::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), _ => None, } } @@ -351,7 +450,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `PreRuntime` pub fn as_other(&self) -> Option<&'a [u8]> { match *self { - DigestItemRef::Other(ref data) => Some(data), + Self::Other(ref data) => Some(data), _ => None, } } @@ -360,11 +459,12 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// return the opaque data it contains. pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&'a [u8]> { match (id, self) { - (OpaqueDigestItemId::Consensus(w), &DigestItemRef::Consensus(v, s)) | - (OpaqueDigestItemId::Seal(w), &DigestItemRef::Seal(v, s)) | - (OpaqueDigestItemId::PreRuntime(w), &DigestItemRef::PreRuntime(v, s)) - if v == w => Some(&s[..]), - (OpaqueDigestItemId::Other, &DigestItemRef::Other(s)) => Some(&s[..]), + (OpaqueDigestItemId::Consensus(w), &Self::Consensus(v, s)) | + (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) | + (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) + if v == w => + Some(&s[..]), + (OpaqueDigestItemId::Other, &Self::Other(s)) => Some(&s[..]), _ => None, } } @@ -374,6 +474,38 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { self.try_as_raw(id).and_then(|mut x| Decode::decode(&mut x).ok()) } + + /// Try to match this to a `Self::Seal`, check `id` matches and decode it. + /// + /// Returns `None` if this isn't a seal item, the `id` doesn't match or when the decoding fails. + pub fn seal_try_to(&self, id: &ConsensusEngineId) -> Option { + match self { + Self::Seal(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), + _ => None, + } + } + + /// Try to match this to a `Self::Consensus`, check `id` matches and decode it. + /// + /// Returns `None` if this isn't a consensus item, the `id` doesn't match or + /// when the decoding fails. + pub fn consensus_try_to(&self, id: &ConsensusEngineId) -> Option { + match self { + Self::Consensus(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), + _ => None, + } + } + + /// Try to match this to a `Self::PreRuntime`, check `id` matches and decode it. + /// + /// Returns `None` if this isn't a pre-runtime item, the `id` doesn't match or + /// when the decoding fails. 
+ pub fn pre_runtime_try_to(&self, id: &ConsensusEngineId) -> Option { + match self { + Self::PreRuntime(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), + _ => None, + } + } } impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { @@ -381,30 +513,33 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { let mut v = Vec::new(); match *self { - DigestItemRef::ChangesTrieRoot(changes_trie_root) => { + Self::ChangesTrieRoot(changes_trie_root) => { DigestItemType::ChangesTrieRoot.encode_to(&mut v); changes_trie_root.encode_to(&mut v); }, - DigestItemRef::Consensus(val, data) => { + Self::Consensus(val, data) => { DigestItemType::Consensus.encode_to(&mut v); (val, data).encode_to(&mut v); }, - DigestItemRef::Seal(val, sig) => { + Self::Seal(val, sig) => { DigestItemType::Seal.encode_to(&mut v); (val, sig).encode_to(&mut v); }, - DigestItemRef::PreRuntime(val, data) => { + Self::PreRuntime(val, data) => { DigestItemType::PreRuntime.encode_to(&mut v); (val, data).encode_to(&mut v); }, - DigestItemRef::ChangesTrieSignal(changes_trie_signal) => { + Self::ChangesTrieSignal(changes_trie_signal) => { DigestItemType::ChangesTrieSignal.encode_to(&mut v); changes_trie_signal.encode_to(&mut v); }, - DigestItemRef::Other(val) => { + Self::Other(val) => { DigestItemType::Other.encode_to(&mut v); val.encode_to(&mut v); }, + Self::RuntimeEnvironmentUpdated => { + DigestItemType::RuntimeEnvironmentUpdated.encode_to(&mut v); + }, } v @@ -415,7 +550,7 @@ impl ChangesTrieSignal { /// Try to cast this signal to NewConfiguration. pub fn as_new_configuration(&self) -> Option<&Option> { match self { - ChangesTrieSignal::NewConfiguration(config) => Some(config), + Self::NewConfiguration(config) => Some(config), } } } @@ -432,13 +567,61 @@ mod tests { logs: vec![ DigestItem::ChangesTrieRoot(4), DigestItem::Other(vec![1, 2, 3]), - DigestItem::Seal(*b"test", vec![1, 2, 3]) + DigestItem::Seal(*b"test", vec![1, 2, 3]), ], }; assert_eq!( - ::serde_json::to_string(&digest).unwrap(), + serde_json::to_string(&digest).unwrap(), r#"{"logs":["0x0204000000","0x000c010203","0x05746573740c010203"]}"# ); } + + #[test] + fn digest_item_type_info() { + let type_info = DigestItem::::type_info(); + let variants = if let scale_info::TypeDef::Variant(variant) = type_info.type_def() { + variant.variants() + } else { + panic!("Should be a TypeDef::TypeDefVariant") + }; + + // ensure that all variants are covered by manual TypeInfo impl + let check = |digest_item_type: DigestItemType| { + let (variant_name, digest_item) = match digest_item_type { + DigestItemType::Other => ("Other", DigestItem::::Other(Default::default())), + DigestItemType::ChangesTrieRoot => + ("ChangesTrieRoot", DigestItem::ChangesTrieRoot(Default::default())), + DigestItemType::Consensus => + ("Consensus", DigestItem::Consensus(Default::default(), Default::default())), + DigestItemType::Seal => + ("Seal", DigestItem::Seal(Default::default(), Default::default())), + DigestItemType::PreRuntime => + ("PreRuntime", DigestItem::PreRuntime(Default::default(), Default::default())), + DigestItemType::ChangesTrieSignal => ( + "ChangesTrieSignal", + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( + Default::default(), + )), + ), + DigestItemType::RuntimeEnvironmentUpdated => + ("RuntimeEnvironmentUpdated", DigestItem::RuntimeEnvironmentUpdated), + }; + let encoded = digest_item.encode(); + let variant = variants + .iter() + .find(|v| v.name() == &variant_name) + .expect(&format!("Variant {} not found", variant_name)); + + assert_eq!(encoded[0], 
variant.index()) + }; + + check(DigestItemType::Other); + check(DigestItemType::ChangesTrieRoot); + check(DigestItemType::Consensus); + check(DigestItemType::Seal); + check(DigestItemType::PreRuntime); + check(DigestItemType::ChangesTrieSignal); + check(DigestItemType::RuntimeEnvironmentUpdated); + } } diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index 9bfab517a92ca..9d831b679c5e4 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,9 +18,9 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use crate::codec::{Decode, Encode, Input, Output, Error}; +use crate::codec::{Decode, Encode, Error, Input, Output}; /// Era period pub type Period = u64; @@ -47,74 +47,68 @@ pub enum Era { Mortal(Period, Phase), } -/* - * E.g. with period == 4: - * 0 10 20 30 40 - * 0123456789012345678901234567890123456789012 - * |...| - * authored -/ \- expiry - * phase = 1 - * n = Q(current - phase, period) + phase - */ +// E.g. with period == 4: +// 0 10 20 30 40 +// 0123456789012345678901234567890123456789012 +// |...| +// authored -/ \- expiry +// phase = 1 +// n = Q(current - phase, period) + phase impl Era { - /// Create a new era based on a period (which should be a power of two between 4 and 65536 inclusive) - /// and a block number on which it should start (or, for long periods, be shortly after the start). + /// Create a new era based on a period (which should be a power of two between 4 and 65536 + /// inclusive) and a block number on which it should start (or, for long periods, be shortly + /// after the start). /// /// If using `Era` in the context of `FRAME` runtime, make sure that `period` /// does not exceed `BlockHashCount` parameter passed to `system` module, since that /// prunes old blocks and renders transactions immediately invalid. pub fn mortal(period: u64, current: u64) -> Self { - let period = period.checked_next_power_of_two() - .unwrap_or(1 << 16) - .max(4) - .min(1 << 16); + let period = period.checked_next_power_of_two().unwrap_or(1 << 16).max(4).min(1 << 16); let phase = current % period; let quantize_factor = (period >> 12).max(1); let quantized_phase = phase / quantize_factor * quantize_factor; - Era::Mortal(period, quantized_phase) + Self::Mortal(period, quantized_phase) } /// Create an "immortal" transaction. pub fn immortal() -> Self { - Era::Immortal + Self::Immortal } /// `true` if this is an immortal transaction. pub fn is_immortal(&self) -> bool { - match self { - Era::Immortal => true, - _ => false, - } + matches!(self, Self::Immortal) } /// Get the block number of the start of the era whose properties this object /// describes that `current` belongs to. pub fn birth(self, current: u64) -> u64 { match self { - Era::Immortal => 0, - Era::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, + Self::Immortal => 0, + Self::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, } } /// Get the block number of the first block at which the era has ended. 
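A worked, standalone sketch of the mortal-era arithmetic documented above (reimplemented for illustration; not the crate's API):

```rust
// Period is rounded up to a power of two in [4, 65536]; the phase is the
// start block modulo the period, quantized for large periods.
fn mortal(period: u64, current: u64) -> (u64, u64) {
    let period = period.checked_next_power_of_two().unwrap_or(1 << 16).max(4).min(1 << 16);
    let phase = current % period;
    let quantize_factor = (period >> 12).max(1);
    (period, phase / quantize_factor * quantize_factor)
}

fn main() {
    // Matches `Era::mortal(64, 42)` from the tests below: quantization is a no-op here.
    let (period, phase) = mortal(64, 42);
    assert_eq!((period, phase), (64, 42));
    // birth(current) = (current.max(phase) - phase) / period * period + phase
    let birth = (42u64.max(phase) - phase) / period * period + phase;
    // death(current) = birth(current) + period
    assert_eq!((birth, birth + period), (42, 106));
}
```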
pub fn death(self, current: u64) -> u64 { match self { - Era::Immortal => u64::max_value(), - Era::Mortal(period, _) => self.birth(current) + period, + Self::Immortal => u64::MAX, + Self::Mortal(period, _) => self.birth(current) + period, } } } impl Encode for Era { - fn encode_to(&self, output: &mut T) { + fn encode_to(&self, output: &mut T) { match self { - Era::Immortal => output.push_byte(0), - Era::Mortal(period, phase) => { + Self::Immortal => output.push_byte(0), + Self::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | ((phase / quantize_factor) << 4) as u16; - output.push(&encoded); - } + let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | + ((phase / quantize_factor) << 4) as u16; + encoded.encode_to(output); + }, } } } @@ -125,14 +119,14 @@ impl Decode for Era { fn decode(input: &mut I) -> Result { let first = input.read_byte()?; if first == 0 { - Ok(Era::Immortal) + Ok(Self::Immortal) } else { let encoded = first as u64 + ((input.read_byte()? as u64) << 8); let period = 2 << (encoded % (1 << 4)); let quantize_factor = (period >> 12).max(1); let phase = (encoded >> 4) * quantize_factor; if period >= 4 && phase < period { - Ok(Era::Mortal(period, phase)) + Ok(Self::Mortal(period, phase)) } else { Err("Invalid period and phase".into()) } @@ -140,6 +134,50 @@ impl Decode for Era { } } +/// Add Mortal{N}(u8) variants with the given indices, to describe custom encoding. +macro_rules! mortal_variants { + ($variants:ident, $($index:literal),* ) => { + $variants + $( + .variant(concat!(stringify!(Mortal), stringify!($index)), |v| v + .index($index) + .fields(scale_info::build::Fields::unnamed().field(|f| f.ty::())) + ) + )* + } +} + +impl scale_info::TypeInfo for Era { + type Identity = Self; + + fn type_info() -> scale_info::Type { + let variants = scale_info::build::Variants::new().variant("Immortal", |v| v.index(0)); + + // this is necessary since the size of the encoded Mortal variant is `u16`, conditional on + // the value of the first byte being > 0. 
+ let variants = mortal_variants!( + variants, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 + ); + + scale_info::Type::builder() + .path(scale_info::Path::new("Era", module_path!())) + .variant(variants) + } +} + #[cfg(test)] mod tests { use super::*; @@ -148,15 +186,15 @@ mod tests { fn immortal_works() { let e = Era::immortal(); assert_eq!(e.birth(0), 0); - assert_eq!(e.death(0), u64::max_value()); + assert_eq!(e.death(0), u64::MAX); assert_eq!(e.birth(1), 0); - assert_eq!(e.death(1), u64::max_value()); - assert_eq!(e.birth(u64::max_value()), 0); - assert_eq!(e.death(u64::max_value()), u64::max_value()); + assert_eq!(e.death(1), u64::MAX); + assert_eq!(e.birth(u64::MAX), 0); + assert_eq!(e.death(u64::MAX), u64::MAX); assert!(e.is_immortal()); assert_eq!(e.encode(), vec![0u8]); - assert_eq!(e, Era::decode(&mut&[0u8][..]).unwrap()); + assert_eq!(e, Era::decode(&mut &[0u8][..]).unwrap()); } #[test] @@ -166,7 +204,7 @@ mod tests { let expected = vec![5 + 42 % 16 * 16, 42 / 16]; assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); } #[test] @@ -175,7 +213,7 @@ mod tests { let expected = vec![(14 + 2500 % 16 * 16) as u8, (2500 / 16) as u8]; assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); } #[test] diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index e6c800e5787ff..82f081c0d70b0 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,22 @@ //! Generic implementation of a block header. 
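A minimal round-trip sketch of the two-byte mortal era encoding introduced above, assuming the `parity-scale-codec` crate (as `codec`) and the `Era` type re-exported from `sp_runtime::generic`:

use codec::{Decode, Encode};
use sp_runtime::generic::Era;

fn main() {
	// period = 64 = 2^6, phase = 42: the low nibble stores 6 - 1 = 5 and the
	// upper twelve bits store the quantized phase (quantize_factor is 1 here).
	let era = Era::mortal(64, 42);
	let encoded = era.encode();
	assert_eq!(encoded, vec![5 + 42 % 16 * 16, 42 / 16]);
	// Decoding reverses the packing and rejects `phase >= period`.
	assert_eq!(Era::decode(&mut &encoded[..]).unwrap(), era);
}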
+use crate::{ + codec::{Codec, Decode, Encode}, + generic::Digest, + scale_info::TypeInfo, + traits::{ + self, AtLeast32BitUnsigned, Hash as HashT, MaybeDisplay, MaybeMallocSizeOf, MaybeSerialize, + MaybeSerializeDeserialize, Member, SimpleBitOps, + }, +}; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use crate::codec::{Decode, Encode, Codec, Input, Output, HasCompact, EncodeAsRef, Error}; -use crate::traits::{ - self, Member, AtLeast32BitUnsigned, SimpleBitOps, Hash as HashT, - MaybeSerializeDeserialize, MaybeSerialize, MaybeDisplay, - MaybeMallocSizeOf, -}; -use crate::generic::Digest; use sp_core::U256; -use sp_std::{ - convert::TryFrom, - fmt::Debug, -}; +use sp_std::{convert::TryFrom, fmt::Debug}; /// Abstraction over a block header for a substrate chain. -#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] +#[derive(Encode, Decode, PartialEq, Eq, Clone, sp_core::RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] @@ -41,9 +40,11 @@ pub struct Header + TryFrom, Hash: HashT> { /// The parent hash. pub parent_hash: Hash::Output, /// The block number. - #[cfg_attr(feature = "std", serde( - serialize_with = "serialize_number", - deserialize_with = "deserialize_number"))] + #[cfg_attr( + feature = "std", + serde(serialize_with = "serialize_number", deserialize_with = "deserialize_number") + )] + #[codec(compact)] pub number: Number, /// The state trie merkle root pub state_root: Hash::Output, @@ -71,81 +72,87 @@ where #[cfg(feature = "std")] pub fn serialize_number + TryFrom>( - val: &T, s: S, -) -> Result where S: serde::Serializer { + val: &T, + s: S, +) -> Result +where + S: serde::Serializer, +{ let u256: U256 = (*val).into(); serde::Serialize::serialize(&u256, s) } #[cfg(feature = "std")] -pub fn deserialize_number<'a, D, T: Copy + Into + TryFrom>( - d: D, -) -> Result where D: serde::Deserializer<'a> { +pub fn deserialize_number<'a, D, T: Copy + Into + TryFrom>(d: D) -> Result +where + D: serde::Deserializer<'a>, +{ let u256: U256 = serde::Deserialize::deserialize(d)?; TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) } -impl Decode for Header where - Number: HasCompact + Copy + Into + TryFrom, - Hash: HashT, - Hash::Output: Decode, -{ - fn decode(input: &mut I) -> Result { - Ok(Header { - parent_hash: Decode::decode(input)?, - number: <::Type>::decode(input)?.into(), - state_root: Decode::decode(input)?, - extrinsics_root: Decode::decode(input)?, - digest: Decode::decode(input)?, - }) - } -} - -impl Encode for Header where - Number: HasCompact + Copy + Into + TryFrom, - Hash: HashT, - Hash::Output: Encode, -{ - fn encode_to(&self, dest: &mut T) { - dest.push(&self.parent_hash); - dest.push(&<<::Type as EncodeAsRef<_>>::RefType>::from(&self.number)); - dest.push(&self.state_root); - dest.push(&self.extrinsics_root); - dest.push(&self.digest); - } -} - -impl codec::EncodeLike for Header where - Number: HasCompact + Copy + Into + TryFrom, - Hash: HashT, - Hash::Output: Encode, -{} - -impl traits::Header for Header where - Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + MaybeDisplay + - AtLeast32BitUnsigned + Codec + Copy + Into + TryFrom + sp_std::str::FromStr + - MaybeMallocSizeOf, +impl traits::Header for Header +where + Number: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + MaybeDisplay + + AtLeast32BitUnsigned + + Codec + + Copy + 
+ Into + + TryFrom + + sp_std::str::FromStr + + MaybeMallocSizeOf, Hash: HashT, - Hash::Output: Default + sp_std::hash::Hash + Copy + Member + Ord + - MaybeSerialize + Debug + MaybeDisplay + SimpleBitOps + Codec + MaybeMallocSizeOf, + Hash::Output: Default + + sp_std::hash::Hash + + Copy + + Member + + Ord + + MaybeSerialize + + Debug + + MaybeDisplay + + SimpleBitOps + + Codec + + MaybeMallocSizeOf, { type Number = Number; type Hash = ::Output; type Hashing = Hash; - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } - fn digest(&self) -> &Digest { &self.digest } + fn digest(&self) -> &Digest { + &self.digest + } fn digest_mut(&mut self) -> &mut Digest { #[cfg(feature = "std")] @@ -160,22 +167,24 @@ impl traits::Header for Header where parent_hash: Self::Hash, digest: Digest, ) -> Self { - Header { - number, - extrinsics_root, - state_root, - parent_hash, - digest, - } + Self { number, extrinsics_root, state_root, parent_hash, digest } } } -impl Header where - Number: Member + sp_std::hash::Hash + Copy + MaybeDisplay + AtLeast32BitUnsigned + Codec + - Into + TryFrom, +impl Header +where + Number: Member + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32BitUnsigned + + Codec + + Into + + TryFrom, Hash: HashT, - Hash::Output: Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, - { + Hash::Output: + Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, +{ /// Convenience helper for computing the hash of the header without having /// to import the trait. 
pub fn hash(&self) -> Hash::Output { @@ -186,6 +195,7 @@ impl Header where #[cfg(all(test, feature = "std"))] mod tests { use super::*; + use crate::traits::BlakeTwo256; #[test] fn should_serialize_numbers() { @@ -200,8 +210,8 @@ mod tests { assert_eq!(serialize(0), "\"0x0\"".to_owned()); assert_eq!(serialize(1), "\"0x1\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128), "\"0xffffffffffffffff\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128 + 1), "\"0x10000000000000000\"".to_owned()); + assert_eq!(serialize(u64::MAX as u128), "\"0xffffffffffffffff\"".to_owned()); + assert_eq!(serialize(u64::MAX as u128 + 1), "\"0x10000000000000000\"".to_owned()); } #[test] @@ -213,7 +223,68 @@ mod tests { assert_eq!(deserialize("\"0x0\""), 0); assert_eq!(deserialize("\"0x1\""), 1); - assert_eq!(deserialize("\"0xffffffffffffffff\""), u64::max_value() as u128); - assert_eq!(deserialize("\"0x10000000000000000\""), u64::max_value() as u128 + 1); + assert_eq!(deserialize("\"0xffffffffffffffff\""), u64::MAX as u128); + assert_eq!(deserialize("\"0x10000000000000000\""), u64::MAX as u128 + 1); + } + + #[test] + fn ensure_format_is_unchanged() { + let header = Header:: { + parent_hash: BlakeTwo256::hash(b"1"), + number: 2, + state_root: BlakeTwo256::hash(b"3"), + extrinsics_root: BlakeTwo256::hash(b"4"), + digest: crate::generic::Digest { + logs: vec![ + crate::generic::DigestItem::ChangesTrieRoot(BlakeTwo256::hash(b"5")), + crate::generic::DigestItem::Other(b"6".to_vec()), + ], + }, + }; + + let header_encoded = header.encode(); + assert_eq!( + header_encoded, + vec![ + 146, 205, 245, 120, 196, 112, 133, 165, 153, 34, 86, 240, 220, 249, 125, 11, 25, + 241, 241, 201, 222, 77, 95, 227, 12, 58, 206, 97, 145, 182, 229, 219, 8, 88, 19, + 72, 51, 123, 15, 62, 20, 134, 32, 23, 61, 170, 165, 249, 77, 0, 216, 129, 112, 93, + 203, 240, 170, 131, 239, 218, 186, 97, 210, 237, 225, 235, 134, 73, 33, 73, 151, + 87, 78, 32, 196, 100, 56, 138, 23, 36, 32, 210, 84, 3, 104, 43, 187, 184, 12, 73, + 104, 49, 200, 204, 31, 143, 13, 8, 2, 112, 178, 1, 53, 47, 36, 191, 28, 151, 112, + 185, 159, 143, 113, 32, 24, 33, 65, 28, 244, 20, 55, 124, 155, 140, 45, 188, 238, + 97, 219, 135, 214, 0, 4, 54 + ], + ); + assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); + + let header = Header:: { + parent_hash: BlakeTwo256::hash(b"1000"), + number: 2000, + state_root: BlakeTwo256::hash(b"3000"), + extrinsics_root: BlakeTwo256::hash(b"4000"), + digest: crate::generic::Digest { + logs: vec![ + crate::generic::DigestItem::Other(b"5000".to_vec()), + crate::generic::DigestItem::ChangesTrieRoot(BlakeTwo256::hash(b"6000")), + ], + }, + }; + + let header_encoded = header.encode(); + assert_eq!( + header_encoded, + vec![ + 197, 243, 254, 225, 31, 117, 21, 218, 179, 213, 92, 6, 247, 164, 230, 25, 47, 166, + 140, 117, 142, 159, 195, 202, 67, 196, 238, 26, 44, 18, 33, 92, 65, 31, 219, 225, + 47, 12, 107, 88, 153, 146, 55, 21, 226, 186, 110, 48, 167, 187, 67, 183, 228, 232, + 118, 136, 30, 254, 11, 87, 48, 112, 7, 97, 31, 82, 146, 110, 96, 87, 152, 68, 98, + 162, 227, 222, 78, 14, 244, 194, 120, 154, 112, 97, 222, 144, 174, 101, 220, 44, + 111, 126, 54, 34, 155, 220, 253, 124, 8, 0, 16, 53, 48, 48, 48, 2, 42, 105, 109, + 150, 206, 223, 24, 44, 164, 77, 27, 137, 177, 220, 25, 170, 140, 35, 156, 246, 233, + 112, 26, 23, 192, 61, 226, 14, 84, 219, 144, 252 + ], + ); + assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); } } diff --git a/primitives/runtime/src/generic/mod.rs 
b/primitives/runtime/src/generic/mod.rs index 2a25c063ead73..71127e88ec32c 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,45 +19,20 @@ //! Generic implementations of Extrinsic/Header/Block. // end::description[] -mod unchecked_extrinsic; -mod era; -mod checked_extrinsic; -mod header; mod block; +mod checked_extrinsic; mod digest; +mod era; +mod header; #[cfg(test)] mod tests; +mod unchecked_extrinsic; -pub use self::unchecked_extrinsic::{UncheckedExtrinsic, SignedPayload}; -pub use self::era::{Era, Phase}; -pub use self::checked_extrinsic::CheckedExtrinsic; -pub use self::header::Header; -pub use self::block::{Block, SignedBlock, BlockId}; -pub use self::digest::{ - Digest, DigestItem, DigestItemRef, OpaqueDigestItemId, ChangesTrieSignal, +pub use self::{ + block::{Block, BlockId, SignedBlock}, + checked_extrinsic::CheckedExtrinsic, + digest::{ChangesTrieSignal, Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, + era::{Era, Phase}, + header::Header, + unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, }; - -use crate::codec::Encode; -use sp_std::prelude::*; - -fn encode_with_vec_prefix)>(encoder: F) -> Vec { - let size = ::sp_std::mem::size_of::(); - let reserve = match size { - 0..=0b00111111 => 1, - 0..=0b00111111_11111111 => 2, - _ => 4, - }; - let mut v = Vec::with_capacity(reserve + size); - v.resize(reserve, 0); - encoder(&mut v); - - // need to prefix with the total length to ensure it's binary compatible with - // Vec. - let mut length: Vec<()> = Vec::new(); - length.resize(v.len() - reserve, ()); - length.using_encoded(|s| { - v.splice(0..reserve, s.iter().cloned()); - }); - - v -} diff --git a/primitives/runtime/src/generic/tests.rs b/primitives/runtime/src/generic/tests.rs index 56138094fa024..095bcb717bb11 100644 --- a/primitives/runtime/src/generic/tests.rs +++ b/primitives/runtime/src/generic/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,27 +17,23 @@ //! Tests for the generic implementations of Extrinsic/Header/Block. 
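The `encode_with_vec_prefix` helper removed from `mod.rs` above existed only to keep the extrinsic encoding binary-compatible with a SCALE `Vec<u8>`: a compact length prefix followed by the raw bytes. The reworked `UncheckedExtrinsic::encode` later in this diff builds the same shape with an explicit `Compact` prefix; a minimal sketch of the equivalence, assuming only `parity-scale-codec`:

use codec::{Compact, Decode, Encode};

fn main() {
	// Some already-encoded extrinsic body (version byte + call bytes).
	let payload = vec![0x84u8, 1, 2, 3];
	// Prefix it with the compact-encoded length, exactly like `Vec<u8>::encode`.
	let mut out = Compact(payload.len() as u32).encode();
	out.extend_from_slice(&payload);
	// The result decodes as a plain `Vec<u8>`, which is what keeps the wire
	// format opaque-but-compatible for downstream consumers.
	assert_eq!(<Vec<u8>>::decode(&mut &out[..]).unwrap(), payload);
	assert_eq!(out, payload.encode());
}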
+use super::DigestItem; use crate::codec::{Decode, Encode}; use sp_core::H256; -use super::DigestItem; #[test] fn system_digest_item_encoding() { let item = DigestItem::ChangesTrieRoot::(H256::default()); let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::ChangesTrieRoot - 2, - // trie root - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - ]); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::ChangesTrieRoot + 2, // trie root + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ] + ); let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); @@ -47,14 +43,15 @@ fn system_digest_item_encoding() { fn non_system_digest_item_encoding() { let item = DigestItem::Other::(vec![10, 20, 30]); let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::Other - 0, - // length of other data - 12, - // authorities - 10, 20, 30, - ]); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::Other + 0, // length of other data + 12, // authorities + 10, 20, 30, + ] + ); let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index ab9afdb28b602..95f4f2f3584d9 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,19 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. -use sp_std::{fmt, prelude::*}; -use sp_io::hashing::blake2_256; -use codec::{Decode, Encode, EncodeLike, Input, Error}; use crate::{ + generic::CheckedExtrinsic, traits::{ - self, Member, MaybeDisplay, SignedExtension, Checkable, Extrinsic, ExtrinsicMetadata, - IdentifyAccount, + self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, + SignedExtension, }, - generic::CheckedExtrinsic, - transaction_validity::{TransactionValidityError, InvalidTransaction}, + transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, }; +use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; +use scale_info::{build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter}; +use sp_io::hashing::blake2_256; +use sp_std::{fmt, prelude::*}; /// Current version of the [`UncheckedExtrinsic`] format. const EXTRINSIC_VERSION: u8 = 4; @@ -38,7 +39,7 @@ const EXTRINSIC_VERSION: u8 = 4; #[derive(PartialEq, Eq, Clone)] pub struct UncheckedExtrinsic where - Extra: SignedExtension + Extra: SignedExtension, { /// The signature, address, number of extrinsics have come before from /// the same signer and an era describing the longevity of this transaction, @@ -48,11 +49,45 @@ where pub function: Call, } +/// Manual [`TypeInfo`] implementation because of custom encoding. The data is a valid encoded +/// `Vec`, but requires some logic to extract the signature and payload. +/// +/// See [`UncheckedExtrinsic::encode`] and [`UncheckedExtrinsic::decode`]. 
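+/// As a rough sketch, a consumer working only from this opaque description
+/// first decodes the described `Vec<u8>` and then parses it by hand
+/// (illustrative steps, not an API of this crate):
+///
+///     let raw: Vec<u8> = Decode::decode(input)?;
+///     let is_signed = raw[0] & 0b1000_0000 != 0; // high bit: signature present
+///     let version = raw[0] & 0b0111_1111;        // low 7 bits: format version
+///     // if signed: decode (Address, Signature, Extra), then decode the Call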
+impl<Address, Call, Signature, Extra> TypeInfo
+	for UncheckedExtrinsic<Address, Call, Signature, Extra>
+where
+	Address: StaticTypeInfo,
+	Call: StaticTypeInfo,
+	Signature: StaticTypeInfo,
+	Extra: SignedExtension + StaticTypeInfo,
+{
+	type Identity = UncheckedExtrinsic<Address, Call, Signature, Extra>;
+
+	fn type_info() -> Type {
+		Type::builder()
+			.path(Path::new("UncheckedExtrinsic", module_path!()))
+			// Include the type parameter types, even though they are not used directly in any of
+			// the described fields. These type definitions can be used by downstream consumers
+			// to help construct the custom decoding from the opaque bytes (see below).
+			.type_params(vec![
+				TypeParameter::new("Address", Some(meta_type::<Address>
())), + TypeParameter::new("Call", Some(meta_type::())), + TypeParameter::new("Signature", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), + ]) + .docs(&["UncheckedExtrinsic raw bytes, requires custom decoding routine"]) + // Because of the custom encoding, we can only accurately describe the encoding as an + // opaque `Vec`. Downstream consumers will need to manually implement the codec to + // encode/decode the `signature` and `function` fields. + .composite(Fields::unnamed().field(|f| f.ty::>())) + } +} + #[cfg(feature = "std")] impl parity_util_mem::MallocSizeOf for UncheckedExtrinsic where - Extra: SignedExtension + Extra: SignedExtension, { fn size_of(&self, _ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { // Instantiated only in runtime. @@ -64,24 +99,13 @@ impl UncheckedExtrinsic { /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed( - function: Call, - signed: Address, - signature: Signature, - extra: Extra - ) -> Self { - UncheckedExtrinsic { - signature: Some((signed, signature, extra)), - function, - } + pub fn new_signed(function: Call, signed: Address, signature: Signature, extra: Extra) -> Self { + Self { signature: Some((signed, signature, extra)), function } } /// New instance of an unsigned extrinsic aka "inherent". pub fn new_unsigned(function: Call) -> Self { - UncheckedExtrinsic { - signature: None, - function, - } + Self { signature: None, function } } } @@ -90,11 +114,7 @@ impl Extrinsic { type Call = Call; - type SignaturePayload = ( - Address, - Signature, - Extra, - ); + type SignaturePayload = (Address, Signature, Extra); fn is_signed(&self) -> Option { Some(self.signature.is_some()) @@ -102,25 +122,23 @@ impl Extrinsic fn new(function: Call, signed_data: Option) -> Option { Some(if let Some((address, signature, extra)) = signed_data { - UncheckedExtrinsic::new_signed(function, address, signature, extra) + Self::new_signed(function, address, signature, extra) } else { - UncheckedExtrinsic::new_unsigned(function) + Self::new_unsigned(function) }) } } -impl - Checkable -for - UncheckedExtrinsic +impl Checkable + for UncheckedExtrinsic where Address: Member + MaybeDisplay, Call: Encode + Member, Signature: Member + traits::Verify, - ::Signer: IdentifyAccount, - Extra: SignedExtension, + ::Signer: IdentifyAccount, + Extra: SignedExtension, AccountId: Member + MaybeDisplay, - Lookup: traits::Lookup, + Lookup: traits::Lookup, { type Checked = CheckedExtrinsic; @@ -134,23 +152,17 @@ where } let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { - signed: Some((signed, extra)), - function, - } - } - None => CheckedExtrinsic { - signed: None, - function: self.function, + CheckedExtrinsic { signed: Some((signed, extra)), function } }, + None => CheckedExtrinsic { signed: None, function: self.function }, }) } } impl ExtrinsicMetadata for UncheckedExtrinsic - where - Extra: SignedExtension, +where + Extra: SignedExtension, { const VERSION: u8 = EXTRINSIC_VERSION; type SignedExtensions = Extra; @@ -161,13 +173,10 @@ impl ExtrinsicMetadata /// Note that the payload that we sign to produce unchecked extrinsic signature /// is going to be different than the `SignaturePayload` - so the thing the extrinsic /// actually contains. 
-pub struct SignedPayload(( - Call, - Extra, - Extra::AdditionalSigned, -)); +pub struct SignedPayload((Call, Extra, Extra::AdditionalSigned)); -impl SignedPayload where +impl SignedPayload +where Call: Encode, Extra: SignedExtension, { @@ -191,7 +200,8 @@ impl SignedPayload where } } -impl Encode for SignedPayload where +impl Encode for SignedPayload +where Call: Encode, Extra: SignedExtension, { @@ -213,10 +223,10 @@ impl EncodeLike for SignedPayload where Call: Encode, Extra: SignedExtension, -{} +{ +} -impl Decode - for UncheckedExtrinsic +impl Decode for UncheckedExtrinsic where Address: Decode, Signature: Decode, @@ -228,25 +238,24 @@ where // with substrate's generic `Vec` type. Basically this just means accepting that there // will be a prefix of vector length (we don't need // to use this). - let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; + let _length_do_not_remove_me_see_above: Compact = Decode::decode(input)?; let version = input.read_byte()?; let is_signed = version & 0b1000_0000 != 0; let version = version & 0b0111_1111; if version != EXTRINSIC_VERSION { - return Err("Invalid transaction version".into()); + return Err("Invalid transaction version".into()) } - Ok(UncheckedExtrinsic { + Ok(Self { signature: if is_signed { Some(Decode::decode(input)?) } else { None }, function: Decode::decode(input)?, }) } } -impl Encode - for UncheckedExtrinsic +impl Encode for UncheckedExtrinsic where Address: Encode, Signature: Encode, @@ -254,19 +263,29 @@ where Extra: SignedExtension, { fn encode(&self) -> Vec { - super::encode_with_vec_prefix::(|v| { - // 1 byte version id. - match self.signature.as_ref() { - Some(s) => { - v.push(EXTRINSIC_VERSION | 0b1000_0000); - s.encode_to(v); - } - None => { - v.push(EXTRINSIC_VERSION & 0b0111_1111); - } - } - self.function.encode_to(v); - }) + let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); + + // 1 byte version id. 
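+		// The high bit flags the presence of a signature: with EXTRINSIC_VERSION = 4,
+		// a signed extrinsic therefore starts with byte 0x84 and an unsigned one with 0x04.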
+ match self.signature.as_ref() { + Some(s) => { + tmp.push(EXTRINSIC_VERSION | 0b1000_0000); + s.encode_to(&mut tmp); + }, + None => { + tmp.push(EXTRINSIC_VERSION & 0b0111_1111); + }, + } + self.function.encode_to(&mut tmp); + + let compact_len = codec::Compact::(tmp.len() as u32); + + // Allocate the output buffer with the correct length + let mut output = Vec::with_capacity(compact_len.size_hint() + tmp.len()); + + compact_len.encode_to(&mut output); + output.extend(tmp); + + output } } @@ -277,22 +296,27 @@ where Signature: Encode, Call: Encode, Extra: SignedExtension, -{} +{ +} #[cfg(feature = "std")] impl serde::Serialize for UncheckedExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } #[cfg(feature = "std")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> serde::Deserialize<'a> - for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> + serde::Deserialize<'a> for UncheckedExtrinsic { - fn deserialize(de: D) -> Result where + fn deserialize(de: D) -> Result + where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; @@ -327,21 +351,22 @@ where Extra: SignedExtension, { fn from(extrinsic: UncheckedExtrinsic) -> Self { - OpaqueExtrinsic::from_bytes(extrinsic.encode().as_slice()) - .expect( - "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ - raw Vec encoding; qed" - ) + Self::from_bytes(extrinsic.encode().as_slice()).expect( + "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ + raw Vec encoding; qed", + ) } } #[cfg(test)] mod tests { use super::*; + use crate::{ + codec::{Decode, Encode}, + testing::TestSignature as TestSig, + traits::{IdentityLookup, SignedExtension}, + }; use sp_io::hashing::blake2_256; - use crate::codec::{Encode, Decode}; - use crate::traits::{SignedExtension, IdentityLookup}; - use crate::testing::TestSignature as TestSig; type TestContext = IdentityLookup; type TestAccountId = u64; @@ -350,7 +375,7 @@ mod tests { const TEST_ACCOUNT: TestAccountId = 0; // NOTE: this is demonstration. One can simply use `()` for testing. 
- #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd)] + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd, TypeInfo)] struct TestExtra; impl SignedExtension for TestExtra { const IDENTIFIER: &'static str = "TestExtra"; @@ -359,7 +384,9 @@ mod tests { type AdditionalSigned = (); type Pre = (); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } } type Ex = UncheckedExtrinsic; @@ -378,7 +405,7 @@ mod tests { vec![0u8; 0], TEST_ACCOUNT, TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra + TestExtra, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -389,9 +416,11 @@ mod tests { let ux = Ex::new_signed( vec![0u8; 0], TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 257], TestExtra) - .using_encoded(blake2_256)[..].to_owned()), - TestExtra + TestSig( + TEST_ACCOUNT, + (vec![0u8; 257], TestExtra).using_encoded(blake2_256)[..].to_owned(), + ), + TestExtra, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -452,4 +481,13 @@ mod tests { let opaque_encoded = opaque.encode(); assert_eq!(opaque_encoded, encoded); } + + #[test] + fn large_bad_prefix_should_work() { + let encoded = Compact::::from(u32::MAX).encode(); + assert_eq!( + Ex::decode(&mut &encoded[..]), + Err(Error::from("Not enough data to fill buffer")) + ); + } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 47081e9115c3a..80293fe734844 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,13 +19,15 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] - // to allow benchmarking #![cfg_attr(feature = "bench", feature(test))] -#[cfg(feature = "bench")] extern crate test; +#[cfg(feature = "bench")] +extern crate test; #[doc(hidden)] pub use codec; +#[doc(hidden)] +pub use scale_info; #[cfg(feature = "std")] #[doc(hidden)] pub use serde; @@ -41,46 +43,56 @@ pub use sp_application_crypto as app_crypto; #[cfg(feature = "std")] pub use sp_core::storage::{Storage, StorageChild}; -use sp_std::prelude::*; -use sp_std::convert::TryFrom; -use sp_core::{crypto::{self, Public}, ed25519, sr25519, ecdsa, hash::{H256, H512}}; +use sp_core::{ + crypto::{self, Public}, + ecdsa, ed25519, + hash::{H256, H512}, + sr25519, +}; +use sp_std::{convert::TryFrom, prelude::*}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; pub mod curve; pub mod generic; +mod multiaddress; pub mod offchain; +pub mod runtime_logger; +mod runtime_string; #[cfg(feature = "std")] pub mod testing; pub mod traits; pub mod transaction_validity; -pub mod random_number_generator; -mod runtime_string; pub use crate::runtime_string::*; +// Re-export Multiaddress +pub use multiaddress::MultiAddress; + /// Re-export these since they're only "kind of" generic. -pub use generic::{DigestItem, Digest}; +pub use generic::{Digest, DigestItem}; +pub use sp_application_crypto::{BoundToRuntimeAppPublic, RuntimeAppPublic}; /// Re-export this since it's part of the API of this crate. 
-pub use sp_core::{TypeId, crypto::{key_types, KeyTypeId, CryptoType, CryptoTypeId, AccountId32}}; -pub use sp_application_crypto::{RuntimeAppPublic, BoundToRuntimeAppPublic}; +pub use sp_core::{ + crypto::{key_types, AccountId32, CryptoType, CryptoTypeId, KeyTypeId}, + TypeId, +}; /// Re-export `RuntimeDebug`, to avoid dependency clutter. pub use sp_core::RuntimeDebug; +/// Re-export big_uint stuff. +pub use sp_arithmetic::biguint; +/// Re-export 128 bit helpers. +pub use sp_arithmetic::helpers_128bit; /// Re-export top-level arithmetic stuff. pub use sp_arithmetic::{ - PerThing, Perquintill, Perbill, Permill, Percent, PerU16, InnerOf, UpperOf, - Rational128, FixedI64, FixedI128, FixedU128, FixedPointNumber, FixedPointOperand, - traits::SaturatedConversion, + traits::SaturatedConversion, FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, + FixedU128, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, + UpperOf, }; -/// Re-export 128 bit helpers. -pub use sp_arithmetic::helpers_128bit; -/// Re-export big_uint stuff. -pub use sp_arithmetic::biguint; - -pub use random_number_generator::RandomNumberGenerator; pub use either::Either; @@ -91,21 +103,71 @@ pub use either::Either; /// the block itself would allow swapping justifications to change the block's hash /// (and thus fork the chain). Sending a `Justification` alongside a block instead /// bypasses this problem. -pub type Justification = Vec; +/// +/// Each justification is provided as an encoded blob, and is tagged with an ID +/// to identify the consensus engine that generated the proof (we might have +/// multiple justifications from different engines for the same block). +pub type Justification = (ConsensusEngineId, EncodedJustification); + +/// The encoded justification specific to a consensus engine. +pub type EncodedJustification = Vec; + +/// Collection of justifications for a given block, multiple justifications may +/// be provided by different consensus engines for the same block. +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] +pub struct Justifications(Vec); + +impl Justifications { + /// Return an iterator over the justifications. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } -use traits::{Verify, Lazy}; + /// Append a justification. Returns false if a justification with the same + /// `ConsensusEngineId` already exists, in which case the justification is + /// not inserted. + pub fn append(&mut self, justification: Justification) -> bool { + if self.get(justification.0).is_some() { + return false + } + self.0.push(justification); + true + } -/// A module identifier. These are per module and should be stored in a registry somewhere. -#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] -pub struct ModuleId(pub [u8; 8]); + /// Return the encoded justification for the given consensus engine, if it + /// exists. + pub fn get(&self, engine_id: ConsensusEngineId) -> Option<&EncodedJustification> { + self.iter().find(|j| j.0 == engine_id).map(|j| &j.1) + } -impl TypeId for ModuleId { - const TYPE_ID: [u8; 4] = *b"modl"; + /// Return a copy of the encoded justification for the given consensus + /// engine, if it exists. 
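+	///
+	/// A short illustration, using an arbitrary engine id:
+	///
+	///     let id: ConsensusEngineId = *b"TEST";
+	///     let mut j = Justifications::from((id, vec![1, 2, 3]));
+	///     assert!(!j.append((id, vec![4]))); // same engine id: rejected
+	///     assert_eq!(j.into_justification(id), Some(vec![1, 2, 3]));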
+ pub fn into_justification(self, engine_id: ConsensusEngineId) -> Option { + self.into_iter().find(|j| j.0 == engine_id).map(|j| j.1) + } } -#[cfg(feature = "std")] -pub use serde::{Serialize, Deserialize, de::DeserializeOwned}; +impl IntoIterator for Justifications { + type Item = Justification; + type IntoIter = sp_std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl From for Justifications { + fn from(justification: Justification) -> Self { + Self(vec![justification]) + } +} + +use traits::{Lazy, Verify}; + use crate::traits::IdentifyAccount; +#[cfg(feature = "std")] +pub use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// Complex storage builder stuff. #[cfg(feature = "std")] @@ -117,10 +179,7 @@ pub trait BuildStorage { Ok(storage) } /// Assimilate the storage for this module into pre-existing overlays. - fn assimilate_storage( - &self, - storage: &mut sp_core::storage::Storage, - ) -> Result<(), String>; + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String>; } /// Something that can build the genesis storage of a module. @@ -135,17 +194,14 @@ pub trait BuildModuleGenesisStorage: Sized { #[cfg(feature = "std")] impl BuildStorage for sp_core::storage::Storage { - fn assimilate_storage( - &self, - storage: &mut sp_core::storage::Storage, - )-> Result<(), String> { + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); for (k, other_map) in self.children_default.iter() { let k = k.clone(); if let Some(map) = storage.children_default.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); if !map.child_info.try_update(&other_map.child_info) { - return Err("Incompatible child info update".to_string()); + return Err("Incompatible child info update".to_string()) } } else { storage.children_default.insert(k, other_map.clone()); @@ -157,10 +213,7 @@ impl BuildStorage for sp_core::storage::Storage { #[cfg(feature = "std")] impl BuildStorage for () { - fn assimilate_storage( - &self, - _: &mut sp_core::storage::Storage, - ) -> Result<(), String> { + fn assimilate_storage(&self, _: &mut sp_core::storage::Storage) -> Result<(), String> { Err("`assimilate_storage` not implemented for `()`".into()) } } @@ -170,7 +223,7 @@ pub type ConsensusEngineId = [u8; 4]; /// Signature verify that can work with any known signature types.. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[derive(Eq, PartialEq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(Eq, PartialEq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum MultiSignature { /// An Ed25519 signature. 
Ed25519(ed25519::Signature), @@ -182,51 +235,63 @@ pub enum MultiSignature { impl From for MultiSignature { fn from(x: ed25519::Signature) -> Self { - MultiSignature::Ed25519(x) + Self::Ed25519(x) } } impl TryFrom for ed25519::Signature { type Error = (); fn try_from(m: MultiSignature) -> Result { - if let MultiSignature::Ed25519(x) = m { Ok(x) } else { Err(()) } + if let MultiSignature::Ed25519(x) = m { + Ok(x) + } else { + Err(()) + } } } impl From for MultiSignature { fn from(x: sr25519::Signature) -> Self { - MultiSignature::Sr25519(x) + Self::Sr25519(x) } } impl TryFrom for sr25519::Signature { type Error = (); fn try_from(m: MultiSignature) -> Result { - if let MultiSignature::Sr25519(x) = m { Ok(x) } else { Err(()) } + if let MultiSignature::Sr25519(x) = m { + Ok(x) + } else { + Err(()) + } } } impl From for MultiSignature { fn from(x: ecdsa::Signature) -> Self { - MultiSignature::Ecdsa(x) + Self::Ecdsa(x) } } impl TryFrom for ecdsa::Signature { type Error = (); fn try_from(m: MultiSignature) -> Result { - if let MultiSignature::Ecdsa(x) = m { Ok(x) } else { Err(()) } + if let MultiSignature::Ecdsa(x) = m { + Ok(x) + } else { + Err(()) + } } } impl Default for MultiSignature { fn default() -> Self { - MultiSignature::Ed25519(Default::default()) + Self::Ed25519(Default::default()) } } /// Public key for any known crypto algorithm. -#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Encode, Decode, RuntimeDebug)] +#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum MultiSigner { /// An Ed25519 identity. @@ -239,7 +304,7 @@ pub enum MultiSigner { impl Default for MultiSigner { fn default() -> Self { - MultiSigner::Ed25519(Default::default()) + Self::Ed25519(Default::default()) } } @@ -254,9 +319,9 @@ impl> crypto::UncheckedFrom for MultiSigner { impl AsRef<[u8]> for MultiSigner { fn as_ref(&self) -> &[u8] { match *self { - MultiSigner::Ed25519(ref who) => who.as_ref(), - MultiSigner::Sr25519(ref who) => who.as_ref(), - MultiSigner::Ecdsa(ref who) => who.as_ref(), + Self::Ed25519(ref who) => who.as_ref(), + Self::Sr25519(ref who) => who.as_ref(), + Self::Ecdsa(ref who) => who.as_ref(), } } } @@ -265,49 +330,61 @@ impl traits::IdentifyAccount for MultiSigner { type AccountId = AccountId32; fn into_account(self) -> AccountId32 { match self { - MultiSigner::Ed25519(who) => <[u8; 32]>::from(who).into(), - MultiSigner::Sr25519(who) => <[u8; 32]>::from(who).into(), - MultiSigner::Ecdsa(who) => sp_io::hashing::blake2_256(&who.as_ref()[..]).into(), + Self::Ed25519(who) => <[u8; 32]>::from(who).into(), + Self::Sr25519(who) => <[u8; 32]>::from(who).into(), + Self::Ecdsa(who) => sp_io::hashing::blake2_256(who.as_ref()).into(), } } } impl From for MultiSigner { fn from(x: ed25519::Public) -> Self { - MultiSigner::Ed25519(x) + Self::Ed25519(x) } } impl TryFrom for ed25519::Public { type Error = (); fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Ed25519(x) = m { Ok(x) } else { Err(()) } + if let MultiSigner::Ed25519(x) = m { + Ok(x) + } else { + Err(()) + } } } impl From for MultiSigner { fn from(x: sr25519::Public) -> Self { - MultiSigner::Sr25519(x) + Self::Sr25519(x) } } impl TryFrom for sr25519::Public { type Error = (); fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Sr25519(x) = m { Ok(x) } else { Err(()) } + if let MultiSigner::Sr25519(x) = m { + Ok(x) + } else { + Err(()) + } } } impl From for MultiSigner { fn from(x: ecdsa::Public) -> Self { - 
MultiSigner::Ecdsa(x) + Self::Ecdsa(x) } } impl TryFrom for ecdsa::Public { type Error = (); fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Ecdsa(x) = m { Ok(x) } else { Err(()) } + if let MultiSigner::Ecdsa(x) = m { + Ok(x) + } else { + Err(()) + } } } @@ -315,9 +392,9 @@ impl TryFrom for ecdsa::Public { impl std::fmt::Display for MultiSigner { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { match *self { - MultiSigner::Ed25519(ref who) => write!(fmt, "ed25519: {}", who), - MultiSigner::Sr25519(ref who) => write!(fmt, "sr25519: {}", who), - MultiSigner::Ecdsa(ref who) => write!(fmt, "ecdsa: {}", who), + Self::Ed25519(ref who) => write!(fmt, "ed25519: {}", who), + Self::Sr25519(ref who) => write!(fmt, "sr25519: {}", who), + Self::Ecdsa(ref who) => write!(fmt, "ecdsa: {}", who), } } } @@ -326,17 +403,19 @@ impl Verify for MultiSignature { type Signer = MultiSigner; fn verify>(&self, mut msg: L, signer: &AccountId32) -> bool { match (self, signer) { - (MultiSignature::Ed25519(ref sig), who) => sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())), - (MultiSignature::Sr25519(ref sig), who) => sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())), - (MultiSignature::Ecdsa(ref sig), who) => { + (Self::Ed25519(ref sig), who) => + sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())), + (Self::Sr25519(ref sig), who) => + sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())), + (Self::Ecdsa(ref sig), who) => { let m = sp_io::hashing::blake2_256(msg.get()); match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { Ok(pubkey) => - &sp_io::hashing::blake2_256(pubkey.as_ref()) - == >::as_ref(who), + &sp_io::hashing::blake2_256(pubkey.as_ref()) == + >::as_ref(who), _ => false, } - } + }, } } } @@ -352,22 +431,22 @@ impl Verify for AnySignature { let msg = msg.get(); sr25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) .map(|s| s.verify(msg, signer)) - .unwrap_or(false) - || ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) - .map(|s| s.verify(msg, &ed25519::Public::from_slice(signer.as_ref()))) - .unwrap_or(false) + .unwrap_or(false) || + ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) + .map(|s| s.verify(msg, &ed25519::Public::from_slice(signer.as_ref()))) + .unwrap_or(false) } } impl From for AnySignature { fn from(s: sr25519::Signature) -> Self { - AnySignature(s.into()) + Self(s.into()) } } impl From for AnySignature { fn from(s: ed25519::Signature) -> Self { - AnySignature(s.into()) + Self(s.into()) } } @@ -387,11 +466,15 @@ pub type DispatchResult = sp_std::result::Result<(), DispatchError>; pub type DispatchResultWithInfo = sp_std::result::Result>; /// Reason why a dispatch call failed. -#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize))] +#[derive(Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum DispatchError { /// Some error occurred. - Other(#[codec(skip)] &'static str), + Other( + #[codec(skip)] + #[cfg_attr(feature = "std", serde(skip_deserializing))] + &'static str, + ), /// Failed to lookup some data. CannotLookup, /// A bad origin. @@ -404,15 +487,25 @@ pub enum DispatchError { error: u8, /// Optional error message. #[codec(skip)] + #[cfg_attr(feature = "std", serde(skip_deserializing))] message: Option<&'static str>, }, + /// At least one consumer is remaining so the account cannot be destroyed. 
+ ConsumerRemaining, + /// There are no providers so the account cannot be created. + NoProviders, + /// An error to do with tokens. + Token(TokenError), + /// An arithmetic error. + Arithmetic(ArithmeticError), } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about /// the `Dispatchable` that is only known post dispatch. #[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug)] -pub struct DispatchErrorWithPostInfo where - Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +pub struct DispatchErrorWithPostInfo +where + Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { /// Additional information about the `Dispatchable` which is only known post dispatch. pub post_info: Info, @@ -424,22 +517,20 @@ impl DispatchError { /// Return the same error but without the attached message. pub fn stripped(self) -> Self { match self { - DispatchError::Module { index, error, message: Some(_) } - => DispatchError::Module { index, error, message: None }, + DispatchError::Module { index, error, message: Some(_) } => + DispatchError::Module { index, error, message: None }, m => m, } } } -impl From for DispatchErrorWithPostInfo where +impl From for DispatchErrorWithPostInfo +where T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable + Default, - E: Into + E: Into, { fn from(error: E) -> Self { - Self { - post_info: Default::default(), - error: error.into(), - } + Self { post_info: Default::default(), error: error.into() } } } @@ -455,9 +546,77 @@ impl From for DispatchError { } } +/// Description of what went wrong when trying to complete an operation on a token. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub enum TokenError { + /// Funds are unavailable. + NoFunds, + /// Account that must exist would die. + WouldDie, + /// Account cannot exist with the funds that would be given. + BelowMinimum, + /// Account cannot be created. + CannotCreate, + /// The asset in question is unknown. + UnknownAsset, + /// Funds exist but are frozen. + Frozen, + /// Operation is not supported by the asset. + Unsupported, +} + +impl From for &'static str { + fn from(e: TokenError) -> &'static str { + match e { + TokenError::NoFunds => "Funds are unavailable", + TokenError::WouldDie => "Account that must exist would die", + TokenError::BelowMinimum => "Account cannot exist with the funds that would be given", + TokenError::CannotCreate => "Account cannot be created", + TokenError::UnknownAsset => "The asset in question is unknown", + TokenError::Frozen => "Funds exist but are frozen", + TokenError::Unsupported => "Operation is not supported by the asset", + } + } +} + +impl From for DispatchError { + fn from(e: TokenError) -> DispatchError { + Self::Token(e) + } +} + +/// Arithmetic errors. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub enum ArithmeticError { + /// Underflow. + Underflow, + /// Overflow. + Overflow, + /// Division by zero. 
+ DivisionByZero, +} + +impl From for &'static str { + fn from(e: ArithmeticError) -> &'static str { + match e { + ArithmeticError::Underflow => "An underflow would occur", + ArithmeticError::Overflow => "An overflow would occur", + ArithmeticError::DivisionByZero => "Division by zero", + } + } +} + +impl From for DispatchError { + fn from(e: ArithmeticError) -> DispatchError { + Self::Arithmetic(e) + } +} + impl From<&'static str> for DispatchError { fn from(err: &'static str) -> DispatchError { - DispatchError::Other(err) + Self::Other(err) } } @@ -465,15 +624,20 @@ impl From for &'static str { fn from(err: DispatchError) -> &'static str { match err { DispatchError::Other(msg) => msg, - DispatchError::CannotLookup => "Can not lookup", + DispatchError::CannotLookup => "Cannot lookup", DispatchError::BadOrigin => "Bad origin", DispatchError::Module { message, .. } => message.unwrap_or("Unknown module error"), + DispatchError::ConsumerRemaining => "Consumer remaining", + DispatchError::NoProviders => "No providers", + DispatchError::Token(e) => e.into(), + DispatchError::Arithmetic(e) => e.into(), } } } -impl From> for &'static str where - T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +impl From> for &'static str +where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { fn from(err: DispatchErrorWithPostInfo) -> &'static str { err.error.into() @@ -485,7 +649,7 @@ impl traits::Printable for DispatchError { "DispatchError".print(); match self { Self::Other(err) => err.print(), - Self::CannotLookup => "Can not lookup".print(), + Self::CannotLookup => "Cannot lookup".print(), Self::BadOrigin => "Bad origin".print(), Self::Module { index, error, message } => { index.print(); @@ -493,13 +657,24 @@ impl traits::Printable for DispatchError { if let Some(msg) = message { msg.print(); } - } + }, + Self::ConsumerRemaining => "Consumer remaining".print(), + Self::NoProviders => "No providers".print(), + Self::Token(e) => { + "Token error: ".print(); + <&'static str>::from(*e).print(); + }, + Self::Arithmetic(e) => { + "Arithmetic error: ".print(); + <&'static str>::from(*e).print(); + }, } } } -impl traits::Printable for DispatchErrorWithPostInfo where - T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +impl traits::Printable for DispatchErrorWithPostInfo +where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { fn print(&self) { self.error.print(); @@ -508,6 +683,30 @@ impl traits::Printable for DispatchErrorWithPostInfo where } } +impl PartialEq for DispatchError { + fn eq(&self, other: &Self) -> bool { + use DispatchError::*; + + match (self, other) { + (CannotLookup, CannotLookup) | + (BadOrigin, BadOrigin) | + (ConsumerRemaining, ConsumerRemaining) | + (NoProviders, NoProviders) => true, + + (Token(l), Token(r)) => l == r, + (Other(l), Other(r)) => l == r, + (Arithmetic(l), Arithmetic(r)) => l == r, + + ( + Module { index: index_l, error: error_l, .. }, + Module { index: index_r, error: error_r, .. }, + ) => (index_l == index_r) && (error_l == error_r), + + _ => false, + } + } +} + /// This type specifies the outcome of dispatching a call to a module. /// /// In case of failure an error specific to the module is returned. @@ -532,12 +731,13 @@ pub type DispatchOutcome = Result<(), DispatchError>; /// /// Examples of reasons preventing inclusion in a block: /// - More block weight is required to process the extrinsic than is left in the block being built. 
-/// This doesn't necessarily mean that the extrinsic is invalid, since it can still be -/// included in the next block if it has enough spare weight available. -/// - The sender doesn't have enough funds to pay the transaction inclusion fee. Including such -/// a transaction in the block doesn't make sense. +/// This doesn't necessarily mean that the extrinsic is invalid, since it can still be included in +/// the next block if it has enough spare weight available. +/// - The sender doesn't have enough funds to pay the transaction inclusion fee. Including such a +/// transaction in the block doesn't make sense. /// - The extrinsic supplied a bad signature. This transaction won't become valid ever. -pub type ApplyExtrinsicResult = Result; +pub type ApplyExtrinsicResult = + Result; /// Same as `ApplyExtrinsicResult` but augmented with `PostDispatchInfo` on success. pub type ApplyExtrinsicResultWithInfo = @@ -548,7 +748,7 @@ pub type ApplyExtrinsicResultWithInfo = pub fn verify_encoded_lazy( sig: &V, item: &T, - signer: &::AccountId + signer: &::AccountId, ) -> bool { // The `Lazy` trait expresses something like `X: FnMut &'a T>`. // unfortunately this is a lifetime relationship that can't @@ -565,120 +765,7 @@ pub fn verify_encoded_lazy( } } - sig.verify( - LazyEncode { inner: || item.encode(), encoded: None }, - signer, - ) -} - -/// Helper macro for `impl_outer_config` -#[macro_export] -macro_rules! __impl_outer_config_types { - // Generic + Instance - ( - $concrete:ident $config:ident $snake:ident { $instance:ident } < $ignore:ident >; - $( $rest:tt )* - ) => { - #[cfg(any(feature = "std", test))] - pub type $config = $snake::GenesisConfig<$concrete, $snake::$instance>; - $crate::__impl_outer_config_types! { $concrete $( $rest )* } - }; - // Generic - ( - $concrete:ident $config:ident $snake:ident < $ignore:ident >; - $( $rest:tt )* - ) => { - #[cfg(any(feature = "std", test))] - pub type $config = $snake::GenesisConfig<$concrete>; - $crate::__impl_outer_config_types! { $concrete $( $rest )* } - }; - // No Generic and maybe Instance - ( - $concrete:ident $config:ident $snake:ident $( { $instance:ident } )?; - $( $rest:tt )* - ) => { - #[cfg(any(feature = "std", test))] - pub type $config = $snake::GenesisConfig; - $crate::__impl_outer_config_types! { $concrete $( $rest )* } - }; - ($concrete:ident) => () -} - -/// Implement the output "meta" module configuration struct, -/// which is basically: -/// pub struct GenesisConfig { -/// rust_module_one: Option, -/// ... -/// } -#[macro_export] -macro_rules! impl_outer_config { - ( - pub struct $main:ident for $concrete:ident { - $( $config:ident => - $snake:ident $( $instance:ident )? $( <$generic:ident> )*, )* - } - ) => { - $crate::__impl_outer_config_types! { - $concrete $( $config $snake $( { $instance } )? $( <$generic> )*; )* - } - - $crate::paste::item! { - #[cfg(any(feature = "std", test))] - #[derive($crate::serde::Serialize, $crate::serde::Deserialize)] - #[serde(rename_all = "camelCase")] - #[serde(deny_unknown_fields)] - pub struct $main { - $( - pub [< $snake $(_ $instance )? >]: Option<$config>, - )* - } - #[cfg(any(feature = "std", test))] - impl $crate::BuildStorage for $main { - fn assimilate_storage( - &self, - storage: &mut $crate::Storage, - ) -> std::result::Result<(), String> { - $( - if let Some(ref extra) = self.[< $snake $(_ $instance )? >] { - $crate::impl_outer_config! 
{ - @CALL_FN - $concrete; - $snake; - $( $instance )?; - extra; - storage; - } - } - )* - Ok(()) - } - } - } - }; - (@CALL_FN - $runtime:ident; - $module:ident; - $instance:ident; - $extra:ident; - $storage:ident; - ) => { - $crate::BuildModuleGenesisStorage::<$runtime, $module::$instance>::build_module_genesis_storage( - $extra, - $storage, - )?; - }; - (@CALL_FN - $runtime:ident; - $module:ident; - ; - $extra:ident; - $storage:ident; - ) => { - $crate::BuildModuleGenesisStorage::<$runtime, $module::__InherentHiddenInstance>::build_module_genesis_storage( - $extra, - $storage, - )?; - } + sig.verify(LazyEncode { inner: || item.encode(), encoded: None }, signer) } /// Checks that `$x` is equal to `$y` with an error rate of `$error`. @@ -720,7 +807,7 @@ pub struct OpaqueExtrinsic(Vec); impl OpaqueExtrinsic { /// Convert an encoded extrinsic to an `OpaqueExtrinsic`. pub fn from_bytes(mut bytes: &[u8]) -> Result { - OpaqueExtrinsic::decode(&mut bytes) + Self::decode(&mut bytes) } } @@ -743,17 +830,22 @@ impl sp_std::fmt::Debug for OpaqueExtrinsic { } } - #[cfg(feature = "std")] impl ::serde::Serialize for OpaqueExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { codec::Encode::using_encoded(&self.0, |bytes| ::sp_core::bytes::serialize(bytes, seq)) } } #[cfg(feature = "std")] impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { - fn deserialize(de: D) -> Result where D: ::serde::Deserializer<'a> { + fn deserialize(de: D) -> Result + where + D: ::serde::Deserializer<'a>, + { let r = ::sp_core::bytes::deserialize(de)?; Decode::decode(&mut &r[..]) .map_err(|e| ::serde::de::Error::custom(format!("Decode error: {}", e))) @@ -770,7 +862,6 @@ pub fn print(print: impl traits::Printable) { print.print(); } - /// Batching session. /// /// To be used in runtime only. Outside of runtime, just construct @@ -826,7 +917,7 @@ impl TransactionOutcome { #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode}; + use codec::{Decode, Encode}; use sp_core::crypto::Pair; #[test] @@ -837,21 +928,51 @@ mod tests { #[test] fn dispatch_error_encoding() { - let error = DispatchError::Module { - index: 1, - error: 2, - message: Some("error message"), - }; + let error = DispatchError::Module { index: 1, error: 2, message: Some("error message") }; let encoded = error.encode(); let decoded = DispatchError::decode(&mut &encoded[..]).unwrap(); assert_eq!(encoded, vec![3, 1, 2]); + assert_eq!(decoded, DispatchError::Module { index: 1, error: 2, message: None }); + } + + #[test] + fn dispatch_error_equality() { + use DispatchError::*; + + let variants = vec![ + Other("foo"), + Other("bar"), + CannotLookup, + BadOrigin, + Module { index: 1, error: 1, message: None }, + Module { index: 1, error: 2, message: None }, + Module { index: 2, error: 1, message: None }, + ConsumerRemaining, + NoProviders, + Token(TokenError::NoFunds), + Token(TokenError::WouldDie), + Token(TokenError::BelowMinimum), + Token(TokenError::CannotCreate), + Token(TokenError::UnknownAsset), + Token(TokenError::Frozen), + Arithmetic(ArithmeticError::Overflow), + Arithmetic(ArithmeticError::Underflow), + Arithmetic(ArithmeticError::DivisionByZero), + ]; + for (i, variant) in variants.iter().enumerate() { + for (j, other_variant) in variants.iter().enumerate() { + if i == j { + assert_eq!(variant, other_variant); + } else { + assert_ne!(variant, other_variant); + } + } + } + + // Ignores `message` field in `Module` variant. 
assert_eq!( - decoded, - DispatchError::Module { - index: 1, - error: 2, - message: None, - }, + Module { index: 1, error: 1, message: Some("foo") }, + Module { index: 1, error: 1, message: None }, ); } @@ -871,22 +992,17 @@ mod tests { assert!(multi_sig.verify(msg, &multi_signer.into_account())); } - #[test] #[should_panic(expected = "Signature verification has not been called")] fn batching_still_finishes_when_not_called_directly() { let mut ext = sp_state_machine::BasicExternalities::default(); - ext.register_extension( - sp_core::traits::TaskExecutorExt::new(sp_core::testing::TaskExecutor::new()), - ); + ext.register_extension(sp_core::traits::TaskExecutorExt::new( + sp_core::testing::TaskExecutor::new(), + )); ext.execute_with(|| { let _batching = SignatureBatching::start(); - sp_io::crypto::sr25519_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + sp_io::crypto::sr25519_verify(&Default::default(), &Vec::new(), &Default::default()); }); } @@ -894,9 +1010,9 @@ mod tests { #[should_panic(expected = "Hey, I'm an error")] fn batching_does_not_panic_while_thread_is_already_panicking() { let mut ext = sp_state_machine::BasicExternalities::default(); - ext.register_extension( - sp_core::traits::TaskExecutorExt::new(sp_core::testing::TaskExecutor::new()), - ); + ext.register_extension(sp_core::traits::TaskExecutorExt::new( + sp_core::testing::TaskExecutor::new(), + )); ext.execute_with(|| { let _batching = SignatureBatching::start(); diff --git a/primitives/runtime/src/multiaddress.rs b/primitives/runtime/src/multiaddress.rs new file mode 100644 index 0000000000000..46d80608352dc --- /dev/null +++ b/primitives/runtime/src/multiaddress.rs @@ -0,0 +1,70 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! MultiAddress type is a wrapper for multiple downstream account formats. + +use codec::{Decode, Encode}; +use sp_std::vec::Vec; + +/// A multi-format address wrapper for on-chain accounts. +#[derive(Encode, Decode, PartialEq, Eq, Clone, crate::RuntimeDebug, scale_info::TypeInfo)] +#[cfg_attr(feature = "std", derive(Hash))] +pub enum MultiAddress { + /// It's an account ID (pubkey). + Id(AccountId), + /// It's an account index. + Index(#[codec(compact)] AccountIndex), + /// It's some arbitrary raw bytes. + Raw(Vec), + /// It's a 32 byte representation. + Address32([u8; 32]), + /// Its a 20 byte representation. 
+ Address20([u8; 20]), +} + +#[cfg(feature = "std")] +impl std::fmt::Display for MultiAddress +where + AccountId: std::fmt::Debug, + AccountIndex: std::fmt::Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + use sp_core::hexdisplay::HexDisplay; + match self { + Self::Raw(inner) => write!(f, "MultiAddress::Raw({})", HexDisplay::from(inner)), + Self::Address32(inner) => { + write!(f, "MultiAddress::Address32({})", HexDisplay::from(inner)) + }, + Self::Address20(inner) => { + write!(f, "MultiAddress::Address20({})", HexDisplay::from(inner)) + }, + _ => write!(f, "{:?}", self), + } + } +} + +impl From for MultiAddress { + fn from(a: AccountId) -> Self { + Self::Id(a) + } +} + +impl Default for MultiAddress { + fn default() -> Self { + Self::Id(Default::default()) + } +} diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index 12a0fcf1e5b45..469f2fb5aff3a 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -48,17 +48,15 @@ //! assert_eq!(body.error(), &None); //! ``` -use sp_std::str; -use sp_std::prelude::Vec; +use sp_core::{ + offchain::{ + HttpError, HttpRequestId as RequestId, HttpRequestStatus as RequestStatus, Timestamp, + }, + RuntimeDebug, +}; #[cfg(not(feature = "std"))] use sp_std::prelude::vec; -use sp_core::RuntimeDebug; -use sp_core::offchain::{ - Timestamp, - HttpRequestId as RequestId, - HttpRequestStatus as RequestStatus, - HttpError, -}; +use sp_std::{prelude::Vec, str}; /// Request method (HTTP verb) #[derive(Clone, PartialEq, Eq, RuntimeDebug)] @@ -103,10 +101,7 @@ mod header { impl Header { /// Creates new header given it's name and value. pub fn new(name: &str, value: &str) -> Self { - Header { - name: name.as_bytes().to_vec(), - value: value.as_bytes().to_vec(), - } + Header { name: name.as_bytes().to_vec(), value: value.as_bytes().to_vec() } } /// Returns the name of this header. @@ -166,13 +161,7 @@ impl<'a, T> Request<'a, T> { pub fn post(url: &'a str, body: T) -> Self { let req: Request = Request::default(); - Request { - url, - body, - method: Method::Post, - headers: req.headers, - deadline: req.deadline, - } + Request { url, body, method: Method::Post, headers: req.headers, deadline: req.deadline } } } @@ -213,7 +202,7 @@ impl<'a, T: Default> Request<'a, T> { } } -impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { +impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { /// Send the request and return a handle. /// /// Err is returned in case the deadline is reached @@ -222,19 +211,13 @@ impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { let meta = &[]; // start an http request. - let id = sp_io::offchain::http_request_start( - self.method.as_ref(), - self.url, - meta, - ).map_err(|_| HttpError::IoError)?; + let id = sp_io::offchain::http_request_start(self.method.as_ref(), self.url, meta) + .map_err(|_| HttpError::IoError)?; // add custom headers for header in &self.headers { - sp_io::offchain::http_request_add_header( - id, - header.name(), - header.value(), - ).map_err(|_| HttpError::IoError)? + sp_io::offchain::http_request_add_header(id, header.name(), header.value()) + .map_err(|_| HttpError::IoError)? 
} // write body @@ -245,9 +228,7 @@ impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { // finalize the request sp_io::offchain::http_request_write_body(id, &[], self.deadline)?; - Ok(PendingRequest { - id, - }) + Ok(PendingRequest { id }) } } @@ -285,8 +266,13 @@ impl PendingRequest { /// Attempts to wait for the request to finish, /// but will return `Err` in case the deadline is reached. - pub fn try_wait(self, deadline: impl Into>) -> Result { - Self::try_wait_all(vec![self], deadline).pop().expect("One request passed, one status received; qed") + pub fn try_wait( + self, + deadline: impl Into>, + ) -> Result { + Self::try_wait_all(vec![self], deadline) + .pop() + .expect("One request passed, one status received; qed") } /// Wait for all provided requests. @@ -302,10 +288,11 @@ impl PendingRequest { /// Attempt to wait for all provided requests, but up to given deadline. /// - /// Requests that are complete will resolve to an `Ok` others will return a `DeadlineReached` error. + /// Requests that are complete will resolve to an `Ok` others will return a `DeadlineReached` + /// error. pub fn try_wait_all( requests: Vec, - deadline: impl Into> + deadline: impl Into>, ) -> Vec> { let ids = requests.iter().map(|r| r.id).collect::>(); let statuses = sp_io::offchain::http_response_wait(&ids, deadline.into()); @@ -336,19 +323,13 @@ pub struct Response { impl Response { fn new(id: RequestId, code: u16) -> Self { - Self { - id, - code, - headers: None, - } + Self { id, code, headers: None } } /// Retrieve the headers for this response. pub fn headers(&mut self) -> &Headers { if self.headers.is_none() { - self.headers = Some( - Headers { raw: sp_io::offchain::http_response_headers(self.id) }, - ); + self.headers = Some(Headers { raw: sp_io::offchain::http_response_headers(self.id) }); } self.headers.as_ref().expect("Headers were just set; qed") } @@ -363,7 +344,7 @@ impl Response { /// /// Note that reading the body may return `None` in following cases: /// 1. Either the deadline you've set is reached (check via `#error`; -/// In such case you can resume the reader by setting a new deadline) +/// In such case you can resume the reader by setting a new deadline) /// 2. Or because of IOError. In such case the reader is not resumable and will keep /// returning `None`. /// 3. The body has been returned. The reader will keep returning `None`. @@ -423,32 +404,28 @@ impl Iterator for ResponseBody { fn next(&mut self) -> Option { if self.error.is_some() { - return None; + return None } if self.filled_up_to.is_none() { - let result = sp_io::offchain::http_response_read_body( - self.id, - &mut self.buffer, - self.deadline); + let result = + sp_io::offchain::http_response_read_body(self.id, &mut self.buffer, self.deadline); match result { Err(e) => { self.error = Some(e); - return None; - } - Ok(0) => { - return None; - } + return None + }, + Ok(0) => return None, Ok(size) => { self.position = 0; self.filled_up_to = Some(size as usize); - } + }, } } if Some(self.position) == self.filled_up_to { self.filled_up_to = None; - return self.next(); + return self.next() } let result = self.buffer[self.position]; @@ -508,7 +485,8 @@ impl<'a> HeadersIterator<'a> { /// /// Note that you have to call `next` prior to calling this pub fn current(&self) -> Option<(&str, &str)> { - self.collection.get(self.index?) + self.collection + .get(self.index?) 
.map(|val| (str::from_utf8(&val.0).unwrap_or(""), str::from_utf8(&val.1).unwrap_or(""))) } } @@ -516,24 +494,18 @@ impl<'a> HeadersIterator<'a> { #[cfg(test)] mod tests { use super::*; + use sp_core::offchain::{testing, OffchainWorkerExt}; use sp_io::TestExternalities; - use sp_core::offchain::{ - OffchainExt, - testing, - }; #[test] fn should_send_a_basic_request_and_get_response() { let (offchain, state) = testing::TestOffchainExt::new(); let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); t.execute_with(|| { let request: Request = Request::get("http://localhost:1234"); - let pending = request - .add_header("X-Auth", "hunter2") - .send() - .unwrap(); + let pending = request.add_header("X-Auth", "hunter2").send().unwrap(); // make sure it's sent correctly state.write().fulfill_pending_request( 0, @@ -567,7 +539,7 @@ mod tests { fn should_send_a_post_request() { let (offchain, state) = testing::TestOffchainExt::new(); let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); t.execute_with(|| { let pending = Request::default() diff --git a/primitives/runtime/src/offchain/mod.rs b/primitives/runtime/src/offchain/mod.rs index 12a301f921a1e..35f9352d81b08 100644 --- a/primitives/runtime/src/offchain/mod.rs +++ b/primitives/runtime/src/offchain/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/primitives/runtime/src/offchain/storage.rs b/primitives/runtime/src/offchain/storage.rs index 2f62d400c0b95..3bc5b10f161f7 100644 --- a/primitives/runtime/src/offchain/storage.rs +++ b/primitives/runtime/src/offchain/storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +28,25 @@ pub struct StorageValueRef<'a> { kind: StorageKind, } +/// Reason for not being able to provide the stored value +#[derive(Debug, PartialEq, Eq)] +pub enum StorageRetrievalError { + /// Value found but undecodable + Undecodable, +} + +/// Possible errors when mutating a storage value. +#[derive(Debug, PartialEq, Eq)] +pub enum MutateStorageError { + /// The underlying db failed to update due to a concurrent modification. + /// Contains the new value that was not stored. + ConcurrentModification(T), + /// The function given to us to create the value to be stored failed. + /// May be used to signal that having looked at the existing value, + /// they don't want to mutate it. + ValueFunctionFailed(E), +} + impl<'a> StorageValueRef<'a> { /// Create a new reference to a value in the persistent local storage. pub fn persistent(key: &'a [u8]) -> Self { @@ -45,9 +64,7 @@ impl<'a> StorageValueRef<'a> { /// if you happen to write a `get-check-set` pattern you should most likely /// be using `mutate` instead. 
 	pub fn set(&self, value: &impl codec::Encode) {
-		value.using_encoded(|val| {
-			sp_io::offchain::local_storage_set(self.kind, self.key, val)
-		})
+		value.using_encoded(|val| sp_io::offchain::local_storage_set(self.kind, self.key, val))
 	}
 
 	/// Remove the associated value from the storage.
@@ -58,43 +75,47 @@ impl<'a> StorageValueRef<'a> {
 	/// Retrieve & decode the value from storage.
 	///
 	/// Note that if you want to do some checks based on the value
-	/// and write changes after that you should rather be using `mutate`.
+	/// and write changes after that, you should rather be using `mutate`.
 	///
-	/// The function returns `None` if the value was not found in storage,
-	/// otherwise a decoding of the value to requested type.
-	pub fn get<T: codec::Decode>(&self) -> Option<Option<T>> {
+	/// Returns the value if stored.
+	/// Returns an error if the value could not be decoded.
+	pub fn get<T: codec::Decode>(&self) -> Result<Option<T>, StorageRetrievalError> {
 		sp_io::offchain::local_storage_get(self.kind, self.key)
-			.map(|val| T::decode(&mut &*val).ok())
+			.map(|val| T::decode(&mut &*val).map_err(|_| StorageRetrievalError::Undecodable))
+			.transpose()
 	}
 
-	/// Retrieve & decode the value and set it to a new one atomically.
+	/// Retrieve & decode the current value and set it to a new value atomically.
+	///
+	/// Function `mutate_val` takes as input the current value and should
+	/// return a new value that is attempted to be written to storage.
 	///
-	/// Function `f` should return a new value that we should attempt to write to storage.
 	/// This function returns:
-	/// 1. `Ok(Ok(T))` in case the value has been successfully set.
-	/// 2. `Ok(Err(T))` in case the value was calculated by the passed closure `f`,
-	/// but it could not be stored.
-	/// 3. `Err(_)` in case `f` returns an error.
-	pub fn mutate<T, E, F>(&self, f: F) -> Result<Result<T, T>, E> where
+	/// 1. `Ok(T)` in case the value has been successfully set.
+	/// 2. `Err(MutateStorageError::ConcurrentModification(T))` in case the value was calculated
+	///    by the passed closure `mutate_val`, but it could not be stored.
+	/// 3. `Err(MutateStorageError::ValueFunctionFailed(_))` in case `mutate_val` returns an error.
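+	///
+	/// # Example (editor's sketch)
+	///
+	/// A hedged illustration of driving this API, assuming a hypothetical `u32`
+	/// counter stored under `b"example::counter"`; not part of the API itself.
+	///
+	/// ```ignore
+	/// let counter = StorageValueRef::persistent(b"example::counter");
+	/// match counter.mutate(|old: Result<Option<u32>, StorageRetrievalError>| match old {
+	///     Ok(Some(v)) => Ok(v + 1),
+	///     Ok(None) => Ok(1),
+	///     Err(_) => Err("stored value was undecodable"),
+	/// }) {
+	///     Ok(new) => { /* value written; `new` is the stored counter */ },
+	///     Err(MutateStorageError::ConcurrentModification(_)) => { /* lost the race; retry */ },
+	///     Err(MutateStorageError::ValueFunctionFailed(_)) => { /* closure refused to mutate */ },
+	/// }
+	/// ```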
+	pub fn mutate<T, E, F>(&self, mutate_val: F) -> Result<T, MutateStorageError<T, E>>
+	where
 		T: codec::Codec,
-		F: FnOnce(Option<Option<T>>) -> Result<T, E>
+		F: FnOnce(Result<Option<T>, StorageRetrievalError>) -> Result<T, E>,
 	{
 		let value = sp_io::offchain::local_storage_get(self.kind, self.key);
-		let decoded = value.as_deref().map(|mut v| T::decode(&mut v).ok());
-		let val = f(decoded)?;
+		let decoded = value
+			.as_deref()
+			.map(|mut bytes| T::decode(&mut bytes).map_err(|_| StorageRetrievalError::Undecodable))
+			.transpose();
+
+		let val =
+			mutate_val(decoded).map_err(|err| MutateStorageError::ValueFunctionFailed(err))?;
+
 		let set = val.using_encoded(|new_val| {
-			sp_io::offchain::local_storage_compare_and_set(
-				self.kind,
-				self.key,
-				value,
-				new_val,
-			)
+			sp_io::offchain::local_storage_compare_and_set(self.kind, self.key, value, new_val)
 		});
-
 		if set {
-			Ok(Ok(val))
+			Ok(val)
 		} else {
-			Ok(Err(val))
+			Err(MutateStorageError::ConcurrentModification(val))
 		}
 	}
 }
@@ -102,32 +123,25 @@ impl<'a> StorageValueRef<'a> {
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use sp_core::offchain::{testing, OffchainDbExt};
 	use sp_io::TestExternalities;
-	use sp_core::offchain::{
-		OffchainExt,
-		OffchainStorage,
-		testing,
-	};
 
 	#[test]
 	fn should_set_and_get() {
 		let (offchain, state) = testing::TestOffchainExt::new();
 		let mut t = TestExternalities::default();
-		t.register_extension(OffchainExt::new(offchain));
+		t.register_extension(OffchainDbExt::new(offchain));
 
 		t.execute_with(|| {
 			let val = StorageValue::persistent(b"testval");
 
-			assert_eq!(val.get::<u32>(), None);
+			assert_eq!(val.get::<u32>(), Ok(None));
 
 			val.set(&15_u32);
 
-			assert_eq!(val.get::<u32>(), Some(Some(15_u32)));
-			assert_eq!(val.get::<Vec<u8>>(), Some(None));
-			assert_eq!(
-				state.read().persistent_storage.get(b"", b"testval"),
-				Some(vec![15_u8, 0, 0, 0])
-			);
+			assert_eq!(val.get::<u32>(), Ok(Some(15_u32)));
+			assert_eq!(val.get::<Vec<u8>>(), Err(StorageRetrievalError::Undecodable));
+			assert_eq!(state.read().persistent_storage.get(b"testval"), Some(vec![15_u8, 0, 0, 0]));
		})
	}
 
@@ -135,29 +149,26 @@ mod tests {
	fn should_mutate() {
		let (offchain, state) = testing::TestOffchainExt::new();
		let mut t = TestExternalities::default();
-		t.register_extension(OffchainExt::new(offchain));
+		t.register_extension(OffchainDbExt::new(offchain));
 
		t.execute_with(|| {
			let val = StorageValue::persistent(b"testval");
 
			let result = val.mutate::<u32, (), _>(|val| {
-				assert_eq!(val, None);
+				assert_eq!(val, Ok(None));
 
				Ok(16_u32)
			});
-			assert_eq!(result, Ok(Ok(16_u32)));
-			assert_eq!(val.get::<u32>(), Some(Some(16_u32)));
-			assert_eq!(
-				state.read().persistent_storage.get(b"", b"testval"),
-				Some(vec![16_u8, 0, 0, 0])
-			);
+			assert_eq!(result, Ok(16_u32));
+			assert_eq!(val.get::<u32>(), Ok(Some(16_u32)));
+			assert_eq!(state.read().persistent_storage.get(b"testval"), Some(vec![16_u8, 0, 0, 0]));
 
			// mutate again, but this time early-exit.
			let res = val.mutate::<u32, (), _>(|val| {
-				assert_eq!(val, Some(Some(16_u32)));
+				assert_eq!(val, Ok(Some(16_u32)));
				Err(())
			});
-			assert_eq!(res, Err(()));
+			assert_eq!(res, Err(MutateStorageError::ValueFunctionFailed(())));
		})
	}
}
diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs
index a3838f21fd13d..b4833bf345fc0 100644
--- a/primitives/runtime/src/offchain/storage_lock.rs
+++ b/primitives/runtime/src/offchain/storage_lock.rs
@@ -1,18 +1,19 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.
-// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! # Off-chain Storage Lock //! @@ -37,8 +38,8 @@ //! # use codec::{Decode, Encode, Codec}; //! // in your off-chain worker code //! use sp_runtime::offchain::{ -//! storage::StorageValueRef, -//! storage_lock::{StorageLock, Time}, +//! storage::StorageValueRef, +//! storage_lock::{StorageLock, Time}, //! }; //! //! fn append_to_in_storage_vec<'a, T>(key: &'a [u8], _: T) where T: Codec { @@ -60,11 +61,14 @@ //! } //! ``` -use crate::offchain::storage::StorageValueRef; -use crate::traits::AtLeast32BitUnsigned; +use crate::{ + offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, + traits::BlockNumberProvider, +}; use codec::{Codec, Decode, Encode}; use sp_core::offchain::{Duration, Timestamp}; use sp_io::offchain; +use sp_std::fmt; /// Default expiry duration for time based locks in milliseconds. const STORAGE_LOCK_DEFAULT_EXPIRY_DURATION: Duration = Duration::from_millis(20_000); @@ -113,9 +117,7 @@ pub struct Time { impl Default for Time { fn default() -> Self { - Self { - expiration_duration: STORAGE_LOCK_DEFAULT_EXPIRY_DURATION, - } + Self { expiration_duration: STORAGE_LOCK_DEFAULT_EXPIRY_DURATION } } } @@ -155,10 +157,7 @@ pub struct BlockAndTimeDeadline { impl Clone for BlockAndTimeDeadline { fn clone(&self) -> Self { - Self { - block_number: self.block_number.clone(), - timestamp: self.timestamp.clone(), - } + Self { block_number: self.block_number.clone(), timestamp: self.timestamp } } } @@ -172,6 +171,18 @@ impl Default for BlockAndTimeDeadline { } } +impl fmt::Debug for BlockAndTimeDeadline +where + ::BlockNumber: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BlockAndTimeDeadline") + .field("block_number", &self.block_number) + .field("timestamp", &self.timestamp) + .finish() + } +} + /// Lockable based on block number and timestamp. 
/// /// Expiration is defined if both, block number _and_ timestamp @@ -201,7 +212,7 @@ impl Default for BlockAndTime { impl Clone for BlockAndTime { fn clone(&self) -> Self { Self { - expiration_block_number_offset: self.expiration_block_number_offset.clone(), + expiration_block_number_offset: self.expiration_block_number_offset, expiration_duration: self.expiration_duration, _phantom: core::marker::PhantomData::, } @@ -212,8 +223,8 @@ impl Lockable for BlockAndTime { type Deadline = BlockAndTimeDeadline; fn deadline(&self) -> Self::Deadline { - let block_number = ::current_block_number() - + self.expiration_block_number_offset.into(); + let block_number = ::current_block_number() + + self.expiration_block_number_offset.into(); BlockAndTimeDeadline { timestamp: offchain::timestamp().add(self.expiration_duration), block_number, @@ -221,8 +232,8 @@ impl Lockable for BlockAndTime { } fn has_expired(deadline: &Self::Deadline) -> bool { - offchain::timestamp() > deadline.timestamp - && ::current_block_number() > deadline.block_number + offchain::timestamp() > deadline.timestamp && + ::current_block_number() > deadline.block_number } fn snooze(deadline: &Self::Deadline) { @@ -258,27 +269,25 @@ impl<'a, L: Lockable + Default> StorageLock<'a, L> { impl<'a, L: Lockable> StorageLock<'a, L> { /// Create a new storage lock with an explicit instance of a lockable `L`. pub fn with_lockable(key: &'a [u8], lockable: L) -> Self { - Self { - value_ref: StorageValueRef::<'a>::persistent(key), - lockable, - } + Self { value_ref: StorageValueRef::<'a>::persistent(key), lockable } } /// Extend active lock's deadline fn extend_active_lock(&mut self) -> Result<::Deadline, ()> { - let res = self.value_ref.mutate(|s: Option>| -> Result<::Deadline, ()> { + let res = self.value_ref.mutate( + |s: Result, StorageRetrievalError>| -> Result<::Deadline, ()> { match s { // lock is present and is still active, extend the lock. - Some(Some(deadline)) if !::has_expired(&deadline) => + Ok(Some(deadline)) if !::has_expired(&deadline) => Ok(self.lockable.deadline()), // other cases _ => Err(()), } }); match res { - Ok(Ok(deadline)) => Ok(deadline), - Ok(Err(_)) => Err(()), - Err(e) => Err(e), + Ok(deadline) => Ok(deadline), + Err(MutateStorageError::ConcurrentModification(_)) => Err(()), + Err(MutateStorageError::ValueFunctionFailed(e)) => Err(e), } } @@ -288,25 +297,25 @@ impl<'a, L: Lockable> StorageLock<'a, L> { new_deadline: L::Deadline, ) -> Result<(), ::Deadline> { let res = self.value_ref.mutate( - |s: Option>| + |s: Result, StorageRetrievalError>| -> Result<::Deadline, ::Deadline> { match s { // no lock set, we can safely acquire it - None => Ok(new_deadline), + Ok(None) => Ok(new_deadline), // write was good, but read failed - Some(None) => Ok(new_deadline), + Err(_) => Ok(new_deadline), // lock is set, but it is expired. We can re-acquire it. 
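+					// (Editor's note: what counts as "expired" depends on the `Lockable`
+					// implementation; `Time` compares only the timestamp, while
+					// `BlockAndTime` requires both the block number and the timestamp
+					// to be past the deadline, per `has_expired` above.)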
- Some(Some(deadline)) if ::has_expired(&deadline) => + Ok(Some(deadline)) if ::has_expired(&deadline) => Ok(new_deadline), // lock is present and is still active - Some(Some(deadline)) => Err(deadline), + Ok(Some(deadline)) => Err(deadline), } }, ); match res { - Ok(Ok(_)) => Ok(()), - Ok(Err(deadline)) => Err(deadline), - Err(e) => Err(e), + Ok(_) => Ok(()), + Err(MutateStorageError::ConcurrentModification(deadline)) => Err(deadline), + Err(MutateStorageError::ValueFunctionFailed(e)) => Err(e), } } @@ -384,9 +393,7 @@ impl<'a> StorageLock<'a, Time> { pub fn with_deadline(key: &'a [u8], expiration_duration: Duration) -> Self { Self { value_ref: StorageValueRef::<'a>::persistent(key), - lockable: Time { - expiration_duration: expiration_duration, - }, + lockable: Time { expiration_duration }, } } } @@ -426,33 +433,10 @@ where } } -/// Bound for a block number source -/// used with [`BlockAndTime`](BlockAndTime). -pub trait BlockNumberProvider { - /// Type of `BlockNumber` to provide. - type BlockNumber: Codec + Clone + Ord + Eq + AtLeast32BitUnsigned; - /// Returns the current block number. - /// - /// Provides an abstraction over an arbitrary way of providing the - /// current block number. - /// - /// In case of using crate `sp_runtime` without the crate `frame` - /// system, it is already implemented for - /// `frame_system::Module` as: - /// - /// ```ignore - /// fn current_block_number() -> Self { - /// frame_system::Module::block_number() - /// } - /// ``` - /// . - fn current_block_number() -> Self::BlockNumber; -} - #[cfg(test)] mod tests { use super::*; - use sp_core::offchain::{testing, OffchainExt, OffchainStorage}; + use sp_core::offchain::{testing, OffchainDbExt, OffchainWorkerExt}; use sp_io::TestExternalities; const VAL_1: u32 = 0u32; @@ -462,7 +446,8 @@ mod tests { fn storage_lock_write_unlock_lock_read_unlock() { let (offchain, state) = testing::TestOffchainExt::new(); let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainDbExt::new(offchain.clone())); + t.register_extension(OffchainWorkerExt::new(offchain)); t.execute_with(|| { let mut lock = StorageLock::<'_, Time>::new(b"lock_1"); @@ -474,25 +459,26 @@ mod tests { val.set(&VAL_1); - assert_eq!(val.get::(), Some(Some(VAL_1))); + assert_eq!(val.get::(), Ok(Some(VAL_1))); } { let _guard = lock.lock(); val.set(&VAL_2); - assert_eq!(val.get::(), Some(Some(VAL_2))); + assert_eq!(val.get::(), Ok(Some(VAL_2))); } }); // lock must have been cleared at this point - assert_eq!(state.read().persistent_storage.get(b"", b"lock_1"), None); + assert_eq!(state.read().persistent_storage.get(b"lock_1"), None); } #[test] fn storage_lock_and_forget() { let (offchain, state) = testing::TestOffchainExt::new(); let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainDbExt::new(offchain.clone())); + t.register_extension(OffchainWorkerExt::new(offchain)); t.execute_with(|| { let mut lock = StorageLock::<'_, Time>::new(b"lock_2"); @@ -503,12 +489,12 @@ mod tests { val.set(&VAL_1); - assert_eq!(val.get::(), Some(Some(VAL_1))); + assert_eq!(val.get::(), Ok(Some(VAL_1))); guard.forget(); }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_2"); + let opt = state.read().persistent_storage.get(b"lock_2"); assert!(opt.is_some()); } @@ -516,7 +502,8 @@ mod tests { fn storage_lock_and_let_expire_and_lock_again() { let (offchain, state) = 
testing::TestOffchainExt::new(); let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainDbExt::new(offchain.clone())); + t.register_extension(OffchainWorkerExt::new(offchain)); t.execute_with(|| { let sleep_until = offchain::timestamp().add(Duration::from_millis(500)); @@ -540,7 +527,7 @@ mod tests { }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_3"); + let opt = state.read().persistent_storage.get(b"lock_3"); assert!(opt.is_some()); } @@ -548,7 +535,8 @@ mod tests { fn extend_active_lock() { let (offchain, state) = testing::TestOffchainExt::new(); let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainDbExt::new(offchain.clone())); + t.register_extension(OffchainWorkerExt::new(offchain)); t.execute_with(|| { let lock_expiration = Duration::from_millis(300); @@ -587,7 +575,7 @@ mod tests { }); // lock must have been cleared at this point - let opt = state.read().persistent_storage.get(b"", b"lock_4"); + let opt = state.read().persistent_storage.get(b"lock_4"); assert_eq!(opt.unwrap(), vec![132_u8, 3u8, 0, 0, 0, 0, 0, 0]); // 132 + 256 * 3 = 900 } } diff --git a/primitives/runtime/src/random_number_generator.rs b/primitives/runtime/src/random_number_generator.rs deleted file mode 100644 index 23d0421742bd8..0000000000000 --- a/primitives/runtime/src/random_number_generator.rs +++ /dev/null @@ -1,104 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A simple pseudo random number generator that allows a stream of random numbers to be efficiently -//! created from a single initial seed hash. - -use codec::{Encode, Decode}; -use crate::traits::{Hash, TrailingZeroInput}; - -/// Pseudo-random number streamer. This retains the state of the random number stream. It's as -/// secure as the combination of the seed with which it is constructed and the hash function it uses -/// to cycle elements. -/// -/// It can be saved and later reloaded using the Codec traits. -/// -/// Example: -/// ``` -/// use sp_runtime::traits::{Hash, BlakeTwo256}; -/// use sp_runtime::RandomNumberGenerator; -/// let random_seed = BlakeTwo256::hash(b"Sixty-nine"); -/// let mut rng = >::new(random_seed); -/// assert_eq!(rng.pick_u32(100), 59); -/// assert_eq!(rng.pick_item(&[1, 2, 3]), Some(&1)); -/// ``` -/// -/// This can use any cryptographic `Hash` function as the means of entropy-extension, and avoids -/// needless extensions of entropy. -/// -/// If you're persisting it over blocks, be aware that the sequence will start to repeat. This won't -/// be a practical issue unless you're using tiny hash types (e.g. 64-bit) and pulling hundred of -/// megabytes of data from it. 
-#[derive(Encode, Decode)] -pub struct RandomNumberGenerator { - current: Hashing::Output, - offset: u32, -} - -impl RandomNumberGenerator { - /// A new source of random data. - pub fn new(seed: Hashing::Output) -> Self { - Self { - current: seed, - offset: 0, - } - } - - fn offset(&self) -> usize { self.offset as usize } - - /// Returns a number at least zero, at most `max`. - pub fn pick_u32(&mut self, max: u32) -> u32 { - let needed = (4 - max.leading_zeros() / 8) as usize; - let top = ((1 << (needed as u64 * 8)) / ((max + 1) as u64) * ((max + 1) as u64) - 1) as u32; - loop { - if self.offset() + needed > self.current.as_ref().len() { - // rehash - self.current = ::hash(self.current.as_ref()); - self.offset = 0; - } - let data = &self.current.as_ref()[self.offset()..self.offset() + needed]; - self.offset += needed as u32; - let raw = u32::decode(&mut TrailingZeroInput::new(data)).unwrap_or(0); - if raw <= top { - break if max < u32::max_value() { - raw % (max + 1) - } else { - raw - } - } - } - } - - /// Returns a number at least zero, at most `max`. - /// - /// This returns a `usize`, but internally it only uses `u32` so avoid consensus problems. - pub fn pick_usize(&mut self, max: usize) -> usize { - self.pick_u32(max as u32) as usize - } - - /// Pick a random element from an array of `items`. - /// - /// This is guaranteed to return `Some` except in the case that the given array `items` is - /// empty. - pub fn pick_item<'a, T>(&mut self, items: &'a [T]) -> Option<&'a T> { - if items.is_empty() { - None - } else { - Some(&items[self.pick_usize(items.len() - 1)]) - } - } -} diff --git a/primitives/runtime/src/runtime_logger.rs b/primitives/runtime/src/runtime_logger.rs new file mode 100644 index 0000000000000..ff0e531ed814f --- /dev/null +++ b/primitives/runtime/src/runtime_logger.rs @@ -0,0 +1,102 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A logger that can be used to log from the runtime. +//! +//! See [`RuntimeLogger`] for more docs. + +/// Runtime logger implementation - `log` crate backend. +/// +/// The logger should be initialized if you want to display +/// logs inside the runtime that is not necessarily running natively. +pub struct RuntimeLogger; + +impl RuntimeLogger { + /// Initialize the logger. + /// + /// This is a no-op when running natively (`std`). + #[cfg(feature = "std")] + pub fn init() {} + + /// Initialize the logger. + /// + /// This is a no-op when running natively (`std`). + #[cfg(not(feature = "std"))] + pub fn init() { + static LOGGER: RuntimeLogger = RuntimeLogger; + let _ = log::set_logger(&LOGGER); + + // Use the same max log level as used by the host. + log::set_max_level(sp_io::logging::max_level().into()); + } +} + +impl log::Log for RuntimeLogger { + fn enabled(&self, _: &log::Metadata) -> bool { + // The final filtering is done by the host. 
This is not perfect, as we would still call into + // the host for log lines that will be thrown away. + true + } + + fn log(&self, record: &log::Record) { + use sp_std::fmt::Write; + let mut w = sp_std::Writer::default(); + let _ = ::core::write!(&mut w, "{}", record.args()); + + sp_io::logging::log(record.level().into(), record.target(), w.inner()); + } + + fn flush(&self) {} +} + +#[cfg(test)] +mod tests { + use sp_api::{BlockId, ProvideRuntimeApi}; + use std::{env, str::FromStr}; + use substrate_test_runtime_client::{ + runtime::TestAPI, DefaultTestClientBuilderExt, ExecutionStrategy, TestClientBuilder, + TestClientBuilderExt, + }; + + #[test] + fn ensure_runtime_logger_respects_host_max_log_level() { + if env::var("RUN_TEST").is_ok() { + sp_tracing::try_init_simple(); + log::set_max_level(log::LevelFilter::from_str(&env::var("RUST_LOG").unwrap()).unwrap()); + + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(0); + runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); + } else { + for (level, should_print) in &[("trace", true), ("info", false)] { + let executable = std::env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("RUN_TEST", "1") + .env("RUST_LOG", level) + .args(&["--nocapture", "ensure_runtime_logger_respects_host_max_log_level"]) + .output() + .unwrap(); + + let output = String::from_utf8(output.stderr).unwrap(); + assert!(output.contains("Hey I'm runtime") == *should_print); + } + } + } +} diff --git a/primitives/runtime/src/runtime_string.rs b/primitives/runtime/src/runtime_string.rs index 7fd38f48df638..179e881451813 100644 --- a/primitives/runtime/src/runtime_string.rs +++ b/primitives/runtime/src/runtime_string.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_core::RuntimeDebug; use sp_std::vec::Vec; @@ -32,6 +32,29 @@ pub enum RuntimeString { Owned(Vec), } +impl scale_info::TypeInfo for RuntimeString { + type Identity = str; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } +} + +/// Convenience macro to use the format! interface to get a `RuntimeString::Owned` +#[macro_export] +macro_rules! format_runtime_string { + ($($args:tt)*) => {{ + #[cfg(feature = "std")] + { + sp_runtime::RuntimeString::Owned(format!($($args)*)) + } + #[cfg(not(feature = "std"))] + { + sp_runtime::RuntimeString::Owned(sp_std::alloc::format!($($args)*).as_bytes().to_vec()) + } + }}; +} + impl From<&'static str> for RuntimeString { fn from(data: &'static str) -> Self { Self::Borrowed(data) @@ -114,5 +137,7 @@ impl<'de> serde::Deserialize<'de> for RuntimeString { /// Create a const [`RuntimeString`]. #[macro_export] macro_rules! 
create_runtime_str { - ( $y:expr ) => {{ $crate::RuntimeString::Borrowed($y) }} + ( $y:expr ) => {{ + $crate::RuntimeString::Borrowed($y) + }}; } diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 97e128f363c89..781f342d43c1e 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,18 +17,28 @@ //! Testing utilities. -use serde::{Serialize, Serializer, Deserialize, de::Error as DeError, Deserializer}; -use std::{fmt::{self, Debug}, ops::Deref, cell::RefCell}; -use crate::codec::{Codec, Encode, Decode}; -use crate::traits::{ - self, Checkable, Applyable, BlakeTwo256, OpaqueKeys, - SignedExtension, Dispatchable, DispatchInfoOf, PostDispatchInfoOf, +use crate::{ + codec::{Codec, Decode, Encode}, + generic, + scale_info::TypeInfo, + traits::{ + self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys, + PostDispatchInfoOf, SignedExtension, ValidateUnsigned, + }, + transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, + ApplyExtrinsicResultWithInfo, CryptoTypeId, KeyTypeId, +}; +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; +use sp_core::{ + crypto::{key_types, CryptoType, Dummy, Public}, + U256, +}; +pub use sp_core::{sr25519, H256}; +use std::{ + cell::RefCell, + fmt::{self, Debug}, + ops::Deref, }; -use crate::traits::ValidateUnsigned; -use crate::{generic, KeyTypeId, CryptoTypeId, ApplyExtrinsicResultWithInfo}; -pub use sp_core::{H256, sr25519}; -use sp_core::{crypto::{CryptoType, Dummy, key_types, Public}, U256}; -use crate::transaction_validity::{TransactionValidity, TransactionValidityError, TransactionSource}; /// A dummy type which can be used instead of regular cryptographic primitives. /// @@ -36,7 +46,21 @@ use crate::transaction_validity::{TransactionValidity, TransactionValidityError, /// 2. Can be converted to any `Public` key. /// 3. Implements `RuntimeAppPublic` so it can be used instead of regular application-specific /// crypto. -#[derive(Default, PartialEq, Eq, Clone, Encode, Decode, Debug, Hash, Serialize, Deserialize, PartialOrd, Ord)] +#[derive( + Default, + PartialEq, + Eq, + Clone, + Encode, + Decode, + Debug, + Hash, + Serialize, + Deserialize, + PartialOrd, + Ord, + TypeInfo, +)] pub struct UintAuthorityId(pub u64); impl From for UintAuthorityId { @@ -68,7 +92,10 @@ impl AsRef<[u8]> for UintAuthorityId { // Unsafe, i know, but it's test code and it's just there because it's really convenient to // keep `UintAuthorityId` as a u64 under the hood. unsafe { - std::slice::from_raw_parts(&self.0 as *const u64 as *const _, std::mem::size_of::()) + std::slice::from_raw_parts( + &self.0 as *const u64 as *const _, + std::mem::size_of::(), + ) } } } @@ -80,7 +107,7 @@ thread_local! { impl UintAuthorityId { /// Set the list of keys returned by the runtime call for all keys of that type. - pub fn set_all_keys>(keys: impl IntoIterator) { + pub fn set_all_keys>(keys: impl IntoIterator) { ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect()) } } @@ -142,7 +169,7 @@ impl traits::IdentifyAccount for UintAuthorityId { } /// A dummy signature type, to match `UintAuthorityId`. 
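+///
+/// (Editor's sketch.) Verification is a plain equality check against the
+/// recorded signer id and message bytes:
+///
+/// ```ignore
+/// let sig = TestSignature(7, b"payload".to_vec());
+/// assert!(sig.verify(&b"payload"[..], &7u64));
+/// ```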
-#[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, Encode, Decode)] +#[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, Encode, Decode, TypeInfo)] pub struct TestSignature(pub u64, pub Vec); impl traits::Verify for TestSignature { @@ -180,7 +207,8 @@ impl Header { pub struct ExtrinsicWrapper(Xt); impl traits::Extrinsic for ExtrinsicWrapper -where Xt: parity_util_mem::MallocSizeOf +where + Xt: parity_util_mem::MallocSizeOf, { type Call = (); type SignaturePayload = (); @@ -191,7 +219,10 @@ where Xt: parity_util_mem::MallocSizeOf } impl serde::Serialize for ExtrinsicWrapper { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } @@ -219,8 +250,9 @@ pub struct Block { pub extrinsics: Vec, } -impl traits::Block - for Block +impl< + Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic, + > traits::Block for Block { type Extrinsic = Xt; type Header = Header; @@ -243,11 +275,14 @@ impl Deserialize<'a> for Block where Block: Decode { +impl<'a, Xt> Deserialize<'a> for Block +where + Block: Decode, +{ fn deserialize>(de: D) -> Result { let r = >::deserialize(de)?; Decode::decode(&mut &r[..]) - .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e.what()))) + .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e))) } } @@ -255,7 +290,7 @@ impl<'a, Xt> Deserialize<'a> for Block where Block: Decode { /// with index only used if sender is some. /// /// If sender is some then the transaction is signed otherwise it is unsigned. -#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct TestXt { /// Signature of the extrinsic. pub signature: Option<(u64, Extra)>, @@ -273,8 +308,14 @@ impl TestXt { // Non-opaque extrinsics always 0. parity_util_mem::malloc_size_of_is_0!(any: TestXt); -impl Serialize for TestXt where TestXt: Encode { - fn serialize(&self, seq: S) -> Result where S: Serializer { +impl Serialize for TestXt +where + TestXt: Encode, +{ + fn serialize(&self, seq: S) -> Result + where + S: Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } @@ -287,7 +328,9 @@ impl Debug for TestXt { impl Checkable for TestXt { type Checked = Self; - fn check(self, _: &Context) -> Result { Ok(self) } + fn check(self, _: &Context) -> Result { + Ok(self) + } } impl traits::Extrinsic for TestXt { @@ -303,15 +346,26 @@ impl traits::Extrinsic for TestXt } } -impl Applyable for TestXt where - Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, - Extra: SignedExtension, +impl traits::ExtrinsicMetadata for TestXt +where + Call: Codec + Sync + Send, + Extra: SignedExtension, +{ + type SignedExtensions = Extra; + const VERSION: u8 = 0u8; +} + +impl Applyable for TestXt +where + Call: + 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, + Extra: SignedExtension, Origin: From>, { type Call = Call; /// Checks to see if this is a valid *transaction*. It returns information on it if so. - fn validate>( + fn validate>( &self, source: TransactionSource, info: &DispatchInfoOf, @@ -328,7 +382,7 @@ impl Applyable for TestXt where /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. 
- fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 4d2b1f062f716..65c063fde1696 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,30 +17,37 @@ //! Primitives for the runtime modules. -use sp_std::prelude::*; -use sp_std::{self, marker::PhantomData, convert::{TryFrom, TryInto}, fmt::Debug}; -use sp_io; +use crate::{ + codec::{Codec, Decode, Encode, MaxEncodedLen}, + generic::{Digest, DigestItem}, + scale_info::{MetaType, StaticTypeInfo, TypeInfo}, + transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, + ValidTransaction, + }, + DispatchResult, +}; +use impl_trait_for_tuples::impl_for_tuples; +#[cfg(feature = "std")] +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use sp_application_crypto::AppKey; +pub use sp_arithmetic::traits::{ + AtLeast32Bit, AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedShl, + CheckedShr, CheckedSub, IntegerSquareRoot, One, SaturatedConversion, Saturating, + UniqueSaturatedFrom, UniqueSaturatedInto, Zero, +}; +use sp_core::{self, Hasher, RuntimeDebug, TypeId}; +use sp_std::{ + self, + convert::{TryFrom, TryInto}, + fmt::Debug, + marker::PhantomData, + prelude::*, +}; #[cfg(feature = "std")] use std::fmt::Display; #[cfg(feature = "std")] use std::str::FromStr; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, Hasher, TypeId, RuntimeDebug}; -use crate::codec::{Codec, Encode, Decode}; -use crate::transaction_validity::{ - ValidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, - UnknownTransaction, -}; -use crate::generic::{Digest, DigestItem}; -pub use sp_arithmetic::traits::{ - AtLeast32Bit, AtLeast32BitUnsigned, UniqueSaturatedInto, UniqueSaturatedFrom, Saturating, - SaturatedConversion, Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, - CheckedShl, CheckedShr, IntegerSquareRoot -}; -use sp_application_crypto::AppKey; -use impl_trait_for_tuples::impl_for_tuples; -use crate::DispatchResult; /// A lazy value. pub trait Lazy { @@ -51,7 +58,9 @@ pub trait Lazy { } impl<'a> Lazy<[u8]> for &'a [u8] { - fn get(&mut self) -> &[u8] { &**self } + fn get(&mut self) -> &[u8] { + &**self + } } /// Some type that is able to be collapsed into an account ID. It is not possible to recreate the @@ -65,17 +74,23 @@ pub trait IdentifyAccount { impl IdentifyAccount for sp_core::ed25519::Public { type AccountId = Self; - fn into_account(self) -> Self { self } + fn into_account(self) -> Self { + self + } } impl IdentifyAccount for sp_core::sr25519::Public { type AccountId = Self; - fn into_account(self) -> Self { self } + fn into_account(self) -> Self { + self + } } impl IdentifyAccount for sp_core::ecdsa::Public { type AccountId = Self; - fn into_account(self) -> Self { self } + fn into_account(self) -> Self { + self + } } /// Means of signature verification. @@ -85,7 +100,11 @@ pub trait Verify { /// Verify a signature. /// /// Return `true` if signature is valid for the value. 
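+	///
+	/// (Editor's sketch.) For example, with the `sr25519` implementation below;
+	/// a plain byte slice satisfies the `Lazy<[u8]>` bound on `msg`:
+	///
+	/// ```ignore
+	/// use sp_core::{sr25519, crypto::Pair};
+	/// let (pair, _) = sr25519::Pair::generate();
+	/// let sig = pair.sign(b"hello");
+	/// assert!(sig.verify(&b"hello"[..], &pair.public()));
+	/// ```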
- fn verify>(&self, msg: L, signer: &::AccountId) -> bool; + fn verify>( + &self, + msg: L, + signer: &::AccountId, + ) -> bool; } impl Verify for sp_core::ed25519::Signature { @@ -111,7 +130,7 @@ impl Verify for sp_core::ecdsa::Signature { self.as_ref(), &sp_io::hashing::blake2_256(msg.get()), ) { - Ok(pubkey) => &signer.as_ref()[..] == &pubkey[..], + Ok(pubkey) => signer.as_ref() == &pubkey[..], _ => false, } } @@ -126,19 +145,27 @@ pub trait AppVerify { } impl< - S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + From, - T: sp_application_crypto::Wraps + sp_application_crypto::AppKey + sp_application_crypto::AppSignature + - AsRef + AsMut + From, -> AppVerify for T where + S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + + From, + T: sp_application_crypto::Wraps + + sp_application_crypto::AppKey + + sp_application_crypto::AppSignature + + AsRef + + AsMut + + From, + > AppVerify for T +where ::Signer: IdentifyAccount::Signer>, - <::Public as sp_application_crypto::AppPublic>::Generic: - IdentifyAccount::Public as sp_application_crypto::AppPublic>::Generic>, + <::Public as sp_application_crypto::AppPublic>::Generic: IdentifyAccount< + AccountId = <::Public as sp_application_crypto::AppPublic>::Generic, + >, { type AccountId = ::Public; fn verify>(&self, msg: L, signer: &::Public) -> bool { use sp_application_crypto::IsWrappedBy; let inner: &S = self.as_ref(); - let inner_pubkey = <::Public as sp_application_crypto::AppPublic>::Generic::from_ref(&signer); + let inner_pubkey = + <::Public as sp_application_crypto::AppPublic>::Generic::from_ref(&signer); Verify::verify(inner, msg, inner_pubkey) } } @@ -184,7 +211,7 @@ pub trait Lookup { /// context. pub trait StaticLookup { /// Type to lookup from. - type Source: Codec + Clone + PartialEq + Debug; + type Source: Codec + Clone + PartialEq + Debug + TypeInfo; /// Type to lookup into. type Target; /// Attempt a lookup. @@ -196,17 +223,61 @@ pub trait StaticLookup { /// A lookup implementation returning the input value. #[derive(Default)] pub struct IdentityLookup(PhantomData); -impl StaticLookup for IdentityLookup { +impl StaticLookup for IdentityLookup { type Source = T; type Target = T; - fn lookup(x: T) -> Result { Ok(x) } - fn unlookup(x: T) -> T { x } + fn lookup(x: T) -> Result { + Ok(x) + } + fn unlookup(x: T) -> T { + x + } } impl Lookup for IdentityLookup { type Source = T; type Target = T; - fn lookup(&self, x: T) -> Result { Ok(x) } + fn lookup(&self, x: T) -> Result { + Ok(x) + } +} + +/// A lookup implementation returning the `AccountId` from a `MultiAddress`. +pub struct AccountIdLookup(PhantomData<(AccountId, AccountIndex)>); +impl StaticLookup for AccountIdLookup +where + AccountId: Codec + Clone + PartialEq + Debug, + AccountIndex: Codec + Clone + PartialEq + Debug, + crate::MultiAddress: Codec + StaticTypeInfo, +{ + type Source = crate::MultiAddress; + type Target = AccountId; + fn lookup(x: Self::Source) -> Result { + match x { + crate::MultiAddress::Id(i) => Ok(i), + _ => Err(LookupError), + } + } + fn unlookup(x: Self::Target) -> Self::Source { + crate::MultiAddress::Id(x) + } +} + +/// Perform a StaticLookup where there are multiple lookup sources of the same type. 
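+///
+/// (Editor's sketch.) `lookup` tries the first member and falls back to the
+/// second on error, while `unlookup` always uses the first; `Primary` and
+/// `Fallback` are hypothetical implementations sharing `Source` and `Target`:
+///
+/// ```ignore
+/// type Both = (Primary, Fallback);
+/// let target = <Both as StaticLookup>::lookup(source)?;
+/// let source = <Both as StaticLookup>::unlookup(target);
+/// ```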
+impl StaticLookup for (A, B) +where + A: StaticLookup, + B: StaticLookup, +{ + type Source = A::Source; + type Target = A::Target; + + fn lookup(x: Self::Source) -> Result { + A::lookup(x.clone()).or_else(|_| B::lookup(x)) + } + fn unlookup(x: Self::Target) -> Self::Source { + A::unlookup(x) + } } /// Extensible conversion trait. Generic over both source and destination types. @@ -216,19 +287,25 @@ pub trait Convert { } impl Convert for () { - fn convert(_: A) -> B { Default::default() } + fn convert(_: A) -> B { + Default::default() + } } /// A structure that performs identity conversion. pub struct Identity; impl Convert for Identity { - fn convert(a: T) -> T { a } + fn convert(a: T) -> T { + a + } } /// A structure that performs standard conversion using the standard Rust conversion traits. pub struct ConvertInto; impl> Convert for ConvertInto { - fn convert(a: A) -> B { a.into() } + fn convert(a: A) -> B { + a.into() + } } /// Convenience type to work around the highly unergonomic syntax needed @@ -240,7 +317,10 @@ pub trait CheckedConversion { /// This just uses `TryFrom` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. - fn checked_from(t: T) -> Option where Self: TryFrom { + fn checked_from(t: T) -> Option + where + Self: TryFrom, + { >::try_from(t).ok() } /// Consume self to return `Some` equivalent value of `Option`. @@ -248,7 +328,10 @@ pub trait CheckedConversion { /// This just uses `TryInto` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. - fn checked_into(self) -> Option where Self: TryInto { + fn checked_into(self) -> Option + where + Self: TryInto, + { >::try_into(self).ok() } } @@ -273,11 +356,17 @@ macro_rules! impl_scale { ($self:ty, $other:ty) => { impl Scale<$other> for $self { type Output = Self; - fn mul(self, other: $other) -> Self::Output { self * (other as Self) } - fn div(self, other: $other) -> Self::Output { self / (other as Self) } - fn rem(self, other: $other) -> Self::Output { self % (other as Self) } + fn mul(self, other: $other) -> Self::Output { + self * (other as Self) + } + fn div(self, other: $other) -> Self::Output { + self / (other as Self) + } + fn rem(self, other: $other) -> Self::Output { + self % (other as Self) + } } - } + }; } impl_scale!(u128, u128); impl_scale!(u128, u64); @@ -306,31 +395,58 @@ pub trait Clear { } impl Clear for T { - fn is_clear(&self) -> bool { *self == Self::clear() } - fn clear() -> Self { Default::default() } + fn is_clear(&self) -> bool { + *self == Self::clear() + } + fn clear() -> Self { + Default::default() + } } /// A meta trait for all bit ops. pub trait SimpleBitOps: - Sized + Clear + - sp_std::ops::BitOr + - sp_std::ops::BitXor + - sp_std::ops::BitAnd -{} -impl + - sp_std::ops::BitXor + - sp_std::ops::BitAnd -> SimpleBitOps for T {} + Sized + + Clear + + sp_std::ops::BitOr + + sp_std::ops::BitXor + + sp_std::ops::BitAnd +{ +} +impl< + T: Sized + + Clear + + sp_std::ops::BitOr + + sp_std::ops::BitXor + + sp_std::ops::BitAnd, + > SimpleBitOps for T +{ +} /// Abstraction around hashing // Stupid bug in the Rust compiler believes derived // traits must be fulfilled by all type parameters. 
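+/// (Editor's sketch.) Typical use goes through one of the implementations below:
+///
+/// ```ignore
+/// use sp_runtime::traits::{BlakeTwo256, Hash};
+/// let digest = BlakeTwo256::hash(b"data");     // hash raw bytes
+/// let digest2 = BlakeTwo256::hash_of(&42u64);  // SCALE-encode, then hash
+/// ```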
-pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + PartialEq + Hasher::Output> { +pub trait Hash: + 'static + + MaybeSerializeDeserialize + + Debug + + Clone + + Eq + + PartialEq + + Hasher::Output> +{ /// The hash type produced. - type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode; + type Output: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Default + + Encode + + Decode + + MaxEncodedLen + + TypeInfo; /// Produce the hash of some byte-slice. fn hash(s: &[u8]) -> Self::Output { @@ -350,7 +466,7 @@ pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + Parti } /// Blake2-256 Hash implementation. -#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BlakeTwo256; @@ -377,7 +493,7 @@ impl Hash for BlakeTwo256 { } /// Keccak-256 Hash implementation. -#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct Keccak256; @@ -432,7 +548,10 @@ impl CheckEqual for sp_core::H256 { } } -impl CheckEqual for super::generic::DigestItem where H: Encode { +impl CheckEqual for super::generic::DigestItem +where + H: Encode, +{ #[cfg(feature = "std")] fn check_equal(&self, other: &Self) { if self != other { @@ -486,16 +605,34 @@ pub trait IsMember { /// /// You can also create a `new` one from those fields. pub trait Header: - Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + - MaybeMallocSizeOf + 'static + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static { /// Header number. - type Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Copy + - MaybeDisplay + AtLeast32BitUnsigned + Codec + sp_std::str::FromStr + MaybeMallocSizeOf; + type Number: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32BitUnsigned + + Codec + + sp_std::str::FromStr + + MaybeMallocSizeOf; /// Header hash type - type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord - + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> - + AsMut<[u8]> + MaybeMallocSizeOf; + type Hash: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Ord + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Codec + + AsRef<[u8]> + + AsMut<[u8]> + + MaybeMallocSizeOf + + TypeInfo; /// Hashing algorithm type Hashing: Hash; @@ -543,15 +680,28 @@ pub trait Header: /// `Extrinsic` pieces of information as well as a `Header`. /// /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. -pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static { +pub trait Block: + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static +{ /// Type for extrinsics. type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize + MaybeMallocSizeOf; /// Header type. - type Header: Header + MaybeMallocSizeOf; + type Header: Header + MaybeMallocSizeOf; /// Block hash type. 
-	type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord
-		+ Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>
-		+ MaybeMallocSizeOf;
+	type Hash: Member
+		+ MaybeSerializeDeserialize
+		+ Debug
+		+ sp_std::hash::Hash
+		+ Ord
+		+ Copy
+		+ MaybeDisplay
+		+ Default
+		+ SimpleBitOps
+		+ Codec
+		+ AsRef<[u8]>
+		+ AsMut<[u8]>
+		+ MaybeMallocSizeOf
+		+ TypeInfo;

	/// Returns a reference to the header.
	fn header(&self) -> &Self::Header;
@@ -570,7 +720,6 @@ pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + May
	fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec<u8>;
}

-
/// Something that acts like an `Extrinsic`.
pub trait Extrinsic: Sized + MaybeMallocSizeOf {
	/// The function call.
@@ -585,15 +734,19 @@ pub trait Extrinsic: Sized + MaybeMallocSizeOf {

	/// Is this `Extrinsic` signed?
	/// If no information is available about signed/unsigned, `None` should be returned.
-	fn is_signed(&self) -> Option<bool> { None }
+	fn is_signed(&self) -> Option<bool> {
+		None
+	}

	/// Create new instance of the extrinsic.
	///
	/// Extrinsics can be split into:
	/// 1. Inherents (no signature; created by validators during block production)
-	/// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of calls)
-	/// 3. Signed Transactions (with signature; a regular transactions with known origin)
-	fn new(_call: Self::Call, _signed_data: Option<Self::SignaturePayload>) -> Option<Self> { None }
+	/// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of
+	///    calls)
+	/// 3. Signed Transactions (with signature; a regular transaction with known origin)
+	fn new(_call: Self::Call, _signed_data: Option<Self::SignaturePayload>) -> Option<Self> {
+		None
+	}
}

/// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic.
@@ -616,8 +769,8 @@ pub type DigestItemFor<B> = DigestItem<<<B as Block>::Header as Header>::Hash>;

/// A "checkable" piece of information, used by the standard Substrate Executive in order to
/// check the validity of a piece of extrinsic information, usually by verifying the signature.
-/// Implement for pieces of information that require some additional context `Context` in order to be
-/// checked.
+/// Implement for pieces of information that require some additional context `Context` in order to
+/// be checked.
pub trait Checkable<Context>: Sized {
	/// Returned if `check` succeeds.
	type Checked;
@@ -655,7 +808,7 @@ pub trait Dispatchable {
	/// identifier for the caller. The origin can be empty in the case of an inherent extrinsic.
	type Origin;
	/// ...
-	type Trait;
+	type Config;
	/// An opaque set of information attached to the transaction. This could be constructed anywhere
	/// down the line in a runtime. The current Substrate runtime uses a struct with the same name
	/// to represent the dispatch class and weight.
@@ -674,7 +827,7 @@ pub type PostDispatchInfoOf<T> = <T as Dispatchable>::PostInfo;

impl Dispatchable for () {
	type Origin = ();
-	type Trait = ();
+	type Config = ();
	type Info = ();
	type PostInfo = ();
	fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo<Self::PostInfo> {
@@ -684,7 +837,9 @@ impl Dispatchable for () {

/// Means by which a transaction may be extended. This type embodies both the data and the logic
/// that should be additionally associated with the transaction. It should be plain old data.
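// A minimal sketch of the three extrinsic kinds listed above, with invented
// mock types (`MockExtrinsic` and its `u64` call/signature stand-ins) purely
// for illustration:
#[allow(dead_code)]
struct MockExtrinsic {
	call: u64,
	signature: Option<[u8; 64]>,
}
#[allow(dead_code)]
impl MockExtrinsic {
	// Mirrors the shape of `Extrinsic::new`: absent signed data produces an
	// inherent or unsigned transaction, present signed data a signed one.
	fn new(call: u64, signed_data: Option<[u8; 64]>) -> Option<Self> {
		Some(Self { call, signature: signed_data })
	}
	fn is_signed(&self) -> Option<bool> {
		Some(self.signature.is_some())
	}
}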
-pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq { +pub trait SignedExtension: + Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo +{ /// Unique identifier of this signed extension. /// /// This will be exposed in the metadata to identify the signed extension used @@ -699,7 +854,7 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq /// Any additional data that will go into the signed payload. This may be created dynamically /// from the transaction using the `additional_signed` function. - type AdditionalSigned: Encode; + type AdditionalSigned: Encode + TypeInfo; /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. type Pre: Default; @@ -804,18 +959,33 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq Ok(()) } - /// Returns the list of unique identifier for this signed extension. + /// Returns the metadata for this signed extension. /// /// As a [`SignedExtension`] can be a tuple of [`SignedExtension`]s we need to return a `Vec` - /// that holds all the unique identifiers. Each individual `SignedExtension` must return - /// *exactly* one identifier. + /// that holds the metadata of each one. Each individual `SignedExtension` must return + /// *exactly* one [`SignedExtensionMetadata`]. /// - /// This method provides a default implementation that returns `vec![SELF::IDENTIFIER]`. - fn identifier() -> Vec<&'static str> { - sp_std::vec![Self::IDENTIFIER] + /// This method provides a default implementation that returns a vec containing a single + /// [`SignedExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![SignedExtensionMetadata { + identifier: Self::IDENTIFIER, + ty: scale_info::meta_type::(), + additional_signed: scale_info::meta_type::() + }] } } +/// Information about a [`SignedExtension`] for the runtime metadata. +pub struct SignedExtensionMetadata { + /// The unique identifier of the [`SignedExtension`]. + pub identifier: &'static str, + /// The type of the [`SignedExtension`]. + pub ty: MetaType, + /// The type of the [`SignedExtension`] additional signed data for the payload. + pub additional_signed: MetaType, +} + #[impl_for_tuples(1, 12)] impl SignedExtension for Tuple { for_tuples!( where #( Tuple: SignedExtension )* ); @@ -841,9 +1011,13 @@ impl SignedExtension for Tuple { Ok(valid) } - fn pre_dispatch(self, who: &Self::AccountId, call: &Self::Call, info: &DispatchInfoOf, len: usize) - -> Result - { + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? 
),* ) )) } @@ -876,9 +1050,9 @@ impl SignedExtension for Tuple { Ok(()) } - fn identifier() -> Vec<&'static str> { + fn metadata() -> Vec { let mut ids = Vec::new(); - for_tuples!( #( ids.extend(Tuple::identifier()); )* ); + for_tuples!( #( ids.extend(Tuple::metadata()); )* ); ids } } @@ -891,7 +1065,9 @@ impl SignedExtension for () { type Call = (); type Pre = (); const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } } /// An "executable" piece of information, used by the standard Substrate Executive in order to @@ -905,7 +1081,7 @@ pub trait Applyable: Sized + Send + Sync { type Call: Dispatchable; /// Checks to see if this is a valid *transaction*. It returns information on it if so. - fn validate>( + fn validate>( &self, source: TransactionSource, info: &DispatchInfoOf, @@ -914,7 +1090,7 @@ pub trait Applyable: Sized + Send + Sync { /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, @@ -983,7 +1159,9 @@ pub trait OpaqueKeys: Clone { T::decode(&mut self.get_raw(i)).ok() } /// Verify a proof of ownership for the keys. - fn ownership_proof_is_valid(&self, _proof: &[u8]) -> bool { true } + fn ownership_proof_is_valid(&self, _proof: &[u8]) -> bool { + true + } } /// Input that adds infinite number of zero after wrapped input. @@ -1019,7 +1197,7 @@ impl<'a, T: codec::Input> codec::Input for AppendZerosInput<'a, T> { into[i] = b; i += 1; } else { - break; + break } } i @@ -1062,7 +1240,9 @@ impl<'a> codec::Input for TrailingZeroInput<'a> { /// This type can be converted into and possibly from an AccountId (which itself is generic). pub trait AccountIdConversion: Sized { /// Convert into an account ID. This is infallible. - fn into_account(&self) -> AccountId { self.into_sub_account(&()) } + fn into_account(&self) -> AccountId { + self.into_sub_account(&()) + } /// Try to convert an account ID into this type. Might not succeed. fn try_from_account(a: &AccountId) -> Option { @@ -1088,14 +1268,16 @@ pub trait AccountIdConversion: Sized { /// fill AccountId. impl AccountIdConversion for Id { fn into_sub_account(&self, sub: S) -> T { - (Id::TYPE_ID, self, sub).using_encoded(|b| - T::decode(&mut TrailingZeroInput(b)) - ).unwrap_or_default() + (Id::TYPE_ID, self, sub) + .using_encoded(|b| T::decode(&mut TrailingZeroInput(b))) + .unwrap_or_default() } fn try_from_sub_account(x: &T) -> Option<(Self, S)> { x.using_encoded(|d| { - if &d[0..4] != Id::TYPE_ID { return None } + if &d[0..4] != Id::TYPE_ID { + return None + } let mut cursor = &d[4..]; let result = Decode::decode(&mut cursor).ok()?; if cursor.iter().all(|x| *x == 0) { @@ -1127,31 +1309,9 @@ macro_rules! count { }; } -/// Implement `OpaqueKeys` for a described struct. -/// -/// Every field type must implement [`BoundToRuntimeAppPublic`](crate::BoundToRuntimeAppPublic). -/// `KeyTypeIdProviders` is set to the types given as fields. 
-/// -/// ```rust -/// use sp_runtime::{ -/// impl_opaque_keys, KeyTypeId, BoundToRuntimeAppPublic, app_crypto::{sr25519, ed25519} -/// }; -/// -/// pub struct KeyModule; -/// impl BoundToRuntimeAppPublic for KeyModule { type Public = ed25519::AppPublic; } -/// -/// pub struct KeyModule2; -/// impl BoundToRuntimeAppPublic for KeyModule2 { type Public = sr25519::AppPublic; } -/// -/// impl_opaque_keys! { -/// pub struct Keys { -/// pub key_module: KeyModule, -/// pub key_module2: KeyModule2, -/// } -/// } -/// ``` +#[doc(hidden)] #[macro_export] -macro_rules! impl_opaque_keys { +macro_rules! impl_opaque_keys_inner { ( $( #[ $attr:meta ] )* pub struct $name:ident { @@ -1166,9 +1326,9 @@ macro_rules! impl_opaque_keys { Default, Clone, PartialEq, Eq, $crate::codec::Encode, $crate::codec::Decode, + $crate::scale_info::TypeInfo, $crate::RuntimeDebug, )] - #[cfg_attr(feature = "std", derive($crate::serde::Serialize, $crate::serde::Deserialize))] pub struct $name { $( $( #[ $inner_attr ] )* @@ -1259,6 +1419,83 @@ macro_rules! impl_opaque_keys { }; } +/// Implement `OpaqueKeys` for a described struct. +/// +/// Every field type must implement [`BoundToRuntimeAppPublic`](crate::BoundToRuntimeAppPublic). +/// `KeyTypeIdProviders` is set to the types given as fields. +/// +/// ```rust +/// use sp_runtime::{ +/// impl_opaque_keys, KeyTypeId, BoundToRuntimeAppPublic, app_crypto::{sr25519, ed25519} +/// }; +/// +/// pub struct KeyModule; +/// impl BoundToRuntimeAppPublic for KeyModule { type Public = ed25519::AppPublic; } +/// +/// pub struct KeyModule2; +/// impl BoundToRuntimeAppPublic for KeyModule2 { type Public = sr25519::AppPublic; } +/// +/// impl_opaque_keys! { +/// pub struct Keys { +/// pub key_module: KeyModule, +/// pub key_module2: KeyModule2, +/// } +/// } +/// ``` +#[macro_export] +#[cfg(feature = "std")] +macro_rules! impl_opaque_keys { + { + $( #[ $attr:meta ] )* + pub struct $name:ident { + $( + $( #[ $inner_attr:meta ] )* + pub $field:ident: $type:ty, + )* + } + } => { + $crate::paste::paste! { + use $crate::serde as [< __opaque_keys_serde_import__ $name >]; + + $crate::impl_opaque_keys_inner! { + $( #[ $attr ] )* + #[derive($crate::serde::Serialize, $crate::serde::Deserialize)] + #[serde(crate = "__opaque_keys_serde_import__" $name)] + pub struct $name { + $( + $( #[ $inner_attr ] )* + pub $field: $type, + )* + } + } + } + } +} + +#[macro_export] +#[cfg(not(feature = "std"))] +macro_rules! impl_opaque_keys { + { + $( #[ $attr:meta ] )* + pub struct $name:ident { + $( + $( #[ $inner_attr:meta ] )* + pub $field:ident: $type:ty, + )* + } + } => { + $crate::impl_opaque_keys_inner! { + $( #[ $attr ] )* + pub struct $name { + $( + $( #[ $inner_attr ] )* + pub $field: $type, + )* + } + } + } +} + /// Trait for things which can be printed from the runtime. pub trait Printable { /// Print the object. @@ -1349,22 +1586,53 @@ pub trait BlockIdTo { ) -> Result>, Self::Error>; } +/// Get current block number +pub trait BlockNumberProvider { + /// Type of `BlockNumber` to provide. + type BlockNumber: Codec + Clone + Ord + Eq + AtLeast32BitUnsigned; + + /// Returns the current block number. + /// + /// Provides an abstraction over an arbitrary way of providing the + /// current block number. + /// + /// In case of using crate `sp_runtime` with the crate `frame-system`, + /// it is already implemented for + /// `frame_system::Pallet` as: + /// + /// ```ignore + /// fn current_block_number() -> Self { + /// frame_system::Pallet::block_number() + /// } + /// ``` + /// . 
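// A hedged sketch of a non-`frame_system` implementation, e.g. a parachain
// that wants mortality to follow the relay chain height. The hard-coded
// value stands in for state a real implementation would read from storage.
struct RelayChainBlockNumberProvider;
impl BlockNumberProvider for RelayChainBlockNumberProvider {
	type BlockNumber = u32;
	fn current_block_number() -> u32 {
		// assumption: in practice this is read from a validated relay-chain proof
		42
	}
}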
+	fn current_block_number() -> Self::BlockNumber;
+
+	/// Utility function only to be used in benchmarking scenarios, to be implemented optionally,
+	/// else a noop.
+	///
+	/// It allows for setting the block number that will later be fetched.
+	/// This is useful in case the block number provider is different from System.
+	#[cfg(feature = "runtime-benchmarks")]
+	fn set_block_number(_block: Self::BlockNumber) {}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::codec::{Encode, Decode, Input};
+	use crate::codec::{Decode, Encode, Input};
 	use sp_core::{crypto::Pair, ecdsa};

 	mod t {
-		use sp_core::crypto::KeyTypeId;
 		use sp_application_crypto::{app_crypto, sr25519};
+		use sp_core::crypto::KeyTypeId;

 		app_crypto!(sr25519, KeyTypeId(*b"test"));
 	}

 	#[test]
 	fn app_verify_works() {
-		use t::*;
 		use super::AppVerify;
+		use t::*;

 		let s = Signature::default();
 		let _ = s.verify(&[0u8; 100][..], &Public::default());
diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs
index e9e2f2b3d3c2b..e114bb5985460 100644
--- a/primitives/runtime/src/transaction_validity.rs
+++ b/primitives/runtime/src/transaction_validity.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,9 +17,11 @@

 //! Transaction validity interface.

+use crate::{
+	codec::{Decode, Encode},
+	RuntimeDebug,
+};
 use sp_std::prelude::*;
-use crate::codec::{Encode, Decode};
-use crate::RuntimeDebug;

 /// Priority for a transaction. Additive. Higher is better.
 pub type TransactionPriority = u64;
@@ -33,7 +35,7 @@ pub type TransactionTag = Vec<u8>;

 /// An invalid transaction validity.
 #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(serde::Serialize))]
+#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
 pub enum InvalidTransaction {
 	/// The call of the transaction is not expected.
 	Call,
@@ -54,6 +56,14 @@ pub enum InvalidTransaction {
 	/// it will only be able to assume a bad signature and cannot express a more meaningful error.
 	BadProof,
 	/// The transaction birth block is ancient.
+	///
+	/// # Possible causes
+	///
+	/// For `FRAME`-based runtimes this would be caused by
+	/// `current block number - Era::birth block number > BlockHashCount`. (e.g. in Polkadot
+	/// `BlockHashCount` = 2400, so a transaction with birth block number 1337 would be valid up
+	/// until block number 1337 + 2400, after which point the transaction would be considered to
+	/// have an ancient birth block.)
 	AncientBirthBlock,
 	/// The transaction would exhaust the resources of current block.
 	///
@@ -63,8 +73,8 @@
 	/// Any other custom invalid validity that is not covered by this enum.
 	Custom(u8),
 	/// An extrinsic with a Mandatory dispatch resulted in Error. This is indicative of either a
-	/// malicious validator or a buggy `provide_inherent`. In any case, it can result in dangerously
-	/// overweight blocks and therefore if found, invalidates the block.
+	/// malicious validator or a buggy `provide_inherent`. In any case, it can result in
+	/// dangerously overweight blocks and therefore if found, invalidates the block.
 	BadMandatory,
 	/// A transaction with a mandatory dispatch. This is invalid; only inherent extrinsics are
 	/// allowed to have mandatory dispatches.
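// A worked example of the mortality rule documented above, using the numbers
// from the comment (`BlockHashCount` = 2400 on Polkadot):
fn has_ancient_birth_block(current: u32, birth: u32, block_hash_count: u32) -> bool {
	current.saturating_sub(birth) > block_hash_count
}
// A transaction born at block 1337 is accepted up to block 1337 + 2400 = 3737:
// `has_ancient_birth_block(3737, 1337, 2400)` is false, while one block later
// `has_ancient_birth_block(3738, 1337, 2400)` is true and the transaction is
// rejected with `InvalidTransaction::AncientBirthBlock`.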
@@ -74,18 +84,12 @@ pub enum InvalidTransaction { impl InvalidTransaction { /// Returns if the reason for the invalidity was block resource exhaustion. pub fn exhausted_resources(&self) -> bool { - match self { - Self::ExhaustsResources => true, - _ => false, - } + matches!(self, Self::ExhaustsResources) } /// Returns if the reason for the invalidity was a mandatory call failing. pub fn was_mandatory(&self) -> bool { - match self { - Self::BadMandatory => true, - _ => false, - } + matches!(self, Self::BadMandatory) } } @@ -97,8 +101,7 @@ impl From for &'static str { InvalidTransaction::Stale => "Transaction is outdated", InvalidTransaction::BadProof => "Transaction has a bad signature", InvalidTransaction::AncientBirthBlock => "Transaction has an ancient birth block", - InvalidTransaction::ExhaustsResources => - "Transaction would exhaust the block limits", + InvalidTransaction::ExhaustsResources => "Transaction would exhaust the block limits", InvalidTransaction::Payment => "Inability to pay some fees (e.g. account balance too low)", InvalidTransaction::BadMandatory => @@ -112,7 +115,7 @@ impl From for &'static str { /// An unknown transaction validity. #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(serde::Serialize))] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum UnknownTransaction { /// Could not lookup some information that is required to validate the transaction. CannotLookup, @@ -136,7 +139,7 @@ impl From for &'static str { /// Errors that can occur while checking the validity of a transaction. #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(serde::Serialize))] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum TransactionValidityError { /// The transaction is invalid. Invalid(InvalidTransaction), @@ -184,18 +187,33 @@ impl From for TransactionValidityError { } } +#[cfg(feature = "std")] +impl std::error::Error for TransactionValidityError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + None + } +} + +#[cfg(feature = "std")] +impl std::fmt::Display for TransactionValidityError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s: &'static str = (*self).into(); + write!(f, "{}", s) + } +} + /// Information on a transaction's validity and, if valid, on how it relates to other transactions. pub type TransactionValidity = Result; -impl Into for InvalidTransaction { - fn into(self) -> TransactionValidity { - Err(self.into()) +impl From for TransactionValidity { + fn from(invalid_transaction: InvalidTransaction) -> Self { + Err(TransactionValidityError::Invalid(invalid_transaction)) } } -impl Into for UnknownTransaction { - fn into(self) -> TransactionValidity { - Err(self.into()) +impl From for TransactionValidity { + fn from(unknown_transaction: UnknownTransaction) -> Self { + Err(TransactionValidityError::Unknown(unknown_transaction)) } } @@ -204,7 +222,9 @@ impl Into for UnknownTransaction { /// Depending on the source we might apply different validation schemes. /// For instance we can disallow specific kinds of transactions if they were not produced /// by our local node (for instance off-chain workers). 
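// Sketch of the ergonomics the `From` impls above provide: a validation
// routine can bubble an error variant straight into `TransactionValidity`.
// (`reject_if_stale` is an invented example function.)
fn reject_if_stale(is_stale: bool) -> TransactionValidity {
	if is_stale {
		return InvalidTransaction::Stale.into()
	}
	Ok(ValidTransaction::default())
}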
-#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf)] +#[derive( + Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf, +)] pub enum TransactionSource { /// Transaction is already included in block. /// @@ -263,7 +283,7 @@ pub struct ValidTransaction { impl Default for ValidTransaction { fn default() -> Self { - ValidTransaction { + Self { priority: 0, requires: vec![], provides: vec![], @@ -279,20 +299,23 @@ impl ValidTransaction { /// To avoid conflicts between different parts in runtime it's recommended to build `requires` /// and `provides` tags with a unique prefix. pub fn with_tag_prefix(prefix: &'static str) -> ValidTransactionBuilder { - ValidTransactionBuilder { - prefix: Some(prefix), - validity: Default::default(), - } + ValidTransactionBuilder { prefix: Some(prefix), validity: Default::default() } } /// Combine two instances into one, as a best effort. This will take the superset of each of the /// `provides` and `requires` tags, it will sum the priorities, take the minimum longevity and /// the logic *And* of the propagate flags. pub fn combine_with(mut self, mut other: ValidTransaction) -> Self { - ValidTransaction { + Self { priority: self.priority.saturating_add(other.priority), - requires: { self.requires.append(&mut other.requires); self.requires }, - provides: { self.provides.append(&mut other.provides); self.provides }, + requires: { + self.requires.append(&mut other.requires); + self.requires + }, + provides: { + self.provides.append(&mut other.provides); + self.provides + }, longevity: self.longevity.min(other.longevity), propagate: self.propagate && other.propagate, } @@ -396,7 +419,6 @@ impl From for ValidTransaction { } } - #[cfg(test)] mod tests { use super::*; @@ -414,7 +436,10 @@ mod tests { let encoded = v.encode(); assert_eq!( encoded, - vec![0, 5, 0, 0, 0, 0, 0, 0, 0, 4, 16, 1, 2, 3, 4, 4, 12, 4, 5, 6, 42, 0, 0, 0, 0, 0, 0, 0, 0] + vec![ + 0, 5, 0, 0, 0, 0, 0, 0, 0, 4, 16, 1, 2, 3, 4, 4, 12, 4, 5, 6, 42, 0, 0, 0, 0, 0, 0, + 0, 0 + ] ); // decode back @@ -434,12 +459,15 @@ mod tests { .priority(3) .priority(6) .into(); - assert_eq!(a, ValidTransaction { - propagate: false, - longevity: 5, - priority: 6, - requires: vec![(PREFIX, 1).encode(), (PREFIX, 2).encode()], - provides: vec![(PREFIX, 3).encode(), (PREFIX, 4).encode()], - }); + assert_eq!( + a, + ValidTransaction { + propagate: false, + longevity: 5, + priority: 6, + requires: vec![(PREFIX, 1).encode(), (PREFIX, 2).encode()], + provides: vec![(PREFIX, 3).encode(), (PREFIX, 4).encode()], + } + ); } } diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index 70ae56fb48108..a4d4a4d5d031a 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-sandbox" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,13 +12,20 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[target.'cfg(target_arch = "wasm32")'.dependencies] +wasmi = { version = "0.9.0", default-features = false, features = ["core"] } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +wasmi = "0.9.0" + [dependencies] -wasmi = { version = "0.6.2", optional = true } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, 
path = "../io" } -sp-wasm-interface = { version = "2.0.0", default-features = false, path = "../wasm-interface" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +wasmi = { version = "0.9.0", optional = true } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../io" } +sp-wasm-interface = { version = "4.0.0-dev", default-features = false, path = "../wasm-interface" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4", default-features = false } [dev-dependencies] wat = "1.0" @@ -33,5 +40,7 @@ std = [ "codec/std", "sp-io/std", "sp-wasm-interface/std", + "log/std", ] strict = [] +wasmer-sandbox = [] diff --git a/primitives/sandbox/with_std.rs b/primitives/sandbox/embedded_executor.rs similarity index 68% rename from primitives/sandbox/with_std.rs rename to primitives/sandbox/embedded_executor.rs index 0f46f49503cac..678da3c3aeaf5 100755 --- a/primitives/sandbox/with_std.rs +++ b/primitives/sandbox/embedded_executor.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,16 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_std::collections::btree_map::BTreeMap; -use sp_std::fmt; - +use super::{Error, HostError, HostFuncType, ReturnValue, Value, TARGET}; +use alloc::string::String; +use log::debug; +use sp_std::{ + borrow::ToOwned, collections::btree_map::BTreeMap, fmt, marker::PhantomData, prelude::*, +}; use wasmi::{ - Externals, FuncInstance, FuncRef, GlobalDescriptor, GlobalRef, ImportResolver, - MemoryDescriptor, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, - RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, TrapKind + memory_units::Pages, Externals, FuncInstance, FuncRef, GlobalDescriptor, GlobalRef, + ImportResolver, MemoryDescriptor, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, + RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, TrapKind, }; -use wasmi::memory_units::Pages; -use super::{Error, Value, ReturnValue, HostFuncType, HostError}; #[derive(Clone)] pub struct Memory { @@ -37,7 +38,8 @@ impl Memory { memref: MemoryInstance::alloc( Pages(initial as usize), maximum.map(|m| Pages(m as usize)), - ).map_err(|_| Error::Module)?, + ) + .map_err(|_| Error::Module)?, }) } @@ -60,17 +62,13 @@ struct DefinedHostFunctions { impl Clone for DefinedHostFunctions { fn clone(&self) -> DefinedHostFunctions { - DefinedHostFunctions { - funcs: self.funcs.clone(), - } + DefinedHostFunctions { funcs: self.funcs.clone() } } } impl DefinedHostFunctions { fn new() -> DefinedHostFunctions { - DefinedHostFunctions { - funcs: Vec::new(), - } + DefinedHostFunctions { funcs: Vec::new() } } fn define(&mut self, f: HostFuncType) -> HostFuncIndex { @@ -102,16 +100,12 @@ impl<'a, T> Externals for GuestExternals<'a, T> { index: usize, args: RuntimeArgs, ) -> Result, Trap> { - let args = args.as_ref() - .iter() - .cloned() - .map(Into::into) - .collect::>(); + let args = args.as_ref().iter().cloned().map(to_interface).collect::>(); let result = 
(self.defined_host_functions.funcs[index])(self.state, &args); match result { Ok(value) => Ok(match value { - ReturnValue::Value(v) => Some(v.into()), + ReturnValue::Value(v) => Some(to_wasmi(v)), ReturnValue::Unit => None, }), Err(HostError) => Err(TrapKind::Host(Box::new(DummyHostError)).into()), @@ -143,8 +137,7 @@ impl EnvironmentDefinitionBuilder { N2: Into>, { let idx = self.defined_host_functions.define(f); - self.map - .insert((module.into(), field.into()), ExternVal::HostFunc(idx)); + self.map.insert((module.into(), field.into()), ExternVal::HostFunc(idx)); } pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) @@ -152,8 +145,7 @@ impl EnvironmentDefinitionBuilder { N1: Into>, N2: Into>, { - self.map - .insert((module.into(), field.into()), ExternVal::Memory(mem)); + self.map.insert((module.into(), field.into()), ExternVal::Memory(mem)); } } @@ -164,21 +156,17 @@ impl ImportResolver for EnvironmentDefinitionBuilder { field_name: &str, signature: &Signature, ) -> Result { - let key = ( - module_name.as_bytes().to_owned(), - field_name.as_bytes().to_owned(), - ); + let key = (module_name.as_bytes().to_owned(), field_name.as_bytes().to_owned()); let externval = self.map.get(&key).ok_or_else(|| { - wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)) + debug!(target: TARGET, "Export {}:{} not found", module_name, field_name); + wasmi::Error::Instantiation(String::new()) })?; let host_func_idx = match *externval { ExternVal::HostFunc(ref idx) => idx, _ => { - return Err(wasmi::Error::Instantiation(format!( - "Export {}:{} is not a host func", - module_name, field_name - ))) - } + debug!(target: TARGET, "Export {}:{} is not a host func", module_name, field_name); + return Err(wasmi::Error::Instantiation(String::new())) + }, }; Ok(FuncInstance::alloc_host(signature.clone(), host_func_idx.0)) } @@ -189,9 +177,8 @@ impl ImportResolver for EnvironmentDefinitionBuilder { _field_name: &str, _global_type: &GlobalDescriptor, ) -> Result { - Err(wasmi::Error::Instantiation(format!( - "Importing globals is not supported yet" - ))) + debug!(target: TARGET, "Importing globals is not supported yet"); + Err(wasmi::Error::Instantiation(String::new())) } fn resolve_memory( @@ -200,21 +187,17 @@ impl ImportResolver for EnvironmentDefinitionBuilder { field_name: &str, _memory_type: &MemoryDescriptor, ) -> Result { - let key = ( - module_name.as_bytes().to_owned(), - field_name.as_bytes().to_owned(), - ); + let key = (module_name.as_bytes().to_owned(), field_name.as_bytes().to_owned()); let externval = self.map.get(&key).ok_or_else(|| { - wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)) + debug!(target: TARGET, "Export {}:{} not found", module_name, field_name); + wasmi::Error::Instantiation(String::new()) })?; let memory = match *externval { ExternVal::Memory(ref m) => m, _ => { - return Err(wasmi::Error::Instantiation(format!( - "Export {}:{} is not a memory", - module_name, field_name - ))) - } + debug!(target: TARGET, "Export {}:{} is not a memory", module_name, field_name); + return Err(wasmi::Error::Instantiation(String::new())) + }, }; Ok(memory.memref.clone()) } @@ -225,16 +208,15 @@ impl ImportResolver for EnvironmentDefinitionBuilder { _field_name: &str, _table_type: &TableDescriptor, ) -> Result { - Err(wasmi::Error::Instantiation(format!( - "Importing tables is not supported yet" - ))) + debug!("Importing tables is not supported yet"); + Err(wasmi::Error::Instantiation(String::new())) } } pub struct Instance 
{ instance: ModuleRef, defined_host_functions: DefinedHostFunctions, - _marker: std::marker::PhantomData, + _marker: PhantomData, } impl Instance { @@ -244,26 +226,19 @@ impl Instance { state: &mut T, ) -> Result, Error> { let module = Module::from_buffer(code).map_err(|_| Error::Module)?; - let not_started_instance = ModuleInstance::new(&module, env_def_builder) - .map_err(|_| Error::Module)?; - + let not_started_instance = + ModuleInstance::new(&module, env_def_builder).map_err(|_| Error::Module)?; let defined_host_functions = env_def_builder.defined_host_functions.clone(); let instance = { - let mut externals = GuestExternals { - state, - defined_host_functions: &defined_host_functions, - }; - let instance = not_started_instance.run_start(&mut externals) - .map_err(|_| Error::Execution)?; + let mut externals = + GuestExternals { state, defined_host_functions: &defined_host_functions }; + let instance = + not_started_instance.run_start(&mut externals).map_err(|_| Error::Execution)?; instance }; - Ok(Instance { - instance, - defined_host_functions, - _marker: std::marker::PhantomData::, - }) + Ok(Instance { instance, defined_host_functions, _marker: PhantomData:: }) } pub fn invoke( @@ -272,35 +247,49 @@ impl Instance { args: &[Value], state: &mut T, ) -> Result { - let args = args.iter().cloned().map(Into::into).collect::>(); + let args = args.iter().cloned().map(to_wasmi).collect::>(); - let mut externals = GuestExternals { - state, - defined_host_functions: &self.defined_host_functions, - }; - let result = self.instance - .invoke_export(&name, &args, &mut externals); + let mut externals = + GuestExternals { state, defined_host_functions: &self.defined_host_functions }; + let result = self.instance.invoke_export(&name, &args, &mut externals); match result { Ok(None) => Ok(ReturnValue::Unit), - Ok(Some(val)) => Ok(ReturnValue::Value(val.into())), + Ok(Some(val)) => Ok(ReturnValue::Value(to_interface(val))), Err(_err) => Err(Error::Execution), } } pub fn get_global_val(&self, name: &str) -> Option { - let global = self.instance - .export_by_name(name)? - .as_global()? - .get(); + let global = self.instance.export_by_name(name)?.as_global()?.get(); + + Some(to_interface(global)) + } +} + +/// Convert the substrate value type to the wasmi value type. +fn to_wasmi(value: Value) -> RuntimeValue { + match value { + Value::I32(val) => RuntimeValue::I32(val), + Value::I64(val) => RuntimeValue::I64(val), + Value::F32(val) => RuntimeValue::F32(val.into()), + Value::F64(val) => RuntimeValue::F64(val.into()), + } +} - Some(global.into()) +/// Convert the wasmi value type to the substrate value type. 
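// The two conversion helpers here are inverses of each other; an
// illustrative round-trip (not part of this diff) makes the symmetry explicit:
#[cfg(test)]
fn roundtrip(v: Value) -> Value {
	to_interface(to_wasmi(v))
}
// `roundtrip(Value::I32(7))` yields `Value::I32(7)` again; the float variants
// survive as well because both sides carry the raw bit patterns.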
+fn to_interface(value: RuntimeValue) -> Value { + match value { + RuntimeValue::I32(val) => Value::I32(val), + RuntimeValue::I64(val) => Value::I64(val), + RuntimeValue::F32(val) => Value::F32(val.into()), + RuntimeValue::F64(val) => Value::F64(val.into()), } } #[cfg(test)] mod tests { - use crate::{Error, Value, ReturnValue, HostError, EnvironmentDefinitionBuilder, Instance}; + use crate::{EnvironmentDefinitionBuilder, Error, HostError, Instance, ReturnValue, Value}; use assert_matches::assert_matches; fn execute_sandboxed(code: &[u8], args: &[Value]) -> Result { @@ -310,7 +299,7 @@ mod tests { fn env_assert(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } let condition = args[0].as_i32().ok_or_else(|| HostError)?; if condition != 0 { @@ -321,7 +310,7 @@ mod tests { } fn env_inc_counter(e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } let inc_by = args[0].as_i32().ok_or_else(|| HostError)?; e.counter += inc_by as u32; @@ -330,7 +319,7 @@ mod tests { /// Function that takes one argument of any type and returns that value. fn env_polymorphic_id(_e: &mut State, args: &[Value]) -> Result { if args.len() != 1 { - return Err(HostError); + return Err(HostError) } Ok(ReturnValue::Value(args[0])) } @@ -350,7 +339,8 @@ mod tests { #[test] fn invoke_args() { - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) @@ -371,21 +361,19 @@ mod tests { ) ) ) - "#).unwrap(); - - let result = execute_sandboxed( - &code, - &[ - Value::I32(0x12345678), - Value::I64(0x1234567887654321), - ] - ); + "#, + ) + .unwrap(); + + let result = + execute_sandboxed(&code, &[Value::I32(0x12345678), Value::I64(0x1234567887654321)]); assert!(result.is_ok()); } #[test] fn return_value() { - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -394,20 +382,18 @@ mod tests { ) ) ) - "#).unwrap(); - - let return_val = execute_sandboxed( - &code, - &[ - Value::I32(0x1336), - ] - ).unwrap(); + "#, + ) + .unwrap(); + + let return_val = execute_sandboxed(&code, &[Value::I32(0x1336)]).unwrap(); assert_eq!(return_val, ReturnValue::Value(Value::I32(0x1337))); } #[test] fn signatures_dont_matter() { - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "polymorphic_id" (func $id_i32 (param i32) (result i32))) (import "env" "polymorphic_id" (func $id_i64 (param i64) (result i64))) @@ -434,7 +420,9 @@ mod tests { ) ) ) - "#).unwrap(); + "#, + ) + .unwrap(); let return_val = execute_sandboxed(&code, &[]).unwrap(); assert_eq!(return_val, ReturnValue::Unit); @@ -449,7 +437,8 @@ mod tests { let mut env_builder = EnvironmentDefinitionBuilder::new(); env_builder.add_host_func("env", "returns_i32", env_returns_i32); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module ;; It's actually returns i32, but imported as if it returned i64 (import "env" "returns_i32" (func $returns_i32 (result i64))) @@ -460,15 +449,14 @@ mod tests { ) ) ) - "#).unwrap(); + "#, + ) + .unwrap(); // It succeeds since we are able to import functions with types we want. let mut instance = Instance::new(&code, &env_builder, &mut ()).unwrap(); // But this fails since we imported a function that returns i32 as if it returned i64. 
-		assert_matches!(
-			instance.invoke("call", &[], &mut ()),
-			Err(Error::Execution)
-		);
+		assert_matches!(instance.invoke("call", &[], &mut ()), Err(Error::Execution));
 	}
 }
diff --git a/primitives/sandbox/without_std.rs b/primitives/sandbox/host_executor.rs
similarity index 98%
rename from primitives/sandbox/without_std.rs
rename to primitives/sandbox/host_executor.rs
index dfd3742c6e96f..d2836e2ffd1eb 100755
--- a/primitives/sandbox/without_std.rs
+++ b/primitives/sandbox/host_executor.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,7 +39,7 @@ mod ffi {
 		// We need to ensure that sizes of a callable function pointer and host function index are
 		// indeed equal.
 		// We can't use the `static_assertions` crate because it makes the compiler panic, so fall back to a runtime assert.
-		// const_assert!(mem::size_of::() == mem::size_of::>(),);
+		// const_assert!(mem::size_of::() == mem::size_of::>());
 		assert!(mem::size_of::() == mem::size_of::>());
 		mem::transmute::>(idx)
 	}
diff --git a/primitives/sandbox/src/lib.rs b/primitives/sandbox/src/lib.rs
index a1348370dfe4b..1724b4152ff3d 100755
--- a/primitives/sandbox/src/lib.rs
+++ b/primitives/sandbox/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,9 +23,9 @@
 //! and without the performance penalty of full wasm emulation inside wasm.
 //!
 //! This is achieved by using bindings to the wasm VM, which are published by the host API.
-//! This API is thin and consists of only a handful functions. It contains functions for instantiating
-//! modules and executing them, but doesn't contain functions for inspecting the module
-//! structure. The user of this library is supposed to read the wasm module.
+//! This API is thin and consists of only a handful of functions. It contains functions for
+//! instantiating modules and executing them, but doesn't contain functions for inspecting the
+//! module structure. The user of this library is supposed to read the wasm module.
 //!
 //! When this crate is used in the `std` environment all these functions are implemented by directly
 //! calling the wasm VM.
@@ -38,17 +38,22 @@
 #![warn(missing_docs)]
 #![cfg_attr(not(feature = "std"), no_std)]

+extern crate alloc;
+
 use sp_std::prelude::*;

 pub use sp_core::sandbox::HostError;
-pub use sp_wasm_interface::{Value, ReturnValue};
+pub use sp_wasm_interface::{ReturnValue, Value};
+
+/// The target used for logging.
+const TARGET: &str = "runtime::sandbox";

 mod imp {
-	#[cfg(feature = "std")]
-	include!("../with_std.rs");
+	#[cfg(all(feature = "wasmer-sandbox", not(feature = "std")))]
+	include!("../host_executor.rs");

-	#[cfg(not(feature = "std"))]
-	include!("../without_std.rs");
+	#[cfg(not(all(feature = "wasmer-sandbox", not(feature = "std"))))]
+	include!("../embedded_executor.rs");
 }

 /// Error that can occur while using this crate.
@@ -100,9 +105,7 @@ impl Memory {
 	///
 	/// Allocated memory is always zeroed.
 	pub fn new(initial: u32, maximum: Option<u32>) -> Result<Memory, Error> {
-		Ok(Memory {
-			inner: imp::Memory::new(initial, maximum)?,
-		})
+		Ok(Memory { inner: imp::Memory::new(initial, maximum)?
}) } /// Read a memory area at the address `ptr` with the size of the provided slice `buf`. @@ -131,9 +134,7 @@ pub struct EnvironmentDefinitionBuilder { impl EnvironmentDefinitionBuilder { /// Construct a new `EnvironmentDefinitionBuilder`. pub fn new() -> EnvironmentDefinitionBuilder { - EnvironmentDefinitionBuilder { - inner: imp::EnvironmentDefinitionBuilder::new(), - } + EnvironmentDefinitionBuilder { inner: imp::EnvironmentDefinitionBuilder::new() } } /// Register a host function in this environment definition. @@ -172,16 +173,16 @@ impl Instance { /// run the `start` function (if it is present in the module) with the given `state`. /// /// Returns `Err(Error::Module)` if this module can't be instantiated with the given - /// environment. If execution of `start` function generated a trap, then `Err(Error::Execution)` will - /// be returned. + /// environment. If execution of `start` function generated a trap, then `Err(Error::Execution)` + /// will be returned. /// /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html - pub fn new(code: &[u8], env_def_builder: &EnvironmentDefinitionBuilder, state: &mut T) - -> Result, Error> - { - Ok(Instance { - inner: imp::Instance::new(code, &env_def_builder.inner, state)?, - }) + pub fn new( + code: &[u8], + env_def_builder: &EnvironmentDefinitionBuilder, + state: &mut T, + ) -> Result, Error> { + Ok(Instance { inner: imp::Instance::new(code, &env_def_builder.inner, state)? }) } /// Invoke an exported function with the given name. @@ -192,8 +193,8 @@ impl Instance { /// /// - An export function name isn't a proper utf8 byte sequence, /// - This module doesn't have an exported function with the given name, - /// - If types of the arguments passed to the function doesn't match function signature - /// then trap occurs (as if the exported function was called via call_indirect), + /// - If types of the arguments passed to the function doesn't match function signature then + /// trap occurs (as if the exported function was called via call_indirect), /// - Trap occurred at the execution time. pub fn invoke( &mut self, diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 5a4514db86da9..2200274e0628d 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-serializer" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,5 +14,5 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = "1.0.101" -serde_json = "1.0.41" +serde = "1.0.126" +serde_json = "1.0.68" diff --git a/primitives/serializer/src/lib.rs b/primitives/serializer/src/lib.rs index c1e03e58a7af8..ccdbbf27f179b 100644 --- a/primitives/serializer/src/lib.rs +++ b/primitives/serializer/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,7 @@ #![warn(missing_docs)] -pub use serde_json::{from_str, from_slice, from_reader, Result, Error}; +pub use serde_json::{from_reader, from_slice, from_str, Error, Result}; const PROOF: &str = "Serializers are infallible; qed"; @@ -37,6 +37,9 @@ pub fn encode(value: &T) -> Vec { } /// Serialize the given data structure as JSON into the IO stream. 
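// Usage sketch for the serializer helpers: `to_writer` streams JSON into any
// `std::io::Write` sink, a `Vec<u8>` buffer in this illustration.
// (`write_json_sketch` is an invented example function.)
fn write_json_sketch() -> Vec<u8> {
	let mut out = Vec::new();
	sp_serializer::to_writer(&mut out, &vec![1u32, 2, 3])
		.expect("serializing a vector of integers cannot fail; qed");
	out // now holds the bytes of "[1,2,3]"
}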
-pub fn to_writer(writer: W, value: &T) -> Result<()> { +pub fn to_writer( + writer: W, + value: &T, +) -> Result<()> { serde_json::to_writer(writer, value) } diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 4fccce6283142..8e1e2464e49ec 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-session" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,17 +13,19 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-staking = { version = "2.0.0", default-features = false, path = "../staking" } -sp-runtime = { version = "2.0.0", optional = true, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../staking" } +sp-runtime = { version = "4.0.0-dev", optional = true, path = "../runtime" } [features] default = [ "std" ] std = [ "codec/std", + "scale-info/std", "sp-api/std", "sp-core/std", "sp-std/std", diff --git a/primitives/session/src/lib.rs b/primitives/session/src/lib.rs index 38a852dafd1dd..d85b6af4349e4 100644 --- a/primitives/session/src/lib.rs +++ b/primitives/session/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,15 +19,14 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; -#[cfg(feature = "std")] -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; #[cfg(feature = "std")] use sp_api::ProvideRuntimeApi; +#[cfg(feature = "std")] +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use sp_core::RuntimeDebug; -use sp_core::crypto::KeyTypeId; +use sp_core::{crypto::KeyTypeId, RuntimeDebug}; use sp_staking::SessionIndex; use sp_std::vec::Vec; @@ -54,7 +53,7 @@ sp_api::decl_runtime_apis! { pub type ValidatorCount = u32; /// Proof of membership of a specific key in a given session. -#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, RuntimeDebug)] +#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, RuntimeDebug, scale_info::TypeInfo)] pub struct MembershipProof { /// The session index on which the specific key is a member. 
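// Sketch of what the new `TypeInfo` derive enables: the type can now be
// registered in the metadata type registry, mirroring how
// `SignedExtension::metadata()` calls `scale_info::meta_type` elsewhere in
// this diff. (`membership_proof_meta` is an invented helper.)
fn membership_proof_meta() -> scale_info::MetaType {
	scale_info::meta_type::<MembershipProof>()
}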
pub session: SessionIndex, @@ -113,7 +112,7 @@ pub fn generate_initial_session_keys( client: std::sync::Arc, at: &BlockId, seeds: Vec, -) -> Result<(), sp_api::ApiErrorFor> +) -> Result<(), sp_api::ApiError> where Block: BlockT, T: ProvideRuntimeApi, diff --git a/primitives/sr-api/proc-macro/src/lib.rs b/primitives/sr-api/proc-macro/src/lib.rs deleted file mode 100644 index 0c506a1455dbe..0000000000000 --- a/primitives/sr-api/proc-macro/src/lib.rs +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2018-2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Macros for declaring and implementing runtime apis. - -#![recursion_limit = "512"] - -use proc_macro::TokenStream; - -mod impl_runtime_apis; -mod decl_runtime_apis; -mod utils; - -/// Tags given trait implementations as runtime apis. -/// -/// All traits given to this macro, need to be declared with the `decl_runtime_apis!` macro. -/// The implementation of the trait should follow the declaration given to the `decl_runtime_apis!` -/// macro, besides the `Block` type that is required as first generic parameter for each runtime -/// api trait. When implementing a runtime api trait, it is required that the trait is referenced -/// by a path, e.g. `impl my_trait::MyTrait for Runtime`. The macro will use this path to access -/// the declaration of the trait for the runtime side. -/// -/// The macro also generates the api implementations for the client side and provides it through -/// the `RuntimeApi` type. The `RuntimeApi` is hidden behind a `feature` called `std`. -/// -/// To expose version information about all implemented api traits, the constant -/// `RUNTIME_API_VERSIONS` is generated. This constant should be used to instantiate the `apis` -/// field of `RuntimeVersion`. -/// -/// # Example -/// -/// ```rust -/// use sp_version::create_runtime_str; -/// # -/// # use sp_runtime::traits::{GetNodeBlockType, Block as BlockT}; -/// # use test_client::runtime::Block; -/// # -/// # /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` -/// # /// trait are done by the `construct_runtime!` macro in a real runtime. -/// # pub struct Runtime {} -/// # impl GetNodeBlockType for Runtime { -/// # type NodeBlock = Block; -/// # } -/// # -/// # sp_api::decl_runtime_apis! { -/// # /// Declare the api trait. -/// # pub trait Balance { -/// # /// Get the balance. -/// # fn get_balance() -> u64; -/// # /// Set the balance. -/// # fn set_balance(val: u64); -/// # } -/// # pub trait BlockBuilder { -/// # fn build_block() -> Block; -/// # } -/// # } -/// -/// /// All runtime api implementations need to be done in one call of the macro! -/// sp_api::impl_runtime_apis! 
{ -/// # impl sp_api::Core for Runtime { -/// # fn version() -> sp_version::RuntimeVersion { -/// # unimplemented!() -/// # } -/// # fn execute_block(_block: Block) {} -/// # fn initialize_block(_header: &::Header) {} -/// # } -/// -/// impl self::Balance for Runtime { -/// fn get_balance() -> u64 { -/// 1 -/// } -/// fn set_balance(_bal: u64) { -/// // Store the balance -/// } -/// } -/// -/// impl self::BlockBuilder for Runtime { -/// fn build_block() -> Block { -/// unimplemented!("Please implement me!") -/// } -/// } -/// } -/// -/// /// Runtime version. This needs to be declared for each runtime. -/// pub const VERSION: sp_version::RuntimeVersion = sp_version::RuntimeVersion { -/// spec_name: create_runtime_str!("node"), -/// impl_name: create_runtime_str!("test-node"), -/// authoring_version: 1, -/// spec_version: 1, -/// impl_version: 0, -/// // Here we are exposing the runtime api versions. -/// apis: RUNTIME_API_VERSIONS, -/// transaction_version: 1, -/// }; -/// -/// # fn main() {} -/// ``` -#[proc_macro] -pub fn impl_runtime_apis(input: TokenStream) -> TokenStream { - impl_runtime_apis::impl_runtime_apis_impl(input) -} - -/// Declares given traits as runtime apis. -/// -/// The macro will create two declarations, one for using on the client side and one for using -/// on the runtime side. The declaration for the runtime side is hidden in its own module. -/// The client side declaration gets two extra parameters per function, -/// `&self` and `at: &BlockId`. The runtime side declaration will match the given trait -/// declaration. Besides one exception, the macro adds an extra generic parameter `Block: BlockT` -/// to the client side and the runtime side. This generic parameter is usable by the user. -/// -/// For implementing these macros you should use the `impl_runtime_apis!` macro. -/// -/// # Example -/// -/// ```rust -/// sp_api::decl_runtime_apis! { -/// /// Declare the api trait. -/// pub trait Balance { -/// /// Get the balance. -/// fn get_balance() -> u64; -/// /// Set the balance. -/// fn set_balance(val: u64); -/// } -/// -/// /// You can declare multiple api traits in one macro call. -/// /// In one module you can call the macro at maximum one time. -/// pub trait BlockBuilder { -/// /// The macro adds an explicit `Block: BlockT` generic parameter for you. -/// /// You can use this generic parameter as you would defined it manually. -/// fn build_block() -> Block; -/// } -/// } -/// -/// # fn main() {} -/// ``` -/// -/// # Runtime api trait versioning -/// -/// To support versioning of the traits, the macro supports the attribute `#[api_version(1)]`. -/// The attribute supports any `u32` as version. By default, each trait is at version `1`, if no -/// version is provided. We also support changing the signature of a method. This signature -/// change is highlighted with the `#[changed_in(2)]` attribute above a method. A method that is -/// tagged with this attribute is callable by the name `METHOD_before_version_VERSION`. This -/// method will only support calling into wasm, trying to call into native will fail (change the -/// spec version!). Such a method also does not need to be implemented in the runtime. -/// -/// ```rust -/// sp_api::decl_runtime_apis! { -/// /// Declare the api trait. -/// #[api_version(2)] -/// pub trait Balance { -/// /// Get the balance. -/// fn get_balance() -> u64; -/// /// Set balance. -/// fn set_balance(val: u64); -/// /// Set balance, old version. -/// /// -/// /// Is callable by `set_balance_before_version_2`. 
-/// #[changed_in(2)] -/// fn set_balance(val: u16); -/// /// In version 2, we added this new function. -/// fn increase_balance(val: u64); -/// } -/// } -/// -/// # fn main() {} -/// ``` -/// -/// To check if a given runtime implements a runtime api trait, the `RuntimeVersion` has the -/// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` to -/// check if the runtime at the given block id implements the requested runtime api trait. -#[proc_macro] -pub fn decl_runtime_apis(input: TokenStream) -> TokenStream { - decl_runtime_apis::decl_runtime_apis_impl(input) -} diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index 315d5acc49dae..9e852319ede42 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-staking" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,14 +13,16 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "sp-runtime/std", "sp-std/std", ] diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index 3f6c1873ff031..4bb8ed93f88a1 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -1,16 +1,19 @@ - -// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 650a17e7898a1..a91cb47c117b6 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,7 +20,7 @@ use sp_std::vec::Vec; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_runtime::Perbill; use crate::SessionIndex; @@ -84,10 +84,7 @@ pub trait Offence { /// /// `offenders_count` - the count of unique offending authorities. It is >0. /// `validator_set_count` - the cardinality of the validator set at the time of offence. - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill; + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill; } /// Errors that may happen on offence reports. @@ -108,7 +105,7 @@ impl sp_runtime::traits::Printable for OffenceError { Self::Other(e) => { "Other".print(); e.print(); - } + }, } } } @@ -159,13 +156,7 @@ pub trait OnOffenceHandler { offenders: &[OffenceDetails], slash_fraction: &[Perbill], session: SessionIndex, - ) -> Result; - - /// Can an offence be reported now or not. This is an method to short-circuit a call into - /// `on_offence`. Ideally, a correct implementation should return `false` if `on_offence` will - /// return `Err`. Nonetheless, this is up to the implementation and this trait cannot guarantee - /// it. - fn can_report() -> bool; + ) -> Res; } impl OnOffenceHandler for () { @@ -173,15 +164,13 @@ impl OnOffenceHandler _offenders: &[OffenceDetails], _slash_fraction: &[Perbill], _session: SessionIndex, - ) -> Result { - Ok(Default::default()) + ) -> Res { + Default::default() } - - fn can_report() -> bool { true } } /// A details about an offending authority for a particular kind of offence. -#[derive(Clone, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] pub struct OffenceDetails { /// The offending authority id pub offender: Offender, diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 8940488319931..457bbac5d2640 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" @@ -14,25 +14,28 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = { version = "0.4.8", optional = true } -parking_lot = { version = "0.10.0", optional = true } +log = { version = "0.4.11", optional = true } +thiserror = { version = "1.0.21", optional = true } +parking_lot = { version = "0.11.1", optional = true } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.0", default-features = false } +trie-db = { version = "0.22.6", default-features = false } trie-root = { version = "0.16.0", default-features = false } -sp-trie = { version = "2.0.0", path = "../trie", default-features = false } -sp-core = { version = "2.0.0", path = "../core", default-features = false } -sp-panic-handler = { version = "2.0.0", path = "../panic-handler", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } +sp-trie = { version = "4.0.0-dev", path = "../trie", default-features = false } +sp-core = { version = "4.0.0-dev", path = "../core", default-features = false } +sp-panic-handler = { version = "3.0.0", 
path = "../panic-handler", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } num-traits = { version = "0.2.8", default-features = false } rand = { version = "0.7.2", optional = true } -sp-externalities = { version = "0.8.0", path = "../externalities", default-features = false } +sp-externalities = { version = "0.10.0-dev", path = "../externalities", default-features = false } smallvec = "1.4.1" -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +tracing = { version = "0.1.22", optional = true } [dev-dependencies] hex-literal = "0.3.1" -sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-runtime = { version = "4.0.0-dev", path = "../runtime" } pretty_assertions = "0.6.1" +rand = { version = "0.7.2" } [features] default = ["std"] @@ -40,14 +43,16 @@ std = [ "codec/std", "hash-db/std", "num-traits/std", - "sp-core/std", - "sp-externalities/std", - "sp-std/std", + "sp-core/std", + "sp-externalities/std", + "sp-std/std", "sp-trie/std", "trie-db/std", "trie-root/std", "log", + "thiserror", "parking_lot", "rand", - "sp-panic-handler", + "sp-panic-handler", + "tracing" ] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 360fe9a985682..1b1a732f8d0fc 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,19 +17,16 @@ //! State machine backends. These manage the code and storage of contracts. -use hash_db::Hasher; -use codec::{Decode, Encode}; -use sp_core::{ - storage::{ChildInfo, well_known_keys, TrackedStorageKey} -}; use crate::{ - trie_backend::TrieBackend, - trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, + trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, ChildStorageCollection, + StorageCollection, StorageKey, StorageValue, UsageInfo, }; -use sp_std::vec::Vec; +use codec::{Decode, Encode}; +use hash_db::Hasher; +use sp_core::storage::{well_known_keys, ChildInfo, TrackedStorageKey}; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; +use sp_std::vec::Vec; /// A state backend is used to read state data and can have changes committed /// to it. @@ -90,13 +87,31 @@ pub trait Backend: sp_std::fmt::Debug { fn next_child_storage_key( &self, child_info: &ChildInfo, - key: &[u8] + key: &[u8], ) -> Result, Self::Error>; - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - fn for_keys_in_child_storage( + /// Iterate over storage starting at key, for a given prefix and child trie. + /// Aborts as soon as `f` returns false. + /// Warning, this fails at first error when usual iteration skips errors. + /// If `allow_missing` is true, iteration stops when it reaches a missing trie node. + /// Otherwise an error is produced. + /// + /// Returns `true` if trie end is reached. 
+ fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result; + + /// Retrieve all entries keys of storage and call `f` for each of those keys. + /// Aborts as soon as `f` returns false. + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ); @@ -110,7 +125,6 @@ pub trait Backend: sp_std::fmt::Debug { /// call `f` for each of those keys. fn for_key_values_with_prefix(&self, prefix: &[u8], f: F); - /// Retrieve all child entries keys which start with the given prefix and /// call `f` for each of those keys. fn for_child_keys_with_prefix( @@ -125,8 +139,10 @@ pub trait Backend: sp_std::fmt::Debug { /// Does not include child storage updates. fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord; + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. The second argument @@ -134,8 +150,10 @@ pub trait Backend: sp_std::fmt::Debug { fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord; /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(StorageKey, StorageValue)>; @@ -148,18 +166,14 @@ pub trait Backend: sp_std::fmt::Debug { } /// Get all keys of child storage with given prefix - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec { let mut all = Vec::new(); self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec())); all } /// Try convert into trie backend. - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&self) -> Option<&TrieBackend> { None } @@ -168,18 +182,19 @@ pub trait Backend: sp_std::fmt::Debug { /// Does include child storage updates. 
fn full_storage_root<'a>( &self, - delta: impl Iterator)>, - child_deltas: impl Iterator)>, - )>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { + delta: impl Iterator)>, + child_deltas: impl Iterator< + Item = (&'a ChildInfo, impl Iterator)>), + >, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord + Encode, + { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = - self.child_storage_root(&child_info, child_delta); + let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { @@ -188,13 +203,10 @@ pub trait Backend: sp_std::fmt::Debug { child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } - let (root, parent_txs) = self.storage_root(delta - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - .chain( - child_roots - .iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - ) + let (root, parent_txs) = self.storage_root( + delta + .map(|(k, v)| (k, v.as_ref().map(|v| &v[..]))) + .chain(child_roots.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))), ); txs.consolidate(parent_txs); (root, txs) @@ -203,7 +215,7 @@ pub trait Backend: sp_std::fmt::Debug { /// Register stats from overlay of state machine. /// /// By default nothing is registered. - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats); + fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats); /// Query backend usage statistics (i/o, memory) /// @@ -244,85 +256,15 @@ pub trait Backend: sp_std::fmt::Debug { /// Update the whitelist for tracking db reads/writes fn set_whitelist(&self, _: Vec) {} -} - -impl<'a, T: Backend, H: Hasher> Backend for &'a T { - type Error = T::Error; - type Transaction = T::Transaction; - type TrieBackendStorage = T::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result, Self::Error> { - (*self).storage(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - (*self).child_storage(child_info, key) - } - - fn for_keys_in_child_storage( - &self, - child_info: &ChildInfo, - f: F, - ) { - (*self).for_keys_in_child_storage(child_info, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - (*self).next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - (*self).next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - (*self).for_keys_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - (*self).for_child_keys_with_prefix(child_info, prefix, f) - } - - fn storage_root<'b>( - &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { - (*self).storage_root(delta) - } - - fn child_storage_root<'b>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { - (*self).child_storage_root(child_info, delta) - } - fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - (*self).pairs() - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - (*self).for_key_values_with_prefix(prefix, f); + /// Estimate proof size + fn proof_size(&self) -> 
Option { + unimplemented!() } - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } - - fn usage_info(&self) -> UsageInfo { - (*self).usage_info() + /// Extend storage info for benchmarking db + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + unimplemented!() } } @@ -338,10 +280,7 @@ impl Consolidate for () { } } -impl Consolidate for Vec<( - Option, - StorageCollection, - )> { +impl Consolidate for Vec<(Option, StorageCollection)> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } @@ -355,12 +294,15 @@ impl> Consolidate for sp_trie::GenericMem /// Insert input pairs into memory db. #[cfg(test)] -pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: I) -> Option - where - H: Hasher, - I: IntoIterator, +pub(crate) fn insert_into_memory_db( + mdb: &mut sp_trie::MemoryDB, + input: I, +) -> Option +where + H: Hasher, + I: IntoIterator, { - use sp_trie::{TrieMut, trie_types::TrieDBMut}; + use sp_trie::{trie_types::TrieDBMut, TrieMut}; let mut root = ::Out::default(); { @@ -368,7 +310,7 @@ pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: for (key, value) in input { if let Err(e) = trie.insert(&key, &value) { log::warn!(target: "trie", "Failed to write to trie: {}", e); - return None; + return None } } } @@ -384,8 +326,8 @@ pub struct BackendRuntimeCode<'a, B, H> { } #[cfg(feature = "std")] -impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for - BackendRuntimeCode<'a, B, H> +impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode + for BackendRuntimeCode<'a, B, H> { fn fetch_runtime_code<'b>(&'b self) -> Option> { self.backend.storage(well_known_keys::CODE).ok().flatten().map(Into::into) @@ -393,23 +335,27 @@ impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for } #[cfg(feature = "std")] -impl<'a, B: Backend, H: Hasher> BackendRuntimeCode<'a, B, H> where H::Out: Encode { +impl<'a, B: Backend, H: Hasher> BackendRuntimeCode<'a, B, H> +where + H::Out: Encode, +{ /// Create a new instance. pub fn new(backend: &'a B) -> Self { - Self { - backend, - _marker: std::marker::PhantomData, - } + Self { backend, _marker: std::marker::PhantomData } } /// Return the [`RuntimeCode`] build from the wrapped `backend`. pub fn runtime_code(&self) -> Result { - let hash = self.backend.storage_hash(well_known_keys::CODE) + let hash = self + .backend + .storage_hash(well_known_keys::CODE) .ok() .flatten() .ok_or("`:code` hash not found")? .encode(); - let heap_pages = self.backend.storage(well_known_keys::HEAP_PAGES) + let heap_pages = self + .backend + .storage(well_known_keys::HEAP_PAGES) .ok() .flatten() .and_then(|d| Decode::decode(&mut &d[..]).ok()); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 5e3c9bed64f10..0bbd2d0a8e8e6 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,25 @@ //! Basic implementation for Externalities. 
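
Editor's note: before the diff continues with `basic.rs`, a usage sketch of the early-abort iteration the `Backend` trait gains above. The closure bound is assumed to be `FnMut(&[u8]) -> bool` (the angle-bracketed generics are elided in the hunk), and the helper is hypothetical:

```rust
// Usage sketch for the early-abort iteration added to the `Backend` trait:
// returning `false` from the closure stops the walk, so callers can bound
// how much of the state they touch.
use hash_db::Hasher;
use sp_state_machine::Backend;

// Hypothetical helper: collect at most `limit` top-level keys under `prefix`.
fn first_keys_under_prefix<H: Hasher, B: Backend<H>>(
    backend: &B,
    prefix: &[u8],
    limit: usize,
) -> Vec<Vec<u8>> {
    let mut keys = Vec::new();
    // `None` selects the top-level trie rather than a child trie.
    backend.apply_to_keys_while(None, Some(prefix), |key| {
        keys.push(key.to_vec());
        keys.len() < limit
    });
    keys
}
```
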
-use std::{ - collections::BTreeMap, any::{TypeId, Any}, iter::FromIterator, ops::Bound, -}; use crate::{Backend, StorageKey, StorageValue}; +use codec::Encode; use hash_db::Hasher; -use sp_trie::{TrieConfiguration, empty_child_trie_root}; -use sp_trie::trie_types::Layout; +use log::warn; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, Storage, - ChildInfo, StorageChild, TrackedStorageKey, + well_known_keys::is_child_storage_key, ChildInfo, Storage, StorageChild, TrackedStorageKey, }, - traits::Externalities, Blake2Hasher, + traits::Externalities, + Blake2Hasher, +}; +use sp_externalities::{Extension, Extensions}; +use sp_trie::{empty_child_trie_root, trie_types::Layout, TrieConfiguration}; +use std::{ + any::{Any, TypeId}, + collections::BTreeMap, + iter::FromIterator, + ops::Bound, }; -use log::warn; -use codec::Encode; -use sp_externalities::{Extensions, Extension}; /// Simple Map-based Externalities impl. #[derive(Debug)] @@ -105,13 +107,13 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) - && self.inner.children_default.eq(&other.inner.children_default) + self.inner.top.eq(&other.inner.top) && + self.inner.children_default.eq(&other.inner.children_default) } } impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { - fn from_iter>(iter: I) -> Self { + fn from_iter>(iter: I) -> Self { let mut t = Self::default(); t.inner.top.extend(iter); t @@ -119,16 +121,15 @@ impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { } impl Default for BasicExternalities { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + Self::new(Default::default()) + } } impl From> for BasicExternalities { fn from(hashmap: BTreeMap) -> Self { BasicExternalities { - inner: Storage { - top: hashmap, - children_default: Default::default(), - }, + inner: Storage { top: hashmap, children_default: Default::default() }, extensions: Default::default(), } } @@ -145,20 +146,15 @@ impl Externalities for BasicExternalities { self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { - self.inner.children_default.get(child_info.storage_key()) - .and_then(|child| child.data.get(key)).cloned() + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + self.inner + .children_default + .get(child_info.storage_key()) + .and_then(|child| child.data.get(key)) + .cloned() } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option> { + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } @@ -167,25 +163,27 @@ impl Externalities for BasicExternalities { self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() } - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children_default.get(child_info.storage_key()) + self.inner + .children_default + .get(child_info.storage_key()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to set child storage key 
via main storage"); - return; + return } match maybe_value { - Some(value) => { self.inner.top.insert(key, value); } - None => { self.inner.top.remove(&key); } + Some(value) => { + self.inner.top.insert(key, value); + }, + None => { + self.inner.top.remove(&key); + }, } } @@ -195,7 +193,10 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let child_map = self.inner.children_default.entry(child_info.storage_key().to_vec()) + let child_map = self + .inner + .children_default + .entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -207,67 +208,79 @@ impl Externalities for BasicExternalities { } } - fn kill_child_storage( - &mut self, - child_info: &ChildInfo, - ) { - self.inner.children_default.remove(child_info.storage_key()); + fn kill_child_storage(&mut self, child_info: &ChildInfo, _limit: Option) -> (bool, u32) { + let num_removed = self + .inner + .children_default + .remove(child_info.storage_key()) + .map(|c| c.data.len()) + .unwrap_or(0); + (true, num_removed as u32) } - fn clear_prefix(&mut self, prefix: &[u8]) { + fn clear_prefix(&mut self, prefix: &[u8], _limit: Option) -> (bool, u32) { if is_child_storage_key(prefix) { warn!( target: "trie", "Refuse to clear prefix that is part of child storage key via main storage" ); - return; + return (false, 0) } - let to_remove = self.inner.top.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + let to_remove = self + .inner + .top + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) .cloned() .collect::>(); + let num_removed = to_remove.len(); for key in to_remove { self.inner.top.remove(&key); } + (true, num_removed as u32) } fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - ) { + _limit: Option, + ) -> (bool, u32) { if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { - let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + let to_remove = child + .data + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) .cloned() .collect::>(); + let num_removed = to_remove.len(); for key in to_remove { child.data.remove(&key); } + (true, num_removed as u32) + } else { + (true, 0) } } - fn storage_append( - &mut self, - key: Vec, - value: Vec, - ) { + fn storage_append(&mut self, key: Vec, value: Vec) { let current = self.inner.top.entry(key).or_default(); crate::ext::StorageAppend::new(current).append(value); } - fn chain_id(&self) -> u64 { 42 } - fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let prefixed_keys: Vec<_> = self.inner.children_default.iter().map(|(_k, v)| { - (v.child_info.prefixed_storage_key(), v.child_info.clone()) - }).collect(); + let prefixed_keys: Vec<_> = self + .inner + .children_default + .iter() + .map(|(_k, v)| (v.child_info.prefixed_storage_key(), v.child_info.clone())) + .collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. 
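
Editor's note: the hunks above change `kill_child_storage`, `clear_prefix` and `clear_child_prefix` to accept an optional limit and to report `(all_removed, num_removed)` instead of returning nothing. A hedged sketch of batched deletion built on that shape; `BATCH` and the helper name are illustrative, and `BasicExternalities` itself ignores the limit and clears in one pass:

```rust
// Keep clearing under `prefix` in bounded chunks until the backend reports
// that everything under the prefix is gone; returns the total entries removed.
use sp_externalities::Externalities;

const BATCH: u32 = 1_000; // illustrative batch size

fn clear_prefix_in_batches<E: Externalities>(ext: &mut E, prefix: &[u8]) -> u32 {
    let mut total_removed = 0;
    loop {
        let (all_removed, num_removed) = ext.clear_prefix(prefix, Some(BATCH));
        total_removed += num_removed;
        if all_removed {
            return total_removed
        }
    }
}
```
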
@@ -284,17 +297,16 @@ impl Externalities for BasicExternalities { Layout::::trie_root(self.inner.top.clone()).as_ref().into() } - fn child_storage_root( - &mut self, - child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); crate::in_memory_backend::new_in_mem::() - .child_storage_root(&child.child_info, delta).0 + .child_storage_root(&child.child_info, delta) + .0 } else { empty_child_trie_root::>() - }.encode() + } + .encode() } fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { @@ -332,6 +344,10 @@ impl Externalities for BasicExternalities { fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in Basic") } + + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + unimplemented!("get_read_and_written_keys is not supported in Basic") + } } impl sp_externalities::ExtensionStore for BasicExternalities { @@ -347,7 +363,10 @@ impl sp_externalities::ExtensionStore for BasicExternalities { self.extensions.register_with_type_id(type_id, extension) } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if self.extensions.deregister(type_id) { Ok(()) } else { @@ -359,10 +378,11 @@ impl sp_externalities::ExtensionStore for BasicExternalities { #[cfg(test)] mod tests { use super::*; - use sp_core::map; - use sp_core::storage::{Storage, StorageChild}; - use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; + use sp_core::{ + map, + storage::{well_known_keys::CODE, Storage, StorageChild}, + }; #[test] fn commit_should_work() { @@ -370,7 +390,8 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); assert_eq!(&ext.storage_root()[..], &ROOT); } @@ -396,7 +417,7 @@ mod tests { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: child_info.to_owned(), } - ] + ], }); assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); @@ -407,10 +428,32 @@ mod tests { ext.clear_child_storage(child_info, b"dog"); assert_eq!(ext.child_storage(child_info, b"dog"), None); - ext.kill_child_storage(child_info); + ext.kill_child_storage(child_info, None); assert_eq!(ext.child_storage(child_info, b"doe"), None); } + #[test] + fn kill_child_storage_returns_num_elements_removed() { + let child_info = ChildInfo::new_default(b"storage_key"); + let child_info = &child_info; + let mut ext = BasicExternalities::new(Storage { + top: Default::default(), + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + b"doe".to_vec() => b"reindeer".to_vec(), + b"dog".to_vec() => b"puppy".to_vec(), + b"hello".to_vec() => b"world".to_vec(), + ], + child_info: child_info.to_owned(), + } + ], + }); + + let res = ext.kill_child_storage(child_info, None); + assert_eq!(res, (true, 3)); + } + #[test] fn basic_externalities_is_empty() { // Make sure no values are set by default in `BasicExternalities`. 
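
Editor's note: the `changes_trie/build.rs` diff that follows rebinds closure arguments as `mut key`/`mut value` so SCALE values can be decoded straight from the slice via `Decode::decode(&mut key)` instead of `Decode::decode(&mut &key[..])`. A standalone sketch of that idiom, assuming only `parity-scale-codec`:

```rust
// `&[u8]` implements codec's `Input` through a mutable reference, so a
// rebound `mut input: &[u8]` can be passed as `&mut input`; each decode
// consumes bytes from the front of the slice.
use codec::{Decode, Encode};

fn read_two_u32s(mut input: &[u8]) -> Option<(u32, u32)> {
    let first = u32::decode(&mut input).ok()?;
    let second = u32::decode(&mut input).ok()?;
    Some((first, second))
}

fn demo() {
    let bytes = (7u32, 9u32).encode();
    assert_eq!(read_two_u32s(&bytes), Some((7, 9)));
}
```
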
diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index b23481411ae27..d3c6c12122c4f 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,22 @@ //! Structures and functions required to build changes trie for given block. -use std::collections::BTreeMap; -use std::collections::btree_map::Entry; -use codec::{Decode, Encode}; -use hash_db::Hasher; -use num_traits::One; use crate::{ - StorageKey, backend::Backend, - overlayed_changes::{OverlayedChanges, OverlayedValue}, - trie_backend_essence::TrieBackendEssence, changes_trie::{ - AnchorBlockId, ConfigurationRange, Storage, BlockNumber, build_iterator::digest_build_iterator, - input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, + input::{ChildIndex, DigestIndex, ExtrinsicIndex, InputKey, InputPair}, + AnchorBlockId, BlockNumber, ConfigurationRange, Storage, }, + overlayed_changes::{OverlayedChanges, OverlayedValue}, + trie_backend_essence::TrieBackendEssence, + StorageKey, }; +use codec::{Decode, Encode}; +use hash_db::Hasher; +use num_traits::One; use sp_core::storage::{ChildInfo, PrefixedStorageKey}; +use std::collections::{btree_map::Entry, BTreeMap}; /// Prepare input pairs for building a changes trie of given block. /// @@ -45,66 +44,59 @@ pub(crate) fn prepare_input<'a, B, H, Number>( config: ConfigurationRange<'a, Number>, overlay: &'a OverlayedChanges, parent: &'a AnchorBlockId, -) -> Result<( - impl Iterator> + 'a, - Vec<(ChildIndex, impl Iterator> + 'a)>, +) -> Result< + ( + impl Iterator> + 'a, + Vec<(ChildIndex, impl Iterator> + 'a)>, Vec, - ), String> - where - B: Backend, - H: Hasher + 'a, - H::Out: Encode, - Number: BlockNumber, + ), + String, +> +where + B: Backend, + H: Hasher + 'a, + H::Out: Encode, + Number: BlockNumber, { let number = parent.number.clone() + One::one(); - let (extrinsics_input, children_extrinsics_input) = prepare_extrinsics_input( - backend, - &number, - overlay, - )?; - let (digest_input, mut children_digest_input, digest_input_blocks) = prepare_digest_input::( - parent, - config, - number, - storage, - )?; + let (extrinsics_input, children_extrinsics_input) = + prepare_extrinsics_input(backend, &number, overlay)?; + let (digest_input, mut children_digest_input, digest_input_blocks) = + prepare_digest_input::(parent, config, number, storage)?; let mut children_digest = Vec::with_capacity(children_extrinsics_input.len()); for (child_index, ext_iter) in children_extrinsics_input.into_iter() { let dig_iter = children_digest_input.remove(&child_index); children_digest.push(( child_index, - Some(ext_iter).into_iter().flatten() - .chain(dig_iter.into_iter().flatten()), + Some(ext_iter).into_iter().flatten().chain(dig_iter.into_iter().flatten()), )); } for (child_index, dig_iter) in children_digest_input.into_iter() { children_digest.push(( child_index, - None.into_iter().flatten() - .chain(Some(dig_iter).into_iter().flatten()), + None.into_iter().flatten().chain(Some(dig_iter).into_iter().flatten()), )); } - Ok(( - extrinsics_input.chain(digest_input), - children_digest, - digest_input_blocks, - )) + Ok((extrinsics_input.chain(digest_input), children_digest, 
digest_input_blocks)) } /// Prepare ExtrinsicIndex input pairs. fn prepare_extrinsics_input<'a, B, H, Number>( backend: &'a B, block: &Number, overlay: &'a OverlayedChanges, -) -> Result<( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - ), String> - where - B: Backend, - H: Hasher + 'a, - Number: BlockNumber, +) -> Result< + ( + impl Iterator> + 'a, + BTreeMap, impl Iterator> + 'a>, + ), + String, +> +where + B: Backend, + H: Hasher + 'a, + Number: BlockNumber, { let mut children_result = BTreeMap::new(); @@ -115,7 +107,9 @@ fn prepare_extrinsics_input<'a, B, H, Number>( }; let iter = prepare_extrinsics_input_inner( - backend, block, overlay, + backend, + block, + overlay, Some(child_info.clone()), child_changes, )?; @@ -132,12 +126,12 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( block: &Number, overlay: &'a OverlayedChanges, child_info: Option, - changes: impl Iterator -) -> Result> + 'a, String> - where - B: Backend, - H: Hasher, - Number: BlockNumber, + changes: impl Iterator, +) -> Result> + 'a, String> +where + B: Backend, + H: Hasher, + Number: BlockNumber, { changes .filter_map(|(k, v)| { @@ -148,68 +142,79 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( None } }) - .try_fold(BTreeMap::new(), |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { - match map.entry(k) { - Entry::Vacant(entry) => { - // ignore temporary values (values that have null value at the end of operation - // AND are not in storage at the beginning of operation - if let Some(child_info) = child_info.as_ref() { - if !overlay.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_child_storage(&child_info, k) - .map_err(|e| format!("{}", e))? { - return Ok(map); + .try_fold( + BTreeMap::new(), + |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { + match map.entry(k) { + Entry::Vacant(entry) => { + // ignore temporary values (values that have null value at the end of + // operation AND are not in storage at the beginning of operation + if let Some(child_info) = child_info.as_ref() { + if !overlay + .child_storage(child_info, k) + .map(|v| v.is_some()) + .unwrap_or_default() + { + if !backend + .exists_child_storage(&child_info, k) + .map_err(|e| format!("{}", e))? + { + return Ok(map) + } } - } - } else { - if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_storage(k).map_err(|e| format!("{}", e))? { - return Ok(map); + } else { + if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { + if !backend.exists_storage(k).map_err(|e| format!("{}", e))? 
{ + return Ok(map) + } } - } - }; - - let extrinsics = extrinsics.into_iter().collect(); - entry.insert((ExtrinsicIndex { - block: block.clone(), - key: k.to_vec(), - }, extrinsics)); - }, - Entry::Occupied(mut entry) => { - // we do not need to check for temporary values here, because entry is Occupied - // AND we are checking it before insertion - let entry_extrinsics = &mut entry.get_mut().1; - entry_extrinsics.extend( - extrinsics.into_iter() - ); - entry_extrinsics.sort(); - }, - } + }; - Ok(map) - }) + let extrinsics = extrinsics.into_iter().collect(); + entry.insert(( + ExtrinsicIndex { block: block.clone(), key: k.to_vec() }, + extrinsics, + )); + }, + Entry::Occupied(mut entry) => { + // we do not need to check for temporary values here, because entry is + // Occupied AND we are checking it before insertion + let entry_extrinsics = &mut entry.get_mut().1; + entry_extrinsics.extend(extrinsics.into_iter()); + entry_extrinsics.sort(); + }, + } + + Ok(map) + }, + ) .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v))) } - /// Prepare DigestIndex input pairs. fn prepare_digest_input<'a, H, Number>( parent: &'a AnchorBlockId, config: ConfigurationRange, block: Number, storage: &'a dyn Storage, -) -> Result<( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, +) -> Result< + ( + impl Iterator> + 'a, + BTreeMap, impl Iterator> + 'a>, Vec, - ), String> - where - H: Hasher, - H::Out: 'a + Encode, - Number: BlockNumber, + ), + String, +> +where + H: Hasher, + H::Out: 'a + Encode, + Number: BlockNumber, { let build_skewed_digest = config.end.as_ref() == Some(&block); let block_for_digest = if build_skewed_digest { - config.config.next_max_level_digest_range(config.zero.clone(), block.clone()) + config + .config + .next_max_level_digest_range(config.zero.clone(), block.clone()) .map(|(_, end)| end) .unwrap_or_else(|| block.clone()) } else { @@ -217,128 +222,159 @@ fn prepare_digest_input<'a, H, Number>( }; let digest_input_blocks = digest_build_iterator(config, block_for_digest).collect::>(); - digest_input_blocks.clone().into_iter() + digest_input_blocks + .clone() + .into_iter() .try_fold( - (BTreeMap::new(), BTreeMap::new()), move |(mut map, mut child_map), digest_build_block| { - let extrinsic_prefix = ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); - let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); - let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); - let trie_root = storage.root(parent, digest_build_block.clone())?; - let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; - - let insert_to_map = |map: &mut BTreeMap<_,_>, key: StorageKey| { - match map.entry(key.clone()) { - Entry::Vacant(entry) => { - entry.insert((DigestIndex { - block: block.clone(), - key, - }, vec![digest_build_block.clone()])); - }, - Entry::Occupied(mut entry) => { - // DigestIndexValue must be sorted. 
Here we are relying on the fact that digest_build_iterator() - // returns blocks in ascending order => we only need to check for duplicates - // - // is_dup_block could be true when key has been changed in both digest block - // AND other blocks that it covers - let is_dup_block = entry.get().1.last() == Some(&digest_build_block); - if !is_dup_block { - entry.get_mut().1.push(digest_build_block.clone()); - } - }, - } - }; - - // try to get all updated keys from cache - let populated_from_cache = storage.with_cached_changed_keys( - &trie_root, - &mut |changed_keys| { - for (storage_key, changed_keys) in changed_keys { - let map = match storage_key { - Some(storage_key) => child_map - .entry(ChildIndex:: { - block: block.clone(), - storage_key: storage_key.clone(), - }) - .or_default(), - None => &mut map, - }; - for changed_key in changed_keys.iter().cloned() { - insert_to_map(map, changed_key); - } + (BTreeMap::new(), BTreeMap::new()), + move |(mut map, mut child_map), digest_build_block| { + let extrinsic_prefix = + ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); + let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); + let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); + let trie_root = storage.root(parent, digest_build_block.clone())?; + let trie_root = trie_root.ok_or_else(|| { + format!("No changes trie root for block {}", digest_build_block.clone()) + })?; + + let insert_to_map = |map: &mut BTreeMap<_, _>, key: StorageKey| { + match map.entry(key.clone()) { + Entry::Vacant(entry) => { + entry.insert(( + DigestIndex { block: block.clone(), key }, + vec![digest_build_block.clone()], + )); + }, + Entry::Occupied(mut entry) => { + // DigestIndexValue must be sorted. Here we are relying on the fact that + // digest_build_iterator() returns blocks in ascending order => we only + // need to check for duplicates + // + // is_dup_block could be true when key has been changed in both digest + // block AND other blocks that it covers + let is_dup_block = entry.get().1.last() == Some(&digest_build_block); + if !is_dup_block { + entry.get_mut().1.push(digest_build_block.clone()); + } + }, } + }; + + // try to get all updated keys from cache + let populated_from_cache = + storage.with_cached_changed_keys(&trie_root, &mut |changed_keys| { + for (storage_key, changed_keys) in changed_keys { + let map = match storage_key { + Some(storage_key) => child_map + .entry(ChildIndex:: { + block: block.clone(), + storage_key: storage_key.clone(), + }) + .or_default(), + None => &mut map, + }; + for changed_key in changed_keys.iter().cloned() { + insert_to_map(map, changed_key); + } + } + }); + if populated_from_cache { + return Ok((map, child_map)) } - ); - if populated_from_cache { - return Ok((map, child_map)); - } - let mut children_roots = BTreeMap::::new(); - { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| - if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.insert(trie_key.storage_key, trie_root); + let mut children_roots = BTreeMap::::new(); + { + let trie_storage = TrieBackendEssence::<_, H>::new( + crate::changes_trie::TrieBackendStorageAdapter(storage), + trie_root, + ); + + 
trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { + if let Ok(InputKey::ChildIndex::(trie_key)) = + Decode::decode(&mut key) + { + if let Ok(value) = >::decode(&mut value) { + let mut trie_root = ::Out::default(); + trie_root.as_mut().copy_from_slice(&value[..]); + children_roots.insert(trie_key.storage_key, trie_root); + } } }); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| - if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); + trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { + if let Ok(InputKey::DigestIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - } + } - for (storage_key, trie_root) in children_roots.into_iter() { - let child_index = ChildIndex:: { - block: block.clone(), - storage_key, - }; + for (storage_key, trie_root) in children_roots.into_iter() { + let child_index = ChildIndex:: { block: block.clone(), storage_key }; - let mut map = child_map.entry(child_index).or_default(); - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); + let mut map = child_map.entry(child_index).or_default(); + let trie_storage = TrieBackendEssence::<_, H>::new( + crate::changes_trie::TrieBackendStorageAdapter(storage), + trie_root, + ); + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| - if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); + trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { + if let Ok(InputKey::DigestIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - } - Ok((map, child_map)) + } + Ok((map, child_map)) + }, + ) + .map(|(pairs, child_pairs)| { + ( + pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), + child_pairs + .into_iter() + .map(|(sk, pairs)| { + (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v))) + }) + .collect(), + digest_input_blocks, + ) }) - .map(|(pairs, child_pairs)| ( - pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), - child_pairs.into_iter().map(|(sk, pairs)| - (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)))).collect(), - digest_input_blocks, - )) } #[cfg(test)] mod test { - use sp_core::Blake2Hasher; - use crate::InMemoryBackend; - use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; - use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; use super::*; + use crate::{ + changes_trie::{ + build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, + storage::InMemoryStorage, + 
Configuration, RootsStorage, + }, + InMemoryBackend, + }; + use sp_core::Blake2Hasher; - fn prepare_for_build(zero: u64) -> ( + fn prepare_for_build( + zero: u64, + ) -> ( InMemoryBackend, InMemoryStorage, OverlayedChanges, @@ -353,57 +389,150 @@ mod test { (vec![103], vec![255]), (vec![104], vec![255]), (vec![105], vec![255]), - ].into_iter().collect::>().into(); + ] + .into_iter() + .collect::>() + .into(); let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); - let storage = InMemoryStorage::with_inputs(vec![ - (zero + 1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![105] }, vec![0, 2, 4]), - ]), - (zero + 2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0]), - ]), - (zero + 3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![100] }, vec![0]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![105] }, vec![1]), - ]), - (zero + 4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]), - (zero + 5, Vec::new()), - (zero + 6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 6, key: vec![105] }, vec![2]), - ]), - (zero + 7, Vec::new()), - (zero + 8, vec![ - InputPair::DigestIndex(DigestIndex { block: zero + 8, key: vec![105] }, vec![zero + 6]), - ]), - (zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()), - (zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()), - ], vec![(prefixed_child_trie_key1.clone(), vec![ - (zero + 1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![105] }, vec![0, 2, 4]), - ]), - (zero + 2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0]), - ]), - (zero + 4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - ]), - ]), - ]); + let storage = InMemoryStorage::with_inputs( + vec![ + ( + zero + 1, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![100] }, + vec![1, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![101] }, + vec![0, 2], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![105] }, + vec![0, 2, 4], + ), + ], + ), + ( + zero + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 2, key: vec![102] }, + vec![0], + )], + ), + ( + zero + 3, + vec![ + 
InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 3, key: vec![100] }, + vec![0], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 3, key: vec![105] }, + vec![1], + ), + ], + ), + ( + zero + 4, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![101] }, + vec![1], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![103] }, + vec![0, 1], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1, zero + 3], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1, zero + 3], + ), + ], + ), + (zero + 5, Vec::new()), + ( + zero + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 6, key: vec![105] }, + vec![2], + )], + ), + (zero + 7, Vec::new()), + ( + zero + 8, + vec![InputPair::DigestIndex( + DigestIndex { block: zero + 8, key: vec![105] }, + vec![zero + 6], + )], + ), + (zero + 9, Vec::new()), + (zero + 10, Vec::new()), + (zero + 11, Vec::new()), + (zero + 12, Vec::new()), + (zero + 13, Vec::new()), + (zero + 14, Vec::new()), + (zero + 15, Vec::new()), + ], + vec![( + prefixed_child_trie_key1.clone(), + vec![ + ( + zero + 1, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![100] }, + vec![1, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![101] }, + vec![0, 2], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![105] }, + vec![0, 2, 4], + ), + ], + ), + ( + zero + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 2, key: vec![102] }, + vec![0], + )], + ), + ( + zero + 4, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 2, key: vec![102] }, + vec![0, 3], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2], + ), + ], + ), + ], + )], + ); let mut changes = OverlayedChanges::default(); changes.set_collect_extrinsics(true); @@ -446,12 +575,11 @@ mod test { (backend, storage, changes, config) } - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { config, zero, end: None } } #[test] @@ -467,24 +595,48 @@ mod test { configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![103] }, vec![0, 1]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]), - ]), - (ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, - vec![ - 
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]), - ]), - ]); - + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![103] }, + vec![0, 1] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, + vec![0, 2, 3] + ),] + ), + ( + ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -505,33 +657,82 @@ mod test { configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), - ]), - (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), - ]), - ]); + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1, zero + 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1, zero + 3] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, 
v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1] + ), + ] + ), + ( + ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -552,31 +753,74 @@ mod test { configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![100] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![101] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![103] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![105] }, vec![zero + 4, zero + 8]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), - ]), - (ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), - ]), - ]); + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![100] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![101] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![102] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![103] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![105] }, + vec![zero + 4, zero + 8] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, + 
vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![102] }, + vec![zero + 4] + ), + ] + ), + ( + ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -591,38 +835,67 @@ mod test { let parent = AnchorBlockId { hash: Default::default(), number: zero + 10 }; let mut configuration_range = configuration_range(&config, zero); - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range.clone(), - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), - ]); + let changes_trie_nodes = + prepare_input(&backend, &storage, configuration_range.clone(), &changes, &parent) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![103] }, + vec![0, 1] + ), + ] + ); configuration_range.end = Some(zero + 11); - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range, - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![100] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![101] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![102] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![103] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![105] }, vec![zero + 4, zero + 8]), - ]); + let changes_trie_nodes = + prepare_input(&backend, &storage, configuration_range, &changes, &parent).unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![100] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![101] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![102] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![103] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![105] }, + vec![zero + 4, zero + 8] + ), + ] + ); } test_with_zero(0); @@ -647,34 +920,82 @@ mod test { 
configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), - ]), - (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), - ]), - ]); - + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1, zero + 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1, zero + 3] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1] + ), + ] + ), + ( + ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -710,44 +1031,50 @@ mod test { .complete(4, &trie_root4); storage.cache_mut().perform(cached_data4); - let (root_changes_trie_nodes, child_changes_tries_nodes, _) = 
prepare_input( - &backend, - &storage, - configuration_range(&config, 0), - &changes, - &parent, - ).unwrap(); - assert_eq!(root_changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), - ]); + let (root_changes_trie_nodes, child_changes_tries_nodes, _) = + prepare_input(&backend, &storage, configuration_range(&config, 0), &changes, &parent) + .unwrap(); + assert_eq!( + root_changes_trie_nodes.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), + ] + ); let child_changes_tries_nodes = child_changes_tries_nodes .into_iter() .map(|(k, i)| (k, i.collect::>())) .collect::>(); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { - block: 16u64, - storage_key: child_trie_key1.clone(), - }).unwrap(), + child_changes_tries_nodes + .get(&ChildIndex { block: 16u64, storage_key: child_trie_key1.clone() }) + .unwrap(), &vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2, 3]), - + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16u64, key: vec![100] }, + vec![0, 2, 3] + ), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![103] }, vec![4]), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![104] }, vec![4]), ], ); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }).unwrap(), + child_changes_tries_nodes + .get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }) + .unwrap(), &vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), - + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16u64, key: vec![100] }, + vec![0, 2] + ), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![105] }, vec![4]), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![106] }, vec![4]), ], diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs index ef83966795f5e..04820242d9d08 100644 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/primitives/state-machine/src/changes_trie/build_cache.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,12 +34,13 @@ use sp_core::storage::PrefixedStorageKey; /// is inserted (because digest block will includes all keys from this entry). /// When there's a fork, entries are pruned when first changes trie is inserted. 
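A minimal sketch of the pruning rule the BuildCache doc above describes, using a hypothetical `ToyCache` type (an illustration, not the crate's API): inserting a digest entry drops the cached entries for the blocks the digest covers, mirroring the `obsolete_entries_are_purged_when_new_ct_is_built` test further down.

use std::collections::HashMap;

// Toy model of the cache-pruning rule (ToyCache is hypothetical, not the
// crate's API): inserting a digest trie purges the entries for its input
// blocks, because the digest now covers all of their keys.
struct ToyCache {
    changed_keys: HashMap<u64, Vec<Vec<u8>>>, // block number -> changed keys
}

impl ToyCache {
    fn insert(&mut self, block: u64, keys: Vec<Vec<u8>>, digest_input_blocks: &[u64]) {
        for covered in digest_input_blocks {
            self.changed_keys.remove(covered);
        }
        self.changed_keys.insert(block, keys);
    }
}

fn main() {
    let mut cache = ToyCache { changed_keys: HashMap::new() };
    cache.insert(1, vec![vec![1]], &[]);
    cache.insert(2, vec![vec![2]], &[]);
    cache.insert(3, vec![vec![3]], &[]);
    assert_eq!(cache.changed_keys.len(), 3);
    // The digest trie at block 4 covers blocks 1..=3, so their entries go away.
    cache.insert(4, vec![], &[1, 2, 3]);
    assert_eq!(cache.changed_keys.len(), 1);
}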
pub struct BuildCache { - /// Map of block (implies changes true) number => changes trie root. + /// Map of block (implies changes trie) number => changes trie root. roots_by_number: HashMap, /// Map of changes trie root => set of storage keys that are in this trie. /// The `Option>` in inner `HashMap` stands for the child storage key. /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. - /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. + /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by + /// the key. changed_keys: HashMap, HashSet>>, } @@ -78,20 +79,20 @@ pub(crate) struct IncompleteCachedBuildData { } impl BuildCache - where - N: Eq + ::std::hash::Hash, - H: Eq + ::std::hash::Hash + Clone, +where + N: Eq + ::std::hash::Hash, + H: Eq + ::std::hash::Hash + Clone, { /// Create new changes trie build cache. pub fn new() -> Self { - BuildCache { - roots_by_number: HashMap::new(), - changed_keys: HashMap::new(), - } + BuildCache { roots_by_number: HashMap::new(), changed_keys: HashMap::new() } } /// Get cached changed keys for changes trie with given root. - pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { + pub fn get( + &self, + root: &H, + ) -> Option<&HashMap, HashSet>> { self.changed_keys.get(&root) } @@ -158,7 +159,9 @@ impl IncompleteCacheAction { pub(crate) fn set_digest_input_blocks(self, digest_input_blocks: Vec) -> Self { match self { IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData(build_data.set_digest_input_blocks(digest_input_blocks)), + IncompleteCacheAction::CacheBuildData( + build_data.set_digest_input_blocks(digest_input_blocks), + ), IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, } } @@ -180,10 +183,7 @@ impl IncompleteCacheAction { impl IncompleteCachedBuildData { /// Create new cached data. 
pub(crate) fn new() -> Self { - IncompleteCachedBuildData { - digest_input_blocks: Vec::new(), - changed_keys: HashMap::new(), - } + IncompleteCachedBuildData { digest_input_blocks: Vec::new(), changed_keys: HashMap::new() } } fn complete(self, block: N, trie_root: H) -> CachedBuildData { @@ -232,30 +232,42 @@ mod tests { #[test] fn obsolete_entries_are_purged_when_new_ct_is_built() { let mut cache = BuildCache::::new(); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![1]].into_iter().collect()) - .complete(1, 1))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![2]].into_iter().collect()) - .complete(2, 2))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![3]].into_iter().collect()) - .complete(3, 3))); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![1]].into_iter().collect()) + .complete(1, 1), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![2]].into_iter().collect()) + .complete(2, 2), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![3]].into_iter().collect()) + .complete(3, 3), + )); assert_eq!(cache.changed_keys.len(), 3); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .set_digest_input_blocks(vec![1, 2, 3]) - .complete(4, 4))); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .set_digest_input_blocks(vec![1, 2, 3]) + .complete(4, 4), + )); assert_eq!(cache.changed_keys.len(), 1); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![8]].into_iter().collect()) - .complete(8, 8))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![12]].into_iter().collect()) - .complete(12, 12))); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![8]].into_iter().collect()) + .complete(8, 8), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![12]].into_iter().collect()) + .complete(12, 12), + )); assert_eq!(cache.changed_keys.len(), 3); diff --git a/primitives/state-machine/src/changes_trie/build_iterator.rs b/primitives/state-machine/src/changes_trie/build_iterator.rs index 3bafd608efa85..62bb00a2f8829 100644 --- a/primitives/state-machine/src/changes_trie/build_iterator.rs +++ b/primitives/state-machine/src/changes_trie/build_iterator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,8 +18,8 @@ //! Structures and functions to return blocks whose changes are to be included //! in given block's changes trie. +use crate::changes_trie::{BlockNumber, ConfigurationRange}; use num_traits::Zero; -use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// Returns iterator of OTHER blocks that are required for inclusion into /// changes trie of given block. 
Blocks are guaranteed to be returned in @@ -31,24 +31,31 @@ pub fn digest_build_iterator<'a, Number: BlockNumber>( config: ConfigurationRange<'a, Number>, block: Number, ) -> DigestBuildIterator<Number> { // prepare digest build parameters - let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone()) { + let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone()) + { Some((current_level, digest_interval, digest_step)) => (current_level, digest_interval, digest_step), None => return DigestBuildIterator::empty(), }; - DigestBuildIterator::new(block.clone(), config.end.unwrap_or(block), config.config.digest_interval, digest_step) + DigestBuildIterator::new( + block.clone(), + config.end.unwrap_or(block), + config.config.digest_interval, + digest_step, + ) } /// Changes trie build iterator that returns numbers of OTHER blocks that are /// required for inclusion into changes trie of given block. #[derive(Debug)] pub struct DigestBuildIterator<Number: BlockNumber> { - /// Block we're building changes trie for. It could (logically) be a post-end block if we are creating - /// skewed digest. + /// Block we're building changes trie for. It could (logically) be a post-end block if we are + /// creating a skewed digest. block: Number, - /// Block that is a last block where current configuration is active. We have never yet created anything - /// after this block => digest that we're creating can't reference any blocks that are >= end. + /// Block that is the last block where the current configuration is active. We have never yet + /// created anything after this block => the digest that we're creating can't reference any + /// blocks that are >= end. end: Number, /// Interval of L1 digest blocks. digest_interval: u32, @@ -56,7 +63,6 @@ pub struct DigestBuildIterator<Number: BlockNumber> { max_step: u32, // Mutable data below: - /// Step of current blocks range. current_step: u32, /// Reverse step of current blocks range.
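As a concrete sanity check of what this iterator yields: a level-1 digest at `block` covers the `digest_interval - 1` blocks that precede it. A free-standing sketch (hypothetical helper, not the crate's API) reproduces the expectation asserted by `digest_iterator_returns_level1_blocks` below.

// Hypothetical helper (not the crate's API): the blocks whose changes an L1
// digest trie at `block` must include, per the coverage rule above.
fn l1_digest_input_blocks(block: u64, digest_interval: u64) -> Vec<u64> {
    (block - digest_interval + 1..block).collect()
}

fn main() {
    // With digest_interval = 16, the L1 digest at block 16 covers blocks 1..=15,
    // matching the `digest_iterator_returns_level1_blocks` test below.
    assert_eq!(l1_digest_input_blocks(16, 16), (1..16).collect::<Vec<u64>>());
}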
@@ -98,7 +104,7 @@ impl Iterator for DigestBuildIterator { if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { if next < self.end { self.last_block = Some(next.clone()); - return Some(next); + return Some(next) } } @@ -112,14 +118,16 @@ impl Iterator for DigestBuildIterator { self.current_step_reverse * self.digest_interval }; if next_step_reverse > self.max_step { - return None; + return None } self.current_step_reverse = next_step_reverse; self.current_range = Some(BlocksRange::new( match self.last_block.clone() { Some(last_block) => last_block + self.current_step.into(), - None => self.block.clone() - (self.current_step * self.digest_interval - self.current_step).into(), + None => + self.block.clone() - + (self.current_step * self.digest_interval - self.current_step).into(), }, self.block.clone(), self.current_step.into(), @@ -143,11 +151,7 @@ struct BlocksRange { impl BlocksRange { pub fn new(begin: Number, end: Number, step: Number) -> Self { - BlocksRange { - current: begin, - end, - step, - } + BlocksRange { current: begin, end, step } } } @@ -156,7 +160,7 @@ impl Iterator for BlocksRange { fn next(&mut self) -> Option { if self.current >= self.end { - return None; + return None } let current = Some(self.current.clone()); @@ -167,8 +171,8 @@ impl Iterator for BlocksRange { #[cfg(test)] mod tests { - use crate::changes_trie::Configuration; use super::*; + use crate::changes_trie::Configuration; fn digest_build_iterator( digest_interval: u32, @@ -179,10 +183,7 @@ mod tests { ) -> DigestBuildIterator { super::digest_build_iterator( ConfigurationRange { - config: &Configuration { - digest_interval, - digest_levels, - }, + config: &Configuration { digest_interval, digest_levels }, zero, end, }, @@ -215,9 +216,21 @@ mod tests { fn test_with_zero(zero: u64) { let empty = (0, 0, 0); assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 0), empty, "block is 0"); - assert_eq!(digest_build_iterator_basic(0, 16, zero, zero + 64), empty, "digest_interval is 0"); - assert_eq!(digest_build_iterator_basic(1, 16, zero, zero + 64), empty, "digest_interval is 1"); - assert_eq!(digest_build_iterator_basic(4, 0, zero, zero + 64), empty, "digest_levels is 0"); + assert_eq!( + digest_build_iterator_basic(0, 16, zero, zero + 64), + empty, + "digest_interval is 0" + ); + assert_eq!( + digest_build_iterator_basic(1, 16, zero, zero + 64), + empty, + "digest_interval is 1" + ); + assert_eq!( + digest_build_iterator_basic(4, 0, zero, zero + 64), + empty, + "digest_levels is 0" + ); assert_eq!( digest_build_iterator_basic(4, 16, zero, zero + 1), empty, @@ -238,12 +251,11 @@ mod tests { empty, "digest is not required for this block", ); - assert_eq!(digest_build_iterator_basic( - ::std::u32::MAX / 2 + 1, - 16, - zero, - ::std::u64::MAX, - ), empty, "digest_interval * 2 is greater than u64::MAX"); + assert_eq!( + digest_build_iterator_basic(::std::u32::MAX / 2 + 1, 16, zero, ::std::u64::MAX,), + empty, + "digest_interval * 2 is greater than u64::MAX" + ); } test_with_zero(0); @@ -326,18 +338,37 @@ mod tests { #[test] fn digest_iterator_returns_level1_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 16, None), + assert_eq!( + digest_build_iterator_blocks(16, 1, zero, zero + 16, None), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 256, None), + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + 
digest_build_iterator_blocks(16, 1, zero, zero + 256, None), [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 32, None), + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 32, None), [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), - [4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079] - .iter().map(|item| zero + item).collect::>()); + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), + [ + 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, + 4078, 4079 + ] + .iter() + .map(|item| zero + item) + .collect::>() + ); } test_with_zero(0); @@ -348,21 +379,30 @@ mod tests { #[test] fn digest_iterator_returns_level1_and_level2_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 256, None), + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 256, None), [ // level2 points to previous 16-1 level1 digests: 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, // level2 is a level1 digest of 16-1 previous blocks: 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - ].iter().map(|item| zero + item).collect::>(), + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), [ // level2 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - // level2 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - ].iter().map(|item| zero + item).collect::>(), + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, + 4064, 4080, // level2 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, + 4094, 4095, + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } @@ -374,15 +414,20 @@ mod tests { #[test] fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), [ // level3 points to previous 16-1 level2 digests: - 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, - // level3 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - // level3 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - ].iter().map(|item| zero + item).collect::>(), + 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, + 3840, // level3 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, + 4064, 4080, // level3 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 
4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, + 4094, 4095, + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } @@ -394,15 +439,20 @@ mod tests { #[test] fn digest_iterator_returns_skewed_digest_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), [ // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: 256, 512, 768, 1024, 1280, // level3 MUST point to previous 16-1 level1 digests, BUT there are only 3: 1296, 1312, 1328, - // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 9: + // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only + // 9: 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, - ].iter().map(|item| zero + item).collect::>(), + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } @@ -414,14 +464,19 @@ mod tests { #[test] fn digest_iterator_returns_skewed_digest_blocks_skipping_level() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), [ // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: 256, 512, 768, 1024, 1280, - // level3 MUST point to previous 16-1 level1 digests, BUT there are NO ANY L1-digests: - // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 3: + // level3 MUST point to previous 16-1 level1 digests, BUT there are NO ANY + // L1-digests: level3 MUST be a level1 digest of 16-1 previous blocks, BUT + // there are only 3: 1281, 1282, 1283, - ].iter().map(|item| zero + item).collect::>(), + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index f9398b3ce5dd4..9343a226a3aa8 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,20 +18,22 @@ //! Functions + iterator that traverses changes tries and returns all //! (block, extrinsic) pairs where given key has been changed. 
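The traversal order matters here: digests are explored from the requested end block via a front-popped queue, and extrinsic hits at a block surface before deeper digests are expanded. A self-contained sketch (toy maps standing in for the trie-backed DigestIndex/ExtrinsicIndex lookups; an illustration, not the crate's API) reproduces the order asserted by `basic_drilldown_iterator_works` in the tests below, using the same values as the `prepare_for_drilldown` fixture.

use std::collections::{HashMap, VecDeque};

// Toy drilldown: `digests` maps a digest block to the blocks its DigestIndex
// entry references for some key; `extrinsics` maps a block to the extrinsic
// indices where the key changed. Reversed lists are pushed to the back of a
// queue and popped from the front, which matches the yielded order.
fn drilldown(
    start: u64,
    digests: &HashMap<u64, Vec<u64>>,
    extrinsics: &HashMap<u64, Vec<u32>>,
) -> Vec<(u64, u32)> {
    let mut blocks: VecDeque<u64> = VecDeque::from(vec![start]);
    let mut out = Vec::new();
    while let Some(block) = blocks.pop_front() {
        if let Some(es) = extrinsics.get(&block) {
            out.extend(es.iter().rev().map(|e| (block, *e)));
        }
        if let Some(bs) = digests.get(&block) {
            blocks.extend(bs.iter().rev());
        }
    }
    out
}

fn main() {
    // Mirrors `prepare_for_drilldown`: digest 16 -> [4, 8], 8 -> [6], 4 -> [3];
    // extrinsic changes at blocks 3, 6 and 8.
    let digests = HashMap::from([(16u64, vec![4u64, 8]), (8, vec![6]), (4, vec![3])]);
    let extrinsics = HashMap::from([(3u64, vec![0u32]), (6, vec![3]), (8, vec![1, 2])]);
    assert_eq!(drilldown(16, &digests, &extrinsics), vec![(8, 2), (8, 1), (6, 3), (3, 0)]);
}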
-use std::cell::RefCell; -use std::collections::VecDeque; -use codec::{Decode, Encode, Codec}; +use crate::{ + changes_trie::{ + input::{ChildIndex, DigestIndex, DigestIndexValue, ExtrinsicIndex, ExtrinsicIndexValue}, + storage::{InMemoryStorage, TrieBackendAdapter}, + surface_iterator::{surface_iterator, SurfaceIterator}, + AnchorBlockId, BlockNumber, ConfigurationRange, RootsStorage, Storage, + }, + proving_backend::ProvingBackendRecorder, + trie_backend_essence::TrieBackendEssence, +}; +use codec::{Codec, Decode, Encode}; use hash_db::Hasher; use num_traits::Zero; use sp_core::storage::PrefixedStorageKey; use sp_trie::Recorder; -use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; -use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; -use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; -use crate::changes_trie::input::ChildIndex; -use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator}; -use crate::proving_backend::ProvingBackendRecorder; -use crate::trie_backend_essence::{TrieBackendEssence}; +use std::{cell::RefCell, collections::VecDeque}; /// Return changes of given key at given blocks range. /// `max` is the number of best known block. @@ -57,12 +59,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( begin: begin.clone(), end, config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, + surface: surface_iterator(config, max, begin, end.number.clone())?, extrinsics: Default::default(), blocks: Default::default(), @@ -72,7 +69,6 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( }) } - /// Returns proof of changes of given key at given blocks range. /// `max` is the number of best known block. 
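The proof flow implemented below can be summarized as: the full node drills down while recording every trie node it touches, ships those nodes, and the light client re-runs the identical drilldown against only the shipped nodes, failing on any miss. A toy sketch of that shape (stand-in types, not the crate's API):

use std::collections::HashMap;

// Stand-in node store: hash -> encoded trie node (types are hypothetical).
// A reader backed only by the proof errors out on any absent node, which is
// how an incomplete or unrelated proof gets rejected.
fn replay_drilldown(proof: &HashMap<u64, Vec<u8>>, touched: &[u64]) -> Result<(), String> {
    for hash in touched {
        proof.get(hash).ok_or_else(|| format!("node {} missing from proof", hash))?;
    }
    Ok(())
}

fn main() {
    let proof = HashMap::from([(1u64, b"root".to_vec()), (2u64, b"leaf".to_vec())]);
    assert!(replay_drilldown(&proof, &[1, 2]).is_ok());
    assert!(replay_drilldown(&proof, &[1, 3]).is_err());
}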
pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( @@ -83,7 +79,10 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( max: Number, storage_key: Option<&PrefixedStorageKey>, key: &[u8], -) -> Result>, String> where H::Out: Codec { +) -> Result>, String> +where + H::Out: Codec, +{ // we can't query any roots before root let max = std::cmp::min(max, end.number.clone()); @@ -96,12 +95,7 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( begin: begin.clone(), end, config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, + surface: surface_iterator(config, max, begin, end.number.clone())?, extrinsics: Default::default(), blocks: Default::default(), @@ -130,8 +124,11 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( end: &AnchorBlockId, max: Number, storage_key: Option<&PrefixedStorageKey>, - key: &[u8] -) -> Result, String> where H::Out: Encode { + key: &[u8], +) -> Result, String> +where + H::Out: Encode, +{ key_changes_proof_check_with_db( config, roots_storage, @@ -153,8 +150,11 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( end: &AnchorBlockId, max: Number, storage_key: Option<&PrefixedStorageKey>, - key: &[u8] -) -> Result, String> where H::Out: Encode { + key: &[u8], +) -> Result, String> +where + H::Out: Encode, +{ // we can't query any roots before root let max = std::cmp::min(max, end.number.clone()); @@ -167,28 +167,24 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( begin: begin.clone(), end, config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, + surface: surface_iterator(config, max, begin, end.number.clone())?, extrinsics: Default::default(), blocks: Default::default(), _hasher: ::std::marker::PhantomData::::default(), }, - }.collect() + } + .collect() } /// Drilldown iterator - receives 'digest points' from surface iterator and explores /// every point until extrinsic is found. pub struct DrilldownIteratorEssence<'a, H, Number> - where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, +where + H: Hasher, + Number: BlockNumber, + H::Out: 'a, { storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], @@ -206,14 +202,14 @@ pub struct DrilldownIteratorEssence<'a, H, Number> } impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> - where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, +where + H: Hasher, + Number: BlockNumber, + H::Out: 'a, { pub fn next(&mut self, trie_reader: F) -> Option> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, + where + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, { match self.do_next(trie_reader) { Ok(Some(res)) => Some(Ok(res)), @@ -223,25 +219,26 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> } fn do_next(&mut self, mut trie_reader: F) -> Result, String> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, + where + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, { loop { if let Some((block, extrinsic)) = self.extrinsics.pop_front() { - return Ok(Some((block, extrinsic))); + return Ok(Some((block, extrinsic))) } if let Some((block, level)) = self.blocks.pop_front() { // not having a changes trie root is an error because: // we never query roots for future blocks // AND trie roots for old blocks are known (both on full + light node) - let trie_root = self.roots_storage.root(&self.end, block.clone())? 
- .ok_or_else(|| format!("Changes trie root for block {} is not found", block.clone()))?; + let trie_root = + self.roots_storage.root(&self.end, block.clone())?.ok_or_else(|| { + format!("Changes trie root for block {} is not found", block.clone()) + })?; let trie_root = if let Some(storage_key) = self.storage_key { - let child_key = ChildIndex { - block: block.clone(), - storage_key: storage_key.clone(), - }.encode(); + let child_key = + ChildIndex { block: block.clone(), storage_key: storage_key.clone() } + .encode(); if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? .and_then(|v| >::decode(&mut &v[..]).ok()) .map(|v| { @@ -251,7 +248,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> }) { trie_root } else { - continue; + continue } } else { trie_root @@ -260,43 +257,62 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> // only return extrinsics for blocks before self.max // most of blocks will be filtered out before pushing to `self.blocks` // here we just throwing away changes at digest blocks we're processing - debug_assert!(block >= self.begin, "We shall not touch digests earlier than a range' begin"); + debug_assert!( + block >= self.begin, + "We shall not touch digests earlier than a range' begin" + ); if block <= self.end.number { - let extrinsics_key = ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); + let extrinsics_key = + ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); let extrinsics = trie_reader(self.storage, trie_root, &extrinsics_key); if let Some(extrinsics) = extrinsics? { if let Ok(extrinsics) = ExtrinsicIndexValue::decode(&mut &extrinsics[..]) { - self.extrinsics.extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); + self.extrinsics + .extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); } } } - let blocks_key = DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); + let blocks_key = + DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); let blocks = trie_reader(self.storage, trie_root, &blocks_key); if let Some(blocks) = blocks? { if let Ok(blocks) = >::decode(&mut &blocks[..]) { // filter level0 blocks here because we tend to use digest blocks, - // AND digest block changes could also include changes for out-of-range blocks + // AND digest block changes could also include changes for out-of-range + // blocks let begin = self.begin.clone(); let end = self.end.number.clone(); let config = self.config.clone(); - self.blocks.extend(blocks.into_iter() - .rev() - .filter(|b| level.map(|level| level > 1).unwrap_or(true) || (*b >= begin && *b <= end)) - .map(|b| { - let prev_level = level - .map(|level| Some(level - 1)) - .unwrap_or_else(|| - Some(config.config.digest_level_at_block(config.zero.clone(), b.clone()) - .map(|(level, _, _)| level) - .unwrap_or_else(|| Zero::zero()))); - (b, prev_level) - }) + self.blocks.extend( + blocks + .into_iter() + .rev() + .filter(|b| { + level.map(|level| level > 1).unwrap_or(true) || + (*b >= begin && *b <= end) + }) + .map(|b| { + let prev_level = + level.map(|level| Some(level - 1)).unwrap_or_else(|| { + Some( + config + .config + .digest_level_at_block( + config.zero.clone(), + b.clone(), + ) + .map(|(level, _, _)| level) + .unwrap_or_else(|| Zero::zero()), + ) + }); + (b, prev_level) + }), ); } } - continue; + continue } match self.surface.next() { @@ -310,46 +326,50 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> /// Exploring drilldown operator. 
pub struct DrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> - where H::Out: Encode +where + H::Out: Encode, { type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { - self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) + self.essence.next(|storage, root, key| { + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key) + }) } } /// Proving drilldown iterator. struct ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, proof_recorder: RefCell>, } impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { /// Consume the iterator, extracting the gathered proof in lexicographical order /// by value. pub fn extract_proof(self) -> Vec> { - self.proof_recorder.into_inner().drain() + self.proof_recorder + .into_inner() + .drain() .into_iter() .map(|n| n.data.to_vec()) .collect() @@ -357,32 +377,34 @@ impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> } impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a + Codec, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a + Codec, { type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { - let proof_recorder = &mut *self.proof_recorder.try_borrow_mut() + let proof_recorder = &mut *self + .proof_recorder + .try_borrow_mut() .expect("only fails when already borrowed; storage() is non-reentrant; qed"); - self.essence.next(|storage, root, key| + self.essence.next(|storage, root, key| { ProvingBackendRecorder::<_, H> { backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), proof_recorder, - }.storage(key)) + } + .storage(key) + }) } } #[cfg(test)] mod tests { - use std::iter::FromIterator; - use crate::changes_trie::Configuration; - use crate::changes_trie::input::InputPair; - use crate::changes_trie::storage::InMemoryStorage; - use sp_runtime::traits::BlakeTwo256; use super::*; + use crate::changes_trie::{input::InputPair, storage::InMemoryStorage, Configuration}; + use sp_runtime::traits::BlakeTwo256; + use std::iter::FromIterator; fn child_key() -> PrefixedStorageKey { let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); @@ -391,64 +413,98 @@ mod tests { fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let backend = InMemoryStorage::with_inputs(vec![ - // digest: 1..4 => [(3, 0)] - (1, vec![ - ]), - (2, vec![ - ]), - (3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![42] }, vec![0]), - ]), - (4, vec![ - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3]), - ]), - // digest: 5..8 => [(6, 3), (8, 1+2)] - (5, vec![]), - (6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 6, key: vec![42] }, vec![3]), - ]), - (7, vec![]), - (8, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 8, key: vec![42] }, vec![1, 2]), - InputPair::DigestIndex(DigestIndex { 
block: 8, key: vec![42] }, vec![6]), - ]), - // digest: 9..12 => [] - (9, vec![]), - (10, vec![]), - (11, vec![]), - (12, vec![]), - // digest: 0..16 => [4, 8] - (13, vec![]), - (14, vec![]), - (15, vec![]), - (16, vec![ - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]), - ]), - ], vec![(child_key(), vec![ - (1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![42] }, vec![0]), - ]), - (2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 2, key: vec![42] }, vec![3]), - ]), - (16, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![42] }, vec![5]), - - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![2]), - ]), - ]), - ]); + let backend = InMemoryStorage::with_inputs( + vec![ + // digest: 1..4 => [(3, 0)] + (1, vec![]), + (2, vec![]), + ( + 3, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 3, key: vec![42] }, + vec![0], + )], + ), + (4, vec![InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3])]), + // digest: 5..8 => [(6, 3), (8, 1+2)] + (5, vec![]), + ( + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 6, key: vec![42] }, + vec![3], + )], + ), + (7, vec![]), + ( + 8, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 8, key: vec![42] }, + vec![1, 2], + ), + InputPair::DigestIndex(DigestIndex { block: 8, key: vec![42] }, vec![6]), + ], + ), + // digest: 9..12 => [] + (9, vec![]), + (10, vec![]), + (11, vec![]), + (12, vec![]), + // digest: 0..16 => [4, 8] + (13, vec![]), + (14, vec![]), + (15, vec![]), + ( + 16, + vec![InputPair::DigestIndex( + DigestIndex { block: 16, key: vec![42] }, + vec![4, 8], + )], + ), + ], + vec![( + child_key(), + vec![ + ( + 1, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 1, key: vec![42] }, + vec![0], + )], + ), + ( + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 2, key: vec![42] }, + vec![3], + )], + ), + ( + 16, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16, key: vec![42] }, + vec![5], + ), + InputPair::DigestIndex( + DigestIndex { block: 16, key: vec![42] }, + vec![2], + ), + ], + ), + ], + )], + ); (config, backend) } - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { config, zero, end: None } } #[test] @@ -462,7 +518,8 @@ mod tests { 16, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); let drilldown_result = key_changes::( @@ -473,7 +530,8 @@ mod tests { 4, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); let drilldown_result = key_changes::( @@ -484,7 +542,8 @@ mod tests { 4, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); let drilldown_result = key_changes::( @@ -495,7 +554,8 @@ mod tests { 7, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); let drilldown_result = key_changes::( @@ -506,7 +566,8 @@ mod tests { 8, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); let drilldown_result 
= key_changes::( @@ -517,7 +578,8 @@ mod tests { 8, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3)])); } @@ -534,7 +596,9 @@ mod tests { 1000, None, &[42], - ).and_then(|i| i.collect::, _>>()).is_err()); + ) + .and_then(|i| i.collect::, _>>()) + .is_err()); assert!(key_changes::( configuration_range(&config, 0), @@ -544,7 +608,9 @@ mod tests { 1000, Some(&child_key()), &[42], - ).and_then(|i| i.collect::, _>>()).is_err()); + ) + .and_then(|i| i.collect::, _>>()) + .is_err()); } #[test] @@ -558,7 +624,8 @@ mod tests { 50, None, &[42], - ).is_err()); + ) + .is_err()); assert!(key_changes::( configuration_range(&config, 0), &storage, @@ -567,10 +634,10 @@ mod tests { 100, None, &[42], - ).is_err()); + ) + .is_err()); } - #[test] fn proving_drilldown_iterator_works() { // happens on remote full node: @@ -578,13 +645,27 @@ mod tests { // create drilldown iterator that records all trie nodes during drilldown let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof = key_changes_proof::( - configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, None, &[42]).unwrap(); + configuration_range(&remote_config, 0), + &remote_storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + None, + &[42], + ) + .unwrap(); let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof_child = key_changes_proof::( - configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]).unwrap(); + configuration_range(&remote_config, 0), + &remote_storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + Some(&child_key()), + &[42], + ) + .unwrap(); // happens on local light node: @@ -592,14 +673,28 @@ mod tests { let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); let local_result = key_changes_proof_check::( - configuration_range(&local_config, 0), &local_storage, remote_proof, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, None, &[42]); + configuration_range(&local_config, 0), + &local_storage, + remote_proof, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + None, + &[42], + ); let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); let local_result_child = key_changes_proof_check::( - configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]); + configuration_range(&local_config, 0), + &local_storage, + remote_proof_child, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + Some(&child_key()), + &[42], + ); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); @@ -620,12 +715,22 @@ mod tests { // regular blocks: 89, 90, 91 let mut input = (1u64..92u64).map(|b| (b, vec![])).collect::>(); // changed at block#63 and covered by L3 digest at block#64 - input[63 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0])); - input[64 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63])); + input[63 - 1] + .1 + .push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0])); + input[64 - 
1] + .1 + .push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63])); // changed at block#79 and covered by L2 digest at block#80 + skewed digest at block#91 - input[79 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1])); - input[80 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79])); - input[91 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80])); + input[79 - 1] + .1 + .push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1])); + input[80 - 1] + .1 + .push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79])); + input[91 - 1] + .1 + .push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80])); let storage = InMemoryStorage::with_inputs(input, vec![]); let drilldown_result = key_changes::<BlakeTwo256, u64>( @@ -636,7 +741,8 @@ 100_000u64, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)])); } } diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 56971f708975f..af0a423e57267 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,11 +17,8 @@ //! Different types of changes trie input pairs. -use codec::{Decode, Encode, Input, Output, Error}; -use crate::{ - StorageKey, StorageValue, - changes_trie::BlockNumber -}; +use crate::{changes_trie::BlockNumber, StorageKey, StorageValue}; +use codec::{Decode, Encode, Error, Input, Output}; use sp_core::storage::PrefixedStorageKey; /// Key of { changed key => set of extrinsic indices } mapping. @@ -66,9 +63,11 @@ pub type ChildIndexValue = Vec<u8>; pub enum InputPair<Number: BlockNumber> { /// Element of { key => set of extrinsics where key has been changed } element mapping. ExtrinsicIndex(ExtrinsicIndex<Number>, ExtrinsicIndexValue), - /// Element of { key => set of blocks/digest blocks where key has been changed } element mapping. + /// Element of { key => set of blocks/digest blocks where key has been changed } element + /// mapping. DigestIndex(DigestIndex<Number>, DigestIndexValue), - /// Element of { childtrie key => Childchange trie } where key has been changed } element mapping. + /// Element of { child trie key => child changes trie } element mapping, recording child + /// tries in which some key has been changed.
ChildIndex(ChildIndex, ChildIndexValue), } @@ -123,7 +122,7 @@ impl ExtrinsicIndex { } impl Encode for ExtrinsicIndex { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { dest.push_byte(1); self.block.encode_to(dest); self.key.encode_to(dest); @@ -140,9 +139,8 @@ impl DigestIndex { } } - impl Encode for DigestIndex { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { dest.push_byte(2); self.block.encode_to(dest); self.key.encode_to(dest); @@ -158,7 +156,7 @@ impl ChildIndex { } impl Encode for ChildIndex { - fn encode_to(&self, dest: &mut W) { + fn encode_to(&self, dest: &mut W) { dest.push_byte(3); self.block.encode_to(dest); self.storage_key.encode_to(dest); diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index fd7b38c052f9e..40148095247dd 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -58,63 +58,86 @@ mod prune; mod storage; mod surface_iterator; -pub use self::build_cache::{BuildCache, CachedBuildData, CacheAction}; -pub use self::storage::InMemoryStorage; -pub use self::changes_iterator::{ - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, +pub use self::{ + build_cache::{BuildCache, CacheAction, CachedBuildData}, + changes_iterator::{ + key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, + }, + prune::prune, + storage::InMemoryStorage, }; -pub use self::prune::prune; -use std::collections::{HashMap, HashSet}; -use std::convert::TryInto; -use hash_db::{Hasher, Prefix}; -use num_traits::{One, Zero}; -use codec::{Decode, Encode}; -use sp_core; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::{MemoryDB, DBValue, TrieMut}; -use sp_trie::trie_types::TrieDBMut; use crate::{ - StorageKey, backend::Backend, - overlayed_changes::OverlayedChanges, changes_trie::{ build::prepare_input, - build_cache::{IncompleteCachedBuildData, IncompleteCacheAction}, + build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, }, + overlayed_changes::OverlayedChanges, + StorageKey, +}; +use codec::{Decode, Encode}; +use hash_db::{Hasher, Prefix}; +use num_traits::{One, Zero}; +use sp_core::{self, storage::PrefixedStorageKey}; +use sp_trie::{trie_types::TrieDBMut, DBValue, MemoryDB, TrieMut}; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, }; /// Requirements for block number that can be used with changes tries. 
pub trait BlockNumber: - Send + Sync + 'static + - std::fmt::Display + - Clone + - From + TryInto + One + Zero + - PartialEq + Ord + - std::hash::Hash + - std::ops::Add + ::std::ops::Sub + - std::ops::Mul + ::std::ops::Div + - std::ops::Rem + - std::ops::AddAssign + - num_traits::CheckedMul + num_traits::CheckedSub + - Decode + Encode -{} - -impl BlockNumber for T where T: - Send + Sync + 'static + - std::fmt::Display + - Clone + - From + TryInto + One + Zero + - PartialEq + Ord + - std::hash::Hash + - std::ops::Add + ::std::ops::Sub + - std::ops::Mul + ::std::ops::Div + - std::ops::Rem + - std::ops::AddAssign + - num_traits::CheckedMul + num_traits::CheckedSub + - Decode + Encode, -{} + Send + + Sync + + 'static + + std::fmt::Display + + Clone + + From + + TryInto + + One + + Zero + + PartialEq + + Ord + + std::hash::Hash + + std::ops::Add + + ::std::ops::Sub + + std::ops::Mul + + ::std::ops::Div + + std::ops::Rem + + std::ops::AddAssign + + num_traits::CheckedMul + + num_traits::CheckedSub + + Decode + + Encode +{ +} + +impl BlockNumber for T where + T: Send + + Sync + + 'static + + std::fmt::Display + + Clone + + From + + TryInto + + One + + Zero + + PartialEq + + Ord + + std::hash::Hash + + std::ops::Add + + ::std::ops::Sub + + std::ops::Mul + + ::std::ops::Div + + std::ops::Rem + + std::ops::AddAssign + + num_traits::CheckedMul + + num_traits::CheckedSub + + Decode + + Encode +{ +} /// Block identifier that could be used to determine fork of this block. #[derive(Debug)] @@ -130,8 +153,8 @@ pub struct State<'a, H, Number> { /// Configuration that is active at given block. pub config: Configuration, /// Configuration activation block number. Zero if it is the first configuration on the chain, - /// or number of the block that have emit NewConfiguration signal (thus activating configuration - /// starting from the **next** block). + /// or number of the block that have emit NewConfiguration signal (thus activating + /// configuration starting from the **next** block). pub zero: Number, /// Underlying changes tries storage reference. pub storage: &'a dyn Storage, @@ -143,7 +166,11 @@ pub trait RootsStorage: Send + Sync { fn build_anchor(&self, hash: H::Out) -> Result, String>; /// Get changes trie root for the block with given number which is an ancestor (or the block /// itself) of the anchor_block (i.e. anchor_block.number >= block). - fn root(&self, anchor: &AnchorBlockId, block: Number) -> Result, String>; + fn root( + &self, + anchor: &AnchorBlockId, + block: Number, + ) -> Result, String>; } /// Changes trie storage. Provides access to trie roots and trie nodes. @@ -162,9 +189,13 @@ pub trait Storage: RootsStorage { } /// Changes trie storage -> trie backend essence adapter. -pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); +pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>( + pub &'a dyn Storage, +); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage + for TrieBackendStorageAdapter<'a, H, N> +{ type Overlay = sp_trie::MemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -188,26 +219,14 @@ pub struct ConfigurationRange<'a, N> { impl<'a, H, Number> State<'a, H, Number> { /// Create state with given config and storage. 
- pub fn new( - config: Configuration, - zero: Number, - storage: &'a dyn Storage, - ) -> Self { - Self { - config, - zero, - storage, - } + pub fn new(config: Configuration, zero: Number, storage: &'a dyn Storage) -> Self { + Self { config, zero, storage } } } impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { fn clone(&self) -> Self { - State { - config: self.config.clone(), - zero: self.zero.clone(), - storage: self.storage, - } + State { config: self.config.clone(), zero: self.zero.clone(), storage: self.storage } } } @@ -227,20 +246,24 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( parent_hash: H::Out, panic_on_storage_error: bool, ) -> Result, H::Out, CacheAction)>, ()> - where - H::Out: Ord + 'static + Encode, +where + H::Out: Ord + 'static + Encode, { /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. fn maybe_panic( res: std::result::Result, panic: bool, ) -> std::result::Result { - res.map(Ok) - .unwrap_or_else(|e| if panic { - panic!("changes trie: storage access is not allowed to fail within runtime: {:?}", e) + res.map(Ok).unwrap_or_else(|e| { + if panic { + panic!( + "changes trie: storage access is not allowed to fail within runtime: {:?}", + e + ) } else { Err(()) - }) + } + }) } // when storage isn't provided, changes tries aren't created @@ -253,20 +276,22 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; let block = parent.number.clone() + One::one(); - // prepare configuration range - we already know zero block. Current block may be the end block if configuration - // has been changed in this block - let is_config_changed = match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { - Some(Some(new_config)) => new_config != &state.config.encode()[..], - Some(None) => true, - None => false, - }; + // prepare configuration range - we already know zero block. 
Current block may be the end block + // if configuration has been changed in this block + let is_config_changed = + match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { + Some(Some(new_config)) => new_config != &state.config.encode()[..], + Some(None) => true, + None => false, + }; let config_range = ConfigurationRange { config: &state.config, zero: state.zero.clone(), end: if is_config_changed { Some(block.clone()) } else { None }, }; - // storage errors are considered fatal (similar to situations when runtime fetches values from storage) + // storage errors are considered fatal (similar to situations when runtime fetches values from + // storage) let (input_pairs, child_input_pairs, digest_input_blocks) = maybe_panic( prepare_input::( backend, @@ -303,10 +328,8 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; } - cache_action = cache_action.insert( - Some(child_index.storage_key.clone()), - storage_changed_keys, - ); + cache_action = + cache_action.insert(Some(child_index.storage_key.clone()), storage_changed_keys); } if not_empty { child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); @@ -331,10 +354,7 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; } - cache_action = cache_action.insert( - None, - storage_changed_keys, - ); + cache_action = cache_action.insert(None, storage_changed_keys); } let cache_action = cache_action.complete(block, &root); @@ -350,20 +370,21 @@ fn prepare_cached_build_data( // because it'll never be used again for building other tries // => let's clear the cache if !config.config.is_digest_build_enabled() { - return IncompleteCacheAction::Clear; + return IncompleteCacheAction::Clear } // when this is the last block where current configuration is active // => let's clear the cache if config.end.as_ref() == Some(&block) { - return IncompleteCacheAction::Clear; + return IncompleteCacheAction::Clear } // we do not need to cache anything when top-level digest trie is created, because // it'll never be used again for building other tries // => let's clear the cache match config.config.digest_level_at_block(config.zero.clone(), block) { - Some((digest_level, _, _)) if digest_level == config.config.digest_levels => IncompleteCacheAction::Clear, + Some((digest_level, _, _)) if digest_level == config.config.digest_levels => + IncompleteCacheAction::Clear, _ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()), } } @@ -399,6 +420,9 @@ mod tests { fn cache_is_cleared_when_end_block_of_configuration_is_built() { let config = Configuration { digest_interval: 8, digest_levels: 2 }; let config_range = ConfigurationRange { zero: 0, end: Some(4u32), config: &config }; - assert_eq!(prepare_cached_build_data(config_range.clone(), 4u32), IncompleteCacheAction::Clear); + assert_eq!( + prepare_cached_build_data(config_range.clone(), 4u32), + IncompleteCacheAction::Clear + ); } } diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 54456f97add1f..2ca540562b47f 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,16 +17,20 @@ //! Changes trie pruning-related functions. +use crate::{ + changes_trie::{ + input::{ChildIndex, InputKey}, + storage::TrieBackendAdapter, + AnchorBlockId, BlockNumber, Storage, + }, + proving_backend::ProvingBackendRecorder, + trie_backend_essence::TrieBackendEssence, +}; +use codec::{Codec, Decode}; use hash_db::Hasher; -use sp_trie::Recorder; use log::warn; use num_traits::One; -use crate::proving_backend::ProvingBackendRecorder; -use crate::trie_backend_essence::TrieBackendEssence; -use crate::changes_trie::{AnchorBlockId, Storage, BlockNumber}; -use crate::changes_trie::storage::TrieBackendAdapter; -use crate::changes_trie::input::{ChildIndex, InputKey}; -use codec::{Decode, Codec}; +use sp_trie::Recorder; /// Prune obsolete changes tries. Pruning happens at the same block, where highest /// level digest is created. Pruning guarantees to save changes tries for last @@ -38,12 +42,14 @@ pub fn prune( last: Number, current_block: &AnchorBlockId, mut remove_trie_node: F, -) where H::Out: Codec { +) where + H::Out: Codec, +{ // delete changes trie for every block in range let mut block = first; loop { if block >= last.clone() + One::one() { - break; + break } let prev_block = block.clone(); @@ -56,7 +62,7 @@ pub fn prune( Err(error) => { // try to delete other tries warn!(target: "trie", "Failed to read changes trie root from DB: {}", error); - continue; + continue }, }; let children_roots = { @@ -66,9 +72,9 @@ pub fn prune( ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { - if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { + trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { + if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut key) { + if let Ok(value) = >::decode(&mut value) { let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.push(trie_root); @@ -91,8 +97,9 @@ fn prune_trie( storage: &dyn Storage, root: H::Out, remove_trie_node: &mut F, -) where H::Out: Codec { - +) where + H::Out: Codec, +{ // enumerate all changes trie' keys, recording all nodes that have been 'touched' // (effectively - all changes trie nodes) let mut proof_recorder: Recorder = Default::default(); @@ -113,14 +120,13 @@ fn prune_trie( #[cfg(test)] mod tests { - use std::collections::HashSet; - use sp_trie::MemoryDB; - use sp_core::H256; - use crate::backend::insert_into_memory_db; - use crate::changes_trie::storage::InMemoryStorage; + use super::*; + use crate::{backend::insert_into_memory_db, changes_trie::storage::InMemoryStorage}; use codec::Encode; + use sp_core::H256; use sp_runtime::traits::BlakeTwo256; - use super::*; + use sp_trie::MemoryDB; + use std::collections::HashSet; fn prune_by_collect( storage: &dyn Storage, @@ -130,8 +136,9 @@ mod tests { ) -> HashSet { let mut pruned_trie_nodes = HashSet::new(); let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; - prune(storage, first, last, &anchor, - |node| { pruned_trie_nodes.insert(node); }); + prune(storage, first, last, &anchor, |node| { + pruned_trie_nodes.insert(node); + }); pruned_trie_nodes } @@ -139,28 +146,36 @@ mod tests { fn prune_works() { fn prepare_storage() -> InMemoryStorage { let child_info = 
sp_core::storage::ChildInfo::new_default(&b"1"[..]); - let child_key = ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() }.encode(); + let child_key = + ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() } + .encode(); let mut mdb1 = MemoryDB::::default(); - let root1 = insert_into_memory_db::( - &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); + let root1 = + insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]) + .unwrap(); let mut mdb2 = MemoryDB::::default(); let root2 = insert_into_memory_db::( &mut mdb2, vec![(vec![11], vec![21]), (vec![12], vec![22])], - ).unwrap(); + ) + .unwrap(); let mut mdb3 = MemoryDB::::default(); - let ch_root3 = insert_into_memory_db::( - &mut mdb3, vec![(vec![110], vec![120])]).unwrap(); - let root3 = insert_into_memory_db::(&mut mdb3, vec![ - (vec![13], vec![23]), - (vec![14], vec![24]), - (child_key, ch_root3.as_ref().encode()), - ]).unwrap(); + let ch_root3 = + insert_into_memory_db::(&mut mdb3, vec![(vec![110], vec![120])]) + .unwrap(); + let root3 = insert_into_memory_db::( + &mut mdb3, + vec![ + (vec![13], vec![23]), + (vec![14], vec![24]), + (child_key, ch_root3.as_ref().encode()), + ], + ) + .unwrap(); let mut mdb4 = MemoryDB::::default(); - let root4 = insert_into_memory_db::( - &mut mdb4, - vec![(vec![15], vec![25])], - ).unwrap(); + let root4 = + insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]) + .unwrap(); let storage = InMemoryStorage::new(); storage.insert(65, root1, mdb1); storage.insert(66, root2, mdb2); diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 51b7ff6f50f71..bd5e3a32b5657 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,22 +17,21 @@ //! Changes trie storage utilities. -use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::DBValue; -use sp_trie::MemoryDB; -use parking_lot::RwLock; use crate::{ - StorageKey, + changes_trie::{AnchorBlockId, BlockNumber, BuildCache, RootsStorage, Storage}, trie_backend_essence::TrieBackendStorage, - changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, + StorageKey, }; +use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use parking_lot::RwLock; +use sp_core::storage::PrefixedStorageKey; +use sp_trie::{DBValue, MemoryDB}; +use std::collections::{BTreeMap, HashMap, HashSet}; #[cfg(test)] use crate::backend::insert_into_memory_db; #[cfg(test)] -use crate::changes_trie::input::{InputPair, ChildIndex}; +use crate::changes_trie::input::{ChildIndex, InputPair}; /// In-memory implementation of changes trie storage. pub struct InMemoryStorage { @@ -55,10 +54,7 @@ impl InMemoryStorage { /// Creates storage from given in-memory database. 
pub fn with_db(mdb: MemoryDB) -> Self { Self { - data: RwLock::new(InMemoryStorageData { - roots: BTreeMap::new(), - mdb, - }), + data: RwLock::new(InMemoryStorageData { roots: BTreeMap::new(), mdb }), cache: BuildCache::new(), } } @@ -72,7 +68,7 @@ impl InMemoryStorage { pub fn with_proof(proof: Vec>) -> Self { use hash_db::HashDB; - let mut proof_db = MemoryDB::::default(); + let mut proof_db = MemoryDB::::default(); for item in proof { proof_db.insert(EMPTY_PREFIX, &item); } @@ -104,7 +100,8 @@ impl InMemoryStorage { let mut roots = BTreeMap::new(); for (storage_key, child_input) in children_inputs { for (block, pairs) in child_input { - let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); + let root = + insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); if let Some(root) = root { let ix = if let Some(ix) = top_inputs.iter().position(|v| v.0 == block) { @@ -129,17 +126,14 @@ impl InMemoryStorage { } InMemoryStorage { - data: RwLock::new(InMemoryStorageData { - roots, - mdb, - }), + data: RwLock::new(InMemoryStorageData { roots, mdb }), cache: BuildCache::new(), } } #[cfg(test)] pub fn clear_storage(&self) { - self.data.write().mdb = MemoryDB::default(); // use new to be more correct + self.data.write().mdb = MemoryDB::default(); // use new to be more correct } #[cfg(test)] @@ -165,13 +159,20 @@ impl InMemoryStorage { impl RootsStorage for InMemoryStorage { fn build_anchor(&self, parent_hash: H::Out) -> Result, String> { - self.data.read().roots.iter() + self.data + .read() + .roots + .iter() .find(|(_, v)| **v == parent_hash) .map(|(k, _)| AnchorBlockId { hash: parent_hash, number: k.clone() }) .ok_or_else(|| format!("Can't find associated number for block {:?}", parent_hash)) } - fn root(&self, _anchor_block: &AnchorBlockId, block: Number) -> Result, String> { + fn root( + &self, + _anchor_block: &AnchorBlockId, + block: Number, + ) -> Result, String> { Ok(self.data.read().roots.get(&block).cloned()) } } @@ -201,9 +202,9 @@ impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { } impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, +where + Number: BlockNumber, + H: Hasher, { type Overlay = MemoryDB; diff --git a/primitives/state-machine/src/changes_trie/surface_iterator.rs b/primitives/state-machine/src/changes_trie/surface_iterator.rs index b9c9d09f0f73b..b3e5a490cd184 100644 --- a/primitives/state-machine/src/changes_trie/surface_iterator.rs +++ b/primitives/state-machine/src/changes_trie/surface_iterator.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! The best way to understand how this iterator works is to imagine some 2D terrain that have some mountains -//! (digest changes tries) and valleys (changes tries for regular blocks). There are gems (blocks) beneath the -//! terrain. Given the request to find all gems in the range [X1; X2] this iterator will return **minimal set** -//! of points at the terrain (mountains and valleys) inside this range that have to be drilled down to -//! search for gems. +//! 
The best way to understand how this iterator works is to imagine some 2D terrain that have some +//! mountains (digest changes tries) and valleys (changes tries for regular blocks). There are gems +//! (blocks) beneath the terrain. Given the request to find all gems in the range [X1; X2] this +//! iterator will return **minimal set** of points at the terrain (mountains and valleys) inside +//! this range that have to be drilled down to search for gems. +use crate::changes_trie::{BlockNumber, ConfigurationRange}; use num_traits::One; -use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// Returns surface iterator for given range of blocks. /// @@ -34,12 +34,8 @@ pub fn surface_iterator<'a, Number: BlockNumber>( begin: Number, end: Number, ) -> Result, String> { - let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( - config.clone(), - max.clone(), - begin.clone(), - end, - )?; + let (current, current_begin, digest_step, digest_level) = + lower_bound_max_digest(config.clone(), max.clone(), begin.clone(), end)?; Ok(SurfaceIterator { config, begin, @@ -54,9 +50,9 @@ pub fn surface_iterator<'a, Number: BlockNumber>( /// Surface iterator - only traverses top-level digests from given range and tries to find /// all valid digest changes. /// -/// Iterator item is the tuple of (last block of the current point + digest level of the current point). -/// Digest level is Some(0) when it is regular block, is Some(non-zero) when it is digest block and None -/// if it is skewed digest block. +/// Iterator item is the tuple of (last block of the current point + digest level of the current +/// point). Digest level is Some(0) when it is regular block, is Some(non-zero) when it is digest +/// block and None if it is skewed digest block. 
pub struct SurfaceIterator<'a, Number: BlockNumber> { config: ConfigurationRange<'a, Number>, begin: Number, @@ -89,7 +85,8 @@ impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { self.begin.clone(), next, ); - let (current, current_begin, digest_step, digest_level) = match max_digest_interval { + let (current, current_begin, digest_step, digest_level) = match max_digest_interval + { Err(err) => return Some(Err(err)), Ok(range) => range, }; @@ -114,14 +111,21 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( end: Number, ) -> Result<(Number, Number, u32, Option), String> { if end > max || begin > end { - return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)); + return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)) } - if begin <= config.zero || config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) { - return Err(format!("changes trie range is not covered by configuration: {}..{}/{}..{}", - begin, end, config.zero, match config.end.as_ref() { + if begin <= config.zero || + config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) + { + return Err(format!( + "changes trie range is not covered by configuration: {}..{}/{}..{}", + begin, + end, + config.zero, + match config.end.as_ref() { Some(config_end) => format!("{}", config_end), None => "None".into(), - })); + } + )) } let mut digest_level = 0u32; @@ -135,10 +139,16 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( let new_digest_level = digest_level + 1; let new_digest_step = digest_step * config.config.digest_interval; let new_digest_interval = config.config.digest_interval * { - if digest_interval == 0 { 1 } else { digest_interval } + if digest_interval == 0 { + 1 + } else { + digest_interval + } }; - let new_digest_begin = config.zero.clone() + ((current.clone() - One::one() - config.zero.clone()) - / new_digest_interval.into()) * new_digest_interval.into(); + let new_digest_begin = config.zero.clone() + + ((current.clone() - One::one() - config.zero.clone()) / + new_digest_interval.into()) * + new_digest_interval.into(); let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); let new_current = new_digest_begin.clone() + new_digest_interval.into(); @@ -150,16 +160,20 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( skewed_digest_end.clone(), ); if let Some(skewed_digest_start) = skewed_digest_start { - let skewed_digest_range = (skewed_digest_end.clone() - skewed_digest_start.clone()) - .try_into().ok() - .expect("skewed digest range is always <= max level digest range;\ - max level digest range always fits u32; qed"); + let skewed_digest_range = (skewed_digest_end.clone() - + skewed_digest_start.clone()) + .try_into() + .ok() + .expect( + "skewed digest range is always <= max level digest range;\ + max level digest range always fits u32; qed", + ); return Ok(( skewed_digest_end.clone(), skewed_digest_start, skewed_digest_range, None, - )); + )) } } } @@ -169,7 +183,7 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( if begin < new_digest_begin { current_begin = new_digest_begin; } - break; + break } // we can (and will) use this digest @@ -181,30 +195,24 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( // if current digest covers the whole range => no need to use next level digest if current_begin <= begin && new_digest_end >= end { - break; + break } } } - Ok(( - current, - current_begin, - digest_step, - Some(digest_level), - )) + Ok((current, current_begin, digest_step, Some(digest_level))) } 
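The digest-level arithmetic that `lower_bound_max_digest` performs is easier to follow with concrete numbers. A self-contained sketch under the assumption that the configuration activates at block 0; the function below is an illustrative model, not the crate API:

// Which digest level is built at `block` for a given `digest_interval` and
// `digest_levels`, assuming activation at block 0? Level 0 means a regular
// (non-digest) changes trie.
fn digest_level_at(block: u64, digest_interval: u64, digest_levels: u32) -> u32 {
    let mut level = 0;
    let mut interval = 1;
    for _ in 0..digest_levels {
        interval *= digest_interval;
        if block % interval == 0 {
            level += 1;
        } else {
            break;
        }
    }
    level
}

fn main() {
    // With digest_interval = 4 and digest_levels = 2:
    // level-1 digests at 4, 8, 12, ...; level-2 digests at 16, 32, 48, ...
    assert_eq!(digest_level_at(7, 4, 2), 0); // regular block
    assert_eq!(digest_level_at(8, 4, 2), 1); // level-1 digest
    assert_eq!(digest_level_at(16, 4, 2), 2); // level-2 digest
    // A surface query for gems in [1; 16] therefore only needs to drill
    // into the single level-2 point at block 16.
}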
#[cfg(test)] mod tests { - use crate::changes_trie::{Configuration}; use super::*; + use crate::changes_trie::Configuration; - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { config, zero, end: None } } #[test] @@ -213,13 +221,15 @@ mod tests { // when config activates at 0 assert_eq!( - lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64).unwrap(), + lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64) + .unwrap(), (192, 176, 16, Some(2)), ); // when config activates at 30 assert_eq!( - lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64).unwrap(), + lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64) + .unwrap(), (222, 206, 16, Some(2)), ); } @@ -230,40 +240,61 @@ mod tests { // when config activates at 0 assert_eq!( - surface_iterator( - configuration_range(&config, 0u64), - 100_000u64, - 40u64, - 180u64, - ).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64,) + .unwrap() + .collect::>(), vec![ - Ok((192, Some(2))), Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), - Ok((128, Some(2))), Ok((112, Some(2))), Ok((96, Some(2))), Ok((80, Some(2))), - Ok((64, Some(2))), Ok((48, Some(2))), + Ok((192, Some(2))), + Ok((176, Some(2))), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), ], ); // when config activates at 30 assert_eq!( - surface_iterator( - configuration_range(&config, 30u64), - 100_000u64, - 40u64, - 180u64, - ).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64,) + .unwrap() + .collect::>(), vec![ - Ok((190, Some(2))), Ok((174, Some(2))), Ok((158, Some(2))), Ok((142, Some(2))), Ok((126, Some(2))), - Ok((110, Some(2))), Ok((94, Some(2))), Ok((78, Some(2))), Ok((62, Some(2))), Ok((46, Some(2))), + Ok((190, Some(2))), + Ok((174, Some(2))), + Ok((158, Some(2))), + Ok((142, Some(2))), + Ok((126, Some(2))), + Ok((110, Some(2))), + Ok((94, Some(2))), + Ok((78, Some(2))), + Ok((62, Some(2))), + Ok((46, Some(2))), ], ); // when config activates at 0 AND max block is before next digest assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64) + .unwrap() + .collect::>(), vec![ - Ok((183, Some(0))), Ok((182, Some(0))), Ok((181, Some(0))), Ok((180, Some(1))), - Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), - Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), + Ok((183, Some(0))), + Ok((182, Some(0))), + Ok((181, Some(0))), + Ok((180, Some(1))), + Ok((176, Some(2))), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), ], ); } @@ -276,10 +307,19 @@ mod tests { // when config activates at 0 AND ends at 170 config_range.end = Some(170); assert_eq!( - surface_iterator(config_range, 100_000u64, 40u64, 170u64).unwrap().collect::>(), + surface_iterator(config_range, 
100_000u64, 40u64, 170u64) + .unwrap() + .collect::>(), vec![ - Ok((170, None)), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), - Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), + Ok((170, None)), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), ], ); } diff --git a/primitives/state-machine/src/error.rs b/primitives/state-machine/src/error.rs index 489f6e6666001..acc5b6080c7a3 100644 --- a/primitives/state-machine/src/error.rs +++ b/primitives/state-machine/src/error.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,15 +16,14 @@ // limitations under the License. /// State Machine Errors - use sp_std::fmt; /// State Machine Error bound. /// /// This should reflect Wasm error type bound for future compatibility. -pub trait Error: 'static + fmt::Debug + fmt::Display + Send {} +pub trait Error: 'static + fmt::Debug + fmt::Display + Send + Sync {} -impl Error for T {} +impl Error for T {} /// Externalities Error. /// @@ -32,17 +31,18 @@ impl Error for T {} /// would not be executed unless externalities were available. This is included for completeness, /// and as a transition away from the pre-existing framework. #[derive(Debug, Eq, PartialEq)] +#[allow(missing_docs)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] pub enum ExecutionError { - /// Backend error. + #[cfg_attr(feature = "std", error("Backend error {0:?}"))] Backend(crate::DefaultError), - /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. + + #[cfg_attr(feature = "std", error("`:code` entry does not exist in storage"))] CodeEntryDoesNotExist, - /// Backend is incompatible with execution proof generation process. + + #[cfg_attr(feature = "std", error("Unable to generate proof"))] UnableToGenerateProof, - /// Invalid execution proof. - InvalidProof, -} -impl fmt::Display for ExecutionError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Externalities Error") } + #[cfg_attr(feature = "std", error("Invalid execution proof"))] + InvalidProof, } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 53aab42999d5e..c9693ca6a88c1 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,27 +18,28 @@ //! Concrete externalities implementation. 
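The `error.rs` hunk above trades the hand-written `Display` impl for a `thiserror` derive that is gated behind `std`, so the enum keeps building in `no_std` runtimes. A minimal sketch of the same pattern in an isolated crate; the `std` feature gate and `ExampleError` are illustrative, and `thiserror` must be declared as a dependency:

// Derive `thiserror::Error` and attach per-variant messages only when the
// `std` feature is enabled, so the same enum also builds under `no_std`.
#[derive(Debug, Eq, PartialEq)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum ExampleError {
    #[cfg_attr(feature = "std", error("Backend error {0:?}"))]
    Backend(String),

    #[cfg_attr(feature = "std", error("Unable to generate proof"))]
    UnableToGenerateProof,
}

#[cfg(feature = "std")]
fn main() {
    // `Display` now comes from the derive instead of a manual impl.
    assert_eq!(
        ExampleError::Backend("boom".into()).to_string(),
        "Backend error \"boom\""
    );
}

#[cfg(not(feature = "std"))]
fn main() {}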
use crate::{ - StorageKey, StorageValue, OverlayedChanges, - backend::Backend, overlayed_changes::OverlayedExtensions, + backend::Backend, overlayed_changes::OverlayedExtensions, IndexOperation, OverlayedChanges, + StorageKey, StorageValue, }; +use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; use sp_core::{ - storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, hexdisplay::HexDisplay, + storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, }; -use sp_trie::{trie_types::Layout, empty_child_trie_root}; -use sp_externalities::{ - Externalities, Extensions, Extension, ExtensionStore, -}; -use codec::{Decode, Encode, EncodeAppend}; +use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; +use sp_trie::{empty_child_trie_root, trie_types::Layout}; -use sp_std::{fmt, any::{Any, TypeId}, vec::Vec, vec, boxed::Box}; -use crate::{warn, trace, log_error}; -#[cfg(feature = "std")] -use sp_core::offchain::storage::OffchainOverlayedChanges; #[cfg(feature = "std")] use crate::changes_trie::State as ChangesTrieState; -use crate::StorageTransactionCache; +use crate::{log_error, trace, warn, StorageTransactionCache}; +use sp_std::{ + any::{Any, TypeId}, + boxed::Box, + cmp::Ordering, + fmt, vec, + vec::Vec, +}; #[cfg(feature = "std")] use std::error; @@ -48,7 +49,6 @@ const BENCHMARKING_FN: &str = "\ For that reason client started transactions before calling into runtime are not allowed. Without client transactions the loop condition garantuees the success of the tx close."; - #[cfg(feature = "std")] fn guard() -> sp_panic_handler::AbortGuard { sp_panic_handler::AbortGuard::force_abort() @@ -93,16 +93,13 @@ impl error::Error for Error { /// Wraps a read-only backend, call executor, and current overlayed changes. pub struct Ext<'a, H, N, B> - where - H: Hasher, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, +where + H: Hasher, + B: 'a + Backend, + N: crate::changes_trie::BlockNumber, { /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, - /// The overlayed changes destined for the Offchain DB. - #[cfg(feature = "std")] - offchain_overlay: &'a mut OffchainOverlayedChanges, /// The storage backend to read from. backend: &'a B, /// The cache for the storage transactions. @@ -119,12 +116,11 @@ pub struct Ext<'a, H, N, B> extensions: Option>, } - impl<'a, H, N, B> Ext<'a, H, N, B> - where - H: Hasher, - B: Backend, - N: crate::changes_trie::BlockNumber, +where + H: Hasher, + B: Backend, + N: crate::changes_trie::BlockNumber, { /// Create a new `Ext`. 
#[cfg(not(feature = "std"))] @@ -133,20 +129,13 @@ impl<'a, H, N, B> Ext<'a, H, N, B> storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, ) -> Self { - Ext { - overlay, - backend, - id: 0, - storage_transaction_cache, - _phantom: Default::default(), - } + Ext { overlay, backend, id: 0, storage_transaction_cache, _phantom: Default::default() } } /// Create a new `Ext` from overlayed changes and read-only backend #[cfg(feature = "std")] pub fn new( overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, changes_trie_state: Option>, @@ -154,7 +143,6 @@ impl<'a, H, N, B> Ext<'a, H, N, B> ) -> Self { Self { overlay, - offchain_overlay, backend, changes_trie_state, storage_transaction_cache, @@ -170,12 +158,6 @@ impl<'a, H, N, B> Ext<'a, H, N, B> fn mark_dirty(&mut self) { self.storage_transaction_cache.reset(); } - - /// Read only accessor for the scheduled overlay changes. - #[cfg(feature = "std")] - pub fn get_offchain_storage_changes(&self) -> &OffchainOverlayedChanges { - &*self.offchain_overlay - } } #[cfg(test)] @@ -189,7 +171,9 @@ where pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { use std::collections::HashMap; - self.backend.pairs().iter() + self.backend + .pairs() + .iter() .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) .chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned()))) .collect::>() @@ -206,88 +190,93 @@ where B: Backend, N: crate::changes_trie::BlockNumber, { - #[cfg(feature = "std")] fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { - use ::sp_core::offchain::STORAGE_PREFIX; - match value { - Some(value) => self.offchain_overlay.set(STORAGE_PREFIX, key, value), - None => self.offchain_overlay.remove(STORAGE_PREFIX, key), - } + self.overlay.set_offchain_storage(key, value) } - #[cfg(not(feature = "std"))] - fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} - fn storage(&self, key: &[u8]) -> Option { let _guard = guard(); - let result = self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| - self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); - trace!(target: "state", "{:04x}: Get {}={:?}", - self.id, - HexDisplay::from(&key), - result.as_ref().map(HexDisplay::from) + let result = self + .overlay + .storage(key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); + + // NOTE: be careful about touching the key names – used outside substrate! 
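+ // The field names ("method", "ext_id", "key", "result") are a de-facto
+ // interface for external log consumers: renaming them breaks downstream
+ // parsers without any compiler error in this crate.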
+ trace!( + target: "state", + method = "Get", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + result = ?result.as_ref().map(HexDisplay::from), + result_encoded = %HexDisplay::from( + &result + .as_ref() + .map(|v| EncodeOpaqueValue(v.clone())) + .encode() + ), ); + result } fn storage_hash(&self, key: &[u8]) -> Option> { let _guard = guard(); - let result = self.overlay + let result = self + .overlay .storage(key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); - trace!(target: "state", "{:04x}: Hash {}={:?}", - self.id, - HexDisplay::from(&key), - result, + trace!( + target: "state", + method = "Hash", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + ?result, ); result.map(|r| r.encode()) } - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { let _guard = guard(); - let result = self.overlay + let result = self + .overlay .child_storage(child_info, key) .map(|x| x.map(|x| x.to_vec())) - .unwrap_or_else(|| - self.backend.child_storage(child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL) - ); - - trace!(target: "state", "{:04x}: GetChild({}) {}={:?}", - self.id, - HexDisplay::from(&child_info.storage_key()), - HexDisplay::from(&key), - result.as_ref().map(HexDisplay::from) + .unwrap_or_else(|| { + self.backend.child_storage(child_info, key).expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!( + target: "state", + method = "ChildGet", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + result = ?result.as_ref().map(HexDisplay::from) ); result } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option> { + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { let _guard = guard(); - let result = self.overlay + let result = self + .overlay .child_storage(child_info, key) .map(|x| x.map(|x| H::hash(x))) - .unwrap_or_else(|| - self.backend.child_storage_hash(child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL) - ); - - trace!(target: "state", "{:04x}: ChildHash({}) {}={:?}", - self.id, - HexDisplay::from(&child_info.storage_key()), - HexDisplay::from(&key), - result, + .unwrap_or_else(|| { + self.backend.child_storage_hash(child_info, key).expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!( + target: "state", + method = "ChildHash", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + ?result, ); result.map(|r| r.encode()) @@ -300,92 +289,140 @@ where _ => self.backend.exists_storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL), }; - trace!(target: "state", "{:04x}: Exists {}={:?}", - self.id, - HexDisplay::from(&key), - result, + trace!( + target: "state", + method = "Exists", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + %result, ); result } - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> bool { + fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.child_storage(child_info, key) { Some(x) => x.is_some(), - _ => self.backend + _ => self + .backend .exists_child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL), }; - trace!(target: "state", "{:04x}: 
ChildExists({}) {}={:?}", - self.id, - HexDisplay::from(&child_info.storage_key()), - HexDisplay::from(&key), - result, + trace!( + target: "state", + method = "ChildExists", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + %result, ); result } fn next_storage_key(&self, key: &[u8]) -> Option { - let next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); - let next_overlay_key_change = self.overlay.next_storage_key_change(key); - - match (next_backend_key, next_overlay_key_change) { - (Some(backend_key), Some(overlay_key)) if &backend_key[..] < overlay_key.0 => Some(backend_key), - (backend_key, None) => backend_key, - (_, Some(overlay_key)) => if overlay_key.1.value().is_some() { - Some(overlay_key.0.to_vec()) - } else { - self.next_storage_key(&overlay_key.0[..]) + let mut next_backend_key = + self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); + let mut overlay_changes = self.overlay.iter_after(key).peekable(); + + match (&next_backend_key, overlay_changes.peek()) { + (_, None) => next_backend_key, + (Some(_), Some(_)) => { + while let Some(overlay_key) = overlay_changes.next() { + let cmp = next_backend_key.as_deref().map(|v| v.cmp(&overlay_key.0)); + + // If `backend_key` is less than the `overlay_key`, we found out next key. + if cmp == Some(Ordering::Less) { + return next_backend_key + } else if overlay_key.1.value().is_some() { + // If there exists a value for the `overlay_key` in the overlay + // (aka the key is still valid), it means we have found our next key. + return Some(overlay_key.0.to_vec()) + } else if cmp == Some(Ordering::Equal) { + // If the `backend_key` and `overlay_key` are equal, it means that we need + // to search for the next backend key, because the overlay has overwritten + // this key. + next_backend_key = self + .backend + .next_storage_key(&overlay_key.0) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + } + } + + next_backend_key + }, + (None, Some(_)) => { + // Find the next overlay key that has a value attached. + overlay_changes.find_map(|k| k.1.value().as_ref().map(|_| k.0.to_vec())) }, } } - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { - let next_backend_key = self.backend + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + let mut next_backend_key = self + .backend .next_child_storage_key(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); - let next_overlay_key_change = self.overlay.next_child_storage_key_change( - child_info.storage_key(), - key - ); + let mut overlay_changes = + self.overlay.child_iter_after(child_info.storage_key(), key).peekable(); + + match (&next_backend_key, overlay_changes.peek()) { + (_, None) => next_backend_key, + (Some(_), Some(_)) => { + while let Some(overlay_key) = overlay_changes.next() { + let cmp = next_backend_key.as_deref().map(|v| v.cmp(&overlay_key.0)); + + // If `backend_key` is less than the `overlay_key`, we found out next key. + if cmp == Some(Ordering::Less) { + return next_backend_key + } else if overlay_key.1.value().is_some() { + // If there exists a value for the `overlay_key` in the overlay + // (aka the key is still valid), it means we have found our next key. 
+ return Some(overlay_key.0.to_vec()) + } else if cmp == Some(Ordering::Equal) { + // If the `backend_key` and `overlay_key` are equal, it means that we need + // to search for the next backend key, because the overlay has overwritten + // this key. + next_backend_key = self + .backend + .next_child_storage_key(child_info, &overlay_key.0) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + } + } - match (next_backend_key, next_overlay_key_change) { - (Some(backend_key), Some(overlay_key)) if &backend_key[..] < overlay_key.0 => Some(backend_key), - (backend_key, None) => backend_key, - (_, Some(overlay_key)) => if overlay_key.1.value().is_some() { - Some(overlay_key.0.to_vec()) - } else { - self.next_child_storage_key( - child_info, - &overlay_key.0[..], - ) + next_backend_key + }, + (None, Some(_)) => { + // Find the next overlay key that has a value attached. + overlay_changes.find_map(|k| k.1.value().as_ref().map(|_| k.0.to_vec())) }, } } fn place_storage(&mut self, key: StorageKey, value: Option) { - trace!(target: "state", "{:04x}: Put {}={:?}", - self.id, - HexDisplay::from(&key), - value.as_ref().map(HexDisplay::from) - ); let _guard = guard(); if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to directly set child storage key"); - return; + return } + // NOTE: be careful about touching the key names – used outside substrate! + trace!( + target: "state", + method = "Put", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + value = ?value.as_ref().map(HexDisplay::from), + value_encoded = %HexDisplay::from( + &value + .as_ref() + .map(|v| EncodeOpaqueValue(v.clone())) + .encode() + ), + ); + self.mark_dirty(); self.overlay.set_storage(key, value); } @@ -396,11 +433,13 @@ where key: StorageKey, value: Option, ) { - trace!(target: "state", "{:04x}: PutChild({}) {}={:?}", - self.id, - HexDisplay::from(&child_info.storage_key()), - HexDisplay::from(&key), - value.as_ref().map(HexDisplay::from) + trace!( + target: "state", + method = "ChildPut", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + value = ?value.as_ref().map(HexDisplay::from), ); let _guard = guard(); @@ -408,105 +447,105 @@ where self.overlay.set_child_storage(child_info, key, value); } - fn kill_child_storage( - &mut self, - child_info: &ChildInfo, - ) { - trace!(target: "state", "{:04x}: KillChild({})", - self.id, - HexDisplay::from(&child_info.storage_key()), + fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> (bool, u32) { + trace!( + target: "state", + method = "ChildKill", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), ); let _guard = guard(); - self.mark_dirty(); self.overlay.clear_child_storage(child_info); - self.backend.for_keys_in_child_storage(child_info, |key| { - self.overlay.set_child_storage(child_info, key.to_vec(), None); - }); + self.limit_remove_from_backend(Some(child_info), None, limit) } - fn clear_prefix(&mut self, prefix: &[u8]) { - trace!(target: "state", "{:04x}: ClearPrefix {}", - self.id, - HexDisplay::from(&prefix), + fn clear_prefix(&mut self, prefix: &[u8], limit: Option) -> (bool, u32) { + trace!( + target: "state", + method = "ClearPrefix", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + prefix = %HexDisplay::from(&prefix), ); let _guard = guard(); - if is_child_storage_key(prefix) { - warn!(target: "trie", "Refuse to directly clear prefix that is part of child 
storage key"); - return; + + if sp_core::storage::well_known_keys::starts_with_child_storage_key(prefix) { + warn!( + target: "trie", + "Refuse to directly clear prefix that is part or contains of child storage key", + ); + return (false, 0) } self.mark_dirty(); self.overlay.clear_prefix(prefix); - self.backend.for_keys_with_prefix(prefix, |key| { - self.overlay.set_storage(key.to_vec(), None); - }); + self.limit_remove_from_backend(None, Some(prefix), limit) } fn clear_child_prefix( &mut self, child_info: &ChildInfo, prefix: &[u8], - ) { - trace!(target: "state", "{:04x}: ClearChildPrefix({}) {}", - self.id, - HexDisplay::from(&child_info.storage_key()), - HexDisplay::from(&prefix), + limit: Option, + ) -> (bool, u32) { + trace!( + target: "state", + method = "ChildClearPrefix", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + prefix = %HexDisplay::from(&prefix), ); let _guard = guard(); self.mark_dirty(); self.overlay.clear_child_prefix(child_info, prefix); - self.backend.for_child_keys_with_prefix(child_info, prefix, |key| { - self.overlay.set_child_storage(child_info, key.to_vec(), None); - }); + self.limit_remove_from_backend(Some(child_info), Some(prefix), limit) } - fn storage_append( - &mut self, - key: Vec, - value: Vec, - ) { - trace!(target: "state", "{:04x}: Append {}={}", - self.id, - HexDisplay::from(&key), - HexDisplay::from(&value), + fn storage_append(&mut self, key: Vec, value: Vec) { + trace!( + target: "state", + method = "Append", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + value = %HexDisplay::from(&value), ); let _guard = guard(); self.mark_dirty(); let backend = &mut self.backend; - let current_value = self.overlay.value_mut_or_insert_with( - &key, - || backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() - ); + let current_value = self.overlay.value_mut_or_insert_with(&key, || { + backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() + }); StorageAppend::new(current_value).append(value); } - fn chain_id(&self) -> u64 { - 42 - } - fn storage_root(&mut self) -> Vec { let _guard = guard(); if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { - trace!(target: "state", "{:04x}: Root(cached) {}", - self.id, - HexDisplay::from(&root.as_ref()), + trace!( + target: "state", + method = "StorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = true, ); - return root.encode(); + return root.encode() } let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache); - trace!(target: "state", "{:04x}: Root {}", self.id, HexDisplay::from(&root.as_ref())); + trace!( + target: "state", + method = "StorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = false, + ); root.encode() } - fn child_storage_root( - &mut self, - child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec { let _guard = guard(); let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); @@ -514,13 +553,14 @@ where let root = self .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or_else( - || empty_child_trie_root::>() - ); - trace!(target: "state", "{:04x}: ChildRoot({})(cached) {}", - self.id, - 
HexDisplay::from(&storage_key), - HexDisplay::from(&root.as_ref()), + .unwrap_or_else(|| empty_child_trie_root::>()); + trace!( + target: "state", + method = "ChildStorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&storage_key), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = true, ); root.encode() } else { @@ -533,9 +573,9 @@ where if let Some((root, is_empty, _)) = root { let root = root.encode(); - // We store update in the overlay in order to be able to use 'self.storage_transaction' - // cache. This is brittle as it rely on Ext only querying the trie backend for - // storage root. + // We store update in the overlay in order to be able to use + // 'self.storage_transaction' cache. This is brittle as it rely on Ext only querying + // the trie backend for storage root. // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. if is_empty { @@ -544,59 +584,114 @@ where self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone())); } - trace!(target: "state", "{:04x}: ChildRoot({}) {}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&root.as_ref()), + trace!( + target: "state", + method = "ChildStorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&storage_key.as_ref()), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = false, ); + root } else { // empty overlay let root = self .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or_else( - || empty_child_trie_root::>() - ); - trace!(target: "state", "{:04x}: ChildRoot({})(no_change) {}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&root.as_ref()), + .unwrap_or_else(|| empty_child_trie_root::>()); + + trace!( + target: "state", + method = "ChildStorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&storage_key.as_ref()), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = false, ); + root.encode() } } } + fn storage_index_transaction(&mut self, index: u32, hash: &[u8], size: u32) { + trace!( + target: "state", + method = "IndexTransaction", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + %index, + tx_hash = %HexDisplay::from(&hash), + %size, + ); + + self.overlay.add_transaction_index(IndexOperation::Insert { + extrinsic: index, + hash: hash.to_vec(), + size, + }); + } + + /// Renew existing piece of data storage. 
+ fn storage_renew_transaction_index(&mut self, index: u32, hash: &[u8]) { + trace!( + target: "state", + method = "RenewTransactionIndex", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + %index, + tx_hash = %HexDisplay::from(&hash), + ); + + self.overlay + .add_transaction_index(IndexOperation::Renew { extrinsic: index, hash: hash.to_vec() }); + } + #[cfg(not(feature = "std"))] fn storage_changes_root(&mut self, _parent_hash: &[u8]) -> Result>, ()> { Ok(None) } #[cfg(feature = "std")] - fn storage_changes_root(&mut self, parent_hash: &[u8]) -> Result>, ()> { + fn storage_changes_root(&mut self, mut parent_hash: &[u8]) -> Result>, ()> { let _guard = guard(); - let root = self.overlay.changes_trie_root( - self.backend, - self.changes_trie_state.as_ref(), - Decode::decode(&mut &parent_hash[..]).map_err(|e| - trace!( - target: "state", - "Failed to decode changes root parent hash: {}", - e, - ) - )?, - true, - self.storage_transaction_cache, - ); + if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root + { + trace!( + target: "state", + method = "ChangesRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + parent_hash = %HexDisplay::from(&parent_hash), + ?root, + cached = true, + ); - trace!(target: "state", "{:04x}: ChangesRoot({}) {:?}", - self.id, - HexDisplay::from(&parent_hash), - root, - ); + Ok(Some(root.encode())) + } else { + let root = self.overlay.changes_trie_root( + self.backend, + self.changes_trie_state.as_ref(), + Decode::decode(&mut parent_hash).map_err(|e| { + trace!( + target: "state", + error = %e, + "Failed to decode changes root parent hash", + ) + })?, + true, + self.storage_transaction_cache, + ); - root.map(|r| r.map(|o| o.encode())) + trace!( + target: "state", + method = "ChangesRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + parent_hash = %HexDisplay::from(&parent_hash), + ?root, + cached = false, + ); + + root.map(|r| r.map(|o| o.encode())) + } } fn storage_start_transaction(&mut self) { @@ -616,13 +711,15 @@ where for _ in 0..self.overlay.transaction_depth() { self.overlay.rollback_transaction().expect(BENCHMARKING_FN); } - self.overlay.drain_storage_changes( - &self.backend, - #[cfg(feature = "std")] - None, - Default::default(), - self.storage_transaction_cache, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay + .drain_storage_changes( + self.backend, + #[cfg(feature = "std")] + None, + Default::default(), + self.storage_transaction_cache, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); self.overlay @@ -634,19 +731,24 @@ where for _ in 0..self.overlay.transaction_depth() { self.overlay.commit_transaction().expect(BENCHMARKING_FN); } - let changes = self.overlay.drain_storage_changes( - &self.backend, - #[cfg(feature = "std")] - None, - Default::default(), - self.storage_transaction_cache, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); - self.backend.commit( - changes.transaction_storage_root, - changes.transaction, - changes.main_storage_changes, - changes.child_storage_changes, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); + let changes = self + .overlay + .drain_storage_changes( + self.backend, + #[cfg(feature = "std")] + None, + Default::default(), + self.storage_transaction_cache, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.backend + .commit( + changes.transaction_storage_root, + changes.transaction, + changes.main_storage_changes, + changes.child_storage_changes, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); self.overlay 
.enter_runtime() @@ -668,6 +770,65 @@ where fn set_whitelist(&mut self, new: Vec) { self.backend.set_whitelist(new) } + + fn proof_size(&self) -> Option { + self.backend.proof_size() + } + + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + self.backend.get_read_and_written_keys() + } +} + +impl<'a, H, N, B> Ext<'a, H, N, B> +where + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + B: Backend, + N: crate::changes_trie::BlockNumber, +{ + fn limit_remove_from_backend( + &mut self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + limit: Option, + ) -> (bool, u32) { + let mut num_deleted: u32 = 0; + + if let Some(limit) = limit { + let mut all_deleted = true; + self.backend.apply_to_keys_while(child_info, prefix, |key| { + if num_deleted == limit { + all_deleted = false; + return false + } + if let Some(num) = num_deleted.checked_add(1) { + num_deleted = num; + } else { + all_deleted = false; + return false + } + if let Some(child_info) = child_info { + self.overlay.set_child_storage(child_info, key.to_vec(), None); + } else { + self.overlay.set_storage(key.to_vec(), None); + } + true + }); + (all_deleted, num_deleted) + } else { + self.backend.apply_to_keys_while(child_info, prefix, |key| { + num_deleted = num_deleted.saturating_add(1); + if let Some(child_info) = child_info { + self.overlay.set_child_storage(child_info, key.to_vec(), None); + } else { + self.overlay.set_storage(key.to_vec(), None); + } + true + }); + (true, num_deleted) + } + } } /// Implement `Encode` by forwarding the stored raw vec. @@ -704,7 +865,7 @@ impl<'a> StorageAppend<'a> { "Failed to append value, resetting storage item to `[value]`.", ); value.encode() - } + }, }; } } @@ -760,7 +921,10 @@ where } } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if let Some(ref mut extensions) = self.extensions { if extensions.deregister(type_id) { Ok(()) @@ -776,25 +940,19 @@ where #[cfg(test)] mod tests { use super::*; + use crate::{ + changes_trie::{ + Configuration as ChangesTrieConfiguration, InMemoryStorage as TestChangesTrieStorage, + }, + InMemoryBackend, + }; + use codec::Encode; use hex_literal::hex; use num_traits::Zero; - use codec::Encode; use sp_core::{ - H256, - Blake2Hasher, map, - offchain, - storage::{ - Storage, - StorageChild, - well_known_keys::EXTRINSIC_INDEX, - }, - }; - use crate::{ - changes_trie::{ - Configuration as ChangesTrieConfiguration, - InMemoryStorage as TestChangesTrieStorage, - }, InMemoryBackend, + storage::{well_known_keys::EXTRINSIC_INDEX, Storage, StorageChild}, + Blake2Hasher, H256, }; type TestBackend = InMemoryBackend; @@ -806,52 +964,41 @@ mod tests { changes.set_extrinsic_index(1); changes.set_storage(vec![1], Some(vec![100])); changes.set_storage(EXTRINSIC_INDEX.to_vec(), Some(3u32.encode())); + changes.set_offchain_storage(b"k1", Some(b"v1")); + changes.set_offchain_storage(b"k2", Some(b"v2")); changes } - fn prepare_offchain_overlay_with_changes() -> OffchainOverlayedChanges { - let mut ooc = OffchainOverlayedChanges::enabled(); - ooc.set(offchain::STORAGE_PREFIX, b"k1", b"v1"); - ooc.set(offchain::STORAGE_PREFIX, b"k2", b"v2"); - ooc - } - fn changes_trie_config() -> ChangesTrieConfiguration { - ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - } + ChangesTrieConfiguration { digest_interval: 0, digest_levels: 0 } } #[test] fn 
storage_changes_root_is_none_when_storage_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); } #[test] fn storage_changes_root_is_none_when_state_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); } #[test] fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, state, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()), @@ -861,14 +1008,13 @@ mod tests { #[test] fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { let mut overlay = prepare_overlay_with_changes(); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let mut cache = StorageTransactionCache::default(); overlay.set_collect_extrinsics(false); overlay.set_storage(vec![1], None); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, state, None); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); assert_eq!( ext.storage_changes_root(&H256::default().encode()).unwrap(), Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()), @@ -881,17 +1027,17 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![30], Some(vec![31])); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let backend = Storage { top: map![ vec![10] => vec![10], vec![20] => vec![20], vec![40] => vec![40] ], - children_default: map![] - }.into(); + children_default: map![], + } + .into(); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -907,12 +1053,41 @@ mod tests { drop(ext); overlay.set_storage(vec![50], 
Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); } + #[test] + fn next_storage_key_works_with_a_lot_empty_values_in_overlay() { + let mut cache = StorageTransactionCache::default(); + let mut overlay = OverlayedChanges::default(); + overlay.set_storage(vec![20], None); + overlay.set_storage(vec![21], None); + overlay.set_storage(vec![22], None); + overlay.set_storage(vec![23], None); + overlay.set_storage(vec![24], None); + overlay.set_storage(vec![25], None); + overlay.set_storage(vec![26], None); + overlay.set_storage(vec![27], None); + overlay.set_storage(vec![28], None); + overlay.set_storage(vec![29], None); + let backend = Storage { + top: map![ + vec![30] => vec![30] + ], + children_default: map![], + } + .into(); + + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + assert_eq!(ext.next_storage_key(&[5]), Some(vec![30])); + + drop(ext); + } + #[test] fn next_child_storage_key_works() { let child_info = ChildInfo::new_default(b"Child1"); @@ -934,12 +1109,10 @@ mod tests { child_info: child_info.to_owned(), } ], - }.into(); - - - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); + } + .into(); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -955,7 +1128,7 @@ mod tests { drop(ext); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -969,7 +1142,6 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let mut offchain_overlay = prepare_offchain_overlay_with_changes(); let backend = Storage { top: map![], children_default: map![ @@ -982,9 +1154,10 @@ mod tests { child_info: child_info.to_owned(), } ], - }.into(); + } + .into(); - let ext = TestExt::new(&mut overlay, &mut offchain_overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( @@ -993,10 +1166,7 @@ mod tests { ); assert_eq!(ext.child_storage(child_info, &[20]), None); - assert_eq!( - ext.child_storage_hash(child_info, &[20]), - None, - ); + assert_eq!(ext.child_storage_hash(child_info, &[20]), None); assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![31])); assert_eq!( @@ -1005,6 +1175,45 @@ mod tests { ); } + #[test] + fn clear_prefix_cannot_delete_a_child_root() { + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; + let mut cache = StorageTransactionCache::default(); + let mut overlay = OverlayedChanges::default(); + let backend = Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![30] => vec![40] + ], + child_info: 
child_info.to_owned(), + } + ], + } + .into(); + + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + use sp_core::storage::well_known_keys; + let mut ext = ext; + let mut not_under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); + not_under_prefix[4] = 88; + not_under_prefix.extend(b"path"); + ext.set_storage(not_under_prefix.clone(), vec![10]); + + ext.clear_prefix(&[], None); + ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None); + let mut under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec(); + under_prefix.extend(b"path"); + ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None); + assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![40])); + assert_eq!(ext.storage(not_under_prefix.as_slice()), Some(vec![10])); + ext.clear_prefix(¬_under_prefix[..5], None); + assert_eq!(ext.storage(not_under_prefix.as_slice()), None); + } + #[test] fn storage_append_works() { let mut data = Vec::new(); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f211f60202730..f9f94c0c50d60 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -1,62 +1,30 @@ -// Copyright 2017-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! State machine in memory backend. use crate::{ - StorageKey, StorageValue, StorageCollection, - trie_backend::TrieBackend, -}; -use std::{collections::{BTreeMap, HashMap}}; -use hash_db::Hasher; -use sp_trie::{ - MemoryDB, TrieMut, - trie_types::TrieDBMut, + backend::Backend, trie_backend::TrieBackend, StorageCollection, StorageKey, StorageValue, }; use codec::Codec; +use hash_db::Hasher; use sp_core::storage::{ChildInfo, Storage}; - -/// Insert input pairs into memory db. 
-fn insert_into_memory_db(mut root: H::Out, mdb: &mut MemoryDB, input: I) -> H::Out -where - H: Hasher, - I: IntoIterator)>, -{ - { - let mut trie = if root == Default::default() { - TrieDBMut::::new(mdb, &mut root) - } else { - TrieDBMut::::from_existing(mdb, &mut root).unwrap() - }; - for (key, value) in input { - if let Err(e) = match value { - Some(value) => { - trie.insert(&key, &value) - }, - None => { - trie.remove(&key) - }, - } { - panic!("Failed to write to trie: {}", e); - } - } - trie.commit(); - } - root -} +use sp_trie::{empty_trie_root, Layout, MemoryDB}; +use std::collections::{BTreeMap, HashMap}; /// Create a new empty instance of in-memory backend. pub fn new_in_mem() -> TrieBackend, H> @@ -64,9 +32,7 @@ where H::Out: Codec + Ord, { let db = MemoryDB::default(); - let mut backend = TrieBackend::new(db, Default::default()); - backend.insert(std::iter::empty()); - backend + TrieBackend::new(db, empty_trie_root::>()) } impl TrieBackend, H> @@ -74,9 +40,7 @@ where H::Out: Codec + Ord, { /// Copy the state, with applied updates - pub fn update< - T: IntoIterator, StorageCollection)> - >( + pub fn update, StorageCollection)>>( &self, changes: T, ) -> Self { @@ -86,38 +50,19 @@ where } /// Insert values into backend trie. - pub fn insert< - T: IntoIterator, StorageCollection)> - >( + pub fn insert, StorageCollection)>>( &mut self, changes: T, ) { - let mut new_child_roots = Vec::new(); - let mut root_map = None; - let root = self.root().clone(); - for (child_info, map) in changes { - if let Some(child_info) = child_info.as_ref() { - let prefix_storage_key = child_info.prefixed_storage_key(); - let ch = insert_into_memory_db::(root, self.backend_storage_mut(), map.clone().into_iter()); - new_child_roots.push((prefix_storage_key.into_inner(), Some(ch.as_ref().into()))); - } else { - root_map = Some(map); - } - } + let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); + let (root, transaction) = self.full_storage_root( + top.iter().map(|(_, v)| v).flatten().map(|(k, v)| (&k[..], v.as_deref())), + child.iter().filter_map(|v| { + v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) + }), + ); - let root = match root_map { - Some(map) => insert_into_memory_db::( - root, - self.backend_storage_mut(), - map.into_iter().chain(new_child_roots.into_iter()), - ), - None => insert_into_memory_db::( - root, - self.backend_storage_mut(), - new_child_roots.into_iter(), - ), - }; - self.essence.set_root(root); + self.apply_transaction(root, transaction); } /// Merge trie nodes into this backend. @@ -127,6 +72,13 @@ where Self::new(clone, root) } + /// Apply the given transaction to this backend and set the root to the given value. + pub fn apply_transaction(&mut self, root: H::Out, transaction: MemoryDB) { + let mut storage = sp_std::mem::take(self).into_storage(); + storage.consolidate(transaction); + *self = TrieBackend::new(storage, root); + } + /// Compare with another in-memory backend. 
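// The reworked `insert` above no longer writes trie nodes one by one: it
// partitions the changes into top and child collections, lets
// `full_storage_root` compute the new root plus a `MemoryDB` transaction in a
// single pass, and commits both via `apply_transaction`. A minimal usage
// sketch (assuming the `BlakeTwo256` hasher used by the tests below):
//
//     let mut backend = new_in_mem::<BlakeTwo256>();
//     backend.insert(vec![(None, vec![(b"key".to_vec(), Some(b"value".to_vec()))])]);
//     assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec()));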
pub fn eq(&self, other: &Self) -> bool { self.root() == other.root() @@ -158,7 +110,11 @@ where { fn from(inner: HashMap, BTreeMap>) -> Self { let mut backend = new_in_mem(); - backend.insert(inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect()))); + backend.insert( + inner + .into_iter() + .map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), + ); backend } } @@ -168,8 +124,11 @@ where H::Out: Codec + Ord, { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> - = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); + let mut inner: HashMap, BTreeMap> = inners + .children_default + .into_iter() + .map(|(_k, c)| (Some(c.child_info), c.data)) + .collect(); inner.insert(None, inners.top); inner.into() } @@ -186,16 +145,13 @@ where } } -impl From, StorageCollection)>> - for TrieBackend, H> +impl From, StorageCollection)>> for TrieBackend, H> where H::Out: Codec + Ord, { - fn from( - inner: Vec<(Option, StorageCollection)>, - ) -> Self { - let mut expanded: HashMap, BTreeMap> - = HashMap::new(); + fn from(inner: Vec<(Option, StorageCollection)>) -> Self { + let mut expanded: HashMap, BTreeMap> = + HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); for (key, value) in key_values { @@ -211,8 +167,8 @@ where #[cfg(test)] mod tests { use super::*; - use sp_runtime::traits::BlakeTwo256; use crate::backend::Backend; + use sp_runtime::traits::BlakeTwo256; /// Assert in memory backend with only child trie keys works as trie backend. #[test] @@ -220,16 +176,25 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let mut storage = storage.update( - vec![( - Some(child_info.clone()), - vec![(b"2".to_vec(), Some(b"3".to_vec()))] - )] - ); + let storage = storage + .update(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), - Some(b"3".to_vec())); + assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); } + + #[test] + fn insert_multiple_times_child_data_works() { + let mut storage = new_in_mem::(); + let child_info = ChildInfo::new_default(b"1"); + + storage + .insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); + storage + .insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])]); + + assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); + assert_eq!(storage.child_storage(&child_info, &b"1"[..]), Ok(Some(b"3".to_vec()))); + } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 28148b6411a13..05d2c6d20ccee 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,23 +22,23 @@ pub mod backend; #[cfg(feature = "std")] -mod in_memory_backend; +mod basic; #[cfg(feature = "std")] mod changes_trie; mod error; mod ext; #[cfg(feature = "std")] -mod testing; -#[cfg(feature = "std")] -mod basic; +mod in_memory_backend; pub(crate) mod overlayed_changes; #[cfg(feature = "std")] mod proving_backend; -mod trie_backend; -mod trie_backend_essence; -mod stats; #[cfg(feature = "std")] mod read_only; +mod stats; +#[cfg(feature = "std")] +mod testing; +mod trie_backend; +mod trie_backend_essence; #[cfg(feature = "std")] pub use std_reexport::*; @@ -46,19 +46,21 @@ pub use std_reexport::*; #[cfg(feature = "std")] pub use execution::*; #[cfg(feature = "std")] -pub use log::{debug, warn, trace, error as log_error}; +pub use log::{debug, error as log_error, warn}; +#[cfg(feature = "std")] +pub use tracing::trace; /// In no_std we skip logs for state_machine, this macro /// is a noops. #[cfg(not(feature = "std"))] #[macro_export] macro_rules! warn { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// In no_std we skip logs for state_machine, this macro @@ -66,12 +68,12 @@ macro_rules! warn { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! debug { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// In no_std we skip logs for state_machine, this macro @@ -79,12 +81,12 @@ macro_rules! debug { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! trace { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// In no_std we skip logs for state_machine, this macro @@ -92,12 +94,12 @@ macro_rules! trace { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! log_error { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// Default error type to use with state machine trie backend. 
@@ -115,17 +117,19 @@ impl sp_std::fmt::Display for DefaultError { } } -pub use crate::overlayed_changes::{ - OverlayedChanges, StorageKey, StorageValue, - StorageCollection, ChildStorageCollection, - StorageChanges, StorageTransactionCache, +pub use crate::{ + backend::Backend, + ext::Ext, + overlayed_changes::{ + ChildStorageCollection, IndexOperation, OffchainChangesCollection, + OffchainOverlayedChanges, OverlayedChanges, StorageChanges, StorageCollection, StorageKey, + StorageTransactionCache, StorageValue, + }, + stats::{StateMachineStats, UsageInfo, UsageUnit}, + trie_backend::TrieBackend, + trie_backend_essence::{Storage, TrieBackendStorage}, }; -pub use crate::backend::Backend; -pub use crate::trie_backend_essence::{TrieBackendStorage, Storage}; -pub use crate::trie_backend::TrieBackend; -pub use crate::stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use error::{Error, ExecutionError}; -pub use crate::ext::Ext; #[cfg(not(feature = "std"))] mod changes_trie { @@ -138,46 +142,45 @@ mod changes_trie { #[cfg(feature = "std")] mod std_reexport { - pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, DBValue, MemoryDB}; - pub use crate::testing::TestExternalities; - pub use crate::basic::BasicExternalities; - pub use crate::read_only::{ReadOnlyExternalities, InspectState}; - pub use crate::changes_trie::{ - AnchorBlockId as ChangesTrieAnchorBlockId, - State as ChangesTrieState, - Storage as ChangesTrieStorage, - RootsStorage as ChangesTrieRootsStorage, - InMemoryStorage as InMemoryChangesTrieStorage, - BuildCache as ChangesTrieBuildCache, - CacheAction as ChangesTrieCacheAction, - ConfigurationRange as ChangesTrieConfigurationRange, - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, - prune as prune_changes_tries, - disabled_state as disabled_changes_trie_state, - BlockNumber as ChangesTrieBlockNumber, + pub use crate::{ + basic::BasicExternalities, + changes_trie::{ + disabled_state as disabled_changes_trie_state, key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, + AnchorBlockId as ChangesTrieAnchorBlockId, BlockNumber as ChangesTrieBlockNumber, + BuildCache as ChangesTrieBuildCache, CacheAction as ChangesTrieCacheAction, + ConfigurationRange as ChangesTrieConfigurationRange, + InMemoryStorage as InMemoryChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, + State as ChangesTrieState, Storage as ChangesTrieStorage, + }, + error::{Error, ExecutionError}, + in_memory_backend::new_in_mem, + proving_backend::{ + create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + }, + read_only::{InspectState, ReadOnlyExternalities}, + testing::TestExternalities, }; - pub use crate::proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + pub use sp_trie::{ + trie_types::{Layout, TrieDBMut}, + DBValue, MemoryDB, StorageProof, TrieMut, }; - pub use crate::error::{Error, ExecutionError}; - pub use crate::in_memory_backend::new_in_mem; } #[cfg(feature = "std")] mod execution { use super::*; - use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; - use log::{warn, trace}; + use codec::{Codec, Decode, Encode}; use hash_db::Hasher; - use codec::{Decode, Encode, Codec}; use sp_core::{ - offchain::storage::OffchainOverlayedChanges, - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, - traits::{CodeExecutor, CallInWasmExt, RuntimeCode, SpawnNamed}, 
+ hexdisplay::HexDisplay, + storage::ChildInfo, + traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, + NativeOrEncoded, NeverNativeValue, }; use sp_externalities::Extensions; - + use std::{collections::HashMap, fmt, panic::UnwindSafe, result}; + use tracing::{trace, warn}; const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. Client initiated transactions @@ -189,10 +192,8 @@ mod execution { pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; /// Type of changes trie transaction. - pub type ChangesTrieTransaction = ( - MemoryDB, - ChangesTrieCacheAction<::Out, N>, - ); + pub type ChangesTrieTransaction = + (MemoryDB, ChangesTrieCacheAction<::Out, N>); /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; @@ -205,7 +206,8 @@ mod execution { NativeWhenPossible, /// Use the given wasm module. AlwaysWasm, - /// Run with both the wasm and the native variant (if compatible). Report any discrepancy as an error. + /// Run with both the wasm and the native variant (if compatible). Report any discrepancy + /// as an error. Both, /// First native, then if that fails or is not possible, wasm. NativeElseWasm, @@ -229,10 +231,12 @@ mod execution { /// otherwise fall back to the wasm. NativeWhenPossible, /// Use the given wasm module. The backend on which code is executed code could be - /// trusted to provide all storage or not (i.e. the light client cannot be trusted to provide - /// for all storage queries since the storage entries it has come from an external node). + /// trusted to provide all storage or not (i.e. the light client cannot be trusted to + /// provide for all storage queries since the storage entries it has come from an external + /// node). AlwaysWasm(BackendTrustLevel), - /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of any discrepancy. + /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of + /// any discrepancy. Both(F), /// First native, then if that fails or is not possible, wasm. NativeElseWasm, @@ -255,14 +259,14 @@ mod execution { self, ) -> ExecutionManager> { match self { - ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), + ExecutionStrategy::AlwaysWasm => + ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { warn!( "Consensus error between wasm {:?} and native {:?}. Using wasm.", - wasm_result, - native_result, + wasm_result, native_result, ); warn!(" Native result {:?}", native_result); warn!(" Wasm result {:?}", wasm_result); @@ -277,37 +281,43 @@ mod execution { ExecutionManager::NativeElseWasm } - /// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out the type. + /// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out + /// the type. fn always_wasm() -> ExecutionManager> { ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) } - /// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out the type. + /// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out + /// the type. 
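// The four `ExecutionStrategy` variants map one-to-one onto `ExecutionManager`
// values through `get_manager` above; only `Both` carries a handler, and the
// default handler logs the native/wasm divergence and keeps the wasm result.
// The two helpers here are the `AlwaysWasm` shorthands for trusted and
// untrusted backends.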
fn always_untrusted_wasm() -> ExecutionManager> { ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) } /// The substrate state machine. pub struct StateMachine<'a, B, H, N, Exec> - where - H: Hasher, - B: Backend, - N: ChangesTrieBlockNumber, + where + H: Hasher, + B: Backend, + N: ChangesTrieBlockNumber, { backend: &'a B, exec: &'a Exec, method: &'a str, call_data: &'a [u8], overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, extensions: Extensions, changes_trie_state: Option>, storage_transaction_cache: Option<&'a mut StorageTransactionCache>, runtime_code: &'a RuntimeCode<'a>, stats: StateMachineStats, + /// The hash of the block the state machine will be executed on. + /// + /// Used for logging. + parent_hash: Option, } - impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> where + impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> + where H: Hasher, B: Backend, N: ChangesTrieBlockNumber, @@ -317,7 +327,8 @@ mod execution { } } - impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where + impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> + where H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, @@ -329,7 +340,6 @@ mod execution { backend: &'a B, changes_trie_state: Option>, overlay: &'a mut OverlayedChanges, - offchain_overlay: &'a mut OffchainOverlayedChanges, exec: &'a Exec, method: &'a str, call_data: &'a [u8], @@ -337,7 +347,7 @@ mod execution { runtime_code: &'a RuntimeCode, spawn_handle: impl SpawnNamed + Send + 'static, ) -> Self { - extensions.register(CallInWasmExt::new(exec.clone())); + extensions.register(ReadRuntimeVersionExt::new(exec.clone())); extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); Self { @@ -347,11 +357,11 @@ mod execution { call_data, extensions, overlay, - offchain_overlay, changes_trie_state, storage_transaction_cache: None, runtime_code, stats: StateMachineStats::default(), + parent_hash: None, } } @@ -368,6 +378,14 @@ mod execution { self } + /// Set the given `parent_hash` as the hash of the parent block. + /// + /// This will be used for improved logging. + pub fn set_parent_hash(mut self, parent_hash: H::Out) -> Self { + self.parent_hash = Some(parent_hash); + self + } + /// Execute a call using the given state backend, overlayed changes, and call executor. /// /// On an error, no prospective changes are written to the overlay. @@ -377,24 +395,24 @@ mod execution { /// /// Returns the SCALE encoded result of the executed function. pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { - // We are not giving a native call and thus we are sure that the result can never be a native - // value. + // We are not giving a native call and thus we are sure that the result can never be a + // native value. 
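// With the migration from `log` to `tracing` above, the "Call" event carries
// structured fields (`ext_id`, `method`, `parent_hash`, `input`) and "Return"
// carries (`ext_id`, `was_native`, `result`) instead of one formatted string.
// The `parent_hash` field is only populated when the caller opts in, e.g.
// (hypothetical wiring, a `parent_hash` value assumed to be at hand):
//
//     let mut sm = StateMachine::new(/* ... */).set_parent_hash(parent_hash);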
self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( strategy.get_manager(), None, - ).map(NativeOrEncoded::into_encoded) + ) + .map(NativeOrEncoded::into_encoded) } fn execute_aux( &mut self, use_native: bool, native_call: Option, - ) -> ( - CallResult, - bool, - ) where + ) -> (CallResult, bool) + where R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result> + + UnwindSafe, { let mut cache = StorageTransactionCache::default(); @@ -403,24 +421,27 @@ mod execution { None => &mut cache, }; - self.overlay.enter_runtime().expect("StateMachine is never called from the runtime; qed"); + self.overlay + .enter_runtime() + .expect("StateMachine is never called from the runtime; qed"); let mut ext = Ext::new( self.overlay, - self.offchain_overlay, cache, self.backend, self.changes_trie_state.clone(), Some(&mut self.extensions), ); - let id = ext.id; + let ext_id = ext.id; + trace!( - target: "state", "{:04x}: Call {} at {:?}. Input={:?}", - id, - self.method, - self.backend, - HexDisplay::from(&self.call_data), + target: "state", + ext_id = %HexDisplay::from(&ext_id.to_le_bytes()), + method = %self.method, + parent_hash = %self.parent_hash.map(|h| format!("{:?}", h)).unwrap_or_else(|| String::from("None")), + input = ?HexDisplay::from(&self.call_data), + "Call", ); let (result, was_native) = self.exec.call( @@ -432,14 +453,16 @@ mod execution { native_call, ); - self.overlay.exit_runtime() + self.overlay + .exit_runtime() .expect("Runtime is not able to call this function in the overlay; qed"); trace!( - target: "state", "{:04x}: Return. Native={:?}, Result={:?}", - id, - was_native, - result, + target: "state", + ext_id = %HexDisplay::from(&ext_id.to_le_bytes()), + ?was_native, + ?result, + "Return", ); (result, was_native) @@ -450,27 +473,25 @@ mod execution { mut native_call: Option, on_consensus_failure: Handler, ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, { self.overlay.start_transaction(); let (result, was_native) = self.execute_aux(true, native_call.take()); if was_native { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux( - false, - native_call, - ); + let (wasm_result, _) = self.execute_aux(false, native_call); - if (result.is_ok() && wasm_result.is_ok() - && result.as_ref().ok() == wasm_result.as_ref().ok()) - || result.is_err() && wasm_result.is_err() + if (result.is_ok() && + wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) || + result.is_err() && wasm_result.is_err() { result } else { @@ -486,25 +507,20 @@ mod execution { &mut self, mut native_call: Option, ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, { self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux( - true, - native_call.take(), - ); + let (result, was_native) = self.execute_aux(true, native_call.take()); if !was_native || result.is_ok() { self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); result } else { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let 
(wasm_result, _) = self.execute_aux( - false, - native_call, - ); + let (wasm_result, _) = self.execute_aux(false, native_call); wasm_result } } @@ -523,40 +539,33 @@ mod execution { manager: ExecutionManager, mut native_call: Option, ) -> Result, Box> - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, { let changes_tries_enabled = self.changes_trie_state.is_some(); self.overlay.set_collect_extrinsics(changes_tries_enabled); let result = { match manager { - ExecutionManager::Both(on_consensus_failure) => { - self.execute_call_with_both_strategy( - native_call.take(), - on_consensus_failure, - ) - }, - ExecutionManager::NativeElseWasm => { - self.execute_call_with_native_else_wasm_strategy( - native_call.take(), - ) - }, + ExecutionManager::Both(on_consensus_failure) => self + .execute_call_with_both_strategy(native_call.take(), on_consensus_failure), + ExecutionManager::NativeElseWasm => + self.execute_call_with_native_else_wasm_strategy(native_call.take()), ExecutionManager::AlwaysWasm(trust_level) => { let _abort_guard = match trust_level { BackendTrustLevel::Trusted => None, - BackendTrustLevel::Untrusted => Some(sp_panic_handler::AbortGuard::never_abort()), + BackendTrustLevel::Untrusted => + Some(sp_panic_handler::AbortGuard::never_abort()), }; self.execute_aux(false, native_call).0 }, - ExecutionManager::NativeWhenPossible => { - self.execute_aux(true, native_call).0 - }, + ExecutionManager::NativeWhenPossible => self.execute_aux(true, native_call).0, } }; @@ -566,7 +575,7 @@ mod execution { /// Prove execution using the given state backend, overlayed changes, and call executor. pub fn prove_execution( - mut backend: B, + backend: &mut B, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Spawn, @@ -582,7 +591,8 @@ mod execution { N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let trie_backend = backend.as_trie_backend() + let trie_backend = backend + .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_execution_on_trie_backend::<_, _, N, _, _>( trie_backend, @@ -621,13 +631,11 @@ mod execution { N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let mut offchain_overlay = OffchainOverlayedChanges::default(); let proving_backend = proving_backend::ProvingBackend::new(trie_backend); let mut sm = StateMachine::<_, H, N, Exec>::new( &proving_backend, None, overlay, - &mut offchain_overlay, exec, method, call_data, @@ -691,12 +699,10 @@ mod execution { N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut sm = StateMachine::<_, H, N, Exec>::new( trie_backend, None, overlay, - &mut offchain_overlay, exec, method, call_data, @@ -708,14 +714,12 @@ mod execution { sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( always_untrusted_wasm(), None, - ).map(NativeOrEncoded::into_encoded) + ) + .map(NativeOrEncoded::into_encoded) } /// Generate storage read proof. 
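// `prove_read` below now takes `backend: B` without the earlier `mut` binding,
// since `as_trie_backend` no longer needs a mutable borrow here. The
// prove/check pairing it participates in (exercised by the tests further down)
// looks roughly like this sketch:
//
//     let proof = prove_read(remote_backend, &[b"value2"])?;
//     let values = read_proof_check::<BlakeTwo256, _>(remote_root, proof, &[b"value2"])?;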
- pub fn prove_read( - mut backend: B, - keys: I, - ) -> Result> + pub fn prove_read(backend: B, keys: I) -> Result> where B: Backend, H: Hasher, @@ -723,16 +727,74 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() - .ok_or_else( - || Box::new(ExecutionError::UnableToGenerateProof) as Box - )?; + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_read_on_trie_backend(trie_backend, keys) } + /// Generate range storage read proof. + pub fn prove_range_read_with_size( + backend: B, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + size_limit: usize, + start_at: Option<&[u8]>, + ) -> Result<(StorageProof, u32), Box> + where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + { + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_range_read_with_size_on_trie_backend( + trie_backend, + child_info, + prefix, + size_limit, + start_at, + ) + } + + /// Generate range storage read proof on an existing trie backend. + pub fn prove_range_read_with_size_on_trie_backend( + trie_backend: &TrieBackend, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + size_limit: usize, + start_at: Option<&[u8]>, + ) -> Result<(StorageProof, u32), Box> + where + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + { + let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let mut count = 0; + proving_backend + .apply_to_key_values_while( + child_info, + prefix, + start_at, + |_key, _value| { + if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { + count += 1; + true + } else { + false + } + }, + false, + ) + .map_err(|e| Box::new(e) as Box)?; + Ok((proving_backend.extract_proof(), count)) + } + /// Generate child storage read proof. pub fn prove_child_read( - mut backend: B, + backend: B, child_info: &ChildInfo, keys: I, ) -> Result> @@ -743,7 +805,8 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() + let trie_backend = backend + .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_child_read_on_trie_backend(trie_backend, child_info, keys) } @@ -812,6 +875,29 @@ mod execution { Ok(result) } + /// Check child storage range proof, generated by `prove_range_read` call. + pub fn read_range_proof_check( + root: H::Out, + proof: StorageProof, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + count: Option, + start_at: Option<&[u8]>, + ) -> Result<(Vec<(Vec, Vec)>, bool), Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + let proving_backend = create_proof_check_backend::(root, proof)?; + read_range_proof_check_on_proving_backend( + &proving_backend, + child_info, + prefix, + count, + start_at, + ) + } + /// Check child storage read proof, generated by `prove_child_read` call. pub fn read_child_proof_check( root: H::Out, @@ -860,31 +946,62 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - proving_backend.child_storage(child_info, key) + proving_backend + .child_storage(child_info, key) .map_err(|e| Box::new(e) as Box) } + + /// Check storage range proof on pre-created proving backend. + /// + /// Returns a vector with the read `key => value` pairs and a `bool` that is set to `true` when + /// all `key => value` pairs could be read and no more are left. 
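// The prover and verifier halves of the new range API fit together the same
// way: the full node bounds the proof size with `prove_range_read_with_size`
// and reports how many pairs it covered, and the verifier replays it with the
// matching `child_info`/`prefix`/`start_at`. A hedged sketch using the helpers
// from this diff:
//
//     let (proof, count) =
//         prove_range_read_with_size(remote_backend, None, None, 1024, Some(&[]))?;
//     let (pairs, completed) =
//         read_range_proof_check::<BlakeTwo256>(remote_root, proof, None, None, Some(count), None)?;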
+ pub fn read_range_proof_check_on_proving_backend( + proving_backend: &TrieBackend, H>, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + count: Option, + start_at: Option<&[u8]>, + ) -> Result<(Vec<(Vec, Vec)>, bool), Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + let mut values = Vec::new(); + let result = proving_backend.apply_to_key_values_while( + child_info, + prefix, + start_at, + |key, value| { + values.push((key.to_vec(), value.to_vec())); + count.as_ref().map_or(true, |c| (values.len() as u32) < *c) + }, + true, + ); + match result { + Ok(completed) => Ok((values, completed)), + Err(e) => Err(Box::new(e) as Box), + } + } } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use codec::Encode; - use super::*; - use super::ext::Ext; - use super::changes_trie::Configuration as ChangesTrieConfig; + use super::{changes_trie::Configuration as ChangesTrieConfig, ext::Ext, *}; + use crate::execution::CallResult; + use codec::{Decode, Encode}; use sp_core::{ - map, traits::{Externalities, RuntimeCode}, testing::TaskExecutor, + map, + storage::ChildInfo, + testing::TaskExecutor, + traits::{CodeExecutor, Externalities, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; - use std::{result, collections::HashMap}; - use codec::Decode; - use sp_core::{ - offchain::storage::OffchainOverlayedChanges, - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, - traits::CodeExecutor, + use std::{ + collections::{BTreeMap, HashMap}, + panic::UnwindSafe, + result, }; - use crate::execution::CallResult; - #[derive(Clone)] struct DummyCodeExecutor { @@ -899,7 +1016,7 @@ mod tests { fn call< R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result, + NC: FnOnce() -> result::Result> + UnwindSafe, >( &self, ext: &mut dyn Externalities, @@ -912,12 +1029,7 @@ mod tests { if self.change_changes_trie_config { ext.place_storage( sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), - Some( - ChangesTrieConfig { - digest_interval: 777, - digest_levels: 333, - }.encode() - ) + Some(ChangesTrieConfig { digest_interval: 777, digest_levels: 333 }.encode()), ); } @@ -925,38 +1037,24 @@ mod tests { match (using_native, self.native_succeeds, self.fallback_succeeds, native_call) { (true, true, _, Some(call)) => { let res = sp_externalities::set_and_run_with_externalities(ext, || call()); - ( - res.map(NativeOrEncoded::Native).map_err(|_| 0), - true - ) - }, - (true, true, _, None) | (false, _, true, None) => { - ( - Ok( - NativeOrEncoded::Encoded( - vec![ - ext.storage(b"value1").unwrap()[0] + - ext.storage(b"value2").unwrap()[0] - ] - ) - ), - using_native - ) + (res.map(NativeOrEncoded::Native).map_err(|_| 0), true) }, + (true, true, _, None) | (false, _, true, None) => ( + Ok(NativeOrEncoded::Encoded(vec![ + ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0], + ])), + using_native, + ), _ => (Err(0), using_native), } } } - impl sp_core::traits::CallInWasm for DummyCodeExecutor { - fn call_in_wasm( + impl sp_core::traits::ReadRuntimeVersion for DummyCodeExecutor { + fn read_runtime_version( &self, _: &[u8], - _: Option>, - _: &str, - _: &[u8], _: &mut dyn Externalities, - _: sp_core::traits::MissingHostFunctions, ) -> std::result::Result, String> { unimplemented!("Not required in tests.") } @@ -966,14 +1064,12 @@ mod tests { fn execute_works() { let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let mut offchain_overlayed_changes = Default::default(); let wasm_code 
= RuntimeCode::empty(); let mut state_machine = StateMachine::new( &backend, changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -987,25 +1083,19 @@ mod tests { TaskExecutor::new(), ); - assert_eq!( - state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), - vec![66], - ); + assert_eq!(state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), vec![66]); } - #[test] fn execute_works_with_native_else_wasm() { let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let mut offchain_overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); let mut state_machine = StateMachine::new( &backend, changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1027,14 +1117,12 @@ mod tests { let mut consensus_failed = false; let backend = trie_backend::tests::test_trie(); let mut overlayed_changes = Default::default(); - let mut offchain_overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); let mut state_machine = StateMachine::new( &backend, changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1048,15 +1136,15 @@ mod tests { TaskExecutor::new(), ); - assert!( - state_machine.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + assert!(state_machine + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( ExecutionManager::Both(|we, _ne| { consensus_failed = true; we }), None, - ).is_err() - ); + ) + .is_err()); assert!(consensus_failed); } @@ -1070,17 +1158,18 @@ mod tests { }; // fetch execution proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); + let mut remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( - remote_backend, + &mut remote_backend, &mut Default::default(), &executor, TaskExecutor::new(), "test", &[], &RuntimeCode::empty(), - ).unwrap(); + ) + .unwrap(); // check proof locally let local_result = execution_proof_check::( @@ -1092,7 +1181,8 @@ mod tests { "test", &[], &RuntimeCode::empty(), - ).unwrap(); + ) + .unwrap(); // check that both results are correct assert_eq!(remote_result, vec![66]); @@ -1107,7 +1197,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let mut state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from(initial); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1117,23 +1207,24 @@ mod tests { overlay.set_storage(b"abd".to_vec(), Some(b"69".to_vec())); overlay.set_storage(b"bbd".to_vec(), Some(b"42".to_vec())); + let overlay_limit = overlay.clone(); { - let mut offchain_overlay = Default::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), None, ); - ext.clear_prefix(b"ab"); + ext.clear_prefix(b"ab", None); } overlay.commit_transaction().unwrap(); assert_eq!( - overlay.changes().map(|(k, v)| (k.clone(), 
v.value().cloned())) + overlay + .changes() + .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ b"abc".to_vec() => None.into(), @@ -1145,68 +1236,149 @@ mod tests { b"bbd".to_vec() => Some(b"42".to_vec()).into() ], ); + + let mut overlay = overlay_limit; + { + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!((false, 1), ext.clear_prefix(b"ab", Some(1))); + } + overlay.commit_transaction().unwrap(); + + assert_eq!( + overlay + .changes() + .map(|(k, v)| (k.clone(), v.value().cloned())) + .collect::>(), + map![ + b"abb".to_vec() => None.into(), + b"aba".to_vec() => None.into(), + b"abd".to_vec() => None.into(), + + b"bab".to_vec() => Some(b"228".to_vec()).into(), + b"bbd".to_vec() => Some(b"42".to_vec()).into() + ], + ); + } + + #[test] + fn limited_child_kill_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let initial: HashMap<_, BTreeMap<_, _>> = map![ + Some(child_info.clone()) => map![ + b"a".to_vec() => b"0".to_vec(), + b"b".to_vec() => b"1".to_vec(), + b"c".to_vec() => b"2".to_vec(), + b"d".to_vec() => b"3".to_vec() + ], + ]; + let backend = InMemoryBackend::::from(initial); + + let mut overlay = OverlayedChanges::default(); + overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"2".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"3".to_vec(), Some(b"1312".to_vec())); + overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec())); + + { + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); + } + + assert_eq!( + overlay + .children() + .flat_map(|(iter, _child_info)| iter) + .map(|(k, v)| (k.clone(), v.value().clone())) + .collect::>(), + map![ + b"1".to_vec() => None.into(), + b"2".to_vec() => None.into(), + b"3".to_vec() => None.into(), + b"4".to_vec() => None.into(), + b"a".to_vec() => None.into(), + b"b".to_vec() => None.into(), + ], + ); + } + + #[test] + fn limited_child_kill_off_by_one_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let initial: HashMap<_, BTreeMap<_, _>> = map![ + Some(child_info.clone()) => map![ + b"a".to_vec() => b"0".to_vec(), + b"b".to_vec() => b"1".to_vec(), + b"c".to_vec() => b"2".to_vec(), + b"d".to_vec() => b"3".to_vec() + ], + ]; + let backend = InMemoryBackend::::from(initial); + let mut overlay = OverlayedChanges::default(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + assert_eq!(ext.kill_child_storage(&child_info, Some(0)), (false, 0)); + assert_eq!(ext.kill_child_storage(&child_info, Some(1)), (false, 1)); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); + assert_eq!(ext.kill_child_storage(&child_info, Some(3)), (false, 3)); + assert_eq!(ext.kill_child_storage(&child_info, Some(4)), (true, 4)); + // Only 4 items to remove + assert_eq!(ext.kill_child_storage(&child_info, Some(5)), (true, 4)); + assert_eq!(ext.kill_child_storage(&child_info, None), (true, 4)); } #[test] fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let mut state = 
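// The limit-aware deletion APIs exercised above return
// `(all_deleted, num_deleted)`: `(false, n)` means the limit stopped the
// deletion after `n` keys, while `(true, n)` means all `n` remaining keys fit
// within the limit. The off-by-one test pins down the boundary: with four
// keys, `Some(4)` already reports `(true, 4)`.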
new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage( - child_info, - b"abc".to_vec(), - b"def".to_vec() - ); - assert_eq!( - ext.child_storage( - child_info, - b"abc" - ), - Some(b"def".to_vec()) - ); - ext.kill_child_storage( - child_info, - ); - assert_eq!( - ext.child_storage( - child_info, - b"abc" - ), - None - ); + ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec()); + assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec())); + ext.kill_child_storage(child_info, None); + assert_eq!(ext.child_storage(child_info, b"abc"), None); } #[test] fn append_storage_works() { - let reference_data = vec![ - b"data1".to_vec(), - b"2".to_vec(), - b"D3".to_vec(), - b"d4".to_vec(), - ]; + let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1214,16 +1386,12 @@ mod tests { ); ext.storage_append(key.clone(), reference_data[0].encode()); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![reference_data[0].clone()].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } overlay.start_transaction(); { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1233,46 +1401,40 @@ mod tests { for i in reference_data.iter().skip(1) { ext.storage_append(key.clone(), i.encode()); } - assert_eq!( - ext.storage(key.as_slice()), - Some(reference_data.encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(reference_data.encode())); } overlay.rollback_transaction().unwrap(); { let ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), None, ); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![reference_data[0].clone()].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } } #[test] fn remove_with_append_then_rollback_appended_then_append_again() { - #[derive(codec::Encode, codec::Decode)] - enum Item { InitializationItem, DiscardedItem, CommitedItem } + enum Item { + InitializationItem, + DiscardedItem, + CommitedItem, + } let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut overlay = OverlayedChanges::default(); // For example, block initialization with event. 
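// `storage_append` treats the stored value as a SCALE-encoded `Vec<T>` and
// splices one more encoded item in without re-decoding the whole vector, so
// after appending `a` and then `b` the raw value equals `vec![a, b].encode()`.
// The append tests below pin down exactly that equivalence, including across
// rolled-back transactions.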
{ let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1287,17 +1449,13 @@ mod tests { { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), None, ); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); ext.storage_append(key.clone(), Item::DiscardedItem.encode()); @@ -1312,17 +1470,13 @@ mod tests { { let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), None, ); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); ext.storage_append(key.clone(), Item::CommitedItem.encode()); @@ -1330,7 +1484,6 @@ mod tests { ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), ); - } overlay.start_transaction(); @@ -1338,7 +1491,6 @@ mod tests { { let ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, backend, changes_trie::disabled_state::<_, u64>(), @@ -1351,26 +1503,34 @@ mod tests { } } + fn test_compact(remote_proof: StorageProof, remote_root: &sp_core::H256) -> StorageProof { + let compact_remote_proof = + remote_proof.into_compact_proof::<BlakeTwo256>(remote_root.clone()).unwrap(); + compact_remote_proof + .to_storage_proof::<BlakeTwo256>(Some(remote_root)) + .unwrap() + .0 + } + #[test] fn prove_read_and_proof_check_works() { let child_info = ChildInfo::new_default(b"sub1"); + let missing_child_info = ChildInfo::new_default(b"sub1sub2"); // key will include another child root in the proof.
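// `test_compact` above round-trips each proof through its compact form:
// `into_compact_proof::<BlakeTwo256>(root)` drops nodes that can be recomputed
// from the root, and `to_storage_proof::<BlakeTwo256>(Some(root))` rebuilds
// the full proof, so the read-proof assertions below cover the compact
// encoding as well.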
let child_info = &child_info; + let missing_child_info = &missing_child_info; // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); - // check proof locally - let local_result1 = read_proof_check::( - remote_root, - remote_proof.clone(), - &[b"value2"], - ).unwrap(); - let local_result2 = read_proof_check::( - remote_root, - remote_proof.clone(), - &[&[0xff]], - ).is_ok(); - // check that results are correct + let remote_proof = test_compact(remote_proof, &remote_root); + // check proof locally + let local_result1 = + read_proof_check::(remote_root, remote_proof.clone(), &[b"value2"]) + .unwrap(); + let local_result2 = + read_proof_check::(remote_root, remote_proof.clone(), &[&[0xff]]) + .is_ok(); + // check that results are correct assert_eq!( local_result1.into_iter().collect::>(), vec![(b"value2".to_vec(), Some(vec![24]))], @@ -1378,50 +1538,241 @@ mod tests { assert_eq!(local_result2, false); // on child trie let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let remote_proof = prove_child_read( - remote_backend, - child_info, - &[b"value3"], - ).unwrap(); + let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); + let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), child_info, &[b"value3"], - ).unwrap(); + ) + .unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), child_info, &[b"value2"], - ).unwrap(); + ) + .unwrap(); + let local_result3 = read_child_proof_check::( + remote_root, + remote_proof.clone(), + missing_child_info, + &[b"dummy"], + ) + .unwrap(); + assert_eq!( local_result1.into_iter().collect::>(), vec![(b"value3".to_vec(), Some(vec![142]))], ); - assert_eq!( - local_result2.into_iter().collect::>(), - vec![(b"value2".to_vec(), None)], + assert_eq!(local_result2.into_iter().collect::>(), vec![(b"value2".to_vec(), None)]); + assert_eq!(local_result3.into_iter().collect::>(), vec![(b"dummy".to_vec(), None)]); + } + + #[test] + fn child_read_compact_stress_test() { + use rand::{rngs::SmallRng, RngCore, SeedableRng}; + let mut storage: HashMap, BTreeMap> = + Default::default(); + let mut seed = [0; 16]; + for i in 0..50u32 { + let mut child_infos = Vec::new(); + let seed_partial = &mut seed[0..4]; + seed_partial.copy_from_slice(&i.to_be_bytes()[..]); + let mut rand = SmallRng::from_seed(seed); + + let nb_child_trie = rand.next_u32() as usize % 25; + for _ in 0..nb_child_trie { + let key_len = 1 + (rand.next_u32() % 10); + let mut key = vec![0; key_len as usize]; + rand.fill_bytes(&mut key[..]); + let child_info = ChildInfo::new_default(key.as_slice()); + let nb_item = 1 + rand.next_u32() % 25; + let mut items = BTreeMap::new(); + for item in 0..nb_item { + let key_len = 1 + (rand.next_u32() % 10); + let mut key = vec![0; key_len as usize]; + rand.fill_bytes(&mut key[..]); + let value = vec![item as u8; item as usize + 28]; + items.insert(key, value); + } + child_infos.push(child_info.clone()); + storage.insert(Some(child_info), items); + } + + let trie: InMemoryBackend = storage.clone().into(); + let trie_root = 
trie.root().clone(); + let backend = crate::ProvingBackend::new(&trie); + let mut queries = Vec::new(); + for c in 0..(5 + nb_child_trie / 2) { + // random existing query + let child_info = if c < 5 { + // 4 missing child tries + let key_len = 1 + (rand.next_u32() % 10); + let mut key = vec![0; key_len as usize]; + rand.fill_bytes(&mut key[..]); + ChildInfo::new_default(key.as_slice()) + } else { + child_infos[rand.next_u32() as usize % nb_child_trie].clone() + }; + + if let Some(values) = storage.get(&Some(child_info.clone())) { + for _ in 0..(1 + values.len() / 2) { + let ix = rand.next_u32() as usize % values.len(); + for (i, (key, value)) in values.iter().enumerate() { + if i == ix { + assert_eq!( + &backend + .child_storage(&child_info, key.as_slice()) + .unwrap() + .unwrap(), + value + ); + queries.push(( + child_info.clone(), + key.clone(), + Some(value.clone()), + )); + break + } + } + } + } + for _ in 0..4 { + let key_len = 1 + (rand.next_u32() % 10); + let mut key = vec![0; key_len as usize]; + rand.fill_bytes(&mut key[..]); + let result = backend.child_storage(&child_info, key.as_slice()).unwrap(); + queries.push((child_info.clone(), key, result)); + } + } + + let storage_proof = backend.extract_proof(); + let remote_proof = test_compact(storage_proof, &trie_root); + let proof_check = + create_proof_check_backend::<BlakeTwo256>(trie_root, remote_proof).unwrap(); + + for (child_info, key, expected) in queries { + assert_eq!( + proof_check.child_storage(&child_info, key.as_slice()).unwrap(), + expected, + ); + } + } + } + + #[test] + fn prove_read_with_size_limit_works() { + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let (proof, count) = + prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); + // Always contains at least some nodes. + assert_eq!(proof.into_memory_db::<BlakeTwo256>().drain().len(), 3); + assert_eq!(count, 1); + + let remote_backend = trie_backend::tests::test_trie(); + let (proof, count) = + prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); + assert_eq!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len(), 9); + assert_eq!(count, 85); + let (results, completed) = read_range_proof_check::<BlakeTwo256>( + remote_root, + proof.clone(), + None, + None, + Some(count), + None, + ) + .unwrap(); + assert_eq!(results.len() as u32, count); + assert_eq!(completed, false); + // When checking without count limit, proof may actually contain extra values.
+ let (results, completed) = + read_range_proof_check::<BlakeTwo256>(remote_root, proof, None, None, None, None) + .unwrap(); + assert_eq!(results.len() as u32, 101); + assert_eq!(completed, false); + + let remote_backend = trie_backend::tests::test_trie(); + let (proof, count) = + prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); + assert_eq!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len(), 11); + assert_eq!(count, 132); + let (results, completed) = read_range_proof_check::<BlakeTwo256>( + remote_root, + proof.clone(), + None, + None, + None, + None, + ) + .unwrap(); + assert_eq!(results.len() as u32, count); + assert_eq!(completed, true); + } + + #[test] + fn compact_multiple_child_trie() { + // this root will be queried + let child_info1 = ChildInfo::new_default(b"sub1"); + // this root will not be included in the proof + let child_info2 = ChildInfo::new_default(b"sub2"); + // this root will be included in the proof + let child_info3 = ChildInfo::new_default(b"sub"); + let remote_backend = trie_backend::tests::test_trie(); + let (remote_root, transaction) = remote_backend.full_storage_root( + std::iter::empty(), + vec![ + ( + &child_info1, + vec![(&b"key1"[..], Some(&b"val2"[..])), (&b"key2"[..], Some(&b"val3"[..]))] + .into_iter(), + ), + ( + &child_info2, + vec![(&b"key3"[..], Some(&b"val4"[..])), (&b"key4"[..], Some(&b"val5"[..]))] + .into_iter(), + ), + ( + &child_info3, + vec![(&b"key5"[..], Some(&b"val6"[..])), (&b"key6"[..], Some(&b"val7"[..]))] + .into_iter(), + ), + ] + .into_iter(), ); + let mut remote_storage = remote_backend.into_storage(); + remote_storage.consolidate(transaction); + let remote_backend = TrieBackend::new(remote_storage, remote_root); + let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); + let remote_proof = test_compact(remote_proof, &remote_root); + let local_result1 = read_child_proof_check::<BlakeTwo256, _>( + remote_root, + remote_proof.clone(), + &child_info1, + &[b"key1"], + ) + .unwrap(); + assert_eq!(local_result1.len(), 1); + assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(b"val2".to_vec()))); } #[test] fn child_storage_uuid() { - let child_info_1 = ChildInfo::new_default(b"sub_test1"); let child_info_2 = ChildInfo::new_default(b"sub_test2"); use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut transaction = { let backend = test_trie(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, &backend, changes_trie::disabled_state::<_,
u64>(), &mut overlayed_changes, - &mut offchain_overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1512,16 +1859,19 @@ mod tests { ); let run_state_machine = |state_machine: &mut StateMachine<_, _, _, _>| { - state_machine.execute_using_consensus_failure_handler:: _, _, _>( - ExecutionManager::NativeWhenPossible, - Some(|| { - sp_externalities::with_externalities(|mut ext| { - ext.register_extension(DummyExt(2)).unwrap(); - }).unwrap(); - - Ok(()) - }), - ).unwrap(); + state_machine + .execute_using_consensus_failure_handler:: _, _, _>( + ExecutionManager::NativeWhenPossible, + Some(|| { + sp_externalities::with_externalities(|mut ext| { + ext.register_extension(DummyExt(2)).unwrap(); + }) + .unwrap(); + + Ok(()) + }), + ) + .unwrap(); }; run_state_machine(&mut state_machine); diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index 5e4fd77c68563..1ffd569e2828b 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,23 +17,26 @@ //! Houses the code that implements the transactional overlay storage. -use super::{StorageKey, StorageValue, Extrinsics}; +use super::{Extrinsics, StorageKey, StorageValue}; -#[cfg(feature = "std")] -use std::collections::HashSet as Set; #[cfg(not(feature = "std"))] use sp_std::collections::btree_set::BTreeSet as Set; +#[cfg(feature = "std")] +use std::collections::HashSet as Set; -use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use smallvec::SmallVec; use crate::warn; +use smallvec::SmallVec; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + hash::Hash, +}; const PROOF_OVERLAY_NON_EMPTY: &str = "\ An OverlayValue is always created with at least one transaction and dropped as soon as the last transaction is removed; qed"; -type DirtyKeysSets = SmallVec<[Set; 5]>; -type Transactions = SmallVec<[InnerValue; 5]>; +type DirtyKeysSets = SmallVec<[Set; 5]>; +type Transactions = SmallVec<[InnerValue; 5]>; /// Error returned when trying to commit or rollback while no transaction is open or /// when the runtime is trying to close a transaction started by the client. @@ -62,32 +65,44 @@ pub enum ExecutionMode { #[derive(Debug, Default, Clone)] #[cfg_attr(test, derive(PartialEq))] -struct InnerValue { +struct InnerValue { /// Current value. None if value has been deleted. - value: Option, + value: V, /// The set of extrinsic indices where the values has been changed. /// Is filled only if runtime has announced changes trie support. extrinsics: Extrinsics, } /// An overlay that contains all versions of a value for a specific key. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] #[cfg_attr(test, derive(PartialEq))] -pub struct OverlayedValue { +pub struct OverlayedEntry { /// The individual versions of that value. /// One entry per transactions during that the value was actually written. - transactions: Transactions, + transactions: Transactions, } +impl Default for OverlayedEntry { + fn default() -> Self { + Self { transactions: SmallVec::new() } + } +} + +/// History of value, with removal support. 
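// The refactor below generalises the change set: `OverlayedEntry<V>` keeps one
// value version per open transaction, and `OverlayedMap<K, V>` tracks the
// dirty keys per transaction. The old storage-specific names survive as type
// aliases with `V = Option<StorageValue>`, so a `None` version still encodes a
// deletion.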
+pub type OverlayedValue = OverlayedEntry>; + +/// Change set for basic key value with extrinsics index recording and removal support. +pub type OverlayedChangeSet = OverlayedMap>; + /// Holds a set of changes with the ability to modify them using nested transactions. -#[derive(Debug, Default, Clone)] -pub struct OverlayedChangeSet { +#[derive(Debug, Clone)] +pub struct OverlayedMap { /// Stores the changes that this overlay constitutes. - changes: BTreeMap, + changes: BTreeMap>, /// Stores which keys are dirty per transaction. Needed in order to determine which /// values to merge into the parent transaction on commit. The length of this vector /// therefore determines how many nested transactions are currently open (depth). - dirty_keys: DirtyKeysSets, + dirty_keys: DirtyKeysSets, /// The number of transactions, beginning from the first transaction, that are started /// by the client. Those transactions are protected against close (commit, rollback) /// when in runtime mode. @@ -96,32 +111,50 @@ pub struct OverlayedChangeSet { execution_mode: ExecutionMode, } +impl Default for OverlayedMap { + fn default() -> Self { + Self { + changes: BTreeMap::new(), + dirty_keys: SmallVec::new(), + num_client_transactions: Default::default(), + execution_mode: Default::default(), + } + } +} + impl Default for ExecutionMode { fn default() -> Self { Self::Client } } -impl OverlayedValue { +impl OverlayedEntry { /// The value as seen by the current transaction. - pub fn value(&self) -> Option<&StorageValue> { - self.transactions.last().expect(PROOF_OVERLAY_NON_EMPTY).value.as_ref() + pub fn value_ref(&self) -> &V { + &self.transactions.last().expect(PROOF_OVERLAY_NON_EMPTY).value + } + + /// The value as seen by the current transaction. + pub fn into_value(mut self) -> V { + self.transactions.pop().expect(PROOF_OVERLAY_NON_EMPTY).value } /// Unique list of extrinsic indices which modified the value. pub fn extrinsics(&self) -> BTreeSet { let mut set = BTreeSet::new(); - self.transactions.iter().for_each(|t| t.extrinsics.copy_extrinsics_into(&mut set)); + self.transactions + .iter() + .for_each(|t| t.extrinsics.copy_extrinsics_into(&mut set)); set } /// Mutable reference to the most recent version. - fn value_mut(&mut self) -> &mut Option { + fn value_mut(&mut self) -> &mut V { &mut self.transactions.last_mut().expect(PROOF_OVERLAY_NON_EMPTY).value } /// Remove the last version and return it. - fn pop_transaction(&mut self) -> InnerValue { + fn pop_transaction(&mut self) -> InnerValue { self.transactions.pop().expect(PROOF_OVERLAY_NON_EMPTY) } @@ -134,17 +167,9 @@ impl OverlayedValue { /// /// This makes sure that the old version is not overwritten and can be properly /// rolled back when required. - fn set( - &mut self, - value: Option, - first_write_in_tx: bool, - at_extrinsic: Option, - ) { + fn set(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) { if first_write_in_tx || self.transactions.is_empty() { - self.transactions.push(InnerValue { - value, - .. Default::default() - }); + self.transactions.push(InnerValue { value, extrinsics: Default::default() }); } else { *self.value_mut() = value; } @@ -155,15 +180,22 @@ impl OverlayedValue { } } +impl OverlayedEntry> { + /// The value as seen by the current transaction. + pub fn value(&self) -> Option<&StorageValue> { + self.value_ref().as_ref() + } +} + /// Inserts a key into the dirty set. /// /// Returns true iff we currently have at least one open transaction and if this /// is the first write to the given key in that transaction.
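// [Editor's note — illustrative model, not part of this diff.] `OverlayedEntry`
// keeps one `InnerValue` per transaction in which the key was written, so a
// rollback is just popping the top of a per-key stack. A minimal sketch of the
// mechanism (simplified: no extrinsics tracking, plain `Vec` instead of
// `SmallVec`):
//
//     struct Entry<V> {
//         versions: Vec<V>,
//     }
//
//     impl<V> Entry<V> {
//         fn set(&mut self, value: V, first_write_in_tx: bool) {
//             if first_write_in_tx || self.versions.is_empty() {
//                 // First write inside this transaction: preserve the old
//                 // version so it can be restored on rollback.
//                 self.versions.push(value);
//             } else {
//                 // Later writes in the same transaction overwrite in place.
//                 *self.versions.last_mut().expect("non-empty; just checked") = value;
//             }
//         }
//
//         fn rollback(&mut self) {
//             // Drop the version written by the transaction being rolled back.
//             self.versions.pop();
//         }
//     }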
-fn insert_dirty(set: &mut DirtyKeysSets, key: StorageKey) -> bool { +fn insert_dirty(set: &mut DirtyKeysSets, key: K) -> bool { set.last_mut().map(|dk| dk.insert(key)).unwrap_or_default() } -impl OverlayedChangeSet { +impl OverlayedMap { /// Create a new changeset at the same transaction state but without any contents. /// /// This changeset might be created when there are already open transactions. @@ -171,10 +203,10 @@ impl OverlayedChangeSet { pub fn spawn_child(&self) -> Self { use sp_std::iter::repeat; Self { + changes: Default::default(), dirty_keys: repeat(Set::new()).take(self.transaction_depth()).collect(), num_client_transactions: self.num_client_transactions, execution_mode: self.execution_mode, - .. Default::default() } } @@ -184,81 +216,38 @@ impl OverlayedChangeSet { } /// Get an optional reference to the value stored for the specified key. - pub fn get(&self, key: &[u8]) -> Option<&OverlayedValue> { + pub fn get(&self, key: &Q) -> Option<&OverlayedEntry> + where + K: sp_std::borrow::Borrow, + Q: Ord + ?Sized, + { self.changes.get(key) } /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn set( - &mut self, - key: StorageKey, - value: Option, - at_extrinsic: Option, - ) { + pub fn set(&mut self, key: K, value: V, at_extrinsic: Option) { let overlayed = self.changes.entry(key.clone()).or_default(); overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); } - /// Get a mutable reference for a value. - /// - /// Can be rolled back or committed when called inside a transaction. - #[must_use = "A change was registered, so this value MUST be modified."] - pub fn modify( - &mut self, - key: StorageKey, - init: impl Fn() -> StorageValue, - at_extrinsic: Option, - ) -> &mut Option { - let overlayed = self.changes.entry(key.clone()).or_default(); - let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); - let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { - if first_write_in_tx { - Some(tx.value.clone()) - } else { - None - } - } else { - Some(Some(init())) - }; - - if let Some(cloned) = clone_into_new_tx { - overlayed.set(cloned, first_write_in_tx, at_extrinsic); - } - overlayed.value_mut() - } - - /// Set all values to deleted which are matched by the predicate. - /// - /// Can be rolled back or committed when called inside a transaction. - pub fn clear_where( - &mut self, - predicate: impl Fn(&[u8], &OverlayedValue) -> bool, - at_extrinsic: Option, - ) { - for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { - val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); - } - } - /// Get a list of all changes as seen by current transaction. - pub fn changes(&self) -> impl Iterator { + pub fn changes(&self) -> impl Iterator)> { self.changes.iter() } - /// Get the change that is next to the supplied key. - pub fn next_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { - use sp_std::ops::Bound; - let range = (Bound::Excluded(key), Bound::Unbounded); - self.changes.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v)) + /// Get a list of all changes as seen by current transaction, consumes + /// the overlay. + pub fn into_changes(self) -> impl Iterator)> { + self.changes.into_iter() } /// Consume this changeset and return all committed changes. 
/// /// Panics: /// Panics if there are open transactions: `transaction_depth() > 0` - pub fn drain_commited(self) -> impl Iterator)> { + pub fn drain_commited(self) -> impl Iterator { assert!(self.transaction_depth() == 0, "Drain is not allowed with open transactions."); self.changes.into_iter().map(|(k, mut v)| (k, v.pop_transaction().value)) } @@ -276,7 +265,7 @@ impl OverlayedChangeSet { /// Calling this while already inside the runtime will return an error. pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { if let ExecutionMode::Runtime = self.execution_mode { - return Err(AlreadyInRuntime); + return Err(AlreadyInRuntime) } self.execution_mode = ExecutionMode::Runtime; self.num_client_transactions = self.transaction_depth(); @@ -289,7 +278,7 @@ impl OverlayedChangeSet { /// Calling this while already outside the runtime will return an error. pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode { - return Err(NotInRuntime); + return Err(NotInRuntime) } self.execution_mode = ExecutionMode::Client; if self.has_open_runtime_transactions() { @@ -341,11 +330,13 @@ impl OverlayedChangeSet { } for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { - let overlayed = self.changes.get_mut(&key).expect("\ + let overlayed = self.changes.get_mut(&key).expect( + "\ A write to an OverlayedValue is recorded in the dirty key set. Before an OverlayedValue is removed, its containing dirty set is removed. This function is only called for keys that are in the dirty set. qed\ - "); + ", + ); if rollback { overlayed.pop_transaction(); @@ -384,6 +375,56 @@ impl OverlayedChangeSet { } } +impl OverlayedChangeSet { + /// Get a mutable reference for a value. + /// + /// Can be rolled back or committed when called inside a transaction. + #[must_use = "A change was registered, so this value MUST be modified."] + pub fn modify( + &mut self, + key: StorageKey, + init: impl Fn() -> StorageValue, + at_extrinsic: Option, + ) -> &mut Option { + let overlayed = self.changes.entry(key.clone()).or_default(); + let first_write_in_tx = insert_dirty(&mut self.dirty_keys, key); + let clone_into_new_tx = if let Some(tx) = overlayed.transactions.last() { + if first_write_in_tx { + Some(tx.value.clone()) + } else { + None + } + } else { + Some(Some(init())) + }; + + if let Some(cloned) = clone_into_new_tx { + overlayed.set(cloned, first_write_in_tx, at_extrinsic); + } + overlayed.value_mut() + } + + /// Set all values to deleted which are matched by the predicate. + /// + /// Can be rolled back or committed when called inside a transaction. + pub fn clear_where( + &mut self, + predicate: impl Fn(&[u8], &OverlayedValue) -> bool, + at_extrinsic: Option, + ) { + for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) { + val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic); + } + } + + /// Get the iterator over all changes that follow the supplied `key`. 
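// [Editor's note — illustrative, not part of this diff.] `changes_after`
// (implemented just below) is a strictly-greater-than range query over the
// `BTreeMap` of changes; the exclusive lower bound is what lets callers walk
// forward by feeding the last seen key back in. The same idiom with plain std
// types:
//
//     use std::collections::BTreeMap;
//     use std::ops::Bound;
//
//     let mut map: BTreeMap<Vec<u8>, u32> = BTreeMap::new();
//     map.insert(b"key1".to_vec(), 1);
//     map.insert(b"key2".to_vec(), 2);
//
//     // Everything strictly after "key1", in lexicographic order.
//     let mut after = map.range::<[u8], _>((Bound::Excluded(&b"key1"[..]), Bound::Unbounded));
//     assert_eq!(after.next(), Some((&b"key2".to_vec(), &2)));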
+ pub fn changes_after(&self, key: &[u8]) -> impl Iterator { + use sp_std::ops::Bound; + let range = (Bound::Excluded(key), Bound::Unbounded); + self.changes.range::<[u8], _>(range).map(|(k, v)| (k.as_slice(), v)) + } +} + #[cfg(test)] mod test { use super::*; @@ -393,9 +434,12 @@ mod test { type Drained<'a> = Vec<(&'a [u8], Option<&'a [u8]>)>; fn assert_changes(is: &OverlayedChangeSet, expected: &Changes) { - let is: Changes = is.changes().map(|(k, v)| { - (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) - }).collect(); + let is: Changes = is + .changes() + .map(|(k, v)| { + (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) + }) + .collect(); assert_eq!(&is, expected); } @@ -403,7 +447,8 @@ mod test { let is = is.drain_commited().collect::>(); let expected = expected .iter() - .map(|(k, v)| (k.to_vec(), v.0.map(From::from))).collect::>(); + .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) + .collect::>(); assert_eq!(is, expected); } @@ -411,7 +456,8 @@ mod test { let is = is.drain_commited().collect::>(); let expected = expected .iter() - .map(|(k, v)| (k.to_vec(), v.map(From::from))).collect::>(); + .map(|(k, v)| (k.to_vec(), v.map(From::from))) + .collect::>(); assert_eq!(is, expected); } @@ -424,10 +470,7 @@ mod test { changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(9)); - assert_drained(changeset, vec![ - (b"key0", Some(b"val0-1")), - (b"key1", Some(b"val1")), - ]); + assert_drained(changeset, vec![(b"key0", Some(b"val0-1")), (b"key1", Some(b"val1"))]); } #[test] @@ -549,10 +592,8 @@ mod test { changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); - let rolled_back: Changes = vec![ - (b"key0", (Some(b"val0-1"), vec![1, 10])), - (b"key1", (Some(b"val1"), vec![1])), - ]; + let rolled_back: Changes = + vec![(b"key0", (Some(b"val0-1"), vec![1, 10])), (b"key1", (Some(b"val1"), vec![1]))]; assert_changes(&changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); @@ -626,21 +667,27 @@ mod test { changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); - assert_changes(&changeset, &vec![ - (b"del1", (None, vec![3, 5])), - (b"del2", (None, vec![4, 5])), - (b"key0", (Some(b"val0"), vec![1])), - (b"key1", (Some(b"val1"), vec![2])), - ]); + assert_changes( + &changeset, + &vec![ + (b"del1", (None, vec![3, 5])), + (b"del2", (None, vec![4, 5])), + (b"key0", (Some(b"val0"), vec![1])), + (b"key1", (Some(b"val1"), vec![2])), + ], + ); changeset.rollback_transaction().unwrap(); - assert_changes(&changeset, &vec![ - (b"del1", (Some(b"delval1"), vec![3])), - (b"del2", (Some(b"delval2"), vec![4])), - (b"key0", (Some(b"val0"), vec![1])), - (b"key1", (Some(b"val1"), vec![2])), - ]); + assert_changes( + &changeset, + &vec![ + (b"del1", (Some(b"delval1"), vec![3])), + (b"del2", (Some(b"delval2"), vec![4])), + (b"key0", (Some(b"val0"), vec![1])), + (b"key1", (Some(b"val1"), vec![2])), + ], + ); } #[test] @@ -657,30 +704,53 @@ mod test { changeset.set(b"key4".to_vec(), Some(b"val4".to_vec()), Some(4)); changeset.set(b"key11".to_vec(), Some(b"val11".to_vec()), Some(11)); - assert_eq!(changeset.next_change(b"key0").unwrap().0, b"key1"); - assert_eq!(changeset.next_change(b"key0").unwrap().1.value(), Some(&b"val1".to_vec())); - assert_eq!(changeset.next_change(b"key1").unwrap().0, b"key11"); - assert_eq!(changeset.next_change(b"key1").unwrap().1.value(), Some(&b"val11".to_vec())); - 
assert_eq!(changeset.next_change(b"key11").unwrap().0, b"key2"); - assert_eq!(changeset.next_change(b"key11").unwrap().1.value(), Some(&b"val2".to_vec())); - assert_eq!(changeset.next_change(b"key2").unwrap().0, b"key3"); - assert_eq!(changeset.next_change(b"key2").unwrap().1.value(), Some(&b"val3".to_vec())); - assert_eq!(changeset.next_change(b"key3").unwrap().0, b"key4"); - assert_eq!(changeset.next_change(b"key3").unwrap().1.value(), Some(&b"val4".to_vec())); - assert_eq!(changeset.next_change(b"key4"), None); + assert_eq!(changeset.changes_after(b"key0").next().unwrap().0, b"key1"); + assert_eq!( + changeset.changes_after(b"key0").next().unwrap().1.value(), + Some(&b"val1".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key1").next().unwrap().0, b"key11"); + assert_eq!( + changeset.changes_after(b"key1").next().unwrap().1.value(), + Some(&b"val11".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key11").next().unwrap().0, b"key2"); + assert_eq!( + changeset.changes_after(b"key11").next().unwrap().1.value(), + Some(&b"val2".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key2").next().unwrap().0, b"key3"); + assert_eq!( + changeset.changes_after(b"key2").next().unwrap().1.value(), + Some(&b"val3".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key3").next().unwrap().0, b"key4"); + assert_eq!( + changeset.changes_after(b"key3").next().unwrap().1.value(), + Some(&b"val4".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key4").next(), None); changeset.rollback_transaction().unwrap(); - assert_eq!(changeset.next_change(b"key0").unwrap().0, b"key1"); - assert_eq!(changeset.next_change(b"key0").unwrap().1.value(), Some(&b"val1".to_vec())); - assert_eq!(changeset.next_change(b"key1").unwrap().0, b"key2"); - assert_eq!(changeset.next_change(b"key1").unwrap().1.value(), Some(&b"val2".to_vec())); - assert_eq!(changeset.next_change(b"key11").unwrap().0, b"key2"); - assert_eq!(changeset.next_change(b"key11").unwrap().1.value(), Some(&b"val2".to_vec())); - assert_eq!(changeset.next_change(b"key2"), None); - assert_eq!(changeset.next_change(b"key3"), None); - assert_eq!(changeset.next_change(b"key4"), None); - + assert_eq!(changeset.changes_after(b"key0").next().unwrap().0, b"key1"); + assert_eq!( + changeset.changes_after(b"key0").next().unwrap().1.value(), + Some(&b"val1".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key1").next().unwrap().0, b"key2"); + assert_eq!( + changeset.changes_after(b"key1").next().unwrap().1.value(), + Some(&b"val2".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key11").next().unwrap().0, b"key2"); + assert_eq!( + changeset.changes_after(b"key11").next().unwrap().1.value(), + Some(&b"val2".to_vec()) + ); + assert_eq!(changeset.changes_after(b"key2").next(), None); + assert_eq!(changeset.changes_after(b"key3").next(), None); + assert_eq!(changeset.changes_after(b"key4").next(), None); } #[test] @@ -740,9 +810,7 @@ mod test { changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); - assert_drained(changeset, vec![ - (b"key0", Some(b"val0")), - ]); + assert_drained(changeset, vec![(b"key0", Some(b"val0"))]); } #[test] diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 6ef09fc81505d..a0558e06a380e 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,37 +18,37 @@ //! The overlayed changes to state. mod changeset; +mod offchain; -use crate::{ - backend::Backend, - stats::StateMachineStats, -}; -use sp_std::{vec::Vec, any::{TypeId, Any}, boxed::Box}; use self::changeset::OverlayedChangeSet; +use crate::{backend::Backend, stats::StateMachineStats}; +pub use offchain::OffchainOverlayedChanges; +use sp_std::{ + any::{Any, TypeId}, + boxed::Box, + vec::Vec, +}; +use crate::{changes_trie::BlockNumber, DefaultError}; #[cfg(feature = "std")] use crate::{ + changes_trie::{build_changes_trie, State as ChangesTrieState}, ChangesTrieTransaction, - changes_trie::{ - build_changes_trie, - State as ChangesTrieState, - }, }; -use crate::changes_trie::BlockNumber; -#[cfg(feature = "std")] -use std::collections::{HashMap as Map, hash_map::Entry as MapEntry}; +use codec::{Decode, Encode}; +use hash_db::Hasher; +use sp_core::{ + offchain::OffchainOverlayedChange, + storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}, +}; +use sp_externalities::{Extension, Extensions}; #[cfg(not(feature = "std"))] use sp_std::collections::btree_map::{BTreeMap as Map, Entry as MapEntry}; use sp_std::collections::btree_set::BTreeSet; -use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; #[cfg(feature = "std")] -use sp_core::offchain::storage::OffchainOverlayedChanges; -use hash_db::Hasher; -use crate::DefaultError; -use sp_externalities::{Extensions, Extension}; +use std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; -pub use self::changeset::{OverlayedValue, NoOpenTransaction, AlreadyInRuntime, NotInRuntime}; +pub use self::changeset::{AlreadyInRuntime, NoOpenTransaction, NotInRuntime, OverlayedValue}; /// Changes that are made outside of extrinsics are marked with this index. pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; @@ -65,6 +65,9 @@ pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; +/// In memory array of storage values. +pub type OffchainChangesCollection = Vec<((Vec, Vec), OffchainOverlayedChange)>; + /// Keep track of the extrinsics index for a modified value. #[derive(Debug, Default, Eq, PartialEq, Clone)] pub struct Extrinsics(Vec); @@ -97,12 +100,37 @@ pub struct OverlayedChanges { top: OverlayedChangeSet, /// Child storage changes. The map key is the child storage key without the common prefix. children: Map, + /// Offchain related changes. + offchain: OffchainOverlayedChanges, + /// Transaction index changes. + transaction_index_ops: Vec, /// True if extrinsics stats must be collected. collect_extrinsics: bool, /// Collect statistics on this execution. stats: StateMachineStats, } +/// Transaction index operation. +#[derive(Debug, Clone)] +pub enum IndexOperation { + /// Insert transaction into index. + Insert { + /// Extrinsic index in the current block. + extrinsic: u32, + /// Data content hash. + hash: Vec, + /// Indexed data size. + size: u32, + }, + /// Renew existing transaction storage. + Renew { + /// Extrinsic index in the current block. + extrinsic: u32, + /// Referenced index hash. + hash: Vec, + }, +} + /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`].
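// [Editor's note — illustrative usage, not part of this diff.] Transaction
// index operations are recorded next to the storage changes and later drained
// into `StorageChanges::transaction_index_changes`. A hedged sketch based on
// the API added in this file (`add_transaction_index` / `transaction_index_ops`;
// the hash values below are dummies):
//
//     let mut overlay = OverlayedChanges::default();
//     overlay.add_transaction_index(IndexOperation::Insert {
//         extrinsic: 0,        // extrinsic index in the current block
//         hash: vec![0u8; 32], // content hash of the indexed data (dummy here)
//         size: 1024,          // size of the indexed data in bytes
//     });
//     overlay.add_transaction_index(IndexOperation::Renew {
//         extrinsic: 1,
//         hash: vec![0u8; 32], // renew the previously indexed blob
//     });
//     assert_eq!(overlay.transaction_index_ops().len(), 2);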
/// /// This contains all the changes to the storage and transactions to apply these changes to the @@ -115,8 +143,7 @@ pub struct StorageChanges { /// All changes to the child storages. pub child_storage_changes: ChildStorageCollection, /// Offchain state changes to write to the offchain database. - #[cfg(feature = "std")] - pub offchain_storage_changes: OffchainOverlayedChanges, + pub offchain_storage_changes: OffchainChangesCollection, /// A transaction for the backend that contains all changes from /// [`main_storage_changes`](StorageChanges::main_storage_changes) and from /// [`child_storage_changes`](StorageChanges::child_storage_changes). @@ -132,18 +159,25 @@ pub struct StorageChanges { /// Phantom data for block number until changes trie supports no_std. #[cfg(not(feature = "std"))] pub _ph: sp_std::marker::PhantomData, + + /// Changes to the transaction index. + #[cfg(feature = "std")] + pub transaction_index_changes: Vec, } #[cfg(feature = "std")] impl StorageChanges { /// Deconstruct into the inner values. - pub fn into_inner(self) -> ( + pub fn into_inner( + self, + ) -> ( StorageCollection, ChildStorageCollection, - OffchainOverlayedChanges, + OffchainChangesCollection, Transaction, H::Out, Option>, + Vec, ) { ( self.main_storage_changes, @@ -152,6 +186,7 @@ impl StorageChanges { self.transaction, self.transaction_storage_root, self.changes_trie_transaction, + self.transaction_index_changes, ) } } @@ -182,7 +217,9 @@ impl StorageTransactionCache Default for StorageTransactionCache { +impl Default + for StorageTransactionCache +{ fn default() -> Self { Self { transaction: None, @@ -197,12 +234,13 @@ impl Default for StorageTransactionCache } } -impl Default for StorageChanges { +impl Default + for StorageChanges +{ fn default() -> Self { Self { main_storage_changes: Default::default(), child_storage_changes: Default::default(), - #[cfg(feature = "std")] offchain_storage_changes: Default::default(), transaction: Default::default(), transaction_storage_root: Default::default(), @@ -210,6 +248,8 @@ impl Default for StorageChanges changes_trie_transaction: None, #[cfg(not(feature = "std"))] _ph: Default::default(), + #[cfg(feature = "std")] + transaction_index_changes: Default::default(), } } } @@ -268,7 +308,7 @@ impl OverlayedChanges { /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option) { + pub fn set_storage(&mut self, key: StorageKey, val: Option) { let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); self.top.set(key, val, self.extrinsic_index()); @@ -290,12 +330,10 @@ impl OverlayedChanges { self.stats.tally_write_overlay(size_write); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; - let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| - ( - top.spawn_child(), - child_info.clone() - ) - ); + let (changeset, info) = self + .children + .entry(storage_key) + .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); changeset.set(key, val, extrinsic_index); @@ -304,19 +342,14 @@ impl OverlayedChanges { /// Clear child storage of the given storage key. /// /// Can be rolled back or committed when called inside a transaction.
- pub(crate) fn clear_child_storage( - &mut self, - child_info: &ChildInfo, - ) { + pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; - let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| - ( - top.spawn_child(), - child_info.clone() - ) - ); + let (changeset, info) = self + .children + .entry(storage_key) + .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); changeset.clear_where(|_, _| true, extrinsic_index); @@ -332,20 +365,14 @@ impl OverlayedChanges { /// Removes all key-value pairs whose keys share the given prefix. /// /// Can be rolled back or committed when called inside a transaction. - pub(crate) fn clear_child_prefix( - &mut self, - child_info: &ChildInfo, - prefix: &[u8], - ) { + pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; - let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| - ( - top.spawn_child(), - child_info.clone() - ) - ); + let (changeset, info) = self + .children + .entry(storage_key) + .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); changeset.clear_where(|key, _| key.starts_with(prefix), extrinsic_index); @@ -366,12 +393,13 @@ impl OverlayedChanges { /// transaction was open. Any transaction must be closed by either `rollback_transaction` or /// `commit_transaction` before this overlay can be converted into storage changes. /// - /// Changes made without any open transaction are committed immediatly. + /// Changes made without any open transaction are committed immediately. pub fn start_transaction(&mut self) { self.top.start_transaction(); for (_, (changeset, _)) in self.children.iter_mut() { changeset.start_transaction(); } + self.offchain.overlay_mut().start_transaction(); } /// Rollback the last transaction started by `start_transaction`.
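// [Editor's note — illustrative usage, not part of this diff.] Because the
// top, child, and (now) offchain changesets open and close transactions in
// lockstep, a single rollback reverts regular and offchain writes together:
//
//     let mut overlay = OverlayedChanges::default();
//     overlay.start_transaction();
//     overlay.set_storage(b"key".to_vec(), Some(b"val".to_vec()));
//     overlay.set_offchain_storage(b"okey", Some(&b"oval"[..]));
//     overlay.rollback_transaction().unwrap();
//     // Neither the storage nor the offchain write survives the rollback.
//     assert_eq!(overlay.storage(b"key"), None);
//     assert_eq!(overlay.offchain_drain_committed().count(), 0);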
@@ -381,10 +409,15 @@ impl OverlayedChanges { pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.top.rollback_transaction()?; retain_map(&mut self.children, |_, (changeset, _)| { - changeset.rollback_transaction() + changeset + .rollback_transaction() .expect("Top and children changesets are started in lockstep; qed"); !changeset.is_empty() }); + self.offchain + .overlay_mut() + .rollback_transaction() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -395,9 +428,14 @@ impl OverlayedChanges { pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.top.commit_transaction()?; for (_, (changeset, _)) in self.children.iter_mut() { - changeset.commit_transaction() + changeset + .commit_transaction() .expect("Top and children changesets are started in lockstep; qed"); } + self.offchain + .overlay_mut() + .commit_transaction() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -408,9 +446,14 @@ impl OverlayedChanges { pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { self.top.enter_runtime()?; for (_, (changeset, _)) in self.children.iter_mut() { - changeset.enter_runtime() + changeset + .enter_runtime() .expect("Top and children changesets are entering runtime in lockstep; qed") } + self.offchain + .overlay_mut() + .enter_runtime() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -421,9 +464,14 @@ impl OverlayedChanges { pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { self.top.exit_runtime()?; for (_, (changeset, _)) in self.children.iter_mut() { - changeset.exit_runtime() + changeset + .exit_runtime() .expect("Top and children changesets are entering runtime in lockstep; qed"); } + self.offchain + .overlay_mut() + .exit_runtime() + .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -433,51 +481,75 @@ impl OverlayedChanges { /// /// Panics: /// Panics if `transaction_depth() > 0` - fn drain_committed(&mut self) -> ( - impl Iterator)>, - impl Iterator)>, ChildInfo))>, + fn drain_committed( + &mut self, + ) -> ( + impl Iterator)>, + impl Iterator< + Item = ( + StorageKey, + (impl Iterator)>, ChildInfo), + ), + >, ) { use sp_std::mem::take; ( take(&mut self.top).drain_commited(), - take(&mut self.children).into_iter() - .map(|(key, (val, info))| ( - key, - (val.drain_commited(), info) - ) - ), + take(&mut self.children) + .into_iter() + .map(|(key, (val, info))| (key, (val.drain_commited(), info))), ) } + /// Consume all offchain changes and return them. + /// + /// After calling this function no more changes are contained in this changeset. + /// + /// Panics: + /// Panics if `transaction_depth() > 0` + pub fn offchain_drain_committed( + &mut self, + ) -> impl Iterator { + self.offchain.drain() + } + /// Get an iterator over all child changes as seen by the current transaction. - pub fn children(&self) - -> impl Iterator, &ChildInfo)> { + pub fn children( + &self, + ) -> impl Iterator, &ChildInfo)> { self.children.iter().map(|(_, v)| (v.0.changes(), &v.1)) } /// Get an iterator over all top changes as seen by the current transaction. - pub fn changes(&self) -> impl Iterator { + pub fn changes(&self) -> impl Iterator { self.top.changes() } /// Get an optional iterator over all child changes stored under the supplied key.
- pub fn child_changes(&self, key: &[u8]) - -> Option<(impl Iterator, &ChildInfo)> { + pub fn child_changes( + &self, + key: &[u8], + ) -> Option<(impl Iterator, &ChildInfo)> { self.children.get(key).map(|(overlay, info)| (overlay.changes(), info)) } + /// Get a list of all index operations. + pub fn transaction_index_ops(&self) -> &[IndexOperation] { + &self.transaction_index_ops + } + /// Convert this instance with all changes into a [`StorageChanges`] instance. #[cfg(feature = "std")] - pub fn into_storage_changes< - B: Backend, H: Hasher, N: BlockNumber - >( + pub fn into_storage_changes, H: Hasher, N: BlockNumber>( mut self, backend: &B, changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: StorageTransactionCache, ) -> Result, DefaultError> - where H::Out: Ord + Encode + 'static { + where + H::Out: Ord + Encode + 'static, + { self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) } @@ -485,49 +557,55 @@ impl OverlayedChanges { pub fn drain_storage_changes, H: Hasher, N: BlockNumber>( &mut self, backend: &B, - #[cfg(feature = "std")] - changes_trie_state: Option<&ChangesTrieState>, + #[cfg(feature = "std")] changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: &mut StorageTransactionCache, ) -> Result, DefaultError> - where H::Out: Ord + Encode + 'static { + where + H::Out: Ord + Encode + 'static, + { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { self.storage_root(backend, &mut cache); } - let (transaction, transaction_storage_root) = cache.transaction.take() + let (transaction, transaction_storage_root) = cache + .transaction + .take() .and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr))) .expect("Transaction was generated as part of `storage_root`; qed"); // If the transaction does not exist, we generate it.
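// [Editor's note — illustrative, not part of this diff.] After
// `drain_storage_changes` everything collected by the overlay ends up in one
// `StorageChanges` value. A hedged sketch of how a consumer might unpack it,
// assuming the `into_inner` tuple order added above (`storage_changes` is a
// hypothetical variable obtained from `drain_storage_changes`):
//
//     let (main, child, offchain, _transaction, _root, _changes_trie, tx_index) =
//         storage_changes.into_inner();
//     for (_key, _value) in main { /* write to the main state column */ }
//     for (_child_key, _changes) in child { /* write to the child tries */ }
//     for ((_prefix, _key), _change) in offchain { /* apply to the offchain DB */ }
//     for _op in tx_index { /* feed the transaction storage index */ }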
#[cfg(feature = "std")] if cache.changes_trie_transaction.is_none() { - self.changes_trie_root( - backend, - changes_trie_state, - parent_hash, - false, - &mut cache, - ).map_err(|_| "Failed to generate changes trie transaction")?; + self.changes_trie_root(backend, changes_trie_state, parent_hash, false, &mut cache) + .map_err(|_| "Failed to generate changes trie transaction")?; } #[cfg(feature = "std")] - let changes_trie_transaction = cache.changes_trie_transaction + let changes_trie_transaction = cache + .changes_trie_transaction .take() .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); let (main_storage_changes, child_storage_changes) = self.drain_committed(); + let offchain_storage_changes = self.offchain_drain_committed().collect(); + + #[cfg(feature = "std")] + let transaction_index_changes = std::mem::take(&mut self.transaction_index_ops); Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), - child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), - #[cfg(feature = "std")] - offchain_storage_changes: Default::default(), + child_storage_changes: child_storage_changes + .map(|(sk, it)| (sk, it.0.collect())) + .collect(), + offchain_storage_changes, transaction, transaction_storage_root, #[cfg(feature = "std")] changes_trie_transaction, + #[cfg(feature = "std")] + transaction_index_changes, #[cfg(not(feature = "std"))] _ph: Default::default(), }) @@ -550,7 +628,8 @@ impl OverlayedChanges { true => Some( self.storage(EXTRINSIC_INDEX) .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) - .unwrap_or(NO_EXTRINSIC_INDEX)), + .unwrap_or(NO_EXTRINSIC_INDEX), + ), false => None, } } @@ -564,13 +643,13 @@ impl OverlayedChanges { backend: &B, cache: &mut StorageTransactionCache, ) -> H::Out - where H::Out: Ord + Encode, + where + H::Out: Ord + Encode, { let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); - let child_delta = self.children() - .map(|(changes, info)| (info, changes.map( - |(k, v)| (&k[..], v.value().map(|v| &v[..])) - ))); + let child_delta = self.children().map(|(changes, info)| { + (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) + }); let (root, transaction) = backend.full_storage_root(delta, child_delta); @@ -595,14 +674,18 @@ impl OverlayedChanges { parent_hash: H::Out, panic_on_storage_error: bool, cache: &mut StorageTransactionCache, - ) -> Result, ()> where H::Out: Ord + Encode + 'static { + ) -> Result, ()> + where + H::Out: Ord + Encode + 'static, + { build_changes_trie::<_, H, N>( backend, changes_trie_state, self, parent_hash, panic_on_storage_error, - ).map(|r| { + ) + .map(|r| { let root = r.as_ref().map(|r| r.1).clone(); cache.changes_trie_transaction = Some(r.map(|(db, _, cache)| (db, cache))); cache.changes_trie_transaction_storage_root = Some(root); @@ -610,41 +693,60 @@ impl OverlayedChanges { }) } - /// Returns the next (in lexicographic order) storage key in the overlayed alongside its value. - /// If no value is next then `None` is returned. - pub fn next_storage_key_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { - self.top.next_change(key) + /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) + /// alongside their values. + pub fn iter_after(&self, key: &[u8]) -> impl Iterator { + self.top.changes_after(key) } - /// Returns the next (in lexicographic order) child storage key in the overlayed alongside its - /// value.
If no value is next then `None` is returned. - pub fn next_child_storage_key_change( + /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) + /// alongside their values for the given `storage_key` child. + pub fn child_iter_after( &self, storage_key: &[u8], - key: &[u8] - ) -> Option<(&[u8], &OverlayedValue)> { + key: &[u8], + ) -> impl Iterator { self.children .get(storage_key) - .and_then(|(overlay, _)| - overlay.next_change(key) - ) + .map(|(overlay, _)| overlay.changes_after(key)) + .into_iter() + .flatten() + } + + /// Read-only access to the offchain overlay. + pub fn offchain(&self) -> &OffchainOverlayedChanges { + &self.offchain + } + + /// Write a key value pair to the offchain storage overlay. + pub fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { + use sp_core::offchain::STORAGE_PREFIX; + match value { + Some(value) => self.offchain.set(STORAGE_PREFIX, key, value), + None => self.offchain.remove(STORAGE_PREFIX, key), + } + } + + /// Add transaction index operation. + pub fn add_transaction_index(&mut self, op: IndexOperation) { + self.transaction_index_ops.push(op) + } } #[cfg(feature = "std")] fn retain_map(map: &mut Map, f: F) - where - K: std::cmp::Eq + std::hash::Hash, - F: FnMut(&K, &mut V) -> bool, +where + K: std::cmp::Eq + std::hash::Hash, + F: FnMut(&K, &mut V) -> bool, { map.retain(f); } #[cfg(not(feature = "std"))] fn retain_map(map: &mut Map, mut f: F) - where - K: Ord, - F: FnMut(&K, &mut V) -> bool, +where + K: Ord, + F: FnMut(&K, &mut V) -> bool, { let old = sp_std::mem::replace(map, Map::default()); for (k, mut v) in old.into_iter() { @@ -716,18 +818,13 @@ impl<'a> OverlayedExtensions<'a> { #[cfg(test)] mod tests { - use hex_literal::hex; - use sp_core::{Blake2Hasher, traits::Externalities}; - use crate::InMemoryBackend; - use crate::ext::Ext; use super::*; + use crate::{ext::Ext, InMemoryBackend}; + use hex_literal::hex; + use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; - fn assert_extrinsics( - overlay: &OverlayedChangeSet, - key: impl AsRef<[u8]>, - expected: Vec, - ) { + fn assert_extrinsics(overlay: &OverlayedChangeSet, key: impl AsRef<[u8]>, expected: Vec) { assert_eq!( overlay.get(key.as_ref()).unwrap().extrinsics().into_iter().collect::>(), expected
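// [Editor's note — illustrative usage, not part of this diff.] `iter_after`
// and `child_iter_after` replace the old `next_*_key_change` helpers: calling
// `.next()` restores the old one-step behaviour, while keeping the iterator
// walks every overlay key greater than the argument:
//
//     let mut overlay = OverlayedChanges::default();
//     overlay.set_storage(vec![10], Some(vec![10]));
//     overlay.set_storage(vec![20], Some(vec![20]));
//     overlay.set_storage(vec![30], Some(vec![30]));
//
//     // Old behaviour: the single successor of a key.
//     let next = overlay.iter_after(&[10]).next().unwrap();
//     assert_eq!(next.0.to_vec(), vec![20]);
//
//     // New behaviour: keep iterating to visit all following keys.
//     let keys: Vec<_> = overlay.iter_after(&[10]).map(|(k, _)| k.to_vec()).collect();
//     assert_eq!(keys, vec![vec![20], vec![30]]);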
vec![(key.clone(), Some(vec![1, 2, 3]))]); + + overlayed.start_transaction(); + + overlayed.set_offchain_storage(key.as_slice(), Some(&[][..])); + check_offchain_content(&overlayed, 1, vec![(key.clone(), Some(vec![]))]); + + overlayed.set_offchain_storage(key.as_slice(), None); + check_offchain_content(&overlayed, 1, vec![(key.clone(), None)]); + + overlayed.rollback_transaction().unwrap(); + + check_offchain_content(&overlayed, 0, vec![(key.clone(), Some(vec![1, 2, 3]))]); + + overlayed.set_offchain_storage(key.as_slice(), None); + check_offchain_content(&overlayed, 0, vec![(key.clone(), None)]); + } + #[test] fn overlayed_storage_root_works() { let initial: BTreeMap<_, _> = vec![ @@ -774,7 +928,9 @@ mod tests { (b"dog".to_vec(), b"puppyXXX".to_vec()), (b"dogglesworth".to_vec(), b"catXXX".to_vec()), (b"doug".to_vec(), b"notadog".to_vec()), - ].into_iter().collect(); + ] + .into_iter() + .collect(); let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges::default(); overlay.set_collect_extrinsics(false); @@ -789,17 +945,16 @@ mod tests { overlay.set_storage(b"dogglesworth".to_vec(), Some(b"cat".to_vec())); overlay.set_storage(b"doug".to_vec(), None); - let mut offchain_overlay = Default::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, - &mut offchain_overlay, &mut cache, &backend, crate::changes_trie::disabled_state::<_, u64>(), None, ); - const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); assert_eq!(&ext.storage_root()[..], &ROOT); } @@ -857,28 +1012,28 @@ mod tests { overlay.set_storage(vec![30], None); // next_prospective < next_committed - let next_to_5 = overlay.next_storage_key_change(&[5]).unwrap(); + let next_to_5 = overlay.iter_after(&[5]).next().unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); assert_eq!(next_to_5.1.value(), Some(&vec![10])); // next_committed < next_prospective - let next_to_10 = overlay.next_storage_key_change(&[10]).unwrap(); + let next_to_10 = overlay.iter_after(&[10]).next().unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); assert_eq!(next_to_10.1.value(), Some(&vec![20])); // next_committed == next_prospective - let next_to_20 = overlay.next_storage_key_change(&[20]).unwrap(); + let next_to_20 = overlay.iter_after(&[20]).next().unwrap(); assert_eq!(next_to_20.0.to_vec(), vec![30]); assert_eq!(next_to_20.1.value(), None); // next_committed, no next_prospective - let next_to_30 = overlay.next_storage_key_change(&[30]).unwrap(); + let next_to_30 = overlay.iter_after(&[30]).next().unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value(), Some(&vec![40])); overlay.set_storage(vec![50], Some(vec![50])); // next_prospective, no next_committed - let next_to_40 = overlay.next_storage_key_change(&[40]).unwrap(); + let next_to_40 = overlay.iter_after(&[40]).next().unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); assert_eq!(next_to_40.1.value(), Some(&vec![50])); } @@ -898,28 +1053,28 @@ mod tests { overlay.set_child_storage(child_info, vec![30], None); // next_prospective < next_committed - let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); + let next_to_5 = overlay.child_iter_after(child, &[5]).next().unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); assert_eq!(next_to_5.1.value(), Some(&vec![10])); // next_committed < next_prospective - let next_to_10 = 
overlay.next_child_storage_key_change(child, &[10]).unwrap(); + let next_to_10 = overlay.child_iter_after(child, &[10]).next().unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); assert_eq!(next_to_10.1.value(), Some(&vec![20])); // next_committed == next_prospective - let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).unwrap(); + let next_to_20 = overlay.child_iter_after(child, &[20]).next().unwrap(); assert_eq!(next_to_20.0.to_vec(), vec![30]); assert_eq!(next_to_20.1.value(), None); // next_committed, no next_prospective - let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).unwrap(); + let next_to_30 = overlay.child_iter_after(child, &[30]).next().unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value(), Some(&vec![40])); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed - let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); + let next_to_40 = overlay.child_iter_after(child, &[40]).next().unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); assert_eq!(next_to_40.1.value(), Some(&vec![50])); } diff --git a/primitives/state-machine/src/overlayed_changes/offchain.rs b/primitives/state-machine/src/overlayed_changes/offchain.rs new file mode 100644 index 0000000000000..ac67ca3303008 --- /dev/null +++ b/primitives/state-machine/src/overlayed_changes/offchain.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Overlayed changes for offchain indexing. + +use super::changeset::OverlayedMap; +use sp_core::offchain::OffchainOverlayedChange; +use sp_std::prelude::Vec; + +/// In-memory storage for offchain workers recording changes for the actual offchain storage +/// implementation. +#[derive(Debug, Clone, Default)] +pub struct OffchainOverlayedChanges(OverlayedMap<(Vec, Vec), OffchainOverlayedChange>); + +/// Item for iterating over offchain changes. +/// +/// First element is a tuple of `(prefix, key)`, the second element is the actual change +/// (remove or set value). +type OffchainOverlayedChangesItem<'i> = (&'i (Vec, Vec), &'i OffchainOverlayedChange); + +/// Iterator over offchain changes, owned memory version. +type OffchainOverlayedChangesItemOwned = ((Vec, Vec), OffchainOverlayedChange); + +impl OffchainOverlayedChanges { + /// Consume the offchain storage and iterate over all key value pairs. + pub fn into_iter(self) -> impl Iterator { + self.0.into_changes().map(|kv| (kv.0, kv.1.into_value())) + } + + /// Iterate over all key value pairs by reference. + pub fn iter<'a>(&'a self) -> impl Iterator> { + self.0.changes().map(|kv| (kv.0, kv.1.value_ref())) + } + + /// Drain all elements of changeset. + pub fn drain(&mut self) -> impl Iterator { + sp_std::mem::take(self).into_iter() + } + + /// Remove a key and its associated value from the offchain database.
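// [Editor's note — illustrative usage, not part of this diff.] Keys in the
// offchain overlay are `(prefix, key)` tuples; `set`/`remove` store an
// `OffchainOverlayedChange`, so a later `Remove` shadows an earlier `SetValue`
// for the same pair. Based on the API defined in this file:
//
//     use sp_core::offchain::{OffchainOverlayedChange, STORAGE_PREFIX};
//
//     let mut ooc = OffchainOverlayedChanges::default();
//     ooc.set(STORAGE_PREFIX, b"key", b"value");
//     assert_eq!(
//         ooc.get(STORAGE_PREFIX, b"key"),
//         Some(OffchainOverlayedChange::SetValue(b"value".to_vec()))
//     );
//     ooc.remove(STORAGE_PREFIX, b"key");
//     assert_eq!(ooc.get(STORAGE_PREFIX, b"key"), Some(OffchainOverlayedChange::Remove));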
+ pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { + let _ = self + .0 + .set((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::Remove, None); + } + + /// Set the value associated with a key under a prefix to the value provided. + pub fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { + let _ = self.0.set( + (prefix.to_vec(), key.to_vec()), + OffchainOverlayedChange::SetValue(value.to_vec()), + None, + ); + } + + /// Obtain the associated value for the given key in storage with prefix. + pub fn get(&self, prefix: &[u8], key: &[u8]) -> Option { + let key = (prefix.to_vec(), key.to_vec()); + self.0.get(&key).map(|entry| entry.value_ref()).cloned() + } + + /// Reference to inner change set. + pub fn overlay(&self) -> &OverlayedMap<(Vec, Vec), OffchainOverlayedChange> { + &self.0 + } + + /// Mutable reference to inner change set. + pub fn overlay_mut( + &mut self, + ) -> &mut OverlayedMap<(Vec, Vec), OffchainOverlayedChange> { + &mut self.0 + } +} + +#[cfg(test)] +mod test { + use super::*; + use sp_core::offchain::STORAGE_PREFIX; + + #[test] + fn test_drain() { + let mut ooc = OffchainOverlayedChanges::default(); + ooc.set(STORAGE_PREFIX, b"kkk", b"vvv"); + let drained = ooc.drain().count(); + assert_eq!(drained, 1); + let leftover = ooc.iter().count(); + assert_eq!(leftover, 0); + + ooc.set(STORAGE_PREFIX, b"a", b"v"); + ooc.set(STORAGE_PREFIX, b"b", b"v"); + ooc.set(STORAGE_PREFIX, b"c", b"v"); + ooc.set(STORAGE_PREFIX, b"d", b"v"); + ooc.set(STORAGE_PREFIX, b"e", b"v"); + assert_eq!(ooc.iter().count(), 5); + } + + #[test] + fn test_accumulated_set_remove_set() { + let mut ooc = OffchainOverlayedChanges::default(); + ooc.set(STORAGE_PREFIX, b"ppp", b"qqq"); + ooc.remove(STORAGE_PREFIX, b"ppp"); + // keys are equal, so it will overwrite the value and the overlay will contain + // one item + assert_eq!(ooc.iter().count(), 1); + + ooc.set(STORAGE_PREFIX, b"ppp", b"rrr"); + let mut iter = ooc.into_iter(); + assert_eq!( + iter.next(), + Some(( + (STORAGE_PREFIX.to_vec(), b"ppp".to_vec()), + OffchainOverlayedChange::SetValue(b"rrr".to_vec()) + )) + ); + assert_eq!(iter.next(), None); + } +} diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0888c561cae30..690266dab1e72 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,20 +17,28 @@ //! Proving state machine backend.
-use std::{sync::Arc, collections::HashMap}; -use parking_lot::RwLock; -use codec::{Decode, Codec}; +use crate::{ + trie_backend::TrieBackend, + trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, + Backend, DBValue, Error, ExecutionError, +}; +use codec::{Codec, Decode, Encode}; +use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; use log::debug; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; +use parking_lot::RwLock; +use sp_core::storage::ChildInfo; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProof, + empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, + MemoryDB, StorageProof, +}; +pub use sp_trie::{ + trie_types::{Layout, TrieError}, + Recorder, +}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, }; -pub use sp_trie::{Recorder, trie_types::{Layout, TrieError}}; -use crate::trie_backend::TrieBackend; -use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; -use crate::{Error, ExecutionError, Backend, DBValue}; -use sp_core::storage::ChildInfo; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -39,18 +47,15 @@ pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Has } impl<'a, S, H> ProvingBackendRecorder<'a, S, H> - where - S: TrieBackendStorage, - H: Hasher, - H::Out: Codec, +where + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, { /// Produce proof for a key query. pub fn storage(&mut self, key: &[u8]) -> Result>, String> { let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let map_e = |e| format!("Trie lookup error: {}", e); @@ -59,25 +64,24 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> self.backend.root(), key, &mut *self.proof_recorder, - ).map_err(map_e) + ) + .map_err(map_e) } /// Produce proof for a child key query. pub fn child_storage( &mut self, child_info: &ChildInfo, - key: &[u8] + key: &[u8], ) -> Result>, String> { let storage_key = child_info.storage_key(); - let root = self.storage(storage_key)? + let root = self + .storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) .unwrap_or_else(|| empty_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let map_e = |e| format!("Trie lookup error: {}", e); @@ -86,17 +90,15 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> &eph, &root.as_ref(), key, - &mut *self.proof_recorder - ).map_err(map_e) + &mut *self.proof_recorder, + ) + .map_err(map_e) } /// Produce proof for the whole backend. pub fn record_all_keys(&mut self) { let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let mut iter = move || -> Result<(), Box>> { let root = self.backend.root(); @@ -109,24 +111,86 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> } } -/// Global proof recorder, act as a layer over a hash db for recording queried -/// data. 
-pub type ProofRecorder = Arc::Out, Option>>>; +#[derive(Default)] +struct ProofRecorderInner { + /// All the records that we have stored so far. + records: HashMap>, + /// The encoded size of all recorded values. + encoded_size: usize, +} + +/// Global proof recorder, acts as a layer over a hash db for recording queried data. +#[derive(Clone, Default)] +pub struct ProofRecorder { + inner: Arc>>, +} + +impl ProofRecorder { + /// Record the given `key` => `val` combination. + pub fn record(&self, key: Hash, val: Option) { + let mut inner = self.inner.write(); + let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { + let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); + + entry.insert(val); + encoded_size + } else { + 0 + }; + + inner.encoded_size += encoded_size; + } + + /// Returns the value at the given `key`. + pub fn get(&self, key: &Hash) -> Option> { + self.inner.read().records.get(key).cloned() + } + + /// Returns the estimated encoded size of the proof. + /// + /// The estimate may be bigger (by at most 4 bytes), but never smaller than the actual + /// encoded proof. + pub fn estimate_encoded_size(&self) -> usize { + let inner = self.inner.read(); + inner.encoded_size + codec::Compact(inner.records.len() as u32).encoded_size() + } + + /// Convert into a [`StorageProof`]. + pub fn to_storage_proof(&self) -> StorageProof { + let trie_nodes = self + .inner + .read() + .records + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + + StorageProof::new(trie_nodes) + } + + /// Reset the internal state. + pub fn reset(&self) { + let mut inner = self.inner.write(); + inner.records.clear(); + inner.encoded_size = 0; + } +} /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ( +pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher>( TrieBackend, H>, ); /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { backend: &'a S, - proof_recorder: ProofRecorder, + proof_recorder: ProofRecorder, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> - where H::Out: Codec +where + H::Out: Codec, { /// Create new proving backend. pub fn new(backend: &'a TrieBackend) -> Self { @@ -137,25 +201,25 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Create new proving backend with the given recorder. pub fn new_with_recorder( backend: &'a TrieBackend, - proof_recorder: ProofRecorder, + proof_recorder: ProofRecorder, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); - let recorder = ProofRecorderBackend { - backend: essence.backend_storage(), - proof_recorder, - }; + let recorder = ProofRecorderBackend { backend: essence.backend_storage(), proof_recorder }; ProvingBackend(TrieBackend::new(recorder, root)) } /// Extracting the gathered unordered proof. pub fn extract_proof(&self) -> StorageProof { - let trie_nodes = self.0.essence().backend_storage().proof_recorder - .read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - StorageProof::new(trie_nodes) + self.0.essence().backend_storage().proof_recorder.to_storage_proof() + } + + /// Returns the estimated encoded size of the proof.
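// [Editor's note — illustrative usage, not part of this diff.] The new
// `ProofRecorder` is keyed by node hash and de-duplicates on `record`, so
// re-recording a node changes neither the proof contents nor the running size
// estimate. A hedged sketch (the recorder's hash generic is elided in this
// hunk; `H256` stands in for a concrete node hash type):
//
//     use sp_core::H256;
//
//     let recorder: ProofRecorder<H256> = Default::default();
//     let key = H256::repeat_byte(1);
//     recorder.record(key, Some(vec![1, 2, 3]));
//     let size_once = recorder.estimate_encoded_size();
//     recorder.record(key, Some(vec![1, 2, 3])); // entry already present: no-op
//     assert_eq!(recorder.estimate_encoded_size(), size_once);
//     assert_eq!(recorder.get(&key), Some(Some(vec![1, 2, 3])));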
+ /// + /// The estimate may be bigger (by at most 4 bytes), but never smaller than the actual + /// encoded proof. + pub fn estimate_encoded_size(&self) -> usize { + self.0.essence().backend_storage().proof_recorder.estimate_encoded_size() } } @@ -165,11 +229,12 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage type Overlay = S::Overlay; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - if let Some(v) = self.proof_recorder.read().get(key) { - return Ok(v.clone()); + if let Some(v) = self.proof_recorder.get(key) { + return Ok(v) } - let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.write().insert(key.clone(), backend_value.clone()); + + let backend_value = self.backend.get(key, prefix)?; + self.proof_recorder.record(key.clone(), backend_value.clone()); Ok(backend_value) } } @@ -183,10 +248,10 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug } impl<'a, S, H> Backend for ProvingBackend<'a, S, H> - where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - H::Out: Ord + Codec, +where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + H::Out: Ord + Codec, { type Error = String; type Transaction = S::Overlay; @@ -204,12 +269,24 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.child_storage(child_info, key) } - fn for_keys_in_child_storage( + fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.0.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.0.for_keys_in_child_storage(child_info, f) + self.0.apply_to_keys_while(child_info, prefix, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -238,7 +315,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> prefix: &[u8], f: F, ) { - self.0.for_child_keys_with_prefix( child_info, prefix, f) + self.0.for_child_keys_with_prefix(child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -249,30 +326,32 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.0.child_keys(child_info, prefix) } fn storage_root<'b>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { self.0.storage_root(delta) } fn child_storage_root<'b>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { self.0.child_storage_root(child_info, delta) } - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } + fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} fn usage_info(&self) -> crate::stats::UsageInfo { self.0.usage_info() @@ -299,15 +378,16 @@ where #[cfg(test)] mod tests { - use crate::InMemoryBackend; - use crate::trie_backend::tests::test_trie; use super::*; - use crate::proving_backend::create_proof_check_backend; - use sp_trie::PrefixedMemoryDB; + use crate::{ + proving_backend::create_proof_check_backend,
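// [Editor's note — illustrative, not part of this diff.] The "at most 4 bytes"
// slack in `estimate_encoded_size` comes from SCALE compact length prefixes:
// the estimate always charges `Compact(records.len())` (including `None`
// entries), while the final proof encodes `Compact(<number of Some nodes>)`,
// and compact `u32` encodings are 1, 2, 4 or 5 bytes long:
//
//     use codec::{Compact, Encode};
//
//     assert_eq!(Compact(1u32).encode().len(), 1);          // 0..=63 encode in 1 byte
//     assert_eq!(Compact(100u32).encode().len(), 2);        // 64..=16383 encode in 2 bytes
//     assert_eq!(Compact(1_000_000u32).encode().len(), 4);  // up to 2^30 - 1: 4 bytes
//     assert_eq!(Compact(u32::MAX).encode().len(), 5);      // big-integer mode: 5 bytes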
trie_backend::tests::test_trie, + InMemoryBackend, + }; use sp_runtime::traits::BlakeTwo256; + use sp_trie::PrefixedMemoryDB; fn test_proving<'a>( - trie_backend: &'a TrieBackend,BlakeTwo256>, + trie_backend: &'a TrieBackend, BlakeTwo256>, ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { ProvingBackend::new(trie_backend) } @@ -331,7 +411,7 @@ mod tests { use sp_core::H256; let result = create_proof_check_backend::( H256::from_low_u64_be(1), - StorageProof::empty() + StorageProof::empty(), ); assert!(result.is_err()); } @@ -343,8 +423,8 @@ mod tests { assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty()); + let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty()); assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } @@ -353,7 +433,7 @@ mod tests { fn proof_recorded_and_checked() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); + let in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -367,7 +447,8 @@ mod tests { let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); } @@ -379,48 +460,38 @@ mod tests { let child_info_2 = &child_info_2; let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_1.clone()), - (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_2.clone()), - (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(contents); + let in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; - let in_memory_root = in_memory.full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k|(k, std::iter::empty())) - ).0; - (0..64).for_each(|i| assert_eq!( - in_memory.storage(&[i]).unwrap().unwrap(), - vec![i] - )); - (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), - vec![i] - )); - (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), - vec![i] - )); + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + ) + .0; + (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + (28..65).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) + }); + (10..15).for_each(|i| { + 
assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) + }); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty()).0; + let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!( - trie.storage(&[i]).unwrap().unwrap(), - vec![i] - )); + (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); let proving = ProvingBackend::new(trie); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert!(proof_check.storage(&[0]).is_err()); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); // note that it is include in root because proof close @@ -431,13 +502,38 @@ mod tests { assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - assert_eq!( - proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), - vec![64] - ); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!(proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64]); + } + + #[test] + fn storage_proof_encoded_size_estimation_works() { + let trie_backend = test_trie(); + let backend = test_proving(&trie_backend); + + let check_estimation = + |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { + let storage_proof = backend.extract_proof(); + let estimation = + backend.0.essence().backend_storage().proof_recorder.estimate_encoded_size(); + + assert_eq!(storage_proof.encoded_size(), estimation); + }; + + assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); + check_estimation(&backend); + + assert_eq!(backend.storage(b"value1").unwrap(), Some(vec![42])); + check_estimation(&backend); + + assert_eq!(backend.storage(b"value2").unwrap(), Some(vec![24])); + check_estimation(&backend); + + assert!(backend.storage(b"doesnotexist").unwrap().is_none()); + check_estimation(&backend); + + assert!(backend.storage(b"doesnotexist2").unwrap().is_none()); + check_estimation(&backend); } } diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 1b70958145c70..5b7d568b0311e 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,17 +17,18 @@ //! Read-only version of Externalities. -use std::{ - any::{TypeId, Any}, - marker::PhantomData, -}; use crate::{Backend, StorageKey, StorageValue}; +use codec::Encode; use hash_db::Hasher; use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, - traits::Externalities, Blake2Hasher, + traits::Externalities, + Blake2Hasher, +}; +use std::{ + any::{Any, TypeId}, + marker::PhantomData, }; -use codec::Encode; /// Trait for inspecting state in any backend. 
/// @@ -79,39 +80,34 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< } fn storage(&self, key: &[u8]) -> Option { - self.backend.storage(key).expect("Backed failed for storage in ReadOnlyExternalities") + self.backend + .storage(key) + .expect("Backend failed for storage in ReadOnlyExternalities") } fn storage_hash(&self, key: &[u8]) -> Option> { self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { - self.backend.child_storage(child_info, key).expect("Backed failed for child_storage in ReadOnlyExternalities") + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + self.backend + .child_storage(child_info, key) + .expect("Backend failed for child_storage in ReadOnlyExternalities") } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option> { + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } fn next_storage_key(&self, key: &[u8]) -> Option { - self.backend.next_storage_key(key).expect("Backed failed for next_storage_key in ReadOnlyExternalities") + self.backend + .next_storage_key(key) + .expect("Backend failed for next_storage_key in ReadOnlyExternalities") } - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { - self.backend.next_child_storage_key(child_info, key) + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + self.backend + .next_child_storage_key(child_info, key) .expect("Backend failed for next_child_storage_key in ReadOnlyExternalities") } @@ -128,14 +124,11 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("place_child_storage not supported in ReadOnlyExternalities") } - fn kill_child_storage( - &mut self, - _child_info: &ChildInfo, - ) { + fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option) -> (bool, u32) { unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities") } - fn clear_prefix(&mut self, _prefix: &[u8]) { + fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option) -> (bool, u32) { unimplemented!("clear_prefix is not supported in ReadOnlyExternalities") } @@ -143,28 +136,20 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< &mut self, _child_info: &ChildInfo, _prefix: &[u8], - ) { + _limit: Option, + ) -> (bool, u32) { unimplemented!("clear_child_prefix is not supported in ReadOnlyExternalities") } - fn storage_append( - &mut self, - _key: Vec, - _value: Vec, - ) { + fn storage_append(&mut self, _key: Vec, _value: Vec) { unimplemented!("storage_append is not supported in ReadOnlyExternalities") } - fn chain_id(&self) -> u64 { 42 } - fn storage_root(&mut self) -> Vec { unimplemented!("storage_root is not supported in ReadOnlyExternalities") } - fn child_storage_root( - &mut self, - _child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, _child_info: &ChildInfo) -> Vec { unimplemented!("child_storage_root is not supported in ReadOnlyExternalities") } @@ -203,9 +188,15 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in ReadOnlyExternalities") } + + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + unimplemented!("get_read_and_written_keys is not supported in 
ReadOnlyExternalities") + } } -impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore for ReadOnlyExternalities<'a, H, B> { +impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore + for ReadOnlyExternalities<'a, H, B> +{ fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> { unimplemented!("extension_by_type_id is not supported in ReadOnlyExternalities") } @@ -218,7 +209,10 @@ impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore for Rea unimplemented!("register_extension_with_type_id is not supported in ReadOnlyExternalities") } - fn deregister_extension_by_type_id(&mut self, _type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + _type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { unimplemented!("deregister_extension_by_type_id is not supported in ReadOnlyExternalities") } } diff --git a/primitives/state-machine/src/stats.rs b/primitives/state-machine/src/stats.rs index f84de6a5bad07..affd71f9d2e5d 100644 --- a/primitives/state-machine/src/stats.rs +++ b/primitives/state-machine/src/stats.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,9 +17,9 @@ //! Usage statistics for state db -#[cfg(feature = "std")] -use std::time::{Instant, Duration}; use sp_std::cell::RefCell; +#[cfg(feature = "std")] +use std::time::{Duration, Instant}; /// Measured count of operations and total bytes. #[derive(Clone, Debug, Default)] diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 4dcd308285625..ec1772ba8666f 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,61 +17,60 @@ //! Test implementation for Externalities. 
-use std::{any::{Any, TypeId}, panic::{AssertUnwindSafe, UnwindSafe}}; +use std::{ + any::{Any, TypeId}, + panic::{AssertUnwindSafe, UnwindSafe}, +}; use crate::{ - backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, - StorageKey, StorageValue, + backend::Backend, changes_trie::{ - Configuration as ChangesTrieConfiguration, - InMemoryStorage as ChangesTrieInMemoryStorage, - BlockNumber as ChangesTrieBlockNumber, - State as ChangesTrieState, + BlockNumber as ChangesTrieBlockNumber, Configuration as ChangesTrieConfiguration, + InMemoryStorage as ChangesTrieInMemoryStorage, State as ChangesTrieState, }, + ext::Ext, + InMemoryBackend, OverlayedChanges, StorageKey, StorageTransactionCache, StorageValue, }; -use codec::{Decode, Encode}; +use codec::Decode; use hash_db::Hasher; use sp_core::{ - offchain::{ - testing::TestPersistentOffchainDB, - storage::OffchainOverlayedChanges - }, + offchain::testing::TestPersistentOffchainDB, storage::{ - well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, + well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE}, Storage, }, - traits::TaskExecutorExt, testing::TaskExecutor, + traits::TaskExecutorExt, }; -use sp_externalities::{Extensions, Extension}; +use sp_externalities::{Extension, ExtensionStore, Extensions}; /// Simple HashMap-based Externalities impl. pub struct TestExternalities where H::Out: codec::Codec + Ord, { + /// The overlay changed storage. overlay: OverlayedChanges, - offchain_overlay: OffchainOverlayedChanges, offchain_db: TestPersistentOffchainDB, - storage_transaction_cache: StorageTransactionCache< - as Backend>::Transaction, H, N - >, - backend: InMemoryBackend, + storage_transaction_cache: + StorageTransactionCache< as Backend>::Transaction, H, N>, + /// Storage backend. + pub backend: InMemoryBackend, changes_trie_config: Option, changes_trie_storage: ChangesTrieInMemoryStorage, - extensions: Extensions, + /// Extensions. + pub extensions: Extensions, } impl TestExternalities - where - H::Out: Ord + 'static + codec::Codec +where + H::Out: Ord + 'static + codec::Codec, { /// Get externalities implementation. pub fn ext(&mut self) -> Ext> { Ext::new( &mut self.overlay, - &mut self.offchain_overlay, &mut self.storage_transaction_cache, &self.backend, match self.changes_trie_config.clone() { @@ -99,18 +98,17 @@ impl TestExternalities /// Create a new instance of `TestExternalities` with code and storage. pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { let mut overlay = OverlayedChanges::default(); - let changes_trie_config = storage.top.get(CHANGES_TRIE_CONFIG) + let changes_trie_config = storage + .top + .get(CHANGES_TRIE_CONFIG) .and_then(|v| Decode::decode(&mut &v[..]).ok()); overlay.set_collect_extrinsics(changes_trie_config.is_some()); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); - storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); storage.top.insert(CODE.to_vec(), code.to_vec()); - let offchain_overlay = OffchainOverlayedChanges::enabled(); - let mut extensions = Extensions::default(); extensions.register(TaskExecutorExt::new(TaskExecutor::new())); @@ -118,7 +116,6 @@ impl TestExternalities TestExternalities { overlay, - offchain_overlay, offchain_db, changes_trie_config, extensions, @@ -128,9 +125,14 @@ impl TestExternalities } } + /// Returns the overlayed changes. 
+ pub fn overlayed_changes(&self) -> &OverlayedChanges { + &self.overlay + } + /// Move offchain changes from overlay to the persistent store. pub fn persist_offchain_overlay(&mut self) { - self.offchain_db.apply_offchain_changes(&mut self.offchain_overlay); + self.offchain_db.apply_offchain_changes(self.overlay.offchain_drain_committed()); } /// A shared reference type around the offchain worker storage. @@ -153,25 +155,43 @@ impl TestExternalities &mut self.changes_trie_storage } - /// Return a new backend with all pending value. - pub fn commit_all(&self) -> InMemoryBackend { - let top: Vec<_> = self.overlay.changes() - .map(|(k, v)| (k.clone(), v.value().cloned())) - .collect(); + /// Return a new backend with all pending changes. + /// + /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open + /// transactions. + fn as_backend(&self) -> InMemoryBackend { + let top: Vec<_> = + self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); let mut transaction = vec![(None, top)]; for (child_changes, child_info) in self.overlay.children() { transaction.push(( Some(child_info.clone()), - child_changes - .map(|(k, v)| (k.clone(), v.value().cloned())) - .collect(), + child_changes.map(|(k, v)| (k.clone(), v.value().cloned())).collect(), )) } self.backend.update(transaction) } + /// Commit all pending changes to the underlying backend. + /// + /// # Panics + /// + /// This will panic if there are still open transactions. + pub fn commit_all(&mut self) -> Result<(), String> { + let changes = self.overlay.drain_storage_changes::<_, _, N>( + &self.backend, + None, + Default::default(), + &mut Default::default(), + )?; + + self.backend + .apply_transaction(changes.transaction_storage_root, changes.transaction); + Ok(()) + } + /// Execute the given closure while `self` is set as externalities. /// /// Returns the result of the given closure. @@ -184,18 +204,21 @@ impl TestExternalities /// /// Returns the result of the given closure, if no panics occurred. /// Otherwise, returns `Err`. 
- pub fn execute_with_safe(&mut self, f: impl FnOnce() -> R + UnwindSafe) -> Result { + pub fn execute_with_safe( + &mut self, + f: impl FnOnce() -> R + UnwindSafe, + ) -> Result { let mut ext = AssertUnwindSafe(self.ext()); - std::panic::catch_unwind(move || + std::panic::catch_unwind(move || { sp_externalities::set_and_run_with_externalities(&mut *ext, f) - ).map_err(|e| { - format!("Closure panicked: {:?}", e) }) + .map_err(|e| format!("Closure panicked: {:?}", e)) } } impl std::fmt::Debug for TestExternalities - where H::Out: Ord + codec::Codec, +where + H::Out: Ord + codec::Codec, { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "overlay: {:?}\nbackend: {:?}", self.overlay, self.backend.pairs()) @@ -203,33 +226,36 @@ impl std::fmt::Debug for TestExternalities } impl PartialEq for TestExternalities - where - H::Out: Ord + 'static + codec::Codec +where + H::Out: Ord + 'static + codec::Codec, { /// This doesn't test if they are in the same state, only if they contain the /// same data at this state fn eq(&self, other: &TestExternalities) -> bool { - self.commit_all().eq(&other.commit_all()) + self.as_backend().eq(&other.as_backend()) } } impl Default for TestExternalities - where - H::Out: Ord + 'static + codec::Codec, +where + H::Out: Ord + 'static + codec::Codec, { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + Self::new(Default::default()) + } } impl From for TestExternalities - where - H::Out: Ord + 'static + codec::Codec, +where + H::Out: Ord + 'static + codec::Codec, { fn from(storage: Storage) -> Self { Self::new(storage) } } -impl sp_externalities::ExtensionStore for TestExternalities where +impl sp_externalities::ExtensionStore for TestExternalities +where H: Hasher, H::Out: Ord + codec::Codec, N: ChangesTrieBlockNumber, @@ -246,7 +272,10 @@ impl sp_externalities::ExtensionStore for TestExternalities where self.extensions.register_with_type_id(type_id, extension) } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if self.extensions.deregister(type_id) { Ok(()) } else { @@ -255,12 +284,31 @@ impl sp_externalities::ExtensionStore for TestExternalities where } } +impl sp_externalities::ExternalitiesExt for TestExternalities +where + H: Hasher, + H::Out: Ord + codec::Codec, + N: ChangesTrieBlockNumber, +{ + fn extension(&mut self) -> Option<&mut T> { + self.extension_by_type_id(TypeId::of::()).and_then(::downcast_mut) + } + + fn register_extension(&mut self, ext: T) -> Result<(), sp_externalities::Error> { + self.register_extension_with_type_id(TypeId::of::(), Box::new(ext)) + } + + fn deregister_extension(&mut self) -> Result<(), sp_externalities::Error> { + self.deregister_extension_by_type_id(TypeId::of::()) + } +} + #[cfg(test)] mod tests { use super::*; - use sp_core::{H256, traits::Externalities}; - use sp_runtime::traits::BlakeTwo256; use hex_literal::hex; + use sp_core::{storage::ChildInfo, traits::Externalities, H256}; + use sp_runtime::traits::BlakeTwo256; #[test] fn commit_should_work() { @@ -269,7 +317,8 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - let root = H256::from(hex!("2a340d3dfd52f5992c6b117e9e45f479e6da5afffafeb26ab619cf137a95aeb8")); + let root = + 
H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); assert_eq!(H256::from_slice(ext.storage_root().as_slice()), root); } @@ -287,6 +336,47 @@ mod tests { #[test] fn check_send() { fn assert_send() {} - assert_send::>(); + assert_send::>(); + } + + #[test] + fn commit_all_and_kill_child_storage() { + let mut ext = TestExternalities::::default(); + let child_info = ChildInfo::new_default(&b"test_child"[..]); + + { + let mut ext = ext.ext(); + ext.place_child_storage(&child_info, b"doe".to_vec(), Some(b"reindeer".to_vec())); + ext.place_child_storage(&child_info, b"dog".to_vec(), Some(b"puppy".to_vec())); + ext.place_child_storage(&child_info, b"dog2".to_vec(), Some(b"puppy2".to_vec())); + } + + ext.commit_all().unwrap(); + + { + let mut ext = ext.ext(); + + assert!(!ext.kill_child_storage(&child_info, Some(2)).0, "Should not delete all keys"); + + assert!(ext.child_storage(&child_info, &b"doe"[..]).is_none()); + assert!(ext.child_storage(&child_info, &b"dog"[..]).is_none()); + assert!(ext.child_storage(&child_info, &b"dog2"[..]).is_some()); + } + } + + #[test] + fn as_backend_generates_same_backend_as_commit_all() { + let mut ext = TestExternalities::::default(); + { + let mut ext = ext.ext(); + ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); + ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); + ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); + } + + let backend = ext.as_backend(); + + ext.commit_all().unwrap(); + assert!(ext.backend.eq(&backend), "Both backend should be equal."); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 4eaa0870baed0..7cb725a80503d 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,29 +17,33 @@ //! Trie-based state machine backend. -use crate::{warn, debug}; -use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildType}; -use codec::{Codec, Decode}; use crate::{ - StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, + debug, + trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, + warn, Backend, StorageKey, StorageValue, }; +use codec::{Codec, Decode}; +use hash_db::Hasher; +use sp_core::storage::{ChildInfo, ChildType}; use sp_std::{boxed::Box, vec::Vec}; +use sp_trie::{ + child_delta_trie_root, delta_trie_root, empty_child_trie_root, + trie_types::{Layout, TrieDB, TrieError}, + Trie, +}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { - pub (crate) essence: TrieBackendEssence, + pub(crate) essence: TrieBackendEssence, } -impl, H: Hasher> TrieBackend where H::Out: Codec { +impl, H: Hasher> TrieBackend +where + H::Out: Codec, +{ /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { - essence: TrieBackendEssence::new(storage, root), - } + TrieBackend { essence: TrieBackendEssence::new(storage, root) } } /// Get backend essence reference. 
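The two tests above pin down the split introduced in `TestExternalities`: `as_backend` projects overlay plus backend into a fresh backend even while transactions are open, whereas `commit_all` drains the overlay for real. Roughly (a sketch mirroring `as_backend_generates_same_backend_as_commit_all` above, with `BlakeTwo256`/`u64` as in the tests):

let mut ext = TestExternalities::<BlakeTwo256, u64>::default();
{
    // Writes land in the overlay first, not in the backend.
    let mut inner = ext.ext();
    inner.set_storage(b"doe".to_vec(), b"reindeer".to_vec());
}

// `as_backend` merely projects overlay + backend into a fresh backend,
// so it also works while transactions are open.
let snapshot = ext.as_backend();

// `commit_all` drains the overlay into `ext.backend`; it panics if a
// transaction is still open and errors if the drain itself fails.
ext.commit_all().unwrap();
assert!(ext.backend.eq(&snapshot));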
@@ -52,11 +56,6 @@ impl, H: Hasher> TrieBackend where H::Out: Codec self.essence.backend_storage() } - /// Get backend storage reference. - pub fn backend_storage_mut(&mut self) -> &mut S { - self.essence.backend_storage_mut() - } - /// Get trie root. pub fn root(&self) -> &H::Out { self.essence.root() @@ -74,7 +73,8 @@ impl, H: Hasher> sp_std::fmt::Debug for TrieBackend, H: Hasher> Backend for TrieBackend where +impl, H: Hasher> Backend for TrieBackend +where H::Out: Ord + Codec, { type Error = crate::DefaultError; @@ -113,12 +113,25 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage( + fn apply_to_key_values_while, Vec) -> bool>( &self, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.essence + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, f: F, ) { - self.essence.for_keys_in_child_storage(child_info, f) + self.essence.apply_to_keys_while(child_info, prefix, f) } fn for_child_keys_with_prefix( @@ -147,7 +160,7 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => { debug!(target: "trie", "Error extracting trie values: {}", e); Vec::new() - } + }, } } @@ -165,21 +178,23 @@ impl, H: Hasher> Backend for TrieBackend where Ok(v) }; - collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() + collect_all() + .map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)) + .unwrap_or_default() } fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); match delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) { Ok(ret) => root = ret, @@ -193,17 +208,21 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { let default_root = match child_info.child_type() { - ChildType::ParentKeyId => empty_child_trie_root::>() + ChildType::ParentKeyId => empty_child_trie_root::>(), }; let mut write_overlay = S::Overlay::default(); let prefixed_storage_key = child_info.prefixed_storage_key(); let mut root = match self.storage(prefixed_storage_key.as_slice()) { - Ok(value) => - value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or_else(|| default_root.clone()), + Ok(value) => value + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .unwrap_or_else(|| default_root.clone()), Err(e) => { warn!(target: "trie", "Failed to read child storage root: {}", e); default_root.clone() @@ -211,10 +230,7 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); match child_delta_trie_root::, _, _, _, _, _, _>( 
child_info.keyspace(), @@ -232,11 +248,11 @@ impl, H: Hasher> Backend for TrieBackend where (root, is_default, write_overlay) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn as_trie_backend(&self) -> Option<&TrieBackend> { Some(self) } - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } + fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} fn usage_info(&self) -> crate::UsageInfo { crate::UsageInfo::empty() @@ -249,12 +265,12 @@ impl, H: Hasher> Backend for TrieBackend where #[cfg(test)] pub mod tests { - use std::{collections::HashSet, iter}; - use sp_core::H256; + use super::*; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_core::H256; use sp_runtime::traits::BlakeTwo256; - use super::*; + use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; + use std::{collections::HashSet, iter}; const CHILD_KEY_1: &[u8] = b"sub1"; @@ -300,9 +316,27 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), + test_trie + .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") + .unwrap(), Some(vec![142u8]), ); + // Change cache entry to check that caching is active. + test_trie + .essence + .cache + .write() + .child_root + .entry(b"sub1".to_vec()) + .and_modify(|value| { + *value = None; + }); + assert_eq!( + test_trie + .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") + .unwrap(), + None, + ); } #[test] @@ -320,7 +354,9 @@ pub mod tests { assert!(TrieBackend::, BlakeTwo256>::new( PrefixedMemoryDB::default(), Default::default(), - ).pairs().is_empty()); + ) + .pairs() + .is_empty()); } #[test] @@ -335,9 +371,8 @@ pub mod tests { #[test] fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root( - iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), - ); + let (new_root, mut tx) = + test_trie().storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..])))); assert!(!tx.drain().is_empty()); assert!(new_root != test_trie().storage_root(iter::empty()).0); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 37bbbb7cf9822..557a098fbaf79 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,24 +18,28 @@ //! Trie-based state machine backend essence used to read values //! from storage. 
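Two themes run through the `trie_backend_essence` hunks below: a shared cache for child trie roots, and iteration helpers (`apply_to_key_values_while`, `apply_to_keys_while`) that can stop early, resume from a `start_at` key, and optionally tolerate missing proof nodes. The caching pattern, reduced to a self-contained sketch (all names here are illustrative; `parking_lot` is the same lock the diff uses):

use std::{collections::HashMap, sync::Arc};
use parking_lot::RwLock;

/// Illustrative stand-in for the child-root cache: maps a child storage key
/// to its (possibly absent) root, so repeated lookups skip the trie walk.
#[derive(Default)]
struct RootCache {
    child_root: HashMap<Vec<u8>, Option<Vec<u8>>>,
}

fn cached_child_root(
    cache: &Arc<RwLock<RootCache>>,
    storage_key: &[u8],
    fetch: impl FnOnce() -> Option<Vec<u8>>,
) -> Option<Vec<u8>> {
    // Fast path: a previous lookup (even a failed one) is served from memory.
    if let Some(hit) = cache.read().child_root.get(storage_key) {
        return hit.clone()
    }
    let fetched = fetch();
    // Negative results are cached too: `None` means "this child trie has no root".
    cache.write().child_root.insert(storage_key.to_vec(), fetched.clone());
    fetched
}

Caching the `None` case matters, since lookups of non-existent child tries would otherwise hit the main trie every time; and, as the hunks below show, `set_root` swaps in a fresh cache because a new root invalidates every memoised entry.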
-#[cfg(feature = "std")] -use std::sync::Arc; -use sp_std::{ops::Deref, boxed::Box, vec::Vec}; -use crate::{warn, debug}; +use crate::{backend::Consolidate, debug, warn, StorageKey, StorageValue}; +use codec::Encode; use hash_db::{self, Hasher, Prefix}; -use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - empty_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use crate::{backend::Consolidate, StorageKey, StorageValue}; +#[cfg(feature = "std")] +use parking_lot::RwLock; use sp_core::storage::ChildInfo; -use codec::Encode; +use sp_std::{boxed::Box, ops::Deref, vec::Vec}; +use sp_trie::{ + empty_child_trie_root, read_child_trie_value, read_trie_value, + trie_types::{Layout, TrieDB, TrieError}, + DBValue, KeySpacedDB, MemoryDB, PrefixedMemoryDB, Trie, TrieDBIterator, +}; +#[cfg(feature = "std")] +use std::collections::HashMap; +#[cfg(feature = "std")] +use std::sync::Arc; #[cfg(not(feature = "std"))] macro_rules! format { - ($($arg:tt)+) => ( + ($($arg:tt)+) => { crate::DefaultError - ); + }; } type Result = sp_std::result::Result; @@ -46,20 +50,40 @@ pub trait Storage: Send + Sync { fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } +/// Local cache for child trie roots. +#[cfg(feature = "std")] +pub(crate) struct Cache { + pub child_root: HashMap, Option>>, +} + +#[cfg(feature = "std")] +impl Cache { + fn new() -> Self { + Cache { child_root: HashMap::new() } + } +} + /// Patricia trie-based pairs storage essence. pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, empty: H::Out, + #[cfg(feature = "std")] + pub(crate) cache: Arc>, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence +where + H::Out: Encode, +{ /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { storage, root, empty: H::hash(&[0u8]), + #[cfg(feature = "std")] + cache: Arc::new(RwLock::new(Cache::new())), } } @@ -68,11 +92,6 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self.storage } - /// Get backend storage reference. - pub fn backend_storage_mut(&mut self) -> &mut S { - &mut self.storage - } - /// Get trie root. pub fn root(&self) -> &H::Out { &self.root @@ -80,9 +99,19 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Set trie root. This is useful for testing. pub fn set_root(&mut self, root: H::Out) { + // If the root changed, any cached content may be stale, so drop the cache. + self.reset_cache(); self.root = root; } + #[cfg(feature = "std")] + fn reset_cache(&mut self) { + self.cache = Arc::new(RwLock::new(Cache::new())); + } + + #[cfg(not(feature = "std"))] + fn reset_cache(&mut self) {} + /// Consumes self and returns underlying storage. pub fn into_storage(self) -> S { self.storage @@ -96,7 +125,24 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Access the root of the child storage in its parent trie fn child_root(&self, child_info: &ChildInfo) -> Result> { - self.storage(child_info.prefixed_storage_key().as_slice()) + #[cfg(feature = "std")] + { + if let Some(result) = self.cache.read().child_root.get(child_info.storage_key()) { + return Ok(result.clone()) + } + } + + let result = self.storage(child_info.prefixed_storage_key().as_slice())?; + + #[cfg(feature = "std")] + { + self.cache + .write() + .child_root + .insert(child_info.storage_key().to_vec(), result.clone()); + } + + Ok(result) } /// Return the next key in the child trie i.e. 
the minimum key that is strictly superior to @@ -114,7 +160,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let mut hash = H::Out::default(); if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); + return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())) } // note: child_root and hash must be same size, panics otherwise. hash.as_mut().copy_from_slice(&child_root[..]); @@ -138,10 +184,9 @@ impl, H: Hasher> TrieBackendEssence where H::Out: dyn_eph = self; } - let trie = TrieDB::::new(dyn_eph, root) - .map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = trie.iter() - .map_err(|e| format!("TrieDB iteration error: {}", e))?; + let trie = + TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; + let mut iter = trie.iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; // The key just after the one given in input, basically `key++0`. // Note: We are sure this is the next key if: @@ -157,8 +202,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let next_element = iter.next(); let next_key = if let Some(next_element) = next_element { - let (next_key, _) = next_element - .map_err(|e| format!("TrieDB iterator next error: {}", e))?; + let (next_key, _) = + next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; Some(next_key) } else { None @@ -180,7 +225,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: &ChildInfo, key: &[u8], ) -> Result> { - let root = self.child_root(child_info)? + let root = self + .child_root(child_info)? .unwrap_or_else(|| empty_child_trie_root::>().encode()); let map_e = |e| format!("Trie lookup error: {}", e); @@ -189,73 +235,145 @@ impl, H: Hasher> TrieBackendEssence where H::Out: .map_err(map_e) } - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( + /// Retrieve all entries keys of storage and call `f` for each of those keys. + /// Aborts as soon as `f` returns false. + /// + /// Returns `true` when all keys were iterated. + pub fn apply_to_key_values_while( &self, - child_info: &ChildInfo, - f: F, - ) { - let root = match self.child_root(child_info) { - Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: impl FnMut(Vec, Vec) -> bool, + allow_missing_nodes: bool, + ) -> Result { + let mut child_root; + let root = if let Some(child_info) = child_info.as_ref() { + if let Some(fetched_child_root) = self.child_root(child_info)? { + child_root = H::Out::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + child_root.as_mut().copy_from_slice(fetched_child_root.as_slice()); + + &child_root + } else { + return Ok(true) } + } else { + &self.root }; - if let Err(e) = for_keys_in_child_trie::, _, _>( - child_info.keyspace(), - self, - &root, - f, - ) { - debug!(target: "trie", "Error while iterating child storage: {}", e); - } + self.trie_iter_inner(&root, prefix, f, child_info, start_at, allow_missing_nodes) + } + + /// Retrieve all entries keys of a storage and call `f` for each of those keys. + /// Aborts as soon as `f` returns false. 
+ pub fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + mut f: F, + ) { + let mut child_root = H::Out::default(); + let root = if let Some(child_info) = child_info.as_ref() { + let root_vec = match self.child_root(child_info) { + Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return + }, + }; + child_root.as_mut().copy_from_slice(&root_vec); + &child_root + } else { + &self.root + }; + + let _ = self.trie_iter_inner( + root, + prefix, + |k, _v| { + f(&k); + true + }, + child_info, + None, + false, + ); } /// Execute given closure for all keys starting with prefix. - pub fn for_child_keys_with_prefix( + pub fn for_child_keys_with_prefix( &self, child_info: &ChildInfo, prefix: &[u8], - mut f: F, + mut f: impl FnMut(&[u8]), ) { let root_vec = match self.child_root(child_info) { Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } + return + }, }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) + let _ = self.trie_iter_inner( + &root, + Some(prefix), + |k, _v| { + f(&k); + true + }, + Some(child_info), + None, + false, + ); } /// Execute given closure for all keys starting with prefix. pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) + let _ = self.trie_iter_inner( + &self.root, + Some(prefix), + |k, _v| { + f(&k); + true + }, + None, + None, + false, + ); } - fn keys_values_with_prefix_inner( + fn trie_iter_inner, Vec) -> bool>( &self, root: &H::Out, - prefix: &[u8], + prefix: Option<&[u8]>, mut f: F, child_info: Option<&ChildInfo>, - ) { - let mut iter = move |db| -> sp_std::result::Result<(), Box>> { + start_at: Option<&[u8]>, + allow_missing_nodes: bool, + ) -> Result { + let mut iter = move |db| -> sp_std::result::Result>> { let trie = TrieDB::::new(db, root)?; - for x in TrieDBIterator::new_prefixed(&trie, prefix)? { + let prefix = prefix.unwrap_or(&[]); + let iterator = if let Some(start_at) = start_at { + TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? + } else { + TrieDBIterator::new_prefixed(&trie, prefix)? + }; + for x in iterator { let (key, value) = x?; debug_assert!(key.starts_with(prefix)); - f(&key, &value); + if !f(key, value) { + return Ok(false) + } } - Ok(()) + Ok(true) }; let result = if let Some(child_info) = child_info { @@ -264,14 +382,27 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } else { iter(self) }; - if let Err(e) = result { - debug!(target: "trie", "Error while iterating by prefix: {}", e); + match result { + Ok(completed) => Ok(completed), + Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => + Ok(false), + Err(e) => Err(format!("TrieDB iteration error: {}", e)), } } /// Execute given closure for all key and values starting with prefix. 
- pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, None) + pub fn for_key_values_with_prefix(&self, prefix: &[u8], mut f: F) { + let _ = self.trie_iter_inner( + &self.root, + Some(prefix), + |k, v| { + f(&k, &v); + true + }, + None, + None, + false, + ); } } @@ -283,16 +414,17 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + self + } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { - Ephemeral { - storage, - overlay, - } + Ephemeral { storage, overlay } } } @@ -380,13 +512,15 @@ impl TrieBackendStorage for MemoryDB { impl, H: Hasher> hash_db::AsHashDB for TrieBackendEssence { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + self + } } -impl, H: Hasher> hash_db::HashDB - for TrieBackendEssence -{ +impl, H: Hasher> hash_db::HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { return Some([0u8].to_vec()) @@ -429,12 +563,11 @@ impl, H: Hasher> hash_db::HashDBRef } } - #[cfg(test)] mod test { - use sp_core::{Blake2Hasher, H256}; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; + use sp_core::{Blake2Hasher, H256}; + use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; #[test] fn next_storage_key_and_next_child_storage_key_work() { @@ -478,20 +611,10 @@ mod test { let mdb = essence_1.into_storage(); let essence_2 = TrieBackendEssence::new(mdb, root_2); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"6"), Ok(None) - ); + assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"6"), Ok(None)); } } diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index 5b988cabc150a..bf815c1c80c56 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-std" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" diff --git a/primitives/std/src/lib.rs 
b/primitives/std/src/lib.rs index b323c43720da1..3af4d07ac6297 100644 --- a/primitives/std/src/lib.rs +++ b/primitives/std/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,11 +19,14 @@ //! or client/alloc to be used with any code that depends on the runtime. #![cfg_attr(not(feature = "std"), no_std)] - -#![cfg_attr(feature = "std", - doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), - doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." +)] #[macro_export] macro_rules! map { @@ -55,7 +58,7 @@ macro_rules! if_std { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! if_std { - ( $( $code:tt )* ) => {} + ( $( $code:tt )* ) => {}; } #[cfg(feature = "std")] @@ -64,7 +67,6 @@ include!("../with_std.rs"); #[cfg(not(feature = "std"))] include!("../without_std.rs"); - /// A target for `core::write!` macro - constructs a string in memory. #[derive(Default)] pub struct Writer(vec::Vec); @@ -92,10 +94,12 @@ impl Writer { /// /// This should include only things which are in the normal std prelude. pub mod prelude { - pub use crate::vec::Vec; - pub use crate::boxed::Box; - pub use crate::cmp::{Eq, PartialEq, Reverse}; - pub use crate::clone::Clone; + pub use crate::{ + boxed::Box, + clone::Clone, + cmp::{Eq, PartialEq, Reverse}, + vec::Vec, + }; // Re-export `vec!` macro here, but not in `std` mode, since // std's prelude already brings `vec!` into the scope. diff --git a/primitives/std/with_std.rs b/primitives/std/with_std.rs index 92e804b27e1d0..8a283e8fe333c 100644 --- a/primitives/std/with_std.rs +++ b/primitives/std/with_std.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,6 +37,7 @@ pub use std::sync; pub use std::result; pub use std::slice; pub use std::str; +pub use core::time; pub use std::vec; pub mod collections { diff --git a/primitives/std/without_std.rs b/primitives/std/without_std.rs index 3c130d547a1e4..38c3a8421dacb 100755 --- a/primitives/std/without_std.rs +++ b/primitives/std/without_std.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +39,7 @@ pub use core::result; pub use core::slice; // Allow interpreting vectors of bytes as strings, but not constructing them. pub use core::str; +pub use core::time; // We are trying to avoid certain things here, such as `core::string` // (if you need `String` you are probably doing something wrong, since // runtime doesn't require anything human readable). 
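The `sp_std::Writer` shown above is the `no_std` answer to building human-readable output: `core::write!` drives it and the bytes accumulate in an in-memory `Vec<u8>`. A usage sketch, assuming (as in the upstream crate, though elided in this diff) that `Writer` implements `core::fmt::Write` and exposes its buffer via `inner()`:

use core::fmt::Write as _;

let mut w = sp_std::Writer::default();
// `write!` needs only `core::fmt::Write`, so this also works without `std`
// and without allocating a `String`.
let _ = write!(&mut w, "imported block #{}", 42u32);
assert_eq!(w.inner().as_slice(), b"imported block #42");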
diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 4f14ba38f2147..1a05fb9969197 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-storage" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" @@ -14,12 +14,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -serde = { version = "1.0.101", optional = true, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +serde = { version = "1.0.126", optional = true, features = ["derive"] } impl-serde = { version = "0.3.1", optional = true } ref-cast = "1.0.0" -sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-debug-derive = { version = "3.0.0", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index b253733e7b29e..45474a44693ab 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,38 +20,78 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, ops::{Deref, DerefMut}}; +use codec::{Decode, Encode}; use ref_cast::RefCast; -use codec::{Encode, Decode}; +use sp_std::{ + ops::{Deref, DerefMut}, + vec::Vec, +}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone, Encode, Decode) +)] pub struct StorageKey( - #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] - pub Vec, + #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] pub Vec, ); +impl AsRef<[u8]> for StorageKey { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + /// Storage key with read/write tracking information. #[derive(PartialEq, Eq, RuntimeDebug, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Hash, PartialOrd, Ord))] pub struct TrackedStorageKey { pub key: Vec, - pub has_been_read: bool, - pub has_been_written: bool, + pub reads: u32, + pub writes: u32, + pub whitelisted: bool, +} + +impl TrackedStorageKey { + /// Create a default `TrackedStorageKey` + pub fn new(key: Vec) -> Self { + Self { key, reads: 0, writes: 0, whitelisted: false } + } + /// Check if this key has been "read", i.e. it exists in the memory overlay. + /// + /// Can be true if the key has been read, has been written to, or has been + /// whitelisted. + pub fn has_been_read(&self) -> bool { + self.whitelisted || self.reads > 0u32 || self.has_been_written() + } + /// Check if this key has been "written", i.e. a new value will be committed to the database. 
+ /// + /// Can be true if the key has been written to, or has been whitelisted. + pub fn has_been_written(&self) -> bool { + self.whitelisted || self.writes > 0u32 + } + /// Add a storage read to this key. + pub fn add_read(&mut self) { + self.reads += 1; + } + /// Add a storage write to this key. + pub fn add_write(&mut self) { + self.writes += 1; + } + /// Whitelist this key. + pub fn whitelist(&mut self) { + self.whitelisted = true; + } } -// Easily convert a key to a `TrackedStorageKey` that has been read and written to. +// Easily convert a key to a `TrackedStorageKey` that has been whitelisted. impl From> for TrackedStorageKey { fn from(key: Vec) -> Self { - Self { - key: key, - has_been_read: true, - has_been_written: true, - } + Self { key, reads: 0, writes: 0, whitelisted: true } } } @@ -61,8 +101,7 @@ impl From> for TrackedStorageKey { #[repr(transparent)] #[derive(RefCast)] pub struct PrefixedStorageKey( - #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] - Vec, + #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] Vec, ); impl Deref for PrefixedStorageKey { @@ -102,10 +141,12 @@ impl PrefixedStorageKey { /// Storage data associated to a [`StorageKey`]. #[derive(PartialEq, Eq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone, Encode, Decode, Default) +)] pub struct StorageData( - #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] - pub Vec, + #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] pub Vec, ); /// Map of data to use in a storage, it is a collection of @@ -180,6 +221,15 @@ pub mod well_known_keys { // Other code might depend on this, so be careful changing this. key.starts_with(CHILD_STORAGE_KEY_PREFIX) } + + /// Returns if the given `key` starts with [`CHILD_STORAGE_KEY_PREFIX`] or collides with it. + pub fn starts_with_child_storage_key(key: &[u8]) -> bool { + if key.len() > CHILD_STORAGE_KEY_PREFIX.len() { + key.starts_with(CHILD_STORAGE_KEY_PREFIX) + } else { + CHILD_STORAGE_KEY_PREFIX.starts_with(key) + } + } } /// Information related to a child state. @@ -201,9 +251,7 @@ impl ChildInfo { /// Same as `new_default` but with `Vec` as input. pub fn new_default_from_vec(storage_key: Vec) -> Self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - data: storage_key, - }) + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data: storage_key }) } /// Try to update with another instance, return false if both instance @@ -228,9 +276,7 @@ impl ChildInfo { /// child trie. pub fn storage_key(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - data, - }) => &data[..], + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => &data[..], } } @@ -238,9 +284,8 @@ impl ChildInfo { /// this trie. pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - data, - }) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => + ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), } } @@ -248,9 +293,7 @@ impl ChildInfo { /// this trie. 
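The reworked `TrackedStorageKey` above swaps the old pair of booleans for read/write counters plus an explicit whitelist flag; the accessors encode the implication chain whitelisted => written => read. In short (a sketch using only the methods shown above):

use sp_core::storage::TrackedStorageKey;

let mut key = TrackedStorageKey::new(b"balance".to_vec());
assert!(!key.has_been_read() && !key.has_been_written());

key.add_read();
assert!(key.has_been_read() && !key.has_been_written());

key.add_write();
// A write implies the key also counts as read.
assert!(key.has_been_read() && key.has_been_written());

// `From<Vec<u8>>` yields an already-whitelisted key with zero counters.
let whitelisted = TrackedStorageKey::from(b"total_issuance".to_vec());
assert!(whitelisted.has_been_read() && whitelisted.has_been_written());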
pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - mut data, - }) => { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { mut data }) => { ChildType::ParentKeyId.do_prefix_key(&mut data); PrefixedStorageKey(data) }, diff --git a/primitives/tasks/Cargo.toml b/primitives/tasks/Cargo.toml index 0c0f410824c81..ee503ae9b855f 100644 --- a/primitives/tasks/Cargo.toml +++ b/primitives/tasks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-tasks" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -15,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { version = "0.4.8", optional = true } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -sp-io = { version = "2.0.0", default-features = false, path = "../io" } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../io" } +sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } [dev-dependencies] -codec = { package = "parity-scale-codec", default-features = false, version = "1.3.1" } +codec = { package = "parity-scale-codec", default-features = false, version = "2.0.0" } [features] default = ["std"] diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 8994d069e4c76..975a81af4f53d 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,12 +18,12 @@ //! Async externalities. -use std::any::{TypeId, Any}; use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, - traits::{Externalities, SpawnNamed, TaskExecutorExt, RuntimeSpawnExt, RuntimeSpawn}, + traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt}, }; use sp_externalities::{Extensions, ExternalitiesExt as _}; +use std::any::{Any, TypeId}; /// Simple state-less externalities for use in async context. /// @@ -34,7 +34,9 @@ pub struct AsyncExternalities { } /// New Async externalities. 
-pub fn new_async_externalities(scheduler: Box) -> Result { +pub fn new_async_externalities( + scheduler: Box, +) -> Result { let mut res = AsyncExternalities { extensions: Default::default() }; let mut ext = &mut res as &mut dyn Externalities; ext.register_extension::(TaskExecutorExt(scheduler.clone())) @@ -74,19 +76,11 @@ impl Externalities for AsyncExternalities { panic!("`storage_hash`: should not be used in async externalities!") } - fn child_storage( - &self, - _child_info: &ChildInfo, - _key: &[u8], - ) -> Option { + fn child_storage(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { panic!("`child_storage`: should not be used in async externalities!") } - fn child_storage_hash( - &self, - _child_info: &ChildInfo, - _key: &[u8], - ) -> Option> { + fn child_storage_hash(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option> { panic!("`child_storage_hash`: should not be used in async externalities!") } @@ -94,11 +88,7 @@ impl Externalities for AsyncExternalities { panic!("`next_storage_key`: should not be used in async externalities!") } - fn next_child_storage_key( - &self, - _child_info: &ChildInfo, - _key: &[u8], - ) -> Option { + fn next_child_storage_key(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { panic!("`next_child_storage_key`: should not be used in async externalities!") } @@ -115,14 +105,11 @@ impl Externalities for AsyncExternalities { panic!("`place_child_storage`: should not be used in async externalities!") } - fn kill_child_storage( - &mut self, - _child_info: &ChildInfo, - ) { + fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option) -> (bool, u32) { panic!("`kill_child_storage`: should not be used in async externalities!") } - fn clear_prefix(&mut self, _prefix: &[u8]) { + fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option) -> (bool, u32) { panic!("`clear_prefix`: should not be used in async externalities!") } @@ -130,28 +117,20 @@ impl Externalities for AsyncExternalities { &mut self, _child_info: &ChildInfo, _prefix: &[u8], - ) { + _limit: Option, + ) -> (bool, u32) { panic!("`clear_child_prefix`: should not be used in async externalities!") } - fn storage_append( - &mut self, - _key: Vec, - _value: Vec, - ) { + fn storage_append(&mut self, _key: Vec, _value: Vec) { panic!("`storage_append`: should not be used in async externalities!") } - fn chain_id(&self) -> u64 { 42 } - fn storage_root(&mut self) -> Vec { panic!("`storage_root`: should not be used in async externalities!") } - fn child_storage_root( - &mut self, - _child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, _child_info: &ChildInfo) -> Vec { panic!("`child_storage_root`: should not be used in async externalities!") } @@ -190,6 +169,10 @@ impl Externalities for AsyncExternalities { fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in AsyncExternalities") } + + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + unimplemented!("get_read_and_written_keys is not supported in AsyncExternalities") + } } impl sp_externalities::ExtensionStore for AsyncExternalities { @@ -205,7 +188,10 @@ impl sp_externalities::ExtensionStore for AsyncExternalities { self.extensions.register_with_type_id(type_id, extension) } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if self.extensions.deregister(type_id) { Ok(()) } else { diff --git 
a/primitives/tasks/src/lib.rs b/primitives/tasks/src/lib.rs index 030e178109d7b..e9c80ae5ff4c8 100644 --- a/primitives/tasks/src/lib.rs +++ b/primitives/tasks/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,7 +49,6 @@ //! //! When allowing unbounded parallelism, malicious transactions can exploit it and partition //! network consensus based on how much resources nodes have. -//! #![cfg_attr(not(feature = "std"), no_std)] @@ -61,9 +60,9 @@ pub use async_externalities::{new_async_externalities, AsyncExternalities}; #[cfg(feature = "std")] mod inner { - use std::{panic::AssertUnwindSafe, sync::mpsc}; - use sp_externalities::ExternalitiesExt as _; use sp_core::traits::TaskExecutorExt; + use sp_externalities::ExternalitiesExt as _; + use std::{panic::AssertUnwindSafe, sync::mpsc}; /// Task handle (wasm). /// @@ -77,55 +76,62 @@ mod inner { impl DataJoinHandle { /// Join handle returned by `spawn` function pub fn join(self) -> Vec { - self.receiver.recv().expect("Spawned runtime task terminated before sending result.") + self.receiver + .recv() + .expect("Spawned runtime task terminated before sending result.") } } /// Spawn new runtime task (native). pub fn spawn(entry_point: fn(Vec) -> Vec, data: Vec) -> DataJoinHandle { - let scheduler = sp_externalities::with_externalities(|mut ext| ext.extension::() - .expect("No task executor associated with the current context!") - .clone() - ).expect("Spawn called outside of externalities context!"); + let scheduler = sp_externalities::with_externalities(|mut ext| { + ext.extension::() + .expect("No task executor associated with the current context!") + .clone() + }) + .expect("Spawn called outside of externalities context!"); let (sender, receiver) = mpsc::channel(); let extra_scheduler = scheduler.clone(); - scheduler.spawn("parallel-runtime-spawn", Box::pin(async move { - let result = match crate::new_async_externalities(extra_scheduler) { - Ok(mut ext) => { - let mut ext = AssertUnwindSafe(&mut ext); - match std::panic::catch_unwind(move || { - sp_externalities::set_and_run_with_externalities( - &mut **ext, - move || entry_point(data), - ) - }) { - Ok(result) => result, - Err(panic) => { - log::error!( - target: "runtime", - "Spawned task panicked: {:?}", - panic, - ); - - // This will drop sender without sending anything. - return; + scheduler.spawn( + "parallel-runtime-spawn", + Box::pin(async move { + let result = match crate::new_async_externalities(extra_scheduler) { + Ok(mut ext) => { + let mut ext = AssertUnwindSafe(&mut ext); + match std::panic::catch_unwind(move || { + sp_externalities::set_and_run_with_externalities( + &mut **ext, + move || entry_point(data), + ) + }) { + Ok(result) => result, + Err(panic) => { + log::error!( + target: "runtime", + "Spawned task panicked: {:?}", + panic, + ); + + // This will drop sender without sending anything. 
+ return + }, } - } - }, - Err(e) => { - log::error!( - target: "runtime", - "Unable to run async task: {}", - e, - ); - - return; - }, - }; - - let _ = sender.send(result); - })); + }, + Err(e) => { + log::error!( + target: "runtime", + "Unable to run async task: {}", + e, + ); + + return + }, + }; + + let _ = sender.send(result); + }), + ); DataJoinHandle { receiver } } @@ -146,7 +152,11 @@ mod inner { /// /// NOTE: Since this dynamic dispatch function and the invoked function are compiled with /// the same compiler, there should be no problem with ABI incompatibility. - extern "C" fn dispatch_wrapper(func_ref: *const u8, payload_ptr: *mut u8, payload_len: u32) -> u64 { + extern "C" fn dispatch_wrapper( + func_ref: *const u8, + payload_ptr: *mut u8, + payload_len: u32, + ) -> u64 { let payload_len = payload_len as usize; let output = unsafe { let payload = Vec::from_raw_parts(payload_ptr, payload_len, payload_len); @@ -160,11 +170,8 @@ mod inner { pub fn spawn(entry_point: fn(Vec) -> Vec, payload: Vec) -> DataJoinHandle { let func_ptr: usize = unsafe { mem::transmute(entry_point) }; - let handle = sp_io::runtime_tasks::spawn( - dispatch_wrapper as usize as _, - func_ptr as u32, - payload, - ); + let handle = + sp_io::runtime_tasks::spawn(dispatch_wrapper as usize as _, func_ptr as u32, payload); DataJoinHandle { handle } } @@ -185,7 +192,7 @@ mod inner { } } -pub use inner::{DataJoinHandle, spawn}; +pub use inner::{spawn, DataJoinHandle}; #[cfg(test)] mod tests { @@ -211,7 +218,7 @@ mod tests { #[test] fn panicking() { - let res = sp_io::TestExternalities::default().execute_with_safe(||{ + let res = sp_io::TestExternalities::default().execute_with_safe(|| { spawn(async_panicker, vec![5, 2, 1]).join(); }); @@ -220,28 +227,30 @@ mod tests { #[test] fn many_joins() { - sp_io::TestExternalities::default().execute_with_safe(|| { - // converges to 1 only after 1000+ steps - let mut running_val = 9780657630u64; - let mut data = vec![]; - let handles = (0..1024).map( - |_| { - running_val = if running_val % 2 == 0 { - running_val / 2 - } else { - 3 * running_val + 1 - }; - data.push(running_val as u8); - (spawn(async_runner, data.clone()), data.clone()) + sp_io::TestExternalities::default() + .execute_with_safe(|| { + // converges to 1 only after 1000+ steps + let mut running_val = 9780657630u64; + let mut data = vec![]; + let handles = (0..1024) + .map(|_| { + running_val = if running_val % 2 == 0 { + running_val / 2 + } else { + 3 * running_val + 1 + }; + data.push(running_val as u8); + (spawn(async_runner, data.clone()), data.clone()) + }) + .collect::>(); + + for (handle, mut data) in handles { + let result = handle.join(); + data.sort(); + + assert_eq!(result, data); } - ).collect::>(); - - for (handle, mut data) in handles { - let result = handle.join(); - data.sort(); - - assert_eq!(result, data); - } - }).expect("Failed to run with externalities"); + }) + .expect("Failed to run with externalities"); } } diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 18468a33ae42e..5aed5d679dd49 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -12,12 +12,12 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = 
"../core" } -serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } +serde = { version = "1.0.126", optional = true, features = ["derive"] } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } [features] default = [ diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index 27c7ec5e10e65..d988160b1dc7b 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,13 +19,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; -use sp_application_crypto::sr25519; pub use sp_application_crypto; +use sp_application_crypto::sr25519; pub use sp_core::{hash::H256, RuntimeDebug}; -use sp_runtime::traits::{BlakeTwo256, Verify, Extrinsic as ExtrinsicT,}; +use sp_runtime::traits::{BlakeTwo256, Extrinsic as ExtrinsicT, Verify}; /// Extrinsic for test-runtime. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] @@ -37,7 +37,10 @@ pub enum Extrinsic { #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } @@ -80,8 +83,5 @@ pub type Header = sp_runtime::generic::Header; /// Changes trie configuration (optionally) used in tests. 
pub fn changes_trie_config() -> sp_core::ChangesTrieConfiguration { - sp_core::ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - } + sp_core::ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 } } diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 79dae29102220..60daf9642df6b 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-timestamp" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,13 +13,15 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } -impl-trait-for-tuples = "0.1.3" -wasm-timer = { version = "0.2", optional = true } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } +thiserror = { version = "1.0.21", optional = true } +log = { version = "0.4.8", optional = true } +futures-timer = { version = "3.0.2", optional = true } +async-trait = { version = "0.1.50", optional = true } [features] default = [ "std" ] @@ -29,5 +31,8 @@ std = [ "sp-runtime/std", "codec/std", "sp-inherents/std", - "wasm-timer", + "thiserror", + "log", + "futures-timer", + "async-trait", ] diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index 89bfcc20e0e6d..02a579497b527 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,36 +19,118 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::Encode; -#[cfg(feature = "std")] -use codec::Decode; -#[cfg(feature = "std")] -use sp_inherents::ProvideInherentData; -use sp_inherents::{InherentIdentifier, IsFatalError, InherentData}; - -use sp_runtime::RuntimeString; +use codec::{Decode, Encode}; +use sp_inherents::{InherentData, InherentIdentifier, IsFatalError}; +use sp_std::time::Duration; /// The identifier for the `timestamp` inherent. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0"; + /// The type of the inherent. -pub type InherentType = u64; +pub type InherentType = Timestamp; + +/// Unit type wrapper that represents a timestamp. +/// +/// Such a timestamp is the time since the UNIX_EPOCH in milliseconds at a given point in time. +#[derive(Debug, Encode, Decode, Eq, Clone, Copy, Default, Ord)] +pub struct Timestamp(u64); + +impl Timestamp { + /// Create new `Self`. + pub const fn new(inner: u64) -> Self { + Self(inner) + } + + /// Returns `self` as [`Duration`]. 
+ pub fn as_duration(self) -> Duration { + Duration::from_millis(self.0) + } + + /// Checked subtraction that returns `None` on an underflow. + pub fn checked_sub(self, other: Self) -> Option { + self.0.checked_sub(other.0).map(Self) + } +} + +impl sp_std::ops::Deref for Timestamp { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl core::ops::Add for Timestamp { + type Output = Self; + + fn add(self, other: Self) -> Self { + Self(self.0 + other.0) + } +} + +impl core::ops::Add for Timestamp { + type Output = Self; + + fn add(self, other: u64) -> Self { + Self(self.0 + other) + } +} + +impl + Copy> core::cmp::PartialEq for Timestamp { + fn eq(&self, eq: &T) -> bool { + self.0 == (*eq).into() + } +} + +impl + Copy> core::cmp::PartialOrd for Timestamp { + fn partial_cmp(&self, other: &T) -> Option { + self.0.partial_cmp(&(*other).into()) + } +} + +#[cfg(feature = "std")] +impl std::fmt::Display for Timestamp { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for Timestamp { + fn from(timestamp: u64) -> Self { + Timestamp(timestamp) + } +} + +impl From for u64 { + fn from(timestamp: Timestamp) -> u64 { + timestamp.0 + } +} + +impl From for Timestamp { + fn from(duration: Duration) -> Self { + Timestamp(duration.as_millis() as u64) + } +} /// Errors that can occur while checking the timestamp inherent. #[derive(Encode, sp_runtime::RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Decode))] +#[cfg_attr(feature = "std", derive(Decode, thiserror::Error))] pub enum InherentError { /// The timestamp is valid in the future. /// This is a non-fatal-error and will not stop checking the inherents. + #[cfg_attr(feature = "std", error("Block will be valid at {0}."))] ValidAtTimestamp(InherentType), - /// Some other error. - Other(RuntimeString), + /// The block timestamp is too far in the future + #[cfg_attr(feature = "std", error("The timestamp of the block is too far in the future."))] + TooFarInFuture, } impl IsFatalError for InherentError { fn is_fatal_error(&self) -> bool { match self { InherentError::ValidAtTimestamp(_) => false, - InherentError::Other(_) => true, + InherentError::TooFarInFuture => true, } } } @@ -68,50 +150,119 @@ impl InherentError { /// Auxiliary trait to extract timestamp inherent data. pub trait TimestampInherentData { /// Get timestamp inherent data. - fn timestamp_inherent_data(&self) -> Result; + fn timestamp_inherent_data(&self) -> Result, sp_inherents::Error>; } impl TimestampInherentData for InherentData { - fn timestamp_inherent_data(&self) -> Result { + fn timestamp_inherent_data(&self) -> Result, sp_inherents::Error> { self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Timestamp inherent data not found".into())) } } +/// The current timestamp using the system time. +/// +/// This timestamp is the time since the UNIX epoch. +#[cfg(feature = "std")] +fn current_timestamp() -> std::time::Duration { + use std::time::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .expect("Current time is always after unix epoch; qed") +} + /// Provide duration since unix epoch in millisecond for timestamp inherent. 
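Together, the impls above make timestamp arithmetic read naturally while staying explicit about underflow. A usage sketch calling only the methods defined above (the import path is assumed for the sketch):

```rust
use sp_timestamp::Timestamp; // path assumed for this sketch
use std::time::Duration;

fn demo() {
    let genesis = Timestamp::new(1_600_000_000_000);
    let later = genesis + 6_000u64; // `Add<u64>` adds milliseconds

    // `checked_sub` returns `None` on underflow instead of panicking.
    assert_eq!(later.checked_sub(genesis), Some(Timestamp::new(6_000)));
    assert_eq!(genesis.checked_sub(later), None);

    // `Duration` conversions round-trip through milliseconds.
    let dur: Duration = later.as_duration();
    assert_eq!(Timestamp::from(dur), later);
}
```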
#[cfg(feature = "std")] -pub struct InherentDataProvider; +pub struct InherentDataProvider { + max_drift: InherentType, + timestamp: InherentType, +} #[cfg(feature = "std")] -impl ProvideInherentData for InherentDataProvider { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER +impl InherentDataProvider { + /// Create `Self` while using the system time to get the timestamp. + pub fn from_system_time() -> Self { + Self { + max_drift: std::time::Duration::from_secs(60).into(), + timestamp: current_timestamp().into(), + } } + /// Create `Self` using the given `timestamp`. + pub fn new(timestamp: InherentType) -> Self { + Self { max_drift: std::time::Duration::from_secs(60).into(), timestamp } + } + + /// With the given maximum drift. + /// + /// By default the maximum drift is 60 seconds. + /// + /// The maximum drift is used when checking the inherents of a runtime. If the current timestamp + /// plus the maximum drift is smaller than the timestamp in the block, the block will be + /// rejected as being too far in the future. + pub fn with_max_drift(mut self, max_drift: std::time::Duration) -> Self { + self.max_drift = max_drift.into(); + self + } + + /// Returns the timestamp of this inherent data provider. + pub fn timestamp(&self) -> InherentType { + self.timestamp + } +} + +#[cfg(feature = "std")] +impl sp_std::ops::Deref for InherentDataProvider { + type Target = InherentType; + + fn deref(&self) -> &Self::Target { + &self.timestamp + } +} + +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { fn provide_inherent_data( &self, inherent_data: &mut InherentData, ) -> Result<(), sp_inherents::Error> { - use wasm_timer::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH) - .map_err(|_| { - "Current time is before unix epoch".into() - }).and_then(|d| { - let duration: InherentType = d.as_millis() as u64; - inherent_data.put_data(INHERENT_IDENTIFIER, &duration) - }) + inherent_data.put_data(INHERENT_IDENTIFIER, &InherentType::from(self.timestamp)) } - fn error_to_string(&self, error: &[u8]) -> Option { - InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e)) - } -} + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + if *identifier != INHERENT_IDENTIFIER { + return None + } + match InherentError::try_from(&INHERENT_IDENTIFIER, error)? { + InherentError::ValidAtTimestamp(valid) => { + let max_drift = self.max_drift; + let timestamp = self.timestamp; + // halt import until timestamp is valid. + // reject when too far ahead. + if valid > timestamp + max_drift { + return Some(Err(sp_inherents::Error::Application(Box::from( + InherentError::TooFarInFuture, + )))) + } -/// A trait which is called when the timestamp is set. 
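On the node side the provider above is built from the system clock, with the drift bound adjusted through the builder-style setter; a short usage sketch (import path assumed):

```rust
use sp_timestamp::InherentDataProvider; // path assumed for this sketch
use std::time::Duration;

fn demo() {
    // Default maximum drift is 60 seconds.
    let provider = InherentDataProvider::from_system_time();

    // A tighter bound for chains with short block times: a block whose
    // timestamp exceeds `local time + max_drift` is rejected as
    // `TooFarInFuture`; one that is only slightly ahead merely delays
    // import until the local clock catches up.
    let strict = InherentDataProvider::from_system_time()
        .with_max_drift(Duration::from_secs(10));

    let _ = (provider.timestamp(), strict.timestamp());
}
```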
-#[impl_trait_for_tuples::impl_for_tuples(30)] -pub trait OnTimestampSet { - fn on_timestamp_set(moment: Moment); + let diff = valid.checked_sub(timestamp).unwrap_or_default(); + log::info!( + target: "timestamp", + "halting for block {} milliseconds in the future", + diff.0, + ); + + futures_timer::Delay::new(diff.as_duration()).await; + + Some(Ok(())) + }, + o => Some(Err(sp_inherents::Error::Application(Box::from(o)))), + } + } } diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 1000952b39fd4..3be09dcd576df 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-tracing" -version = "2.0.0" +version = "4.0.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -11,26 +11,32 @@ readme = "README.md" [package.metadata.docs.rs] # let's default to wasm32 -default-target = "wasm32-unknown-unknown" +default-target = "wasm32-unknown-unknown" # with the tracing enabled features = ["with-tracing"] # allowing for linux-gnu here, too, allows for `std` to show up as well targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] [dependencies] -sp-std = { version = "2.0.0", path = "../std", default-features = false} -codec = { version = "1.3.1", package = "parity-scale-codec", default-features = false, features = ["derive"]} -tracing = { version = "0.1.21", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../std", default-features = false } +codec = { version = "2.0.0", package = "parity-scale-codec", default-features = false, features = [ + "derive", +] } +tracing = { version = "0.1.25", default-features = false } tracing-core = { version = "0.1.17", default-features = false } log = { version = "0.4.8", optional = true } -tracing-subscriber = { version = "0.2.10", optional = true, features = ["tracing-log"] } +tracing-subscriber = { version = "0.2.19", optional = true, features = [ + "tracing-log", +] } +parking_lot = { version = "0.10.0", optional = true } +erased-serde = { version = "0.3.9", optional = true } +serde = { version = "1.0.126", optional = true } +serde_json = { version = "1.0.68", optional = true } +slog = { version = "2.5.2", features = ["nested-values"], optional = true } [features] -default = [ "std" ] -with-tracing = [ - "codec/derive", - "codec/full", -] +default = ["std"] +with-tracing = ["codec/derive", "codec/full"] std = [ "with-tracing", "tracing/std", @@ -39,4 +45,9 @@ std = [ "sp-std/std", "log", "tracing-subscriber", + "parking_lot", + "erased-serde", + "serde", + "serde_json", + "slog", ] diff --git a/primitives/tracing/README.md b/primitives/tracing/README.md index a93c97ff62fab..d66bb90016c71 100644 --- a/primitives/tracing/README.md +++ b/primitives/tracing/README.md @@ -1,6 +1,6 @@ Substrate tracing primitives and macros. -To trace functions or invidual code in Substrate, this crate provides [`within_span`] +To trace functions or individual code in Substrate, this crate provides [`within_span`] and [`enter_span`]. See the individual docs for how to use these macros. Note that to allow traces from wasm execution environment there are diff --git a/primitives/tracing/src/lib.rs b/primitives/tracing/src/lib.rs index cb67d8a0c5a22..9522e6df633ac 100644 --- a/primitives/tracing/src/lib.rs +++ b/primitives/tracing/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,14 +28,34 @@ //! Additionally, we have a const: `WASM_TRACE_IDENTIFIER`, which holds a span name used //! to signal that the 'actual' span name and target should be retrieved instead from //! the associated Fields mentioned above. +//! +//! Note: The `tracing` crate requires trace metadata to be static. This does not work +//! for wasm code in substrate, as it is regularly updated with new code from on-chain +//! events. The workaround for this is for the wasm tracing wrappers to put the +//! `name` and `target` data in the `values` map (normally they would be in the static +//! metadata assembled at compile time). #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +use tracing; +pub use tracing::{ + debug, debug_span, error, error_span, event, info, info_span, span, trace, trace_span, warn, + warn_span, Level, Span, +}; + +pub use crate::types::{ + WasmEntryAttributes, WasmFieldName, WasmFields, WasmLevel, WasmMetadata, WasmValue, + WasmValuesSet, +}; +#[cfg(feature = "std")] +pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; + /// Tracing facilities and helpers. /// /// This is modeled after the `tracing`/`tracing-core` interface and uses that more or /// less directly for the native side. Because of certain optimisations the these crates -/// have done, the wasm implementation diverges slightly and is optimised for thtat use +/// have done, the wasm implementation diverges slightly and is optimised for that use /// case (like being able to cross the wasm/native boundary via scale codecs). /// /// One of said optimisations is that all macros will yield to a `noop` in non-std unless @@ -56,19 +76,18 @@ /// ```rust /// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "fn wide span"); /// { -/// sp_tracing::enter_span!(sp_tracing::trace_span!("outer-span")); -/// { -/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); -/// // .. -/// } // inner span exists here -/// } // outer span exists here +/// sp_tracing::enter_span!(sp_tracing::trace_span!("outer-span")); +/// { +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); +/// // .. +/// } // inner span exists here +/// } // outer span exists here /// /// sp_tracing::within_span! { -/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); +/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); /// 1 + 1; /// // some other complex code /// } // debug span ends here -/// /// ``` /// /// @@ -86,38 +105,18 @@ /// and call `set_tracing_subscriber` at the very beginning of your execution – /// the default subscriber is doing nothing, so any spans or events happening before /// will not be recorded! -/// - mod types; -#[cfg(feature = "std")] -use tracing; - -pub use tracing::{ - debug, debug_span, error, error_span, info, info_span, trace, trace_span, warn, warn_span, - span, event, Level, Span, -}; - -pub use crate::types::{ - WasmMetadata, WasmEntryAttributes, WasmValuesSet, WasmValue, WasmFields, WasmLevel, WasmFieldName -}; - - /// Try to init a simple tracing subscriber with log compatibility layer. /// Ignores any error. Useful for testing. 
#[cfg(feature = "std")] pub fn try_init_simple() { let _ = tracing_subscriber::fmt() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .with_writer(std::io::stderr).try_init(); + .with_writer(std::io::stderr) + .try_init(); } -#[cfg(feature = "std")] -pub use crate::types::{ - WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER -}; - - /// Runs given code within a tracing span, measuring it's execution time. /// /// If tracing is not enabled, the code is still executed. Pass in level and name or @@ -127,20 +126,20 @@ pub use crate::types::{ /// /// ``` /// sp_tracing::within_span! { -/// sp_tracing::Level::TRACE, +/// sp_tracing::Level::TRACE, /// "test-span"; /// 1 + 1; /// // some other complex code /// } /// /// sp_tracing::within_span! { -/// sp_tracing::span!(sp_tracing::Level::WARN, "warn-span", you_can_pass="any params"); +/// sp_tracing::span!(sp_tracing::Level::WARN, "warn-span", you_can_pass="any params"); /// 1 + 1; /// // some other complex code /// } /// /// sp_tracing::within_span! { -/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); +/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); /// 1 + 1; /// // some other complex code /// } @@ -164,7 +163,7 @@ macro_rules! within_span { $( $code:tt )* ) => { { - $crate::within_span!($crate::span!($crate::Level::TRACE, $name); $( $code )*) + $crate::within_span!($crate::span!($lvl, $name); $( $code )*) } }; } @@ -187,13 +186,12 @@ macro_rules! within_span { }; } - /// Enter a span - noop for `no_std` without `with-tracing` #[cfg(all(not(feature = "std"), not(feature = "with-tracing")))] #[macro_export] macro_rules! enter_span { - ( $lvl:expr, $name:expr ) => ( ); - ( $name:expr ) => ( ) // no-op + ( $lvl:expr, $name:expr ) => {}; + ( $name:expr ) => {}; // no-op } /// Enter a span. @@ -215,13 +213,12 @@ macro_rules! enter_span { /// sp_tracing::enter_span!(sp_tracing::info_span!("info-span", params="value")); /// /// { -/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "outer-span"); -/// { -/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); -/// // .. -/// } // inner span exists here -/// } // outer span exists here -/// +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "outer-span"); +/// { +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); +/// // .. +/// } // inner span exists here +/// } // outer span exists here /// ``` #[cfg(any(feature = "std", feature = "with-tracing"))] #[macro_export] @@ -233,6 +230,6 @@ macro_rules! enter_span { let __tracing_guard__ = __within_span__.enter(); }; ( $lvl:expr, $name:expr ) => { - $crate::enter_span!($crate::span!($crate::Level::TRACE, $name)) + $crate::enter_span!($crate::span!($lvl, $name)) }; } diff --git a/primitives/tracing/src/types.rs b/primitives/tracing/src/types.rs index 050ac4c314166..377bd0f42c6eb 100644 --- a/primitives/tracing/src/types.rs +++ b/primitives/tracing/src/types.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,15 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +use codec::{Decode, Encode}; /// Types for wasm based tracing. Loosly inspired by `tracing-core` but /// optimised for the specific use case. 
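Beyond formatting, the macro hunks above fix a real bug: the `(lvl, name)` arms of both `within_span!` and `enter_span!` used to expand to `span!($crate::Level::TRACE, ...)`, silently discarding the requested level; they now forward `$lvl`. After this change the span below really is created at `DEBUG`:

```rust
// Previously recorded at TRACE regardless of the level argument;
// now honoured as DEBUG.
sp_tracing::within_span! {
    sp_tracing::Level::DEBUG,
    "block-import";
    // ... measured work ...
    1 + 1;
}
```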
- -use core::{format_args, fmt::Debug}; -use sp_std::{ - vec, vec::Vec, -}; -use sp_std::Writer; -use codec::{Encode, Decode}; +use core::{fmt::Debug, format_args}; +use sp_std::{vec, vec::Vec, Writer}; /// The Tracing Level – the user can filter by this #[derive(Clone, Encode, Decode, Debug)] @@ -37,24 +33,21 @@ pub enum WasmLevel { /// Further information for debugging purposes DEBUG, /// The lowest level, keeping track of minute detail - TRACE + TRACE, } - impl From<&tracing_core::Level> for WasmLevel { fn from(l: &tracing_core::Level) -> WasmLevel { - match l { - &tracing_core::Level::ERROR => WasmLevel::ERROR, - &tracing_core::Level::WARN => WasmLevel::WARN, - &tracing_core::Level::INFO => WasmLevel::INFO, - &tracing_core::Level::DEBUG => WasmLevel::DEBUG, - &tracing_core::Level::TRACE => WasmLevel::TRACE, + match *l { + tracing_core::Level::ERROR => WasmLevel::ERROR, + tracing_core::Level::WARN => WasmLevel::WARN, + tracing_core::Level::INFO => WasmLevel::INFO, + tracing_core::Level::DEBUG => WasmLevel::DEBUG, + tracing_core::Level::TRACE => WasmLevel::TRACE, } } } - - impl core::default::Default for WasmLevel { fn default() -> Self { WasmLevel::TRACE @@ -82,41 +75,27 @@ pub enum WasmValue { impl core::fmt::Debug for WasmValue { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { match self { - WasmValue::U8(ref i) => { - f.write_fmt(format_args!("{}_u8", i)) - } - WasmValue::I8(ref i) => { - f.write_fmt(format_args!("{}_i8", i)) - } - WasmValue::U32(ref i) => { - f.write_fmt(format_args!("{}_u32", i)) - } - WasmValue::I32(ref i) => { - f.write_fmt(format_args!("{}_i32", i)) - } - WasmValue::I64(ref i) => { - f.write_fmt(format_args!("{}_i64", i)) - } - WasmValue::U64(ref i) => { - f.write_fmt(format_args!("{}_u64", i)) - } - WasmValue::Bool(ref i) => { - f.write_fmt(format_args!("{}_bool", i)) - } + WasmValue::U8(ref i) => f.write_fmt(format_args!("{}_u8", i)), + WasmValue::I8(ref i) => f.write_fmt(format_args!("{}_i8", i)), + WasmValue::U32(ref i) => f.write_fmt(format_args!("{}_u32", i)), + WasmValue::I32(ref i) => f.write_fmt(format_args!("{}_i32", i)), + WasmValue::I64(ref i) => f.write_fmt(format_args!("{}_i64", i)), + WasmValue::U64(ref i) => f.write_fmt(format_args!("{}_u64", i)), + WasmValue::Bool(ref i) => f.write_fmt(format_args!("{}_bool", i)), WasmValue::Formatted(ref i) | WasmValue::Str(ref i) => { if let Ok(v) = core::str::from_utf8(i) { f.write_fmt(format_args!("{}", v)) } else { f.write_fmt(format_args!("{:?}", i)) } - } + }, WasmValue::Encoded(ref v) => { f.write_str("Scale(")?; - for byte in v { - f.write_fmt(format_args!("{:02x}", byte))?; - } + for byte in v { + f.write_fmt(format_args!("{:02x}", byte))?; + } f.write_str(")") - } + }, } } } @@ -129,7 +108,7 @@ impl From for WasmValue { impl From<&i8> for WasmValue { fn from(inp: &i8) -> WasmValue { - WasmValue::I8(inp.clone()) + WasmValue::I8(*inp) } } @@ -246,7 +225,7 @@ impl WasmFields { impl From> for WasmFields { fn from(v: Vec) -> WasmFields { - WasmFields(v.into()) + WasmFields(v) } } @@ -299,7 +278,6 @@ impl core::fmt::Debug for WasmValuesSet { } } - impl From)>> for WasmValuesSet { fn from(v: Vec<(WasmFieldName, Option)>) -> Self { WasmValuesSet(v) @@ -326,34 +304,20 @@ impl WasmValuesSet { impl tracing_core::field::Visit for WasmValuesSet { fn record_debug(&mut self, field: &tracing_core::field::Field, value: &dyn Debug) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(format_args!("{:?}", value))) - )) + self.0 + .push((field.name().into(), 
Some(WasmValue::from(format_args!("{:?}", value))))) } fn record_i64(&mut self, field: &tracing_core::field::Field, value: i64) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + self.0.push((field.name().into(), Some(WasmValue::from(value)))) } fn record_u64(&mut self, field: &tracing_core::field::Field, value: u64) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + self.0.push((field.name().into(), Some(WasmValue::from(value)))) } fn record_bool(&mut self, field: &tracing_core::field::Field, value: bool) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + self.0.push((field.name().into(), Some(WasmValue::from(value)))) } fn record_str(&mut self, field: &tracing_core::field::Field, value: &str) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + self.0.push((field.name().into(), Some(WasmValue::from(value)))) } } /// Metadata provides generic information about the specifc location of the @@ -388,7 +352,7 @@ impl From<&tracing_core::Metadata<'_>> for WasmMetadata { line: wm.line().unwrap_or_default(), module_path: wm.module_path().map(|m| m.as_bytes().to_vec()).unwrap_or_default(), is_span: wm.is_span(), - fields: wm.fields().into() + fields: wm.fields().into(), } } } @@ -419,12 +383,11 @@ impl core::default::Default for WasmMetadata { line: Default::default(), module_path: Default::default(), is_span: true, - fields: WasmFields::empty() + fields: WasmFields::empty(), } } } - fn decode_field(field: &[u8]) -> &str { core::str::from_utf8(field).unwrap_or_default() } @@ -447,7 +410,7 @@ impl From<&tracing_core::Event<'_>> for WasmEntryAttributes { WasmEntryAttributes { parent_id: evt.parent().map(|id| id.into_u64()), metadata: evt.metadata().into(), - fields: fields + fields, } } } @@ -459,7 +422,7 @@ impl From<&tracing_core::span::Attributes<'_>> for WasmEntryAttributes { WasmEntryAttributes { parent_id: attrs.parent().map(|id| id.into_u64()), metadata: attrs.metadata().into(), - fields: fields + fields, } } } @@ -478,92 +441,175 @@ impl core::default::Default for WasmEntryAttributes { mod std_features { use tracing_core::callsite; - use tracing; /// Static entry use for wasm-originated metadata. 
pub struct WasmCallsite; impl callsite::Callsite for WasmCallsite { - fn set_interest(&self, _: tracing_core::Interest) { unimplemented!() } - fn metadata(&self) -> &tracing_core::Metadata { unimplemented!() } + fn set_interest(&self, _: tracing_core::Interest) { + unimplemented!() + } + fn metadata(&self) -> &tracing_core::Metadata { + unimplemented!() + } } - static CALLSITE: WasmCallsite = WasmCallsite; + static CALLSITE: WasmCallsite = WasmCallsite; /// The identifier we are using to inject the wasm events in the generic `tracing` system - pub static WASM_TRACE_IDENTIFIER: &'static str = "wasm_tracing"; + pub static WASM_TRACE_IDENTIFIER: &str = "wasm_tracing"; /// The fieldname for the wasm-originated name - pub static WASM_NAME_KEY: &'static str = "name"; + pub static WASM_NAME_KEY: &str = "name"; /// The fieldname for the wasm-originated target - pub static WASM_TARGET_KEY: &'static str = "target"; + pub static WASM_TARGET_KEY: &str = "target"; /// The the list of all static field names we construct from the given metadata - pub static GENERIC_FIELDS: &'static [&'static str] = &[WASM_TARGET_KEY, WASM_NAME_KEY, - "file", "line", "module_path", "params"]; + pub static GENERIC_FIELDS: &[&str] = + &[WASM_TARGET_KEY, WASM_NAME_KEY, "file", "line", "module_path", "params"]; // Implementation Note: // the original `tracing` crate generates these static metadata entries at every `span!` and - // `event!` location to allow for highly optimised filtering. For us to allow level-based emitting - // of wasm events we need these static metadata entries to inject into that system. We then provide - // generic `From`-implementations picking the right metadata to refer to. - - static SPAN_ERROR_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::ERROR, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + // `event!` location to allow for highly optimised filtering. For us to allow level-based + // emitting of wasm events we need these static metadata entries to inject into that system. We + // then provide generic `From`-implementations picking the right metadata to refer to. 
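The statics defined below exist because `tracing` insists on `&'static Metadata`, so one entry is pre-built per `(level, span-or-event)` combination and the conversion from `WasmMetadata` (elided across this hunk) reduces to a lookup, roughly:

```rust
// Sketch of the dispatch the `From<&WasmMetadata>` impl performs
// (two of the ten arms shown; the statics are the ones defined below).
fn pick(wm: &crate::WasmMetadata) -> &'static tracing_core::Metadata<'static> {
    match (&wm.level, wm.is_span) {
        (crate::WasmLevel::ERROR, true) => &SPAN_ERROR_METADATA,
        (crate::WasmLevel::ERROR, false) => &EVENT_ERROR_METADATA,
        // ... one arm per remaining (level, kind) pair ...
        _ => &SPAN_TRACE_METADATA,
    }
}
```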
+ + static SPAN_ERROR_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::ERROR, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_WARN_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::WARN, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_WARN_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::WARN, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_INFO_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::INFO, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_INFO_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::INFO, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_DEBUG_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::DEBUG, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_DEBUG_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::DEBUG, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_TRACE_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::TRACE, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_TRACE_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::TRACE, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static EVENT_ERROR_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::ERROR, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_ERROR_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::ERROR, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + 
tracing_core::metadata::Kind::EVENT, ); - static EVENT_WARN_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::WARN, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_WARN_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::WARN, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); - static EVENT_INFO_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::INFO, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_INFO_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::INFO, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); - static EVENT_DEBUG_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::DEBUG, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_DEBUG_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::DEBUG, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); - static EVENT_TRACE_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::TRACE, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_TRACE_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::TRACE, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); // FIXME: this could be done a lot in 0.2 if they opt for using `Cow` instead - // https://github.com/paritytech/substrate/issues/7134 + // https://github.com/paritytech/substrate/issues/7134 impl From<&crate::WasmMetadata> for &'static tracing_core::Metadata<'static> { fn from(wm: &crate::WasmMetadata) -> &'static tracing_core::Metadata<'static> { match (&wm.level, wm.is_span) { @@ -589,12 +635,12 @@ mod std_features { let line = a.metadata.line; let module_path = std::str::from_utf8(&a.metadata.module_path).unwrap_or_default(); let params = a.fields; - let metadata : &tracing_core::metadata::Metadata<'static> = (&a.metadata).into(); + let metadata: &tracing_core::metadata::Metadata<'static> = (&a.metadata).into(); tracing::span::Span::child_of( - a.parent_id.map(|i|tracing_core::span::Id::from_u64(i)), + a.parent_id.map(tracing_core::span::Id::from_u64), &metadata, - &tracing::valueset!{ 
metadata.fields(), target, name, file, line, module_path, ?params } + &tracing::valueset! { metadata.fields(), target, name, file, line, module_path, ?params }, ) } } @@ -608,12 +654,12 @@ mod std_features { let line = self.metadata.line; let module_path = std::str::from_utf8(&self.metadata.module_path).unwrap_or_default(); let params = self.fields; - let metadata : &tracing_core::metadata::Metadata<'static> = (&self.metadata).into(); + let metadata: &tracing_core::metadata::Metadata<'static> = (&self.metadata).into(); tracing_core::Event::child_of( - self.parent_id.map(|i|tracing_core::span::Id::from_u64(i)), + self.parent_id.map(tracing_core::span::Id::from_u64), &metadata, - &tracing::valueset!{ metadata.fields(), target, name, file, line, module_path, ?params } + &tracing::valueset! { metadata.fields(), target, name, file, line, module_path, ?params }, ) } } diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 57ba3a28ac3c1..3f77014ac53b0 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "sp-transaction-pool" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "Transaction pool primitives types & Runtime API." +description = "Transaction pool runtime facing API." documentation = "https://docs.rs/sp-transaction-pool" readme = "README.md" @@ -14,24 +14,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", optional = true } -derive_more = { version = "0.99.2", optional = true } -futures = { version = "0.3.1", optional = true } -log = { version = "0.4.8", optional = true } -serde = { version = "1.0.101", features = ["derive"], optional = true} -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-blockchain = { version = "2.0.0", optional = true, path = "../blockchain" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } [features] default = [ "std" ] std = [ - "codec", - "derive_more", - "futures", - "log", - "serde", "sp-api/std", - "sp-blockchain", "sp-runtime/std", ] diff --git a/primitives/transaction-pool/src/error.rs b/primitives/transaction-pool/src/error.rs deleted file mode 100644 index 531b397cb946c..0000000000000 --- a/primitives/transaction-pool/src/error.rs +++ /dev/null @@ -1,83 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Transaction pool errors. 
- -use sp_runtime::transaction_validity::{ - TransactionPriority as Priority, InvalidTransaction, UnknownTransaction, -}; - -/// Transaction pool result. -pub type Result = std::result::Result; - -/// Transaction pool error type. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - /// Transaction is not verifiable yet, but might be in the future. - #[display(fmt="Unknown transaction validity: {:?}", _0)] - UnknownTransaction(UnknownTransaction), - /// Transaction is invalid. - #[display(fmt="Invalid transaction validity: {:?}", _0)] - InvalidTransaction(InvalidTransaction), - /// The transaction validity returned no "provides" tag. - /// - /// Such transactions are not accepted to the pool, since we use those tags - /// to define identity of transactions (occupance of the same "slot"). - #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")] - NoTagsProvided, - /// The transaction is temporarily banned. - #[display(fmt="Temporarily Banned")] - TemporarilyBanned, - /// The transaction is already in the pool. - #[display(fmt="[{:?}] Already imported", _0)] - AlreadyImported(Box), - /// The transaction cannot be imported cause it's a replacement and has too low priority. - #[display(fmt="Too low priority ({} > {})", old, new)] - TooLowPriority { - /// Transaction already in the pool. - old: Priority, - /// Transaction entering the pool. - new: Priority - }, - /// Deps cycle detected and we couldn't import transaction. - #[display(fmt="Cycle Detected")] - CycleDetected, - /// Transaction was dropped immediately after it got inserted. - #[display(fmt="Transaction couldn't enter the pool because of the limit.")] - ImmediatelyDropped, - /// Invalid block id. - InvalidBlockId(String), - /// The pool is not accepting future transactions. - #[display(fmt="The pool is not accepting future transactions")] - RejectedFutureTransaction, -} - -impl std::error::Error for Error {} - -/// Transaction pool error conversion. -pub trait IntoPoolError: std::error::Error + Send + Sized { - /// Try to extract original `Error` - /// - /// This implementation is optional and used only to - /// provide more descriptive error messages for end users - /// of RPC API. - fn into_pool_error(self) -> std::result::Result { Err(self) } -} - -impl IntoPoolError for Error { - fn into_pool_error(self) -> std::result::Result { Ok(self) } -} diff --git a/primitives/transaction-pool/src/lib.rs b/primitives/transaction-pool/src/lib.rs index b991c541521c2..3c71149255ce0 100644 --- a/primitives/transaction-pool/src/lib.rs +++ b/primitives/transaction-pool/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,20 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Transaction pool primitives types & Runtime API. +//! Transaction pool runtime facing API. 
#![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] pub mod runtime_api; -#[cfg(feature = "std")] -pub mod error; -#[cfg(feature = "std")] -mod pool; - -#[cfg(feature = "std")] -pub use pool::*; - -pub use sp_runtime::transaction_validity::{ - TransactionLongevity, TransactionPriority, TransactionTag, TransactionSource, -}; diff --git a/primitives/transaction-pool/src/runtime_api.rs b/primitives/transaction-pool/src/runtime_api.rs index 9080c023f5890..be631ee03b9d7 100644 --- a/primitives/transaction-pool/src/runtime_api.rs +++ b/primitives/transaction-pool/src/runtime_api.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,27 +17,39 @@ //! Tagged Transaction Queue Runtime API. -use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, +}; sp_api::decl_runtime_apis! { /// The `TaggedTransactionQueue` api trait for interfering with the transaction queue. - #[api_version(2)] + #[api_version(3)] pub trait TaggedTransactionQueue { /// Validate the transaction. #[changed_in(2)] fn validate_transaction(tx: <Block as BlockT>::Extrinsic) -> TransactionValidity; + /// Validate the transaction. + #[changed_in(3)] + fn validate_transaction( + source: TransactionSource, + tx: <Block as BlockT>::Extrinsic, + ) -> TransactionValidity; + /// Validate the transaction. /// /// This method is invoked by the transaction pool to learn details about given transaction. /// The implementation should make sure to verify the correctness of the transaction - /// against current state. + /// against current state. The given `block_hash` corresponds to the hash of the block + /// that is used as current state. + /// /// Note that this call may be performed by the pool multiple times and transactions /// might be verified in any possible order. 
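A note on the `#[api_version(3)]`/`#[changed_in(N)]` annotations in this trait: the undecorated method carries the current (v3) signature, while each `#[changed_in(N)]` entry preserves the signature as it was before version N, which `sp_api`'s generated code exposes under a `*_before_version_N` name. A rough, hypothetical client-side dispatch over the advertised version (`runtime_api`, `at`, `source`, `xt` and `block_hash` are assumed to be in scope):

```rust
// Hypothetical sketch only; method names follow sp_api's generated
// `*_before_version_N` convention.
let version = runtime_api
    .api_version::<dyn TaggedTransactionQueue<Block>>(&at)?
    .unwrap_or(1);

let validity = if version >= 3 {
    // Current signature: the state to validate against is pinned explicitly.
    runtime_api.validate_transaction(&at, source, xt, block_hash)?
} else if version == 2 {
    runtime_api.validate_transaction_before_version_3(&at, source, xt)?
} else {
    runtime_api.validate_transaction_before_version_2(&at, xt)?
};
```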
fn validate_transaction( source: TransactionSource, tx: <Block as BlockT>::Extrinsic, + block_hash: Block::Hash, ) -> TransactionValidity; } } diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml new file mode 100644 index 0000000000000..8a41105b20b74 --- /dev/null +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "sp-transaction-storage-proof" +version = "4.0.0-dev" +authors = ["Parity Technologies <admin@parity.io>"] +description = "Transaction storage proof primitives" +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } +sp-trie = { version = "4.0.0-dev", optional = true, path = "../trie" } +sp-core = { version = "4.0.0-dev", path = "../core", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +log = { version = "0.4.8", optional = true } +async-trait = { version = "0.1.50", optional = true } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "scale-info/std", + "sp-std/std", + "sp-inherents/std", + "sp-runtime/std", + "sp-trie/std", + "sp-core", + "log", + "async-trait", +] diff --git a/primitives/transaction-storage-proof/README.md b/primitives/transaction-storage-proof/README.md new file mode 100644 index 0000000000000..4a93e1d41fa3f --- /dev/null +++ b/primitives/transaction-storage-proof/README.md @@ -0,0 +1,5 @@ +Transaction Storage Proof Primitives + +Contains types and basic code to extract storage proofs for indexed transactions. + +License: Apache-2.0 diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs new file mode 100644 index 0000000000000..4b01a8d45d454 --- /dev/null +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -0,0 +1,246 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage proof primitives. Contains types and basic code to extract storage +//! proofs for indexed transactions. + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::{prelude::*, result::Result}; + +use codec::{Decode, Encode}; +use sp_inherents::{InherentData, InherentIdentifier, IsFatalError}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +pub use sp_inherents::Error; + +/// The identifier for the proof inherent. 
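Two constants defined just after this point drive the scheme: indexed data stays auditable for `DEFAULT_STORAGE_PERIOD` (100800) blocks, and proofs are taken over 256-byte chunks; `build_proof` later counts chunks per transaction as `ceil(len / CHUNK_SIZE)`. A quick worked check of that arithmetic:

```rust
const CHUNK_SIZE: usize = 256; // mirrors the constant defined just below

fn num_chunks(len: usize) -> usize {
    (len + CHUNK_SIZE - 1) / CHUNK_SIZE
}

fn main() {
    assert_eq!(num_chunks(1), 1);   // tiny payloads still occupy one chunk
    assert_eq!(num_chunks(256), 1); // exact multiple, no spill-over
    assert_eq!(num_chunks(257), 2); // one extra byte adds a second chunk
}
```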
+pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"tx_proof"; +/// Storage period for data. +pub const DEFAULT_STORAGE_PERIOD: u32 = 100800; +/// Proof trie value size. +pub const CHUNK_SIZE: usize = 256; + +/// Errors that can occur while checking the storage proof. +#[derive(Encode, sp_runtime::RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Decode))] +pub enum InherentError { + InvalidProof, + TrieError, +} + +impl IsFatalError for InherentError { + fn is_fatal_error(&self) -> bool { + true + } +} + +/// Holds a chunk of data retrieved from storage along with +/// a proof that the data was stored at that location in the trie. +#[derive(Encode, Decode, Clone, PartialEq, Debug, scale_info::TypeInfo)] +pub struct TransactionStorageProof { + /// Data chunk that is proved to exist. + pub chunk: Vec, + /// Trie nodes that compose the proof. + pub proof: Vec>, +} + +/// Auxiliary trait to extract storage proof. +pub trait TransactionStorageProofInherentData { + /// Get the proof. + fn storage_proof(&self) -> Result, Error>; +} + +impl TransactionStorageProofInherentData for InherentData { + fn storage_proof(&self) -> Result, Error> { + Ok(self.get_data(&INHERENT_IDENTIFIER)?) + } +} + +/// Provider for inherent data. +#[cfg(feature = "std")] +pub struct InherentDataProvider { + proof: Option, +} + +#[cfg(feature = "std")] +impl InherentDataProvider { + pub fn new(proof: Option) -> Self { + InherentDataProvider { proof } + } +} + +#[cfg(feature = "std")] +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + if let Some(proof) = &self.proof { + inherent_data.put_data(INHERENT_IDENTIFIER, proof) + } else { + Ok(()) + } + } + + async fn try_handle_error( + &self, + identifier: &InherentIdentifier, + error: &[u8], + ) -> Option> { + if *identifier != INHERENT_IDENTIFIER { + return None + } + + let error = InherentError::decode(&mut &error[..]).ok()?; + + Some(Err(Error::Application(Box::from(format!("{:?}", error))))) + } +} + +/// A utility function to extract a chunk index from the source of randomness. +pub fn random_chunk(random_hash: &[u8], total_chunks: u32) -> u32 { + let mut buf = [0u8; 8]; + buf.copy_from_slice(&random_hash[0..8]); + let random_u64 = u64::from_be_bytes(buf); + (random_u64 % total_chunks as u64) as u32 +} + +/// A utility function to encode transaction index as trie key. +pub fn encode_index(input: u32) -> Vec { + codec::Encode::encode(&codec::Compact(input)) +} + +/// An interface to request indexed data from the client. +pub trait IndexedBody { + /// Get all indexed transactions for a block, + /// including renewed transactions. + /// + /// Note that this will only fetch transactions + /// that are indexed by the runtime with `storage_index_transaction`. + fn block_indexed_body(&self, number: NumberFor) -> Result>>, Error>; + + /// Get block number for a block hash. + fn number(&self, hash: B::Hash) -> Result>, Error>; +} + +#[cfg(feature = "std")] +pub mod registration { + use super::*; + use sp_runtime::traits::{Block as BlockT, One, Saturating, Zero}; + use sp_trie::TrieMut; + + type Hasher = sp_core::Blake2Hasher; + type TrieLayout = sp_trie::Layout; + + /// Create a new inherent data provider instance for a given parent block hash. 
+ pub fn new_data_provider( + client: &C, + parent: &B::Hash, + ) -> Result + where + B: BlockT, + C: IndexedBody, + { + let parent_number = client.number(parent.clone())?.unwrap_or(Zero::zero()); + let number = parent_number + .saturating_add(One::one()) + .saturating_sub(DEFAULT_STORAGE_PERIOD.into()); + if number.is_zero() { + // Too early to collect proofs. + return Ok(InherentDataProvider::new(None)) + } + + let proof = match client.block_indexed_body(number)? { + Some(transactions) if !transactions.is_empty() => + Some(build_proof(parent.as_ref(), transactions)?), + Some(_) | None => { + // Nothing was indexed in that block. + None + }, + }; + Ok(InherentDataProvider::new(proof)) + } + + /// Build a proof for a given source of randomness and indexed transactions. + pub fn build_proof( + random_hash: &[u8], + transactions: Vec>, + ) -> Result { + let mut db = sp_trie::MemoryDB::::default(); + + let mut target_chunk = None; + let mut target_root = Default::default(); + let mut target_chunk_key = Default::default(); + let mut chunk_proof = Default::default(); + + let total_chunks: u64 = transactions + .iter() + .map(|t| ((t.len() + CHUNK_SIZE - 1) / CHUNK_SIZE) as u64) + .sum(); + let mut buf = [0u8; 8]; + buf.copy_from_slice(&random_hash[0..8]); + let random_u64 = u64::from_be_bytes(buf); + let target_chunk_index = random_u64 % total_chunks; + // Generate tries for each transaction. + let mut chunk_index = 0; + for transaction in transactions { + let mut transaction_root = sp_trie::empty_trie_root::(); + { + let mut trie = + sp_trie::TrieDBMut::::new(&mut db, &mut transaction_root); + let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); + for (index, chunk) in chunks.enumerate() { + let index = encode_index(index as u32); + trie.insert(&index, &chunk).map_err(|e| Error::Application(Box::new(e)))?; + if chunk_index == target_chunk_index { + target_chunk = Some(chunk); + target_chunk_key = index; + } + chunk_index += 1; + } + trie.commit(); + } + if target_chunk.is_some() && target_root == Default::default() { + target_root = transaction_root.clone(); + chunk_proof = sp_trie::generate_trie_proof::( + &db, + transaction_root.clone(), + &[target_chunk_key.clone()], + ) + .map_err(|e| Error::Application(Box::new(e)))?; + } + } + + Ok(TransactionStorageProof { proof: chunk_proof, chunk: target_chunk.unwrap() }) + } + + #[test] + fn build_proof_check() { + use std::str::FromStr; + let random = [0u8; 32]; + let proof = build_proof(&random, vec![vec![42]]).unwrap(); + let root = sp_core::H256::from_str( + "0xff8611a4d212fc161dae19dd57f0f1ba9309f45d6207da13f2d3eab4c6839e91", + ) + .unwrap(); + sp_trie::verify_trie_proof::( + &root, + &proof.proof, + &[(encode_index(0), Some(proof.chunk))], + ) + .unwrap(); + } +} diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 7b7629bbf9bb2..5a2de4f16f9a4 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-trie" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" @@ -18,26 +18,28 @@ name = "bench" harness = false [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", 
default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.0", default-features = false } +trie-db = { version = "0.22.6", default-features = false } trie-root = { version = "0.16.0", default-features = false } -memory-db = { version = "0.24.0", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } +memory-db = { version = "0.27.0", default-features = false } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.25.0" +trie-bench = "0.28.0" trie-standardmap = "0.15.2" criterion = "0.3.3" hex-literal = "0.3.1" -sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-runtime = { version = "4.0.0-dev", path = "../runtime" } [features] default = ["std"] std = [ "sp-std/std", "codec/std", + "scale-info/std", "hash-db/std", "memory-db/std", "trie-db/std", diff --git a/primitives/trie/benches/bench.rs b/primitives/trie/benches/bench.rs index d385b4bacd4c0..8c84c6354f2c3 100644 --- a/primitives/trie/benches/bench.rs +++ b/primitives/trie/benches/bench.rs @@ -1,20 +1,21 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use criterion::{Criterion, criterion_group, criterion_main}; +use criterion::{criterion_group, criterion_main, Criterion}; criterion_group!(benches, benchmark); criterion_main!(benches); diff --git a/primitives/trie/src/error.rs b/primitives/trie/src/error.rs index 2d3a1b79287c3..30a164c614755 100644 --- a/primitives/trie/src/error.rs +++ b/primitives/trie/src/error.rs @@ -1,15 +1,24 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 // -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -#[cfg(feature="std")] -use std::fmt; -#[cfg(feature="std")] +#[cfg(feature = "std")] use std::error::Error as StdError; +#[cfg(feature = "std")] +use std::fmt; #[derive(Debug, PartialEq, Eq, Clone)] /// Error for trie node decoding. @@ -17,7 +26,7 @@ pub enum Error { /// Bad format. BadFormat, /// Decoding error. - Decode(codec::Error) + Decode(codec::Error), } impl From for Error { @@ -26,7 +35,7 @@ impl From for Error { } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl StdError for Error { fn description(&self) -> &str { match self { @@ -36,11 +45,11 @@ impl StdError for Error { } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::Decode(e) => write!(f, "Decode error: {}", e.what()), + Error::Decode(e) => write!(f, "Decode error: {}", e), Error::BadFormat => write!(f, "Bad format"), } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 73a4a8029b2d7..8ba13284d379f 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -1,49 +1,55 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Utility functions to interact with Substrate's Base-16 Modified Merkle Patricia tree ("trie"). #![cfg_attr(not(feature = "std"), no_std)] mod error; -mod node_header; mod node_codec; +mod node_header; mod storage_proof; +mod trie_codec; mod trie_stream; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec, borrow::Borrow}; -use hash_db::{Hasher, Prefix}; -use trie_db::proof::{generate_proof, verify_proof}; -pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. pub use error::Error; -/// The Substrate format implementation of `TrieStream`. -pub use trie_stream::TrieStream; +/// Various re-exports from the `hash-db` crate. 
+pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +use hash_db::{Hasher, Prefix}; +pub use memory_db::prefixed_key; +/// Various re-exports from the `memory-db` crate. +pub use memory_db::KeyFunction; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -pub use storage_proof::StorageProof; +use sp_std::{borrow::Borrow, boxed::Box, marker::PhantomData, vec::Vec}; +pub use storage_proof::{CompactProof, StorageProof}; +/// Trie codec re-exports, mainly child trie support +/// for compact trie proofs. +pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; +pub use trie_db::proof::VerifyError; +use trie_db::proof::{generate_proof, verify_proof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ - Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, + nibble_ops, CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, + TrieLayout, TrieMut, }; -/// Various re-exports from the `memory-db` crate. -pub use memory_db::KeyFunction; -pub use memory_db::prefixed_key; -/// Various re-exports from the `hash-db` crate. -pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +/// The Substrate format implementation of `TrieStream`. +pub use trie_stream::TrieStream; #[derive(Default)] /// Substrate trie layout. @@ -57,7 +63,8 @@ impl TrieLayout for Layout { } impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where + fn trie_root(input: I) -> ::Out + where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -65,7 +72,8 @@ impl TrieConfiguration for Layout { trie_root::trie_root_no_extension::(input) } - fn trie_root_unhashed(input: I) -> Vec where + fn trie_root_unhashed(input: I) -> Vec + where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -93,19 +101,14 @@ pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a `KeyFunction` for prefixing keys internally (avoiding /// key conflicts for non-random keys). -pub type PrefixedMemoryDB = memory_db::MemoryDB< - H, memory_db::PrefixedKey, trie_db::DBValue, MemTracker ->; +pub type PrefixedMemoryDB = + memory_db::MemoryDB, trie_db::DBValue, MemTracker>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a no-op `KeyFunction` (key addressing must be hashed or use /// an encoding scheme that avoids key conflicts). -pub type MemoryDB = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, MemTracker, ->; +pub type MemoryDB = memory_db::MemoryDB, trie_db::DBValue, MemTracker>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type GenericMemoryDB = memory_db::MemoryDB< - H, KF, trie_db::DBValue, MemTracker ->; +pub type GenericMemoryDB = memory_db::MemoryDB; /// Persistent trie database read-access interface for a given hasher.
pub type TrieDB<'a, L> = trie_db::TrieDB<'a, L>; @@ -142,8 +145,9 @@ pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( db: &DB, root: TrieHash, keys: I, -) -> Result>, Box>> where - I: IntoIterator, +) -> Result>, Box>> +where + I: IntoIterator, K: 'a + AsRef<[u8]>, DB: hash_db::HashDBRef, { @@ -163,8 +167,9 @@ pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( root: &TrieHash, proof: &[Vec], items: I, -) -> Result<(), VerifyError, error::Error>> where - I: IntoIterator)>, +) -> Result<(), VerifyError, error::Error>> +where + I: IntoIterator)>, K: 'a + AsRef<[u8]>, V: 'a + AsRef<[u8]>, { @@ -175,8 +180,9 @@ pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( pub fn delta_trie_root( db: &mut DB, mut root: TrieHash, - delta: I -) -> Result, Box>> where + delta: I, +) -> Result, Box>> +where I: IntoIterator, A: Borrow<[u8]>, B: Borrow>, @@ -184,7 +190,7 @@ pub fn delta_trie_root( DB: hash_db::HashDB, { { - let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); @@ -204,40 +210,44 @@ pub fn delta_trie_root( pub fn read_trie_value>( db: &DB, root: &TrieHash, - key: &[u8] + key: &[u8], ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) } /// Read a value from the trie with given Query. pub fn read_trie_value_with< L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef + Q: Query, + DB: hash_db::HashDBRef, >( db: &DB, root: &TrieHash, key: &[u8], - query: Q + query: Q, ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + TrieDB::::new(&*db, root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec())) +} + +/// Determine the empty trie root. +pub fn empty_trie_root() -> ::Out { + L::trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine the empty child trie root. -pub fn empty_child_trie_root( -) -> ::Out { +pub fn empty_child_trie_root() -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - input: I, -) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, +pub fn child_trie_root(input: I) -> ::Out +where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, { L::trie_root(input) } @@ -250,59 +260,30 @@ pub fn child_delta_trie_root( root_data: RD, delta: I, ) -> Result<::Out, Box>> - where - I: IntoIterator, - A: Borrow<[u8]>, - B: Borrow>, - V: Borrow<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB +where + I: IntoIterator, + A: Borrow<[u8]>, + B: Borrow>, + V: Borrow<[u8]>, + RD: AsRef<[u8]>, + DB: hash_db::HashDB, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_data.as_ref()); let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - delta_trie_root::( - &mut db, - root, - delta, - ) -} - -/// Call `f` for all keys in a child trie. 
-pub fn for_keys_in_child_trie( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - mut f: F -) -> Result<(), Box>> - where - DB: hash_db::HashDBRef -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - let trie = TrieDB::::new(&db, &root)?; - let iter = trie.iter()?; - - for x in iter { - let (key, _) = x?; - f(&key); - } - - Ok(()) + delta_trie_root::(&mut db, root, delta) } /// Record all keys for a given root. pub fn record_all_keys( db: &DB, root: &TrieHash, - recorder: &mut Recorder> -) -> Result<(), Box>> where - DB: hash_db::HashDBRef + recorder: &mut Recorder>, +) -> Result<(), Box>> +where + DB: hash_db::HashDBRef, { let trie = TrieDB::::new(&*db, root)?; let iter = trie.iter()?; @@ -324,36 +305,38 @@ pub fn read_child_trie_value( keyspace: &[u8], db: &DB, root_slice: &[u8], - key: &[u8] + key: &[u8], ) -> Result>, Box>> - where - DB: hash_db::HashDBRef +where + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_slice); let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec())) } /// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( +pub fn read_child_trie_value_with, DB>( keyspace: &[u8], db: &DB, root_slice: &[u8], key: &[u8], - query: Q + query: Q, ) -> Result>, Box>> - where - DB: hash_db::HashDBRef +where + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_slice); let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + TrieDB::::new(&db, &root)? 
+ .get_with(key, query) + .map(|x| x.map(|val| val.to_vec())) } /// `HashDB` implementation that appends an encoded prefix (unique id bytes) in addition to the @@ -375,7 +358,8 @@ fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) (result, prefix.1) } -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where +impl<'a, DB, H> KeySpacedDB<'a, DB, H> +where H: Hasher, { /// Instantiate a new keyspaced DB. @@ -384,7 +368,8 @@ impl<'a, DB, H> KeySpacedDB<'a, DB, H> where } } -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where +impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> +where H: Hasher, { /// Instantiate a new keyspaced DB. @@ -393,7 +378,8 @@ impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where +impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> +where DB: hash_db::HashDBRef, H: Hasher, T: From<&'static [u8]>, @@ -409,7 +395,8 @@ impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where +impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> +where DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, @@ -440,12 +427,15 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where +impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> +where DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { + &*self + } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { &mut *self @@ -455,7 +445,7 @@ impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where /// Constants used in the trie simplification codec.
mod trie_constants { pub const EMPTY_TRIE: u8 = 0; - pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; + pub const NIBBLE_SIZE_BOUND: usize = u16::MAX as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; @@ -464,12 +454,12 @@ mod trie_constants { #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode, Compact}; - use sp_core::Blake2Hasher; + use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; - use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; - use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; + use sp_core::Blake2Hasher; + use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; + use trie_standardmap::{Alphabet, StandardMap, ValueMode}; type Layout = super::Layout; @@ -508,7 +498,8 @@ mod tests { let t = TrieDB::::new(&mut memdb, &root).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), - t.iter().unwrap() + t.iter() + .unwrap() .map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()) .collect::>() ); @@ -522,9 +513,11 @@ mod tests { let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = Layout::trie_root::<_, Vec, Vec>( - std::iter::empty(), - ).as_ref().iter().cloned().collect(); + let root2: Vec = Layout::trie_root::<_, Vec, Vec>(std::iter::empty()) + .as_ref() + .iter() + .cloned() + .collect(); assert_eq!(root1, root2); } @@ -545,20 +538,16 @@ mod tests { #[test] fn branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xba][..], &[0x11][..]), - ]; + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..])]; check_equivalent::(&input); check_iteration::(&input); } #[test] fn extension_and_branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xab][..], &[0x11][..]), - ]; + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..])]; check_equivalent::(&input); check_iteration::(&input); } @@ -584,7 +573,7 @@ mod tests { let input: Vec<(&[u8], &[u8])> = vec![ (&[0xaa][..], &[0xa0][..]), (&[0xaa, 0xaa][..], &[0xaa][..]), - (&[0xaa, 0xbb][..], &[0xab][..]) + (&[0xaa, 0xbb][..], &[0xab][..]), ]; check_equivalent::(&input); check_iteration::(&input); @@ -607,7 +596,10 @@ mod tests { #[test] fn single_long_leaf_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), (&[0xba][..], &[0x11][..]), ]; check_equivalent::(&input); @@ -617,8 +609,14 @@ mod tests { #[test] fn two_long_leaves_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), - (&[0xba][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]) + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + ( + &[0xba][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), ]; check_equivalent::(&input); check_iteration::(&input); @@ -627,11 +625,11 @@ mod tests { fn populate_trie<'db, T: TrieConfiguration>( db: &'db mut dyn HashDB, root: &'db mut TrieHash, 
- v: &[(Vec, Vec)] + v: &[(Vec, Vec)], ) -> TrieDBMut<'db, T> { let mut t = TrieDBMut::::new(db, root); for i in 0..v.len() { - let key: &[u8]= &v[i].0; + let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; t.insert(key, val).unwrap(); } @@ -643,7 +641,7 @@ mod tests { v: &[(Vec, Vec)], ) { for i in v { - let key: &[u8]= &i.0; + let key: &[u8] = &i.0; t.remove(key).unwrap(); } } @@ -661,7 +659,8 @@ mod tests { journal_key: 0, value_mode: ValueMode::Index, count: 100, - }.make_with(seed.as_fixed_bytes_mut()); + } + .make_with(seed.as_fixed_bytes_mut()); let real = Layout::trie_root(x.clone()); let mut memdb = MemoryDB::default(); @@ -707,17 +706,18 @@ mod tests { #[test] fn codec_trie_single_tuple() { - let input = vec![ - (vec![0xaa], vec![0xbb]) - ]; + let input = vec![(vec![0xaa], vec![0xbb])]; let trie = Layout::trie_root_unhashed::<_, _, _>(input); println!("trie: {:#x?}", trie); - assert_eq!(trie, vec![ - 0x42, // leaf 0x40 (2^6) with (+) key of 2 nibbles (0x02) - 0xaa, // key data - to_compact(1), // length of value in bytes as Compact - 0xbb // value data - ]); + assert_eq!( + trie, + vec![ + 0x42, // leaf 0x40 (2^6) with (+) key of 2 nibbles (0x02) + 0xaa, // key data + to_compact(1), // length of value in bytes as Compact + 0xbb // value data + ] + ); } #[test] @@ -726,21 +726,21 @@ mod tests { let trie = Layout::trie_root_unhashed::<_, _, _>(input); println!("trie: {:#x?}", trie); let mut ex = Vec::::new(); - ex.push(0x80); // branch, no value (0b_10..) no nibble - ex.push(0x12); // slots 1 & 4 are taken from 0-7 - ex.push(0x00); // no slots from 8-15 - ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. - ex.push(0x43); // leaf 0x40 with 3 nibbles - ex.push(0x03); // first nibble - ex.push(0x14); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xff); // value data - ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. - ex.push(0x43); // leaf with 3 nibbles - ex.push(0x08); // first nibble - ex.push(0x19); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xfe); // value data + ex.push(0x80); // branch, no value (0b_10..) no nibble + ex.push(0x12); // slots 1 & 4 are taken from 0-7 + ex.push(0x00); // no slots from 8-15 + ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. + ex.push(0x43); // leaf 0x40 with 3 nibbles + ex.push(0x03); // first nibble + ex.push(0x14); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xff); // value data + ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. + ex.push(0x43); // leaf with 3 nibbles + ex.push(0x08); // first nibble + ex.push(0x19); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xfe); // value data assert_eq!(trie, ex); } @@ -780,27 +780,25 @@ mod tests { populate_trie::(&mut memdb, &mut root, &pairs); let non_included_key: Vec = hex!("0909").to_vec(); - let proof = generate_trie_proof::( - &memdb, - root, - &[non_included_key.clone()] - ).unwrap(); + let proof = + generate_trie_proof::(&memdb, root, &[non_included_key.clone()]) + .unwrap(); // Verifying that the K was not included into the trie should work. assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key.clone(), None)], - ).is_ok() - ); + &root, + &proof, + &[(non_included_key.clone(), None)], + ) + .is_ok()); // Verifying that the K was included into the trie should fail. 
assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key, Some(hex!("1010").to_vec()))], - ).is_err() - ); + &root, + &proof, + &[(non_included_key, Some(hex!("1010").to_vec()))], + ) + .is_err()); } #[test] @@ -814,71 +812,71 @@ mod tests { let mut root = Default::default(); populate_trie::(&mut memdb, &mut root, &pairs); - let proof = generate_trie_proof::( - &memdb, - root, - &[pairs[0].0.clone()] - ).unwrap(); + let proof = + generate_trie_proof::(&memdb, root, &[pairs[0].0.clone()]).unwrap(); // Check that a K, V included into the proof are verified. assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] - ).is_ok() - ); + &root, + &proof, + &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] + ) + .is_ok()); // Absence of the V is not verified with the proof that has K, V included. assert!(verify_trie_proof::>( - &root, - &proof, - &[(pairs[0].0.clone(), None)] - ).is_err() - ); + &root, + &proof, + &[(pairs[0].0.clone(), None)] + ) + .is_err()); // K not included into the trie is not verified. assert!(verify_trie_proof::( - &root, - &proof, - &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] - ).is_err() - ); + &root, + &proof, + &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] + ) + .is_err()); // K included into the trie but not included into the proof is not verified. assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] - ).is_err() - ); + &root, + &proof, + &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] + ) + .is_err()); } #[test] fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); - let storage_root = sp_core::H256::decode( - &mut &include_bytes!("../test-res/storage_root")[..], - ).unwrap(); + let storage_root = + sp_core::H256::decode(&mut &include_bytes!("../test-res/storage_root")[..]).unwrap(); // Delta order that is "invalid" so that it would require a different proof. let invalid_delta = Vec::<(Vec, Option>)>::decode( &mut &include_bytes!("../test-res/invalid-delta-order")[..], - ).unwrap(); + ) + .unwrap(); // Delta order that is "valid" let valid_delta = Vec::<(Vec, Option>)>::decode( &mut &include_bytes!("../test-res/valid-delta-order")[..], - ).unwrap(); + ) + .unwrap(); let proof_db = proof.into_memory_db::(); let first_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, valid_delta, - ).unwrap(); + ) + .unwrap(); let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, - ).unwrap(); + ) + .unwrap(); assert_eq!(first_storage_root, second_storage_root); } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 8a61f372cf2aa..d5ffb3219cf68 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -1,32 +1,32 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! `NodeCodec` implementation for Substrate's trie format. -use sp_std::marker::PhantomData; -use sp_std::ops::Range; -use sp_std::vec::Vec; -use sp_std::borrow::Borrow; -use codec::{Encode, Decode, Input, Compact}; +use super::node_header::{NodeHeader, NodeKind}; +use crate::{error::Error, trie_constants}; +use codec::{Compact, Decode, Encode, Input}; use hash_db::Hasher; -use trie_db::{self, node::{NibbleSlicePlan, NodePlan, NodeHandlePlan}, ChildReference, - nibble_ops, Partial, NodeCodec as NodeCodecT}; -use crate::error::Error; -use crate::trie_constants; -use super::{node_header::{NodeHeader, NodeKind}}; +use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; +use trie_db::{ + self, nibble_ops, + node::{NibbleSlicePlan, NodeHandlePlan, NodePlan}, + ChildReference, NodeCodec as NodeCodecT, Partial, +}; /// Helper struct for trie node decoder. This implements `codec::Input` on a byte slice, while /// tracking the absolute position. This is similar to `std::io::Cursor` but does not implement @@ -38,15 +38,12 @@ struct ByteSliceInput<'a> { impl<'a> ByteSliceInput<'a> { fn new(data: &'a [u8]) -> Self { - ByteSliceInput { - data, - offset: 0, - } + ByteSliceInput { data, offset: 0 } } fn take(&mut self, count: usize) -> Result, codec::Error> { if self.offset + count > self.data.len() { - return Err("out of data".into()); + return Err("out of data".into()) } let range = self.offset..(self.offset + count); @@ -57,11 +54,8 @@ impl<'a> ByteSliceInput<'a> { impl<'a> Input for ByteSliceInput<'a> { fn remaining_len(&mut self) -> Result, codec::Error> { - let remaining = if self.offset <= self.data.len() { - Some(self.data.len() - self.offset) - } else { - None - }; + let remaining = + if self.offset <= self.data.len() { Some(self.data.len() - self.offset) } else { None }; Ok(remaining) } @@ -73,7 +67,7 @@ impl<'a> Input for ByteSliceInput<'a> { fn read_byte(&mut self) -> Result { if self.offset + 1 > self.data.len() { - return Err("out of data".into()); + return Err("out of data".into()) } let byte = self.data[self.offset]; @@ -102,10 +96,11 @@ impl NodeCodecT for NodeCodec { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); + return Err(Error::BadFormat) } let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / + nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let bitmap_range = input.take(BITMAP_LENGTH)?; @@ -117,8 +112,8 @@ impl NodeCodecT for NodeCodec { None }; let mut children = [ - None, None, None, None, None, 
None, None, None, - None, None, None, None, None, None, None, None, + None, None, None, None, None, None, None, None, None, None, None, None, None, + None, None, None, ]; for i in 0..nibble_ops::NIBBLE_LENGTH { if bitmap.value_at(i) { @@ -136,15 +131,16 @@ impl NodeCodecT for NodeCodec { value, children, }) - } + }, NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); + return Err(Error::BadFormat) } let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / + nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let count = >::decode(&mut input)?.0 as usize; @@ -152,7 +148,7 @@ impl NodeCodecT for NodeCodec { partial: NibbleSlicePlan::new(partial, partial_padding), value: input.take(count)?, }) - } + }, } } @@ -198,26 +194,28 @@ impl NodeCodecT for NodeCodec { }; let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; - (0..BITMAP_LENGTH).for_each(|_|output.push(0)); + (0..BITMAP_LENGTH).for_each(|_| output.push(0)); if let Some(value) = maybe_value { value.encode_to(&mut output); }; - Bitmap::encode(children.map(|maybe_child| match maybe_child.borrow() { - Some(ChildReference::Hash(h)) => { - h.as_ref().encode_to(&mut output); - true - } - &Some(ChildReference::Inline(inline_data, len)) => { - inline_data.as_ref()[..len].encode_to(&mut output); - true - } - None => false, - }), bitmap.as_mut()); + Bitmap::encode( + children.map(|maybe_child| match maybe_child.borrow() { + Some(ChildReference::Hash(h)) => { + h.as_ref().encode_to(&mut output); + true + }, + &Some(ChildReference::Inline(inline_data, len)) => { + inline_data.as_ref()[..len].encode_to(&mut output); + true + }, + None => false, + }), + bitmap.as_mut(), + ); output[bitmap_index..bitmap_index + BITMAP_LENGTH] .copy_from_slice(&bitmap[..BITMAP_LENGTH]); output } - } // utils @@ -258,7 +256,7 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { if number_nibble_encoded > 0 { output.push(nibble_ops::pad_right((partial.0).1)); } - output.extend_from_slice(&partial.1[..]); + output.extend_from_slice(partial.1); output } @@ -271,19 +269,21 @@ const BITMAP_LENGTH: usize = 2; pub(crate) struct Bitmap(u16); impl Bitmap { - pub fn decode(data: &[u8]) -> Result { - Ok(Bitmap(u16::decode(&mut &data[..])?)) + pub fn decode(mut data: &[u8]) -> Result { + Ok(Bitmap(u16::decode(&mut data)?)) } pub fn value_at(&self, i: usize) -> bool { self.0 & (1u16 << i) != 0 } - pub fn encode>(has_children: I , dest: &mut [u8]) { + pub fn encode>(has_children: I, dest: &mut [u8]) { let mut bitmap: u16 = 0; let mut cursor: u16 = 1; for v in has_children { - if v { bitmap |= cursor } + if v { + bitmap |= cursor + } cursor <<= 1; } dest[0] = (bitmap % 256) as u8; diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 7aa16292549ed..9f05113a35935 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -1,28 +1,28 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! The node header. use crate::trie_constants; -use codec::{Encode, Decode, Input, Output}; +use codec::{Decode, Encode, Input, Output}; use sp_std::iter::once; /// A node header -#[derive(Copy, Clone, PartialEq, Eq)] -#[derive(sp_core::RuntimeDebug)] +#[derive(Copy, Clone, PartialEq, Eq, sp_core::RuntimeDebug)] pub(crate) enum NodeHeader { Null, Branch(bool, usize), @@ -37,10 +37,10 @@ pub(crate) enum NodeKind { } impl Encode for NodeHeader { - fn encode_to(&self, output: &mut T) { + fn encode_to(&self, output: &mut T) { match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), - NodeHeader::Branch(true, nibble_count) => + NodeHeader::Branch(true, nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, output), NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output), @@ -56,12 +56,14 @@ impl Decode for NodeHeader { fn decode(input: &mut I) -> Result { let i = input.read_byte()?; if i == trie_constants::EMPTY_TRIE { - return Ok(NodeHeader::Null); + return Ok(NodeHeader::Null) } match i & (0b11 << 6) { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input)?)), - trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input)?)), - trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input)?)), + trie_constants::BRANCH_WITHOUT_MASK => + Ok(NodeHeader::Branch(false, decode_size(i, input)?)), + trie_constants::BRANCH_WITH_MASK => + Ok(NodeHeader::Branch(true, decode_size(i, input)?)), // do not allow any special encoding _ => Err("Unallowed encoding".into()), } @@ -75,11 +77,8 @@ pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); let l1 = sp_std::cmp::min(62, size); - let (first_byte, mut rem) = if size == l1 { - (once(prefix + l1 as u8), 0) - } else { - (once(prefix + 63), size - l1) - }; + let (first_byte, mut rem) = + if size == l1 { (once(prefix + l1 as u8), 0) } else { (once(prefix + 63), size - l1) }; let next_bytes = move || { if rem > 0 { if rem < 256 { @@ -98,7 +97,7 @@ pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator } 
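The scheme above packs nibble counts of 0..=62 directly into the six free bits of the header byte; 63 marks an escape, after which the remainder is carried in 255-saturated continuation bytes, with the final byte offset by one. A standalone sketch of the encoder with a few round-trip checks (hypothetical free function; the `NIBBLE_SIZE_BOUND` clamp is left out for brevity):

```rust
/// Sketch: materialize `size_and_prefix_iterator`'s output as a byte vector.
fn encode_size_with_prefix(size: usize, prefix: u8) -> Vec<u8> {
    let l1 = size.min(62);
    if size == l1 {
        // Sizes up to 62 fit into the six low bits of the header byte.
        return vec![prefix + size as u8]
    }
    let mut out = vec![prefix + 63]; // 63 flags "continuation bytes follow"
    let mut rem = size - l1;
    while rem > 0 {
        if rem < 256 {
            out.push((rem - 1) as u8); // final byte is offset by one
            rem = 0;
        } else {
            out.push(255); // saturated byte, keep accumulating
            rem -= 255;
        }
    }
    out
}

fn main() {
    const LEAF: u8 = 0b01 << 6; // mirrors trie_constants::LEAF_PREFIX_MASK
    assert_eq!(encode_size_with_prefix(5, LEAF), vec![LEAF + 5]);
    assert_eq!(encode_size_with_prefix(63, LEAF), vec![LEAF + 63, 0]);
    assert_eq!(encode_size_with_prefix(318, LEAF), vec![LEAF + 63, 255, 0]);
}
```

These values decode back through `decode_size`, which reads the six low bits, and on the escape value 63 keeps adding continuation bytes until one below 255 terminates the run.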
/// Encodes size and prefix to a stream output. -fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut impl Output) { +fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut W) { for b in size_and_prefix_iterator(size, prefix) { out.push_byte(b) } @@ -108,13 +107,13 @@ fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut impl Output) { fn decode_size(first: u8, input: &mut impl Input) -> Result { let mut result = (first & 255u8 >> 2) as usize; if result < 63 { - return Ok(result); + return Ok(result) } result -= 1; while result <= trie_constants::NIBBLE_SIZE_BOUND { let n = input.read_byte()? as usize; if n < 255 { - return Ok(result + n + 1); + return Ok(result + n + 1) } result += 255; } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 254adc2fcb48a..cfdb8566ea75f 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -1,22 +1,24 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{Decode, Encode}; +use hash_db::{HashDB, Hasher}; +use scale_info::TypeInfo; use sp_std::vec::Vec; -use codec::{Encode, Decode}; -use hash_db::{Hasher, HashDB}; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -25,11 +27,17 @@ use hash_db::{Hasher, HashDB}; /// The proof consists of the set of serialized nodes in the storage trie accessed when looking up /// the keys covered by the proof. Verifying the proof requires constructing the partial trie from /// the serialized nodes and performing the key lookups. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct StorageProof { trie_nodes: Vec>, } +/// Storage proof in compact form. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] +pub struct CompactProof { + pub encoded_nodes: Vec>, +} + impl StorageProof { /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. 
pub fn new(trie_nodes: Vec>) -> Self { @@ -41,9 +49,7 @@ impl StorageProof { /// An empty proof is capable of only proving trivial statements (i.e. that an empty set of /// key-value pairs exists in storage). pub fn empty() -> Self { - StorageProof { - trie_nodes: Vec::new(), - } + StorageProof { trie_nodes: Vec::new() } } /// Returns whether this is an empty proof. @@ -57,16 +63,24 @@ impl StorageProof { StorageProofNodeIterator::new(self) } + /// Convert into a plain node vector. + pub fn into_nodes(self) -> Vec> { + self.trie_nodes + } /// Creates a `MemoryDB` from `Self`. pub fn into_memory_db(self) -> crate::MemoryDB { self.into() } /// Merges multiple storage proofs covering potentially different sets of keys into one proof - /// covering all keys. The merged proof output may be smaller than the aggregate size of the input - /// proofs due to deduplication of trie nodes. - pub fn merge(proofs: I) -> Self where I: IntoIterator { - let trie_nodes = proofs.into_iter() + /// covering all keys. The merged proof output may be smaller than the aggregate size of the + /// input proofs due to deduplication of trie nodes. + pub fn merge(proofs: I) -> Self + where + I: IntoIterator, + { + let trie_nodes = proofs + .into_iter() .flat_map(|proof| proof.iter_nodes()) .collect::>() .into_iter() @@ -74,6 +88,57 @@ impl StorageProof { Self { trie_nodes } } + + /// Encode as a compact proof with the default + /// trie layout. + pub fn into_compact_proof( + self, + root: H::Out, + ) -> Result>> { + crate::encode_compact::>(self, root) + } + + /// Returns the estimated encoded size of the compact proof. + /// + /// Running this is a slow operation (it builds the whole compact proof) and should only be + /// used in non-sensitive paths. + /// Returns `None` on error. + pub fn encoded_compact_size(self, root: H::Out) -> Option { + let compact_proof = self.into_compact_proof::(root); + compact_proof.ok().map(|p| p.encoded_size()) + } +} + +impl CompactProof { + /// Returns an iterator over the compact-encoded nodes. + pub fn iter_compact_encoded_nodes(&self) -> impl Iterator { + self.encoded_nodes.iter().map(Vec::as_slice) + } + + /// Decode to a full storage proof. + /// + /// This method uses a temporary `HashDB`; calling `sp_trie::decode_compact` + /// directly is often better. + pub fn to_storage_proof( + &self, + expected_root: Option<&H::Out>, + ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { + let mut db = crate::MemoryDB::::new(&[]); + let root = crate::decode_compact::, _, _>( + &mut db, + self.iter_compact_encoded_nodes(), + expected_root, + )?; + Ok(( + StorageProof::new( + db.drain() + .into_iter() + .filter_map(|kv| if (kv.1).1 > 0 { Some((kv.1).0) } else { None }) + .collect(), + ), + root, + )) + } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to @@ -84,9 +149,7 @@ pub struct StorageProofNodeIterator { impl StorageProofNodeIterator { fn new(proof: StorageProof) -> Self { - StorageProofNodeIterator { - inner: proof.trie_nodes.into_iter(), - } + StorageProofNodeIterator { inner: proof.trie_nodes.into_iter() } } }
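`into_compact_proof` and `to_storage_proof` give a round trip between the two representations. A usage sketch, assuming a Blake2-256 layout (`sp_core::Blake2Hasher`) and a minimal `proof` recorded against `root` (both stand-ins, not part of the diff):

```rust
use std::collections::BTreeSet;
use sp_core::{Blake2Hasher, H256};
use sp_trie::StorageProof;

fn roundtrip(proof: StorageProof, root: H256) {
    // Compacting walks the trie from `root`, so the proof must cover it.
    let compact = proof
        .clone()
        .into_compact_proof::<Blake2Hasher>(root)
        .expect("proof covers `root`");
    // Decoding rebuilds the full nodes and, when an expected root is given,
    // verifies the recomputed root against it.
    let (restored, restored_root) = compact
        .to_storage_proof::<Blake2Hasher>(Some(&root))
        .expect("compact proof is valid");
    assert_eq!(restored_root, root);
    // Node order is not canonical; for a minimal proof the rebuilt node set
    // matches the original.
    let before: BTreeSet<_> = proof.into_nodes().into_iter().collect();
    let after: BTreeSet<_> = restored.into_nodes().into_iter().collect();
    assert_eq!(before, after);
}
```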
diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs new file mode 100644 index 0000000000000..1596229f2b5de --- /dev/null +++ b/primitives/trie/src/trie_codec.rs @@ -0,0 +1,252 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Compact proof support. +//! +//! This uses the compact proof from the trie crate and extends +//! it to the Substrate-specific layout and child trie system. + +use crate::{ + CompactProof, HashDBT, StorageProof, TrieConfiguration, TrieError, TrieHash, EMPTY_PREFIX, +}; +use sp_std::{boxed::Box, vec::Vec}; +#[cfg(feature = "std")] +use std::error::Error as StdError; +#[cfg(feature = "std")] +use std::fmt; +use trie_db::Trie; + +/// Errors that can occur while encoding or decoding a compact proof. +pub enum Error { + /// Verification failed due to root mismatch. + RootMismatch(TrieHash, TrieHash), + /// Missing nodes in proof. + IncompleteProof, + /// An extraneous compact node that is not needed. + ExtraneousChildNode, + /// Child trie content with a root that is not in the proof. + ExtraneousChildProof(TrieHash), + /// Bad child trie root. + InvalidChildRoot(Vec, Vec), + /// Errors from trie crate. + TrieError(Box>), +} + +impl From>> for Error { + fn from(error: Box>) -> Self { + Error::TrieError(error) + } +} + +#[cfg(feature = "std")] +impl StdError for Error { + fn description(&self) -> &str { + match self { + Error::InvalidChildRoot(..) => "Invalid child root error", + Error::TrieError(..) => "Trie db error", + Error::RootMismatch(..) => "Root mismatch error", + Error::IncompleteProof => "Incomplete proof", + Error::ExtraneousChildNode => "Extraneous child node", + Error::ExtraneousChildProof(..) => "Extraneous child proof", + } + } +} + +#[cfg(feature = "std")] +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + ::fmt(&self, f) + } +} + +#[cfg(feature = "std")] +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::InvalidChildRoot(k, v) => write!(f, "InvalidChildRoot at {:x?}: {:x?}", k, v), + Error::TrieError(e) => write!(f, "Trie error: {}", e), + Error::IncompleteProof => write!(f, "Incomplete proof"), + Error::ExtraneousChildNode => write!(f, "Child node content with no root in proof"), + Error::ExtraneousChildProof(root) => { + write!(f, "Proof of child trie {:x?} not in parent proof", root.as_ref()) + }, + Error::RootMismatch(root, expected) => write!( + f, + "Verification error, root is {:x?}, expected: {:x?}", + root.as_ref(), + expected.as_ref(), + ), + } + } +} + +/// Decode a compact proof. +/// +/// Takes as input a destination `db` for the decoded nodes and `encoded`, +/// an iterator of compact-encoded nodes. +/// +/// Child tries are decoded in the order in which their roots appear +/// in the top trie. +pub fn decode_compact<'a, L, DB, I>( + db: &mut DB, + encoded: I, + expected_root: Option<&TrieHash>, +) -> Result, Error> +where + L: TrieConfiguration, + DB: HashDBT + hash_db::HashDBRef, + I: IntoIterator, +{ + let mut nodes_iter = encoded.into_iter(); + let (top_root, _nb_used) = + trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; + + // Only check root if expected root is passed as argument.
+ if let Some(expected_root) = expected_root { + if expected_root != &top_root { + return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())) + } + } + + let mut child_tries = Vec::new(); + { + // fetch child trie roots + let trie = crate::TrieDB::::new(db, &top_root)?; + + let mut iter = trie.iter()?; + + let childtrie_roots = sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; + if iter.seek(childtrie_roots).is_ok() { + loop { + match iter.next() { + Some(Ok((key, value))) if key.starts_with(childtrie_roots) => { + // We expect all default child trie roots to be correctly encoded; + // see the other child trie functions. + let mut root = TrieHash::::default(); + // Still in a proof, so prevent a panic. + if root.as_mut().len() != value.as_slice().len() { + return Err(Error::InvalidChildRoot(key, value)) + } + root.as_mut().copy_from_slice(value.as_ref()); + child_tries.push(root); + }, + // allow incomplete database error: we only + // require access to data in the proof. + Some(Err(error)) => match *error { + trie_db::TrieError::IncompleteDatabase(..) => (), + e => return Err(Box::new(e).into()), + }, + _ => break, + } + } + } + } + + if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { + return Err(Error::IncompleteProof) + } + + let mut previous_extracted_child_trie = None; + let mut nodes_iter = nodes_iter.peekable(); + for child_root in child_tries.into_iter() { + if previous_extracted_child_trie.is_none() && nodes_iter.peek().is_some() { + let (top_root, _) = + trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; + previous_extracted_child_trie = Some(top_root); + } + + // We do not exit early on a root mismatch, but try the + // other reads from the proof (some child roots may be + // in the proof without actual child content). + if Some(child_root) == previous_extracted_child_trie { + previous_extracted_child_trie = None; + } + } + + if let Some(child_root) = previous_extracted_child_trie { + // A child root was read from the proof but is not present + // in the top trie. + return Err(Error::ExtraneousChildProof(child_root)) + } + + if nodes_iter.next().is_some() { + return Err(Error::ExtraneousChildNode) + } + + Ok(top_root) +} + +/// Encode a compact proof. +/// +/// Takes as input all fully encoded nodes from the proof, and +/// the root. +/// It then parses all child trie roots and compresses the main trie content first, +/// then all child trie contents. +/// Child tries are ordered by the order of their roots in the top trie. +pub fn encode_compact(proof: StorageProof, root: TrieHash) -> Result> +where + L: TrieConfiguration, +{ + let mut child_tries = Vec::new(); + let partial_db = proof.into_memory_db(); + let mut compact_proof = { + let trie = crate::TrieDB::::new(&partial_db, &root)?; + + let mut iter = trie.iter()?; + + let childtrie_roots = sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; + if iter.seek(childtrie_roots).is_ok() { + loop { + match iter.next() { + Some(Ok((key, value))) if key.starts_with(childtrie_roots) => { + let mut root = TrieHash::::default(); + if root.as_mut().len() != value.as_slice().len() { + // Some child trie roots in the top trie are not an encoded hash. + return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())) + } + root.as_mut().copy_from_slice(value.as_ref()); + child_tries.push(root); + }, + // allow incomplete database error: we only + // require access to data in the proof. + Some(Err(error)) => match *error { + trie_db::TrieError::IncompleteDatabase(..)
+						e => return Err(Box::new(e).into()),
+					},
+					_ => break,
+				}
+			}
+		}
+
+		trie_db::encode_compact::<L>(&trie)?
+	};
+
+	for child_root in child_tries {
+		if !HashDBT::<L::Hash, _>::contains(&partial_db, &child_root, EMPTY_PREFIX) {
+			// child proofs are allowed to be missing (an unused root can be included
+			// due to trie structure modification).
+			continue
+		}
+
+		let trie = crate::TrieDB::<L>::new(&partial_db, &child_root)?;
+		let child_proof = trie_db::encode_compact::<L>(&trie)?;
+
+		compact_proof.extend(child_proof);
+	}
+
+	Ok(CompactProof { encoded_nodes: compact_proof })
+}
diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs
index 0c92e673aae93..e0e26fea67c2e 100644
--- a/primitives/trie/src/trie_stream.rs
+++ b/primitives/trie/src/trie_stream.rs
@@ -1,28 +1,31 @@
-// Copyright 2015-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.
 
-// Parity is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+// Copyright (C) 2015-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 //! `TrieStream` implementation for Substrate's trie format.
 
-use hash_db::Hasher;
-use trie_root;
+use crate::{
+	node_codec::Bitmap,
+	node_header::{size_and_prefix_iterator, NodeKind},
+	trie_constants,
+};
 use codec::Encode;
+use hash_db::Hasher;
 use sp_std::vec::Vec;
-use crate::trie_constants;
-use crate::node_header::{NodeKind, size_and_prefix_iterator};
-use crate::node_codec::Bitmap;
+use trie_root;
 
 const BRANCH_NODE_NO_VALUE: u8 = 254;
 const BRANCH_NODE_WITH_VALUE: u8 = 255;
@@ -35,41 +38,42 @@ pub struct TrieStream {
 
 impl TrieStream {
 	// useful for debugging but not used otherwise
-	pub fn as_raw(&self) -> &[u8] { &self.buffer }
+	pub fn as_raw(&self) -> &[u8] {
+		&self.buffer
+	}
 }
 
 fn branch_node_bit_mask(has_children: impl Iterator<Item = bool>) -> (u8, u8) {
 	let mut bitmap: u16 = 0;
 	let mut cursor: u16 = 1;
 	for v in has_children {
-		if v { bitmap |= cursor }
+		if v {
+			bitmap |= cursor
+		}
 		cursor <<= 1;
 	}
-	((bitmap % 256 ) as u8, (bitmap / 256 ) as u8)
+	((bitmap % 256) as u8, (bitmap / 256) as u8)
 }
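Stepping back from the hunks for a moment: the `trie_codec.rs` file added above is meant to be used as a compress/expand pair. Below is a minimal, illustrative sketch of that round trip, written against the names introduced in that file (it assumes sp-trie's `MemoryDB` alias and a concrete `L: TrieConfiguration` in scope; it is not part of the diff):

```rust
// Sketch only: compress a storage proof and expand it back, verifying the root.
fn compact_roundtrip<L: TrieConfiguration>(
	proof: StorageProof,
	root: TrieHash<L>,
) -> Result<(), Error<L>> {
	// Main trie first, then child tries ordered by root position in the top trie.
	let compact = encode_compact::<L>(proof, root)?;

	// Decode into a fresh node database; passing `Some(&root)` makes decoding
	// fail with `Error::RootMismatch` if the reconstructed root differs.
	let mut db = crate::MemoryDB::<L::Hash>::default();
	let top_root = decode_compact::<L, _, _>(
		&mut db,
		compact.encoded_nodes.iter().map(Vec::as_slice),
		Some(&root),
	)?;
	debug_assert_eq!(top_root, root);
	Ok(())
}
```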
 
-
 /// Create a leaf/branch node, encoding a number of nibbles.
 fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator<Item = u8> + 'a {
 	let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len());
 
 	let iter_start = match kind {
 		NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK),
-		NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK),
-		NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK),
+		NodeKind::BranchNoValue =>
+			size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK),
+		NodeKind::BranchWithValue =>
+			size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK),
 	};
 	iter_start
 		.chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None })
 		.chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1]))
 }
 
-
 impl trie_root::TrieStream for TrieStream {
 	fn new() -> Self {
-		TrieStream {
-			buffer: Vec::new()
-		}
+		TrieStream { buffer: Vec::new() }
 	}
 
 	fn append_empty_data(&mut self) {
@@ -94,7 +98,7 @@ impl trie_root::TrieStream for TrieStream {
 			self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue));
 		}
 		let bm = branch_node_bit_mask(has_children);
-		self.buffer.extend([bm.0,bm.1].iter());
+		self.buffer.extend([bm.0, bm.1].iter());
 	} else {
 		debug_assert!(false, "trie stream codec only for no extension trie");
 		self.buffer.extend(&branch_node(maybe_value.is_some(), has_children));
@@ -116,7 +120,9 @@ impl trie_root::TrieStream for TrieStream {
 		}
 	}
 
-	fn out(self) -> Vec<u8> { self.buffer }
+	fn out(self) -> Vec<u8> {
+		self.buffer
+	}
 }
 
 fn branch_node(has_value: bool, has_children: impl Iterator<Item = bool>) -> [u8; 3] {
@@ -125,15 +131,11 @@ fn branch_node(has_value: bool, has_children: impl Iterator<Item = bool>) -> [u8; 3] {
 	result
 }
 
-fn branch_node_buffered<I>(has_value: bool, has_children: I, output: &mut[u8])
-	where
-		I: Iterator<Item = bool>,
+fn branch_node_buffered<I>(has_value: bool, has_children: I, output: &mut [u8])
+where
+	I: Iterator<Item = bool>,
 {
-	let first = if has_value {
-		BRANCH_NODE_WITH_VALUE
-	} else {
-		BRANCH_NODE_NO_VALUE
-	};
+	let first = if has_value { BRANCH_NODE_WITH_VALUE } else { BRANCH_NODE_NO_VALUE };
 	output[0] = first;
 	Bitmap::encode(has_children, &mut output[1..]);
 }
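For intuition on the two-byte child bitmap that `branch_node_bit_mask` computes in the file above: it is simply a little-endian `u16` with one bit per possible child nibble, split into a low and a high byte. A standalone sketch mirroring that private helper (illustrative, not part of the diff):

```rust
// Mirror of `branch_node_bit_mask`: bit i of the u16 marks a child at nibble i.
fn bit_mask(has_children: impl Iterator<Item = bool>) -> (u8, u8) {
	let mut bitmap: u16 = 0;
	let mut cursor: u16 = 1;
	for v in has_children {
		if v {
			bitmap |= cursor
		}
		cursor <<= 1;
	}
	((bitmap % 256) as u8, (bitmap / 256) as u8)
}

fn main() {
	// Children present at nibbles 0, 3 and 15 (out of 16 possible children).
	let mut has_children = [false; 16];
	has_children[0] = true;
	has_children[3] = true;
	has_children[15] = true;
	// Low byte carries nibbles 0..=7, high byte carries nibbles 8..=15.
	assert_eq!(bit_mask(has_children.iter().copied()), (0b0000_1001, 0b1000_0000));
}
```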
diff --git a/primitives/utils/README.md b/primitives/utils/README.md
deleted file mode 100644
index b0e04a3f4f198..0000000000000
--- a/primitives/utils/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Utilities Primitives for Substrate
-
-License: Apache-2.0
\ No newline at end of file
diff --git a/primitives/utils/src/metrics.rs b/primitives/utils/src/metrics.rs
deleted file mode 100644
index a66589b5927fe..0000000000000
--- a/primitives/utils/src/metrics.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Metering primitives and globals
-
-use lazy_static::lazy_static;
-use prometheus::{
-	Registry, Error as PrometheusError,
-	core::{ AtomicU64, GenericGauge, GenericCounter },
-};
-
-#[cfg(feature = "metered")]
-use prometheus::{core::GenericCounterVec, Opts};
-
-
-lazy_static! {
-	pub static ref TOKIO_THREADS_TOTAL: GenericCounter<AtomicU64> = GenericCounter::new(
-		"tokio_threads_total", "Total number of threads created"
-	).expect("Creating of statics doesn't fail. qed");
-
-	pub static ref TOKIO_THREADS_ALIVE: GenericGauge<AtomicU64> = GenericGauge::new(
-		"tokio_threads_alive", "Number of threads alive right now"
-	).expect("Creating of statics doesn't fail. qed");
-}
-
-#[cfg(feature = "metered")]
-lazy_static! {
-	pub static ref UNBOUNDED_CHANNELS_COUNTER: GenericCounterVec<AtomicU64> = GenericCounterVec::new(
-		Opts::new("unbounded_channel_len", "Items in each mpsc::unbounded instance"),
-		&["entity", "action"] // name of channel, send|received|dropped
-	).expect("Creating of statics doesn't fail. qed");
-
-}
-
-
-/// Register the statics to report to registry
-pub fn register_globals(registry: &Registry) -> Result<(), PrometheusError> {
-	registry.register(Box::new(TOKIO_THREADS_ALIVE.clone()))?;
-	registry.register(Box::new(TOKIO_THREADS_TOTAL.clone()))?;
-
-	#[cfg(feature = "metered")]
-	registry.register(Box::new(UNBOUNDED_CHANNELS_COUNTER.clone()))?;
-
-	Ok(())
-}
diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml
index e9475846246ee..fcab1eeabcaf4 100644
--- a/primitives/version/Cargo.toml
+++ b/primitives/version/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-version"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -16,10 +16,14 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 impl-serde = { version = "0.3.1", optional = true }
-serde = { version = "1.0.101", optional = true, features = ["derive"] }
-codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] }
-sp-std = { version = "2.0.0", default-features = false, path = "../std" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" }
+serde = { version = "1.0.126", optional = true, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" }
+sp-version-proc-macro = { version = "4.0.0-dev", default-features = false, path = "proc-macro" }
+parity-wasm = { version = "0.42.2", optional = true }
+thiserror = { version = "1.0.21", optional = true }
 
 [features]
 default = ["std"]
@@ -27,6 +31,9 @@ std = [
 	"impl-serde",
 	"serde",
 	"codec/std",
+	"scale-info/std",
 	"sp-std/std",
 	"sp-runtime/std",
+	"parity-wasm",
+	"thiserror",
 ]
diff --git a/primitives/version/proc-macro/Cargo.toml b/primitives/version/proc-macro/Cargo.toml
new file mode 100644
index 0000000000000..c3c801431434a
--- /dev/null
+++ b/primitives/version/proc-macro/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "sp-version-proc-macro"
+version = "4.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "Apache-2.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "Macro for defining a runtime version."
+documentation = "https://docs.rs/sp-version-proc-macro"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+proc-macro = true
+
+[dependencies]
+quote = "1.0.3"
+syn = { version = "1.0.58", features = ["full", "fold", "extra-traits", "visit"] }
+proc-macro2 = "1.0.29"
+codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] }
+
+[dev-dependencies]
+sp-version = { version = "4.0.0-dev", path = ".." }
diff --git a/primitives/version/proc-macro/src/decl_runtime_version.rs b/primitives/version/proc-macro/src/decl_runtime_version.rs
new file mode 100644
index 0000000000000..eef6314be4c81
--- /dev/null
+++ b/primitives/version/proc-macro/src/decl_runtime_version.rs
@@ -0,0 +1,242 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use codec::Encode;
+use proc_macro2::{Span, TokenStream};
+use quote::quote;
+use syn::{
+	parse::{Error, Result},
+	parse_macro_input,
+	spanned::Spanned as _,
+	Expr, ExprLit, FieldValue, ItemConst, Lit,
+};
+
+/// This macro accepts a `const` item that has a struct initializer expression of
+/// `RuntimeVersion`-like type. The macro will pass through this declaration and append an item
+/// declaration that will lead to emitting a wasm custom section with the contents of
+/// `RuntimeVersion`.
+pub fn decl_runtime_version_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+	let item = parse_macro_input!(input as ItemConst);
+	decl_runtime_version_impl_inner(item)
+		.unwrap_or_else(|e| e.to_compile_error())
+		.into()
+}
+
+fn decl_runtime_version_impl_inner(item: ItemConst) -> Result<TokenStream> {
+	let runtime_version = ParseRuntimeVersion::parse_expr(&*item.expr)?.build(item.expr.span())?;
+	let link_section =
+		generate_emit_link_section_decl(&runtime_version.encode(), "runtime_version");
+
+	Ok(quote! {
+		#item
+		#link_section
+	})
+}
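The `RuntimeVersion` duplicate declared just below encodes `apis` as a single `u8` set to zero, relying on a SCALE encoding detail: an empty vector encodes to exactly one zero byte (its compact-encoded length), which is byte-for-byte what `0u8` encodes to. A quick check of that claim, using `parity-scale-codec` (illustrative, not part of the diff):

```rust
use codec::Encode;

fn main() {
	// An empty SCALE vector is just its compact-encoded length: one zero byte.
	let empty_apis: Vec<([u8; 8], u32)> = Vec::new();
	assert_eq!(empty_apis.encode(), vec![0u8]);
	// ...which is exactly what a `u8` of zero encodes to.
	assert_eq!(0u8.encode(), vec![0u8]);
}
```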
+/// This is a duplicate of `sp_version::RuntimeVersion`. Unfortunately, we cannot use the
+/// original declaration, because if this proc-macro depended on `sp_version` directly, cargo
+/// would enable the `std` feature even for `no_std` wasm runtime builds.
+///
+/// One difference from the original definition is the `apis` field. Since we don't actually
+/// parse `apis` in this macro, it will always be emitted as empty. An empty vector can be
+/// encoded as a zero byte, thus `u8` is sufficient here.
+#[derive(Encode)]
+struct RuntimeVersion {
+	spec_name: String,
+	impl_name: String,
+	authoring_version: u32,
+	spec_version: u32,
+	impl_version: u32,
+	apis: u8,
+	transaction_version: u32,
+}
+
+#[derive(Default, Debug)]
+struct ParseRuntimeVersion {
+	spec_name: Option<String>,
+	impl_name: Option<String>,
+	authoring_version: Option<u32>,
+	spec_version: Option<u32>,
+	impl_version: Option<u32>,
+	transaction_version: Option<u32>,
+}
+
+impl ParseRuntimeVersion {
+	fn parse_expr(init_expr: &Expr) -> Result<ParseRuntimeVersion> {
+		let init_expr = match init_expr {
+			Expr::Struct(ref e) => e,
+			_ =>
+				return Err(Error::new(init_expr.span(), "expected a struct initializer expression")),
+		};
+
+		let mut parsed = ParseRuntimeVersion::default();
+		for field_value in init_expr.fields.iter() {
+			parsed.parse_field_value(field_value)?;
+		}
+		Ok(parsed)
+	}
+
+	fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<()> {
+		let field_name = match field_value.member {
+			syn::Member::Named(ref ident) => ident,
+			syn::Member::Unnamed(_) =>
+				return Err(Error::new(field_value.span(), "only named members must be used")),
+		};
+
+		fn parse_once<T>(
+			value: &mut Option<T>,
+			field: &FieldValue,
+			parser: impl FnOnce(&Expr) -> Result<T>,
+		) -> Result<()> {
+			if value.is_some() {
+				return Err(Error::new(field.span(), "field is already initialized"))
+			} else {
+				*value = Some(parser(&field.expr)?);
+				Ok(())
+			}
+		}
+
+		if field_name == "spec_name" {
+			parse_once(&mut self.spec_name, field_value, Self::parse_str_literal)?;
+		} else if field_name == "impl_name" {
+			parse_once(&mut self.impl_name, field_value, Self::parse_str_literal)?;
+		} else if field_name == "authoring_version" {
+			parse_once(&mut self.authoring_version, field_value, Self::parse_num_literal)?;
+		} else if field_name == "spec_version" {
+			parse_once(&mut self.spec_version, field_value, Self::parse_num_literal)?;
+		} else if field_name == "impl_version" {
+			parse_once(&mut self.impl_version, field_value, Self::parse_num_literal)?;
+		} else if field_name == "transaction_version" {
+			parse_once(&mut self.transaction_version, field_value, Self::parse_num_literal)?;
+		} else if field_name == "apis" {
+			// Intentionally ignored
+			//
+			// The definition will pass through for the declaration, however, it won't get into
+			// the "runtime_version" custom section. `impl_runtime_apis` is responsible for
+			// generating a custom section with the supported runtime apis descriptor.
+		} else {
+			return Err(Error::new(field_name.span(), "unknown field"))
+		}
+
+		Ok(())
+	}
+
+	fn parse_num_literal(expr: &Expr) -> Result<u32> {
+		let lit = match *expr {
+			Expr::Lit(ExprLit { lit: Lit::Int(ref lit), .. }) => lit,
+			_ =>
+				return Err(Error::new(
+					expr.span(),
+					"only numeric literals (e.g. `10`) are supported here",
+				)),
+		};
+		lit.base10_parse::<u32>()
+	}
+
+	fn parse_str_literal(expr: &Expr) -> Result<String> {
+		let mac = match *expr {
+			Expr::Macro(syn::ExprMacro { ref mac, .. }) => mac,
+			_ => return Err(Error::new(expr.span(), "a macro expression is expected here")),
+		};
+
+		let lit: ExprLit = mac.parse_body().map_err(|e| {
+			Error::new(
+				e.span(),
+				format!("a single literal argument is expected, but parsing failed: {}", e),
+			)
+		})?;
+
+		match lit.lit {
+			Lit::Str(ref lit) => Ok(lit.value()),
+			_ => Err(Error::new(lit.span(), "only string literals are supported here")),
+		}
+	}
+
+	fn build(self, span: Span) -> Result<RuntimeVersion> {
+		macro_rules! required {
+			($e:expr) => {
+				$e.ok_or_else(|| {
+					Error::new(span, format!("required field '{}' is missing", stringify!($e)))
+				})?
+ }; + } + + let Self { + spec_name, + impl_name, + authoring_version, + spec_version, + impl_version, + transaction_version, + } = self; + + Ok(RuntimeVersion { + spec_name: required!(spec_name), + impl_name: required!(impl_name), + authoring_version: required!(authoring_version), + spec_version: required!(spec_version), + impl_version: required!(impl_version), + transaction_version: required!(transaction_version), + apis: 0, + }) + } +} + +fn generate_emit_link_section_decl(contents: &[u8], section_name: &str) -> TokenStream { + let len = contents.len(); + quote! { + const _: () = { + #[cfg(not(feature = "std"))] + #[link_section = #section_name] + static SECTION_CONTENTS: [u8; #len] = [#(#contents),*]; + }; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use codec::DecodeAll; + use std::borrow::Cow; + + #[test] + fn version_can_be_deserialized() { + let version_bytes = RuntimeVersion { + spec_name: "hello".to_string(), + impl_name: "world".to_string(), + authoring_version: 10, + spec_version: 265, + impl_version: 1, + apis: 0, + transaction_version: 2, + } + .encode(); + + assert_eq!( + sp_version::RuntimeVersion::decode_all(&mut &version_bytes[..]).unwrap(), + sp_version::RuntimeVersion { + spec_name: "hello".into(), + impl_name: "world".into(), + authoring_version: 10, + spec_version: 265, + impl_version: 1, + apis: Cow::Owned(vec![]), + transaction_version: 2, + }, + ); + } +} diff --git a/frame/timestamp/src/default_weights.rs b/primitives/version/proc-macro/src/lib.rs similarity index 54% rename from frame/timestamp/src/default_weights.rs rename to primitives/version/proc-macro/src/lib.rs index d8db0182282b2..9a6d4d60bbf9f 100644 --- a/frame/timestamp/src/default_weights.rs +++ b/primitives/version/proc-macro/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,19 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 +//! A proc-macro that generates a custom wasm section from a given RuntimeVersion declaration +//! +//! This macro is re-exported from the `sp_version::runtime_version` and intended to be used from +//! there. Documentation can also be found there. -#![allow(unused_parens)] +#![recursion_limit = "512"] -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; +use proc_macro::TokenStream; -impl crate::WeightInfo for () { - fn set() -> Weight { - (9133000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn on_finalize() -> Weight { - (5915000 as Weight) - } +mod decl_runtime_version; + +#[proc_macro_attribute] +pub fn runtime_version(_: TokenStream, input: TokenStream) -> TokenStream { + decl_runtime_version::decl_runtime_version_impl(input) } diff --git a/primitives/version/src/embed.rs b/primitives/version/src/embed.rs new file mode 100644 index 0000000000000..452762dcf687a --- /dev/null +++ b/primitives/version/src/embed.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Provides functionality to embed a [`RuntimeVersion`](crate::RuntimeVersion) as a custom
+//! section into a WASM file.
+
+use codec::Encode;
+use parity_wasm::elements::{deserialize_buffer, serialize, Module};
+
+#[derive(Clone, Copy, Eq, PartialEq, Debug, thiserror::Error)]
+pub enum Error {
+	#[error("Deserializing wasm failed")]
+	Deserialize,
+	#[error("Serializing wasm failed")]
+	Serialize,
+}
+
+/// Embed the given `version` into the given `wasm` blob.
+///
+/// If there was already a runtime version embedded, this will be overwritten.
+///
+/// Returns the new WASM blob.
+pub fn embed_runtime_version(
+	wasm: &[u8],
+	mut version: crate::RuntimeVersion,
+) -> Result<Vec<u8>, Error> {
+	let mut module: Module = deserialize_buffer(wasm).map_err(|_| Error::Deserialize)?;
+
+	let apis = version
+		.apis
+		.iter()
+		.map(Encode::encode)
+		.map(|v| v.into_iter())
+		.flatten()
+		.collect::<Vec<u8>>();
+
+	module.set_custom_section("runtime_apis", apis);
+
+	version.apis.to_mut().clear();
+	module.set_custom_section("runtime_version", version.encode());
+
+	serialize(module).map_err(|_| Error::Serialize)
+}
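A sketch of how `embed_runtime_version` above might be driven from host-side build tooling. The file path handling and the surrounding function are hypothetical; only the `sp_version::embed::embed_runtime_version` call mirrors the code in the diff:

```rust
// Hypothetical host-side usage: re-stamp a built runtime wasm with its version.
fn stamp(wasm_path: &std::path::Path, version: sp_version::RuntimeVersion) -> std::io::Result<()> {
	let wasm = std::fs::read(wasm_path)?;
	// Overwrites any previously embedded "runtime_version"/"runtime_apis" sections.
	let stamped = sp_version::embed::embed_runtime_version(&wasm, version)
		.expect("input is a valid wasm blob; embedding only rewrites custom sections");
	std::fs::write(wasm_path, stamped)
}
```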
diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs
index 133d0497a2584..58216bc494dd7 100644
--- a/primitives/version/src/lib.rs
+++ b/primitives/version/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,22 +20,91 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 #[cfg(feature = "std")]
-use serde::{Serialize, Deserialize};
-#[cfg(feature = "std")]
-use std::fmt;
+use serde::{Deserialize, Serialize};
 #[cfg(feature = "std")]
 use std::collections::HashSet;
+#[cfg(feature = "std")]
+use std::fmt;
 
-use codec::{Encode, Decode};
-use sp_runtime::RuntimeString;
+use codec::{Decode, Encode};
+use scale_info::TypeInfo;
 pub use sp_runtime::create_runtime_str;
+use sp_runtime::RuntimeString;
 #[doc(hidden)]
 pub use sp_std;
 #[cfg(feature = "std")]
-use sp_runtime::{traits::Block as BlockT, generic::BlockId};
+use sp_runtime::{generic::BlockId, traits::Block as BlockT};
+
+#[cfg(feature = "std")]
+pub mod embed;
+
+/// An attribute that accepts a version declaration of a runtime and generates a custom wasm
+/// section with the equivalent contents.
+///
+/// The custom section makes it possible to read the version of the runtime without having to
+/// execute any code. Instead, the generated custom section can be relatively easily parsed
+/// from the wasm binary. The identifier of the custom section is "runtime_version".
+///
+/// A shortcoming of this macro is that it is unable to embed information regarding supported
+/// APIs. This is supported by the `construct_runtime!` macro.
+///
+/// # Usage
+///
+/// This macro accepts a const item like the following:
+///
+/// ```rust
+/// use sp_version::{create_runtime_str, RuntimeVersion};
+///
+/// #[sp_version::runtime_version]
+/// pub const VERSION: RuntimeVersion = RuntimeVersion {
+/// 	spec_name: create_runtime_str!("test"),
+/// 	impl_name: create_runtime_str!("test"),
+/// 	authoring_version: 10,
+/// 	spec_version: 265,
+/// 	impl_version: 1,
+/// 	apis: RUNTIME_API_VERSIONS,
+/// 	transaction_version: 2,
+/// };
+///
+/// # const RUNTIME_API_VERSIONS: sp_version::ApisVec = sp_version::create_apis_vec!([]);
+/// ```
+///
+/// It will pass the declaration through and add the code required for emitting a custom
+/// section. The information that goes into the custom section is parsed from the item
+/// declaration. Due to that, the macro is somewhat rigid in terms of the code it accepts.
+/// There are the following considerations:
+///
+/// - The `spec_name` and `impl_name` must be set by a macro-like expression. The name of the
+///   macro doesn't matter though.
+///
+/// - `authoring_version`, `spec_version`, `impl_version` and `transaction_version` must be set
+///   by a literal. The literal must be an integer. No other expressions are allowed there. In
+///   particular, you can't supply a constant variable.
+///
+/// - `apis` doesn't have any specific constraints. This is because this information doesn't
+///   get into the custom section and is not parsed.
+///
+/// # Compilation Target & "std" feature
+///
+/// This macro assumes it will be used within a runtime. By convention, a runtime crate defines
+/// a feature named "std". This feature is enabled when the runtime is compiled to native code
+/// and disabled when it is compiled to wasm code.
+///
+/// The custom section can only be emitted while compiling to wasm. In order to detect the
+/// compilation target we use the "std" feature. This macro will emit the custom section only
+/// if the "std" feature is **not** enabled.
+///
+/// Including this macro in a context where there is no "std" feature and the code is not
+/// compiled to wasm can lead to cryptic linking errors.
+pub use sp_version_proc_macro::runtime_version;
 
 /// The identity of a particular API interface that the runtime might provide.
+///
+/// The id is generated by hashing the name of the runtime api with BLAKE2 using a hash size
+/// of 8 bytes.
+///
+/// The name of the runtime api is the name of the trait when using the `decl_runtime_apis!`
+/// macro.
 pub type ApiId = [u8; 8];
 
 /// A vector of pairs of `ApiId` and a `u32` for version.
@@ -44,15 +113,18 @@ pub type ApisVec = sp_std::borrow::Cow<'static, [(ApiId, u32)]>;
 
 /// Create a vector of Api declarations.
 #[macro_export]
 macro_rules! create_apis_vec {
-	( $y:expr ) => { $crate::sp_std::borrow::Cow::Borrowed(& $y) }
+	( $y:expr ) => {
+		$crate::sp_std::borrow::Cow::Borrowed(&$y)
+	};
 }
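As a usage sketch for the version type defined just below: runtime API checks compare 8-byte `ApiId`s and versions via the `has_api_with` / `api_version` helpers added in this hunk. The `ApiId` value here is made up for illustration; real ids are BLAKE2-derived from the runtime api trait name, as the `ApiId` docs above note:

```rust
use sp_version::{ApiId, RuntimeVersion};

// Sketch: does this runtime expose our (hypothetical) API at version >= 3?
fn supports_core_v3(version: &RuntimeVersion) -> bool {
	const CORE: ApiId = *b"core_api"; // illustrative id only
	// `api_version` returns the concrete version, `has_api_with` applies a predicate.
	version.api_version(&CORE).is_some() && version.has_api_with(&CORE, |v| v >= 3)
}
```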
 
 /// Runtime version.
 /// This should not be thought of as classic Semver (major/minor/tiny).
 /// This triplet has different semantics and mis-interpretation could cause problems.
-/// In particular: bug fixes should result in an increment of `spec_version` and possibly `authoring_version`,
-/// absolutely not `impl_version` since they change the semantics of the runtime.
-#[derive(Clone, PartialEq, Eq, Encode, Decode, Default, sp_runtime::RuntimeDebug)]
+/// In particular: bug fixes should result in an increment of `spec_version` and possibly
+/// `authoring_version`, absolutely not `impl_version` since they change the semantics of the
+/// runtime.
+#[derive(Clone, PartialEq, Eq, Encode, Decode, Default, sp_runtime::RuntimeDebug, TypeInfo)]
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))]
 pub struct RuntimeVersion {
@@ -99,9 +171,9 @@ pub struct RuntimeVersion {
 	/// number changes, then `spec_version` must change, also.
 	///
 	/// This number must change when an existing dispatchable (module ID, dispatch ID) is changed,
-	/// either through an alteration in its user-level semantics, a parameter added/removed/changed,
-	/// a dispatchable being removed, a module being removed, or a dispatchable/module changing its
-	/// index.
+	/// either through an alteration in its user-level semantics, a parameter
+	/// added/removed/changed, a dispatchable being removed, a module being removed, or a
+	/// dispatchable/module changing its index.
 	///
 	/// It need *not* change when a new module is added or when a dispatchable is added.
 	pub transaction_version: u32,
@@ -110,7 +182,9 @@ pub struct RuntimeVersion {
 #[cfg(feature = "std")]
 impl fmt::Display for RuntimeVersion {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		write!(f, "{}-{} ({}-{}.tx{}.au{})",
+		write!(
+			f,
+			"{}-{} ({}-{}.tx{}.au{})",
 			self.spec_name,
 			self.spec_version,
 			self.impl_name,
@@ -126,19 +200,20 @@ impl RuntimeVersion {
 	/// Check if this version matches other version for calling into runtime.
 	pub fn can_call_with(&self, other: &RuntimeVersion) -> bool {
 		self.spec_version == other.spec_version &&
-		self.spec_name == other.spec_name &&
-		self.authoring_version == other.authoring_version
+			self.spec_name == other.spec_name &&
+			self.authoring_version == other.authoring_version
 	}
 
 	/// Check if the given api with `api_id` is implemented and the version passes the given
 	/// `predicate`.
-	pub fn has_api_with<P: Fn(u32) -> bool>(
-		&self,
-		id: &ApiId,
-		predicate: P,
-	) -> bool {
+	pub fn has_api_with<P: Fn(u32) -> bool>(&self, id: &ApiId, predicate: P) -> bool {
 		self.apis.iter().any(|(s, v)| s == id && predicate(*v))
 	}
+
+	/// Returns the api version found for api with `id`.
+	pub fn api_version(&self, id: &ApiId) -> Option<u32> {
+		self.apis.iter().find_map(|a| (a.0 == *id).then(|| a.1))
+	}
 }
 
 #[cfg(feature = "std")]
@@ -162,11 +237,10 @@ impl NativeVersion {
 		if self.runtime_version.spec_name != other.spec_name {
 			Err(format!(
 				"`spec_name` does not match `{}` vs `{}`",
-				self.runtime_version.spec_name,
-				other.spec_name,
+				self.runtime_version.spec_name, other.spec_name,
 			))
-		} else if self.runtime_version.authoring_version != other.authoring_version
-			&& !self.can_author_with.contains(&other.authoring_version)
+		} else if self.runtime_version.authoring_version != other.authoring_version &&
+			!self.can_author_with.contains(&other.authoring_version)
 		{
 			Err(format!(
 				"`authoring_version` does not match `{version}` vs `{other_version}` and \
@@ -180,40 +254,47 @@ impl NativeVersion {
 	}
 }
 
-/// Something that can provide the runtime version at a given block and the native runtime version.
 #[cfg(feature = "std")]
-pub trait GetRuntimeVersion<Block: BlockT> {
+/// Returns the version of the native runtime.
+pub trait GetNativeVersion {
 	/// Returns the version of the native runtime.
 	fn native_version(&self) -> &NativeVersion;
+}
 
+/// Something that can provide the runtime version at a given block.
+#[cfg(feature = "std")]
+pub trait GetRuntimeVersionAt<Block: BlockT> {
 	/// Returns the version of runtime at the given block.
 	fn runtime_version(&self, at: &BlockId<Block>) -> Result<RuntimeVersion, String>;
 }
 
 #[cfg(feature = "std")]
-impl<T: GetRuntimeVersion<Block>, Block: BlockT> GetRuntimeVersion<Block> for std::sync::Arc<T> {
-	fn native_version(&self) -> &NativeVersion {
-		(&**self).native_version()
-	}
-
+impl<T: GetRuntimeVersionAt<Block>, Block: BlockT> GetRuntimeVersionAt<Block>
+	for std::sync::Arc<T>
+{
 	fn runtime_version(&self, at: &BlockId<Block>) -> Result<RuntimeVersion, String> {
 		(&**self).runtime_version(at)
 	}
 }
 
+#[cfg(feature = "std")]
+impl<T: GetNativeVersion> GetNativeVersion for std::sync::Arc<T> {
+	fn native_version(&self) -> &NativeVersion {
+		(&**self).native_version()
+	}
+}
+
 #[cfg(feature = "std")]
 mod apis_serialize {
 	use super::*;
 	use impl_serde::serialize as bytes;
-	use serde::{Serializer, de, ser::SerializeTuple};
+	use serde::{de, ser::SerializeTuple, Serializer};
 
 	#[derive(Serialize)]
-	struct ApiId<'a>(
-		#[serde(serialize_with="serialize_bytesref")] &'a super::ApiId,
-		&'a u32,
-	);
+	struct ApiId<'a>(#[serde(serialize_with = "serialize_bytesref")] &'a super::ApiId, &'a u32);
 
-	pub fn serialize<S>(apis: &ApisVec, ser: S) -> Result<S::Ok, S::Error> where
+	pub fn serialize<S>(apis: &ApisVec, ser: S) -> Result<S::Ok, S::Error>
+	where
 		S: Serializer,
 	{
 		let len = apis.len();
@@ -224,20 +305,18 @@ mod apis_serialize {
 		seq.end()
 	}
 
-	pub fn serialize_bytesref<S>(&apis: &&super::ApiId, ser: S) -> Result<S::Ok, S::Error> where
+	pub fn serialize_bytesref<S>(&apis: &&super::ApiId, ser: S) -> Result<S::Ok, S::Error>
+	where
 		S: Serializer,
 	{
 		bytes::serialize(apis, ser)
 	}
 
 	#[derive(Deserialize)]
-	struct ApiIdOwned(
-		#[serde(deserialize_with="deserialize_bytes")]
-		super::ApiId,
-		u32,
-	);
+	struct ApiIdOwned(#[serde(deserialize_with = "deserialize_bytes")] super::ApiId, u32);
 
-	pub fn deserialize<'de, D>(deserializer: D) -> Result<ApisVec, D::Error> where
+	pub fn deserialize<'de, D>(deserializer: D) -> Result<ApisVec, D::Error>
+	where
 		D: de::Deserializer<'de>,
 	{
 		struct Visitor;
@@ -248,7 +327,8 @@ mod apis_serialize {
 			formatter.write_str("a sequence of api id and version tuples")
 		}
 
-		fn visit_seq<V>(self, mut visitor: V) -> Result<Self::Value, V::Error> where
+		fn visit_seq<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
+		where
 			V: de::SeqAccess<'de>,
 		{
 			let mut apis = Vec::new();
@@ -261,8 +341,9 @@ mod apis_serialize {
 		deserializer.deserialize_seq(Visitor)
 	}
 
-	pub fn deserialize_bytes<'de, D>(d: D) -> Result<super::ApiId, D::Error> where
-		D: de::Deserializer<'de>
+	pub fn deserialize_bytes<'de, D>(d: D) -> Result<super::ApiId, D::Error>
+	where
+		D: de::Deserializer<'de>,
 	{
 		let mut arr = [0; 8];
 		bytes::deserialize_check_len(d, bytes::ExpectedLen::Exact(&mut arr[..]))?;
diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml
index a85b6cd1d118a..ba8a7b4e4b466 100644
--- a/primitives/wasm-interface/Cargo.toml
+++ b/primitives/wasm-interface/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-wasm-interface"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -14,10 +14,10 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-wasmi = { version = "0.6.2", optional = true }
-impl-trait-for-tuples = "0.1.2"
-sp-std = { version = "2.0.0", path = "../std", default-features = false }
-codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] }
+wasmi = { version = "0.9.0", optional = true }
+impl-trait-for-tuples = "0.2.1"
+sp-std = { version = "4.0.0-dev", path = "../std", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
 
 [features]
 default = [ "std" ]
diff --git a/primitives/wasm-interface/src/lib.rs b/primitives/wasm-interface/src/lib.rs
index c432a966056c5..e1903ef425aeb 100644
--- a/primitives/wasm-interface/src/lib.rs
+++ b/primitives/wasm-interface/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,10 +19,7 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use sp_std::{
-	vec,
-	borrow::Cow, marker::PhantomData, mem, iter::Iterator, result, vec::Vec,
-};
+use sp_std::{borrow::Cow, iter::Iterator, marker::PhantomData, mem, result, vec, vec::Vec};
 
 #[cfg(feature = "std")]
 mod wasmi_impl;
@@ -141,24 +138,20 @@ pub struct Pointer<T: PointerType> {
 impl<T: PointerType> Pointer<T> {
 	/// Create a new instance of `Self`.
 	pub fn new(ptr: u32) -> Self {
-		Self {
-			ptr,
-			_marker: Default::default(),
-		}
+		Self { ptr, _marker: Default::default() }
 	}
 
 	/// Calculate the offset from this pointer.
 	///
-	/// `offset` is in units of `T`. So, `3` means `3 * mem::size_of::<T>()` as offset to the pointer.
+	/// `offset` is in units of `T`. So, `3` means `3 * mem::size_of::<T>()` as offset to the
+	/// pointer.
 	///
 	/// Returns an `Option` to respect that the pointer could probably overflow.
 	pub fn offset(self, offset: u32) -> Option<Self> {
-		offset.checked_mul(T::SIZE).and_then(|o| self.ptr.checked_add(o)).map(|ptr| {
-			Self {
-				ptr,
-				_marker: Default::default(),
-			}
-		})
+		offset
+			.checked_mul(T::SIZE)
+			.and_then(|o| self.ptr.checked_add(o))
+			.map(|ptr| Self { ptr, _marker: Default::default() })
 	}
 
 	/// Create a null pointer.
@@ -198,7 +191,9 @@ impl<T: PointerType> From<Pointer<T>> for usize {
 impl<T: PointerType> IntoValue for Pointer<T> {
 	const VALUE_TYPE: ValueType = ValueType::I32;
-	fn into_value(self) -> Value { Value::I32(self.ptr as _) }
+	fn into_value(self) -> Value {
+		Value::I32(self.ptr as _)
+	}
 }
 
 impl<T: PointerType> TryFromValue for Pointer<T> {
@@ -224,19 +219,16 @@ pub struct Signature {
 
 impl Signature {
 	/// Create a new instance of `Signature`.
-	pub fn new<T: Into<Cow<'static, [ValueType]>>>(args: T, return_value: Option<ValueType>) -> Self {
-		Self {
-			args: args.into(),
-			return_value,
-		}
+	pub fn new<T: Into<Cow<'static, [ValueType]>>>(
+		args: T,
+		return_value: Option<ValueType>,
+	) -> Self {
+		Self { args: args.into(), return_value }
 	}
 
 	/// Create a new instance of `Signature` with the given `args` and without any return value.
 	pub fn new_with_args<T: Into<Cow<'static, [ValueType]>>>(args: T) -> Self {
-		Self {
-			args: args.into(),
-			return_value: None,
-		}
+		Self { args: args.into(), return_value: None }
 	}
 }
 
@@ -500,7 +492,6 @@ mod tests {
 		assert_eq!(ptr.offset(32).unwrap(), Pointer::new(256));
 	}
 
-
 	#[test]
 	fn return_value_encoded_max_size() {
 		let encoded = ReturnValue::Value(Value::I64(-1)).encode();
diff --git a/primitives/wasm-interface/src/wasmi_impl.rs b/primitives/wasm-interface/src/wasmi_impl.rs
index 5931671c97ed4..f7e0ec6f16d4a 100644
--- a/primitives/wasm-interface/src/wasmi_impl.rs
+++ b/primitives/wasm-interface/src/wasmi_impl.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,7 @@
 
 //! Implementation of conversions between Substrate and wasmi types.
-use crate::{Value, ValueType, Signature};
+use crate::{Signature, Value, ValueType};
 
 impl From<Value> for wasmi::RuntimeValue {
 	fn from(value: Value) -> Self {
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 0000000000000..441913f619cdc
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,23 @@
+# Basic
+hard_tabs = true
+max_width = 100
+use_small_heuristics = "Max"
+# Imports
+imports_granularity = "Crate"
+reorder_imports = true
+# Consistency
+newline_style = "Unix"
+# Format comments
+comment_width = 100
+wrap_comments = true
+# Misc
+chain_width = 80
+spaces_around_ranges = false
+binop_separator = "Back"
+reorder_impl_items = false
+match_arm_leading_pipes = "Preserve"
+match_arm_blocks = false
+match_block_trailing_comma = true
+trailing_comma = "Vertical"
+trailing_semicolon = false
+use_field_init_shorthand = true
diff --git a/shell.nix b/shell.nix
new file mode 100644
index 0000000000000..9a2d30400631f
--- /dev/null
+++ b/shell.nix
@@ -0,0 +1,27 @@
+let
+  mozillaOverlay =
+    import (builtins.fetchGit {
+      url = "https://github.com/mozilla/nixpkgs-mozilla.git";
+      rev = "4a07484cf0e49047f82d83fd119acffbad3b235f";
+    });
+  nixpkgs = import <nixpkgs> { overlays = [ mozillaOverlay ]; };
+  rust-nightly = with nixpkgs; ((rustChannelOf { date = "2021-07-06"; channel = "nightly"; }).rust.override {
+    extensions = [ "rust-src" ];
+    targets = [ "wasm32-unknown-unknown" ];
+  });
+in
+with nixpkgs; pkgs.mkShell {
+  buildInputs = [
+    clang
+    openssl.dev
+    pkg-config
+    rust-nightly
+  ] ++ lib.optionals stdenv.isDarwin [
+    darwin.apple_sdk.frameworks.Security
+  ];
+
+  RUST_SRC_PATH = "${rust-nightly}/lib/rustlib/src/rust/src";
+  LIBCLANG_PATH = "${llvmPackages.libclang.lib}/lib";
+  PROTOC = "${protobuf}/bin/protoc";
+  ROCKSDB_LIB_DIR = "${rocksdb}/lib";
+}
diff --git a/simnet_tests/README.md b/simnet_tests/README.md
new file mode 100644
index 0000000000000..cb1b13ae98505
--- /dev/null
+++ b/simnet_tests/README.md
@@ -0,0 +1,39 @@
+# Simulation tests, or high-level integration tests
+
+_The content of this directory is meant to be used by Parity's private CI/CD
+infrastructure with private tools. At the moment those tools are still at an
+early stage of development and we don't know if / when they will be available
+for public use._
+
+## Content of this directory
+
+The `configs` dir contains config files in TOML format that describe how to
+configure the simulation network that you want to launch.
+
+The `tests` dir contains [cucumber](https://cucumber.io/) files. Those are
+Behavior-Driven Development test files that describe tests in plain English.
+Under the hood there are assertions that specific metrics should have specific
+values.
+
+At the moment we have 2 tests: `tests/quick/001-smoketest.feature` and
+`tests/long/002-loadtest.feature`.
+The load test uses a JS script that we added to the simnet image; it is
+launched by this step in the cucumber file:
+`Then launch 'node' with parameters '/usr/local/bin/sub-flood --finalization --url ws://localhost:11222'`
+
+`run_tests.sh` is a script meant to ease launching a test.
+In order to use this script locally, you need to install
+[gurke](https://github.com/paritytech/gurke).
+This script also helps prepare the test environment. Once you have access to
+a Kubernetes cluster (meaning you can do `kubectl get pods`), you can run this
+script with no arguments, like `./run_tests.sh`, and the tests should run.
+The Kubernetes cluster can be local, spawned with
+[kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation),
+or an instance living in the
+[cloud](https://github.com/paritytech/gurke/blob/main/docs/How-to-setup-access-to-gke-k8s-cluster.md).
+
+### [Here is a link to the barcamp presentation of simnet](https://www.crowdcast.io/e/ph49xu01)
+### [Here is a link to the simnet repo, hosted on private GitLab](https://gitlab.parity.io/parity/simnet/-/tree/master)
diff --git a/simnet_tests/configs/default_local_testnet.toml b/simnet_tests/configs/default_local_testnet.toml
new file mode 100644
index 0000000000000..066bd4c9e332f
--- /dev/null
+++ b/simnet_tests/configs/default_local_testnet.toml
@@ -0,0 +1,14 @@
+[settings]
+bootnode-domain-name = "bootnode.{{get_env(name="NAMESPACE")}}.svc.cluster.local"
+
+[settings.setup]
+timeout = 300
+
+[settings.defaults]
+timeout = 300
+
+[nodes]
+alice = { extra-args = ["--alice"], validator = true }
+bob = { extra-args = ["--bob"], validator = true }
+charlie = { extra-args = ["--charlie"], validator = true }
diff --git a/simnet_tests/run_tests.sh b/simnet_tests/run_tests.sh
new file mode 100755
index 0000000000000..3b8ac8a71dadf
--- /dev/null
+++ b/simnet_tests/run_tests.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+
+### ARGS FOR THIS SCRIPT ###
+# ./${SCRIPT_NAME} NAMESPACE IMAGE LOG_PATH FEATURES
+# NAMESPACE  the kubernetes namespace where the test will run
+# IMAGE      Substrate image used to spawn the network
+# LOG_PATH   path to the dir where to save logs from the external JS script that
+#            is run as part of a step in the features file
+# FEATURES   directory containing cucumber files, or a single cucumber file,
+#            that describes what to test
+#
+# All args have default values; specify args to override,
+# e.g.: ./${SCRIPT_NAME} test-name parity/substrate:latest logs quick
+
+set -eou pipefail
+SCRIPT_NAME="$0"
+SCRIPT_PATH=$(dirname "${SCRIPT_NAME}")   # relative
+SCRIPT_PATH=$(cd "${SCRIPT_PATH}" && pwd) # absolutized and normalized
+
+function random_string {
+  head -1 <(fold -w 30 <(tr -dc 'a-z0-9' < /dev/urandom))
+}
+
+#
+### Script args
+#
+
+NAMESPACE=${1:-gurke-"$(random_string)"-runtest}
+IMAGE=${2:-"parity/substrate:latest"}
+LOG_PATH=${3:-"${SCRIPT_PATH}/logs"}
+FEATURES=${4:-"ALL"}
+
+mkdir -p "${SCRIPT_PATH}"/logs
+
+echo "Running tests in namespace: ${NAMESPACE}"
+echo "Testing image: ${IMAGE}"
+echo "Storing script logs to: ${LOG_PATH}"
+echo "Using feature files from: ${FEATURES}"
+
+#
+### Script logic
+#
+
+function forward_port {
+  # RUN_IN_CONTAINER is an env var that is set in the dockerfile
+  # use the -v operator to explicitly test if a variable is set
+  if [[ ! -v RUN_IN_CONTAINER ]] ; then
+    if is_port_forward_running ; then
+      kill_previous_job
+    fi
+  fi
+  start_forwarding_job
+}
+
+FORWARD_GREP_FILTER='kubectl.*[p]ort-forward.*svc/rpc.*11222'
+
+function is_port_forward_running {
+  # shellcheck disable=SC2009
+  ps aux | grep -qE "${FORWARD_GREP_FILTER}"
+}
+
+function kill_previous_job {
+  # shellcheck disable=SC2009
+  job_pid=$(ps aux | grep -E "${FORWARD_GREP_FILTER}" | awk '{ print $2 }')
+  echo "INFO Killed forwarding of port 9944 into bootnode"
+  kill "${job_pid}"
+}
+
+function start_forwarding_job {
+  kubectl -n "${NAMESPACE}" \
+    expose pod bootnode \
+    --name=rpc \
+    --type=NodePort \
+    --target-port=9944 \
+    --port=9944
+  kubectl -n "${NAMESPACE}" \
+    port-forward svc/rpc 11222:9944 &> "${LOG_PATH}/forward-${NAMESPACE}.log" &
+  sleep 2
+  echo "INFO Started forwarding port 9944 into bootnode"
+}
+
+function update_api {
+  echo "INFO: Updating Polkadot JS API"
+  pwd
+  cd "${SCRIPT_PATH}"/../../sub-flood/
+  npm run build
+  cd -
+}
+
+function run_test {
+  case "${FEATURES}" in
+    quick)
+      gurke test "${NAMESPACE}" "${SCRIPT_PATH}"/tests/quick --log-path "${LOG_PATH}"
+      ;;
+    long)
+      gurke test "${NAMESPACE}" "${SCRIPT_PATH}"/tests/long --log-path "${LOG_PATH}"
+      ;;
+    ALL)
+      gurke test "${NAMESPACE}" "${SCRIPT_PATH}"/tests --log-path "${LOG_PATH}"
+      ;;
+    ??*)
+      gurke test \
+        "${NAMESPACE}" \
+        "${SCRIPT_PATH}"/"${FEATURES}" \
+        --log-path "${LOG_PATH}"
+      ;;
+  esac
+}
+
+export NAMESPACE="${NAMESPACE}"
+
+set -x # echo the commands to stdout
+gurke spawn --config "${SCRIPT_PATH}"/configs/default_local_testnet.toml \
+  -n "${NAMESPACE}" \
+  --image "${IMAGE}"
+
+echo "INFO: Checking if pods launched correctly"
+kubectl -n "${NAMESPACE}" get pods -o wide
+
+update_api
+
+forward_port
+run_test
diff --git a/simnet_tests/tests/long/002-loadtest.feature b/simnet_tests/tests/long/002-loadtest.feature
new file mode 100644
index 0000000000000..67d108ea55416
--- /dev/null
+++ b/simnet_tests/tests/long/002-loadtest.feature
@@ -0,0 +1,5 @@
+Feature: LoadTesting
+
+  Scenario: spawn 50k transactions and wait for their finalization
+    Given a test network
+    Then launch 'node' with parameters '/usr/local/bin/sub-flood --finalization --url ws://localhost:11222'
diff --git a/simnet_tests/tests/quick/001-smoketest.feature b/simnet_tests/tests/quick/001-smoketest.feature
new file mode 100644
index 0000000000000..a07041e4ea622
--- /dev/null
+++ b/simnet_tests/tests/quick/001-smoketest.feature
@@ -0,0 +1,16 @@
+Feature: Smoketest
+
+  Scenario: Minimal Example
+    Given a test network
+    Then alice is up
+    And alice reports substrate_node_roles is 4
+    And alice reports substrate_sub_libp2p_is_major_syncing is 0
+    When alice's best block should be above 30
+    Then alice reports block height is greater than 30
+    And alice reports peers count is at least 2
+    Then bob is up
+    And bob reports block height is greater than 30
+    And bob reports peers count is at least 2
+    Then charlie is up
+    And charlie reports block height is greater than 30
+    And charlie reports peers count is at least 2
diff --git a/ss58-registry.json b/ss58-registry.json
index 2485137076cd2..563cc248db9dd 100644
--- a/ss58-registry.json
+++ b/ss58-registry.json
@@ -21,8 +21,8 @@
 	},
 	{
 		"prefix": 1,
-		"network": "reserved1",
-		"displayName": "This prefix is reserved.",
+		"network": null,
+		"displayName": "Bare 32-bit Schnorr/Ristretto (S/R 25519) public key.",
 		"symbols": null,
 		"decimals": null,
 		"standardAccount": null,
@@ -39,8 +39,8 @@
 	},
 	{
 		"prefix": 3,
-		"network": "reserved3",
-		"displayName": "This prefix 
is reserved.", + "network": null, + "displayName": "Bare 32-bit Ed25519 public key.", "symbols": null, "decimals": null, "standardAccount": null, @@ -60,9 +60,9 @@ "network": "plasm", "displayName": "Plasm Network", "symbols": ["PLM"], - "decimals": null, + "decimals": [15], "standardAccount": "*25519", - "website": null + "website": "https://plasmnet.io" }, { "prefix": 6, @@ -85,11 +85,11 @@ { "prefix": 8, "network": "karura", - "displayName": "Acala Karura Canary", + "displayName": "Karura", "symbols": ["KAR"], - "decimals": [18], + "decimals": [12], "standardAccount": "*25519", - "website": "https://acala.network/" + "website": "https://karura.network/" }, { "prefix": 9, @@ -105,7 +105,7 @@ "network": "acala", "displayName": "Acala", "symbols": ["ACA"], - "decimals": [18], + "decimals": [12], "standardAccount": "*25519", "website": "https://acala.network/" }, @@ -120,21 +120,39 @@ }, { "prefix": 12, - "network": "polymath", - "displayName": "Polymath", - "symbols": null, - "decimals": null, + "network": "polymesh", + "displayName": "Polymesh", + "symbols": ["POLYX"], + "decimals": [6], "standardAccount": "*25519", - "website": null + "website": "https://polymath.network/" }, { "prefix": 13, - "network": "substratee", - "displayName": "SubstraTEE", - "symbols": null, - "decimals": null, + "network": "integritee", + "displayName": "Integritee", + "symbols": ["TEER"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://integritee.network" + }, + { + "prefix": 14, + "network": "totem", + "displayName": "Totem", + "symbols": ["XTX"], + "decimals": [0], "standardAccount": "*25519", - "website": "https://www.substratee.com" + "website": "https://totemaccounting.com" + }, + { + "prefix": 15, + "network": "synesthesia", + "displayName": "Synesthesia", + "symbols": ["SYN"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://synesthesia.network/" }, { "prefix": 16, @@ -176,10 +194,10 @@ "prefix": 20, "network": "stafi", "displayName": "Stafi", - "symbols": null, - "decimals": null, + "symbols": ["FIS"], + "decimals": [12], "standardAccount": "*25519", - "website": null + "website": "https://stafi.io" }, { "prefix": 21, @@ -226,6 +244,15 @@ "standardAccount": "*25519", "website": "https://zero.io" }, + { + "prefix": 26, + "network": "jupiter", + "displayName": "Jupiter", + "symbols": ["jDOT"], + "decimals": [10], + "standardAccount": "*25519", + "website": "https://jupiter.patract.io" + }, { "prefix": 28, "network": "subsocial", @@ -235,23 +262,41 @@ "standardAccount": "*25519", "website": null }, + { + "prefix": 29, + "network": "cord", + "displayName": "Dhiway CORD Network", + "symbols": ["DCU"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://dhiway.com/" + }, { "prefix": 30, "network": "phala", "displayName": "Phala Network", - "symbols": null, - "decimals": null, + "symbols": ["PHA"], + "decimals": [12], "standardAccount": "*25519", - "website": null + "website": "https://phala.network" + }, + { + "prefix": 31, + "network": "litentry", + "displayName": "Litentry Network", + "symbols": ["LIT"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://litentry.com/" }, { "prefix": 32, "network": "robonomics", - "displayName": "Robonomics Network", - "symbols": null, - "decimals": null, + "displayName": "Robonomics", + "symbols": ["XRT"], + "decimals": [9], "standardAccount": "*25519", - "website": null + "website": "https://robonomics.network" }, { "prefix": 33, @@ -262,11 +307,29 @@ "standardAccount": "*25519", 
"website": null }, + { + "prefix": 34, + "network": "ares", + "displayName": "Ares Protocol", + "symbols": ["ARES"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://www.aresprotocol.com/" + }, + { + "prefix": 35, + "network": "vln", + "displayName": "Valiu Liquidity Network", + "symbols": ["USDv"], + "decimals": [15], + "standardAccount": "*25519", + "website": "https://valiu.com/" + }, { "prefix": 36, "network": "centrifuge", "displayName": "Centrifuge Chain", - "symbols": ["RAD"], + "symbols": ["CFG"], "decimals": [18], "standardAccount": "*25519", "website": "https://centrifuge.io/" @@ -280,6 +343,15 @@ "standardAccount": "*25519", "website": "https://nodle.io/" }, + { + "prefix": 38, + "network": "kilt", + "displayName": "KILT Chain", + "symbols": ["KILT"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://kilt.io/" + }, { "prefix": 39, "network": "mathchain", @@ -298,6 +370,15 @@ "standardAccount": "*25519", "website": "https://mathwallet.org" }, + { + "prefix": 41, + "network": "poli", + "displayName": "Polimec Chain", + "symbols": null, + "decimals": null, + "standardAccount": "*25519", + "website": "https://polimec.io/" + }, { "prefix": 42, "network": "substrate", @@ -309,8 +390,8 @@ }, { "prefix": 43, - "network": "reserved43", - "displayName": "This prefix is reserved.", + "network": null, + "displayName": "Bare 32-bit ECDSA SECP-256k1 public key.", "symbols": null, "decimals": null, "standardAccount": null, @@ -325,6 +406,15 @@ "standardAccount": "*25519", "website": "https://chainx.org/" }, + { + "prefix": 45, + "network": "uniarts", + "displayName": "UniArts Network", + "symbols": ["UART", "UINK"], + "decimals": [12, 12], + "standardAccount": "*25519", + "website": "https://uniarts.me" + }, { "prefix": 46, "network": "reserved46", @@ -345,12 +435,210 @@ }, { "prefix": 48, - "network": "reserved48", - "displayName": "All prefixes 48 and higher are reserved and cannot be allocated.", + "network": "neatcoin", + "displayName": "Neatcoin Mainnet", + "symbols": ["NEAT"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://neatcoin.org" + }, + { + "prefix": 49, + "network": "picasso", + "displayName": "Picasso", + "symbols": ["PICA"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://picasso.composable.finance" + }, + { + "prefix": 50, + "network": "composable", + "displayName": "Composable", + "symbols": ["LAYR"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://composable.finance" + }, + { + "prefix": 63, + "network": "hydradx", + "displayName": "HydraDX", + "symbols": ["HDX"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://hydradx.io" + }, + { + "prefix": 65, + "network": "aventus", + "displayName": "AvN Mainnet", + "symbols": ["AVT"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://aventus.io" + }, + { + "prefix": 66, + "network": "crust", + "displayName": "Crust Network", + "symbols": ["CRU"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://crust.network" + }, + { + "prefix": 67, + "network": "equilibrium", + "displayName": "Equilibrium Network", + "symbols": ["Unknown", "USD", "EQ", "ETH", "BTC", "EOS", "DOT", "CRV"], + "decimals": [0,9,9,9,9,9,9,9], + "standardAccount": "*25519", + "website": "https://equilibrium.io" + }, + { + "prefix": 69, + "network": "sora", + "displayName": "SORA Network", + "symbols": ["XOR"], + "decimals": [18], + "standardAccount": "*25519", + "website": 
"https://sora.org" + }, + { + "prefix": 73, + "network": "zeitgeist", + "displayName": "Zeitgeist", + "symbols": ["ZTG"], + "decimals": [10], + "standardAccount": "*25519", + "website": "https://zeitgeist.pm" + }, + { + "prefix": 77, + "network": "manta", + "displayName": "Manta network", + "symbols": ["MA"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://manta.network" + }, + { + "prefix": 78, + "network": "calamari", + "displayName": "Calamari: Manta Canary Network", + "symbols": ["KMA"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://manta.network" + }, + { + "prefix": 98, + "network": "polkasmith", + "displayName": "PolkaSmith Canary Network", + "symbols": ["PKS"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://polkafoundry.com" + }, + { + "prefix": 99, + "network": "polkafoundry", + "displayName": "PolkaFoundry Network", + "symbols": ["PKF"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://polkafoundry.com" + }, + { + "prefix": 101, + "network": "origintrail-parachain", + "displayName": "OriginTrail Parachain", + "symbols": ["TRAC"], + "decimals": [18], + "standardAccount": "secp256k1", + "website": "https://origintrail.io" + }, + { + "prefix": 110, + "network": "heiko", + "displayName": "Heiko", + "symbols": ["HKO"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://parallel.fi/" + }, + { + "prefix": 113, + "network": "integritee-incognito", + "displayName": "Integritee Incognito", "symbols": null, "decimals": null, - "standardAccount": null, - "website": null + "standardAccount": "*25519", + "website": "https://integritee.network" + }, + { + "prefix": 128, + "network": "clover", + "displayName": "Clover Finance", + "symbols": ["CLV"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://clover.finance" + }, + { + "prefix": 136, + "network": "altair", + "displayName": "Altair", + "symbols": ["AIR"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://centrifuge.io/" + }, + { + "prefix": 172, + "network": "parallel", + "displayName": "Parallel", + "symbols": ["PARA"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://parallel.fi/" + }, + { + "prefix": 252, + "network": "social-network", + "displayName": "Social Network", + "symbols": ["NET"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://social.network" + }, + { + "prefix": 1284, + "network": "moonbeam", + "displayName": "Moonbeam", + "symbols": ["GLMR"], + "decimals": [18], + "standardAccount": "secp256k1", + "website": "https://moonbeam.network" + }, + { + "prefix": 1285, + "network": "moonriver", + "displayName": "Moonriver", + "symbols": ["MOVR"], + "decimals": [18], + "standardAccount": "secp256k1", + "website": "https://moonbeam.network" + }, + { + "prefix": 10041, + "network": "basilisk", + "displayName": "Basilisk", + "symbols": ["BSX"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://bsx.fi" } ] } diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index ddadc2cb7177d..4eed6e5e29133 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-utils" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -12,10 +12,10 @@ description = "Substrate test utilities" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = { version = "0.3.1", features = ["compat"] } 
-substrate-test-utils-derive = { version = "0.8.0", path = "./derive" } -tokio = { version = "0.2.13", features = ["macros"] } +futures = "0.3.16" +substrate-test-utils-derive = { version = "0.10.0-dev", path = "./derive" } +tokio = { version = "1.10", features = ["macros", "time"] } [dev-dependencies] -sc-service = { version = "0.8.0", path = "../client/service" } -trybuild = { version = "1.0", features = ["diff"] } +sc-service = { version = "0.10.0-dev", path = "../client/service" } +trybuild = { version = "1.0.43", features = [ "diff" ] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 07d28660f6188..34238872cad84 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-client" -version = "2.0.0" +version = "2.0.1" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -12,23 +12,27 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1" } -futures = "0.3.4" -futures01 = { package = "futures", version = "0.1.29" } -hash-db = "0.15.2" +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.16" hex = "0.4" -serde = "1.0.55" -serde_json = "1.0.55" -sc-client-api = { version = "2.0.0", path = "../../client/api" } -sc-client-db = { version = "0.8.0", features = ["test-helpers"], path = "../../client/db" } -sc-consensus = { version = "0.8.0", path = "../../client/consensus/common" } -sc-executor = { version = "0.8.0", path = "../../client/executor" } -sc-light = { version = "2.0.0", path = "../../client/light" } -sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../client/service" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../primitives/state-machine" } +serde = "1.0.126" +serde_json = "1.0.68" +sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } +sc-client-db = { version = "0.10.0-dev", features = [ + "test-helpers", +], path = "../../client/db" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } +sc-executor = { version = "0.10.0-dev", path = "../../client/executor" } +sc-light = { version = "4.0.0-dev", path = "../../client/light" } +sc-offchain = { version = "4.0.0-dev", path = "../../client/offchain" } +sc-service = { version = "0.10.0-dev", default-features = false, features = [ + "test-helpers", +], path = "../../client/service" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } +async-trait = "0.1.50" diff --git
a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index a74bd3258ef0f..bf1c9898972ca 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,17 +17,12 @@ //! Client extension for tests. -use sc_service::client::Client; -use sc_client_api::backend::Finalizer; -use sc_client_api::client::BlockBackend; -use sp_consensus::{ - BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError, - ForkChoiceStrategy, -}; -use sp_runtime::Justification; -use sp_runtime::traits::{Block as BlockT}; -use sp_runtime::generic::BlockId; use codec::alloc::collections::hash_map::HashMap; +use sc_client_api::{backend::Finalizer, client::BlockBackend}; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; +use sc_service::client::Client; +use sp_consensus::{BlockOrigin, Error as ConsensusError}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification, Justifications}; /// Extension trait for a test client. pub trait ClientExt: Sized { @@ -43,32 +38,40 @@ pub trait ClientExt: Sized { } /// Extension trait for a test client around block importing. +#[async_trait::async_trait] pub trait ClientBlockImportExt: Sized { /// Import block to the chain. No finality. - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; /// Import a block and make it our best block if possible. - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError>; /// Import a block and finalize it. - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) - -> Result<(), ConsensusError>; + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError>; - /// Import block with justification, finalizes block. - fn import_justified( + /// Import block with justification(s), finalizes block. + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, - justification: Justification + justifications: Justifications, ) -> Result<(), ConsensusError>; } impl ClientExt for Client - where - B: sc_client_api::backend::Backend, - E: sc_client_api::CallExecutor + 'static, - Self: BlockImport, - Block: BlockT, +where + B: sc_client_api::backend::Backend, + E: sc_client_api::CallExecutor + 'static, + Self: BlockImport, + Block: BlockT, { fn finalize_block( &self, @@ -79,104 +82,129 @@ impl ClientExt for Client } fn genesis_hash(&self) -> ::Hash { - self.block_hash(0.into()).unwrap().unwrap() + self.block_hash(0u32.into()).unwrap().unwrap() } } /// This implementation is required, because of the weird api requirements around `BlockImport`. 
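// A minimal sketch (not part of the upstream patch) of how call sites adapt
// to `ClientBlockImportExt` becoming async, assuming a `client` built via
// `TestClientBuilder` and an already-built block `a1`; this is the same
// pattern the updated `trait_tests.rs` later in this patch uses:
//
//     use futures::executor::block_on;
//     use sp_consensus::BlockOrigin;
//
//     block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap();
//
// Callers that are themselves async can instead simply `.await` the future.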
+#[async_trait::async_trait] impl ClientBlockImportExt for std::sync::Arc - where for<'r> &'r T: BlockImport +where + for<'r> &'r T: BlockImport, + Transaction: Send + 'static, + T: Send + Sync, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_justified( + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, - justification: Justification, + justifications: Justifications, ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); - import.justification = Some(justification); + import.justifications = Some(justifications); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } } +#[async_trait::async_trait] impl ClientBlockImportExt for Client - where - Self: BlockImport, +where + Self: BlockImport, + RA: Send, + B: Send + Sync, + E: Send, + >::Transaction: Send, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = 
BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_justified( + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, - justification: Justification, + justifications: Justifications, ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); - import.justification = Some(justification); + import.justifications = Some(justifications); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 35d6e78ccd41e..9bc411af5d3ed 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,39 +21,44 @@ pub mod client_ext; +pub use self::client_ext::{ClientBlockImportExt, ClientExt}; pub use sc_client_api::{ - execution_extensions::{ExecutionStrategies, ExecutionExtensions}, - ForkBlocks, BadBlocks, + execution_extensions::{ExecutionExtensions, ExecutionStrategies}, + BadBlocks, ForkBlocks, }; -pub use sc_client_db::{Backend, self}; +pub use sc_client_db::{self, Backend}; +pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod}; +pub use sc_service::{client, RpcHandlers, RpcSession}; pub use sp_consensus; -pub use sc_executor::{NativeExecutor, WasmExecutionMethod, self}; pub use sp_keyring::{ - AccountKeyring, - ed25519::Keyring as Ed25519Keyring, - sr25519::Keyring as Sr25519Keyring, + ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, }; -pub use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +pub use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; -pub use sc_service::{RpcHandlers, RpcSession, client}; -pub use self::client_ext::{ClientExt, ClientBlockImportExt}; -use std::pin::Pin; -use std::sync::Arc; -use std::collections::{HashSet, HashMap}; -use futures::{future::{Future, FutureExt}, stream::StreamExt}; +use futures::{ + future::{Future, FutureExt}, + stream::StreamExt, +}; +use sc_client_api::BlockchainEvents; +use sc_service::client::{ClientConfig, LocalCallExecutor}; use serde::Deserialize; use sp_core::storage::ChildInfo; -use sp_runtime::{OpaqueExtrinsic, codec::Encode, traits::{Block as BlockT, BlakeTwo256}}; -use sc_service::client::{LocalCallExecutor, ClientConfig}; -use sc_client_api::BlockchainEvents; +use sp_runtime::{ + codec::Encode, + traits::{BlakeTwo256, Block as BlockT}, + OpaqueExtrinsic, +}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, + sync::Arc, +}; /// Test client light database backend. -pub type LightBackend = sc_light::Backend< - sc_client_db::light::LightStorage, - BlakeTwo256, ->; +pub type LightBackend = + sc_light::Backend, BlakeTwo256>; /// A genesis storage initialization trait. pub trait GenesisInit: Default { @@ -68,27 +73,32 @@ impl GenesisInit for () { } /// A builder for creating a test client instance. -pub struct TestClientBuilder { +pub struct TestClientBuilder { execution_strategies: ExecutionStrategies, genesis_init: G, /// The key is an unprefixed storage key, this only contains /// default child trie content. child_storage_extension: HashMap, StorageChild>, backend: Arc, - _executor: std::marker::PhantomData, + _executor: std::marker::PhantomData, keystore: Option, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, + enable_offchain_indexing_api: bool, + no_genesis: bool, } -impl Default - for TestClientBuilder, G> { +impl Default + for TestClientBuilder, G> +{ fn default() -> Self { Self::with_default_backend() } } -impl TestClientBuilder, G> { +impl + TestClientBuilder, G> +{ /// Create new `TestClientBuilder` with default backend. 
pub fn with_default_backend() -> Self { let backend = Arc::new(Backend::new_test(std::u32::MAX, std::u64::MAX)); @@ -100,9 +110,21 @@ impl TestClientBuilder Self { + let backend = Arc::new(Backend::new_test_with_tx_storage( + keep_blocks, + 0, + sc_client_db::TransactionStorageMode::StorageChain, + )); + Self::with_backend(backend) + } } -impl TestClientBuilder { +impl + TestClientBuilder +{ /// Create a new instance of the test client builder. pub fn with_backend(backend: Arc) -> Self { TestClientBuilder { @@ -114,6 +136,8 @@ impl TestClientBuilder TestClientBuilder, ) -> Self { let storage_key = child_info.storage_key(); - let entry = self.child_storage_extension.entry(storage_key.to_vec()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.clone(), - }); + let entry = self.child_storage_extension.entry(storage_key.to_vec()).or_insert_with(|| { + StorageChild { data: Default::default(), child_info: child_info.clone() } + }); entry.data.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); self } /// Set the execution strategy that should be used by all contexts. - pub fn set_execution_strategy( - mut self, - execution_strategy: ExecutionStrategy - ) -> Self { + pub fn set_execution_strategy(mut self, execution_strategy: ExecutionStrategy) -> Self { self.execution_strategies = ExecutionStrategies { syncing: execution_strategy, importing: execution_strategy, @@ -166,7 +185,8 @@ impl TestClientBuilder, bad_blocks: BadBlocks, ) -> Self { @@ -175,21 +195,30 @@ impl TestClientBuilder Self { + self.enable_offchain_indexing_api = true; + self + } + + /// Disable writing genesis. + pub fn set_no_genesis(mut self) -> Self { + self.no_genesis = true; + self + } + /// Build the test client with the given native executor. pub fn build_with_executor( self, - executor: Executor, + executor: ExecutorDispatch, ) -> ( - client::Client< - Backend, - Executor, - Block, - RuntimeApi, - >, + client::Client, sc_consensus::LongestChain, - ) where - Executor: sc_client_api::CallExecutor + 'static, + ) + where + ExecutorDispatch: sc_client_api::CallExecutor + 'static, Backend: sc_client_api::backend::Backend, + >::OffchainStorage: 'static, { let storage = { let mut storage = self.genesis_init.genesis_storage(); @@ -217,10 +246,17 @@ impl TestClientBuilder TestClientBuilder TestClientBuilder< - Block, - client::LocalCallExecutor>, - Backend, - G, -> { +impl + TestClientBuilder< + Block, + client::LocalCallExecutor>, + Backend, + G, + > +{ /// Build the test client with the given native executor. 
pub fn build_with_native_executor( self, @@ -241,25 +279,27 @@ impl TestClientBuilder< ) -> ( client::Client< Backend, - client::LocalCallExecutor>, + client::LocalCallExecutor>, Block, - RuntimeApi + RuntimeApi, >, sc_consensus::LongestChain, - ) where - I: Into>>, - E: sc_executor::NativeExecutionDispatch + 'static, + ) + where + I: Into>>, + D: sc_executor::NativeExecutionDispatch + 'static, Backend: sc_client_api::backend::Backend + 'static, { - let executor = executor.into().unwrap_or_else(|| - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - ); + let executor = executor.into().unwrap_or_else(|| { + NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8) + }); let executor = LocalCallExecutor::new( self.backend.clone(), executor, Box::new(sp_core::testing::TaskExecutor::new()), Default::default(), - ); + ) + .expect("Creates LocalCallExecutor"); self.build_with_executor(executor) } @@ -272,7 +312,7 @@ pub struct RpcTransactionOutput { /// The session object. pub session: RpcSession, /// An async receiver if data will be returned via a callback. - pub receiver: futures01::sync::mpsc::Receiver, + pub receiver: futures::channel::mpsc::UnboundedReceiver, } impl std::fmt::Debug for RpcTransactionOutput { @@ -312,10 +352,10 @@ impl RpcHandlersExt for RpcHandlers { &self, extrinsic: OpaqueExtrinsic, ) -> Pin> + Send>> { - let (tx, rx) = futures01::sync::mpsc::channel(0); + let (tx, rx) = futures::channel::mpsc::unbounded(); let mem = RpcSession::new(tx.into()); - Box::pin(self - .rpc_query( + Box::pin( + self.rpc_query( &mem, &format!( r#"{{ @@ -327,7 +367,7 @@ impl RpcHandlersExt for RpcHandlers { hex::encode(extrinsic.encode()) ), ) - .map(move |result| parse_rpc_result(result, mem, rx)) + .map(move |result| parse_rpc_result(result, mem, rx)), ) } } @@ -335,29 +375,20 @@ impl RpcHandlersExt for RpcHandlers { pub(crate) fn parse_rpc_result( result: Option, session: RpcSession, - receiver: futures01::sync::mpsc::Receiver, + receiver: futures::channel::mpsc::UnboundedReceiver, ) -> Result { if let Some(ref result) = result { - let json: serde_json::Value = serde_json::from_str(result) - .expect("the result can only be a JSONRPC string; qed"); - let error = json - .as_object() - .expect("JSON result is always an object; qed") - .get("error"); + let json: serde_json::Value = + serde_json::from_str(result).expect("the result can only be a JSONRPC string; qed"); + let error = json.as_object().expect("JSON result is always an object; qed").get("error"); if let Some(error) = error { - return Err( - serde_json::from_value(error.clone()) - .expect("the JSONRPC result's error is always valid; qed") - ) + return Err(serde_json::from_value(error.clone()) + .expect("the JSONRPC result's error is always valid; qed")) } } - Ok(RpcTransactionOutput { - result, - session, - receiver, - }) + Ok(RpcTransactionOutput { result, session, receiver }) } /// An extension trait for `BlockchainEvents`. @@ -366,8 +397,9 @@ where C: BlockchainEvents, B: BlockT, { - /// Wait for `count` blocks to be imported in the node and then exit. This function will not return if no blocks - /// are ever created, thus you should restrict the maximum amount of time of the test execution. + /// Wait for `count` blocks to be imported in the node and then exit. This function will not + /// return if no blocks are ever created, thus you should restrict the maximum amount of time of + /// the test execution. 
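// A minimal usage sketch, assuming an async test body (for example one using
// the reworked `substrate-test-utils` `#[test]` macro later in this patch):
//
//     client.wait_for_blocks(3).await;
//
// Because the future never resolves when fewer than `count` new-best blocks
// arrive, the surrounding test timeout (`SUBSTRATE_TEST_TIMEOUT`, default
// 600 seconds) is what bounds the wait.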
fn wait_for_blocks(&self, count: usize) -> Pin<Box<dyn Future<Output = ()> + Send>>; } @@ -387,7 +419,7 @@ where if notification.is_new_best { blocks.insert(notification.hash); if blocks.len() == count { - break; + break } } } @@ -399,8 +431,9 @@ where mod tests { use sc_service::RpcSession; - fn create_session_and_receiver() -> (RpcSession, futures01::sync::mpsc::Receiver<String>) { - let (tx, rx) = futures01::sync::mpsc::channel(0); + fn create_session_and_receiver( + ) -> (RpcSession, futures::channel::mpsc::UnboundedReceiver<String>) { + let (tx, rx) = futures::channel::mpsc::unbounded(); let mem = RpcSession::new(tx.into()); (mem, rx) @@ -412,31 +445,45 @@ mod tests { assert!(super::parse_rpc_result(None, mem, rx).is_ok()); let (mem, rx) = create_session_and_receiver(); - assert!( - super::parse_rpc_result(Some(r#"{ + assert!(super::parse_rpc_result( + Some( + r#"{ "jsonrpc": "2.0", "result": 19, "id": 1 - }"#.to_string()), mem, rx) - .is_ok(), - ); + }"# + .to_string() + ), + mem, + rx + ) + .is_ok(),); let (mem, rx) = create_session_and_receiver(); - let error = super::parse_rpc_result(Some(r#"{ + let error = super::parse_rpc_result( + Some( + r#"{ "jsonrpc": "2.0", "error": { "code": -32601, "message": "Method not found" }, "id": 1 - }"#.to_string()), mem, rx) - .unwrap_err(); + }"# + .to_string(), + ), + mem, + rx, + ) + .unwrap_err(); assert_eq!(error.code, -32601); assert_eq!(error.message, "Method not found"); assert!(error.data.is_none()); let (mem, rx) = create_session_and_receiver(); - let error = super::parse_rpc_result(Some(r#"{ + let error = super::parse_rpc_result( + Some( + r#"{ "jsonrpc": "2.0", "error": { "code": -32601, @@ -444,8 +491,13 @@ "data": 42 }, "id": 1 - }"#.to_string()), mem, rx) - .unwrap_err(); + }"# + .to_string(), + ), + mem, + rx, + ) + .unwrap_err(); assert_eq!(error.code, -32601); assert_eq!(error.message, "Method not found"); assert!(error.data.is_some()); diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index 263bfd3537340..545e8cf332618 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-test-utils-derive" -version = "0.8.0" +version = "0.10.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -10,8 +10,9 @@ description = "Substrate test utilities macros" [dependencies] quote = "1.0.6" -syn = { version = "1.0.33", features = ["full"] } -proc-macro-crate = "0.1.4" +syn = { version = "1.0.58", features = ["full"] } +proc-macro-crate = "1.0.0" +proc-macro2 = "1.0.29" [lib] proc-macro = true diff --git a/test-utils/derive/src/lib.rs b/test-utils/derive/src/lib.rs index f5d627068963f..3f14f67477fad 100644 --- a/test-utils/derive/src/lib.rs +++ b/test-utils/derive/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -17,56 +17,38 @@ // along with this program. If not, see <https://www.gnu.org/licenses/>.
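// The rewritten attribute below no longer injects a `sc_service::TaskExecutor`
// argument: test functions must take no arguments. The attribute arguments are
// forwarded verbatim to `#[tokio::test(...)]`, and the body is wrapped in
// `tokio::time::timeout`, panicking with "The test took too long!" once
// `SUBSTRATE_TEST_TIMEOUT` (default 600 seconds) elapses.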
use proc_macro::{Span, TokenStream}; -use proc_macro_crate::crate_name; +use proc_macro_crate::{crate_name, FoundCrate}; use quote::quote; -use std::env; #[proc_macro_attribute] pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { - impl_test(args, item) -} - -fn impl_test(args: TokenStream, item: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(item as syn::ItemFn); - let args = syn::parse_macro_input!(args as syn::AttributeArgs); - parse_knobs(input, args).unwrap_or_else(|e| e.to_compile_error().into()) + parse_knobs(input, args.into()).unwrap_or_else(|e| e.to_compile_error().into()) } fn parse_knobs( mut input: syn::ItemFn, - args: syn::AttributeArgs, + args: proc_macro2::TokenStream, ) -> Result { let sig = &mut input.sig; let body = &input.block; let attrs = &input.attrs; let vis = input.vis; - if sig.inputs.len() != 1 { - let msg = "the test function accepts only one argument of type sc_service::TaskExecutor"; - return Err(syn::Error::new_spanned(&sig, msg)); + if !sig.inputs.is_empty() { + return Err(syn::Error::new_spanned(&sig, "No arguments expected for tests.")) } - let (task_executor_name, task_executor_type) = match sig.inputs.pop().map(|x| x.into_value()) { - Some(syn::FnArg::Typed(x)) => (x.pat, x.ty), - _ => { - let msg = - "the test function accepts only one argument of type sc_service::TaskExecutor"; - return Err(syn::Error::new_spanned(&sig, msg)); - } - }; - - let crate_name = if env::var("CARGO_PKG_NAME").unwrap() == "substrate-test-utils" { - syn::Ident::new("substrate_test_utils", Span::call_site().into()) - } else { - let crate_name = crate_name("substrate-test-utils") - .map_err(|e| syn::Error::new_spanned(&sig, e))?; - syn::Ident::new(&crate_name, Span::call_site().into()) + let crate_name = match crate_name("substrate-test-utils") { + Ok(FoundCrate::Itself) => syn::Ident::new("substrate_test_utils", Span::call_site().into()), + Ok(FoundCrate::Name(crate_name)) => syn::Ident::new(&crate_name, Span::call_site().into()), + Err(e) => return Err(syn::Error::new_spanned(&sig, e)), }; let header = { quote! { - #[#crate_name::tokio::test(#(#args)*)] + #[#crate_name::tokio::test( #args )] } }; @@ -74,31 +56,15 @@ fn parse_knobs( #header #(#attrs)* #vis #sig { - use #crate_name::futures::future::FutureExt; - - let #task_executor_name: #task_executor_type = (|fut, _| { - #crate_name::tokio::spawn(fut).map(drop) - }) - .into(); - let timeout_task = #crate_name::tokio::time::delay_for( + if #crate_name::tokio::time::timeout( std::time::Duration::from_secs( std::env::var("SUBSTRATE_TEST_TIMEOUT") .ok() .and_then(|x| x.parse().ok()) - .unwrap_or(600)) - ).fuse(); - let actual_test_task = async move { - #body - } - .fuse(); - - #crate_name::futures::pin_mut!(timeout_task, actual_test_task); - - #crate_name::futures::select! 
{ - _ = timeout_task => { - panic!("The test took too long!"); - }, - _ = actual_test_task => {}, + .unwrap_or(600)), + async move { #body }, + ).await.is_err() { + panic!("The test took too long!"); } } }; diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index cb6147adf25c6..24f4d404c18bd 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -13,50 +13,52 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.8.0", default-features = false, path = "../../primitives/consensus/babe" } -sp-block-builder = { version = "2.0.0", default-features = false, path = "../../primitives/block-builder" } -codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } -frame-executive = { version = "2.0.0", default-features = false, path = "../../frame/executive" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.24.0", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0"} -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0"} -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0", default-features = false, path = "../../frame/support" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-api = { version = "2.0.0", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../frame/babe" } -frame-system = { version = "2.0.0", default-features = false, path = "../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../frame/system/rpc/runtime-api" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../frame/timestamp" } -sp-finality-grandpa = { version = "2.0.0", default-features = false, path = "../../primitives/finality-grandpa" } -sp-trie = { version = "2.0.0", default-features = false, path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.22.0", default-features = false } -parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } -sc-service = { version = "0.8.0", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } -sp-state-machine = { version = "0.8.0", default-features = false, path = "../../primitives/state-machine" } -sp-externalities = { version = "0.8.0", default-features = 
false, path = "../../primitives/externalities" } +sp-application-crypto = { version = "4.0.0-dev", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura" } +sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe" } +sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "4.0.0-dev", optional = true, path = "../../primitives/keyring" } +memory-db = { version = "0.27.0", default-features = false } +sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../primitives/offchain" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime-interface" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../frame/support" } +sp-version = { version = "4.0.0-dev", default-features = false, path = "../../primitives/version" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } +frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../frame/system/rpc/runtime-api" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../frame/timestamp" } +sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" } +sp-trie = { version = "4.0.0-dev", default-features = false, path = "../../primitives/trie" } +sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } +trie-db = { version = "0.22.6", default-features = false } +parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } +sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } +sp-state-machine = { version = "0.10.0-dev", default-features = false, path = "../../primitives/state-machine" } +sp-externalities = { version = "0.10.0-dev", default-features = false, path = "../../primitives/externalities" } # 3rd party -cfg-if = "0.1.10" -log = { version = "0.4.8", optional = true } -serde = { version = "1.0.101", optional = true, features = ["derive"] } +cfg-if = "1.0" +log = { version = "0.4.14", default-features = false } +serde = { version = "1.0.126", optional = true, features = ["derive"] } 
[dev-dependencies] -sc-block-builder = { version = "0.8.0", path = "../../client/block-builder" } -sc-executor = { version = "0.8.0", path = "../../client/executor" } +sc-block-builder = { version = "0.10.0-dev", path = "../../client/block-builder" } +sc-executor = { version = "0.10.0-dev", path = "../../client/executor" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } +futures = "0.3.9" [build-dependencies] -wasm-builder-runner = { version = "2.0.0", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } +substrate-wasm-builder = { version = "5.0.0-dev", path = "../../utils/wasm-builder" } [features] default = [ @@ -68,10 +70,10 @@ std = [ "sp-consensus-babe/std", "sp-block-builder/std", "codec/std", - "frame-executive/std", + "scale-info/std", "sp-inherents/std", "sp-keyring", - "log", + "log/std", "memory-db/std", "sp-offchain/std", "sp-core/std", @@ -97,3 +99,5 @@ std = [ "sp-transaction-pool/std", "trie-db/std", ] +# Special feature to disable logging +disable-logging = [ "sp-api/disable-logging" ] diff --git a/test-utils/runtime/build.rs b/test-utils/runtime/build.rs index 834551a7ba12d..50c455b4ad831 100644 --- a/test-utils/runtime/build.rs +++ b/test-utils/runtime/build.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,17 +15,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates_or_path("2.0.1", "../../utils/wasm-builder") .export_heap_base() // Note that we set the stack-size to 1MB explicitly even though it is set // to this value by default. This is because some of our tests (`restoration_of_globals`) // depend on the stack-size. 
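// The build script now emits two wasm binaries: the regular runtime, and a
// second build (added below) with the `disable-logging` feature enabled,
// written to `wasm_binary_logging_disabled.rs` so tests can exercise a
// runtime whose logging is compiled out. This pairs with the new
// `disable-logging = [ "sp-api/disable-logging" ]` feature in Cargo.toml.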
.append_to_rust_flags("-Clink-arg=-zstack-size=1048576") .import_memory() - .build() + .build(); + + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .set_file_name("wasm_binary_logging_disabled.rs") + .enable_feature("disable-logging") + .build(); } diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index b310bbe7a709a..3561697042f2a 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -12,17 +12,16 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-light = { version = "2.0.0", path = "../../../client/light" } -sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0", path = "../../../client/block-builder" } +sc-light = { version = "4.0.0-dev", path = "../../../client/light" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } substrate-test-client = { version = "2.0.0", path = "../../client" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -codec = { package = "parity-scale-codec", version = "1.3.1" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -futures = "0.3.4" +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } +futures = "0.3.9" diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index cc0bbc69e8fc1..e8c1d2ac5cd48 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,17 +17,19 @@ //! Block Builder extensions for tests. +use sc_client_api::backend; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::ChangesTrieConfiguration; -use sc_client_api::backend; -use sp_runtime::traits::HashFor; use sc_block_builder::BlockBuilderApi; /// Extension trait for test block builder. pub trait BlockBuilderExt { /// Add transfer extrinsic to the block. 
- fn push_transfer(&mut self, transfer: substrate_test_runtime::Transfer) -> Result<(), sp_blockchain::Error>; + fn push_transfer( + &mut self, + transfer: substrate_test_runtime::Transfer, + ) -> Result<(), sp_blockchain::Error>; /// Add storage change extrinsic to the block. fn push_storage_change( &mut self, @@ -41,19 +43,21 @@ pub trait BlockBuilderExt { ) -> Result<(), sp_blockchain::Error>; } -impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> where +impl<'a, A, B> BlockBuilderExt + for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> +where A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + - ApiExt< + A::Api: BlockBuilderApi + + ApiExt< substrate_test_runtime::Block, - StateBackend = backend::StateBackendFor + StateBackend = backend::StateBackendFor, >, B: backend::Backend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - backend::StateBackendFor: - sp_api::StateBackend>, { - fn push_transfer(&mut self, transfer: substrate_test_runtime::Transfer) -> Result<(), sp_blockchain::Error> { + fn push_transfer( + &mut self, + transfer: substrate_test_runtime::Transfer, + ) -> Result<(), sp_blockchain::Error> { self.push(transfer.into_signed_tx()) } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 5b343f7748ead..dc5ccadc4574f 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,52 +23,65 @@ pub mod trait_tests; mod block_builder_ext; -use std::sync::Arc; -use std::collections::HashMap; +pub use sc_consensus::LongestChain; +use std::{collections::HashMap, sync::Arc}; pub use substrate_test_client::*; pub use substrate_test_runtime as runtime; -pub use sc_consensus::LongestChain; pub use self::block_builder_ext::BlockBuilderExt; -use sp_core::{sr25519, ChangesTrieConfiguration}; -use sp_core::storage::{ChildInfo, Storage, StorageChild}; -use substrate_test_runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor, HashFor}; use sc_client_api::light::{ - RemoteCallRequest, RemoteChangesRequest, RemoteBodyRequest, - Fetcher, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, + Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, + RemoteReadChildRequest, RemoteReadRequest, }; +use sp_core::{ + sr25519, + storage::{ChildInfo, Storage, StorageChild}, + ChangesTrieConfiguration, +}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor}; +use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; /// A prelude to import in tests. 
pub mod prelude { // Trait extensions pub use super::{ - BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, - ClientBlockImportExt, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + TestClientBuilderExt, }; // Client structs pub use super::{ - TestClient, TestClientBuilder, Backend, LightBackend, - Executor, LightExecutor, LocalExecutor, NativeExecutor, WasmExecutionMethod, + Backend, ExecutorDispatch, LightBackend, LightExecutor, LocalExecutorDispatch, + NativeElseWasmExecutor, TestClient, TestClientBuilder, WasmExecutionMethod, }; // Keyring pub use super::{AccountKeyring, Sr25519Keyring}; } -sc_executor::native_executor_instance! { - pub LocalExecutor, - substrate_test_runtime::api::dispatch, - substrate_test_runtime::native_version, +/// A unit struct which implements `NativeExecutionDispatch` feeding in the +/// hard-coded runtime. +pub struct LocalExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for LocalExecutorDispatch { + type ExtendHostFunctions = (); + + fn dispatch(method: &str, data: &[u8]) -> Option> { + substrate_test_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + substrate_test_runtime::native_version() + } } /// Test client database backend. pub type Backend = substrate_test_client::Backend; /// Test client executor. -pub type Executor = client::LocalCallExecutor< +pub type ExecutorDispatch = client::LocalCallExecutor< + substrate_test_runtime::Block, Backend, - NativeExecutor, + NativeElseWasmExecutor, >; /// Test client light database backend. @@ -78,12 +91,13 @@ pub type LightBackend = substrate_test_client::LightBackend, - HashFor + HashFor, >, - NativeExecutor - > + NativeElseWasmExecutor, + >, >; /// Parameters of test-client builder with test-runtime. @@ -92,6 +106,7 @@ pub struct GenesisParameters { changes_trie_config: Option, heap_pages_override: Option, extra_storage: Storage, + wasm_code: Option>, } impl GenesisParameters { @@ -113,6 +128,11 @@ impl GenesisParameters { self.extra_storage.clone(), ) } + + /// Set the wasm code that should be used at genesis. 
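// A minimal usage sketch, assuming a caller-supplied runtime blob named
// `custom_wasm` (a hypothetical `Vec<u8>` holding a compiled runtime):
//
//     let mut builder = TestClientBuilder::new();
//     builder.genesis_init_mut().set_wasm_code(custom_wasm);
//
// `GenesisInit::genesis_storage` below then stores the override under
// `well_known_keys::CODE`, replacing the compiled-in runtime at genesis.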
+ pub fn set_wasm_code(&mut self, code: Vec) { + self.wasm_code = Some(code); + } } impl substrate_test_client::GenesisInit for GenesisParameters { @@ -121,16 +141,24 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let mut storage = self.genesis_config().genesis_map(); + if let Some(ref code) = self.wasm_code { + storage + .top + .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); + } + let child_roots = storage.children_default.iter().map(|(_sk, child_content)| { - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_content.data.clone().into_iter().collect() - ); + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + child_content.data.clone().into_iter().collect(), + ); let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); (prefixed_storage_key.into_inner(), state_root.encode()) }); - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.top.clone().into_iter().chain(child_roots).collect() - ); + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + storage.top.clone().into_iter().chain(child_roots).collect(), + ); let block: runtime::Block = client::genesis::construct_genesis_block(state_root); storage.top.extend(additional_storage_with_genesis(&block)); @@ -146,10 +174,14 @@ pub type TestClientBuilder = substrate_test_client::TestClientBuilder< GenesisParameters, >; -/// Test client type with `LocalExecutor` and generic Backend. +/// Test client type with `LocalExecutorDispatch` and generic Backend. pub type Client = client::Client< B, - client::LocalCallExecutor>, + client::LocalCallExecutor< + substrate_test_runtime::Block, + B, + sc_executor::NativeElseWasmExecutor, + >, substrate_test_runtime::Block, substrate_test_runtime::RuntimeApi, >; @@ -163,7 +195,7 @@ pub trait DefaultTestClientBuilderExt: Sized { fn new() -> Self; } -impl DefaultTestClientBuilderExt for TestClientBuilder { +impl DefaultTestClientBuilderExt for TestClientBuilder { fn new() -> Self { Self::with_default_backend() } @@ -201,12 +233,16 @@ pub trait TestClientBuilderExt: Sized { let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.children_default + self.genesis_init_mut() + .extra_storage + .children_default .entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.clone(), - }).data.insert(key, value.into()); + }) + .data + .insert(key, value.into()); self } @@ -228,26 +264,32 @@ pub trait TestClientBuilderExt: Sized { } /// Build the test client and longest chain selector. - fn build_with_longest_chain(self) -> (Client, sc_consensus::LongestChain); + fn build_with_longest_chain( + self, + ) -> (Client, sc_consensus::LongestChain); /// Build the test client and the backend. 
fn build_with_backend(self) -> (Client, Arc); } -impl TestClientBuilderExt for TestClientBuilder< - client::LocalCallExecutor>, - B -> where +impl TestClientBuilderExt + for TestClientBuilder< + client::LocalCallExecutor< + substrate_test_runtime::Block, + B, + sc_executor::NativeElseWasmExecutor, + >, + B, + > where B: sc_client_api::backend::Backend + 'static, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - >::State: - sp_api::StateBackend>, { fn genesis_init_mut(&mut self) -> &mut GenesisParameters { Self::genesis_init_mut(self) } - fn build_with_longest_chain(self) -> (Client, sc_consensus::LongestChain) { + fn build_with_longest_chain( + self, + ) -> (Client, sc_consensus::LongestChain) { self.build_with_native_executor(None) } @@ -258,7 +300,8 @@ impl TestClientBuilderExt for TestClientBuilder< } /// Type of optional fetch callback. -type MaybeFetcherCallback = Option Result + Send + Sync>>; +type MaybeFetcherCallback = + Option Result + Send + Sync>>; /// Type of fetcher future result. type FetcherFutureResult = futures::future::Ready>; @@ -267,7 +310,10 @@ type FetcherFutureResult = futures::future::Ready, Vec>, - body: MaybeFetcherCallback, Vec>, + body: MaybeFetcherCallback< + RemoteBodyRequest, + Vec, + >, } impl LightFetcher { @@ -276,21 +322,18 @@ impl LightFetcher { self, call: MaybeFetcherCallback, Vec>, ) -> Self { - LightFetcher { - call, - body: self.body, - } + LightFetcher { call, body: self.body } } /// Sets remote body callback. pub fn with_remote_body( self, - body: MaybeFetcherCallback, Vec>, + body: MaybeFetcherCallback< + RemoteBodyRequest, + Vec, + >, ) -> Self { - LightFetcher { - call: self.call, - body, - } + LightFetcher { call: self.call, body } } } @@ -298,14 +341,21 @@ impl Fetcher for LightFetcher { type RemoteHeaderResult = FetcherFutureResult; type RemoteReadResult = FetcherFutureResult, Option>>>; type RemoteCallResult = FetcherFutureResult>; - type RemoteChangesResult = FetcherFutureResult, u32)>>; + type RemoteChangesResult = + FetcherFutureResult, u32)>>; type RemoteBodyResult = FetcherFutureResult>; - fn remote_header(&self, _: RemoteHeaderRequest) -> Self::RemoteHeaderResult { + fn remote_header( + &self, + _: RemoteHeaderRequest, + ) -> Self::RemoteHeaderResult { unimplemented!() } - fn remote_read(&self, _: RemoteReadRequest) -> Self::RemoteReadResult { + fn remote_read( + &self, + _: RemoteReadRequest, + ) -> Self::RemoteReadResult { unimplemented!() } @@ -316,18 +366,27 @@ impl Fetcher for LightFetcher { unimplemented!() } - fn remote_call(&self, req: RemoteCallRequest) -> Self::RemoteCallResult { + fn remote_call( + &self, + req: RemoteCallRequest, + ) -> Self::RemoteCallResult { match self.call { Some(ref call) => futures::future::ready(call(req)), None => unimplemented!(), } } - fn remote_changes(&self, _: RemoteChangesRequest) -> Self::RemoteChangesResult { + fn remote_changes( + &self, + _: RemoteChangesRequest, + ) -> Self::RemoteChangesResult { unimplemented!() } - fn remote_body(&self, req: RemoteBodyRequest) -> Self::RemoteBodyResult { + fn remote_body( + &self, + req: RemoteBodyRequest, + ) -> Self::RemoteBodyResult { match self.body { Some(ref body) => futures::future::ready(body(req)), None => unimplemented!(), @@ -342,10 +401,14 @@ pub fn new() -> Client { /// Creates new light client instance used for tests. 
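// `LocalCallExecutor::new` is fallible here, hence the added
// `.expect("Creates LocalCallExecutor")` below; the construction order is
// unchanged: light storage, then blockchain, then light backend, then the
// call executor wrapping both.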
pub fn new_light() -> ( - client::Client, + client::Client< + LightBackend, + LightExecutor, + substrate_test_runtime::Block, + substrate_test_runtime::RuntimeApi, + >, Arc, ) { - let storage = sc_client_db::light::LightStorage::new_test(); let blockchain = Arc::new(sc_light::Blockchain::new(storage)); let backend = Arc::new(LightBackend::new(blockchain)); @@ -355,11 +418,9 @@ pub fn new_light() -> ( executor, Box::new(sp_core::testing::TaskExecutor::new()), Default::default(), - ); - let call_executor = LightExecutor::new( - backend.clone(), - local_call_executor, - ); + ) + .expect("Creates LocalCallExecutor"); + let call_executor = LightExecutor::new(backend.clone(), local_call_executor); ( TestClientBuilder::with_backend(backend.clone()) @@ -375,6 +436,6 @@ pub fn new_light_fetcher() -> LightFetcher { } /// Create a new native executor. -pub fn new_native_executor() -> sc_executor::NativeExecutor { - sc_executor::NativeExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) +pub fn new_native_executor() -> sc_executor::NativeElseWasmExecutor { + sc_executor::NativeElseWasmExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) } diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index b240a42a78555..c5e0ba49fcf5b 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,300 +23,278 @@ use std::sync::Arc; use crate::{ - AccountKeyring, ClientBlockImportExt, BlockBuilderExt, TestClientBuilder, TestClientBuilderExt, + AccountKeyring, BlockBuilderExt, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, +}; +use futures::executor::block_on; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + backend, + blockchain::{Backend as BlockChainBackendT, HeaderBackend}, }; -use sc_client_api::backend; -use sc_client_api::blockchain::{Backend as BlockChainBackendT, HeaderBackend}; use sp_consensus::BlockOrigin; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use substrate_test_runtime::{self, Transfer}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, HashFor}; -use sc_block_builder::BlockBuilderProvider; /// helper to test the `leaves` implementation for various backends -pub fn test_leaves_for_backend(backend: Arc) where +pub fn test_leaves_for_backend(backend: Arc) +where B: backend::Backend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - backend::StateBackendFor: - sp_api::StateBackend>, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); let genesis_hash = client.chain_info().genesis_hash; - assert_eq!( - blockchain.leaves().unwrap(), - vec![genesis_hash]); + assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a1.hash()], - ); + block_on(client.import(BlockOrigin::Own, 
a1.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a1.hash()]); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); #[allow(deprecated)] - assert_eq!( - blockchain.leaves().unwrap(), - vec![a2.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a2.hash()]); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - assert_eq!( - blockchain.leaves().unwrap(), - vec![a3.hash()], - ); + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); + + assert_eq!(blockchain.leaves().unwrap(), vec![a3.hash()]); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a4.hash()], - ); + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a4.hash()]); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash()], - ); + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash()]); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b2.hash()], - ); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b2.hash()]); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b3.hash()], - ); + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + 
.build() + .unwrap() + .block; + + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b3.hash()]); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash()], - ); + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()]); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash()], - ); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash()]); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()], - ); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()]); } /// helper to test the `children` implementation for various backends -pub fn test_children_for_backend(backend: Arc) where +pub fn test_children_for_backend(backend: Arc) +where B: backend::LocalBackend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - >::State: - sp_api::StateBackend>, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - 
Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4).unwrap(); + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - 
}).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -333,119 +311,123 @@ pub fn test_children_for_backend(backend: Arc) where assert_eq!(vec![b3.hash(), c3.hash()], children4); } -pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc) where +pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc) +where B: backend::LocalBackend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - >::State: - sp_api::StateBackend>, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + 
.unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4).unwrap(); + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3).unwrap(); + block_on(client.import(BlockOrigin::Own, c3)).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2).unwrap(); + block_on(client.import(BlockOrigin::Own, d2)).unwrap(); let genesis_hash = client.chain_info().genesis_hash; diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 126447d481848..a8801b8519dfe 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -1,6 +1,6 @@ // This file is part of 
Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,14 +17,17 @@ //! Tool for creating the genesis block. -use std::collections::BTreeMap; +use super::{system, wasm_binary_unwrap, AccountId, AuthorityId}; +use codec::{Encode, Joiner, KeyedVec}; +use sc_service::client::genesis; +use sp_core::{ + map, + storage::{well_known_keys, Storage}, + ChangesTrieConfiguration, +}; use sp_io::hashing::{blake2_256, twox_128}; -use super::{AuthorityId, AccountId, wasm_binary_unwrap, system}; -use codec::{Encode, KeyedVec, Joiner}; -use sp_core::{ChangesTrieConfiguration, map}; -use sp_core::storage::{well_known_keys, Storage}; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; -use sc_service::client::genesis; +use std::collections::BTreeMap; /// Configuration of a general Substrate test genesis block. pub struct GenesisConfig { @@ -47,7 +50,7 @@ impl GenesisConfig { ) -> Self { GenesisConfig { changes_trie_config, - authorities: authorities, + authorities, balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), heap_pages_override, extra_storage, @@ -56,16 +59,23 @@ impl GenesisConfig { pub fn genesis_map(&self) -> Storage { let wasm_runtime = wasm_binary_unwrap().to_vec(); - let mut map: BTreeMap, Vec> = self.balances.iter() - .map(|&(ref account, balance)| (account.to_keyed_vec(b"balance:"), vec![].and(&balance))) + let mut map: BTreeMap, Vec> = self + .balances + .iter() + .map(|&(ref account, balance)| { + (account.to_keyed_vec(b"balance:"), vec![].and(&balance)) + }) .map(|(k, v)| (blake2_256(&k[..])[..].to_vec(), v.to_vec())) - .chain(vec![ - (well_known_keys::CODE.into(), wasm_runtime), - ( - well_known_keys::HEAP_PAGES.into(), - vec![].and(&(self.heap_pages_override.unwrap_or(16 as u64))), - ), - ].into_iter()) + .chain( + vec![ + (well_known_keys::CODE.into(), wasm_runtime), + ( + well_known_keys::HEAP_PAGES.into(), + vec![].and(&(self.heap_pages_override.unwrap_or(16 as u64))), + ), + ] + .into_iter(), + ) .collect(); if let Some(ref changes_trie_config) = self.changes_trie_config { map.insert(well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), changes_trie_config.encode()); @@ -75,28 +85,30 @@ impl GenesisConfig { map.extend(self.extra_storage.top.clone().into_iter()); // Assimilate the system genesis config. 
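(An aside before the `genesis_map` hunk resumes below.) Each endowed account ends up in genesis storage under the Blake2-256 hash of the raw account id prefixed with `b"balance:"`, with the SCALE-encoded balance as the value. A minimal sketch of that derivation, using the `codec` and `sp_io` helpers this file already imports; `demo_balance_entry` is an illustrative name, not part of the patch:

    use codec::{Encode, KeyedVec};
    use sp_io::hashing::blake2_256;

    /// Derive the (key, value) pair that `genesis_map` inserts for one endowed account.
    fn demo_balance_entry(account: [u8; 32], balance: u64) -> (Vec<u8>, Vec<u8>) {
        // Prepend the b"balance:" prefix to the raw account id...
        let keyed = account.to_keyed_vec(b"balance:");
        // ...then hash the full key; the value is just the SCALE-encoded balance.
        (blake2_256(&keyed).to_vec(), balance.encode())
    }

The `vec![].and(&balance)` form in the patch is the `Joiner`-based spelling of `balance.encode()`.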
-		let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone()};
+		let mut storage =
+			Storage { top: map, children_default: self.extra_storage.children_default.clone() };
 		let mut config = system::GenesisConfig::default();
 		config.authorities = self.authorities.clone();
-		config.assimilate_storage(&mut storage).expect("Adding `system::GenesisConfig` to the genesis");
+		config
+			.assimilate_storage(&mut storage)
+			.expect("Adding `system::GenesisConfig` to the genesis");
 		storage
 	}
 }
-pub fn insert_genesis_block(
-	storage: &mut Storage,
-) -> sp_core::hash::H256 {
+pub fn insert_genesis_block(storage: &mut Storage) -> sp_core::hash::H256 {
 	let child_roots = storage.children_default.iter().map(|(sk, child_content)| {
-		let state_root = <<<crate::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
-			child_content.data.clone().into_iter().collect(),
-		);
+		let state_root =
+			<<<crate::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
+				child_content.data.clone().into_iter().collect(),
+			);
 		(sk.clone(), state_root.encode())
 	});
 	// add child roots to storage
 	storage.top.extend(child_roots);
 	let state_root = <<<crate::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
-		storage.top.clone().into_iter().collect()
+		storage.top.clone().into_iter().collect(),
 	);
 	let block: crate::Block = genesis::construct_genesis_block(state_root);
 	let genesis_hash = block.header.hash();
diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs
index e772a28ee33a2..0d880d508ef38 100644
--- a/test-utils/runtime/src/lib.rs
+++ b/test-utils/runtime/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//! The Substrate runtime. This can be compiled with #[no_std], ready for Wasm.
+//! The Substrate runtime. This can be compiled with `#[no_std]`, ready for Wasm.
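Since the doc line above advertises `no_std` readiness, here is a hedged sketch of the dual-target pattern that the crate attribute on the next line enables; the crate layout and `demo_double_all` are illustrative, not part of the patch:

    // At the crate root: build as `no_std` unless the `std` feature is on (native builds).
    #![cfg_attr(not(feature = "std"), no_std)]

    // `sp-std` re-exports `core`/`alloc` items so the same code compiles in both modes.
    use sp_std::vec::Vec;

    /// Runs identically under native `std` and inside the Wasm runtime.
    pub fn demo_double_all(xs: Vec<u32>) -> Vec<u32> {
        xs.into_iter().map(|x| x.saturating_mul(2)).collect()
    }

Anything that genuinely needs `std` stays behind `#[cfg(feature = "std")]`, as the std-only helpers later in this file do.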
#![cfg_attr(not(feature = "std"), no_std)] @@ -23,42 +23,44 @@ pub mod genesismap; pub mod system; -use sp_std::{prelude::*, marker::PhantomData}; -use codec::{Encode, Decode, Input, Error}; +use codec::{Decode, Encode, Error, Input}; +use scale_info::TypeInfo; +use sp_std::{marker::PhantomData, prelude::*}; +use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{offchain::KeyTypeId, ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; -use sp_application_crypto::{ed25519, sr25519, ecdsa, RuntimeAppPublic}; -use trie_db::{TrieMut, Trie}; -use sp_trie::{PrefixedMemoryDB, StorageProof}; -use sp_trie::trie_types::{TrieDB, TrieDBMut}; +use sp_trie::{ + trie_types::{TrieDB, TrieDBMut}, + PrefixedMemoryDB, StorageProof, +}; +use trie_db::{Trie, TrieMut}; +use cfg_if::cfg_if; +use frame_support::{parameter_types, traits::KeyOwnerProofSystem, weights::RuntimeDbWeight}; +use frame_system::limits::{BlockLength, BlockWeights}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; +pub use sp_core::hash::H256; +use sp_inherents::{CheckInherentsResult, InherentData}; +#[cfg(feature = "std")] +use sp_runtime::traits::NumberFor; use sp_runtime::{ create_runtime_str, impl_opaque_keys, - ApplyExtrinsicResult, Perbill, - transaction_validity::{ - TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, - TransactionSource, - }, traits::{ - BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT, - GetNodeBlockType, GetRuntimeBlockType, NumberFor, Verify, IdentityLookup, + BlakeTwo256, BlindCheckable, Block as BlockT, Extrinsic as ExtrinsicT, GetNodeBlockType, + GetRuntimeBlockType, IdentityLookup, Verify, }, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + ValidTransaction, + }, + ApplyExtrinsicResult, Perbill, }; -use sp_version::RuntimeVersion; -pub use sp_core::hash::H256; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use frame_support::{ - impl_outer_origin, parameter_types, - traits::KeyOwnerProofSystem, - weights::{RuntimeDbWeight, Weight}, -}; -use sp_inherents::{CheckInherentsResult, InherentData}; -use cfg_if::cfg_if; +use sp_version::RuntimeVersion; // Ensure Babe and Aura use the same crypto to simplify things a bit. -pub use sp_consensus_babe::{AuthorityId, SlotNumber, AllowedSlots}; +pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; @@ -67,13 +69,30 @@ pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); #[cfg(feature = "std")] -/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. +pub mod wasm_binary_logging_disabled { + include!(concat!(env!("OUT_DIR"), "/wasm_binary_logging_disabled.rs")); +} + +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. +#[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ - supported with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only supported with the flag \ + disabled.", + ) +} + +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. +#[cfg(feature = "std")] +pub fn wasm_binary_logging_disabled_unwrap() -> &'static [u8] { + wasm_binary_logging_disabled::WASM_BINARY.expect( + "Development wasm binary is not available. 
Testing is only supported with the flag \ + disabled.", + ) } /// Test runtime version. +#[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test"), impl_name: create_runtime_str!("parity-test"), @@ -91,10 +110,7 @@ fn version() -> RuntimeVersion { /// Native version. #[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } /// Calls in transactions. @@ -111,12 +127,10 @@ impl Transfer { #[cfg(feature = "std")] pub fn into_signed_tx(self) -> Extrinsic { let signature = sp_keyring::AccountKeyring::from_public(&self.from) - .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer { - transfer: self, - signature, - exhaust_resources_when_not_first: false, - } + .expect("Creates keyring from public key.") + .sign(&self.encode()) + .into(); + Extrinsic::Transfer { transfer: self, signature, exhaust_resources_when_not_first: false } } /// Convert into a signed extrinsic, which will only end up included in the block @@ -125,12 +139,10 @@ impl Transfer { #[cfg(feature = "std")] pub fn into_resources_exhausting_tx(self) -> Extrinsic { let signature = sp_keyring::AccountKeyring::from_public(&self.from) - .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer { - transfer: self, - signature, - exhaust_resources_when_not_first: true, - } + .expect("Creates keyring from public key.") + .sign(&self.encode()) + .into(); + Extrinsic::Transfer { transfer: self, signature, exhaust_resources_when_not_first: true } } } @@ -146,13 +158,19 @@ pub enum Extrinsic { IncludeData(Vec), StorageChange(Vec, Option>), ChangesTrieConfigUpdate(Option), + OffchainIndexSet(Vec, Vec), + OffchainIndexClear(Vec), + Store(Vec), } parity_util_mem::malloc_size_of_is_0!(Extrinsic); // non-opaque extrinsic does not need this #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } @@ -163,17 +181,23 @@ impl BlindCheckable for Extrinsic { fn check(self) -> Result { match self { Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), - Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => { + Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => if sp_runtime::verify_encoded_lazy(&signature, &transfer, &transfer.from) { - Ok(Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first }) + Ok(Extrinsic::Transfer { + transfer, + signature, + exhaust_resources_when_not_first, + }) } else { Err(InvalidTransaction::BadProof.into()) - } - }, - Extrinsic::IncludeData(_) => Err(InvalidTransaction::BadProof.into()), + }, + Extrinsic::IncludeData(v) => Ok(Extrinsic::IncludeData(v)), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), Extrinsic::ChangesTrieConfigUpdate(new_config) => Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), + Extrinsic::OffchainIndexSet(key, value) => Ok(Extrinsic::OffchainIndexSet(key, value)), + Extrinsic::OffchainIndexClear(key) => Ok(Extrinsic::OffchainIndexClear(key)), + Extrinsic::Store(data) => Ok(Extrinsic::Store(data)), } } } @@ -197,7 +221,7 
@@ impl ExtrinsicT for Extrinsic { impl sp_runtime::traits::Dispatchable for Extrinsic { type Origin = Origin; - type Trait = (); + type Config = (); type Info = (); type PostInfo = (); fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { @@ -274,9 +298,7 @@ impl codec::EncodeLike for DecodeFails {} impl DecodeFails { /// Create a new instance. pub fn new() -> DecodeFails { - DecodeFails { - _phantom: Default::default(), - } + DecodeFails { _phantom: Default::default() } } } @@ -318,9 +340,6 @@ cfg_if! { fn get_block_number() -> u64; /// Takes and returns the initialized block number. fn take_block_number() -> Option; - /// Returns if no block was initialized. - #[skip_initialize_block] - fn without_initialize_block() -> bool; /// Test that `ed25519` crypto works in the runtime. /// /// Returns the signature generated for the message `ed25519` and the public key. @@ -371,9 +390,6 @@ cfg_if! { fn get_block_number() -> u64; /// Takes and returns the initialized block number. fn take_block_number() -> Option; - /// Returns if no block was initialized. - #[skip_initialize_block] - fn without_initialize_block() -> bool; /// Test that `ed25519` crypto works in the runtime. /// /// Returns the signature generated for the message `ed25519` and the public key. @@ -400,7 +416,7 @@ cfg_if! { } } -#[derive(Clone, Eq, PartialEq)] +#[derive(Clone, Eq, PartialEq, TypeInfo)] pub struct Runtime; impl GetNodeBlockType for Runtime { @@ -411,11 +427,64 @@ impl GetRuntimeBlockType for Runtime { type RuntimeBlock = Block; } -impl_outer_origin!{ - pub enum Origin for Runtime where system = frame_system {} +#[derive(Clone, RuntimeDebug)] +pub struct Origin; + +impl From> for Origin { + fn from(_o: frame_system::Origin) -> Self { + unimplemented!("Not required in tests!") + } +} +impl Into, Origin>> for Origin { + fn into(self) -> Result, Origin> { + unimplemented!("Not required in tests!") + } +} + +impl frame_support::traits::OriginTrait for Origin { + type Call = ::Call; + type PalletsOrigin = Origin; + type AccountId = ::AccountId; + + fn add_filter(&mut self, _filter: impl Fn(&Self::Call) -> bool + 'static) { + unimplemented!("Not required in tests!") + } + + fn reset_filter(&mut self) { + unimplemented!("Not required in tests!") + } + + fn set_caller_from(&mut self, _other: impl Into) { + unimplemented!("Not required in tests!") + } + + fn filter_call(&self, _call: &Self::Call) -> bool { + unimplemented!("Not required in tests!") + } + + fn caller(&self) -> &Self::PalletsOrigin { + unimplemented!("Not required in tests!") + } + + fn try_with_caller( + self, + _f: impl FnOnce(Self::PalletsOrigin) -> Result, + ) -> Result { + unimplemented!("Not required in tests!") + } + + fn none() -> Self { + unimplemented!("Not required in tests!") + } + fn root() -> Self { + unimplemented!("Not required in tests!") + } + fn signed(_by: ::AccountId) -> Self { + unimplemented!("Not required in tests!") + } } -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub struct Event; impl From> for Event { @@ -424,20 +493,54 @@ impl From> for Event { } } +impl frame_support::traits::PalletInfo for Runtime { + fn index() -> Option { + let type_id = sp_std::any::TypeId::of::
<P>();
+		if type_id == sp_std::any::TypeId::of::<frame_system::Pallet<Runtime>>() {
+			return Some(0)
+		}
+		if type_id == sp_std::any::TypeId::of::<pallet_timestamp::Pallet<Runtime>>() {
+			return Some(1)
+		}
+		if type_id == sp_std::any::TypeId::of::<pallet_babe::Pallet<Runtime>>() {
+			return Some(2)
+		}
+
+		None
+	}
+	fn name<P: 'static>() -> Option<&'static str> {
+		let type_id = sp_std::any::TypeId::of::<P>
(); + if type_id == sp_std::any::TypeId::of::>() { + return Some("System") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("Timestamp") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("Babe") + } + + None + } +} + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const MinimumPeriod: u64 = 5; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 100, write: 1000, }; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub RuntimeBlockLength: BlockLength = + BlockLength::max(4 * 1024 * 1024); + pub RuntimeBlockWeights: BlockWeights = + BlockWeights::with_sensible_defaults(4 * 1024 * 1024, Perbill::from_percent(75)); } -impl frame_system::Trait for Runtime { - type BaseCallFilter = (); +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; type Origin = Origin; type Call = Extrinsic; type Index = u64; @@ -449,22 +552,18 @@ impl frame_system::Trait for Runtime { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = Self; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = (); @@ -477,13 +576,14 @@ parameter_types! { pub const ExpectedBlockTime: u64 = 10_000; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; // there is no actual runtime in this test-runtime, so testing crates // are manually adding the digests. normally in this situation you'd use // pallet_babe::SameAuthoritiesForever. type EpochChangeTrigger = pallet_babe::ExternalTrigger; + type DisabledValidators = (); type KeyOwnerProofSystem = (); @@ -515,7 +615,8 @@ fn code_using_trie() -> u64 { let pairs = [ (b"0103000000000000000464".to_vec(), b"0400000000".to_vec()), (b"0103000000000000000469".to_vec(), b"0401000000".to_vec()), - ].to_vec(); + ] + .to_vec(); let mut mdb = PrefixedMemoryDB::default(); let mut root = sp_std::default::Default::default(); @@ -523,10 +624,10 @@ fn code_using_trie() -> u64 { let v = &pairs; let mut t = TrieDBMut::::new(&mut mdb, &mut root); for i in 0..v.len() { - let key: &[u8]= &v[i].0; + let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; if !t.insert(key, val).is_ok() { - return 101; + return 101 } } t @@ -541,8 +642,12 @@ fn code_using_trie() -> u64 { } } iter_pairs.len() as u64 - } else { 102 } - } else { 103 } + } else { + 102 + } + } else { + 103 + } } impl_opaque_keys! { @@ -562,7 +667,7 @@ cfg_if! { } fn execute_block(block: Block) { - system::execute_block(block) + system::execute_block(block); } fn initialize_block(header: &::Header) { @@ -580,6 +685,7 @@ cfg_if! 
{ fn validate_transaction( _source: TransactionSource, utx: ::Extrinsic, + _: ::Hash, ) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction { @@ -611,10 +717,6 @@ cfg_if! { fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { CheckInherentsResult::new() } - - fn random_seed() -> ::Hash { - unimplemented!() - } } impl self::TestAPI for Runtime { @@ -669,10 +771,6 @@ cfg_if! { system::get_block_number().expect("Block number is initialized") } - fn without_initialize_block() -> bool { - system::get_block_number().is_none() - } - fn take_block_number() -> Option { system::take_block_number() } @@ -704,13 +802,15 @@ cfg_if! { } fn do_trace_log() { - frame_support::debug::RuntimeLogger::init(); - frame_support::debug::trace!("Hey I'm runtime"); + log::trace!("Hey I'm runtime"); } } impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1000 } + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(1000) + } + fn authorities() -> Vec { system::authorities().into_iter().map(|a| { let authority: sr25519::Public = a.into(); @@ -727,13 +827,21 @@ cfg_if! { c: (3, 10), genesis_authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), - randomness: >::randomness(), + randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, } } - fn current_epoch_start() -> SlotNumber { - >::current_epoch_start() + fn current_epoch_start() -> Slot { + >::current_epoch_start() + } + + fn current_epoch() -> sp_consensus_babe::Epoch { + >::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + >::next_epoch() } fn submit_report_equivocation_unsigned_extrinsic( @@ -746,7 +854,7 @@ cfg_if! { } fn generate_key_ownership_proof( - _slot_number: sp_consensus_babe::SlotNumber, + _slot: sp_consensus_babe::Slot, _authority_id: sp_consensus_babe::AuthorityId, ) -> Option { None @@ -777,6 +885,10 @@ cfg_if! { Vec::new() } + fn current_set_id() -> sp_finality_grandpa::SetId { + 0 + } + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: sp_finality_grandpa::EquivocationProof< ::Hash, @@ -809,7 +921,7 @@ cfg_if! { } fn execute_block(block: Block) { - system::execute_block(block) + system::execute_block(block); } fn initialize_block(header: &::Header) { @@ -827,6 +939,7 @@ cfg_if! { fn validate_transaction( _source: TransactionSource, utx: ::Extrinsic, + _: ::Hash, ) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction{ @@ -858,10 +971,6 @@ cfg_if! { fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { CheckInherentsResult::new() } - - fn random_seed() -> ::Hash { - unimplemented!() - } } impl self::TestAPI for Runtime { @@ -920,10 +1029,6 @@ cfg_if! { system::get_block_number().expect("Block number is initialized") } - fn without_initialize_block() -> bool { - system::get_block_number().is_none() - } - fn take_block_number() -> Option { system::take_block_number() } @@ -955,13 +1060,15 @@ cfg_if! 
{ } fn do_trace_log() { - frame_support::debug::RuntimeLogger::init(); - frame_support::debug::trace!("Hey I'm runtime"); + log::trace!("Hey I'm runtime: {}", log::STATIC_MAX_LEVEL); } } impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1000 } + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(1000) + } + fn authorities() -> Vec { system::authorities().into_iter().map(|a| { let authority: sr25519::Public = a.into(); @@ -978,13 +1085,21 @@ cfg_if! { c: (3, 10), genesis_authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), - randomness: >::randomness(), + randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, } } - fn current_epoch_start() -> SlotNumber { - >::current_epoch_start() + fn current_epoch_start() -> Slot { + >::current_epoch_start() + } + + fn current_epoch() -> sp_consensus_babe::Epoch { + >::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + >::next_epoch() } fn submit_report_equivocation_unsigned_extrinsic( @@ -997,7 +1112,7 @@ cfg_if! { } fn generate_key_ownership_proof( - _slot_number: sp_consensus_babe::SlotNumber, + _slot: sp_consensus_babe::Slot, _authority_id: sp_consensus_babe::AuthorityId, ) -> Option { None @@ -1096,29 +1211,15 @@ fn test_read_storage() { fn test_read_child_storage() { const STORAGE_KEY: &[u8] = b"unique_id_1"; const KEY: &[u8] = b":read_child_storage"; - sp_io::default_child_storage::set( - STORAGE_KEY, - KEY, - b"test", - ); + sp_io::default_child_storage::set(STORAGE_KEY, KEY, b"test"); let mut v = [0u8; 4]; - let r = sp_io::default_child_storage::read( - STORAGE_KEY, - KEY, - &mut v, - 0, - ); + let r = sp_io::default_child_storage::read(STORAGE_KEY, KEY, &mut v, 0); assert_eq!(r, Some(4)); assert_eq!(&v, b"test"); let mut v = [0u8; 4]; - let r = sp_io::default_child_storage::read( - STORAGE_KEY, - KEY, - &mut v, - 8, - ); + let r = sp_io::default_child_storage::read(STORAGE_KEY, KEY, &mut v, 8); assert_eq!(r, Some(0)); assert_eq!(&v, &[0, 0, 0, 0]); } @@ -1126,18 +1227,11 @@ fn test_read_child_storage() { fn test_witness(proof: StorageProof, root: crate::Hash) { use sp_externalities::Externalities; let db: sp_trie::MemoryDB = proof.into_memory_db(); - let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new( - db, - root, - ); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let mut overlay = sp_state_machine::OverlayedChanges::default(); - #[cfg(feature = "std")] - let mut offchain_overlay = Default::default(); let mut cache = sp_state_machine::StorageTransactionCache::<_, _, BlockNumber>::default(); let mut ext = sp_state_machine::Ext::new( &mut overlay, - #[cfg(feature = "std")] - &mut offchain_overlay, &mut cache, &backend, #[cfg(feature = "std")] @@ -1153,18 +1247,16 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { #[cfg(test)] mod tests { - use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, - DefaultTestClientBuilderExt, TestClientBuilder, - runtime::TestAPI, - }; + use codec::Encode; + use sc_block_builder::BlockBuilderProvider; use sp_api::ProvideRuntimeApi; - use sp_runtime::generic::BlockId; + use sp_consensus::BlockOrigin; use sp_core::storage::well_known_keys::HEAP_PAGES; + use sp_runtime::generic::BlockId; use sp_state_machine::ExecutionStrategy; - use codec::Encode; - use sc_block_builder::BlockBuilderProvider; + use substrate_test_runtime_client::{ + prelude::*, runtime::TestAPI, 
DefaultTestClientBuilderExt, TestClientBuilder, + }; #[test] fn heap_pages_is_respected() { @@ -1192,7 +1284,7 @@ mod tests { (BlockId::Hash(hash), block) }; - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Allocation of 1024k while having ~2048k should succeed. let ret = client.runtime_api().vec_with_capacity(&new_block_id, 1048576); @@ -1201,9 +1293,8 @@ mod tests { #[test] fn test_storage() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::Both) - .build(); + let client = + TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -1225,14 +1316,10 @@ mod tests { #[test] fn witness_backend_works() { let (db, root) = witness_backend(); - let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new( - db, - root, - ); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::Both) - .build(); + let client = + TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 818487a89e518..334569d055a0c 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,35 +18,37 @@ //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. -use sp_std::prelude::*; +use crate::{ + AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, +}; +use codec::{Decode, Encode, KeyedVec}; +use frame_support::{decl_module, decl_storage, storage}; +use frame_system::Config; +use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ - storage::root as storage_root, storage::changes_root as storage_changes_root, - hashing::blake2_256, trie, + hashing::blake2_256, + storage::{changes_root as storage_changes_root, root as storage_root}, + trie, }; -use frame_support::storage; -use frame_support::{decl_storage, decl_module}; use sp_runtime::{ - traits::Header as _, generic, ApplyExtrinsicResult, + generic, + traits::Header as _, transaction_validity::{ - TransactionValidity, ValidTransaction, InvalidTransaction, TransactionValidityError, + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, + ApplyExtrinsicResult, }; -use codec::{KeyedVec, Encode, Decode}; -use frame_system::Trait; -use crate::{ - AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId -}; -use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; +use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } decl_storage! 
{ - trait Store for Module as TestRuntime { + trait Store for Module as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; // The current block number being processed. Set by `execute_block`. Number get(fn number): Option; @@ -107,11 +109,11 @@ pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } -pub fn execute_block(mut block: Block) { - execute_block_with_state_root_handler(&mut block, Mode::Verify); +pub fn execute_block(mut block: Block) -> Header { + execute_block_with_state_root_handler(&mut block, Mode::Verify) } -fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { +fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) -> Header { let header = &mut block.header; initialize_block(header); @@ -142,12 +144,14 @@ fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { "Transaction trie root must be valid.", ); } + + new_header } /// The block executor. pub struct BlockExecutor; -impl frame_executive::ExecuteBlock for BlockExecutor { +impl frame_support::traits::ExecuteBlock for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } @@ -157,17 +161,17 @@ impl frame_executive::ExecuteBlock for BlockExecutor { /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { - return InvalidTransaction::Future.into(); + return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); @@ -179,19 +183,14 @@ pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { let provides = vec![encode(&tx.from, tx.nonce)]; - Ok(ValidTransaction { - priority: tx.amount, - requires, - provides, - longevity: 64, - propagate: true, - }) + Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { - let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); + let extrinsic_index: u32 = + storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); @@ -212,8 +211,8 @@ pub fn finalize_block() -> Header { // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. 
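(An aside before the `finalize_block` hunk resumes below.) The `validate_transaction` hunk above orders a sender's transfers by nonce tags: each transfer provides its own nonce and, when it arrives ahead of schedule, requires the preceding one. A simplified sketch of that rule, with the tag encoding reduced to the nonce alone (the patch encodes `(from, nonce)` pairs) and `demo_transfer_tags` as an illustrative name:

    use codec::Encode;

    /// Simplified requires/provides computation from `validate_transaction`;
    /// `expected_nonce` is the sender's next nonce as read from storage.
    fn demo_transfer_tags(expected_nonce: u64, tx_nonce: u64) -> (Vec<Vec<u8>>, Vec<Vec<u8>>) {
        // A nonce gap means this transfer must wait for the preceding tag.
        let requires = if tx_nonce != expected_nonce && tx_nonce > 0 {
            vec![(tx_nonce - 1).encode()]
        } else {
            vec![]
        };
        // Importing this transfer unlocks the one at `tx_nonce + 1`.
        let provides = vec![tx_nonce.encode()];
        (requires, provides)
    }

A transfer with a gap therefore parks in the pool's future queue until something provides its missing tag.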
- let storage_root = Hash::decode(&mut &storage_root()[..]) - .expect("`storage_root` is a valid hash"); + let storage_root = + Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); @@ -228,17 +227,11 @@ pub fn finalize_block() -> Header { if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( - generic::ChangesTrieSignal::NewConfiguration(new_config) + generic::ChangesTrieSignal::NewConfiguration(new_config), )); } - Header { - number, - extrinsics_root, - state_root: storage_root, - parent_hash, - digest, - } + Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] @@ -250,17 +243,25 @@ fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { - Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => + Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } + if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), - Extrinsic::Transfer { ref transfer, .. } => - execute_transfer_backend(transfer), - Extrinsic::AuthoritiesChange(ref new_auth) => - execute_new_authorities_backend(new_auth), + Extrinsic::Transfer { ref transfer, .. } => execute_transfer_backend(transfer), + Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), + Extrinsic::OffchainIndexSet(key, value) => { + sp_io::offchain_index::set(&key, &value); + Ok(Ok(())) + }, + Extrinsic::OffchainIndexClear(key) => { + sp_io::offchain_index::clear(&key); + Ok(Ok(())) + }, + Extrinsic::Store(data) => execute_store(data.clone()), } } @@ -269,7 +270,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if !(tx.nonce == expected_nonce) { - return Err(InvalidTransaction::Stale.into()); + return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage @@ -281,7 +282,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // enact transfer if !(tx.amount <= from_balance) { - return Err(InvalidTransaction::Payment.into()); + return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); @@ -290,6 +291,13 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { Ok(Ok(())) } +fn execute_store(data: Vec) -> ApplyExtrinsicResult { + let content_hash = sp_io::hashing::blake2_256(&data); + let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); + sp_io::transaction_index::index(extrinsic_index, data.len() as u32, content_hash); + Ok(Ok(())) +} + fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) @@ -303,12 +311,12 @@ fn 
execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicRes Ok(Ok(())) } -fn execute_changes_trie_config_update(new_config: Option) -> ApplyExtrinsicResult { +fn execute_changes_trie_config_update( + new_config: Option, +) -> ApplyExtrinsicResult { match new_config.clone() { - Some(new_config) => storage::unhashed::put_raw( - well_known_keys::CHANGES_TRIE_CONFIG, - &new_config.encode(), - ), + Some(new_config) => + storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } ::put(new_config); @@ -340,29 +348,40 @@ fn info_expect_equal_hash(given: &Hash, expected: &Hash) { mod tests { use super::*; - use sp_io::TestExternalities; + use crate::{wasm_binary_unwrap, Header, Transfer}; + use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; + use sp_core::{ + map, + traits::{CodeExecutor, RuntimeCode}, + NeverNativeValue, + }; + use sp_io::{hashing::twox_128, TestExternalities}; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; - use crate::{Header, Transfer, wasm_binary_unwrap}; - use sp_core::{NeverNativeValue, map, traits::{CodeExecutor, RuntimeCode}}; - use sc_executor::{NativeExecutor, WasmExecutionMethod, native_executor_instance}; - use sp_io::hashing::twox_128; // Declare an instance of the native executor dispatch for the test runtime. - native_executor_instance!( - NativeDispatch, - crate::api::dispatch, - crate::native_version - ); - - fn executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) + pub struct NativeDispatch; + + impl sc_executor::NativeExecutionDispatch for NativeDispatch { + type ExtendHostFunctions = (); + + fn dispatch(method: &str, data: &[u8]) -> Option> { + crate::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + crate::native_version() + } + } + + fn executor() -> NativeElseWasmExecutor { + NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), - Sr25519Keyring::Charlie.to_raw_public() + Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), @@ -379,7 +398,10 @@ mod tests { ) } - fn block_import_works(block_executor: F) where F: Fn(Block, &mut TestExternalities) { + fn block_import_works(block_executor: F) + where + F: Fn(Block, &mut TestExternalities), + { let h = Header { parent_hash: [69u8; 32].into(), number: 1, @@ -387,10 +409,7 @@ mod tests { extrinsics_root: Default::default(), digest: Default::default(), }; - let mut b = Block { - header: h, - extrinsics: vec![], - }; + let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); @@ -399,7 +418,11 @@ mod tests { #[test] fn block_import_works_native() { - block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); + block_import_works(|b, ext| { + ext.execute_with(|| { + execute_block(b); + }) + }); } #[test] @@ -412,19 +435,23 @@ mod tests { heap_pages: None, }; - executor().call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - &b.encode(), - false, - None, - ).0.unwrap(); + executor() + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) + .0 + .unwrap(); }) } fn block_import_with_transaction_works(block_executor: F) - where F: Fn(Block, &mut TestExternalities) + 
where + F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { @@ -434,14 +461,13 @@ mod tests { extrinsics_root: Default::default(), digest: Default::default(), }, - extrinsics: vec![ - Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 69, - nonce: 0, - }.into_signed_tx() - ], + extrinsics: vec![Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 69, + nonce: 0, + } + .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); @@ -461,13 +487,15 @@ mod tests { to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, - }.into_signed_tx(), + } + .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, - }.into_signed_tx(), + } + .into_signed_tx(), ], }; @@ -499,7 +527,11 @@ mod tests { #[test] fn block_import_with_transaction_works_native() { - block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); + block_import_with_transaction_works(|b, ext| { + ext.execute_with(|| { + execute_block(b); + }) + }); } #[test] @@ -512,14 +544,17 @@ mod tests { heap_pages: None, }; - executor().call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - &b.encode(), - false, - None, - ).0.unwrap(); + executor() + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) + .0 + .unwrap(); }) } } diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index a37477fdae58f..09839ebae6ffe 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../client" } -parking_lot = "0.10.0" -codec = { package = "parity-scale-codec", version = "1.3.1" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-transaction-graph = { version = "2.0.0", path = "../../../client/transaction-pool/graph" } -futures = { version = "0.3.1", features = ["compat"] } +parking_lot = "0.11.1" +codec = { package = "parity-scale-codec", version = "2.0.0" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool", features = ["test-helpers"] } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +futures = "0.3.16" derive_more = "0.99.2" diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index f772ba9b02d5c..d0cd50394c533 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,29 +20,29 @@ //! See [`TestApi`] for more information. 
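Before the hunks below, a hedged usage sketch of the `TestApi` surface they touch. `TestApi::empty()` and the exact return type of `validation_requests` live outside the quoted hunks and are assumptions here; `demo` is an illustrative name:

    fn demo(xt: Extrinsic) {
        let api = TestApi::empty();
        // After this, validating `xt` yields `InvalidTransaction::Custom(0)`,
        // matching the `invalid_hashes` check in `validate_transaction` below.
        api.add_invalid(&xt);
        // Every extrinsic submitted for validation is recorded; nothing has
        // been validated yet, so the log is still empty.
        assert!(api.validation_requests().is_empty());
    }

The `valid_modifier` hook seen later in the hunk can then mutate the resulting `ValidTransaction` before the pool observes it.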
use codec::Encode; +use futures::future::ready; use parking_lot::RwLock; +use sp_blockchain::CachedHeaderMetadata; use sp_runtime::{ generic::{self, BlockId}, - traits::{BlakeTwo256, Hash as HashT, Block as _, Header as _}, + traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as _}, transaction_validity::{ - TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, - TransactionSource, + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + ValidTransaction, }, }; -use std::collections::{HashSet, HashMap, BTreeMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use substrate_test_runtime_client::{ - runtime::{Index, AccountId, Block, BlockNumber, Extrinsic, Hash, Header, Transfer}, + runtime::{AccountId, Block, BlockNumber, Extrinsic, Hash, Header, Index, Transfer}, AccountKeyring::{self, *}, }; -use sp_blockchain::CachedHeaderMetadata; -use futures::future::ready; /// Error type used by [`TestApi`]. #[derive(Debug, derive_more::From, derive_more::Display)] -pub struct Error(sp_transaction_pool::error::Error); +pub struct Error(sc_transaction_pool_api::error::Error); -impl sp_transaction_pool::error::IntoPoolError for Error { - fn into_pool_error(self) -> Result { +impl sc_transaction_pool_api::error::IntoPoolError for Error { + fn into_pool_error(self) -> Result { Ok(self.0) } } @@ -130,12 +130,9 @@ impl TestApi { block_number .checked_sub(1) .and_then(|num| { - chain.block_by_number - .get(&num) - .map(|blocks| { - blocks[0].0.header.hash() - }) - }).unwrap_or_default() + chain.block_by_number.get(&num).map(|blocks| blocks[0].0.header.hash()) + }) + .unwrap_or_default() }; self.push_block_with_parent(parent_hash, xts, is_best_block) @@ -154,7 +151,9 @@ impl TestApi { let block_number = if parent == Hash::default() { 0 } else { - *self.chain.read() + *self + .chain + .read() .block_by_hash .get(&parent) .expect("`parent` exists") @@ -182,7 +181,11 @@ impl TestApi { let mut chain = self.chain.write(); chain.block_by_hash.insert(hash, block.clone()); - chain.block_by_number.entry(block_number).or_default().push((block, is_best_block.into())); + chain + .block_by_number + .entry(block_number) + .or_default() + .push((block, is_best_block.into())); } fn hash_and_length_inner(ex: &Extrinsic) -> (Hash, usize) { @@ -195,9 +198,7 @@ impl TestApi { /// Next time transaction pool will try to validate this /// extrinsic, api will return invalid result. pub fn add_invalid(&self, xts: &Extrinsic) { - self.chain.write().invalid_hashes.insert( - Self::hash_and_length_inner(xts).0 - ); + self.chain.write().invalid_hashes.insert(Self::hash_and_length_inner(xts).0); } /// Query validation requests received. @@ -226,7 +227,7 @@ impl TestApi { } } -impl sc_transaction_graph::ChainApi for TestApi { +impl sc_transaction_pool::test_helpers::ChainApi for TestApi { type Block = Block; type Error = Error; type ValidationFuture = futures::future::Ready>; @@ -236,13 +237,14 @@ impl sc_transaction_graph::ChainApi for TestApi { &self, at: &BlockId, _source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor, + uxt: sc_transaction_pool::test_helpers::ExtrinsicFor, ) -> Self::ValidationFuture { self.validation_requests.write().push(uxt.clone()); match self.block_id_to_number(at) { Ok(Some(number)) => { - let found_best = self.chain + let found_best = self + .chain .read() .block_by_number .get(&number) @@ -253,24 +255,24 @@ impl sc_transaction_graph::ChainApi for TestApi { // the transaction. 
(This is not required for this test function, but in real // environment it would fail because of this). if !found_best { - return ready(Ok( - Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(1)).into()) - )) + return ready(Ok(Err(TransactionValidityError::Invalid( + InvalidTransaction::Custom(1), + ) + .into()))) } }, - Ok(None) => return ready(Ok( - Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(2)).into()) - )), + Ok(None) => + return ready(Ok(Err(TransactionValidityError::Invalid( + InvalidTransaction::Custom(2), + ) + .into()))), Err(e) => return ready(Err(e)), } let (requires, provides) = if let Some(transfer) = uxt.try_transfer() { let chain_nonce = self.chain.read().nonces.get(&transfer.from).cloned().unwrap_or(0); - let requires = if chain_nonce == transfer.nonce { - vec![] - } else { - vec![vec![chain_nonce as u8]] - }; + let requires = + if chain_nonce == transfer.nonce { vec![] } else { vec![vec![chain_nonce as u8]] }; let provides = vec![vec![transfer.nonce as u8]]; (requires, provides) @@ -279,18 +281,13 @@ impl sc_transaction_graph::ChainApi for TestApi { }; if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { - return ready(Ok( - Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0)).into()) - )) + return ready(Ok(Err( + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)).into() + ))) } - let mut validity = ValidTransaction { - priority: 1, - requires, - provides, - longevity: 64, - propagate: true, - }; + let mut validity = + ValidTransaction { priority: 1, requires, provides, longevity: 64, propagate: true }; (self.valid_modifier.read())(&mut validity); @@ -300,13 +297,10 @@ impl sc_transaction_graph::ChainApi for TestApi { fn block_id_to_number( &self, at: &BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { Ok(match at { - generic::BlockId::Hash(x) => self.chain - .read() - .block_by_hash - .get(x) - .map(|b| *b.header.number()), + generic::BlockId::Hash(x) => + self.chain.read().block_by_hash.get(x).map(|b| *b.header.number()), generic::BlockId::Number(num) => Some(*num), }) } @@ -314,58 +308,56 @@ impl sc_transaction_graph::ChainApi for TestApi { fn block_id_to_hash( &self, at: &BlockId, - ) -> Result>, Error> { + ) -> Result>, Error> { Ok(match at { generic::BlockId::Hash(x) => Some(x.clone()), - generic::BlockId::Number(num) => self.chain - .read() - .block_by_number - .get(num) - .and_then(|blocks| blocks.iter().find(|b| b.1.is_best()).map(|b| b.0.header().hash())), + generic::BlockId::Number(num) => + self.chain.read().block_by_number.get(num).and_then(|blocks| { + blocks.iter().find(|b| b.1.is_best()).map(|b| b.0.header().hash()) + }), }) } fn hash_and_length( &self, - ex: &sc_transaction_graph::ExtrinsicFor, + ex: &sc_transaction_pool::test_helpers::ExtrinsicFor, ) -> (Hash, usize) { Self::hash_and_length_inner(ex) } fn block_body(&self, id: &BlockId) -> Self::BodyFuture { futures::future::ready(Ok(match id { - BlockId::Number(num) => self.chain - .read() - .block_by_number - .get(num) - .map(|b| b[0].0.extrinsics().to_vec()), - BlockId::Hash(hash) => self.chain - .read() - .block_by_hash - .get(hash) - .map(|b| b.extrinsics().to_vec()), + BlockId::Number(num) => + self.chain.read().block_by_number.get(num).map(|b| b[0].0.extrinsics().to_vec()), + BlockId::Hash(hash) => + self.chain.read().block_by_hash.get(hash).map(|b| b.extrinsics().to_vec()), })) } + + fn block_header( + &self, + at: &BlockId, + ) -> Result::Header>, Self::Error> { + Ok(match at { + 
BlockId::Number(num) => + self.chain.read().block_by_number.get(num).map(|b| b[0].0.header().clone()), + BlockId::Hash(hash) => + self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), + }) + } } impl sp_blockchain::HeaderMetadata for TestApi { type Error = Error; - fn header_metadata( - &self, - hash: Hash, - ) -> Result, Self::Error> { + fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { let chain = self.chain.read(); let block = chain.block_by_hash.get(&hash).expect("Hash exists"); Ok(block.header().into()) } - fn insert_header_metadata( - &self, - _: Hash, - _: CachedHeaderMetadata, - ) { + fn insert_header_metadata(&self, _: Hash, _: CachedHeaderMetadata) { unimplemented!("Not implemented for tests") } @@ -378,12 +370,7 @@ impl sp_blockchain::HeaderMetadata for TestApi { /// /// Part of the test api. pub fn uxt(who: AccountKeyring, nonce: Index) -> Extrinsic { - let transfer = Transfer { - from: who.into(), - to: AccountId::default(), - nonce, - amount: 1, - }; + let transfer = Transfer { from: who.into(), to: AccountId::default(), nonce, amount: 1 }; let signature = transfer.using_encoded(|e| who.sign(e)).into(); Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: false } } diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index 224eacd5129e3..b68994926533a 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,21 +19,18 @@ #[doc(hidden)] pub use futures; -/// Marks async function to be executed by an async runtime and provide a `TaskExecutor`, suitable -/// to test environment. +/// Marks async function to be executed by an async runtime suitable to test environment. /// /// # Requirements /// -/// You must have tokio in the `[dev-dependencies]` of your crate to use this macro. +/// You must have tokio in the `[dev-dependencies]` of your crate to use this macro. /// /// # Example /// /// ``` /// #[substrate_test_utils::test] -/// async fn basic_test(task_executor: TaskExecutor) { +/// async fn basic_test() { /// assert!(true); -/// // create your node in here and use task_executor -/// // then don't forget to gracefully shutdown your node before exit /// } /// ``` pub use substrate_test_utils_derive::test; @@ -64,7 +61,7 @@ macro_rules! assert_eq_uvec { ( $x:expr, $y:expr $(,)? ) => { $crate::__assert_eq_uvec!($x, $y); $crate::__assert_eq_uvec!($y, $x); - } + }; } #[macro_export] @@ -72,7 +69,9 @@ macro_rules! assert_eq_uvec { macro_rules! 
__assert_eq_uvec { ( $x:expr, $y:expr ) => { $x.iter().for_each(|e| { - if !$y.contains(e) { panic!(format!("vectors not equal: {:?} != {:?}", $x, $y)); } + if !$y.contains(e) { + panic!("vectors not equal: {:?} != {:?}", $x, $y); + } }); - } + }; } diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index 4e1273b25c993..fff39c3964ad8 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -12,6 +12,6 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -tokio = { version = "0.2.13", features = ["macros"] } -test-utils = { version = "2.0.0", path = "..", package = "substrate-test-utils" } -sc-service = { version = "0.8.0", path = "../../client/service" } +tokio = { version = "1.10", features = ["macros"] } +test-utils = { version = "4.0.0-dev", path = "..", package = "substrate-test-utils" } +sc-service = { version = "0.10.0-dev", path = "../../client/service" } diff --git a/test-utils/test-crate/src/main.rs b/test-utils/test-crate/src/main.rs index 209f29f76132d..554adcb884064 100644 --- a/test-utils/test-crate/src/main.rs +++ b/test-utils/test-crate/src/main.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -18,7 +18,7 @@ #[cfg(test)] #[test_utils::test] -async fn basic_test(_: sc_service::TaskExecutor) { +async fn basic_test() { assert!(true); } diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml new file mode 100644 index 0000000000000..b5b115771b539 --- /dev/null +++ b/test-utils/test-runner/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "test-runner" +version = "0.9.0" +authors = ["Parity Technologies "] +edition = "2018" +publish = false + +[dependencies] +# client deps +sc-executor = { path = "../../client/executor" } +sc-service = { path = "../../client/service" } +sc-informant = { path = "../../client/informant" } +sc-network = { path = "../../client/network" } +sc-cli = { path = "../../client/cli" } +sc-basic-authorship = { path = "../../client/basic-authorship" } +sc-rpc = { path = "../../client/rpc" } +sc-transaction-pool = { path = "../../client/transaction-pool" } +grandpa = { package = "sc-finality-grandpa", path = "../../client/finality-grandpa" } +sp-finality-grandpa = { path = "../../primitives/finality-grandpa" } +sp-consensus-babe = { path = "../../primitives/consensus/babe" } +sc-consensus-babe = { path = "../../client/consensus/babe" } +sc-consensus = { path = "../../client/consensus/common" } +sc-transaction-pool-api = { path = "../../client/transaction-pool/api" } +sc-client-api = { path = "../../client/api" } +sc-rpc-server = { path = "../../client/rpc-servers" } +manual-seal = { package = "sc-consensus-manual-seal", path = "../../client/consensus/manual-seal" } + +# primitive deps +sp-core = { path = "../../primitives/core" } +sp-blockchain = { path = "../../primitives/blockchain" } +sp-block-builder = { path = "../../primitives/block-builder" } +sp-api = { path = "../../primitives/api" } +sp-transaction-pool = { path = "../../primitives/transaction-pool" } +sp-consensus = { path = "../../primitives/consensus/common" } +sp-runtime = { path = "../../primitives/runtime" } +sp-session = { path = "../../primitives/session" } +sp-offchain = { path = "../../primitives/offchain" } +sp-inherents = 
{ path = "../../primitives/inherents" } +sp-keyring = { path = "../../primitives/keyring" } + +sp-externalities = { path = "../../primitives/externalities" } +sp-state-machine = { path = "../../primitives/state-machine" } +sp-wasm-interface = { path = "../../primitives/wasm-interface" } +sp-runtime-interface = { path = "../../primitives/runtime-interface" } + +# pallets +frame-system = { path = "../../frame/system" } + +log = "0.4.8" +futures = "0.3.16" +tokio = { version = "1.10", features = ["signal"] } +# Calling RPC +jsonrpc-core = "18.0" +num-traits = "0.2.14" diff --git a/test-utils/test-runner/src/client.rs b/test-utils/test-runner/src/client.rs new file mode 100644 index 0000000000000..f9ad980e162d1 --- /dev/null +++ b/test-utils/test-runner/src/client.rs @@ -0,0 +1,246 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +//! Client parts +use crate::{default_config, ChainInfo}; +use futures::channel::mpsc; +use jsonrpc_core::MetaIoHandler; +use manual_seal::{ + consensus::babe::{BabeConsensusDataProvider, SlotTimestampProvider}, + import_queue, + rpc::{ManualSeal, ManualSealApi}, + run_manual_seal, EngineCommand, ManualSealParams, +}; +use sc_client_api::backend::Backend; +use sc_executor::NativeElseWasmExecutor; +use sc_service::{ + build_network, new_full_parts, spawn_tasks, BuildNetworkParams, ChainSpec, Configuration, + SpawnTasksParams, TFullBackend, TFullClient, TaskManager, +}; +use sc_transaction_pool::BasicPool; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{ApiExt, ConstructRuntimeApi, Core, Metadata}; +use sp_block_builder::BlockBuilder; +use sp_consensus_babe::BabeApi; +use sp_finality_grandpa::GrandpaApi; +use sp_keyring::sr25519::Keyring::Alice; +use sp_offchain::OffchainWorkerApi; +use sp_runtime::traits::{Block as BlockT, Header}; +use sp_session::SessionKeys; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; +use std::{str::FromStr, sync::Arc}; + +type ClientParts = ( + Arc>, + TaskManager, + Arc< + TFullClient< + ::Block, + ::RuntimeApi, + NativeElseWasmExecutor<::ExecutorDispatch>, + >, + >, + Arc< + dyn TransactionPool< + Block = ::Block, + Hash = <::Block as BlockT>::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_pool::Transaction< + <::Block as BlockT>::Hash, + <::Block as BlockT>::Extrinsic, + >, + >, + >, + mpsc::Sender::Block as BlockT>::Hash>>, + Arc::Block>>, +); + +/// Provide the config or chain spec for a given chain +pub enum ConfigOrChainSpec { + /// Configuration object + Config(Configuration), + /// Chain spec object + ChainSpec(Box, tokio::runtime::Handle), +} +/// Creates all the client parts you need for [`Node`](crate::node::Node) +pub fn client_parts( + config_or_chain_spec: ConfigOrChainSpec, +) -> Result, sc_service::Error> +where + T: ChainInfo + 
'static, + >, + >>::RuntimeApi: Core + + Metadata + + OffchainWorkerApi + + SessionKeys + + TaggedTransactionQueue + + BlockBuilder + + BabeApi + + ApiExt as Backend>::State> + + GrandpaApi, + ::Call: From>, + <::Block as BlockT>::Hash: FromStr + Unpin, + <::Block as BlockT>::Header: Unpin, + <<::Block as BlockT>::Header as Header>::Number: + num_traits::cast::AsPrimitive, +{ + use sp_consensus_babe::AuthorityId; + let config = match config_or_chain_spec { + ConfigOrChainSpec::Config(config) => config, + ConfigOrChainSpec::ChainSpec(chain_spec, tokio_handle) => + default_config(tokio_handle, chain_spec), + }; + + let executor = NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let (client, backend, keystore, mut task_manager) = + new_full_parts::(&config, None, executor)?; + let client = Arc::new(client); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let (grandpa_block_import, ..) = grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + None, + )?; + + let slot_duration = sc_consensus_babe::Config::get_or_compute(&*client)?; + let (block_import, babe_link) = sc_consensus_babe::block_import( + slot_duration.clone(), + grandpa_block_import, + client.clone(), + )?; + + let consensus_data_provider = BabeConsensusDataProvider::new( + client.clone(), + keystore.sync_keystore(), + babe_link.epoch_changes().clone(), + vec![(AuthorityId::from(Alice.public()), 1000)], + ) + .expect("failed to create ConsensusDataProvider"); + + let import_queue = + import_queue(Box::new(block_import.clone()), &task_manager.spawn_essential_handle(), None); + + let transaction_pool = BasicPool::new_full( + config.transaction_pool.clone(), + true.into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let (network, system_rpc_tx, network_starter) = { + let params = BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + warp_sync: None, + }; + build_network(params)? + }; + + + // offchain workers + sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + task_manager.ipfs_rt.clone(), + ); + + // Proposer object for block authorship. + let env = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + config.prometheus_registry(), + None, + ); + + // Channel for the rpc handler to communicate with the authorship task. + let (command_sink, commands_stream) = mpsc::channel(10); + + let rpc_sink = command_sink.clone(); + + let rpc_handlers = { + let params = SpawnTasksParams { + config, + client: client.clone(), + backend: backend.clone(), + task_manager: &mut task_manager, + keystore: keystore.sync_keystore(), + on_demand: None, + transaction_pool: transaction_pool.clone(), + rpc_extensions_builder: Box::new(move |_, _| { + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(ManualSealApi::to_delegate(ManualSeal::new(rpc_sink.clone()))); + Ok(io) + }), + remote_blockchain: None, + network, + system_rpc_tx, + telemetry: None, + }; + spawn_tasks(params)? 
+ }; + + let cloned_client = client.clone(); + let create_inherent_data_providers = Box::new(move |_, _| { + let client = cloned_client.clone(); + async move { + let timestamp = + SlotTimestampProvider::new(client.clone()).map_err(|err| format!("{:?}", err))?; + let babe = + sp_consensus_babe::inherents::InherentDataProvider::new(timestamp.slot().into()); + Ok((timestamp, babe)) + } + }); + + // Background authorship future. + let authorship_future = run_manual_seal(ManualSealParams { + block_import, + env, + client: client.clone(), + pool: transaction_pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: Some(Box::new(consensus_data_provider)), + create_inherent_data_providers, + }); + + // spawn the authorship task as an essential task. + task_manager.spawn_essential_handle().spawn("manual-seal", authorship_future); + + network_starter.start_network(); + let rpc_handler = rpc_handlers.io_handler(); + + Ok((rpc_handler, task_manager, client, transaction_pool, command_sink, backend)) +} diff --git a/test-utils/test-runner/src/host_functions.rs b/test-utils/test-runner/src/host_functions.rs new file mode 100644 index 0000000000000..731abfbb9db05 --- /dev/null +++ b/test-utils/test-runner/src/host_functions.rs @@ -0,0 +1,90 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Use this to override host functions. +/// eg +/// ```rust +/// use test_runner::override_host_functions; +/// pub struct SignatureVerificationOverride; +/// +/// impl sp_wasm_interface::HostFunctions for SignatureVerificationOverride { +/// fn host_functions() -> Vec<&'static dyn sp_wasm_interface::Function> { +/// override_host_functions!( +/// "ext_crypto_ecdsa_verify_version_1", EcdsaVerify, +/// ) +/// } +/// } +/// ``` +#[macro_export] +macro_rules! override_host_functions { + ($($fn_name:expr, $name:ident,)*) => {{ + let mut host_functions = vec![]; + $( + struct $name; + impl sp_wasm_interface::Function for $name { + fn name(&self) -> &str { + &$fn_name + } + + fn signature(&self) -> sp_wasm_interface::Signature { + sp_wasm_interface::Signature { + args: std::borrow::Cow::Owned(vec![ + sp_wasm_interface::ValueType::I32, + sp_wasm_interface::ValueType::I64, + sp_wasm_interface::ValueType::I32, + ]), + return_value: Some(sp_wasm_interface::ValueType::I32), + } + } + + fn execute( + &self, + context: &mut dyn sp_wasm_interface::FunctionContext, + _args: &mut dyn Iterator, + ) -> Result, String> { + ::into_ffi_value(true, context) + .map(sp_wasm_interface::IntoValue::into_value) + .map(Some) + } + } + host_functions.push(&$name as &'static dyn sp_wasm_interface::Function); + )* + host_functions + }}; +} + +/// Provides host functions that overrides runtime signature verification +/// to always return true. 
+pub struct SignatureVerificationOverride; + +impl sp_wasm_interface::HostFunctions for SignatureVerificationOverride { + fn host_functions() -> Vec<&'static dyn sp_wasm_interface::Function> { + override_host_functions!( + "ext_crypto_ecdsa_verify_version_1", + EcdsaVerify, + "ext_crypto_ecdsa_verify_version_2", + EcdsaVerifyV2, + "ext_crypto_ed25519_verify_version_1", + Ed25519Verify, + "ext_crypto_sr25519_verify_version_1", + Sr25519Verify, + "ext_crypto_sr25519_verify_version_2", + Sr25519VerifyV2, + ) + } +} diff --git a/test-utils/test-runner/src/lib.rs b/test-utils/test-runner/src/lib.rs new file mode 100644 index 0000000000000..ca2c518fd6926 --- /dev/null +++ b/test-utils/test-runner/src/lib.rs @@ -0,0 +1,309 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +#![deny(missing_docs, unused_extern_crates)] + +//! Test runner +//! # Substrate Test Runner +//! +//! Allows you to test +//!
+//!
+//! - Migrations
+//! - Runtime Upgrades
+//! - Pallets and general runtime functionality.
+//!
+//! This works by running a full node with a Manual Seal-BABE™ hybrid consensus for block authoring.
+//!
+//! <h2>Note</h2>
+//! The running node has no signature verification, which allows us to author extrinsics for any
+//! account on chain.
+//!
+//!
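+//! As a concrete sketch of what that enables (names are hypothetical; `node` is a running
+//! [`Node`] and the call type comes from the runtime under test — the node substitutes a
+//! default sr25519 signature for every signed extrinsic):
+//! ```rust,ignore
+//! // author a transfer out of Alice's account without ever holding her keys
+//! node.submit_extrinsic(BalancesCall::transfer(bob_account_id, 100), alice_account_id);
+//! node.seal_blocks(1);
+//! ```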

+//! <h2>How do I Use this?</h2>
+//! +//! +//! ```rust,ignore +//! use test_runner::{Node, ChainInfo, SignatureVerificationOverride, base_path, NodeConfig}; +//! use sc_finality_grandpa::GrandpaBlockImport; +//! use sc_service::{ +//! TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts, BasePath, +//! DatabaseSource, KeepBlocks, TransactionStorageMode, ChainSpec, Role, +//! config::{NetworkConfiguration, KeystoreConfig}, +//! }; +//! use std::sync::Arc; +//! use sp_inherents::InherentDataProviders; +//! use sc_consensus_babe::BabeBlockImport; +//! use sp_keystore::SyncCryptoStorePtr; +//! use sp_keyring::sr25519::Keyring::{Alice, Bob}; +//! use node_cli::chain_spec::development_config; +//! use sp_consensus_babe::AuthorityId; +//! use manual_seal::{ConsensusDataProvider, consensus::babe::BabeConsensusDataProvider}; +//! use sp_runtime::{traits::IdentifyAccount, MultiSigner, generic::Era}; +//! use sc_executor::WasmExecutionMethod; +//! use sc_network::{multiaddr, config::TransportConfig}; +//! use sc_client_api::execution_extensions::ExecutionStrategies; +//! use sc_informant::OutputFormat; +//! use sp_api::TransactionFor; +//! +//! type BlockImport = BabeBlockImport>; +//! +//! pub struct ExecutorDispatch; +//! +//! impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { +//! type ExtendHostFunctions = SignatureVerificationOverride; +//! +//! fn dispatch(method: &str, data: &[u8]) -> Option> { +//! node_runtime::api::dispatch(method, data) +//! } +//! +//! fn native_version() -> sc_executor::NativeVersion { +//! node_runtime::native_version() +//! } +//! } +//! +//! struct Requirements; +//! +//! impl ChainInfo for Requirements { +//! /// Provide a Block type with an OpaqueExtrinsic +//! type Block = node_primitives::Block; +//! /// Provide an ExecutorDispatch type for the runtime +//! type ExecutorDispatch = ExecutorDispatch; +//! /// Provide the runtime itself +//! type Runtime = node_runtime::Runtime; +//! /// A touch of runtime api +//! type RuntimeApi = node_runtime::RuntimeApi; +//! /// A pinch of SelectChain implementation +//! type SelectChain = sc_consensus::LongestChain, Self::Block>; +//! /// A slice of concrete BlockImport type +//! type BlockImport = BlockImport< +//! Self::Block, +//! TFullBackend, +//! TFullClient>, +//! Self::SelectChain, +//! >; +//! /// and a dash of SignedExtensions +//! type SignedExtras = node_runtime::SignedExtra; +//! +//! /// Create your signed extras here. +//! fn signed_extras( +//! from: ::AccountId, +//! ) -> Self::SignedExtension { +//! let nonce = frame_system::Pallet::::account_nonce(from); +//! +//! ( +//! frame_system::CheckSpecVersion::::new(), +//! frame_system::CheckTxVersion::::new(), +//! frame_system::CheckGenesis::::new(), +//! frame_system::CheckMortality::::from(Era::Immortal), +//! frame_system::CheckNonce::::from(nonce), +//! frame_system::CheckWeight::::new(), +//! pallet_transaction_payment::ChargeTransactionPayment::::from(0), +//! ) +//! } +//! +//! /// The function signature tells you all you need to know. ;) +//! fn create_client_parts(config: &Configuration) -> Result< +//! ( +//! Arc>>, +//! Arc>, +//! KeyStorePtr, +//! TaskManager, +//! InherentDataProviders, +//! Option>, +//! Self::Block +//! >, +//! > +//! >>, +//! Self::SelectChain, +//! Self::BlockImport +//! ), +//! sc_service::Error +//! > { +//! let ( +//! client, +//! backend, +//! keystore, +//! task_manager, +//! ) = new_full_parts::>(config)?; +//! let client = Arc::new(client); +//! +//! let inherent_providers = InherentDataProviders::new(); +//! 
let select_chain = sc_consensus::LongestChain::new(backend.clone()); +//! +//! let (grandpa_block_import, ..) = +//! sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?; +//! +//! let (block_import, babe_link) = sc_consensus_babe::block_import( +//! sc_consensus_babe::Config::get_or_compute(&*client)?, +//! grandpa_block_import, +//! client.clone(), +//! )?; +//! +//! let consensus_data_provider = BabeConsensusDataProvider::new( +//! client.clone(), +//! keystore.clone(), +//! &inherent_providers, +//! babe_link.epoch_changes().clone(), +//! vec![(AuthorityId::from(Alice.public()), 1000)] +//! ) +//! .expect("failed to create ConsensusDataProvider"); +//! +//! Ok(( +//! client, +//! backend, +//! keystore, +//! task_manager, +//! inherent_providers, +//! Some(Box::new(consensus_data_provider)), +//! select_chain, +//! block_import +//! )) +//! } +//! +//! fn dispatch_with_root(call: ::Call, node: &mut Node) { +//! let alice = MultiSigner::from(Alice.public()).into_account(); +//! // for chains that support sudo, otherwise, you'd have to use pallet-democracy here. +//! let call = pallet_sudo::Call::sudo(Box::new(call)); +//! node.submit_extrinsic(call, alice); +//! node.seal_blocks(1); +//! } +//! } +//! +//! /// And now for the most basic test +//! +//! #[test] +//! fn simple_balances_test() { +//! // given +//! let config = NodeConfig { +//! execution_strategies: ExecutionStrategies { +//! syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! importing: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! other: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! }, +//! chain_spec: Box::new(development_config()), +//! log_targets: vec![], +//! }; +//! let mut node = Node::::new(config).unwrap(); +//! +//! type Balances = pallet_balances::Pallet; +//! +//! let (alice, bob) = (Alice.pair(), Bob.pair()); +//! let (alice_account_id, bob_acount_id) = ( +//! MultiSigner::from(alice.public()).into_account(), +//! MultiSigner::from(bob.public()).into_account() +//! ); +//! +//! /// the function with_state allows us to read state, pretty cool right? :D +//! let old_balance = node.with_state(|| Balances::free_balance(alice_account_id.clone())); +//! +//! // 70 dots +//! let amount = 70_000_000_000_000; +//! +//! /// Send extrinsic in action. +//! node.submit_extrinsic(BalancesCall::transfer(bob_acount_id.clone(), amount), alice_account_id.clone()); +//! +//! /// Produce blocks in action, Powered by manual-seal™. +//! node.seal_blocks(1); +//! +//! /// we can check the new state :D +//! let new_balance = node.with_state(|| Balances::free_balance(alice_account_id)); +//! +//! /// we can now make assertions on how state has changed. +//! assert_eq!(old_balance + amount, new_balance); +//! } +//! ``` + +use sc_consensus::BlockImport; +use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch}; +use sc_service::TFullClient; +use sp_api::{ConstructRuntimeApi, TransactionFor}; +use sp_consensus::SelectChain; +use sp_inherents::InherentDataProvider; +use sp_runtime::traits::{Block as BlockT, SignedExtension}; + +mod client; +mod host_functions; +mod node; +mod utils; + +pub use client::*; +pub use host_functions::*; +pub use node::*; +pub use utils::*; + +/// Wrapper trait for concrete type required by this testing framework. 
+pub trait ChainInfo: Sized { + /// Opaque block type + type Block: BlockT; + + /// ExecutorDispatch dispatch type + type ExecutorDispatch: NativeExecutionDispatch + 'static; + + /// Runtime + type Runtime: frame_system::Config; + + /// RuntimeApi + type RuntimeApi: Send + + Sync + + 'static + + ConstructRuntimeApi< + Self::Block, + TFullClient< + Self::Block, + Self::RuntimeApi, + NativeElseWasmExecutor, + >, + >; + + /// select chain type. + type SelectChain: SelectChain + 'static; + + /// Block import type. + type BlockImport: Send + + Sync + + Clone + + BlockImport< + Self::Block, + Error = sp_consensus::Error, + Transaction = TransactionFor< + TFullClient< + Self::Block, + Self::RuntimeApi, + NativeElseWasmExecutor, + >, + Self::Block, + >, + > + 'static; + + /// The signed extras required by the runtime + type SignedExtras: SignedExtension; + + /// The inherent data providers. + type InherentDataProviders: InherentDataProvider + 'static; + + /// Signed extras, this function is caled in an externalities provided environment. + fn signed_extras( + from: ::AccountId, + ) -> Self::SignedExtras; +} diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs new file mode 100644 index 0000000000000..9114013b747f7 --- /dev/null +++ b/test-utils/test-runner/src/node.rs @@ -0,0 +1,288 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; + +use crate::ChainInfo; +use futures::{ + channel::{mpsc, oneshot}, + FutureExt, SinkExt, +}; +use jsonrpc_core::MetaIoHandler; +use manual_seal::EngineCommand; +use sc_client_api::{ + backend::{self, Backend}, + CallExecutor, ExecutorProvider, +}; +use sc_executor::NativeElseWasmExecutor; +use sc_service::{TFullBackend, TFullCallExecutor, TFullClient, TaskManager}; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{OverlayedChanges, StorageTransactionCache}; +use sp_blockchain::HeaderBackend; +use sp_core::ExecutionContext; +use sp_runtime::{ + generic::{BlockId, UncheckedExtrinsic}, + traits::{Block as BlockT, Extrinsic, Header, NumberFor}, + transaction_validity::TransactionSource, + MultiAddress, MultiSignature, +}; +use sp_state_machine::Ext; + +/// This holds a reference to a running node on another thread, +/// the node process is dropped when this struct is dropped +/// also holds logs from the process. +pub struct Node { + /// rpc handler for communicating with the node over rpc. + rpc_handler: Arc>, + /// handle to the running node. 
+ task_manager: Option, + /// client instance + client: Arc>>, + /// transaction pool + pool: Arc< + dyn TransactionPool< + Block = ::Block, + Hash = <::Block as BlockT>::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_pool::Transaction< + <::Block as BlockT>::Hash, + <::Block as BlockT>::Extrinsic, + >, + >, + >, + /// channel to communicate with manual seal on. + manual_seal_command_sink: mpsc::Sender::Hash>>, + /// backend type. + backend: Arc>, + /// Block number at initialization of this Node. + initial_block_number: NumberFor, +} + +type EventRecord = frame_system::EventRecord< + ::Event, + ::Hash, +>; + +impl Node +where + T: ChainInfo, + <::Header as Header>::Number: From, +{ + /// Creates a new node. + pub fn new( + rpc_handler: Arc>, + task_manager: TaskManager, + client: Arc< + TFullClient>, + >, + pool: Arc< + dyn TransactionPool< + Block = ::Block, + Hash = <::Block as BlockT>::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_pool::Transaction< + <::Block as BlockT>::Hash, + <::Block as BlockT>::Extrinsic, + >, + >, + >, + command_sink: mpsc::Sender::Hash>>, + backend: Arc>, + ) -> Self { + Self { + rpc_handler, + task_manager: Some(task_manager), + client: client.clone(), + pool, + backend, + manual_seal_command_sink: command_sink, + initial_block_number: client.info().best_number, + } + } + + /// Returns a reference to the rpc handlers, use this to send rpc requests. + /// eg + /// ```ignore + /// let request = r#"{"jsonrpc":"2.0","method":"engine_createBlock","params": [true, true],"id":1}"#; + /// let response = node.rpc_handler() + /// .handle_request_sync(request, Default::default()); + /// ``` + pub fn rpc_handler( + &self, + ) -> Arc> { + self.rpc_handler.clone() + } + + /// Return a reference to the Client + pub fn client( + &self, + ) -> Arc>> { + self.client.clone() + } + + /// Return a reference to the pool. + pub fn pool( + &self, + ) -> Arc< + dyn TransactionPool< + Block = ::Block, + Hash = <::Block as BlockT>::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_pool::Transaction< + <::Block as BlockT>::Hash, + <::Block as BlockT>::Extrinsic, + >, + >, + > { + self.pool.clone() + } + + /// Executes closure in an externalities provided environment. + pub fn with_state(&self, closure: impl FnOnce() -> R) -> R + where + > as CallExecutor>::Error: + std::fmt::Debug, + { + let id = BlockId::Hash(self.client.info().best_hash); + let mut overlay = OverlayedChanges::default(); + let changes_trie = + backend::changes_tries_state_at_block(&id, self.backend.changes_trie_storage()) + .unwrap(); + let mut cache = StorageTransactionCache::< + T::Block, + as Backend>::State, + >::default(); + let mut extensions = self + .client + .execution_extensions() + .extensions(&id, ExecutionContext::BlockConstruction); + let state_backend = self + .backend + .state_at(id.clone()) + .expect(&format!("State at block {} not found", id)); + + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &state_backend, + changes_trie.clone(), + Some(&mut extensions), + ); + sp_externalities::set_and_run_with_externalities(&mut ext, closure) + } + + /// submit some extrinsic to the node. if signer is None, will submit unsigned_extrinsic. 
+ pub async fn submit_extrinsic( + &self, + call: impl Into<::Call>, + signer: Option<::AccountId>, + ) -> Result<::Hash, sc_transaction_pool::error::Error> + where + ::Extrinsic: From< + UncheckedExtrinsic< + MultiAddress< + ::AccountId, + ::Index, + >, + ::Call, + MultiSignature, + T::SignedExtras, + >, + >, + { + let signed_data = if let Some(signer) = signer { + let extra = self.with_state(|| T::signed_extras(signer.clone())); + Some((signer.into(), MultiSignature::Sr25519(Default::default()), extra)) + } else { + None + }; + let ext = UncheckedExtrinsic::< + MultiAddress< + ::AccountId, + ::Index, + >, + ::Call, + MultiSignature, + T::SignedExtras, + >::new(call.into(), signed_data) + .expect("UncheckedExtrinsic::new() always returns Some"); + let at = self.client.info().best_hash; + + self.pool + .submit_one(&BlockId::Hash(at), TransactionSource::Local, ext.into()) + .await + } + + /// Get the events of the most recently produced block + pub fn events(&self) -> Vec> { + self.with_state(|| frame_system::Pallet::::events()) + } + + /// Instructs manual seal to seal new, possibly empty blocks. + pub async fn seal_blocks(&self, num: usize) { + let mut sink = self.manual_seal_command_sink.clone(); + + for count in 0..num { + let (sender, future_block) = oneshot::channel(); + let future = sink.send(EngineCommand::SealNewBlock { + create_empty: true, + finalize: false, + parent_hash: None, + sender: Some(sender), + }); + + const ERROR: &'static str = "manual-seal authorship task is shutting down"; + future.await.expect(ERROR); + + match future_block.await.expect(ERROR) { + Ok(block) => { + log::info!("sealed {} (hash: {}) of {} blocks", count + 1, block.hash, num) + }, + Err(err) => { + log::error!("failed to seal block {} of {}, error: {:?}", count + 1, num, err) + }, + } + } + } + + /// Revert count number of blocks from the chain. + pub fn revert_blocks(&self, count: NumberFor) { + self.backend.revert(count, true).expect("Failed to revert blocks: "); + } + + /// so you've decided to run the test runner as a binary, use this to shutdown gracefully. + pub async fn until_shutdown(mut self) { + let manager = self.task_manager.take(); + if let Some(mut task_manager) = manager { + let task = task_manager.future().fuse(); + let signal = tokio::signal::ctrl_c(); + futures::pin_mut!(signal); + futures::future::select(task, signal).await; + // we don't really care whichever comes first. + task_manager.clean_shutdown().await + } + } +} + +impl Drop for Node { + fn drop(&mut self) { + // Revert all blocks added since creation of the node. + let diff = self.client.info().best_number - self.initial_block_number; + self.revert_blocks(diff); + } +} diff --git a/test-utils/test-runner/src/utils.rs b/test-utils/test-runner/src/utils.rs new file mode 100644 index 0000000000000..8e8c84e6b4f8a --- /dev/null +++ b/test-utils/test-runner/src/utils.rs @@ -0,0 +1,117 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
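Taken together, `seal_blocks`, `events`, and `with_state` are the handles a test drives the node with. A minimal sketch, under the assumption that `node` is a `Node<T>` built from the client parts above and that the statements run inside an async test body:

```rust
// Ask the manual-seal task for one (possibly empty) block...
node.seal_blocks(1).await;

// ...then read back the events frame-system recorded for the new best block.
let events = node.events();
log::info!("sealed a block carrying {} events", events.len());
```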
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sc_client_api::execution_extensions::ExecutionStrategies; +use sc_executor::WasmExecutionMethod; +use sc_informant::OutputFormat; +use sc_network::{ + config::{NetworkConfiguration, Role, TransportConfig}, + multiaddr, +}; +use sc_service::{ + config::KeystoreConfig, BasePath, ChainSpec, Configuration, DatabaseSource, KeepBlocks, + TransactionStorageMode, +}; +use sp_keyring::sr25519::Keyring::Alice; +use tokio::runtime::Handle; + +pub use sc_cli::build_runtime; + +/// Base db path gotten from env +pub fn base_path() -> BasePath { + if let Some(base) = std::env::var("DB_BASE_PATH").ok() { + BasePath::new(base) + } else { + BasePath::new_temp_dir().expect("couldn't create a temp dir") + } +} + +/// Produces a default configuration object, suitable for use with most set ups. +pub fn default_config(tokio_handle: Handle, mut chain_spec: Box) -> Configuration { + let base_path = base_path(); + let root_path = base_path.path().to_path_buf().join("chains").join(chain_spec.id()); + + let storage = chain_spec + .as_storage_builder() + .build_storage() + .expect("could not build storage"); + + chain_spec.set_storage(storage); + let key_seed = Alice.to_seed(); + + let mut network_config = NetworkConfiguration::new( + format!("Test Node for: {}", key_seed), + "network/test/0.1", + Default::default(), + None, + ); + let informant_output_format = OutputFormat { enable_color: false }; + network_config.allow_non_globals_in_dht = true; + + network_config.listen_addresses.push(multiaddr::Protocol::Memory(0).into()); + + network_config.transport = TransportConfig::MemoryOnly; + + Configuration { + impl_name: "test-node".to_string(), + impl_version: "0.1".to_string(), + role: Role::Authority, + tokio_handle, + transaction_pool: Default::default(), + network: network_config, + keystore: KeystoreConfig::Path { path: root_path.join("key"), password: None }, + database: DatabaseSource::RocksDb { path: root_path.join("db"), cache_size: 128 }, + state_cache_size: 16777216, + state_cache_child_ratio: None, + chain_spec, + wasm_method: WasmExecutionMethod::Interpreted, + execution_strategies: ExecutionStrategies { + syncing: sc_client_api::ExecutionStrategy::AlwaysWasm, + importing: sc_client_api::ExecutionStrategy::AlwaysWasm, + block_construction: sc_client_api::ExecutionStrategy::AlwaysWasm, + offchain_worker: sc_client_api::ExecutionStrategy::AlwaysWasm, + other: sc_client_api::ExecutionStrategy::AlwaysWasm, + }, + rpc_http: None, + rpc_ws: None, + rpc_ipc: None, + rpc_ws_max_connections: None, + rpc_cors: None, + rpc_methods: Default::default(), + rpc_max_payload: None, + prometheus_config: None, + telemetry_endpoints: None, + default_heap_pages: None, + offchain_worker: Default::default(), + force_authoring: false, + disable_grandpa: false, + dev_key_seed: Some(key_seed), + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + base_path: Some(base_path), + wasm_runtime_overrides: None, + informant_output_format, + disable_log_reloading: false, + keystore_remote: None, + keep_blocks: KeepBlocks::All, + state_pruning: Default::default(), + transaction_storage: TransactionStorageMode::BlockBody, + } +} diff --git a/test-utils/tests/basic.rs b/test-utils/tests/basic.rs index 3e96bfe83d3a7..527ca3e365edb 100644 --- a/test-utils/tests/basic.rs +++ b/test-utils/tests/basic.rs @@ 
-1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,39 +16,30 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_service::{TaskExecutor, TaskType}; - #[substrate_test_utils::test] -async fn basic_test(_: TaskExecutor) { +async fn basic_test() { assert!(true); } #[substrate_test_utils::test] #[should_panic(expected = "boo!")] -async fn panicking_test(_: TaskExecutor) { +async fn panicking_test() { panic!("boo!"); } -#[substrate_test_utils::test(max_threads = 2)] -async fn basic_test_with_args(_: TaskExecutor) { - assert!(true); -} - -#[substrate_test_utils::test] -async fn rename_argument(ex: TaskExecutor) { - let ex2 = ex.clone(); - ex2.spawn(Box::pin(async { () }), TaskType::Blocking); +#[substrate_test_utils::test(flavor = "multi_thread", worker_threads = 1)] +async fn basic_test_with_args() { assert!(true); } -#[substrate_test_utils::test] -#[should_panic(expected = "test took too long")] // NOTE: enable this test only after setting SUBSTRATE_TEST_TIMEOUT to a smaller value // // SUBSTRATE_TEST_TIMEOUT=1 cargo test -- --ignored timeout +#[substrate_test_utils::test] +#[should_panic(expected = "test took too long")] #[ignore] -async fn timeout(_: TaskExecutor) { - tokio::time::delay_for(std::time::Duration::from_secs( +async fn timeout() { + tokio::time::sleep(std::time::Duration::from_secs( std::env::var("SUBSTRATE_TEST_TIMEOUT") .expect("env var SUBSTRATE_TEST_TIMEOUT has been provided by the user") .parse::() diff --git a/test-utils/tests/ui.rs b/test-utils/tests/ui.rs index 1f3b466c7dd6e..119162fdc21b8 100644 --- a/test-utils/tests/ui.rs +++ b/test-utils/tests/ui.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -19,6 +19,5 @@ #[test] fn substrate_test_utils_derive_trybuild() { let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/missing-func-parameter.rs"); t.compile_fail("tests/ui/too-many-func-parameters.rs"); } diff --git a/test-utils/tests/ui/missing-func-parameter.stderr b/test-utils/tests/ui/missing-func-parameter.stderr deleted file mode 100644 index fbe0bc69918e8..0000000000000 --- a/test-utils/tests/ui/missing-func-parameter.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: the test function accepts only one argument of type sc_service::TaskExecutor - --> $DIR/missing-func-parameter.rs:20:1 - | -20 | async fn missing_func_parameter() { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/test-utils/tests/ui/too-many-func-parameters.rs b/test-utils/tests/ui/too-many-func-parameters.rs index 9aeadc2a88430..b1789b9d3ee7e 100644 --- a/test-utils/tests/ui/too-many-func-parameters.rs +++ b/test-utils/tests/ui/too-many-func-parameters.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
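The updated attribute takes tokio-style arguments instead of the old `max_threads`/`TaskExecutor` machinery. A sketch of the new form, mirroring `basic_test_with_args` above (the test name is illustrative):

```rust
// arguments are forwarded to the tokio test runtime configuration
#[substrate_test_utils::test(flavor = "multi_thread", worker_threads = 2)]
async fn my_multithreaded_test() {
    // tokio 1.x renamed `delay_for` to `sleep`, as the timeout test above reflects
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
}
```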
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,11 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[allow(unused_imports)] -use sc_service::TaskExecutor; - #[substrate_test_utils::test] -async fn too_many_func_parameters(task_executor_1: TaskExecutor, task_executor_2: TaskExecutor) { +async fn too_many_func_parameters(_: u32) { assert!(true); } diff --git a/test-utils/tests/ui/too-many-func-parameters.stderr b/test-utils/tests/ui/too-many-func-parameters.stderr index e30bb4ed8ee85..1b1630022e4f7 100644 --- a/test-utils/tests/ui/too-many-func-parameters.stderr +++ b/test-utils/tests/ui/too-many-func-parameters.stderr @@ -1,5 +1,5 @@ -error: the test function accepts only one argument of type sc_service::TaskExecutor - --> $DIR/too-many-func-parameters.rs:23:1 +error: No arguments expected for tests. + --> $DIR/too-many-func-parameters.rs:20:1 | -23 | async fn too_many_func_parameters(task_executor_1: TaskExecutor, task_executor_2: TaskExecutor) { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +20 | async fn too_many_func_parameters(_: u32) { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml deleted file mode 100644 index 06e626ef65ffa..0000000000000 --- a/utils/browser/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "substrate-browser-utils" -version = "0.8.0" -authors = ["Parity Technologies "] -description = "Utilities for creating a browser light-client." -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -futures = { version = "0.3", features = ["compat"] } -futures01 = { package = "futures", version = "0.1.29" } -log = "0.4.8" -libp2p-wasm-ext = { version = "0.22", features = ["websocket"] } -console_error_panic_hook = "0.1.6" -console_log = "0.1.2" -js-sys = "0.3.34" -wasm-bindgen = "0.2.57" -wasm-bindgen-futures = "0.4.7" -kvdb-web = "0.7" -sp-database = { version = "2.0.0", path = "../../primitives/database" } -sc-informant = { version = "0.8.0", path = "../../client/informant" } -sc-service = { version = "0.8.0", path = "../../client/service", default-features = false } -sc-network = { path = "../../client/network", version = "0.8.0"} -sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0"} - -# Imported just for the `wasm-bindgen` feature -rand6 = { package = "rand", version = "0.6", features = ["wasm-bindgen"] } -rand = { version = "0.7", features = ["wasm-bindgen"] } -futures-timer = { version = "3.0.1", features = ["wasm-bindgen"]} -chrono = { version = "0.4", features = ["wasmbind"] } diff --git a/utils/browser/README.md b/utils/browser/README.md deleted file mode 100644 index 9718db58b37e9..0000000000000 --- a/utils/browser/README.md +++ /dev/null @@ -1 +0,0 @@ -License: Apache-2.0 \ No newline at end of file diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs deleted file mode 100644 index f5d3faeb86a09..0000000000000 --- a/utils/browser/src/lib.rs +++ /dev/null @@ -1,204 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use futures01::sync::mpsc as mpsc01; -use log::{debug, info}; -use sc_network::config::TransportConfig; -use sc_service::{ - RpcSession, Role, Configuration, TaskManager, RpcHandlers, - config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration}, - GenericChainSpec, RuntimeGenesis -}; -use wasm_bindgen::prelude::*; -use futures::{ - prelude::*, channel::{oneshot, mpsc}, compat::*, future::{ready, ok, select} -}; -use std::pin::Pin; -use sc_chain_spec::Extension; -use libp2p_wasm_ext::{ExtTransport, ffi}; - -pub use console_error_panic_hook::set_once as set_console_error_panic_hook; -pub use console_log::init_with_level as init_console_log; - -/// Create a service configuration from a chain spec. -/// -/// This configuration contains good defaults for a browser light client. -pub async fn browser_configuration(chain_spec: GenericChainSpec) - -> Result> -where - G: RuntimeGenesis + 'static, - E: Extension + 'static + Send + Sync, -{ - let name = chain_spec.name().to_string(); - - let transport = ExtTransport::new(ffi::websocket_transport()); - let mut network = NetworkConfiguration::new( - format!("{} (Browser)", name), - "unknown", - Default::default(), - None, - ); - network.boot_nodes = chain_spec.boot_nodes().to_vec(); - network.transport = TransportConfig::Normal { - wasm_external_transport: Some(transport.clone()), - allow_private_ipv4: true, - enable_mdns: false, - }; - - let config = Configuration { - network, - telemetry_endpoints: chain_spec.telemetry_endpoints().clone(), - chain_spec: Box::new(chain_spec), - task_executor: (|fut, _| { - wasm_bindgen_futures::spawn_local(fut); - async {} - }).into(), - telemetry_external_transport: Some(transport), - role: Role::Light, - database: { - info!("Opening Indexed DB database '{}'...", name); - let db = kvdb_web::Database::open(name, 10).await?; - - DatabaseConfig::Custom(sp_database::as_database(db)) - }, - keystore: KeystoreConfig::InMemory, - default_heap_pages: Default::default(), - dev_key_seed: Default::default(), - disable_grandpa: Default::default(), - execution_strategies: Default::default(), - force_authoring: Default::default(), - impl_name: String::from("parity-substrate"), - impl_version: String::from("0.0.0"), - offchain_worker: Default::default(), - prometheus_config: Default::default(), - pruning: Default::default(), - rpc_cors: Default::default(), - rpc_http: Default::default(), - rpc_ipc: Default::default(), - rpc_ws: Default::default(), - rpc_ws_max_connections: Default::default(), - rpc_methods: Default::default(), - state_cache_child_ratio: Default::default(), - state_cache_size: Default::default(), - tracing_receiver: Default::default(), - tracing_targets: Default::default(), - transaction_pool: Default::default(), - wasm_method: Default::default(), - max_runtime_instances: 8, - announce_block: true, - base_path: None, - informant_output_format: sc_informant::OutputFormat { - enable_color: false, - }, - }; - - 
Ok(config) -} - -/// A running client. -#[wasm_bindgen] -pub struct Client { - rpc_send_tx: mpsc::UnboundedSender, -} - -struct RpcMessage { - rpc_json: String, - session: RpcSession, - send_back: oneshot::Sender> + Send>>>, -} - -/// Create a Client object that connects to a service. -pub fn start_client(mut task_manager: TaskManager, rpc_handlers: RpcHandlers) -> Client { - // We dispatch a background task responsible for processing the service. - // - // The main action performed by the code below consists in polling the service with - // `service.poll()`. - // The rest consists in handling RPC requests. - let (rpc_send_tx, rpc_send_rx) = mpsc::unbounded::(); - wasm_bindgen_futures::spawn_local( - select( - rpc_send_rx.for_each(move |message| { - let fut = rpc_handlers.rpc_query(&message.session, &message.rpc_json); - let _ = message.send_back.send(fut); - ready(()) - }), - Box::pin(async move { - let _ = task_manager.future().await; - }), - ).map(drop) - ); - - Client { - rpc_send_tx, - } -} - -#[wasm_bindgen] -impl Client { - /// Allows starting an RPC request. Returns a `Promise` containing the result of that request. - #[wasm_bindgen(js_name = "rpcSend")] - pub fn rpc_send(&mut self, rpc: &str) -> js_sys::Promise { - let rpc_session = RpcSession::new(mpsc01::channel(1).0); - let (tx, rx) = oneshot::channel(); - let _ = self.rpc_send_tx.unbounded_send(RpcMessage { - rpc_json: rpc.to_owned(), - session: rpc_session, - send_back: tx, - }); - wasm_bindgen_futures::future_to_promise(async { - match rx.await { - Ok(fut) => { - fut.await - .map(|s| JsValue::from_str(&s)) - .ok_or_else(|| JsValue::NULL) - }, - Err(_) => Err(JsValue::NULL) - } - }) - } - - /// Subscribes to an RPC pubsub endpoint. - #[wasm_bindgen(js_name = "rpcSubscribe")] - pub fn rpc_subscribe(&mut self, rpc: &str, callback: js_sys::Function) { - let (tx, rx) = mpsc01::channel(4); - let rpc_session = RpcSession::new(tx); - let (fut_tx, fut_rx) = oneshot::channel(); - let _ = self.rpc_send_tx.unbounded_send(RpcMessage { - rpc_json: rpc.to_owned(), - session: rpc_session.clone(), - send_back: fut_tx, - }); - wasm_bindgen_futures::spawn_local(async { - if let Ok(fut) = fut_rx.await { - fut.await; - } - }); - - wasm_bindgen_futures::spawn_local(async move { - let _ = rx.compat() - .try_for_each(|s| { - let _ = callback.call1(&callback, &JsValue::from_str(&s)); - ok(()) - }) - .await; - - // We need to keep `rpc_session` alive. - debug!("RPC subscription has ended"); - drop(rpc_session); - }); - } -} diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index 30c8a4c52b657..fbef70db93bfd 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-build-script-utils" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,4 +13,4 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -platforms = "0.2.1" +platforms = "1.1" diff --git a/utils/build-script-utils/src/git.rs b/utils/build-script-utils/src/git.rs index 29c6a325fe7e9..66a15737f84ca 100644 --- a/utils/build-script-utils/src/git.rs +++ b/utils/build-script-utils/src/git.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,16 +33,16 @@ pub fn rerun_if_git_head_changed() { Err(err) => { eprintln!("cargo:warning=Unable to read the Git repository: {}", err); - return; - } - Ok(None) => {} + return + }, + Ok(None) => {}, Ok(Some(paths)) => { for p in paths { println!("cargo:rerun-if-changed={}", p.display()); } - return; - } + return + }, } manifest_dir.pop(); diff --git a/utils/build-script-utils/src/lib.rs b/utils/build-script-utils/src/lib.rs index 512e6dcaefda7..0c45c4b34ebe8 100644 --- a/utils/build-script-utils/src/lib.rs +++ b/utils/build-script-utils/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ //! Crate with utility functions for `build.rs` scripts. -mod version; mod git; +mod version; pub use git::*; pub use version::*; diff --git a/utils/build-script-utils/src/version.rs b/utils/build-script-utils/src/version.rs index 103fd5b1d24ac..52336eb0b6a24 100644 --- a/utils/build-script-utils/src/version.rs +++ b/utils/build-script-utils/src/version.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,15 +20,13 @@ use std::{borrow::Cow, process::Command}; /// Generate the `cargo:` key output pub fn generate_cargo_keys() { - let output = Command::new("git") - .args(&["rev-parse", "--short", "HEAD"]) - .output(); + let output = Command::new("git").args(&["rev-parse", "--short", "HEAD"]).output(); let commit = match output { Ok(o) if o.status.success() => { let sha = String::from_utf8_lossy(&o.stdout).trim().to_owned(); Cow::from(sha) - } + }, Ok(o) => { println!("cargo:warning=Git command failed with status: {}", o.status); Cow::from("unknown") diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 23662722a1f6c..11c269bc3cba8 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fork-tree" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,4 +14,4 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 1d01c53417649..9143da89a77e0 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,9 +20,8 @@ #![warn(missing_docs)] -use std::cmp::Reverse; -use std::fmt; use codec::{Decode, Encode}; +use std::{cmp::Reverse, fmt}; /// Error occurred when iterating with the tree. 
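The `generate_cargo_keys` hunk above only compresses the `git rev-parse` invocation onto one line; the control flow is unchanged: shell out to git and degrade to `"unknown"` rather than failing the build. A self-contained sketch of that fallback logic — the warning text in the `Err` arm and the emitted env-var name are illustrative, as the hunk does not show them:

```rust
use std::{borrow::Cow, process::Command};

fn short_git_sha() -> Cow<'static, str> {
    match Command::new("git").args(&["rev-parse", "--short", "HEAD"]).output() {
        // Happy path: git exists and we are inside a repository.
        Ok(o) if o.status.success() =>
            Cow::from(String::from_utf8_lossy(&o.stdout).trim().to_owned()),
        // git ran but failed (e.g. not a repo): warn, don't abort the build.
        Ok(o) => {
            println!("cargo:warning=Git command failed with status: {}", o.status);
            Cow::from("unknown")
        },
        // git is not installed at all.
        Err(err) => {
            println!("cargo:warning=Failed to execute git: {}", err);
            Cow::from("unknown")
        },
    }
}

fn main() {
    println!("cargo:rustc-env=SUBSTRATE_CLI_COMMIT_SHA={}", short_git_sha());
}
```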
#[derive(Clone, Debug, PartialEq)] @@ -83,7 +82,8 @@ pub struct ForkTree { best_finalized_number: Option, } -impl ForkTree where +impl ForkTree +where H: PartialEq + Clone, N: Ord + Clone, V: Clone, @@ -102,17 +102,14 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { - let new_root_index = self.find_node_index_where( - hash, - number, - is_descendent_of, - predicate, - )?; + let new_root_index = + self.find_node_index_where(hash, number, is_descendent_of, predicate)?; let removed = if let Some(mut root_index) = new_root_index { let mut old_roots = std::mem::take(&mut self.roots); @@ -130,9 +127,10 @@ impl ForkTree where } } - let mut root = root - .expect("find_node_index_where will return array with at least one index; \ - this results in at least one item in removed; qed"); + let mut root = root.expect( + "find_node_index_where will return array with at least one index; \ + this results in at least one item in removed; qed", + ); let mut removed = old_roots; @@ -144,11 +142,11 @@ impl ForkTree where for child in root_children { if is_first && (child.number == *number && child.hash == *hash || - child.number < *number && is_descendent_of(&child.hash, hash).unwrap_or(false)) + child.number < *number && is_descendent_of(&child.hash, hash)?) { root.children.push(child); - // assuming that the tree is well formed only one child should pass this requirement - // due to ancestry restrictions (i.e. they must be different forks). + // assuming that the tree is well formed only one child should pass this + // requirement due to ancestry restrictions (i.e. they must be different forks). is_first = false; } else { removed.push(child); @@ -168,16 +166,14 @@ impl ForkTree where } } -impl ForkTree where +impl ForkTree +where H: PartialEq, N: Ord, { /// Create a new empty tree. pub fn new() -> ForkTree { - ForkTree { - roots: Vec::new(), - best_finalized_number: None, - } + ForkTree { roots: Vec::new(), best_finalized_number: None } } /// Rebalance the tree, i.e. sort child nodes by max branch depth @@ -209,18 +205,19 @@ impl ForkTree where mut data: V, is_descendent_of: &F, ) -> Result> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } for root in self.roots.iter_mut() { if root.hash == hash { - return Err(Error::Duplicate); + return Err(Error::Duplicate) } match root.import(hash, number, data, is_descendent_of)? { @@ -229,16 +226,14 @@ impl ForkTree where number = n; data = d; }, - None => return Ok(false), + None => { + self.rebalance(); + return Ok(false) + }, } } - self.roots.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); + self.roots.push(Node { data, hash, number, children: Vec::new() }); self.rebalance(); @@ -246,16 +241,18 @@ impl ForkTree where } /// Iterates over the existing roots in the tree. 
- pub fn roots(&self) -> impl Iterator { + pub fn roots(&self) -> impl Iterator { self.roots.iter().map(|node| (&node.hash, &node.number, &node.data)) } - fn node_iter(&self) -> impl Iterator> { - ForkTreeIterator { stack: self.roots.iter().collect() } + fn node_iter(&self) -> impl Iterator> { + // we need to reverse the order of roots to maintain the expected + // ordering since the iterator uses a stack to track state. + ForkTreeIterator { stack: self.roots.iter().rev().collect() } } /// Iterates the nodes in the tree in pre-order. - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.node_iter().map(|node| (&node.hash, &node.number, &node.data)) } @@ -269,7 +266,8 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> where + ) -> Result>, Error> + where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -280,7 +278,7 @@ impl ForkTree where // found the node, early exit if let FindOutcome::Found(node) = node { - return Ok(Some(node)); + return Ok(Some(node)) } } @@ -288,23 +286,13 @@ impl ForkTree where } /// Map fork tree into values of new types. - pub fn map( - self, - f: &mut F, - ) -> ForkTree where + pub fn map(self, f: &mut F) -> ForkTree + where F: FnMut(&H, &N, V) -> VT, { - let roots = self.roots - .into_iter() - .map(|root| { - root.map(f) - }) - .collect(); - - ForkTree { - roots, - best_finalized_number: self.best_finalized_number, - } + let roots = self.roots.into_iter().map(|root| root.map(f)).collect(); + + ForkTree { roots, best_finalized_number: self.best_finalized_number } } /// Same as [`find_node_where`](ForkTree::find_node_where), but returns mutable reference. @@ -314,7 +302,8 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> where + ) -> Result>, Error> + where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -325,7 +314,7 @@ impl ForkTree where // found the node, early exit if let FindOutcome::Found(node) = node { - return Ok(Some(node)); + return Ok(Some(node)) } } @@ -339,7 +328,8 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> where + ) -> Result>, Error> + where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -351,7 +341,7 @@ impl ForkTree where // found the node, early exit if let FindOutcome::Found(mut node) = node { node.push(index); - return Ok(Some(node)); + return Ok(Some(node)) } } @@ -362,7 +352,9 @@ impl ForkTree where /// with the given hash exists. All other roots are pruned, and the children /// of the finalized node become the new roots. pub fn finalize_root(&mut self, hash: &H) -> Option { - self.roots.iter().position(|node| node.hash == *hash) + self.roots + .iter() + .position(|node| node.hash == *hash) .map(|position| self.finalize_root_at(position)) } @@ -371,7 +363,7 @@ impl ForkTree where let node = self.roots.swap_remove(position); self.roots = node.children; self.best_finalized_number = Some(node.number); - return node.data; + return node.data } /// Finalize a node in the tree. 
This method will make sure that the node @@ -385,24 +377,25 @@ impl ForkTree where number: N, is_descendent_of: &F, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); + return Ok(FinalizationResult::Changed(Some(root))) } // make sure we're not finalizing a descendent of any root for root in self.roots.iter() { if number > root.number && is_descendent_of(&root.hash, hash)? { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } @@ -410,15 +403,15 @@ impl ForkTree where // another fork not part of the tree). make sure to only keep roots that // are part of the finalized branch let mut changed = false; - self.roots.retain(|root| { - let retain = root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); + let roots = std::mem::take(&mut self.roots); - if !retain { + for root in roots { + if root.number > number && is_descendent_of(hash, &root.hash)? { + self.roots.push(root); + } else { changed = true; } - - retain - }); + } self.best_finalized_number = Some(number); @@ -438,18 +431,19 @@ impl ForkTree where number: N, is_descendent_of: &F, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); + return Ok(FinalizationResult::Changed(Some(root))) } // we need to: @@ -462,22 +456,23 @@ impl ForkTree where let (is_finalized, is_descendant, is_ancestor) = { let root = &self.roots[idx]; let is_finalized = root.hash == *hash; - let is_descendant = !is_finalized - && root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); - let is_ancestor = !is_finalized && !is_descendant - && root.number < number && is_descendent_of(&root.hash, hash).unwrap_or(false); + let is_descendant = + !is_finalized && root.number > number && is_descendent_of(hash, &root.hash)?; + let is_ancestor = !is_finalized && + !is_descendant && root.number < number && + is_descendent_of(&root.hash, hash)?; (is_finalized, is_descendant, is_ancestor) }; // if we have met finalized root - open it and return if is_finalized { - return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))); + return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))) } // if node is descendant of finalized block - just leave it as is if is_descendant { idx += 1; - continue; + continue } // if node is ancestor of finalized block - remove it and continue with children @@ -485,7 +480,7 @@ impl ForkTree where let root = self.roots.swap_remove(idx); self.roots.extend(root.children); changed = true; - continue; + continue } // if node is neither ancestor, nor descendant of the finalized block - remove it @@ -518,13 +513,14 @@ impl ForkTree where is_descendent_of: &F, predicate: P, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: 
Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } @@ -536,11 +532,11 @@ impl ForkTree where if node.hash == *hash || is_descendent_of(&node.hash, hash)? { for node in node.children.iter() { if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } - return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))); + return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))) } } } @@ -562,13 +558,14 @@ impl ForkTree where is_descendent_of: &F, predicate: P, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } @@ -581,12 +578,12 @@ impl ForkTree where if root.hash == *hash || is_descendent_of(&root.hash, hash)? { for node in root.children.iter() { if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } position = Some(i); - break; + break } } } @@ -605,18 +602,19 @@ impl ForkTree where // descendent (in this case the node wasn't finalized earlier presumably // because the predicate didn't pass). let mut changed = false; - self.roots.retain(|root| { - let retain = - root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false) || + let roots = std::mem::take(&mut self.roots); + + for root in roots { + let retain = root.number > number && is_descendent_of(hash, &root.hash)? || root.number == number && root.hash == *hash || - is_descendent_of(&root.hash, hash).unwrap_or(false); + is_descendent_of(&root.hash, hash)?; - if !retain { + if retain { + self.roots.push(root); + } else { changed = true; } - - retain - }); + } self.best_finalized_number = Some(number); @@ -672,26 +670,14 @@ mod node_implementation { } /// Map node data into values of new types. - pub fn map( - self, - f: &mut F, - ) -> Node where + pub fn map(self, f: &mut F) -> Node + where F: FnMut(&H, &N, V) -> VT, { - let children = self.children - .into_iter() - .map(|node| { - node.map(f) - }) - .collect(); + let children = self.children.into_iter().map(|node| node.map(f)).collect(); let vt = f(&self.hash, &self.number, self.data); - Node { - hash: self.hash, - number: self.number, - data: vt, - children, - } + Node { hash: self.hash, number: self.number, data: vt, children } } pub fn import( @@ -701,14 +687,17 @@ mod node_implementation { mut data: V, is_descendent_of: &F, ) -> Result, Error> - where E: fmt::Debug, - F: Fn(&H, &H) -> Result, + where + E: fmt::Debug, + F: Fn(&H, &H) -> Result, { if self.hash == hash { - return Err(Error::Duplicate); + return Err(Error::Duplicate) }; - if number <= self.number { return Ok(Some((hash, number, data))); } + if number <= self.number { + return Ok(Some((hash, number, data))) + } for node in self.children.iter_mut() { match node.import(hash, number, data, is_descendent_of)? { @@ -722,12 +711,7 @@ mod node_implementation { } if is_descendent_of(&self.hash, &hash)? 
{ - self.children.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); + self.children.push(Node { data, hash, number, children: Vec::new() }); Ok(None) } else { @@ -751,13 +735,14 @@ mod node_implementation { is_descendent_of: &F, predicate: &P, ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { // stop searching this branch if *number < self.number { - return Ok(FindOutcome::Failure(false)); + return Ok(FindOutcome::Failure(false)) } let mut known_descendent_of = false; @@ -776,7 +761,7 @@ mod node_implementation { // then it cannot be a descendent of any others, // so we don't search them. known_descendent_of = true; - break; + break }, FindOutcome::Failure(false) => {}, } @@ -790,7 +775,7 @@ mod node_implementation { if is_descendent_of { // if the predicate passes we return the node if predicate(&self.data) { - return Ok(FindOutcome::Found(Vec::new())); + return Ok(FindOutcome::Found(Vec::new())) } } @@ -811,9 +796,10 @@ mod node_implementation { is_descendent_of: &F, predicate: &P, ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; @@ -843,9 +829,10 @@ mod node_implementation { is_descendent_of: &F, predicate: &P, ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; @@ -866,7 +853,7 @@ mod node_implementation { } // Workaround for: https://github.com/rust-lang/rust/issues/34537 -use node_implementation::{Node, FindOutcome}; +use node_implementation::{FindOutcome, Node}; struct ForkTreeIterator<'a, H, N, V> { stack: Vec<&'a Node>, @@ -898,8 +885,7 @@ impl Iterator for RemovedIterator { // child nodes are stored ordered by max branch height (decreasing), // we want to keep this ordering while iterating but since we're // using a stack for iterator state we need to reverse it. - let mut children = Vec::new(); - std::mem::swap(&mut children, &mut node.children); + let children = std::mem::take(&mut node.children); self.stack.extend(children.into_iter().rev()); (node.hash, node.number, node.data) @@ -909,7 +895,7 @@ impl Iterator for RemovedIterator { #[cfg(test)] mod test { - use super::{FinalizationResult, ForkTree, Error}; + use super::{Error, FinalizationResult, ForkTree}; #[derive(Debug, PartialEq)] struct TestError; @@ -922,9 +908,11 @@ mod test { impl std::error::Error for TestError {} - fn test_fork_tree<'a>() -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { + fn test_fork_tree<'a>( + ) -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { let mut tree = ForkTree::new(); + #[rustfmt::skip] // // - B - C - D - E // / @@ -932,13 +920,16 @@ mod test { // / / // A - F - H - I // \ - // - L - M - // \ + // - L - M \ // - O // \ // — J - K // // (where N is not a part of fork tree) + // + // NOTE: the tree will get automatically rebalance on import and won't be laid out like the + // diagram above. the children will be ordered by subtree depth and the longest branches + // will be on the leftmost side of the tree. 
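As the new NOTE above explains, the tree is rebalanced on import so that children are ordered by maximum subtree depth and preorder iteration visits the longest fork first. A toy, dependency-free sketch of that rebalancing rule (not the crate's actual implementation, which also tracks hashes and block numbers):

```rust
#[derive(Debug)]
struct Node {
    hash: &'static str,
    children: Vec<Node>,
}

impl Node {
    /// Length of the longest path below (and including) this node.
    fn max_depth(&self) -> usize {
        1 + self.children.iter().map(Node::max_depth).max().unwrap_or(0)
    }

    /// Sort children so the deepest subtree comes first, recursively.
    fn rebalance(&mut self) {
        self.children.sort_by_key(|c| std::cmp::Reverse(c.max_depth()));
        for child in &mut self.children {
            child.rebalance();
        }
    }
}

fn main() {
    let mut root = Node {
        hash: "A",
        children: vec![
            Node { hash: "B", children: vec![] }, // depth 1
            Node {
                hash: "F", // depth 2: F - G
                children: vec![Node { hash: "G", children: vec![] }],
            },
        ],
    };
    root.rebalance();
    // The deeper "F" branch is now ordered before the shallow "B" leaf.
    assert_eq!(root.children[0].hash, "F");
}
```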
let is_descendent_of = |base: &&str, block: &&str| -> Result { let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "O"]; match (*base, *block) { @@ -947,7 +938,8 @@ mod test { ("C", b) => Ok(b == "D" || b == "E"), ("D", b) => Ok(b == "E"), ("E", _) => Ok(false), - ("F", b) => Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "O"), + ("F", b) => + Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "O"), ("G", _) => Ok(false), ("H", b) => Ok(b == "I" || b == "L" || b == "M" || b == "O"), ("I", _) => Ok(false), @@ -989,40 +981,22 @@ mod test { tree.finalize_root(&"A"); - assert_eq!( - tree.best_finalized_number, - Some(1), - ); + assert_eq!(tree.best_finalized_number, Some(1)); - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Revert), - ); + assert_eq!(tree.import("A", 1, (), &is_descendent_of), Err(Error::Revert)); } #[test] fn import_doesnt_add_duplicates() { let (mut tree, is_descendent_of) = test_fork_tree(); - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Duplicate), - ); + assert_eq!(tree.import("A", 1, (), &is_descendent_of), Err(Error::Duplicate)); - assert_eq!( - tree.import("I", 4, (), &is_descendent_of), - Err(Error::Duplicate), - ); + assert_eq!(tree.import("I", 4, (), &is_descendent_of), Err(Error::Duplicate)); - assert_eq!( - tree.import("G", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); + assert_eq!(tree.import("G", 3, (), &is_descendent_of), Err(Error::Duplicate)); - assert_eq!( - tree.import("K", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); + assert_eq!(tree.import("K", 3, (), &is_descendent_of), Err(Error::Duplicate)); } #[test] @@ -1084,10 +1058,7 @@ mod test { let original_roots = tree.roots.clone(); // finalizing a block prior to any in the node doesn't change the tree - assert_eq!( - tree.finalize(&"0", 0, &is_descendent_of), - Ok(FinalizationResult::Unchanged), - ); + assert_eq!(tree.finalize(&"0", 0, &is_descendent_of), Ok(FinalizationResult::Unchanged)); assert_eq!(tree.roots, original_roots); @@ -1103,21 +1074,12 @@ mod test { ); // finalizing anything lower than what we observed will fail - assert_eq!( - tree.best_finalized_number, - Some(1), - ); + assert_eq!(tree.best_finalized_number, Some(1)); - assert_eq!( - tree.finalize(&"Z", 1, &is_descendent_of), - Err(Error::Revert), - ); + assert_eq!(tree.finalize(&"Z", 1, &is_descendent_of), Err(Error::Revert)); // trying to finalize a node without finalizing its ancestors first will fail - assert_eq!( - tree.finalize(&"H", 3, &is_descendent_of), - Err(Error::UnfinalizedAncestor), - ); + assert_eq!(tree.finalize(&"H", 3, &is_descendent_of), Err(Error::UnfinalizedAncestor)); // after finalizing "F" we can finalize "H" assert_eq!( @@ -1132,7 +1094,7 @@ mod test { assert_eq!( tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], + vec![("L", 4), ("I", 4)], ); // finalizing a node from another fork that isn't part of the tree clears the tree @@ -1180,13 +1142,10 @@ mod test { assert_eq!( tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], + vec![("L", 4), ("I", 4)], ); - assert_eq!( - tree.best_finalized_number, - Some(3), - ); + assert_eq!(tree.best_finalized_number, Some(3)); // finalizing N (which is not a part of the tree): // 1) removes roots that are not ancestors/descendants of N (I) @@ -1203,23 +1162,20 @@ mod test { vec![], ); - assert_eq!( - tree.best_finalized_number, - Some(6), - ); + 
assert_eq!(tree.best_finalized_number, Some(6)); } #[test] fn finalize_with_descendent_works() { #[derive(Debug, PartialEq)] - struct Change { effective: u64 }; + struct Change { + effective: u64, + } let (mut tree, is_descendent_of) = { let mut tree = ForkTree::new(); let is_descendent_of = |base: &&str, block: &&str| -> Result { - - // // A0 #1 - (B #2) - (C #5) - D #10 - E #15 - (F #100) // \ // - (G #100) @@ -1258,24 +1214,15 @@ mod test { // finalizing "D" will finalize a block from the tree, but it can't be applied yet // since it is not a root change assert_eq!( - tree.finalizes_any_with_descendent_if( - &"D", - 10, - &is_descendent_of, - |c| c.effective == 10, - ), + tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective == + 10,), Ok(Some(false)), ); // finalizing "B" doesn't finalize "A0" since the predicate doesn't pass, // although it will clear out "A1" from the tree assert_eq!( - tree.finalize_with_descendent_if( - &"B", - 2, - &is_descendent_of, - |c| c.effective <= 2, - ), + tree.finalize_with_descendent_if(&"B", 2, &is_descendent_of, |c| c.effective <= 2,), Ok(FinalizationResult::Changed(None)), ); @@ -1296,12 +1243,7 @@ mod test { ); assert_eq!( - tree.finalize_with_descendent_if( - &"C", - 5, - &is_descendent_of, - |c| c.effective <= 5, - ), + tree.finalize_with_descendent_if(&"C", 5, &is_descendent_of, |c| c.effective <= 5,), Ok(FinalizationResult::Changed(Some(Change { effective: 5 }))), ); @@ -1312,33 +1254,20 @@ mod test { // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first assert_eq!( - tree.finalizes_any_with_descendent_if( - &"F", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), + tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective <= + 100,), Err(Error::UnfinalizedAncestor), ); // it will work with "G" though since it is not in the same branch as "E" assert_eq!( - tree.finalizes_any_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), + tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= + 100,), Ok(Some(true)), ); assert_eq!( - tree.finalize_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), + tree.finalize_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= 100,), Ok(FinalizationResult::Changed(Some(Change { effective: 10 }))), ); @@ -1353,12 +1282,19 @@ mod test { tree.iter().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), vec![ ("A", 1), - ("B", 2), ("C", 3), ("D", 4), ("E", 5), + ("B", 2), + ("C", 3), + ("D", 4), + ("E", 5), ("F", 2), + ("H", 3), + ("L", 4), + ("M", 5), + ("O", 5), + ("I", 4), ("G", 3), - ("H", 3), ("I", 4), - ("L", 4), ("M", 5), ("O", 5), - ("J", 2), ("K", 3) + ("J", 2), + ("K", 3), ], ); } @@ -1388,19 +1324,11 @@ mod test { // "L" is a descendent of "K", but the predicate will only pass for "K", // therefore only one call to `is_descendent_of` should be made assert_eq!( - tree.finalizes_any_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), + tree.finalizes_any_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), Ok(Some(false)), ); - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1); } n_is_descendent_of_calls.store(0, Ordering::SeqCst); @@ -1419,19 +1347,11 @@ mod test { // "L" is a descendent of "K", but the predicate will only pass for "K", // therefore only one call to 
`is_descendent_of` should be made assert_eq!( - tree.finalize_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), + tree.finalize_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), Ok(FinalizationResult::Changed(Some(10))), ); - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1); } } @@ -1439,12 +1359,7 @@ mod test { fn find_node_works() { let (tree, is_descendent_of) = test_fork_tree(); - let node = tree.find_node_where( - &"D", - &4, - &is_descendent_of, - &|_| true, - ).unwrap().unwrap(); + let node = tree.find_node_where(&"D", &4, &is_descendent_of, &|_| true).unwrap().unwrap(); assert_eq!(node.hash, "C"); assert_eq!(node.number, 3); @@ -1461,17 +1376,9 @@ mod test { fn prune_works() { let (mut tree, is_descendent_of) = test_fork_tree(); - let removed = tree.prune( - &"C", - &3, - &is_descendent_of, - &|_| true, - ).unwrap(); + let removed = tree.prune(&"C", &3, &is_descendent_of, &|_| true).unwrap(); - assert_eq!( - tree.roots.iter().map(|node| node.hash).collect::>(), - vec!["B"], - ); + assert_eq!(tree.roots.iter().map(|node| node.hash).collect::>(), vec!["B"]); assert_eq!( tree.iter().map(|(hash, _, _)| *hash).collect::>(), @@ -1480,37 +1387,22 @@ mod test { assert_eq!( removed.map(|(hash, _, _)| hash).collect::>(), - vec!["A", "F", "G", "H", "I", "L", "M", "O", "J", "K"] + vec!["A", "F", "H", "L", "M", "O", "I", "G", "J", "K"] ); - let removed = tree.prune( - &"E", - &5, - &is_descendent_of, - &|_| true, - ).unwrap(); + let removed = tree.prune(&"E", &5, &is_descendent_of, &|_| true).unwrap(); - assert_eq!( - tree.roots.iter().map(|node| node.hash).collect::>(), - vec!["D"], - ); + assert_eq!(tree.roots.iter().map(|node| node.hash).collect::>(), vec!["D"]); - assert_eq!( - tree.iter().map(|(hash, _, _)| *hash).collect::>(), - vec!["D", "E"], - ); + assert_eq!(tree.iter().map(|(hash, _, _)| *hash).collect::>(), vec!["D", "E"]); - assert_eq!( - removed.map(|(hash, _, _)| hash).collect::>(), - vec!["B", "C"] - ); + assert_eq!(removed.map(|(hash, _, _)| hash).collect::>(), vec!["B", "C"]); } #[test] fn find_node_backtracks_after_finding_highest_descending_node() { let mut tree = ForkTree::new(); - // // A - B // \ // — C @@ -1531,12 +1423,7 @@ mod test { // when searching the tree we reach node `C`, but the // predicate doesn't pass. we should backtrack to `B`, but not to `A`, // since "B" fulfills the predicate. - let node = tree.find_node_where( - &"D", - &3, - &is_descendent_of, - &|data| *data < 3, - ).unwrap(); + let node = tree.find_node_where(&"D", &3, &is_descendent_of, &|data| *data < 3).unwrap(); assert_eq!(node.unwrap().hash, "B"); } @@ -1545,19 +1432,30 @@ mod test { fn tree_rebalance() { let (mut tree, _) = test_fork_tree(); + // the tree is automatically rebalanced on import, therefore we should iterate in preorder + // exploring the longest forks first. check the ascii art above to understand the expected + // output below. assert_eq!( tree.iter().map(|(h, _, _)| *h).collect::>(), - vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "L", "M", "O", "J", "K"], + vec!["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"], ); - // after rebalancing the tree we should iterate in preorder exploring - // the longest forks first. check the ascii art above to understand the - // expected output below. 
- tree.rebalance(); + // let's add a block "P" which is a descendent of block "O" + let is_descendent_of = |base: &&str, block: &&str| -> Result { + match (*base, *block) { + (b, "P") => Ok(vec!["A", "F", "L", "O"].into_iter().any(|n| n == b)), + _ => Ok(false), + } + }; + + tree.import("P", 6, (), &is_descendent_of).unwrap(); + // this should re-order the tree, since the branch "A -> B -> C -> D -> E" is no longer tied + // with 5 blocks depth. additionally "O" should be visited before "M" now, since it has one + // descendent "P" which makes that branch 6 blocks long. assert_eq!( tree.iter().map(|(h, _, _)| *h).collect::>(), - ["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"] + ["A", "F", "H", "L", "O", "P", "M", "I", "G", "B", "C", "D", "E", "J", "K"] ); } } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index f2c227f78228c..93616b590f61e 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking-cli" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,19 +13,25 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-service = { version = "0.8.0", default-features = false, path = "../../../client/service" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-db = { version = "0.8.0", path = "../../../client/db" } -sc-executor = { version = "0.8.0", path = "../../../client/executor" } -sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } -sp-keystore = { version = "0.8.0", path = "../../../primitives/keystore" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" } +frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service" } +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } +sc-client-db = { version = "0.10.0-dev", path = "../../../client/db" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sp-externalities = { version = "0.10.0-dev", path = "../../../primitives/externalities" } +sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } +codec = { version = "2.0.0", package = "parity-scale-codec" } structopt = "0.3.8" -codec = { version = "1.3.1", package = "parity-scale-codec" } chrono = "0.4" +serde = "1.0.126" +handlebars = "3.5.0" +Inflector = "0.11.4" +linked-hash-map = "0.5.4" +log = "0.4.8" [features] default = ["db"] diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index f5ea83d7b0c47..5efa970d93580 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
-// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,23 +15,77 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::sync::Arc; use crate::BenchmarkCmd; use codec::{Decode, Encode}; -use frame_benchmarking::{Analysis, BenchmarkBatch, BenchmarkSelector}; -use sc_cli::{SharedParams, CliConfiguration, ExecutionStrategy, Result}; +use frame_benchmarking::{ + Analysis, BenchmarkBatch, BenchmarkBatchSplitResults, BenchmarkList, BenchmarkParameter, + BenchmarkResult, BenchmarkSelector, +}; +use frame_support::traits::StorageInfo; +use linked_hash_map::LinkedHashMap; +use sc_cli::{CliConfiguration, ExecutionStrategy, Result, SharedParams}; use sc_client_db::BenchmarkingState; -use sc_executor::NativeExecutor; -use sp_state_machine::StateMachine; -use sp_externalities::Extensions; +use sc_executor::NativeElseWasmExecutor; use sc_service::{Configuration, NativeExecutionDispatch}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_core::offchain::{OffchainExt, testing::TestOffchainExt}; -use sp_keystore::{ - SyncCryptoStorePtr, KeystoreExt, - testing::KeyStore, +use sp_core::offchain::{ + testing::{TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }; -use std::fmt::Debug; +use sp_externalities::Extensions; +use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_state_machine::StateMachine; +use std::{fmt::Debug, sync::Arc, time}; + +// This takes multiple benchmark batches and combines all the results where the pallet, instance, +// and benchmark are the same. +fn combine_batches( + time_batches: Vec, + db_batches: Vec, +) -> Vec { + if time_batches.is_empty() && db_batches.is_empty() { + return Default::default() + } + + let mut all_benchmarks = + LinkedHashMap::<_, (Vec, Vec)>::new(); + + db_batches + .into_iter() + .for_each(|BenchmarkBatch { pallet, instance, benchmark, results }| { + // We use this key to uniquely identify a benchmark among batches. + let key = (pallet, instance, benchmark); + + match all_benchmarks.get_mut(&key) { + // We already have this benchmark, so we extend the results. + Some(x) => x.1.extend(results), + // New benchmark, so we add a new entry with the initial results. + None => { + all_benchmarks.insert(key, (Vec::new(), results)); + }, + } + }); + + time_batches + .into_iter() + .for_each(|BenchmarkBatch { pallet, instance, benchmark, results }| { + // We use this key to uniquely identify a benchmark among batches. + let key = (pallet, instance, benchmark); + + match all_benchmarks.get_mut(&key) { + // We already have this benchmark, so we extend the results. + Some(x) => x.0.extend(results), + None => panic!("all benchmark keys should have been populated by db batches"), + } + }); + + all_benchmarks + .into_iter() + .map(|((pallet, instance, benchmark), (time_results, db_results))| { + BenchmarkBatchSplitResults { pallet, instance, benchmark, time_results, db_results } + }) + .collect::>() +} impl BenchmarkCmd { /// Runs the command and benchmarks the chain. 
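The new `combine_batches` above merges the timing batches and the DB-tracking batches that share a `(pallet, instance, benchmark)` key, using a `LinkedHashMap` so results come back in first-seen order. The same merge in miniature, with only `std` (a linear scan over a `Vec` stands in for the `LinkedHashMap`, and `u64` stands in for the real result types):

```rust
type Key = (String, String); // (pallet, benchmark) — instance omitted for brevity

/// Merge two result streams so entries with the same key are combined,
/// preserving the order in which keys were first seen.
fn combine(db: Vec<(Key, u64)>, time: Vec<(Key, u64)>) -> Vec<(Key, Vec<u64>, Vec<u64>)> {
    let mut merged: Vec<(Key, Vec<u64>, Vec<u64>)> = Vec::new();
    for (key, r) in db {
        match merged.iter_mut().find(|(k, _, _)| *k == key) {
            Some((_, _, db_results)) => db_results.push(r),
            None => merged.push((key, Vec::new(), vec![r])),
        }
    }
    for (key, r) in time {
        // Mirrors the panic in `combine_batches`: every timing batch must
        // match a key that the DB batches inserted above.
        let entry = merged
            .iter_mut()
            .find(|(k, _, _)| *k == key)
            .expect("all benchmark keys populated by db batches");
        entry.1.push(r);
    }
    merged
}

fn main() {
    let k = |p: &str, b: &str| (p.to_string(), b.to_string());
    let db = vec![(k("balances", "transfer"), 2), (k("system", "remark"), 1)];
    let time = vec![(k("balances", "transfer"), 12_345)];
    let merged = combine(db, time);
    assert_eq!(merged[0].0, k("balances", "transfer")); // first-seen order kept
    assert_eq!(merged[0].1, vec![12_345]); // time results
    assert_eq!(merged[0].2, vec![2]); // db results
}
```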
@@ -43,137 +97,369 @@ impl BenchmarkCmd { ExecDispatch: NativeExecutionDispatch + 'static, { if let Some(output_path) = &self.output { - if !output_path.is_dir() { return Err("Output path is invalid!".into()) }; + if !output_path.is_dir() && output_path.file_name().is_none() { + return Err("Output file or path is invalid!".into()) + } } if let Some(header_file) = &self.header { - if !header_file.is_file() { return Err("Header file is invalid!".into()) }; + if !header_file.is_file() { + return Err("Header file is invalid!".into()) + }; + } + + if let Some(handlebars_template_file) = &self.template { + if !handlebars_template_file.is_file() { + return Err("Handlebars template file is invalid!".into()) + }; } let spec = config.chain_spec; let wasm_method = self.wasm_method.into(); let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); + let pallet = self.pallet.clone().unwrap_or_else(|| String::new()); + let pallet = pallet.as_bytes(); + let extrinsic = self.extrinsic.clone().unwrap_or_else(|| String::new()); + let extrinsic = extrinsic.as_bytes(); let genesis_storage = spec.build_storage()?; let mut changes = Default::default(); - let mut offchain_changes = Default::default(); let cache_size = Some(self.database_cache_size as usize); - let state = BenchmarkingState::::new(genesis_storage, cache_size)?; - let executor = NativeExecutor::::new( + let state_with_tracking = BenchmarkingState::::new( + genesis_storage.clone(), + cache_size, + self.record_proof, + true, + )?; + let state_without_tracking = + BenchmarkingState::::new(genesis_storage, cache_size, self.record_proof, false)?; + let executor = NativeElseWasmExecutor::::new( wasm_method, self.heap_pages, 2, // The runtime instances cache size. ); - let mut extensions = Extensions::default(); - extensions.register(KeystoreExt(Arc::new(KeyStore::new()) as SyncCryptoStorePtr)); - let (offchain, _) = TestOffchainExt::new(); - extensions.register(OffchainExt::new(offchain)); + let extensions = || -> Extensions { + let mut extensions = Extensions::default(); + extensions.register(KeystoreExt(Arc::new(KeyStore::new()) as SyncCryptoStorePtr)); + let (offchain, _) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + extensions.register(OffchainWorkerExt::new(offchain.clone())); + extensions.register(OffchainDbExt::new(offchain)); + extensions.register(TransactionPoolExt::new(pool)); + return extensions + }; + // Get Benchmark List + let state = &state_without_tracking; let result = StateMachine::<_, _, NumberFor, _>::new( - &state, + state, None, &mut changes, - &mut offchain_changes, &executor, - "Benchmark_dispatch_benchmark", - &( - &self.pallet, - &self.extrinsic, - self.lowest_range_values.clone(), - self.highest_range_values.clone(), - self.steps.clone(), - self.repeat, - !self.no_verify, - self.extra, - ).encode(), - extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, + "Benchmark_benchmark_metadata", + &(self.extra).encode(), + extensions(), + &sp_state_machine::backend::BackendRuntimeCode::new(state).runtime_code()?, sp_core::testing::TaskExecutor::new(), ) .execute(strategy.into()) - .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; - - let results = , String> as Decode>::decode(&mut &result[..]) - .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?; - - match results { - Ok(batches) => { - // If we are going to output results to a file... 
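After decoding the `BenchmarkList`, the command narrows it down using the `--pallet` and `--extrinsic` arguments, where an empty string or `*` acts as a wildcard. A stripped-down sketch of that selection logic, using plain tuples in place of the real metadata types:

```rust
/// A filter selects everything when empty or "*", otherwise it must match exactly.
fn matches(filter: &[u8], name: &[u8]) -> bool {
    filter.is_empty() || filter == b"*" || filter == name
}

fn select<'a>(
    list: &'a [(Vec<u8>, Vec<Vec<u8>>)], // (pallet, benchmark names)
    pallet: &[u8],
    extrinsic: &[u8],
) -> Vec<(&'a [u8], &'a [u8])> {
    let mut out = Vec::new();
    for (p, benchmarks) in list.iter().filter(|(p, _)| matches(pallet, p)) {
        for b in benchmarks.iter().filter(|b| matches(extrinsic, b)) {
            out.push((p.as_slice(), b.as_slice()));
        }
    }
    out
}

fn main() {
    let list = vec![
        (b"pallet_balances".to_vec(), vec![b"transfer".to_vec(), b"set_balance".to_vec()]),
        (b"pallet_system".to_vec(), vec![b"remark".to_vec()]),
    ];
    // `--pallet pallet_balances --extrinsic '*'` selects both balances benchmarks.
    assert_eq!(select(&list, b"pallet_balances", b"*").len(), 2);
    // Empty filters behave like `*`.
    assert_eq!(select(&list, b"", b"").len(), 3);
}
```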
- if let Some(output_path) = &self.output { - if self.trait_def { - crate::writer::write_trait(&batches, output_path, self)?; - } else { - crate::writer::write_results(&batches, output_path, self)?; + .map_err(|e| format!("Error getting benchmark list: {:?}", e))?; + + let (list, storage_info) = + <(Vec, Vec) as Decode>::decode(&mut &result[..]) + .map_err(|e| format!("Failed to decode benchmark metadata: {:?}", e))?; + + // Use the benchmark list and the user input to determine the set of benchmarks to run. + let mut benchmarks_to_run = Vec::new(); + list.iter() + .filter(|item| pallet.is_empty() || pallet == &b"*"[..] || pallet == &item.pallet[..]) + .for_each(|item| { + for benchmark in &item.benchmarks { + if extrinsic.is_empty() || + &extrinsic[..] == &b"*"[..] || + extrinsic == benchmark.name + { + benchmarks_to_run.push(( + item.pallet.clone(), + benchmark.name.clone(), + benchmark.components.clone(), + )) } } + }); - for batch in batches.into_iter() { - // Print benchmark metadata - println!( - "Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}", - String::from_utf8(batch.pallet).expect("Encoded from String; qed"), - String::from_utf8(batch.benchmark).expect("Encoded from String; qed"), - self.lowest_range_values, - self.highest_range_values, - self.steps, - self.repeat, - ); + if benchmarks_to_run.is_empty() { + return Err("No benchmarks found which match your input.".into()) + } - // Skip raw data + analysis if there are no results - if batch.results.is_empty() { continue } - - if self.raw_data { - // Print the table header - batch.results[0].components.iter().for_each(|param| print!("{:?},", param.0)); - - print!("extrinsic_time,storage_root_time,reads,repeat_reads,writes,repeat_writes\n"); - // Print the values - batch.results.iter().for_each(|result| { - let parameters = &result.components; - parameters.iter().for_each(|param| print!("{:?},", param.1)); - // Print extrinsic time and storage root time - print!("{:?},{:?},{:?},{:?},{:?},{:?}\n", - result.extrinsic_time, - result.storage_root_time, - result.reads, - result.repeat_reads, - result.writes, - result.repeat_writes, - ); - }); + if self.list { + // List benchmarks instead of running them + list_benchmark(benchmarks_to_run); + return Ok(()) + } - println!(); - } + // Run the benchmarks + let mut batches = Vec::new(); + let mut batches_db = Vec::new(); + let mut timer = time::SystemTime::now(); + for (pallet, extrinsic, components) in benchmarks_to_run { + let all_components = if components.is_empty() { + vec![Default::default()] + } else { + let mut all_components = Vec::new(); + for (idx, (name, low, high)) in components.iter().enumerate() { + let lowest = self.lowest_range_values.get(idx).cloned().unwrap_or(*low); + let highest = self.highest_range_values.get(idx).cloned().unwrap_or(*high); - // Conduct analysis. - if !self.no_median_slopes { - println!("Median Slopes Analysis\n========"); - if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::ExtrinsicTime) { - println!("-- Extrinsic Time --\n{}", analysis); - } - if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::Reads) { - println!("Reads = {:?}", analysis); - } - if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::Writes) { - println!("Writes = {:?}", analysis); - } + let diff = highest - lowest; + + // Create up to `STEPS` steps for that component between high and low. 
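The step generation that continues below picks roughly `--steps` evenly spaced values between a component's low and high bound. Worked through for `low = 0`, `high = 100`, `steps = 4`: `diff = 100`, `step_size = max(100 / 4, 1) = 25`, `num_of_steps = 100 / 25 + 1 = 5`, yielding the component values 0, 25, 50, 75, 100. The same arithmetic in isolation:

```rust
/// Values a benchmark component takes for a given `--steps` setting.
/// `steps` must be at least 1 (the CLI default), otherwise this divides by zero.
fn component_values(low: u32, high: u32, steps: u32) -> Vec<u32> {
    let diff = high - low;
    // At least one step, even if `steps` exceeds the range.
    let step_size = (diff / steps).max(1);
    let num_of_steps = diff / step_size + 1;
    (0..num_of_steps).map(|s| low + step_size * s).collect()
}

fn main() {
    assert_eq!(component_values(0, 100, 4), vec![0, 25, 50, 75, 100]);
    // A range smaller than the step count degrades to testing every value.
    assert_eq!(component_values(1, 3, 10), vec![1, 2, 3]);
}
```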
+ let step_size = (diff / self.steps).max(1); + let num_of_steps = diff / step_size + 1; + for s in 0..num_of_steps { + // This is the value we will be testing for component `name` + let component_value = lowest + step_size * s; + + // Select the max value for all the other components. + let c: Vec<(BenchmarkParameter, u32)> = components + .iter() + .enumerate() + .map(|(idx, (n, _, h))| { + if n == name { + (*n, component_value) + } else { + (*n, *self.highest_range_values.get(idx).unwrap_or(h)) + } + }) + .collect(); + all_components.push(c); } - if !self.no_min_squares { - println!("Min Squares Analysis\n========"); - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime) { - println!("-- Extrinsic Time --\n{}", analysis); - } - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads) { - println!("Reads = {:?}", analysis); - } - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes) { - println!("Writes = {:?}", analysis); + } + all_components + }; + for (s, selected_components) in all_components.iter().enumerate() { + // First we run a verification + if !self.no_verify { + // Dont use these results since verification code will add overhead + let state = &state_without_tracking; + let _results = StateMachine::<_, _, NumberFor, _>::new( + state, + None, + &mut changes, + &executor, + "Benchmark_dispatch_benchmark", + &( + &pallet.clone(), + &extrinsic.clone(), + &selected_components.clone(), + true, // run verification code + 1, // no need to do internal repeats + ) + .encode(), + extensions(), + &sp_state_machine::backend::BackendRuntimeCode::new(state) + .runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(strategy.into()) + .map_err(|e| { + format!("Error executing and verifying runtime benchmark: {:?}", e) + })?; + } + // Do one loop of DB tracking. + { + let state = &state_with_tracking; + let result = StateMachine::<_, _, NumberFor, _>::new( + state, // todo remove tracking + None, + &mut changes, + &executor, + "Benchmark_dispatch_benchmark", + &( + &pallet.clone(), + &extrinsic.clone(), + &selected_components.clone(), + false, // dont run verification code for final values + self.repeat, + ) + .encode(), + extensions(), + &sp_state_machine::backend::BackendRuntimeCode::new(state) + .runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(strategy.into()) + .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; + + let batch = + , String> as Decode>::decode( + &mut &result[..], + ) + .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))??; + + batches_db.extend(batch); + } + // Finally run a bunch of loops to get extrinsic timing information. 
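The timing loop that follows runs the whole state machine `--external-repeat` times from the client (each run performing `--repeat` repetitions inside the runtime) and, as the code below shows, logs progress at most once every five seconds. The throttling pattern on its own, with a sleep standing in for one benchmark run:

```rust
use std::{thread, time};

fn main() {
    let mut timer = time::SystemTime::now();
    for r in 0..20u32 {
        thread::sleep(time::Duration::from_millis(500)); // stand-in for one benchmark run

        // Only emit a progress line if at least 5s elapsed since the last one;
        // `elapsed()` can fail if the clock went backwards, in which case we skip.
        if let Ok(elapsed) = timer.elapsed() {
            if elapsed >= time::Duration::from_secs(5) {
                timer = time::SystemTime::now();
                println!("running benchmark: repeat {}/20", r);
            }
        }
    }
}
```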
+ for r in 0..self.external_repeat { + let state = &state_without_tracking; + let result = StateMachine::<_, _, NumberFor, _>::new( + state, // todo remove tracking + None, + &mut changes, + &executor, + "Benchmark_dispatch_benchmark", + &( + &pallet.clone(), + &extrinsic.clone(), + &selected_components.clone(), + false, // dont run verification code for final values + self.repeat, + ) + .encode(), + extensions(), + &sp_state_machine::backend::BackendRuntimeCode::new(state) + .runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(strategy.into()) + .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; + + let batch = + , String> as Decode>::decode( + &mut &result[..], + ) + .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))??; + + batches.extend(batch); + + // Show progress information + if let Some(elapsed) = timer.elapsed().ok() { + if elapsed >= time::Duration::from_secs(5) { + timer = time::SystemTime::now(); + log::info!( + "Running Benchmark:\t{}\t{}\t{}/{}\t{}/{}", + String::from_utf8(pallet.clone()) + .expect("Encoded from String; qed"), + String::from_utf8(extrinsic.clone()) + .expect("Encoded from String; qed"), + s, // todo show step + self.steps, + r, + self.external_repeat, + ); } } } - }, - Err(error) => eprintln!("Error: {:?}", error), + } + } + + // Combine all of the benchmark results, so that benchmarks of the same pallet/function + // are together. + let batches: Vec = combine_batches(batches, batches_db); + + if let Some(output_path) = &self.output { + crate::writer::write_results(&batches, &storage_info, output_path, self)?; + } + + for batch in batches.into_iter() { + // Print benchmark metadata + println!( + "Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}", + String::from_utf8(batch.pallet).expect("Encoded from String; qed"), + String::from_utf8(batch.benchmark).expect("Encoded from String; qed"), + self.lowest_range_values, + self.highest_range_values, + self.steps, + self.repeat, + ); + + // Skip raw data + analysis if there are no results + if batch.time_results.is_empty() { + continue + } + + if self.raw_data { + // Print the table header + batch.time_results[0] + .components + .iter() + .for_each(|param| print!("{:?},", param.0)); + + print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n"); + // Print the values + batch.time_results.iter().for_each(|result| { + let parameters = &result.components; + parameters.iter().for_each(|param| print!("{:?},", param.1)); + // Print extrinsic time and storage root time + print!( + "{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n", + result.extrinsic_time, + result.storage_root_time, + result.reads, + result.repeat_reads, + result.writes, + result.repeat_writes, + result.proof_size, + ); + }); + + println!(); + } + + if !self.no_storage_info { + let mut comments: Vec = Default::default(); + crate::writer::add_storage_comments( + &mut comments, + &batch.db_results, + &storage_info, + ); + println!("Raw Storage Info\n========"); + for comment in comments { + println!("{}", comment); + } + println!(""); + } + + // Conduct analysis. 
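The analysis block that follows calls `Analysis::median_slopes` and `Analysis::min_squares_iqr` on the collected results. Median-of-slopes fitting is attractive for benchmarks because a single outlier sample barely moves the estimate; conceptually it is in the family of Theil–Sen estimators. A dependency-free sketch of that idea — not frame-benchmarking's exact algorithm:

```rust
/// Theil–Sen style estimate: the median of all pairwise slopes.
fn median_slope(points: &[(f64, f64)]) -> Option<f64> {
    let mut slopes = Vec::new();
    for (i, (x1, y1)) in points.iter().enumerate() {
        for (x2, y2) in &points[i + 1..] {
            if (x2 - x1).abs() > f64::EPSILON {
                slopes.push((y2 - y1) / (x2 - x1));
            }
        }
    }
    if slopes.is_empty() {
        return None;
    }
    slopes.sort_by(|a, b| a.partial_cmp(b).expect("no NaNs constructed above"));
    Some(slopes[slopes.len() / 2])
}

fn main() {
    // Mostly `weight = 10 * n + 100`, with one wild outlier at n = 3.
    let samples = [(1.0, 110.0), (2.0, 120.0), (3.0, 900.0), (4.0, 140.0), (5.0, 150.0)];
    let slope = median_slope(&samples).unwrap();
    // Least squares would be dragged far off by the outlier; the median slope isn't.
    assert!((slope - 10.0).abs() < 1e-9);
}
```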
+ if !self.no_median_slopes { + println!("Median Slopes Analysis\n========"); + if let Some(analysis) = + Analysis::median_slopes(&batch.time_results, BenchmarkSelector::ExtrinsicTime) + { + println!("-- Extrinsic Time --\n{}", analysis); + } + if let Some(analysis) = + Analysis::median_slopes(&batch.db_results, BenchmarkSelector::Reads) + { + println!("Reads = {:?}", analysis); + } + if let Some(analysis) = + Analysis::median_slopes(&batch.db_results, BenchmarkSelector::Writes) + { + println!("Writes = {:?}", analysis); + } + println!(""); + } + if !self.no_min_squares { + println!("Min Squares Analysis\n========"); + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.time_results, BenchmarkSelector::ExtrinsicTime) + { + println!("-- Extrinsic Time --\n{}", analysis); + } + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.db_results, BenchmarkSelector::Reads) + { + println!("Reads = {:?}", analysis); + } + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.db_results, BenchmarkSelector::Writes) + { + println!("Writes = {:?}", analysis); + } + println!(""); + } } Ok(()) @@ -192,3 +478,11 @@ impl CliConfiguration for BenchmarkCmd { }) } } + +/// List the benchmarks available in the runtime, in a CSV friendly format. +fn list_benchmark(benchmarks_to_run: Vec<(Vec, Vec, Vec<(BenchmarkParameter, u32, u32)>)>) { + println!("pallet, benchmark"); + for (pallet, extrinsic, _components) in benchmarks_to_run { + println!("{}, {}", String::from_utf8_lossy(&pallet), String::from_utf8_lossy(&extrinsic)); + } +} diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 725ed3113becb..316ddfb8d0c10 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,20 +21,26 @@ mod writer; use sc_cli::{ExecutionStrategy, WasmExecutionMethod}; use std::fmt::Debug; +// Add a more relaxed parsing for pallet names by allowing pallet directory names with `-` to be +// used like crate names with `_` +fn parse_pallet_name(pallet: &str) -> String { + pallet.replace("-", "_") +} + /// The `benchmark` command used to benchmark FRAME Pallets. #[derive(Debug, structopt::StructOpt)] pub struct BenchmarkCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[structopt(short, long)] - pub pallet: String, + #[structopt(short, long, parse(from_str = parse_pallet_name), required_unless = "list")] + pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[structopt(short, long)] - pub extrinsic: String, + #[structopt(short, long, required_unless = "list")] + pub extrinsic: Option, /// Select how many samples we should take across the variable components. - #[structopt(short, long, use_delimiter = true)] - pub steps: Vec, + #[structopt(short, long, default_value = "1")] + pub steps: u32, /// Indicates lowest values for each of the component ranges. #[structopt(long = "low", use_delimiter = true)] @@ -44,10 +50,16 @@ pub struct BenchmarkCmd { #[structopt(long = "high", use_delimiter = true)] pub highest_range_values: Vec, - /// Select how many repetitions of this benchmark should run. + /// Select how many repetitions of this benchmark should run from within the wasm. 
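The `parse_pallet_name` hook added above means users can pass the crate-directory spelling (`pallet-balances`) and still match the snake_case pallet name the runtime reports. Its behaviour, demonstrated in isolation:

```rust
// Mirrors the `parse_pallet_name` helper: directory-style dashes become
// underscores so `--pallet pallet-balances` matches `pallet_balances`.
fn parse_pallet_name(pallet: &str) -> String {
    pallet.replace("-", "_")
}

fn main() {
    assert_eq!(parse_pallet_name("pallet-balances"), "pallet_balances");
    // Already-underscored names pass through unchanged.
    assert_eq!(parse_pallet_name("frame_system"), "frame_system");
}
```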
#[structopt(short, long, default_value = "1")] pub repeat: u32, + /// Select how many repetitions of this benchmark should run from the client. + /// + /// NOTE: Using this alone may give slower results, but will afford you maximum Wasm memory. + #[structopt(long, default_value = "1")] + pub external_repeat: u32, + /// Print the raw results. #[structopt(long = "raw")] pub raw_data: bool, @@ -68,11 +80,19 @@ pub struct BenchmarkCmd { #[structopt(long)] pub header: Option, - /// Output the trait definition to a Rust file. + /// Path to Handlebars template file used for outputting benchmark results. (Optional) + #[structopt(long)] + pub template: Option, + + /// Which analysis function to use when outputting benchmarks: + /// * min-squares (default) + /// * median-slopes + /// * max (max of min squares and median slopes for each value) #[structopt(long)] - pub trait_def: bool, + pub output_analysis: Option, - /// Set the heap pages while running benchmarks. + /// Set the heap pages while running benchmarks. If not set, the default value from the client + /// is used. #[structopt(long)] pub heap_pages: Option, @@ -80,21 +100,14 @@ pub struct BenchmarkCmd { #[structopt(long)] pub no_verify: bool, - /// Display and run extra benchmarks that would otherwise not be needed for weight construction. + /// Display and run extra benchmarks that would otherwise not be needed for weight + /// construction. #[structopt(long)] pub extra: bool, - /// Output files using spaces instead of tabs. + /// Estimate PoV size. #[structopt(long)] - pub spaces: bool, - - /// Output benchmarks file using this struct name. - #[structopt(long, default_value = "WeightInfo")] - pub r#struct: String, - - /// Output benchmarks file using this trait name. - #[structopt(long, default_value = "WeightInfo")] - pub r#trait: String, + pub record_proof: bool, #[allow(missing_docs)] #[structopt(flatten)] @@ -113,13 +126,26 @@ pub struct BenchmarkCmd { #[structopt( long = "wasm-execution", value_name = "METHOD", - possible_values = &WasmExecutionMethod::enabled_variants(), + possible_values = &WasmExecutionMethod::variants(), case_insensitive = true, - default_value = "Interpreted" + default_value = "compiled" )] pub wasm_method: WasmExecutionMethod, /// Limit the memory the database cache can use. #[structopt(long = "db-cache", value_name = "MiB", default_value = "128")] pub database_cache_size: u32, + + /// List the benchmarks that match your query rather than running them. + /// + /// When nothing is provided, we list all benchmarks. + #[structopt(long)] + pub list: bool, + + /// If enabled, the storage info is not displayed in the output next to the analysis. + /// + /// This is independent of the storage info appearing in the *output file*. Use a Handlebar + /// template for that purpose. + #[structopt(long)] + pub no_storage_info: bool, } diff --git a/utils/frame/benchmarking-cli/src/template.hbs b/utils/frame/benchmarking-cli/src/template.hbs new file mode 100644 index 0000000000000..36abf27f59a6e --- /dev/null +++ b/utils/frame/benchmarking-cli/src/template.hbs @@ -0,0 +1,51 @@ +{{header}} +//! Autogenerated weights for `{{pallet}}` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} +//! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: {{cmd.repeat}}, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` +//! 
EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} + +// Executed Command: +{{#each args as |arg|~}} +// {{arg}} +{{/each}} + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `{{pallet}}`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> {{pallet}}::WeightInfo for WeightInfo<T> { + {{~#each benchmarks as |benchmark|}} + {{~#each benchmark.comments as |comment|}} + // {{comment}} + {{~/each}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} + {{~#each benchmark.component_writes as |cw|}} + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + {{~/each}} + } + {{~/each}} +} diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index 23c1db06fb9c4..ede5b2d1355a7 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,314 +17,575 @@ // Outputs benchmark results to Rust files that can be ingested by the runtime. +use core::convert::TryInto; +use std::{ + collections::{HashMap, HashSet}, + fs, + path::PathBuf, +}; + +use inflector::Inflector; +use serde::Serialize; + use crate::BenchmarkCmd; -use std::fs::{self, File, OpenOptions}; -use std::io::prelude::*; -use std::path::PathBuf; -use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis}; +use frame_benchmarking::{ + Analysis, AnalysisChoice, BenchmarkBatchSplitResults, BenchmarkResult, BenchmarkSelector, + RegressionModel, +}; +use frame_support::traits::StorageInfo; +use sp_core::hexdisplay::HexDisplay; use sp_runtime::traits::Zero; const VERSION: &'static str = env!("CARGO_PKG_VERSION"); +const TEMPLATE: &str = include_str!("./template.hbs"); + +// This is the final structure we will pass to the Handlebars template. +#[derive(Serialize, Default, Debug, Clone)] +struct TemplateData { + args: Vec<String>, + date: String, + version: String, + pallet: String, + instance: String, + header: String, + cmd: CmdData, + benchmarks: Vec<BenchmarkData>, +} -pub fn open_file(path: PathBuf) -> Result<File, std::io::Error> { - OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(path) +// This is the final data we have about each benchmark.
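For orientation, a sketch of what the template above might render to for a hypothetical `pallet_example` with a single `transfer` benchmark; all numbers, names, and storage comments here are invented:

```rust
use frame_support::{traits::Get, weights::Weight};
use sp_std::marker::PhantomData;

/// Weight functions for `pallet_example` (hypothetical rendered output).
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_example::WeightInfo for WeightInfo<T> {
    // Storage: Example Accounts (r:1 w:1)
    fn transfer(b: u32, ) -> Weight {
        (45_000_000 as Weight)
            // Standard Error: 1_000
            .saturating_add((120_000 as Weight).saturating_mul(b as Weight))
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }
}
```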
+#[derive(Serialize, Default, Debug, Clone)] +struct BenchmarkData { + name: String, + components: Vec, + #[serde(serialize_with = "string_serialize")] + base_weight: u128, + #[serde(serialize_with = "string_serialize")] + base_reads: u128, + #[serde(serialize_with = "string_serialize")] + base_writes: u128, + component_weight: Vec, + component_reads: Vec, + component_writes: Vec, + comments: Vec, } -fn underscore(i: Number) -> String - where Number: std::string::ToString -{ - let mut s = String::new(); - let i_str = i.to_string(); - let a = i_str.chars().rev().enumerate(); - for (idx, val) in a { - if idx != 0 && idx % 3 == 0 { - s.insert(0, '_'); - } - s.insert(0, val); - } - s +// This forwards some specific metadata from the `BenchmarkCmd` +#[derive(Serialize, Default, Debug, Clone)] +struct CmdData { + steps: u32, + repeat: u32, + lowest_range_values: Vec, + highest_range_values: Vec, + execution: String, + wasm_execution: String, + chain: String, + db_cache: u32, + analysis_choice: String, } -pub fn write_trait( - batches: &[BenchmarkBatch], - path: &PathBuf, - cmd: &BenchmarkCmd, -) -> Result<(), std::io::Error> { - let mut file_path = path.clone(); - file_path.push("trait"); - file_path.set_extension("rs"); - let mut file = crate::writer::open_file(file_path)?; +// This encodes the component name and whether that component is used. +#[derive(Serialize, Debug, Clone, Eq, PartialEq)] +struct Component { + name: String, + is_used: bool, +} - let indent = if cmd.spaces {" "} else {"\t"}; +// This encodes the slope of some benchmark related to a component. +#[derive(Serialize, Debug, Clone, Eq, PartialEq)] +struct ComponentSlope { + name: String, + #[serde(serialize_with = "string_serialize")] + slope: u128, + #[serde(serialize_with = "string_serialize")] + error: u128, +} - let mut current_pallet = Vec::::new(); +// Small helper to create an `io::Error` from a string. +fn io_error(s: &str) -> std::io::Error { + use std::io::{Error, ErrorKind}; + Error::new(ErrorKind::Other, s) +} - // Skip writing if there are no batches - if batches.is_empty() { return Ok(()) } +// This function takes a list of `BenchmarkBatch` and organizes them by pallet into a `HashMap`. +// So this: `[(p1, b1), (p1, b2), (p2, b1), (p1, b3), (p2, b2)]` +// Becomes: +// +// ``` +// p1 -> [b1, b2, b3] +// p2 -> [b1, b2] +// ``` +fn map_results( + batches: &[BenchmarkBatchSplitResults], + storage_info: &[StorageInfo], + analysis_choice: &AnalysisChoice, +) -> Result>, std::io::Error> { + // Skip if batches is empty. 
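As a standalone sketch of the pallet-grouping that `map_results` performs (plain tuples in place of the benchmark types, which are assumptions for illustration):

```rust
use std::collections::HashMap;

// [(p1, b1), (p1, b2), (p2, b1)] becomes p1 -> [b1, b2], p2 -> [b1].
fn group_by_pallet(pairs: Vec<(String, String)>) -> HashMap<String, Vec<String>> {
    let mut map: HashMap<String, Vec<String>> = HashMap::new();
    for (pallet, benchmark) in pairs {
        map.entry(pallet).or_default().push(benchmark);
    }
    map
}
```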
+ if batches.is_empty() { + return Err(io_error("empty batches")) + } + + let mut all_benchmarks = HashMap::<_, Vec>::new(); for batch in batches { - // Skip writing if there are no results - if batch.results.is_empty() { continue } + // Skip if there are no results + if batch.time_results.is_empty() { + continue + } let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); - let benchmark_string = String::from_utf8(batch.benchmark.clone()).unwrap(); - - // only create new trait definitions when we go to a new pallet - if batch.pallet != current_pallet { - if !current_pallet.is_empty() { - // close trait - write!(file, "}}\n")?; - } + let instance_string = String::from_utf8(batch.instance.clone()).unwrap(); + let benchmark_data = get_benchmark_data(batch, storage_info, analysis_choice); + let pallet_benchmarks = all_benchmarks.entry((pallet_string, instance_string)).or_default(); + pallet_benchmarks.push(benchmark_data); + } + Ok(all_benchmarks) +} - // trait wrapper - write!(file, "// {}\n", pallet_string)?; - write!(file, "pub trait {} {{\n", cmd.r#trait)?; +// Get an iterator of errors from a model. If the model is `None` all errors are zero. +fn extract_errors(model: &Option) -> impl Iterator + '_ { + let mut errors = model.as_ref().map(|m| m.se.regressor_values.iter()); + std::iter::from_fn(move || match &mut errors { + Some(model) => model.next().map(|val| *val as u128), + _ => Some(0), + }) +} - current_pallet = batch.pallet.clone() - } +// Analyze and return the relevant results for a given benchmark. +fn get_benchmark_data( + batch: &BenchmarkBatchSplitResults, + storage_info: &[StorageInfo], + analysis_choice: &AnalysisChoice, +) -> BenchmarkData { + // You can use this to put any additional comments with the benchmarking output. + let mut comments = Vec::::new(); + + // Analyze benchmarks to get the linear regression. + let analysis_function = match analysis_choice { + AnalysisChoice::MinSquares => Analysis::min_squares_iqr, + AnalysisChoice::MedianSlopes => Analysis::median_slopes, + AnalysisChoice::Max => Analysis::max, + }; - // function name - write!(file, "{}fn {}(", indent, benchmark_string)?; + let extrinsic_time = analysis_function(&batch.time_results, BenchmarkSelector::ExtrinsicTime) + .expect("analysis function should return an extrinsic time for valid inputs"); + let reads = analysis_function(&batch.db_results, BenchmarkSelector::Reads) + .expect("analysis function should return the number of reads for valid inputs"); + let writes = analysis_function(&batch.db_results, BenchmarkSelector::Writes) + .expect("analysis function should return the number of writes for valid inputs"); + + // Analysis data may include components that are not used, this filters out anything whose value + // is zero. 
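The three loops that follow apply this filter to extrinsic time, reads, and writes respectively. As an isolated sketch of the filter itself, with simplified types standing in for the analysis output:

```rust
// Drop any (slope, name) pair whose fitted slope is zero: a zero slope means
// the component had no measurable effect on this metric.
fn used_slopes(slopes: Vec<u128>, names: Vec<String>) -> Vec<(String, u128)> {
    slopes
        .into_iter()
        .zip(names.into_iter())
        .filter(|(slope, _)| *slope != 0)
        .map(|(slope, name)| (name, slope))
        .collect()
}
```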
+ let mut used_components = Vec::new(); + let mut used_extrinsic_time = Vec::new(); + let mut used_reads = Vec::new(); + let mut used_writes = Vec::new(); + + extrinsic_time + .slopes + .into_iter() + .zip(extrinsic_time.names.iter()) + .zip(extract_errors(&extrinsic_time.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { + used_components.push(name); + } + used_extrinsic_time.push(ComponentSlope { + name: name.clone(), + slope: slope.saturating_mul(1000), + error: error.saturating_mul(1000), + }); + } + }); + reads + .slopes + .into_iter() + .zip(reads.names.iter()) + .zip(extract_errors(&reads.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { + used_components.push(name); + } + used_reads.push(ComponentSlope { name: name.clone(), slope, error }); + } + }); + writes + .slopes + .into_iter() + .zip(writes.names.iter()) + .zip(extract_errors(&writes.model)) + .for_each(|((slope, name), error)| { + if !slope.is_zero() { + if !used_components.contains(&name) { + used_components.push(name); + } + used_writes.push(ComponentSlope { name: name.clone(), slope, error }); + } + }); - // params - let components = &batch.results[0].components; - for component in components { - write!(file, "{:?}: u32, ", component.0)?; - } - // return value - write!(file, ") -> Weight;\n")?; + // This puts a marker on any component which is entirely unused in the weight formula. + let components = batch.time_results[0] + .components + .iter() + .map(|(name, _)| -> Component { + let name_string = name.to_string(); + let is_used = used_components.contains(&&name_string); + Component { name: name_string, is_used } + }) + .collect::>(); + + // We add additional comments showing which storage items were touched. + add_storage_comments(&mut comments, &batch.db_results, storage_info); + + BenchmarkData { + name: String::from_utf8(batch.benchmark.clone()).unwrap(), + components, + base_weight: extrinsic_time.base.saturating_mul(1000), + base_reads: reads.base, + base_writes: writes.base, + component_weight: used_extrinsic_time, + component_reads: used_reads, + component_writes: used_writes, + comments, } - - // final close trait - write!(file, "}}\n")?; - - Ok(()) } +// Create weight file from benchmark data and Handlebars template. pub fn write_results( - batches: &[BenchmarkBatch], + batches: &[BenchmarkBatchSplitResults], + storage_info: &[StorageInfo], path: &PathBuf, cmd: &BenchmarkCmd, ) -> Result<(), std::io::Error> { + // Use custom template if provided. + let template: String = match &cmd.template { + Some(template_file) => fs::read_to_string(template_file)?, + None => TEMPLATE.to_string(), + }; + // Use header if provided let header_text = match &cmd.header { Some(header_file) => { let text = fs::read_to_string(header_file)?; - Some(text) + text }, - None => None, + None => String::new(), }; - let indent = if cmd.spaces {" "} else {"\t"}; - let date = chrono::Utc::now(); + // Date string metadata + let date = chrono::Utc::now().format("%Y-%m-%d").to_string(); + + // Full CLI args passed to trigger the benchmark. 
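These captured args are what the template echoes back under `// Executed Command:` in each generated file, so a run can be reproduced later. A hypothetical invocation (flag set invented for illustration) would appear as:

```rust
// Executed Command:
// ./target/release/substrate
// benchmark
// --chain=dev
// --steps=50
// --repeat=20
// --pallet=pallet_balances
// --extrinsic=*
// --wasm-execution=compiled
```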
+ let args = std::env::args().collect::>(); + + // Which analysis function should be used when outputting benchmarks + let analysis_choice: AnalysisChoice = + cmd.output_analysis.clone().try_into().map_err(|e| io_error(e))?; + + // Capture individual args + let cmd_data = CmdData { + steps: cmd.steps.clone(), + repeat: cmd.repeat.clone(), + lowest_range_values: cmd.lowest_range_values.clone(), + highest_range_values: cmd.highest_range_values.clone(), + execution: format!("{:?}", cmd.execution), + wasm_execution: cmd.wasm_method.to_string(), + chain: format!("{:?}", cmd.shared_params.chain), + db_cache: cmd.database_cache_size, + analysis_choice: format!("{:?}", analysis_choice), + }; - let mut current_pallet = Vec::::new(); + // New Handlebars instance with helpers. + let mut handlebars = handlebars::Handlebars::new(); + handlebars.register_helper("underscore", Box::new(UnderscoreHelper)); + handlebars.register_helper("join", Box::new(JoinHelper)); + // Don't HTML escape any characters. + handlebars.register_escape_fn(|s| -> String { s.to_string() }); + + // Organize results by pallet into a JSON map + let all_results = map_results(batches, storage_info, &analysis_choice)?; + for ((pallet, instance), results) in all_results.iter() { + let mut file_path = path.clone(); + // If a user only specified a directory... + if file_path.is_dir() { + // Check if there might be multiple instances benchmarked. + if all_results.keys().any(|(p, i)| p == pallet && i != instance) { + // Create new file: "path/to/pallet_name_instance_name.rs". + file_path.push(pallet.clone() + "_" + &instance.to_snake_case()); + } else { + // Create new file: "path/to/pallet_name.rs". + file_path.push(pallet.clone()); + } + file_path.set_extension("rs"); + } - // Skip writing if there are no batches - if batches.is_empty() { return Ok(()) } + let hbs_data = TemplateData { + args: args.clone(), + date: date.clone(), + version: VERSION.to_string(), + pallet: pallet.to_string(), + instance: instance.to_string(), + header: header_text.clone(), + cmd: cmd_data.clone(), + benchmarks: results.clone(), + }; + + let mut output_file = fs::File::create(file_path)?; + handlebars + .render_template_to_write(&template, &hbs_data, &mut output_file) + .map_err(|e| io_error(&e.to_string()))?; + } + Ok(()) +} - let mut batches_iter = batches.iter().peekable(); +// This function looks at the keys touched during the benchmark, and the storage info we collected +// from the pallets, and creates comments with information about the storage keys touched during +// each benchmark. 
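Concretely, the comments this produces end up directly above each generated weight function. A sketch of the three shapes they take (pallet and storage names invented; the `unknown` form is emitted when a key prefix has no matching `StorageInfo`):

```rust
// Storage: System Account (r:1 w:1)
// Storage: Balances TotalIssuance (r:1 w:0)
// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
```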
+pub(crate) fn add_storage_comments( + comments: &mut Vec, + results: &[BenchmarkResult], + storage_info: &[StorageInfo], +) { + let mut storage_info_map = storage_info + .iter() + .map(|info| (info.prefix.clone(), info)) + .collect::>(); + + // Special hack to show `Skipped Metadata` + let skip_storage_info = StorageInfo { + pallet_name: b"Skipped".to_vec(), + storage_name: b"Metadata".to_vec(), + prefix: b"Skipped Metadata".to_vec(), + max_values: None, + max_size: None, + }; + storage_info_map.insert(skip_storage_info.prefix.clone(), &skip_storage_info); + + // Special hack to show `Benchmark Override` + let benchmark_override = StorageInfo { + pallet_name: b"Benchmark".to_vec(), + storage_name: b"Override".to_vec(), + prefix: b"Benchmark Override".to_vec(), + max_values: None, + max_size: None, + }; + storage_info_map.insert(benchmark_override.prefix.clone(), &benchmark_override); - let first_pallet = String::from_utf8( - batches_iter.peek().expect("we checked that batches is not empty").pallet.clone() - ).unwrap(); + // This tracks the keys we already identified, so we only generate a single comment. + let mut identified = HashSet::>::new(); - let mut file_path = path.clone(); - file_path.push(first_pallet); - file_path.set_extension("rs"); + for result in results.clone() { + for (key, reads, writes, whitelisted) in &result.keys { + // skip keys which are whitelisted + if *whitelisted { + continue + } + let prefix_length = key.len().min(32); + let prefix = key[0..prefix_length].to_vec(); + if identified.contains(&prefix) { + // skip adding comments for keys we already identified + continue + } else { + // track newly identified keys + identified.insert(prefix.clone()); + } + match storage_info_map.get(&prefix) { + Some(key_info) => { + let comment = format!( + "Storage: {} {} (r:{} w:{})", + String::from_utf8(key_info.pallet_name.clone()) + .expect("encoded from string"), + String::from_utf8(key_info.storage_name.clone()) + .expect("encoded from string"), + reads, + writes, + ); + comments.push(comment) + }, + None => { + let comment = format!( + "Storage: unknown [0x{}] (r:{} w:{})", + HexDisplay::from(key), + reads, + writes, + ); + comments.push(comment) + }, + } + } + } +} - let mut file = open_file(file_path)?; +// Add an underscore after every 3rd character, i.e. a separator for large numbers. +fn underscore(i: Number) -> String +where + Number: std::string::ToString, +{ + let mut s = String::new(); + let i_str = i.to_string(); + let a = i_str.chars().rev().enumerate(); + for (idx, val) in a { + if idx != 0 && idx % 3 == 0 { + s.insert(0, '_'); + } + s.insert(0, val); + } + s +} - while let Some(batch) = batches_iter.next() { - // Skip writing if there are no results - if batch.results.is_empty() { continue } +// A Handlebars helper to add an underscore after every 3rd character, +// i.e. a separator for large numbers. 
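A test sketch one could add for the `underscore` function above, based on its reverse-and-insert logic:

```rust
#[test]
fn underscore_separates_thousands() {
    assert_eq!(underscore(999u32), "999");
    assert_eq!(underscore(1234u32), "1_234");
    assert_eq!(underscore(1234567u64), "1_234_567");
}
```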
+#[derive(Clone, Copy)] +struct UnderscoreHelper; +impl handlebars::HelperDef for UnderscoreHelper { + fn call<'reg: 'rc, 'rc>( + &self, + h: &handlebars::Helper, + _: &handlebars::Handlebars, + _: &handlebars::Context, + _rc: &mut handlebars::RenderContext, + out: &mut dyn handlebars::Output, + ) -> handlebars::HelperResult { + use handlebars::JsonRender; + let param = h.param(0).unwrap(); + let underscore_param = underscore(param.value().render()); + out.write(&underscore_param)?; + Ok(()) + } +} - let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); - let benchmark_string = String::from_utf8(batch.benchmark.clone()).unwrap(); +// A helper to join a string of vectors. +#[derive(Clone, Copy)] +struct JoinHelper; +impl handlebars::HelperDef for JoinHelper { + fn call<'reg: 'rc, 'rc>( + &self, + h: &handlebars::Helper, + _: &handlebars::Handlebars, + _: &handlebars::Context, + _rc: &mut handlebars::RenderContext, + out: &mut dyn handlebars::Output, + ) -> handlebars::HelperResult { + use handlebars::JsonRender; + let param = h.param(0).unwrap(); + let value = param.value(); + let joined = if value.is_array() { + value + .as_array() + .unwrap() + .iter() + .map(|v| v.render()) + .collect::>() + .join(" ") + } else { + value.render() + }; + out.write(&joined)?; + Ok(()) + } +} - // only create new trait definitions when we go to a new pallet - if batch.pallet != current_pallet { - // optional header and copyright - if let Some(header) = &header_text { - write!(file, "{}\n", header)?; - } +// u128 does not serialize well into JSON for `handlebars`, so we represent it as a string. +fn string_serialize(x: &u128, s: S) -> Result +where + S: serde::Serializer, +{ + s.serialize_str(&x.to_string()) +} - // title of file - write!(file, "//! Weights for {}\n", pallet_string)?; - - // auto-generation note - write!( - file, - "//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {}\n", - VERSION, - )?; - - // date of generation + some settings - write!( - file, - "//! DATE: {}, STEPS: {:?}, REPEAT: {}, LOW RANGE: {:?}, HIGH RANGE: {:?}\n", - date.format("%Y-%m-%d"), - cmd.steps, - cmd.repeat, - cmd.lowest_range_values, - cmd.highest_range_values, - )?; - - // more settings - write!( - file, - "//! 
EXECUTION: {:?}, WASM-EXECUTION: {}, CHAIN: {:?}, DB CACHE: {}\n", - cmd.execution, - cmd.wasm_method, - cmd.shared_params.chain, - cmd.database_cache_size, - )?; - - // allow statements - write!( - file, - "#![allow(unused_parens)]\n#![allow(unused_imports)]\n\n", - )?; - - // general imports - write!( - file, - "use frame_support::{{traits::Get, weights::Weight}};\nuse sp_std::marker::PhantomData;\n\n" - )?; - - // struct for weights - write!(file, "pub struct {}(PhantomData);\n", cmd.r#struct)?; - - // trait wrapper - write!( - file, - "impl {}::{} for {} {{\n", - pallet_string, - cmd.r#trait, - cmd.r#struct, - )?; - - current_pallet = batch.pallet.clone() +#[cfg(test)] +mod test { + use super::*; + use frame_benchmarking::{BenchmarkBatchSplitResults, BenchmarkParameter, BenchmarkResult}; + + fn test_data( + pallet: &[u8], + benchmark: &[u8], + param: BenchmarkParameter, + base: u32, + slope: u32, + ) -> BenchmarkBatchSplitResults { + let mut results = Vec::new(); + for i in 0..5 { + results.push(BenchmarkResult { + components: vec![(param, i), (BenchmarkParameter::z, 0)], + extrinsic_time: (base + slope * i).into(), + storage_root_time: (base + slope * i).into(), + reads: (base + slope * i).into(), + repeat_reads: 0, + writes: (base + slope * i).into(), + repeat_writes: 0, + proof_size: 0, + keys: vec![], + }) } - // Analysis results - let extrinsic_time = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime).unwrap(); - let reads = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads).unwrap(); - let writes = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes).unwrap(); - - // Analysis data may include components that are not used, this filters out anything whose value is zero. - let mut used_components = Vec::new(); - let mut used_extrinsic_time = Vec::new(); - let mut used_reads = Vec::new(); - let mut used_writes = Vec::new(); - extrinsic_time.slopes.iter().zip(extrinsic_time.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_extrinsic_time.push((slope, name)); - } - }); - reads.slopes.iter().zip(reads.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_reads.push((slope, name)); - } - }); - writes.slopes.iter().zip(writes.names.iter()).for_each(|(slope, name)| { - if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_writes.push((slope, name)); - } - }); - - let all_components = batch.results[0].components - .iter() - .map(|(name, _)| -> String { return name.to_string() }) - .collect::>(); - - // function name - write!(file, "{}fn {}(", indent, benchmark_string)?; - // params - for component in all_components { - if used_components.contains(&&component) { - write!(file, "{}: u32, ", component)?; - } else { - write!(file, "_{}: u32, ", component)?; - } - } - // return value - write!(file, ") -> Weight {{\n")?; - - write!(file, "{}{}({} as Weight)\n", indent, indent, underscore(extrinsic_time.base.saturating_mul(1000)))?; - used_extrinsic_time.iter().try_for_each(|(slope, name)| -> Result<(), std::io::Error> { - write!( - file, - "{}{}{}.saturating_add(({} as Weight).saturating_mul({} as Weight))\n", - indent, indent, indent, - underscore(slope.saturating_mul(1000)), - name, - ) - })?; - - if !reads.base.is_zero() { - write!( - file, - "{}{}{}.saturating_add(T::DbWeight::get().reads({} as Weight))\n", - 
indent, indent, indent, - reads.base, - )?; - } - used_reads.iter().try_for_each(|(slope, name)| -> Result<(), std::io::Error> { - write!( - file, - "{}{}{}.saturating_add(T::DbWeight::get().reads(({} as Weight).saturating_mul({} as Weight)))\n", - indent, indent, indent, - slope, - name, - ) - })?; - - if !writes.base.is_zero() { - write!( - file, - "{}{}{}.saturating_add(T::DbWeight::get().writes({} as Weight))\n", - indent, indent, indent, - writes.base, - )?; - } - used_writes.iter().try_for_each(|(slope, name)| -> Result<(), std::io::Error> { - write!( - file, - "{}{}{}.saturating_add(T::DbWeight::get().writes(({} as Weight).saturating_mul({} as Weight)))\n", - indent, indent, indent, - slope, - name, - ) - })?; - - // close function - write!(file, "{}}}\n", indent)?; - - // Check if this is the end of the iterator - if let Some(next) = batches_iter.peek() { - // Next pallet is different than current pallet, so we close up the file and open a new one. - if next.pallet != current_pallet { - write!(file, "}}\n")?; - let next_pallet = String::from_utf8(next.pallet.clone()).unwrap(); - - let mut file_path = path.clone(); - file_path.push(next_pallet); - file_path.set_extension("rs"); - file = open_file(file_path)?; - } - } else { - // This is the end of the iterator, so we close up the final file. - write!(file, "}}\n")?; + return BenchmarkBatchSplitResults { + pallet: [pallet.to_vec(), b"_pallet".to_vec()].concat(), + instance: b"instance".to_vec(), + benchmark: [benchmark.to_vec(), b"_benchmark".to_vec()].concat(), + time_results: results.clone(), + db_results: results, } } - Ok(()) + fn check_data(benchmark: &BenchmarkData, component: &str, base: u128, slope: u128) { + assert_eq!( + benchmark.components, + vec![ + Component { name: component.to_string(), is_used: true }, + Component { name: "z".to_string(), is_used: false }, + ], + ); + // Weights multiplied by 1,000 + assert_eq!(benchmark.base_weight, base * 1_000); + assert_eq!( + benchmark.component_weight, + vec![ComponentSlope { name: component.to_string(), slope: slope * 1_000, error: 0 }] + ); + // DB Reads/Writes are untouched + assert_eq!(benchmark.base_reads, base); + assert_eq!( + benchmark.component_reads, + vec![ComponentSlope { name: component.to_string(), slope, error: 0 }] + ); + assert_eq!(benchmark.base_writes, base); + assert_eq!( + benchmark.component_writes, + vec![ComponentSlope { name: component.to_string(), slope, error: 0 }] + ); + } + + #[test] + fn map_results_works() { + let mapped_results = map_results( + &[ + test_data(b"first", b"first", BenchmarkParameter::a, 10, 3), + test_data(b"first", b"second", BenchmarkParameter::b, 9, 2), + test_data(b"second", b"first", BenchmarkParameter::c, 3, 4), + ], + &[], + &AnalysisChoice::default(), + ) + .unwrap(); + + let first_benchmark = &mapped_results + .get(&("first_pallet".to_string(), "instance".to_string())) + .unwrap()[0]; + assert_eq!(first_benchmark.name, "first_benchmark"); + check_data(first_benchmark, "a", 10, 3); + + let second_benchmark = &mapped_results + .get(&("first_pallet".to_string(), "instance".to_string())) + .unwrap()[1]; + assert_eq!(second_benchmark.name, "second_benchmark"); + check_data(second_benchmark, "b", 9, 2); + + let second_pallet_benchmark = &mapped_results + .get(&("second_pallet".to_string(), "instance".to_string())) + .unwrap()[0]; + assert_eq!(second_pallet_benchmark.name, "first_benchmark"); + check_data(second_pallet_benchmark, "c", 3, 4); + } } diff --git a/utils/frame/frame-utilities-cli/Cargo.toml 
b/utils/frame/frame-utilities-cli/Cargo.toml index 0e39f35512541..1b6597fc9f2fc 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-cli" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -11,11 +11,12 @@ documentation = "https://docs.rs/substrate-frame-cli" readme = "README.md" [dependencies] -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } structopt = "0.3.8" -frame-system = { version = "2.0.0", path = "../../../frame/system" } +frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } +frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } [dev-dependencies] diff --git a/utils/frame/frame-utilities-cli/src/lib.rs b/utils/frame/frame-utilities-cli/src/lib.rs index 872cfc99a63dc..4f5b1da5766a3 100644 --- a/utils/frame/frame-utilities-cli/src/lib.rs +++ b/utils/frame/frame-utilities-cli/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,6 @@ //! frame-system CLI utilities -mod module_id; - -pub use module_id::ModuleIdCmd; +mod pallet_id; +pub use pallet_id::PalletIdCmd; diff --git a/utils/frame/frame-utilities-cli/src/module_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs similarity index 70% rename from utils/frame/frame-utilities-cli/src/module_id.rs rename to utils/frame/frame-utilities-cli/src/pallet_id.rs index cc76c70d0fa8e..2caac7db588a9 100644 --- a/utils/frame/frame-utilities-cli/src/module_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,25 +15,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Implementation of the `moduleid` subcommand +//! 
Implementation of the `palletid` subcommand +use frame_support::PalletId; use sc_cli::{ - Error, utils::print_from_uri, CryptoSchemeFlag, - OutputTypeFlag, KeystoreParams, with_crypto_scheme, + utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, + OutputTypeFlag, }; -use sp_runtime::ModuleId; +use sp_core::crypto::{Ss58AddressFormat, Ss58Codec}; use sp_runtime::traits::AccountIdConversion; -use sp_core::crypto::{Ss58Codec, Ss58AddressFormat}; -use std::convert::{TryInto, TryFrom}; +use std::convert::{TryFrom, TryInto}; use structopt::StructOpt; -/// The `moduleid` command +/// The `palletid` command #[derive(Debug, StructOpt)] -#[structopt( - name = "moduleid", - about = "Inspect a module ID address" -)] -pub struct ModuleIdCmd { +#[structopt(name = "palletid", about = "Inspect a module ID address")] +pub struct PalletIdCmd { /// The module ID used to derive the account id: String, @@ -60,23 +57,23 @@ pub struct ModuleIdCmd { pub keystore_params: KeystoreParams, } -impl ModuleIdCmd { +impl PalletIdCmd { /// runs the command pub fn run(&self) -> Result<(), Error> - where - R: frame_system::Trait, - R::AccountId: Ss58Codec, + where + R: frame_system::Config, + R::AccountId: Ss58Codec, { if self.id.len() != 8 { Err("a module id must be a string of 8 characters")? } let password = self.keystore_params.read_password()?; - let id_fixed_array: [u8; 8] = self.id.as_bytes() - .try_into() - .map_err(|_| "Cannot convert argument to moduleid: argument should be 8-character string")?; + let id_fixed_array: [u8; 8] = self.id.as_bytes().try_into().map_err(|_| { + "Cannot convert argument to palletid: argument should be 8-character string" + })?; - let account_id: R::AccountId = ModuleId(id_fixed_array).into_account(); + let account_id: R::AccountId = PalletId(id_fixed_array).into_account(); with_crypto_scheme!( self.crypto_scheme.scheme, @@ -91,4 +88,3 @@ impl ModuleIdCmd { Ok(()) } } - diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml new file mode 100644 index 0000000000000..d255499d6c3ad --- /dev/null +++ b/utils/frame/remote-externalities/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "remote-externalities" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "An externalities provided environemnt that can load itself from remote nodes or cache files" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = [ + "tokio1", +] } +jsonrpsee-proc-macros = "0.3.0" + +env_logger = "0.9" +log = "0.4.11" +codec = { package = "parity-scale-codec", version = "2.0.0" } +serde_json = "1.0" +serde = "1.0.126" + +sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } +sp-version = { version = "4.0.0-dev", path = "../../../primitives/version" } + +[dev-dependencies] +tokio = { version = "1.10", features = ["macros", "rt-multi-thread"] } +pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", version = "5.0.0-dev" } + +[features] +remote-test = [] diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs new file mode 100644 
index 0000000000000..addb3d1dd3c17 --- /dev/null +++ b/utils/frame/remote-externalities/src/lib.rs @@ -0,0 +1,643 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Remote Externalities +//! +//! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate +//! based chain, or a local state snapshot file. + +use codec::{Decode, Encode}; +use jsonrpsee_ws_client::{types::v2::params::JsonRpcParams, WsClient, WsClientBuilder}; +use log::*; +use sp_core::{ + hashing::twox_128, + hexdisplay::HexDisplay, + storage::{StorageData, StorageKey}, +}; +pub use sp_io::TestExternalities; +use sp_runtime::traits::Block as BlockT; +use std::{ + fs, + path::{Path, PathBuf}, +}; + +pub mod rpc_api; + +type KeyPair = (StorageKey, StorageData); + +const LOG_TARGET: &str = "remote-ext"; +const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io"; +const BATCH_SIZE: usize = 1000; + +jsonrpsee_proc_macros::rpc_client_api! { + RpcApi { + #[rpc(method = "state_getStorage", positional_params)] + fn get_storage(prefix: StorageKey, hash: Option) -> StorageData; + #[rpc(method = "state_getKeysPaged", positional_params)] + fn get_keys_paged( + prefix: Option, + count: u32, + start_key: Option, + hash: Option, + ) -> Vec; + #[rpc(method = "chain_getFinalizedHead", positional_params)] + fn finalized_head() -> B::Hash; + } +} + +/// The execution mode. +#[derive(Clone)] +pub enum Mode { + /// Online. + Online(OnlineConfig), + /// Offline. Uses a state snapshot file and needs not any client config. + Offline(OfflineConfig), +} + +impl Default for Mode { + fn default() -> Self { + Mode::Online(OnlineConfig::default()) + } +} + +/// Configuration of the offline execution. +/// +/// A state snapshot config must be present. +#[derive(Clone)] +pub struct OfflineConfig { + /// The configuration of the state snapshot file to use. It must be present. + pub state_snapshot: SnapshotConfig, +} + +/// Description of the transport protocol (for online execution). +#[derive(Debug)] +pub struct Transport { + uri: String, + client: Option, +} + +impl Clone for Transport { + fn clone(&self) -> Self { + Self { uri: self.uri.clone(), client: None } + } +} + +impl From for Transport { + fn from(t: String) -> Self { + Self { uri: t, client: None } + } +} + +/// Configuration of the online execution. +/// +/// A state snapshot config may be present and will be written to in that case. +#[derive(Clone)] +pub struct OnlineConfig { + /// The block hash at which to get the runtime state. Will be latest finalized head if not + /// provided. + pub at: Option, + /// An optional state snapshot file to WRITE to, not for reading. Not written if set to `None`. + pub state_snapshot: Option, + /// The modules to scrape. If empty, entire chain state will be scraped. + pub modules: Vec, + /// Transport config. 
+ pub transport: Transport, +} + +impl OnlineConfig { + /// Return rpc (ws) client. + fn rpc_client(&self) -> &WsClient { + self.transport + .client + .as_ref() + .expect("ws client must have been initialized by now; qed.") + } +} + +impl Default for OnlineConfig { + fn default() -> Self { + Self { + transport: Transport { uri: DEFAULT_TARGET.to_owned(), client: None }, + at: None, + state_snapshot: None, + modules: vec![], + } + } +} + +/// Configuration of the state snapshot. +#[derive(Clone)] +pub struct SnapshotConfig { + /// The path to the snapshot file. + pub path: PathBuf, +} + +impl SnapshotConfig { + pub fn new>(path: P) -> Self { + Self { path: path.into() } + } +} + +impl Default for SnapshotConfig { + fn default() -> Self { + Self { path: Path::new("SNAPSHOT").into() } + } +} + +/// Builder for remote-externalities. +pub struct Builder { + /// Custom key-pairs to be injected into the externalities. + inject: Vec, + /// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must + /// be given. + hashed_prefixes: Vec>, + /// Storage entry keys to be injected into the externalities. The *hashed* key must be given. + hashed_keys: Vec>, + /// connectivity mode, online or offline. + mode: Mode, +} + +// NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for +// that. +impl Default for Builder { + fn default() -> Self { + Self { + inject: Default::default(), + mode: Default::default(), + hashed_prefixes: Default::default(), + hashed_keys: Default::default(), + } + } +} + +// Mode methods +impl Builder { + fn as_online(&self) -> &OnlineConfig { + match &self.mode { + Mode::Online(config) => &config, + _ => panic!("Unexpected mode: Online"), + } + } + + fn as_online_mut(&mut self) -> &mut OnlineConfig { + match &mut self.mode { + Mode::Online(config) => config, + _ => panic!("Unexpected mode: Online"), + } + } +} + +// RPC methods +impl Builder { + async fn rpc_get_storage( + &self, + key: StorageKey, + maybe_at: Option, + ) -> Result { + trace!(target: LOG_TARGET, "rpc: get_storage"); + RpcApi::::get_storage(self.as_online().rpc_client(), key, maybe_at) + .await + .map_err(|e| { + error!("Error = {:?}", e); + "rpc get_storage failed." + }) + } + /// Get the latest finalized head. + async fn rpc_get_head(&self) -> Result { + trace!(target: LOG_TARGET, "rpc: finalized_head"); + RpcApi::::finalized_head(self.as_online().rpc_client()).await.map_err(|e| { + error!("Error = {:?}", e); + "rpc finalized_head failed." + }) + } + + /// Get all the keys at `prefix` at `hash` using the paged, safe RPC methods. 
+ async fn get_keys_paged( + &self, + prefix: StorageKey, + at: B::Hash, + ) -> Result, &'static str> { + const PAGE: u32 = 512; + let mut last_key: Option = None; + let mut all_keys: Vec = vec![]; + let keys = loop { + let page = RpcApi::::get_keys_paged( + self.as_online().rpc_client(), + Some(prefix.clone()), + PAGE, + last_key.clone(), + Some(at), + ) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc get_keys failed" + })?; + let page_len = page.len(); + all_keys.extend(page); + + if page_len < PAGE as usize { + debug!(target: LOG_TARGET, "last page received: {}", page_len); + break all_keys + } else { + let new_last_key = + all_keys.last().expect("all_keys is populated; has .last(); qed"); + debug!( + target: LOG_TARGET, + "new total = {}, full page received: {:?}", + all_keys.len(), + HexDisplay::from(new_last_key) + ); + last_key = Some(new_last_key.clone()); + } + }; + + Ok(keys) + } + + /// Synonym of `rpc_get_pairs_unsafe` that uses paged queries to first get the keys, and then + /// map them to values one by one. + /// + /// This can work with public nodes. But, expect it to be darn slow. + pub(crate) async fn rpc_get_pairs_paged( + &self, + prefix: StorageKey, + at: B::Hash, + ) -> Result, &'static str> { + use jsonrpsee_ws_client::types::traits::Client; + use serde_json::to_value; + let keys = self.get_keys_paged(prefix, at).await?; + let keys_count = keys.len(); + debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len()); + + let mut key_values: Vec = vec![]; + let client = self.as_online().rpc_client(); + for chunk_keys in keys.chunks(BATCH_SIZE) { + let batch = chunk_keys + .iter() + .cloned() + .map(|key| { + ( + "state_getStorage", + JsonRpcParams::Array(vec![ + to_value(key).expect("json serialization will work; qed."), + to_value(at).expect("json serialization will work; qed."), + ]), + ) + }) + .collect::>(); + let values = client.batch_request::>(batch).await.map_err(|e| { + log::error!( + target: LOG_TARGET, + "failed to execute batch: {:?}. Error: {:?}", + chunk_keys, + e + ); + "batch failed." + })?; + assert_eq!(chunk_keys.len(), values.len()); + for (idx, key) in chunk_keys.into_iter().enumerate() { + let maybe_value = values[idx].clone(); + let value = maybe_value.unwrap_or_else(|| { + log::warn!(target: LOG_TARGET, "key {:?} had none corresponding value.", &key); + StorageData(vec![]) + }); + key_values.push((key.clone(), value)); + if key_values.len() % (10 * BATCH_SIZE) == 0 { + let ratio: f64 = key_values.len() as f64 / keys_count as f64; + debug!( + target: LOG_TARGET, + "progress = {:.2} [{} / {}]", + ratio, + key_values.len(), + keys_count, + ); + } + } + } + + Ok(key_values) + } +} + +// Internal methods +impl Builder { + /// Save the given data as state snapshot. + fn save_state_snapshot(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> { + debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); + fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; + Ok(()) + } + + /// initialize `Self` from state snapshot. Panics if the file does not exist. + fn load_state_snapshot(&self, path: &Path) -> Result, &'static str> { + info!(target: LOG_TARGET, "scraping key-pairs from state snapshot {:?}", path); + let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; + Decode::decode(&mut &*bytes).map_err(|_| "decode failed") + } + + /// Build `Self` from a network node denoted by `uri`. 
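As context for `load_remote` below: a pallet's top-level storage prefix is the `twox_128` hash of its name, and that prefix is what the scraper filters on when `modules` is non-empty. A quick sketch (helper name is an assumption):

```rust
use sp_core::{hashing::twox_128, hexdisplay::HexDisplay};

// e.g. module_prefix("System") should yield
// "0x26aa394eea5630e07c48ae0c9558cef7", the well-known `System` prefix.
fn module_prefix(module: &str) -> String {
    let hashed = twox_128(module.as_bytes());
    format!("0x{}", HexDisplay::from(&hashed[..]))
}
```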
+ async fn load_remote(&self) -> Result, &'static str> { + let config = self.as_online(); + let at = self + .as_online() + .at + .expect("online config must be initialized by this point; qed.") + .clone(); + info!(target: LOG_TARGET, "scraping key-pairs from remote @ {:?}", at); + + let mut keys_and_values = if config.modules.len() > 0 { + let mut filtered_kv = vec![]; + for f in config.modules.iter() { + let hashed_prefix = StorageKey(twox_128(f.as_bytes()).to_vec()); + let module_kv = self.rpc_get_pairs_paged(hashed_prefix.clone(), at).await?; + info!( + target: LOG_TARGET, + "downloaded data for module {} (count: {} / prefix: {:?}).", + f, + module_kv.len(), + HexDisplay::from(&hashed_prefix), + ); + filtered_kv.extend(module_kv); + } + filtered_kv + } else { + info!(target: LOG_TARGET, "downloading data for all modules."); + self.rpc_get_pairs_paged(StorageKey(vec![]), at).await? + }; + + for prefix in &self.hashed_prefixes { + debug!( + target: LOG_TARGET, + "adding data for hashed prefix: {:?}", + HexDisplay::from(prefix) + ); + let additional_key_values = + self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at).await?; + keys_and_values.extend(additional_key_values); + } + + for key in &self.hashed_keys { + let key = StorageKey(key.to_vec()); + debug!(target: LOG_TARGET, "adding data for hashed key: {:?}", HexDisplay::from(&key)); + let value = self.rpc_get_storage(key.clone(), Some(at)).await?; + keys_and_values.push((key, value)); + } + + Ok(keys_and_values) + } + + pub(crate) async fn init_remote_client(&mut self) -> Result<(), &'static str> { + let mut online = self.as_online_mut(); + debug!(target: LOG_TARGET, "initializing remote client to {:?}", online.transport.uri); + + // First, initialize the ws client. + let ws_client = WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(&online.transport.uri) + .await + .map_err(|_| "failed to build ws client")?; + online.transport.client = Some(ws_client); + + // Then, if `at` is not set, set it. + if self.as_online().at.is_none() { + let at = self.rpc_get_head().await?; + self.as_online_mut().at = Some(at); + } + + Ok(()) + } + + pub(crate) async fn pre_build(mut self) -> Result, &'static str> { + let mut base_kv = match self.mode.clone() { + Mode::Offline(config) => self.load_state_snapshot(&config.state_snapshot.path)?, + Mode::Online(config) => { + self.init_remote_client().await?; + let kp = self.load_remote().await?; + if let Some(c) = config.state_snapshot { + self.save_state_snapshot(&kp, &c.path)?; + } + kp + }, + }; + + debug!( + target: LOG_TARGET, + "extending externalities with {} manually injected key-values", + self.inject.len() + ); + base_kv.extend(self.inject.clone()); + Ok(base_kv) + } +} + +// Public methods +impl Builder { + /// Create a new builder. + pub fn new() -> Self { + Default::default() + } + + /// Inject a manual list of key and values to the storage. + pub fn inject_key_value(mut self, injections: &[KeyPair]) -> Self { + for i in injections { + self.inject.push(i.clone()); + } + self + } + + /// Inject a hashed prefix. This is treated as-is, and should be pre-hashed. + /// + /// This should be used to inject a "PREFIX", like a storage (double) map. + pub fn inject_hashed_prefix(mut self, hashed: &[u8]) -> Self { + self.hashed_prefixes.push(hashed.to_vec()); + self + } + + /// Inject a hashed key to scrape. This is treated as-is, and should be pre-hashed. + /// + /// This should be used to inject a "KEY", like a storage value. 
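Before the remaining builder methods, a usage sketch that puts the API together. It assumes a reachable node at the default endpoint and some concrete `Block` type on the caller's side:

```rust
// Scrape `System` and `Balances` from a live node, cache the result to a
// snapshot file, and run a closure against the resulting externalities.
async fn demo() -> Result<(), &'static str> {
    let mut ext = Builder::<Block>::new()
        .mode(Mode::Online(OnlineConfig {
            modules: vec!["System".to_owned(), "Balances".to_owned()],
            state_snapshot: Some(SnapshotConfig::new("demo_snapshot.bin")),
            ..Default::default()
        }))
        .build()
        .await?;
    ext.execute_with(|| {
        // Read or mutate storage here exactly as in a normal test.
    });
    Ok(())
}
```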
+ pub fn inject_hashed_key(mut self, hashed: &[u8]) -> Self { + self.hashed_keys.push(hashed.to_vec()); + self + } + + /// Configure a state snapshot to be used. + pub fn mode(mut self, mode: Mode) -> Self { + self.mode = mode; + self + } + + /// Build the test externalities. + pub async fn build(self) -> Result { + let kv = self.pre_build().await?; + let mut ext = TestExternalities::new_empty(); + + debug!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); + for (k, v) in kv { + let (k, v) = (k.0, v.0); + // Insert the key,value pair into the test trie backend + ext.insert(k, v); + } + + Ok(ext) + } +} + +#[cfg(test)] +mod test_prelude { + pub(crate) use super::*; + pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; + + pub(crate) type Block = RawBlock>; + + pub(crate) fn init_logger() { + let _ = env_logger::Builder::from_default_env() + .format_module_path(true) + .format_level(true) + .filter_module(LOG_TARGET, log::LevelFilter::Debug) + .try_init(); + } +} + +#[cfg(test)] +mod tests { + use super::test_prelude::*; + + #[tokio::test] + async fn can_load_state_snapshot() { + init_logger(); + Builder::::new() + .mode(Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig::new("test_data/proxy_test"), + })) + .build() + .await + .expect("Can't read state snapshot file") + .execute_with(|| {}); + } +} + +#[cfg(all(test, feature = "remote-test"))] +mod remote_tests { + use super::test_prelude::*; + + #[tokio::test] + async fn can_build_one_pallet() { + init_logger(); + Builder::::new() + .mode(Mode::Online(OnlineConfig { + modules: vec!["System".to_owned()], + ..Default::default() + })) + .build() + .await + .expect("Can't reach the remote node. Is it running?") + .execute_with(|| {}); + } + + #[tokio::test] + async fn can_build_few_pallet() { + init_logger(); + Builder::::new() + .mode(Mode::Online(OnlineConfig { + modules: vec![ + "Proxy".to_owned(), + "Multisig".to_owned(), + "PhragmenElection".to_owned(), + ], + ..Default::default() + })) + .build() + .await + .expect("Can't reach the remote node. Is it running?") + .execute_with(|| {}); + } + + #[tokio::test] + async fn sanity_check_decoding() { + use pallet_elections_phragmen::SeatHolder; + use sp_core::crypto::Ss58Codec; + type AccountId = sp_runtime::AccountId32; + type Balance = u128; + frame_support::generate_storage_alias!( + PhragmenElection, + Members => + Value>> + ); + + init_logger(); + Builder::::new() + .mode(Mode::Online(OnlineConfig { + modules: vec!["PhragmenElection".to_owned()], + ..Default::default() + })) + .build() + .await + .expect("Can't reach the remote node. Is it running?") + .execute_with(|| { + // Gav's polkadot account. 99% this will be in the council. + let gav_polkadot = + AccountId::from_ss58check("13RDY9nrJpyTDBSUdBw12dGwhk19sGwsrVZ2bxkzYHBSagP2") + .unwrap(); + let members = Members::get().unwrap(); + assert!(members + .iter() + .map(|s| s.who.clone()) + .find(|a| a == &gav_polkadot) + .is_some()); + }); + } + + #[tokio::test] + async fn can_create_state_snapshot() { + init_logger(); + Builder::::new() + .mode(Mode::Online(OnlineConfig { + state_snapshot: Some(SnapshotConfig::new("test_snapshot_to_remove.bin")), + modules: vec!["Balances".to_owned()], + ..Default::default() + })) + .build() + .await + .expect("Can't reach the remote node. 
Is it running?") + .execute_with(|| {}); + + let to_delete = std::fs::read_dir(SnapshotConfig::default().path) + .unwrap() + .into_iter() + .map(|d| d.unwrap()) + .filter(|p| p.path().extension().unwrap_or_default() == "bin") + .collect::>(); + + assert!(to_delete.len() > 0); + + for d in to_delete { + std::fs::remove_file(d.path()).unwrap(); + } + } + + #[tokio::test] + async fn can_fetch_all() { + init_logger(); + Builder::::new() + .build() + .await + .expect("Can't reach the remote node. Is it running?") + .execute_with(|| {}); + } +} diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs new file mode 100644 index 0000000000000..24050856a96a1 --- /dev/null +++ b/utils/frame/remote-externalities/src/rpc_api.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! WS RPC API for one off RPC calls to a substrate node. +// TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 + +use jsonrpsee_ws_client::{ + types::{traits::Client, v2::params::JsonRpcParams}, + WsClient, WsClientBuilder, +}; +use sp_runtime::{ + generic::SignedBlock, + traits::{Block as BlockT, Header as HeaderT}, +}; + +/// Get the header of the block identified by `at` +pub async fn get_header(from: S, at: Block::Hash) -> Result +where + Block: BlockT, + Block::Header: serde::de::DeserializeOwned, + S: AsRef, +{ + let params = vec![hash_to_json::(at)?]; + let client = build_client(from).await?; + + client + .request::("chain_getHeader", JsonRpcParams::Array(params)) + .await + .map_err(|e| format!("chain_getHeader request failed: {:?}", e)) +} + +/// Get the finalized head +pub async fn get_finalized_head(from: S) -> Result +where + Block: BlockT, + S: AsRef, +{ + let client = build_client(from).await?; + + client + .request::("chain_getFinalizedHead", JsonRpcParams::NoParams) + .await + .map_err(|e| format!("chain_getFinalizedHead request failed: {:?}", e)) +} + +/// Get the signed block identified by `at`. +pub async fn get_block(from: S, at: Block::Hash) -> Result +where + S: AsRef, + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: HeaderT, +{ + let params = vec![hash_to_json::(at)?]; + let client = build_client(from).await?; + let signed_block = client + .request::>("chain_getBlock", JsonRpcParams::Array(params)) + .await + .map_err(|e| format!("chain_getBlock request failed: {:?}", e))?; + + Ok(signed_block.block) +} + +/// Convert a block hash to a serde json value. +fn hash_to_json(hash: Block::Hash) -> Result { + serde_json::to_value(hash) + .map_err(|e| format!("Block hash could not be converted to JSON: {:?}", e)) +} + +/// Build a website client that connects to `from`. 
+async fn build_client>(from: S) -> Result { + WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(from.as_ref()) + .await + .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) +} + +/// Get the runtime version of a given chain. +pub async fn get_runtime_version( + from: S, + at: Option, +) -> Result +where + S: AsRef, + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: HeaderT, +{ + let params = if let Some(at) = at { vec![hash_to_json::(at)?] } else { vec![] }; + let client = build_client(from).await?; + client + .request::( + "state_getRuntimeVersion", + JsonRpcParams::Array(params), + ) + .await + .map_err(|e| format!("state_getRuntimeVersion request failed: {:?}", e)) +} diff --git a/utils/frame/remote-externalities/test_data/proxy_test b/utils/frame/remote-externalities/test_data/proxy_test new file mode 100644 index 0000000000000..548ce9cdba4f1 Binary files /dev/null and b/utils/frame/remote-externalities/test_data/proxy_test differ diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 2541ed0cf655f..a94f18d0e8925 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "substrate-frame-rpc-support" -version = "2.0.0" -authors = ["Parity Technologies ", "Andrew Dirksen "] +version = "3.0.0" +authors = [ + "Parity Technologies ", + "Andrew Dirksen ", +] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" @@ -12,15 +15,15 @@ description = "Substrate RPC for FRAME's support" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = { version = "0.3.0", features = ["compat"] } -jsonrpc-client-transports = { version = "15.0.0", default-features = false, features = ["http"] } -jsonrpc-core = "15.0.0" -codec = { package = "parity-scale-codec", version = "1.3.1" } +futures = "0.3.16" +jsonrpc-client-transports = { version = "18.0.0", features = ["http"] } +codec = { package = "parity-scale-codec", version = "2.0.0" } serde = "1" -frame-support = { version = "2.0.0", path = "../../../../frame/support" } -sp-storage = { version = "2.0.0", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "0.8.0", path = "../../../../client/rpc-api" } +frame-support = { version = "4.0.0-dev", path = "../../../../frame/support" } +sp-storage = { version = "4.0.0-dev", path = "../../../../primitives/storage" } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } [dev-dependencies] -frame-system = { version = "2.0.0", path = "../../../../frame/system" } -tokio = "0.2" +frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } +scale-info = "1.0" +tokio = "1.10" diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index dc87d6185209d..1b2453c361d97 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,31 +20,27 @@ #![warn(missing_docs)] +use codec::{DecodeAll, FullCodec, FullEncode}; use core::marker::PhantomData; -use futures::compat::Future01CompatExt; +use frame_support::storage::generator::{StorageDoubleMap, StorageMap, StorageValue}; use jsonrpc_client_transports::RpcError; -use codec::{DecodeAll, FullCodec, FullEncode}; +use sc_rpc_api::state::StateClient; use serde::{de::DeserializeOwned, Serialize}; -use frame_support::storage::generator::{ - StorageDoubleMap, StorageMap, StorageValue -}; use sp_storage::{StorageData, StorageKey}; -use sc_rpc_api::state::StateClient; /// A typed query on chain state usable from an RPC client. /// /// ```no_run -/// # use futures::compat::Future01CompatExt; /// # use jsonrpc_client_transports::RpcError; /// # use jsonrpc_client_transports::transports::http; /// # use codec::Encode; /// # use frame_support::{decl_storage, decl_module}; /// # use substrate_frame_rpc_support::StorageQuery; -/// # use frame_system::Trait; +/// # use frame_system::Config; /// # use sc_rpc_api::state::StateClient; /// # -/// # // Hash would normally be ::Hash, but we don't have -/// # // frame_system::Trait implemented for TestRuntime. Here we just pretend. +/// # // Hash would normally be ::Hash, but we don't have +/// # // frame_system::Config implemented for TestRuntime. Here we just pretend. /// # type Hash = (); /// # /// # fn main() -> Result<(), RpcError> { @@ -54,7 +50,7 @@ use sc_rpc_api::state::StateClient; /// # struct TestRuntime; /// # /// # decl_module! { -/// # pub struct Module for enum Call where origin: T::Origin {} +/// # pub struct Module for enum Call where origin: T::Origin {} /// # } /// # /// pub type Loc = (i64, i64, i64); @@ -62,7 +58,7 @@ use sc_rpc_api::state::StateClient; /// /// // Note that all fields are marked pub. /// decl_storage! { -/// trait Store for Module as TestRuntime { +/// trait Store for Module as TestRuntime { /// pub LastActionId: u64; /// pub Voxels: map hasher(blake2_128_concat) Loc => Block; /// pub Actions: map hasher(blake2_128_concat) u64 => Loc; @@ -71,7 +67,7 @@ use sc_rpc_api::state::StateClient; /// } /// /// # async fn test() -> Result<(), RpcError> { -/// let conn = http::connect("http://[::1]:9933").compat().await?; +/// let conn = http::connect("http://[::1]:9933").await?; /// let cl = StateClient::::new(conn); /// /// let q = StorageQuery::value::(); @@ -98,18 +94,12 @@ pub struct StorageQuery { impl StorageQuery { /// Create a storage query for a StorageValue. pub fn value>() -> Self { - Self { - key: StorageKey(St::storage_value_final_key().to_vec()), - _spook: PhantomData, - } + Self { key: StorageKey(St::storage_value_final_key().to_vec()), _spook: PhantomData } } /// Create a storage query for a value in a StorageMap. pub fn map, K: FullEncode>(key: K) -> Self { - Self { - key: StorageKey(St::storage_map_final_key(key)), - _spook: PhantomData, - } + Self { key: StorageKey(St::storage_map_final_key(key)), _spook: PhantomData } } /// Create a storage query for a value in a StorageDoubleMap. @@ -117,15 +107,12 @@ impl StorageQuery { key1: K1, key2: K2, ) -> Self { - Self { - key: StorageKey(St::storage_double_map_final_key(key1, key2)), - _spook: PhantomData, - } + Self { key: StorageKey(St::storage_double_map_final_key(key1, key2)), _spook: PhantomData } } /// Send this query over RPC, await the typed result. /// - /// Hash should be ::Hash. + /// Hash should be ::Hash. 
/// /// # Arguments /// @@ -138,9 +125,9 @@ impl StorageQuery { state_client: &StateClient, block_index: Option, ) -> Result, RpcError> { - let opt: Option = state_client.storage(self.key, block_index).compat().await?; + let opt: Option = state_client.storage(self.key, block_index).await?; opt.map(|encoded| V::decode_all(&encoded.0)) .transpose() - .map_err(|decode_err| RpcError::Other(decode_err.into())) + .map_err(|decode_err| RpcError::Other(Box::new(decode_err))) } } diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 515ff93251522..e9ae506ef6b06 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-system" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,24 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-client-api = { version = "2.0.0", path = "../../../../client/api" } -codec = { package = "parity-scale-codec", version = "1.3.1" } -futures = { version = "0.3.4", features = ["compat"] } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" +sc-client-api = { version = "4.0.0-dev", path = "../../../../client/api" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.16" +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" log = "0.4.8" -serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../../primitives/api" } -frame-system-rpc-runtime-api = { version = "2.0.0", path = "../../../../frame/system/rpc/runtime-api" } -sp-core = { version = "2.0.0", path = "../../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } -sp-transaction-pool = { version = "2.0.0", path = "../../../../primitives/transaction-pool" } -sp-block-builder = { version = "2.0.0", path = "../../../../primitives/block-builder" } -sc-rpc-api = { version = "0.8.0", path = "../../../../client/rpc-api" } +sp-runtime = { version = "4.0.0-dev", path = "../../../../primitives/runtime" } +sp-api = { version = "4.0.0-dev", path = "../../../../primitives/api" } +frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../../frame/system/rpc/runtime-api" } +sp-core = { version = "4.0.0-dev", path = "../../../../primitives/core" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../../primitives/blockchain" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../../client/transaction-pool/api" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../../primitives/block-builder" } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } -sp-tracing = { version = "2.0.0", path = "../../../../primitives/tracing" } -sc-transaction-pool = { version = "2.0.0", path = "../../../../client/transaction-pool" } +sp-tracing = { version = "4.0.0-dev", path = "../../../../primitives/tracing" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../../../../client/transaction-pool" } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index cefe39534a167..f0f37f0b20675 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ 
b/utils/frame/rpc/system/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,32 +19,23 @@ use std::sync::Arc; -use codec::{self, Codec, Decode, Encode}; -use sc_client_api::light::{future_header, RemoteBlockchain, Fetcher, RemoteCallRequest}; -use jsonrpc_core::{ - Error as RpcError, ErrorCode, - futures::future::{self as rpc_future,result, Future}, -}; +use codec::{Codec, Decode, Encode}; +use futures::{future::ready, FutureExt, TryFutureExt}; +use jsonrpc_core::{Error as RpcError, ErrorCode}; use jsonrpc_derive::rpc; -use futures::future::{ready, TryFutureExt}; -use sp_blockchain::{ - HeaderBackend, - Error as ClientError -}; -use sp_runtime::{ - generic::BlockId, - traits, -}; -use sp_core::{hexdisplay::HexDisplay, Bytes}; -use sp_transaction_pool::{TransactionPool, InPoolTransaction}; -use sp_block_builder::BlockBuilder; +use sc_client_api::light::{future_header, Fetcher, RemoteBlockchain, RemoteCallRequest}; use sc_rpc_api::DenyUnsafe; +use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{Error as ClientError, HeaderBackend}; +use sp_core::{hexdisplay::HexDisplay, Bytes}; +use sp_runtime::{generic::BlockId, traits}; -pub use frame_system_rpc_runtime_api::AccountNonceApi; pub use self::gen_client::Client as SystemClient; +pub use frame_system_rpc_runtime_api::AccountNonceApi; /// Future that resolves to account nonce. -pub type FutureResult<T> = Box<dyn Future<Item = T, Error = RpcError> + Send>; +type FutureResult<T> = jsonrpc_core::BoxFuture<Result<T, RpcError>>; /// System RPC methods. #[rpc] @@ -89,13 +80,8 @@ pub struct FullSystem<P: TransactionPool, C, B> { impl<P: TransactionPool, C, B> FullSystem<P, C, B> { /// Create new `FullSystem` given client and transaction pool. - pub fn new(client: Arc<C>, pool: Arc<P>, deny_unsafe: DenyUnsafe,) -> Self { - FullSystem { - client, - pool, - deny_unsafe, - _marker: Default::default(), - } + pub fn new(client: Arc<C>, pool: Arc<P>, deny_unsafe: DenyUnsafe) -> Self { + FullSystem { client, pool, deny_unsafe, _marker: Default::default() } } } @@ -127,39 +113,44 @@ where Ok(adjust_nonce(&*self.pool, account, nonce)) }; - Box::new(result(get_nonce())) + let res = get_nonce(); + async move { res }.boxed() } - fn dry_run(&self, extrinsic: Bytes, at: Option<<Block as traits::Block>::Hash>) -> FutureResult<Bytes> { + fn dry_run( + &self, + extrinsic: Bytes, + at: Option<<Block as traits::Block>::Hash>, + ) -> FutureResult<Bytes> { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Box::new(rpc_future::err(err.into())); + return async move { Err(err.into()) }.boxed() } let dry_run = || { let api = self.client.runtime_api(); let at = BlockId::<Block>::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. - self.client.info().best_hash - )); - - let uxt: <Block as traits::Block>::Extrinsic = Decode::decode(&mut &*extrinsic).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::DecodeError.into()), - message: "Unable to dry run extrinsic.".into(), - data: Some(format!("{:?}", e).into()), - })?; + self.client.info().best_hash)); - let result = api.apply_extrinsic(&at, uxt) + let uxt: <Block as traits::Block>::Extrinsic = Decode::decode(&mut &*extrinsic) .map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), + code: ErrorCode::ServerError(Error::DecodeError.into()), message: "Unable to dry run extrinsic.".into(), data: Some(format!("{:?}", e).into()), })?; + let result = api.apply_extrinsic(&at, uxt).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to dry run extrinsic.".into(), + data: Some(format!("{:?}", e).into()), + })?; + Ok(Encode::encode(&result).into()) }; + let res = dry_run(); - Box::new(result(dry_run())) + async move { res }.boxed() } } @@ -179,12 +170,7 @@ impl LightSystem { fetcher: Arc<F>, pool: Arc<P>
, ) -> Self { - LightSystem { - client, - remote_blockchain, - fetcher, - pool, - } + LightSystem { client, remote_blockchain, fetcher, pool } } } @@ -205,14 +191,14 @@ where let future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); let fetcher = self.fetcher.clone(); let call_data = account.encode(); - let future_best_header = future_best_header - .and_then(move |maybe_best_header| ready( - match maybe_best_header { - Some(best_header) => Ok(best_header), - None => Err(ClientError::UnknownBlock(format!("{}", best_hash))), - } - )); - let future_nonce = future_best_header.and_then(move |best_header| + let future_best_header = future_best_header.and_then(move |maybe_best_header| { + ready( + maybe_best_header + .ok_or_else(|| ClientError::UnknownBlock(format!("{}", best_hash))), + ) + }); + + let future_nonce = future_best_header.and_then(move |best_header| { fetcher.remote_call(RemoteCallRequest { block: best_hash, header: best_header, @@ -220,9 +206,12 @@ where call_data, retry_count: None, }) - ).compat(); - let future_nonce = future_nonce.and_then(|nonce| Decode::decode(&mut &nonce[..]) - .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e))); + }); + + let future_nonce = future_nonce.and_then(|nonce| async move { + Index::decode(&mut &nonce[..]) + .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e)) + }); let future_nonce = future_nonce.map_err(|e| RpcError { code: ErrorCode::ServerError(Error::RuntimeError.into()), message: "Unable to query nonce.".into(), @@ -230,27 +219,29 @@ where }); let pool = self.pool.clone(); - let future_nonce = future_nonce.map(move |nonce| adjust_nonce(&*pool, account, nonce)); - - Box::new(future_nonce) + future_nonce.map_ok(move |nonce| adjust_nonce(&*pool, account, nonce)).boxed() } - fn dry_run(&self, _extrinsic: Bytes, _at: Option<::Hash>) -> FutureResult { - Box::new(result(Err(RpcError { - code: ErrorCode::MethodNotFound, - message: "Unable to dry run extrinsic.".into(), - data: None, - }))) + fn dry_run( + &self, + _extrinsic: Bytes, + _at: Option<::Hash>, + ) -> FutureResult { + async { + Err(RpcError { + code: ErrorCode::MethodNotFound, + message: "Unable to dry run extrinsic.".into(), + data: None, + }) + } + .boxed() } } /// Adjust account nonce from state, so that tx with the nonce will be /// placed after all ready txpool transactions. 
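// Hedged illustration of the rule implemented below, with plain `u64`s
// standing in for the generic `Index` type and a slice standing in for the
// sender's ready transactions in the pool:
fn adjust_nonce_sketch(state_nonce: u64, ready_nonces_of_sender: &[u64]) -> u64 {
    let mut nonce = state_nonce;
    // Every ready transaction already occupying the expected nonce pushes the
    // next free nonce one further, so the returned value lands after them all.
    for &n in ready_nonces_of_sender {
        if n == nonce {
            nonce += 1;
        }
    }
    nonce
}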
-fn adjust_nonce( - pool: &P, - account: AccountId, - nonce: Index, -) -> Index where +fn adjust_nonce(pool: &P, account: AccountId, nonce: Index) -> Index +where P: TransactionPool, AccountId: Clone + std::fmt::Display + Encode, Index: Clone + std::fmt::Display + Encode + traits::AtLeast32Bit + 'static, @@ -288,9 +279,12 @@ mod tests { use super::*; use futures::executor::block_on; - use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; use sc_transaction_pool::BasicPool; - use sp_runtime::{ApplyExtrinsicResult, transaction_validity::{TransactionValidityError, InvalidTransaction}}; + use sp_runtime::{ + transaction_validity::{InvalidTransaction, TransactionValidityError}, + ApplyExtrinsicResult, + }; + use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; #[test] fn should_return_next_nonce_for_some_account() { @@ -299,12 +293,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let source = sp_runtime::transaction_validity::TransactionSource::External; let new_transaction = |nonce: u64| { @@ -328,7 +318,7 @@ mod tests { let nonce = accounts.nonce(AccountKeyring::Alice.into()); // then - assert_eq!(nonce.wait().unwrap(), 2); + assert_eq!(block_on(nonce).unwrap(), 2); } #[test] @@ -338,12 +328,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); @@ -351,7 +337,7 @@ mod tests { let res = accounts.dry_run(vec![].into(), None); // then - assert_eq!(res.wait(), Err(RpcError::method_not_found())); + assert_eq!(block_on(res), Err(RpcError::method_not_found())); } #[test] @@ -361,12 +347,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); @@ -375,13 +357,14 @@ mod tests { to: AccountKeyring::Bob.into(), amount: 5, nonce: 0, - }.into_signed_tx(); + } + .into_signed_tx(); // when let res = accounts.dry_run(tx.encode().into(), None); // then - let bytes = res.wait().unwrap().0; + let bytes = block_on(res).unwrap().0; let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); assert_eq!(apply_res, Ok(Ok(()))); } @@ -393,12 +376,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); @@ -407,13 +386,14 @@ mod tests { to: AccountKeyring::Bob.into(), amount: 5, nonce: 100, - }.into_signed_tx(); + } + .into_signed_tx(); // when let res = 
accounts.dry_run(tx.encode().into(), None); // then - let bytes = res.wait().unwrap().0; + let bytes = block_on(res).unwrap().0; let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); assert_eq!(apply_res, Err(TransactionValidityError::Invalid(InvalidTransaction::Stale))); } diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml new file mode 100644 index 0000000000000..5cc5ae6ee58bb --- /dev/null +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "try-runtime-cli" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "CLI command for runtime testing and dry-running" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +log = "0.4.8" +parity-scale-codec = { version = "2.0.0" } +serde = "1.0.126" +structopt = "0.3.8" + +sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../../client/service" } +sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } +sc-executor = { version = "0.10.0-dev", path = "../../../../client/executor" } +sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" } +sp-state-machine = { version = "0.10.0-dev", path = "../../../../primitives/state-machine" } +sp-runtime = { version = "4.0.0-dev", path = "../../../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", path = "../../../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../../../primitives/keystore" } + +remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs new file mode 100644 index 0000000000000..c92c3959535e9 --- /dev/null +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -0,0 +1,521 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! `Structopt`-ready structs for `try-runtime`.
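// Illustrative invocations assembled from the flags and subcommands defined
// in this file (binary name, hash, module names, and paths are placeholders):
//
//   <node> try-runtime --url ws://localhost:9944 --block-at 0x<hash> \
//       on-runtime-upgrade live --modules System,Balances
//
//   <node> try-runtime on-runtime-upgrade snap --snapshot-path /path/to/snapshot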
+ +use parity_scale_codec::{Decode, Encode}; +use remote_externalities::{rpc_api, Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig}; +use sc_chain_spec::ChainSpec; +use sc_cli::{CliConfiguration, ExecutionStrategy, WasmExecutionMethod}; +use sc_executor::NativeElseWasmExecutor; +use sc_service::{Configuration, NativeExecutionDispatch}; +use sp_core::{ + hashing::twox_128, + offchain::{ + testing::{TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, + }, + storage::{well_known_keys, StorageData, StorageKey}, +}; +use sp_keystore::{testing::KeyStore, KeystoreExt}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_state_machine::StateMachine; +use std::{fmt::Debug, path::PathBuf, str::FromStr, sync::Arc}; + +mod parse; + +/// Possible subcommands of `try-runtime`. +#[derive(Debug, Clone, structopt::StructOpt)] +pub enum Command { + /// Execute "TryRuntime_on_runtime_upgrade" against the given runtime state. + OnRuntimeUpgrade(OnRuntimeUpgradeCmd), + /// Execute "OffchainWorkerApi_offchain_worker" against the given runtime state. + OffchainWorker(OffchainWorkerCmd), + /// Execute "Core_execute_block" using the given block and the runtime state of the parent + /// block. + ExecuteBlock(ExecuteBlockCmd), +} + +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct OnRuntimeUpgradeCmd { + #[structopt(subcommand)] + pub state: State, +} + +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct OffchainWorkerCmd { + #[structopt(subcommand)] + pub state: State, +} + +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct ExecuteBlockCmd { + #[structopt(subcommand)] + pub state: State, +} + +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct SharedParams { + /// The shared parameters. + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: sc_cli::SharedParams, + + /// The execution strategy that should be used. + #[structopt( + long = "execution", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + default_value = "Wasm", + )] + pub execution: ExecutionStrategy, + + /// Method for executing Wasm runtime code. + #[structopt( + long = "wasm-execution", + value_name = "METHOD", + possible_values = &WasmExecutionMethod::variants(), + case_insensitive = true, + default_value = "Compiled" + )] + pub wasm_method: WasmExecutionMethod, + + /// The number of 64KB pages to allocate for Wasm execution. Defaults to + /// sc_service::Configuration.default_heap_pages. + #[structopt(long)] + pub heap_pages: Option<u64>, + + /// The block hash at which to read state. This is required for execute-block, offchain-worker, + /// or any command that uses the live subcommand. + #[structopt( + short, + long, + multiple = false, + parse(try_from_str = parse::hash), + required_ifs( + &[("command", "offchain-worker"), ("command", "execute-block"), ("subcommand", "live")] + ) + )] + block_at: String, + + /// Whether or not to overwrite the code from state with the code from + /// the specified chain spec. + #[structopt(long)] + pub overwrite_code: bool, + + /// The URL to connect to. + // TODO: having this as a shared param is a temporary hack; the URL is used just + // to get the header/block. We should try and get that out of state, OR allow + // the user to feed in a header/block via file.
+ // https://github.com/paritytech/substrate/issues/9027 + #[structopt(short, long, default_value = "ws://localhost:9944", parse(try_from_str = parse::url))] + url: String, +} + +impl SharedParams { + /// Get the configured value of `block_at`, interpreted as the hash type of `Block`. + pub fn block_at(&self) -> sc_cli::Result + where + Block: BlockT, + ::Hash: FromStr, + <::Hash as FromStr>::Err: Debug, + { + self.block_at + .parse::<::Hash>() + .map_err(|e| format!("Could not parse block hash: {:?}", e).into()) + } +} + +/// Various commands to try out against runtime state at a specific block. +#[derive(Debug, Clone, structopt::StructOpt)] +pub struct TryRuntimeCmd { + #[structopt(flatten)] + pub shared: SharedParams, + + #[structopt(subcommand)] + pub command: Command, +} + +/// The source of runtime state to try operations against. +#[derive(Debug, Clone, structopt::StructOpt)] +pub enum State { + /// Use a state snapshot as the source of runtime state. NOTE: for the offchain-worker and + /// execute-block command this is only partially supported and requires a archive node url. + Snap { + #[structopt(short, long)] + snapshot_path: PathBuf, + }, + + /// Use a live chain as the source of runtime state. + Live { + /// An optional state snapshot file to WRITE to. Not written if set to `None`. + #[structopt(short, long)] + snapshot_path: Option, + + /// The modules to scrape. If empty, entire chain state will be scraped. + #[structopt(short, long, require_delimiter = true)] + modules: Option>, + }, +} + +async fn on_runtime_upgrade( + shared: SharedParams, + command: OnRuntimeUpgradeCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let wasm_method = shared.wasm_method; + let execution = shared.execution; + let heap_pages = shared.heap_pages.or(config.default_heap_pages); + + let mut changes = Default::default(); + let max_runtime_instances = config.max_runtime_instances; + let executor = NativeElseWasmExecutor::::new( + wasm_method.into(), + heap_pages, + max_runtime_instances, + ); + + check_spec_name::(shared.url.clone(), config.chain_spec.name().to_string()).await; + + let ext = { + let builder = match command.state { + State::Snap { snapshot_path } => + Builder::::new().mode(Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig::new(snapshot_path), + })), + State::Live { snapshot_path, modules } => + Builder::::new().mode(Mode::Online(OnlineConfig { + transport: shared.url.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.to_owned().unwrap_or_default(), + at: Some(shared.block_at::()?), + ..Default::default() + })), + }; + + let (code_key, code) = extract_code(config.chain_spec)?; + builder + .inject_key_value(&[(code_key, code)]) + .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()) + .build() + .await? 
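// Aside (not part of the patch): the hashed key injected above is FRAME's
// standard storage key for `System::LastRuntimeUpgrade`, i.e. the twox-128
// hashes of the pallet and item names concatenated:
fn last_runtime_upgrade_key() -> Vec<u8> {
    [twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()
}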
+ }; + + let encoded_result = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + &executor, + "TryRuntime_on_runtime_upgrade", + &[], + ext.extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade': {:?}", e))?; + + let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode output: {:?}", e))?; + log::info!( + "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = {}, total weight = {} ({})", + weight, + total_weight, + weight as f64 / total_weight as f64 + ); + + Ok(()) +} + +async fn offchain_worker( + shared: SharedParams, + command: OffchainWorkerCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + Block::Header: serde::de::DeserializeOwned, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let wasm_method = shared.wasm_method; + let execution = shared.execution; + let heap_pages = shared.heap_pages.or(config.default_heap_pages); + + let mut changes = Default::default(); + let max_runtime_instances = config.max_runtime_instances; + let executor = NativeElseWasmExecutor::::new( + wasm_method.into(), + heap_pages, + max_runtime_instances, + ); + + check_spec_name::(shared.url.clone(), config.chain_spec.name().to_string()).await; + + let mode = match command.state { + State::Live { snapshot_path, modules } => { + let at = shared.block_at::()?; + let online_config = OnlineConfig { + transport: shared.url.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.to_owned().unwrap_or_default(), + at: Some(at), + ..Default::default() + }; + + Mode::Online(online_config) + }, + State::Snap { snapshot_path } => { + let mode = + Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path) }); + + mode + }, + }; + let builder = Builder::::new() + .mode(mode) + .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); + let mut ext = if shared.overwrite_code { + let (code_key, code) = extract_code(config.chain_spec)?; + builder.inject_key_value(&[(code_key, code)]).build().await? + } else { + builder.inject_hashed_key(well_known_keys::CODE).build().await? 
+ }; + + let (offchain, _offchain_state) = TestOffchainExt::new(); + let (pool, _pool_state) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); + ext.register_extension(TransactionPoolExt::new(pool)); + + let header_hash = shared.block_at::()?; + let header = rpc_api::get_header::(shared.url, header_hash).await?; + + let _ = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + &executor, + "OffchainWorkerApi_offchain_worker", + header.encode().as_ref(), + ext.extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'OffchainWorkerApi_offchain_worker': {:?}", e))?; + + log::info!("OffchainWorkerApi_offchain_worker executed without errors."); + + Ok(()) +} + +async fn execute_block( + shared: SharedParams, + command: ExecuteBlockCmd, + config: Configuration, +) -> sc_cli::Result<()> +where + Block: BlockT + serde::de::DeserializeOwned, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, +{ + let wasm_method = shared.wasm_method; + let execution = shared.execution; + let heap_pages = shared.heap_pages.or(config.default_heap_pages); + + let mut changes = Default::default(); + let max_runtime_instances = config.max_runtime_instances; + let executor = NativeElseWasmExecutor::::new( + wasm_method.into(), + heap_pages, + max_runtime_instances, + ); + + let block_hash = shared.block_at::()?; + let block: Block = rpc_api::get_block::(shared.url.clone(), block_hash).await?; + + check_spec_name::(shared.url.clone(), config.chain_spec.name().to_string()).await; + + let mode = match command.state { + State::Snap { snapshot_path } => { + let mode = + Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path) }); + + mode + }, + State::Live { snapshot_path, modules } => { + let parent_hash = block.header().parent_hash(); + + let mode = Mode::Online(OnlineConfig { + transport: shared.url.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.to_owned().unwrap_or_default(), + at: Some(parent_hash.to_owned()), + ..Default::default() + }); + + mode + }, + }; + + let ext = { + let builder = Builder::::new() + .mode(mode) + .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()); + let mut ext = if shared.overwrite_code { + let (code_key, code) = extract_code(config.chain_spec)?; + builder.inject_key_value(&[(code_key, code)]).build().await? + } else { + builder.inject_hashed_key(well_known_keys::CODE).build().await? + }; + + // register externality extensions in order to provide host interface for OCW to the + // runtime. + let (offchain, _offchain_state) = TestOffchainExt::new(); + let (pool, _pool_state) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(KeystoreExt(Arc::new(KeyStore::new()))); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext + }; + + // A digest item gets added when the runtime is processing the block, so we need to pop + // the last one to be consistent with what a gossiped block would contain. 
+ let (mut header, extrinsics) = block.deconstruct(); + header.digest_mut().pop(); + let block = Block::new(header, extrinsics); + + let _encoded_result = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + &executor, + "Core_execute_block", + block.encode().as_ref(), + ext.extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'Core_execute_block': {:?}", e))?; + debug_assert!(_encoded_result == vec![1]); + + log::info!("Core_execute_block executed without errors."); + + Ok(()) +} + +impl TryRuntimeCmd { + pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> + where + Block: BlockT + serde::de::DeserializeOwned, + Block::Header: serde::de::DeserializeOwned, + Block::Hash: FromStr, + ::Err: Debug, + NumberFor: FromStr, + as FromStr>::Err: Debug, + ExecDispatch: NativeExecutionDispatch + 'static, + { + match &self.command { + Command::OnRuntimeUpgrade(ref cmd) => + on_runtime_upgrade::(self.shared.clone(), cmd.clone(), config) + .await, + Command::OffchainWorker(cmd) => + offchain_worker::(self.shared.clone(), cmd.clone(), config) + .await, + Command::ExecuteBlock(cmd) => + execute_block::(self.shared.clone(), cmd.clone(), config).await, + } + } +} + +impl CliConfiguration for TryRuntimeCmd { + fn shared_params(&self) -> &sc_cli::SharedParams { + &self.shared.shared_params + } + + fn chain_id(&self, _is_dev: bool) -> sc_cli::Result { + Ok(match self.shared.shared_params.chain { + Some(ref chain) => chain.clone(), + None => "dev".into(), + }) + } +} + +/// Extract `:code` from the given chain spec and return as `StorageData` along with the +/// corresponding `StorageKey`. +fn extract_code(spec: Box) -> sc_cli::Result<(StorageKey, StorageData)> { + let genesis_storage = spec.build_storage()?; + let code = StorageData( + genesis_storage + .top + .get(well_known_keys::CODE) + .expect("code key must exist in genesis storage; qed") + .to_vec(), + ); + let code_key = StorageKey(well_known_keys::CODE.to_vec()); + + Ok((code_key, code)) +} + +/// Check the spec_name of an `ext` +/// +/// If the version does not exist, or if it does not match with the given, it emits a warning. +async fn check_spec_name( + uri: String, + expected_spec_name: String, +) { + let expected_spec_name = expected_spec_name.to_lowercase(); + match remote_externalities::rpc_api::get_runtime_version::(uri.clone(), None) + .await + .map(|version| String::from(version.spec_name.clone())) + .map(|spec_name| spec_name.to_lowercase()) + { + Ok(spec) if spec == expected_spec_name => { + log::debug!("found matching spec name: {:?}", spec); + }, + Ok(spec) => { + log::warn!( + "version mismatch: remote spec name: '{}', expected (local chain spec, aka. `--chain`): '{}'", + spec, + expected_spec_name, + ); + }, + Err(why) => { + log::error!("failed to fetch runtime version from {}: {:?}", uri, why); + }, + } +} diff --git a/utils/frame/try-runtime/cli/src/parse.rs b/utils/frame/try-runtime/cli/src/parse.rs new file mode 100644 index 0000000000000..7f205fbacd310 --- /dev/null +++ b/utils/frame/try-runtime/cli/src/parse.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utils for parsing user input. + +pub(crate) fn hash(block_hash: &str) -> Result<String, String> { + let (block_hash, offset) = + if block_hash.starts_with("0x") { (&block_hash[2..], 2) } else { (block_hash, 0) }; + + if let Some(pos) = block_hash.chars().position(|c| !c.is_ascii_hexdigit()) { + Err(format!( + "Expected block hash, found illegal hex character at position: {}", + offset + pos, + )) + } else { + Ok(block_hash.into()) + } +} + +pub(crate) fn url(s: &str) -> Result<String, &'static str> { + if s.starts_with("ws://") || s.starts_with("wss://") { + // could use the `url` crate as well, but let's keep it simple for now. + Ok(s.to_string()) + } else { + Err("not a valid WS(S) URL: must start with 'ws://' or 'wss://'") + } +} diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 9eed7a2fdcfcd..4a6cec2cac774 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Endpoint to expose Prometheus metrics" name = "substrate-prometheus-endpoint" -version = "0.8.0" +version = "0.9.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" @@ -14,11 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus = { version = "0.10.0", default-features = false } -futures-util = { version = "0.3.1", default-features = false, features = ["io"] } +prometheus = { version = "0.11.0", default-features = false } +futures-util = { version = "0.3.17", default-features = false, features = ["io"] } derive_more = "0.99" - -[target.'cfg(not(target_os = "unknown"))'.dependencies] async-std = { version = "1.6.5", features = ["unstable"] } -hyper = { version = "0.13.1", default-features = false, features = ["stream"] } -tokio = "0.2" +tokio = "1.10" +hyper = { version = "0.14.11", default-features = false, features = ["http1", "server", "tcp"] } + +[dev-dependencies] +hyper = { version = "0.14.11", features = ["client"] } +tokio = { version = "1.10", features = ["rt-multi-thread"] } diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs index be7050a8a0736..f81b82cb17646 100644 --- a/utils/prometheus/src/lib.rs +++ b/utils/prometheus/src/lib.rs @@ -1,150 +1,183 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. - -use futures_util::{FutureExt, future::Future}; +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use futures_util::future::Future; +use hyper::{ + http::StatusCode, + server::Server, + service::{make_service_fn, service_fn}, + Body, Request, Response, +}; pub use prometheus::{ self, - Registry, Error as PrometheusError, Opts, - Histogram, HistogramOpts, HistogramVec, - exponential_buckets, core::{ - GenericGauge as Gauge, GenericCounter as Counter, - GenericGaugeVec as GaugeVec, GenericCounterVec as CounterVec, - AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, - } + AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, GenericCounter as Counter, + GenericCounterVec as CounterVec, GenericGauge as Gauge, GenericGaugeVec as GaugeVec, + }, + exponential_buckets, Error as PrometheusError, Histogram, HistogramOpts, HistogramVec, Opts, + Registry, }; -use prometheus::{Encoder, TextEncoder, core::Collector}; +use prometheus::{core::Collector, Encoder, TextEncoder}; use std::net::SocketAddr; -#[cfg(not(target_os = "unknown"))] mod networking; mod sourced; -pub use sourced::{SourcedCounter, SourcedGauge, MetricSource}; - -#[cfg(target_os = "unknown")] -pub use unknown_os::init_prometheus; -#[cfg(not(target_os = "unknown"))] -pub use known_os::init_prometheus; +pub use sourced::{MetricSource, SourcedCounter, SourcedGauge, SourcedMetric}; -pub fn register(metric: T, registry: &Registry) -> Result { +pub fn register( + metric: T, + registry: &Registry, +) -> Result { registry.register(Box::new(metric.clone()))?; Ok(metric) } -// On WASM `init_prometheus` becomes a no-op. -#[cfg(target_os = "unknown")] -mod unknown_os { - use super::*; - - pub enum Error {} +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum Error { + /// Hyper internal error. + Hyper(hyper::Error), + /// Http request error. + Http(hyper::http::Error), + /// i/o error. + Io(std::io::Error), + #[display(fmt = "Prometheus port {} already in use.", _0)] + PortInUse(SocketAddr), +} - pub async fn init_prometheus(_: SocketAddr, _registry: Registry) -> Result<(), Error> { - Ok(()) +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Hyper(error) => Some(error), + Error::Http(error) => Some(error), + Error::Io(error) => Some(error), + Error::PortInUse(_) => None, + } } } -#[cfg(not(target_os = "unknown"))] -mod known_os { - use super::*; - use hyper::http::StatusCode; - use hyper::{Server, Body, Request, Response, service::{service_fn, make_service_fn}}; - - #[derive(Debug, derive_more::Display, derive_more::From)] - pub enum Error { - /// Hyper internal error. - Hyper(hyper::Error), - /// Http request error. - Http(hyper::http::Error), - /// i/o error. 
- Io(std::io::Error), - #[display(fmt = "Prometheus port {} already in use.", _0)] - PortInUse(SocketAddr) +async fn request_metrics(req: Request, registry: Registry) -> Result, Error> { + if req.uri().path() == "/metrics" { + let metric_families = registry.gather(); + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + encoder.encode(&metric_families, &mut buffer).unwrap(); + + Response::builder() + .status(StatusCode::OK) + .header("Content-Type", encoder.format_type()) + .body(Body::from(buffer)) + .map_err(Error::Http) + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::from("Not found.")) + .map_err(Error::Http) } +} - impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Hyper(error) => Some(error), - Error::Http(error) => Some(error), - Error::Io(error) => Some(error), - Error::PortInUse(_) => None - } - } +#[derive(Clone)] +pub struct Executor; + +impl hyper::rt::Executor for Executor +where + T: Future + Send + 'static, + T::Output: Send + 'static, +{ + fn execute(&self, future: T) { + async_std::task::spawn(future); } +} - async fn request_metrics(req: Request, registry: Registry) -> Result, Error> { - if req.uri().path() == "/metrics" { - let metric_families = registry.gather(); - let mut buffer = vec![]; - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - Response::builder().status(StatusCode::OK) - .header("Content-Type", encoder.format_type()) - .body(Body::from(buffer)) - .map_err(Error::Http) - } else { - Response::builder().status(StatusCode::NOT_FOUND) - .body(Body::from("Not found.")) - .map_err(Error::Http) - } +/// Initializes the metrics context, and starts an HTTP server +/// to serve metrics. +pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error> { + let listener = async_std::net::TcpListener::bind(&prometheus_addr) + .await + .map_err(|_| Error::PortInUse(prometheus_addr))?; - } + init_prometheus_with_listener(listener, registry).await +} + +/// Init prometheus using the given listener. +async fn init_prometheus_with_listener( + listener: async_std::net::TcpListener, + registry: Registry, +) -> Result<(), Error> { + use networking::Incoming; + + log::info!("〽️ Prometheus exporter started at {}", listener.local_addr()?); - #[derive(Clone)] - pub struct Executor; + let service = make_service_fn(move |_| { + let registry = registry.clone(); - impl hyper::rt::Executor for Executor - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - fn execute(&self, future: T) { - async_std::task::spawn(future); + async move { + Ok::<_, hyper::Error>(service_fn(move |req: Request| { + request_metrics(req, registry.clone()) + })) } - } + }); - /// Initializes the metrics context, and starts an HTTP server - /// to serve metrics. 
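// Usage sketch for the new API above (aside; the metric name is illustrative
// and 9615 is merely the conventional Substrate metrics port):
async fn serve_metrics_example() -> Result<(), Error> {
    let registry = Registry::default();
    let requests = register(
        Counter::<U64>::new("example_requests_total", "Total requests served")
            .expect("name and help are valid"),
        &registry,
    )
    .expect("first registration on a fresh registry cannot clash");
    requests.inc();
    // Serves `GET /metrics` until the future is dropped or an error occurs.
    init_prometheus("127.0.0.1:9615".parse().expect("valid socket address"), registry).await
}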
- pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error>{ - use networking::Incoming; - let listener = async_std::net::TcpListener::bind(&prometheus_addr) - .await - .map_err(|_| Error::PortInUse(prometheus_addr))?; + let server = Server::builder(Incoming(listener.incoming())).executor(Executor).serve(service); - log::info!("〽️ Prometheus server started at {}", prometheus_addr); + let result = server.await.map_err(Into::into); - let service = make_service_fn(move |_| { - let registry = registry.clone(); + result +} - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request| { - request_metrics(req, registry.clone()) - })) - } - }); +#[cfg(test)] +mod tests { + use super::*; + use hyper::{Client, Uri}; + use std::convert::TryFrom; - let server = Server::builder(Incoming(listener.incoming())) - .executor(Executor) - .serve(service) - .boxed(); + #[test] + fn prometheus_works() { + const METRIC_NAME: &str = "test_test_metric_name_test_test"; - let result = server.await.map_err(Into::into); + let runtime = tokio::runtime::Runtime::new().expect("Creates the runtime"); - result + let listener = runtime + .block_on(async_std::net::TcpListener::bind("127.0.0.1:0")) + .expect("Creates listener"); + + let local_addr = listener.local_addr().expect("Returns the local addr"); + + let registry = Registry::default(); + register( + prometheus::Counter::new(METRIC_NAME, "yeah").expect("Creates test counter"), + ®istry, + ) + .expect("Registers the test metric"); + + runtime.spawn(init_prometheus_with_listener(listener, registry)); + + runtime.block_on(async { + let client = Client::new(); + + let res = client + .get(Uri::try_from(&format!("http://{}/metrics", local_addr)).expect("Parses URI")) + .await + .expect("Requests metrics"); + + let buf = hyper::body::to_bytes(res).await.expect("Converts body to bytes"); + + let body = String::from_utf8(buf.to_vec()).expect("Converts body to String"); + assert!(body.contains(&format!("{} 0", METRIC_NAME))); + }); } } diff --git a/utils/prometheus/src/networking.rs b/utils/prometheus/src/networking.rs index 92b9fedf6c79a..a24216bd23629 100644 --- a/utils/prometheus/src/networking.rs +++ b/utils/prometheus/src/networking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +16,11 @@ // limitations under the License. 
use async_std::pin::Pin; -use std::task::{Poll, Context}; -use futures_util::{stream::Stream, io::{AsyncRead, AsyncWrite}}; +use futures_util::{ + io::{AsyncRead, AsyncWrite}, + stream::Stream, +}; +use std::task::{Context, Poll}; pub struct Incoming<'a>(pub async_std::net::Incoming<'a>); @@ -25,7 +28,10 @@ impl hyper::server::accept::Accept for Incoming<'_> { type Conn = TcpStream; type Error = async_std::io::Error; - fn poll_accept(self: Pin<&mut Self>, cx: &mut Context) -> Poll>> { + fn poll_accept( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll>> { Pin::new(&mut Pin::into_inner(self).0) .poll_next(cx) .map(|opt| opt.map(|res| res.map(TcpStream))) @@ -38,10 +44,11 @@ impl tokio::io::AsyncRead for TcpStream { fn poll_read( self: Pin<&mut Self>, cx: &mut Context, - buf: &mut [u8] - ) -> Poll> { + buf: &mut tokio::io::ReadBuf<'_>, + ) -> Poll> { Pin::new(&mut Pin::into_inner(self).0) - .poll_read(cx, buf) + .poll_read(cx, buf.initialize_unfilled()) + .map_ok(|s| buf.set_filled(s)) } } @@ -49,19 +56,16 @@ impl tokio::io::AsyncWrite for TcpStream { fn poll_write( self: Pin<&mut Self>, cx: &mut Context, - buf: &[u8] + buf: &[u8], ) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_write(cx, buf) + Pin::new(&mut Pin::into_inner(self).0).poll_write(cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_flush(cx) + Pin::new(&mut Pin::into_inner(self).0).poll_flush(cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_close(cx) + Pin::new(&mut Pin::into_inner(self).0).poll_close(cx) } } diff --git a/utils/prometheus/src/sourced.rs b/utils/prometheus/src/sourced.rs index 58f60e4969bb8..ca37eef021f68 100644 --- a/utils/prometheus/src/sourced.rs +++ b/utils/prometheus/src/sourced.rs @@ -1,23 +1,26 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Metrics that are collected from existing sources. 
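// Hedged usage sketch: a `MetricSource` reads its value from existing state
// on every scrape instead of storing it in the registry. `QueueLen` and its
// field are illustrative only, written against the trait defined in this file.
#[derive(Clone)]
struct QueueLen(std::sync::Arc<std::sync::atomic::AtomicU64>);

impl MetricSource for QueueLen {
    type N = u64;

    fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) {
        // No label values; report the current queue length.
        set(&[], self.0.load(std::sync::atomic::Ordering::Relaxed));
    }
}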
-use prometheus::core::{Collector, Desc, Describer, Number, Opts}; -use prometheus::proto; +use prometheus::{ + core::{Collector, Desc, Describer, Number, Opts}, + proto, +}; use std::{cmp::Ordering, marker::PhantomData}; /// A counter whose values are obtained from an existing source. @@ -79,35 +82,42 @@ impl Collector for SourcedMetric { let mut c = proto::Counter::default(); c.set_value(value.into_f64()); m.set_counter(c); - } + }, proto::MetricType::GAUGE => { let mut g = proto::Gauge::default(); g.set_value(value.into_f64()); m.set_gauge(g); - } + }, t => { log::error!("Unsupported sourced metric type: {:?}", t); - } + }, } debug_assert_eq!(self.desc.variable_labels.len(), label_values.len()); match self.desc.variable_labels.len().cmp(&label_values.len()) { - Ordering::Greater => - log::warn!("Missing label values for sourced metric {}", self.desc.fq_name), - Ordering::Less => - log::warn!("Too many label values for sourced metric {}", self.desc.fq_name), - Ordering::Equal => {} + Ordering::Greater => { + log::warn!("Missing label values for sourced metric {}", self.desc.fq_name) + }, + Ordering::Less => { + log::warn!("Too many label values for sourced metric {}", self.desc.fq_name) + }, + Ordering::Equal => {}, } - m.set_label(self.desc.variable_labels.iter().zip(label_values) - .map(|(l_name, l_value)| { - let mut l = proto::LabelPair::default(); - l.set_name(l_name.to_string()); - l.set_value(l_value.to_string()); - l - }) - .chain(self.desc.const_label_pairs.iter().cloned()) - .collect::>()); + m.set_label( + self.desc + .variable_labels + .iter() + .zip(label_values) + .map(|(l_name, l_value)| { + let mut l = proto::LabelPair::default(); + l.set_name(l_name.to_string()); + l.set_value(l_value.to_string()); + l + }) + .chain(self.desc.const_label_pairs.iter().cloned()) + .collect::>(), + ); counters.push(m); }); @@ -129,11 +139,15 @@ pub trait SourcedType: private::Sealed + Sync + Send { } impl SourcedType for Counter { - fn proto() -> proto::MetricType { proto::MetricType::COUNTER } + fn proto() -> proto::MetricType { + proto::MetricType::COUNTER + } } impl SourcedType for Gauge { - fn proto() -> proto::MetricType { proto::MetricType::GAUGE } + fn proto() -> proto::MetricType { + proto::MetricType::GAUGE + } } mod private { diff --git a/utils/wasm-builder-runner/Cargo.toml b/utils/wasm-builder-runner/Cargo.toml deleted file mode 100644 index 2c54a5ec3a4d6..0000000000000 --- a/utils/wasm-builder-runner/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "substrate-wasm-builder-runner" -version = "2.0.0" -authors = ["Parity Technologies "] -description = "Runner for substrate-wasm-builder" -edition = "2018" -readme = "README.md" -repository = "https://github.com/paritytech/substrate/" -license = "Apache-2.0" -homepage = "https://substrate.dev" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] diff --git a/utils/wasm-builder-runner/README.md b/utils/wasm-builder-runner/README.md deleted file mode 100644 index 1b9e2b08ca444..0000000000000 --- a/utils/wasm-builder-runner/README.md +++ /dev/null @@ -1,12 +0,0 @@ -## WASM builder runner - -Since cargo contains many bugs when it comes to correct dependency and feature -resolution, we need this little tool. See for -more information. - -It will create a project that will call `substrate-wasm-builder` to prevent any dependencies -from `substrate-wasm-builder` influencing the main project's dependencies. 
- -For more information see - -License: GPL-3.0 diff --git a/utils/wasm-builder-runner/src/lib.rs b/utils/wasm-builder-runner/src/lib.rs deleted file mode 100644 index 04e06495c69b4..0000000000000 --- a/utils/wasm-builder-runner/src/lib.rs +++ /dev/null @@ -1,498 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # WASM builder runner -//! -//! Since cargo contains many bugs when it comes to correct dependency and feature -//! resolution, we need this little tool. See for -//! more information. -//! -//! It will create a project that will call `substrate-wasm-builder` to prevent any dependencies -//! from `substrate-wasm-builder` influencing the main project's dependencies. -//! -//! For more information see - -use std::{ - env, process::{Command, self}, fs, path::{PathBuf, Path}, hash::{Hash, Hasher}, - collections::hash_map::DefaultHasher, -}; - -/// Environment variable that tells us to skip building the WASM binary. -const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; - -/// Environment variable that tells us to create a dummy WASM binary. -/// -/// This is useful for `cargo check` to speed-up the compilation. -/// -/// # Caution -/// -/// Enabling this option will just provide `&[]` as WASM binary. -const DUMMY_WASM_BINARY_ENV: &str = "BUILD_DUMMY_WASM_BINARY"; - -/// Environment variable that makes sure the WASM build is triggered. -const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; - -/// Replace all backslashes with slashes. -fn replace_back_slashes(path: T) -> String { - path.to_string().replace("\\", "/") -} - -/// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. -fn get_manifest_dir() -> PathBuf { - env::var("CARGO_MANIFEST_DIR") - .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") - .into() -} - -/// First step of the [`WasmBuilder`] to select the project to build. -pub struct WasmBuilderSelectProject { - /// This parameter just exists to make it impossible to construct - /// this type outside of this crate. - _ignore: (), -} - -impl WasmBuilderSelectProject { - /// Use the current project as project for building the WASM binary. - /// - /// # Panics - /// - /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable - /// is always set by `Cargo` in `build.rs` files. - pub fn with_current_project(self) -> WasmBuilderSelectSource { - WasmBuilderSelectSource(get_manifest_dir().join("Cargo.toml")) - } - - /// Use the given `path` as project for building the WASM binary. - /// - /// Returns an error if the given `path` does not points to a `Cargo.toml`. 
- pub fn with_project( - self, - path: impl Into, - ) -> Result { - let path = path.into(); - - if path.ends_with("Cargo.toml") { - Ok(WasmBuilderSelectSource(path)) - } else { - Err("Project path must point to the `Cargo.toml` of the project") - } - } -} - -/// Second step of the [`WasmBuilder`] to set the source of the `wasm-builder`. -pub struct WasmBuilderSelectSource(PathBuf); - -impl WasmBuilderSelectSource { - /// Use the given `path` as source for `wasm-builder`. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. - pub fn with_wasm_builder_from_path(self, path: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Path(path), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `repo` and `rev` as source for `wasm-builder`. - pub fn with_wasm_builder_from_git(self, repo: &'static str, rev: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Git { repo, rev }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io. - pub fn with_wasm_builder_from_crates(self, version: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Crates(version), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io or use - /// the given `path` as source. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. - pub fn with_wasm_builder_from_crates_or_path( - self, - version: &'static str, - path: &'static str, - ) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::CratesOrPath { version, path }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `source` as source for `wasm-builder`. - pub fn with_wasm_builder_source(self, source: WasmBuilderSource) -> WasmBuilder { - WasmBuilder { - source, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } -} - -/// The builder for building a wasm binary. -/// -/// The builder itself is seperated into multiple structs to make the setup type safe. -/// -/// Building a wasm binary: -/// -/// 1. Call [`WasmBuilder::new`] to create a new builder. -/// 2. Select the project to build using the methods of [`WasmBuilderSelectProject`]. -/// 3. Select the source of the `wasm-builder` crate using the methods of -/// [`WasmBuilderSelectSource`]. -/// 4. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code -/// using methods of [`WasmBuilder`]. -/// 5. Build the WASM binary using [`Self::build`]. -pub struct WasmBuilder { - /// Where should we pull the `wasm-builder` crate from. - source: WasmBuilderSource, - /// Flags that should be appended to `RUST_FLAGS` env variable. - rust_flags: Vec, - /// The name of the file that is being generated in `OUT_DIR`. - /// - /// Defaults to `wasm_binary.rs`. - file_name: Option, - /// The path to the `Cargo.toml` of the project that should be build - /// for wasm. - project_cargo_toml: PathBuf, -} - -impl WasmBuilder { - /// Create a new instance of the builder. - pub fn new() -> WasmBuilderSelectProject { - WasmBuilderSelectProject { - _ignore: (), - } - } - - /// Enable exporting `__heap_base` as global variable in the WASM binary. 
- /// - /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. - pub fn export_heap_base(mut self) -> Self { - self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); - self - } - - /// Set the name of the file that will be generated in `OUT_DIR`. - /// - /// This file needs to be included to get access to the build WASM binary. - /// - /// If this function is not called, `file_name` defaults to `wasm_binary.rs` - pub fn set_file_name(mut self, file_name: impl Into) -> Self { - self.file_name = Some(file_name.into()); - self - } - - /// Instruct the linker to import the memory into the WASM binary. - /// - /// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`. - pub fn import_memory(mut self) -> Self { - self.rust_flags.push("-C link-arg=--import-memory".into()); - self - } - - /// Append the given `flag` to `RUST_FLAGS`. - /// - /// `flag` is appended as is, so it needs to be a valid flag. - pub fn append_to_rust_flags(mut self, flag: impl Into) -> Self { - self.rust_flags.push(flag.into()); - self - } - - /// Build the WASM binary. - pub fn build(self) { - let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); - let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); - - if check_skip_build() { - // If we skip the build, we still want to make sure to be called when an env variable - // changes - generate_rerun_if_changed_instructions(); - - provide_dummy_wasm_binary(&file_path, true); - - return; - } - - // Hash the path to the project cargo toml. - let mut hasher = DefaultHasher::new(); - self.project_cargo_toml.hash(&mut hasher); - - let project_name = env::var("CARGO_PKG_NAME").expect("`CARGO_PKG_NAME` is set by cargo!"); - // Make sure the `wasm-builder-runner` path is unique by concatenating the name of the - // project that is compiling the WASM binary with the hash of the path to the project that - // should be compiled as WASM binary. - let project_folder = get_workspace_root() - .join(format!("{}{}", project_name, hasher.finish())); - - if check_provide_dummy_wasm_binary() { - provide_dummy_wasm_binary(&file_path, false); - } else { - create_project( - &project_folder, - &file_path, - self.source, - &self.project_cargo_toml, - &self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect::(), - ); - run_project(&project_folder); - } - - // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't - // want to spam the output! - generate_rerun_if_changed_instructions(); - } -} - -/// The `wasm-builder` dependency source. -pub enum WasmBuilderSource { - /// The relative path to the source code from the current manifest dir. - Path(&'static str), - /// The git repository that contains the source code. - Git { - repo: &'static str, - rev: &'static str, - }, - /// Use the given version released on crates.io. - Crates(&'static str), - /// Use the given version released on crates.io or from the given path. - CratesOrPath { - version: &'static str, - path: &'static str, - } -} - -impl WasmBuilderSource { - /// Convert to a valid cargo source declaration. - /// - /// `absolute_path` - The manifest dir. 
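`to_cargo_source`, which turns these variants into cargo dependency declarations, follows next. For orientation, a hedged sketch of how a legacy `build.rs` would have walked the two selection steps above and picked the crates-or-path fallback; the version and path are illustrative, not from this diff:

```rust
// Hypothetical selection of a `WasmBuilderSource` variant through the
// fluent API above; only one such chain would appear in a real build.rs.
use wasm_builder_runner::WasmBuilder;

fn main() {
	WasmBuilder::new()
		.with_current_project()
		// Prefer the crates.io release, fall back to a local checkout.
		.with_wasm_builder_from_crates_or_path("2.0.1", "../../utils/wasm-builder")
		.export_heap_base()
		.import_memory()
		.build()
}
```

With `CratesOrPath`, the generated dependency line carries both `path` and `version`, letting cargo prefer the local path when it exists (see `to_cargo_source` below).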
- fn to_cargo_source(&self, manifest_dir: &Path) -> String { - match self { - WasmBuilderSource::Path(path) => { - replace_back_slashes(format!("path = \"{}\"", manifest_dir.join(path).display())) - } - WasmBuilderSource::Git { repo, rev } => { - format!("git = \"{}\", rev=\"{}\"", repo, rev) - } - WasmBuilderSource::Crates(version) => { - format!("version = \"{}\"", version) - } - WasmBuilderSource::CratesOrPath { version, path } => { - replace_back_slashes( - format!( - "path = \"{}\", version = \"{}\"", - manifest_dir.join(path).display(), - version - ) - ) - } - } - } -} - -/// Build the currently built project as WASM binary and extend `RUSTFLAGS` with the given rustflags. -/// -/// For more information, see [`build_current_project`]. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] -pub fn build_current_project_with_rustflags( - file_name: &str, - wasm_builder_source: WasmBuilderSource, - default_rust_flags: &str, -) { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_source(wasm_builder_source) - .append_to_rust_flags(default_rust_flags) - .set_file_name(file_name) - .build() -} - -/// Build the currently built project as WASM binary. -/// -/// The current project is determined using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name of the file being generated in the `OUT_DIR`. The file contains the -/// constant `WASM_BINARY` which contains the build wasm binary. -/// `wasm_builder_path` - Path to the wasm-builder project, relative to `CARGO_MANIFEST_DIR`. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] -pub fn build_current_project(file_name: &str, wasm_builder_source: WasmBuilderSource) { - #[allow(deprecated)] - build_current_project_with_rustflags(file_name, wasm_builder_source, ""); -} - -/// Returns the root path of the wasm-builder workspace. -/// -/// The wasm-builder workspace contains all wasm-builder's projects. -fn get_workspace_root() -> PathBuf { - let out_dir_env = env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!"); - let mut out_dir = PathBuf::from(&out_dir_env); - - loop { - match out_dir.parent() { - Some(parent) if out_dir.ends_with("build") => return parent.join("wbuild-runner"), - _ => if !out_dir.pop() { - break; - } - } - } - - panic!("Could not find target dir in: {}", out_dir_env) -} - -fn create_project( - project_folder: &Path, - file_path: &Path, - wasm_builder_source: WasmBuilderSource, - cargo_toml_path: &Path, - default_rustflags: &str, -) { - fs::create_dir_all(project_folder.join("src")) - .expect("WASM build runner dir create can not fail; qed"); - - write_file_if_changed( - project_folder.join("Cargo.toml"), - format!( - r#" - [package] - name = "wasm-build-runner-impl" - version = "1.0.0" - edition = "2018" - - [dependencies] - substrate-wasm-builder = {{ {wasm_builder_source} }} - - [workspace] - "#, - wasm_builder_source = wasm_builder_source.to_cargo_source(&get_manifest_dir()), - ), - ); - - write_file_if_changed( - project_folder.join("src/main.rs"), - format!( - r#" - //! This is automatically generated code by `substrate-wasm-builder`. 
- - use substrate_wasm_builder::build_project_with_default_rustflags; - - fn main() {{ - build_project_with_default_rustflags( - "{file_path}", - "{cargo_toml_path}", - "{default_rustflags}", - ) - }} - "#, - file_path = replace_back_slashes(file_path.display()), - cargo_toml_path = replace_back_slashes(cargo_toml_path.display()), - default_rustflags = default_rustflags, - ), - ); -} - -fn run_project(project_folder: &Path) { - let cargo = env::var("CARGO").expect("`CARGO` env variable is always set when executing `build.rs`."); - let mut cmd = Command::new(cargo); - cmd.arg("run").arg(format!("--manifest-path={}", project_folder.join("Cargo.toml").display())); - - if env::var("DEBUG") != Ok(String::from("true")) { - cmd.arg("--release"); - } - - // Make sure we always run the `wasm-builder` project for the `HOST` architecture. - let host_triple = env::var("HOST").expect("`HOST` is always set when executing `build.rs`."); - cmd.arg(&format!("--target={}", host_triple)); - - // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). - // The runner project is created in `CARGO_TARGET_DIR` and executing it will create a sub target - // directory inside of `CARGO_TARGET_DIR`. - cmd.env_remove("CARGO_TARGET_DIR"); - - if !cmd.status().map(|s| s.success()).unwrap_or(false) { - // Don't spam the output with backtraces when a build failed! - process::exit(1); - } -} - -/// Generate the name of the skip build environment variable for the current crate. -fn generate_crate_skip_build_env_name() -> String { - format!( - "SKIP_{}_WASM_BUILD", - env::var("CARGO_PKG_NAME").expect("Package name is set").to_uppercase().replace('-', "_"), - ) -} - -/// Checks if the build of the WASM binary should be skipped. -fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() -} - -/// Check if we should provide a dummy WASM binary. -fn check_provide_dummy_wasm_binary() -> bool { - env::var(DUMMY_WASM_BINARY_ENV).is_ok() -} - -/// Provide the dummy WASM binary -/// -/// If `skip_build` is `true`, it will only generate the wasm binary if it doesn't exist. -fn provide_dummy_wasm_binary(file_path: &Path, skip_build: bool) { - if !skip_build || !file_path.exists() { - write_file_if_changed( - file_path.into(), - "pub const WASM_BINARY: Option<&[u8]> = None;\ - pub const WASM_BINARY_BLOATY: Option<&[u8]> = None;".into(), - ); - } -} - -/// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is -/// rebuilt when needed. -fn generate_rerun_if_changed_instructions() { - // Make sure that the `build.rs` is called again if one of the following env variables changes. - println!("cargo:rerun-if-env-changed={}", SKIP_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", DUMMY_WASM_BINARY_ENV); - println!("cargo:rerun-if-env-changed={}", FORCE_WASM_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); -} - -/// Write to the given `file` if the `content` is different. 
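`write_file_if_changed` itself follows. As a side note on the skip logic above, the per-crate variable name is derived mechanically from `CARGO_PKG_NAME`; a self-contained sketch of that transformation:

```rust
// Mirrors `generate_crate_skip_build_env_name` above: upper-case the
// package name and replace `-` with `_`.
fn skip_env_name(pkg_name: &str) -> String {
	format!("SKIP_{}_WASM_BUILD", pkg_name.to_uppercase().replace('-', "_"))
}

fn main() {
	// e.g. the `node-runtime` crate is skipped via SKIP_NODE_RUNTIME_WASM_BUILD.
	assert_eq!(skip_env_name("node-runtime"), "SKIP_NODE_RUNTIME_WASM_BUILD");
}
```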
-fn write_file_if_changed(file: PathBuf, content: String) {
-	if fs::read_to_string(&file).ok().as_ref() != Some(&content) {
-		fs::write(&file, content).unwrap_or_else(|_| panic!("Writing `{}` can not fail!", file.display()));
-	}
-}
diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml
index e9dd1a97b89e4..721f332e130f0 100644
--- a/utils/wasm-builder/Cargo.toml
+++ b/utils/wasm-builder/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "substrate-wasm-builder"
-version = "2.0.1"
+version = "5.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Utility for building WASM binaries"
 edition = "2018"
@@ -14,12 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 build-helper = "0.1.1"
-cargo_metadata = "0.10.0"
+cargo_metadata = "0.13.1"
 tempfile = "3.1.0"
 toml = "0.5.4"
-walkdir = "2.3.1"
-fs2 = "0.4.3"
+walkdir = "2.3.2"
 wasm-gc-api = "0.1.11"
-atty = "0.2.13"
-itertools = "0.8.2"
 ansi_term = "0.12.1"
+sp-maybe-compressed-blob = { version = "4.0.0-dev", path = "../../primitives/maybe-compressed-blob" }
diff --git a/utils/wasm-builder/README.md b/utils/wasm-builder/README.md
index 1e24d2cebab32..3868faf1acab5 100644
--- a/utils/wasm-builder/README.md
+++ b/utils/wasm-builder/README.md
@@ -8,20 +8,23 @@ The Wasm builder is a tool that integrates the process of building the WASM bina
 A project that should be compiled as a Wasm binary needs to:
 
 1. Add a `build.rs` file.
-2. Add `substrate-wasm-builder` as dependency into `build-dependencies`.
+2. Add `wasm-builder` as a dependency into `build-dependencies`.
 
 The `build.rs` file needs to contain the following code:
 
 ```rust
-use wasm_builder_runner::{build_current_project, WasmBuilderSource};
+use substrate_wasm_builder::WasmBuilder;
 
 fn main() {
-    build_current_project(
-        // The name of the file being generated in out-dir.
-        "wasm_binary.rs",
-        // How to include wasm-builder, in this case from crates.io.
-        WasmBuilderSource::Crates("1.0.0"),
-    );
+    WasmBuilder::new()
+        // Tell the builder to build the project (crate) this `build.rs` is part of.
+        .with_current_project()
+        // Make sure to export the `heap_base` global; this is required by Substrate.
+        .export_heap_base()
+        // Build the Wasm file so that it imports the memory (it needs to be provided at instantiation).
+        .import_memory()
+        // Build it.
+        .build()
 }
 ```
 
@@ -32,9 +35,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
 ```
 
 This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`.
-The former is a compact Wasm binary and the latter is not compacted.
+The former is a compact Wasm binary and the latter is the Wasm binary as generated by the compiler.
+Both constants have the type `Option<&'static [u8]>`.
 
-### Feature
+### Features
 
 Wasm builder supports enabling cargo features while building the Wasm binary. By default it will
 enable all features in the wasm build that are enabled for the native build except the
@@ -46,19 +50,19 @@ Wasm binary. If this feature is not present, it will not be enabled.
 
 By using environment variables, you can configure which Wasm binaries are built and how:
 
-- `SKIP_WASM_BUILD` - Skips building any wasm binary. This is useful when only native should be recompiled.
-- `BUILD_DUMMY_WASM_BINARY` - Builds dummy wasm binaries. These dummy binaries are empty and useful
-                              for `cargo check` runs.
-- `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`.
+- `SKIP_WASM_BUILD` - Skips building any Wasm binary.
This is useful when only native should be recompiled. + If this is the first run and there doesn't exist a Wasm binary, this will set both + variables to `None`. +- `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are `release` or `debug`. By default the build type is equal to the build type used by the main build. -- `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable - needs to change. As wasm builder instructs `cargo` to watch for file changes +- `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the variable + needs to change. As wasm-builder instructs `cargo` to watch for file changes this environment variable should only be required in certain circumstances. - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -- `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs +- `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs to be absolute. -- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The +- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs new file mode 100644 index 0000000000000..113b5eb068da4 --- /dev/null +++ b/utils/wasm-builder/src/builder.rs @@ -0,0 +1,269 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{ + env, + path::{Path, PathBuf}, + process, +}; + +/// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. +fn get_manifest_dir() -> PathBuf { + env::var("CARGO_MANIFEST_DIR") + .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") + .into() +} + +/// First step of the [`WasmBuilder`] to select the project to build. +pub struct WasmBuilderSelectProject { + /// This parameter just exists to make it impossible to construct + /// this type outside of this crate. + _ignore: (), +} + +impl WasmBuilderSelectProject { + /// Use the current project as project for building the WASM binary. + /// + /// # Panics + /// + /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable + /// is always set by `Cargo` in `build.rs` files. + pub fn with_current_project(self) -> WasmBuilder { + WasmBuilder { + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: get_manifest_dir().join("Cargo.toml"), + features_to_enable: Vec::new(), + } + } + + /// Use the given `path` as project for building the WASM binary. 
+	///
+	/// Returns an error if the given `path` does not point to a `Cargo.toml`.
+	pub fn with_project(self, path: impl Into<PathBuf>) -> Result<WasmBuilder, &'static str> {
+		let path = path.into();
+
+		if path.ends_with("Cargo.toml") && path.exists() {
+			Ok(WasmBuilder {
+				rust_flags: Vec::new(),
+				file_name: None,
+				project_cargo_toml: path,
+				features_to_enable: Vec::new(),
+			})
+		} else {
+			Err("Project path must point to the `Cargo.toml` of the project")
+		}
+	}
+}
+
+/// The builder for building a wasm binary.
+///
+/// The builder itself is separated into multiple structs to make the setup type safe.
+///
+/// Building a wasm binary:
+///
+/// 1. Call [`WasmBuilder::new`] to create a new builder.
+/// 2. Select the project to build using the methods of [`WasmBuilderSelectProject`].
+/// 3. Set additional `RUST_FLAGS` or a different name for the file containing the WASM code
+///    using methods of [`WasmBuilder`].
+/// 4. Build the WASM binary using [`Self::build`].
+pub struct WasmBuilder {
+	/// Flags that should be appended to `RUST_FLAGS` env variable.
+	rust_flags: Vec<String>,
+	/// The name of the file that is being generated in `OUT_DIR`.
+	///
+	/// Defaults to `wasm_binary.rs`.
+	file_name: Option<String>,
+	/// The path to the `Cargo.toml` of the project that should be built
+	/// for wasm.
+	project_cargo_toml: PathBuf,
+	/// Features that should be enabled when building the wasm binary.
+	features_to_enable: Vec<String>,
+}
+
+impl WasmBuilder {
+	/// Create a new instance of the builder.
+	pub fn new() -> WasmBuilderSelectProject {
+		WasmBuilderSelectProject { _ignore: () }
+	}
+
+	/// Enable exporting `__heap_base` as global variable in the WASM binary.
+	///
+	/// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`.
+	pub fn export_heap_base(mut self) -> Self {
+		self.rust_flags.push("-Clink-arg=--export=__heap_base".into());
+		self
+	}
+
+	/// Set the name of the file that will be generated in `OUT_DIR`.
+	///
+	/// This file needs to be included to get access to the built WASM binary.
+	///
+	/// If this function is not called, `file_name` defaults to `wasm_binary.rs`.
+	pub fn set_file_name(mut self, file_name: impl Into<String>) -> Self {
+		self.file_name = Some(file_name.into());
+		self
+	}
+
+	/// Instruct the linker to import the memory into the WASM binary.
+	///
+	/// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`.
+	pub fn import_memory(mut self) -> Self {
+		self.rust_flags.push("-C link-arg=--import-memory".into());
+		self
+	}
+
+	/// Append the given `flag` to `RUST_FLAGS`.
+	///
+	/// `flag` is appended as is, so it needs to be a valid flag.
+	pub fn append_to_rust_flags(mut self, flag: impl Into<String>) -> Self {
+		self.rust_flags.push(flag.into());
+		self
+	}
+
+	/// Enable the given feature when building the wasm binary.
+	///
+	/// `feature` needs to be a valid feature that is defined in the project `Cargo.toml`.
+	pub fn enable_feature(mut self, feature: impl Into<String>) -> Self {
+		self.features_to_enable.push(feature.into());
+		self
+	}
+
+	/// Build the WASM binary.
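The `build` entry point itself follows. One difference from the README example: `with_project` returns a `Result`, so pointing the builder at another crate's manifest needs error handling. A hedged sketch of that variant; the manifest path, feature, and file name are placeholders, not from this diff:

```rust
// Hypothetical build.rs using the 5.0 builder above to compile a sibling
// runtime crate; path, feature, and file name are illustrative.
use substrate_wasm_builder::WasmBuilder;

fn main() {
	WasmBuilder::new()
		.with_project("../runtime/Cargo.toml")
		// `with_project` fails if the path is not an existing `Cargo.toml`.
		.expect("path must point to the runtime's `Cargo.toml`")
		.export_heap_base()
		.import_memory()
		.enable_feature("runtime-benchmarks")
		.set_file_name("runtime_wasm.rs")
		.build()
}
```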
+ pub fn build(self) { + let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); + let file_path = + out_dir.join(self.file_name.clone().unwrap_or_else(|| "wasm_binary.rs".into())); + + if check_skip_build() { + // If we skip the build, we still want to make sure to be called when an env variable + // changes + generate_rerun_if_changed_instructions(); + + provide_dummy_wasm_binary_if_not_exist(&file_path); + + return + } + + build_project( + file_path, + self.project_cargo_toml, + self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect(), + self.features_to_enable, + self.file_name, + ); + + // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't + // want to spam the output! + generate_rerun_if_changed_instructions(); + } +} + +/// Generate the name of the skip build environment variable for the current crate. +fn generate_crate_skip_build_env_name() -> String { + format!( + "SKIP_{}_WASM_BUILD", + env::var("CARGO_PKG_NAME") + .expect("Package name is set") + .to_uppercase() + .replace('-', "_"), + ) +} + +/// Checks if the build of the WASM binary should be skipped. +fn check_skip_build() -> bool { + env::var(crate::SKIP_BUILD_ENV).is_ok() || + env::var(generate_crate_skip_build_env_name()).is_ok() +} + +/// Provide a dummy WASM binary if there doesn't exist one. +fn provide_dummy_wasm_binary_if_not_exist(file_path: &Path) { + if !file_path.exists() { + crate::write_file_if_changed( + file_path, + "pub const WASM_BINARY: Option<&[u8]> = None;\ + pub const WASM_BINARY_BLOATY: Option<&[u8]> = None;", + ); + } +} + +/// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is +/// rebuilt when needed. +fn generate_rerun_if_changed_instructions() { + // Make sure that the `build.rs` is called again if one of the following env variables changes. + println!("cargo:rerun-if-env-changed={}", crate::SKIP_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", crate::FORCE_WASM_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); +} + +/// Build the currently built project as wasm binary. +/// +/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. +/// +/// `file_name` - The name + path of the file being generated. The file contains the +/// constant `WASM_BINARY`, which contains the built WASM binary. +/// +/// `project_cargo_toml` - The path to the `Cargo.toml` of the project that should be built. +/// +/// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. +/// +/// `features_to_enable` - Features that should be enabled for the project. +/// +/// `wasm_binary_name` - The optional wasm binary name that is extended with +/// +/// `.compact.compressed.wasm`. If `None`, the project name will be used. 
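`build_project`, which writes that file, follows next. On the consumer side, the pattern is the `include!` from the README; a common convenience is a small accessor that fails loudly when only the dummy file was generated. A sketch; the helper name and message are ours, not part of this diff:

```rust
// Sketch of consuming the generated file; `WASM_BINARY` is `None` when the
// build was skipped and only the dummy file exists.
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));

/// Illustrative accessor (name and message are assumptions).
pub fn wasm_binary_unwrap() -> &'static [u8] {
	WASM_BINARY.expect("wasm binary unavailable; was the build skipped via `SKIP_WASM_BUILD`?")
}
```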
+fn build_project(
+	file_name: PathBuf,
+	project_cargo_toml: PathBuf,
+	default_rustflags: String,
+	features_to_enable: Vec<String>,
+	wasm_binary_name: Option<String>,
+) {
+	let cargo_cmd = match crate::prerequisites::check() {
+		Ok(cmd) => cmd,
+		Err(err_msg) => {
+			eprintln!("{}", err_msg);
+			process::exit(1);
+		},
+	};
+
+	let (wasm_binary, bloaty) = crate::wasm_project::create_and_compile(
+		&project_cargo_toml,
+		&default_rustflags,
+		cargo_cmd,
+		features_to_enable,
+		wasm_binary_name,
+	);
+
+	let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary {
+		(wasm_binary.wasm_binary_path_escaped(), bloaty.wasm_binary_bloaty_path_escaped())
+	} else {
+		(bloaty.wasm_binary_bloaty_path_escaped(), bloaty.wasm_binary_bloaty_path_escaped())
+	};
+
+	crate::write_file_if_changed(
+		file_name,
+		format!(
+			r#"
+			pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}"));
+			pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}"));
+			"#,
+			wasm_binary = wasm_binary,
+			wasm_binary_bloaty = wasm_binary_bloaty,
+		),
+	);
+}
diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs
index aa63e9596e190..b13ecc4e4ab34 100644
--- a/utils/wasm-builder/src/lib.rs
+++ b/utils/wasm-builder/src/lib.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,28 +17,31 @@
 
 //! # Wasm builder is a utility for building a project as a Wasm binary
 //!
-//! The Wasm builder is a tool that integrates the process of building the WASM binary of your project into the main
-//! `cargo` build process.
+//! The Wasm builder is a tool that integrates the process of building the WASM binary of your
+//! project into the main `cargo` build process.
 //!
 //! ## Project setup
 //!
 //! A project that should be compiled as a Wasm binary needs to:
 //!
 //! 1. Add a `build.rs` file.
-//! 2. Add `substrate-wasm-builder` as dependency into `build-dependencies`.
+//! 2. Add `wasm-builder` as a dependency into `build-dependencies`.
 //!
 //! The `build.rs` file needs to contain the following code:
 //!
-//! ```ignore
-//! use wasm_builder_runner::{build_current_project, WasmBuilderSource};
+//! ```no_run
+//! use substrate_wasm_builder::WasmBuilder;
 //!
 //! fn main() {
-//!     build_current_project(
-//!         // The name of the file being generated in out-dir.
-//!         "wasm_binary.rs",
-//!         // How to include wasm-builder, in this case from crates.io.
-//!         WasmBuilderSource::Crates("1.0.0"),
-//!     );
+//!     WasmBuilder::new()
+//!         // Tell the builder to build the project (crate) this `build.rs` is part of.
+//!         .with_current_project()
+//!         // Make sure to export the `heap_base` global; this is required by Substrate.
+//!         .export_heap_base()
+//!         // Build the Wasm file so that it imports the memory (it needs to be provided at instantiation).
+//!         .import_memory()
+//!         // Build it.
+//!         .build()
 //! }
 //! ```
 //!
@@ -48,13 +51,14 @@
 //! include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
 //! ```
 //!
-//! This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`.
-//! The former is a compact Wasm binary and the latter is not compacted.
+//! This will include the generated Wasm binary as two constants `WASM_BINARY` and
+//! `WASM_BINARY_BLOATY`. The former is a compact Wasm binary and the latter is the Wasm binary as
+//!
being generated by the compiler. Both variables have `Option<&'static [u8]>` as type. //! //! ### Feature //! -//! Wasm builder supports to enable cargo features while building the Wasm binary. By default it will -//! enable all features in the wasm build that are enabled for the native build except the +//! Wasm builder supports to enable cargo features while building the Wasm binary. By default it +//! will enable all features in the wasm build that are enabled for the native build except the //! `default` and `std` features. Besides that, wasm builder supports the special `runtime-wasm` //! feature. This `runtime-wasm` feature will be enabled by the wasm builder when it compiles the //! Wasm binary. If this feature is not present, it will not be enabled. @@ -63,24 +67,26 @@ //! //! By using environment variables, you can configure which Wasm binaries are built and how: //! -//! - `SKIP_WASM_BUILD` - Skips building any wasm binary. This is useful when only native should be recompiled. -//! - `BUILD_DUMMY_WASM_BINARY` - Builds dummy wasm binaries. These dummy binaries are empty and useful -//! for `cargo check` runs. -//! - `WASM_BUILD_TYPE` - Sets the build type for building wasm binaries. Supported values are `release` or `debug`. -//! By default the build type is equal to the build type used by the main build. -//! - `FORCE_WASM_BUILD` - Can be set to force a wasm build. On subsequent calls the value of the variable -//! needs to change. As wasm builder instructs `cargo` to watch for file changes -//! this environment variable should only be required in certain circumstances. -//! - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. +//! - `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be +//! recompiled. If this is the first run and there doesn't exist a Wasm binary, this will set both +//! variables to `None`. +//! - `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are +//! `release` or `debug`. By default the build type is equal to the build type used by the main +//! build. +//! - `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the +//! variable needs to change. As wasm-builder instructs `cargo` to watch for file changes this +//! environment variable should only be required in certain circumstances. +//! - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm +//! binary. //! - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -//! - `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs -//! to be absolute. -//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The -//! format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. +//! - `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path +//! needs to be absolute. +//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The +//! format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. //! -//! Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. -//! Where `PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will -//! be `NODE_RUNTIME`. +//! Each project can be skipped individually by using the environment variable +//! 
`SKIP_PROJECT_NAME_WASM_BUILD`. Where `PROJECT_NAME` needs to be replaced by the name of the +//! cargo project, e.g. `node-runtime` will be `NODE_RUNTIME`. //! //! ## Prerequisites: //! @@ -88,15 +94,24 @@ //! //! - rust nightly + `wasm32-unknown-unknown` toolchain //! -//! If a specific rust nightly is installed with `rustup`, it is important that the wasm target is installed -//! as well. For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, -//! the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. - -use std::{env, fs, path::{PathBuf, Path}, process::{Command, self}, io::BufRead}; - +//! If a specific rust nightly is installed with `rustup`, it is important that the wasm target is +//! installed as well. For example if installing the rust nightly from 20.02.2020 using `rustup +//! install nightly-2020-02-20`, the wasm target needs to be installed as well `rustup target add +//! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. + +use std::{ + env, fs, + io::BufRead, + path::{Path, PathBuf}, + process::Command, +}; + +mod builder; mod prerequisites; mod wasm_project; +pub use builder::{WasmBuilder, WasmBuilderSelectProject}; + /// Environment variable that tells us to skip building the wasm binary. const SKIP_BUILD_ENV: &str = "SKIP_WASM_BUILD"; @@ -120,87 +135,8 @@ const WASM_BUILD_NO_COLOR: &str = "WASM_BUILD_NO_COLOR"; /// Environment variable to set the toolchain used to compile the wasm binary. const WASM_BUILD_TOOLCHAIN: &str = "WASM_BUILD_TOOLCHAIN"; -/// Build the currently built project as wasm binary. -/// -/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name + path of the file being generated. The file contains the -/// constant `WASM_BINARY`, which contains the built WASM binary. -/// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. -pub fn build_project(file_name: &str, cargo_manifest: &str) { - build_project_with_default_rustflags(file_name, cargo_manifest, ""); -} - -/// Build the currently built project as wasm binary. -/// -/// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. -/// -/// `file_name` - The name + path of the file being generated. The file contains the -/// constant `WASM_BINARY`, which contains the built WASM binary. -/// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. -/// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. 
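The now-deleted free functions follow. Relatedly, the docs above mention `WASM_BUILD_TOOLCHAIN`; elsewhere in this file a chosen toolchain is realized as a `rustup run <toolchain> cargo` invocation, roughly like this sketch (the toolchain value is the example from the docs; the helper is ours):

```rust
// Sketch of how a custom toolchain (cf. WASM_BUILD_TOOLCHAIN above) becomes
// a cargo invocation: `rustup run <toolchain> cargo ...`.
use std::process::Command;

fn rustup_cargo(toolchain: &str) -> Command {
	let mut cmd = Command::new("rustup");
	cmd.args(&["run", toolchain, "cargo"]);
	cmd
}

fn main() {
	// e.g. WASM_BUILD_TOOLCHAIN=nightly-2020-02-20, as in the docs above.
	let mut cmd = rustup_cargo("nightly-2020-02-20");
	cmd.arg("--version");
	// Running depends on the local rustup install; show the command instead.
	println!("{:?}", cmd);
}
```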
-pub fn build_project_with_default_rustflags( - file_name: &str, - cargo_manifest: &str, - default_rustflags: &str, -) { - if check_skip_build() { - return; - } - - let cargo_manifest = PathBuf::from(cargo_manifest); - - if !cargo_manifest.exists() { - panic!("'{}' does not exist!", cargo_manifest.display()); - } - - if !cargo_manifest.ends_with("Cargo.toml") { - panic!("'{}' no valid path to a `Cargo.toml`!", cargo_manifest.display()); - } - - let cargo_cmd = match prerequisites::check() { - Ok(cmd) => cmd, - Err(err_msg) => { - eprintln!("{}", err_msg); - process::exit(1); - }, - }; - - let (wasm_binary, bloaty) = wasm_project::create_and_compile( - &cargo_manifest, - default_rustflags, - cargo_cmd, - ); - - let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { - ( - wasm_binary.wasm_binary_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) - } else { - ( - bloaty.wasm_binary_bloaty_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) - }; - - write_file_if_changed( - file_name, - format!( - r#" - pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); - pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); - "#, - wasm_binary = wasm_binary, - wasm_binary_bloaty = wasm_binary_bloaty, - ), - ); -} - -/// Checks if the build of the WASM binary should be skipped. -fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() -} +/// Environment variable that makes sure the WASM build is triggered. +const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; /// Write to the given `file` if the `content` is different. fn write_file_if_changed(file: impl AsRef, content: impl AsRef) { @@ -216,16 +152,16 @@ fn copy_file_if_changed(src: PathBuf, dst: PathBuf) { let dst_file = fs::read_to_string(&dst).ok(); if src_file != dst_file { - fs::copy(&src, &dst) - .unwrap_or_else(|_| panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display())); + fs::copy(&src, &dst).unwrap_or_else(|_| { + panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display()) + }); } } /// Get a cargo command that compiles with nightly fn get_nightly_cargo() -> CargoCommand { - let env_cargo = CargoCommand::new( - &env::var("CARGO").expect("`CARGO` env variable is always set by cargo"), - ); + let env_cargo = + CargoCommand::new(&env::var("CARGO").expect("`CARGO` env variable is always set by cargo")); let default_cargo = CargoCommand::new("cargo"); let rustup_run_nightly = CargoCommand::new_with_args("rustup", &["run", "nightly", "cargo"]); let wasm_toolchain = env::var(WASM_BUILD_TOOLCHAIN).ok(); @@ -267,7 +203,7 @@ fn get_rustup_nightly(selected: Option) -> Option { } latest_nightly?.trim_end_matches(&host).into() - } + }, }; Some(CargoCommand::new_with_args("rustup", &["run", &version, "cargo"])) @@ -301,8 +237,8 @@ impl CargoCommand { /// Check if the supplied cargo command is a nightly version fn is_nightly(&self) -> bool { // `RUSTC_BOOTSTRAP` tells a stable compiler to behave like a nightly. So, when this env - // variable is set, we can assume that whatever rust compiler we have, it is a nightly compiler. - // For "more" information, see: + // variable is set, we can assume that whatever rust compiler we have, it is a nightly + // compiler. 
For "more" information, see: // https://github.com/rust-lang/rust/blob/fa0f7d0080d8e7e9eb20aa9cbf8013f96c81287f/src/libsyntax/feature_gate/check.rs#L891 env::var("RUSTC_BOOTSTRAP").is_ok() || self.command() @@ -323,10 +259,7 @@ struct CargoCommandVersioned { impl CargoCommandVersioned { fn new(command: CargoCommand, version: String) -> Self { - Self { - command, - version, - } + Self { command, version } } /// Returns the `rustc` version. diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index 3df2707d1d441..0dad8b781ae5a 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,12 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{CargoCommandVersioned, CargoCommand, write_file_if_changed}; +use crate::{write_file_if_changed, CargoCommand, CargoCommandVersioned}; use std::{fs, path::Path}; -use tempfile::tempdir; use ansi_term::Color; +use tempfile::tempdir; /// Print an error message. fn print_error_message(message: &str) -> String { @@ -95,7 +95,7 @@ fn create_check_toolchain_project(project_dir: &Path) { rustc_version.unwrap_or_else(|| "unknown rustc version".into()), ); } - "# + "#, ); // Just prints the `RURSTC_VERSION` environment variable that is being created by the // `build.rs` script. @@ -105,7 +105,7 @@ fn create_check_toolchain_project(project_dir: &Path) { fn main() { println!("{}", env!("RUSTC_VERSION")); } - "# + "#, ); } @@ -120,7 +120,12 @@ fn check_wasm_toolchain_installed( let manifest_path = temp.path().join("Cargo.toml").display().to_string(); let mut build_cmd = cargo_command.command(); - build_cmd.args(&["build", "--target=wasm32-unknown-unknown", "--manifest-path", &manifest_path]); + build_cmd.args(&[ + "build", + "--target=wasm32-unknown-unknown", + "--manifest-path", + &manifest_path, + ]); if super::color_output_enabled() { build_cmd.arg("--color=always"); @@ -129,33 +134,31 @@ fn check_wasm_toolchain_installed( let mut run_cmd = cargo_command.command(); run_cmd.args(&["run", "--manifest-path", &manifest_path]); - build_cmd - .output() - .map_err(|_| err_msg.clone()) - .and_then(|s| - if s.status.success() { - let version = run_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()); - Ok(CargoCommandVersioned::new( - cargo_command, - version.unwrap_or_else(|| "unknown rustc version".into()), - )) - } else { - match String::from_utf8(s.stderr) { - Ok(ref err) if err.contains("linker `rust-lld` not found") => { - Err(print_error_message("`rust-lld` not found, please install it!")) - }, - Ok(ref err) => Err( - format!( - "{}\n\n{}\n{}\n{}{}\n", - err_msg, - Color::Yellow.bold().paint("Further error information:"), - Color::Yellow.bold().paint("-".repeat(60)), - err, - Color::Yellow.bold().paint("-".repeat(60)), - ) - ), - Err(_) => Err(err_msg), - } + // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock + build_cmd.env_remove("CARGO_TARGET_DIR"); + run_cmd.env_remove("CARGO_TARGET_DIR"); + + build_cmd.output().map_err(|_| err_msg.clone()).and_then(|s| { + if s.status.success() { + let version = run_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()); + Ok(CargoCommandVersioned::new( + cargo_command, 
+ version.unwrap_or_else(|| "unknown rustc version".into()), + )) + } else { + match String::from_utf8(s.stderr) { + Ok(ref err) if err.contains("linker `rust-lld` not found") => + Err(print_error_message("`rust-lld` not found, please install it!")), + Ok(ref err) => Err(format!( + "{}\n\n{}\n{}\n{}{}\n", + err_msg, + Color::Yellow.bold().paint("Further error information:"), + Color::Yellow.bold().paint("-".repeat(60)), + err, + Color::Yellow.bold().paint("-".repeat(60)), + )), + Err(_) => Err(err_msg), } - ) + } + }) } diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index c27af71988b07..868692d341ff0 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,22 +18,23 @@ use crate::{write_file_if_changed, CargoCommandVersioned}; use std::{ - fs, path::{Path, PathBuf}, borrow::ToOwned, process, env, collections::HashSet, - hash::{Hash, Hasher}, ops::Deref, + borrow::ToOwned, + collections::HashSet, + env, fs, + hash::{Hash, Hasher}, + ops::Deref, + path::{Path, PathBuf}, + process, }; use toml::value::Table; use build_helper::rerun_if_changed; -use cargo_metadata::{MetadataCommand, Metadata}; +use cargo_metadata::{Metadata, MetadataCommand}; use walkdir::WalkDir; -use fs2::FileExt; - -use itertools::Itertools; - /// Colorize an info message. /// /// Returns the colorized message. @@ -70,31 +71,6 @@ impl WasmBinary { } } -/// A lock for the WASM workspace. -struct WorkspaceLock(fs::File); - -impl WorkspaceLock { - /// Create a new lock - fn new(wasm_workspace_root: &Path) -> Self { - let lock = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(wasm_workspace_root.join("wasm_workspace.lock")) - .expect("Opening the lock file does not fail"); - - lock.lock_exclusive().expect("Locking `wasm_workspace.lock` failed"); - - WorkspaceLock(lock) - } -} - -impl Drop for WorkspaceLock { - fn drop(&mut self) { - let _ = self.0.unlock(); - } -} - fn crate_metadata(cargo_manifest: &Path) -> Metadata { let mut cargo_lock = cargo_manifest.to_path_buf(); cargo_lock.set_file_name("Cargo.lock"); @@ -120,37 +96,43 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. /// /// # Returns +/// /// The path to the compact WASM binary and the bloaty WASM binary. 
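`create_and_compile` follows; note its first return slot. It prefers the compressed artifact and falls back to the plain compact one via `Option::or`, which this tiny standalone example illustrates (the file names are made up):

```rust
// Standalone illustration of the `wasm_binary_compressed.or(wasm_binary)`
// fallback used in `create_and_compile` below.
fn main() {
	let compressed: Option<&str> = Some("runtime.compact.compressed.wasm");
	let compact: Option<&str> = Some("runtime.compact.wasm");
	// With both present, the compressed artifact wins.
	assert_eq!(compressed.or(compact), compressed);

	// Without a compressed artifact, the compact binary is returned instead.
	let compressed: Option<&str> = None;
	assert_eq!(compressed.or(compact), compact);
}
```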
 pub(crate) fn create_and_compile(
-	cargo_manifest: &Path,
+	project_cargo_toml: &Path,
 	default_rustflags: &str,
 	cargo_cmd: CargoCommandVersioned,
+	features_to_enable: Vec<String>,
+	wasm_binary_name: Option<String>,
 ) -> (Option<WasmBinary>, WasmBinaryBloaty) {
 	let wasm_workspace_root = get_wasm_workspace_root();
 	let wasm_workspace = wasm_workspace_root.join("wbuild");
 
-	// Lock the workspace exclusively for us
-	let _lock = WorkspaceLock::new(&wasm_workspace_root);
-
-	let crate_metadata = crate_metadata(cargo_manifest);
-
-	let project = create_project(cargo_manifest, &wasm_workspace, &crate_metadata);
-	create_wasm_workspace_project(&wasm_workspace, &crate_metadata.workspace_root);
+	let crate_metadata = crate_metadata(project_cargo_toml);
 
-	build_project(&project, default_rustflags, cargo_cmd);
-	let (wasm_binary, bloaty) = compact_wasm_file(
-		&project,
-		cargo_manifest,
+	let project = create_project(
+		project_cargo_toml,
 		&wasm_workspace,
+		&crate_metadata,
+		crate_metadata.workspace_root.as_ref(),
+		features_to_enable,
 	);
 
-	wasm_binary.as_ref().map(|wasm_binary|
-		copy_wasm_to_target_directory(cargo_manifest, wasm_binary)
-	);
+	build_project(&project, default_rustflags, cargo_cmd);
+	let (wasm_binary, wasm_binary_compressed, bloaty) =
+		compact_wasm_file(&project, project_cargo_toml, wasm_binary_name);
+
+	wasm_binary
+		.as_ref()
+		.map(|wasm_binary| copy_wasm_to_target_directory(project_cargo_toml, wasm_binary));
 
-	generate_rerun_if_changed_instructions(cargo_manifest, &project, &wasm_workspace);
+	wasm_binary_compressed.as_ref().map(|wasm_binary_compressed| {
+		copy_wasm_to_target_directory(project_cargo_toml, wasm_binary_compressed)
+	});
 
-	(wasm_binary, bloaty)
+	generate_rerun_if_changed_instructions(project_cargo_toml, &project, &wasm_workspace);
+
+	(wasm_binary_compressed.or(wasm_binary), bloaty)
 }
 
 /// Find the `Cargo.lock` relative to the `OUT_DIR` environment variable.
@@ -164,17 +146,17 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option<PathBuf> {
 			}
 
 			if !path.pop() {
-				return None;
+				return None
 			}
 		}
 	}
 
 	if let Some(path) = find_impl(build_helper::out_dir()) {
-		return Some(path);
+		return Some(path)
 	}
 
 	if let Some(path) = find_impl(cargo_manifest.to_path_buf()) {
-		return Some(path);
+		return Some(path)
 	}
 
 	build_helper::warning!(
@@ -189,15 +171,20 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option<PathBuf> {
 
 /// Extract the crate name from the given `Cargo.toml`.
 fn get_crate_name(cargo_manifest: &Path) -> String {
 	let cargo_toml: Table = toml::from_str(
-		&fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed")
-	).expect("Cargo manifest is a valid toml file; qed");
+		&fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed"),
+	)
+	.expect("Cargo manifest is a valid toml file; qed");
 
 	let package = cargo_toml
 		.get("package")
 		.and_then(|t| t.as_table())
 		.expect("`package` key exists in valid `Cargo.toml`; qed");
 
-	package.get("name").and_then(|p| p.as_str()).map(ToOwned::to_owned).expect("Package name exists; qed")
+	package
+		.get("name")
+		.and_then(|p| p.as_str())
+		.map(ToOwned::to_owned)
+		.expect("Package name exists; qed")
 }
 
 /// Returns the name for the wasm binary.
@@ -212,83 +199,29 @@ fn get_wasm_workspace_root() -> PathBuf {
 	loop {
 		match out_dir.parent() {
 			Some(parent) if out_dir.ends_with("build") => return parent.to_path_buf(),
-			_ => if !out_dir.pop() {
-				break;
-			}
+			_ =>
+				if !out_dir.pop() {
+					break
+				},
 		}
 	}
 
 	panic!("Could not find target dir in: {}", build_helper::out_dir().display())
 }
 
-/// Find all workspace members.
-/// -/// Each folder in `wasm_workspace` is seen as a member of the workspace. Exceptions are -/// folders starting with "." and the "target" folder. -/// -/// Every workspace member that is not valid anymore is deleted (the folder of it). A -/// member is not valid anymore when the `wasm-project` dependency points to an non-existing -/// folder or the package name is not valid. -fn find_and_clear_workspace_members(wasm_workspace: &Path) -> Vec { - let mut members = WalkDir::new(wasm_workspace) - .min_depth(1) - .max_depth(1) - .into_iter() - .filter_map(|p| p.ok()) - .map(|d| d.into_path()) - .filter(|p| p.is_dir()) - .filter_map(|p| p.file_name().map(|f| f.to_owned()).and_then(|s| s.into_string().ok())) - .filter(|f| !f.starts_with('.') && f != "target") - .collect::>(); - - let mut i = 0; - while i != members.len() { - let path = wasm_workspace.join(&members[i]).join("Cargo.toml"); - - // Extract the `wasm-project` dependency. - // If the path can be extracted and is valid and the package name matches, - // the member is valid. - if let Some(mut wasm_project) = fs::read_to_string(path) - .ok() - .and_then(|s| toml::from_str::

::on_chain_storage_version(), 4); +} + +fn log_migration(stage: &str, old_pallet_name: &str, new_pallet_name: &str) { + log::info!( + target: "runtime::collective", + "{}, prefix: '{}' ==> '{}'", + stage, + old_pallet_name, + new_pallet_name, + ); +} diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs new file mode 100644 index 0000000000000..b8feb64867cf8 --- /dev/null +++ b/frame/collective/src/tests.rs @@ -0,0 +1,1079 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{Event as CollectiveEvent, *}; +use crate as pallet_collective; +use frame_support::{ + assert_noop, assert_ok, parameter_types, traits::GenesisBuild, weights::Pays, Hashable, +}; +use frame_system::{EventRecord, Phase}; +use sp_core::{ + u32_trait::{_3, _4}, + H256, +}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Pallet, Call, Event}, + Collective: pallet_collective::::{Pallet, Call, Event, Origin, Config}, + CollectiveMajority: pallet_collective::::{Pallet, Call, Event, Origin, Config}, + DefaultCollective: pallet_collective::{Pallet, Call, Event, Origin, Config}, + Democracy: mock_democracy::{Pallet, Call, Event}, + } +); + +mod mock_democracy { + pub use pallet::*; + #[frame_support::pallet] + pub mod pallet { + use frame_support::{pallet_prelude::*, traits::EnsureOrigin}; + use frame_system::pallet_prelude::*; + use sp_runtime::DispatchResult; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + Sized { + type Event: From> + IsType<::Event>; + type ExternalMajorityOrigin: EnsureOrigin; + } + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { + T::ExternalMajorityOrigin::ensure_origin(origin)?; + Self::deposit_event(Event::::ExternalProposed); + Ok(()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + ExternalProposed, + } + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MotionDuration: u64 = 3; + pub const MaxProposals: u32 = 100; + pub const MaxMembers: u32 = 100; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} +impl Config for Test { + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = MotionDuration; + type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = PrimeDefaultVote; + type WeightInfo = (); +} +impl Config for Test { + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = MotionDuration; + type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; + type WeightInfo = (); +} +impl mock_democracy::Config for Test { + type Event = Event; + type ExternalMajorityOrigin = EnsureProportionAtLeast<_3, _4, u64, Instance1>; +} +impl Config for Test { + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = MotionDuration; + type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = PrimeDefaultVote; + type WeightInfo = (); +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = GenesisConfig { + collective: pallet_collective::GenesisConfig { + members: vec![1, 2, 3], + phantom: Default::default(), + }, + collective_majority: pallet_collective::GenesisConfig { + members: vec![1, 2, 3, 4, 5], + phantom: Default::default(), + }, + default_collective: Default::default(), + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +fn make_proposal(value: u64) -> Call { + Call::System(frame_system::Call::remark { remark: value.encode() }) +} + +fn record(event: Event) -> EventRecord { + EventRecord { phase: Phase::Initialization, event, topics: vec![] } +} + +#[test] +fn motions_basic_environment_works() { + new_test_ext().execute_with(|| { + assert_eq!(Collective::members(), vec![1, 2, 3]); + assert_eq!(*Collective::proposals(), Vec::::new()); + }); +} + +#[test] +fn close_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + + System::set_block_number(3); + assert_noop!( + Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len), + Error::::TooEarly + ); + + System::set_block_number(4); + 
assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + + assert_eq!( + System::events(), + vec![ + record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))), + record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), + record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), + record(Event::Collective(CollectiveEvent::Closed(hash, 2, 1))), + record(Event::Collective(CollectiveEvent::Disapproved(hash))) + ] + ); + }); +} + +#[test] +fn proposal_weight_limit_works_on_approve() { + new_test_ext().execute_with(|| { + let proposal = Call::Collective(crate::Call::set_members { + new_members: vec![1, 2, 3], + prime: None, + old_count: MaxMembers::get(), + }); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + // Set 1 as prime voter + Prime::::set(Some(1)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + // With 1's prime vote, this should pass + System::set_block_number(4); + assert_noop!( + Collective::close(Origin::signed(4), hash, 0, proposal_weight - 100, proposal_len), + Error::::WrongProposalWeight + ); + assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + }) +} + +#[test] +fn proposal_weight_limit_ignored_on_disapprove() { + new_test_ext().execute_with(|| { + let proposal = Call::Collective(crate::Call::set_members { + new_members: vec![1, 2, 3], + prime: None, + old_count: MaxMembers::get(), + }); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + // No votes, this proposal wont pass + System::set_block_number(4); + assert_ok!(Collective::close( + Origin::signed(4), + hash, + 0, + proposal_weight - 100, + proposal_len + )); + }) +} + +#[test] +fn close_with_prime_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(Collective::set_members( + Origin::root(), + vec![1, 2, 3], + Some(3), + MaxMembers::get() + )); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + + assert_eq!( + System::events(), + vec![ + record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))), + record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), + record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), + record(Event::Collective(CollectiveEvent::Closed(hash, 2, 1))), + record(Event::Collective(CollectiveEvent::Disapproved(hash))) + ] + ); + }); +} + +#[test] +fn close_with_voting_prime_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let 
proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(Collective::set_members( + Origin::root(), + vec![1, 2, 3], + Some(1), + MaxMembers::get() + )); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + + assert_eq!( + System::events(), + vec![ + record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))), + record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), + record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), + record(Event::Collective(CollectiveEvent::Closed(hash, 3, 0))), + record(Event::Collective(CollectiveEvent::Approved(hash))), + record(Event::Collective(CollectiveEvent::Executed( + hash, + Err(DispatchError::BadOrigin) + ))) + ] + ); + }); +} + +#[test] +fn close_with_no_prime_but_majority_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(CollectiveMajority::set_members( + Origin::root(), + vec![1, 2, 3, 4, 5], + Some(5), + MaxMembers::get() + )); + + assert_ok!(CollectiveMajority::propose( + Origin::signed(1), + 5, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(CollectiveMajority::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(CollectiveMajority::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(CollectiveMajority::vote(Origin::signed(3), hash, 0, true)); + + System::set_block_number(4); + assert_ok!(CollectiveMajority::close( + Origin::signed(4), + hash, + 0, + proposal_weight, + proposal_len + )); + + assert_eq!( + System::events(), + vec![ + record(Event::CollectiveMajority(CollectiveEvent::Proposed(1, 0, hash, 5))), + record(Event::CollectiveMajority(CollectiveEvent::Voted(1, hash, true, 1, 0))), + record(Event::CollectiveMajority(CollectiveEvent::Voted(2, hash, true, 2, 0))), + record(Event::CollectiveMajority(CollectiveEvent::Voted(3, hash, true, 3, 0))), + record(Event::CollectiveMajority(CollectiveEvent::Closed(hash, 5, 0))), + record(Event::CollectiveMajority(CollectiveEvent::Approved(hash))), + record(Event::CollectiveMajority(CollectiveEvent::Executed( + hash, + Err(DispatchError::BadOrigin) + ))) + ] + ); + }); +} + +#[test] +fn removal_of_old_voters_votes_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let hash = BlakeTwo256::hash_of(&proposal); + let end = 4; + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) + ); + Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); + assert_eq!( + Collective::voting(&hash), + Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) + ); + + let proposal = make_proposal(69); + let proposal_len: u32 = proposal.using_encoded(|p| 
p.len() as u32);
+		let hash = BlakeTwo256::hash_of(&proposal);
+		assert_ok!(Collective::propose(
+			Origin::signed(2),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true));
+		assert_ok!(Collective::vote(Origin::signed(3), hash, 1, false));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end })
+		);
+		Collective::change_members_sorted(&[], &[3], &[2, 4]);
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end })
+		);
+	});
+}
+
+#[test]
+fn removal_of_old_voters_votes_works_with_set_members() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash = BlakeTwo256::hash_of(&proposal);
+		let end = 4;
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			3,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end })
+		);
+		assert_ok!(Collective::set_members(Origin::root(), vec![2, 3, 4], None, MaxMembers::get()));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end })
+		);
+
+		let proposal = make_proposal(69);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash = BlakeTwo256::hash_of(&proposal);
+		assert_ok!(Collective::propose(
+			Origin::signed(2),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true));
+		assert_ok!(Collective::vote(Origin::signed(3), hash, 1, false));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end })
+		);
+		assert_ok!(Collective::set_members(Origin::root(), vec![2, 4], None, MaxMembers::get()));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end })
+		);
+	});
+}
+
+#[test]
+fn propose_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash = proposal.blake2_256().into();
+		let end = 4;
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			3,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_eq!(*Collective::proposals(), vec![hash]);
+		assert_eq!(Collective::proposal_of(&hash), Some(proposal));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 0, threshold: 3, ayes: vec![], nays: vec![], end })
+		);
+
+		assert_eq!(
+			System::events(),
+			vec![record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3)))]
+		);
+	});
+}
+
+#[test]
+fn limit_active_proposals() {
+	new_test_ext().execute_with(|| {
+		for i in 0..MaxProposals::get() {
+			let proposal = make_proposal(i as u64);
+			let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+			assert_ok!(Collective::propose(
+				Origin::signed(1),
+				3,
+				Box::new(proposal.clone()),
+				proposal_len
+			));
+		}
+		let proposal = make_proposal(MaxProposals::get() as u64 + 1);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		assert_noop!(
+			Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len),
+			Error::<Test, Instance1>::TooManyProposals
+		);
+	})
+}
+
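+// The `MaxProposals`/`MaxMembers`/`MotionDuration` bounds exercised above are
+// supplied by the runtime's `pallet_collective::Config`. As an illustrative
+// sketch only (names and values modeled on a typical node runtime of this era,
+// not part of this test file), a council instance might be wired up like this:
+//
+//     parameter_types! {
+//         pub const CouncilMotionDuration: BlockNumber = 5 * DAYS;
+//         pub const CouncilMaxProposals: u32 = 100;
+//         pub const CouncilMaxMembers: u32 = 100;
+//     }
+//
+//     impl pallet_collective::Config<pallet_collective::Instance1> for Runtime {
+//         type Origin = Origin;
+//         type Proposal = Call;
+//         type Event = Event;
+//         type MotionDuration = CouncilMotionDuration;
+//         type MaxProposals = CouncilMaxProposals;
+//         type MaxMembers = CouncilMaxMembers;
+//         type DefaultVote = pallet_collective::PrimeDefaultVote;
+//         type WeightInfo = pallet_collective::weights::SubstrateWeight<Runtime>;
+//     }
+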
+#[test]
+fn correct_validate_and_get_proposal() {
+	new_test_ext().execute_with(|| {
+		let proposal = Call::Collective(crate::Call::set_members {
+			new_members: vec![1, 2, 3],
+			prime: None,
+			old_count: MaxMembers::get(),
+		});
+		let length = proposal.encode().len() as u32;
+		assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), length));
+
+		let hash = BlakeTwo256::hash_of(&proposal);
+		let weight = proposal.get_dispatch_info().weight;
+		assert_noop!(
+			Collective::validate_and_get_proposal(
+				&BlakeTwo256::hash_of(&vec![3; 4]),
+				length,
+				weight
+			),
+			Error::<Test, Instance1>::ProposalMissing
+		);
+		assert_noop!(
+			Collective::validate_and_get_proposal(&hash, length - 2, weight),
+			Error::<Test, Instance1>::WrongProposalLength
+		);
+		assert_noop!(
+			Collective::validate_and_get_proposal(&hash, length, weight - 10),
+			Error::<Test, Instance1>::WrongProposalWeight
+		);
+		let res = Collective::validate_and_get_proposal(&hash, length, weight);
+		assert_ok!(res.clone());
+		let (retrieved_proposal, len) = res.unwrap();
+		assert_eq!(length as usize, len);
+		assert_eq!(proposal, retrieved_proposal);
+	})
+}
+
+#[test]
+fn motions_ignoring_non_collective_proposals_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		assert_noop!(
+			Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone()), proposal_len),
+			Error::<Test, Instance1>::NotMember
+		);
+	});
+}
+
+#[test]
+fn motions_ignoring_non_collective_votes_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash: H256 = proposal.blake2_256().into();
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			3,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_noop!(
+			Collective::vote(Origin::signed(42), hash, 0, true),
+			Error::<Test, Instance1>::NotMember,
+		);
+	});
+}
+
+#[test]
+fn motions_ignoring_bad_index_collective_vote_works() {
+	new_test_ext().execute_with(|| {
+		System::set_block_number(3);
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash: H256 = proposal.blake2_256().into();
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			3,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_noop!(
+			Collective::vote(Origin::signed(2), hash, 1, true),
+			Error::<Test, Instance1>::WrongIndex,
+		);
+	});
+}
+
+#[test]
+fn motions_vote_after_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash: H256 = proposal.blake2_256().into();
+		let end = 4;
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		// Initially there are no votes when the motion is proposed.
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end })
+		);
+		// Cast first aye vote.
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end })
+		);
+		// Try to cast a duplicate aye vote.
+		assert_noop!(
+			Collective::vote(Origin::signed(1), hash, 0, true),
+			Error::<Test, Instance1>::DuplicateVote,
+		);
+		// Cast a nay vote.
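+		// (A member may switch sides: casting a nay removes the earlier aye.)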
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end })
+		);
+		// Try to cast a duplicate nay vote.
+		assert_noop!(
+			Collective::vote(Origin::signed(1), hash, 0, false),
+			Error::<Test, Instance1>::DuplicateVote,
+		);
+
+		assert_eq!(
+			System::events(),
+			vec![
+				record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))),
+				record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))),
+				record(Event::Collective(CollectiveEvent::Voted(1, hash, false, 0, 1))),
+			]
+		);
+	});
+}
+
+#[test]
+fn motions_all_first_vote_free_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash: H256 = proposal.blake2_256().into();
+		let end = 4;
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len,
+		));
+		assert_eq!(
+			Collective::voting(&hash),
+			Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end })
+		);
+
+		// For the motion, acc 2's first vote, expecting Ok with Pays::No.
+		let vote_rval: DispatchResultWithPostInfo =
+			Collective::vote(Origin::signed(2), hash, 0, true);
+		assert_eq!(vote_rval.unwrap().pays_fee, Pays::No);
+
+		// Duplicate vote, expecting error with Pays::Yes.
+		let vote_rval: DispatchResultWithPostInfo =
+			Collective::vote(Origin::signed(2), hash, 0, true);
+		assert_eq!(vote_rval.unwrap_err().post_info.pays_fee, Pays::Yes);
+
+		// Modifying the vote, expecting Ok with Pays::Yes.
+		let vote_rval: DispatchResultWithPostInfo =
+			Collective::vote(Origin::signed(2), hash, 0, false);
+		assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes);
+
+		// For the motion, acc 3's first vote, expecting Ok with Pays::No.
+		let vote_rval: DispatchResultWithPostInfo =
+			Collective::vote(Origin::signed(3), hash, 0, true);
+		assert_eq!(vote_rval.unwrap().pays_fee, Pays::No);
+
+		// acc 3 modifies the vote, expecting Ok with Pays::Yes.
+		let vote_rval: DispatchResultWithPostInfo =
+			Collective::vote(Origin::signed(3), hash, 0, false);
+		assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes);
+
+		// Test the close() extrinsic: check DispatchResultWithPostInfo's pays_fee info.
+
+		let proposal_weight = proposal.get_dispatch_info().weight;
+		let close_rval: DispatchResultWithPostInfo =
+			Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len);
+		assert_eq!(close_rval.unwrap().pays_fee, Pays::No);
+
+		// Trying to close the proposal, which is already closed.
+ // Expecting error "ProposalMissing" with Pays::Yes + let close_rval: DispatchResultWithPostInfo = + Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len); + assert_eq!(close_rval.unwrap_err().post_info.pays_fee, Pays::Yes); + }); +} + +#[test] +fn motions_reproposing_disapproved_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); + assert_eq!(*Collective::proposals(), vec![]); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); + assert_eq!(*Collective::proposals(), vec![hash]); + }); +} + +#[test] +fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { + new_test_ext().execute_with(|| { + let proposal = Call::Democracy(mock_democracy::Call::external_propose_majority {}); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash: H256 = proposal.blake2_256().into(); + // The voting threshold is 2, but the required votes for `ExternalMajorityOrigin` is 3. + // The proposal will be executed regardless of the voting threshold + // as long as we have enough yes votes. + // + // Failed to execute with only 2 yes votes. + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); + assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); + assert_eq!( + System::events(), + vec![ + record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))), + record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), + record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), + record(Event::Collective(CollectiveEvent::Closed(hash, 2, 0))), + record(Event::Collective(CollectiveEvent::Approved(hash))), + record(Event::Collective(CollectiveEvent::Executed( + hash, + Err(DispatchError::BadOrigin) + ))), + ] + ); + + System::reset_events(); + + // Executed with 3 yes votes. 
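+		// Three ayes satisfy `ExternalMajorityOrigin`, so this time closing the
+		// motion dispatches the call and emits `Executed(hash, Ok(()))`.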
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 1, true));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true));
+		assert_ok!(Collective::vote(Origin::signed(3), hash, 1, true));
+		assert_ok!(Collective::close(Origin::signed(2), hash, 1, proposal_weight, proposal_len));
+		assert_eq!(
+			System::events(),
+			vec![
+				record(Event::Collective(CollectiveEvent::Proposed(1, 1, hash, 2))),
+				record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))),
+				record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))),
+				record(Event::Collective(CollectiveEvent::Voted(3, hash, true, 3, 0))),
+				record(Event::Collective(CollectiveEvent::Closed(hash, 3, 0))),
+				record(Event::Collective(CollectiveEvent::Approved(hash))),
+				record(Event::Democracy(mock_democracy::pallet::Event::<Test>::ExternalProposed)),
+				record(Event::Collective(CollectiveEvent::Executed(hash, Ok(())))),
+			]
+		);
+	});
+}
+
+#[test]
+fn motions_disapproval_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let proposal_weight = proposal.get_dispatch_info().weight;
+		let hash: H256 = proposal.blake2_256().into();
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			3,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false));
+		assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len));
+
+		assert_eq!(
+			System::events(),
+			vec![
+				record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))),
+				record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))),
+				record(Event::Collective(CollectiveEvent::Voted(2, hash, false, 1, 1))),
+				record(Event::Collective(CollectiveEvent::Closed(hash, 1, 1))),
+				record(Event::Collective(CollectiveEvent::Disapproved(hash))),
+			]
+		);
+	});
+}
+
+#[test]
+fn motions_approval_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let proposal_weight = proposal.get_dispatch_info().weight;
+		let hash: H256 = proposal.blake2_256().into();
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true));
+		assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len));
+
+		assert_eq!(
+			System::events(),
+			vec![
+				record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))),
+				record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))),
+				record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))),
+				record(Event::Collective(CollectiveEvent::Closed(hash, 2, 0))),
+				record(Event::Collective(CollectiveEvent::Approved(hash))),
+				record(Event::Collective(CollectiveEvent::Executed(
+					hash,
+					Err(DispatchError::BadOrigin)
+				))),
+			]
+		);
+	});
+}
+
+#[test]
+fn motion_with_no_votes_closes_with_disapproval() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let proposal_weight = proposal.get_dispatch_info().weight;
+		let hash: H256 = proposal.blake2_256().into();
+
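+		// Propose with threshold 3 and cast no votes at all; only the elapsed
+		// motion duration can settle this motion.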
assert_ok!(Collective::propose(
+			Origin::signed(1),
+			3,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		assert_eq!(
+			System::events()[0],
+			record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3)))
+		);
+
+		// Closing the motion too early is not possible because it has neither
+		// an approving nor a disapproving simple majority due to the lack of votes.
+		assert_noop!(
+			Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len),
+			Error::<Test, Instance1>::TooEarly
+		);
+
+		// Once the motion duration passes,
+		let closing_block = System::block_number() + MotionDuration::get();
+		System::set_block_number(closing_block);
+		// we can successfully close the motion.
+		assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len));
+
+		// Events show that the close ended in a disapproval.
+		assert_eq!(
+			System::events()[1],
+			record(Event::Collective(CollectiveEvent::Closed(hash, 0, 3)))
+		);
+		assert_eq!(
+			System::events()[2],
+			record(Event::Collective(CollectiveEvent::Disapproved(hash)))
+		);
+	})
+}
+
+#[test]
+fn close_disapprove_does_not_care_about_weight_or_len() {
+	// This test confirms that if you close a proposal that would be disapproved,
+	// we do not care about the proposal length or proposal weight since it will
+	// not be read from storage or executed.
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash: H256 = proposal.blake2_256().into();
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		// First we make the proposal succeed
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true));
+		// It will not close with bad weight/len information
+		assert_noop!(
+			Collective::close(Origin::signed(2), hash, 0, 0, 0),
+			Error::<Test, Instance1>::WrongProposalLength,
+		);
+		assert_noop!(
+			Collective::close(Origin::signed(2), hash, 0, 0, proposal_len),
+			Error::<Test, Instance1>::WrongProposalWeight,
+		);
+		// Now we make the proposal fail
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false));
+		// It can close even if the weight/len information is bad
+		assert_ok!(Collective::close(Origin::signed(2), hash, 0, 0, 0));
+	})
+}
+
+#[test]
+fn disapprove_proposal_works() {
+	new_test_ext().execute_with(|| {
+		let proposal = make_proposal(42);
+		let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32);
+		let hash: H256 = proposal.blake2_256().into();
+		assert_ok!(Collective::propose(
+			Origin::signed(1),
+			2,
+			Box::new(proposal.clone()),
+			proposal_len
+		));
+		// Proposal would normally succeed
+		assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true));
+		assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true));
+		// But Root can disapprove and remove it anyway
+		assert_ok!(Collective::disapprove_proposal(Origin::root(), hash));
+		assert_eq!(
+			System::events(),
+			vec![
+				record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))),
+				record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))),
+				record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))),
+				record(Event::Collective(CollectiveEvent::Disapproved(hash))),
+			]
+		);
+	})
+}
+
+#[test]
+#[should_panic(expected = "Members cannot contain duplicate accounts.")]
+fn genesis_build_panics_with_duplicate_members() {
+	pallet_collective::GenesisConfig::<Test> {
+		members:
vec![1, 2, 3, 1],
+		phantom: Default::default(),
+	}
+	.build_storage()
+	.unwrap();
+}
+
+#[test]
+fn migration_v4() {
+	new_test_ext().execute_with(|| {
+		use frame_support::traits::PalletInfoAccess;
+
+		let old_pallet = "OldCollective";
+		let new_pallet = <Collective as PalletInfoAccess>::name();
+		frame_support::storage::migration::move_pallet(
+			new_pallet.as_bytes(),
+			old_pallet.as_bytes(),
+		);
+		StorageVersion::new(0).put::<Collective>();
+
+		crate::migrations::v4::pre_migrate::<Collective, _>(old_pallet);
+		crate::migrations::v4::migrate::<Test, Collective, _>(old_pallet);
+		crate::migrations::v4::post_migrate::<Collective, _>(old_pallet);
+
+		let old_pallet = "OldCollectiveMajority";
+		let new_pallet = <CollectiveMajority as PalletInfoAccess>::name();
+		frame_support::storage::migration::move_pallet(
+			new_pallet.as_bytes(),
+			old_pallet.as_bytes(),
+		);
+		StorageVersion::new(0).put::<CollectiveMajority>();
+
+		crate::migrations::v4::pre_migrate::<CollectiveMajority, _>(old_pallet);
+		crate::migrations::v4::migrate::<Test, CollectiveMajority, _>(old_pallet);
+		crate::migrations::v4::post_migrate::<CollectiveMajority, _>(old_pallet);
+
+		let old_pallet = "OldDefaultCollective";
+		let new_pallet = <DefaultCollective as PalletInfoAccess>::name();
+		frame_support::storage::migration::move_pallet(
+			new_pallet.as_bytes(),
+			old_pallet.as_bytes(),
+		);
+		StorageVersion::new(0).put::<DefaultCollective>();
+
+		crate::migrations::v4::pre_migrate::<DefaultCollective, _>(old_pallet);
+		crate::migrations::v4::migrate::<Test, DefaultCollective, _>(old_pallet);
+		crate::migrations::v4::post_migrate::<DefaultCollective, _>(old_pallet);
+	});
+}
diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs
new file mode 100644
index 0000000000000..40ac9eabdd6e4
--- /dev/null
+++ b/frame/collective/src/weights.rs
@@ -0,0 +1,325 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_collective
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_collective
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/collective/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_collective.
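+// Parameter key (from the benchmark definitions): `b` is the proposal size in
+// bytes, `m` a member count, `p` the number of active proposals, and `n` a
+// second member-count dimension used only by `set_members`.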
+pub trait WeightInfo {
+	fn set_members(m: u32, n: u32, p: u32, ) -> Weight;
+	fn execute(b: u32, m: u32, ) -> Weight;
+	fn propose_execute(b: u32, m: u32, ) -> Weight;
+	fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight;
+	fn vote(m: u32, ) -> Weight;
+	fn close_early_disapproved(m: u32, p: u32, ) -> Weight;
+	fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight;
+	fn close_disapproved(m: u32, p: u32, ) -> Weight;
+	fn close_approved(b: u32, m: u32, p: u32, ) -> Weight;
+	fn disapprove_proposal(p: u32, ) -> Weight;
+}
+
+/// Weights for pallet_collective using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	// Storage: Instance1Collective Members (r:1 w:1)
+	// Storage: Instance1Collective Proposals (r:1 w:0)
+	// Storage: Instance1Collective Voting (r:100 w:100)
+	// Storage: Instance1Collective Prime (r:0 w:1)
+	fn set_members(m: u32, n: u32, p: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 4_000
+			.saturating_add((14_084_000 as Weight).saturating_mul(m as Weight))
+			// Standard Error: 4_000
+			.saturating_add((161_000 as Weight).saturating_mul(n as Weight))
+			// Standard Error: 4_000
+			.saturating_add((19_201_000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight)))
+			.saturating_add(T::DbWeight::get().writes(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
+	}
+	// Storage: Instance1Collective Members (r:1 w:0)
+	fn execute(b: u32, m: u32, ) -> Weight {
+		(22_748_000 as Weight)
+			// Standard Error: 0
+			.saturating_add((3_000 as Weight).saturating_mul(b as Weight))
+			// Standard Error: 0
+			.saturating_add((92_000 as Weight).saturating_mul(m as Weight))
+			.saturating_add(T::DbWeight::get().reads(1 as Weight))
+	}
+	// Storage: Instance1Collective Members (r:1 w:0)
+	// Storage: Instance1Collective ProposalOf (r:1 w:0)
+	fn propose_execute(b: u32, m: u32, ) -> Weight {
+		(27_465_000 as Weight)
+			// Standard Error: 0
+			.saturating_add((3_000 as Weight).saturating_mul(b as Weight))
+			// Standard Error: 0
+			.saturating_add((178_000 as Weight).saturating_mul(m as Weight))
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+	}
+	// Storage: Instance1Collective Members (r:1 w:0)
+	// Storage: Instance1Collective ProposalOf (r:1 w:1)
+	// Storage: Instance1Collective Proposals (r:1 w:1)
+	// Storage: Instance1Collective ProposalCount (r:1 w:1)
+	// Storage: Instance1Collective Voting (r:0 w:1)
+	fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight {
+		(39_869_000 as Weight)
+			// Standard Error: 0
+			.saturating_add((8_000 as Weight).saturating_mul(b as Weight))
+			// Standard Error: 1_000
+			.saturating_add((107_000 as Weight).saturating_mul(m as Weight))
+			// Standard Error: 1_000
+			.saturating_add((406_000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(T::DbWeight::get().reads(4 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	// Storage: Instance1Collective Members (r:1 w:0)
+	// Storage: Instance1Collective Voting (r:1 w:1)
+	fn vote(m: u32, ) -> Weight {
+		(37_387_000 as Weight)
+			// Standard Error: 2_000
+			.saturating_add((223_000 as Weight).saturating_mul(m as Weight))
+			.saturating_add(T::DbWeight::get().reads(2 as Weight))
+			.saturating_add(T::DbWeight::get().writes(1 as Weight))
+	}
+	// Storage: Instance1Collective Voting (r:1 w:1)
+	// Storage: 
Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective Proposals (r:1 w:1) + // Storage: Instance1Collective ProposalOf (r:0 w:1) + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + (45_670_000 as Weight) + // Standard Error: 1_000 + .saturating_add((170_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((358_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Voting (r:1 w:1) + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective ProposalOf (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:1) + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { + (52_529_000 as Weight) + // Standard Error: 0 + .saturating_add((7_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((206_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((412_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Voting (r:1 w:1) + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective Prime (r:1 w:0) + // Storage: Instance1Collective Proposals (r:1 w:1) + // Storage: Instance1Collective ProposalOf (r:0 w:1) + fn close_disapproved(m: u32, p: u32, ) -> Weight { + (50_427_000 as Weight) + // Standard Error: 1_000 + .saturating_add((170_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((354_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Voting (r:1 w:1) + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective Prime (r:1 w:0) + // Storage: Instance1Collective ProposalOf (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:1) + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { + (57_031_000 as Weight) + // Standard Error: 0 + .saturating_add((7_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((208_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((408_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Proposals (r:1 w:1) + // Storage: Instance1Collective Voting (r:0 w:1) + // Storage: Instance1Collective ProposalOf (r:0 w:1) + fn disapprove_proposal(p: u32, ) -> Weight { + (27_458_000 as Weight) + // Standard Error: 1_000 + .saturating_add((402_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Instance1Collective Members (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:0) + // Storage: Instance1Collective Voting (r:100 w:100) + // Storage: Instance1Collective Prime (r:0 w:1) + fn set_members(m: u32, n: u32, p: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 4_000 + .saturating_add((14_084_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 4_000 + 
.saturating_add((161_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 4_000 + .saturating_add((19_201_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + // Storage: Instance1Collective Members (r:1 w:0) + fn execute(b: u32, m: u32, ) -> Weight { + (22_748_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((92_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective ProposalOf (r:1 w:0) + fn propose_execute(b: u32, m: u32, ) -> Weight { + (27_465_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((178_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + } + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective ProposalOf (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:1) + // Storage: Instance1Collective ProposalCount (r:1 w:1) + // Storage: Instance1Collective Voting (r:0 w:1) + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { + (39_869_000 as Weight) + // Standard Error: 0 + .saturating_add((8_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((107_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((406_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective Voting (r:1 w:1) + fn vote(m: u32, ) -> Weight { + (37_387_000 as Weight) + // Standard Error: 2_000 + .saturating_add((223_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Instance1Collective Voting (r:1 w:1) + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective Proposals (r:1 w:1) + // Storage: Instance1Collective ProposalOf (r:0 w:1) + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + (45_670_000 as Weight) + // Standard Error: 1_000 + .saturating_add((170_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((358_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Voting (r:1 w:1) + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective ProposalOf (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:1) + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { + (52_529_000 as Weight) + // Standard Error: 0 + .saturating_add((7_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((206_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((412_000 as Weight).saturating_mul(p as 
Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Voting (r:1 w:1) + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective Prime (r:1 w:0) + // Storage: Instance1Collective Proposals (r:1 w:1) + // Storage: Instance1Collective ProposalOf (r:0 w:1) + fn close_disapproved(m: u32, p: u32, ) -> Weight { + (50_427_000 as Weight) + // Standard Error: 1_000 + .saturating_add((170_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((354_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Voting (r:1 w:1) + // Storage: Instance1Collective Members (r:1 w:0) + // Storage: Instance1Collective Prime (r:1 w:0) + // Storage: Instance1Collective ProposalOf (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:1) + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { + (57_031_000 as Weight) + // Standard Error: 0 + .saturating_add((7_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 1_000 + .saturating_add((208_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((408_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Instance1Collective Proposals (r:1 w:1) + // Storage: Instance1Collective Voting (r:0 w:1) + // Storage: Instance1Collective ProposalOf (r:0 w:1) + fn disapprove_proposal(p: u32, ) -> Weight { + (27_458_000 as Weight) + // Standard Error: 1_000 + .saturating_add((402_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } +} diff --git a/frame/contracts/CHANGELOG.md b/frame/contracts/CHANGELOG.md new file mode 100644 index 0000000000000..eaedd28bf3e47 --- /dev/null +++ b/frame/contracts/CHANGELOG.md @@ -0,0 +1,113 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +The semantic versioning guarantees cover the interface to the substrate runtime which +includes this pallet as a dependency. This module will also add storage migrations whenever +changes require it. Stability with regard to offchain tooling is explicitly excluded from +this guarantee: For example adding a new field to an in-storage data structure will require +changes to frontends to properly display it. However, those changes will still be regarded +as a minor version bump. + +The interface provided to smart contracts will adhere to semver with one exception: Even +major version bumps will be backwards compatible with regard to already deployed contracts. +In other words: Upgrading this pallet will not break pre-existing contracts. + +## [Unreleased] + +### Added + +- Allow contracts to dispatch calls into the runtime (**unstable**) +[#9276](https://github.com/paritytech/substrate/pull/9276) + +- New **unstable** version of `seal_call` that offers more features. 
+[#8909](https://github.com/paritytech/substrate/pull/8909)
+
+- New **unstable** `seal_rent_params` and `seal_rent_status` contract callable functions.
+[#8231](https://github.com/paritytech/substrate/pull/8231)
+[#8780](https://github.com/paritytech/substrate/pull/8780)
+
+- New `instantiate` RPC that allows clients to dry-run contract instantiation.
+[#8451](https://github.com/paritytech/substrate/pull/8451)
+
+- New version of `seal_random` which exposes additional information.
+[#8329](https://github.com/paritytech/substrate/pull/8329)
+
+### Changed
+
+- Replaced `seal_println` with the `seal_debug_message` API which allows outputting debug
+messages to the console and RPC clients.
+[#8773](https://github.com/paritytech/substrate/pull/8773)
+[#9550](https://github.com/paritytech/substrate/pull/9550)
+
+- Make storage and fields of `Schedule` private to the crate.
+[#8359](https://github.com/paritytech/substrate/pull/8359)
+
+### Fixed
+
+- Remove pre-charging, which caused wrongly estimated weights.
+[#8976](https://github.com/paritytech/substrate/pull/8976)
+
+## [v3.0.0] 2021-02-25
+
+This version constitutes the first release that brings any stability guarantees (see above).
+
+### Added
+
+- Emit an event when a contract terminates (self-destructs).
+[#8014](https://github.com/paritytech/substrate/pull/8014)
+
+- Charge rent for code stored on the chain in addition to the already existing
+rent that is paid for data storage.
+[#7935](https://github.com/paritytech/substrate/pull/7935)
+
+- Allow the runtime to configure per storage item costs in addition
+to the already existing per byte costs.
+[#7819](https://github.com/paritytech/substrate/pull/7819)
+
+- Contracts are now deleted lazily so that the user who removes a contract
+does not need to pay for the deletion of the contract storage.
+[#7740](https://github.com/paritytech/substrate/pull/7740)
+
+- Allow runtime authors to define chain extensions in order to provide custom
+functionality to contracts.
+[#7548](https://github.com/paritytech/substrate/pull/7548)
+[#8003](https://github.com/paritytech/substrate/pull/8003)
+
+- Proper weights which are fully automated by benchmarking.
+[#6715](https://github.com/paritytech/substrate/pull/6715)
+[#7017](https://github.com/paritytech/substrate/pull/7017)
+[#7361](https://github.com/paritytech/substrate/pull/7361)
+
+### Changed
+
+- Collect the rent for one block during instantiation.
+[#7847](https://github.com/paritytech/substrate/pull/7847)
+
+- Instantiation takes a `salt` argument to allow for easier instantiation of the
+same code by the same sender.
+[#7482](https://github.com/paritytech/substrate/pull/7482)
+
+- Improve the information returned by the `contracts_call` RPC.
+[#7468](https://github.com/paritytech/substrate/pull/7468)
+
+- Simplify the node configuration necessary to add this module.
+[#7409](https://github.com/paritytech/substrate/pull/7409)
+
+### Fixed
+
+- Consider the code size of a contract in the weight that is charged for
+loading a contract from storage.
+[#8086](https://github.com/paritytech/substrate/pull/8086)
+
+- Fix a possible overflow in the storage size calculation.
+[#7885](https://github.com/paritytech/substrate/pull/7885)
+
+- Cap the surcharge reward that can be claimed.
+[#7870](https://github.com/paritytech/substrate/pull/7870)
+
+- Fix a possible DoS vector where contracts could allocate overly large buffers.
+[#7818](https://github.com/paritytech/substrate/pull/7818)
diff --git a/frame/contracts/COMPLEXITY.md b/frame/contracts/COMPLEXITY.md
index 32f6f84b89b6a..1fc1932fe1b5f 100644
--- a/frame/contracts/COMPLEXITY.md
+++ b/frame/contracts/COMPLEXITY.md
@@ -176,7 +176,7 @@ Before a call or instantiate can be performed the execution context must be initialized.
 For the first call or instantiation in the handling of an extrinsic, this involves two calls:
 
 1. `<pallet_timestamp::Pallet<T>>::now()`
-2. `<frame_system::Module<T>>::block_number()`
+2. `<frame_system::Pallet<T>>::block_number()`
 
 The complexity of initialization depends on the complexity of these functions. In the current
 implementation they just involve a DB read.
@@ -468,3 +468,20 @@ algorithms have different inherent complexity so users must expect the above
 mentioned crypto hashes to have varying gas costs.
 The complexity of each cryptographic hash function highly depends on the underlying
 implementation.
+
+### seal_ecdsa_recover
+
+This function receives the following arguments:
+
+- `signature`, a 65-byte buffer,
+- `message_hash`, a 32-byte buffer,
+- `output`, a 33-byte buffer that receives the compressed public key.
+
+It consists of the following steps:
+
+1. Loading the `signature` buffer from the sandbox memory (see sandboxing memory get).
+2. Loading the `message_hash` buffer from the sandbox memory.
+3. Invoking the executive function `secp256k1_ecdsa_recover_compressed`.
+4. Copying the bytes of the compressed public key into the contract's output buffer.
+
+**Complexity**: partially constant (the inputs are fixed-size), but dominated by the elliptic-curve point operations of the ECDSA recovery itself.
\ No newline at end of file
diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml
index 41c4e893f8ca3..80dc0b05e7511 100644
--- a/frame/contracts/Cargo.toml
+++ b/frame/contracts/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-contracts"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,49 +13,79 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-bitflags = "1.0"
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true }
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
-pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "common" }
-parity-wasm = { version = "0.41.0", default-features = false }
-pwasm-utils = { version = "0.14.0", default-features = false }
-serde = { version = "1.0.101", optional = true, features = ["derive"] }
-sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
-sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-sp-sandbox = { version = "0.8.0", default-features = false, path = "../../primitives/sandbox" }
-wasmi-validation = { version = "0.3.0", default-features = false }
+bitflags = "1.3"
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = [
+	"derive",
+	"max-encoded-len",
+] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+log = { version = 
"0.4", default-features = false } +pwasm-utils = { version = "0.18.2", default-features = false } +serde = { version = "1", optional = true, features = ["derive"] } +smallvec = { version = "1", default-features = false, features = [ + "const_generics", +] } +wasmi-validation = { version = "0.4", default-features = false } + +# Only used in benchmarking to generate random contract code +libsecp256k1 = { version = "0.3.5", optional = true, default-features = false, features = ["hmac"] } +rand = { version = "0.7.3", optional = true, default-features = false } +rand_pcg = { version = "0.2", optional = true } + +# Substrate Dependencies +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-contracts-primitives = { version = "4.0.0-dev", default-features = false, path = "common" } +pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../primitives/sandbox" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } [dev-dependencies] -assert_matches = "1.3.0" -hex-literal = "0.3.1" -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } -pallet-randomness-collective-flip = { version = "2.0.0", path = "../randomness-collective-flip" } -paste = "1.0" -pretty_assertions = "0.6.1" -wat = "1.0" +assert_matches = "1" +hex-literal = "0.3" +pretty_assertions = "0.7" +wat = "1" + +# Substrate Dependencies +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } +pallet-randomness-collective-flip = { version = "4.0.0-dev", path = "../randomness-collective-flip" } +pallet-utility = { version = "4.0.0-dev", path = "../utility" } [features] default = ["std"] std = [ "serde", "codec/std", + "scale-info/std", "sp-core/std", "sp-runtime/std", "sp-io/std", "sp-std/std", "sp-sandbox/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", - "parity-wasm/std", "pwasm-utils/std", "wasmi-validation/std", "pallet-contracts-primitives/std", + "pallet-contracts-proc-macro/full", + "log/std", + "rand/std", + "libsecp256k1/std", ] runtime-benchmarks = [ "frame-benchmarking", + "libsecp256k1", + "rand", + "rand_pcg", + "unstable-interface", ] +try-runtime = ["frame-support/try-runtime"] +# Make contract callable functions marked as __unstable__ available. Do not enable +# on live chains as those are subject to change. +unstable-interface = [] diff --git a/frame/contracts/README.md b/frame/contracts/README.md index dddcc3c8b8b85..f3a8d13f6e77d 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -2,8 +2,10 @@ The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. 
-- [`contract::Trait`](https://docs.rs/pallet-contracts/latest/pallet_contracts/trait.Trait.html)
 - [`Call`](https://docs.rs/pallet-contracts/latest/pallet_contracts/enum.Call.html)
+- [`Config`](https://docs.rs/pallet-contracts/latest/pallet_contracts/trait.Config.html)
+- [`Error`](https://docs.rs/pallet-contracts/latest/pallet_contracts/enum.Error.html)
+- [`Event`](https://docs.rs/pallet-contracts/latest/pallet_contracts/enum.Event.html)
 
 ## Overview
 
@@ -32,6 +34,9 @@ reverted at the current call's contract level. For example, if contract A calls
 then all of B's calls are reverted. Assuming correct error handling by contract A, A's other calls
 and state changes still persist.
 
+One gas is equivalent to one [weight](https://substrate.dev/docs/en/knowledgebase/learn-substrate/weight)
+which is defined as one picosecond of execution time on the runtime's reference machine.
+
 ### Notable Scenarios
 
 Contract call failures are not always cascading. When failures occur in a sub-call, they do not "bubble up",
@@ -42,23 +47,62 @@ fails, A can decide how to handle that failure, either proceeding or reverting A
 
 ### Dispatchable functions
 
-* `put_code` - Stores the given binary Wasm code into the chain's storage and returns its `code_hash`.
-* `instantiate` - Deploys a new contract from the given `code_hash`, optionally transferring some balance.
-This instantiates a new smart contract account and calls its contract deploy handler to
-initialize the contract.
-* `call` - Makes a call to an account, optionally transferring some balance.
+Those are documented in the [reference documentation](https://docs.rs/pallet-contracts/latest/pallet_contracts/#dispatchable-functions).
 
 ## Usage
 
-The Contract module is a work in progress. The following examples show how this Contract module
-can be used to instantiate and call contracts.
+This module executes WebAssembly smart contracts. These can potentially be written in any language
+that compiles to WebAssembly. However, using a language that specifically targets this module
+will make things a lot easier. One such language is [`ink`](https://github.com/paritytech/ink)
+which is an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables
+writing WebAssembly based smart contracts in the Rust programming language.
+
+## Debugging
+
+Contracts can emit messages to the client when called as RPC through the `seal_debug_message`
+API. This is exposed in ink! via
+[`ink_env::debug_println()`](https://docs.rs/ink_env/latest/ink_env/fn.debug_println.html).
+
+Those messages are gathered into an internal buffer and sent to the RPC client.
+It is up to the individual client whether and how those messages are presented to the user.
+
+This buffer is also printed as a debug message. In order to see these messages on the node
+console the log level for the `runtime::contracts` target needs to be raised to at least
+the `debug` level. However, those messages are easy to overlook because of the noise generated
+by block production. A good starting point for observing them on the console is using this
+command line in the root directory of the substrate repository:
+
+```bash
+cargo run --release -- --dev --tmp -lerror,runtime::contracts=debug
+```
+
+This raises the log level of `runtime::contracts` to `debug` and all other targets
+to `error` in order to prevent them from spamming the console.
+
+`--dev`: Use a dev chain spec
+`--tmp`: Use temporary storage for chain data (the chain state is deleted on exit)
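+
+Assuming the ink! API referenced above, a contract message might emit such a debug
+line like this (an illustrative sketch; the message name is made up):
+
+```rust
+#[ink(message)]
+pub fn do_something(&self) {
+    // Ends up in the RPC debug buffer and, with the logging setup shown
+    // above, on the node console.
+    ink_env::debug_println("entering do_something");
+}
+```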
+
+## Unstable Interfaces
+
+Driven by the desire to have an iterative approach in developing new contract interfaces
+this pallet contains the concept of an unstable interface. Akin to the rust nightly compiler
+it allows us to add new interfaces but mark them as unstable so that contract languages can
+experiment with them and give feedback before we stabilize those.
+
+In order to access interfaces marked as `__unstable__` in `runtime.rs` one needs to compile
+this crate with the `unstable-interface` feature enabled. It should be obvious that any
+live runtime should never be compiled with this feature: In addition to being subject to
+change or removal, those interfaces do not have proper weights associated with them and
+are therefore considered unsafe.
 
-* [`ink`](https://github.com/paritytech/ink) is
-an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing
-WebAssembly based smart contracts in the Rust programming language. This is a work in progress.
+The substrate runtime exposes this feature as `contracts-unstable-interface`. An example
+command line for running the substrate node with unstable contracts interfaces:
 
-## Related Modules
+```bash
+cargo run --release --features contracts-unstable-interface -- --dev
+```
 
-* [Balances](https://docs.rs/pallet-balances/latest/pallet_balances/)
+New interfaces are generally added as unstable and might go through several iterations
+before they are promoted to a stable interface.
 
-License: Apache-2.0
\ No newline at end of file
+License: Apache-2.0
diff --git a/frame/contracts/benchmarks/README.md b/frame/contracts/benchmarks/README.md
new file mode 100644
index 0000000000000..a4b15bd840db4
--- /dev/null
+++ b/frame/contracts/benchmarks/README.md
@@ -0,0 +1,9 @@
+# Benchmarks
+
+This directory contains real-world (ink!, solang) contracts which are used in macro benchmarks.
+Those benchmarks are not used to determine weights but rather to compare different contract
+languages and execution engines with larger wasm modules.
+
+Files in this directory are used by `#[extra]` benchmarks in `src/benchmarking`. The json
+files are for informational purposes only and are not consumed by the benchmarks.
+
diff --git a/frame/contracts/benchmarks/ink_erc20.json b/frame/contracts/benchmarks/ink_erc20.json
new file mode 100644
index 0000000000000..390dd9b06cd4c
--- /dev/null
+++ b/frame/contracts/benchmarks/ink_erc20.json
@@ -0,0 +1,819 @@
+{
+  "metadataVersion": "0.1.0",
+  "source": {
+    "hash": "0x6be8492017fe96b7a92bb39b4ede04b96effb8fcaf9237bfdccef7d9e732c760",
+    "language": "ink! 3.0.0-rc4",
+    "compiler": "rustc 1.56.0-nightly"
+  },
+  "contract": {
+    "name": "erc20",
+    "version": "3.0.0-rc4",
+    "authors": [
+      "Parity Technologies "
+    ]
+  },
+  "spec": {
+    "constructors": [
+      {
+        "args": [
+          {
+            "name": "initial_supply",
+            "type": {
+              "displayName": [
+                "Balance"
+              ],
+              "type": 1
+            }
+          }
+        ],
+        "docs": [
+          "Creates a new ERC-20 contract with the specified initial supply."
+ ], + "name": [ + "new" + ], + "selector": "0x9bae9d5e" + } + ], + "docs": [], + "events": [ + { + "args": [ + { + "docs": [], + "indexed": true, + "name": "from", + "type": { + "displayName": [ + "Option" + ], + "type": 15 + } + }, + { + "docs": [], + "indexed": true, + "name": "to", + "type": { + "displayName": [ + "Option" + ], + "type": 15 + } + }, + { + "docs": [], + "indexed": false, + "name": "value", + "type": { + "displayName": [ + "Balance" + ], + "type": 1 + } + } + ], + "docs": [ + " Event emitted when a token transfer occurs." + ], + "name": "Transfer" + }, + { + "args": [ + { + "docs": [], + "indexed": true, + "name": "owner", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + }, + { + "docs": [], + "indexed": true, + "name": "spender", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + }, + { + "docs": [], + "indexed": false, + "name": "value", + "type": { + "displayName": [ + "Balance" + ], + "type": 1 + } + } + ], + "docs": [ + " Event emitted when an approval occurs that `spender` is allowed to withdraw", + " up to the amount of `value` tokens from `owner`." + ], + "name": "Approval" + } + ], + "messages": [ + { + "args": [], + "docs": [ + " Returns the total token supply." + ], + "mutates": false, + "name": [ + "total_supply" + ], + "payable": false, + "returnType": { + "displayName": [ + "Balance" + ], + "type": 1 + }, + "selector": "0xdb6375a8" + }, + { + "args": [ + { + "name": "owner", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + } + ], + "docs": [ + " Returns the account balance for the specified `owner`.", + "", + " Returns `0` if the account is non-existent." + ], + "mutates": false, + "name": [ + "balance_of" + ], + "payable": false, + "returnType": { + "displayName": [ + "Balance" + ], + "type": 1 + }, + "selector": "0x0f755a56" + }, + { + "args": [ + { + "name": "owner", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "spender", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + } + ], + "docs": [ + " Returns the amount which `spender` is still allowed to withdraw from `owner`.", + "", + " Returns `0` if no allowance has been set `0`." + ], + "mutates": false, + "name": [ + "allowance" + ], + "payable": false, + "returnType": { + "displayName": [ + "Balance" + ], + "type": 1 + }, + "selector": "0x6a00165e" + }, + { + "args": [ + { + "name": "to", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "value", + "type": { + "displayName": [ + "Balance" + ], + "type": 1 + } + } + ], + "docs": [ + " Transfers `value` amount of tokens from the caller's account to account `to`.", + "", + " On success a `Transfer` event is emitted.", + "", + " # Errors", + "", + " Returns `InsufficientBalance` error if there are not enough tokens on", + " the caller's account balance." + ], + "mutates": true, + "name": [ + "transfer" + ], + "payable": false, + "returnType": { + "displayName": [ + "Result" + ], + "type": 12 + }, + "selector": "0x84a15da1" + }, + { + "args": [ + { + "name": "spender", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "value", + "type": { + "displayName": [ + "Balance" + ], + "type": 1 + } + } + ], + "docs": [ + " Allows `spender` to withdraw from the caller's account multiple times, up to", + " the `value` amount.", + "", + " If this function is called again it overwrites the current allowance with `value`.", + "", + " An `Approval` event is emitted." 
+ ], + "mutates": true, + "name": [ + "approve" + ], + "payable": false, + "returnType": { + "displayName": [ + "Result" + ], + "type": 12 + }, + "selector": "0x681266a0" + }, + { + "args": [ + { + "name": "from", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "to", + "type": { + "displayName": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "value", + "type": { + "displayName": [ + "Balance" + ], + "type": 1 + } + } + ], + "docs": [ + " Transfers `value` tokens on the behalf of `from` to the account `to`.", + "", + " This can be used to allow a contract to transfer tokens on ones behalf and/or", + " to charge fees in sub-currencies, for example.", + "", + " On success a `Transfer` event is emitted.", + "", + " # Errors", + "", + " Returns `InsufficientAllowance` error if there are not enough tokens allowed", + " for the caller to withdraw from `from`.", + "", + " Returns `InsufficientBalance` error if there are not enough tokens on", + " the account balance of `from`." + ], + "mutates": true, + "name": [ + "transfer_from" + ], + "payable": false, + "returnType": { + "displayName": [ + "Result" + ], + "type": 12 + }, + "selector": "0x0b396f18" + } + ] + }, + "storage": { + "struct": { + "fields": [ + { + "layout": { + "cell": { + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "ty": 1 + } + }, + "name": "total_supply" + }, + { + "layout": { + "struct": { + "fields": [ + { + "layout": { + "struct": { + "fields": [ + { + "layout": { + "cell": { + "key": "0x0100000000000000000000000000000000000000000000000000000000000000", + "ty": 2 + } + }, + "name": "header" + }, + { + "layout": { + "struct": { + "fields": [ + { + "layout": { + "cell": { + "key": "0x0200000000000000000000000000000000000000000000000000000000000000", + "ty": 3 + } + }, + "name": "len" + }, + { + "layout": { + "array": { + "cellsPerElem": 1, + "layout": { + "cell": { + "key": "0x0200000001000000000000000000000000000000000000000000000000000000", + "ty": 4 + } + }, + "len": 4294967295, + "offset": "0x0300000000000000000000000000000000000000000000000000000000000000" + } + }, + "name": "elems" + } + ] + } + }, + "name": "entries" + } + ] + } + }, + "name": "keys" + }, + { + "layout": { + "hash": { + "layout": { + "cell": { + "key": "0x0300000001000000000000000000000000000000000000000000000000000000", + "ty": 9 + } + }, + "offset": "0x0200000001000000000000000000000000000000000000000000000000000000", + "strategy": { + "hasher": "Blake2x256", + "postfix": "", + "prefix": "0x696e6b20686173686d6170" + } + } + }, + "name": "values" + } + ] + } + }, + "name": "balances" + }, + { + "layout": { + "struct": { + "fields": [ + { + "layout": { + "struct": { + "fields": [ + { + "layout": { + "cell": { + "key": "0x0300000001000000000000000000000000000000000000000000000000000000", + "ty": 2 + } + }, + "name": "header" + }, + { + "layout": { + "struct": { + "fields": [ + { + "layout": { + "cell": { + "key": "0x0400000001000000000000000000000000000000000000000000000000000000", + "ty": 3 + } + }, + "name": "len" + }, + { + "layout": { + "array": { + "cellsPerElem": 1, + "layout": { + "cell": { + "key": "0x0400000002000000000000000000000000000000000000000000000000000000", + "ty": 10 + } + }, + "len": 4294967295, + "offset": "0x0500000001000000000000000000000000000000000000000000000000000000" + } + }, + "name": "elems" + } + ] + } + }, + "name": "entries" + } + ] + } + }, + "name": "keys" + }, + { + "layout": { + "hash": { + "layout": { + "cell": { + "key": 
"0x0500000002000000000000000000000000000000000000000000000000000000", + "ty": 9 + } + }, + "offset": "0x0400000002000000000000000000000000000000000000000000000000000000", + "strategy": { + "hasher": "Blake2x256", + "postfix": "", + "prefix": "0x696e6b20686173686d6170" + } + } + }, + "name": "values" + } + ] + } + }, + "name": "allowances" + } + ] + } + }, + "types": [ + { + "def": { + "primitive": "u128" + } + }, + { + "def": { + "composite": { + "fields": [ + { + "name": "last_vacant", + "type": 3, + "typeName": "Index" + }, + { + "name": "len", + "type": 3, + "typeName": "u32" + }, + { + "name": "len_entries", + "type": 3, + "typeName": "u32" + } + ] + } + }, + "path": [ + "ink_storage", + "collections", + "stash", + "Header" + ] + }, + { + "def": { + "primitive": "u32" + } + }, + { + "def": { + "variant": { + "variants": [ + { + "fields": [ + { + "type": 8, + "typeName": "VacantEntry" + } + ], + "name": "Vacant" + }, + { + "fields": [ + { + "type": 5, + "typeName": "T" + } + ], + "name": "Occupied" + } + ] + } + }, + "params": [ + 5 + ], + "path": [ + "ink_storage", + "collections", + "stash", + "Entry" + ] + }, + { + "def": { + "composite": { + "fields": [ + { + "type": 6, + "typeName": "[u8; 32]" + } + ] + } + }, + "path": [ + "ink_env", + "types", + "AccountId" + ] + }, + { + "def": { + "array": { + "len": 32, + "type": 7 + } + } + }, + { + "def": { + "primitive": "u8" + } + }, + { + "def": { + "composite": { + "fields": [ + { + "name": "next", + "type": 3, + "typeName": "Index" + }, + { + "name": "prev", + "type": 3, + "typeName": "Index" + } + ] + } + }, + "path": [ + "ink_storage", + "collections", + "stash", + "VacantEntry" + ] + }, + { + "def": { + "composite": { + "fields": [ + { + "name": "value", + "type": 1, + "typeName": "V" + }, + { + "name": "key_index", + "type": 3, + "typeName": "KeyIndex" + } + ] + } + }, + "params": [ + 1 + ], + "path": [ + "ink_storage", + "collections", + "hashmap", + "ValueEntry" + ] + }, + { + "def": { + "variant": { + "variants": [ + { + "fields": [ + { + "type": 8, + "typeName": "VacantEntry" + } + ], + "name": "Vacant" + }, + { + "fields": [ + { + "type": 11, + "typeName": "T" + } + ], + "name": "Occupied" + } + ] + } + }, + "params": [ + 11 + ], + "path": [ + "ink_storage", + "collections", + "stash", + "Entry" + ] + }, + { + "def": { + "tuple": [ + 5, + 5 + ] + } + }, + { + "def": { + "variant": { + "variants": [ + { + "fields": [ + { + "type": 13, + "typeName": "T" + } + ], + "name": "Ok" + }, + { + "fields": [ + { + "type": 14, + "typeName": "E" + } + ], + "name": "Err" + } + ] + } + }, + "params": [ + 13, + 14 + ], + "path": [ + "Result" + ] + }, + { + "def": { + "tuple": [] + } + }, + { + "def": { + "variant": { + "variants": [ + { + "discriminant": 0, + "name": "InsufficientBalance" + }, + { + "discriminant": 1, + "name": "InsufficientAllowance" + } + ] + } + }, + "path": [ + "erc20", + "erc20", + "Error" + ] + }, + { + "def": { + "variant": { + "variants": [ + { + "name": "None" + }, + { + "fields": [ + { + "type": 5, + "typeName": "T" + } + ], + "name": "Some" + } + ] + } + }, + "params": [ + 5 + ], + "path": [ + "Option" + ] + } + ] +} \ No newline at end of file diff --git a/frame/contracts/benchmarks/ink_erc20.wasm b/frame/contracts/benchmarks/ink_erc20.wasm new file mode 100644 index 0000000000000..ffd522760a02d Binary files /dev/null and b/frame/contracts/benchmarks/ink_erc20.wasm differ diff --git a/frame/contracts/benchmarks/ink_erc20_test.wasm b/frame/contracts/benchmarks/ink_erc20_test.wasm new file mode 100644 index 
0000000000000..f5d84552960a3 Binary files /dev/null and b/frame/contracts/benchmarks/ink_erc20_test.wasm differ diff --git a/frame/contracts/benchmarks/solang_erc20.json b/frame/contracts/benchmarks/solang_erc20.json new file mode 100644 index 0000000000000..9d8fd5ce70e70 --- /dev/null +++ b/frame/contracts/benchmarks/solang_erc20.json @@ -0,0 +1,581 @@ +{ + "contract": { + "authors": [ + "unknown" + ], + "name": "ERC20PresetFixedSupply", + "version": "0.0.1" + }, + "metadataVersion": "0.1.0", + "source": { + "compiler": "solang 0.1.7", + "hash": "0x9c55e342566e89c741eb641eec3af796836da750fc930c55bccc0604a47ef700", + "language": "Solidity 0.1.7" + }, + "spec": { + "constructors": [ + { + "args": [ + { + "name": "name", + "type": { + "display_name": [ + "String" + ], + "type": 2 + } + }, + { + "name": "symbol", + "type": { + "display_name": [ + "String" + ], + "type": 2 + } + }, + { + "name": "initialSupply", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + }, + { + "name": "owner", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + } + ], + "docs": [ + "" + ], + "name": "new", + "selector": "0xa6f1f5e1" + } + ], + "events": [ + { + "args": [ + { + "indexed": true, + "name": "owner", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "indexed": true, + "name": "spender", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "indexed": false, + "name": "value", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "name": "Approval" + }, + { + "args": [ + { + "indexed": true, + "name": "from", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "indexed": true, + "name": "to", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "indexed": false, + "name": "value", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "name": "Transfer" + } + ], + "messages": [ + { + "args": [ + { + "name": "account", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "amount", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "mutates": true, + "name": "burnFrom", + "payable": false, + "return_type": null, + "selector": "0x0f1354f3" + }, + { + "args": [ + { + "name": "account", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + } + ], + "docs": [ + "" + ], + "mutates": false, + "name": "balanceOf", + "payable": false, + "return_type": { + "display_name": [ + "u256" + ], + "type": 1 + }, + "selector": "0x6c7f1542" + }, + { + "args": [], + "docs": [ + "" + ], + "mutates": false, + "name": "totalSupply", + "payable": false, + "return_type": { + "display_name": [ + "u256" + ], + "type": 1 + }, + "selector": "0x18160ddd" + }, + { + "args": [], + "docs": [ + "" + ], + "mutates": false, + "name": "decimals", + "payable": false, + "return_type": { + "display_name": [ + "u8" + ], + "type": 3 + }, + "selector": "0x313ce567" + }, + { + "args": [ + { + "name": "owner", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "spender", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + } + ], + "docs": [ + "" + ], + "mutates": false, + "name": "allowance", + "payable": false, + "return_type": { + "display_name": [ + "u256" + ], + "type": 1 + }, + "selector": "0xf2a9a8c7" + }, + { + "args": [], + "docs": [ + "" + ], + "mutates": false, + "name": "name", + 
"payable": false, + "return_type": { + "display_name": [ + "String" + ], + "type": 2 + }, + "selector": "0x06fdde03" + }, + { + "args": [ + { + "name": "spender", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "subtractedValue", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "mutates": true, + "name": "decreaseAllowance", + "payable": false, + "return_type": { + "display_name": [ + "bool" + ], + "type": 6 + }, + "selector": "0x4b76697b" + }, + { + "args": [ + { + "name": "sender", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "recipient", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "amount", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "mutates": true, + "name": "transferFrom", + "payable": false, + "return_type": { + "display_name": [ + "bool" + ], + "type": 6 + }, + "selector": "0x2fb840f5" + }, + { + "args": [], + "docs": [ + "" + ], + "mutates": false, + "name": "symbol", + "payable": false, + "return_type": { + "display_name": [ + "String" + ], + "type": 2 + }, + "selector": "0x95d89b41" + }, + { + "args": [ + { + "name": "spender", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "addedValue", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "mutates": true, + "name": "increaseAllowance", + "payable": false, + "return_type": { + "display_name": [ + "bool" + ], + "type": 6 + }, + "selector": "0xb936c899" + }, + { + "args": [ + { + "name": "recipient", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "amount", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "mutates": true, + "name": "transfer", + "payable": false, + "return_type": { + "display_name": [ + "bool" + ], + "type": 6 + }, + "selector": "0x6a467394" + }, + { + "args": [ + { + "name": "spender", + "type": { + "display_name": [ + "AccountId" + ], + "type": 5 + } + }, + { + "name": "amount", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "mutates": true, + "name": "approve", + "payable": false, + "return_type": { + "display_name": [ + "bool" + ], + "type": 6 + }, + "selector": "0x47144421" + }, + { + "args": [ + { + "name": "amount", + "type": { + "display_name": [ + "u256" + ], + "type": 1 + } + } + ], + "docs": [ + "" + ], + "mutates": true, + "name": "burn", + "payable": false, + "return_type": null, + "selector": "0x42966c68" + } + ] + }, + "storage": { + "struct": { + "fields": [ + { + "layout": { + "cell": { + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "ty": 1 + } + }, + "name": "_totalSupply" + }, + { + "layout": { + "cell": { + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "ty": 2 + } + }, + "name": "_name" + }, + { + "layout": { + "cell": { + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "ty": 2 + } + }, + "name": "_symbol" + } + ] + } + }, + "types": [ + { + "def": { + "primitive": "u256" + } + }, + { + "def": { + "primitive": "str" + } + }, + { + "def": { + "primitive": "u8" + } + }, + { + "def": { + "array": { + "len": 32, + "type": 3 + } + } + }, + { + "def": { + "composite": { + "fields": [ + { + "type": 4 + } + ] + } + }, + "path": [ + "AccountId" + ] + }, + { + "def": { + "primitive": 
"bool" + } + } + ] +} diff --git a/frame/contracts/benchmarks/solang_erc20.wasm b/frame/contracts/benchmarks/solang_erc20.wasm new file mode 100644 index 0000000000000..0796085d33249 Binary files /dev/null and b/frame/contracts/benchmarks/solang_erc20.wasm differ diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index 753ef9c08122f..b441d88453ae2 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,15 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -# This crate should not rely on any of the frame primitives. -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +bitflags = "1.0" +codec = { package = "parity-scale-codec", version = "2", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1", features = ["derive"], optional = true } + +# Substrate Dependencies (This crate should not rely on frame) +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core", default-features = false } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", + "sp-core/std", "sp-runtime/std", "sp-std/std", + "serde", ] diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/common/src/lib.rs index 6a74a417fa0fe..c57f728c26b68 100644 --- a/frame/contracts/common/src/lib.rs +++ b/frame/contracts/common/src/lib.rs @@ -1,47 +1,151 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! A crate that hosts a common definitions that are relevant for the pallet-contracts. 
#![cfg_attr(not(feature = "std"), no_std)] +use bitflags::bitflags; +use codec::{Decode, Encode}; +use sp_core::Bytes; +use sp_runtime::{DispatchError, RuntimeDebug}; use sp_std::prelude::*; -/// A result type of a get storage call. +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + +/// Result type of a `bare_call` or `bare_instantiate` call. +/// +/// It contains the execution result together with some auxiliary information. +#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct ContractResult { + /// How much gas was consumed during execution. + pub gas_consumed: u64, + /// How much gas is required as gas limit in order to execute this call. + /// + /// This value should be used to determine the gas limit for on-chain execution. + /// + /// # Note + /// + /// This can only different from [`Self::gas_consumed`] when weight pre charging + /// is used. Currently, only `seal_call_runtime` makes use of pre charging. + pub gas_required: u64, + /// An optional debug message. This message is only filled when explicitly requested + /// by the code that calls into the contract. Otherwise it is empty. + /// + /// The contained bytes are valid UTF-8. This is not declared as `String` because + /// this type is not allowed within the runtime. + /// + /// Clients should not make any assumptions about the format of the buffer. + /// They should just display it as-is. It is **not** only a collection of log lines + /// provided by a contract but a formatted buffer with different sections. + /// + /// # Note + /// + /// The debug message is never generated during on-chain execution. It is reserved for + /// RPC calls. + #[cfg_attr(feature = "std", serde(with = "as_string"))] + pub debug_message: Vec, + /// The execution result of the wasm code. + pub result: T, +} + +/// Result type of a `bare_call` call. +pub type ContractExecResult = ContractResult>; + +/// Result type of a `bare_instantiate` call. +pub type ContractInstantiateResult = + ContractResult, DispatchError>>; + +/// Result type of a `get_storage` call. pub type GetStorageResult = Result>, ContractAccessError>; /// The possible errors that can happen querying the storage of a contract. -#[derive(Eq, PartialEq, codec::Encode, codec::Decode, sp_runtime::RuntimeDebug)] +#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub enum ContractAccessError { /// The given address doesn't point to a contract. DoesntExist, - /// The specified contract is a tombstone and thus cannot have any storage. - IsTombstone, } -/// A result type of a `rent_projection` call. -pub type RentProjectionResult = - Result, ContractAccessError>; +bitflags! { + /// Flags used by a contract to customize exit behaviour. + #[derive(Encode, Decode)] + #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[cfg_attr(feature = "std", serde(rename_all = "camelCase", transparent))] + pub struct ReturnFlags: u32 { + /// If this bit is set all changes made by the contract execution are rolled back. + const REVERT = 0x0000_0001; + } +} -#[derive(Eq, PartialEq, codec::Encode, codec::Decode, sp_runtime::RuntimeDebug)] -pub enum RentProjection { - /// Eviction is projected to happen at the specified block number. - EvictionAt(BlockNumber), - /// No eviction is scheduled. - /// - /// E.g. because the contract accumulated enough funds to offset the rent storage costs. 
- NoEviction, +/// Output of a contract call or instantiation which ran to completion. +#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct ExecReturnValue { + /// Flags passed along by `seal_return`. Empty when `seal_return` was never called. + pub flags: ReturnFlags, + /// Buffer passed along by `seal_return`. Empty when `seal_return` was never called. + pub data: Bytes, +} + +impl ExecReturnValue { + /// We understand the absence of a revert flag as success. + pub fn is_success(&self) -> bool { + !self.flags.contains(ReturnFlags::REVERT) + } +} + +/// The result of a successful contract instantiation. +#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct InstantiateReturnValue<AccountId> { + /// The output of the called constructor. + pub result: ExecReturnValue, + /// The account id of the new contract. + pub account_id: AccountId, +} + +/// Reference to an existing code hash or a new wasm module. +#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub enum Code<Hash> { + /// A wasm module as raw bytes. + Upload(Bytes), + /// The code hash of an on-chain wasm blob. + Existing(Hash), +} + +#[cfg(feature = "std")] +mod as_string { + use super::*; + use serde::{ser::Error, Deserializer, Serializer}; + + pub fn serialize<S: Serializer>(bytes: &Vec<u8>, serializer: S) -> Result<S::Ok, S::Error> { + std::str::from_utf8(bytes) + .map_err(|e| S::Error::custom(format!("Debug buffer contains invalid UTF8: {}", e)))? + .serialize(serializer) + } + + pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Vec<u8>, D::Error> { + Ok(String::deserialize(deserializer)?.into_bytes()) + } } diff --git a/frame/contracts/fixtures/call_return_code.wat b/frame/contracts/fixtures/call_return_code.wat index f7a7ff20a49e3..4e9ab4dd77ce1 100644 --- a/frame/contracts/fixtures/call_return_code.wat +++ b/frame/contracts/fixtures/call_return_code.wat @@ -1,5 +1,5 @@ -;; This calls Django (4) and transfers 100 balance during this call and copies the return code -;; of this call to the output buffer. +;; This calls the supplied dest and transfers 100 balance during this call and copies +;; the return code of this call to the output buffer. ;; It also forwards its input to the callee.
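To make the new result shape concrete, here is a small illustrative helper (not part of this diff) showing how an RPC client might inspect a `ContractExecResult` after a dry run; the `report` function is purely for demonstration:

```rust
// Illustrative only: consumes the types defined in pallet-contracts-primitives above.
use pallet_contracts_primitives::ContractExecResult;

fn report(result: &ContractExecResult) {
    // `gas_required` is the value to use as the on-chain gas limit; it can
    // exceed `gas_consumed` when weight pre-charging was involved.
    println!("gas consumed: {}, gas required: {}", result.gas_consumed, result.gas_required);
    // The debug buffer is valid UTF-8 by construction (see `as_string` above).
    if let Ok(msg) = core::str::from_utf8(&result.debug_message) {
        println!("debug buffer: {}", msg);
    }
    match &result.result {
        // Absence of the REVERT flag counts as success.
        Ok(output) if output.is_success() => println!("output: {:?}", output.data.0),
        Ok(_) => println!("contract signalled REVERT"),
        Err(e) => println!("dispatch error: {:?}", e),
    }
}
```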
(module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) @@ -7,38 +7,36 @@ (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) address of django - (data (i32.const 0) "\04\00\00\00\00\00\00\00") + ;; [0, 8) 100 balance + (data (i32.const 0) "\64\00\00\00\00\00\00\00") - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [8, 12) here we store the return code of the transfer - ;; [16, 20) here we store the return code of the transfer + ;; [12, 16) size of the input data + (data (i32.const 12) "\24") - ;; [20, 24) here we store the input data - - ;; [24, 28) size of the input data - (data (i32.const 24) "\04") + ;; [16, inf) here we store the input data + ;; 32 byte dest + 4 byte forward (func (export "deploy")) (func (export "call") - (call $seal_input (i32.const 20) (i32.const 24)) + (call $seal_input (i32.const 16) (i32.const 12)) (i32.store - (i32.const 16) + (i32.const 8) (call $seal_call - (i32.const 0) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 16) ;; Pointer to "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address - (i32.load (i32.const 24)) ;; Length of input data buffer + (i32.const 48) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Ptr to output buffer len ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/call_runtime.wat b/frame/contracts/fixtures/call_runtime.wat new file mode 100644 index 0000000000000..d5467f6e95e3e --- /dev/null +++ b/frame/contracts/fixtures/call_runtime.wat @@ -0,0 +1,33 @@ +;; This passes its input to `seal_call_runtime` and returns the return value to its caller. 
+(module + (import "__unstable__" "seal_call_runtime" (func $seal_call_runtime (param i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + + (func (export "call") + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + ;; Just use the call passed as input and store result to memory + (i32.store (i32.const 0) + (call $seal_call_runtime + (i32.const 4) ;; Pointer where the call is stored + (i32.load (i32.const 0)) ;; Size of the call + ) + ) + (call $seal_return + (i32.const 0) ;; flags + (i32.const 0) ;; returned value + (i32.const 4) ;; length of returned value + ) + ) + + (func (export "deploy")) +) diff --git a/frame/contracts/fixtures/call_with_limit.wat b/frame/contracts/fixtures/call_with_limit.wat new file mode 100644 index 0000000000000..abb8708267271 --- /dev/null +++ b/frame/contracts/fixtures/call_with_limit.wat @@ -0,0 +1,37 @@ +;; This expects [account_id, gas_limit] as input and calls the account_id with the supplied gas_limit. +;; It returns the result of the call as output data. +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + + (func (export "deploy")) + + (func (export "call") + ;; Receive the encoded call + gas_limit + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + (i32.store + (i32.const 0) + (call $seal_call + (i32.const 4) ;; Pointer to "callee" address. + (i32.const 32) ;; Length of "callee" address. + (i64.load (i32.const 36)) ;; How much gas to devote for the execution. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Length of the buffer with value to transfer. 
+ (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Ptr to output buffer len + ) + ) + (call $seal_return (i32.const 0) (i32.const 0) (i32.const 4)) + ) +) diff --git a/frame/contracts/fixtures/caller_contract.wat b/frame/contracts/fixtures/caller_contract.wat index 408af92e18296..9c7cdf62abfc9 100644 --- a/frame/contracts/fixtures/caller_contract.wat +++ b/frame/contracts/fixtures/caller_contract.wat @@ -2,8 +2,9 @@ (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_balance" (func $seal_balance (param i32 i32))) (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_println" (func $seal_println (param i32 i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "env" "memory" (memory 1 1)) (func $assert (param i32) @@ -71,6 +72,8 @@ (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_len ) ) @@ -98,6 +101,9 @@ (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_len + ) ) @@ -114,7 +120,7 @@ ;; Length of the output buffer (i32.store (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 8) + (i32.const 256) ) ;; Deploy the contract successfully. @@ -131,6 +137,8 @@ (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_len ) ) @@ -142,7 +150,7 @@ ;; Check that address has the expected length (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 8)) + (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 32)) ) ;; Check that balance has been deducted. @@ -169,7 +177,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. @@ -205,7 +213,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 1) ;; Supply too little gas (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. @@ -242,7 +250,7 @@ (set_local $exit_code (call $seal_call (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all.
(i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. diff --git a/frame/contracts/fixtures/chain_extension.wat b/frame/contracts/fixtures/chain_extension.wat new file mode 100644 index 0000000000000..db7e83fd96b42 --- /dev/null +++ b/frame/contracts/fixtures/chain_extension.wat @@ -0,0 +1,46 @@ +;; Call chain extension by passing through input and output of this contract +(module + (import "seal0" "seal_call_chain_extension" + (func $seal_call_chain_extension (param i32 i32 i32 i32 i32) (result i32)) + ) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 16 16)) + + (func $assert (param i32) + (block $ok + (br_if $ok (get_local 0)) + (unreachable) + ) + ) + + ;; [0, 4) len of input output + (data (i32.const 0) "\02") + + ;; [4, 12) buffer for input + + ;; [12, 16) len of output buffer + (data (i32.const 12) "\02") + + ;; [16, inf) buffer for output + + (func (export "deploy")) + + (func (export "call") + (call $seal_input (i32.const 4) (i32.const 0)) + + ;; the chain extension passes through the input and returns it as output + (call $seal_call_chain_extension + (i32.load8_u (i32.const 4)) ;; func_id + (i32.const 4) ;; input_ptr + (i32.load (i32.const 0)) ;; input_len + (i32.const 16) ;; output_ptr + (i32.const 12) ;; output_len_ptr + ) + + ;; the chain extension passes through the func_id + (call $assert (i32.eq (i32.load8_u (i32.const 4)))) + + (call $seal_return (i32.const 0) (i32.const 16) (i32.load (i32.const 12))) + ) +) diff --git a/frame/contracts/fixtures/check_default_rent_allowance.wat b/frame/contracts/fixtures/check_default_rent_allowance.wat deleted file mode 100644 index 64cd67186bff2..0000000000000 --- a/frame/contracts/fixtures/check_default_rent_allowance.wat +++ /dev/null @@ -1,43 +0,0 @@ -(module - (import "seal0" "seal_rent_allowance" (func $seal_rent_allowance (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 8) reserved for $seal_rent_allowance output - - ;; [8, 16) length of the buffer - (data (i32.const 8) "\08") - - ;; [16, inf) zero initialized - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "call")) - - (func (export "deploy") - ;; fill the buffer with the rent allowance. - (call $seal_rent_allowance (i32.const 0) (i32.const 8)) - - ;; assert len == 8 - (call $assert - (i32.eq - (i32.load (i32.const 8)) - (i32.const 8) - ) - ) - - ;; assert that contents of the buffer is equal to <BalanceOf<T>>::max_value(). - (call $assert - (i64.eq - (i64.load (i32.const 0)) - (i64.const 0xFFFFFFFFFFFFFFFF) - ) - ) - ) -) diff --git a/frame/contracts/fixtures/debug_message_invalid_utf8.wat b/frame/contracts/fixtures/debug_message_invalid_utf8.wat new file mode 100644 index 0000000000000..c60371076440e --- /dev/null +++ b/frame/contracts/fixtures/debug_message_invalid_utf8.wat @@ -0,0 +1,18 @@ +;; Emit a debug message with an invalid UTF-8 payload (the call below must trap) +(module + (import "seal0" "seal_debug_message" (func $seal_debug_message (param i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (data (i32.const 0) "\fc") + + (func (export "call") + (call $seal_debug_message + (i32.const 0) ;; Pointer to the text buffer + (i32.const 12) ;; The size of the buffer + ) + ;; the above call traps because we supplied invalid utf8 + unreachable + ) + + (func (export "deploy")) +) diff --git a/frame/contracts/fixtures/debug_message_logging_disabled.wat b/frame/contracts/fixtures/debug_message_logging_disabled.wat new file mode 100644 index 0000000000000..cfe238943ad06 --- /dev/null +++ b/frame/contracts/fixtures/debug_message_logging_disabled.wat @@ -0,0 +1,28 @@ +;; Emit a "Hello World!" debug message but assume that logging is disabled. +(module + (import "seal0" "seal_debug_message" (func $seal_debug_message (param i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (data (i32.const 0) "Hello World!") + + (func $assert_eq (param i32 i32) + (block $ok + (br_if $ok + (i32.eq (get_local 0) (get_local 1)) + ) + (unreachable) + ) + ) + + (func (export "call") + (call $assert_eq + (call $seal_debug_message + (i32.const 0) ;; Pointer to the text buffer + (i32.const 12) ;; The size of the buffer + ) + (i32.const 9) ;; LoggingDisabled return code + ) + ) + + (func (export "deploy")) +) diff --git a/frame/contracts/fixtures/debug_message_works.wat b/frame/contracts/fixtures/debug_message_works.wat new file mode 100644 index 0000000000000..61933c2329611 --- /dev/null +++ b/frame/contracts/fixtures/debug_message_works.wat @@ -0,0 +1,28 @@ +;; Emit a "Hello World!" debug message +(module + (import "seal0" "seal_debug_message" (func $seal_debug_message (param i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (data (i32.const 0) "Hello World!") + + (func $assert_eq (param i32 i32) + (block $ok + (br_if $ok + (i32.eq (get_local 0) (get_local 1)) + ) + (unreachable) + ) + ) + + (func (export "call") + (call $assert_eq + (call $seal_debug_message + (i32.const 0) ;; Pointer to the text buffer + (i32.const 12) ;; The size of the buffer + ) + (i32.const 0) ;; success return code + ) + ) + + (func (export "deploy")) +) diff --git a/frame/contracts/fixtures/destroy_and_transfer.wat b/frame/contracts/fixtures/destroy_and_transfer.wat index 3220f4e612d7d..aa13cd8b81072 100644 --- a/frame/contracts/fixtures/destroy_and_transfer.wat +++ b/frame/contracts/fixtures/destroy_and_transfer.wat @@ -4,7 +4,9 @@ (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) (import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "env" "memory" (memory 1 1)) ;; [0, 8) Endowment to send when creating contract.
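The `salt_ptr`/`salt_len` pair that now trails every `seal_instantiate` call feeds into address derivation, so one deployer can instantiate the same code several times. Roughly, the new address is a hash over `code_hash ++ deployer ++ salt`; the sketch below assumes Blake2-256 via `sp_core` and is illustrative rather than the pallet's exact code:

```rust
// Hedged sketch of salted contract address derivation; the hasher and the
// concatenation order are assumptions mirroring pallet-contracts.
use sp_core::hashing::blake2_256;

fn contract_address(code_hash: &[u8; 32], deployer: &[u8; 32], salt: &[u8]) -> [u8; 32] {
    let mut buf = Vec::with_capacity(64 + salt.len());
    buf.extend_from_slice(code_hash);
    buf.extend_from_slice(deployer);
    buf.extend_from_slice(salt);
    blake2_256(&buf)
}

fn main() {
    let (code, deployer) = ([1u8; 32], [2u8; 32]);
    // The fixture below uses the two-byte salt "\47\11"; any other salt gives
    // the same deployer a fresh address for the same code.
    assert_ne!(
        contract_address(&code, &deployer, &[0x47, 0x11]),
        contract_address(&code, &deployer, &[])
    );
}
```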
@@ -16,14 +18,18 @@ ;; [48, 80) Buffer where to store the input to the contract - ;; [80, 88) Buffer where to store the address of the instantiated contract - ;; [88, 96) Size of the buffer - (data (i32.const 88) "\08") + (data (i32.const 88) "\FF") ;; [96, 100) Size of the input buffer (data (i32.const 96) "\20") + ;; [100, 132) Buffer where to store the address of the instantiated contract + + ;; [132, 134) Salt + (data (i32.const 132) "\47\11") + + (func $assert (param i32) (block $ok (br_if $ok @@ -54,10 +60,12 @@ (i32.const 8) ;; Length of the buffer with value to transfer. (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer - (i32.const 80) ;; Buffer where to store address of new contract + (i32.const 100) ;; Buffer where to store address of new contract (i32.const 88) ;; Pointer to the length of the buffer (i32.const 4294967295) ;; u32 max sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this cas + (i32.const 0) ;; Length is ignored in this case + (i32.const 132) ;; salt_ptr + (i32.const 2) ;; salt_len ) (i32.const 0) ) @@ -67,15 +75,15 @@ (call $assert (i32.eq (i32.load (i32.const 88)) - (i32.const 8) + (i32.const 32) ) ) ;; Store the return address. (call $seal_set_storage (i32.const 16) ;; Pointer to the key - (i32.const 80) ;; Pointer to the value - (i32.const 8) ;; Length of the value + (i32.const 100) ;; Pointer to the value + (i32.const 32) ;; Length of the value ) ) @@ -85,7 +93,7 @@ (i32.eq (call $seal_get_storage (i32.const 16) ;; Pointer to the key - (i32.const 80) ;; Pointer to the value + (i32.const 100) ;; Pointer to the value (i32.const 88) ;; Pointer to the len of the value ) (i32.const 0) @@ -94,7 +102,7 @@ (call $assert (i32.eq (i32.load (i32.const 88)) - (i32.const 8) + (i32.const 32) ) ) @@ -102,8 +110,8 @@ (call $assert (i32.eq (call $seal_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer @@ -121,8 +129,8 @@ (call $assert (i32.eq (call $seal_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. (i32.const 8) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer @@ -137,12 +145,12 @@ ;; Calling the destination address with non-empty input data should now work since the ;; contract has been removed. Also transfer a balance to the address so we can ensure this - ;; does not keep the contract alive. + ;; does not hinder the contract from being removed. 
(call $assert (i32.eq (call $seal_transfer - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 100) ;; Pointer to destination address + (i32.const 32) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer ) diff --git a/frame/contracts/fixtures/drain.wat b/frame/contracts/fixtures/drain.wat index 9180047f5d015..546026ac95986 100644 --- a/frame/contracts/fixtures/drain.wat +++ b/frame/contracts/fixtures/drain.wat @@ -38,7 +38,7 @@ (i32.eq (call $seal_transfer (i32.const 16) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 32) ;; Length of destination address (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer ) diff --git a/frame/contracts/fixtures/dummy.wat b/frame/contracts/fixtures/dummy.wat new file mode 100644 index 0000000000000..0aeefbcb7ebfe --- /dev/null +++ b/frame/contracts/fixtures/dummy.wat @@ -0,0 +1,5 @@ +;; A valid contract which does nothing at all +(module + (func (export "deploy")) + (func (export "call")) +) diff --git a/frame/contracts/fixtures/ecdsa_recover.wat b/frame/contracts/fixtures/ecdsa_recover.wat new file mode 100644 index 0000000000000..c196e88094d2c --- /dev/null +++ b/frame/contracts/fixtures/ecdsa_recover.wat @@ -0,0 +1,55 @@ +;; This contract: +;; 1) Reads signature and message hash from the input +;; 2) Calls ecdsa_recover +;; 3) Validates that result is Success +;; 4) Returns recovered compressed public key +(module + (import "__unstable__" "seal_ecdsa_recover" (func $seal_ecdsa_recover (param i32 i32 i32) (result i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy")) + + ;; [4, 8) len of signature + message hash - 65 bytes + 32 bytes = 97 bytes + (data (i32.const 4) "\61") + + ;; Memory layout during `call` + ;; [10, 75) signature + ;; [75, 107) message hash + (func (export "call") + (local $signature_ptr i32) + (local $message_hash_ptr i32) + (local $result i32) + (local.set $signature_ptr (i32.const 10)) + (local.set $message_hash_ptr (i32.const 75)) + ;; Read signature and message hash - 97 bytes + (call $seal_input (local.get $signature_ptr) (i32.const 4)) + (local.set + $result + (call $seal_ecdsa_recover + (local.get $signature_ptr) + (local.get $message_hash_ptr) + (local.get $signature_ptr) ;; Store output into message signature ptr, because we don't need it anymore + ) + ) + (call $assert + (i32.eq + (local.get $result) ;; The result of recovery execution + (i32.const 0x0) ;; 0x0 - Success result + ) + ) + + ;; exit with success and return recovered public key + (call $seal_return (i32.const 0) (local.get $signature_ptr) (i32.const 33)) + ) +) diff --git a/frame/contracts/fixtures/instantiate_return_code.wat b/frame/contracts/fixtures/instantiate_return_code.wat index 20ab96d88ad2e..6a8654520f106 100644 --- a/frame/contracts/fixtures/instantiate_return_code.wat +++ b/frame/contracts/fixtures/instantiate_return_code.wat @@ -1,47 +1,47 @@ -;; This instantiats Charlie (3) and transfers 100 balance during this call and copies the return code +;; This instantiates a contract and transfers 100 balance during this call
and copies the return code ;; of this call to the output buffer. ;; The first 32 bytes of input are the code hash to instantiate ;; The rest of the input is forwarded to the constructor of the callee (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal1" "seal_instantiate" (func $seal_instantiate + (param i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) address of django - (data (i32.const 0) "\04\00\00\00\00\00\00\00") + ;; [0, 8) 10_000 balance + (data (i32.const 0) "\10\27\00\00\00\00\00\00") - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [8, 12) here we store the return code of the transfer - ;; [16, 20) here we store the return code of the transfer + ;; [12, 16) size of the input buffer + (data (i32.const 12) "\24") - ;; [20, 24) size of the input buffer - (data (i32.const 20) "\FF") - - ;; [24, inf) input buffer + ;; [16, inf) input buffer + ;; 32 byte code hash + 4 byte forward (func (export "deploy")) (func (export "call") - (call $seal_input (i32.const 24) (i32.const 20)) + (call $seal_input (i32.const 16) (i32.const 12)) (i32.store - (i32.const 16) + (i32.const 8) (call $seal_instantiate - (i32.const 24) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 56) ;; Pointer to input data buffer address - (i32.sub (i32.load (i32.const 20)) (i32.const 32)) ;; Length of input data buffer + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 48) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy address (i32.const 0) ;; Length is ignored in this case (i32.const 0xffffffff) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 0) ;; salt_len ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 8) (i32.const 4)) ) ) diff --git a/frame/contracts/fixtures/ok_trap_revert.wat b/frame/contracts/fixtures/ok_trap_revert.wat index b71a6435db9c1..b7eaa9b700af5 100644 --- a/frame/contracts/fixtures/ok_trap_revert.wat +++ b/frame/contracts/fixtures/ok_trap_revert.wat @@ -32,4 +32,4 @@ ;; 2 = trap (unreachable) ) -) \ No newline at end of file +) diff --git a/frame/contracts/fixtures/restoration.wat b/frame/contracts/fixtures/restoration.wat deleted file mode 100644 index 3c15f7ae0881e..0000000000000 --- a/frame/contracts/fixtures/restoration.wat +++ /dev/null @@ -1,78 +0,0 @@ -(module - (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_restore_to" - (func $seal_restore_to - (param i32 i32 i32 i32 i32 i32 i32 i32) - ) - ) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - )
- - (func (export "call") - ;; copy code hash to contract memory - (call $seal_input (i32.const 264) (i32.const 304)) - (call $assert - (i32.eq - (i32.load (i32.const 304)) - (i32.const 32) - ) - ) - - (call $seal_restore_to - ;; Pointer and length of the encoded dest buffer. - (i32.const 256) - (i32.const 8) - ;; Pointer and length of the encoded code hash buffer - (i32.const 264) - (i32.const 32) - ;; Pointer and length of the encoded rent_allowance buffer - (i32.const 296) - (i32.const 8) - ;; Pointer and number of items in the delta buffer. - ;; This buffer specifies multiple keys for removal before restoration. - (i32.const 100) - (i32.const 1) - ) - ) - (func (export "deploy") - ;; Data to restore - (call $seal_set_storage - (i32.const 0) - (i32.const 0) - (i32.const 4) - ) - - ;; ACL - (call $seal_set_storage - (i32.const 100) - (i32.const 0) - (i32.const 4) - ) - ) - - ;; Data to restore - (data (i32.const 0) "\28") - - ;; Buffer that has ACL storage keys. - (data (i32.const 100) "\01") - - ;; Address of bob - (data (i32.const 256) "\02\00\00\00\00\00\00\00") - - ;; [264, 296) Code hash of SET_RENT (copied here by seal_input) - - ;; [296, 304) Rent allowance - (data (i32.const 296) "\32\00\00\00\00\00\00\00") - - ;; [304, 308) Size of SET_RENT buffer - (data (i32.const 304) "\20") -) diff --git a/frame/contracts/fixtures/self_destruct.wat b/frame/contracts/fixtures/self_destruct.wat index 6898e746b0836..b8a37306e2011 100644 --- a/frame/contracts/fixtures/self_destruct.wat +++ b/frame/contracts/fixtures/self_destruct.wat @@ -5,20 +5,23 @@ (import "seal0" "seal_terminate" (func $seal_terminate (param i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) reserved for $seal_address output + ;; [0, 32) reserved for $seal_address output - ;; [8, 16) length of the buffer - (data (i32.const 8) "\08") + ;; [32, 36) length of the buffer + (data (i32.const 32) "\20") - ;; [16, 24) Address of django - (data (i32.const 16) "\04\00\00\00\00\00\00\00") + ;; [36, 68) Address of django + (data (i32.const 36) + "\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04" + "\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04\04" + ) - ;; [24, 32) reserved for output of $seal_input + ;; [68, 72) reserved for output of $seal_input - ;; [32, 36) length of the buffer - (data (i32.const 32) "\04") + ;; [72, 76) length of the buffer + (data (i32.const 72) "\04") - ;; [36, inf) zero initialized + ;; [76, inf) zero initialized (func $assert (param i32) (block $ok @@ -36,16 +39,16 @@ ;; This should trap instead of self-destructing since a contract cannot be removed live in ;; the execution stack cannot be removed. If the recursive call traps, then trap here as ;; well. - (call $seal_input (i32.const 24) (i32.const 32)) - (if (i32.load (i32.const 32)) + (call $seal_input (i32.const 68) (i32.const 72)) + (if (i32.load (i32.const 72)) (then - (call $seal_address (i32.const 0) (i32.const 8)) + (call $seal_address (i32.const 0) (i32.const 32)) ;; Expect address to be 8 bytes. (call $assert (i32.eq - (i32.load (i32.const 8)) - (i32.const 8) + (i32.load (i32.const 32)) + (i32.const 32) ) ) @@ -54,9 +57,9 @@ (i32.eq (call $seal_call (i32.const 0) ;; Pointer to own address - (i32.const 8) ;; Length of own address + (i32.const 32) ;; Length of own address (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
- (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 76) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer (i32.const 0) ;; Pointer to input data buffer address (i32.const 0) ;; Length of input data buffer @@ -70,8 +73,8 @@ (else ;; Try to terminate and give balance to django. (call $seal_terminate - (i32.const 16) ;; Pointer to beneficiary address - (i32.const 8) ;; Length of beneficiary address + (i32.const 36) ;; Pointer to beneficiary address + (i32.const 32) ;; Length of beneficiary address ) (unreachable) ;; seal_terminate never returns ) diff --git a/frame/contracts/fixtures/self_destructing_constructor.wat b/frame/contracts/fixtures/self_destructing_constructor.wat index ab8c289f1b564..85fce511e21b9 100644 --- a/frame/contracts/fixtures/self_destructing_constructor.wat +++ b/frame/contracts/fixtures/self_destructing_constructor.wat @@ -15,7 +15,7 @@ ;; Self-destruct by sending full balance to the 0 address. (call $seal_terminate (i32.const 0) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address + (i32.const 32) ;; Length of destination address ) ) diff --git a/frame/contracts/fixtures/set_rent.wat b/frame/contracts/fixtures/set_rent.wat deleted file mode 100644 index a09d3dc4bd47a..0000000000000 --- a/frame/contracts/fixtures/set_rent.wat +++ /dev/null @@ -1,105 +0,0 @@ -(module - (import "seal0" "seal_transfer" (func $seal_transfer (param i32 i32 i32 i32) (result i32))) - (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) - (import "seal0" "seal_clear_storage" (func $seal_clear_storage (param i32))) - (import "seal0" "seal_set_rent_allowance" (func $seal_set_rent_allowance (param i32 i32))) - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; insert a value of 4 bytes into storage - (func $call_0 - (call $seal_set_storage - (i32.const 1) - (i32.const 0) - (i32.const 4) - ) - ) - - ;; remove the value inserted by call_1 - (func $call_1 - (call $seal_clear_storage - (i32.const 1) - ) - ) - - ;; transfer 50 to CHARLIE - (func $call_2 - (call $assert - (i32.eq - (call $seal_transfer (i32.const 68) (i32.const 8) (i32.const 76) (i32.const 8)) - (i32.const 0) - ) - ) - ) - - ;; do nothing - (func $call_else) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - ;; Dispatch the call according to input size - (func (export "call") - (local $input_size i32) - (i32.store (i32.const 64) (i32.const 64)) - (call $seal_input (i32.const 1024) (i32.const 64)) - (set_local $input_size - (i32.load (i32.const 64)) - ) - (block $IF_ELSE - (block $IF_2 - (block $IF_1 - (block $IF_0 - (br_table $IF_0 $IF_1 $IF_2 $IF_ELSE - (get_local $input_size) - ) - (unreachable) - ) - (call $call_0) - return - ) - (call $call_1) - return - ) - (call $call_2) - return - ) - (call $call_else) - ) - - ;; Set into storage a 4 bytes value - ;; Set call set_rent_allowance with input - (func (export "deploy") - (call $seal_set_storage - (i32.const 0) - (i32.const 0) - (i32.const 4) - ) - (call $seal_input - (i32.const 0) - (i32.const 64) - ) - (call $seal_set_rent_allowance - (i32.const 0) - (i32.load (i32.const 64)) - ) - ) - - ;; Encoding of 10 in balance - (data (i32.const 0) "\28") - - ;; Size of the buffer at address 0 - (data (i32.const 64) "\40") - - ;; encoding of Charlies's account id - (data (i32.const 68) "\03") - - ;; encoding of 50 balance - (data (i32.const 
76) "\32") -) diff --git a/frame/contracts/fixtures/transfer_return_code.wat b/frame/contracts/fixtures/transfer_return_code.wat index 7a1bec9adf38c..50098851dcf81 100644 --- a/frame/contracts/fixtures/transfer_return_code.wat +++ b/frame/contracts/fixtures/transfer_return_code.wat @@ -5,27 +5,30 @@ (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) - ;; [0, 8) zero-adress - (data (i32.const 0) "\00\00\00\00\00\00\00\00") + ;; [0, 32) zero-adress + (data (i32.const 0) + "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" + "\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" + ) - ;; [8, 16) 100 balance - (data (i32.const 8) "\64\00\00\00\00\00\00\00") + ;; [32, 40) 100 balance + (data (i32.const 32) "\64\00\00\00\00\00\00\00") - ;; [16, 20) here we store the return code of the transfer + ;; [40, 44) here we store the return code of the transfer (func (export "deploy")) (func (export "call") (i32.store - (i32.const 16) + (i32.const 40) (call $seal_transfer (i32.const 0) ;; ptr to destination address - (i32.const 8) ;; length of destination address - (i32.const 8) ;; ptr to value to transfer + (i32.const 32) ;; length of destination address + (i32.const 32) ;; ptr to value to transfer (i32.const 8) ;; length of value to transfer ) ) ;; exit with success and take transfer return code to the output buffer - (call $seal_return (i32.const 0) (i32.const 16) (i32.const 4)) + (call $seal_return (i32.const 0) (i32.const 40) (i32.const 4)) ) ) diff --git a/primitives/npos-elections/compact/Cargo.toml b/frame/contracts/proc-macro/Cargo.toml similarity index 51% rename from primitives/npos-elections/compact/Cargo.toml rename to frame/contracts/proc-macro/Cargo.toml index 1873f8fa16057..605c69fe73e25 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/frame/contracts/proc-macro/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "sp-npos-elections-compact" -version = "2.0.0" +name = "pallet-contracts-proc-macro" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "NPoS Compact Solution Type" +description = "Procedural macros used in pallet_contracts" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -15,7 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.7", features = ["full", "visit"] } -quote = "1.0" -proc-macro2 = "1.0.6" -proc-macro-crate = "0.1.4" +proc-macro2 = "1" +quote = "1" +syn = "1" + +[dev-dependencies] + +[features] +# If set the full output is generated. Do NOT set when generating for wasm runtime. +full = [] diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs new file mode 100644 index 0000000000000..302a0d01a93d9 --- /dev/null +++ b/frame/contracts/proc-macro/src/lib.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Proc macros used in the contracts module. + +#![no_std] + +extern crate alloc; + +use alloc::string::ToString; +use proc_macro2::TokenStream; +use quote::{quote, quote_spanned}; +use syn::{parse_macro_input, spanned::Spanned, Data, DataStruct, DeriveInput, Fields, Ident}; + +/// This derives `Debug` for a struct where each field must be of some numeric type. +/// It interprets each field as if it represents some weight and formats it as time so that +/// it is readable by humans. +#[proc_macro_derive(WeightDebug)] +pub fn derive_weight_debug(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + derive_debug(input, format_weight) +} + +/// This is basically identical to the std lib's `Debug` derive but without adding any +/// bounds to existing generics. +#[proc_macro_derive(ScheduleDebug)] +pub fn derive_schedule_debug(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + derive_debug(input, format_default) +} + +fn derive_debug( + input: proc_macro::TokenStream, + fmt: impl Fn(&Ident) -> TokenStream, +) -> proc_macro::TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let name = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let data = if let Data::Struct(data) = &input.data { + data + } else { + return quote_spanned! { + name.span() => + compile_error!("WeightDebug is only supported for structs."); + } + .into() + }; + + #[cfg(feature = "full")] + let fields = iterate_fields(data, fmt); + + #[cfg(not(feature = "full"))] + let fields = { + drop(fmt); + drop(data); + TokenStream::new() + }; + + let tokens = quote! { + impl #impl_generics core::fmt::Debug for #name #ty_generics #where_clause { + fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + use ::sp_runtime::{FixedPointNumber, FixedU128 as Fixed}; + let mut formatter = formatter.debug_struct(stringify!(#name)); + #fields + formatter.finish() + } + } + }; + + tokens.into() +} + +/// This is only used when the `full` feature is activated. +#[cfg(feature = "full")] +fn iterate_fields(data: &DataStruct, fmt: impl Fn(&Ident) -> TokenStream) -> TokenStream { + match &data.fields { + Fields::Named(fields) => { + let recurse = fields.named.iter().filter_map(|f| { + let name = f.ident.as_ref()?; + if name.to_string().starts_with('_') { + return None + } + let value = fmt(name); + let ret = quote_spanned! { f.span() => + formatter.field(stringify!(#name), #value); + }; + Some(ret) + }); + quote! { + #( #recurse )* + } + }, + Fields::Unnamed(fields) => quote_spanned! { + fields.span() => + compile_error!("Unnamed fields are not supported") + }, + Fields::Unit => quote!(), + } +} + +fn format_weight(field: &Ident) -> TokenStream { + quote_spanned!
{ field.span() => + &if self.#field > 1_000_000_000 { + format!( + "{:.1?} ms", + Fixed::saturating_from_rational(self.#field, 1_000_000_000).to_float() + ) + } else if self.#field > 1_000_000 { + format!( + "{:.1?} µs", + Fixed::saturating_from_rational(self.#field, 1_000_000).to_float() + ) + } else if self.#field > 1_000 { + format!( + "{:.1?} ns", + Fixed::saturating_from_rational(self.#field, 1_000).to_float() + ) + } else { + format!("{} ps", self.#field) + } + } +} + +fn format_default(field: &Ident) -> TokenStream { + quote_spanned! { field.span() => + &self.#field + } +} diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index 587abcbcddaec..b73039ba7191e 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,18 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4" } -jsonrpc-core = "15.0.0" -jsonrpc-core-client = "15.0.0" -jsonrpc-derive = "15.0.0" -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } -serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "2.0.0", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0", path = "./runtime-api" } +codec = { package = "parity-scale-codec", version = "2" } +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +serde = { version = "1", features = ["derive"] } + +# Substrate Dependencies +pallet-contracts-primitives = { version = "4.0.0-dev", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-rpc = { version = "4.0.0-dev", path = "../../../primitives/rpc" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.41" +serde_json = "1" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index 04becf2b45f49..e5f6d1ec7eb8e 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,17 +13,21 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } -pallet-contracts-primitives = { version = 
"2.0.0", default-features = false, path = "../../common" } +codec = { package = "parity-scale-codec", version = "2", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } + +# Substrate Dependencies +pallet-contracts-primitives = { version = "4.0.0-dev", default-features = false, path = "../../common" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/std" } [features] default = ["std"] std = [ "sp-api/std", "codec/std", + "scale-info/std", "sp-std/std", "sp-runtime/std", "pallet-contracts-primitives/std", diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/rpc/runtime-api/src/lib.rs index 7d208cf7763e7..20dfbe210e5ce 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/rpc/runtime-api/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,42 +23,23 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Codec, Decode, Encode}; -use pallet_contracts_primitives::{GetStorageResult, RentProjectionResult}; -use sp_runtime::RuntimeDebug; +use codec::Codec; +use pallet_contracts_primitives::{ + Code, ContractExecResult, ContractInstantiateResult, GetStorageResult, +}; use sp_std::vec::Vec; -/// A result of execution of a contract. -#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] -pub enum ContractExecResult { - /// The contract returned successfully. - /// - /// There is a status code and, optionally, some data returned by the contract. - Success { - /// Flags that the contract passed along on returning to alter its exit behaviour. - /// Described in `pallet_contracts::exec::ReturnFlags`. - flags: u32, - /// Output data returned by the contract. - /// - /// Can be empty. - data: Vec, - /// How much gas was consumed by the call. - gas_consumed: u64, - }, - /// The contract execution either trapped or returned an error. - Error, -} - sp_api::decl_runtime_apis! { /// The API to interact with contracts without using executive. - pub trait ContractsApi where + pub trait ContractsApi where AccountId: Codec, Balance: Codec, BlockNumber: Codec, + Hash: Codec, { /// Perform a call from a specified account to a given contract. /// - /// See the contracts' `call` dispatchable function for more details. + /// See `pallet_contracts::Pallet::call`. fn call( origin: AccountId, dest: AccountId, @@ -67,23 +48,26 @@ sp_api::decl_runtime_apis! { input_data: Vec, ) -> ContractExecResult; + /// Instantiate a new contract. + /// + /// See `pallet_contracts::Pallet::instantiate`. + fn instantiate( + origin: AccountId, + endowment: Balance, + gas_limit: u64, + code: Code, + data: Vec, + salt: Vec, + ) -> ContractInstantiateResult; + /// Query a given storage key in a given contract. /// /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the /// specified account and `Ok(None)` if it doesn't. If the account specified by the address - /// doesn't exist, or doesn't have a contract or if the contract is a tombstone, then `Err` - /// is returned. 
+ /// doesn't exist, or doesn't have a contract then `Err` is returned. fn get_storage( address: AccountId, key: [u8; 32], ) -> GetStorageResult; - - /// Returns the projected time a given contract will be able to sustain paying its rent. - /// - /// The returned projection is relevant for the current block, i.e. it is as if the contract - /// was accessed at the current block. - /// - /// Returns `Err` if the contract is in a tombstone state or doesn't exist. - fn rent_projection(address: AccountId) -> RentProjectionResult; } } diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index d99ed1e78a652..e0796af056540 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,26 +22,24 @@ use std::sync::Arc; use codec::Codec; use jsonrpc_core::{Error, ErrorCode, Result}; use jsonrpc_derive::rpc; -use pallet_contracts_primitives::RentProjection; +use pallet_contracts_primitives::{Code, ContractExecResult, ContractInstantiateResult}; use serde::{Deserialize, Serialize}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::{Bytes, H256}; -use sp_rpc::number; +use sp_rpc::number::NumberOrHex; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, }; -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; -pub use self::gen_client::Client as ContractsClient; -pub use pallet_contracts_rpc_runtime_api::{ - self as runtime_api, ContractExecResult, ContractsApi as ContractsRuntimeApi, -}; +pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; const RUNTIME_ERROR: i64 = 1; const CONTRACT_DOESNT_EXIST: i64 = 2; -const CONTRACT_IS_A_TOMBSTONE: i64 = 3; + +pub type Weight = u64; /// A rough estimate of how much gas a decent hardware consumes per second, /// using native execution. @@ -50,8 +48,12 @@ const CONTRACT_IS_A_TOMBSTONE: i64 = 3; /// /// As 1 gas is equal to 1 weight we base this on the conducted benchmarks which /// determined runtime weights: -/// https://github.com/paritytech/substrate/pull/5446 -const GAS_PER_SECOND: u64 = 1_000_000_000_000; +/// +const GAS_PER_SECOND: Weight = 1_000_000_000_000; + +/// The maximum amount of weight that the call and instantiate rpcs are allowed to consume. +/// This puts a ceiling on the weight limit that is supplied to the rpc as an argument. +const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; /// A private newtype for converting `ContractAccessError` into an RPC error. 
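A small freestanding sketch (mirroring the constants above, not additional pallet code) of why the ceiling works out to roughly five seconds of native execution: with 1 gas equal to 1 weight and about 10^12 weight consumed per second, any dry-run request above `5 * GAS_PER_SECOND` is rejected up front.

```rust
type Weight = u64;

const GAS_PER_SECOND: Weight = 1_000_000_000_000;
const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND;

fn main() {
    // A request worth roughly 7 seconds of execution exceeds the ceiling and
    // would be rejected by the RPC before the runtime API is even invoked.
    let requested: Weight = 7 * GAS_PER_SECOND;
    assert!(requested > GAS_LIMIT);
    println!("rejected: {} > {}", requested, GAS_LIMIT);
}
```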
struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); @@ -64,11 +66,6 @@ impl From for Error { message: "The specified contract doesn't exist.".into(), data: None, }, - IsTombstone => Error { - code: ErrorCode::ServerError(CONTRACT_IS_A_TOMBSTONE), - message: "The contract is a tombstone and doesn't have any storage.".into(), - data: None, - }, } } } @@ -77,52 +74,30 @@ impl From for Error { #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] -pub struct CallRequest { +pub struct CallRequest { origin: AccountId, dest: AccountId, - value: Balance, - gas_limit: number::NumberOrHex, + value: NumberOrHex, + gas_limit: NumberOrHex, input_data: Bytes, } -/// An RPC serializable result of contract execution +/// A struct that encodes RPC parameters required to instantiate a new smart-contract. #[derive(Serialize, Deserialize)] -#[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] -pub enum RpcContractExecResult { - /// Successful execution - Success { - /// The return flags - flags: u32, - /// Output data - data: Bytes, - /// How much gas was consumed by the call. - gas_consumed: u64, - }, - /// Error execution - Error(()), -} - -impl From for RpcContractExecResult { - fn from(r: ContractExecResult) -> Self { - match r { - ContractExecResult::Success { - flags, - data, - gas_consumed - } => RpcContractExecResult::Success { - flags, - data: data.into(), - gas_consumed, - }, - ContractExecResult::Error => RpcContractExecResult::Error(()), - } - } +#[serde(deny_unknown_fields)] +pub struct InstantiateRequest { + origin: AccountId, + endowment: NumberOrHex, + gas_limit: NumberOrHex, + code: Code, + data: Bytes, + salt: Bytes, } /// Contracts RPC methods. #[rpc] -pub trait ContractsApi { +pub trait ContractsApi { /// Executes a call to a contract. /// /// This call is performed locally without submitting any transactions. Thus executing this @@ -132,9 +107,22 @@ pub trait ContractsApi { #[rpc(name = "contracts_call")] fn call( &self, - call_request: CallRequest, + call_request: CallRequest, + at: Option, + ) -> Result; + + /// Instantiate a new contract. + /// + /// This call is performed locally without submitting any transactions. Thus the contract + /// is not actually created. + /// + /// This method is useful for UIs to dry-run contract instantiations. + #[rpc(name = "contracts_instantiate")] + fn instantiate( + &self, + instantiate_request: InstantiateRequest, at: Option, - ) -> Result; + ) -> Result>; /// Returns the value under a specified storage `key` in a contract given by `address` param, /// or `None` if it is not set. @@ -145,19 +133,6 @@ pub trait ContractsApi { key: H256, at: Option, ) -> Result>; - - /// Returns the projected time a given contract will be able to sustain paying its rent. - /// - /// The returned projection is relevant for the given block, i.e. it is as if the contract was - /// accessed at the beginning of that block. - /// - /// Returns `None` if the contract is exempted from rent. - #[rpc(name = "contracts_rentProjection")] - fn rent_projection( - &self, - address: AccountId, - at: Option, - ) -> Result>; } /// An implementation of contract specific RPC methods. @@ -169,18 +144,16 @@ pub struct Contracts { impl Contracts { /// Create new `Contracts` with the given reference to the client. 
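To illustrate the wire format these serde attributes produce, here is a self-contained sketch; the field types are simplified stand-ins (plain `String`/`u64` instead of `AccountId` and `NumberOrHex`), so it demonstrates only the camelCase renaming and strict field checking, not the real types.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(deny_unknown_fields)]
struct CallRequest {
    origin: String,
    dest: String,
    value: String,   // `NumberOrHex` in the real type, e.g. "0x10"
    gas_limit: u64,  // serialized as "gasLimit" because of rename_all
    input_data: String,
}

fn main() {
    let json = r#"{
        "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL",
        "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom",
        "value": "0x10",
        "gasLimit": 1000000000000,
        "inputData": "0x8c97db39"
    }"#;
    let req: CallRequest = serde_json::from_str(json).unwrap();
    assert_eq!(req.gas_limit, 1_000_000_000_000);
    // A misspelled field such as "valu" would be an error due to deny_unknown_fields.
}
```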
pub fn new(client: Arc) -> Self { - Contracts { - client, - _marker: Default::default(), - } + Contracts { client, _marker: Default::default() } } } -impl +impl ContractsApi< ::Hash, <::Header as HeaderT>::Number, AccountId, Balance, + Hash, > for Contracts where Block: BlockT, @@ -190,93 +163,77 @@ where AccountId, Balance, <::Header as HeaderT>::Number, + Hash, >, AccountId: Codec, - Balance: Codec, + Balance: Codec + TryFrom, + Hash: Codec, { fn call( &self, - call_request: CallRequest, + call_request: CallRequest, at: Option<::Hash>, - ) -> Result { + ) -> Result { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash)); - let CallRequest { - origin, - dest, - value, - gas_limit, - input_data, - } = call_request; + let CallRequest { origin, dest, value, gas_limit, input_data } = call_request; - // Make sure that gas_limit fits into 64 bits. - let gas_limit: u64 = gas_limit.try_into().map_err(|_| Error { - code: ErrorCode::InvalidParams, - message: format!("{:?} doesn't fit in 64 bit unsigned value", gas_limit), - data: None, - })?; - - let max_gas_limit = 5 * GAS_PER_SECOND; - if gas_limit > max_gas_limit { - return Err(Error { - code: ErrorCode::InvalidParams, - message: format!( - "Requested gas limit is greater than maximum allowed: {} > {}", - gas_limit, max_gas_limit - ), - data: None, - }); - } + let value: Balance = decode_hex(value, "balance")?; + let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + limit_gas(gas_limit)?; let exec_result = api .call(&at, origin, dest, value, gas_limit, input_data.to_vec()) - .map_err(|e| runtime_error_into_rpc_err(e))?; + .map_err(runtime_error_into_rpc_err)?; - Ok(exec_result.into()) + Ok(exec_result) } - fn get_storage( + fn instantiate( &self, - address: AccountId, - key: H256, + instantiate_request: InstantiateRequest, at: Option<::Hash>, - ) -> Result> { + ) -> Result> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash)); - let result = api - .get_storage(&at, address, key.into()) - .map_err(|e| runtime_error_into_rpc_err(e))? - .map_err(ContractAccessError)? - .map(Bytes); + let InstantiateRequest { origin, endowment, gas_limit, code, data, salt } = + instantiate_request; - Ok(result) + let endowment: Balance = decode_hex(endowment, "balance")?; + let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + limit_gas(gas_limit)?; + + let exec_result = api + .instantiate(&at, origin, endowment, gas_limit, code, data.to_vec(), salt.to_vec()) + .map_err(runtime_error_into_rpc_err)?; + + Ok(exec_result) } - fn rent_projection( + fn get_storage( &self, address: AccountId, + key: H256, at: Option<::Hash>, - ) -> Result::Header as HeaderT>::Number>> { + ) -> Result> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash)); let result = api - .rent_projection(&at, address) - .map_err(|e| runtime_error_into_rpc_err(e))? - .map_err(ContractAccessError)?; + .get_storage(&at, address, key.into()) + .map_err(runtime_error_into_rpc_err)? + .map_err(ContractAccessError)? 
+ .map(Bytes); - Ok(match result { - RentProjection::NoEviction => None, - RentProjection::EvictionAt(block_num) => Some(block_num), - }) + Ok(result) } } @@ -284,39 +241,155 @@ where fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> Error { Error { code: ErrorCode::ServerError(RUNTIME_ERROR), - message: "Runtime trapped".into(), + message: "Runtime error".into(), data: Some(format!("{:?}", err).into()), } } +fn decode_hex>(from: H, name: &str) -> Result { + from.try_into().map_err(|_| Error { + code: ErrorCode::InvalidParams, + message: format!("{:?} does not fit into the {} type", from, name), + data: None, + }) +} + +fn limit_gas(gas_limit: Weight) -> Result<()> { + if gas_limit > GAS_LIMIT { + Err(Error { + code: ErrorCode::InvalidParams, + message: format!( + "Requested gas limit is greater than maximum allowed: {} > {}", + gas_limit, GAS_LIMIT + ), + data: None, + }) + } else { + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; use sp_core::U256; + fn trim(json: &str) -> String { + json.chars().filter(|c| !c.is_whitespace()).collect() + } + #[test] fn call_request_should_serialize_deserialize_properly() { - type Req = CallRequest; - let req: Req = serde_json::from_str(r#" + type Req = CallRequest; + let req: Req = serde_json::from_str( + r#" { "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom", - "value": 0, + "value": "0x112210f4B16c1cb1", "gasLimit": 1000000000000, "inputData": "0x8c97db39" } - "#).unwrap(); + "#, + ) + .unwrap(); assert_eq!(req.gas_limit.into_u256(), U256::from(0xe8d4a51000u64)); + assert_eq!(req.value.into_u256(), U256::from(1234567890987654321u128)); + } + + #[test] + fn instantiate_request_should_serialize_deserialize_properly() { + type Req = InstantiateRequest; + let req: Req = serde_json::from_str( + r#" + { + "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "endowment": "0x88", + "gasLimit": 42, + "code": { "existing": "0x1122" }, + "data": "0x4299", + "salt": "0x9988" + } + "#, + ) + .unwrap(); + + assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); + assert_eq!(req.endowment.into_u256(), 0x88.into()); + assert_eq!(req.gas_limit.into_u256(), 42.into()); + assert_eq!(&*req.data, [0x42, 0x99].as_ref()); + assert_eq!(&*req.salt, [0x99, 0x88].as_ref()); + let code = match req.code { + Code::Existing(hash) => hash, + _ => panic!("json encoded an existing hash"), + }; + assert_eq!(&code, "0x1122"); + } + + #[test] + fn call_result_should_serialize_deserialize_properly() { + fn test(expected: &str) { + let res: ContractExecResult = serde_json::from_str(expected).unwrap(); + let actual = serde_json::to_string(&res).unwrap(); + assert_eq!(actual, trim(expected).as_str()); + } + test( + r#"{ + "gasConsumed": 5000, + "gasRequired": 8000, + "debugMessage": "HelloWorld", + "result": { + "Ok": { + "flags": 5, + "data": "0x1234" + } + } + }"#, + ); + test( + r#"{ + "gasConsumed": 3400, + "gasRequired": 5200, + "debugMessage": "HelloWorld", + "result": { + "Err": "BadOrigin" + } + }"#, + ); } #[test] - fn result_should_serialize_deserialize_properly() { + fn instantiate_result_should_serialize_deserialize_properly() { fn test(expected: &str) { - let res: RpcContractExecResult = serde_json::from_str(expected).unwrap(); + let res: ContractInstantiateResult = serde_json::from_str(expected).unwrap(); let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, expected); + assert_eq!(actual, trim(expected).as_str()); } - 
test(r#"{"success":{"flags":5,"data":"0x1234","gas_consumed":5000}}"#); - test(r#"{"error":null}"#); + test( + r#"{ + "gasConsumed": 5000, + "gasRequired": 8000, + "debugMessage": "HelloWorld", + "result": { + "Ok": { + "result": { + "flags": 5, + "data": "0x1234" + }, + "accountId": "5CiPP" + } + } + }"#, + ); + test( + r#"{ + "gasConsumed": 3400, + "gasRequired": 5200, + "debugMessage": "HelloWorld", + "result": { + "Err": "BadOrigin" + } + }"#, + ); } } diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index dc3730e95ca1f..b24005ec58699 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,32 +24,63 @@ //! we define this simple definition of a contract that can be passed to `create_code` that //! compiles it down into a `WasmModule` that can be used as a contract's code. -use crate::Trait; -use crate::Module as Contracts; - -use parity_wasm::elements::{Instruction, Instructions, FuncBody, ValueType, BlockType}; +use crate::Config; +use frame_support::traits::Get; +use pwasm_utils::parity_wasm::{ + builder, + elements::{ + self, BlockType, CustomSection, External, FuncBody, Instruction, Instructions, Module, + Section, ValueType, + }, +}; +use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; -use sp_std::{prelude::*, convert::TryFrom}; +use sp_sandbox::{EnvironmentDefinitionBuilder, Memory}; +use sp_std::{borrow::ToOwned, convert::TryFrom, prelude::*}; /// Pass to `create_code` in order to create a compiled `WasmModule`. +/// +/// This exists to have a more declarative way to describe a wasm module than to use +/// parity-wasm directly. It is tailored to fit the structure of contracts that are +/// needed for benchmarking. +#[derive(Default)] pub struct ModuleDefinition { - pub data_segments: Vec, + /// Imported memory attached to the module. No memory is imported if `None`. pub memory: Option, + /// Initializers for the imported memory. + pub data_segments: Vec, + /// Creates the supplied amount of i64 mutable globals initialized with random values. + pub num_globals: u32, + /// List of functions that the module should import. They start with index 0. pub imported_functions: Vec, + /// Function body of the exported `deploy` function. Body is empty if `None`. + /// Its index is `imported_functions.len()`. pub deploy_body: Option, + /// Function body of the exported `call` function. Body is empty if `None`. + /// Its index is `imported_functions.len() + 1`. pub call_body: Option, + /// Function body of a non-exported function with index `imported_functions.len() + 2`. + pub aux_body: Option, + /// The amount of I64 arguments the aux function should have. + pub aux_arg_num: u32, + /// If set to true the stack height limiter is injected into the the module. This is + /// needed for instruction debugging because the cost of executing the stack height + /// instrumentation should be included in the costs for the individual instructions + /// that cause more metering code (only call). + pub inject_stack_metering: bool, + /// Create a table containing function pointers. + pub table: Option, + /// Create a section named "dummy" of the specified size. 
This is useful in order to + /// benchmark the overhead of loading and storing codes of specified sizes. The dummy + /// section only contributes to the size of the contract but does not affect execution. + pub dummy_section: u32, } -impl Default for ModuleDefinition { - fn default() -> Self { - Self { - data_segments: vec![], - memory: None, - imported_functions: vec![], - deploy_body: None, - call_body: None, - } - } +pub struct TableSegment { + /// How many elements should be created inside the table. + pub num_elements: u32, + /// The function index with which all table elements should be initialized. + pub function_index: u32, } pub struct DataSegment { @@ -57,125 +88,249 @@ pub struct DataSegment { pub value: Vec, } +#[derive(Clone)] pub struct ImportedMemory { pub min_pages: u32, pub max_pages: u32, } impl ImportedMemory { - pub fn max() -> Self { + pub fn max() -> Self + where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + { let pages = max_pages::(); Self { min_pages: pages, max_pages: pages } } } pub struct ImportedFunction { + pub module: &'static str, pub name: &'static str, pub params: Vec, pub return_type: Option, } -/// A wasm module ready to be put on chain with `put_code`. +/// A wasm module ready to be put on chain. #[derive(Clone)] -pub struct WasmModule { +pub struct WasmModule { pub code: Vec, pub hash: ::Output, + memory: Option, } -impl From for WasmModule { +impl From for WasmModule +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ fn from(def: ModuleDefinition) -> Self { // internal functions start at that offset. let func_offset = u32::try_from(def.imported_functions.len()).unwrap(); // Every contract must export "deploy" and "call" functions - let mut contract = parity_wasm::builder::module() + let mut contract = builder::module() // deploy function (first internal function) .function() - .signature().with_params(vec![]).with_return_type(None).build() - .with_body(def.deploy_body.unwrap_or_else(|| - FuncBody::new(Vec::new(), Instructions::empty()) - )) - .build() + .signature() + .build() + .with_body( + def.deploy_body + .unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty())), + ) + .build() // call function (second internal function) .function() - .signature().with_params(vec![]).with_return_type(None).build() - .with_body(def.call_body.unwrap_or_else(|| - FuncBody::new(Vec::new(), Instructions::empty()) - )) - .build() - .export().field("deploy").internal().func(func_offset).build() - .export().field("call").internal().func(func_offset + 1).build(); + .signature() + .build() + .with_body( + def.call_body + .unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty())), + ) + .build() + .export() + .field("deploy") + .internal() + .func(func_offset) + .build() + .export() + .field("call") + .internal() + .func(func_offset + 1) + .build(); + + // If specified we add an additional internal function + if let Some(body) = def.aux_body { + let mut signature = contract.function().signature(); + for _ in 0..def.aux_arg_num { + signature = signature.with_param(ValueType::I64); + } + contract = signature.build().with_body(body).build(); + } // Grant access to linear memory. 
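As a reference point, this is roughly what the builder chain above produces for a dummy contract; a freestanding sketch using the parity-wasm builder re-exported by pwasm-utils, with the crate versions assumed to match the ones in this diff.

```rust
use pwasm_utils::parity_wasm::{
    builder,
    elements::{FuncBody, Instructions},
};

fn main() {
    // Two empty internal functions exported as "deploy" (index 0) and "call"
    // (index 1), the minimal shape every benchmark contract must have.
    let module = builder::module()
        .function()
            .signature().build()
            .with_body(FuncBody::new(Vec::new(), Instructions::empty()))
            .build()
        .function()
            .signature().build()
            .with_body(FuncBody::new(Vec::new(), Instructions::empty()))
            .build()
        .export().field("deploy").internal().func(0).build()
        .export().field("call").internal().func(1).build()
        .build();
    let code = module.to_bytes().unwrap();
    assert!(!code.is_empty());
}
```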
- if let Some(memory) = def.memory { - contract = contract.import() - .module("env").field("memory") - .external().memory(memory.min_pages, Some(memory.max_pages)) + if let Some(memory) = &def.memory { + contract = contract + .import() + .module("env") + .field("memory") + .external() + .memory(memory.min_pages, Some(memory.max_pages)) .build(); } // Import supervisor functions. They start with idx 0. for func in def.imported_functions { - let sig = parity_wasm::builder::signature() + let sig = builder::signature() .with_params(func.params) - .with_return_type(func.return_type) + .with_results(func.return_type.into_iter().collect()) .build_sig(); let sig = contract.push_signature(sig); - contract = contract.import() - .module("seal0") + contract = contract + .import() + .module(func.module) .field(func.name) - .with_external(parity_wasm::elements::External::Function(sig)) + .with_external(elements::External::Function(sig)) .build(); } // Initialize memory for data in def.data_segments { - contract = contract.data() + contract = contract + .data() .offset(Instruction::I32Const(data.offset as i32)) .value(data.value) .build() } - let code = contract.build().to_bytes().unwrap(); - let hash = T::Hashing::hash(&code); - Self { - code, - hash + // Add global variables + if def.num_globals > 0 { + use rand::{distributions::Standard, prelude::*}; + let rng = rand_pcg::Pcg32::seed_from_u64(3112244599778833558); + for val in rng.sample_iter(Standard).take(def.num_globals as usize) { + contract = contract + .global() + .value_type() + .i64() + .mutable() + .init_expr(Instruction::I64Const(val)) + .build() + } + } + + // Add function pointer table + if let Some(table) = def.table { + contract = contract + .table() + .with_min(table.num_elements) + .with_max(Some(table.num_elements)) + .with_element(0, vec![table.function_index; table.num_elements as usize]) + .build(); } + + // Add the dummy section + if def.dummy_section > 0 { + contract = contract.with_section(Section::Custom(CustomSection::new( + "dummy".to_owned(), + vec![42; def.dummy_section as usize], + ))); + } + + let mut code = contract.build(); + + if def.inject_stack_metering { + code = inject_stack_metering::(code); + } + + let code = code.to_bytes().unwrap(); + let hash = T::Hashing::hash(&code); + Self { code, hash, memory: def.memory } } } -impl WasmModule { +impl WasmModule +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ + /// Uses the supplied wasm module and instruments it when requested. + pub fn instrumented(code: &[u8], inject_gas: bool, inject_stack: bool) -> Self { + let module = { + let mut module = Module::from_bytes(code).unwrap(); + if inject_gas { + module = inject_gas_metering::(module); + } + if inject_stack { + module = inject_stack_metering::(module); + } + module + }; + let limits = module + .import_section() + .unwrap() + .entries() + .iter() + .find_map(|e| if let External::Memory(mem) = e.external() { Some(mem) } else { None }) + .unwrap() + .limits() + .clone(); + let code = module.to_bytes().unwrap(); + let hash = T::Hashing::hash(&code); + let memory = + ImportedMemory { min_pages: limits.initial(), max_pages: limits.maximum().unwrap() }; + Self { code, hash, memory: Some(memory) } + } + + /// Creates a wasm module with an empty `call` and `deploy` function and nothing else. pub fn dummy() -> Self { ModuleDefinition::default().into() } + /// Same as `dummy` but with maximum sized linear memory and a dummy section of specified size. 
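The size accounting implemented just below can be sketched in isolation; the 65-byte figure is the module overhead stated in the accompanying comment and is only approximate, since leb128 length fields grow with the contract.

```rust
// How `dummy_with_bytes` sizes its custom "dummy" section: subtract the
// (approximate) fixed module overhead from the requested total, saturating
// at zero for very small requests.
fn dummy_section_len(dummy_bytes: u32) -> u32 {
    const MODULE_OVERHEAD: u32 = 65;
    dummy_bytes.saturating_sub(MODULE_OVERHEAD)
}

fn main() {
    assert_eq!(dummy_section_len(1024), 959);
    assert_eq!(dummy_section_len(10), 0); // saturates instead of underflowing
}
```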
+ pub fn dummy_with_bytes(dummy_bytes: u32) -> Self { + // We want the module to have the size `dummy_bytes`. + // This is not completely correct as the overhead grows when the contract grows + // because of variable length integer encoding. However, it is good enough to be that + // close for benchmarking purposes. + let module_overhead = 65; + ModuleDefinition { + memory: Some(ImportedMemory::max::()), + dummy_section: dummy_bytes.saturating_sub(module_overhead), + ..Default::default() + } + .into() + } + + /// Creates a wasm module of `target_bytes` size. Used to benchmark the performance of + /// `instantiate_with_code` for different sizes of wasm modules. The generated module maximizes + /// instrumentation runtime by nesting blocks as deeply as possible given the byte budget. pub fn sized(target_bytes: u32) -> Self { - use parity_wasm::elements::Instruction::{If, I32Const, Return, End}; - // Base size of a contract is 47 bytes and each expansion adds 6 bytes. + use self::elements::Instruction::{End, I32Const, If, Return}; + // Base size of a contract is 63 bytes and each expansion adds 6 bytes. // We do one expansion less to account for the code section and function body // size fields inside the binary wasm module representation which are leb128 encoded // and therefore grow in size when the contract grows. We are not allowed to overshoot - // because of the maximum code size that is enforced by `put_code`. - let expansions = (target_bytes.saturating_sub(47) / 6).saturating_sub(1); - const EXPANSION: [Instruction; 4] = [ - I32Const(0), - If(BlockType::NoResult), - Return, - End, - ]; + // because of the maximum code size that is enforced by `instantiate_with_code`. + let expansions = (target_bytes.saturating_sub(63) / 6).saturating_sub(1); + const EXPANSION: [Instruction; 4] = [I32Const(0), If(BlockType::NoResult), Return, End]; ModuleDefinition { call_body: Some(body::repeated(expansions, &EXPANSION)), - .. Default::default() + memory: Some(ImportedMemory::max::()), + ..Default::default() } .into() } + /// Creates a wasm module that calls the imported function named `getter_name` `repeat` + /// times. The imported function is expected to have the "getter signature" of + /// (out_ptr: u32, len_ptr: u32) -> (). pub fn getter(getter_name: &'static str, repeat: u32) -> Self { let pages = max_pages::(); ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: getter_name, params: vec![ValueType::I32, ValueType::I32], return_type: None, @@ -188,44 +343,118 @@ impl WasmModule { offset: 0, value: (pages * 64 * 1024 - 4).to_le_bytes().to_vec(), }], - call_body: Some(body::repeated(repeat, &[ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), // call the imported function - ])), - .. Default::default() + call_body: Some(body::repeated( + repeat, + &[ + Instruction::I32Const(4), // ptr where to store output + Instruction::I32Const(0), // ptr to length + Instruction::Call(0), // call the imported function + ], + )), + ..Default::default() } .into() } + /// Creates a wasm module that calls the imported hash function named `name` `repeat` times + /// with an input of size `data_size`. 
Hash functions have the signature + /// (input_ptr: u32, input_len: u32, output_ptr: u32) -> () pub fn hasher(name: &'static str, repeat: u32, data_size: u32) -> Self { ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - name: name, + module: "seal0", + name, params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], - call_body: Some(body::repeated(repeat, &[ - Instruction::I32Const(0), // input_ptr - Instruction::I32Const(data_size as i32), // input_len - Instruction::I32Const(0), // output_ptr - Instruction::Call(0), - ])), - .. Default::default() + call_body: Some(body::repeated( + repeat, + &[ + Instruction::I32Const(0), // input_ptr + Instruction::I32Const(data_size as i32), // input_len + Instruction::I32Const(0), // output_ptr + Instruction::Call(0), + ], + )), + ..Default::default() + } + .into() + } + + /// Creates a memory instance for use in a sandbox with dimensions declared in this module + /// and adds it to `env`. A reference to that memory is returned so that it can be used to + /// access the memory contents from the supervisor. + pub fn add_memory(&self, env: &mut EnvironmentDefinitionBuilder) -> Option { + let memory = if let Some(memory) = &self.memory { memory } else { return None }; + let memory = Memory::new(memory.min_pages, Some(memory.max_pages)).unwrap(); + env.add_memory("env", "memory", memory.clone()); + Some(memory) + } + + pub fn unary_instr(instr: Instruction, repeat: u32) -> Self { + use body::DynInstr::{RandomI64Repeated, Regular}; + ModuleDefinition { + call_body: Some(body::repeated_dyn( + repeat, + vec![RandomI64Repeated(1), Regular(instr), Regular(Instruction::Drop)], + )), + ..Default::default() + } + .into() + } + + pub fn binary_instr(instr: Instruction, repeat: u32) -> Self { + use body::DynInstr::{RandomI64Repeated, Regular}; + ModuleDefinition { + call_body: Some(body::repeated_dyn( + repeat, + vec![RandomI64Repeated(2), Regular(instr), Regular(Instruction::Drop)], + )), + ..Default::default() } .into() } } -/// Mechanisms to create a function body that can be used inside a `ModuleDefinition`. +/// Mechanisms to generate a function body that can be used inside a `ModuleDefinition`. pub mod body { use super::*; - pub enum CountedInstruction { - // (offset, increment_by) - Counter(u32, u32), + /// When generating contract code by repeating a wasm sequence, it's sometimes necessary + /// to change those instructions on each repetition. The variants of this enum describe + /// various ways in which this can happen. + pub enum DynInstr { + /// Insert the associated instruction. Regular(Instruction), + /// Insert a I32Const with incrementing value for each insertion. + /// (start_at, increment_by) + Counter(u32, u32), + /// Insert a I32Const with a random value in [low, high) not divisible by two. + /// (low, high) + RandomUnaligned(u32, u32), + /// Insert a I32Const with a random value in [low, high). + /// (low, high) + RandomI32(i32, i32), + /// Insert the specified amount of I32Const with a random value. + RandomI32Repeated(usize), + /// Insert the specified amount of I64Const with a random value. + RandomI64Repeated(usize), + /// Insert a GetLocal with a random offset in [low, high). + /// (low, high) + RandomGetLocal(u32, u32), + /// Insert a SetLocal with a random offset in [low, high). + /// (low, high) + RandomSetLocal(u32, u32), + /// Insert a TeeLocal with a random offset in [low, high). 
+ /// (low, high) + RandomTeeLocal(u32, u32), + /// Insert a GetGlobal with a random offset in [low, high). + /// (low, high) + RandomGetGlobal(u32, u32), + /// Insert a SetGlobal with a random offset in [low, high). + /// (low, high) + RandomSetGlobal(u32, u32), } pub fn plain(instructions: Vec) -> FuncBody { @@ -240,33 +469,89 @@ pub mod body { .take(instructions.len() * usize::try_from(repetitions).unwrap()) .cloned() .chain(sp_std::iter::once(Instruction::End)) - .collect() + .collect(), ); FuncBody::new(Vec::new(), instructions) } - pub fn counted(repetitions: u32, mut instructions: Vec) -> FuncBody { + pub fn repeated_dyn(repetitions: u32, mut instructions: Vec) -> FuncBody { + use rand::{distributions::Standard, prelude::*}; + + // We do not need to be secure here. + let mut rng = rand_pcg::Pcg32::seed_from_u64(8446744073709551615); + // We need to iterate over indices because we cannot cycle over mutable references let body = (0..instructions.len()) .cycle() .take(instructions.len() * usize::try_from(repetitions).unwrap()) - .map(|idx| { - match &mut instructions[idx] { - CountedInstruction::Counter(offset, increment_by) => { - let current = *offset; - *offset += *increment_by; - Instruction::I32Const(current as i32) - }, - CountedInstruction::Regular(instruction) => instruction.clone(), - } + .flat_map(|idx| match &mut instructions[idx] { + DynInstr::Regular(instruction) => vec![instruction.clone()], + DynInstr::Counter(offset, increment_by) => { + let current = *offset; + *offset += *increment_by; + vec![Instruction::I32Const(current as i32)] + }, + DynInstr::RandomUnaligned(low, high) => { + let unaligned = rng.gen_range(*low, *high) | 1; + vec![Instruction::I32Const(unaligned as i32)] + }, + DynInstr::RandomI32(low, high) => { + vec![Instruction::I32Const(rng.gen_range(*low, *high))] + }, + DynInstr::RandomI32Repeated(num) => (&mut rng) + .sample_iter(Standard) + .take(*num) + .map(|val| Instruction::I32Const(val)) + .collect(), + DynInstr::RandomI64Repeated(num) => (&mut rng) + .sample_iter(Standard) + .take(*num) + .map(|val| Instruction::I64Const(val)) + .collect(), + DynInstr::RandomGetLocal(low, high) => { + vec![Instruction::GetLocal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomSetLocal(low, high) => { + vec![Instruction::SetLocal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomTeeLocal(low, high) => { + vec![Instruction::TeeLocal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomGetGlobal(low, high) => { + vec![Instruction::GetGlobal(rng.gen_range(*low, *high))] + }, + DynInstr::RandomSetGlobal(low, high) => { + vec![Instruction::SetGlobal(rng.gen_range(*low, *high))] + }, }) .chain(sp_std::iter::once(Instruction::End)) .collect(); FuncBody::new(Vec::new(), Instructions::new(body)) } + + /// Replace the locals of the supplied `body` with `num` i64 locals. + pub fn inject_locals(body: &mut FuncBody, num: u32) { + use self::elements::Local; + *body.locals_mut() = vec![Local::new(num, ValueType::I64)]; + } } /// The maximum amount of pages any contract is allowed to have according to the current `Schedule`. 
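To make the `DynInstr::Counter` behaviour above concrete, here is a freestanding sketch of just that variant's expansion; the helper name and the example values are illustrative only.

```rust
// Mirrors the Counter(offset, increment_by) arm of `repeated_dyn`: every time
// the instruction slot is visited, an I32Const with the current offset is
// emitted and the offset is bumped.
fn counter_expansion(mut offset: u32, increment_by: u32, repetitions: usize) -> Vec<i32> {
    (0..repetitions)
        .map(|_| {
            let current = offset;
            offset += increment_by;
            current as i32
        })
        .collect()
}

fn main() {
    // Three repetitions starting at 0, stepping by 4 (e.g. word-aligned pointers).
    assert_eq!(counter_expansion(0, 4, 3), vec![0, 4, 8]);
}
```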
-pub fn max_pages() -> u32 { - Contracts::::current_schedule().max_memory_pages +pub fn max_pages() -> u32 +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ + T::Schedule::get().limits.memory_pages +} + +fn inject_gas_metering(module: Module) -> Module { + let schedule = T::Schedule::get(); + let gas_rules = schedule.rules(&module); + pwasm_utils::inject_gas_counter(module, &gas_rules, "seal0").unwrap() +} + +fn inject_stack_metering(module: Module) -> Module { + let height = T::Schedule::get().limits.stack_height; + pwasm_utils::stack_height::inject_limiter(module, height).unwrap() } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 22bcc3bc4e860..db657e618322e 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,60 +20,54 @@ #![cfg(feature = "runtime-benchmarks")] mod code; - -use crate::*; -use crate::Module as Contracts; -use crate::exec::StorageKey; -use crate::schedule::API_BENCHMARK_BATCH_SIZE; -use self::code::{ - body, ModuleDefinition, DataSegment, ImportedMemory, ImportedFunction, WasmModule, +mod sandbox; + +use self::{ + code::{ + body::{self, DynInstr::*}, + DataSegment, ImportedFunction, ImportedMemory, ModuleDefinition, WasmModule, + }, + sandbox::Sandbox, }; - -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; -use frame_system::{Module as System, RawOrigin}; -use parity_wasm::elements::{Instruction, ValueType, BlockType}; -use sp_runtime::traits::{Hash, Bounded}; -use sp_std::{default::Default, convert::{TryInto}}; +use crate::{ + exec::{AccountIdOf, StorageKey}, + schedule::{API_BENCHMARK_BATCH_SIZE, INSTR_BENCHMARK_BATCH_SIZE}, + storage::Storage, + Pallet as Contracts, *, +}; +use codec::Encode; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::weights::Weight; +use frame_system::RawOrigin; +use pwasm_utils::parity_wasm::elements::{BlockType, BrTableData, Instruction, ValueType}; +use sp_runtime::{ + traits::{Bounded, Hash}, + Perbill, +}; +use sp_std::{convert::TryInto, default::Default, vec, vec::Vec}; /// How many batches we do per API benchmark. const API_BENCHMARK_BATCHES: u32 = 20; +/// How many batches we do per Instruction benchmark. +const INSTR_BENCHMARK_BATCHES: u32 = 50; + /// An instantiated and deployed contract. -struct Contract { +struct Contract { caller: T::AccountId, account_id: T::AccountId, addr: ::Source, endowment: BalanceOf, - code_hash: ::Output, -} - -/// Describes how much balance should be transferred on instantiate from the caller. -enum Endow { - /// Endow the contract with a maximum amount of balance. This value is described by - /// `Contract::max_endowment`. - Max, - /// Endow so that the amount of balance that is transferred is big but not so big - /// to offset the rent payment. This is needed in order to test rent collection. - CollectRent, } -impl Endow { - /// The maximum amount of balance a caller can transfer without being brought below - /// the existential deposit. This assumes that every caller is funded with the amount - /// returned by `caller_funding`. 
- fn max() -> BalanceOf { - caller_funding::().saturating_sub(T::Currency::minimum_balance()) - } -} - -impl Contract { +impl Contract +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ /// Create new contract and use a default account id as instantiator. - fn new( - module: WasmModule, - data: Vec, - endowment: Endow, - ) -> Result, &'static str> { - Self::with_index(0, module, data, endowment) + fn new(module: WasmModule, data: Vec) -> Result, &'static str> { + Self::with_index(0, module, data) } /// Create new contract and use an account id derived from the supplied index as instantiator. @@ -81,9 +75,8 @@ impl Contract { index: u32, module: WasmModule, data: Vec, - endowment: Endow, ) -> Result, &'static str> { - Self::with_caller(account("instantiator", index, 0), module, data, endowment) + Self::with_caller(account("instantiator", index, 0), module, data) } /// Create new contract and use the supplied `caller` as instantiator. @@ -91,38 +84,20 @@ impl Contract { caller: T::AccountId, module: WasmModule, data: Vec, - endowment: Endow, - ) -> Result, &'static str> - { - use sp_runtime::traits::{CheckedDiv, SaturatedConversion}; - let (storage_size, endowment) = match endowment { - Endow::CollectRent => { - // storage_size cannot be zero because otherwise a contract that is just above - // the subsistence threshold does not pay rent given a large enough subsistence - // threshold. But we need rent payments to occur in order to benchmark for worst cases. - let storage_size = Config::::subsistence_threshold_uncached() - .checked_div(&T::RentDepositOffset::get()) - .unwrap_or_else(Zero::zero); - - // Endowment should be large but not as large to inhibit rent payments. - let endowment = T::RentDepositOffset::get() - .saturating_mul(storage_size + T::StorageSizeOffset::get().into()) - .saturating_sub(1.into()); - - (storage_size, endowment) - }, - Endow::Max => (0.into(), Endow::max::()), - }; + ) -> Result, &'static str> { + let endowment = contract_funding::(); T::Currency::make_free_balance_be(&caller, caller_funding::()); - let addr = T::DetermineContractAddress::contract_address_for(&module.hash, &data, &caller); - init_block_number::(); - Contracts::::put_code_raw(module.code)?; + let salt = vec![0xff]; + let addr = Contracts::::contract_address(&caller, &module.hash, &salt); + + Contracts::::store_code_raw(module.code)?; Contracts::::instantiate( RawOrigin::Signed(caller.clone()).into(), endowment, Weight::max_value(), module.hash, data, + salt, )?; let result = Contract { @@ -130,168 +105,200 @@ impl Contract { account_id: addr.clone(), addr: T::Lookup::unlookup(addr), endowment, - code_hash: module.hash.clone(), }; - let mut contract = result.alive_info()?; - contract.storage_size = storage_size.saturated_into::(); - ContractInfoOf::::insert(&result.account_id, ContractInfo::Alive(contract)); + ContractInfoOf::::insert(&result.account_id, result.info()?); Ok(result) } + /// Create a new contract with the supplied storage item count and size each. + fn with_storage( + code: WasmModule, + stor_num: u32, + stor_size: u32, + ) -> Result { + let contract = Contract::::new(code, vec![])?; + let storage_items = (0..stor_num) + .map(|i| { + let hash = T::Hashing::hash_of(&i) + .as_ref() + .try_into() + .map_err(|_| "Hash too big for storage key")?; + Ok((hash, vec![42u8; stor_size as usize])) + }) + .collect::, &'static str>>()?; + contract.store(&storage_items)?; + Ok(contract) + } + /// Store the supplied storage items into this contracts storage. 
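A freestanding sketch of the key derivation used by `with_storage` above; it uses `sp_core::hashing::blake2_256` directly as a stand-in for `T::Hashing::hash_of`, which SCALE-encodes the index first (for a `u32` that is just its little-endian bytes).

```rust
use sp_core::hashing::blake2_256;

fn main() {
    // One 32-byte storage key per item index; each key then maps to a
    // vec![42u8; stor_size] value in the contract's storage trie.
    let keys: Vec<[u8; 32]> = (0u32..3)
        .map(|i| blake2_256(&i.to_le_bytes()))
        .collect();
    assert_eq!(keys.len(), 3);
    assert_ne!(keys[0], keys[1]); // distinct indices yield distinct keys
}
```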
fn store(&self, items: &Vec<(StorageKey, Vec<u8>)>) -> Result<(), &'static str> { - let info = self.alive_info()?; + let mut info = self.info()?; for item in items { - crate::storage::write_contract_storage::<T>( - &self.account_id, - &info.trie_id, - &item.0, - Some(item.1.clone()), - ) - .map_err(|_| "Failed to write storage to restoration dest")?; + Storage::<T>::write(&mut info, &item.0, Some(item.1.clone())) + .map_err(|_| "Failed to write storage to restoration dest")?; } + <ContractInfoOf<T>>::insert(&self.account_id, info.clone()); Ok(()) } - /// Get the `AliveContractInfo` of the `addr` or an error if it is no longer alive. - fn address_alive_info(addr: &T::AccountId) -> Result<AliveContractInfo<T>, &'static str> { - ContractInfoOf::<T>::get(addr).and_then(|c| c.get_alive()) - .ok_or("Expected contract to be alive at this point.") + /// Get the `ContractInfo` of the `addr` or an error if it no longer exists. + fn address_info(addr: &T::AccountId) -> Result<ContractInfo<T>, &'static str> { + ContractInfoOf::<T>::get(addr).ok_or("Expected contract to exist at this point.") } - /// Get the `AliveContractInfo` of this contract or an error if it is no longer alive. - fn alive_info(&self) -> Result<AliveContractInfo<T>, &'static str> { - Self::address_alive_info(&self.account_id) + /// Get the `ContractInfo` of this contract or an error if it no longer exists. + fn info(&self) -> Result<ContractInfo<T>, &'static str> { + Self::address_info(&self.account_id) } +} - /// Return an error if this contract is not a tombstone. - fn ensure_tombstone(&self) -> Result<(), &'static str> { - ContractInfoOf::<T>::get(&self.account_id).and_then(|c| c.get_tombstone()) - .ok_or("Expected contract to be a tombstone at this point.") - .map(|_| ()) - } +/// The funding that each account that either calls or instantiates contracts is funded with. +fn caller_funding<T: Config>() -> BalanceOf<T> { + BalanceOf::<T>::max_value() / 2u32.into() +} - /// Get the block number when this contract will be evicted. Returns an error when - /// the rent collection won't happen because the contract has too much endowment. - fn eviction_at(&self) -> Result<T::BlockNumber, &'static str> { - let projection = crate::rent::compute_rent_projection::<T>(&self.account_id) - .map_err(|_| "Invalid acc for rent")?; - match projection { - RentProjection::EvictionAt(at) => Ok(at), - _ => Err("Account does not pay rent.")?, - } - } +/// The funding used for contracts. It is less than `caller_funding` on purpose. +fn contract_funding<T: Config>() -> BalanceOf<T> { + caller_funding::<T>().saturating_sub(T::Currency::minimum_balance() * 100u32.into()) } -/// A `Contract` that was evicted after accumulating some storage. +/// Load the specified contract file from disk by including it into the runtime. /// -/// This is used to benchmark contract resurrection. -struct Tombstone { - /// The contract that was evicted. - contract: Contract<T>, - /// The storage the contract held when it was evicted. - storage: Vec<(StorageKey, Vec<u8>)>, +/// We need to load a different version of ink! contracts when the benchmark is run as +/// a test. This is because ink! contracts depend on the sizes of types that are defined +/// differently in the test environment. Solang is more lax in that regard. +macro_rules! load_benchmark { + ($name:expr) => {{ + #[cfg(not(test))] + { + include_bytes!(concat!("../../benchmarks/", $name, ".wasm")) + } + #[cfg(test)] + { + include_bytes!(concat!("../../benchmarks/", $name, "_test.wasm")) + } + }}; } -impl Tombstone { - /// Create and evict a new contract with the supplied storage item count and size each.
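The relationship between the two funding helpers above can be shown with plain integers; this sketch substitutes `u128` for `BalanceOf<T>` and a made-up minimum balance, so only the arithmetic is meaningful.

```rust
fn caller_funding(max_value: u128) -> u128 {
    max_value / 2
}

fn contract_funding(max_value: u128, minimum_balance: u128) -> u128 {
    // Deliberately below caller_funding so an instantiating caller can always
    // cover the endowment plus fees.
    caller_funding(max_value).saturating_sub(minimum_balance * 100)
}

fn main() {
    let (max, min) = (u128::MAX, 1u128);
    assert!(contract_funding(max, min) < caller_funding(max));
}
```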
- fn new(stor_num: u32, stor_size: u32) -> Result { - let contract = Contract::::new(WasmModule::dummy(), vec![], Endow::CollectRent)?; - let storage_items = create_storage::(stor_num, stor_size)?; - contract.store(&storage_items)?; - System::::set_block_number( - contract.eviction_at()? + T::SignedClaimHandicap::get() + 5.into() - ); - crate::rent::collect_rent::(&contract.account_id); - contract.ensure_tombstone()?; +benchmarks! { + where_clause { where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + } - Ok(Tombstone { - contract, - storage: storage_items, - }) + // The base weight without any actual work performed apart from the setup costs. + on_initialize {}: { + Storage::::process_deletion_queue_batch(Weight::max_value()) } -} -/// Generate `stor_num` storage items. Each has the size `stor_size`. -fn create_storage( - stor_num: u32, - stor_size: u32 -) -> Result)>, &'static str> { - (0..stor_num).map(|i| { - let hash = T::Hashing::hash_of(&i) - .as_ref() - .try_into() - .map_err(|_| "Hash too big for storage key")?; - Ok((hash, vec![42u8; stor_size as usize])) - }).collect::, &'static str>>() -} + #[skip_meta] + on_initialize_per_trie_key { + let k in 0..1024; + let instance = Contract::::with_storage(WasmModule::dummy(), k, T::Schedule::get().limits.payload_len)?; + Storage::::queue_trie_for_deletion(&instance.info()?)?; + }: { + Storage::::process_deletion_queue_batch(Weight::max_value()) + } -/// The funding that each account that either calls or instantiates contracts is funded with. -fn caller_funding() -> BalanceOf { - BalanceOf::::max_value() / 2.into() -} + on_initialize_per_queue_item { + let q in 0..1024.min(T::DeletionQueueDepth::get()); + for i in 0 .. q { + let instance = Contract::::with_index(i, WasmModule::dummy(), vec![])?; + Storage::::queue_trie_for_deletion(&instance.info()?)?; + ContractInfoOf::::remove(instance.account_id); + } + }: { + Storage::::process_deletion_queue_batch(Weight::max_value()) + } -/// Set the block number to one. -/// -/// The default block number is zero. The benchmarking system bumps the block number -/// to one for the benchmarking closure when it is set to zero. In order to prevent this -/// undesired implicit bump (which messes with rent collection), wo do the bump ourselfs -/// in the setup closure so that both the instantiate and subsequent call are run with the -/// same block number. -fn init_block_number() { - System::::set_block_number(1.into()); -} + // This benchmarks the additional weight that is charged when a contract is executed the + // first time after a new schedule was deployed: For every new schedule a contract needs + // to re-run the instrumentation once. + instrument { + let c in 0 .. T::Schedule::get().limits.code_len / 1024; + let WasmModule { code, hash, .. } = WasmModule::::sized(c * 1024); + Contracts::::store_code_raw(code)?; + let mut module = PrefabWasmModule::from_storage_noinstr(hash)?; + let schedule = T::Schedule::get(); + }: { + Contracts::::reinstrument_module(&mut module, &schedule)?; + } -benchmarks! { - _ { + // The weight of loading and decoding of a contract's code per kilobyte. + code_load { + let c in 0 .. T::Schedule::get().limits.code_len / 1024; + let WasmModule { code, hash, .. } = WasmModule::::dummy_with_bytes(c * 1024); + Contracts::::store_code_raw(code)?; + }: { + >::from_storage_noinstr(hash)?; } - // This extrinsic is pretty much constant as it is only a simple setter. - update_schedule { - let schedule = Schedule { - version: 1, - .. 
Default::default() - }; - }: _(RawOrigin::Root, schedule) + // The weight of changing the refcount of a contract's code per kilobyte. + code_refcount { + let c in 0 .. T::Schedule::get().limits.code_len / 1024; + let WasmModule { code, hash, .. } = WasmModule::<T>::dummy_with_bytes(c * 1024); + Contracts::<T>::store_code_raw(code)?; + let mut gas_meter = GasMeter::new(Weight::max_value()); + }: { + <PrefabWasmModule<T>>::add_user(hash, &mut gas_meter)?; + } // This constructs a contract that is maximal expensive to instrument. // It creates a maximum number of metering blocks per byte. - // `n`: Size of the code in kilobytes. - put_code { - let n in 0 .. Contracts::<T>::current_schedule().max_code_size / 1024; + // The size of the salt influences the runtime because it is hashed in order to + // determine the contract address. + // `c`: Size of the code in kilobytes. + // `s`: Size of the salt in kilobytes. + // + // # Note + // + // We cannot let `c` grow to the maximum code size because the code is not allowed + // to be larger than the maximum size **after instrumentation**. + instantiate_with_code { + let c in 0 .. Perbill::from_percent(50).mul_ceil(T::Schedule::get().limits.code_len / 1024); + let s in 0 .. code::max_pages::<T>() * 64; + let salt = vec![42u8; (s * 1024) as usize]; + let endowment = contract_funding::<T>() / 3u32.into(); + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::<T>()); - let module = WasmModule::<T>::sized(n * 1024); - let origin = RawOrigin::Signed(caller); - }: _(origin, module.code) + let WasmModule { code, hash, .. } = WasmModule::<T>::sized(c * 1024); + let origin = RawOrigin::Signed(caller.clone()); + let addr = Contracts::<T>::contract_address(&caller, &hash, &salt); + }: _(origin, endowment, Weight::max_value(), code, vec![], salt) + verify { + // endowment was removed from the caller + assert_eq!(T::Currency::free_balance(&caller), caller_funding::<T>() - endowment); + // contract has the full endowment + assert_eq!(T::Currency::free_balance(&addr), endowment); + // instantiate should leave a contract + Contract::<T>::address_info(&addr)?; + } // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. - // The size of the input data influences the runtime because it is hashed in order to determine - // the contract address. - // `n`: Size of the data passed to constructor in kilobytes. + // `s`: Size of the salt in kilobytes. instantiate { - let n in 0 .. code::max_pages::<T>() * 64; - let data = vec![42u8; (n * 1024) as usize]; - let endowment = Config::<T>::subsistence_threshold_uncached(); + let s in 0 .. code::max_pages::<T>() * 64; + let salt = vec![42u8; (s * 1024) as usize]; + let endowment = contract_funding::<T>() / 3u32.into(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::<T>()); - let WasmModule { code, hash } = WasmModule::<T>::dummy(); + let WasmModule { code, hash, ..
} = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); - let addr = T::DetermineContractAddress::contract_address_for(&hash, &data, &caller); - Contracts::::put_code_raw(code)?; - }: _(origin, endowment, Weight::max_value(), hash, data) + let addr = Contracts::::contract_address(&caller, &hash, &salt); + Contracts::::store_code_raw(code)?; + }: _(origin, endowment, Weight::max_value(), hash, vec![], salt) verify { // endowment was removed from the caller assert_eq!(T::Currency::free_balance(&caller), caller_funding::() - endowment); - // contract has the full endowment because no rent collection happended + // contract has the full endowment assert_eq!(T::Currency::free_balance(&addr), endowment); - // instantiate should leave a alive contract - Contract::::address_alive_info(&addr)?; + // instantiate should leave a contract + Contract::::address_info(&addr)?; } - // We just call a dummy contract to measure to overhead of the call extrinsic. + // We just call a dummy contract to measure the overhead of the call extrinsic. // The size of the data has no influence on the costs of this extrinsic as long as the contract // won't call `seal_input` in its constructor to copy the data to contract memory. // The dummy contract used here does not do this. The costs for the data copy is billed as @@ -299,14 +306,11 @@ benchmarks! { call { let data = vec![42u8; 1024]; let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::dummy(), vec![], Endow::CollectRent + whitelisted_caller(), WasmModule::dummy(), vec![], )?; - let value = T::Currency::minimum_balance() * 100.into(); + let value = T::Currency::minimum_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr.clone(); - - // trigger rent collection for worst case performance of call - System::::set_block_number(instance.eviction_at()? - 5.into()); let before = T::Currency::free_balance(&instance.account_id); }: _(origin, callee, value, Weight::max_value(), data) verify { @@ -315,123 +319,83 @@ benchmarks! { T::Currency::free_balance(&instance.caller), caller_funding::() - instance.endowment - value, ); - // rent should have lowered the amount of balance of the contract - assert!(T::Currency::free_balance(&instance.account_id) < before + value); - // but it should not have been evicted by the rent collection - instance.alive_info()?; - } - - // We benchmark the costs for sucessfully evicting an empty contract. - // The actual costs are depending on how many storage items the evicted contract - // does have. However, those costs are not to be payed by the sender but - // will be distributed over multiple blocks using a scheduler. Otherwise there is - // no incentive to remove large contracts when the removal is more expensive than - // the reward for removing them. - claim_surcharge { - let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::dummy(), vec![], Endow::CollectRent - )?; - let origin = RawOrigin::Signed(instance.caller.clone()); - let account_id = instance.account_id.clone(); - - // instantiate should leave us with an alive contract - instance.alive_info()?; - - // generate enough rent so that the contract is evicted - System::::set_block_number( - instance.eviction_at()? 
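The `verify` blocks here reduce to simple conservation arithmetic: the endowment leaves the caller at instantiation, the transferred `value` leaves it at call time, and both must show up on the contract. In illustrative numbers:

```rust
fn main() {
    let caller_funding: u128 = 1_000_000; // arbitrary illustrative figures
    let endowment: u128 = 300;
    let value: u128 = 100;

    let caller_after = caller_funding - endowment - value;
    let contract_after = endowment + value;

    // no funds are burned; they only move from caller to contract
    assert_eq!(caller_after + contract_after, caller_funding);
}
```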
+ T::SignedClaimHandicap::get() + 5.into() - ); - }: _(origin, account_id, None) - verify { - // the claim surcharge should have evicted the contract - instance.ensure_tombstone()?; - - // the caller should get the reward for being a good snitch - assert_eq!( - T::Currency::free_balance(&instance.caller), - caller_funding::() - instance.endowment + ::SurchargeReward::get(), - ); + // contract should have received the value + assert_eq!(T::Currency::free_balance(&instance.account_id), before + value); + // contract should still exist + instance.info()?; } seal_caller { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_caller", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_address { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_address", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_gas_left { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_gas_left", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_balance { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_balance", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_value_transferred { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_value_transferred", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_minimum_balance { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_minimum_balance", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_tombstone_deposit { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_tombstone_deposit", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; - let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) - - seal_rent_allowance { - let r in 0 .. 
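All of these `seal_*` getter benchmarks share one shape: the host function runs `r * API_BENCHMARK_BATCH_SIZE` times, so the measured weight is affine in `r` and the per-call cost is the slope divided by the batch size. A sketch of that extraction (the batch-size value is assumed for illustration, not taken from the source):

```rust
const API_BENCHMARK_BATCH_SIZE: u64 = 100; // assumed; see the constant's actual definition

/// Recover the per-host-call weight from two (r, measured_weight) samples.
/// Real weight generation fits a least-squares line over many samples; two
/// points are enough to show the idea.
fn per_call_weight((r1, w1): (u64, u64), (r2, w2): (u64, u64)) -> u64 {
    (w2 - w1) / ((r2 - r1) * API_BENCHMARK_BATCH_SIZE)
}
```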
API_BENCHMARK_BATCHES; - let instance = Contract::::new(WasmModule::getter( - "seal_rent_allowance", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_block_number { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_block_number", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_now { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::getter( "seal_now", r * API_BENCHMARK_BATCH_SIZE - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_weight_to_fee { let r in 0 .. API_BENCHMARK_BATCHES; @@ -439,6 +403,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_weight_to_fee", params: vec![ValueType::I64, ValueType::I32, ValueType::I32], return_type: None, @@ -455,14 +420,15 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_gas { let r in 0 .. API_BENCHMARK_BATCHES; let code = WasmModule::::from(ModuleDefinition { imported_functions: vec![ImportedFunction { + module: "seal0", name: "gas", params: vec![ValueType::I32], return_type: None, @@ -473,19 +439,17 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) - // We cannot call seal_input multiple times. Therefore our weight determination is not - // as precise as with other APIs. Because this function can only be called once per - // contract it cannot be used for Dos. seal_input { - let r in 0 .. 1; + let r in 0 .. API_BENCHMARK_BATCHES; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_input", params: vec![ValueType::I32, ValueType::I32], return_type: None, @@ -496,16 +460,16 @@ benchmarks! { value: 0u32.to_le_bytes().to_vec(), }, ], - call_body: Some(body::repeated(r, &[ + call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ Instruction::I32Const(4), // ptr where to store output Instruction::I32Const(0), // ptr to length Instruction::Call(0), ])), .. 
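The `body::repeated` helper used throughout stamps the same instruction sequence into one function body `n` times. A standalone re-creation with a toy instruction enum (the real helper works on parity-wasm `Instruction`s):

```rust
#[derive(Clone, Debug)]
enum Instr { I32Const(i32), Call(u32), Drop, End }

/// Repeat `instructions` n times and terminate the body, mirroring what the
/// benchmarks feed into `call_body`.
fn repeated(n: u32, instructions: &[Instr]) -> Vec<Instr> {
    let mut body: Vec<Instr> = instructions
        .iter()
        .cloned()
        .cycle()
        .take(instructions.len() * n as usize)
        .collect();
    body.push(Instr::End); // every wasm function body ends with `end`
    body
}
```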
Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_input_per_kb { let n in 0 .. code::max_pages::() * 64; @@ -514,6 +478,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_input", params: vec![ValueType::I32, ValueType::I32], return_type: None, @@ -524,25 +489,27 @@ benchmarks! { value: buffer_size.to_le_bytes().to_vec(), }, ], - call_body: Some(body::plain(vec![ + call_body: Some(body::repeated(API_BENCHMARK_BATCH_SIZE, &[ Instruction::I32Const(4), // ptr where to store output Instruction::I32Const(0), // ptr to length Instruction::Call(0), - Instruction::End, ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let data = vec![42u8; (n * 1024).min(buffer_size) as usize]; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), data) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), data) - // The same argument as for `seal_input` is true here. + // We cannot call `seal_return` multiple times. Therefore our weight determination is not + // as precise as with other APIs. Because this function can only be called once per + // contract it cannot be used as an attack vector. seal_return { let r in 0 .. 1; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_return", params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, @@ -555,15 +522,16 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_return_per_kb { let n in 0 .. code::max_pages::() * 64; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_return", params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, @@ -577,11 +545,11 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) - // The same argument as for `seal_input` is true here. + // The same argument as for `seal_return` is true here. seal_terminate { let r in 0 .. 1; let beneficiary = account::("beneficiary", 0, 0); @@ -590,6 +558,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_terminate", params: vec![ValueType::I32, ValueType::I32], return_type: None, @@ -607,186 +576,30 @@ benchmarks! 
{ ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; - let origin = RawOrigin::Signed(instance.caller.clone()); - assert_eq!(T::Currency::total_balance(&beneficiary), 0.into()); - assert_eq!(T::Currency::total_balance(&instance.account_id), Endow::max::()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) - verify { - if r > 0 { - assert_eq!(T::Currency::total_balance(&instance.account_id), 0.into()); - assert_eq!(T::Currency::total_balance(&beneficiary), Endow::max::()); - } - } - - seal_restore_to { - let r in 0 .. 1; - - // Restore just moves the trie id from origin to destination and therefore - // does not depend on the size of the destination contract. However, to not - // trigger any edge case we won't use an empty contract as destination. - let tombstone = Tombstone::::new(10, T::MaxValueSize::get())?; - - let dest = tombstone.contract.account_id.encode(); - let dest_len = dest.len(); - let code_hash = tombstone.contract.code_hash.encode(); - let code_hash_len = code_hash.len(); - let rent_allowance = BalanceOf::::max_value().encode(); - let rent_allowance_len = rent_allowance.len(); - - let dest_offset = 0; - let code_hash_offset = dest_offset + dest_len; - let rent_allowance_offset = code_hash_offset + code_hash_len; - - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - name: "seal_restore_to", - params: vec![ - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: None, - }], - data_segments: vec![ - DataSegment { - offset: dest_offset as u32, - value: dest, - }, - DataSegment { - offset: code_hash_offset as u32, - value: code_hash, - }, - DataSegment { - offset: rent_allowance_offset as u32, - value: rent_allowance, - }, - ], - call_body: Some(body::repeated(r, &[ - Instruction::I32Const(dest_offset as i32), - Instruction::I32Const(dest_len as i32), - Instruction::I32Const(code_hash_offset as i32), - Instruction::I32Const(code_hash_len as i32), - Instruction::I32Const(rent_allowance_offset as i32), - Instruction::I32Const(rent_allowance_len as i32), - Instruction::I32Const(0), // delta_ptr - Instruction::I32Const(0), // delta_count - Instruction::Call(0), - ])), - .. Default::default() - }); - - let instance = Contract::::with_caller( - account("origin", 0, 0), code, vec![], Endow::Max - )?; - instance.store(&tombstone.storage)?; - System::::set_block_number(System::::block_number() + 1.into()); - + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); + assert_eq!(T::Currency::total_balance(&instance.account_id), contract_funding::()); + }: call(origin, instance.addr.clone(), 0u32.into(), Weight::max_value(), vec![]) verify { if r > 0 { - tombstone.contract.alive_info()?; + assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); + assert_eq!(T::Currency::total_balance(&beneficiary), contract_funding::()); } } - seal_restore_to_per_delta { - let d in 0 .. 
API_BENCHMARK_BATCHES; - let tombstone = Tombstone::::new(0, 0)?; - let delta = create_storage::(d * API_BENCHMARK_BATCH_SIZE, T::MaxValueSize::get())?; - - let dest = tombstone.contract.account_id.encode(); - let dest_len = dest.len(); - let code_hash = tombstone.contract.code_hash.encode(); - let code_hash_len = code_hash.len(); - let rent_allowance = BalanceOf::::max_value().encode(); - let rent_allowance_len = rent_allowance.len(); - let delta_keys = delta.iter().flat_map(|(key, _)| key).cloned().collect::>(); - - let dest_offset = 0; - let code_hash_offset = dest_offset + dest_len; - let rent_allowance_offset = code_hash_offset + code_hash_len; - let delta_keys_offset = rent_allowance_offset + rent_allowance_len; - - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - name: "seal_restore_to", - params: vec![ - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: None, - }], - data_segments: vec![ - DataSegment { - offset: dest_offset as u32, - value: dest, - }, - DataSegment { - offset: code_hash_offset as u32, - value: code_hash, - }, - DataSegment { - offset: rent_allowance_offset as u32, - value: rent_allowance, - }, - DataSegment { - offset: delta_keys_offset as u32, - value: delta_keys, - }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(dest_offset as i32), - Instruction::I32Const(dest_len as i32), - Instruction::I32Const(code_hash_offset as i32), - Instruction::I32Const(code_hash_len as i32), - Instruction::I32Const(rent_allowance_offset as i32), - Instruction::I32Const(rent_allowance_len as i32), - Instruction::I32Const(delta_keys_offset as i32), // delta_ptr - Instruction::I32Const(delta.len() as i32), // delta_count - Instruction::Call(0), - Instruction::End, - ])), - .. Default::default() - }); - - let instance = Contract::::with_caller( - account("origin", 0, 0), code, vec![], Endow::Max - )?; - instance.store(&tombstone.storage)?; - instance.store(&delta)?; - System::::set_block_number(System::::block_number() + 1.into()); - - let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) - verify { - tombstone.contract.alive_info()?; - } - // We benchmark only for the maximum subject length. We assume that this is some lowish // number (< 1 KB). Therefore we are not overcharging too much in case a smaller subject is // used. seal_random { let r in 0 .. API_BENCHMARK_BATCHES; let pages = code::max_pages::(); - let subject_len = Contracts::::current_schedule().max_subject_len; + let subject_len = T::Schedule::get().limits.subject_len; assert!(subject_len < 1024); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_random", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, @@ -806,9 +619,9 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Overhead of calling the function without any topic. 
// We benchmark for the worst case (largest event). @@ -817,6 +630,7 @@ benchmarks! { let code = WasmModule::<T>::from(ModuleDefinition { memory: Some(ImportedMemory::max::<T>()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_deposit_event", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, @@ -830,25 +644,25 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::<T>::new(code, vec![], Endow::Max)?; + let instance = Contract::<T>::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Benchmark the overhead that topics generate. // `t`: Number of topics // `n`: Size of event payload in kb seal_deposit_event_per_topic_and_kb { - let t in 0 .. Contracts::<T>::current_schedule().max_event_topics; - let n in 0 .. T::MaxValueSize::get() / 1024; + let t in 0 .. T::Schedule::get().limits.event_topics; + let n in 0 .. T::Schedule::get().limits.payload_len / 1024; let mut topics = (0..API_BENCHMARK_BATCH_SIZE) .map(|n| (n * t..n * t + t).map(|i| T::Hashing::hash_of(&i)).collect::<Vec<_>>().encode()) .peekable(); let topics_len = topics.peek().map(|i| i.len()).unwrap_or(0); let topics = topics.flatten().collect(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::<T>::from(ModuleDefinition { memory: Some(ImportedMemory::max::<T>()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_deposit_event", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, @@ -859,7 +673,7 @@ benchmarks! { value: topics, }, ], - call_body: Some(body::counted(API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, topics_len as u32), // topics_ptr Regular(Instruction::I32Const(topics_len as i32)), // topics_len Regular(Instruction::I32Const(0)), // data_ptr @@ -868,52 +682,51 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::<T>::new(code, vec![], Endow::Max)?; + let instance = Contract::<T>::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) - seal_set_rent_allowance { + // The size of the supplied message does not influence the weight because it is never + // processed during on-chain execution: It is only ever read during debugging, which happens + // when the contract is called via RPC, where weights do not matter. + seal_debug_message { let r in 0 ..
API_BENCHMARK_BATCHES; - let allowance = caller_funding::<T>().encode(); - let allowance_len = allowance.len(); + let max_bytes = code::max_pages::<T>() * 64 * 1024; let code = WasmModule::<T>::from(ModuleDefinition { memory: Some(ImportedMemory { min_pages: 1, max_pages: 1 }), imported_functions: vec![ImportedFunction { - name: "seal_set_rent_allowance", + module: "seal0", + name: "seal_debug_message", params: vec![ValueType::I32, ValueType::I32], - return_type: None, + return_type: Some(ValueType::I32), }], - data_segments: vec![ - DataSegment { - offset: 0, - value: allowance, - }, - ], call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ Instruction::I32Const(0), // value_ptr - Instruction::I32Const(allowance_len as i32), // value_len + Instruction::I32Const(max_bytes as i32), // value_len Instruction::Call(0), + Instruction::Drop, ])), .. Default::default() }); - let instance = Contract::<T>::new(code, vec![], Endow::Max)?; + let instance = Contract::<T>::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Only the overhead of calling the function itself with minimal arguments. // The contract is a bit more complex because it needs to use different keys in order // to generate unique storage accesses. However, it is still dominated by the storage // accesses. + #[skip_meta] seal_set_storage { let r in 0 .. API_BENCHMARK_BATCHES; let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) .flat_map(|n| T::Hashing::hash_of(&n).as_ref().to_vec()) .collect::<Vec<_>>(); let key_len = sp_std::mem::size_of::<<T::Hashing as Hash>::Output>(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::<T>::from(ModuleDefinition { memory: Some(ImportedMemory::max::<T>()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, @@ -924,7 +737,7 @@ benchmarks! { value: keys, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, key_len as u32), // key_ptr Regular(Instruction::I32Const(0)), // value_ptr Regular(Instruction::I32Const(0)), // value_len @@ -932,17 +745,18 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::<T>::new(code, vec![], Endow::Max)?; + let instance = Contract::<T>::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_set_storage_per_kb { - let n in 0 .. T::MaxValueSize::get() / 1024; + let n in 0 .. T::Schedule::get().limits.payload_len / 1024; let key = T::Hashing::hash_of(&1u32).as_ref().to_vec(); let key_len = key.len(); let code = WasmModule::<T>::from(ModuleDefinition { memory: Some(ImportedMemory::max::<T>()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, @@ -961,13 +775,14 @@ benchmarks! { ])), ..
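The `#[skip_meta]` storage benchmarks derive every key from a counter hash so that no two accesses touch the same trie entry; repeated hits on one key would be served from caches and undercharge the worst case. A self-contained stand-in for that setup (a tiny FNV-1a instead of `T::Hashing`):

```rust
/// Non-cryptographic stand-in hash; the runtime uses `T::Hashing::hash_of`.
fn fnv1a_32(data: &[u8]) -> u32 {
    data.iter().fold(0x811c_9dc5u32, |h, b| (h ^ u32::from(*b)).wrapping_mul(0x0100_0193))
}

/// One distinct key per benchmarked storage access.
fn unique_keys(n: u32) -> Vec<[u8; 4]> {
    (0..n).map(|i| fnv1a_32(&i.to_le_bytes()).to_le_bytes()).collect()
}
```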
Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Similar to seal_set_storage. However, we store all the keys that we are about to // delete beforehand in order to prevent any optimizations that could occur when // deleting a non existing key. + #[skip_meta] seal_clear_storage { let r in 0 .. API_BENCHMARK_BATCHES; let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) @@ -975,10 +790,10 @@ benchmarks! { .collect::>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_len = sp_std::mem::size_of::<::Output>(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_clear_storage", params: vec![ValueType::I32], return_type: None, @@ -989,27 +804,28 @@ benchmarks! { value: key_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, key_len as u32), Regular(Instruction::Call(0)), ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; - let trie_id = instance.alive_info()?.trie_id; + let instance = Contract::::new(code, vec![])?; + let mut info = instance.info()?; for key in keys { - crate::storage::write_contract_storage::( - &instance.account_id, - &trie_id, + Storage::::write( + &mut info, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, - Some(vec![42; T::MaxValueSize::get() as usize]) + Some(vec![42; T::Schedule::get().limits.payload_len as usize]) ) .map_err(|_| "Failed to write to storage during setup.")?; } + >::insert(&instance.account_id, info.clone()); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // We make sure that all storage accesses are to unique keys. + #[skip_meta] seal_get_storage { let r in 0 .. API_BENCHMARK_BATCHES; let keys = (0 .. r * API_BENCHMARK_BATCH_SIZE) @@ -1018,10 +834,10 @@ benchmarks! { let key_len = sp_std::mem::size_of::<::Output>(); let key_bytes = keys.iter().flatten().cloned().collect::>(); let key_bytes_len = key_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1032,7 +848,7 @@ benchmarks! { value: key_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(0, key_len as u32), // key_ptr Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr @@ -1041,27 +857,28 @@ benchmarks! { ])), .. 
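The `out_ptr`/`out_len_ptr` pair seen in these bodies is the output convention of the seal API: the host writes the result into the contract's linear memory at `out_ptr` and stores the written length, little-endian, at `out_len_ptr`. A hedged host-side sketch, with a plain byte slice standing in for sandbox memory:

```rust
fn write_output(memory: &mut [u8], out_ptr: usize, out_len_ptr: usize, data: &[u8])
    -> Result<(), &'static str>
{
    memory
        .get_mut(out_ptr..out_ptr + data.len())
        .ok_or("output buffer out of bounds")?
        .copy_from_slice(data);
    memory
        .get_mut(out_len_ptr..out_len_ptr + 4)
        .ok_or("length slot out of bounds")?
        .copy_from_slice(&(data.len() as u32).to_le_bytes());
    Ok(())
}
```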
Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; - let trie_id = instance.alive_info()?.trie_id; + let instance = Contract::::new(code, vec![])?; + let mut info = instance.info()?; for key in keys { - crate::storage::write_contract_storage::( - &instance.account_id, - &trie_id, + Storage::::write( + &mut info, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, Some(vec![]) ) .map_err(|_| "Failed to write to storage during setup.")?; } + >::insert(&instance.account_id, info.clone()); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_get_storage_per_kb { - let n in 0 .. T::MaxValueSize::get() / 1024; + let n in 0 .. T::Schedule::get().limits.payload_len / 1024; let key = T::Hashing::hash_of(&1u32).as_ref().to_vec(); let key_len = key.len(); let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1073,7 +890,7 @@ benchmarks! { }, DataSegment { offset: key_len as u32, - value: T::MaxValueSize::get().to_le_bytes().into(), + value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), }, ], call_body: Some(body::repeated(API_BENCHMARK_BATCH_SIZE, &[ @@ -1086,17 +903,17 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; - let trie_id = instance.alive_info()?.trie_id; - crate::storage::write_contract_storage::( - &instance.account_id, - &trie_id, + let instance = Contract::::new(code, vec![])?; + let mut info = instance.info()?; + Storage::::write( + &mut info, key.as_slice().try_into().map_err(|e| "Key has wrong length")?, Some(vec![42u8; (n * 1024) as usize]) ) .map_err(|_| "Failed to write to storage during setup.")?; + >::insert(&instance.account_id, info.clone()); let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // We transfer to unique accounts. seal_transfer { @@ -1106,14 +923,14 @@ benchmarks! { .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); - let value = Config::::subsistence_threshold_uncached(); - assert!(value > 0.into()); + let value = Contracts::::subsistence_threshold(); + assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_transfer", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1128,7 +945,7 @@ benchmarks! { value: account_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(value_len as u32, account_len as u32), // account_ptr Regular(Instruction::I32Const(account_len as i32)), // account_len Regular(Instruction::I32Const(0)), // value_ptr @@ -1138,12 +955,12 @@ benchmarks! { ])), .. 
Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); for account in &accounts { - assert_eq!(T::Currency::total_balance(account), 0.into()); + assert_eq!(T::Currency::total_balance(account), 0u32.into()); } - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) verify { for account in &accounts { assert_eq!(T::Currency::total_balance(account), value); @@ -1153,19 +970,19 @@ benchmarks! { // We call unique accounts. seal_call { let r in 0 .. API_BENCHMARK_BATCHES; - let dummy_code = WasmModule::::dummy(); + let dummy_code = WasmModule::::dummy_with_bytes(0); let callees = (0..r * API_BENCHMARK_BATCH_SIZE) - .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![], Endow::Max)) + .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![])) .collect::, _>>()?; let callee_len = callees.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0); let callee_bytes = callees.iter().flat_map(|x| x.account_id.encode()).collect(); - let value: BalanceOf = 0.into(); + let value: BalanceOf = 0u32.into(); let value_bytes = value.encode(); let value_len = value_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_call", params: vec![ ValueType::I32, @@ -1190,7 +1007,7 @@ benchmarks! { value: callee_bytes, }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(value_len as u32, callee_len as u32), // callee_ptr Regular(Instruction::I32Const(callee_len as i32)), // callee_len Regular(Instruction::I64Const(0)), // gas @@ -1205,9 +1022,9 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) seal_call_per_transfer_input_output_kb { let t in 0 .. 1; @@ -1216,6 +1033,7 @@ benchmarks! { let callee_code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_return", params: vec![ ValueType::I32, @@ -1234,7 +1052,7 @@ benchmarks! { .. Default::default() }); let callees = (0..API_BENCHMARK_BATCH_SIZE) - .map(|i| Contract::with_index(i + 1, callee_code.clone(), vec![], Endow::Max)) + .map(|i| Contract::with_index(i + 1, callee_code.clone(), vec![])) .collect::, _>>()?; let callee_len = callees.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0); let callee_bytes = callees.iter().flat_map(|x| x.account_id.encode()).collect::>(); @@ -1242,10 +1060,10 @@ benchmarks! { let value: BalanceOf = t.into(); let value_bytes = value.encode(); let value_len = value_bytes.len(); - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_call", params: vec![ ValueType::I32, @@ -1274,7 +1092,7 @@ benchmarks! 
{ value: (o * 1024).to_le_bytes().into(), }, ], - call_body: Some(body::counted(API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ Counter(value_len as u32, callee_len as u32), // callee_ptr Regular(Instruction::I32Const(callee_len as i32)), // callee_len Regular(Instruction::I64Const(0)), // gas @@ -1289,9 +1107,9 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // We assume that every instantiate sends at least the subsistence amount. seal_instantiate { @@ -1299,22 +1117,25 @@ benchmarks! { let hashes = (0..r * API_BENCHMARK_BATCH_SIZE) .map(|i| { let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), call_body: Some(body::plain(vec![ + // we need to add this in order to make contracts unique + // so that they can be deployed from the same sender Instruction::I32Const(i as i32), Instruction::Drop, Instruction::End, ])), .. Default::default() }); - Contracts::::put_code_raw(code.code)?; + Contracts::::store_code_raw(code.code)?; Ok(code.hash) }) .collect::, &'static str>>()?; let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); let hashes_len = hashes_bytes.len(); - let value = Config::::subsistence_threshold_uncached(); - assert!(value > 0.into()); + let value = contract_funding::() / (r * API_BENCHMARK_BATCH_SIZE + 2).into(); + assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); let addr_len = sp_std::mem::size_of::(); @@ -1325,10 +1146,10 @@ benchmarks! { let addr_len_offset = hashes_offset + hashes_len; let addr_offset = addr_len_offset + addr_len; - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_instantiate", params: vec![ ValueType::I32, @@ -1341,7 +1162,9 @@ benchmarks! { ValueType::I32, ValueType::I32, ValueType::I32, - ValueType::I32 + ValueType::I32, + ValueType::I32, + ValueType::I32, ], return_type: Some(ValueType::I32), }], @@ -1359,7 +1182,7 @@ benchmarks! { value: addr_len.to_le_bytes().into(), }, ], - call_body: Some(body::counted(r * API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr Regular(Instruction::I32Const(hash_len as i32)), // code_hash_len Regular(Instruction::I64Const(0)), // gas @@ -1371,39 +1194,44 @@ benchmarks! { Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr Regular(Instruction::I32Const(u32::max_value() as i32)), // output_ptr Regular(Instruction::I32Const(0)), // output_len_ptr + Regular(Instruction::I32Const(0)), // salt_ptr + Regular(Instruction::I32Const(0)), // salt_ptr_len Regular(Instruction::Call(0)), Regular(Instruction::Drop), ])), .. 
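The `I32Const(i); Drop` pair injected above exists only to make every module's bytes, and therefore its code hash, distinct, so that one sender can deploy all of them. Reduced to its essence (over raw bytes; the result is not meant to be valid wasm):

```rust
/// Derive `n` byte-wise distinct blobs from one template; each hashes to a
/// different code hash even though they would behave identically.
fn unique_blobs(template: &[u8], n: u32) -> Vec<Vec<u8>> {
    (0..n)
        .map(|i| {
            let mut blob = template.to_vec();
            blob.extend_from_slice(&i.to_le_bytes()); // the distinguishing constant
            blob
        })
        .collect()
}
```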
Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr.clone(); let addresses = hashes .iter() - .map(|hash| T::DetermineContractAddress::contract_address_for( - hash, &[], &instance.account_id + .map(|hash| Contracts::::contract_address( + &instance.account_id, hash, &[], )) .collect::>(); for addr in &addresses { if let Some(_) = ContractInfoOf::::get(&addr) { - return Err("Expected that contract does not exist at this point."); + return Err("Expected that contract does not exist at this point.".into()); } } - }: call(origin, callee, 0.into(), Weight::max_value(), vec![]) + }: call(origin, callee, 0u32.into(), Weight::max_value(), vec![]) verify { for addr in &addresses { - instance.alive_info()?; + ContractInfoOf::::get(&addr) + .ok_or_else(|| "Contract should have been instantiated")?; } } - seal_instantiate_per_input_output_kb { + seal_instantiate_per_input_output_salt_kb { let i in 0 .. (code::max_pages::() - 1) * 64; let o in 0 .. (code::max_pages::() - 1) * 64; + let s in 0 .. (code::max_pages::() - 1) * 64; let callee_code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_return", params: vec![ ValueType::I32, @@ -1424,13 +1252,13 @@ benchmarks! { let hash = callee_code.hash.clone(); let hash_bytes = callee_code.hash.encode(); let hash_len = hash_bytes.len(); - Contracts::::put_code_raw(callee_code.code)?; + Contracts::::store_code_raw(callee_code.code)?; let inputs = (0..API_BENCHMARK_BATCH_SIZE).map(|x| x.encode()).collect::>(); let input_len = inputs.get(0).map(|x| x.len()).unwrap_or(0); let input_bytes = inputs.iter().cloned().flatten().collect::>(); let inputs_len = input_bytes.len(); - let value = Config::::subsistence_threshold_uncached(); - assert!(value > 0.into()); + let value = contract_funding::() / (API_BENCHMARK_BATCH_SIZE + 2).into(); + assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); let addr_len = sp_std::mem::size_of::(); @@ -1443,10 +1271,10 @@ benchmarks! { let output_len_offset = addr_len_offset + 4; let output_offset = output_len_offset + 4; - use body::CountedInstruction::{Counter, Regular}; let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { + module: "seal0", name: "seal_instantiate", params: vec![ ValueType::I32, @@ -1459,7 +1287,9 @@ benchmarks! { ValueType::I32, ValueType::I32, ValueType::I32, - ValueType::I32 + ValueType::I32, + ValueType::I32, + ValueType::I32, ], return_type: Some(ValueType::I32), }], @@ -1485,7 +1315,7 @@ benchmarks! { value: (o * 1024).to_le_bytes().into(), }, ], - call_body: Some(body::counted(API_BENCHMARK_BATCH_SIZE, vec![ + call_body: Some(body::repeated_dyn(API_BENCHMARK_BATCH_SIZE, vec![ Regular(Instruction::I32Const(hash_offset as i32)), // code_hash_ptr Regular(Instruction::I32Const(hash_len as i32)), // code_hash_len Regular(Instruction::I64Const(0)), // gas @@ -1497,6 +1327,8 @@ benchmarks! 
{ Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr Regular(Instruction::I32Const(output_offset as i32)), // output_ptr Regular(Instruction::I32Const(output_len_offset as i32)), // output_len_ptr + Counter(input_offset as u32, input_len as u32), // salt_ptr + Regular(Instruction::I32Const((s * 1024).max(input_len as u32) as i32)), // salt_len Regular(Instruction::Call(0)), Regular(Instruction::I32Eqz), Regular(Instruction::If(BlockType::NoResult)), @@ -1507,145 +1339,996 @@ benchmarks! { ])), .. Default::default() }); - let instance = Contract::::new(code, vec![], Endow::Max)?; + let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Only the overhead of calling the function itself with minimal arguments. seal_hash_sha2_256 { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_sha2_256", r * API_BENCHMARK_BATCH_SIZE, 0, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // `n`: Input to hash in kilobytes seal_hash_sha2_256_per_kb { let n in 0 .. code::max_pages::() * 64; let instance = Contract::::new(WasmModule::hasher( "seal_hash_sha2_256", API_BENCHMARK_BATCH_SIZE, n * 1024, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Only the overhead of calling the function itself with minimal arguments. seal_hash_keccak_256 { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_keccak_256", r * API_BENCHMARK_BATCH_SIZE, 0, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // `n`: Input to hash in kilobytes seal_hash_keccak_256_per_kb { let n in 0 .. code::max_pages::() * 64; let instance = Contract::::new(WasmModule::hasher( "seal_hash_keccak_256", API_BENCHMARK_BATCH_SIZE, n * 1024, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Only the overhead of calling the function itself with minimal arguments. seal_hash_blake2_256 { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_256", r * API_BENCHMARK_BATCH_SIZE, 0, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // `n`: Input to hash in kilobytes seal_hash_blake2_256_per_kb { let n in 0 .. 
code::max_pages::() * 64; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_256", API_BENCHMARK_BATCH_SIZE, n * 1024, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // Only the overhead of calling the function itself with minimal arguments. seal_hash_blake2_128 { let r in 0 .. API_BENCHMARK_BATCHES; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_128", r * API_BENCHMARK_BATCH_SIZE, 0, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) // `n`: Input to hash in kilobytes seal_hash_blake2_128_per_kb { let n in 0 .. code::max_pages::() * 64; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_128", API_BENCHMARK_BATCH_SIZE, n * 1024, - ), vec![], Endow::Max)?; + ), vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); - }: call(origin, instance.addr, 0.into(), Weight::max_value(), vec![]) -} + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; - use paste::paste; - - macro_rules! create_test { - ($name:ident) => { - #[test] - fn $name() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(paste!{ - []::() - }); - }); - } + // Only calling the function itself with valid arguments. + // It generates different private keys and signatures for the message "Hello world". + seal_ecdsa_recover { + let r in 0 .. API_BENCHMARK_BATCHES; + use rand::SeedableRng; + let mut rng = rand_pcg::Pcg32::seed_from_u64(123456); + + let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes()); + let signatures = (0..r * API_BENCHMARK_BATCH_SIZE) + .map(|i| { + use secp256k1::{SecretKey, Message, sign}; + + let private_key = SecretKey::random(&mut rng); + let (signature, recovery_id) = sign(&Message::parse(&message_hash), &private_key); + let mut full_signature = [0; 65]; + full_signature[..64].copy_from_slice(&signature.serialize()); + full_signature[64] = recovery_id.serialize(); + full_signature + }) + .collect::>(); + let signatures = signatures.iter().flatten().cloned().collect::>(); + let signatures_bytes_len = signatures.len() as i32; + + let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + imported_functions: vec![ImportedFunction { + module: "__unstable__", + name: "seal_ecdsa_recover", + params: vec![ValueType::I32, ValueType::I32, ValueType::I32], + return_type: Some(ValueType::I32), + }], + data_segments: vec![ + DataSegment { + offset: 0, + value: message_hash[..].to_vec(), + }, + DataSegment { + offset: 32, + value: signatures, + }, + ], + call_body: Some(body::repeated_dyn(r * API_BENCHMARK_BATCH_SIZE, vec![ + Counter(32, 65), // signature_ptr + Regular(Instruction::I32Const(0)), // message_hash_ptr + Regular(Instruction::I32Const(signatures_bytes_len + 32)), // output_len_ptr + Regular(Instruction::Call(0)), + Regular(Instruction::Drop), + ])), + .. 
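For reference, the operation being metered here is the inverse of the setup code: recovering a compressed public key from the 65-byte `r ++ s ++ recovery_id` signature. A sketch using the same pure-Rust `secp256k1` (libsecp256k1) API as the signature generation above, with error handling collapsed into `Option`:

```rust
use secp256k1::{recover, Message, RecoveryId, Signature};

fn ecdsa_recover_compressed(signature: &[u8; 65], message_hash: &[u8; 32]) -> Option<[u8; 33]> {
    let rid = RecoveryId::parse(signature[64]).ok()?;       // byte 64 carries the recovery id
    let sig = Signature::parse_slice(&signature[..64]).ok()?;
    let msg = Message::parse(message_hash);
    let pubkey = recover(&msg, &sig, &rid).ok()?;
    Some(pubkey.serialize_compressed())
}
```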
Default::default() + }); + let instance = Contract::<T>::new(code, vec![])?; + let origin = RawOrigin::Signed(instance.caller.clone()); + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) + + // We make the assumption that pushing a constant and dropping a value takes roughly + // the same amount of time. It follows that `t.load` and `drop` both have the weight + // of this benchmark / 2. We need to make this assumption because there is no way + // to measure them on their own using a valid wasm module. We need their individual + // values to derive the weight of individual instructions (by subtraction) from + // benchmarks that include those for parameter pushing and return type dropping. + // We call the weight of `t.load` and `drop`: `w_param`. + // The weight that would result from the respective benchmark we call: `w_bench`. + // + // w_i{32,64}const = w_drop = w_bench / 2 + instr_i64const { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_i{32,64}load = w_bench - 2 * w_param + instr_i64load { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + memory: Some(ImportedMemory::max::<T>()), + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomUnaligned(0, code::max_pages::<T>() * 64 * 1024 - 8), + Regular(Instruction::I64Load(3, 0)), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_i{32,64}store{...} = w_bench - 2 * w_param + instr_i64store { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + memory: Some(ImportedMemory::max::<T>()), + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomUnaligned(0, code::max_pages::<T>() * 64 * 1024 - 8), + RandomI64Repeated(1), + Regular(Instruction::I64Store(3, 0)), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_select = w_bench - 4 * w_param + instr_select { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomI64Repeated(1), + RandomI32(0, 2), + Regular(Instruction::Select), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_if = w_bench - 3 * w_param + instr_if { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32(0, 2), + Regular(Instruction::If(BlockType::Value(ValueType::I64))), + RandomI64Repeated(1), + Regular(Instruction::Else), + RandomI64Repeated(1), + Regular(Instruction::End), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br = w_bench - 2 * w_param + // Block instructions are not counted. + instr_br { + let r in 0 ..
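The comment above fixes the whole solving scheme, which is worth spelling out: `w_param` is defined as half of the `instr_i64const` result, and every other instruction weight falls out by subtracting the appropriate multiple of `w_param` from its benchmark. For example:

```rust
/// w_param = w_i64const_bench / 2, since pushing a constant and dropping a
/// value are assumed to cost the same (as stated in the comment above).
fn w_param(w_i64const_bench: u64) -> u64 {
    w_i64const_bench / 2
}

/// A binary operator pops two operands and pushes one result, hence the
/// `3 * w_param` correction used for the i64 comparison/arithmetic benches.
fn w_binary_instr(w_bench: u64, w_param: u64) -> u64 {
    w_bench.saturating_sub(3 * w_param)
}
```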
INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Br(1)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br_if = w_bench - 3 * w_param + // Block instructions are not counted. + instr_br_if { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::I32Const(1)), + Regular(Instruction::BrIf(1)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br_table = w_bench - 3 * w_param + // Block instructions are not counted. + instr_br_table { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let table = Box::new(BrTableData { + table: Box::new([1, 1, 1]), + default: 1, + }); + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + RandomI32(0, 4), + Regular(Instruction::BrTable(table)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_br_table_per_entry = w_bench + instr_br_table_per_entry { + let e in 1 .. T::Schedule::get().limits.br_table_size; + let entry: Vec = [0, 1].iter() + .cloned() + .cycle() + .take((e / 2) as usize).collect(); + let table = Box::new(BrTableData { + table: entry.into_boxed_slice(), + default: 0, + }); + let mut sbox = Sandbox::from(&WasmModule::::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(INSTR_BENCHMARK_BATCH_SIZE, vec![ + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + Regular(Instruction::Block(BlockType::NoResult)), + RandomI32(0, (e + 1) as i32), // Make sure the default entry is also used + Regular(Instruction::BrTable(table)), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + RandomI64Repeated(1), + Regular(Instruction::Drop), + Regular(Instruction::End), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_call = w_bench - 2 * w_param + instr_call { + let r in 0 .. 
INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + // We need to make use of the stack here in order to trigger stack height + // instrumentation. + aux_body: Some(body::plain(vec![ + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + ])), + call_body: Some(body::repeated(r * INSTR_BENCHMARK_BATCH_SIZE, &[ + Instruction::Call(2), // call aux + ])), + inject_stack_metering: true, + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_call_indirect = w_bench - 3 * w_param + instr_call_indirect { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let num_elements = T::Schedule::get().limits.table_size; + use self::code::TableSegment; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + // We need to make use of the stack here in order to trigger stack height + // instrumentation. + aux_body: Some(body::plain(vec![ + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + ])), + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32(0, num_elements as i32), + Regular(Instruction::CallIndirect(0, 0)), // we only have one sig: 0 + ])), + inject_stack_metering: true, + table: Some(TableSegment { + num_elements, + function_index: 2, // aux + }), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_instr_call_indirect_per_param = w_bench - 1 * w_param + // Calling a function indirectly causes it to go through a thunk function whose runtime + // depends linearly on the number of parameters of this function. + // Please note that this is not necessary with a direct call. + instr_call_indirect_per_param { + let p in 0 .. T::Schedule::get().limits.parameters; + let num_elements = T::Schedule::get().limits.table_size; + use self::code::TableSegment; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + // We need to make use of the stack here in order to trigger stack height + // instrumentation. + aux_body: Some(body::plain(vec![ + Instruction::I64Const(42), + Instruction::Drop, + Instruction::End, + ])), + aux_arg_num: p, + call_body: Some(body::repeated_dyn(INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(p as usize), + RandomI32(0, num_elements as i32), + Regular(Instruction::CallIndirect(p.min(1), 0)), // aux signature: 1 or 0 + ])), + inject_stack_metering: true, + table: Some(TableSegment { + num_elements, + function_index: 2, // aux + }), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_local_get = w_bench - 1 * w_param + instr_local_get { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_locals = T::Schedule::get().limits.stack_height; + let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomGetLocal(0, max_locals), + Regular(Instruction::Drop), + ]); + body::inject_locals(&mut call_body, max_locals); + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(call_body), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_local_set = w_bench - 1 * w_param + instr_local_set { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_locals = T::Schedule::get().limits.stack_height; + let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomSetLocal(0, max_locals), + ]); + body::inject_locals(&mut call_body, max_locals); + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(call_body), + ..
+ + // w_local_tee = w_bench - 2 * w_param + instr_local_tee { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_locals = T::Schedule::get().limits.stack_height; + let mut call_body = body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomTeeLocal(0, max_locals), + Regular(Instruction::Drop), + ]); + body::inject_locals(&mut call_body, max_locals); + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(call_body), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_global_get = w_bench - 1 * w_param + instr_global_get { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_globals = T::Schedule::get().limits.globals; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomGetGlobal(0, max_globals), + Regular(Instruction::Drop), + ])), + num_globals: max_globals, + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_global_set = w_bench - 1 * w_param + instr_global_set { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let max_globals = T::Schedule::get().limits.globals; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI64Repeated(1), + RandomSetGlobal(0, max_globals), + ])), + num_globals: max_globals, + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_memory_current = w_bench - 1 * w_param + instr_memory_current { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + memory: Some(ImportedMemory::max::<T>()), + call_body: Some(body::repeated(r * INSTR_BENCHMARK_BATCH_SIZE, &[ + Instruction::CurrentMemory(0), + Instruction::Drop + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // w_memory_grow = w_bench - 2 * w_param + // We can only allocate as much memory as a single contract is allowed to have. + // Therefore the repeat count is limited by the maximum memory any contract can have. + // Using a contract with more memory will skew the benchmark because the runtime of grow + // depends on how much memory is already allocated. + instr_memory_grow { + let r in 0 .. 1; + let max_pages = ImportedMemory::max::<T>().max_pages; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + memory: Some(ImportedMemory { + min_pages: 0, + max_pages, + }), + call_body: Some(body::repeated(r * max_pages, &[ + Instruction::I32Const(1), + Instruction::GrowMemory(0), + Instruction::Drop, + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + // Unary numeric instructions. + // All use w = w_bench - 2 * w_param. + + instr_i64clz { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::unary_instr( + Instruction::I64Clz, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ctz { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::unary_instr( + Instruction::I64Ctz, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64popcnt { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::unary_instr( + Instruction::I64Popcnt, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + }
+ + instr_i64eqz { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::unary_instr( + Instruction::I64Eqz, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64extendsi32 { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32Repeated(1), + Regular(Instruction::I64ExtendSI32), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + instr_i64extendui32 { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::from(ModuleDefinition { + call_body: Some(body::repeated_dyn(r * INSTR_BENCHMARK_BATCH_SIZE, vec![ + RandomI32Repeated(1), + Regular(Instruction::I64ExtendUI32), + Regular(Instruction::Drop), + ])), + .. Default::default() + })); + }: { + sbox.invoke(); + } + + instr_i32wrapi64 { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::unary_instr( + Instruction::I32WrapI64, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + // Binary numeric instructions. + // All use w = w_bench - 3 * w_param. + + instr_i64eq { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Eq, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ne { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Ne, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64lts { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64LtS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ltu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64LtU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64gts { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64GtS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64gtu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64GtU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64les { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64LeS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64leu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64LeU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64ges { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64GeS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64geu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64GeU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + }
+ + instr_i64add { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Add, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64sub { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Sub, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64mul { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Mul, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64divs { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64DivS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64divu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64DivU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64rems { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64RemS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64remu { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64RemU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64and { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64And, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64or { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Or, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64xor { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Xor, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64shl { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Shl, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64shrs { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64ShrS, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64shru { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64ShrU, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64rotl { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Rotl, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + } + + instr_i64rotr { + let r in 0 .. INSTR_BENCHMARK_BATCHES; + let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr( + Instruction::I64Rotr, + r * INSTR_BENCHMARK_BATCH_SIZE, + )); + }: { + sbox.invoke(); + }
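All of the binary benchmarks above discount three helper instructions. This is because `WasmModule::<T>::binary_instr` is assumed to generate a per-repetition body along these lines (a sketch, not code from this PR): two random operand pushes, the instruction under test, and a drop of its result, hence `w = w_bench - 3 * w_param`:

    // Assumed shape of the generated body for one repetition:
    call_body: Some(body::repeated_dyn(repeat, vec![
        RandomI64Repeated(2),       // push two random i64 operands
        Regular(instruction),       // the instruction being measured
        Regular(Instruction::Drop), // discard the result
    ]))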
+ + // This is not a benchmark. It merely exists to provide an easy way to pretty print the + // currently configured `Schedule` during benchmark development. + // It can be printed using the following command: + // cargo run --manifest-path=bin/node/cli/Cargo.toml --release \ + // --features runtime-benchmarks -- benchmark --dev --execution=native \ + // -p pallet_contracts -e print_schedule --no-median-slopes --no-min-squares + #[extra] + print_schedule { + #[cfg(feature = "std")] + { + let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) - + T::WeightInfo::on_initialize_per_trie_key(0); + let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - + T::WeightInfo::on_initialize_per_queue_item(0); + let weight_limit = T::DeletionWeightLimit::get(); + let queue_depth: u64 = T::DeletionQueueDepth::get().into(); + println!("{:#?}", Schedule::<T>::default()); + println!("###############################################"); + println!("Lazy deletion throughput per block (empty queue, full queue): {}, {}", + weight_limit / weight_per_key, + (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key, + ); + } + #[cfg(not(feature = "std"))] + return Err("Run this bench with a native runtime in order to see the schedule.".into()); + }: {} + + // Execute one erc20 transfer using the ink! erc20 example contract. + // + // `g` is used to enable gas instrumentation to compare the performance impact of + // that instrumentation at runtime. + #[extra] + ink_erc20_transfer { + let g in 0 .. 1; + let gas_metering = if g == 0 { false } else { true }; + let code = load_benchmark!("ink_erc20"); + let data = { + let new: ([u8; 4], BalanceOf<T>) = ([0x9b, 0xae, 0x9d, 0x5e], 1000u32.into()); + new.encode() + }; + let instance = Contract::<T>::new( + WasmModule::instrumented(code, gas_metering, true), data, + )?; + let data = { + let transfer: ([u8; 4], AccountIdOf<T>, BalanceOf<T>) = ( + [0x84, 0xa1, 0x5d, 0xa1], + account::<T::AccountId>("receiver", 0, 0), + 1u32.into(), + ); + transfer.encode() + }; + }: { + <Contracts<T>>::bare_call( + instance.caller, + instance.account_id, + 0u32.into(), + Weight::MAX, + data, + false, + ) + .result?; } - create_test!(update_schedule); - create_test!(put_code); - create_test!(instantiate); - create_test!(call); - create_test!(claim_surcharge); - create_test!(seal_caller); - create_test!(seal_address); - create_test!(seal_gas_left); - create_test!(seal_balance); - create_test!(seal_value_transferred); - create_test!(seal_minimum_balance); - create_test!(seal_tombstone_deposit); - create_test!(seal_rent_allowance); - create_test!(seal_block_number); - create_test!(seal_now); - create_test!(seal_weight_to_fee); - create_test!(seal_gas); - create_test!(seal_input); - create_test!(seal_input_per_kb); - create_test!(seal_return); - create_test!(seal_return_per_kb); - create_test!(seal_terminate); - create_test!(seal_restore_to); - create_test!(seal_restore_to_per_delta); - create_test!(seal_random); - create_test!(seal_deposit_event); - create_test!(seal_deposit_event_per_topic_and_kb); - create_test!(seal_set_rent_allowance); - create_test!(seal_set_storage); - create_test!(seal_set_storage_per_kb); - create_test!(seal_get_storage); - create_test!(seal_get_storage_per_kb); - create_test!(seal_transfer); - create_test!(seal_call); - create_test!(seal_call_per_transfer_input_output_kb); - create_test!(seal_clear_storage); - create_test!(seal_hash_sha2_256); - create_test!(seal_hash_sha2_256_per_kb); - create_test!(seal_hash_keccak_256); - create_test!(seal_hash_keccak_256_per_kb); - create_test!(seal_hash_blake2_256); - create_test!(seal_hash_blake2_256_per_kb); - create_test!(seal_hash_blake2_128); - create_test!(seal_hash_blake2_128_per_kb);
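The four byte arrays in the benchmark above are ink! message selectors. The call data handed to `bare_call` is assumed to be the selector followed by the SCALE encoded arguments, roughly as follows (`dest` and `amount` are placeholders; `encode` comes from the `codec` crate):

    // Sketch: building the input for the erc20 `transfer` message.
    let selector: [u8; 4] = [0x84, 0xa1, 0x5d, 0xa1]; // selector used above
    let data = (selector, dest, amount).encode();     // SCALE encoding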
+ + // Execute one erc20 transfer using the OpenZeppelin erc20 contract compiled with solang. + // + // `g` is used to enable gas instrumentation to compare the performance impact of + // that instrumentation at runtime. + #[extra] + solang_erc20_transfer { + let g in 0 .. 1; + let gas_metering = if g == 0 { false } else { true }; + let code = include_bytes!("../../benchmarks/solang_erc20.wasm"); + let caller = account::<T::AccountId>("instantiator", 0, 0); + let mut balance = [0u8; 32]; + balance[0] = 100; + let data = { + let new: ([u8; 4], &str, &str, [u8; 32], AccountIdOf<T>) = ( + [0xa6, 0xf1, 0xf5, 0xe1], + "KSM", + "K", + balance, + caller.clone(), + ); + new.encode() + }; + let instance = Contract::<T>::with_caller( + caller, WasmModule::instrumented(code, gas_metering, true), data, + )?; + balance[0] = 1; + let data = { + let transfer: ([u8; 4], AccountIdOf<T>, [u8; 32]) = ( + [0x6a, 0x46, 0x73, 0x94], + account::<T::AccountId>("receiver", 0, 0), + balance, + ); + transfer.encode() + }; + }: { + <Contracts<T>>::bare_call( + instance.caller, + instance.account_id, + 0u32.into(), + Weight::MAX, + data, + false, + ) + .result?; + } } + +impl_benchmark_test_suite!( + Contracts, + crate::tests::ExtBuilder::default().build(), + crate::tests::Test, +); diff --git a/frame/contracts/src/benchmarking/sandbox.rs b/frame/contracts/src/benchmarking/sandbox.rs new file mode 100644 index 0000000000000..320ac90cce64e --- /dev/null +++ b/frame/contracts/src/benchmarking/sandbox.rs @@ -0,0 +1,52 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! For instruction benchmarking we do not instantiate a full contract but merely the +//! sandbox to execute the wasm code. This is because we do not need the full +//! environment that provides the seal interface as imported functions. +use super::{code::WasmModule, Config}; +use sp_core::crypto::UncheckedFrom; +use sp_sandbox::{EnvironmentDefinitionBuilder, Instance, Memory}; + +/// Minimal execution environment without any exported functions. +pub struct Sandbox { + instance: Instance<()>, + _memory: Option<Memory>, +} + +impl Sandbox { + /// Invoke the `call` function of a contract code and panic on any execution error. + pub fn invoke(&mut self) { + self.instance.invoke("call", &[], &mut ()).unwrap(); + } +}
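Taken together with the instruction benchmarks above, a typical use of this type looks as follows (repeated from those benchmarks; `r` is the benchmark's batch parameter):

    // Instantiate the module in the sandbox and run its exported `call`
    // function; `invoke` panics on any trap, which fails the benchmark.
    let mut sbox = Sandbox::from(&WasmModule::<T>::binary_instr(
        Instruction::I64Add,
        r * INSTR_BENCHMARK_BATCH_SIZE,
    ));
    sbox.invoke();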
+ +impl<T> From<&WasmModule<T>> for Sandbox +where + T: Config, + T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>, +{ + /// Creates an instance from the supplied module and supplies as much memory + /// to the instance as the module declares as imported. + fn from(module: &WasmModule<T>) -> Self { + let mut env_builder = EnvironmentDefinitionBuilder::new(); + let memory = module.add_memory(&mut env_builder); + let instance = Instance::new(&module.code, &env_builder, &mut ()) + .expect("Failed to create benchmarking Sandbox instance"); + Self { instance, _memory: memory } + } +} diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs new file mode 100644 index 0000000000000..14080102933cb --- /dev/null +++ b/frame/contracts/src/chain_extension.rs @@ -0,0 +1,398 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A mechanism for runtime authors to augment the functionality of contracts. +//! +//! The runtime is able to call into any contract and retrieve the result using +//! [`bare_call`](crate::Pallet::bare_call). This already allows customization of runtime +//! behaviour by user generated code (contracts). However, often it is more straightforward +//! to allow the reverse behaviour: The contract calls into the runtime. We call the latter +//! one a "chain extension" because it allows the chain to extend the set of functions that are +//! callable by a contract. +//! +//! In order to create a chain extension the runtime author implements the [`ChainExtension`] +//! trait and declares it in this pallet's [configuration trait](crate::Config). All types +//! required for this endeavour are defined or re-exported in this module. There is an +//! implementation on `()` which can be used to signal that no chain extension is available. +//! +//! # Security +//! +//! The chain author alone is responsible for the security of the chain extension. +//! This includes avoiding the exposure of exploitable functions and charging the +//! appropriate amount of weight. In order to do so benchmarks must be written and the +//! [`charge_weight`](Environment::charge_weight) function must be called **before** +//! carrying out any action that causes the consumption of the chargeable weight. +//! It cannot be overstated how delicate a process the creation of a chain extension +//! is. Check whether using [`bare_call`](crate::Pallet::bare_call) suffices for the +//! use case at hand. +//! +//! # Benchmarking +//! +//! The builtin contract callable functions that pallet-contracts provides all have +//! benchmarks that determine the correct weight that an invocation of these functions +//! induces. In order to be able to charge the correct weight for the functions defined +//! by a chain extension, benchmarks must be written, too. In the near future this crate +//! will provide the means for easier creation of those specialized benchmarks. +//! +//! # Example +//! +//! The ink! repository maintains an +//! [end-to-end example](https://github.com/paritytech/ink/tree/master/examples/rand-extension) +//! on how to use a chain extension in order to provide new features to ink! contracts.
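To make the trait defined below concrete, a minimal extension could look roughly like this (a sketch only; the `func_id` value, the weight constant, and the length cap are made up for illustration):

    // Hypothetical chain extension that echoes its input back to the caller.
    pub struct EchoExtension;
    impl ChainExtension for EchoExtension {
        fn call<E>(func_id: u32, env: Environment<E, InitState>) -> Result<RetVal>
        where
            E: Ext,
            <E::T as SysConfig>::AccountId:
                UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>,
        {
            match func_id {
                1 => {
                    let mut env = env.buf_in_buf_out();
                    // Charge weight *before* doing any chargeable work.
                    env.charge_weight(100_000)?; // illustrative value
                    let input = env.read(256)?;  // illustrative length cap
                    env.write(&input, false, None)?;
                    Ok(RetVal::Converging(0))
                },
                _ => Err(DispatchError::Other("unknown func_id")),
            }
        }
    }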
+ +use crate::{ + gas::ChargedAmount, + wasm::{Runtime, RuntimeCosts}, + Error, +}; +use codec::{Decode, MaxEncodedLen}; +use frame_support::weights::Weight; +use sp_runtime::DispatchError; +use sp_std::{marker::PhantomData, vec::Vec}; + +pub use crate::{exec::Ext, Config}; +pub use frame_system::Config as SysConfig; +pub use pallet_contracts_primitives::ReturnFlags; +pub use sp_core::crypto::UncheckedFrom; +pub use state::Init as InitState; + +/// Result that returns a [`DispatchError`] on error. +pub type Result<T> = sp_std::result::Result<T, DispatchError>; + +/// A trait used to extend the set of contract callable functions. +/// +/// In order to create a custom chain extension this trait must be implemented and supplied +/// to the pallet contracts configuration trait as the associated type of the same name. +/// Consult the [module documentation](self) for a general explanation of chain extensions. +pub trait ChainExtension { + /// Call the chain extension logic. + /// + /// This is the only function that needs to be implemented in order to write a + /// chain extension. It is called whenever a contract calls the `seal_call_chain_extension` + /// imported wasm function. + /// + /// # Parameters + /// - `func_id`: The first argument to `seal_call_chain_extension`. Usually used to determine + /// which function to execute. + /// - `env`: Access to the remaining arguments and the execution environment. + /// + /// # Return + /// + /// In case of `Err` the contract execution is immediately suspended and the passed error + /// is returned to the caller. Otherwise the value of [`RetVal`] determines the exit + /// behaviour. + fn call<E>(func_id: u32, env: Environment<E, InitState>) -> Result<RetVal> + where + E: Ext, + <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>; + + /// Determines whether chain extensions are enabled for this chain. + /// + /// The default implementation returns `true`. Therefore it is not necessary to overwrite + /// this function when implementing a chain extension. In case of `false` the deployment of + /// a contract that references `seal_call_chain_extension` will be denied and calling this + /// function will return [`NoChainExtension`](Error::NoChainExtension) without first calling + /// into [`call`](Self::call). + fn enabled() -> bool { + true + } +} + +/// Implementation that indicates that no chain extension is available. +impl ChainExtension for () { + fn call<E>(_func_id: u32, mut _env: Environment<E, InitState>) -> Result<RetVal> + where + E: Ext, + <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>, + { + // Never called since [`Self::enabled()`] is set to `false`. Because we want to + // avoid panics at all costs we supply a sensible error value here instead + // of an `unimplemented!`. + Err(Error::<E::T>::NoChainExtension.into()) + } + + fn enabled() -> bool { + false + } +} + +/// Determines the exit behaviour and return value of a chain extension. +pub enum RetVal { + /// The chain extension returns the supplied value to its calling contract. + Converging(u32), + /// The control does **not** return to the calling contract. + /// + /// Use this to stop the execution of the contract when the chain extension returns. + /// The semantic is the same as for calling `seal_return`: The control returns to + /// the caller of the currently executing contract yielding the supplied buffer and + /// flags. + Diverging { flags: ReturnFlags, data: Vec<u8> }, +}
+ +/// Grants the chain extension access to its parameters and execution environment. +/// +/// It uses [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html) +/// to enforce the correct usage of the parameters passed to the chain extension. +pub struct Environment<'a, 'b, E: Ext, S: state::State> { + /// The actual data of this type. + inner: Inner<'a, 'b, E>, + /// `S` is only used in the type system but never as value. + phantom: PhantomData<S>, +} + +/// Functions that are available in every state of this type. +impl<'a, 'b, E: Ext, S: state::State> Environment<'a, 'b, E, S> +where + <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>, +{ + /// Charge the passed `amount` of weight from the overall limit. + /// + /// It returns `Ok` when the remaining weight budget is larger than the passed + /// `weight`. It returns `Err` otherwise. In this case the chain extension should + /// abort the execution and pass through the error. + /// + /// The returned value can be used with [`Self::adjust_weight`]. Other than that + /// it has no purpose. + /// + /// # Note + /// + /// Weight is synonymous with gas in substrate. + pub fn charge_weight(&mut self, amount: Weight) -> Result<ChargedAmount> { + self.inner.runtime.charge_gas(RuntimeCosts::ChainExtension(amount)) + } + + /// Adjust a previously charged amount down to its actual amount. + /// + /// This is useful when an a priori maximum amount was charged and part of it should + /// be refunded to match the actual amount. + pub fn adjust_weight(&mut self, charged: ChargedAmount, actual_weight: Weight) { + self.inner + .runtime + .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight)) + } + + /// Grants access to the execution environment of the current contract call. + /// + /// Consult the functions on the returned type before re-implementing those functions. + pub fn ext(&mut self) -> &mut E { + self.inner.runtime.ext() + } +} + +/// Functions that are only available in the initial state of this type. +/// +/// Those are the functions that determine how the arguments to the chain extensions +/// should be consumed. +impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { + /// Creates a new environment for consumption by a chain extension. + /// + /// It is only available to this crate because only the wasm runtime module needs to + /// ever create this type. Chain extensions merely consume it. + pub(crate) fn new( + runtime: &'a mut Runtime<'b, E>, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + output_len_ptr: u32, + ) -> Self { + Environment { + inner: Inner { runtime, input_ptr, input_len, output_ptr, output_len_ptr }, + phantom: PhantomData, + } + } + + /// Use all arguments as integer values. + pub fn only_in(self) -> Environment<'a, 'b, E, state::OnlyIn> { + Environment { inner: self.inner, phantom: PhantomData } + } + + /// Use input arguments as integer and output arguments as pointer to a buffer. + pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, state::PrimInBufOut> { + Environment { inner: self.inner, phantom: PhantomData } + } + + /// Use input and output arguments as pointers to a buffer. + pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, state::BufInBufOut> { + Environment { inner: self.inner, phantom: PhantomData } + } +}
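In practice a chain extension picks exactly one of the transitions above and then charges before reading, adjusting afterwards if it overestimated. A sketch of the expected flow (`max_weight`, `MAX_LEN` and `actual_weight` are placeholders):

    let mut env = env.buf_in_buf_out();           // commit to buffer in/out
    let charged = env.charge_weight(max_weight)?; // charge the worst case first
    let input = env.read(MAX_LEN)?;               // only then read the input
    // ... perform the actual work, determining `actual_weight` ...
    env.adjust_weight(charged, actual_weight);    // refund the overestimate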
+ +/// Functions to use the input arguments as integers. +impl<'a, 'b, E: Ext, S: state::PrimIn> Environment<'a, 'b, E, S> { + /// The `input_ptr` argument. + pub fn val0(&self) -> u32 { + self.inner.input_ptr + } + + /// The `input_len` argument. + pub fn val1(&self) -> u32 { + self.inner.input_len + } +} + +/// Functions to use the output arguments as integers. +impl<'a, 'b, E: Ext, S: state::PrimOut> Environment<'a, 'b, E, S> { + /// The `output_ptr` argument. + pub fn val2(&self) -> u32 { + self.inner.output_ptr + } + + /// The `output_len_ptr` argument. + pub fn val3(&self) -> u32 { + self.inner.output_len_ptr + } +} + +/// Functions to use the input arguments as pointer to a buffer. +impl<'a, 'b, E: Ext, S: state::BufIn> Environment<'a, 'b, E, S> +where + <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>, +{ + /// Reads `min(max_len, in_len)` from contract memory. + /// + /// This does **not** charge any weight. The caller must make sure that an + /// appropriate amount of weight is charged **before** reading from contract memory. + /// The reason for that is that usually the costs for reading data and processing + /// said data cannot be separated in a benchmark. Therefore a chain extension would + /// charge the overall costs either using `max_len` (worst case approximation) or using + /// [`in_len()`](Self::in_len). + pub fn read(&self, max_len: u32) -> Result<Vec<u8>> { + self.inner + .runtime + .read_sandbox_memory(self.inner.input_ptr, self.inner.input_len.min(max_len)) + } + + /// Reads `min(buffer.len(), in_len)` from contract memory. + /// + /// This takes a mutable pointer to a buffer, fills it with data and shrinks it to + /// the size of the actual data. Apart from supporting pre-allocated buffers it is + /// equivalent to [`read()`](Self::read). + pub fn read_into(&self, buffer: &mut &mut [u8]) -> Result<()> { + let len = buffer.len(); + let sliced = { + let buffer = core::mem::take(buffer); + &mut buffer[..len.min(self.inner.input_len as usize)] + }; + self.inner.runtime.read_sandbox_memory_into_buf(self.inner.input_ptr, sliced)?; + *buffer = sliced; + Ok(()) + } + + /// Reads and decodes a type with a size fixed at compile time from contract memory. + /// + /// This function is secure and recommended for all input types of fixed size + /// as long as the cost of reading the memory is included in the overall already charged + /// weight of the chain extension. This should usually be the case when fixed input types + /// are used. + pub fn read_as<T: Decode + MaxEncodedLen>(&mut self) -> Result<T> { + self.inner.runtime.read_sandbox_memory_as(self.inner.input_ptr) + } + + /// Reads and decodes a type with a dynamic size from contract memory. + /// + /// Make sure to include `len` in your weight calculations. + pub fn read_as_unbounded<T: Decode>(&mut self, len: u32) -> Result<T> { + self.inner.runtime.read_sandbox_memory_as_unbounded(self.inner.input_ptr, len) + } + + /// The length of the input as passed in as `input_len`. + /// + /// A chain extension would use this value to calculate the dynamic part of its + /// weight. For example a chain extension that calculates the hash of some passed in + /// bytes would use `in_len` to charge the costs of hashing that amount of bytes. + /// This also subsumes the act of copying those bytes as a benchmark measures both. + pub fn in_len(&self) -> u32 { + self.inner.input_len + } +} + +/// Functions to use the output arguments as pointer to a buffer. +impl<'a, 'b, E: Ext, S: state::BufOut> Environment<'a, 'b, E, S> +where + <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>, +{ + /// Write the supplied buffer to contract memory. + /// + /// If the contract supplied buffer is smaller than the passed `buffer` an `Err` is returned.
+ /// If `allow_skip` is set to true the contract is allowed to skip the copying of the buffer + /// by supplying the guard value of `u32::MAX` as `out_ptr`. The + /// `weight_per_byte` is only charged when the write actually happens and is not skipped or + /// failed due to a too small output buffer. + pub fn write( + &mut self, + buffer: &[u8], + allow_skip: bool, + weight_per_byte: Option, + ) -> Result<()> { + self.inner.runtime.write_sandbox_output( + self.inner.output_ptr, + self.inner.output_len_ptr, + buffer, + allow_skip, + |len| { + weight_per_byte.map(|w| RuntimeCosts::ChainExtension(w.saturating_mul(len.into()))) + }, + ) + } +} + +/// The actual data of an `Environment`. +/// +/// All data is put into this struct to easily pass it around as part of the typestate +/// pattern. Also it creates the opportunity to box this struct in the future in case it +/// gets too large. +struct Inner<'a, 'b, E: Ext> { + /// The runtime contains all necessary functions to interact with the running contract. + runtime: &'a mut Runtime<'b, E>, + /// Verbatim argument passed to `seal_call_chain_extension`. + input_ptr: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. + input_len: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. + output_ptr: u32, + /// Verbatim argument passed to `seal_call_chain_extension`. + output_len_ptr: u32, +} + +/// Private submodule with public types to prevent other modules from naming them. +mod state { + pub trait State {} + + pub trait PrimIn: State {} + pub trait PrimOut: State {} + pub trait BufIn: State {} + pub trait BufOut: State {} + + /// The initial state of an [`Environment`](`super::Environment`). + /// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). + pub enum Init {} + pub enum OnlyIn {} + pub enum PrimInBufOut {} + pub enum BufInBufOut {} + + impl State for Init {} + impl State for OnlyIn {} + impl State for PrimInBufOut {} + impl State for BufInBufOut {} + + impl PrimIn for OnlyIn {} + impl PrimOut for OnlyIn {} + impl PrimIn for PrimInBufOut {} + impl BufOut for PrimInBufOut {} + impl BufIn for BufInBufOut {} + impl BufOut for BufInBufOut {} +} diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index bc99431c85e65..cc468466c2922 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1,81 +1,63 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. use crate::{ - CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, - TrieId, BalanceOf, ContractInfo, TrieIdGenerator, - gas::GasMeter, rent, storage, Error, ContractInfoOf + gas::GasMeter, storage::Storage, AccountCounter, BalanceOf, CodeHash, Config, ContractInfo, + ContractInfoOf, Error, Event, Pallet as Contracts, Schedule, }; -use bitflags::bitflags; -use sp_std::prelude::*; -use sp_runtime::traits::{Bounded, Zero, Convert, Saturating}; use frame_support::{ - dispatch::DispatchError, - traits::{ExistenceRequirement, Currency, Time, Randomness}, + dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable}, + ensure, + storage::{with_transaction, TransactionOutcome}, + traits::{Contains, Currency, ExistenceRequirement, Get, OriginTrait, Randomness, Time}, weights::Weight, - ensure, StorageMap, }; - -pub type AccountIdOf = ::AccountId; -pub type MomentOf = <::Time as Time>::Moment; -pub type SeedOf = ::Hash; -pub type BlockNumberOf = ::BlockNumber; +use frame_system::RawOrigin; +use pallet_contracts_primitives::ExecReturnValue; +use smallvec::{Array, SmallVec}; +use sp_core::crypto::UncheckedFrom; +use sp_io::crypto::secp256k1_ecdsa_recover_compressed; +use sp_runtime::traits::{Convert, Saturating}; +use sp_std::{marker::PhantomData, mem, prelude::*}; + +/// When fields are added to the [`ContractInfo`] that can change during execution this +/// variable needs to be set to true. This will also force changes to the +/// `in_memory_changes_not_discarded` test. +const CONTRACT_INFO_CAN_CHANGE: bool = false; + +pub type AccountIdOf = ::AccountId; +pub type MomentOf = <::Time as Time>::Moment; +pub type SeedOf = ::Hash; +pub type BlockNumberOf = ::BlockNumber; pub type StorageKey = [u8; 32]; +pub type ExecResult = Result; /// A type that represents a topic of an event. At the moment a hash is used. -pub type TopicOf = ::Hash; - -bitflags! { - /// Flags used by a contract to customize exit behaviour. - pub struct ReturnFlags: u32 { - /// If this bit is set all changes made by the contract exection are rolled back. - const REVERT = 0x0000_0001; - } -} +pub type TopicOf = ::Hash; -/// Describes whether we deal with a contract or a plain account. -pub enum TransactorKind { - /// Transaction was initiated from a plain account. That can be either be through a - /// signed transaction or through RPC. - PlainAccount, - /// The call was initiated by a contract account. - Contract, -} - -/// Output of a contract call or instantiation which ran to completion. -#[cfg_attr(test, derive(PartialEq, Eq, Debug))] -pub struct ExecReturnValue { - /// Flags passed along by `seal_return`. Empty when `seal_return` was never called. - pub flags: ReturnFlags, - /// Buffer passed along by `seal_return`. Empty when `seal_return` was never called. - pub data: Vec, -} - -impl ExecReturnValue { - /// We understand the absense of a revert flag as success. - pub fn is_success(&self) -> bool { - !self.flags.contains(ReturnFlags::REVERT) - } -} - -/// Call or instantiate both call into other contracts and pass through errors happening -/// in those to the caller. 
This enum is for the caller to distinguish whether the error +/// Origin of the error. +/// +/// Call or instantiate both call into other contracts and pass through errors happening +/// in those to the caller. This enum is for the caller to distinguish whether the error /// happened during the execution of the callee or in the current execution context. -#[cfg_attr(test, derive(PartialEq, Eq, Debug))] +#[cfg_attr(test, derive(Debug, PartialEq))] pub enum ErrorOrigin { + /// Caller error origin. + /// /// The error happened in the current execution context rather than in the one /// of the contract that is called into. Caller, @@ -84,7 +66,7 @@ pub enum ErrorOrigin { } /// Error returned by contract execution. -#[cfg_attr(test, derive(PartialEq, Eq, Debug))] +#[cfg_attr(test, derive(Debug, PartialEq))] pub struct ExecError { /// The reason why the execution failed. pub error: DispatchError, @@ -94,89 +76,77 @@ impl<T: Into<DispatchError>> From<T> for ExecError { fn from(error: T) -> Self { - Self { - error: error.into(), - origin: ErrorOrigin::Caller, - } + Self { error: error.into(), origin: ErrorOrigin::Caller } } } -/// The result that is returned from contract execution. It either contains the output -/// buffer or an error describing the reason for failure. -pub type ExecResult = Result<ExecReturnValue, ExecError>; /// An interface that provides access to the external environment in which the /// smart-contract is executed. /// /// This interface is specialized to an account of the executing code, so all /// operations are implicitly performed on that account. -pub trait Ext { - type T: Trait; +/// +/// # Note +/// +/// This trait is sealed and cannot be implemented by downstream crates. +pub trait Ext: sealing::Sealed { + type T: Config; - /// Returns the storage entry of the executing account by the given `key`. + /// Call (possibly transferring some amount of funds) into the specified account. /// - /// Returns `None` if the `key` wasn't previously set by `set_storage` or - /// was deleted. - fn get_storage(&self, key: &StorageKey) -> Option<Vec<u8>>; - - /// Sets the storage entry by the given key to the specified value. If `value` is `None` then - /// the storage entry is deleted. - fn set_storage(&mut self, key: StorageKey, value: Option<Vec<u8>>); + /// Returns the original code size of the called contract. + /// + /// # Return Value + /// + /// Result<(ExecReturnValue, CodeSize), (ExecError, CodeSize)> + fn call( + &mut self, + gas_limit: Weight, + to: AccountIdOf<Self::T>, + value: BalanceOf<Self::T>, + input_data: Vec<u8>, + allows_reentry: bool, + ) -> Result<ExecReturnValue, ExecError>; /// Instantiate a contract from the given code. /// - /// The newly created account will be associated with `code`. `value` specifies the amount of value - /// transferred from this to the newly created account (also known as endowment). + /// Returns the original code size of the called contract. + /// The newly created account will be associated with `code`. `value` specifies the amount of + /// value transferred from this to the newly created account (also known as endowment). + /// + /// # Return Value + /// + /// Result<(AccountId, ExecReturnValue, CodeSize), (ExecError, CodeSize)> fn instantiate( &mut self, - code: &CodeHash<Self::T>, + gas_limit: Weight, + code: CodeHash<Self::T>, value: BalanceOf<Self::T>, - gas_meter: &mut GasMeter<Self::T>, input_data: Vec<u8>, + salt: &[u8], ) -> Result<(AccountIdOf<Self::T>, ExecReturnValue), ExecError>; - /// Transfer some amount of funds into the specified account.
- fn transfer( - &mut self, - to: &AccountIdOf, - value: BalanceOf, - ) -> Result<(), DispatchError>; - /// Transfer all funds to `beneficiary` and delete the contract. /// - /// Since this function removes the self contract eagerly, if succeeded, no further actions should - /// be performed on this `Ext` instance. + /// Since this function removes the self contract eagerly, if succeeded, no further actions + /// should be performed on this `Ext` instance. /// /// This function will fail if the same contract is present on the contract /// call stack. - fn terminate( - &mut self, - beneficiary: &AccountIdOf, - ) -> Result<(), DispatchError>; + fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError>; - /// Call (possibly transferring some amount of funds) into the specified account. - fn call( - &mut self, - to: &AccountIdOf, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> ExecResult; + /// Transfer some amount of funds into the specified account. + fn transfer(&mut self, to: &AccountIdOf, value: BalanceOf) -> DispatchResult; - /// Restores the given destination contract sacrificing the current one. - /// - /// Since this function removes the self contract eagerly, if succeeded, no further actions should - /// be performed on this `Ext` instance. + /// Returns the storage entry of the executing account by the given `key`. /// - /// This function will fail if the same contract is present - /// on the contract call stack. - fn restore_to( - &mut self, - dest: AccountIdOf, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, - ) -> Result<(), &'static str>; + /// Returns `None` if the `key` wasn't previously set by `set_storage` or + /// was deleted. + fn get_storage(&mut self, key: &StorageKey) -> Option>; + + /// Sets the storage entry by the given key to the specified value. If `value` is `None` then + /// the storage entry is deleted. + fn set_storage(&mut self, key: StorageKey, value: Option>) -> DispatchResult; /// Returns a reference to the account id of the caller. fn caller(&self) -> &AccountIdOf; @@ -198,23 +168,17 @@ pub trait Ext { /// Returns the minimum balance that is required for creating an account. fn minimum_balance(&self) -> BalanceOf; - /// Returns the deposit required to create a tombstone upon contract eviction. - fn tombstone_deposit(&self) -> BalanceOf; + /// Returns the deposit required to instantiate a contract. + fn contract_deposit(&self) -> BalanceOf; /// Returns a random number for the current block with the given subject. - fn random(&self, subject: &[u8]) -> SeedOf; + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf); /// Deposit an event with the given topics. /// /// There should not be any duplicates in `topics`. fn deposit_event(&mut self, topics: Vec>, data: Vec); - /// Set rent allowance of the contract - fn set_rent_allowance(&mut self, rent_allowance: BalanceOf); - - /// Rent allowance of the contract - fn rent_allowance(&self) -> BalanceOf; - /// Returns the current block number. fn block_number(&self) -> BlockNumberOf; @@ -223,484 +187,798 @@ pub trait Ext { /// Returns the price for the specified amount of weight. fn get_weight_price(&self, weight: Weight) -> BalanceOf; + + /// Get a reference to the schedule used by the current call. + fn schedule(&self) -> &Schedule; + + /// Get a mutable reference to the nested gas meter. + fn gas_meter(&mut self) -> &mut GasMeter; + + /// Append a string to the debug buffer. + /// + /// It is added as-is without any additional new line. 
+ /// + /// This is a no-op if debug message recording is disabled which is always the case + /// when the code is executing on-chain. + /// + /// Returns `true` if debug message recording is enabled. Otherwise `false` is returned. + fn append_debug_buffer(&mut self, msg: &str) -> bool; + + /// Call some dispatchable and return the result. + fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo; + + /// Recovers ECDSA compressed public key based on signature and message hash. + fn ecdsa_recover(&self, signature: &[u8; 65], message_hash: &[u8; 32]) -> Result<[u8; 33], ()>; } -/// Loader is a companion of the `Vm` trait. It loads an appropriate abstract -/// executable to be executed by an accompanying `Vm` implementation. -pub trait Loader { - type Executable; - - /// Load the initializer portion of the code specified by the `code_hash`. This - /// executable is called upon instantiation. - fn load_init(&self, code_hash: &CodeHash) -> Result; - /// Load the main portion of the code specified by the `code_hash`. This executable - /// is called for each call to a contract. - fn load_main(&self, code_hash: &CodeHash) -> Result; +/// Describes the different functions that can be exported by an [`Executable`]. +#[derive(Clone, Copy, PartialEq)] +pub enum ExportedFunction { + /// The constructor function which is executed on deployment of a contract. + Constructor, + /// The function which is executed when a contract is called. + Call, } -/// A trait that represent a virtual machine. -/// -/// You can view a virtual machine as something that takes code, an input data buffer, -/// queries it and/or performs actions on the given `Ext` and optionally -/// returns an output data buffer. The type of code depends on the particular virtual machine. +/// A trait that represents something that can be executed. /// -/// Execution of code can end by either implicit termination (that is, reached the end of -/// executable), explicit termination via returning a buffer or termination due to a trap. -pub trait Vm { - type Executable; +/// In the on-chain environment this would be represented by a wasm module. This trait exists in +/// order to be able to mock the wasm logic for testing. +pub trait Executable: Sized { + /// Load the executable from storage. + /// + /// # Note + /// Charges size base load and instrumentation weight from the gas meter. + fn from_storage( + code_hash: CodeHash, + schedule: &Schedule, + gas_meter: &mut GasMeter, + ) -> Result; + /// Load the module from storage without re-instrumenting it. + /// + /// A code module is re-instrumented on-load when it was originally instrumented with + /// an older schedule. This skips this step for cases where the code storage is + /// queried for purposes other than execution. + /// + /// # Note + /// + /// Does not charge from the gas meter. Do not call in contexts where this is important. + fn from_storage_noinstr(code_hash: CodeHash) -> Result; + + /// Increment the refcount by one. Fails if the code does not exist on-chain. + /// + /// Returns the size of the original code. + /// + /// # Note + /// + /// Charges weight proportional to the code size from the gas meter. + fn add_user(code_hash: CodeHash, gas_meter: &mut GasMeter) -> Result<(), DispatchError>; + + /// Decrement the refcount by one and remove the code when it drops to zero. + /// + /// Returns the size of the original code. 
+ /// + /// # Note + /// + /// Charges weight proportional to the code size from the gas meter + fn remove_user( + code_hash: CodeHash, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError>; + + /// Execute the specified exported function and return the result. + /// + /// When the specified function is `Constructor` the executable is stored and its + /// refcount incremented. + /// + /// # Note + /// + /// This functions expects to be executed in a storage transaction that rolls back + /// all of its emitted storage changes. fn execute>( - &self, - exec: &Self::Executable, - ext: E, + self, + ext: &mut E, + function: &ExportedFunction, input_data: Vec, - gas_meter: &mut GasMeter, ) -> ExecResult; + + /// The code hash of the executable. + fn code_hash(&self) -> &CodeHash; + + /// Size of the instrumented code in bytes. + fn code_len(&self) -> u32; + + /// Sum of instrumented and pristine code len. + fn aggregate_code_len(&self) -> u32; + + // The number of contracts using this executable. + fn refcount(&self) -> u32; +} + +/// The complete call stack of a contract execution. +/// +/// The call stack is initiated by either a signed origin or one of the contract RPC calls. +/// This type implements `Ext` and by that exposes the business logic of contract execution to +/// the runtime module which interfaces with the contract (the wasm blob) itself. +pub struct Stack<'a, T: Config, E> { + /// The account id of a plain account that initiated the call stack. + /// + /// # Note + /// + /// Please note that it is possible that the id belongs to a contract rather than a plain + /// account when being called through one of the contract RPCs where the client can freely + /// choose the origin. This usually makes no sense but is still possible. + origin: T::AccountId, + /// The cost schedule used when charging from the gas meter. + schedule: &'a Schedule, + /// The gas meter where costs are charged to. + gas_meter: &'a mut GasMeter, + /// The timestamp at the point of call stack instantiation. + timestamp: MomentOf, + /// The block number at the time of call stack instantiation. + block_number: T::BlockNumber, + /// The account counter is cached here when accessed. It is written back when the call stack + /// finishes executing. + account_counter: Option, + /// The actual call stack. One entry per nested contract called/instantiated. + /// This does **not** include the [`Self::first_frame`]. + frames: SmallVec, + /// Statically guarantee that each call stack has at least one frame. + first_frame: Frame, + /// A text buffer used to output human readable information. + /// + /// All the bytes added to this field should be valid UTF-8. The buffer has no defined + /// structure and is intended to be shown to users as-is for debugging purposes. + debug_message: Option<&'a mut Vec>, + /// No executable is held by the struct but influences its behaviour. + _phantom: PhantomData, } -pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { - pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, - pub self_account: T::AccountId, - pub self_trie_id: Option, - pub depth: usize, - pub config: &'a Config, - pub vm: &'a V, - pub loader: &'a L, - pub timestamp: MomentOf, - pub block_number: T::BlockNumber, +/// Represents one entry in the call stack. +/// +/// For each nested contract call or instantiate one frame is created. It holds specific +/// information for the said call and caches the in-storage `ContractInfo` data structure. +/// +/// # Note +/// +/// This is an internal data structure. 
It is exposed to the public for the sole reason +/// of specifying [`Config::CallStack`]. +pub struct Frame<T: Config> { + /// The account id of the executing contract. + account_id: T::AccountId, + /// The cached in-storage data of the contract. + contract_info: CachedContract<T>, + /// The amount of balance transferred by the caller as part of the call. + value_transferred: BalanceOf<T>, + /// Determines whether this is a call or instantiate frame. + entry_point: ExportedFunction, + /// The gas meter capped to the supplied gas limit. + nested_meter: GasMeter<T>, + /// If `false` the contract enabled its defense against reentrance attacks. + allows_reentry: bool, +} + +/// Parameter passed in when creating a new `Frame`. +/// +/// It determines whether the new frame is for a call or an instantiate. +enum FrameArgs<'a, T: Config, E> { + Call { + /// The account id of the contract that is to be called. + dest: T::AccountId, + /// If `None` the contract info needs to be reloaded from storage. + cached_info: Option<ContractInfo<T>>, + }, + Instantiate { + /// The contract or signed origin which instantiates the new contract. + sender: T::AccountId, + /// The seed that should be used to derive a new trie id for the contract. + trie_seed: u64, + /// The executable whose `deploy` function is run. + executable: E, + /// A salt used in the contract address derivation of the new contract. + salt: &'a [u8], + }, +} + +/// Describes the different states of a contract as contained in a `Frame`. +enum CachedContract<T: Config> { + /// The cached contract is up to date with the in-storage value. + Cached(ContractInfo<T>), + /// A recursive call into the same contract did write to the contract info. + /// + /// In this case the cached contract is stale and needs to be reloaded from storage. + Invalidated, + /// The current contract executed `terminate` and removed the contract. + /// + /// In this case a reload is neither allowed nor possible. Please note that recursive + /// calls cannot remove a contract as this is checked and denied. + Terminated, +} + +impl<T: Config> Frame<T> { + /// Return the `contract_info` of the current contract. + fn contract_info(&mut self) -> &mut ContractInfo<T> { + self.contract_info.get(&self.account_id) + } + + /// Terminate and return the `contract_info` of the current contract. + /// + /// # Note + /// + /// Under no circumstances is the contract allowed to access the `contract_info` after + /// a call to this function. This would constitute a programming error in the exec module. + fn terminate(&mut self) -> ContractInfo<T> { + self.contract_info.terminate(&self.account_id) + } +}
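Since `Frame` is public only so that runtimes can size the stack via `Config::CallStack`, a runtime configuration is expected to look roughly like this (a sketch; the depth of 31 is illustrative):

    impl pallet_contracts::Config for Runtime {
        // A fixed-size array type determines the maximum call depth.
        type CallStack = [pallet_contracts::Frame<Self>; 31];
        // ... remaining associated types elided ...
    }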
+ +/// Extract the contract info after loading it from storage. +/// +/// This assumes that `load` was executed before calling this macro. +macro_rules! get_cached_or_panic_after_load { + ($c:expr) => {{ + if let CachedContract::Cached(contract) = $c { + contract + } else { + panic!( + "It is impossible to remove a contract that is on the call stack;\ + See implementations of terminate;\ + Therefore fetching a contract will never fail while using an account id + that is currently active on the call stack;\ + qed" + ); + } + }}; +} - +impl<T: Config> CachedContract<T> { + /// Load the `contract_info` from storage if necessary. + fn load(&mut self, account_id: &T::AccountId) { + if let CachedContract::Invalidated = self { + let contract = <ContractInfoOf<T>>::get(&account_id); + if let Some(contract) = contract { + *self = CachedContract::Cached(contract); + } + } + } + + /// Return the cached contract_info. + fn get(&mut self, account_id: &T::AccountId) -> &mut ContractInfo<T> { + self.load(account_id); + get_cached_or_panic_after_load!(self) + } + + /// Terminate and return the contract info. + fn terminate(&mut self, account_id: &T::AccountId) -> ContractInfo<T> { + self.load(account_id); + get_cached_or_panic_after_load!(mem::replace(self, Self::Terminated)) + } +} + +impl<'a, T, E> Stack<'a, T, E> +where + T: Config, + T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>, + E: Executable<T>, +{ + /// Create and run a new call stack by calling into `dest`. + /// + /// # Note + /// + /// `debug_message` should only ever be set to `Some` when executing as an RPC because + /// it adds allocations and could be abused to drive the runtime into an OOM panic. + /// + /// # Return Value + /// + /// Result<(ExecReturnValue, CodeSize), (ExecError, CodeSize)> + pub fn run_call( + origin: T::AccountId, + dest: T::AccountId, + gas_meter: &'a mut GasMeter<T>, + schedule: &'a Schedule<T>, + value: BalanceOf<T>, + input_data: Vec<u8>, + debug_message: Option<&'a mut Vec<u8>>, + ) -> Result<ExecReturnValue, ExecError> { + let (mut stack, executable) = Self::new( + FrameArgs::Call { dest, cached_info: None }, + origin, + gas_meter, + schedule, + value, + debug_message, + )?; + stack.run(executable, input_data) + } + + /// Create and run a new call stack by instantiating a new contract. + /// + /// # Note + /// + /// `debug_message` should only ever be set to `Some` when executing as an RPC because + /// it adds allocations and could be abused to drive the runtime into an OOM panic.
+ /// + /// # Return Value + /// + /// Result<(NewContractAccountId, ExecReturnValue), ExecError> + pub fn run_instantiate( + origin: T::AccountId, + executable: E, + gas_meter: &'a mut GasMeter, + schedule: &'a Schedule, + value: BalanceOf, + input_data: Vec, + salt: &[u8], + debug_message: Option<&'a mut Vec>, + ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { + let (mut stack, executable) = Self::new( + FrameArgs::Instantiate { + sender: origin.clone(), + trie_seed: Self::initial_trie_seed(), + executable, + salt, + }, + origin, + gas_meter, + schedule, + value, + debug_message, + )?; + let account_id = stack.top_frame().account_id.clone(); + stack.run(executable, input_data).map(|ret| (account_id, ret)) + } + + /// Create a new call stack. + fn new( + args: FrameArgs, + origin: T::AccountId, + gas_meter: &'a mut GasMeter, + schedule: &'a Schedule, + value: BalanceOf, + debug_message: Option<&'a mut Vec>, + ) -> Result<(Self, E), ExecError> { + let (first_frame, executable) = Self::new_frame(args, value, gas_meter, 0, &schedule)?; + let stack = Self { + origin, + schedule, + gas_meter, + timestamp: T::Time::now(), + block_number: >::block_number(), + account_counter: None, + first_frame, + frames: Default::default(), + debug_message, + _phantom: Default::default(), }; - let transactor_kind = self.transactor_kind(); - let caller = self.self_account.clone(); - - self.with_nested_context(dest.clone(), contract.trie_id.clone(), |nested| { - if value > BalanceOf::::zero() { - transfer( - TransferCause::Call, - transactor_kind, - &caller, - &dest, - value, - nested, - )? - } + Ok((stack, executable)) + } - let executable = nested.loader.load_main(&contract.code_hash) - .map_err(|_| Error::::CodeNotFound)?; - let output = nested.vm.execute( - &executable, - nested.new_call_context(caller, value), - input_data, - gas_meter, - ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; - Ok(output) - }) + /// Construct a new frame. + /// + /// This does not take `self` because when constructing the first frame `self` is + /// not initialized yet. + fn new_frame( + frame_args: FrameArgs, + value_transferred: BalanceOf, + gas_meter: &mut GasMeter, + gas_limit: Weight, + schedule: &Schedule, + ) -> Result<(Frame, E), ExecError> { + let (account_id, contract_info, executable, entry_point) = match frame_args { + FrameArgs::Call { dest, cached_info } => { + let contract = if let Some(contract) = cached_info { + contract + } else { + >::get(&dest).ok_or(>::ContractNotFound)? + }; + + let executable = E::from_storage(contract.code_hash, schedule, gas_meter)?; + + (dest, contract, executable, ExportedFunction::Call) + }, + FrameArgs::Instantiate { sender, trie_seed, executable, salt } => { + let account_id = + >::contract_address(&sender, executable.code_hash(), &salt); + let trie_id = Storage::::generate_trie_id(&account_id, trie_seed); + let contract = Storage::::new_contract( + &account_id, + trie_id, + executable.code_hash().clone(), + )?; + (account_id, contract, executable, ExportedFunction::Constructor) + }, + }; + + let frame = Frame { + value_transferred, + contract_info: CachedContract::Cached(contract_info), + account_id, + entry_point, + nested_meter: gas_meter.nested(gas_limit)?, + allows_reentry: true, + }; + + Ok((frame, executable)) } - pub fn instantiate(
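The instantiate arm above derives the new account from `(deployer, code_hash, salt)`, which is what makes contract addresses deterministic and lets the same code be instantiated at many addresses by varying the salt. A rough sketch of such a derivation (std's `DefaultHasher` is used purely for illustration; the pallet derives addresses with its configured cryptographic hash over the real account and hash types):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hypothetical stand-in for the pallet's deterministic address derivation.
fn contract_address(deployer: &[u8], code_hash: &[u8], salt: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    deployer.hash(&mut h);
    code_hash.hash(&mut h);
    salt.hash(&mut h);
    h.finish()
}

fn main() {
    let a = contract_address(b"alice", b"code", b"salt-0");
    let b = contract_address(b"alice", b"code", b"salt-1");
    // Same deployer and code, different salt: a different address.
    assert_ne!(a, b);
    // Fully deterministic for identical inputs.
    assert_eq!(a, contract_address(b"alice", b"code", b"salt-0"));
}

+ /// Create a subsequent nested frame.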
+ fn push_frame( &mut self, - endowment: BalanceOf, - gas_meter: &mut GasMeter, - code_hash: &CodeHash, - input_data: Vec, - ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { - if self.depth == self.config.max_depth as usize { - Err(Error::::MaxCallDepthReached)? + frame_args: FrameArgs, + value_transferred: BalanceOf, + gas_limit: Weight, + ) -> Result { + if self.frames.len() == T::CallStack::size() { + return Err(Error::::MaxCallDepthReached.into()) } - let transactor_kind = self.transactor_kind(); - let caller = self.self_account.clone(); - let dest = T::DetermineContractAddress::contract_address_for( - code_hash, - &input_data, - &caller, - ); - - // TrieId has not been generated yet and storage is empty since contract is new. - // - // Generate it now. - let dest_trie_id = ::TrieIdGenerator::trie_id(&dest); - - let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { - storage::place_contract::( - &dest, - nested - .self_trie_id - .clone() - .expect("the nested context always has to have self_trie_id"), - code_hash.clone() - )?; - - // Send funds unconditionally here. If the `endowment` is below existential_deposit - // then error will be returned here. - transfer( - TransferCause::Instantiate, - transactor_kind, - &caller, - &dest, - endowment, - nested, - )?; - - let executable = nested.loader.load_init(&code_hash) - .map_err(|_| Error::::CodeNotFound)?; - let output = nested.vm - .execute( - &executable, - nested.new_call_context(caller.clone(), endowment), - input_data, - gas_meter, - ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; - - // We need each contract that exists to be above the subsistence threshold - // in order to keep up the guarantuee that we always leave a tombstone behind - // with the exception of a contract that called `seal_terminate`. - if T::Currency::total_balance(&dest) < nested.config.subsistence_threshold() { - Err(Error::::NewContractNotFunded)? + if CONTRACT_INFO_CAN_CHANGE { + // We need to make sure that changes made to the contract info are not discarded. + // See the `in_memory_changes_not_discarded` test for more information. + // We do not store on instantiate because we do not allow calling into a contract + // from its own constructor. + let frame = self.top_frame(); + if let (CachedContract::Cached(contract), ExportedFunction::Call) = + (&frame.contract_info, frame.entry_point) + { + >::insert(frame.account_id.clone(), contract.clone()); } + } - // Deposit an instantiation event. - deposit_event::(vec![], RawEvent::Instantiated(caller.clone(), dest.clone())); + let nested_meter = + &mut self.frames.last_mut().unwrap_or(&mut self.first_frame).nested_meter; + let (frame, executable) = + Self::new_frame(frame_args, value_transferred, nested_meter, gas_limit, self.schedule)?; + self.frames.push(frame); + Ok(executable) + } + + /// Run the current (top) frame. + /// + /// This can be either a call or an instantiate. + fn run(&mut self, executable: E, input_data: Vec) -> Result { + let entry_point = self.top_frame().entry_point; + let do_transaction = || { + // Every call or instantiate also optionally transfers balance. + self.initial_transfer()?; + + // Call into the wasm blob. + let output = executable + .execute(self, &entry_point, input_data) + .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; +
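The storage transaction around `do_transaction` (visible just below) is what lets a trapping or reverting contract leave no trace while still handing its output back to the caller. The commit/rollback shape, reduced to an in-memory map (frame-support's `with_transaction` is the real API; this sketch only mirrors its commit-on-success contract):

use std::collections::HashMap;

enum Outcome<R> { Commit(R), Rollback(R) }

// Run `f` against a scratch copy of the state; keep the copy only on commit.
fn with_transaction<R>(
    state: &mut HashMap<String, u64>,
    f: impl FnOnce(&mut HashMap<String, u64>) -> Outcome<R>,
) -> R {
    let mut scratch = state.clone();
    match f(&mut scratch) {
        Outcome::Commit(r) => { *state = scratch; r }
        Outcome::Rollback(r) => r,
    }
}

fn main() {
    let mut state: HashMap<String, u64> = HashMap::from([("balance".to_string(), 100)]);
    // A failing call: the write is discarded, the result still comes back.
    let ok = with_transaction(&mut state, |s| {
        s.insert("balance".to_string(), 0);
        Outcome::Rollback(false)
    });
    assert!(!ok);
    assert_eq!(state["balance"], 100);
}

+ // Additional work needs to be performed in case of an instantiation.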
+ if output.is_success() && entry_point == ExportedFunction::Constructor { + let frame = self.top_frame_mut(); + let account_id = frame.account_id.clone(); + + // It is not allowed to terminate a contract inside its constructor. + if let CachedContract::Terminated = frame.contract_info { + return Err(Error::::TerminatedInConstructor.into()) + } + + // Deposit an instantiation event. + deposit_event::(vec![], Event::Instantiated(self.caller().clone(), account_id)); + } Ok(output) - })?; + }; - Ok((dest, output)) + // All changes performed by the contract are executed under a storage transaction. + // This allows for rollback on error. Changes to the cached contract_info are + // committed or rolled back when popping the frame. + let (success, output) = with_transaction(|| { + let output = do_transaction(); + match &output { + Ok(result) if result.is_success() => TransactionOutcome::Commit((true, output)), + _ => TransactionOutcome::Rollback((false, output)), + } + }); + self.pop_frame(success); + output } - fn new_call_context<'b>( - &'b mut self, - caller: T::AccountId, - value: BalanceOf, - ) -> CallContext<'b, 'a, T, V, L> { - let timestamp = self.timestamp.clone(); - let block_number = self.block_number.clone(); - CallContext { - ctx: self, - caller, - value_transferred: value, - timestamp, - block_number, + /// Remove the current (top) frame from the stack. + /// + /// This is called after running the current frame. It commits cached values to storage + /// and invalidates all stale references to it that might exist further down the call stack. + fn pop_frame(&mut self, persist: bool) { + // Revert the account counter in case of a failed instantiation. + if !persist && self.top_frame().entry_point == ExportedFunction::Constructor { + self.account_counter.as_mut().map(|c| *c = c.wrapping_sub(1)); } - } - /// Execute the given closure within a nested execution context. - fn with_nested_context(&mut self, dest: T::AccountId, trie_id: TrieId, func: F) - -> ExecResult - where F: FnOnce(&mut ExecutionContext) -> ExecResult - { - use frame_support::storage::TransactionOutcome::*; - let mut nested = self.nested(dest, trie_id); - frame_support::storage::with_transaction(|| { - let output = func(&mut nested); - match output { - Ok(ref rv) if !rv.flags.contains(ReturnFlags::REVERT) => Commit(output), - _ => Rollback(output), + // Pop the current frame from the stack and return it in case it needs to interact + // with duplicates that might exist on the stack. + // A `None` means that we are returning from the `first_frame`. + let frame = self.frames.pop(); + + if let Some(frame) = frame { + let prev = self.top_frame_mut(); + let account_id = &frame.account_id; + prev.nested_meter.absorb_nested(frame.nested_meter); + // Only gas counter changes are persisted in case of a failure. + if !persist { + return + } + if let CachedContract::Cached(contract) = frame.contract_info { + // Optimization: Predecessor is the same contract. + // We can just copy the contract into the predecessor without a storage write. + // This is possible when there is no other contract in-between that could + // trigger a rollback. + if prev.account_id == *account_id { + prev.contract_info = CachedContract::Cached(contract); + return + } + + // Predecessor is a different contract: We persist the info and invalidate the first + // stale cache we find. This triggers a reload from storage on next use. We skip(1) + // because that case is already handled by the optimization above.
+ // Only the first cache needs to be invalidated because that one will invalidate the next cache + // when it is popped from the stack. + >::insert(account_id, contract); + if let Some(c) = self.frames_mut().skip(1).find(|f| f.account_id == *account_id) { + c.contract_info = CachedContract::Invalidated; + } + } + } else { + if let Some((msg, false)) = self.debug_message.as_ref().map(|m| (m, m.is_empty())) { + log::debug!( + target: "runtime::contracts", + "Execution finished with debug buffer: {}", + core::str::from_utf8(msg).unwrap_or(""), + ); + } + // Write back to the root gas meter. + self.gas_meter.absorb_nested(mem::take(&mut self.first_frame.nested_meter)); + // Only gas counter changes are persisted in case of a failure. + if !persist { + return + } + if let CachedContract::Cached(contract) = &self.first_frame.contract_info { + >::insert(&self.first_frame.account_id, contract.clone()); } - }) + if let Some(counter) = self.account_counter { + >::set(counter); + } + } } - /// Returns whether a contract, identified by address, is currently live in the execution - /// stack, meaning it is in the middle of an execution. - fn is_live(&self, account: &T::AccountId) -> bool { - &self.self_account == account || - self.caller.map_or(false, |caller| caller.is_live(account)) + /// Transfer some funds from `from` to `to`. + /// + /// We only allow for draining all funds of the sender if `allow_death` is + /// specified as `true`. Otherwise, any transfer that would bring the sender below the + /// subsistence threshold (for contracts) or the existential deposit (for plain accounts) + /// results in an error. + fn transfer( + sender_is_contract: bool, + allow_death: bool, + from: &T::AccountId, + to: &T::AccountId, + value: BalanceOf, + ) -> DispatchResult { + if value == 0u32.into() { + return Ok(()) + } + + let existence_requirement = match (allow_death, sender_is_contract) { + (true, _) => ExistenceRequirement::AllowDeath, + (false, true) => { + ensure!( + T::Currency::total_balance(from).saturating_sub(value) >= + Contracts::::subsistence_threshold(), + Error::::BelowSubsistenceThreshold, + ); + ExistenceRequirement::KeepAlive + }, + (false, false) => ExistenceRequirement::KeepAlive, + }; + + T::Currency::transfer(from, to, value, existence_requirement) + .map_err(|_| Error::::TransferFailed)?; + + Ok(()) } - fn transactor_kind(&self) -> TransactorKind { - if self.depth == 0 { - debug_assert!(self.self_trie_id.is_none()); - debug_assert!(self.caller.is_none()); - debug_assert!(ContractInfoOf::::get(&self.self_account).is_none()); - TransactorKind::PlainAccount - } else { - TransactorKind::Contract + // The transfer as performed by a call or instantiate. + fn initial_transfer(&self) -> DispatchResult { + let frame = self.top_frame(); + let value = frame.value_transferred; + let subsistence_threshold = >::subsistence_threshold(); + + // If the value transferred to a new contract is less than the subsistence threshold + // we can error out early. This avoids executing the constructor in cases where + // we already know that the contract has too little balance. + if frame.entry_point == ExportedFunction::Constructor && value < subsistence_threshold { + return Err(>::NewContractNotFunded.into()) } + + Self::transfer(!self.caller_is_origin(), false, self.caller(), &frame.account_id, value) } -} -/// Describes possible transfer causes. -enum TransferCause { - Call, - Instantiate, - Terminate, -}
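The `(allow_death, sender_is_contract)` match above is the entire transfer policy: only a termination may drain the sender, and a sending contract must additionally stay above the subsistence threshold so that a tombstone can still be left behind. The same decision table as a freestanding function (illustrative names and numbers, not pallet types):

#[derive(Debug, PartialEq)]
enum Existence { AllowDeath, KeepAlive }

// Mirror of the match in `transfer` above, written as a pure function.
fn existence_requirement(
    allow_death: bool,
    sender_is_contract: bool,
    balance_after: u64,
    subsistence: u64,
) -> Result<Existence, &'static str> {
    match (allow_death, sender_is_contract) {
        // Only `terminate` passes `allow_death = true` and may empty the account.
        (true, _) => Ok(Existence::AllowDeath),
        // A sending contract must keep its subsistence balance.
        (false, true) if balance_after < subsistence => Err("BelowSubsistenceThreshold"),
        (false, true) => Ok(Existence::KeepAlive),
        // Plain accounts are only bound by the existential deposit check
        // performed by the currency itself.
        (false, false) => Ok(Existence::KeepAlive),
    }
}

fn main() {
    assert_eq!(existence_requirement(true, true, 0, 100), Ok(Existence::AllowDeath));
    assert_eq!(existence_requirement(false, true, 99, 100), Err("BelowSubsistenceThreshold"));
    assert_eq!(existence_requirement(false, false, 0, 100), Ok(Existence::KeepAlive));
}

+ /// Whether the caller is the initiator of the call stack.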
+ fn caller_is_origin(&self) -> bool { + self.frames.is_empty() + } -/// Transfer some funds from `transactor` to `dest`. -/// -/// We only allow allow for draining all funds of the sender if `cause` is -/// is specified as `Terminate`. Otherwise, any transfer that would bring the sender below the -/// subsistence threshold (for contracts) or the existential deposit (for plain accounts) -/// results in an error. -fn transfer<'a, T: Trait, V: Vm, L: Loader>( - cause: TransferCause, - origin: TransactorKind, - transactor: &T::AccountId, - dest: &T::AccountId, - value: BalanceOf, - ctx: &mut ExecutionContext<'a, T, V, L>, -) -> Result<(), DispatchError> { - use self::TransferCause::*; - use self::TransactorKind::*; - - // Only seal_terminate is allowed to bring the sender below the subsistence - // threshold or even existential deposit. - let existence_requirement = match (cause, origin) { - (Terminate, _) => ExistenceRequirement::AllowDeath, - (_, Contract) => { - ensure!( - T::Currency::total_balance(transactor).saturating_sub(value) >= - ctx.config.subsistence_threshold(), - Error::::BelowSubsistenceThreshold, - ); - ExistenceRequirement::KeepAlive - }, - (_, PlainAccount) => ExistenceRequirement::KeepAlive, - }; + /// Reference to the current (top) frame. + fn top_frame(&self) -> &Frame { + self.frames.last().unwrap_or(&self.first_frame) + } - T::Currency::transfer(transactor, dest, value, existence_requirement) - .map_err(|_| Error::::TransferFailed)?; + /// Mutable reference to the current (top) frame. + fn top_frame_mut(&mut self) -> &mut Frame { + self.frames.last_mut().unwrap_or(&mut self.first_frame) + } - Ok(()) -} + /// Iterator over all frames. + /// + /// The iterator starts with the top frame and ends with the root frame. + fn frames(&self) -> impl Iterator> { + sp_std::iter::once(&self.first_frame).chain(&self.frames).rev() + } -/// A context that is active within a call. -/// -/// This context has some invariants that must be held at all times. Specifically: -///`ctx` always points to a context of an alive contract. That implies that it has an existent -/// `self_trie_id`. -/// -/// Be advised that there are brief time spans where these invariants could be invalidated. -/// For example, when a contract requests self-termination the contract is removed eagerly. That -/// implies that the control won't be returned to the contract anymore, but there is still some code -/// on the path of the return from that call context. Therefore, care must be taken in these -/// situations. -struct CallContext<'a, 'b: 'a, T: Trait + 'b, V: Vm + 'b, L: Loader> { - ctx: &'a mut ExecutionContext<'b, T, V, L>, - caller: T::AccountId, - value_transferred: BalanceOf, - timestamp: MomentOf, - block_number: T::BlockNumber, + /// Same as `frames` but with a mutable reference as iterator item. + fn frames_mut(&mut self) -> impl Iterator> { + sp_std::iter::once(&mut self.first_frame).chain(&mut self.frames).rev() + } + + /// Returns whether the current contract is on the stack multiple times. + fn is_recursive(&self) -> bool { + let account_id = &self.top_frame().account_id; + self.frames().skip(1).any(|f| &f.account_id == account_id) + } + + /// Returns whether the specified contract can be reentered right now. + fn allows_reentry(&self, id: &AccountIdOf) -> bool { + !self.frames().any(|f| &f.account_id == id && !f.allows_reentry) + }
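Because `allows_reentry` scans every live frame, a single `false` anywhere on the stack shields that contract until the corresponding call returns; direct recursion is just the special case where the protected frame is the one doing the calling. A toy version of the check (integers as account ids; illustrative only):

struct Frame { account: u8, allows_reentry: bool }

// A contract may be entered again only if no live frame of it forbids reentry.
fn allows_reentry(stack: &[Frame], id: u8) -> bool {
    !stack.iter().any(|f| f.account == id && !f.allows_reentry)
}

fn main() {
    // BOB called CHARLIE with reentry disabled (BOB's frame carries `false`).
    let stack = vec![
        Frame { account: 1, allows_reentry: false }, // BOB
        Frame { account: 2, allows_reentry: true },  // CHARLIE
    ];
    // CHARLIE cannot call back into BOB ...
    assert!(!allows_reentry(&stack, 1));
    // ... but calling a third, uninvolved contract is fine.
    assert!(allows_reentry(&stack, 3));
}

+ /// Increments the cached account id and returns the value to be used for the trie_id.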
+ fn next_trie_seed(&mut self) -> u64 { + let next = if let Some(current) = self.account_counter { + current + 1 + } else { + Self::initial_trie_seed() + }; + self.account_counter = Some(next); + next + } + + /// The account seed to be used to instantiate the account counter cache. + fn initial_trie_seed() -> u64 { + >::get().wrapping_add(1) + } } -impl<'a, 'b: 'a, T, E, V, L> Ext for CallContext<'a, 'b, T, V, L> +impl<'a, T, E> Ext for Stack<'a, T, E> where - T: Trait + 'b, - V: Vm, - L: Loader, + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + E: Executable, { type T = T; - fn get_storage(&self, key: &StorageKey) -> Option> { - let trie_id = self.ctx.self_trie_id.as_ref().expect( - "`ctx.self_trie_id` points to an alive contract within the `CallContext`;\ - it cannot be `None`;\ - expect can't fail;\ - qed", - ); - storage::read_contract_storage(trie_id, key) - } + fn call( + &mut self, + gas_limit: Weight, + to: T::AccountId, + value: BalanceOf, + input_data: Vec, + allows_reentry: bool, + ) -> Result { + // Before pushing the new frame: Protect the caller contract against reentrancy attacks. + // It is important to do this before calling `allows_reentry` so that a direct recursion + // is caught by it. + self.top_frame_mut().allows_reentry = allows_reentry; + + let try_call = || { + if !self.allows_reentry(&to) { + return Err(>::ReentranceDenied.into()) + } + // We ignore instantiate frames in our search for a cached contract. + // Otherwise it would be possible to recursively call a contract from its own + // constructor: We disallow calling not fully constructed contracts. + let cached_info = self + .frames() + .find(|f| f.entry_point == ExportedFunction::Call && f.account_id == to) + .and_then(|f| match &f.contract_info { + CachedContract::Cached(contract) => Some(contract.clone()), + _ => None, + }); + let executable = + self.push_frame(FrameArgs::Call { dest: to, cached_info }, value, gas_limit)?; + self.run(executable, input_data) + }; - fn set_storage(&mut self, key: StorageKey, value: Option>) { - let trie_id = self.ctx.self_trie_id.as_ref().expect( - "`ctx.self_trie_id` points to an alive contract within the `CallContext`;\ - it cannot be `None`;\ - expect can't fail;\ - qed", - ); - if let Err(storage::ContractAbsentError) = - storage::write_contract_storage::(&self.ctx.self_account, trie_id, &key, value) - { - panic!( - "the contract must be in the alive state within the `CallContext`;\ - the contract cannot be absent in storage; - write_contract_storage cannot return `None`; - qed" - ); - } + // We need to make sure to reset `allows_reentry` even on failure. + let result = try_call(); + + // Protection is on a per call basis. 
+ self.top_frame_mut().allows_reentry = true; + + result } fn instantiate( &mut self, - code_hash: &CodeHash, + gas_limit: Weight, + code_hash: CodeHash, endowment: BalanceOf, - gas_meter: &mut GasMeter, input_data: Vec, + salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { - self.ctx.instantiate(endowment, gas_meter, code_hash, input_data) - } - - fn transfer( - &mut self, - to: &T::AccountId, - value: BalanceOf, - ) -> Result<(), DispatchError> { - transfer( - TransferCause::Call, - TransactorKind::Contract, - &self.ctx.self_account.clone(), - to, - value, - self.ctx, - ) + let executable = E::from_storage(code_hash, &self.schedule, self.gas_meter())?; + let trie_seed = self.next_trie_seed(); + let executable = self.push_frame( + FrameArgs::Instantiate { + sender: self.top_frame().account_id.clone(), + trie_seed, + executable, + salt, + }, + endowment, + gas_limit, + )?; + let account_id = self.top_frame().account_id.clone(); + self.run(executable, input_data).map(|ret| (account_id, ret)) } - fn terminate( - &mut self, - beneficiary: &AccountIdOf, - ) -> Result<(), DispatchError> { - let self_id = self.ctx.self_account.clone(); - let value = T::Currency::free_balance(&self_id); - if let Some(caller_ctx) = self.ctx.caller { - if caller_ctx.is_live(&self_id) { - return Err(DispatchError::Other( - "Cannot terminate a contract that is present on the call stack", - )); - } + fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { + if self.is_recursive() { + return Err(Error::::TerminatedWhileReentrant.into()) } - transfer( - TransferCause::Terminate, - TransactorKind::Contract, - &self_id, + let frame = self.top_frame_mut(); + let info = frame.terminate(); + Storage::::queue_trie_for_deletion(&info)?; + >::transfer( + true, + true, + &frame.account_id, beneficiary, - value, - self.ctx, + T::Currency::free_balance(&frame.account_id), )?; - let self_trie_id = self.ctx.self_trie_id.as_ref().expect( - "this function is only invoked by in the context of a contract;\ - a contract has a trie id;\ - this can't be None; qed", - ); - storage::destroy_contract::(&self_id, self_trie_id); + ContractInfoOf::::remove(&frame.account_id); + E::remove_user(info.code_hash, &mut frame.nested_meter)?; + Contracts::::deposit_event(Event::Terminated( + frame.account_id.clone(), + beneficiary.clone(), + )); Ok(()) } - fn call( - &mut self, - to: &T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> ExecResult { - self.ctx.call(to.clone(), value, gas_meter, input_data) + fn transfer(&mut self, to: &T::AccountId, value: BalanceOf) -> DispatchResult { + Self::transfer(true, false, &self.top_frame().account_id, to, value) } - fn restore_to( - &mut self, - dest: AccountIdOf, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, - ) -> Result<(), &'static str> { - if let Some(caller_ctx) = self.ctx.caller { - if caller_ctx.is_live(&self.ctx.self_account) { - return Err( - "Cannot perform restoration of a contract that is present on the call stack", - ); - } - } + fn get_storage(&mut self, key: &StorageKey) -> Option> { + Storage::::read(&self.top_frame_mut().contract_info().trie_id, key) + } - let result = crate::rent::restore_to::( - self.ctx.self_account.clone(), - dest.clone(), - code_hash.clone(), - rent_allowance, - delta, - ); - if let Ok(_) = result { - deposit_event::( - vec![], - RawEvent::Restored( - self.ctx.self_account.clone(), - dest, - code_hash, - rent_allowance, - ), - ); - } - result + fn set_storage(&mut self, 
key: StorageKey, value: Option>) -> DispatchResult { + let frame = self.top_frame_mut(); + Storage::::write(frame.contract_info(), &key, value) } fn address(&self) -> &T::AccountId { - &self.ctx.self_account + &self.top_frame().account_id } fn caller(&self) -> &T::AccountId { - &self.caller + self.frames().nth(1).map(|f| &f.account_id).unwrap_or(&self.origin) } fn balance(&self) -> BalanceOf { - T::Currency::free_balance(&self.ctx.self_account) + T::Currency::free_balance(&self.top_frame().account_id) } fn value_transferred(&self) -> BalanceOf { - self.value_transferred + self.top_frame().value_transferred } - fn random(&self, subject: &[u8]) -> SeedOf { + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf) { T::Randomness::random(subject) } @@ -709,90 +987,129 @@ where } fn minimum_balance(&self) -> BalanceOf { - self.ctx.config.existential_deposit + T::Currency::minimum_balance() } - fn tombstone_deposit(&self) -> BalanceOf { - self.ctx.config.tombstone_deposit + fn contract_deposit(&self) -> BalanceOf { + T::ContractDeposit::get() } fn deposit_event(&mut self, topics: Vec, data: Vec) { deposit_event::( topics, - RawEvent::ContractExecution(self.ctx.self_account.clone(), data) + Event::ContractEmitted(self.top_frame().account_id.clone(), data), ); } - fn set_rent_allowance(&mut self, rent_allowance: BalanceOf) { - if let Err(storage::ContractAbsentError) = - storage::set_rent_allowance::(&self.ctx.self_account, rent_allowance) - { - panic!( - "`self_account` points to an alive contract within the `CallContext`; - set_rent_allowance cannot return `Err`; qed" - ); - } + fn block_number(&self) -> T::BlockNumber { + self.block_number + } + + fn max_value_size(&self) -> u32 { + T::Schedule::get().limits.payload_len + } + + fn get_weight_price(&self, weight: Weight) -> BalanceOf { + T::WeightPrice::convert(weight) + } + + fn schedule(&self) -> &Schedule { + &self.schedule } - fn rent_allowance(&self) -> BalanceOf { - storage::rent_allowance::(&self.ctx.self_account) - .unwrap_or_else(|_| >::max_value()) // Must never be triggered actually + fn gas_meter(&mut self) -> &mut GasMeter { + &mut self.top_frame_mut().nested_meter } - fn block_number(&self) -> T::BlockNumber { self.block_number } + fn append_debug_buffer(&mut self, msg: &str) -> bool { + if let Some(buffer) = &mut self.debug_message { + if !msg.is_empty() { + buffer.extend(msg.as_bytes()); + } + true + } else { + false + } + } - fn max_value_size(&self) -> u32 { - self.ctx.config.max_value_size + fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo { + let mut origin: T::Origin = RawOrigin::Signed(self.address().clone()).into(); + origin.add_filter(T::CallFilter::contains); + call.dispatch(origin) } - fn get_weight_price(&self, weight: Weight) -> BalanceOf { - T::WeightPrice::convert(weight) + fn ecdsa_recover(&self, signature: &[u8; 65], message_hash: &[u8; 32]) -> Result<[u8; 33], ()> { + secp256k1_ecdsa_recover_compressed(&signature, &message_hash).map_err(|_| ()) } } -fn deposit_event( - topics: Vec, - event: Event, -) { - >::deposit_event_indexed( +fn deposit_event(topics: Vec, event: Event) { + >::deposit_event_indexed( &*topics, - ::Event::from(event).into(), + ::Event::from(event).into(), ) } +mod sealing { + use super::*; + + pub trait Sealed {} + + impl<'a, T: Config, E> Sealed for Stack<'a, T, E> {} + + #[cfg(test)] + impl Sealed for crate::wasm::MockExt {} + + #[cfg(test)] + impl Sealed for &mut crate::wasm::MockExt {} +} + /// These tests exercise the executive layer. 
/// -/// In these tests the VM/loader are mocked. Instead of dealing with wasm bytecode they use simple closures. -/// This allows you to tackle executive logic more thoroughly without writing a +/// In these tests the VM/loader are mocked. Instead of dealing with wasm bytecode they use simple +/// closures. This allows you to tackle executive logic more thoroughly without writing a /// wasm VM code. #[cfg(test)] mod tests { - use super::{ - BalanceOf, Event, ExecResult, ExecutionContext, Ext, Loader, - RawEvent, Vm, ReturnFlags, ExecError, ErrorOrigin - }; + use super::*; use crate::{ - gas::GasMeter, tests::{ExtBuilder, Test, MetaEvent}, - exec::ExecReturnValue, CodeHash, Config, - gas::Gas, - storage, Error + exec::ExportedFunction::*, + gas::GasMeter, + storage::Storage, + tests::{ + test_utils::{get_balance, place_contract, set_balance}, + Call, Event as MetaEvent, ExtBuilder, Test, TestFilter, ALICE, BOB, CHARLIE, + }, + Error, Weight, }; - use crate::tests::test_utils::{place_contract, set_balance, get_balance}; - use sp_runtime::DispatchError; use assert_matches::assert_matches; - use std::{cell::RefCell, collections::HashMap, marker::PhantomData, rc::Rc}; + use codec::{Decode, Encode}; + use frame_support::{assert_err, assert_ok}; + use frame_system::{EventRecord, Phase}; + use pallet_contracts_primitives::ReturnFlags; + use pretty_assertions::assert_eq; + use sp_core::Bytes; + use sp_runtime::{ + traits::{BadOrigin, Hash}, + DispatchError, + }; + use std::{cell::RefCell, collections::HashMap, rc::Rc}; + + type System = frame_system::Pallet; - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; + type MockStack<'a> = Stack<'a, Test, MockExecutable>; - const GAS_LIMIT: Gas = 10_000_000_000; + const GAS_LIMIT: Weight = 10_000_000_000; + + thread_local! { + static LOADER: RefCell = RefCell::new(MockLoader::default()); + } fn events() -> Vec> { - >::events() + System::events() .into_iter() .filter_map(|meta| match meta.event { - MetaEvent::contracts(contract_event) => Some(contract_event), + MetaEvent::Contracts(contract_event) => Some(contract_event), _ => None, }) .collect() @@ -801,118 +1118,174 @@ mod tests { struct MockCtx<'a> { ext: &'a mut dyn Ext, input_data: Vec, - gas_meter: &'a mut GasMeter, } #[derive(Clone)] - struct MockExecutable<'a>(Rc ExecResult + 'a>); - - impl<'a> MockExecutable<'a> { - fn new(f: impl Fn(MockCtx) -> ExecResult + 'a) -> Self { - MockExecutable(Rc::new(f)) - } + struct MockExecutable { + func: Rc ExecResult + 'static>, + func_type: ExportedFunction, + code_hash: CodeHash, + refcount: u64, } - struct MockLoader<'a> { - map: HashMap, MockExecutable<'a>>, + #[derive(Default)] + struct MockLoader { + map: HashMap, MockExecutable>, counter: u64, } - impl<'a> MockLoader<'a> { - fn empty() -> Self { - MockLoader { - map: HashMap::new(), - counter: 0, - } + impl MockLoader { + fn insert( + func_type: ExportedFunction, + f: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static, + ) -> CodeHash { + LOADER.with(|loader| { + let mut loader = loader.borrow_mut(); + // Generate code hashes as monotonically increasing values. + let hash = ::Hash::from_low_u64_be(loader.counter); + loader.counter += 1; + loader.map.insert( + hash, + MockExecutable { + func: Rc::new(f), + func_type, + code_hash: hash.clone(), + refcount: 1, + }, + ); + hash + }) } - fn insert(&mut self, f: impl Fn(MockCtx) -> ExecResult + 'a) -> CodeHash { - // Generate code hashes as monotonically increasing values. 
- let code_hash = ::Hash::from_low_u64_be(self.counter); - - self.counter += 1; - self.map.insert(code_hash, MockExecutable::new(f)); - code_hash + fn increment_refcount(code_hash: CodeHash) { + LOADER.with(|loader| { + let mut loader = loader.borrow_mut(); + loader + .map + .entry(code_hash) + .and_modify(|executable| executable.refcount += 1) + .or_insert_with(|| panic!("code_hash does not exist")); + }); } - } - struct MockVm<'a> { - _marker: PhantomData<&'a ()>, - } - - impl<'a> MockVm<'a> { - fn new() -> Self { - MockVm { _marker: PhantomData } + fn decrement_refcount(code_hash: CodeHash) { + use std::collections::hash_map::Entry::Occupied; + LOADER.with(|loader| { + let mut loader = loader.borrow_mut(); + let mut entry = match loader.map.entry(code_hash) { + Occupied(e) => e, + _ => panic!("code_hash does not exist"), + }; + let refcount = &mut entry.get_mut().refcount; + *refcount -= 1; + if *refcount == 0 { + entry.remove(); + } + }); } } - impl<'a> Loader for MockLoader<'a> { - type Executable = MockExecutable<'a>; + impl Executable for MockExecutable { + fn from_storage( + code_hash: CodeHash, + _schedule: &Schedule, + _gas_meter: &mut GasMeter, + ) -> Result { + Self::from_storage_noinstr(code_hash) + } - fn load_init(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") + fn from_storage_noinstr(code_hash: CodeHash) -> Result { + LOADER.with(|loader| { + loader + .borrow_mut() + .map + .get(&code_hash) + .cloned() + .ok_or(Error::::CodeNotFound.into()) + }) } - fn load_main(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") + + fn add_user( + code_hash: CodeHash, + _: &mut GasMeter, + ) -> Result<(), DispatchError> { + MockLoader::increment_refcount(code_hash); + Ok(()) } - } - impl<'a> Vm for MockVm<'a> { - type Executable = MockExecutable<'a>; + fn remove_user( + code_hash: CodeHash, + _: &mut GasMeter, + ) -> Result<(), DispatchError> { + MockLoader::decrement_refcount(code_hash); + Ok(()) + } fn execute>( - &self, - exec: &MockExecutable, - mut ext: E, + self, + ext: &mut E, + function: &ExportedFunction, input_data: Vec, - gas_meter: &mut GasMeter, ) -> ExecResult { - (exec.0)(MockCtx { - ext: &mut ext, - input_data, - gas_meter, - }) + if let &Constructor = function { + MockLoader::increment_refcount(self.code_hash); + } + if function == &self.func_type { + (self.func)(MockCtx { ext, input_data }, &self) + } else { + exec_success() + } + } + + fn code_hash(&self) -> &CodeHash { + &self.code_hash + } + + fn code_len(&self) -> u32 { + 0 + } + + fn aggregate_code_len(&self) -> u32 { + 0 + } + + fn refcount(&self) -> u32 { + self.refcount as u32 } } fn exec_success() -> ExecResult { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + } + + fn exec_trapped() -> ExecResult { + Err(ExecError { error: >::ContractTrapped.into(), origin: ErrorOrigin::Callee }) } #[test] fn it_works() { + thread_local! 
{ + static TEST_DATA: RefCell> = RefCell::new(vec![0]); + } + let value = Default::default(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let data = vec![]; - - let vm = MockVm::new(); - - let test_data = Rc::new(RefCell::new(vec![0usize])); - - let mut loader = MockLoader::empty(); - let exec_ch = loader.insert(|_ctx| { - test_data.borrow_mut().push(1); + let exec_ch = MockLoader::insert(Call, |_ctx, _executable| { + TEST_DATA.with(|data| data.borrow_mut().push(1)); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); place_contract(&BOB, exec_ch); assert_matches!( - ctx.call(BOB, value, &mut gas_meter, data), + MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, value, vec![], None,), Ok(_) ); }); - assert_eq!(&*test_data.borrow(), &vec![0, 1]); + TEST_DATA.with(|data| assert_eq!(*data.borrow(), vec![0, 1])); } #[test] @@ -922,23 +1295,11 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - let loader = MockLoader::empty(); - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); set_balance(&origin, 100); set_balance(&dest, 0); - super::transfer( - super::TransferCause::Call, - super::TransactorKind::PlainAccount, - &origin, - &dest, - 55, - &mut ctx, - ).unwrap(); + MockStack::transfer(true, false, &origin, &dest, 55).unwrap(); assert_eq!(get_balance(&origin), 45); assert_eq!(get_balance(&dest), 55); @@ -952,29 +1313,30 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( - |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) - ); + let return_ch = MockLoader::insert(Call, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) + }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - place_contract(&BOB, return_ch); + let schedule = ::Schedule::get(); + place_contract(&dest, return_ch); set_balance(&origin, 100); - set_balance(&dest, 0); + let balance = get_balance(&dest); - let output = ctx.call( - dest, - 55, + let output = MockStack::run_call( + origin.clone(), + dest.clone(), &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 55, vec![], - ).unwrap(); + None, + ) + .unwrap(); assert!(!output.is_success()); assert_eq!(get_balance(&origin), 100); - assert_eq!(get_balance(&dest), 0); + assert_eq!(get_balance(&dest), balance); }); } @@ -985,27 +1347,12 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - let loader = MockLoader::empty(); - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); set_balance(&origin, 0); - let result = super::transfer( - super::TransferCause::Call, - super::TransactorKind::PlainAccount, - &origin, - &dest, - 100, - &mut ctx, - ); + let result = MockStack::transfer(false, false, &origin, &dest, 100); - assert_eq!( - result, - Err(Error::::TransferFailed.into()) - ); + assert_eq!(result, Err(Error::::TransferFailed.into())); assert_eq!(get_balance(&origin), 0); assert_eq!(get_balance(&dest), 0); }); @@ -1017,28 +1364,27 @@ mod tests { // is returned from the execution context. 
let origin = ALICE; let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( - |_| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) - ); + let return_ch = MockLoader::insert(Call, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) + }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); place_contract(&BOB, return_ch); - let result = ctx.call( + let result = MockStack::run_call( + origin, dest, - 0, &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, vec![], + None, ); let output = result.unwrap(); assert!(output.is_success()); - assert_eq!(output.data, vec![1, 2, 3, 4]); + assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); }); } @@ -1048,51 +1394,50 @@ mod tests { // is returned from the execution context. let origin = ALICE; let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( - |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) - ); + let return_ch = MockLoader::insert(Call, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) + }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); place_contract(&BOB, return_ch); - let result = ctx.call( + let result = MockStack::run_call( + origin, dest, - 0, &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, vec![], + None, ); let output = result.unwrap(); assert!(!output.is_success()); - assert_eq!(output.data, vec![1, 2, 3, 4]); + assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); }); } #[test] fn input_data_to_call() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let input_data_ch = loader.insert(|ctx| { + let input_data_ch = MockLoader::insert(Call, |ctx, _| { assert_eq!(ctx.input_data, &[1, 2, 3, 4]); exec_success() }); // This one tests passing the input data into a contract via call. ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); place_contract(&BOB, input_data_ch); - let result = ctx.call( + let result = MockStack::run_call( + ALICE, BOB, - 0, &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, vec![1, 2, 3, 4], + None, ); assert_matches!(result, Ok(_)); }); @@ -1100,25 +1445,30 @@ mod tests { #[test] fn input_data_to_instantiate() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let input_data_ch = loader.insert(|ctx| { + let input_data_ch = MockLoader::insert(Constructor, |ctx, _| { assert_eq!(ctx.input_data, &[1, 2, 3, 4]); exec_success() }); // This one tests passing the input data into a contract via instantiate. 
ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - - set_balance(&ALICE, 100); - - let result = ctx.instantiate( - cfg.subsistence_threshold(), - &mut GasMeter::::new(GAS_LIMIT), - &input_data_ch, + let schedule = ::Schedule::get(); + let subsistence = Contracts::::subsistence_threshold(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(input_data_ch, &schedule, &mut gas_meter).unwrap(); + + set_balance(&ALICE, subsistence * 10); + + let result = MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &schedule, + subsistence * 3, vec![1, 2, 3, 4], + &[], + None, ); assert_matches!(result, Ok(_)); }); @@ -1128,43 +1478,43 @@ mod tests { fn max_depth() { // This test verifies that when we reach the maximal depth creation of an // yet another context fails. + thread_local! { + static REACHED_BOTTOM: RefCell = RefCell::new(false); + } let value = Default::default(); - let reached_bottom = RefCell::new(false); - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let recurse_ch = loader.insert(|ctx| { + let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. - let r = ctx.ext.call(&BOB, 0, ctx.gas_meter, vec![]); - - let mut reached_bottom = reached_bottom.borrow_mut(); - if !*reached_bottom { - // We are first time here, it means we just reached bottom. - // Verify that we've got proper error and set `reached_bottom`. - assert_eq!( - r, - Err(Error::::MaxCallDepthReached.into()) - ); - *reached_bottom = true; - } else { - // We just unwinding stack here. - assert_matches!(r, Ok(_)); - } + let r = ctx.ext.call(0, BOB, 0, vec![], true); + + REACHED_BOTTOM.with(|reached_bottom| { + let mut reached_bottom = reached_bottom.borrow_mut(); + if !*reached_bottom { + // We are first time here, it means we just reached bottom. + // Verify that we've got proper error and set `reached_bottom`. + assert_eq!(r, Err(Error::::MaxCallDepthReached.into())); + *reached_bottom = true; + } else { + // We just unwinding stack here. + assert_matches!(r, Ok(_)); + } + }); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); set_balance(&BOB, 1); place_contract(&BOB, recurse_ch); - let result = ctx.call( + let result = MockStack::run_call( + ALICE, BOB, - value, &mut GasMeter::::new(GAS_LIMIT), + &schedule, + value, vec![], + None, ); assert_matches!(result, Ok(_)); @@ -1176,82 +1526,77 @@ mod tests { let origin = ALICE; let dest = BOB; - let vm = MockVm::new(); - - let witnessed_caller_bob = RefCell::new(None::); - let witnessed_caller_charlie = RefCell::new(None::); + thread_local! { + static WITNESSED_CALLER_BOB: RefCell>> = RefCell::new(None); + static WITNESSED_CALLER_CHARLIE: RefCell>> = RefCell::new(None); + } - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { + let bob_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for bob. - *witnessed_caller_bob.borrow_mut() = Some(*ctx.ext.caller()); + WITNESSED_CALLER_BOB + .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); // Call into CHARLIE contract. 
- assert_matches!( - ctx.ext.call(&CHARLIE, 0, ctx.gas_meter, vec![]), - Ok(_) - ); + assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); - let charlie_ch = loader.insert(|ctx| { + let charlie_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for charlie. - *witnessed_caller_charlie.borrow_mut() = Some(*ctx.ext.caller()); + WITNESSED_CALLER_CHARLIE + .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); place_contract(&dest, bob_ch); place_contract(&CHARLIE, charlie_ch); - let result = ctx.call( - dest, - 0, + let result = MockStack::run_call( + origin.clone(), + dest.clone(), &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, vec![], + None, ); assert_matches!(result, Ok(_)); }); - assert_eq!(&*witnessed_caller_bob.borrow(), &Some(origin)); - assert_eq!(&*witnessed_caller_charlie.borrow(), &Some(dest)); + WITNESSED_CALLER_BOB.with(|caller| assert_eq!(*caller.borrow(), Some(origin))); + WITNESSED_CALLER_CHARLIE.with(|caller| assert_eq!(*caller.borrow(), Some(dest))); } #[test] fn address_returns_proper_values() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { + let bob_ch = MockLoader::insert(Call, |ctx, _| { // Verify that address matches BOB. assert_eq!(*ctx.ext.address(), BOB); // Call into charlie contract. - assert_matches!( - ctx.ext.call(&CHARLIE, 0, ctx.gas_meter, vec![]), - Ok(_) - ); + assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); - let charlie_ch = loader.insert(|ctx| { + let charlie_ch = MockLoader::insert(Call, |ctx, _| { assert_eq!(*ctx.ext.address(), CHARLIE); exec_success() }); ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); place_contract(&BOB, bob_ch); place_contract(&CHARLIE, charlie_ch); - let result = ctx.call( + let result = MockStack::run_call( + ALICE, BOB, - 0, &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 0, vec![], + None, ); assert_matches!(result, Ok(_)); @@ -1260,21 +1605,24 @@ mod tests { #[test] fn refuse_instantiate_with_value_below_existential_deposit() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| exec_success()); + let dummy_ch = MockLoader::insert(Constructor, |_, _| exec_success()); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); assert_matches!( - ctx.instantiate( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &schedule, 0, // <- zero endowment - &mut GasMeter::::new(GAS_LIMIT), - &dummy_ch, vec![], + &[], + None, ), Err(_) ); @@ -1283,85 +1631,93 @@ mod tests { #[test] fn instantiation_work_with_success_output() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( - |_| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) - ); + let dummy_ch = MockLoader::insert(Constructor, |_, _| { + 
Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) + }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( - ctx.instantiate( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &schedule, 100, - &mut GasMeter::::new(GAS_LIMIT), - &dummy_ch, vec![], + &[], + None, ), - Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address + Ok((address, ref output)) if output.data == Bytes(vec![80, 65, 83, 83]) => address ); // Check that the newly created account has the expected code hash and // there are instantiation event. - assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&events(), &[ - RawEvent::Instantiated(ALICE, instantiated_contract_address) - ]); + assert_eq!( + Storage::::code_hash(&instantiated_contract_address).unwrap(), + dummy_ch + ); + assert_eq!(&events(), &[Event::Instantiated(ALICE, instantiated_contract_address)]); }); } #[test] fn instantiation_fails_with_failing_output() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( - |_| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) - ); + let dummy_ch = MockLoader::insert(Constructor, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) + }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( - ctx.instantiate( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &schedule, 100, - &mut GasMeter::::new(GAS_LIMIT), - &dummy_ch, vec![], + &[], + None, ), - Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address + Ok((address, ref output)) if output.data == Bytes(vec![70, 65, 73, 76]) => address ); // Check that the account has not been created. - assert!(storage::code_hash::(&instantiated_contract_address).is_err()); + assert!(Storage::::code_hash(&instantiated_contract_address).is_none()); assert!(events().is_empty()); }); } #[test] fn instantiation_from_contract() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| exec_success()); - let instantiated_contract_address = Rc::new(RefCell::new(None::)); - let instantiator_ch = loader.insert({ + let dummy_ch = MockLoader::insert(Call, |_, _| exec_success()); + let instantiated_contract_address = Rc::new(RefCell::new(None::>)); + let instantiator_ch = MockLoader::insert(Call, { let dummy_ch = dummy_ch.clone(); let instantiated_contract_address = Rc::clone(&instantiated_contract_address); - move |ctx| { + move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. 
- let (address, output) = ctx.ext.instantiate( - &dummy_ch, - Config::::subsistence_threshold_uncached(), - ctx.gas_meter, - vec![] - ).unwrap(); + let (address, output) = ctx + .ext + .instantiate( + 0, + dummy_ch, + Contracts::::subsistence_threshold() * 3, + vec![], + &[48, 49, 50], + ) + .unwrap(); *instantiated_contract_address.borrow_mut() = address.into(); Ok(output) @@ -1369,46 +1725,50 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - set_balance(&ALICE, 1000); - set_balance(&BOB, 100); + let schedule = ::Schedule::get(); + set_balance(&ALICE, Contracts::::subsistence_threshold() * 100); place_contract(&BOB, instantiator_ch); assert_matches!( - ctx.call(BOB, 20, &mut GasMeter::::new(GAS_LIMIT), vec![]), + MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 20, + vec![], + None, + ), Ok(_) ); - let instantiated_contract_address = instantiated_contract_address.borrow().as_ref().unwrap().clone(); + let instantiated_contract_address = + instantiated_contract_address.borrow().as_ref().unwrap().clone(); // Check that the newly created account has the expected code hash and // there are instantiation event. - assert_eq!(storage::code_hash::(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&events(), &[ - RawEvent::Instantiated(BOB, instantiated_contract_address) - ]); + assert_eq!( + Storage::::code_hash(&instantiated_contract_address).unwrap(), + dummy_ch + ); + assert_eq!(&events(), &[Event::Instantiated(BOB, instantiated_contract_address)]); }); } #[test] fn instantiation_traps() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( - |_| Err("It's a trap!".into()) - ); - let instantiator_ch = loader.insert({ + let dummy_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into())); + let instantiator_ch = MockLoader::insert(Call, { let dummy_ch = dummy_ch.clone(); - move |ctx| { + move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. 
assert_matches!( ctx.ext.instantiate( - &dummy_ch, - 15u64, - ctx.gas_meter, - vec![] + 0, + dummy_ch, + Contracts::::subsistence_threshold(), + vec![], + &[], ), Err(ExecError { error: DispatchError::Other("It's a trap!"), @@ -1421,14 +1781,21 @@ mod tests { }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + let schedule = ::Schedule::get(); set_balance(&ALICE, 1000); set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); assert_matches!( - ctx.call(BOB, 20, &mut GasMeter::::new(GAS_LIMIT), vec![]), + MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 20, + vec![], + None, + ), Ok(_) ); @@ -1440,63 +1807,353 @@ mod tests { #[test] fn termination_from_instantiate_fails() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - - let terminate_ch = loader.insert(|ctx| { + let terminate_ch = MockLoader::insert(Constructor, |ctx, _| { ctx.ext.terminate(&ALICE).unwrap(); exec_success() }); - ExtBuilder::default() - .existential_deposit(15) - .build() - .execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - set_balance(&ALICE, 1000); - - assert_eq!( - ctx.instantiate( - 100, - &mut GasMeter::::new(GAS_LIMIT), - &terminate_ch, - vec![], - ), - Err(Error::::NewContractNotFunded.into()) - ); + ExtBuilder::default().existential_deposit(15).build().execute_with(|| { + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(terminate_ch, &schedule, &mut gas_meter).unwrap(); + set_balance(&ALICE, 1000); - assert_eq!( - &events(), - &[] - ); - }); + assert_eq!( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &schedule, + 100, + vec![], + &[], + None, + ), + Err(Error::::TerminatedInConstructor.into()) + ); + + assert_eq!(&events(), &[]); + }); } #[test] - fn rent_allowance() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let rent_allowance_ch = loader.insert(|ctx| { - assert_eq!(ctx.ext.rent_allowance(), >::max_value()); - ctx.ext.set_rent_allowance(10); - assert_eq!(ctx.ext.rent_allowance(), 10); + fn in_memory_changes_not_discarded() { + // Remove this assert and fill out the "DO" stubs once fields are added to the + // contract info that can be modified during execution. + assert!(!CONTRACT_INFO_CAN_CHANGE); + + // Call stack: BOB -> CHARLIE (trap) -> BOB' (success) + // This test verifies an edge case of the contract info cache: + // We change some value in our contract info before calling into a contract + // that calls back into ourselves. This triggers a case where BOB's contract info + // is written to storage and invalidated by the successful execution of BOB'. + // The trap of CHARLIE reverts the storage changes to BOB. When the root BOB regains + // control it reloads its contract info from storage. We check that changes that + // are made before calling into CHARLIE are not discarded.
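The invalidation step this test exercises is easier to follow with the cache states spelled out. A toy model of what `pop_frame` does when it persists a duplicate's contract info (integers as account ids; not the pallet's types):

#[derive(Debug, PartialEq)]
enum Cache { Cached, Invalidated }

struct Frame { account: u8, cache: Cache }

// Invalidate the first duplicate below the new top frame, mirroring the
// `skip(1).find(..)` in `pop_frame` (the new top frame itself is covered
// by the same-contract optimization).
fn invalidate_first_duplicate(stack: &mut [Frame], popped: u8) {
    if let Some(f) = stack.iter_mut().rev().skip(1).find(|f| f.account == popped) {
        f.cache = Cache::Invalidated;
    }
}

fn main() {
    // Bottom-to-top stack left behind after a frame of contract 9 was
    // popped and its contract info persisted to storage:
    let mut stack = vec![
        Frame { account: 9, cache: Cache::Cached }, // root, now stale
        Frame { account: 5, cache: Cache::Cached },
        Frame { account: 9, cache: Cache::Cached }, // now stale
        Frame { account: 5, cache: Cache::Cached }, // new top
    ];
    invalidate_first_duplicate(&mut stack, 9);
    // Only the duplicate nearest the top is marked: it will reload from
    // storage when touched and invalidate the root copy in turn when it
    // is itself popped.
    assert_eq!(stack[2].cache, Cache::Invalidated);
    assert_eq!(stack[0].cache, Cache::Cached);
}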
+ let code_bob = MockLoader::insert(Call, |ctx, _| { + if ctx.input_data[0] == 0 { + // DO: modify metadata (ContractInfo) of own contract through ctx.ext functions + assert_eq!(ctx.ext.call(0, CHARLIE, 0, vec![], true), exec_trapped()); + // DO: check that the value is not discarded (query via ctx.ext) + } exec_success() }); + let code_charlie = MockLoader::insert(Call, |ctx, _| { + assert!(ctx.ext.call(0, BOB, 0, vec![99], true).is_ok()); + exec_trapped() + }); + // Verify that the changes BOB makes before calling into CHARLIE are not discarded. ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - set_balance(&ALICE, 100); + let schedule = ::Schedule::get(); + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); - let result = ctx.instantiate( - cfg.subsistence_threshold(), + let result = MockStack::run_call( + ALICE, + BOB, &mut GasMeter::::new(GAS_LIMIT), - &rent_allowance_ch, + &schedule, + 0, + vec![0], + None, ); assert_matches!(result, Ok(_)); }); } + + #[test] + fn recursive_call_during_constructor_fails() { + let code = MockLoader::insert(Constructor, |ctx, _| { + assert_matches!( + ctx.ext.call(0, ctx.ext.address().clone(), 0, vec![], true), + Err(ExecError{error, ..}) if error == >::ContractNotFound.into() + ); + exec_success() + }); + + // Try to call back into the contract from within its own constructor. ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + let subsistence = Contracts::::subsistence_threshold(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage(code, &schedule, &mut gas_meter).unwrap(); + + set_balance(&ALICE, subsistence * 10); + + let result = MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &schedule, + subsistence * 3, vec![], + &[], + None, ); assert_matches!(result, Ok(_)); }); } + + #[test] + fn printing_works() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + ctx.ext.append_debug_buffer("This is a test"); + ctx.ext.append_debug_buffer("More text"); + exec_success() + }); + + let mut debug_buffer = Vec::new(); + + ExtBuilder::default().build().execute_with(|| { + let subsistence = Contracts::::subsistence_threshold(); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, subsistence * 10); + place_contract(&BOB, code_hash); + MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &schedule, + 0, + vec![], + Some(&mut debug_buffer), + ) + .unwrap(); + }); + + assert_eq!(&String::from_utf8(debug_buffer).unwrap(), "This is a testMore text"); + } + + #[test] + fn printing_works_on_fail() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + ctx.ext.append_debug_buffer("This is a test"); + ctx.ext.append_debug_buffer("More text"); + exec_trapped() + }); + + let mut debug_buffer = Vec::new(); + + ExtBuilder::default().build().execute_with(|| { + let subsistence = Contracts::::subsistence_threshold(); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, subsistence * 10); + place_contract(&BOB, code_hash); + let result = MockStack::run_call( + ALICE, + BOB, + &mut gas_meter, + &schedule, + 0, + vec![], + Some(&mut debug_buffer), + ); + assert!(result.is_err()); + }); + + assert_eq!(&String::from_utf8(debug_buffer).unwrap(), "This is a testMore text"); + } + + #[test] + fn call_reentry_direct_recursion() { + // call the
contract passed as input with disabled reentry + let code_bob = MockLoader::insert(Call, |ctx, _| { + let dest = Decode::decode(&mut ctx.input_data.as_ref()).unwrap(); + ctx.ext.call(0, dest, 0, vec![], false) + }); + + let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); + + ExtBuilder::default().build().execute_with(|| { + let schedule = <Test as Config>::Schedule::get(); + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); + + // Calling another contract should succeed + assert_ok!(MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::<Test>::new(GAS_LIMIT), + &schedule, + 0, + CHARLIE.encode(), + None, + )); + + // Calling into oneself fails + assert_err!( + MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::<Test>::new(GAS_LIMIT), + &schedule, + 0, + BOB.encode(), + None, + ) + .map_err(|e| e.error), + <Error<Test>>::ReentranceDenied, + ); + }); + } + + #[test] + fn call_deny_reentry() { + let code_bob = MockLoader::insert(Call, |ctx, _| { + if ctx.input_data[0] == 0 { + ctx.ext.call(0, CHARLIE, 0, vec![], false) + } else { + exec_success() + } + }); + + // call BOB with input set to '1' + let code_charlie = + MockLoader::insert(Call, |ctx, _| ctx.ext.call(0, BOB, 0, vec![1], true)); + + ExtBuilder::default().build().execute_with(|| { + let schedule = <Test as Config>::Schedule::get(); + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); + + // BOB -> CHARLIE -> BOB fails as BOB denies reentry. + assert_err!( + MockStack::run_call( + ALICE, + BOB, + &mut GasMeter::<Test>::new(GAS_LIMIT), + &schedule, + 0, + vec![0], + None, + ) + .map_err(|e| e.error), + <Error<Test>>::ReentranceDenied, + ); + }); + } + + #[test] + fn call_runtime_works() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + let call = Call::System(frame_system::Call::remark_with_event { + remark: b"Hello World".to_vec(), + }); + ctx.ext.call_runtime(call).unwrap(); + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let subsistence = Contracts::<Test>::subsistence_threshold(); + let schedule = <Test as Config>::Schedule::get(); + let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT); + set_balance(&ALICE, subsistence * 10); + place_contract(&BOB, code_hash); + System::reset_events(); + MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, 0, vec![], None).unwrap(); + + let remark_hash = <Test as frame_system::Config>::Hashing::hash(b"Hello World"); + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: MetaEvent::System(frame_system::Event::Remarked(BOB, remark_hash)), + topics: vec![], + },] + ); + }); + } + + #[test] + fn call_runtime_filter() { + let code_hash = MockLoader::insert(Call, |ctx, _| { + use frame_system::Call as SysCall; + use pallet_balances::Call as BalanceCall; + use pallet_utility::Call as UtilCall; + + // remark should still be allowed + let allowed_call = + Call::System(SysCall::remark_with_event { remark: b"Hello".to_vec() }); + + // transfers are disallowed by the `TestFilter` (see below) + let forbidden_call = Call::Balances(BalanceCall::transfer { dest: CHARLIE, value: 22 }); + + // simple case: a direct call + assert_err!(ctx.ext.call_runtime(forbidden_call.clone()), BadOrigin); + + // as part of a batch: the return value is OK (but it interrupted the batch) + assert_ok!(ctx.ext.call_runtime(Call::Utility(UtilCall::batch { + calls: vec![allowed_call.clone(), forbidden_call, allowed_call] + })),); + + // the transfer wasn't performed + assert_eq!(get_balance(&CHARLIE), 0); + + exec_success() + }); + + TestFilter::set_filter(|call| match call { + Call::Balances(pallet_balances::Call::transfer { ..
}) => false, + _ => true, + }); + + ExtBuilder::default().build().execute_with(|| { + let subsistence = Contracts::::subsistence_threshold(); + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + set_balance(&ALICE, subsistence * 10); + place_contract(&BOB, code_hash); + System::reset_events(); + MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, 0, vec![], None).unwrap(); + + let remark_hash = ::Hashing::hash(b"Hello"); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::System(frame_system::Event::Remarked(BOB, remark_hash)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::Utility(pallet_utility::Event::ItemCompleted), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted( + 1, + BadOrigin.into() + ),), + topics: vec![], + }, + ] + ); + }); + } } diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index decaf11b796f7..38d18c1e24c19 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -1,45 +1,41 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use crate::{Trait, exec::ExecError}; -use sp_std::marker::PhantomData; -use sp_runtime::traits::Zero; -use frame_support::dispatch::{ - DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo, +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{exec::ExecError, Config, Error}; +use frame_support::{ + dispatch::{ + DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo, PostDispatchInfo, + }, + weights::Weight, + DefaultNoBound, }; +use sp_core::crypto::UncheckedFrom; +use sp_runtime::traits::Zero; +use sp_std::marker::PhantomData; #[cfg(test)] use std::{any::Any, fmt::Debug}; -// Gas is essentially the same as weight. It is a 1 to 1 correspondence. 
-pub type Gas = frame_support::weights::Weight; - -#[must_use] #[derive(Debug, PartialEq, Eq)] -pub enum GasMeterResult { - Proceed, - OutOfGas, -} +pub struct ChargedAmount(Weight); -impl GasMeterResult { - pub fn is_out_of_gas(&self) -> bool { - match *self { - GasMeterResult::OutOfGas => true, - GasMeterResult::Proceed => false, - } +impl ChargedAmount { + pub fn amount(&self) -> Weight { + self.0 } } @@ -59,13 +55,8 @@ impl TestAuxiliaries for T {} /// Implementing type is expected to be super lightweight hence `Copy` (`Clone` is added /// for consistency). If inlined there should be no observable difference compared /// to a hand-written code. -pub trait Token: Copy + Clone + TestAuxiliaries { - /// Metadata type, which the token can require for calculating the amount - /// of gas to charge. Can be a some configuration type or - /// just the `()`. - type Metadata; - - /// Calculate amount of gas that should be taken by this token. +pub trait Token: Copy + Clone + TestAuxiliaries { + /// Return the amount of gas that should be taken by this token. /// /// This function should be really lightweight and must not fail. It is not /// expected that implementors will query the storage or do any kinds of heavy operations. @@ -73,7 +64,7 @@ pub trait Token: Copy + Clone + TestAuxiliaries { /// That said, implementors of this function still can run into overflows /// while calculating the amount. In this case it is ok to use saturating operations /// since on overflow they will return `max_value` which should consume all gas. - fn calculate_amount(&self, metadata: &Self::Metadata) -> Gas; + fn weight(&self) -> Weight; } /// A wrapper around a type-erased trait object of what used to be a `Token`. @@ -83,25 +74,69 @@ pub struct ErasedToken { pub token: Box, } -pub struct GasMeter { - gas_limit: Gas, +#[derive(DefaultNoBound)] +pub struct GasMeter { + gas_limit: Weight, /// Amount of gas left from initial gas limit. Can reach zero. - gas_left: Gas, + gas_left: Weight, + /// Due to `adjust_gas` and `nested` the `gas_left` can temporarily dip below its final value. + gas_left_lowest: Weight, _phantom: PhantomData, #[cfg(test)] tokens: Vec, } -impl GasMeter { - pub fn new(gas_limit: Gas) -> Self { + +impl GasMeter +where + T::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, +{ + pub fn new(gas_limit: Weight) -> Self { GasMeter { gas_limit, gas_left: gas_limit, + gas_left_lowest: gas_limit, _phantom: PhantomData, #[cfg(test)] tokens: Vec::new(), } } + /// Create a new gas meter by removing gas from the current meter. + /// + /// # Note + /// + /// Passing `0` as amount is interpreted as "all remaining gas". + pub fn nested(&mut self, amount: Weight) -> Result { + let amount = if amount == 0 { self.gas_left } else { amount }; + + // NOTE that it is ok to allocate all available gas since it still ensured + // by `charge` that it doesn't reach zero. + if self.gas_left < amount { + Err(>::OutOfGas.into()) + } else { + self.gas_left = self.gas_left - amount; + Ok(GasMeter::new(amount)) + } + } + + /// Absorb the remaining gas of a nested meter after we are done using it. + pub fn absorb_nested(&mut self, nested: Self) { + if self.gas_left == 0 { + // All of the remaining gas was inherited by the nested gas meter. When absorbing + // we can therefore safely inherit the lowest gas that the nested gas meter experienced + // as long as it is lower than the lowest gas that was experienced by the parent. 
+ // We cannot call `self.gas_left_lowest()` here because at the point this + // code runs the parent gas meter has `0` gas left. + self.gas_left_lowest = nested.gas_left_lowest().min(self.gas_left_lowest); + } else { + // The nested gas meter was created with a fixed amount that did not consume all of the + // parent's (self) gas. The lowest gas that self will experience is when the nested + // gas was pre-charged with the fixed amount. + self.gas_left_lowest = self.gas_left_lowest(); + } + self.gas_left += nested.gas_left; + } + /// Account for used gas. /// /// Amount is calculated by the given `token`. @@ -112,89 +147,66 @@ impl GasMeter { /// NOTE that amount is always consumed, i.e. if there is not enough gas /// then the counter will be set to zero. #[inline] - pub fn charge<Tok: Token<T>>( - &mut self, - metadata: &Tok::Metadata, - token: Tok, - ) -> GasMeterResult { + pub fn charge<Tok: Token<T>>(&mut self, token: Tok) -> Result<ChargedAmount, DispatchError> { #[cfg(test)] { // Unconditionally add the token to the storage. - let erased_tok = ErasedToken { - description: format!("{:?}", token), - token: Box::new(token), - }; + let erased_tok = + ErasedToken { description: format!("{:?}", token), token: Box::new(token) }; self.tokens.push(erased_tok); } - let amount = token.calculate_amount(metadata); - let new_value = match self.gas_left.checked_sub(amount) { - None => None, - Some(val) => Some(val), - }; + let amount = token.weight(); + let new_value = self.gas_left.checked_sub(amount); // We always consume the gas even if there is not enough gas. self.gas_left = new_value.unwrap_or_else(Zero::zero); match new_value { - Some(_) => GasMeterResult::Proceed, - None => GasMeterResult::OutOfGas, + Some(_) => Ok(ChargedAmount(amount)), + None => Err(Error::<T>::OutOfGas.into()), } } - // Account for not fully used gas. - // - // This can be used after dispatching a runtime call to refund gas that was not - // used by the dispatchable. - pub fn refund(&mut self, gas: Gas) { - self.gas_left = self.gas_left.saturating_add(gas).max(self.gas_limit); + /// Adjust a previously charged amount down to its actual amount. + /// + /// This is used when an a priori maximum amount was charged and part of it should be + /// refunded to match the actual amount. + pub fn adjust_gas<Tok: Token<T>>(&mut self, charged_amount: ChargedAmount, token: Tok) { + self.gas_left_lowest = self.gas_left_lowest(); + let adjustment = charged_amount.0.saturating_sub(token.weight()); + self.gas_left = self.gas_left.saturating_add(adjustment).min(self.gas_limit); } - /// Allocate some amount of gas and perform some work with - /// a newly created nested gas meter. + /// Returns the amount of gas that is required to run the same call. /// - /// Invokes `f` with either the gas meter that has `amount` gas left or - /// with `None`, if this gas meter has not enough gas to allocate given `amount`. - /// - /// All unused gas in the nested gas meter is returned to this gas meter. - pub fn with_nested<R, F: FnOnce(Option<&mut GasMeter<T>>) -> R>( - &mut self, - amount: Gas, - f: F, - ) -> R { - // NOTE that it is ok to allocate all available gas since it still ensured - // by `charge` that it doesn't reach zero. - if self.gas_left < amount { - f(None) - } else { - self.gas_left = self.gas_left - amount; - let mut nested = GasMeter::new(amount); - - let r = f(Some(&mut nested)); - - self.gas_left = self.gas_left + nested.gas_left; - - r - } + /// This can be different from `gas_consumed` because due to `adjust_gas` the amount of + /// spent gas can temporarily drop and be refunded later.
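// Illustrative only: the intended charge-then-adjust pattern, with `Tok` standing in for a
// hypothetical token type whose a-priori weight is an upper bound on the real cost:
//
//     let charged = meter.charge(Tok::upper_bound())?; // pre-charge the maximum
//     // ... perform the operation and learn its true cost ...
//     meter.adjust_gas(charged, Tok::actual());        // refund the difference
//
// After such a sequence `gas_required` reports the temporary peak while `gas_consumed`
// reports the smaller, post-refund amount.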
+ pub fn gas_required(&self) -> Weight { + self.gas_limit - self.gas_left_lowest() } - /// Returns how much gas was used. - pub fn gas_spent(&self) -> Gas { + /// Returns how much gas was spent + pub fn gas_consumed(&self) -> Weight { self.gas_limit - self.gas_left } /// Returns how much gas left from the initial budget. - pub fn gas_left(&self) -> Gas { + pub fn gas_left(&self) -> Weight { self.gas_left } /// Turn this GasMeter into a DispatchResult that contains the actually used gas. - pub fn into_dispatch_result(self, result: Result) -> DispatchResultWithPostInfo + pub fn into_dispatch_result( + self, + result: Result, + base_weight: Weight, + ) -> DispatchResultWithPostInfo where E: Into, { let post_info = PostDispatchInfo { - actual_weight: Some(self.gas_spent()), + actual_weight: Some(self.gas_consumed().saturating_add(base_weight)), pays_fee: Default::default(), }; @@ -203,76 +215,64 @@ impl GasMeter { .map_err(|e| DispatchErrorWithPostInfo { post_info, error: e.into().error }) } + fn gas_left_lowest(&self) -> Weight { + self.gas_left_lowest.min(self.gas_left) + } + #[cfg(test)] pub fn tokens(&self) -> &[ErasedToken] { &self.tokens } } -/// A simple utility macro that helps to match against a -/// list of tokens. -#[macro_export] -macro_rules! match_tokens { - ($tokens_iter:ident,) => { - }; - ($tokens_iter:ident, $x:expr, $($rest:tt)*) => { - { - let next = ($tokens_iter).next().unwrap(); - let pattern = $x; - - // Note that we don't specify the type name directly in this macro, - // we only have some expression $x of some type. At the same time, we - // have an iterator of Box and to downcast we need to specify - // the type which we want downcast to. - // - // So what we do is we assign `_pattern_typed_next_ref` to a variable which has - // the required type. - // - // Then we make `_pattern_typed_next_ref = token.downcast_ref()`. This makes - // rustc infer the type `T` (in `downcast_ref`) to be the same as in $x. - - let mut _pattern_typed_next_ref = &pattern; - _pattern_typed_next_ref = match next.token.downcast_ref() { - Some(p) => { - assert_eq!(p, &pattern); - p - } - None => { - panic!("expected type {} got {}", stringify!($x), next.description); - } - }; - } - - match_tokens!($tokens_iter, $($rest)*); - }; -} - #[cfg(test)] mod tests { use super::{GasMeter, Token}; use crate::tests::Test; + /// A simple utility macro that helps to match against a + /// list of tokens. + macro_rules! match_tokens { + ($tokens_iter:ident,) => { + }; + ($tokens_iter:ident, $x:expr, $($rest:tt)*) => { + { + let next = ($tokens_iter).next().unwrap(); + let pattern = $x; + + // Note that we don't specify the type name directly in this macro, + // we only have some expression $x of some type. At the same time, we + // have an iterator of Box and to downcast we need to specify + // the type which we want downcast to. + // + // So what we do is we assign `_pattern_typed_next_ref` to a variable which has + // the required type. + // + // Then we make `_pattern_typed_next_ref = token.downcast_ref()`. This makes + // rustc infer the type `T` (in `downcast_ref`) to be the same as in $x. + + let mut _pattern_typed_next_ref = &pattern; + _pattern_typed_next_ref = match next.token.downcast_ref() { + Some(p) => { + assert_eq!(p, &pattern); + p + } + None => { + panic!("expected type {} got {}", stringify!($x), next.description); + } + }; + } + + match_tokens!($tokens_iter, $($rest)*); + }; + } + /// A trivial token that charges the specified number of gas units. 
#[derive(Copy, Clone, PartialEq, Eq, Debug)] struct SimpleToken(u64); impl Token for SimpleToken { - type Metadata = (); - fn calculate_amount(&self, _metadata: &()) -> u64 { self.0 } - } - - struct MultiplierTokenMetadata { - multiplier: u64, - } - /// A simple token that charges for the given amount multiplied to - /// a multiplier taken from a given metadata. - #[derive(Copy, Clone, PartialEq, Eq, Debug)] - struct MultiplierToken(u64); - - impl Token for MultiplierToken { - type Metadata = MultiplierTokenMetadata; - fn calculate_amount(&self, metadata: &MultiplierTokenMetadata) -> u64 { - // Probably you want to use saturating mul in production code. - self.0 * metadata.multiplier + fn weight(&self) -> u64 { + self.0 } } @@ -282,34 +282,20 @@ mod tests { assert_eq!(gas_meter.gas_left(), 50000); } - #[test] - fn simple() { - let mut gas_meter = GasMeter::::new(50000); - - let result = gas_meter - .charge(&MultiplierTokenMetadata { multiplier: 3 }, MultiplierToken(10)); - assert!(!result.is_out_of_gas()); - - assert_eq!(gas_meter.gas_left(), 49_970); - } - #[test] fn tracing() { let mut gas_meter = GasMeter::::new(50000); - assert!(!gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); - assert!(!gas_meter - .charge(&MultiplierTokenMetadata { multiplier: 3 }, MultiplierToken(10)) - .is_out_of_gas()); + assert!(!gas_meter.charge(SimpleToken(1)).is_err()); - let mut tokens = gas_meter.tokens()[0..2].iter(); - match_tokens!(tokens, SimpleToken(1), MultiplierToken(10),); + let mut tokens = gas_meter.tokens().iter(); + match_tokens!(tokens, SimpleToken(1),); } // This test makes sure that nothing can be executed if there is no gas. #[test] fn refuse_to_execute_anything_if_zero() { let mut gas_meter = GasMeter::::new(0); - assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + assert!(gas_meter.charge(SimpleToken(1)).is_err()); } // Make sure that if the gas meter is charged by exceeding amount then not only an error @@ -322,18 +308,17 @@ mod tests { let mut gas_meter = GasMeter::::new(200); // The first charge is should lead to OOG. - assert!(gas_meter.charge(&(), SimpleToken(300)).is_out_of_gas()); + assert!(gas_meter.charge(SimpleToken(300)).is_err()); // The gas meter is emptied at this moment, so this should also fail. - assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Charging the exact amount that the user paid for should be // possible. #[test] fn charge_exact_amount() { let mut gas_meter = GasMeter::::new(25); - assert!(!gas_meter.charge(&(), SimpleToken(25)).is_out_of_gas()); + assert!(!gas_meter.charge(SimpleToken(25)).is_err()); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index cd5cbe5d32a40..77efcc6986e64 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -1,68 +1,75 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! # Contract Module +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Contract Pallet //! -//! The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. +//! The Contract module provides functionality for the runtime to deploy and execute WebAssembly +//! smart-contracts. //! -//! - [`contract::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! -//! This module extends accounts based on the `Currency` trait to have smart-contract functionality. It can -//! be used with other modules that implement accounts based on `Currency`. These "smart-contract accounts" -//! have the ability to instantiate smart-contracts and make calls to other contract and non-contract accounts. +//! This module extends accounts based on the [`Currency`] trait to have smart-contract +//! functionality. It can be used with other modules that implement accounts based on [`Currency`]. +//! These "smart-contract accounts" have the ability to instantiate smart-contracts and make calls +//! to other contract and non-contract accounts. //! -//! The smart-contract code is stored once in a `code_cache`, and later retrievable via its `code_hash`. -//! This means that multiple smart-contracts can be instantiated from the same `code_cache`, without replicating -//! the code each time. +//! The smart-contract code is stored once in a code cache, and later retrievable via its hash. +//! This means that multiple smart-contracts can be instantiated from the same hash, without +//! replicating the code each time. //! -//! When a smart-contract is called, its associated code is retrieved via the code hash and gets executed. -//! This call can alter the storage entries of the smart-contract account, instantiate new smart-contracts, -//! or call other smart-contracts. +//! When a smart-contract is called, its associated code is retrieved via the code hash and gets +//! executed. This call can alter the storage entries of the smart-contract account, instantiate new +//! smart-contracts, or call other smart-contracts. //! -//! Finally, when an account is reaped, its associated code and storage of the smart-contract account -//! will also be deleted. +//! Finally, when an account is reaped, its associated code and storage of the smart-contract +//! account will also be deleted. //! //! ### Gas //! -//! Senders must specify a gas limit with every call, as all instructions invoked by the smart-contract require gas. -//! Unused gas is refunded after the call, regardless of the execution outcome. +//! Senders must specify a gas limit with every call, as all instructions invoked by the +//! smart-contract require gas. Unused gas is refunded after the call, regardless of the execution +//! outcome. //! 
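//! A hedged sketch of the caller's side, assuming a runtime where `Contracts` is this
//! pallet, the address lookup is the identity, and `ALICE`/`contract_addr` are placeholders:
//!
//!     let result = Contracts::call(
//!         Origin::signed(ALICE), // the caller, who pays for the consumed gas
//!         contract_addr,         // account of the contract to execute
//!         0,                     // balance transferred along with the call
//!         1_000_000_000,         // the mandatory gas limit for this call
//!         Vec::new(),            // input data handed to the contract
//!     );
//!     // Gas left over after execution is refunded to the caller, success or failure.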
-//! If the gas limit is reached, then all calls and state changes (including balance transfers) are only -//! reverted at the current call's contract level. For example, if contract A calls B and B runs out of gas mid-call, -//! then all of B's calls are reverted. Assuming correct error handling by contract A, A's other calls and state -//! changes still persist. +//! If the gas limit is reached, then all calls and state changes (including balance transfers) are +//! only reverted at the current call's contract level. For example, if contract A calls B and B +//! runs out of gas mid-call, then all of B's calls are reverted. Assuming correct error handling by +//! contract A, A's other calls and state changes still persist. //! //! ### Notable Scenarios //! -//! Contract call failures are not always cascading. When failures occur in a sub-call, they do not "bubble up", -//! and the call will only revert at the specific contract level. For example, if contract A calls contract B, and B -//! fails, A can decide how to handle that failure, either proceeding or reverting A's changes. +//! Contract call failures are not always cascading. When failures occur in a sub-call, they do not +//! "bubble up", and the call will only revert at the specific contract level. For example, if +//! contract A calls contract B, and B fails, A can decide how to handle that failure, either +//! proceeding or reverting A's changes. //! //! ## Interface //! //! ### Dispatchable functions //! -//! * `put_code` - Stores the given binary Wasm code into the chain's storage and returns its `code_hash`. -//! * `instantiate` - Deploys a new contract from the given `code_hash`, optionally transferring some balance. -//! This instantiates a new smart contract account and calls its contract deploy handler to -//! initialize the contract. -//! * `call` - Makes a call to an account, optionally transferring some balance. +//! * [`Pallet::instantiate_with_code`] - Deploys a new contract from the supplied wasm binary, +//! optionally transferring +//! some balance. This instantiates a new smart contract account with the supplied code and +//! calls its constructor to initialize the contract. +//! * [`Pallet::instantiate`] - The same as `instantiate_with_code` but instead of uploading new +//! code an existing `code_hash` is supplied. +//! * [`Pallet::call`] - Makes a call to an account, optionally transferring some balance. //! //! ## Usage //! @@ -72,362 +79,370 @@ //! * [`ink`](https://github.com/paritytech/ink) is //! an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing //! WebAssembly based smart contracts in the Rust programming language. This is a work in progress. -//! -//! ## Related Modules -//! -//! 
* [Balances](../pallet_balances/index.html) #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "runtime-benchmarks", recursion_limit="256")] +#![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "512")] #[macro_use] mod gas; -mod storage; -mod exec; -mod wasm; -mod rent; mod benchmarking; +mod exec; +mod migration; mod schedule; -mod weight_info; +mod storage; +mod wasm; + +pub mod chain_extension; +pub mod weights; #[cfg(test)] mod tests; -use crate::exec::ExecutionContext; -use crate::wasm::{WasmLoader, WasmVm}; - -pub use crate::gas::{Gas, GasMeter}; -pub use crate::exec::{ExecResult, ExecReturnValue}; -pub use crate::wasm::ReturnCode as RuntimeReturnCode; -pub use crate::weight_info::WeightInfo; -pub use crate::schedule::{Schedule, HostFnWeights, InstructionWeights}; - -use sp_core::crypto::UncheckedFrom; -use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; -use codec::{Codec, Encode, Decode}; -use sp_runtime::{ - traits::{ - Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, Convert, Saturating, - }, - RuntimeDebug, +pub use crate::{ + exec::Frame, + pallet::*, + schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, +}; +use crate::{ + exec::{Executable, Stack as ExecStack}, + gas::GasMeter, + storage::{ContractInfo, DeletedContract, Storage}, + wasm::PrefabWasmModule, + weights::WeightInfo, }; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, ensure, - parameter_types, storage::child::ChildInfo, - dispatch::{DispatchResult, DispatchResultWithPostInfo}, - traits::{OnUnbalanced, Currency, Get, Time, Randomness}, + dispatch::Dispatchable, + traits::{Contains, Currency, Get, Randomness, StorageVersion, Time}, + weights::{GetDispatchInfo, PostDispatchInfo, Weight}, +}; +use frame_system::Pallet as System; +use pallet_contracts_primitives::{ + Code, ContractAccessError, ContractExecResult, ContractInstantiateResult, GetStorageResult, + InstantiateReturnValue, }; -use frame_system::{ensure_signed, ensure_root}; -use pallet_contracts_primitives::{RentProjection, ContractAccessError}; -use frame_support::weights::Weight; +use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; +use sp_std::prelude::*; -pub type CodeHash = ::Hash; -pub type TrieId = Vec; +type CodeHash = ::Hash; +type TrieId = Vec; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; -/// A function that generates an `AccountId` for a contract upon instantiation. -pub trait ContractAddressFor { - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; -} +/// The current storage version. +const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); -/// Information for managing an account and its sub trie abstraction. 
-/// This is the required info to cache for an account -#[derive(Encode, Decode, RuntimeDebug)] -pub enum ContractInfo { - Alive(AliveContractInfo), - Tombstone(TombstoneContractInfo), -} +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; -impl ContractInfo { - /// If contract is alive then return some alive info - pub fn get_alive(self) -> Option> { - if let ContractInfo::Alive(alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some reference to alive info - pub fn as_alive(&self) -> Option<&AliveContractInfo> { - if let ContractInfo::Alive(ref alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some mutable reference to alive info - pub fn as_alive_mut(&mut self) -> Option<&mut AliveContractInfo> { - if let ContractInfo::Alive(ref mut alive) = self { - Some(alive) - } else { - None - } - } + #[pallet::config] + pub trait Config: frame_system::Config { + /// The time implementation used to supply timestamps to conntracts through `seal_now`. + type Time: Time; - /// If contract is tombstone then return some tombstone info - pub fn get_tombstone(self) -> Option> { - if let ContractInfo::Tombstone(tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some reference to tombstone info - pub fn as_tombstone(&self) -> Option<&TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some mutable reference to tombstone info - pub fn as_tombstone_mut(&mut self) -> Option<&mut TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref mut tombstone) = self { - Some(tombstone) - } else { - None - } - } -} + /// The generator used to supply randomness to contracts through `seal_random`. + type Randomness: Randomness; -pub type AliveContractInfo = - RawAliveContractInfo, BalanceOf, ::BlockNumber>; + /// The currency in which fees are paid and contract balances are held. + type Currency: Currency; -/// Information for managing an account and its sub trie abstraction. -/// This is the required info to cache for an account. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct RawAliveContractInfo { - /// Unique ID for the subtree encoded as a bytes vector. - pub trie_id: TrieId, - /// The total number of bytes used by this contract. - /// - /// It is a sum of each key-value pair stored by this contract. - pub storage_size: u32, - /// The number of key-value pairs that have values of zero length. - /// The condition `empty_pair_count ≤ total_pair_count` always holds. - pub empty_pair_count: u32, - /// The total number of key-value pairs in storage of this contract. - pub total_pair_count: u32, - /// The code associated with a given account. - pub code_hash: CodeHash, - /// Pay rent at most up to this value. - pub rent_allowance: Balance, - /// Last block rent has been payed. - pub deduct_block: BlockNumber, - /// Last block child storage has been written. - pub last_write: Option, -} - -impl RawAliveContractInfo { - /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_info(&self) -> ChildInfo { - child_trie_info(&self.trie_id[..]) - } -} + /// The overarching event type. + type Event: From> + IsType<::Event>; -/// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { - ChildInfo::new_default(trie_id) -} - -pub type TombstoneContractInfo<T> = - RawTombstoneContractInfo<<T as frame_system::Trait>::Hash, <T as frame_system::Trait>::Hashing>; + /// The overarching call type. + type Call: Dispatchable<Origin = Self::Origin, PostInfo = PostDispatchInfo> + + GetDispatchInfo + + codec::Decode + + IsType<<Self as frame_system::Config>::Call>; -#[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] -pub struct RawTombstoneContractInfo<H, Hasher>(H, PhantomData<Hasher>); - -impl<H, Hasher> RawTombstoneContractInfo<H, Hasher> -where - H: Member + MaybeSerializeDeserialize + Debug - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default - + sp_std::hash::Hash + Codec, - Hasher: Hash<Output=H>, -{ - fn new(storage_root: &[u8], code_hash: H) -> Self { - let mut buf = Vec::new(); - storage_root.using_encoded(|encoded| buf.extend_from_slice(encoded)); - buf.extend_from_slice(code_hash.as_ref()); - RawTombstoneContractInfo(<Hasher as Hash>::hash(&buf[..]), PhantomData) - } -} - -impl<T: Trait> From<AliveContractInfo<T>> for ContractInfo<T> { - fn from(alive_info: AliveContractInfo<T>) -> Self { - Self::Alive(alive_info) + /// Filter that is applied to calls dispatched by contracts. + /// + /// Use this filter to control which dispatchables are callable by contracts. + /// This is applied in **addition** to [`frame_system::Config::BaseCallFilter`]. + /// It is recommended to treat this as a whitelist. + /// + /// # Subsistence Threshold + /// + /// The runtime **must** ensure that any allowed dispatchable keeps the + /// `total_balance` of the contract above [`Pallet::subsistence_threshold()`]. + /// Otherwise users could clutter the storage with contracts. + /// + /// # Stability + /// + /// The runtime **must** make sure that all dispatchables that are callable by + /// contracts remain stable. In addition [`Self::Call`] itself must remain stable. + /// This means that no existing variants are allowed to switch their positions. + /// + /// # Note + /// + /// Note that dispatchables that are called via contracts do not spawn their + /// own wasm instance for each call (as opposed to when called via a transaction). + /// Therefore please make sure to be restrictive about which dispatchables are allowed + /// in order to not introduce a new DoS vector like memory allocation patterns that can + /// be exploited to drive the runtime into a panic. + type CallFilter: Contains<<Self as frame_system::Config>::Call>; + + /// Used to answer contracts' queries regarding the current weight price. This is **not** + /// used to calculate the actual fee and is only for informational purposes. + type WeightPrice: Convert<Weight, BalanceOf<Self>>; + + /// Describes the weights of the dispatchables of this module and is also used to + /// construct a default cost schedule. + type WeightInfo: WeightInfo; + + /// Type that allows the runtime authors to add new host functions for a contract to call. + type ChainExtension: chain_extension::ChainExtension<Self>; + + /// Cost schedule and limits. + #[pallet::constant] + type Schedule: Get<Schedule<Self>>; + + /// The deposit that must be placed into the contract's account to instantiate it. + /// This is in **addition** to the [`pallet_balances::Pallet::ExistentialDeposit`]. + /// The minimum balance for a contract's account can be queried using + /// [`Pallet::subsistence_threshold`]. + #[pallet::constant] + type ContractDeposit: Get<BalanceOf<Self>>; + + /// The type of the call stack determines the maximum nesting depth of contract calls. + /// + /// The allowed depth is `CallStack::size() + 1`. + /// Therefore a size of `0` means that a contract cannot use call or instantiate. + /// In other words, in that case only the origin, the so-called "root contract", is + /// allowed to execute.
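// A hedged configuration sketch, assuming a runtime named `Runtime`: the nesting depth
// is fixed by the array length chosen for `CallStack`, e.g.
//
//     impl pallet_contracts::Config for Runtime {
//         // ... other associated types ...
//         type CallStack = [pallet_contracts::Frame<Self>; 31];
//     }
//
// which allows the root contract plus up to 31 nested calls or instantiations.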
+ type CallStack: smallvec::Array>; + + /// The maximum number of tries that can be queued for deletion. + #[pallet::constant] + type DeletionQueueDepth: Get; + + /// The maximum amount of weight that can be consumed per block for lazy trie removal. + #[pallet::constant] + type DeletionWeightLimit: Get; } -} -/// Get a trie id (trie id must be unique and collision resistant depending upon its context). -/// Note that it is different than encode because trie id should be collision resistant -/// (being a proper unique identifier). -pub trait TrieIdGenerator { - /// Get a trie id for an account, using reference to parent account trie id to ensure - /// uniqueness of trie id. - /// - /// The implementation must ensure every new trie id is unique: two consecutive calls with the - /// same parameter needs to return different trie id values. - fn trie_id(account_id: &AccountId) -> TrieId; -} + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(PhantomData); -/// Get trie id from `account_id`. -pub struct TrieIdFromParentCounter(PhantomData); + #[pallet::hooks] + impl Hooks> for Pallet + where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { + fn on_initialize(_block: T::BlockNumber) -> Weight { + // We do not want to go above the block limit and rather avoid lazy deletion + // in that case. This should only happen on runtime upgrades. + let weight_limit = T::BlockWeights::get() + .max_block + .saturating_sub(System::::block_weight().total()) + .min(T::DeletionWeightLimit::get()); + Storage::::process_deletion_queue_batch(weight_limit) + .saturating_add(T::WeightInfo::on_initialize()) + } -/// This generator uses inner counter for account id and applies the hash over `AccountId + -/// accountid_counter`. -impl TrieIdGenerator for TrieIdFromParentCounter -where - T::AccountId: AsRef<[u8]> -{ - fn trie_id(account_id: &T::AccountId) -> TrieId { - // Note that skipping a value due to error is not an issue here. - // We only need uniqueness, not sequence. - let new_seed = AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - let mut buf = Vec::new(); - buf.extend_from_slice(account_id.as_ref()); - buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - T::Hashing::hash(&buf[..]).as_ref().into() + fn on_runtime_upgrade() -> Weight { + migration::migrate::() + } } -} -pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -pub type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; - -parameter_types! { - /// A reasonable default value for [`Trait::SignedClaimedHandicap`]. - pub const DefaultSignedClaimHandicap: u32 = 2; - /// A reasonable default value for [`Trait::TombstoneDeposit`]. - pub const DefaultTombstoneDeposit: u32 = 16; - /// A reasonable default value for [`Trait::StorageSizeOffset`]. - pub const DefaultStorageSizeOffset: u32 = 8; - /// A reasonable default value for [`Trait::RentByteFee`]. - pub const DefaultRentByteFee: u32 = 4; - /// A reasonable default value for [`Trait::RentDepositOffset`]. - pub const DefaultRentDepositOffset: u32 = 1000; - /// A reasonable default value for [`Trait::SurchargeReward`]. - pub const DefaultSurchargeReward: u32 = 150; - /// A reasonable default value for [`Trait::MaxDepth`]. - pub const DefaultMaxDepth: u32 = 32; - /// A reasonable default value for [`Trait::MaxValueSize`]. 
- pub const DefaultMaxValueSize: u32 = 16_384; -} - -pub trait Trait: frame_system::Trait { - type Time: Time; - type Randomness: Randomness; - - /// The currency in which fees are paid and contract balances are held. - type Currency: Currency; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// A function type to get the contract address given the instantiator. - type DetermineContractAddress: ContractAddressFor, Self::AccountId>; - - /// trie id generator - type TrieIdGenerator: TrieIdGenerator; - - /// Handler for rent payments. - type RentPayment: OnUnbalanced>; - - /// Number of block delay an extrinsic claim surcharge has. - /// - /// When claim surcharge is called by an extrinsic the rent is checked - /// for current_block - delay - type SignedClaimHandicap: Get; - - /// The minimum amount required to generate a tombstone. - type TombstoneDeposit: Get>; - - /// A size offset for an contract. A just created account with untouched storage will have that - /// much of storage from the perspective of the state rent. - /// - /// This is a simple way to ensure that contracts with empty storage eventually get deleted by - /// making them pay rent. This creates an incentive to remove them early in order to save rent. - type StorageSizeOffset: Get; - - /// Price of a byte of storage per one block interval. Should be greater than 0. - type RentByteFee: Get>; - - /// The amount of funds a contract should deposit in order to offset - /// the cost of one byte. - /// - /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, - /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. - /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, - /// then it would pay 500 BU/day. - type RentDepositOffset: Get>; + #[pallet::call] + impl Pallet + where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { + /// Makes a call to an account, optionally transferring some balance. + /// + /// * If the account is a smart-contract account, the associated code will be + /// executed and any value will be transferred. + /// * If the account is a regular account, any value will be transferred. + /// * If no account exists and the call value is not less than `existential_deposit`, + /// a regular account will be created and any value will be transferred. + #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] + pub fn call( + origin: OriginFor, + dest: ::Source, + #[pallet::compact] value: BalanceOf, + #[pallet::compact] gas_limit: Weight, + data: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = T::Schedule::get(); + let result = ExecStack::>::run_call( + origin, + dest, + &mut gas_meter, + &schedule, + value, + data, + None, + ); + gas_meter.into_dispatch_result(result, T::WeightInfo::call()) + } - /// Reward that is received by the party whose touch has led - /// to removal of a contract. - type SurchargeReward: Get>; + /// Instantiates a new contract from the supplied `code` optionally transferring + /// some balance. + /// + /// This is the only function that can deploy new code to the chain. + /// + /// # Parameters + /// + /// * `endowment`: The balance to transfer from the `origin` to the newly created contract. + /// * `gas_limit`: The gas limit enforced when executing the constructor. 
+ /// * `code`: The contract code to deploy in raw bytes. + /// * `data`: The input data to pass to the contract constructor. + /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`]. + /// + /// Instantiation is executed as follows: + /// + /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that + /// code. + /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. + /// - The destination address is computed based on the sender, code_hash and the salt. + /// - The smart-contract account is created at the computed address. + /// - The `endowment` is transferred to the new account. + /// - The `deploy` function is executed in the context of the newly-created account. + #[pallet::weight( + T::WeightInfo::instantiate_with_code( + code.len() as u32 / 1024, + salt.len() as u32 / 1024, + ) + .saturating_add(*gas_limit) + )] + pub fn instantiate_with_code( + origin: OriginFor, + #[pallet::compact] endowment: BalanceOf, + #[pallet::compact] gas_limit: Weight, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let code_len = code.len() as u32; + ensure!(code_len <= T::Schedule::get().limits.code_len, Error::::CodeTooLarge); + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = T::Schedule::get(); + let executable = PrefabWasmModule::from_code(code, &schedule)?; + let code_len = executable.code_len(); + ensure!(code_len <= T::Schedule::get().limits.code_len, Error::::CodeTooLarge); + let result = ExecStack::>::run_instantiate( + origin, + executable, + &mut gas_meter, + &schedule, + endowment, + data, + &salt, + None, + ) + .map(|(_address, output)| output); + gas_meter.into_dispatch_result( + result, + T::WeightInfo::instantiate_with_code(code_len / 1024, salt.len() as u32 / 1024), + ) + } - /// The maximum nesting level of a call/instantiate stack. - type MaxDepth: Get; + /// Instantiates a contract from a previously deployed wasm binary. + /// + /// This function is identical to [`Self::instantiate_with_code`] but without the + /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary + /// must be supplied. + #[pallet::weight( + T::WeightInfo::instantiate(salt.len() as u32 / 1024).saturating_add(*gas_limit) + )] + pub fn instantiate( + origin: OriginFor, + #[pallet::compact] endowment: BalanceOf, + #[pallet::compact] gas_limit: Weight, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = T::Schedule::get(); + let executable = PrefabWasmModule::from_storage(code_hash, &schedule, &mut gas_meter)?; + let result = ExecStack::>::run_instantiate( + origin, + executable, + &mut gas_meter, + &schedule, + endowment, + data, + &salt, + None, + ) + .map(|(_address, output)| output); + gas_meter + .into_dispatch_result(result, T::WeightInfo::instantiate(salt.len() as u32 / 1024)) + } + } - /// The maximum size of a storage value and event payload in bytes. - type MaxValueSize: Get; + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Contract deployed by address at the specified address. \[deployer, contract\] + Instantiated(T::AccountId, T::AccountId), - /// Used to answer contracts's queries regarding the current weight price. This is **not** - /// used to calculate the actual fee and is only for informational purposes. 
- type WeightPrice: Convert<Weight, BalanceOf<Self>>; + /// Contract has been removed. + /// \[contract, beneficiary\] + /// + /// # Params + /// + /// - `contract`: The contract that was terminated. + /// - `beneficiary`: The account that received the contract's remaining balance. + /// + /// # Note + /// + /// The only way for a contract to be removed and for this event to be emitted is by + /// calling `seal_terminate`. + Terminated(T::AccountId, T::AccountId), - /// Describes the weights of the dispatchables of this module and is also used to - /// construct a default cost schedule. - type WeightInfo: WeightInfo; -} + /// Code with the specified hash has been stored. \[code_hash\] + CodeStored(T::Hash), -/// Simple contract address determiner. -/// -/// Address calculated from the code (of the constructor), input data to the constructor, -/// and the account id that requested the account creation. -/// -/// Formula: `blake2_256(blake2_256(code) + blake2_256(data) + origin)` -pub struct SimpleAddressDeterminer<T: Trait>(PhantomData<T>); -impl<T: Trait> ContractAddressFor<CodeHash<T>, T::AccountId> for SimpleAddressDeterminer<T> -where - T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]> -{ - fn contract_address_for(code_hash: &CodeHash<T>, data: &[u8], origin: &T::AccountId) -> T::AccountId { - let data_hash = T::Hashing::hash(data); + /// Triggered when the current schedule is updated. + /// \[version\] + /// + /// # Params + /// + /// - `version`: The version of the newly set schedule. + ScheduleUpdated(u32), - let mut buf = Vec::new(); - buf.extend_from_slice(code_hash.as_ref()); - buf.extend_from_slice(data_hash.as_ref()); - buf.extend_from_slice(origin.as_ref()); + /// A custom event emitted by the contract. + /// \[contract, data\] + /// + /// # Params + /// + /// - `contract`: The contract that emitted the event. + /// - `data`: Data supplied by the contract. Metadata generated during contract compilation + /// is needed to decode it. + ContractEmitted(T::AccountId, Vec<u8>), - UncheckedFrom::unchecked_from(T::Hashing::hash(&buf[..])) + /// A code with the specified hash was removed. + /// \[code_hash\] + /// + /// This happens when the last contract that uses this code hash was removed. + CodeRemoved(T::Hash), } -} -decl_error! { - /// Error for the contracts module. - pub enum Error for Module<T: Trait> { + #[pallet::error] + pub enum Error<T> { /// A new schedule must have a greater version than the current one. InvalidScheduleVersion, - /// An origin must be signed or inherent and auxiliary sender only provided on inherent. - InvalidSurchargeClaim, - /// Cannot restore from nonexisting or tombstone contract. - InvalidSourceContract, - /// Cannot restore to nonexisting or alive contract. - InvalidDestinationContract, - /// Tombstones don't match. - InvalidTombstone, - /// An origin TrieId written in the current block. - InvalidContractOrigin, /// The executed contract exhausted its gas limit. OutOfGas, /// The output buffer supplied to a contract API call was too small. OutputBufferTooSmall, /// Performing the requested transfer would have brought the contract below - /// the subsistence threshold. No transfer is allowed to do this in order to allow - /// for a tombstone to be created. Use `seal_terminate` to remove a contract without - /// leaving a tombstone behind. + /// the subsistence threshold. No transfer is allowed to do this. Use `seal_terminate` + /// to recover a deposit. BelowSubsistenceThreshold, /// The newly created contract is below the subsistence threshold after executing /// its constructor. No contracts are allowed to exist below that threshold.
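// Illustrative only: exercising the dispatchables defined above from a test, with `wasm`
// standing in for a compiled contract binary and `ALICE`/`GAS_LIMIT` for the usual
// test fixtures:
//
//     Contracts::instantiate_with_code(
//         Origin::signed(ALICE),
//         1_000,      // endowment moved into the new contract's account
//         GAS_LIMIT,  // gas limit enforced while running the constructor
//         wasm,       // raw code; its hash becomes the `code_hash`
//         Vec::new(), // constructor input data
//         Vec::new(), // salt used for address derivation
//     )?;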
@@ -439,10 +454,10 @@ decl_error! { /// Performing a call was denied because the calling depth reached the limit /// of what is specified in the schedule. MaxCallDepthReached, - /// The contract that was called is either no contract at all (a plain account) - /// or is a tombstone. - NotCallable, - /// The code supplied to `put_code` exceeds the limit specified in the current schedule. + /// No contract was found at the specified address. + ContractNotFound, + /// The code supplied to `instantiate_with_code` exceeds the limit specified in the + /// current schedule. CodeTooLarge, /// No code could be found at the supplied code hash. CodeNotFound, @@ -454,343 +469,244 @@ decl_error! { ContractTrapped, /// The size defined in `T::MaxValueSize` was exceeded. ValueTooLarge, - } -} - -decl_module! { - /// Contracts module. - pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - - /// Number of block delay an extrinsic claim surcharge has. - /// - /// When claim surcharge is called by an extrinsic the rent is checked - /// for current_block - delay - const SignedClaimHandicap: T::BlockNumber = T::SignedClaimHandicap::get(); - - /// The minimum amount required to generate a tombstone. - const TombstoneDeposit: BalanceOf = T::TombstoneDeposit::get(); - - /// A size offset for an contract. A just created account with untouched storage will have that - /// much of storage from the perspective of the state rent. + /// Termination of a contract is not allowed while the contract is already + /// on the call stack. Can be triggered by `seal_terminate`. + TerminatedWhileReentrant, + /// `seal_call` forwarded this contracts input. It therefore is no longer available. + InputForwarded, + /// The subject passed to `seal_random` exceeds the limit. + RandomSubjectTooLong, + /// The amount of topics passed to `seal_deposit_events` exceeds the limit. + TooManyTopics, + /// The topics passed to `seal_deposit_events` contains at least one duplicate. + DuplicateTopics, + /// The chain does not provide a chain extension. Calling the chain extension results + /// in this error. Note that this usually shouldn't happen as deploying such contracts + /// is rejected. + NoChainExtension, + /// Removal of a contract failed because the deletion queue is full. /// - /// This is a simple way to ensure that contracts with empty storage eventually get deleted - /// by making them pay rent. This creates an incentive to remove them early in order to save - /// rent. - const StorageSizeOffset: u32 = T::StorageSizeOffset::get(); - - /// Price of a byte of storage per one block interval. Should be greater than 0. - const RentByteFee: BalanceOf = T::RentByteFee::get(); - - /// The amount of funds a contract should deposit in order to offset - /// the cost of one byte. + /// This can happen when calling `seal_terminate`. + /// The queue is filled by deleting contracts and emptied by a fixed amount each block. + /// Trying again during another block is the only way to resolve this issue. + DeletionQueueFull, + /// A storage modification exhausted the 32bit type that holds the storage size. /// - /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, - /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. - /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, - /// then it would pay 500 BU/day. 
- const RentDepositOffset: BalanceOf = T::RentDepositOffset::get(); - - /// Reward that is received by the party whose touch has led - /// to removal of a contract. - const SurchargeReward: BalanceOf = T::SurchargeReward::get(); - - /// The maximum nesting level of a call/instantiate stack. A reasonable default - /// value is 100. - const MaxDepth: u32 = T::MaxDepth::get(); - - /// The maximum size of a storage value in bytes. A reasonable default is 16 KiB. - const MaxValueSize: u32 = T::MaxValueSize::get(); - - fn deposit_event() = default; - - /// Updates the schedule for metering contracts. + /// This can either happen when the accumulated storage in bytes is too large or + /// when number of storage items is too large. + StorageExhausted, + /// A contract with the same AccountId already exists. + DuplicateContract, + /// A contract self destructed in its constructor. /// - /// The schedule must have a greater version than the stored schedule. - #[weight = T::WeightInfo::update_schedule()] - pub fn update_schedule(origin, schedule: Schedule) -> DispatchResult { - ensure_root(origin)?; - if >::current_schedule().version >= schedule.version { - Err(Error::::InvalidScheduleVersion)? - } - - Self::deposit_event(RawEvent::ScheduleUpdated(schedule.version)); - CurrentSchedule::put(schedule); - - Ok(()) - } - - /// Stores the given binary Wasm code into the chain's storage and returns its `codehash`. - /// You can instantiate contracts only with stored code. - #[weight = T::WeightInfo::put_code(code.len() as u32 / 1024)] - pub fn put_code( - origin, - code: Vec - ) -> DispatchResult { - ensure_signed(origin)?; - let schedule = >::current_schedule(); - ensure!(code.len() as u32 <= schedule.max_code_size, Error::::CodeTooLarge); - let result = wasm::save_code::(code, &schedule); - if let Ok(code_hash) = result { - Self::deposit_event(RawEvent::CodeStored(code_hash)); - } - result.map(|_| ()).map_err(Into::into) - } + /// This can be triggered by a call to `seal_terminate`. + TerminatedInConstructor, + /// The debug message specified to `seal_debug_message` does contain invalid UTF-8. + DebugMessageInvalidUTF8, + /// A call tried to invoke a contract that is flagged as non-reentrant. + ReentranceDenied, + } - /// Makes a call to an account, optionally transferring some balance. - /// - /// * If the account is a smart-contract account, the associated code will be - /// executed and any value will be transferred. - /// * If the account is a regular account, any value will be transferred. - /// * If no account exists and the call value is not less than `existential_deposit`, - /// a regular account will be created and any value will be transferred. - #[weight = T::WeightInfo::call().saturating_add(*gas_limit)] - pub fn call( - origin, - dest: ::Source, - #[compact] value: BalanceOf, - #[compact] gas_limit: Gas, - data: Vec - ) -> DispatchResultWithPostInfo { - let origin = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - let mut gas_meter = GasMeter::new(gas_limit); + /// A mapping from an original code hash to the original code, untouched by instrumentation. + #[pallet::storage] + pub(crate) type PristineCode = StorageMap<_, Identity, CodeHash, Vec>; - let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - ctx.call(dest, value, gas_meter, data) - }); - gas_meter.into_dispatch_result(result) - } + /// A mapping between an original code hash and instrumented wasm code, ready for execution. 
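// A hedged sketch of how the two code maps relate: `PristineCode` keeps the original
// bytes while `CodeStorage` holds the instrumented module that is actually executed.
// Both are keyed by the same hash:
//
//     let original: Option<Vec<u8>> = PristineCode::<T>::get(&code_hash);
//     let prepared: Option<PrefabWasmModule<T>> = CodeStorage::<T>::get(&code_hash);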
+ #[pallet::storage]
+ pub(crate) type CodeStorage<T: Config> =
+ StorageMap<_, Identity, CodeHash<T>, PrefabWasmModule<T>>;

- /// Instantiates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance.
- ///
- /// Instantiation is executed as follows:
- ///
- /// - The destination address is computed based on the sender and hash of the code.
- /// - The smart-contract account is created at the computed address.
- /// - The `ctor_code` is executed in the context of the newly-created account. Buffer returned
- /// after the execution is saved as the `code` of the account. That code will be invoked
- /// upon any call received by this account.
- /// - The contract is initialized.
- #[weight = T::WeightInfo::instantiate(data.len() as u32 / 1024).saturating_add(*gas_limit)]
- pub fn instantiate(
- origin,
- #[compact] endowment: BalanceOf<T>,
- #[compact] gas_limit: Gas,
- code_hash: CodeHash<T>,
- data: Vec<u8>
- ) -> DispatchResultWithPostInfo {
- let origin = ensure_signed(origin)?;
- let mut gas_meter = GasMeter::new(gas_limit);
+ /// The subtrie counter.
+ #[pallet::storage]
+ pub(crate) type AccountCounter<T: Config> = StorageValue<_, u64, ValueQuery>;

- let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| {
- ctx.instantiate(endowment, gas_meter, &code_hash, data)
- .map(|(_address, output)| output)
- });
- gas_meter.into_dispatch_result(result)
- }
+ /// The code associated with a given account.
+ ///
+ /// TWOX-NOTE: SAFE since `AccountId` is a secure hash.
+ #[pallet::storage]
+ pub(crate) type ContractInfoOf<T: Config> =
+ StorageMap<_, Twox64Concat, T::AccountId, ContractInfo<T>>;

- /// Allows block producers to claim a small reward for evicting a contract. If a block producer
- /// fails to do so, regular users will be allowed to claim the reward.
- ///
- /// If the contract is not evicted as a result of this call, no actions are taken and
- /// the sender is not eligible for the reward.
- #[weight = T::WeightInfo::claim_surcharge()]
- fn claim_surcharge(origin, dest: T::AccountId, aux_sender: Option<T::AccountId>) {
- let origin = origin.into();
- let (signed, rewarded) = match (origin, aux_sender) {
- (Ok(frame_system::RawOrigin::Signed(account)), None) => {
- (true, account)
- },
- (Ok(frame_system::RawOrigin::None), Some(aux_sender)) => {
- (false, aux_sender)
- },
- _ => Err(Error::<T>::InvalidSurchargeClaim)?,
- };
-
- // Add some advantage for block producers (who send unsigned extrinsics) by
- // adding a handicap: for signed extrinsics we use a slightly older block number
- // for the eviction check. This can be viewed as if we pushed regular users back in the past.
- let handicap = if signed {
- T::SignedClaimHandicap::get()
- } else {
- Zero::zero()
- };
-
- // If poking the contract has led to eviction of the contract, give out the rewards.
- if rent::snitch_contract_should_be_evicted::<T>(&dest, handicap) {
- T::Currency::deposit_into_existing(&rewarded, T::SurchargeReward::get())?;
- }
- }
- }
+ /// Evicted contracts that await child trie deletion.
+ ///
+ /// Child trie deletion is a heavy operation depending on the amount of storage items
+ /// stored in said trie. Therefore this operation is performed lazily in `on_initialize`.
+ #[pallet::storage]
+ pub(crate) type DeletionQueue<T: Config> = StorageValue<_, Vec<DeletedContract>, ValueQuery>;
 }

-/// Public APIs provided by the contracts module.
-impl<T: Trait> Module<T> {
+impl<T: Config> Pallet<T>
+where
+ T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
 /// Perform a call to a specified contract.
 ///
- /// This function is similar to `Self::call`, but doesn't perform any address lookups and better
- /// suitable for calling directly from Rust.
+ /// This function is similar to [`Self::call`], but doesn't perform any address lookups
+ /// and is better suited for calling directly from Rust.
+ ///
+ /// # Note
 ///
- /// It returns the exection result and the amount of used weight.
+ /// `debug` should only ever be set to `true` when executing as an RPC because
+ /// it adds allocations and could be abused to drive the runtime into an OOM panic.
+ /// If set to `true` it returns additional human-readable debugging information.
+ ///
+ /// It returns the execution result and the amount of used weight.
 pub fn bare_call(
 origin: T::AccountId,
 dest: T::AccountId,
 value: BalanceOf<T>,
- gas_limit: Gas,
+ gas_limit: Weight,
 input_data: Vec<u8>,
- ) -> (ExecResult, Gas) {
+ debug: bool,
+ ) -> ContractExecResult {
 let mut gas_meter = GasMeter::new(gas_limit);
- (
- Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| {
- ctx.call(dest, value, gas_meter, input_data)
- }),
- gas_meter.gas_spent(),
- )
- }
-
- /// Query storage of a specified contract under a specified key.
- pub fn get_storage(
- address: T::AccountId,
- key: [u8; 32],
- ) -> sp_std::result::Result<Option<Vec<u8>>, ContractAccessError> {
- let contract_info = ContractInfoOf::<T>::get(&address)
- .ok_or(ContractAccessError::DoesntExist)?
- .get_alive()
- .ok_or(ContractAccessError::IsTombstone)?;
-
- let maybe_value = storage::read_contract_storage(&contract_info.trie_id, &key);
- Ok(maybe_value)
- }
-
- pub fn rent_projection(
- address: T::AccountId,
- ) -> sp_std::result::Result<RentProjection<T::BlockNumber>, ContractAccessError> {
- rent::compute_rent_projection::<T>(&address)
- }
-
- /// Put code for benchmarks which does not check or instrument the code.
- #[cfg(feature = "runtime-benchmarks")]
- pub fn put_code_raw(code: Vec<u8>) -> DispatchResult {
- let schedule = <Module<T>>::current_schedule();
- let result = wasm::save_code_raw::<T>(code, &schedule);
- result.map(|_| ()).map_err(Into::into)
+ let schedule = T::Schedule::get();
+ let mut debug_message = if debug { Some(Vec::new()) } else { None };
+ let result = ExecStack::<T, PrefabWasmModule<T>>::run_call(
+ origin,
+ dest,
+ &mut gas_meter,
+ &schedule,
+ value,
+ input_data,
+ debug_message.as_mut(),
+ );
+ ContractExecResult {
+ result: result.map_err(|r| r.error),
+ gas_consumed: gas_meter.gas_consumed(),
+ gas_required: gas_meter.gas_required(),
+ debug_message: debug_message.unwrap_or_default(),
+ }
 }
-}

-impl<T: Trait> Module<T> {
- fn execute_wasm(
+ /// Instantiate a new contract.
+ ///
+ /// This function is similar to [`Self::instantiate`], but doesn't perform any address lookups
+ /// and is better suited for calling directly from Rust.
+ ///
+ /// It returns the execution result, account id and the amount of used weight.
+ ///
+ /// # Note
+ ///
+ /// `debug` should only ever be set to `true` when executing as an RPC because
+ /// it adds allocations and could be abused to drive the runtime into an OOM panic.
+ /// If set to `true` it returns additional human-readable debugging information.
+ pub fn bare_instantiate(
 origin: T::AccountId,
- gas_meter: &mut GasMeter<T>,
- func: impl FnOnce(&mut ExecutionContext<T, WasmVm, WasmLoader>, &mut GasMeter<T>) -> ExecResult,
- ) -> ExecResult {
- let cfg = Config::<T>::preload();
- let vm = WasmVm::new(&cfg.schedule);
- let loader = WasmLoader::new(&cfg.schedule);
- let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader);
- func(&mut ctx, gas_meter)
+ endowment: BalanceOf<T>,
+ gas_limit: Weight,
+ code: Code<CodeHash<T>>,
+ data: Vec<u8>,
+ salt: Vec<u8>,
+ debug: bool,
+ ) -> ContractInstantiateResult<T::AccountId> {
+ let mut gas_meter = GasMeter::new(gas_limit);
+ let schedule = T::Schedule::get();
+ let executable = match code {
+ Code::Upload(Bytes(binary)) => PrefabWasmModule::from_code(binary, &schedule),
+ Code::Existing(hash) => PrefabWasmModule::from_storage(hash, &schedule, &mut gas_meter),
+ };
+ let executable = match executable {
+ Ok(executable) => executable,
+ Err(error) =>
+ return ContractInstantiateResult {
+ result: Err(error.into()),
+ gas_consumed: gas_meter.gas_consumed(),
+ gas_required: gas_meter.gas_required(),
+ debug_message: Vec::new(),
+ },
+ };
+ let mut debug_message = if debug { Some(Vec::new()) } else { None };
+ let result = ExecStack::<T, PrefabWasmModule<T>>::run_instantiate(
+ origin,
+ executable,
+ &mut gas_meter,
+ &schedule,
+ endowment,
+ data,
+ &salt,
+ debug_message.as_mut(),
+ )
+ .and_then(|(account_id, result)| Ok(InstantiateReturnValue { result, account_id }));
+ ContractInstantiateResult {
+ result: result.map_err(|e| e.error),
+ gas_consumed: gas_meter.gas_consumed(),
+ gas_required: gas_meter.gas_required(),
+ debug_message: debug_message.unwrap_or_default(),
+ }
 }
-}
-
-decl_event! {
- pub enum Event<T>
- where
- Balance = BalanceOf<T>,
- <T as frame_system::Trait>::AccountId,
- <T as frame_system::Trait>::Hash
- {
- /// Contract deployed by address at the specified address. \[owner, contract\]
- Instantiated(AccountId, AccountId),
-
- /// Contract has been evicted and is now in tombstone state.
- /// \[contract, tombstone\]
- ///
- /// # Params
- ///
- /// - `contract`: `AccountId`: The account ID of the evicted contract.
- /// - `tombstone`: `bool`: True if the evicted contract left behind a tombstone.
- Evicted(AccountId, bool),
- /// Restoration for a contract has been successful.
- /// \[donor, dest, code_hash, rent_allowance\]
- ///
- /// # Params
- ///
- /// - `donor`: `AccountId`: Account ID of the restoring contract
- /// - `dest`: `AccountId`: Account ID of the restored contract
- /// - `code_hash`: `Hash`: Code hash of the restored contract
- /// - `rent_allowance`: `Balance`: Rent allowance of the restored contract
- Restored(AccountId, AccountId, Hash, Balance),
-
- /// Code with the specified hash has been stored.
- /// \[code_hash\]
- CodeStored(Hash),
-
- /// Triggered when the current \[schedule\] is updated.
- ScheduleUpdated(u32),
+ /// Query storage of a specified contract under a specified key.
+ pub fn get_storage(address: T::AccountId, key: [u8; 32]) -> GetStorageResult {
+ let contract_info =
+ ContractInfoOf::<T>::get(&address).ok_or(ContractAccessError::DoesntExist)?;

- /// An event deposited upon execution of a contract from the account.
- /// \[account, data\]
- ContractExecution(AccountId, Vec<u8>),
+ let maybe_value = Storage::<T>::read(&contract_info.trie_id, &key);
+ Ok(maybe_value)
 }
-}

-decl_storage! {
- trait Store for Module<T: Trait> as Contracts {
- /// Current cost schedule for contracts.
- CurrentSchedule get(fn current_schedule) config(): Schedule<T> = Default::default();
- /// A mapping from an original code hash to the original code, untouched by instrumentation.
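The two `bare_*` entry points above are designed for off-chain use. Here is a rough sketch of how an RPC-side dry run might drive `bare_call`; the `Runtime` type, the `dry_run` wrapper, and the gas value are assumptions for illustration, not part of this change:

```rust
use pallet_contracts_primitives::ContractExecResult;

// Assumed: `Runtime` is a concrete runtime implementing `pallet_contracts::Config`.
type AccountId = <Runtime as frame_system::Config>::AccountId;

fn dry_run(caller: AccountId, contract: AccountId, input: Vec<u8>) -> ContractExecResult {
    // `debug: true` is acceptable only because a dry run executes off-chain
    // (e.g. behind an RPC); on-chain callers must pass `false`.
    let result = pallet_contracts::Pallet::<Runtime>::bare_call(
        caller,
        contract,
        Default::default(), // value transferred along with the call
        10_000_000_000,     // gas_limit, a plain `Weight`; placeholder value
        input,
        true,               // collect human-readable debug messages
    );
    // `result.gas_required` can be fed back as the `gas_limit` of a real
    // `call` extrinsic, which is how weight estimation works over RPC.
    result
}
```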
- pub PristineCode: map hasher(identity) CodeHash<T> => Option<Vec<u8>>;
- /// A mapping between an original code hash and instrumented wasm code, ready for execution.
- pub CodeStorage: map hasher(identity) CodeHash<T> => Option<wasm::PrefabWasmModule>;
- /// The subtrie counter.
- pub AccountCounter: u64 = 0;
- /// The code associated with a given account.
- ///
- /// TWOX-NOTE: SAFE since `AccountId` is a secure hash.
- pub ContractInfoOf: map hasher(twox_64_concat) T::AccountId => Option<ContractInfo<T>>;
+ /// Determine the address of a contract.
+ ///
+ /// This is the address generation function used by contract instantiation. Its result
+ /// is only dependent on its inputs. It can therefore be used to reliably predict the
+ /// address of a contract. This is akin to the formula of eth's CREATE2 opcode. There
+ /// is no CREATE equivalent because CREATE2 is strictly more powerful.
+ ///
+ /// Formula: `hash(deploying_address ++ code_hash ++ salt)`
+ pub fn contract_address(
+ deploying_address: &T::AccountId,
+ code_hash: &CodeHash<T>,
+ salt: &[u8],
+ ) -> T::AccountId {
+ let buf: Vec<_> = deploying_address
+ .as_ref()
+ .iter()
+ .chain(code_hash.as_ref())
+ .chain(salt)
+ .cloned()
+ .collect();
+ UncheckedFrom::unchecked_from(T::Hashing::hash(&buf))
 }
-}
-
-/// In-memory cache of configuration values.
-///
-/// We assume that these values can't be changed in the
-/// course of transaction execution.
-pub struct Config<T: Trait> {
- pub schedule: Schedule<T>,
- pub existential_deposit: BalanceOf<T>,
- pub tombstone_deposit: BalanceOf<T>,
- pub max_depth: u32,
- pub max_value_size: u32,
-}

-impl<T: Trait> Config<T> {
- fn preload() -> Config<T> {
- Config {
- schedule: <Module<T>>::current_schedule(),
- existential_deposit: T::Currency::minimum_balance(),
- tombstone_deposit: T::TombstoneDeposit::get(),
- max_depth: T::MaxDepth::get(),
- max_value_size: T::MaxValueSize::get(),
- }
+ /// Subsistence threshold is the extension of the minimum balance (aka existential deposit)
+ /// by the contract deposit. It is the minimum balance any contract must hold.
+ ///
+ /// Any contract-initiated balance transfer mechanism cannot make the balance lower
+ /// than the subsistence threshold. The only way to recover the balance is to remove
+ /// the contract using `seal_terminate`.
+ pub fn subsistence_threshold() -> BalanceOf<T> {
+ T::Currency::minimum_balance().saturating_add(T::ContractDeposit::get())
 }

- /// Subsistence threshold is the extension of the minimum balance (aka existential deposit) by the
- /// tombstone deposit, required for leaving a tombstone.
+ /// The in-memory size in bytes of the data structure associated with each contract.
+ ///
+ /// The data structure is also put into storage for each contract. The in-storage size
+ /// is never larger than the in-memory representation and usually smaller due to compact
+ /// encoding and lack of padding.
 ///
- /// Rent or any contract initiated balance transfer mechanism cannot make the balance lower
- /// than the subsistence threshold in order to guarantee that a tombstone is created.
+ /// # Note
 ///
- /// The only way to completely kill a contract without a tombstone is calling `seal_terminate`.
- pub fn subsistence_threshold(&self) -> BalanceOf<T> {
- self.existential_deposit.saturating_add(self.tombstone_deposit)
+ /// This returns the in-memory size because the in-storage size (SCALE encoded) cannot
+ /// be efficiently determined. Treat this as an upper bound of the in-storage size.
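As a concrete illustration of the CREATE2-style formula behind `contract_address` above, the following standalone sketch fixes the hasher to BLAKE2-256 and assumes 32-byte account ids; the real pallet uses whatever `T::Hashing` and `AccountId` the runtime configures:

```rust
use sp_io::hashing::blake2_256;

/// `hash(deploying_address ++ code_hash ++ salt)`, with every input as raw bytes.
fn predict_contract_address(deployer: &[u8; 32], code_hash: &[u8; 32], salt: &[u8]) -> [u8; 32] {
    let mut buf = Vec::with_capacity(deployer.len() + code_hash.len() + salt.len());
    buf.extend_from_slice(deployer);
    buf.extend_from_slice(code_hash);
    buf.extend_from_slice(salt);
    blake2_256(&buf)
}
```

Because every input is known ahead of deployment, anyone can predict the address, and varying the salt lets one deployer instantiate many contracts from the same code hash.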
+ pub fn contract_info_size() -> u32 {
+ sp_std::mem::size_of::<ContractInfo<T>>() as u32
 }

- /// The same as `subsistence_threshold` but without the need for a preloaded instance.
- ///
- /// This is for cases where this value is needed in rent calculation rather than
- /// during contract execution.
- pub fn subsistence_threshold_uncached() -> BalanceOf<T> {
- T::Currency::minimum_balance().saturating_add(T::TombstoneDeposit::get())
+ /// Store code for benchmarks which neither checks nor instruments the code.
+ #[cfg(feature = "runtime-benchmarks")]
+ fn store_code_raw(code: Vec<u8>) -> frame_support::dispatch::DispatchResult {
+ let schedule = T::Schedule::get();
+ PrefabWasmModule::store_code_unchecked(code, &schedule)?;
+ Ok(())
+ }
+
+ /// This exists so that benchmarks can determine the weight of running an instrumentation.
+ #[cfg(feature = "runtime-benchmarks")]
+ fn reinstrument_module(
+ module: &mut PrefabWasmModule<T>,
+ schedule: &Schedule<T>,
+ ) -> frame_support::dispatch::DispatchResult {
+ self::wasm::reinstrument(module, schedule)
+ }
 }
diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs
new file mode 100644
index 0000000000000..b7fa9575e23b5
--- /dev/null
+++ b/frame/contracts/src/migration.rs
@@ -0,0 +1,121 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{Config, Pallet, Weight};
+use frame_support::{
+ storage::migration,
+ traits::{Get, PalletInfoAccess},
+};
+use sp_std::prelude::*;
+
+pub fn migrate<T: Config>() -> Weight {
+ use frame_support::traits::StorageVersion;
+
+ let version = StorageVersion::get::<Pallet<T>>();
+ let mut weight: Weight = 0;
+
+ if version < 4 {
+ weight = weight.saturating_add(v4::migrate::<T>());
+ StorageVersion::new(4).put::<Pallet<T>>();
+ }
+
+ if version < 5 {
+ weight = weight.saturating_add(v5::migrate::<T>());
+ StorageVersion::new(5).put::<Pallet<T>>();
+ }
+
+ weight
+}
+
+/// V4: `Schedule` is changed to be a config item rather than an in-storage value.
+mod v4 {
+ use super::*;
+
+ pub fn migrate<T: Config>() -> Weight {
+ migration::remove_storage_prefix(<Pallet<T>>::name().as_bytes(), b"CurrentSchedule", b"");
+ T::DbWeight::get().writes(1)
+ }
+}
+
+/// V5: State rent is removed, which obsoletes some fields in `ContractInfo`.
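To make the version gate in `migrate` above concrete, this is how a hypothetical future step would slot in; `v6`, its module body, and `migrate_to_v6` are invented purely to show the shape (the real v5 module follows right after this sketch):

```rust
// Hypothetical v6 step: each guarded block runs at most once because the
// stored `StorageVersion` is bumped immediately after a successful step.
mod v6 {
    use super::*;

    pub fn migrate<T: Config>() -> Weight {
        // ... rewrite whatever storage items "v6" would change ...
        T::DbWeight::get().writes(1)
    }
}

pub fn migrate_to_v6<T: Config>() -> Weight {
    use frame_support::traits::StorageVersion;

    let mut weight: Weight = 0;
    if StorageVersion::get::<Pallet<T>>() < 6 {
        weight = weight.saturating_add(v6::migrate::<T>());
        StorageVersion::new(6).put::<Pallet<T>>();
    }
    weight
}
```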
+mod v5 {
+ use super::*;
+ use crate::{
+ BalanceOf, CodeHash, ContractInfo, ContractInfoOf, DeletedContract, DeletionQueue, TrieId,
+ };
+ use codec::Decode;
+ use sp_std::marker::PhantomData;
+
+ type AliveContractInfo<T> =
+ RawAliveContractInfo<CodeHash<T>, BalanceOf<T>, <T as frame_system::Config>::BlockNumber>;
+ type TombstoneContractInfo<T> = RawTombstoneContractInfo<
+ <T as frame_system::Config>::Hash,
+ <T as frame_system::Config>::Hashing,
+ >;
+
+ #[derive(Decode)]
+ enum OldContractInfo<T: Config> {
+ Alive(AliveContractInfo<T>),
+ Tombstone(TombstoneContractInfo<T>),
+ }
+
+ #[derive(Decode)]
+ struct RawAliveContractInfo<CodeHash, Balance, BlockNumber> {
+ trie_id: TrieId,
+ _storage_size: u32,
+ _pair_count: u32,
+ code_hash: CodeHash,
+ _rent_allowance: Balance,
+ _rent_paid: Balance,
+ _deduct_block: BlockNumber,
+ _last_write: Option<BlockNumber>,
+ _reserved: Option<()>,
+ }
+
+ #[derive(Decode)]
+ struct RawTombstoneContractInfo<H, Hasher>(H, PhantomData<Hasher>);
+
+ #[derive(Decode)]
+ struct OldDeletedContract {
+ _pair_count: u32,
+ trie_id: TrieId,
+ }
+
+ pub fn migrate<T: Config>() -> Weight {
+ let mut weight: Weight = 0;
+
+ <ContractInfoOf<T>>::translate(|_key, old: OldContractInfo<T>| {
+ weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1));
+ match old {
+ OldContractInfo::Alive(old) => Some(ContractInfo::<T> {
+ trie_id: old.trie_id,
+ code_hash: old.code_hash,
+ _reserved: old._reserved,
+ }),
+ OldContractInfo::Tombstone(_) => None,
+ }
+ });
+
+ <DeletionQueue<T>>::translate(|old: Option<Vec<OldDeletedContract>>| {
+ weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1));
+ old.map(|old| old.into_iter().map(|o| DeletedContract { trie_id: o.trie_id }).collect())
+ })
+ .ok();
+
+ weight
+ }
+}
diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs
deleted file mode 100644
index 908faca9a6c0c..0000000000000
--- a/frame/contracts/src/rent.rs
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
-
-//! A module responsible for computing the right amount of weight and charging it.
-
-use crate::{
- AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, RawEvent,
- TombstoneContractInfo, Trait, CodeHash, Config
-};
-use sp_std::prelude::*;
-use sp_io::hashing::blake2_256;
-use frame_support::storage::child;
-use frame_support::traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReason};
-use frame_support::StorageMap;
-use pallet_contracts_primitives::{ContractAccessError, RentProjection, RentProjectionResult};
-use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul, SaturatedConversion, Saturating, Zero};
-
-/// The amount to charge.
-///
-/// This amount respects the contract's rent allowance and the subsistence deposit.
-/// Because of that, charging the amount cannot remove the contract.
-struct OutstandingAmount<T: Trait> {
- amount: BalanceOf<T>,
-}
-
-impl<T: Trait> OutstandingAmount<T> {
- /// Create the new outstanding amount.
- ///
- /// The amount should be always withdrawable and it should not kill the account.
- fn new(amount: BalanceOf) -> Self { - Self { amount } - } - - /// Returns the amount this instance wraps. - fn peek(&self) -> BalanceOf { - self.amount - } - - /// Withdraws the outstanding amount from the given account. - fn withdraw(self, account: &T::AccountId) { - if let Ok(imbalance) = T::Currency::withdraw( - account, - self.amount, - WithdrawReason::Fee.into(), - ExistenceRequirement::KeepAlive, - ) { - // This should never fail. However, let's err on the safe side. - T::RentPayment::on_unbalanced(imbalance); - } - } -} - -enum Verdict { - /// The contract is exempted from paying rent. - /// - /// For example, it already paid its rent in the current block, or it has enough deposit for not - /// paying rent at all. - Exempt, - /// Funds dropped below the subsistence deposit. - /// - /// Remove the contract along with it's storage. - Kill, - /// The contract cannot afford payment within its rent budget so it gets evicted. However, - /// because its balance is greater than the subsistence threshold it leaves a tombstone. - Evict { - amount: Option>, - }, - /// Everything is OK, we just only take some charge. - Charge { amount: OutstandingAmount }, -} - -/// Returns a fee charged per block from the contract. -/// -/// This function accounts for the storage rent deposit. I.e. if the contract possesses enough funds -/// then the fee can drop to zero. -fn compute_fee_per_block( - free_balance: &BalanceOf, - contract: &AliveContractInfo, -) -> BalanceOf { - let free_storage = free_balance - .checked_div(&T::RentDepositOffset::get()) - .unwrap_or_else(Zero::zero); - - // For now, we treat every empty KV pair as if it was one byte long. - let empty_pairs_equivalent = contract.empty_pair_count; - - let effective_storage_size = >::from( - contract.storage_size + T::StorageSizeOffset::get() + empty_pairs_equivalent, - ) - .saturating_sub(free_storage); - - effective_storage_size - .checked_mul(&T::RentByteFee::get()) - .unwrap_or_else(|| >::max_value()) -} - -/// Returns amount of funds available to consume by rent mechanism. -/// -/// Rent mechanism cannot consume more than `rent_allowance` set by the contract and it cannot make -/// the balance lower than [`subsistence_threshold`]. -/// -/// In case the toal_balance is below the subsistence threshold, this function returns `None`. -fn rent_budget( - total_balance: &BalanceOf, - free_balance: &BalanceOf, - contract: &AliveContractInfo, -) -> Option> { - let subsistence_threshold = Config::::subsistence_threshold_uncached(); - // Reserved balance contributes towards the subsistence threshold to stay consistent - // with the existential deposit where the reserved balance is also counted. - if *total_balance < subsistence_threshold { - return None; - } - - // However, reserved balance cannot be charged so we need to use the free balance - // to calculate the actual budget (which can be 0). - let rent_allowed_to_charge = free_balance.saturating_sub(subsistence_threshold); - Some(>::min( - contract.rent_allowance, - rent_allowed_to_charge, - )) -} - -/// Consider the case for rent payment of the given account and returns a `Verdict`. -/// -/// Use `handicap` in case you want to change the reference block number. (To get more details see -/// `snitch_contract_should_be_evicted` ). -fn consider_case( - account: &T::AccountId, - current_block_number: T::BlockNumber, - handicap: T::BlockNumber, - contract: &AliveContractInfo, -) -> Verdict { - // How much block has passed since the last deduction for the contract. 
- let blocks_passed = { - // Calculate an effective block number, i.e. after adjusting for handicap. - let effective_block_number = current_block_number.saturating_sub(handicap); - effective_block_number.saturating_sub(contract.deduct_block) - }; - if blocks_passed.is_zero() { - // Rent has already been paid - return Verdict::Exempt; - } - - let total_balance = T::Currency::total_balance(account); - let free_balance = T::Currency::free_balance(account); - - // An amount of funds to charge per block for storage taken up by the contract. - let fee_per_block = compute_fee_per_block::(&free_balance, contract); - if fee_per_block.is_zero() { - // The rent deposit offset reduced the fee to 0. This means that the contract - // gets the rent for free. - return Verdict::Exempt; - } - - let rent_budget = match rent_budget::(&total_balance, &free_balance, contract) { - Some(rent_budget) => rent_budget, - None => { - // The contract's total balance is already below subsistence threshold. That - // indicates that the contract cannot afford to leave a tombstone. - // - // So cleanly wipe the contract. - return Verdict::Kill; - } - }; - - let dues = fee_per_block - .checked_mul(&blocks_passed.saturated_into::().into()) - .unwrap_or_else(|| >::max_value()); - let insufficient_rent = rent_budget < dues; - - // If the rent payment cannot be withdrawn due to locks on the account balance, then evict the - // account. - // - // NOTE: This seems problematic because it provides a way to tombstone an account while - // avoiding the last rent payment. In effect, someone could retroactively set rent_allowance - // for their contract to 0. - let dues_limited = dues.min(rent_budget); - let can_withdraw_rent = T::Currency::ensure_can_withdraw( - account, - dues_limited, - WithdrawReason::Fee.into(), - free_balance.saturating_sub(dues_limited), - ) - .is_ok(); - - if insufficient_rent || !can_withdraw_rent { - // The contract cannot afford the rent payment and has a balance above the subsistence - // threshold, so it leaves a tombstone. - let amount = if can_withdraw_rent { - Some(OutstandingAmount::new(dues_limited)) - } else { - None - }; - return Verdict::Evict { amount }; - } - - return Verdict::Charge { - // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. - amount: OutstandingAmount::new(dues_limited), - }; -} - -/// Enacts the given verdict and returns the updated `ContractInfo`. -/// -/// `alive_contract_info` should be from the same address as `account`. -fn enact_verdict( - account: &T::AccountId, - alive_contract_info: AliveContractInfo, - current_block_number: T::BlockNumber, - verdict: Verdict, -) -> Option> { - match verdict { - Verdict::Exempt => return Some(ContractInfo::Alive(alive_contract_info)), - Verdict::Kill => { - >::remove(account); - child::kill_storage( - &alive_contract_info.child_trie_info(), - ); - >::deposit_event(RawEvent::Evicted(account.clone(), false)); - None - } - Verdict::Evict { amount } => { - if let Some(amount) = amount { - amount.withdraw(account); - } - - // Note: this operation is heavy. 
- let child_storage_root = child::root( - &alive_contract_info.child_trie_info(), - ); - - let tombstone = >::new( - &child_storage_root[..], - alive_contract_info.code_hash, - ); - let tombstone_info = ContractInfo::Tombstone(tombstone); - >::insert(account, &tombstone_info); - - child::kill_storage( - &alive_contract_info.child_trie_info(), - ); - - >::deposit_event(RawEvent::Evicted(account.clone(), true)); - Some(tombstone_info) - } - Verdict::Charge { amount } => { - let contract_info = ContractInfo::Alive(AliveContractInfo:: { - rent_allowance: alive_contract_info.rent_allowance - amount.peek(), - deduct_block: current_block_number, - ..alive_contract_info - }); - >::insert(account, &contract_info); - - amount.withdraw(account); - Some(contract_info) - } - } -} - -/// Make account paying the rent for the current block number -/// -/// NOTE this function performs eviction eagerly. All changes are read and written directly to -/// storage. -pub fn collect_rent(account: &T::AccountId) -> Option> { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return contract_info, - Some(ContractInfo::Alive(contract)) => contract, - }; - - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - enact_verdict(account, alive_contract_info, current_block_number, verdict) -} - -/// Process a report that a contract under the given address should be evicted. -/// -/// Enact the eviction right away if the contract should be evicted and return true. -/// Otherwise, **do nothing** and return false. -/// -/// The `handicap` parameter gives a way to check the rent to a moment in the past instead -/// of current block. E.g. if the contract is going to be evicted at the current block, -/// `handicap = 1` can defer the eviction for 1 block. This is useful to handicap certain snitchers -/// relative to others. -/// -/// NOTE this function performs eviction eagerly. All changes are read and written directly to -/// storage. -pub fn snitch_contract_should_be_evicted( - account: &T::AccountId, - handicap: T::BlockNumber, -) -> bool { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return false, - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - handicap, - &alive_contract_info, - ); - - // Enact the verdict only if the contract gets removed. - match verdict { - Verdict::Kill | Verdict::Evict { .. } => { - enact_verdict(account, alive_contract_info, current_block_number, verdict); - true - } - _ => false, - } -} - -/// Returns the projected time a given contract will be able to sustain paying its rent. The -/// returned projection is relevant for the current block, i.e. it is as if the contract was -/// accessed at the beginning of the current block. Returns `None` in case if the contract was -/// evicted before or as a result of the rent collection. -/// -/// The returned value is only an estimation. It doesn't take into account any top ups, changing the -/// rent allowance, or any problems coming from withdrawing the dues. -/// -/// NOTE that this is not a side-effect free function! It will actually collect rent and then -/// compute the projection. 
This function is only used for implementation of an RPC method through -/// `RuntimeApi` meaning that the changes will be discarded anyway. -pub fn compute_rent_projection( - account: &T::AccountId, -) -> RentProjectionResult { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - let new_contract_info = - enact_verdict(account, alive_contract_info, current_block_number, verdict); - - // Check what happened after enaction of the verdict. - let alive_contract_info = match new_contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - - // Compute how much would the fee per block be with the *updated* balance. - let total_balance = T::Currency::total_balance(account); - let free_balance = T::Currency::free_balance(account); - let fee_per_block = compute_fee_per_block::(&free_balance, &alive_contract_info); - if fee_per_block.is_zero() { - return Ok(RentProjection::NoEviction); - } - - // Then compute how much the contract will sustain under these circumstances. - let rent_budget = rent_budget::(&total_balance, &free_balance, &alive_contract_info).expect( - "the contract exists and in the alive state; - the updated balance must be greater than subsistence deposit; - this function doesn't return `None`; - qed - ", - ); - let blocks_left = match rent_budget.checked_div(&fee_per_block) { - Some(blocks_left) => blocks_left, - None => { - // `fee_per_block` is not zero here, so `checked_div` can return `None` if - // there is an overflow. This cannot happen with integers though. Return - // `NoEviction` here just in case. - return Ok(RentProjection::NoEviction); - } - }; - - let blocks_left = blocks_left.saturated_into::().into(); - Ok(RentProjection::EvictionAt( - current_block_number + blocks_left, - )) -} - -/// Restores the destination account using the origin as prototype. -/// -/// The restoration will be performed iff: -/// - origin exists and is alive, -/// - the origin's storage is not written in the current block -/// - the restored account has tombstone -/// - the tombstone matches the hash of the origin storage root, and code hash. -/// -/// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to -/// the restored account. The restored account will inherit the last write block and its last -/// deduct block will be set to the current block. 
-pub fn restore_to( - origin: T::AccountId, - dest: T::AccountId, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, -) -> Result<(), &'static str> { - let mut origin_contract = >::get(&origin) - .and_then(|c| c.get_alive()) - .ok_or("Cannot restore from inexisting or tombstone contract")?; - - let child_trie_info = origin_contract.child_trie_info(); - - let current_block = >::block_number(); - - if origin_contract.last_write == Some(current_block) { - return Err("Origin TrieId written in the current block"); - } - - let dest_tombstone = >::get(&dest) - .and_then(|c| c.get_tombstone()) - .ok_or("Cannot restore to inexisting or alive contract")?; - - let last_write = if !delta.is_empty() { - Some(current_block) - } else { - origin_contract.last_write - }; - - let key_values_taken = delta.iter() - .filter_map(|key| { - child::get_raw(&child_trie_info, &blake2_256(key)).map(|value| { - child::kill(&child_trie_info, &blake2_256(key)); - (key, value) - }) - }) - .collect::>(); - - let tombstone = >::new( - // This operation is cheap enough because last_write (delta not included) - // is not this block as it has been checked earlier. - &child::root(&child_trie_info)[..], - code_hash, - ); - - if tombstone != dest_tombstone { - for (key, value) in key_values_taken { - child::put_raw(&child_trie_info, &blake2_256(key), &value); - } - - return Err("Tombstones don't match"); - } - - origin_contract.storage_size -= key_values_taken.iter() - .map(|(_, value)| value.len() as u32) - .sum::(); - - >::remove(&origin); - >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { - trie_id: origin_contract.trie_id, - storage_size: origin_contract.storage_size, - empty_pair_count: origin_contract.empty_pair_count, - total_pair_count: origin_contract.total_pair_count, - code_hash, - rent_allowance, - deduct_block: current_block, - last_write, - })); - - let origin_free_balance = T::Currency::free_balance(&origin); - T::Currency::make_free_balance_be(&origin, >::zero()); - T::Currency::deposit_creating(&dest, origin_free_balance); - - Ok(()) -} diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index fb38b1b895d18..c14165b4c6aec 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -1,93 +1,258 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.

 //! This module contains the cost schedule and supporting code that constructs a
 //! sane default schedule from a `WeightInfo` implementation.

-use crate::{Trait, WeightInfo};
+use crate::{weights::WeightInfo, Config};
+use codec::{Decode, Encode};
+use frame_support::{weights::Weight, DefaultNoBound};
+use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug};
+use pwasm_utils::{parity_wasm::elements, rules};
+use scale_info::TypeInfo;
 #[cfg(feature = "std")]
-use serde::{Serialize, Deserialize};
-use frame_support::weights::Weight;
-use sp_std::{marker::PhantomData, fmt};
-use codec::{Encode, Decode};
+use serde::{Deserialize, Serialize};
+use sp_runtime::RuntimeDebug;
+use sp_std::{marker::PhantomData, vec::Vec};

 /// How many API calls are executed in a single batch. The reason for increasing the amount
 /// of API calls in batches (per benchmark component increase) is so that the linear regression
 /// has an easier time determining the contribution of that component.
 pub const API_BENCHMARK_BATCH_SIZE: u32 = 100;

-/// Definition of the cost schedule and other parameterizations for wasm vm.
+/// How many instructions are executed in a single batch. The reasoning is the same
+/// as for `API_BENCHMARK_BATCH_SIZE`.
+pub const INSTR_BENCHMARK_BATCH_SIZE: u32 = 100;
+
+/// Definition of the cost schedule and other parameterizations for the wasm vm.
+///
+/// Its [`Default`] implementation is the designated way to initialize this type. It uses
+/// the benchmarked information supplied by [`Config::WeightInfo`]. All of its fields are
+/// public and can therefore be modified. For example, in order to change some of the limits
+/// and set a custom instruction weight version, the following code could be used:
+/// ```rust
+/// use pallet_contracts::{Schedule, Limits, InstructionWeights, Config};
+///
+/// fn create_schedule<T: Config>() -> Schedule<T> {
+///     Schedule {
+///         limits: Limits {
+///             globals: 3,
+///             parameters: 3,
+///             memory_pages: 16,
+///             table_size: 3,
+///             br_table_size: 3,
+///             .. Default::default()
+///         },
+///         instruction_weights: InstructionWeights {
+///             version: 5,
+///             .. Default::default()
+///         },
+///         .. Default::default()
+///     }
+/// }
+/// ```
+///
+/// # Note
+///
+/// Please make sure to bump the [`InstructionWeights::version`] whenever substantial
+/// changes are made to its values.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[derive(Clone, Encode, Decode, PartialEq, Eq)]
-pub struct Schedule<T: Trait> {
- /// Version of the schedule.
- pub version: u32,
+#[cfg_attr(feature = "std", serde(bound(serialize = "", deserialize = "")))]
+#[derive(Clone, Encode, Decode, PartialEq, Eq, ScheduleDebug, DefaultNoBound, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct Schedule<T: Config> {
+ /// Describes the upper limits on various metrics.
+ pub limits: Limits,

 /// The weights for individual wasm instructions.
- pub instruction_weights: InstructionWeights,
+ pub instruction_weights: InstructionWeights<T>,

 /// The weights for each imported function a contract is allowed to call.
- pub host_fn_weights: HostFnWeights,
-
- /// Whether the `seal_println` function is allowed to be used by contracts.
- /// MUST only be enabled for `dev` chains, NOT for production chains
- pub enable_println: bool,
+ pub host_fn_weights: HostFnWeights<T>,
+}

+/// Describes the upper limits on various metrics.
+///
+/// # Note
+///
+/// The values in this struct should never be decreased.
The reason is that decreasing those
+/// values will break existing contracts which are above the new limits when a
+/// re-instrumentation is triggered.
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)]
+pub struct Limits {
 /// The maximum number of topics supported by an event.
- pub max_event_topics: u32,
+ pub event_topics: u32,

- /// Maximum allowed stack height.
+ /// Maximum allowed stack height in number of elements.
 ///
- /// See https://wiki.parity.io/WebAssembly-StackHeight to find out
- /// how the stack frame cost is calculated.
- pub max_stack_height: u32,
+ /// See <https://wiki.parity.io/WebAssembly-StackHeight> to find out
+ /// how the stack frame cost is calculated. Each element can be of one of the
+ /// wasm value types. This means the maximum size per element is 64 bits.
+ pub stack_height: u32,
+
+ /// Maximum number of globals a module is allowed to declare.
+ ///
+ /// Globals are not limited through the `stack_height` as locals are. Neither does
+ /// the linear memory limit `memory_pages` apply to them.
+ pub globals: u32,
+
+ /// Maximum number of parameters a function can have.
+ ///
+ /// Those need to be limited to prevent a potentially exploitable interaction with
+ /// the stack height instrumentation: The costs of executing the stack height
+ /// instrumentation for an indirectly called function scales linearly with the amount
+ /// of parameters of this function. Because the stack height instrumentation itself
+ /// is not weight metered, its costs must be static (via this limit) and included in
+ /// the costs of the instructions that cause them (call, call_indirect).
+ pub parameters: u32,

 /// Maximum number of memory pages allowed for a contract.
- pub max_memory_pages: u32,
+ pub memory_pages: u32,

- /// Maximum allowed size of a declared table.
- pub max_table_size: u32,
+ /// Maximum number of elements allowed in a table.
+ ///
+ /// Currently, the only type of element that is allowed in a table is funcref.
+ pub table_size: u32,

- /// The maximum length of a subject used for PRNG generation.
- pub max_subject_len: u32,
+ /// Maximum number of elements that can appear as immediate values to the br_table instruction.
+ pub br_table_size: u32,

- /// The maximum length of a contract code in bytes. This limit applies to the uninstrumented
- /// and pristine form of the code as supplied to `put_code`.
- pub max_code_size: u32,
+ /// The maximum length of a subject in bytes used for PRNG generation.
+ pub subject_len: u32,

- /// The type parameter is used in the default implementation.
- pub _phantom: PhantomData<T>,
+ /// The maximum nesting level of the call stack.
+ pub call_depth: u32,
+
+ /// The maximum size of a storage value and event payload in bytes.
+ pub payload_len: u32,
+
+ /// The maximum length of a contract code in bytes. This limit applies to the instrumented
+ /// version of the code. Therefore `instantiate_with_code` can fail even when supplying
+ /// a wasm binary below this maximum size.
+ pub code_len: u32,
+}
+
+impl Limits {
+ /// The maximum memory size in bytes that a contract can occupy.
+ pub fn max_memory_size(&self) -> u32 {
+ self.memory_pages * 64 * 1024
+ }
+}

 /// Describes the weight for all categories of supported wasm instructions.
+///
+/// There is one field for each wasm instruction that describes the weight to
+/// execute one instruction of that name. There are a few exceptions:
+///
+/// 1.
If there are i64 and i32 variants of an instruction, we use the weight
+/// of the former for both.
+/// 2. The following instructions are free of charge because they merely structure the
+/// wasm module and cannot be spammed without making the module invalid (and rejected):
+/// End, Unreachable, Return, Else
+/// 3. The following instructions cannot be benchmarked because they are removed by any
+/// real world execution engine as a preprocessing step and therefore don't yield a
+/// meaningful benchmark result. However, in contrast to the instructions mentioned
+/// in 2. they can be spammed. We price them with the same weight as the "default"
+/// instruction (i64.const): Block, Loop, Nop
+/// 4. We price both i64.const and drop as InstructionWeights.i64const / 2. The reason
+/// for that is that we cannot benchmark either of them on its own but we need their
+/// individual values to derive (by subtraction) the weight of all other instructions
+/// that use them as supporting instructions. Supporting means mainly pushing arguments
+/// and dropping return values in order to maintain a valid module.
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[derive(Clone, Encode, Decode, PartialEq, Eq)]
-pub struct InstructionWeights {
- /// Weight of growing memory by a single page.
- pub grow_mem: Weight,
-
- /// Weight of a regular operation.
- pub regular: Weight,
+#[derive(Clone, Encode, Decode, PartialEq, Eq, WeightDebug, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct InstructionWeights<T: Config> {
+ /// Version of the instruction weights.
+ ///
+ /// # Note
+ ///
+ /// Should be incremented whenever any instruction weight is changed. The
+ /// reason is that changes to instruction weights require a re-instrumentation
+ /// in order to apply the changes to already deployed code. The re-instrumentation
+ /// is triggered by comparing the version of the current schedule with the version the code was
+ /// instrumented with. Changes usually happen when pallet_contracts is re-benchmarked.
+ ///
+ /// Changes to other parts of the schedule should not increment the version in
+ /// order to avoid unnecessary re-instrumentations.
+ pub version: u32,
+ pub i64const: u32,
+ pub i64load: u32,
+ pub i64store: u32,
+ pub select: u32,
+ pub r#if: u32,
+ pub br: u32,
+ pub br_if: u32,
+ pub br_table: u32,
+ pub br_table_per_entry: u32,
+ pub call: u32,
+ pub call_indirect: u32,
+ pub call_indirect_per_param: u32,
+ pub local_get: u32,
+ pub local_set: u32,
+ pub local_tee: u32,
+ pub global_get: u32,
+ pub global_set: u32,
+ pub memory_current: u32,
+ pub memory_grow: u32,
+ pub i64clz: u32,
+ pub i64ctz: u32,
+ pub i64popcnt: u32,
+ pub i64eqz: u32,
+ pub i64extendsi32: u32,
+ pub i64extendui32: u32,
+ pub i32wrapi64: u32,
+ pub i64eq: u32,
+ pub i64ne: u32,
+ pub i64lts: u32,
+ pub i64ltu: u32,
+ pub i64gts: u32,
+ pub i64gtu: u32,
+ pub i64les: u32,
+ pub i64leu: u32,
+ pub i64ges: u32,
+ pub i64geu: u32,
+ pub i64add: u32,
+ pub i64sub: u32,
+ pub i64mul: u32,
+ pub i64divs: u32,
+ pub i64divu: u32,
+ pub i64rems: u32,
+ pub i64remu: u32,
+ pub i64and: u32,
+ pub i64or: u32,
+ pub i64xor: u32,
+ pub i64shl: u32,
+ pub i64shrs: u32,
+ pub i64shru: u32,
+ pub i64rotl: u32,
+ pub i64rotr: u32,
+ /// The type parameter is used in the default implementation.
+ #[codec(skip)]
+ pub _phantom: PhantomData<T>,
 }

 /// Describes the weight for each imported function that a contract is allowed to call.
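Stripped of the macro plumbing defined further down, the subtraction described in point 4 works out as below; the function and parameter names are invented, and the `*_batch` values stand for raw benchmark results over `INSTR_BENCHMARK_BATCH_SIZE` repetitions:

```rust
const INSTR_BENCHMARK_BATCH_SIZE: u64 = 100;

// Sketch of the arithmetic behind `cost_instr!`, with invented names.
fn per_instruction_weight(benchmarked_batch: u64, i64const_batch: u64, num_params: u64) -> u64 {
    // Weight of a single instruction, averaged over the whole batch.
    let raw = benchmarked_batch / INSTR_BENCHMARK_BATCH_SIZE;
    // Each supporting `i64.const` push (and matching `drop`) is priced at half
    // the measured i64const weight; subtract that share per operand so that
    // only the instruction under test remains.
    let support = (i64const_batch / INSTR_BENCHMARK_BATCH_SIZE / 2) * num_params;
    raw.saturating_sub(support)
}
```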
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[derive(Clone, Encode, Decode, PartialEq, Eq)] -pub struct HostFnWeights { +#[derive(Clone, Encode, Decode, PartialEq, Eq, WeightDebug, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct HostFnWeights { /// Weight of calling `seal_caller`. pub caller: Weight, @@ -106,11 +271,8 @@ pub struct HostFnWeights { /// Weight of calling `seal_minimum_balance`. pub minimum_balance: Weight, - /// Weight of calling `seal_tombstone_deposit`. - pub tombstone_deposit: Weight, - - /// Weight of calling `seal_rent_allowance`. - pub rent_allowance: Weight, + /// Weight of calling `seal_contract_deposit`. + pub contract_deposit: Weight, /// Weight of calling `seal_block_number`. pub block_number: Weight, @@ -139,12 +301,6 @@ pub struct HostFnWeights { /// Weight of calling `seal_terminate`. pub terminate: Weight, - /// Weight of calling `seal_restore_to`. - pub restore_to: Weight, - - /// Weight per delta key supplied to `seal_restore_to`. - pub restore_to_per_delta: Weight, - /// Weight of calling `seal_random`. pub random: Weight, @@ -157,8 +313,8 @@ pub struct HostFnWeights { /// Weight per byte of an event deposited through `seal_deposit_event`. pub deposit_event_per_byte: Weight, - /// Weight of calling `seal_set_rent_allowance`. - pub set_rent_allowance: Weight, + /// Weight of calling `seal_debug_message`. + pub debug_message: Weight, /// Weight of calling `seal_set_storage`. pub set_storage: Weight, @@ -199,6 +355,9 @@ pub struct HostFnWeights { /// Weight per output byte received through `seal_instantiate`. pub instantiate_per_output_byte: Weight, + /// Weight per salt byte supplied to `seal_instantiate`. + pub instantiate_per_salt_byte: Weight, + /// Weight of calling `seal_hash_sha_256`. pub hash_sha2_256: Weight, @@ -222,23 +381,19 @@ pub struct HostFnWeights { /// Weight per byte hashed by `seal_hash_blake2_128`. pub hash_blake2_128_per_byte: Weight, -} -/// We need to implement Debug manually because the automatic derive enforces T -/// to also implement Debug. -impl fmt::Debug for Schedule { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Schedule").finish() - } -} + /// Weight of calling `seal_ecdsa_recover`. + pub ecdsa_recover: Weight, -/// 500 (2 instructions per nano second on 2GHZ) * 1000x slowdown through wasmi -/// This is a wild guess and should be viewed as a rough estimation. -/// Proper benchmarks are needed before this value and its derivatives can be used in production. -const WASM_INSTRUCTION_COST: Weight = 500_000; + /// The type parameter is used in the default implementation. + #[codec(skip)] + pub _phantom: PhantomData, +} macro_rules! replace_token { - ($_in:tt $replacement:tt) => { $replacement }; + ($_in:tt $replacement:tt) => { + $replacement + }; } macro_rules! call_zero { @@ -259,6 +414,27 @@ macro_rules! cost_batched_args { } } +macro_rules! cost_instr_no_params_with_batch_size { + ($name:ident, $batch_size:expr) => { + (cost_args!($name, 1) / Weight::from($batch_size)) as u32 + }; +} + +macro_rules! cost_instr_with_batch_size { + ($name:ident, $num_params:expr, $batch_size:expr) => { + cost_instr_no_params_with_batch_size!($name, $batch_size).saturating_sub( + (cost_instr_no_params_with_batch_size!(instr_i64const, $batch_size) / 2) + .saturating_mul($num_params), + ) + }; +} + +macro_rules! cost_instr { + ($name:ident, $num_params:expr) => { + cost_instr_with_batch_size!($name, $num_params, INSTR_BENCHMARK_BATCH_SIZE) + }; +} + macro_rules! 
cost_byte_args { ($name:ident, $( $arg: expr ),+) => { cost_args!($name, $( $arg ),+) / 1024 @@ -274,59 +450,136 @@ macro_rules! cost_byte_batched_args { macro_rules! cost { ($name:ident) => { cost_args!($name, 1) - } + }; } macro_rules! cost_batched { ($name:ident) => { cost_batched_args!($name, 1) - } + }; } macro_rules! cost_byte { ($name:ident) => { cost_byte_args!($name, 1) - } + }; } macro_rules! cost_byte_batched { ($name:ident) => { cost_byte_batched_args!($name, 1) + }; +} + +impl Default for Limits { + fn default() -> Self { + Self { + event_topics: 4, + // 512 * sizeof(i64) will give us a 4k stack. + stack_height: 512, + globals: 256, + parameters: 128, + memory_pages: 16, + // 4k function pointers (This is in count not bytes). + table_size: 4096, + br_table_size: 256, + subject_len: 32, + call_depth: 32, + payload_len: 16 * 1024, + code_len: 128 * 1024, + } } } -impl Default for Schedule { +impl Default for InstructionWeights { fn default() -> Self { - let instruction_weights = InstructionWeights { - grow_mem: WASM_INSTRUCTION_COST, - regular: WASM_INSTRUCTION_COST, - }; + let max_pages = Limits::default().memory_pages; + Self { + version: 2, + i64const: cost_instr!(instr_i64const, 1), + i64load: cost_instr!(instr_i64load, 2), + i64store: cost_instr!(instr_i64store, 2), + select: cost_instr!(instr_select, 4), + r#if: cost_instr!(instr_if, 3), + br: cost_instr!(instr_br, 2), + br_if: cost_instr!(instr_br_if, 3), + br_table: cost_instr!(instr_br_table, 3), + br_table_per_entry: cost_instr!(instr_br_table_per_entry, 0), + call: cost_instr!(instr_call, 2), + call_indirect: cost_instr!(instr_call_indirect, 3), + call_indirect_per_param: cost_instr!(instr_call_indirect_per_param, 1), + local_get: cost_instr!(instr_local_get, 1), + local_set: cost_instr!(instr_local_set, 1), + local_tee: cost_instr!(instr_local_tee, 2), + global_get: cost_instr!(instr_global_get, 1), + global_set: cost_instr!(instr_global_set, 1), + memory_current: cost_instr!(instr_memory_current, 1), + memory_grow: cost_instr_with_batch_size!(instr_memory_grow, 1, max_pages), + i64clz: cost_instr!(instr_i64clz, 2), + i64ctz: cost_instr!(instr_i64ctz, 2), + i64popcnt: cost_instr!(instr_i64popcnt, 2), + i64eqz: cost_instr!(instr_i64eqz, 2), + i64extendsi32: cost_instr!(instr_i64extendsi32, 2), + i64extendui32: cost_instr!(instr_i64extendui32, 2), + i32wrapi64: cost_instr!(instr_i32wrapi64, 2), + i64eq: cost_instr!(instr_i64eq, 3), + i64ne: cost_instr!(instr_i64ne, 3), + i64lts: cost_instr!(instr_i64lts, 3), + i64ltu: cost_instr!(instr_i64ltu, 3), + i64gts: cost_instr!(instr_i64gts, 3), + i64gtu: cost_instr!(instr_i64gtu, 3), + i64les: cost_instr!(instr_i64les, 3), + i64leu: cost_instr!(instr_i64leu, 3), + i64ges: cost_instr!(instr_i64ges, 3), + i64geu: cost_instr!(instr_i64geu, 3), + i64add: cost_instr!(instr_i64add, 3), + i64sub: cost_instr!(instr_i64sub, 3), + i64mul: cost_instr!(instr_i64mul, 3), + i64divs: cost_instr!(instr_i64divs, 3), + i64divu: cost_instr!(instr_i64divu, 3), + i64rems: cost_instr!(instr_i64rems, 3), + i64remu: cost_instr!(instr_i64remu, 3), + i64and: cost_instr!(instr_i64and, 3), + i64or: cost_instr!(instr_i64or, 3), + i64xor: cost_instr!(instr_i64xor, 3), + i64shl: cost_instr!(instr_i64shl, 3), + i64shrs: cost_instr!(instr_i64shrs, 3), + i64shru: cost_instr!(instr_i64shru, 3), + i64rotl: cost_instr!(instr_i64rotl, 3), + i64rotr: cost_instr!(instr_i64rotr, 3), + _phantom: PhantomData, + } + } +} - let host_fn_weights = HostFnWeights { +impl Default for HostFnWeights { + fn default() -> Self { + 
Self { caller: cost_batched!(seal_caller), address: cost_batched!(seal_address), gas_left: cost_batched!(seal_gas_left), balance: cost_batched!(seal_balance), value_transferred: cost_batched!(seal_value_transferred), minimum_balance: cost_batched!(seal_minimum_balance), - tombstone_deposit: cost_batched!(seal_tombstone_deposit), - rent_allowance: cost_batched!(seal_rent_allowance), + contract_deposit: cost_batched!(seal_tombstone_deposit), block_number: cost_batched!(seal_block_number), now: cost_batched!(seal_now), weight_to_fee: cost_batched!(seal_weight_to_fee), gas: cost_batched!(seal_gas), - input: cost!(seal_input), - input_per_byte: cost_byte!(seal_input_per_kb), + input: cost_batched!(seal_input), + input_per_byte: cost_byte_batched!(seal_input_per_kb), r#return: cost!(seal_return), return_per_byte: cost_byte!(seal_return_per_kb), terminate: cost!(seal_terminate), - restore_to: cost!(seal_restore_to), - restore_to_per_delta: cost_batched!(seal_restore_to_per_delta), random: cost_batched!(seal_random), deposit_event: cost_batched!(seal_deposit_event), deposit_event_per_topic: cost_batched_args!(seal_deposit_event_per_topic_and_kb, 1, 0), - deposit_event_per_byte: cost_byte_batched_args!(seal_deposit_event_per_topic_and_kb, 0, 1), - set_rent_allowance: cost_batched!(seal_set_rent_allowance), + deposit_event_per_byte: cost_byte_batched_args!( + seal_deposit_event_per_topic_and_kb, + 0, + 1 + ), + debug_message: cost_batched!(seal_debug_message), set_storage: cost_batched!(seal_set_storage), set_storage_per_byte: cost_byte_batched!(seal_set_storage_per_kb), clear_storage: cost_batched!(seal_clear_storage), @@ -334,12 +587,43 @@ impl Default for Schedule { get_storage_per_byte: cost_byte_batched!(seal_get_storage_per_kb), transfer: cost_batched!(seal_transfer), call: cost_batched!(seal_call), - call_transfer_surcharge: cost_batched_args!(seal_call_per_transfer_input_output_kb, 1, 0, 0), - call_per_input_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 1, 0), - call_per_output_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 0, 1), + call_transfer_surcharge: cost_batched_args!( + seal_call_per_transfer_input_output_kb, + 1, + 0, + 0 + ), + call_per_input_byte: cost_byte_batched_args!( + seal_call_per_transfer_input_output_kb, + 0, + 1, + 0 + ), + call_per_output_byte: cost_byte_batched_args!( + seal_call_per_transfer_input_output_kb, + 0, + 0, + 1 + ), instantiate: cost_batched!(seal_instantiate), - instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_kb, 1, 0), - instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_kb, 0, 1), + instantiate_per_input_byte: cost_byte_batched_args!( + seal_instantiate_per_input_output_salt_kb, + 1, + 0, + 0 + ), + instantiate_per_output_byte: cost_byte_batched_args!( + seal_instantiate_per_input_output_salt_kb, + 0, + 1, + 0 + ), + instantiate_per_salt_byte: cost_byte_batched_args!( + seal_instantiate_per_input_output_salt_kb, + 0, + 0, + 1 + ), hash_sha2_256: cost_batched!(seal_hash_sha2_256), hash_sha2_256_per_byte: cost_byte_batched!(seal_hash_sha2_256_per_kb), hash_keccak_256: cost_batched!(seal_hash_keccak_256), @@ -348,20 +632,133 @@ impl Default for Schedule { hash_blake2_256_per_byte: cost_byte_batched!(seal_hash_blake2_256_per_kb), hash_blake2_128: cost_batched!(seal_hash_blake2_128), hash_blake2_128_per_byte: cost_byte_batched!(seal_hash_blake2_128_per_kb), - }; - - Self { - version: 0, - instruction_weights, - host_fn_weights, - 
enable_println: false,
-		max_event_topics: 4,
-		max_stack_height: 64 * 1024,
-		max_memory_pages: 16,
-		max_table_size: 16 * 1024,
-		max_subject_len: 32,
-		max_code_size: 512 * 1024,
+			ecdsa_recover: cost_batched!(seal_ecdsa_recover),
 			_phantom: PhantomData,
 		}
 	}
 }
+
+struct ScheduleRules<'a, T: Config> {
+	schedule: &'a Schedule<T>,
+	params: Vec<u32>,
+}
+
+impl<T: Config> Schedule<T> {
+	pub(crate) fn rules(&self, module: &elements::Module) -> impl rules::Rules + '_ {
+		ScheduleRules {
+			schedule: &self,
+			params: module
+				.type_section()
+				.iter()
+				.flat_map(|section| section.types())
+				.map(|func| {
+					let elements::Type::Function(func) = func;
+					func.params().len() as u32
+				})
+				.collect(),
+		}
+	}
+}
+
+impl<'a, T: Config> rules::Rules for ScheduleRules<'a, T> {
+	fn instruction_cost(&self, instruction: &elements::Instruction) -> Option<u32> {
+		use self::elements::Instruction::*;
+		let w = &self.schedule.instruction_weights;
+		let max_params = self.schedule.limits.parameters;
+
+		let weight = match *instruction {
+			End | Unreachable | Return | Else => 0,
+			I32Const(_) | I64Const(_) | Block(_) | Loop(_) | Nop | Drop => w.i64const,
+			I32Load(_, _) |
+			I32Load8S(_, _) |
+			I32Load8U(_, _) |
+			I32Load16S(_, _) |
+			I32Load16U(_, _) |
+			I64Load(_, _) |
+			I64Load8S(_, _) |
+			I64Load8U(_, _) |
+			I64Load16S(_, _) |
+			I64Load16U(_, _) |
+			I64Load32S(_, _) |
+			I64Load32U(_, _) => w.i64load,
+			I32Store(_, _) |
+			I32Store8(_, _) |
+			I32Store16(_, _) |
+			I64Store(_, _) |
+			I64Store8(_, _) |
+			I64Store16(_, _) |
+			I64Store32(_, _) => w.i64store,
+			Select => w.select,
+			If(_) => w.r#if,
+			Br(_) => w.br,
+			BrIf(_) => w.br_if,
+			Call(_) => w.call,
+			GetLocal(_) => w.local_get,
+			SetLocal(_) => w.local_set,
+			TeeLocal(_) => w.local_tee,
+			GetGlobal(_) => w.global_get,
+			SetGlobal(_) => w.global_set,
+			CurrentMemory(_) => w.memory_current,
+			GrowMemory(_) => w.memory_grow,
+			CallIndirect(idx, _) => w.call_indirect.saturating_add(
+				w.call_indirect_per_param
+					.saturating_mul(*self.params.get(idx as usize).unwrap_or(&max_params)),
+			),
+			BrTable(ref data) => w
+				.br_table
+				.saturating_add(w.br_table_per_entry.saturating_mul(data.table.len() as u32)),
+			I32Clz | I64Clz => w.i64clz,
+			I32Ctz | I64Ctz => w.i64ctz,
+			I32Popcnt | I64Popcnt => w.i64popcnt,
+			I32Eqz | I64Eqz => w.i64eqz,
+			I64ExtendSI32 => w.i64extendsi32,
+			I64ExtendUI32 => w.i64extendui32,
+			I32WrapI64 => w.i32wrapi64,
+			I32Eq | I64Eq => w.i64eq,
+			I32Ne | I64Ne => w.i64ne,
+			I32LtS | I64LtS => w.i64lts,
+			I32LtU | I64LtU => w.i64ltu,
+			I32GtS | I64GtS => w.i64gts,
+			I32GtU | I64GtU => w.i64gtu,
+			I32LeS | I64LeS => w.i64les,
+			I32LeU | I64LeU => w.i64leu,
+			I32GeS | I64GeS => w.i64ges,
+			I32GeU | I64GeU => w.i64geu,
+			I32Add | I64Add => w.i64add,
+			I32Sub | I64Sub => w.i64sub,
+			I32Mul | I64Mul => w.i64mul,
+			I32DivS | I64DivS => w.i64divs,
+			I32DivU | I64DivU => w.i64divu,
+			I32RemS | I64RemS => w.i64rems,
+			I32RemU | I64RemU => w.i64remu,
+			I32And | I64And => w.i64and,
+			I32Or | I64Or => w.i64or,
+			I32Xor | I64Xor => w.i64xor,
+			I32Shl | I64Shl => w.i64shl,
+			I32ShrS | I64ShrS => w.i64shrs,
+			I32ShrU | I64ShrU => w.i64shru,
+			I32Rotl | I64Rotl => w.i64rotl,
+			I32Rotr | I64Rotr => w.i64rotr,
+
+			// Returning `None` makes the gas instrumentation fail, which is what we intend
+			// for unsupported or unknown instructions.
+			_ => return None,
+		};
+		Some(weight)
+	}
+
+	fn memory_grow_cost(&self) -> Option<rules::MemoryGrowCost> {
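// Toy model (own minimal types, not the `pwasm_utils` API) of what the gas
// instrumentation does with the rules above: every instruction of a code block
// is mapped to a weight and a charge for the summed cost is injected; an
// instruction that maps to `None` aborts instrumentation and thereby rejects
// the module at upload time.
#[derive(Clone, Copy)]
enum ToyInstr {
    I64Const,
    I64Add,
    FloatOp,
}

fn toy_cost(i: ToyInstr) -> Option<u32> {
    match i {
        ToyInstr::I64Const => Some(1),
        ToyInstr::I64Add => Some(3),
        // Unsupported instruction: fail instrumentation.
        ToyInstr::FloatOp => None,
    }
}

fn toy_block_cost(body: &[ToyInstr]) -> Option<u32> {
    body.iter().try_fold(0u32, |acc, &i| Some(acc.saturating_add(toy_cost(i)?)))
}

#[test]
fn toy_block_cost_example() {
    assert_eq!(toy_block_cost(&[ToyInstr::I64Const, ToyInstr::I64Const, ToyInstr::I64Add]), Some(5));
    assert_eq!(toy_block_cost(&[ToyInstr::I64Const, ToyInstr::FloatOp]), None);
}

+		// We benchmarked the memory.grow instruction with the maximum allowed pages.
+		// The cost for growing is therefore already included in the instruction cost.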
+ None + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::tests::Test; + + #[test] + fn print_test_schedule() { + let schedule = Schedule::::default(); + println!("{:#?}", schedule); + } +} diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 3740952778fd3..41db0796717e4 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -1,196 +1,222 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! This module contains routines for accessing and altering a contract related state. use crate::{ exec::{AccountIdOf, StorageKey}, - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, + weights::WeightInfo, + CodeHash, Config, ContractInfoOf, DeletionQueue, Error, TrieId, +}; +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{DispatchError, DispatchResult}, + storage::child::{self, ChildInfo, KillStorageResult}, + traits::Get, + weights::Weight, }; -use sp_std::prelude::*; +use scale_info::TypeInfo; +use sp_core::crypto::UncheckedFrom; use sp_io::hashing::blake2_256; -use sp_runtime::traits::Bounded; -use frame_support::{storage::child, StorageMap}; - -/// An error that means that the account requested either doesn't exist or represents a tombstone -/// account. -#[cfg_attr(test, derive(PartialEq, Eq, Debug))] -pub struct ContractAbsentError; - -/// Reads a storage kv pair of a contract. -/// -/// The read is performed from the `trie_id` only. The `address` is not necessary. If the contract -/// doesn't store under the given `key` `None` is returned. -pub fn read_contract_storage(trie_id: &TrieId, key: &StorageKey) -> Option> { - child::get_raw(&crate::child_trie_info(&trie_id), &blake2_256(key)) +use sp_runtime::{traits::Hash, RuntimeDebug}; +use sp_std::{marker::PhantomData, prelude::*}; + +pub type ContractInfo = RawContractInfo>; + +/// Information for managing an account and its sub trie abstraction. +/// This is the required info to cache for an account. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct RawContractInfo { + /// Unique ID for the subtree encoded as a bytes vector. + pub trie_id: TrieId, + /// The code associated with a given account. 
+ pub code_hash: CodeHash, + /// This field is reserved for future evolution of format. + pub _reserved: Option<()>, } -/// Update a storage entry into a contract's kv storage. -/// -/// If the `opt_new_value` is `None` then the kv pair is removed. -/// -/// This function also updates the bookkeeping info such as: number of total non-empty pairs a -/// contract owns, the last block the storage was written to, etc. That's why, in contrast to -/// `read_contract_storage`, this function also requires the `account` ID. -/// -/// If the contract specified by the id `account` doesn't exist `Err` is returned.` -pub fn write_contract_storage( - account: &AccountIdOf, - trie_id: &TrieId, - key: &StorageKey, - opt_new_value: Option>, -) -> Result<(), ContractAbsentError> { - let mut new_info = match >::get(account) { - Some(ContractInfo::Alive(alive)) => alive, - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAbsentError), - }; - - let hashed_key = blake2_256(key); - let child_trie_info = &crate::child_trie_info(&trie_id); - - // In order to correctly update the book keeping we need to fetch the previous - // value of the key-value pair. - // - // It might be a bit more clean if we had an API that supported getting the size - // of the value without going through the loading of it. But at the moment of - // writing, there is no such API. - // - // That's not a show stopper in any case, since the performance cost is - // dominated by the trie traversal anyway. - let opt_prev_value = child::get_raw(&child_trie_info, &hashed_key); - - // Update the total number of KV pairs and the number of empty pairs. - match (&opt_prev_value, &opt_new_value) { - (Some(prev_value), None) => { - new_info.total_pair_count -= 1; - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - }, - (None, Some(new_value)) => { - new_info.total_pair_count += 1; - if new_value.is_empty() { - new_info.empty_pair_count += 1; - } - }, - (Some(prev_value), Some(new_value)) => { - if prev_value.is_empty() { - new_info.empty_pair_count -= 1; - } - if new_value.is_empty() { - new_info.empty_pair_count += 1; - } - } - (None, None) => {} - } - - // Update the total storage size. - let prev_value_len = opt_prev_value - .as_ref() - .map(|old_value| old_value.len() as u32) - .unwrap_or(0); - let new_value_len = opt_new_value - .as_ref() - .map(|new_value| new_value.len() as u32) - .unwrap_or(0); - new_info.storage_size = new_info - .storage_size - .saturating_add(new_value_len) - .saturating_sub(prev_value_len); - - new_info.last_write = Some(>::block_number()); - >::insert(&account, ContractInfo::Alive(new_info)); - - // Finally, perform the change on the storage. - match opt_new_value { - Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]), - None => child::kill(&child_trie_info, &hashed_key), +impl RawContractInfo { + /// Associated child trie unique id is built from the hash part of the trie id. + #[cfg(test)] + pub fn child_trie_info(&self) -> ChildInfo { + child_trie_info(&self.trie_id[..]) } +} - Ok(()) +/// Associated child trie unique id is built from the hash part of the trie id. +fn child_trie_info(trie_id: &[u8]) -> ChildInfo { + ChildInfo::new_default(trie_id) } -/// Returns the rent allowance set for the contract give by the account id. 
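// Minimal sketch of the lookup path implied by `child_trie_info`: a contract's
// `trie_id` names its child trie and storage keys are hashed before the
// lookup, so raw keys never appear in the trie. It uses the same imports as
// this file; the helper name is illustrative.
fn read_hashed_key(trie_id: &[u8], key: &[u8; 32]) -> Option<Vec<u8>> {
    let child = ChildInfo::new_default(trie_id);
    child::get_raw(&child, &blake2_256(key))
}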
-pub fn rent_allowance<T: Trait>(
-	account: &AccountIdOf<T>,
-) -> Result<BalanceOf<T>, ContractAbsentError> {
-	<ContractInfoOf<T>>::get(account)
-		.and_then(|i| i.as_alive().map(|i| i.rent_allowance))
-		.ok_or(ContractAbsentError)
+#[derive(Encode, Decode, TypeInfo)]
+pub struct DeletedContract {
+	pub(crate) trie_id: TrieId,
 }
-/// Set the rent allowance for the contract given by the account id.
-///
-/// Returns `Err` if the contract doesn't exist or is a tombstone.
-pub fn set_rent_allowance<T: Trait>(
-	account: &AccountIdOf<T>,
-	rent_allowance: BalanceOf<T>,
-) -> Result<(), ContractAbsentError> {
-	<ContractInfoOf<T>>::mutate(account, |maybe_contract_info| match maybe_contract_info {
-		Some(ContractInfo::Alive(ref mut alive_info)) => {
-			alive_info.rent_allowance = rent_allowance;
+pub struct Storage<T>(PhantomData<T>);
+
+impl<T> Storage<T>
+where
+	T: Config,
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	/// Reads a storage kv pair of a contract.
+	///
+	/// The read is performed from the `trie_id` only. The `address` is not necessary. If the
+	/// contract doesn't store under the given `key` `None` is returned.
+	pub fn read(trie_id: &TrieId, key: &StorageKey) -> Option<Vec<u8>> {
+		child::get_raw(&child_trie_info(&trie_id), &blake2_256(key))
+	}
+
+	/// Update a storage entry of a contract's kv storage.
+	///
+	/// If `opt_new_value` is `None` then the kv pair is removed. The entry is written to (or
+	/// removed from) the child trie identified by `new_info.trie_id`; no further bookkeeping
+	/// is performed.
+	pub fn write(
+		new_info: &mut ContractInfo<T>,
+		key: &StorageKey,
+		opt_new_value: Option<Vec<u8>>,
+	) -> DispatchResult {
+		let hashed_key = blake2_256(key);
+		let child_trie_info = &child_trie_info(&new_info.trie_id);
+
+		match opt_new_value {
+			Some(new_value) => child::put_raw(&child_trie_info, &hashed_key, &new_value[..]),
+			None => child::kill(&child_trie_info, &hashed_key),
+		}
+
+		Ok(())
+	}
+
+	/// Creates a new contract descriptor in the storage with the given code hash at the given
+	/// address.
+	///
+	/// Returns `Err` if there is already a contract at the given address.
+	pub fn new_contract(
+		account: &AccountIdOf<T>,
+		trie_id: TrieId,
+		ch: CodeHash<T>,
+	) -> Result<ContractInfo<T>, DispatchError> {
+		if <ContractInfoOf<T>>::contains_key(account) {
+			return Err(Error::<T>::DuplicateContract.into())
+		}
+
+		let contract = ContractInfo::<T> { code_hash: ch, trie_id, _reserved: None };
+
+		Ok(contract)
+	}
+
+	/// Push a contract's trie to the deletion queue for lazy removal.
+	///
+	/// You must make sure that the contract is also removed when queuing the trie for deletion.
+	pub fn queue_trie_for_deletion(contract: &ContractInfo<T>) -> DispatchResult {
+		if <DeletionQueue<T>>::decode_len().unwrap_or(0) >= T::DeletionQueueDepth::get() as usize {
+			Err(Error::<T>::DeletionQueueFull.into())
+		} else {
+			<DeletionQueue<T>>::append(DeletedContract { trie_id: contract.trie_id.clone() });
 			Ok(())
 		}
-		_ => Err(ContractAbsentError),
-	})
-}
+	}
-/// Returns the code hash of the contract specified by `account` ID.
-#[cfg(test)]
-pub fn code_hash<T: Trait>(account: &AccountIdOf<T>) -> Result<CodeHash<T>, ContractAbsentError> {
-	<ContractInfoOf<T>>::get(account)
-		.and_then(|i| i.as_alive().map(|i| i.code_hash))
-		.ok_or(ContractAbsentError)
-}
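// A hypothetical, test-style use of the `Storage` API above (mock runtime
// `Test`, `ExtBuilder`, and `ALICE` from tests.rs; the flow is illustrative):
// create the descriptor, write a value into the contract's child trie, and
// read it back.
#[test]
fn storage_write_read_roundtrip() {
    use crate::tests::{ExtBuilder, Test, ALICE};
    ExtBuilder::default().build().execute_with(|| {
        let trie_id = Storage::<Test>::generate_trie_id(&ALICE, 1);
        let mut info =
            Storage::<Test>::new_contract(&ALICE, trie_id, Default::default()).unwrap();
        let key = [1u8; 32];
        Storage::<Test>::write(&mut info, &key, Some(b"value".to_vec())).unwrap();
        assert_eq!(Storage::<Test>::read(&info.trie_id, &key), Some(b"value".to_vec()));
    });
}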
+	/// Calculates the weight that is necessary to remove one key from the trie and how many
+	/// of those keys can be deleted from the deletion queue given the supplied queue length
+	/// and weight limit.
+	pub fn deletion_budget(queue_len: usize, weight_limit: Weight) -> (u64, u32) {
+		let base_weight = T::WeightInfo::on_initialize();
+		let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) -
+			T::WeightInfo::on_initialize_per_queue_item(0);
+		let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) -
+			T::WeightInfo::on_initialize_per_trie_key(0);
+		let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as Weight);
+
+		// `weight_per_key` being zero makes no sense and would constitute a failure to
+		// benchmark properly. We opt for not removing any keys at all in this case.
+		let key_budget = weight_limit
+			.saturating_sub(base_weight)
+			.saturating_sub(decoding_weight)
+			.checked_div(weight_per_key)
+			.unwrap_or(0) as u32;
+
+		(weight_per_key, key_budget)
+	}
-/// Creates a new contract descriptor in the storage with the given code hash at the given address.
-///
-/// Returns `Err` if a contract (or a tombstone) already exists at the given address.
-pub fn place_contract<T: Trait>(
-	account: &AccountIdOf<T>,
-	trie_id: TrieId,
-	ch: CodeHash<T>,
-) -> Result<(), &'static str> {
-	<ContractInfoOf<T>>::mutate(account, |maybe_contract_info| {
-		if maybe_contract_info.is_some() {
-			return Err("Alive contract or tombstone already exists");
+	/// Delete as many items from the deletion queue as possible within the supplied weight
+	/// limit.
+	///
+	/// It returns the amount of weight used for that task.
+	pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight {
+		let queue_len = <DeletionQueue<T>>::decode_len().unwrap_or(0);
+		if queue_len == 0 {
+			return 0
 		}
-		*maybe_contract_info = Some(
-			AliveContractInfo::<T> {
-				code_hash: ch,
-				storage_size: 0,
-				trie_id,
-				deduct_block: <frame_system::Module<T>>::block_number(),
-				rent_allowance: <BalanceOf<T>>::max_value(),
-				empty_pair_count: 0,
-				total_pair_count: 0,
-				last_write: None,
-			}
-			.into(),
-		);
+		let (weight_per_key, mut remaining_key_budget) =
+			Self::deletion_budget(queue_len, weight_limit);
-		Ok(())
-	})
-}
+		// We want to check whether we have enough weight to decode the queue before
+		// proceeding. Too little weight for decoding might happen during runtime upgrades
+		// which consume the whole block before the other `on_initialize` blocks are called.
+		if remaining_key_budget == 0 {
+			return weight_limit
+		}
-/// Removes the contract and all the storage associated with it.
-///
-/// This function doesn't affect the account.
-pub fn destroy_contract<T: Trait>(address: &AccountIdOf<T>, trie_id: &TrieId) {
-	<ContractInfoOf<T>>::remove(address);
-	child::kill_storage(&crate::child_trie_info(&trie_id));
+		let mut queue = <DeletionQueue<T>>::get();
+
+		if let (Some(trie), true) = (queue.get(0), remaining_key_budget > 0) {
+			let outcome =
+				child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget));
+			let keys_removed = match outcome {
+				// This should not happen as our budget was large enough to remove all keys.
+				KillStorageResult::SomeRemaining(count) => count,
+				KillStorageResult::AllRemoved(count) => {
+					// We do not care to preserve order. The contract is deleted already and
+					// no one waits for the trie to be deleted.
+					queue.swap_remove(0);
+					count
+				},
+			};
+			remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed);
+		}
+
+		<DeletionQueue<T>>::put(queue);
+		weight_limit.saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as Weight))
+	}
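// Worked example of the `deletion_budget` arithmetic with hypothetical
// weights: whatever remains of the limit after the fixed base cost and the
// per-item queue decoding cost is divided by the marginal cost of removing a
// single trie key.
fn example_key_budget(limit: u64, base: u64, per_item: u64, queue_len: u64, per_key: u64) -> u64 {
    limit
        .saturating_sub(base)
        .saturating_sub(per_item.saturating_mul(queue_len))
        .checked_div(per_key)
        .unwrap_or(0)
}

#[test]
fn example_key_budget_numbers() {
    // (1_000_000 - 100_000 - 10 * 5_000) / 10_000 = 85 keys in this block.
    assert_eq!(example_key_budget(1_000_000, 100_000, 5_000, 10, 10_000), 85);
}

+
+	/// This generator uses an inner counter for the account id and applies the hash over
+	/// `AccountId + accountid_counter`.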
+ pub fn generate_trie_id(account_id: &AccountIdOf, seed: u64) -> TrieId { + let buf: Vec<_> = account_id.as_ref().iter().chain(&seed.to_le_bytes()).cloned().collect(); + T::Hashing::hash(&buf).as_ref().into() + } + + /// Returns the code hash of the contract specified by `account` ID. + #[cfg(test)] + pub fn code_hash(account: &AccountIdOf) -> Option> { + >::get(account).map(|i| i.code_hash) + } + + /// Fill up the queue in order to exercise the limits during testing. + #[cfg(test)] + pub fn fill_queue_with_dummies() { + let queue: Vec<_> = (0..T::DeletionQueueDepth::get()) + .map(|_| DeletedContract { trie_id: vec![] }) + .collect(); + >::put(queue); + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 1c14e3e35f248..f5b95c192c42e 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -1,152 +1,232 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
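// Standalone sketch of `generate_trie_id` above, assuming the runtime's
// `Hashing` is BlakeTwo256 (as in the test runtime): hash the account id bytes
// concatenated with the little-endian seed taken from the account counter.
fn example_trie_id(account: &[u8], seed: u64) -> Vec<u8> {
    let buf: Vec<u8> = account.iter().chain(seed.to_le_bytes().iter()).cloned().collect();
    sp_io::hashing::blake2_256(&buf).to_vec()
}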
use crate::{ - BalanceOf, ContractAddressFor, ContractInfo, ContractInfoOf, GenesisConfig, Module, - RawAliveContractInfo, RawEvent, Trait, TrieId, Schedule, TrieIdGenerator, gas::Gas, - Error, Config, RuntimeReturnCode, + chain_extension::{ + ChainExtension, Environment, Ext, InitState, Result as ExtensionResult, RetVal, + ReturnFlags, SysConfig, UncheckedFrom, + }, + exec::Frame, + storage::{RawContractInfo, Storage}, + wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, + weights::WeightInfo, + BalanceOf, Config, ContractInfoOf, Error, Pallet, Schedule, }; use assert_matches::assert_matches; -use hex_literal::*; use codec::Encode; -use sp_runtime::{ - Perbill, - traits::{BlakeTwo256, Hash, IdentityLookup, Convert}, - testing::{Header, H256}, -}; use frame_support::{ - assert_ok, assert_err_ignore_postinfo, impl_outer_dispatch, impl_outer_event, - impl_outer_origin, parameter_types, StorageMap, StorageValue, - traits::{Currency, Get, ReservableCurrency}, - weights::{Weight, PostDispatchInfo}, + assert_err, assert_err_ignore_postinfo, assert_ok, dispatch::DispatchErrorWithPostInfo, + parameter_types, + storage::child, + traits::{Contains, Currency, OnInitialize, ReservableCurrency}, + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, PostDispatchInfo, Weight}, }; -use std::cell::RefCell; use frame_system::{self as system, EventRecord, Phase}; +use pretty_assertions::assert_eq; +use sp_core::Bytes; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, Convert, Hash, IdentityLookup}, + AccountId32, +}; +use std::cell::RefCell; -mod contracts { - // Re-export contents of the root. This basically - // needs to give a name for the current crate. - // This hack is required for `impl_outer_event!`. - pub use super::super::*; - pub use frame_support::impl_outer_event; -} - -use pallet_balances as balances; - -impl_outer_event! { - pub enum MetaEvent for Test { - system, - balances, - contracts, - } -} -impl_outer_origin! { - pub enum Origin for Test where system = frame_system { } -} -impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - balances::Balances, - contracts::Contracts, +use crate as pallet_contracts; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Randomness: pallet_randomness_collective_flip::{Pallet, Storage}, + Utility: pallet_utility::{Pallet, Call, Storage, Event}, + Contracts: pallet_contracts::{Pallet, Call, Storage, Event}, } -} +); #[macro_use] pub mod test_utils { - use super::{Test, Balances}; - use crate::{ContractInfoOf, TrieIdGenerator, CodeHash}; - use crate::storage::{write_contract_storage, read_contract_storage}; - use crate::exec::StorageKey; - use frame_support::{StorageMap, traits::Currency}; - - pub fn set_storage(addr: &u64, key: &StorageKey, value: Option>) { - let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); - write_contract_storage::(&1, &contract_info.trie_id, key, value).unwrap(); + use super::{Balances, Test}; + use crate::{ + exec::{AccountIdOf, StorageKey}, + storage::Storage, + AccountCounter, CodeHash, ContractInfoOf, Pallet as Contracts, TrieId, + }; + use frame_support::traits::Currency; + + pub fn set_storage(addr: &AccountIdOf, key: &StorageKey, value: Option>) { + let mut contract_info = >::get(&addr).unwrap(); + Storage::::write(&mut contract_info, key, value).unwrap(); + } + pub fn get_storage(addr: &AccountIdOf, key: &StorageKey) -> Option> { + let contract_info = >::get(&addr).unwrap(); + Storage::::read(&contract_info.trie_id, key) } - pub fn get_storage(addr: &u64, key: &StorageKey) -> Option> { - let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); - read_contract_storage(&contract_info.trie_id, key) + pub fn generate_trie_id(address: &AccountIdOf) -> TrieId { + let seed = >::mutate(|counter| { + *counter += 1; + *counter + }); + Storage::::generate_trie_id(address, seed) } - pub fn place_contract(address: &u64, code_hash: CodeHash) { - let trie_id = ::TrieIdGenerator::trie_id(address); - crate::storage::place_contract::(&address, trie_id, code_hash).unwrap() + pub fn place_contract(address: &AccountIdOf, code_hash: CodeHash) { + let trie_id = generate_trie_id(address); + set_balance(address, Contracts::::subsistence_threshold() * 10); + let contract = Storage::::new_contract(&address, trie_id, code_hash).unwrap(); + >::insert(address, contract); } - pub fn set_balance(who: &u64, amount: u64) { + pub fn set_balance(who: &AccountIdOf, amount: u64) { let imbalance = Balances::deposit_creating(who, amount); drop(imbalance); } - pub fn get_balance(who: &u64) -> u64 { + pub fn get_balance(who: &AccountIdOf) -> u64 { Balances::free_balance(who) } macro_rules! assert_return_code { ( $x:expr , $y:expr $(,)? ) => {{ use sp_std::convert::TryInto; assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); - }} + }}; + } + macro_rules! assert_refcount { + ( $code_hash:expr , $should:expr $(,)? ) => {{ + let is = crate::CodeStorage::::get($code_hash).map(|m| m.refcount()).unwrap_or(0); + assert_eq!(is, $should); + }}; } } thread_local! 
{ - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); + static TEST_EXTENSION: RefCell = Default::default(); +} + +pub struct TestExtension { + enabled: bool, + last_seen_buffer: Vec, + last_seen_inputs: (u32, u32, u32, u32), +} + +impl TestExtension { + fn disable() { + TEST_EXTENSION.with(|e| e.borrow_mut().enabled = false) + } + + fn last_seen_buffer() -> Vec { + TEST_EXTENSION.with(|e| e.borrow().last_seen_buffer.clone()) + } + + fn last_seen_inputs() -> (u32, u32, u32, u32) { + TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs.clone()) + } +} + +impl Default for TestExtension { + fn default() -> Self { + Self { enabled: true, last_seen_buffer: vec![], last_seen_inputs: (0, 0, 0, 0) } + } } -pub struct ExistentialDeposit; -impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } +impl ChainExtension for TestExtension { + fn call(func_id: u32, env: Environment) -> ExtensionResult + where + E: Ext, + ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, + { + match func_id { + 0 => { + let mut env = env.buf_in_buf_out(); + let input = env.read(2)?; + env.write(&input, false, None)?; + TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); + Ok(RetVal::Converging(func_id)) + }, + 1 => { + let env = env.only_in(); + TEST_EXTENSION.with(|e| { + e.borrow_mut().last_seen_inputs = + (env.val0(), env.val1(), env.val2(), env.val3()) + }); + Ok(RetVal::Converging(func_id)) + }, + 2 => { + let mut env = env.buf_in_buf_out(); + let weight = env.read(2)?[1].into(); + env.charge_weight(weight)?; + Ok(RetVal::Converging(func_id)) + }, + 3 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), + _ => { + panic!("Passed unknown func_id to test chain extension: {}", func_id); + }, + } + } + + fn enabled() -> bool { + TEST_EXTENSION.with(|e| e.borrow().enabled) + } } -#[derive(Clone, Eq, PartialEq, Debug)] -pub struct Test; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); + pub static ExistentialDeposit: u64 = 0; } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; type Hash = H256; type Call = Call; type Hashing = BlakeTwo256; - type AccountId = u64; + type AccountId = AccountId32; type Lookup = IdentityLookup; type Header = Header; - type Event = MetaEvent; + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -impl pallet_balances::Trait for Test { +impl pallet_randomness_collective_flip::Config for Test {} +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = MetaEvent; + type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -155,24 +235,24 @@ impl pallet_balances::Trait for Test { parameter_types! { pub const MinimumPeriod: u64 = 1; } -impl pallet_timestamp::Trait for Test { +impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } -parameter_types! { - pub const SignedClaimHandicap: u64 = 2; - pub const TombstoneDeposit: u64 = 16; - pub const StorageSizeOffset: u32 = 8; - pub const RentByteFee: u64 = 4; - pub const RentDepositOffset: u64 = 10_000; - pub const SurchargeReward: u64 = 150; - pub const MaxDepth: u32 = 100; - pub const MaxValueSize: u32 = 16_384; +impl pallet_utility::Config for Test { + type Event = Event; + type Call = Call; + type WeightInfo = (); } - parameter_types! 
{ + pub const ContractDeposit: u64 = 16; + pub const MaxValueSize: u32 = 16_384; + pub const DeletionQueueDepth: u32 = 1024; + pub const DeletionWeightLimit: Weight = 500_000_000_000; + pub const MaxCodeSize: u32 = 2 * 1024; + pub MySchedule: Schedule = >::default(); pub const TransactionByteFee: u64 = 0; } @@ -182,69 +262,55 @@ impl Convert> for Test { } } -impl Trait for Test { - type Time = Timestamp; - type Randomness = Randomness; - type Currency = Balances; - type DetermineContractAddress = DummyContractAddressFor; - type Event = MetaEvent; - type TrieIdGenerator = DummyTrieIdGenerator; - type RentPayment = (); - type SignedClaimHandicap = SignedClaimHandicap; - type TombstoneDeposit = TombstoneDeposit; - type StorageSizeOffset = StorageSizeOffset; - type RentByteFee = RentByteFee; - type RentDepositOffset = RentDepositOffset; - type SurchargeReward = SurchargeReward; - type MaxDepth = MaxDepth; - type MaxValueSize = MaxValueSize; - type WeightPrice = Self; - type WeightInfo = (); -} +/// A filter whose filter function can be swapped at runtime. +pub struct TestFilter; -type Balances = pallet_balances::Module; -type Timestamp = pallet_timestamp::Module; -type Contracts = Module; -type System = frame_system::Module; -type Randomness = pallet_randomness_collective_flip::Module; +thread_local! { + static CALL_FILTER: RefCell bool> = RefCell::new(|_| true); +} -pub struct DummyContractAddressFor; -impl ContractAddressFor for DummyContractAddressFor { - fn contract_address_for(_code_hash: &H256, _data: &[u8], origin: &u64) -> u64 { - *origin + 1 +impl TestFilter { + pub fn set_filter(filter: fn(&Call) -> bool) { + CALL_FILTER.with(|fltr| *fltr.borrow_mut() = filter); } } -pub struct DummyTrieIdGenerator; -impl TrieIdGenerator for DummyTrieIdGenerator { - fn trie_id(account_id: &u64) -> TrieId { - let new_seed = super::AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - let mut res = vec![]; - res.extend_from_slice(&new_seed.to_le_bytes()); - res.extend_from_slice(&account_id.to_le_bytes()); - res +impl Contains for TestFilter { + fn contains(call: &Call) -> bool { + CALL_FILTER.with(|fltr| fltr.borrow()(call)) } } -const ALICE: u64 = 1; -const BOB: u64 = 2; -const CHARLIE: u64 = 3; -const DJANGO: u64 = 4; +impl Config for Test { + type Time = Timestamp; + type Randomness = Randomness; + type Currency = Balances; + type Event = Event; + type Call = Call; + type CallFilter = TestFilter; + type ContractDeposit = ContractDeposit; + type CallStack = [Frame; 31]; + type WeightPrice = Self; + type WeightInfo = (); + type ChainExtension = TestExtension; + type DeletionQueueDepth = DeletionQueueDepth; + type DeletionWeightLimit = DeletionWeightLimit; + type Schedule = MySchedule; +} + +pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); +pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); +pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -const GAS_LIMIT: Gas = 10_000_000_000; +const GAS_LIMIT: Weight = 10_000_000_000; pub struct ExtBuilder { existential_deposit: u64, } impl Default for ExtBuilder { fn default() -> Self { - Self { - existential_deposit: 1, - } + Self { existential_deposit: 1 } } } impl ExtBuilder { @@ -258,15 +324,9 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![], - }.assimilate_storage(&mut 
t).unwrap();
-		GenesisConfig {
-			current_schedule: Schedule::<Test> {
-				enable_println: true,
-				..Default::default()
-			},
-		}.assimilate_storage(&mut t).unwrap();
+		pallet_balances::GenesisConfig::<Test> { balances: vec![] }
+			.assimilate_storage(&mut t)
+			.unwrap();
 		let mut ext = sp_io::TestExternalities::new(t);
 		ext.execute_with(|| System::set_block_number(1));
 		ext
@@ -277,11 +337,9 @@ impl ExtBuilder {
 /// with its hash.
 ///
 /// The fixture files are located under the `fixtures/` directory.
-fn compile_module<T>(
-	fixture_name: &str,
-) -> wat::Result<(Vec<u8>, <T::Hashing as Hash>::Output)>
+fn compile_module<T>(fixture_name: &str) -> wat::Result<(Vec<u8>, <T::Hashing as Hash>::Output)>
 where
-	T: frame_system::Trait,
+	T: frame_system::Config,
 {
 	let fixture_path = ["fixtures/", fixture_name, ".wat"].concat();
 	let wasm_binary = wat::parse_file(fixture_path)?;
@@ -291,65 +349,53 @@ where
 // Perform a call to a plain account.
 // The actual transfer fails because we can only call contracts.
-// Then we check that no gas was used because the base costs for calling are either charged
-// as part of the `call` extrinsic or by `seal_call`.
+// Then we check that at least the base costs were charged (no runtime gas costs).
 #[test]
 fn calling_plain_account_fails() {
 	ExtBuilder::default().build().execute_with(|| {
 		let _ = Balances::deposit_creating(&ALICE, 100_000_000);
+		let base_cost = <<Test as Config>::WeightInfo as WeightInfo>::call();
 		assert_eq!(
 			Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, Vec::new()),
-			Err(
-				DispatchErrorWithPostInfo {
-					error: Error::<Test>::NotCallable.into(),
-					post_info: PostDispatchInfo {
-						actual_weight: Some(0),
-						pays_fee: Default::default(),
-					},
-				}
-			)
+			Err(DispatchErrorWithPostInfo {
+				error: Error::<Test>::ContractNotFound.into(),
+				post_info: PostDispatchInfo {
+					actual_weight: Some(base_cost),
+					pays_fee: Default::default(),
+				},
+			})
 		);
 	});
 }
 #[test]
 fn account_removal_does_not_remove_storage() {
-	use self::test_utils::{set_storage, get_storage};
+	use self::test_utils::{get_storage, set_storage};
 	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
-		let trie_id1 = <Test as Trait>::TrieIdGenerator::trie_id(&1);
-		let trie_id2 = <Test as Trait>::TrieIdGenerator::trie_id(&2);
+		let trie_id1 = test_utils::generate_trie_id(&ALICE);
+		let trie_id2 = test_utils::generate_trie_id(&BOB);
 		let key1 = &[1; 32];
 		let key2 = &[2; 32];
 		// Set up two accounts with free balance above the existential threshold.
 		{
-			let alice_contract_info = ContractInfo::Alive(RawAliveContractInfo {
+			let alice_contract_info = RawContractInfo {
 				trie_id: trie_id1.clone(),
-				storage_size: 0,
-				empty_pair_count: 0,
-				total_pair_count: 0,
-				deduct_block: System::block_number(),
 				code_hash: H256::repeat_byte(1),
-				rent_allowance: 40,
-				last_write: None,
-			});
+				_reserved: None,
+			};
 			let _ = Balances::deposit_creating(&ALICE, 110);
 			ContractInfoOf::<Test>::insert(ALICE, &alice_contract_info);
 			set_storage(&ALICE, &key1, Some(b"1".to_vec()));
 			set_storage(&ALICE, &key2, Some(b"2".to_vec()));
-			let bob_contract_info = ContractInfo::Alive(RawAliveContractInfo {
+			let bob_contract_info = RawContractInfo {
 				trie_id: trie_id2.clone(),
-				storage_size: 0,
-				empty_pair_count: 0,
-				total_pair_count: 0,
-				deduct_block: System::block_number(),
 				code_hash: H256::repeat_byte(2),
-				rent_allowance: 40,
-				last_write: None,
-			});
+				_reserved: None,
+			};
 			let _ = Balances::deposit_creating(&BOB, 110);
 			ContractInfoOf::<Test>::insert(BOB, &bob_contract_info);
 			set_storage(&BOB, &key1, Some(b"3".to_vec()));
@@ -367,23 +413,11 @@ fn account_removal_does_not_remove_storage() {
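// Hypothetical use of the swappable `TestFilter` defined earlier: a test can
// reject every `Balances` call issued through the contracts pallet while
// permitting the rest, then restore the permissive default.
#[test]
fn filter_can_block_balance_calls() {
    ExtBuilder::default().build().execute_with(|| {
        TestFilter::set_filter(|call| !matches!(call, Call::Balances(_)));
        // Contract-initiated `Balances` calls would now be rejected by `CallFilter`.
        TestFilter::set_filter(|_| true);
    });
}

 		// Verify that no entries are removed.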
{ - assert_eq!( - get_storage(&ALICE, key1), - Some(b"1".to_vec()) - ); - assert_eq!( - get_storage(&ALICE, key2), - Some(b"2".to_vec()) - ); - - assert_eq!( - get_storage(&BOB, key1), - Some(b"3".to_vec()) - ); - assert_eq!( - get_storage(&BOB, key2), - Some(b"4".to_vec()) - ); + assert_eq!(get_storage(&ALICE, key1), Some(b"1".to_vec())); + assert_eq!(get_storage(&ALICE, key2), Some(b"2".to_vec())); + + assert_eq!(get_storage(&BOB, key1), Some(b"3".to_vec())); + assert_eq!(get_storage(&BOB, key2), Some(b"4".to_vec())); } }); } @@ -392,1317 +426,513 @@ fn account_removal_does_not_remove_storage() { fn instantiate_and_call_and_deposit_event() { let (wasm, code_hash) = compile_module::("return_from_start_fn").unwrap(); - ExtBuilder::default() - .existential_deposit(100) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence = super::Config::::subsistence_threshold_uncached(); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let subsistence = Pallet::::subsistence_threshold(); - // Check at the end to get hash on error easily - let creation = Contracts::instantiate( - Origin::signed(ALICE), - subsistence, - GAS_LIMIT, - code_hash.into(), - vec![], - ); + // Check at the end to get hash on error easily + let creation = Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + wasm, + vec![], + vec![], + ); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - pretty_assertions::assert_eq!(System::events(), vec![ + assert_eq!( + System::events(), + vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + event: Event::System(frame_system::Event::NewAccount(ALICE.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), + event: Event::Balances(pallet_balances::Event::Endowed(ALICE, 1_000_000)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + event: Event::System(frame_system::Event::NewAccount(addr.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), + event: Event::Balances(pallet_balances::Event::Endowed( + addr.clone(), + subsistence * 100 + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, subsistence) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + ALICE, + addr.clone(), + subsistence * 100 + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(ALICE, BOB, subsistence) - ), + event: Event::Contracts(crate::Event::CodeStored(code_hash.into())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::ContractExecution(BOB, vec![1, 2, 3, 4])), + event: Event::Contracts(crate::Event::ContractEmitted( + addr.clone(), + vec![1, 2, 3, 4] + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), + event: Event::Contracts(crate::Event::Instantiated(ALICE, addr.clone())), topics: vec![], - } - ]); + }, + ] + ); 
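// A hedged sketch of the two instantiation paths available in this version
// (signatures inferred from the calls in this file, not confirmed elsewhere):
// `instantiate_with_code` uploads and instantiates in one extrinsic, after
// which further instances of the same code can be created from the stored
// code hash with a fresh salt.
//
//     assert_ok!(Contracts::instantiate_with_code(
//         Origin::signed(ALICE), subsistence * 100, GAS_LIMIT, wasm, vec![], vec![0],
//     ));
//     assert_ok!(Contracts::instantiate(
//         Origin::signed(ALICE), subsistence * 100, GAS_LIMIT, code_hash, vec![], vec![1],
//     ));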
- assert_ok!(creation); - assert!(ContractInfoOf::::contains_key(BOB)); - }); + assert_ok!(creation); + assert!(ContractInfoOf::::contains_key(&addr)); + }); } #[test] fn deposit_event_max_value_limit() { let (wasm, code_hash) = compile_module::("event_size").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - code_hash.into(), - vec![], - )); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); + // Call contract with allowed storage value. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT * 2, // we are copying a huge buffer, + ::Schedule::get().limits.payload_len.encode(), + )); - // Call contract with allowed storage value. - assert_ok!(Contracts::call( + // Call contract with too large a storage value. + assert_err_ignore_postinfo!( + Contracts::call( Origin::signed(ALICE), - BOB, + addr, 0, - GAS_LIMIT * 2, // we are copying a huge buffer, - ::MaxValueSize::get().encode(), - )); - - // Call contract with too large a storage value. - assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - (::MaxValueSize::get() + 1).encode(), - ), - Error::::ValueTooLarge, - ); - }); + GAS_LIMIT, + (::Schedule::get().limits.payload_len + 1).encode(), + ), + Error::::ValueTooLarge, + ); + }); } #[test] fn run_out_of_gas() { let (wasm, code_hash) = compile_module::("run_out_of_gas").unwrap(); + let subsistence = Pallet::::subsistence_threshold(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100 * subsistence, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - assert_ok!(Contracts::instantiate( + // Call the contract with a fixed gas limit. It must run out of gas because it just + // loops forever. + assert_err_ignore_postinfo!( + Contracts::call( Origin::signed(ALICE), - 100, - GAS_LIMIT, - code_hash.into(), + addr, // newly created account + 0, + 1_000_000_000_000, vec![], - )); - - // Call the contract with a fixed gas limit. It must run out of gas because it just - // loops forever. 
- assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - BOB, // newly created account - 0, - 67_500_000, - vec![], - ), - Error::::OutOfGas, - ); - }); -} - -/// Input data for each call in set_rent code -mod call { - pub fn set_storage_4_byte() -> Vec { vec![] } - pub fn remove_storage_4_byte() -> Vec { vec![0] } - pub fn transfer() -> Vec { vec![0, 0] } - pub fn null() -> Vec { vec![0, 0, 0] } + ), + Error::::OutOfGas, + ); + }); } -/// Test correspondence of set_rent code and its hash. -/// Also test that encoded extrinsic in code correspond to the correct transfer -#[test] -fn test_set_rent_code_and_hash() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // If you ever need to update the wasm source this test will fail - // and will show you the actual hash. - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - ]); - }); +fn initialize_block(number: u64) { + System::initialize(&number, &[0u8; 32].into(), &Default::default(), Default::default()); } #[test] -fn storage_size() { - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - - // Storage size - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - let bob_contract = ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap(); - assert_eq!( - bob_contract.storage_size, - 4 - ); - assert_eq!( - bob_contract.total_pair_count, - 1, - ); - assert_eq!( - bob_contract.empty_pair_count, - 0, - ); +fn storage_max_value_limit() { + let (wasm, code_hash) = compile_module::("storage_size").unwrap(); - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - call::set_storage_4_byte() - )); - let bob_contract = ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap(); - assert_eq!( - bob_contract.storage_size, - 4 + 4 - ); - assert_eq!( - bob_contract.total_pair_count, - 2, - ); - assert_eq!( - bob_contract.empty_pair_count, - 0, - ); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + 
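// Sketch of the deterministic address derivation that lets these tests predict
// `addr` up front (an assumption about this version's scheme:
// hash(deployer bytes ++ code hash bytes ++ salt), with blake2-256 standing in
// for the runtime's `Hashing`).
fn example_contract_address(deployer: &[u8], code_hash: &[u8], salt: &[u8]) -> [u8; 32] {
    let buf: Vec<u8> = deployer.iter().chain(code_hash).chain(salt).cloned().collect();
    sp_io::hashing::blake2_256(&buf)
}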
ContractInfoOf::::get(&addr).unwrap(); + + // Call contract with allowed storage value. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT * 2, // we are copying a huge buffer + ::Schedule::get().limits.payload_len.encode(), + )); - assert_ok!(Contracts::call( + // Call contract with too large a storage value. + assert_err_ignore_postinfo!( + Contracts::call( Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - call::remove_storage_4_byte() - )); - let bob_contract = ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap(); - assert_eq!( - bob_contract.storage_size, - 4 - ); - assert_eq!( - bob_contract.total_pair_count, - 1, - ); - assert_eq!( - bob_contract.empty_pair_count, + addr, 0, - ); - }); -} - -#[test] -fn empty_kv_pairs() { - let (wasm, code_hash) = compile_module::("set_empty_storage").unwrap(); - - ExtBuilder::default() - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, GAS_LIMIT, - code_hash.into(), - vec![], - )); - let bob_contract = ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap(); - - assert_eq!( - bob_contract.storage_size, - 0, - ); - assert_eq!( - bob_contract.total_pair_count, - 1, - ); - assert_eq!( - bob_contract.empty_pair_count, - 1, - ); - }); -} - -fn initialize_block(number: u64) { - System::initialize( - &number, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - Default::default(), - ); -} - -#[test] -fn deduct_blocks() { - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000); - - // Advance 4 blocks - initialize_block(5); - - // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); - - // Check result - let rent = (8 + 4 - 3) // storage size = size_offset + deploy_set_storage - deposit_offset - * 4 // rent byte price - * 4; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent); - assert_eq!(bob_contract.deduct_block, 5); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent); - - // Advance 7 blocks more - initialize_block(12); - - // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); - - // Check result - let rent_2 = (8 + 4 - 2) // storage size = size_offset + deploy_set_storage - deposit_offset - * 4 // rent byte price - * 7; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); - assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); - - // Second call on same block should have no effect on rent - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); - - let 
bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); - assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); - }); -} - -#[test] -fn call_contract_removals() { - removals(|| { - // Call on already-removed account might fail, and this is fine. - let _ = Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()); - true + (::Schedule::get().limits.payload_len + 1).encode(), + ), + Error::::ValueTooLarge, + ); }); } #[test] -fn inherent_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok()); -} +fn deploy_and_call_other_contract() { + let (callee_wasm, callee_code_hash) = compile_module::("return_with_data").unwrap(); + let (caller_wasm, caller_code_hash) = compile_module::("caller_contract").unwrap(); -#[test] -fn signed_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok()); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + caller_wasm, + vec![], + vec![], + )); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + callee_wasm, + 0u32.to_le_bytes().encode(), + vec![42], + )); + + // Call BOB contract, which attempts to instantiate and call the callee contract and + // makes various assertions on the results from those calls. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + Contracts::contract_address(&ALICE, &caller_code_hash, &[]), + 0, + GAS_LIMIT, + callee_code_hash.as_ref().to_vec(), + )); + }); } #[test] -fn claim_surcharge_malus() { - // Test surcharge malus for inherent - claim_surcharge(4, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::none(), BOB, Some(ALICE)).is_ok(), false); - - // Test surcharge malus for signed - claim_surcharge(4, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); -} - -/// Claim surcharge with the given trigger_call at the given blocks. -/// If `removes` is true then assert that the contract is a tombstone. 
-fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) { - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Advance blocks - initialize_block(blocks); - - // Trigger rent through call - assert!(trigger_call()); - - if removes { - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - } else { - assert!(ContractInfoOf::::get(BOB).unwrap().get_alive().is_some()); - } - }); -} - -/// Test for all kind of removals for the given trigger: -/// * if balance is reached and balance > subsistence threshold -/// * if allowance is exceeded -/// * if balance is reached and balance < subsistence threshold -/// * this case cannot be triggered by a contract: we check whether a tombstone is left -fn removals(trigger_call: impl Fn() -> bool) { - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - - // Balance reached and superior to subsistence threshold - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - let subsistence_threshold = 50 /*existential_deposit*/ + 16 /*tombstone_deposit*/; - - // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!(ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap().rent_allowance, 1_000); - assert_eq!(Balances::free_balance(BOB), 100); - - // Advance blocks - initialize_block(10); - - // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); - - // Advance blocks - initialize_block(20); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); - }); +fn cannot_self_destruct_through_draning() { + let (wasm, code_hash) = compile_module::("drain").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - // Allowance exceeded - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 1_000, - GAS_LIMIT, - code_hash.into(), - ::Balance::from(100u32).encode() // rent allowance - )); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!( - ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap() - .rent_allowance, - 100 - ); - assert_eq!(Balances::free_balance(BOB), 1_000); - - // Advance blocks - initialize_block(10); - - // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB) - .unwrap() - .get_tombstone() - 
.is_some()); - // Balance should be initial balance - initial rent_allowance - assert_eq!(Balances::free_balance(BOB), 900); - - // Advance blocks - initialize_block(20); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB) - .unwrap() - .get_tombstone() - .is_some()); - assert_eq!(Balances::free_balance(BOB), 900); - }); + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Balance reached and inferior to subsistence threshold - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let subsistence_threshold = - Balances::minimum_balance() + ::TombstoneDeposit::get(); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 50 + subsistence_threshold, - GAS_LIMIT, - code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!( - ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap() - .rent_allowance, - 1_000 - ); - assert_eq!( - Balances::free_balance(BOB), - 50 + subsistence_threshold, - ); - - // Transfer funds - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - call::transfer() - )); - assert_eq!( - ContractInfoOf::::get(BOB) - .unwrap() - .get_alive() - .unwrap() - .rent_allowance, - 1_000 - ); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); - - // Advance blocks - initialize_block(10); - - // Trigger rent through call - assert!(trigger_call()); - assert_matches!(ContractInfoOf::::get(BOB), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); - - // Advance blocks - initialize_block(20); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert_matches!(ContractInfoOf::::get(BOB), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); - }); -} + // Check that the BOB contract has been instantiated. + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); -#[test] -fn call_removed_contract() { - let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - - // Balance reached and superior to subsistence threshold - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - GAS_LIMIT, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Calling contract should succeed. - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); - - // Advance blocks - initialize_block(10); - - // Calling contract should remove contract and fail. - assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), - Error::::NotCallable - ); - // Calling a contract that is about to evict shall emit an event. 
- assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), - topics: vec![], - }, - ]); - - // Subsequent contract calls should also fail. - assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), - Error::::NotCallable - ); - }) + // Call BOB which makes it send all funds to the zero address + // The contract code asserts that the correct error value is returned. + assert_ok!(Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, vec![])); + }); } #[test] -fn default_rent_allowance_on_instantiate() { - let (wasm, code_hash) = compile_module::("check_default_rent_allowance").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - code_hash.into(), - vec![], - )); +fn cannot_self_destruct_while_live() { + let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Advance blocks - initialize_block(5); + // Check that the BOB contract has been instantiated. + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); - // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null())); + // Call BOB with input data, forcing it make a recursive call to itself to + // self-destruct, resulting in a trap. + assert_err_ignore_postinfo!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![0],), + Error::::ContractTrapped, + ); - // Check contract is still alive - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive(); - assert!(bob_contract.is_some()) - }); + // Check that BOB is still there. + assert_matches!(ContractInfoOf::::get(&addr), Some(_)); + }); } #[test] -fn restorations_dirty_storage_and_different_storage() { - restoration(true, true); -} +fn self_destruct_works() { + let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = Balances::deposit_creating(&DJANGO, 1_000_000); -#[test] -fn restorations_dirty_storage() { - restoration(false, true); -} + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); -#[test] -fn restoration_different_storage() { - restoration(true, false); -} + // Check that the BOB contract has been instantiated. 
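// NOTE (editor's aside): these rewritten tests no longer address contracts via the
// fixed `BOB` constant; the address of an instantiated contract is derived from the
// deployer, the code hash, and a salt via `Contracts::contract_address`. A minimal
// sketch of such a derivation, assuming the address is simply the hash over
// deployer ++ code_hash ++ salt (`blake2_256` stands in for the runtime's configured
// hasher, and `sketch_contract_address` is an illustrative name, not the pallet's API):
fn sketch_contract_address(deployer: &[u8; 32], code_hash: &[u8; 32], salt: &[u8]) -> [u8; 32] {
    // Concatenate the three inputs and hash the buffer to get a deterministic address.
    let buf: Vec<u8> = deployer
        .iter()
        .chain(code_hash.iter())
        .copied()
        .chain(salt.iter().copied())
        .collect();
    sp_core::hashing::blake2_256(&buf)
}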
+ assert_matches!(ContractInfoOf::::get(&addr), Some(_)); -#[test] -fn restoration_success() { - restoration(false, false); -} + // Drop all previous events + initialize_block(2); -fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: bool) { - let (set_rent_wasm, set_rent_code_hash) = compile_module::("set_rent").unwrap(); - let (restoration_wasm, restoration_code_hash) = compile_module::("restoration").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), restoration_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), set_rent_wasm)); - - // If you ever need to update the wasm source this test will fail - // and will show you the actual hash. - assert_eq!(System::events(), vec![ + // Call BOB without input data which triggers termination. + assert_matches!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],), + Ok(_) + ); + + pretty_assertions::assert_eq!( + System::events(), + vec![ EventRecord { phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + event: Event::System(frame_system::Event::KilledAccount(addr.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), + event: Event::Balances(pallet_balances::Event::Transfer( + addr.clone(), + DJANGO, + 100_000, + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(restoration_code_hash.into())), + event: Event::Contracts(crate::Event::CodeRemoved(code_hash)), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(set_rent_code_hash.into())), + event: Event::Contracts(crate::Event::Terminated(addr.clone(), DJANGO)), topics: vec![], }, - ]); + ], + ); - // Create an account with address `BOB` with code `CODE_SET_RENT`. - // The input parameter sets the rent allowance to 0. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - set_rent_code_hash.into(), - ::Balance::from(0u32).encode() - )); - - // Check if `BOB` was created successfully and that the rent allowance is - // set to 0. - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 0); - - if test_different_storage { - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, 0, GAS_LIMIT, - call::set_storage_4_byte()) - ); - } + // Check that account is gone + assert!(ContractInfoOf::::get(&addr).is_none()); - // Advance 4 blocks, to the 5th. - initialize_block(5); - - // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 0 - // we expect that it will get removed leaving tombstone. - assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, call::null()), - Error::::NotCallable - ); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Evicted(BOB.clone(), true) - ), - topics: vec![], - }, - ]); - - // Create another account with the address `DJANGO` with `CODE_RESTORATION`. - // - // Note that we can't use `ALICE` for creating `DJANGO` so we create yet another - // account `CHARLIE` and create `DJANGO` with it. 
- let _ = Balances::deposit_creating(&CHARLIE, 1_000_000); - assert_ok!(Contracts::instantiate( - Origin::signed(CHARLIE), - 30_000, - GAS_LIMIT, - restoration_code_hash.into(), - ::Balance::from(0u32).encode() - )); - - // Before performing a call to `DJANGO` save its original trie id. - let django_trie_id = ContractInfoOf::::get(DJANGO).unwrap() - .get_alive().unwrap().trie_id; - - if !test_restore_to_with_dirty_storage { - // Advance 1 block, to the 6th. - initialize_block(6); - } - - // Perform a call to `DJANGO`. This should either perform restoration successfully or - // fail depending on the test parameters. - let perform_the_restoration = || { - Contracts::call( - Origin::signed(ALICE), - DJANGO, - 0, - GAS_LIMIT, - set_rent_code_hash.as_ref().to_vec(), - ) - }; - - if test_different_storage || test_restore_to_with_dirty_storage { - // Parametrization of the test imply restoration failure. Check that `DJANGO` aka - // restoration contract is still in place and also that `BOB` doesn't exist. - - assert_err_ignore_postinfo!( - perform_the_restoration(), - Error::::ContractTrapped, - ); - - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - let django_contract = ContractInfoOf::::get(DJANGO).unwrap() - .get_alive().unwrap(); - assert_eq!(django_contract.storage_size, 8); - assert_eq!(django_contract.trie_id, django_trie_id); - assert_eq!(django_contract.deduct_block, System::block_number()); - match (test_different_storage, test_restore_to_with_dirty_storage) { - (true, false) => { - assert_eq!(System::events(), vec![]); - } - (_, true) => { - pretty_assertions::assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(CHARLIE, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(DJANGO)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(DJANGO, 30_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(CHARLIE, DJANGO, 30_000) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(CHARLIE, DJANGO)), - topics: vec![], - }, - ]); - } - _ => unreachable!(), - } - } else { - assert_ok!(perform_the_restoration()); - - // Here we expect that the restoration is succeeded. Check that the restoration - // contract `DJANGO` ceased to exist and that `BOB` returned back. 
- println!("{:?}", ContractInfoOf::::get(BOB)); - let bob_contract = ContractInfoOf::::get(BOB).unwrap() - .get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 50); - assert_eq!(bob_contract.storage_size, 4); - assert_eq!(bob_contract.trie_id, django_trie_id); - assert_eq!(bob_contract.deduct_block, System::block_number()); - assert!(ContractInfoOf::::get(DJANGO).is_none()); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(system::RawEvent::KilledAccount(DJANGO)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Restored(DJANGO, BOB, bob_contract.code_hash, 50) - ), - topics: vec![], - }, - ]); - } - }); + // check that the beneficiary (django) got remaining balance + // some rent was deducted before termination + assert_eq!(Balances::free_balance(DJANGO), 1_000_000 + 100_000); + }); } +// This tests that one contract cannot prevent another from self-destructing by sending it +// additional funds after it has been drained. #[test] -fn storage_max_value_limit() { - let (wasm, code_hash) = compile_module::("storage_size").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - code_hash.into(), - vec![], - )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); - - // Call contract with allowed storage value. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT * 2, // we are copying a huge buffer - ::MaxValueSize::get().encode(), - )); - - // Call contract with too large a storage value. - assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - (::MaxValueSize::get() + 1).encode(), - ), - Error::::ValueTooLarge, - ); - }); -} +fn destroy_contract_and_transfer_funds() { + let (callee_wasm, callee_code_hash) = compile_module::("self_destruct").unwrap(); + let (caller_wasm, caller_code_hash) = compile_module::("destroy_and_transfer").unwrap(); -#[test] -fn deploy_and_call_other_contract() { - let (callee_wasm, callee_code_hash) = compile_module::("return_with_data").unwrap(); - let (caller_wasm, caller_code_hash) = compile_module::("caller_contract").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 200_000, + GAS_LIMIT, + callee_wasm, + vec![], + vec![42] + )); + + // This deploys the BOB contract, which in turn deploys the CHARLIE contract during + // construction. 
+ assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 200_000, + GAS_LIMIT, + caller_wasm, + callee_code_hash.as_ref().to_vec(), + vec![], + )); + let addr_bob = Contracts::contract_address(&ALICE, &caller_code_hash, &[]); + let addr_charlie = Contracts::contract_address(&addr_bob, &callee_code_hash, &[0x47, 0x11]); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_wasm)); + // Check that the CHARLIE contract has been instantiated. + assert_matches!(ContractInfoOf::::get(&addr_charlie), Some(_)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - caller_code_hash.into(), - vec![], - )); + // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr_bob, + 0, + GAS_LIMIT, + addr_charlie.encode(), + )); - // Call BOB contract, which attempts to instantiate and call the callee contract and - // makes various assertions on the results from those calls. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - callee_code_hash.as_ref().to_vec(), - )); - }); + // Check that CHARLIE has moved on to the great beyond (ie. died). + assert!(ContractInfoOf::::get(&addr_charlie).is_none()); + }); } #[test] -fn cannot_self_destruct_through_draning() { - let (wasm, code_hash) = compile_module::("drain").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - code_hash.into(), - vec![], - )); - - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB which makes it send all funds to the zero address - // The contract code asserts that the correct error value is returned. - assert_ok!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - vec![], - ) - ); - }); -} +fn cannot_self_destruct_in_constructor() { + let (wasm, _) = compile_module::("self_destructing_constructor").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); -#[test] -fn cannot_self_destruct_while_live() { - let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( + // Fail to instantiate the BOB because the contructor calls seal_terminate. + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, - code_hash.into(), + wasm, vec![], - )); - - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB with input data, forcing it make a recursive call to itself to - // self-destruct, resulting in a trap. 
- assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - vec![0], - ), - Error::::ContractTrapped, - ); - - // Check that BOB is still alive. - assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - }); -} - -#[test] -fn self_destruct_works() { - let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - code_hash.into(), vec![], - )); - - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB without input data which triggers termination. - assert_matches!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - vec![], - ), - Ok(_) - ); - - // Check that account is gone - assert!(ContractInfoOf::::get(BOB).is_none()); - - // check that the beneficiary (django) got remaining balance - assert_eq!(Balances::free_balance(DJANGO), 100_000); - }); -} - -// This tests that one contract cannot prevent another from self-destructing by sending it -// additional funds after it has been drained. -#[test] -fn destroy_contract_and_transfer_funds() { - let (callee_wasm, callee_code_hash) = compile_module::("self_destruct").unwrap(); - let (caller_wasm, caller_code_hash) = compile_module::("destroy_and_transfer").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_wasm)); - - // This deploys the BOB contract, which in turn deploys the CHARLIE contract during - // construction. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 200_000, - GAS_LIMIT, - caller_code_hash.into(), - callee_code_hash.as_ref().to_vec(), - )); - - // Check that the CHARLIE contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(CHARLIE), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - GAS_LIMIT, - CHARLIE.encode(), - )); - - // Check that CHARLIE has moved on to the great beyond (ie. died). - assert!(ContractInfoOf::::get(CHARLIE).is_none()); - }); -} - -#[test] -fn cannot_self_destruct_in_constructor() { - let (wasm, code_hash) = compile_module::("self_destructing_constructor").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - // Fail to instantiate the BOB because the contructor calls seal_terminate. 
- assert_err_ignore_postinfo!( - Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - code_hash.into(), - vec![], - ), - Error::::NewContractNotFunded, - ); - }); + ), + Error::::TerminatedInConstructor, + ); + }); } #[test] fn crypto_hashes() { let (wasm, code_hash) = compile_module::("crypto_hashes").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - // Instantiate the CRYPTO_HASHES contract. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - code_hash.into(), - vec![], - )); - // Perform the call. - let input = b"_DEAD_BEEF"; - use sp_io::hashing::*; - // Wraps a hash function into a more dynamic form usable for testing. - macro_rules! dyn_hash_fn { - ($name:ident) => { - Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) - }; - } - // All hash functions and their associated output byte lengths. - let test_cases: &[(Box Box<[u8]>>, usize)] = &[ - (dyn_hash_fn!(sha2_256), 32), - (dyn_hash_fn!(keccak_256), 32), - (dyn_hash_fn!(blake2_256), 32), - (dyn_hash_fn!(blake2_128), 16), - ]; - // Test the given hash functions for the input: "_DEAD_BEEF" - for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { - // We offset data in the contract tables by 1. - let mut params = vec![(n + 1) as u8]; - params.extend_from_slice(input); - let result = >::bare_call( - ALICE, - BOB, - 0, - GAS_LIMIT, - params, - ).0.unwrap(); - assert!(result.is_success()); - let expected = hash_fn(input.as_ref()); - assert_eq!(&result.data[..*expected_size], &*expected); - } - }) + // Instantiate the CRYPTO_HASHES contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + // Perform the call. + let input = b"_DEAD_BEEF"; + use sp_io::hashing::*; + // Wraps a hash function into a more dynamic form usable for testing. + macro_rules! dyn_hash_fn { + ($name:ident) => { + Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) + }; + } + // All hash functions and their associated output byte lengths. + let test_cases: &[(Box Box<[u8]>>, usize)] = &[ + (dyn_hash_fn!(sha2_256), 32), + (dyn_hash_fn!(keccak_256), 32), + (dyn_hash_fn!(blake2_256), 32), + (dyn_hash_fn!(blake2_128), 16), + ]; + // Test the given hash functions for the input: "_DEAD_BEEF" + for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { + // We offset data in the contract tables by 1. 
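// NOTE (editor's aside): `bare_call` no longer returns a tuple (the old `.0.unwrap()`
// pattern); the updated tests read named fields from its return value and pass a
// trailing debug flag. The shape implied by the usages below — field names are taken
// from the tests themselves, while the struct name here is an illustrative assumption:
struct BareCallResult<R> {
    gas_consumed: u64,      // weight actually consumed by the call
    gas_required: u64,      // weight needed, including pre-charged maxima
    debug_message: Vec<u8>, // only populated when the debug flag is `true`
    result: R,              // the contract execution outcome itself
}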
+ let mut params = vec![(n + 1) as u8]; + params.extend_from_slice(input); + let result = + >::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, params, false) + .result + .unwrap(); + assert!(result.is_success()); + let expected = hash_fn(input.as_ref()); + assert_eq!(&result.data[..*expected_size], &*expected); + } + }) } #[test] fn transfer_return_code() { let (wasm, code_hash) = compile_module::("transfer_return_code").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); - let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), wasm)); - - assert_ok!( - Contracts::instantiate( - Origin::signed(ALICE), - subsistence, - GAS_LIMIT, - code_hash.into(), - vec![], - ), - ); + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - // Contract has only the minimal balance so any transfer will return BelowSubsistence. - let result = Contracts::bare_call( - ALICE, - BOB, - 0, + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, GAS_LIMIT, + wasm, + vec![], vec![], - ).0.unwrap(); + ),); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + // Contract has only the minimal balance so any transfer will return BelowSubsistence. + Balances::make_free_balance_be(&addr, subsistence); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![], false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence // threshold when transfering 100 balance but this balance is reserved so // the transfer still fails but with another return code. 
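// Worked example of the balance arithmetic behind the two assertions that follow
// (editor's sketch; the concrete numbers use the builder's existential deposit of 50
// and the tombstone deposit of 16 quoted elsewhere in this file):
//
//   subsistence = existential_deposit + tombstone_deposit = 50 + 16 = 66
//
//   free = 66                          -> sending anything would drop the contract
//                                         below subsistence: BelowSubsistenceThreshold
//   free = 66 + 100, reserve(66 + 100) -> total balance still covers subsistence, but
//                                         the free balance was moved to reserved, so
//                                         the 100 transfer fails: TransferFailed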
- Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); - let result = Contracts::bare_call( - ALICE, - BOB, - 0, - GAS_LIMIT, - vec![], - ).0.unwrap(); + Balances::make_free_balance_be(&addr, subsistence + 100); + Balances::reserve(&addr, subsistence + 100).unwrap(); + let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, vec![], false).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); }); } @@ -1712,87 +942,117 @@ fn call_return_code() { let (caller_code, caller_hash) = compile_module::("call_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); - let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); - let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_code)); + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate( - Origin::signed(ALICE), - subsistence, - GAS_LIMIT, - caller_hash.into(), - vec![0], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![0], + vec![], + ),); + let addr_bob = Contracts::contract_address(&ALICE, &caller_hash, &[]); + Balances::make_free_balance_be(&addr_bob, subsistence); // Contract calls into Django which is no valid contract let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], - ).0.unwrap(); + AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), + false, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::NotCallable); - assert_ok!( - Contracts::instantiate( - Origin::signed(CHARLIE), - subsistence, - GAS_LIMIT, - callee_hash.into(), - vec![0], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(CHARLIE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![0], + vec![], + ),); + let addr_django = Contracts::contract_address(&CHARLIE, &callee_hash, &[]); + Balances::make_free_balance_be(&addr_django, subsistence); // Contract has only the minimal balance so any transfer will return BelowSubsistence. let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], - ).0.unwrap(); + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&0u32.to_le_bytes()) + .cloned() + .collect(), + false, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence // threshold when transfering 100 balance but this balance is reserved so // the transfer still fails but with another return code. 
- Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); + Balances::make_free_balance_be(&addr_bob, subsistence + 100); + Balances::reserve(&addr_bob, subsistence + 100).unwrap(); let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![0], - ).0.unwrap(); + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&0u32.to_le_bytes()) + .cloned() + .collect(), + false, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. - Balances::make_free_balance_be(&BOB, subsistence + 1000); + Balances::make_free_balance_be(&addr_bob, subsistence + 1000); let result = Contracts::bare_call( ALICE, - BOB, + addr_bob.clone(), 0, GAS_LIMIT, - vec![1], - ).0.unwrap(); + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&1u32.to_le_bytes()) + .cloned() + .collect(), + false, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. let result = Contracts::bare_call( ALICE, - BOB, + addr_bob, 0, GAS_LIMIT, - vec![2], - ).0.unwrap(); + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&2u32.to_le_bytes()) + .cloned() + .collect(), + false, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); - }); } @@ -1801,77 +1061,792 @@ fn instantiate_return_code() { let (caller_code, caller_hash) = compile_module::("instantiate_return_code").unwrap(); let (callee_code, callee_hash) = compile_module::("ok_trap_revert").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let subsistence = Config::::subsistence_threshold_uncached(); - let _ = Balances::deposit_creating(&ALICE, 10 * subsistence); - let _ = Balances::deposit_creating(&CHARLIE, 10 * subsistence); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), caller_code)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), callee_code)); + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); let callee_hash = callee_hash.as_ref().to_vec(); - assert_ok!( - Contracts::instantiate( - Origin::signed(ALICE), - subsistence, - GAS_LIMIT, - caller_hash.into(), - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![], + vec![], + ),); - // Contract has only the minimal balance so any transfer will return BelowSubsistence. - let result = Contracts::bare_call( - ALICE, - BOB, - 0, + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, GAS_LIMIT, - vec![0; 33], - ).0.unwrap(); + caller_code, + vec![], + vec![], + ),); + let addr = Contracts::contract_address(&ALICE, &caller_hash, &[]); + + // Contract has only the minimal balance so any transfer will return BelowSubsistence. 
+ Balances::make_free_balance_be(&addr, subsistence); + let result = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, callee_hash.clone(), false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence - // threshold when transfering 100 balance but this balance is reserved so + // threshold when transfering the balance but this balance is reserved so // the transfer still fails but with another return code. - Balances::make_free_balance_be(&BOB, subsistence + 100); - Balances::reserve(&BOB, subsistence + 100).unwrap(); - let result = Contracts::bare_call( - ALICE, - BOB, - 0, - GAS_LIMIT, - vec![0; 33], - ).0.unwrap(); + Balances::make_free_balance_be(&addr, subsistence + 10_000); + Balances::reserve(&addr, subsistence + 10_000).unwrap(); + let result = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, callee_hash.clone(), false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid - Balances::make_free_balance_be(&BOB, subsistence + 1000); - let result = Contracts::bare_call( - ALICE, - BOB, - 0, - GAS_LIMIT, - vec![0; 33], - ).0.unwrap(); + Balances::make_free_balance_be(&addr, subsistence + 10_000); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![0; 33], false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. let result = Contracts::bare_call( ALICE, - BOB, + addr.clone(), 0, GAS_LIMIT, - callee_hash.iter().cloned().chain(sp_std::iter::once(1)).collect(), - ).0.unwrap(); + callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), + false, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
	let result = Contracts::bare_call(
		ALICE,
-		BOB,
+		addr,
		0,
		GAS_LIMIT,
-		callee_hash.iter().cloned().chain(sp_std::iter::once(2)).collect(),
-	).0.unwrap();
+		callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(),
+		false,
+	)
+	.result
+	.unwrap();
	assert_return_code!(result, RuntimeReturnCode::CalleeTrapped);
+	});
+}
+#[test]
+fn disabled_chain_extension_wont_deploy() {
+	let (code, _hash) = compile_module::<Test>("chain_extension").unwrap();
+	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
+		let subsistence = Pallet::<Test>::subsistence_threshold();
+		let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence);
+		TestExtension::disable();
+		assert_err_ignore_postinfo!(
+			Contracts::instantiate_with_code(
+				Origin::signed(ALICE),
+				3 * subsistence,
+				GAS_LIMIT,
+				code,
+				vec![],
+				vec![],
+			),
+			"module uses chain extensions but chain extensions are disabled",
+		);
	});
}
+
+#[test]
+fn disabled_chain_extension_errors_on_call() {
+	let (code, hash) = compile_module::<Test>("chain_extension").unwrap();
+	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
+		let subsistence = Pallet::<Test>::subsistence_threshold();
+		let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence);
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			subsistence * 100,
+			GAS_LIMIT,
+			code,
+			vec![],
+			vec![],
+		),);
+		let addr = Contracts::contract_address(&ALICE, &hash, &[]);
+		TestExtension::disable();
+		assert_err_ignore_postinfo!(
+			Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],),
+			Error::<Test>::NoChainExtension,
+		);
+	});
+}
+
+#[test]
+fn chain_extension_works() {
+	let (code, hash) = compile_module::<Test>("chain_extension").unwrap();
+	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
+		let subsistence = Pallet::<Test>::subsistence_threshold();
+		let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence);
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			subsistence * 100,
+			GAS_LIMIT,
+			code,
+			vec![],
+			vec![],
+		),);
+		let addr = Contracts::contract_address(&ALICE, &hash, &[]);
+
+		// The contract takes an up to 2 byte buffer where the first byte passed is used
+		// as the func_id for the chain extension, which behaves differently based on
+		// the func_id.
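// A sketch of the func_id dispatch that a test extension of this kind presumably
// implements (illustrative only; the concrete `ChainExtension` implementation lives
// in the test mock, which is not part of this hunk — function name and signature are
// the editor's):
fn dispatch_func_id(func_id: u8, input: &[u8]) -> Result<Vec<u8>, &'static str> {
    match func_id {
        0 => Ok(input.to_vec()),  // echo the input buffer back as output
        1 => Ok(Vec::new()),      // interpret the inputs as integers and record them
        2 => Ok(Vec::new()),      // charge extra weight given in the following byte
        3 => Ok(vec![42, 99]),    // diverge: revert with a fixed buffer
        _ => Err("unknown func_id"),
    }
}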
+ + // 0 = read input buffer and pass it through as output + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![0, 99], false); + let gas_consumed = result.gas_consumed; + assert_eq!(TestExtension::last_seen_buffer(), vec![0, 99]); + assert_eq!(result.result.unwrap().data, Bytes(vec![0, 99])); + + // 1 = treat inputs as integer primitives and store the supplied integers + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![1], false) + .result + .unwrap(); + // those values passed in the fixture + assert_eq!(TestExtension::last_seen_inputs(), (4, 1, 16, 12)); + + // 2 = charge some extra weight (amount supplied in second byte) + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![2, 42], false); + assert_ok!(result.result); + assert_eq!(result.gas_consumed, gas_consumed + 42); + + // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![3], false) + .result + .unwrap(); + assert_eq!(result.flags, ReturnFlags::REVERT); + assert_eq!(result.data, Bytes(vec![42, 99])); + }); +} + +#[test] +fn lazy_removal_works() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let info = >::get(&addr).unwrap(); + let trie = &info.child_trie_info(); + + // Put value into the contracts child trie + child::put(trie, &[99], &42); + + // Terminate the contract + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![])); + + // Contract info should be gone + assert!(!>::contains_key(&addr)); + + // But value should be still there as the lazy removal did not run, yet. 
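// Toy model of the deletion budget used by the lazy-removal tests below (a sketch:
// the pallet derives the base and per-key weights from its benchmarked `WeightInfo`;
// the linear split shown here is an assumption consistent with the assertions in
// this file, e.g. `weight_used == weight_limit - weight_per_key` for one spare key):
fn deletion_budget(weight_limit: u64, base_weight: u64, weight_per_key: u64) -> (u64, u64) {
    // Whatever is left after the fixed overhead is spent on removing keys.
    let remaining = weight_limit.saturating_sub(base_weight);
    // (weight charged per removed key, maximum number of keys removable in one batch)
    (weight_per_key, remaining / weight_per_key)
}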
+ assert_matches!(child::get(trie, &[99]), Some(42)); + + // Run the lazy removal + Contracts::on_initialize(Weight::max_value()); + + // Value should be gone now + assert_matches!(child::get::(trie, &[99]), None); + }); +} + +#[test] +fn lazy_removal_partial_remove_works() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + + // We create a contract with some extra keys above the weight limit + let extra_keys = 7u32; + let weight_limit = 5_000_000_000; + let (_, max_keys) = Storage::::deletion_budget(1, weight_limit); + let vals: Vec<_> = (0..max_keys + extra_keys) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); + + let mut ext = ExtBuilder::default().existential_deposit(50).build(); + + let trie = ext.execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let mut info = >::get(&addr).unwrap(); + + // Put value into the contracts child trie + for val in &vals { + Storage::::write(&mut info, &val.0, Some(val.2.clone())).unwrap(); + } + >::insert(&addr, info.clone()); + + // Terminate the contract + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![])); + + // Contract info should be gone + assert!(!>::contains_key(&addr)); + + let trie = info.child_trie_info(); + + // But value should be still there as the lazy removal did not run, yet. + for val in &vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); + } + + trie.clone() + }); + + // The lazy removal limit only applies to the backend but not to the overlay. + // This commits all keys from the overlay to the backend. + ext.commit_all().unwrap(); + + ext.execute_with(|| { + // Run the lazy removal + let weight_used = Storage::::process_deletion_queue_batch(weight_limit); + + // Weight should be exhausted because we could not even delete all keys + assert_eq!(weight_used, weight_limit); + + let mut num_deleted = 0u32; + let mut num_remaining = 0u32; + + for val in &vals { + match child::get::(&trie, &blake2_256(&val.0)) { + None => num_deleted += 1, + Some(x) if x == val.1 => num_remaining += 1, + Some(_) => panic!("Unexpected value in contract storage"), + } + } + + // All but one key is removed + assert_eq!(num_deleted + num_remaining, vals.len() as u32); + assert_eq!(num_deleted, max_keys); + assert_eq!(num_remaining, extra_keys); + }); +} + +#[test] +fn lazy_removal_does_no_run_on_full_block() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let mut info = >::get(&addr).unwrap(); + let max_keys = 30; + + // Create some storage items for the contract. 
+ let vals: Vec<_> = (0..max_keys) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); + + // Put value into the contracts child trie + for val in &vals { + Storage::::write(&mut info, &val.0, Some(val.2.clone())).unwrap(); + } + >::insert(&addr, info.clone()); + + // Terminate the contract + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![])); + + // Contract info should be gone + assert!(!>::contains_key(&addr)); + + let trie = info.child_trie_info(); + + // But value should be still there as the lazy removal did not run, yet. + for val in &vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); + } + + // Fill up the block which should prevent the lazy storage removal from running. + System::register_extra_weight_unchecked( + ::BlockWeights::get().max_block, + DispatchClass::Mandatory, + ); + + // Run the lazy removal without any limit so that all keys would be removed if there + // had been some weight left in the block. + let weight_used = Contracts::on_initialize(Weight::max_value()); + let base = <::WeightInfo as WeightInfo>::on_initialize(); + assert_eq!(weight_used, base); + + // All the keys are still in place + for val in &vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); + } + + // Run the lazy removal directly which disregards the block limits + Storage::::process_deletion_queue_batch(Weight::max_value()); + + // Now the keys should be gone + for val in &vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), None); + } + }); +} + +#[test] +fn lazy_removal_does_not_use_all_weight() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + + let weight_limit = 5_000_000_000; + let mut ext = ExtBuilder::default().existential_deposit(50).build(); + + let (trie, vals, weight_per_key) = ext.execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + let mut info = >::get(&addr).unwrap(); + let (weight_per_key, max_keys) = Storage::::deletion_budget(1, weight_limit); + + // We create a contract with one less storage item than we can remove within the limit + let vals: Vec<_> = (0..max_keys - 1) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); + + // Put value into the contracts child trie + for val in &vals { + Storage::::write(&mut info, &val.0, Some(val.2.clone())).unwrap(); + } + >::insert(&addr, info.clone()); + + // Terminate the contract + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![])); + + // Contract info should be gone + assert!(!>::contains_key(&addr)); + + let trie = info.child_trie_info(); + + // But value should be still there as the lazy removal did not run, yet. + for val in &vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); + } + + (trie, vals, weight_per_key) + }); + + // The lazy removal limit only applies to the backend but not to the overlay. + // This commits all keys from the overlay to the backend. 
+ ext.commit_all().unwrap(); + + ext.execute_with(|| { + // Run the lazy removal + let weight_used = Storage::::process_deletion_queue_batch(weight_limit); + + // We have one less key in our trie than our weight limit suffices for + assert_eq!(weight_used, weight_limit - weight_per_key); + + // All the keys are removed + for val in vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), None); + } + }); +} + +#[test] +fn deletion_queue_full() { + let (code, hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); + + let addr = Contracts::contract_address(&ALICE, &hash, &[]); + + // fill the deletion queue up until its limit + Storage::::fill_queue_with_dummies(); + + // Terminate the contract should fail + assert_err_ignore_postinfo!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],), + Error::::DeletionQueueFull, + ); + + // Contract should exist because removal failed + >::get(&addr).unwrap(); + }); +} + +#[test] +fn refcounter() { + let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let subsistence = Pallet::::subsistence_threshold(); + + // Create two contracts with the same code and check that they do in fact share it. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + wasm.clone(), + vec![], + vec![0], + )); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + wasm.clone(), + vec![], + vec![1], + )); + assert_refcount!(code_hash, 2); + + // Sharing should also work with the usual instantiate call + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code_hash, + vec![], + vec![2], + )); + assert_refcount!(code_hash, 3); + + // addresses of all three existing contracts + let addr0 = Contracts::contract_address(&ALICE, &code_hash, &[0]); + let addr1 = Contracts::contract_address(&ALICE, &code_hash, &[1]); + let addr2 = Contracts::contract_address(&ALICE, &code_hash, &[2]); + + // Terminating one contract should decrement the refcount + assert_ok!(Contracts::call(Origin::signed(ALICE), addr0, 0, GAS_LIMIT, vec![])); + assert_refcount!(code_hash, 2); + + // remove another one + assert_ok!(Contracts::call(Origin::signed(ALICE), addr1, 0, GAS_LIMIT, vec![])); + assert_refcount!(code_hash, 1); + + // Pristine code should still be there + crate::PristineCode::::get(code_hash).unwrap(); + + // remove the last contract + assert_ok!(Contracts::call(Origin::signed(ALICE), addr2, 0, GAS_LIMIT, vec![])); + assert_refcount!(code_hash, 0); + + // all code should be gone + assert_matches!(crate::PristineCode::::get(code_hash), None); + assert_matches!(crate::CodeStorage::::get(code_hash), None); + }); +} + +#[test] +fn reinstrument_does_charge() { + let (wasm, code_hash) = compile_module::("return_with_data").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let subsistence = Pallet::::subsistence_threshold(); + let zero = 
0u32.to_le_bytes().encode();
+		let code_len = wasm.len() as u32;
+
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			subsistence * 100,
+			GAS_LIMIT,
+			wasm,
+			zero.clone(),
+			vec![],
+		));
+
+		let addr = Contracts::contract_address(&ALICE, &code_hash, &[]);
+
+		// Call the contract two times without reinstrumentation
+
+		let result0 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, zero.clone(), false);
+		assert!(result0.result.unwrap().is_success());
+
+		let result1 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, zero.clone(), false);
+		assert!(result1.result.unwrap().is_success());
+
+		// They should match because both were called with the same schedule.
+		assert_eq!(result0.gas_consumed, result1.gas_consumed);
+
+		// We cannot change the schedule. Instead, we decrease the version of the deployed
+		// contract below the current schedule's version.
+		crate::CodeStorage::<Test>::mutate(&code_hash, |code: &mut Option<PrefabWasmModule<Test>>| {
+			code.as_mut().unwrap().decrement_version();
+		});
+
+		// This call should trigger reinstrumentation
+		let result2 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, zero.clone(), false);
+		assert!(result2.result.unwrap().is_success());
+		assert!(result2.gas_consumed > result1.gas_consumed);
+		assert_eq!(
+			result2.gas_consumed,
+			result1.gas_consumed + <Test as Config>::WeightInfo::instrument(code_len / 1024),
+		);
+	});
+}
+
+#[test]
+fn debug_message_works() {
+	let (wasm, code_hash) = compile_module::<Test>("debug_message_works").unwrap();
+
+	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
+		let _ = Balances::deposit_creating(&ALICE, 1_000_000);
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			30_000,
+			GAS_LIMIT,
+			wasm,
+			vec![],
+			vec![],
+		),);
+		let addr = Contracts::contract_address(&ALICE, &code_hash, &[]);
+		let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, vec![], true);
+
+		assert_matches!(result.result, Ok(_));
+		assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!");
+	});
+}
+
+#[test]
+fn debug_message_logging_disabled() {
+	let (wasm, code_hash) = compile_module::<Test>("debug_message_logging_disabled").unwrap();
+
+	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
+		let _ = Balances::deposit_creating(&ALICE, 1_000_000);
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			30_000,
+			GAS_LIMIT,
+			wasm,
+			vec![],
+			vec![],
+		),);
+		let addr = Contracts::contract_address(&ALICE, &code_hash, &[]);
+		// disable logging by passing `false`
+		let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![], false);
+		assert_matches!(result.result, Ok(_));
+		// the dispatchables always run without debugging
+		assert_ok!(Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, vec![]));
+		assert!(result.debug_message.is_empty());
+	});
+}
+
+#[test]
+fn debug_message_invalid_utf8() {
+	let (wasm, code_hash) = compile_module::<Test>("debug_message_invalid_utf8").unwrap();
+
+	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
+		let _ = Balances::deposit_creating(&ALICE, 1_000_000);
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			30_000,
+			GAS_LIMIT,
+			wasm,
+			vec![],
+			vec![],
+		),);
+		let addr = Contracts::contract_address(&ALICE, &code_hash, &[]);
+		let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, vec![], true);
+		assert_err!(result.result, <Error<Test>>::DebugMessageInvalidUTF8);
+	});
+}
+
+#[test]
+fn gas_estimation_nested_call_fixed_limit() {
+	let (caller_code,
caller_hash) = compile_module::("call_with_limit").unwrap(); + let (callee_code, callee_hash) = compile_module::("dummy").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![], + vec![0], + ),); + let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![], + vec![1], + ),); + let addr_callee = Contracts::contract_address(&ALICE, &callee_hash, &[1]); + + let input: Vec = AsRef::<[u8]>::as_ref(&addr_callee) + .iter() + .cloned() + .chain((GAS_LIMIT / 5).to_le_bytes()) + .collect(); + + // Call in order to determine the gas that is required for this call + let result = + Contracts::bare_call(ALICE, addr_caller.clone(), 0, GAS_LIMIT, input.clone(), false); + assert_ok!(&result.result); + + assert!(result.gas_required > result.gas_consumed); + + // Make the same call using the estimated gas. Should succeed. + assert_ok!( + Contracts::bare_call(ALICE, addr_caller, 0, result.gas_required, input, false,).result + ); + }); +} + +#[test] +#[cfg(feature = "unstable-interface")] +fn gas_estimation_call_runtime() { + let (caller_code, caller_hash) = compile_module::("call_runtime").unwrap(); + let (callee_code, callee_hash) = compile_module::("dummy").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let subsistence = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); + let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![], + vec![0], + ),); + let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![], + vec![1], + ),); + let addr_callee = Contracts::contract_address(&ALICE, &callee_hash, &[1]); + + // Call something trivial with a huge gas limit so that we can observe the effects + // of pre-charging. This should create a difference between consumed and required. + let call = Call::Contracts(crate::Call::call { + dest: addr_callee, + value: 0, + gas_limit: GAS_LIMIT / 3, + data: vec![], + }); + let result = + Contracts::bare_call(ALICE, addr_caller.clone(), 0, GAS_LIMIT, call.encode(), false); + assert_ok!(&result.result); + + assert!(result.gas_required > result.gas_consumed); + + // Make the same call using the required gas. Should succeed. + assert_ok!( + Contracts::bare_call(ALICE, addr_caller, 0, result.gas_required, call.encode(), false,) + .result + ); + }); +} + +#[test] +#[cfg(feature = "unstable-interface")] +fn ecdsa_recover() { + let (wasm, code_hash) = compile_module::("ecdsa_recover").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Instantiate the ecdsa_recover contract. 
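// The call input packs the 65-byte signature followed by the 32-byte message hash,
// and the contract is expected to return the 33-byte compressed public key. The
// fixture values defined just below can be cross-checked off-chain with the host
// primitive (editor's sketch; assumes an sp_io-enabled test context):
//
//     let pubkey = sp_io::crypto::secp256k1_ecdsa_recover_compressed(
//         &signature,
//         &message_hash,
//     )
//     .expect("valid signature");
//     assert_eq!(pubkey, EXPECTED_COMPRESSED_PUBLIC_KEY);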
+ assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + #[rustfmt::skip] + let signature: [u8; 65] = [ + 161, 234, 203, 74, 147, 96, 51, 212, 5, 174, 231, 9, 142, 48, 137, 201, + 162, 118, 192, 67, 239, 16, 71, 216, 125, 86, 167, 139, 70, 7, 86, 241, + 33, 87, 154, 251, 81, 29, 160, 4, 176, 239, 88, 211, 244, 232, 232, 52, + 211, 234, 100, 115, 230, 47, 80, 44, 152, 166, 62, 50, 8, 13, 86, 175, + 28, + ]; + #[rustfmt::skip] + let message_hash: [u8; 32] = [ + 162, 28, 244, 179, 96, 76, 244, 178, 188, 83, 230, 248, 143, 106, 77, 117, + 239, 95, 244, 171, 65, 95, 62, 153, 174, 166, 182, 28, 130, 73, 196, 208 + ]; + #[rustfmt::skip] + const EXPECTED_COMPRESSED_PUBLIC_KEY: [u8; 33] = [ + 2, 121, 190, 102, 126, 249, 220, 187, 172, 85, 160, 98, 149, 206, 135, 11, + 7, 2, 155, 252, 219, 45, 206, 40, 217, 89, 242, 129, 91, 22, 248, 23, + 152, + ]; + let mut params = vec![]; + params.extend_from_slice(&signature); + params.extend_from_slice(&message_hash); + assert!(params.len() == 65 + 32); + let result = >::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, params, false) + .result + .unwrap(); + assert!(result.is_success()); + assert_eq!(result.data.as_ref(), &EXPECTED_COMPRESSED_PUBLIC_KEY); + }) +} diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 34b8ea7443538..08a7449683ed6 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -1,18 +1,19 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! A module that implements instrumented code cache. //! @@ -22,49 +23,87 @@ //! - Before running contract code we check if the cached code has the schedule version that //! is equal to the current saved schedule. //! If it is equal then run the code, if it isn't reinstrument with the current schedule. -//! - When we update the schedule we want it to have strictly greater version than the current saved one: -//! this guarantees that every instrumented contract code in cache cannot have the version equal to the current one. -//! 
diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs
index 34b8ea7443538..08a7449683ed6 100644
--- a/frame/contracts/src/wasm/code_cache.rs
+++ b/frame/contracts/src/wasm/code_cache.rs
@@ -1,18 +1,19 @@
-// Copyright 2018-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0

-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

 //! A module that implements instrumented code cache.
 //!
@@ -22,49 +23,87 @@
 //! - Before running contract code we check if the cached code has the schedule version that
 //!   is equal to the current saved schedule.
 //!   If it is equal then run the code, if it isn't, reinstrument with the current schedule.
-//! - When we update the schedule we want it to have strictly greater version than the current saved one:
-//! this guarantees that every instrumented contract code in cache cannot have the version equal to the current one.
-//! Thus, before executing a contract it should be reinstrument with new schedule.
-
-use crate::wasm::{prepare, runtime::Env, PrefabWasmModule};
-use crate::{CodeHash, CodeStorage, PristineCode, Schedule, Trait};
-use sp_std::prelude::*;
-use sp_runtime::traits::Hash;
-use frame_support::StorageMap;
-
-/// Put code in the storage. The hash of code is used as a key and is returned
-/// as a result of this function.
-///
-/// This function instruments the given code and caches it in the storage.
-pub fn save<T: Trait>(
-	original_code: Vec<u8>,
-	schedule: &Schedule,
-) -> Result<CodeHash<T>, &'static str> {
-	let prefab_module = prepare::prepare_contract::<T>(&original_code, schedule)?;
-	let code_hash = T::Hashing::hash(&original_code);
-
-	<CodeStorage<T>>::insert(code_hash, prefab_module);
-	<PristineCode<T>>::insert(code_hash, original_code);
-
-	Ok(code_hash)
-}
+//! - When we update the schedule we want it to have strictly greater version than the current saved
+//!   one:
+//! this guarantees that every instrumented contract code in cache cannot have the version equal to
+//! the current one. Thus, before executing a contract it should be reinstrumented with the new
+//! schedule.

-/// Version of `save` to be used in runtime benchmarks.
-//
-/// This version neither checks nor instruments the passed in code. This is useful
-/// when code needs to be benchmarked without the injected instrumentation.
 #[cfg(feature = "runtime-benchmarks")]
-pub fn save_raw<T: Trait>(
-	original_code: Vec<u8>,
-	schedule: &Schedule,
-) -> Result<CodeHash<T>, &'static str> {
-	let prefab_module = prepare::benchmarking::prepare_contract::<T>(&original_code, schedule)?;
-	let code_hash = T::Hashing::hash(&original_code);
+pub use self::private::reinstrument;
+use crate::{
+	gas::{GasMeter, Token},
+	wasm::{prepare, PrefabWasmModule},
+	weights::WeightInfo,
+	CodeHash, CodeStorage, Config, Error, Event, Pallet as Contracts, PristineCode, Schedule,
+	Weight,
+};
+use frame_support::dispatch::DispatchError;
+use sp_core::crypto::UncheckedFrom;
+
+/// Put the instrumented module in storage.
+///
+/// Increments the refcount of the in-storage `prefab_module` if it already exists in storage
+/// under the specified `code_hash`.
+pub fn store<T: Config>(mut prefab_module: PrefabWasmModule<T>)
+where
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	let code_hash = sp_std::mem::take(&mut prefab_module.code_hash);
+
+	// original_code is only `Some` if the contract was instantiated from a new code
+	// but `None` if it was loaded from storage.
+	if let Some(code) = prefab_module.original_code.take() {
+		<PristineCode<T>>::insert(&code_hash, code);
+	}
+	<CodeStorage<T>>::mutate(&code_hash, |existing| match existing {
+		Some(module) => increment_64(&mut module.refcount),
+		None => {
+			*existing = Some(prefab_module);
+			Contracts::<T>::deposit_event(Event::CodeStored(code_hash))
+		},
+	});
+}

-	<CodeStorage<T>>::insert(code_hash, prefab_module);
-	<PristineCode<T>>::insert(code_hash, original_code);
+/// Increment the refcount of a code in-storage by one.
+pub fn increment_refcount<T: Config>(
+	code_hash: CodeHash<T>,
+	gas_meter: &mut GasMeter<T>,
+) -> Result<(), DispatchError>
+where
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	gas_meter.charge(CodeToken::UpdateRefcount(estimate_code_size::<T>(&code_hash)?))?;
+	<CodeStorage<T>>::mutate(code_hash, |existing| {
+		if let Some(module) = existing {
+			increment_64(&mut module.refcount);
+			Ok(())
+		} else {
+			Err(Error::<T>::CodeNotFound.into())
+		}
+	})
+}

-	Ok(code_hash)
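`store` above implements upload-once semantics: the first upload of a code hash inserts the module and emits `CodeStored`, while identical uploads afterwards only bump the reference count. A minimal in-memory sketch of that behaviour (a `HashMap` standing in for the `CodeStorage` storage map; starting the refcount at 1 is an assumption of the sketch):

use std::collections::HashMap;

// In-memory stand-in for the `CodeStorage` map; `Module` mirrors the role of
// `PrefabWasmModule` with its `refcount` field.
struct Module {
	refcount: u64,
	code: Vec<u8>,
}

fn store(storage: &mut HashMap<[u8; 32], Module>, code_hash: [u8; 32], code: Vec<u8>) {
	storage
		.entry(code_hash)
		// Code already known: only bump the refcount.
		.and_modify(|m| m.refcount += 1)
		// First upload: this is where `Event::CodeStored` would be emitted.
		.or_insert_with(|| Module { refcount: 1, code });
}

fn main() {
	let mut storage = HashMap::new();
	let hash = [0x11; 32];
	store(&mut storage, hash, vec![0, 97, 115, 109]); // "\0asm"
	store(&mut storage, hash, vec![0, 97, 115, 109]);
	assert_eq!(storage[&hash].refcount, 2);
	assert_eq!(storage[&hash].code, vec![0, 97, 115, 109]);
}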
+/// Decrement the refcount of a code in-storage by one and remove the code when it drops to zero.
+pub fn decrement_refcount<T: Config>(
+	code_hash: CodeHash<T>,
+	gas_meter: &mut GasMeter<T>,
+) -> Result<(), DispatchError>
+where
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	if let Ok(len) = estimate_code_size::<T>(&code_hash) {
+		gas_meter.charge(CodeToken::UpdateRefcount(len))?;
+	}
+	<CodeStorage<T>>::mutate_exists(code_hash, |existing| {
+		if let Some(module) = existing {
+			module.refcount = module.refcount.saturating_sub(1);
+			if module.refcount == 0 {
+				*existing = None;
+				finish_removal::<T>(code_hash);
+			}
+		}
+	});
+	Ok(())
 }

 /// Load code with the given code hash.
@@ -72,22 +111,129 @@ pub fn save_raw(
 /// If the module was instrumented with a lower version of schedule than
 /// the current one given as an argument, then this function will perform
 /// re-instrumentation and update the cache in the storage.
-pub fn load<T: Trait>(
-	code_hash: &CodeHash<T>,
-	schedule: &Schedule,
-) -> Result<PrefabWasmModule, &'static str> {
+///
+/// # Note
+///
+/// If `reinstrument` is set it is assumed that the load is performed in the context of
+/// a contract call: This means we charge the size-based cost of loading the contract.
+pub fn load<T: Config>(
+	code_hash: CodeHash<T>,
+	mut reinstrument: Option<(&Schedule<T>, &mut GasMeter<T>)>,
+) -> Result<PrefabWasmModule<T>, DispatchError>
+where
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	// The reinstrument case coincides with the cases where we need to charge extra
+	// based upon the code size: On-chain execution.
+	if let Some((_, gas_meter)) = &mut reinstrument {
+		gas_meter.charge(CodeToken::Load(estimate_code_size::<T>(&code_hash)?))?;
+	}
+
 	let mut prefab_module =
-		<CodeStorage<T>>::get(code_hash).ok_or_else(|| "code is not found")?;
-
-	if prefab_module.schedule_version < schedule.version {
-		// The current schedule version is greater than the version of the one cached
-		// in the storage.
-		//
-		// We need to re-instrument the code with the latest schedule here.
-		let original_code =
-			<PristineCode<T>>::get(code_hash).ok_or_else(|| "pristine code is not found")?;
-		prefab_module = prepare::prepare_contract::<T>(&original_code, schedule)?;
-		<CodeStorage<T>>::insert(&code_hash, &prefab_module);
+		<CodeStorage<T>>::get(code_hash).ok_or_else(|| Error::<T>::CodeNotFound)?;
+	prefab_module.code_hash = code_hash;
+
+	if let Some((schedule, gas_meter)) = reinstrument {
+		if prefab_module.instruction_weights_version < schedule.instruction_weights.version {
+			// The instruction weights have changed.
+			// We need to re-instrument the code with the new instruction weights.
+			gas_meter.charge(CodeToken::Instrument(prefab_module.original_code_len))?;
+			private::reinstrument(&mut prefab_module, schedule)?;
+		}
 	}
 	Ok(prefab_module)
 }
+
+mod private {
+	use super::*;
+
+	/// Instruments the passed prefab wasm module with the supplied schedule.
+	pub fn reinstrument<T: Config>(
+		prefab_module: &mut PrefabWasmModule<T>,
+		schedule: &Schedule<T>,
+	) -> Result<(), DispatchError>
+	where
+		T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+	{
+		let original_code = <PristineCode<T>>::get(&prefab_module.code_hash)
+			.ok_or_else(|| Error::<T>::CodeNotFound)?;
+		prefab_module.code = prepare::reinstrument_contract::<T>(original_code, schedule)?;
+		prefab_module.instruction_weights_version = schedule.instruction_weights.version;
+		<CodeStorage<T>>::insert(&prefab_module.code_hash, &*prefab_module);
+		Ok(())
+	}
+}
+
+/// Finish removal of a code by deleting the pristine code and emitting an event.
+fn finish_removal<T: Config>(code_hash: CodeHash<T>)
+where
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	<PristineCode<T>>::remove(code_hash);
+	Contracts::<T>::deposit_event(Event::CodeRemoved(code_hash))
+}
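`load` above re-instruments lazily: the cached module is used as-is while its recorded weights version matches the schedule, and only a mismatch triggers the charged re-instrumentation. The version check in isolation, with stand-in types invented for the sketch:

// Stand-ins for the cached module and the current schedule; only the
// version comparison from `load` is modelled here.
struct CachedModule {
	instruction_weights_version: u32,
	code: Vec<u8>,
}

struct ScheduleInfo {
	version: u32,
}

fn reinstrument(pristine: &[u8], _schedule: &ScheduleInfo) -> Vec<u8> {
	pristine.to_vec() // placeholder for the real instrumentation pass
}

fn load(module: &mut CachedModule, schedule: &ScheduleInfo) {
	if module.instruction_weights_version < schedule.version {
		// This is where `CodeToken::Instrument` would be charged and the
		// pristine code re-instrumented with the new weights.
		module.code = reinstrument(&module.code, schedule);
		module.instruction_weights_version = schedule.version;
	}
}

fn main() {
	let mut module = CachedModule { instruction_weights_version: 1, code: vec![1, 2, 3] };
	let schedule = ScheduleInfo { version: 2 };
	load(&mut module, &schedule);
	assert_eq!(module.instruction_weights_version, 2);
}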
+/// Increment the refcount panicking if it should ever overflow (which will not happen).
+///
+/// We try hard to be infallible here because otherwise more storage transactions would be
+/// necessary to account for failures in storing code for an already instantiated contract.
+fn increment_64(refcount: &mut u64) {
+	*refcount = refcount.checked_add(1).expect(
+		"
+		refcount is 64bit. Generating this overflow would require to store
+		_at least_ 18 exabyte of data assuming that a contract consumes only
+		one byte of data. Any node would run out of storage space before hitting
+		this overflow.
+		qed
+		",
+	);
+}
+
+/// Get the size of the instrumented code stored at `code_hash` without loading it.
+///
+/// The returned value is slightly too large because it also contains the fields apart from
+/// `code` which are located inside [`PrefabWasmModule`]. However, those are negligible when
+/// compared to the code size. Additionally, charging too much weight is completely safe.
+fn estimate_code_size<T: Config>(code_hash: &CodeHash<T>) -> Result<u32, DispatchError>
+where
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	let key = <CodeStorage<T>>::hashed_key_for(code_hash);
+	let mut data = [0u8; 0];
+	let len = sp_io::storage::read(&key, &mut data, 0).ok_or_else(|| Error::<T>::CodeNotFound)?;
+	Ok(len)
+}
+
+/// Costs for operations that are related to code handling.
+#[cfg_attr(test, derive(Debug, PartialEq, Eq))]
+#[derive(Clone, Copy)]
+enum CodeToken {
+	/// Weight for instrumenting a contract of the supplied size in bytes.
+	Instrument(u32),
+	/// Weight for loading a contract per kilobyte.
+	Load(u32),
+	/// Weight for changing the refcount of a contract per kilobyte.
+	UpdateRefcount(u32),
+}
+
+impl<T> Token<T> for CodeToken
+where
+	T: Config,
+	T::AccountId: UncheckedFrom<T::Hash> + AsRef<[u8]>,
+{
+	fn weight(&self) -> Weight {
+		use self::CodeToken::*;
+		// In case of `Load` and `UpdateRefcount` we already covered the general costs of
+		// accessing the storage but still need to account for the actual size of the
+		// contract code. This is why we subtract `T::*::(0)`. We need to do this at this
+		// point because when charging the general weight we do not know the size of
+		// the contract.
+		match *self {
+			Instrument(len) => T::WeightInfo::instrument(len / 1024),
+			Load(len) =>
+				T::WeightInfo::code_load(len / 1024).saturating_sub(T::WeightInfo::code_load(0)),
+			UpdateRefcount(len) => T::WeightInfo::code_refcount(len / 1024)
+				.saturating_sub(T::WeightInfo::code_refcount(0)),
+		}
+	}
+}
diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs
index 2538f85fb7385..ea7f51da75264 100644
--- a/frame/contracts/src/wasm/env_def/macros.rs
+++ b/frame/contracts/src/wasm/env_def/macros.rs
@@ -1,65 +1,69 @@
-// Copyright 2018-2020 Parity Technologies (UK) Ltd.
 // This file is part of Substrate.

-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
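Two details of the code cache just shown are easy to miss: `estimate_code_size` obtains the byte length of the stored module by calling `sp_io::storage::read` with a zero-length buffer (the host returns the total value length without copying anything), and the `Token` implementation charges only the size-dependent share of each weight by subtracting the zero-size baseline. The arithmetic of that baseline trick, with invented weight numbers:

// Hypothetical linear weight function: a fixed base plus a per-kilobyte
// component, mirroring the shape of the benchmarked `code_load` weight.
fn code_load_weight(kb: u32) -> u64 {
	50_000 + 1_200 * kb as u64
}

// Charge only the size-dependent part: the base cost was already covered
// by the general call overhead, hence the `code_load_weight(0)` baseline.
fn size_dependent_weight(code_len: u32) -> u64 {
	code_load_weight(code_len / 1024).saturating_sub(code_load_weight(0))
}

fn main() {
	// A 4 KiB module only pays the per-kilobyte share.
	assert_eq!(size_dependent_weight(4096), 4 * 1_200);
	// A module below 1 KiB rounds down to zero extra weight.
	assert_eq!(size_dependent_weight(512), 0);
}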
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Definition of macros that hides boilerplate of defining external environment //! for a wasm module. //! //! Most likely you should use `define_env` macro. -#[macro_export] macro_rules! convert_args { () => (vec![]); ( $( $t:ty ),* ) => ( vec![ $( { use $crate::wasm::env_def::ConvertibleToWasm; <$t>::VALUE_TYPE }, )* ] ); } -#[macro_export] macro_rules! gen_signature { ( ( $( $params: ty ),* ) ) => ( { - parity_wasm::elements::FunctionType::new(convert_args!($($params),*), None) + pwasm_utils::parity_wasm::elements::FunctionType::new( + convert_args!($($params),*), vec![], + ) } ); ( ( $( $params: ty ),* ) -> $returns: ty ) => ( { - parity_wasm::elements::FunctionType::new(convert_args!($($params),*), Some({ - use $crate::wasm::env_def::ConvertibleToWasm; <$returns>::VALUE_TYPE - })) + pwasm_utils::parity_wasm::elements::FunctionType::new( + convert_args!($($params),*), + vec![{use $crate::wasm::env_def::ConvertibleToWasm; <$returns>::VALUE_TYPE}], + ) } ); } -#[macro_export] macro_rules! gen_signature_dispatch { ( + $needle_module:ident, $needle_name:ident, $needle_sig:ident ; + $module:ident, $name:ident - ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* , $($rest:tt)* ) => { - if stringify!($name).as_bytes() == $needle_name { + ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* , $($rest:tt)* + ) => { + let module = stringify!($module).as_bytes(); + if module == $needle_module && stringify!($name).as_bytes() == $needle_name { let signature = gen_signature!( ( $( $params ),* ) $( -> $returns )* ); if $needle_sig == &signature { return true; } } else { - gen_signature_dispatch!($needle_name, $needle_sig ; $($rest)*); + gen_signature_dispatch!($needle_module, $needle_name, $needle_sig ; $($rest)*); } }; - ( $needle_name:ident, $needle_sig:ident ; ) => { - }; + ( $needle_module:ident, $needle_name:ident, $needle_sig:ident ; ) => {}; } /// Unmarshall arguments and then execute `body` expression and return its result. @@ -96,12 +100,11 @@ macro_rules! unmarshall_then_body { #[inline(always)] pub fn constrain_closure(f: F) -> F where - F: FnOnce() -> Result, + F: FnOnce() -> Result, { f } -#[macro_export] macro_rules! unmarshall_then_body_then_marshall { ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) -> $returns:ty => $body:tt ) => ({ let body = $crate::wasm::env_def::macros::constrain_closure::< @@ -109,25 +112,35 @@ macro_rules! 
unmarshall_then_body_then_marshall { >(|| { unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) }); - let r = body()?; + let r = body().map_err(|reason| { + $ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; return Ok(sp_sandbox::ReturnValue::Value({ use $crate::wasm::env_def::ConvertibleToWasm; r.to_typed_value() })) }); ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) => $body:tt ) => ({ let body = $crate::wasm::env_def::macros::constrain_closure::<(), _>(|| { unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) }); - body()?; + body().map_err(|reason| { + $ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; return Ok(sp_sandbox::ReturnValue::Unit) }) } -#[macro_export] macro_rules! define_func { - ( < E: $seal_ty:tt > $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { - fn $name< E: $seal_ty >( + ( $trait:tt $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { + fn $name< E: $trait >( $ctx: &mut $crate::wasm::Runtime, args: &[sp_sandbox::Value], - ) -> Result { + ) -> Result + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { #[allow(unused)] let mut args = args.iter(); @@ -140,24 +153,52 @@ macro_rules! define_func { }; } -#[macro_export] -macro_rules! register_func { - ( $reg_cb:ident, < E: $seal_ty:tt > ; ) => {}; - - ( $reg_cb:ident, < E: $seal_ty:tt > ; - $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt $($rest:tt)* +macro_rules! register_body { + ( $reg_cb:ident, $trait:tt; + $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( -> $returns:ty )* => $body:tt ) => { $reg_cb( + stringify!($module).as_bytes(), stringify!($name).as_bytes(), { define_func!( - < E: $seal_ty > $name ( $ctx $(, $names : $params )* ) $( -> $returns )* => $body + $trait $name ( $ctx $(, $names : $params )* ) $( -> $returns )* => $body ); $name:: } ); - register_func!( $reg_cb, < E: $seal_ty > ; $($rest)* ); + } +} + +macro_rules! register_func { + ( $reg_cb:ident, $trait:tt; ) => {}; + + ( $reg_cb:ident, $trait:tt; + __unstable__ $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( -> $returns:ty )* => $body:tt $($rest:tt)* + ) => { + #[cfg(feature = "unstable-interface")] + register_body!( + $reg_cb, $trait; + __unstable__ $name + ( $ctx $( , $names : $params )* ) + $( -> $returns )* => $body + ); + register_func!( $reg_cb, $trait; $($rest)* ); + }; + + ( $reg_cb:ident, $trait:tt; + $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + $( -> $returns:ty )* => $body:tt $($rest:tt)* + ) => { + register_body!( + $reg_cb, $trait; + $module $name + ( $ctx $( , $names : $params )* ) + $( -> $returns )* => $body + ); + register_func!( $reg_cb, $trait; $($rest)* ); }; } @@ -169,23 +210,44 @@ macro_rules! register_func { /// It's up to the user of this macro to check signatures of wasm code to be executed /// and reject the code if any imported function has a mismatched signature. macro_rules! 
define_env { - ( $init_name:ident , < E: $seal_ty:tt > , - $( $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) + ( $init_name:ident , < E: $trait:tt > , + $( [$module:ident] $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* => $body:tt , )* ) => { pub struct $init_name; impl $crate::wasm::env_def::ImportSatisfyCheck for $init_name { - fn can_satisfy(name: &[u8], func_type: &parity_wasm::elements::FunctionType) -> bool { - gen_signature_dispatch!( name, func_type ; $( $name ( $ctx $(, $names : $params )* ) $( -> $returns )* , )* ); + fn can_satisfy( + module: &[u8], + name: &[u8], + func_type: &pwasm_utils::parity_wasm::elements::FunctionType, + ) -> bool + { + #[cfg(not(feature = "unstable-interface"))] + if module == b"__unstable__" { + return false; + } + gen_signature_dispatch!( + module, name, func_type ; + $( $module, $name ( $ctx $(, $names : $params )* ) $( -> $returns )* , )* + ); return false; } } - impl $crate::wasm::env_def::FunctionImplProvider for $init_name { - fn impls)>(f: &mut F) { - register_func!(f, < E: $seal_ty > ; $( $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* ); + impl $crate::wasm::env_def::FunctionImplProvider for $init_name + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]> + { + fn impls)>(f: &mut F) { + register_func!( + f, + $trait; + $( $module $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* + ); } } }; @@ -193,19 +255,27 @@ macro_rules! define_env { #[cfg(test)] mod tests { - use parity_wasm::elements::FunctionType; - use parity_wasm::elements::ValueType; + use crate::{ + exec::Ext, + wasm::{runtime::TrapReason, tests::MockExt, Runtime}, + Weight, + }; + use pwasm_utils::parity_wasm::elements::{FunctionType, ValueType}; use sp_runtime::traits::Zero; use sp_sandbox::{ReturnValue, Value}; - use crate::wasm::tests::MockExt; - use crate::wasm::Runtime; - use crate::exec::Ext; - use crate::gas::Gas; + + struct TestRuntime { + value: u32, + } + + impl TestRuntime { + fn set_trap_reason(&mut self, _reason: TrapReason) {} + } #[test] fn macro_unmarshall_then_body_then_marshall_value_or_trap() { fn test_value( - _ctx: &mut u32, + _ctx: &mut TestRuntime, args: &[sp_sandbox::Value], ) -> Result { let mut args = args.iter(); @@ -214,7 +284,7 @@ mod tests { _ctx, (a: u32, b: u32) -> u32 => { if b == 0 { - Err(sp_sandbox::HostError) + Err(crate::wasm::runtime::TrapReason::Termination) } else { Ok(a / b) } @@ -222,7 +292,7 @@ mod tests { ) } - let ctx = &mut 0; + let ctx = &mut TestRuntime { value: 0 }; assert_eq!( test_value(ctx, &[Value::I32(15), Value::I32(3)]).unwrap(), ReturnValue::Value(Value::I32(5)), @@ -233,7 +303,7 @@ mod tests { #[test] fn macro_unmarshall_then_body_then_marshall_unit() { fn test_unit( - ctx: &mut u32, + ctx: &mut TestRuntime, args: &[sp_sandbox::Value], ) -> Result { let mut args = args.iter(); @@ -241,42 +311,41 @@ mod tests { args, ctx, (a: u32, b: u32) => { - *ctx = a + b; + ctx.value = a + b; Ok(()) } ) } - let ctx = &mut 0; + let ctx = &mut TestRuntime { value: 0 }; let result = test_unit(ctx, &[Value::I32(2), Value::I32(3)]).unwrap(); assert_eq!(result, ReturnValue::Unit); - assert_eq!(*ctx, 5); + assert_eq!(ctx.value, 5); } #[test] fn macro_define_func() { - define_func!( seal_gas (_ctx, amount: u32) => { - let amount = Gas::from(amount); + define_func!( Ext seal_gas (_ctx, amount: u32) => { + let amount = Weight::from(amount); if !amount.is_zero() { Ok(()) } else { - Err(sp_sandbox::HostError) + 
Err(TrapReason::Termination) } }); - let _f: fn(&mut Runtime, &[sp_sandbox::Value]) - -> Result = seal_gas::; + let _f: fn( + &mut Runtime, + &[sp_sandbox::Value], + ) -> Result = seal_gas::; } #[test] fn macro_gen_signature() { - assert_eq!( - gen_signature!((i32)), - FunctionType::new(vec![ValueType::I32], None), - ); + assert_eq!(gen_signature!((i32)), FunctionType::new(vec![ValueType::I32], vec![])); assert_eq!( gen_signature!( (i32, u32) -> u32 ), - FunctionType::new(vec![ValueType::I32, ValueType::I32], Some(ValueType::I32)), + FunctionType::new(vec![ValueType::I32, ValueType::I32], vec![ValueType::I32]), ); } @@ -307,17 +376,21 @@ mod tests { use crate::wasm::env_def::ImportSatisfyCheck; define_env!(Env, , - seal_gas( _ctx, amount: u32 ) => { - let amount = Gas::from(amount); + [seal0] seal_gas( _ctx, amount: u32 ) => { + let amount = Weight::from(amount); if !amount.is_zero() { Ok(()) } else { - Err(sp_sandbox::HostError) + Err(crate::wasm::runtime::TrapReason::Termination) } }, ); - assert!(Env::can_satisfy(b"seal_gas", &FunctionType::new(vec![ValueType::I32], None))); - assert!(!Env::can_satisfy(b"not_exists", &FunctionType::new(vec![], None))); + assert!(Env::can_satisfy( + b"seal0", + b"seal_gas", + &FunctionType::new(vec![ValueType::I32], vec![]) + )); + assert!(!Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], vec![]))); } } diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index 7b67f74ec95c8..6a55677f69a01 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -1,27 +1,28 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
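The reworked macros key every host function by an import module as well as a name, which is how `can_satisfy` can reject `__unstable__` imports when the `unstable-interface` feature is disabled. A toy registry demonstrating the (module, name) dispatch; the table type and function bodies are invented for illustration:

// Invented stand-in for the host function table built by `define_env!`.
type HostFn = fn(&[u64]) -> u64;

fn seal_gas(args: &[u64]) -> u64 {
	args.iter().sum()
}

fn can_satisfy(table: &[(&str, &str, HostFn)], module: &str, name: &str) -> bool {
	// Unstable imports are filtered out up front, like the
	// `#[cfg(not(feature = "unstable-interface"))]` early return above.
	if module == "__unstable__" {
		return false;
	}
	table.iter().any(|(m, n, _)| *m == module && *n == name)
}

fn main() {
	let table: [(&str, &str, HostFn); 1] = [("seal0", "seal_gas", seal_gas)];
	assert!(can_satisfy(&table, "seal0", "seal_gas"));
	assert!(!can_satisfy(&table, "seal0", "not_exists"));
	assert!(!can_satisfy(&table, "__unstable__", "seal_gas"));
}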
use super::Runtime; use crate::exec::Ext; +use pwasm_utils::parity_wasm::elements::{FunctionType, ValueType}; use sp_sandbox::Value; -use parity_wasm::elements::{FunctionType, ValueType}; #[macro_use] -pub(crate) mod macros; +pub mod macros; pub trait ConvertibleToWasm: Sized { const VALUE_TYPE: ValueType; @@ -66,14 +67,13 @@ impl ConvertibleToWasm for u64 { } } -pub(crate) type HostFunc = - fn( - &mut Runtime, - &[sp_sandbox::Value] - ) -> Result; +pub type HostFunc = fn( + &mut Runtime, + &[sp_sandbox::Value], +) -> Result; -pub(crate) trait FunctionImplProvider { - fn impls)>(f: &mut F); +pub trait FunctionImplProvider { + fn impls)>(f: &mut F); } /// This trait can be used to check whether the host environment can satisfy @@ -82,5 +82,5 @@ pub trait ImportSatisfyCheck { /// Returns `true` if the host environment contains a function with /// the specified name and its type matches to the given type, or `false` /// otherwise. - fn can_satisfy(name: &[u8], func_type: &FunctionType) -> bool; + fn can_satisfy(module: &[u8], name: &[u8], func_type: &FunctionType) -> bool; } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 100148b18dcd4..855cb6e45091f 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -1,181 +1,260 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! This module provides a means for executing contracts //! represented in wasm. 
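`ConvertibleToWasm` in `env_def/mod.rs` is the piece that ties a host function's Rust signature to the wasm value types compared by `can_satisfy`. Its core idea, reduced to a self-contained sketch:

// Minimal re-creation of the `ConvertibleToWasm` idea: each Rust type that
// can cross the wasm boundary names its wasm value type.
#[derive(Debug, PartialEq, Clone, Copy)]
enum ValueType {
	I32,
	I64,
}

trait ConvertibleToWasm: Sized {
	const VALUE_TYPE: ValueType;
}

impl ConvertibleToWasm for u32 {
	const VALUE_TYPE: ValueType = ValueType::I32;
}

impl ConvertibleToWasm for u64 {
	const VALUE_TYPE: ValueType = ValueType::I64;
}

// A signature can then be assembled from the types alone, which is what the
// `gen_signature!` macro does with the `parity_wasm` element types.
fn params_of<A: ConvertibleToWasm, B: ConvertibleToWasm>() -> Vec<ValueType> {
	vec![A::VALUE_TYPE, B::VALUE_TYPE]
}

fn main() {
	assert_eq!(params_of::<u32, u64>(), vec![ValueType::I32, ValueType::I64]);
}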
-use crate::{CodeHash, Schedule, Trait};
-use crate::wasm::env_def::FunctionImplProvider;
-use crate::exec::{Ext, ExecResult};
-use crate::gas::GasMeter;
-
-use sp_std::prelude::*;
-use codec::{Encode, Decode};
-use sp_sandbox;
-
 #[macro_use]
 mod env_def;
 mod code_cache;
 mod prepare;
 mod runtime;

-use self::runtime::{to_execution_result, Runtime};
-use self::code_cache::load as load_code;
-
-pub use self::code_cache::save as save_code;
 #[cfg(feature = "runtime-benchmarks")]
-pub use self::code_cache::save_raw as save_code_raw;
-pub use self::runtime::ReturnCode;
+pub use self::code_cache::reinstrument;
+pub use self::runtime::{ReturnCode, Runtime, RuntimeCosts};
+use crate::{
+	exec::{ExecResult, Executable, ExportedFunction, Ext},
+	gas::GasMeter,
+	wasm::env_def::FunctionImplProvider,
+	CodeHash, Config, Schedule,
+};
+use codec::{Decode, Encode};
+use frame_support::dispatch::DispatchError;
+use sp_core::crypto::UncheckedFrom;
+use sp_std::prelude::*;
+#[cfg(test)]
+pub use tests::MockExt;

 /// A prepared wasm module ready for execution.
-#[derive(Clone, Encode, Decode)]
-pub struct PrefabWasmModule {
-	/// Version of the schedule with which the code was instrumented.
+///
+/// # Note
+///
+/// This data structure is mostly immutable once created and stored. The exceptions that
+/// can be changed by calling a contract are `refcount`, `instruction_weights_version` and `code`.
+/// `refcount` can change when a contract instantiates a new contract or self terminates.
+/// `instruction_weights_version` and `code` change when a contract with an outdated
+/// instrumentation is called. Therefore one must be careful when holding any in-memory
+/// representation of this type while calling into a contract as those fields can get out of date.
+#[derive(Clone, Encode, Decode, scale_info::TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct PrefabWasmModule<T: Config> {
+	/// Version of the instruction weights with which the code was instrumented.
 	#[codec(compact)]
-	schedule_version: u32,
+	instruction_weights_version: u32,
+	/// Initial memory size of a contract's sandbox.
 	#[codec(compact)]
 	initial: u32,
+	/// The maximum memory size of a contract's sandbox.
 	#[codec(compact)]
 	maximum: u32,
+	/// The number of contracts that use this as their contract code.
+	///
+	/// If this number drops to zero this module is removed from storage.
+	#[codec(compact)]
+	refcount: u64,
 	/// This field is reserved for future evolution of format.
 	///
-	/// Basically, for now this field will be serialized as `None`. In the future
-	/// we would be able to extend this structure with.
+	/// For now this field is serialized as `None`. In the future we will be able to change the
+	/// type parameter to a new struct that contains the fields that we want to add.
+	/// That new struct would also contain a reserved field for its future extensions.
+	/// This works because in SCALE `None` is encoded independently from the type parameter
+	/// of the option.
 	_reserved: Option<()>,
 	/// Code instrumented with the latest schedule.
 	code: Vec<u8>,
+	/// The size of the uninstrumented code.
+	///
+	/// We cache this value here in order to avoid the need to pull the pristine code
+	/// from storage when we only need its length for rent calculations.
+	original_code_len: u32,
+	/// The uninstrumented, pristine version of the code.
+	///
+	/// It is not stored because the pristine code has its own storage item. The value
+	/// is only `Some` when this module was created from an `original_code` and `None` if
+	/// it was loaded from storage.
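The `_reserved: Option<()>` field works as an extension point because SCALE encodes `None` in a single byte that is independent of the `Option`'s type parameter, so the parameter can later be swapped for a real struct without invalidating stored values. A sketch of that property, assuming the `parity-scale-codec` crate with its `derive` feature:

// Requires parity-scale-codec with the `derive` feature in Cargo.toml.
use codec::Encode;

#[derive(Encode)]
struct FutureFields {
	some_new_field: u32,
}

fn main() {
	// Both encode to the single byte 0x00, so a stored `Option<()>` can be
	// reinterpreted later as `Option<FutureFields>` while it is `None`.
	assert_eq!(None::<()>.encode(), vec![0u8]);
	assert_eq!(None::<FutureFields>.encode(), vec![0u8]);
}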
+ #[codec(skip)] + original_code: Option>, + /// The code hash of the stored code which is defined as the hash over the `original_code`. + /// + /// As the map key there is no need to store the hash in the value, too. It is set manually + /// when loading the module from storage. + #[codec(skip)] + code_hash: CodeHash, } -/// Wasm executable loaded by `WasmLoader` and executed by `WasmVm`. -pub struct WasmExecutable { - entrypoint_name: &'static str, - prefab_module: PrefabWasmModule, -} - -/// Loader which fetches `WasmExecutable` from the code cache. -pub struct WasmLoader<'a, T: Trait> { - schedule: &'a Schedule, +impl ExportedFunction { + /// The wasm export name for the function. + fn identifier(&self) -> &str { + match self { + Self::Constructor => "deploy", + Self::Call => "call", + } + } } -impl<'a, T: Trait> WasmLoader<'a, T> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmLoader { schedule } +impl PrefabWasmModule +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ + /// Create the module by checking and instrumenting `original_code`. + pub fn from_code( + original_code: Vec, + schedule: &Schedule, + ) -> Result { + prepare::prepare_contract(original_code, schedule).map_err(Into::into) } -} -impl<'a, T: Trait> crate::exec::Loader for WasmLoader<'a, T> { - type Executable = WasmExecutable; + /// Create and store the module without checking nor instrumenting the passed code. + /// + /// # Note + /// + /// This is useful for benchmarking where we don't want instrumentation to skew + /// our results. + #[cfg(feature = "runtime-benchmarks")] + pub fn store_code_unchecked( + original_code: Vec, + schedule: &Schedule, + ) -> Result<(), DispatchError> { + let executable = prepare::benchmarking::prepare_contract(original_code, schedule) + .map_err::(Into::into)?; + code_cache::store(executable); + Ok(()) + } - fn load_init(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: "deploy", - prefab_module, - }) + /// Return the refcount of the module. + #[cfg(test)] + pub fn refcount(&self) -> u64 { + self.refcount } - fn load_main(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: "call", - prefab_module, - }) + + /// Decrement instruction_weights_version by 1. Panics if it is already 0. + #[cfg(test)] + pub fn decrement_version(&mut self) { + self.instruction_weights_version = self.instruction_weights_version.checked_sub(1).unwrap(); } } -/// Implementation of `Vm` that takes `WasmExecutable` and executes it. 
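`ExportedFunction::identifier` above replaces the removed `entrypoint_name` field: the wasm export to invoke is now derived from the function variant instead of being stored per executable. Extracted into standalone form:

// Mirror of `ExportedFunction::identifier`: the export name is a pure
// function of the variant, so no entrypoint string needs storing.
enum ExportedFunction {
	Constructor,
	Call,
}

impl ExportedFunction {
	fn identifier(&self) -> &str {
		match self {
			Self::Constructor => "deploy",
			Self::Call => "call",
		}
	}
}

fn main() {
	assert_eq!(ExportedFunction::Constructor.identifier(), "deploy");
	assert_eq!(ExportedFunction::Call.identifier(), "call");
}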
-pub struct WasmVm<'a, T: Trait> { - schedule: &'a Schedule, -} +impl Executable for PrefabWasmModule +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ + fn from_storage( + code_hash: CodeHash, + schedule: &Schedule, + gas_meter: &mut GasMeter, + ) -> Result { + code_cache::load(code_hash, Some((schedule, gas_meter))) + } -impl<'a, T: Trait> WasmVm<'a, T> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmVm { schedule } + fn from_storage_noinstr(code_hash: CodeHash) -> Result { + code_cache::load(code_hash, None) } -} -impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a, T> { - type Executable = WasmExecutable; + fn add_user(code_hash: CodeHash, gas_meter: &mut GasMeter) -> Result<(), DispatchError> { + code_cache::increment_refcount::(code_hash, gas_meter) + } + + fn remove_user( + code_hash: CodeHash, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + code_cache::decrement_refcount::(code_hash, gas_meter) + } fn execute>( - &self, - exec: &WasmExecutable, - mut ext: E, + self, + ext: &mut E, + function: &ExportedFunction, input_data: Vec, - gas_meter: &mut GasMeter, ) -> ExecResult { let memory = - sp_sandbox::Memory::new(exec.prefab_module.initial, Some(exec.prefab_module.maximum)) - .unwrap_or_else(|_| { + sp_sandbox::Memory::new(self.initial, Some(self.maximum)).unwrap_or_else(|_| { // unlike `.expect`, explicit panic preserves the source location. // Needed as we can't use `RUST_BACKTRACE` in here. - panic!( - "exec.prefab_module.initial can't be greater than exec.prefab_module.maximum; + panic!( + "exec.prefab_module.initial can't be greater than exec.prefab_module.maximum; thus Memory::new must not fail; qed" - ) - }); + ) + }); let mut imports = sp_sandbox::EnvironmentDefinitionBuilder::new(); imports.add_memory(self::prepare::IMPORT_MODULE_MEMORY, "memory", memory.clone()); - runtime::Env::impls(&mut |name, func_ptr| { - imports.add_host_func(self::prepare::IMPORT_MODULE_FN, name, func_ptr); + runtime::Env::impls(&mut |module, name, func_ptr| { + imports.add_host_func(module, name, func_ptr); }); - let mut runtime = Runtime::new( - &mut ext, - input_data, - &self.schedule, - memory, - gas_meter, - ); + let mut runtime = Runtime::new(ext, input_data, memory); + + // We store before executing so that the code hash is available in the constructor. + let code = self.code.clone(); + if let &ExportedFunction::Constructor = function { + code_cache::store(self) + } // Instantiate the instance from the instrumented module code and invoke the contract // entrypoint. 
- let result = sp_sandbox::Instance::new(&exec.prefab_module.code, &imports, &mut runtime) - .and_then(|mut instance| instance.invoke(exec.entrypoint_name, &[], &mut runtime)); - to_execution_result(runtime, result) + let result = sp_sandbox::Instance::new(&code, &imports, &mut runtime) + .and_then(|mut instance| instance.invoke(function.identifier(), &[], &mut runtime)); + + runtime.to_execution_result(result) + } + + fn code_hash(&self) -> &CodeHash { + &self.code_hash + } + + fn code_len(&self) -> u32 { + self.code.len() as u32 + } + + fn aggregate_code_len(&self) -> u32 { + self.original_code_len.saturating_add(self.code_len()) + } + + fn refcount(&self) -> u32 { + self.refcount as u32 } } #[cfg(test)] mod tests { use super::*; - use std::collections::HashMap; - use sp_core::H256; - use crate::exec::{Ext, StorageKey, ExecReturnValue, ReturnFlags, ExecError, ErrorOrigin}; - use crate::gas::{Gas, GasMeter}; - use crate::tests::{Test, Call}; - use crate::wasm::prepare::prepare_contract; - use crate::{CodeHash, BalanceOf, Error}; + use crate::{ + exec::{ + AccountIdOf, BlockNumberOf, ErrorOrigin, ExecError, Executable, Ext, SeedOf, StorageKey, + }, + gas::GasMeter, + tests::{Call, Test, ALICE, BOB}, + BalanceOf, CodeHash, Error, Pallet as Contracts, + }; + use assert_matches::assert_matches; + use frame_support::{ + assert_ok, + dispatch::{DispatchResult, DispatchResultWithPostInfo}, + weights::Weight, + }; use hex_literal::hex; + use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; + use pretty_assertions::assert_eq; + use sp_core::{Bytes, H256}; use sp_runtime::DispatchError; - use frame_support::weights::Weight; - - const GAS_LIMIT: Gas = 10_000_000_000; - - #[derive(Debug, PartialEq, Eq)] - struct DispatchEntry(Call); - - #[derive(Debug, PartialEq, Eq)] - struct RestoreEntry { - dest: u64, - code_hash: H256, - rent_allowance: u64, - delta: Vec, - } + use std::{borrow::BorrowMut, cell::RefCell, collections::HashMap}; #[derive(Debug, PartialEq, Eq)] struct InstantiateEntry { @@ -183,123 +262,120 @@ mod tests { endowment: u64, data: Vec, gas_left: u64, + salt: Vec, } #[derive(Debug, PartialEq, Eq)] struct TerminationEntry { - beneficiary: u64, + beneficiary: AccountIdOf, } #[derive(Debug, PartialEq, Eq)] struct TransferEntry { - to: u64, + to: AccountIdOf, + value: u64, + } + + #[derive(Debug, PartialEq, Eq)] + struct CallEntry { + to: AccountIdOf, value: u64, data: Vec, + allows_reentry: bool, } - #[derive(Default)] pub struct MockExt { storage: HashMap>, - rent_allowance: u64, instantiates: Vec, terminations: Vec, + calls: Vec, transfers: Vec, - restores: Vec, // (topics, data) events: Vec<(Vec, Vec)>, - next_account_id: u64, + runtime_calls: RefCell>, + schedule: Schedule, + gas_meter: GasMeter, + debug_buffer: Vec, + ecdsa_recover: RefCell>, + } + + /// The call is mocked and just returns this hardcoded value. 
+ fn call_return_data() -> Bytes { + Bytes(vec![0xDE, 0xAD, 0xBE, 0xEF]) + } + + impl Default for MockExt { + fn default() -> Self { + Self { + storage: Default::default(), + instantiates: Default::default(), + terminations: Default::default(), + calls: Default::default(), + transfers: Default::default(), + events: Default::default(), + runtime_calls: Default::default(), + schedule: Default::default(), + gas_meter: GasMeter::new(10_000_000_000), + debug_buffer: Default::default(), + ecdsa_recover: Default::default(), + } + } } impl Ext for MockExt { type T = Test; - fn get_storage(&self, key: &StorageKey) -> Option> { - self.storage.get(key).cloned() - } - fn set_storage(&mut self, key: StorageKey, value: Option>) { - *self.storage.entry(key).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); + fn call( + &mut self, + _gas_limit: Weight, + to: AccountIdOf, + value: u64, + data: Vec, + allows_reentry: bool, + ) -> Result { + self.calls.push(CallEntry { to, value, data, allows_reentry }); + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: call_return_data() }) } fn instantiate( &mut self, - code_hash: &CodeHash, + gas_limit: Weight, + code_hash: CodeHash, endowment: u64, - gas_meter: &mut GasMeter, data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { + salt: &[u8], + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { code_hash: code_hash.clone(), endowment, data: data.to_vec(), - gas_left: gas_meter.gas_left(), + gas_left: gas_limit, + salt: salt.to_vec(), }); - let address = self.next_account_id; - self.next_account_id += 1; - Ok(( - address, - ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }, + Contracts::::contract_address(&ALICE, &code_hash, salt), + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, )) } - fn transfer( - &mut self, - to: &u64, - value: u64, - ) -> Result<(), DispatchError> { - self.transfers.push(TransferEntry { - to: *to, - value, - data: Vec::new(), - }); + fn transfer(&mut self, to: &AccountIdOf, value: u64) -> Result<(), DispatchError> { + self.transfers.push(TransferEntry { to: to.clone(), value }); Ok(()) } - fn call( - &mut self, - to: &u64, - value: u64, - _gas_meter: &mut GasMeter, - data: Vec, - ) -> ExecResult { - self.transfers.push(TransferEntry { - to: *to, - value, - data: data, - }); - // Assume for now that it was just a plain transfer. - // TODO: Add tests for different call outcomes. 
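The `MockExt` used by these tests records every host-side effect (`calls`, `transfers`, `instantiates`, and so on) so the wat fixtures can assert on exactly what a contract asked its environment to do. The recording pattern in miniature, with a deliberately cut-down trait invented for this sketch:

// A cut-down, invented version of the `Ext`/`MockExt` pair: the mock
// records arguments instead of performing transfers.
#[derive(Debug, PartialEq)]
struct TransferEntry {
	to: [u8; 32],
	value: u64,
}

trait Env {
	fn transfer(&mut self, to: [u8; 32], value: u64);
}

#[derive(Default)]
struct MockEnv {
	transfers: Vec<TransferEntry>,
}

impl Env for MockEnv {
	fn transfer(&mut self, to: [u8; 32], value: u64) {
		self.transfers.push(TransferEntry { to, value });
	}
}

fn main() {
	let mut env = MockEnv::default();
	env.transfer([1; 32], 153); // what the CODE_TRANSFER fixture triggers
	assert_eq!(env.transfers, vec![TransferEntry { to: [1; 32], value: 153 }]);
}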
- Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) - } - fn terminate( - &mut self, - beneficiary: &u64, - ) -> Result<(), DispatchError> { - self.terminations.push(TerminationEntry { - beneficiary: *beneficiary, - }); + fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { + self.terminations.push(TerminationEntry { beneficiary: beneficiary.clone() }); Ok(()) } - fn restore_to( - &mut self, - dest: u64, - code_hash: H256, - rent_allowance: u64, - delta: Vec, - ) -> Result<(), &'static str> { - self.restores.push(RestoreEntry { - dest, - code_hash, - rent_allowance, - delta, - }); + fn get_storage(&mut self, key: &StorageKey) -> Option> { + self.storage.get(key).cloned() + } + fn set_storage(&mut self, key: StorageKey, value: Option>) -> DispatchResult { + *self.storage.entry(key).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); Ok(()) } - fn caller(&self) -> &u64 { - &42 + fn caller(&self) -> &AccountIdOf { + &ALICE } - fn address(&self) -> &u64 { - &69 + fn address(&self) -> &AccountIdOf { + &BOB } fn balance(&self) -> u64 { 228 @@ -307,165 +383,61 @@ mod tests { fn value_transferred(&self) -> u64 { 1337 } - fn now(&self) -> &u64 { &1111 } - fn minimum_balance(&self) -> u64 { 666 } - - fn tombstone_deposit(&self) -> u64 { + fn contract_deposit(&self) -> u64 { 16 } - - fn random(&self, subject: &[u8]) -> H256 { - H256::from_slice(subject) + fn random(&self, subject: &[u8]) -> (SeedOf, BlockNumberOf) { + (H256::from_slice(subject), 42) } - fn deposit_event(&mut self, topics: Vec, data: Vec) { self.events.push((topics, data)) } - - fn set_rent_allowance(&mut self, rent_allowance: u64) { - self.rent_allowance = rent_allowance; + fn block_number(&self) -> u64 { + 121 } - - fn rent_allowance(&self) -> u64 { - self.rent_allowance + fn max_value_size(&self) -> u32 { + 16_384 } - - fn block_number(&self) -> u64 { 121 } - - fn max_value_size(&self) -> u32 { 16_384 } - fn get_weight_price(&self, weight: Weight) -> BalanceOf { BalanceOf::::from(1312_u32).saturating_mul(weight.into()) } - } - - impl Ext for &mut MockExt { - type T = ::T; - - fn get_storage(&self, key: &[u8; 32]) -> Option> { - (**self).get_storage(key) + fn schedule(&self) -> &Schedule { + &self.schedule } - fn set_storage(&mut self, key: [u8; 32], value: Option>) { - (**self).set_storage(key, value) + fn gas_meter(&mut self) -> &mut GasMeter { + &mut self.gas_meter } - fn instantiate( - &mut self, - code: &CodeHash, - value: u64, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { - (**self).instantiate(code, value, gas_meter, input_data) + fn append_debug_buffer(&mut self, msg: &str) -> bool { + self.debug_buffer.extend(msg.as_bytes()); + true } - fn transfer( - &mut self, - to: &u64, - value: u64, - ) -> Result<(), DispatchError> { - (**self).transfer(to, value) + fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo { + self.runtime_calls.borrow_mut().push(call); + Ok(Default::default()) } - fn terminate( - &mut self, - beneficiary: &u64, - ) -> Result<(), DispatchError> { - (**self).terminate(beneficiary) - } - fn call( - &mut self, - to: &u64, - value: u64, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> ExecResult { - (**self).call(to, value, gas_meter, input_data) - } - fn restore_to( - &mut self, - dest: u64, - code_hash: H256, - rent_allowance: u64, - delta: Vec, - ) -> Result<(), &'static str> { - (**self).restore_to( - dest, - code_hash, - rent_allowance, - delta, - ) - } - fn caller(&self) -> &u64 { - 
(**self).caller() - } - fn address(&self) -> &u64 { - (**self).address() - } - fn balance(&self) -> u64 { - (**self).balance() - } - fn value_transferred(&self) -> u64 { - (**self).value_transferred() - } - fn now(&self) -> &u64 { - (**self).now() - } - fn minimum_balance(&self) -> u64 { - (**self).minimum_balance() - } - fn tombstone_deposit(&self) -> u64 { - (**self).tombstone_deposit() - } - fn random(&self, subject: &[u8]) -> H256 { - (**self).random(subject) - } - fn deposit_event(&mut self, topics: Vec, data: Vec) { - (**self).deposit_event(topics, data) - } - fn set_rent_allowance(&mut self, rent_allowance: u64) { - (**self).set_rent_allowance(rent_allowance) - } - fn rent_allowance(&self) -> u64 { - (**self).rent_allowance() - } - fn block_number(&self) -> u64 { - (**self).block_number() - } - fn max_value_size(&self) -> u32 { - (**self).max_value_size() - } - fn get_weight_price(&self, weight: Weight) -> BalanceOf { - (**self).get_weight_price(weight) + + fn ecdsa_recover( + &self, + signature: &[u8; 65], + message_hash: &[u8; 32], + ) -> Result<[u8; 33], ()> { + self.ecdsa_recover.borrow_mut().push((signature.clone(), message_hash.clone())); + Ok([3; 33]) } } - fn execute( - wat: &str, - input_data: Vec, - ext: E, - gas_meter: &mut GasMeter, - ) -> ExecResult { - use crate::exec::Vm; - + fn execute>(wat: &str, input_data: Vec, mut ext: E) -> ExecResult { let wasm = wat::parse_str(wat).unwrap(); let schedule = crate::Schedule::default(); - let prefab_module = - prepare_contract::(&wasm, &schedule).unwrap(); - - let exec = WasmExecutable { - // Use a "call" convention. - entrypoint_name: "call", - prefab_module, - }; - - let cfg = Default::default(); - let vm = WasmVm::new(&cfg); - - vm.execute(&exec, ext, input_data, gas_meter) + let executable = + PrefabWasmModule::<::T>::from_code(wasm, &schedule).unwrap(); + executable.execute(ext.borrow_mut(), &ExportedFunction::Call, input_data) } const CODE_TRANSFER: &str = r#" @@ -482,42 +454,32 @@ mod tests { (drop (call $seal_transfer (i32.const 4) ;; Pointer to "account" address. - (i32.const 8) ;; Length of "account" address. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 32) ;; Length of "account" address. + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. ) ) ) (func (export "deploy")) - ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\07\00\00\00\00\00\00\00") + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 12) "\99\00\00\00\00\00\00\00") + (data (i32.const 36) "\99\00\00\00\00\00\00\00") ) "#; #[test] fn contract_transfer() { let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_TRANSFER, - vec![], - &mut mock_ext, - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + assert_ok!(execute(CODE_TRANSFER, vec![], &mut mock_ext)); - assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 7, - value: 153, - data: Vec::new(), - }] - ); + assert_eq!(&mock_ext.transfers, &[TransferEntry { to: ALICE, value: 153 }]); } const CODE_CALL: &str = r#" @@ -539,11 +501,11 @@ mod tests { (drop (call $seal_call (i32.const 4) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. 
+ (i32.const 32) ;; Length of "callee" address. (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 20) ;; Pointer to input data buffer address + (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case @@ -552,34 +514,187 @@ mod tests { ) (func (export "deploy")) - ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 12) "\06\00\00\00\00\00\00\00") + (data (i32.const 36) "\06\00\00\00\00\00\00\00") - (data (i32.const 20) "\01\02\03\04") + (data (i32.const 44) "\01\02\03\04") ) "#; #[test] fn contract_call() { let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_CALL, - vec![], - &mut mock_ext, - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + assert_ok!(execute(CODE_CALL, vec![], &mut mock_ext)); + + assert_eq!( + &mock_ext.calls, + &[CallEntry { to: ALICE, value: 6, data: vec![1, 2, 3, 4], allows_reentry: true }] + ); + } + + #[test] + #[cfg(feature = "unstable-interface")] + fn contract_call_forward_input() { + const CODE: &str = r#" +(module + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $seal_call + (i32.const 1) ;; Set FORWARD_INPUT bit + (i32.const 4) ;; Pointer to "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 44) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + ) + + ;; triggers a trap because we already forwarded the input + (call $seal_input (i32.const 1) (i32.const 44)) + ) + + (func (export "deploy")) + + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; Amount of value to transfer. + ;; Represented by u64 (8 bytes long) in little endian. 
+ (data (i32.const 36) "\2A\00\00\00\00\00\00\00") + + ;; The input is ignored because we forward our own input + (data (i32.const 44) "\01\02\03\04") +) +"#; + let mut mock_ext = MockExt::default(); + let input = vec![0xff, 0x2a, 0x99, 0x88]; + frame_support::assert_err!( + execute(CODE, input.clone(), &mut mock_ext), + >::InputForwarded, + ); + + assert_eq!( + &mock_ext.calls, + &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: false }] + ); + } + + #[test] + #[cfg(feature = "unstable-interface")] + fn contract_call_clone_input() { + const CODE: &str = r#" +(module + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $seal_call + (i32.const 11) ;; Set FORWARD_INPUT | CLONE_INPUT | ALLOW_REENTRY bits + (i32.const 4) ;; Pointer to "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 44) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + ) + + ;; works because the input was cloned + (call $seal_input (i32.const 0) (i32.const 44)) + + ;; return the input to caller for inspection + (call $seal_return (i32.const 0) (i32.const 0) (i32.load (i32.const 44))) + ) + + (func (export "deploy")) + + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; Amount of value to transfer. + ;; Represented by u64 (8 bytes long) in little endian. + (data (i32.const 36) "\2A\00\00\00\00\00\00\00") + + ;; The input is ignored because we forward our own input + (data (i32.const 44) "\01\02\03\04") +) +"#; + let mut mock_ext = MockExt::default(); + let input = vec![0xff, 0x2a, 0x99, 0x88]; + let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); + assert_eq!(result.data.0, input); + assert_eq!( + &mock_ext.calls, + &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: true }] + ); + } + #[test] + #[cfg(feature = "unstable-interface")] + fn contract_call_tail_call() { + const CODE: &str = r#" +(module + (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $seal_call + (i32.const 5) ;; Set FORWARD_INPUT | TAIL_CALL bit + (i32.const 4) ;; Pointer to "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 36) ;; Pointer to the buffer with value to transfer + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + ) + + ;; a tail call never returns + (unreachable) + ) + + (func (export "deploy")) + + ;; Destination AccountId (ALICE) + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + + ;; Amount of value to transfer. + ;; Represented by u64 (8 bytes long) in little endian. 
+ (data (i32.const 36) "\2A\00\00\00\00\00\00\00") +) +"#; + let mut mock_ext = MockExt::default(); + let input = vec![0xff, 0x2a, 0x99, 0x88]; + let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); + assert_eq!(result.data, call_return_data()); assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 9, - value: 6, - data: vec![1, 2, 3, 4], - }] + &mock_ext.calls, + &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: false }] ); } @@ -599,7 +714,9 @@ mod tests { ;; output_ptr: u32, ;; output_len_ptr: u32 ;; ) -> u32 - (import "seal0" "seal_instantiate" (func $seal_instantiate (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_instantiate" (func $seal_instantiate + (param i32 i32 i64 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32) + )) (import "env" "memory" (memory 1 1)) (func (export "call") (drop @@ -615,11 +732,15 @@ mod tests { (i32.const 0) ;; Length is ignored in this case (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case + (i32.const 0) ;; salt_ptr + (i32.const 4) ;; salt_len ) ) ) (func (export "deploy")) + ;; Salt + (data (i32.const 0) "\42\43\44\45") ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. (data (i32.const 4) "\03\00\00\00\00\00\00\00") @@ -636,21 +757,20 @@ mod tests { #[test] fn contract_instantiate() { let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_INSTANTIATE, - vec![], - &mut mock_ext, - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + assert_ok!(execute(CODE_INSTANTIATE, vec![], &mut mock_ext)); - assert_eq!( - &mock_ext.instantiates, - &[InstantiateEntry { - code_hash: [0x11; 32].into(), + assert_matches!( + &mock_ext.instantiates[..], + [InstantiateEntry { + code_hash, endowment: 3, - data: vec![1, 2, 3, 4], - gas_left: 9392302058, - }] + data, + gas_left: _, + salt, + }] if + code_hash == &[0x11; 32].into() && + data == &vec![1, 2, 3, 4] && + salt == &vec![0x42, 0x43, 0x44, 0x45] ); } @@ -665,33 +785,25 @@ mod tests { (func (export "call") (call $seal_terminate (i32.const 4) ;; Pointer to "beneficiary" address. - (i32.const 8) ;; Length of "beneficiary" address. + (i32.const 32) ;; Length of "beneficiary" address. ) ) (func (export "deploy")) ;; Beneficiary AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ) "#; #[test] fn contract_terminate() { let mut mock_ext = MockExt::default(); - execute( - CODE_TERMINATE, - vec![], - &mut mock_ext, - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + execute(CODE_TERMINATE, vec![], &mut mock_ext).unwrap(); - assert_eq!( - &mock_ext.terminations, - &[TerminationEntry { - beneficiary: 0x09, - }] - ); + assert_eq!(&mock_ext.terminations, &[TerminationEntry { beneficiary: ALICE }]); } const CODE_TRANSFER_LIMITED_GAS: &str = r#" @@ -713,11 +825,11 @@ mod tests { (drop (call $seal_call (i32.const 4) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. + (i32.const 32) ;; Length of "callee" address. (i64.const 228) ;; How much gas to devote for the execution. - (i32.const 12) ;; Pointer to the buffer with value to transfer + (i32.const 36) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Length of the buffer with value to transfer. 
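The `__unstable__` `seal_call` fixtures pass their flags as plain integers: 1 for FORWARD_INPUT, 11 for FORWARD_INPUT | CLONE_INPUT | ALLOW_REENTRY, and 5 for FORWARD_INPUT | TAIL_CALL. Those three data points pin the bit layout used in the sketch below; the constant names and the split of 2 versus 8 between CLONE_INPUT and ALLOW_REENTRY follow the test comments and are otherwise an assumption:

// Bit values inferred from the wat fixtures: 1, 11 = 1|2|8 and 5 = 1|4.
const FORWARD_INPUT: u32 = 0b0001;
const CLONE_INPUT: u32 = 0b0010;
const TAIL_CALL: u32 = 0b0100;
const ALLOW_REENTRY: u32 = 0b1000;

fn main() {
	assert_eq!(FORWARD_INPUT, 1);
	assert_eq!(FORWARD_INPUT | CLONE_INPUT | ALLOW_REENTRY, 11);
	assert_eq!(FORWARD_INPUT | TAIL_CALL, 5);
}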
- (i32.const 20) ;; Pointer to input data buffer address + (i32.const 44) ;; Pointer to input data buffer address (i32.const 4) ;; Length of input data buffer (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this cas @@ -727,36 +839,74 @@ mod tests { (func (export "deploy")) ;; Destination AccountId to transfer the funds. - ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 4) "\09\00\00\00\00\00\00\00") + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) ;; Amount of value to transfer. ;; Represented by u64 (8 bytes long) in little endian. - (data (i32.const 12) "\06\00\00\00\00\00\00\00") + (data (i32.const 36) "\06\00\00\00\00\00\00\00") - (data (i32.const 20) "\01\02\03\04") + (data (i32.const 44) "\01\02\03\04") ) "#; #[test] fn contract_call_limited_gas() { let mut mock_ext = MockExt::default(); - let _ = execute( - &CODE_TRANSFER_LIMITED_GAS, - vec![], - &mut mock_ext, - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + assert_ok!(execute(&CODE_TRANSFER_LIMITED_GAS, vec![], &mut mock_ext)); assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 9, - value: 6, - data: vec![1, 2, 3, 4], - }] + &mock_ext.calls, + &[CallEntry { to: ALICE, value: 6, data: vec![1, 2, 3, 4], allows_reentry: true }] ); } + #[cfg(feature = "unstable-interface")] + const CODE_ECDSA_RECOVER: &str = r#" +(module + ;; seal_ecdsa_recover( + ;; signature_ptr: u32, + ;; message_hash_ptr: u32, + ;; output_ptr: u32 + ;; ) -> u32 + (import "__unstable__" "seal_ecdsa_recover" (func $seal_ecdsa_recover (param i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (drop + (call $seal_ecdsa_recover + (i32.const 36) ;; Pointer to signature. + (i32.const 4) ;; Pointer to message hash. + (i32.const 36) ;; Pointer for output - public key. + ) + ) + ) + (func (export "deploy")) + + ;; Hash of message. + (data (i32.const 4) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + ) + ;; Signature + (data (i32.const 36) + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01\01" + "\01" + ) +) +"#; + + #[test] + #[cfg(feature = "unstable-interface")] + fn contract_ecdsa_recover() { + let mut mock_ext = MockExt::default(); + assert_ok!(execute(&CODE_ECDSA_RECOVER, vec![], &mut mock_ext)); + assert_eq!(mock_ext.ecdsa_recover.into_inner(), [([1; 65], [1; 32])]); + } + const CODE_GET_STORAGE: &str = r#" (module (import "seal0" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32) (result i32))) @@ -824,18 +974,14 @@ mod tests { #[test] fn get_storage_puts_data_into_buf() { let mut mock_ext = MockExt::default(); - mock_ext - .storage - .insert([0x11; 32], [0x22; 32].to_vec()); + mock_ext.storage.insert([0x11; 32], [0x22; 32].to_vec()); - let output = execute( - CODE_GET_STORAGE, - vec![], - mock_ext, - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + let output = execute(CODE_GET_STORAGE, vec![], mock_ext).unwrap(); - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() }); + assert_eq!( + output, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x22; 32].to_vec()) } + ); } /// calls `seal_caller` and compares the result with the constant 42. 
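Editor's note on the `contract_ecdsa_recover` test above: it only pins down that the mock records the (signature, message hash) pair handed to the host function. A minimal sketch of such a recording mock (not the pallet's actual `MockExt`; the delegation target named in the comment is an assumption) could look like this:

    use std::cell::RefCell;

    #[derive(Default)]
    struct RecordingExt {
        // (signature, message_hash) pairs observed by `seal_ecdsa_recover`
        ecdsa_recover: RefCell<Vec<([u8; 65], [u8; 32])>>,
    }

    impl RecordingExt {
        fn ecdsa_recover(&self, signature: &[u8; 65], message_hash: &[u8; 32]) -> Result<[u8; 33], ()> {
            // Record the inputs so a test can later assert on them via `into_inner()`.
            self.ecdsa_recover.borrow_mut().push((*signature, *message_hash));
            // A real host implementation would delegate to something like
            // `sp_io::crypto::secp256k1_ecdsa_recover_compressed(signature, message_hash)`.
            Ok([3u8; 33]) // dummy compressed public key
        }
    }

This mirrors the shape of the assertion `assert_eq!(mock_ext.ecdsa_recover.into_inner(), [([1; 65], [1; 32])])` in the test.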
@@ -860,19 +1006,19 @@ mod tests { ;; fill the buffer with the caller. (call $seal_caller (i32.const 0) (i32.const 32)) - ;; assert len == 8 + ;; assert len == 32 (call $assert (i32.eq (i32.load (i32.const 32)) - (i32.const 8) + (i32.const 32) ) ) - ;; assert that contents of the buffer is equal to the i64 value of 42. + ;; assert that the first 64 byte are the beginning of "ALICE" (call $assert (i64.eq (i64.load (i32.const 0)) - (i64.const 42) + (i64.const 0x0101010101010101) ) ) ) @@ -883,12 +1029,7 @@ mod tests { #[test] fn caller() { - let _ = execute( - CODE_CALLER, - vec![], - MockExt::default(), - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + assert_ok!(execute(CODE_CALLER, vec![], MockExt::default())); } /// calls `seal_address` and compares the result with the constant 69. @@ -913,19 +1054,19 @@ mod tests { ;; fill the buffer with the self address. (call $seal_address (i32.const 0) (i32.const 32)) - ;; assert size == 8 + ;; assert size == 32 (call $assert (i32.eq (i32.load (i32.const 32)) - (i32.const 8) + (i32.const 32) ) ) - ;; assert that contents of the buffer is equal to the i64 value of 69. + ;; assert that the first 64 byte are the beginning of "BOB" (call $assert (i64.eq (i64.load (i32.const 0)) - (i64.const 69) + (i64.const 0x0202020202020202) ) ) ) @@ -936,12 +1077,7 @@ mod tests { #[test] fn address() { - let _ = execute( - CODE_ADDRESS, - vec![], - MockExt::default(), - &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + assert_ok!(execute(CODE_ADDRESS, vec![], MockExt::default())); } const CODE_BALANCE: &str = r#" @@ -987,13 +1123,7 @@ mod tests { #[test] fn balance() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( - CODE_BALANCE, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + assert_ok!(execute(CODE_BALANCE, vec![], MockExt::default())); } const CODE_GAS_PRICE: &str = r#" @@ -1039,13 +1169,7 @@ mod tests { #[test] fn gas_price() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( - CODE_GAS_PRICE, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + assert_ok!(execute(CODE_GAS_PRICE, vec![], MockExt::default())); } const CODE_GAS_LEFT: &str = r#" @@ -1089,18 +1213,15 @@ mod tests { #[test] fn gas_left() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); + let mut ext = MockExt::default(); + let gas_limit = ext.gas_meter.gas_left(); - let output = execute( - CODE_GAS_LEFT, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + let output = execute(CODE_GAS_LEFT, vec![], &mut ext).unwrap(); - let gas_left = Gas::decode(&mut output.data.as_slice()).unwrap(); - assert!(gas_left < GAS_LIMIT, "gas_left must be less than initial"); - assert!(gas_left > gas_meter.gas_left(), "gas_left must be greater than final"); + let gas_left = Weight::decode(&mut &*output.data).unwrap(); + let actual_left = ext.gas_meter.gas_left(); + assert!(gas_left < gas_limit, "gas_left must be less than initial"); + assert!(gas_left > actual_left, "gas_left must be greater than final"); } const CODE_VALUE_TRANSFERRED: &str = r#" @@ -1146,13 +1267,7 @@ mod tests { #[test] fn value_transferred() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( - CODE_VALUE_TRANSFERRED, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + assert_ok!(execute(CODE_VALUE_TRANSFERRED, vec![], MockExt::default())); } const CODE_RETURN_FROM_START_FN: &str = r#" @@ -1181,14 +1296,12 @@ mod tests { #[test] fn return_from_start_fn() { - let output = execute( - CODE_RETURN_FROM_START_FN, - vec![], - MockExt::default(), - 
&mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + let output = execute(CODE_RETURN_FROM_START_FN, vec![], MockExt::default()).unwrap(); - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }); + assert_eq!( + output, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) } + ); } const CODE_TIMESTAMP_NOW: &str = r#" @@ -1234,13 +1347,7 @@ mod tests { #[test] fn now() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( - CODE_TIMESTAMP_NOW, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + assert_ok!(execute(CODE_TIMESTAMP_NOW, vec![], MockExt::default())); } const CODE_MINIMUM_BALANCE: &str = r#" @@ -1285,18 +1392,12 @@ mod tests { #[test] fn minimum_balance() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( - CODE_MINIMUM_BALANCE, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + assert_ok!(execute(CODE_MINIMUM_BALANCE, vec![], MockExt::default())); } - const CODE_TOMBSTONE_DEPOSIT: &str = r#" + const CODE_CONTRACT_DEPOSIT: &str = r#" (module - (import "seal0" "seal_tombstone_deposit" (func $seal_tombstone_deposit (param i32 i32))) + (import "seal0" "seal_contract_deposit" (func $seal_contract_deposit (param i32 i32))) (import "env" "memory" (memory 1 1)) ;; size of our buffer is 32 bytes @@ -1312,7 +1413,7 @@ mod tests { ) (func (export "call") - (call $seal_tombstone_deposit (i32.const 0) (i32.const 32)) + (call $seal_contract_deposit (i32.const 0) (i32.const 32)) ;; assert len == 8 (call $assert @@ -1335,14 +1436,8 @@ mod tests { "#; #[test] - fn tombstone_deposit() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( - CODE_TOMBSTONE_DEPOSIT, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + fn contract_deposit() { + assert_ok!(execute(CODE_CONTRACT_DEPOSIT, vec![], MockExt::default())); } const CODE_RANDOM: &str = r#" @@ -1361,7 +1456,7 @@ mod tests { ;; size of our buffer is 128 bytes (data (i32.const 160) "\80") - + (func $assert (param i32) (block $ok (br_if $ok @@ -1401,21 +1496,91 @@ mod tests { #[test] fn random() { - let mut gas_meter = GasMeter::new(GAS_LIMIT); + let output = execute(CODE_RANDOM, vec![], MockExt::default()).unwrap(); - let output = execute( - CODE_RANDOM, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + // The mock ext just returns the same data that was passed as the subject. + assert_eq!( + output, + ExecReturnValue { + flags: ReturnFlags::empty(), + data: Bytes( + hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F") + .to_vec() + ), + }, + ); + } + + const CODE_RANDOM_V1: &str = r#" +(module + (import "seal1" "seal_random" (func $seal_random (param i32 i32 i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; [0,128) is reserved for the result of PRNG. + + ;; the subject used for the PRNG. 
[128,160)
+	(data (i32.const 128)
+		"\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F"
+		"\00\01\02\03\04\05\06\07\08\09\0A\0B\0C\0D\0E\0F"
+	)
+
+	;; size of our buffer is 128 bytes
+	(data (i32.const 160) "\80")
+
+	(func $assert (param i32)
+		(block $ok
+			(br_if $ok
+				(get_local 0)
+			)
+			(unreachable)
+		)
+	)
+
+	(func (export "call")
+		;; This stores the block random seed in the buffer
+		(call $seal_random
+			(i32.const 128) ;; Pointer in memory to the start of the subject buffer
+			(i32.const 32)  ;; The subject buffer's length
+			(i32.const 0)   ;; Pointer to the output buffer
+			(i32.const 160) ;; Pointer to the output buffer length
+		)
+
+		;; assert len == 40 (32 byte random data followed by an 8 byte block number)
+		(call $assert
+			(i32.eq
+				(i32.load (i32.const 160))
+				(i32.const 40)
+			)
+		)
+
+		;; return the random data
+		(call $seal_return
+			(i32.const 0)
+			(i32.const 0)
+			(i32.const 40)
+		)
+	)
+	(func (export "deploy"))
+)
+"#;
+
+	#[test]
+	fn random_v1() {
+		let output = execute(CODE_RANDOM_V1, vec![], MockExt::default()).unwrap();

 		// The mock ext just returns the same data that was passed as the subject.
 		assert_eq!(
 			output,
 			ExecReturnValue {
 				flags: ReturnFlags::empty(),
-				data: hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F").to_vec(),
+				data: Bytes(
+					(
+						hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F"),
+						42u64,
+					)
+					.encode()
+				),
 			},
 		);
 	}
@@ -1446,20 +1611,17 @@ mod tests {
 	#[test]
 	fn deposit_event() {
 		let mut mock_ext = MockExt::default();
-		let mut gas_meter = GasMeter::new(GAS_LIMIT);
-		let _ = execute(
-			CODE_DEPOSIT_EVENT,
-			vec![],
-			&mut mock_ext,
-			&mut gas_meter
-		).unwrap();
-
-		assert_eq!(mock_ext.events, vec![
-			(vec![H256::repeat_byte(0x33)],
-			vec![0x00, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00])
-		]);
-
-		assert_eq!(gas_meter.gas_left(), 9834099446);
+		assert_ok!(execute(CODE_DEPOSIT_EVENT, vec![], &mut mock_ext));
+
+		assert_eq!(
+			mock_ext.events,
+			vec![(
+				vec![H256::repeat_byte(0x33)],
+				vec![0x00, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00]
+			)]
+		);
+
+		assert!(mock_ext.gas_meter.gas_left() > 0);
 	}

 	const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#"
@@ -1489,20 +1651,13 @@ mod tests {
 )
"#;

+	/// Checks that the runtime traps if there are more than `max_topic_events` topics.
 	#[test]
 	fn deposit_event_max_topics() {
-		// Checks that the runtime traps if there are more than `max_topic_events` topics.
-		let mut gas_meter = GasMeter::new(GAS_LIMIT);
-
 		assert_eq!(
-			execute(
-				CODE_DEPOSIT_EVENT_MAX_TOPICS,
-				vec![],
-				MockExt::default(),
-				&mut gas_meter
-			),
+			execute(CODE_DEPOSIT_EVENT_MAX_TOPICS, vec![], MockExt::default(),),
 			Err(ExecError {
-				error: Error::<Test>::ContractTrapped.into(),
+				error: Error::<Test>::TooManyTopics.into(),
 				origin: ErrorOrigin::Caller,
 			})
 		);
@@ -1534,20 +1689,13 @@ mod tests {
 )
"#;

+	/// Checks that the runtime traps if there are duplicates.
 	#[test]
 	fn deposit_event_duplicates() {
-		// Checks that the runtime traps if there are duplicates.
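+		// Editor's note: together with `deposit_event_max_topics` above, this test
+		// pins down the `seal_deposit_event` topic rules: the topic list must stay
+		// within the configured maximum and must not contain duplicates; both
+		// violations now surface as the dedicated `TooManyTopics` / `DuplicateTopics`
+		// errors instead of a generic `ContractTrapped`.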
-		let mut gas_meter = GasMeter::new(GAS_LIMIT);
-
 		assert_eq!(
-			execute(
-				CODE_DEPOSIT_EVENT_DUPLICATES,
-				vec![],
-				MockExt::default(),
-				&mut gas_meter
-			),
+			execute(CODE_DEPOSIT_EVENT_DUPLICATES, vec![], MockExt::default(),),
 			Err(ExecError {
-				error: Error::<Test>::ContractTrapped.into(),
+				error: Error::<Test>::DuplicateTopics.into(),
 				origin: ErrorOrigin::Caller,
 			})
 		);
@@ -1598,12 +1746,7 @@ mod tests {
 	#[test]
 	fn block_number() {
-		let _ = execute(
-			CODE_BLOCK_NUMBER,
-			vec![],
-			MockExt::default(),
-			&mut GasMeter::new(GAS_LIMIT),
-		).unwrap();
+		let _ = execute(CODE_BLOCK_NUMBER, vec![], MockExt::default()).unwrap();
 	}

 	const CODE_RETURN_WITH_DATA: &str = r#"
@@ -1644,23 +1787,32 @@ mod tests {
 			CODE_RETURN_WITH_DATA,
 			hex!("00000000445566778899").to_vec(),
 			MockExt::default(),
-			&mut GasMeter::new(GAS_LIMIT),
-		).unwrap();
+		)
+		.unwrap();

-		assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: hex!("445566778899").to_vec() });
+		assert_eq!(
+			output,
+			ExecReturnValue {
+				flags: ReturnFlags::empty(),
+				data: Bytes(hex!("445566778899").to_vec()),
+			}
+		);
 		assert!(output.is_success());
 	}

 	#[test]
 	fn return_with_revert_status() {
-		let output = execute(
-			CODE_RETURN_WITH_DATA,
-			hex!("010000005566778899").to_vec(),
-			MockExt::default(),
-			&mut GasMeter::new(GAS_LIMIT),
-		).unwrap();
+		let output =
+			execute(CODE_RETURN_WITH_DATA, hex!("010000005566778899").to_vec(), MockExt::default())
+				.unwrap();

-		assert_eq!(output, ExecReturnValue { flags: ReturnFlags::REVERT, data: hex!("5566778899").to_vec() });
+		assert_eq!(
+			output,
+			ExecReturnValue {
+				flags: ReturnFlags::REVERT,
+				data: Bytes(hex!("5566778899").to_vec()),
+			}
+		);
 		assert!(!output.is_success());
 	}
@@ -1683,12 +1835,7 @@ mod tests {
 	#[test]
 	fn contract_out_of_bounds_access() {
 		let mut mock_ext = MockExt::default();
-		let result = execute(
-			CODE_OUT_OF_BOUNDS_ACCESS,
-			vec![],
-			&mut mock_ext,
-			&mut GasMeter::new(GAS_LIMIT),
-		);
+		let result = execute(CODE_OUT_OF_BOUNDS_ACCESS, vec![], &mut mock_ext);

 		assert_eq!(
 			result,
@@ -1716,15 +1863,124 @@ mod tests {
 "#;

 	#[test]
-	fn contract_decode_failure() {
+	fn contract_decode_length_ignored() {
 		let mut mock_ext = MockExt::default();
-		let result = execute(
-			CODE_DECODE_FAILURE,
-			vec![],
-			&mut mock_ext,
-			&mut GasMeter::new(GAS_LIMIT),
+		let result = execute(CODE_DECODE_FAILURE, vec![], &mut mock_ext);
+		// `AccountId` implements `MaxEncodedLen` and therefore the supplied length is
+		// no longer needed nor used to determine how much is read from contract memory.
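+		// Editor's note: `MaxEncodedLen` (from the codec crate) exposes an upper
+		// bound on the SCALE-encoded size of a type, so the host can read exactly
+		// `<AccountId as MaxEncodedLen>::max_encoded_len()` bytes from contract
+		// memory and ignore whatever length the contract supplied.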
+ assert_ok!(result); + } + + #[test] + fn debug_message_works() { + const CODE_DEBUG_MESSAGE: &str = r#" +(module + (import "seal0" "seal_debug_message" (func $seal_debug_message (param i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (data (i32.const 0) "Hello World!") + + (func (export "call") + (call $seal_debug_message + (i32.const 0) ;; Pointer to the text buffer + (i32.const 12) ;; The size of the buffer + ) + drop + ) + + (func (export "deploy")) +) +"#; + let mut ext = MockExt::default(); + execute(CODE_DEBUG_MESSAGE, vec![], &mut ext).unwrap(); + + assert_eq!(std::str::from_utf8(&ext.debug_buffer).unwrap(), "Hello World!"); + } + + #[test] + fn debug_message_invalid_utf8_fails() { + const CODE_DEBUG_MESSAGE_FAIL: &str = r#" +(module + (import "seal0" "seal_debug_message" (func $seal_debug_message (param i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (data (i32.const 0) "\fc") + + (func (export "call") + (call $seal_debug_message + (i32.const 0) ;; Pointer to the text buffer + (i32.const 1) ;; The size of the buffer + ) + drop + ) + + (func (export "deploy")) +) +"#; + let mut ext = MockExt::default(); + let result = execute(CODE_DEBUG_MESSAGE_FAIL, vec![], &mut ext); + assert_eq!( + result, + Err(ExecError { + error: Error::::DebugMessageInvalidUTF8.into(), + origin: ErrorOrigin::Caller, + }) ); + } + + #[cfg(feature = "unstable-interface")] + const CODE_CALL_RUNTIME: &str = r#" +(module + (import "__unstable__" "seal_call_runtime" (func $seal_call_runtime (param i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + (func (export "call") + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + ;; Just use the call passed as input and store result to memory + (i32.store (i32.const 0) + (call $seal_call_runtime + (i32.const 4) ;; Pointer where the call is stored + (i32.load (i32.const 0)) ;; Size of the call + ) + ) + (call $seal_return + (i32.const 0) ;; flags + (i32.const 0) ;; returned value + (i32.const 4) ;; length of returned value + ) + ) + + (func (export "deploy")) +) +"#; + + #[test] + #[cfg(feature = "unstable-interface")] + fn call_runtime_works() { + use std::convert::TryInto; + let call = Call::System(frame_system::Call::remark { remark: b"Hello World".to_vec() }); + let mut ext = MockExt::default(); + let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); + assert_eq!(*ext.runtime_calls.borrow(), vec![call]); + // 0 = ReturnCode::Success + assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + } + + #[test] + #[cfg(feature = "unstable-interface")] + fn call_runtime_panics_on_invalid_call() { + let mut ext = MockExt::default(); + let result = execute(CODE_CALL_RUNTIME, vec![0x42], &mut ext); assert_eq!( result, Err(ExecError { @@ -1732,6 +1988,6 @@ mod tests { origin: ErrorOrigin::Caller, }) ); + assert_eq!(*ext.runtime_calls.borrow(), vec![]); } - } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 171fca6339fd3..c766914f3d46e 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -1,56 +1,49 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
-// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! This module takes care of loading, checking and preprocessing of a //! wasm module before execution. It also extracts some essential information //! from a module. -use crate::wasm::env_def::ImportSatisfyCheck; -use crate::wasm::PrefabWasmModule; -use crate::{Schedule, Trait}; - -use parity_wasm::elements::{self, Internal, External, MemoryType, Type, ValueType}; -use pwasm_utils; -use pwasm_utils::rules; +use crate::{ + chain_extension::ChainExtension, + wasm::{env_def::ImportSatisfyCheck, PrefabWasmModule}, + Config, Schedule, +}; +use pwasm_utils::parity_wasm::elements::{self, External, Internal, MemoryType, Type, ValueType}; +use sp_runtime::traits::Hash; use sp_std::prelude::*; -use sp_runtime::traits::{SaturatedConversion}; -/// Currently, all imported functions must be located inside this module. We might support -/// additional modules for versioning later. -pub const IMPORT_MODULE_FN: &str = "seal0"; - -/// Imported memory must be located inside this module. The reason for that is that current +/// Imported memory must be located inside this module. The reason for hardcoding is that current /// compiler toolchains might not support specifying other modules than "env" for memory imports. pub const IMPORT_MODULE_MEMORY: &str = "env"; -struct ContractModule<'a, T: Trait> { +struct ContractModule<'a, T: Config> { /// A deserialized module. The module is valid (this is Guaranteed by `new` method). module: elements::Module, schedule: &'a Schedule, } -impl<'a, T: Trait> ContractModule<'a, T> { +impl<'a, T: Config> ContractModule<'a, T> { /// Creates a new instance of `ContractModule`. /// /// Returns `Err` if the `original_code` couldn't be decoded or /// if it contains an invalid module. - fn new( - original_code: &[u8], - schedule: &'a Schedule, - ) -> Result { + fn new(original_code: &[u8], schedule: &'a Schedule) -> Result { use wasmi_validation::{validate_module, PlainValidator}; let module = @@ -61,10 +54,7 @@ impl<'a, T: Trait> ContractModule<'a, T> { // Return a `ContractModule` instance with // __valid__ module. - Ok(ContractModule { - module, - schedule, - }) + Ok(ContractModule { module, schedule }) } /// Ensures that module doesn't declare internal memories. 
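Editor's note: `ContractModule::new` above compresses a decode-then-validate flow into a few lines. As a standalone sketch (illustrative only; the `deserialize_buffer` call and the "Can't decode wasm code" string are assumptions, while "Module is not valid" appears in the tests further down), the same flow looks like:

    use pwasm_utils::parity_wasm::elements::{self, Module};
    use wasmi_validation::{validate_module, PlainValidator};

    fn decode_and_validate(original_code: &[u8]) -> Result<Module, &'static str> {
        // Deserialize the raw bytes into a parity-wasm module.
        let module: Module = elements::deserialize_buffer(original_code)
            .map_err(|_| "Can't decode wasm code")?;
        // Run plain wasm validation before any contract-specific checks
        // (memories, tables, globals, floats, ...) are applied.
        validate_module::<PlainValidator>(&module).map_err(|_| "Module is not valid")?;
        Ok(module)
    }

The contract-specific checks that follow in this file (`ensure_no_internal_memory`, the new `ensure_global_variable_limit` and `ensure_br_table_size_limit`, and so on) all operate on this already-validated module.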
@@ -73,11 +63,8 @@ impl<'a, T: Trait> ContractModule<'a, T> { /// Memory section contains declarations of internal linear memories, so if we find one /// we reject such a module. fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { - if self.module - .memory_section() - .map_or(false, |ms| ms.entries().len() > 0) - { - return Err("module declares internal memory"); + if self.module.memory_section().map_or(false, |ms| ms.entries().len() > 0) { + return Err("module declares internal memory") } Ok(()) } @@ -88,7 +75,7 @@ impl<'a, T: Trait> ContractModule<'a, T> { // In Wasm MVP spec, there may be at most one table declared. Double check this // explicitly just in case the Wasm version changes. if table_section.entries().len() > 1 { - return Err("multiple tables declared"); + return Err("multiple tables declared") } if let Some(table_type) = table_section.entries().first() { // Check the table's initial size as there is no instruction or environment function @@ -101,6 +88,33 @@ impl<'a, T: Trait> ContractModule<'a, T> { Ok(()) } + /// Ensure that any `br_table` instruction adheres to its immediate value limit. + fn ensure_br_table_size_limit(&self, limit: u32) -> Result<(), &'static str> { + let code_section = if let Some(type_section) = self.module.code_section() { + type_section + } else { + return Ok(()) + }; + for instr in code_section.bodies().iter().flat_map(|body| body.code().elements()) { + use self::elements::Instruction::BrTable; + if let BrTable(table) = instr { + if table.table.len() > limit as usize { + return Err("BrTable's immediate value is too big.") + } + } + } + Ok(()) + } + + fn ensure_global_variable_limit(&self, limit: u32) -> Result<(), &'static str> { + if let Some(global_section) = self.module.global_section() { + if global_section.entries().len() > limit as usize { + return Err("module declares too many globals") + } + } + Ok(()) + } + /// Ensures that no floating point types are in use. 
fn ensure_no_floating_types(&self) -> Result<(), &'static str> { if let Some(global_section) = self.module.global_section() { @@ -108,7 +122,7 @@ impl<'a, T: Trait> ContractModule<'a, T> { match global.global_type().content_type() { ValueType::F32 | ValueType::F64 => return Err("use of floating point type in globals is forbidden"), - _ => {} + _ => {}, } } } @@ -119,7 +133,7 @@ impl<'a, T: Trait> ContractModule<'a, T> { match local.value_type() { ValueType::F32 | ValueType::F64 => return Err("use of floating point type in locals is forbidden"), - _ => {} + _ => {}, } } } @@ -129,15 +143,17 @@ impl<'a, T: Trait> ContractModule<'a, T> { for wasm_type in type_section.types() { match wasm_type { Type::Function(func_type) => { - let return_type = func_type.return_type(); - for value_type in func_type.params().iter().chain(return_type.iter()) { + let return_type = func_type.results().get(0); + for value_type in func_type.params().iter().chain(return_type) { match value_type { ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in function types is forbidden"), - _ => {} + return Err( + "use of floating point type in function types is forbidden", + ), + _ => {}, } } - } + }, } } } @@ -145,34 +161,37 @@ impl<'a, T: Trait> ContractModule<'a, T> { Ok(()) } - fn inject_gas_metering(self) -> Result { - let gas_rules = - rules::Set::new( - self.schedule.instruction_weights.regular.clone().saturated_into(), - Default::default(), - ) - .with_grow_cost(self.schedule.instruction_weights.grow_mem.clone().saturated_into()) - .with_forbidden_floats(); + /// Ensure that no function exists that has more parameters than allowed. + fn ensure_parameter_limit(&self, limit: u32) -> Result<(), &'static str> { + let type_section = if let Some(type_section) = self.module.type_section() { + type_section + } else { + return Ok(()) + }; - let contract_module = pwasm_utils::inject_gas_counter( - self.module, - &gas_rules, - IMPORT_MODULE_FN - ).map_err(|_| "gas instrumentation failed")?; - Ok(ContractModule { - module: contract_module, - schedule: self.schedule, - }) + for Type::Function(func) in type_section.types() { + if func.params().len() > limit as usize { + return Err("Use of a function type with too many parameters.") + } + } + + Ok(()) + } + + fn inject_gas_metering(self) -> Result { + let gas_rules = self.schedule.rules(&self.module); + let contract_module = pwasm_utils::inject_gas_counter(self.module, &gas_rules, "seal0") + .map_err(|_| "gas instrumentation failed")?; + Ok(ContractModule { module: contract_module, schedule: self.schedule }) } fn inject_stack_height_metering(self) -> Result { - let contract_module = - pwasm_utils::stack_height::inject_limiter(self.module, self.schedule.max_stack_height) - .map_err(|_| "stack height instrumentation failed")?; - Ok(ContractModule { - module: contract_module, - schedule: self.schedule, - }) + let contract_module = pwasm_utils::stack_height::inject_limiter( + self.module, + self.schedule.limits.stack_height, + ) + .map_err(|_| "stack height instrumentation failed")?; + Ok(ContractModule { module: contract_module, schedule: self.schedule }) } /// Check that the module has required exported functions. 
For now @@ -189,14 +208,8 @@ impl<'a, T: Trait> ContractModule<'a, T> { let module = &self.module; let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let export_entries = module - .export_section() - .map(|is| is.entries()) - .unwrap_or(&[]); - let func_entries = module - .function_section() - .map(|fs| fs.entries()) - .unwrap_or(&[]); + let export_entries = module.export_section().map(|is| is.entries()).unwrap_or(&[]); + let func_entries = module.function_section().map(|fs| fs.entries()).unwrap_or(&[]); // Function index space consists of imported function following by // declared functions. Calculate the total number of imported functions so @@ -206,11 +219,9 @@ impl<'a, T: Trait> ContractModule<'a, T> { .map(|is| is.entries()) .unwrap_or(&[]) .iter() - .filter(|entry| { - match *entry.external() { - External::Function(_) => true, - _ => false, - } + .filter(|entry| match *entry.external() { + External::Function(_) => true, + _ => false, }) .count(); @@ -233,30 +244,32 @@ impl<'a, T: Trait> ContractModule<'a, T> { Some(fn_idx) => fn_idx, None => { // Underflow here means fn_idx points to imported function which we don't allow! - return Err("entry point points to an imported function"); - } + return Err("entry point points to an imported function") + }, }; // Then check the signature. // Both "call" and "deploy" has a () -> () function type. - let func_ty_idx = func_entries.get(fn_idx as usize) + // We still support () -> (i32) for backwards compatibility. + let func_ty_idx = func_entries + .get(fn_idx as usize) .ok_or_else(|| "export refers to non-existent function")? .type_ref(); let Type::Function(ref func_ty) = types .get(func_ty_idx as usize) .ok_or_else(|| "function has a non-existent type")?; - if !func_ty.params().is_empty() || - !(func_ty.return_type().is_none() || - func_ty.return_type() == Some(ValueType::I32)) { - return Err("entry point has wrong signature"); + if !(func_ty.params().is_empty() && + (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32])) + { + return Err("entry point has wrong signature") } } if !deploy_found { - return Err("deploy function isn't exported"); + return Err("deploy function isn't exported") } if !call_found { - return Err("call function isn't exported"); + return Err("call function isn't exported") } Ok(()) @@ -266,20 +279,17 @@ impl<'a, T: Trait> ContractModule<'a, T> { /// /// This accomplishes two tasks: /// - /// - checks any imported function against defined host functions set, incl. - /// their signatures. + /// - checks any imported function against defined host functions set, incl. their signatures. 
/// - if there is a memory import, returns it's descriptor /// `import_fn_banlist`: list of function names that are disallowed to be imported - fn scan_imports(&self, import_fn_banlist: &[&[u8]]) - -> Result, &'static str> - { + fn scan_imports( + &self, + import_fn_banlist: &[&[u8]], + ) -> Result, &'static str> { let module = &self.module; let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let import_entries = module - .import_section() - .map(|is| is.entries()) - .unwrap_or(&[]); + let import_entries = module.import_section().map(|is| is.entries()).unwrap_or(&[]); let mut imported_mem_type = None; @@ -287,15 +297,10 @@ impl<'a, T: Trait> ContractModule<'a, T> { let type_idx = match import.external() { &External::Table(_) => return Err("Cannot import tables"), &External::Global(_) => return Err("Cannot import globals"), - &External::Function(ref type_idx) => { - if import.module() != IMPORT_MODULE_FN { - return Err("Invalid module for imported function"); - } - type_idx - }, + &External::Function(ref type_idx) => type_idx, &External::Memory(ref memory_type) => { if import.module() != IMPORT_MODULE_MEMORY { - return Err("Invalid module for imported memory"); + return Err("Invalid module for imported memory") } if import.field() != "memory" { return Err("Memory import must have the field name 'memory'") @@ -304,57 +309,55 @@ impl<'a, T: Trait> ContractModule<'a, T> { return Err("Multiple memory imports defined") } imported_mem_type = Some(memory_type); - continue; - } + continue + }, }; let Type::Function(ref func_ty) = types .get(*type_idx as usize) .ok_or_else(|| "validation: import entry points to a non-existent type")?; - // We disallow importing `seal_println` unless debug features are enabled, - // which should only be allowed on a dev chain - if !self.schedule.enable_println && import.field().as_bytes() == b"seal_println" { - return Err("module imports `seal_println` but debug features disabled"); + if !T::ChainExtension::enabled() && + import.field().as_bytes() == b"seal_call_chain_extension" + { + return Err("module uses chain extensions but chain extensions are disabled") } - if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) - || !C::can_satisfy(import.field().as_bytes(), func_ty) + if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) || + !C::can_satisfy(import.module().as_bytes(), import.field().as_bytes(), func_ty) { - return Err("module imports a non-existent function"); + return Err("module imports a non-existent function") } } Ok(imported_mem_type) } fn into_wasm_code(self) -> Result, &'static str> { - elements::serialize(self.module) - .map_err(|_| "error serializing instrumented module") + elements::serialize(self.module).map_err(|_| "error serializing instrumented module") } } -fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule) - -> Result<(u32, u32), &'static str> -{ +fn get_memory_limits( + module: Option<&MemoryType>, + schedule: &Schedule, +) -> Result<(u32, u32), &'static str> { if let Some(memory_type) = module { // Inspect the module to extract the initial and maximum page count. 
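+		// Editor's note, summarising the match below (limits are in 64KiB wasm pages;
+		// the guarded arms are tried first):
+		//   initial > maximum                        -> Err: initial must not exceed the requested maximum
+		//   maximum > schedule.limits.memory_pages   -> Err: exceeds the configured maximum
+		//   (initial, Some(maximum))                 -> Ok((initial, maximum))
+		//   (_, None)                                -> Err: a maximum must always be declared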
let limits = memory_type.limits(); match (limits.initial(), limits.maximum()) { - (initial, Some(maximum)) if initial > maximum => { + (initial, Some(maximum)) if initial > maximum => return Err( "Requested initial number of pages should not exceed the requested maximum", - ); - } - (_, Some(maximum)) if maximum > schedule.max_memory_pages => { - return Err("Maximum number of pages should not exceed the configured maximum."); - } + ), + (_, Some(maximum)) if maximum > schedule.limits.memory_pages => + return Err("Maximum number of pages should not exceed the configured maximum."), (initial, Some(maximum)) => Ok((initial, maximum)), (_, None) => { // Maximum number of pages should be always declared. // This isn't a hard requirement and can be treated as a maximum set // to configured maximum. - return Err("Maximum number of pages should be always declared."); - } + return Err("Maximum number of pages should be always declared.") + }, } } else { // If none memory imported then just crate an empty placeholder. @@ -363,47 +366,81 @@ fn get_memory_limits(module: Option<&MemoryType>, schedule: &Schedule< } } -/// Loads the given module given in `original_code`, performs some checks on it and -/// does some preprocessing. -/// -/// The checks are: -/// -/// - provided code is a valid wasm module. -/// - the module doesn't define an internal memory instance, -/// - imported memory (if any) doesn't reserve more memory than permitted by the `schedule`, -/// - all imported functions from the external environment matches defined by `env` module, -/// -/// The preprocessing includes injecting code for gas metering and metering the height of stack. -pub fn prepare_contract( +fn check_and_instrument( original_code: &[u8], schedule: &Schedule, -) -> Result { - let mut contract_module = ContractModule::new(original_code, schedule)?; +) -> Result<(Vec, (u32, u32)), &'static str> { + let contract_module = ContractModule::new(&original_code, schedule)?; contract_module.scan_exports()?; contract_module.ensure_no_internal_memory()?; - contract_module.ensure_table_size_limit(schedule.max_table_size)?; + contract_module.ensure_table_size_limit(schedule.limits.table_size)?; + contract_module.ensure_global_variable_limit(schedule.limits.globals)?; contract_module.ensure_no_floating_types()?; + contract_module.ensure_parameter_limit(schedule.limits.parameters)?; + contract_module.ensure_br_table_size_limit(schedule.limits.br_table_size)?; // We disallow importing `gas` function here since it is treated as implementation detail. let disallowed_imports = [b"gas".as_ref()]; - let memory_limits = get_memory_limits( - contract_module.scan_imports::(&disallowed_imports)?, - schedule - )?; + let memory_limits = + get_memory_limits(contract_module.scan_imports::(&disallowed_imports)?, schedule)?; - contract_module = contract_module + let code = contract_module .inject_gas_metering()? - .inject_stack_height_metering()?; + .inject_stack_height_metering()? 
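+		// Editor's note: both passes above rewrite the module in memory, so the
+		// bytes produced by `into_wasm_code` below are the *instrumented* artifact.
+		// `do_preparation` additionally keeps `original_code` around so a contract
+		// can be re-instrumented against a newer schedule (see `reinstrument_contract`).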
+ .into_wasm_code()?; + + Ok((code, memory_limits)) +} +fn do_preparation( + original_code: Vec, + schedule: &Schedule, +) -> Result, &'static str> { + let (code, (initial, maximum)) = + check_and_instrument::(original_code.as_ref(), schedule)?; Ok(PrefabWasmModule { - schedule_version: schedule.version, - initial: memory_limits.0, - maximum: memory_limits.1, + instruction_weights_version: schedule.instruction_weights.version, + initial, + maximum, _reserved: None, - code: contract_module.into_wasm_code()?, + code, + original_code_len: original_code.len() as u32, + refcount: 1, + code_hash: T::Hashing::hash(&original_code), + original_code: Some(original_code), }) } +/// Loads the given module given in `original_code`, performs some checks on it and +/// does some preprocessing. +/// +/// The checks are: +/// +/// - provided code is a valid wasm module. +/// - the module doesn't define an internal memory instance, +/// - imported memory (if any) doesn't reserve more memory than permitted by the `schedule`, +/// - all imported functions from the external environment matches defined by `env` module, +/// +/// The preprocessing includes injecting code for gas metering and metering the height of stack. +pub fn prepare_contract( + original_code: Vec, + schedule: &Schedule, +) -> Result, &'static str> { + do_preparation::(original_code, schedule) +} + +/// The same as [`prepare_contract`] but without constructing a new [`PrefabWasmModule`] +/// +/// # Note +/// +/// Use this when an existing contract should be re-instrumented with a newer schedule version. +pub fn reinstrument_contract( + original_code: Vec, + schedule: &Schedule, +) -> Result, &'static str> { + Ok(check_and_instrument::(&original_code, schedule)?.0) +} + /// Alternate (possibly unsafe) preparation functions used only for benchmarking. /// /// For benchmarking we need to construct special contracts that might not pass our @@ -412,29 +449,31 @@ pub fn prepare_contract( /// in production code. #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking { - use super::{ - Trait, ContractModule, PrefabWasmModule, ImportSatisfyCheck, Schedule, get_memory_limits - }; - use parity_wasm::elements::FunctionType; + use super::{elements::FunctionType, *}; impl ImportSatisfyCheck for () { - fn can_satisfy(_name: &[u8], _func_type: &FunctionType) -> bool { + fn can_satisfy(_module: &[u8], _name: &[u8], _func_type: &FunctionType) -> bool { true } } /// Prepare function that neither checks nor instruments the passed in code. 
- pub fn prepare_contract(original_code: &[u8], schedule: &Schedule) - -> Result - { - let contract_module = ContractModule::new(original_code, schedule)?; + pub fn prepare_contract( + original_code: Vec, + schedule: &Schedule, + ) -> Result, &'static str> { + let contract_module = ContractModule::new(&original_code, schedule)?; let memory_limits = get_memory_limits(contract_module.scan_imports::<()>(&[])?, schedule)?; Ok(PrefabWasmModule { - schedule_version: schedule.version, + instruction_weights_version: schedule.instruction_weights.version, initial: memory_limits.0, maximum: memory_limits.1, _reserved: None, code: contract_module.into_wasm_code()?, + original_code_len: original_code.len() as u32, + refcount: 1, + code_hash: T::Hashing::hash(&original_code), + original_code: Some(original_code), }) } } @@ -442,42 +481,59 @@ pub mod benchmarking { #[cfg(test)] mod tests { use super::*; - use crate::exec::Ext; + use crate::{exec::Ext, schedule::Limits}; use std::fmt; - use assert_matches::assert_matches; - impl fmt::Debug for PrefabWasmModule { + impl fmt::Debug for PrefabWasmModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "PreparedContract {{ .. }}") } } - // Define test environment for tests. We need ImportSatisfyCheck - // implementation from it. So actual implementations doesn't matter. - define_env!(TestEnv, , - panic(_ctx) => { unreachable!(); }, + /// Using unreachable statements triggers unreachable warnings in the generated code + #[allow(unreachable_code)] + mod env { + use super::*; - // gas is an implementation defined function and a contract can't import it. - gas(_ctx, _amount: u32) => { unreachable!(); }, + // Define test environment for tests. We need ImportSatisfyCheck + // implementation from it. So actual implementations doesn't matter. + define_env!(Test, , + [seal0] panic(_ctx) => { unreachable!(); }, - nop(_ctx, _unused: u64) => { unreachable!(); }, + // gas is an implementation defined function and a contract can't import it. + [seal0] gas(_ctx, _amount: u32) => { unreachable!(); }, - seal_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, - ); + [seal0] nop(_ctx, _unused: u64) => { unreachable!(); }, + + // new version of nop with other data type for argumebt + [seal1] nop(_ctx, _unused: i32) => { unreachable!(); }, + ); + } macro_rules! prepare_test { ($name:ident, $wat:expr, $($expected:tt)*) => { #[test] fn $name() { let wasm = wat::parse_str($wat).unwrap(); - let schedule = Schedule::default(); - let r = prepare_contract::(wasm.as_ref(), &schedule); - assert_matches!(r, $($expected)*); + let schedule = Schedule { + limits: Limits { + globals: 3, + parameters: 3, + memory_pages: 16, + table_size: 3, + br_table_size: 3, + .. Default::default() + }, + .. Default::default() + }; + let r = do_preparation::(wasm, &schedule); + assert_matches::assert_matches!(r, $($expected)*); } }; } - prepare_test!(no_floats, + prepare_test!( + no_floats, r#" (module (func (export "call") @@ -493,16 +549,73 @@ mod tests { Err("gas instrumentation failed") ); - mod memories { + mod functions { use super::*; - // Tests below assumes that maximum page number is configured to a certain number. 
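+			// Editor's note: the removed `assume_memory_size` self-check below is no
+			// longer needed because `prepare_test!` now pins its own `Limits`
+			// (memory_pages: 16, table_size: 3, br_table_size: 3, globals: 3,
+			// parameters: 3) rather than relying on the schedule's defaults.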
- #[test] - fn assume_memory_size() { - assert_eq!(>::default().max_memory_pages, 16); - } + prepare_test!( + param_number_valid, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func (param i32 i32 i32)) + ) + "#, + Ok(_) + ); + + prepare_test!( + param_number_invalid, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func (param i32 i32 i32 i32)) + (func (param i32)) + ) + "#, + Err("Use of a function type with too many parameters.") + ); + } + + mod globals { + use super::*; + + prepare_test!( + global_number_valid, + r#" + (module + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (func (export "call")) + (func (export "deploy")) + ) + "#, + Ok(_) + ); - prepare_test!(memory_with_one_page, + prepare_test!( + global_number_too_high, + r#" + (module + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (global i64 (i64.const 0)) + (func (export "call")) + (func (export "deploy")) + ) + "#, + Err("module declares too many globals") + ); + } + + mod memories { + use super::*; + + prepare_test!( + memory_with_one_page, r#" (module (import "env" "memory" (memory 1 1)) @@ -514,7 +627,8 @@ mod tests { Ok(_) ); - prepare_test!(internal_memory_declaration, + prepare_test!( + internal_memory_declaration, r#" (module (memory 1 1) @@ -526,7 +640,8 @@ mod tests { Err("module declares internal memory") ); - prepare_test!(no_memory_import, + prepare_test!( + no_memory_import, r#" (module ;; no memory imported @@ -537,7 +652,8 @@ mod tests { Ok(_) ); - prepare_test!(initial_exceeds_maximum, + prepare_test!( + initial_exceeds_maximum, r#" (module (import "env" "memory" (memory 16 1)) @@ -549,7 +665,8 @@ mod tests { Err("Module is not valid") ); - prepare_test!(no_maximum, + prepare_test!( + no_maximum, r#" (module (import "env" "memory" (memory 1)) @@ -561,7 +678,21 @@ mod tests { Err("Maximum number of pages should be always declared.") ); - prepare_test!(requested_maximum_exceeds_configured_maximum, + prepare_test!( + requested_maximum_valid, + r#" + (module + (import "env" "memory" (memory 1 16)) + + (func (export "call")) + (func (export "deploy")) + ) + "#, + Ok(_) + ); + + prepare_test!( + requested_maximum_exceeds_configured_maximum, r#" (module (import "env" "memory" (memory 1 17)) @@ -573,7 +704,8 @@ mod tests { Err("Maximum number of pages should not exceed the configured maximum.") ); - prepare_test!(field_name_not_memory, + prepare_test!( + field_name_not_memory, r#" (module (import "env" "forgetit" (memory 1 1)) @@ -585,7 +717,8 @@ mod tests { Err("Memory import must have the field name 'memory'") ); - prepare_test!(multiple_memory_imports, + prepare_test!( + multiple_memory_imports, r#" (module (import "env" "memory" (memory 1 1)) @@ -598,7 +731,8 @@ mod tests { Err("Module is not valid") ); - prepare_test!(table_import, + prepare_test!( + table_import, r#" (module (import "seal0" "table" (table 1 anyfunc)) @@ -610,7 +744,8 @@ mod tests { Err("Cannot import tables") ); - prepare_test!(global_import, + prepare_test!( + global_import, r#" (module (global $g (import "seal0" "global") i32) @@ -625,13 +760,8 @@ mod tests { mod tables { use super::*; - // Tests below assumes that maximum table size is configured to a certain number. 
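+			// Editor's note: in the `br_table` tests below the limit applies to the
+			// instruction's jump table excluding the default target. Assuming
+			// parity-wasm's representation, `br_table 0 0 0 0` carries three table
+			// entries plus a default (within the limit of 3), while
+			// `br_table 0 0 0 0 0` carries four and is rejected.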
- #[test] - fn assume_table_size() { - assert_eq!(>::default().max_table_size, 16384); - } - - prepare_test!(no_tables, + prepare_test!( + no_tables, r#" (module (func (export "call")) @@ -641,10 +771,11 @@ mod tests { Ok(_) ); - prepare_test!(table_valid_size, + prepare_test!( + table_valid_size, r#" (module - (table 10000 funcref) + (table 3 funcref) (func (export "call")) (func (export "deploy")) @@ -653,22 +784,53 @@ mod tests { Ok(_) ); - prepare_test!(table_too_big, + prepare_test!( + table_too_big, r#" (module - (table 20000 funcref) + (table 4 funcref) (func (export "call")) (func (export "deploy")) )"#, Err("table exceeds maximum size allowed") ); + + prepare_test!( + br_table_valid_size, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func + i32.const 0 + br_table 0 0 0 0 + ) + ) + "#, + Ok(_) + ); + + prepare_test!( + br_table_too_big, + r#" + (module + (func (export "call")) + (func (export "deploy")) + (func + i32.const 0 + br_table 0 0 0 0 0 + ) + )"#, + Err("BrTable's immediate value is too big.") + ); } mod imports { use super::*; - prepare_test!(can_import_legit_function, + prepare_test!( + can_import_legit_function, r#" (module (import "seal0" "nop" (func (param i64))) @@ -682,7 +844,8 @@ mod tests { // even though gas is defined the contract can't import it since // it is an implementation defined. - prepare_test!(can_not_import_gas_function, + prepare_test!( + can_not_import_gas_function, r#" (module (import "seal0" "gas" (func (param i32))) @@ -695,7 +858,8 @@ mod tests { ); // memory is in "env" and not in "seal0" - prepare_test!(memory_not_in_seal0, + prepare_test!( + memory_not_in_seal0, r#" (module (import "seal0" "memory" (memory 1 1)) @@ -708,7 +872,8 @@ mod tests { ); // memory is in "env" and not in some arbitrary module - prepare_test!(memory_not_in_arbitrary_module, + prepare_test!( + memory_not_in_arbitrary_module, r#" (module (import "any_module" "memory" (memory 1 1)) @@ -720,34 +885,22 @@ mod tests { Err("Invalid module for imported memory") ); - // functions are in "env" and not in "seal0" - prepare_test!(function_not_in_env, + prepare_test!( + function_in_other_module_works, r#" (module - (import "env" "nop" (func (param i64))) + (import "seal1" "nop" (func (param i32))) (func (export "call")) (func (export "deploy")) ) "#, - Err("Invalid module for imported function") - ); - - // functions are in "seal0" and not in in some arbitrary module - prepare_test!(function_not_arbitrary_module, - r#" - (module - (import "any_module" "nop" (func (param i64))) - - (func (export "call")) - (func (export "deploy")) - ) - "#, - Err("Invalid module for imported function") + Ok(_) ); // wrong signature - prepare_test!(wrong_signature, + prepare_test!( + wrong_signature, r#" (module (import "seal0" "gas" (func (param i64))) @@ -759,7 +912,8 @@ mod tests { Err("module imports a non-existent function") ); - prepare_test!(unknown_func_name, + prepare_test!( + unknown_func_name, r#" (module (import "seal0" "unknown_func" (func)) @@ -770,42 +924,13 @@ mod tests { "#, Err("module imports a non-existent function") ); - - prepare_test!(seal_println_debug_disabled, - r#" - (module - (import "seal0" "seal_println" (func $seal_println (param i32 i32))) - - (func (export "call")) - (func (export "deploy")) - ) - "#, - Err("module imports `seal_println` but debug features disabled") - ); - - #[test] - fn seal_println_debug_enabled() { - let wasm = wat::parse_str( - r#" - (module - (import "seal0" "seal_println" (func $seal_println (param i32 i32))) - - (func 
(export "call")) - (func (export "deploy")) - ) - "# - ).unwrap(); - let mut schedule = Schedule::default(); - schedule.enable_println = true; - let r = prepare_contract::(wasm.as_ref(), &schedule); - assert_matches!(r, Ok(_)); - } } mod entrypoints { use super::*; - prepare_test!(it_works, + prepare_test!( + it_works, r#" (module (func (export "call")) @@ -815,7 +940,8 @@ mod tests { Ok(_) ); - prepare_test!(omit_deploy, + prepare_test!( + omit_deploy, r#" (module (func (export "call")) @@ -824,7 +950,8 @@ mod tests { Err("deploy function isn't exported") ); - prepare_test!(omit_call, + prepare_test!( + omit_call, r#" (module (func (export "deploy")) @@ -834,7 +961,8 @@ mod tests { ); // Try to use imported function as an entry point. - prepare_test!(try_sneak_export_as_entrypoint, + prepare_test!( + try_sneak_export_as_entrypoint, r#" (module (import "seal0" "panic" (func)) @@ -848,7 +976,8 @@ mod tests { ); // Try to use imported function as an entry point. - prepare_test!(try_sneak_export_as_global, + prepare_test!( + try_sneak_export_as_global, r#" (module (func (export "deploy")) @@ -858,7 +987,8 @@ mod tests { Err("expected a function") ); - prepare_test!(wrong_signature, + prepare_test!( + wrong_signature, r#" (module (func (export "deploy")) @@ -868,7 +998,8 @@ mod tests { Err("entry point has wrong signature") ); - prepare_test!(unknown_exports, + prepare_test!( + unknown_exports, r#" (module (func (export "call")) @@ -879,7 +1010,8 @@ mod tests { Err("unknown export: expecting only deploy and call functions") ); - prepare_test!(global_float, + prepare_test!( + global_float, r#" (module (global $x f32 (f32.const 0)) @@ -890,7 +1022,8 @@ mod tests { Err("use of floating point type in globals is forbidden") ); - prepare_test!(local_float, + prepare_test!( + local_float, r#" (module (func $foo (local f32)) @@ -901,7 +1034,8 @@ mod tests { Err("use of floating point type in locals is forbidden") ); - prepare_test!(param_float, + prepare_test!( + param_float, r#" (module (func $foo (param f32)) @@ -912,7 +1046,8 @@ mod tests { Err("use of floating point type in function types is forbidden") ); - prepare_test!(result_float, + prepare_test!( + result_float, r#" (module (func $foo (result f32) (f32.const 0)) diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index d966ff85d9652..52b864bf18eac 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1,42 +1,47 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Environment definition of the wasm smart-contract runtime. -use crate::{HostFnWeights, Schedule, Trait, CodeHash, BalanceOf, Error}; -use crate::exec::{ - Ext, ExecResult, ExecReturnValue, StorageKey, TopicOf, ReturnFlags, ExecError +use crate::{ + exec::{ExecError, ExecResult, Ext, StorageKey, TopicOf}, + gas::{ChargedAmount, Token}, + schedule::HostFnWeights, + wasm::env_def::ConvertibleToWasm, + BalanceOf, CodeHash, Config, Error, }; -use crate::gas::{Gas, GasMeter, Token, GasMeterResult}; -use crate::wasm::env_def::ConvertibleToWasm; -use sp_sandbox; -use parity_wasm::elements::ValueType; -use frame_system; -use frame_support::dispatch::DispatchError; +use bitflags::bitflags; +use codec::{Decode, DecodeAll, Encode, MaxEncodedLen}; +use frame_support::{dispatch::DispatchError, ensure, weights::Weight}; +use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; +use pwasm_utils::parity_wasm::elements::ValueType; +use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; +use sp_runtime::traits::Bounded; use sp_std::prelude::*; -use codec::{Decode, Encode}; -use sp_runtime::traits::SaturatedConversion; -use sp_io::hashing::{ - keccak_256, - blake2_256, - blake2_128, - sha2_256, -}; /// Every error that can be returned to a contract when it calls any of the host functions. +/// +/// # Note +/// +/// This enum can be extended in the future: New codes can be added but existing codes +/// will not be changed or removed. This means that any contract **must not** exhaustively +/// match return codes. Instead, contracts should prepare for unknown variants and deal with +/// those errors gracefuly in order to be forward compatible. #[repr(u32)] pub enum ReturnCode { /// API call successful. @@ -60,9 +65,17 @@ pub enum ReturnCode { NewContractNotFunded = 6, /// No code could be found at the supplied code hash. CodeNotFound = 7, - /// The contract that was called is either no contract at all (a plain account) - /// or is a tombstone. + /// The contract that was called is no contract (a plain account). NotCallable = 8, + /// The call to `seal_debug_message` had no effect because debug message + /// recording was disabled. + LoggingDisabled = 9, + /// The call dispatched by `seal_call_runtime` was executed but returned an error. + #[cfg(feature = "unstable-interface")] + CallRuntimeReturnedError = 10, + /// ECDSA pubkey recovery failed. Most probably wrong recovery id or signature. + #[cfg(feature = "unstable-interface")] + EcdsaRecoverFailed = 11, } impl ConvertibleToWasm for ReturnCode { @@ -88,7 +101,7 @@ impl From for ReturnCode { } /// The data passed through when a contract uses `seal_return`. -struct ReturnData { +pub struct ReturnData { /// The flags as passed through by the contract. They are still unchecked and /// will later be parsed into a `ReturnFlags` bitflags struct. flags: u32, @@ -102,7 +115,7 @@ struct ReturnData { /// occurred (the SupervisorError variant). 
/// The other case is where the trap does not constitute an error but rather was invoked /// as a quick way to terminate the application (all other variants). -enum TrapReason { +pub enum TrapReason { /// The supervisor trapped the contract because of an error condition occurred during /// execution in privileged code. SupervisorError(DispatchError), @@ -111,99 +124,17 @@ enum TrapReason { /// Signals that a trap was generated in response to a successful call to the /// `seal_terminate` host function. Termination, - /// Signals that a trap was generated because of a successful restoration. - Restoration, } -/// Can only be used for one call. -pub(crate) struct Runtime<'a, E: Ext + 'a> { - ext: &'a mut E, - input_data: Option>, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - trap_reason: Option, -} -impl<'a, E: Ext + 'a> Runtime<'a, E> { - pub(crate) fn new( - ext: &'a mut E, - input_data: Vec, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - ) -> Self { - Runtime { - ext, - input_data: Some(input_data), - schedule, - memory, - gas_meter, - trap_reason: None, - } - } -} - -/// Converts the sandbox result and the runtime state into the execution outcome. -/// -/// It evaluates information stored in the `trap_reason` variable of the runtime and -/// bases the outcome on the value if this variable. Only if `trap_reason` is `None` -/// the result of the sandbox is evaluated. -pub(crate) fn to_execution_result( - runtime: Runtime, - sandbox_result: Result, -) -> ExecResult { - // If a trap reason is set we base our decision solely on that. - if let Some(trap_reason) = runtime.trap_reason { - return match trap_reason { - // The trap was the result of the execution `return` host function. - TrapReason::Return(ReturnData{ flags, data }) => { - let flags = ReturnFlags::from_bits(flags).ok_or_else(|| - "used reserved bit in return flags" - )?; - Ok(ExecReturnValue { - flags, - data, - }) - }, - TrapReason::Termination => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }) - }, - TrapReason::Restoration => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Vec::new(), - }) - }, - TrapReason::SupervisorError(error) => Err(error)?, - } - } - - // Check the exact type of the error. - match sandbox_result { - // No traps were generated. Proceed normally. - Ok(_) => { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) - } - // `Error::Module` is returned only if instantiation or linking failed (i.e. - // wasm binary tried to import a function that is not provided by the host). - // This shouldn't happen because validation process ought to reject such binaries. - // - // Because panics are really undesirable in the runtime code, we treat this as - // a trap for now. Eventually, we might want to revisit this. - Err(sp_sandbox::Error::Module) => - Err("validation error")?, - // Any other kind of a trap should result in a failure. - Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => - Err(Error::::ContractTrapped)? +impl> From for TrapReason { + fn from(from: T) -> Self { + Self::SupervisorError(from.into()) } } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] -pub enum RuntimeToken { +pub enum RuntimeCosts { /// Charge the gas meter with the cost of a metering block. The charged costs are /// the supplied cost of the block plus the overhead of the metering itself. 
MeteringBlock(u32), @@ -219,10 +150,8 @@ pub enum RuntimeToken { ValueTransferred, /// Weight of calling `seal_minimum_balance`. MinimumBalance, - /// Weight of calling `seal_tombstone_deposit`. - TombstoneDeposit, - /// Weight of calling `seal_rent_allowance`. - RentAllowance, + /// Weight of calling `seal_contract_deposit`. + ContractDeposit, /// Weight of calling `seal_block_number`. BlockNumber, /// Weight of calling `seal_now`. @@ -237,14 +166,12 @@ pub enum RuntimeToken { Return(u32), /// Weight of calling `seal_terminate`. Terminate, - /// Weight of calling `seal_restore_to` per number of supplied delta entries. - RestoreTo(u32), /// Weight of calling `seal_random`. It includes the weight for copying the subject. Random, - /// Weight of calling `seal_reposit_event` with the given number of topics and event size. - DepositEvent{num_topic: u32, len: u32}, - /// Weight of calling `seal_set_rent_allowance`. - SetRentAllowance, + /// Weight of calling `seal_deposit_event` with the given number of topics and event size. + DepositEvent { num_topic: u32, len: u32 }, + /// Weight of calling `seal_debug_message`. + DebugMessage, /// Weight of calling `seal_set_storage` for the given storage item size. SetStorage(u32), /// Weight of calling `seal_clear_storage`. @@ -261,10 +188,10 @@ pub enum RuntimeToken { CallSurchargeTransfer, /// Weight of output received through `seal_call` for the given size. CallCopyOut(u32), - /// Weight of calling `seal_instantiate` for the given input size without output weight. + /// Weight of calling `seal_instantiate` for the given input and salt without output weight. /// This includes the transfer because an instantiate without a value will always be below /// the existential deposit and is disregarded as a corner case. - InstantiateBase(u32), + InstantiateBase { input_data_len: u32, salt_len: u32 }, /// Weight of output received through `seal_instantiate` for the given size. InstantiateCopyOut(u32), /// Weight of calling `seal_hash_sha_256` for the given input size. @@ -275,14 +202,27 @@ pub enum RuntimeToken { HashBlake256(u32), /// Weight of calling `seal_hash_blake2_128` for the given input size. HashBlake128(u32), + /// Weight of calling `seal_ecdsa_recover`. + #[cfg(feature = "unstable-interface")] + EcdsaRecovery, + /// Weight charged by a chain extension through `seal_call_chain_extension`. + ChainExtension(u64), + /// Weight charged for copying data from the sandbox. + #[cfg(feature = "unstable-interface")] + CopyIn(u32), + /// Weight charged for calling into the runtime.
+ #[cfg(feature = "unstable-interface")] + CallRuntime(Weight), } -impl Token for RuntimeToken { - type Metadata = HostFnWeights; - - fn calculate_amount(&self, s: &Self::Metadata) -> Gas { - use self::RuntimeToken::*; - match *self { +impl RuntimeCosts { + fn token(&self, s: &HostFnWeights) -> RuntimeToken + where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, + { + use self::RuntimeCosts::*; + let weight = match *self { MeteringBlock(amount) => s.gas.saturating_add(amount.into()), Caller => s.caller, Address => s.address, @@ -290,258 +230,504 @@ impl Token for RuntimeToken { Balance => s.balance, ValueTransferred => s.value_transferred, MinimumBalance => s.minimum_balance, - TombstoneDeposit => s.tombstone_deposit, - RentAllowance => s.rent_allowance, + ContractDeposit => s.contract_deposit, BlockNumber => s.block_number, Now => s.now, WeightToFee => s.weight_to_fee, InputBase => s.input, InputCopyOut(len) => s.input_per_byte.saturating_mul(len.into()), - Return(len) => s.r#return - .saturating_add(s.return_per_byte.saturating_mul(len.into())), + Return(len) => s.r#return.saturating_add(s.return_per_byte.saturating_mul(len.into())), Terminate => s.terminate, - RestoreTo(delta) => s.restore_to - .saturating_add(s.restore_to_per_delta.saturating_mul(delta.into())), Random => s.random, - DepositEvent{num_topic, len} => s.deposit_event + DepositEvent { num_topic, len } => s + .deposit_event .saturating_add(s.deposit_event_per_topic.saturating_mul(num_topic.into())) .saturating_add(s.deposit_event_per_byte.saturating_mul(len.into())), - SetRentAllowance => s.set_rent_allowance, - SetStorage(len) => s.set_storage - .saturating_add(s.set_storage_per_byte.saturating_mul(len.into())), + DebugMessage => s.debug_message, + SetStorage(len) => + s.set_storage.saturating_add(s.set_storage_per_byte.saturating_mul(len.into())), ClearStorage => s.clear_storage, GetStorageBase => s.get_storage, GetStorageCopyOut(len) => s.get_storage_per_byte.saturating_mul(len.into()), Transfer => s.transfer, - CallBase(len) => s.call - .saturating_add(s.call_per_input_byte.saturating_mul(len.into())), + CallBase(len) => + s.call.saturating_add(s.call_per_input_byte.saturating_mul(len.into())), CallSurchargeTransfer => s.call_transfer_surcharge, CallCopyOut(len) => s.call_per_output_byte.saturating_mul(len.into()), - InstantiateBase(len) => s.instantiate - .saturating_add(s.instantiate_per_input_byte.saturating_mul(len.into())), - InstantiateCopyOut(len) => s.instantiate_per_output_byte - .saturating_mul(len.into()), - HashSha256(len) => s.hash_sha2_256 + InstantiateBase { input_data_len, salt_len } => s + .instantiate + .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into())) + .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())), + InstantiateCopyOut(len) => s.instantiate_per_output_byte.saturating_mul(len.into()), + HashSha256(len) => s + .hash_sha2_256 .saturating_add(s.hash_sha2_256_per_byte.saturating_mul(len.into())), - HashKeccak256(len) => s.hash_keccak_256 + HashKeccak256(len) => s + .hash_keccak_256 .saturating_add(s.hash_keccak_256_per_byte.saturating_mul(len.into())), - HashBlake256(len) => s.hash_blake2_256 + HashBlake256(len) => s + .hash_blake2_256 .saturating_add(s.hash_blake2_256_per_byte.saturating_mul(len.into())), - HashBlake128(len) => s.hash_blake2_128 + HashBlake128(len) => s + .hash_blake2_128 .saturating_add(s.hash_blake2_128_per_byte.saturating_mul(len.into())), + #[cfg(feature = "unstable-interface")] + EcdsaRecovery => 
s.ecdsa_recover, + ChainExtension(amount) => amount, + #[cfg(feature = "unstable-interface")] + CopyIn(len) => s.return_per_byte.saturating_mul(len.into()), + #[cfg(feature = "unstable-interface")] + CallRuntime(weight) => weight, + }; + RuntimeToken { + #[cfg(test)] + _created_from: *self, + weight, } } } -/// Charge the gas meter with the specified token. -/// -/// Returns `Err(HostError)` if there is not enough gas. -fn charge_gas(ctx: &mut Runtime, token: Tok) -> Result<(), sp_sandbox::HostError> +#[cfg_attr(test, derive(Debug, PartialEq, Eq))] +#[derive(Copy, Clone)] +struct RuntimeToken { + #[cfg(test)] + _created_from: RuntimeCosts, + weight: Weight, +} + +impl Token for RuntimeToken where - E: Ext, - Tok: Token, + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, { - match ctx.gas_meter.charge(&ctx.schedule.host_fn_weights, token) { - GasMeterResult::Proceed => Ok(()), - GasMeterResult::OutOfGas => { - ctx.trap_reason = Some(TrapReason::SupervisorError(Error::::OutOfGas.into())); - Err(sp_sandbox::HostError) - }, + fn weight(&self) -> Weight { + self.weight } } -/// Read designated chunk from the sandbox memory. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - requested buffer is not within the bounds of the sandbox memory. -fn read_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - len: u32, -) -> Result, sp_sandbox::HostError> { - let mut buf = vec![0u8; len as usize]; - ctx.memory.get(ptr, buf.as_mut_slice()) - .map_err(|_| store_err(ctx, Error::::OutOfBounds))?; - Ok(buf) +bitflags! { + /// Flags used to change the behaviour of `seal_call`. + struct CallFlags: u32 { + /// Forward the input of the current function to the callee. + /// + /// Supplied input pointers are ignored when set. + /// + /// # Note + /// + /// A forwarding call will consume the current contract's input. Any attempt to + /// access the input after this call returns will lead to [`Error::InputForwarded`]. + /// It does not matter if this is due to calling `seal_input` or trying another + /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve + /// the input. + const FORWARD_INPUT = 0b0000_0001; + /// Identical to [`Self::FORWARD_INPUT`] but without consuming the input. + /// + /// This adds some additional weight costs to the call. + /// + /// # Note + /// + /// This implies [`Self::FORWARD_INPUT`] and takes precedence when both are set. + const CLONE_INPUT = 0b0000_0010; + /// Do not return from the call but rather return the result of the callee to the + /// caller's caller. + /// + /// # Note + /// + /// This makes the current contract completely transparent to its caller by replacing + /// this contract's potential output with the callee's. Any code after `seal_call` + /// can be safely considered unreachable. + const TAIL_CALL = 0b0000_0100; + /// Allow the callee to reenter into the current contract. + /// + /// Without this flag any reentrancy into the current contract that originates from + /// the callee (or any of its callees) is denied. This includes the first callee: + /// You cannot call into yourself with this flag set. + const ALLOW_REENTRY = 0b0000_1000; + } } -/// Read designated chunk from the sandbox memory into the supplied buffer. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - requested buffer is not within the bounds of the sandbox memory.
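// --- Illustrative sketch (not part of this diff): composing the flags defined above.
// `bitflags` provides bitwise composition and a checked `from_bits` for raw values.
fn demo_call_flags() {
    let flags = CallFlags::CLONE_INPUT | CallFlags::TAIL_CALL;
    assert!(flags.contains(CallFlags::CLONE_INPUT));
    assert!(!flags.contains(CallFlags::ALLOW_REENTRY));
    // A reserved bit is rejected, mirroring the check done by the unstable seal_call.
    assert!(CallFlags::from_bits(0b1_0000).is_none());
}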
-fn read_sandbox_memory_into_buf( - ctx: &mut Runtime, - ptr: u32, - buf: &mut [u8], -) -> Result<(), sp_sandbox::HostError> { - ctx.memory.get(ptr, buf).map_err(|_| store_err(ctx, Error::::OutOfBounds)) +/// This is only appropriate when writing out data of constant size that does not depend on user +/// input. In this case the costs for this copy were already charged as part of the token at +/// the beginning of the API entry point. +fn already_charged(_: u32) -> Option { + None } -/// Read designated chunk from the sandbox memory and attempt to decode into the specified type. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - requested buffer is not within the bounds of the sandbox memory. -/// - the buffer contents cannot be decoded as the required type. -fn read_sandbox_memory_as( - ctx: &mut Runtime, - ptr: u32, - len: u32, -) -> Result { - let buf = read_sandbox_memory(ctx, ptr, len)?; - D::decode(&mut &buf[..]).map_err(|_| store_err(ctx, Error::::DecodingFailed)) +/// Can only be used for one call. +pub struct Runtime<'a, E: Ext + 'a> { + ext: &'a mut E, + input_data: Option>, + memory: sp_sandbox::Memory, + trap_reason: Option, } -/// Write the given buffer to the designated location in the sandbox memory. -/// -/// Returns `Err` if one of the following conditions occurs: -/// -/// - designated area is not within the bounds of the sandbox memory. -fn write_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - buf: &[u8], -) -> Result<(), sp_sandbox::HostError> { - ctx.memory.set(ptr, buf).map_err(|_| store_err(ctx, Error::::OutOfBounds)) -} +impl<'a, E> Runtime<'a, E> +where + E: Ext + 'a, + ::AccountId: + UncheckedFrom<::Hash> + AsRef<[u8]>, +{ + pub fn new(ext: &'a mut E, input_data: Vec, memory: sp_sandbox::Memory) -> Self { + Runtime { ext, input_data: Some(input_data), memory, trap_reason: None } + } -/// Write the given buffer and its length to the designated locations in sandbox memory and -/// charge gas according to the token returned by `create_token`. -// -/// `out_ptr` is the location in sandbox memory where `buf` should be written to. -/// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the -/// length of the buffer located at `out_ptr`. If that buffer is large enough the actual -/// `buf.len()` is written to this location. -/// -/// If `out_ptr` is set to the sentinel value of `u32::max_value()` and `allow_skip` is true the -/// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying -/// output optional. For example to skip copying back the output buffer of an `seal_call` -/// when the caller is not interested in the result. -/// -/// `create_token` can optionally instruct this function to charge the gas meter with the token -/// it returns. `create_token` receives the variable amount of bytes that are about to be copied by -/// this function. -/// -/// In addition to the error conditions of `write_sandbox_memory` this functions returns -/// `Err` if the size of the buffer located at `out_ptr` is too small to fit `buf`. -fn write_sandbox_output( - ctx: &mut Runtime, - out_ptr: u32, - out_len_ptr: u32, - buf: &[u8], - allow_skip: bool, - create_token: impl FnOnce(u32) -> Option, -) -> Result<(), sp_sandbox::HostError> { - if allow_skip && out_ptr == u32::max_value() { - return Ok(()); + /// Converts the sandbox result and the runtime state into the execution outcome.
+ /// + /// It evaluates information stored in the `trap_reason` variable of the runtime and + /// bases the outcome on the value of this variable. Only if `trap_reason` is `None` + /// is the result of the sandbox evaluated. + pub fn to_execution_result( + self, + sandbox_result: Result, + ) -> ExecResult { + // If a trap reason is set we base our decision solely on that. + if let Some(trap_reason) = self.trap_reason { + return match trap_reason { + // The trap was the result of the execution `return` host function. + TrapReason::Return(ReturnData { flags, data }) => { + let flags = ReturnFlags::from_bits(flags) + .ok_or_else(|| "used reserved bit in return flags")?; + Ok(ExecReturnValue { flags, data: Bytes(data) }) + }, + TrapReason::Termination => + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + TrapReason::SupervisorError(error) => Err(error)?, + } + } + + // Check the exact type of the error. + match sandbox_result { + // No traps were generated. Proceed normally. + Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + // `Error::Module` is returned only if instantiation or linking failed (i.e. + // wasm binary tried to import a function that is not provided by the host). + // This shouldn't happen because the validation process ought to reject such binaries. + // + // Because panics are really undesirable in the runtime code, we treat this as + // a trap for now. Eventually, we might want to revisit this. + Err(sp_sandbox::Error::Module) => Err("validation error")?, + // Any other kind of a trap should result in a failure. + Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => + Err(Error::::ContractTrapped)?, + } } - let buf_len = buf.len() as u32; - let len: u32 = read_sandbox_memory_as(ctx, out_len_ptr, 4)?; + /// Get a mutable reference to the inner `Ext`. + /// + /// This is mainly for the chain extension to have access to the environment the + /// contract is executing in. + pub fn ext(&mut self) -> &mut E { + self.ext + } - if len < buf_len { - Err(store_err(ctx, Error::::OutputBufferTooSmall))? + /// Store the reason for a host function triggered trap. + /// + /// This is called by the `define_env` macro in order to store any error returned by + /// the host functions defined through the said macro. It should **not** be called + /// manually. + pub fn set_trap_reason(&mut self, reason: TrapReason) { + self.trap_reason = Some(reason); } - if let Some(token) = create_token(buf_len) { - charge_gas(ctx, token)?; + /// Charge the gas meter with the specified token. + /// + /// Returns `Err(HostError)` if there is not enough gas. + pub fn charge_gas(&mut self, costs: RuntimeCosts) -> Result { + let token = costs.token(&self.ext.schedule().host_fn_weights); + self.ext.gas_meter().charge(token) } - ctx.memory.set(out_ptr, buf).and_then(|_| { - ctx.memory.set(out_len_ptr, &buf_len.encode()) - }) - .map_err(|_| store_err(ctx, Error::::OutOfBounds))?; + /// Adjust a previously charged amount down to its actual amount. + /// + /// This is used when a maximum amount was charged a priori and should then be partially + /// refunded to match the actual amount. + pub fn adjust_gas(&mut self, charged: ChargedAmount, actual_costs: RuntimeCosts) { + let token = actual_costs.token(&self.ext.schedule().host_fn_weights); + self.ext.gas_meter().adjust_gas(charged, token); + } - Ok(()) -} + /// Read designated chunk from the sandbox memory.
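// --- Illustrative sketch (not part of this diff): the charge-then-adjust pattern
// enabled by `charge_gas`/`adjust_gas` above. A host function charges a worst case
// up front and refunds the difference once the actual cost is known; the function
// name and the weight values here are hypothetical.
fn charge_then_refund<'a, E>(
    rt: &mut Runtime<'a, E>,
    max_weight: u64,
    actual_weight: u64,
) -> Result<(), DispatchError>
where
    E: Ext + 'a,
    <E::T as frame_system::Config>::AccountId:
        UncheckedFrom<<E::T as frame_system::Config>::Hash> + AsRef<[u8]>,
{
    // Charge the a priori maximum ...
    let charged = rt.charge_gas(RuntimeCosts::ChainExtension(max_weight))?;
    // ... then adjust down to what was actually consumed.
    rt.adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight));
    Ok(())
}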
+ /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + pub fn read_sandbox_memory(&self, ptr: u32, len: u32) -> Result, DispatchError> { + ensure!(len <= self.ext.schedule().limits.max_memory_size(), Error::::OutOfBounds); + let mut buf = vec![0u8; len as usize]; + self.memory + .get(ptr, buf.as_mut_slice()) + .map_err(|_| Error::::OutOfBounds)?; + Ok(buf) + } -/// Supply to `write_sandbox_output` to indicate that the gas meter should not be charged. -/// -/// This is only appropriate when writing out data of constant size that does not depend on user -/// input. In this case the costs for this copy was already charged as part of the token at -/// the beginning of the API entry point. -fn already_charged(_: u32) -> Option { - None -} + /// Read designated chunk from the sandbox memory into the supplied buffer. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + pub fn read_sandbox_memory_into_buf( + &self, + ptr: u32, + buf: &mut [u8], + ) -> Result<(), DispatchError> { + self.memory.get(ptr, buf).map_err(|_| Error::::OutOfBounds.into()) + } -/// Stores a DispatchError returned from an Ext function into the trap_reason. -/// -/// This allows through supervisor generated errors to the caller. -fn store_err(ctx: &mut Runtime, err: Error) -> sp_sandbox::HostError where - E: Ext, - Error: Into, -{ - ctx.trap_reason = Some(TrapReason::SupervisorError(err.into())); - sp_sandbox::HostError -} + /// Reads and decodes a type with a size fixed at compile time from contract memory. + /// + /// # Note + /// + /// The weight of reading a fixed value is included in the overall weight of any + /// contract callable function. + pub fn read_sandbox_memory_as( + &self, + ptr: u32, + ) -> Result { + let buf = self.read_sandbox_memory(ptr, D::max_encoded_len() as u32)?; + let decoded = D::decode_all(&mut &buf[..]) + .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) + } -/// Fallible conversion of `DispatchError` to `ReturnCode`. -fn err_into_return_code(from: DispatchError) -> Result { - use ReturnCode::*; - - let below_sub = Error::::BelowSubsistenceThreshold.into(); - let transfer_failed = Error::::TransferFailed.into(); - let not_funded = Error::::NewContractNotFunded.into(); - let no_code = Error::::CodeNotFound.into(); - let invalid_contract = Error::::NotCallable.into(); - - match from { - x if x == below_sub => Ok(BelowSubsistenceThreshold), - x if x == transfer_failed => Ok(TransferFailed), - x if x == not_funded => Ok(NewContractNotFunded), - x if x == no_code => Ok(CodeNotFound), - x if x == invalid_contract => Ok(NotCallable), - err => Err(err) + /// Read designated chunk from the sandbox memory and attempt to decode into the specified type. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - requested buffer is not within the bounds of the sandbox memory. + /// - the buffer contents cannot be decoded as the required type. + /// + /// # Note + /// + /// There must be an extra benchmark for determining the influence of `len` with + /// regard to the overall weight. 
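// --- Illustrative sketch (not part of this diff): reading two fixed-size values with
// the bounded decoder above. No length parameters are needed because `MaxEncodedLen`
// caps the number of bytes read; the pointer arguments are hypothetical.
fn read_call_args<'a, E>(
    rt: &Runtime<'a, E>,
    value_ptr: u32,
    code_hash_ptr: u32,
) -> Result<(BalanceOf<E::T>, CodeHash<E::T>), DispatchError>
where
    E: Ext + 'a,
    <E::T as frame_system::Config>::AccountId:
        UncheckedFrom<<E::T as frame_system::Config>::Hash> + AsRef<[u8]>,
{
    let value: BalanceOf<E::T> = rt.read_sandbox_memory_as(value_ptr)?;
    let code_hash: CodeHash<E::T> = rt.read_sandbox_memory_as(code_hash_ptr)?;
    Ok((value, code_hash))
}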
+ pub fn read_sandbox_memory_as_unbounded( + &self, + ptr: u32, + len: u32, + ) -> Result { + let buf = self.read_sandbox_memory(ptr, len)?; + let decoded = D::decode_all(&mut &buf[..]) + .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) } -/// Fallible conversion of a `ExecResult` to `ReturnCode`. -fn exec_into_return_code(from: ExecResult) -> Result { - use crate::exec::ErrorOrigin::Callee; + /// Write the given buffer and its length to the designated locations in sandbox memory and + /// charge gas according to the token returned by `create_token`. + /// + /// `out_ptr` is the location in sandbox memory where `buf` should be written to. + /// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the + /// length of the buffer located at `out_ptr`. If that buffer is large enough the actual + /// `buf.len()` is written to this location. + /// + /// If `out_ptr` is set to the sentinel value of `u32::MAX` and `allow_skip` is true the + /// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying + /// output optional. For example to skip copying back the output buffer of a `seal_call` + /// when the caller is not interested in the result. + /// + /// `create_token` can optionally instruct this function to charge the gas meter with the token + /// it returns. `create_token` receives the variable amount of bytes that are about to be copied + /// by this function. + /// + /// In addition to the error conditions of `write_sandbox_memory` this function returns + /// `Err` if the size of the buffer located at `out_ptr` is too small to fit `buf`. + pub fn write_sandbox_output( + &mut self, + out_ptr: u32, + out_len_ptr: u32, + buf: &[u8], + allow_skip: bool, + create_token: impl FnOnce(u32) -> Option, + ) -> Result<(), DispatchError> { + if allow_skip && out_ptr == u32::MAX { + return Ok(()) + } + + let buf_len = buf.len() as u32; + let len: u32 = self.read_sandbox_memory_as(out_len_ptr)?; - let ExecError { error, origin } = match from { - Ok(retval) => return Ok(retval.into()), - Err(err) => err, - }; + if len < buf_len { + Err(Error::::OutputBufferTooSmall)? + } + + if let Some(costs) = create_token(buf_len) { + self.charge_gas(costs)?; + } - match (error, origin) { - (_, Callee) => Ok(ReturnCode::CalleeTrapped), - (err, _) => err_into_return_code::(err) + self.memory + .set(out_ptr, buf) + .and_then(|_| self.memory.set(out_len_ptr, &buf_len.encode())) + .map_err(|_| Error::::OutOfBounds)?; + + Ok(()) } -} + /// Write the given buffer to the designated location in the sandbox memory. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - designated area is not within the bounds of the sandbox memory. + fn write_sandbox_memory(&mut self, ptr: u32, buf: &[u8]) -> Result<(), DispatchError> { + self.memory.set(ptr, buf).map_err(|_| Error::::OutOfBounds.into()) } -/// Try to convert an error into a `ReturnCode`.
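// --- Illustrative sketch (not part of this diff): typical use of `write_sandbox_output`
// above. With `allow_skip = true` a contract can pass the `u32::MAX` sentinel as
// `output_ptr` to skip the copy, and the closure prices the copy only when it happens:
//
//     self.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| {
//         Some(RuntimeCosts::CallCopyOut(len))
//     })?;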
-/// -/// Used to decide between fatal and non-fatal errors. -fn map_dispatch_result(ctx: &mut Runtime, result: Result) - -> Result -{ - let err = if let Err(err) = result { - err - } else { - return Ok(ReturnCode::Success) - }; - - match err_into_return_code::(err) { - Ok(code) => Ok(code), - Err(err) => Err(store_err(ctx, err)), + /// Computes the given hash function on the supplied input. + /// + /// Reads from the sandboxed input buffer into an intermediate buffer. + /// Returns the result directly to the output buffer of the sandboxed memory. + /// + /// It is the caller's responsibility to provide an output buffer that + /// is large enough to hold the expected amount of bytes returned by the + /// chosen hash function. + /// + /// # Note + /// + /// The `input` and `output` buffers may overlap. + fn compute_hash_on_intermediate_buffer( + &mut self, + hash_fn: F, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), DispatchError> + where + F: FnOnce(&[u8]) -> R, + R: AsRef<[u8]>, + { + // Copy input into supervisor memory. + let input = self.read_sandbox_memory(input_ptr, input_len)?; + // Compute the hash on the input buffer using the given hash function. + let hash = hash_fn(&input); + // Write the resulting hash back into the sandboxed output buffer. + self.write_sandbox_memory(output_ptr, hash.as_ref())?; + Ok(()) + } + + /// Fallible conversion of `DispatchError` to `ReturnCode`. + fn err_into_return_code(from: DispatchError) -> Result { + use ReturnCode::*; + + let below_sub = Error::::BelowSubsistenceThreshold.into(); + let transfer_failed = Error::::TransferFailed.into(); + let not_funded = Error::::NewContractNotFunded.into(); + let no_code = Error::::CodeNotFound.into(); + let not_found = Error::::ContractNotFound.into(); + + match from { + x if x == below_sub => Ok(BelowSubsistenceThreshold), + x if x == transfer_failed => Ok(TransferFailed), + x if x == not_funded => Ok(NewContractNotFunded), + x if x == no_code => Ok(CodeNotFound), + x if x == not_found => Ok(NotCallable), + err => Err(err), + } + } + + /// Fallible conversion of an `ExecResult` to `ReturnCode`. + fn exec_into_return_code(from: ExecResult) -> Result { + use crate::exec::ErrorOrigin::Callee; + + let ExecError { error, origin } = match from { + Ok(retval) => return Ok(retval.into()), + Err(err) => err, + }; + + match (error, origin) { + (_, Callee) => Ok(ReturnCode::CalleeTrapped), + (err, _) => Self::err_into_return_code(err), + } + } + + fn call( + &mut self, + flags: CallFlags, + callee_ptr: u32, + gas: u64, + value_ptr: u32, + input_data_ptr: u32, + input_data_len: u32, + output_ptr: u32, + output_len_ptr: u32, + ) -> Result { + self.charge_gas(RuntimeCosts::CallBase(input_data_len))?; + let callee: <::T as frame_system::Config>::AccountId = + self.read_sandbox_memory_as(callee_ptr)?; + let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; + let input_data = if flags.contains(CallFlags::CLONE_INPUT) { + self.input_data.as_ref().ok_or_else(|| Error::::InputForwarded)?.clone() + } else if flags.contains(CallFlags::FORWARD_INPUT) { + self.input_data.take().ok_or_else(|| Error::::InputForwarded)? + } else { + self.read_sandbox_memory(input_data_ptr, input_data_len)? + }; + if value > 0u32.into() { + self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; + } + let ext = &mut self.ext; + let call_outcome = + ext.call(gas, callee, value, input_data, flags.contains(CallFlags::ALLOW_REENTRY)); + + // `TAIL_CALL` only matters on an `OK` result.
Otherwise the call stack comes to + // a halt anyway without any more code being executed. + if flags.contains(CallFlags::TAIL_CALL) { + if let Ok(return_value) = call_outcome { + return Err(TrapReason::Return(ReturnData { + flags: return_value.flags.bits(), + data: return_value.data.0, + })) + } + } + + if let Ok(output) = &call_outcome { + self.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { + Some(RuntimeCosts::CallCopyOut(len)) + })?; + } + Ok(Runtime::::exec_into_return_code(call_outcome)?) + } + + fn instantiate( + &mut self, + code_hash_ptr: u32, + gas: u64, + value_ptr: u32, + input_data_ptr: u32, + input_data_len: u32, + address_ptr: u32, + address_len_ptr: u32, + output_ptr: u32, + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32, + ) -> Result { + self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; + let code_hash: CodeHash<::T> = self.read_sandbox_memory_as(code_hash_ptr)?; + let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; + let input_data = self.read_sandbox_memory(input_data_ptr, input_data_len)?; + let salt = self.read_sandbox_memory(salt_ptr, salt_len)?; + let instantiate_outcome = self.ext.instantiate(gas, code_hash, value, input_data, &salt); + if let Ok((address, output)) = &instantiate_outcome { + if !output.flags.contains(ReturnFlags::REVERT) { + self.write_sandbox_output( + address_ptr, + address_len_ptr, + &address.encode(), + true, + already_charged, + )?; + } + self.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { + Some(RuntimeCosts::InstantiateCopyOut(len)) + })?; + } + Ok(Runtime::::exec_into_return_code(instantiate_outcome.map(|(_, retval)| retval))?) + } + + fn terminate(&mut self, beneficiary_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::Terminate)?; + let beneficiary: <::T as frame_system::Config>::AccountId = + self.read_sandbox_memory_as(beneficiary_ptr)?; + self.ext.terminate(&beneficiary)?; + Err(TrapReason::Termination) } } @@ -558,15 +744,14 @@ fn map_dispatch_result(ctx: &mut Runtime, result: Result, - // Account for used gas. Traps if gas used is greater than gas limit. // // NOTE: This is an implementation defined call and is NOT a part of the public API. // This call is supposed to be called only by instrumentation injected code. // // - amount: How much gas is used. - gas(ctx, amount: u32) => { - charge_gas(ctx, RuntimeToken::MeteringBlock(amount))?; + [seal0] gas(ctx, amount: u32) => { + ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?; Ok(()) }, @@ -585,16 +770,15 @@ define_env!(Env, , // // - If value length exceeds the configured maximum value length of a storage entry. // - Upon trying to set an empty storage entry (value length is 0).
- seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { - charge_gas(ctx, RuntimeToken::SetStorage(value_len))?; + [seal0] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { + ctx.charge_gas(RuntimeCosts::SetStorage(value_len))?; if value_len > ctx.ext.max_value_size() { - Err(store_err(ctx, Error::::ValueTooLarge))?; + Err(Error::::ValueTooLarge)?; } let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; - let value = Some(read_sandbox_memory(ctx, value_ptr, value_len)?); - ctx.ext.set_storage(key, value); - Ok(()) + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; + let value = Some(ctx.read_sandbox_memory(value_ptr, value_len)?); + ctx.ext.set_storage(key, value).map_err(Into::into) }, // Clear the value at the given key in the contract storage. @@ -602,12 +786,11 @@ define_env!(Env, , // # Parameters // // - `key_ptr`: pointer into the linear memory where the location to clear the value is placed. - seal_clear_storage(ctx, key_ptr: u32) => { - charge_gas(ctx, RuntimeToken::ClearStorage)?; + [seal0] seal_clear_storage(ctx, key_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::ClearStorage)?; let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; - ctx.ext.set_storage(key, None); - Ok(()) + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; + ctx.ext.set_storage(key, None).map_err(Into::into) }, // Retrieve the value under the given key from storage. @@ -622,13 +805,13 @@ define_env!(Env, , // # Errors // // `ReturnCode::KeyNotFound` - seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::GetStorageBase)?; + [seal0] seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + ctx.charge_gas(RuntimeCosts::GetStorageBase)?; let mut key: StorageKey = [0; 32]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut key)?; + ctx.read_sandbox_memory_into_buf(key_ptr, &mut key)?; if let Some(value) = ctx.ext.get_storage(&key) { - write_sandbox_output(ctx, out_ptr, out_len_ptr, &value, false, |len| { - Some(RuntimeToken::GetStorageCopyOut(len)) + ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, |len| { + Some(RuntimeCosts::GetStorageCopyOut(len)) })?; Ok(ReturnCode::Success) } else { @@ -651,38 +834,79 @@ define_env!(Env, , // // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` - seal_transfer( + [seal0] seal_transfer( ctx, account_ptr: u32, - account_len: u32, + _account_len: u32, value_ptr: u32, - value_len: u32 + _value_len: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::Transfer)?; - let callee: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, account_ptr, account_len)?; + ctx.charge_gas(RuntimeCosts::Transfer)?; + let callee: <::T as frame_system::Config>::AccountId = + ctx.read_sandbox_memory_as(account_ptr)?; let value: BalanceOf<::T> = - read_sandbox_memory_as(ctx, value_ptr, value_len)?; + ctx.read_sandbox_memory_as(value_ptr)?; let result = ctx.ext.transfer(&callee, value); - map_dispatch_result(ctx, result) + match result { + Ok(()) => Ok(ReturnCode::Success), + Err(err) => { + let code = Runtime::::err_into_return_code(err)?; + Ok(code) + } + } + }, + + // Make a call to another contract. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function with + // `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. 
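// --- Illustrative sketch (not part of this diff, contract side): a roundtrip through
// the storage host functions above, assuming raw wasm imports from the "seal0" module
// and the fixed 32 byte storage key:
//
//     let key = [1u8; 32];
//     let value = b"data";
//     seal_set_storage(key.as_ptr() as u32, value.as_ptr() as u32, value.len() as u32);
//     // Returns ReturnCode::Success and copies the value back, or
//     // ReturnCode::KeyNotFound if nothing is stored under the key.
//     let ret = seal_get_storage(key.as_ptr() as u32, out_ptr, out_len_ptr);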
+ // + // # Note + // + // The values `_callee_len` and `_value_len` are ignored because the encoded sizes + // of those types are fixed through [`MaxEncodedLen`]. The fields exist for backwards + // compatibility. Consider switching to the newest version of this function. + [seal0] seal_call( + ctx, + callee_ptr: u32, + _callee_len: u32, + gas: u64, + value_ptr: u32, + _value_len: u32, + input_data_ptr: u32, + input_data_len: u32, + output_ptr: u32, + output_len_ptr: u32 + ) -> ReturnCode => { + ctx.call( + CallFlags::ALLOW_REENTRY, + callee_ptr, + gas, + value_ptr, + input_data_ptr, + input_data_len, + output_ptr, + output_len_ptr, + ) }, // Make a call to another contract. // // The callee's output buffer is copied to `output_ptr` and its length to `output_len_ptr`. // The copy of the output buffer can be skipped by supplying the sentinel value - // of `u32::max_value()` to `output_ptr`. + // of `u32::MAX` to `output_ptr`. // // # Parameters // + // - flags: See [`CallFlags`] for a documentation of the supported flags. // - callee_ptr: a pointer to the address of the callee contract. // Should be decodable as a `T::AccountId`. Traps otherwise. - // - callee_len: length of the address buffer. // - gas: how much gas to devote to the execution. // - value_ptr: a pointer to the buffer with value, how much value to send. // Should be decodable as a `T::Balance`. Traps otherwise. - // - value_len: length of the value buffer. // - input_data_ptr: a pointer to a buffer to be used as input data to the callee. // - input_data_len: length of the input data buffer. // - output_ptr: a pointer where the output buffer is copied to. @@ -699,55 +923,70 @@ define_env!(Env, , // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` // `ReturnCode::NotCallable` - seal_call( + [__unstable__] seal_call( ctx, + flags: u32, callee_ptr: u32, - callee_len: u32, gas: u64, value_ptr: u32, - value_len: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, output_len_ptr: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::CallBase(input_data_len))?; - let callee: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, callee_ptr, callee_len)?; - let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?; - let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?; - - if value > 0.into() { - charge_gas(ctx, RuntimeToken::CallSurchargeTransfer)?; - } - - let nested_gas_limit = if gas == 0 { - ctx.gas_meter.gas_left() - } else { - gas.saturated_into() - }; - let ext = &mut ctx.ext; - let call_outcome = ctx.gas_meter.with_nested(nested_gas_limit, |nested_meter| { - match nested_meter { - Some(nested_meter) => { - ext.call( - &callee, - value, - nested_meter, - input_data, - ) - } - // there is not enough gas to allocate for the nested call. - None => Err(Error::<::T>::OutOfGas.into()), - } - }); + ctx.call( + CallFlags::from_bits(flags).ok_or_else(|| "used reserved bit in CallFlags")?, + callee_ptr, + gas, + value_ptr, + input_data_ptr, + input_data_len, + output_ptr, + output_len_ptr, + ) + }, - if let Ok(output) = &call_outcome { - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true, |len| { - Some(RuntimeToken::CallCopyOut(len)) - })?; - } - map_exec_result(ctx, call_outcome) + // Instantiate a contract with the specified code hash. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields.
+ // + // # Note + // + // The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes + // of those types are fixed through [`MaxEncodedLen`]. The fields exist for backwards + // compatibility. Consider switching to the newest version of this function. + [seal0] seal_instantiate( + ctx, + code_hash_ptr: u32, + _code_hash_len: u32, + gas: u64, + value_ptr: u32, + _value_len: u32, + input_data_ptr: u32, + input_data_len: u32, + address_ptr: u32, + address_len_ptr: u32, + output_ptr: u32, + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32 + ) -> ReturnCode => { + ctx.instantiate( + code_hash_ptr, + gas, + value_ptr, + input_data_ptr, + input_data_len, + address_ptr, + address_len_ptr, + output_ptr, + output_len_ptr, + salt_ptr, + salt_len, + ) }, // Instantiate a contract with the specified code hash. @@ -756,20 +995,18 @@ define_env!(Env, , // by the code hash. The address of this new account is copied to `address_ptr` and its length // to `address_len_ptr`. The constructor's output buffer is copied to `output_ptr` and its // length to `output_len_ptr`. The copy of the output buffer and address can be skipped by - // supplying the sentinel value of `u32::max_value()` to `output_ptr` or `address_ptr`. + // supplying the sentinel value of `u32::MAX` to `output_ptr` or `address_ptr`. // - // After running the constructor it is verfied that the contract account holds at - // least the subsistence threshold. If that is not the case the instantion fails and + // After running the constructor it is verified that the contract account holds at + // least the subsistence threshold. If that is not the case the instantiation fails and + // the contract is not created. // // # Parameters // // - code_hash_ptr: a pointer to the buffer that contains the initializer code. - // - code_hash_len: length of the initializer code buffer. // - gas: how much gas to devote to the execution of the initializer code. // - value_ptr: a pointer to the buffer with value, how much value to send. // Should be decodable as a `T::Balance`. Traps otherwise. - // - value_len: length of the value buffer. // - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code. // - input_data_len: length of the input data buffer. // - address_ptr: a pointer where the new account's address is copied to. @@ -778,6 +1015,8 @@ define_env!(Env, , // - output_ptr: a pointer where the output buffer is copied to. // - output_len_ptr: in-out pointer to where the length of the buffer is read from // and the actual length is written to. + // - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`. + // - salt_len: length in bytes of the supplied salt.
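// --- Illustrative sketch (not part of this diff, contract side): the `[seal0]`,
// `[seal1]` and `[__unstable__]` tags above select the wasm import module, so a
// deprecated function and its successor coexist. Signatures below are abridged and
// the exact import declarations are an assumption:
//
//     #[link(wasm_import_module = "seal0")]
//     extern "C" { fn seal_terminate(beneficiary_ptr: u32, beneficiary_len: u32); }
//
//     #[link(wasm_import_module = "seal1")]
//     extern "C" {
//         #[link_name = "seal_terminate"]
//         fn seal_terminate_v1(beneficiary_ptr: u32);
//     }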
// // # Errors // @@ -793,57 +1032,49 @@ define_env!(Env, , // `ReturnCode::TransferFailed` // `ReturnCode::NewContractNotFunded` // `ReturnCode::CodeNotFound` - seal_instantiate( + [seal1] seal_instantiate( ctx, code_hash_ptr: u32, - code_hash_len: u32, gas: u64, value_ptr: u32, - value_len: u32, input_data_ptr: u32, input_data_len: u32, address_ptr: u32, address_len_ptr: u32, output_ptr: u32, - output_len_ptr: u32 + output_len_ptr: u32, + salt_ptr: u32, + salt_len: u32 ) -> ReturnCode => { - charge_gas(ctx, RuntimeToken::InstantiateBase(input_data_len))?; - let code_hash: CodeHash<::T> = - read_sandbox_memory_as(ctx, code_hash_ptr, code_hash_len)?; - let value: BalanceOf<::T> = read_sandbox_memory_as(ctx, value_ptr, value_len)?; - let input_data = read_sandbox_memory(ctx, input_data_ptr, input_data_len)?; - - let nested_gas_limit = if gas == 0 { - ctx.gas_meter.gas_left() - } else { - gas.saturated_into() - }; - let ext = &mut ctx.ext; - let instantiate_outcome = ctx.gas_meter.with_nested(nested_gas_limit, |nested_meter| { - match nested_meter { - Some(nested_meter) => { - ext.instantiate( - &code_hash, - value, - nested_meter, - input_data - ) - } - // there is not enough gas to allocate for the nested call. - None => Err(Error::<::T>::OutOfGas.into()), - } - }); - if let Ok((address, output)) = &instantiate_outcome { - if !output.flags.contains(ReturnFlags::REVERT) { - write_sandbox_output( - ctx, address_ptr, address_len_ptr, &address.encode(), true, already_charged, - )?; - } - write_sandbox_output(ctx, output_ptr, output_len_ptr, &output.data, true, |len| { - Some(RuntimeToken::InstantiateCopyOut(len)) - })?; - } - map_exec_result(ctx, instantiate_outcome.map(|(_id, retval)| retval)) + ctx.instantiate( + code_hash_ptr, + gas, + value_ptr, + input_data_ptr, + input_data_len, + address_ptr, + address_len_ptr, + output_ptr, + output_len_ptr, + salt_ptr, + salt_len, + ) + }, + + // Remove the calling account and transfer remaining balance. + // + // # Deprecation + // + // This is equivalent to calling the newer version of this function. The newer version + // drops the now unnecessary length fields. + // + // # Note + // + // The value `_beneficiary_len` is ignored because the encoded size of + // this type is fixed through [`MaxEncodedLen`]. The field exists for backwards + // compatibility. Consider switching to the newest version of this function. + [seal0] seal_terminate(ctx, beneficiary_ptr: u32, _beneficiary_len: u32) => { + ctx.terminate(beneficiary_ptr) }, // Remove the calling account and transfer remaining balance. @@ -853,42 +1084,44 @@ define_env!(Env, , // which is considered fatal and results in a trap + rollback. // // - beneficiary_ptr: a pointer to the address of the beneficiary account where all - // where all remaining funds of the caller are transfered. + // remaining funds of the caller are transferred. // Should be decodable as a `T::AccountId`. Traps otherwise. - // - beneficiary_len: length of the address buffer. // // # Traps // // - The contract is live i.e. is already on the call stack. - seal_terminate( - ctx, - beneficiary_ptr: u32, - beneficiary_len: u32 - ) => { - charge_gas(ctx, RuntimeToken::Terminate)?; - let beneficiary: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, beneficiary_ptr, beneficiary_len)?; - - if let Ok(_) = ctx.ext.terminate(&beneficiary) { - ctx.trap_reason = Some(TrapReason::Termination); - } - Err(sp_sandbox::HostError) + // - Failed to send the balance to the beneficiary.
+ // - The deletion queue is full. + [seal1] seal_terminate(ctx, beneficiary_ptr: u32) => { + ctx.terminate(beneficiary_ptr) }, - seal_input(ctx, buf_ptr: u32, buf_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::InputBase)?; + // Stores the input passed by the caller into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // # Note + // + // This function traps if the input was previously forwarded by a `seal_call`. + [seal0] seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::InputBase)?; if let Some(input) = ctx.input_data.take() { - write_sandbox_output(ctx, buf_ptr, buf_len_ptr, &input, false, |len| { - Some(RuntimeToken::InputCopyOut(len)) - }) + ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| { + Some(RuntimeCosts::InputCopyOut(len)) + })?; + ctx.input_data = Some(input); + Ok(()) } else { - Err(sp_sandbox::HostError) + Err(Error::::InputForwarded.into()) } }, // Cease contract execution and save a data buffer as a result of the execution. // - // This function never retuns as it stops execution of the caller. + // This function never returns as it stops execution of the caller. // This is the only way to return a data buffer to the caller. Returning from // execution without calling this function is equivalent to calling: // ``` @@ -903,17 +1136,12 @@ define_env!(Env, , // --- msb --- // // Using a reserved bit triggers a trap. - seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { - charge_gas(ctx, RuntimeToken::Return(data_len))?; - ctx.trap_reason = Some(TrapReason::Return(ReturnData { + [seal0] seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { + ctx.charge_gas(RuntimeCosts::Return(data_len))?; + Err(TrapReason::Return(ReturnData { flags, - data: read_sandbox_memory(ctx, data_ptr, data_len)?, - })); - - // The trap mechanism is used to immediately terminate the execution. - // This trap should be handled appropriately before returning the result - // to the user of this crate. - Err(sp_sandbox::HostError) + data: ctx.read_sandbox_memory(data_ptr, data_len)?, + })) }, // Stores the address of the caller into the supplied buffer. @@ -926,11 +1154,11 @@ define_env!(Env, , // If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the // extrinsic will be returned. Otherwise, if this call is initiated by another contract then the // address of the contract will be returned. The value is encoded as T::AccountId. - seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Caller)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged - ) + [seal0] seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::Caller)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged + )?) }, // Stores the address of the current contract into the supplied buffer. @@ -939,11 +1167,11 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. 
If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Address)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged - ) + [seal0] seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::Address)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged + )?) }, // Stores the price for the specified amount of gas into the supplied buffer. @@ -959,12 +1187,11 @@ define_env!(Env, , // // It is recommended to avoid specifying very small values for `gas` as the prices for a single // gas can be smaller than one. - seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::WeightToFee)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, - already_charged - ) + [seal0] seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::WeightToFee)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, already_charged + )?) }, // Stores the amount of gas left into the supplied buffer. @@ -975,11 +1202,12 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as Gas. - seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::GasLeft)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.gas_meter.gas_left().encode(), false, already_charged - ) + [seal0] seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::GasLeft)?; + let gas_left = &ctx.ext.gas_meter().gas_left().encode(); + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &gas_left, false, already_charged, + )?) }, // Stores the balance of the current account into the supplied buffer. @@ -990,11 +1218,11 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Balance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged - ) + [seal0] seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::Balance)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged + )?) }, // Stores the value transferred along with this call or as endowment into the supplied buffer. @@ -1005,12 +1233,11 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Balance. - seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::ValueTransferred)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, - already_charged - ) + [seal0] seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::ValueTransferred)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, already_charged + )?) }, // Stores a random number for the current block and the given subject into the supplied buffer. 
@@ -1021,17 +1248,51 @@ define_env!(Env, , // space at `out_ptr` is less than the size of the value a trap is triggered. // // The data is encoded as T::Hash. - seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Random)?; - // The length of a subject can't exceed `max_subject_len`. - if subject_len > ctx.schedule.max_subject_len { - return Err(sp_sandbox::HostError); + // + // # Deprecation + // + // This function is deprecated. Users should migrate to the version in the "seal1" module. + [seal0] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::Random)?; + if subject_len > ctx.ext.schedule().limits.subject_len { + Err(Error::::RandomSubjectTooLong)?; } - let subject_buf = read_sandbox_memory(ctx, subject_ptr, subject_len)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, - already_charged - ) + let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).0.encode(), false, already_charged + )?) + }, + + // Stores a random number for the current block and the given subject into the supplied buffer. + // + // The value is stored to linear memory at the address pointed to by `out_ptr`. + // `out_len_ptr` must point to a u32 value that describes the available space at + // `out_ptr`. This call overwrites it with the size of the value. If the available + // space at `out_ptr` is less than the size of the value a trap is triggered. + // + // The data is encoded as (T::Hash, T::BlockNumber). + // + // # Changes from v0 + // + // In addition to the seed it returns the block number since which it was determinable + // by chain observers. + // + // # Note + // + // The returned seed should only be used to distinguish commitments made before + // the returned block number. If the block number is too early (i.e. commitments were + // made afterwards), then ensure no further commitments may be made and repeatedly + // call this on later blocks until the block number returned is later than the latest + // commitment. + [seal1] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::Random)?; + if subject_len > ctx.ext.schedule().limits.subject_len { + Err(Error::::RandomSubjectTooLong)?; + } + let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, already_charged + )?) }, // Load the latest block timestamp into the supplied buffer @@ -1040,24 +1301,37 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::Now)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged - ) + [seal0] seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::Now)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged + )?) }, // Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. // // The data is encoded as T::Balance. 
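// --- Illustrative sketch (not part of this diff, contract side): decoding the seal1
// `seal_random` output documented above. `Hash`, `BlockNumber`, `out_buf` and
// `commitment_block` are hypothetical stand-ins for the chain's concrete types:
//
//     let (seed, since): (Hash, BlockNumber) = Decode::decode(&mut &out_buf[..])?;
//     // Per the note above, only trust `seed` for commitments made before `since`.
//     let trustworthy = commitment_block < since;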
- seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::MinimumBalance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged - ) + [seal0] seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::MinimumBalance)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged + )?) + }, + + // Stores the contract deposit into the supplied buffer. + // + // # Deprecation + // + // This is equivalent to calling `seal_contract_deposit` and only exists for backwards + // compatibility. See that function for documentation. + [seal0] seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::ContractDeposit)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.contract_deposit().encode(), false, already_charged + )?) }, - // Stores the tombstone deposit into the supplied buffer. + // Stores the contract deposit into the supplied buffer. // // The value is stored to linear memory at the address pointed to by `out_ptr`. // `out_len_ptr` must point to a u32 value that describes the available space at @@ -1068,173 +1342,154 @@ define_env!(Env, , // // # Note // - // The tombstone deposit is on top of the existential deposit. So in order for - // a contract to leave a tombstone the balance of the contract must not go - // below the sum of existential deposit and the tombstone deposit. The sum - // is commonly referred as subsistence threshold in code. - seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::TombstoneDeposit)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.tombstone_deposit().encode(), false, - already_charged - ) + // The contract deposit is on top of the existential deposit. The sum + // is commonly referred to as the subsistence threshold in code. No contract initiated + // balance transfer can go below this threshold. + [seal0] seal_contract_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::ContractDeposit)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.contract_deposit().encode(), false, already_charged + )?) }, - // Try to restore the given destination contract sacrificing the caller. + // Was used to restore the given destination contract sacrificing the caller. // - // This function will compute a tombstone hash from the caller's storage and the given code hash - // and if the hash matches the hash found in the tombstone at the specified address - kill - // the caller contract and restore the destination contract and set the specified `rent_allowance`. - // All caller's funds are transfered to the destination. - // - // If there is no tombstone at the destination address, the hashes don't match or this contract - // instance is already present on the contract call stack, a trap is generated. - // - // Otherwise, the destination contract is restored. This function is diverging and stops execution - // even on success. + // # Note // - // `dest_ptr`, `dest_len` - the pointer and the length of a buffer that encodes `T::AccountId` - // with the address of the to be restored contract. - // `code_hash_ptr`, `code_hash_len` - the pointer and the length of a buffer that encodes - // a code hash of the to be restored contract.
- // `rent_allowance_ptr`, `rent_allowance_len` - the pointer and the length of a buffer that - // encodes the rent allowance that must be set in the case of successful restoration. - // `delta_ptr` is the pointer to the start of a buffer that has `delta_count` storage keys - // laid out sequentially. + // The state rent functionality was removed. This stub only exists for + // backwards compatibility + [seal0] seal_restore_to( + ctx, + _dest_ptr: u32, + _dest_len: u32, + _code_hash_ptr: u32, + _code_hash_len: u32, + _rent_allowance_ptr: u32, + _rent_allowance_len: u32, + _delta_ptr: u32, + _delta_count: u32 + ) => { + ctx.charge_gas(RuntimeCosts::DebugMessage)?; + Ok(()) + }, + + // Was used to restore the given destination contract sacrificing the caller. + // + // # Note + // + // The state rent functionality was removed. This stub only exists for + // backwards compatibility + [seal1] seal_restore_to( ctx, - dest_ptr: u32, - dest_len: u32, - code_hash_ptr: u32, - code_hash_len: u32, - rent_allowance_ptr: u32, - rent_allowance_len: u32, - delta_ptr: u32, - delta_count: u32 + _dest_ptr: u32, + _code_hash_ptr: u32, + _rent_allowance_ptr: u32, + _delta_ptr: u32, + _delta_count: u32 ) => { - charge_gas(ctx, RuntimeToken::RestoreTo(delta_count))?; - let dest: <::T as frame_system::Trait>::AccountId = - read_sandbox_memory_as(ctx, dest_ptr, dest_len)?; - let code_hash: CodeHash<::T> = - read_sandbox_memory_as(ctx, code_hash_ptr, code_hash_len)?; - let rent_allowance: BalanceOf<::T> = - read_sandbox_memory_as(ctx, rent_allowance_ptr, rent_allowance_len)?; - let delta = { - // We can eagerly allocate because we charged for the complete delta count already - let mut delta = Vec::with_capacity(delta_count as usize); - let mut key_ptr = delta_ptr; - - for _ in 0..delta_count { - const KEY_SIZE: usize = 32; - - // Read the delta into the provided buffer and collect it into the buffer. - let mut delta_key: StorageKey = [0; KEY_SIZE]; - read_sandbox_memory_into_buf(ctx, key_ptr, &mut delta_key)?; - delta.push(delta_key); - - // Offset key_ptr to the next element. - key_ptr = key_ptr.checked_add(KEY_SIZE as u32).ok_or_else(|| sp_sandbox::HostError)?; - } - - delta - }; - - if let Ok(()) = ctx.ext.restore_to( - dest, - code_hash, - rent_allowance, - delta, - ) { - ctx.trap_reason = Some(TrapReason::Restoration); - } - Err(sp_sandbox::HostError) + ctx.charge_gas(RuntimeCosts::DebugMessage)?; + Ok(()) }, // Deposit a contract event with the data buffer and optional list of topics. There is a limit - // on the maximum number of topics specified by `max_event_topics`. + // on the maximum number of topics specified by `event_topics`. // // - topics_ptr - a pointer to the buffer of topics encoded as `Vec`. The value of this // is ignored if `topics_len` is set to 0. The topics list can't contain duplicates. // - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. // - data_ptr - a pointer to a raw data buffer which will be saved along the event. // - data_len - the length of the data buffer. - seal_deposit_event(ctx, topics_ptr: u32, topics_len: u32, data_ptr: u32, data_len: u32) => { + [seal0] seal_deposit_event( + ctx, + topics_ptr: u32, + topics_len: u32, + data_ptr: u32, + data_len: u32 + ) => { + fn has_duplicates(items: &mut Vec) -> bool { + // # Warning + // + // Unstable sorts are non-deterministic across architectures.
The usage here is OK + // because we are rejecting duplicates which removes the non-determinism. + items.sort_unstable(); + // Find any two consecutive equal elements. + items.windows(2).any(|w| { + match &w { + &[a, b] => a == b, + _ => false, + } + }) + } + let num_topic = topics_len .checked_div(sp_std::mem::size_of::<TopicOf<E::T>>() as u32) - .ok_or_else(|| store_err(ctx, "Zero sized topics are not allowed"))?; + .ok_or_else(|| "Zero sized topics are not allowed")?; + ctx.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len, })?; if data_len > ctx.ext.max_value_size() { - Err(store_err(ctx, Error::<E::T>::ValueTooLarge))?; + Err(Error::<E::T>::ValueTooLarge)?; } let mut topics: Vec::<TopicOf<<E as Ext>::T>> = match topics_len { 0 => Vec::new(), - _ => read_sandbox_memory_as(ctx, topics_ptr, topics_len)?, + _ => ctx.read_sandbox_memory_as_unbounded(topics_ptr, topics_len)?, }; - // If there are more than `max_event_topics`, then trap. - if topics.len() > ctx.schedule.max_event_topics as usize { - return Err(sp_sandbox::HostError); + // If there are more than `event_topics`, then trap. + if topics.len() > ctx.ext.schedule().limits.event_topics as usize { + Err(Error::<E::T>::TooManyTopics)?; } // Check for duplicate topics. If there are any, then trap. + // Complexity O(n * log(n)) and no additional allocations. + // This also sorts the topics. if has_duplicates(&mut topics) { - return Err(sp_sandbox::HostError); + Err(Error::<E::T>::DuplicateTopics)?; } - let event_data = read_sandbox_memory(ctx, data_ptr, data_len)?; + let event_data = ctx.read_sandbox_memory(data_ptr, data_len)?; ctx.ext.deposit_event(topics, event_data); Ok(()) }, - // Set rent allowance of the contract + // Was used to set rent allowance of the contract. // - // - value_ptr: a pointer to the buffer with value, how much to allow for rent - // Should be decodable as a `T::Balance`. Traps otherwise. - // - value_len: length of the value buffer. - seal_set_rent_allowance(ctx, value_ptr: u32, value_len: u32) => { - charge_gas(ctx, RuntimeToken::SetRentAllowance)?; - let value: BalanceOf<<E as Ext>::T> = - read_sandbox_memory_as(ctx, value_ptr, value_len)?; - ctx.ext.set_rent_allowance(value); - + // # Note + // + // The state rent functionality was removed. This stub only exists for + // backwards compatibility. + [seal0] seal_set_rent_allowance(ctx, _value_ptr: u32, _value_len: u32) => { + ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) }, - // Stores the rent allowance into the supplied buffer. + // Was used to set rent allowance of the contract. // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. + // # Note // - // The data is encoded as T::Balance. - seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::RentAllowance)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.rent_allowance().encode(), false, already_charged - ) + // The state rent functionality was removed. This stub only exists for + // backwards compatibility. + [seal1] seal_set_rent_allowance(ctx, _value_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::DebugMessage)?; + Ok(()) }, - // Prints utf8 encoded string from the data buffer. - // Only available on `--dev` chains.
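The duplicate check above is worth seeing in isolation: sorting first turns duplicate detection into a single scan over adjacent pairs. A standalone sketch of the same technique:

```rust
// Standalone version of the check used by `seal_deposit_event`: sort
// (unstable is fine, only equality matters), then compare neighbours.
// O(n log n) time, no extra allocation, item order not preserved.
fn has_duplicates<T: Ord>(items: &mut Vec<T>) -> bool {
    items.sort_unstable();
    items.windows(2).any(|w| w[0] == w[1])
}

fn main() {
    assert!(has_duplicates(&mut vec![[1u8; 32], [2u8; 32], [1u8; 32]]));
    assert!(!has_duplicates(&mut vec![[1u8; 32], [2u8; 32], [3u8; 32]]));
}
```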
- // This function may be removed at any time, superseded by a more general contract debugging feature. - seal_println(ctx, str_ptr: u32, str_len: u32) => { - let data = read_sandbox_memory(ctx, str_ptr, str_len)?; - if let Ok(utf8) = core::str::from_utf8(&data) { - sp_runtime::print(utf8); - } - Ok(()) + // Was used to store the rent allowance into the supplied buffer. + // + // # Note + // + // The state rent functionality was removed. This stub only exists for + // backwards compatibility. + [seal0] seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::Balance)?; + let rent_allowance = <BalanceOf<E::T>>::max_value().encode(); + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &rent_allowance, false, already_charged + )?) }, // Stores the current block number of the current contract into the supplied buffer. @@ -1243,11 +1498,11 @@ define_env!(Env, , // `out_len_ptr` must point to a u32 value that describes the available space at // `out_ptr`. This call overwrites it with the size of the value. If the available // space at `out_ptr` is less than the size of the value a trap is triggered. - seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { - charge_gas(ctx, RuntimeToken::BlockNumber)?; - write_sandbox_output( - ctx, out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged - ) + [seal0] seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::BlockNumber)?; + Ok(ctx.write_sandbox_output( + out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged + )?) }, // Computes the SHA2 256-bit hash on the given input buffer. @@ -1270,9 +1525,9 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashSha256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, sha2_256, input_ptr, input_len, output_ptr) + [seal0] seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::HashSha256(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(sha2_256, input_ptr, input_len, output_ptr)?) }, // Computes the KECCAK 256-bit hash on the given input buffer. @@ -1295,9 +1550,9 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashKeccak256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, keccak_256, input_ptr, input_len, output_ptr) + [seal0] seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::HashKeccak256(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(keccak_256, input_ptr, input_len, output_ptr)?) }, // Computes the BLAKE2 256-bit hash on the given input buffer. @@ -1320,9 +1575,9 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer.
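All of the hash functions in this hunk share one shape: variable-length input, fixed-length output that the caller must size exactly. A hypothetical contract-side wrapper for the SHA2-256 variant (the blake2/keccak imports differ only in name and output width):

```rust
// Contract-side (Wasm) sketch; the import follows the seal0 naming above.
#[link(wasm_import_module = "seal0")]
extern "C" {
    fn seal_hash_sha2_256(input_ptr: u32, input_len: u32, output_ptr: u32);
}

pub fn sha2_256(input: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32]; // must match the hash width exactly
    // Per the docs above, the input and output buffers may overlap.
    unsafe {
        seal_hash_sha2_256(input.as_ptr() as u32, input.len() as u32, out.as_mut_ptr() as u32)
    };
    out
}
```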
- seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashBlake256(input_len))?; - compute_hash_on_intermediate_buffer(ctx, blake2_256, input_ptr, input_len, output_ptr) + [seal0] seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::HashBlake256(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(blake2_256, input_ptr, input_len, output_ptr)?) }, // Computes the BLAKE2 128-bit hash on the given input buffer. @@ -1345,63 +1600,164 @@ define_env!(Env, , // - `output_ptr`: the pointer into the linear memory where the output // data is placed. The function will write the result // directly into this buffer. - seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { - charge_gas(ctx, RuntimeToken::HashBlake128(input_len))?; - compute_hash_on_intermediate_buffer(ctx, blake2_128, input_ptr, input_len, output_ptr) + [seal0] seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + ctx.charge_gas(RuntimeCosts::HashBlake128(input_len))?; + Ok(ctx.compute_hash_on_intermediate_buffer(blake2_128, input_ptr, input_len, output_ptr)?) }, -); -/// Computes the given hash function on the supplied input. -/// -/// Reads from the sandboxed input buffer into an intermediate buffer. -/// Returns the result directly to the output buffer of the sandboxed memory. -/// -/// It is the callers responsibility to provide an output buffer that -/// is large enough to hold the expected amount of bytes returned by the -/// chosen hash function. -/// -/// # Note -/// -/// The `input` and `output` buffers may overlap. -fn compute_hash_on_intermediate_buffer<E, F, R>( - ctx: &mut Runtime<E>, - hash_fn: F, - input_ptr: u32, - input_len: u32, - output_ptr: u32, -) -> Result<(), sp_sandbox::HostError> -where - E: Ext, - F: FnOnce(&[u8]) -> R, - R: AsRef<[u8]>, -{ - // Copy input into supervisor memory. - let input = read_sandbox_memory(ctx, input_ptr, input_len)?; - // Compute the hash on the input buffer using the given hash function. - let hash = hash_fn(&input); - // Write the resulting hash back into the sandboxed output buffer. - write_sandbox_memory( + // Call into the chain extension provided by the chain if any. + // + // Handling of the input values is up to the specific chain extension and so is the + // return value. The extension can decide to use the inputs as primitive inputs or as + // in/out arguments by interpreting them as pointers. Any caller of this function + // must therefore coordinate with the chain that it targets. + // + // # Note + // + // If no chain extension exists the contract will trap with the `NoChainExtension` + // module error. + [seal0] seal_call_chain_extension( ctx, - output_ptr, - hash.as_ref(), - )?; - Ok(()) -} + func_id: u32, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + output_len_ptr: u32 + ) -> u32 => { + use crate::chain_extension::{ChainExtension, Environment, RetVal}; + if <E::T as Config>::ChainExtension::enabled() == false { + Err(Error::<E::T>::NoChainExtension)?; + } + let env = Environment::new(ctx, input_ptr, input_len, output_ptr, output_len_ptr); + match <E::T as Config>::ChainExtension::call(func_id, env)? { + RetVal::Converging(val) => Ok(val), + RetVal::Diverging{flags, data} => Err(TrapReason::Return(ReturnData { + flags: flags.bits(), + data, + })), + } + }, -/// Finds duplicates in a given vector.
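For contrast with `seal_call_runtime` further down, here is a rough sketch of what sits on the other side of `seal_call_chain_extension`. The trait's where-bounds are elided and the `func_id` value is invented; only the overall shape (match on `func_id`, write through the environment, converge with a status code) mirrors the dispatch code above:

```rust
// Hypothetical chain extension; bounds and error handling simplified.
pub struct FetchRandom;

impl ChainExtension for FetchRandom {
    fn call<E: Ext>(func_id: u32, env: Environment<E, InitState>) -> Result<RetVal, DispatchError> {
        match func_id {
            // 1101 is an arbitrary, application-chosen identifier.
            1101 => {
                let mut env = env.buf_in_buf_out();
                let random = [0u8; 32]; // stand-in for a real randomness source
                env.write(&random, false, None)?;
                Ok(RetVal::Converging(0))
            }
            _ => Err(DispatchError::Other("unknown chain extension func_id")),
        }
    }
}
```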
-/// -/// This function has complexity of O(n log n) and no additional memory is required, although -/// the order of items is not preserved. -fn has_duplicates<T: PartialEq + AsRef<[u8]>>(items: &mut Vec<T>) -> bool { - // Sort the vector - items.sort_by(|a, b| { - Ord::cmp(a.as_ref(), b.as_ref()) - }); - // And then find any two consecutive equal elements. - items.windows(2).any(|w| { - match w { - &[ref a, ref b] => a == b, - _ => false, + // Emit a custom debug message. + // + // No newlines are added to the supplied message. + // Specifying invalid UTF-8 triggers a trap. + // + // This is a no-op if debug message recording is disabled which is always the case + // when the code is executing on-chain. The message is interpreted as UTF-8 and + // appended to the debug buffer which is then supplied to the calling RPC client. + // + // # Note + // + // Even though no action is taken when debug message recording is disabled there is still + // a non-trivial overhead (and weight cost) associated with calling this function. Contract + // languages should remove calls to this function (either at runtime or compile time) when + // not being executed as an RPC. For example, they could allow users to disable logging + // through compile time flags (cargo features) for on-chain deployment. Additionally, the + // return value of this function can be cached in order to prevent further calls at runtime. + [seal0] seal_debug_message(ctx, str_ptr: u32, str_len: u32) -> ReturnCode => { + ctx.charge_gas(RuntimeCosts::DebugMessage)?; + if ctx.ext.append_debug_buffer("") { + let data = ctx.read_sandbox_memory(str_ptr, str_len)?; + let msg = core::str::from_utf8(&data) + .map_err(|_| <Error<E::T>>::DebugMessageInvalidUTF8)?; + ctx.ext.append_debug_buffer(msg); + return Ok(ReturnCode::Success); } - }) -} + Ok(ReturnCode::LoggingDisabled) + }, + + // Call some dispatchable of the runtime. + // + // This function decodes the passed in data as the overarching `Call` type of the + // runtime and dispatches it. The weight as specified in the runtime is charged + // from the gas meter. Any weight refunds made by the dispatchable are considered. + // + // The filter specified by `Config::CallFilter` is attached to the origin of + // the dispatched call. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input data is placed. + // - `input_len`: the length of the input data in bytes. + // + // # Return Value + // + // Returns `ReturnCode::Success` when the dispatchable was successfully executed and + // returned `Ok`. When the dispatchable was executed but returned an error + // `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not + // provided because it is not guaranteed to be stable. + // + // # Comparison with `ChainExtension` + // + // Just as a chain extension this API allows the runtime to extend the functionality + // of contracts. While making use of this function is generally easier it cannot be + // used in all cases. Consider writing a chain extension if you need to perform + // one of the following tasks: + // + // - Return data. + // - Provide functionality **exclusively** to contracts. + // - Provide custom weights. + // - Avoid the need to keep the `Call` data structure stable. + // + // # Unstable + // + // This function is unstable and subject to change (or removal) in the future. Do not + // deploy a contract using it to a production chain.
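Picking up the caching suggestion in the `seal_debug_message` note above, a hypothetical contract-side wrapper could remember the first `LoggingDisabled` answer and skip the host call from then on:

```rust
// Contract-side sketch; Wasm contracts are single-threaded, so a static mut
// cache is workable here, though a real language would wrap it safely.
#[link(wasm_import_module = "seal0")]
extern "C" {
    fn seal_debug_message(str_ptr: u32, str_len: u32) -> u32;
}

static mut LOGGING_DISABLED: bool = false; // cached result of the first call

pub fn debug(msg: &str) {
    unsafe {
        if LOGGING_DISABLED {
            return;
        }
        // 0 is ReturnCode::Success; anything else here means logging is off.
        if seal_debug_message(msg.as_ptr() as u32, msg.len() as u32) != 0 {
            LOGGING_DISABLED = true;
        }
    }
}
```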
+ [__unstable__] seal_call_runtime(ctx, call_ptr: u32, call_len: u32) -> ReturnCode => { + use frame_support::{dispatch::GetDispatchInfo, weights::extract_actual_weight}; + ctx.charge_gas(RuntimeCosts::CopyIn(call_len))?; + let call: <E::T as Config>::Call = ctx.read_sandbox_memory_as_unbounded( + call_ptr, call_len + )?; + let dispatch_info = call.get_dispatch_info(); + let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; + let result = ctx.ext.call_runtime(call); + let actual_weight = extract_actual_weight(&result, &dispatch_info); + ctx.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); + match result { + Ok(_) => Ok(ReturnCode::Success), + Err(_) => Ok(ReturnCode::CallRuntimeReturnedError), + } + }, + + // Recovers the ECDSA public key from the given message hash and signature. + // + // Writes the public key into the given output buffer. + // Assumes the secp256k1 curve. + // + // # Parameters + // + // - `signature_ptr`: the pointer into the linear memory where the signature + // is placed. Should be decodable as 65 bytes. Traps otherwise. + // - `message_hash_ptr`: the pointer into the linear memory where the message + // hash is placed. Should be decodable as 32 bytes. Traps otherwise. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The buffer should be 33 bytes. Traps otherwise. + // The function will write the result directly into this buffer. + // + // # Errors + // + // `ReturnCode::EcdsaRecoverFailed` + [__unstable__] seal_ecdsa_recover(ctx, signature_ptr: u32, message_hash_ptr: u32, output_ptr: u32) -> ReturnCode => { + ctx.charge_gas(RuntimeCosts::EcdsaRecovery)?; + + let mut signature: [u8; 65] = [0; 65]; + ctx.read_sandbox_memory_into_buf(signature_ptr, &mut signature)?; + let mut message_hash: [u8; 32] = [0; 32]; + ctx.read_sandbox_memory_into_buf(message_hash_ptr, &mut message_hash)?; + + let result = ctx.ext.ecdsa_recover(&signature, &message_hash); + + match result { + Ok(pub_key) => { + // Write the recovered compressed ecdsa public key back into the sandboxed output + // buffer. + ctx.write_sandbox_memory(output_ptr, pub_key.as_ref())?; + + Ok(ReturnCode::Success) + }, + Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), + } + }, +); diff --git a/frame/contracts/src/weight_info.rs b/frame/contracts/src/weight_info.rs deleted file mode 100644 index 3a0881ed78d9a..0000000000000 --- a/frame/contracts/src/weight_info.rs +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see <http://www.gnu.org/licenses/>. - -//! This module contains the `WeightInfo` trait and its unsafe implementation on `()`. - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -/// Should be implemented by automatically generated code of the benchmarking system for -/// every runtime that makes use of this pallet. -/// This trait is also implemented on `()`.
The implemention on `()` is **unsafe** and must -/// only be used during development. Proper weights can be generated by running the -/// pallet_contracts benchmark suite for the runtime in question. -pub trait WeightInfo { - fn update_schedule() -> Weight; - fn put_code(n: u32, ) -> Weight; - fn instantiate(n: u32, ) -> Weight; - fn call() -> Weight; - fn claim_surcharge() -> Weight; - fn seal_caller(r: u32, ) -> Weight; - fn seal_address(r: u32, ) -> Weight; - fn seal_gas_left(r: u32, ) -> Weight; - fn seal_balance(r: u32, ) -> Weight; - fn seal_value_transferred(r: u32, ) -> Weight; - fn seal_minimum_balance(r: u32, ) -> Weight; - fn seal_tombstone_deposit(r: u32, ) -> Weight; - fn seal_rent_allowance(r: u32, ) -> Weight; - fn seal_block_number(r: u32, ) -> Weight; - fn seal_now(r: u32, ) -> Weight; - fn seal_weight_to_fee(r: u32, ) -> Weight; - fn seal_gas(r: u32, ) -> Weight; - fn seal_input(r: u32, ) -> Weight; - fn seal_input_per_kb(n: u32, ) -> Weight; - fn seal_return(r: u32, ) -> Weight; - fn seal_return_per_kb(n: u32, ) -> Weight; - fn seal_terminate(r: u32, ) -> Weight; - fn seal_restore_to(r: u32, ) -> Weight; - fn seal_restore_to_per_delta(d: u32, ) -> Weight; - fn seal_random(r: u32, ) -> Weight; - fn seal_deposit_event(r: u32, ) -> Weight; - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight; - fn seal_set_rent_allowance(r: u32, ) -> Weight; - fn seal_set_storage(r: u32, ) -> Weight; - fn seal_set_storage_per_kb(n: u32, ) -> Weight; - fn seal_clear_storage(r: u32, ) -> Weight; - fn seal_get_storage(r: u32, ) -> Weight; - fn seal_get_storage_per_kb(n: u32, ) -> Weight; - fn seal_transfer(r: u32, ) -> Weight; - fn seal_call(r: u32, ) -> Weight; - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight; - fn seal_instantiate(r: u32, ) -> Weight; - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight; - fn seal_hash_sha2_256(r: u32, ) -> Weight; - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; - fn seal_hash_keccak_256(r: u32, ) -> Weight; - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight; - fn seal_hash_blake2_256(r: u32, ) -> Weight; - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight; - fn seal_hash_blake2_128(r: u32, ) -> Weight; - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight; -} - -/// Unsafe implementation that must only be used for development. 
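Every function in the deleted implementation below follows the same affine pattern: a base cost, a per-unit slope, and fixed database access costs, combined with saturating arithmetic so a hostile input cannot overflow the weight. Evaluated by hand with the `put_code` constants from this file:

```rust
// Sketch of how one generated formula evaluates; constants copied from
// the deleted `put_code` below.
use frame_support::weights::{constants::RocksDbWeight as DbWeight, Weight};

fn put_code_weight(n: u32) -> Weight {
    (263_409_000 as Weight)                                                  // base cost
        .saturating_add((169_269_000 as Weight).saturating_mul(n as Weight)) // per-unit slope
        .saturating_add(DbWeight::get().reads(1 as Weight))                  // one storage read
        .saturating_add(DbWeight::get().writes(2 as Weight))                 // two storage writes
}
```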
-impl WeightInfo for () { - fn update_schedule() -> Weight { - (45000000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn put_code(n: u32, ) -> Weight { - (263409000 as Weight) - .saturating_add((169269000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn instantiate(n: u32, ) -> Weight { - (309311000 as Weight) - .saturating_add((1018000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(7 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - } - fn call() -> Weight { - (291000000 as Weight) - .saturating_add(DbWeight::get().reads(6 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn claim_surcharge() -> Weight { - (766000000 as Weight) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn seal_caller(r: u32, ) -> Weight { - (182241000 as Weight) - .saturating_add((697428000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_address(r: u32, ) -> Weight { - (193846000 as Weight) - .saturating_add((695989000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_gas_left(r: u32, ) -> Weight { - (166031000 as Weight) - .saturating_add((702533000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_balance(r: u32, ) -> Weight { - (251892000 as Weight) - .saturating_add((1392900000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_value_transferred(r: u32, ) -> Weight { - (178472000 as Weight) - .saturating_add((694921000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_minimum_balance(r: u32, ) -> Weight { - (191301000 as Weight) - .saturating_add((697871000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_tombstone_deposit(r: u32, ) -> Weight { - (241315000 as Weight) - .saturating_add((686403000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_rent_allowance(r: u32, ) -> Weight { - (104958000 as Weight) - .saturating_add((1459573000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_block_number(r: u32, ) -> Weight { - (174140000 as Weight) - .saturating_add((698152000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_now(r: u32, ) -> Weight { - (203157000 as Weight) - .saturating_add((713595000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_weight_to_fee(r: u32, ) -> Weight { - (178413000 as Weight) - .saturating_add((1071275000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_gas(r: u32, ) -> Weight { - (171395000 as Weight) - .saturating_add((371653000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_input(r: u32, ) -> Weight { - (184462000 as Weight) - .saturating_add((10538000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn 
seal_input_per_kb(n: u32, ) -> Weight { - (194668000 as Weight) - .saturating_add((301000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_return(r: u32, ) -> Weight { - (175538000 as Weight) - .saturating_add((7462000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_return_per_kb(n: u32, ) -> Weight { - (189759000 as Weight) - .saturating_add((754000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_terminate(r: u32, ) -> Weight { - (184385000 as Weight) - .saturating_add((542615000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to(r: u32, ) -> Weight { - (380385000 as Weight) - .saturating_add((160308000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) - } - fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (0 as Weight) - .saturating_add((4786197000 as Weight).saturating_mul(d as Weight)) - .saturating_add(DbWeight::get().reads(7 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) - .saturating_add(DbWeight::get().writes(5 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) - } - fn seal_random(r: u32, ) -> Weight { - (187944000 as Weight) - .saturating_add((1592530000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_deposit_event(r: u32, ) -> Weight { - (126517000 as Weight) - .saturating_add((2346945000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (2953428000 as Weight) - .saturating_add((1117651000 as Weight).saturating_mul(t as Weight)) - .saturating_add((299890000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) - } - fn seal_set_rent_allowance(r: u32, ) -> Weight { - (142094000 as Weight) - .saturating_add((1726665000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn seal_set_storage(r: u32, ) -> Weight { - (4091409000 as Weight) - .saturating_add((26440116000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (3683270000 as Weight) - .saturating_add((233826000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn seal_clear_storage(r: u32, ) -> 
Weight { - (0 as Weight) - .saturating_add((7152747000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage(r: u32, ) -> Weight { - (19007000 as Weight) - .saturating_add((1774675000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (1477332000 as Weight) - .saturating_add((176601000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - } - fn seal_transfer(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((10274385000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call(r: u32, ) -> Weight { - (241916000 as Weight) - .saturating_add((14633108000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) - } - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (15664107000 as Weight) - .saturating_add((8529984000 as Weight).saturating_mul(t as Weight)) - .saturating_add((52860000 as Weight).saturating_mul(i as Weight)) - .saturating_add((81175000 as Weight).saturating_mul(o as Weight)) - .saturating_add(DbWeight::get().reads(105 as Weight)) - .saturating_add(DbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) - .saturating_add(DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) - } - fn seal_instantiate(r: u32, ) -> Weight { - (0 as Weight) - .saturating_add((32247550000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(1 as Weight)) - .saturating_add(DbWeight::get().writes((200 as Weight).saturating_mul(r as Weight))) - } - fn seal_instantiate_per_input_output_kb(i: u32, o: u32, ) -> Weight { - (34376003000 as Weight) - .saturating_add((151350000 as Weight).saturating_mul(i as Weight)) - .saturating_add((82364000 as Weight).saturating_mul(o as Weight)) - .saturating_add(DbWeight::get().reads(207 as Weight)) - .saturating_add(DbWeight::get().writes(202 as Weight)) - } - fn seal_hash_sha2_256(r: u32, ) -> Weight { - (164203000 as Weight) - .saturating_add((565206000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (0 as Weight) - .saturating_add((330063000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256(r: u32, ) -> Weight { - (219038000 as Weight) - .saturating_add((567992000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (434654000 as Weight) - 
.saturating_add((271134000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256(r: u32, ) -> Weight { - (116374000 as Weight) - .saturating_add((566612000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (756028000 as Weight) - .saturating_add((150363000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128(r: u32, ) -> Weight { - (150126000 as Weight) - .saturating_add((564827000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } - fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (1021689000 as Weight) - .saturating_add((149452000 as Weight).saturating_mul(n as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - } -} diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs new file mode 100644 index 0000000000000..1cebcb3b5d9a0 --- /dev/null +++ b/frame/contracts/src/weights.rs @@ -0,0 +1,1672 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_contracts +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-09-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_contracts +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/contracts/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_contracts. 
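Before the regenerated trait below, it may help to recall how these weights are consumed: the pallet only ever charges through the `WeightInfo` trait, so a runtime picks an implementation once, typically the benchmarked `SubstrateWeight`. A sketch of a consumer (`Runtime` is a stand-in for a concrete runtime):

```rust
// `pallet_contracts::weights::{WeightInfo, SubstrateWeight}` are the items
// introduced by this new file.
use frame_support::weights::Weight;
use pallet_contracts::weights::WeightInfo;

fn weight_of_call<W: WeightInfo>() -> Weight {
    W::call() // fixed cost of the `call` dispatchable under the chosen impl
}

// e.g. weight_of_call::<pallet_contracts::weights::SubstrateWeight<Runtime>>()
```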
+pub trait WeightInfo { + fn on_initialize() -> Weight; + fn on_initialize_per_trie_key(k: u32, ) -> Weight; + fn on_initialize_per_queue_item(q: u32, ) -> Weight; + fn instrument(c: u32, ) -> Weight; + fn code_load(c: u32, ) -> Weight; + fn code_refcount(c: u32, ) -> Weight; + fn instantiate_with_code(c: u32, s: u32, ) -> Weight; + fn instantiate(s: u32, ) -> Weight; + fn call() -> Weight; + fn seal_caller(r: u32, ) -> Weight; + fn seal_address(r: u32, ) -> Weight; + fn seal_gas_left(r: u32, ) -> Weight; + fn seal_balance(r: u32, ) -> Weight; + fn seal_value_transferred(r: u32, ) -> Weight; + fn seal_minimum_balance(r: u32, ) -> Weight; + fn seal_tombstone_deposit(r: u32, ) -> Weight; + fn seal_block_number(r: u32, ) -> Weight; + fn seal_now(r: u32, ) -> Weight; + fn seal_weight_to_fee(r: u32, ) -> Weight; + fn seal_gas(r: u32, ) -> Weight; + fn seal_input(r: u32, ) -> Weight; + fn seal_input_per_kb(n: u32, ) -> Weight; + fn seal_return(r: u32, ) -> Weight; + fn seal_return_per_kb(n: u32, ) -> Weight; + fn seal_terminate(r: u32, ) -> Weight; + fn seal_random(r: u32, ) -> Weight; + fn seal_deposit_event(r: u32, ) -> Weight; + fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight; + fn seal_debug_message(r: u32, ) -> Weight; + fn seal_set_storage(r: u32, ) -> Weight; + fn seal_set_storage_per_kb(n: u32, ) -> Weight; + fn seal_clear_storage(r: u32, ) -> Weight; + fn seal_get_storage(r: u32, ) -> Weight; + fn seal_get_storage_per_kb(n: u32, ) -> Weight; + fn seal_transfer(r: u32, ) -> Weight; + fn seal_call(r: u32, ) -> Weight; + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight; + fn seal_instantiate(r: u32, ) -> Weight; + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight; + fn seal_hash_sha2_256(r: u32, ) -> Weight; + fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_keccak_256(r: u32, ) -> Weight; + fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_blake2_256(r: u32, ) -> Weight; + fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight; + fn seal_hash_blake2_128(r: u32, ) -> Weight; + fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight; + fn seal_ecdsa_recover(r: u32, ) -> Weight; + fn instr_i64const(r: u32, ) -> Weight; + fn instr_i64load(r: u32, ) -> Weight; + fn instr_i64store(r: u32, ) -> Weight; + fn instr_select(r: u32, ) -> Weight; + fn instr_if(r: u32, ) -> Weight; + fn instr_br(r: u32, ) -> Weight; + fn instr_br_if(r: u32, ) -> Weight; + fn instr_br_table(r: u32, ) -> Weight; + fn instr_br_table_per_entry(e: u32, ) -> Weight; + fn instr_call(r: u32, ) -> Weight; + fn instr_call_indirect(r: u32, ) -> Weight; + fn instr_call_indirect_per_param(p: u32, ) -> Weight; + fn instr_local_get(r: u32, ) -> Weight; + fn instr_local_set(r: u32, ) -> Weight; + fn instr_local_tee(r: u32, ) -> Weight; + fn instr_global_get(r: u32, ) -> Weight; + fn instr_global_set(r: u32, ) -> Weight; + fn instr_memory_current(r: u32, ) -> Weight; + fn instr_memory_grow(r: u32, ) -> Weight; + fn instr_i64clz(r: u32, ) -> Weight; + fn instr_i64ctz(r: u32, ) -> Weight; + fn instr_i64popcnt(r: u32, ) -> Weight; + fn instr_i64eqz(r: u32, ) -> Weight; + fn instr_i64extendsi32(r: u32, ) -> Weight; + fn instr_i64extendui32(r: u32, ) -> Weight; + fn instr_i32wrapi64(r: u32, ) -> Weight; + fn instr_i64eq(r: u32, ) -> Weight; + fn instr_i64ne(r: u32, ) -> Weight; + fn instr_i64lts(r: u32, ) -> Weight; + fn instr_i64ltu(r: u32, ) -> Weight; + fn instr_i64gts(r: u32, ) -> Weight; + fn instr_i64gtu(r: u32, ) -> Weight; + fn 
instr_i64les(r: u32, ) -> Weight; + fn instr_i64leu(r: u32, ) -> Weight; + fn instr_i64ges(r: u32, ) -> Weight; + fn instr_i64geu(r: u32, ) -> Weight; + fn instr_i64add(r: u32, ) -> Weight; + fn instr_i64sub(r: u32, ) -> Weight; + fn instr_i64mul(r: u32, ) -> Weight; + fn instr_i64divs(r: u32, ) -> Weight; + fn instr_i64divu(r: u32, ) -> Weight; + fn instr_i64rems(r: u32, ) -> Weight; + fn instr_i64remu(r: u32, ) -> Weight; + fn instr_i64and(r: u32, ) -> Weight; + fn instr_i64or(r: u32, ) -> Weight; + fn instr_i64xor(r: u32, ) -> Weight; + fn instr_i64shl(r: u32, ) -> Weight; + fn instr_i64shrs(r: u32, ) -> Weight; + fn instr_i64shru(r: u32, ) -> Weight; + fn instr_i64rotl(r: u32, ) -> Weight; + fn instr_i64rotr(r: u32, ) -> Weight; +} + +/// Weights for pallet_contracts using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Contracts DeletionQueue (r:1 w:0) + fn on_initialize() -> Weight { + (3_226_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + // Storage: Skipped Metadata (r:0 w:0) + fn on_initialize_per_trie_key(k: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((2_178_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + } + // Storage: Contracts DeletionQueue (r:1 w:0) + fn on_initialize_per_queue_item(q: u32, ) -> Weight { + (78_329_000 as Weight) + // Standard Error: 1_000 + .saturating_add((353_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts PristineCode (r:1 w:0) + // Storage: Contracts CodeStorage (r:0 w:1) + fn instrument(c: u32, ) -> Weight { + (37_190_000 as Weight) + // Standard Error: 80_000 + .saturating_add((72_791_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts CodeStorage (r:1 w:0) + fn code_load(c: u32, ) -> Weight { + (6_191_000 as Weight) + // Standard Error: 0 + .saturating_add((1_426_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + // Storage: Contracts CodeStorage (r:1 w:1) + fn code_refcount(c: u32, ) -> Weight { + (10_333_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_275_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts AccountCounter (r:1 w:0) + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:1) + // Storage: Contracts PristineCode (r:0 w:1) + fn instantiate_with_code(c: u32, s: u32, ) -> Weight { + (438_556_000 as Weight) + // Standard Error: 147_000 + .saturating_add((179_307_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 9_000 + .saturating_add((2_159_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Contracts CodeStorage (r:1 w:1) + // Storage: Contracts AccountCounter (r:1 
w:0) + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:1) + fn instantiate(s: u32, ) -> Weight { + (186_776_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_033_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:1) + fn call() -> Weight { + (159_247_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_caller(r: u32, ) -> Weight { + (422_263_000 as Weight) + // Standard Error: 159_000 + .saturating_add((125_490_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_address(r: u32, ) -> Weight { + (423_009_000 as Weight) + // Standard Error: 183_000 + .saturating_add((125_795_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_gas_left(r: u32, ) -> Weight { + (429_297_000 as Weight) + // Standard Error: 164_000 + .saturating_add((124_324_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:0) + fn seal_balance(r: u32, ) -> Weight { + (442_330_000 as Weight) + // Standard Error: 187_000 + .saturating_add((354_665_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_value_transferred(r: u32, ) -> Weight { + (411_893_000 as Weight) + // Standard Error: 178_000 + .saturating_add((125_971_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_minimum_balance(r: u32, ) -> Weight { + (413_273_000 as Weight) + // Standard Error: 180_000 + .saturating_add((125_103_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_tombstone_deposit(r: u32, ) -> Weight { + (415_613_000 as Weight) + // Standard Error: 192_000 
+ .saturating_add((126_106_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_block_number(r: u32, ) -> Weight { + (414_718_000 as Weight) + // Standard Error: 170_000 + .saturating_add((124_962_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_now(r: u32, ) -> Weight { + (419_120_000 as Weight) + // Standard Error: 178_000 + .saturating_add((125_188_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) + fn seal_weight_to_fee(r: u32, ) -> Weight { + (419_125_000 as Weight) + // Standard Error: 216_000 + .saturating_add((290_592_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_gas(r: u32, ) -> Weight { + (149_609_000 as Weight) + // Standard Error: 117_000 + .saturating_add((56_860_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_input(r: u32, ) -> Weight { + (423_570_000 as Weight) + // Standard Error: 151_000 + .saturating_add((106_985_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_input_per_kb(n: u32, ) -> Weight { + (566_496_000 as Weight) + // Standard Error: 6_000 + .saturating_add((38_091_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_return(r: u32, ) -> Weight { + (406_811_000 as Weight) + // Standard Error: 1_833_000 + .saturating_add((6_551_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_return_per_kb(n: u32, ) -> Weight { + (412_094_000 as Weight) + // Standard Error: 1_000 + .saturating_add((631_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: Contracts DeletionQueue (r:1 w:1) + // Storage: System Account (r:2 w:2) + fn seal_terminate(r: u32, ) -> Weight { + (415_716_000 as Weight) + // Standard Error: 1_608_000 + .saturating_add((72_648_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + fn seal_random(r: u32, ) -> Weight { + (421_387_000 as Weight) + // Standard Error: 275_000 + .saturating_add((393_452_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_deposit_event(r: u32, ) -> Weight { + (428_591_000 as Weight) + // Standard Error: 293_000 + .saturating_add((690_833_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:100 w:100) + fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { + (1_245_676_000 as Weight) + // Standard Error: 2_636_000 + .saturating_add((484_691_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 519_000 + .saturating_add((165_836_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_debug_message(r: u32, ) -> Weight { + (162_162_000 as Weight) + // Standard Error: 127_000 + .saturating_add((72_828_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Skipped Metadata (r:0 w:0) + fn seal_set_storage(r: u32, ) -> Weight { + (399_301_000 as Weight) + // Standard Error: 221_000 + .saturating_add((245_222_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:0 w:1) + fn seal_set_storage_per_kb(n: u32, ) -> Weight { 
+ (623_011_000 as Weight) + // Standard Error: 246_000 + .saturating_add((72_051_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Skipped Metadata (r:0 w:0) + fn seal_clear_storage(r: u32, ) -> Weight { + (445_102_000 as Weight) + // Standard Error: 247_000 + .saturating_add((224_384_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Skipped Metadata (r:0 w:0) + fn seal_get_storage(r: u32, ) -> Weight { + (290_227_000 as Weight) + // Standard Error: 694_000 + .saturating_add((547_193_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:0) + fn seal_get_storage_per_kb(n: u32, ) -> Weight { + (737_772_000 as Weight) + // Standard Error: 267_000 + .saturating_add((112_216_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:101 w:101) + fn seal_transfer(r: u32, ) -> Weight { + (383_402_000 as Weight) + // Standard Error: 2_184_000 + .saturating_add((4_335_681_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_call(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 11_019_000 + .saturating_add((39_806_777_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:101 w:101) + // Storage: Contracts CodeStorage (r:2 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:101 w:101) + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { + (38_662_592_000 as Weight) + // Standard Error: 52_762_000 + .saturating_add((3_888_801_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 18_000 + .saturating_add((63_571_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 20_000 + .saturating_add((101_610_000 as Weight).saturating_mul(o as Weight)) + .saturating_add(T::DbWeight::get().reads(104 as Weight)) + .saturating_add(T::DbWeight::get().reads((101 as 
Weight).saturating_mul(t as Weight))) + .saturating_add(T::DbWeight::get().writes(101 as Weight)) + .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) + // Storage: System Account (r:101 w:101) + fn seal_instantiate(r: u32, ) -> Weight { + (626_132_000 as Weight) + // Standard Error: 39_245_000 + .saturating_add((46_398_859_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:101 w:101) + // Storage: Contracts CodeStorage (r:2 w:1) + // Storage: Timestamp Now (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) + // Storage: System Account (r:101 w:101) + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (46_649_369_000 as Weight) + // Standard Error: 26_000 + .saturating_add((63_469_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 26_000 + .saturating_add((100_694_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 26_000 + .saturating_add((201_705_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(206 as Weight)) + .saturating_add(T::DbWeight::get().writes(204 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_sha2_256(r: u32, ) -> Weight { + (417_820_000 as Weight) + // Standard Error: 160_000 + .saturating_add((133_795_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { + (609_012_000 as Weight) + // Standard Error: 23_000 + .saturating_add((499_227_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_keccak_256(r: u32, ) -> Weight { + (419_043_000 as Weight) + // Standard Error: 177_000 + .saturating_add((140_704_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { + (564_451_000 as Weight) + // Standard Error: 19_000 + .saturating_add((346_948_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_256(r: u32, ) -> Weight { + 
(420_951_000 as Weight) + // Standard Error: 163_000 + .saturating_add((113_596_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { + (563_168_000 as Weight) + // Standard Error: 17_000 + .saturating_add((164_114_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_128(r: u32, ) -> Weight { + (418_794_000 as Weight) + // Standard Error: 167_000 + .saturating_add((113_205_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { + (584_668_000 as Weight) + // Standard Error: 15_000 + .saturating_add((164_127_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_ecdsa_recover(r: u32, ) -> Weight { + (435_443_000 as Weight) + // Standard Error: 1_408_000 + .saturating_add((15_624_877_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn instr_i64const(r: u32, ) -> Weight { + (45_937_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64load(r: u32, ) -> Weight { + (44_001_000 as Weight) + // Standard Error: 11_000 + .saturating_add((2_412_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64store(r: u32, ) -> Weight { + (43_157_000 as Weight) + // Standard Error: 12_000 + .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_select(r: u32, ) -> Weight { + (48_475_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_604_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_if(r: u32, ) -> Weight { + (50_649_000 as Weight) + // Standard Error: 12_000 + .saturating_add((2_553_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br(r: u32, ) -> Weight { + (48_433_000 as Weight) + // Standard Error: 8_000 + .saturating_add((1_670_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_if(r: u32, ) -> Weight { + (49_244_000 as Weight) + // Standard Error: 16_000 + .saturating_add((1_946_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table(r: u32, ) -> Weight { + (46_117_000 as Weight) + // Standard Error: 17_000 + .saturating_add((2_387_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table_per_entry(_e: u32, ) -> Weight { + (55_204_000 as Weight) + } + fn instr_call(r: u32, ) -> Weight { + (43_651_000 as Weight) + // Standard Error: 26_000 + .saturating_add((19_163_000 as Weight).saturating_mul(r as Weight)) + } + fn 
instr_call_indirect(r: u32, ) -> Weight { + (54_063_000 as Weight) + // Standard Error: 32_000 + .saturating_add((27_970_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_call_indirect_per_param(p: u32, ) -> Weight { + (88_527_000 as Weight) + // Standard Error: 6_000 + .saturating_add((958_000 as Weight).saturating_mul(p as Weight)) + } + fn instr_local_get(r: u32, ) -> Weight { + (55_066_000 as Weight) + // Standard Error: 12_000 + .saturating_add((682_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_set(r: u32, ) -> Weight { + (55_298_000 as Weight) + // Standard Error: 13_000 + .saturating_add((778_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_tee(r: u32, ) -> Weight { + (56_302_000 as Weight) + // Standard Error: 11_000 + .saturating_add((1_079_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_get(r: u32, ) -> Weight { + (71_567_000 as Weight) + // Standard Error: 11_000 + .saturating_add((1_107_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_set(r: u32, ) -> Weight { + (71_186_000 as Weight) + // Standard Error: 12_000 + .saturating_add((1_151_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_current(r: u32, ) -> Weight { + (46_240_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_044_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_grow(r: u32, ) -> Weight { + (52_369_000 as Weight) + // Standard Error: 2_508_000 + .saturating_add((615_448_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64clz(r: u32, ) -> Weight { + (47_623_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ctz(r: u32, ) -> Weight { + (47_670_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64popcnt(r: u32, ) -> Weight { + (47_508_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eqz(r: u32, ) -> Weight { + (48_109_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_580_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendsi32(r: u32, ) -> Weight { + (55_270_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_102_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendui32(r: u32, ) -> Weight { + (55_093_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i32wrapi64(r: u32, ) -> Weight { + (48_265_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_573_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eq(r: u32, ) -> Weight { + (48_733_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_088_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ne(r: u32, ) -> Weight { + (48_831_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_085_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64lts(r: u32, ) -> Weight { + (49_147_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_056_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ltu(r: u32, ) -> Weight { + (49_596_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_049_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64gts(r: u32, ) -> Weight { + (49_872_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_038_000 as Weight).saturating_mul(r as Weight)) + } + fn 
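The `instr_*` rows benchmark single wasm instructions; the constant term is dominated by harness setup, so the per-repetition slope is the number that matters when comparing instructions. A self-contained sanity check on three rows above (bases and slopes copied verbatim; the comparison itself is illustrative, not part of the pallet):

```rust
type Weight = u64;

// base + slope * repetitions, as in every instr_* row in this table
fn instr_weight(base: Weight, slope: Weight, r: u32) -> Weight {
    base.saturating_add(slope.saturating_mul(r as Weight))
}

fn main() {
    let const_push = instr_weight(45_937_000, 1_108_000, 1_000); // instr_i64const
    let mem_load = instr_weight(44_001_000, 2_412_000, 1_000); // instr_i64load
    let mem_grow = instr_weight(52_369_000, 615_448_000, 1_000); // instr_memory_grow
    assert!(const_push < mem_load && mem_load < mem_grow);
    // memory.grow is fitted at roughly 250x the per-op cost of a load, which
    // is presumably why it gets its own benchmark instead of a shared one.
    println!("{} {} {}", const_push, mem_load, mem_grow);
}
```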
instr_i64gtu(r: u32, ) -> Weight { + (48_843_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_081_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64les(r: u32, ) -> Weight { + (48_765_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64leu(r: u32, ) -> Weight { + (48_720_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_083_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ges(r: u32, ) -> Weight { + (48_736_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_097_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64geu(r: u32, ) -> Weight { + (48_772_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_093_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64add(r: u32, ) -> Weight { + (48_827_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_082_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64sub(r: u32, ) -> Weight { + (48_961_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_072_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64mul(r: u32, ) -> Weight { + (49_069_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_067_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divs(r: u32, ) -> Weight { + (49_035_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divu(r: u32, ) -> Weight { + (48_842_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_449_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rems(r: u32, ) -> Weight { + (48_536_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_723_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64remu(r: u32, ) -> Weight { + (48_851_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_432_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64and(r: u32, ) -> Weight { + (48_624_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_093_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64or(r: u32, ) -> Weight { + (49_348_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64xor(r: u32, ) -> Weight { + (49_112_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_055_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shl(r: u32, ) -> Weight { + (49_654_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_051_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shrs(r: u32, ) -> Weight { + (48_848_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shru(r: u32, ) -> Weight { + (49_455_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_054_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotl(r: u32, ) -> Weight { + (49_640_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_048_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotr(r: u32, ) -> Weight { + (49_498_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_068_000 as Weight).saturating_mul(r as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Contracts DeletionQueue (r:1 w:0) + fn on_initialize() -> Weight { + (3_226_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + // 
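At this point the file closes the runtime-facing impl and, as the comment says, repeats every function for `()`, swapping `T::DbWeight` for the hardcoded `RocksDbWeight` constants. A compressed sketch of that double-impl pattern, using the `on_initialize` numbers above; everything except the `WeightInfo`/`SubstrateWeight` names is a simplified stand-in, and the DB constant is illustrative only:

```rust
type Weight = u64;

pub trait WeightInfo {
    fn on_initialize() -> Weight;
}

// Runtime-facing impl: DB costs come from the configured backend `T`.
pub struct SubstrateWeight<T>(core::marker::PhantomData<T>);

pub trait DbBackend {
    const READ: Weight;
}

impl<T: DbBackend> WeightInfo for SubstrateWeight<T> {
    fn on_initialize() -> Weight {
        // one DeletionQueue read, per the storage annotation
        (3_226_000 as Weight).saturating_add(T::READ)
    }
}

// Fallback impl on `()`: fixed RocksDB-style constants, kept so tests and
// older runtimes that never set a WeightInfo type keep compiling.
impl WeightInfo for () {
    fn on_initialize() -> Weight {
        const ROCKSDB_READ: Weight = 25_000_000; // stand-in constant
        (3_226_000 as Weight).saturating_add(ROCKSDB_READ)
    }
}
```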
Storage: Skipped Metadata (r:0 w:0) + fn on_initialize_per_trie_key(k: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 2_000 + .saturating_add((2_178_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + } + // Storage: Contracts DeletionQueue (r:1 w:0) + fn on_initialize_per_queue_item(q: u32, ) -> Weight { + (78_329_000 as Weight) + // Standard Error: 1_000 + .saturating_add((353_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts PristineCode (r:1 w:0) + // Storage: Contracts CodeStorage (r:0 w:1) + fn instrument(c: u32, ) -> Weight { + (37_190_000 as Weight) + // Standard Error: 80_000 + .saturating_add((72_791_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts CodeStorage (r:1 w:0) + fn code_load(c: u32, ) -> Weight { + (6_191_000 as Weight) + // Standard Error: 0 + .saturating_add((1_426_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + // Storage: Contracts CodeStorage (r:1 w:1) + fn code_refcount(c: u32, ) -> Weight { + (10_333_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_275_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts AccountCounter (r:1 w:0) + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:1) + // Storage: Contracts PristineCode (r:0 w:1) + fn instantiate_with_code(c: u32, s: u32, ) -> Weight { + (438_556_000 as Weight) + // Standard Error: 147_000 + .saturating_add((179_307_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 9_000 + .saturating_add((2_159_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Contracts CodeStorage (r:1 w:1) + // Storage: Contracts AccountCounter (r:1 w:0) + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:1) + fn instantiate(s: u32, ) -> Weight { + (186_776_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_033_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:1) + fn call() -> Weight { + (159_247_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_caller(r: u32, ) -> Weight { + (422_263_000 as Weight) + // Standard Error: 159_000 + .saturating_add((125_490_000 as Weight).saturating_mul(r as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_address(r: u32, ) -> Weight { + (423_009_000 as Weight) + // Standard Error: 183_000 + .saturating_add((125_795_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_gas_left(r: u32, ) -> Weight { + (429_297_000 as Weight) + // Standard Error: 164_000 + .saturating_add((124_324_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:1 w:0) + fn seal_balance(r: u32, ) -> Weight { + (442_330_000 as Weight) + // Standard Error: 187_000 + .saturating_add((354_665_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_value_transferred(r: u32, ) -> Weight { + (411_893_000 as Weight) + // Standard Error: 178_000 + .saturating_add((125_971_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_minimum_balance(r: u32, ) -> Weight { + (413_273_000 as Weight) + // Standard Error: 180_000 + .saturating_add((125_103_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_tombstone_deposit(r: u32, ) -> Weight { + (415_613_000 as Weight) + // Standard Error: 192_000 + .saturating_add((126_106_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_block_number(r: u32, ) -> Weight { + (414_718_000 as Weight) + // Standard Error: 170_000 + .saturating_add((124_962_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_now(r: u32, ) -> Weight { + (419_120_000 as Weight) + // Standard Error: 178_000 + .saturating_add((125_188_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as 
Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) + fn seal_weight_to_fee(r: u32, ) -> Weight { + (419_125_000 as Weight) + // Standard Error: 216_000 + .saturating_add((290_592_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_gas(r: u32, ) -> Weight { + (149_609_000 as Weight) + // Standard Error: 117_000 + .saturating_add((56_860_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_input(r: u32, ) -> Weight { + (423_570_000 as Weight) + // Standard Error: 151_000 + .saturating_add((106_985_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_input_per_kb(n: u32, ) -> Weight { + (566_496_000 as Weight) + // Standard Error: 6_000 + .saturating_add((38_091_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_return(r: u32, ) -> Weight { + (406_811_000 as Weight) + // Standard Error: 1_833_000 + .saturating_add((6_551_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_return_per_kb(n: u32, ) -> Weight { + (412_094_000 as Weight) + // Standard Error: 1_000 + .saturating_add((631_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: Contracts DeletionQueue (r:1 w:1) + // Storage: System Account (r:2 w:2) + fn seal_terminate(r: u32, ) -> Weight { + (415_716_000 as Weight) + // Standard Error: 1_608_000 + .saturating_add((72_648_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((4 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) + fn seal_random(r: u32, ) -> Weight { + (421_387_000 as 
Weight) + // Standard Error: 275_000 + .saturating_add((393_452_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_deposit_event(r: u32, ) -> Weight { + (428_591_000 as Weight) + // Standard Error: 293_000 + .saturating_add((690_833_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:100 w:100) + fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { + (1_245_676_000 as Weight) + // Standard Error: 2_636_000 + .saturating_add((484_691_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 519_000 + .saturating_add((165_836_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_debug_message(r: u32, ) -> Weight { + (162_162_000 as Weight) + // Standard Error: 127_000 + .saturating_add((72_828_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Skipped Metadata (r:0 w:0) + fn seal_set_storage(r: u32, ) -> Weight { + (399_301_000 as Weight) + // Standard Error: 221_000 + .saturating_add((245_222_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:0 w:1) + fn seal_set_storage_per_kb(n: u32, ) -> Weight { + (623_011_000 as Weight) + // Standard Error: 246_000 + .saturating_add((72_051_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Skipped Metadata (r:0 w:0) + fn seal_clear_storage(r: u32, ) -> Weight { + (445_102_000 as Weight) + // Standard Error: 247_000 + .saturating_add((224_384_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Skipped Metadata (r:0 w:0) + fn seal_get_storage(r: u32, ) -> Weight { + (290_227_000 as Weight) + // Standard Error: 694_000 + .saturating_add((547_193_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + 
.saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:0) + fn seal_get_storage_per_kb(n: u32, ) -> Weight { + (737_772_000 as Weight) + // Standard Error: 267_000 + .saturating_add((112_216_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:101 w:101) + fn seal_transfer(r: u32, ) -> Weight { + (383_402_000 as Weight) + // Standard Error: 2_184_000 + .saturating_add((4_335_681_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_call(r: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 11_019_000 + .saturating_add((39_806_777_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) + } + // Storage: Contracts ContractInfoOf (r:101 w:101) + // Storage: Contracts CodeStorage (r:2 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: System Account (r:101 w:101) + fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { + (38_662_592_000 as Weight) + // Standard Error: 52_762_000 + .saturating_add((3_888_801_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 18_000 + .saturating_add((63_571_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 20_000 + .saturating_add((101_610_000 as Weight).saturating_mul(o as Weight)) + .saturating_add(RocksDbWeight::get().reads(104 as Weight)) + .saturating_add(RocksDbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) + .saturating_add(RocksDbWeight::get().writes(101 as Weight)) + .saturating_add(RocksDbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) + // Storage: System Account (r:101 w:101) + fn seal_instantiate(r: u32, ) -> Weight { + (626_132_000 as Weight) + // Standard Error: 39_245_000 + .saturating_add((46_398_859_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) + } + // 
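Rows with several parameters, such as `seal_call_per_transfer_input_output_kb` above, are fitted with one slope per component (each carrying its own standard error), summed onto a shared base. A sketch of that shape with the constants copied from the row and the DB terms elided:

```rust
type Weight = u64;

// One fitted slope per component: transfers (t), input KiB (i), output KiB (o).
fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32) -> Weight {
    (38_662_592_000 as Weight)
        .saturating_add((3_888_801_000 as Weight).saturating_mul(t as Weight))
        .saturating_add((63_571_000 as Weight).saturating_mul(i as Weight))
        .saturating_add((101_610_000 as Weight).saturating_mul(o as Weight))
    // The reads/writes charges follow, exactly as in single-parameter rows.
}
```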
Storage: Contracts ContractInfoOf (r:101 w:101) + // Storage: Contracts CodeStorage (r:2 w:1) + // Storage: Timestamp Now (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) + // Storage: System Account (r:101 w:101) + fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { + (46_649_369_000 as Weight) + // Standard Error: 26_000 + .saturating_add((63_469_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 26_000 + .saturating_add((100_694_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 26_000 + .saturating_add((201_705_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(206 as Weight)) + .saturating_add(RocksDbWeight::get().writes(204 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_sha2_256(r: u32, ) -> Weight { + (417_820_000 as Weight) + // Standard Error: 160_000 + .saturating_add((133_795_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { + (609_012_000 as Weight) + // Standard Error: 23_000 + .saturating_add((499_227_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_keccak_256(r: u32, ) -> Weight { + (419_043_000 as Weight) + // Standard Error: 177_000 + .saturating_add((140_704_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { + (564_451_000 as Weight) + // Standard Error: 19_000 + .saturating_add((346_948_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_256(r: u32, ) -> Weight { + (420_951_000 as Weight) + // Standard Error: 163_000 + .saturating_add((113_596_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { + (563_168_000 as Weight) + // Standard Error: 17_000 + .saturating_add((164_114_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_128(r: u32, ) -> Weight { + (418_794_000 as 
Weight) + // Standard Error: 167_000 + .saturating_add((113_205_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { + (584_668_000 as Weight) + // Standard Error: 15_000 + .saturating_add((164_127_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Contracts ContractInfoOf (r:1 w:1) + // Storage: Contracts CodeStorage (r:1 w:0) + // Storage: Timestamp Now (r:1 w:0) + fn seal_ecdsa_recover(r: u32, ) -> Weight { + (435_443_000 as Weight) + // Standard Error: 1_408_000 + .saturating_add((15_624_877_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn instr_i64const(r: u32, ) -> Weight { + (45_937_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64load(r: u32, ) -> Weight { + (44_001_000 as Weight) + // Standard Error: 11_000 + .saturating_add((2_412_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64store(r: u32, ) -> Weight { + (43_157_000 as Weight) + // Standard Error: 12_000 + .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_select(r: u32, ) -> Weight { + (48_475_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_604_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_if(r: u32, ) -> Weight { + (50_649_000 as Weight) + // Standard Error: 12_000 + .saturating_add((2_553_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br(r: u32, ) -> Weight { + (48_433_000 as Weight) + // Standard Error: 8_000 + .saturating_add((1_670_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_if(r: u32, ) -> Weight { + (49_244_000 as Weight) + // Standard Error: 16_000 + .saturating_add((1_946_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table(r: u32, ) -> Weight { + (46_117_000 as Weight) + // Standard Error: 17_000 + .saturating_add((2_387_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_br_table_per_entry(_e: u32, ) -> Weight { + (55_204_000 as Weight) + } + fn instr_call(r: u32, ) -> Weight { + (43_651_000 as Weight) + // Standard Error: 26_000 + .saturating_add((19_163_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_call_indirect(r: u32, ) -> Weight { + (54_063_000 as Weight) + // Standard Error: 32_000 + .saturating_add((27_970_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_call_indirect_per_param(p: u32, ) -> Weight { + (88_527_000 as Weight) + // Standard Error: 6_000 + .saturating_add((958_000 as Weight).saturating_mul(p as Weight)) + } + fn instr_local_get(r: u32, ) -> Weight { + (55_066_000 as Weight) + // Standard Error: 12_000 + .saturating_add((682_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_set(r: u32, ) -> Weight { + (55_298_000 as Weight) + // Standard Error: 13_000 + .saturating_add((778_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_local_tee(r: u32, ) -> Weight { + (56_302_000 as Weight) + // Standard Error: 11_000 + .saturating_add((1_079_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_get(r: 
u32, ) -> Weight { + (71_567_000 as Weight) + // Standard Error: 11_000 + .saturating_add((1_107_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_global_set(r: u32, ) -> Weight { + (71_186_000 as Weight) + // Standard Error: 12_000 + .saturating_add((1_151_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_current(r: u32, ) -> Weight { + (46_240_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_044_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_memory_grow(r: u32, ) -> Weight { + (52_369_000 as Weight) + // Standard Error: 2_508_000 + .saturating_add((615_448_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64clz(r: u32, ) -> Weight { + (47_623_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ctz(r: u32, ) -> Weight { + (47_670_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64popcnt(r: u32, ) -> Weight { + (47_508_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eqz(r: u32, ) -> Weight { + (48_109_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_580_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendsi32(r: u32, ) -> Weight { + (55_270_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_102_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64extendui32(r: u32, ) -> Weight { + (55_093_000 as Weight) + // Standard Error: 9_000 + .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i32wrapi64(r: u32, ) -> Weight { + (48_265_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_573_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64eq(r: u32, ) -> Weight { + (48_733_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_088_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ne(r: u32, ) -> Weight { + (48_831_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_085_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64lts(r: u32, ) -> Weight { + (49_147_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_056_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ltu(r: u32, ) -> Weight { + (49_596_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_049_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64gts(r: u32, ) -> Weight { + (49_872_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_038_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64gtu(r: u32, ) -> Weight { + (48_843_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_081_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64les(r: u32, ) -> Weight { + (48_765_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64leu(r: u32, ) -> Weight { + (48_720_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_083_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64ges(r: u32, ) -> Weight { + (48_736_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_097_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64geu(r: u32, ) -> Weight { + (48_772_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_093_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64add(r: u32, ) -> Weight { + (48_827_000 as Weight) 
+ // Standard Error: 6_000 + .saturating_add((2_082_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64sub(r: u32, ) -> Weight { + (48_961_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_072_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64mul(r: u32, ) -> Weight { + (49_069_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_067_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divs(r: u32, ) -> Weight { + (49_035_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64divu(r: u32, ) -> Weight { + (48_842_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_449_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rems(r: u32, ) -> Weight { + (48_536_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_723_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64remu(r: u32, ) -> Weight { + (48_851_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_432_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64and(r: u32, ) -> Weight { + (48_624_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_093_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64or(r: u32, ) -> Weight { + (49_348_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64xor(r: u32, ) -> Weight { + (49_112_000 as Weight) + // Standard Error: 6_000 + .saturating_add((2_055_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shl(r: u32, ) -> Weight { + (49_654_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_051_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shrs(r: u32, ) -> Weight { + (48_848_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64shru(r: u32, ) -> Weight { + (49_455_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_054_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotl(r: u32, ) -> Weight { + (49_640_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_048_000 as Weight).saturating_mul(r as Weight)) + } + fn instr_i64rotr(r: u32, ) -> Weight { + (49_498_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_068_000 as Weight).saturating_mul(r as Weight)) + } +} diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 44639a2275644..94719553e28aa 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-democracy" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Apache-2.0" @@ -13,28 +13,29 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0",
default-features = false, path = "../system" } +serde = { version = "1.0.126", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-scheduler = { version = "2.0.0", path = "../scheduler" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } -hex-literal = "0.3.1" +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-scheduler = { version = "4.0.0-dev", path = "../scheduler" } [features] default = ["std"] std = [ "serde", "codec/std", + "scale-info/std", "sp-std/std", "sp-io/std", "frame-benchmarking/std", @@ -48,3 +49,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/democracy/README.md b/frame/democracy/README.md index ffbf2f36a1760..bbc5f1c65586a 100644 --- a/frame/democracy/README.md +++ b/frame/democracy/README.md @@ -1,6 +1,6 @@ # Democracy Pallet -- [`democracy::Trait`](https://docs.rs/pallet-democracy/latest/pallet_democracy/trait.Trait.html) +- [`democracy::Config`](https://docs.rs/pallet-democracy/latest/pallet_democracy/trait.Config.html) - [`Call`](https://docs.rs/pallet-democracy/latest/pallet_democracy/enum.Call.html) ## Overview @@ -132,4 +132,4 @@ This call can only be made by the `VetoOrigin`. - `cancel_queued` - Cancels a proposal that is queued for enactment. - `clear_public_proposal` - Removes all public proposals. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 0b822e885989e..7d4d7aee140b9 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. 
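Beyond the version bumps, the dependency list gains `scale-info` with its `derive` feature, re-exported through the `std` feature list, and a new `try-runtime` feature that simply forwards to `frame-support/try-runtime`. In this period of Substrate development, `scale-info` backs self-describing runtime metadata, so pallet types pick up a `TypeInfo` derive next to the existing codec derives. A hedged sketch with an invented type, not taken from this pallet:

```rust
// Illustrative only: shows what the new `scale-info` dependency is for.
// `ExampleVote` is a made-up type; the derives are the real ones exported
// by `parity-scale-codec` (imported as `codec`) and `scale-info`.
use codec::{Decode, Encode};
use scale_info::TypeInfo;

#[derive(Encode, Decode, TypeInfo, Clone, PartialEq, Eq, Debug)]
pub struct ExampleVote<Balance> {
    pub aye: bool,
    pub balance: Balance,
}
```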
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,50 +19,44 @@ use super::*; -use frame_benchmarking::{benchmarks, account, whitelist_account}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelist_account}; use frame_support::{ - IterableStorageMap, - traits::{Currency, Get, EnsureOrigin, OnInitialize, UnfilteredDispatchable, schedule::DispatchTime}, + assert_noop, assert_ok, + traits::{ + schedule::DispatchTime, Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable, + }, }; -use frame_system::{RawOrigin, Module as System, self, EventRecord}; +use frame_system::{Pallet as System, RawOrigin}; -use sp_runtime::traits::{Bounded, One}; +use sp_runtime::traits::{BadOrigin, Bounded, One}; -use crate::Module as Democracy; +use crate::Pallet as Democracy; const SEED: u32 = 0; const MAX_REFERENDUMS: u32 = 99; const MAX_SECONDERS: u32 = 100; const MAX_BYTES: u32 = 16_384; -fn assert_last_event<T: Trait>(generic_event: <T as frame_system::Trait>::Event) { - let events = System::<T>::events(); - let system_event: <T as frame_system::Trait>::Event = generic_event.into(); - // compare to the last event record - let EventRecord { event, .. } = &events[events.len() - 1]; - assert_eq!(event, &system_event); +fn assert_last_event<T: Config>(generic_event: <T as frame_system::Config>::Event) { + frame_system::Pallet::<T>::assert_last_event(generic_event.into()); } -fn funded_account<T: Trait>(name: &'static str, index: u32) -> T::AccountId { +fn funded_account<T: Config>(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value()); caller } -fn add_proposal<T: Trait>(n: u32) -> Result<T::Hash, &'static str> { +fn add_proposal<T: Config>(n: u32) -> Result<T::Hash, &'static str> { let other = funded_account::<T>("proposer", n); let value = T::MinimumDeposit::get(); let proposal_hash: T::Hash = T::Hashing::hash_of(&n); - Democracy::<T>::propose( - RawOrigin::Signed(other).into(), - proposal_hash, - value.into(), - )?; + Democracy::<T>::propose(RawOrigin::Signed(other).into(), proposal_hash, value.into())?; Ok(proposal_hash) } -fn add_referendum<T: Trait>(n: u32) -> Result<ReferendumIndex, &'static str> { +fn add_referendum<T: Config>(n: u32) -> Result<ReferendumIndex, &'static str> { let proposal_hash: T::Hash = T::Hashing::hash_of(&n); let vote_threshold = VoteThreshold::SimpleMajority; @@ -70,35 +64,28 @@ fn add_referendum<T: Trait>(n: u32) -> Result<ReferendumIndex, &'static str> { T::LaunchPeriod::get(), proposal_hash, vote_threshold, - 0.into(), + 0u32.into(), ); - let referendum_index: ReferendumIndex = ReferendumCount::get() - 1; + let referendum_index: ReferendumIndex = ReferendumCount::<T>::get() - 1; T::Scheduler::schedule_named( (DEMOCRACY_ID, referendum_index).encode(), - DispatchTime::At(1.into()), + DispatchTime::At(1u32.into()), None, 63, - system::RawOrigin::Root.into(), - Call::enact_proposal(proposal_hash, referendum_index).into(), - ).map_err(|_| "failed to schedule named")?; + frame_system::RawOrigin::Root.into(), + Call::enact_proposal { proposal_hash, index: referendum_index }.into(), + ) + .map_err(|_| "failed to schedule named")?; Ok(referendum_index) } -fn account_vote<T: Trait>(b: BalanceOf<T>) -> AccountVote<BalanceOf<T>> { - let v = Vote { - aye: true, - conviction: Conviction::Locked1x, - }; +fn account_vote<T: Config>(b: BalanceOf<T>) -> AccountVote<BalanceOf<T>> { + let v = Vote { aye: true, conviction: Conviction::Locked1x }; - AccountVote::Standard { - vote: v, - balance: b, - } + AccountVote::Standard { vote: v, balance: b } } benchmarks! { - _ { } - propose { let p = T::MaxProposals::get(); @@ -124,13 +111,13 @@ benchmarks! { // Create s existing "seconds" for i in 0 .. 
s { let seconder = funded_account::("seconder", i); - Democracy::::second(RawOrigin::Signed(seconder).into(), 0, u32::max_value())?; + Democracy::::second(RawOrigin::Signed(seconder).into(), 0, u32::MAX)?; } let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (s + 1) as usize, "Seconds not recorded"); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), 0, u32::max_value()) + }: _(RawOrigin::Signed(caller), 0, u32::MAX) verify { let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (s + 2) as usize, "`second` benchmark did not work"); @@ -140,7 +127,7 @@ benchmarks! { let r in 1 .. MAX_REFERENDUMS; let caller = funded_account::("caller", 0); - let account_vote = account_vote::(100.into()); + let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes for i in 0 .. r { @@ -149,7 +136,7 @@ benchmarks! { } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); @@ -159,7 +146,7 @@ benchmarks! { verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r + 1) as usize, "Vote was not recorded."); } @@ -168,7 +155,7 @@ benchmarks! { let r in 1 .. MAX_REFERENDUMS; let caller = funded_account::("caller", 0); - let account_vote = account_vote::(100.into()); + let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes for i in 0 ..=r { @@ -177,13 +164,13 @@ benchmarks! { } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r + 1) as usize, "Votes were not recorded."); // Change vote from aye to nay let nay = Vote { aye: false, conviction: Conviction::Locked1x }; - let new_vote = AccountVote::Standard { vote: nay, balance: 1000.into() }; + let new_vote = AccountVote::Standard { vote: nay, balance: 1000u32.into() }; let referendum_index = Democracy::::referendum_count() - 1; // This tests when a user changes a vote @@ -192,27 +179,30 @@ benchmarks! { verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r + 1) as usize, "Vote was incorrectly added"); let referendum_info = Democracy::<T>::referendum_info(referendum_index) .ok_or("referendum doesn't exist")?; let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, - _ => return Err("referendum not ongoing"), + _ => return Err("referendum not ongoing".into()), }; - assert_eq!(tally.nays, 1000.into(), "changed vote was not recorded"); + assert_eq!(tally.nays, 1000u32.into(), "changed vote was not recorded"); } emergency_cancel { let origin = T::CancellationOrigin::successful_origin(); let referendum_index = add_referendum::<T>(0)?; - let call = Call::<T>::emergency_cancel(referendum_index); - assert!(Democracy::<T>::referendum_status(referendum_index).is_ok()); + let call = Call::<T>::emergency_cancel { ref_index: referendum_index }; + assert_ok!(Democracy::<T>::referendum_status(referendum_index)); }: { call.dispatch_bypass_filter(origin)? } verify { // Referendum has been canceled - assert!(Democracy::<T>::referendum_status(referendum_index).is_err()); + assert_noop!( + Democracy::<T>::referendum_status(referendum_index), + Error::<T>::ReferendumInvalid, + ); } blacklist { @@ -226,18 +216,23 @@ // Place our proposal in the external queue, too. let hash = T::Hashing::hash_of(&0); - assert!(Democracy::<T>::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()).is_ok()); + assert_ok!( + Democracy::<T>::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()) + ); // Add a referendum of our proposal. let referendum_index = add_referendum::<T>(0)?; - assert!(Democracy::<T>::referendum_status(referendum_index).is_ok()); + assert_ok!(Democracy::<T>::referendum_status(referendum_index)); - let call = Call::<T>::blacklist(hash, Some(referendum_index)); + let call = Call::<T>::blacklist { proposal_hash: hash, maybe_ref_index: Some(referendum_index) }; let origin = T::BlacklistOrigin::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } verify { // Referendum has been canceled - assert!(Democracy::<T>::referendum_status(referendum_index).is_err()); + assert_noop!( + Democracy::<T>::referendum_status(referendum_index), + Error::<T>::ReferendumInvalid + ); } // Worst case scenario, we external propose a previously blacklisted proposal @@ -252,7 +247,7 @@ (T::BlockNumber::zero(), vec![T::AccountId::default(); v as usize]) ); - let call = Call::<T>::external_propose(proposal_hash); + let call = Call::<T>::external_propose { proposal_hash }; }: { call.dispatch_bypass_filter(origin)? } verify { // External proposal created @@ -262,7 +257,7 @@ external_propose_majority { let origin = T::ExternalMajorityOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - let call = Call::<T>::external_propose_majority(proposal_hash); + let call = Call::<T>::external_propose_majority { proposal_hash }; }: { call.dispatch_bypass_filter(origin)? } verify { // External proposal created @@ -272,7 +267,7 @@ external_propose_default { let origin = T::ExternalDefaultOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - let call = Call::<T>::external_propose_default(proposal_hash); + let call = Call::<T>::external_propose_default { proposal_hash }; }: { call.dispatch_bypass_filter(origin)? } verify { // External proposal created @@ -287,8 +282,12 @@ // NOTE: Instant origin may invoke a little bit more logic, but may not always succeed. 
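A pattern worth calling out in these hunks: every `Call::<T>::foo(args)` becomes `Call::<T>::foo { named: args }`, because the pallet macro now generates struct-style call variants with named fields. A stand-in illustration mirroring `emergency_cancel` and `blacklist` above (field and type names are invented, not the generated enum):

```rust
// Illustration only: the variant names mirror the diff, the field types are
// simplified stand-ins for the real pallet types.
type ReferendumIndex = u32;
type Hash = [u8; 32];

#[allow(non_camel_case_types)]
pub enum Call {
    // before: emergency_cancel(ReferendumIndex)
    emergency_cancel { ref_index: ReferendumIndex },
    // before: blacklist(Hash, Option<ReferendumIndex>)
    blacklist { proposal_hash: Hash, maybe_ref_index: Option<ReferendumIndex> },
}

fn build_cancel(ref_index: ReferendumIndex) -> Call {
    // Named fields document the encoded call at the construction site.
    Call::emergency_cancel { ref_index }
}
```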
let origin_fast_track = T::FastTrackOrigin::successful_origin(); let voting_period = T::FastTrackVotingPeriod::get(); - let delay = 0; - let call = Call::<T>::fast_track(proposal_hash, voting_period.into(), delay.into()); + let delay = 0u32; + let call = Call::<T>::fast_track { + proposal_hash, + voting_period: voting_period.into(), + delay: delay.into() + }; }: { call.dispatch_bypass_filter(origin_fast_track)? } verify { @@ -311,7 +310,7 @@ vetoers.sort(); Blacklist::<T>::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); - let call = Call::<T>::veto_external(proposal_hash); + let call = Call::<T>::veto_external { proposal_hash }; let origin = T::VetoOrigin::successful_origin(); ensure!(NextExternal::<T>::get().is_some(), "no external proposal"); }: { call.dispatch_bypass_filter(origin)? } @@ -357,11 +356,11 @@ assert_eq!(Democracy::<T>::referendum_count(), r, "referenda not created"); // Launch external - LastTabledWasExternal::put(false); + LastTabledWasExternal::<T>::put(false); let origin = T::ExternalMajorityOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&r); - let call = Call::<T>::external_propose_majority(proposal_hash); + let call = Call::<T>::external_propose_majority { proposal_hash }; call.dispatch_bypass_filter(origin)?; // External proposal created ensure!(<NextExternal<T>>::exists(), "External proposal didn't work"); @@ -379,7 +378,7 @@ if let Some(value) = ReferendumInfoOf::<T>::get(i) { match value { ReferendumInfo::Finished { .. } => (), - ReferendumInfo::Ongoing(_) => return Err("Referendum was not finished"), + ReferendumInfo::Ongoing(_) => return Err("Referendum was not finished".into()), } } } @@ -399,7 +398,7 @@ // Launch public assert!(add_proposal::<T>(r).is_ok(), "proposal not created"); - LastTabledWasExternal::put(true); + LastTabledWasExternal::<T>::put(true); let block_number = T::LaunchPeriod::get(); @@ -413,7 +412,7 @@ if let Some(value) = ReferendumInfoOf::<T>::get(i) { match value { ReferendumInfo::Finished { .. } => (), - ReferendumInfo::Ongoing(_) => return Err("Referendum was not finished"), + ReferendumInfo::Ongoing(_) => return Err("Referendum was not finished".into()), } } } @@ -429,7 +428,7 @@ for (key, mut info) in ReferendumInfoOf::<T>::iter() { if let ReferendumInfo::Ongoing(ref mut status) = info { - status.end += 100.into(); + status.end += 100u32.into(); } ReferendumInfoOf::<T>::insert(key, info); } @@ -437,13 +436,13 @@ assert_eq!(Democracy::<T>::referendum_count(), r, "referenda not created"); assert_eq!(Democracy::<T>::lowest_unbaked(), 0, "invalid referenda init"); - }: { Democracy::<T>::on_initialize(0.into()) } + }: { Democracy::<T>::on_initialize(0u32.into()) } verify { // All should be on going for i in 0 .. r { if let Some(value) = ReferendumInfoOf::<T>::get(i) { match value { - ReferendumInfo::Finished { .. } => return Err("Referendum has been finished"), + ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()), ReferendumInfo::Ongoing(_) => (), } } @@ -453,8 +452,8 @@ delegate { let r in 1 .. MAX_REFERENDUMS; - let initial_balance: BalanceOf<T> = 100.into(); - let delegated_balance: BalanceOf<T> = 1000.into(); + let initial_balance: BalanceOf<T> = 100u32.into(); + let delegated_balance: BalanceOf<T> = 1000u32.into(); let caller = funded_account::<T>("caller", 0); // Caller will initially delegate to `old_delegate` @@ -467,7 +466,7 @@ )?; let (target, balance) = match VotingOf::<T>::get(&caller) { Voting::Delegating { target, balance, .. 
} => (target, balance), - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(target, old_delegate, "delegation target didn't work"); assert_eq!(balance, delegated_balance, "delegation balance didn't work"); @@ -481,7 +480,7 @@ benchmarks! { } let votes = match VotingOf::::get(&new_delegate) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); @@ -489,13 +488,13 @@ benchmarks! { verify { let (target, balance) = match VotingOf::::get(&caller) { Voting::Delegating { target, balance, .. } => (target, balance), - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(target, new_delegate, "delegation target didn't work"); assert_eq!(balance, delegated_balance, "delegation balance didn't work"); let delegations = match VotingOf::::get(&new_delegate) { Voting::Direct { delegations, .. } => delegations, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(delegations.capital, delegated_balance, "delegation was not recorded."); } @@ -503,8 +502,8 @@ benchmarks! { undelegate { let r in 1 .. MAX_REFERENDUMS; - let initial_balance: BalanceOf = 100.into(); - let delegated_balance: BalanceOf = 1000.into(); + let initial_balance: BalanceOf = 100u32.into(); + let delegated_balance: BalanceOf = 1000u32.into(); let caller = funded_account::("caller", 0); // Caller will delegate @@ -517,7 +516,7 @@ benchmarks! { )?; let (target, balance) = match VotingOf::::get(&caller) { Voting::Delegating { target, balance, .. } => (target, balance), - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(target, the_delegate, "delegation target didn't work"); assert_eq!(balance, delegated_balance, "delegation balance didn't work"); @@ -533,7 +532,7 @@ benchmarks! { } let votes = match VotingOf::::get(&the_delegate) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); @@ -542,7 +541,7 @@ benchmarks! { // Voting should now be direct match VotingOf::::get(&caller) { Voting::Direct { .. } => (), - _ => return Err("undelegation failed"), + _ => return Err("undelegation failed".into()), } } @@ -563,7 +562,7 @@ benchmarks! { let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); match Preimages::::get(proposal_hash) { Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available") + _ => return Err("preimage not available".into()) } } @@ -585,7 +584,7 @@ benchmarks! { let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); match Preimages::::get(proposal_hash) { Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available") + _ => return Err("preimage not available".into()) } } @@ -607,7 +606,7 @@ benchmarks! { let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal_hash.clone(), u32::max_value()) + }: _(RawOrigin::Signed(caller), proposal_hash.clone(), u32::MAX) verify { let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); assert!(!Preimages::::contains_key(proposal_hash)); @@ -619,7 +618,7 @@ benchmarks! 
{ let locker = funded_account::("locker", 0); // Populate votes so things are locked - let base_balance: BalanceOf = 100.into(); + let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); // Vote and immediately unvote for i in 0 .. r { @@ -643,7 +642,7 @@ benchmarks! { let locker = funded_account::("locker", 0); // Populate votes so things are locked - let base_balance: BalanceOf = 100.into(); + let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); for i in 0 .. r { let ref_idx = add_referendum::(i)?; @@ -651,18 +650,18 @@ benchmarks! { } // Create a big vote so lock increases - let big_vote = account_vote::(base_balance * 10.into()); + let big_vote = account_vote::(base_balance * 10u32.into()); let referendum_index = add_referendum::(r)?; Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), referendum_index, big_vote)?; let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r + 1) as usize, "Votes were not recorded."); let voting = VotingOf::::get(&locker); - assert_eq!(voting.locked_balance(), base_balance * 10.into()); + assert_eq!(voting.locked_balance(), base_balance * 10u32.into()); Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), referendum_index)?; @@ -672,7 +671,7 @@ benchmarks! { verify { let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r as usize, "Vote was not removed"); @@ -685,7 +684,7 @@ benchmarks! { let r in 1 .. MAX_REFERENDUMS; let caller = funded_account::("caller", 0); - let account_vote = account_vote::(100.into()); + let account_vote = account_vote::(100u32.into()); for i in 0 .. r { let ref_idx = add_referendum::(i)?; @@ -694,7 +693,7 @@ benchmarks! { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r as usize, "Votes not created"); @@ -704,7 +703,7 @@ benchmarks! { verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); } @@ -714,7 +713,7 @@ benchmarks! { let r in 1 .. MAX_REFERENDUMS; let caller = funded_account::("caller", r); - let account_vote = account_vote::(100.into()); + let account_vote = account_vote::(100u32.into()); for i in 0 .. r { let ref_idx = add_referendum::(i)?; @@ -723,7 +722,7 @@ benchmarks! { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r as usize, "Votes not created"); @@ -733,7 +732,7 @@ benchmarks! { verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, - _ => return Err("Votes are not direct"), + _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); } @@ -744,7 +743,7 @@ benchmarks! { let b in 0 .. 
MAX_BYTES; let proposer = funded_account::("proposer", 0); - let raw_call = Call::note_preimage(vec![1; b as usize]); + let raw_call = Call::note_preimage { encoded_proposal: vec![1; b as usize] }; let generic_call: T::Proposal = raw_call.into(); let encoded_proposal = generic_call.encode(); let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); @@ -752,12 +751,12 @@ benchmarks! { match Preimages::::get(proposal_hash) { Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available") + _ => return Err("preimage not available".into()) } }: enact_proposal(RawOrigin::Root, proposal_hash, 0) verify { // Fails due to mismatched origin - assert_last_event::(RawEvent::Executed(0, false).into()); + assert_last_event::(Event::::Executed(0, Err(BadOrigin.into())).into()); } #[extra] @@ -773,7 +772,7 @@ benchmarks! { match Preimages::::get(proposal_hash) { Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available") + _ => return Err("preimage not available".into()) } }: { assert_eq!( @@ -783,44 +782,4 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose::()); - assert_ok!(test_benchmark_second::()); - assert_ok!(test_benchmark_vote_new::()); - assert_ok!(test_benchmark_vote_existing::()); - assert_ok!(test_benchmark_emergency_cancel::()); - assert_ok!(test_benchmark_external_propose::()); - assert_ok!(test_benchmark_external_propose_majority::()); - assert_ok!(test_benchmark_external_propose_default::()); - assert_ok!(test_benchmark_fast_track::()); - assert_ok!(test_benchmark_veto_external::()); - assert_ok!(test_benchmark_cancel_referendum::()); - assert_ok!(test_benchmark_cancel_queued::()); - assert_ok!(test_benchmark_on_initialize_external::()); - assert_ok!(test_benchmark_on_initialize_public::()); - assert_ok!(test_benchmark_on_initialize_base::()); - assert_ok!(test_benchmark_delegate::()); - assert_ok!(test_benchmark_undelegate::()); - assert_ok!(test_benchmark_clear_public_proposals::()); - assert_ok!(test_benchmark_note_preimage::()); - assert_ok!(test_benchmark_note_imminent_preimage::()); - assert_ok!(test_benchmark_reap_preimage::()); - assert_ok!(test_benchmark_unlock_remove::()); - assert_ok!(test_benchmark_unlock_set::()); - assert_ok!(test_benchmark_remove_vote::()); - assert_ok!(test_benchmark_remove_other_vote::()); - assert_ok!(test_benchmark_enact_proposal_execute::()); - assert_ok!(test_benchmark_enact_proposal_slash::()); - assert_ok!(test_benchmark_blacklist::()); - assert_ok!(test_benchmark_cancel_proposal::()); - }); - } -} +impl_benchmark_test_suite!(Democracy, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs index bb563e4b74830..b4f24c93bb40f 100644 --- a/frame/democracy/src/conviction.rs +++ b/frame/democracy/src/conviction.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,13 +17,17 @@ //! The conviction datatype. 
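The `Executed` event checked in the verify block above now carries a full `DispatchResult` instead of a bare `bool`, so observers can see why enactment failed (here, the mismatched origin the benchmark provokes). A small sketch, with a local alias standing in for `sp_runtime::DispatchResult`:

// Local stand-in; the real type is Result<(), DispatchError>.
type DispatchResult = Result<(), &'static str>;

// Old payload: (ReferendumIndex, bool), success or failure only.
// New payload: the full dispatch result, including the error cause.
fn executed_event(ref_index: u32, result: DispatchResult) -> (u32, DispatchResult) {
    (ref_index, result)
}

fn main() {
    let (_idx, result) = executed_event(0, Err("BadOrigin"));
    assert!(result.is_err());
}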
-use sp_std::{result::Result, convert::TryFrom}; -use sp_runtime::{RuntimeDebug, traits::{Zero, Bounded, CheckedMul, CheckedDiv}}; -use codec::{Encode, Decode}; use crate::types::Delegations; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{Bounded, CheckedDiv, CheckedMul, Zero}, + RuntimeDebug, +}; +use sp_std::{convert::TryFrom, result::Result}; /// A value denoting the strength of conviction of a vote. -#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo)] pub enum Conviction { /// 0.1x votes, unlocked. None, @@ -93,9 +97,10 @@ impl Conviction { } /// The votes of a voter of the given `balance` with our conviction. - pub fn votes< - B: From + Zero + Copy + CheckedMul + CheckedDiv + Bounded - >(self, capital: B) -> Delegations { + pub fn votes + Zero + Copy + CheckedMul + CheckedDiv + Bounded>( + self, + capital: B, + ) -> Delegations { let votes = match self { Conviction::None => capital.checked_div(&10u8.into()).unwrap_or_else(Zero::zero), x => capital.checked_mul(&u8::from(x).into()).unwrap_or_else(B::max_value), diff --git a/frame/democracy/src/default_weight.rs b/frame/democracy/src/default_weight.rs deleted file mode 100644 index 28aa45ae2d603..0000000000000 --- a/frame/democracy/src/default_weight.rs +++ /dev/null @@ -1,171 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Weights for pallet_democracy -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! 
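The reformatted `votes` method above turns locked capital into vote weight: `Conviction::None` counts one tenth of capital via `checked_div`, and every locked tier multiplies capital by its `u8` conversion via `checked_mul`, falling back to zero or the type's maximum on failure. A plain-`u64` sketch of that rule (the `Locked1x..Locked6x` tier names and multipliers are assumed from the enum's convention, and this sketch returns only the vote weight, not the paired `Delegations` capital):

#[derive(Clone, Copy)]
enum Conviction { None, Locked1x, Locked2x, Locked3x, Locked4x, Locked5x, Locked6x }

impl From<Conviction> for u64 {
    fn from(c: Conviction) -> u64 {
        match c {
            Conviction::None => 0,
            Conviction::Locked1x => 1,
            Conviction::Locked2x => 2,
            Conviction::Locked3x => 3,
            Conviction::Locked4x => 4,
            Conviction::Locked5x => 5,
            Conviction::Locked6x => 6,
        }
    }
}

// Mirrors the checked ops with saturating fallbacks in `Conviction::votes`.
fn votes(conviction: Conviction, capital: u64) -> u64 {
    match conviction {
        Conviction::None => capital.checked_div(10).unwrap_or(0),
        x => capital.checked_mul(u64::from(x)).unwrap_or(u64::MAX),
    }
}

fn main() {
    assert_eq!(votes(Conviction::None, 1_000), 100);        // 0.1x, unlocked
    assert_eq!(votes(Conviction::Locked6x, 1_000), 6_000);  // 6x, locked
}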
DATE: 2020-09-24, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn propose() -> Weight { - (96_316_000 as Weight) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn second(s: u32, ) -> Weight { - (58_386_000 as Weight) - .saturating_add((259_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn vote_new(r: u32, ) -> Weight { - (70_374_000 as Weight) - .saturating_add((291_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn vote_existing(r: u32, ) -> Weight { - (70_097_000 as Weight) - .saturating_add((296_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn emergency_cancel() -> Weight { - (41_731_000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn blacklist(p: u32, ) -> Weight { - (117_847_000 as Weight) - .saturating_add((871_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(6 as Weight)) - } - fn external_propose(v: u32, ) -> Weight { - (20_972_000 as Weight) - .saturating_add((114_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn external_propose_majority() -> Weight { - (5_030_000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn external_propose_default() -> Weight { - (4_981_000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn fast_track() -> Weight { - (42_801_000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn veto_external(v: u32, ) -> Weight { - (44_115_000 as Weight) - .saturating_add((194_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn cancel_proposal(p: u32, ) -> Weight { - (73_937_000 as Weight) - .saturating_add((962_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn cancel_referendum() -> Weight { - (25_233_000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn cancel_queued(r: u32, ) -> Weight { - (48_251_000 as Weight) - .saturating_add((3_590_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn on_initialize_base(r: u32, ) -> Weight { - (17_597_000 as Weight) - .saturating_add((7_248_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - } - fn delegate(r: u32, ) -> Weight { - (93_916_000 as Weight) - .saturating_add((10_794_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(4 as Weight)) - 
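Every function in this deleted file follows the same shape: a measured base weight, plus a per-item slope, plus database read and write costs. A standalone sketch of that composition using the `second(s)` constants above; `DB_READ` and `DB_WRITE` are placeholders, not the real `RocksDbWeight` values:

type Weight = u64;

// Placeholder DB costs; the deleted file pulled these from
// frame_support's RocksDbWeight constants.
const DB_READ: Weight = 25_000_000;
const DB_WRITE: Weight = 100_000_000;

// Shape of the deleted `fn second(s: u32) -> Weight`:
// base + slope * s + reads(1) + writes(1).
fn second_weight(s: u64) -> Weight {
    58_386_000u64
        .saturating_add(259_000u64.saturating_mul(s))
        .saturating_add(DB_READ)
        .saturating_add(DB_WRITE)
}

fn main() {
    // More existing seconders means strictly more weight.
    assert!(second_weight(16) > second_weight(0));
}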
.saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(4 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) - } - fn undelegate(r: u32, ) -> Weight { - (47_855_000 as Weight) - .saturating_add((10_805_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(DbWeight::get().writes(2 as Weight)) - .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) - } - fn clear_public_proposals() -> Weight { - (4_864_000 as Weight) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn note_preimage(b: u32, ) -> Weight { - (66_754_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn note_imminent_preimage(b: u32, ) -> Weight { - (44_664_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn reap_preimage(b: u32, ) -> Weight { - (59_968_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn unlock_remove(r: u32, ) -> Weight { - (58_573_000 as Weight) - .saturating_add((131_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn unlock_set(r: u32, ) -> Weight { - (53_831_000 as Weight) - .saturating_add((324_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn remove_vote(r: u32, ) -> Weight { - (31_846_000 as Weight) - .saturating_add((327_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn remove_other_vote(r: u32, ) -> Weight { - (31_880_000 as Weight) - .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } -} diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 884106a63b321..8bc6921c4f8ad 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,8 @@ //! # Democracy Pallet //! -//! - [`democracy::Trait`](./trait.Trait.html) -//! - [`Call`](./enum.Call.html) +//! - [`Config`] +//! - [`Call`] //! //! ## Overview //! @@ -46,12 +46,12 @@ //! - **Conviction:** An indication of a voter's strength of belief in their vote. An increase //! of one in conviction indicates that a token holder is willing to lock their tokens for twice //! as many lock periods after enactment. -//! - **Vote:** A value that can either be in approval ("Aye") or rejection ("Nay") -//! of a particular referendum. +//! 
- **Vote:** A value that can either be in approval ("Aye") or rejection ("Nay") of a particular +//! referendum. //! - **Proposal:** A submission to the chain that represents an action that a proposer (either an //! account or an external origin) suggests that the system adopt. -//! - **Referendum:** A proposal that is in the process of being voted on for -//! either acceptance or rejection as a change to the system. +//! - **Referendum:** A proposal that is in the process of being voted on for either acceptance or +//! rejection as a change to the system. //! - **Delegation:** The act of granting your voting power to the decisions of another account for //! up to a certain conviction. //! @@ -92,50 +92,50 @@ //! - `unlock` - Redetermine the account's balance lock, potentially making tokens available. //! //! Preimage actions: -//! - `note_preimage` - Registers the preimage for an upcoming proposal, requires -//! a deposit that is returned once the proposal is enacted. +//! - `note_preimage` - Registers the preimage for an upcoming proposal, requires a deposit that is +//! returned once the proposal is enacted. //! - `note_preimage_operational` - same but provided by `T::OperationalPreimageOrigin`. -//! - `note_imminent_preimage` - Registers the preimage for an upcoming proposal. -//! Does not require a deposit, but the proposal must be in the dispatch queue. +//! - `note_imminent_preimage` - Registers the preimage for an upcoming proposal. Does not require a +//! deposit, but the proposal must be in the dispatch queue. //! - `note_imminent_preimage_operational` - same but provided by `T::OperationalPreimageOrigin`. -//! - `reap_preimage` - Removes the preimage for an expired proposal. Will only -//! work under the condition that it's the same account that noted it and -//! after the voting period, OR it's a different account after the enactment period. +//! - `reap_preimage` - Removes the preimage for an expired proposal. Will only work under the +//! condition that it's the same account that noted it and after the voting period, OR it's a +//! different account after the enactment period. //! //! #### Cancellation Origin //! //! This call can only be made by the `CancellationOrigin`. //! -//! - `emergency_cancel` - Schedules an emergency cancellation of a referendum. -//! Can only happen once to a specific referendum. +//! - `emergency_cancel` - Schedules an emergency cancellation of a referendum. Can only happen once +//! to a specific referendum. //! //! #### ExternalOrigin //! //! This call can only be made by the `ExternalOrigin`. //! -//! - `external_propose` - Schedules a proposal to become a referendum once it is is legal -//! for an externally proposed referendum. +//! - `external_propose` - Schedules a proposal to become a referendum once it is legal for an +//! externally proposed referendum. //! //! #### External Majority Origin //! //! This call can only be made by the `ExternalMajorityOrigin`. //! -//! - `external_propose_majority` - Schedules a proposal to become a majority-carries -//! referendum once it is legal for an externally proposed referendum. +//! - `external_propose_majority` - Schedules a proposal to become a majority-carries referendum +//! once it is legal for an externally proposed referendum. //! //! #### External Default Origin //! //! This call can only be made by the `ExternalDefaultOrigin`. //! -//! - `external_propose_default` - Schedules a proposal to become a negative-turnout-bias -//!
referendum once it is legal for an externally proposed referendum. +//! - `external_propose_default` - Schedules a proposal to become a negative-turnout-bias referendum +//! once it is legal for an externally proposed referendum. //! //! #### Fast Track Origin //! //! This call can only be made by the `FastTrackOrigin`. //! -//! - `fast_track` - Schedules the current externally proposed proposal that -//! is "majority-carries" to become a referendum immediately. +//! - `fast_track` - Schedules the current externally proposed proposal that is "majority-carries" +//! to become a referendum immediately. //! //! #### Veto Origin //! @@ -149,35 +149,37 @@ //! - `cancel_queued` - Cancels a proposal that is queued for enactment. //! - `clear_public_proposal` - Removes all public proposals. -#![recursion_limit="128"] +#![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_runtime::{ - DispatchResult, DispatchError, RuntimeDebug, - traits::{Zero, Hash, Dispatchable, Saturating, Bounded}, -}; -use codec::{Encode, Decode, Input}; +use codec::{Decode, Encode, Input}; use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, ensure, Parameter, - weights::{Weight, DispatchClass, Pays}, + ensure, traits::{ - Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, Get, - OnUnbalanced, BalanceStatus, schedule::{Named as ScheduleNamed, DispatchTime}, EnsureOrigin + schedule::{DispatchTime, Named as ScheduleNamed}, + BalanceStatus, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, + ReservableCurrency, WithdrawReasons, }, - dispatch::DispatchResultWithPostInfo, + weights::Weight, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{Bounded, Dispatchable, Hash, Saturating, Zero}, + ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, +}; +use sp_std::prelude::*; -mod vote_threshold; -mod vote; mod conviction; mod types; -mod default_weight; -pub use vote_threshold::{Approved, VoteThreshold}; -pub use vote::{Vote, AccountVote, Voting}; +mod vote; +mod vote_threshold; +pub mod weights; pub use conviction::Conviction; -pub use types::{ReferendumInfo, ReferendumStatus, Tally, UnvoteScope, Delegations}; +pub use pallet::*; +pub use types::{Delegations, ReferendumInfo, ReferendumStatus, Tally, UnvoteScope}; +pub use vote::{AccountVote, Vote, Voting}; +pub use vote_threshold::{Approved, VoteThreshold}; +pub use weights::WeightInfo; #[cfg(test)] mod tests; @@ -198,256 +200,318 @@ pub type PropIndex = u32; /// A referendum index. 
pub type ReferendumIndex = u32; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; - -pub trait WeightInfo { - fn propose() -> Weight; - fn second(s: u32, ) -> Weight; - fn vote_new(r: u32, ) -> Weight; - fn vote_existing(r: u32, ) -> Weight; - fn emergency_cancel() -> Weight; - fn blacklist(p: u32, ) -> Weight; - fn external_propose(v: u32, ) -> Weight; - fn external_propose_majority() -> Weight; - fn external_propose_default() -> Weight; - fn fast_track() -> Weight; - fn veto_external(v: u32, ) -> Weight; - fn cancel_referendum() -> Weight; - fn cancel_proposal(p: u32, ) -> Weight; - fn cancel_queued(r: u32, ) -> Weight; - fn on_initialize_base(r: u32, ) -> Weight; - fn delegate(r: u32, ) -> Weight; - fn undelegate(r: u32, ) -> Weight; - fn clear_public_proposals() -> Weight; - fn note_preimage(b: u32, ) -> Weight; - fn note_imminent_preimage(b: u32, ) -> Weight; - fn reap_preimage(b: u32, ) -> Weight; - fn unlock_remove(r: u32, ) -> Weight; - fn unlock_set(r: u32, ) -> Weight; - fn remove_vote(r: u32, ) -> Weight; - fn remove_other_vote(r: u32, ) -> Weight; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; + +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum PreimageStatus { + /// The preimage is imminently needed at the argument. + Missing(BlockNumber), + /// The preimage is available. + Available { + data: Vec, + provider: AccountId, + deposit: Balance, + since: BlockNumber, + /// None if it's not imminent. + expiry: Option, + }, } -pub trait Trait: frame_system::Trait + Sized { - type Proposal: Parameter + Dispatchable + From>; - type Event: From> + Into<::Event>; +impl PreimageStatus { + fn to_missing_expiry(self) -> Option { + match self { + PreimageStatus::Missing(expiry) => Some(expiry), + _ => None, + } + } +} - /// Currency type for this module. - type Currency: ReservableCurrency - + LockableCurrency; +// A value placed in storage that represents the current version of the Democracy storage. +// This value is used by the `on_runtime_upgrade` logic to determine whether we run +// storage migration logic. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo)] +enum Releases { + V1, +} - /// The minimum period of locking and the period between a proposal being approved and enacted. - /// - /// It should generally be a little more than the unstake period to ensure that - /// voting stakers have an opportunity to remove themselves from the system in the case where - /// they are on the losing side of a vote. - type EnactmentPeriod: Get; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::DispatchResultWithPostInfo, + pallet_prelude::*, + traits::EnsureOrigin, + weights::{DispatchClass, Pays}, + Parameter, + }; + use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; + use sp_runtime::DispatchResult; - /// How often (in blocks) new public referenda are launched. - type LaunchPeriod: Get; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// How often (in blocks) to check for new votes. 
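The relocated `PreimageStatus` enum above models a preimage as either promised for a deadline or fully available, and `to_missing_expiry` collapses it to the deadline of the missing case. A trimmed, runnable mirror (the `Available` bookkeeping fields are reduced here to keep the sketch self-contained):

#[allow(dead_code)]
enum PreimageStatus {
    // The preimage is imminently needed at the given block.
    Missing(u32),
    // The preimage is available (bookkeeping fields trimmed).
    Available { since: u32, expiry: Option<u32> },
}

impl PreimageStatus {
    fn to_missing_expiry(self) -> Option<u32> {
        match self {
            PreimageStatus::Missing(expiry) => Some(expiry),
            _ => None,
        }
    }
}

fn main() {
    assert_eq!(PreimageStatus::Missing(7).to_missing_expiry(), Some(7));
    let available = PreimageStatus::Available { since: 1, expiry: None };
    assert_eq!(available.to_missing_expiry(), None);
}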
- type VotingPeriod: Get; + #[pallet::config] + pub trait Config: frame_system::Config + Sized { + type Proposal: Parameter + Dispatchable + From>; + type Event: From> + IsType<::Event>; - /// The minimum amount to be used as a deposit for a public referendum proposal. - type MinimumDeposit: Get>; + /// Currency type for this pallet. + type Currency: ReservableCurrency + + LockableCurrency; - /// Origin from which the next tabled referendum may be forced. This is a normal - /// "super-majority-required" referendum. - type ExternalOrigin: EnsureOrigin; + /// The period between a proposal being approved and enacted. + /// + /// It should generally be a little more than the unstake period to ensure that + /// voting stakers have an opportunity to remove themselves from the system in the case + /// where they are on the losing side of a vote. + #[pallet::constant] + type EnactmentPeriod: Get; - /// Origin from which the next tabled referendum may be forced; this allows for the tabling of - /// a majority-carries referendum. - type ExternalMajorityOrigin: EnsureOrigin; + /// How often (in blocks) new public referenda are launched. + #[pallet::constant] + type LaunchPeriod: Get; - /// Origin from which the next tabled referendum may be forced; this allows for the tabling of - /// a negative-turnout-bias (default-carries) referendum. - type ExternalDefaultOrigin: EnsureOrigin; + /// How often (in blocks) to check for new votes. + #[pallet::constant] + type VotingPeriod: Get; - /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to - /// vote according to the `FastTrackVotingPeriod` asynchronously in a similar manner to the - /// emergency origin. It retains its threshold method. - type FastTrackOrigin: EnsureOrigin; + /// The minimum period of vote locking. + /// + /// It should be no shorter than the enactment period to ensure that in the case of an approval, + /// those successful voters are locked into the consequences that their votes entail. + #[pallet::constant] + type VoteLockingPeriod: Get; - /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to - /// vote immediately and asynchronously in a similar manner to the emergency origin. It retains - /// its threshold method. - type InstantOrigin: EnsureOrigin; + /// The minimum amount to be used as a deposit for a public referendum proposal. + #[pallet::constant] + type MinimumDeposit: Get>; - /// Indicator for whether an emergency origin is even allowed to happen. Some chains may want - /// to set this permanently to `false`, others may want to condition it on things such as - /// an upgrade having happened recently. - type InstantAllowed: Get; + /// Origin from which the next tabled referendum may be forced. This is a normal + /// "super-majority-required" referendum. + type ExternalOrigin: EnsureOrigin; - /// Minimum voting period allowed for a fast-track referendum. - type FastTrackVotingPeriod: Get; + /// Origin from which the next tabled referendum may be forced; this allows for the tabling + /// of a majority-carries referendum. + type ExternalMajorityOrigin: EnsureOrigin; - /// Origin from which any referendum may be cancelled in an emergency. - type CancellationOrigin: EnsureOrigin; + /// Origin from which the next tabled referendum may be forced; this allows for the tabling + /// of a negative-turnout-bias (default-carries) referendum. + type ExternalDefaultOrigin: EnsureOrigin; - /// Origin from which proposals may be blacklisted.
- type BlacklistOrigin: EnsureOrigin; + /// Origin from which the next majority-carries (or more permissive) referendum may be + /// tabled to vote according to the `FastTrackVotingPeriod` asynchronously in a similar + /// manner to the emergency origin. It retains its threshold method. + type FastTrackOrigin: EnsureOrigin; - /// Origin from which a proposal may be cancelled and its backers slashed. - type CancelProposalOrigin: EnsureOrigin; + /// Origin from which the next majority-carries (or more permissive) referendum may be + /// tabled to vote immediately and asynchronously in a similar manner to the emergency + /// origin. It retains its threshold method. + type InstantOrigin: EnsureOrigin; - /// Origin for anyone able to veto proposals. - /// - /// # Warning - /// - /// The number of Vetoers for a proposal must be small, extrinsics are weighted according to - /// [MAX_VETOERS](./const.MAX_VETOERS.html) - type VetoOrigin: EnsureOrigin; + /// Indicator for whether an emergency origin is even allowed to happen. Some chains may + /// want to set this permanently to `false`, others may want to condition it on things such + /// as an upgrade having happened recently. + #[pallet::constant] + type InstantAllowed: Get; - /// Period in blocks where an external proposal may not be re-submitted after being vetoed. - type CooloffPeriod: Get; + /// Minimum voting period allowed for a fast-track referendum. + #[pallet::constant] + type FastTrackVotingPeriod: Get; - /// The amount of balance that must be deposited per byte of preimage stored. - type PreimageByteDeposit: Get>; + /// Origin from which any referendum may be cancelled in an emergency. + type CancellationOrigin: EnsureOrigin; - /// An origin that can provide a preimage using operational extrinsics. - type OperationalPreimageOrigin: EnsureOrigin; + /// Origin from which proposals may be blacklisted. + type BlacklistOrigin: EnsureOrigin; - /// Handler for the unbalanced reduction when slashing a preimage deposit. - type Slash: OnUnbalanced>; + /// Origin from which a proposal may be cancelled and its backers slashed. + type CancelProposalOrigin: EnsureOrigin; - /// The Scheduler. - type Scheduler: ScheduleNamed; + /// Origin for anyone able to veto proposals. + /// + /// # Warning + /// + /// The number of Vetoers for a proposal must be small, extrinsics are weighted according to + /// [MAX_VETOERS](./const.MAX_VETOERS.html) + type VetoOrigin: EnsureOrigin; - /// Overarching type of all pallets origins. - type PalletsOrigin: From>; + /// Period in blocks where an external proposal may not be re-submitted after being vetoed. + #[pallet::constant] + type CooloffPeriod: Get; - /// The maximum number of votes for an account. - /// - /// Also used to compute weight, an overly big value can - /// lead to extrinsic with very big weight: see `delegate` for instance. - type MaxVotes: Get; + /// The amount of balance that must be deposited per byte of preimage stored. + #[pallet::constant] + type PreimageByteDeposit: Get>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; + /// An origin that can provide a preimage using operational extrinsics. + type OperationalPreimageOrigin: EnsureOrigin; - /// The maximum number of public proposals that can exist at any time. - type MaxProposals: Get; -} + /// Handler for the unbalanced reduction when slashing a preimage deposit. 
+ type Slash: OnUnbalanced>; -#[derive(Clone, Encode, Decode, RuntimeDebug)] -pub enum PreimageStatus { - /// The preimage is imminently needed at the argument. - Missing(BlockNumber), - /// The preimage is available. - Available { - data: Vec, - provider: AccountId, - deposit: Balance, - since: BlockNumber, - /// None if it's not imminent. - expiry: Option, - }, -} + /// The Scheduler. + type Scheduler: ScheduleNamed; -impl PreimageStatus { - fn to_missing_expiry(self) -> Option { - match self { - PreimageStatus::Missing(expiry) => Some(expiry), - _ => None, - } + /// Overarching type of all pallets origins. + type PalletsOrigin: From>; + + /// The maximum number of votes for an account. + /// + /// Also used to compute weight, an overly big value can + /// lead to extrinsic with very big weight: see `delegate` for instance. + #[pallet::constant] + type MaxVotes: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + + /// The maximum number of public proposals that can exist at any time. + #[pallet::constant] + type MaxProposals: Get; } -} -// A value placed in storage that represents the current version of the Democracy storage. -// This value is used by the `on_runtime_upgrade` logic to determine whether we run -// storage migration logic. -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] -enum Releases { - V1, -} + // TODO: Refactor public proposal queue into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 + /// The number of (public) proposals that have been made so far. + #[pallet::storage] + #[pallet::getter(fn public_prop_count)] + pub type PublicPropCount = StorageValue<_, PropIndex, ValueQuery>; + + /// The public proposals. Unsorted. The second item is the proposal's hash. + #[pallet::storage] + #[pallet::getter(fn public_props)] + pub type PublicProps = + StorageValue<_, Vec<(PropIndex, T::Hash, T::AccountId)>, ValueQuery>; -decl_storage! { - trait Store for Module as Democracy { - // TODO: Refactor public proposal queue into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 - /// The number of (public) proposals that have been made so far. - pub PublicPropCount get(fn public_prop_count) build(|_| 0 as PropIndex) : PropIndex; - /// The public proposals. Unsorted. The second item is the proposal's hash. - pub PublicProps get(fn public_props): Vec<(PropIndex, T::Hash, T::AccountId)>; - /// Those who have locked a deposit. - /// - /// TWOX-NOTE: Safe, as increasing integer keys are safe. - pub DepositOf get(fn deposit_of): - map hasher(twox_64_concat) PropIndex => Option<(Vec, BalanceOf)>; - - /// Map of hashes to the proposal preimage, along with who registered it and their deposit. - /// The block number is the block at which it was deposited. - // TODO: Refactor Preimages into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 - pub Preimages: - map hasher(identity) T::Hash - => Option, T::BlockNumber>>; - - /// The next free referendum index, aka the number of referenda started so far. - pub ReferendumCount get(fn referendum_count) build(|_| 0 as ReferendumIndex): ReferendumIndex; - /// The lowest referendum index representing an unbaked referendum. Equal to - /// `ReferendumCount` if there isn't a unbaked referendum. - pub LowestUnbaked get(fn lowest_unbaked) build(|_| 0 as ReferendumIndex): ReferendumIndex; - - /// Information concerning any given referendum. - /// - /// TWOX-NOTE: SAFE as indexes are not under an attacker’s control. 
- pub ReferendumInfoOf get(fn referendum_info): - map hasher(twox_64_concat) ReferendumIndex - => Option>>; - - /// All votes for a particular voter. We store the balance for the number of votes that we - /// have recorded. The second item is the total amount of delegations, that will be added. - /// - /// TWOX-NOTE: SAFE as `AccountId`s are crypto hashes anyway. - pub VotingOf: map hasher(twox_64_concat) T::AccountId => Voting, T::AccountId, T::BlockNumber>; - - /// Accounts for which there are locks in action which may be removed at some point in the - /// future. The value is the block number at which the lock expires and may be removed. - /// - /// TWOX-NOTE: OK ― `AccountId` is a secure hash. - pub Locks get(fn locks): map hasher(twox_64_concat) T::AccountId => Option; - - /// True if the last referendum tabled was submitted externally. False if it was a public - /// proposal. - // TODO: There should be any number of tabling origins, not just public and "external" (council). - // https://github.com/paritytech/substrate/issues/5322 - pub LastTabledWasExternal: bool; - - /// The referendum to be tabled whenever it would be valid to table an external proposal. - /// This happens when a referendum needs to be tabled and one of two conditions are met: - /// - `LastTabledWasExternal` is `false`; or - /// - `PublicProps` is empty. - pub NextExternal: Option<(T::Hash, VoteThreshold)>; - - /// A record of who vetoed what. Maps proposal hash to a possible existent block number - /// (until when it may not be resubmitted) and who vetoed it. - pub Blacklist: map hasher(identity) T::Hash => Option<(T::BlockNumber, Vec)>; - - /// Record of all proposals that have been subject to emergency cancellation. - pub Cancellations: map hasher(identity) T::Hash => bool; - - /// Storage version of the pallet. - /// - /// New networks start with last version. - StorageVersion build(|_| Some(Releases::V1)): Option; + /// Those who have locked a deposit. + /// + /// TWOX-NOTE: Safe, as increasing integer keys are safe. + #[pallet::storage] + #[pallet::getter(fn deposit_of)] + pub type DepositOf = + StorageMap<_, Twox64Concat, PropIndex, (Vec, BalanceOf)>; + + /// Map of hashes to the proposal preimage, along with who registered it and their deposit. + /// The block number is the block at which it was deposited. + // TODO: Refactor Preimages into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 + #[pallet::storage] + pub type Preimages = StorageMap< + _, + Identity, + T::Hash, + PreimageStatus, T::BlockNumber>, + >; + + /// The next free referendum index, aka the number of referenda started so far. + #[pallet::storage] + #[pallet::getter(fn referendum_count)] + pub type ReferendumCount = StorageValue<_, ReferendumIndex, ValueQuery>; + + /// The lowest referendum index representing an unbaked referendum. Equal to + /// `ReferendumCount` if there isn't a unbaked referendum. + #[pallet::storage] + #[pallet::getter(fn lowest_unbaked)] + pub type LowestUnbaked = StorageValue<_, ReferendumIndex, ValueQuery>; + + /// Information concerning any given referendum. + /// + /// TWOX-NOTE: SAFE as indexes are not under an attacker’s control. + #[pallet::storage] + #[pallet::getter(fn referendum_info)] + pub type ReferendumInfoOf = StorageMap< + _, + Twox64Concat, + ReferendumIndex, + ReferendumInfo>, + >; + + /// All votes for a particular voter. We store the balance for the number of votes that we + /// have recorded. The second item is the total amount of delegations, that will be added. 
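The migrated storage items above pick a query kind per item: `ValueQuery` makes a missing key decode to the type's default (as for `ReferendumCount` and `LowestUnbaked`), while items declared without it return an `Option` (the `OptionQuery` default, as for `ReferendumInfoOf`). A plain-Rust sketch of the distinction, with a `HashMap` standing in for trie storage:

use std::collections::HashMap;

// ValueQuery semantics: absent keys read back as Default::default().
fn value_query(storage: &HashMap<u32, u32>, key: u32) -> u32 {
    storage.get(&key).copied().unwrap_or_default()
}

// OptionQuery semantics (the default): absent keys read back as None.
fn option_query(storage: &HashMap<u32, u32>, key: u32) -> Option<u32> {
    storage.get(&key).copied()
}

fn main() {
    let storage = HashMap::new();
    assert_eq!(value_query(&storage, 0), 0);      // e.g. ReferendumCount
    assert_eq!(option_query(&storage, 0), None);  // e.g. ReferendumInfoOf
}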
+ /// + /// TWOX-NOTE: SAFE as `AccountId`s are crypto hashes anyway. + #[pallet::storage] + pub type VotingOf = StorageMap< + _, + Twox64Concat, + T::AccountId, + Voting, T::AccountId, T::BlockNumber>, + ValueQuery, + >; + + /// Accounts for which there are locks in action which may be removed at some point in the + /// future. The value is the block number at which the lock expires and may be removed. + /// + /// TWOX-NOTE: OK ― `AccountId` is a secure hash. + #[pallet::storage] + #[pallet::getter(fn locks)] + pub type Locks = StorageMap<_, Twox64Concat, T::AccountId, T::BlockNumber>; + + /// True if the last referendum tabled was submitted externally. False if it was a public + /// proposal. + // TODO: There should be any number of tabling origins, not just public and "external" + // (council). https://github.com/paritytech/substrate/issues/5322 + #[pallet::storage] + pub type LastTabledWasExternal = StorageValue<_, bool, ValueQuery>; + + /// The referendum to be tabled whenever it would be valid to table an external proposal. + /// This happens when a referendum needs to be tabled and one of two conditions are met: + /// - `LastTabledWasExternal` is `false`; or + /// - `PublicProps` is empty. + #[pallet::storage] + pub type NextExternal = StorageValue<_, (T::Hash, VoteThreshold)>; + + /// A record of who vetoed what. Maps proposal hash to a possible existent block number + /// (until when it may not be resubmitted) and who vetoed it. + #[pallet::storage] + pub type Blacklist = + StorageMap<_, Identity, T::Hash, (T::BlockNumber, Vec)>; + + /// Record of all proposals that have been subject to emergency cancellation. + #[pallet::storage] + pub type Cancellations = StorageMap<_, Identity, T::Hash, bool, ValueQuery>; + + /// Storage version of the pallet. + /// + /// New networks start with last version. + #[pallet::storage] + pub(crate) type StorageVersion = StorageValue<_, Releases>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + _phantom: sp_std::marker::PhantomData, } -} -decl_event! { - pub enum Event where - Balance = BalanceOf, - ::AccountId, - ::Hash, - ::BlockNumber, - { + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { _phantom: Default::default() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + PublicPropCount::::put(0 as PropIndex); + ReferendumCount::::put(0 as ReferendumIndex); + LowestUnbaked::::put(0 as ReferendumIndex); + StorageVersion::::put(Releases::V1); + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// A motion has been proposed by a public account. \[proposal_index, deposit\] - Proposed(PropIndex, Balance), - /// A public proposal has been tabled for referendum vote. \[proposal_index, deposit, depositors\] - Tabled(PropIndex, Balance, Vec), + Proposed(PropIndex, BalanceOf), + /// A public proposal has been tabled for referendum vote. \[proposal_index, deposit, + /// depositors\] + Tabled(PropIndex, BalanceOf, Vec), /// An external proposal has been tabled. ExternalTabled, /// A referendum has begun. \[ref_index, threshold\] @@ -458,43 +522,38 @@ decl_event! { NotPassed(ReferendumIndex), /// A referendum has been cancelled. \[ref_index\] Cancelled(ReferendumIndex), - /// A proposal has been enacted. \[ref_index, is_ok\] - Executed(ReferendumIndex, bool), + /// A proposal has been enacted. 
\[ref_index, result\] + Executed(ReferendumIndex, DispatchResult), /// An account has delegated their vote to another account. \[who, target\] - Delegated(AccountId, AccountId), + Delegated(T::AccountId, T::AccountId), /// An \[account\] has cancelled a previous delegation operation. - Undelegated(AccountId), + Undelegated(T::AccountId), /// An external proposal has been vetoed. \[who, proposal_hash, until\] - Vetoed(AccountId, Hash, BlockNumber), + Vetoed(T::AccountId, T::Hash, T::BlockNumber), /// A proposal's preimage was noted, and the deposit taken. \[proposal_hash, who, deposit\] - PreimageNoted(Hash, AccountId, Balance), + PreimageNoted(T::Hash, T::AccountId, BalanceOf), /// A proposal preimage was removed and used (the deposit was returned). /// \[proposal_hash, provider, deposit\] - PreimageUsed(Hash, AccountId, Balance), + PreimageUsed(T::Hash, T::AccountId, BalanceOf), /// A proposal could not be executed because its preimage was invalid. /// \[proposal_hash, ref_index\] - PreimageInvalid(Hash, ReferendumIndex), + PreimageInvalid(T::Hash, ReferendumIndex), /// A proposal could not be executed because its preimage was missing. /// \[proposal_hash, ref_index\] - PreimageMissing(Hash, ReferendumIndex), + PreimageMissing(T::Hash, ReferendumIndex), /// A registered preimage was removed and the deposit collected by the reaper. /// \[proposal_hash, provider, deposit, reaper\] - PreimageReaped(Hash, AccountId, Balance, AccountId), - /// An \[account\] has been unlocked successfully. - Unlocked(AccountId), + PreimageReaped(T::Hash, T::AccountId, BalanceOf, T::AccountId), /// A proposal \[hash\] has been blacklisted permanently. - Blacklisted(Hash), + Blacklisted(T::Hash), } -} -decl_error! { - pub enum Error for Module { + #[pallet::error] + pub enum Error { /// Value too low ValueLow, /// Proposal does not exist ProposalMissing, - /// Unknown index - BadIndex, /// Cannot cancel the same proposal twice AlreadyCanceled, /// Proposal already made @@ -509,8 +568,6 @@ decl_error! { NoProposal, /// Identity may not veto a proposal twice AlreadyVetoed, - /// Not delegated - NotDelegated, /// Preimage already noted DuplicatePreimage, /// Not imminent @@ -527,20 +584,12 @@ decl_error! { PreimageInvalid, /// No proposals waiting NoneWaiting, - /// The target account does not have a lock. - NotLocked, - /// The lock on the account to be unlocked has not yet expired. - NotExpired, /// The given account did not vote on the referendum. NotVoter, /// The actor has no permission to conduct the action. NoPermission, /// The account is already delegating. AlreadyDelegating, - /// An unexpected integer overflow occurred. - Overflow, - /// An unexpected integer underflow occurred. - Underflow, /// Too high a balance was provided that the account cannot afford. InsufficientFunds, /// The account is not currently delegating. @@ -556,47 +605,23 @@ decl_error! { WrongUpperBound, /// Maximum number of votes reached. MaxVotesReached, - /// The provided witness data is wrong. - InvalidWitness, /// Maximum number of proposals reached. TooManyProposals, } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum period of locking and the period between a proposal being approved and enacted. - /// - /// It should generally be a little more than the unstake period to ensure that - /// voting stakers have an opportunity to remove themselves from the system in the case where - /// they are on the losing side of a vote. 
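The error enum above drops the bespoke `Overflow` and `Underflow` variants; with `ArithmeticError` now imported from `sp_runtime`, checked arithmetic reports through the shared type instead. A sketch of the pattern, with a local stand-in enum and a hypothetical helper name:

// Local stand-in for sp_runtime::ArithmeticError.
#[derive(Debug, PartialEq)]
enum ArithmeticError {
    Overflow,
}

// Hypothetical helper: checked_add + ok_or replaces a pallet-local
// `Error::<T>::Overflow`.
fn add_seconds(current: u32, extra: u32) -> Result<u32, ArithmeticError> {
    current.checked_add(extra).ok_or(ArithmeticError::Overflow)
}

fn main() {
    assert_eq!(add_seconds(1, 2), Ok(3));
    assert_eq!(add_seconds(u32::MAX, 1), Err(ArithmeticError::Overflow));
}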
- const EnactmentPeriod: T::BlockNumber = T::EnactmentPeriod::get(); - /// How often (in blocks) new public referenda are launched. - const LaunchPeriod: T::BlockNumber = T::LaunchPeriod::get(); - - /// How often (in blocks) to check for new votes. - const VotingPeriod: T::BlockNumber = T::VotingPeriod::get(); - - /// The minimum amount to be used as a deposit for a public referendum proposal. - const MinimumDeposit: BalanceOf = T::MinimumDeposit::get(); - - /// Minimum voting period allowed for an emergency referendum. - const FastTrackVotingPeriod: T::BlockNumber = T::FastTrackVotingPeriod::get(); - - /// Period in blocks where an external proposal may not be re-submitted after being vetoed. - const CooloffPeriod: T::BlockNumber = T::CooloffPeriod::get(); - - /// The amount of balance that must be deposited per byte of preimage stored. - const PreimageByteDeposit: BalanceOf = T::PreimageByteDeposit::get(); - - /// The maximum number of votes for an account. - const MaxVotes: u32 = T::MaxVotes::get(); - - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet { + /// Weight: see `begin_block` + fn on_initialize(n: T::BlockNumber) -> Weight { + Self::begin_block(n).unwrap_or_else(|e| { + sp_runtime::print(e); + 0 + }) + } + } + #[pallet::call] + impl Pallet { /// Propose a sensitive action to be taken. /// /// The dispatch origin of this call must be _Signed_ and the sender must @@ -608,11 +633,12 @@ decl_module! { /// Emits `Proposed`. /// /// Weight: `O(p)` - #[weight = T::WeightInfo::propose()] - fn propose(origin, + #[pallet::weight(T::WeightInfo::propose())] + pub fn propose( + origin: OriginFor, proposal_hash: T::Hash, - #[compact] value: BalanceOf, - ) { + #[pallet::compact] value: BalanceOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(value >= T::MinimumDeposit::get(), Error::::ValueLow); @@ -623,18 +649,19 @@ decl_module! { if let Some((until, _)) = >::get(proposal_hash) { ensure!( - >::block_number() >= until, + >::block_number() >= until, Error::::ProposalBlacklisted, ); } T::Currency::reserve(&who, value)?; - PublicPropCount::put(index + 1); + PublicPropCount::::put(index + 1); >::insert(index, (&[&who][..], value)); >::append((index, proposal_hash, who)); - Self::deposit_event(RawEvent::Proposed(index, value)); + Self::deposit_event(Event::::Proposed(index, value)); + Ok(()) } /// Signals agreement with a particular proposal. @@ -647,18 +674,22 @@ decl_module! { /// proposal. Extrinsic is weighted according to this value with no refund. /// /// Weight: `O(S)` where S is the number of seconds a proposal already has. 
- #[weight = T::WeightInfo::second(*seconds_upper_bound)] - fn second(origin, #[compact] proposal: PropIndex, #[compact] seconds_upper_bound: u32) { + #[pallet::weight(T::WeightInfo::second(*seconds_upper_bound))] + pub fn second( + origin: OriginFor, + #[pallet::compact] proposal: PropIndex, + #[pallet::compact] seconds_upper_bound: u32, + ) -> DispatchResult { let who = ensure_signed(origin)?; - let seconds = Self::len_of_deposit_of(proposal) - .ok_or_else(|| Error::::ProposalMissing)?; + let seconds = + Self::len_of_deposit_of(proposal).ok_or_else(|| Error::::ProposalMissing)?; ensure!(seconds <= seconds_upper_bound, Error::::WrongUpperBound); - let mut deposit = Self::deposit_of(proposal) - .ok_or(Error::::ProposalMissing)?; + let mut deposit = Self::deposit_of(proposal).ok_or(Error::::ProposalMissing)?; T::Currency::reserve(&who, deposit.1)?; deposit.0.push(who); >::insert(proposal, deposit); + Ok(()) } /// Vote in a referendum. If `vote.is_aye()`, the vote is to enact the proposal; @@ -670,10 +701,13 @@ decl_module! { /// - `vote`: The vote configuration. /// /// Weight: `O(R)` where R is the number of referendums the voter has voted on. - #[weight = T::WeightInfo::vote_new(T::MaxVotes::get()) - .max(T::WeightInfo::vote_existing(T::MaxVotes::get()))] - fn vote(origin, - #[compact] ref_index: ReferendumIndex, + #[pallet::weight( + T::WeightInfo::vote_new(T::MaxVotes::get()) + .max(T::WeightInfo::vote_existing(T::MaxVotes::get())) + )] + pub fn vote( + origin: OriginFor, + #[pallet::compact] ref_index: ReferendumIndex, vote: AccountVote>, ) -> DispatchResult { let who = ensure_signed(origin)?; @@ -688,8 +722,11 @@ decl_module! { /// -`ref_index`: The index of the referendum to cancel. /// /// Weight: `O(1)`. - #[weight = (T::WeightInfo::emergency_cancel(), DispatchClass::Operational)] - fn emergency_cancel(origin, ref_index: ReferendumIndex) { + #[pallet::weight((T::WeightInfo::emergency_cancel(), DispatchClass::Operational))] + pub fn emergency_cancel( + origin: OriginFor, + ref_index: ReferendumIndex, + ) -> DispatchResult { T::CancellationOrigin::ensure_origin(origin)?; let status = Self::referendum_status(ref_index)?; @@ -698,6 +735,7 @@ decl_module! { >::insert(h, true); Self::internal_cancel_referendum(ref_index); + Ok(()) } /// Schedule a referendum to be tabled once it is legal to schedule an external @@ -709,17 +747,18 @@ decl_module! { /// /// Weight: `O(V)` with V number of vetoers in the blacklist of proposal. /// Decoding vec of length V. Charged as maximum - #[weight = T::WeightInfo::external_propose(MAX_VETOERS)] - fn external_propose(origin, proposal_hash: T::Hash) { + #[pallet::weight(T::WeightInfo::external_propose(MAX_VETOERS))] + pub fn external_propose(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { T::ExternalOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::DuplicateProposal); if let Some((until, _)) = >::get(proposal_hash) { ensure!( - >::block_number() >= until, + >::block_number() >= until, Error::::ProposalBlacklisted, ); } >::put((proposal_hash, VoteThreshold::SuperMajorityApprove)); + Ok(()) } /// Schedule a majority-carries referendum to be tabled next once it is legal to schedule @@ -733,10 +772,14 @@ decl_module! { /// pre-scheduled `external_propose` call. 
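`second` above is weighted from a caller-supplied `seconds_upper_bound` witness and rejects the call with `WrongUpperBound` when the actual number of seconders exceeds it, so an understated bound costs the caller the call rather than undercharging the chain. A standalone sketch (the weight constants are reused from the deleted default weights purely for illustration):

// Hypothetical weight model: weight grows with the claimed upper bound.
fn second_weight(seconds_upper_bound: u32) -> u64 {
    58_386_000 + 259_000 * seconds_upper_bound as u64
}

fn second(actual_seconds: u32, seconds_upper_bound: u32) -> Result<u64, &'static str> {
    let charged = second_weight(seconds_upper_bound); // charged before checks
    if actual_seconds > seconds_upper_bound {
        return Err("WrongUpperBound");
    }
    Ok(charged)
}

fn main() {
    assert!(second(5, 10).is_ok());
    assert_eq!(second(11, 10), Err("WrongUpperBound"));
}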
/// /// Weight: `O(1)` - #[weight = T::WeightInfo::external_propose_majority()] - fn external_propose_majority(origin, proposal_hash: T::Hash) { + #[pallet::weight(T::WeightInfo::external_propose_majority())] + pub fn external_propose_majority( + origin: OriginFor, + proposal_hash: T::Hash, + ) -> DispatchResult { T::ExternalMajorityOrigin::ensure_origin(origin)?; >::put((proposal_hash, VoteThreshold::SimpleMajority)); + Ok(()) } /// Schedule a negative-turnout-bias referendum to be tabled next once it is legal to @@ -750,10 +793,14 @@ decl_module! { /// pre-scheduled `external_propose` call. /// /// Weight: `O(1)` - #[weight = T::WeightInfo::external_propose_default()] - fn external_propose_default(origin, proposal_hash: T::Hash) { + #[pallet::weight(T::WeightInfo::external_propose_default())] + pub fn external_propose_default( + origin: OriginFor, + proposal_hash: T::Hash, + ) -> DispatchResult { T::ExternalDefaultOrigin::ensure_origin(origin)?; >::put((proposal_hash, VoteThreshold::SuperMajorityAgainst)); + Ok(()) } /// Schedule the currently externally-proposed majority-carries referendum to be tabled @@ -771,14 +818,16 @@ decl_module! { /// Emits `Started`. /// /// Weight: `O(1)` - #[weight = T::WeightInfo::fast_track()] - fn fast_track(origin, + #[pallet::weight(T::WeightInfo::fast_track())] + pub fn fast_track( + origin: OriginFor, proposal_hash: T::Hash, voting_period: T::BlockNumber, delay: T::BlockNumber, - ) { + ) -> DispatchResult { // Rather complicated bit of code to ensure that either: - // - `voting_period` is at least `FastTrackVotingPeriod` and `origin` is `FastTrackOrigin`; or + // - `voting_period` is at least `FastTrackVotingPeriod` and `origin` is + // `FastTrackOrigin`; or // - `InstantAllowed` is `true` and `origin` is `InstantOrigin`. let maybe_ensure_instant = if voting_period < T::FastTrackVotingPeriod::get() { Some(origin) @@ -794,8 +843,8 @@ decl_module! { ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); } - let (e_proposal_hash, threshold) = >::get() - .ok_or(Error::::ProposalMissing)?; + let (e_proposal_hash, threshold) = + >::get().ok_or(Error::::ProposalMissing)?; ensure!( threshold != VoteThreshold::SuperMajorityApprove, Error::::NotSimpleMajority, @@ -803,8 +852,9 @@ decl_module! { ensure!(proposal_hash == e_proposal_hash, Error::::InvalidHash); >::kill(); - let now = >::block_number(); + let now = >::block_number(); Self::inject_referendum(now + voting_period, proposal_hash, threshold, delay); + Ok(()) } /// Veto and blacklist the external proposal hash. @@ -816,8 +866,8 @@ decl_module! { /// Emits `Vetoed`. /// /// Weight: `O(V + log(V))` where V is number of `existing vetoers` - #[weight = T::WeightInfo::veto_external(MAX_VETOERS)] - fn veto_external(origin, proposal_hash: T::Hash) { + #[pallet::weight(T::WeightInfo::veto_external(MAX_VETOERS))] + pub fn veto_external(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; if let Some((e_proposal_hash, _)) = >::get() { @@ -826,18 +876,18 @@ decl_module! 
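The "rather complicated bit of code" in `fast_track` above reduces to: a voting period shorter than `FastTrackVotingPeriod` demands the instant origin and requires `InstantAllowed`; otherwise the fast-track origin suffices. The same decision table in standalone form, assuming the origin has already been authenticated as one of the two:

#[derive(PartialEq)]
enum Origin {
    FastTrack,
    Instant,
}

fn ensure_fast_track(
    origin: Origin,
    voting_period: u32,
    fast_track_voting_period: u32,
    instant_allowed: bool,
) -> Result<(), &'static str> {
    if voting_period < fast_track_voting_period {
        // Shorter than FastTrackOrigin may grant: only the instant path remains.
        if origin != Origin::Instant {
            return Err("BadOrigin");
        }
        // ...and instant referenda must be enabled at all.
        if !instant_allowed {
            return Err("InstantNotAllowed");
        }
    }
    Ok(())
}

fn main() {
    assert!(ensure_fast_track(Origin::FastTrack, 10, 5, false).is_ok());
    assert_eq!(ensure_fast_track(Origin::FastTrack, 3, 5, true), Err("BadOrigin"));
    assert_eq!(ensure_fast_track(Origin::Instant, 3, 5, false), Err("InstantNotAllowed"));
}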
{ Err(Error::::NoProposal)?; } - let mut existing_vetoers = >::get(&proposal_hash) - .map(|pair| pair.1) - .unwrap_or_else(Vec::new); - let insert_position = existing_vetoers.binary_search(&who) - .err().ok_or(Error::::AlreadyVetoed)?; + let mut existing_vetoers = + >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_else(Vec::new); + let insert_position = + existing_vetoers.binary_search(&who).err().ok_or(Error::::AlreadyVetoed)?; existing_vetoers.insert(insert_position, who.clone()); - let until = >::block_number() + T::CooloffPeriod::get(); + let until = >::block_number() + T::CooloffPeriod::get(); >::insert(&proposal_hash, (until, existing_vetoers)); - Self::deposit_event(RawEvent::Vetoed(who, proposal_hash, until)); + Self::deposit_event(Event::::Vetoed(who, proposal_hash, until)); >::kill(); + Ok(()) } /// Remove a referendum. @@ -847,10 +897,14 @@ decl_module! { /// - `ref_index`: The index of the referendum to cancel. /// /// # Weight: `O(1)`. - #[weight = T::WeightInfo::cancel_referendum()] - fn cancel_referendum(origin, #[compact] ref_index: ReferendumIndex) { + #[pallet::weight(T::WeightInfo::cancel_referendum())] + pub fn cancel_referendum( + origin: OriginFor, + #[pallet::compact] ref_index: ReferendumIndex, + ) -> DispatchResult { ensure_root(origin)?; Self::internal_cancel_referendum(ref_index); + Ok(()) } /// Cancel a proposal queued for enactment. @@ -860,19 +914,12 @@ decl_module! { /// - `which`: The index of the referendum to cancel. /// /// Weight: `O(D)` where `D` is the items in the dispatch queue. Weighted as `D = 10`. - #[weight = (T::WeightInfo::cancel_queued(10), DispatchClass::Operational)] - fn cancel_queued(origin, which: ReferendumIndex) { + #[pallet::weight((T::WeightInfo::cancel_queued(10), DispatchClass::Operational))] + pub fn cancel_queued(origin: OriginFor, which: ReferendumIndex) -> DispatchResult { ensure_root(origin)?; T::Scheduler::cancel_named((DEMOCRACY_ID, which).encode()) .map_err(|_| Error::::ProposalMissing)?; - } - - /// Weight: see `begin_block` - fn on_initialize(n: T::BlockNumber) -> Weight { - Self::begin_block(n).unwrap_or_else(|e| { - sp_runtime::print(e); - 0 - }) + Ok(()) } /// Delegate the voting power (with some given conviction) of the sending account. @@ -888,8 +935,8 @@ decl_module! { /// - `to`: The account whose voting the `target` account's voting power will follow. /// - `conviction`: The conviction that will be attached to the delegated votes. When the /// account is undelegated, the funds will be locked for the corresponding period. - /// - `balance`: The amount of the account's balance to be used in delegating. This must - /// not be more than the account's current balance. + /// - `balance`: The amount of the account's balance to be used in delegating. This must not + /// be more than the account's current balance. /// /// Emits `Delegated`. /// @@ -897,12 +944,12 @@ decl_module! { /// voted on. Weight is charged as if maximum votes. // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensure // because a valid delegation cover decoding a direct voting with max votes. - #[weight = T::WeightInfo::delegate(T::MaxVotes::get())] + #[pallet::weight(T::WeightInfo::delegate(T::MaxVotes::get()))] pub fn delegate( - origin, + origin: OriginFor, to: T::AccountId, conviction: Conviction, - balance: BalanceOf + balance: BalanceOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let votes = Self::try_delegate(who, to, conviction, balance)?; @@ -924,8 +971,8 @@ decl_module! 
{ /// voted on. Weight is charged as if maximum votes. // NOTE: weight must cover an incorrect voting of origin with max votes, this is ensure // because a valid delegation cover decoding a direct voting with max votes. - #[weight = T::WeightInfo::undelegate(T::MaxVotes::get().into())] - fn undelegate(origin) -> DispatchResultWithPostInfo { + #[pallet::weight(T::WeightInfo::undelegate(T::MaxVotes::get().into()))] + pub fn undelegate(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let votes = Self::try_undelegate(who)?; Ok(Some(T::WeightInfo::undelegate(votes)).into()) @@ -936,10 +983,11 @@ decl_module! { /// The dispatch origin of this call must be _Root_. /// /// Weight: `O(1)`. - #[weight = T::WeightInfo::clear_public_proposals()] - fn clear_public_proposals(origin) { + #[pallet::weight(T::WeightInfo::clear_public_proposals())] + pub fn clear_public_proposals(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; >::kill(); + Ok(()) } /// Register the preimage for an upcoming proposal. This doesn't require the proposal to be @@ -952,19 +1000,24 @@ decl_module! { /// Emits `PreimageNoted`. /// /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - #[weight = T::WeightInfo::note_preimage(encoded_proposal.len() as u32)] - fn note_preimage(origin, encoded_proposal: Vec) { + #[pallet::weight(T::WeightInfo::note_preimage(encoded_proposal.len() as u32))] + pub fn note_preimage(origin: OriginFor, encoded_proposal: Vec) -> DispatchResult { Self::note_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; + Ok(()) } /// Same as `note_preimage` but origin is `OperationalPreimageOrigin`. - #[weight = ( + #[pallet::weight(( T::WeightInfo::note_preimage(encoded_proposal.len() as u32), DispatchClass::Operational, - )] - fn note_preimage_operational(origin, encoded_proposal: Vec) { + ))] + pub fn note_preimage_operational( + origin: OriginFor, + encoded_proposal: Vec, + ) -> DispatchResult { let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; Self::note_preimage_inner(who, encoded_proposal)?; + Ok(()) } /// Register the preimage for an upcoming proposal. This requires the proposal to be @@ -979,24 +1032,32 @@ decl_module! { /// Emits `PreimageNoted`. /// /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - #[weight = T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32)] - fn note_imminent_preimage(origin, encoded_proposal: Vec) -> DispatchResultWithPostInfo { + #[pallet::weight(T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32))] + pub fn note_imminent_preimage( + origin: OriginFor, + encoded_proposal: Vec, + ) -> DispatchResultWithPostInfo { Self::note_imminent_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; - // We check that this preimage was not uploaded before in `note_imminent_preimage_inner`, - // thus this call can only be successful once. If successful, user does not pay a fee. + // We check that this preimage was not uploaded before in + // `note_imminent_preimage_inner`, thus this call can only be successful once. If + // successful, user does not pay a fee. Ok(Pays::No.into()) } /// Same as `note_imminent_preimage` but origin is `OperationalPreimageOrigin`. 
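Two post-dispatch refund idioms recur here: `delegate`/`undelegate` charge for `T::MaxVotes` up front and return the weight for the actual vote count, while the imminent-preimage notes return `Pays::No` because they can only succeed once. A sketch of just the `PostDispatchInfo` conversions involved, outside any pallet context:

    use frame_support::weights::{Pays, PostDispatchInfo, Weight};

    fn refunds() {
        let actual: Weight = 1_000;
        // Override the pre-charged worst-case weight with the actual one:
        let _refund_weight: PostDispatchInfo = Some(actual).into();
        // Waive the fee on success entirely:
        let _no_fee: PostDispatchInfo = Pays::No.into();
    }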
- #[weight = ( + #[pallet::weight(( T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32), DispatchClass::Operational, - )] - fn note_imminent_preimage_operational(origin, encoded_proposal: Vec) -> DispatchResultWithPostInfo { + ))] + pub fn note_imminent_preimage_operational( + origin: OriginFor, + encoded_proposal: Vec, + ) -> DispatchResultWithPostInfo { let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; Self::note_imminent_preimage_inner(who, encoded_proposal)?; - // We check that this preimage was not uploaded before in `note_imminent_preimage_inner`, - // thus this call can only be successful once. If successful, user does not pay a fee. + // We check that this preimage was not uploaded before in + // `note_imminent_preimage_inner`, thus this call can only be successful once. If + // successful, user does not pay a fee. Ok(Pays::No.into()) } @@ -1005,8 +1066,8 @@ decl_module! { /// The dispatch origin of this call must be _Signed_. /// /// - `proposal_hash`: The preimage hash of a proposal. - /// - `proposal_length_upper_bound`: an upper bound on length of the proposal. - /// Extrinsic is weighted according to this value with no refund. + /// - `proposal_length_upper_bound`: an upper bound on length of the proposal. Extrinsic is + /// weighted according to this value with no refund. /// /// This will only work after `VotingPeriod` blocks from the time that the preimage was /// noted, if it's the same account doing it. If it's a different account, then it'll only @@ -1015,8 +1076,12 @@ decl_module! { /// Emits `PreimageReaped`. /// /// Weight: `O(D)` where D is length of proposal. - #[weight = T::WeightInfo::reap_preimage(*proposal_len_upper_bound)] - fn reap_preimage(origin, proposal_hash: T::Hash, #[compact] proposal_len_upper_bound: u32) { + #[pallet::weight(T::WeightInfo::reap_preimage(*proposal_len_upper_bound))] + pub fn reap_preimage( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] proposal_len_upper_bound: u32, + ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!( @@ -1026,20 +1091,24 @@ decl_module! { let (provider, deposit, since, expiry) = >::get(&proposal_hash) .and_then(|m| match m { - PreimageStatus::Available { provider, deposit, since, expiry, .. } - => Some((provider, deposit, since, expiry)), + PreimageStatus::Available { provider, deposit, since, expiry, .. } => + Some((provider, deposit, since, expiry)), _ => None, - }).ok_or(Error::::PreimageMissing)?; + }) + .ok_or(Error::::PreimageMissing)?; - let now = >::block_number(); + let now = >::block_number(); let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); let additional = if who == provider { Zero::zero() } else { enactment }; ensure!(now >= since + voting + additional, Error::::TooEarly); ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); - let _ = T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); + let res = + T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); + debug_assert!(res.is_ok()); >::remove(&proposal_hash); - Self::deposit_event(RawEvent::PreimageReaped(proposal_hash, provider, deposit, who)); + Self::deposit_event(Event::::PreimageReaped(proposal_hash, provider, deposit, who)); + Ok(()) } /// Unlock tokens that have an expired lock. @@ -1049,11 +1118,14 @@ decl_module! { /// - `target`: The account to remove the lock on. /// /// Weight: `O(R)` with R number of vote of target. 
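`second` and `reap_preimage` use the caller-supplied-bound pattern: the weight attribute charges according to an upper bound passed in by the caller, and the call then validates that claim on-chain before doing any real work. The validating `ensure!` for `reap_preimage` sits just past this hunk boundary; a sketch of what such a guard looks like, with the condition assumed rather than quoted from the diff:

    // Charged before dispatch, from the caller's claim:
    //   #[pallet::weight(T::WeightInfo::reap_preimage(*proposal_len_upper_bound))]
    // Checked first thing inside the call body (assumed form):
    ensure!(
        Self::pre_image_data_len(proposal_hash)? <= proposal_len_upper_bound,
        Error::<T>::WrongUpperBound,
    );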
- #[weight = T::WeightInfo::unlock_set(T::MaxVotes::get()) - .max(T::WeightInfo::unlock_remove(T::MaxVotes::get()))] - fn unlock(origin, target: T::AccountId) { + #[pallet::weight( + T::WeightInfo::unlock_set(T::MaxVotes::get()) + .max(T::WeightInfo::unlock_remove(T::MaxVotes::get())) + )] + pub fn unlock(origin: OriginFor, target: T::AccountId) -> DispatchResult { ensure_signed(origin)?; Self::update_lock(&target); + Ok(()) } /// Remove a vote for a referendum. @@ -1083,8 +1155,8 @@ decl_module! { /// /// Weight: `O(R + log R)` where R is the number of referenda that `target` has voted on. /// Weight is calculated for the maximum number of vote. - #[weight = T::WeightInfo::remove_vote(T::MaxVotes::get())] - fn remove_vote(origin, index: ReferendumIndex) -> DispatchResult { + #[pallet::weight(T::WeightInfo::remove_vote(T::MaxVotes::get()))] + pub fn remove_vote(origin: OriginFor, index: ReferendumIndex) -> DispatchResult { let who = ensure_signed(origin)?; Self::try_remove_vote(&who, index, UnvoteScope::Any) } @@ -1104,8 +1176,12 @@ decl_module! { /// /// Weight: `O(R + log R)` where R is the number of referenda that `target` has voted on. /// Weight is calculated for the maximum number of vote. - #[weight = T::WeightInfo::remove_other_vote(T::MaxVotes::get())] - fn remove_other_vote(origin, target: T::AccountId, index: ReferendumIndex) -> DispatchResult { + #[pallet::weight(T::WeightInfo::remove_other_vote(T::MaxVotes::get()))] + pub fn remove_other_vote( + origin: OriginFor, + target: T::AccountId, + index: ReferendumIndex, + ) -> DispatchResult { let who = ensure_signed(origin)?; let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; Self::try_remove_vote(&target, index, scope)?; @@ -1113,8 +1189,12 @@ decl_module! { } /// Enact a proposal from a referendum. For now we just make the weight be the maximum. - #[weight = T::MaximumBlockWeight::get()] - fn enact_proposal(origin, proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { + #[pallet::weight(T::BlockWeights::get().max_block)] + pub fn enact_proposal( + origin: OriginFor, + proposal_hash: T::Hash, + index: ReferendumIndex, + ) -> DispatchResult { ensure_root(origin)?; Self::do_enact_proposal(proposal_hash, index) } @@ -1134,11 +1214,12 @@ decl_module! { /// /// Weight: `O(p)` (though as this is an high-privilege dispatch, we assume it has a /// reasonable value). - #[weight = (T::WeightInfo::blacklist(T::MaxProposals::get()), DispatchClass::Operational)] - fn blacklist(origin, + #[pallet::weight((T::WeightInfo::blacklist(T::MaxProposals::get()), DispatchClass::Operational))] + pub fn blacklist( + origin: OriginFor, proposal_hash: T::Hash, maybe_ref_index: Option, - ) { + ) -> DispatchResult { T::BlacklistOrigin::ensure_origin(origin)?; // Insert the proposal into the blacklist. @@ -1171,7 +1252,8 @@ decl_module! { } } - Self::deposit_event(RawEvent::Blacklisted(proposal_hash)); + Self::deposit_event(Event::::Blacklisted(proposal_hash)); + Ok(()) } /// Remove a proposal. @@ -1181,8 +1263,11 @@ decl_module! { /// - `prop_index`: The index of the proposal to cancel. 
/// /// Weight: `O(p)` where `p = PublicProps::::decode_len()` - #[weight = T::WeightInfo::cancel_proposal(T::MaxProposals::get())] - fn cancel_proposal(origin, #[compact] prop_index: PropIndex) { + #[pallet::weight(T::WeightInfo::cancel_proposal(T::MaxProposals::get()))] + pub fn cancel_proposal( + origin: OriginFor, + #[pallet::compact] prop_index: PropIndex, + ) -> DispatchResult { T::CancelProposalOrigin::ensure_origin(origin)?; PublicProps::::mutate(|props| props.retain(|p| p.0 != prop_index)); @@ -1191,11 +1276,13 @@ decl_module! { T::Slash::on_unbalanced(T::Currency::slash_reserved(&who, amount).0); } } + + Ok(()) } } } -impl Module { +impl Pallet { // exposed immutables. /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal @@ -1206,7 +1293,7 @@ impl Module { /// Get all referenda ready for tally at block `n`. pub fn maturing_referenda_at( - n: T::BlockNumber + n: T::BlockNumber, ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { let next = Self::lowest_unbaked(); let last = Self::referendum_count(); @@ -1217,7 +1304,8 @@ impl Module { n: T::BlockNumber, range: core::ops::Range, ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { - range.into_iter() + range + .into_iter() .map(|i| (i, Self::referendum_info(i))) .filter_map(|(i, maybe_info)| match maybe_info { Some(ReferendumInfo::Ongoing(status)) => Some((i, status)), @@ -1233,44 +1321,47 @@ impl Module { pub fn internal_start_referendum( proposal_hash: T::Hash, threshold: VoteThreshold, - delay: T::BlockNumber + delay: T::BlockNumber, ) -> ReferendumIndex { - >::inject_referendum( - >::block_number() + T::VotingPeriod::get(), + >::inject_referendum( + >::block_number() + T::VotingPeriod::get(), proposal_hash, threshold, - delay + delay, ) } /// Remove a referendum. pub fn internal_cancel_referendum(ref_index: ReferendumIndex) { - Self::deposit_event(RawEvent::Cancelled(ref_index)); + Self::deposit_event(Event::::Cancelled(ref_index)); ReferendumInfoOf::::remove(ref_index); } // private. /// Ok if the given referendum is active, Err otherwise - fn ensure_ongoing(r: ReferendumInfo>) - -> Result>, DispatchError> - { + fn ensure_ongoing( + r: ReferendumInfo>, + ) -> Result>, DispatchError> { match r { ReferendumInfo::Ongoing(s) => Ok(s), _ => Err(Error::::ReferendumInvalid.into()), } } - fn referendum_status(ref_index: ReferendumIndex) - -> Result>, DispatchError> - { - let info = ReferendumInfoOf::::get(ref_index) - .ok_or(Error::::ReferendumInvalid)?; + fn referendum_status( + ref_index: ReferendumIndex, + ) -> Result>, DispatchError> { + let info = ReferendumInfoOf::::get(ref_index).ok_or(Error::::ReferendumInvalid)?; Self::ensure_ongoing(info) } /// Actually enact a vote, if legit. - fn try_vote(who: &T::AccountId, ref_index: ReferendumIndex, vote: AccountVote>) -> DispatchResult { + fn try_vote( + who: &T::AccountId, + ref_index: ReferendumIndex, + vote: AccountVote>, + ) -> DispatchResult { let mut status = Self::referendum_status(ref_index)?; ensure!(vote.balance() <= T::Currency::free_balance(who), Error::::InsufficientFunds); VotingOf::::try_mutate(who, |voting| -> DispatchResult { @@ -1278,19 +1369,22 @@ impl Module { match votes.binary_search_by_key(&ref_index, |i| i.0) { Ok(i) => { // Shouldn't be possible to fail, but we handle it gracefully. 
- status.tally.remove(votes[i].1).ok_or(Error::::Underflow)?; + status.tally.remove(votes[i].1).ok_or(ArithmeticError::Underflow)?; if let Some(approve) = votes[i].1.as_standard() { status.tally.reduce(approve, *delegations); } votes[i].1 = vote; - } + }, Err(i) => { - ensure!(votes.len() as u32 <= T::MaxVotes::get(), Error::::MaxVotesReached); + ensure!( + votes.len() as u32 <= T::MaxVotes::get(), + Error::::MaxVotesReached + ); votes.insert(i, (ref_index, vote)); - } + }, } // Shouldn't be possible to fail, but we handle it gracefully. - status.tally.add(vote).ok_or(Error::::Overflow)?; + status.tally.add(vote).ok_or(ArithmeticError::Overflow)?; if let Some(approve) = vote.as_standard() { status.tally.increase(approve, *delegations); } @@ -1301,12 +1395,7 @@ impl Module { })?; // Extend the lock to `balance` (rather than setting it) since we don't know what other // votes are in place. - T::Currency::extend_lock( - DEMOCRACY_ID, - who, - vote.balance(), - WithdrawReason::Transfer.into() - ); + T::Currency::extend_lock(DEMOCRACY_ID, who, vote.balance(), WithdrawReasons::TRANSFER); ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); Ok(()) } @@ -1317,31 +1406,41 @@ impl Module { /// - The referendum has finished and the voter's lock period is up. /// /// This will generally be combined with a call to `unlock`. - fn try_remove_vote(who: &T::AccountId, ref_index: ReferendumIndex, scope: UnvoteScope) -> DispatchResult { + fn try_remove_vote( + who: &T::AccountId, + ref_index: ReferendumIndex, + scope: UnvoteScope, + ) -> DispatchResult { let info = ReferendumInfoOf::::get(ref_index); VotingOf::::try_mutate(who, |voting| -> DispatchResult { if let Voting::Direct { ref mut votes, delegations, ref mut prior } = voting { - let i = votes.binary_search_by_key(&ref_index, |i| i.0).map_err(|_| Error::::NotVoter)?; + let i = votes + .binary_search_by_key(&ref_index, |i| i.0) + .map_err(|_| Error::::NotVoter)?; match info { Some(ReferendumInfo::Ongoing(mut status)) => { ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); // Shouldn't be possible to fail, but we handle it gracefully. - status.tally.remove(votes[i].1).ok_or(Error::::Underflow)?; + status.tally.remove(votes[i].1).ok_or(ArithmeticError::Underflow)?; if let Some(approve) = votes[i].1.as_standard() { status.tally.reduce(approve, *delegations); } ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); - } - Some(ReferendumInfo::Finished{end, approved}) => + }, + Some(ReferendumInfo::Finished { end, approved }) => { if let Some((lock_periods, balance)) = votes[i].1.locked_if(approved) { - let unlock_at = end + T::EnactmentPeriod::get() * lock_periods.into(); - let now = system::Module::::block_number(); + let unlock_at = end + T::VoteLockingPeriod::get() * lock_periods.into(); + let now = frame_system::Pallet::::block_number(); if now < unlock_at { - ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); + ensure!( + matches!(scope, UnvoteScope::Any), + Error::::NoPermission + ); prior.accumulate(unlock_at, balance) } - }, - None => {} // Referendum was cancelled. + } + }, + None => {}, // Referendum was cancelled. } votes.remove(i); } @@ -1362,15 +1461,15 @@ impl Module { *delegations = delegations.saturating_add(amount); for &(ref_index, account_vote) in votes.iter() { if let AccountVote::Standard { vote, .. 
} = account_vote { - ReferendumInfoOf::::mutate(ref_index, |maybe_info| + ReferendumInfoOf::::mutate(ref_index, |maybe_info| { if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { status.tally.increase(vote.aye, amount); } - ); + }); } } votes.len() as u32 - } + }, }) } @@ -1381,20 +1480,20 @@ impl Module { // We don't support second level delegating, so we don't need to do anything more. *delegations = delegations.saturating_sub(amount); 1 - } + }, Voting::Direct { votes, delegations, .. } => { *delegations = delegations.saturating_sub(amount); for &(ref_index, account_vote) in votes.iter() { if let AccountVote::Standard { vote, .. } = account_vote { - ReferendumInfoOf::::mutate(ref_index, |maybe_info| + ReferendumInfoOf::::mutate(ref_index, |maybe_info| { if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { status.tally.reduce(vote.aye, amount); } - ); + }); } } votes.len() as u32 - } + }, }) } @@ -1423,22 +1522,17 @@ impl Module { // remove any delegation votes to our current target. Self::reduce_upstream_delegation(&target, conviction.votes(balance)); voting.set_common(delegations, prior); - } + }, Voting::Direct { votes, delegations, prior } => { // here we just ensure that we're currently idling with no votes recorded. ensure!(votes.is_empty(), Error::::VotesExist); voting.set_common(delegations, prior); - } + }, } let votes = Self::increase_upstream_delegation(&target, conviction.votes(balance)); // Extend the lock to `balance` (rather than setting it) since we don't know what other // votes are in place. - T::Currency::extend_lock( - DEMOCRACY_ID, - &who, - balance, - WithdrawReason::Transfer.into() - ); + T::Currency::extend_lock(DEMOCRACY_ID, &who, balance, WithdrawReasons::TRANSFER); Ok(votes) })?; Self::deposit_event(Event::::Delegated(who, target)); @@ -1453,25 +1547,18 @@ impl Module { let mut old = Voting::default(); sp_std::mem::swap(&mut old, voting); match old { - Voting::Delegating { - balance, - target, - conviction, - delegations, - mut prior, - } => { + Voting::Delegating { balance, target, conviction, delegations, mut prior } => { // remove any delegation votes to our current target. - let votes = Self::reduce_upstream_delegation(&target, conviction.votes(balance)); - let now = system::Module::::block_number(); + let votes = + Self::reduce_upstream_delegation(&target, conviction.votes(balance)); + let now = frame_system::Pallet::::block_number(); let lock_periods = conviction.lock_periods().into(); - prior.accumulate(now + T::EnactmentPeriod::get() * lock_periods, balance); + prior.accumulate(now + T::VoteLockingPeriod::get() * lock_periods, balance); voting.set_common(delegations, prior); Ok(votes) - } - Voting::Direct { .. } => { - Err(Error::::NotDelegating.into()) - } + }, + Voting::Direct { .. } => Err(Error::::NotDelegating.into()), } })?; Self::deposit_event(Event::::Undelegated(who)); @@ -1482,13 +1569,13 @@ impl Module { /// a security hole) but may be reduced from what they are currently. 
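Two cross-cutting replacements show up in the hunks around here: overflow and underflow now map to the shared `sp_runtime::ArithmeticError` instead of pallet-local `Error::<T>` variants, and lock flags move from the enum conversion `WithdrawReason::Transfer.into()` to the bitflags constant `WithdrawReasons::TRANSFER`. A sketch of both mechanisms on their own:

    use frame_support::traits::WithdrawReasons;
    use sp_runtime::{ArithmeticError, DispatchError};

    fn tally_add(a: u32, b: u32) -> Result<u32, DispatchError> {
        // `.into()` lifts ArithmeticError into DispatchError::Arithmetic.
        a.checked_add(b).ok_or_else(|| ArithmeticError::Overflow.into())
    }

    fn reasons() -> WithdrawReasons {
        // Bitflags combine with `|`; this pallet only ever needs TRANSFER.
        WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE
    }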
fn update_lock(who: &T::AccountId) { let lock_needed = VotingOf::::mutate(who, |voting| { - voting.rejig(system::Module::::block_number()); + voting.rejig(frame_system::Pallet::::block_number()); voting.locked_balance() }); if lock_needed.is_zero() { T::Currency::remove_lock(DEMOCRACY_ID, who); } else { - T::Currency::set_lock(DEMOCRACY_ID, who, lock_needed, WithdrawReason::Transfer.into()); + T::Currency::set_lock(DEMOCRACY_ID, who, lock_needed, WithdrawReasons::TRANSFER); } } @@ -1500,28 +1587,30 @@ impl Module { delay: T::BlockNumber, ) -> ReferendumIndex { let ref_index = Self::referendum_count(); - ReferendumCount::put(ref_index + 1); - let status = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; + ReferendumCount::::put(ref_index + 1); + let status = + ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); >::insert(ref_index, item); - Self::deposit_event(RawEvent::Started(ref_index, threshold)); + Self::deposit_event(Event::::Started(ref_index, threshold)); ref_index } /// Table the next waiting proposal for a vote. fn launch_next(now: T::BlockNumber) -> DispatchResult { - if LastTabledWasExternal::take() { + if LastTabledWasExternal::::take() { Self::launch_public(now).or_else(|_| Self::launch_external(now)) } else { Self::launch_external(now).or_else(|_| Self::launch_public(now)) - }.map_err(|_| Error::::NoneWaiting.into()) + } + .map_err(|_| Error::::NoneWaiting.into()) } /// Table the waiting external proposal for a vote, if there is one. fn launch_external(now: T::BlockNumber) -> DispatchResult { if let Some((proposal, threshold)) = >::take() { - LastTabledWasExternal::put(true); - Self::deposit_event(RawEvent::ExternalTabled); + LastTabledWasExternal::::put(true); + Self::deposit_event(Event::::ExternalTabled); Self::inject_referendum( now + T::VotingPeriod::get(), proposal, @@ -1537,11 +1626,10 @@ impl Module { /// Table the waiting public proposal with the highest backing for a vote. fn launch_public(now: T::BlockNumber) -> DispatchResult { let mut public_props = Self::public_props(); - if let Some((winner_index, _)) = public_props.iter() - .enumerate() - .max_by_key(|x| Self::backing_for((x.1).0).unwrap_or_else(Zero::zero) - /* ^^ defensive only: All current public proposals have an amount locked*/) - { + if let Some((winner_index, _)) = public_props.iter().enumerate().max_by_key( + // defensive only: All current public proposals have an amount locked + |x| Self::backing_for((x.1).0).unwrap_or_else(Zero::zero), + ) { let (prop_index, proposal, _) = public_props.swap_remove(winner_index); >::put(public_props); @@ -1550,7 +1638,7 @@ impl Module { for d in &depositors { T::Currency::unreserve(d, deposit); } - Self::deposit_event(RawEvent::Tabled(prop_index, deposit, depositors)); + Self::deposit_event(Event::::Tabled(prop_index, deposit, depositors)); Self::inject_referendum( now + T::VotingPeriod::get(), proposal, @@ -1568,20 +1656,24 @@ impl Module { let preimage = >::take(&proposal_hash); if let Some(PreimageStatus::Available { data, provider, deposit, .. 
}) = preimage { if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { - let _ = T::Currency::unreserve(&provider, deposit); - Self::deposit_event(RawEvent::PreimageUsed(proposal_hash, provider, deposit)); + let err_amount = T::Currency::unreserve(&provider, deposit); + debug_assert!(err_amount.is_zero()); + Self::deposit_event(Event::::PreimageUsed(proposal_hash, provider, deposit)); - let ok = proposal.dispatch(frame_system::RawOrigin::Root.into()).is_ok(); - Self::deposit_event(RawEvent::Executed(index, ok)); + let res = proposal + .dispatch(frame_system::RawOrigin::Root.into()) + .map(|_| ()) + .map_err(|e| e.error); + Self::deposit_event(Event::::Executed(index, res)); Ok(()) } else { T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); - Self::deposit_event(RawEvent::PreimageInvalid(proposal_hash, index)); + Self::deposit_event(Event::::PreimageInvalid(proposal_hash, index)); Err(Error::::PreimageInvalid.into()) } } else { - Self::deposit_event(RawEvent::PreimageMissing(proposal_hash, index)); + Self::deposit_event(Event::::PreimageMissing(proposal_hash, index)); Err(Error::::PreimageMissing.into()) } } @@ -1595,30 +1687,36 @@ impl Module { let approved = status.threshold.approved(status.tally, total_issuance); if approved { - Self::deposit_event(RawEvent::Passed(index)); + Self::deposit_event(Event::::Passed(index)); if status.delay.is_zero() { let _ = Self::do_enact_proposal(status.proposal_hash, index); } else { let when = now + status.delay; // Note that we need the preimage now. - Preimages::::mutate_exists(&status.proposal_hash, |maybe_pre| match *maybe_pre { - Some(PreimageStatus::Available { ref mut expiry, .. }) => *expiry = Some(when), - ref mut a => *a = Some(PreimageStatus::Missing(when)), - }); + Preimages::::mutate_exists( + &status.proposal_hash, + |maybe_pre| match *maybe_pre { + Some(PreimageStatus::Available { ref mut expiry, .. }) => + *expiry = Some(when), + ref mut a => *a = Some(PreimageStatus::Missing(when)), + }, + ); if T::Scheduler::schedule_named( (DEMOCRACY_ID, index).encode(), DispatchTime::At(when), None, 63, - system::RawOrigin::Root.into(), - Call::enact_proposal(status.proposal_hash, index).into(), - ).is_err() { + frame_system::RawOrigin::Root.into(), + Call::enact_proposal { proposal_hash: status.proposal_hash, index }.into(), + ) + .is_err() + { frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); } } } else { - Self::deposit_event(RawEvent::NotPassed(index)); + Self::deposit_event(Event::::NotPassed(index)); } Ok(approved) @@ -1636,6 +1734,7 @@ impl Module { /// - Db reads per R: `DepositOf`, `ReferendumInfoOf` /// # fn begin_block(now: T::BlockNumber) -> Result { + let max_block_weight = T::BlockWeights::get().max_block; let mut weight = 0; // pick out another public referendum if it's time. @@ -1643,7 +1742,7 @@ impl Module { // Errors come from the queue being empty. we don't really care about that, and even if // we did, there is nothing we can do here. 
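Note the two changes to enactment in this hunk: FRAME v2 generates struct-style call variants, so the scheduled dispatch is built as `Call::enact_proposal { proposal_hash, index }` rather than with positional arguments, and the `Executed` event now carries the full `DispatchResult` instead of collapsing it to a `bool`. A fragment showing the new variant construction, assuming the pallet's `Call` enum and the two locals are in scope:

    // Named fields make the call explicit and immune to argument reordering:
    let call = Call::<T>::enact_proposal { proposal_hash, index };
    // Call values remain SCALE-encodable, as schedule_named requires:
    let _encoded = call.encode();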
 		let _ = Self::launch_next(now);
-			weight = T::MaximumBlockWeight::get();
+			weight = max_block_weight;
 		}
 
 		let next = Self::lowest_unbaked();
@@ -1654,7 +1753,7 @@ impl<T: Trait> Module<T> {
 		for (index, info) in Self::maturing_referenda_at_inner(now, next..last).into_iter() {
 			let approved = Self::bake_referendum(now, index, info)?;
 			ReferendumInfoOf::<T>::insert(index, ReferendumInfo::Finished { end: now, approved });
-			weight = T::MaximumBlockWeight::get();
+			weight = max_block_weight;
 		}
 
 		Ok(weight)
@@ -1677,10 +1776,8 @@ impl<T: Trait> Module<T> {
 		// To decode the enum variant we only need the first byte.
 		let mut buf = [0u8; 1];
 		let key = <Preimages<T>>::hashed_key_for(proposal_hash);
-		let bytes = match sp_io::storage::read(&key, &mut buf, 0) {
-			Some(bytes) => bytes,
-			None => return Err(Error::<T>::NotImminent.into()),
-		};
+		let bytes =
+			sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::<T>::NotImminent)?;
 		// The value may be smaller than 1 byte.
 		let mut input = &buf[0..buf.len().min(bytes as usize)];
@@ -1690,7 +1787,7 @@ impl<T: Trait> Module<T> {
 			_ => {
 				sp_runtime::print("Failed to decode `PreimageStatus` variant");
 				Err(Error::<T>::NotImminent.into())
-			}
+			},
 		}
 	}
@@ -1708,10 +1805,8 @@ impl<T: Trait> Module<T> {
 		// * at most 5 bytes to decode a `Compact`
 		let mut buf = [0u8; 6];
 		let key = <Preimages<T>>::hashed_key_for(proposal_hash);
-		let bytes = match sp_io::storage::read(&key, &mut buf, 0) {
-			Some(bytes) => bytes,
-			None => return Err(Error::<T>::PreimageMissing.into()),
-		};
+		let bytes =
+			sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::<T>::PreimageMissing)?;
 		// The value may be smaller than 6 bytes.
 		let mut input = &buf[0..buf.len().min(bytes as usize)];
@@ -1720,15 +1815,17 @@ impl<T: Trait> Module<T> {
 			Ok(0) => return Err(Error::<T>::PreimageMissing.into()),
 			_ => {
 				sp_runtime::print("Failed to decode `PreimageStatus` variant");
-				return Err(Error::<T>::PreimageMissing.into());
-			}
+				return Err(Error::<T>::PreimageMissing.into())
+			},
 		}
 
 		// Decode the length of the vector.
-		let len = codec::Compact::<u32>::decode(&mut input).map_err(|_| {
-			sp_runtime::print("Failed to decode `PreimageStatus` variant");
-			DispatchError::from(Error::<T>::PreimageMissing)
-		})?.0;
+		let len = codec::Compact::<u32>::decode(&mut input)
+			.map_err(|_| {
+				sp_runtime::print("Failed to decode `PreimageStatus` variant");
+				DispatchError::from(Error::<T>::PreimageMissing)
+			})?
+ .0; Ok(len) } @@ -1742,7 +1839,7 @@ impl Module { .saturating_mul(T::PreimageByteDeposit::get()); T::Currency::reserve(&who, deposit)?; - let now = >::block_number(); + let now = >::block_number(); let a = PreimageStatus::Available { data: encoded_proposal, provider: who.clone(), @@ -1752,19 +1849,22 @@ impl Module { }; >::insert(proposal_hash, a); - Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, deposit)); + Self::deposit_event(Event::::PreimageNoted(proposal_hash, who, deposit)); Ok(()) } // See `note_imminent_preimage` - fn note_imminent_preimage_inner(who: T::AccountId, encoded_proposal: Vec) -> DispatchResult { + fn note_imminent_preimage_inner( + who: T::AccountId, + encoded_proposal: Vec, + ) -> DispatchResult { let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); Self::check_pre_image_is_missing(proposal_hash)?; let status = Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?; let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?; - let now = >::block_number(); + let now = >::block_number(); let free = >::zero(); let a = PreimageStatus::Available { data: encoded_proposal, @@ -1775,7 +1875,7 @@ impl Module { }; >::insert(proposal_hash, a); - Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, free)); + Self::deposit_event(Event::::PreimageNoted(proposal_hash, who, free)); Ok(()) } @@ -1785,10 +1885,7 @@ impl Module { fn decode_compact_u32_at(key: &[u8]) -> Option { // `Compact` takes at most 5 bytes. let mut buf = [0u8; 5]; - let bytes = match sp_io::storage::read(&key, &mut buf, 0) { - Some(bytes) => bytes, - None => return None, - }; + let bytes = sp_io::storage::read(&key, &mut buf, 0)?; // The value may be smaller than 5 bytes. let mut input = &buf[0..buf.len().min(bytes as usize)]; match codec::Compact::::decode(&mut input) { @@ -1797,6 +1894,6 @@ fn decode_compact_u32_at(key: &[u8]) -> Option { sp_runtime::print("Failed to decode compact u32 at:"); sp_runtime::print(key); None - } + }, } } diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index bcc7099bb34a4..75104db51b971 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,22 +18,24 @@ //! The crate's tests. 
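The test scaffolding rewritten below replaces the three `impl_outer_*` macros with a single `construct_runtime!` invocation over mock block types. Reduced to a skeleton (the real invocation also wires Balances, Scheduler and Democracy):

    type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
    type Block = frame_system::mocking::MockBlock<Test>;

    frame_support::construct_runtime!(
        pub enum Test where
            Block = Block,
            NodeBlock = Block,
            UncheckedExtrinsic = UncheckedExtrinsic,
        {
            System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
        }
    );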
 use super::*;
-use std::cell::RefCell;
+use crate as pallet_democracy;
 use codec::Encode;
 use frame_support::{
-	impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok, parameter_types,
-	impl_outer_event, ord_parameter_types, traits::{Contains, OnInitialize, Filter},
+	assert_noop, assert_ok, ord_parameter_types, parameter_types,
+	traits::{Contains, GenesisBuild, OnInitialize, SortedMembers},
 	weights::Weight,
 };
+use frame_system::{EnsureRoot, EnsureSignedBy};
+use pallet_balances::{BalanceLock, Error as BalancesError};
 use sp_core::H256;
 use sp_runtime::{
-	traits::{BlakeTwo256, IdentityLookup, BadOrigin},
-	testing::Header, Perbill,
+	testing::Header,
+	traits::{BadOrigin, BlakeTwo256, IdentityLookup},
+	Perbill,
 };
-use pallet_balances::{BalanceLock, Error as BalancesError};
-use frame_system::{EnsureSignedBy, EnsureRoot};
 
 mod cancellation;
+mod decoders;
 mod delegation;
 mod external_proposing;
 mod fast_tracking;
@@ -42,7 +44,6 @@ mod preimage;
 mod public_proposals;
 mod scheduling;
 mod voting;
-mod decoders;
 
 const AYE: Vote = Vote { aye: true, conviction: Conviction::None };
 const NAY: Vote = Vote { aye: false, conviction: Conviction::None };
@@ -51,50 +52,40 @@ const BIG_NAY: Vote = Vote { aye: false, conviction: Conviction::Locked1x };
 
 const MAX_PROPOSALS: u32 = 100;
 
-impl_outer_origin! {
-	pub enum Origin for Test where system = frame_system {}
-}
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
+type Block = frame_system::mocking::MockBlock<Test>;
 
-impl_outer_dispatch! {
-	pub enum Call for Test where origin: Origin {
-		frame_system::System,
-		pallet_balances::Balances,
-		democracy::Democracy,
+frame_support::construct_runtime!(
	pub enum Test where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+		Scheduler: pallet_scheduler::{Pallet, Call, Storage, Config, Event<T>},
+		Democracy: pallet_democracy::{Pallet, Call, Storage, Config<T>, Event<T>},
 	}
-}
-
-mod democracy {
-	pub use crate::Event;
-}
-
-impl_outer_event! {
-	pub enum Event for Test {
-		system,
-		pallet_balances,
-		pallet_scheduler,
-		democracy,
-	}
-}
+);
 
 // Test that a filtered call can be dispatched.
 pub struct BaseFilter;
-impl Filter<Call> for BaseFilter {
-	fn filter(call: &Call) -> bool {
-		!matches!(call, &Call::Balances(pallet_balances::Call::set_balance(..)))
+impl Contains<Call> for BaseFilter {
+	fn contains(call: &Call) -> bool {
+		!matches!(call, &Call::Balances(pallet_balances::Call::set_balance { .. }))
 	}
 }
 
-// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted.
-#[derive(Clone, Eq, PartialEq, Debug)]
-pub struct Test;
 parameter_types!
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1_000_000; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1_000_000); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -106,24 +97,19 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * MaximumBlockWeight::get(); + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; } -impl pallet_scheduler::Trait for Test { +impl pallet_scheduler::Config for Test { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; @@ -135,9 +121,12 @@ impl pallet_scheduler::Trait for Test { } parameter_types! { pub const ExistentialDeposit: u64 = 1; + pub const MaxLocks: u32 = 10; } -impl pallet_balances::Trait for Test { - type MaxLocks = (); +impl pallet_balances::Config for Test { + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type MaxLocks = MaxLocks; type Balance = u64; type Event = Event; type DustRemoval = (); @@ -151,9 +140,12 @@ parameter_types! { pub const FastTrackVotingPeriod: u64 = 2; pub const MinimumDeposit: u64 = 1; pub const EnactmentPeriod: u64 = 2; + pub const VoteLockingPeriod: u64 = 3; pub const CooloffPeriod: u64 = 2; pub const MaxVotes: u32 = 100; pub const MaxProposals: u32 = MAX_PROPOSALS; + pub static PreimageByteDeposit: u64 = 0; + pub static InstantAllowed: bool = false; } ord_parameter_types! { pub const One: u64 = 1; @@ -164,32 +156,22 @@ ord_parameter_types! { pub const Six: u64 = 6; } pub struct OneToFive; -impl Contains for OneToFive { +impl SortedMembers for OneToFive { fn sorted_members() -> Vec { vec![1, 2, 3, 4, 5] } #[cfg(feature = "runtime-benchmarks")] fn add(_m: &u64) {} } -thread_local! 
{ - static PREIMAGE_BYTE_DEPOSIT: RefCell = RefCell::new(0); - static INSTANT_ALLOWED: RefCell = RefCell::new(false); -} -pub struct PreimageByteDeposit; -impl Get for PreimageByteDeposit { - fn get() -> u64 { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow()) } -} -pub struct InstantAllowed; -impl Get for InstantAllowed { - fn get() -> bool { INSTANT_ALLOWED.with(|v| *v.borrow()) } -} -impl super::Trait for Test { + +impl Config for Test { type Proposal = Call; type Event = Event; - type Currency = pallet_balances::Module; + type Currency = pallet_balances::Pallet; type EnactmentPeriod = EnactmentPeriod; type LaunchPeriod = LaunchPeriod; type VotingPeriod = VotingPeriod; + type VoteLockingPeriod = VoteLockingPeriod; type FastTrackVotingPeriod = FastTrackVotingPeriod; type MinimumDeposit = MinimumDeposit; type ExternalOrigin = EnsureSignedBy; @@ -215,10 +197,14 @@ impl super::Trait for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); + pallet_democracy::GenesisConfig::::default() + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -230,11 +216,6 @@ pub fn new_test_ext_execute_with_cond(execute: impl FnOnce(bool) -> () + Clone) new_test_ext().execute_with(|| execute(true)); } -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type Scheduler = pallet_scheduler::Module; -type Democracy = Module; - #[test] fn params_should_work() { new_test_ext().execute_with(|| { @@ -245,14 +226,15 @@ fn params_should_work() { } fn set_balance_proposal(value: u64) -> Vec { - Call::Balances(pallet_balances::Call::set_balance(42, value, 0)).encode() + Call::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }) + .encode() } #[test] fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { let call = Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); - assert!(!::BaseCallFilter::filter(&call)); + assert!(!::BaseCallFilter::contains(&call)); } } @@ -266,25 +248,17 @@ fn set_balance_proposal_hash_and_note(value: u64) -> H256 { match Democracy::note_preimage(Origin::signed(6), p) { Ok(_) => (), Err(x) if x == Error::::DuplicatePreimage.into() => (), - Err(x) => panic!(x), + Err(x) => panic!("{:?}", x), } h } fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash(value), - delay, - ) + Democracy::propose(Origin::signed(who), set_balance_proposal_hash(value), delay) } fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash_and_note(value), - delay, - ) + Democracy::propose(Origin::signed(who), set_balance_proposal_hash_and_note(value), delay) } fn next_block() { diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index 4221865a3e5b0..c2bd725ce934a 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
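The `pub static` entries added to `parameter_types!` above replace these deleted `thread_local!` cells and hand-written `Get` impls. The macro generates the thread-local storage itself plus an upper-case handle for mutating it, which is why `fast_tracking.rs` further down can write `INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true)`. A sketch of the mechanism (the test name is mine, not from this diff):

    frame_support::parameter_types! {
        pub static InstantAllowed: bool = false;
    }

    #[test]
    fn static_parameter_is_mutable() {
        assert!(!InstantAllowed::get());
        // The generated SCREAMING_CASE static is the mutation handle:
        INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true);
        assert!(InstantAllowed::get());
    }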
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ fn cancel_referendum_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_ok!(Democracy::cancel_referendum(Origin::root(), r.into())); @@ -67,7 +67,7 @@ fn emergency_cancel_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 2 + 2, ); assert!(Democracy::referendum_status(r).is_ok()); @@ -81,7 +81,7 @@ fn emergency_cancel_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 2 + 2, ); assert!(Democracy::referendum_status(r).is_ok()); assert_noop!( diff --git a/frame/democracy/src/tests/decoders.rs b/frame/democracy/src/tests/decoders.rs index 6b8e661ca9fd9..3c1729c4355c0 100644 --- a/frame/democracy/src/tests/decoders.rs +++ b/frame/democracy/src/tests/decoders.rs @@ -1,32 +1,33 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! 
Tests for various partial storage decoders
 
 use super::*;
-use frame_support::storage::{migration, StorageMap, unhashed};
+use frame_support::storage::{migration, unhashed};
 
 #[test]
 fn test_decode_compact_u32_at() {
 	new_test_ext().execute_with(|| {
-		let v = codec::Compact(u64::max_value());
+		let v = codec::Compact(u64::MAX);
 		migration::put_storage_value(b"test", b"", &[], v);
 		assert_eq!(decode_compact_u32_at(b"test"), None);
 
-		for v in vec![0, 10, u32::max_value()] {
+		for v in vec![0, 10, u32::MAX] {
 			let compact_v = codec::Compact(v);
 			unhashed::put(b"test", &compact_v);
 			assert_eq!(decode_compact_u32_at(b"test"), Some(v));
@@ -57,15 +58,15 @@ fn pre_image() {
 		let key = Default::default();
 		let missing = PreimageStatus::Missing(0);
 		Preimages::<Test>::insert(key, missing);
-		assert!(Democracy::pre_image_data_len(key).is_err());
+		assert_noop!(Democracy::pre_image_data_len(key), Error::<Test>::PreimageMissing);
 		assert_eq!(Democracy::check_pre_image_is_missing(key), Ok(()));
 
 		Preimages::<Test>::remove(key);
-		assert!(Democracy::pre_image_data_len(key).is_err());
-		assert!(Democracy::check_pre_image_is_missing(key).is_err());
+		assert_noop!(Democracy::pre_image_data_len(key), Error::<Test>::PreimageMissing);
+		assert_noop!(Democracy::check_pre_image_is_missing(key), Error::<Test>::NotImminent);
 
 		for l in vec![0, 10, 100, 1000u32] {
-			let available = PreimageStatus::Available{
+			let available = PreimageStatus::Available {
 				data: (0..l).map(|i| i as u8).collect(),
 				provider: 0,
 				deposit: 0,
@@ -75,7 +76,10 @@ fn pre_image() {
 			Preimages::<Test>::insert(key, available);
 			assert_eq!(Democracy::pre_image_data_len(key), Ok(l));
-			assert!(Democracy::check_pre_image_is_missing(key).is_err());
+			assert_noop!(
+				Democracy::check_pre_image_is_missing(key),
+				Error::<Test>::DuplicatePreimage
+			);
 		}
 	})
 }
diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs
index 34dec6d0b49a6..d3afa1c13f90b 100644
--- a/frame/democracy/src/tests/delegation.rs
+++ b/frame/democracy/src/tests/delegation.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs
index 3f9be2137906b..7442964584fa9 100644
--- a/frame/democracy/src/tests/external_proposing.rs
+++ b/frame/democracy/src/tests/external_proposing.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.
 
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,17 +34,17 @@ fn veto_external_works() {
 		// cancelled.
 		assert!(!<NextExternal<Test>>::exists());
 		// fails - same proposal can't be resubmitted.
-		assert_noop!(Democracy::external_propose(
-			Origin::signed(2),
-			set_balance_proposal_hash(2),
-		), Error::<Test>::ProposalBlacklisted);
+		assert_noop!(
+			Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),),
+			Error::<Test>::ProposalBlacklisted
+		);
 
 		fast_forward_to(1);
 
 		// fails as we're still in cooloff period.
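Throughout the test hunks, loose `assert!(x.is_err())` checks are tightened into `assert_noop!`, which pins down the exact error and also verifies that the failing call left storage untouched. Usage exactly as in the updated tests:

    use frame_support::assert_noop;

    // Fails with precisely this error, and must not mutate storage:
    assert_noop!(
        Democracy::referendum_status(0),
        Error::<Test>::ReferendumInvalid
    );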
- assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::::ProposalBlacklisted + ); fast_forward_to(2); // works; as we're out of the cooloff period. @@ -67,10 +67,10 @@ fn veto_external_works() { fast_forward_to(3); // same proposal fails as we're still in cooloff - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::::ProposalBlacklisted + ); // different proposal works fine. assert_ok!(Democracy::external_propose( Origin::signed(2), @@ -93,13 +93,10 @@ fn external_blacklisting_should_work() { assert_ok!(Democracy::blacklist(Origin::root(), hash, None)); fast_forward_to(2); - assert!(Democracy::referendum_status(0).is_err()); + assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert_noop!( - Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - ), + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash_and_note(2),), Error::::ProposalBlacklisted, ); }); @@ -110,20 +107,17 @@ fn external_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose( - Origin::signed(1), - set_balance_proposal_hash(2), - ), + Democracy::external_propose(Origin::signed(1), set_balance_proposal_hash(2),), BadOrigin, ); assert_ok!(Democracy::external_propose( Origin::signed(2), set_balance_proposal_hash_and_note(2), )); - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(1), - ), Error::::DuplicateProposal); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(1),), + Error::::DuplicateProposal + ); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), @@ -143,10 +137,7 @@ fn external_majority_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_majority( - Origin::signed(1), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_majority(Origin::signed(1), set_balance_proposal_hash(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_majority( @@ -172,10 +163,7 @@ fn external_default_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_default( - Origin::signed(3), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_default(Origin::signed(3), set_balance_proposal_hash(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_default( @@ -196,7 +184,6 @@ fn external_default_referendum_works() { }); } - #[test] fn external_and_public_interleaving_works() { new_test_ext().execute_with(|| { @@ -222,9 +209,9 @@ fn external_and_public_interleaving_works() { ); // replenish external assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(3), - )); + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); fast_forward_to(4); @@ -256,9 +243,9 @@ fn external_and_public_interleaving_works() { ); // replenish external assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(5), - )); + Origin::signed(2), + set_balance_proposal_hash_and_note(5), + )); 
fast_forward_to(8); diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index 8df34001cde04..9b2f2760bde1c 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,7 +24,10 @@ fn fast_track_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); let h = set_balance_proposal_hash_and_note(2); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::::ProposalMissing + ); assert_ok!(Democracy::external_propose_majority( Origin::signed(3), set_balance_proposal_hash_and_note(2) @@ -49,14 +52,20 @@ fn instant_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); let h = set_balance_proposal_hash_and_note(2); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::::ProposalMissing + ); assert_ok!(Democracy::external_propose_majority( Origin::signed(3), set_balance_proposal_hash_and_note(2) )); assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); assert_noop!(Democracy::fast_track(Origin::signed(5), h, 1, 0), BadOrigin); - assert_noop!(Democracy::fast_track(Origin::signed(6), h, 1, 0), Error::::InstantNotAllowed); + assert_noop!( + Democracy::fast_track(Origin::signed(6), h, 1, 0), + Error::::InstantNotAllowed + ); INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); assert_eq!( diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index 93867030588c3..8b80b39c14aab 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,23 +23,19 @@ use std::convert::TryFrom; fn aye(x: u8, balance: u64) -> AccountVote { AccountVote::Standard { vote: Vote { aye: true, conviction: Conviction::try_from(x).unwrap() }, - balance + balance, } } fn nay(x: u8, balance: u64) -> AccountVote { AccountVote::Standard { vote: Vote { aye: false, conviction: Conviction::try_from(x).unwrap() }, - balance + balance, } } fn the_lock(amount: u64) -> BalanceLock { - BalanceLock { - id: DEMOCRACY_ID, - amount, - reasons: pallet_balances::Reasons::Misc, - } + BalanceLock { id: DEMOCRACY_ID, amount, reasons: pallet_balances::Reasons::Misc } } #[test] @@ -50,7 +46,7 @@ fn lock_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); @@ -74,7 +70,10 @@ fn lock_voting_should_work() { assert_ok!(Democracy::unlock(Origin::signed(2), 5)); // 2, 3, 4 got their way with the vote, so they cannot be reaped by others. 
- assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 2, r), Error::::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 2, r), + Error::::NoPermission + ); // However, they can be unvoted by the owner, though it will make no difference to the lock. assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); assert_ok!(Democracy::unlock(Origin::signed(2), 2)); @@ -86,32 +85,37 @@ fn lock_voting_should_work() { assert_eq!(Balances::locks(5), vec![]); assert_eq!(Balances::free_balance(42), 2); - - fast_forward_to(5); + fast_forward_to(7); // No change yet... - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 4, r), Error::::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 4, r), + Error::::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(1), 4)); assert_eq!(Balances::locks(4), vec![the_lock(40)]); - fast_forward_to(6); + fast_forward_to(8); // 4 should now be able to reap and unlock assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 4, r)); assert_ok!(Democracy::unlock(Origin::signed(1), 4)); assert_eq!(Balances::locks(4), vec![]); - fast_forward_to(9); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 3, r), Error::::NoPermission); + fast_forward_to(13); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 3, r), + Error::::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(1), 3)); assert_eq!(Balances::locks(3), vec![the_lock(30)]); - fast_forward_to(10); + fast_forward_to(14); assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 3, r)); assert_ok!(Democracy::unlock(Origin::signed(1), 3)); assert_eq!(Balances::locks(3), vec![]); // 2 doesn't need to reap_vote here because it was already done before. - fast_forward_to(17); + fast_forward_to(25); assert_ok!(Democracy::unlock(Origin::signed(1), 2)); assert_eq!(Balances::locks(2), vec![the_lock(20)]); - fast_forward_to(18); + fast_forward_to(26); assert_ok!(Democracy::unlock(Origin::signed(1), 2)); assert_eq!(Balances::locks(2), vec![]); }); @@ -145,7 +149,7 @@ fn lock_voting_should_work_with_delegation() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); @@ -168,7 +172,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r1, aye(4, 10))); @@ -176,7 +180,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r2, aye(3, 20))); @@ -184,7 +188,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r3, aye(2, 50))); @@ -197,31 +201,40 @@ fn setup_three_referenda() -> (u32, u32, u32) { fn prior_lockvotes_should_be_enforced() { new_test_ext().execute_with(|| { let r = setup_three_referenda(); - // r.0 locked 10 until #18. - // r.1 locked 20 until #10. - // r.2 locked 50 until #6. 
- - fast_forward_to(5); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2), Error::::NoPermission); + // r.0 locked 10 until 2 + 8 * 3 = #26 + // r.1 locked 20 until 2 + 4 * 3 = #14 + // r.2 locked 50 until 2 + 2 * 3 = #8 + + fast_forward_to(7); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.2), + Error::::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(50)]); - fast_forward_to(6); + fast_forward_to(8); assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2)); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); - fast_forward_to(9); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1), Error::::NoPermission); + fast_forward_to(13); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.1), + Error::::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); - fast_forward_to(10); + fast_forward_to(14); assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1)); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); - fast_forward_to(17); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0), Error::::NoPermission); + fast_forward_to(25); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.0), + Error::::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); - fast_forward_to(18); + fast_forward_to(26); assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0)); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); @@ -232,31 +245,31 @@ fn prior_lockvotes_should_be_enforced() { fn single_consolidation_of_lockvotes_should_work_as_before() { new_test_ext().execute_with(|| { let r = setup_three_referenda(); - // r.0 locked 10 until #18. - // r.1 locked 20 until #10. - // r.2 locked 50 until #6. + // r.0 locked 10 until 2 + 8 * 3 = #26 + // r.1 locked 20 until 2 + 4 * 3 = #14 + // r.2 locked 50 until 2 + 2 * 3 = #8 - fast_forward_to(5); + fast_forward_to(7); assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(50)]); - fast_forward_to(6); + fast_forward_to(8); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); - fast_forward_to(9); + fast_forward_to(13); assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); - fast_forward_to(10); + fast_forward_to(14); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); - fast_forward_to(17); + fast_forward_to(25); assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); - fast_forward_to(18); + fast_forward_to(26); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); @@ -266,23 +279,23 @@ fn single_consolidation_of_lockvotes_should_work_as_before() { fn multi_consolidation_of_lockvotes_should_be_conservative() { new_test_ext().execute_with(|| { let r = setup_three_referenda(); - // r.0 locked 10 until #18. 
- // r.1 locked 20 until #10. - // r.2 locked 50 until #6. + // r.0 locked 10 until 2 + 8 * 3 = #26 + // r.1 locked 20 until 2 + 4 * 3 = #14 + // r.2 locked 50 until 2 + 2 * 3 = #8 assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); - fast_forward_to(6); + fast_forward_to(8); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 20); - fast_forward_to(10); + fast_forward_to(14); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); - fast_forward_to(18); + fast_forward_to(26); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); @@ -296,33 +309,33 @@ fn locks_should_persist_from_voting_to_delegation() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r, aye(4, 10))); fast_forward_to(2); assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); - // locked 10 until #18. + // locked 10 until #26. assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked3x, 20)); // locked 20. assert!(Balances::locks(5)[0].amount == 20); assert_ok!(Democracy::undelegate(Origin::signed(5))); - // locked 20 until #10 + // locked 20 until #14 - fast_forward_to(9); + fast_forward_to(13); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount == 20); - fast_forward_to(10); + fast_forward_to(14); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); - fast_forward_to(17); + fast_forward_to(25); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); - fast_forward_to(18); + fast_forward_to(26); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); @@ -334,30 +347,30 @@ fn locks_should_persist_from_delegation_to_voting() { System::set_block_number(0); assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked5x, 5)); assert_ok!(Democracy::undelegate(Origin::signed(5))); - // locked 5 until #32 + // locked 5 until 16 * 3 = #48 let r = setup_three_referenda(); - // r.0 locked 10 until #18. - // r.1 locked 20 until #10. - // r.2 locked 50 until #6. + // r.0 locked 10 until 2 + 8 * 3 = #26 + // r.1 locked 20 until 2 + 4 * 3 = #14 + // r.2 locked 50 until 2 + 2 * 3 = #8 assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); - fast_forward_to(6); + fast_forward_to(8); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 20); - fast_forward_to(10); + fast_forward_to(14); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); - fast_forward_to(18); + fast_forward_to(26); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 5); - fast_forward_to(32); + fast_forward_to(48); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs index 8a2cbaf534032..6d478fcaa68c7 100644 --- a/frame/democracy/src/tests/preimage.rs +++ b/frame/democracy/src/tests/preimage.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. 
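The recomputed expiry comments above (`locked 10 until 2 + 8 * 3 = #26`, and so on) follow the conviction schedule: `Locked1x` through `Locked6x` correspond to 1, 2, 4, 8, 16 and 32 lock periods, i.e. 2^(x-1), and a vote stays locked until the referendum end plus that many periods. A small sketch of the arithmetic; the end block of 2 and the 3-block period length come from the test mock and are assumptions here:

/// Lock periods for conviction `x` (1..=6): 2^(x - 1), matching
/// Locked1x=1, Locked2x=2, Locked3x=4, Locked4x=8, Locked5x=16, Locked6x=32.
fn lock_periods(conviction: u8) -> u64 {
    assert!((1..=6).contains(&conviction));
    1u64 << (conviction - 1)
}

/// Block at which a conviction-locked vote can be unlocked:
/// referendum end + lock_periods * period_length.
fn unlock_at(end: u64, conviction: u8, period_length: u64) -> u64 {
    end + lock_periods(conviction) * period_length
}

fn main() {
    // Values mirroring the updated comments in `setup_three_referenda`:
    // the referenda end at block 2 and the mock's lock period is 3 blocks.
    assert_eq!(unlock_at(2, 4, 3), 26); // r.0: aye(4, 10) -> 2 + 8 * 3
    assert_eq!(unlock_at(2, 3, 3), 14); // r.1: aye(3, 20) -> 2 + 4 * 3
    assert_eq!(unlock_at(2, 2, 3), 8); // r.2: aye(2, 50) -> 2 + 2 * 3
    // Delegation with Locked5x: 16 * 3 = 48, as in
    // `locks_should_persist_from_delegation_to_voting`.
    assert_eq!(unlock_at(0, 5, 3), 48);
}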
-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ fn missing_preimage_should_fail() { 2, set_balance_proposal_hash(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); @@ -43,8 +43,11 @@ fn preimage_deposit_should_be_required_and_returned() { // fee of 100 is too much. PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); assert_noop!( - if operational { Democracy::note_preimage_operational(Origin::signed(6), vec![0; 500]) } - else { Democracy::note_preimage(Origin::signed(6), vec![0; 500]) }, + if operational { + Democracy::note_preimage_operational(Origin::signed(6), vec![0; 500]) + } else { + Democracy::note_preimage(Origin::signed(6), vec![0; 500]) + }, BalancesError::::InsufficientBalance, ); // fee of 1 is reasonable. @@ -53,7 +56,7 @@ fn preimage_deposit_should_be_required_and_returned() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); @@ -72,20 +75,25 @@ fn preimage_deposit_should_be_required_and_returned() { fn preimage_deposit_should_be_reapable_earlier_by_owner() { new_test_ext_execute_with_cond(|operational| { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!( - if operational { Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) } - else { Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) } - ); + assert_ok!(if operational { + Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + } else { + Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + }); assert_eq!(Balances::reserved_balance(6), 12); next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::max_value()), + Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::MAX), Error::::TooEarly ); next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::max_value())); + assert_ok!(Democracy::reap_preimage( + Origin::signed(6), + set_balance_proposal_hash(2), + u32::MAX + )); assert_eq!(Balances::free_balance(6), 60); assert_eq!(Balances::reserved_balance(6), 0); @@ -96,27 +104,32 @@ fn preimage_deposit_should_be_reapable_earlier_by_owner() { fn preimage_deposit_should_be_reapable() { new_test_ext_execute_with_cond(|operational| { assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::max_value()), - Error::::PreimageMissing - ); + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Error::::PreimageMissing + ); PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!( - if operational { Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) } - else { Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) } - ); + assert_ok!(if operational { + Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + } else { + Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + }); assert_eq!(Balances::reserved_balance(6), 12); next_block(); next_block(); next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::max_value()), - Error::::TooEarly - ); + 
Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Error::::TooEarly + ); next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::max_value())); + assert_ok!(Democracy::reap_preimage( + Origin::signed(5), + set_balance_proposal_hash(2), + u32::MAX + )); assert_eq!(Balances::reserved_balance(6), 0); assert_eq!(Balances::free_balance(6), 48); assert_eq!(Balances::free_balance(5), 62); @@ -132,13 +145,19 @@ fn noting_imminent_preimage_for_free_should_work() { 2, set_balance_proposal_hash(2), VoteThreshold::SuperMajorityApprove, - 1 + 1, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_noop!( - if operational { Democracy::note_imminent_preimage_operational(Origin::signed(6), set_balance_proposal(2)) } - else { Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)) }, + if operational { + Democracy::note_imminent_preimage_operational( + Origin::signed(6), + set_balance_proposal(2), + ) + } else { + Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)) + }, Error::::NotImminent ); @@ -161,7 +180,10 @@ fn reaping_imminent_preimage_should_fail() { assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); next_block(); next_block(); - assert_noop!(Democracy::reap_preimage(Origin::signed(6), h, u32::max_value()), Error::::Imminent); + assert_noop!( + Democracy::reap_preimage(Origin::signed(6), h, u32::MAX), + Error::::Imminent + ); }); } @@ -174,7 +196,7 @@ fn note_imminent_preimage_can_only_be_successful_once() { 2, set_balance_proposal_hash(2), VoteThreshold::SuperMajorityApprove, - 1 + 1, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); next_block(); diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index d862aa98e7880..34713c3e15725 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
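The reserved amount of 12 asserted in the preimage tests above is the per-byte deposit times the encoded proposal length: `PREIMAGE_BYTE_DEPOSIT` is set to 1, so the `set_balance` proposal evidently encodes to 12 bytes (that length is inferred from the assertion rather than stated in the source, so treat it as an assumption about the mock encoding). A sketch of the calculation:

/// Deposit reserved when noting a preimage: per-byte deposit times the
/// encoded length of the proposal, as exercised by the tests above.
fn preimage_deposit(byte_deposit: u64, encoded_len: u64) -> u64 {
    byte_deposit * encoded_len
}

fn main() {
    // PREIMAGE_BYTE_DEPOSIT = 1 and a 12-byte encoded proposal
    // reproduce the asserted `reserved_balance(6) == 12`.
    assert_eq!(preimage_deposit(1, 12), 12);
    // With the "too much" fee of 100, the same preimage would need 1200
    // units reserved, which account 6 cannot afford in the mock.
    assert_eq!(preimage_deposit(100, 12), 1200);
}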
 // SPDX-License-Identifier: Apache-2.0

 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -35,10 +35,10 @@ fn backing_for_should_work() {
 fn deposit_for_proposals_should_be_taken() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(propose_set_balance_and_note(1, 2, 5));
-		assert_ok!(Democracy::second(Origin::signed(2), 0, u32::max_value()));
-		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value()));
-		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value()));
-		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value()));
+		assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX));
+		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX));
+		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX));
+		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX));
 		assert_eq!(Balances::free_balance(1), 5);
 		assert_eq!(Balances::free_balance(2), 15);
 		assert_eq!(Balances::free_balance(5), 35);
@@ -49,10 +49,10 @@ fn deposit_for_proposals_should_be_returned() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(propose_set_balance_and_note(1, 2, 5));
-		assert_ok!(Democracy::second(Origin::signed(2), 0, u32::max_value()));
-		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value()));
-		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value()));
-		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::max_value()));
+		assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX));
+		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX));
+		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX));
+		assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX));
 		fast_forward_to(3);
 		assert_eq!(Balances::free_balance(1), 10);
 		assert_eq!(Balances::free_balance(2), 20);
@@ -79,7 +79,7 @@ fn poor_seconder_should_not_work() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(propose_set_balance_and_note(2, 2, 11));
 		assert_noop!(
-			Democracy::second(Origin::signed(1), 0, u32::max_value()),
+			Democracy::second(Origin::signed(1), 0, u32::MAX),
 			BalancesError::<Test, _>::InsufficientBalance
 		);
 	});
@@ -89,10 +89,7 @@ fn invalid_seconds_upper_bound_should_not_work() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(propose_set_balance_and_note(1, 2, 5));
-		assert_noop!(
-			Democracy::second(Origin::signed(2), 0, 0),
-			Error::<Test>::WrongUpperBound
-		);
+		assert_noop!(Democracy::second(Origin::signed(2), 0, 0), Error::<Test>::WrongUpperBound);
 	});
 }

@@ -129,9 +126,9 @@ fn blacklisting_should_work() {
 		fast_forward_to(2);

 		let hash = set_balance_proposal_hash(4);

-		assert!(Democracy::referendum_status(0).is_ok());
+		assert_ok!(Democracy::referendum_status(0));
 		assert_ok!(Democracy::blacklist(Origin::root(), hash, Some(0)));
-		assert!(Democracy::referendum_status(0).is_err());
+		assert_noop!(Democracy::referendum_status(0), Error::<Test>::ReferendumInvalid);
 	});
 }
diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs
index 5bcfbae994689..06b492bc6093c 100644
--- a/frame/democracy/src/tests/scheduling.rs
+++ b/frame/democracy/src/tests/scheduling.rs
@@ -1,6 +1,6 @@
 // This file is part of Substrate.

-// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
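The `u32::max_value()` to `u32::MAX` substitutions above track the standard library: the integer associated constants were stabilized in Rust 1.43 and are now the preferred spelling, with the `max_value()` methods slated for deprecation. The two forms are equivalent:

fn main() {
    // The associated constant and the old method return the same value;
    // the constant is the form now used throughout these tests.
    #[allow(deprecated)]
    {
        assert_eq!(u32::MAX, u32::max_value());
    }
    assert_eq!(u32::MAX, 4_294_967_295);
}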
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ fn simple_passing_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); @@ -43,7 +43,7 @@ fn simple_failing_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); assert_eq!(tally(r), Tally { ayes: 0, nays: 1, turnout: 10 }); @@ -62,13 +62,13 @@ fn ooo_inject_referendums_should_work() { 3, set_balance_proposal_hash_and_note(3), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); let r2 = Democracy::inject_referendum( 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); @@ -92,7 +92,7 @@ fn delayed_enactment_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 1 + 1, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index 9ae57797d15dd..e035c2d46c1b6 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,10 @@ use super::*; fn overvoting_should_fail() { new_test_ext().execute_with(|| { let r = begin_referendum(); - assert_noop!(Democracy::vote(Origin::signed(1), r, aye(2)), Error::::InsufficientFunds); + assert_noop!( + Democracy::vote(Origin::signed(1), r, aye(2)), + Error::::InsufficientFunds + ); }); } @@ -80,12 +83,12 @@ fn single_proposal_should_work() { fast_forward_to(3); // referendum still running - assert!(Democracy::referendum_status(0).is_ok()); + assert_ok!(Democracy::referendum_status(0)); // referendum runs during 2 and 3, ends @ start of 4. fast_forward_to(4); - assert!(Democracy::referendum_status(0).is_err()); + assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); // referendum passes and wait another two blocks for enactment. 
@@ -102,7 +105,7 @@ fn controversial_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, big_aye(1))); @@ -128,7 +131,7 @@ fn controversial_low_turnout_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); @@ -152,7 +155,7 @@ fn passing_low_turnout_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); diff --git a/frame/democracy/src/types.rs b/frame/democracy/src/types.rs index 8ee0838f8a36d..2eb004ba61bc4 100644 --- a/frame/democracy/src/types.rs +++ b/frame/democracy/src/types.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,29 +17,32 @@ //! Miscellaneous additional datatypes. -use codec::{Encode, Decode}; -use sp_runtime::RuntimeDebug; -use sp_runtime::traits::{Zero, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, Saturating}; -use crate::{Vote, VoteThreshold, AccountVote, Conviction}; +use crate::{AccountVote, Conviction, Vote, VoteThreshold}; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Saturating, Zero}, + RuntimeDebug, +}; /// Info regarding an ongoing referendum. -#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Tally { /// The number of aye votes, expressed in terms of post-conviction lock-vote. - pub (crate) ayes: Balance, + pub ayes: Balance, /// The number of nay votes, expressed in terms of post-conviction lock-vote. - pub (crate) nays: Balance, + pub nays: Balance, /// The amount of funds currently expressing its opinion. Pre-conviction. - pub (crate) turnout: Balance, + pub turnout: Balance, } /// Amount of votes and capital placed in delegation for an account. -#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Delegations { /// The number of votes (this is post-conviction). - pub (crate) votes: Balance, + pub votes: Balance, /// The amount of raw capital, used for the turnout. - pub (crate) capital: Balance, + pub capital: Balance, } impl Saturating for Delegations { @@ -65,22 +68,24 @@ impl Saturating for Delegations { } fn saturating_pow(self, exp: usize) -> Self { - Self { - votes: self.votes.saturating_pow(exp), - capital: self.capital.saturating_pow(exp), - } + Self { votes: self.votes.saturating_pow(exp), capital: self.capital.saturating_pow(exp) } } } impl< - Balance: From + Zero + Copy + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Bounded + - Saturating -> Tally { + Balance: From + + Zero + + Copy + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Bounded + + Saturating, + > Tally +{ /// Create a new tally. 
- pub fn new( - vote: Vote, - balance: Balance, - ) -> Self { + pub fn new(vote: Vote, balance: Balance) -> Self { let Delegations { votes, capital } = vote.conviction.votes(balance); Self { ayes: if vote.aye { votes } else { Zero::zero() }, @@ -90,10 +95,7 @@ impl< } /// Add an account's vote into the tally. - pub fn add( - &mut self, - vote: AccountVote, - ) -> Option<()> { + pub fn add(&mut self, vote: AccountVote) -> Option<()> { match vote { AccountVote::Standard { vote, balance } => { let Delegations { votes, capital } = vote.conviction.votes(balance); @@ -102,23 +104,20 @@ impl< true => self.ayes = self.ayes.checked_add(&votes)?, false => self.nays = self.nays.checked_add(&votes)?, } - } + }, AccountVote::Split { aye, nay } => { let aye = Conviction::None.votes(aye); let nay = Conviction::None.votes(nay); self.turnout = self.turnout.checked_add(&aye.capital)?.checked_add(&nay.capital)?; self.ayes = self.ayes.checked_add(&aye.votes)?; self.nays = self.nays.checked_add(&nay.votes)?; - } + }, } Some(()) } /// Remove an account's vote from the tally. - pub fn remove( - &mut self, - vote: AccountVote, - ) -> Option<()> { + pub fn remove(&mut self, vote: AccountVote) -> Option<()> { match vote { AccountVote::Standard { vote, balance } => { let Delegations { votes, capital } = vote.conviction.votes(balance); @@ -127,14 +126,14 @@ impl< true => self.ayes = self.ayes.checked_sub(&votes)?, false => self.nays = self.nays.checked_sub(&votes)?, } - } + }, AccountVote::Split { aye, nay } => { let aye = Conviction::None.votes(aye); let nay = Conviction::None.votes(nay); self.turnout = self.turnout.checked_sub(&aye.capital)?.checked_sub(&nay.capital)?; self.ayes = self.ayes.checked_sub(&aye.votes)?; self.nays = self.nays.checked_sub(&nay.votes)?; - } + }, } Some(()) } @@ -161,27 +160,27 @@ impl< } /// Info regarding an ongoing referendum. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct ReferendumStatus { /// When voting on this referendum will end. - pub (crate) end: BlockNumber, + pub end: BlockNumber, /// The hash of the proposal being voted on. - pub (crate) proposal_hash: Hash, + pub proposal_hash: Hash, /// The thresholding mechanism to determine whether it passed. - pub (crate) threshold: VoteThreshold, + pub threshold: VoteThreshold, /// The delay (in blocks) to wait after a successful referendum before deploying. - pub (crate) delay: BlockNumber, + pub delay: BlockNumber, /// The current tally of votes in this referendum. - pub (crate) tally: Tally, + pub tally: Tally, } /// Info regarding a referendum, present or past. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum ReferendumInfo { /// Referendum is happening, the arg is the block number at which it will end. Ongoing(ReferendumStatus), /// Referendum finished at `end`, and has been `approved` or rejected. 
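`Tally::add` and `Tally::remove` above return `Option<()>` so that a single overflowing `checked_add`/`checked_sub` aborts the whole update through the `?` operator. The same pattern in self-contained form, with an illustrative struct rather than the pallet's generic `Tally`:

// Illustrative tally using the same checked-arithmetic pattern as above:
// any overflowing step returns None and the caller sees the update fail.
struct MiniTally {
    ayes: u64,
    nays: u64,
    turnout: u64,
}

impl MiniTally {
    fn add(&mut self, aye: bool, votes: u64, capital: u64) -> Option<()> {
        self.turnout = self.turnout.checked_add(capital)?;
        if aye {
            self.ayes = self.ayes.checked_add(votes)?;
        } else {
            self.nays = self.nays.checked_add(votes)?;
        }
        Some(())
    }
}

fn main() {
    let mut t = MiniTally { ayes: 0, nays: 0, turnout: 0 };
    assert_eq!(t.add(true, 40, 10), Some(()));
    // An overflowing capital makes the whole call report failure.
    assert_eq!(t.add(true, 1, u64::MAX), None);
}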
- Finished{approved: bool, end: BlockNumber}, + Finished { approved: bool, end: BlockNumber }, } impl ReferendumInfo { @@ -192,7 +191,7 @@ impl ReferendumInfo Self { - let s = ReferendumStatus{ end, proposal_hash, threshold, delay, tally: Tally::default() }; + let s = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Tally::default() }; ReferendumInfo::Ongoing(s) } } diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index 09ff0d71e48ca..03ca020ca0949 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,10 +17,14 @@ //! The vote datatype. -use sp_std::{prelude::*, result::Result, convert::TryFrom}; -use codec::{Encode, EncodeLike, Decode, Output, Input}; -use sp_runtime::{RuntimeDebug, traits::{Saturating, Zero}}; -use crate::{Conviction, ReferendumIndex, Delegations}; +use crate::{Conviction, Delegations, ReferendumIndex}; +use codec::{Decode, Encode, EncodeLike, Input, Output}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{Saturating, Zero}, + RuntimeDebug, +}; +use sp_std::{convert::TryFrom, prelude::*, result::Result}; /// A number of lock periods, plus a vote, one way or the other. #[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)] @@ -30,7 +34,7 @@ pub struct Vote { } impl Encode for Vote { - fn encode_to(&self, output: &mut T) { + fn encode_to(&self, output: &mut T) { output.push_byte(u8::from(self.conviction) | if self.aye { 0b1000_0000 } else { 0 }); } } @@ -48,8 +52,21 @@ impl Decode for Vote { } } +impl TypeInfo for Vote { + type Identity = Self; + + fn type_info() -> scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("Vote", module_path!())) + .composite( + scale_info::build::Fields::unnamed() + .field(|f| f.ty::().docs(&["Raw vote byte, encodes aye + conviction"])), + ) + } +} + /// A vote for a referendum of a particular account. -#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug)] +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum AccountVote { /// A standard vote, one-way (approve or reject) with a given amount of conviction. Standard { vote: Vote, balance: Balance }, @@ -89,7 +106,9 @@ impl AccountVote { } /// A "prior" lock, i.e. a lock for some now-forgotten reason. -#[derive(Encode, Decode, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +#[derive( + Encode, Decode, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo, +)] pub struct PriorLock(BlockNumber, Balance); impl PriorLock { @@ -112,7 +131,7 @@ impl PriorLock { /// The account is voting directly. `delegations` is the total amount of post-conviction voting /// weight that it controls from those that have delegated to it. @@ -136,7 +155,9 @@ pub enum Voting { }, } -impl Default for Voting { +impl Default + for Voting +{ fn default() -> Self { Voting::Direct { votes: Vec::new(), @@ -146,31 +167,30 @@ impl Default for Voting Voting { +impl + Voting +{ pub fn rejig(&mut self, now: BlockNumber) { match self { Voting::Direct { prior, .. } => prior, Voting::Delegating { prior, .. } => prior, - }.rejig(now); + } + .rejig(now); } /// The amount of this account's balance that much currently be locked due to voting. 
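The manual `Encode`/`Decode` impls above pack a `Vote` into a single byte, with the conviction in the low bits and the `aye` flag in the top bit (`0b1000_0000`); the new hand-written `TypeInfo` impl documents exactly that raw byte. A minimal sketch of the packing and its inverse (plain functions, not the pallet's codec impls):

/// Pack `aye` + conviction into one byte: top bit = direction,
/// low bits = conviction, mirroring `Vote::encode_to` above.
fn encode_vote(aye: bool, conviction: u8) -> u8 {
    debug_assert!(conviction <= 6, "Conviction has seven variants (0..=6)");
    conviction | if aye { 0b1000_0000 } else { 0 }
}

/// Inverse of the packing, mirroring `Vote::decode`.
fn decode_vote(b: u8) -> (bool, u8) {
    (b & 0b1000_0000 != 0, b & 0b0111_1111)
}

fn main() {
    let byte = encode_vote(true, 3); // an aye with Locked3x conviction
    assert_eq!(byte, 0b1000_0011);
    assert_eq!(decode_vote(byte), (true, 3));
    assert_eq!(decode_vote(encode_vote(false, 0)), (false, 0));
}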
pub fn locked_balance(&self) -> Balance { match self { - Voting::Direct { votes, prior, .. } => votes.iter() - .map(|i| i.1.balance()) - .fold(prior.locked(), |a, i| a.max(i)), + Voting::Direct { votes, prior, .. } => + votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)), Voting::Delegating { balance, .. } => *balance, } } - pub fn set_common(&mut self, + pub fn set_common( + &mut self, delegations: Delegations, - prior: PriorLock + prior: PriorLock, ) { let (d, p) = match self { Voting::Direct { ref mut delegations, ref mut prior, .. } => (delegations, prior), diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index 2268a55936c50..ad8bce290ed4f 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,15 +17,16 @@ //! Voting thresholds. -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; -use sp_runtime::traits::{Zero, IntegerSquareRoot}; -use sp_std::ops::{Add, Mul, Div, Rem}; use crate::Tally; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use sp_runtime::traits::{IntegerSquareRoot, Zero}; +use sp_std::ops::{Add, Div, Mul, Rem}; /// A means of determining if a vote is past pass threshold. -#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug)] +#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum VoteThreshold { /// A supermajority of approvals is needed to pass this vote. @@ -43,25 +44,32 @@ pub trait Approved { } /// Return `true` iff `n1 / d1 < n2 / d2`. `d1` and `d2` may not be zero. -fn compare_rationals + Div + Rem + Ord + Copy>(mut n1: T, mut d1: T, mut n2: T, mut d2: T) -> bool { +fn compare_rationals< + T: Zero + Mul + Div + Rem + Ord + Copy, +>( + mut n1: T, + mut d1: T, + mut n2: T, + mut d2: T, +) -> bool { // Uses a continued fractional representation for a non-overflowing compare. // Detailed at https://janmr.com/blog/2014/05/comparing-rational-numbers-without-overflow/. 
loop { let q1 = n1 / d1; let q2 = n2 / d2; if q1 < q2 { - return true; + return true } if q2 < q1 { - return false; + return false } let r1 = n1 % d1; let r2 = n2 % d2; if r2.is_zero() { - return false; + return false } if r1.is_zero() { - return true; + return true } n1 = d2; n2 = d1; @@ -71,14 +79,22 @@ fn compare_rationals + Div + Rem - + Mul + Div - + Rem + Copy, -> Approved for VoteThreshold { + Balance: IntegerSquareRoot + + Zero + + Ord + + Add + + Mul + + Div + + Rem + + Copy, + > Approved for VoteThreshold +{ fn approved(&self, tally: Tally, electorate: Balance) -> bool { let sqrt_voters = tally.turnout.integer_sqrt(); let sqrt_electorate = electorate.integer_sqrt(); - if sqrt_voters.is_zero() { return false; } + if sqrt_voters.is_zero() { + return false + } match *self { VoteThreshold::SuperMajorityApprove => compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate), @@ -95,7 +111,9 @@ mod tests { #[test] fn should_work() { - assert!(!VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 60, nays: 50, turnout: 110}, 210)); - assert!(VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 100, nays: 50, turnout: 150}, 210)); + assert!(!VoteThreshold::SuperMajorityApprove + .approved(Tally { ayes: 60, nays: 50, turnout: 110 }, 210)); + assert!(VoteThreshold::SuperMajorityApprove + .approved(Tally { ayes: 100, nays: 50, turnout: 150 }, 210)); } } diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs new file mode 100644 index 0000000000000..e3f22f4fc0ab3 --- /dev/null +++ b/frame/democracy/src/weights.rs @@ -0,0 +1,524 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_democracy +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_democracy +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/democracy/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_democracy. 
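`compare_rationals` above decides `n1/d1 < n2/d2` with a continued-fraction loop so that no intermediate product can overflow, and `SuperMajorityApprove` uses it to test `nays / sqrt(turnout) < ayes / sqrt(electorate)` (adaptive quorum biasing). For intuition only, here is the same check done naively in floating point; the pallet deliberately stays in integer arithmetic to remain exact and overflow-free:

/// Naive floating-point version of the SuperMajorityApprove test above:
/// approved iff nays / sqrt(turnout) < ayes / sqrt(electorate).
/// The pallet uses integer square roots plus `compare_rationals` instead.
fn super_majority_approved(ayes: f64, nays: f64, turnout: f64, electorate: f64) -> bool {
    if turnout == 0.0 {
        return false; // mirrors the `sqrt_voters.is_zero()` early return
    }
    nays / turnout.sqrt() < ayes / electorate.sqrt()
}

fn main() {
    // The two cases from the `should_work` test in vote_threshold.rs:
    assert!(!super_majority_approved(60.0, 50.0, 110.0, 210.0));
    assert!(super_majority_approved(100.0, 50.0, 150.0, 210.0));
}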
+pub trait WeightInfo { + fn propose() -> Weight; + fn second(s: u32, ) -> Weight; + fn vote_new(r: u32, ) -> Weight; + fn vote_existing(r: u32, ) -> Weight; + fn emergency_cancel() -> Weight; + fn blacklist(p: u32, ) -> Weight; + fn external_propose(v: u32, ) -> Weight; + fn external_propose_majority() -> Weight; + fn external_propose_default() -> Weight; + fn fast_track() -> Weight; + fn veto_external(v: u32, ) -> Weight; + fn cancel_proposal(p: u32, ) -> Weight; + fn cancel_referendum() -> Weight; + fn cancel_queued(r: u32, ) -> Weight; + fn on_initialize_base(r: u32, ) -> Weight; + fn delegate(r: u32, ) -> Weight; + fn undelegate(r: u32, ) -> Weight; + fn clear_public_proposals() -> Weight; + fn note_preimage(b: u32, ) -> Weight; + fn note_imminent_preimage(b: u32, ) -> Weight; + fn reap_preimage(b: u32, ) -> Weight; + fn unlock_remove(r: u32, ) -> Weight; + fn unlock_set(r: u32, ) -> Weight; + fn remove_vote(r: u32, ) -> Weight; + fn remove_other_vote(r: u32, ) -> Weight; +} + +/// Weights for pallet_democracy using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Democracy PublicPropCount (r:1 w:1) + // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy Blacklist (r:1 w:0) + // Storage: Democracy DepositOf (r:0 w:1) + fn propose() -> Weight { + (65_665_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy DepositOf (r:1 w:1) + fn second(s: u32, ) -> Weight { + (40_003_000 as Weight) + // Standard Error: 1_000 + .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_new(r: u32, ) -> Weight { + (45_465_000 as Weight) + // Standard Error: 1_000 + .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_existing(r: u32, ) -> Weight { + (45_112_000 as Weight) + // Standard Error: 1_000 + .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy Cancellations (r:1 w:1) + fn emergency_cancel() -> Weight { + (26_651_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy Blacklist (r:0 w:1) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn blacklist(p: u32, ) -> Weight { + (77_737_000 as Weight) + // Standard Error: 4_000 + .saturating_add((512_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) + } + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy 
Blacklist (r:1 w:0) + fn external_propose(v: u32, ) -> Weight { + (13_126_000 as Weight) + // Standard Error: 0 + .saturating_add((89_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy NextExternal (r:0 w:1) + fn external_propose_majority() -> Weight { + (2_923_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy NextExternal (r:0 w:1) + fn external_propose_default() -> Weight { + (2_889_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy ReferendumCount (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:0 w:1) + fn fast_track() -> Weight { + (27_598_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy Blacklist (r:1 w:1) + fn veto_external(v: u32, ) -> Weight { + (28_416_000 as Weight) + // Standard Error: 0 + .saturating_add((132_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn cancel_proposal(p: u32, ) -> Weight { + (52_836_000 as Weight) + // Standard Error: 2_000 + .saturating_add((478_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:0 w:1) + fn cancel_referendum() -> Weight { + (16_891_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn cancel_queued(r: u32, ) -> Weight { + (30_504_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_480_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy LastTabledWasExternal (r:1 w:0) + // Storage: Democracy NextExternal (r:1 w:0) + // Storage: Democracy PublicProps (r:1 w:0) + // Storage: Democracy LowestUnbaked (r:1 w:0) + // Storage: Democracy ReferendumCount (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:1 w:0) + fn on_initialize_base(r: u32, ) -> Weight { + (6_259_000 as Weight) + // Standard Error: 4_000 + .saturating_add((5_032_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy VotingOf (r:3 w:3) + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn delegate(r: u32, ) -> Weight { + (51_719_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_210_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy VotingOf (r:2 w:2) + // Storage: Democracy 
ReferendumInfoOf (r:1 w:1) + fn undelegate(r: u32, ) -> Weight { + (23_203_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_206_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy PublicProps (r:0 w:1) + fn clear_public_proposals() -> Weight { + (3_127_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_preimage(b: u32, ) -> Weight { + (44_130_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_imminent_preimage(b: u32, ) -> Weight { + (28_756_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + // Storage: System Account (r:1 w:0) + fn reap_preimage(b: u32, ) -> Weight { + (39_922_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlock_remove(r: u32, ) -> Weight { + (38_621_000 as Weight) + // Standard Error: 1_000 + .saturating_add((110_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlock_set(r: u32, ) -> Weight { + (36_631_000 as Weight) + // Standard Error: 1_000 + .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + fn remove_vote(r: u32, ) -> Weight { + (21_025_000 as Weight) + // Standard Error: 1_000 + .saturating_add((195_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + fn remove_other_vote(r: u32, ) -> Weight { + (20_628_000 as Weight) + // Standard Error: 1_000 + .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Democracy PublicPropCount (r:1 w:1) + // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy Blacklist (r:1 w:0) + // Storage: Democracy DepositOf (r:0 w:1) + fn propose() -> Weight { + (65_665_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 
as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy DepositOf (r:1 w:1) + fn second(s: u32, ) -> Weight { + (40_003_000 as Weight) + // Standard Error: 1_000 + .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_new(r: u32, ) -> Weight { + (45_465_000 as Weight) + // Standard Error: 1_000 + .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_existing(r: u32, ) -> Weight { + (45_112_000 as Weight) + // Standard Error: 1_000 + .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy Cancellations (r:1 w:1) + fn emergency_cancel() -> Weight { + (26_651_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy Blacklist (r:0 w:1) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn blacklist(p: u32, ) -> Weight { + (77_737_000 as Weight) + // Standard Error: 4_000 + .saturating_add((512_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + } + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy Blacklist (r:1 w:0) + fn external_propose(v: u32, ) -> Weight { + (13_126_000 as Weight) + // Standard Error: 0 + .saturating_add((89_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy NextExternal (r:0 w:1) + fn external_propose_majority() -> Weight { + (2_923_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy NextExternal (r:0 w:1) + fn external_propose_default() -> Weight { + (2_889_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy ReferendumCount (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:0 w:1) + fn fast_track() -> Weight { + (27_598_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy NextExternal (r:1 w:1) + // Storage: Democracy Blacklist (r:1 w:1) + fn veto_external(v: u32, ) -> Weight { + (28_416_000 as Weight) + // Standard Error: 0 + .saturating_add((132_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy 
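Every generated function above has the same shape: a constant base weight, a per-item slope multiplied by the benchmark component, plus database read/write costs. A sketch that evaluates `second(s)` with the numbers from this file; the RocksDB constants (25 µs per read, 100 µs per write, at 10^6 weight units per µs) are Substrate's stock values of the time and should be treated as an assumption, since the real figures come from `RocksDbWeight::get()`:

/// Evaluate a benchmarked weight of the shape used above:
/// base + slope * component + reads * db_read + writes * db_write.
fn weight(base: u64, slope: u64, n: u64, reads: u64, writes: u64) -> u64 {
    // Saturating math mirrors the saturating_add/saturating_mul
    // chains in the generated file.
    base.saturating_add(slope.saturating_mul(n))
        .saturating_add(reads.saturating_mul(DB_READ))
        .saturating_add(writes.saturating_mul(DB_WRITE))
}

// Assumed stock RocksDb costs: 25 µs per read, 100 µs per write.
const DB_READ: u64 = 25_000_000;
const DB_WRITE: u64 = 100_000_000;

fn main() {
    // `second(s)` above: base 40_003_000, slope 180_000 per existing
    // second, one storage read and one write (Democracy DepositOf r:1 w:1).
    let w = weight(40_003_000, 180_000, 10, 1, 1);
    assert_eq!(w, 40_003_000 + 1_800_000 + 25_000_000 + 100_000_000);
}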
DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn cancel_proposal(p: u32, ) -> Weight { + (52_836_000 as Weight) + // Standard Error: 2_000 + .saturating_add((478_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:0 w:1) + fn cancel_referendum() -> Weight { + (16_891_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Scheduler Lookup (r:1 w:1) + // Storage: Scheduler Agenda (r:1 w:1) + fn cancel_queued(r: u32, ) -> Weight { + (30_504_000 as Weight) + // Standard Error: 2_000 + .saturating_add((1_480_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy LastTabledWasExternal (r:1 w:0) + // Storage: Democracy NextExternal (r:1 w:0) + // Storage: Democracy PublicProps (r:1 w:0) + // Storage: Democracy LowestUnbaked (r:1 w:0) + // Storage: Democracy ReferendumCount (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:1 w:0) + fn on_initialize_base(r: u32, ) -> Weight { + (6_259_000 as Weight) + // Standard Error: 4_000 + .saturating_add((5_032_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy VotingOf (r:3 w:3) + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn delegate(r: u32, ) -> Weight { + (51_719_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_210_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy VotingOf (r:2 w:2) + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + fn undelegate(r: u32, ) -> Weight { + (23_203_000 as Weight) + // Standard Error: 5_000 + .saturating_add((7_206_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy PublicProps (r:0 w:1) + fn clear_public_proposals() -> Weight { + (3_127_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_preimage(b: u32, ) -> Weight { + (44_130_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + fn note_imminent_preimage(b: u32, ) -> Weight { + (28_756_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy Preimages (r:1 w:1) + // Storage: System Account 
(r:1 w:0) + fn reap_preimage(b: u32, ) -> Weight { + (39_922_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlock_remove(r: u32, ) -> Weight { + (38_621_000 as Weight) + // Standard Error: 1_000 + .saturating_add((110_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy VotingOf (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn unlock_set(r: u32, ) -> Weight { + (36_631_000 as Weight) + // Standard Error: 1_000 + .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + fn remove_vote(r: u32, ) -> Weight { + (21_025_000 as Weight) + // Standard Error: 1_000 + .saturating_add((195_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy VotingOf (r:1 w:1) + fn remove_other_vote(r: u32, ) -> Weight { + (20_628_000 as Weight) + // Standard Error: 1_000 + .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml new file mode 100644 index 0000000000000..b2d50321e8cd3 --- /dev/null +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -0,0 +1,81 @@ +[package] +name = "pallet-election-provider-multi-phase" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "PALLET two phase election providers" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +static_assertions = "1.1.0" +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +log = { version = "0.4.14", default-features = false } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } + +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } +sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = 
"../../primitives/arithmetic" } +frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../election-provider-support" } + +# Optional imports for benchmarking +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +rand = { version = "0.7.3", default-features = false, optional = true, features = [ + "alloc", + "small_rng", +] } +strum = { optional = true, version = "0.21.0" } +strum_macros = { optional = true, version = "0.21.1" } + +[dev-dependencies] +parking_lot = "0.11.0" +rand = { version = "0.7.3" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } +sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } +frame-election-provider-support = { version = "4.0.0-dev", features = [ +], path = "../election-provider-support" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "log/std", + + "frame-support/std", + "frame-system/std", + + "sp-io/std", + "sp-std/std", + "sp-core/std", + "sp-runtime/std", + "sp-npos-elections/std", + "sp-arithmetic/std", + "frame-election-provider-support/std", + "log/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-election-provider-support/runtime-benchmarks", + "rand", + "strum", + "strum_macros", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs new file mode 100644 index 0000000000000..fb5adda52e166 --- /dev/null +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -0,0 +1,528 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Two phase election pallet benchmarking. + +use super::*; +use crate::{unsigned::IndexAssignmentOf, Pallet as MultiPhase}; +use frame_benchmarking::{account, impl_benchmark_test_suite}; +use frame_support::{assert_ok, traits::Hooks}; +use frame_system::RawOrigin; +use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; +use sp_arithmetic::{per_things::Percent, traits::One}; +use sp_npos_elections::IndexAssignment; +use sp_runtime::InnerOf; +use sp_std::{ + boxed::Box, + convert::{TryFrom, TryInto}, +}; + +const SEED: u32 = 999; + +/// Creates a **valid** solution with exactly the given size. +/// +/// The snapshot is also created internally. 
+fn solution_with_size( + size: SolutionOrSnapshotSize, + active_voters_count: u32, + desired_targets: u32, +) -> Result>, &'static str> { + ensure!(size.targets >= desired_targets, "must have enough targets"); + ensure!( + size.targets >= (>::LIMIT * 2) as u32, + "must have enough targets for unique votes." + ); + ensure!(size.voters >= active_voters_count, "must have enough voters"); + ensure!( + (>::LIMIT as u32) < desired_targets, + "must have enough winners to give them votes." + ); + + let ed: VoteWeight = T::Currency::minimum_balance().saturated_into::(); + let stake: VoteWeight = ed.max(One::one()).saturating_mul(100); + + // first, generate random targets. + let targets: Vec = (0..size.targets) + .map(|i| frame_benchmarking::account("Targets", i, SEED)) + .collect(); + + let mut rng = SmallRng::seed_from_u64(SEED.into()); + + // decide who the winners are. + let winners = targets + .as_slice() + .choose_multiple(&mut rng, desired_targets as usize) + .cloned() + .collect::>(); + + // first generate active voters who must vote for a subset of winners. + let active_voters = (0..active_voters_count) + .map(|i| { + // choose a random subset of winners. + let winner_votes = winners + .as_slice() + .choose_multiple(&mut rng, >::LIMIT) + .cloned() + .collect::>(); + let voter = frame_benchmarking::account::("Voter", i, SEED); + (voter, stake, winner_votes) + }) + .collect::>(); + + // the rest of the voters. They can only vote for non-winners. + let non_winners = targets + .iter() + .filter(|t| !winners.contains(t)) + .cloned() + .collect::>(); + let rest_voters = (active_voters_count..size.voters) + .map(|i| { + let votes = (&non_winners) + .choose_multiple(&mut rng, >::LIMIT) + .cloned() + .collect::>(); + let voter = frame_benchmarking::account::("Voter", i, SEED); + (voter, stake, votes) + }) + .collect::>(); + + let mut all_voters = active_voters.clone(); + all_voters.extend(rest_voters); + all_voters.shuffle(&mut rng); + + assert_eq!(active_voters.len() as u32, active_voters_count); + assert_eq!(all_voters.len() as u32, size.voters); + assert_eq!(winners.len() as u32, desired_targets); + + >::put(SolutionOrSnapshotSize { + voters: all_voters.len() as u32, + targets: targets.len() as u32, + }); + >::put(desired_targets); + >::put(RoundSnapshot { voters: all_voters.clone(), targets: targets.clone() }); + + // write the snapshot to staking or whoever is the data provider, in case it is needed further + // down the road.
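+	// (`put_snapshot` is a benchmarking helper on `ElectionDataProvider`; the assumption here + // is that the provider simply overwrites its voter and target lists with the given values, + // so it can serve them back later if asked.)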
+ T::DataProvider::put_snapshot(all_voters.clone(), targets.clone(), Some(stake)); + + let cache = helpers::generate_voter_cache::(&all_voters); + let stake_of = helpers::stake_of_fn::(&all_voters, &cache); + let voter_index = helpers::voter_index_fn::(&cache); + let target_index = helpers::target_index_fn::(&targets); + let voter_at = helpers::voter_at_fn::(&all_voters); + let target_at = helpers::target_at_fn::(&targets); + + let assignments = active_voters + .iter() + .map(|(voter, _stake, votes)| { + let percent_per_edge: InnerOf> = + (100 / votes.len()).try_into().unwrap_or_else(|_| panic!("failed to convert")); + crate::unsigned::Assignment:: { + who: voter.clone(), + distribution: votes + .iter() + .map(|t| (t.clone(), >::from_percent(percent_per_edge))) + .collect::>(), + } + }) + .collect::>(); + + let solution = + >::from_assignment(&assignments, &voter_index, &target_index).unwrap(); + let score = solution.clone().score(stake_of, voter_at, target_at).unwrap(); + let round = >::round(); + + assert!(score[0] > 0, "score is zero, this probably means that the stakes are not set."); + Ok(RawSolution { solution, score, round }) +} + +fn set_up_data_provider(v: u32, t: u32) { + T::DataProvider::clear(); + log!( + info, + "setting up with voters = {} [degree = {}], targets = {}", + v, + T::DataProvider::MAXIMUM_VOTES_PER_VOTER, + t + ); + + // fill targets. + let mut targets = (0..t) + .map(|i| { + let target = frame_benchmarking::account::("Target", i, SEED); + T::DataProvider::add_target(target.clone()); + target + }) + .collect::>(); + // we should always have enough targets to fill a voter's ballot. + assert!(targets.len() > T::DataProvider::MAXIMUM_VOTES_PER_VOTER as usize); + targets.truncate(T::DataProvider::MAXIMUM_VOTES_PER_VOTER as usize); + + // fill voters. + (0..v).for_each(|i| { + let voter = frame_benchmarking::account::("Voter", i, SEED); + let weight = T::Currency::minimum_balance().saturated_into::() * 1000; + T::DataProvider::add_voter(voter, weight, targets.clone()); + }); +} + +frame_benchmarking::benchmarks!
{ + on_initialize_nothing { + assert!(>::current_phase().is_off()); + }: { + >::on_initialize(1u32.into()); + } verify { + assert!(>::current_phase().is_off()); + } + + on_initialize_open_signed { + assert!(>::snapshot().is_none()); + assert!(>::current_phase().is_off()); + }: { + >::on_initialize_open_signed(); + } verify { + assert!(>::snapshot().is_none()); + assert!(>::current_phase().is_signed()); + } + + on_initialize_open_unsigned { + assert!(>::snapshot().is_none()); + assert!(>::current_phase().is_off()); + }: { + >::on_initialize_open_unsigned(true, 1u32.into()) + } verify { + assert!(>::snapshot().is_none()); + assert!(>::current_phase().is_unsigned()); + } + + finalize_signed_phase_accept_solution { + let receiver = account("receiver", 0, SEED); + let initial_balance = T::Currency::minimum_balance() * 10u32.into(); + T::Currency::make_free_balance_be(&receiver, initial_balance); + let ready: ReadySolution = Default::default(); + let deposit: BalanceOf = 10u32.into(); + let reward: BalanceOf = 20u32.into(); + + assert_ok!(T::Currency::reserve(&receiver, deposit)); + assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + }: { + >::finalize_signed_phase_accept_solution(ready, &receiver, deposit, reward) + } verify { + assert_eq!(T::Currency::free_balance(&receiver), initial_balance + 20u32.into()); + assert_eq!(T::Currency::reserved_balance(&receiver), 0u32.into()); + } + + finalize_signed_phase_reject_solution { + let receiver = account("receiver", 0, SEED); + let initial_balance = T::Currency::minimum_balance().max(One::one()) * 10u32.into(); + let deposit: BalanceOf = 10u32.into(); + T::Currency::make_free_balance_be(&receiver, initial_balance); + assert_ok!(T::Currency::reserve(&receiver, deposit)); + + assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + assert_eq!(T::Currency::reserved_balance(&receiver), 10u32.into()); + }: { + >::finalize_signed_phase_reject_solution(&receiver, deposit) + } verify { + assert_eq!(T::Currency::free_balance(&receiver), initial_balance - 10u32.into()); + assert_eq!(T::Currency::reserved_balance(&receiver), 0u32.into()); + } + + create_snapshot_internal { + // number of votes in snapshot. Fixed to maximum. + let v = T::BenchmarkingConfig::SNAPSHOT_MAXIMUM_VOTERS; + // number of targets in snapshot. Fixed to maximum. + let t = T::BenchmarkingConfig::MAXIMUM_TARGETS; + + // we don't directly need the data-provider to be populated, but it is just easy to use it. + set_up_data_provider::(v, t); + let targets = T::DataProvider::targets(None)?; + let voters = T::DataProvider::voters(None)?; + let desired_targets = T::DataProvider::desired_targets()?; + assert!(>::snapshot().is_none()); + }: { + >::create_snapshot_internal(targets, voters, desired_targets) + } verify { + assert!(>::snapshot().is_some()); + assert_eq!(>::snapshot_metadata().ok_or("metadata missing")?.voters, v + t); + assert_eq!(>::snapshot_metadata().ok_or("metadata missing")?.targets, t); + } + + // a call to `::elect` where we only return the queued solution. + elect_queued { + // number of assignments, i.e. solution.len(). This means the active nominators, thus must be + // a subset of `v`. + let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + // number of desired targets. Must be a subset of `t`. + let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. T::BenchmarkingConfig::DESIRED_TARGETS[1]; + + // number of votes in snapshot. Not dominant. 
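+ // (`v` and `t` below are pinned to the upper end of their configured ranges; only the + // `a` and `d` components above vary in this benchmark.)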
+ let v = T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. Not dominant. + let t = T::BenchmarkingConfig::TARGETS[1]; + + let witness = SolutionOrSnapshotSize { voters: v, targets: t }; + let raw_solution = solution_with_size::(witness, a, d)?; + let ready_solution = + >::feasibility_check(raw_solution, ElectionCompute::Signed) + .map_err(<&str>::from)?; + >::put(Phase::Signed); + // assume a queued solution is stored, regardless of where it comes from. + >::put(ready_solution); + + // these are set by the `solution_with_size` function. + assert!(>::get().is_some()); + assert!(>::get().is_some()); + assert!(>::get().is_some()); + }: { + assert_ok!( as ElectionProvider>::elect()); + } verify { + assert!(>::queued_solution().is_none()); + assert!(>::get().is_none()); + assert!(>::get().is_none()); + assert!(>::get().is_none()); + assert_eq!(>::get(), >::Off); + } + + submit { + let c in 1 .. (T::SignedMaxSubmissions::get() - 1); + + // the solution will be worse than all of them, meaning the score needs to be checked against + // ~ log2(c) of them + let solution = RawSolution { + score: [(10_000_000u128 - 1).into(), 0, 0], + ..Default::default() + }; + + >::create_snapshot().map_err(<&str>::from)?; + MultiPhase::::on_initialize_open_signed(); + >::put(1); + + let mut signed_submissions = SignedSubmissions::::get(); + for i in 0..c { + let raw_solution = RawSolution { + score: [(10_000_000 + i).into(), 0, 0], + ..Default::default() + }; + let signed_submission = SignedSubmission { raw_solution, ..Default::default() }; + signed_submissions.insert(signed_submission); + } + signed_submissions.put(); + + let caller = frame_benchmarking::whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance() * 10u32.into()); + + }: _(RawOrigin::Signed(caller), Box::new(solution), c) + verify { + assert!(>::signed_submissions().len() as u32 == c + 1); + } + + submit_unsigned { + // number of votes in snapshot. + let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. + let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; + // number of assignments, i.e. solution.len(). This means the active nominators, thus must be + // a subset of `v` component. + let a in + (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + // number of desired targets. Must be a subset of `t` component. + let d in + (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. + T::BenchmarkingConfig::DESIRED_TARGETS[1]; + + let witness = SolutionOrSnapshotSize { voters: v, targets: t }; + let raw_solution = solution_with_size::(witness, a, d)?; + + assert!(>::queued_solution().is_none()); + >::put(Phase::Unsigned((true, 1u32.into()))); + + // encode the most significant storage item that needs to be decoded in the dispatch. + let encoded_snapshot = >::snapshot().ok_or("missing snapshot")?.encode(); + let encoded_call = Call::::submit_unsigned { + raw_solution: Box::new(raw_solution.clone()), + witness + }.encode(); + }: { + assert_ok!( + >::submit_unsigned( + RawOrigin::None.into(), + Box::new(raw_solution), + witness, + ) + ); + let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) + .expect("decoding should not fail; qed."); + let _decoded_call = as Decode>::decode(&mut &*encoded_call).expect("decoding should not fail; qed."); + } verify { + assert!(>::queued_solution().is_some()); + } + + // This is checking a valid solution. The worst case is indeed a valid solution.
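+ // Per the pallet docs, feasibility entails that all indices resolve into the snapshot, + // exactly the desired number of winners is present, every assignment matches the snapshot + // votes, and the claimed score re-computes correctly; the check below exercises all of + // these steps.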
+ feasibility_check { + // number of votes in snapshot. + let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. + let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; + // number of assignments, i.e. solution.len(). This means the active nominators, thus must be + // a subset of `v` component. + let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + // number of desired targets. Must be a subset of `t` component. + let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. T::BenchmarkingConfig::DESIRED_TARGETS[1]; + + let size = SolutionOrSnapshotSize { voters: v, targets: t }; + let raw_solution = solution_with_size::(size, a, d)?; + + assert_eq!(raw_solution.solution.voter_count() as u32, a); + assert_eq!(raw_solution.solution.unique_targets().len() as u32, d); + + // encode the most significant storage item that needs to be decoded in the dispatch. + let encoded_snapshot = >::snapshot().ok_or("snapshot missing")?.encode(); + }: { + assert_ok!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned)); + let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) + .expect("decoding should not fail; qed."); + } + + // NOTE: this weight is not used anywhere, but the fact that it should succeed when executed in + // isolation is vital to ensure memory-safety. For the same reason, we don't care about + // iterating the components; we merely check that this operation will work with the "maximum" + // numbers. + // + // ONLY run this benchmark in isolation, and pass the `--extra` flag to enable it. + // + // NOTE: If this benchmark does not run out of memory with a given number of heap pages, it + // means that the OCW process can SURELY succeed with the given configuration, but the opposite + // is not true. This benchmark does more work than a raw `OffchainWorker_offchain_worker` + // runtime API call, since it is also setting up some mock data, which will itself exhaust the + // heap to some extent. + #[extra] + mine_solution_offchain_memory { + // number of votes in snapshot. Fixed to maximum. + let v = T::BenchmarkingConfig::MINER_MAXIMUM_VOTERS; + // number of targets in snapshot. Fixed to maximum. + let t = T::BenchmarkingConfig::MAXIMUM_TARGETS; + + set_up_data_provider::(v, t); + let now = frame_system::Pallet::::block_number(); + >::put(Phase::Unsigned((true, now))); + >::create_snapshot().unwrap(); + }: { + // we can't really verify this as it won't write anything to state, check logs. + >::offchain_worker(now) + } + + // NOTE: this weight is not used anywhere, but the fact that it should succeed when executed in + // isolation is vital to ensure memory-safety. For the same reason, we don't care about + // iterating the components; we merely check that this operation will work with the "maximum" + // numbers. + // + // ONLY run this benchmark in isolation, and pass the `--extra` flag to enable it. + #[extra] + create_snapshot_memory { + // number of votes in snapshot. Fixed to maximum. + let v = T::BenchmarkingConfig::SNAPSHOT_MAXIMUM_VOTERS; + // number of targets in snapshot. Fixed to maximum.
+ let t = T::BenchmarkingConfig::MAXIMUM_TARGETS; + + set_up_data_provider::(v, t); + assert!(>::snapshot().is_none()); + }: { + >::create_snapshot().map_err(|_| "could not create snapshot")?; + } verify { + assert!(>::snapshot().is_some()); + assert_eq!(>::snapshot_metadata().ok_or("snapshot missing")?.voters, v + t); + assert_eq!(>::snapshot_metadata().ok_or("snapshot missing")?.targets, t); + } + + #[extra] + trim_assignments_length { + // number of votes in snapshot. + let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. + let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; + // number of assignments, i.e. solution.len(). This means the active nominators, thus must be + // a subset of `v` component. + let a in + (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + // number of desired targets. Must be a subset of `t` component. + let d in + (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. + T::BenchmarkingConfig::DESIRED_TARGETS[1]; + // Subtract this percentage from the actual encoded size + let f in 0 .. 95; + + // Compute a random solution, then work backwards to get the lists of voters, targets, and + // assignments + let witness = SolutionOrSnapshotSize { voters: v, targets: t }; + let RawSolution { solution, .. } = solution_with_size::(witness, a, d)?; + let RoundSnapshot { voters, targets } = MultiPhase::::snapshot().ok_or("snapshot missing")?; + let voter_at = helpers::voter_at_fn::(&voters); + let target_at = helpers::target_at_fn::(&targets); + let mut assignments = solution.into_assignment(voter_at, target_at).expect("solution generated by `solution_with_size` must be valid."); + + // make a voter cache and some helper functions for access + let cache = helpers::generate_voter_cache::(&voters); + let voter_index = helpers::voter_index_fn::(&cache); + let target_index = helpers::target_index_fn::(&targets); + + // sort assignments by decreasing voter stake + assignments.sort_by_key(|crate::unsigned::Assignment:: { who, .. 
}| { + let stake = cache.get(&who).map(|idx| { + let (_, stake, _) = voters[*idx]; + stake + }).unwrap_or_default(); + sp_std::cmp::Reverse(stake) + }); + + let mut index_assignments = assignments + .into_iter() + .map(|assignment| IndexAssignment::new(&assignment, &voter_index, &target_index)) + .collect::, _>>() + .unwrap(); + + let encoded_size_of = |assignments: &[IndexAssignmentOf]| { + SolutionOf::::try_from(assignments).map(|solution| solution.encoded_size()) + }; + + let desired_size = Percent::from_percent(100 - f.saturated_into::()) + .mul_ceil(encoded_size_of(index_assignments.as_slice()).unwrap()); + log!(trace, "desired_size = {}", desired_size); + }: { + MultiPhase::::trim_assignments_length( + desired_size.saturated_into(), + &mut index_assignments, + &encoded_size_of, + ).unwrap(); + } verify { + let solution = SolutionOf::::try_from(index_assignments.as_slice()).unwrap(); + let encoding = solution.encode(); + log!( + trace, + "encoded size prediction = {}", + encoded_size_of(index_assignments.as_slice()).unwrap(), + ); + log!(trace, "actual encoded size = {}", encoding.len()); + assert!(encoding.len() <= desired_size); + } +} + +impl_benchmark_test_suite!( + MultiPhase, + crate::mock::ExtBuilder::default().build_offchainify(10).0, + crate::mock::Runtime, +); diff --git a/frame/election-provider-multi-phase/src/helpers.rs b/frame/election-provider-multi-phase/src/helpers.rs new file mode 100644 index 0000000000000..72b1b23f27f3c --- /dev/null +++ b/frame/election-provider-multi-phase/src/helpers.rs @@ -0,0 +1,205 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Some helper functions/macros for this crate. + +use super::{Config, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight}; +use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::*}; + +#[macro_export] +macro_rules! log { + ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: $crate::LOG_TARGET, + concat!("[#{:?}] 🗳 ", $pattern), >::block_number() $(, $values)* + ) + }; +} + +/// Generate a btree-map cache of the voters and their indices. +/// +/// This can be used to efficiently build index getter closures. +pub fn generate_voter_cache( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> BTreeMap { + let mut cache: BTreeMap = BTreeMap::new(); + snapshot.iter().enumerate().for_each(|(i, (x, _, _))| { + let _existed = cache.insert(x.clone(), i); + // if a duplicate exists, we only consider the last one. Defensive only, should never + // happen. + debug_assert!(_existed.is_none()); + }); + + cache +} + +/// Create a function that returns the index of a voter in the snapshot. +/// +/// The returning index type is the same as the one defined in `T::Solution::Voter`. +/// +/// ## Warning +/// +/// Note that this will represent the snapshot data from which the `cache` is generated. 
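+///
+/// A hypothetical usage sketch (assuming some `T: Config` and a voter snapshot in scope):
+///
+/// ```ignore
+/// let cache = generate_voter_cache::<T>(&snapshot.voters);
+/// let voter_index = voter_index_fn::<T>(&cache);
+/// // Maps an account to its compact index, or `None` if it is not in the snapshot.
+/// let _maybe_index = voter_index(&who);
+/// ```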
+pub fn voter_index_fn( + cache: &BTreeMap, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { + cache + .get(who) + .and_then(|i| >>::try_into(*i).ok()) + } +} + +/// Create a function that returns the index of a voter in the snapshot. +/// +/// Same as [`voter_index_fn`] but the returned function owns all its necessary data; nothing is +/// borrowed. +pub fn voter_index_fn_owned( + cache: BTreeMap, +) -> impl Fn(&T::AccountId) -> Option> { + move |who| { + cache + .get(who) + .and_then(|i| >>::try_into(*i).ok()) + } +} + +/// Same as [`voter_index_fn`], but the returned index is converted into `usize`, if possible. +/// +/// ## Warning +/// +/// Note that this will represent the snapshot data from which the `cache` is generated. +pub fn voter_index_fn_usize( + cache: &BTreeMap, +) -> impl Fn(&T::AccountId) -> Option + '_ { + move |who| cache.get(who).cloned() +} + +/// A non-optimized, linear version of [`voter_index_fn`] that does not need a cache and does a +/// linear search. +/// +/// ## Warning +/// +/// Not meant to be used in production. +#[cfg(test)] +pub fn voter_index_fn_linear( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { + snapshot + .iter() + .position(|(x, _, _)| x == who) + .and_then(|i| >>::try_into(i).ok()) + } +} + +/// Create a function that returns the index of a target in the snapshot. +/// +/// The returned index type is the same as the one defined in `T::Solution::Target`. +/// +/// Note: to the extent possible, the returned function should be cached and reused. Producing that +/// function requires a `O(n log n)` data transform. Each invocation of that function completes +/// in `O(log n)`. +pub fn target_index_fn( + snapshot: &Vec, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + let cache: BTreeMap<_, _> = + snapshot.iter().enumerate().map(|(idx, account_id)| (account_id, idx)).collect(); + move |who| { + cache + .get(who) + .and_then(|i| >>::try_into(*i).ok()) + } +} + +/// Create a function that returns the index of a target in the snapshot. +/// +/// The returned index type is the same as the one defined in `T::Solution::Target`. +/// +/// ## Warning +/// +/// Not meant to be used in production. +#[cfg(test)] +pub fn target_index_fn_linear( + snapshot: &Vec, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { + snapshot + .iter() + .position(|x| x == who) + .and_then(|i| >>::try_into(i).ok()) + } +} + +/// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter +/// account using a linearly indexable snapshot. +pub fn voter_at_fn( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> impl Fn(SolutionVoterIndexOf) -> Option + '_ { + move |i| { + as TryInto>::try_into(i) + .ok() + .and_then(|i| snapshot.get(i).map(|(x, _, _)| x).cloned()) + } +} + +/// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target +/// account using a linearly indexable snapshot. +pub fn target_at_fn( + snapshot: &Vec, +) -> impl Fn(SolutionTargetIndexOf) -> Option + '_ { + move |i| { + as TryInto>::try_into(i) + .ok() + .and_then(|i| snapshot.get(i).cloned()) + } +} + +/// Create a function to get the stake of a voter. +/// +/// This is not optimized and uses a linear search.
+#[cfg(test)] +pub fn stake_of_fn_linear( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> impl Fn(&T::AccountId) -> VoteWeight + '_ { + move |who| { + snapshot + .iter() + .find(|(x, _, _)| x == who) + .map(|(_, x, _)| *x) + .unwrap_or_default() + } +} + +/// Create a function to get the stake of a voter. +/// +/// ## Warning +/// +/// The cache must be derived from the same snapshot. Zero is returned if a voter is +/// non-existent. +pub fn stake_of_fn<'a, T: Config>( + snapshot: &'a Vec<(T::AccountId, VoteWeight, Vec)>, + cache: &'a BTreeMap, +) -> impl Fn(&T::AccountId) -> VoteWeight + 'a { + move |who| { + if let Some(index) = cache.get(who) { + snapshot.get(*index).map(|(_, x, _)| x).cloned().unwrap_or_default() + } else { + 0 + } + } +} diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs new file mode 100644 index 0000000000000..269057b55b094 --- /dev/null +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -0,0 +1,2006 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Multi phase, offchain election provider pallet. +//! +//! Currently, this election-provider has two distinct phases (see [`Phase`]), **signed** and +//! **unsigned**. +//! +//! ## Phases +//! +//! The timeline of the pallet is as follows. At each block, +//! [`frame_election_provider_support::ElectionDataProvider::next_election_prediction`] is used to +//! estimate the time remaining to the next call to +//! [`frame_election_provider_support::ElectionProvider::elect`]. Based on this, a phase is chosen. +//! The timeline is as follows. +//! +//! ```ignore +//! elect() +//! + <--T::SignedPhase--> + <--T::UnsignedPhase--> + +//! +-------------------------------------------------------------------+ +//! Phase::Off + Phase::Signed + Phase::Unsigned + +//! ``` +//! +//! Note that the unsigned phase starts [`pallet::Config::UnsignedPhase`] blocks before the +//! `next_election_prediction`, but only ends when a call to [`ElectionProvider::elect`] happens. If +//! no `elect` happens, the unsigned phase is extended. +//! +//! > Given this, it is rather important for the user of this pallet to ensure it always terminates +//! an election via `elect` before requesting a new one. +//! +//! Each of the phases can be disabled by essentially setting their length to zero. If both phases +//! have length zero, then the pallet essentially runs only the fallback strategy, denoted by +//! [`Config::Fallback`]. +//! +//! ### Signed Phase +//! +//! In the signed phase, solutions (of type [`RawSolution`]) are submitted and queued on chain. A +//! deposit is reserved, based on the size of the solution, for the cost of keeping this solution +//! on-chain for a number of blocks, and the potential weight of the solution upon being checked. A +//! maximum of `pallet::Config::SignedMaxSubmissions` solutions are stored.
The queue is always +//! sorted based on score (worst to best). +//! +//! Upon arrival of a new solution: +//! +//! 1. If the queue is not full, it is stored in the appropriate sorted index. +//! 2. If the queue is full but the submitted solution is better than one of the queued ones, the +//! worst solution is discarded, the bond of the outgoing solution is returned, and the new +//! solution is stored in the correct index. +//! 3. If the queue is full and the solution is not an improvement compared to any of the queued +//! ones, it is instantly rejected and no additional bond is reserved. +//! +//! (These rules are illustrated with a small sketch at the end of this section.) +//! +//! A signed solution cannot be reversed, taken back, updated, or retracted. In other words, the +//! origin cannot bail out in any way if their solution is queued. +//! +//! Upon the end of the signed phase, the solutions are examined from best to worst (i.e. `pop()`ed +//! until drained). Each solution undergoes an expensive `Pallet::feasibility_check`, which +//! ensures the score claimed by the solution is correct, and that it is valid based on the +//! election data (i.e. votes and candidates). At each step, if the current best solution passes +//! the feasibility check, it is considered to be the best one. The sender of the origin is +//! rewarded, and the rest of the queued solutions get their deposit back and are discarded, +//! without being checked. +//! +//! The following example covers all of the cases at the end of the signed phase: +//! +//! ```ignore +//! Queue +//! +-------------------------------+ +//! |Solution(score=20, valid=false)| +--> Slashed +//! +-------------------------------+ +//! |Solution(score=15, valid=true )| +--> Rewarded, Saved +//! +-------------------------------+ +//! |Solution(score=10, valid=true )| +--> Discarded +//! +-------------------------------+ +//! |Solution(score=05, valid=false)| +--> Discarded +//! +-------------------------------+ +//! | None | +//! +-------------------------------+ +//! ``` +//! +//! Note that both of the bottom solutions end up being discarded and get their deposit back, +//! despite one of them being *invalid*. +//! +//! ## Unsigned Phase +//! +//! The unsigned phase will always follow the signed phase, with the specified duration. In this +//! phase, only validator nodes can submit solutions. A validator node that has offchain workers +//! enabled will start to mine a solution in this phase and submit it back to the chain as an +//! unsigned transaction, thus the name _unsigned_ phase. This unsigned transaction can never be +//! valid if propagated, and it acts similarly to an inherent. +//! +//! Validators will only submit solutions if the one that they have computed is sufficiently better +//! than the best queued one (see [`pallet::Config::SolutionImprovementThreshold`]) and will limit +//! the weight of the solution to [`pallet::Config::MinerMaxWeight`]. +//! +//! The unsigned phase can be made passive depending on how the previous signed phase went, by +//! setting the first inner value of [`Phase`] to `false`. For now, the unsigned phase is always +//! active. +//! +//! ### Fallback +//! +//! If we reach the end of both phases (i.e. a call to [`ElectionProvider::elect`] happens) and no +//! good solution is queued, then the fallback strategy [`pallet::Config::Fallback`] is used to +//! determine what needs to be done. The on-chain election is slow, and contains no balancing or +//! reduction post-processing. [`NoFallback`] does nothing and enables [`Phase::Emergency`], which +//! is a more *fail-safe* approach. +//! 
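### Example: signed submission queue +//! +//! The signed-phase insertion rules above can be illustrated with the following simplified, +//! standalone sketch (not the pallet's actual types: a plain `u128` stands in for +//! `ElectionScore`, and real submissions also carry deposits, rounds, and submitter accounts): +//! +//! ```ignore +//! /// Try to insert `new` into a queue kept sorted worst-to-best, holding at most `max` items. +//! fn try_insert(queue: &mut Vec<u128>, new: u128, max: usize) -> bool { +//!     let pos = queue.partition_point(|&s| s < new); +//!     if queue.len() < max { +//!         // 1. not full: store at the sorted position. +//!         queue.insert(pos, new); +//!         true +//!     } else if queue.first().map_or(false, |&worst| new > worst) { +//!         // 2. full, but better than the worst: eject the worst (its bond is returned). +//!         queue.remove(0); +//!         queue.insert(pos - 1, new); +//!         true +//!     } else { +//!         // 3. full and no improvement: instantly rejected, no additional bond reserved. +//!         false +//!     } +//! } +//! ``` +//! +//! 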
### Emergency Phase +//! +//! If a call to `T::ElectionProvider::elect` is made and `Ok(_)` cannot be returned for any of +//! the reasons below, then the pallet proceeds to the [`Phase::Emergency`]: +//! +//! 1. No signed or unsigned solution was submitted, and the fallback is `None` or failed. +//! 2. An internal error occurred. +//! +//! During this phase, any solution can be submitted from +//! [`Config::ForceOrigin`], without any checking. Once submitted, the forced solution is kept in +//! [`QueuedSolution`] until the next call to `T::ElectionProvider::elect`, where it is returned and +//! [`Phase`] goes back to `Off`. +//! +//! This implies that the user of this pallet (i.e. a staking pallet) should re-try calling +//! `T::ElectionProvider::elect` in case of error, until `Ok(_)` is returned. +//! +//! ## Feasible Solution (correct solution) +//! +//! All submissions must undergo a feasibility check. Signed solutions are checked one by one at the +//! end of the signed phase, and the unsigned solutions are checked on the spot. A feasible solution +//! is as follows: +//! +//! 0. **all** of the used indices must be correct. +//! 1. it presents *exactly* the correct number of winners. +//! 2. every assignment is checked to match with [`RoundSnapshot::voters`]. +//! 3. the claimed score is valid, based on the fixed-point arithmetic accuracy. +//! +//! ## Accuracy +//! +//! The accuracy of the election is configured via +//! [`SolutionAccuracyOf`], which is the accuracy that the submitted solutions must adhere to. +//! +//! Note that the accuracy is of great importance. The offchain solution should be as small as +//! possible, reducing the solution's size/weight. +//! +//! ## Error types +//! +//! This pallet provides a verbose error system to ease future debugging and diagnostics. The +//! overall hierarchy of errors is as follows: +//! +//! 1. [`pallet::Error`]: These are the errors that can be returned in the dispatchables of the +//! pallet, either signed or unsigned. Since decomposition with nested enums is not possible +//! here, they are prefixed with the logical sub-system to which they belong. +//! 2. [`ElectionError`]: These are the errors that can be generated while the pallet is doing +//! something in automatic scenarios, such as `offchain_worker` or `on_initialize`. These errors +//! are helpful for logging and are thus nested as: +//! - [`ElectionError::Miner`]: wraps a [`unsigned::MinerError`]. +//! - [`ElectionError::Feasibility`]: wraps a [`FeasibilityError`]. +//! - [`ElectionError::OnChainFallback`]: wraps a +//! [`frame_election_provider_support::onchain::Error`]. +//! +//! Note that there could be an overlap between these sub-errors. For example, a +//! `SnapshotUnavailable` error can happen in both the miner and the feasibility check. +//! +//! ## Future Plans +//! +//! **Challenge Phase**. We plan on adding a third phase to the pallet, called the challenge phase. +//! This is a phase in which no further solutions are processed, and the current best solution might +//! be challenged by anyone (signed or unsigned). The main plan here is to enforce the solution to +//! be PJR. Checking PJR on-chain is quite expensive, yet proving that a solution is **not** PJR is +//! rather cheap. If a queued solution is successfully proven bad: +//! +//! 1. We must surely slash whoever submitted that solution (might be a challenge for unsigned +//! solutions). +//! 2. We will fall back to the emergency strategy (likely extending the current era). +//! +//! **Bailing out**.
The functionality of bailing out +//! of a queued solution would be nice. A miner could +//! submit a solution as soon as they _think_ it is feasible with high probability, do the checks +//! afterwards, and remove their solution (for a small cost of probably just transaction fees, or a +//! portion of the bond). +//! +//! **Conditionally open unsigned phase**: Currently, the unsigned phase is always open. This is +//! useful because an honest validator will run the Substrate OCW code, which should be good +//! enough to trump a mediocre or malicious signed submission (assuming the absence of honest +//! signed bots). If there are signed submissions, they can be checked against an absolute measure +//! (e.g. PJR), and then we would only open the unsigned phase in extreme conditions (i.e. "no good +//! signed solution received") to spare some work for the active validators. +//! +//! **Allow smaller solutions and build up**: For now we only allow solutions with exactly +//! [`DesiredTargets`] winners, no more, no less. Over time, we can change this to a [min, max] +//! range, where any solution within the range is acceptable and bigger solutions are prioritized. +//! +//! **Score based on (byte) size**: We should always prioritize small solutions over bigger ones, if +//! there is a tie. An even harsher approach would be to enforce the bound of the `reduce` +//! algorithm. +//! +//! **Take into account the encode/decode weight in benchmarks.** Currently, we only take into +//! account the weight of encode/decode in `submit_unsigned`, given its priority. Nonetheless, +//! all operations on the solution and the snapshot are worth taking this into account. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use frame_election_provider_support::{ElectionDataProvider, ElectionProvider}; +use frame_support::{ + dispatch::DispatchResultWithPostInfo, + ensure, + traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, + weights::{DispatchClass, Weight}, +}; +use frame_system::{ensure_none, offchain::SendTransactionTypes}; +use scale_info::TypeInfo; +use sp_arithmetic::{ + traits::{CheckedAdd, Saturating, Zero}, + UpperOf, +}; +use sp_npos_elections::{ + assignment_ratio_to_staked_normalized, ElectionScore, EvaluateSupport, NposSolution, Supports, + VoteWeight, +}; +use sp_runtime::{ + traits::Bounded, + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + TransactionValidityError, ValidTransaction, + }, + DispatchError, PerThing, Perbill, RuntimeDebug, SaturatedConversion, +}; +use sp_std::{convert::TryInto, prelude::*}; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +mod mock; +#[macro_use] +pub mod helpers; + +const LOG_TARGET: &'static str = "runtime::election-provider"; + +pub mod signed; +pub mod unsigned; +pub mod weights; +pub use weights::WeightInfo; + +pub use signed::{ + BalanceOf, NegativeImbalanceOf, PositiveImbalanceOf, SignedSubmission, SignedSubmissionOf, + SignedSubmissions, SubmissionIndicesOf, +}; + +/// The solution type used by this crate. +pub type SolutionOf = ::Solution; + +/// The voter index. Derived from [`SolutionOf`]. +pub type SolutionVoterIndexOf = as NposSolution>::VoterIndex; +/// The target index. Derived from [`SolutionOf`]. +pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex; +/// The accuracy of the election, when submitted from offchain. Derived from [`SolutionOf`]. +pub type SolutionAccuracyOf = as NposSolution>::Accuracy; +/// The fallback election type.
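+/// +/// Derived from [`Config::Fallback`]. As a hypothetical wiring sketch (not part of this file), a +/// runtime that wants to fail directly onto the emergency phase could set: +/// +/// ```ignore +/// impl pallet_election_provider_multi_phase::Config for Runtime { +///     // ... +///     type Fallback = NoFallback<Self>; +///     // ... +/// } +/// ```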
+pub type FallbackErrorOf = <::Fallback as ElectionProvider< + ::AccountId, + ::BlockNumber, +>>::Error; + +/// Configuration for the benchmarks of the pallet. +pub trait BenchmarkingConfig { + /// Range of voters. + const VOTERS: [u32; 2]; + /// Range of targets. + const TARGETS: [u32; 2]; + /// Range of active voters. + const ACTIVE_VOTERS: [u32; 2]; + /// Range of desired targets. + const DESIRED_TARGETS: [u32; 2]; + /// Maximum number of voters expected. This is used only for memory-benchmarking of snapshot. + const SNAPSHOT_MAXIMUM_VOTERS: u32; + /// Maximum number of voters expected. This is used only for memory-benchmarking of miner. + const MINER_MAXIMUM_VOTERS: u32; + /// Maximum number of targets expected. This is used only for memory-benchmarking. + const MAXIMUM_TARGETS: u32; +} + +impl BenchmarkingConfig for () { + const VOTERS: [u32; 2] = [4000, 6000]; + const TARGETS: [u32; 2] = [1000, 1600]; + const ACTIVE_VOTERS: [u32; 2] = [1000, 3000]; + const DESIRED_TARGETS: [u32; 2] = [400, 800]; + const SNAPSHOT_MAXIMUM_VOTERS: u32 = 10_000; + const MINER_MAXIMUM_VOTERS: u32 = 10_000; + const MAXIMUM_TARGETS: u32 = 2_000; +} + +/// A fallback implementation that transitions the pallet to the emergency phase. +pub struct NoFallback(sp_std::marker::PhantomData); + +impl ElectionProvider for NoFallback { + type DataProvider = T::DataProvider; + type Error = &'static str; + + fn elect() -> Result, Self::Error> { + // Do nothing, this will enable the emergency phase. + Err("NoFallback.") + } +} + +/// Current phase of the pallet. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] +pub enum Phase { + /// Nothing, the election is not happening. + Off, + /// Signed phase is open. + Signed, + /// Unsigned phase. First element is whether it is active or not, second the starting block + /// number. + /// + /// We do not yet check whether the unsigned phase is active or passive. The intent is for the + /// blockchain to be able to declare: "I believe that there exists an adequate signed + /// solution," advising validators not to bother running the unsigned offchain worker. + /// + /// As validator nodes are free to edit their OCW code, they could simply ignore this advisory + /// and always compute their own solution. However, by default, when the unsigned phase is + /// passive, the offchain workers will not bother running. + Unsigned((bool, Bn)), + /// The emergency phase. This is enabled upon a failing call to `T::ElectionProvider::elect`. + /// After that, the only way to leave this phase is through a successful + /// `T::ElectionProvider::elect`. + Emergency, +} + +impl Default for Phase { + fn default() -> Self { + Phase::Off + } +} + +impl Phase { + /// Whether the phase is emergency or not. + pub fn is_emergency(&self) -> bool { + matches!(self, Phase::Emergency) + } + + /// Whether the phase is signed or not. + pub fn is_signed(&self) -> bool { + matches!(self, Phase::Signed) + } + + /// Whether the phase is unsigned or not. + pub fn is_unsigned(&self) -> bool { + matches!(self, Phase::Unsigned(_)) + } + + /// Whether the phase is unsigned and open or not, with specific start. + pub fn is_unsigned_open_at(&self, at: Bn) -> bool { + matches!(self, Phase::Unsigned((true, real)) if *real == at) + } + + /// Whether the phase is unsigned and open or not. + pub fn is_unsigned_open(&self) -> bool { + matches!(self, Phase::Unsigned((true, _))) + } + + /// Whether the phase is off or not. 
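+ /// + /// For instance (illustrative only): + /// + /// ```ignore + /// assert!(Phase::<u32>::Off.is_off()); + /// assert!(!Phase::Unsigned((true, 0u32)).is_off()); + /// ```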
+ pub fn is_off(&self) -> bool { + matches!(self, Phase::Off) + } +} + +/// The type of `Computation` that provided this election data. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] +pub enum ElectionCompute { + /// Election was computed on-chain. + OnChain, + /// Election was computed with a signed submission. + Signed, + /// Election was computed with an unsigned submission. + Unsigned, + /// Election was computed using the fallback. + Fallback, + /// Election was computed with emergency status. + Emergency, +} + +impl Default for ElectionCompute { + fn default() -> Self { + ElectionCompute::OnChain + } +} + +/// A raw, unchecked solution. +/// +/// This is what will get submitted to the chain. +/// +/// Such a solution should never become effective in any way before being checked by +/// `Pallet::feasibility_check`. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, PartialOrd, Ord, TypeInfo)] +pub struct RawSolution { + /// The solution itself. + pub solution: S, + /// The _claimed_ score of the solution. + pub score: ElectionScore, + /// The round at which this solution should be submitted. + pub round: u32, +} + +impl Default for RawSolution { + fn default() -> Self { + // Round 0 is always invalid, so default to round 1. + Self { round: 1, solution: Default::default(), score: Default::default() } + } +} + +/// A checked solution, ready to be enacted. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, TypeInfo)] +pub struct ReadySolution { + /// The final supports of the solution. + /// + /// This is a target-major vector, storing each winner, their total backing, and each + /// individual backer. + pub supports: Supports, + /// The score of the solution. + /// + /// This is needed to potentially challenge the solution. + pub score: ElectionScore, + /// How this election was computed. + pub compute: ElectionCompute, +} + +/// A snapshot of all the data that is needed for an entire round. They are provided by +/// [`ElectionDataProvider`] and are kept around until the round is finished. +/// +/// These are stored together because they are often accessed together. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, TypeInfo)] +pub struct RoundSnapshot { + /// All of the voters. + pub voters: Vec<(A, VoteWeight, Vec)>, + /// All of the targets. + pub targets: Vec, +} + +/// Encodes the length of a solution or a snapshot. +/// +/// This is stored automatically on-chain, and it contains the **size of the entire snapshot**. +/// This is also used in dispatchables as weight witness data and should **only contain the size of +/// the presented solution**, not the entire snapshot. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, Default, TypeInfo)] +pub struct SolutionOrSnapshotSize { + /// The length of voters. + #[codec(compact)] + pub voters: u32, + /// The length of targets. + #[codec(compact)] + pub targets: u32, +} + +/// Internal errors of the pallet. +/// +/// Note that this is different from [`pallet::Error`]. +#[derive(frame_support::DebugNoBound)] +#[cfg_attr(feature = "runtime-benchmarks", derive(strum_macros::IntoStaticStr))] +pub enum ElectionError { + /// An error happened in the feasibility check sub-system. + Feasibility(FeasibilityError), + /// An error in the miner (offchain) sub-system. + Miner(unsigned::MinerError), + /// An error happened in the data provider. + DataProvider(&'static str), + /// An error nested in the fallback.
+ Fallback(FallbackErrorOf), +} + +// NOTE: we have to do this manually because of the additional where clause needed on +// `FallbackErrorOf`. +#[cfg(test)] +impl PartialEq for ElectionError +where + FallbackErrorOf: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + use ElectionError::*; + match (self, other) { + (&Feasibility(ref x), &Feasibility(ref y)) if x == y => true, + (&Miner(ref x), &Miner(ref y)) if x == y => true, + (&DataProvider(ref x), &DataProvider(ref y)) if x == y => true, + (&Fallback(ref x), &Fallback(ref y)) if x == y => true, + _ => false, + } + } +} + +impl From for ElectionError { + fn from(e: FeasibilityError) -> Self { + ElectionError::Feasibility(e) + } +} + +impl From> for ElectionError { + fn from(e: unsigned::MinerError) -> Self { + ElectionError::Miner(e) + } +} + +/// Errors that can happen in the feasibility check. +#[derive(Debug, Eq, PartialEq)] +#[cfg_attr(feature = "runtime-benchmarks", derive(strum_macros::IntoStaticStr))] +pub enum FeasibilityError { + /// Wrong number of winners presented. + WrongWinnerCount, + /// The snapshot is not available. + /// + /// Kinda defensive: The pallet should technically never attempt to do a feasibility check when + /// no snapshot is present. + SnapshotUnavailable, + /// Internal error from the election crate. + NposElection(sp_npos_elections::Error), + /// A vote is invalid. + InvalidVote, + /// A voter is invalid. + InvalidVoter, + /// The given score was invalid. + InvalidScore, + /// The provided round is incorrect. + InvalidRound, + /// Comparison against `MinimumUntrustedScore` failed. + UntrustedScoreTooLow, +} + +impl From for FeasibilityError { + fn from(e: sp_npos_elections::Error) -> Self { + FeasibilityError::NposElection(e) + } +} + +pub use pallet::*; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_election_provider_support::NposSolver; + use frame_support::{pallet_prelude::*, traits::EstimateCallFee}; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config + SendTransactionTypes> { + type Event: From> + + IsType<::Event> + + TryInto>; + + /// Currency type. + type Currency: ReservableCurrency + Currency; + + /// Something that can predict the fee of a call. Used to sensibly distribute rewards. + type EstimateCallFee: EstimateCallFee, BalanceOf>; + + /// Duration of the unsigned phase. + #[pallet::constant] + type UnsignedPhase: Get; + /// Duration of the signed phase. + #[pallet::constant] + type SignedPhase: Get; + + /// The minimum amount of improvement to the solution score that defines a solution as + /// "better" (in any phase). + #[pallet::constant] + type SolutionImprovementThreshold: Get; + + /// The repeat threshold of the offchain worker. + /// + /// For example, if it is 5, that means that at least 5 blocks will elapse between attempts + /// to submit the worker's solution. + #[pallet::constant] + type OffchainRepeat: Get; + + /// The priority of the unsigned transaction submitted in the unsigned-phase + #[pallet::constant] + type MinerTxPriority: Get; + + /// Maximum weight that the miner should consume. + /// + /// The miner will ensure that the total weight of the unsigned solution will not exceed + /// this value, based on [`WeightInfo::submit_unsigned`]. + #[pallet::constant] + type MinerMaxWeight: Get; + + /// Maximum number of signed submissions that can be queued. + /// + /// It is best to avoid adjusting this during an election, as it impacts downstream data + /// structures. 
In particular, `SignedSubmissionIndices` is bounded on this value. If you + /// update this value during an election, you _must_ ensure that + /// `SignedSubmissionIndices.len()` is less than or equal to the new value. Otherwise, + /// attempts to submit new solutions may cause a runtime panic. + #[pallet::constant] + type SignedMaxSubmissions: Get; + + /// Maximum weight of a signed solution. + /// + /// This should probably be similar to [`Config::MinerMaxWeight`]. + #[pallet::constant] + type SignedMaxWeight: Get; + + /// Base reward for a signed solution. + #[pallet::constant] + type SignedRewardBase: Get>; + + /// Base deposit for a signed solution. + #[pallet::constant] + type SignedDepositBase: Get>; + + /// Per-byte deposit for a signed solution. + #[pallet::constant] + type SignedDepositByte: Get>; + + /// Per-weight deposit for a signed solution. + #[pallet::constant] + type SignedDepositWeight: Get>; + + /// Handler for the slashed deposits. + type SlashHandler: OnUnbalanced>; + + /// Handler for the rewards. + type RewardHandler: OnUnbalanced>; + + /// Maximum length (bytes) that the mined solution should consume. + /// + /// The miner will ensure that the total length of the unsigned solution will not exceed + /// this value. + #[pallet::constant] + type MinerMaxLength: Get; + + /// Something that will provide the election data. + type DataProvider: ElectionDataProvider; + + /// The solution type. + type Solution: codec::Codec + + Default + + PartialEq + + Eq + + Clone + + sp_std::fmt::Debug + + Ord + + NposSolution + + TypeInfo; + + /// Configuration for the fallback. + type Fallback: ElectionProvider< + Self::AccountId, + Self::BlockNumber, + DataProvider = Self::DataProvider, + >; + + /// OCW election solution miner algorithm implementation. + type Solver: NposSolver; + + /// Origin that can control this pallet. Note that any action taken by this origin (such + /// as providing an emergency solution) is not checked. Thus, it must be a trusted origin. + type ForceOrigin: EnsureOrigin; + + /// The configuration of benchmarking. + type BenchmarkingConfig: BenchmarkingConfig; + + /// The weight of the pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(now: T::BlockNumber) -> Weight { + let next_election = T::DataProvider::next_election_prediction(now).max(now); + + let signed_deadline = T::SignedPhase::get() + T::UnsignedPhase::get(); + let unsigned_deadline = T::UnsignedPhase::get(); + + let remaining = next_election - now; + let current_phase = Self::current_phase(); + + log!( + trace, + "current phase {:?}, next election {:?}, metadata: {:?}", + current_phase, + next_election, + Self::snapshot_metadata() + ); + match current_phase { + Phase::Off if remaining <= signed_deadline && remaining > unsigned_deadline => { + // NOTE: if signed-phase length is zero, second part of the if-condition fails. + match Self::create_snapshot() { + Ok(_) => { + Self::on_initialize_open_signed(); + T::WeightInfo::on_initialize_open_signed() + }, + Err(why) => { + // Not much we can do about this at this point.
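+ // The phase simply stays `Off`; the transition will be retried the + // next time `on_initialize` runs.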
+ log!(warn, "failed to open signed phase due to {:?}", why); + T::WeightInfo::on_initialize_nothing() + }, + } + }, + Phase::Signed | Phase::Off + if remaining <= unsigned_deadline && remaining > Zero::zero() => + { + // our needs vary according to whether or not the unsigned phase follows a + // signed phase + let (need_snapshot, enabled) = if current_phase == Phase::Signed { + // there was previously a signed phase: close the signed phase, no need for + // snapshot. + // + // Notes: + // + // - `Self::finalize_signed_phase()` also appears in `fn do_elect`. This + // is a guard against the case that `elect` is called prematurely. This + // adds a small amount of overhead, but that is unfortunately + // unavoidable. + let _ = Self::finalize_signed_phase(); + // In the future we can consider disabling the unsigned phase if the signed + // phase completes successfully, but for now we're enabling it + // unconditionally as a defensive measure. + (false, true) + } else { + // No signed phase: create a new snapshot, definitely `enable` the unsigned + // phase. + (true, true) + }; + + if need_snapshot { + match Self::create_snapshot() { + Ok(_) => { + Self::on_initialize_open_unsigned(enabled, now); + T::WeightInfo::on_initialize_open_unsigned() + }, + Err(why) => { + log!(warn, "failed to open unsigned phase due to {:?}", why); + T::WeightInfo::on_initialize_nothing() + }, + } + } else { + Self::on_initialize_open_unsigned(enabled, now); + T::WeightInfo::on_initialize_open_unsigned() + } + } + _ => T::WeightInfo::on_initialize_nothing(), + } + } + + fn offchain_worker(now: T::BlockNumber) { + use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; + + // Create a lock with the maximum deadline of number of blocks in the unsigned phase. + // This should only come useful in an **abrupt** termination of execution, otherwise the + // guard will be dropped upon successful execution. + let mut lock = + StorageLock::>>::with_block_deadline( + unsigned::OFFCHAIN_LOCK, + T::UnsignedPhase::get().saturated_into(), + ); + + match lock.try_lock() { + Ok(_guard) => { + Self::do_synchronized_offchain_worker(now); + }, + Err(deadline) => { + log!(debug, "offchain worker lock not released, deadline is {:?}", deadline); + }, + }; + } + + fn integrity_test() { + use sp_std::mem::size_of; + // The index type of both voters and targets need to be smaller than that of usize (very + // unlikely to be the case, but anyhow). + assert!(size_of::>() <= size_of::()); + assert!(size_of::>() <= size_of::()); + + // ---------------------------- + // Based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. + let max_vote: usize = as NposSolution>::LIMIT; + + // 2. Maximum sum of [SolutionAccuracy; 16] must fit into `UpperOf`. + let maximum_chain_accuracy: Vec>> = (0..max_vote) + .map(|_| { + >>::from( + >::one().deconstruct(), + ) + }) + .collect(); + let _: UpperOf> = maximum_chain_accuracy + .iter() + .fold(Zero::zero(), |acc, x| acc.checked_add(x).unwrap()); + + // We only accept data provider who's maximum votes per voter matches our + // `T::Solution`'s `LIMIT`. + // + // NOTE that this pallet does not really need to enforce this in runtime. The + // solution cannot represent any voters more than `LIMIT` anyhow. + assert_eq!( + >::MAXIMUM_VOTES_PER_VOTER, + as NposSolution>::LIMIT as u32, + ); + } + } + + #[pallet::call] + impl Pallet { + /// Submit a solution for the unsigned phase. + /// + /// The dispatch origin fo this call must be __none__. 
+ /// + /// This submission is checked on the fly. Moreover, this unsigned solution is only + /// validated when submitted to the pool from the **local** node. Effectively, this means + /// that only active validators can submit this transaction when authoring a block (similar + /// to an inherent). + /// + /// To prevent any incorrect solution (and thus wasted time/weight), this transaction will + /// panic if the solution submitted by the validator is invalid in any way, effectively + /// putting their authoring reward at risk. + /// + /// No deposit or reward is associated with this submission. + #[pallet::weight(( + T::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + raw_solution.solution.voter_count() as u32, + raw_solution.solution.unique_targets().len() as u32 + ), + DispatchClass::Operational, + ))] + pub fn submit_unsigned( + origin: OriginFor, + raw_solution: Box>>, + witness: SolutionOrSnapshotSize, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + let error_message = "Invalid unsigned submission must produce invalid block and \ + deprive the validator of their authoring reward."; + + // Check score being an improvement, phase, and desired targets. + Self::unsigned_pre_dispatch_checks(&raw_solution).expect(error_message); + + // Ensure witness was correct. + let SolutionOrSnapshotSize { voters, targets } = + Self::snapshot_metadata().expect(error_message); + + // NOTE: we are asserting, not `ensure`ing -- we want to panic here. + assert!(voters as u32 == witness.voters, "{}", error_message); + assert!(targets as u32 == witness.targets, "{}", error_message); + + let ready = Self::feasibility_check(*raw_solution, ElectionCompute::Unsigned) + .expect(error_message); + + // Store the newly received solution. + log!(info, "queued unsigned solution with score {:?}", ready.score); + let ejected_a_solution = >::exists(); + >::put(ready); + Self::deposit_event(Event::SolutionStored( + ElectionCompute::Unsigned, + ejected_a_solution, + )); + + Ok(None.into()) + } + + /// Set a new value for `MinimumUntrustedScore`. + /// + /// Dispatch origin must be aligned with `T::ForceOrigin`. + /// + /// This check can be turned off by setting the value to `None`. + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_minimum_untrusted_score( + origin: OriginFor, + maybe_next_score: Option, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + >::set(maybe_next_score); + Ok(()) + } + + /// Set a solution in the queue, to be handed out to the client of this pallet in the next + /// call to `ElectionProvider::elect`. + /// + /// This can only be set by `T::ForceOrigin`, and only when the phase is `Emergency`. + /// + /// The solution is not checked for any feasibility and is assumed to be trustworthy, as any + /// feasibility check itself can in principle cause the election process to fail (due to + /// memory/weight constraints). + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn set_emergency_election_result( + origin: OriginFor, + supports: Supports, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); + + // Note: we don't `rotate_round` at this point; the next call to + // `ElectionProvider::elect` will succeed and take care of that. + + let solution = + ReadySolution { supports, score: [0, 0, 0], compute: ElectionCompute::Emergency }; + + >::put(solution); + Ok(()) + } + + /// Submit a solution for the signed phase.
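+ /// + /// One possible submission flow (an assumption, not enforced here): mine or otherwise + /// obtain a solution off-chain, read the current number of queued submissions to use as + /// the `num_signed_submissions` witness, and then call this dispatchable; the witness is + /// checked against `SignedSubmissionIndices` in the body below.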
+ ///
+ /// The dispatch origin for this call must be __signed__.
+ ///
+ /// The solution is potentially queued, based on the claimed score, and processed at the
+ /// end of the signed phase.
+ ///
+ /// A deposit is reserved and recorded for the solution. Based on the outcome, the solution
+ /// might be rewarded, slashed, or get all or a part of the deposit back.
+ ///
+ /// #
+ /// Queue size must be provided as witness data.
+ /// #
+ #[pallet::weight(T::WeightInfo::submit(*num_signed_submissions))]
+ pub fn submit(
+ origin: OriginFor<T>,
+ raw_solution: Box<RawSolution<SolutionOf<T>>>,
+ num_signed_submissions: u32,
+ ) -> DispatchResult {
+ let who = ensure_signed(origin)?;
+
+ // ensure witness data is correct.
+ ensure!(
+ num_signed_submissions >=
+ <SignedSubmissions<T>>::decode_len().unwrap_or_default() as u32,
+ Error::<T>::SignedInvalidWitness,
+ );
+
+ // ensure solution is timely.
+ ensure!(Self::current_phase().is_signed(), Error::<T>::PreDispatchEarlySubmission);
+
+ // NOTE: this is the only case where having a separate snapshot would have been
+ // better, because we could then just use `decode_len`. But we can create abstractions
+ // to do this.
+
+ // build size. Note: this is not needed for weight calc, thus not an input.
+ // unlikely to ever return an error: if the phase is signed, the snapshot will exist.
+ let size = Self::snapshot_metadata().ok_or(Error::<T>::MissingSnapshotMetadata)?;
+
+ ensure!(
+ Self::feasibility_weight_of(&raw_solution, size) < T::SignedMaxWeight::get(),
+ Error::<T>::SignedTooMuchWeight,
+ );
+
+ // create the submission
+ let deposit = Self::deposit_for(&raw_solution, size);
+ let reward = {
+ let call =
+ Call::submit { raw_solution: raw_solution.clone(), num_signed_submissions };
+ let call_fee = T::EstimateCallFee::estimate_call_fee(&call, None.into());
+ T::SignedRewardBase::get().saturating_add(call_fee)
+ };
+
+ let submission =
+ SignedSubmission { who: who.clone(), deposit, raw_solution: *raw_solution, reward };
+
+ // insert the submission if the queue has space or it's better than the weakest;
+ // eject the weakest if the queue was full.
+ let mut signed_submissions = Self::signed_submissions();
+ let maybe_removed = match signed_submissions.insert(submission) {
+ // it's an error if we failed to insert a submission: this indicates the queue was
+ // full but our solution had insufficient score to eject any solution
+ signed::InsertResult::NotInserted => return Err(Error::<T>::SignedQueueFull.into()),
+ signed::InsertResult::Inserted => None,
+ signed::InsertResult::InsertedEjecting(weakest) => Some(weakest),
+ };
+
+ // collect the deposit. Thereafter, the function cannot fail.
+ T::Currency::reserve(&who, deposit).map_err(|_| Error::<T>::SignedCannotPayDeposit)?;
+
+ let ejected_a_solution = maybe_removed.is_some();
+ // if we had to remove the weakest solution, unreserve its deposit
+ if let Some(removed) = maybe_removed {
+ let _remainder = T::Currency::unreserve(&removed.who, removed.deposit);
+ debug_assert!(_remainder.is_zero());
+ }
+
+ signed_submissions.put();
+ Self::deposit_event(Event::SolutionStored(ElectionCompute::Signed, ejected_a_solution));
+ Ok(())
+ }
+ }
+
+ #[pallet::event]
+ #[pallet::generate_deposit(pub(super) fn deposit_event)]
+ pub enum Event<T: Config> {
+ /// A solution was stored with the given compute.
+ ///
+ /// If the solution is signed, this means that it hasn't yet been processed. If the
+ /// solution is unsigned, this means that it has also been processed.
+ ///
+ /// The `bool` is `true` when a previous solution was ejected to make room for this one.
+ SolutionStored(ElectionCompute, bool),
+ /// The election has been finalized, with `Some` of the given computation, or else if the
+ /// election failed, `None`.
+ ElectionFinalized(Option<ElectionCompute>),
+ /// An account has been rewarded for their signed submission being finalized.
+ Rewarded(<T as frame_system::Config>::AccountId, BalanceOf<T>),
+ /// An account has been slashed for submitting an invalid signed submission.
+ Slashed(<T as frame_system::Config>::AccountId, BalanceOf<T>),
+ /// The signed phase of the given round has started.
+ SignedPhaseStarted(u32),
+ /// The unsigned phase of the given round has started.
+ UnsignedPhaseStarted(u32),
+ }
+
+ /// Error of the pallet that can be returned in response to dispatches.
+ #[pallet::error]
+ pub enum Error<T> {
+ /// Submission was too early.
+ PreDispatchEarlySubmission,
+ /// Wrong number of winners presented.
+ PreDispatchWrongWinnerCount,
+ /// Submission was too weak, score-wise.
+ PreDispatchWeakSubmission,
+ /// The queue was full, and the solution was not better than any of the existing ones.
+ SignedQueueFull,
+ /// The origin failed to pay the deposit.
+ SignedCannotPayDeposit,
+ /// Witness data to dispatchable is invalid.
+ SignedInvalidWitness,
+ /// The signed submission consumes too much weight.
+ SignedTooMuchWeight,
+ /// OCW submitted solution for the wrong round.
+ OcwCallWrongEra,
+ /// Snapshot metadata should exist but didn't.
+ MissingSnapshotMetadata,
+ /// `Self::insert_submission` returned an invalid index.
+ InvalidSubmissionIndex,
+ /// The call is not allowed at this point.
+ CallNotAllowed,
+ }
+
+ #[pallet::validate_unsigned]
+ impl<T: Config> ValidateUnsigned for Pallet<T> {
+ type Call = Call<T>;
+ fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity {
+ if let Call::submit_unsigned { raw_solution, .. } = call {
+ // Discard any solution not coming from the local OCW.
+ match source {
+ TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ },
+ _ => return InvalidTransaction::Call.into(),
+ }
+
+ let _ = Self::unsigned_pre_dispatch_checks(raw_solution)
+ .map_err(|err| {
+ log!(debug, "unsigned transaction validation failed due to {:?}", err);
+ err
+ })
+ .map_err(dispatch_error_to_invalid)?;
+
+ ValidTransaction::with_tag_prefix("OffchainElection")
+ // The higher the score[0], the better a solution is.
+ .priority(
+ T::MinerTxPriority::get()
+ .saturating_add(raw_solution.score[0].saturated_into()),
+ )
+ // Used to deduplicate unsigned solutions: each validator should produce one
+ // solution per round at most, and solutions are not propagated.
+ .and_provides(raw_solution.round)
+ // Transaction should stay in the pool for the duration of the unsigned phase.
+ .longevity(T::UnsignedPhase::get().saturated_into::<u64>())
+ // We don't propagate this. This can never be validated at a remote node.
+ .propagate(false)
+ .build()
+ } else {
+ InvalidTransaction::Call.into()
+ }
+ }
+
+ fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
+ if let Call::submit_unsigned { raw_solution, .. } = call {
+ Self::unsigned_pre_dispatch_checks(raw_solution)
+ .map_err(dispatch_error_to_invalid)
+ .map_err(Into::into)
+ } else {
+ Err(InvalidTransaction::Call.into())
+ }
+ }
+ }
+
+ #[pallet::type_value]
+ pub fn DefaultForRound() -> u32 {
+ 1
+ }
+
+ /// Internal counter for the number of rounds.
+ ///
+ /// This is useful for de-duplication of transactions submitted to the pool, and general
+ /// diagnostics of the pallet.
+ ///
+ /// This is merely incremented once every time that an upstream `elect` is called.
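+ ///
+ /// Note that the counter starts at `1` (see `DefaultForRound` above), so the very first
+ /// election is round number one.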
+ #[pallet::storage] + #[pallet::getter(fn round)] + pub type Round = StorageValue<_, u32, ValueQuery, DefaultForRound>; + + /// Current phase. + #[pallet::storage] + #[pallet::getter(fn current_phase)] + pub type CurrentPhase = StorageValue<_, Phase, ValueQuery>; + + /// Current best solution, signed or unsigned, queued to be returned upon `elect`. + #[pallet::storage] + #[pallet::getter(fn queued_solution)] + pub type QueuedSolution = StorageValue<_, ReadySolution>; + + /// Snapshot data of the round. + /// + /// This is created at the beginning of the signed phase and cleared upon calling `elect`. + #[pallet::storage] + #[pallet::getter(fn snapshot)] + pub type Snapshot = StorageValue<_, RoundSnapshot>; + + /// Desired number of targets to elect for this round. + /// + /// Only exists when [`Snapshot`] is present. + #[pallet::storage] + #[pallet::getter(fn desired_targets)] + pub type DesiredTargets = StorageValue<_, u32>; + + /// The metadata of the [`RoundSnapshot`] + /// + /// Only exists when [`Snapshot`] is present. + #[pallet::storage] + #[pallet::getter(fn snapshot_metadata)] + pub type SnapshotMetadata = StorageValue<_, SolutionOrSnapshotSize>; + + // The following storage items collectively comprise `SignedSubmissions`, and should never be + // accessed independently. Instead, get `Self::signed_submissions()`, modify it as desired, and + // then do `signed_submissions.put()` when you're done with it. + + /// The next index to be assigned to an incoming signed submission. + /// + /// Every accepted submission is assigned a unique index; that index is bound to that particular + /// submission for the duration of the election. On election finalization, the next index is + /// reset to 0. + /// + /// We can't just use `SignedSubmissionIndices.len()`, because that's a bounded set; past its + /// capacity, it will simply saturate. We can't just iterate over `SignedSubmissionsMap`, + /// because iteration is slow. Instead, we store the value here. + #[pallet::storage] + pub(crate) type SignedSubmissionNextIndex = StorageValue<_, u32, ValueQuery>; + + /// A sorted, bounded set of `(score, index)`, where each `index` points to a value in + /// `SignedSubmissions`. + /// + /// We never need to process more than a single signed submission at a time. Signed submissions + /// can be quite large, so we're willing to pay the cost of multiple database accesses to access + /// them one at a time instead of reading and decoding all of them at once. + #[pallet::storage] + pub(crate) type SignedSubmissionIndices = + StorageValue<_, SubmissionIndicesOf, ValueQuery>; + + /// Unchecked, signed solutions. + /// + /// Together with `SubmissionIndices`, this stores a bounded set of `SignedSubmissions` while + /// allowing us to keep only a single one in memory at a time. + /// + /// Twox note: the key of the map is an auto-incrementing index which users cannot inspect or + /// affect; we shouldn't need a cryptographically secure hasher. + #[pallet::storage] + pub(crate) type SignedSubmissionsMap = + StorageMap<_, Twox64Concat, u32, SignedSubmissionOf, ValueQuery>; + + // `SignedSubmissions` items end here. + + /// The minimum score that each 'untrusted' solution must attain in order to be considered + /// feasible. + /// + /// Can be set via `set_minimum_untrusted_score`. 
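+ ///
+ /// Note: in [`Pallet::feasibility_check`], any solution whose score does not improve upon
+ /// this value (as per `sp_npos_elections::is_score_better` with a zero epsilon) is
+ /// rejected with `UntrustedScoreTooLow`.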
+ #[pallet::storage] + #[pallet::getter(fn minimum_untrusted_score)] + pub type MinimumUntrustedScore = StorageValue<_, ElectionScore>; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); +} + +impl Pallet { + /// Internal logic of the offchain worker, to be executed only when the offchain lock is + /// acquired with success. + fn do_synchronized_offchain_worker(now: T::BlockNumber) { + let current_phase = Self::current_phase(); + log!(trace, "lock for offchain worker acquired. Phase = {:?}", current_phase); + match current_phase { + Phase::Unsigned((true, opened)) if opened == now => { + // Mine a new solution, cache it, and attempt to submit it + let initial_output = Self::ensure_offchain_repeat_frequency(now) + .and_then(|_| Self::mine_check_save_submit()); + log!(debug, "initial offchain thread output: {:?}", initial_output); + }, + Phase::Unsigned((true, opened)) if opened < now => { + // Try and resubmit the cached solution, and recompute ONLY if it is not + // feasible. + let resubmit_output = Self::ensure_offchain_repeat_frequency(now) + .and_then(|_| Self::restore_or_compute_then_maybe_submit()); + log!(debug, "resubmit offchain thread output: {:?}", resubmit_output); + }, + _ => {}, + } + + // After election finalization, clear OCW solution storage. + // + // We can read the events here because offchain worker doesn't affect PoV. + if >::read_events_no_consensus() + .into_iter() + .filter_map(|event_record| { + let local_event = ::Event::from(event_record.event); + local_event.try_into().ok() + }) + .any(|event| matches!(event, Event::ElectionFinalized(_))) + { + unsigned::kill_ocw_solution::(); + } + } + + /// Logic for [`::on_initialize`] when signed phase is being opened. + pub fn on_initialize_open_signed() { + log!(info, "Starting signed phase round {}.", Self::round()); + >::put(Phase::Signed); + Self::deposit_event(Event::SignedPhaseStarted(Self::round())); + } + + /// Logic for [`>::on_initialize`] when unsigned phase is being opened. + pub fn on_initialize_open_unsigned(enabled: bool, now: T::BlockNumber) { + let round = Self::round(); + log!(info, "Starting unsigned phase round {} enabled {}.", round, enabled); + >::put(Phase::Unsigned((enabled, now))); + Self::deposit_event(Event::UnsignedPhaseStarted(round)); + } + + /// Parts of [`create_snapshot`] that happen inside of this pallet. + /// + /// Extracted for easier weight calculation. + fn create_snapshot_internal( + targets: Vec, + voters: Vec>, + desired_targets: u32, + ) { + let metadata = + SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32 }; + log!(info, "creating a snapshot with metadata {:?}", metadata); + + >::put(metadata); + >::put(desired_targets); + + // instead of using storage APIs, we do a manual encoding into a fixed-size buffer. + // `encoded_size` encodes it without storing it anywhere, this should not cause any + // allocation. + let snapshot = RoundSnapshot { voters, targets }; + let size = snapshot.encoded_size(); + log!(debug, "snapshot pre-calculated size {:?}", size); + let mut buffer = Vec::with_capacity(size); + snapshot.encode_to(&mut buffer); + + // do some checks. + debug_assert_eq!(buffer, snapshot.encode()); + // buffer should have not re-allocated since. + debug_assert!(buffer.len() == size && size == buffer.capacity()); + + sp_io::storage::set(&>::hashed_key(), &buffer); + } + + /// Parts of [`create_snapshot`] that happen outside of this pallet. 
+ ///
+ /// Extracted for easier weight calculation.
+ fn create_snapshot_external(
+ ) -> Result<(Vec<T::AccountId>, Vec<crate::unsigned::Voter<T>>, u32), ElectionError> {
+ let target_limit = <SolutionTargetIndexOf<T>>::max_value().saturated_into::<usize>();
+ let voter_limit = <SolutionVoterIndexOf<T>>::max_value().saturated_into::<usize>();
+
+ let targets =
+ T::DataProvider::targets(Some(target_limit)).map_err(ElectionError::DataProvider)?;
+ let voters =
+ T::DataProvider::voters(Some(voter_limit)).map_err(ElectionError::DataProvider)?;
+ let desired_targets =
+ T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?;
+
+ // Defensive-only.
+ if targets.len() > target_limit || voters.len() > voter_limit {
+ debug_assert!(false, "Snapshot limit has not been respected.");
+ return Err(ElectionError::DataProvider("Snapshot too big for submission."))
+ }
+
+ Ok((targets, voters, desired_targets))
+ }
+
+ /// Creates the snapshot. Writes new data to:
+ ///
+ /// 1. [`SnapshotMetadata`]
+ /// 2. [`RoundSnapshot`]
+ /// 3. [`DesiredTargets`]
+ ///
+ /// Returns `Ok(())` if the operation succeeded.
+ ///
+ /// This is a *self-weighing* function; it will register its own extra weight as
+ /// [`DispatchClass::Mandatory`] with the system pallet.
+ pub fn create_snapshot() -> Result<(), ElectionError> {
+ // the call to `create_snapshot_external` weighs itself..
+ let (targets, voters, desired_targets) = Self::create_snapshot_external()?;
+
+ // ..therefore we only measure the weight of the internal part and add it.
+ Self::create_snapshot_internal(targets, voters, desired_targets);
+ Self::register_weight(T::WeightInfo::create_snapshot_internal());
+ Ok(())
+ }
+
+ /// Register some amount of weight directly with the system pallet.
+ ///
+ /// This is always mandatory weight.
+ fn register_weight(weight: Weight) {
+ <frame_system::Pallet<T>>::register_extra_weight_unchecked(
+ weight,
+ DispatchClass::Mandatory,
+ );
+ }
+
+ /// Kill everything created by [`Pallet::create_snapshot`].
+ pub fn kill_snapshot() {
+ <Snapshot<T>>::kill();
+ <SnapshotMetadata<T>>::kill();
+ <DesiredTargets<T>>::kill();
+ }
+
+ /// Checks the feasibility of a solution.
+ pub fn feasibility_check(
+ raw_solution: RawSolution<SolutionOf<T>>,
+ compute: ElectionCompute,
+ ) -> Result<ReadySolution<T::AccountId>, FeasibilityError> {
+ let RawSolution { solution, score, round } = raw_solution;
+
+ // First, check the round.
+ ensure!(Self::round() == round, FeasibilityError::InvalidRound);
+
+ // Winners are not directly encoded in the solution.
+ let winners = solution.unique_targets();
+
+ let desired_targets =
+ Self::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?;
+
+ // NOTE: this is a bit of a duplicate, but we keep it around for veracity. The unsigned
+ // path already checked this in `unsigned_pre_dispatch_checks`. The signed path *could*
+ // check it upon arrival, in which case we could remove it here. Given the storage
+ // overlay, it is cheap anyhow.
+ ensure!(winners.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount);
+
+ // Ensure that the solution's score can pass the absolute min-score.
+ let submitted_score = raw_solution.score.clone();
+ ensure!(
+ Self::minimum_untrusted_score().map_or(true, |min_score| {
+ sp_npos_elections::is_score_better(submitted_score, min_score, Perbill::zero())
+ }),
+ FeasibilityError::UntrustedScoreTooLow
+ );
+
+ // Read the entire snapshot.
+ let RoundSnapshot { voters: snapshot_voters, targets: snapshot_targets } =
+ Self::snapshot().ok_or(FeasibilityError::SnapshotUnavailable)?;
+
+ // ----- Start building. First, we need some closures.
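+ // (Illustrative note: `voter_at`/`target_at` translate solution indices back to
+ // snapshot accounts by position, while `cache` is an account -> index map that backs
+ // `voter_index` (and, later, `stake_of`) so those lookups don't degenerate into linear
+ // scans.)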
+ let cache = helpers::generate_voter_cache::<T>(&snapshot_voters);
+ let voter_at = helpers::voter_at_fn::<T>(&snapshot_voters);
+ let target_at = helpers::target_at_fn::<T>(&snapshot_targets);
+ let voter_index = helpers::voter_index_fn_usize::<T>(&cache);
+
+ // Then convert solution -> assignment. This will fail if any of the indices are
+ // gibberish, namely any of the voters or targets.
+ let assignments = solution
+ .into_assignment(voter_at, target_at)
+ .map_err::<FeasibilityError, _>(Into::into)?;
+
+ // Ensure that the assignments are correct.
+ let _ = assignments
+ .iter()
+ .map(|ref assignment| {
+ // Check that assignment.who is actually a voter (defensive-only).
+ // NOTE: while using the index map from `voter_index` is better than a blind linear
+ // search, this *still* has room for optimization. Note that we had the index when
+ // we did `solution -> assignment` and we lost it. Ideally, we would keep the index
+ // around.
+
+ // Defensive-only: must exist in the snapshot.
+ let snapshot_index =
+ voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?;
+ // Defensive-only: the index comes from the snapshot, so it must exist.
+ let (_voter, _stake, targets) =
+ snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?;
+
+ // Check that all of the targets are valid based on the snapshot.
+ if assignment.distribution.iter().any(|(d, _)| !targets.contains(d)) {
+ return Err(FeasibilityError::InvalidVote)
+ }
+ Ok(())
+ })
+ .collect::<Result<(), FeasibilityError>>()?;
+
+ // ----- Start building support. First, we need one more closure.
+ let stake_of = helpers::stake_of_fn::<T>(&snapshot_voters, &cache);
+
+ // This might fail if the normalization fails. Very unlikely. See `integrity_test`.
+ let staked_assignments = assignment_ratio_to_staked_normalized(assignments, stake_of)
+ .map_err::<FeasibilityError, _>(Into::into)?;
+ let supports = sp_npos_elections::to_supports(&staked_assignments);
+
+ // Finally, check that the claimed score was indeed correct.
+ let known_score = supports.evaluate();
+ ensure!(known_score == score, FeasibilityError::InvalidScore);
+
+ Ok(ReadySolution { supports, compute, score })
+ }
+
+ /// Perform the tasks to be done after a new `elect` has been triggered:
+ ///
+ /// 1. Increment the round.
+ /// 2. Change the phase to [`Phase::Off`].
+ /// 3. Clear all snapshot data.
+ fn rotate_round() {
+ // Increment the round.
+ <Round<T>>::mutate(|r| *r += 1);
+
+ // Phase is off now.
+ <CurrentPhase<T>>::put(Phase::Off);
+
+ // Kill snapshots.
+ Self::kill_snapshot();
+ }
+
+ fn do_elect() -> Result<Supports<T::AccountId>, ElectionError> {
+ // We have to unconditionally try finalizing the signed phase here. There are only two
+ // possibilities:
+ //
+ // - signed phase was open, in which case this is essential for correct functioning of
+ // the system
+ // - signed phase was complete or not started, in which case finalization is idempotent
+ // and inexpensive (1 read of an empty vector).
+ let _ = Self::finalize_signed_phase();
+ <QueuedSolution<T>>::take()
+ .map_or_else(
+ || {
+ T::Fallback::elect()
+ .map_err(|fe| ElectionError::Fallback(fe))
+ .map(|supports| (supports, ElectionCompute::Fallback))
+ },
+ |ReadySolution { supports, compute, .. }| Ok((supports, compute)),
+ )
+ .map(|(supports, compute)| {
+ Self::deposit_event(Event::ElectionFinalized(Some(compute)));
+ if Self::round() != 1 {
+ log!(info, "Finalized election round with compute {:?}.", compute);
+ }
+ supports
+ })
+ .map_err(|err| {
+ Self::deposit_event(Event::ElectionFinalized(None));
+ if Self::round() != 1 {
+ log!(warn, "Failed to finalize election round. 
reason {:?}", err); + } + err + }) + } + + /// record the weight of the given `supports`. + fn weigh_supports(supports: &Supports) { + let active_voters = supports + .iter() + .map(|(_, x)| x) + .fold(Zero::zero(), |acc, next| acc + next.voters.len() as u32); + let desired_targets = supports.len() as u32; + Self::register_weight(T::WeightInfo::elect_queued(active_voters, desired_targets)); + } +} + +impl ElectionProvider for Pallet { + type Error = ElectionError; + type DataProvider = T::DataProvider; + + fn elect() -> Result, Self::Error> { + match Self::do_elect() { + Ok(supports) => { + // All went okay, record the weight, put sign to be Off, clean snapshot, etc. + Self::weigh_supports(&supports); + Self::rotate_round(); + Ok(supports) + }, + Err(why) => { + log!(error, "Entering emergency mode: {:?}", why); + >::put(Phase::Emergency); + Err(why) + }, + } + } +} + +/// convert a DispatchError to a custom InvalidTransaction with the inner code being the error +/// number. +pub fn dispatch_error_to_invalid(error: DispatchError) -> InvalidTransaction { + let error_number = match error { + DispatchError::Module { error, .. } => error, + _ => 0, + }; + InvalidTransaction::Custom(error_number) +} + +#[cfg(test)] +mod feasibility_check { + //! All of the tests here should be dedicated to only testing the feasibility check and nothing + //! more. The best way to audit and review these tests is to try and come up with a solution + //! that is invalid, but gets through the system as valid. + + use super::*; + use crate::mock::{ + raw_solution, roll_to, EpochLength, ExtBuilder, MultiPhase, Runtime, SignedPhase, + TargetIndex, UnsignedPhase, VoterIndex, + }; + use frame_support::assert_noop; + + const COMPUTE: ElectionCompute = ElectionCompute::OnChain; + + #[test] + fn snapshot_is_there() { + ExtBuilder::default().build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + let solution = raw_solution(); + + // For whatever reason it might be: + >::kill(); + + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::SnapshotUnavailable + ); + }) + } + + #[test] + fn round() { + ExtBuilder::default().build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + solution.round += 1; + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::InvalidRound + ); + }) + } + + #[test] + fn desired_targets() { + ExtBuilder::default().desired_targets(8).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let raw = raw_solution(); + + assert_eq!(raw.solution.unique_targets().len(), 4); + assert_eq!(MultiPhase::desired_targets().unwrap(), 8); + + assert_noop!( + MultiPhase::feasibility_check(raw, COMPUTE), + FeasibilityError::WrongWinnerCount, + ); + }) + } + + #[test] + fn winner_indices() { + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut raw = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().targets.len(), 4); + // ----------------------------------------------------^^ valid range is [0..3]. + + // Swap all votes from 3 to 4. This will ensure that the number of unique winners will + // still be 4, but one of the indices will be gibberish. Requirement is to make sure 3 a + // winner, which we don't do here. 
+ raw.solution + .votes1 + .iter_mut() + .filter(|(_, t)| *t == TargetIndex::from(3u16)) + .for_each(|(_, t)| *t += 1); + raw.solution.votes2.iter_mut().for_each(|(_, [(t0, _)], t1)| { + if *t0 == TargetIndex::from(3u16) { + *t0 += 1 + }; + if *t1 == TargetIndex::from(3u16) { + *t1 += 1 + }; + }); + assert_noop!( + MultiPhase::feasibility_check(raw, COMPUTE), + FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex) + ); + }) + } + + #[test] + fn voter_indices() { + // Should be caught in `solution.into_assignment`. + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); + // ----------------------------------------------------^^ valid range is [0..7]. + + // Check that there is an index 7 in votes1, and flip to 8. + assert!( + solution + .solution + .votes1 + .iter_mut() + .filter(|(v, _)| *v == VoterIndex::from(7u32)) + .map(|(v, _)| *v = 8) + .count() > 0 + ); + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex), + ); + }) + } + + #[test] + fn voter_votes() { + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); + // ----------------------------------------------------^^ valid range is [0..7]. + + // First, check that voter at index 7 (40) actually voted for 3 (40) -- this is self + // vote. Then, change the vote to 2 (30). + assert_eq!( + solution + .solution + .votes1 + .iter_mut() + .filter(|(v, t)| *v == 7 && *t == 3) + .map(|(_, t)| *t = 2) + .count(), + 1, + ); + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::InvalidVote, + ); + }) + } + + #[test] + fn score() { + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); + + // Simply faff with the score. 
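+ // (`score[0]` is the minimal-stake component of `ElectionScore`; since the feasibility
+ // check recomputes the score and requires an exact match, any tampering must fail.)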
+ solution.score[0] += 1; + + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::InvalidScore, + ); + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + mock::{ + multi_phase_events, roll_to, AccountId, ExtBuilder, MockWeightInfo, MultiPhase, + Runtime, SignedMaxSubmissions, System, TargetIndex, Targets, + }, + Phase, + }; + use frame_election_provider_support::ElectionProvider; + use frame_support::{assert_noop, assert_ok}; + use sp_npos_elections::Support; + + #[test] + fn phase_rotation_works() { + ExtBuilder::default().build_and_execute(|| { + // 0 ------- 15 ------- 25 ------- 30 ------- ------- 45 ------- 55 ------- 60 + // | | | | | | + // Signed Unsigned Elect Signed Unsigned Elect + + assert_eq!(System::block_number(), 0); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + assert_eq!(MultiPhase::round(), 1); + + roll_to(4); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + assert!(MultiPhase::snapshot().is_none()); + assert_eq!(MultiPhase::round(), 1); + + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert!(MultiPhase::snapshot().is_some()); + assert_eq!(MultiPhase::round(), 1); + + roll_to(24); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert!(MultiPhase::snapshot().is_some()); + assert_eq!(MultiPhase::round(), 1); + + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert_eq!( + multi_phase_events(), + vec![Event::SignedPhaseStarted(1), Event::UnsignedPhaseStarted(1)], + ); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(29); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(30); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert!(MultiPhase::snapshot().is_some()); + + // We close when upstream tells us to elect. 
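+ // (The unsigned phase has no fixed end of its own: it stays open past block 30 until
+ // the `elect` call below closes the round.)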
+ roll_to(32); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert!(MultiPhase::snapshot().is_some()); + + assert_ok!(MultiPhase::elect()); + + assert!(MultiPhase::current_phase().is_off()); + assert!(MultiPhase::snapshot().is_none()); + assert_eq!(MultiPhase::round(), 2); + + roll_to(44); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(45); + assert!(MultiPhase::current_phase().is_signed()); + + roll_to(55); + assert!(MultiPhase::current_phase().is_unsigned_open_at(55)); + }) + } + + #[test] + fn signed_phase_void() { + ExtBuilder::default().phases(0, 10).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(19); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(20); + assert!(MultiPhase::current_phase().is_unsigned_open_at(20)); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(30); + assert!(MultiPhase::current_phase().is_unsigned_open_at(20)); + + assert_ok!(MultiPhase::elect()); + + assert!(MultiPhase::current_phase().is_off()); + assert!(MultiPhase::snapshot().is_none()); + }); + } + + #[test] + fn unsigned_phase_void() { + ExtBuilder::default().phases(10, 0).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(19); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(20); + assert!(MultiPhase::current_phase().is_signed()); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(30); + assert!(MultiPhase::current_phase().is_signed()); + + assert_ok!(MultiPhase::elect()); + + assert!(MultiPhase::current_phase().is_off()); + assert!(MultiPhase::snapshot().is_none()); + }); + } + + #[test] + fn both_phases_void() { + ExtBuilder::default().phases(0, 0).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(19); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(20); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(30); + assert!(MultiPhase::current_phase().is_off()); + + // This module is now only capable of doing on-chain backup. + assert_ok!(MultiPhase::elect()); + + assert!(MultiPhase::current_phase().is_off()); + }); + } + + #[test] + fn early_termination() { + // An early termination in the signed phase, with no queued solution. + ExtBuilder::default().build_and_execute(|| { + // Signed phase started at block 15 and will end at 25. + roll_to(14); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + roll_to(15); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert_eq!(MultiPhase::round(), 1); + + // An unexpected call to elect. + roll_to(20); + assert_ok!(MultiPhase::elect()); + + // We surely can't have any feasible solutions. This will cause an on-chain election. + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted(1), + Event::ElectionFinalized(Some(ElectionCompute::Fallback)) + ], + ); + // All storage items must be cleared. + assert_eq!(MultiPhase::round(), 2); + assert!(MultiPhase::snapshot().is_none()); + assert!(MultiPhase::snapshot_metadata().is_none()); + assert!(MultiPhase::desired_targets().is_none()); + assert!(MultiPhase::queued_solution().is_none()); + assert!(MultiPhase::signed_submissions().is_empty()); + }) + } + + #[test] + fn early_termination_with_submissions() { + // an early termination in the signed phase, with no queued solution. 
+ ExtBuilder::default().build_and_execute(|| { + // signed phase started at block 15 and will end at 25. + roll_to(14); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + roll_to(15); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert_eq!(MultiPhase::round(), 1); + + // fill the queue with signed submissions + for s in 0..SignedMaxSubmissions::get() { + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(MultiPhase::submit( + crate::mock::Origin::signed(99), + Box::new(solution), + MultiPhase::signed_submissions().len() as u32 + )); + } + + // an unexpected call to elect. + roll_to(20); + assert_ok!(MultiPhase::elect()); + + // all storage items must be cleared. + assert_eq!(MultiPhase::round(), 2); + assert!(MultiPhase::snapshot().is_none()); + assert!(MultiPhase::snapshot_metadata().is_none()); + assert!(MultiPhase::desired_targets().is_none()); + assert!(MultiPhase::queued_solution().is_none()); + assert!(MultiPhase::signed_submissions().is_empty()); + }) + } + + #[test] + fn fallback_strategy_works() { + ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + + // Zilch solutions thus far, but we get a result. + assert!(MultiPhase::queued_solution().is_none()); + let supports = MultiPhase::elect().unwrap(); + + assert_eq!( + supports, + vec![ + (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), + (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) + ] + ) + }); + + ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + + // Zilch solutions thus far. + assert!(MultiPhase::queued_solution().is_none()); + assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); + // phase is now emergency. + assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + }) + } + + #[test] + fn snapshot_creation_fails_if_too_big() { + ExtBuilder::default().build_and_execute(|| { + Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); + + // Signed phase failed to open. + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + // Unsigned phase failed to open. + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + // On-chain backup works though. + roll_to(29); + let supports = MultiPhase::elect().unwrap(); + assert!(supports.len() > 0); + }) + } + + #[test] + fn untrusted_score_verification_is_respected() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + + // set the solution balancing to get the desired score. + crate::mock::Balancing::set(Some((2, 0))); + + let (solution, _) = MultiPhase::mine_solution::<::Solver>().unwrap(); + // Default solution has a score of [50, 100, 5000]. + assert_eq!(solution.score, [50, 100, 5000]); + + >::put([49, 0, 0]); + assert_ok!(MultiPhase::feasibility_check(solution.clone(), ElectionCompute::Signed)); + + >::put([51, 0, 0]); + assert_noop!( + MultiPhase::feasibility_check(solution, ElectionCompute::Signed), + FeasibilityError::UntrustedScoreTooLow, + ); + }) + } + + #[test] + fn number_of_voters_allowed_2sec_block() { + // Just a rough estimate with the substrate weights. 
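+ // The loop below increases `active` until the weight of `submit_unsigned` exceeds the
+ // (2-second) block budget, giving a rough upper bound on how many active voters fit.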
+ assert!(!MockWeightInfo::get());
+
+ let all_voters: u32 = 10_000;
+ let all_targets: u32 = 5_000;
+ let desired: u32 = 1_000;
+ let weight_with = |active| {
+ <Runtime as Config>::WeightInfo::submit_unsigned(
+ all_voters,
+ all_targets,
+ active,
+ desired,
+ )
+ };
+
+ let mut active = 1;
+ while weight_with(active) <=
+ <Runtime as frame_system::Config>::BlockWeights::get().max_block ||
+ active == all_voters
+ {
+ active += 1;
+ }
+
+ println!("can support {} voters to yield a weight of {}", active, weight_with(active));
+ }
+}
diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs
new file mode 100644
index 0000000000000..28a15291e6520
--- /dev/null
+++ b/frame/election-provider-multi-phase/src/mock.rs
@@ -0,0 +1,581 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate as multi_phase;
+use frame_election_provider_support::{
+ data_provider, onchain, ElectionDataProvider, SequentialPhragmen,
+};
+pub use frame_support::{assert_noop, assert_ok};
+use frame_support::{parameter_types, traits::Hooks, weights::Weight};
+use multi_phase::unsigned::{IndexAssignmentOf, Voter};
+use parking_lot::RwLock;
+use sp_core::{
+ offchain::{
+ testing::{PoolState, TestOffchainExt, TestTransactionPoolExt},
+ OffchainDbExt, OffchainWorkerExt, TransactionPoolExt,
+ },
+ H256,
+};
+use sp_npos_elections::{
+ assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, ElectionResult,
+ EvaluateSupport, ExtendedBalance, NposSolution,
+};
+use sp_runtime::{
+ testing::Header,
+ traits::{BlakeTwo256, IdentityLookup},
+ PerU16,
+};
+use std::{convert::TryFrom, sync::Arc};
+
+pub type Block = sp_runtime::generic::Block<Header, UncheckedExtrinsic>;
+pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic<AccountId, Call, (), ()>;
+
+frame_support::construct_runtime!(
+ pub enum Runtime where
+ Block = Block,
+ NodeBlock = Block,
+ UncheckedExtrinsic = UncheckedExtrinsic
+ {
+ System: frame_system::{Pallet, Call, Event<T>, Config},
+ Balances: pallet_balances::{Pallet, Call, Event<T>, Config<T>},
+ MultiPhase: multi_phase::{Pallet, Call, Event<T>},
+ }
+);
+
+pub(crate) type Balance = u64;
+pub(crate) type AccountId = u64;
+pub(crate) type BlockNumber = u64;
+pub(crate) type VoterIndex = u32;
+pub(crate) type TargetIndex = u16;
+
+sp_npos_elections::generate_solution_type!(
+ #[compact]
+ pub struct TestNposSolution::<
+ VoterIndex = VoterIndex,
+ TargetIndex = TargetIndex,
+ Accuracy = PerU16,
+ >(16)
+);
+
+/// All events of this pallet.
+pub(crate) fn multi_phase_events() -> Vec<super::Event<Runtime>> {
+ System::events()
+ .into_iter()
+ .map(|r| r.event)
+ .filter_map(|e| if let Event::MultiPhase(inner) = e { Some(inner) } else { None })
+ .collect::<Vec<_>>()
+}
+
+/// Roll from `now` to block `n`, running `on_initialize` at each block along the way.
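+ ///
+ /// Illustrative usage, assuming the default mock phases (signed opens at block 15,
+ /// unsigned at block 25):
+ ///
+ /// ```ignore
+ /// roll_to(15); // signed phase opens
+ /// roll_to(25); // unsigned phase opens
+ /// ```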
+pub fn roll_to(n: BlockNumber) { + let now = System::block_number(); + for i in now + 1..=n { + System::set_block_number(i); + MultiPhase::on_initialize(i); + } +} + +pub fn roll_to_with_ocw(n: BlockNumber) { + let now = System::block_number(); + for i in now + 1..=n { + System::set_block_number(i); + MultiPhase::on_initialize(i); + MultiPhase::offchain_worker(i); + } +} + +pub struct TrimHelpers { + pub voters: Vec>, + pub assignments: Vec>, + pub encoded_size_of: + Box]) -> Result>, + pub voter_index: Box< + dyn Fn( + &::AccountId, + ) -> Option>, + >, +} + +/// Helpers for setting up trimming tests. +/// +/// Assignments are pre-sorted in reverse order of stake. +pub fn trim_helpers() -> TrimHelpers { + let RoundSnapshot { voters, targets } = MultiPhase::snapshot().unwrap(); + let stakes: std::collections::HashMap<_, _> = + voters.iter().map(|(id, stake, _)| (*id, *stake)).collect(); + + // Compute the size of a solution comprised of the selected arguments. + // + // This function completes in `O(edges)`; it's expensive, but linear. + let encoded_size_of = Box::new(|assignments: &[IndexAssignmentOf]| { + SolutionOf::::try_from(assignments).map(|s| s.encoded_size()) + }); + let cache = helpers::generate_voter_cache::(&voters); + let voter_index = helpers::voter_index_fn_owned::(cache); + let target_index = helpers::target_index_fn::(&targets); + + let desired_targets = MultiPhase::desired_targets().unwrap(); + + let ElectionResult { mut assignments, .. } = seq_phragmen::<_, SolutionAccuracyOf>( + desired_targets as usize, + targets.clone(), + voters.clone(), + None, + ) + .unwrap(); + + // sort by decreasing order of stake + assignments.sort_unstable_by_key(|assignment| { + std::cmp::Reverse(stakes.get(&assignment.who).cloned().unwrap_or_default()) + }); + + // convert to IndexAssignment + let assignments = assignments + .iter() + .map(|assignment| { + IndexAssignmentOf::::new(assignment, &voter_index, &target_index) + }) + .collect::, _>>() + .expect("test assignments don't contain any voters with too many votes"); + + TrimHelpers { voters, assignments, encoded_size_of, voter_index: Box::new(voter_index) } +} + +/// Spit out a verifiable raw solution. +/// +/// This is a good example of what an offchain miner would do. 
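+ ///
+ /// Roughly: read the current snapshot, run `seq_phragmen` for `desired_targets` winners,
+ /// compute the score of the staked assignments, and compress the assignments into a
+ /// `TestNposSolution`.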
+pub fn raw_solution() -> RawSolution> { + let RoundSnapshot { voters, targets } = MultiPhase::snapshot().unwrap(); + let desired_targets = MultiPhase::desired_targets().unwrap(); + + let ElectionResult { winners: _, assignments } = + seq_phragmen::<_, SolutionAccuracyOf>( + desired_targets as usize, + targets.clone(), + voters.clone(), + None, + ) + .unwrap(); + + // closures + let cache = helpers::generate_voter_cache::(&voters); + let voter_index = helpers::voter_index_fn_linear::(&voters); + let target_index = helpers::target_index_fn_linear::(&targets); + let stake_of = helpers::stake_of_fn::(&voters, &cache); + + let score = { + let staked = assignment_ratio_to_staked_normalized(assignments.clone(), &stake_of).unwrap(); + to_supports(&staked).evaluate() + }; + let solution = + >::from_assignment(&assignments, &voter_index, &target_index).unwrap(); + + let round = MultiPhase::round(); + RawSolution { solution, score, round } +} + +pub fn witness() -> SolutionOrSnapshotSize { + MultiPhase::snapshot() + .map(|snap| SolutionOrSnapshotSize { + voters: snap.voters.len() as u32, + targets: snap.targets.len() as u32, + }) + .unwrap_or_default() +} + +impl frame_system::Config for Runtime { + type SS58Prefix = (); + type BaseCallFilter = frame_support::traits::Everything; + type Origin = Origin; + type Index = u64; + type BlockNumber = BlockNumber; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = (); + type DbWeight = (); + type BlockLength = (); + type BlockWeights = BlockWeights; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type OnSetCode = (); +} + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +parameter_types! { + pub const ExistentialDeposit: u64 = 1; + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults(2 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); +} + +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type WeightInfo = (); +} + +parameter_types! { + pub static Targets: Vec = vec![10, 20, 30, 40]; + pub static Voters: Vec<(AccountId, VoteWeight, Vec)> = vec![ + (1, 10, vec![10, 20]), + (2, 10, vec![30, 40]), + (3, 10, vec![40]), + (4, 10, vec![10, 20, 30, 40]), + // self votes. 
+ (10, 10, vec![10]), + (20, 20, vec![20]), + (30, 30, vec![30]), + (40, 40, vec![40]), + ]; + + pub static DesiredTargets: u32 = 2; + pub static SignedPhase: BlockNumber = 10; + pub static UnsignedPhase: BlockNumber = 5; + pub static SignedMaxSubmissions: u32 = 5; + pub static SignedDepositBase: Balance = 5; + pub static SignedDepositByte: Balance = 0; + pub static SignedDepositWeight: Balance = 0; + pub static SignedRewardBase: Balance = 7; + pub static SignedMaxWeight: Weight = BlockWeights::get().max_block; + pub static MinerTxPriority: u64 = 100; + pub static SolutionImprovementThreshold: Perbill = Perbill::zero(); + pub static OffchainRepeat: BlockNumber = 5; + pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; + pub static MinerMaxLength: u32 = 256; + pub static MockWeightInfo: bool = false; + + pub static EpochLength: u64 = 30; + pub static OnChianFallback: bool = true; +} + +impl onchain::Config for Runtime { + type Accuracy = sp_runtime::Perbill; + type DataProvider = StakingMock; +} + +pub struct MockFallback; +impl ElectionProvider for MockFallback { + type Error = &'static str; + type DataProvider = StakingMock; + + fn elect() -> Result, Self::Error> { + if OnChianFallback::get() { + onchain::OnChainSequentialPhragmen::::elect() + .map_err(|_| "OnChainSequentialPhragmen failed") + } else { + super::NoFallback::::elect() + } + } +} + +// Hopefully this won't be too much of a hassle to maintain. +pub struct DualMockWeightInfo; +impl multi_phase::weights::WeightInfo for DualMockWeightInfo { + fn on_initialize_nothing() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::on_initialize_nothing() + } + } + fn create_snapshot_internal() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::create_snapshot_internal() + } + } + fn on_initialize_open_signed() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::on_initialize_open_signed() + } + } + fn on_initialize_open_unsigned() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::on_initialize_open_unsigned() + } + } + fn elect_queued(a: u32, d: u32) -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::elect_queued(a, d) + } + } + fn finalize_signed_phase_accept_solution() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::finalize_signed_phase_accept_solution() + } + } + fn finalize_signed_phase_reject_solution() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::finalize_signed_phase_reject_solution() + } + } + fn submit(c: u32) -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::submit(c) + } + } + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32) -> Weight { + if MockWeightInfo::get() { + // 10 base + // 5 per edge. + (10 as Weight).saturating_add((5 as Weight).saturating_mul(a as Weight)) + } else { + <() as multi_phase::weights::WeightInfo>::submit_unsigned(v, t, a, d) + } + } + fn feasibility_check(v: u32, t: u32, a: u32, d: u32) -> Weight { + if MockWeightInfo::get() { + // 10 base + // 5 per edge. 
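+ // (i.e. the mocked weight grows linearly with the number of active voters `a`.)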
+ (10 as Weight).saturating_add((5 as Weight).saturating_mul(a as Weight)) + } else { + <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d) + } + } +} + +parameter_types! { + pub static Balancing: Option<(usize, ExtendedBalance)> = Some((0, 0)); +} + +impl crate::Config for Runtime { + type Event = Event; + type Currency = Balances; + type EstimateCallFee = frame_support::traits::ConstU32<8>; + type SignedPhase = SignedPhase; + type UnsignedPhase = UnsignedPhase; + type SolutionImprovementThreshold = SolutionImprovementThreshold; + type OffchainRepeat = OffchainRepeat; + type MinerMaxWeight = MinerMaxWeight; + type MinerMaxLength = MinerMaxLength; + type MinerTxPriority = MinerTxPriority; + type SignedRewardBase = SignedRewardBase; + type SignedDepositBase = SignedDepositBase; + type SignedDepositByte = (); + type SignedDepositWeight = (); + type SignedMaxWeight = SignedMaxWeight; + type SignedMaxSubmissions = SignedMaxSubmissions; + type SlashHandler = (); + type RewardHandler = (); + type DataProvider = StakingMock; + type WeightInfo = DualMockWeightInfo; + type BenchmarkingConfig = (); + type Fallback = MockFallback; + type ForceOrigin = frame_system::EnsureRoot; + type Solution = TestNposSolution; + type Solver = SequentialPhragmen, Balancing>; +} + +impl frame_system::offchain::SendTransactionTypes for Runtime +where + Call: From, +{ + type OverarchingCall = Call; + type Extrinsic = Extrinsic; +} + +pub type Extrinsic = sp_runtime::testing::TestXt; + +#[derive(Default)] +pub struct ExtBuilder {} + +pub struct StakingMock; +impl ElectionDataProvider for StakingMock { + const MAXIMUM_VOTES_PER_VOTER: u32 = ::LIMIT as u32; + fn targets(maybe_max_len: Option) -> data_provider::Result> { + let targets = Targets::get(); + + if maybe_max_len.map_or(false, |max_len| targets.len() > max_len) { + return Err("Targets too big") + } + + Ok(targets) + } + + fn voters( + maybe_max_len: Option, + ) -> data_provider::Result)>> { + let voters = Voters::get(); + if maybe_max_len.map_or(false, |max_len| voters.len() > max_len) { + return Err("Voters too big") + } + + Ok(voters) + } + + fn desired_targets() -> data_provider::Result { + Ok(DesiredTargets::get()) + } + + fn next_election_prediction(now: u64) -> u64 { + now + EpochLength::get() - now % EpochLength::get() + } + + #[cfg(feature = "runtime-benchmarks")] + fn put_snapshot( + voters: Vec<(AccountId, VoteWeight, Vec)>, + targets: Vec, + _target_stake: Option, + ) { + Targets::set(targets); + Voters::set(voters); + } + + #[cfg(feature = "runtime-benchmarks")] + fn clear() { + Targets::set(vec![]); + Voters::set(vec![]); + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_voter(voter: AccountId, weight: VoteWeight, targets: Vec) { + let mut current = Voters::get(); + current.push((voter, weight, targets)); + Voters::set(current); + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_target(target: AccountId) { + let mut current = Targets::get(); + current.push(target); + Targets::set(current); + + // to be on-par with staking, we add a self vote as well. the stake is really not that + // important. 
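+ // (this mirrors `add_voter`, just with a minimal, existential-deposit-sized stake.)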
+ let mut current = Voters::get(); + current.push((target, ExistentialDeposit::get() as u64, vec![target])); + Voters::set(current); + } +} + +impl ExtBuilder { + pub fn miner_tx_priority(self, p: u64) -> Self { + ::set(p); + self + } + pub fn solution_improvement_threshold(self, p: Perbill) -> Self { + ::set(p); + self + } + pub fn phases(self, signed: BlockNumber, unsigned: BlockNumber) -> Self { + ::set(signed); + ::set(unsigned); + self + } + pub fn onchain_fallback(self, onchain: bool) -> Self { + ::set(onchain); + self + } + pub fn miner_weight(self, weight: Weight) -> Self { + ::set(weight); + self + } + pub fn mock_weight_info(self, mock: bool) -> Self { + ::set(mock); + self + } + pub fn desired_targets(self, t: u32) -> Self { + ::set(t); + self + } + pub fn add_voter(self, who: AccountId, stake: Balance, targets: Vec) -> Self { + VOTERS.with(|v| v.borrow_mut().push((who, stake, targets))); + self + } + pub fn signed_max_submission(self, count: u32) -> Self { + ::set(count); + self + } + pub fn signed_deposit(self, base: u64, byte: u64, weight: u64) -> Self { + ::set(base); + ::set(byte); + ::set(weight); + self + } + pub fn signed_weight(self, weight: Weight) -> Self { + ::set(weight); + self + } + pub fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = + frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let _ = pallet_balances::GenesisConfig:: { + balances: vec![ + // bunch of account for submitting stuff only. + (99, 100), + (999, 100), + (9999, 100), + ], + } + .assimilate_storage(&mut storage); + + sp_io::TestExternalities::from(storage) + } + + pub fn build_offchainify( + self, + iters: u32, + ) -> (sp_io::TestExternalities, Arc>) { + let mut ext = self.build(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, pool_state) = TestTransactionPoolExt::new(); + + let mut seed = [0_u8; 32]; + seed[0..4].copy_from_slice(&iters.to_le_bytes()); + offchain_state.write().seed = seed; + + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + (ext, pool_state) + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(test) + } +} + +pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) { + (Balances::free_balance(who), Balances::reserved_balance(who)) +} diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs new file mode 100644 index 0000000000000..61215059c53a6 --- /dev/null +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -0,0 +1,942 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The signed phase implementation. 
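+//!
+//! In brief: during the signed phase, raw solutions are kept in a bounded, score-sorted queue.
+//! An illustrative sketch of the intended flow (mirroring what `Pallet::submit` does; error
+//! handling elided):
+//!
+//! ```ignore
+//! let mut queue = SignedSubmissions::<T>::get();
+//! match queue.insert(submission) {
+//!     // The queue was full and the score was too weak: reject the submission.
+//!     InsertResult::NotInserted => { /* return `SignedQueueFull` */ },
+//!     // Queued without evicting anyone.
+//!     InsertResult::Inserted => {},
+//!     // Queued by evicting the weakest submission, whose deposit must be unreserved.
+//!     InsertResult::InsertedEjecting(weakest) => { /* refund `weakest.deposit` */ },
+//! }
+//! queue.put(); // write the in-memory overlays back to storage
+//! ```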
+ +use crate::{ + Config, ElectionCompute, Pallet, QueuedSolution, RawSolution, ReadySolution, + SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, SolutionOf, + SolutionOrSnapshotSize, Weight, WeightInfo, +}; +use codec::{Decode, Encode, HasCompact}; +use frame_support::{ + storage::bounded_btree_map::BoundedBTreeMap, + traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, +}; +use sp_arithmetic::traits::SaturatedConversion; +use sp_npos_elections::{is_score_better, ElectionScore, NposSolution}; +use sp_runtime::{ + traits::{Saturating, Zero}, + RuntimeDebug, +}; +use sp_std::{ + cmp::Ordering, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + ops::Deref, +}; + +/// A raw, unchecked signed submission. +/// +/// This is just a wrapper around [`RawSolution`] and some additional info. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, scale_info::TypeInfo)] +pub struct SignedSubmission { + /// Who submitted this solution. + pub who: AccountId, + /// The deposit reserved for storing this solution. + pub deposit: Balance, + /// The raw solution itself. + pub raw_solution: RawSolution, + /// The reward that should potentially be paid for this solution, if accepted. + pub reward: Balance, +} + +impl Ord for SignedSubmission +where + AccountId: Ord, + Balance: Ord + HasCompact, + Solution: Ord, + RawSolution: Ord, +{ + fn cmp(&self, other: &Self) -> Ordering { + self.raw_solution + .score + .cmp(&other.raw_solution.score) + .then_with(|| self.raw_solution.cmp(&other.raw_solution)) + .then_with(|| self.deposit.cmp(&other.deposit)) + .then_with(|| self.who.cmp(&other.who)) + } +} + +impl PartialOrd for SignedSubmission +where + AccountId: Ord, + Balance: Ord + HasCompact, + Solution: Ord, + RawSolution: Ord, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +pub type PositiveImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::PositiveImbalance; +pub type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; +pub type SignedSubmissionOf = + SignedSubmission<::AccountId, BalanceOf, SolutionOf>; + +pub type SubmissionIndicesOf = + BoundedBTreeMap::SignedMaxSubmissions>; + +/// Outcome of [`SignedSubmissions::insert`]. +pub enum InsertResult { + /// The submission was not inserted because the queue was full and the submission had + /// insufficient score to eject a prior solution from the queue. + NotInserted, + /// The submission was inserted successfully without ejecting a solution. + Inserted, + /// The submission was inserted successfully. As the queue was full, this operation ejected a + /// prior solution, contained in this variant. + InsertedEjecting(SignedSubmissionOf), +} + +/// Mask type which pretends to be a set of `SignedSubmissionOf`, while in fact delegating to the +/// actual implementations in `SignedSubmissionIndices`, `SignedSubmissionsMap`, and +/// `SignedSubmissionNextIndex`. +#[cfg_attr(feature = "std", derive(frame_support::DebugNoBound))] +pub struct SignedSubmissions { + indices: SubmissionIndicesOf, + next_idx: u32, + insertion_overlay: BTreeMap>, + deletion_overlay: BTreeSet, +} + +impl SignedSubmissions { + /// Get the signed submissions from storage. 
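+ ///
+ /// Mutations made to the returned value are buffered in in-memory overlays and are only
+ /// written back to storage by a matching [`Self::put`], e.g.:
+ ///
+ /// ```ignore
+ /// let mut submissions = SignedSubmissions::<T>::get();
+ /// // ...insert into / remove from `submissions`...
+ /// submissions.put();
+ /// ```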
+ pub fn get() -> Self { + let submissions = SignedSubmissions { + indices: SignedSubmissionIndices::::get(), + next_idx: SignedSubmissionNextIndex::::get(), + insertion_overlay: BTreeMap::new(), + deletion_overlay: BTreeSet::new(), + }; + // validate that the stored state is sane + debug_assert!(submissions + .indices + .values() + .copied() + .max() + .map_or(true, |max_idx| submissions.next_idx > max_idx,)); + submissions + } + + /// Put the signed submissions back into storage. + pub fn put(mut self) { + // validate that we're going to write only sane things to storage + debug_assert!(self + .insertion_overlay + .keys() + .copied() + .max() + .map_or(true, |max_idx| self.next_idx > max_idx,)); + debug_assert!(self + .indices + .values() + .copied() + .max() + .map_or(true, |max_idx| self.next_idx > max_idx,)); + + SignedSubmissionIndices::::put(self.indices); + SignedSubmissionNextIndex::::put(self.next_idx); + for key in self.deletion_overlay { + self.insertion_overlay.remove(&key); + SignedSubmissionsMap::::remove(key); + } + for (key, value) in self.insertion_overlay { + SignedSubmissionsMap::::insert(key, value); + } + } + + /// Get the submission at a particular index. + fn get_submission(&self, idx: u32) -> Option> { + if self.deletion_overlay.contains(&idx) { + // Note: can't actually remove the item from the insertion overlay (if present) + // because we don't want to use `&mut self` here. There may be some kind of + // `RefCell` optimization possible here in the future. + None + } else { + self.insertion_overlay + .get(&idx) + .cloned() + .or_else(|| SignedSubmissionsMap::::try_get(idx).ok()) + } + } + + /// Perform three operations: + /// + /// - Remove a submission (identified by score) + /// - Insert a new submission (identified by score and insertion index) + /// - Return the submission which was removed. + /// + /// Note: in the case that `weakest_score` is not present in `self.indices`, this will return + /// `None` without inserting the new submission and without further notice. + /// + /// Note: this does not enforce any ordering relation between the submission removed and that + /// inserted. + /// + /// Note: this doesn't insert into `insertion_overlay`, the optional new insertion must be + /// inserted into `insertion_overlay` to keep the variable `self` in a valid state. + fn swap_out_submission( + &mut self, + remove_score: ElectionScore, + insert: Option<(ElectionScore, u32)>, + ) -> Option> { + let remove_idx = self.indices.remove(&remove_score)?; + if let Some((insert_score, insert_idx)) = insert { + self.indices + .try_insert(insert_score, insert_idx) + .expect("just removed an item, we must be under capacity; qed"); + } + + self.insertion_overlay.remove(&remove_idx).or_else(|| { + (!self.deletion_overlay.contains(&remove_idx)) + .then(|| { + self.deletion_overlay.insert(remove_idx); + SignedSubmissionsMap::::try_get(remove_idx).ok() + }) + .flatten() + }) + } + + /// Iterate through the set of signed submissions in order of increasing score. + pub fn iter(&self) -> impl '_ + Iterator> { + self.indices.iter().filter_map(move |(_score, &idx)| { + let maybe_submission = self.get_submission(idx); + if maybe_submission.is_none() { + log!( + error, + "SignedSubmissions internal state is invalid (idx {}); \ + there is a logic error in code handling signed solution submissions", + idx, + ) + } + maybe_submission + }) + } + + /// Empty the set of signed submissions, returning an iterator of signed submissions in + /// arbitrary order. 
+ /// + /// Note that if the iterator is dropped without consuming all elements, not all may be removed + /// from the underlying `SignedSubmissionsMap`, putting the storages into an invalid state. + /// + /// Note that, like `put`, this function consumes `Self` and modifies storage. + fn drain(mut self) -> impl Iterator> { + SignedSubmissionIndices::::kill(); + SignedSubmissionNextIndex::::kill(); + let insertion_overlay = sp_std::mem::take(&mut self.insertion_overlay); + SignedSubmissionsMap::::drain() + .filter(move |(k, _v)| !self.deletion_overlay.contains(k)) + .map(|(_k, v)| v) + .chain(insertion_overlay.into_iter().map(|(_k, v)| v)) + } + + /// Decode the length of the signed submissions without actually reading the entire struct into + /// memory. + /// + /// Note that if you hold an instance of `SignedSubmissions`, this function does _not_ + /// track its current length. This only decodes what is currently stored in memory. + pub fn decode_len() -> Option { + SignedSubmissionIndices::::decode_len() + } + + /// Insert a new signed submission into the set. + /// + /// In the event that the new submission is not better than the current weakest according + /// to `is_score_better`, we do not change anything. + pub fn insert(&mut self, submission: SignedSubmissionOf) -> InsertResult { + // verify the expectation that we never reuse an index + debug_assert!(!self.indices.values().any(|&idx| idx == self.next_idx)); + + let weakest = match self.indices.try_insert(submission.raw_solution.score, self.next_idx) { + Ok(Some(prev_idx)) => { + // a submission of equal score was already present in the set; + // no point editing the actual backing map as we know that the newer solution can't + // be better than the old. However, we do need to put the old value back. + self.indices + .try_insert(submission.raw_solution.score, prev_idx) + .expect("didn't change the map size; qed"); + return InsertResult::NotInserted + }, + Ok(None) => { + // successfully inserted into the set; no need to take out weakest member + None + }, + Err((insert_score, insert_idx)) => { + // could not insert into the set because it is full. + // note that we short-circuit return here in case the iteration produces `None`. + // If there wasn't a weakest entry to remove, then there must be a capacity of 0, + // which means that we can't meaningfully proceed. + let weakest_score = match self.indices.iter().next() { + None => return InsertResult::NotInserted, + Some((score, _)) => *score, + }; + let threshold = T::SolutionImprovementThreshold::get(); + + // if we haven't improved on the weakest score, don't change anything. + if !is_score_better(insert_score, weakest_score, threshold) { + return InsertResult::NotInserted + } + + self.swap_out_submission(weakest_score, Some((insert_score, insert_idx))) + }, + }; + + // we've taken out the weakest, so update the storage map and the next index + debug_assert!(!self.insertion_overlay.contains_key(&self.next_idx)); + self.insertion_overlay.insert(self.next_idx, submission); + debug_assert!(!self.deletion_overlay.contains(&self.next_idx)); + self.next_idx += 1; + match weakest { + Some(weakest) => InsertResult::InsertedEjecting(weakest), + None => InsertResult::Inserted, + } + } + + /// Remove the signed submission with the highest score from the set. 
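// ---------------------------------------------------------------------------
// [editor's sketch -- not part of the patch] The essence of `insert` above: a
// capacity-bounded, score-keyed queue that, when full, only admits a new entry
// if it beats the current weakest, ejecting that weakest entry. The hard-coded
// capacity and `&str` payload are illustrative assumptions; the pallet
// additionally applies `SolutionImprovementThreshold` and the overlay
// bookkeeping shown above.
use std::collections::BTreeMap;

enum Insertion {
	NotInserted,
	Inserted,
	InsertedEjecting(&'static str),
}

fn bounded_insert(
	queue: &mut BTreeMap<u64, &'static str>,
	score: u64,
	who: &'static str,
) -> Insertion {
	const CAPACITY: usize = 5;
	if queue.len() < CAPACITY {
		queue.insert(score, who);
		return Insertion::Inserted
	}
	// full: the weakest entry is the one with the lowest score.
	let weakest_score = *queue.keys().next().expect("queue is full and CAPACITY > 0; qed");
	if score > weakest_score {
		let ejected = queue.remove(&weakest_score).expect("key was just read; qed");
		queue.insert(score, who);
		Insertion::InsertedEjecting(ejected)
	} else {
		Insertion::NotInserted
	}
}
// ---------------------------------------------------------------------------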
+	pub fn pop_last(&mut self) -> Option<SignedSubmissionOf<T>> {
+		let (score, _) = self.indices.iter().rev().next()?;
+		// deref in advance to prevent mutable-immutable borrow conflict
+		let score = *score;
+		self.swap_out_submission(score, None)
+	}
+}
+
+impl<T: Config> Deref for SignedSubmissions<T> {
+	type Target = SubmissionIndicesOf<T>;
+
+	fn deref(&self) -> &Self::Target {
+		&self.indices
+	}
+}
+
+impl<T: Config> Pallet<T> {
+	/// `Self` accessor for `SignedSubmission<T>`.
+	pub fn signed_submissions() -> SignedSubmissions<T> {
+		SignedSubmissions::<T>::get()
+	}
+
+	/// Finish the signed phase. Process the signed submissions from best to worst until a valid
+	/// one is found, rewarding the best one and slashing the invalid ones along the way.
+	///
+	/// Returns true if we have a good solution in the signed phase.
+	///
+	/// This drains the [`SignedSubmissions`], potentially storing the best valid one in
+	/// [`QueuedSolution`].
+	///
+	/// This is a *self-weighing* function, it automatically registers its weight internally when
+	/// being called.
+	pub fn finalize_signed_phase() -> bool {
+		let (weight, found_solution) = Self::finalize_signed_phase_internal();
+		Self::register_weight(weight);
+		found_solution
+	}
+
+	/// The guts of [`Self::finalize_signed_phase`], doing everything except registering its
+	/// weight.
+	pub(crate) fn finalize_signed_phase_internal() -> (Weight, bool) {
+		let mut all_submissions = Self::signed_submissions();
+		let mut found_solution = false;
+		let mut weight = T::DbWeight::get().reads(1);
+
+		let SolutionOrSnapshotSize { voters, targets } =
+			Self::snapshot_metadata().unwrap_or_default();
+
+		while let Some(best) = all_submissions.pop_last() {
+			let SignedSubmission { raw_solution, who, deposit, reward } = best;
+			let active_voters = raw_solution.solution.voter_count() as u32;
+			let feasibility_weight = {
+				// defensive only: at the end of the signed phase, the snapshot must exist.
+				let desired_targets = Self::desired_targets().unwrap_or_default();
+				T::WeightInfo::feasibility_check(voters, targets, active_voters, desired_targets)
+			};
+			// the feasibility check itself has some weight
+			weight = weight.saturating_add(feasibility_weight);
+			match Self::feasibility_check(raw_solution, ElectionCompute::Signed) {
+				Ok(ready_solution) => {
+					Self::finalize_signed_phase_accept_solution(
+						ready_solution,
+						&who,
+						deposit,
+						reward,
+					);
+					found_solution = true;
+
+					weight = weight
+						.saturating_add(T::WeightInfo::finalize_signed_phase_accept_solution());
+					break
+				},
+				Err(_) => {
+					Self::finalize_signed_phase_reject_solution(&who, deposit);
+					weight = weight
+						.saturating_add(T::WeightInfo::finalize_signed_phase_reject_solution());
+				},
+			}
+		}
+
+		// Any unprocessed solution is pointless to even consider. Feasible or malicious,
+		// they didn't end up being used. Unreserve the bonds.
+		let discarded = all_submissions.len();
+		for SignedSubmission { who, deposit, .. } in all_submissions.drain() {
+			let _remaining = T::Currency::unreserve(&who, deposit);
+			weight = weight.saturating_add(T::DbWeight::get().writes(1));
+			debug_assert!(_remaining.is_zero());
+		}
+
+		debug_assert!(!SignedSubmissionIndices::<T>::exists());
+		debug_assert!(!SignedSubmissionNextIndex::<T>::exists());
+		debug_assert!(SignedSubmissionsMap::<T>::iter().next().is_none());
+
+		log!(
+			debug,
+			"closed signed phase, found solution? {}, discarded {}",
+			found_solution,
+			discarded
+		);
+
+		(weight, found_solution)
+	}
+
+	/// Helper function for the case where a solution is accepted in the signed phase.
+	///
+	/// Extracted to facilitate with weight calculation.
+	///
+	/// Infallible
+	pub fn finalize_signed_phase_accept_solution(
+		ready_solution: ReadySolution<T::AccountId>,
+		who: &T::AccountId,
+		deposit: BalanceOf<T>,
+		reward: BalanceOf<T>,
+	) {
+		// write this ready solution.
+		<QueuedSolution<T>>::put(ready_solution);
+
+		// emit reward event
+		Self::deposit_event(crate::Event::Rewarded(who.clone(), reward));
+
+		// unreserve deposit.
+		let _remaining = T::Currency::unreserve(who, deposit);
+		debug_assert!(_remaining.is_zero());
+
+		// Reward.
+		let positive_imbalance = T::Currency::deposit_creating(who, reward);
+		T::RewardHandler::on_unbalanced(positive_imbalance);
+	}
+
+	/// Helper function for the case where a solution is rejected in the signed phase.
+	///
+	/// Extracted to facilitate with weight calculation.
+	///
+	/// Infallible
+	pub fn finalize_signed_phase_reject_solution(who: &T::AccountId, deposit: BalanceOf<T>) {
+		Self::deposit_event(crate::Event::Slashed(who.clone(), deposit));
+		let (negative_imbalance, _remaining) = T::Currency::slash_reserved(who, deposit);
+		debug_assert!(_remaining.is_zero());
+		T::SlashHandler::on_unbalanced(negative_imbalance);
+	}
+
+	/// The feasibility weight of the given raw solution.
+	pub fn feasibility_weight_of(
+		raw_solution: &RawSolution<SolutionOf<T>>,
+		size: SolutionOrSnapshotSize,
+	) -> Weight {
+		T::WeightInfo::feasibility_check(
+			size.voters,
+			size.targets,
+			raw_solution.solution.voter_count() as u32,
+			raw_solution.solution.unique_targets().len() as u32,
+		)
+	}
+
+	/// Collect a sufficient deposit to store this solution.
+	///
+	/// The deposit is composed of 3 main elements:
+	///
+	/// 1. base deposit, fixed for all submissions.
+	/// 2. a per-byte deposit, for renting the state usage.
+	/// 3. a per-weight deposit, for the potential weight usage in an upcoming on_initialize.
+	pub fn deposit_for(
+		raw_solution: &RawSolution<SolutionOf<T>>,
+		size: SolutionOrSnapshotSize,
+	) -> BalanceOf<T> {
+		let encoded_len: u32 = raw_solution.encoded_size().saturated_into();
+		let encoded_len: BalanceOf<T> = encoded_len.into();
+		let feasibility_weight = Self::feasibility_weight_of(raw_solution, size);
+
+		let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len);
+		let weight_deposit =
+			T::SignedDepositWeight::get().saturating_mul(feasibility_weight.saturated_into());
+
+		T::SignedDepositBase::get()
+			.saturating_add(len_deposit)
+			.saturating_add(weight_deposit)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::{
+		mock::{
+			balances, raw_solution, roll_to, ExtBuilder, MultiPhase, Origin, Runtime,
+			SignedMaxSubmissions, SignedMaxWeight,
+		},
+		Error, Phase,
+	};
+	use frame_support::{assert_noop, assert_ok, assert_storage_noop, dispatch::DispatchResult};
+
+	fn submit_with_witness(
+		origin: Origin,
+		solution: RawSolution<SolutionOf<Runtime>>,
+	) -> DispatchResult {
+		MultiPhase::submit(
+			origin,
+			Box::new(solution),
+			MultiPhase::signed_submissions().len() as u32,
+		)
+	}
+
+	#[test]
+	fn cannot_submit_too_early() {
+		ExtBuilder::default().build_and_execute(|| {
+			roll_to(2);
+			assert_eq!(MultiPhase::current_phase(), Phase::Off);
+
+			// create a temp snapshot only for this test.
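// [editor's note] Without a snapshot, `submit` could fail for an unrelated
// reason; creating one ensures the assertion below fails specifically on the
// phase check, i.e. `PreDispatchEarlySubmission`.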
+ MultiPhase::create_snapshot().unwrap(); + let solution = raw_solution(); + + assert_noop!( + submit_with_witness(Origin::signed(10), solution), + Error::::PreDispatchEarlySubmission, + ); + }) + } + + #[test] + fn wrong_witness_fails() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + // submit this once correctly + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + assert_eq!(MultiPhase::signed_submissions().len(), 1); + + // now try and cheat by passing a lower queue length + assert_noop!( + MultiPhase::submit(Origin::signed(99), Box::new(solution), 0), + Error::::SignedInvalidWitness, + ); + }) + } + + #[test] + fn should_pay_deposit() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + assert_eq!(balances(&99), (95, 5)); + assert_eq!(MultiPhase::signed_submissions().iter().next().unwrap().deposit, 5); + }) + } + + #[test] + fn good_solution_is_rewarded() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + assert_eq!(balances(&99), (95, 5)); + + assert!(MultiPhase::finalize_signed_phase()); + assert_eq!(balances(&99), (100 + 7 + 8, 0)); + }) + } + + #[test] + fn bad_solution_is_slashed() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + + // make the solution invalid. + solution.score[0] += 1; + + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + assert_eq!(balances(&99), (95, 5)); + + // no good solution was stored. + assert!(!MultiPhase::finalize_signed_phase()); + // and the bond is gone. + assert_eq!(balances(&99), (95, 0)); + }) + } + + #[test] + fn suppressed_solution_gets_bond_back() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(balances(&99), (100, 0)); + assert_eq!(balances(&999), (100, 0)); + + // submit as correct. + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + // make the solution invalid and weaker. + solution.score[0] -= 1; + assert_ok!(submit_with_witness(Origin::signed(999), solution)); + assert_eq!(balances(&99), (95, 5)); + assert_eq!(balances(&999), (95, 5)); + + // _some_ good solution was stored. + assert!(MultiPhase::finalize_signed_phase()); + + // 99 is rewarded. + assert_eq!(balances(&99), (100 + 7 + 8, 0)); + // 999 gets everything back. + assert_eq!(balances(&999), (100, 0)); + }) + } + + #[test] + fn cannot_submit_worse_with_full_queue() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + // weaker. 
+ let solution = RawSolution { score: [4, 0, 0], ..Default::default() }; + + assert_noop!( + submit_with_witness(Origin::signed(99), solution), + Error::::SignedQueueFull, + ); + }) + } + + #[test] + fn weakest_is_removed_if_better_provided() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.raw_solution.score[0]) + .collect::>(), + vec![5, 6, 7, 8, 9] + ); + + // better. + let solution = RawSolution { score: [20, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + // the one with score 5 was rejected, the new one inserted. + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.raw_solution.score[0]) + .collect::>(), + vec![6, 7, 8, 9, 20] + ); + }) + } + + #[test] + fn replace_weakest_works() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 1..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + let solution = RawSolution { score: [4, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.raw_solution.score[0]) + .collect::>(), + vec![4, 6, 7, 8, 9], + ); + + // better. + let solution = RawSolution { score: [5, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + + // the one with score 5 was rejected, the new one inserted. + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.raw_solution.score[0]) + .collect::>(), + vec![5, 6, 7, 8, 9], + ); + }) + } + + #[test] + fn early_ejected_solution_gets_bond_back() { + ExtBuilder::default().signed_deposit(2, 0, 0).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + assert_eq!(balances(&99).1, 2 * 5); + assert_eq!(balances(&999).1, 0); + + // better. + let solution = RawSolution { score: [20, 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(999), solution)); + + // got one bond back. + assert_eq!(balances(&99).1, 2 * 4); + assert_eq!(balances(&999).1, 2); + }) + } + + #[test] + fn equally_good_solution_is_not_accepted() { + ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for i in 0..SignedMaxSubmissions::get() { + let solution = RawSolution { score: [(5 + i).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| s.raw_solution.score[0]) + .collect::>(), + vec![5, 6, 7] + ); + + // 5 is not accepted. This will only cause processing with no benefit. 
+ let solution = RawSolution { score: [5, 0, 0], ..Default::default() }; + assert_noop!( + submit_with_witness(Origin::signed(99), solution), + Error::::SignedQueueFull, + ); + }) + } + + #[test] + fn all_in_one_signed_submission_scenario() { + // a combination of: + // - good_solution_is_rewarded + // - bad_solution_is_slashed + // - suppressed_solution_gets_bond_back + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + assert_eq!(balances(&99), (100, 0)); + assert_eq!(balances(&999), (100, 0)); + assert_eq!(balances(&9999), (100, 0)); + let solution = raw_solution(); + + // submit a correct one. + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + // make the solution invalidly better and submit. This ought to be slashed. + let mut solution_999 = solution.clone(); + solution_999.score[0] += 1; + assert_ok!(submit_with_witness(Origin::signed(999), solution_999)); + + // make the solution invalidly worse and submit. This ought to be suppressed and + // returned. + let mut solution_9999 = solution.clone(); + solution_9999.score[0] -= 1; + assert_ok!(submit_with_witness(Origin::signed(9999), solution_9999)); + + assert_eq!( + MultiPhase::signed_submissions().iter().map(|x| x.who).collect::>(), + vec![9999, 99, 999] + ); + + // _some_ good solution was stored. + assert!(MultiPhase::finalize_signed_phase()); + + // 99 is rewarded. + assert_eq!(balances(&99), (100 + 7 + 8, 0)); + // 999 is slashed. + assert_eq!(balances(&999), (95, 0)); + // 9999 gets everything back. + assert_eq!(balances(&9999), (100, 0)); + }) + } + + #[test] + fn cannot_consume_too_much_future_weight() { + ExtBuilder::default() + .signed_weight(40) + .mock_weight_info(true) + .build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let (raw, witness) = + MultiPhase::mine_solution::<::Solver>().unwrap(); + let solution_weight = ::WeightInfo::feasibility_check( + witness.voters, + witness.targets, + raw.solution.voter_count() as u32, + raw.solution.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 35); + assert_eq!(raw.solution.voter_count(), 5); + assert_eq!(::SignedMaxWeight::get(), 40); + + assert_ok!(submit_with_witness(Origin::signed(99), raw.clone())); + + ::set(30); + + // note: resubmitting the same solution is technically okay as long as the queue has + // space. 
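// [editor's note] The cap was just lowered to 30 while the mined solution's
// feasibility weight is 35, so the very same call must now be rejected with
// `SignedTooMuchWeight` rather than as a duplicate.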
+ assert_noop!( + submit_with_witness(Origin::signed(99), raw), + Error::::SignedTooMuchWeight, + ); + }) + } + + #[test] + fn insufficient_deposit_does_not_store_submission() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + + assert_eq!(balances(&123), (0, 0)); + assert_noop!( + submit_with_witness(Origin::signed(123), solution), + Error::::SignedCannotPayDeposit, + ); + + assert_eq!(balances(&123), (0, 0)); + }) + } + + // given a full queue, and a solution which _should_ be allowed in, but the proposer of this + // new solution has insufficient deposit, we should not modify storage at all + #[test] + fn insufficient_deposit_with_full_queue_works_properly() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for s in 0..SignedMaxSubmissions::get() { + // score is always getting better + let solution = RawSolution { score: [(5 + s).into(), 0, 0], ..Default::default() }; + assert_ok!(submit_with_witness(Origin::signed(99), solution)); + } + + // this solution has a higher score than any in the queue + let solution = RawSolution { + score: [(5 + SignedMaxSubmissions::get()).into(), 0, 0], + ..Default::default() + }; + + assert_eq!(balances(&123), (0, 0)); + assert_noop!( + submit_with_witness(Origin::signed(123), solution), + Error::::SignedCannotPayDeposit, + ); + + assert_eq!(balances(&123), (0, 0)); + }) + } + + #[test] + fn finalize_signed_phase_is_idempotent_given_no_submissions() { + ExtBuilder::default().build_and_execute(|| { + for block_number in 0..25 { + roll_to(block_number); + + assert_eq!(SignedSubmissions::::decode_len().unwrap_or_default(), 0); + assert_storage_noop!(MultiPhase::finalize_signed_phase_internal()); + } + }) + } + + #[test] + fn finalize_signed_phase_is_idempotent_given_submissions() { + ExtBuilder::default().build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + + // submit a correct one. + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + // _some_ good solution was stored. + assert!(MultiPhase::finalize_signed_phase()); + + // calling it again doesn't change anything + assert_storage_noop!(MultiPhase::finalize_signed_phase()); + }) + } +} diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs new file mode 100644 index 0000000000000..af0b79177d86c --- /dev/null +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -0,0 +1,1556 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The unsigned phase, and its miner. 
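// ---------------------------------------------------------------------------
// [editor's note -- not part of the patch] For orientation: the offchain worker
// miner below keeps three persistent offchain storage keys -- the last block it
// ran at, a lock, and a cached `submit_unsigned` call. Each run first checks
// the repeat frequency, then either restores a still-valid cached solution or
// mines a fresh one, and finally submits it as an unsigned transaction.
// ---------------------------------------------------------------------------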
+ +use crate::{ + helpers, Call, Config, ElectionCompute, Error, FeasibilityError, Pallet, RawSolution, + ReadySolution, RoundSnapshot, SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight, + WeightInfo, +}; +use codec::Encode; +use frame_election_provider_support::{NposSolver, PerThing128}; +use frame_support::{dispatch::DispatchResult, ensure, traits::Get}; +use frame_system::offchain::SubmitTransaction; +use sp_arithmetic::Perbill; +use sp_npos_elections::{ + assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, is_score_better, + ElectionResult, NposSolution, +}; +use sp_runtime::{ + offchain::storage::{MutateStorageError, StorageValueRef}, + DispatchError, SaturatedConversion, +}; +use sp_std::{boxed::Box, cmp::Ordering, convert::TryFrom, vec::Vec}; + +/// Storage key used to store the last block number at which offchain worker ran. +pub(crate) const OFFCHAIN_LAST_BLOCK: &[u8] = b"parity/multi-phase-unsigned-election"; +/// Storage key used to store the offchain worker running status. +pub(crate) const OFFCHAIN_LOCK: &[u8] = b"parity/multi-phase-unsigned-election/lock"; + +/// Storage key used to cache the solution `call`. +pub(crate) const OFFCHAIN_CACHED_CALL: &[u8] = b"parity/multi-phase-unsigned-election/call"; + +/// A voter's fundamental data: their ID, their stake, and the list of candidates for whom they +/// voted. +pub type Voter = ( + ::AccountId, + sp_npos_elections::VoteWeight, + Vec<::AccountId>, +); + +/// The relative distribution of a voter's stake among the winning targets. +pub type Assignment = + sp_npos_elections::Assignment<::AccountId, SolutionAccuracyOf>; + +/// The [`IndexAssignment`][sp_npos_elections::IndexAssignment] type specialized for a particular +/// runtime `T`. +pub type IndexAssignmentOf = sp_npos_elections::IndexAssignmentOf>; + +/// Error type of the pallet's [`crate::Config::Solver`]. +pub type SolverErrorOf = <::Solver as NposSolver>::Error; +/// Error type for operations related to the OCW npos solution miner. +#[derive(frame_support::DebugNoBound, frame_support::PartialEqNoBound)] +pub enum MinerError { + /// An internal error in the NPoS elections crate. + NposElections(sp_npos_elections::Error), + /// Snapshot data was unavailable unexpectedly. + SnapshotUnAvailable, + /// Submitting a transaction to the pool failed. + PoolSubmissionFailed, + /// The pre-dispatch checks failed for the mined solution. + PreDispatchChecksFailed(DispatchError), + /// The solution generated from the miner is not feasible. + Feasibility(FeasibilityError), + /// Something went wrong fetching the lock. + Lock(&'static str), + /// Cannot restore a solution that was not stored. + NoStoredSolution, + /// Cached solution is not a `submit_unsigned` call. + SolutionCallInvalid, + /// Failed to store a solution. + FailedToStoreSolution, + /// There are no more voters to remove to trim the solution. + NoMoreVoters, + /// An error from the solver. + Solver(SolverErrorOf), +} + +impl From for MinerError { + fn from(e: sp_npos_elections::Error) -> Self { + MinerError::NposElections(e) + } +} + +impl From for MinerError { + fn from(e: FeasibilityError) -> Self { + MinerError::Feasibility(e) + } +} + +/// Save a given call into OCW storage. 
+fn save_solution(call: &Call) -> Result<(), MinerError> { + log!(debug, "saving a call to the offchain storage."); + let storage = StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL); + match storage.mutate::<_, (), _>(|_| Ok(call.clone())) { + Ok(_) => Ok(()), + Err(MutateStorageError::ConcurrentModification(_)) => + Err(MinerError::FailedToStoreSolution), + Err(MutateStorageError::ValueFunctionFailed(_)) => { + // this branch should be unreachable according to the definition of + // `StorageValueRef::mutate`: that function should only ever `Err` if the closure we + // pass it returns an error. however, for safety in case the definition changes, we do + // not optimize the branch away or panic. + Err(MinerError::FailedToStoreSolution) + }, + } +} + +/// Get a saved solution from OCW storage if it exists. +fn restore_solution() -> Result, MinerError> { + StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL) + .get() + .ok() + .flatten() + .ok_or(MinerError::NoStoredSolution) +} + +/// Clear a saved solution from OCW storage. +pub(super) fn kill_ocw_solution() { + log!(debug, "clearing offchain call cache storage."); + let mut storage = StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL); + storage.clear(); +} + +/// Clear the offchain repeat storage. +/// +/// After calling this, the next offchain worker is guaranteed to work, with respect to the +/// frequency repeat. +fn clear_offchain_repeat_frequency() { + let mut last_block = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); + last_block.clear(); +} + +/// `true` when OCW storage contains a solution +#[cfg(test)] +fn ocw_solution_exists() -> bool { + matches!(StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL).get::>(), Ok(Some(_))) +} + +impl Pallet { + /// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way, submit + /// if our call's score is greater than that of the cached solution. + pub fn restore_or_compute_then_maybe_submit() -> Result<(), MinerError> { + log!(debug, "miner attempting to restore or compute an unsigned solution."); + + let call = restore_solution::() + .and_then(|call| { + // ensure the cached call is still current before submitting + if let Call::submit_unsigned { raw_solution, .. } = &call { + // prevent errors arising from state changes in a forkful chain + Self::basic_checks(raw_solution, "restored")?; + Ok(call) + } else { + Err(MinerError::SolutionCallInvalid) + } + }) + .or_else::, _>(|error| { + log!(debug, "restoring solution failed due to {:?}", error); + match error { + MinerError::NoStoredSolution => { + log!(trace, "mining a new solution."); + // if not present or cache invalidated due to feasibility, regenerate. + // note that failing `Feasibility` can only mean that the solution was + // computed over a snapshot that has changed due to a fork. + let call = Self::mine_checked_call()?; + save_solution(&call)?; + Ok(call) + }, + MinerError::Feasibility(_) => { + log!(trace, "wiping infeasible solution."); + // kill the infeasible solution, hopefully in the next runs (whenever they + // may be) we mine a new one. + kill_ocw_solution::(); + clear_offchain_repeat_frequency(); + Err(error) + }, + _ => { + // nothing to do. Return the error as-is. + Err(error) + }, + } + })?; + + Self::submit_call(call) + } + + /// Mine a new solution, cache it, and submit it back to the chain as an unsigned transaction. 
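// ---------------------------------------------------------------------------
// [editor's sketch -- not part of the patch] The `or_else` routing of
// `restore_or_compute_then_maybe_submit` above, reduced to a standalone shape:
// reuse a cached value while it is valid, recompute only when nothing is
// cached, and propagate every other error. `CacheError`, `load`, `recompute`,
// and `store` are hypothetical stand-ins.
#[derive(Debug)]
enum CacheError {
	Missing,
	Stale,
}

fn restore_or_compute(
	load: impl Fn() -> Result<u64, CacheError>,
	recompute: impl Fn() -> u64,
	store: impl Fn(u64),
) -> Result<u64, CacheError> {
	load().or_else(|err| match err {
		// nothing cached: compute fresh, cache it, use it.
		CacheError::Missing => {
			let fresh = recompute();
			store(fresh);
			Ok(fresh)
		},
		// the real code wipes a stale cache here; we simply bubble the error up.
		other => Err(other),
	})
}
// ---------------------------------------------------------------------------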
+ pub fn mine_check_save_submit() -> Result<(), MinerError> { + log!(debug, "miner attempting to compute an unsigned solution."); + + let call = Self::mine_checked_call()?; + save_solution(&call)?; + Self::submit_call(call) + } + + /// Mine a new solution as a call. Performs all checks. + pub fn mine_checked_call() -> Result, MinerError> { + // get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID. + let (raw_solution, witness) = Self::mine_and_check()?; + + let score = raw_solution.score.clone(); + let call: Call = + Call::submit_unsigned { raw_solution: Box::new(raw_solution), witness }.into(); + + log!( + debug, + "mined a solution with score {:?} and size {}", + score, + call.using_encoded(|b| b.len()) + ); + + Ok(call) + } + + fn submit_call(call: Call) -> Result<(), MinerError> { + log!(debug, "miner submitting a solution as an unsigned transaction"); + + SubmitTransaction::>::submit_unsigned_transaction(call.into()) + .map_err(|_| MinerError::PoolSubmissionFailed) + } + + // perform basic checks of a solution's validity + // + // Performance: note that it internally clones the provided solution. + pub fn basic_checks( + raw_solution: &RawSolution>, + solution_type: &str, + ) -> Result<(), MinerError> { + Self::unsigned_pre_dispatch_checks(raw_solution).map_err(|err| { + log!(debug, "pre-dispatch checks failed for {} solution: {:?}", solution_type, err); + MinerError::PreDispatchChecksFailed(err) + })?; + + Self::feasibility_check(raw_solution.clone(), ElectionCompute::Unsigned).map_err( + |err| { + log!(debug, "feasibility check failed for {} solution: {:?}", solution_type, err); + err + }, + )?; + + Ok(()) + } + + /// Mine a new npos solution, with all the relevant checks to make sure that it will be accepted + /// to the chain. + /// + /// If you want an unchecked solution, use [`Pallet::mine_solution`]. + /// If you want a checked solution and submit it at the same time, use + /// [`Pallet::mine_check_save_submit`]. + pub fn mine_and_check( + ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> { + let (raw_solution, witness) = Self::mine_solution::()?; + Self::basic_checks(&raw_solution, "mined")?; + Ok((raw_solution, witness)) + } + + /// Mine a new npos solution. + /// + /// The Npos Solver type, `S`, must have the same AccountId and Error type as the + /// [`crate::Config::Solver`] in order to create a unified return type. + pub fn mine_solution( + ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> + where + S: NposSolver>, + { + let RoundSnapshot { voters, targets } = + Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?; + let desired_targets = Self::desired_targets().ok_or(MinerError::SnapshotUnAvailable)?; + + S::solve(desired_targets as usize, targets, voters) + .map_err(|e| MinerError::Solver::(e)) + .and_then(|e| Self::prepare_election_result::(e)) + } + + /// Convert a raw solution from [`sp_npos_elections::ElectionResult`] to [`RawSolution`], which + /// is ready to be submitted to the chain. + /// + /// Will always reduce the solution as well. + pub fn prepare_election_result( + election_result: ElectionResult, + ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> { + // NOTE: This code path is generally not optimized as it is run offchain. Could use some at + // some point though. + + // storage items. Note: we have already read this from storage, they must be in cache. 
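// [editor's note] The remainder of this function is a pipeline: snapshot
// lookup -> index/cache helper closures -> `reduce` on the staked form -> sort
// voters by descending stake -> weight and length trimming -> re-encode and
// re-score.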
+		let RoundSnapshot { voters, targets } =
+			Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?;
+		let desired_targets = Self::desired_targets().ok_or(MinerError::SnapshotUnAvailable)?;
+
+		// now make some helper closures.
+		let cache = helpers::generate_voter_cache::<T>(&voters);
+		let voter_index = helpers::voter_index_fn::<T>(&cache);
+		let target_index = helpers::target_index_fn::<T>(&targets);
+		let voter_at = helpers::voter_at_fn::<T>(&voters);
+		let target_at = helpers::target_at_fn::<T>(&targets);
+		let stake_of = helpers::stake_of_fn::<T>(&voters, &cache);
+
+		// Compute the size of a solution comprised of the selected arguments.
+		//
+		// This function completes in `O(edges)`; it's expensive, but linear.
+		let encoded_size_of = |assignments: &[IndexAssignmentOf<T>]| {
+			SolutionOf::<T>::try_from(assignments).map(|s| s.encoded_size())
+		};
+
+		let ElectionResult { assignments, winners: _ } = election_result;
+
+		// Reduce (requires round-trip to staked form)
+		let sorted_assignments = {
+			// convert to staked and reduce.
+			let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?;
+
+			// we reduce before sorting in order to ensure that the reduction process doesn't
+			// accidentally change the sort order
+			sp_npos_elections::reduce(&mut staked);
+
+			// Sort the assignments by reversed voter stake. This ensures that we can efficiently
+			// truncate the list.
+			staked.sort_by_key(
+				|sp_npos_elections::StakedAssignment::<T::AccountId> { who, .. }| {
+					// though staked assignments are expressed in terms of absolute stake, we'd
+					// still need to iterate over all votes in order to actually compute the total
+					// stake. it should be faster to look it up from the cache.
+					let stake = cache
+						.get(who)
+						.map(|idx| {
+							let (_, stake, _) = voters[*idx];
+							stake
+						})
+						.unwrap_or_default();
+					sp_std::cmp::Reverse(stake)
+				},
+			);
+
+			// convert back.
+			assignment_staked_to_ratio_normalized(staked)?
+		};
+
+		// convert to `IndexAssignment`. This improves the runtime complexity of repeatedly
+		// converting to `Solution`.
+		let mut index_assignments = sorted_assignments
+			.into_iter()
+			.map(|assignment| IndexAssignmentOf::<T>::new(&assignment, &voter_index, &target_index))
+			.collect::<Result<Vec<_>, _>>()?;
+
+		// trim assignments list for weight and length.
+		let size =
+			SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32 };
+		Self::trim_assignments_weight(
+			desired_targets,
+			size,
+			T::MinerMaxWeight::get(),
+			&mut index_assignments,
+		);
+		Self::trim_assignments_length(
+			T::MinerMaxLength::get(),
+			&mut index_assignments,
+			&encoded_size_of,
+		)?;
+
+		// now make solution.
+		let solution = SolutionOf::<T>::try_from(&index_assignments)?;
+
+		// re-calc score.
+		let score = solution.clone().score(stake_of, voter_at, target_at)?;
+
+		let round = Self::round();
+		Ok((RawSolution { solution, score, round }, size))
+	}
+
+	/// Greedily reduce the size of the solution to fit into the block w.r.t. weight.
+	///
+	/// The weight of the solution is foremost a function of the number of voters (i.e.
+	/// `assignments.len()`). Aside from this, the other components of the weight are invariant.
+	/// The number of winners shall not be changed (otherwise the solution is invalid) and the
+	/// `ElectionSize` is merely a representation of the total number of stakers.
+	///
+	/// Thus, we resort to stripping away some voters from the `assignments`.
+	///
+	/// Note that the solution is already computed, and the winners are elected based on the merit
+	/// of the entire stake in the system. Nonetheless, some of the voters will be removed further
+	/// down the line.
+	///
+	/// Indeed, the score must be computed **after** this step. If this step reduces the score too
+	/// much or removes a winner, then the solution must be discarded **after** this step.
+	pub fn trim_assignments_weight(
+		desired_targets: u32,
+		size: SolutionOrSnapshotSize,
+		max_weight: Weight,
+		assignments: &mut Vec<IndexAssignmentOf<T>>,
+	) {
+		let maximum_allowed_voters =
+			Self::maximum_voter_for_weight::<T::WeightInfo>(desired_targets, size, max_weight);
+		let removing: usize =
+			assignments.len().saturating_sub(maximum_allowed_voters.saturated_into());
+		log!(
+			debug,
+			"from {} assignments, truncating to {} for weight, removing {}",
+			assignments.len(),
+			maximum_allowed_voters,
+			removing,
+		);
+		assignments.truncate(maximum_allowed_voters as usize);
+	}
+
+	/// Greedily reduce the size of the solution to fit into the block w.r.t. length.
+	///
+	/// The length of the solution is largely a function of the number of voters. The number of
+	/// winners cannot be changed. Thus, to reduce the solution size, we need to strip voters.
+	///
+	/// Note that this solution is already computed, and winners are elected based on the merit of
+	/// the total stake in the system. Nevertheless, some of the voters may be removed here.
+	///
+	/// Sometimes, removing a voter can cause a validator to also be implicitly removed, if
+	/// that voter was the only backer of that winner. In such cases, this solution is invalid,
+	/// which will be caught prior to submission.
+	///
+	/// The score must be computed **after** this step. If this step reduces the score too much,
+	/// then the solution must be discarded.
+	pub fn trim_assignments_length(
+		max_allowed_length: u32,
+		assignments: &mut Vec<IndexAssignmentOf<T>>,
+		encoded_size_of: impl Fn(&[IndexAssignmentOf<T>]) -> Result<usize, sp_npos_elections::Error>,
+	) -> Result<(), MinerError<T>> {
+		// Perform a binary search for the maximal subset of `assignments` that can fit into the
+		// allowed length. Having discovered that, we can truncate efficiently.
+		let max_allowed_length: usize = max_allowed_length.saturated_into();
+		let mut high = assignments.len();
+		let mut low = 0;
+
+		// not much we can do if assignments are already empty.
+		if high == low {
+			return Ok(())
+		}
+
+		while high - low > 1 {
+			let test = (high + low) / 2;
+			if encoded_size_of(&assignments[..test])? <= max_allowed_length {
+				low = test;
+			} else {
+				high = test;
+			}
+		}
+		let maximum_allowed_voters = if low < assignments.len() &&
+			encoded_size_of(&assignments[..low + 1])? <= max_allowed_length
+		{
+			low + 1
+		} else {
+			low
+		};
+
+		// ensure our post-conditions are correct
+		debug_assert!(
+			encoded_size_of(&assignments[..maximum_allowed_voters]).unwrap() <= max_allowed_length
+		);
+		debug_assert!(if maximum_allowed_voters < assignments.len() {
+			encoded_size_of(&assignments[..maximum_allowed_voters + 1]).unwrap() >
+				max_allowed_length
+		} else {
+			true
+		});
+
+		// NOTE: before this point, every access was immutable.
+		// after this point, we never error.
+		// check before edit.
+
+		log!(
+			debug,
+			"from {} assignments, truncating to {} for length, removing {}",
+			assignments.len(),
+			maximum_allowed_voters,
+			assignments.len().saturating_sub(maximum_allowed_voters),
+		);
+		assignments.truncate(maximum_allowed_voters);
+
+		Ok(())
+	}
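// ---------------------------------------------------------------------------
// [editor's sketch -- not part of the patch] `trim_assignments_length` above
// binary-searches for the largest prefix of the assignment list whose encoding
// fits a byte budget. The same search, standalone (a `Vec<Vec<u8>>` stands in
// for assignments and summed lengths stand in for the SCALE-encoded size):
fn max_fitting_prefix(items: &[Vec<u8>], max_len: usize) -> usize {
	let encoded_size = |prefix: &[Vec<u8>]| prefix.iter().map(|i| i.len()).sum::<usize>();
	let (mut low, mut high) = (0, items.len());
	// bisect: after the loop, `items[..low]` is known to fit within `max_len`.
	while high - low > 1 {
		let test = (high + low) / 2;
		if encoded_size(&items[..test]) <= max_len {
			low = test;
		} else {
			high = test;
		}
	}
	// as in the real code, check whether one more element still fits.
	if low < items.len() && encoded_size(&items[..low + 1]) <= max_len {
		low + 1
	} else {
		low
	}
}
// ---------------------------------------------------------------------------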
+
+	/// Find the maximum `len` that a solution can have in order to fit into the block weight.
+	///
+	/// This only returns a value between zero and `size.voters`.
+	pub fn maximum_voter_for_weight<W: WeightInfo>(
+		desired_winners: u32,
+		size: SolutionOrSnapshotSize,
+		max_weight: Weight,
+	) -> u32 {
+		if size.voters < 1 {
+			return size.voters
+		}
+
+		let max_voters = size.voters.max(1);
+		let mut voters = max_voters;
+
+		// helper closures.
+		let weight_with = |active_voters: u32| -> Weight {
+			W::submit_unsigned(size.voters, size.targets, active_voters, desired_winners)
+		};
+
+		let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result<u32, ()> {
+			match current_weight.cmp(&max_weight) {
+				Ordering::Less => {
+					let next_voters = voters.checked_add(step);
+					match next_voters {
+						Some(voters) if voters < max_voters => Ok(voters),
+						_ => Err(()),
+					}
+				},
+				Ordering::Greater => voters.checked_sub(step).ok_or(()),
+				Ordering::Equal => Ok(voters),
+			}
+		};
+
+		// First binary-search the right amount of voters
+		let mut step = voters / 2;
+		let mut current_weight = weight_with(voters);
+
+		while step > 0 {
+			match next_voters(current_weight, voters, step) {
+				// proceed with the binary search
+				Ok(next) if next != voters => {
+					voters = next;
+				},
+				// we are out of bounds, break out of the loop.
+				Err(()) => break,
+				// we found the right value - early exit the function.
+				Ok(next) => return next,
+			}
+			step = step / 2;
+			current_weight = weight_with(voters);
+		}
+
+		// Time to finish. We might have reduced less than expected due to a rounding error.
+		// Increase one last time if we have any room left, then reduce until we are sure we are
+		// below the limit.
+		while voters + 1 <= max_voters && weight_with(voters + 1) < max_weight {
+			voters += 1;
+		}
+		while voters.checked_sub(1).is_some() && weight_with(voters) > max_weight {
+			voters -= 1;
+		}
+
+		let final_decision = voters.min(size.voters);
+		debug_assert!(
+			weight_with(final_decision) <= max_weight,
+			"weight_with({}) <= {}",
+			final_decision,
+			max_weight,
+		);
+		final_decision
+	}
+
+	/// Checks if an execution of the offchain worker is permitted at the given block number, or
+	/// not.
+	///
+	/// This makes sure that
+	/// 1. we don't run on previous blocks in case of a re-org
+	/// 2. we don't run twice within a window of length `T::OffchainRepeat`.
+	///
+	/// Returns `Ok(())` if the offchain worker limit is respected, `Err(reason)` otherwise. If
+	/// `Ok(())` is returned, `now` is written to storage and will be used as the baseline in
+	/// further calls.
+	pub fn ensure_offchain_repeat_frequency(now: T::BlockNumber) -> Result<(), MinerError<T>> {
+		let threshold = T::OffchainRepeat::get();
+		let last_block = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK);
+
+		let mutate_stat = last_block.mutate::<_, &'static str, _>(
+			|maybe_head: Result<Option<T::BlockNumber>, _>| {
+				match maybe_head {
+					Ok(Some(head)) if now < head => Err("fork."),
+					Ok(Some(head)) if now >= head && now <= head + threshold =>
+						Err("recently executed."),
+					Ok(Some(head)) if now > head + threshold => {
+						// we can run again now. Write the new head.
+						Ok(now)
+					},
+					_ => {
+						// value doesn't exist. Probably this node just booted up. Write, and run.
+						Ok(now)
+					},
+				}
+			},
+		);
+
+		match mutate_stat {
+			// all good
+			Ok(_) => Ok(()),
+			// failed to write.
+			Err(MutateStorageError::ConcurrentModification(_)) =>
+				Err(MinerError::Lock("failed to write to offchain db (concurrent modification).")),
+			// fork etc.
+			Err(MutateStorageError::ValueFunctionFailed(why)) => Err(MinerError::Lock(why)),
+		}
+	}
+
+	/// Do the basic checks that MUST happen during the validation and pre-dispatch of an unsigned
+	/// transaction.
+ /// + /// Can optionally also be called during dispatch, if needed. + /// + /// NOTE: Ideally, these tests should move more and more outside of this and more to the miner's + /// code, so that we do less and less storage reads here. + pub fn unsigned_pre_dispatch_checks( + raw_solution: &RawSolution>, + ) -> DispatchResult { + // ensure solution is timely. Don't panic yet. This is a cheap check. + ensure!(Self::current_phase().is_unsigned_open(), Error::::PreDispatchEarlySubmission); + + // ensure round is current + ensure!(Self::round() == raw_solution.round, Error::::OcwCallWrongEra); + + // ensure correct number of winners. + ensure!( + Self::desired_targets().unwrap_or_default() == + raw_solution.solution.unique_targets().len() as u32, + Error::::PreDispatchWrongWinnerCount, + ); + + // ensure score is being improved. Panic henceforth. + ensure!( + Self::queued_solution().map_or(true, |q: ReadySolution<_>| is_score_better::( + raw_solution.score, + q.score, + T::SolutionImprovementThreshold::get() + )), + Error::::PreDispatchWeakSubmission, + ); + + Ok(()) + } +} + +#[cfg(test)] +mod max_weight { + #![allow(unused_variables)] + use super::*; + use crate::mock::MultiPhase; + + struct TestWeight; + impl crate::weights::WeightInfo for TestWeight { + fn elect_queued(a: u32, d: u32) -> Weight { + unreachable!() + } + fn create_snapshot_internal() -> Weight { + unreachable!() + } + fn on_initialize_nothing() -> Weight { + unreachable!() + } + fn on_initialize_open_signed() -> Weight { + unreachable!() + } + fn on_initialize_open_unsigned() -> Weight { + unreachable!() + } + fn finalize_signed_phase_accept_solution() -> Weight { + unreachable!() + } + fn finalize_signed_phase_reject_solution() -> Weight { + unreachable!() + } + fn submit(c: u32) -> Weight { + unreachable!() + } + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32) -> Weight { + (0 * v + 0 * t + 1000 * a + 0 * d) as Weight + } + fn feasibility_check(v: u32, _t: u32, a: u32, d: u32) -> Weight { + unreachable!() + } + } + + #[test] + fn find_max_voter_binary_search_works() { + let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; + + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 0), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 999), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1990), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1999), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2000), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2001), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2010), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2990), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2999), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3000), 3); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3333), 3); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 5500), 5); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 7777), 7); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 9999), 9); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 10_000), 10); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 10_999), 10); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 11_000), 10); + 
assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 22_000), 10); + + let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; + + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 0), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 999), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1990), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1999), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2010), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3333), 1); + + let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; + + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 0), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 999), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1999), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2000), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2001), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2010), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3333), 2); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + mock::{ + roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, Call as OuterCall, + ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, Origin, Runtime, System, + TestNposSolution, TrimHelpers, UnsignedPhase, + }, + CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, + TransactionValidityError, + }; + use codec::Decode; + use frame_benchmarking::Zero; + use frame_support::{assert_noop, assert_ok, dispatch::Dispatchable, traits::OffchainWorker}; + use sp_npos_elections::IndexAssignment; + use sp_runtime::{ + offchain::storage_lock::{BlockAndTime, StorageLock}, + traits::ValidateUnsigned, + PerU16, + }; + + type Assignment = crate::unsigned::Assignment; + + #[test] + fn validate_unsigned_retracts_wrong_phase() { + ExtBuilder::default().desired_targets(0).build_and_execute(|| { + let solution = + RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned { + raw_solution: Box::new(solution.clone()), + witness: witness(), + }; + + // initial + assert_eq!(MultiPhase::current_phase(), Phase::Off); + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + + // signed + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + + // unsigned + roll_to(25); + 
assert!(MultiPhase::current_phase().is_unsigned()); + + assert!(::validate_unsigned( + TransactionSource::Local, + &call + ) + .is_ok()); + assert!(::pre_dispatch(&call).is_ok()); + + // unsigned -- but not enabled. + >::put(Phase::Unsigned((false, 25))); + assert!(MultiPhase::current_phase().is_unsigned()); + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + }) + } + + #[test] + fn validate_unsigned_retracts_low_score() { + ExtBuilder::default().desired_targets(0).build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let solution = + RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned { + raw_solution: Box::new(solution.clone()), + witness: witness(), + }; + + // initial + assert!(::validate_unsigned( + TransactionSource::Local, + &call + ) + .is_ok()); + assert!(::pre_dispatch(&call).is_ok()); + + // set a better score + let ready = ReadySolution { score: [10, 0, 0], ..Default::default() }; + >::put(ready); + + // won't work anymore. + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(2)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(2)) + )); + }) + } + + #[test] + fn validate_unsigned_retracts_incorrect_winner_count() { + ExtBuilder::default().desired_targets(1).build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let raw = RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = + Call::submit_unsigned { raw_solution: Box::new(raw.clone()), witness: witness() }; + assert_eq!(raw.solution.unique_targets().len(), 0); + + // won't work anymore. + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(1)) + )); + }) + } + + #[test] + fn priority_is_set() { + ExtBuilder::default() + .miner_tx_priority(20) + .desired_targets(0) + .build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let solution = + RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned { + raw_solution: Box::new(solution.clone()), + witness: witness(), + }; + + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap() + .priority, + 25 + ); + }) + } + + #[test] + #[should_panic(expected = "Invalid unsigned submission must produce invalid block and \ + deprive validator from their authoring reward.: \ + Module { index: 2, error: 1, message: \ + Some(\"PreDispatchWrongWinnerCount\") }")] + fn unfeasible_solution_panics() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // This is in itself an invalid BS solution. 
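// [editor's note] Invalid because a default solution has zero unique targets
// while the mock expects a non-zero number of winners, so dispatching it must
// trip `PreDispatchWrongWinnerCount`, matching the `should_panic` expectation.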
+ let solution = + RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned { + raw_solution: Box::new(solution.clone()), + witness: witness(), + }; + let outer_call: OuterCall = call.into(); + let _ = outer_call.dispatch(Origin::none()); + }) + } + + #[test] + #[should_panic(expected = "Invalid unsigned submission must produce invalid block and \ + deprive validator from their authoring reward.")] + fn wrong_witness_panics() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // This solution is unfeasible as well, but we won't even get there. + let solution = + RawSolution:: { score: [5, 0, 0], ..Default::default() }; + + let mut correct_witness = witness(); + correct_witness.voters += 1; + correct_witness.targets -= 1; + let call = Call::submit_unsigned { + raw_solution: Box::new(solution.clone()), + witness: correct_witness, + }; + let outer_call: OuterCall = call.into(); + let _ = outer_call.dispatch(Origin::none()); + }) + } + + #[test] + fn miner_works() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // ensure we have snapshots in place. + assert!(MultiPhase::snapshot().is_some()); + assert_eq!(MultiPhase::desired_targets().unwrap(), 2); + + // mine seq_phragmen solution with 2 iters. + let (solution, witness) = + MultiPhase::mine_solution::<::Solver>().unwrap(); + + // ensure this solution is valid. + assert!(MultiPhase::queued_solution().is_none()); + assert_ok!(MultiPhase::submit_unsigned(Origin::none(), Box::new(solution), witness)); + assert!(MultiPhase::queued_solution().is_some()); + }) + } + + #[test] + fn miner_trims_weight() { + ExtBuilder::default() + .miner_weight(100) + .mock_weight_info(true) + .build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let (raw, witness) = + MultiPhase::mine_solution::<::Solver>().unwrap(); + let solution_weight = ::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + raw.solution.voter_count() as u32, + raw.solution.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 35); + assert_eq!(raw.solution.voter_count(), 5); + + // now reduce the max weight + ::set(25); + + let (raw, witness) = + MultiPhase::mine_solution::<::Solver>().unwrap(); + let solution_weight = ::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + raw.solution.voter_count() as u32, + raw.solution.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 25); + assert_eq!(raw.solution.voter_count(), 3); + }) + } + + #[test] + fn miner_will_not_submit_if_not_enough_winners() { + let (mut ext, _) = ExtBuilder::default().desired_targets(8).build_offchainify(0); + ext.execute_with(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + assert_eq!( + MultiPhase::mine_check_save_submit().unwrap_err(), + MinerError::PreDispatchChecksFailed(DispatchError::Module { + index: 2, + error: 1, + message: Some("PreDispatchWrongWinnerCount"), + }), + ); + }) + } + + #[test] + fn unsigned_per_dispatch_checks_can_only_submit_threshold_better() { + ExtBuilder::default() + .desired_targets(1) + .add_voter(7, 2, vec![10]) + .add_voter(8, 5, vec![10]) + .solution_improvement_threshold(Perbill::from_percent(50)) + .build_and_execute(|| { + roll_to(25); + 
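// with a 50% improvement threshold, the queued score of 10 set up below can only be + // replaced by a submission that is at least 50% better: 12 will fail, 17 will pass. +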
assert!(MultiPhase::current_phase().is_unsigned());
+ assert_eq!(MultiPhase::desired_targets().unwrap(), 1);
+
+ // an initial solution
+ let result = ElectionResult {
+ // note: the second element (the backing stake) is not important here.
+ winners: vec![(10, 10)],
+ assignments: vec![Assignment {
+ who: 10,
+ distribution: vec![(10, PerU16::one())],
+ }],
+ };
+ let (solution, witness) = MultiPhase::prepare_election_result(result).unwrap();
+ assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution));
+ assert_ok!(MultiPhase::submit_unsigned(
+ Origin::none(),
+ Box::new(solution),
+ witness
+ ));
+ assert_eq!(MultiPhase::queued_solution().unwrap().score[0], 10);
+
+ // trial 1: a solution whose score is only 2 more, i.e. 20% better in the first element.
+ let result = ElectionResult {
+ winners: vec![(10, 12)],
+ assignments: vec![
+ Assignment { who: 10, distribution: vec![(10, PerU16::one())] },
+ Assignment {
+ who: 7,
+ // note: this percent doesn't even matter, in the solution it is 100%.
+ distribution: vec![(10, PerU16::one())],
+ },
+ ],
+ };
+ let (solution, _) = MultiPhase::prepare_election_result(result).unwrap();
+ // 12 is not 50% more than 10
+ assert_eq!(solution.score[0], 12);
+ assert_noop!(
+ MultiPhase::unsigned_pre_dispatch_checks(&solution),
+ Error::::PreDispatchWeakSubmission,
+ );
+ // submitting this would actually panic.
+
+ // trial 2: a solution whose score is 7 more, i.e. 70% better in the first element.
+ let result = ElectionResult {
+ winners: vec![(10, 12)],
+ assignments: vec![
+ Assignment { who: 10, distribution: vec![(10, PerU16::one())] },
+ Assignment { who: 7, distribution: vec![(10, PerU16::one())] },
+ Assignment {
+ who: 8,
+ // note: this percent doesn't even matter, in the solution it is 100%.
+ distribution: vec![(10, PerU16::one())],
+ },
+ ],
+ };
+ let (solution, witness) = MultiPhase::prepare_election_result(result).unwrap();
+ assert_eq!(solution.score[0], 17);
+
+ // and it is fine
+ assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution));
+ assert_ok!(MultiPhase::submit_unsigned(
+ Origin::none(),
+ Box::new(solution),
+ witness
+ ));
+ })
+ }
+
+ #[test]
+ fn ocw_lock_prevents_frequent_execution() {
+ let (mut ext, _) = ExtBuilder::default().build_offchainify(0);
+ ext.execute_with(|| {
+ let offchain_repeat = ::OffchainRepeat::get();
+
+ roll_to(25);
+ assert!(MultiPhase::current_phase().is_unsigned());
+
+ // first execution -- okay.
+ assert!(MultiPhase::ensure_offchain_repeat_frequency(25).is_ok());
+
+ // next block: rejected.
+ assert_noop!(
+ MultiPhase::ensure_offchain_repeat_frequency(26),
+ MinerError::Lock("recently executed.")
+ );
+
+ // allowed after `OFFCHAIN_REPEAT`
+ assert!(
+ MultiPhase::ensure_offchain_repeat_frequency((26 + offchain_repeat).into()).is_ok()
+ );
+
+ // a fork-like situation: re-execute the last 3.
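+ // (the run recorded at 26 + offchain_repeat above still locks these earlier heights.)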
+ assert!(MultiPhase::ensure_offchain_repeat_frequency(
+ (26 + offchain_repeat - 3).into()
+ )
+ .is_err());
+ assert!(MultiPhase::ensure_offchain_repeat_frequency(
+ (26 + offchain_repeat - 2).into()
+ )
+ .is_err());
+ assert!(MultiPhase::ensure_offchain_repeat_frequency(
+ (26 + offchain_repeat - 1).into()
+ )
+ .is_err());
+ })
+ }
+
+ #[test]
+ fn ocw_lock_released_after_successful_execution() {
+ // first, ensure that a successful execution releases the lock
+ let (mut ext, pool) = ExtBuilder::default().build_offchainify(0);
+ ext.execute_with(|| {
+ let guard = StorageValueRef::persistent(&OFFCHAIN_LOCK);
+ let last_block = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK);
+
+ roll_to(25);
+ assert!(MultiPhase::current_phase().is_unsigned());
+
+ // initially, the lock is not set.
+ assert!(guard.get::().unwrap().is_none());
+
+ // a successful end-to-end execution.
+ MultiPhase::offchain_worker(25);
+ assert_eq!(pool.read().transactions.len(), 1);
+
+ // afterwards, the lock is not set either.
+ assert!(guard.get::().unwrap().is_none());
+ assert_eq!(last_block.get::().unwrap(), Some(25));
+ });
+ }
+
+ #[test]
+ fn ocw_lock_prevents_overlapping_execution() {
+ // ensure that while the guard is held, a new execution is not allowed.
+ let (mut ext, pool) = ExtBuilder::default().build_offchainify(0);
+ ext.execute_with(|| {
+ roll_to(25);
+ assert!(MultiPhase::current_phase().is_unsigned());
+
+ // artificially set the lock, as if another thread is mid-way through.
+ let mut lock = StorageLock::>::with_block_deadline(
+ OFFCHAIN_LOCK,
+ UnsignedPhase::get().saturated_into(),
+ );
+ let guard = lock.lock();
+
+ // nothing submitted.
+ MultiPhase::offchain_worker(25);
+ assert_eq!(pool.read().transactions.len(), 0);
+ MultiPhase::offchain_worker(26);
+ assert_eq!(pool.read().transactions.len(), 0);
+
+ drop(guard);
+
+ // 🎉 !
+ MultiPhase::offchain_worker(25);
+ assert_eq!(pool.read().transactions.len(), 1);
+ });
+ }
+
+ #[test]
+ fn ocw_only_runs_when_unsigned_open_now() {
+ let (mut ext, pool) = ExtBuilder::default().build_offchainify(0);
+ ext.execute_with(|| {
+ roll_to(25);
+ assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25)));
+
+ // we must clear the offchain storage to ensure the offchain execution check doesn't get
+ // in the way.
+ let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK);
+
+ MultiPhase::offchain_worker(24);
+ assert!(pool.read().transactions.len().is_zero());
+ storage.clear();
+
+ // creates, caches, and submits without expecting a previous cache value.
+ MultiPhase::offchain_worker(25);
+ assert_eq!(pool.read().transactions.len(), 1);
+ // assume that the tx has been processed.
+ pool.try_write().unwrap().transactions.clear();
+
+ // locked, but it has also already cached a solution.
+ MultiPhase::offchain_worker(26);
+ assert!(pool.read().transactions.len().is_zero());
+ })
+ }
+
+ #[test]
+ fn ocw_clears_cache_after_election() {
+ let (mut ext, _pool) = ExtBuilder::default().build_offchainify(0);
+ ext.execute_with(|| {
+ roll_to(25);
+ assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25)));
+
+ // we must clear the offchain storage to ensure the offchain execution check doesn't get
+ // in the way.
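+ // (`OFFCHAIN_LAST_BLOCK` caches the height of the last offchain-worker run.)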
+ let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); + storage.clear(); + + assert!( + !ocw_solution_exists::(), + "no solution should be present before we mine one", + ); + + // creates and cache a solution + MultiPhase::offchain_worker(25); + assert!( + ocw_solution_exists::(), + "a solution must be cached after running the worker", + ); + + // after an election, the solution must be cleared + // we don't actually care about the result of the election + roll_to(26); + let _ = MultiPhase::do_elect(); + MultiPhase::offchain_worker(26); + assert!(!ocw_solution_exists::(), "elections must clear the ocw cache"); + }) + } + + #[test] + fn ocw_resubmits_after_offchain_repeat() { + let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); + ext.execute_with(|| { + const BLOCK: u64 = 25; + let block_plus = |delta: i32| ((BLOCK as i32) + delta) as u64; + let offchain_repeat = ::OffchainRepeat::get(); + + roll_to(BLOCK); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, BLOCK))); + + // we must clear the offchain storage to ensure the offchain execution check doesn't get + // in the way. + let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); + + MultiPhase::offchain_worker(block_plus(-1)); + assert!(pool.read().transactions.len().is_zero()); + storage.clear(); + + // creates, caches, submits without expecting previous cache value + MultiPhase::offchain_worker(BLOCK); + assert_eq!(pool.read().transactions.len(), 1); + let tx_cache = pool.read().transactions[0].clone(); + // assume that the tx has been processed + pool.try_write().unwrap().transactions.clear(); + + // attempts to resubmit the tx after the threshold has expired + // note that we have to add 1: the semantics forbid resubmission at + // BLOCK + offchain_repeat + MultiPhase::offchain_worker(block_plus(1 + offchain_repeat as i32)); + assert_eq!(pool.read().transactions.len(), 1); + + // resubmitted tx is identical to first submission + let tx = &pool.read().transactions[0]; + assert_eq!(&tx_cache, tx); + }) + } + + #[test] + fn ocw_regenerates_and_resubmits_after_offchain_repeat() { + let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); + ext.execute_with(|| { + const BLOCK: u64 = 25; + let block_plus = |delta: i32| ((BLOCK as i32) + delta) as u64; + let offchain_repeat = ::OffchainRepeat::get(); + + roll_to(BLOCK); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, BLOCK))); + + // we must clear the offchain storage to ensure the offchain execution check doesn't get + // in the way. 
+ let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); + + MultiPhase::offchain_worker(block_plus(-1)); + assert!(pool.read().transactions.len().is_zero()); + storage.clear(); + + // creates, caches, submits without expecting previous cache value + MultiPhase::offchain_worker(BLOCK); + assert_eq!(pool.read().transactions.len(), 1); + let tx_cache = pool.read().transactions[0].clone(); + // assume that the tx has been processed + pool.try_write().unwrap().transactions.clear(); + + // remove the cached submitted tx + // this ensures that when the resubmit window rolls around, we're ready to regenerate + // from scratch if necessary + let mut call_cache = StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL); + assert!(matches!(call_cache.get::>(), Ok(Some(_call)))); + call_cache.clear(); + + // attempts to resubmit the tx after the threshold has expired + // note that we have to add 1: the semantics forbid resubmission at + // BLOCK + offchain_repeat + MultiPhase::offchain_worker(block_plus(1 + offchain_repeat as i32)); + assert_eq!(pool.read().transactions.len(), 1); + + // resubmitted tx is identical to first submission + let tx = &pool.read().transactions[0]; + assert_eq!(&tx_cache, tx); + }) + } + + #[test] + fn ocw_can_submit_to_pool() { + let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); + ext.execute_with(|| { + roll_to_with_ocw(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + // OCW must have submitted now + + let encoded = pool.read().transactions[0].clone(); + let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); + let call = extrinsic.call; + assert!(matches!(call, OuterCall::MultiPhase(Call::submit_unsigned { .. }))); + }) + } + + #[test] + fn ocw_solution_must_have_correct_round() { + let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); + ext.execute_with(|| { + roll_to_with_ocw(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + // OCW must have submitted now + // now, before we check the call, update the round + >::mutate(|round| *round += 1); + + let encoded = pool.read().transactions[0].clone(); + let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); + let call = match extrinsic.call { + OuterCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, + _ => panic!("bad call: unexpected submission"), + }; + + // Custom(7) maps to PreDispatchChecksFailed + let pre_dispatch_check_error = + TransactionValidityError::Invalid(InvalidTransaction::Custom(7)); + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &call, + ) + .unwrap_err(), + pre_dispatch_check_error, + ); + assert_eq!( + ::pre_dispatch(&call).unwrap_err(), + pre_dispatch_check_error, + ); + }) + } + + #[test] + fn trim_assignments_length_does_not_modify_when_short_enough() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + + // given + let TrimHelpers { mut assignments, encoded_size_of, .. 
} = trim_helpers(); + let solution = SolutionOf::::try_from(assignments.as_slice()).unwrap(); + let encoded_len = solution.encoded_size() as u32; + let solution_clone = solution.clone(); + + // when + MultiPhase::trim_assignments_length(encoded_len, &mut assignments, encoded_size_of) + .unwrap(); + + // then + let solution = SolutionOf::::try_from(assignments.as_slice()).unwrap(); + assert_eq!(solution, solution_clone); + }); + } + + #[test] + fn trim_assignments_length_modifies_when_too_long() { + ExtBuilder::default().build().execute_with(|| { + roll_to(25); + + // given + let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); + let solution = SolutionOf::::try_from(assignments.as_slice()).unwrap(); + let encoded_len = solution.encoded_size(); + let solution_clone = solution.clone(); + + // when + MultiPhase::trim_assignments_length( + encoded_len as u32 - 1, + &mut assignments, + encoded_size_of, + ) + .unwrap(); + + // then + let solution = SolutionOf::::try_from(assignments.as_slice()).unwrap(); + assert_ne!(solution, solution_clone); + assert!(solution.encoded_size() < encoded_len); + }); + } + + #[test] + fn trim_assignments_length_trims_lowest_stake() { + ExtBuilder::default().build().execute_with(|| { + roll_to(25); + + // given + let TrimHelpers { voters, mut assignments, encoded_size_of, voter_index } = + trim_helpers(); + let solution = SolutionOf::::try_from(assignments.as_slice()).unwrap(); + let encoded_len = solution.encoded_size() as u32; + let count = assignments.len(); + let min_stake_voter = voters + .iter() + .map(|(id, weight, _)| (weight, id)) + .min() + .and_then(|(_, id)| voter_index(id)) + .unwrap(); + + // when + MultiPhase::trim_assignments_length(encoded_len - 1, &mut assignments, encoded_size_of) + .unwrap(); + + // then + assert_eq!(assignments.len(), count - 1, "we must have removed exactly one assignment"); + assert!( + assignments.iter().all(|IndexAssignment { who, .. }| *who != min_stake_voter), + "min_stake_voter must no longer be in the set of voters", + ); + }); + } + + #[test] + fn trim_assignments_length_wont_panic() { + // we shan't panic if assignments are initially empty. + ExtBuilder::default().build_and_execute(|| { + let encoded_size_of = Box::new(|assignments: &[IndexAssignmentOf]| { + SolutionOf::::try_from(assignments).map(|solution| solution.encoded_size()) + }); + + let mut assignments = vec![]; + + // since we have 16 fields, we need to store the length fields of 16 vecs, thus 16 bytes + // minimum. + let min_solution_size = encoded_size_of(&assignments).unwrap(); + assert_eq!(min_solution_size, SolutionOf::::LIMIT); + + // all of this should not panic. + MultiPhase::trim_assignments_length(0, &mut assignments, encoded_size_of.clone()) + .unwrap(); + MultiPhase::trim_assignments_length(1, &mut assignments, encoded_size_of.clone()) + .unwrap(); + MultiPhase::trim_assignments_length( + min_solution_size as u32, + &mut assignments, + encoded_size_of, + ) + .unwrap(); + }); + + // or when we trim it to zero. + ExtBuilder::default().build_and_execute(|| { + // we need snapshot for `trim_helpers` to work. + roll_to(25); + let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); + assert!(assignments.len() > 0); + + // trim to min solution size. 
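+ // (with only the bare minimum of space available, every assignment must be dropped.)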
+ let min_solution_size = SolutionOf::::LIMIT as u32; + MultiPhase::trim_assignments_length( + min_solution_size, + &mut assignments, + encoded_size_of, + ) + .unwrap(); + assert_eq!(assignments.len(), 0); + }); + } + + // all the other solution-generation functions end up delegating to `mine_solution`, so if we + // demonstrate that `mine_solution` solutions are all trimmed to an acceptable length, then + // we know that higher-level functions will all also have short-enough solutions. + #[test] + fn mine_solution_solutions_always_within_acceptable_length() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + + // how long would the default solution be? + let solution = MultiPhase::mine_solution::<::Solver>().unwrap(); + let max_length = ::MinerMaxLength::get(); + let solution_size = solution.0.solution.encoded_size(); + assert!(solution_size <= max_length as usize); + + // now set the max size to less than the actual size and regenerate + ::MinerMaxLength::set(solution_size as u32 - 1); + let solution = MultiPhase::mine_solution::<::Solver>().unwrap(); + let max_length = ::MinerMaxLength::get(); + let solution_size = solution.0.solution.encoded_size(); + assert!(solution_size <= max_length as usize); + }); + } +} diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs new file mode 100644 index 0000000000000..262838bcb9e70 --- /dev/null +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -0,0 +1,289 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_election_provider_multi_phase +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-08-18, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_election_provider_multi_phase +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/election-provider-multi-phase/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_election_provider_multi_phase. 
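+///
+/// Parameter names below follow the benchmark components: roughly, `v` is the number of snapshot
+/// voters, `t` the number of snapshot targets, `a` the number of active voters (assignments) in
+/// the solution, `d` the number of desired targets, and `c` the number of queued signed
+/// submissions.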
+pub trait WeightInfo { + fn on_initialize_nothing() -> Weight; + fn on_initialize_open_signed() -> Weight; + fn on_initialize_open_unsigned() -> Weight; + fn finalize_signed_phase_accept_solution() -> Weight; + fn finalize_signed_phase_reject_solution() -> Weight; + fn create_snapshot_internal() -> Weight; + fn elect_queued(a: u32, d: u32, ) -> Weight; + fn submit(c: u32, ) -> Weight; + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight; + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight; +} + +/// Weights for pallet_election_provider_multi_phase using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking CurrentPlannedSession (r:1 w:0) + // Storage: Staking ErasStartSessionIndex (r:1 w:0) + // Storage: Babe EpochIndex (r:1 w:0) + // Storage: Babe GenesisSlot (r:1 w:0) + // Storage: Babe CurrentSlot (r:1 w:0) + // Storage: Staking ForceEra (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + fn on_initialize_nothing() -> Weight { + (23_878_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + } + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + fn on_initialize_open_signed() -> Weight { + (34_547_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + fn on_initialize_open_unsigned() -> Weight { + (33_568_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) + fn finalize_signed_phase_accept_solution() -> Weight { + (50_596_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn finalize_signed_phase_reject_solution() -> Weight { + (33_389_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) + // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) + fn create_snapshot_internal() -> Weight { + (8_835_233_000 as Weight) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) + // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:1) + // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:1 w:0) + // Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) + // Storage: ElectionProviderMultiPhase Round (r:1 w:1) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) + // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + fn elect_queued(a: u32, d: u32, ) -> Weight { + (82_395_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_769_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 13_000 + .saturating_add((320_000 as 
Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(8 as Weight)) + } + // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) + // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) + // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) + // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) + fn submit(c: u32, ) -> Weight { + (77_368_000 as Weight) + // Standard Error: 9_000 + .saturating_add((369_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) + // Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) + // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 4_000 + .saturating_add((3_553_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 23_000 + .saturating_add((35_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 7_000 + .saturating_add((10_600_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 59_000 + .saturating_add((6_128_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) + // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) + fn feasibility_check(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 3_000 + .saturating_add((3_478_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 6_000 + .saturating_add((8_930_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 47_000 + .saturating_add((5_199_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking CurrentPlannedSession (r:1 w:0) + // Storage: Staking ErasStartSessionIndex (r:1 w:0) + // Storage: Babe EpochIndex (r:1 w:0) + // Storage: Babe GenesisSlot (r:1 w:0) + // Storage: Babe CurrentSlot (r:1 w:0) + // Storage: Staking ForceEra (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + fn on_initialize_nothing() -> Weight { + (23_878_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + } + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + fn on_initialize_open_signed() -> Weight { + (34_547_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: 
ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + fn on_initialize_open_unsigned() -> Weight { + (33_568_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: System Account (r:1 w:1) + // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) + fn finalize_signed_phase_accept_solution() -> Weight { + (50_596_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: System Account (r:1 w:1) + fn finalize_signed_phase_reject_solution() -> Weight { + (33_389_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) + // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) + fn create_snapshot_internal() -> Weight { + (8_835_233_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) + // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:1) + // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:1 w:0) + // Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) + // Storage: ElectionProviderMultiPhase Round (r:1 w:1) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) + // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + fn elect_queued(a: u32, d: u32, ) -> Weight { + (82_395_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_769_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 13_000 + .saturating_add((320_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + } + // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) + // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) + // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) + // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) + fn submit(c: u32, ) -> Weight { + (77_368_000 as Weight) + // Standard Error: 9_000 + .saturating_add((369_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) + // Storage: ElectionProviderMultiPhase QueuedSolution (r:1 w:1) + // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) + // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 4_000 + .saturating_add((3_553_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 23_000 + .saturating_add((35_000 as Weight).saturating_mul(t as Weight)) + // Standard 
Error: 7_000 + .saturating_add((10_600_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 59_000 + .saturating_add((6_128_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: ElectionProviderMultiPhase Round (r:1 w:0) + // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) + // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) + // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) + fn feasibility_check(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 3_000 + .saturating_add((3_478_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 6_000 + .saturating_add((8_930_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 47_000 + .saturating_add((5_199_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + } +} diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml new file mode 100644 index 0000000000000..dfe2b11024334 --- /dev/null +++ b/frame/election-provider-support/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "frame-election-provider-support" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "election provider supporting traits" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../primitives/arithmetic" } +sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } + +[dev-dependencies] +sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elections" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } + +[features] +default = ["std"] +std = [ + "codec/std", + "sp-std/std", + "sp-npos-elections/std", + "sp-arithmetic/std", + "frame-support/std", + "frame-system/std", +] +runtime-benchmarks = [] diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs new file mode 100644 index 0000000000000..d2c4b1053cc6d --- /dev/null +++ b/frame/election-provider-support/src/lib.rs @@ -0,0 +1,364 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Primitive traits for providing election functionality.
+//!
+//! This crate provides two traits that interact to enable extensible election functionality
+//! within FRAME pallets.
+//!
+//! Something that provides the functionality of an election will implement [`ElectionProvider`],
+//! whilst needing an associated [`ElectionProvider::DataProvider`], which needs to be fulfilled by
+//! an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* the receiver
+//! of the election, resulting in a diagram as below:
+//!
+//! ```ignore
+//! ElectionDataProvider
+//! <------------------------------------------+
+//! | |
+//! v |
+//! +-----+----+ +------+---+
+//! | | | |
+//! pallet-do-election | | | | pallet-needs-election
+//! | | | |
+//! | | | |
+//! +-----+----+ +------+---+
+//! | ^
+//! | |
+//! +------------------------------------------+
+//! ElectionProvider
+//! ```
+//!
+//! > It could also be the case that a third party pallet (C) provides the election data to an
+//! > election provider (B), which then passes the election result to another pallet (A).
+//!
+//! ## Election Types
+//!
+//! Typically, two types of elections exist:
+//!
+//! 1. **Stateless**: Election data is provided, and the election result is immediately ready.
+//! 2. **Stateful**: Election data is queried ahead of time, and the election result might be
+//! ready some number of blocks in the future.
+//!
+//! To accommodate both types of election in one trait, the traits lean toward **stateful
+//! election**, as it is the more general of the two. This is why [`ElectionProvider::elect`]
+//! has no parameters. All value and type parameters must be provided by the [`ElectionDataProvider`]
+//! trait, even if the election happens immediately.
+//!
+//! ## Election Data
+//!
+//! The data associated with an election, i.e. what the [`ElectionDataProvider`] must convey,
+//! is as follows:
+//!
+//! 1. A list of voters, with their stake.
+//! 2. A list of targets (i.e. _candidates_).
+//! 3. The number of desired targets to be elected (i.e. _winners_).
+//!
+//! In addition to that, the [`ElectionDataProvider`] must also give [`ElectionProvider`] a hint
+//! about when the next election might happen ([`ElectionDataProvider::next_election_prediction`]).
+//! A stateless election provider would probably ignore this. A stateful election provider can use
+//! it to prepare the election result in advance.
+//!
+//! Nonetheless, an [`ElectionProvider`] shan't rely on this and should preferably provide some
+//! means of a fallback election as well, in case `elect` is called prematurely.
+//!
+//! ## Example
+//!
+//! ```rust
+//! # use frame_election_provider_support::{*, data_provider};
+//! # use sp_npos_elections::{Support, Assignment};
+//!
+//! type AccountId = u64;
+//! type Balance = u64;
+//! type BlockNumber = u32;
+//!
+//! mod data_provider_mod {
+//! use super::*;
+//!
+//! pub trait Config: Sized {
+//! type ElectionProvider: ElectionProvider<
+//! AccountId,
+//! BlockNumber,
+//! DataProvider = Module,
+//! >;
+//! }
+//!
+//!
pub struct Module(std::marker::PhantomData); +//! +//! impl ElectionDataProvider for Module { +//! const MAXIMUM_VOTES_PER_VOTER: u32 = 1; +//! fn desired_targets() -> data_provider::Result { +//! Ok(1) +//! } +//! fn voters(maybe_max_len: Option) +//! -> data_provider::Result)>> +//! { +//! Ok(Default::default()) +//! } +//! fn targets(maybe_max_len: Option) -> data_provider::Result> { +//! Ok(vec![10, 20, 30]) +//! } +//! fn next_election_prediction(now: BlockNumber) -> BlockNumber { +//! 0 +//! } +//! } +//! } +//! +//! +//! mod generic_election_provider { +//! use super::*; +//! +//! pub struct GenericElectionProvider(std::marker::PhantomData); +//! +//! pub trait Config { +//! type DataProvider: ElectionDataProvider; +//! } +//! +//! impl ElectionProvider for GenericElectionProvider { +//! type Error = &'static str; +//! type DataProvider = T::DataProvider; +//! +//! fn elect() -> Result, Self::Error> { +//! Self::DataProvider::targets(None) +//! .map_err(|_| "failed to elect") +//! .map(|t| vec![(t[0], Support::default())]) +//! } +//! } +//! } +//! +//! mod runtime { +//! use super::generic_election_provider; +//! use super::data_provider_mod; +//! use super::AccountId; +//! +//! struct Runtime; +//! impl generic_election_provider::Config for Runtime { +//! type DataProvider = data_provider_mod::Module; +//! } +//! +//! impl data_provider_mod::Config for Runtime { +//! type ElectionProvider = generic_election_provider::GenericElectionProvider; +//! } +//! +//! } +//! +//! # fn main() {} +//! ``` + +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod onchain; +use frame_support::traits::Get; +use sp_std::{fmt::Debug, prelude::*}; + +/// Re-export some type as they are used in the interface. +pub use sp_arithmetic::PerThing; +pub use sp_npos_elections::{ + Assignment, ElectionResult, ExtendedBalance, IdentifierT, PerThing128, Support, Supports, + VoteWeight, +}; + +/// Types that are used by the data provider trait. +pub mod data_provider { + /// Alias for the result type of the election data provider. + pub type Result = sp_std::result::Result; +} + +/// Something that can provide the data to an [`ElectionProvider`]. +pub trait ElectionDataProvider { + /// Maximum number of votes per voter that this data provider is providing. + const MAXIMUM_VOTES_PER_VOTER: u32; + + /// All possible targets for the election, i.e. the candidates. + /// + /// If `maybe_max_len` is `Some(v)` then the resulting vector MUST NOT be longer than `v` items + /// long. + /// + /// This should be implemented as a self-weighing function. The implementor should register its + /// appropriate weight at the end of execution with the system pallet directly. + fn targets(maybe_max_len: Option) -> data_provider::Result>; + + /// All possible voters for the election. + /// + /// Note that if a notion of self-vote exists, it should be represented here. + /// + /// If `maybe_max_len` is `Some(v)` then the resulting vector MUST NOT be longer than `v` items + /// long. + /// + /// This should be implemented as a self-weighing function. The implementor should register its + /// appropriate weight at the end of execution with the system pallet directly. + fn voters( + maybe_max_len: Option, + ) -> data_provider::Result)>>; + + /// The number of targets to elect. + /// + /// This should be implemented as a self-weighing function. The implementor should register its + /// appropriate weight at the end of execution with the system pallet directly. 
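+ ///
+ /// (For instance, in a staking system this would typically be the configured number of
+ /// validators to elect.)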
+ fn desired_targets() -> data_provider::Result;
+
+ /// Provide a best-effort prediction about when the next election is about to happen.
+ ///
+ /// In essence, with this function the implementor should predict when it will trigger
+ /// [`ElectionProvider::elect`].
+ ///
+ /// This is only useful for stateful election providers.
+ fn next_election_prediction(now: BlockNumber) -> BlockNumber;
+
+ /// Utility function only to be used in benchmarking scenarios; to be implemented optionally,
+ /// else a no-op.
+ #[cfg(any(feature = "runtime-benchmarks", test))]
+ fn put_snapshot(
+ _voters: Vec<(AccountId, VoteWeight, Vec)>,
+ _targets: Vec,
+ _target_stake: Option,
+ ) {
+ }
+
+ /// Utility function only to be used in benchmarking scenarios; to be implemented optionally,
+ /// else a no-op.
+ ///
+ /// Same as `put_snapshot`, but adds a single voter at a time.
+ #[cfg(any(feature = "runtime-benchmarks", test))]
+ fn add_voter(_voter: AccountId, _weight: VoteWeight, _targets: Vec) {}
+
+ /// Utility function only to be used in benchmarking scenarios; to be implemented optionally,
+ /// else a no-op.
+ ///
+ /// Same as `put_snapshot`, but adds a single target at a time.
+ #[cfg(any(feature = "runtime-benchmarks", test))]
+ fn add_target(_target: AccountId) {}
+
+ /// Clear all voters and targets.
+ #[cfg(any(feature = "runtime-benchmarks", test))]
+ fn clear() {}
+}
+
+#[cfg(feature = "std")]
+impl ElectionDataProvider for () {
+ const MAXIMUM_VOTES_PER_VOTER: u32 = 0;
+ fn targets(_maybe_max_len: Option) -> data_provider::Result> {
+ Ok(Default::default())
+ }
+ fn voters(
+ _maybe_max_len: Option,
+ ) -> data_provider::Result)>> {
+ Ok(Default::default())
+ }
+ fn desired_targets() -> data_provider::Result {
+ Ok(Default::default())
+ }
+ fn next_election_prediction(now: BlockNumber) -> BlockNumber {
+ now
+ }
+}
+
+/// Something that can compute the result of an election and pass it back to the caller.
+///
+/// This trait only provides an interface to _request_ an election, i.e.
+/// [`ElectionProvider::elect`]. The data required for the election needs to be passed to the
+/// implementor of this trait through [`ElectionProvider::DataProvider`].
+pub trait ElectionProvider {
+ /// The error type that is returned by the provider.
+ type Error: Debug;
+
+ /// The data provider of the election.
+ type DataProvider: ElectionDataProvider;
+
+ /// Elect a new set of winners.
+ ///
+ /// The result is returned in a target-major format, namely as a vector of supports.
+ ///
+ /// This should be implemented as a self-weighing function. The implementor should register its
+ /// appropriate weight at the end of execution with the system pallet directly.
+ fn elect() -> Result, Self::Error>;
+}
+
+#[cfg(feature = "std")]
+impl ElectionProvider for () {
+ type Error = &'static str;
+ type DataProvider = ();
+
+ fn elect() -> Result, Self::Error> {
+ Err("<() as ElectionProvider> cannot do anything.")
+ }
+}
+
+/// Something that can compute an NPoS solution.
+pub trait NposSolver {
+ /// The account identifier type of this solver.
+ type AccountId: sp_npos_elections::IdentifierT;
+ /// The accuracy of this solver. This will affect the accuracy of the output.
+ type Accuracy: PerThing128;
+ /// The error type of this implementation.
+ type Error: sp_std::fmt::Debug + sp_std::cmp::PartialEq;
+
+ /// Compute an NPoS solution for the given `voters` and `targets`, electing `to_elect` of the
+ /// `targets`.
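+ ///
+ /// The result is an [`ElectionResult`]: the set of winners plus the per-voter assignments
+ /// backing them.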
+ fn solve( + to_elect: usize, + targets: Vec, + voters: Vec<(Self::AccountId, VoteWeight, Vec)>, + ) -> Result, Self::Error>; +} + +/// A wrapper for [`sp_npos_elections::seq_phragmen`] that implements [`super::NposSolver`]. See the +/// documentation of [`sp_npos_elections::seq_phragmen`] for more info. +pub struct SequentialPhragmen( + sp_std::marker::PhantomData<(AccountId, Accuracy, Balancing)>, +); + +impl< + AccountId: IdentifierT, + Accuracy: PerThing128, + Balancing: Get>, + > NposSolver for SequentialPhragmen +{ + type AccountId = AccountId; + type Accuracy = Accuracy; + type Error = sp_npos_elections::Error; + fn solve( + winners: usize, + targets: Vec, + voters: Vec<(Self::AccountId, VoteWeight, Vec)>, + ) -> Result, Self::Error> { + sp_npos_elections::seq_phragmen(winners, targets, voters, Balancing::get()) + } +} + +/// A wrapper for [`sp_npos_elections::phragmms`] that implements [`NposSolver`]. See the +/// documentation of [`sp_npos_elections::phragmms`] for more info. +pub struct PhragMMS( + sp_std::marker::PhantomData<(AccountId, Accuracy, Balancing)>, +); + +impl< + AccountId: IdentifierT, + Accuracy: PerThing128, + Balancing: Get>, + > NposSolver for PhragMMS +{ + type AccountId = AccountId; + type Accuracy = Accuracy; + type Error = sp_npos_elections::Error; + fn solve( + winners: usize, + targets: Vec, + voters: Vec<(Self::AccountId, VoteWeight, Vec)>, + ) -> Result, Self::Error> { + sp_npos_elections::phragmms(winners, targets, voters, Balancing::get()) + } +} diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs new file mode 100644 index 0000000000000..fb1ccfdfe2566 --- /dev/null +++ b/frame/election-provider-support/src/onchain.rs @@ -0,0 +1,196 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! An implementation of [`ElectionProvider`] that does an on-chain sequential phragmen. + +use crate::{ElectionDataProvider, ElectionProvider}; +use frame_support::{traits::Get, weights::DispatchClass}; +use sp_npos_elections::*; +use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; + +/// Errors of the on-chain election. +#[derive(Eq, PartialEq, Debug)] +pub enum Error { + /// An internal error in the NPoS elections crate. + NposElections(sp_npos_elections::Error), + /// Errors from the data provider. + DataProvider(&'static str), +} + +impl From for Error { + fn from(e: sp_npos_elections::Error) -> Self { + Error::NposElections(e) + } +} + +/// A simple on-chain implementation of the election provider trait. +/// +/// This will accept voting data on the fly and produce the results immediately. +/// +/// ### Warning +/// +/// This can be very expensive to run frequently on-chain. Use with care. Moreover, this +/// implementation ignores the additional data of the election data provider and gives no insight on +/// how much weight was consumed. 
+///
+/// Finally, this implementation does not impose any limits on the number of voters and targets
+/// that are provided.
+pub struct OnChainSequentialPhragmen(PhantomData);
+
+/// Configuration trait of [`OnChainSequentialPhragmen`].
+///
+/// Note that this is similar to a pallet's `Config` trait, but [`OnChainSequentialPhragmen`] is
+/// not a pallet.
+///
+/// WARNING: the user of this type must ensure that the `Accuracy` type will work nicely with the
+/// normalization operation done inside `seq_phragmen`. See
+/// [`sp_npos_elections::assignment::try_normalize`] for more info.
+pub trait Config: frame_system::Config {
+ /// The accuracy used to compute the election.
+ type Accuracy: PerThing128;
+ /// Something that provides the data for the election.
+ type DataProvider: ElectionDataProvider;
+}
+
+impl ElectionProvider for OnChainSequentialPhragmen {
+ type Error = Error;
+ type DataProvider = T::DataProvider;
+
+ fn elect() -> Result, Self::Error> {
+ let voters = Self::DataProvider::voters(None).map_err(Error::DataProvider)?;
+ let targets = Self::DataProvider::targets(None).map_err(Error::DataProvider)?;
+ let desired_targets = Self::DataProvider::desired_targets().map_err(Error::DataProvider)?;
+
+ let stake_map: BTreeMap = voters
+ .iter()
+ .map(|(validator, vote_weight, _)| (validator.clone(), *vote_weight))
+ .collect();
+
+ let stake_of =
+ |w: &T::AccountId| -> VoteWeight { stake_map.get(w).cloned().unwrap_or_default() };
+
+ let ElectionResult { winners: _, assignments } =
+ seq_phragmen::<_, T::Accuracy>(desired_targets as usize, targets, voters, None)
+ .map_err(Error::from)?;
+
+ let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?;
+
+ let weight = T::BlockWeights::get().max_block;
+ frame_system::Pallet::::register_extra_weight_unchecked(
+ weight,
+ DispatchClass::Mandatory,
+ );
+
+ Ok(to_supports(&staked))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use sp_npos_elections::Support;
+ use sp_runtime::Perbill;
+
+ type AccountId = u64;
+ type BlockNumber = u64;
+
+ pub type Header = sp_runtime::generic::Header;
+ pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic;
+ pub type Block = sp_runtime::generic::Block;
+
+ frame_support::construct_runtime!(
+ pub enum Runtime where
+ Block = Block,
+ NodeBlock = Block,
+ UncheckedExtrinsic = UncheckedExtrinsic
+ {
+ System: frame_system::{Pallet, Call, Event},
+ }
+ );
+
+ impl frame_system::Config for Runtime {
+ type SS58Prefix = ();
+ type BaseCallFilter = frame_support::traits::Everything;
+ type Origin = Origin;
+ type Index = AccountId;
+ type BlockNumber = BlockNumber;
+ type Call = Call;
+ type Hash = sp_core::H256;
+ type Hashing = sp_runtime::traits::BlakeTwo256;
+ type AccountId = AccountId;
+ type Lookup = sp_runtime::traits::IdentityLookup;
+ type Header = sp_runtime::testing::Header;
+ type Event = ();
+ type BlockHashCount = ();
+ type DbWeight = ();
+ type BlockLength = ();
+ type BlockWeights = ();
+ type Version = ();
+ type PalletInfo = PalletInfo;
+ type AccountData = ();
+ type OnNewAccount = ();
+ type OnKilledAccount = ();
+ type SystemWeightInfo = ();
+ type OnSetCode = ();
+ }
+
+ impl Config for Runtime {
+ type Accuracy = Perbill;
+ type DataProvider = mock_data_provider::DataProvider;
+ }
+
+ type OnChainPhragmen = OnChainSequentialPhragmen;
+
+ mod mock_data_provider {
+ use super::*;
+ use crate::data_provider;
+
+ pub struct DataProvider;
+ impl ElectionDataProvider for DataProvider {
+ const MAXIMUM_VOTES_PER_VOTER: u32 = 2;
+ fn voters(
+ _: Option,
+ )
-> data_provider::Result)>> { + Ok(vec![(1, 10, vec![10, 20]), (2, 20, vec![30, 20]), (3, 30, vec![10, 30])]) + } + + fn targets(_: Option) -> data_provider::Result> { + Ok(vec![10, 20, 30]) + } + + fn desired_targets() -> data_provider::Result { + Ok(2) + } + + fn next_election_prediction(_: BlockNumber) -> BlockNumber { + 0 + } + } + } + + #[test] + fn onchain_seq_phragmen_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + assert_eq!( + OnChainPhragmen::elect().unwrap(), + vec![ + (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), + (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) + ] + ); + }) + } +} diff --git a/frame/elections-phragmen/CHANGELOG.md b/frame/elections-phragmen/CHANGELOG.md new file mode 100644 index 0000000000000..231de1d2e475e --- /dev/null +++ b/frame/elections-phragmen/CHANGELOG.md @@ -0,0 +1,35 @@ +# Changelog +All notable changes to this crate will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this crate adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [4.0.0] - UNRELEASED + +### Added + +### Changed +\[**Needs Migration**\] [migrate pallet-elections-phragmen to attribute macros](https://github.com/paritytech/substrate/pull/8044) + +### Fixed + +### Security + +## [3.0.0] + +### Added +[Add slashing events to elections-phragmen](https://github.com/paritytech/substrate/pull/7543) + +### Changed + +### Fixed +[Don't slash all outgoing members](https://github.com/paritytech/substrate/pull/7394) +[Fix wrong outgoing calculation in election](https://github.com/paritytech/substrate/pull/7384) + +### Security +\[**Needs Migration**\] [Fix elections-phragmen and proxy issue + Record deposits on-chain](https://github.com/paritytech/substrate/pull/7040) + +## [2.0.0] - 2020-09-2020 + +Initial version from which version tracking has begun. 
+ diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 8d59cde19255a..f2771a9f72783 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections-phragmen" -version = "2.0.0" +version = "5.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -13,35 +13,42 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-npos-elections = { version = "2.0.0", default-features = false, path = "../../primitives/npos-elections" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ + "derive", +] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../primitives/npos-elections" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -hex-literal = "0.3.1" -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } [features] default = ["std"] std = [ - "serde", "codec/std", + "scale-info/std", "frame-support/std", "sp-runtime/std", "sp-npos-elections/std", "frame-system/std", "sp-std/std", + "sp-io/std", + "sp-core/std", + "log/std", ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/elections-phragmen/README.md b/frame/elections-phragmen/README.md index 5507d53970632..26b3f260da563 100644 --- a/frame/elections-phragmen/README.md +++ b/frame/elections-phragmen/README.md @@ -60,8 +60,8 @@ being re-elected at the end of each round. 
### Module Information -- [`election_sp_phragmen::Trait`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/trait.Trait.html) +- [`election_sp_phragmen::Config`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/trait.Config.html) - [`Call`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/enum.Call.html) - [`Module`](https://docs.rs/pallet-elections-phragmen/latest/pallet_elections_phragmen/struct.Module.html) -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index e7c3719480b70..7cb83b3dd7799 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -1,18 +1,19 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. //! Elections-Phragmen pallet benchmarking. @@ -20,28 +21,22 @@ use super::*; +use frame_benchmarking::{ + account, benchmarks, impl_benchmark_test_suite, whitelist, BenchmarkError, BenchmarkResult, +}; +use frame_support::{dispatch::DispatchResultWithPostInfo, traits::OnInitialize}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; -use frame_support::traits::OnInitialize; -use crate::Module as Elections; +use crate::Pallet as Elections; const BALANCE_FACTOR: u32 = 250; const MAX_VOTERS: u32 = 500; const MAX_CANDIDATES: u32 = 200; -type Lookup = <::Lookup as StaticLookup>::Source; - -macro_rules! whitelist { - ($acc:ident) => { - frame_benchmarking::benchmarking::add_to_whitelist( - frame_system::Account::::hashed_key_for(&$acc).into() - ); - }; -} +type Lookup = <::Lookup as StaticLookup>::Source; /// grab new account with infinite balance. -fn endowed_account(name: &'static str, index: u32) -> T::AccountId { +fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); let amount = default_stake::(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); @@ -53,83 +48,74 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { } /// Account to lookup type of system trait. 
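Aside on the imports above: the pallet-local `whitelist!` macro removed in this hunk is replaced by the one exported from `frame-benchmarking`. Its removed body was equivalent to the following (a fragment, not a standalone program; `T` is the pallet's `Config` type and `acc` the account being whitelisted):

```rust
// Mark the account's `frame_system::Account` storage key as whitelisted, so
// the benchmarking DB tracker does not count reads/writes to it.
frame_benchmarking::benchmarking::add_to_whitelist(
    frame_system::Account::<T>::hashed_key_for(&acc).into(),
);
```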
-fn as_lookup(account: T::AccountId) -> Lookup { +fn as_lookup(account: T::AccountId) -> Lookup { T::Lookup::unlookup(account) } /// Get a reasonable amount of stake based on the execution trait's configuration -fn default_stake(factor: u32) -> BalanceOf { +fn default_stake(factor: u32) -> BalanceOf { let factor = BalanceOf::::from(factor); T::Currency::minimum_balance() * factor } /// Get the current number of candidates. -fn candidate_count() -> u32 { +fn candidate_count() -> u32 { >::decode_len().unwrap_or(0usize) as u32 } -/// Get the number of votes of a voter. -fn vote_count_of(who: &T::AccountId) -> u32 { - >::get(who).1.len() as u32 -} - -/// A `DefunctVoter` struct with correct value -fn defunct_for(who: T::AccountId) -> DefunctVoter> { - DefunctVoter { - who: as_lookup::(who.clone()), - candidate_count: candidate_count::(), - vote_count: vote_count_of::(&who), - } -} - /// Add `c` new candidates. -fn submit_candidates(c: u32, prefix: &'static str) - -> Result, &'static str> -{ - (0..c).map(|i| { - let account = endowed_account::(prefix, i); - >::submit_candidacy( - RawOrigin::Signed(account.clone()).into(), - candidate_count::(), - ).map_err(|_| "failed to submit candidacy")?; - Ok(account) - }).collect::>() +fn submit_candidates( + c: u32, + prefix: &'static str, +) -> Result, &'static str> { + (0..c) + .map(|i| { + let account = endowed_account::(prefix, i); + >::submit_candidacy( + RawOrigin::Signed(account.clone()).into(), + candidate_count::(), + ) + .map_err(|_| "failed to submit candidacy")?; + Ok(account) + }) + .collect::>() } /// Add `c` new candidates with self vote. -fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) - -> Result, &'static str> -{ +fn submit_candidates_with_self_vote( + c: u32, + prefix: &'static str, +) -> Result, &'static str> { let candidates = submit_candidates::(c, prefix)?; let stake = default_stake::(BALANCE_FACTOR); - let _ = candidates.iter().map(|c| - submit_voter::(c.clone(), vec![c.clone()], stake) - ).collect::>()?; + let _ = candidates + .iter() + .map(|c| submit_voter::(c.clone(), vec![c.clone()], stake).map(|_| ())) + .collect::>()?; Ok(candidates) } - /// Submit one voter. -fn submit_voter(caller: T::AccountId, votes: Vec, stake: BalanceOf) - -> Result<(), sp_runtime::DispatchError> -{ +fn submit_voter( + caller: T::AccountId, + votes: Vec, + stake: BalanceOf, +) -> DispatchResultWithPostInfo { >::vote(RawOrigin::Signed(caller).into(), votes, stake) } /// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if /// available. -fn distribute_voters(mut all_candidates: Vec, num_voters: u32, votes: usize) - -> Result<(), &'static str> -{ +fn distribute_voters( + mut all_candidates: Vec, + num_voters: u32, + votes: usize, +) -> Result<(), &'static str> { let stake = default_stake::(BALANCE_FACTOR); for i in 0..num_voters { // to ensure that votes are different all_candidates.rotate_left(1); - let votes = all_candidates - .iter() - .cloned() - .take(votes) - .collect::>(); + let votes = all_candidates.iter().cloned().take(votes).collect::>(); let voter = endowed_account::("voter", i); submit_voter::(voter, votes, stake)?; } @@ -138,7 +124,7 @@ fn distribute_voters(mut all_candidates: Vec, num_voters /// Fill the seats of members and runners-up up until `m`. Note that this might include either only /// members, or members and runners-up. 
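The `rotate_left(1)` in `distribute_voters` above is what makes every voter's vote set distinct. A self-contained illustration of the effect:

```rust
fn main() {
    // Each voter takes the first `votes` candidates of a freshly rotated list,
    // so no two consecutive voters end up with the same vote set.
    let mut cands = vec![10, 20, 30];
    let votes = 2;
    let mut seen = Vec::new();
    for _ in 0..3 {
        cands.rotate_left(1);
        seen.push(cands.iter().cloned().take(votes).collect::<Vec<_>>());
    }
    assert_eq!(seen, vec![vec![20, 30], vec![30, 10], vec![10, 20]]);
}
```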
-fn fill_seats_up_to(m: u32) -> Result, &'static str> { +fn fill_seats_up_to(m: u32) -> Result, &'static str> { let _ = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; assert_eq!(>::candidates().len() as u32, m, "wrong number of candidates."); >::do_phragmen(); @@ -148,28 +134,24 @@ fn fill_seats_up_to(m: u32) -> Result, &'static str> m as usize, "wrong number of members and runners-up", ); - Ok( - >::members() - .into_iter() - .map(|(x, _)| x) - .chain(>::runners_up().into_iter().map(|(x, _)| x)) - .collect() - ) + Ok(>::members() + .into_iter() + .map(|m| m.who) + .chain(>::runners_up().into_iter().map(|r| r.who)) + .collect()) } /// removes all the storage items to reverse any genesis state. -fn clean() { +fn clean() { >::kill(); >::kill(); >::kill(); - let _ = >::drain(); + >::remove_all(None); } benchmarks! { - _ {} - // -- Signed ones - vote { + vote_equal { let v in 1 .. (MAXIMUM_VOTE as u32); clean::(); @@ -179,14 +161,39 @@ benchmarks! { let caller = endowed_account::("caller", 0); let stake = default_stake::(BALANCE_FACTOR); - // vote for all of them. - let votes = all_candidates; + // original votes. + let mut votes = all_candidates; + submit_voter::(caller.clone(), votes.clone(), stake)?; + + // new votes. + votes.rotate_left(1); whitelist!(caller); - }: _(RawOrigin::Signed(caller), votes, stake) + }: vote(RawOrigin::Signed(caller), votes, stake) - vote_update { - let v in 1 .. (MAXIMUM_VOTE as u32); + vote_more { + let v in 2 .. (MAXIMUM_VOTE as u32); + clean::(); + + // create a bunch of candidates. + let all_candidates = submit_candidates::(v, "candidates")?; + + let caller = endowed_account::("caller", 0); + let stake = default_stake::(BALANCE_FACTOR); + + // original votes. + let mut votes = all_candidates.iter().skip(1).cloned().collect::>(); + submit_voter::(caller.clone(), votes.clone(), stake / >::from(10u32))?; + + // new votes. + votes = all_candidates; + assert!(votes.len() > >::get(caller.clone()).votes.len()); + + whitelist!(caller); + }: vote(RawOrigin::Signed(caller), votes, stake / >::from(10u32)) + + vote_less { + let v in 2 .. (MAXIMUM_VOTE as u32); clean::(); // create a bunch of candidates. @@ -200,7 +207,8 @@ benchmarks! { submit_voter::(caller.clone(), votes.clone(), stake)?; // new votes. - votes.rotate_left(1); + votes = votes.into_iter().skip(1).collect::>(); + assert!(votes.len() < >::get(caller.clone()).votes.len()); whitelist!(caller); }: vote(RawOrigin::Signed(caller), votes, stake) @@ -221,123 +229,6 @@ benchmarks! { whitelist!(caller); }: _(RawOrigin::Signed(caller)) - report_defunct_voter_correct { - // number of already existing candidates that may or may not be voted by the reported - // account. - let c in 1 .. MAX_CANDIDATES; - // number of candidates that the reported voter voted for. The worse case of search here is - // basically `c * v`. - let v in 1 .. (MAXIMUM_VOTE as u32); - // we fix the number of members to the number of desired members and runners-up. We'll be in - // this state almost always. - let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); - - clean::(); - let stake = default_stake::(BALANCE_FACTOR); - - // create m members and runners combined. - let _ = fill_seats_up_to::(m)?; - - // create a bunch of candidates as well. - let bailing_candidates = submit_candidates::(v, "bailing_candidates")?; - let all_candidates = submit_candidates::(c, "all_candidates")?; - - // account 1 is the reporter and must be whitelisted, and a voter. 
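The old `vote` benchmark is split into `vote_equal`, `vote_more`, and `vote_less` above because updating a vote can grow, shrink, or keep the reserved deposit, and the three paths do different amounts of work. The extrinsic is then priced pessimistically; the `#[pallet::weight]` attribute added to `vote` in lib.rs later in this diff takes the worst of the three (quoted fragment):

```rust
// Charge the maximum of the three measured paths for any `vote` call.
T::WeightInfo::vote_more(votes.len() as u32)
    .max(T::WeightInfo::vote_less(votes.len() as u32))
    .max(T::WeightInfo::vote_equal(votes.len() as u32))
```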
- let account_1 = endowed_account::("caller", 0); - submit_voter::( - account_1.clone(), - all_candidates.iter().take(1).cloned().collect(), - stake, - )?; - - // account 2 votes for all of the mentioned candidates. - let account_2 = endowed_account::("caller_2", 1); - submit_voter::( - account_2.clone(), - bailing_candidates.clone(), - stake, - )?; - - // all the bailers go away. NOTE: we can simplify this. There's no need to create all these - // candidates and remove them. The defunct voter can just vote for random accounts as long - // as there are enough members (potential candidates). - bailing_candidates.into_iter().for_each(|b| { - let count = candidate_count::(); - assert!(>::renounce_candidacy( - RawOrigin::Signed(b).into(), - Renouncing::Candidate(count), - ).is_ok()); - }); - - let defunct_info = defunct_for::(account_2.clone()); - whitelist!(account_1); - - assert!(>::is_voter(&account_2)); - }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct_info) - verify { - assert!(!>::is_voter(&account_2)); - #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - - report_defunct_voter_incorrect { - // number of already existing candidates that may or may not be voted by the reported - // account. - let c in 1 .. MAX_CANDIDATES; - // number of candidates that the reported voter voted for. The worse case of search here is - // basically `c * v`. - let v in 1 .. (MAXIMUM_VOTE as u32); - // we fix the number of members to the number of desired members and runners-up. We'll be in - // this state almost always. - let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); - - clean::(); - let stake = default_stake::(BALANCE_FACTOR); - - // create m members and runners combined. - let _ = fill_seats_up_to::(m)?; - - // create a bunch of candidates as well. - let all_candidates = submit_candidates::(c, "candidates")?; - - // account 1 is the reporter and need to be whitelisted, and a voter. - let account_1 = endowed_account::("caller", 0); - submit_voter::( - account_1.clone(), - all_candidates.iter().take(1).cloned().collect(), - stake, - )?; - - // account 2 votes for a bunch of crap, and finally a correct candidate. - let account_2 = endowed_account::("caller_2", 1); - let mut invalid: Vec = (0..(v-1)) - .map(|seed| account::("invalid", 0, seed).clone()) - .collect(); - invalid.push(all_candidates.last().unwrap().clone()); - submit_voter::( - account_2.clone(), - invalid, - stake, - )?; - - let defunct_info = defunct_for::(account_2.clone()); - whitelist!(account_1); - }: report_defunct_voter(RawOrigin::Signed(account_1.clone()), defunct_info) - verify { - // account 2 is still a voter. - assert!(>::is_voter(&account_2)); - #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } - submit_candidacy { // number of already existing candidates. let c in 1 .. MAX_CANDIDATES; @@ -443,9 +334,16 @@ benchmarks! { } } + // We use the max block weight for this extrinsic for now. See below. + remove_member_without_replacement {}: { + Err(BenchmarkError::Override( + BenchmarkResult::from_weight(T::BlockWeights::get().max_block) + ))?; + } + // -- Root ones #[extra] // this calls into phragmen and consumes a full block for now. - remove_member_without_replacement { + remove_member_without_replacement_extra { // worse case is when we remove a member and we have no runner as a replacement. 
This // triggers phragmen again. The only parameter is how many candidates will compete for the // new slot. @@ -520,20 +418,52 @@ benchmarks! { } } - #[extra] - on_initialize { - // if n % TermDuration is zero, then we run phragmen. The weight function must and should - // check this as it is cheap to do so. TermDuration is not a storage item, it is a constant - // encoded in the runtime. + clean_defunct_voters { + // total number of voters. + let v in (MAX_VOTERS / 2) .. MAX_VOTERS; + // those that are defunct and need removal. + let d in 1 .. (MAX_VOTERS / 2); + + // remove any previous stuff. + clean::(); + + let all_candidates = submit_candidates::(v, "candidates")?; + distribute_voters::(all_candidates, v, MAXIMUM_VOTE)?; + + // all candidates leave. + >::kill(); + + // now everyone is defunct + assert!(>::iter().all(|(_, v)| >::is_defunct_voter(&v.votes))); + assert_eq!(>::iter().count() as u32, v); + let root = RawOrigin::Root; + }: _(root, v, d) + verify { + assert_eq!(>::iter().count() as u32, 0); + } + + election_phragmen { + // This is just to focus on phragmen in the context of this module. We always select 20 + // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. + // Yet, change the number of voters, candidates and edge per voter to see the impact. Note + // that we give all candidates a self vote to make sure they are all considered. let c in 1 .. MAX_CANDIDATES; + let v in 1 .. MAX_VOTERS; + let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; clean::(); - // create c candidates. + // so we have a situation with v and e. we want e to basically always be in the range of `e + // -> e * MAXIMUM_VOTE`, but we cannot express that now with the benchmarks. So what we do + // is: when c is being iterated, v, and e are max and fine. when v is being iterated, e is + // being set to max and this is a problem. In these cases, we cap e to a lower value, namely + // v * MAXIMUM_VOTE. when e is being iterated, v is at max, and again fine. all in all, + // votes_per_voter can never be more than MAXIMUM_VOTE. Note that this might cause `v` to be + // an overestimate. + let votes_per_voter = (e / v).min(MAXIMUM_VOTE as u32); + let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - // create 500 voters, each voting the maximum 16 - distribute_voters::(all_candidates, MAX_VOTERS, MAXIMUM_VOTE)?; + let _ = distribute_voters::(all_candidates, v, votes_per_voter as usize)?; }: { - // elect >::on_initialize(T::TermDuration::get()); } verify { @@ -552,18 +482,16 @@ benchmarks! { } #[extra] - phragmen { - // This is just to focus on phragmen in the context of this module. We always select 20 - // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. - // Yet, change the number of voters, candidates and edge per voter to see the impact. Note - // that we give all candidates a self vote to make sure they are all considered. + election_phragmen_c_e { let c in 1 .. MAX_CANDIDATES; - let v in 1 .. MAX_VOTERS; - let e in 1 .. (MAXIMUM_VOTE as u32); + let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; + let fixed_v = MAX_VOTERS; clean::(); + let votes_per_voter = e / fixed_v; + let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - let _ = distribute_voters::(all_candidates, v, e as usize)?; + let _ = distribute_voters::(all_candidates, fixed_v, votes_per_voter as usize)?; }: { >::on_initialize(T::TermDuration::get()); } @@ -581,62 +509,40 @@ benchmarks! 
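The capping comment in `election_phragmen` above is easier to follow with concrete numbers. A self-contained check, using the `MAX_VOTERS = 500` and `MAXIMUM_VOTE = 16` constants defined in this file:

```rust
const MAX_VOTERS: u32 = 500;
const MAXIMUM_VOTE: usize = 16;

fn main() {
    // When `v` is being iterated it can be small while `e` sits at its max
    // (MAX_VOTERS * MAXIMUM_VOTE = 8000); without the cap, a voter would be
    // assigned far more than MAXIMUM_VOTE votes.
    let (v, e) = (10u32, MAX_VOTERS * MAXIMUM_VOTE as u32);
    let votes_per_voter = (e / v).min(MAXIMUM_VOTE as u32);
    assert_eq!(e / v, 800);
    assert_eq!(votes_per_voter, 16);
}
```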
{ MEMBERS.with(|m| *m.borrow_mut() = vec![]); } } -} -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks_elections_phragmen() { - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_vote::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_voter::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_report_defunct_voter_correct::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_report_defunct_voter_incorrect::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_submit_candidacy::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_candidate::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_runners_up::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_members::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_member_without_replacement::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_member_with_replacement::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_on_initialize::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_phragmen::()); - }); + #[extra] + election_phragmen_v { + let v in 4 .. 16; + let fixed_c = MAX_CANDIDATES; + let fixed_e = 64; + clean::(); + + let votes_per_voter = fixed_e / v; + + let all_candidates = submit_candidates_with_self_vote::(fixed_c, "candidates")?; + let _ = distribute_voters::(all_candidates, v, votes_per_voter as usize)?; + }: { + >::on_initialize(T::TermDuration::get()); + } + verify { + assert_eq!(>::members().len() as u32, T::DesiredMembers::get().min(fixed_c)); + assert_eq!( + >::runners_up().len() as u32, + T::DesiredRunnersUp::get().min(fixed_c.saturating_sub(T::DesiredMembers::get())), + ); + + #[cfg(test)] + { + // reset members in between benchmark tests. + use crate::tests::MEMBERS; + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + } } } + +impl_benchmark_test_suite!( + Elections, + crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), + crate::tests::Test, + exec_name = build_and_execute, +); diff --git a/frame/elections-phragmen/src/default_weights.rs b/frame/elections-phragmen/src/default_weights.rs deleted file mode 100644 index 4025e61d15af4..0000000000000 --- a/frame/elections-phragmen/src/default_weights.rs +++ /dev/null @@ -1,88 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
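The `impl_benchmark_test_suite!` call above stands in for the hand-written `mod tests` deleted in this hunk. In effect it generates one test per benchmark in the same shape as the removed ones; roughly (a sketch, not the macro's literal expansion):

```rust
use frame_support::assert_ok;

#[test]
fn bench_election_phragmen() {
    // `exec_name = build_and_execute` points the generated tests at this
    // builder method instead of the default `build`.
    crate::tests::ExtBuilder::default()
        .desired_members(13)
        .desired_runners_up(7)
        .build_and_execute(|| {
            assert_ok!(test_benchmark_election_phragmen::<crate::tests::Test>());
        });
}
```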
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 - -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; - -impl crate::WeightInfo for () { - fn vote(v: u32, ) -> Weight { - (91_489_000 as Weight) - .saturating_add((199_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn vote_update(v: u32, ) -> Weight { - (56_511_000 as Weight) - .saturating_add((245_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(DbWeight::get().reads(5 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn remove_voter() -> Weight { - (76_714_000 as Weight) - .saturating_add(DbWeight::get().reads(2 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_743_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_750_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(DbWeight::get().reads(7 as Weight)) - .saturating_add(DbWeight::get().writes(3 as Weight)) - } - fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { - (0 as Weight) - .saturating_add((1_733_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((31_861_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(DbWeight::get().reads(6 as Weight)) - .saturating_add(DbWeight::get().writes(2 as Weight)) - } - fn submit_candidacy(c: u32, ) -> Weight { - (74_714_000 as Weight) - .saturating_add((315_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (50_408_000 as Weight) - .saturating_add((159_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn renounce_candidacy_members() -> Weight { - (79_626_000 as Weight) - .saturating_add(DbWeight::get().reads(3 as Weight)) - .saturating_add(DbWeight::get().writes(4 as Weight)) - } - fn renounce_candidacy_runners_up() -> Weight { - (49_715_000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - .saturating_add(DbWeight::get().writes(1 as Weight)) - } - fn remove_member_with_replacement() -> Weight { - (76_572_000 as Weight) - .saturating_add(DbWeight::get().reads(4 as Weight)) - .saturating_add(DbWeight::get().writes(5 as Weight)) - } - fn remove_member_wrong_refund() -> Weight { - (8_777_000 as Weight) - .saturating_add(DbWeight::get().reads(1 as Weight)) - } -} diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 964cf6daf2cee..d7b42383da757 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,51 +23,66 @@ //! //! The election happens in _rounds_: every `N` blocks, all previous members are retired and a new //! set is elected (which may or may not have an intersection with the previous set). Each round -//! lasts for some number of blocks defined by `TermDuration` storage item. The words _term_ and +//! lasts for some number of blocks defined by [`Config::TermDuration`]. The words _term_ and //! _round_ can be used interchangeably in this context. //! -//! `TermDuration` might change during a round. This can shorten or extend the length of the round. -//! The next election round's block number is never stored but rather always checked on the fly. -//! Based on the current block number and `TermDuration`, the condition `BlockNumber % TermDuration -//! == 0` being satisfied will always trigger a new election round. +//! [`Config::TermDuration`] might change during a round. This can shorten or extend the length of +//! the round. The next election round's block number is never stored but rather always checked on +//! the fly. Based on the current block number and [`Config::TermDuration`], the condition +//! `BlockNumber % TermDuration == 0` being satisfied will always trigger a new election round. +//! +//! ### Bonds and Deposits +//! +//! Both voting and being a candidate require deposits to be taken, in exchange for the data that +//! needs to be kept on-chain. The terms *bond* and *deposit* can be used interchangeably in this +//! context. +//! +//! Bonds will be unreserved only upon adhering to the protocol laws. Failing to do so will cause +//! the bond to be slashed. //! //! ### Voting //! -//! Voters can vote for any set of the candidates by providing a list of account ids. Invalid votes -//! (voting for non-candidates) are ignored during election. Yet, a voter _might_ vote for a future -//! candidate. Voters reserve a bond as they vote. Each vote defines a `value`. This amount is -//! locked from the account of the voter and indicates the weight of the vote. Voters can update -//! their votes at any time by calling `vote()` again. This keeps the bond untouched but can -//! optionally change the locked `value`. After a round, votes are kept and might still be valid for +//! Voters can vote for a limited number of the candidates by providing a list of account ids, +//! bounded by [`MAXIMUM_VOTE`]. Invalid votes (voting for non-candidates) and duplicate votes are +//! ignored during election. Yet, a voter _might_ vote for a future candidate. Voters reserve a bond +//! as they vote. Each vote defines a `value`. This amount is locked from the account of the voter +//! and indicates the weight of the vote. Voters can update their votes at any time by calling +//! `vote()` again. This can update the vote targets (which might update the deposit) or update the +//! vote's stake ([`Voter::stake`]). After a round, votes are kept and might still be valid for //! further rounds. A voter is responsible for calling `remove_voter` once they are done to have //! their bond back and remove the lock. //! -//! Voters also report other voters as being defunct to earn their bond. A voter is defunct once all -//! of the candidates that they have voted for are neither a valid candidate anymore nor a member. -//! Upon reporting, if the target voter is actually defunct, the reporter will be rewarded by the -//! 
voting bond of the target. The target will lose their bond and get removed. If the target is not -//! defunct, the reporter is slashed and removed. To prevent being reported, voters should manually -//! submit a `remove_voter()` as soon as they are in the defunct state. +//! See [`Call::vote`], [`Call::remove_voter`]. +//! +//! ### Defunct Voter +//! +//! A voter is defunct once none of the candidates that they have voted for is a valid candidate +//! (as seen further below, members and runners-up are also always candidates). Defunct voters can +//! be removed via a root call ([`Call::clean_defunct_voters`]). Upon being removed, their bond is +//! returned. This is an administrative operation and can be called only by the root origin in the +//! case of state bloat. //! //! ### Candidacy and Members //! -//! Candidates also reserve a bond as they submit candidacy. A candidate cannot take their candidacy -//! back. A candidate can end up in one of the below situations: -//! - **Winner**: A winner is kept as a _member_. They must still have a bond in reserve and they -//! are automatically counted as a candidate for the next election. +//! Candidates also reserve a bond as they submit candidacy. A candidate can end up in one of the +//! below situations: +//! - **Members**: A winner is kept as a _member_. They must still have a bond in reserve and they +//! are automatically counted as candidates for the next election. The number of desired +//! members is set by [`Config::DesiredMembers`]. //! - **Runner-up**: Runners-up are the best candidates immediately after the winners. The number -//! of runners_up to keep is configurable. Runners-up are used, in order that they are elected, -//! as replacements when a candidate is kicked by `[remove_member]`, or when an active member -//! renounces their candidacy. Runners are automatically counted as a candidate for the next -//! election. -//! - **Loser**: Any of the candidate who are not a winner are left as losers. A loser might be an -//! _outgoing member or runner_, meaning that they are an active member who failed to keep their -//! spot. An outgoing will always lose their bond. +//! of runners-up to keep is set by [`Config::DesiredRunnersUp`]. Runners-up are used, in the +//! same order as they are elected, as replacements when a candidate is kicked by +//! [`Call::remove_member`], or when an active member renounces their candidacy. Runners-up are +//! automatically counted as candidates for the next election. +//! - **Loser**: Any candidates who are not members/runners-up are left as losers. A loser +//! might be an _outgoing member or runner-up_, meaning that they are an active member who +//! failed to keep their spot. **An outgoing candidate/member/runner-up will always lose their +//! bond**. //! -//! ##### Renouncing candidacy. +//! #### Renouncing candidacy //! -//! All candidates, elected or not, can renounce their candidacy. A call to [`Module::renounce_candidacy`] -//! will always cause the candidacy bond to be refunded. +//! All candidates, elected or not, can renounce their candidacy. A call to +//! [`Call::renounce_candidacy`] will always cause the candidacy bond to be refunded. //! //! Note that with the members being the default candidates for the next round and votes persisting //! in storage, the election system is entirely stable given no further input. This means that if @@ -77,46 +92,51 @@ //! //! ### Module Information //! -//! - [`election_sp_phragmen::Trait`](./trait.Trait.html) -//! 
- [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Module`] #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}, - ensure, - storage::{IterableStorageMap, StorageMap}, + dispatch::WithPostDispatchInfo, traits::{ - BalanceStatus, ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, + ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency, - WithdrawReason, WithdrawReasons, + SortedMembers, StorageVersion, WithdrawReasons, }, weights::Weight, }; -use frame_system::{ensure_root, ensure_signed}; +use scale_info::TypeInfo; use sp_npos_elections::{ElectionResult, ExtendedBalance}; use sp_runtime::{ traits::{Saturating, StaticLookup, Zero}, DispatchError, Perbill, RuntimeDebug, }; -use sp_std::prelude::*; +use sp_std::{cmp::Ordering, prelude::*}; mod benchmarking; -mod default_weights; +pub mod weights; +pub use weights::WeightInfo; + +/// All migrations. +pub mod migrations; + +/// The current storage version. +const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); /// The maximum votes allowed per voter. pub const MAXIMUM_VOTE: usize = 16; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// An indication that the renouncing account currently has which of the below roles. -#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] pub enum Renouncing { /// A member is renouncing. Member, @@ -126,229 +146,159 @@ pub enum Renouncing { Candidate(#[codec(compact)] u32), } -/// Information needed to prove the defunct-ness of a voter. -#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)] -pub struct DefunctVoter { - /// the voter's who's being challenged for being defunct - pub who: AccountId, - /// The number of votes that `who` has placed. - #[codec(compact)] - pub vote_count: u32, - /// The number of current active candidates. - #[codec(compact)] - pub candidate_count: u32 +/// An active voter. +#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq, TypeInfo)] +pub struct Voter { + /// The members being backed. + pub votes: Vec, + /// The amount of stake placed on this vote. + pub stake: Balance, + /// The amount of deposit reserved for this vote. + /// + /// To be unreserved upon removal. + pub deposit: Balance, } -pub trait WeightInfo { - fn vote(v: u32, ) -> Weight; - fn vote_update(v: u32, ) -> Weight; - fn remove_voter() -> Weight; - fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight; - fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight; - fn submit_candidacy(c: u32, ) -> Weight; - fn renounce_candidacy_candidate(c: u32, ) -> Weight; - fn renounce_candidacy_members() -> Weight; - fn renounce_candidacy_runners_up() -> Weight; - fn remove_member_with_replacement() -> Weight; - fn remove_member_wrong_refund() -> Weight; +/// A holder of a seat as either a member or a runner-up. +#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq, TypeInfo)] +pub struct SeatHolder { + /// The holder. 
+ pub who: AccountId, + /// The total backing stake. + pub stake: Balance, + /// The amount of deposit held on-chain. + /// + /// To be unreserved upon renouncing, or slashed upon being a loser. + pub deposit: Balance, } -pub trait Trait: frame_system::Trait { - /// The overarching event type.c - type Event: From> + Into<::Event>; - - /// Identifier for the elections-phragmen pallet's lock - type ModuleId: Get; - - /// The currency that people are electing with. - type Currency: - LockableCurrency + - ReservableCurrency; - - /// What to do when the members change. - type ChangeMembers: ChangeMembers; - - /// What to do with genesis members - type InitializeMembers: InitializeMembers; +pub use pallet::*; - /// Convert a balance into a number used for election calculation. - /// This must fit into a `u64` but is allowed to be sensibly lossy. - type CurrencyToVote: CurrencyToVote>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - /// How much should be locked up in order to submit one's candidacy. - type CandidacyBond: Get>; + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + IsType<::Event>; - /// How much should be locked up in order to be able to submit votes. - type VotingBond: Get>; + /// Identifier for the elections-phragmen pallet's lock + #[pallet::constant] + type PalletId: Get; - /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up) - type LoserCandidate: OnUnbalanced>; + /// The currency that people are electing with. + type Currency: LockableCurrency + + ReservableCurrency; - /// Handler for the unbalanced reduction when a reporter has submitted a bad defunct report. - type BadReport: OnUnbalanced>; + /// What to do when the members change. + type ChangeMembers: ChangeMembers; - /// Handler for the unbalanced reduction when a member has been kicked. - type KickedMember: OnUnbalanced>; + /// What to do with genesis members + type InitializeMembers: InitializeMembers; - /// Number of members to elect. - type DesiredMembers: Get; + /// Convert a balance into a number used for election calculation. + /// This must fit into a `u64` but is allowed to be sensibly lossy. + type CurrencyToVote: CurrencyToVote>; - /// Number of runners_up to keep. - type DesiredRunnersUp: Get; + /// How much should be locked up in order to submit one's candidacy. + #[pallet::constant] + type CandidacyBond: Get>; - /// How long each seat is kept. This defines the next block number at which an election - /// round will happen. If set to zero, no elections are ever triggered and the module will - /// be in passive mode. - type TermDuration: Get; + /// Base deposit associated with voting. + /// + /// This should be sensibly high to economically ensure the pallet cannot be attacked by + /// creating a gigantic number of votes. + #[pallet::constant] + type VotingBondBase: Get>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// The amount of bond that need to be locked for each vote (32 bytes). + #[pallet::constant] + type VotingBondFactor: Get>; -decl_storage! { - trait Store for Module as PhragmenElection { - // ---- State - /// The current elected membership. Sorted based on account id. - pub Members get(fn members): Vec<(T::AccountId, BalanceOf)>; - /// The current runners_up. Sorted based on low to high merit (worse to best). 
- pub RunnersUp get(fn runners_up): Vec<(T::AccountId, BalanceOf)>; - /// The total number of vote rounds that have happened, excluding the upcoming one. - pub ElectionRounds get(fn election_rounds): u32 = Zero::zero(); - - /// Votes and locked stake of a particular voter. - /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash - pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); - - /// The present candidate list. Sorted based on account-id. A current member or runner-up - /// can never enter this vector and is always implicitly assumed to be a candidate. - pub Candidates get(fn candidates): Vec; - } add_extra_genesis { - config(members): Vec<(T::AccountId, BalanceOf)>; - build(|config: &GenesisConfig| { - let members = config.members.iter().map(|(ref member, ref stake)| { - // make sure they have enough stake - assert!( - T::Currency::free_balance(member) >= *stake, - "Genesis member does not have enough stake", - ); + /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up) + type LoserCandidate: OnUnbalanced>; - // reserve candidacy bond and set as members. - T::Currency::reserve(&member, T::CandidacyBond::get()) - .expect("Genesis member does not have enough balance to be a candidate"); - - // Note: all members will only vote for themselves, hence they must be given exactly - // their own stake as total backing. Any sane election should behave as such. - // Nonetheless, stakes will be updated for term 1 onwards according to the election. - Members::::mutate(|members| { - match members.binary_search_by(|(a, _b)| a.cmp(member)) { - Ok(_) => panic!("Duplicate member in elections phragmen genesis: {}", member), - Err(pos) => members.insert(pos, (member.clone(), *stake)), - } - }); + /// Handler for the unbalanced reduction when a member has been kicked. + type KickedMember: OnUnbalanced>; - // set self-votes to make persistent. - >::vote( - T::Origin::from(Some(member.clone()).into()), - vec![member.clone()], - *stake, - ).expect("Genesis member could not vote."); + /// Number of members to elect. + #[pallet::constant] + type DesiredMembers: Get; - member.clone() - }).collect::>(); + /// Number of runners_up to keep. + #[pallet::constant] + type DesiredRunnersUp: Get; - // report genesis members to upstream, if any. - T::InitializeMembers::initialize_members(&members); - }) - } -} + /// How long each seat is kept. This defines the next block number at which an election + /// round will happen. If set to zero, no elections are ever triggered and the module will + /// be in passive mode. + #[pallet::constant] + type TermDuration: Get; -decl_error! { - pub enum Error for Module { - /// Cannot vote when no candidates or members exist. - UnableToVote, - /// Must vote for at least one candidate. - NoVotes, - /// Cannot vote more than candidates. - TooManyVotes, - /// Cannot vote more than maximum allowed. - MaximumVotesExceeded, - /// Cannot vote with stake less than minimum balance. - LowBalance, - /// Voter can not pay voting bond. - UnableToPayBond, - /// Must be a voter. - MustBeVoter, - /// Cannot report self. - ReportSelf, - /// Duplicated candidate submission. - DuplicatedCandidate, - /// Member cannot re-submit candidacy. - MemberSubmit, - /// Runner cannot re-submit candidacy. - RunnerSubmit, - /// Candidate does not have enough funds. - InsufficientCandidateFunds, - /// Not a member. - NotMember, - /// The provided count of number of candidates is incorrect. 
- InvalidCandidateCount, - /// The provided count of number of votes is incorrect. - InvalidVoteCount, - /// The renouncing origin presented a wrong `Renouncing` parameter. - InvalidRenouncing, - /// Prediction regarding replacement after member removal is wrong. - InvalidReplacement, + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - fn deposit_event() = default; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(PhantomData); - const CandidacyBond: BalanceOf = T::CandidacyBond::get(); - const VotingBond: BalanceOf = T::VotingBond::get(); - const DesiredMembers: u32 = T::DesiredMembers::get(); - const DesiredRunnersUp: u32 = T::DesiredRunnersUp::get(); - const TermDuration: T::BlockNumber = T::TermDuration::get(); - const ModuleId: LockIdentifier = T::ModuleId::get(); + #[pallet::hooks] + impl Hooks> for Pallet { + /// What to do at the end of each block. + /// + /// Checks if an election needs to happen or not. + fn on_initialize(n: T::BlockNumber) -> Weight { + let term_duration = T::TermDuration::get(); + if !term_duration.is_zero() && (n % term_duration).is_zero() { + Self::do_phragmen() + } else { + 0 + } + } + } + #[pallet::call] + impl Pallet { /// Vote for a set of candidates for the upcoming round of election. This can be called to /// set the initial votes, or update already existing votes. /// - /// Upon initial voting, `value` units of `who`'s balance is locked and a bond amount is - /// reserved. + /// Upon initial voting, `value` units of `who`'s balance is locked and a deposit amount is + /// reserved. The deposit is based on the number of votes and can be updated over time. /// /// The `votes` should: /// - not be empty. /// - be less than the number of possible candidates. Note that all current members and /// runners-up are also automatically candidates for the next round. /// - /// It is the responsibility of the caller to not place all of their balance into the lock - /// and keep some for further transactions. + /// If `value` is more than `who`'s total balance, then the maximum of the two is used. + /// + /// The dispatch origin of this call must be signed. + /// + /// ### Warning + /// + /// It is the responsibility of the caller to **NOT** place all of their balance into the + /// lock and keep some for further operations. /// /// # - /// Base weight: 47.93 µs - /// State reads: - /// - Candidates.len() + Members.len() + RunnersUp.len() - /// - Voting (is_voter) - /// - Lock - /// - [AccountBalance(who) (unreserve + total_balance)] - /// State writes: - /// - Voting - /// - Lock - /// - [AccountBalance(who) (unreserve -- only when creating a new voter)] + /// We assume the maximum weight among all 3 cases: vote_equal, vote_more and vote_less. /// # - #[weight = T::WeightInfo::vote(votes.len() as u32)] - fn vote( - origin, + #[pallet::weight( + T::WeightInfo::vote_more(votes.len() as u32) + .max(T::WeightInfo::vote_less(votes.len() as u32)) + .max(T::WeightInfo::vote_equal(votes.len() as u32)) + )] + pub fn vote( + origin: OriginFor, votes: Vec, - #[compact] value: BalanceOf, - ) { + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + // votes should not be empty and more than `MAXIMUM_VOTE` in any case. 
ensure!(votes.len() <= MAXIMUM_VOTE, Error::::MaximumVotesExceeded); ensure!(!votes.is_empty(), Error::::NoVotes); @@ -356,261 +306,156 @@ decl_module! { let members_count = >::decode_len().unwrap_or(0); let runners_up_count = >::decode_len().unwrap_or(0); + // can never submit a vote of there are no members, and cannot submit more votes than + // all potential vote targets. // addition is valid: candidates, members and runners-up will never overlap. - let allowed_votes = candidates_count + members_count + runners_up_count; - + let allowed_votes = + candidates_count.saturating_add(members_count).saturating_add(runners_up_count); ensure!(!allowed_votes.is_zero(), Error::::UnableToVote); ensure!(votes.len() <= allowed_votes, Error::::TooManyVotes); ensure!(value > T::Currency::minimum_balance(), Error::::LowBalance); - // first time voter. Reserve bond. - if !Self::is_voter(&who) { - T::Currency::reserve(&who, T::VotingBond::get()) - .map_err(|_| Error::::UnableToPayBond)?; - } + // Reserve bond. + let new_deposit = Self::deposit_of(votes.len()); + let Voter { deposit: old_deposit, .. } = >::get(&who); + match new_deposit.cmp(&old_deposit) { + Ordering::Greater => { + // Must reserve a bit more. + let to_reserve = new_deposit - old_deposit; + T::Currency::reserve(&who, to_reserve) + .map_err(|_| Error::::UnableToPayBond)?; + }, + Ordering::Equal => {}, + Ordering::Less => { + // Must unreserve a bit. + let to_unreserve = old_deposit - new_deposit; + let _remainder = T::Currency::unreserve(&who, to_unreserve); + debug_assert!(_remainder.is_zero()); + }, + }; // Amount to be locked up. - let locked_balance = value.min(T::Currency::total_balance(&who)); - - // lock - T::Currency::set_lock( - T::ModuleId::get(), - &who, - locked_balance, - WithdrawReasons::except(WithdrawReason::TransactionPayment), - ); + let locked_stake = value.min(T::Currency::total_balance(&who)); + T::Currency::set_lock(T::PalletId::get(), &who, locked_stake, WithdrawReasons::all()); - Voting::::insert(&who, (locked_balance, votes)); + Voting::::insert(&who, Voter { votes, deposit: new_deposit, stake: locked_stake }); + Ok(None.into()) } - /// Remove `origin` as a voter. This removes the lock and returns the bond. + /// Remove `origin` as a voter. /// - /// # - /// Base weight: 36.8 µs - /// All state access is from do_remove_voter. - /// State reads: - /// - Voting - /// - [AccountData(who)] - /// State writes: - /// - Voting - /// - Locks - /// - [AccountData(who)] - /// # - #[weight = T::WeightInfo::remove_voter()] - fn remove_voter(origin) { + /// This removes the lock and returns the deposit. + /// + /// The dispatch origin of this call must be signed and be a voter. + #[pallet::weight(T::WeightInfo::remove_voter())] + pub fn remove_voter(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); - - Self::do_remove_voter(&who, true); + Self::do_remove_voter(&who); + Ok(None.into()) } - /// Report `target` for being an defunct voter. In case of a valid report, the reporter is - /// rewarded by the bond amount of `target`. Otherwise, the reporter itself is removed and - /// their bond is slashed. - /// - /// A defunct voter is defined to be: - /// - a voter whose current submitted votes are all invalid. i.e. all of them are no - /// longer a candidate nor an active member or a runner-up. + /// Submit oneself for candidacy. A fixed amount of deposit is recorded. /// + /// All candidates are wiped at the end of the term. 
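`Self::deposit_of(votes.len())` above is not shown in this hunk; given the `VotingBondBase` and `VotingBondFactor` parameters introduced in the `Config` trait, its shape is presumably `base + factor * number_of_votes` (an assumption). The three-way `Ordering` match then moves only the difference between the old and new deposit. A self-contained worked example with plain integers standing in for balances:

```rust
// Illustrative only: base = 10 and factor = 2 are made-up values.
fn deposit_of(votes: u64) -> u64 {
    10 + 2 * votes
}

fn main() {
    let old_deposit = deposit_of(5); // 20, already reserved
    let new_deposit = deposit_of(3); // 16, required after the update
    // `Ordering::Less` branch above: unreserve only the difference.
    assert!(new_deposit < old_deposit);
    assert_eq!(old_deposit - new_deposit, 4);
}
```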
They either become a member/runner-up, + /// or leave the system while their deposit is slashed. /// - /// The origin must provide the number of current candidates and votes of the reported target - /// for the purpose of accurate weight calculation. + /// The dispatch origin of this call must be signed. /// - /// # - /// No Base weight based on min square analysis. - /// Complexity of candidate_count: 1.755 µs - /// Complexity of vote_count: 18.51 µs - /// State reads: - /// - Voting(reporter) - /// - Candidate.len() - /// - Voting(Target) - /// - Candidates, Members, RunnersUp (is_defunct_voter) - /// State writes: - /// - Lock(reporter || target) - /// - [AccountBalance(reporter)] + AccountBalance(target) - /// - Voting(reporter || target) - /// Note: the db access is worse with respect to db, which is when the report is correct. - /// # - #[weight = T::WeightInfo::report_defunct_voter_correct( - defunct.candidate_count, - defunct.vote_count, - )] - fn report_defunct_voter( - origin, - defunct: DefunctVoter<::Source>, - ) -> DispatchResultWithPostInfo { - let reporter = ensure_signed(origin)?; - let target = T::Lookup::lookup(defunct.who)?; - - ensure!(reporter != target, Error::::ReportSelf); - ensure!(Self::is_voter(&reporter), Error::::MustBeVoter); - - let DefunctVoter { candidate_count, vote_count, .. } = defunct; - - ensure!( - >::decode_len().unwrap_or(0) as u32 <= candidate_count, - Error::::InvalidCandidateCount, - ); - - let (_, votes) = >::get(&target); - // indirect way to ensure target is a voter. We could call into `::contains()`, but it - // would have the same effect with one extra db access. Note that votes cannot be - // submitted with length 0. Hence, a non-zero length means that the target is a voter. - ensure!(votes.len() > 0, Error::::MustBeVoter); - - // ensure that the size of votes that need to be searched is correct. - ensure!( - votes.len() as u32 <= vote_count, - Error::::InvalidVoteCount, - ); - - let valid = Self::is_defunct_voter(&votes); - let maybe_refund = if valid { - // reporter will get the voting bond of the target - T::Currency::repatriate_reserved(&target, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; - // remove the target. They are defunct. - Self::do_remove_voter(&target, false); - None - } else { - // slash the bond of the reporter. - let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; - T::BadReport::on_unbalanced(imbalance); - // remove the reporter. - Self::do_remove_voter(&reporter, false); - Some(T::WeightInfo::report_defunct_voter_incorrect( - defunct.candidate_count, - defunct.vote_count, - )) - }; - Self::deposit_event(RawEvent::VoterReported(target, reporter, valid)); - Ok(maybe_refund.into()) - } - - /// Submit oneself for candidacy. + /// ### Warning /// - /// A candidate will either: - /// - Lose at the end of the term and forfeit their deposit. - /// - Win and become a member. Members will eventually get their stash back. - /// - Become a runner-up. Runners-ups are reserved members in case one gets forcefully - /// removed. + /// Even if a candidate ends up being a member, they must call [`Call::renounce_candidacy`] + /// to get their deposit back. Losing the spot in an election will always lead to a slash. 
/// /// # - /// Base weight = 33.33 µs - /// Complexity of candidate_count: 0.375 µs - /// State reads: - /// - Candidates - /// - Members - /// - RunnersUp - /// - [AccountBalance(who)] - /// State writes: - /// - [AccountBalance(who)] - /// - Candidates + /// The number of current candidates must be provided as witness data. /// # - #[weight = T::WeightInfo::submit_candidacy(*candidate_count)] - fn submit_candidacy(origin, #[compact] candidate_count: u32) { + #[pallet::weight(T::WeightInfo::submit_candidacy(*candidate_count))] + pub fn submit_candidacy( + origin: OriginFor, + #[pallet::compact] candidate_count: u32, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let actual_count = >::decode_len().unwrap_or(0); - ensure!( - actual_count as u32 <= candidate_count, - Error::::InvalidCandidateCount, - ); - - let is_candidate = Self::is_candidate(&who); - ensure!(is_candidate.is_err(), Error::::DuplicatedCandidate); + ensure!(actual_count as u32 <= candidate_count, Error::::InvalidWitnessData); - // assured to be an error, error always contains the index. - let index = is_candidate.unwrap_err(); + let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; ensure!(!Self::is_member(&who), Error::::MemberSubmit); - ensure!(!Self::is_runner_up(&who), Error::::RunnerSubmit); + ensure!(!Self::is_runner_up(&who), Error::::RunnerUpSubmit); T::Currency::reserve(&who, T::CandidacyBond::get()) .map_err(|_| Error::::InsufficientCandidateFunds)?; - >::mutate(|c| c.insert(index, who)); + >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); + Ok(None.into()) } /// Renounce one's intention to be a candidate for the next election round. 3 potential /// outcomes exist: - /// - `origin` is a candidate and not elected in any set. In this case, the bond is + /// + /// - `origin` is a candidate and not elected in any set. In this case, the deposit is /// unreserved, returned and origin is removed as a candidate. - /// - `origin` is a current runner-up. In this case, the bond is unreserved, returned and + /// - `origin` is a current runner-up. In this case, the deposit is unreserved, returned and /// origin is removed as a runner-up. - /// - `origin` is a current member. In this case, the bond is unreserved and origin is + /// - `origin` is a current member. In this case, the deposit is unreserved and origin is /// removed as a member, consequently not being a candidate for the next round anymore. - /// Similar to [`remove_voter`], if replacement runners exists, they are immediately used. 
- /// - /// If a candidate is renouncing: - /// Base weight: 17.28 µs - /// Complexity of candidate_count: 0.235 µs - /// State reads: - /// - Candidates - /// - [AccountBalance(who) (unreserve)] - /// State writes: - /// - Candidates - /// - [AccountBalance(who) (unreserve)] - /// If member is renouncing: - /// Base weight: 46.25 µs - /// State reads: - /// - Members, RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// State writes: - /// - Members, RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// If runner is renouncing: - /// Base weight: 46.25 µs - /// State reads: - /// - RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// State writes: - /// - RunnersUp (remove_and_replace_member), - /// - [AccountData(who) (unreserve)] - /// - #[weight = match *renouncing { + /// Similar to [`remove_member`](Self::remove_member), if replacement runners exists, they + /// are immediately used. If the prime is renouncing, then no prime will exist until the + /// next round. + /// + /// The dispatch origin of this call must be signed, and have one of the above roles. + /// + /// # + /// The type of renouncing must be provided as witness data. + /// # + #[pallet::weight(match *renouncing { Renouncing::Candidate(count) => T::WeightInfo::renounce_candidacy_candidate(count), Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(), - }] - fn renounce_candidacy(origin, renouncing: Renouncing) { + })] + pub fn renounce_candidacy( + origin: OriginFor, + renouncing: Renouncing, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; match renouncing { Renouncing::Member => { - // returns NoMember error in case of error. - let _ = Self::remove_and_replace_member(&who)?; - T::Currency::unreserve(&who, T::CandidacyBond::get()); - Self::deposit_event(RawEvent::MemberRenounced(who)); + let _ = Self::remove_and_replace_member(&who, false) + .map_err(|_| Error::::InvalidRenouncing)?; + Self::deposit_event(Event::Renounced(who)); }, Renouncing::RunnerUp => { - let mut runners_up_with_stake = Self::runners_up(); - if let Some(index) = runners_up_with_stake - .iter() - .position(|(ref r, ref _s)| r == &who) - { - runners_up_with_stake.remove(index); - // unreserve the bond - T::Currency::unreserve(&who, T::CandidacyBond::get()); - // update storage. - >::put(runners_up_with_stake); - } else { - Err(Error::::InvalidRenouncing)?; - } - } + >::try_mutate::<_, Error, _>(|runners_up| { + let index = runners_up + .iter() + .position(|SeatHolder { who: r, .. }| r == &who) + .ok_or(Error::::InvalidRenouncing)?; + // can't fail anymore. + let SeatHolder { deposit, .. } = runners_up.remove(index); + let _remainder = T::Currency::unreserve(&who, deposit); + debug_assert!(_remainder.is_zero()); + Self::deposit_event(Event::Renounced(who)); + Ok(()) + })?; + }, Renouncing::Candidate(count) => { - let mut candidates = Self::candidates(); - ensure!(count >= candidates.len() as u32, Error::::InvalidRenouncing); - if let Some(index) = candidates.iter().position(|x| *x == who) { - candidates.remove(index); - // unreserve the bond - T::Currency::unreserve(&who, T::CandidacyBond::get()); - // update storage. 
-                        <Candidates<T>>::put(candidates);
-                    } else {
-                        Err(Error::<T>::InvalidRenouncing)?;
-                    }
-                }
+                    <Candidates<T>>::try_mutate::<_, Error<T>, _>(|candidates| {
+                        ensure!(count >= candidates.len() as u32, Error::<T>::InvalidWitnessData);
+                        let index = candidates
+                            .binary_search_by(|(c, _)| c.cmp(&who))
+                            .map_err(|_| Error::<T>::InvalidRenouncing)?;
+                        let (_removed, deposit) = candidates.remove(index);
+                        let _remainder = T::Currency::unreserve(&who, deposit);
+                        debug_assert!(_remainder.is_zero());
+                        Self::deposit_event(Event::Renounced(who));
+                        Ok(())
+                    })?;
+                },
            };
+            Ok(None.into())
        }

        /// Remove a particular member from the set. This is effective immediately and the bond of
@@ -619,74 +464,82 @@ decl_module! {
        /// If a runner-up is available, then the best runner-up will be removed and replaces the
        /// outgoing member. Otherwise, a new phragmen election is started.
        ///
+        /// The dispatch origin of this call must be root.
+        ///
        /// Note that this does not affect the designated block number of the next election.
        ///
        /// # <weight>
-        /// If we have a replacement:
-        ///     - Base weight: 50.93 µs
-        ///     - State reads:
-        ///         - RunnersUp.len()
-        ///         - Members, RunnersUp (remove_and_replace_member)
-        ///     - State writes:
-        ///         - Members, RunnersUp (remove_and_replace_member)
-        /// Else, since this is a root call and will go into phragmen, we assume full block for now.
+        /// If we have a replacement, we use a small weight. Else, since this is a root call and
+        /// will go into phragmen, we assume full block for now.
        /// # </weight>
-        #[weight = if *has_replacement {
+        #[pallet::weight(if *has_replacement {
            T::WeightInfo::remove_member_with_replacement()
        } else {
-            T::MaximumBlockWeight::get()
-        }]
-        fn remove_member(
-            origin,
+            T::WeightInfo::remove_member_without_replacement()
+        })]
+        pub fn remove_member(
+            origin: OriginFor<T>,
            who: <T::Lookup as StaticLookup>::Source,
            has_replacement: bool,
        ) -> DispatchResultWithPostInfo {
            ensure_root(origin)?;
            let who = T::Lookup::lookup(who)?;

-            let will_have_replacement = <RunnersUp<T>>::decode_len().unwrap_or(0) > 0;
+            let will_have_replacement = <RunnersUp<T>>::decode_len().map_or(false, |l| l > 0);
            if will_have_replacement != has_replacement {
-                // In both cases, we will change more weight than neede. Refund and abort.
+                // In both cases, we will change more weight than needed. Refund and abort.
                return Err(Error::<T>::InvalidReplacement.with_weight(
                    // refund. The weight value comes from a benchmark which is special to this.
-                    // 5.751 µs
-                    T::WeightInfo::remove_member_wrong_refund()
-                ));
-            } // else, prediction was correct.
-
-            Self::remove_and_replace_member(&who).map(|had_replacement| {
-                let (imbalance, _) = T::Currency::slash_reserved(&who, T::CandidacyBond::get());
-                T::KickedMember::on_unbalanced(imbalance);
-                Self::deposit_event(RawEvent::MemberKicked(who.clone()));
-
-                if !had_replacement {
-                    // if we end up here, we will charge a full block weight.
-                    Self::do_phragmen();
-                }
+                    T::WeightInfo::remove_member_wrong_refund(),
+                ))
+            }
+
+            let had_replacement = Self::remove_and_replace_member(&who, true)?;
+            debug_assert_eq!(has_replacement, had_replacement);
+            Self::deposit_event(Event::MemberKicked(who.clone()));
+
+            if !had_replacement {
+                Self::do_phragmen();
+            }

-                // no refund needed.
-                None.into()
-            }).map_err(|e| e.into())
+            // no refund needed.
+            Ok(None.into())
        }

-        /// What to do at the end of each block. Checks if an election needs to happen or not.
-        fn on_initialize(n: T::BlockNumber) -> Weight {
-            // returns the correct weight.
-            Self::end_block(n)
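`remove_member` applies the same witness pattern to its weight: the root caller predicts via `has_replacement` whether a runner-up can fill the seat, and a wrong prediction aborts with `InvalidReplacement` while refunding the smaller, specially benchmarked weight. A self-contained sketch of the prediction check (plain types; names are illustrative, not the pallet's API):

fn check_replacement_witness(runners_up_len: usize, has_replacement: bool) -> Result<(), &'static str> {
    // Mirrors `<RunnersUp<T>>::decode_len().map_or(false, |l| l > 0)`.
    let will_have_replacement = runners_up_len > 0;
    if will_have_replacement != has_replacement {
        // In the pallet this is `Error::<T>::InvalidReplacement` plus a refund.
        return Err("InvalidReplacement")
    }
    Ok(())
}

fn main() {
    assert!(check_replacement_witness(2, true).is_ok());
    assert!(check_replacement_witness(0, true).is_err());
    assert!(check_replacement_witness(2, false).is_err());
}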
+        /// Clean all voters who are defunct (i.e. they do not serve any purpose at all). The
+        /// deposits of the removed voters are returned.
+        ///
+        /// This is a root function to be used only for cleaning the state.
+        ///
+        /// The dispatch origin of this call must be root.
+        ///
+        /// # <weight>
+        /// The total number of voters and those that are defunct must be provided as witness data.
+        /// # </weight>
+        #[pallet::weight(T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct))]
+        pub fn clean_defunct_voters(
+            origin: OriginFor<T>,
+            _num_voters: u32,
+            _num_defunct: u32,
+        ) -> DispatchResultWithPostInfo {
+            let _ = ensure_root(origin)?;
+            <Voting<T>>::iter()
+                .filter(|(_, x)| Self::is_defunct_voter(&x.votes))
+                .for_each(|(dv, _)| Self::do_remove_voter(&dv));
+
+            Ok(None.into())
+        }
    }
-}

-decl_event!(
-    pub enum Event<T> where
-        Balance = BalanceOf<T>,
-        <T as frame_system::Trait>::AccountId,
-    {
-        /// A new term with \[new_members\]. This indicates that enough candidates existed to run the
-        /// election, not that enough have has been elected. The inner value must be examined for
-        /// this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond slashed and
-        /// none were elected, whilst `EmptyTerm` means that no candidates existed to begin with.
-        NewTerm(Vec<(AccountId, Balance)>),
+    #[pallet::event]
+    #[pallet::generate_deposit(pub(super) fn deposit_event)]
+    pub enum Event<T: Config> {
+        /// A new term with \[new_members\]. This indicates that enough candidates existed to run
+        /// the election, not that enough have been elected. The inner value must be examined
+        /// for this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond
+        /// slashed and none were elected, whilst `EmptyTerm` means that no candidates existed to
+        /// begin with.
+        NewTerm(Vec<(<T as frame_system::Config>::AccountId, BalanceOf<T>)>),
        /// No (or not enough) candidates existed for this round. This is different from
        /// `NewTerm(\[\])`. See the description of `NewTerm`.
        EmptyTerm,
@@ -694,110 +547,301 @@ decl_event!(
        ElectionError,
        /// A \[member\] has been removed. This should always be followed by either `NewTerm` or
        /// `EmptyTerm`.
-        MemberKicked(AccountId),
-        /// A \[member\] has renounced their candidacy.
-        MemberRenounced(AccountId),
-        /// A voter was reported with the the report being successful or not.
-        /// \[voter, reporter, success\]
-        VoterReported(AccountId, AccountId, bool),
-    }
-);
-
-impl<T: Trait> Module<T> {
-    /// Attempts to remove a member `who`. If a runner-up exists, it is used as the replacement and
-    /// `Ok(true)` is returned.
+        MemberKicked(<T as frame_system::Config>::AccountId),
+        /// Someone has renounced their candidacy.
+        Renounced(<T as frame_system::Config>::AccountId),
+        /// A \[candidate\] was slashed by \[amount\] due to failing to obtain a seat as member or
+        /// runner-up.
+        ///
+        /// Note that old members and runners-up are also candidates.
+        CandidateSlashed(<T as frame_system::Config>::AccountId, BalanceOf<T>),
+        /// A \[seat holder\] was slashed by \[amount\] by being forcefully removed from the set.
+        SeatHolderSlashed(<T as frame_system::Config>::AccountId, BalanceOf<T>),
+    }
+
+    #[deprecated(note = "use `Event` instead")]
+    pub type RawEvent<T> = Event<T>;
+
+    #[pallet::error]
+    pub enum Error<T> {
+        /// Cannot vote when no candidates or members exist.
+        UnableToVote,
+        /// Must vote for at least one candidate.
+        NoVotes,
+        /// Cannot vote for more than the number of candidates.
+        TooManyVotes,
+        /// Cannot vote more than maximum allowed.
+        MaximumVotesExceeded,
+        /// Cannot vote with stake less than minimum balance.
+        LowBalance,
+        /// Voter cannot pay voting bond.
+        UnableToPayBond,
+        /// Must be a voter.
+        MustBeVoter,
+        /// Cannot report self.
+        ReportSelf,
+        /// Duplicated candidate submission.
+ DuplicatedCandidate, + /// Member cannot re-submit candidacy. + MemberSubmit, + /// Runner cannot re-submit candidacy. + RunnerUpSubmit, + /// Candidate does not have enough funds. + InsufficientCandidateFunds, + /// Not a member. + NotMember, + /// The provided count of number of candidates is incorrect. + InvalidWitnessData, + /// The provided count of number of votes is incorrect. + InvalidVoteCount, + /// The renouncing origin presented a wrong `Renouncing` parameter. + InvalidRenouncing, + /// Prediction regarding replacement after member removal is wrong. + InvalidReplacement, + } + + /// The current elected members. /// - /// Otherwise, `Ok(false)` is returned to signal the caller. + /// Invariant: Always sorted based on account id. + #[pallet::storage] + #[pallet::getter(fn members)] + pub type Members = + StorageValue<_, Vec>>, ValueQuery>; + + /// The current reserved runners-up. /// - /// If a replacement exists, `Members` and `RunnersUp` storage is updated, where the first - /// element of `RunnersUp` is used as the replacement and `Ok(true)` is returned. Else, - /// `Ok(false)` is returned with no storage updated. + /// Invariant: Always sorted based on rank (worse to best). Upon removal of a member, the + /// last (i.e. _best_) runner-up will be replaced. + #[pallet::storage] + #[pallet::getter(fn runners_up)] + pub type RunnersUp = + StorageValue<_, Vec>>, ValueQuery>; + + /// The present candidate list. A current member or runner-up can never enter this vector + /// and is always implicitly assumed to be a candidate. /// - /// Note that this function _will_ call into `T::ChangeMembers` in case any change happens - /// (`Ok(true)`). + /// Second element is the deposit. /// - /// If replacement exists, this will read and write from/into both `Members` and `RunnersUp`. - fn remove_and_replace_member(who: &T::AccountId) -> Result { - let mut members_with_stake = Self::members(); - if let Ok(index) = members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(who)) { - members_with_stake.remove(index); - - let next_up = >::mutate(|runners_up| runners_up.pop()); - let maybe_replacement = next_up.and_then(|(replacement, stake)| - members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(&replacement)) - .err() - .map(|index| { - members_with_stake.insert(index, (replacement.clone(), stake)); - replacement - }) + /// Invariant: Always sorted based on account id. + #[pallet::storage] + #[pallet::getter(fn candidates)] + pub type Candidates = StorageValue<_, Vec<(T::AccountId, BalanceOf)>, ValueQuery>; + + /// The total number of vote rounds that have happened, excluding the upcoming one. + #[pallet::storage] + #[pallet::getter(fn election_rounds)] + pub type ElectionRounds = StorageValue<_, u32, ValueQuery>; + + /// Votes and locked stake of a particular voter. + /// + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. 
+    #[pallet::storage]
+    #[pallet::getter(fn voting)]
+    pub type Voting<T: Config> =
+        StorageMap<_, Twox64Concat, T::AccountId, Voter<T::AccountId, BalanceOf<T>>, ValueQuery>;
+
+    #[pallet::genesis_config]
+    pub struct GenesisConfig<T: Config> {
+        pub members: Vec<(T::AccountId, BalanceOf<T>)>,
+    }
+
+    #[cfg(feature = "std")]
+    impl<T: Config> Default for GenesisConfig<T> {
+        fn default() -> Self {
+            Self { members: Default::default() }
+        }
+    }
+
+    #[pallet::genesis_build]
+    impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
+        fn build(&self) {
+            assert!(
+                self.members.len() as u32 <= T::DesiredMembers::get(),
+                "Cannot accept more than DesiredMembers genesis member",
+            );
+            let members = self
+                .members
+                .iter()
+                .map(|(ref member, ref stake)| {
+                    // make sure they have enough stake.
+                    assert!(
+                        T::Currency::free_balance(member) >= *stake,
+                        "Genesis member does not have enough stake.",
+                    );
+
+                    // Note: all members will only vote for themselves, hence they must be given
+                    // exactly their own stake as total backing. Any sane election should behave as
+                    // such. Nonetheless, stakes will be updated for term 1 onwards according to the
+                    // election.
+                    Members::<T>::mutate(|members| {
+                        match members.binary_search_by(|m| m.who.cmp(member)) {
+                            Ok(_) => {
+                                panic!("Duplicate member in elections-phragmen genesis: {}", member)
+                            },
+                            Err(pos) => members.insert(
+                                pos,
+                                SeatHolder {
+                                    who: member.clone(),
+                                    stake: *stake,
+                                    deposit: Zero::zero(),
+                                },
+                            ),
+                        }
+                    });
+
+                    // set self-votes to make persistent. Genesis voters don't have any bond, nor do
+                    // they have any lock. NOTE: this means that we will still try to remove a lock
+                    // once this genesis voter is removed, and for now it is okay because
+                    // remove_lock is noop if lock is not there.
+                    <Voting<T>>::insert(
+                        &member,
+                        Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() },
+                    );
+
+                    member.clone()
+                })
+                .collect::<Vec<T::AccountId>>();
+
+            // report genesis members to upstream, if any.
+            T::InitializeMembers::initialize_members(&members);
+        }
+    }
+}

-        <Members<T>>::put(&members_with_stake);
-        let members = members_with_stake.into_iter().map(|m| m.0).collect::<Vec<_>>();
-        let result = Ok(maybe_replacement.is_some());
-        let old = [who.clone()];
-        match maybe_replacement {
-            Some(new) => T::ChangeMembers::change_members_sorted(&[new], &old, &members),
-            None => T::ChangeMembers::change_members_sorted(&[], &old, &members),
+impl<T: Config> Pallet<T> {
+    /// The deposit value of `count` votes.
+    fn deposit_of(count: usize) -> BalanceOf<T> {
+        T::VotingBondBase::get()
+            .saturating_add(T::VotingBondFactor::get().saturating_mul((count as u32).into()))
+    }
+
+    /// Attempts to remove a member `who`. If a runner-up exists, it is used as the replacement.
+    ///
+    /// Returns:
+    ///
+    /// - `Ok(true)` if the member was removed and a replacement was found.
+    /// - `Ok(false)` if the member was removed but no replacement was found.
+    /// - `Err(_)` if the member was not found.
+    ///
+    /// Both `Members` and `RunnersUp` storage is updated accordingly. `T::ChangeMembers` is called
+    /// if needed. If `slash` is true, the deposit of the potentially removed member is slashed,
+    /// else, it is unreserved.
+    ///
+    /// ### Note: Prime preservation
+    ///
+    /// This function attempts to preserve the prime. If the removed member is not the prime, it is
+    /// set again via [`Config::ChangeMembers`].
+    fn remove_and_replace_member(who: &T::AccountId, slash: bool) -> Result<bool, DispatchError> {
+        // closure will return:
+        // - `Ok(Some(replacement))` if the member was removed and a replacement was found.
+        // - `Ok(None)` if the member was removed but no replacement was found
+        // - `Err(_)` if who is not a member.
+        let maybe_replacement = <Members<T>>::try_mutate::<_, Error<T>, _>(|members| {
+            let remove_index = members
+                .binary_search_by(|m| m.who.cmp(who))
+                .map_err(|_| Error::<T>::NotMember)?;
+            // we remove the member anyhow, regardless of having a runner-up or not.
+            let removed = members.remove(remove_index);
+
+            // slash or unreserve
+            if slash {
+                let (imbalance, _remainder) = T::Currency::slash_reserved(who, removed.deposit);
+                debug_assert!(_remainder.is_zero());
+                T::LoserCandidate::on_unbalanced(imbalance);
+                Self::deposit_event(Event::SeatHolderSlashed(who.clone(), removed.deposit));
+            } else {
+                T::Currency::unreserve(who, removed.deposit);
+            }
+
+            let maybe_next_best = <RunnersUp<T>>::mutate(|r| r.pop()).map(|next_best| {
+                // defensive-only: Members and runners-up are disjoint. This will always be err and
+                // give us an index to insert.
+                if let Err(index) = members.binary_search_by(|m| m.who.cmp(&next_best.who)) {
+                    members.insert(index, next_best.clone());
+                } else {
+                    // overlap. This can never happen. If so, it seems like our intended replacement
+                    // is already a member, so not much more to do.
+                    log::error!(
+                        target: "runtime::elections-phragmen",
+                        "A member seems to also be a runner-up.",
+                    );
+                }
+                next_best
+            });
+            Ok(maybe_next_best)
+        })?;
+
+        let remaining_member_ids_sorted =
+            Self::members().into_iter().map(|x| x.who.clone()).collect::<Vec<_>>();
+        let outgoing = &[who.clone()];
+        let maybe_current_prime = T::ChangeMembers::get_prime();
+        let return_value = match maybe_replacement {
+            // member ids are already sorted, other two elements have one item.
+            Some(incoming) => {
+                T::ChangeMembers::change_members_sorted(
+                    &[incoming.who],
+                    outgoing,
+                    &remaining_member_ids_sorted[..],
+                );
+                true
+            },
+            None => {
+                T::ChangeMembers::change_members_sorted(
+                    &[],
+                    outgoing,
+                    &remaining_member_ids_sorted[..],
+                );
+                false
+            },
+        };
+
+        // if there was a prime before and they are not the one being removed, then set them
+        // again.
+        if let Some(current_prime) = maybe_current_prime {
+            if &current_prime != who {
+                T::ChangeMembers::set_prime(Some(current_prime));
            }
-            result
-        } else {
-            Err(Error::<T>::NotMember)?
        }
+
+        Ok(return_value)
    }

    /// Check if `who` is a candidate. It returns the insert index, as an error, if the element
    /// does not exist.
-    ///
-    /// O(LogN) given N candidates.
    fn is_candidate(who: &T::AccountId) -> Result<(), usize> {
-        Self::candidates().binary_search(who).map(|_| ())
+        Self::candidates().binary_search_by(|c| c.0.cmp(who)).map(|_| ())
    }

    /// Check if `who` is a voter. It may or may not be a _current_ one.
-    ///
-    /// State: O(1).
    fn is_voter(who: &T::AccountId) -> bool {
        Voting::<T>::contains_key(who)
    }

    /// Check if `who` is currently an active member.
-    ///
-    /// O(LogN) given N members. Since members are limited, O(1).
    fn is_member(who: &T::AccountId) -> bool {
-        Self::members().binary_search_by(|(a, _b)| a.cmp(who)).is_ok()
+        Self::members().binary_search_by(|m| m.who.cmp(who)).is_ok()
    }

    /// Check if `who` is currently an active runner-up.
-    ///
-    /// O(LogN) given N runners-up. Since runners-up are limited, O(1).
    fn is_runner_up(who: &T::AccountId) -> bool {
-        Self::runners_up().iter().position(|(a, _b)| a == who).is_some()
-    }
-
-    /// Returns number of desired members.
-    fn desired_members() -> u32 {
-        T::DesiredMembers::get()
-    }
-
-    /// Returns number of desired runners up.
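The `deposit_of` helper above computes a voter's deposit as `VotingBondBase + VotingBondFactor * number_of_votes`. A runnable arithmetic sketch, with concrete numbers that are only illustrative (the real values are runtime parameters):

// Worked example of the per-vote deposit formula in `deposit_of`.
fn deposit_of(base: u64, factor: u64, votes: u64) -> u64 {
    base.saturating_add(factor.saturating_mul(votes))
}

fn main() {
    // With base = 2 and factor = 1 (as in the `voting_reserves_bond_per_vote`
    // test below), voting for 3 candidates reserves 2 + 1 * 3 = 5.
    assert_eq!(deposit_of(2, 1, 3), 5);
    // With factor = 0, the deposit is flat regardless of the vote count.
    assert_eq!(deposit_of(2, 0, 16), 2);
}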
- fn desired_runners_up() -> u32 { - T::DesiredRunnersUp::get() - } - - /// Returns the term duration - fn term_duration() -> T::BlockNumber { - T::TermDuration::get() + Self::runners_up().iter().position(|r| &r.who == who).is_some() } /// Get the members' account ids. fn members_ids() -> Vec { - Self::members().into_iter().map(|(m, _)| m).collect::>() + Self::members().into_iter().map(|m| m.who).collect::>() } - /// The the runners' up account ids. - fn runners_up_ids() -> Vec { - Self::runners_up().into_iter().map(|(r, _)| r).collect::>() + /// Get a concatenation of previous members and runners-up and their deposits. + /// + /// These accounts are essentially treated as candidates. + fn implicit_candidates_with_deposit() -> Vec<(T::AccountId, BalanceOf)> { + // invariant: these two are always without duplicates. + Self::members() + .into_iter() + .map(|m| (m.who, m.deposit)) + .chain(Self::runners_up().into_iter().map(|r| (r.who, r.deposit))) + .collect::>() } /// Check if `votes` will correspond to a defunct voter. As no origin is part of the inputs, @@ -806,268 +850,278 @@ impl Module { /// O(NLogM) with M candidates and `who` having voted for `N` of them. /// Reads Members, RunnersUp, Candidates and Voting(who) from database. fn is_defunct_voter(votes: &[T::AccountId]) -> bool { - votes.iter().all(|v| - !Self::is_member(v) && - !Self::is_runner_up(v) && - !Self::is_candidate(v).is_ok() - ) + votes.iter().all(|v| { + !Self::is_member(v) && !Self::is_runner_up(v) && !Self::is_candidate(v).is_ok() + }) } /// Remove a certain someone as a voter. - /// - /// This will clean always clean the storage associated with the voter, and remove the balance - /// lock. Optionally, it would also return the reserved voting bond if indicated by `unreserve`. - /// If unreserve is true, has 3 storage reads and 1 reads. - /// - /// DB access: Voting and Lock are always written to, if unreserve, then 1 read and write added. - fn do_remove_voter(who: &T::AccountId, unreserve: bool) { - // remove storage and lock. - Voting::::remove(who); - T::Currency::remove_lock(T::ModuleId::get(), who); - - if unreserve { - T::Currency::unreserve(who, T::VotingBond::get()); - } - } + fn do_remove_voter(who: &T::AccountId) { + let Voter { deposit, .. } = >::take(who); - /// Check there's nothing to do this block. - /// - /// Runs phragmen election and cleans all the previous candidate state. The voter state is NOT - /// cleaned and voters must themselves submit a transaction to retract. - fn end_block(block_number: T::BlockNumber) -> Weight { - if !Self::term_duration().is_zero() { - if (block_number % Self::term_duration()).is_zero() { - Self::do_phragmen(); - return T::MaximumBlockWeight::get() - } - } - 0 + // remove storage, lock and unreserve. + T::Currency::remove_lock(T::PalletId::get(), who); + + // NOTE: we could check the deposit amount before removing and skip if zero, but it will be + // a noop anyhow. + let _remainder = T::Currency::unreserve(who, deposit); + debug_assert!(_remainder.is_zero()); } /// Run the phragmen election with all required side processes and state updates, if election /// succeeds. Else, it will emit an `ElectionError` event. /// /// Calls the appropriate [`ChangeMembers`] function variant internally. - /// - /// Reads: O(C + V*E) where C = candidates, V voters and E votes per voter exits. - /// Writes: O(M + R) with M desired members and R runners_up. 
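The rewritten `do_phragmen` below returns a weight computed from three quantities measured during the run: the candidate count, the voter count, and the total number of vote "edges" accumulated in `num_edges` (one edge per vote a voter casts). A plain sketch of how those inputs relate (illustrative only, not the pallet's API):

// Gathers the three weight inputs fed to `T::WeightInfo::election_phragmen`.
fn election_weight_inputs(candidates: u32, votes_per_voter: &[u32]) -> (u32, u32, u32) {
    let voters = votes_per_voter.len() as u32;
    // Mirrors the `num_edges = num_edges.saturating_add(votes.len() as u32)` loop.
    let edges: u32 = votes_per_voter.iter().sum();
    (candidates, voters, edges)
}

fn main() {
    // 4 candidates; 3 voters casting 2, 1 and 3 votes => 6 edges.
    assert_eq!(election_weight_inputs(4, &[2, 1, 3]), (4, 3, 6));
}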
- fn do_phragmen() { - let desired_seats = Self::desired_members() as usize; - let desired_runners_up = Self::desired_runners_up() as usize; + fn do_phragmen() -> Weight { + let desired_seats = T::DesiredMembers::get() as usize; + let desired_runners_up = T::DesiredRunnersUp::get() as usize; let num_to_elect = desired_runners_up + desired_seats; - let mut candidates = Self::candidates(); - // candidates who explicitly called `submit_candidacy`. Only these folks are at risk of - // losing their bond. - let exposed_candidates = candidates.clone(); - // current members are always a candidate for the next round as well. - // this is guaranteed to not create any duplicates. - candidates.append(&mut Self::members_ids()); - // previous runners_up are also always candidates for the next round. - candidates.append(&mut Self::runners_up_ids()); - - if candidates.len().is_zero() { - Self::deposit_event(RawEvent::EmptyTerm); - return; + let mut candidates_and_deposit = Self::candidates(); + // add all the previous members and runners-up as candidates as well. + candidates_and_deposit.append(&mut Self::implicit_candidates_with_deposit()); + + if candidates_and_deposit.len().is_zero() { + Self::deposit_event(Event::EmptyTerm); + return T::DbWeight::get().reads(5) } + // All of the new winners that come out of phragmen will thus have a deposit recorded. + let candidate_ids = + candidates_and_deposit.iter().map(|(x, _)| x).cloned().collect::>(); + // helper closures to deal with balance/stake. let total_issuance = T::Currency::total_issuance(); let to_votes = |b: BalanceOf| T::CurrencyToVote::to_vote(b, total_issuance); let to_balance = |e: ExtendedBalance| T::CurrencyToVote::to_currency(e, total_issuance); + let mut num_edges: u32 = 0; // used for prime election. let voters_and_stakes = Voting::::iter() - .map(|(voter, (stake, votes))| (voter, stake, votes)) + .map(|(voter, Voter { stake, votes, .. })| (voter, stake, votes)) .collect::>(); // used for phragmen. - let voters_and_votes = voters_and_stakes.iter() + let voters_and_votes = voters_and_stakes + .iter() .cloned() - .map(|(voter, stake, votes)| { (voter, to_votes(stake), votes)} ) + .map(|(voter, stake, votes)| { + num_edges = num_edges.saturating_add(votes.len() as u32); + (voter, to_votes(stake), votes) + }) .collect::>(); + let weight_candidates = candidates_and_deposit.len() as u32; + let weight_voters = voters_and_votes.len() as u32; + let weight_edges = num_edges; let _ = sp_npos_elections::seq_phragmen::( num_to_elect, - candidates, + candidate_ids, voters_and_votes.clone(), None, - ).map(|ElectionResult { winners, assignments: _ }| { - let old_members_ids = >::take().into_iter() - .map(|(m, _)| m) - .collect::>(); - let old_runners_up_ids = >::take().into_iter() - .map(|(r, _)| r) - .collect::>(); + ) + .map(|ElectionResult { winners, assignments: _ }| { + // this is already sorted by id. + let old_members_ids_sorted = + >::take().into_iter().map(|m| m.who).collect::>(); + // this one needs a sort by id. + let mut old_runners_up_ids_sorted = + >::take().into_iter().map(|r| r.who).collect::>(); + old_runners_up_ids_sorted.sort(); // filter out those who end up with no backing stake. - let new_set_with_stake = winners + let mut new_set_with_stake = winners .into_iter() .filter_map(|(m, b)| if b.is_zero() { None } else { Some((m, to_balance(b))) }) .collect::)>>(); - // OPTIMISATION NOTE: we could bail out here if `new_set.len() == 0`. There isn't much - // left to do. 
Yet, re-arranging the code would require duplicating the slashing of - // exposed candidates, cleaning any previous members, and so on. For now, in favour of - // readability and veracity, we keep it simple. + // OPTIMIZATION NOTE: we could bail out here if `new_set.len() == 0`. There isn't + // much left to do. Yet, re-arranging the code would require duplicating the + // slashing of exposed candidates, cleaning any previous members, and so on. For + // now, in favor of readability and veracity, we keep it simple. // split new set into winners and runners up. let split_point = desired_seats.min(new_set_with_stake.len()); - let mut new_members = (&new_set_with_stake[..split_point]).to_vec(); - - // save the runners up as-is. They are sorted based on desirability. - // save the members, sorted based on account id. - new_members.sort_by(|i, j| i.0.cmp(&j.0)); - - // Now we select a prime member using a [Borda count](https://en.wikipedia.org/wiki/Borda_count). - // We weigh everyone's vote for that new member by a multiplier based on the order - // of the votes. i.e. the first person a voter votes for gets a 16x multiplier, - // the next person gets a 15x multiplier, an so on... (assuming `MAXIMUM_VOTE` = 16) - let mut prime_votes: Vec<_> = new_members.iter().map(|c| (&c.0, BalanceOf::::zero())).collect(); + let mut new_members_sorted_by_id = + new_set_with_stake.drain(..split_point).collect::>(); + new_members_sorted_by_id.sort_by(|i, j| i.0.cmp(&j.0)); + + // all the rest will be runners-up + new_set_with_stake.reverse(); + let new_runners_up_sorted_by_rank = new_set_with_stake; + let mut new_runners_up_ids_sorted = + new_runners_up_sorted_by_rank.iter().map(|(r, _)| r.clone()).collect::>(); + new_runners_up_ids_sorted.sort(); + + // Now we select a prime member using a [Borda + // count](https://en.wikipedia.org/wiki/Borda_count). We weigh everyone's vote for + // that new member by a multiplier based on the order of the votes. i.e. the first + // person a voter votes for gets a 16x multiplier, the next person gets a 15x + // multiplier, an so on... (assuming `MAXIMUM_VOTE` = 16) + let mut prime_votes = new_members_sorted_by_id + .iter() + .map(|c| (&c.0, BalanceOf::::zero())) + .collect::>(); for (_, stake, votes) in voters_and_stakes.into_iter() { - for (vote_multiplier, who) in votes.iter() + for (vote_multiplier, who) in votes + .iter() .enumerate() .map(|(vote_position, who)| ((MAXIMUM_VOTE - vote_position) as u32, who)) { if let Ok(i) = prime_votes.binary_search_by_key(&who, |k| k.0) { - prime_votes[i].1 = prime_votes[i].1.saturating_add( - stake.saturating_mul(vote_multiplier.into()) - ); + prime_votes[i].1 = prime_votes[i] + .1 + .saturating_add(stake.saturating_mul(vote_multiplier.into())); } } } - // We then select the new member with the highest weighted stake. In the case of - // a tie, the last person in the list with the tied score is selected. This is - // the person with the "highest" account id based on the sort above. + // We then select the new member with the highest weighted stake. In the case of a tie, + // the last person in the list with the tied score is selected. This is the person with + // the "highest" account id based on the sort above. let prime = prime_votes.into_iter().max_by_key(|x| x.1).map(|x| x.0.clone()); - // new_members_ids is sorted by account id. - let new_members_ids = new_members + // new_members_sorted_by_id is sorted by account id. 
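The Borda-style prime selection described above can be reproduced standalone. The sketch below mirrors the logic with plain `u64` ids and stakes (illustrative only): each ranked vote is weighted by `MAXIMUM_VOTE - position` and scaled by the voter's stake, and `max_by_key` breaks ties toward the highest account id because it returns the last maximal element.

const MAXIMUM_VOTE: usize = 16;

// `voters` is a list of (stake, ranked votes); returns the prime, if any.
fn prime(new_members: &mut Vec<u64>, voters: &[(u64, Vec<u64>)]) -> Option<u64> {
    new_members.sort();
    let mut prime_votes: Vec<(u64, u64)> = new_members.iter().map(|m| (*m, 0)).collect();
    for (stake, votes) in voters {
        for (position, who) in votes.iter().enumerate() {
            let multiplier = (MAXIMUM_VOTE - position) as u64;
            if let Ok(i) = prime_votes.binary_search_by_key(who, |(m, _)| *m) {
                prime_votes[i].1 += stake * multiplier;
            }
        }
    }
    // Last maximal element wins, i.e. the highest id on a tied score.
    prime_votes.into_iter().max_by_key(|(_, score)| *score).map(|(m, _)| m)
}

fn main() {
    // Members 4 and 5; a stake-20 voter ranks [4], a stake-10 voter ranks [5, 4].
    // Scores: 4 -> 20*16 + 10*15 = 470, 5 -> 10*16 = 160, so 4 becomes prime.
    assert_eq!(prime(&mut vec![4, 5], &[(20, vec![4]), (10, vec![5, 4])]), Some(4));
}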
+ let new_members_ids_sorted = new_members_sorted_by_id .iter() .map(|(m, _)| m.clone()) .collect::>(); - let new_runners_up = &new_set_with_stake[split_point..] - .into_iter() - .cloned() - .rev() - .collect::)>>(); - // new_runners_up remains sorted by desirability. - let new_runners_up_ids = new_runners_up - .iter() - .map(|(r, _)| r.clone()) - .collect::>(); - // report member changes. We compute diff because we need the outgoing list. - let (incoming, outgoing) = T::ChangeMembers::compute_members_diff( - &new_members_ids, - &old_members_ids, - ); - T::ChangeMembers::change_members_sorted( - &incoming, - &outgoing, - &new_members_ids, + let (incoming, outgoing) = T::ChangeMembers::compute_members_diff_sorted( + &new_members_ids_sorted, + &old_members_ids_sorted, ); + T::ChangeMembers::change_members_sorted(&incoming, &outgoing, &new_members_ids_sorted); T::ChangeMembers::set_prime(prime); - // outgoing candidates lose their bond. - let mut to_burn_bond = outgoing.to_vec(); - - // compute the outgoing of runners up as well and append them to the `to_burn_bond` - { - let (_, outgoing) = T::ChangeMembers::compute_members_diff( - &new_runners_up_ids, - &old_runners_up_ids, - ); - to_burn_bond.extend(outgoing); - } - - // Burn loser bond. members list is sorted. O(NLogM) (N candidates, M members) - // runner up list is not sorted. O(K*N) given K runner ups. Overall: O(NLogM + N*K) - // both the member and runner counts are bounded. - exposed_candidates.into_iter().for_each(|c| { - // any candidate who is not a member and not a runner up. - if new_members.binary_search_by_key(&c, |(m, _)| m.clone()).is_err() - && !new_runners_up_ids.contains(&c) + // All candidates/members/runners-up who are no longer retaining a position as a + // seat holder will lose their bond. + candidates_and_deposit.iter().for_each(|(c, d)| { + if new_members_ids_sorted.binary_search(c).is_err() && + new_runners_up_ids_sorted.binary_search(c).is_err() { - let (imbalance, _) = T::Currency::slash_reserved(&c, T::CandidacyBond::get()); + let (imbalance, _) = T::Currency::slash_reserved(c, *d); T::LoserCandidate::on_unbalanced(imbalance); + Self::deposit_event(Event::CandidateSlashed(c.clone(), *d)); } }); - // Burn outgoing bonds - to_burn_bond.into_iter().for_each(|x| { - let (imbalance, _) = T::Currency::slash_reserved(&x, T::CandidacyBond::get()); - T::LoserCandidate::on_unbalanced(imbalance); - }); - - >::put(&new_members); - >::put(new_runners_up); - - Self::deposit_event(RawEvent::NewTerm(new_members.clone().to_vec())); + // write final values to storage. + let deposit_of_candidate = |x: &T::AccountId| -> BalanceOf { + // defensive-only. This closure is used against the new members and new runners-up, + // both of which are phragmen winners and thus must have deposit. + candidates_and_deposit + .iter() + .find_map(|(c, d)| if c == x { Some(*d) } else { None }) + .unwrap_or_default() + }; + // fetch deposits from the one recorded one. This will make sure that a candidate who + // submitted candidacy before a change to candidacy deposit will have the correct amount + // recorded. + >::put( + new_members_sorted_by_id + .iter() + .map(|(who, stake)| SeatHolder { + deposit: deposit_of_candidate(&who), + who: who.clone(), + stake: stake.clone(), + }) + .collect::>(), + ); + >::put( + new_runners_up_sorted_by_rank + .into_iter() + .map(|(who, stake)| SeatHolder { + deposit: deposit_of_candidate(&who), + who, + stake, + }) + .collect::>(), + ); // clean candidates. 
>::kill(); - ElectionRounds::mutate(|v| *v += 1); - }).map_err(|e| { - frame_support::debug::error!("elections-phragmen: failed to run election [{:?}].", e); - Self::deposit_event(RawEvent::ElectionError); + Self::deposit_event(Event::NewTerm(new_members_sorted_by_id)); + >::mutate(|v| *v += 1); + }) + .map_err(|e| { + log::error!( + target: "runtime::elections-phragmen", + "Failed to run election [{:?}].", + e, + ); + Self::deposit_event(Event::ElectionError); }); + + T::WeightInfo::election_phragmen(weight_candidates, weight_voters, weight_edges) } } -impl Contains for Module { +impl Contains for Pallet { fn contains(who: &T::AccountId) -> bool { Self::is_member(who) } - fn sorted_members() -> Vec { Self::members_ids() } +} + +impl SortedMembers for Pallet { + fn contains(who: &T::AccountId) -> bool { + Self::is_member(who) + } + + fn sorted_members() -> Vec { + Self::members_ids() + } // A special function to populate members in this pallet for passing Origin // checks in runtime benchmarking. #[cfg(feature = "runtime-benchmarks")] fn add(who: &T::AccountId) { - Members::::mutate(|members| { - match members.binary_search_by(|(a, _b)| a.cmp(who)) { - Ok(_) => (), - Err(pos) => members.insert(pos, (who.clone(), BalanceOf::::default())), - } + Members::::mutate(|members| match members.binary_search_by(|m| m.who.cmp(who)) { + Ok(_) => (), + Err(pos) => members.insert(pos, SeatHolder { who: who.clone(), ..Default::default() }), }) } } -impl ContainsLengthBound for Module { - fn min_len() -> usize { 0 } +impl ContainsLengthBound for Pallet { + fn min_len() -> usize { + 0 + } /// Implementation uses a parameter type so calling is cost-free. fn max_len() -> usize { - Self::desired_members() as usize + T::DesiredMembers::get() as usize } } #[cfg(test)] mod tests { use super::*; - use std::cell::RefCell; - use frame_support::{assert_ok, assert_noop, assert_err_with_weight, parameter_types, - weights::Weight, + use crate as elections_phragmen; + use frame_support::{ + assert_noop, assert_ok, dispatch::DispatchResultWithPostInfo, parameter_types, + traits::OnInitialize, }; - use substrate_test_utils::assert_eq_uvec; + use frame_system::ensure_signed; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, BuildStorage, DispatchResult, - traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; - use crate as elections_phragmen; + use substrate_test_utils::assert_eq_uvec; parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Test { - type BaseCallFilter = (); + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -1079,69 +1133,41 @@ mod tests { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Module; + type AccountStore = frame_system::Pallet; type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type WeightInfo = (); } - parameter_types! { - pub const CandidacyBond: u64 = 3; - } - - thread_local! { - static VOTING_BOND: RefCell = RefCell::new(2); - static DESIRED_MEMBERS: RefCell = RefCell::new(2); - static DESIRED_RUNNERS_UP: RefCell = RefCell::new(2); - static TERM_DURATION: RefCell = RefCell::new(5); - } - - pub struct VotingBond; - impl Get for VotingBond { - fn get() -> u64 { VOTING_BOND.with(|v| *v.borrow()) } - } - - pub struct DesiredMembers; - impl Get for DesiredMembers { - fn get() -> u32 { DESIRED_MEMBERS.with(|v| *v.borrow()) } - } - - pub struct DesiredRunnersUp; - impl Get for DesiredRunnersUp { - fn get() -> u32 { DESIRED_RUNNERS_UP.with(|v| *v.borrow()) } - } - - pub struct TermDuration; - impl Get for TermDuration { - fn get() -> u64 { TERM_DURATION.with(|v| *v.borrow()) } - } - - thread_local! { - pub static MEMBERS: RefCell> = RefCell::new(vec![]); - pub static PRIME: RefCell> = RefCell::new(None); + frame_support::parameter_types! { + pub static VotingBondBase: u64 = 2; + pub static VotingBondFactor: u64 = 0; + pub static CandidacyBond: u64 = 3; + pub static DesiredMembers: u32 = 2; + pub static DesiredRunnersUp: u32 = 0; + pub static TermDuration: u64 = 5; + pub static Members: Vec = vec![]; + pub static Prime: Option = None; } pub struct TestChangeMembers; @@ -1182,27 +1208,31 @@ mod tests { fn set_prime(who: Option) { PRIME.with(|p| *p.borrow_mut() = who); } + + fn get_prime() -> Option { + PRIME.with(|p| *p.borrow()) + } } - parameter_types!{ - pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; + parameter_types! 
{ + pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } - impl Trait for Test { - type ModuleId = ElectionsPhragmenModuleId; + impl Config for Test { + type PalletId = ElectionsPhragmenPalletId; type Event = Event; type Currency = Balances; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type ChangeMembers = TestChangeMembers; type InitializeMembers = (); type CandidacyBond = CandidacyBond; - type VotingBond = VotingBond; + type VotingBondBase = VotingBondBase; + type VotingBondFactor = VotingBondFactor; type TermDuration = TermDuration; type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type LoserCandidate = (); type KickedMember = (); - type BadReport = (); type WeightInfo = (); } @@ -1215,90 +1245,118 @@ mod tests { NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Module, Call, Event}, - Balances: pallet_balances::{Module, Call, Event, Config}, - Elections: elections_phragmen::{Module, Call, Event, Config}, + System: frame_system::{Pallet, Call, Event}, + Balances: pallet_balances::{Pallet, Call, Event, Config}, + Elections: elections_phragmen::{Pallet, Call, Event, Config}, } ); pub struct ExtBuilder { - genesis_members: Vec<(u64, u64)>, balance_factor: u64, - voter_bond: u64, - term_duration: u64, - desired_runners_up: u32, - desired_members: u32, + genesis_members: Vec<(u64, u64)>, } impl Default for ExtBuilder { fn default() -> Self { - Self { - genesis_members: vec![], - balance_factor: 1, - voter_bond: 2, - term_duration: 5, - desired_runners_up: 0, - desired_members: 2, - } + Self { balance_factor: 1, genesis_members: vec![] } } } impl ExtBuilder { - pub fn voter_bond(mut self, fee: u64) -> Self { - self.voter_bond = fee; + pub fn voter_bond(self, bond: u64) -> Self { + VOTING_BOND_BASE.with(|v| *v.borrow_mut() = bond); + self + } + pub fn voter_bond_factor(self, bond: u64) -> Self { + VOTING_BOND_FACTOR.with(|v| *v.borrow_mut() = bond); self } - pub fn desired_runners_up(mut self, count: u32) -> Self { - self.desired_runners_up = count; + pub fn desired_runners_up(self, count: u32) -> Self { + DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = count); self } - pub fn term_duration(mut self, duration: u64) -> Self { - self.term_duration = duration; + pub fn term_duration(self, duration: u64) -> Self { + TERM_DURATION.with(|v| *v.borrow_mut() = duration); self } pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { + MEMBERS.with(|m| { + *m.borrow_mut() = members.iter().map(|(m, _)| m.clone()).collect::>() + }); self.genesis_members = members; self } - #[cfg(feature = "runtime-benchmarks")] - pub fn desired_members(mut self, count: u32) -> Self { - self.desired_members = count; + pub fn desired_members(self, count: u32) -> Self { + DESIRED_MEMBERS.with(|m| *m.borrow_mut() = count); self } pub fn balance_factor(mut self, factor: u64) -> Self { self.balance_factor = factor; self } - fn set_constants(&self) { - VOTING_BOND.with(|v| *v.borrow_mut() = self.voter_bond); - TERM_DURATION.with(|v| *v.borrow_mut() = self.term_duration); - DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = self.desired_runners_up); - DESIRED_MEMBERS.with(|m| *m.borrow_mut() = self.desired_members); - MEMBERS.with(|m| *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>()); - } pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - self.set_constants(); + MEMBERS.with(|m| { + *m.borrow_mut() = + self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>() + }); 
let mut ext: sp_io::TestExternalities = GenesisConfig { - pallet_balances: Some(pallet_balances::GenesisConfig::{ + balances: pallet_balances::GenesisConfig:: { balances: vec![ (1, 10 * self.balance_factor), (2, 20 * self.balance_factor), (3, 30 * self.balance_factor), (4, 40 * self.balance_factor), (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) + (6, 60 * self.balance_factor), ], - }), - elections_phragmen: Some(elections_phragmen::GenesisConfig:: { - members: self.genesis_members - }), - }.build_storage().unwrap().into(); + }, + elections: elections_phragmen::GenesisConfig:: { + members: self.genesis_members, + }, + } + .build_storage() + .unwrap() + .into(); ext.execute_with(pre_conditions); ext.execute_with(test); ext.execute_with(post_conditions) } } + fn candidate_ids() -> Vec { + Elections::candidates().into_iter().map(|(c, _)| c).collect::>() + } + + fn candidate_deposit(who: &u64) -> u64 { + Elections::candidates() + .into_iter() + .find_map(|(c, d)| if c == *who { Some(d) } else { None }) + .unwrap_or_default() + } + + fn voter_deposit(who: &u64) -> u64 { + Elections::voting(who).deposit + } + + fn runners_up_ids() -> Vec { + Elections::runners_up().into_iter().map(|r| r.who).collect::>() + } + + fn members_ids() -> Vec { + Elections::members_ids() + } + + fn members_and_stake() -> Vec<(u64, u64)> { + Elections::members().into_iter().map(|m| (m.who, m.stake)).collect::>() + } + + fn runners_up_and_stake() -> Vec<(u64, u64)> { + Elections::runners_up() + .into_iter() + .map(|r| (r.who, r.stake)) + .collect::>() + } + fn all_voters() -> Vec { Voting::::iter().map(|(v, _)| v).collect::>() } @@ -1308,9 +1366,14 @@ mod tests { } fn has_lock(who: &u64) -> u64 { - let lock = Balances::locks(who)[0].clone(); - assert_eq!(lock.id, ElectionsPhragmenModuleId::get()); - lock.amount + Balances::locks(who) + .get(0) + .cloned() + .map(|lock| { + assert_eq!(lock.id, ElectionsPhragmenPalletId::get()); + lock.amount + }) + .unwrap_or_default() } fn intersects(a: &[T], b: &[T]) -> bool { @@ -1319,41 +1382,41 @@ mod tests { fn ensure_members_sorted() { let mut members = Elections::members().clone(); - members.sort(); + members.sort_by_key(|m| m.who); assert_eq!(Elections::members(), members); } fn ensure_candidates_sorted() { let mut candidates = Elections::candidates().clone(); - candidates.sort(); + candidates.sort_by_key(|(c, _)| *c); assert_eq!(Elections::candidates(), candidates); } fn locked_stake_of(who: &u64) -> u64 { - Voting::::get(who).0 + Voting::::get(who).stake } fn ensure_members_has_approval_stake() { // we filter members that have no approval state. This means that even we have more seats // than candidates, we will never ever chose a member with no votes. - assert!( - Elections::members().iter().chain( - Elections::runners_up().iter() - ).all(|(_, s)| *s != u64::zero()) - ); + assert!(Elections::members() + .iter() + .chain(Elections::runners_up().iter()) + .all(|s| s.stake != u64::zero())); } fn ensure_member_candidates_runners_up_disjoint() { // members, candidates and runners-up must always be disjoint sets. 
- assert!(!intersects(&Elections::members_ids(), &Elections::candidates())); - assert!(!intersects(&Elections::members_ids(), &Elections::runners_up_ids())); - assert!(!intersects(&Elections::candidates(), &Elections::runners_up_ids())); + assert!(!intersects(&members_ids(), &candidate_ids())); + assert!(!intersects(&members_ids(), &runners_up_ids())); + assert!(!intersects(&candidate_ids(), &runners_up_ids())); } fn pre_conditions() { System::set_block_number(1); ensure_members_sorted(); ensure_candidates_sorted(); + ensure_member_candidates_runners_up_disjoint(); } fn post_conditions() { @@ -1363,11 +1426,11 @@ mod tests { ensure_members_has_approval_stake(); } - fn submit_candidacy(origin: Origin) -> DispatchResult { + fn submit_candidacy(origin: Origin) -> DispatchResultWithPostInfo { Elections::submit_candidacy(origin, Elections::candidates().len() as u32) } - fn vote(origin: Origin, votes: Vec, stake: u64) -> DispatchResult { + fn vote(origin: Origin, votes: Vec, stake: u64) -> DispatchResultWithPostInfo { // historical note: helper function was created in a period of time in which the API of vote // call was changing. Currently it is a wrapper for the original call and does not do much. // Nonetheless, totally harmless. @@ -1376,28 +1439,24 @@ mod tests { } fn votes_of(who: &u64) -> Vec { - Voting::::get(who).1 - } - - fn defunct_for(who: u64) -> DefunctVoter { - DefunctVoter { - who, - candidate_count: Elections::candidates().len() as u32, - vote_count: votes_of(&who).len() as u32 - } + Voting::::get(who).votes } #[test] fn params_should_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::desired_members(), 2); - assert_eq!(Elections::term_duration(), 5); + assert_eq!(::DesiredMembers::get(), 2); + assert_eq!(::DesiredRunnersUp::get(), 0); + assert_eq!(::VotingBondBase::get(), 2); + assert_eq!(::VotingBondFactor::get(), 0); + assert_eq!(::CandidacyBond::get(), 3); + assert_eq!(::TermDuration::get(), 5); assert_eq!(Elections::election_rounds(), 0); assert!(Elections::members().is_empty()); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(>::decode_len(), None); assert!(Elections::is_candidate(&1).is_err()); @@ -1408,36 +1467,88 @@ mod tests { #[test] fn genesis_members_should_work() { - ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| { - System::set_block_number(1); - assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + ExtBuilder::default() + .genesis_members(vec![(1, 10), (2, 20)]) + .build_and_execute(|| { + System::set_block_number(1); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 1, stake: 10, deposit: 0 }, + SeatHolder { who: 2, stake: 20, deposit: 0 } + ] + ); - assert_eq!(Elections::voting(1), (10, vec![1])); - assert_eq!(Elections::voting(2), (20, vec![2])); + assert_eq!( + Elections::voting(1), + Voter { stake: 10u64, votes: vec![1], deposit: 0 } + ); + assert_eq!( + Elections::voting(2), + Voter { stake: 20u64, votes: vec![2], deposit: 0 } + ); - // they will persist since they have self vote. - System::set_block_number(5); - Elections::end_block(System::block_number()); + // they will persist since they have self vote. 
+ System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![1, 2]); - }) + assert_eq!(members_ids(), vec![1, 2]); + }) + } + + #[test] + fn genesis_voters_can_remove_lock() { + ExtBuilder::default() + .genesis_members(vec![(1, 10), (2, 20)]) + .build_and_execute(|| { + System::set_block_number(1); + + assert_eq!( + Elections::voting(1), + Voter { stake: 10u64, votes: vec![1], deposit: 0 } + ); + assert_eq!( + Elections::voting(2), + Voter { stake: 20u64, votes: vec![2], deposit: 0 } + ); + + assert_ok!(Elections::remove_voter(Origin::signed(1))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); + + assert_eq!(Elections::voting(1), Default::default()); + assert_eq!(Elections::voting(2), Default::default()); + }) } #[test] fn genesis_members_unsorted_should_work() { - ExtBuilder::default().genesis_members(vec![(2, 20), (1, 10)]).build_and_execute(|| { - System::set_block_number(1); - assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + ExtBuilder::default() + .genesis_members(vec![(2, 20), (1, 10)]) + .build_and_execute(|| { + System::set_block_number(1); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 1, stake: 10, deposit: 0 }, + SeatHolder { who: 2, stake: 20, deposit: 0 }, + ] + ); - assert_eq!(Elections::voting(1), (10, vec![1])); - assert_eq!(Elections::voting(2), (20, vec![2])); + assert_eq!( + Elections::voting(1), + Voter { stake: 10u64, votes: vec![1], deposit: 0 } + ); + assert_eq!( + Elections::voting(2), + Voter { stake: 20u64, votes: vec![2], deposit: 0 } + ); - // they will persist since they have self vote. - System::set_block_number(5); - Elections::end_block(System::block_number()); + // they will persist since they have self vote. + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![1, 2]); - }) + assert_eq!(members_ids(), vec![1, 2]); + }) } #[test] @@ -1450,50 +1561,47 @@ mod tests { } #[test] - #[should_panic] - fn genesis_members_cannot_over_stake_1() { - // 10 cannot reserve 20 as voting bond and extra genesis will panic. 
+ #[should_panic = "Duplicate member in elections-phragmen genesis: 2"] + fn genesis_members_cannot_be_duplicate() { ExtBuilder::default() - .voter_bond(20) - .genesis_members(vec![(1, 10), (2, 20)]) + .desired_members(3) + .genesis_members(vec![(1, 10), (2, 10), (2, 10)]) .build_and_execute(|| {}); } #[test] - #[should_panic = "Duplicate member in elections phragmen genesis: 2"] - fn genesis_members_cannot_be_duplicate() { + #[should_panic = "Cannot accept more than DesiredMembers genesis member"] + fn genesis_members_cannot_too_many() { ExtBuilder::default() - .genesis_members(vec![(1, 10), (2, 10), (2, 10)]) + .genesis_members(vec![(1, 10), (2, 10), (3, 30)]) + .desired_members(2) .build_and_execute(|| {}); } #[test] fn term_duration_zero_is_passive() { - ExtBuilder::default() - .term_duration(0) - .build_and_execute(|| - { - assert_eq!(Elections::term_duration(), 0); - assert_eq!(Elections::desired_members(), 2); + ExtBuilder::default().term_duration(0).build_and_execute(|| { + assert_eq!(::TermDuration::get(), 0); + assert_eq!(::DesiredMembers::get(), 2); assert_eq!(Elections::election_rounds(), 0); - assert!(Elections::members_ids().is_empty()); + assert!(members_ids().is_empty()); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert!(Elections::members_ids().is_empty()); + assert!(members_ids().is_empty()); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); }); } #[test] fn simple_candidate_submission_should_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert!(Elections::is_candidate(&1).is_err()); assert!(Elections::is_candidate(&2).is_err()); @@ -1501,7 +1609,7 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(1))); assert_eq!(balances(&1), (7, 3)); - assert_eq!(Elections::candidates(), vec![1]); + assert_eq!(candidate_ids(), vec![1]); assert!(Elections::is_candidate(&1).is_ok()); assert!(Elections::is_candidate(&2).is_err()); @@ -1510,50 +1618,68 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(2))); assert_eq!(balances(&2), (17, 3)); - assert_eq!(Elections::candidates(), vec![1, 2]); + assert_eq!(candidate_ids(), vec![1, 2]); assert!(Elections::is_candidate(&1).is_ok()); assert!(Elections::is_candidate(&2).is_ok()); + + assert_eq!(candidate_deposit(&1), 3); + assert_eq!(candidate_deposit(&2), 3); + assert_eq!(candidate_deposit(&3), 0); }); } #[test] - fn simple_candidate_submission_with_no_votes_should_work() { + fn updating_candidacy_bond_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - - assert_ok!(submit_candidacy(Origin::signed(1))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_eq!(Elections::candidates(), vec![(5, 3)]); - assert!(Elections::is_candidate(&1).is_ok()); - assert!(Elections::is_candidate(&2).is_ok()); - assert_eq!(Elections::candidates(), vec![1, 2]); + // a runtime upgrade changes the bond. 
+ CANDIDACY_BOND.with(|v| *v.borrow_mut() = 4); - assert!(Elections::members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_eq!(Elections::candidates(), vec![(4, 4), (5, 3)]); + // once elected, they each hold their candidacy bond, no more. System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert!(Elections::is_candidate(&1).is_err()); - assert!(Elections::is_candidate(&2).is_err()); - assert!(Elections::candidates().is_empty()); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 4, stake: 40, deposit: 4 }, + SeatHolder { who: 5, stake: 50, deposit: 3 }, + ] + ); + }) + } - assert!(Elections::members_ids().is_empty()); - assert!(Elections::runners_up().is_empty()); + #[test] + fn candidates_are_always_sorted() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(candidate_ids(), Vec::::new()); + + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_eq!(candidate_ids(), vec![3]); + assert_ok!(submit_candidacy(Origin::signed(1))); + assert_eq!(candidate_ids(), vec![1, 3]); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_eq!(candidate_ids(), vec![1, 2, 3]); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_eq!(candidate_ids(), vec![1, 2, 3, 4]); }); } #[test] fn dupe_candidate_submission_should_not_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_ok!(submit_candidacy(Origin::signed(1))); - assert_eq!(Elections::candidates(), vec![1]); - assert_noop!( - submit_candidacy(Origin::signed(1)), - Error::::DuplicatedCandidate, - ); + assert_eq!(candidate_ids(), vec![1]); + assert_noop!(submit_candidacy(Origin::signed(1)), Error::::DuplicatedCandidate); }); } @@ -1565,16 +1691,13 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![5], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); + assert_eq!(members_ids(), vec![5]); assert!(Elections::runners_up().is_empty()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); - assert_noop!( - submit_candidacy(Origin::signed(5)), - Error::::MemberSubmit, - ); + assert_noop!(submit_candidacy(Origin::signed(5)), Error::::MemberSubmit); }); } @@ -1589,22 +1712,19 @@ mod tests { assert_ok!(vote(Origin::signed(1), vec![3], 10)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); - assert_noop!( - submit_candidacy(Origin::signed(3)), - Error::::RunnerSubmit, - ); + assert_noop!(submit_candidacy(Origin::signed(3)), Error::::RunnerUpSubmit); }); } #[test] fn poor_candidate_submission_should_not_work() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_noop!( submit_candidacy(Origin::signed(7)), Error::::InsufficientCandidateFunds, @@ -1615,7 +1735,7 @@ mod tests { #[test] fn simple_voting_should_work() { ExtBuilder::default().build_and_execute(|| { - 
assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_eq!(balances(&2), (20, 0)); assert_ok!(submit_candidacy(Origin::signed(5))); @@ -1629,7 +1749,7 @@ mod tests { #[test] fn can_vote_with_custom_stake() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(candidate_ids(), Vec::::new()); assert_eq!(balances(&2), (20, 0)); assert_ok!(submit_candidacy(Origin::signed(5))); @@ -1661,13 +1781,78 @@ mod tests { }); } + #[test] + fn updated_voting_bond_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + + assert_eq!(balances(&2), (20, 0)); + assert_ok!(vote(Origin::signed(2), vec![5], 5)); + assert_eq!(balances(&2), (18, 2)); + assert_eq!(voter_deposit(&2), 2); + + // a runtime upgrade lowers the voting bond to 1. This guy still un-reserves 2 when + // leaving. + VOTING_BOND_BASE.with(|v| *v.borrow_mut() = 1); + + // proof that bond changed. + assert_eq!(balances(&1), (10, 0)); + assert_ok!(vote(Origin::signed(1), vec![5], 5)); + assert_eq!(balances(&1), (9, 1)); + assert_eq!(voter_deposit(&1), 1); + + assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_eq!(balances(&2), (20, 0)); + }) + } + + #[test] + fn voting_reserves_bond_per_vote() { + ExtBuilder::default().voter_bond_factor(1).build_and_execute(|| { + assert_eq!(balances(&2), (20, 0)); + + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + + // initial vote. + assert_ok!(vote(Origin::signed(2), vec![5], 10)); + + // 2 + 1 + assert_eq!(balances(&2), (17, 3)); + assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(has_lock(&2), 10); + assert_eq!(locked_stake_of(&2), 10); + + // can update; different stake; different lock and reserve. + assert_ok!(vote(Origin::signed(2), vec![5, 4], 15)); + // 2 + 2 + assert_eq!(balances(&2), (16, 4)); + assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(has_lock(&2), 15); + assert_eq!(locked_stake_of(&2), 15); + + // stay at two votes with different stake. + assert_ok!(vote(Origin::signed(2), vec![5, 3], 18)); + // 2 + 2 + assert_eq!(balances(&2), (16, 4)); + assert_eq!(Elections::voting(&2).deposit, 4); + assert_eq!(has_lock(&2), 18); + assert_eq!(locked_stake_of(&2), 18); + + // back to 1 vote. 
+ assert_ok!(vote(Origin::signed(2), vec![4], 12)); + // 2 + 1 + assert_eq!(balances(&2), (17, 3)); + assert_eq!(Elections::voting(&2).deposit, 3); + assert_eq!(has_lock(&2), 12); + assert_eq!(locked_stake_of(&2), 12); + }); + } + #[test] fn cannot_vote_for_no_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_noop!( - vote(Origin::signed(2), vec![], 20), - Error::::NoVotes, - ); + assert_noop!(vote(Origin::signed(2), vec![], 20), Error::::NoVotes); }); } @@ -1677,65 +1862,107 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(5))); assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); + assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![4, 5]); + assert!(candidate_ids().is_empty()); + + assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); + }); + } + + #[test] + fn prime_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + + assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![4, 5]); + assert!(candidate_ids().is_empty()); + + assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); + }); + } + + #[test] + fn prime_votes_for_exiting_members_are_removed() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(5))); + + assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); + assert_eq!(members_ids(), vec![3, 5]); + assert!(candidate_ids().is_empty()); - assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); }); } #[test] - fn prime_works() { + fn prime_is_kept_if_other_members_leave() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(submit_candidacy(Origin::signed(4))); assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); - assert_ok!(vote(Origin::signed(2), vec![4], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); + 
assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); - assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); - assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); - }); + assert_eq!(members_ids(), vec![5]); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); + }) } #[test] - fn prime_votes_for_exiting_members_are_removed() { + fn prime_is_gone_if_renouncing() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(submit_candidacy(Origin::signed(4))); assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); - assert_ok!(vote(Origin::signed(2), vec![4], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); - System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![3, 5]); - assert!(Elections::candidates().is_empty()); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - }); + assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member)); + + assert_eq!(members_ids(), vec![4]); + assert_eq!(PRIME.with(|p| *p.borrow()), None); + }) } #[test] @@ -1743,35 +1970,34 @@ mod tests { ExtBuilder::default() .desired_runners_up(1) .balance_factor(10) - .build_and_execute( - || { - // when we have only candidates - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - - assert_noop!( - // content of the vote is irrelevant. - vote(Origin::signed(1), vec![9, 99, 999, 9999], 5), - Error::::TooManyVotes, - ); + .build_and_execute(|| { + // when we have only candidates + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + + assert_noop!( + // content of the vote is irrelevant. 
+ vote(Origin::signed(1), vec![9, 99, 999, 9999], 5), + Error::::TooManyVotes, + ); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); - System::set_block_number(5); - Elections::end_block(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - // now we have 2 members, 1 runner-up, and 1 new candidate - assert_ok!(submit_candidacy(Origin::signed(2))); + // now we have 2 members, 1 runner-up, and 1 new candidate + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(Origin::signed(1), vec![9, 99, 999, 9999], 5)); - assert_noop!( - vote(Origin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), - Error::::TooManyVotes, - ); - }); + assert_ok!(vote(Origin::signed(1), vec![9, 99, 999, 9999], 5)); + assert_noop!( + vote(Origin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), + Error::::TooManyVotes, + ); + }); } #[test] @@ -1780,10 +2006,7 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(5))); assert_ok!(submit_candidacy(Origin::signed(4))); - assert_noop!( - vote(Origin::signed(2), vec![4], 1), - Error::::LowBalance, - ); + assert_noop!(vote(Origin::signed(2), vec![4], 1), Error::::LowBalance); }) } @@ -1859,172 +2082,9 @@ mod tests { assert_ok!(Elections::remove_voter(Origin::signed(4))); System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![3, 5]); - }); - } - - #[test] - fn reporter_must_be_voter() { - ExtBuilder::default().build_and_execute(|| { - assert_noop!( - Elections::report_defunct_voter(Origin::signed(1), defunct_for(2)), - Error::::MustBeVoter, - ); - }); - } - - #[test] - fn reporter_must_provide_lengths() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - - // both are defunct. - assert_ok!(vote(Origin::signed(5), vec![99, 999, 9999], 50)); - assert_ok!(vote(Origin::signed(4), vec![999], 40)); - - // 3 candidates! incorrect candidate length. - assert_noop!( - Elections::report_defunct_voter(Origin::signed(4), DefunctVoter { - who: 5, - candidate_count: 2, - vote_count: 3, - }), - Error::::InvalidCandidateCount, - ); - - // 3 votes! incorrect vote length - assert_noop!( - Elections::report_defunct_voter(Origin::signed(4), DefunctVoter { - who: 5, - candidate_count: 3, - vote_count: 2, - }), - Error::::InvalidVoteCount, - ); - - // correct. - assert_ok!(Elections::report_defunct_voter(Origin::signed(4), DefunctVoter { - who: 5, - candidate_count: 3, - vote_count: 3, - })); - }); - } - - #[test] - fn reporter_can_overestimate_length() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - // both are defunct. - assert_ok!(vote(Origin::signed(5), vec![99], 50)); - assert_ok!(vote(Origin::signed(4), vec![999], 40)); - - // 2 candidates! overestimation is okay. 
- assert_ok!(Elections::report_defunct_voter(Origin::signed(4), defunct_for(5))); - }); - } - - #[test] - fn can_detect_defunct_voter() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(6))); - - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); - assert_ok!(vote(Origin::signed(6), vec![6], 30)); - // will be soon a defunct voter. - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - - System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![6]); - assert!(Elections::candidates().is_empty()); - - // all of them have a member or runner-up that they voted for. - assert_eq!(Elections::is_defunct_voter(&votes_of(&5)), false); - assert_eq!(Elections::is_defunct_voter(&votes_of(&4)), false); - assert_eq!(Elections::is_defunct_voter(&votes_of(&2)), false); - assert_eq!(Elections::is_defunct_voter(&votes_of(&6)), false); - - // defunct - assert_eq!(Elections::is_defunct_voter(&votes_of(&3)), true); - - assert_ok!(submit_candidacy(Origin::signed(1))); - assert_ok!(vote(Origin::signed(1), vec![1], 10)); - - // has a candidate voted for. - assert_eq!(Elections::is_defunct_voter(&votes_of(&1)), false); - - }); - } - - #[test] - fn report_voter_should_work_and_earn_reward() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); - // will be soon a defunct voter. 
- assert_ok!(vote(Origin::signed(3), vec![3], 30)); - - System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); - - assert_eq!(balances(&3), (28, 2)); - assert_eq!(balances(&5), (45, 5)); - - assert_ok!(Elections::report_defunct_voter(Origin::signed(5), defunct_for(3))); - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::VoterReported(3, 5, true)) - })); - - assert_eq!(balances(&3), (28, 0)); - assert_eq!(balances(&5), (47, 5)); - }); - } - - #[test] - fn report_voter_should_slash_when_bad_report() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - - System::set_block_number(5); - Elections::end_block(System::block_number()); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::candidates().is_empty()); - - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&5), (45, 5)); - - assert_ok!(Elections::report_defunct_voter(Origin::signed(5), defunct_for(4))); - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::VoterReported(4, 5, false)) - })); + Elections::on_initialize(System::block_number()); - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&5), (45, 3)); + assert_eq!(members_ids(), vec![3, 5]); }); } @@ -2045,18 +2105,19 @@ mod tests { assert_eq!(votes_of(&3), vec![3]); assert_eq!(votes_of(&4), vec![4]); - assert_eq!(Elections::candidates(), vec![3, 4, 5]); + assert_eq!(candidate_ids(), vec![3, 4, 5]); assert_eq!(>::decode_len().unwrap(), 3); assert_eq!(Elections::election_rounds(), 0); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members(), vec![(3, 30), (5, 20)]); + assert_eq!(members_and_stake(), vec![(3, 30), (5, 20)]); assert!(Elections::runners_up().is_empty()); + assert_eq_uvec!(all_voters(), vec![2, 3, 4]); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(>::decode_len(), None); assert_eq!(Elections::election_rounds(), 1); @@ -2068,12 +2129,9 @@ mod tests { ExtBuilder::default().build_and_execute(|| { // no candidates, no nothing. 
System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!( - System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::EmptyTerm), - ) + System::assert_last_event(Event::Elections(super::Event::EmptyTerm)); }) } @@ -2087,26 +2145,23 @@ mod tests { assert_ok!(vote(Origin::signed(4), vec![4], 40)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!( - System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])), - ); + System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![ + (4, 40), + (5, 50), + ]))); - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![]); + assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); + assert_eq!(runners_up_and_stake(), vec![]); assert_ok!(Elections::remove_voter(Origin::signed(5))); assert_ok!(Elections::remove_voter(Origin::signed(4))); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!( - System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::NewTerm(vec![])), - ); + System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![]))); // outgoing have lost their bond. assert_eq!(balances(&4), (37, 0)); @@ -2124,19 +2179,19 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members(), vec![(5, 50)]); + assert_eq!(members_and_stake(), vec![(5, 50)]); assert_eq!(Elections::election_rounds(), 1); // but now it has a valid target. assert_ok!(submit_candidacy(Origin::signed(4))); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // candidate 4 is affected by an old vote. 
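+		// (a vote worth stake 30 was cast for 4 before 4 was a candidate; votes persist
+		// across terms, so it now counts, giving 4 the stake of 30 asserted below.)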
- assert_eq!(Elections::members(), vec![(4, 30), (5, 50)]); + assert_eq!(members_and_stake(), vec![(4, 30), (5, 50)]); assert_eq!(Elections::election_rounds(), 2); assert_eq_uvec!(all_voters(), vec![3, 5]); }); @@ -2156,10 +2211,10 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); assert_eq!(Elections::election_rounds(), 1); - assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(members_ids(), vec![4, 5]); }); } @@ -2170,16 +2225,13 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(4))); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(Elections::election_rounds(), 1); - assert!(Elections::members_ids().is_empty()); + assert!(members_ids().is_empty()); - assert_eq!( - System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::NewTerm(vec![])), - ) + System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![]))); }); } @@ -2197,11 +2249,11 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![4], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // sorted based on account id. - assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(members_ids(), vec![4, 5]); // sorted based on merit (least -> most) - assert_eq!(Elections::runners_up_ids(), vec![3, 2]); + assert_eq!(runners_up_ids(), vec![3, 2]); // runner ups are still locked. assert_eq!(balances(&4), (35, 5)); @@ -2224,16 +2276,17 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); + assert_eq!(runners_up_and_stake(), vec![(2, 20), (3, 30)]); assert_ok!(vote(Origin::signed(5), vec![5], 15)); System::set_block_number(10); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members(), vec![(3, 30), (4, 40)]); - assert_eq!(Elections::runners_up(), vec![(5, 15), (2, 20)]); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_and_stake(), vec![(3, 30), (4, 40)]); + assert_eq!(runners_up_and_stake(), vec![(5, 15), (2, 20)]); }); } @@ -2249,18 +2302,18 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2]); assert_eq!(balances(&2), (15, 5)); assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(runners_up_ids(), vec![3]); assert_eq!(balances(&2), (15, 2)); }); } @@ -2277,22 +2330,22 @@ mod tests { assert_eq!(balances(&5), (45, 5)); System::set_block_number(5); - 
Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![5]); assert_ok!(Elections::remove_voter(Origin::signed(5))); assert_eq!(balances(&5), (47, 3)); System::set_block_number(10); - Elections::end_block(System::block_number()); - assert!(Elections::members_ids().is_empty()); + Elections::on_initialize(System::block_number()); + assert!(members_ids().is_empty()); assert_eq!(balances(&5), (47, 0)); }); } #[test] - fn losers_will_lose_the_bond() { + fn candidates_lose_the_bond_when_outgoing() { ExtBuilder::default().build_and_execute(|| { assert_ok!(submit_candidacy(Origin::signed(5))); assert_ok!(submit_candidacy(Origin::signed(3))); @@ -2303,9 +2356,9 @@ mod tests { assert_eq!(balances(&3), (27, 3)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![5]); + assert_eq!(members_ids(), vec![5]); // winner assert_eq!(balances(&5), (47, 3)); @@ -2324,9 +2377,9 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(members_ids(), vec![4, 5]); assert_eq!(Elections::election_rounds(), 1); assert_ok!(submit_candidacy(Origin::signed(2))); @@ -2338,13 +2391,13 @@ mod tests { assert_ok!(Elections::remove_voter(Origin::signed(4))); // 5 will persist as candidates despite not being in the list. - assert_eq!(Elections::candidates(), vec![2, 3]); + assert_eq!(candidate_ids(), vec![2, 3]); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // 4 removed; 5 and 3 are the new best. - assert_eq!(Elections::members_ids(), vec![3, 5]); + assert_eq!(members_ids(), vec![3, 5]); }); } @@ -2365,12 +2418,12 @@ mod tests { let check_at_block = |b: u32| { System::set_block_number(b.into()); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // we keep re-electing the same folks. - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); + assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); + assert_eq!(runners_up_and_stake(), vec![(2, 20), (3, 30)]); // no new candidates but old members and runners-up are always added. 
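			// (incumbent members and runners-up are implicitly treated as candidates in
			// every round, which is why the same set keeps being re-elected without
			// anyone re-submitting candidacy.)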
- assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); assert_eq!(Elections::election_rounds(), b / 5); assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]); }; @@ -2393,8 +2446,8 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); assert_eq!(Elections::election_rounds(), 1); // a new candidate @@ -2405,7 +2458,7 @@ mod tests { assert_eq!(balances(&4), (35, 2)); // slashed assert_eq!(Elections::election_rounds(), 2); // new election round - assert_eq!(Elections::members_ids(), vec![3, 5]); // new members + assert_eq!(members_ids(), vec![3, 5]); // new members }); } @@ -2419,15 +2472,16 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); // no replacement yet. - assert_err_with_weight!( - Elections::remove_member(Origin::root(), 4, true), - Error::::InvalidReplacement, - Some(33777000), // only thing that matters for now is that it is NOT the full block. - ); + let unwrapped_error = Elections::remove_member(Origin::root(), 4, true).unwrap_err(); + assert!(matches!( + unwrapped_error.error, + DispatchError::Module { message: Some("InvalidReplacement"), .. } + )); + assert!(unwrapped_error.post_info.actual_weight.is_some()); }); ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { @@ -2440,16 +2494,17 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); // there is a replacement! and this one needs a weight refund. - assert_err_with_weight!( - Elections::remove_member(Origin::root(), 4, false), - Error::::InvalidReplacement, - Some(33777000) // only thing that matters for now is that it is NOT the full block. - ); + let unwrapped_error = Elections::remove_member(Origin::root(), 4, false).unwrap_err(); + assert!(matches!( + unwrapped_error.error, + DispatchError::Module { message: Some("InvalidReplacement"), .. } + )); + assert!(unwrapped_error.post_info.actual_weight.is_some()); }); } @@ -2470,8 +2525,8 @@ mod tests { assert_eq!(Elections::election_rounds(), 0); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![3, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![3, 5]); assert_eq!(Elections::election_rounds(), 1); assert_ok!(Elections::remove_voter(Origin::signed(2))); @@ -2481,8 +2536,8 @@ mod tests { // meanwhile, no one cares to become a candidate again. 
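		// (with both voters removed and no fresh candidates, the next term elects
		// nobody and the member set is cleared, as asserted below.)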
System::set_block_number(10); - Elections::end_block(System::block_number()); - assert!(Elections::members_ids().is_empty()); + Elections::on_initialize(System::block_number()); + assert!(members_ids().is_empty()); assert_eq!(Elections::election_rounds(), 2); }); } @@ -2497,8 +2552,8 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![5], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); + Elections::on_initialize(System::block_number()); + assert_eq!(members_ids(), vec![4, 5]); assert_ok!(submit_candidacy(Origin::signed(1))); assert_ok!(submit_candidacy(Origin::signed(2))); @@ -2515,10 +2570,10 @@ mod tests { assert_ok!(vote(Origin::signed(1), vec![1], 10)); System::set_block_number(10); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // 3, 4 are new members, must still be bonded, nothing slashed. - assert_eq!(Elections::members(), vec![(3, 30), (4, 48)]); + assert_eq!(members_and_stake(), vec![(3, 30), (4, 48)]); assert_eq!(balances(&3), (25, 5)); assert_eq!(balances(&4), (35, 5)); @@ -2528,9 +2583,10 @@ mod tests { // 5 is an outgoing loser. will also get slashed. assert_eq!(balances(&5), (45, 2)); - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])) - })); + System::assert_has_event(Event::Elections(super::Event::NewTerm(vec![ + (4, 40), + (5, 50), + ]))); }) } @@ -2545,9 +2601,9 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![10], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq_uvec!(Elections::members_ids(), vec![3, 4]); + assert_eq_uvec!(members_ids(), vec![3, 4]); assert_eq!(Elections::election_rounds(), 1); }); } @@ -2566,30 +2622,14 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![4], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); // id: low -> high. - assert_eq!(Elections::members(), vec![(4, 50), (5, 40)]); + assert_eq!(members_and_stake(), vec![(4, 50), (5, 40)]); // merit: low -> high. 
- assert_eq!(Elections::runners_up(), vec![(3, 20), (2, 30)]); + assert_eq!(runners_up_and_stake(), vec![(3, 20), (2, 30)]); }); } - #[test] - fn candidates_are_sorted() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(3))); - - assert_eq!(Elections::candidates(), vec![3, 5]); - - assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::Candidate(4))); - - assert_eq!(Elections::candidates(), vec![2, 4, 5]); - }) - } - #[test] fn runner_up_replacement_maintains_members_order() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { @@ -2602,11 +2642,11 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![2], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![2, 4]); + assert_eq!(members_ids(), vec![2, 4]); assert_ok!(Elections::remove_member(Origin::root(), 2, true)); - assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(members_ids(), vec![4, 5]); }); } @@ -2624,16 +2664,16 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2, 3]); assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. - assert_eq!(Elections::members_ids(), vec![3, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); + assert_eq!(members_ids(), vec![3, 5]); + assert_eq!(runners_up_ids(), vec![2]); }) } @@ -2647,22 +2687,22 @@ mod tests { assert_ok!(vote(Origin::signed(4), vec![4], 40)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert!(Elections::runners_up_ids().is_empty()); + assert_eq!(members_ids(), vec![4, 5]); + assert!(runners_up_ids().is_empty()); assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. // no replacement - assert_eq!(Elections::members_ids(), vec![5]); - assert!(Elections::runners_up_ids().is_empty()); + assert_eq!(members_ids(), vec![5]); + assert!(runners_up_ids().is_empty()); }) } #[test] - fn can_renounce_candidacy_runner() { + fn can_renounce_candidacy_runner_up() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { assert_ok!(submit_candidacy(Origin::signed(5))); assert_ok!(submit_candidacy(Origin::signed(4))); @@ -2675,16 +2715,16 @@ mod tests { assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2, 3]); assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. 
- assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2]); }) } @@ -2702,13 +2742,13 @@ mod tests { assert_ok!(vote(Origin::signed(5), vec![2], 50)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![2, 4]); - assert_eq!(Elections::runners_up_ids(), vec![5, 3]); + assert_eq!(members_ids(), vec![2, 4]); + assert_eq!(runners_up_ids(), vec![5, 3]); assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); - assert_eq!(Elections::members_ids(), vec![2, 4]); - assert_eq!(Elections::runners_up_ids(), vec![5]); + assert_eq!(members_ids(), vec![2, 4]); + assert_eq!(runners_up_ids(), vec![5]); }); } @@ -2717,11 +2757,11 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_ok!(submit_candidacy(Origin::signed(5))); assert_eq!(balances(&5), (47, 3)); - assert_eq!(Elections::candidates(), vec![5]); + assert_eq!(candidate_ids(), vec![5]); assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(1))); assert_eq!(balances(&5), (50, 0)); - assert!(Elections::candidates().is_empty()); + assert!(candidate_ids().is_empty()); }) } @@ -2734,7 +2774,7 @@ mod tests { ); assert_noop!( Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member), - Error::::NotMember, + Error::::InvalidRenouncing, ); assert_noop!( Elections::renounce_candidacy(Origin::signed(5), Renouncing::RunnerUp), @@ -2755,14 +2795,14 @@ mod tests { assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); assert_noop!( Elections::renounce_candidacy(Origin::signed(3), Renouncing::Member), - Error::::NotMember, + Error::::InvalidRenouncing, ); }) } @@ -2779,10 +2819,10 @@ mod tests { assert_ok!(vote(Origin::signed(3), vec![3], 30)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); assert_noop!( Elections::renounce_candidacy(Origin::signed(4), Renouncing::RunnerUp), @@ -2800,7 +2840,7 @@ mod tests { assert_noop!( Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2)), - Error::::InvalidRenouncing, + Error::::InvalidWitnessData, ); assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); @@ -2819,21 +2859,295 @@ mod tests { } #[test] - fn behavior_with_dupe_candidate() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - >::put(vec![1, 1, 2, 3, 4]); + fn unsorted_runners_up_are_detected() { + ExtBuilder::default() + .desired_runners_up(2) + .desired_members(1) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 5)); + assert_ok!(vote(Origin::signed(3), vec![3], 15)); + + 
System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![4, 3]); + + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(vote(Origin::signed(2), vec![2], 10)); + + System::set_block_number(10); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![2, 3]); + + // 4 is outgoing runner-up. Slash candidacy bond. + assert_eq!(balances(&4), (35, 2)); + // 3 stays. + assert_eq!(balances(&3), (25, 5)); + }) + } + + #[test] + fn member_to_runner_up_wont_slash() { + ExtBuilder::default() + .desired_runners_up(2) + .desired_members(1) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![4]); + assert_eq!(runners_up_ids(), vec![2, 3]); + + assert_eq!(balances(&4), (35, 5)); + assert_eq!(balances(&3), (25, 5)); + assert_eq!(balances(&2), (15, 5)); + + // this guy will shift everyone down. + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(10); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![3, 4]); + + // 4 went from member to runner-up -- don't slash. + assert_eq!(balances(&4), (35, 5)); + // 3 stayed runner-up -- don't slash. + assert_eq!(balances(&3), (25, 5)); + // 2 was removed -- slash. + assert_eq!(balances(&2), (15, 2)); + }); + } + + #[test] + fn runner_up_to_member_wont_slash() { + ExtBuilder::default() + .desired_runners_up(2) + .desired_members(1) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![4]); + assert_eq!(runners_up_ids(), vec![2, 3]); + + assert_eq!(balances(&4), (35, 5)); + assert_eq!(balances(&3), (25, 5)); + assert_eq!(balances(&2), (15, 5)); + + // swap some votes. 
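+				// (4 now backs 2 with stake 40 and 2 backs 4 with stake 20, so 2 overtakes
+				// 4 for the single seat while 4 drops to runner-up.)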
+ assert_ok!(vote(Origin::signed(4), vec![2], 40)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); + + System::set_block_number(10); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![2]); + assert_eq!(runners_up_ids(), vec![4, 3]); + + // 2 went from runner to member, don't slash + assert_eq!(balances(&2), (15, 5)); + // 4 went from member to runner, don't slash + assert_eq!(balances(&4), (35, 5)); + // 3 stayed the same + assert_eq!(balances(&3), (25, 5)); + }); + } - assert_ok!(vote(Origin::signed(5), vec![1], 50)); + #[test] + fn remove_and_replace_member_works() { + let setup = || { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + + assert_ok!(vote(Origin::signed(5), vec![5], 50)); assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); System::set_block_number(5); - Elections::end_block(System::block_number()); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![3]); + }; + + // member removed, replacement found. + ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { + setup(); + assert_eq!(Elections::remove_and_replace_member(&4, false), Ok(true)); + + assert_eq!(members_ids(), vec![3, 5]); + assert_eq!(runners_up_ids().len(), 0); + }); + + // member removed, no replacement found. + ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { + setup(); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); + assert_eq!(Elections::remove_and_replace_member(&4, false), Ok(false)); + + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids().len(), 0); + }); + + // wrong member to remove. 
+ ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { + setup(); + assert!(matches!(Elections::remove_and_replace_member(&2, false), Err(_))); + }); + } + + #[test] + fn no_desired_members() { + // not interested in anything + ExtBuilder::default() + .desired_members(0) + .desired_runners_up(0) + .build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); + + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_eq!(Elections::candidates().len(), 3); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids().len(), 0); + assert_eq!(runners_up_ids().len(), 0); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); + + // not interested in members + ExtBuilder::default() + .desired_members(0) + .desired_runners_up(2) + .build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); + + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_eq!(Elections::candidates().len(), 3); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids().len(), 0); + assert_eq!(runners_up_ids(), vec![3, 4]); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); + + // not interested in runners-up + ExtBuilder::default() + .desired_members(2) + .desired_runners_up(0) + .build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); + + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + + assert_eq!(Elections::candidates().len(), 3); + + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![3, 4]); + assert_eq!(runners_up_ids().len(), 0); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); + } + + #[test] + fn dupe_vote_is_moot() { + ExtBuilder::default().desired_members(1).build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(1))); + + // all these duplicate votes will not cause 2 to win. 
+ assert_ok!(vote(Origin::signed(1), vec![2, 2, 2, 2], 5)); + assert_ok!(vote(Origin::signed(2), vec![2, 2, 2, 2], 20)); + + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + + System::set_block_number(5); + Elections::on_initialize(System::block_number()); + + assert_eq!(members_ids(), vec![3]); + }) + } + + #[test] + fn remove_defunct_voter_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + + // defunct + assert_ok!(vote(Origin::signed(5), vec![5, 4], 5)); + // defunct + assert_ok!(vote(Origin::signed(4), vec![4], 5)); + // ok + assert_ok!(vote(Origin::signed(3), vec![3], 5)); + // ok + assert_ok!(vote(Origin::signed(2), vec![3, 4], 5)); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(3))); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2))); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::Candidate(1))); - assert_eq!(Elections::members_ids(), vec![1, 4]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); - assert!(Elections::candidates().is_empty()); + assert_ok!(Elections::clean_defunct_voters(Origin::root(), 4, 2)); }) } } diff --git a/primitives/utils/src/lib.rs b/frame/elections-phragmen/src/migrations/mod.rs similarity index 81% rename from primitives/utils/src/lib.rs rename to frame/elections-phragmen/src/migrations/mod.rs index 77bcd096561b4..9a1f86a1ad7ce 100644 --- a/primitives/utils/src/lib.rs +++ b/frame/elections-phragmen/src/migrations/mod.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Utilities Primitives for Substrate +//! All migrations of this pallet. -pub mod metrics; -pub mod mpsc; -pub mod status_sinks; +/// Version 3. +pub mod v3; +/// Version 4. +pub mod v4; diff --git a/frame/elections-phragmen/src/migrations/v3.rs b/frame/elections-phragmen/src/migrations/v3.rs new file mode 100644 index 0000000000000..728e0c4b0c915 --- /dev/null +++ b/frame/elections-phragmen/src/migrations/v3.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Migrations to version [`3.0.0`], as denoted by the changelog. 
+
+use codec::{Decode, Encode, FullCodec};
+use frame_support::{
+	pallet_prelude::ValueQuery,
+	traits::{PalletInfoAccess, StorageVersion},
+	weights::Weight,
+	RuntimeDebug, Twox64Concat,
+};
+use sp_std::prelude::*;
+
+#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)]
+struct SeatHolder<AccountId, Balance> {
+	who: AccountId,
+	stake: Balance,
+	deposit: Balance,
+}
+
+#[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)]
+struct Voter<AccountId, Balance> {
+	votes: Vec<AccountId>,
+	stake: Balance,
+	deposit: Balance,
+}
+
+/// Trait to implement to give information about types used for migration.
+pub trait V2ToV3 {
+	/// The elections-phragmen pallet.
+	type Pallet: 'static + PalletInfoAccess;
+
+	/// System config account id.
+	type AccountId: 'static + FullCodec;
+
+	/// Elections-phragmen currency balance.
+	type Balance: 'static + FullCodec + Copy;
+}
+
+frame_support::generate_storage_alias!(
+	PhragmenElection, Candidates<T: V2ToV3> => Value<
+		Vec<(T::AccountId, T::Balance)>,
+		ValueQuery
+	>
+);
+frame_support::generate_storage_alias!(
+	PhragmenElection, Members<T: V2ToV3> => Value<
+		Vec<SeatHolder<T::AccountId, T::Balance>>,
+		ValueQuery
+	>
+);
+frame_support::generate_storage_alias!(
+	PhragmenElection, RunnersUp<T: V2ToV3> => Value<
+		Vec<SeatHolder<T::AccountId, T::Balance>>,
+		ValueQuery
+	>
+);
+frame_support::generate_storage_alias!(
+	PhragmenElection, Voting<T: V2ToV3> => Map<
+		(Twox64Concat, T::AccountId),
+		Voter<T::AccountId, T::Balance>
+	>
+);
+
+/// Apply all of the migrations from 2 to 3.
+///
+/// ### Warning
+///
+/// This code will **ONLY** check that the storage version is less than or equal to 2_0_0.
+/// Further checks might be needed at the user runtime.
+///
+/// Be aware that this migration is intended to be used only for the mentioned versions. Use
+/// with care and run at your own risk.
+pub fn apply<T: V2ToV3>(old_voter_bond: T::Balance, old_candidacy_bond: T::Balance) -> Weight {
+	let storage_version = StorageVersion::get::<T::Pallet>();
+	log::info!(
+		target: "runtime::elections-phragmen",
+		"Running migration for elections-phragmen with storage version {:?}",
+		storage_version,
+	);
+
+	if storage_version <= 2 {
+		migrate_voters_to_recorded_deposit::<T>(old_voter_bond);
+		migrate_candidates_to_recorded_deposit::<T>(old_candidacy_bond);
+		migrate_runners_up_to_recorded_deposit::<T>(old_candidacy_bond);
+		migrate_members_to_recorded_deposit::<T>(old_candidacy_bond);
+
+		StorageVersion::new(3).put::<T::Pallet>();
+
+		Weight::max_value()
+	} else {
+		log::warn!(
+			target: "runtime::elections-phragmen",
+			"Attempted to apply migration to V3 but failed because storage version is {:?}",
+			storage_version,
+		);
+		0
+	}
+}
+
+/// Migrate from the old legacy voting bond (fixed) to the new one (per-vote dynamic).
+pub fn migrate_voters_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) {
+	<Voting<T>>::translate::<(T::Balance, Vec<T::AccountId>), _>(|_who, (stake, votes)| {
+		Some(Voter { votes, stake, deposit: old_deposit })
+	});
+
+	log::info!(
+		target: "runtime::elections-phragmen",
+		"migrated {} voter accounts.",
+		<Voting<T>>::iter().count(),
+	);
+}
+
+/// Migrate all candidates to recorded deposit.
+pub fn migrate_candidates_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) {
+	let _ = <Candidates<T>>::translate::<Vec<T::AccountId>, _>(|maybe_old_candidates| {
+		maybe_old_candidates.map(|old_candidates| {
+			log::info!(
+				target: "runtime::elections-phragmen",
+				"migrated {} candidate accounts.",
+				old_candidates.len(),
+			);
+			old_candidates.into_iter().map(|c| (c, old_deposit)).collect::<Vec<_>>()
+		})
+	});
+}
+
+/// Migrate all members to recorded deposit.
+pub fn migrate_members_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) {
+	let _ = <Members<T>>::translate::<Vec<(T::AccountId, T::Balance)>, _>(|maybe_old_members| {
+		maybe_old_members.map(|old_members| {
+			log::info!(
+				target: "runtime::elections-phragmen",
+				"migrated {} member accounts.",
+				old_members.len(),
+			);
+			old_members
+				.into_iter()
+				.map(|(who, stake)| SeatHolder { who, stake, deposit: old_deposit })
+				.collect::<Vec<_>>()
+		})
+	});
+}
+
+/// Migrate all runners-up to recorded deposit.
+pub fn migrate_runners_up_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) {
+	let _ =
+		<RunnersUp<T>>::translate::<Vec<(T::AccountId, T::Balance)>, _>(|maybe_old_runners_up| {
+			maybe_old_runners_up.map(|old_runners_up| {
+				log::info!(
+					target: "runtime::elections-phragmen",
+					"migrated {} runner-up accounts.",
+					old_runners_up.len(),
+				);
+				old_runners_up
+					.into_iter()
+					.map(|(who, stake)| SeatHolder { who, stake, deposit: old_deposit })
+					.collect::<Vec<_>>()
+			})
+		});
+}
diff --git a/frame/elections-phragmen/src/migrations/v4.rs b/frame/elections-phragmen/src/migrations/v4.rs
new file mode 100644
index 0000000000000..9acc1297294d9
--- /dev/null
+++ b/frame/elections-phragmen/src/migrations/v4.rs
@@ -0,0 +1,106 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Migrations to version [`4.0.0`], as denoted by the changelog.
+
+use frame_support::{
+	traits::{Get, StorageVersion},
+	weights::Weight,
+};
+
+/// The old prefix.
+pub const OLD_PREFIX: &[u8] = b"PhragmenElection";
+
+/// Migrate the entire storage of this pallet to a new prefix.
+///
+/// This new prefix must be the same as the one set in `construct_runtime`. For safety, use
+/// `PalletInfo` to get it, as:
+/// `<Runtime as frame_system::Config>::PalletInfo::name::<ElectionsPhragmenPallet>`.
+///
+/// The old storage prefix, `PhragmenElection`, is hardcoded in the migration code.
+pub fn migrate<T: crate::Config, N: AsRef<str>>(new_pallet_name: N) -> Weight {
+	if new_pallet_name.as_ref().as_bytes() == OLD_PREFIX {
+		log::info!(
+			target: "runtime::elections-phragmen",
+			"New pallet name is equal to the old prefix. No migration needs to be done.",
+		);
+		return 0
+	}
+	let storage_version = StorageVersion::get::<crate::Pallet<T>>();
+	log::info!(
+		target: "runtime::elections-phragmen",
+		"Running migration to v4 for elections-phragmen with storage version {:?}",
+		storage_version,
+	);
+
+	if storage_version <= 3 {
+		log::info!("new prefix: {}", new_pallet_name.as_ref());
+		frame_support::storage::migration::move_pallet(
+			OLD_PREFIX,
+			new_pallet_name.as_ref().as_bytes(),
+		);
+
+		StorageVersion::new(4).put::<crate::Pallet<T>>();
+
+		<T as frame_system::Config>::BlockWeights::get().max_block
+	} else {
+		log::warn!(
+			target: "runtime::elections-phragmen",
+			"Attempted to apply migration to v4 but failed because storage version is {:?}",
+			storage_version,
+		);
+		0
+	}
+}
+
+/// Some checks prior to migration. This can be linked to
+/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing.
+///
+/// Panics if anything goes wrong.
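+///
+/// An illustrative, runtime-side sketch of how [`migrate`] and this check can be wired
+/// up (the `Runtime` type, the `"Elections"` pallet name and the `try-runtime` hook
+/// signature are assumptions, not part of this file):
+///
+/// ```ignore
+/// pub struct RenamePhragmenPrefix;
+/// impl frame_support::traits::OnRuntimeUpgrade for RenamePhragmenPrefix {
+///     fn on_runtime_upgrade() -> frame_support::weights::Weight {
+///         v4::migrate::<Runtime, _>("Elections")
+///     }
+///     #[cfg(feature = "try-runtime")]
+///     fn pre_upgrade() -> Result<(), &'static str> {
+///         v4::pre_migration::<Runtime, _>("Elections");
+///         Ok(())
+///     }
+/// }
+/// ```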
+pub fn pre_migration<T: crate::Config, N: AsRef<str>>(new: N) {
+	let new = new.as_ref();
+	log::info!("pre-migration elections-phragmen test with new = {}", new);
+
+	// the next key must exist, and start with the hash of `OLD_PREFIX`.
+	let next_key = sp_io::storage::next_key(OLD_PREFIX).unwrap();
+	assert!(next_key.starts_with(&sp_io::hashing::twox_128(OLD_PREFIX)));
+
+	// ensure nothing is stored in the new prefix.
+	assert!(
+		sp_io::storage::next_key(new.as_bytes()).map_or(
+			// either nothing is there
+			true,
+			// or we ensure that it has no common prefix with twox_128(new).
+			|next_key| !next_key.starts_with(&sp_io::hashing::twox_128(new.as_bytes()))
+		),
+		"unexpected next_key({}) = {:?}",
+		new,
+		sp_core::hexdisplay::HexDisplay::from(&sp_io::storage::next_key(new.as_bytes()).unwrap())
+	);
+	// ensure storage version is 3.
+	assert_eq!(StorageVersion::get::<crate::Pallet<T>>(), 3);
+}
+
+/// Some checks for after migration. This can be linked to
+/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn post_migration<T: crate::Config>() {
+	log::info!("post-migration elections-phragmen");
+	// ensure we've been updated to v4 by the automatic write of crate version -> storage version.
+	assert_eq!(StorageVersion::get::<crate::Pallet<T>>(), 4);
+}
diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs
new file mode 100644
index 0000000000000..b60308c4f0a64
--- /dev/null
+++ b/frame/elections-phragmen/src/weights.rs
@@ -0,0 +1,334 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for pallet_elections_phragmen
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/substrate
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_elections_phragmen
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --output=./frame/elections-phragmen/src/weights.rs
+// --template=./.maintain/frame-weight-template.hbs
+
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for pallet_elections_phragmen.
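+///
+/// A concrete implementation is selected through the pallet's `Config::WeightInfo`
+/// associated type. Illustrative wiring only (`Runtime` is a placeholder and the other
+/// `Config` items are elided):
+///
+/// ```ignore
+/// impl pallet_elections_phragmen::Config for Runtime {
+///     // ...
+///     type WeightInfo = pallet_elections_phragmen::weights::SubstrateWeight<Runtime>;
+/// }
+/// ```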
+pub trait WeightInfo { + fn vote_equal(v: u32, ) -> Weight; + fn vote_more(v: u32, ) -> Weight; + fn vote_less(v: u32, ) -> Weight; + fn remove_voter() -> Weight; + fn submit_candidacy(c: u32, ) -> Weight; + fn renounce_candidacy_candidate(c: u32, ) -> Weight; + fn renounce_candidacy_members() -> Weight; + fn renounce_candidacy_runners_up() -> Weight; + fn remove_member_with_replacement() -> Weight; + fn remove_member_without_replacement() -> Weight; + fn remove_member_wrong_refund() -> Weight; + fn clean_defunct_voters(v: u32, d: u32, ) -> Weight; + fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight; +} + +/// Weights for pallet_elections_phragmen using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: Elections Candidates (r:1 w:0) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_equal(v: u32, ) -> Weight { + (42_509_000 as Weight) + // Standard Error: 4_000 + .saturating_add((372_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Candidates (r:1 w:0) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_more(v: u32, ) -> Weight { + (65_311_000 as Weight) + // Standard Error: 6_000 + .saturating_add((419_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Candidates (r:1 w:0) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_less(v: u32, ) -> Weight { + (65_444_000 as Weight) + // Standard Error: 5_000 + .saturating_add((376_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn remove_voter() -> Weight { + (61_585_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Candidates (r:1 w:1) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + fn submit_candidacy(c: u32, ) -> Weight { + (53_333_000 as Weight) + // Standard Error: 1_000 + .saturating_add((267_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Elections Candidates (r:1 w:1) + fn renounce_candidacy_candidate(c: u32, ) -> Weight { + (49_128_000 as Weight) + // Standard Error: 1_000 + .saturating_add((144_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Elections Members (r:1 w:1) + // Storage: Elections RunnersUp (r:1 w:1) + // Storage: Instance1Collective Prime (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:0) + // Storage: Instance1Collective Members (r:0 w:1) + fn renounce_candidacy_members() -> Weight { + 
(70_685_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + // Storage: Elections RunnersUp (r:1 w:1) + fn renounce_candidacy_runners_up() -> Weight { + (49_766_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: Elections RunnersUp (r:1 w:1) + // Storage: Elections Members (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Instance1Collective Prime (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:0) + // Storage: Instance1Collective Members (r:0 w:1) + fn remove_member_with_replacement() -> Weight { + (76_153_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn remove_member_without_replacement() -> Weight { + T::BlockWeights::get().max_block + } + // Storage: Elections RunnersUp (r:1 w:0) + fn remove_member_wrong_refund() -> Weight { + (6_697_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + // Storage: Elections Voting (r:251 w:250) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // Storage: Elections Candidates (r:1 w:0) + // Storage: Balances Locks (r:250 w:250) + // Storage: System Account (r:250 w:250) + fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 60_000 + .saturating_add((107_467_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + } + // Storage: Elections Candidates (r:1 w:1) + // Storage: Elections Members (r:1 w:1) + // Storage: Elections RunnersUp (r:1 w:1) + // Storage: Elections Voting (r:502 w:0) + // Storage: Instance1Collective Proposals (r:1 w:0) + // Storage: Elections ElectionRounds (r:1 w:1) + // Storage: Instance1Collective Members (r:0 w:1) + // Storage: Instance1Collective Prime (r:0 w:1) + // Storage: System Account (r:2 w:2) + fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 1_846_000 + .saturating_add((39_843_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 768_000 + .saturating_add((60_623_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 52_000 + .saturating_add((3_884_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: Elections Candidates (r:1 w:0) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_equal(v: u32, ) -> Weight { + (42_509_000 as Weight) + // Standard Error: 4_000 + .saturating_add((372_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Candidates (r:1 w:0) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // 
Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_more(v: u32, ) -> Weight { + (65_311_000 as Weight) + // Standard Error: 6_000 + .saturating_add((419_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Candidates (r:1 w:0) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn vote_less(v: u32, ) -> Weight { + (65_444_000 as Weight) + // Standard Error: 5_000 + .saturating_add((376_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Voting (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + fn remove_voter() -> Weight { + (61_585_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + // Storage: Elections Candidates (r:1 w:1) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + fn submit_candidacy(c: u32, ) -> Weight { + (53_333_000 as Weight) + // Standard Error: 1_000 + .saturating_add((267_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Elections Candidates (r:1 w:1) + fn renounce_candidacy_candidate(c: u32, ) -> Weight { + (49_128_000 as Weight) + // Standard Error: 1_000 + .saturating_add((144_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Elections Members (r:1 w:1) + // Storage: Elections RunnersUp (r:1 w:1) + // Storage: Instance1Collective Prime (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:0) + // Storage: Instance1Collective Members (r:0 w:1) + fn renounce_candidacy_members() -> Weight { + (70_685_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + // Storage: Elections RunnersUp (r:1 w:1) + fn renounce_candidacy_runners_up() -> Weight { + (49_766_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: Elections RunnersUp (r:1 w:1) + // Storage: Elections Members (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Instance1Collective Prime (r:1 w:1) + // Storage: Instance1Collective Proposals (r:1 w:0) + // Storage: Instance1Collective Members (r:0 w:1) + fn remove_member_with_replacement() -> Weight { + (76_153_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn remove_member_without_replacement() -> Weight { + (76_153_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + // Storage: Elections RunnersUp (r:1 w:0) + fn remove_member_wrong_refund() -> Weight { + (6_697_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + // Storage: Elections Voting (r:251 w:250) + // Storage: Elections Members (r:1 w:0) + // Storage: Elections RunnersUp (r:1 w:0) + // Storage: Elections 
Candidates (r:1 w:0)
+	// Storage: Balances Locks (r:250 w:250)
+	// Storage: System Account (r:250 w:250)
+	fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 60_000
+			.saturating_add((107_467_000 as Weight).saturating_mul(v as Weight))
+			.saturating_add(RocksDbWeight::get().reads(4 as Weight))
+			.saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight)))
+			.saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight)))
+	}
+	// Storage: Elections Candidates (r:1 w:1)
+	// Storage: Elections Members (r:1 w:1)
+	// Storage: Elections RunnersUp (r:1 w:1)
+	// Storage: Elections Voting (r:502 w:0)
+	// Storage: Instance1Collective Proposals (r:1 w:0)
+	// Storage: Elections ElectionRounds (r:1 w:1)
+	// Storage: Instance1Collective Members (r:0 w:1)
+	// Storage: Instance1Collective Prime (r:0 w:1)
+	// Storage: System Account (r:2 w:2)
+	fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 1_846_000
+			.saturating_add((39_843_000 as Weight).saturating_mul(c as Weight))
+			// Standard Error: 768_000
+			.saturating_add((60_623_000 as Weight).saturating_mul(v as Weight))
+			// Standard Error: 52_000
+			.saturating_add((3_884_000 as Weight).saturating_mul(e as Weight))
+			.saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight)))
+			.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight)))
+			.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight)))
+	}
+}
diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml
index f0281a3033dd2..8557cfba6b58c 100644
--- a/frame/elections/Cargo.toml
+++ b/frame/elections/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-elections"
-version = "2.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Apache-2.0"
@@ -13,28 +13,30 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-serde = { version = "1.0.101", optional = true }
-codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
-sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" }
-sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" }
-sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" }
-frame-support = { version = "2.0.0", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0", default-features = false, path = "../system" }
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [
+	"derive",
+] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
+sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
 
 [dev-dependencies]
-hex-literal = "0.3.1"
-pallet-balances = { version
= "2.0.0", path = "../balances" } +pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "sp-core/std", "sp-std/std", - "serde", "sp-io/std", "frame-support/std", "sp-runtime/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index 9b61a9b3509a9..ac13bce31b0f6 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,30 +22,33 @@ //! //! --- //! -//! Election module for stake-weighted membership selection of a collective. +//! Election pallet for stake-weighted membership selection of a collective. //! //! The composition of a set of account IDs works according to one or more approval votes //! weighted by stake. There is a partial carry-over facility to give greater weight to those //! whose voting is serially unsuccessful. #![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_std::prelude::*; -use sp_runtime::{ - RuntimeDebug, DispatchResult, print, - traits::{Zero, One, StaticLookup, Saturating}, -}; +use codec::{Decode, Encode}; use frame_support::{ - decl_storage, decl_event, ensure, decl_module, decl_error, - weights::{Weight, DispatchClass}, + ensure, + pallet_prelude::*, traits::{ - Currency, ExistenceRequirement, Get, LockableCurrency, LockIdentifier, BalanceStatus, - OnUnbalanced, ReservableCurrency, WithdrawReason, WithdrawReasons, ChangeMembers, - } + BalanceStatus, ChangeMembers, Currency, ExistenceRequirement, LockIdentifier, + LockableCurrency, OnUnbalanced, ReservableCurrency, WithdrawReasons, + }, + weights::{DispatchClass, Weight}, +}; +use frame_system::pallet_prelude::*; +pub use pallet::*; +use sp_runtime::{ + print, + traits::{One, Saturating, StaticLookup, Zero}, + RuntimeDebug, }; -use codec::{Encode, Decode}; -use frame_system::{ensure_signed, ensure_root}; +use sp_std::prelude::*; mod mock; mod tests; @@ -70,8 +73,8 @@ mod tests; // - remove inactive voter (either you or the target is removed; if the target, you get their // "voter" bond back; O(1); one fewer DB entry, one DB change) // - submit candidacy (you pay a "candidate" bond; O(1); one extra DB entry, two DB changes) -// - present winner/runner-up (you may pay a "presentation" bond of O(voters) if the presentation -// is invalid; O(voters) compute; ) protected operations: +// - present winner/runner-up (you may pay a "presentation" bond of O(voters) if the presentation is +// invalid; O(voters) compute; ) protected operations: // - remove candidacy (remove all votes for a candidate) (one fewer DB entry, two DB changes) // to avoid a potentially problematic case of not-enough approvals prior to voting causing a @@ -108,7 +111,9 @@ mod tests; // entries before they increase the capacity. /// The activity status of a voter. -#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Default, RuntimeDebug)] +#[derive( + PartialEq, Eq, Copy, Clone, Encode, Decode, Default, RuntimeDebug, scale_info::TypeInfo, +)] pub struct VoterInfo { /// Last VoteIndex in which this voter assigned (or initialized) approvals. 
last_active: VoteIndex, @@ -125,8 +130,8 @@ pub struct VoterInfo { /// Used to demonstrate the status of a particular index in the global voter list. #[derive(PartialEq, Eq, RuntimeDebug)] pub enum CellStatus { - /// Any out of bound index. Means a push a must happen to the chunk pointed by `NextVoterSet`. - /// Voting fee is applied in case a new chunk is created. + /// Any out of bound index. Means a push a must happen to the chunk pointed by + /// `NextVoterSet`. Voting fee is applied in case a new chunk is created. Head, /// Already occupied by another voter. Voting fee is applied. Occupied, @@ -139,9 +144,11 @@ pub const VOTER_SET_SIZE: usize = 64; /// NUmber of approvals grouped in one chunk. pub const APPROVAL_SET_SIZE: usize = 8; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// Index used to access chunks. type SetIndex = u32; @@ -152,141 +159,242 @@ type ApprovalFlag = u32; /// Number of approval flags that can fit into [`ApprovalFlag`] type. const APPROVAL_FLAG_LEN: usize = 32; -pub trait Trait: frame_system::Trait { - type Event: From> + Into<::Event>; +#[frame_support::pallet] +pub mod pallet { + use super::*; - /// Identifier for the elections pallet's lock - type ModuleId: Get; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - /// The currency that people are electing with. - type Currency: - LockableCurrency - + ReservableCurrency; + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + IsType<::Event>; - /// Handler for the unbalanced reduction when slashing a validator. - type BadPresentation: OnUnbalanced>; + /// Identifier for the elections pallet's lock + #[pallet::constant] + type PalletId: Get; - /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. - type BadReaper: OnUnbalanced>; + /// The currency that people are electing with. + type Currency: LockableCurrency + + ReservableCurrency; - /// Handler for the unbalanced reduction when submitting a bad `voter_index`. - type BadVoterIndex: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing a validator. + type BadPresentation: OnUnbalanced>; - /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner up) - type LoserCandidate: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. + type BadReaper: OnUnbalanced>; - /// What to do when the members change. - type ChangeMembers: ChangeMembers; + /// Handler for the unbalanced reduction when submitting a bad `voter_index`. + type BadVoterIndex: OnUnbalanced>; - /// How much should be locked up in order to submit one's candidacy. A reasonable - /// default value is 9. - type CandidacyBond: Get>; + /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner up) + type LoserCandidate: OnUnbalanced>; - /// How much should be locked up in order to be able to submit votes. - type VotingBond: Get>; + /// What to do when the members change. + type ChangeMembers: ChangeMembers; - /// The amount of fee paid upon each vote submission, unless if they submit a - /// _hole_ index and replace it. - type VotingFee: Get>; + /// How much should be locked up in order to submit one's candidacy. 
A reasonable + /// default value is 9. + #[pallet::constant] + type CandidacyBond: Get>; - /// Minimum about that can be used as the locked value for voting. - type MinimumVotingLock: Get>; + /// How much should be locked up in order to be able to submit votes. + #[pallet::constant] + type VotingBond: Get>; - /// The punishment, per voter, if you provide an invalid presentation. A - /// reasonable default value is 1. - type PresentSlashPerVoter: Get>; + /// The amount of fee paid upon each vote submission, unless if they submit a + /// _hole_ index and replace it. + #[pallet::constant] + type VotingFee: Get>; - /// How many runners-up should have their approvals persist until the next - /// vote. A reasonable default value is 2. - type CarryCount: Get; + /// Minimum about that can be used as the locked value for voting. + #[pallet::constant] + type MinimumVotingLock: Get>; - /// How many vote indices need to go by after a target voter's last vote before - /// they can be reaped if their approvals are moot. A reasonable default value - /// is 1. - type InactiveGracePeriod: Get; + /// The punishment, per voter, if you provide an invalid presentation. A + /// reasonable default value is 1. + #[pallet::constant] + type PresentSlashPerVoter: Get>; - /// How often (in blocks) to check for new votes. A reasonable default value - /// is 1000. - type VotingPeriod: Get; + /// How many runners-up should have their approvals persist until the next + /// vote. A reasonable default value is 2. + #[pallet::constant] + type CarryCount: Get; - /// Decay factor of weight when being accumulated. It should typically be set to - /// __at least__ `membership_size -1` to keep the collective secure. - /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight - /// increment step `t`. 0 will result in no weight being added at all (normal - /// approval voting). A reasonable default value is 24. - type DecayRatio: Get; -} + /// How many vote indices need to go by after a target voter's last vote before + /// they can be reaped if their approvals are moot. A reasonable default value + /// is 1. + #[pallet::constant] + type InactiveGracePeriod: Get; -decl_storage! { - trait Store for Module as Elections { - // ---- parameters - - /// How long to give each top candidate to present themselves after the vote ends. - pub PresentationDuration get(fn presentation_duration) config(): T::BlockNumber; - /// How long each position is active for. - pub TermDuration get(fn term_duration) config(): T::BlockNumber; - /// Number of accounts that should constitute the collective. - pub DesiredSeats get(fn desired_seats) config(): u32; - - // ---- permanent state (always relevant, changes only at the finalization of voting) - - /// The current membership. When there's a vote going on, this should still be used for - /// executive matters. The block number (second element in the tuple) is the block that - /// their position is active until (calculated by the sum of the block number when the - /// member was elected and their term duration). - pub Members get(fn members) config(): Vec<(T::AccountId, T::BlockNumber)>; - /// The total number of vote rounds that have happened or are in progress. - pub VoteCount get(fn vote_index): VoteIndex; - - // ---- persistent state (always relevant, changes constantly) - - // A list of votes for each voter. The votes are stored as numeric values and parsed in a - // bit-wise manner. In order to get a human-readable representation (`Vec`), use - // [`all_approvals_of`]. 
Furthermore, each vector of scalars is chunked with the cap of - // `APPROVAL_SET_SIZE`. - /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash and `SetIndex` is not - /// attacker-controlled. - pub ApprovalsOf get(fn approvals_of): - map hasher(twox_64_concat) (T::AccountId, SetIndex) => Vec; - /// The vote index and list slot that the candidate `who` was registered or `None` if they - /// are not currently registered. - /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. - pub RegisterInfoOf get(fn candidate_reg_info): - map hasher(twox_64_concat) T::AccountId => Option<(VoteIndex, u32)>; - /// Basic information about a voter. - /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. - pub VoterInfoOf get(fn voter_info): - map hasher(twox_64_concat) T::AccountId => Option>>; - /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). - /// - /// TWOX-NOTE: OKAY ― `SetIndex` is not user-controlled data. - pub Voters get(fn voters): map hasher(twox_64_concat) SetIndex => Vec>; - /// the next free set to store a voter in. This will keep growing. - pub NextVoterSet get(fn next_nonfull_voter_set): SetIndex = 0; - /// Current number of Voters. - pub VoterCount get(fn voter_count): SetIndex = 0; - /// The present candidate list. - pub Candidates get(fn candidates): Vec; // has holes - /// Current number of active candidates - pub CandidateCount get(fn candidate_count): u32; - - // ---- temporary state (only relevant during finalization/presentation) - - /// The accounts holding the seats that will become free on the next tally. - pub NextFinalize get(fn next_finalize): Option<(T::BlockNumber, u32, Vec)>; - /// Get the leaderboard if we're in the presentation phase. The first element is the weight - /// of each entry; It may be the direct summed approval stakes, or a weighted version of it. - /// Sorted from low to high. - pub Leaderboard get(fn leaderboard): Option, T::AccountId)> >; + /// How often (in blocks) to check for new votes. A reasonable default value + /// is 1000. + #[pallet::constant] + type VotingPeriod: Get; + + /// Decay factor of weight when being accumulated. It should typically be set to + /// __at least__ `membership_size -1` to keep the collective secure. + /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight + /// increment step `t`. 0 will result in no weight being added at all (normal + /// approval voting). A reasonable default value is 24. + #[pallet::constant] + type DecayRatio: Get; + } + + #[pallet::extra_constants] + impl Pallet { + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + /// The chunk size of the voter vector. + #[allow(non_snake_case)] + fn VOTER_SET_SIZE() -> u32 { + VOTER_SET_SIZE as u32 + } + + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + /// The chunk size of the approval vector. + #[allow(non_snake_case)] + fn APPROVAL_SET_SIZE() -> u32 { + APPROVAL_SET_SIZE as u32 + } } -} -decl_error! { - /// Error for the elections module. - pub enum Error for Module { + // ---- permanent state (always relevant, changes only at the finalization of voting) + + /// How long to give each top candidate to present themselves after the vote ends. + #[pallet::storage] + #[pallet::getter(fn presentation_duration)] + pub type PresentationDuration = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// How long each position is active for. 
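+	// Editor's note: these `ValueQuery` items return `Default::default()` when
+	// unset; their genesis values are seeded by the `GenesisConfig` further down,
+	// which replaces the old `decl_storage!` `config()` annotations.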
+ #[pallet::storage] + #[pallet::getter(fn term_duration)] + pub type TermDuration = StorageValue<_, T::BlockNumber, ValueQuery>; + + /// Number of accounts that should constitute the collective. + #[pallet::storage] + #[pallet::getter(fn desired_seats)] + pub type DesiredSeats = StorageValue<_, u32, ValueQuery>; + + // ---- permanent state (always relevant, changes only at the finalization of voting) + + /// The current membership. When there's a vote going on, this should still be used for + /// executive matters. The block number (second element in the tuple) is the block that + /// their position is active until (calculated by the sum of the block number when the + /// member was elected and their term duration). + #[pallet::storage] + #[pallet::getter(fn members)] + pub type Members = StorageValue<_, Vec<(T::AccountId, T::BlockNumber)>, ValueQuery>; + + /// The total number of vote rounds that have happened or are in progress. + #[pallet::storage] + #[pallet::getter(fn vote_index)] + pub type VoteCount = StorageValue<_, VoteIndex, ValueQuery>; + + // ---- persistent state (always relevant, changes constantly) + + // A list of votes for each voter. The votes are stored as numeric values and parsed in a + // bit-wise manner. In order to get a human-readable representation (`Vec`), use + // [`all_approvals_of`]. Furthermore, each vector of scalars is chunked with the cap of + // `APPROVAL_SET_SIZE`. + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash and `SetIndex` is not + /// attacker-controlled. + #[pallet::storage] + #[pallet::getter(fn approvals_of)] + pub type ApprovalsOf = + StorageMap<_, Twox64Concat, (T::AccountId, SetIndex), Vec, ValueQuery>; + + /// The vote index and list slot that the candidate `who` was registered or `None` if they + /// are not currently registered. + /// + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. + #[pallet::storage] + #[pallet::getter(fn candidate_reg_info)] + pub type RegisterInfoOf = + StorageMap<_, Twox64Concat, T::AccountId, (VoteIndex, u32)>; + + /// Basic information about a voter. + /// + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. + #[pallet::storage] + #[pallet::getter(fn voter_info)] + pub type VoterInfoOf = + StorageMap<_, Twox64Concat, T::AccountId, VoterInfo>>; + + /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). + /// + /// TWOX-NOTE: OKAY ― `SetIndex` is not user-controlled data. + #[pallet::storage] + #[pallet::getter(fn voters)] + pub type Voters = + StorageMap<_, Twox64Concat, SetIndex, Vec>, ValueQuery>; + + /// the next free set to store a voter in. This will keep growing. + #[pallet::storage] + #[pallet::getter(fn next_nonfull_voter_set)] + pub type NextVoterSet = StorageValue<_, SetIndex, ValueQuery>; + + /// Current number of Voters. + #[pallet::storage] + #[pallet::getter(fn voter_count)] + pub type VoterCount = StorageValue<_, SetIndex, ValueQuery>; + + /// The present candidate list. + #[pallet::storage] + #[pallet::getter(fn candidates)] + pub type Candidates = StorageValue<_, Vec, ValueQuery>; // has holes + + /// Current number of active candidates + #[pallet::storage] + #[pallet::getter(fn candidate_count)] + pub type CandidateCount = StorageValue<_, u32, ValueQuery>; + + // ---- temporary state (only relevant during finalization/presentation) + + /// The accounts holding the seats that will become free on the next tally. 
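+	// Editor's note: unlike the `ValueQuery` items above, `NextFinalize` and
+	// `Leaderboard` omit the query-kind parameter and are therefore `OptionQuery`:
+	// `None` here is what marks the presentation phase as inactive.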
+ #[pallet::storage] + #[pallet::getter(fn next_finalize)] + pub type NextFinalize = StorageValue<_, (T::BlockNumber, u32, Vec)>; + + /// Get the leaderboard if we're in the presentation phase. The first element is the weight + /// of each entry; It may be the direct summed approval stakes, or a weighted version of it. + /// Sorted from low to high. + #[pallet::storage] + #[pallet::getter(fn leaderboard)] + pub type Leaderboard = StorageValue<_, Vec<(BalanceOf, T::AccountId)>>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub presentation_duration: T::BlockNumber, + pub term_duration: T::BlockNumber, + pub desired_seats: u32, + pub members: Vec<(T::AccountId, T::BlockNumber)>, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { + presentation_duration: Default::default(), + term_duration: Default::default(), + desired_seats: Default::default(), + members: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + PresentationDuration::::put(self.presentation_duration); + TermDuration::::put(self.term_duration); + DesiredSeats::::put(self.desired_seats); + Members::::put(&self.members); + } + } + + #[pallet::error] + pub enum Error { /// Reporter must be a voter. NotVoter, /// Target for inactivity cleanup must be active. @@ -342,64 +450,40 @@ decl_error! { /// No approval changes during presentation period. ApprovalPresentation, } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// How much should be locked up in order to submit one's candidacy. A reasonable - /// default value is 9. - const CandidacyBond: BalanceOf = T::CandidacyBond::get(); - - /// How much should be locked up in order to be able to submit votes. - const VotingBond: BalanceOf = T::VotingBond::get(); - - /// The amount of fee paid upon each vote submission, unless if they submit a - /// _hole_ index and replace it. - const VotingFee: BalanceOf = T::VotingFee::get(); - - /// The punishment, per voter, if you provide an invalid presentation. A - /// reasonable default value is 1. - const PresentSlashPerVoter: BalanceOf = T::PresentSlashPerVoter::get(); - - /// How many runners-up should have their approvals persist until the next - /// vote. A reasonable default value is 2. - const CarryCount: u32 = T::CarryCount::get(); - /// How many vote indices need to go by after a target voter's last vote before - /// they can be reaped if their approvals are moot. A reasonable default value - /// is 1. - const InactiveGracePeriod: VoteIndex = T::InactiveGracePeriod::get(); - - /// How often (in blocks) to check for new votes. A reasonable default value - /// is 1000. - const VotingPeriod: T::BlockNumber = T::VotingPeriod::get(); - - /// Minimum about that can be used as the locked value for voting. - const MinimumVotingLock: BalanceOf = T::MinimumVotingLock::get(); - - /// Decay factor of weight when being accumulated. It should typically be set to - /// __at least__ `membership_size -1` to keep the collective secure. - /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight - /// increment step `t`. 0 will result in no weight being added at all (normal - /// approval voting). A reasonable default value is 24. - const DecayRatio: u32 = T::DecayRatio::get(); - - /// The chunk size of the voter vector. - const VOTER_SET_SIZE: u32 = VOTER_SET_SIZE as u32; - /// The chunk size of the approval vector. 
- const APPROVAL_SET_SIZE: u32 = APPROVAL_SET_SIZE as u32; - - const ModuleId: LockIdentifier = T::ModuleId::get(); + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: T::BlockNumber) -> Weight { + if let Err(e) = Self::end_block(n) { + print("Guru meditation"); + print(e); + } + 0 + } + } - fn deposit_event() = default; + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Reaped \[voter, reaper\]. + VoterReaped(T::AccountId, T::AccountId), + /// Slashed \[reaper\]. + BadReaperSlashed(T::AccountId), + /// A tally (for approval votes of \[seats\]) has started. + TallyStarted(u32), + /// A tally (for approval votes of seat(s)) has ended (with one or more new members). + /// \[incoming, outgoing\] + TallyFinalized(Vec, Vec), + } + #[pallet::call] + impl Pallet { /// Set candidate approvals. Approval slots stay valid as long as candidates in those slots /// are registered. /// - /// Locks `value` from the balance of `origin` indefinitely. Only [`retract_voter`] or - /// [`reap_inactive_voter`] can unlock the balance. + /// Locks `value` from the balance of `origin` indefinitely. Only + /// [`retract_voter`](Self::retract_voter) or + /// [`reap_inactive_voter`](Self::reap_inactive_voter) can unlock the balance. /// /// `hint` argument is interpreted differently based on: /// - if `origin` is setting approvals for the first time: The index will be checked for @@ -407,7 +491,7 @@ decl_module! { /// - if the hint is correctly pointing to a hole, no fee is deducted from `origin`. /// - Otherwise, the call will succeed but the index is ignored and simply a push to the /// last chunk with free space happens. If the new push causes a new chunk to be - /// created, a fee indicated by [`VotingFee`] is deducted. + /// created, a fee indicated by [`Config::VotingFee`] is deducted. /// - if `origin` is already a voter: the index __must__ be valid and point to the correct /// position of the `origin` in the current voters list. /// @@ -419,13 +503,13 @@ decl_module! { /// - Two extra DB entries, one DB change. /// - Argument `votes` is limited in length to number of candidates. /// # - #[weight = 2_500_000_000] - fn set_approvals( - origin, + #[pallet::weight(2_500_000_000)] + pub fn set_approvals( + origin: OriginFor, votes: Vec, - #[compact] index: VoteIndex, + #[pallet::compact] index: VoteIndex, hint: SetIndex, - #[compact] value: BalanceOf, + #[pallet::compact] value: BalanceOf, ) -> DispatchResult { let who = ensure_signed(origin)?; Self::do_set_approvals(who, votes, index, hint, value) @@ -435,7 +519,11 @@ decl_module! { /// must now be either unregistered or registered to a candidate that registered the slot /// after the voter gave their last approval set. /// - /// Both indices must be provided as explained in [`voter_at`] function. + /// Both indices must be provided according to the following principle: + /// Voter index does not take holes into account. This means that any account submitting an + /// index at any point in time should submit: + /// `VOTER_SET_SIZE * set_index + local_index`, meaning that you are ignoring all holes in + /// the first `set_index` sets. /// /// May be called by anyone. Returns the voter deposit to `signed`. /// @@ -443,14 +531,14 @@ decl_module! { /// - O(1). /// - Two fewer DB entries, one DB change. 
/// # - #[weight = 2_500_000_000] - fn reap_inactive_voter( - origin, - #[compact] reporter_index: u32, + #[pallet::weight(2_500_000_000)] + pub fn reap_inactive_voter( + origin: OriginFor, + #[pallet::compact] reporter_index: u32, who: ::Source, - #[compact] who_index: u32, - #[compact] assumed_vote_index: VoteIndex, - ) { + #[pallet::compact] who_index: u32, + #[pallet::compact] assumed_vote_index: VoteIndex, + ) -> DispatchResult { let reporter = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; @@ -468,7 +556,8 @@ decl_module! { let reporter_index = reporter_index as usize; let who_index = who_index as usize; - let assumed_reporter = Self::voter_at(reporter_index).ok_or(Error::::InvalidReporterIndex)?; + let assumed_reporter = + Self::voter_at(reporter_index).ok_or(Error::::InvalidReporterIndex)?; let assumed_who = Self::voter_at(who_index).ok_or(Error::::InvalidTargetIndex)?; ensure!(assumed_reporter == reporter, Error::::InvalidReporterIndex); @@ -476,49 +565,59 @@ decl_module! { // will definitely kill one of reporter or who now. - let valid = !Self::all_approvals_of(&who).iter() - .zip(Self::candidates().iter()) - .any(|(&appr, addr)| - appr && + let valid = !Self::all_approvals_of(&who).iter().zip(Self::candidates().iter()).any( + |(&appr, addr)| { + appr && *addr != T::AccountId::default() && // defensive only: all items in candidates list are registered Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active) - ); + }, + ); Self::remove_voter( if valid { &who } else { &reporter }, - if valid { who_index } else { reporter_index } + if valid { who_index } else { reporter_index }, ); - T::Currency::remove_lock( - T::ModuleId::get(), - if valid { &who } else { &reporter } - ); + T::Currency::remove_lock(T::PalletId::get(), if valid { &who } else { &reporter }); if valid { // This only fails if `reporter` doesn't exist, which it clearly must do since its // the origin. Still, it's no more harmful to propagate any error at this point. - T::Currency::repatriate_reserved(&who, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; - Self::deposit_event(RawEvent::VoterReaped(who, reporter)); + T::Currency::repatriate_reserved( + &who, + &reporter, + T::VotingBond::get(), + BalanceStatus::Free, + )?; + Self::deposit_event(Event::::VoterReaped(who, reporter)); } else { let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; T::BadReaper::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::BadReaperSlashed(reporter)); + Self::deposit_event(Event::::BadReaperSlashed(reporter)); } + Ok(()) } /// Remove a voter. All votes are cancelled and the voter deposit is returned. /// - /// The index must be provided as explained in [`voter_at`] function. + /// The index must be provided according to the following principle: + /// Voter index does not take holes into account. This means that any account submitting an + /// index at any point in time should submit: + /// `VOTER_SET_SIZE * set_index + local_index`, meaning that you are ignoring all holes in + /// the first `set_index` sets. /// - /// Also removes the lock on the balance of the voter. See [`do_set_approvals()`]. + /// Also removes the lock on the balance of the voter. /// /// # /// - O(1). /// - Two fewer DB entries, one DB change. 
/// # - #[weight = 1_250_000_000] - fn retract_voter(origin, #[compact] index: u32) { + #[pallet::weight(1_250_000_000)] + pub fn retract_voter( + origin: OriginFor, + #[pallet::compact] index: u32, + ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!Self::presentation_active(), Error::::CannotRetractPresenting); @@ -529,15 +628,17 @@ decl_module! { Self::remove_voter(&who, index); T::Currency::unreserve(&who, T::VotingBond::get()); - T::Currency::remove_lock(T::ModuleId::get(), &who); + T::Currency::remove_lock(T::PalletId::get(), &who); + Ok(()) } /// Submit oneself for candidacy. /// /// Account must have enough transferrable funds in it to pay the bond. /// - /// NOTE: if `origin` has already assigned approvals via [`set_approvals`], - /// it will NOT have any usable funds to pass candidacy bond and must first retract. + /// NOTE: if `origin` has already assigned approvals via + /// [`set_approvals`](Self::set_approvals), it will NOT have any usable funds to pass + /// candidacy bond and must first retract. /// Note that setting approvals will lock the entire balance of the voter until /// retraction or being reported. /// @@ -545,8 +646,11 @@ decl_module! { /// - Independent of input. /// - Three DB changes. /// # - #[weight = 2_500_000_000] - fn submit_candidacy(origin, #[compact] slot: u32) { + #[pallet::weight(2_500_000_000)] + pub fn submit_candidacy( + origin: OriginFor, + #[pallet::compact] slot: u32, + ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!Self::is_a_candidate(&who), Error::::DuplicatedCandidate); @@ -570,7 +674,8 @@ decl_module! { candidates[slot] = who; } >::put(candidates); - CandidateCount::put(count as u32 + 1); + CandidateCount::::put(count as u32 + 1); + Ok(()) } /// Claim that `candidate` is one of the top `carry_count + desired_seats` candidates. Only @@ -582,46 +687,39 @@ decl_module! { /// - O(voters) compute. /// - One DB change. 
/// # - #[weight = 10_000_000_000] - fn present_winner( - origin, + #[pallet::weight(10_000_000_000)] + pub fn present_winner( + origin: OriginFor, candidate: ::Source, - #[compact] total: BalanceOf, - #[compact] index: VoteIndex, + #[pallet::compact] total: BalanceOf, + #[pallet::compact] index: VoteIndex, ) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!( - !total.is_zero(), - Error::::ZeroDeposit, - ); + ensure!(!total.is_zero(), Error::::ZeroDeposit); let candidate = T::Lookup::lookup(candidate)?; ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); - let (_, _, expiring) = Self::next_finalize() - .ok_or(Error::::NotPresentationPeriod)?; + let (_, _, expiring) = + Self::next_finalize().ok_or(Error::::NotPresentationPeriod)?; let bad_presentation_punishment = - T::PresentSlashPerVoter::get() - * BalanceOf::::from(Self::voter_count() as u32); + T::PresentSlashPerVoter::get() * BalanceOf::::from(Self::voter_count() as u32); ensure!( T::Currency::can_slash(&who, bad_presentation_punishment), Error::::InsufficientPresenterFunds, ); - let mut leaderboard = Self::leaderboard() - .ok_or(Error::::LeaderboardMustExist)?; + let mut leaderboard = Self::leaderboard().ok_or(Error::::LeaderboardMustExist)?; ensure!(total > leaderboard[0].0, Error::::UnworthyCandidate); if let Some(p) = Self::members().iter().position(|&(ref c, _)| c == &candidate) { - ensure!( - p < expiring.len(), - Error::::DuplicatedCandidate, - ); + ensure!(p < expiring.len(), Error::::DuplicatedCandidate); } let voters = Self::all_voters(); let (registered_since, candidate_index): (VoteIndex, u32) = Self::candidate_reg_info(&candidate).ok_or(Error::::InvalidCandidate)?; - let actual_total = voters.iter() + let actual_total = voters + .iter() .filter_map(|maybe_voter| maybe_voter.as_ref()) .filter_map(|voter| match Self::voter_info(voter) { Some(b) if b.last_active >= registered_since => { @@ -632,7 +730,9 @@ decl_module! { let weight = stake + offset + b.pot; if Self::approvals_of_at(voter, candidate_index as usize) { Some(weight) - } else { None } + } else { + None + } }, _ => None, }) @@ -649,77 +749,73 @@ decl_module! { // better safe than sorry. let imbalance = T::Currency::slash(&who, bad_presentation_punishment).0; T::BadPresentation::on_unbalanced(imbalance); - Err(if dupe { Error::::DuplicatedPresentation } else { Error::::IncorrectTotal })? + Err(if dupe { + Error::::DuplicatedPresentation + } else { + Error::::IncorrectTotal + })? } } /// Set the desired member count; if lower than the current count, then seats will not be up /// election when they expire. If more, then a new vote will be started if one is not /// already in progress. - #[weight = (0, DispatchClass::Operational)] - fn set_desired_seats(origin, #[compact] count: u32) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn set_desired_seats( + origin: OriginFor, + #[pallet::compact] count: u32, + ) -> DispatchResult { ensure_root(origin)?; - DesiredSeats::put(count); + DesiredSeats::::put(count); + Ok(()) } /// Remove a particular member from the set. This is effective immediately. /// /// Note: A tally should happen instantly (if not already in a presentation /// period) to fill the seat if removal means that the desired members are not met. 
- #[weight = (0, DispatchClass::Operational)] - fn remove_member(origin, who: ::Source) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn remove_member( + origin: OriginFor, + who: ::Source, + ) -> DispatchResult { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; - let new_set: Vec<(T::AccountId, T::BlockNumber)> = Self::members() - .into_iter() - .filter(|i| i.0 != who) - .collect(); + let new_set: Vec<(T::AccountId, T::BlockNumber)> = + Self::members().into_iter().filter(|i| i.0 != who).collect(); >::put(&new_set); let new_set = new_set.into_iter().map(|x| x.0).collect::>(); T::ChangeMembers::change_members(&[], &[who], new_set); + Ok(()) } /// Set the presentation duration. If there is currently a vote being presented for, will /// invoke `finalize_vote`. - #[weight = (0, DispatchClass::Operational)] - fn set_presentation_duration(origin, #[compact] count: T::BlockNumber) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn set_presentation_duration( + origin: OriginFor, + #[pallet::compact] count: T::BlockNumber, + ) -> DispatchResult { ensure_root(origin)?; >::put(count); + Ok(()) } /// Set the presentation duration. If there is current a vote being presented for, will /// invoke `finalize_vote`. - #[weight = (0, DispatchClass::Operational)] - fn set_term_duration(origin, #[compact] count: T::BlockNumber) { + #[pallet::weight((0, DispatchClass::Operational))] + pub fn set_term_duration( + origin: OriginFor, + #[pallet::compact] count: T::BlockNumber, + ) -> DispatchResult { ensure_root(origin)?; >::put(count); - } - - fn on_initialize(n: T::BlockNumber) -> Weight { - if let Err(e) = Self::end_block(n) { - print("Guru meditation"); - print(e); - } - 0 + Ok(()) } } } -decl_event!( - pub enum Event where ::AccountId { - /// Reaped \[voter, reaper\]. - VoterReaped(AccountId, AccountId), - /// Slashed \[reaper\]. - BadReaperSlashed(AccountId), - /// A tally (for approval votes of \[seats\]) has started. - TallyStarted(u32), - /// A tally (for approval votes of seat(s)) has ended (with one or more new members). - /// \[incoming, outgoing\] - TallyFinalized(Vec, Vec), - } -); - -impl Module { +impl Pallet { // exposed immutables. /// True if we're currently in a presentation period. @@ -734,7 +830,8 @@ impl Module { /// Iff the member `who` still has a seat at blocknumber `n` returns `true`. 
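	// Editor's note: `members` holds `(account, expires_at_block)` pairs; the
	// stored expiry is what gets compared against `n` below, and `start_tally`
	// further down relies on the list being in expiry order when it `take_while`s
	// the expiring seats off the front.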
pub fn will_still_be_member_at(who: &T::AccountId, n: T::BlockNumber) -> bool { - Self::members().iter() + Self::members() + .iter() .find(|&&(ref a, _)| a == who) .map(|&(_, expires)| expires > n) .unwrap_or(false) @@ -754,13 +851,14 @@ impl Module { None } else { let c = Self::members(); - let (next_possible, count, coming) = - if let Some((tally_end, comers, leavers)) = Self::next_finalize() { - // if there's a tally in progress, then next tally can begin immediately afterwards - (tally_end, c.len() - leavers.len() + comers as usize, comers) - } else { - (>::block_number(), c.len(), 0) - }; + let (next_possible, count, coming) = if let Some((tally_end, comers, leavers)) = + Self::next_finalize() + { + // if there's a tally in progress, then next tally can begin immediately afterwards + (tally_end, c.len() - leavers.len() + comers as usize, comers) + } else { + (>::block_number(), c.len(), 0) + }; if count < desired_seats as usize { Some(next_possible) } else { @@ -772,7 +870,8 @@ impl Module { } else { Some(c[c.len() - (desired_seats - coming) as usize].1) } - }.map(Self::next_vote_from) + } + .map(Self::next_vote_from) } } @@ -800,7 +899,7 @@ impl Module { let mut set = Self::voters(set_index); set[vec_index] = None; >::insert(set_index, set); - VoterCount::mutate(|c| *c = *c - 1); + VoterCount::::mutate(|c| *c = *c - 1); Self::remove_all_approvals_of(voter); >::remove(voter); } @@ -819,18 +918,12 @@ impl Module { ensure!(!Self::presentation_active(), Error::::ApprovalPresentation); ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); - ensure!( - !candidates_len.is_zero(), - Error::::ZeroCandidates, - ); + ensure!(!candidates_len.is_zero(), Error::::ZeroCandidates); // Prevent a vote from voters that provide a list of votes that exceeds the candidates // length since otherwise an attacker may be able to submit a very long list of `votes` that // far exceeds the amount of candidates and waste more computation than a reasonable voting // bond would cover. - ensure!( - candidates_len >= votes.len(), - Error::::TooManyVotes, - ); + ensure!(candidates_len >= votes.len(), Error::::TooManyVotes); ensure!(value >= T::MinimumVotingLock::get(), Error::::InsufficientLockedValue); // Amount to be locked up. 
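The hole-ignoring index convention referenced by `reap_inactive_voter` and `retract_voter` above is plain arithmetic over fixed-size chunks. A self-contained sketch, assuming nothing beyond the documented `VOTER_SET_SIZE` of 64:

```rust
// Voter indices ignore holes: a voter's global index is derived purely from
// its chunk (`set_index`) and its slot (`local_index`) within that chunk.
const VOTER_SET_SIZE: usize = 64;

/// The index a caller of `retract_voter` / `reap_inactive_voter` must submit.
fn global_voter_index(set_index: usize, local_index: usize) -> usize {
	assert!(local_index < VOTER_SET_SIZE, "slot must lie inside a single chunk");
	VOTER_SET_SIZE * set_index + local_index
}

/// Inverse mapping: which chunk and slot a global index points at.
fn locate_voter(global: usize) -> (usize, usize) {
	(global / VOTER_SET_SIZE, global % VOTER_SET_SIZE)
}

fn main() {
	// The third slot of the second chunk: 64 * 1 + 2 = 66.
	assert_eq!(global_voter_index(1, 2), 66);
	assert_eq!(locate_voter(66), (1, 2));
}
```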
@@ -871,7 +964,7 @@ impl Module { let imbalance = T::Currency::withdraw( &who, T::VotingFee::get(), - WithdrawReason::Fee.into(), + WithdrawReasons::FEE, ExistenceRequirement::KeepAlive, )?; T::BadVoterIndex::on_unbalanced(imbalance); @@ -879,22 +972,17 @@ impl Module { locked_balance -= T::VotingFee::get(); } if set_len + 1 == VOTER_SET_SIZE { - NextVoterSet::put(next + 1); + NextVoterSet::::put(next + 1); } >::append(next, Some(who.clone())); - } + }, } T::Currency::reserve(&who, T::VotingBond::get())?; - VoterCount::mutate(|c| *c = *c + 1); + VoterCount::::mutate(|c| *c = *c + 1); } - T::Currency::set_lock( - T::ModuleId::get(), - &who, - locked_balance, - WithdrawReasons::all(), - ); + T::Currency::set_lock(T::PalletId::get(), &who, locked_balance, WithdrawReasons::all()); >::insert( &who, @@ -903,7 +991,7 @@ impl Module { last_win: index, stake: locked_balance, pot: pot_to_set, - } + }, ); Self::set_approvals_chunked(&who, votes); @@ -914,21 +1002,29 @@ impl Module { fn start_tally() { let members = Self::members(); let desired_seats = Self::desired_seats() as usize; - let number = >::block_number(); - let expiring = - members.iter().take_while(|i| i.1 <= number).map(|i| i.0.clone()).collect::>(); + let number = >::block_number(); + let expiring = members + .iter() + .take_while(|i| i.1 <= number) + .map(|i| i.0.clone()) + .collect::>(); let retaining_seats = members.len() - expiring.len(); if retaining_seats < desired_seats { let empty_seats = desired_seats - retaining_seats; - >::put( - (number + Self::presentation_duration(), empty_seats as u32, expiring) - ); + >::put(( + number + Self::presentation_duration(), + empty_seats as u32, + expiring, + )); // initialize leaderboard. let leaderboard_size = empty_seats + T::CarryCount::get() as usize; - >::put(vec![(BalanceOf::::zero(), T::AccountId::default()); leaderboard_size]); + >::put(vec![ + (BalanceOf::::zero(), T::AccountId::default()); + leaderboard_size + ]); - Self::deposit_event(RawEvent::TallyStarted(empty_seats as u32)); + Self::deposit_event(Event::::TallyStarted(empty_seats as u32)); } } @@ -940,19 +1036,22 @@ impl Module { let (_, coming, expiring): (T::BlockNumber, u32, Vec) = >::take() .ok_or("finalize can only be called after a tally is started.")?; - let leaderboard: Vec<(BalanceOf, T::AccountId)> = >::take() - .unwrap_or_default(); - let new_expiry = >::block_number() + Self::term_duration(); + let leaderboard: Vec<(BalanceOf, T::AccountId)> = + >::take().unwrap_or_default(); + let new_expiry = >::block_number() + Self::term_duration(); // return bond to winners. let candidacy_bond = T::CandidacyBond::get(); - let incoming: Vec<_> = leaderboard.iter() + let incoming: Vec<_> = leaderboard + .iter() .rev() .take_while(|&&(b, _)| !b.is_zero()) .take(coming as usize) .map(|(_, a)| a) .cloned() - .inspect(|a| { T::Currency::unreserve(a, candidacy_bond); }) + .inspect(|a| { + T::Currency::unreserve(a, candidacy_bond); + }) .collect(); // Update last win index for anyone voted for any of the incomings. 
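The `Leaderboard` consumed in the hunks around here is kept sorted from low to high stake, which is why the winners are read off with `.rev()` below and why `present_winner` earlier only has to beat the entry at index 0. A minimal sketch of that discipline, with `u64` stand-ins for the balance and account types (illustrative, not the pallet's exact code):

```rust
// Stand-in types for BalanceOf<T> / T::AccountId in this sketch.
type Balance = u64;
type AccountId = u64;

/// Keep the board sorted low -> high; a presentation must beat the minimum.
fn present(board: &mut Vec<(Balance, AccountId)>, total: Balance, who: AccountId) -> bool {
	if board.first().map_or(true, |&(min, _)| total <= min) {
		return false; // corresponds to the `UnworthyCandidate` error
	}
	board[0] = (total, who);
	board.sort_by_key(|&(stake, _)| stake);
	true
}

fn main() {
	// A leaderboard initialized with zeroed entries, as in `start_tally`.
	let mut board = vec![(0, 0); 3];
	assert!(present(&mut board, 10, 1));
	assert!(present(&mut board, 30, 2));
	assert!(present(&mut board, 20, 3));
	// Winners are read from the back (highest stake first).
	assert_eq!(board.last().unwrap().1, 2);
}
```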
@@ -962,14 +1061,16 @@ impl Module { .iter() .filter_map(|mv| mv.as_ref()) .filter(|v| Self::approvals_of_at(*v, index)) - .for_each(|v| >::mutate(v, |a| { - if let Some(activity) = a { activity.last_win = Self::vote_index() + 1; } - })); + .for_each(|v| { + >::mutate(v, |a| { + if let Some(activity) = a { + activity.last_win = Self::vote_index() + 1; + } + }) + }); }); let members = Self::members(); - let outgoing: Vec<_> = members.iter() - .take(expiring.len()) - .map(|a| a.0.clone()).collect(); + let outgoing: Vec<_> = members.iter().take(expiring.len()).map(|a| a.0.clone()).collect(); // set the new membership set. let mut new_set: Vec<_> = members @@ -985,8 +1086,9 @@ impl Module { // clear all except runners-up from candidate list. let candidates = Self::candidates(); - let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. - let runners_up = leaderboard.into_iter() + let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. + let runners_up = leaderboard + .into_iter() .rev() .take_while(|&(b, _)| !b.is_zero()) .skip(coming as usize) @@ -1011,17 +1113,16 @@ impl Module { } } // discard any superfluous slots. - if let Some(last_index) = new_candidates - .iter() - .rposition(|c| *c != T::AccountId::default()) { - new_candidates.truncate(last_index + 1); - } + if let Some(last_index) = new_candidates.iter().rposition(|c| *c != T::AccountId::default()) + { + new_candidates.truncate(last_index + 1); + } - Self::deposit_event(RawEvent::TallyFinalized(incoming, outgoing)); + Self::deposit_event(Event::::TallyFinalized(incoming, outgoing)); >::put(new_candidates); - CandidateCount::put(count); - VoteCount::put(Self::vote_index() + 1); + CandidateCount::::put(count); + VoteCount::::put(Self::vote_index() + 1); Ok(()) } @@ -1044,7 +1145,7 @@ impl Module { loop { let next_set = >::get(index); if next_set.is_empty() { - break; + break } else { index += 1; all.extend(next_set); @@ -1090,9 +1191,7 @@ impl Module { approvals_flag_vec .chunks(APPROVAL_SET_SIZE) .enumerate() - .for_each(|(index, slice)| >::insert( - (&who, index as SetIndex), slice) - ); + .for_each(|(index, slice)| >::insert((&who, index as SetIndex), slice)); } /// shorthand for fetching a specific approval of a voter at a specific (global) index. @@ -1117,7 +1216,7 @@ impl Module { /// Return true of the bit `n` of scalar `x` is set to `1` and false otherwise. fn bit_at(x: ApprovalFlag, n: usize) -> bool { if n < APPROVAL_FLAG_LEN { - x & ( 1 << n ) != 0 + x & (1 << n) != 0 } else { false } @@ -1128,7 +1227,7 @@ impl Module { pub fn bool_to_flag(x: Vec) -> Vec { let mut result: Vec = Vec::with_capacity(x.len() / APPROVAL_FLAG_LEN); if x.is_empty() { - return result; + return result } result.push(0); let mut index = 0; @@ -1137,7 +1236,9 @@ impl Module { let shl_index = counter % APPROVAL_FLAG_LEN; result[index] += (if x[counter] { 1 } else { 0 }) << shl_index; counter += 1; - if counter > x.len() - 1 { break; } + if counter > x.len() - 1 { + break + } if counter % APPROVAL_FLAG_LEN == 0 { result.push(0); index += 1; @@ -1149,15 +1250,18 @@ impl Module { /// Convert a vec of flags (u32) to boolean. 
pub fn flag_to_bool(chunk: Vec) -> Vec { let mut result = Vec::with_capacity(chunk.len()); - if chunk.is_empty() { return vec![] } - chunk.into_iter() - .map(|num| + if chunk.is_empty() { + return vec![] + } + chunk + .into_iter() + .map(|num| { (0..APPROVAL_FLAG_LEN).map(|bit| Self::bit_at(num, bit)).collect::>() - ) + }) .for_each(|c| { let last_approve = match c.iter().rposition(|n| *n) { Some(index) => index + 1, - None => 0 + None => 0, }; result.extend(c.into_iter().take(last_approve)); }); @@ -1171,7 +1275,9 @@ impl Module { let mut index = 0_u32; loop { let chunk = Self::approvals_of((who.clone(), index)); - if chunk.is_empty() { break; } + if chunk.is_empty() { + break + } all.extend(Self::flag_to_bool(chunk)); index += 1; } @@ -1204,7 +1310,9 @@ impl Module { /// returned if `t` is zero. fn get_offset(stake: BalanceOf, t: VoteIndex) -> BalanceOf { let decay_ratio: BalanceOf = T::DecayRatio::get().into(); - if t > 150 { return stake * decay_ratio } + if t > 150 { + return stake * decay_ratio + } let mut offset = stake; let mut r = Zero::zero(); let decay = decay_ratio + One::one(); diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index deec77da7b837..91318e1e07bcc 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,27 +19,28 @@ #![cfg(test)] -use std::cell::RefCell; +use crate as elections; use frame_support::{ - StorageValue, StorageMap, parameter_types, assert_ok, - traits::{Get, ChangeMembers, Currency, LockIdentifier}, - weights::Weight, + assert_ok, parameter_types, + traits::{ChangeMembers, Currency, LockIdentifier}, }; use sp_core::H256; use sp_runtime::{ - Perbill, BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -use crate as elections; - parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -51,26 +52,23 @@ impl frame_system::Trait for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; } -impl pallet_balances::Trait for Test { +impl pallet_balances::Config for Test { type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); type Event = Event; @@ -85,34 +83,11 @@ parameter_types! { pub const InactiveGracePeriod: u32 = 1; pub const VotingPeriod: u64 = 4; pub const MinimumVotingLock: u64 = 5; -} - -thread_local! { - static VOTER_BOND: RefCell = RefCell::new(0); - static VOTING_FEE: RefCell = RefCell::new(0); - static PRESENT_SLASH_PER_VOTER: RefCell = RefCell::new(0); - static DECAY_RATIO: RefCell = RefCell::new(0); - static MEMBERS: RefCell> = RefCell::new(vec![]); -} - -pub struct VotingBond; -impl Get for VotingBond { - fn get() -> u64 { VOTER_BOND.with(|v| *v.borrow()) } -} - -pub struct VotingFee; -impl Get for VotingFee { - fn get() -> u64 { VOTING_FEE.with(|v| *v.borrow()) } -} - -pub struct PresentSlashPerVoter; -impl Get for PresentSlashPerVoter { - fn get() -> u64 { PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow()) } -} - -pub struct DecayRatio; -impl Get for DecayRatio { - fn get() -> u32 { DECAY_RATIO.with(|v| *v.borrow()) } + pub static VotingBond: u64 = 0; + pub static VotingFee: u64 = 0; + pub static PresentSlashPerVoter: u64 = 0; + pub static DecayRatio: u32 = 0; + pub static Members: Vec = vec![]; } pub struct TestChangeMembers; @@ -130,11 +105,11 @@ impl ChangeMembers for TestChangeMembers { } } -parameter_types!{ - pub const ElectionModuleId: LockIdentifier = *b"py/elect"; +parameter_types! { + pub const ElectionPalletId: LockIdentifier = *b"py/elect"; } -impl elections::Trait for Test { +impl elections::Config for Test { type Event = Event; type Currency = Balances; type BadPresentation = (); @@ -151,7 +126,7 @@ impl elections::Trait for Test { type InactiveGracePeriod = InactiveGracePeriod; type VotingPeriod = VotingPeriod; type DecayRatio = DecayRatio; - type ModuleId = ElectionModuleId; + type PalletId = ElectionPalletId; } pub type Block = sp_runtime::generic::Block; @@ -164,9 +139,9 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Event}, - Balances: pallet_balances::{Module, Call, Event, Config}, - Elections: elections::{Module, Call, Event, Config}, + System: system::{Pallet, Call, Event}, + Balances: pallet_balances::{Pallet, Call, Event, Config}, + Elections: elections::{Pallet, Call, Event, Config}, } ); @@ -175,7 +150,7 @@ pub struct ExtBuilder { decay_ratio: u32, desired_seats: u32, voting_fee: u64, - voter_bond: u64, + voting_bond: u64, bad_presentation_punishment: u64, } @@ -186,7 +161,7 @@ impl Default for ExtBuilder { decay_ratio: 24, desired_seats: 2, voting_fee: 0, - voter_bond: 0, + voting_bond: 0, bad_presentation_punishment: 1, } } @@ -209,8 +184,8 @@ impl ExtBuilder { self.bad_presentation_punishment = fee; self } - pub fn voter_bond(mut self, fee: u64) -> Self { - self.voter_bond = fee; + pub fn voting_bond(mut self, fee: u64) -> Self { + self.voting_bond = fee; self } pub fn desired_seats(mut self, seats: u32) -> Self { @@ -218,61 +193,60 @@ impl ExtBuilder { self } pub fn build(self) -> sp_io::TestExternalities { - VOTER_BOND.with(|v| *v.borrow_mut() = self.voter_bond); + VOTING_BOND.with(|v| *v.borrow_mut() = self.voting_bond); VOTING_FEE.with(|v| *v.borrow_mut() = self.voting_fee); PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); let mut ext: 
sp_io::TestExternalities = GenesisConfig { - pallet_balances: Some(pallet_balances::GenesisConfig::{ + balances: pallet_balances::GenesisConfig:: { balances: vec![ (1, 10 * self.balance_factor), (2, 20 * self.balance_factor), (3, 30 * self.balance_factor), (4, 40 * self.balance_factor), (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) + (6, 60 * self.balance_factor), ], - }), - elections: Some(elections::GenesisConfig::{ + }, + elections: elections::GenesisConfig:: { members: vec![], desired_seats: self.desired_seats, presentation_duration: 2, term_duration: 5, - }), - }.build_storage().unwrap().into(); + }, + } + .build_storage() + .unwrap() + .into(); ext.execute_with(|| System::set_block_number(1)); ext } } pub(crate) fn voter_ids() -> Vec { - Elections::all_voters().iter().map(|v| v.unwrap_or(0) ).collect::>() + Elections::all_voters().iter().map(|v| v.unwrap_or(0)).collect::>() } pub(crate) fn vote(i: u64, l: usize) { let _ = Balances::make_free_balance_be(&i, 20); - assert_ok!( - Elections::set_approvals( - Origin::signed(i), - (0..l).map(|_| true).collect::>(), - 0, - 0, - 20, - ) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(i), + (0..l).map(|_| true).collect::>(), + 0, + 0, + 20, + )); } pub(crate) fn vote_at(i: u64, l: usize, index: elections::VoteIndex) { let _ = Balances::make_free_balance_be(&i, 20); - assert_ok!( - Elections::set_approvals( - Origin::signed(i), - (0..l).map(|_| true).collect::>(), - 0, - index, - 20, - ) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(i), + (0..l).map(|_| true).collect::>(), + 0, + index, + 20, + )); } pub(crate) fn create_candidate(i: u64, index: u32) { @@ -292,7 +266,7 @@ pub(crate) fn new_test_ext_with_candidate_holes() -> sp_io::TestExternalities { let mut t = ExtBuilder::default().build(); t.execute_with(|| { >::put(vec![0, 0, 1]); - elections::CandidateCount::put(1); + elections::CandidateCount::::put(1); >::insert(1, (0, 2)); }); t diff --git a/frame/elections/src/tests.rs b/frame/elections/src/tests.rs index 92f6e11252b05..0df84c6d79baf 100644 --- a/frame/elections/src/tests.rs +++ b/frame/elections/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,10 +19,9 @@ #![cfg(test)] -use crate::mock::*; -use crate::*; +use crate::{mock::*, *}; -use frame_support::{assert_ok, assert_err, assert_noop}; +use frame_support::{assert_err, assert_noop, assert_ok}; #[test] fn params_should_work() { @@ -60,38 +59,23 @@ fn chunking_bool_to_flag_should_work() { assert_eq!(Elections::bool_to_flag(vec![true, true, true, true, true]), vec![15 + 16]); let set_1 = vec![ - true, false, false, false, // 0x1 - false, true, true, true, // 0xE + true, false, false, false, // 0x1 + false, true, true, true, // 0xE ]; - assert_eq!( - Elections::bool_to_flag(set_1.clone()), - vec![0x00_00_00_E1_u32] - ); - assert_eq!( - Elections::flag_to_bool(vec![0x00_00_00_E1_u32]), - set_1 - ); + assert_eq!(Elections::bool_to_flag(set_1.clone()), vec![0x00_00_00_E1_u32]); + assert_eq!(Elections::flag_to_bool(vec![0x00_00_00_E1_u32]), set_1); let set_2 = vec![ - false, false, false, true, // 0x8 - false, true, false, true, // 0xA + false, false, false, true, // 0x8 + false, true, false, true, // 0xA ]; - assert_eq!( - Elections::bool_to_flag(set_2.clone()), - vec![0x00_00_00_A8_u32] - ); - assert_eq!( - Elections::flag_to_bool(vec![0x00_00_00_A8_u32]), - set_2 - ); + assert_eq!(Elections::bool_to_flag(set_2.clone()), vec![0x00_00_00_A8_u32]); + assert_eq!(Elections::flag_to_bool(vec![0x00_00_00_A8_u32]), set_2); - let mut rhs = (0..100/APPROVAL_FLAG_LEN).map(|_| 0xFFFFFFFF_u32).collect::<Vec<u32>>(); + let mut rhs = (0..100 / APPROVAL_FLAG_LEN).map(|_| 0xFFFFFFFF_u32).collect::<Vec<u32>>(); // NOTE: this might need to change based on `APPROVAL_FLAG_LEN`. rhs.extend(vec![0x00_00_00_0F]); - assert_eq!( - Elections::bool_to_flag((0..100).map(|_| true).collect()), - rhs - ) + assert_eq!(Elections::bool_to_flag((0..100).map(|_| true).collect()), rhs) }) } @@ -160,7 +144,7 @@ fn chunking_voter_set_reclaim_should_work() { fn chunking_approvals_set_growth_should_work() { ExtBuilder::default().build().execute_with(|| { // create candidates and voters. - (1..=250).for_each(|i| create_candidate(i, (i-1) as u32)); + (1..=250).for_each(|i| create_candidate(i, (i - 1) as u32)); (1..=250).for_each(|i| vote(i, i as usize)); // `all_approvals_of` should return the exact expected vector. assert_eq!( Elections::all_approvals_of(&180), (0..180).map(|_| true).collect::<Vec<bool>>() ); - assert_eq!( - Elections::all_approvals_of(&32), - (0..32).map(|_| true).collect::<Vec<bool>>() - ); - assert_eq!( - Elections::all_approvals_of(&8), - (0..8).map(|_| true).collect::<Vec<bool>>() - ); - assert_eq!( - Elections::all_approvals_of(&64), - (0..64).map(|_| true).collect::<Vec<bool>>() - ); - assert_eq!( - Elections::all_approvals_of(&65), - (0..65).map(|_| true).collect::<Vec<bool>>() - ); - assert_eq!( - Elections::all_approvals_of(&63), - (0..63).map(|_| true).collect::<Vec<bool>>() - ); + assert_eq!(Elections::all_approvals_of(&32), (0..32).map(|_| true).collect::<Vec<bool>>()); + assert_eq!(Elections::all_approvals_of(&8), (0..8).map(|_| true).collect::<Vec<bool>>()); + assert_eq!(Elections::all_approvals_of(&64), (0..64).map(|_| true).collect::<Vec<bool>>()); + assert_eq!(Elections::all_approvals_of(&65), (0..65).map(|_| true).collect::<Vec<bool>>()); + assert_eq!(Elections::all_approvals_of(&63), (0..63).map(|_| true).collect::<Vec<bool>>()); // NOTE: assuming that APPROVAL_SET_SIZE is more or less small-ish. Might fail otherwise.
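// A worked instance of the chunking arithmetic used just below (illustrative
// only: APPROVAL_FLAG_LEN is 32 here, as implied by the `0xFFFFFFFF_u32`
// words above, while APPROVAL_SET_SIZE = 8 is an assumed value for this
// sketch). For the voter with 180 approvals:
//
//   180 / 32 = 5 complete flag words, with 180 % 32 = 20 trailing bits;
//   5 / 8 = 0 full sets of words, with 5 % 8 = 5 words left over.
//
// Everything therefore lands in a single partial set: `full_sets = 0`,
// `left_over = 5`, `rem = 20`, which is exactly what the
// `approvals_of((180, _))` branches below exercise.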
let full_sets = (180 / APPROVAL_FLAG_LEN) / APPROVAL_SET_SIZE; @@ -197,10 +166,9 @@ fn chunking_approvals_set_growth_should_work() { // grab and check the last full set, if it exists. if full_sets > 0 { assert_eq!( - Elections::approvals_of((180, (full_sets-1) as SetIndex )), + Elections::approvals_of((180, (full_sets - 1) as SetIndex)), Elections::bool_to_flag( - (0..APPROVAL_SET_SIZE * APPROVAL_FLAG_LEN) - .map(|_| true).collect::>() + (0..APPROVAL_SET_SIZE * APPROVAL_FLAG_LEN).map(|_| true).collect::>() ) ); } @@ -210,8 +178,7 @@ fn chunking_approvals_set_growth_should_work() { assert_eq!( Elections::approvals_of((180, full_sets as SetIndex)), Elections::bool_to_flag( - (0..left_over * APPROVAL_FLAG_LEN + rem) - .map(|_| true).collect::>() + (0..left_over * APPROVAL_FLAG_LEN + rem).map(|_| true).collect::>() ) ); } @@ -298,7 +265,7 @@ fn voting_initial_set_approvals_ignores_voter_index() { } #[test] fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { - ExtBuilder::default().voting_fee(5).voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_fee(5).voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); (1..=63).for_each(|i| vote(i, 0)); @@ -311,7 +278,7 @@ fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { assert_eq!(balances(&64), (18, 2)); assert_eq!( Elections::voter_info(&64).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 20, pot:0 } + VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 } ); assert_eq!(Elections::next_nonfull_voter_set(), 1); @@ -321,7 +288,7 @@ fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { assert_eq!(balances(&65), (13, 2)); assert_eq!( Elections::voter_info(&65).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 15, pot:0 } + VoterInfo { last_win: 0, last_active: 0, stake: 15, pot: 0 } ); }); } @@ -365,7 +332,7 @@ fn voting_cannot_lock_less_than_limit() { #[test] fn voting_locking_more_than_total_balance_is_moot() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); assert_eq!(balances(&3), (30, 0)); @@ -374,14 +341,14 @@ fn voting_locking_more_than_total_balance_is_moot() { assert_eq!(balances(&3), (28, 2)); assert_eq!( Elections::voter_info(&3).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 30, pot:0 } + VoterInfo { last_win: 0, last_active: 0, stake: 30, pot: 0 } ); }); } #[test] fn voting_locking_stake_and_reserving_bond_works() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_eq!(balances(&2), (20, 0)); @@ -424,7 +391,7 @@ fn voting_setting_an_approval_vote_count_more_than_candidate_count_should_not_wo assert_eq!(Elections::candidates().len(), 1); assert_noop!( - Elections::set_approvals(Origin::signed(4),vec![true, true], 0, 0, 40), + Elections::set_approvals(Origin::signed(4), vec![true, true], 0, 0, 40), Error::::TooManyVotes, ); }); @@ -498,7 +465,10 @@ fn voting_invalid_retraction_index_should_not_work() { assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); assert_eq!(voter_ids(), vec![1, 2]); - assert_noop!(Elections::retract_voter(Origin::signed(1), 1), Error::::InvalidRetractionIndex); + 
assert_noop!( + Elections::retract_voter(Origin::signed(1), 1), + Error::::InvalidRetractionIndex + ); }); } @@ -508,7 +478,10 @@ fn voting_overflow_retraction_index_should_not_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_noop!(Elections::retract_voter(Origin::signed(1), 1), Error::::InvalidRetractionIndex); + assert_noop!( + Elections::retract_voter(Origin::signed(1), 1), + Error::::InvalidRetractionIndex + ); }); } @@ -518,7 +491,10 @@ fn voting_non_voter_retraction_should_not_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_noop!(Elections::retract_voter(Origin::signed(2), 0), Error::::RetractNonVoter); + assert_noop!( + Elections::retract_voter(Origin::signed(2), 0), + Error::::RetractNonVoter + ); }); } @@ -543,9 +519,11 @@ fn retracting_inactive_voter_should_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_ok!(Elections::reap_inactive_voter(Origin::signed(5), + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(5), (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), 2 )); @@ -558,7 +536,7 @@ fn retracting_inactive_voter_should_work() { #[test] fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { + ExtBuilder::default().voting_bond(2).build().execute_with(|| { System::set_block_number(4); assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); @@ -580,9 +558,11 @@ fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { System::set_block_number(11); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Elections::reap_inactive_voter(Origin::signed(5), + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(5), (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), 2 )); @@ -612,11 +592,16 @@ fn retracting_inactive_voter_with_bad_reporter_index_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_noop!(Elections::reap_inactive_voter(Origin::signed(2), - 42, - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), Error::::InvalidReporterIndex); + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(2), + 42, + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + Error::::InvalidReporterIndex + ); }); } @@ -641,11 +626,16 @@ fn retracting_inactive_voter_with_bad_target_index_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_noop!(Elections::reap_inactive_voter(Origin::signed(2), - (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2, 42, - 2 - ), Error::::InvalidTargetIndex); + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(2), + 
(voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + 42, + 2 + ), + Error::::InvalidTargetIndex + ); }); } @@ -657,10 +647,34 @@ fn retracting_active_voter_should_slash_reporter() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 2)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false, false, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, true, false, false], 0, 0, 30)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, true, false], 0, 0, 40)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false, false, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, true, false, false], + 0, + 0, + 30 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, true, false], + 0, + 0, + 40 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -675,16 +689,30 @@ fn retracting_active_voter_should_slash_reporter() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20 + Elections::get_offset(20, 1), 1)); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30 + Elections::get_offset(30, 1), 1)); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 2, + 20 + Elections::get_offset(20, 1), + 1 + )); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 3, + 30 + Elections::get_offset(30, 1), + 1 + )); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::vote_index(), 2); - assert_eq!(::InactiveGracePeriod::get(), 1); - assert_eq!(::VotingPeriod::get(), 4); - assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 1, last_active: 0, stake: 40, pot: 0 })); + assert_eq!(::InactiveGracePeriod::get(), 1); + assert_eq!(::VotingPeriod::get(), 4); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 40, pot: 0 }) + ); - assert_ok!(Elections::reap_inactive_voter(Origin::signed(4), + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(4), (voter_ids().iter().position(|&i| i == 4).unwrap() as u32).into(), 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), @@ -718,11 +746,16 @@ fn retracting_inactive_voter_by_nonvoter_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_noop!(Elections::reap_inactive_voter(Origin::signed(4), - 0, - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), Error::::NotVoter); + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(4), + 0, + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + Error::::NotVoter + ); }); } @@ -933,7 +966,7 @@ fn election_seats_should_be_released() { assert_ok!(Elections::end_block(System::block_number())); if Elections::members().len() == 0 { free_block = current; - break; + break } } // 11 + 2 which is the next voting period. 
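// The reporter/target indices handed to `reap_inactive_voter` in the tests
// above are all derived with the same `position` dance. A hypothetical
// shared helper (a sketch only, part of neither the pallet nor the mock)
// would read:
fn voter_index_of(who: u64) -> u32 {
	// `reap_inactive_voter` takes reporter and target as indices into the
	// flattened voter list, which `voter_ids()` (from mock.rs) reproduces.
	voter_ids().iter().position(|&i| i == who).expect("voter must exist") as u32
}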
@@ -1021,9 +1054,21 @@ fn election_presenting_loser_should_not_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1032,14 +1077,12 @@ fn election_presenting_loser_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)])); - assert_noop!(Elections::present_winner(Origin::signed(4), 2, 20, 0), Error::::UnworthyCandidate); + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 20, 0), + Error::::UnworthyCandidate + ); }); } @@ -1054,9 +1097,21 @@ fn election_presenting_loser_first_should_not_matter() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1066,12 +1121,7 @@ fn election_presenting_loser_first_should_not_matter() { assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)])); }); } @@ -1098,7 +1148,10 @@ fn election_present_with_invalid_vote_index_should_not_work() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); - assert_noop!(Elections::present_winner(Origin::signed(4), 2, 20, 1), Error::::InvalidVoteIndex); + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 20, 1), + Error::::InvalidVoteIndex + ); }); } @@ -1107,7 +1160,7 @@ fn election_present_when_presenter_is_poor_should_not_work() { let test_present = |p| { ExtBuilder::default() .voting_fee(5) - .voter_bond(2) + .voting_bond(2) .bad_presentation_punishment(p) .build() .execute_with(|| { @@ -1115,10 +1168,10 @@ fn election_present_when_presenter_is_poor_should_not_work() { let _ = 
Balances::make_free_balance_be(&1, 15); assert!(!Elections::presentation_active()); - // -3 + // -3 assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 12); - // -2 -5 + // -2 -5 assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 15)); assert_ok!(Elections::end_block(System::block_number())); @@ -1126,8 +1179,8 @@ fn election_present_when_presenter_is_poor_should_not_work() { assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 5); if p > 5 { - assert_noop!(Elections::present_winner( - Origin::signed(1), 1, 10, 0), + assert_noop!( + Elections::present_winner(Origin::signed(1), 1, 10, 0), Error::::InsufficientPresenterFunds, ); } else { @@ -1153,7 +1206,10 @@ fn election_invalid_present_tally_should_slash() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); - assert_err!(Elections::present_winner(Origin::signed(4), 2, 80, 0), Error::::IncorrectTotal); + assert_err!( + Elections::present_winner(Origin::signed(4), 2, 80, 0), + Error::::IncorrectTotal + ); assert_eq!(Balances::total_balance(&4), 38); }); @@ -1172,9 +1228,21 @@ fn election_runners_up_should_be_kept() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); @@ -1183,21 +1251,11 @@ fn election_runners_up_should_be_kept() { assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); // leaderboard length is the empty seats plus the carry count (i.e. 
5 + 2), where those // to be carried are the lowest and stored in lowest indices - assert_eq!(Elections::leaderboard(), Some(vec![ - (0, 0), - (0, 0), - (0, 0), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (0, 0), (0, 0), (60, 1)])); assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)])); assert_ok!(Elections::end_block(System::block_number())); @@ -1210,11 +1268,26 @@ fn election_runners_up_should_be_kept() { assert!(Elections::is_a_candidate(&3)); assert!(Elections::is_a_candidate(&4)); assert_eq!(Elections::vote_index(), 1); - assert_eq!(Elections::voter_info(2), Some(VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 })); - assert_eq!(Elections::voter_info(3), Some(VoterInfo { last_win: 0, last_active: 0, stake: 30, pot: 0 })); - assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0 })); - assert_eq!(Elections::voter_info(5), Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 })); - assert_eq!(Elections::voter_info(6), Some(VoterInfo { last_win: 1, last_active: 0, stake: 60, pot: 0 })); + assert_eq!( + Elections::voter_info(2), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(3), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 30, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(5), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(6), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 60, pot: 0 }) + ); assert_eq!(Elections::candidate_reg_info(3), Some((0, 2))); assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); }); @@ -1231,9 +1304,21 @@ fn election_second_tally_should_use_runners_up() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1244,13 +1329,29 @@ fn election_second_tally_should_use_runners_up() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(8); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![false, false, true, false], 1, 0, 60)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![false, false, true, false], + 1, + 0, + 60 + )); assert_ok!(Elections::set_desired_seats(Origin::root(), 3)); assert_ok!(Elections::end_block(System::block_number())); 
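// The totals presented below fold the decayed carry-over ("pot") into the
// new tally: candidate 3 is backed by voter 3's stake of 30 plus its accrued
// offset plus voter 6's fresh 60, while candidate 4 carries only voter 4's
// stake of 40 plus its offset.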
System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30 + Elections::get_offset(30, 1) + 60, 1)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40 + Elections::get_offset(40, 1), 1)); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 3, + 30 + Elections::get_offset(30, 1) + 60, + 1 + )); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 4, + 40 + Elections::get_offset(40, 1), + 1 + )); assert_ok!(Elections::end_block(System::block_number())); assert!(!Elections::presentation_active()); @@ -1262,13 +1363,25 @@ fn election_second_tally_should_use_runners_up() { assert!(!Elections::is_a_candidate(&5)); assert!(Elections::is_a_candidate(&4)); assert_eq!(Elections::vote_index(), 2); - assert_eq!(Elections::voter_info(2), Some( VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0})); - assert_eq!(Elections::voter_info(3), Some( VoterInfo { last_win: 2, last_active: 0, stake: 30, pot: 0})); - assert_eq!(Elections::voter_info(4), Some( VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0})); - assert_eq!(Elections::voter_info(5), Some( VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0})); + assert_eq!( + Elections::voter_info(2), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(3), + Some(VoterInfo { last_win: 2, last_active: 0, stake: 30, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(5), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 }) + ); assert_eq!( Elections::voter_info(6), - Some(VoterInfo { last_win: 2, last_active: 1, stake: 60, pot: 0}) + Some(VoterInfo { last_win: 2, last_active: 1, stake: 60, pot: 0 }) ); assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); @@ -1289,9 +1402,13 @@ fn election_loser_candidates_bond_gets_slashed() { assert_eq!(balances(&2), (17, 3)); assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 0, 0, 50)); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, true, true, true], 0, 0, 10) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, true, true, true], + 0, + 0, + 10 + )); assert_ok!(Elections::end_block(System::block_number())); @@ -1302,7 +1419,6 @@ fn election_loser_candidates_bond_gets_slashed() { assert_eq!(Elections::present_winner(Origin::signed(2), 2, 10, 0), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(1), 1, 50, 0), Ok(())); - // winner + carry assert_eq!(Elections::leaderboard(), Some(vec![(10, 3), (10, 4), (50, 1)])); assert_ok!(Elections::end_block(System::block_number())); @@ -1324,15 +1440,27 @@ fn pot_accumulating_weight_and_decaying_should_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 0, 0, 600) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 0, 0, 500) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 0, 0, 100) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 0, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 0, + 0, + 500 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, 
false, true], + 0, + 0, + 100 + )); assert_ok!(Elections::end_block(System::block_number())); @@ -1348,15 +1476,15 @@ fn pot_accumulating_weight_and_decaying_should_work() { assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 1, last_active: 0, stake: 600, pot: 0}, + VoterInfo { last_win: 1, last_active: 0, stake: 600, pot: 0 }, ); assert_eq!( Elections::voter_info(5).unwrap(), - VoterInfo { last_win: 1, last_active: 0, stake: 500, pot: 0}, + VoterInfo { last_win: 1, last_active: 0, stake: 500, pot: 0 }, ); assert_eq!( Elections::voter_info(1).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}, + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 }, ); System::set_block_number(12); @@ -1365,80 +1493,144 @@ fn pot_accumulating_weight_and_decaying_should_work() { assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 1, 0, 600) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 1, 1, 500) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 1, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 1, + 1, + 500 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(14); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 1), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 1), 1), Ok(())); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 1), 1), + Ok(()) + ); assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96, 1), (500, 5), (600, 6)])); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(6, 19), (5, 19)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 2, last_active: 1, stake: 600, pot:0 } + VoterInfo { last_win: 2, last_active: 1, stake: 600, pot: 0 } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { last_win: 2, last_active: 1, stake: 500, pot: 0 } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 } ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 2, last_active: 1, stake: 500, pot:0 }); - assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot:0 }); System::set_block_number(20); assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false], 2, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true, false], 2, 1, 500)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 2, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 2, + 1, + 500 + )); 
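// Bookkeeping pattern across these rounds: each resubmission advances
// `last_active` to the current vote index, and a backed win records
// `last_win` as that index plus one (see `Self::vote_index() + 1` in
// lib.rs), which the `voter_info` assertions below keep confirming.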
assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(22); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 2), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 2), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 2), 2), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96 + 93, 1), (500, 5), (600, 6)])); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 2), 2), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100 + 96 + 93, 1), (500, 5), (600, 6)]) + ); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(6, 27), (5, 27)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 3, last_active: 2, stake: 600, pot: 0} + VoterInfo { last_win: 3, last_active: 2, stake: 600, pot: 0 } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { last_win: 3, last_active: 2, stake: 500, pot: 0 } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 } ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 3, last_active: 2, stake: 500, pot: 0}); - assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}); - System::set_block_number(28); assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false], 3, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true, false], 3, 1, 500)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 3, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 3, + 1, + 500 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(30); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 3), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 3), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 3), 3), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96 + 93 + 90, 1), (500, 5), (600, 6)])); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 3), 3), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100 + 96 + 93 + 90, 1), (500, 5), (600, 6)]) + ); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(6, 35), (5, 35)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 4, last_active: 3, stake: 600, pot: 0} + VoterInfo { last_win: 4, last_active: 3, stake: 600, pot: 0 } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { last_win: 4, last_active: 3, stake: 500, pot: 0 } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 } ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 4, last_active: 3, stake: 500, pot: 0}); - 
assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}); }) } @@ -1453,9 +1645,27 @@ fn pot_winning_resets_accumulated_pot() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::submit_candidacy(Origin::signed(2), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false, false], 0, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, true, false, false], 0, 1, 400)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true, true], 0, 2, 300)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false, false], + 0, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, true, false, false], + 0, + 1, + 400 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false, true, true], + 0, + 2, + 300 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1474,16 +1684,34 @@ fn pot_winning_resets_accumulated_pot() { assert_ok!(Elections::retract_voter(Origin::signed(4), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false, false], 1, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, true, false, false], 1, 1, 400)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false, false], + 1, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, true, false, false], + 1, + 1, + 400 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(14); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 1), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(4), 4, 400, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(3), 3, 300 + Elections::get_offset(300, 1), 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(2), 2, 300 + Elections::get_offset(300, 1), 1), Ok(())); + assert_eq!( + Elections::present_winner(Origin::signed(3), 3, 300 + Elections::get_offset(300, 1), 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(2), 2, 300 + Elections::get_offset(300, 1), 1), + Ok(()) + ); assert_eq!(Elections::leaderboard(), Some(vec![(400, 4), (588, 2), (588, 3), (600, 6)])); assert_ok!(Elections::end_block(System::block_number())); @@ -1497,7 +1725,10 @@ fn pot_winning_resets_accumulated_pot() { // because one of 3's candidates (3) won in previous round // 4 on the other hand will get extra weight since it was unlucky. 
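// Concretely: the round-1 win reset voter 3's accumulated pot, so candidate
// 2 is presented with the bare stake of 300, while voter 4's loss left its
// pot accruing and candidate 4 carries 400 + get_offset(400, 1).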
assert_eq!(Elections::present_winner(Origin::signed(3), 2, 300, 2), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(4), 4, 400 + Elections::get_offset(400, 1), 2), Ok(())); + assert_eq!( + Elections::present_winner(Origin::signed(4), 4, 400 + Elections::get_offset(400, 1), 2), + Ok(()) + ); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(4, 27), (2, 27)]); @@ -1507,7 +1738,7 @@ fn pot_winning_resets_accumulated_pot() { #[test] fn pot_resubmitting_approvals_stores_pot() { ExtBuilder::default() - .voter_bond(0) + .voting_bond(0) .voting_fee(0) .balance_factor(10) .build() @@ -1519,15 +1750,27 @@ fn pot_resubmitting_approvals_stores_pot() { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 0, 0, 600), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 0, 1, 500), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 0, 2, 100), - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 0, + 0, + 600 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 0, + 1, + 500 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, false, true], + 0, + 2, + 100 + ),); assert_ok!(Elections::end_block(System::block_number())); @@ -1547,18 +1790,31 @@ fn pot_resubmitting_approvals_stores_pot() { assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 1, 0, 600), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 1, 1, 500), - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 1, + 0, + 600 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 1, + 1, + 500 + ),); // give 1 some new high balance let _ = Balances::make_free_balance_be(&1, 997); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 1, 2, 1000), - ); - assert_eq!(Elections::voter_info(1).unwrap(), + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, false, true], + 1, + 2, + 1000 + ),); + assert_eq!( + Elections::voter_info(1).unwrap(), VoterInfo { stake: 1000, // 997 + 3 which is candidacy bond. 
pot: Elections::get_offset(100, 1), @@ -1599,7 +1855,10 @@ fn pot_get_offset_should_work() { assert_eq!(Elections::get_offset(50_000_000_000, 0), 0); assert_eq!(Elections::get_offset(50_000_000_000, 1), 48_000_000_000); assert_eq!(Elections::get_offset(50_000_000_000, 2), 48_000_000_000 + 46_080_000_000); - assert_eq!(Elections::get_offset(50_000_000_000, 3), 48_000_000_000 + 46_080_000_000 + 44_236_800_000); + assert_eq!( + Elections::get_offset(50_000_000_000, 3), + 48_000_000_000 + 46_080_000_000 + 44_236_800_000 + ); assert_eq!( Elections::get_offset(50_000_000_000, 4), 48_000_000_000 + 46_080_000_000 + 44_236_800_000 + 42_467_328_000 diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml deleted file mode 100644 index a228dfb566be2..0000000000000 --- a/frame/evm/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "pallet-evm" -version = "2.0.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME EVM contracts pallet" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -primitive-types = { version = "0.7.0", default-features = false, features = ["rlp", "byteorder"] } -rlp = { version = "0.4", default-features = false } -evm = { version = "0.17", default-features = false } -sha3 = { version = "0.8", default-features = false } -impl-trait-for-tuples = "0.1" -ripemd160 = { version = "0.9", default-features = false } - -[features] -default = ["std"] -std = [ - "serde", - "codec/std", - "sp-core/std", - "sp-runtime/std", - "frame-support/std", - "frame-system/std", - "pallet-balances/std", - "sp-io/std", - "sp-std/std", - "sha3/std", - "rlp/std", - "primitive-types/std", - "evm/std", - "pallet-timestamp/std", - "ripemd160/std", -] diff --git a/frame/evm/README.md b/frame/evm/README.md deleted file mode 100644 index f8feadbf58eb4..0000000000000 --- a/frame/evm/README.md +++ /dev/null @@ -1,3 +0,0 @@ -EVM execution module for Substrate - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/evm/src/backend.rs b/frame/evm/src/backend.rs deleted file mode 100644 index b625c0c548026..0000000000000 --- a/frame/evm/src/backend.rs +++ /dev/null @@ -1,216 +0,0 @@ -use sp_std::marker::PhantomData; -use sp_std::vec::Vec; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; -use sp_core::{U256, H256, H160}; -use sp_runtime::traits::UniqueSaturatedInto; -use frame_support::traits::Get; -use frame_support::{debug, storage::{StorageMap, StorageDoubleMap}}; -use sha3::{Keccak256, Digest}; 
-use evm::backend::{Backend as BackendT, ApplyBackend, Apply}; -use crate::{Trait, AccountStorages, AccountCodes, Module, Event}; - -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// Ethereum account nonce, balance and code. Used by storage. -pub struct Account { - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, -} - -#[derive(Clone, Eq, PartialEq, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// Ethereum log. Used for `deposit_event`. -pub struct Log { - /// Source address of the log. - pub address: H160, - /// Topics of the log. - pub topics: Vec, - /// Byte array data of the log. - pub data: Vec, -} - -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] -#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -/// External input from the transaction. -pub struct Vicinity { - /// Current transaction gas price. - pub gas_price: U256, - /// Origin of the transaction. - pub origin: H160, -} - -/// Substrate backend for EVM. -pub struct Backend<'vicinity, T> { - vicinity: &'vicinity Vicinity, - _marker: PhantomData, -} - -impl<'vicinity, T> Backend<'vicinity, T> { - /// Create a new backend with given vicinity. - pub fn new(vicinity: &'vicinity Vicinity) -> Self { - Self { vicinity, _marker: PhantomData } - } -} - -impl<'vicinity, T: Trait> BackendT for Backend<'vicinity, T> { - fn gas_price(&self) -> U256 { self.vicinity.gas_price } - fn origin(&self) -> H160 { self.vicinity.origin } - - fn block_hash(&self, number: U256) -> H256 { - if number > U256::from(u32::max_value()) { - H256::default() - } else { - let number = T::BlockNumber::from(number.as_u32()); - H256::from_slice(frame_system::Module::::block_hash(number).as_ref()) - } - } - - fn block_number(&self) -> U256 { - let number: u128 = frame_system::Module::::block_number().unique_saturated_into(); - U256::from(number) - } - - fn block_coinbase(&self) -> H160 { - H160::default() - } - - fn block_timestamp(&self) -> U256 { - let now: u128 = pallet_timestamp::Module::::get().unique_saturated_into(); - U256::from(now / 1000) - } - - fn block_difficulty(&self) -> U256 { - U256::zero() - } - - fn block_gas_limit(&self) -> U256 { - U256::zero() - } - - fn chain_id(&self) -> U256 { - U256::from(T::ChainId::get()) - } - - fn exists(&self, _address: H160) -> bool { - true - } - - fn basic(&self, address: H160) -> evm::backend::Basic { - let account = Module::::account_basic(&address); - - evm::backend::Basic { - balance: account.balance, - nonce: account.nonce, - } - } - - fn code_size(&self, address: H160) -> usize { - AccountCodes::decode_len(&address).unwrap_or(0) - } - - fn code_hash(&self, address: H160) -> H256 { - H256::from_slice(Keccak256::digest(&AccountCodes::get(&address)).as_slice()) - } - - fn code(&self, address: H160) -> Vec { - AccountCodes::get(&address) - } - - fn storage(&self, address: H160, index: H256) -> H256 { - AccountStorages::get(address, index) - } -} - -impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { - fn apply( - &mut self, - values: A, - logs: L, - delete_empty: bool, - ) where - A: IntoIterator>, - I: IntoIterator, - L: IntoIterator, - { - for apply in values { - match apply { - Apply::Modify { - address, basic, code, storage, reset_storage, - } => { - Module::::mutate_account_basic(&address, Account { - nonce: basic.nonce, - balance: basic.balance, - }); - - if let Some(code) = code { - debug::debug!( - target: "evm", - 
"Inserting code ({} bytes) at {:?}", - code.len(), - address - ); - AccountCodes::insert(address, code); - } - - if reset_storage { - AccountStorages::remove_prefix(address); - } - - for (index, value) in storage { - if value == H256::default() { - debug::debug!( - target: "evm", - "Removing storage for {:?} [index: {:?}]", - address, - index - ); - AccountStorages::remove(address, index); - } else { - debug::debug!( - target: "evm", - "Updating storage for {:?} [index: {:?}, value: {:?}]", - address, - index, - value - ); - AccountStorages::insert(address, index, value); - } - } - - if delete_empty { - Module::::remove_account_if_empty(&address); - } - }, - Apply::Delete { address } => { - debug::debug!( - target: "evm", - "Deleting account at {:?}", - address - ); - Module::::remove_account(&address) - }, - } - } - - for log in logs { - debug::trace!( - target: "evm", - "Inserting log for {:?}, topics ({}) {:?}, data ({}): {:?}]", - log.address, - log.topics.len(), - log.topics, - log.data.len(), - log.data - ); - Module::::deposit_event(Event::::Log(Log { - address: log.address, - topics: log.topics, - data: log.data, - })); - } - } -} diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs deleted file mode 100644 index dddb71fc02a74..0000000000000 --- a/frame/evm/src/lib.rs +++ /dev/null @@ -1,645 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! EVM execution module for Substrate - -// Ensure we're `no_std` when compiling for Wasm. -#![cfg_attr(not(feature = "std"), no_std)] - -mod backend; -mod tests; -pub mod precompiles; - -pub use crate::precompiles::{Precompile, Precompiles}; -pub use crate::backend::{Account, Log, Vicinity, Backend}; - -use sp_std::vec::Vec; -#[cfg(feature = "std")] -use codec::{Encode, Decode}; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use frame_support::{debug, ensure, decl_module, decl_storage, decl_event, decl_error}; -use frame_support::weights::{Weight, Pays}; -use frame_support::traits::{Currency, ExistenceRequirement, Get}; -use frame_support::dispatch::DispatchResultWithPostInfo; -use frame_system::RawOrigin; -use sp_core::{U256, H256, H160, Hasher}; -use sp_runtime::{AccountId32, traits::{UniqueSaturatedInto, SaturatedConversion, BadOrigin}}; -use sha3::{Digest, Keccak256}; -pub use evm::{ExitReason, ExitSucceed, ExitError, ExitRevert, ExitFatal}; -use evm::Config; -use evm::executor::StackExecutor; -use evm::backend::ApplyBackend; - -/// Type alias for currency balance. -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -/// Trait that outputs the current transaction gas price. -pub trait FeeCalculator { - /// Return the minimal required gas price. - fn min_gas_price() -> U256; -} - -impl FeeCalculator for () { - fn min_gas_price() -> U256 { U256::zero() } -} - -pub trait EnsureAddressOrigin { - /// Success return type. 
- type Success; - - /// Perform the origin check. - fn ensure_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - Self::try_address_origin(address, origin).map_err(|_| BadOrigin) - } - - /// Try with origin. - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result; -} - -/// Ensure that the EVM address is the same as the Substrate address. This only works if the account -/// ID is `H160`. -pub struct EnsureAddressSame; - -impl EnsureAddressOrigin for EnsureAddressSame where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = H160; - - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - origin.into().and_then(|o| match o { - RawOrigin::Signed(who) if &who == address => Ok(who), - r => Err(OuterOrigin::from(r)) - }) - } -} - -/// Ensure that the origin is root. -pub struct EnsureAddressRoot(sp_std::marker::PhantomData); - -impl EnsureAddressOrigin for EnsureAddressRoot where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = (); - - fn try_address_origin( - _address: &H160, - origin: OuterOrigin, - ) -> Result<(), OuterOrigin> { - origin.into().and_then(|o| match o { - RawOrigin::Root => Ok(()), - r => Err(OuterOrigin::from(r)), - }) - } -} - -/// Ensure that the origin never happens. -pub struct EnsureAddressNever(sp_std::marker::PhantomData); - -impl EnsureAddressOrigin for EnsureAddressNever { - type Success = AccountId; - - fn try_address_origin( - _address: &H160, - origin: OuterOrigin, - ) -> Result { - Err(origin) - } -} - -/// Ensure that the address is truncated hash of the origin. Only works if the account id is -/// `AccountId32`. -pub struct EnsureAddressTruncated; - -impl EnsureAddressOrigin for EnsureAddressTruncated where - OuterOrigin: Into, OuterOrigin>> + From>, -{ - type Success = AccountId32; - - fn try_address_origin( - address: &H160, - origin: OuterOrigin, - ) -> Result { - origin.into().and_then(|o| match o { - RawOrigin::Signed(who) - if AsRef::<[u8; 32]>::as_ref(&who)[0..20] == address[0..20] => Ok(who), - r => Err(OuterOrigin::from(r)) - }) - } -} - -pub trait AddressMapping { - fn into_account_id(address: H160) -> A; -} - -/// Identity address mapping. -pub struct IdentityAddressMapping; - -impl AddressMapping for IdentityAddressMapping { - fn into_account_id(address: H160) -> H160 { address } -} - -/// Hashed address mapping. -pub struct HashedAddressMapping(sp_std::marker::PhantomData); - -impl> AddressMapping for HashedAddressMapping { - fn into_account_id(address: H160) -> AccountId32 { - let mut data = [0u8; 24]; - data[0..4].copy_from_slice(b"evm:"); - data[4..24].copy_from_slice(&address[..]); - let hash = H::hash(&data); - - AccountId32::from(Into::<[u8; 32]>::into(hash)) - } -} - -/// Substrate system chain ID. -pub struct SystemChainId; - -impl Get for SystemChainId { - fn get() -> u64 { - sp_io::misc::chain_id() - } -} - -static ISTANBUL_CONFIG: Config = Config::istanbul(); - -/// EVM module trait -pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { - /// Calculator for current gas price. - type FeeCalculator: FeeCalculator; - - /// Allow the origin to call on behalf of given address. - type CallOrigin: EnsureAddressOrigin; - /// Allow the origin to withdraw on behalf of given address. - type WithdrawOrigin: EnsureAddressOrigin; - - /// Mapping from address to account id. - type AddressMapping: AddressMapping; - /// Currency type for withdraw and balance storage. - type Currency: Currency; - - /// The overarching event type. 
- type Event: From> + Into<::Event>; - /// Precompiles associated with this EVM engine. - type Precompiles: Precompiles; - /// Chain ID of EVM. - type ChainId: Get; - - /// EVM config used in the module. - fn config() -> &'static Config { - &ISTANBUL_CONFIG - } -} - -#[cfg(feature = "std")] -#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, Serialize, Deserialize)] -/// Account definition used for genesis block construction. -pub struct GenesisAccount { - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, - /// Full account storage. - pub storage: std::collections::BTreeMap, - /// Account code. - pub code: Vec, -} - -decl_storage! { - trait Store for Module as EVM { - AccountCodes get(fn account_codes): map hasher(blake2_128_concat) H160 => Vec; - AccountStorages get(fn account_storages): - double_map hasher(blake2_128_concat) H160, hasher(blake2_128_concat) H256 => H256; - } - - add_extra_genesis { - config(accounts): std::collections::BTreeMap; - build(|config: &GenesisConfig| { - for (address, account) in &config.accounts { - Module::::mutate_account_basic(&address, Account { - balance: account.balance, - nonce: account.nonce, - }); - AccountCodes::insert(address, &account.code); - - for (index, value) in &account.storage { - AccountStorages::insert(address, index, value); - } - } - }); - } -} - -decl_event! { - /// EVM events - pub enum Event where - ::AccountId, - { - /// Ethereum events from contracts. - Log(Log), - /// A contract has been created at given \[address\]. - Created(H160), - /// A \[contract\] was attempted to be created, but the execution failed. - CreatedFailed(H160), - /// A \[contract\] has been executed successfully with states applied. - Executed(H160), - /// A \[contract\] has been executed with errors. States are reverted with only gas fees applied. - ExecutedFailed(H160), - /// A deposit has been made at a given address. \[sender, address, value\] - BalanceDeposit(AccountId, H160, U256), - /// A withdrawal has been made from a given address. \[sender, address, value\] - BalanceWithdraw(AccountId, H160, U256), - } -} - -decl_error! { - pub enum Error for Module { - /// Not enough balance to perform action - BalanceLow, - /// Calculating total fee overflowed - FeeOverflow, - /// Calculating total payment overflowed - PaymentOverflow, - /// Withdraw fee failed - WithdrawFailed, - /// Gas price is too low. - GasPriceTooLow, - /// Nonce is invalid - InvalidNonce, - } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Withdraw balance from EVM into currency/balances module. - #[weight = 0] - fn withdraw(origin, address: H160, value: BalanceOf) { - let destination = T::WithdrawOrigin::ensure_address_origin(&address, origin)?; - let address_account_id = T::AddressMapping::into_account_id(address); - - T::Currency::transfer( - &address_account_id, - &destination, - value, - ExistenceRequirement::AllowDeath - )?; - } - - /// Issue an EVM call operation. This is similar to a message call transaction in Ethereum. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn call( - origin, - source: H160, - target: H160, - input: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_call( - source, - target, - input, - value, - gas_limit, - gas_price, - nonce, - true, - )? 
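// [Editor's note: illustration only.] The `#[weight]` expressions above
// price a dispatch as `gas_price.saturated_into::<Weight>()` multiplied by
// `gas_limit`: e.g. gas_price = 2 and gas_limit = 100_000 gives a
// pre-dispatch weight of 200_000. The calls then return `Pays::No`, since
// the EVM executor itself withdraws `gas_limit * gas_price` from the source
// account (see `execute_evm` below).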
{ - (ExitReason::Succeed(_), _, _, _) => { - Module::::deposit_event(Event::::Executed(target)); - }, - (_, _, _, _) => { - Module::::deposit_event(Event::::ExecutedFailed(target)); - }, - } - - Ok(Pays::No.into()) - } - - /// Issue an EVM create operation. This is similar to a contract creation transaction in - /// Ethereum. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn create( - origin, - source: H160, - init: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_create( - source, - init, - value, - gas_limit, - gas_price, - nonce, - true, - )? { - (ExitReason::Succeed(_), create_address, _, _) => { - Module::::deposit_event(Event::::Created(create_address)); - }, - (_, create_address, _, _) => { - Module::::deposit_event(Event::::CreatedFailed(create_address)); - }, - } - - Ok(Pays::No.into()) - } - - /// Issue an EVM create2 operation. - #[weight = (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight)] - fn create2( - origin, - source: H160, - init: Vec, - salt: H256, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResultWithPostInfo { - T::CallOrigin::ensure_address_origin(&source, origin)?; - - match Self::execute_create2( - source, - init, - salt, - value, - gas_limit, - gas_price, - nonce, - true, - )? { - (ExitReason::Succeed(_), create_address, _, _) => { - Module::::deposit_event(Event::::Created(create_address)); - }, - (_, create_address, _, _) => { - Module::::deposit_event(Event::::CreatedFailed(create_address)); - }, - } - - Ok(Pays::No.into()) - } - } -} - -impl Module { - fn remove_account(address: &H160) { - AccountCodes::remove(address); - AccountStorages::remove_prefix(address); - } - - fn mutate_account_basic(address: &H160, new: Account) { - let account_id = T::AddressMapping::into_account_id(*address); - let current = Self::account_basic(address); - - if current.nonce < new.nonce { - // ASSUME: in one single EVM transaction, the nonce will not increase more than - // `u128::max_value()`. - for _ in 0..(new.nonce - current.nonce).low_u128() { - frame_system::Module::::inc_account_nonce(&account_id); - } - } - - if current.balance > new.balance { - let diff = current.balance - new.balance; - T::Currency::slash(&account_id, diff.low_u128().unique_saturated_into()); - } else if current.balance < new.balance { - let diff = new.balance - current.balance; - T::Currency::deposit_creating(&account_id, diff.low_u128().unique_saturated_into()); - } - } - - /// Check whether an account is empty. - pub fn is_account_empty(address: &H160) -> bool { - let account = Self::account_basic(address); - let code_len = AccountCodes::decode_len(address).unwrap_or(0); - - account.nonce == U256::zero() && - account.balance == U256::zero() && - code_len == 0 - } - - /// Remove an account if its empty. - pub fn remove_account_if_empty(address: &H160) { - if Self::is_account_empty(address) { - Self::remove_account(address); - } - } - - /// Get the account basic in EVM format. 
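// [Editor's note: illustration only.] Worked example for
// `mutate_account_basic` above: if the stored nonce is 3 and the new nonce
// is 5, `inc_account_nonce` runs twice; if the balance dropped from 1_000
// to 900 the 100 difference is slashed, while an increase is credited via
// `deposit_creating`. An account is then "empty" (and removable) only when
// nonce, balance and code length are all zero.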
- pub fn account_basic(address: &H160) -> Account { - let account_id = T::AddressMapping::into_account_id(*address); - - let nonce = frame_system::Module::::account_nonce(&account_id); - let balance = T::Currency::free_balance(&account_id); - - Account { - nonce: U256::from(UniqueSaturatedInto::::unique_saturated_into(nonce)), - balance: U256::from(UniqueSaturatedInto::::unique_saturated_into(balance)), - } - } - - /// Execute a create transaction on behalf of given sender. - pub fn execute_create( - source: H160, - init: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, H160, U256, Vec), Error> { - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| { - let address = executor.create_address( - evm::CreateScheme::Legacy { caller: source }, - ); - (executor.transact_create( - source, - value, - init, - gas_limit as usize, - ), address) - }, - ) - } - - /// Execute a create2 transaction on behalf of a given sender. - pub fn execute_create2( - source: H160, - init: Vec, - salt: H256, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, H160, U256, Vec), Error> { - let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice()); - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| { - let address = executor.create_address( - evm::CreateScheme::Create2 { caller: source, code_hash, salt }, - ); - (executor.transact_create2( - source, - value, - init, - salt, - gas_limit as usize, - ), address) - }, - ) - } - - /// Execute a call transaction on behalf of a given sender. - pub fn execute_call( - source: H160, - target: H160, - input: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - ) -> Result<(ExitReason, Vec, U256, Vec), Error> { - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - apply_state, - |executor| executor.transact_call( - source, - target, - value, - input, - gas_limit as usize, - ), - ) - } - - /// Execute an EVM operation. - fn execute_evm( - source: H160, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - apply_state: bool, - f: F, - ) -> Result<(ExitReason, R, U256, Vec), Error> where - F: FnOnce(&mut StackExecutor>) -> (ExitReason, R), - { - - // Gas price check is skipped when performing a gas estimation. 
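// [Editor's illustration, not part of the patch.] For reference, the
// `CreateScheme::Create2` used by `execute_create2` above derives the new
// contract address per EIP-1014, roughly:
//
//     fn create2_address(caller: H160, salt: H256, code_hash: H256) -> H160 {
//         let mut buf = [0u8; 85]; // 0xff ++ caller ++ salt ++ code_hash
//         buf[0] = 0xff;
//         buf[1..21].copy_from_slice(&caller[..]);
//         buf[21..53].copy_from_slice(&salt[..]);
//         buf[53..85].copy_from_slice(&code_hash[..]);
//         H160::from_slice(&Keccak256::digest(&buf)[12..])
//     }
//
// where `code_hash = keccak256(init)` as computed above.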
- if apply_state { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); - } - - let vicinity = Vicinity { - gas_price, - origin: source, - }; - - let mut backend = Backend::::new(&vicinity); - let mut executor = StackExecutor::new_with_precompile( - &backend, - gas_limit as usize, - T::config(), - T::Precompiles::execute, - ); - - let total_fee = gas_price.checked_mul(U256::from(gas_limit)) - .ok_or(Error::::FeeOverflow)?; - let total_payment = value.checked_add(total_fee).ok_or(Error::::PaymentOverflow)?; - let source_account = Self::account_basic(&source); - ensure!(source_account.balance >= total_payment, Error::::BalanceLow); - executor.withdraw(source, total_fee).map_err(|_| Error::::WithdrawFailed)?; - - if let Some(nonce) = nonce { - ensure!(source_account.nonce == nonce, Error::::InvalidNonce); - } - - let (retv, reason) = f(&mut executor); - - let used_gas = U256::from(executor.used_gas()); - let actual_fee = executor.fee(gas_price); - debug::debug!( - target: "evm", - "Execution {:?} [source: {:?}, value: {}, gas_limit: {}, used_gas: {}, actual_fee: {}]", - retv, - source, - value, - gas_limit, - used_gas, - actual_fee - ); - executor.deposit(source, total_fee.saturating_sub(actual_fee)); - - let (values, logs) = executor.deconstruct(); - let logs_data = logs.into_iter().map(|x| x ).collect::>(); - let logs_result = logs_data.clone().into_iter().map(|it| { - Log { - address: it.address, - topics: it.topics, - data: it.data - } - }).collect(); - if apply_state { - backend.apply(values, logs_data, true); - } - - Ok((retv, reason, used_gas, logs_result)) - } -} diff --git a/frame/evm/src/precompiles.rs b/frame/evm/src/precompiles.rs deleted file mode 100644 index 440d9bf1c68c2..0000000000000 --- a/frame/evm/src/precompiles.rs +++ /dev/null @@ -1,167 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Builtin precompiles. - -use sp_std::{cmp::min, vec::Vec}; -use sp_core::H160; -use evm::{ExitError, ExitSucceed}; -use ripemd160::Digest; -use impl_trait_for_tuples::impl_for_tuples; - -/// Custom precompiles to be used by EVM engine. -pub trait Precompiles { - /// Try to execute the code address as precompile. If the code address is not - /// a precompile or the precompile is not yet available, return `None`. - /// Otherwise, calculate the amount of gas needed with given `input` and - /// `target_gas`. Return `Some(Ok(status, output, gas_used))` if the execution - /// is successful. Otherwise return `Some(Err(_))`. - fn execute( - address: H160, - input: &[u8], - target_gas: Option, - ) -> Option, usize), ExitError>>; -} - -/// One single precompile used by EVM engine. -pub trait Precompile { - /// Try to execute the precompile. Calculate the amount of gas needed with given `input` and - /// `target_gas`. Return `Ok(status, output, gas_used)` if the execution is - /// successful. 
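// [Editor's note: illustration only.] Fee flow in `execute_evm` above,
// worked through: with gas_price = 2 and gas_limit = 100_000, a total_fee of
// 200_000 is withdrawn up front and the account must cover total_payment =
// value + 200_000; if execution then uses 21_000 gas, actual_fee = 42_000
// and the unused 158_000 is deposited back, so the caller only pays for gas
// actually consumed.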
Otherwise return `Err(_)`. - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError>; -} - -#[impl_for_tuples(16)] -#[tuple_types_no_default_trait_bound] -impl Precompiles for Tuple { - for_tuples!( where #( Tuple: Precompile )* ); - - fn execute( - address: H160, - input: &[u8], - target_gas: Option, - ) -> Option, usize), ExitError>> { - let mut index = 0; - - for_tuples!( #( - index += 1; - if address == H160::from_low_u64_be(index) { - return Some(Tuple::execute(input, target_gas)) - } - )* ); - - None - } -} - -/// Linear gas cost -fn ensure_linear_cost( - target_gas: Option, - len: usize, - base: usize, - word: usize -) -> Result { - let cost = base.checked_add( - word.checked_mul(len.saturating_add(31) / 32).ok_or(ExitError::OutOfGas)? - ).ok_or(ExitError::OutOfGas)?; - - if let Some(target_gas) = target_gas { - if cost > target_gas { - return Err(ExitError::OutOfGas) - } - } - - Ok(cost) -} - -/// The identity precompile. -pub struct Identity; - -impl Precompile for Identity { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 15, 3)?; - - Ok((ExitSucceed::Returned, input.to_vec(), cost)) - } -} - -/// The ecrecover precompile. -pub struct ECRecover; - -impl Precompile for ECRecover { - fn execute( - i: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, i.len(), 3000, 0)?; - - let mut input = [0u8; 128]; - input[..min(i.len(), 128)].copy_from_slice(&i[..min(i.len(), 128)]); - - let mut msg = [0u8; 32]; - let mut sig = [0u8; 65]; - - msg[0..32].copy_from_slice(&input[0..32]); - sig[0..32].copy_from_slice(&input[64..96]); - sig[32..64].copy_from_slice(&input[96..128]); - sig[64] = input[63]; - - let pubkey = sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg) - .map_err(|_| ExitError::Other("Public key recover failed"))?; - let mut address = sp_io::hashing::keccak_256(&pubkey); - address[0..12].copy_from_slice(&[0u8; 12]); - - Ok((ExitSucceed::Returned, address.to_vec(), cost)) - } -} - -/// The ripemd precompile. -pub struct Ripemd160; - -impl Precompile for Ripemd160 { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 600, 120)?; - - let mut ret = [0u8; 32]; - ret[12..32].copy_from_slice(&ripemd160::Ripemd160::digest(input)); - Ok((ExitSucceed::Returned, ret.to_vec(), cost)) - } -} - -/// The sha256 precompile. -pub struct Sha256; - -impl Precompile for Sha256 { - fn execute( - input: &[u8], - target_gas: Option, - ) -> core::result::Result<(ExitSucceed, Vec, usize), ExitError> { - let cost = ensure_linear_cost(target_gas, input.len(), 60, 12)?; - - let ret = sp_io::hashing::sha2_256(input); - Ok((ExitSucceed::Returned, ret.to_vec(), cost)) - } -} diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs deleted file mode 100644 index d05fdca1407e5..0000000000000 --- a/frame/evm/src/tests.rs +++ /dev/null @@ -1,189 +0,0 @@ -#![cfg(test)] - -use super::*; - -use std::{str::FromStr, collections::BTreeMap}; -use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, -}; -use sp_core::{Blake2Hasher, H256}; -use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, -}; - -impl_outer_origin! 
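// [Editor's note: illustration only.] `ensure_linear_cost` in the deleted
// precompiles module charges cost = base + word * ceil(len / 32). For
// example, the `Sha256` precompile (base 60, word 12) on a 100-byte input
// costs 60 + 12 * ((100 + 31) / 32) = 60 + 12 * 4 = 108 gas, and fails with
// `ExitError::OutOfGas` once this exceeds the caller's `target_gas`.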
{ - pub enum Origin for Test where system = frame_system {} -} - -impl_outer_dispatch! { - pub enum OuterCall for Test where origin: Origin { - self::EVM, - } -} - -#[derive(Clone, Eq, PartialEq)] -pub struct Test; -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} -impl frame_system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = OuterCall; - type Hashing = BlakeTwo256; - type AccountId = AccountId32; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type PalletInfo = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); -} - -parameter_types! { - pub const ExistentialDeposit: u64 = 1; -} -impl pallet_balances::Trait for Test { - type MaxLocks = (); - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); -} - -parameter_types! { - pub const MinimumPeriod: u64 = 1000; -} -impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; - type WeightInfo = (); -} - -/// Fixed gas price of `0`. -pub struct FixedGasPrice; -impl FeeCalculator for FixedGasPrice { - fn min_gas_price() -> U256 { - // Gas price is always one token per gas. 
- 0.into() - } -} - -impl Trait for Test { - type FeeCalculator = FixedGasPrice; - - type CallOrigin = EnsureAddressRoot; - type WithdrawOrigin = EnsureAddressNever; - - type AddressMapping = HashedAddressMapping; - type Currency = Balances; - - type Event = Event; - type Precompiles = (); - type ChainId = SystemChainId; -} - -type System = frame_system::Module; -type Balances = pallet_balances::Module; -type EVM = Module; - -pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - let mut accounts = BTreeMap::new(); - accounts.insert( - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - GenesisAccount { - nonce: U256::from(1), - balance: U256::from(1000000), - storage: Default::default(), - code: vec![ - 0x00, // STOP - ], - } - ); - accounts.insert( - H160::from_str("1000000000000000000000000000000000000002").unwrap(), - GenesisAccount { - nonce: U256::from(1), - balance: U256::from(1000000), - storage: Default::default(), - code: vec![ - 0xff, // INVALID - ], - } - ); - - pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); - GenesisConfig { accounts }.assimilate_storage::(&mut t).unwrap(); - t.into() -} - -#[test] -fn fail_call_return_ok() { - new_test_ext().execute_with(|| { - assert_ok!(EVM::call( - Origin::root(), - H160::default(), - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Vec::new(), - U256::default(), - 1000000, - U256::default(), - None, - )); - - assert_ok!(EVM::call( - Origin::root(), - H160::default(), - H160::from_str("1000000000000000000000000000000000000002").unwrap(), - Vec::new(), - U256::default(), - 1000000, - U256::default(), - None, - )); - }); -} - -#[test] -fn mutate_account_works() { - new_test_ext().execute_with(|| { - EVM::mutate_account_basic( - &H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Account { - nonce: U256::from(10), - balance: U256::from(1000), - }, - ); - - assert_eq!(EVM::account_basic( - &H160::from_str("1000000000000000000000000000000000000001").unwrap() - ), Account { - nonce: U256::from(10), - balance: U256::from(1000), - }); - }); -} diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index 7db1d348ab2d8..1ccd9f33f0318 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example-offchain-worker" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -13,28 +13,31 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -serde = { version = "1.0.101", optional = true } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-keystore = { version = "0.8.0", path = "../../primitives/keystore", optional = true } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = 
false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore", optional = true } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } lite-json = { version = "0.1", default-features = false } +log = { version = "0.4.14", default-features = false } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "frame-support/std", "frame-system/std", - "serde", "lite-json/std", "sp-core/std", "sp-io/std", "sp-keystore", "sp-runtime/std", "sp-std/std", + "log/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/example-offchain-worker/README.md b/frame/example-offchain-worker/README.md index 4da1a4c15f814..5299027f39250 100644 --- a/frame/example-offchain-worker/README.md +++ b/frame/example-offchain-worker/README.md @@ -1,3 +1,4 @@ + # Offchain Worker Example Module The Offchain Worker Example: A simple pallet demonstrating @@ -6,9 +7,9 @@ concepts, APIs and structures common to most offchain workers. Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's documentation. -- [`pallet_example_offchain_worker::Trait`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/trait.Trait.html) -- [`Call`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/enum.Call.html) -- [`Module`](https://docs.rs/pallet-example-offchain-worker/latest/pallet_example_offchain_worker/struct.Module.html) +- [`pallet_example_offchain_worker::Trait`](./trait.Trait.html) +- [`Call`](./enum.Call.html) +- [`Module`](./struct.Module.html) ## Overview @@ -23,4 +24,4 @@ Additional logic in OCW is put in place to prevent spamming the network with bot and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only one unsigned transaction floating in the network. -License: Unlicense \ No newline at end of file +License: Unlicense diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index 8e02a09484ef5..644e1ca299a3c 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Offchain Worker Example Module +//! +//! # Offchain Worker Example Pallet //! //! The Offchain Worker Example: A simple pallet demonstrating //! concepts, APIs and structures common to most offchain workers. @@ -23,9 +24,9 @@ //! Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's //! documentation. //! -//! - [`pallet_example_offchain_worker::Trait`](./trait.Trait.html) -//! 
- [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! //! ## Overview @@ -41,33 +42,28 @@ //! one unsigned transaction floating in the network. #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode}; +use frame_support::traits::Get; use frame_system::{ self as system, - ensure_signed, - ensure_none, offchain::{ - AppCrypto, CreateSignedTransaction, SendUnsignedTransaction, SendSignedTransaction, - SignedPayload, SigningTypes, Signer, SubmitTransaction, - } -}; -use frame_support::{ - debug, - dispatch::DispatchResult, decl_module, decl_storage, decl_event, - traits::Get, + AppCrypto, CreateSignedTransaction, SendSignedTransaction, SendUnsignedTransaction, + SignedPayload, Signer, SigningTypes, SubmitTransaction, + }, }; +use lite_json::json::JsonValue; use sp_core::crypto::KeyTypeId; use sp_runtime::{ - RuntimeDebug, - offchain::{http, Duration, storage::StorageValueRef}, - traits::Zero, - transaction_validity::{ - InvalidTransaction, ValidTransaction, TransactionValidity, TransactionSource, - TransactionPriority, + offchain::{ + http, + storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, + Duration, }, + traits::Zero, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + RuntimeDebug, }; -use codec::{Encode, Decode}; use sp_std::vec::Vec; -use lite_json::json::JsonValue; #[cfg(test)] mod tests; @@ -86,96 +82,126 @@ pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"btc!"); /// the types with this pallet-specific identifier. pub mod crypto { use super::KEY_TYPE; + use sp_core::sr25519::Signature as Sr25519Signature; use sp_runtime::{ app_crypto::{app_crypto, sr25519}, traits::Verify, }; - use sp_core::sr25519::Signature as Sr25519Signature; app_crypto!(sr25519, KEY_TYPE); pub struct TestAuthId; - impl frame_system::offchain::AppCrypto<::Signer, Sr25519Signature> for TestAuthId { + impl frame_system::offchain::AppCrypto<::Signer, Sr25519Signature> + for TestAuthId + { type RuntimeAppPublic = Public; type GenericSignature = sp_core::sr25519::Signature; type GenericPublic = sp_core::sr25519::Public; } } -/// This pallet's configuration trait -pub trait Trait: CreateSignedTransaction> { - /// The identifier type for an offchain worker. - type AuthorityId: AppCrypto; +pub use pallet::*; - /// The overarching event type. - type Event: From> + Into<::Event>; - /// The overarching dispatch call type. - type Call: From>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - // Configuration parameters + /// This pallet's configuration trait + #[pallet::config] + pub trait Config: CreateSignedTransaction> + frame_system::Config { + /// The identifier type for an offchain worker. + type AuthorityId: AppCrypto; - /// A grace period after we send transaction. - /// - /// To avoid sending too many transactions, we only attempt to send one - /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate - /// sending between distinct runs of this offchain worker. - type GracePeriod: Get; + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// Number of blocks of cooldown after unsigned transaction is included. - /// - /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` blocks. - type UnsignedInterval: Get; + /// The overarching dispatch call type. 
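// [Editor's note: illustration only.] The `crypto` module above scopes an
// sr25519 application key to `KEY_TYPE = *b"btc!"`. A node operator would
// provision such a key through the `author_insertKey` RPC, along these
// lines (all parameter values hypothetical):
//
//     curl -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1,
//       "method":"author_insertKey",
//       "params":["btc!","<secret seed>","<public key hex>"]}' \
//       http://localhost:9933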
+ type Call: From>; - /// A configuration for base priority of unsigned transactions. - /// - /// This is exposed so that it can be tuned for particular runtime, when - /// multiple pallets send unsigned transactions. - type UnsignedPriority: Get; -} - -/// Payload used by this example crate to hold price -/// data required to submit a transaction. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct PricePayload { - block_number: BlockNumber, - price: u32, - public: Public, -} + // Configuration parameters -impl SignedPayload for PricePayload { - fn public(&self) -> T::Public { - self.public.clone() - } -} + /// A grace period after we send transaction. + /// + /// To avoid sending too many transactions, we only attempt to send one + /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate + /// sending between distinct runs of this offchain worker. + #[pallet::constant] + type GracePeriod: Get; -decl_storage! { - trait Store for Module as ExampleOffchainWorker { - /// A vector of recently submitted prices. /// - /// This is used to calculate average price, should have bounded size. - Prices get(fn prices): Vec; - /// Defines the block when next unsigned transaction will be accepted. + /// Number of blocks of cooldown after unsigned transaction is included. /// - /// To prevent spam of unsigned (and unpayed!) transactions on the network, - /// we only allow one transaction every `T::UnsignedInterval` blocks. - /// This storage entry defines when new transaction is going to be accepted. - NextUnsignedAt get(fn next_unsigned_at): T::BlockNumber; + /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` + /// blocks. + #[pallet::constant] + type UnsignedInterval: Get; + + /// A configuration for base priority of unsigned transactions. /// + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. + #[pallet::constant] + type UnsignedPriority: Get; } -} -decl_event!( - /// Events generated by the module. - pub enum Event where AccountId = ::AccountId { - /// Event generated when new price is accepted to contribute to the average. - /// \[price, who\] - NewPrice(u32, AccountId), + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet { + /// Offchain Worker entry point. + /// + /// By implementing `fn offchain_worker` you declare a new offchain worker. + /// This function will be called when the node is fully synced and a new best block is + /// successfully imported. + /// Note that it's not guaranteed for offchain workers to run on EVERY block, there might + /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), + /// so the code should be able to handle that. + /// You can use `Local Storage` API to coordinate runs of the worker. + fn offchain_worker(block_number: T::BlockNumber) { + // Note that having logs compiled to WASM may cause the size of the blob to increase + // significantly. You can use `RuntimeDebug` custom derive to hide details of the types + // in WASM. The `sp-api` crate also provides a feature `disable-logging` to disable + // all logging and thus, remove any logging from the WASM. + log::info!("Hello World from offchain workers!"); + + // Since off-chain workers are just part of the runtime code, they have direct access + // to the storage and other included pallets.
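// [Editor's illustration, not part of the patch.] Wiring the three
// `#[pallet::constant]`s above into a runtime might look like this (the
// values are invented; only `UnsignedPriority = 1 << 20` is confirmed by
// the mock runtime in tests.rs below):
//
//     parameter_types! {
//         pub const GracePeriod: u64 = 5;
//         pub const UnsignedInterval: u64 = 128;
//         pub const UnsignedPriority: u64 = 1 << 20;
//     }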
+ // + // We can easily import `frame_system` and retrieve a block hash of the parent block. + let parent_hash = >::block_hash(block_number - 1u32.into()); + log::debug!("Current block: {:?} (parent hash: {:?})", block_number, parent_hash); + + // It's a good practice to keep `fn offchain_worker()` function minimal, and move most + // of the code to separate `impl` block. + // Here we call a helper function to calculate current average price. + // This function reads storage entries of the current state. + let average: Option = Self::average_price(); + log::debug!("Current price: {:?}", average); + + // For this example we are going to send both signed and unsigned transactions + // depending on the block number. + // Usually it's enough to choose one or the other. + let should_send = Self::choose_transaction_type(block_number); + let res = match should_send { + TransactionType::Signed => Self::fetch_price_and_send_signed(), + TransactionType::UnsignedForAny => + Self::fetch_price_and_send_unsigned_for_any_account(block_number), + TransactionType::UnsignedForAll => + Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), + TransactionType::Raw => Self::fetch_price_and_send_raw_unsigned(block_number), + TransactionType::None => Ok(()), + }; + if let Err(e) = res { + log::error!("Error: {}", e); + } + } } -); -decl_module! { /// A public part of the pallet. - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - + #[pallet::call] + impl Pallet { /// Submit new price to the list. /// /// This method is a public function of the module and can be called from within @@ -190,13 +216,13 @@ decl_module! { /// working and receives (and provides) meaningful data. /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. - #[weight = 0] - pub fn submit_price(origin, price: u32) -> DispatchResult { + #[pallet::weight(0)] + pub fn submit_price(origin: OriginFor, price: u32) -> DispatchResultWithPostInfo { // Retrieve sender of the transaction. let who = ensure_signed(origin)?; // Add the price to the on-chain list. Self::add_price(who, price); - Ok(()) + Ok(().into()) } /// Submit new price to the list via unsigned transaction. @@ -215,86 +241,108 @@ decl_module! { /// /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. - #[weight = 0] - pub fn submit_price_unsigned(origin, _block_number: T::BlockNumber, price: u32) - -> DispatchResult - { + #[pallet::weight(0)] + pub fn submit_price_unsigned( + origin: OriginFor, + _block_number: T::BlockNumber, + price: u32, + ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. ensure_none(origin)?; // Add the price to the on-chain list, but mark it as coming from an empty address. Self::add_price(Default::default(), price); // now increment the block number at which we expect next unsigned transaction. - let current_block = >::block_number(); + let current_block = >::block_number(); >::put(current_block + T::UnsignedInterval::get()); - Ok(()) + Ok(().into()) } - #[weight = 0] + #[pallet::weight(0)] pub fn submit_price_unsigned_with_signed_payload( - origin, + origin: OriginFor, price_payload: PricePayload, _signature: T::Signature, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. 
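// [Editor's note: illustration only.] The unsigned throttle above: when an
// unsigned price is accepted at block N, `NextUnsignedAt` is set to
// N + `UnsignedInterval`, and `validate_transaction_parameters` (further
// below) rejects any further unsigned submission as
// `InvalidTransaction::Stale` until that block is reached, i.e. at most one
// unsigned transaction per interval.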
ensure_none(origin)?; // Add the price to the on-chain list, but mark it as coming from an empty address. Self::add_price(Default::default(), price_payload.price); // now increment the block number at which we expect next unsigned transaction. - let current_block = >::block_number(); + let current_block = >::block_number(); >::put(current_block + T::UnsignedInterval::get()); - Ok(()) + Ok(().into()) } + } - /// Offchain Worker entry point. - /// - /// By implementing `fn offchain_worker` within `decl_module!` you declare a new offchain - /// worker. - /// This function will be called when the node is fully synced and a new best block is - /// succesfuly imported. - /// Note that it's not guaranteed for offchain workers to run on EVERY block, there might - /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), - /// so the code should be able to handle that. - /// You can use `Local Storage` API to coordinate runs of the worker. - fn offchain_worker(block_number: T::BlockNumber) { - // It's a good idea to add logs to your offchain workers. - // Using the `frame_support::debug` module you have access to the same API exposed by - // the `log` crate. - // Note that having logs compiled to WASM may cause the size of the blob to increase - // significantly. You can use `RuntimeDebug` custom derive to hide details of the types - // in WASM or use `debug::native` namespace to produce logs only when the worker is - // running natively. - debug::native::info!("Hello World from offchain workers!"); - - // Since off-chain workers are just part of the runtime code, they have direct access - // to the storage and other included pallets. - // - // We can easily import `frame_system` and retrieve a block hash of the parent block. - let parent_hash = >::block_hash(block_number - 1.into()); - debug::debug!("Current block: {:?} (parent hash: {:?})", block_number, parent_hash); + /// Events for the pallet. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Event generated when new price is accepted to contribute to the average. + /// \[price, who\] + NewPrice(u32, T::AccountId), + } - // It's a good practice to keep `fn offchain_worker()` function minimal, and move most - // of the code to separate `impl` block. - // Here we call a helper function to calculate current average price. - // This function reads storage entries of the current state. - let average: Option = Self::average_price(); - debug::debug!("Current price: {:?}", average); + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; - // For this example we are going to send both signed and unsigned transactions - // depending on the block number. - // Usually it's enough to choose one or the other. - let should_send = Self::choose_transaction_type(block_number); - let res = match should_send { - TransactionType::Signed => Self::fetch_price_and_send_signed(), - TransactionType::UnsignedForAny => Self::fetch_price_and_send_unsigned_for_any_account(block_number), - TransactionType::UnsignedForAll => Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), - TransactionType::Raw => Self::fetch_price_and_send_raw_unsigned(block_number), - TransactionType::None => Ok(()), - }; - if let Err(e) = res { - debug::error!("Error: {}", e); + /// Validate unsigned call to this module. 
+ /// + /// By default unsigned transactions are disallowed, but implementing the validator + /// here we make sure that some particular calls (the ones produced by offchain worker) + /// are being whitelisted and marked as valid. + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + // Firstly let's check that we call the right function. + if let Call::submit_price_unsigned_with_signed_payload { + price_payload: ref payload, + ref signature, + } = call + { + let signature_valid = + SignedPayload::::verify::(payload, signature.clone()); + if !signature_valid { + return InvalidTransaction::BadProof.into() + } + Self::validate_transaction_parameters(&payload.block_number, &payload.price) + } else if let Call::submit_price_unsigned { block_number, price: new_price } = call { + Self::validate_transaction_parameters(block_number, new_price) + } else { + InvalidTransaction::Call.into() } } } + + /// A vector of recently submitted prices. + /// + /// This is used to calculate average price, should have bounded size. + #[pallet::storage] + #[pallet::getter(fn prices)] + pub(super) type Prices = StorageValue<_, Vec, ValueQuery>; + + /// Defines the block when next unsigned transaction will be accepted. + /// + /// To prevent spam of unsigned (and unpaid!) transactions on the network, + /// we only allow one transaction every `T::UnsignedInterval` blocks. + /// This storage entry defines when new transaction is going to be accepted. + #[pallet::storage] + #[pallet::getter(fn next_unsigned_at)] + pub(super) type NextUnsignedAt = StorageValue<_, T::BlockNumber, ValueQuery>; +} + +/// Payload used by this example crate to hold price +/// data required to submit a transaction. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, scale_info::TypeInfo)] +pub struct PricePayload { + block_number: BlockNumber, + price: u32, + public: Public, +} + +impl SignedPayload for PricePayload { + fn public(&self) -> T::Public { + self.public.clone() + } } enum TransactionType { @@ -305,11 +353,7 @@ enum TransactionType { None, } -/// Most of the functions are moved outside of the `decl_module!` macro. -/// -/// This greatly helps with error messages, as the ones inside the macro -/// can sometimes be hard to debug. -impl Module { +impl Pallet { /// Chooses which transaction type to send. /// /// This function serves mostly to showcase `StorageValue` helper @@ -331,19 +375,14 @@ impl Module { // low-level method of local storage API, which means that only one worker // will be able to "acquire a lock" and send a transaction if multiple workers // happen to be executed concurrently. - let res = val.mutate(|last_send: Option>| { - // We match on the value decoded from the storage. The first `Option` - // indicates if the value was present in the storage at all, - // the second (inner) `Option` indicates if the value was succesfuly - // decoded to expected type (`T::BlockNumber` in our case). + let res = val.mutate(|last_send: Result, StorageRetrievalError>| { match last_send { // If we already have a value in storage and the block number is recent enough // we avoid sending another transaction at this time. - Some(Some(block)) if block_number < block + T::GracePeriod::get() => { - Err(RECENTLY_SENT) - }, + Ok(Some(block)) if block_number < block + T::GracePeriod::get() => + Err(RECENTLY_SENT), // In every other case we attempt to acquire the lock and send a transaction.
- _ => Ok(block_number) + _ => Ok(block_number), } }); @@ -355,7 +394,7 @@ impl Module { // written to in the meantime. match res { // The value has been set correctly, which means we can safely send a transaction now. - Ok(Ok(block_number)) => { + Ok(block_number) => { // Depending if the block is even or odd we will send a `Signed` or `Unsigned` // transaction. // Note that this logic doesn't really guarantee that the transactions will be sent @@ -364,20 +403,25 @@ impl Module { // transactions in a row. If a strict order is desired, it's better to use // the storage entry for that. (for instance store both block number and a flag // indicating the type of next transaction to send). - let transaction_type = block_number % 3.into(); - if transaction_type == Zero::zero() { TransactionType::Signed } - else if transaction_type == T::BlockNumber::from(1) { TransactionType::UnsignedForAny } - else if transaction_type == T::BlockNumber::from(2) { TransactionType::UnsignedForAll } - else { TransactionType::Raw } + let transaction_type = block_number % 3u32.into(); + if transaction_type == Zero::zero() { + TransactionType::Signed + } else if transaction_type == T::BlockNumber::from(1u32) { + TransactionType::UnsignedForAny + } else if transaction_type == T::BlockNumber::from(2u32) { + TransactionType::UnsignedForAll + } else { + TransactionType::Raw + } }, // We are in the grace period, we should not send a transaction this time. - Err(RECENTLY_SENT) => TransactionType::None, + Err(MutateStorageError::ValueFunctionFailed(RECENTLY_SENT)) => TransactionType::None, // We wanted to send a transaction, but failed to write the block number (acquire a // lock). This indicates that another offchain worker that was running concurrently // most likely executed the same logic and succeeded at writing to storage. // Thus we don't really want to send the transaction, knowing that the other run // already did. - Ok(Err(_)) => TransactionType::None, + Err(MutateStorageError::ConcurrentModification(_)) => TransactionType::None, } } @@ -386,7 +430,7 @@ impl Module { let signer = Signer::::all_accounts(); if !signer.can_sign() { return Err( - "No local accounts available. Consider adding one via `author_insertKey` RPC." + "No local accounts available. Consider adding one via `author_insertKey` RPC.", )? } // Make an external HTTP request to fetch the current price. @@ -397,19 +441,17 @@ impl Module { // representing the call, we've just created. // Submit signed will return a vector of results for all accounts that were found in the // local keystore with expected `KEY_TYPE`. - let results = signer.send_signed_transaction( - |_account| { - // Received price is wrapped into a call to `submit_price` public function of this pallet. - // This means that the transaction, when executed, will simply call that function passing - // `price` as an argument. - Call::submit_price(price) - } - ); + let results = signer.send_signed_transaction(|_account| { + // Received price is wrapped into a call to `submit_price` public function of this + // pallet. This means that the transaction, when executed, will simply call that + // function passing `price` as an argument. 
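// [Editor's note: illustration only.] The `block_number % 3` switch above
// maps, e.g., block 30 -> Signed, 31 -> UnsignedForAny, 32 -> UnsignedForAll;
// since n % 3 is always 0, 1 or 2, the trailing `TransactionType::Raw` arm
// appears unreachable here and would need a larger modulus to be exercised.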
+ Call::submit_price { price } + }); for (acc, res) in &results { match res { - Ok(()) => debug::info!("[{:?}] Submitted price of {} cents", acc.id, price), - Err(e) => debug::error!("[{:?}] Failed to submit transaction: {:?}", acc.id, e), + Ok(()) => log::info!("[{:?}] Submitted price of {} cents", acc.id, price), + Err(e) => log::error!("[{:?}] Failed to submit transaction: {:?}", acc.id, e), } } @@ -432,7 +474,7 @@ impl Module { // Received price is wrapped into a call to `submit_price_unsigned` public function of this // pallet. This means that the transaction, when executed, will simply call that function // passing `price` as an argument. - let call = Call::submit_price_unsigned(block_number, price); + let call = Call::submit_price_unsigned { block_number, price }; // Now let's create a transaction out of this call and submit it to the pool. // Here we showcase two ways to send an unsigned transaction / unsigned payload (raw) @@ -449,7 +491,9 @@ impl Module { } /// A helper function to fetch the price, sign payload and send an unsigned transaction - fn fetch_price_and_send_unsigned_for_any_account(block_number: T::BlockNumber) -> Result<(), &'static str> { + fn fetch_price_and_send_unsigned_for_any_account( + block_number: T::BlockNumber, + ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. let next_unsigned_at = >::get(); @@ -462,23 +506,24 @@ impl Module { let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; // -- Sign using any account - let (_, result) = Signer::::any_account().send_unsigned_transaction( - |account| PricePayload { - price, - block_number, - public: account.public.clone() - }, - |payload, signature| { - Call::submit_price_unsigned_with_signed_payload(payload, signature) - } - ).ok_or("No local accounts accounts available.")?; + let (_, result) = Signer::::any_account() + .send_unsigned_transaction( + |account| PricePayload { price, block_number, public: account.public.clone() }, + |payload, signature| Call::submit_price_unsigned_with_signed_payload { + price_payload: payload, + signature, + }, + ) + .ok_or("No local accounts available.")?; result.map_err(|()| "Unable to submit transaction")?; Ok(()) } /// A helper function to fetch the price, sign payload and send an unsigned transaction - fn fetch_price_and_send_unsigned_for_all_accounts(block_number: T::BlockNumber) -> Result<(), &'static str> { + fn fetch_price_and_send_unsigned_for_all_accounts( + block_number: T::BlockNumber, + ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. let next_unsigned_at = >::get(); @@ -493,18 +538,15 @@ impl Module { // -- Sign using all accounts let transaction_results = Signer::::all_accounts() .send_unsigned_transaction( - |account| PricePayload { - price, - block_number, - public: account.public.clone() + |account| PricePayload { price, block_number, public: account.public.clone() }, + |payload, signature| Call::submit_price_unsigned_with_signed_payload { + price_payload: payload, + signature, + }, - |payload, signature| { - Call::submit_price_unsigned_with_signed_payload(payload, signature) - } ); for (_account_id, result) in transaction_results.into_iter() { if result.is_err() { - return Err("Unable to submit transaction"); + return Err("Unable to submit transaction") } } @@ -523,16 +565,12 @@ impl Module { // you can find in `sp_io`.
The API is trying to be similar to `reqwest`, but // since we are running in a custom WASM execution environment we can't simply // import the library here. - let request = http::Request::get( - "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD" - ); + let request = + http::Request::get("https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD"); // We set the deadline for sending of the request, note that awaiting response can // have a separate deadline. Next we send the request, before that it's also possible // to alter request headers or stream body content in case of non-GET requests. - let pending = request - .deadline(deadline) - .send() - .map_err(|_| http::Error::IoError)?; + let pending = request.deadline(deadline).send().map_err(|_| http::Error::IoError)?; // The request is already being processed by the host, we are free to do anything // else in the worker (we can send multiple concurrent requests too). @@ -540,12 +578,11 @@ impl Module { // so we can block current thread and wait for it to finish. // Note that since the request is being driven by the host, we don't have to wait // for the request to have it complete, we will just not read the response. - let response = pending.try_wait(deadline) - .map_err(|_| http::Error::DeadlineReached)??; + let response = pending.try_wait(deadline).map_err(|_| http::Error::DeadlineReached)??; // Let's check the status code before we proceed to reading the response. if response.code != 200 { - debug::warn!("Unexpected status code: {}", response.code); - return Err(http::Error::Unknown); + log::warn!("Unexpected status code: {}", response.code); + return Err(http::Error::Unknown) } // Next we want to fully read the response body and collect it to a vector of bytes. @@ -555,19 +592,19 @@ impl Module { // Create a str slice from the body. let body_str = sp_std::str::from_utf8(&body).map_err(|_| { - debug::warn!("No UTF8 body"); + log::warn!("No UTF8 body"); http::Error::Unknown })?; let price = match Self::parse_price(body_str) { Some(price) => Ok(price), None => { - debug::warn!("Unable to extract price from the response: {:?}", body_str); + log::warn!("Unable to extract price from the response: {:?}", body_str); Err(http::Error::Unknown) - } + }, }?; - debug::warn!("Got price: {} cents", price); + log::warn!("Got price: {} cents", price); Ok(price) } @@ -577,18 +614,16 @@ impl Module { /// Returns `None` when parsing failed or `Some(price in cents)` when parsing is successful. fn parse_price(price_str: &str) -> Option { let val = lite_json::parse_json(price_str); - let price = val.ok().and_then(|v| match v { + let price = match val.ok()? { JsonValue::Object(obj) => { - let mut chars = "USD".chars(); - obj.into_iter() - .find(|(k, _)| k.iter().all(|k| Some(*k) == chars.next())) - .and_then(|v| match v.1 { - JsonValue::Number(number) => Some(number), - _ => None, - }) + let (_, v) = obj.into_iter().find(|(k, _)| k.iter().copied().eq("USD".chars()))?; + match v { + JsonValue::Number(number) => number, + _ => return None, + } }, - _ => None - })?; + _ => return None, + }; let exp = price.fraction_length.checked_sub(2).unwrap_or(0); Some(price.integer as u32 * 100 + (price.fraction / 10_u64.pow(exp)) as u32) @@ -596,8 +631,8 @@ impl Module { /// Add new price to the list. 
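// [Editor's note: illustration only.] `parse_price` above converts the
// fetched JSON body to integer cents: for {"USD": 576.28301} it sees
// integer = 576, fraction = 28301, fraction_length = 5, hence exp = 3 and
// the result is 576 * 100 + 28301 / 10^3 = 57_628 cents.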
fn add_price(who: T::AccountId, price: u32) { - debug::info!("Adding to the average: {}", price); - Prices::mutate(|prices| { + log::info!("Adding to the average: {}", price); + >::mutate(|prices| { const MAX_LEN: usize = 64; if prices.len() < MAX_LEN { @@ -609,14 +644,14 @@ impl Module { let average = Self::average_price() .expect("The average is not empty, because it was just mutated; qed"); - debug::info!("Current average price is: {}", average); + log::info!("Current average price is: {}", average); // here we are raising the NewPrice event - Self::deposit_event(RawEvent::NewPrice(price, who)); + Self::deposit_event(Event::NewPrice(price, who)); } /// Calculate current average price. fn average_price() -> Option { - let prices = Prices::get(); + let prices = >::get(); if prices.is_empty() { None } else { @@ -631,12 +666,12 @@ impl Module { // Now let's check if the transaction has any chance to succeed. let next_unsigned_at = >::get(); if &next_unsigned_at > block_number { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // Let's make sure to reject transactions from the future. - let current_block = >::block_number(); + let current_block = >::block_number(); if ¤t_block < block_number { - return InvalidTransaction::Future.into(); + return InvalidTransaction::Future.into() } // We prioritize transactions that are more far away from current average. @@ -676,33 +711,3 @@ impl Module { .build() } } - -#[allow(deprecated)] // ValidateUnsigned -impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - - /// Validate unsigned call to this module. - /// - /// By default unsigned transactions are disallowed, but implementing the validator - /// here we make sure that some particular calls (the ones produced by offchain worker) - /// are being whitelisted and marked as valid. - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - // Firstly let's check that we call the right function. - if let Call::submit_price_unsigned_with_signed_payload( - ref payload, ref signature - ) = call { - let signature_valid = SignedPayload::::verify::(payload, signature.clone()); - if !signature_valid { - return InvalidTransaction::BadProof.into(); - } - Self::validate_transaction_parameters(&payload.block_number, &payload.price) - } else if let Call::submit_price_unsigned(block_number, new_price) = call { - Self::validate_transaction_parameters(block_number, new_price) - } else { - InvalidTransaction::Call.into() - } - } -} diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index 204b366964f47..1dde8a1df60c8 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,51 +15,51 @@ // See the License for the specific language governing permissions and // limitations under the License. 
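// [Editor's note: illustration only.] `average_price` above returns the
// arithmetic mean of the (at most 64) stored samples, so with
// Prices = [100, 200, 300] it should yield Some(200); the
// `it_aggregates_the_price` test below exercises this averaging.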
+use crate as example_offchain_worker; use crate::*; -use std::sync::Arc; -use codec::{Encode, Decode}; -use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, - weights::Weight, -}; +use codec::Decode; +use frame_support::{assert_ok, parameter_types}; use sp_core::{ - H256, - offchain::{OffchainExt, TransactionPoolExt, testing}, + offchain::{testing, OffchainWorkerExt, TransactionPoolExt}, sr25519::Signature, + H256, }; +use std::sync::Arc; -use sp_keystore::{ - {KeystoreExt, SyncCryptoStore}, - testing::KeyStore, -}; +use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStore}; use sp_runtime::{ - Perbill, RuntimeAppPublic, testing::{Header, TestXt}, - traits::{ - BlakeTwo256, IdentityLookup, Extrinsic as ExtrinsicT, - IdentifyAccount, Verify, - }, + traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, + RuntimeAppPublic, }; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// For testing the module, we construct a mock runtime. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Example: example_offchain_worker::{Pallet, Call, Storage, Event, ValidateUnsigned}, + } +); -// For testing the module, we construct most of a mock runtime. This means -// first constructing a configuration type (`Test`) which `impl`s each of the -// configuration traits of modules we want to use. -#[derive(Clone, Eq, PartialEq, Encode, Decode)] -pub struct Test; parameter_types! { pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; - type Call = (); + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -67,24 +67,19 @@ impl frame_system::Trait for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); - type PalletInfo = (); + type PalletInfo = PalletInfo; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } -type Extrinsic = TestXt, ()>; +type Extrinsic = TestXt; type AccountId = <::Signer as IdentifyAccount>::AccountId; impl frame_system::offchain::SigningTypes for Test { @@ -92,22 +87,24 @@ impl frame_system::offchain::SigningTypes for Test { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Test where - Call: From, +impl 
frame_system::offchain::SendTransactionTypes<LocalCall> for Test +where + Call: From<LocalCall>, { - type OverarchingCall = Call<Test>; + type OverarchingCall = Call; type Extrinsic = Extrinsic; } -impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Test where - Call<Test>: From<LocalCall>, +impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Test +where + Call: From<LocalCall>, { fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>( - call: Call<Test>, + call: Call, _public: <Signature as Verify>::Signer, _account: AccountId, nonce: u64, - ) -> Option<(Call<Test>, <Extrinsic as ExtrinsicT>::SignaturePayload)> { + ) -> Option<(Call, <Extrinsic as ExtrinsicT>::SignaturePayload)> { Some((call, (nonce, ()))) } } @@ -118,17 +115,15 @@ parameter_types! { pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Test { - type Event = (); +impl Config for Test { + type Event = Event; type AuthorityId = crypto::TestAuthId; - type Call = Call<Test>; + type Call = Call; type GracePeriod = GracePeriod; type UnsignedInterval = UnsignedInterval; type UnsignedPriority = UnsignedPriority; } -type Example = Module<Test>; - #[test] fn it_aggregates_the_price() { sp_io::TestExternalities::default().execute_with(|| { @@ -146,7 +141,7 @@ fn it_aggregates_the_price() { fn should_make_http_call_and_parse_result() { let (offchain, state) = testing::TestOffchainExt::new(); let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); price_oracle_response(&mut state.write()); @@ -162,7 +157,7 @@ fn should_make_http_call_and_parse_result() { fn knows_how_to_mock_several_http_calls() { let (offchain, state) = testing::TestOffchainExt::new(); let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); { let mut state = state.write(); @@ -191,7 +186,6 @@ fn knows_how_to_mock_several_http_calls() { }); } - t.execute_with(|| { let price1 = Example::fetch_price().unwrap(); let price2 = Example::fetch_price().unwrap(); @@ -201,12 +195,12 @@ fn knows_how_to_mock_several_http_calls() { assert_eq!(price2, 200); assert_eq!(price3, 300); }) - } #[test] fn should_submit_signed_transaction_on_chain() { - const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + const PHRASE: &str = + "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); @@ -214,12 +208,12 @@ fn should_submit_signed_transaction_on_chain() { SyncCryptoStore::sr25519_generate_new( &keystore, crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); - + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); t.register_extension(KeystoreExt(Arc::new(keystore))); @@ -233,13 +227,14 @@ fn should_submit_signed_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, Call::submit_price(15523)); + assert_eq!(tx.call, Call::Example(crate::Call::submit_price { price: 15523 })); }); } #[test] fn should_submit_unsigned_transaction_on_chain_for_any_account() { - const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + const PHRASE: &str = + "news
slush supreme milk chapter athlete soap sausage put clutch what kitten"; let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); @@ -248,8 +243,9 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { SyncCryptoStore::sr25519_generate_new( &keystore, crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) @@ -257,7 +253,7 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { .unwrap() .clone(); let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); t.register_extension(KeystoreExt(Arc::new(keystore))); @@ -277,13 +273,18 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let Call::submit_price_unsigned_with_signed_payload(body, signature) = tx.call { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload { + price_payload: body, + signature, + }) = tx.call + { assert_eq!(body, price_payload); - let signature_valid = <PricePayload< - <Test as SigningTypes>::Public, - <Test as frame_system::Trait>::BlockNumber - > as SignedPayload<Test>>::verify::<crypto::TestAuthId>(&price_payload, signature); + let signature_valid = + <PricePayload< + <Test as SigningTypes>::Public, + <Test as frame_system::Config>::BlockNumber, + > as SignedPayload<Test>>::verify::<crypto::TestAuthId>(&price_payload, signature); assert!(signature_valid); } @@ -292,7 +293,8 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { #[test] fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { - const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + const PHRASE: &str = + "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); @@ -301,8 +303,9 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { SyncCryptoStore::sr25519_generate_new( &keystore, crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) @@ -310,7 +313,7 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { .unwrap() .clone(); let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); t.register_extension(KeystoreExt(Arc::new(keystore))); @@ -330,13 +333,18 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let Call::submit_price_unsigned_with_signed_payload(body, signature) = tx.call { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload { + price_payload: body, + signature, + }) = tx.call + { assert_eq!(body, price_payload); - let signature_valid = <PricePayload< - <Test as SigningTypes>::Public, - <Test as frame_system::Trait>::BlockNumber - > as SignedPayload<Test>>::verify::<crypto::TestAuthId>(&price_payload, signature); + let signature_valid = + <PricePayload< + <Test as SigningTypes>::Public, + 
<Test as frame_system::Config>::BlockNumber, + > as SignedPayload<Test>>::verify::<crypto::TestAuthId>(&price_payload, signature); assert!(signature_valid); } @@ -351,7 +359,7 @@ fn should_submit_raw_unsigned_transaction_on_chain() { let keystore = KeyStore::new(); let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); + t.register_extension(OffchainWorkerExt::new(offchain)); t.register_extension(TransactionPoolExt::new(pool)); t.register_extension(KeystoreExt(Arc::new(keystore))); @@ -365,7 +373,10 @@ fn should_submit_raw_unsigned_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - assert_eq!(tx.call, Call::submit_price_unsigned(1, 15523)); + assert_eq!( + tx.call, + Call::Example(crate::Call::submit_price_unsigned { block_number: 1, price: 15523 }) + ); }); } diff --git a/frame/example-parallel/Cargo.toml b/frame/example-parallel/Cargo.toml index 01a612fb82fbf..5e0f6d4bc255a 100644 --- a/frame/example-parallel/Cargo.toml +++ b/frame/example-parallel/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example-parallel" -version = "2.0.0" +version = "3.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Unlicense" @@ -12,19 +12,21 @@ description = "FRAME example pallet using runtime worker threads" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-tasks = { version = "2.0.0", default-features = false, path = "../../primitives/tasks" } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../primitives/tasks" } [features] default = ["std"] std = [ "codec/std", + "scale-info/std", "frame-support/std", "frame-system/std", "sp-core/std", @@ -33,3 +35,4 @@ std = [ "sp-std/std", "sp-tasks/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/example-parallel/src/lib.rs b/frame/example-parallel/src/lib.rs index 4b7ce72b4d40e..c86cac4295684 100644 --- a/frame/example-parallel/src/lib.rs +++ b/frame/example-parallel/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
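// The migrated tests above pop raw bytes from the mock transaction pool and
// SCALE-decode them back into an extrinsic before matching on the call variant.
// A standalone sketch of that round trip (editor's illustration), assuming only
// the `parity-scale-codec` crate with its `derive` feature; `MockCall` is a
// stand-in, not the pallet's real call enum:
use codec::{Decode, Encode};

#[derive(Encode, Decode, Debug, PartialEq)]
enum MockCall {
    SubmitPrice { price: u32 },
}

fn main() {
    // What the pallet would push into the pool: an encoded call...
    let submitted: Vec<u8> = MockCall::SubmitPrice { price: 15523 }.encode();
    // ...and what the test does with the popped bytes.
    let decoded = MockCall::decode(&mut &submitted[..]).unwrap();
    assert_eq!(decoded, MockCall::SubmitPrice { price: 15523 });
}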
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,48 +22,80 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_system::ensure_signed; -use frame_support::{ - dispatch::DispatchResult, decl_module, decl_storage, decl_event, -}; use sp_runtime::RuntimeDebug; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::vec::Vec; #[cfg(test)] mod tests; -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From<Event> + Into<<Self as frame_system::Trait>::Event>; - /// The overarching dispatch call type. - type Call: From<Call<Self>>; -} +pub use pallet::*; -decl_storage! { - trait Store for Module<T: Trait> as ExampleOffchainWorker { - /// A vector of current participants - /// - /// To enlist someone to participate, signed payload should be - /// sent to `enlist`. - Participants get(fn participants): Vec<Vec<u8>>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - /// Current event id to enlist participants to. - CurrentEventId get(fn get_current_event_id): Vec<u8>; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching dispatch call type. + type Call: From<Call<Self>>; } -} -decl_event!( - /// Events generated by the module. - pub enum Event { - /// When new event is drafted. - NewEventDrafted(Vec<u8>), + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet<T>(_); + + /// A public part of the pallet. + #[pallet::call] + impl<T: Config> Pallet<T> { + /// Get the new event running. + #[pallet::weight(0)] + pub fn run_event(origin: OriginFor<T>, id: Vec<u8>) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + <Participants<T>>::kill(); + <CurrentEventId<T>>::mutate(move |event_id| *event_id = id); + Ok(().into()) + } + + /// Submit list of participants to the current event. + /// + /// The example utilizes parallel execution by checking half of the + /// signatures in a spawned task. + #[pallet::weight(0)] + pub fn enlist_participants( + origin: OriginFor<T>, + participants: Vec<EnlistedParticipant>, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + if validate_participants_parallel(&<CurrentEventId<T>>::get(), &participants[..]) { + for participant in participants { + <Participants<T>>::append(participant.account); + } + } + Ok(().into()) + } } -); + + /// A vector of current participants + /// + /// To enlist someone to participate, signed payload should be + /// sent to `enlist`. + #[pallet::storage] + #[pallet::getter(fn participants)] + pub(super) type Participants<T> = StorageValue<_, Vec<Vec<u8>>, ValueQuery>; + + /// Current event id to enlist participants to. + #[pallet::storage] + #[pallet::getter(fn get_current_event_id)] + pub(super) type CurrentEventId<T> = StorageValue<_, Vec<u8>, ValueQuery>; +} /// Request to enlist participant. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, scale_info::TypeInfo)] pub struct EnlistedParticipant { pub account: Vec<u8>, pub signature: Vec<u8>, } @@ -72,55 +104,20 @@ pub struct EnlistedParticipant { impl EnlistedParticipant { fn verify(&self, event_id: &[u8]) -> bool { use sp_core::Public; - use std::convert::TryFrom; use sp_runtime::traits::Verify; + use std::convert::TryFrom; match sp_core::sr25519::Signature::try_from(&self.signature[..]) { Ok(signature) => { let public = sp_core::sr25519::Public::from_slice(self.account.as_ref()); signature.verify(event_id, &public) - } - _ => false - } - } -} - -decl_module! { - /// A public part of the pallet.
- pub struct Module<T: Trait> for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Get the new event running. - #[weight = 0] - pub fn run_event(origin, id: Vec<u8>) -> DispatchResult { - let _ = ensure_signed(origin)?; - Participants::kill(); - CurrentEventId::mutate(move |event_id| *event_id = id); - Ok(()) - } - - /// Submit list of participants to the current event. - /// - /// The example utilizes parallel execution by checking half of the - /// signatures in spawned task. - #[weight = 0] - pub fn enlist_participants(origin, participants: Vec<EnlistedParticipant>) - -> DispatchResult - { - let _ = ensure_signed(origin)?; - - if validate_participants_parallel(&CurrentEventId::get(), &participants[..]) { - for participant in participants { - Participants::append(participant.account); - } - } - Ok(()) + }, + _ => false, } } } fn validate_participants_parallel(event_id: &[u8], participants: &[EnlistedParticipant]) -> bool { - fn spawn_verify(data: Vec<u8>) -> Vec<u8> { let stream = &mut &data[..]; let event_id = Vec::<u8>::decode(stream).expect("Failed to decode"); @@ -141,10 +138,10 @@ fn validate_participants_parallel(event_id: &[u8], participants: &[EnlistedParti let handle = sp_tasks::spawn(spawn_verify, async_payload); let mut result = true; - for participant in &participants[participants.len()/2+1..] { + for participant in &participants[participants.len() / 2 + 1..] { if !participant.verify(event_id) { result = false; - break; + break } } diff --git a/frame/example-parallel/src/tests.rs b/frame/example-parallel/src/tests.rs index 1da8c60388266..4c36f0d6eb858 100644 --- a/frame/example-parallel/src/tests.rs +++ b/frame/example-parallel/src/tests.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,35 +15,40 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::*; +use crate::{self as pallet_example_parallel, *}; -use codec::{Encode, Decode}; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_support::parameter_types; use sp_core::H256; use sp_runtime::{ - Perbill, - testing::{Header}, + testing::Header, traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} -} +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; +type Block = frame_system::mocking::MockBlock<Test>; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, + Example: pallet_example_parallel::{Pallet, Call, Storage}, + } +); -#[derive(Clone, Eq, PartialEq, Encode, Decode)] -pub struct Test; parameter_types!
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } -impl frame_system::Trait for Test { - type BaseCallFilter = (); +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; type Origin = Origin; - type Call = (); - type PalletInfo = (); + type Call = Call; + type PalletInfo = PalletInfo; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -51,20 +56,18 @@ impl frame_system::Trait for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; - type Event = (); + type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; + type BlockWeights = (); + type BlockLength = (); type Version = (); type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -73,13 +76,10 @@ parameter_types! { pub const UnsignedPriority: u64 = 1 << 20; } -impl Trait for Test { - type Event = (); - type Call = Call<Test>; +impl Config for Test { + type Call = Call; } -type Example = Module<Test>; - #[test] fn it_can_enlist() { use sp_core::Pair; @@ -109,7 +109,6 @@ fn it_can_enlist() { assert_eq!(Example::participants().len(), 2); }); - } #[test] @@ -147,5 +146,4 @@ fn one_wrong_will_not_enlist_anyone() { assert_eq!(Example::participants().len(), 0); }); - } diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 41889ea4828d0..58daaf1c75558 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example" -version = "2.0.0" +version = "4.0.0-dev" authors = ["Parity Technologies <admin@parity.io>"] edition = "2018" license = "Unlicense" @@ -13,31 +13,33 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } - -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } +sp-std = { version =
"4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core", default-features = false } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } [features] default = ["std"] std = [ - "serde", "codec/std", - "sp-runtime/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", + "scale-info/std", "sp-io/std", + "sp-runtime/std", "sp-std/std" ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/example/README.md b/frame/example/README.md index 05ef4cd4351cf..e06dee78c3f81 100644 --- a/frame/example/README.md +++ b/frame/example/README.md @@ -1,3 +1,4 @@ + # Example Pallet @@ -45,7 +46,7 @@ Copy and paste this template from frame/example/src/lib.rs into file // Include the following links that shows what trait needs to be implemented to use the pallet // and the supported dispatchables that are documented in the Call enum. -- \[`::Trait`](https://docs.rs/pallet-example/latest/pallet_example/trait.Trait.html) +- \[`::Config`](https://docs.rs/pallet-example/latest/pallet_example/trait.Config.html) - \[`Call`](https://docs.rs/pallet-example/latest/pallet_example/enum.Call.html) - \[`Module`](https://docs.rs/pallet-example/latest/pallet_example/struct.Module.html) @@ -194,7 +195,7 @@ Copy and paste this template from frame/example/src/lib.rs into file \```rust use ; -pub trait Trait: ::Trait { } +pub trait Config: ::Config { } \``` \### Simple Code Snippet @@ -234,4 +235,4 @@ pub trait Trait: ::Trait { } // that the implementation is based on.

(&s).ok()) - .and_then(|mut t| t.remove("dependencies")) - .and_then(|p| p.try_into::<Table>
().ok()) - .and_then(|mut t| t.remove("wasm_project")) - .and_then(|p| p.try_into::
().ok()) - { - if let Some(path) = wasm_project.remove("path") - .and_then(|p| p.try_into::<String>().ok()) - { - if let Some(name) = wasm_project.remove("package") - .and_then(|p| p.try_into::<String>().ok()) - { - let path = PathBuf::from(path); - if path.exists() { - if name == get_crate_name(&path.join("Cargo.toml")) { - i += 1; - continue - } - } - } - } - } - - fs::remove_dir_all(wasm_workspace.join(&members[i])) - .expect("Removing invalid workspace member can not fail; qed"); - members.remove(i); - } - - members -} - -fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Path) { - let members = find_and_clear_workspace_members(wasm_workspace); - +fn create_project_cargo_toml( + wasm_workspace: &Path, + workspace_root_path: &Path, + crate_name: &str, + crate_path: &Path, + wasm_binary: &str, + enabled_features: impl Iterator<Item = String>, +) { let mut workspace_toml: Table = toml::from_str( - &fs::read_to_string( - workspace_root_path.join("Cargo.toml"), - ).expect("Workspace root `Cargo.toml` exists; qed") - ).expect("Workspace root `Cargo.toml` is a valid toml file; qed"); + &fs::read_to_string(workspace_root_path.join("Cargo.toml")) + .expect("Workspace root `Cargo.toml` exists; qed"), + ) + .expect("Workspace root `Cargo.toml` is a valid toml file; qed"); let mut wasm_workspace_toml = Table::new(); @@ -306,35 +239,58 @@ fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Pa wasm_workspace_toml.insert("profile".into(), profile.into()); - // Add `workspace` with members - let mut workspace = Table::new(); - workspace.insert("members".into(), members.into()); - - wasm_workspace_toml.insert("workspace".into(), workspace.into()); - // Add patch section from the project root `Cargo.toml` - if let Some(mut patch) = workspace_toml.remove("patch").and_then(|p| p.try_into::<Table>
().ok()) { + while let Some(mut patch) = + workspace_toml.remove("patch").and_then(|p| p.try_into::<Table>
().ok()) + { // Iterate over all patches and make the patch path absolute from the workspace root path. - patch.iter_mut() - .filter_map(|p| + patch + .iter_mut() + .filter_map(|p| { p.1.as_table_mut().map(|t| t.iter_mut().filter_map(|t| t.1.as_table_mut())) - ) + }) .flatten() - .for_each(|p| - p.iter_mut() - .filter(|(k, _)| k == &"path") - .for_each(|(_, v)| { - if let Some(path) = v.as_str().map(PathBuf::from) { - if path.is_relative() { - *v = workspace_root_path.join(path).display().to_string().into(); - } + } + }) - }) - ); + .for_each(|p| { + p.iter_mut().filter(|(k, _)| k == &"path").for_each(|(_, v)| { + if let Some(path) = v.as_str().map(PathBuf::from) { + if path.is_relative() { + *v = workspace_root_path.join(path).display().to_string().into(); + } + } + }) + }); wasm_workspace_toml.insert("patch".into(), patch.into()); } + let mut package = Table::new(); + package.insert("name".into(), format!("{}-wasm", crate_name).into()); + package.insert("version".into(), "1.0.0".into()); + package.insert("edition".into(), "2018".into()); + package.insert("resolver".into(), "2".into()); + + wasm_workspace_toml.insert("package".into(), package.into()); + + let mut lib = Table::new(); + lib.insert("name".into(), wasm_binary.into()); + lib.insert("crate-type".into(), vec!["cdylib".to_string()].into()); + + wasm_workspace_toml.insert("lib".into(), lib.into()); + + let mut dependencies = Table::new(); + + let mut wasm_project = Table::new(); + wasm_project.insert("package".into(), crate_name.into()); + wasm_project.insert("path".into(), crate_path.display().to_string().into()); + wasm_project.insert("default-features".into(), false.into()); + wasm_project.insert("features".into(), enabled_features.collect::<Vec<_>>().into()); + + dependencies.insert("wasm-project".into(), wasm_project.into()); + + wasm_workspace_toml.insert("dependencies".into(), dependencies.into()); + + wasm_workspace_toml.insert("workspace".into(), Table::new().into()); + write_file_if_changed( wasm_workspace.join("Cargo.toml"), toml::to_string_pretty(&wasm_workspace_toml).expect("Wasm workspace toml is valid; qed"), @@ -348,7 +304,8 @@ fn find_package_by_manifest_path<'a>( manifest_path: &Path, crate_metadata: &'a cargo_metadata::Metadata, ) -> &'a cargo_metadata::Package { - crate_metadata.packages + crate_metadata + .packages .iter() .find(|p| p.manifest_path == manifest_path) .expect("Wasm project exists in its own metadata; qed") @@ -361,18 +318,19 @@ fn project_enabled_features( ) -> Vec<String> { let package = find_package_by_manifest_path(cargo_manifest, crate_metadata); - let mut enabled_features = package.features.keys() + let mut enabled_features = package + .features + .keys() .filter(|f| { let mut feature_env = f.replace("-", "_"); feature_env.make_ascii_uppercase(); // We don't want to enable the `std`/`default` feature for the wasm build and // we need to check if the feature is enabled by checking the env variable. - *f != "std" - && *f != "default" - && env::var(format!("CARGO_FEATURE_{}", feature_env)) - .map(|v| v == "1") - .unwrap_or_default() + *f != "std" && + *f != "default" && env::var(format!("CARGO_FEATURE_{}", feature_env)) + .map(|v| v == "1") + .unwrap_or_default() }) .cloned() .collect::<Vec<_>>(); @@ -394,56 +352,52 @@ fn has_runtime_wasm_feature_declared( /// Create the project used to build the wasm binary. /// /// # Returns -/// The path to the created project.
-fn create_project(cargo_manifest: &Path, wasm_workspace: &Path, crate_metadata: &Metadata) -> PathBuf { - let crate_name = get_crate_name(cargo_manifest); - let crate_path = cargo_manifest.parent().expect("Parent path exists; qed"); - let wasm_binary = get_wasm_binary_name(cargo_manifest); - let project_folder = wasm_workspace.join(&crate_name); - - fs::create_dir_all(project_folder.join("src")) +/// +/// The path to the created wasm project. +fn create_project( + project_cargo_toml: &Path, + wasm_workspace: &Path, + crate_metadata: &Metadata, + workspace_root_path: &Path, + features_to_enable: Vec<String>, +) -> PathBuf { + let crate_name = get_crate_name(project_cargo_toml); + let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); + let wasm_binary = get_wasm_binary_name(project_cargo_toml); + let wasm_project_folder = wasm_workspace.join(&crate_name); + + fs::create_dir_all(wasm_project_folder.join("src")) .expect("Wasm project dir create can not fail; qed"); - let mut enabled_features = project_enabled_features(&cargo_manifest, &crate_metadata); + let mut enabled_features = project_enabled_features(&project_cargo_toml, &crate_metadata); - if has_runtime_wasm_feature_declared(cargo_manifest, crate_metadata) { + if has_runtime_wasm_feature_declared(project_cargo_toml, crate_metadata) { enabled_features.push("runtime-wasm".into()); } - write_file_if_changed( - project_folder.join("Cargo.toml"), - format!( - r#" - [package] - name = "{crate_name}-wasm" - version = "1.0.0" - edition = "2018" - - [lib] - name = "{wasm_binary}" - crate-type = ["cdylib"] - - [dependencies] - wasm_project = {{ package = "{crate_name}", path = "{crate_path}", default-features = false, features = [ {features} ] }} - "#, - crate_name = crate_name, - crate_path = crate_path.display(), - wasm_binary = wasm_binary, - features = enabled_features.into_iter().map(|f| format!("\"{}\"", f)).join(","), - ) + let mut enabled_features = enabled_features.into_iter().collect::<HashSet<_>>(); + enabled_features.extend(features_to_enable.into_iter()); + + create_project_cargo_toml( + &wasm_project_folder, + workspace_root_path, + &crate_name, + &crate_path, + &wasm_binary, + enabled_features.into_iter(), ); write_file_if_changed( - project_folder.join("src/lib.rs"), + wasm_project_folder.join("src/lib.rs"), "#![no_std] pub use wasm_project::*;", ); - if let Some(crate_lock_file) = find_cargo_lock(cargo_manifest) { + if let Some(crate_lock_file) = find_cargo_lock(project_cargo_toml) { // Use the `Cargo.lock` of the main project. - crate::copy_file_if_changed(crate_lock_file, wasm_workspace.join("Cargo.lock")); + crate::copy_file_if_changed(crate_lock_file, wasm_project_folder.join("Cargo.lock")); } - project_folder + wasm_project_folder } /// Returns if the project should be built as a release. @@ -474,9 +428,14 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), ); - build_cmd.args(&["rustc", "--target=wasm32-unknown-unknown"]) + build_cmd + .args(&["rustc", "--target=wasm32-unknown-unknown"]) .arg(format!("--manifest-path={}", manifest_path.display())) .env("RUSTFLAGS", rustflags) + // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir + // exclusively). The runner project is created in `CARGO_TARGET_DIR` and executing it will + // create a sub target directory inside of `CARGO_TARGET_DIR`.
+ .env_remove("CARGO_TARGET_DIR") // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, ""); @@ -499,20 +458,25 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman } } -/// Compact the WASM binary using `wasm-gc`. Returns the path to the bloaty WASM binary. +/// Compact the WASM binary using `wasm-gc` and compress it using zstd. fn compact_wasm_file( project: &Path, cargo_manifest: &Path, - wasm_workspace: &Path, -) -> (Option, WasmBinaryBloaty) { + wasm_binary_name: Option, +) -> (Option, Option, WasmBinaryBloaty) { let is_release_build = is_release_build(); let target = if is_release_build { "release" } else { "debug" }; - let wasm_binary = get_wasm_binary_name(cargo_manifest); - let wasm_file = wasm_workspace.join("target/wasm32-unknown-unknown") + let default_wasm_binary_name = get_wasm_binary_name(cargo_manifest); + let wasm_file = project + .join("target/wasm32-unknown-unknown") .join(target) - .join(format!("{}.wasm", wasm_binary)); + .join(format!("{}.wasm", default_wasm_binary_name)); + let wasm_compact_file = if is_release_build { - let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary)); + let wasm_compact_file = project.join(format!( + "{}.compact.wasm", + wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()), + )); wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file) .expect("Failed to compact generated WASM binary."); Some(WasmBinary(wasm_compact_file)) @@ -520,7 +484,49 @@ fn compact_wasm_file( None }; - (wasm_compact_file, WasmBinaryBloaty(wasm_file)) + let wasm_compact_compressed_file = wasm_compact_file.as_ref().and_then(|compact_binary| { + let file_name = + wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()); + + let wasm_compact_compressed_file = + project.join(format!("{}.compact.compressed.wasm", file_name)); + + if compress_wasm(&compact_binary.0, &wasm_compact_compressed_file) { + Some(WasmBinary(wasm_compact_compressed_file)) + } else { + None + } + }); + + let bloaty_file_name = if let Some(name) = wasm_binary_name { + format!("{}.wasm", name) + } else { + format!("{}.wasm", default_wasm_binary_name) + }; + + let bloaty_file = project.join(bloaty_file_name); + fs::copy(wasm_file, &bloaty_file).expect("Copying the bloaty file to the project dir."); + + (wasm_compact_file, wasm_compact_compressed_file, WasmBinaryBloaty(bloaty_file)) +} + +fn compress_wasm(wasm_binary_path: &Path, compressed_binary_out_path: &Path) -> bool { + use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; + + let data = fs::read(wasm_binary_path).expect("Failed to read WASM binary"); + if let Some(compressed) = sp_maybe_compressed_blob::compress(&data, CODE_BLOB_BOMB_LIMIT) { + fs::write(compressed_binary_out_path, &compressed[..]) + .expect("Failed to write WASM binary"); + + true + } else { + println!( + "cargo:warning=Writing uncompressed wasm. 
Exceeded maximum size {}", + CODE_BLOB_BOMB_LIMIT, + ); + + false + } } /// Custom wrapper for a [`cargo_metadata::Package`] to store it in @@ -579,7 +585,8 @@ fn generate_rerun_if_changed_instructions( .exec() .expect("`cargo metadata` can not fail!"); - let package = metadata.packages + let package = metadata + .packages .iter() .find(|p| p.manifest_path == cargo_manifest) .expect("The crate package is contained in its own metadata; qed"); @@ -592,12 +599,11 @@ fn generate_rerun_if_changed_instructions( packages.insert(DeduplicatePackage::from(package)); while let Some(dependency) = dependencies.pop() { - let path_or_git_dep = dependency.source - .as_ref() - .map(|s| s.starts_with("git+")) - .unwrap_or(true); + let path_or_git_dep = + dependency.source.as_ref().map(|s| s.starts_with("git+")).unwrap_or(true); - let package = metadata.packages + let package = metadata + .packages .iter() .filter(|p| !p.manifest_path.starts_with(wasm_workspace)) .find(|p| { @@ -636,11 +642,10 @@ fn package_rerun_if_changed(package: &DeduplicatePackage) { .into_iter() .filter_entry(|p| { // Ignore this entry if it is a directory that contains a `Cargo.toml` that is not the - // `Cargo.toml` related to the current package. This is done to ignore sub-crates of a crate. - // If such a sub-crate is a dependency, it will be processed independently anyway. - p.path() == manifest_path - || !p.path().is_dir() - || !p.path().join("Cargo.toml").exists() + // `Cargo.toml` related to the current package. This is done to ignore sub-crates of a + // crate. If such a sub-crate is a dependency, it will be processed independently + // anyway. + p.path() == manifest_path || !p.path().is_dir() || !p.path().join("Cargo.toml").exists() }) .filter_map(|p| p.ok().map(|p| p.into_path())) .filter(|p| { @@ -670,5 +675,6 @@ fn copy_wasm_to_target_directory(cargo_manifest: &Path, wasm_binary: &WasmBinary fs::copy( wasm_binary.wasm_binary_path(), target_dir.join(format!("{}.wasm", get_wasm_binary_name(cargo_manifest))), - ).expect("Copies WASM binary to `WASM_TARGET_DIRECTORY`."); + ) + .expect("Copies WASM binary to `WASM_TARGET_DIRECTORY`."); }
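The `compress_wasm` helper above delegates to `sp-maybe-compressed-blob`, which refuses to produce a compressed blob whose raw size already exceeds the decompression "bomb" limit; in that case the build script falls back to the uncompressed wasm and emits a cargo warning. A rough standalone sketch of that policy (editor's illustration) using the `zstd` crate directly rather than Substrate's wrapper; the limit value, compression level, and file names are assumptions for illustration, not Substrate's real constants:

use std::{fs, io, path::Path};

// Illustrative limit only; the real CODE_BLOB_BOMB_LIMIT lives in
// sp-maybe-compressed-blob.
const BOMB_LIMIT: usize = 16 * 1024 * 1024;

fn compress_with_limit(input: &Path, output: &Path) -> io::Result<bool> {
    let data = fs::read(input)?;
    if data.len() > BOMB_LIMIT {
        // Mirror the build script's fallback: keep the raw bytes and warn.
        eprintln!("blob exceeds the decompression bomb limit; not compressing");
        return Ok(false)
    }
    // Zstd level 3, an arbitrary example setting.
    let compressed = zstd::encode_all(&data[..], 3)?;
    fs::write(output, compressed)?;
    Ok(true)
}

fn main() -> io::Result<()> {
    // Hypothetical file names for illustration only.
    let wrote = compress_with_limit(
        Path::new("runtime.compact.wasm"),
        Path::new("runtime.compact.compressed.wasm"),
    )?;
    println!("compressed: {}", wrote);
    Ok(())
}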